xref: /linux/drivers/scsi/scsi_debug.c (revision d4a379a52c3c2dc44366c4f6722c063a7d0de179)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
4  *  Copyright (C) 1992  Eric Youngdale
5  *  Simulate a host adapter with 2 disks attached.  Do a lot of checking
6  *  to make sure that we are not getting blocks mixed up, and PANIC if
7  *  anything out of the ordinary is seen.
8  * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
9  *
10  * Copyright (C) 2001 - 2021 Douglas Gilbert
11  *
12  *  For documentation see http://sg.danny.cz/sg/scsi_debug.html
13  */
14 
15 
16 #define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
17 
18 #include <linux/module.h>
19 #include <linux/align.h>
20 #include <linux/kernel.h>
21 #include <linux/errno.h>
22 #include <linux/jiffies.h>
23 #include <linux/slab.h>
24 #include <linux/types.h>
25 #include <linux/string.h>
26 #include <linux/fs.h>
27 #include <linux/init.h>
28 #include <linux/proc_fs.h>
29 #include <linux/vmalloc.h>
30 #include <linux/moduleparam.h>
31 #include <linux/scatterlist.h>
32 #include <linux/blkdev.h>
33 #include <linux/crc-t10dif.h>
34 #include <linux/spinlock.h>
35 #include <linux/interrupt.h>
36 #include <linux/atomic.h>
37 #include <linux/hrtimer.h>
38 #include <linux/uuid.h>
39 #include <linux/t10-pi.h>
40 #include <linux/msdos_partition.h>
41 #include <linux/random.h>
42 #include <linux/xarray.h>
43 #include <linux/prefetch.h>
44 #include <linux/debugfs.h>
45 #include <linux/async.h>
46 #include <linux/cleanup.h>
47 
48 #include <net/checksum.h>
49 
50 #include <linux/unaligned.h>
51 
52 #include <scsi/scsi.h>
53 #include <scsi/scsi_cmnd.h>
54 #include <scsi/scsi_device.h>
55 #include <scsi/scsi_host.h>
56 #include <scsi/scsicam.h>
57 #include <scsi/scsi_eh.h>
58 #include <scsi/scsi_tcq.h>
59 #include <scsi/scsi_dbg.h>
60 
61 #include "sd.h"
62 #include "scsi_logging.h"
63 
64 /* make sure inq_product_rev string corresponds to this version */
65 #define SDEBUG_VERSION "0191"	/* format to fit INQUIRY revision field */
66 static const char *sdebug_version_date = "20210520";
67 
68 #define MY_NAME "scsi_debug"
69 
70 /* Additional Sense Code (ASC) */
71 #define NO_ADDITIONAL_SENSE 0x0
72 #define OVERLAP_ATOMIC_COMMAND_ASC 0x0
73 #define OVERLAP_ATOMIC_COMMAND_ASCQ 0x23
74 #define FILEMARK_DETECTED_ASCQ 0x1
75 #define EOP_EOM_DETECTED_ASCQ 0x2
76 #define BEGINNING_OF_P_M_DETECTED_ASCQ 0x4
77 #define EOD_DETECTED_ASCQ 0x5
78 #define LOGICAL_UNIT_NOT_READY 0x4
79 #define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
80 #define UNRECOVERED_READ_ERR 0x11
81 #define PARAMETER_LIST_LENGTH_ERR 0x1a
82 #define INVALID_OPCODE 0x20
83 #define LBA_OUT_OF_RANGE 0x21
84 #define INVALID_FIELD_IN_CDB 0x24
85 #define INVALID_FIELD_IN_PARAM_LIST 0x26
86 #define WRITE_PROTECTED 0x27
87 #define UA_READY_ASC 0x28
88 #define UA_RESET_ASC 0x29
89 #define UA_CHANGED_ASC 0x2a
90 #define TOO_MANY_IN_PARTITION_ASC 0x3b
91 #define TARGET_CHANGED_ASC 0x3f
92 #define LUNS_CHANGED_ASCQ 0x0e
93 #define INSUFF_RES_ASC 0x55
94 #define INSUFF_RES_ASCQ 0x3
95 #define POWER_ON_RESET_ASCQ 0x0
96 #define POWER_ON_OCCURRED_ASCQ 0x1
97 #define BUS_RESET_ASCQ 0x2	/* scsi bus reset occurred */
98 #define MODE_CHANGED_ASCQ 0x1	/* mode parameters changed */
99 #define CAPACITY_CHANGED_ASCQ 0x9
100 #define SAVING_PARAMS_UNSUP 0x39
101 #define TRANSPORT_PROBLEM 0x4b
102 #define THRESHOLD_EXCEEDED 0x5d
103 #define LOW_POWER_COND_ON 0x5e
104 #define MISCOMPARE_VERIFY_ASC 0x1d
105 #define MICROCODE_CHANGED_ASCQ 0x1	/* with TARGET_CHANGED_ASC */
106 #define MICROCODE_CHANGED_WO_RESET_ASCQ 0x16
107 #define WRITE_ERROR_ASC 0xc
108 #define UNALIGNED_WRITE_ASCQ 0x4
109 #define WRITE_BOUNDARY_ASCQ 0x5
110 #define READ_INVDATA_ASCQ 0x6
111 #define READ_BOUNDARY_ASCQ 0x7
112 #define ATTEMPT_ACCESS_GAP 0x9
113 #define INSUFF_ZONE_ASCQ 0xe
114 /* see drivers/scsi/sense_codes.h */
115 
116 /* Additional Sense Code Qualifier (ASCQ) */
117 #define ACK_NAK_TO 0x3
118 
119 /* Default values for driver parameters */
120 #define DEF_NUM_HOST   1
121 #define DEF_NUM_TGTS   1
122 #define DEF_MAX_LUNS   1
123 /* With these defaults, this driver will make 1 host with 1 target
124  * (id 0) containing 1 logical unit (lun 0). That is 1 device.
125  */
126 #define DEF_ATO 1
127 #define DEF_CDB_LEN 10
128 #define DEF_JDELAY   1		/* if > 0 unit is a jiffy */
129 #define DEF_DEV_SIZE_PRE_INIT   0
130 #define DEF_DEV_SIZE_MB   8
131 #define DEF_ZBC_DEV_SIZE_MB   128
132 #define DEF_DIF 0
133 #define DEF_DIX 0
134 #define DEF_PER_HOST_STORE false
135 #define DEF_D_SENSE   0
136 #define DEF_EVERY_NTH   0
137 #define DEF_FAKE_RW	0
138 #define DEF_GUARD 0
139 #define DEF_HOST_LOCK 0
140 #define DEF_LBPU 0
141 #define DEF_LBPWS 0
142 #define DEF_LBPWS10 0
143 #define DEF_LBPRZ 1
144 #define DEF_LOWEST_ALIGNED 0
145 #define DEF_NDELAY   0		/* if > 0 unit is a nanosecond */
146 #define DEF_NO_LUN_0   0
147 #define DEF_NUM_PARTS   0
148 #define DEF_OPTS   0
149 #define DEF_OPT_BLKS 1024
150 #define DEF_PHYSBLK_EXP 0
151 #define DEF_OPT_XFERLEN_EXP 0
152 #define DEF_PTYPE   TYPE_DISK
153 #define DEF_RANDOM false
154 #define DEF_REMOVABLE false
155 #define DEF_SCSI_LEVEL   7    /* INQUIRY, byte2 [6->SPC-4; 7->SPC-5] */
156 #define DEF_SECTOR_SIZE 512
157 #define DEF_UNMAP_ALIGNMENT 0
158 #define DEF_UNMAP_GRANULARITY 1
159 #define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
160 #define DEF_UNMAP_MAX_DESC 256
161 #define DEF_VIRTUAL_GB   0
162 #define DEF_VPD_USE_HOSTNO 1
163 #define DEF_WRITESAME_LENGTH 0xFFFF
164 #define DEF_ATOMIC_WR 0
165 #define DEF_ATOMIC_WR_MAX_LENGTH 128
166 #define DEF_ATOMIC_WR_ALIGN 2
167 #define DEF_ATOMIC_WR_GRAN 2
168 #define DEF_ATOMIC_WR_MAX_LENGTH_BNDRY (DEF_ATOMIC_WR_MAX_LENGTH)
169 #define DEF_ATOMIC_WR_MAX_BNDRY 128
170 #define DEF_STRICT 0
171 #define DEF_STATISTICS false
172 #define DEF_SUBMIT_QUEUES 1
173 #define DEF_TUR_MS_TO_READY 0
174 #define DEF_UUID_CTL 0
175 #define JDELAY_OVERRIDDEN -9999
176 
177 /* Default parameters for ZBC drives */
178 #define DEF_ZBC_ZONE_SIZE_MB	128
179 #define DEF_ZBC_MAX_OPEN_ZONES	8
180 #define DEF_ZBC_NR_CONV_ZONES	1
181 
182 /* Default parameters for tape drives */
183 #define TAPE_DEF_DENSITY  0x0
184 #define TAPE_BAD_DENSITY  0x65
185 #define TAPE_DEF_BLKSIZE  0
186 #define TAPE_MIN_BLKSIZE  512
187 #define TAPE_MAX_BLKSIZE  1048576
188 #define TAPE_EW 20
189 #define TAPE_MAX_PARTITIONS 2
190 #define TAPE_UNITS 10000
191 #define TAPE_PARTITION_1_UNITS 1000
192 
193 /* The tape block data definitions */
194 #define TAPE_BLOCK_FM_FLAG   ((u32)0x1 << 30)
195 #define TAPE_BLOCK_EOD_FLAG  ((u32)0x2 << 30)
196 #define TAPE_BLOCK_MARK_MASK ((u32)0x3 << 30)
197 #define TAPE_BLOCK_SIZE_MASK (~TAPE_BLOCK_MARK_MASK)
198 #define TAPE_BLOCK_MARK(a) (a & TAPE_BLOCK_MARK_MASK)
199 #define TAPE_BLOCK_SIZE(a) (a & TAPE_BLOCK_SIZE_MASK)
200 #define IS_TAPE_BLOCK_FM(a)   ((a & TAPE_BLOCK_FM_FLAG) != 0)
201 #define IS_TAPE_BLOCK_EOD(a)  ((a & TAPE_BLOCK_EOD_FLAG) != 0)
202 
/*
 * In-memory layout of one simulated tape block: a 32-bit flags+size word
 * (decoded by the TAPE_BLOCK_* macros above) followed by the block data.
 */
struct tape_block {
	u32 fl_size;		/* bits 31:30 = mark flags, bits 29:0 = block size */
	unsigned char data[4];	/* start of block data (blocks allocated larger) */
};
207 
208 /* Flags for sense data */
209 #define SENSE_FLAG_FILEMARK  0x80
210 #define SENSE_FLAG_EOM 0x40
211 #define SENSE_FLAG_ILI 0x20
212 
213 #define SDEBUG_LUN_0_VAL 0
214 
215 /* bit mask values for sdebug_opts */
216 #define SDEBUG_OPT_NOISE		1
217 #define SDEBUG_OPT_MEDIUM_ERR		2
218 #define SDEBUG_OPT_TIMEOUT		4
219 #define SDEBUG_OPT_RECOVERED_ERR	8
220 #define SDEBUG_OPT_TRANSPORT_ERR	16
221 #define SDEBUG_OPT_DIF_ERR		32
222 #define SDEBUG_OPT_DIX_ERR		64
223 #define SDEBUG_OPT_MAC_TIMEOUT		128
224 #define SDEBUG_OPT_SHORT_TRANSFER	0x100
225 #define SDEBUG_OPT_Q_NOISE		0x200
226 #define SDEBUG_OPT_ALL_TSF		0x400	/* ignore */
227 #define SDEBUG_OPT_RARE_TSF		0x800
228 #define SDEBUG_OPT_N_WCE		0x1000
229 #define SDEBUG_OPT_RESET_NOISE		0x2000
230 #define SDEBUG_OPT_NO_CDB_NOISE		0x4000
231 #define SDEBUG_OPT_HOST_BUSY		0x8000
232 #define SDEBUG_OPT_CMD_ABORT		0x10000
233 #define SDEBUG_OPT_UNALIGNED_WRITE	0x20000
234 #define SDEBUG_OPT_ALL_NOISE (SDEBUG_OPT_NOISE | SDEBUG_OPT_Q_NOISE | \
235 			      SDEBUG_OPT_RESET_NOISE)
236 #define SDEBUG_OPT_ALL_INJECTING (SDEBUG_OPT_RECOVERED_ERR | \
237 				  SDEBUG_OPT_TRANSPORT_ERR | \
238 				  SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR | \
239 				  SDEBUG_OPT_SHORT_TRANSFER | \
240 				  SDEBUG_OPT_HOST_BUSY | \
241 				  SDEBUG_OPT_CMD_ABORT | \
242 				  SDEBUG_OPT_UNALIGNED_WRITE)
243 #define SDEBUG_OPT_RECOV_DIF_DIX (SDEBUG_OPT_RECOVERED_ERR | \
244 				  SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR)
245 
246 /* As indicated in SAM-5 and SPC-4 Unit Attentions (UAs) are returned in
247  * priority order. In the subset implemented here lower numbers have higher
248  * priority. The UA numbers should be a sequence starting from 0 with
249  * SDEBUG_NUM_UAS being 1 higher than the highest numbered UA. */
250 #define SDEBUG_UA_POR 0		/* Power on, reset, or bus device reset */
251 #define SDEBUG_UA_POOCCUR 1	/* Power on occurred */
252 #define SDEBUG_UA_BUS_RESET 2
253 #define SDEBUG_UA_MODE_CHANGED 3
254 #define SDEBUG_UA_CAPACITY_CHANGED 4
255 #define SDEBUG_UA_LUNS_CHANGED 5
256 #define SDEBUG_UA_MICROCODE_CHANGED 6	/* simulate firmware change */
257 #define SDEBUG_UA_MICROCODE_CHANGED_WO_RESET 7
258 #define SDEBUG_UA_NOT_READY_TO_READY 8
259 #define SDEBUG_NUM_UAS 9
260 
261 /* when 1==SDEBUG_OPT_MEDIUM_ERR, a medium error is simulated at this
262  * sector on read commands: */
263 #define OPT_MEDIUM_ERR_ADDR   0x1234 /* that's sector 4660 in decimal */
264 #define OPT_MEDIUM_ERR_NUM    10     /* number of consecutive medium errs */
265 
266 /* SDEBUG_CANQUEUE is the maximum number of commands that can be queued
267  * (for response) per submit queue at one time. Can be reduced by max_queue
268  * option. Command responses are not queued when jdelay=0 and ndelay=0. The
269  * per-device DEF_CMD_PER_LUN can be changed via sysfs:
270  * /sys/class/scsi_device/<h:c:t:l>/device/queue_depth
271  * but cannot exceed SDEBUG_CANQUEUE .
272  */
273 #define SDEBUG_CANQUEUE_WORDS  3	/* a WORD is bits in a long */
274 #define SDEBUG_CANQUEUE  (SDEBUG_CANQUEUE_WORDS * BITS_PER_LONG)
275 #define DEF_CMD_PER_LUN  SDEBUG_CANQUEUE
276 
277 /* UA - Unit Attention; SA - Service Action; SSU - Start Stop Unit */
278 #define F_D_IN			1	/* Data-in command (e.g. READ) */
279 #define F_D_OUT			2	/* Data-out command (e.g. WRITE) */
280 #define F_D_OUT_MAYBE		4	/* WRITE SAME, NDOB bit */
281 #define F_D_UNKN		8
282 #define F_RL_WLUN_OK		0x10	/* allowed with REPORT LUNS W-LUN */
283 #define F_SKIP_UA		0x20	/* bypass UAs (e.g. INQUIRY command) */
284 #define F_DELAY_OVERR		0x40	/* for commands like INQUIRY */
285 #define F_SA_LOW		0x80	/* SA is in cdb byte 1, bits 4 to 0 */
286 #define F_SA_HIGH		0x100	/* SA is in cdb bytes 8 and 9 */
287 #define F_INV_OP		0x200	/* invalid opcode (not supported) */
288 #define F_FAKE_RW		0x400	/* bypass resp_*() when fake_rw set */
289 #define F_M_ACCESS		0x800	/* media access, reacts to SSU state */
290 #define F_SSU_DELAY		0x1000	/* SSU command delay (long-ish) */
291 #define F_SYNC_DELAY		0x2000	/* SYNCHRONIZE CACHE delay */
292 
293 /* Useful combinations of the above flags */
294 #define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
295 #define FF_MEDIA_IO (F_M_ACCESS | F_FAKE_RW)
296 #define FF_SA (F_SA_HIGH | F_SA_LOW)
297 #define F_LONG_DELAY		(F_SSU_DELAY | F_SYNC_DELAY)
298 
299 /* Device selection bit mask */
300 #define DS_ALL     0xffffffff
301 #define DS_SBC     (1 << TYPE_DISK)
302 #define DS_SSC     (1 << TYPE_TAPE)
303 #define DS_ZBC     (1 << TYPE_ZBC)
304 
305 #define DS_NO_SSC  (DS_ALL & ~DS_SSC)
306 
307 #define SDEBUG_MAX_PARTS 4
308 
309 #define SDEBUG_MAX_CMD_LEN 32
310 
311 #define SDEB_XA_NOT_IN_USE XA_MARK_1
312 
/* Zone types (zbcr05 table 25) */
enum sdebug_z_type {
	ZBC_ZTYPE_CNV	= 0x1,	/* conventional zone */
	ZBC_ZTYPE_SWR	= 0x2,	/* sequential write required */
	ZBC_ZTYPE_SWP	= 0x3,	/* sequential write preferred */
	/* ZBC_ZTYPE_SOBR = 0x4, */
	ZBC_ZTYPE_GAP	= 0x5,	/* gap zone */
};
321 
/* Zone conditions; enumeration names taken from table 26, zbcr05 */
enum sdebug_z_cond {
	ZBC_NOT_WRITE_POINTER	= 0x0,
	ZC1_EMPTY		= 0x1,
	ZC2_IMPLICIT_OPEN	= 0x2,
	ZC3_EXPLICIT_OPEN	= 0x3,
	ZC4_CLOSED		= 0x4,
	ZC6_READ_ONLY		= 0xd,
	ZC5_FULL		= 0xe,
	ZC7_OFFLINE		= 0xf,
};
333 
struct sdeb_zone_state {	/* ZBC: per zone state */
	enum sdebug_z_type z_type;	/* conventional/sequential/gap (table 25) */
	enum sdebug_z_cond z_cond;	/* empty/open/closed/full/... (table 26) */
	bool z_non_seq_resource;	/* non-sequential-write resources active */
	unsigned int z_size;		/* zone size in logical blocks */
	sector_t z_start;		/* first LBA of this zone */
	sector_t z_wp;			/* current write pointer LBA */
};
342 
/* Kinds of error injection selectable via debugfs (see sdebug_err_inject) */
enum sdebug_err_type {
	ERR_TMOUT_CMD		= 0,	/* make specific scsi command timeout */
	ERR_FAIL_QUEUE_CMD	= 1,	/* make specific scsi command's */
					/* queuecmd return failed */
	ERR_FAIL_CMD		= 2,	/* make specific scsi command's */
					/* queuecmd return succeed but */
					/* with errors set in scsi_cmnd */
	ERR_ABORT_CMD_FAILED	= 3,	/* control return FAILED from */
					/* scsi_debug_abort() */
	ERR_LUN_RESET_FAILED	= 4,	/* control return FAILED from */
					/* scsi_debug_device_reset() */
};
355 
/*
 * One injected error rule, kept on a per-device RCU-protected list
 * (sdebug_dev_info.inject_err_list); matched against incoming commands.
 */
struct sdebug_err_inject {
	int type;		/* an enum sdebug_err_type value */
	struct list_head list;	/* link in device's inject_err_list */
	int cnt;		/* remaining number of times to inject */
	unsigned char cmd;	/* SCSI opcode (cdb[0]) this rule applies to */
	struct rcu_head rcu;	/* for deferred freeing of list entries */

	union {
		/*
		 * For ERR_FAIL_QUEUE_CMD
		 */
		int queuecmd_ret;

		/*
		 * For ERR_FAIL_CMD
		 */
		struct {
			unsigned char host_byte;
			unsigned char driver_byte;
			unsigned char status_byte;
			unsigned char sense_key;
			unsigned char asc;
			unsigned char asq;
		};
	};
};
382 
/* Per simulated logical unit (device) state */
struct sdebug_dev_info {
	struct list_head dev_list;	/* link in host's dev_info_list */
	unsigned int channel;		/* SCSI channel number */
	unsigned int target;		/* SCSI target id */
	u64 lun;			/* logical unit number */
	uuid_t lu_name;			/* logical unit name (UUID) */
	struct sdebug_host_info *sdbg_host;	/* back-pointer to owning host */
	unsigned long uas_bm[1];	/* pending Unit Attentions, bits are SDEBUG_UA_* */
	atomic_t stopped;	/* 1: by SSU, 2: device start */
	bool used;

	/* For ZBC devices */
	bool zoned;			/* true when emulating a zoned (ZBC) device */
	unsigned int zcap;		/* zone capacity in logical blocks */
	unsigned int zsize;		/* zone size in logical blocks */
	unsigned int zsize_shift;	/* log2(zsize); 0 if zsize not a power of 2 */
	unsigned int nr_zones;		/* total number of zones */
	unsigned int nr_conv_zones;	/* leading conventional zones */
	unsigned int nr_seq_zones;	/* sequential write zones */
	unsigned int nr_imp_open;	/* zones currently implicitly open */
	unsigned int nr_exp_open;	/* zones currently explicitly open */
	unsigned int nr_closed;		/* zones currently closed */
	unsigned int max_open;		/* limit on simultaneously open zones */
	ktime_t create_ts;	/* time since bootup that this device was created */
	struct sdeb_zone_state *zstate;	/* array of nr_zones per-zone states */

	/* For tapes */
	unsigned int tape_blksize;	/* current block size (0 = variable) */
	unsigned int tape_density;
	unsigned char tape_partition;	/* currently active partition */
	unsigned char tape_nbr_partitions;
	unsigned char tape_pending_nbr_partitions;
	unsigned int tape_pending_part_0_size;
	unsigned int tape_pending_part_1_size;
	unsigned char tape_dce;
	unsigned int tape_location[TAPE_MAX_PARTITIONS];	/* per-partition position */
	unsigned int tape_eop[TAPE_MAX_PARTITIONS];		/* per-partition end-of-partition */
	struct tape_block *tape_blocks[TAPE_MAX_PARTITIONS];	/* per-partition block storage */

	struct dentry *debugfs_entry;	/* per-device debugfs directory */
	struct spinlock list_lock;	/* protects inject_err_list updates */
	struct list_head inject_err_list;	/* sdebug_err_inject rules (RCU list) */
};
426 
/* Per simulated target state */
struct sdebug_target_info {
	bool reset_fail;		/* when true, target reset reports failure */
	struct dentry *debugfs_entry;	/* per-target debugfs directory */
};
431 
/* Per simulated host (adapter) state */
struct sdebug_host_info {
	struct list_head host_list;	/* link in global list of hosts */
	int si_idx;	/* sdeb_store_info (per host) xarray index */
	struct Scsi_Host *shost;	/* mid-level host this entry shadows */
	struct device dev;		/* pseudo device used for registration */
	struct list_head dev_info_list;	/* sdebug_dev_info entries on this host */
};
439 
/* There is an xarray of pointers to this struct's objects, one per host */
struct sdeb_store_info {
	rwlock_t macc_data_lck;	/* for media data access on this store */
	rwlock_t macc_meta_lck;	/* for atomic media meta access on this store */
	rwlock_t macc_sector_lck;	/* per-sector media data access on this store */
	u8 *storep;		/* user data storage (ram) */
	struct t10_pi_tuple *dif_storep; /* protection info */
	void *map_storep;	/* provisioning map */
};
449 
450 #define dev_to_sdebug_host(d)	\
451 	container_of(d, struct sdebug_host_info, dev)
452 
453 #define shost_to_sdebug_host(shost)	\
454 	dev_to_sdebug_host(shost->dma_dev)
455 
/* How a command's completion is deferred: not at all, hrtimer, work queue,
 * or left for the blk-mq poll path to reap. */
enum sdeb_defer_type {SDEB_DEFER_NONE = 0, SDEB_DEFER_HRT = 1,
		      SDEB_DEFER_WQ = 2, SDEB_DEFER_POLL = 3};
458 
/* Deferred-completion bookkeeping, embedded in each sdebug_scsi_cmd */
struct sdebug_defer {
	struct hrtimer hrt;	/* used when defer_t == SDEB_DEFER_HRT */
	struct execute_work ew;	/* used when defer_t == SDEB_DEFER_WQ */
	ktime_t cmpl_ts;/* time since boot to complete this cmd */
	int issuing_cpu;	/* cpu the command was submitted on */
	bool aborted;	/* true when blk_abort_request() already called */
	enum sdeb_defer_type defer_t;	/* which deferral mechanism is in use */
};
467 
/* Per-command private data (lives in the scsi_cmnd's driver area) */
struct sdebug_scsi_cmd {
	spinlock_t   lock;		/* protects this command's deferral state */
	struct sdebug_defer sd_dp;	/* deferred-completion state for this cmd */
};
472 
473 static atomic_t sdebug_cmnd_count;   /* number of incoming commands */
474 static atomic_t sdebug_completions;  /* count of deferred completions */
475 static atomic_t sdebug_miss_cpus;    /* submission + completion cpus differ */
476 static atomic_t sdebug_a_tsf;	     /* 'almost task set full' counter */
477 static atomic_t sdeb_inject_pending;
478 static atomic_t sdeb_mq_poll_count;  /* bumped when mq_poll returns > 0 */
479 
/*
 * Describes how one SCSI opcode (or opcode + service action) is handled:
 * which response function services it and which cdb bytes are valid.
 */
struct opcode_info_t {
	u8 num_attached;	/* 0 if this is it (i.e. a leaf); use 0xff */
				/* for terminating element */
	u8 opcode;		/* if num_attached > 0, preferred */
	u16 sa;			/* service action */
	u32 devsel;		/* device type mask for this definition */
	u32 flags;		/* OR-ed set of SDEB_F_* */
	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);	/* response function */
	const struct opcode_info_t *arrp;  /* num_attached elements or NULL */
	u8 len_mask[16];	/* len_mask[0]-->cdb_len, then mask for cdb */
				/* 1 to min(cdb_len, 15); ignore cdb[15...] */
};
492 
/* SCSI opcodes (first byte of cdb) of interest mapped onto these indexes.
 * Values index into opcode_info_arr[]; opcode_ind_arr[] below maps cdb[0]
 * to one of these. */
enum sdeb_opcode_index {
	SDEB_I_INVALID_OPCODE =	0,
	SDEB_I_INQUIRY = 1,
	SDEB_I_REPORT_LUNS = 2,
	SDEB_I_REQUEST_SENSE = 3,
	SDEB_I_TEST_UNIT_READY = 4,
	SDEB_I_MODE_SENSE = 5,		/* 6, 10 */
	SDEB_I_MODE_SELECT = 6,		/* 6, 10 */
	SDEB_I_LOG_SENSE = 7,
	SDEB_I_READ_CAPACITY = 8,	/* 10; 16 is in SA_IN(16) */
	SDEB_I_READ = 9,		/* 6, 10, 12, 16 */
	SDEB_I_WRITE = 10,		/* 6, 10, 12, 16 */
	SDEB_I_START_STOP = 11,
	SDEB_I_SERV_ACT_IN_16 = 12,	/* add ...SERV_ACT_IN_12 if needed */
	SDEB_I_SERV_ACT_OUT_16 = 13,	/* add ...SERV_ACT_OUT_12 if needed */
	SDEB_I_MAINT_IN = 14,
	SDEB_I_MAINT_OUT = 15,
	SDEB_I_VERIFY = 16,		/* VERIFY(10), VERIFY(16) */
	SDEB_I_VARIABLE_LEN = 17,	/* READ(32), WRITE(32), WR_SCAT(32) */
	SDEB_I_RESERVE = 18,		/* 6, 10 */
	SDEB_I_RELEASE = 19,		/* 6, 10 */
	SDEB_I_ALLOW_REMOVAL = 20,	/* PREVENT ALLOW MEDIUM REMOVAL */
	SDEB_I_REZERO_UNIT = 21,	/* REWIND in SSC */
	SDEB_I_ATA_PT = 22,		/* 12, 16 */
	SDEB_I_SEND_DIAG = 23,
	SDEB_I_UNMAP = 24,
	SDEB_I_WRITE_BUFFER = 25,
	SDEB_I_WRITE_SAME = 26,		/* 10, 16 */
	SDEB_I_SYNC_CACHE = 27,		/* 10, 16 */
	SDEB_I_COMP_WRITE = 28,
	SDEB_I_PRE_FETCH = 29,		/* 10, 16 */
	SDEB_I_ZONE_OUT = 30,		/* 0x94+SA; includes no data xfer */
	SDEB_I_ZONE_IN = 31,		/* 0x95+SA; all have data-in */
	SDEB_I_ATOMIC_WRITE_16 = 32,
	SDEB_I_READ_BLOCK_LIMITS = 33,
	SDEB_I_LOCATE = 34,
	SDEB_I_WRITE_FILEMARKS = 35,
	SDEB_I_SPACE = 36,
	SDEB_I_FORMAT_MEDIUM = 37,
	SDEB_I_ERASE = 38,
	SDEB_I_LAST_ELEM_P1 = 39,	/* keep this last (previous + 1) */
};
536 
537 
/* Maps cdb[0] (the SCSI opcode) to an SDEB_I_* index into opcode_info_arr[].
 * A 0 entry means SDEB_I_INVALID_OPCODE (unsupported opcode). */
static const unsigned char opcode_ind_arr[256] = {
/* 0x0; 0x0->0x1f: 6 byte cdbs */
	SDEB_I_TEST_UNIT_READY, SDEB_I_REZERO_UNIT, 0, SDEB_I_REQUEST_SENSE,
	    SDEB_I_FORMAT_MEDIUM, SDEB_I_READ_BLOCK_LIMITS, 0, 0,
	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, 0,
	SDEB_I_WRITE_FILEMARKS, SDEB_I_SPACE, SDEB_I_INQUIRY, 0, 0,
	    SDEB_I_MODE_SELECT, SDEB_I_RESERVE, SDEB_I_RELEASE,
	0, SDEB_I_ERASE, SDEB_I_MODE_SENSE, SDEB_I_START_STOP, 0, SDEB_I_SEND_DIAG,
	    SDEB_I_ALLOW_REMOVAL, 0,
/* 0x20; 0x20->0x3f: 10 byte cdbs */
	0, 0, 0, 0, 0, SDEB_I_READ_CAPACITY, 0, 0,
	SDEB_I_READ, 0, SDEB_I_WRITE, SDEB_I_LOCATE, 0, 0, 0, SDEB_I_VERIFY,
	0, 0, 0, 0, SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, 0,
	0, 0, 0, SDEB_I_WRITE_BUFFER, 0, 0, 0, 0,
/* 0x40; 0x40->0x5f: 10 byte cdbs */
	0, SDEB_I_WRITE_SAME, SDEB_I_UNMAP, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, SDEB_I_LOG_SENSE, 0, 0,
	0, 0, 0, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
	    SDEB_I_RELEASE,
	0, 0, SDEB_I_MODE_SENSE, 0, 0, 0, 0, 0,
/* 0x60; 0x60->0x7d are reserved, 0x7e is "extended cdb" */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, SDEB_I_VARIABLE_LEN,
/* 0x80; 0x80->0x9f: 16 byte cdbs */
	0, 0, 0, 0, 0, SDEB_I_ATA_PT, 0, 0,
	SDEB_I_READ, SDEB_I_COMP_WRITE, SDEB_I_WRITE, 0,
	0, 0, 0, SDEB_I_VERIFY,
	SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, SDEB_I_WRITE_SAME,
	SDEB_I_ZONE_OUT, SDEB_I_ZONE_IN, 0, 0,
	0, 0, 0, 0,
	SDEB_I_ATOMIC_WRITE_16, 0, SDEB_I_SERV_ACT_IN_16, SDEB_I_SERV_ACT_OUT_16,
/* 0xa0; 0xa0->0xbf: 12 byte cdbs */
	SDEB_I_REPORT_LUNS, SDEB_I_ATA_PT, 0, SDEB_I_MAINT_IN,
	     SDEB_I_MAINT_OUT, 0, 0, 0,
	SDEB_I_READ, 0 /* SDEB_I_SERV_ACT_OUT_12 */, SDEB_I_WRITE,
	     0 /* SDEB_I_SERV_ACT_IN_12 */, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
/* 0xc0; 0xc0->0xff: vendor specific */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
};
583 
584 /*
585  * The following "response" functions return the SCSI mid-level's 4 byte
586  * tuple-in-an-int. To handle commands with an IMMED bit, for a faster
587  * command completion, they can mask their return value with
588  * SDEG_RES_IMMED_MASK .
589  */
590 #define SDEG_RES_IMMED_MASK 0x40000000
591 
592 static int resp_inquiry(struct scsi_cmnd *, struct sdebug_dev_info *);
593 static int resp_report_luns(struct scsi_cmnd *, struct sdebug_dev_info *);
594 static int resp_requests(struct scsi_cmnd *, struct sdebug_dev_info *);
595 static int resp_mode_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
596 static int resp_mode_select(struct scsi_cmnd *, struct sdebug_dev_info *);
597 static int resp_log_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
598 static int resp_readcap(struct scsi_cmnd *, struct sdebug_dev_info *);
599 static int resp_read_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
600 static int resp_read_tape(struct scsi_cmnd *, struct sdebug_dev_info *);
601 static int resp_write_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
602 static int resp_write_tape(struct scsi_cmnd *, struct sdebug_dev_info *);
603 static int resp_write_scat(struct scsi_cmnd *, struct sdebug_dev_info *);
604 static int resp_start_stop(struct scsi_cmnd *, struct sdebug_dev_info *);
605 static int resp_readcap16(struct scsi_cmnd *, struct sdebug_dev_info *);
606 static int resp_get_lba_status(struct scsi_cmnd *, struct sdebug_dev_info *);
607 static int resp_get_stream_status(struct scsi_cmnd *scp,
608 				  struct sdebug_dev_info *devip);
609 static int resp_report_tgtpgs(struct scsi_cmnd *, struct sdebug_dev_info *);
610 static int resp_unmap(struct scsi_cmnd *, struct sdebug_dev_info *);
611 static int resp_rsup_opcodes(struct scsi_cmnd *, struct sdebug_dev_info *);
612 static int resp_rsup_tmfs(struct scsi_cmnd *, struct sdebug_dev_info *);
613 static int resp_verify(struct scsi_cmnd *, struct sdebug_dev_info *);
614 static int resp_write_same_10(struct scsi_cmnd *, struct sdebug_dev_info *);
615 static int resp_write_same_16(struct scsi_cmnd *, struct sdebug_dev_info *);
616 static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *);
617 static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *);
618 static int resp_sync_cache(struct scsi_cmnd *, struct sdebug_dev_info *);
619 static int resp_pre_fetch(struct scsi_cmnd *, struct sdebug_dev_info *);
620 static int resp_report_zones(struct scsi_cmnd *, struct sdebug_dev_info *);
621 static int resp_atomic_write(struct scsi_cmnd *, struct sdebug_dev_info *);
622 static int resp_open_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
623 static int resp_close_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
624 static int resp_finish_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
625 static int resp_rwp_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
626 static int resp_read_blklimits(struct scsi_cmnd *, struct sdebug_dev_info *);
627 static int resp_locate(struct scsi_cmnd *, struct sdebug_dev_info *);
628 static int resp_write_filemarks(struct scsi_cmnd *, struct sdebug_dev_info *);
629 static int resp_space(struct scsi_cmnd *, struct sdebug_dev_info *);
630 static int resp_read_position(struct scsi_cmnd *, struct sdebug_dev_info *);
631 static int resp_rewind(struct scsi_cmnd *, struct sdebug_dev_info *);
632 static int resp_format_medium(struct scsi_cmnd *, struct sdebug_dev_info *);
633 static int resp_erase(struct scsi_cmnd *, struct sdebug_dev_info *);
634 
635 static int sdebug_do_add_host(bool mk_new_store);
636 static int sdebug_add_host_helper(int per_host_idx);
637 static void sdebug_do_remove_host(bool the_end);
638 static int sdebug_add_store(void);
639 static void sdebug_erase_store(int idx, struct sdeb_store_info *sip);
640 static void sdebug_erase_all_stores(bool apart_from_first);
641 
642 /*
643  * The following are overflow arrays for cdbs that "hit" the same index in
644  * the opcode_info_arr array. The most time sensitive (or commonly used) cdb
645  * should be placed in opcode_info_arr[], the others should be placed here.
646  */
/* Overflow entry for the MODE SENSE slot: MODE SENSE(6) */
static const struct opcode_info_t msense_iarr[] = {
	{0, 0x1a, 0, DS_ALL, F_D_IN, NULL, NULL,
	    {6,  0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};
651 
/* Overflow entry for the MODE SELECT slot: MODE SELECT(6) */
static const struct opcode_info_t mselect_iarr[] = {
	{0, 0x15, 0, DS_ALL, F_D_OUT, NULL, NULL,
	    {6,  0xf1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};
656 
/* Overflow entries for the READ(16) slot: READ(10), READ(6) and READ(12) */
static const struct opcode_info_t read_iarr[] = {
	{0, 0x28, 0, DS_NO_SSC, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(10) */
	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },
	{0, 0x8, 0, DS_NO_SSC, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL, /* READ(6) disk */
	    {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x8, 0, DS_SSC, F_D_IN | FF_MEDIA_IO, resp_read_tape, NULL, /* READ(6) tape */
	    {6,  0x03, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0xa8, 0, DS_NO_SSC, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(12) */
	    {12,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf,
	     0xc7, 0, 0, 0, 0} },
};
669 
/* Overflow entries for the WRITE(16) slot: WRITE(10), WRITE(6) and WRITE(12) */
static const struct opcode_info_t write_iarr[] = {
	{0, 0x2a, 0, DS_NO_SSC, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(10) */
	    NULL, {10,  0xfb, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7,
		   0, 0, 0, 0, 0, 0} },
	{0, 0xa, 0, DS_NO_SSC, F_D_OUT | FF_MEDIA_IO, resp_write_dt0, /* WRITE(6) disk */
	    NULL, {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0,
		   0, 0, 0} },
	{0, 0xa, 0, DS_SSC, F_D_OUT | FF_MEDIA_IO, resp_write_tape, /* WRITE(6) tape */
	    NULL, {6,  0x01, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0,
		   0, 0, 0} },
	{0, 0xaa, 0, DS_NO_SSC, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(12) */
	    NULL, {12,  0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		   0xbf, 0xc7, 0, 0, 0, 0} },
};
684 
/* Overflow entry for the VERIFY slot: VERIFY(10) */
static const struct opcode_info_t verify_iarr[] = {
	{0, 0x2f, 0, DS_NO_SSC, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,/* VERIFY(10) */
	    NULL, {10,  0xf7, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xff, 0xff, 0xc7,
		   0, 0, 0, 0, 0, 0} },
};
690 
/* Overflow entries for SERVICE ACTION IN(16): further 0x9e service actions */
static const struct opcode_info_t sa_in_16_iarr[] = {
	{0, 0x9e, 0x12, DS_NO_SSC, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL,
	    {16,  0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0, 0xc7} },	/* GET LBA STATUS(16) */
	{0, 0x9e, 0x16, DS_NO_SSC, F_SA_LOW | F_D_IN, resp_get_stream_status, NULL,
	    {16, 0x16, 0, 0, 0xff, 0xff, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff,
	     0, 0} },	/* GET STREAM STATUS */
};
699 
static const struct opcode_info_t vl_iarr[] = {	/* VARIABLE LENGTH */
	{0, 0x7f, 0xb, DS_NO_SSC, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_dt0,
	    NULL, {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0xb, 0xfa,
		   0, 0xff, 0xff, 0xff, 0xff} },	/* WRITE(32) */
	{0, 0x7f, 0x11, DS_NO_SSC, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
	    NULL, {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x11, 0xf8,
		   0, 0xff, 0xff, 0x0, 0x0} },	/* WRITE SCATTERED(32) */
};
708 
static const struct opcode_info_t maint_in_iarr[] = {	/* MAINT IN */
	{0, 0xa3, 0xc, DS_ALL, F_SA_LOW | F_D_IN, resp_rsup_opcodes, NULL,
	    {12,  0xc, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
	     0xc7, 0, 0, 0, 0} }, /* REPORT SUPPORTED OPERATION CODES */
	{0, 0xa3, 0xd, DS_ALL, F_SA_LOW | F_D_IN, resp_rsup_tmfs, NULL,
	    {12,  0xd, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
	     0, 0} },	/* REPORTED SUPPORTED TASK MANAGEMENT FUNCTIONS */
};
717 
/* Overflow entry for the WRITE SAME slot: WRITE SAME(16) */
static const struct opcode_info_t write_same_iarr[] = {
	{0, 0x93, 0, DS_NO_SSC, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_write_same_16, NULL,
	    {16,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0x3f, 0xc7} },		/* WRITE SAME(16) */
};
723 
/* Overflow entry for the RESERVE slot: RESERVE(6) */
static const struct opcode_info_t reserve_iarr[] = {
	{0, 0x16, 0, DS_ALL, F_D_OUT, NULL, NULL,	/* RESERVE(6) */
	    {6,  0x1f, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};
728 
/* Overflow entry for the RELEASE slot: RELEASE(6) */
static const struct opcode_info_t release_iarr[] = {
	{0, 0x17, 0, DS_ALL, F_D_OUT, NULL, NULL,	/* RELEASE(6) */
	    {6,  0x1f, 0xff, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};
733 
/* Overflow entry for the SYNCHRONIZE CACHE slot: SYNCHRONIZE CACHE(16) */
static const struct opcode_info_t sync_cache_iarr[] = {
	{0, 0x91, 0, DS_NO_SSC, F_SYNC_DELAY | F_M_ACCESS, resp_sync_cache, NULL,
	    {16,  0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* SYNC_CACHE (16) */
};
739 
/* Overflow entries for the PRE-FETCH slot (0x34 doubles as READ POSITION
 * on tape devices) */
static const struct opcode_info_t pre_fetch_iarr[] = {
	{0, 0x90, 0, DS_NO_SSC, F_SYNC_DELAY | FF_MEDIA_IO, resp_pre_fetch, NULL,
	    {16,  0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* PRE-FETCH (16) */
	{0, 0x34, 0, DS_SSC, F_SYNC_DELAY | FF_MEDIA_IO, resp_read_position, NULL,
	    {10,  0x1f, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },				/* READ POSITION (10) */
};
748 
static const struct opcode_info_t zone_out_iarr[] = {	/* ZONE OUT(16) */
	{0, 0x94, 0x1, DS_NO_SSC, F_SA_LOW | F_M_ACCESS, resp_close_zone, NULL,
	    {16, 0x1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },	/* CLOSE ZONE */
	{0, 0x94, 0x2, DS_NO_SSC, F_SA_LOW | F_M_ACCESS, resp_finish_zone, NULL,
	    {16, 0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },	/* FINISH ZONE */
	{0, 0x94, 0x4, DS_NO_SSC, F_SA_LOW | F_M_ACCESS, resp_rwp_zone, NULL,
	    {16, 0x4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },  /* RESET WRITE POINTER */
};
760 
static const struct opcode_info_t zone_in_iarr[] = {	/* ZONE IN(16) */
	{0, 0x95, 0x6, DS_NO_SSC, F_SA_LOW | F_D_IN | F_M_ACCESS, NULL, NULL,
	    {16, 0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* REPORT ZONES */
};
766 
767 
/* This array is accessed via SDEB_I_* values. Make sure all are mapped,
 * plus the terminating elements for logic that scans this table such as
 * REPORT SUPPORTED OPERATION CODES.
 * Entry order (visible from the initializers): attached-entry count,
 * opcode, service action, device-type selector (DS_*), flags (F_*/FF_*),
 * response function, attached array, then a 16-byte CDB usage mask. */
static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEM_P1 + 1] = {
/* 0 */
	{0, 0, 0, DS_ALL, F_INV_OP | FF_RESPOND, NULL, NULL,	/* unknown opcodes */
	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x12, 0, DS_ALL, FF_RESPOND | F_D_IN, resp_inquiry, NULL, /* INQUIRY */
	    {6,  0xe3, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0xa0, 0, DS_ALL, FF_RESPOND | F_D_IN, resp_report_luns, NULL,
	    {12,  0xe3, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
	     0, 0} },					/* REPORT LUNS */
	{0, 0x3, 0, DS_ALL, FF_RESPOND | F_D_IN, resp_requests, NULL,
	    {6,  0xe1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x0, 0, DS_ALL, F_M_ACCESS | F_RL_WLUN_OK, NULL, NULL,/* TEST UNIT READY */
	    {6,  0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
/* 5 */
	{ARRAY_SIZE(msense_iarr), 0x5a, 0, DS_ALL, F_D_IN,	/* MODE SENSE(10) */
	    resp_mode_sense, msense_iarr, {10,  0xf8, 0xff, 0xff, 0, 0, 0,
		0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(mselect_iarr), 0x55, 0, DS_ALL, F_D_OUT,	/* MODE SELECT(10) */
	    resp_mode_select, mselect_iarr, {10,  0xf1, 0, 0, 0, 0, 0, 0xff,
		0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
	{0, 0x4d, 0, DS_NO_SSC, F_D_IN, resp_log_sense, NULL,	/* LOG SENSE */
	    {10,  0xe3, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0,
	     0, 0, 0} },
	{0, 0x25, 0, DS_NO_SSC, F_D_IN, resp_readcap, NULL,    /* READ CAPACITY(10) */
	    {10,  0xe1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0x1, 0xc7, 0, 0, 0, 0,
	     0, 0} },
	{ARRAY_SIZE(read_iarr), 0x88, 0, DS_NO_SSC, F_D_IN | FF_MEDIA_IO, /* READ(16) */
	    resp_read_dt0, read_iarr, {16,  0xfe, 0xff, 0xff, 0xff, 0xff,
	    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
/* 10 */
	{ARRAY_SIZE(write_iarr), 0x8a, 0, DS_NO_SSC, F_D_OUT | FF_MEDIA_IO,
	    resp_write_dt0, write_iarr,			/* WRITE(16) */
		{16,  0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
	{0, 0x1b, 0, DS_ALL, F_SSU_DELAY, resp_start_stop, NULL,/* START STOP UNIT */
	    {6,  0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(sa_in_16_iarr), 0x9e, 0x10, DS_NO_SSC, F_SA_LOW | F_D_IN,
	    resp_readcap16, sa_in_16_iarr, /* SA_IN(16), READ CAPACITY(16) */
		{16,  0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		 0xff, 0xff, 0xff, 0xff, 0x1, 0xc7} },
	{0, 0x9f, 0x12, DS_NO_SSC, F_SA_LOW | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
	    NULL, {16,  0x12, 0xf9, 0x0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff,
	    0xff, 0xff, 0xff, 0xff, 0xc7} },  /* SA_OUT(16), WRITE SCAT(16) */
	{ARRAY_SIZE(maint_in_iarr), 0xa3, 0xa, DS_ALL, F_SA_LOW | F_D_IN,
	    resp_report_tgtpgs,	/* MAINT IN, REPORT TARGET PORT GROUPS */
		maint_in_iarr, {12,  0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff,
				0xff, 0, 0xc7, 0, 0, 0, 0} },
/* 15 */
	{0, 0, 0, DS_ALL, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */
	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(verify_iarr), 0x8f, 0, DS_NO_SSC,
	    F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,	/* VERIFY(16) */
	    verify_iarr, {16,  0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
			  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },
	{ARRAY_SIZE(vl_iarr), 0x7f, 0x9, DS_NO_SSC, F_SA_HIGH | F_D_IN | FF_MEDIA_IO,
	    resp_read_dt0, vl_iarr,	/* VARIABLE LENGTH, READ(32) */
	    {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x9, 0xfe, 0, 0xff, 0xff,
	     0xff, 0xff} },
	{ARRAY_SIZE(reserve_iarr), 0x56, 0, DS_ALL, F_D_OUT,
	    NULL, reserve_iarr,	/* RESERVE(10) <no response function> */
	    {10,  0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
	     0} },
	{ARRAY_SIZE(release_iarr), 0x57, 0, DS_ALL, F_D_OUT,
	    NULL, release_iarr, /* RELEASE(10) <no response function> */
	    {10,  0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
	     0} },
/* 20 */
	{0, 0x1e, 0, DS_ALL, 0, NULL, NULL, /* ALLOW REMOVAL */
	    {6,  0, 0, 0, 0x3, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x1, 0, DS_SSC, 0, resp_rewind, NULL,
	    {6,  0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0, 0, DS_NO_SSC, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */
	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x1d, 0, DS_ALL, F_D_OUT, NULL, NULL,      /* SEND DIAGNOSTIC */
	    {6,  0xf7, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x42, 0, DS_NO_SSC, F_D_OUT | FF_MEDIA_IO, resp_unmap, NULL, /* UNMAP */
	    {10,  0x1, 0, 0, 0, 0, 0x3f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
/* 25 */
	{0, 0x3b, 0, DS_NO_SSC, F_D_OUT_MAYBE, resp_write_buffer, NULL,
	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },			/* WRITE_BUFFER */
	{ARRAY_SIZE(write_same_iarr), 0x41, 0, DS_NO_SSC, F_D_OUT_MAYBE | FF_MEDIA_IO,
	    resp_write_same_10, write_same_iarr,	/* WRITE SAME(10) */
		{10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0,
		 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(sync_cache_iarr), 0x35, 0, DS_NO_SSC, F_SYNC_DELAY | F_M_ACCESS,
	    resp_sync_cache, sync_cache_iarr,
	    {10,  0x7, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },			/* SYNC_CACHE (10) */
	{0, 0x89, 0, DS_NO_SSC, F_D_OUT | FF_MEDIA_IO, resp_comp_write, NULL,
	    {16,  0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0,
	     0, 0xff, 0x3f, 0xc7} },		/* COMPARE AND WRITE */
	{ARRAY_SIZE(pre_fetch_iarr), 0x34, 0, DS_NO_SSC, F_SYNC_DELAY | FF_MEDIA_IO,
	    resp_pre_fetch, pre_fetch_iarr,
	    {10,  0x2, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },			/* PRE-FETCH (10) */
						/* READ POSITION (10) */

/* 30 */
	{ARRAY_SIZE(zone_out_iarr), 0x94, 0x3, DS_NO_SSC, F_SA_LOW | F_M_ACCESS,
	    resp_open_zone, zone_out_iarr, /* ZONE_OUT(16), OPEN ZONE) */
		{16,  0x3 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		 0xff, 0xff, 0x0, 0x0, 0xff, 0xff, 0x1, 0xc7} },
	{ARRAY_SIZE(zone_in_iarr), 0x95, 0x0, DS_NO_SSC, F_SA_LOW | F_M_ACCESS,
	    resp_report_zones, zone_in_iarr, /* ZONE_IN(16), REPORT ZONES) */
		{16,  0x0 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xc7} },
/* 32 */
	{0, 0x9c, 0x0, DS_NO_SSC, F_D_OUT | FF_MEDIA_IO,
	    resp_atomic_write, NULL, /* ATOMIC WRITE 16 */
		{16,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff} },
	{0, 0x05, 0, DS_SSC, F_D_IN, resp_read_blklimits, NULL,    /* READ BLOCK LIMITS (6) */
	    {6,  0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x2b, 0, DS_SSC, F_D_UNKN, resp_locate, NULL,	   /* LOCATE (10) */
	    {10,  0x07, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },
	{0, 0x10, 0, DS_SSC, F_D_IN, resp_write_filemarks, NULL,   /* WRITE FILEMARKS (6) */
	    {6,  0x01, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x11, 0, DS_SSC, F_D_IN, resp_space, NULL,    /* SPACE (6) */
	    {6,  0x07, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x4, 0, DS_SSC, 0, resp_format_medium, NULL,  /* FORMAT MEDIUM (6) */
	    {6,  0x3, 0x7, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x19, 0, DS_SSC, F_D_IN, resp_erase, NULL,    /* ERASE (6) */
	    {6,  0x03, 0x33, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
/* 39 */
/* sentinel */
	{0xff, 0, 0, 0, 0, NULL, NULL,		/* terminating element */
	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};
901 
/*
 * Working copies of the module parameters. Defaults come from the DEF_*
 * macros defined earlier in this file.
 */
static int sdebug_num_hosts;
static int sdebug_add_host = DEF_NUM_HOST;  /* in sysfs this is relative */
static int sdebug_ato = DEF_ATO;
static int sdebug_cdb_len = DEF_CDB_LEN;
static int sdebug_jdelay = DEF_JDELAY;	/* if > 0 then unit is jiffies */
static int sdebug_dev_size_mb = DEF_DEV_SIZE_PRE_INIT;
static int sdebug_dif = DEF_DIF;
static int sdebug_dix = DEF_DIX;
static int sdebug_dsense = DEF_D_SENSE;
static int sdebug_every_nth = DEF_EVERY_NTH;
static int sdebug_fake_rw = DEF_FAKE_RW;
static unsigned int sdebug_guard = DEF_GUARD;
static int sdebug_host_max_queue;	/* per host */
static int sdebug_lowest_aligned = DEF_LOWEST_ALIGNED;
static int sdebug_max_luns = DEF_MAX_LUNS;
static int sdebug_max_queue = SDEBUG_CANQUEUE;	/* per submit queue */
static unsigned int sdebug_medium_error_start = OPT_MEDIUM_ERR_ADDR;
static int sdebug_medium_error_count = OPT_MEDIUM_ERR_NUM;
static int sdebug_ndelay = DEF_NDELAY;	/* if > 0 then unit is nanoseconds */
static int sdebug_no_lun_0 = DEF_NO_LUN_0;
static int sdebug_no_uld;
static int sdebug_num_parts = DEF_NUM_PARTS;
static int sdebug_num_tgts = DEF_NUM_TGTS; /* targets per host */
static int sdebug_opt_blks = DEF_OPT_BLKS;
static int sdebug_opts = DEF_OPTS;
static int sdebug_physblk_exp = DEF_PHYSBLK_EXP;
static int sdebug_opt_xferlen_exp = DEF_OPT_XFERLEN_EXP;
static int sdebug_ptype = DEF_PTYPE; /* SCSI peripheral device type */
static int sdebug_scsi_level = DEF_SCSI_LEVEL;
static int sdebug_sector_size = DEF_SECTOR_SIZE;
static int sdeb_tur_ms_to_ready = DEF_TUR_MS_TO_READY;
static int sdebug_virtual_gb = DEF_VIRTUAL_GB;
static int sdebug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
static unsigned int sdebug_lbpu = DEF_LBPU;
static unsigned int sdebug_lbpws = DEF_LBPWS;
static unsigned int sdebug_lbpws10 = DEF_LBPWS10;
static unsigned int sdebug_lbprz = DEF_LBPRZ;
static unsigned int sdebug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
static unsigned int sdebug_unmap_granularity = DEF_UNMAP_GRANULARITY;
static unsigned int sdebug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
static unsigned int sdebug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
static unsigned int sdebug_write_same_length = DEF_WRITESAME_LENGTH;
static unsigned int sdebug_atomic_wr = DEF_ATOMIC_WR;
static unsigned int sdebug_atomic_wr_max_length = DEF_ATOMIC_WR_MAX_LENGTH;
static unsigned int sdebug_atomic_wr_align = DEF_ATOMIC_WR_ALIGN;
static unsigned int sdebug_atomic_wr_gran = DEF_ATOMIC_WR_GRAN;
static unsigned int sdebug_atomic_wr_max_length_bndry =
			DEF_ATOMIC_WR_MAX_LENGTH_BNDRY;
static unsigned int sdebug_atomic_wr_max_bndry = DEF_ATOMIC_WR_MAX_BNDRY;
static int sdebug_uuid_ctl = DEF_UUID_CTL;
static bool sdebug_random = DEF_RANDOM;
static bool sdebug_per_host_store = DEF_PER_HOST_STORE;
static bool sdebug_removable = DEF_REMOVABLE;
static bool sdebug_clustering;
static bool sdebug_host_lock = DEF_HOST_LOCK;
static bool sdebug_strict = DEF_STRICT;
static bool sdebug_any_injecting_opt;
static bool sdebug_no_rwlock;
static bool sdebug_verbose;
static bool have_dif_prot;
static bool write_since_sync;
static bool sdebug_statistics = DEF_STATISTICS;
static bool sdebug_wp;
static bool sdebug_allow_restart;
/* Zoned block device emulation model selected via the zbc parameter */
static enum {
	BLK_ZONED_NONE	= 0,
	BLK_ZONED_HA	= 1,
	BLK_ZONED_HM	= 2,
} sdeb_zbc_model = BLK_ZONED_NONE;
static char *sdeb_zbc_model_s;
972 
/* SAM LUN addressing methods (encoded in the top bits of a LUN field) */
enum sam_lun_addr_method {SAM_LUN_AM_PERIPHERAL = 0x0,
			  SAM_LUN_AM_FLAT = 0x1,
			  SAM_LUN_AM_LOGICAL_UNIT = 0x2,
			  SAM_LUN_AM_EXTENDED = 0x3};
static enum sam_lun_addr_method sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
static int sdebug_lun_am_i = (int)SAM_LUN_AM_PERIPHERAL;

static unsigned int sdebug_store_sectors;
static sector_t sdebug_capacity;	/* in sectors */

/* old BIOS stuff, kernel may get rid of them but some mode sense pages
   may still need them */
static int sdebug_heads;		/* heads per disk */
static int sdebug_cylinders_per;	/* cylinders per surface */
static int sdebug_sectors_per;		/* sectors per cylinder */

/* all emulated hosts; additions/removals serialized by the mutex */
static LIST_HEAD(sdebug_host_list);
static DEFINE_MUTEX(sdebug_host_list_mutex);

/* per-store (backing ram) state, indexed by store id */
static struct xarray per_store_arr;
static struct xarray *per_store_ap = &per_store_arr;
static int sdeb_first_idx = -1;		/* invalid index ==> none created */
static int sdeb_most_recent_idx = -1;
static DEFINE_RWLOCK(sdeb_fake_rw_lck);	/* need a RW lock when fake_rw=1 */

/* statistics counters */
static unsigned long map_size;
static int num_aborts;
static int num_dev_resets;
static int num_target_resets;
static int num_bus_resets;
static int num_host_resets;
static int dix_writes;
static int dix_reads;
static int dif_errors;

/* ZBC global data */
static bool sdeb_zbc_in_use;	/* true for host-aware and host-managed disks */
static int sdeb_zbc_zone_cap_mb;
static int sdeb_zbc_zone_size_mb;
static int sdeb_zbc_max_open = DEF_ZBC_MAX_OPEN_ZONES;
static int sdeb_zbc_nr_conv = DEF_ZBC_NR_CONV_ZONES;

static int submit_queues = DEF_SUBMIT_QUEUES;  /* > 1 for multi-queue (mq) */
static int poll_queues; /* iouring iopoll interface.*/

static atomic_long_t writes_by_group_number[64];

static char sdebug_proc_name[] = MY_NAME;
static const char *my_name = MY_NAME;
1022 
static const struct bus_type pseudo_lld_bus;

static struct device_driver sdebug_driverfs_driver = {
	.name 		= sdebug_proc_name,
	.bus		= &pseudo_lld_bus,
};

/* Pre-built scsi_cmnd result values returned by the response paths */
static const int check_condition_result =
	SAM_STAT_CHECK_CONDITION;

static const int illegal_condition_result =
	(DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;

static const int device_qfull_result =
	(DID_ABORT << 16) | SAM_STAT_TASK_SET_FULL;

static const int condition_met_result = SAM_STAT_CONDITION_MET;

/* debugfs hierarchy root; per-target dirs are cleaned up asynchronously */
static struct dentry *sdebug_debugfs_root;
static ASYNC_DOMAIN_EXCLUSIVE(sdebug_async_domain);
1043 
1044 static u32 sdebug_get_devsel(struct scsi_device *sdp)
1045 {
1046 	unsigned char devtype = sdp->type;
1047 	u32 devsel;
1048 
1049 	if (devtype < 32)
1050 		devsel = (1 << devtype);
1051 	else
1052 		devsel = DS_ALL;
1053 
1054 	return devsel;
1055 }
1056 
1057 static void sdebug_err_free(struct rcu_head *head)
1058 {
1059 	struct sdebug_err_inject *inject =
1060 		container_of(head, typeof(*inject), rcu);
1061 
1062 	kfree(inject);
1063 }
1064 
/*
 * Register error injection entry @new on @sdev. Any existing entry with
 * the same (type, cmd) pair is unlinked first and freed via RCU, so the
 * new entry effectively replaces it. Takes ownership of @new.
 */
static void sdebug_err_add(struct scsi_device *sdev, struct sdebug_err_inject *new)
{
	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdev->hostdata;
	struct sdebug_err_inject *err;

	spin_lock(&devip->list_lock);
	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
		if (err->type == new->type && err->cmd == new->cmd) {
			/* unlink duplicate; readers may still hold it */
			list_del_rcu(&err->list);
			call_rcu(&err->rcu, sdebug_err_free);
		}
	}

	list_add_tail_rcu(&new->list, &devip->inject_err_list);
	spin_unlock(&devip->list_lock);
}
1081 
/*
 * Handle a "- <type> <cmd>" directive: remove the matching injection
 * entry from @sdev. Takes ownership of @buf and frees it on every path.
 * Returns @count if an entry was removed, -EINVAL on bad input or no match.
 */
static int sdebug_err_remove(struct scsi_device *sdev, const char *buf, size_t count)
{
	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdev->hostdata;
	struct sdebug_err_inject *err;
	int type;
	unsigned char cmd;

	if (sscanf(buf, "- %d %hhx", &type, &cmd) != 2) {
		kfree(buf);
		return -EINVAL;
	}

	spin_lock(&devip->list_lock);
	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
		if (err->type == type && err->cmd == cmd) {
			/* unlink and defer the free until readers drain */
			list_del_rcu(&err->list);
			call_rcu(&err->rcu, sdebug_err_free);
			spin_unlock(&devip->list_lock);
			kfree(buf);
			return count;
		}
	}
	spin_unlock(&devip->list_lock);

	kfree(buf);
	return -EINVAL;
}
1109 
/*
 * debugfs "error" read handler: dump the device's pending error
 * injections, one tab-separated line per entry. Field layout varies by
 * injection type (see sdebug_error_write() for the accepted formats).
 */
static int sdebug_error_show(struct seq_file *m, void *p)
{
	struct scsi_device *sdev = (struct scsi_device *)m->private;
	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdev->hostdata;
	struct sdebug_err_inject *err;

	seq_puts(m, "Type\tCount\tCommand\n");

	rcu_read_lock();
	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
		switch (err->type) {
		case ERR_TMOUT_CMD:
		case ERR_ABORT_CMD_FAILED:
		case ERR_LUN_RESET_FAILED:
			seq_printf(m, "%d\t%d\t0x%x\n", err->type, err->cnt,
				err->cmd);
		break;

		case ERR_FAIL_QUEUE_CMD:
			seq_printf(m, "%d\t%d\t0x%x\t0x%x\n", err->type,
				err->cnt, err->cmd, err->queuecmd_ret);
		break;

		case ERR_FAIL_CMD:
			seq_printf(m, "%d\t%d\t0x%x\t0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
				err->type, err->cnt, err->cmd,
				err->host_byte, err->driver_byte,
				err->status_byte, err->sense_key,
				err->asc, err->asq);
		break;
		}
	}
	rcu_read_unlock();

	return 0;
}
1146 
/* debugfs open: bind the seq_file show routine to this device's entry. */
static int sdebug_error_open(struct inode *inode, struct file *file)
{
	return single_open(file, sdebug_error_show, inode->i_private);
}
1151 
1152 static ssize_t sdebug_error_write(struct file *file, const char __user *ubuf,
1153 		size_t count, loff_t *ppos)
1154 {
1155 	char *buf;
1156 	unsigned int inject_type;
1157 	struct sdebug_err_inject *inject;
1158 	struct scsi_device *sdev = (struct scsi_device *)file->f_inode->i_private;
1159 
1160 	buf = memdup_user_nul(ubuf, count);
1161 	if (IS_ERR(buf))
1162 		return PTR_ERR(buf);
1163 
1164 	if (buf[0] == '-')
1165 		return sdebug_err_remove(sdev, buf, count);
1166 
1167 	if (sscanf(buf, "%d", &inject_type) != 1) {
1168 		kfree(buf);
1169 		return -EINVAL;
1170 	}
1171 
1172 	inject = kzalloc(sizeof(struct sdebug_err_inject), GFP_KERNEL);
1173 	if (!inject) {
1174 		kfree(buf);
1175 		return -ENOMEM;
1176 	}
1177 
1178 	switch (inject_type) {
1179 	case ERR_TMOUT_CMD:
1180 	case ERR_ABORT_CMD_FAILED:
1181 	case ERR_LUN_RESET_FAILED:
1182 		if (sscanf(buf, "%d %d %hhx", &inject->type, &inject->cnt,
1183 			   &inject->cmd) != 3)
1184 			goto out_error;
1185 	break;
1186 
1187 	case ERR_FAIL_QUEUE_CMD:
1188 		if (sscanf(buf, "%d %d %hhx %x", &inject->type, &inject->cnt,
1189 			   &inject->cmd, &inject->queuecmd_ret) != 4)
1190 			goto out_error;
1191 	break;
1192 
1193 	case ERR_FAIL_CMD:
1194 		if (sscanf(buf, "%d %d %hhx %hhx %hhx %hhx %hhx %hhx %hhx",
1195 			   &inject->type, &inject->cnt, &inject->cmd,
1196 			   &inject->host_byte, &inject->driver_byte,
1197 			   &inject->status_byte, &inject->sense_key,
1198 			   &inject->asc, &inject->asq) != 9)
1199 			goto out_error;
1200 	break;
1201 
1202 	default:
1203 		goto out_error;
1204 	break;
1205 	}
1206 
1207 	kfree(buf);
1208 	sdebug_err_add(sdev, inject);
1209 
1210 	return count;
1211 
1212 out_error:
1213 	kfree(buf);
1214 	kfree(inject);
1215 	return -EINVAL;
1216 }
1217 
/* File operations for the per-device "error" debugfs entry */
static const struct file_operations sdebug_error_fops = {
	.open	= sdebug_error_open,
	.read	= seq_read,
	.write	= sdebug_error_write,
	.release = single_release,
};
1224 
1225 static int sdebug_target_reset_fail_show(struct seq_file *m, void *p)
1226 {
1227 	struct scsi_target *starget = (struct scsi_target *)m->private;
1228 	struct sdebug_target_info *targetip =
1229 		(struct sdebug_target_info *)starget->hostdata;
1230 
1231 	if (targetip)
1232 		seq_printf(m, "%c\n", targetip->reset_fail ? 'Y' : 'N');
1233 
1234 	return 0;
1235 }
1236 
/* debugfs open: bind the show routine to this target's entry. */
static int sdebug_target_reset_fail_open(struct inode *inode, struct file *file)
{
	return single_open(file, sdebug_target_reset_fail_show, inode->i_private);
}
1241 
1242 static ssize_t sdebug_target_reset_fail_write(struct file *file,
1243 		const char __user *ubuf, size_t count, loff_t *ppos)
1244 {
1245 	int ret;
1246 	struct scsi_target *starget =
1247 		(struct scsi_target *)file->f_inode->i_private;
1248 	struct sdebug_target_info *targetip =
1249 		(struct sdebug_target_info *)starget->hostdata;
1250 
1251 	if (targetip) {
1252 		ret = kstrtobool_from_user(ubuf, count, &targetip->reset_fail);
1253 		return ret < 0 ? ret : count;
1254 	}
1255 	return -ENODEV;
1256 }
1257 
/* File operations for the per-target "fail_reset" debugfs entry */
static const struct file_operations sdebug_target_reset_fail_fops = {
	.open	= sdebug_target_reset_fail_open,
	.read	= seq_read,
	.write	= sdebug_target_reset_fail_write,
	.release = single_release,
};
1264 
1265 static int sdebug_target_alloc(struct scsi_target *starget)
1266 {
1267 	struct sdebug_target_info *targetip;
1268 
1269 	targetip = kzalloc(sizeof(struct sdebug_target_info), GFP_KERNEL);
1270 	if (!targetip)
1271 		return -ENOMEM;
1272 
1273 	async_synchronize_full_domain(&sdebug_async_domain);
1274 
1275 	targetip->debugfs_entry = debugfs_create_dir(dev_name(&starget->dev),
1276 				sdebug_debugfs_root);
1277 
1278 	debugfs_create_file("fail_reset", 0600, targetip->debugfs_entry, starget,
1279 				&sdebug_target_reset_fail_fops);
1280 
1281 	starget->hostdata = targetip;
1282 
1283 	return 0;
1284 }
1285 
/*
 * Async-domain worker: tear down a target's debugfs directory and free its
 * private data. (Function name keeps the historical "tartget" typo; callers
 * reference it by this name.)
 */
static void sdebug_tartget_cleanup_async(void *data, async_cookie_t cookie)
{
	struct sdebug_target_info *targetip = data;

	debugfs_remove(targetip->debugfs_entry);
	kfree(targetip);
}
1293 
1294 static void sdebug_target_destroy(struct scsi_target *starget)
1295 {
1296 	struct sdebug_target_info *targetip;
1297 
1298 	targetip = (struct sdebug_target_info *)starget->hostdata;
1299 	if (targetip) {
1300 		starget->hostdata = NULL;
1301 		async_schedule_domain(sdebug_tartget_cleanup_async, targetip,
1302 				&sdebug_async_domain);
1303 	}
1304 }
1305 
1306 /* Only do the extra work involved in logical block provisioning if one or
1307  * more of the lbpu, lbpws or lbpws10 parameters are given and we are doing
1308  * real reads and writes (i.e. not skipping them for speed).
1309  */
1310 static inline bool scsi_debug_lbp(void)
1311 {
1312 	return 0 == sdebug_fake_rw &&
1313 		(sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10);
1314 }
1315 
1316 static inline bool scsi_debug_atomic_write(void)
1317 {
1318 	return sdebug_fake_rw == 0 && sdebug_atomic_wr;
1319 }
1320 
/*
 * Map @lba to its byte address inside the fake backing store of @sip.
 * The LBA is reduced modulo sdebug_store_sectors (the store wraps when
 * the virtual capacity exceeds the allocated store). If @sip is NULL or
 * has no storage (should not happen, hence the WARN), fall back to the
 * first store in per_store_arr.
 */
static void *lba2fake_store(struct sdeb_store_info *sip,
			    unsigned long long lba)
{
	struct sdeb_store_info *lsip = sip;

	lba = do_div(lba, sdebug_store_sectors);	/* do_div() returns the remainder */
	if (!sip || !sip->storep) {
		WARN_ON_ONCE(true);
		lsip = xa_load(per_store_ap, 0);  /* should never be NULL */
	}
	return lsip->storep + lba * sdebug_sector_size;
}
1333 
1334 static struct t10_pi_tuple *dif_store(struct sdeb_store_info *sip,
1335 				      sector_t sector)
1336 {
1337 	sector = sector_div(sector, sdebug_store_sectors);
1338 
1339 	return sip->dif_storep + sector;
1340 }
1341 
/*
 * Push the current sdebug_num_tgts value into every emulated host's
 * max_id (and reset max_lun), under the host list mutex.
 */
static void sdebug_max_tgts_luns(void)
{
	struct sdebug_host_info *sdbg_host;
	struct Scsi_Host *hpnt;

	mutex_lock(&sdebug_host_list_mutex);
	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
		hpnt = sdbg_host->shost;
		/* +1 when this_id falls inside the target id range */
		if ((hpnt->this_id >= 0) &&
		    (sdebug_num_tgts > hpnt->this_id))
			hpnt->max_id = sdebug_num_tgts + 1;
		else
			hpnt->max_id = sdebug_num_tgts;
		/* sdebug_max_luns; */
		hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
	}
	mutex_unlock(&sdebug_host_list_mutex);
}
1360 
1361 enum sdeb_cmd_data {SDEB_IN_DATA = 0, SDEB_IN_CDB = 1};
1362 
/* Set in_bit to -1 to indicate no bit position of invalid field */
/*
 * Build ILLEGAL REQUEST sense data with a SENSE-KEY SPECIFIC (SKS) field
 * pointing at the offending byte (and optionally bit). @c_d selects
 * whether the bad field was in the CDB (SDEB_IN_CDB) or in the data-out
 * buffer (SDEB_IN_DATA), which also selects the ASC.
 */
static void mk_sense_invalid_fld(struct scsi_cmnd *scp,
				 enum sdeb_cmd_data c_d,
				 int in_byte, int in_bit)
{
	unsigned char *sbuff;
	u8 sks[4];
	int sl, asc;

	sbuff = scp->sense_buffer;
	if (!sbuff) {
		sdev_printk(KERN_ERR, scp->device, "sense_buffer is NULL\n");
		return;
	}
	asc = c_d ? INVALID_FIELD_IN_CDB : INVALID_FIELD_IN_PARAM_LIST;
	memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
	scsi_build_sense(scp, sdebug_dsense, ILLEGAL_REQUEST, asc, 0);
	memset(sks, 0, sizeof(sks));
	sks[0] = 0x80;		/* SKSV: sense-key specific data valid */
	if (c_d)
		sks[0] |= 0x40;	/* C/D: error is in the command block */
	if (in_bit >= 0) {
		sks[0] |= 0x8;	/* BPV: bit pointer valid */
		sks[0] |= 0x7 & in_bit;
	}
	put_unaligned_be16(in_byte, sks + 1);
	if (sdebug_dsense) {
		/* descriptor format: append an SKS descriptor */
		sl = sbuff[7] + 8;
		sbuff[7] = sl;
		sbuff[sl] = 0x2;
		sbuff[sl + 1] = 0x6;
		memcpy(sbuff + sl + 4, sks, 3);
	} else
		memcpy(sbuff + 15, sks, 3);	/* fixed format: SKS bytes 15..17 */
	if (sdebug_verbose)
		sdev_printk(KERN_INFO, scp->device, "%s:  [sense_key,asc,ascq"
			    "]: [0x5,0x%x,0x0] %c byte=%d, bit=%d\n",
			    my_name, asc, c_d ? 'C' : 'D', in_byte, in_bit);
}
1402 
/*
 * Build sense data (fixed or descriptor format per sdebug_dsense) for
 * @key/@asc/@asq in the command's sense buffer.
 */
static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq)
{
	if (!scp->sense_buffer) {
		sdev_printk(KERN_ERR, scp->device, "sense_buffer is NULL\n");
		return;
	}
	memset(scp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);

	scsi_build_sense(scp, sdebug_dsense, key, asc, asq);

	if (sdebug_verbose)
		sdev_printk(KERN_INFO, scp->device,
			    "%s:  [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
			    my_name, key, asc, asq);
}
1418 
/* Sense data that has information fields for tapes */
/*
 * Like mk_sense_buffer() but always uses the fixed sense format, sets the
 * VALID bit, stores @information in bytes 3..6 (big endian) and ORs
 * @tape_flags (e.g. filemark/EOM/ILI bits) into byte 2.
 */
static void mk_sense_info_tape(struct scsi_cmnd *scp, int key, int asc, int asq,
			unsigned int information, unsigned char tape_flags)
{
	if (!scp->sense_buffer) {
		sdev_printk(KERN_ERR, scp->device, "sense_buffer is NULL\n");
		return;
	}
	memset(scp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);

	scsi_build_sense(scp, /* sdebug_dsense */ 0, key, asc, asq);
	/* only fixed format so far */

	scp->sense_buffer[0] |= 0x80; /* valid */
	scp->sense_buffer[2] |= tape_flags;
	put_unaligned_be32(information, &scp->sense_buffer[3]);

	if (sdebug_verbose)
		sdev_printk(KERN_INFO, scp->device,
			    "%s:  [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
			    my_name, key, asc, asq);
}
1441 
/* Convenience wrapper: ILLEGAL REQUEST / INVALID COMMAND OPERATION CODE. */
static void mk_sense_invalid_opcode(struct scsi_cmnd *scp)
{
	mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
}
1446 
1447 static int scsi_debug_ioctl(struct scsi_device *dev, unsigned int cmd,
1448 			    void __user *arg)
1449 {
1450 	if (sdebug_verbose) {
1451 		if (0x1261 == cmd)
1452 			sdev_printk(KERN_INFO, dev, "BLKFLSBUF [0x1261]\n");
1453 		else if (0x5331 == cmd)
1454 			sdev_printk(KERN_INFO, dev,
1455 				    "CDROM_GET_CAPABILITY [0x5331]\n");
1456 		else
1457 			sdev_printk(KERN_INFO, dev, "cmd=0x%x\n", cmd);
1458 	}
1459 	return -EINVAL;
1460 	/* return -ENOTTY; // correct return but upsets fdisk */
1461 }
1462 
/*
 * Steer the SCSI mid-layer's CDB size choices (READ/WRITE and MODE
 * SENSE/SELECT) on @sdev according to the sdebug_cdb_len parameter.
 * Unexpected values are coerced to 10 with a warning.
 */
static void config_cdb_len(struct scsi_device *sdev)
{
	switch (sdebug_cdb_len) {
	case 6:	/* suggest 6 byte READ, WRITE and MODE SENSE/SELECT */
		sdev->use_10_for_rw = false;
		sdev->use_16_for_rw = false;
		sdev->use_10_for_ms = false;
		break;
	case 10: /* suggest 10 byte RWs and 6 byte MODE SENSE/SELECT */
		sdev->use_10_for_rw = true;
		sdev->use_16_for_rw = false;
		sdev->use_10_for_ms = false;
		break;
	case 12: /* suggest 10 byte RWs and 10 byte MODE SENSE/SELECT */
		sdev->use_10_for_rw = true;
		sdev->use_16_for_rw = false;
		sdev->use_10_for_ms = true;
		break;
	case 16:
		sdev->use_10_for_rw = false;
		sdev->use_16_for_rw = true;
		sdev->use_10_for_ms = true;
		break;
	case 32: /* No knobs to suggest this so same as 16 for now */
		sdev->use_10_for_rw = false;
		sdev->use_16_for_rw = true;
		sdev->use_10_for_ms = true;
		break;
	default:
		pr_warn("unexpected cdb_len=%d, force to 10\n",
			sdebug_cdb_len);
		sdev->use_10_for_rw = true;
		sdev->use_16_for_rw = false;
		sdev->use_10_for_ms = false;
		sdebug_cdb_len = 10;
		break;
	}
}
1501 
1502 static void all_config_cdb_len(void)
1503 {
1504 	struct sdebug_host_info *sdbg_host;
1505 	struct Scsi_Host *shost;
1506 	struct scsi_device *sdev;
1507 
1508 	mutex_lock(&sdebug_host_list_mutex);
1509 	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
1510 		shost = sdbg_host->shost;
1511 		shost_for_each_device(sdev, shost) {
1512 			config_cdb_len(sdev);
1513 		}
1514 	}
1515 	mutex_unlock(&sdebug_host_list_mutex);
1516 }
1517 
/*
 * Clear the LUNS_CHANGED unit attention on every LUN sharing @devip's
 * target (SPC-4 reports that UA only once per target).
 * NOTE(review): walks sdhp->dev_info_list without an explicit lock here;
 * presumably serialized by the caller - confirm.
 */
static void clear_luns_changed_on_target(struct sdebug_dev_info *devip)
{
	struct sdebug_host_info *sdhp = devip->sdbg_host;
	struct sdebug_dev_info *dp;

	list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
		if ((devip->sdbg_host == dp->sdbg_host) &&
		    (devip->target == dp->target)) {
			clear_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
		}
	}
}
1530 
/*
 * If a unit attention (UA) is pending on @devip, build the corresponding
 * sense data for the lowest-numbered pending UA, clear that UA bit and
 * return check_condition_result; otherwise return 0.
 */
static int make_ua(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	int k;

	k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS);
	if (k != SDEBUG_NUM_UAS) {
		const char *cp = NULL;

		switch (k) {
		case SDEBUG_UA_POR:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
					POWER_ON_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "power on reset";
			break;
		case SDEBUG_UA_POOCCUR:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
					POWER_ON_OCCURRED_ASCQ);
			if (sdebug_verbose)
				cp = "power on occurred";
			break;
		case SDEBUG_UA_BUS_RESET:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
					BUS_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "bus reset";
			break;
		case SDEBUG_UA_MODE_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
					MODE_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "mode parameters changed";
			break;
		case SDEBUG_UA_CAPACITY_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
					CAPACITY_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "capacity data changed";
			break;
		case SDEBUG_UA_MICROCODE_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					MICROCODE_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "microcode has been changed";
			break;
		case SDEBUG_UA_MICROCODE_CHANGED_WO_RESET:
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					MICROCODE_CHANGED_WO_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "microcode has been changed without reset";
			break;
		case SDEBUG_UA_LUNS_CHANGED:
			/*
			 * SPC-3 behavior is to report a UNIT ATTENTION with
			 * ASC/ASCQ REPORTED LUNS DATA HAS CHANGED on every LUN
			 * on the target, until a REPORT LUNS command is
			 * received.  SPC-4 behavior is to report it only once.
			 * NOTE:  sdebug_scsi_level does not use the same
			 * values as struct scsi_device->scsi_level.
			 */
			if (sdebug_scsi_level >= 6)	/* SPC-4 and above */
				clear_luns_changed_on_target(devip);
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					LUNS_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "reported luns data has changed";
			break;
		case SDEBUG_UA_NOT_READY_TO_READY:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_READY_ASC,
					0);
			if (sdebug_verbose)
				cp = "not ready to ready transition/media change";
			break;
		default:
			pr_warn("unexpected unit attention code=%d\n", k);
			if (sdebug_verbose)
				cp = "unknown";
			break;
		}
		clear_bit(k, devip->uas_bm);
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, scp->device,
				   "%s reports: Unit attention: %s\n",
				   my_name, cp);
		return check_condition_result;
	}
	return 0;
}
1622 
1623 /* Build SCSI "data-in" buffer. Returns 0 if ok else (DID_ERROR << 16). */
1624 static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
1625 				int arr_len)
1626 {
1627 	int act_len;
1628 	struct scsi_data_buffer *sdb = &scp->sdb;
1629 
1630 	if (!sdb->length)
1631 		return 0;
1632 	if (scp->sc_data_direction != DMA_FROM_DEVICE)
1633 		return DID_ERROR << 16;
1634 
1635 	act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
1636 				      arr, arr_len);
1637 	scsi_set_resid(scp, scsi_bufflen(scp) - act_len);
1638 
1639 	return 0;
1640 }
1641 
1642 /* Partial build of SCSI "data-in" buffer. Returns 0 if ok else
1643  * (DID_ERROR << 16). Can write to offset in data-in buffer. If multiple
1644  * calls, not required to write in ascending offset order. Assumes resid
1645  * set to scsi_bufflen() prior to any calls.
1646  */
1647 static int p_fill_from_dev_buffer(struct scsi_cmnd *scp, const void *arr,
1648 				  int arr_len, unsigned int off_dst)
1649 {
1650 	unsigned int act_len, n;
1651 	struct scsi_data_buffer *sdb = &scp->sdb;
1652 	off_t skip = off_dst;
1653 
1654 	if (sdb->length <= off_dst)
1655 		return 0;
1656 	if (scp->sc_data_direction != DMA_FROM_DEVICE)
1657 		return DID_ERROR << 16;
1658 
1659 	act_len = sg_pcopy_from_buffer(sdb->table.sgl, sdb->table.nents,
1660 				       arr, arr_len, skip);
1661 	pr_debug("off_dst=%u, scsi_bufflen=%u, act_len=%u, resid=%d\n",
1662 		 off_dst, scsi_bufflen(scp), act_len,
1663 		 scsi_get_resid(scp));
1664 	n = scsi_bufflen(scp) - (off_dst + act_len);
1665 	scsi_set_resid(scp, min_t(u32, scsi_get_resid(scp), n));
1666 	return 0;
1667 }
1668 
1669 /* Fetches from SCSI "data-out" buffer. Returns number of bytes fetched into
1670  * 'arr' or -1 if error.
1671  */
1672 static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
1673 			       int arr_len)
1674 {
1675 	if (!scsi_bufflen(scp))
1676 		return 0;
1677 	if (scp->sc_data_direction != DMA_TO_DEVICE)
1678 		return -1;
1679 
1680 	return scsi_sg_copy_to_buffer(scp, arr, arr_len);
1681 }
1682 
1683 
/*
 * Strings copied into INQUIRY responses; arrays are one byte larger than
 * the copied field so the initializer's NUL terminator fits.
 */
static char sdebug_inq_vendor_id[9] = "Linux   ";
static char sdebug_inq_product_id[17] = "scsi_debug      ";
static char sdebug_inq_product_rev[5] = SDEBUG_VERSION;
/* Use some locally assigned NAAs for SAS addresses. */
static const u64 naa3_comp_a = 0x3222222000000000ULL;
static const u64 naa3_comp_b = 0x3333333000000000ULL;
static const u64 naa3_comp_c = 0x3111111000000000ULL;
1691 
/*
 * Device identification VPD page (0x83). Returns number of bytes placed
 * in arr. A negative dev_id_num (the well-known LUN case, see caller)
 * suppresses both the logical unit designator and the relative target
 * port designator.
 */
static int inquiry_vpd_83(unsigned char *arr, int port_group_id,
			  int target_dev_id, int dev_id_num,
			  const char *dev_id_str, int dev_id_str_len,
			  const uuid_t *lu_name)
{
	int num, port_a;
	char b[32];

	port_a = target_dev_id + 1;
	/* T10 vendor identifier field format (faked) */
	arr[0] = 0x2;	/* ASCII */
	arr[1] = 0x1;
	arr[2] = 0x0;
	memcpy(&arr[4], sdebug_inq_vendor_id, 8);
	memcpy(&arr[12], sdebug_inq_product_id, 16);
	memcpy(&arr[28], dev_id_str, dev_id_str_len);
	num = 8 + 16 + dev_id_str_len;
	arr[3] = num;	/* designator length (excludes 4 byte header) */
	num += 4;
	if (dev_id_num >= 0) {
		if (sdebug_uuid_ctl) {
			/* Locally assigned UUID */
			arr[num++] = 0x1;  /* binary (not necessarily sas) */
			arr[num++] = 0xa;  /* PIV=0, lu, naa */
			arr[num++] = 0x0;
			arr[num++] = 0x12;
			arr[num++] = 0x10; /* uuid type=1, locally assigned */
			arr[num++] = 0x0;
			memcpy(arr + num, lu_name, 16);
			num += 16;
		} else {
			/* NAA-3, Logical unit identifier (binary) */
			arr[num++] = 0x1;  /* binary (not necessarily sas) */
			arr[num++] = 0x3;  /* PIV=0, lu, naa */
			arr[num++] = 0x0;
			arr[num++] = 0x8;
			put_unaligned_be64(naa3_comp_b + dev_id_num, arr + num);
			num += 8;
		}
		/* Target relative port number */
		arr[num++] = 0x61;	/* proto=sas, binary */
		arr[num++] = 0x94;	/* PIV=1, target port, rel port */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x4;	/* length */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x0;
		arr[num++] = 0x1;	/* relative port A */
	}
	/* NAA-3, Target port identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x93;	/* piv=1, target port, naa */
	arr[num++] = 0x0;
	arr[num++] = 0x8;
	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
	num += 8;
	/* NAA-3, Target port group identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x95;	/* piv=1, target port group id */
	arr[num++] = 0x0;
	arr[num++] = 0x4;
	arr[num++] = 0;
	arr[num++] = 0;
	put_unaligned_be16(port_group_id, arr + num);
	num += 2;
	/* NAA-3, Target device identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0xa3;	/* piv=1, target device, naa */
	arr[num++] = 0x0;
	arr[num++] = 0x8;
	put_unaligned_be64(naa3_comp_a + target_dev_id, arr + num);
	num += 8;
	/* SCSI name string: Target device identifier */
	arr[num++] = 0x63;	/* proto=sas, UTF-8 */
	arr[num++] = 0xa8;	/* piv=1, target device, SCSI name string */
	arr[num++] = 0x0;
	arr[num++] = 24;	/* 12 + 8 hex digits + 4 byte NUL pad below */
	memcpy(arr + num, "naa.32222220", 12);
	num += 12;
	snprintf(b, sizeof(b), "%08X", target_dev_id);
	memcpy(arr + num, b, 8);
	num += 8;
	memset(arr + num, 0, 4);
	num += 4;
	return num;
}
1779 
static unsigned char vpd84_data[] = {
/* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
    0x22,0x22,0x22,0x0,0xbb,0x1,
    0x22,0x22,0x22,0x0,0xbb,0x2,
};

/*
 * Software interface identification VPD page (0x84). Copies the fixed
 * template above into arr and returns its length.
 */
static int inquiry_vpd_84(unsigned char *arr)
{
	const size_t len = sizeof(vpd84_data);

	memcpy(arr, vpd84_data, len);
	return (int)len;
}
1792 
/*
 * Management network addresses VPD page (0x85). Emits one network
 * services descriptor per URL; each URL is NUL terminated and padded to
 * a multiple of 4 bytes. Returns the number of bytes written to arr.
 */
static int inquiry_vpd_85(unsigned char *arr)
{
	static const char *url[2] = {
		"https://www.kernel.org/config",
		"http://www.kernel.org/log",
	};
	/* association/service type byte per descriptor:
	 * 0x1 => lu, storage configuration; 0x4 => lu, logging
	 */
	static const unsigned char assoc_svc[2] = { 0x1, 0x4 };
	int k, num = 0;

	for (k = 0; k < 2; ++k) {
		int olen = strlen(url[k]);
		int plen = (olen + 1 + 3) & ~3;	/* round up (URL + NUL) to 4 */

		arr[num++] = assoc_svc[k];
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x0;
		arr[num++] = plen;	/* length, null terminated, padded */
		memcpy(arr + num, url[k], olen);
		memset(arr + num + olen, 0, plen - olen);
		num += plen;
	}

	return num;
}
1827 
/*
 * SCSI ports VPD page (0x88). Advertises two relative target ports (A and
 * B) with NAA-3 identifiers derived from naa3_comp_a; returns the number
 * of bytes placed in arr.
 */
static int inquiry_vpd_88(unsigned char *arr, int target_dev_id)
{
	int num = 0;
	int port_a, port_b;

	port_a = target_dev_id + 1;
	port_b = port_a + 1;
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;
	arr[num++] = 0x1;	/* relative port 1 (primary) */
	memset(arr + num, 0, 6);
	num += 6;
	arr[num++] = 0x0;
	arr[num++] = 12;	/* length tp descriptor */
	/* NAA-3 target port identifier (A) */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x8;	/* length */
	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
	num += 8;
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;
	arr[num++] = 0x2;	/* relative port 2 (secondary) */
	memset(arr + num, 0, 6);
	num += 6;
	arr[num++] = 0x0;
	arr[num++] = 12;	/* length tp descriptor */
	/* NAA-3 target port identifier (B) */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x8;	/* length */
	put_unaligned_be64(naa3_comp_a + port_b, arr + num);
	num += 8;

	return num;
}
1869 
1870 
/* Canned IDENTIFY DEVICE style payload for the ATA Information VPD page. */
static unsigned char vpd89_data[] = {
/* from 4th byte */ 0,0,0,0,
'l','i','n','u','x',' ',' ',' ',
'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
'1','2','3','4',
0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
0xec,0,0,0,
0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
0x53,0x41,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
0x20,0x20,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
0x10,0x80,
0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
};

/*
 * ATA Information VPD page (0x89). Copies the fixed template above into
 * arr and returns its length.
 */
static int inquiry_vpd_89(unsigned char *arr)
{
	const size_t len = sizeof(vpd89_data);

	memcpy(arr, vpd89_data, len);
	return (int)len;
}
1921 
1922 
/* Static initial contents of the Block limits page (from its 4th byte);
 * most fields are overwritten by inquiry_vpd_b0() below.
 */
static unsigned char vpdb0_data[] = {
	/* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
};

/*
 * Block limits VPD page (SBC-3). Fills transfer-length, unmap, write-same
 * and (optionally) atomic-write limits from the module parameters. The
 * offsets used here are relative to the page's 4th byte (the caller
 * passes &arr[4]).
 */
static int inquiry_vpd_b0(unsigned char *arr)
{
	unsigned int gran;

	memcpy(arr, vpdb0_data, sizeof(vpdb0_data));

	/* Optimal transfer length granularity */
	if (sdebug_opt_xferlen_exp != 0 &&
	    sdebug_physblk_exp < sdebug_opt_xferlen_exp)
		gran = 1 << sdebug_opt_xferlen_exp;
	else
		gran = 1 << sdebug_physblk_exp;
	put_unaligned_be16(gran, arr + 2);

	/* Maximum Transfer Length */
	if (sdebug_store_sectors > 0x400)
		put_unaligned_be32(sdebug_store_sectors, arr + 4);

	/* Optimal Transfer Length */
	put_unaligned_be32(sdebug_opt_blks, &arr[8]);

	if (sdebug_lbpu) {
		/* Maximum Unmap LBA Count */
		put_unaligned_be32(sdebug_unmap_max_blocks, &arr[16]);

		/* Maximum Unmap Block Descriptor Count */
		put_unaligned_be32(sdebug_unmap_max_desc, &arr[20]);
	}

	/* Unmap Granularity Alignment */
	if (sdebug_unmap_alignment) {
		put_unaligned_be32(sdebug_unmap_alignment, &arr[28]);
		arr[28] |= 0x80; /* UGAVALID */
	}

	/* Optimal Unmap Granularity */
	put_unaligned_be32(sdebug_unmap_granularity, &arr[24]);

	/* Maximum WRITE SAME Length */
	put_unaligned_be64(sdebug_write_same_length, &arr[32]);

	if (sdebug_atomic_wr) {
		put_unaligned_be32(sdebug_atomic_wr_max_length, &arr[40]);
		put_unaligned_be32(sdebug_atomic_wr_align, &arr[44]);
		put_unaligned_be32(sdebug_atomic_wr_gran, &arr[48]);
		put_unaligned_be32(sdebug_atomic_wr_max_length_bndry, &arr[52]);
		put_unaligned_be32(sdebug_atomic_wr_max_bndry, &arr[56]);
	}

	return 0x3c; /* Mandatory page length for Logical Block Provisioning */
}
1982 
/*
 * Block device characteristics VPD page (SBC-3). The devip argument is
 * currently unused. Always returns the fixed 0x3c page length.
 */
static int inquiry_vpd_b1(struct sdebug_dev_info *devip, unsigned char *arr)
{
	memset(arr, 0, 0x3c);
	arr[1] = 1;	/* medium rotation rate: non rotating (solid state) */
	arr[3] = 5;	/* nominal form factor: less than 1.8" */

	return 0x3c;
}
1994 
1995 /* Logical block provisioning VPD page (SBC-4) */
1996 static int inquiry_vpd_b2(unsigned char *arr)
1997 {
1998 	memset(arr, 0, 0x4);
1999 	arr[0] = 0;			/* threshold exponent */
2000 	if (sdebug_lbpu)
2001 		arr[1] = 1 << 7;
2002 	if (sdebug_lbpws)
2003 		arr[1] |= 1 << 6;
2004 	if (sdebug_lbpws10)
2005 		arr[1] |= 1 << 5;
2006 	if (sdebug_lbprz && scsi_debug_lbp())
2007 		arr[1] |= (sdebug_lbprz & 0x7) << 2;  /* sbc4r07 and later */
2008 	/* anc_sup=0; dp=0 (no provisioning group descriptor) */
2009 	/* minimum_percentage=0; provisioning_type=0 (unknown) */
2010 	/* threshold_percentage=0 */
2011 	return 0x4;
2012 }
2013 
/*
 * Zoned block device characteristics VPD page (ZBC mandatory). Reports
 * URSWRZ, the open-zone limits and, for devices whose zone capacity is
 * smaller than the zone size, the constant zone start offset.
 */
static int inquiry_vpd_b6(struct sdebug_dev_info *devip, unsigned char *arr)
{
	memset(arr, 0, 0x3c);
	arr[0] = 0x1; /* set URSWRZ (unrestricted read in seq. wr req zone) */
	/*
	 * Set Optimal number of open sequential write preferred zones and
	 * Optimal number of non-sequentially written sequential write
	 * preferred zones fields to 'not reported' (0xffffffff). Leave other
	 * fields set to zero, apart from Max. number of open swrz_s field.
	 */
	put_unaligned_be32(0xffffffff, &arr[4]);
	put_unaligned_be32(0xffffffff, &arr[8]);
	/* Only host-managed devices with a configured limit report one */
	if (sdeb_zbc_model == BLK_ZONED_HM && devip->max_open)
		put_unaligned_be32(devip->max_open, &arr[12]);
	else
		put_unaligned_be32(0xffffffff, &arr[12]);
	if (devip->zcap < devip->zsize) {
		/* zone capacity < zone size: expose the fixed start offset */
		arr[19] = ZBC_CONSTANT_ZONE_START_OFFSET;
		put_unaligned_be64(devip->zsize, &arr[20]);
	} else {
		arr[19] = 0;
	}
	return 0x3c;
}
2039 
#define SDEBUG_BLE_LEN_AFTER_B4 28	/* thus vpage 32 bytes long */

enum { MAXIMUM_NUMBER_OF_STREAMS = 6, PERMANENT_STREAM_COUNT = 5 };

/*
 * Block limits extension VPD page (SBC-4). Advertises reduced stream
 * control and the stream limit; returns the payload length after byte 4.
 */
static int inquiry_vpd_b7(unsigned char *arrb4)
{
	memset(arrb4, 0, SDEBUG_BLE_LEN_AFTER_B4);
	/* Reduced stream control support (RSCS) */
	arrb4[1] = 1;
	put_unaligned_be16(MAXIMUM_NUMBER_OF_STREAMS, &arrb4[2]);
	return SDEBUG_BLE_LEN_AFTER_B4;
}
2052 
#define SDEBUG_LONG_INQ_SZ 96
#define SDEBUG_MAX_INQ_ARR_SZ 584

/*
 * Respond to the INQUIRY command (SPC): either one of the EVPD pages
 * advertised in page 0x00, or the standard inquiry data. Returns 0 on
 * success, check_condition_result for invalid CDB fields, or
 * DID_REQUEUE << 16 when the response buffer cannot be allocated.
 */
static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	unsigned char pq_pdt;
	unsigned char *arr;
	unsigned char *cmd = scp->cmnd;
	u32 alloc_len, n;
	int ret;
	bool have_wlun, is_disk, is_zbc, is_disk_zbc, is_tape;

	alloc_len = get_unaligned_be16(cmd + 3);
	arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
	if (! arr)
		return DID_REQUEUE << 16;
	/* out-of-range device type: fall back to the sdebug_ptype parameter */
	if (scp->device->type >= 32) {
		is_disk = (sdebug_ptype == TYPE_DISK);
		is_tape = (sdebug_ptype == TYPE_TAPE);
	} else {
		is_disk = (scp->device->type == TYPE_DISK);
		is_tape = (scp->device->type == TYPE_TAPE);
	}
	is_zbc = devip->zoned;
	is_disk_zbc = (is_disk || is_zbc);
	have_wlun = scsi_is_wlun(scp->device->lun);
	if (have_wlun)
		pq_pdt = TYPE_WLUN;	/* present, wlun */
	else if (sdebug_no_lun_0 && (devip->lun == SDEBUG_LUN_0_VAL))
		pq_pdt = 0x7f;	/* not present, PQ=3, PDT=0x1f */
	else
		pq_pdt = ((scp->device->type >= 32 ?
				sdebug_ptype : scp->device->type) & 0x1f);
	arr[0] = pq_pdt;
	if (0x2 & cmd[1]) {  /* CMDDT bit set */
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
		kfree(arr);
		return check_condition_result;
	} else if (0x1 & cmd[1]) {  /* EVPD bit set */
		int lu_id_num, port_group_id, target_dev_id;
		u32 len;
		char lu_id_str[6];
		int host_no = devip->sdbg_host->shost->host_no;

		arr[1] = cmd[2];	/* echo requested page code */
		port_group_id = (((host_no + 1) & 0x7f) << 8) +
		    (devip->channel & 0x7f);
		if (sdebug_vpd_use_hostno == 0)
			host_no = 0;
		/* well-known LUs get lu_id_num == -1, see inquiry_vpd_83() */
		lu_id_num = have_wlun ? -1 : (((host_no + 1) * 2000) +
			    (devip->target * 1000) + devip->lun);
		target_dev_id = ((host_no + 1) * 2000) +
				 (devip->target * 1000) - 3;
		len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
		if (0 == cmd[2]) { /* supported vital product data pages */
			n = 4;
			arr[n++] = 0x0;   /* this page */
			arr[n++] = 0x80;  /* unit serial number */
			arr[n++] = 0x83;  /* device identification */
			arr[n++] = 0x84;  /* software interface ident. */
			arr[n++] = 0x85;  /* management network addresses */
			arr[n++] = 0x86;  /* extended inquiry */
			arr[n++] = 0x87;  /* mode page policy */
			arr[n++] = 0x88;  /* SCSI ports */
			if (is_disk_zbc) {	  /* SBC or ZBC */
				arr[n++] = 0x89;  /* ATA information */
				arr[n++] = 0xb0;  /* Block limits */
				arr[n++] = 0xb1;  /* Block characteristics */
				if (is_disk)
					arr[n++] = 0xb2;  /* LB Provisioning */
				if (is_zbc)
					arr[n++] = 0xb6;  /* ZB dev. char. */
				arr[n++] = 0xb7;  /* Block limits extension */
			}
			arr[3] = n - 4;	  /* number of supported VPD pages */
		} else if (0x80 == cmd[2]) { /* unit serial number */
			arr[3] = len;
			memcpy(&arr[4], lu_id_str, len);
		} else if (0x83 == cmd[2]) { /* device identification */
			arr[3] = inquiry_vpd_83(&arr[4], port_group_id,
						target_dev_id, lu_id_num,
						lu_id_str, len,
						&devip->lu_name);
		} else if (0x84 == cmd[2]) { /* Software interface ident. */
			arr[3] = inquiry_vpd_84(&arr[4]);
		} else if (0x85 == cmd[2]) { /* Management network addresses */
			arr[3] = inquiry_vpd_85(&arr[4]);
		} else if (0x86 == cmd[2]) { /* extended inquiry */
			arr[3] = 0x3c;	/* number of following entries */
			if (sdebug_dif == T10_PI_TYPE3_PROTECTION)
				arr[4] = 0x4;	/* SPT: GRD_CHK:1 */
			else if (have_dif_prot)
				arr[4] = 0x5;   /* SPT: GRD_CHK:1, REF_CHK:1 */
			else
				arr[4] = 0x0;   /* no protection stuff */
			/*
			 * GROUP_SUP=1; HEADSUP=1 (HEAD OF QUEUE); ORDSUP=1
			 * (ORDERED queuing); SIMPSUP=1 (SIMPLE queuing).
			 */
			arr[5] = 0x17;
		} else if (0x87 == cmd[2]) { /* mode page policy */
			arr[3] = 0x8;	/* number of following entries */
			arr[4] = 0x2;	/* disconnect-reconnect mp */
			arr[6] = 0x80;	/* mlus, shared */
			arr[8] = 0x18;	 /* protocol specific lu */
			arr[10] = 0x82;	 /* mlus, per initiator port */
		} else if (0x88 == cmd[2]) { /* SCSI Ports */
			arr[3] = inquiry_vpd_88(&arr[4], target_dev_id);
		} else if (is_disk_zbc && 0x89 == cmd[2]) { /* ATA info */
			n = inquiry_vpd_89(&arr[4]);
			put_unaligned_be16(n, arr + 2);	/* 2 byte page length */
		} else if (is_disk_zbc && 0xb0 == cmd[2]) { /* Block limits */
			arr[3] = inquiry_vpd_b0(&arr[4]);
		} else if (is_disk_zbc && 0xb1 == cmd[2]) { /* Block char. */
			arr[3] = inquiry_vpd_b1(devip, &arr[4]);
		} else if (is_disk && 0xb2 == cmd[2]) { /* LB Prov. */
			arr[3] = inquiry_vpd_b2(&arr[4]);
		} else if (is_zbc && cmd[2] == 0xb6) { /* ZB dev. charact. */
			arr[3] = inquiry_vpd_b6(devip, &arr[4]);
		} else if (cmd[2] == 0xb7) { /* block limits extension page */
			arr[3] = inquiry_vpd_b7(&arr[4]);
		} else {
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
			kfree(arr);
			return check_condition_result;
		}
		/* page length from arr[2..3] plus the 4 byte header */
		len = min_t(u32, get_unaligned_be16(arr + 2) + 4, alloc_len);
		ret = fill_from_dev_buffer(scp, arr,
			    min_t(u32, len, SDEBUG_MAX_INQ_ARR_SZ));
		kfree(arr);
		return ret;
	}
	/* drops through here for a standard inquiry */
	arr[1] = sdebug_removable ? 0x80 : 0;	/* Removable disk */
	arr[2] = sdebug_scsi_level;
	arr[3] = 2;    /* response_data_format==2 */
	arr[4] = SDEBUG_LONG_INQ_SZ - 5;
	arr[5] = (int)have_dif_prot;	/* PROTECT bit */
	if (sdebug_vpd_use_hostno == 0)
		arr[5] |= 0x10; /* claim: implicit TPGS */
	arr[6] = 0x10; /* claim: MultiP */
	/* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
	arr[7] = 0xa; /* claim: LINKED + CMDQUE */
	memcpy(&arr[8], sdebug_inq_vendor_id, 8);
	memcpy(&arr[16], sdebug_inq_product_id, 16);
	memcpy(&arr[32], sdebug_inq_product_rev, 4);
	/* Use Vendor Specific area to place driver date in ASCII hex */
	memcpy(&arr[36], sdebug_version_date, 8);
	/* version descriptors (2 bytes each) follow */
	put_unaligned_be16(0xc0, arr + 58);   /* SAM-6 no version claimed */
	put_unaligned_be16(0x5c0, arr + 60);  /* SPC-5 no version claimed */
	n = 62;
	if (is_disk) {		/* SBC-4 no version claimed */
		put_unaligned_be16(0x600, arr + n);
		n += 2;
	} else if (is_tape) {	/* SSC-4 rev 3 */
		put_unaligned_be16(0x525, arr + n);
		n += 2;
	} else if (is_zbc) {	/* ZBC BSR INCITS 536 revision 05 */
		put_unaligned_be16(0x624, arr + n);
		n += 2;
	}
	put_unaligned_be16(0x2100, arr + n);	/* SPL-4 no version claimed */
	ret = fill_from_dev_buffer(scp, arr,
			    min_t(u32, alloc_len, SDEBUG_LONG_INQ_SZ));
	kfree(arr);
	return ret;
}
2221 
/*
 * Information exceptions control mode page (0x1c) current values;
 * resp_requests() checks the TEST bit (byte 2, 0x4) and the MRIE field
 * (low nibble of byte 3). See resp_iec_m_pg() for how this data is
 * manipulated.
 */
static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
				   0, 0, 0x0, 0x0};
2225 
/*
 * REQUEST SENSE (SPC). Builds a sense response reflecting current device
 * state: LOGICAL UNIT NOT READY while stopped (stopped_state 2 means the
 * tur_ms_to_ready start-up delay is running), a faked THRESHOLD EXCEEDED
 * when the IEC mode page has TEST=1 and MRIE=6, otherwise no sense.
 * Honours the DESC bit (descriptor vs fixed sense format).
 */
static int resp_requests(struct scsi_cmnd *scp,
			 struct sdebug_dev_info *devip)
{
	unsigned char *cmd = scp->cmnd;
	unsigned char arr[SCSI_SENSE_BUFFERSIZE];	/* assume >= 18 bytes */
	bool dsense = !!(cmd[1] & 1);
	u32 alloc_len = cmd[4];
	u32 len = 18;
	int stopped_state = atomic_read(&devip->stopped);

	memset(arr, 0, sizeof(arr));
	if (stopped_state > 0) {	/* some "pollable" data [spc6r02: 5.12.2] */
		if (dsense) {
			arr[0] = 0x72;
			arr[1] = NOT_READY;
			arr[2] = LOGICAL_UNIT_NOT_READY;
			arr[3] = (stopped_state == 2) ? 0x1 : 0x2;
			len = 8;
		} else {
			arr[0] = 0x70;
			arr[2] = NOT_READY;		/* NOT_READY in sense_key */
			arr[7] = 0xa;			/* 18 byte sense buffer */
			arr[12] = LOGICAL_UNIT_NOT_READY;
			arr[13] = (stopped_state == 2) ? 0x1 : 0x2;
		}
	} else if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
		/* Information exceptions control mode page: TEST=1, MRIE=6 */
		if (dsense) {
			arr[0] = 0x72;
			arr[1] = 0x0;		/* NO_SENSE in sense_key */
			arr[2] = THRESHOLD_EXCEEDED;
			arr[3] = 0xff;		/* Failure prediction(false) */
			len = 8;
		} else {
			arr[0] = 0x70;
			arr[2] = 0x0;		/* NO_SENSE in sense_key */
			arr[7] = 0xa;   	/* 18 byte sense buffer */
			arr[12] = THRESHOLD_EXCEEDED;
			arr[13] = 0xff;		/* Failure prediction(false) */
		}
	} else {	/* nothing to report */
		if (dsense) {
			len = 8;
			memset(arr, 0, len);
			arr[0] = 0x72;
		} else {
			memset(arr, 0, len);
			arr[0] = 0x70;
			arr[7] = 0xa;
		}
	}
	return fill_from_dev_buffer(scp, arr, min_t(u32, len, alloc_len));
}
2279 
/*
 * START STOP UNIT (SBC). Only power condition 0 is accepted.
 * devip->stopped encodes: 0 => started, 1 => stopped, 2 => still inside
 * the tur_ms_to_ready start-up delay (measured from devip->create_ts).
 * A start request during that delay is rejected; a stop is dummied up as
 * success. Returns SDEG_RES_IMMED_MASK when the state did not change or
 * the IMMED bit was set.
 */
static int resp_start_stop(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	unsigned char *cmd = scp->cmnd;
	int power_cond, want_stop, stopped_state;
	bool changing;

	power_cond = (cmd[4] & 0xf0) >> 4;
	if (power_cond) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 7);
		return check_condition_result;
	}
	want_stop = !(cmd[4] & 1);	/* START bit clear => stop request */
	stopped_state = atomic_read(&devip->stopped);
	if (stopped_state == 2) {
		ktime_t now_ts = ktime_get_boottime();

		if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
			u64 diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));

			if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
				/* tur_ms_to_ready timer extinguished */
				atomic_set(&devip->stopped, 0);
				stopped_state = 0;
			}
		}
		if (stopped_state == 2) {
			if (want_stop) {
				stopped_state = 1;	/* dummy up success */
			} else {	/* Disallow tur_ms_to_ready delay to be overridden */
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 0 /* START bit */);
				return check_condition_result;
			}
		}
	}
	changing = (stopped_state != want_stop);
	if (changing)
		atomic_xchg(&devip->stopped, want_stop);
	/* starting a tape rewinds all partitions to the beginning */
	if (scp->device->type == TYPE_TAPE && !want_stop) {
		int i;

		set_bit(SDEBUG_UA_NOT_READY_TO_READY, devip->uas_bm); /* not legal! */
		for (i = 0; i < TAPE_MAX_PARTITIONS; i++)
			devip->tape_location[i] = 0;
		devip->tape_partition = 0;
	}
	if (!changing || (cmd[1] & 0x1))  /* state unchanged or IMMED bit set in cdb */
		return SDEG_RES_IMMED_MASK;
	else
		return 0;
}
2330 
2331 static sector_t get_sdebug_capacity(void)
2332 {
2333 	static const unsigned int gibibyte = 1073741824;
2334 
2335 	if (sdebug_virtual_gb > 0)
2336 		return (sector_t)sdebug_virtual_gb *
2337 			(gibibyte / sdebug_sector_size);
2338 	else
2339 		return sdebug_store_sectors;
2340 }
2341 
#define SDEBUG_READCAP_ARR_SZ 8
/*
 * READ CAPACITY(10) (SBC). Reports the last LBA and the block size; a
 * returned LBA of 0xffffffff tells the initiator the capacity does not
 * fit and READ CAPACITY(16) is needed.
 */
static int resp_readcap(struct scsi_cmnd *scp,
			struct sdebug_dev_info *devip)
{
	unsigned char arr[SDEBUG_READCAP_ARR_SZ];
	unsigned int capac;

	/* following just in case virtual_gb changed */
	sdebug_capacity = get_sdebug_capacity();
	memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
	if (sdebug_capacity < 0xffffffff) {
		capac = (unsigned int)sdebug_capacity - 1;
		put_unaligned_be32(capac, arr + 0);
	} else
		put_unaligned_be32(0xffffffff, arr + 0);
	put_unaligned_be16(sdebug_sector_size, arr + 6);
	return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
}
2360 
#define SDEBUG_READCAP16_ARR_SZ 32
/*
 * READ CAPACITY(16) (SBC). Reports the last LBA, block size, physical
 * block exponent, lowest aligned LBA and the provisioning (LBPME/LBPRZ)
 * and protection (P_TYPE/PROT_EN) fields.
 */
static int resp_readcap16(struct scsi_cmnd *scp,
			  struct sdebug_dev_info *devip)
{
	unsigned char *cmd = scp->cmnd;
	unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
	u32 alloc_len;

	alloc_len = get_unaligned_be32(cmd + 10);
	/* following just in case virtual_gb changed */
	sdebug_capacity = get_sdebug_capacity();
	memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
	put_unaligned_be64((u64)(sdebug_capacity - 1), arr + 0);
	put_unaligned_be32(sdebug_sector_size, arr + 8);
	arr[13] = sdebug_physblk_exp & 0xf;
	arr[14] = (sdebug_lowest_aligned >> 8) & 0x3f;

	if (scsi_debug_lbp()) {
		arr[14] |= 0x80; /* LBPME */
		/* from sbc4r07, this LBPRZ field is 1 bit, but the LBPRZ in
		 * the LB Provisioning VPD page is 3 bits. Note that lbprz=2
		 * in the wider field maps to 0 in this field.
		 */
		if (sdebug_lbprz & 1)	/* precisely what the draft requires */
			arr[14] |= 0x40;
	}

	/*
	 * Since the scsi_debug READ CAPACITY implementation always reports the
	 * total disk capacity, set RC BASIS = 1 for host-managed ZBC devices.
	 */
	if (devip->zoned)
		arr[12] |= 1 << 4;

	arr[15] = sdebug_lowest_aligned & 0xff;

	if (have_dif_prot) {
		arr[12] = (sdebug_dif - 1) << 1; /* P_TYPE */
		arr[12] |= 1; /* PROT_EN */
	}

	return fill_from_dev_buffer(scp, arr,
			    min_t(u32, alloc_len, SDEBUG_READCAP16_ARR_SZ));
}
2405 
#define SDEBUG_MAX_TGTPGS_ARR_SZ 1412

/*
 * REPORT TARGET PORT GROUPS (SPC). Builds two single-port groups to match
 * the two ports advertised by VPD page 0x88; the group holding port B is
 * always reported unavailable.
 */
static int resp_report_tgtpgs(struct scsi_cmnd *scp,
			      struct sdebug_dev_info *devip)
{
	unsigned char *cmd = scp->cmnd;
	unsigned char *arr;
	int host_no = devip->sdbg_host->shost->host_no;
	int port_group_a, port_group_b, port_a, port_b;
	u32 alen, n, rlen;
	int ret;

	alen = get_unaligned_be32(cmd + 6);
	arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
	if (! arr)
		return DID_REQUEUE << 16;
	/*
	 * EVPD page 0x88 states we have two ports, one
	 * real and a fake port with no device connected.
	 * So we create two port groups with one port each
	 * and set the group with port B to unavailable.
	 */
	port_a = 0x1; /* relative port A */
	port_b = 0x2; /* relative port B */
	port_group_a = (((host_no + 1) & 0x7f) << 8) +
			(devip->channel & 0x7f);
	port_group_b = (((host_no + 1) & 0x7f) << 8) +
			(devip->channel & 0x7f) + 0x80;

	/*
	 * The asymmetric access state is cycled according to the host_id.
	 */
	n = 4;
	if (sdebug_vpd_use_hostno == 0) {
		arr[n++] = host_no % 3; /* Asymm access state */
		arr[n++] = 0x0F; /* claim: all states are supported */
	} else {
		arr[n++] = 0x0; /* Active/Optimized path */
		arr[n++] = 0x01; /* only support active/optimized paths */
	}
	put_unaligned_be16(port_group_a, arr + n);
	n += 2;
	arr[n++] = 0;    /* Reserved */
	arr[n++] = 0;    /* Status code */
	arr[n++] = 0;    /* Vendor unique */
	arr[n++] = 0x1;  /* One port per group */
	arr[n++] = 0;    /* Reserved */
	arr[n++] = 0;    /* Reserved */
	put_unaligned_be16(port_a, arr + n);
	n += 2;
	arr[n++] = 3;    /* Port unavailable */
	arr[n++] = 0x08; /* claim: only unavailable paths are supported */
	put_unaligned_be16(port_group_b, arr + n);
	n += 2;
	arr[n++] = 0;    /* Reserved */
	arr[n++] = 0;    /* Status code */
	arr[n++] = 0;    /* Vendor unique */
	arr[n++] = 0x1;  /* One port per group */
	arr[n++] = 0;    /* Reserved */
	arr[n++] = 0;    /* Reserved */
	put_unaligned_be16(port_b, arr + n);
	n += 2;

	rlen = n - 4;	/* returned data length excludes 4 byte header */
	put_unaligned_be32(rlen, arr + 0);

	/*
	 * Return the smallest value of either
	 * - The allocated length
	 * - The constructed command length
	 * - The maximum array size
	 */
	rlen = min(alen, n);
	ret = fill_from_dev_buffer(scp, arr,
			   min_t(u32, rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
	kfree(arr);
	return ret;
}
2484 
/*
 * Respond to REPORT SUPPORTED OPERATION CODES (MAINTENANCE IN).
 * reporting_opts 0 builds the all-commands list; 1-3 build a single
 * command descriptor.  Entries come from opcode_info_arr, filtered by
 * the device-type selector bits (devsel) and the F_INV_OP flag.
 */
static int resp_rsup_opcodes(struct scsi_cmnd *scp,
			     struct sdebug_dev_info *devip)
{
	bool rctd;	/* Return Commands Timeout Descriptor requested */
	u8 reporting_opts, req_opcode, sdeb_i, supp;
	u16 req_sa, u;
	u32 alloc_len, a_len;
	int k, offset, len, errsts, bump, na;
	const struct opcode_info_t *oip;
	const struct opcode_info_t *r_oip;
	u8 *arr;
	u8 *cmd = scp->cmnd;
	u32 devsel = sdebug_get_devsel(scp->device);

	rctd = !!(cmd[2] & 0x80);
	reporting_opts = cmd[2] & 0x7;
	req_opcode = cmd[3];
	req_sa = get_unaligned_be16(cmd + 4);
	alloc_len = get_unaligned_be32(cmd + 6);
	if (alloc_len < 4 || alloc_len > 0xffff) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
		return check_condition_result;
	}
	if (alloc_len > 8192)
		a_len = 8192;	/* cap internal buffer at 8 KiB */
	else
		a_len = alloc_len;
	/* allocate headroom so one more descriptor cannot overrun */
	arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_ATOMIC);
	if (NULL == arr) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
				INSUFF_RES_ASCQ);
		return check_condition_result;
	}
	switch (reporting_opts) {
	case 0:	/* all commands */
		/* 8 bytes per descriptor; 20 when timeout descr included */
		bump = rctd ? 20 : 8;
		for (offset = 4, oip = opcode_info_arr;
		     oip->num_attached != 0xff && offset < a_len; ++oip) {
			if (F_INV_OP & oip->flags)
				continue;
			if ((devsel & oip->devsel) != 0) {
				arr[offset] = oip->opcode;
				put_unaligned_be16(oip->sa, arr + offset + 2);
				if (rctd)
					arr[offset + 5] |= 0x2;
				if (FF_SA & oip->flags)
					arr[offset + 5] |= 0x1;
				put_unaligned_be16(oip->len_mask[0], arr + offset + 6);
				if (rctd)
					put_unaligned_be16(0xa, arr + offset + 8);
				offset += bump;
			}
			/* also emit this opcode's attached service actions */
			na = oip->num_attached;
			r_oip = oip;
			for (k = 0, oip = oip->arrp; k < na; ++k, ++oip) {
				if (F_INV_OP & oip->flags)
					continue;
				if ((devsel & oip->devsel) == 0)
					continue;
				arr[offset] = oip->opcode;
				put_unaligned_be16(oip->sa, arr + offset + 2);
				if (rctd)
					arr[offset + 5] |= 0x2;
				if (FF_SA & oip->flags)
					arr[offset + 5] |= 0x1;
				put_unaligned_be16(oip->len_mask[0],
						arr + offset + 6);
				if (rctd)
					put_unaligned_be16(0xa,
							   arr + offset + 8);
				offset += bump;
			}
			oip = r_oip;	/* restore outer loop cursor */
		}
		put_unaligned_be32(offset - 4, arr);	/* command data length */
		break;
	case 1:	/* one command: opcode only */
	case 2:	/* one command: opcode plus service action */
	case 3:	/* one command: if sa==0 then opcode only else opcode+sa */
		sdeb_i = opcode_ind_arr[req_opcode];
		oip = &opcode_info_arr[sdeb_i];
		if (F_INV_OP & oip->flags) {
			supp = 1;	/* not supported */
			offset = 4;
		} else {
			if (1 == reporting_opts) {
				if (FF_SA & oip->flags) {
					/* opcode requires a service action */
					mk_sense_invalid_fld(scp, SDEB_IN_CDB,
							     2, 2);
					kfree(arr);
					return check_condition_result;
				}
				req_sa = 0;
			} else if (2 == reporting_opts &&
				   0 == (FF_SA & oip->flags)) {
				/* sense field pointer: requested sa bytes */
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1);
				kfree(arr);
				return check_condition_result;
			}
			if (0 == (FF_SA & oip->flags) &&
				(devsel & oip->devsel) != 0 &&
				req_opcode == oip->opcode)
				supp = 3;	/* supported */
			else if (0 == (FF_SA & oip->flags)) {
				/* search the attached list for the opcode */
				na = oip->num_attached;
				for (k = 0, oip = oip->arrp; k < na;
				     ++k, ++oip) {
					if (req_opcode == oip->opcode &&
						(devsel & oip->devsel) != 0)
						break;
				}
				supp = (k >= na) ? 1 : 3;
			} else if (req_sa != oip->sa) {
				/* search the attached list for the sa */
				na = oip->num_attached;
				for (k = 0, oip = oip->arrp; k < na;
				     ++k, ++oip) {
					if (req_sa == oip->sa &&
						(devsel & oip->devsel) != 0)
						break;
				}
				supp = (k >= na) ? 1 : 3;
			} else
				supp = 3;
			if (3 == supp) {
				u = oip->len_mask[0];	/* cdb length */
				put_unaligned_be16(u, arr + 2);
				arr[4] = oip->opcode;
				for (k = 1; k < u; ++k)
					arr[4 + k] = (k < 16) ?
						 oip->len_mask[k] : 0xff;
				offset = 4 + u;
			} else
				offset = 4;
		}
		arr[1] = (rctd ? 0x80 : 0) | supp;
		if (rctd) {
			put_unaligned_be16(0xa, arr + offset);
			offset += 12;	/* command timeouts descriptor */
		}
		break;
	default:
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
		kfree(arr);
		return check_condition_result;
	}
	/* clip to internal buffer, then to the cdb allocation length */
	offset = (offset < a_len) ? offset : a_len;
	len = (offset < alloc_len) ? offset : alloc_len;
	errsts = fill_from_dev_buffer(scp, arr, len);
	kfree(arr);
	return errsts;
}
2636 
2637 static int resp_rsup_tmfs(struct scsi_cmnd *scp,
2638 			  struct sdebug_dev_info *devip)
2639 {
2640 	bool repd;
2641 	u32 alloc_len, len;
2642 	u8 arr[16];
2643 	u8 *cmd = scp->cmnd;
2644 
2645 	memset(arr, 0, sizeof(arr));
2646 	repd = !!(cmd[2] & 0x80);
2647 	alloc_len = get_unaligned_be32(cmd + 6);
2648 	if (alloc_len < 4) {
2649 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
2650 		return check_condition_result;
2651 	}
2652 	arr[0] = 0xc8;		/* ATS | ATSS | LURS */
2653 	arr[1] = 0x1;		/* ITNRS */
2654 	if (repd) {
2655 		arr[3] = 0xc;
2656 		len = 16;
2657 	} else
2658 		len = 4;
2659 
2660 	len = (len < alloc_len) ? len : alloc_len;
2661 	return fill_from_dev_buffer(scp, arr, len);
2662 }
2663 
2664 /* <<Following mode page info copied from ST318451LW>> */
2665 
static int resp_err_recov_pg(unsigned char *p, int pcontrol, int target)
{	/* Read-Write Error Recovery mode page (0x1) for MODE SENSE */
	static const unsigned char err_recov_pg[] = {
		0x1, 0xa, 0xc0, 11, 240, 0, 0, 0,
		5, 0, 0xff, 0xff
	};
	const int pg_len = sizeof(err_recov_pg);

	memcpy(p, err_recov_pg, pg_len);
	if (pcontrol == 1)	/* changeable values: none, so zero the body */
		memset(p + 2, 0, pg_len - 2);
	return pg_len;
}
2678 
static int resp_disconnect_pg(unsigned char *p, int pcontrol, int target)
{	/* Disconnect-Reconnect mode page (0x2) for MODE SENSE */
	static const unsigned char disconnect_pg[] = {
		0x2, 0xe, 128, 128, 0, 10, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0
	};
	const int pg_len = sizeof(disconnect_pg);

	memcpy(p, disconnect_pg, pg_len);
	if (pcontrol == 1)	/* changeable values: none, so zero the body */
		memset(p + 2, 0, pg_len - 2);
	return pg_len;
}
2691 
2692 static int resp_format_pg(unsigned char *p, int pcontrol, int target)
2693 {       /* Format device page for mode_sense */
2694 	static const unsigned char format_pg[] = {
2695 		0x3, 0x16, 0, 0, 0, 0, 0, 0,
2696 		0, 0, 0, 0, 0, 0, 0, 0,
2697 		0, 0, 0, 0, 0x40, 0, 0, 0
2698 	};
2699 
2700 	memcpy(p, format_pg, sizeof(format_pg));
2701 	put_unaligned_be16(sdebug_sectors_per, p + 10);
2702 	put_unaligned_be16(sdebug_sector_size, p + 12);
2703 	if (sdebug_removable)
2704 		p[20] |= 0x20; /* should agree with INQUIRY */
2705 	if (1 == pcontrol)
2706 		memset(p + 2, 0, sizeof(format_pg) - 2);
2707 	return sizeof(format_pg);
2708 }
2709 
/*
 * Caching mode page (0x8), "current" values.  Mutable at runtime: the
 * WCE bit (byte 2, 0x4) may be cleared in resp_caching_pg() when
 * SDEBUG_OPT_N_WCE is set, and the page body can be overwritten via
 * MODE SELECT (see resp_mode_select()).
 */
static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
				     0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,
				     0, 0, 0, 0};
2713 
2714 static int resp_caching_pg(unsigned char *p, int pcontrol, int target)
2715 { 	/* Caching page for mode_sense */
2716 	static const unsigned char ch_caching_pg[] = {
2717 		/* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0,
2718 		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
2719 	};
2720 	static const unsigned char d_caching_pg[] = {
2721 		0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
2722 		0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0, 0, 0, 0, 0
2723 	};
2724 
2725 	if (SDEBUG_OPT_N_WCE & sdebug_opts)
2726 		caching_pg[2] &= ~0x4;	/* set WCE=0 (default WCE=1) */
2727 	memcpy(p, caching_pg, sizeof(caching_pg));
2728 	if (1 == pcontrol)
2729 		memcpy(p + 2, ch_caching_pg, sizeof(ch_caching_pg));
2730 	else if (2 == pcontrol)
2731 		memcpy(p, d_caching_pg, sizeof(d_caching_pg));
2732 	return sizeof(caching_pg);
2733 }
2734 
/*
 * Control mode page (0xa), "current" values.  The D_SENSE (byte 2, 0x4)
 * and ATO (byte 5, 0x80) bits are refreshed from module state in
 * resp_ctrl_m_pg(); the page body can be overwritten via MODE SELECT
 * (see resp_mode_select()).
 */
static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
				    0, 0, 0x2, 0x4b};
2737 
2738 static int resp_ctrl_m_pg(unsigned char *p, int pcontrol, int target)
2739 { 	/* Control mode page for mode_sense */
2740 	unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
2741 					0, 0, 0, 0};
2742 	static const unsigned char d_ctrl_m_pg[] = {
2743 		0xa, 10, 2, 0, 0, 0, 0, 0,
2744 		0, 0, 0x2, 0x4b
2745 	};
2746 
2747 	if (sdebug_dsense)
2748 		ctrl_m_pg[2] |= 0x4;
2749 	else
2750 		ctrl_m_pg[2] &= ~0x4;
2751 
2752 	if (sdebug_ato)
2753 		ctrl_m_pg[5] |= 0x80; /* ATO=1 */
2754 
2755 	memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
2756 	if (1 == pcontrol)
2757 		memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
2758 	else if (2 == pcontrol)
2759 		memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
2760 	return sizeof(ctrl_m_pg);
2761 }
2762 
2763 /* IO Advice Hints Grouping mode page */
/* IO Advice Hints Grouping mode page (0xa, subpage 0x5) for MODE SENSE */
static int resp_grouping_m_pg(unsigned char *p, int pcontrol, int target)
{
	/* IO Advice Hints Grouping mode page */
	struct grouping_m_pg {
		u8 page_code;	/* OR 0x40 when subpage_code > 0 */
		u8 subpage_code;
		__be16 page_length;
		u8 reserved[12];
		struct scsi_io_group_descriptor descr[MAXIMUM_NUMBER_OF_STREAMS];
	};
	static const struct grouping_m_pg gr_m_pg = {
		.page_code = 0xa | 0x40,	/* SPF=1: subpage format */
		.subpage_code = 5,
		.page_length = cpu_to_be16(sizeof(gr_m_pg) - 4),
		.descr = {
			/* ST_ENBLE set on all but the last descriptor */
			{ .st_enble = 1 },
			{ .st_enble = 1 },
			{ .st_enble = 1 },
			{ .st_enble = 1 },
			{ .st_enble = 1 },
			{ .st_enble = 0 },
		}
	};

	/* catch layout drift between the local struct and the SCSI headers */
	BUILD_BUG_ON(sizeof(struct grouping_m_pg) !=
		     16 + MAXIMUM_NUMBER_OF_STREAMS * 16);
	memcpy(p, &gr_m_pg, sizeof(gr_m_pg));
	if (1 == pcontrol) {
		/* There are no changeable values so clear from byte 4 on. */
		memset(p + 4, 0, sizeof(gr_m_pg) - 4);
	}
	return sizeof(gr_m_pg);
}
2797 
2798 static int resp_iec_m_pg(unsigned char *p, int pcontrol, int target)
2799 {	/* Informational Exceptions control mode page for mode_sense */
2800 	static const unsigned char ch_iec_m_pg[] = {
2801 		/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
2802 		0, 0, 0x0, 0x0
2803 	};
2804 	static const unsigned char d_iec_m_pg[] = {
2805 		0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
2806 		0, 0, 0x0, 0x0
2807 	};
2808 
2809 	memcpy(p, iec_m_pg, sizeof(iec_m_pg));
2810 	if (1 == pcontrol)
2811 		memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
2812 	else if (2 == pcontrol)
2813 		memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
2814 	return sizeof(iec_m_pg);
2815 }
2816 
static int resp_sas_sf_m_pg(unsigned char *p, int pcontrol, int target)
{	/* SAS SSP mode page (0x19), short format, for MODE SENSE */
	static const unsigned char sas_sf_m_pg[] = {
		0x19, 0x6, 0x6, 0x0, 0x7, 0xd0, 0x0, 0x0
	};
	const int pg_len = sizeof(sas_sf_m_pg);

	memcpy(p, sas_sf_m_pg, pg_len);
	if (pcontrol == 1)	/* changeable values: none, so zero the body */
		memset(p + 2, 0, pg_len - 2);
	return pg_len;
}
2828 
2829 
/*
 * SAS phy control and discover mode subpage (0x19, subpage 0x1) for
 * MODE SENSE.  The template holds two 48-byte phy descriptors starting
 * at offsets 8 and 56; the SAS addresses and a per-phy value derived
 * from target_dev_id are patched in before the copy-out.
 */
static int resp_sas_pcd_m_spg(unsigned char *p, int pcontrol, int target,
			      int target_dev_id)
{	/* SAS phy control and discover mode page for mode_sense */
	unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
		    0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
		    0x2, 0, 0, 0, 0, 0, 0, 0,
		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
		    0, 0, 0, 0, 0, 0, 0, 0,
		    0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
		    0x3, 0, 0, 0, 0, 0, 0, 0,
		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
		    0, 0, 0, 0, 0, 0, 0, 0,
		};
	int port_a, port_b;

	/* patch this target's SAS addresses into both phy descriptors */
	put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 16);
	put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 24);
	put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 64);
	put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 72);
	/* give each phy descriptor a distinct, target-derived value */
	port_a = target_dev_id + 1;
	port_b = port_a + 1;
	memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
	put_unaligned_be32(port_a, p + 20);
	put_unaligned_be32(port_b, p + 48 + 20);
	if (1 == pcontrol)	/* changeable values: none from byte 4 on */
		memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
	return sizeof(sas_pcd_m_pg);
}
2862 
static int resp_sas_sha_m_spg(unsigned char *p, int pcontrol)
{	/* SAS SSP shared protocol specific port mode subpage (0x19/0x2) */
	static const unsigned char sas_sha_m_pg[] = {
		0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
		0, 0, 0, 0, 0, 0, 0, 0,
	};
	const int pg_len = sizeof(sas_sha_m_pg);

	memcpy(p, sas_sha_m_pg, pg_len);
	if (pcontrol == 1)	/* changeable values: none from byte 4 on */
		memset(p + 4, 0, pg_len - 4);
	return pg_len;
}
2875 
/*
 * Medium Partition mode page (0x11), "current" values (tape).  Bytes 3
 * and 4 are updated at runtime by process_medium_part_m_pg() when the
 * page is written via MODE SELECT.
 */
static unsigned char partition_pg[] = {0x11, 12, 1, 0, 0x24, 3, 9, 0,
	0xff, 0xff, 0x00, 0x00};
2878 
2879 static int resp_partition_m_pg(unsigned char *p, int pcontrol, int target)
2880 {	/* Partition page for mode_sense (tape) */
2881 	memcpy(p, partition_pg, sizeof(partition_pg));
2882 	if (pcontrol == 1)
2883 		memset(p + 2, 0, sizeof(partition_pg) - 2);
2884 	return sizeof(partition_pg);
2885 }
2886 
2887 static int process_medium_part_m_pg(struct sdebug_dev_info *devip,
2888 				unsigned char *new, int pg_len)
2889 {
2890 	int new_nbr, p0_size, p1_size;
2891 
2892 	if ((new[4] & 0x80) != 0) { /* FDP */
2893 		partition_pg[4] |= 0x80;
2894 		devip->tape_pending_nbr_partitions = TAPE_MAX_PARTITIONS;
2895 		devip->tape_pending_part_0_size = TAPE_UNITS - TAPE_PARTITION_1_UNITS;
2896 		devip->tape_pending_part_1_size = TAPE_PARTITION_1_UNITS;
2897 	} else {
2898 		new_nbr = new[3] + 1;
2899 		if (new_nbr > TAPE_MAX_PARTITIONS)
2900 			return 3;
2901 		if ((new[4] & 0x40) != 0) { /* SDP */
2902 			p1_size = TAPE_PARTITION_1_UNITS;
2903 			p0_size = TAPE_UNITS - p1_size;
2904 			if (p0_size < 100)
2905 				return 4;
2906 		} else if ((new[4] & 0x20) != 0) {
2907 			if (new_nbr > 1) {
2908 				p0_size = get_unaligned_be16(new + 8);
2909 				p1_size = get_unaligned_be16(new + 10);
2910 				if (p1_size == 0xFFFF)
2911 					p1_size = TAPE_UNITS - p0_size;
2912 				else if (p0_size == 0xFFFF)
2913 					p0_size = TAPE_UNITS - p1_size;
2914 				if (p0_size < 100 || p1_size < 100)
2915 					return 8;
2916 			} else {
2917 				p0_size = TAPE_UNITS;
2918 				p1_size = 0;
2919 			}
2920 		} else
2921 			return 6;
2922 		devip->tape_pending_nbr_partitions = new_nbr;
2923 		devip->tape_pending_part_0_size = p0_size;
2924 		devip->tape_pending_part_1_size = p1_size;
2925 		partition_pg[3] = new_nbr;
2926 		devip->tape_pending_nbr_partitions = new_nbr;
2927 	}
2928 
2929 	return 0;
2930 }
2931 
static int resp_compression_m_pg(unsigned char *p, int pcontrol, int target,
	unsigned char dce)
{	/* Data Compression mode page (0xf) for MODE SENSE (tape) */
	static const unsigned char def_compression_pg[] = {
		0x0f, 14, 0x40, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0
	};
	const int pg_len = sizeof(def_compression_pg);

	memcpy(p, def_compression_pg, pg_len);
	if (dce)	/* reflect the device's current DCE setting */
		p[2] |= 0x80;
	if (pcontrol == 1)	/* changeable: all zero (clears DCE bit too) */
		memset(p + 2, 0, pg_len - 2);
	return pg_len;
}
2947 
2948 /* PAGE_SIZE is more than necessary but provides room for future expansion. */
2949 #define SDEBUG_MAX_MSENSE_SZ PAGE_SIZE
2950 
/*
 * Respond to MODE SENSE(6) and MODE SENSE(10).  Assembles the mode
 * parameter header, optional block descriptor(s) and the requested mode
 * page(s) into a scratch buffer, then returns at most alloc_len bytes.
 */
static int resp_mode_sense(struct scsi_cmnd *scp,
			   struct sdebug_dev_info *devip)
{
	int pcontrol, pcode, subpcode, bd_len;
	unsigned char dev_spec;
	u32 alloc_len, offset, len;
	int target_dev_id;
	int target = scp->device->id;
	unsigned char *ap;
	unsigned char *cmd = scp->cmnd;
	bool dbd, llbaa, msense_6, is_disk, is_zbc, is_tape;

	/* scratch buffer, freed automatically on any return path */
	unsigned char *arr __free(kfree) = kzalloc(SDEBUG_MAX_MSENSE_SZ, GFP_ATOMIC);

	if (!arr)
		return -ENOMEM;
	dbd = !!(cmd[1] & 0x8);		/* disable block descriptors */
	pcontrol = (cmd[2] & 0xc0) >> 6;	/* 0=current 1=changeable 2=default 3=saved */
	pcode = cmd[2] & 0x3f;
	subpcode = cmd[3];
	msense_6 = (MODE_SENSE == cmd[0]);
	llbaa = msense_6 ? false : !!(cmd[1] & 0x10);	/* long LBA, 10-byte cdb only */
	is_disk = (scp->device->type == TYPE_DISK);
	is_zbc = devip->zoned;
	is_tape = (scp->device->type == TYPE_TAPE);
	if ((is_disk || is_zbc || is_tape) && !dbd)
		bd_len = llbaa ? 16 : 8;
	else
		bd_len = 0;
	alloc_len = msense_6 ? cmd[4] : get_unaligned_be16(cmd + 7);
	if (0x3 == pcontrol) {  /* Saving values not supported */
		mk_sense_buffer(scp, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, 0);
		return check_condition_result;
	}
	target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
			(devip->target * 1000) - 3;
	/* for disks+zbc set DPOFUA bit and clear write protect (WP) bit */
	if (is_disk || is_zbc) {
		dev_spec = 0x10;	/* =0x90 if WP=1 implies read-only */
		if (sdebug_wp)
			dev_spec |= 0x80;
	} else
		dev_spec = 0x0;
	/* mode parameter header: 4 bytes for msense_6, 8 for msense_10 */
	if (msense_6) {
		arr[2] = dev_spec;
		arr[3] = bd_len;
		offset = 4;
	} else {
		arr[3] = dev_spec;
		if (16 == bd_len)
			arr[4] = 0x1;	/* set LONGLBA bit */
		arr[7] = bd_len;	/* assume 255 or less */
		offset = 8;
	}
	ap = arr + offset;
	if ((bd_len > 0) && (!sdebug_capacity))
		sdebug_capacity = get_sdebug_capacity();

	if (8 == bd_len) {
		/* short block descriptor: 32-bit count, saturating */
		if (sdebug_capacity > 0xfffffffe)
			put_unaligned_be32(0xffffffff, ap + 0);
		else
			put_unaligned_be32(sdebug_capacity, ap + 0);
		if (is_tape) {
			ap[0] = devip->tape_density;
			put_unaligned_be16(devip->tape_blksize, ap + 6);
		} else
			put_unaligned_be16(sdebug_sector_size, ap + 6);
		offset += bd_len;
		ap = arr + offset;
	} else if (16 == bd_len) {
		/* long LBA descriptor; not valid for tape */
		if (is_tape) {
			mk_sense_invalid_fld(scp, SDEB_IN_DATA, 1, 4);
			return check_condition_result;
		}
		put_unaligned_be64((u64)sdebug_capacity, ap + 0);
		put_unaligned_be32(sdebug_sector_size, ap + 12);
		offset += bd_len;
		ap = arr + offset;
	}
	if (cmd[2] == 0)
		goto only_bd; /* Only block descriptor requested */

	/*
	 * N.B. If len>0 before resp_*_pg() call, then form of that call should be:
	 *        len += resp_*_pg(ap + len, pcontrol, target);
	 */
	switch (pcode) {
	case 0x1:	/* Read-Write error recovery page, direct access */
		if (subpcode > 0x0 && subpcode < 0xff)
			goto bad_subpcode;
		len = resp_err_recov_pg(ap, pcontrol, target);
		offset += len;
		break;
	case 0x2:	/* Disconnect-Reconnect page, all devices */
		if (subpcode > 0x0 && subpcode < 0xff)
			goto bad_subpcode;
		len = resp_disconnect_pg(ap, pcontrol, target);
		offset += len;
		break;
	case 0x3:       /* Format device page, direct access */
		if (subpcode > 0x0 && subpcode < 0xff)
			goto bad_subpcode;
		if (is_disk) {
			len = resp_format_pg(ap, pcontrol, target);
			offset += len;
		} else {
			goto bad_pcode;
		}
		break;
	case 0x8:	/* Caching page, direct access */
		if (subpcode > 0x0 && subpcode < 0xff)
			goto bad_subpcode;
		if (is_disk || is_zbc) {
			len = resp_caching_pg(ap, pcontrol, target);
			offset += len;
		} else {
			goto bad_pcode;
		}
		break;
	case 0xa:	/* Control Mode page, all devices */
		switch (subpcode) {
		case 0:
			len = resp_ctrl_m_pg(ap, pcontrol, target);
			break;
		case 0x05:	/* IO Advice Hints Grouping subpage */
			len = resp_grouping_m_pg(ap, pcontrol, target);
			break;
		case 0xff:	/* page 0xa plus all its subpages */
			len = resp_ctrl_m_pg(ap, pcontrol, target);
			len += resp_grouping_m_pg(ap + len, pcontrol, target);
			break;
		default:
			goto bad_subpcode;
		}
		offset += len;
		break;
	case 0xf:	/* Compression Mode Page (tape) */
		if (!is_tape)
			goto bad_pcode;
		len = resp_compression_m_pg(ap, pcontrol, target, devip->tape_dce);
		offset += len;
		break;
	case 0x11:	/* Partition Mode Page (tape) */
		if (!is_tape)
			goto bad_pcode;
		len = resp_partition_m_pg(ap, pcontrol, target);
		offset += len;
		break;
	case 0x19:	/* if spc==1 then sas phy, control+discover */
		if (subpcode > 0x2 && subpcode < 0xff)
			goto bad_subpcode;
		len = 0;
		if ((0x0 == subpcode) || (0xff == subpcode))
			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
		if ((0x1 == subpcode) || (0xff == subpcode))
			len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
						  target_dev_id);
		if ((0x2 == subpcode) || (0xff == subpcode))
			len += resp_sas_sha_m_spg(ap + len, pcontrol);
		offset += len;
		break;
	case 0x1c:	/* Informational Exceptions Mode page, all devices */
		if (subpcode > 0x0 && subpcode < 0xff)
			goto bad_subpcode;
		len = resp_iec_m_pg(ap, pcontrol, target);
		offset += len;
		break;
	case 0x3f:	/* Read all Mode pages */
		if (subpcode > 0x0 && subpcode < 0xff)
			goto bad_subpcode;
		len = resp_err_recov_pg(ap, pcontrol, target);
		len += resp_disconnect_pg(ap + len, pcontrol, target);
		if (is_disk) {
			len += resp_format_pg(ap + len, pcontrol, target);
			len += resp_caching_pg(ap + len, pcontrol, target);
		} else if (is_zbc) {
			len += resp_caching_pg(ap + len, pcontrol, target);
		}
		len += resp_ctrl_m_pg(ap + len, pcontrol, target);
		if (0xff == subpcode)
			len += resp_grouping_m_pg(ap + len, pcontrol, target);
		len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
		if (0xff == subpcode) {
			len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
						  target_dev_id);
			len += resp_sas_sha_m_spg(ap + len, pcontrol);
		}
		len += resp_iec_m_pg(ap + len, pcontrol, target);
		offset += len;
		break;
	default:
		goto bad_pcode;
	}
only_bd:
	/* fill in the mode data length field of the header */
	if (msense_6)
		arr[0] = offset - 1;
	else
		put_unaligned_be16((offset - 2), arr + 0);
	return fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, offset));

bad_pcode:
	mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
	return check_condition_result;

bad_subpcode:
	mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
	return check_condition_result;
}
3160 
3161 #define SDEBUG_MAX_MSELECT_SZ 512
3162 
/*
 * Respond to MODE SELECT(6) and MODE SELECT(10).  Fetches the parameter
 * list from the initiator, validates the header and block descriptor
 * (with tape-specific density/block-size checks), then applies the
 * single mode page supplied to the corresponding *_pg table.
 */
static int resp_mode_select(struct scsi_cmnd *scp,
			    struct sdebug_dev_info *devip)
{
	int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
	int param_len, res, mpage;
	unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
	unsigned char *cmd = scp->cmnd;
	int mselect6 = (MODE_SELECT == cmd[0]);

	memset(arr, 0, sizeof(arr));
	pf = cmd[1] & 0x10;	/* page format: must be set */
	sp = cmd[1] & 0x1;	/* save pages: not supported */
	param_len = mselect6 ? cmd[4] : get_unaligned_be16(cmd + 7);
	if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1);
		return check_condition_result;
	}
	res = fetch_to_dev_buffer(scp, arr, param_len);
	if (-1 == res)
		return DID_ERROR << 16;
	else if (sdebug_verbose && (res < param_len))
		sdev_printk(KERN_INFO, scp->device,
			    "cdb indicated=%d, IO sent=%d bytes\n",
			    param_len, res);
	md_len = mselect6 ? (arr[0] + 1) : (get_unaligned_be16(arr + 0) + 2);
	bd_len = mselect6 ? arr[3] : get_unaligned_be16(arr + 6);
	off = (mselect6 ? 4 : 8);	/* start of block descriptors */
	if (scp->device->type == TYPE_TAPE) {
		int blksize;

		/* tape expects exactly one short (8 byte) block descriptor */
		if (bd_len != 8) {
			mk_sense_invalid_fld(scp, SDEB_IN_DATA,
					mselect6 ? 3 : 6, -1);
			return check_condition_result;
		}
		if (arr[off] == TAPE_BAD_DENSITY) {
			mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
			return check_condition_result;
		}
		blksize = get_unaligned_be16(arr + off + 6);
		/* 0 means variable block size, otherwise bounded, 4-aligned */
		if (blksize != 0 &&
			(blksize < TAPE_MIN_BLKSIZE ||
				blksize > TAPE_MAX_BLKSIZE ||
				(blksize % 4) != 0)) {
			mk_sense_invalid_fld(scp, SDEB_IN_DATA, 1, -1);
			return check_condition_result;
		}
		devip->tape_density = arr[off];
		devip->tape_blksize = blksize;
	}
	off += bd_len;
	if (off >= res)
		return 0; /* No page written, just descriptors */
	/* reject a non-zero mode data length field (reserved in MODE SELECT) */
	if (md_len > 2) {
		mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
		return check_condition_result;
	}
	mpage = arr[off] & 0x3f;	/* page code */
	ps = !!(arr[off] & 0x80);	/* PS bit is reserved for MODE SELECT */
	if (ps) {
		mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 7);
		return check_condition_result;
	}
	spf = !!(arr[off] & 0x40);	/* subpage format */
	pg_len = spf ? (get_unaligned_be16(arr + off + 2) + 4) :
		       (arr[off + 1] + 2);
	if ((pg_len + off) > param_len) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST,
				PARAMETER_LIST_LENGTH_ERR, 0);
		return check_condition_result;
	}
	switch (mpage) {
	case 0x8:      /* Caching Mode page */
		if (caching_pg[1] == arr[off + 1]) {
			memcpy(caching_pg + 2, arr + off + 2,
			       sizeof(caching_pg) - 2);
			goto set_mode_changed_ua;
		}
		break;
	case 0xa:      /* Control Mode page */
		if (ctrl_m_pg[1] == arr[off + 1]) {
			memcpy(ctrl_m_pg + 2, arr + off + 2,
			       sizeof(ctrl_m_pg) - 2);
			/* propagate SWP and D_SENSE to module state */
			if (ctrl_m_pg[4] & 0x8)
				sdebug_wp = true;
			else
				sdebug_wp = false;
			sdebug_dsense = !!(ctrl_m_pg[2] & 0x4);
			goto set_mode_changed_ua;
		}
		break;
	case 0xf:       /* Compression mode page */
		if (scp->device->type != TYPE_TAPE)
			goto bad_pcode;
		/* only act when byte 2 bit 6 is set; bit 7 carries DCE */
		if ((arr[off + 2] & 0x40) != 0) {
			devip->tape_dce = (arr[off + 2] & 0x80) != 0;
			return 0;
		}
		break;
	case 0x11:	/* Medium Partition Mode Page (tape) */
		if (scp->device->type == TYPE_TAPE) {
			int fld;

			fld = process_medium_part_m_pg(devip, &arr[off], pg_len);
			if (fld == 0)
				return 0;
			/* fld is the offending byte offset in the page */
			mk_sense_invalid_fld(scp, SDEB_IN_DATA, fld, -1);
			return check_condition_result;
		}
		break;
	case 0x1c:      /* Informational Exceptions Mode page */
		if (iec_m_pg[1] == arr[off + 1]) {
			memcpy(iec_m_pg + 2, arr + off + 2,
			       sizeof(iec_m_pg) - 2);
			goto set_mode_changed_ua;
		}
		break;
	default:
		break;
	}
	mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 5);
	return check_condition_result;
set_mode_changed_ua:
	set_bit(SDEBUG_UA_MODE_CHANGED, devip->uas_bm);
	return 0;

bad_pcode:
	mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
	return check_condition_result;
}
3293 
static int resp_temp_l_pg(unsigned char *arr)
{	/* Temperature log page (0xd) parameters for LOG SENSE */
	/* parameter 0x0000: current temperature, 38 C */
	static const unsigned char cur_temp[] = { 0x0, 0x0, 0x3, 0x2, 0x0, 38 };
	/* parameter 0x0001: reference temperature, 65 C */
	static const unsigned char ref_temp[] = { 0x0, 0x1, 0x3, 0x2, 0x0, 65 };

	memcpy(arr, cur_temp, sizeof(cur_temp));
	memcpy(arr + sizeof(cur_temp), ref_temp, sizeof(ref_temp));
	return sizeof(cur_temp) + sizeof(ref_temp);
}
3304 
3305 static int resp_ie_l_pg(unsigned char *arr)
3306 {
3307 	static const unsigned char ie_l_pg[] = {
3308 		0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
3309 	};
3310 
3311 	memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
3312 	if (iec_m_pg[2] & 0x4) {	/* TEST bit set */
3313 		arr[4] = THRESHOLD_EXCEEDED;
3314 		arr[5] = 0xff;
3315 	}
3316 	return sizeof(ie_l_pg);
3317 }
3318 
static int resp_env_rep_l_spg(unsigned char *arr)
{	/* Environmental Reporting log subpage (0xd/0x1) for LOG SENSE */
	/* parameter 0x0000: temperature report */
	static const unsigned char temp_rep[] = {
		0x0, 0x0, 0x23, 0x8,
		0x0, 40, 72, 0xff, 45, 18, 0, 0,
	};
	/* parameter 0x0100: relative humidity report */
	static const unsigned char humidity_rep[] = {
		0x1, 0x0, 0x23, 0x8,
		0x0, 55, 72, 35, 55, 45, 0, 0,
	};

	memcpy(arr, temp_rep, sizeof(temp_rep));
	memcpy(arr + sizeof(temp_rep), humidity_rep, sizeof(humidity_rep));
	return sizeof(temp_rep) + sizeof(humidity_rep);
}
3331 
3332 #define SDEBUG_MAX_LSENSE_SZ 512
3333 
/*
 * Respond to LOG SENSE.  Supports the supported-pages page (0x0), the
 * temperature page (0xd, with the environment-reporting subpage 0x1)
 * and the informational-exceptions page (0x2f), plus the corresponding
 * supported-subpages listings (subpage 0xff).
 */
static int resp_log_sense(struct scsi_cmnd *scp,
			  struct sdebug_dev_info *devip)
{
	int ppc, sp, pcode, subpcode;
	u32 alloc_len, len, n;
	unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
	unsigned char *cmd = scp->cmnd;

	memset(arr, 0, sizeof(arr));
	ppc = cmd[1] & 0x2;	/* parameter pointer control: unsupported */
	sp = cmd[1] & 0x1;	/* save parameters: unsupported */
	if (ppc || sp) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, ppc ? 1 : 0);
		return check_condition_result;
	}
	pcode = cmd[2] & 0x3f;
	subpcode = cmd[3] & 0xff;
	alloc_len = get_unaligned_be16(cmd + 7);
	arr[0] = pcode;
	if (0 == subpcode) {
		switch (pcode) {
		case 0x0:	/* Supported log pages log page */
			n = 4;
			arr[n++] = 0x0;		/* this page */
			arr[n++] = 0xd;		/* Temperature */
			arr[n++] = 0x2f;	/* Informational exceptions */
			arr[3] = n - 4;		/* page length */
			break;
		case 0xd:	/* Temperature log page */
			arr[3] = resp_temp_l_pg(arr + 4);
			break;
		case 0x2f:	/* Informational exceptions log page */
			arr[3] = resp_ie_l_pg(arr + 4);
			break;
		default:
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
			return check_condition_result;
		}
	} else if (0xff == subpcode) {
		arr[0] |= 0x40;		/* SPF=1: subpage format */
		arr[1] = subpcode;
		switch (pcode) {
		case 0x0:	/* Supported log pages and subpages log page */
			n = 4;
			arr[n++] = 0x0;
			arr[n++] = 0x0;		/* 0,0 page */
			arr[n++] = 0x0;
			arr[n++] = 0xff;	/* this page */
			arr[n++] = 0xd;
			arr[n++] = 0x0;		/* Temperature */
			arr[n++] = 0xd;
			arr[n++] = 0x1;		/* Environment reporting */
			arr[n++] = 0xd;
			arr[n++] = 0xff;	/* all 0xd subpages */
			arr[n++] = 0x2f;
			arr[n++] = 0x0;	/* Informational exceptions */
			arr[n++] = 0x2f;
			arr[n++] = 0xff;	/* all 0x2f subpages */
			arr[3] = n - 4;
			break;
		case 0xd:	/* Temperature subpages */
			n = 4;
			arr[n++] = 0xd;
			arr[n++] = 0x0;		/* Temperature */
			arr[n++] = 0xd;
			arr[n++] = 0x1;		/* Environment reporting */
			arr[n++] = 0xd;
			arr[n++] = 0xff;	/* these subpages */
			arr[3] = n - 4;
			break;
		case 0x2f:	/* Informational exceptions subpages */
			n = 4;
			arr[n++] = 0x2f;
			arr[n++] = 0x0;		/* Informational exceptions */
			arr[n++] = 0x2f;
			arr[n++] = 0xff;	/* these subpages */
			arr[3] = n - 4;
			break;
		default:
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
			return check_condition_result;
		}
	} else if (subpcode > 0) {
		arr[0] |= 0x40;
		arr[1] = subpcode;
		if (pcode == 0xd && subpcode == 1)
			arr[3] = resp_env_rep_l_spg(arr + 4);
		else {
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
			return check_condition_result;
		}
	} else {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
		return check_condition_result;
	}
	/* page length (arr+2) plus 4-byte header, clipped to alloc_len */
	len = min_t(u32, get_unaligned_be16(arr + 2) + 4, alloc_len);
	return fill_from_dev_buffer(scp, arr,
		    min_t(u32, len, SDEBUG_MAX_INQ_ARR_SZ));
}
3433 
3434 enum {SDEBUG_READ_BLOCK_LIMITS_ARR_SZ = 6};
3435 static int resp_read_blklimits(struct scsi_cmnd *scp,
3436 			struct sdebug_dev_info *devip)
3437 {
3438 	unsigned char arr[SDEBUG_READ_BLOCK_LIMITS_ARR_SZ];
3439 
3440 	arr[0] = 4;
3441 	put_unaligned_be24(TAPE_MAX_BLKSIZE, arr + 1);
3442 	put_unaligned_be16(TAPE_MIN_BLKSIZE, arr + 4);
3443 	return fill_from_dev_buffer(scp, arr, SDEBUG_READ_BLOCK_LIMITS_ARR_SZ);
3444 }
3445 
/*
 * Respond to LOCATE (tape).  Optionally switches partition first
 * (cmd[1] bit 1 with the partition number in cmd[8]), then positions
 * to logical object 'pos'.  If end-of-data is reached before 'pos',
 * the position sticks at EOD and BLANK CHECK sense is returned.
 */
static int resp_locate(struct scsi_cmnd *scp,
		struct sdebug_dev_info *devip)
{
	unsigned char *cmd = scp->cmnd;
	unsigned int i, pos;
	struct tape_block *blp;
	int partition;

	if ((cmd[1] & 0x02) != 0) {	/* change partition requested */
		if (cmd[8] >= devip->tape_nbr_partitions) {
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, -1);
			return check_condition_result;
		}
		devip->tape_partition = cmd[8];
	}
	pos = get_unaligned_be32(cmd + 3);	/* target logical position */
	partition = devip->tape_partition;

	/* walk the block list, stopping early at end-of-data */
	for (i = 0, blp = devip->tape_blocks[partition];
	     i < pos && i < devip->tape_eop[partition]; i++, blp++)
		if (IS_TAPE_BLOCK_EOD(blp->fl_size))
			break;
	if (i < pos) {	/* hit EOD (or partition end) before 'pos' */
		devip->tape_location[partition] = i;
		mk_sense_buffer(scp, BLANK_CHECK, 0x05, 0);
		return check_condition_result;
	}
	devip->tape_location[partition] = pos;

	return 0;
}
3477 
/*
 * WRITE FILEMARKS(6): write 'count' consecutive filemarks at the current
 * position of the current partition, then terminate the recorded area
 * with an EOD marker and advance the position past the filemarks.
 * Any non-zero flag bits in byte 1 are rejected (see inline comment).
 */
static int resp_write_filemarks(struct scsi_cmnd *scp,
		struct sdebug_dev_info *devip)
{
	unsigned char *cmd = scp->cmnd;
	unsigned int i, count, pos;
	u32 data;
	int partition = devip->tape_partition;

	if ((cmd[1] & 0xfe) != 0) { /* probably write setmarks, not in >= SCSI-3 */
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
		return check_condition_result;
	}
	count = get_unaligned_be24(cmd + 2);
	data = TAPE_BLOCK_FM_FLAG;
	for (i = 0, pos = devip->tape_location[partition]; i < count; i++, pos++) {
		if (pos >= devip->tape_eop[partition] - 1) { /* don't overwrite EOD */
			/* Ran out of room: park just before EOP and report overflow */
			devip->tape_location[partition] = devip->tape_eop[partition] - 1;
			mk_sense_info_tape(scp, VOLUME_OVERFLOW, NO_ADDITIONAL_SENSE,
					EOP_EOM_DETECTED_ASCQ, count, SENSE_FLAG_EOM);
			return check_condition_result;
		}
		(devip->tape_blocks[partition] + pos)->fl_size = data;
	}
	/* Terminate the recorded area right after the filemarks */
	(devip->tape_blocks[partition] + pos)->fl_size =
		TAPE_BLOCK_EOD_FLAG;
	devip->tape_location[partition] = pos;

	return 0;
}
3507 
/*
 * SPACE(6): move the tape position over 'count' objects of the kind
 * selected by the code field in byte 1 (0 = blocks, 1 = filemarks,
 * 3 = to end-of-data). The 24-bit count is sign extended, so a negative
 * count spaces backwards. Movement stops early with CHECK CONDITION when
 * a filemark, EOD, BOP or EOP is encountered; the residual object count
 * is reported in the sense information field.
 */
static int resp_space(struct scsi_cmnd *scp,
		struct sdebug_dev_info *devip)
{
	unsigned char *cmd = scp->cmnd, code;
	int i = 0, pos, count;
	struct tape_block *blp;
	int partition = devip->tape_partition;

	count = get_unaligned_be24(cmd + 2);
	if ((count & 0x800000) != 0) /* extend negative to 32-bit count */
		count |= 0xff000000;
	code = cmd[1] & 0x0f;

	pos = devip->tape_location[partition];
	if (code == 0) { /* blocks */
		if (count < 0) {
			count = (-count);
			pos -= 1;
			/* Walk backwards; a filemark or BOP stops the motion */
			for (i = 0, blp = devip->tape_blocks[partition] + pos; i < count;
			     i++) {
				if (pos < 0)
					goto is_bop;
				else if (IS_TAPE_BLOCK_FM(blp->fl_size))
					goto is_fm;
				if (i > 0) {
					pos--;
					blp--;
				}
			}
		} else if (count > 0) {
			/* Walk forwards; EOD, a filemark or EOP stops the motion */
			for (i = 0, blp = devip->tape_blocks[partition] + pos; i < count;
			     i++, pos++, blp++) {
				if (IS_TAPE_BLOCK_EOD(blp->fl_size))
					goto is_eod;
				if (IS_TAPE_BLOCK_FM(blp->fl_size)) {
					pos += 1;
					goto is_fm;
				}
				if (pos >= devip->tape_eop[partition])
					goto is_eop;
			}
		}
	} else if (code == 1) { /* filemarks */
		if (count < 0) {
			count = (-count);
			if (pos == 0)
				goto is_bop;
			else {
				/* Outer loop counts filemarks; inner loop skips
				 * back over the data blocks between them. */
				for (i = 0, blp = devip->tape_blocks[partition] + pos;
				     i < count && pos >= 0; i++, pos--, blp--) {
					for (pos--, blp-- ; !IS_TAPE_BLOCK_FM(blp->fl_size) &&
						     pos >= 0; pos--, blp--)
						; /* empty */
					if (pos < 0)
						goto is_bop;
				}
			}
			pos += 1;
		} else if (count > 0) {
			for (i = 0, blp = devip->tape_blocks[partition] + pos;
			     i < count; i++, pos++, blp++) {
				/* Skip forwards to the next filemark/EOD/EOP */
				for ( ; !IS_TAPE_BLOCK_FM(blp->fl_size) &&
					      !IS_TAPE_BLOCK_EOD(blp->fl_size) &&
					      pos < devip->tape_eop[partition];
				      pos++, blp++)
					; /* empty */
				if (IS_TAPE_BLOCK_EOD(blp->fl_size))
					goto is_eod;
				if (pos >= devip->tape_eop[partition])
					goto is_eop;
			}
		}
	} else if (code == 3) { /* EOD */
		for (blp = devip->tape_blocks[partition] + pos;
		     !IS_TAPE_BLOCK_EOD(blp->fl_size) && pos < devip->tape_eop[partition];
		     pos++, blp++)
			; /* empty */
		if (pos >= devip->tape_eop[partition])
			goto is_eop;
	} else {
		/* sequential filemarks not supported */
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, -1);
		return check_condition_result;
	}
	devip->tape_location[partition] = pos;
	return 0;

	/* Stopped at a filemark; count - i objects were not spaced over */
is_fm:
	devip->tape_location[partition] = pos;
	mk_sense_info_tape(scp, NO_SENSE, NO_ADDITIONAL_SENSE,
			FILEMARK_DETECTED_ASCQ, count - i,
			SENSE_FLAG_FILEMARK);
	return check_condition_result;

	/* Ran into end-of-data (blank tape follows) */
is_eod:
	devip->tape_location[partition] = pos;
	mk_sense_info_tape(scp, BLANK_CHECK, NO_ADDITIONAL_SENSE,
			EOD_DETECTED_ASCQ, count - i,
			0);
	return check_condition_result;

	/* Hit beginning-of-partition while spacing backwards */
is_bop:
	devip->tape_location[partition] = 0;
	mk_sense_info_tape(scp, NO_SENSE, NO_ADDITIONAL_SENSE,
			BEGINNING_OF_P_M_DETECTED_ASCQ, count - i,
			SENSE_FLAG_EOM);
	/* NOTE(review): duplicate assignment below; harmless */
	devip->tape_location[partition] = 0;
	return check_condition_result;

	/* Hit end-of-partition while spacing forwards */
is_eop:
	devip->tape_location[partition] = devip->tape_eop[partition] - 1;
	mk_sense_info_tape(scp, MEDIUM_ERROR, NO_ADDITIONAL_SENSE,
			EOP_EOM_DETECTED_ASCQ, (unsigned int)i,
			SENSE_FLAG_EOM);
	return check_condition_result;
}
3624 
3625 enum {SDEBUG_READ_POSITION_ARR_SZ = 20};
3626 static int resp_read_position(struct scsi_cmnd *scp,
3627 			struct sdebug_dev_info *devip)
3628 {
3629 	u8 *cmd = scp->cmnd;
3630 	int all_length;
3631 	unsigned char arr[20];
3632 	unsigned int pos;
3633 
3634 	all_length = get_unaligned_be16(cmd + 7);
3635 	if ((cmd[1] & 0xfe) != 0 ||
3636 		all_length != 0) { /* only short form */
3637 		mk_sense_invalid_fld(scp, SDEB_IN_CDB,
3638 				all_length ? 7 : 1, 0);
3639 		return check_condition_result;
3640 	}
3641 	memset(arr, 0, SDEBUG_READ_POSITION_ARR_SZ);
3642 	arr[1] = devip->tape_partition;
3643 	pos = devip->tape_location[devip->tape_partition];
3644 	put_unaligned_be32(pos, arr + 4);
3645 	put_unaligned_be32(pos, arr + 8);
3646 	return fill_from_dev_buffer(scp, arr, SDEBUG_READ_POSITION_ARR_SZ);
3647 }
3648 
3649 static int resp_rewind(struct scsi_cmnd *scp,
3650 		struct sdebug_dev_info *devip)
3651 {
3652 	devip->tape_location[devip->tape_partition] = 0;
3653 
3654 	return 0;
3655 }
3656 
3657 static int partition_tape(struct sdebug_dev_info *devip, int nbr_partitions,
3658 			int part_0_size, int part_1_size)
3659 {
3660 	int i;
3661 
3662 	if (part_0_size + part_1_size > TAPE_UNITS)
3663 		return -1;
3664 	devip->tape_eop[0] = part_0_size;
3665 	devip->tape_blocks[0]->fl_size = TAPE_BLOCK_EOD_FLAG;
3666 	devip->tape_eop[1] = part_1_size;
3667 	devip->tape_blocks[1] = devip->tape_blocks[0] +
3668 			devip->tape_eop[0];
3669 	devip->tape_blocks[1]->fl_size = TAPE_BLOCK_EOD_FLAG;
3670 
3671 	for (i = 0 ; i < TAPE_MAX_PARTITIONS; i++)
3672 		devip->tape_location[i] = 0;
3673 
3674 	devip->tape_nbr_partitions = nbr_partitions;
3675 	devip->tape_partition = 0;
3676 
3677 	partition_pg[3] = nbr_partitions - 1;
3678 	put_unaligned_be16(devip->tape_eop[0], partition_pg + 8);
3679 	put_unaligned_be16(devip->tape_eop[1], partition_pg + 10);
3680 
3681 	return nbr_partitions;
3682 }
3683 
3684 static int resp_format_medium(struct scsi_cmnd *scp,
3685 			struct sdebug_dev_info *devip)
3686 {
3687 	int res = 0;
3688 	unsigned char *cmd = scp->cmnd;
3689 
3690 	if (cmd[2] > 2) {
3691 		mk_sense_invalid_fld(scp, SDEB_IN_DATA, 2, -1);
3692 		return check_condition_result;
3693 	}
3694 	if (cmd[2] != 0) {
3695 		if (devip->tape_pending_nbr_partitions > 0) {
3696 			res = partition_tape(devip,
3697 					devip->tape_pending_nbr_partitions,
3698 					devip->tape_pending_part_0_size,
3699 					devip->tape_pending_part_1_size);
3700 		} else
3701 			res = partition_tape(devip, devip->tape_nbr_partitions,
3702 					devip->tape_eop[0], devip->tape_eop[1]);
3703 	} else
3704 		res = partition_tape(devip, 1, TAPE_UNITS, 0);
3705 	if (res < 0)
3706 		return -EINVAL;
3707 
3708 	devip->tape_pending_nbr_partitions = -1;
3709 
3710 	return 0;
3711 }
3712 
3713 static int resp_erase(struct scsi_cmnd *scp,
3714 		struct sdebug_dev_info *devip)
3715 {
3716 	int partition = devip->tape_partition;
3717 	int pos = devip->tape_location[partition];
3718 	struct tape_block *blp;
3719 
3720 	blp = devip->tape_blocks[partition] + pos;
3721 	blp->fl_size = TAPE_BLOCK_EOD_FLAG;
3722 
3723 	return 0;
3724 }
3725 
3726 static inline bool sdebug_dev_is_zoned(struct sdebug_dev_info *devip)
3727 {
3728 	return devip->nr_zones != 0;
3729 }
3730 
/*
 * Return the zone state entry for the zone containing @lba.
 * With zcap == zsize (or inside the conventional zone range) this is a
 * direct index. Otherwise sequential zones appear to be paired with gap
 * zones in the zstate array (hence the doubled index below) — the final
 * bounds check picks whichever of the pair actually contains @lba.
 */
static struct sdeb_zone_state *zbc_zone(struct sdebug_dev_info *devip,
					unsigned long long lba)
{
	u32 zno = lba >> devip->zsize_shift;
	struct sdeb_zone_state *zsp;

	if (devip->zcap == devip->zsize || zno < devip->nr_conv_zones)
		return &devip->zstate[zno];

	/*
	 * If the zone capacity is less than the zone size, adjust for gap
	 * zones.
	 */
	zno = 2 * zno - devip->nr_conv_zones;
	WARN_ONCE(zno >= devip->nr_zones, "%u > %u\n", zno, devip->nr_zones);
	zsp = &devip->zstate[zno];
	if (lba >= zsp->z_start + zsp->z_size)
		zsp++;
	WARN_ON_ONCE(lba >= zsp->z_start + zsp->z_size);
	return zsp;
}
3752 
/* True for conventional zones (no write pointer management). */
static inline bool zbc_zone_is_conv(struct sdeb_zone_state *zsp)
{
	return zsp->z_type == ZBC_ZTYPE_CNV;
}
3757 
/* True for gap zones (unmapped space between zone capacity and size). */
static inline bool zbc_zone_is_gap(struct sdeb_zone_state *zsp)
{
	return zsp->z_type == ZBC_ZTYPE_GAP;
}
3762 
3763 static inline bool zbc_zone_is_seq(struct sdeb_zone_state *zsp)
3764 {
3765 	return !zbc_zone_is_conv(zsp) && !zbc_zone_is_gap(zsp);
3766 }
3767 
3768 static void zbc_close_zone(struct sdebug_dev_info *devip,
3769 			   struct sdeb_zone_state *zsp)
3770 {
3771 	enum sdebug_z_cond zc;
3772 
3773 	if (!zbc_zone_is_seq(zsp))
3774 		return;
3775 
3776 	zc = zsp->z_cond;
3777 	if (!(zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN))
3778 		return;
3779 
3780 	if (zc == ZC2_IMPLICIT_OPEN)
3781 		devip->nr_imp_open--;
3782 	else
3783 		devip->nr_exp_open--;
3784 
3785 	if (zsp->z_wp == zsp->z_start) {
3786 		zsp->z_cond = ZC1_EMPTY;
3787 	} else {
3788 		zsp->z_cond = ZC4_CLOSED;
3789 		devip->nr_closed++;
3790 	}
3791 }
3792 
3793 static void zbc_close_imp_open_zone(struct sdebug_dev_info *devip)
3794 {
3795 	struct sdeb_zone_state *zsp = &devip->zstate[0];
3796 	unsigned int i;
3797 
3798 	for (i = 0; i < devip->nr_zones; i++, zsp++) {
3799 		if (zsp->z_cond == ZC2_IMPLICIT_OPEN) {
3800 			zbc_close_zone(devip, zsp);
3801 			return;
3802 		}
3803 	}
3804 }
3805 
/*
 * Open a sequential zone, either explicitly (OPEN ZONE) or implicitly
 * (first write). If the open-zone budget (devip->max_open) would be
 * exceeded, an implicitly open zone is closed first. Keeps the
 * nr_imp_open / nr_exp_open / nr_closed counters consistent.
 */
static void zbc_open_zone(struct sdebug_dev_info *devip,
			  struct sdeb_zone_state *zsp, bool explicit)
{
	enum sdebug_z_cond zc;

	if (!zbc_zone_is_seq(zsp))
		return;

	zc = zsp->z_cond;
	/* Already open in the requested mode: nothing to do */
	if ((explicit && zc == ZC3_EXPLICIT_OPEN) ||
	    (!explicit && zc == ZC2_IMPLICIT_OPEN))
		return;

	/* Close an implicit open zone if necessary */
	if (explicit && zsp->z_cond == ZC2_IMPLICIT_OPEN)
		zbc_close_zone(devip, zsp);
	else if (devip->max_open &&
		 devip->nr_imp_open + devip->nr_exp_open >= devip->max_open)
		zbc_close_imp_open_zone(devip);

	if (zsp->z_cond == ZC4_CLOSED)
		devip->nr_closed--;
	if (explicit) {
		zsp->z_cond = ZC3_EXPLICIT_OPEN;
		devip->nr_exp_open++;
	} else {
		zsp->z_cond = ZC2_IMPLICIT_OPEN;
		devip->nr_imp_open++;
	}
}
3836 
3837 static inline void zbc_set_zone_full(struct sdebug_dev_info *devip,
3838 				     struct sdeb_zone_state *zsp)
3839 {
3840 	switch (zsp->z_cond) {
3841 	case ZC2_IMPLICIT_OPEN:
3842 		devip->nr_imp_open--;
3843 		break;
3844 	case ZC3_EXPLICIT_OPEN:
3845 		devip->nr_exp_open--;
3846 		break;
3847 	default:
3848 		WARN_ONCE(true, "Invalid zone %llu condition %x\n",
3849 			  zsp->z_start, zsp->z_cond);
3850 		break;
3851 	}
3852 	zsp->z_cond = ZC5_FULL;
3853 }
3854 
/*
 * Advance write pointer state after a write of @num blocks at @lba.
 * Sequential-write-required zones simply advance their single WP. Other
 * sequential zones may be written out of order and the write may span
 * several zones, so each affected zone's WP is advanced in turn; a write
 * that does not start at the WP marks the zone as a non-sequential-write
 * resource. Zones whose WP reaches the zone end become FULL.
 */
static void zbc_inc_wp(struct sdebug_dev_info *devip,
		       unsigned long long lba, unsigned int num)
{
	struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
	unsigned long long n, end, zend = zsp->z_start + zsp->z_size;

	if (!zbc_zone_is_seq(zsp))
		return;

	if (zsp->z_type == ZBC_ZTYPE_SWR) {
		/* Writes are WP-aligned here, so a plain increment suffices */
		zsp->z_wp += num;
		if (zsp->z_wp >= zend)
			zbc_set_zone_full(devip, zsp);
		return;
	}

	while (num) {
		if (lba != zsp->z_wp)
			zsp->z_non_seq_resource = true;

		end = lba + num;
		if (end >= zend) {
			/* Write crosses into the next zone: consume up to zend */
			n = zend - lba;
			zsp->z_wp = zend;
		} else if (end > zsp->z_wp) {
			n = num;
			zsp->z_wp = end;
		} else {
			/* Entirely below the WP: no WP movement needed */
			n = num;
		}
		if (zsp->z_wp >= zend)
			zbc_set_zone_full(devip, zsp);

		num -= n;
		lba += n;
		if (num) {
			zsp++;
			zend = zsp->z_start + zsp->z_size;
		}
	}
}
3896 
/*
 * ZBC-specific access checks for an I/O covering [lba, lba + num).
 * Reads must not cross a zone-type boundary. Writes must not touch gap
 * zones, must stay within conventional zones or within a single
 * sequential-write-required zone, and for SWR zones must start exactly
 * at the write pointer of a non-full zone. A permitted write into an
 * empty or closed zone implicitly opens it (subject to max_open).
 * Returns 0 when the access is allowed, check_condition_result with
 * sense data set otherwise.
 */
static int check_zbc_access_params(struct scsi_cmnd *scp,
			unsigned long long lba, unsigned int num, bool write)
{
	struct scsi_device *sdp = scp->device;
	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
	struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
	struct sdeb_zone_state *zsp_end = zbc_zone(devip, lba + num - 1);

	if (!write) {
		/* For host-managed, reads cannot cross zone types boundaries */
		if (zsp->z_type != zsp_end->z_type) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST,
					LBA_OUT_OF_RANGE,
					READ_INVDATA_ASCQ);
			return check_condition_result;
		}
		return 0;
	}

	/* Writing into a gap zone is not allowed */
	if (zbc_zone_is_gap(zsp)) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE,
				ATTEMPT_ACCESS_GAP);
		return check_condition_result;
	}

	/* No restrictions for writes within conventional zones */
	if (zbc_zone_is_conv(zsp)) {
		if (!zbc_zone_is_conv(zsp_end)) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST,
					LBA_OUT_OF_RANGE,
					WRITE_BOUNDARY_ASCQ);
			return check_condition_result;
		}
		return 0;
	}

	if (zsp->z_type == ZBC_ZTYPE_SWR) {
		/* Writes cannot cross sequential zone boundaries */
		if (zsp_end != zsp) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST,
					LBA_OUT_OF_RANGE,
					WRITE_BOUNDARY_ASCQ);
			return check_condition_result;
		}
		/* Cannot write full zones */
		if (zsp->z_cond == ZC5_FULL) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST,
					INVALID_FIELD_IN_CDB, 0);
			return check_condition_result;
		}
		/* Writes must be aligned to the zone WP */
		if (lba != zsp->z_wp) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST,
					LBA_OUT_OF_RANGE,
					UNALIGNED_WRITE_ASCQ);
			return check_condition_result;
		}
	}

	/* Handle implicit open of closed and empty zones */
	if (zsp->z_cond == ZC1_EMPTY || zsp->z_cond == ZC4_CLOSED) {
		if (devip->max_open &&
		    devip->nr_exp_open >= devip->max_open) {
			mk_sense_buffer(scp, DATA_PROTECT,
					INSUFF_RES_ASC,
					INSUFF_ZONE_ASCQ);
			return check_condition_result;
		}
		zbc_open_zone(devip, zsp, false);
	}

	return 0;
}
3971 
3972 static inline int check_device_access_params
3973 			(struct scsi_cmnd *scp, unsigned long long lba,
3974 			 unsigned int num, bool write)
3975 {
3976 	struct scsi_device *sdp = scp->device;
3977 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
3978 
3979 	if (lba + num > sdebug_capacity) {
3980 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
3981 		return check_condition_result;
3982 	}
3983 	/* transfer length excessive (tie in to block limits VPD page) */
3984 	if (num > sdebug_store_sectors) {
3985 		/* needs work to find which cdb byte 'num' comes from */
3986 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3987 		return check_condition_result;
3988 	}
3989 	if (write && unlikely(sdebug_wp)) {
3990 		mk_sense_buffer(scp, DATA_PROTECT, WRITE_PROTECTED, 0x2);
3991 		return check_condition_result;
3992 	}
3993 	if (sdebug_dev_is_zoned(devip))
3994 		return check_zbc_access_params(scp, lba, num, write);
3995 
3996 	return 0;
3997 }
3998 
3999 /*
4000  * Note: if BUG_ON() fires it usually indicates a problem with the parser
4001  * tables. Perhaps a missing F_FAKE_RW or FF_MEDIA_IO flag. Response functions
4002  * that access any of the "stores" in struct sdeb_store_info should call this
4003  * function with bug_if_fake_rw set to true.
4004  */
4005 static inline struct sdeb_store_info *devip2sip(struct sdebug_dev_info *devip,
4006 						bool bug_if_fake_rw)
4007 {
4008 	if (sdebug_fake_rw) {
4009 		BUG_ON(bug_if_fake_rw);	/* See note above */
4010 		return NULL;
4011 	}
4012 	return xa_load(per_store_ap, devip->sdbg_host->si_idx);
4013 }
4014 
4015 static inline void
4016 sdeb_read_lock(rwlock_t *lock)
4017 {
4018 	if (sdebug_no_rwlock)
4019 		__acquire(lock);
4020 	else
4021 		read_lock(lock);
4022 }
4023 
4024 static inline void
4025 sdeb_read_unlock(rwlock_t *lock)
4026 {
4027 	if (sdebug_no_rwlock)
4028 		__release(lock);
4029 	else
4030 		read_unlock(lock);
4031 }
4032 
4033 static inline void
4034 sdeb_write_lock(rwlock_t *lock)
4035 {
4036 	if (sdebug_no_rwlock)
4037 		__acquire(lock);
4038 	else
4039 		write_lock(lock);
4040 }
4041 
4042 static inline void
4043 sdeb_write_unlock(rwlock_t *lock)
4044 {
4045 	if (sdebug_no_rwlock)
4046 		__release(lock);
4047 	else
4048 		write_unlock(lock);
4049 }
4050 
4051 static inline void
4052 sdeb_data_read_lock(struct sdeb_store_info *sip)
4053 {
4054 	BUG_ON(!sip);
4055 
4056 	sdeb_read_lock(&sip->macc_data_lck);
4057 }
4058 
4059 static inline void
4060 sdeb_data_read_unlock(struct sdeb_store_info *sip)
4061 {
4062 	BUG_ON(!sip);
4063 
4064 	sdeb_read_unlock(&sip->macc_data_lck);
4065 }
4066 
4067 static inline void
4068 sdeb_data_write_lock(struct sdeb_store_info *sip)
4069 {
4070 	BUG_ON(!sip);
4071 
4072 	sdeb_write_lock(&sip->macc_data_lck);
4073 }
4074 
4075 static inline void
4076 sdeb_data_write_unlock(struct sdeb_store_info *sip)
4077 {
4078 	BUG_ON(!sip);
4079 
4080 	sdeb_write_unlock(&sip->macc_data_lck);
4081 }
4082 
4083 static inline void
4084 sdeb_data_sector_read_lock(struct sdeb_store_info *sip)
4085 {
4086 	BUG_ON(!sip);
4087 
4088 	sdeb_read_lock(&sip->macc_sector_lck);
4089 }
4090 
4091 static inline void
4092 sdeb_data_sector_read_unlock(struct sdeb_store_info *sip)
4093 {
4094 	BUG_ON(!sip);
4095 
4096 	sdeb_read_unlock(&sip->macc_sector_lck);
4097 }
4098 
4099 static inline void
4100 sdeb_data_sector_write_lock(struct sdeb_store_info *sip)
4101 {
4102 	BUG_ON(!sip);
4103 
4104 	sdeb_write_lock(&sip->macc_sector_lck);
4105 }
4106 
4107 static inline void
4108 sdeb_data_sector_write_unlock(struct sdeb_store_info *sip)
4109 {
4110 	BUG_ON(!sip);
4111 
4112 	sdeb_write_unlock(&sip->macc_sector_lck);
4113 }
4114 
4115 /*
4116  * Atomic locking:
4117  * We simplify the atomic model to allow only 1x atomic write and many non-
4118  * atomic reads or writes for all LBAs.
4119 
 * A RW lock has a similar behaviour:
4121  * Only 1x writer and many readers.
4122 
4123  * So use a RW lock for per-device read and write locking:
4124  * An atomic access grabs the lock as a writer and non-atomic grabs the lock
4125  * as a reader.
4126  */
4127 
4128 static inline void
4129 sdeb_data_lock(struct sdeb_store_info *sip, bool atomic)
4130 {
4131 	if (atomic)
4132 		sdeb_data_write_lock(sip);
4133 	else
4134 		sdeb_data_read_lock(sip);
4135 }
4136 
4137 static inline void
4138 sdeb_data_unlock(struct sdeb_store_info *sip, bool atomic)
4139 {
4140 	if (atomic)
4141 		sdeb_data_write_unlock(sip);
4142 	else
4143 		sdeb_data_read_unlock(sip);
4144 }
4145 
4146 /* Allow many reads but only 1x write per sector */
4147 static inline void
4148 sdeb_data_sector_lock(struct sdeb_store_info *sip, bool do_write)
4149 {
4150 	if (do_write)
4151 		sdeb_data_sector_write_lock(sip);
4152 	else
4153 		sdeb_data_sector_read_lock(sip);
4154 }
4155 
4156 static inline void
4157 sdeb_data_sector_unlock(struct sdeb_store_info *sip, bool do_write)
4158 {
4159 	if (do_write)
4160 		sdeb_data_sector_write_unlock(sip);
4161 	else
4162 		sdeb_data_sector_read_unlock(sip);
4163 }
4164 
4165 static inline void
4166 sdeb_meta_read_lock(struct sdeb_store_info *sip)
4167 {
4168 	if (sdebug_no_rwlock) {
4169 		if (sip)
4170 			__acquire(&sip->macc_meta_lck);
4171 		else
4172 			__acquire(&sdeb_fake_rw_lck);
4173 	} else {
4174 		if (sip)
4175 			read_lock(&sip->macc_meta_lck);
4176 		else
4177 			read_lock(&sdeb_fake_rw_lck);
4178 	}
4179 }
4180 
4181 static inline void
4182 sdeb_meta_read_unlock(struct sdeb_store_info *sip)
4183 {
4184 	if (sdebug_no_rwlock) {
4185 		if (sip)
4186 			__release(&sip->macc_meta_lck);
4187 		else
4188 			__release(&sdeb_fake_rw_lck);
4189 	} else {
4190 		if (sip)
4191 			read_unlock(&sip->macc_meta_lck);
4192 		else
4193 			read_unlock(&sdeb_fake_rw_lck);
4194 	}
4195 }
4196 
4197 static inline void
4198 sdeb_meta_write_lock(struct sdeb_store_info *sip)
4199 {
4200 	if (sdebug_no_rwlock) {
4201 		if (sip)
4202 			__acquire(&sip->macc_meta_lck);
4203 		else
4204 			__acquire(&sdeb_fake_rw_lck);
4205 	} else {
4206 		if (sip)
4207 			write_lock(&sip->macc_meta_lck);
4208 		else
4209 			write_lock(&sdeb_fake_rw_lck);
4210 	}
4211 }
4212 
4213 static inline void
4214 sdeb_meta_write_unlock(struct sdeb_store_info *sip)
4215 {
4216 	if (sdebug_no_rwlock) {
4217 		if (sip)
4218 			__release(&sip->macc_meta_lck);
4219 		else
4220 			__release(&sdeb_fake_rw_lck);
4221 	} else {
4222 		if (sip)
4223 			write_unlock(&sip->macc_meta_lck);
4224 		else
4225 			write_unlock(&sdeb_fake_rw_lck);
4226 	}
4227 }
4228 
/*
 * Copy @num sectors at @lba between the command's scatter-gather list
 * and the backing store @sip, in the direction given by @do_write;
 * @sg_skip is the starting byte offset in the sgl and @atomic selects
 * exclusive vs shared store locking.
 * Returns number of bytes copied or -1 if error.
 */
static int do_device_access(struct sdeb_store_info *sip, struct scsi_cmnd *scp,
			    u32 sg_skip, u64 lba, u32 num, u8 group_number,
			    bool do_write, bool atomic)
{
	int ret;
	u64 block;
	enum dma_data_direction dir;
	struct scsi_data_buffer *sdb = &scp->sdb;
	u8 *fsp;
	int i, total = 0;

	/*
	 * Even though reads are inherently atomic (in this driver), we expect
	 * the atomic flag only for writes.
	 */
	if (!do_write && atomic)
		return -1;

	if (do_write) {
		dir = DMA_TO_DEVICE;
		write_since_sync = true;
	} else {
		dir = DMA_FROM_DEVICE;
	}

	if (!sdb->length || !sip)
		return 0;
	if (scp->sc_data_direction != dir)
		return -1;

	/* Per-group write statistics (GROUP NUMBER CDB field) */
	if (do_write && group_number < ARRAY_SIZE(writes_by_group_number))
		atomic_long_inc(&writes_by_group_number[group_number]);

	fsp = sip->storep;

	/* The store may be smaller than the device; accesses wrap around */
	block = do_div(lba, sdebug_store_sectors);

	/* Only allow 1x atomic write or multiple non-atomic writes at any given time */
	sdeb_data_lock(sip, atomic);
	for (i = 0; i < num; i++) {
		/* We shouldn't need to lock for atomic writes, but do it anyway */
		sdeb_data_sector_lock(sip, do_write);
		ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
		   fsp + (block * sdebug_sector_size),
		   sdebug_sector_size, sg_skip, do_write);
		sdeb_data_sector_unlock(sip, do_write);
		total += ret;
		/* Short copy (e.g. sgl exhausted): stop here */
		if (ret != sdebug_sector_size)
			break;
		sg_skip += sdebug_sector_size;
		if (++block >= sdebug_store_sectors)
			block = 0;
	}
	sdeb_data_unlock(sip, atomic);

	return total;
}
4287 
4288 /* Returns number of bytes copied or -1 if error. */
4289 static int do_dout_fetch(struct scsi_cmnd *scp, u32 num, u8 *doutp)
4290 {
4291 	struct scsi_data_buffer *sdb = &scp->sdb;
4292 
4293 	if (!sdb->length)
4294 		return 0;
4295 	if (scp->sc_data_direction != DMA_TO_DEVICE)
4296 		return -1;
4297 	return sg_copy_buffer(sdb->table.sgl, sdb->table.nents, doutp,
4298 			      num * sdebug_sector_size, 0, true);
4299 }
4300 
4301 /* If sip->storep+lba compares equal to arr(num), then copy top half of
4302  * arr into sip->storep+lba and return true. If comparison fails then
4303  * return false. */
4304 static bool comp_write_worker(struct sdeb_store_info *sip, u64 lba, u32 num,
4305 			      const u8 *arr, bool compare_only)
4306 {
4307 	bool res;
4308 	u64 block, rest = 0;
4309 	u32 store_blks = sdebug_store_sectors;
4310 	u32 lb_size = sdebug_sector_size;
4311 	u8 *fsp = sip->storep;
4312 
4313 	block = do_div(lba, store_blks);
4314 	if (block + num > store_blks)
4315 		rest = block + num - store_blks;
4316 
4317 	res = !memcmp(fsp + (block * lb_size), arr, (num - rest) * lb_size);
4318 	if (!res)
4319 		return res;
4320 	if (rest)
4321 		res = memcmp(fsp, arr + ((num - rest) * lb_size),
4322 			     rest * lb_size);
4323 	if (!res)
4324 		return res;
4325 	if (compare_only)
4326 		return true;
4327 	arr += num * lb_size;
4328 	memcpy(fsp + (block * lb_size), arr, (num - rest) * lb_size);
4329 	if (rest)
4330 		memcpy(fsp, arr + ((num - rest) * lb_size), rest * lb_size);
4331 	return res;
4332 }
4333 
4334 static __be16 dif_compute_csum(const void *buf, int len)
4335 {
4336 	__be16 csum;
4337 
4338 	if (sdebug_guard)
4339 		csum = (__force __be16)ip_compute_csum(buf, len);
4340 	else
4341 		csum = cpu_to_be16(crc_t10dif(buf, len));
4342 
4343 	return csum;
4344 }
4345 
4346 static int dif_verify(struct t10_pi_tuple *sdt, const void *data,
4347 		      sector_t sector, u32 ei_lba)
4348 {
4349 	__be16 csum = dif_compute_csum(data, sdebug_sector_size);
4350 
4351 	if (sdt->guard_tag != csum) {
4352 		pr_err("GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
4353 			(unsigned long)sector,
4354 			be16_to_cpu(sdt->guard_tag),
4355 			be16_to_cpu(csum));
4356 		return 0x01;
4357 	}
4358 	if (sdebug_dif == T10_PI_TYPE1_PROTECTION &&
4359 	    be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
4360 		pr_err("REF check failed on sector %lu\n",
4361 			(unsigned long)sector);
4362 		return 0x03;
4363 	}
4364 	if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
4365 	    be32_to_cpu(sdt->ref_tag) != ei_lba) {
4366 		pr_err("REF check failed on sector %lu\n",
4367 			(unsigned long)sector);
4368 		return 0x03;
4369 	}
4370 	return 0;
4371 }
4372 
/*
 * Copy protection information (PI tuples) for @sectors sectors starting
 * at @sector between the command's protection scatter-gather list and
 * the store's dif_storep array. @read true copies store -> sgl, false
 * copies sgl -> store. The PI store is treated as circular: a run that
 * passes its end wraps back to the start (the 'rest' handling below).
 */
static void dif_copy_prot(struct scsi_cmnd *scp, sector_t sector,
			  unsigned int sectors, bool read)
{
	size_t resid;
	void *paddr;
	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
						scp->device->hostdata, true);
	struct t10_pi_tuple *dif_storep = sip->dif_storep;
	const void *dif_store_end = dif_storep + sdebug_store_sectors;
	struct sg_mapping_iter miter;

	/* Bytes of protection data to copy into sgl */
	resid = sectors * sizeof(*dif_storep);

	sg_miter_start(&miter, scsi_prot_sglist(scp),
		       scsi_prot_sg_count(scp), SG_MITER_ATOMIC |
		       (read ? SG_MITER_TO_SG : SG_MITER_FROM_SG));

	while (sg_miter_next(&miter) && resid > 0) {
		size_t len = min_t(size_t, miter.length, resid);
		void *start = dif_store(sip, sector);
		size_t rest = 0;

		/* 'rest' is the part of this run past the end of the PI store */
		if (dif_store_end < start + len)
			rest = start + len - dif_store_end;

		paddr = miter.addr;

		if (read)
			memcpy(paddr, start, len - rest);
		else
			memcpy(start, paddr, len - rest);

		if (rest) {
			/* Wrapped portion continues at the start of the store */
			if (read)
				memcpy(paddr + len - rest, dif_storep, rest);
			else
				memcpy(dif_storep, paddr + len - rest, rest);
		}

		sector += len / sizeof(*dif_storep);
		resid -= len;
	}
	sg_miter_stop(&miter);
}
4418 
/*
 * Verify (when the CDB's RDPROTECT field is non-zero) and copy out the
 * protection information for a read of @sectors sectors starting at
 * @start_sec. Tuples with app_tag 0xffff are skipped. Returns 0 or the
 * dif_verify() error type of the first failing sector.
 */
static int prot_verify_read(struct scsi_cmnd *scp, sector_t start_sec,
			    unsigned int sectors, u32 ei_lba)
{
	int ret = 0;
	unsigned int i;
	sector_t sector;
	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
						scp->device->hostdata, true);
	struct t10_pi_tuple *sdt;

	for (i = 0; i < sectors; i++, ei_lba++) {
		sector = start_sec + i;
		sdt = dif_store(sip, sector);

		/* 0xffff app tag: PI checking disabled for this sector */
		if (sdt->app_tag == cpu_to_be16(0xffff))
			continue;

		/*
		 * Because scsi_debug acts as both initiator and
		 * target we proceed to verify the PI even if
		 * RDPROTECT=3. This is done so the "initiator" knows
		 * which type of error to return. Otherwise we would
		 * have to iterate over the PI twice.
		 */
		if (scp->cmnd[1] >> 5) { /* RDPROTECT */
			ret = dif_verify(sdt, lba2fake_store(sip, sector),
					 sector, ei_lba);
			if (ret) {
				dif_errors++;
				break;
			}
		}
	}

	/* Copy the PI out to the initiator even after a verify failure */
	dif_copy_prot(scp, start_sec, sectors, true);
	dix_reads++;

	return ret;
}
4458 
/*
 * READ(6) for the tape emulation. In fixed mode the transfer length
 * counts blocks of devip->tape_blksize bytes; in variable mode it is
 * the byte count of a single block. Filemark, end-of-data and
 * end-of-partition conditions are reported with the residual count in
 * the sense data, and a block-length mismatch sets ILI sense (unless
 * suppressed by SILI in variable mode).
 */
static int resp_read_tape(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	u32 i, num, transfer, size;
	u8 *cmd = scp->cmnd;
	struct scsi_data_buffer *sdb = &scp->sdb;
	int partition = devip->tape_partition;
	u32 pos = devip->tape_location[partition];
	struct tape_block *blp;
	bool fixed, sili;

	if (cmd[0] != READ_6) { /* Only Read(6) supported */
		mk_sense_invalid_opcode(scp);
		return illegal_condition_result;
	}
	fixed = (cmd[1] & 0x1) != 0;
	sili = (cmd[1] & 0x2) != 0;
	/* SILI is only meaningful in variable mode; reject the combination */
	if (fixed && sili) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
		return check_condition_result;
	}

	transfer = get_unaligned_be24(cmd + 2);
	if (fixed) {
		num = transfer;
		size = devip->tape_blksize;
	} else {
		if (transfer < TAPE_MIN_BLKSIZE ||
			transfer > TAPE_MAX_BLKSIZE) {
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
			return check_condition_result;
		}
		num = 1;
		size = transfer;
	}

	for (i = 0, blp = devip->tape_blocks[partition] + pos;
	     i < num && pos < devip->tape_eop[partition];
	     i++, pos++, blp++) {
		devip->tape_location[partition] = pos + 1;
		if (IS_TAPE_BLOCK_FM(blp->fl_size)) {
			mk_sense_info_tape(scp, NO_SENSE, NO_ADDITIONAL_SENSE,
					FILEMARK_DETECTED_ASCQ, fixed ? num - i : size,
					SENSE_FLAG_FILEMARK);
			scsi_set_resid(scp, (num - i) * size);
			return check_condition_result;
		}
		/* Assume no REW */
		if (IS_TAPE_BLOCK_EOD(blp->fl_size)) {
			mk_sense_info_tape(scp, BLANK_CHECK, NO_ADDITIONAL_SENSE,
					EOD_DETECTED_ASCQ, fixed ? num - i : size,
					0);
			devip->tape_location[partition] = pos;
			scsi_set_resid(scp, (num - i) * size);
			return check_condition_result;
		}
		/*
		 * Only 4 data bytes are stored per emulated block; zero the
		 * whole destination block first, then copy those bytes in.
		 */
		sg_zero_buffer(sdb->table.sgl, sdb->table.nents,
			size, i * size);
		sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
			&(blp->data), 4, i * size, false);
		if (fixed) {
			/* Recorded block length must match the fixed block size */
			if (blp->fl_size != devip->tape_blksize) {
				scsi_set_resid(scp, (num - i) * size);
				mk_sense_info_tape(scp, NO_SENSE, NO_ADDITIONAL_SENSE,
						0, num - i,
						SENSE_FLAG_ILI);
				return check_condition_result;
			}
		} else {
			/* Variable mode: report length mismatch unless SILI set */
			if (blp->fl_size != size) {
				if (blp->fl_size < size)
					scsi_set_resid(scp, size - blp->fl_size);
				if (!sili) {
					mk_sense_info_tape(scp, NO_SENSE, NO_ADDITIONAL_SENSE,
							0, size - blp->fl_size,
							SENSE_FLAG_ILI);
					return check_condition_result;
				}
			}
		}
	}
	if (pos >= devip->tape_eop[partition]) {
		mk_sense_info_tape(scp, NO_SENSE, NO_ADDITIONAL_SENSE,
				EOP_EOM_DETECTED_ASCQ, fixed ? num - i : size,
				SENSE_FLAG_EOM);
		devip->tape_location[partition] = pos - 1;
		return check_condition_result;
	}
	devip->tape_location[partition] = pos;

	return 0;
}
4550 
/*
 * Respond to the READ(6/10/12/16/32) and XDWRITEREAD(10) commands for
 * (pseudo) direct-access devices. Decodes the CDB, performs optional
 * protection-information (DIF/DIX) verification and error injection,
 * then copies data from the backing store into the command buffer.
 * Returns 0 on success, otherwise a check-condition or DID_ERROR result.
 */
static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	bool check_prot;
	u32 num;	/* number of logical blocks to transfer */
	u32 ei_lba;	/* expected initial LBA (32-byte CDBs only) */
	int ret;
	u64 lba;
	struct sdeb_store_info *sip = devip2sip(devip, true);
	u8 *cmd = scp->cmnd;
	bool meta_data_locked = false;

	/* Decode LBA, transfer length and protection handling per opcode */
	switch (cmd[0]) {
	case READ_16:
		ei_lba = 0;
		lba = get_unaligned_be64(cmd + 2);
		num = get_unaligned_be32(cmd + 10);
		check_prot = true;
		break;
	case READ_10:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be16(cmd + 7);
		check_prot = true;
		break;
	case READ_6:
		ei_lba = 0;
		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
		      (u32)(cmd[1] & 0x1f) << 16;
		num = (0 == cmd[4]) ? 256 : cmd[4];	/* 0 means 256 blocks in READ(6) */
		check_prot = true;
		break;
	case READ_12:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be32(cmd + 6);
		check_prot = true;
		break;
	case XDWRITEREAD_10:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be16(cmd + 7);
		check_prot = false;
		break;
	default:	/* assume READ(32) */
		lba = get_unaligned_be64(cmd + 12);
		ei_lba = get_unaligned_be32(cmd + 20);
		num = get_unaligned_be32(cmd + 28);
		check_prot = false;
		break;
	}
	if (unlikely(have_dif_prot && check_prot)) {
		/* Type 2 protection requires the RDPROTECT bits to be zero */
		if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
		    (cmd[1] & 0xe0)) {
			mk_sense_invalid_opcode(scp);
			return check_condition_result;
		}
		if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
		     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
		    (cmd[1] & 0xe0) == 0)
			sdev_printk(KERN_ERR, scp->device, "Unprotected RD "
				    "to DIF device\n");
	}
	/* Optional error injection: halve the transfer length once */
	if (unlikely((sdebug_opts & SDEBUG_OPT_SHORT_TRANSFER) &&
		     atomic_read(&sdeb_inject_pending))) {
		num /= 2;
		atomic_set(&sdeb_inject_pending, 0);
	}

	/*
	 * When checking device access params, for reads we only check data
	 * versus what is set at init time, so no need to lock.
	 */
	ret = check_device_access_params(scp, lba, num, false);
	if (ret)
		return ret;
	/* Optionally simulate an unrecoverable medium error in an LBA window */
	if (unlikely((SDEBUG_OPT_MEDIUM_ERR & sdebug_opts) &&
		     (lba <= (sdebug_medium_error_start + sdebug_medium_error_count - 1)) &&
		     ((lba + num) > sdebug_medium_error_start))) {
		/* claim unrecoverable read error */
		mk_sense_buffer(scp, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
		/* set info field and valid bit for fixed descriptor */
		if (0x70 == (scp->sense_buffer[0] & 0x7f)) {
			scp->sense_buffer[0] |= 0x80;	/* Valid bit */
			ret = (lba < OPT_MEDIUM_ERR_ADDR)
			      ? OPT_MEDIUM_ERR_ADDR : (int)lba;
			put_unaligned_be32(ret, scp->sense_buffer + 3);
		}
		scsi_set_resid(scp, scsi_bufflen(scp));
		return check_condition_result;
	}

	/* Zoned state and the PI shadow store need the metadata read lock */
	if (sdebug_dev_is_zoned(devip) ||
	    (sdebug_dix && scsi_prot_sg_count(scp)))  {
		sdeb_meta_read_lock(sip);
		meta_data_locked = true;
	}

	/* DIX + T10 DIF */
	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
		switch (prot_verify_read(scp, lba, num, ei_lba)) {
		case 1: /* Guard tag error */
			if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */
				sdeb_meta_read_unlock(sip);
				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
				return check_condition_result;
			} else if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) {
				sdeb_meta_read_unlock(sip);
				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
				return illegal_condition_result;
			}
			break;
		case 3: /* Reference tag error */
			if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */
				sdeb_meta_read_unlock(sip);
				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3);
				return check_condition_result;
			} else if (scp->prot_flags & SCSI_PROT_REF_CHECK) {
				sdeb_meta_read_unlock(sip);
				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3);
				return illegal_condition_result;
			}
			break;
		}
	}

	ret = do_device_access(sip, scp, 0, lba, num, 0, false, false);
	if (meta_data_locked)
		sdeb_meta_read_unlock(sip);
	if (unlikely(ret == -1))
		return DID_ERROR << 16;

	/* report any shortfall between requested and transferred bytes */
	scsi_set_resid(scp, scsi_bufflen(scp) - ret);

	/* Optionally inject recovered/DIF/DIX errors after a good read */
	if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
		     atomic_read(&sdeb_inject_pending))) {
		if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
			mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
			atomic_set(&sdeb_inject_pending, 0);
			return check_condition_result;
		} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
			/* Logical block guard check failed */
			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
			atomic_set(&sdeb_inject_pending, 0);
			return illegal_condition_result;
		} else if (SDEBUG_OPT_DIX_ERR & sdebug_opts) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
			atomic_set(&sdeb_inject_pending, 0);
			return illegal_condition_result;
		}
	}
	return 0;
}
4703 
/*
 * Verify T10 DIF protection information supplied with a write (DIX path).
 * Walks the protection scatter-gather list in lock step with the data
 * list, checking each protection tuple against its data sector via
 * dif_verify() (skipped when WRPROTECT == 3), then copies the PI into
 * the driver's shadow store. Returns 0 on success or the non-zero
 * dif_verify() error code (1 = guard tag, 3 = reference tag).
 */
static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
			     unsigned int sectors, u32 ei_lba)
{
	int ret;
	struct t10_pi_tuple *sdt;
	void *daddr;
	sector_t sector = start_sec;
	int ppage_offset;	/* byte offset within current protection page */
	int dpage_offset;	/* byte offset within current data page */
	struct sg_mapping_iter diter;
	struct sg_mapping_iter piter;

	BUG_ON(scsi_sg_count(SCpnt) == 0);
	BUG_ON(scsi_prot_sg_count(SCpnt) == 0);

	sg_miter_start(&piter, scsi_prot_sglist(SCpnt),
			scsi_prot_sg_count(SCpnt),
			SG_MITER_ATOMIC | SG_MITER_FROM_SG);
	sg_miter_start(&diter, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
			SG_MITER_ATOMIC | SG_MITER_FROM_SG);

	/* For each protection page */
	while (sg_miter_next(&piter)) {
		dpage_offset = 0;
		if (WARN_ON(!sg_miter_next(&diter))) {
			ret = 0x01;
			goto out;
		}

		for (ppage_offset = 0; ppage_offset < piter.length;
		     ppage_offset += sizeof(struct t10_pi_tuple)) {
			/* If we're at the end of the current
			 * data page advance to the next one
			 */
			if (dpage_offset >= diter.length) {
				if (WARN_ON(!sg_miter_next(&diter))) {
					ret = 0x01;
					goto out;
				}
				dpage_offset = 0;
			}

			sdt = piter.addr + ppage_offset;
			daddr = diter.addr + dpage_offset;

			if (SCpnt->cmnd[1] >> 5 != 3) { /* WRPROTECT */
				ret = dif_verify(sdt, daddr, sector, ei_lba);
				if (ret)
					goto out;
			}

			sector++;
			ei_lba++;
			dpage_offset += sdebug_sector_size;
		}
		diter.consumed = dpage_offset;
		sg_miter_stop(&diter);
	}
	sg_miter_stop(&piter);

	/* All tuples verified: persist the PI alongside the data */
	dif_copy_prot(SCpnt, start_sec, sectors, false);
	dix_writes++;

	return 0;

out:
	dif_errors++;
	sg_miter_stop(&diter);
	sg_miter_stop(&piter);
	return ret;
}
4775 
4776 static unsigned long lba_to_map_index(sector_t lba)
4777 {
4778 	if (sdebug_unmap_alignment)
4779 		lba += sdebug_unmap_granularity - sdebug_unmap_alignment;
4780 	sector_div(lba, sdebug_unmap_granularity);
4781 	return lba;
4782 }
4783 
4784 static sector_t map_index_to_lba(unsigned long index)
4785 {
4786 	sector_t lba = index * sdebug_unmap_granularity;
4787 
4788 	if (sdebug_unmap_alignment)
4789 		lba -= sdebug_unmap_granularity - sdebug_unmap_alignment;
4790 	return lba;
4791 }
4792 
4793 static unsigned int map_state(struct sdeb_store_info *sip, sector_t lba,
4794 			      unsigned int *num)
4795 {
4796 	sector_t end;
4797 	unsigned int mapped;
4798 	unsigned long index;
4799 	unsigned long next;
4800 
4801 	index = lba_to_map_index(lba);
4802 	mapped = test_bit(index, sip->map_storep);
4803 
4804 	if (mapped)
4805 		next = find_next_zero_bit(sip->map_storep, map_size, index);
4806 	else
4807 		next = find_next_bit(sip->map_storep, map_size, index);
4808 
4809 	end = min_t(sector_t, sdebug_store_sectors,  map_index_to_lba(next));
4810 	*num = end - lba;
4811 	return mapped;
4812 }
4813 
4814 static void map_region(struct sdeb_store_info *sip, sector_t lba,
4815 		       unsigned int len)
4816 {
4817 	sector_t end = lba + len;
4818 
4819 	while (lba < end) {
4820 		unsigned long index = lba_to_map_index(lba);
4821 
4822 		if (index < map_size)
4823 			set_bit(index, sip->map_storep);
4824 
4825 		lba = map_index_to_lba(index + 1);
4826 	}
4827 }
4828 
/*
 * Clear provisioning-map bits for [lba, lba + len) and scrub the backing
 * store. Only granularity-aligned, fully-covered map units are unmapped;
 * partially covered units at either end are left mapped. With LBPRZ the
 * freed sectors read back as zeroes (lbprz & 1) or 0xff, and any PI
 * shadow store is poisoned with 0xff.
 */
static void unmap_region(struct sdeb_store_info *sip, sector_t lba,
			 unsigned int len)
{
	sector_t end = lba + len;
	u8 *fsp = sip->storep;

	while (lba < end) {
		unsigned long index = lba_to_map_index(lba);

		/* only unmap whole units that start on a unit boundary */
		if (lba == map_index_to_lba(index) &&
		    lba + sdebug_unmap_granularity <= end &&
		    index < map_size) {
			clear_bit(index, sip->map_storep);
			if (sdebug_lbprz) {  /* for LBPRZ=2 return 0xff_s */
				memset(fsp + lba * sdebug_sector_size,
				       (sdebug_lbprz & 1) ? 0 : 0xff,
				       sdebug_sector_size *
				       sdebug_unmap_granularity);
			}
			if (sip->dif_storep) {
				memset(sip->dif_storep + lba, 0xff,
				       sizeof(*sip->dif_storep) *
				       sdebug_unmap_granularity);
			}
		}
		lba = map_index_to_lba(index + 1);
	}
}
4857 
/*
 * Respond to WRITE(6) on the emulated tape device. In fixed mode the CDB
 * transfer length is a block count at the current tape block size; in
 * variable mode it is a single block's byte length (validated against
 * TAPE_MIN/MAX_BLKSIZE). Blocks are copied from the data-out buffer into
 * the current partition, an EOD marker is written after the last block,
 * and early-warning / end-of-partition conditions are reported via sense.
 */
static int resp_write_tape(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	u32 i, num, transfer, size, written = 0;
	u8 *cmd = scp->cmnd;
	struct scsi_data_buffer *sdb = &scp->sdb;
	int partition = devip->tape_partition;
	int pos = devip->tape_location[partition];
	struct tape_block *blp;
	bool fixed, ew;

	if (cmd[0] != WRITE_6) { /* Only Write(6) supported */
		mk_sense_invalid_opcode(scp);
		return illegal_condition_result;
	}

	fixed = (cmd[1] & 1) != 0;
	transfer = get_unaligned_be24(cmd + 2);
	if (fixed) {
		num = transfer;
		size = devip->tape_blksize;
	} else {
		if (transfer < TAPE_MIN_BLKSIZE ||
			transfer > TAPE_MAX_BLKSIZE) {
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
			return check_condition_result;
		}
		num = 1;
		size = transfer;
	}

	scsi_set_resid(scp, num * transfer);
	/* stop one block short of EOP to leave room for the EOD marker */
	for (i = 0, blp = devip->tape_blocks[partition] + pos, ew = false;
	     i < num && pos < devip->tape_eop[partition] - 1; i++, pos++, blp++) {
		blp->fl_size = size;
		sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
			&(blp->data), 4, i * size, true);
		written += size;
		scsi_set_resid(scp, num * transfer - written);
		/* remember if we reached the early-warning position */
		ew |= (pos == devip->tape_eop[partition] - TAPE_EW);
	}

	devip->tape_location[partition] = pos;
	blp->fl_size = TAPE_BLOCK_EOD_FLAG;	/* terminate data with EOD */
	if (pos >= devip->tape_eop[partition] - 1) {
		mk_sense_info_tape(scp, VOLUME_OVERFLOW,
				NO_ADDITIONAL_SENSE, EOP_EOM_DETECTED_ASCQ,
				fixed ? num - i : transfer,
				SENSE_FLAG_EOM);
		return check_condition_result;
	}
	if (ew) { /* early warning */
		mk_sense_info_tape(scp, NO_SENSE,
				NO_ADDITIONAL_SENSE, EOP_EOM_DETECTED_ASCQ,
				fixed ? num - i : transfer,
				SENSE_FLAG_EOM);
		return check_condition_result;
	}

	return 0;
}
4918 
/*
 * Respond to the WRITE(6/10/12/16/32) and XDWRITEREAD(10) commands for
 * (pseudo) direct-access devices. Decodes the CDB, performs optional
 * protection-information (DIF/DIX) verification and error injection,
 * copies data from the command buffer into the backing store, and
 * updates the provisioning map / zone write pointer as needed.
 * Returns 0 on success, otherwise a check-condition or DID_ERROR result.
 */
static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	bool check_prot;
	u32 num;	/* number of logical blocks to transfer */
	u8 group = 0;	/* GROUP NUMBER field from the CDB */
	u32 ei_lba;	/* expected initial LBA (32-byte CDBs only) */
	int ret;
	u64 lba;
	struct sdeb_store_info *sip = devip2sip(devip, true);
	u8 *cmd = scp->cmnd;
	bool meta_data_locked = false;

	/* Optional error injection: fail once with an unaligned-write sense */
	if (unlikely(sdebug_opts & SDEBUG_OPT_UNALIGNED_WRITE &&
		     atomic_read(&sdeb_inject_pending))) {
		atomic_set(&sdeb_inject_pending, 0);
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE,
				UNALIGNED_WRITE_ASCQ);
		return check_condition_result;
	}

	/* Decode LBA, transfer length, group and protection per opcode */
	switch (cmd[0]) {
	case WRITE_16:
		ei_lba = 0;
		lba = get_unaligned_be64(cmd + 2);
		num = get_unaligned_be32(cmd + 10);
		group = cmd[14] & 0x3f;
		check_prot = true;
		break;
	case WRITE_10:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		group = cmd[6] & 0x3f;
		num = get_unaligned_be16(cmd + 7);
		check_prot = true;
		break;
	case WRITE_6:
		ei_lba = 0;
		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
		      (u32)(cmd[1] & 0x1f) << 16;
		num = (0 == cmd[4]) ? 256 : cmd[4];	/* 0 means 256 blocks in WRITE(6) */
		check_prot = true;
		break;
	case WRITE_12:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be32(cmd + 6);
		group = cmd[6] & 0x3f;
		check_prot = true;
		break;
	case 0x53:	/* XDWRITEREAD(10) */
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		group = cmd[6] & 0x1f;
		num = get_unaligned_be16(cmd + 7);
		check_prot = false;
		break;
	default:	/* assume WRITE(32) */
		group = cmd[6] & 0x3f;
		lba = get_unaligned_be64(cmd + 12);
		ei_lba = get_unaligned_be32(cmd + 20);
		num = get_unaligned_be32(cmd + 28);
		check_prot = false;
		break;
	}
	if (unlikely(have_dif_prot && check_prot)) {
		/* Type 2 protection requires the WRPROTECT bits to be zero */
		if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
		    (cmd[1] & 0xe0)) {
			mk_sense_invalid_opcode(scp);
			return check_condition_result;
		}
		if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
		     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
		    (cmd[1] & 0xe0) == 0)
			sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
				    "to DIF device\n");
	}

	/* Zoned state, PI shadow store and provisioning map share this lock */
	if (sdebug_dev_is_zoned(devip) ||
	    (sdebug_dix && scsi_prot_sg_count(scp)) ||
	    scsi_debug_lbp())  {
		sdeb_meta_write_lock(sip);
		meta_data_locked = true;
	}

	ret = check_device_access_params(scp, lba, num, true);
	if (ret) {
		if (meta_data_locked)
			sdeb_meta_write_unlock(sip);
		return ret;
	}

	/* DIX + T10 DIF */
	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
		switch (prot_verify_write(scp, lba, num, ei_lba)) {
		case 1: /* Guard tag error */
			if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) {
				sdeb_meta_write_unlock(sip);
				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
				return illegal_condition_result;
			} else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */
				sdeb_meta_write_unlock(sip);
				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
				return check_condition_result;
			}
			break;
		case 3: /* Reference tag error */
			if (scp->prot_flags & SCSI_PROT_REF_CHECK) {
				sdeb_meta_write_unlock(sip);
				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3);
				return illegal_condition_result;
			} else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */
				sdeb_meta_write_unlock(sip);
				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3);
				return check_condition_result;
			}
			break;
		}
	}

	ret = do_device_access(sip, scp, 0, lba, num, group, true, false);
	if (unlikely(scsi_debug_lbp()))
		map_region(sip, lba, num);

	/* If ZBC zone then bump its write pointer */
	if (sdebug_dev_is_zoned(devip))
		zbc_inc_wp(devip, lba, num);
	if (meta_data_locked)
		sdeb_meta_write_unlock(sip);

	if (unlikely(-1 == ret))
		return DID_ERROR << 16;
	else if (unlikely(sdebug_verbose &&
			  (ret < (num * sdebug_sector_size))))
		sdev_printk(KERN_INFO, scp->device,
			    "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
			    my_name, num * sdebug_sector_size, ret);

	/* Optionally inject recovered/DIF/DIX errors after a good write */
	if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
		     atomic_read(&sdeb_inject_pending))) {
		if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
			mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
			atomic_set(&sdeb_inject_pending, 0);
			return check_condition_result;
		} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
			/* Logical block guard check failed */
			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
			atomic_set(&sdeb_inject_pending, 0);
			return illegal_condition_result;
		} else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
			atomic_set(&sdeb_inject_pending, 0);
			return illegal_condition_result;
		}
	}
	return 0;
}
5075 
5076 /*
5077  * T10 has only specified WRITE SCATTERED(16) and WRITE SCATTERED(32).
5078  * No READ GATHERED yet (requires bidi or long cdb holding gather list).
5079  */
/*
 * Respond to WRITE SCATTERED(16/32). The data-out buffer starts with a
 * header plus LBA range descriptors (lbdof blocks worth), followed by
 * the data for each descriptor in order. Each range is validated and
 * written in turn; the whole operation runs under the metadata write
 * lock. Returns 0 on success, otherwise a check-condition, DID_ERROR or
 * SCSI_MLQUEUE_HOST_BUSY result.
 */
static int resp_write_scat(struct scsi_cmnd *scp,
			   struct sdebug_dev_info *devip)
{
	u8 *cmd = scp->cmnd;
	u8 *lrdp = NULL;	/* fetched header + LBA range descriptors */
	u8 *up;
	struct sdeb_store_info *sip = devip2sip(devip, true);
	u8 wrprotect;
	u16 lbdof, num_lrd, k;
	u32 num, num_by, bt_len, lbdof_blen, sg_off, cum_lb;
	u32 lb_size = sdebug_sector_size;
	u32 ei_lba;
	u64 lba;
	u8 group;
	int ret, res;
	bool is_16;
	static const u32 lrd_size = 32; /* + parameter list header size */

	if (cmd[0] == VARIABLE_LENGTH_CMD) {	/* WRITE SCATTERED(32) */
		is_16 = false;
		group = cmd[6] & 0x3f;
		wrprotect = (cmd[10] >> 5) & 0x7;
		lbdof = get_unaligned_be16(cmd + 12);
		num_lrd = get_unaligned_be16(cmd + 16);
		bt_len = get_unaligned_be32(cmd + 28);
	} else {        /* that leaves WRITE SCATTERED(16) */
		is_16 = true;
		wrprotect = (cmd[2] >> 5) & 0x7;
		lbdof = get_unaligned_be16(cmd + 4);
		num_lrd = get_unaligned_be16(cmd + 8);
		bt_len = get_unaligned_be32(cmd + 10);
		group = cmd[14] & 0x3f;
		if (unlikely(have_dif_prot)) {
			if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
			    wrprotect) {
				mk_sense_invalid_opcode(scp);
				return illegal_condition_result;
			}
			if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
			     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
			     wrprotect == 0)
				sdev_printk(KERN_ERR, scp->device,
					    "Unprotected WR to DIF device\n");
		}
	}
	if ((num_lrd == 0) || (bt_len == 0))
		return 0;       /* T10 says these do-nothings are not errors */
	if (lbdof == 0) {
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, scp->device,
				"%s: LB Data Offset field bad\n", my_name);
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		return illegal_condition_result;
	}
	lbdof_blen = lbdof * lb_size;
	/* header + all descriptors must fit before the data area begins */
	if ((lrd_size + (num_lrd * lrd_size)) > lbdof_blen) {
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, scp->device,
				"%s: LBA range descriptors don't fit\n", my_name);
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		return illegal_condition_result;
	}
	lrdp = kzalloc(lbdof_blen, GFP_ATOMIC | __GFP_NOWARN);
	if (lrdp == NULL)
		return SCSI_MLQUEUE_HOST_BUSY;
	if (sdebug_verbose)
		sdev_printk(KERN_INFO, scp->device,
			"%s: Fetch header+scatter_list, lbdof_blen=%u\n",
			my_name, lbdof_blen);
	res = fetch_to_dev_buffer(scp, lrdp, lbdof_blen);
	if (res == -1) {
		ret = DID_ERROR << 16;
		goto err_out;
	}

	/* Just keep it simple and always lock for now */
	sdeb_meta_write_lock(sip);
	sg_off = lbdof_blen;	/* data for first descriptor follows the list */
	/* Spec says Buffer xfer Length field in number of LBs in dout */
	cum_lb = 0;
	for (k = 0, up = lrdp + lrd_size; k < num_lrd; ++k, up += lrd_size) {
		lba = get_unaligned_be64(up + 0);
		num = get_unaligned_be32(up + 8);
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, scp->device,
				"%s: k=%d  LBA=0x%llx num=%u  sg_off=%u\n",
				my_name, k, lba, num, sg_off);
		if (num == 0)
			continue;
		ret = check_device_access_params(scp, lba, num, true);
		if (ret)
			goto err_out_unlock;
		num_by = num * lb_size;
		ei_lba = is_16 ? 0 : get_unaligned_be32(up + 12);

		/* running total of blocks must not exceed the buffer length */
		if ((cum_lb + num) > bt_len) {
			if (sdebug_verbose)
				sdev_printk(KERN_INFO, scp->device,
				    "%s: sum of blocks > data provided\n",
				    my_name);
			mk_sense_buffer(scp, ILLEGAL_REQUEST, WRITE_ERROR_ASC,
					0);
			ret = illegal_condition_result;
			goto err_out_unlock;
		}

		/* DIX + T10 DIF */
		if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
			int prot_ret = prot_verify_write(scp, lba, num,
							 ei_lba);

			if (prot_ret) {
				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10,
						prot_ret);
				ret = illegal_condition_result;
				goto err_out_unlock;
			}
		}

		/*
		 * Write ranges atomically to keep as close to pre-atomic
		 * writes behaviour as possible.
		 */
		ret = do_device_access(sip, scp, sg_off, lba, num, group, true, true);
		/* If ZBC zone then bump its write pointer */
		if (sdebug_dev_is_zoned(devip))
			zbc_inc_wp(devip, lba, num);
		if (unlikely(scsi_debug_lbp()))
			map_region(sip, lba, num);
		if (unlikely(-1 == ret)) {
			ret = DID_ERROR << 16;
			goto err_out_unlock;
		} else if (unlikely(sdebug_verbose && (ret < num_by)))
			sdev_printk(KERN_INFO, scp->device,
			    "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
			    my_name, num_by, ret);

		/* Optionally inject recovered/DIF/DIX errors once */
		if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
			     atomic_read(&sdeb_inject_pending))) {
			if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
				mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
				atomic_set(&sdeb_inject_pending, 0);
				ret = check_condition_result;
				goto err_out_unlock;
			} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
				/* Logical block guard check failed */
				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
				atomic_set(&sdeb_inject_pending, 0);
				ret = illegal_condition_result;
				goto err_out_unlock;
			} else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
				atomic_set(&sdeb_inject_pending, 0);
				ret = illegal_condition_result;
				goto err_out_unlock;
			}
		}
		sg_off += num_by;
		cum_lb += num;
	}
	ret = 0;
err_out_unlock:
	sdeb_meta_write_unlock(sip);
err_out:
	kfree(lrdp);
	return ret;
}
5247 
5248 static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
5249 			   u32 ei_lba, bool unmap, bool ndob)
5250 {
5251 	struct scsi_device *sdp = scp->device;
5252 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
5253 	unsigned long long i;
5254 	u64 block, lbaa;
5255 	u32 lb_size = sdebug_sector_size;
5256 	int ret;
5257 	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
5258 						scp->device->hostdata, true);
5259 	u8 *fs1p;
5260 	u8 *fsp;
5261 	bool meta_data_locked = false;
5262 
5263 	if (sdebug_dev_is_zoned(devip) || scsi_debug_lbp()) {
5264 		sdeb_meta_write_lock(sip);
5265 		meta_data_locked = true;
5266 	}
5267 
5268 	ret = check_device_access_params(scp, lba, num, true);
5269 	if (ret)
5270 		goto out;
5271 
5272 	if (unmap && scsi_debug_lbp()) {
5273 		unmap_region(sip, lba, num);
5274 		goto out;
5275 	}
5276 	lbaa = lba;
5277 	block = do_div(lbaa, sdebug_store_sectors);
5278 	/* if ndob then zero 1 logical block, else fetch 1 logical block */
5279 	fsp = sip->storep;
5280 	fs1p = fsp + (block * lb_size);
5281 	sdeb_data_write_lock(sip);
5282 	if (ndob) {
5283 		memset(fs1p, 0, lb_size);
5284 		ret = 0;
5285 	} else
5286 		ret = fetch_to_dev_buffer(scp, fs1p, lb_size);
5287 
5288 	if (-1 == ret) {
5289 		ret = DID_ERROR << 16;
5290 		goto out;
5291 	} else if (sdebug_verbose && !ndob && (ret < lb_size))
5292 		sdev_printk(KERN_INFO, scp->device,
5293 			    "%s: %s: lb size=%u, IO sent=%d bytes\n",
5294 			    my_name, "write same", lb_size, ret);
5295 
5296 	/* Copy first sector to remaining blocks */
5297 	for (i = 1 ; i < num ; i++) {
5298 		lbaa = lba + i;
5299 		block = do_div(lbaa, sdebug_store_sectors);
5300 		memmove(fsp + (block * lb_size), fs1p, lb_size);
5301 	}
5302 	if (scsi_debug_lbp())
5303 		map_region(sip, lba, num);
5304 	/* If ZBC zone then bump its write pointer */
5305 	if (sdebug_dev_is_zoned(devip))
5306 		zbc_inc_wp(devip, lba, num);
5307 	sdeb_data_write_unlock(sip);
5308 	ret = 0;
5309 out:
5310 	if (meta_data_locked)
5311 		sdeb_meta_write_unlock(sip);
5312 	return ret;
5313 }
5314 
5315 static int resp_write_same_10(struct scsi_cmnd *scp,
5316 			      struct sdebug_dev_info *devip)
5317 {
5318 	u8 *cmd = scp->cmnd;
5319 	u32 lba;
5320 	u16 num;
5321 	u32 ei_lba = 0;
5322 	bool unmap = false;
5323 
5324 	if (cmd[1] & 0x8) {
5325 		if (sdebug_lbpws10 == 0) {
5326 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
5327 			return check_condition_result;
5328 		} else
5329 			unmap = true;
5330 	}
5331 	lba = get_unaligned_be32(cmd + 2);
5332 	num = get_unaligned_be16(cmd + 7);
5333 	if (num > sdebug_write_same_length) {
5334 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
5335 		return check_condition_result;
5336 	}
5337 	return resp_write_same(scp, lba, num, ei_lba, unmap, false);
5338 }
5339 
5340 static int resp_write_same_16(struct scsi_cmnd *scp,
5341 			      struct sdebug_dev_info *devip)
5342 {
5343 	u8 *cmd = scp->cmnd;
5344 	u64 lba;
5345 	u32 num;
5346 	u32 ei_lba = 0;
5347 	bool unmap = false;
5348 	bool ndob = false;
5349 
5350 	if (cmd[1] & 0x8) {	/* UNMAP */
5351 		if (sdebug_lbpws == 0) {
5352 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
5353 			return check_condition_result;
5354 		} else
5355 			unmap = true;
5356 	}
5357 	if (cmd[1] & 0x1)  /* NDOB (no data-out buffer, assumes zeroes) */
5358 		ndob = true;
5359 	lba = get_unaligned_be64(cmd + 2);
5360 	num = get_unaligned_be32(cmd + 10);
5361 	if (num > sdebug_write_same_length) {
5362 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
5363 		return check_condition_result;
5364 	}
5365 	return resp_write_same(scp, lba, num, ei_lba, unmap, ndob);
5366 }
5367 
5368 /* Note the mode field is in the same position as the (lower) service action
5369  * field. For the Report supported operation codes command, SPC-4 suggests
5370  * each mode of this command should be reported separately; for future. */
/*
 * Respond to WRITE BUFFER. Only the microcode-download modes have any
 * effect here: they raise the appropriate unit attentions on this LU, or
 * on the LUs of this target, to emulate a firmware update. All other
 * modes are accepted and ignored. Always returns 0 (GOOD status).
 */
static int resp_write_buffer(struct scsi_cmnd *scp,
			     struct sdebug_dev_info *devip)
{
	u8 *cmd = scp->cmnd;
	struct scsi_device *sdp = scp->device;
	struct sdebug_dev_info *dp;
	u8 mode;

	mode = cmd[1] & 0x1f;	/* MODE field of the WRITE BUFFER CDB */
	switch (mode) {
	case 0x4:	/* download microcode (MC) and activate (ACT) */
		/* set UAs on this device only */
		set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
		set_bit(SDEBUG_UA_MICROCODE_CHANGED, devip->uas_bm);
		break;
	case 0x5:	/* download MC, save and ACT */
		set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET, devip->uas_bm);
		break;
	case 0x6:	/* download MC with offsets and ACT */
		/* set UAs on most devices (LUs) in this target */
		list_for_each_entry(dp,
				    &devip->sdbg_host->dev_info_list,
				    dev_list)
			if (dp->target == sdp->id) {
				set_bit(SDEBUG_UA_BUS_RESET, dp->uas_bm);
				if (devip != dp)
					set_bit(SDEBUG_UA_MICROCODE_CHANGED,
						dp->uas_bm);
			}
		break;
	case 0x7:	/* download MC with offsets, save, and ACT */
		/* set UA on all devices (LUs) in this target */
		list_for_each_entry(dp,
				    &devip->sdbg_host->dev_info_list,
				    dev_list)
			if (dp->target == sdp->id)
				set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET,
					dp->uas_bm);
		break;
	default:
		/* do nothing for this command for other mode values */
		break;
	}
	return 0;
}
5416 
/*
 * Respond to COMPARE AND WRITE(16). Fetches 2 * num blocks from the
 * data-out buffer (the verify data followed by the write data), compares
 * the first half against the backing store and, on match, writes the
 * second half. A mismatch yields a MISCOMPARE check condition.
 */
static int resp_comp_write(struct scsi_cmnd *scp,
			   struct sdebug_dev_info *devip)
{
	u8 *cmd = scp->cmnd;
	u8 *arr;	/* holds verify data then write data */
	struct sdeb_store_info *sip = devip2sip(devip, true);
	u64 lba;
	u32 dnum;	/* total blocks to fetch: verify + write halves */
	u32 lb_size = sdebug_sector_size;
	u8 num;
	int ret;
	int retval = 0;

	lba = get_unaligned_be64(cmd + 2);
	num = cmd[13];		/* 1 to a maximum of 255 logical blocks */
	if (0 == num)
		return 0;	/* degenerate case, not an error */
	if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
	    (cmd[1] & 0xe0)) {
		mk_sense_invalid_opcode(scp);
		return check_condition_result;
	}
	if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
	     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
	    (cmd[1] & 0xe0) == 0)
		sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
			    "to DIF device\n");
	ret = check_device_access_params(scp, lba, num, false);
	if (ret)
		return ret;
	dnum = 2 * num;
	arr = kcalloc(lb_size, dnum, GFP_ATOMIC);
	if (NULL == arr) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
				INSUFF_RES_ASCQ);
		return check_condition_result;
	}

	ret = do_dout_fetch(scp, dnum, arr);
	if (ret == -1) {
		retval = DID_ERROR << 16;
		goto cleanup_free;
	} else if (sdebug_verbose && (ret < (dnum * lb_size)))
		sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb "
			    "indicated=%u, IO sent=%d bytes\n", my_name,
			    dnum * lb_size, ret);

	/* data lock first, then metadata lock; released in reverse order */
	sdeb_data_write_lock(sip);
	sdeb_meta_write_lock(sip);
	if (!comp_write_worker(sip, lba, num, arr, false)) {
		mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
		retval = check_condition_result;
		goto cleanup_unlock;
	}

	/* Cover sip->map_storep (which map_region()) sets with data lock */
	if (scsi_debug_lbp())
		map_region(sip, lba, num);
cleanup_unlock:
	sdeb_meta_write_unlock(sip);
	sdeb_data_write_unlock(sip);
cleanup_free:
	kfree(arr);
	return retval;
}
5482 
/* On-the-wire layout of one UNMAP block descriptor (16 bytes, big endian) */
struct unmap_block_desc {
	__be64	lba;		/* starting LBA of the range to unmap */
	__be32	blocks;		/* number of logical blocks in the range */
	__be32	__reserved;
};
5488 
/*
 * Respond to UNMAP. Parses the parameter list into block descriptors and
 * unmaps each validated LBA range from the provisioning map. A silent
 * no-op (returns GOOD) when logical block provisioning is disabled.
 */
static int resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	unsigned char *buf;
	struct unmap_block_desc *desc;
	struct sdeb_store_info *sip = devip2sip(devip, true);
	unsigned int i, payload_len, descriptors;
	int ret;

	if (!scsi_debug_lbp())
		return 0;	/* fib and say its done */
	payload_len = get_unaligned_be16(scp->cmnd + 7);
	/* debug driver: assumes the initiator sized the buffer correctly */
	BUG_ON(scsi_bufflen(scp) != payload_len);

	descriptors = (payload_len - 8) / 16;	/* 8-byte header, 16 bytes each */
	if (descriptors > sdebug_unmap_max_desc) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
		return check_condition_result;
	}

	buf = kzalloc(scsi_bufflen(scp), GFP_ATOMIC);
	if (!buf) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
				INSUFF_RES_ASCQ);
		return check_condition_result;
	}

	scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));

	/* sanity-check the parameter list header length fields */
	BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
	BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);

	desc = (void *)&buf[8];

	sdeb_meta_write_lock(sip);

	for (i = 0 ; i < descriptors ; i++) {
		unsigned long long lba = get_unaligned_be64(&desc[i].lba);
		unsigned int num = get_unaligned_be32(&desc[i].blocks);

		ret = check_device_access_params(scp, lba, num, true);
		if (ret)
			goto out;

		unmap_region(sip, lba, num);
	}

	ret = 0;

out:
	sdeb_meta_write_unlock(sip);
	kfree(buf);

	return ret;
}
5543 
5544 #define SDEBUG_GET_LBA_STATUS_LEN 32
5545 
5546 static int resp_get_lba_status(struct scsi_cmnd *scp,
5547 			       struct sdebug_dev_info *devip)
5548 {
5549 	u8 *cmd = scp->cmnd;
5550 	u64 lba;
5551 	u32 alloc_len, mapped, num;
5552 	int ret;
5553 	u8 arr[SDEBUG_GET_LBA_STATUS_LEN];
5554 
5555 	lba = get_unaligned_be64(cmd + 2);
5556 	alloc_len = get_unaligned_be32(cmd + 10);
5557 
5558 	if (alloc_len < 24)
5559 		return 0;
5560 
5561 	ret = check_device_access_params(scp, lba, 1, false);
5562 	if (ret)
5563 		return ret;
5564 
5565 	if (scsi_debug_lbp()) {
5566 		struct sdeb_store_info *sip = devip2sip(devip, true);
5567 
5568 		mapped = map_state(sip, lba, &num);
5569 	} else {
5570 		mapped = 1;
5571 		/* following just in case virtual_gb changed */
5572 		sdebug_capacity = get_sdebug_capacity();
5573 		if (sdebug_capacity - lba <= 0xffffffff)
5574 			num = sdebug_capacity - lba;
5575 		else
5576 			num = 0xffffffff;
5577 	}
5578 
5579 	memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
5580 	put_unaligned_be32(20, arr);		/* Parameter Data Length */
5581 	put_unaligned_be64(lba, arr + 8);	/* LBA */
5582 	put_unaligned_be32(num, arr + 16);	/* Number of blocks */
5583 	arr[20] = !mapped;		/* prov_stat=0: mapped; 1: dealloc */
5584 
5585 	return fill_from_dev_buffer(scp, arr, SDEBUG_GET_LBA_STATUS_LEN);
5586 }
5587 
5588 static int resp_get_stream_status(struct scsi_cmnd *scp,
5589 				  struct sdebug_dev_info *devip)
5590 {
5591 	u16 starting_stream_id, stream_id;
5592 	const u8 *cmd = scp->cmnd;
5593 	u32 alloc_len, offset;
5594 	u8 arr[256] = {};
5595 	struct scsi_stream_status_header *h = (void *)arr;
5596 
5597 	starting_stream_id = get_unaligned_be16(cmd + 4);
5598 	alloc_len = get_unaligned_be32(cmd + 10);
5599 
5600 	if (alloc_len < 8) {
5601 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
5602 		return check_condition_result;
5603 	}
5604 
5605 	if (starting_stream_id >= MAXIMUM_NUMBER_OF_STREAMS) {
5606 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1);
5607 		return check_condition_result;
5608 	}
5609 
5610 	/*
5611 	 * The GET STREAM STATUS command only reports status information
5612 	 * about open streams. Treat the non-permanent stream as open.
5613 	 */
5614 	put_unaligned_be16(MAXIMUM_NUMBER_OF_STREAMS,
5615 			   &h->number_of_open_streams);
5616 
5617 	for (offset = 8, stream_id = starting_stream_id;
5618 	     offset + 8 <= min_t(u32, alloc_len, sizeof(arr)) &&
5619 		     stream_id < MAXIMUM_NUMBER_OF_STREAMS;
5620 	     offset += 8, stream_id++) {
5621 		struct scsi_stream_status *stream_status = (void *)arr + offset;
5622 
5623 		stream_status->perm = stream_id < PERMANENT_STREAM_COUNT;
5624 		put_unaligned_be16(stream_id,
5625 				   &stream_status->stream_identifier);
5626 		stream_status->rel_lifetime = stream_id + 1;
5627 	}
5628 	put_unaligned_be32(offset - 8, &h->len); /* PARAMETER DATA LENGTH */
5629 
5630 	return fill_from_dev_buffer(scp, arr, min(offset, alloc_len));
5631 }
5632 
5633 static int resp_sync_cache(struct scsi_cmnd *scp,
5634 			   struct sdebug_dev_info *devip)
5635 {
5636 	int res = 0;
5637 	u64 lba;
5638 	u32 num_blocks;
5639 	u8 *cmd = scp->cmnd;
5640 
5641 	if (cmd[0] == SYNCHRONIZE_CACHE) {	/* 10 byte cdb */
5642 		lba = get_unaligned_be32(cmd + 2);
5643 		num_blocks = get_unaligned_be16(cmd + 7);
5644 	} else {				/* SYNCHRONIZE_CACHE(16) */
5645 		lba = get_unaligned_be64(cmd + 2);
5646 		num_blocks = get_unaligned_be32(cmd + 10);
5647 	}
5648 	if (lba + num_blocks > sdebug_capacity) {
5649 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
5650 		return check_condition_result;
5651 	}
5652 	if (!write_since_sync || (cmd[1] & 0x2))
5653 		res = SDEG_RES_IMMED_MASK;
5654 	else		/* delay if write_since_sync and IMMED clear */
5655 		write_since_sync = false;
5656 	return res;
5657 }
5658 
5659 /*
5660  * Assuming the LBA+num_blocks is not out-of-range, this function will return
5661  * CONDITION MET if the specified blocks will/have fitted in the cache, and
5662  * a GOOD status otherwise. Model a disk with a big cache and yield
5663  * CONDITION MET. Actually tries to bring range in main memory into the
5664  * cache associated with the CPU(s).
5665  *
5666  * The pcode 0x34 is also used for READ POSITION by tape devices.
5667  */
5668 static int resp_pre_fetch(struct scsi_cmnd *scp,
5669 			  struct sdebug_dev_info *devip)
5670 {
5671 	int res = 0;
5672 	u64 lba;
5673 	u64 block, rest = 0;
5674 	u32 nblks;
5675 	u8 *cmd = scp->cmnd;
5676 	struct sdeb_store_info *sip = devip2sip(devip, true);
5677 	u8 *fsp = sip->storep;
5678 
5679 	if (cmd[0] == PRE_FETCH) {	/* 10 byte cdb */
5680 		lba = get_unaligned_be32(cmd + 2);
5681 		nblks = get_unaligned_be16(cmd + 7);
5682 	} else {			/* PRE-FETCH(16) */
5683 		lba = get_unaligned_be64(cmd + 2);
5684 		nblks = get_unaligned_be32(cmd + 10);
5685 	}
5686 	if (lba + nblks > sdebug_capacity) {
5687 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
5688 		return check_condition_result;
5689 	}
5690 	if (!fsp)
5691 		goto fini;
5692 	/* PRE-FETCH spec says nothing about LBP or PI so skip them */
5693 	block = do_div(lba, sdebug_store_sectors);
5694 	if (block + nblks > sdebug_store_sectors)
5695 		rest = block + nblks - sdebug_store_sectors;
5696 
5697 	/* Try to bring the PRE-FETCH range into CPU's cache */
5698 	sdeb_data_read_lock(sip);
5699 	prefetch_range(fsp + (sdebug_sector_size * block),
5700 		       (nblks - rest) * sdebug_sector_size);
5701 	if (rest)
5702 		prefetch_range(fsp, rest * sdebug_sector_size);
5703 
5704 	sdeb_data_read_unlock(sip);
5705 fini:
5706 	if (cmd[1] & 0x2)
5707 		res = SDEG_RES_IMMED_MASK;
5708 	return res | condition_met_result;
5709 }
5710 
5711 #define RL_BUCKET_ELEMS 8
5712 
5713 /* Even though each pseudo target has a REPORT LUNS "well known logical unit"
5714  * (W-LUN), the normal Linux scanning logic does not associate it with a
5715  * device (e.g. /dev/sg7). The following magic will make that association:
5716  *   "cd /sys/class/scsi_host/host<n> ; echo '- - 49409' > scan"
5717  * where <n> is a host number. If there are multiple targets in a host then
5718  * the above will associate a W-LUN to each target. To only get a W-LUN
5719  * for target 2, then use "echo '- 2 49409' > scan" .
5720  */
static int resp_report_luns(struct scsi_cmnd *scp,
			    struct sdebug_dev_info *devip)
{
	unsigned char *cmd = scp->cmnd;
	unsigned int alloc_len;
	unsigned char select_report;
	u64 lun;
	struct scsi_lun *lun_p;
	/* response is built one "bucket" of 8 entries at a time */
	u8 arr[RL_BUCKET_ELEMS * sizeof(struct scsi_lun)];
	unsigned int lun_cnt;	/* normal LUN count (max: 256) */
	unsigned int wlun_cnt;	/* report luns W-LUN count */
	unsigned int tlun_cnt;	/* total LUN count */
	unsigned int rlen;	/* response length (in bytes) */
	int k, j, n, res;
	unsigned int off_rsp = 0;
	const int sz_lun = sizeof(struct scsi_lun);

	clear_luns_changed_on_target(devip);

	select_report = cmd[2];
	alloc_len = get_unaligned_be32(cmd + 6);

	if (alloc_len < 4) {
		pr_err("alloc len too small %d\n", alloc_len);
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
		return check_condition_result;
	}

	switch (select_report) {
	case 0:		/* all LUNs apart from W-LUNs */
		lun_cnt = sdebug_max_luns;
		wlun_cnt = 0;
		break;
	case 1:		/* only W-LUNs */
		lun_cnt = 0;
		wlun_cnt = 1;
		break;
	case 2:		/* all LUNs */
		lun_cnt = sdebug_max_luns;
		wlun_cnt = 1;
		break;
	case 0x10:	/* only administrative LUs */
	case 0x11:	/* see SPC-5 */
	case 0x12:	/* only subsiduary LUs owned by referenced LU */
	default:
		pr_debug("select report invalid %d\n", select_report);
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
		return check_condition_result;
	}

	/* LUN 0 is suppressed when the no_lun_0 module parameter is set */
	if (sdebug_no_lun_0 && (lun_cnt > 0))
		--lun_cnt;

	tlun_cnt = lun_cnt + wlun_cnt;
	rlen = tlun_cnt * sz_lun;	/* excluding 8 byte header */
	scsi_set_resid(scp, scsi_bufflen(scp));
	pr_debug("select_report %d luns = %d wluns = %d no_lun0 %d\n",
		 select_report, lun_cnt, wlun_cnt, sdebug_no_lun_0);

	/* loops rely on sizeof response header same as sizeof lun (both 8) */
	lun = sdebug_no_lun_0 ? 1 : 0;
	for (k = 0, j = 0, res = 0; true; ++k, j = 0) {
		memset(arr, 0, sizeof(arr));
		lun_p = (struct scsi_lun *)&arr[0];
		if (k == 0) {
			/* first bucket: slot 0 carries the 8 byte header */
			put_unaligned_be32(rlen, &arr[0]);
			++lun_p;
			j = 1;
		}
		for ( ; j < RL_BUCKET_ELEMS; ++j, ++lun_p) {
			if ((k * RL_BUCKET_ELEMS) + j > lun_cnt)
				break;
			int_to_scsilun(lun++, lun_p);
			if (lun > 1 && sdebug_lun_am == SAM_LUN_AM_FLAT)
				/* flat address method: set bit 6 of byte 0 */
				lun_p->scsi_lun[0] |= 0x40;
		}
		if (j < RL_BUCKET_ELEMS)
			break;	/* bucket not filled: LUN list exhausted */
		n = j * sz_lun;
		res = p_fill_from_dev_buffer(scp, arr, n, off_rsp);
		if (res)
			return res;
		off_rsp += n;
	}
	/* append the REPORT LUNS W-LUN in the slot after the last LUN */
	if (wlun_cnt) {
		int_to_scsilun(SCSI_W_LUN_REPORT_LUNS, lun_p);
		++j;
	}
	/* flush the final, partially filled bucket */
	if (j > 0)
		res = p_fill_from_dev_buffer(scp, arr, j * sz_lun, off_rsp);
	return res;
}
5813 
5814 static int resp_verify(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
5815 {
5816 	bool is_bytchk3 = false;
5817 	u8 bytchk;
5818 	int ret, j;
5819 	u32 vnum, a_num, off;
5820 	const u32 lb_size = sdebug_sector_size;
5821 	u64 lba;
5822 	u8 *arr;
5823 	u8 *cmd = scp->cmnd;
5824 	struct sdeb_store_info *sip = devip2sip(devip, true);
5825 
5826 	bytchk = (cmd[1] >> 1) & 0x3;
5827 	if (bytchk == 0) {
5828 		return 0;	/* always claim internal verify okay */
5829 	} else if (bytchk == 2) {
5830 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
5831 		return check_condition_result;
5832 	} else if (bytchk == 3) {
5833 		is_bytchk3 = true;	/* 1 block sent, compared repeatedly */
5834 	}
5835 	switch (cmd[0]) {
5836 	case VERIFY_16:
5837 		lba = get_unaligned_be64(cmd + 2);
5838 		vnum = get_unaligned_be32(cmd + 10);
5839 		break;
5840 	case VERIFY:		/* is VERIFY(10) */
5841 		lba = get_unaligned_be32(cmd + 2);
5842 		vnum = get_unaligned_be16(cmd + 7);
5843 		break;
5844 	default:
5845 		mk_sense_invalid_opcode(scp);
5846 		return check_condition_result;
5847 	}
5848 	if (vnum == 0)
5849 		return 0;	/* not an error */
5850 	a_num = is_bytchk3 ? 1 : vnum;
5851 	/* Treat following check like one for read (i.e. no write) access */
5852 	ret = check_device_access_params(scp, lba, a_num, false);
5853 	if (ret)
5854 		return ret;
5855 
5856 	arr = kcalloc(lb_size, vnum, GFP_ATOMIC | __GFP_NOWARN);
5857 	if (!arr) {
5858 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
5859 				INSUFF_RES_ASCQ);
5860 		return check_condition_result;
5861 	}
5862 	/* Not changing store, so only need read access */
5863 	sdeb_data_read_lock(sip);
5864 
5865 	ret = do_dout_fetch(scp, a_num, arr);
5866 	if (ret == -1) {
5867 		ret = DID_ERROR << 16;
5868 		goto cleanup;
5869 	} else if (sdebug_verbose && (ret < (a_num * lb_size))) {
5870 		sdev_printk(KERN_INFO, scp->device,
5871 			    "%s: cdb indicated=%u, IO sent=%d bytes\n",
5872 			    my_name, a_num * lb_size, ret);
5873 	}
5874 	if (is_bytchk3) {
5875 		for (j = 1, off = lb_size; j < vnum; ++j, off += lb_size)
5876 			memcpy(arr + off, arr, lb_size);
5877 	}
5878 	ret = 0;
5879 	if (!comp_write_worker(sip, lba, vnum, arr, true)) {
5880 		mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
5881 		ret = check_condition_result;
5882 		goto cleanup;
5883 	}
5884 cleanup:
5885 	sdeb_data_read_unlock(sip);
5886 	kfree(arr);
5887 	return ret;
5888 }
5889 
5890 #define RZONES_DESC_HD 64
5891 
5892 /* Report zones depending on start LBA and reporting options */
5893 static int resp_report_zones(struct scsi_cmnd *scp,
5894 			     struct sdebug_dev_info *devip)
5895 {
5896 	unsigned int rep_max_zones, nrz = 0;
5897 	int ret = 0;
5898 	u32 alloc_len, rep_opts, rep_len;
5899 	bool partial;
5900 	u64 lba, zs_lba;
5901 	u8 *arr = NULL, *desc;
5902 	u8 *cmd = scp->cmnd;
5903 	struct sdeb_zone_state *zsp = NULL;
5904 	struct sdeb_store_info *sip = devip2sip(devip, false);
5905 
5906 	if (!sdebug_dev_is_zoned(devip)) {
5907 		mk_sense_invalid_opcode(scp);
5908 		return check_condition_result;
5909 	}
5910 	zs_lba = get_unaligned_be64(cmd + 2);
5911 	alloc_len = get_unaligned_be32(cmd + 10);
5912 	if (alloc_len == 0)
5913 		return 0;	/* not an error */
5914 	rep_opts = cmd[14] & 0x3f;
5915 	partial = cmd[14] & 0x80;
5916 
5917 	if (zs_lba >= sdebug_capacity) {
5918 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
5919 		return check_condition_result;
5920 	}
5921 
5922 	rep_max_zones = (alloc_len - 64) >> ilog2(RZONES_DESC_HD);
5923 
5924 	arr = kzalloc(alloc_len, GFP_ATOMIC | __GFP_NOWARN);
5925 	if (!arr) {
5926 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
5927 				INSUFF_RES_ASCQ);
5928 		return check_condition_result;
5929 	}
5930 
5931 	sdeb_meta_read_lock(sip);
5932 
5933 	desc = arr + 64;
5934 	for (lba = zs_lba; lba < sdebug_capacity;
5935 	     lba = zsp->z_start + zsp->z_size) {
5936 		if (WARN_ONCE(zbc_zone(devip, lba) == zsp, "lba = %llu\n", lba))
5937 			break;
5938 		zsp = zbc_zone(devip, lba);
5939 		switch (rep_opts) {
5940 		case 0x00:
5941 			/* All zones */
5942 			break;
5943 		case 0x01:
5944 			/* Empty zones */
5945 			if (zsp->z_cond != ZC1_EMPTY)
5946 				continue;
5947 			break;
5948 		case 0x02:
5949 			/* Implicit open zones */
5950 			if (zsp->z_cond != ZC2_IMPLICIT_OPEN)
5951 				continue;
5952 			break;
5953 		case 0x03:
5954 			/* Explicit open zones */
5955 			if (zsp->z_cond != ZC3_EXPLICIT_OPEN)
5956 				continue;
5957 			break;
5958 		case 0x04:
5959 			/* Closed zones */
5960 			if (zsp->z_cond != ZC4_CLOSED)
5961 				continue;
5962 			break;
5963 		case 0x05:
5964 			/* Full zones */
5965 			if (zsp->z_cond != ZC5_FULL)
5966 				continue;
5967 			break;
5968 		case 0x06:
5969 		case 0x07:
5970 		case 0x10:
5971 			/*
5972 			 * Read-only, offline, reset WP recommended are
5973 			 * not emulated: no zones to report;
5974 			 */
5975 			continue;
5976 		case 0x11:
5977 			/* non-seq-resource set */
5978 			if (!zsp->z_non_seq_resource)
5979 				continue;
5980 			break;
5981 		case 0x3e:
5982 			/* All zones except gap zones. */
5983 			if (zbc_zone_is_gap(zsp))
5984 				continue;
5985 			break;
5986 		case 0x3f:
5987 			/* Not write pointer (conventional) zones */
5988 			if (zbc_zone_is_seq(zsp))
5989 				continue;
5990 			break;
5991 		default:
5992 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
5993 					INVALID_FIELD_IN_CDB, 0);
5994 			ret = check_condition_result;
5995 			goto fini;
5996 		}
5997 
5998 		if (nrz < rep_max_zones) {
5999 			/* Fill zone descriptor */
6000 			desc[0] = zsp->z_type;
6001 			desc[1] = zsp->z_cond << 4;
6002 			if (zsp->z_non_seq_resource)
6003 				desc[1] |= 1 << 1;
6004 			put_unaligned_be64((u64)zsp->z_size, desc + 8);
6005 			put_unaligned_be64((u64)zsp->z_start, desc + 16);
6006 			put_unaligned_be64((u64)zsp->z_wp, desc + 24);
6007 			desc += 64;
6008 		}
6009 
6010 		if (partial && nrz >= rep_max_zones)
6011 			break;
6012 
6013 		nrz++;
6014 	}
6015 
6016 	/* Report header */
6017 	/* Zone list length. */
6018 	put_unaligned_be32(nrz * RZONES_DESC_HD, arr + 0);
6019 	/* Maximum LBA */
6020 	put_unaligned_be64(sdebug_capacity - 1, arr + 8);
6021 	/* Zone starting LBA granularity. */
6022 	if (devip->zcap < devip->zsize)
6023 		put_unaligned_be64(devip->zsize, arr + 16);
6024 
6025 	rep_len = (unsigned long)desc - (unsigned long)arr;
6026 	ret = fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, rep_len));
6027 
6028 fini:
6029 	sdeb_meta_read_unlock(sip);
6030 	kfree(arr);
6031 	return ret;
6032 }
6033 
6034 static int resp_atomic_write(struct scsi_cmnd *scp,
6035 			     struct sdebug_dev_info *devip)
6036 {
6037 	struct sdeb_store_info *sip;
6038 	u8 *cmd = scp->cmnd;
6039 	u16 boundary, len;
6040 	u64 lba, lba_tmp;
6041 	int ret;
6042 
6043 	if (!scsi_debug_atomic_write()) {
6044 		mk_sense_invalid_opcode(scp);
6045 		return check_condition_result;
6046 	}
6047 
6048 	sip = devip2sip(devip, true);
6049 
6050 	lba = get_unaligned_be64(cmd + 2);
6051 	boundary = get_unaligned_be16(cmd + 10);
6052 	len = get_unaligned_be16(cmd + 12);
6053 
6054 	lba_tmp = lba;
6055 	if (sdebug_atomic_wr_align &&
6056 	    do_div(lba_tmp, sdebug_atomic_wr_align)) {
6057 		/* Does not meet alignment requirement */
6058 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
6059 		return check_condition_result;
6060 	}
6061 
6062 	if (sdebug_atomic_wr_gran && len % sdebug_atomic_wr_gran) {
6063 		/* Does not meet alignment requirement */
6064 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
6065 		return check_condition_result;
6066 	}
6067 
6068 	if (boundary > 0) {
6069 		if (boundary > sdebug_atomic_wr_max_bndry) {
6070 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 12, -1);
6071 			return check_condition_result;
6072 		}
6073 
6074 		if (len > sdebug_atomic_wr_max_length_bndry) {
6075 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 12, -1);
6076 			return check_condition_result;
6077 		}
6078 	} else {
6079 		if (len > sdebug_atomic_wr_max_length) {
6080 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 12, -1);
6081 			return check_condition_result;
6082 		}
6083 	}
6084 
6085 	ret = do_device_access(sip, scp, 0, lba, len, 0, true, true);
6086 	if (unlikely(ret == -1))
6087 		return DID_ERROR << 16;
6088 	if (unlikely(ret != len * sdebug_sector_size))
6089 		return DID_ERROR << 16;
6090 	return 0;
6091 }
6092 
6093 /* Logic transplanted from tcmu-runner, file_zbc.c */
6094 static void zbc_open_all(struct sdebug_dev_info *devip)
6095 {
6096 	struct sdeb_zone_state *zsp = &devip->zstate[0];
6097 	unsigned int i;
6098 
6099 	for (i = 0; i < devip->nr_zones; i++, zsp++) {
6100 		if (zsp->z_cond == ZC4_CLOSED)
6101 			zbc_open_zone(devip, &devip->zstate[i], true);
6102 	}
6103 }
6104 
6105 static int resp_open_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
6106 {
6107 	int res = 0;
6108 	u64 z_id;
6109 	enum sdebug_z_cond zc;
6110 	u8 *cmd = scp->cmnd;
6111 	struct sdeb_zone_state *zsp;
6112 	bool all = cmd[14] & 0x01;
6113 	struct sdeb_store_info *sip = devip2sip(devip, false);
6114 
6115 	if (!sdebug_dev_is_zoned(devip)) {
6116 		mk_sense_invalid_opcode(scp);
6117 		return check_condition_result;
6118 	}
6119 	sdeb_meta_write_lock(sip);
6120 
6121 	if (all) {
6122 		/* Check if all closed zones can be open */
6123 		if (devip->max_open &&
6124 		    devip->nr_exp_open + devip->nr_closed > devip->max_open) {
6125 			mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
6126 					INSUFF_ZONE_ASCQ);
6127 			res = check_condition_result;
6128 			goto fini;
6129 		}
6130 		/* Open all closed zones */
6131 		zbc_open_all(devip);
6132 		goto fini;
6133 	}
6134 
6135 	/* Open the specified zone */
6136 	z_id = get_unaligned_be64(cmd + 2);
6137 	if (z_id >= sdebug_capacity) {
6138 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
6139 		res = check_condition_result;
6140 		goto fini;
6141 	}
6142 
6143 	zsp = zbc_zone(devip, z_id);
6144 	if (z_id != zsp->z_start) {
6145 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
6146 		res = check_condition_result;
6147 		goto fini;
6148 	}
6149 	if (zbc_zone_is_conv(zsp)) {
6150 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
6151 		res = check_condition_result;
6152 		goto fini;
6153 	}
6154 
6155 	zc = zsp->z_cond;
6156 	if (zc == ZC3_EXPLICIT_OPEN || zc == ZC5_FULL)
6157 		goto fini;
6158 
6159 	if (devip->max_open && devip->nr_exp_open >= devip->max_open) {
6160 		mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
6161 				INSUFF_ZONE_ASCQ);
6162 		res = check_condition_result;
6163 		goto fini;
6164 	}
6165 
6166 	zbc_open_zone(devip, zsp, true);
6167 fini:
6168 	sdeb_meta_write_unlock(sip);
6169 	return res;
6170 }
6171 
6172 static void zbc_close_all(struct sdebug_dev_info *devip)
6173 {
6174 	unsigned int i;
6175 
6176 	for (i = 0; i < devip->nr_zones; i++)
6177 		zbc_close_zone(devip, &devip->zstate[i]);
6178 }
6179 
6180 static int resp_close_zone(struct scsi_cmnd *scp,
6181 			   struct sdebug_dev_info *devip)
6182 {
6183 	int res = 0;
6184 	u64 z_id;
6185 	u8 *cmd = scp->cmnd;
6186 	struct sdeb_zone_state *zsp;
6187 	bool all = cmd[14] & 0x01;
6188 	struct sdeb_store_info *sip = devip2sip(devip, false);
6189 
6190 	if (!sdebug_dev_is_zoned(devip)) {
6191 		mk_sense_invalid_opcode(scp);
6192 		return check_condition_result;
6193 	}
6194 
6195 	sdeb_meta_write_lock(sip);
6196 
6197 	if (all) {
6198 		zbc_close_all(devip);
6199 		goto fini;
6200 	}
6201 
6202 	/* Close specified zone */
6203 	z_id = get_unaligned_be64(cmd + 2);
6204 	if (z_id >= sdebug_capacity) {
6205 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
6206 		res = check_condition_result;
6207 		goto fini;
6208 	}
6209 
6210 	zsp = zbc_zone(devip, z_id);
6211 	if (z_id != zsp->z_start) {
6212 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
6213 		res = check_condition_result;
6214 		goto fini;
6215 	}
6216 	if (zbc_zone_is_conv(zsp)) {
6217 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
6218 		res = check_condition_result;
6219 		goto fini;
6220 	}
6221 
6222 	zbc_close_zone(devip, zsp);
6223 fini:
6224 	sdeb_meta_write_unlock(sip);
6225 	return res;
6226 }
6227 
6228 static void zbc_finish_zone(struct sdebug_dev_info *devip,
6229 			    struct sdeb_zone_state *zsp, bool empty)
6230 {
6231 	enum sdebug_z_cond zc = zsp->z_cond;
6232 
6233 	if (zc == ZC4_CLOSED || zc == ZC2_IMPLICIT_OPEN ||
6234 	    zc == ZC3_EXPLICIT_OPEN || (empty && zc == ZC1_EMPTY)) {
6235 		if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
6236 			zbc_close_zone(devip, zsp);
6237 		if (zsp->z_cond == ZC4_CLOSED)
6238 			devip->nr_closed--;
6239 		zsp->z_wp = zsp->z_start + zsp->z_size;
6240 		zsp->z_cond = ZC5_FULL;
6241 	}
6242 }
6243 
6244 static void zbc_finish_all(struct sdebug_dev_info *devip)
6245 {
6246 	unsigned int i;
6247 
6248 	for (i = 0; i < devip->nr_zones; i++)
6249 		zbc_finish_zone(devip, &devip->zstate[i], false);
6250 }
6251 
6252 static int resp_finish_zone(struct scsi_cmnd *scp,
6253 			    struct sdebug_dev_info *devip)
6254 {
6255 	struct sdeb_zone_state *zsp;
6256 	int res = 0;
6257 	u64 z_id;
6258 	u8 *cmd = scp->cmnd;
6259 	bool all = cmd[14] & 0x01;
6260 	struct sdeb_store_info *sip = devip2sip(devip, false);
6261 
6262 	if (!sdebug_dev_is_zoned(devip)) {
6263 		mk_sense_invalid_opcode(scp);
6264 		return check_condition_result;
6265 	}
6266 
6267 	sdeb_meta_write_lock(sip);
6268 
6269 	if (all) {
6270 		zbc_finish_all(devip);
6271 		goto fini;
6272 	}
6273 
6274 	/* Finish the specified zone */
6275 	z_id = get_unaligned_be64(cmd + 2);
6276 	if (z_id >= sdebug_capacity) {
6277 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
6278 		res = check_condition_result;
6279 		goto fini;
6280 	}
6281 
6282 	zsp = zbc_zone(devip, z_id);
6283 	if (z_id != zsp->z_start) {
6284 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
6285 		res = check_condition_result;
6286 		goto fini;
6287 	}
6288 	if (zbc_zone_is_conv(zsp)) {
6289 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
6290 		res = check_condition_result;
6291 		goto fini;
6292 	}
6293 
6294 	zbc_finish_zone(devip, zsp, true);
6295 fini:
6296 	sdeb_meta_write_unlock(sip);
6297 	return res;
6298 }
6299 
6300 static void zbc_rwp_zone(struct sdebug_dev_info *devip,
6301 			 struct sdeb_zone_state *zsp)
6302 {
6303 	enum sdebug_z_cond zc;
6304 	struct sdeb_store_info *sip = devip2sip(devip, false);
6305 
6306 	if (!zbc_zone_is_seq(zsp))
6307 		return;
6308 
6309 	zc = zsp->z_cond;
6310 	if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
6311 		zbc_close_zone(devip, zsp);
6312 
6313 	if (zsp->z_cond == ZC4_CLOSED)
6314 		devip->nr_closed--;
6315 
6316 	if (zsp->z_wp > zsp->z_start)
6317 		memset(sip->storep + zsp->z_start * sdebug_sector_size, 0,
6318 		       (zsp->z_wp - zsp->z_start) * sdebug_sector_size);
6319 
6320 	zsp->z_non_seq_resource = false;
6321 	zsp->z_wp = zsp->z_start;
6322 	zsp->z_cond = ZC1_EMPTY;
6323 }
6324 
6325 static void zbc_rwp_all(struct sdebug_dev_info *devip)
6326 {
6327 	unsigned int i;
6328 
6329 	for (i = 0; i < devip->nr_zones; i++)
6330 		zbc_rwp_zone(devip, &devip->zstate[i]);
6331 }
6332 
6333 static int resp_rwp_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
6334 {
6335 	struct sdeb_zone_state *zsp;
6336 	int res = 0;
6337 	u64 z_id;
6338 	u8 *cmd = scp->cmnd;
6339 	bool all = cmd[14] & 0x01;
6340 	struct sdeb_store_info *sip = devip2sip(devip, false);
6341 
6342 	if (!sdebug_dev_is_zoned(devip)) {
6343 		mk_sense_invalid_opcode(scp);
6344 		return check_condition_result;
6345 	}
6346 
6347 	sdeb_meta_write_lock(sip);
6348 
6349 	if (all) {
6350 		zbc_rwp_all(devip);
6351 		goto fini;
6352 	}
6353 
6354 	z_id = get_unaligned_be64(cmd + 2);
6355 	if (z_id >= sdebug_capacity) {
6356 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
6357 		res = check_condition_result;
6358 		goto fini;
6359 	}
6360 
6361 	zsp = zbc_zone(devip, z_id);
6362 	if (z_id != zsp->z_start) {
6363 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
6364 		res = check_condition_result;
6365 		goto fini;
6366 	}
6367 	if (zbc_zone_is_conv(zsp)) {
6368 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
6369 		res = check_condition_result;
6370 		goto fini;
6371 	}
6372 
6373 	zbc_rwp_zone(devip, zsp);
6374 fini:
6375 	sdeb_meta_write_unlock(sip);
6376 	return res;
6377 }
6378 
/* Unique blk-mq tag of the block layer request behind a SCSI command */
static u32 get_tag(struct scsi_cmnd *cmnd)
{
	return blk_mq_unique_tag(scsi_cmd_to_rq(cmnd));
}
6383 
6384 /* Queued (deferred) command completions converge here. */
static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
{
	struct sdebug_scsi_cmd *sdsc = container_of(sd_dp,
					typeof(*sdsc), sd_dp);
	/*
	 * Recover the command pointer: the sdebug_scsi_cmd appears to be
	 * the driver-private area placed directly after the scsi_cmnd, so
	 * stepping back one scsi_cmnd yields the command.
	 * NOTE(review): relies on that layout — confirm against the
	 * scsi_host_template cmd_size/priv conventions.
	 */
	struct scsi_cmnd *scp = (struct scsi_cmnd *)sdsc - 1;
	unsigned long flags;
	bool aborted;

	if (sdebug_statistics) {
		atomic_inc(&sdebug_completions);
		/* count completions landing on a different CPU than issue */
		if (raw_smp_processor_id() != sd_dp->issuing_cpu)
			atomic_inc(&sdebug_miss_cpus);
	}

	/* read-and-clear the aborted flag under the per-command lock */
	spin_lock_irqsave(&sdsc->lock, flags);
	aborted = sd_dp->aborted;
	if (unlikely(aborted))
		sd_dp->aborted = false;

	spin_unlock_irqrestore(&sdsc->lock, flags);

	if (aborted) {
		/* hand the command to the error handler instead of completing */
		pr_info("bypassing scsi_done() due to aborted cmd, kicking-off EH\n");
		blk_abort_request(scsi_cmd_to_rq(scp));
		return;
	}

	scsi_done(scp); /* callback to mid level */
}
6414 
6415 /* When high resolution timer goes off this function is called. */
6416 static enum hrtimer_restart sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
6417 {
6418 	struct sdebug_defer *sd_dp = container_of(timer, struct sdebug_defer,
6419 						  hrt);
6420 	sdebug_q_cmd_complete(sd_dp);
6421 	return HRTIMER_NORESTART;
6422 }
6423 
6424 /* When work queue schedules work, it calls this function. */
6425 static void sdebug_q_cmd_wq_complete(struct work_struct *work)
6426 {
6427 	struct sdebug_defer *sd_dp = container_of(work, struct sdebug_defer,
6428 						  ew.work);
6429 	sdebug_q_cmd_complete(sd_dp);
6430 }
6431 
/* When sdebug_uuid_ctl == 2, all LUs share one lazily generated UUID */
static bool got_shared_uuid;
static uuid_t shared_uuid;
6434 
/*
 * Build the zone state array (devip->zstate) for a ZBC device: derive the
 * zone size/capacity from module parameters, then lay out conventional,
 * write-pointer and (when zcap < zsize) gap zones in LBA order.
 * Returns 0 on success or a negative errno.
 */
static int sdebug_device_create_zones(struct sdebug_dev_info *devip)
{
	struct sdeb_zone_state *zsp;
	sector_t capacity = get_sdebug_capacity();
	sector_t conv_capacity;
	sector_t zstart = 0;
	unsigned int i;

	/*
	 * Set the zone size: if sdeb_zbc_zone_size_mb is not set, figure out
	 * a zone size allowing for at least 4 zones on the device. Otherwise,
	 * use the specified zone size checking that at least 2 zones can be
	 * created for the device.
	 */
	if (!sdeb_zbc_zone_size_mb) {
		devip->zsize = (DEF_ZBC_ZONE_SIZE_MB * SZ_1M)
			>> ilog2(sdebug_sector_size);
		/* halve the zone size until at least 4 zones fit */
		while (capacity < devip->zsize << 2 && devip->zsize >= 2)
			devip->zsize >>= 1;
		if (devip->zsize < 2) {
			pr_err("Device capacity too small\n");
			return -EINVAL;
		}
	} else {
		if (!is_power_of_2(sdeb_zbc_zone_size_mb)) {
			pr_err("Zone size is not a power of 2\n");
			return -EINVAL;
		}
		devip->zsize = (sdeb_zbc_zone_size_mb * SZ_1M)
			>> ilog2(sdebug_sector_size);
		if (devip->zsize >= capacity) {
			pr_err("Zone size too large for device capacity\n");
			return -EINVAL;
		}
	}

	devip->zsize_shift = ilog2(devip->zsize);
	devip->nr_zones = (capacity + devip->zsize - 1) >> devip->zsize_shift;

	/* Zone capacity (usable sectors) defaults to the full zone size */
	if (sdeb_zbc_zone_cap_mb == 0) {
		devip->zcap = devip->zsize;
	} else {
		devip->zcap = (sdeb_zbc_zone_cap_mb * SZ_1M) >>
			      ilog2(sdebug_sector_size);
		if (devip->zcap > devip->zsize) {
			pr_err("Zone capacity too large\n");
			return -EINVAL;
		}
	}

	conv_capacity = (sector_t)sdeb_zbc_nr_conv << devip->zsize_shift;
	if (conv_capacity >= capacity) {
		pr_err("Number of conventional zones too large\n");
		return -EINVAL;
	}
	devip->nr_conv_zones = sdeb_zbc_nr_conv;
	devip->nr_seq_zones = ALIGN(capacity - conv_capacity, devip->zsize) >>
			      devip->zsize_shift;
	devip->nr_zones = devip->nr_conv_zones + devip->nr_seq_zones;

	/* Add gap zones if zone capacity is smaller than the zone size */
	if (devip->zcap < devip->zsize)
		devip->nr_zones += devip->nr_seq_zones;

	if (devip->zoned) {
		/* zbc_max_open_zones can be 0, meaning "not reported" */
		if (sdeb_zbc_max_open >= devip->nr_zones - 1)
			devip->max_open = (devip->nr_zones - 1) / 2;
		else
			devip->max_open = sdeb_zbc_max_open;
	}

	devip->zstate = kcalloc(devip->nr_zones,
				sizeof(struct sdeb_zone_state), GFP_KERNEL);
	if (!devip->zstate)
		return -ENOMEM;

	/* lay the zones out in LBA order, initializing each descriptor */
	for (i = 0; i < devip->nr_zones; i++) {
		zsp = &devip->zstate[i];

		zsp->z_start = zstart;

		if (i < devip->nr_conv_zones) {
			zsp->z_type = ZBC_ZTYPE_CNV;
			zsp->z_cond = ZBC_NOT_WRITE_POINTER;
			zsp->z_wp = (sector_t)-1;
			zsp->z_size =
				min_t(u64, devip->zsize, capacity - zstart);
		} else if ((zstart & (devip->zsize - 1)) == 0) {
			/* zone-size aligned start: a write pointer zone */
			if (devip->zoned)
				zsp->z_type = ZBC_ZTYPE_SWR;
			else
				zsp->z_type = ZBC_ZTYPE_SWP;
			zsp->z_cond = ZC1_EMPTY;
			zsp->z_wp = zsp->z_start;
			zsp->z_size =
				min_t(u64, devip->zcap, capacity - zstart);
		} else {
			/* unaligned start: the gap after a short-capacity zone */
			zsp->z_type = ZBC_ZTYPE_GAP;
			zsp->z_cond = ZBC_NOT_WRITE_POINTER;
			zsp->z_wp = (sector_t)-1;
			zsp->z_size = min_t(u64, devip->zsize - devip->zcap,
					    capacity - zstart);
		}

		WARN_ON_ONCE((int)zsp->z_size <= 0);
		zstart += zsp->z_size;
	}

	return 0;
}
6546 
6547 static struct sdebug_dev_info *sdebug_device_create(
6548 			struct sdebug_host_info *sdbg_host, gfp_t flags)
6549 {
6550 	struct sdebug_dev_info *devip;
6551 
6552 	devip = kzalloc(sizeof(*devip), flags);
6553 	if (devip) {
6554 		if (sdebug_uuid_ctl == 1)
6555 			uuid_gen(&devip->lu_name);
6556 		else if (sdebug_uuid_ctl == 2) {
6557 			if (got_shared_uuid)
6558 				devip->lu_name = shared_uuid;
6559 			else {
6560 				uuid_gen(&shared_uuid);
6561 				got_shared_uuid = true;
6562 				devip->lu_name = shared_uuid;
6563 			}
6564 		}
6565 		devip->sdbg_host = sdbg_host;
6566 		if (sdeb_zbc_in_use) {
6567 			devip->zoned = sdeb_zbc_model == BLK_ZONED_HM;
6568 			if (sdebug_device_create_zones(devip)) {
6569 				kfree(devip);
6570 				return NULL;
6571 			}
6572 		} else {
6573 			devip->zoned = false;
6574 		}
6575 		if (sdebug_ptype == TYPE_TAPE) {
6576 			devip->tape_density = TAPE_DEF_DENSITY;
6577 			devip->tape_blksize = TAPE_DEF_BLKSIZE;
6578 		}
6579 		devip->create_ts = ktime_get_boottime();
6580 		atomic_set(&devip->stopped, (sdeb_tur_ms_to_ready > 0 ? 2 : 0));
6581 		spin_lock_init(&devip->list_lock);
6582 		INIT_LIST_HEAD(&devip->inject_err_list);
6583 		list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
6584 	}
6585 	return devip;
6586 }
6587 
6588 static struct sdebug_dev_info *find_build_dev_info(struct scsi_device *sdev)
6589 {
6590 	struct sdebug_host_info *sdbg_host;
6591 	struct sdebug_dev_info *open_devip = NULL;
6592 	struct sdebug_dev_info *devip;
6593 
6594 	sdbg_host = shost_to_sdebug_host(sdev->host);
6595 
6596 	list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
6597 		if ((devip->used) && (devip->channel == sdev->channel) &&
6598 		    (devip->target == sdev->id) &&
6599 		    (devip->lun == sdev->lun))
6600 			return devip;
6601 		else {
6602 			if ((!devip->used) && (!open_devip))
6603 				open_devip = devip;
6604 		}
6605 	}
6606 	if (!open_devip) { /* try and make a new one */
6607 		open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
6608 		if (!open_devip) {
6609 			pr_err("out of memory at line %d\n", __LINE__);
6610 			return NULL;
6611 		}
6612 	}
6613 
6614 	open_devip->channel = sdev->channel;
6615 	open_devip->target = sdev->id;
6616 	open_devip->lun = sdev->lun;
6617 	open_devip->sdbg_host = sdbg_host;
6618 	set_bit(SDEBUG_UA_POOCCUR, open_devip->uas_bm);
6619 	open_devip->used = true;
6620 	return open_devip;
6621 }
6622 
6623 static int scsi_debug_sdev_init(struct scsi_device *sdp)
6624 {
6625 	if (sdebug_verbose)
6626 		pr_info("sdev_init <%u %u %u %llu>\n",
6627 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
6628 
6629 	return 0;
6630 }
6631 
/*
 * sdev configure callback. Finds (or builds) the matching sdebug_dev_info,
 * allocates the simulated tape medium for tape devices, and creates the
 * per-device debugfs entries. Returns 0 on success and 1 when resources
 * cannot be obtained (the device will then be marked offline).
 */
static int scsi_debug_sdev_configure(struct scsi_device *sdp,
				     struct queue_limits *lim)
{
	struct sdebug_dev_info *devip =
			(struct sdebug_dev_info *)sdp->hostdata;
	struct dentry *dentry;

	if (sdebug_verbose)
		pr_info("sdev_configure <%u %u %u %llu>\n",
		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
	/* advertise the largest CDB length this driver handles */
	if (sdp->host->max_cmd_len != SDEBUG_MAX_CMD_LEN)
		sdp->host->max_cmd_len = SDEBUG_MAX_CMD_LEN;
	if (devip == NULL) {
		devip = find_build_dev_info(sdp);
		if (devip == NULL)
			return 1;  /* no resources, will be marked offline */
	}
	if (sdebug_ptype == TYPE_TAPE) {
		/* lazily allocate the simulated tape blocks (partition 0) */
		if (!devip->tape_blocks[0]) {
			devip->tape_blocks[0] =
				kcalloc(TAPE_UNITS, sizeof(struct tape_block),
					GFP_KERNEL);
			if (!devip->tape_blocks[0])
				return 1;
		}
		devip->tape_pending_nbr_partitions = -1;
		if (partition_tape(devip, 1, TAPE_UNITS, 0) < 0) {
			kfree(devip->tape_blocks[0]);
			devip->tape_blocks[0] = NULL;
			return 1;
		}
	}
	sdp->hostdata = devip;
	if (sdebug_no_uld)
		sdp->no_uld_attach = 1;
	config_cdb_len(sdp);

	if (sdebug_allow_restart)
		sdp->allow_restart = 1;

	/* debugfs failures below are logged but deliberately not fatal */
	devip->debugfs_entry = debugfs_create_dir(dev_name(&sdp->sdev_dev),
				sdebug_debugfs_root);
	if (IS_ERR_OR_NULL(devip->debugfs_entry))
		pr_info("failed to create debugfs directory for device %s\n",
			dev_name(&sdp->sdev_gendev));

	dentry = debugfs_create_file("error", 0600, devip->debugfs_entry, sdp,
				&sdebug_error_fops);
	if (IS_ERR_OR_NULL(dentry))
		pr_info("failed to create error file for device %s\n",
			dev_name(&sdp->sdev_gendev));

	return 0;
}
6686 
/*
 * sdev destroy callback: undo scsi_debug_sdev_configure(). Unlinks all
 * error-injection entries (freed after an RCU grace period), removes the
 * per-device debugfs directory, frees tape storage for tape devices, and
 * marks the devip slot available for re-use.
 */
static void scsi_debug_sdev_destroy(struct scsi_device *sdp)
{
	struct sdebug_dev_info *devip =
		(struct sdebug_dev_info *)sdp->hostdata;
	struct sdebug_err_inject *err;

	if (sdebug_verbose)
		pr_info("sdev_destroy <%u %u %u %llu>\n",
		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);

	if (!devip)
		return;

	/* unlink under the list lock; defer the actual free via call_rcu() */
	spin_lock(&devip->list_lock);
	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
		list_del_rcu(&err->list);
		call_rcu(&err->rcu, sdebug_err_free);
	}
	spin_unlock(&devip->list_lock);

	debugfs_remove(devip->debugfs_entry);

	if (sdp->type == TYPE_TAPE) {
		kfree(devip->tape_blocks[0]);
		devip->tape_blocks[0] = NULL;
	}

	/* make this slot available for re-use */
	devip->used = false;
	sdp->hostdata = NULL;
}
6718 
/*
 * Try to cancel the deferred completion of @cmnd. Caller must hold
 * sdsc->lock (asserted below). Returns true if the completion was
 * cancelled or had already run; false if its callback is currently
 * executing (or for an unrecognized defer type).
 */
static bool scsi_debug_stop_cmnd(struct scsi_cmnd *cmnd)
{
	struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmnd);
	struct sdebug_defer *sd_dp = &sdsc->sd_dp;
	enum sdeb_defer_type defer_t = sd_dp->defer_t;

	lockdep_assert_held(&sdsc->lock);

	if (defer_t == SDEB_DEFER_HRT) {
		int res = hrtimer_try_to_cancel(&sd_dp->hrt);

		switch (res) {
		case -1: /* -1 It's executing the CB */
			return false;
		case 0: /* Not active, it must have already run */
		case 1: /* Was active, we've now cancelled */
		default:
			return true;
		}
	} else if (defer_t == SDEB_DEFER_WQ) {
		/* Cancel if pending */
		if (cancel_work(&sd_dp->ew.work))
			return true;
		/* callback may be running, so return false */
		return false;
	} else if (defer_t == SDEB_DEFER_POLL) {
		/* poll-deferred completions are reaped later; nothing to stop */
		return true;
	}

	return false;
}
6751 
/* Payload of an internal abort: identifies the victim command by tag */
struct sdebug_abort_cmd {
	u32 unique_tag;		/* blk_mq_unique_tag() of the command to abort */
};

/* Discriminator for struct sdebug_internal_cmd */
enum sdebug_internal_cmd_type {
	SCSI_DEBUG_ABORT_CMD,
};

/* Driver-private payload of an internal (reserved-tag) command */
struct sdebug_internal_cmd {
	enum sdebug_internal_cmd_type type;

	union {
		struct sdebug_abort_cmd abort_cmd;
	};
};

/* Per-command private data: either a regular SCSI command or an internal one */
union sdebug_priv {
	struct sdebug_scsi_cmd cmd;
	struct sdebug_internal_cmd internal_cmd;
};
6772 
6773 /*
6774  * Abort SCSI command @cmnd. Only called from scsi_debug_abort(). Although
6775  * it would be possible to call scsi_debug_stop_cmnd() directly, an internal
6776  * command is allocated and submitted to trigger the reserved command
6777  * infrastructure.
6778  */
6779 static bool scsi_debug_abort_cmnd(struct scsi_cmnd *cmnd)
6780 {
6781 	struct Scsi_Host *shost = cmnd->device->host;
6782 	struct request *rq = scsi_cmd_to_rq(cmnd);
6783 	u32 unique_tag = blk_mq_unique_tag(rq);
6784 	struct sdebug_internal_cmd *internal_cmd;
6785 	struct scsi_cmnd *abort_cmd;
6786 	struct request *abort_rq;
6787 	blk_status_t res;
6788 
6789 	abort_cmd = scsi_get_internal_cmd(shost->pseudo_sdev, DMA_NONE,
6790 					  BLK_MQ_REQ_RESERVED);
6791 	if (!abort_cmd)
6792 		return false;
6793 	internal_cmd = scsi_cmd_priv(abort_cmd);
6794 	*internal_cmd = (struct sdebug_internal_cmd) {
6795 		.type = SCSI_DEBUG_ABORT_CMD,
6796 		.abort_cmd = {
6797 			.unique_tag = unique_tag,
6798 		},
6799 	};
6800 	abort_rq = scsi_cmd_to_rq(abort_cmd);
6801 	abort_rq->timeout = secs_to_jiffies(3);
6802 	res = blk_execute_rq(abort_rq, true);
6803 	scsi_put_internal_cmd(abort_cmd);
6804 	return res == BLK_STS_OK;
6805 }
6806 
6807 /*
6808  * All we can do is set the cmnd as internally aborted and wait for it to
6809  * finish. We cannot call scsi_done() as normal completion path may do that.
6810  */
6811 static bool sdebug_stop_cmnd(struct request *rq, void *data)
6812 {
6813 	scsi_debug_abort_cmnd(blk_mq_rq_to_pdu(rq));
6814 
6815 	return true;
6816 }
6817 
6818 /* Deletes (stops) timers or work queues of all queued commands */
6819 static void stop_all_queued(void)
6820 {
6821 	struct sdebug_host_info *sdhp;
6822 
6823 	mutex_lock(&sdebug_host_list_mutex);
6824 	list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
6825 		struct Scsi_Host *shost = sdhp->shost;
6826 
6827 		blk_mq_tagset_busy_iter(&shost->tag_set, sdebug_stop_cmnd, NULL);
6828 	}
6829 	mutex_unlock(&sdebug_host_list_mutex);
6830 }
6831 
6832 static int sdebug_fail_abort(struct scsi_cmnd *cmnd)
6833 {
6834 	struct scsi_device *sdp = cmnd->device;
6835 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
6836 	struct sdebug_err_inject *err;
6837 	unsigned char *cmd = cmnd->cmnd;
6838 	int ret = 0;
6839 
6840 	if (devip == NULL)
6841 		return 0;
6842 
6843 	rcu_read_lock();
6844 	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
6845 		if (err->type == ERR_ABORT_CMD_FAILED &&
6846 		    (err->cmd == cmd[0] || err->cmd == 0xff)) {
6847 			ret = !!err->cnt;
6848 			if (err->cnt < 0)
6849 				err->cnt++;
6850 
6851 			rcu_read_unlock();
6852 			return ret;
6853 		}
6854 	}
6855 	rcu_read_unlock();
6856 
6857 	return 0;
6858 }
6859 
6860 static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
6861 {
6862 	bool aborted = scsi_debug_abort_cmnd(SCpnt);
6863 	u8 *cmd = SCpnt->cmnd;
6864 	u8 opcode = cmd[0];
6865 
6866 	++num_aborts;
6867 
6868 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
6869 		sdev_printk(KERN_INFO, SCpnt->device,
6870 			    "command%s found\n",
6871 			    aborted ? "" : " not");
6872 
6873 
6874 	if (sdebug_fail_abort(SCpnt)) {
6875 		scmd_printk(KERN_INFO, SCpnt, "fail abort command 0x%x\n",
6876 			    opcode);
6877 		return FAILED;
6878 	}
6879 
6880 	if (aborted == false)
6881 		return FAILED;
6882 
6883 	return SUCCESS;
6884 }
6885 
6886 static bool scsi_debug_stop_all_queued_iter(struct request *rq, void *data)
6887 {
6888 	struct scsi_device *sdp = data;
6889 	struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
6890 
6891 	if (scmd->device == sdp)
6892 		scsi_debug_abort_cmnd(scmd);
6893 
6894 	return true;
6895 }
6896 
6897 /* Deletes (stops) timers or work queues of all queued commands per sdev */
6898 static void scsi_debug_stop_all_queued(struct scsi_device *sdp)
6899 {
6900 	struct Scsi_Host *shost = sdp->host;
6901 
6902 	blk_mq_tagset_busy_iter(&shost->tag_set,
6903 				scsi_debug_stop_all_queued_iter, sdp);
6904 }
6905 
6906 static int sdebug_fail_lun_reset(struct scsi_cmnd *cmnd)
6907 {
6908 	struct scsi_device *sdp = cmnd->device;
6909 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
6910 	struct sdebug_err_inject *err;
6911 	unsigned char *cmd = cmnd->cmnd;
6912 	int ret = 0;
6913 
6914 	if (devip == NULL)
6915 		return 0;
6916 
6917 	rcu_read_lock();
6918 	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
6919 		if (err->type == ERR_LUN_RESET_FAILED &&
6920 		    (err->cmd == cmd[0] || err->cmd == 0xff)) {
6921 			ret = !!err->cnt;
6922 			if (err->cnt < 0)
6923 				err->cnt++;
6924 
6925 			rcu_read_unlock();
6926 			return ret;
6927 		}
6928 	}
6929 	rcu_read_unlock();
6930 
6931 	return 0;
6932 }
6933 
6934 static void scsi_tape_reset_clear(struct sdebug_dev_info *devip)
6935 {
6936 	int i;
6937 
6938 	devip->tape_blksize = TAPE_DEF_BLKSIZE;
6939 	devip->tape_density = TAPE_DEF_DENSITY;
6940 	devip->tape_partition = 0;
6941 	devip->tape_dce = 0;
6942 	for (i = 0; i < TAPE_MAX_PARTITIONS; i++)
6943 		devip->tape_location[i] = 0;
6944 	devip->tape_pending_nbr_partitions = -1;
6945 	/* Don't reset partitioning? */
6946 }
6947 
6948 static int scsi_debug_device_reset(struct scsi_cmnd *SCpnt)
6949 {
6950 	struct scsi_device *sdp = SCpnt->device;
6951 	struct sdebug_dev_info *devip = sdp->hostdata;
6952 	u8 *cmd = SCpnt->cmnd;
6953 	u8 opcode = cmd[0];
6954 
6955 	++num_dev_resets;
6956 
6957 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
6958 		sdev_printk(KERN_INFO, sdp, "doing device reset");
6959 
6960 	scsi_debug_stop_all_queued(sdp);
6961 	if (devip) {
6962 		set_bit(SDEBUG_UA_POR, devip->uas_bm);
6963 		if (SCpnt->device->type == TYPE_TAPE)
6964 			scsi_tape_reset_clear(devip);
6965 	}
6966 
6967 	if (sdebug_fail_lun_reset(SCpnt)) {
6968 		scmd_printk(KERN_INFO, SCpnt, "fail lun reset 0x%x\n", opcode);
6969 		return FAILED;
6970 	}
6971 
6972 	return SUCCESS;
6973 }
6974 
6975 static int sdebug_fail_target_reset(struct scsi_cmnd *cmnd)
6976 {
6977 	struct scsi_target *starget = scsi_target(cmnd->device);
6978 	struct sdebug_target_info *targetip =
6979 		(struct sdebug_target_info *)starget->hostdata;
6980 
6981 	if (targetip)
6982 		return targetip->reset_fail;
6983 
6984 	return 0;
6985 }
6986 
6987 static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt)
6988 {
6989 	struct scsi_device *sdp = SCpnt->device;
6990 	struct sdebug_host_info *sdbg_host = shost_to_sdebug_host(sdp->host);
6991 	struct sdebug_dev_info *devip;
6992 	u8 *cmd = SCpnt->cmnd;
6993 	u8 opcode = cmd[0];
6994 	int k = 0;
6995 
6996 	++num_target_resets;
6997 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
6998 		sdev_printk(KERN_INFO, sdp, "doing target reset\n");
6999 
7000 	list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
7001 		if (devip->target == sdp->id) {
7002 			set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
7003 			if (SCpnt->device->type == TYPE_TAPE)
7004 				scsi_tape_reset_clear(devip);
7005 			++k;
7006 		}
7007 	}
7008 
7009 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
7010 		sdev_printk(KERN_INFO, sdp,
7011 			    "%d device(s) found in target\n", k);
7012 
7013 	if (sdebug_fail_target_reset(SCpnt)) {
7014 		scmd_printk(KERN_INFO, SCpnt, "fail target reset 0x%x\n",
7015 			    opcode);
7016 		return FAILED;
7017 	}
7018 
7019 	return SUCCESS;
7020 }
7021 
7022 static int scsi_debug_bus_reset(struct scsi_cmnd *SCpnt)
7023 {
7024 	struct scsi_device *sdp = SCpnt->device;
7025 	struct sdebug_host_info *sdbg_host = shost_to_sdebug_host(sdp->host);
7026 	struct sdebug_dev_info *devip;
7027 	int k = 0;
7028 
7029 	++num_bus_resets;
7030 
7031 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
7032 		sdev_printk(KERN_INFO, sdp, "doing bus reset\n");
7033 
7034 	list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
7035 		set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
7036 		if (SCpnt->device->type == TYPE_TAPE)
7037 			scsi_tape_reset_clear(devip);
7038 		++k;
7039 	}
7040 
7041 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
7042 		sdev_printk(KERN_INFO, sdp,
7043 			    "%d device(s) found in host\n", k);
7044 	return SUCCESS;
7045 }
7046 
7047 static int scsi_debug_host_reset(struct scsi_cmnd *SCpnt)
7048 {
7049 	struct sdebug_host_info *sdbg_host;
7050 	struct sdebug_dev_info *devip;
7051 	int k = 0;
7052 
7053 	++num_host_resets;
7054 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
7055 		sdev_printk(KERN_INFO, SCpnt->device, "doing host reset\n");
7056 	mutex_lock(&sdebug_host_list_mutex);
7057 	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
7058 		list_for_each_entry(devip, &sdbg_host->dev_info_list,
7059 				    dev_list) {
7060 			set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
7061 			if (SCpnt->device->type == TYPE_TAPE)
7062 				scsi_tape_reset_clear(devip);
7063 			++k;
7064 		}
7065 	}
7066 	mutex_unlock(&sdebug_host_list_mutex);
7067 	stop_all_queued();
7068 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
7069 		sdev_printk(KERN_INFO, SCpnt->device,
7070 			"%d device(s) found\n", k);
7071 	return SUCCESS;
7072 }
7073 
/*
 * Write a classic MS-DOS (MBR) partition table into @ramp, the first
 * sector of the simulated store. Creates sdebug_num_parts equally sized
 * Linux (type 0x83) partitions aligned to cylinder boundaries. Does
 * nothing if no partitions were requested or @store_size < 1 MiB.
 */
static void sdebug_build_parts(unsigned char *ramp, unsigned long store_size)
{
	struct msdos_partition *pp;
	int starts[SDEBUG_MAX_PARTS + 2], max_part_secs;
	int sectors_per_part, num_sectors, k;
	int heads_by_sects, start_sec, end_sec;

	/* assume partition table already zeroed */
	if ((sdebug_num_parts < 1) || (store_size < 1048576))
		return;
	if (sdebug_num_parts > SDEBUG_MAX_PARTS) {
		sdebug_num_parts = SDEBUG_MAX_PARTS;
		pr_warn("reducing partitions to %d\n", SDEBUG_MAX_PARTS);
	}
	num_sectors = (int)get_sdebug_capacity();
	/* the first track (sdebug_sectors_per sectors) is left unused */
	sectors_per_part = (num_sectors - sdebug_sectors_per)
			   / sdebug_num_parts;
	heads_by_sects = sdebug_heads * sdebug_sectors_per;	/* sectors/cylinder */
	starts[0] = sdebug_sectors_per;
	max_part_secs = sectors_per_part;
	/* round each start down to a cylinder boundary; track the smallest gap */
	for (k = 1; k < sdebug_num_parts; ++k) {
		starts[k] = ((k * sectors_per_part) / heads_by_sects)
			    * heads_by_sects;
		if (starts[k] - starts[k - 1] < max_part_secs)
			max_part_secs = starts[k] - starts[k - 1];
	}
	starts[sdebug_num_parts] = num_sectors;
	starts[sdebug_num_parts + 1] = 0;	/* 0 terminates the loop below */

	ramp[510] = 0x55;	/* magic partition markings */
	ramp[511] = 0xAA;
	pp = (struct msdos_partition *)(ramp + 0x1be);
	for (k = 0; starts[k + 1]; ++k, ++pp) {
		start_sec = starts[k];
		end_sec = starts[k] + max_part_secs - 1;
		pp->boot_ind = 0;

		/* convert linear start/end sectors to cylinder/head/sector */
		pp->cyl = start_sec / heads_by_sects;
		pp->head = (start_sec - (pp->cyl * heads_by_sects))
			   / sdebug_sectors_per;
		pp->sector = (start_sec % sdebug_sectors_per) + 1;

		pp->end_cyl = end_sec / heads_by_sects;
		pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
			       / sdebug_sectors_per;
		pp->end_sector = (end_sec % sdebug_sectors_per) + 1;

		pp->start_sect = cpu_to_le32(start_sec);
		pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1);
		pp->sys_ind = 0x83;	/* plain Linux partition */
	}
}
7126 
7127 static void block_unblock_all_queues(bool block)
7128 {
7129 	struct sdebug_host_info *sdhp;
7130 
7131 	lockdep_assert_held(&sdebug_host_list_mutex);
7132 
7133 	list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
7134 		struct Scsi_Host *shost = sdhp->shost;
7135 
7136 		if (block)
7137 			scsi_block_requests(shost);
7138 		else
7139 			scsi_unblock_requests(shost);
7140 	}
7141 }
7142 
7143 /* Adjust (by rounding down) the sdebug_cmnd_count so abs(every_nth)-1
7144  * commands will be processed normally before triggers occur.
7145  */
7146 static void tweak_cmnd_count(void)
7147 {
7148 	int count, modulo;
7149 
7150 	modulo = abs(sdebug_every_nth);
7151 	if (modulo < 2)
7152 		return;
7153 
7154 	mutex_lock(&sdebug_host_list_mutex);
7155 	block_unblock_all_queues(true);
7156 	count = atomic_read(&sdebug_cmnd_count);
7157 	atomic_set(&sdebug_cmnd_count, (count / modulo) * modulo);
7158 	block_unblock_all_queues(false);
7159 	mutex_unlock(&sdebug_host_list_mutex);
7160 }
7161 
7162 static void clear_queue_stats(void)
7163 {
7164 	atomic_set(&sdebug_cmnd_count, 0);
7165 	atomic_set(&sdebug_completions, 0);
7166 	atomic_set(&sdebug_miss_cpus, 0);
7167 	atomic_set(&sdebug_a_tsf, 0);
7168 }
7169 
7170 static bool inject_on_this_cmd(void)
7171 {
7172 	if (sdebug_every_nth == 0)
7173 		return false;
7174 	return (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) == 0;
7175 }
7176 
7177 #define INCLUSIVE_TIMING_MAX_NS 1000000		/* 1 millisecond */
7178 
/* Complete the processing of the thread that queued a SCSI command to this
 * driver. The command is either completed synchronously in this thread via
 * scsi_done() (the respond_in_thread path), or its completion is deferred
 * to an hrtimer, a work item, or a later mq_poll pass. Always returns 0
 * (every return statement below is "return 0").
 *
 * @devip:	 target device; NULL forces an immediate DID_NO_CONNECT
 * @scsi_result: result used when the response function leaves result at 0
 * @pfp:	 response function (one of the resp_*() handlers); may be NULL
 * @delta_jiff:	 delay in jiffies; 0 -> respond in this thread, < 0 -> work queue
 * @ndelay:	 delay in nanoseconds (used when delta_jiff <= 0)
 */
static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
			 int scsi_result,
			 int (*pfp)(struct scsi_cmnd *,
				    struct sdebug_dev_info *),
			 int delta_jiff, int ndelay)
{
	struct request *rq = scsi_cmd_to_rq(cmnd);
	bool polled = rq->cmd_flags & REQ_POLLED;
	struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmnd);
	unsigned long flags;
	u64 ns_from_boot = 0;
	struct scsi_device *sdp;
	struct sdebug_defer *sd_dp;

	if (unlikely(devip == NULL)) {
		if (scsi_result == 0)
			scsi_result = DID_NO_CONNECT << 16;
		goto respond_in_thread;
	}
	sdp = cmnd->device;

	if (delta_jiff == 0)
		goto respond_in_thread;


	/* optionally inject a TASK SET FULL status when the queue is full */
	if (unlikely(sdebug_every_nth && (SDEBUG_OPT_RARE_TSF & sdebug_opts) &&
		     (scsi_result == 0))) {
		int num_in_q = scsi_device_busy(sdp);
		int qdepth = cmnd->device->queue_depth;

		if ((num_in_q == qdepth) &&
		    (atomic_inc_return(&sdebug_a_tsf) >=
		     abs(sdebug_every_nth))) {
			atomic_set(&sdebug_a_tsf, 0);
			scsi_result = device_qfull_result;

			if (unlikely(SDEBUG_OPT_Q_NOISE & sdebug_opts))
				sdev_printk(KERN_INFO, sdp, "num_in_q=%d +1, <inject> status: TASK SET FULL\n",
					    num_in_q);
		}
	}

	sd_dp = &sdsc->sd_dp;

	/* timestamp only needed for polling and very short ndelay values */
	if (polled || (ndelay > 0 && ndelay < INCLUSIVE_TIMING_MAX_NS))
		ns_from_boot = ktime_get_boottime_ns();

	/* one of the resp_*() response functions is called here */
	cmnd->result = pfp ? pfp(cmnd, devip) : 0;
	if (cmnd->result & SDEG_RES_IMMED_MASK) {
		/* the response function requested an immediate completion */
		cmnd->result &= ~SDEG_RES_IMMED_MASK;
		delta_jiff = ndelay = 0;
	}
	if (cmnd->result == 0 && scsi_result != 0)
		cmnd->result = scsi_result;
	if (cmnd->result == 0 && unlikely(sdebug_opts & SDEBUG_OPT_TRANSPORT_ERR)) {
		if (atomic_read(&sdeb_inject_pending)) {
			/* one-shot injection of an ABORTED COMMAND sense */
			mk_sense_buffer(cmnd, ABORTED_COMMAND, TRANSPORT_PROBLEM, ACK_NAK_TO);
			atomic_set(&sdeb_inject_pending, 0);
			cmnd->result = check_condition_result;
		}
	}

	if (unlikely(sdebug_verbose && cmnd->result))
		sdev_printk(KERN_INFO, sdp, "non-zero result=0x%x\n",
			    cmnd->result);

	if (delta_jiff > 0 || ndelay > 0) {
		ktime_t kt;

		if (delta_jiff > 0) {
			u64 ns = jiffies_to_nsecs(delta_jiff);

			if (sdebug_random && ns < U32_MAX) {
				ns = get_random_u32_below((u32)ns);
			} else if (sdebug_random) {
				ns >>= 12;	/* scale to 4 usec precision */
				if (ns < U32_MAX)	/* over 4 hours max */
					ns = get_random_u32_below((u32)ns);
				ns <<= 12;
			}
			kt = ns_to_ktime(ns);
		} else {	/* ndelay has a 4.2 second max */
			kt = sdebug_random ? get_random_u32_below((u32)ndelay) :
					     (u32)ndelay;
			if (ndelay < INCLUSIVE_TIMING_MAX_NS) {
				u64 d = ktime_get_boottime_ns() - ns_from_boot;

				if (kt <= d) {	/* elapsed duration >= kt */
					/* call scsi_done() from this thread */
					scsi_done(cmnd);
					return 0;
				}
				/* otherwise reduce kt by elapsed time */
				kt -= d;
			}
		}
		if (sdebug_statistics)
			sd_dp->issuing_cpu = raw_smp_processor_id();
		if (polled) {
			/* polled: record completion time; mq_poll reaps it later */
			spin_lock_irqsave(&sdsc->lock, flags);
			sd_dp->cmpl_ts = ktime_add(ns_to_ktime(ns_from_boot), kt);
			sd_dp->defer_t = SDEB_DEFER_POLL;
			spin_unlock_irqrestore(&sdsc->lock, flags);
		} else {
			/* schedule the invocation of scsi_done() for a later time */
			spin_lock_irqsave(&sdsc->lock, flags);
			sd_dp->defer_t = SDEB_DEFER_HRT;
			hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED);
			/*
			 * The completion handler will try to grab sqcp->lock,
			 * so there is no chance that the completion handler
			 * will call scsi_done() until we release the lock
			 * here (so ok to keep referencing sdsc).
			 */
			spin_unlock_irqrestore(&sdsc->lock, flags);
		}
	} else {	/* jdelay < 0, use work queue */
		if (unlikely((sdebug_opts & SDEBUG_OPT_CMD_ABORT) &&
			     atomic_read(&sdeb_inject_pending))) {
			/* one-shot injected command abort */
			sd_dp->aborted = true;
			atomic_set(&sdeb_inject_pending, 0);
			sdev_printk(KERN_INFO, sdp, "abort request tag=%#x\n",
				    blk_mq_unique_tag_to_tag(get_tag(cmnd)));
		}

		if (sdebug_statistics)
			sd_dp->issuing_cpu = raw_smp_processor_id();
		if (polled) {
			spin_lock_irqsave(&sdsc->lock, flags);
			sd_dp->cmpl_ts = ns_to_ktime(ns_from_boot);
			sd_dp->defer_t = SDEB_DEFER_POLL;
			spin_unlock_irqrestore(&sdsc->lock, flags);
		} else {
			spin_lock_irqsave(&sdsc->lock, flags);
			sd_dp->defer_t = SDEB_DEFER_WQ;
			schedule_work(&sd_dp->ew.work);
			spin_unlock_irqrestore(&sdsc->lock, flags);
		}
	}

	return 0;

respond_in_thread:	/* call back to mid-layer using invocation thread */
	cmnd->result = pfp != NULL ? pfp(cmnd, devip) : 0;
	cmnd->result &= ~SDEG_RES_IMMED_MASK;
	if (cmnd->result == 0 && scsi_result != 0)
		cmnd->result = scsi_result;
	scsi_done(cmnd);
	return 0;
}
7335 
7336 /* Note: The following macros create attribute files in the
7337    /sys/module/scsi_debug/parameters directory. Unfortunately this
7338    driver is unaware of a change and cannot trigger auxiliary actions
7339    as it can when the corresponding attribute in the
7340    /sys/bus/pseudo/drivers/scsi_debug directory is changed.
7341  */
/* module parameters; writable (S_IWUSR/0644) ones can also be changed at runtime */
module_param_named(add_host, sdebug_add_host, int, S_IRUGO | S_IWUSR);
module_param_named(ato, sdebug_ato, int, S_IRUGO);
module_param_named(cdb_len, sdebug_cdb_len, int, 0644);
module_param_named(clustering, sdebug_clustering, bool, S_IRUGO | S_IWUSR);
module_param_named(delay, sdebug_jdelay, int, S_IRUGO | S_IWUSR);
module_param_named(dev_size_mb, sdebug_dev_size_mb, int, S_IRUGO);
module_param_named(dif, sdebug_dif, int, S_IRUGO);
module_param_named(dix, sdebug_dix, int, S_IRUGO);
module_param_named(dsense, sdebug_dsense, int, S_IRUGO | S_IWUSR);
module_param_named(every_nth, sdebug_every_nth, int, S_IRUGO | S_IWUSR);
module_param_named(fake_rw, sdebug_fake_rw, int, S_IRUGO | S_IWUSR);
module_param_named(guard, sdebug_guard, uint, S_IRUGO);
module_param_named(host_lock, sdebug_host_lock, bool, S_IRUGO | S_IWUSR);
module_param_named(host_max_queue, sdebug_host_max_queue, int, S_IRUGO);
module_param_string(inq_product, sdebug_inq_product_id,
		    sizeof(sdebug_inq_product_id), S_IRUGO | S_IWUSR);
module_param_string(inq_rev, sdebug_inq_product_rev,
		    sizeof(sdebug_inq_product_rev), S_IRUGO | S_IWUSR);
module_param_string(inq_vendor, sdebug_inq_vendor_id,
		    sizeof(sdebug_inq_vendor_id), S_IRUGO | S_IWUSR);
module_param_named(lbprz, sdebug_lbprz, int, S_IRUGO);
module_param_named(lbpu, sdebug_lbpu, int, S_IRUGO);
module_param_named(lbpws, sdebug_lbpws, int, S_IRUGO);
module_param_named(lbpws10, sdebug_lbpws10, int, S_IRUGO);
module_param_named(atomic_wr, sdebug_atomic_wr, int, S_IRUGO);
module_param_named(lowest_aligned, sdebug_lowest_aligned, int, S_IRUGO);
module_param_named(lun_format, sdebug_lun_am_i, int, S_IRUGO | S_IWUSR);
module_param_named(max_luns, sdebug_max_luns, int, S_IRUGO | S_IWUSR);
module_param_named(max_queue, sdebug_max_queue, int, S_IRUGO | S_IWUSR);
module_param_named(medium_error_count, sdebug_medium_error_count, int,
		   S_IRUGO | S_IWUSR);
module_param_named(medium_error_start, sdebug_medium_error_start, int,
		   S_IRUGO | S_IWUSR);
module_param_named(ndelay, sdebug_ndelay, int, S_IRUGO | S_IWUSR);
module_param_named(no_lun_0, sdebug_no_lun_0, int, S_IRUGO | S_IWUSR);
module_param_named(no_rwlock, sdebug_no_rwlock, bool, S_IRUGO | S_IWUSR);
module_param_named(no_uld, sdebug_no_uld, int, S_IRUGO);
module_param_named(num_parts, sdebug_num_parts, int, S_IRUGO);
module_param_named(num_tgts, sdebug_num_tgts, int, S_IRUGO | S_IWUSR);
module_param_named(opt_blks, sdebug_opt_blks, int, S_IRUGO);
module_param_named(opt_xferlen_exp, sdebug_opt_xferlen_exp, int, S_IRUGO);
module_param_named(opts, sdebug_opts, int, S_IRUGO | S_IWUSR);
module_param_named(per_host_store, sdebug_per_host_store, bool,
		   S_IRUGO | S_IWUSR);
module_param_named(physblk_exp, sdebug_physblk_exp, int, S_IRUGO);
module_param_named(ptype, sdebug_ptype, int, S_IRUGO | S_IWUSR);
module_param_named(random, sdebug_random, bool, S_IRUGO | S_IWUSR);
module_param_named(removable, sdebug_removable, bool, S_IRUGO | S_IWUSR);
module_param_named(scsi_level, sdebug_scsi_level, int, S_IRUGO);
module_param_named(sector_size, sdebug_sector_size, int, S_IRUGO);
module_param_named(statistics, sdebug_statistics, bool, S_IRUGO | S_IWUSR);
module_param_named(strict, sdebug_strict, bool, S_IRUGO | S_IWUSR);
module_param_named(submit_queues, submit_queues, int, S_IRUGO);
module_param_named(poll_queues, poll_queues, int, S_IRUGO);
module_param_named(tur_ms_to_ready, sdeb_tur_ms_to_ready, int, S_IRUGO);
module_param_named(unmap_alignment, sdebug_unmap_alignment, int, S_IRUGO);
module_param_named(unmap_granularity, sdebug_unmap_granularity, int, S_IRUGO);
module_param_named(unmap_max_blocks, sdebug_unmap_max_blocks, int, S_IRUGO);
module_param_named(unmap_max_desc, sdebug_unmap_max_desc, int, S_IRUGO);
module_param_named(atomic_wr_max_length, sdebug_atomic_wr_max_length, int, S_IRUGO);
module_param_named(atomic_wr_align, sdebug_atomic_wr_align, int, S_IRUGO);
module_param_named(atomic_wr_gran, sdebug_atomic_wr_gran, int, S_IRUGO);
module_param_named(atomic_wr_max_length_bndry, sdebug_atomic_wr_max_length_bndry, int, S_IRUGO);
module_param_named(atomic_wr_max_bndry, sdebug_atomic_wr_max_bndry, int, S_IRUGO);
module_param_named(uuid_ctl, sdebug_uuid_ctl, int, S_IRUGO);
module_param_named(virtual_gb, sdebug_virtual_gb, int, S_IRUGO | S_IWUSR);
module_param_named(vpd_use_hostno, sdebug_vpd_use_hostno, int,
		   S_IRUGO | S_IWUSR);
module_param_named(wp, sdebug_wp, bool, S_IRUGO | S_IWUSR);
module_param_named(write_same_length, sdebug_write_same_length, int,
		   S_IRUGO | S_IWUSR);
module_param_named(zbc, sdeb_zbc_model_s, charp, S_IRUGO);
module_param_named(zone_cap_mb, sdeb_zbc_zone_cap_mb, int, S_IRUGO);
module_param_named(zone_max_open, sdeb_zbc_max_open, int, S_IRUGO);
module_param_named(zone_nr_conv, sdeb_zbc_nr_conv, int, S_IRUGO);
module_param_named(zone_size_mb, sdeb_zbc_zone_size_mb, int, S_IRUGO);
module_param_named(allow_restart, sdebug_allow_restart, bool, S_IRUGO | S_IWUSR);

/* module metadata */
MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
MODULE_DESCRIPTION("SCSI debug adapter driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(SDEBUG_VERSION);

/* parameter descriptions reported by modinfo(8) */
MODULE_PARM_DESC(add_host, "add n hosts, in sysfs if negative remove host(s) (def=1)");
MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
MODULE_PARM_DESC(cdb_len, "suggest CDB lengths to drivers (def=10)");
MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny");
MODULE_PARM_DESC(dev_size_mb, "size in MiB of ram shared by devs(def=8)");
MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
MODULE_PARM_DESC(host_lock, "host_lock is ignored (def=0)");
MODULE_PARM_DESC(host_max_queue,
		 "host max # of queued cmds (0 to max(def) [max_queue fixed equal for !0])");
MODULE_PARM_DESC(inq_product, "SCSI INQUIRY product string (def=\"scsi_debug\")");
MODULE_PARM_DESC(inq_rev, "SCSI INQUIRY revision string (def=\""
		 SDEBUG_VERSION "\")");
MODULE_PARM_DESC(inq_vendor, "SCSI INQUIRY vendor string (def=\"Linux\")");
MODULE_PARM_DESC(lbprz,
		 "on read unmapped LBs return 0 when 1 (def), return 0xff when 2");
MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
MODULE_PARM_DESC(atomic_wr, "enable ATOMIC WRITE support, support WRITE ATOMIC(16) (def=0)");
MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
MODULE_PARM_DESC(lun_format, "LUN format: 0->peripheral (def); 1 --> flat address method");
MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to max(def))");
MODULE_PARM_DESC(medium_error_count, "count of sectors to return follow on MEDIUM error");
MODULE_PARM_DESC(medium_error_start, "starting sector number to return MEDIUM error");
MODULE_PARM_DESC(ndelay, "response delay in nanoseconds (def=0 -> ignore)");
MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
MODULE_PARM_DESC(no_rwlock, "don't protect user data reads+writes (def=0)");
MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0))");
MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=1024)");
7463 MODULE_PARM_DESC(opt_xferlen_exp, "optimal transfer length granularity exponent (def=physblk_exp)");
7464 MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
7465 MODULE_PARM_DESC(per_host_store, "If set, next positive add_host will get new store (def=0)");
7466 MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
7467 MODULE_PARM_DESC(poll_queues, "support for iouring iopoll queues (1 to max(submit_queues - 1))");
7468 MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
7469 MODULE_PARM_DESC(random, "If set, uniformly randomize command duration between 0 and delay_in_ns");
7470 MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
7471 MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=7[SPC-5])");
7472 MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
7473 MODULE_PARM_DESC(statistics, "collect statistics on commands, queues (def=0)");
7474 MODULE_PARM_DESC(strict, "stricter checks: reserved field in cdb (def=0)");
7475 MODULE_PARM_DESC(submit_queues, "support for block multi-queue (def=1)");
7476 MODULE_PARM_DESC(tur_ms_to_ready, "TEST UNIT READY millisecs before initial good status (def=0)");
7477 MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
7478 MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
7479 MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)");
7480 MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
7481 MODULE_PARM_DESC(atomic_wr_max_length, "max # of blocks can be atomically written in one cmd (def=8192)");
7482 MODULE_PARM_DESC(atomic_wr_align, "minimum alignment of atomic write in blocks (def=2)");
7483 MODULE_PARM_DESC(atomic_wr_gran, "minimum granularity of atomic write in blocks (def=2)");
7484 MODULE_PARM_DESC(atomic_wr_max_length_bndry, "max # of blocks can be atomically written in one cmd with boundary set (def=8192)");
7485 MODULE_PARM_DESC(atomic_wr_max_bndry, "max # boundaries per atomic write (def=128)");
7486 MODULE_PARM_DESC(uuid_ctl,
7487 		 "1->use uuid for lu name, 0->don't, 2->all use same (def=0)");
7488 MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)");
7489 MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
7490 MODULE_PARM_DESC(wp, "Write Protect (def=0)");
7491 MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
7492 MODULE_PARM_DESC(zbc, "'none' [0]; 'aware' [1]; 'managed' [2] (def=0). Can have 'host-' prefix");
7493 MODULE_PARM_DESC(zone_cap_mb, "Zone capacity in MiB (def=zone size)");
7494 MODULE_PARM_DESC(zone_max_open, "Maximum number of open zones; [0] for no limit (def=auto)");
7495 MODULE_PARM_DESC(zone_nr_conv, "Number of conventional zones (def=1)");
7496 MODULE_PARM_DESC(zone_size_mb, "Zone size in MiB (def=auto)");
7497 MODULE_PARM_DESC(allow_restart, "Set scsi_device's allow_restart flag(def=0)");
7498 
7499 #define SDEBUG_INFO_LEN 256
7500 static char sdebug_info[SDEBUG_INFO_LEN];
7501 
7502 static const char *scsi_debug_info(struct Scsi_Host *shp)
7503 {
7504 	int k;
7505 
7506 	k = scnprintf(sdebug_info, SDEBUG_INFO_LEN, "%s: version %s [%s]\n",
7507 		      my_name, SDEBUG_VERSION, sdebug_version_date);
7508 	if (k >= (SDEBUG_INFO_LEN - 1))
7509 		return sdebug_info;
7510 	scnprintf(sdebug_info + k, SDEBUG_INFO_LEN - k,
7511 		  "  dev_size_mb=%d, opts=0x%x, submit_queues=%d, %s=%d",
7512 		  sdebug_dev_size_mb, sdebug_opts, submit_queues,
7513 		  "statistics", (int)sdebug_statistics);
7514 	return sdebug_info;
7515 }
7516 
7517 /* 'echo <val> > /proc/scsi/scsi_debug/<host_id>' writes to opts */
7518 static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer,
7519 				 int length)
7520 {
7521 	char arr[16];
7522 	int opts;
7523 	int minLen = length > 15 ? 15 : length;
7524 
7525 	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
7526 		return -EACCES;
7527 	memcpy(arr, buffer, minLen);
7528 	arr[minLen] = '\0';
7529 	if (1 != sscanf(arr, "%d", &opts))
7530 		return -EINVAL;
7531 	sdebug_opts = opts;
7532 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
7533 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
7534 	if (sdebug_every_nth != 0)
7535 		tweak_cmnd_count();
7536 	return length;
7537 }
7538 
/* Per-queue accumulator used by sdebug_submit_queue_iter(). */
struct sdebug_submit_queue_data {
	int *first;	/* out: first (lowest) busy tag seen, -1 if none */
	int *last;	/* out: last (highest) busy tag seen */
	int queue_num;	/* in: hardware queue to report on */
};
7544 
7545 static bool sdebug_submit_queue_iter(struct request *rq, void *opaque)
7546 {
7547 	struct sdebug_submit_queue_data *data = opaque;
7548 	u32 unique_tag = blk_mq_unique_tag(rq);
7549 	u16 hwq = blk_mq_unique_tag_to_hwq(unique_tag);
7550 	u16 tag = blk_mq_unique_tag_to_tag(unique_tag);
7551 	int queue_num = data->queue_num;
7552 
7553 	if (hwq != queue_num)
7554 		return true;
7555 
7556 	/* Rely on iter'ing in ascending tag order */
7557 	if (*data->first == -1)
7558 		*data->first = *data->last = tag;
7559 	else
7560 		*data->last = tag;
7561 
7562 	return true;
7563 }
7564 
/* Output seen with 'cat /proc/scsi/scsi_debug/<host_id>'. It will be the
 * same for each scsi_debug host (if more than one). Some of the counters
 * output are not atomics so might be inaccurate in a busy system. */
static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
{
	struct sdebug_host_info *sdhp;
	int j;

	/* Static configuration: version, sizing and option knobs. */
	seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n",
		   SDEBUG_VERSION, sdebug_version_date);
	seq_printf(m, "num_tgts=%d, %ssize=%d MB, opts=0x%x, every_nth=%d\n",
		   sdebug_num_tgts, "shared (ram) ", sdebug_dev_size_mb,
		   sdebug_opts, sdebug_every_nth);
	seq_printf(m, "delay=%d, ndelay=%d, max_luns=%d, sector_size=%d %s\n",
		   sdebug_jdelay, sdebug_ndelay, sdebug_max_luns,
		   sdebug_sector_size, "bytes");
	seq_printf(m, "cylinders=%d, heads=%d, sectors=%d, command aborts=%d\n",
		   sdebug_cylinders_per, sdebug_heads, sdebug_sectors_per,
		   num_aborts);
	/* Counters accumulated by the error-handler and DIF/DIX paths. */
	seq_printf(m, "RESETs: device=%d, target=%d, bus=%d, host=%d\n",
		   num_dev_resets, num_target_resets, num_bus_resets,
		   num_host_resets);
	seq_printf(m, "dix_reads=%d, dix_writes=%d, dif_errors=%d\n",
		   dix_reads, dix_writes, dif_errors);
	seq_printf(m, "usec_in_jiffy=%lu, statistics=%d\n", TICK_NSEC / 1000,
		   sdebug_statistics);
	seq_printf(m, "cmnd_count=%d, completions=%d, %s=%d, a_tsf=%d, mq_polls=%d\n",
		   atomic_read(&sdebug_cmnd_count),
		   atomic_read(&sdebug_completions),
		   "miss_cpus", atomic_read(&sdebug_miss_cpus),
		   atomic_read(&sdebug_a_tsf),
		   atomic_read(&sdeb_mq_poll_count));

	/* Per-hw-queue report of busy tag range (if any requests in flight). */
	seq_printf(m, "submit_queues=%d\n", submit_queues);
	for (j = 0; j < submit_queues; ++j) {
		int f = -1, l = -1;	/* first/last busy tag, -1 = none seen */
		struct sdebug_submit_queue_data data = {
			.queue_num = j,
			.first = &f,
			.last = &l,
		};
		seq_printf(m, "  queue %d:\n", j);
		blk_mq_tagset_busy_iter(&host->tag_set, sdebug_submit_queue_iter,
					&data);
		if (f >= 0) {
			seq_printf(m, "    BUSY: %s: %d,%d\n",
				   "first,last bits", f, l);
		}
	}

	seq_printf(m, "this host_no=%d\n", host->host_no);
	/* Map each simulated host to its backing store index, then dump the
	 * per-store xarray (entries marked not_in_use are retained for reuse).
	 */
	if (!xa_empty(per_store_ap)) {
		bool niu;
		int idx;
		unsigned long l_idx;
		struct sdeb_store_info *sip;

		seq_puts(m, "\nhost list:\n");
		j = 0;
		list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
			idx = sdhp->si_idx;
			seq_printf(m, "  %d: host_no=%d, si_idx=%d\n", j,
				   sdhp->shost->host_no, idx);
			++j;
		}
		seq_printf(m, "\nper_store array [most_recent_idx=%d]:\n",
			   sdeb_most_recent_idx);
		j = 0;
		xa_for_each(per_store_ap, l_idx, sip) {
			niu = xa_get_mark(per_store_ap, l_idx,
					  SDEB_XA_NOT_IN_USE);
			idx = (int)l_idx;
			seq_printf(m, "  %d: idx=%d%s\n", j, idx,
				   (niu ? "  not_in_use" : ""));
			++j;
		}
	}
	return 0;
}
7644 
7645 static ssize_t delay_show(struct device_driver *ddp, char *buf)
7646 {
7647 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_jdelay);
7648 }
7649 /* Returns -EBUSY if jdelay is being changed and commands are queued. The unit
7650  * of delay is jiffies.
7651  */
7652 static ssize_t delay_store(struct device_driver *ddp, const char *buf,
7653 			   size_t count)
7654 {
7655 	int jdelay, res;
7656 
7657 	if (count > 0 && sscanf(buf, "%d", &jdelay) == 1) {
7658 		res = count;
7659 		if (sdebug_jdelay != jdelay) {
7660 			struct sdebug_host_info *sdhp;
7661 
7662 			mutex_lock(&sdebug_host_list_mutex);
7663 			block_unblock_all_queues(true);
7664 
7665 			list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
7666 				struct Scsi_Host *shost = sdhp->shost;
7667 
7668 				if (scsi_host_busy(shost)) {
7669 					res = -EBUSY;   /* queued commands */
7670 					break;
7671 				}
7672 			}
7673 			if (res > 0) {
7674 				sdebug_jdelay = jdelay;
7675 				sdebug_ndelay = 0;
7676 			}
7677 			block_unblock_all_queues(false);
7678 			mutex_unlock(&sdebug_host_list_mutex);
7679 		}
7680 		return res;
7681 	}
7682 	return -EINVAL;
7683 }
7684 static DRIVER_ATTR_RW(delay);
7685 
7686 static ssize_t ndelay_show(struct device_driver *ddp, char *buf)
7687 {
7688 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ndelay);
7689 }
7690 /* Returns -EBUSY if ndelay is being changed and commands are queued */
7691 /* If > 0 and accepted then sdebug_jdelay is set to JDELAY_OVERRIDDEN */
7692 static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
7693 			    size_t count)
7694 {
7695 	int ndelay, res;
7696 
7697 	if ((count > 0) && (1 == sscanf(buf, "%d", &ndelay)) &&
7698 	    (ndelay >= 0) && (ndelay < (1000 * 1000 * 1000))) {
7699 		res = count;
7700 		if (sdebug_ndelay != ndelay) {
7701 			struct sdebug_host_info *sdhp;
7702 
7703 			mutex_lock(&sdebug_host_list_mutex);
7704 			block_unblock_all_queues(true);
7705 
7706 			list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
7707 				struct Scsi_Host *shost = sdhp->shost;
7708 
7709 				if (scsi_host_busy(shost)) {
7710 					res = -EBUSY;   /* queued commands */
7711 					break;
7712 				}
7713 			}
7714 
7715 			if (res > 0) {
7716 				sdebug_ndelay = ndelay;
7717 				sdebug_jdelay = ndelay  ? JDELAY_OVERRIDDEN
7718 							: DEF_JDELAY;
7719 			}
7720 			block_unblock_all_queues(false);
7721 			mutex_unlock(&sdebug_host_list_mutex);
7722 		}
7723 		return res;
7724 	}
7725 	return -EINVAL;
7726 }
7727 static DRIVER_ATTR_RW(ndelay);
7728 
7729 static ssize_t opts_show(struct device_driver *ddp, char *buf)
7730 {
7731 	return scnprintf(buf, PAGE_SIZE, "0x%x\n", sdebug_opts);
7732 }
7733 
7734 static ssize_t opts_store(struct device_driver *ddp, const char *buf,
7735 			  size_t count)
7736 {
7737 	int opts;
7738 	char work[20];
7739 
7740 	if (sscanf(buf, "%10s", work) == 1) {
7741 		if (strncasecmp(work, "0x", 2) == 0) {
7742 			if (kstrtoint(work + 2, 16, &opts) == 0)
7743 				goto opts_done;
7744 		} else {
7745 			if (kstrtoint(work, 10, &opts) == 0)
7746 				goto opts_done;
7747 		}
7748 	}
7749 	return -EINVAL;
7750 opts_done:
7751 	sdebug_opts = opts;
7752 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
7753 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
7754 	tweak_cmnd_count();
7755 	return count;
7756 }
7757 static DRIVER_ATTR_RW(opts);
7758 
7759 static ssize_t ptype_show(struct device_driver *ddp, char *buf)
7760 {
7761 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ptype);
7762 }
7763 static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
7764 			   size_t count)
7765 {
7766 	int n;
7767 
7768 	/* Cannot change from or to TYPE_ZBC with sysfs */
7769 	if (sdebug_ptype == TYPE_ZBC)
7770 		return -EINVAL;
7771 
7772 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
7773 		if (n == TYPE_ZBC)
7774 			return -EINVAL;
7775 		sdebug_ptype = n;
7776 		return count;
7777 	}
7778 	return -EINVAL;
7779 }
7780 static DRIVER_ATTR_RW(ptype);
7781 
7782 static ssize_t dsense_show(struct device_driver *ddp, char *buf)
7783 {
7784 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dsense);
7785 }
7786 static ssize_t dsense_store(struct device_driver *ddp, const char *buf,
7787 			    size_t count)
7788 {
7789 	int n;
7790 
7791 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
7792 		sdebug_dsense = n;
7793 		return count;
7794 	}
7795 	return -EINVAL;
7796 }
7797 static DRIVER_ATTR_RW(dsense);
7798 
static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_fake_rw);
}
/*
 * Toggling fake_rw also manages the backing ram store(s):
 *  - 1 --> 0: ensure a store exists (create or reuse the first one) and
 *    point every host at that single shared store;
 *  - 0 --> 1: release all stores apart from the first.
 */
static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
			     size_t count)
{
	int n, idx;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		bool want_store = (n == 0);	/* fake_rw off needs a real store */
		struct sdebug_host_info *sdhp;

		n = (n > 0);	/* normalize both sides to 0 or 1 */
		sdebug_fake_rw = (sdebug_fake_rw > 0);
		if (sdebug_fake_rw == n)
			return count;	/* not transitioning so do nothing */

		if (want_store) {	/* 1 --> 0 transition, set up store */
			if (sdeb_first_idx < 0) {
				idx = sdebug_add_store();
				if (idx < 0)
					return idx;
			} else {
				/* reuse the retained first store */
				idx = sdeb_first_idx;
				xa_clear_mark(per_store_ap, idx,
					      SDEB_XA_NOT_IN_USE);
			}
			/* make all hosts use same store */
			list_for_each_entry(sdhp, &sdebug_host_list,
					    host_list) {
				if (sdhp->si_idx != idx) {
					/* old store kept but marked unused */
					xa_set_mark(per_store_ap, sdhp->si_idx,
						    SDEB_XA_NOT_IN_USE);
					sdhp->si_idx = idx;
				}
			}
			sdeb_most_recent_idx = idx;
		} else {	/* 0 --> 1 transition is trigger for shrink */
			sdebug_erase_all_stores(true /* apart from first */);
		}
		sdebug_fake_rw = n;
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(fake_rw);
7846 
7847 static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf)
7848 {
7849 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_lun_0);
7850 }
7851 static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf,
7852 			      size_t count)
7853 {
7854 	int n;
7855 
7856 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
7857 		sdebug_no_lun_0 = n;
7858 		return count;
7859 	}
7860 	return -EINVAL;
7861 }
7862 static DRIVER_ATTR_RW(no_lun_0);
7863 
7864 static ssize_t num_tgts_show(struct device_driver *ddp, char *buf)
7865 {
7866 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_tgts);
7867 }
7868 static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf,
7869 			      size_t count)
7870 {
7871 	int n;
7872 
7873 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
7874 		sdebug_num_tgts = n;
7875 		sdebug_max_tgts_luns();
7876 		return count;
7877 	}
7878 	return -EINVAL;
7879 }
7880 static DRIVER_ATTR_RW(num_tgts);
7881 
/* Read-only: per-store ram size is fixed at module load. */
static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dev_size_mb);
}
static DRIVER_ATTR_RO(dev_size_mb);
7887 
7888 static ssize_t per_host_store_show(struct device_driver *ddp, char *buf)
7889 {
7890 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_per_host_store);
7891 }
7892 
7893 static ssize_t per_host_store_store(struct device_driver *ddp, const char *buf,
7894 				    size_t count)
7895 {
7896 	bool v;
7897 
7898 	if (kstrtobool(buf, &v))
7899 		return -EINVAL;
7900 
7901 	sdebug_per_host_store = v;
7902 	return count;
7903 }
7904 static DRIVER_ATTR_RW(per_host_store);
7905 
/* Read-only: partition count is fixed at module load. */
static ssize_t num_parts_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_parts);
}
static DRIVER_ATTR_RO(num_parts);
7911 
7912 static ssize_t every_nth_show(struct device_driver *ddp, char *buf)
7913 {
7914 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_every_nth);
7915 }
7916 static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
7917 			       size_t count)
7918 {
7919 	int nth;
7920 	char work[20];
7921 
7922 	if (sscanf(buf, "%10s", work) == 1) {
7923 		if (strncasecmp(work, "0x", 2) == 0) {
7924 			if (kstrtoint(work + 2, 16, &nth) == 0)
7925 				goto every_nth_done;
7926 		} else {
7927 			if (kstrtoint(work, 10, &nth) == 0)
7928 				goto every_nth_done;
7929 		}
7930 	}
7931 	return -EINVAL;
7932 
7933 every_nth_done:
7934 	sdebug_every_nth = nth;
7935 	if (nth && !sdebug_statistics) {
7936 		pr_info("every_nth needs statistics=1, set it\n");
7937 		sdebug_statistics = true;
7938 	}
7939 	tweak_cmnd_count();
7940 	return count;
7941 }
7942 static DRIVER_ATTR_RW(every_nth);
7943 
7944 static ssize_t lun_format_show(struct device_driver *ddp, char *buf)
7945 {
7946 	return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_lun_am);
7947 }
7948 static ssize_t lun_format_store(struct device_driver *ddp, const char *buf,
7949 				size_t count)
7950 {
7951 	int n;
7952 	bool changed;
7953 
7954 	if (kstrtoint(buf, 0, &n))
7955 		return -EINVAL;
7956 	if (n >= 0) {
7957 		if (n > (int)SAM_LUN_AM_FLAT) {
7958 			pr_warn("only LUN address methods 0 and 1 are supported\n");
7959 			return -EINVAL;
7960 		}
7961 		changed = ((int)sdebug_lun_am != n);
7962 		sdebug_lun_am = n;
7963 		if (changed && sdebug_scsi_level >= 5) {	/* >= SPC-3 */
7964 			struct sdebug_host_info *sdhp;
7965 			struct sdebug_dev_info *dp;
7966 
7967 			mutex_lock(&sdebug_host_list_mutex);
7968 			list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
7969 				list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
7970 					set_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
7971 				}
7972 			}
7973 			mutex_unlock(&sdebug_host_list_mutex);
7974 		}
7975 		return count;
7976 	}
7977 	return -EINVAL;
7978 }
7979 static DRIVER_ATTR_RW(lun_format);
7980 
7981 static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
7982 {
7983 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_luns);
7984 }
7985 static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
7986 			      size_t count)
7987 {
7988 	int n;
7989 	bool changed;
7990 
7991 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
7992 		if (n > 256) {
7993 			pr_warn("max_luns can be no more than 256\n");
7994 			return -EINVAL;
7995 		}
7996 		changed = (sdebug_max_luns != n);
7997 		sdebug_max_luns = n;
7998 		sdebug_max_tgts_luns();
7999 		if (changed && (sdebug_scsi_level >= 5)) {	/* >= SPC-3 */
8000 			struct sdebug_host_info *sdhp;
8001 			struct sdebug_dev_info *dp;
8002 
8003 			mutex_lock(&sdebug_host_list_mutex);
8004 			list_for_each_entry(sdhp, &sdebug_host_list,
8005 					    host_list) {
8006 				list_for_each_entry(dp, &sdhp->dev_info_list,
8007 						    dev_list) {
8008 					set_bit(SDEBUG_UA_LUNS_CHANGED,
8009 						dp->uas_bm);
8010 				}
8011 			}
8012 			mutex_unlock(&sdebug_host_list_mutex);
8013 		}
8014 		return count;
8015 	}
8016 	return -EINVAL;
8017 }
8018 static DRIVER_ATTR_RW(max_luns);
8019 
8020 static ssize_t max_queue_show(struct device_driver *ddp, char *buf)
8021 {
8022 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_queue);
8023 }
8024 /* N.B. max_queue can be changed while there are queued commands. In flight
8025  * commands beyond the new max_queue will be completed. */
8026 static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
8027 			       size_t count)
8028 {
8029 	int n;
8030 
8031 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
8032 	    (n <= SDEBUG_CANQUEUE) &&
8033 	    (sdebug_host_max_queue == 0)) {
8034 		mutex_lock(&sdebug_host_list_mutex);
8035 
8036 		/* We may only change sdebug_max_queue when we have no shosts */
8037 		if (list_empty(&sdebug_host_list))
8038 			sdebug_max_queue = n;
8039 		else
8040 			count = -EBUSY;
8041 		mutex_unlock(&sdebug_host_list_mutex);
8042 		return count;
8043 	}
8044 	return -EINVAL;
8045 }
8046 static DRIVER_ATTR_RW(max_queue);
8047 
/* Show-only half of the host_max_queue attribute (DRIVER_ATTR_RO below). */
static ssize_t host_max_queue_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_host_max_queue);
}
8052 
8053 static ssize_t no_rwlock_show(struct device_driver *ddp, char *buf)
8054 {
8055 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_rwlock);
8056 }
8057 
8058 static ssize_t no_rwlock_store(struct device_driver *ddp, const char *buf, size_t count)
8059 {
8060 	bool v;
8061 
8062 	if (kstrtobool(buf, &v))
8063 		return -EINVAL;
8064 
8065 	sdebug_no_rwlock = v;
8066 	return count;
8067 }
8068 static DRIVER_ATTR_RW(no_rwlock);
8069 
/*
 * Since this is used for .can_queue, and we get the hc_idx tag from the bitmap
 * in range [0, sdebug_host_max_queue), we can't change it.
 */
static DRIVER_ATTR_RO(host_max_queue);
8075 
/* Read-only: whether upper-level drivers (e.g. sd) are blocked from attaching. */
static ssize_t no_uld_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_uld);
}
static DRIVER_ATTR_RO(no_uld);

/* Read-only: simulated SCSI level, fixed at module load. */
static ssize_t scsi_level_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_scsi_level);
}
static DRIVER_ATTR_RO(scsi_level);
8087 
8088 static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf)
8089 {
8090 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_virtual_gb);
8091 }
8092 static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
8093 				size_t count)
8094 {
8095 	int n;
8096 	bool changed;
8097 
8098 	/* Ignore capacity change for ZBC drives for now */
8099 	if (sdeb_zbc_in_use)
8100 		return -ENOTSUPP;
8101 
8102 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
8103 		changed = (sdebug_virtual_gb != n);
8104 		sdebug_virtual_gb = n;
8105 		sdebug_capacity = get_sdebug_capacity();
8106 		if (changed) {
8107 			struct sdebug_host_info *sdhp;
8108 			struct sdebug_dev_info *dp;
8109 
8110 			mutex_lock(&sdebug_host_list_mutex);
8111 			list_for_each_entry(sdhp, &sdebug_host_list,
8112 					    host_list) {
8113 				list_for_each_entry(dp, &sdhp->dev_info_list,
8114 						    dev_list) {
8115 					set_bit(SDEBUG_UA_CAPACITY_CHANGED,
8116 						dp->uas_bm);
8117 				}
8118 			}
8119 			mutex_unlock(&sdebug_host_list_mutex);
8120 		}
8121 		return count;
8122 	}
8123 	return -EINVAL;
8124 }
8125 static DRIVER_ATTR_RW(virtual_gb);
8126 
static ssize_t add_host_show(struct device_driver *ddp, char *buf)
{
	/* absolute number of hosts currently active is what is shown */
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_hosts);
}

/*
 * Positive value adds that many hosts, negative removes that many.
 * With per_host_store active (and fake_rw off), an added host first tries
 * to reuse a store marked SDEB_XA_NOT_IN_USE before creating a new one.
 */
static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
			      size_t count)
{
	bool found;
	unsigned long idx;
	struct sdeb_store_info *sip;
	/* per-host stores only make sense when data is actually stored */
	bool want_phs = (sdebug_fake_rw == 0) && sdebug_per_host_store;
	int delta_hosts;

	if (sscanf(buf, "%d", &delta_hosts) != 1)
		return -EINVAL;
	if (delta_hosts > 0) {
		do {
			found = false;
			if (want_phs) {
				/* look for a retained, currently unused store */
				xa_for_each_marked(per_store_ap, idx, sip,
						   SDEB_XA_NOT_IN_USE) {
					sdeb_most_recent_idx = (int)idx;
					found = true;
					break;
				}
				if (found)	/* re-use case */
					sdebug_add_host_helper((int)idx);
				else
					sdebug_do_add_host(true);
			} else {
				sdebug_do_add_host(false);
			}
		} while (--delta_hosts);
	} else if (delta_hosts < 0) {
		do {
			sdebug_do_remove_host(false);
		} while (++delta_hosts);
	}
	return count;
}
static DRIVER_ATTR_RW(add_host);
8170 
8171 static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf)
8172 {
8173 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_vpd_use_hostno);
8174 }
8175 static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf,
8176 				    size_t count)
8177 {
8178 	int n;
8179 
8180 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
8181 		sdebug_vpd_use_hostno = n;
8182 		return count;
8183 	}
8184 	return -EINVAL;
8185 }
8186 static DRIVER_ATTR_RW(vpd_use_hostno);
8187 
8188 static ssize_t statistics_show(struct device_driver *ddp, char *buf)
8189 {
8190 	return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_statistics);
8191 }
8192 static ssize_t statistics_store(struct device_driver *ddp, const char *buf,
8193 				size_t count)
8194 {
8195 	int n;
8196 
8197 	if ((count > 0) && (sscanf(buf, "%d", &n) == 1) && (n >= 0)) {
8198 		if (n > 0)
8199 			sdebug_statistics = true;
8200 		else {
8201 			clear_queue_stats();
8202 			sdebug_statistics = false;
8203 		}
8204 		return count;
8205 	}
8206 	return -EINVAL;
8207 }
8208 static DRIVER_ATTR_RW(statistics);
8209 
/* Read-only: logical block size, fixed at module load. */
static ssize_t sector_size_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_sector_size);
}
static DRIVER_ATTR_RO(sector_size);

/* Read-only: number of block-mq submit queues, fixed at module load. */
static ssize_t submit_queues_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", submit_queues);
}
static DRIVER_ATTR_RO(submit_queues);

/* Read-only: data integrity extensions (DIX) mask. */
static ssize_t dix_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dix);
}
static DRIVER_ATTR_RO(dix);

/* Read-only: data integrity field (DIF) type 0-3. */
static ssize_t dif_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dif);
}
static DRIVER_ATTR_RO(dif);

/* Read-only: protection checksum type, 0=crc 1=ip. */
static ssize_t guard_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_guard);
}
static DRIVER_ATTR_RO(guard);

/* Read-only: application tag ownership, 0=disk 1=host. */
static ssize_t ato_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ato);
}
static DRIVER_ATTR_RO(ato);
8245 
/*
 * Show the provisioning map of store 0 as a bit-list (e.g. "0-3,64-67").
 * Without logical block provisioning the whole range is reported mapped.
 */
static ssize_t map_show(struct device_driver *ddp, char *buf)
{
	ssize_t count = 0;

	if (!scsi_debug_lbp())
		return scnprintf(buf, PAGE_SIZE, "0-%u\n",
				 sdebug_store_sectors);

	if (sdebug_fake_rw == 0 && !xa_empty(per_store_ap)) {
		struct sdeb_store_info *sip = xa_load(per_store_ap, 0);

		/* PAGE_SIZE - 1 leaves room for the '\n' appended below */
		if (sip)
			count = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
					  (int)map_size, sip->map_storep);
	}
	buf[count++] = '\n';
	buf[count] = '\0';

	return count;
}
static DRIVER_ATTR_RO(map);
8267 
8268 static ssize_t random_show(struct device_driver *ddp, char *buf)
8269 {
8270 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_random);
8271 }
8272 
8273 static ssize_t random_store(struct device_driver *ddp, const char *buf,
8274 			    size_t count)
8275 {
8276 	bool v;
8277 
8278 	if (kstrtobool(buf, &v))
8279 		return -EINVAL;
8280 
8281 	sdebug_random = v;
8282 	return count;
8283 }
8284 static DRIVER_ATTR_RW(random);
8285 
8286 static ssize_t removable_show(struct device_driver *ddp, char *buf)
8287 {
8288 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_removable ? 1 : 0);
8289 }
8290 static ssize_t removable_store(struct device_driver *ddp, const char *buf,
8291 			       size_t count)
8292 {
8293 	int n;
8294 
8295 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
8296 		sdebug_removable = (n > 0);
8297 		return count;
8298 	}
8299 	return -EINVAL;
8300 }
8301 static DRIVER_ATTR_RW(removable);
8302 
8303 static ssize_t host_lock_show(struct device_driver *ddp, char *buf)
8304 {
8305 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_host_lock);
8306 }
8307 /* N.B. sdebug_host_lock does nothing, kept for backward compatibility */
8308 static ssize_t host_lock_store(struct device_driver *ddp, const char *buf,
8309 			       size_t count)
8310 {
8311 	int n;
8312 
8313 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
8314 		sdebug_host_lock = (n > 0);
8315 		return count;
8316 	}
8317 	return -EINVAL;
8318 }
8319 static DRIVER_ATTR_RW(host_lock);
8320 
8321 static ssize_t strict_show(struct device_driver *ddp, char *buf)
8322 {
8323 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_strict);
8324 }
8325 static ssize_t strict_store(struct device_driver *ddp, const char *buf,
8326 			    size_t count)
8327 {
8328 	int n;
8329 
8330 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
8331 		sdebug_strict = (n > 0);
8332 		return count;
8333 	}
8334 	return -EINVAL;
8335 }
8336 static DRIVER_ATTR_RW(strict);
8337 
/* Read-only: whether LU names are UUID-based, fixed at module load. */
static ssize_t uuid_ctl_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_uuid_ctl);
}
static DRIVER_ATTR_RO(uuid_ctl);
8343 
8344 static ssize_t cdb_len_show(struct device_driver *ddp, char *buf)
8345 {
8346 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_cdb_len);
8347 }
8348 static ssize_t cdb_len_store(struct device_driver *ddp, const char *buf,
8349 			     size_t count)
8350 {
8351 	int ret, n;
8352 
8353 	ret = kstrtoint(buf, 0, &n);
8354 	if (ret)
8355 		return ret;
8356 	sdebug_cdb_len = n;
8357 	all_config_cdb_len();
8358 	return count;
8359 }
8360 static DRIVER_ATTR_RW(cdb_len);
8361 
/* Accepted spellings for the zbc= module parameter; indexed by BLK_ZONED_*.
 * Table _a also provides the canonical names shown by zbc_show().
 */
static const char * const zbc_model_strs_a[] = {
	[BLK_ZONED_NONE] = "none",
	[BLK_ZONED_HA]   = "host-aware",
	[BLK_ZONED_HM]   = "host-managed",
};

/* Alternative short spellings (matches the 'host-' prefix being optional). */
static const char * const zbc_model_strs_b[] = {
	[BLK_ZONED_NONE] = "no",
	[BLK_ZONED_HA]   = "aware",
	[BLK_ZONED_HM]   = "managed",
};

/* Numeric spellings. */
static const char * const zbc_model_strs_c[] = {
	[BLK_ZONED_NONE] = "0",
	[BLK_ZONED_HA]   = "1",
	[BLK_ZONED_HM]   = "2",
};
8379 
8380 static int sdeb_zbc_model_str(const char *cp)
8381 {
8382 	int res = sysfs_match_string(zbc_model_strs_a, cp);
8383 
8384 	if (res < 0) {
8385 		res = sysfs_match_string(zbc_model_strs_b, cp);
8386 		if (res < 0) {
8387 			res = sysfs_match_string(zbc_model_strs_c, cp);
8388 			if (res < 0)
8389 				return -EINVAL;
8390 		}
8391 	}
8392 	return res;
8393 }
8394 
8395 static ssize_t zbc_show(struct device_driver *ddp, char *buf)
8396 {
8397 	return scnprintf(buf, PAGE_SIZE, "%s\n",
8398 			 zbc_model_strs_a[sdeb_zbc_model]);
8399 }
8400 static DRIVER_ATTR_RO(zbc);
8401 
8402 static ssize_t tur_ms_to_ready_show(struct device_driver *ddp, char *buf)
8403 {
8404 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdeb_tur_ms_to_ready);
8405 }
8406 static DRIVER_ATTR_RO(tur_ms_to_ready);
8407 
8408 static ssize_t group_number_stats_show(struct device_driver *ddp, char *buf)
8409 {
8410 	char *p = buf, *end = buf + PAGE_SIZE;
8411 	int i;
8412 
8413 	for (i = 0; i < ARRAY_SIZE(writes_by_group_number); i++)
8414 		p += scnprintf(p, end - p, "%d %ld\n", i,
8415 			       atomic_long_read(&writes_by_group_number[i]));
8416 
8417 	return p - buf;
8418 }
8419 
8420 static ssize_t group_number_stats_store(struct device_driver *ddp,
8421 					const char *buf, size_t count)
8422 {
8423 	int i;
8424 
8425 	for (i = 0; i < ARRAY_SIZE(writes_by_group_number); i++)
8426 		atomic_long_set(&writes_by_group_number[i], 0);
8427 
8428 	return count;
8429 }
8430 static DRIVER_ATTR_RW(group_number_stats);
8431 
8432 /* Note: The following array creates attribute files in the
8433    /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
8434    files (over those found in the /sys/module/scsi_debug/parameters
8435    directory) is that auxiliary actions can be triggered when an attribute
8436    is changed. For example see: add_host_store() above.
8437  */
8438 
/* Driver attribute table; each entry becomes a sysfs file (see note above). */
static struct attribute *sdebug_drv_attrs[] = {
	&driver_attr_delay.attr,
	&driver_attr_opts.attr,
	&driver_attr_ptype.attr,
	&driver_attr_dsense.attr,
	&driver_attr_fake_rw.attr,
	&driver_attr_host_max_queue.attr,
	&driver_attr_no_lun_0.attr,
	&driver_attr_num_tgts.attr,
	&driver_attr_dev_size_mb.attr,
	&driver_attr_num_parts.attr,
	&driver_attr_every_nth.attr,
	&driver_attr_lun_format.attr,
	&driver_attr_max_luns.attr,
	&driver_attr_max_queue.attr,
	&driver_attr_no_rwlock.attr,
	&driver_attr_no_uld.attr,
	&driver_attr_scsi_level.attr,
	&driver_attr_virtual_gb.attr,
	&driver_attr_add_host.attr,
	&driver_attr_per_host_store.attr,
	&driver_attr_vpd_use_hostno.attr,
	&driver_attr_sector_size.attr,
	&driver_attr_statistics.attr,
	&driver_attr_submit_queues.attr,
	&driver_attr_dix.attr,
	&driver_attr_dif.attr,
	&driver_attr_guard.attr,
	&driver_attr_ato.attr,
	&driver_attr_map.attr,
	&driver_attr_random.attr,
	&driver_attr_removable.attr,
	&driver_attr_host_lock.attr,
	&driver_attr_ndelay.attr,
	&driver_attr_strict.attr,
	&driver_attr_uuid_ctl.attr,
	&driver_attr_cdb_len.attr,
	&driver_attr_tur_ms_to_ready.attr,
	&driver_attr_zbc.attr,
	&driver_attr_group_number_stats.attr,
	NULL,	/* sentinel */
};
ATTRIBUTE_GROUPS(sdebug_drv);
8482 
/* Root device ("pseudo_0") that parents all emulated adapter devices. */
static struct device *pseudo_primary;
8484 
/*
 * Module/boot-time initialization: validate module parameters, size the
 * emulated medium, optionally allocate the first backing store, register
 * the pseudo root device, bus and driver, then build the requested hosts.
 * Returns 0 on success or a negative errno (partial setup is unwound via
 * the goto labels at the bottom).
 */
static int __init scsi_debug_init(void)
{
	bool want_store = (sdebug_fake_rw == 0);	/* fake_rw!=0: no backing store */
	unsigned long sz;
	int k, ret, hosts_to_add;
	int idx = -1;	/* per-store xarray index; -1 == no store allocated */

	/* ndelay (nanoseconds) must be < 1 s; a positive ndelay overrides jdelay */
	if (sdebug_ndelay >= 1000 * 1000 * 1000) {
		pr_warn("ndelay must be less than 1 second, ignored\n");
		sdebug_ndelay = 0;
	} else if (sdebug_ndelay > 0)
		sdebug_jdelay = JDELAY_OVERRIDDEN;

	/* only these four power-of-two sector sizes are supported */
	switch (sdebug_sector_size) {
	case  512:
	case 1024:
	case 2048:
	case 4096:
		break;
	default:
		pr_err("invalid sector_size %d\n", sdebug_sector_size);
		return -EINVAL;
	}

	/* validate the T10 protection information (DIF) type */
	switch (sdebug_dif) {
	case T10_PI_TYPE0_PROTECTION:
		break;
	case T10_PI_TYPE1_PROTECTION:
	case T10_PI_TYPE2_PROTECTION:
	case T10_PI_TYPE3_PROTECTION:
		have_dif_prot = true;
		break;

	default:
		pr_err("dif must be 0, 1, 2 or 3\n");
		return -EINVAL;
	}

	if (sdebug_num_tgts < 0) {
		pr_err("num_tgts must be >= 0\n");
		return -EINVAL;
	}

	if (sdebug_guard > 1) {
		pr_err("guard must be 0 or 1\n");
		return -EINVAL;
	}

	if (sdebug_ato > 1) {
		pr_err("ato must be 0 or 1\n");
		return -EINVAL;
	}

	if (sdebug_physblk_exp > 15) {
		pr_err("invalid physblk_exp %u\n", sdebug_physblk_exp);
		return -EINVAL;
	}

	sdebug_lun_am = sdebug_lun_am_i;
	if (sdebug_lun_am > SAM_LUN_AM_FLAT) {
		pr_warn("Invalid LUN format %u, using default\n", (int)sdebug_lun_am);
		sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
	}

	/* more than 256 LUNs requires the flat LUN addressing method */
	if (sdebug_max_luns > 256) {
		if (sdebug_max_luns > 16384) {
			pr_warn("max_luns can be no more than 16384, use default\n");
			sdebug_max_luns = DEF_MAX_LUNS;
		}
		sdebug_lun_am = SAM_LUN_AM_FLAT;
	}

	if (sdebug_lowest_aligned > 0x3fff) {
		pr_err("lowest_aligned too big: %u\n", sdebug_lowest_aligned);
		return -EINVAL;
	}

	if (submit_queues < 1) {
		pr_err("submit_queues must be 1 or more\n");
		return -EINVAL;
	}

	if ((sdebug_max_queue > SDEBUG_CANQUEUE) || (sdebug_max_queue < 1)) {
		pr_err("max_queue must be in range [1, %d]\n", SDEBUG_CANQUEUE);
		return -EINVAL;
	}

	if ((sdebug_host_max_queue > SDEBUG_CANQUEUE) ||
	    (sdebug_host_max_queue < 0)) {
		pr_err("host_max_queue must be in range [0 %d]\n",
		       SDEBUG_CANQUEUE);
		return -EINVAL;
	}

	/* a non-zero host_max_queue also caps the submit queue depth */
	if (sdebug_host_max_queue &&
	    (sdebug_max_queue != sdebug_host_max_queue)) {
		sdebug_max_queue = sdebug_host_max_queue;
		pr_warn("fixing max submit queue depth to host max queue depth, %d\n",
			sdebug_max_queue);
	}

	/*
	 * check for host managed zoned block device specified with
	 * ptype=0x14 or zbc=XXX.
	 */
	if (sdebug_ptype == TYPE_ZBC) {
		sdeb_zbc_model = BLK_ZONED_HM;
	} else if (sdeb_zbc_model_s && *sdeb_zbc_model_s) {
		k = sdeb_zbc_model_str(sdeb_zbc_model_s);
		if (k < 0)
			return k;
		sdeb_zbc_model = k;
		switch (sdeb_zbc_model) {
		case BLK_ZONED_NONE:
		case BLK_ZONED_HA:
			sdebug_ptype = TYPE_DISK;
			break;
		case BLK_ZONED_HM:
			sdebug_ptype = TYPE_ZBC;
			break;
		default:
			pr_err("Invalid ZBC model\n");
			return -EINVAL;
		}
	}
	if (sdeb_zbc_model != BLK_ZONED_NONE) {
		sdeb_zbc_in_use = true;
		if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
			sdebug_dev_size_mb = DEF_ZBC_DEV_SIZE_MB;
	}

	if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
		sdebug_dev_size_mb = DEF_DEV_SIZE_MB;
	if (sdebug_dev_size_mb < 1)
		sdebug_dev_size_mb = 1;  /* force minimum 1 MB ramdisk */
	sz = (unsigned long)sdebug_dev_size_mb * 1048576;
	sdebug_store_sectors = sz / sdebug_sector_size;
	sdebug_capacity = get_sdebug_capacity();

	/* play around with geometry, don't waste too much on track 0 */
	sdebug_heads = 8;
	sdebug_sectors_per = 32;
	if (sdebug_dev_size_mb >= 256)
		sdebug_heads = 64;
	else if (sdebug_dev_size_mb >= 16)
		sdebug_heads = 32;
	sdebug_cylinders_per = (unsigned long)sdebug_capacity /
			       (sdebug_sectors_per * sdebug_heads);
	if (sdebug_cylinders_per >= 1024) {
		/* other LLDs do this; implies >= 1GB ram disk ... */
		sdebug_heads = 255;
		sdebug_sectors_per = 63;
		sdebug_cylinders_per = (unsigned long)sdebug_capacity /
			       (sdebug_sectors_per * sdebug_heads);
	}
	/* logical block provisioning (thin provisioning) parameter sanity */
	if (scsi_debug_lbp()) {
		sdebug_unmap_max_blocks =
			clamp(sdebug_unmap_max_blocks, 0U, 0xffffffffU);

		sdebug_unmap_max_desc =
			clamp(sdebug_unmap_max_desc, 0U, 256U);

		sdebug_unmap_granularity =
			clamp(sdebug_unmap_granularity, 1U, 0xffffffffU);

		if (sdebug_unmap_alignment &&
		    sdebug_unmap_granularity <=
		    sdebug_unmap_alignment) {
			pr_err("ERR: unmap_granularity <= unmap_alignment\n");
			return -EINVAL;
		}
	}

	xa_init_flags(per_store_ap, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
	if (want_store) {
		idx = sdebug_add_store();
		if (idx < 0)
			return idx;
	}

	pseudo_primary = root_device_register("pseudo_0");
	if (IS_ERR(pseudo_primary)) {
		pr_warn("root_device_register() error\n");
		ret = PTR_ERR(pseudo_primary);
		goto free_vm;
	}
	ret = bus_register(&pseudo_lld_bus);
	if (ret < 0) {
		pr_warn("bus_register error: %d\n", ret);
		goto dev_unreg;
	}
	ret = driver_register(&sdebug_driverfs_driver);
	if (ret < 0) {
		pr_warn("driver_register error: %d\n", ret);
		goto bus_unreg;
	}

	/* consume add_host now so the sysfs attribute starts from zero */
	hosts_to_add = sdebug_add_host;
	sdebug_add_host = 0;

	sdebug_debugfs_root = debugfs_create_dir("scsi_debug", NULL);
	if (IS_ERR_OR_NULL(sdebug_debugfs_root))
		pr_info("failed to create initial debugfs directory\n");

	/* first host reuses the store created above (when want_store) */
	for (k = 0; k < hosts_to_add; k++) {
		if (want_store && k == 0) {
			ret = sdebug_add_host_helper(idx);
			if (ret < 0) {
				pr_err("add_host_helper k=%d, error=%d\n",
				       k, -ret);
				break;
			}
		} else {
			ret = sdebug_do_add_host(want_store &&
						 sdebug_per_host_store);
			if (ret < 0) {
				pr_err("add_host k=%d error=%d\n", k, -ret);
				break;
			}
		}
	}
	if (sdebug_verbose)
		pr_info("built %d host(s)\n", sdebug_num_hosts);

	return 0;

bus_unreg:
	bus_unregister(&pseudo_lld_bus);
dev_unreg:
	root_device_unregister(pseudo_primary);
free_vm:
	sdebug_erase_store(idx, NULL);	/* no-op when idx < 0 */
	return ret;
}
8719 
8720 static void __exit scsi_debug_exit(void)
8721 {
8722 	int k = sdebug_num_hosts;
8723 
8724 	for (; k; k--)
8725 		sdebug_do_remove_host(true);
8726 	driver_unregister(&sdebug_driverfs_driver);
8727 	bus_unregister(&pseudo_lld_bus);
8728 	root_device_unregister(pseudo_primary);
8729 
8730 	sdebug_erase_all_stores(false);
8731 	xa_destroy(per_store_ap);
8732 	debugfs_remove(sdebug_debugfs_root);
8733 }
8734 
/* module entry and exit points */
device_initcall(scsi_debug_init);
module_exit(scsi_debug_exit);
8737 
/* ->release callback for emulated adapter devices: frees the host struct */
static void sdebug_release_adapter(struct device *dev)
{
	kfree(dev_to_sdebug_host(dev));
}
8745 
8746 /* idx must be valid, if sip is NULL then it will be obtained using idx */
8747 static void sdebug_erase_store(int idx, struct sdeb_store_info *sip)
8748 {
8749 	if (idx < 0)
8750 		return;
8751 	if (!sip) {
8752 		if (xa_empty(per_store_ap))
8753 			return;
8754 		sip = xa_load(per_store_ap, idx);
8755 		if (!sip)
8756 			return;
8757 	}
8758 	vfree(sip->map_storep);
8759 	vfree(sip->dif_storep);
8760 	vfree(sip->storep);
8761 	xa_erase(per_store_ap, idx);
8762 	kfree(sip);
8763 }
8764 
8765 /* Assume apart_from_first==false only in shutdown case. */
8766 static void sdebug_erase_all_stores(bool apart_from_first)
8767 {
8768 	unsigned long idx;
8769 	struct sdeb_store_info *sip = NULL;
8770 
8771 	xa_for_each(per_store_ap, idx, sip) {
8772 		if (apart_from_first)
8773 			apart_from_first = false;
8774 		else
8775 			sdebug_erase_store(idx, sip);
8776 	}
8777 	if (apart_from_first)
8778 		sdeb_most_recent_idx = sdeb_first_idx;
8779 }
8780 
/*
 * Returns store xarray new element index (idx) if >=0 else negated errno.
 * Limit the number of stores to 65536.
 */
static int sdebug_add_store(void)
{
	int res;
	u32 n_idx;
	unsigned long iflags;
	unsigned long sz = (unsigned long)sdebug_dev_size_mb * 1048576;
	struct sdeb_store_info *sip = NULL;
	struct xa_limit xal = { .max = 1 << 16, .min = 0 };	/* <= 65536 stores */

	sip = kzalloc(sizeof(*sip), GFP_KERNEL);
	if (!sip)
		return -ENOMEM;

	/* reserve an index under the xarray lock so adders get distinct slots */
	xa_lock_irqsave(per_store_ap, iflags);
	res = __xa_alloc(per_store_ap, &n_idx, sip, xal, GFP_ATOMIC);
	if (unlikely(res < 0)) {
		xa_unlock_irqrestore(per_store_ap, iflags);
		kfree(sip);
		pr_warn("xa_alloc() errno=%d\n", -res);
		return res;
	}
	sdeb_most_recent_idx = n_idx;
	if (sdeb_first_idx < 0)
		sdeb_first_idx = n_idx;
	xa_unlock_irqrestore(per_store_ap, iflags);

	res = -ENOMEM;
	sip->storep = vzalloc(sz);	/* the ramdisk user-data area */
	if (!sip->storep) {
		pr_err("user data oom\n");
		goto err;
	}
	if (sdebug_num_parts > 0)
		sdebug_build_parts(sip->storep, sz);

	/* DIF/DIX: what T10 calls Protection Information (PI) */
	if (sdebug_dix) {
		int dif_size;

		dif_size = sdebug_store_sectors * sizeof(struct t10_pi_tuple);
		sip->dif_storep = vmalloc(dif_size);

		pr_info("dif_storep %u bytes @ %p\n", dif_size,
			sip->dif_storep);

		if (!sip->dif_storep) {
			pr_err("DIX oom\n");
			goto err;
		}
		/* fill PI area with 0xff (the "unwritten" pattern) */
		memset(sip->dif_storep, 0xff, dif_size);
	}
	/* Logical Block Provisioning */
	if (scsi_debug_lbp()) {
		map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
		sip->map_storep = vcalloc(BITS_TO_LONGS(map_size),
					  sizeof(long));

		pr_info("%lu provisioning blocks\n", map_size);

		if (!sip->map_storep) {
			pr_err("LBP map oom\n");
			goto err;
		}

		/* Map first 1KB for partition table */
		if (sdebug_num_parts)
			map_region(sip, 0, 2);
	}

	rwlock_init(&sip->macc_data_lck);
	rwlock_init(&sip->macc_meta_lck);
	rwlock_init(&sip->macc_sector_lck);
	return (int)n_idx;
err:
	/* frees sip, its buffers, and removes the xarray entry */
	sdebug_erase_store((int)n_idx, sip);
	pr_warn("failed, errno=%d\n", -res);
	return res;
}
8863 
/*
 * Create one emulated host (adapter), pre-build its per-device info list
 * and register it with the driver model.  @per_host_idx selects the
 * backing store index; a negative value means use the first store.
 * Returns 0 on success else a negative errno.
 */
static int sdebug_add_host_helper(int per_host_idx)
{
	int k, devs_per_host, idx;
	int error = -ENOMEM;
	struct sdebug_host_info *sdbg_host;
	struct sdebug_dev_info *sdbg_devinfo, *tmp;

	sdbg_host = kzalloc(sizeof(*sdbg_host), GFP_KERNEL);
	if (!sdbg_host)
		return -ENOMEM;
	idx = (per_host_idx < 0) ? sdeb_first_idx : per_host_idx;
	/* re-activate the chosen store if it had been marked not-in-use */
	if (xa_get_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE))
		xa_clear_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
	sdbg_host->si_idx = idx;

	INIT_LIST_HEAD(&sdbg_host->dev_info_list);

	/* pre-create device info for every target/LUN combination */
	devs_per_host = sdebug_num_tgts * sdebug_max_luns;
	for (k = 0; k < devs_per_host; k++) {
		sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
		if (!sdbg_devinfo)
			goto clean;
	}

	mutex_lock(&sdebug_host_list_mutex);
	list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
	mutex_unlock(&sdebug_host_list_mutex);

	sdbg_host->dev.bus = &pseudo_lld_bus;
	sdbg_host->dev.parent = pseudo_primary;
	sdbg_host->dev.release = &sdebug_release_adapter;
	dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_num_hosts);

	error = device_register(&sdbg_host->dev);
	if (error) {
		/* unlink from the host list before dropping the reference */
		mutex_lock(&sdebug_host_list_mutex);
		list_del(&sdbg_host->host_list);
		mutex_unlock(&sdebug_host_list_mutex);
		goto clean;
	}

	++sdebug_num_hosts;
	return 0;

clean:
	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
				 dev_list) {
		list_del(&sdbg_devinfo->dev_list);
		kfree(sdbg_devinfo->zstate);
		kfree(sdbg_devinfo);
	}
	/*
	 * Once ->release has been set, the struct must be freed through the
	 * driver core (put_device -> sdebug_release_adapter); before that,
	 * a plain kfree() is correct.
	 */
	if (sdbg_host->dev.release)
		put_device(&sdbg_host->dev);
	else
		kfree(sdbg_host);
	pr_warn("failed, errno=%d\n", -error);
	return error;
}
8922 
8923 static int sdebug_do_add_host(bool mk_new_store)
8924 {
8925 	int ph_idx = sdeb_most_recent_idx;
8926 
8927 	if (mk_new_store) {
8928 		ph_idx = sdebug_add_store();
8929 		if (ph_idx < 0)
8930 			return ph_idx;
8931 	}
8932 	return sdebug_add_host_helper(ph_idx);
8933 }
8934 
/*
 * Remove the most recently added host.  When @the_end is false (runtime
 * removal) and no surviving host shares its backing store, that store is
 * marked not-in-use for possible reuse; at module unload (@the_end true)
 * the stores are torn down separately by sdebug_erase_all_stores().
 */
static void sdebug_do_remove_host(bool the_end)
{
	int idx = -1;
	struct sdebug_host_info *sdbg_host = NULL;
	struct sdebug_host_info *sdbg_host2;

	mutex_lock(&sdebug_host_list_mutex);
	if (!list_empty(&sdebug_host_list)) {
		/* pick the tail, i.e. the most recently added host */
		sdbg_host = list_entry(sdebug_host_list.prev,
				       struct sdebug_host_info, host_list);
		idx = sdbg_host->si_idx;
	}
	if (!the_end && idx >= 0) {
		bool unique = true;

		/* does any other host still use the same store? */
		list_for_each_entry(sdbg_host2, &sdebug_host_list, host_list) {
			if (sdbg_host2 == sdbg_host)
				continue;
			if (idx == sdbg_host2->si_idx) {
				unique = false;
				break;
			}
		}
		if (unique) {
			xa_set_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
			if (idx == sdeb_most_recent_idx)
				--sdeb_most_recent_idx;
		}
	}
	if (sdbg_host)
		list_del(&sdbg_host->host_list);
	mutex_unlock(&sdebug_host_list_mutex);

	if (!sdbg_host)
		return;

	device_unregister(&sdbg_host->dev);
	--sdebug_num_hosts;
}
8974 
8975 static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
8976 {
8977 	struct sdebug_dev_info *devip = sdev->hostdata;
8978 
8979 	if (!devip)
8980 		return	-ENODEV;
8981 
8982 	mutex_lock(&sdebug_host_list_mutex);
8983 	block_unblock_all_queues(true);
8984 
8985 	if (qdepth > SDEBUG_CANQUEUE) {
8986 		qdepth = SDEBUG_CANQUEUE;
8987 		pr_warn("requested qdepth [%d] exceeds canqueue [%d], trim\n",
8988 			qdepth, SDEBUG_CANQUEUE);
8989 	}
8990 	if (qdepth < 1)
8991 		qdepth = 1;
8992 	if (qdepth != sdev->queue_depth)
8993 		scsi_change_queue_depth(sdev, qdepth);
8994 
8995 	block_unblock_all_queues(false);
8996 	mutex_unlock(&sdebug_host_list_mutex);
8997 
8998 	if (SDEBUG_OPT_Q_NOISE & sdebug_opts)
8999 		sdev_printk(KERN_INFO, sdev, "qdepth=%d\n", qdepth);
9000 
9001 	return sdev->queue_depth;
9002 }
9003 
9004 static bool fake_timeout(struct scsi_cmnd *scp)
9005 {
9006 	if (0 == (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth))) {
9007 		if (sdebug_every_nth < -1)
9008 			sdebug_every_nth = -1;
9009 		if (SDEBUG_OPT_TIMEOUT & sdebug_opts)
9010 			return true; /* ignore command causing timeout */
9011 		else if (SDEBUG_OPT_MAC_TIMEOUT & sdebug_opts &&
9012 			 scsi_medium_access_command(scp))
9013 			return true; /* time out reads and writes */
9014 	}
9015 	return false;
9016 }
9017 
/* Response to TUR or media access command when device stopped */
static int resp_not_ready(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	int stopped_state;
	u64 diff_ns = 0;
	ktime_t now_ts = ktime_get_boottime();
	struct scsi_device *sdp = scp->device;

	stopped_state = atomic_read(&devip->stopped);
	if (stopped_state == 2) {	/* "in process of becoming ready" */
		if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
			diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));
			if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
				/* tur_ms_to_ready timer extinguished */
				atomic_set(&devip->stopped, 0);
				return 0;	/* device is now ready */
			}
		}
		mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x1);
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, sdp,
				    "%s: Not ready: in process of becoming ready\n", my_name);
		if (scp->cmnd[0] == TEST_UNIT_READY) {
			u64 tur_nanosecs_to_ready = (u64)sdeb_tur_ms_to_ready * 1000000;

			/* convert elapsed time into remaining time to ready */
			if (diff_ns <= tur_nanosecs_to_ready)
				diff_ns = tur_nanosecs_to_ready - diff_ns;
			else
				diff_ns = tur_nanosecs_to_ready;
			/* As per 20-061r2 approved for spc6 by T10 on 20200716 */
			do_div(diff_ns, 1000000);	/* diff_ns becomes milliseconds */
			scsi_set_sense_information(scp->sense_buffer, SCSI_SENSE_BUFFERSIZE,
						   diff_ns);
			return check_condition_result;
		}
	}
	/* any other stopped state: an initializing command is required */
	mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x2);
	if (sdebug_verbose)
		sdev_printk(KERN_INFO, sdp, "%s: Not ready: initializing command required\n",
			    my_name);
	return check_condition_result;
}
9060 
9061 static void sdebug_map_queues(struct Scsi_Host *shost)
9062 {
9063 	int i, qoff;
9064 
9065 	if (shost->nr_hw_queues == 1)
9066 		return;
9067 
9068 	for (i = 0, qoff = 0; i < HCTX_MAX_TYPES; i++) {
9069 		struct blk_mq_queue_map *map = &shost->tag_set.map[i];
9070 
9071 		map->nr_queues  = 0;
9072 
9073 		if (i == HCTX_TYPE_DEFAULT)
9074 			map->nr_queues = submit_queues - poll_queues;
9075 		else if (i == HCTX_TYPE_POLL)
9076 			map->nr_queues = poll_queues;
9077 
9078 		if (!map->nr_queues) {
9079 			BUG_ON(i == HCTX_TYPE_DEFAULT);
9080 			continue;
9081 		}
9082 
9083 		map->queue_offset = qoff;
9084 		blk_mq_map_queues(map);
9085 
9086 		qoff += map->nr_queues;
9087 	}
9088 }
9089 
/* Context handed to sdebug_blk_mq_poll_iter() for one polling pass. */
struct sdebug_blk_mq_poll_data {
	unsigned int queue_num;	/* hardware queue being polled */
	int *num_entries;	/* running count of completed commands */
};
9094 
/*
 * We don't handle aborted commands here, but it does not seem possible to have
 * aborted polled commands from schedule_resp()
 */
static bool sdebug_blk_mq_poll_iter(struct request *rq, void *opaque)
{
	struct sdebug_blk_mq_poll_data *data = opaque;
	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
	struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmd);
	struct sdebug_defer *sd_dp;
	u32 unique_tag = blk_mq_unique_tag(rq);
	u16 hwq = blk_mq_unique_tag_to_hwq(unique_tag);
	unsigned long flags;
	int queue_num = data->queue_num;
	ktime_t time;

	/* We're only interested in one queue for this iteration */
	if (hwq != queue_num)
		return true;	/* returning true keeps the iteration going */

	/* Subsequent checks would fail if this failed, but check anyway */
	if (!test_bit(SCMD_STATE_INFLIGHT, &cmd->state))
		return true;

	time = ktime_get_boottime();

	/* sdsc->lock guards the defer state and completion timestamp */
	spin_lock_irqsave(&sdsc->lock, flags);
	sd_dp = &sdsc->sd_dp;
	if (sd_dp->defer_t != SDEB_DEFER_POLL) {
		/* not a command deferred for polling */
		spin_unlock_irqrestore(&sdsc->lock, flags);
		return true;
	}

	if (time < sd_dp->cmpl_ts) {
		/* not due yet: its completion time is still in the future */
		spin_unlock_irqrestore(&sdsc->lock, flags);
		return true;
	}
	spin_unlock_irqrestore(&sdsc->lock, flags);

	if (sdebug_statistics) {
		atomic_inc(&sdebug_completions);
		if (raw_smp_processor_id() != sd_dp->issuing_cpu)
			atomic_inc(&sdebug_miss_cpus);
	}

	scsi_done(cmd); /* callback to mid level */
	(*data->num_entries)++;
	return true;
}
9144 
9145 static int sdebug_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
9146 {
9147 	int num_entries = 0;
9148 	struct sdebug_blk_mq_poll_data data = {
9149 		.queue_num = queue_num,
9150 		.num_entries = &num_entries,
9151 	};
9152 
9153 	blk_mq_tagset_busy_iter(&shost->tag_set, sdebug_blk_mq_poll_iter,
9154 				&data);
9155 
9156 	if (num_entries > 0)
9157 		atomic_add(num_entries, &sdeb_mq_poll_count);
9158 	return num_entries;
9159 }
9160 
9161 static int sdebug_timeout_cmd(struct scsi_cmnd *cmnd)
9162 {
9163 	struct scsi_device *sdp = cmnd->device;
9164 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
9165 	struct sdebug_err_inject *err;
9166 	unsigned char *cmd = cmnd->cmnd;
9167 	int ret = 0;
9168 
9169 	if (devip == NULL)
9170 		return 0;
9171 
9172 	rcu_read_lock();
9173 	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
9174 		if (err->type == ERR_TMOUT_CMD &&
9175 		    (err->cmd == cmd[0] || err->cmd == 0xff)) {
9176 			ret = !!err->cnt;
9177 			if (err->cnt < 0)
9178 				err->cnt++;
9179 
9180 			rcu_read_unlock();
9181 			return ret;
9182 		}
9183 	}
9184 	rcu_read_unlock();
9185 
9186 	return 0;
9187 }
9188 
9189 static int sdebug_fail_queue_cmd(struct scsi_cmnd *cmnd)
9190 {
9191 	struct scsi_device *sdp = cmnd->device;
9192 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
9193 	struct sdebug_err_inject *err;
9194 	unsigned char *cmd = cmnd->cmnd;
9195 	int ret = 0;
9196 
9197 	if (devip == NULL)
9198 		return 0;
9199 
9200 	rcu_read_lock();
9201 	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
9202 		if (err->type == ERR_FAIL_QUEUE_CMD &&
9203 		    (err->cmd == cmd[0] || err->cmd == 0xff)) {
9204 			ret = err->cnt ? err->queuecmd_ret : 0;
9205 			if (err->cnt < 0)
9206 				err->cnt++;
9207 
9208 			rcu_read_unlock();
9209 			return ret;
9210 		}
9211 	}
9212 	rcu_read_unlock();
9213 
9214 	return 0;
9215 }
9216 
9217 static int sdebug_fail_cmd(struct scsi_cmnd *cmnd, int *retval,
9218 			   struct sdebug_err_inject *info)
9219 {
9220 	struct scsi_device *sdp = cmnd->device;
9221 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
9222 	struct sdebug_err_inject *err;
9223 	unsigned char *cmd = cmnd->cmnd;
9224 	int ret = 0;
9225 	int result;
9226 
9227 	if (devip == NULL)
9228 		return 0;
9229 
9230 	rcu_read_lock();
9231 	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
9232 		if (err->type == ERR_FAIL_CMD &&
9233 		    (err->cmd == cmd[0] || err->cmd == 0xff)) {
9234 			if (!err->cnt) {
9235 				rcu_read_unlock();
9236 				return 0;
9237 			}
9238 
9239 			ret = !!err->cnt;
9240 			rcu_read_unlock();
9241 			goto out_handle;
9242 		}
9243 	}
9244 	rcu_read_unlock();
9245 
9246 	return 0;
9247 
9248 out_handle:
9249 	if (err->cnt < 0)
9250 		err->cnt++;
9251 	mk_sense_buffer(cmnd, err->sense_key, err->asc, err->asq);
9252 	result = err->status_byte | err->host_byte << 16 | err->driver_byte << 24;
9253 	*info = *err;
9254 	*retval = schedule_resp(cmnd, devip, result, NULL, 0, 0);
9255 
9256 	return ret;
9257 }
9258 
9259 /* Process @scp, a request to abort a SCSI command by tag. */
9260 static void scsi_debug_abort_cmd(struct Scsi_Host *shost, struct scsi_cmnd *scp)
9261 {
9262 	struct sdebug_internal_cmd *internal_cmd = scsi_cmd_priv(scp);
9263 	struct sdebug_abort_cmd *abort_cmd = &internal_cmd->abort_cmd;
9264 	const u32 unique_tag = abort_cmd->unique_tag;
9265 	struct scsi_cmnd *to_be_aborted_scmd =
9266 		scsi_host_find_tag(shost, unique_tag);
9267 	struct sdebug_scsi_cmd *to_be_aborted_sdsc =
9268 		scsi_cmd_priv(to_be_aborted_scmd);
9269 	bool res = false;
9270 
9271 	if (!to_be_aborted_scmd) {
9272 		pr_err("command with tag %#x not found\n", unique_tag);
9273 		return;
9274 	}
9275 
9276 	scoped_guard(spinlock_irqsave, &to_be_aborted_sdsc->lock)
9277 		res = scsi_debug_stop_cmnd(to_be_aborted_scmd);
9278 
9279 	if (res)
9280 		pr_info("aborted command with tag %#x\n", unique_tag);
9281 	else
9282 		pr_err("failed to abort command with tag %#x\n", unique_tag);
9283 
9284 	set_host_byte(scp, res ? DID_OK : DID_ERROR);
9285 }
9286 
9287 static enum scsi_qc_status
9288 scsi_debug_process_reserved_command(struct Scsi_Host *shost,
9289 				    struct scsi_cmnd *scp)
9290 {
9291 	struct sdebug_internal_cmd *internal_cmd = scsi_cmd_priv(scp);
9292 
9293 	switch (internal_cmd->type) {
9294 	case SCSI_DEBUG_ABORT_CMD:
9295 		scsi_debug_abort_cmd(shost, scp);
9296 		break;
9297 	default:
9298 		WARN_ON_ONCE(true);
9299 		set_host_byte(scp, DID_ERROR);
9300 		break;
9301 	}
9302 
9303 	scsi_done(scp);
9304 	return 0;
9305 }
9306 
9307 static enum scsi_qc_status scsi_debug_queuecommand(struct Scsi_Host *shost,
9308 						   struct scsi_cmnd *scp)
9309 {
9310 	u8 sdeb_i;
9311 	struct scsi_device *sdp = scp->device;
9312 	const struct opcode_info_t *oip;
9313 	const struct opcode_info_t *r_oip;
9314 	struct sdebug_dev_info *devip;
9315 	u8 *cmd = scp->cmnd;
9316 	int (*r_pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
9317 	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *) = NULL;
9318 	int k, na;
9319 	int errsts = 0;
9320 	u64 lun_index = sdp->lun & 0x3FFF;
9321 	u32 flags;
9322 	u16 sa;
9323 	u8 opcode = cmd[0];
9324 	u32 devsel = sdebug_get_devsel(scp->device);
9325 	bool has_wlun_rl;
9326 	bool inject_now;
9327 	int ret = 0;
9328 	struct sdebug_err_inject err;
9329 
9330 	scsi_set_resid(scp, 0);
9331 	if (sdebug_statistics) {
9332 		atomic_inc(&sdebug_cmnd_count);
9333 		inject_now = inject_on_this_cmd();
9334 	} else {
9335 		inject_now = false;
9336 	}
9337 	if (unlikely(sdebug_verbose &&
9338 		     !(SDEBUG_OPT_NO_CDB_NOISE & sdebug_opts))) {
9339 		char b[120];
9340 		int n, len, sb;
9341 
9342 		len = scp->cmd_len;
9343 		sb = (int)sizeof(b);
9344 		if (len > 32)
9345 			strcpy(b, "too long, over 32 bytes");
9346 		else {
9347 			for (k = 0, n = 0; k < len && n < sb; ++k)
9348 				n += scnprintf(b + n, sb - n, "%02x ",
9349 					       (u32)cmd[k]);
9350 		}
9351 		sdev_printk(KERN_INFO, sdp, "%s: tag=%#x, cmd %s\n", my_name,
9352 			    blk_mq_unique_tag(scsi_cmd_to_rq(scp)), b);
9353 	}
9354 	if (unlikely(inject_now && (sdebug_opts & SDEBUG_OPT_HOST_BUSY)))
9355 		return SCSI_MLQUEUE_HOST_BUSY;
9356 	has_wlun_rl = (sdp->lun == SCSI_W_LUN_REPORT_LUNS);
9357 	if (unlikely(lun_index >= sdebug_max_luns && !has_wlun_rl))
9358 		goto err_out;
9359 
9360 	sdeb_i = opcode_ind_arr[opcode];	/* fully mapped */
9361 	oip = &opcode_info_arr[sdeb_i];		/* safe if table consistent */
9362 	devip = (struct sdebug_dev_info *)sdp->hostdata;
9363 	if (unlikely(!devip)) {
9364 		devip = find_build_dev_info(sdp);
9365 		if (NULL == devip)
9366 			goto err_out;
9367 	}
9368 
9369 	if (sdebug_timeout_cmd(scp)) {
9370 		scmd_printk(KERN_INFO, scp, "timeout command 0x%x\n", opcode);
9371 		return 0;
9372 	}
9373 
9374 	ret = sdebug_fail_queue_cmd(scp);
9375 	if (ret) {
9376 		scmd_printk(KERN_INFO, scp, "fail queue command 0x%x with 0x%x\n",
9377 				opcode, ret);
9378 		return ret;
9379 	}
9380 
9381 	if (sdebug_fail_cmd(scp, &ret, &err)) {
9382 		scmd_printk(KERN_INFO, scp,
9383 			"fail command 0x%x with hostbyte=0x%x, "
9384 			"driverbyte=0x%x, statusbyte=0x%x, "
9385 			"sense_key=0x%x, asc=0x%x, asq=0x%x\n",
9386 			opcode, err.host_byte, err.driver_byte,
9387 			err.status_byte, err.sense_key, err.asc, err.asq);
9388 		return ret;
9389 	}
9390 
9391 	if (unlikely(inject_now && !atomic_read(&sdeb_inject_pending)))
9392 		atomic_set(&sdeb_inject_pending, 1);
9393 
9394 	na = oip->num_attached;
9395 	r_pfp = oip->pfp;
9396 	if (na) {	/* multiple commands with this opcode */
9397 		r_oip = oip;
9398 		if (FF_SA & r_oip->flags) {
9399 			if (F_SA_LOW & oip->flags)
9400 				sa = 0x1f & cmd[1];
9401 			else
9402 				sa = get_unaligned_be16(cmd + 8);
9403 			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
9404 				if (opcode == oip->opcode && sa == oip->sa &&
9405 					(devsel & oip->devsel) != 0)
9406 					break;
9407 			}
9408 		} else {   /* since no service action only check opcode */
9409 			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
9410 				if (opcode == oip->opcode &&
9411 					(devsel & oip->devsel) != 0)
9412 					break;
9413 			}
9414 		}
9415 		if (k > na) {
9416 			if (F_SA_LOW & r_oip->flags)
9417 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 4);
9418 			else if (F_SA_HIGH & r_oip->flags)
9419 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, 7);
9420 			else
9421 				mk_sense_invalid_opcode(scp);
9422 			goto check_cond;
9423 		}
9424 	}	/* else (when na==0) we assume the oip is a match */
9425 	flags = oip->flags;
9426 	if (unlikely(F_INV_OP & flags)) {
9427 		mk_sense_invalid_opcode(scp);
9428 		goto check_cond;
9429 	}
9430 	if (unlikely(has_wlun_rl && !(F_RL_WLUN_OK & flags))) {
9431 		if (sdebug_verbose)
9432 			sdev_printk(KERN_INFO, sdp, "%s: Opcode 0x%x not%s\n",
9433 				    my_name, opcode, " supported for wlun");
9434 		mk_sense_invalid_opcode(scp);
9435 		goto check_cond;
9436 	}
9437 	if (unlikely(sdebug_strict)) {	/* check cdb against mask */
9438 		u8 rem;
9439 		int j;
9440 
9441 		for (k = 1; k < oip->len_mask[0] && k < 16; ++k) {
9442 			rem = ~oip->len_mask[k] & cmd[k];
9443 			if (rem) {
9444 				for (j = 7; j >= 0; --j, rem <<= 1) {
9445 					if (0x80 & rem)
9446 						break;
9447 				}
9448 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, k, j);
9449 				goto check_cond;
9450 			}
9451 		}
9452 	}
9453 	if (unlikely(!(F_SKIP_UA & flags) &&
9454 		     find_first_bit(devip->uas_bm,
9455 				    SDEBUG_NUM_UAS) != SDEBUG_NUM_UAS)) {
9456 		errsts = make_ua(scp, devip);
9457 		if (errsts)
9458 			goto check_cond;
9459 	}
9460 	if (unlikely(((F_M_ACCESS & flags) || scp->cmnd[0] == TEST_UNIT_READY) &&
9461 		     atomic_read(&devip->stopped))) {
9462 		errsts = resp_not_ready(scp, devip);
9463 		if (errsts)
9464 			goto fini;
9465 	}
9466 	if (sdebug_fake_rw && (F_FAKE_RW & flags))
9467 		goto fini;
9468 	if (unlikely(sdebug_every_nth)) {
9469 		if (fake_timeout(scp))
9470 			return 0;	/* ignore command: make trouble */
9471 	}
9472 	if (likely(oip->pfp))
9473 		pfp = oip->pfp;	/* calls a resp_* function */
9474 	else
9475 		pfp = r_pfp;    /* if leaf function ptr NULL, try the root's */
9476 
9477 fini:
9478 	if (F_DELAY_OVERR & flags)	/* cmds like INQUIRY respond asap */
9479 		return schedule_resp(scp, devip, errsts, pfp, 0, 0);
9480 	else if ((flags & F_LONG_DELAY) && (sdebug_jdelay > 0 ||
9481 					    sdebug_ndelay > 10000)) {
9482 		/*
9483 		 * Skip long delays if ndelay <= 10 microseconds. Otherwise
9484 		 * for Start Stop Unit (SSU) want at least 1 second delay and
9485 		 * if sdebug_jdelay>1 want a long delay of that many seconds.
9486 		 * For Synchronize Cache want 1/20 of SSU's delay.
9487 		 */
9488 		int jdelay = (sdebug_jdelay < 2) ? 1 : sdebug_jdelay;
9489 		int denom = (flags & F_SYNC_DELAY) ? 20 : 1;
9490 
9491 		jdelay = mult_frac(USER_HZ * jdelay, HZ, denom * USER_HZ);
9492 		return schedule_resp(scp, devip, errsts, pfp, jdelay, 0);
9493 	} else
9494 		return schedule_resp(scp, devip, errsts, pfp, sdebug_jdelay,
9495 				     sdebug_ndelay);
9496 check_cond:
9497 	return schedule_resp(scp, devip, check_condition_result, NULL, 0, 0);
9498 err_out:
9499 	return schedule_resp(scp, NULL, DID_NO_CONNECT << 16, NULL, 0, 0);
9500 }
9501 
9502 static int sdebug_init_cmd_priv(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
9503 {
9504 	struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmd);
9505 	struct sdebug_defer *sd_dp = &sdsc->sd_dp;
9506 
9507 	if (blk_mq_is_reserved_rq(scsi_cmd_to_rq(cmd)))
9508 		return 0;
9509 
9510 	spin_lock_init(&sdsc->lock);
9511 	hrtimer_setup(&sd_dp->hrt, sdebug_q_cmd_hrt_complete, CLOCK_MONOTONIC,
9512 		      HRTIMER_MODE_REL_PINNED);
9513 	INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);
9514 
9515 	return 0;
9516 }
9517 
/*
 * SCSI host template for the scsi_debug pseudo adapter.  Wires the
 * midlayer entry points (queueing, error handling, sysfs/procfs info)
 * to this driver's simulated implementations.
 */
static const struct scsi_host_template sdebug_driver_template = {
	.show_info =		scsi_debug_show_info,
	.write_info =		scsi_debug_write_info,
	.proc_name =		sdebug_proc_name,
	.name =			"SCSI DEBUG",
	.info =			scsi_debug_info,
	.sdev_init =		scsi_debug_sdev_init,
	.sdev_configure =	scsi_debug_sdev_configure,
	.sdev_destroy =		scsi_debug_sdev_destroy,
	.ioctl =		scsi_debug_ioctl,
	.queuecommand =		scsi_debug_queuecommand,
	.queue_reserved_command = scsi_debug_process_reserved_command,
	.change_queue_depth =	sdebug_change_qdepth,
	.map_queues =		sdebug_map_queues,
	.mq_poll =		sdebug_blk_mq_poll,
	.eh_abort_handler =	scsi_debug_abort,
	.eh_device_reset_handler = scsi_debug_device_reset,
	.eh_target_reset_handler = scsi_debug_target_reset,
	.eh_bus_reset_handler = scsi_debug_bus_reset,
	.eh_host_reset_handler = scsi_debug_host_reset,
	.can_queue =		SDEBUG_CANQUEUE,
	.nr_reserved_cmds =	1,	/* one tag set aside for reserved commands */
	.this_id =		7,	/* conventional initiator SCSI id */
	.sg_tablesize =		SG_MAX_SEGMENTS,
	.cmd_per_lun =		DEF_CMD_PER_LUN,
	.max_sectors =		-1U,	/* no transfer-length cap */
	.max_segment_size =	-1U,
	.module =		THIS_MODULE,
	.skip_settle_delay =	1,	/* pseudo device: no bus-settle wait needed */
	.track_queue_depth =	1,
	.cmd_size = sizeof(union sdebug_priv),
	.init_cmd_priv = sdebug_init_cmd_priv,
	.target_alloc =		sdebug_target_alloc,
	.target_destroy =	sdebug_target_destroy,
};
9553 
9554 static int sdebug_driver_probe(struct device *dev)
9555 {
9556 	int error = 0;
9557 	struct sdebug_host_info *sdbg_host;
9558 	struct Scsi_Host *hpnt;
9559 	int hprot;
9560 
9561 	sdbg_host = dev_to_sdebug_host(dev);
9562 
9563 	hpnt = scsi_host_alloc(&sdebug_driver_template, 0);
9564 	if (NULL == hpnt) {
9565 		pr_err("scsi_host_alloc failed\n");
9566 		error = -ENODEV;
9567 		return error;
9568 	}
9569 	hpnt->can_queue = sdebug_max_queue;
9570 	hpnt->cmd_per_lun = sdebug_max_queue;
9571 	if (!sdebug_clustering)
9572 		hpnt->dma_boundary = PAGE_SIZE - 1;
9573 
9574 	if (submit_queues > nr_cpu_ids) {
9575 		pr_warn("%s: trim submit_queues (was %d) to nr_cpu_ids=%u\n",
9576 			my_name, submit_queues, nr_cpu_ids);
9577 		submit_queues = nr_cpu_ids;
9578 	}
9579 	/*
9580 	 * Decide whether to tell scsi subsystem that we want mq. The
9581 	 * following should give the same answer for each host.
9582 	 */
9583 	hpnt->nr_hw_queues = submit_queues;
9584 	if (sdebug_host_max_queue)
9585 		hpnt->host_tagset = 1;
9586 
9587 	/* poll queues are possible for nr_hw_queues > 1 */
9588 	if (hpnt->nr_hw_queues == 1 || (poll_queues < 1)) {
9589 		pr_warn("%s: trim poll_queues to 0. poll_q/nr_hw = (%d/%d)\n",
9590 			 my_name, poll_queues, hpnt->nr_hw_queues);
9591 		poll_queues = 0;
9592 	}
9593 
9594 	/*
9595 	 * Poll queues don't need interrupts, but we need at least one I/O queue
9596 	 * left over for non-polled I/O.
9597 	 * If condition not met, trim poll_queues to 1 (just for simplicity).
9598 	 */
9599 	if (poll_queues >= submit_queues) {
9600 		if (submit_queues < 3)
9601 			pr_warn("%s: trim poll_queues to 1\n", my_name);
9602 		else
9603 			pr_warn("%s: trim poll_queues to 1. Perhaps try poll_queues=%d\n",
9604 				my_name, submit_queues - 1);
9605 		poll_queues = 1;
9606 	}
9607 	if (poll_queues)
9608 		hpnt->nr_maps = 3;
9609 
9610 	sdbg_host->shost = hpnt;
9611 	if ((hpnt->this_id >= 0) && (sdebug_num_tgts > hpnt->this_id))
9612 		hpnt->max_id = sdebug_num_tgts + 1;
9613 	else
9614 		hpnt->max_id = sdebug_num_tgts;
9615 	/* = sdebug_max_luns; */
9616 	hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
9617 
9618 	hprot = 0;
9619 
9620 	switch (sdebug_dif) {
9621 
9622 	case T10_PI_TYPE1_PROTECTION:
9623 		hprot = SHOST_DIF_TYPE1_PROTECTION;
9624 		if (sdebug_dix)
9625 			hprot |= SHOST_DIX_TYPE1_PROTECTION;
9626 		break;
9627 
9628 	case T10_PI_TYPE2_PROTECTION:
9629 		hprot = SHOST_DIF_TYPE2_PROTECTION;
9630 		if (sdebug_dix)
9631 			hprot |= SHOST_DIX_TYPE2_PROTECTION;
9632 		break;
9633 
9634 	case T10_PI_TYPE3_PROTECTION:
9635 		hprot = SHOST_DIF_TYPE3_PROTECTION;
9636 		if (sdebug_dix)
9637 			hprot |= SHOST_DIX_TYPE3_PROTECTION;
9638 		break;
9639 
9640 	default:
9641 		if (sdebug_dix)
9642 			hprot |= SHOST_DIX_TYPE0_PROTECTION;
9643 		break;
9644 	}
9645 
9646 	scsi_host_set_prot(hpnt, hprot);
9647 
9648 	if (have_dif_prot || sdebug_dix)
9649 		pr_info("host protection%s%s%s%s%s%s%s\n",
9650 			(hprot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
9651 			(hprot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
9652 			(hprot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
9653 			(hprot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
9654 			(hprot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
9655 			(hprot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
9656 			(hprot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");
9657 
9658 	if (sdebug_guard == 1)
9659 		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
9660 	else
9661 		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);
9662 
9663 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & sdebug_opts);
9664 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & sdebug_opts);
9665 	if (sdebug_every_nth)	/* need stats counters for every_nth */
9666 		sdebug_statistics = true;
9667 	error = scsi_add_host(hpnt, &sdbg_host->dev);
9668 	if (error) {
9669 		pr_err("scsi_add_host failed\n");
9670 		error = -ENODEV;
9671 		scsi_host_put(hpnt);
9672 	} else {
9673 		scsi_scan_host(hpnt);
9674 	}
9675 
9676 	return error;
9677 }
9678 
9679 static void sdebug_driver_remove(struct device *dev)
9680 {
9681 	struct sdebug_host_info *sdbg_host;
9682 	struct sdebug_dev_info *sdbg_devinfo, *tmp;
9683 
9684 	sdbg_host = dev_to_sdebug_host(dev);
9685 
9686 	scsi_remove_host(sdbg_host->shost);
9687 
9688 	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
9689 				 dev_list) {
9690 		list_del(&sdbg_devinfo->dev_list);
9691 		kfree(sdbg_devinfo->zstate);
9692 		kfree(sdbg_devinfo);
9693 	}
9694 
9695 	scsi_host_put(sdbg_host->shost);
9696 }
9697 
/*
 * Pseudo bus that scsi_debug "adapters" hang off; the driver core calls
 * probe/remove here as simulated hosts are added and deleted.
 */
static const struct bus_type pseudo_lld_bus = {
	.name = "pseudo",
	.probe = sdebug_driver_probe,
	.remove = sdebug_driver_remove,
	.drv_groups = sdebug_drv_groups,
};
9704