// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
 *  Copyright (C) 1992  Eric Youngdale
 *  Simulate a host adapter with 2 disks attached.  Do a lot of checking
 *  to make sure that we are not getting blocks mixed up, and PANIC if
 *  anything out of the ordinary is seen.
 * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 *
 * Copyright (C) 2001 - 2021 Douglas Gilbert
 *
 *  For documentation see http://sg.danny.cz/sg/scsi_debug.html
 */
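/*
 * Example invocation (one sketch among many; see the web page above and the
 * module parameters that correspond to the DEF_* defaults below):
 *
 *   modprobe scsi_debug dev_size_mb=256 opts=1
 *
 * creates 1 host with 1 target and 1 lun backed by a 256 MiB ram store,
 * with noisy (verbose) logging enabled by opts=1 (SDEBUG_OPT_NOISE).
 */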


#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__

#include <linux/module.h>
#include <linux/align.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/vmalloc.h>
#include <linux/moduleparam.h>
#include <linux/scatterlist.h>
#include <linux/blkdev.h>
#include <linux/crc-t10dif.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/atomic.h>
#include <linux/hrtimer.h>
#include <linux/uuid.h>
#include <linux/t10-pi.h>
#include <linux/msdos_partition.h>
#include <linux/random.h>
#include <linux/xarray.h>
#include <linux/prefetch.h>

#include <net/checksum.h>

#include <asm/unaligned.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsicam.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_dbg.h>

#include "sd.h"
#include "scsi_logging.h"

/* make sure inq_product_rev string corresponds to this version */
#define SDEBUG_VERSION "0191"	/* format to fit INQUIRY revision field */
static const char *sdebug_version_date = "20210520";

#define MY_NAME "scsi_debug"

/* Additional Sense Code (ASC) */
#define NO_ADDITIONAL_SENSE 0x0
#define LOGICAL_UNIT_NOT_READY 0x4
#define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
#define UNRECOVERED_READ_ERR 0x11
#define PARAMETER_LIST_LENGTH_ERR 0x1a
#define INVALID_OPCODE 0x20
#define LBA_OUT_OF_RANGE 0x21
#define INVALID_FIELD_IN_CDB 0x24
#define INVALID_FIELD_IN_PARAM_LIST 0x26
#define WRITE_PROTECTED 0x27
#define UA_RESET_ASC 0x29
#define UA_CHANGED_ASC 0x2a
#define TARGET_CHANGED_ASC 0x3f
#define LUNS_CHANGED_ASCQ 0x0e
#define INSUFF_RES_ASC 0x55
#define INSUFF_RES_ASCQ 0x3
#define POWER_ON_RESET_ASCQ 0x0
#define POWER_ON_OCCURRED_ASCQ 0x1
#define BUS_RESET_ASCQ 0x2	/* scsi bus reset occurred */
#define MODE_CHANGED_ASCQ 0x1	/* mode parameters changed */
#define CAPACITY_CHANGED_ASCQ 0x9
#define SAVING_PARAMS_UNSUP 0x39
#define TRANSPORT_PROBLEM 0x4b
#define THRESHOLD_EXCEEDED 0x5d
#define LOW_POWER_COND_ON 0x5e
#define MISCOMPARE_VERIFY_ASC 0x1d
#define MICROCODE_CHANGED_ASCQ 0x1	/* with TARGET_CHANGED_ASC */
#define MICROCODE_CHANGED_WO_RESET_ASCQ 0x16
#define WRITE_ERROR_ASC 0xc
#define UNALIGNED_WRITE_ASCQ 0x4
#define WRITE_BOUNDARY_ASCQ 0x5
#define READ_INVDATA_ASCQ 0x6
#define READ_BOUNDARY_ASCQ 0x7
#define ATTEMPT_ACCESS_GAP 0x9
#define INSUFF_ZONE_ASCQ 0xe

/* Additional Sense Code Qualifier (ASCQ) */
#define ACK_NAK_TO 0x3

/* Default values for driver parameters */
#define DEF_NUM_HOST   1
#define DEF_NUM_TGTS   1
#define DEF_MAX_LUNS   1
/* With these defaults, this driver will make 1 host with 1 target
 * (id 0) containing 1 logical unit (lun 0). That is 1 device.
 */
#define DEF_ATO 1
#define DEF_CDB_LEN 10
#define DEF_JDELAY   1		/* if > 0 unit is a jiffy */
#define DEF_DEV_SIZE_PRE_INIT   0
#define DEF_DEV_SIZE_MB   8
#define DEF_ZBC_DEV_SIZE_MB   128
#define DEF_DIF 0
#define DEF_DIX 0
#define DEF_PER_HOST_STORE false
#define DEF_D_SENSE   0
#define DEF_EVERY_NTH   0
#define DEF_FAKE_RW	0
#define DEF_GUARD 0
#define DEF_HOST_LOCK 0
#define DEF_LBPU 0
#define DEF_LBPWS 0
#define DEF_LBPWS10 0
#define DEF_LBPRZ 1
#define DEF_LOWEST_ALIGNED 0
#define DEF_NDELAY   0		/* if > 0 unit is a nanosecond */
#define DEF_NO_LUN_0   0
#define DEF_NUM_PARTS   0
#define DEF_OPTS   0
#define DEF_OPT_BLKS 1024
#define DEF_PHYSBLK_EXP 0
#define DEF_OPT_XFERLEN_EXP 0
#define DEF_PTYPE   TYPE_DISK
#define DEF_RANDOM false
#define DEF_REMOVABLE false
#define DEF_SCSI_LEVEL   7    /* INQUIRY, byte2 [6->SPC-4; 7->SPC-5] */
#define DEF_SECTOR_SIZE 512
#define DEF_UNMAP_ALIGNMENT 0
#define DEF_UNMAP_GRANULARITY 1
#define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
#define DEF_UNMAP_MAX_DESC 256
#define DEF_VIRTUAL_GB   0
#define DEF_VPD_USE_HOSTNO 1
#define DEF_WRITESAME_LENGTH 0xFFFF
#define DEF_STRICT 0
#define DEF_STATISTICS false
#define DEF_SUBMIT_QUEUES 1
#define DEF_TUR_MS_TO_READY 0
#define DEF_UUID_CTL 0
#define JDELAY_OVERRIDDEN -9999

/* Default parameters for ZBC drives */
#define DEF_ZBC_ZONE_SIZE_MB	128
#define DEF_ZBC_MAX_OPEN_ZONES	8
#define DEF_ZBC_NR_CONV_ZONES	1

#define SDEBUG_LUN_0_VAL 0

/* bit mask values for sdebug_opts */
#define SDEBUG_OPT_NOISE		1
#define SDEBUG_OPT_MEDIUM_ERR		2
#define SDEBUG_OPT_TIMEOUT		4
#define SDEBUG_OPT_RECOVERED_ERR	8
#define SDEBUG_OPT_TRANSPORT_ERR	16
#define SDEBUG_OPT_DIF_ERR		32
#define SDEBUG_OPT_DIX_ERR		64
#define SDEBUG_OPT_MAC_TIMEOUT		128
#define SDEBUG_OPT_SHORT_TRANSFER	0x100
#define SDEBUG_OPT_Q_NOISE		0x200
#define SDEBUG_OPT_ALL_TSF		0x400	/* ignore */
#define SDEBUG_OPT_RARE_TSF		0x800
#define SDEBUG_OPT_N_WCE		0x1000
#define SDEBUG_OPT_RESET_NOISE		0x2000
#define SDEBUG_OPT_NO_CDB_NOISE		0x4000
#define SDEBUG_OPT_HOST_BUSY		0x8000
#define SDEBUG_OPT_CMD_ABORT		0x10000
#define SDEBUG_OPT_ALL_NOISE (SDEBUG_OPT_NOISE | SDEBUG_OPT_Q_NOISE | \
			      SDEBUG_OPT_RESET_NOISE)
#define SDEBUG_OPT_ALL_INJECTING (SDEBUG_OPT_RECOVERED_ERR | \
				  SDEBUG_OPT_TRANSPORT_ERR | \
				  SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR | \
				  SDEBUG_OPT_SHORT_TRANSFER | \
				  SDEBUG_OPT_HOST_BUSY | \
				  SDEBUG_OPT_CMD_ABORT)
#define SDEBUG_OPT_RECOV_DIF_DIX (SDEBUG_OPT_RECOVERED_ERR | \
				  SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR)

/* As indicated in SAM-5 and SPC-4 Unit Attentions (UAs) are returned in
 * priority order. In the subset implemented here lower numbers have higher
 * priority. The UA numbers should be a sequence starting from 0 with
 * SDEBUG_NUM_UAS being 1 higher than the highest numbered UA. */
#define SDEBUG_UA_POR 0		/* Power on, reset, or bus device reset */
#define SDEBUG_UA_POOCCUR 1	/* Power on occurred */
#define SDEBUG_UA_BUS_RESET 2
#define SDEBUG_UA_MODE_CHANGED 3
#define SDEBUG_UA_CAPACITY_CHANGED 4
#define SDEBUG_UA_LUNS_CHANGED 5
#define SDEBUG_UA_MICROCODE_CHANGED 6	/* simulate firmware change */
#define SDEBUG_UA_MICROCODE_CHANGED_WO_RESET 7
#define SDEBUG_NUM_UAS 8

/* When the SDEBUG_OPT_MEDIUM_ERR bit is set in sdebug_opts, a medium error
 * is simulated at this sector on read commands: */
#define OPT_MEDIUM_ERR_ADDR   0x1234 /* that's sector 4660 in decimal */
#define OPT_MEDIUM_ERR_NUM    10     /* number of consecutive medium errs */

/* SDEBUG_CANQUEUE is the maximum number of commands that can be queued
 * (for response) per submit queue at one time. Can be reduced by max_queue
 * option. Command responses are not queued when jdelay=0 and ndelay=0. The
 * per-device DEF_CMD_PER_LUN can be changed via sysfs:
 * /sys/class/scsi_device/<h:c:t:l>/device/queue_depth
 * but cannot exceed SDEBUG_CANQUEUE.
 */
#define SDEBUG_CANQUEUE_WORDS  3	/* a WORD is the number of bits in a long */
#define SDEBUG_CANQUEUE  (SDEBUG_CANQUEUE_WORDS * BITS_PER_LONG)
#define DEF_CMD_PER_LUN  SDEBUG_CANQUEUE
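/* For example, with BITS_PER_LONG == 64 this makes SDEBUG_CANQUEUE
 * 3 * 64 = 192 queued commands per submit queue (3 * 32 = 96 on 32 bit).
 */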

/* UA - Unit Attention; SA - Service Action; SSU - Start Stop Unit */
#define F_D_IN			1	/* Data-in command (e.g. READ) */
#define F_D_OUT			2	/* Data-out command (e.g. WRITE) */
#define F_D_OUT_MAYBE		4	/* WRITE SAME, NDOB bit */
#define F_D_UNKN		8
#define F_RL_WLUN_OK		0x10	/* allowed with REPORT LUNS W-LUN */
#define F_SKIP_UA		0x20	/* bypass UAs (e.g. INQUIRY command) */
#define F_DELAY_OVERR		0x40	/* for commands like INQUIRY */
#define F_SA_LOW		0x80	/* SA is in cdb byte 1, bits 4 to 0 */
#define F_SA_HIGH		0x100	/* SA is in cdb bytes 8 and 9 */
#define F_INV_OP		0x200	/* invalid opcode (not supported) */
#define F_FAKE_RW		0x400	/* bypass resp_*() when fake_rw set */
#define F_M_ACCESS		0x800	/* media access, reacts to SSU state */
#define F_SSU_DELAY		0x1000	/* SSU command delay (long-ish) */
#define F_SYNC_DELAY		0x2000	/* SYNCHRONIZE CACHE delay */

/* Useful combinations of the above flags */
#define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
#define FF_MEDIA_IO (F_M_ACCESS | F_FAKE_RW)
#define FF_SA (F_SA_HIGH | F_SA_LOW)
#define F_LONG_DELAY		(F_SSU_DELAY | F_SYNC_DELAY)

#define SDEBUG_MAX_PARTS 4

#define SDEBUG_MAX_CMD_LEN 32

#define SDEB_XA_NOT_IN_USE XA_MARK_1

/* Zone types (zbcr05 table 25) */
enum sdebug_z_type {
	ZBC_ZTYPE_CNV	= 0x1,
	ZBC_ZTYPE_SWR	= 0x2,
	ZBC_ZTYPE_SWP	= 0x3,
	/* ZBC_ZTYPE_SOBR = 0x4, */
	ZBC_ZTYPE_GAP	= 0x5,
};

/* enumeration names taken from table 26, zbcr05 */
enum sdebug_z_cond {
	ZBC_NOT_WRITE_POINTER	= 0x0,
	ZC1_EMPTY		= 0x1,
	ZC2_IMPLICIT_OPEN	= 0x2,
	ZC3_EXPLICIT_OPEN	= 0x3,
	ZC4_CLOSED		= 0x4,
	ZC6_READ_ONLY		= 0xd,
	ZC5_FULL		= 0xe,
	ZC7_OFFLINE		= 0xf,
};

struct sdeb_zone_state {	/* ZBC: per zone state */
	enum sdebug_z_type z_type;
	enum sdebug_z_cond z_cond;
	bool z_non_seq_resource;
	unsigned int z_size;
	sector_t z_start;
	sector_t z_wp;
};

struct sdebug_dev_info {
	struct list_head dev_list;
	unsigned int channel;
	unsigned int target;
	u64 lun;
	uuid_t lu_name;
	struct sdebug_host_info *sdbg_host;
	unsigned long uas_bm[1];
	atomic_t num_in_q;
	atomic_t stopped;	/* 1: by SSU, 2: device start */
	bool used;

	/* For ZBC devices */
	enum blk_zoned_model zmodel;
	unsigned int zcap;
	unsigned int zsize;
	unsigned int zsize_shift;
	unsigned int nr_zones;
	unsigned int nr_conv_zones;
	unsigned int nr_seq_zones;
	unsigned int nr_imp_open;
	unsigned int nr_exp_open;
	unsigned int nr_closed;
	unsigned int max_open;
	ktime_t create_ts;	/* time since bootup that this device was created */
	struct sdeb_zone_state *zstate;
};

struct sdebug_host_info {
	struct list_head host_list;
	int si_idx;	/* sdeb_store_info (per host) xarray index */
	struct Scsi_Host *shost;
	struct device dev;
	struct list_head dev_info_list;
};

/* There is an xarray of pointers to this struct's objects, one per host */
struct sdeb_store_info {
	rwlock_t macc_lck;	/* for atomic media access on this store */
	u8 *storep;		/* user data storage (ram) */
	struct t10_pi_tuple *dif_storep; /* protection info */
	void *map_storep;	/* provisioning map */
};

#define to_sdebug_host(d)	\
	container_of(d, struct sdebug_host_info, dev)

enum sdeb_defer_type {SDEB_DEFER_NONE = 0, SDEB_DEFER_HRT = 1,
		      SDEB_DEFER_WQ = 2, SDEB_DEFER_POLL = 3};

struct sdebug_defer {
	struct hrtimer hrt;
	struct execute_work ew;
	ktime_t cmpl_ts;/* time since boot to complete this cmd */
	int sqa_idx;	/* index of sdebug_queue array */
	int qc_idx;	/* index of sdebug_queued_cmd array within sqa_idx */
	int hc_idx;	/* hostwide tag index */
	int issuing_cpu;
	bool init_hrt;
	bool init_wq;
	bool init_poll;
	bool aborted;	/* true when blk_abort_request() already called */
	enum sdeb_defer_type defer_t;
};

struct sdebug_queued_cmd {
	/* corresponding bit set in in_use_bm[] in owning struct sdebug_queue
	 * instance indicates this slot is in use.
	 */
	struct sdebug_defer *sd_dp;
	struct scsi_cmnd *a_cmnd;
};

struct sdebug_queue {
	struct sdebug_queued_cmd qc_arr[SDEBUG_CANQUEUE];
	unsigned long in_use_bm[SDEBUG_CANQUEUE_WORDS];
	spinlock_t qc_lock;
	atomic_t blocked;	/* to temporarily stop more being queued */
};

static atomic_t sdebug_cmnd_count;   /* number of incoming commands */
static atomic_t sdebug_completions;  /* count of deferred completions */
static atomic_t sdebug_miss_cpus;    /* submission + completion cpus differ */
static atomic_t sdebug_a_tsf;	     /* 'almost task set full' counter */
static atomic_t sdeb_inject_pending;
static atomic_t sdeb_mq_poll_count;  /* bumped when mq_poll returns > 0 */

struct opcode_info_t {
	u8 num_attached;	/* 0 if this is it (i.e. a leaf); use 0xff */
				/* for terminating element */
	u8 opcode;		/* if num_attached > 0, preferred */
	u16 sa;			/* service action */
	u32 flags;		/* OR-ed set of SDEB_F_* */
	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
	const struct opcode_info_t *arrp;  /* num_attached elements or NULL */
	u8 len_mask[16];	/* len_mask[0]-->cdb_len, then mask for cdb */
				/* 1 to min(cdb_len, 15); ignore cdb[15...] */
};
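/*
 * Worked example of len_mask: the INQUIRY entry in opcode_info_arr[] below
 * is {6, 0xe3, 0xff, 0xff, 0xff, 0xc7, 0, ...}. len_mask[0] == 6 gives the
 * cdb length; set bits in the remaining bytes mark the cdb bits this driver
 * accepts (with the strict parameter, other non-zero cdb bits are reported
 * as INVALID FIELD IN CDB).
 */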

/* SCSI opcodes (first byte of cdb) of interest mapped onto these indexes */
enum sdeb_opcode_index {
	SDEB_I_INVALID_OPCODE =	0,
	SDEB_I_INQUIRY = 1,
	SDEB_I_REPORT_LUNS = 2,
	SDEB_I_REQUEST_SENSE = 3,
	SDEB_I_TEST_UNIT_READY = 4,
	SDEB_I_MODE_SENSE = 5,		/* 6, 10 */
	SDEB_I_MODE_SELECT = 6,		/* 6, 10 */
	SDEB_I_LOG_SENSE = 7,
	SDEB_I_READ_CAPACITY = 8,	/* 10; 16 is in SA_IN(16) */
	SDEB_I_READ = 9,		/* 6, 10, 12, 16 */
	SDEB_I_WRITE = 10,		/* 6, 10, 12, 16 */
	SDEB_I_START_STOP = 11,
	SDEB_I_SERV_ACT_IN_16 = 12,	/* add ...SERV_ACT_IN_12 if needed */
	SDEB_I_SERV_ACT_OUT_16 = 13,	/* add ...SERV_ACT_OUT_12 if needed */
	SDEB_I_MAINT_IN = 14,
	SDEB_I_MAINT_OUT = 15,
	SDEB_I_VERIFY = 16,		/* VERIFY(10), VERIFY(16) */
	SDEB_I_VARIABLE_LEN = 17,	/* READ(32), WRITE(32), WR_SCAT(32) */
	SDEB_I_RESERVE = 18,		/* 6, 10 */
	SDEB_I_RELEASE = 19,		/* 6, 10 */
	SDEB_I_ALLOW_REMOVAL = 20,	/* PREVENT ALLOW MEDIUM REMOVAL */
	SDEB_I_REZERO_UNIT = 21,	/* REWIND in SSC */
	SDEB_I_ATA_PT = 22,		/* 12, 16 */
	SDEB_I_SEND_DIAG = 23,
	SDEB_I_UNMAP = 24,
	SDEB_I_WRITE_BUFFER = 25,
	SDEB_I_WRITE_SAME = 26,		/* 10, 16 */
	SDEB_I_SYNC_CACHE = 27,		/* 10, 16 */
	SDEB_I_COMP_WRITE = 28,
	SDEB_I_PRE_FETCH = 29,		/* 10, 16 */
	SDEB_I_ZONE_OUT = 30,		/* 0x94+SA; includes no data xfer */
	SDEB_I_ZONE_IN = 31,		/* 0x95+SA; all have data-in */
	SDEB_I_LAST_ELEM_P1 = 32,	/* keep this last (previous + 1) */
};


static const unsigned char opcode_ind_arr[256] = {
/* 0x0; 0x0->0x1f: 6 byte cdbs */
	SDEB_I_TEST_UNIT_READY, SDEB_I_REZERO_UNIT, 0, SDEB_I_REQUEST_SENSE,
	    0, 0, 0, 0,
	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, 0,
	0, 0, SDEB_I_INQUIRY, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
	    SDEB_I_RELEASE,
	0, 0, SDEB_I_MODE_SENSE, SDEB_I_START_STOP, 0, SDEB_I_SEND_DIAG,
	    SDEB_I_ALLOW_REMOVAL, 0,
/* 0x20; 0x20->0x3f: 10 byte cdbs */
	0, 0, 0, 0, 0, SDEB_I_READ_CAPACITY, 0, 0,
	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, SDEB_I_VERIFY,
	0, 0, 0, 0, SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, 0,
	0, 0, 0, SDEB_I_WRITE_BUFFER, 0, 0, 0, 0,
/* 0x40; 0x40->0x5f: 10 byte cdbs */
	0, SDEB_I_WRITE_SAME, SDEB_I_UNMAP, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, SDEB_I_LOG_SENSE, 0, 0,
	0, 0, 0, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
	    SDEB_I_RELEASE,
	0, 0, SDEB_I_MODE_SENSE, 0, 0, 0, 0, 0,
/* 0x60; 0x60->0x7d are reserved, 0x7e is "extended cdb" */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, SDEB_I_VARIABLE_LEN,
/* 0x80; 0x80->0x9f: 16 byte cdbs */
	0, 0, 0, 0, 0, SDEB_I_ATA_PT, 0, 0,
	SDEB_I_READ, SDEB_I_COMP_WRITE, SDEB_I_WRITE, 0,
	0, 0, 0, SDEB_I_VERIFY,
	SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, SDEB_I_WRITE_SAME,
	SDEB_I_ZONE_OUT, SDEB_I_ZONE_IN, 0, 0,
	0, 0, 0, 0, 0, 0, SDEB_I_SERV_ACT_IN_16, SDEB_I_SERV_ACT_OUT_16,
/* 0xa0; 0xa0->0xbf: 12 byte cdbs */
	SDEB_I_REPORT_LUNS, SDEB_I_ATA_PT, 0, SDEB_I_MAINT_IN,
	     SDEB_I_MAINT_OUT, 0, 0, 0,
	SDEB_I_READ, 0 /* SDEB_I_SERV_ACT_OUT_12 */, SDEB_I_WRITE,
	     0 /* SDEB_I_SERV_ACT_IN_12 */, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
/* 0xc0; 0xc0->0xff: vendor specific */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
};
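/*
 * Example lookup: a READ(10) cdb starts with 0x28, so opcode_ind_arr[0x28]
 * yields SDEB_I_READ and dispatch lands on opcode_info_arr[SDEB_I_READ],
 * whose overflow array (read_iarr[]) covers the other READ cdb sizes.
 */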

/*
 * The following "response" functions return the SCSI mid-level's 4 byte
 * tuple-in-an-int. To handle commands with an IMMED bit, for a faster
 * command completion, they can mask their return value with
 * SDEG_RES_IMMED_MASK.
 */
#define SDEG_RES_IMMED_MASK 0x40000000
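/*
 * Sketch of the convention (an illustration, not a quote of any specific
 * handler): a response function that honors an IMMED bit in its cdb can
 *	return 0 | SDEG_RES_IMMED_MASK;
 * to report GOOD status while asking the completion path to skip the
 * usual long delay.
 */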

static int resp_inquiry(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_report_luns(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_requests(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_mode_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_mode_select(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_log_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_readcap(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_read_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_scat(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_start_stop(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_readcap16(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_get_lba_status(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_report_tgtpgs(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_unmap(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_rsup_opcodes(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_rsup_tmfs(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_verify(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_same_10(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_same_16(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_sync_cache(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_pre_fetch(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_report_zones(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_open_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_close_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_finish_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_rwp_zone(struct scsi_cmnd *, struct sdebug_dev_info *);

static int sdebug_do_add_host(bool mk_new_store);
static int sdebug_add_host_helper(int per_host_idx);
static void sdebug_do_remove_host(bool the_end);
static int sdebug_add_store(void);
static void sdebug_erase_store(int idx, struct sdeb_store_info *sip);
static void sdebug_erase_all_stores(bool apart_from_first);

/*
 * The following are overflow arrays for cdbs that "hit" the same index in
 * the opcode_info_arr array. The most time sensitive (or commonly used) cdb
 * should be placed in opcode_info_arr[], the others should be placed here.
 */
static const struct opcode_info_t msense_iarr[] = {
	{0, 0x1a, 0, F_D_IN, NULL, NULL,
	    {6,  0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t mselect_iarr[] = {
	{0, 0x15, 0, F_D_OUT, NULL, NULL,
	    {6,  0xf1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t read_iarr[] = {
	{0, 0x28, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(10) */
	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },
	{0, 0x8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL, /* READ(6) */
	    {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0xa8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(12) */
	    {12,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf,
	     0xc7, 0, 0, 0, 0} },
};

static const struct opcode_info_t write_iarr[] = {
	{0, 0x2a, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(10) */
	    NULL, {10,  0xfb, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7,
		   0, 0, 0, 0, 0, 0} },
	{0, 0xa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,   /* WRITE(6) */
	    NULL, {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0,
		   0, 0, 0} },
	{0, 0xaa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(12) */
	    NULL, {12,  0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		   0xbf, 0xc7, 0, 0, 0, 0} },
};

static const struct opcode_info_t verify_iarr[] = {
	{0, 0x2f, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,/* VERIFY(10) */
	    NULL, {10,  0xf7, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xff, 0xff, 0xc7,
		   0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t sa_in_16_iarr[] = {
	{0, 0x9e, 0x12, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL,
	    {16,  0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0, 0xc7} },	/* GET LBA STATUS(16) */
};

static const struct opcode_info_t vl_iarr[] = {	/* VARIABLE LENGTH */
	{0, 0x7f, 0xb, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_dt0,
	    NULL, {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0xb, 0xfa,
		   0, 0xff, 0xff, 0xff, 0xff} },	/* WRITE(32) */
	{0, 0x7f, 0x11, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
	    NULL, {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x11, 0xf8,
		   0, 0xff, 0xff, 0x0, 0x0} },	/* WRITE SCATTERED(32) */
};

static const struct opcode_info_t maint_in_iarr[] = {	/* MAINT IN */
	{0, 0xa3, 0xc, F_SA_LOW | F_D_IN, resp_rsup_opcodes, NULL,
	    {12,  0xc, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
	     0xc7, 0, 0, 0, 0} }, /* REPORT SUPPORTED OPERATION CODES */
	{0, 0xa3, 0xd, F_SA_LOW | F_D_IN, resp_rsup_tmfs, NULL,
	    {12,  0xd, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
	     0, 0} },	/* REPORT SUPPORTED TASK MANAGEMENT FUNCTIONS */
};

static const struct opcode_info_t write_same_iarr[] = {
	{0, 0x93, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_write_same_16, NULL,
	    {16,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0x3f, 0xc7} },		/* WRITE SAME(16) */
};

static const struct opcode_info_t reserve_iarr[] = {
	{0, 0x16, 0, F_D_OUT, NULL, NULL,		/* RESERVE(6) */
	    {6,  0x1f, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t release_iarr[] = {
	{0, 0x17, 0, F_D_OUT, NULL, NULL,		/* RELEASE(6) */
	    {6,  0x1f, 0xff, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t sync_cache_iarr[] = {
	{0, 0x91, 0, F_SYNC_DELAY | F_M_ACCESS, resp_sync_cache, NULL,
	    {16,  0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* SYNC_CACHE (16) */
};

static const struct opcode_info_t pre_fetch_iarr[] = {
	{0, 0x90, 0, F_SYNC_DELAY | FF_MEDIA_IO, resp_pre_fetch, NULL,
	    {16,  0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* PRE-FETCH (16) */
};

static const struct opcode_info_t zone_out_iarr[] = {	/* ZONE OUT(16) */
	{0, 0x94, 0x1, F_SA_LOW | F_M_ACCESS, resp_close_zone, NULL,
	    {16, 0x1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },	/* CLOSE ZONE */
	{0, 0x94, 0x2, F_SA_LOW | F_M_ACCESS, resp_finish_zone, NULL,
	    {16, 0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },	/* FINISH ZONE */
	{0, 0x94, 0x4, F_SA_LOW | F_M_ACCESS, resp_rwp_zone, NULL,
	    {16, 0x4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },  /* RESET WRITE POINTER */
};

static const struct opcode_info_t zone_in_iarr[] = {	/* ZONE IN(16) */
	{0, 0x95, 0x6, F_SA_LOW | F_D_IN | F_M_ACCESS, NULL, NULL,
	    {16, 0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* REPORT ZONES */
};


/* This array is accessed via SDEB_I_* values. Make sure all are mapped,
 * plus the terminating elements for logic that scans this table such as
 * REPORT SUPPORTED OPERATION CODES. */
static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEM_P1 + 1] = {
/* 0 */
	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL,	/* unknown opcodes */
	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x12, 0, FF_RESPOND | F_D_IN, resp_inquiry, NULL, /* INQUIRY */
	    {6,  0xe3, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0xa0, 0, FF_RESPOND | F_D_IN, resp_report_luns, NULL,
	    {12,  0xe3, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
	     0, 0} },					/* REPORT LUNS */
	{0, 0x3, 0, FF_RESPOND | F_D_IN, resp_requests, NULL,
	    {6,  0xe1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x0, 0, F_M_ACCESS | F_RL_WLUN_OK, NULL, NULL,/* TEST UNIT READY */
	    {6,  0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
/* 5 */
	{ARRAY_SIZE(msense_iarr), 0x5a, 0, F_D_IN,	/* MODE SENSE(10) */
	    resp_mode_sense, msense_iarr, {10,  0xf8, 0xff, 0xff, 0, 0, 0,
		0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(mselect_iarr), 0x55, 0, F_D_OUT,	/* MODE SELECT(10) */
	    resp_mode_select, mselect_iarr, {10,  0xf1, 0, 0, 0, 0, 0, 0xff,
		0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
	{0, 0x4d, 0, F_D_IN, resp_log_sense, NULL,	/* LOG SENSE */
	    {10,  0xe3, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0,
	     0, 0, 0} },
	{0, 0x25, 0, F_D_IN, resp_readcap, NULL,    /* READ CAPACITY(10) */
	    {10,  0xe1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0x1, 0xc7, 0, 0, 0, 0,
	     0, 0} },
	{ARRAY_SIZE(read_iarr), 0x88, 0, F_D_IN | FF_MEDIA_IO, /* READ(16) */
	    resp_read_dt0, read_iarr, {16,  0xfe, 0xff, 0xff, 0xff, 0xff,
	    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
/* 10 */
	{ARRAY_SIZE(write_iarr), 0x8a, 0, F_D_OUT | FF_MEDIA_IO,
	    resp_write_dt0, write_iarr,			/* WRITE(16) */
		{16,  0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
	{0, 0x1b, 0, F_SSU_DELAY, resp_start_stop, NULL,/* START STOP UNIT */
	    {6,  0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(sa_in_16_iarr), 0x9e, 0x10, F_SA_LOW | F_D_IN,
	    resp_readcap16, sa_in_16_iarr, /* SA_IN(16), READ CAPACITY(16) */
		{16,  0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		 0xff, 0xff, 0xff, 0xff, 0x1, 0xc7} },
	{0, 0x9f, 0x12, F_SA_LOW | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
	    NULL, {16,  0x12, 0xf9, 0x0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff,
	    0xff, 0xff, 0xff, 0xff, 0xc7} },  /* SA_OUT(16), WRITE SCAT(16) */
	{ARRAY_SIZE(maint_in_iarr), 0xa3, 0xa, F_SA_LOW | F_D_IN,
	    resp_report_tgtpgs,	/* MAINT IN, REPORT TARGET PORT GROUPS */
		maint_in_iarr, {12,  0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff,
				0xff, 0, 0xc7, 0, 0, 0, 0} },
/* 15 */
	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */
	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(verify_iarr), 0x8f, 0,
	    F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,	/* VERIFY(16) */
	    verify_iarr, {16,  0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
			  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },
	{ARRAY_SIZE(vl_iarr), 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_MEDIA_IO,
	    resp_read_dt0, vl_iarr,	/* VARIABLE LENGTH, READ(32) */
	    {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x9, 0xfe, 0, 0xff, 0xff,
	     0xff, 0xff} },
	{ARRAY_SIZE(reserve_iarr), 0x56, 0, F_D_OUT,
	    NULL, reserve_iarr,	/* RESERVE(10) <no response function> */
	    {10,  0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
	     0} },
	{ARRAY_SIZE(release_iarr), 0x57, 0, F_D_OUT,
	    NULL, release_iarr, /* RELEASE(10) <no response function> */
	    {10,  0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
	     0} },
/* 20 */
	{0, 0x1e, 0, 0, NULL, NULL, /* ALLOW REMOVAL */
	    {6,  0, 0, 0, 0x3, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x1, 0, 0, resp_start_stop, NULL, /* REWIND ?? */
	    {6,  0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */
	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x1d, 0, F_D_OUT, NULL, NULL,	/* SEND DIAGNOSTIC */
	    {6,  0xf7, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x42, 0, F_D_OUT | FF_MEDIA_IO, resp_unmap, NULL, /* UNMAP */
	    {10,  0x1, 0, 0, 0, 0, 0x3f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
/* 25 */
	{0, 0x3b, 0, F_D_OUT_MAYBE, resp_write_buffer, NULL,
	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },			/* WRITE_BUFFER */
	{ARRAY_SIZE(write_same_iarr), 0x41, 0, F_D_OUT_MAYBE | FF_MEDIA_IO,
	    resp_write_same_10, write_same_iarr,	/* WRITE SAME(10) */
		{10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0,
		 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(sync_cache_iarr), 0x35, 0, F_SYNC_DELAY | F_M_ACCESS,
	    resp_sync_cache, sync_cache_iarr,
	    {10,  0x7, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },			/* SYNC_CACHE (10) */
	{0, 0x89, 0, F_D_OUT | FF_MEDIA_IO, resp_comp_write, NULL,
	    {16,  0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0,
	     0, 0xff, 0x3f, 0xc7} },		/* COMPARE AND WRITE */
	{ARRAY_SIZE(pre_fetch_iarr), 0x34, 0, F_SYNC_DELAY | FF_MEDIA_IO,
	    resp_pre_fetch, pre_fetch_iarr,
	    {10,  0x2, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },			/* PRE-FETCH (10) */

/* 30 */
	{ARRAY_SIZE(zone_out_iarr), 0x94, 0x3, F_SA_LOW | F_M_ACCESS,
	    resp_open_zone, zone_out_iarr, /* ZONE_OUT(16), OPEN ZONE */
		{16,  0x3 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		 0xff, 0xff, 0x0, 0x0, 0xff, 0xff, 0x1, 0xc7} },
	{ARRAY_SIZE(zone_in_iarr), 0x95, 0x0, F_SA_LOW | F_M_ACCESS,
	    resp_report_zones, zone_in_iarr, /* ZONE_IN(16), REPORT ZONES */
		{16,  0x0 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xc7} },
/* sentinel */
	{0xff, 0, 0, 0, NULL, NULL,		/* terminating element */
	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};
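/*
 * Dispatch sketch: opcode_ind_arr[cdb[0]] selects a row above; when that
 * row carries an FF_SA flag, the cdb's service action is matched against
 * the row itself and then each of its num_attached entries in arrp[].
 */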

static int sdebug_num_hosts;
static int sdebug_add_host = DEF_NUM_HOST;  /* in sysfs this is relative */
static int sdebug_ato = DEF_ATO;
static int sdebug_cdb_len = DEF_CDB_LEN;
static int sdebug_jdelay = DEF_JDELAY;	/* if > 0 then unit is jiffies */
static int sdebug_dev_size_mb = DEF_DEV_SIZE_PRE_INIT;
static int sdebug_dif = DEF_DIF;
static int sdebug_dix = DEF_DIX;
static int sdebug_dsense = DEF_D_SENSE;
static int sdebug_every_nth = DEF_EVERY_NTH;
static int sdebug_fake_rw = DEF_FAKE_RW;
static unsigned int sdebug_guard = DEF_GUARD;
static int sdebug_host_max_queue;	/* per host */
static int sdebug_lowest_aligned = DEF_LOWEST_ALIGNED;
static int sdebug_max_luns = DEF_MAX_LUNS;
static int sdebug_max_queue = SDEBUG_CANQUEUE;	/* per submit queue */
static unsigned int sdebug_medium_error_start = OPT_MEDIUM_ERR_ADDR;
static int sdebug_medium_error_count = OPT_MEDIUM_ERR_NUM;
static atomic_t retired_max_queue;	/* if > 0 then was prior max_queue */
static int sdebug_ndelay = DEF_NDELAY;	/* if > 0 then unit is nanoseconds */
static int sdebug_no_lun_0 = DEF_NO_LUN_0;
static int sdebug_no_uld;
static int sdebug_num_parts = DEF_NUM_PARTS;
static int sdebug_num_tgts = DEF_NUM_TGTS; /* targets per host */
static int sdebug_opt_blks = DEF_OPT_BLKS;
static int sdebug_opts = DEF_OPTS;
static int sdebug_physblk_exp = DEF_PHYSBLK_EXP;
static int sdebug_opt_xferlen_exp = DEF_OPT_XFERLEN_EXP;
static int sdebug_ptype = DEF_PTYPE; /* SCSI peripheral device type */
static int sdebug_scsi_level = DEF_SCSI_LEVEL;
static int sdebug_sector_size = DEF_SECTOR_SIZE;
static int sdeb_tur_ms_to_ready = DEF_TUR_MS_TO_READY;
static int sdebug_virtual_gb = DEF_VIRTUAL_GB;
static int sdebug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
static unsigned int sdebug_lbpu = DEF_LBPU;
static unsigned int sdebug_lbpws = DEF_LBPWS;
static unsigned int sdebug_lbpws10 = DEF_LBPWS10;
static unsigned int sdebug_lbprz = DEF_LBPRZ;
static unsigned int sdebug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
static unsigned int sdebug_unmap_granularity = DEF_UNMAP_GRANULARITY;
static unsigned int sdebug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
static unsigned int sdebug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
static unsigned int sdebug_write_same_length = DEF_WRITESAME_LENGTH;
static int sdebug_uuid_ctl = DEF_UUID_CTL;
static bool sdebug_random = DEF_RANDOM;
static bool sdebug_per_host_store = DEF_PER_HOST_STORE;
static bool sdebug_removable = DEF_REMOVABLE;
static bool sdebug_clustering;
static bool sdebug_host_lock = DEF_HOST_LOCK;
static bool sdebug_strict = DEF_STRICT;
static bool sdebug_any_injecting_opt;
static bool sdebug_no_rwlock;
static bool sdebug_verbose;
static bool have_dif_prot;
static bool write_since_sync;
static bool sdebug_statistics = DEF_STATISTICS;
static bool sdebug_wp;
/* Following enum: 0: no zbc, def; 1: host aware; 2: host managed */
static enum blk_zoned_model sdeb_zbc_model = BLK_ZONED_NONE;
static char *sdeb_zbc_model_s;

enum sam_lun_addr_method {SAM_LUN_AM_PERIPHERAL = 0x0,
			  SAM_LUN_AM_FLAT = 0x1,
			  SAM_LUN_AM_LOGICAL_UNIT = 0x2,
			  SAM_LUN_AM_EXTENDED = 0x3};
static enum sam_lun_addr_method sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
static int sdebug_lun_am_i = (int)SAM_LUN_AM_PERIPHERAL;

static unsigned int sdebug_store_sectors;
static sector_t sdebug_capacity;	/* in sectors */

/* old BIOS stuff, kernel may get rid of them but some mode sense pages
   may still need them */
static int sdebug_heads;		/* heads per disk */
static int sdebug_cylinders_per;	/* cylinders per surface */
static int sdebug_sectors_per;		/* sectors per cylinder */

static LIST_HEAD(sdebug_host_list);
static DEFINE_SPINLOCK(sdebug_host_list_lock);

static struct xarray per_store_arr;
static struct xarray *per_store_ap = &per_store_arr;
static int sdeb_first_idx = -1;		/* invalid index ==> none created */
static int sdeb_most_recent_idx = -1;
static DEFINE_RWLOCK(sdeb_fake_rw_lck);	/* need a RW lock when fake_rw=1 */

static unsigned long map_size;
static int num_aborts;
static int num_dev_resets;
static int num_target_resets;
static int num_bus_resets;
static int num_host_resets;
static int dix_writes;
static int dix_reads;
static int dif_errors;

/* ZBC global data */
static bool sdeb_zbc_in_use;	/* true for host-aware and host-managed disks */
static int sdeb_zbc_zone_cap_mb;
static int sdeb_zbc_zone_size_mb;
static int sdeb_zbc_max_open = DEF_ZBC_MAX_OPEN_ZONES;
static int sdeb_zbc_nr_conv = DEF_ZBC_NR_CONV_ZONES;

static int submit_queues = DEF_SUBMIT_QUEUES;  /* > 1 for multi-queue (mq) */
static int poll_queues; /* io_uring iopoll interface */
static struct sdebug_queue *sdebug_q_arr;  /* ptr to array of submit queues */

static DEFINE_RWLOCK(atomic_rw);
static DEFINE_RWLOCK(atomic_rw2);

static rwlock_t *ramdisk_lck_a[2];

static char sdebug_proc_name[] = MY_NAME;
static const char *my_name = MY_NAME;

static struct bus_type pseudo_lld_bus;

static struct device_driver sdebug_driverfs_driver = {
	.name 		= sdebug_proc_name,
	.bus		= &pseudo_lld_bus,
};

static const int check_condition_result =
	SAM_STAT_CHECK_CONDITION;

static const int illegal_condition_result =
	(DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;

static const int device_qfull_result =
	(DID_ABORT << 16) | SAM_STAT_TASK_SET_FULL;

static const int condition_met_result = SAM_STAT_CONDITION_MET;


/* Only do the extra work involved in logical block provisioning if one or
 * more of the lbpu, lbpws or lbpws10 parameters are given and we are doing
 * real reads and writes (i.e. not skipping them for speed).
 */
static inline bool scsi_debug_lbp(void)
{
	return 0 == sdebug_fake_rw &&
		(sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10);
}

static void *lba2fake_store(struct sdeb_store_info *sip,
			    unsigned long long lba)
{
	struct sdeb_store_info *lsip = sip;

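	/*
	 * do_div() divides its first argument in place and returns the
	 * remainder, so lba now holds lba % sdebug_store_sectors; with the
	 * virtual_gb parameter the advertised capacity can exceed the store,
	 * and accesses simply wrap.
	 */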
	lba = do_div(lba, sdebug_store_sectors);
	if (!sip || !sip->storep) {
		WARN_ON_ONCE(true);
		lsip = xa_load(per_store_ap, 0);  /* should never be NULL */
	}
	return lsip->storep + lba * sdebug_sector_size;
}

static struct t10_pi_tuple *dif_store(struct sdeb_store_info *sip,
				      sector_t sector)
{
	sector = sector_div(sector, sdebug_store_sectors);

	return sip->dif_storep + sector;
}

static void sdebug_max_tgts_luns(void)
{
	struct sdebug_host_info *sdbg_host;
	struct Scsi_Host *hpnt;

	spin_lock(&sdebug_host_list_lock);
	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
		hpnt = sdbg_host->shost;
		if ((hpnt->this_id >= 0) &&
		    (sdebug_num_tgts > hpnt->this_id))
			hpnt->max_id = sdebug_num_tgts + 1;
		else
			hpnt->max_id = sdebug_num_tgts;
		/* sdebug_max_luns; */
		hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
	}
	spin_unlock(&sdebug_host_list_lock);
}

enum sdeb_cmd_data {SDEB_IN_DATA = 0, SDEB_IN_CDB = 1};

/* Set in_bit to -1 to indicate no bit position of invalid field */
static void mk_sense_invalid_fld(struct scsi_cmnd *scp,
				 enum sdeb_cmd_data c_d,
				 int in_byte, int in_bit)
{
	unsigned char *sbuff;
	u8 sks[4];
	int sl, asc;

	sbuff = scp->sense_buffer;
	if (!sbuff) {
		sdev_printk(KERN_ERR, scp->device,
			    "%s: sense_buffer is NULL\n", __func__);
		return;
	}
	asc = c_d ? INVALID_FIELD_IN_CDB : INVALID_FIELD_IN_PARAM_LIST;
	memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
	scsi_build_sense(scp, sdebug_dsense, ILLEGAL_REQUEST, asc, 0);
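	/*
	 * Build the sense-key-specific (SKS) bytes per SPC: sks[0] carries
	 * SKSV (0x80), C/D (0x40 when the bad field is in the cdb) and BPV
	 * (0x08) with a 3-bit bit pointer; sks[1..2] hold the big-endian
	 * field (byte) pointer.
	 */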
	memset(sks, 0, sizeof(sks));
	sks[0] = 0x80;
	if (c_d)
		sks[0] |= 0x40;
	if (in_bit >= 0) {
		sks[0] |= 0x8;
		sks[0] |= 0x7 & in_bit;
	}
	put_unaligned_be16(in_byte, sks + 1);
	if (sdebug_dsense) {
		sl = sbuff[7] + 8;
		sbuff[7] = sl;
		sbuff[sl] = 0x2;
		sbuff[sl + 1] = 0x6;
		memcpy(sbuff + sl + 4, sks, 3);
	} else
		memcpy(sbuff + 15, sks, 3);
	if (sdebug_verbose)
		sdev_printk(KERN_INFO, scp->device, "%s:  [sense_key,asc,ascq"
			    "]: [0x5,0x%x,0x0] %c byte=%d, bit=%d\n",
			    my_name, asc, c_d ? 'C' : 'D', in_byte, in_bit);
}

static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq)
{
	if (!scp->sense_buffer) {
		sdev_printk(KERN_ERR, scp->device,
			    "%s: sense_buffer is NULL\n", __func__);
		return;
	}
	memset(scp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);

	scsi_build_sense(scp, sdebug_dsense, key, asc, asq);

	if (sdebug_verbose)
		sdev_printk(KERN_INFO, scp->device,
			    "%s:  [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
			    my_name, key, asc, asq);
}

static void mk_sense_invalid_opcode(struct scsi_cmnd *scp)
{
	mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
}

static int scsi_debug_ioctl(struct scsi_device *dev, unsigned int cmd,
			    void __user *arg)
{
	if (sdebug_verbose) {
		if (0x1261 == cmd)
			sdev_printk(KERN_INFO, dev,
				    "%s: BLKFLSBUF [0x1261]\n", __func__);
		else if (0x5331 == cmd)
			sdev_printk(KERN_INFO, dev,
				    "%s: CDROM_GET_CAPABILITY [0x5331]\n",
				    __func__);
		else
			sdev_printk(KERN_INFO, dev, "%s: cmd=0x%x\n",
				    __func__, cmd);
	}
	return -EINVAL;
	/* return -ENOTTY; // correct return but upsets fdisk */
}

static void config_cdb_len(struct scsi_device *sdev)
{
	switch (sdebug_cdb_len) {
	case 6:	/* suggest 6 byte READ, WRITE and MODE SENSE/SELECT */
		sdev->use_10_for_rw = false;
		sdev->use_16_for_rw = false;
		sdev->use_10_for_ms = false;
		break;
	case 10: /* suggest 10 byte RWs and 6 byte MODE SENSE/SELECT */
		sdev->use_10_for_rw = true;
		sdev->use_16_for_rw = false;
		sdev->use_10_for_ms = false;
		break;
	case 12: /* suggest 10 byte RWs and 10 byte MODE SENSE/SELECT */
		sdev->use_10_for_rw = true;
		sdev->use_16_for_rw = false;
		sdev->use_10_for_ms = true;
		break;
	case 16:
		sdev->use_10_for_rw = false;
		sdev->use_16_for_rw = true;
		sdev->use_10_for_ms = true;
		break;
	case 32: /* No knobs to suggest this so same as 16 for now */
		sdev->use_10_for_rw = false;
		sdev->use_16_for_rw = true;
		sdev->use_10_for_ms = true;
		break;
	default:
		pr_warn("unexpected cdb_len=%d, force to 10\n",
			sdebug_cdb_len);
		sdev->use_10_for_rw = true;
		sdev->use_16_for_rw = false;
		sdev->use_10_for_ms = false;
		sdebug_cdb_len = 10;
		break;
	}
}

static void all_config_cdb_len(void)
{
	struct sdebug_host_info *sdbg_host;
	struct Scsi_Host *shost;
	struct scsi_device *sdev;

	spin_lock(&sdebug_host_list_lock);
	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
		shost = sdbg_host->shost;
		shost_for_each_device(sdev, shost) {
			config_cdb_len(sdev);
		}
	}
	spin_unlock(&sdebug_host_list_lock);
}

static void clear_luns_changed_on_target(struct sdebug_dev_info *devip)
{
	struct sdebug_host_info *sdhp;
	struct sdebug_dev_info *dp;

	spin_lock(&sdebug_host_list_lock);
	list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
		list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
			if ((devip->sdbg_host == dp->sdbg_host) &&
			    (devip->target == dp->target))
				clear_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
		}
	}
	spin_unlock(&sdebug_host_list_lock);
}

static int make_ua(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	int k;

	k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS);
	if (k != SDEBUG_NUM_UAS) {
		const char *cp = NULL;

		switch (k) {
		case SDEBUG_UA_POR:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
					POWER_ON_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "power on reset";
			break;
		case SDEBUG_UA_POOCCUR:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
					POWER_ON_OCCURRED_ASCQ);
			if (sdebug_verbose)
				cp = "power on occurred";
			break;
		case SDEBUG_UA_BUS_RESET:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
					BUS_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "bus reset";
			break;
		case SDEBUG_UA_MODE_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
					MODE_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "mode parameters changed";
			break;
		case SDEBUG_UA_CAPACITY_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
					CAPACITY_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "capacity data changed";
			break;
		case SDEBUG_UA_MICROCODE_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					MICROCODE_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "microcode has been changed";
			break;
		case SDEBUG_UA_MICROCODE_CHANGED_WO_RESET:
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					MICROCODE_CHANGED_WO_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "microcode has been changed without reset";
			break;
		case SDEBUG_UA_LUNS_CHANGED:
			/*
			 * SPC-3 behavior is to report a UNIT ATTENTION with
			 * ASC/ASCQ REPORTED LUNS DATA HAS CHANGED on every LUN
			 * on the target, until a REPORT LUNS command is
			 * received.  SPC-4 behavior is to report it only once.
			 * NOTE:  sdebug_scsi_level does not use the same
			 * values as struct scsi_device->scsi_level.
			 */
			if (sdebug_scsi_level >= 6)	/* SPC-4 and above */
				clear_luns_changed_on_target(devip);
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					LUNS_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "reported luns data has changed";
			break;
		default:
			pr_warn("unexpected unit attention code=%d\n", k);
			if (sdebug_verbose)
				cp = "unknown";
			break;
		}
		clear_bit(k, devip->uas_bm);
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, scp->device,
				   "%s reports: Unit attention: %s\n",
				   my_name, cp);
		return check_condition_result;
	}
	return 0;
}

/* Build SCSI "data-in" buffer. Returns 0 if ok else (DID_ERROR << 16). */
static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
				int arr_len)
{
	int act_len;
	struct scsi_data_buffer *sdb = &scp->sdb;

	if (!sdb->length)
		return 0;
	if (scp->sc_data_direction != DMA_FROM_DEVICE)
		return DID_ERROR << 16;

	act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
				      arr, arr_len);
	scsi_set_resid(scp, scsi_bufflen(scp) - act_len);

	return 0;
}

/* Partial build of SCSI "data-in" buffer. Returns 0 if ok else
 * (DID_ERROR << 16). Can write to offset in data-in buffer. If multiple
 * calls, not required to write in ascending offset order. Assumes resid
 * set to scsi_bufflen() prior to any calls.
 */
static int p_fill_from_dev_buffer(struct scsi_cmnd *scp, const void *arr,
				  int arr_len, unsigned int off_dst)
{
	unsigned int act_len, n;
	struct scsi_data_buffer *sdb = &scp->sdb;
	off_t skip = off_dst;

	if (sdb->length <= off_dst)
		return 0;
	if (scp->sc_data_direction != DMA_FROM_DEVICE)
		return DID_ERROR << 16;

	act_len = sg_pcopy_from_buffer(sdb->table.sgl, sdb->table.nents,
				       arr, arr_len, skip);
	pr_debug("%s: off_dst=%u, scsi_bufflen=%u, act_len=%u, resid=%d\n",
		 __func__, off_dst, scsi_bufflen(scp), act_len,
		 scsi_get_resid(scp));
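	/* Partial writes may arrive in any order, so resid only ever
	 * shrinks: keep the smallest "bytes beyond what has been written"
	 * value seen so far.
	 */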
	n = scsi_bufflen(scp) - (off_dst + act_len);
	scsi_set_resid(scp, min_t(u32, scsi_get_resid(scp), n));
	return 0;
}

/* Fetches from SCSI "data-out" buffer. Returns number of bytes fetched into
 * 'arr' or -1 if error.
 */
static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
			       int arr_len)
{
	if (!scsi_bufflen(scp))
		return 0;
	if (scp->sc_data_direction != DMA_TO_DEVICE)
		return -1;

	return scsi_sg_copy_to_buffer(scp, arr, arr_len);
}


static char sdebug_inq_vendor_id[9] = "Linux   ";
static char sdebug_inq_product_id[17] = "scsi_debug      ";
static char sdebug_inq_product_rev[5] = SDEBUG_VERSION;
/* Use some locally assigned NAAs for SAS addresses. */
static const u64 naa3_comp_a = 0x3222222000000000ULL;
static const u64 naa3_comp_b = 0x3333333000000000ULL;
static const u64 naa3_comp_c = 0x3111111000000000ULL;

/* Device identification VPD page. Returns number of bytes placed in arr */
static int inquiry_vpd_83(unsigned char *arr, int port_group_id,
			  int target_dev_id, int dev_id_num,
			  const char *dev_id_str, int dev_id_str_len,
			  const uuid_t *lu_name)
{
	int num, port_a;
	char b[32];

	port_a = target_dev_id + 1;
	/* T10 vendor identifier field format (faked) */
	arr[0] = 0x2;	/* ASCII */
	arr[1] = 0x1;
	arr[2] = 0x0;
	memcpy(&arr[4], sdebug_inq_vendor_id, 8);
	memcpy(&arr[12], sdebug_inq_product_id, 16);
	memcpy(&arr[28], dev_id_str, dev_id_str_len);
	num = 8 + 16 + dev_id_str_len;
	arr[3] = num;
	num += 4;
	if (dev_id_num >= 0) {
		if (sdebug_uuid_ctl) {
			/* Locally assigned UUID */
			arr[num++] = 0x1;  /* binary (not necessarily sas) */
			arr[num++] = 0xa;  /* PIV=0, lu, naa */
			arr[num++] = 0x0;
			arr[num++] = 0x12;
			arr[num++] = 0x10; /* uuid type=1, locally assigned */
			arr[num++] = 0x0;
			memcpy(arr + num, lu_name, 16);
			num += 16;
		} else {
			/* NAA-3, Logical unit identifier (binary) */
			arr[num++] = 0x1;  /* binary (not necessarily sas) */
			arr[num++] = 0x3;  /* PIV=0, lu, naa */
			arr[num++] = 0x0;
			arr[num++] = 0x8;
			put_unaligned_be64(naa3_comp_b + dev_id_num, arr + num);
			num += 8;
		}
		/* Target relative port number */
		arr[num++] = 0x61;	/* proto=sas, binary */
		arr[num++] = 0x94;	/* PIV=1, target port, rel port */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x4;	/* length */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x0;
		arr[num++] = 0x1;	/* relative port A */
	}
	/* NAA-3, Target port identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x93;	/* piv=1, target port, naa */
	arr[num++] = 0x0;
	arr[num++] = 0x8;
	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
	num += 8;
	/* NAA-3, Target port group identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x95;	/* piv=1, target port group id */
	arr[num++] = 0x0;
	arr[num++] = 0x4;
	arr[num++] = 0;
	arr[num++] = 0;
	put_unaligned_be16(port_group_id, arr + num);
	num += 2;
	/* NAA-3, Target device identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0xa3;	/* piv=1, target device, naa */
	arr[num++] = 0x0;
	arr[num++] = 0x8;
	put_unaligned_be64(naa3_comp_a + target_dev_id, arr + num);
	num += 8;
	/* SCSI name string: Target device identifier */
	arr[num++] = 0x63;	/* proto=sas, UTF-8 */
	arr[num++] = 0xa8;	/* piv=1, target device, SCSI name string */
	arr[num++] = 0x0;
	arr[num++] = 24;
	memcpy(arr + num, "naa.32222220", 12);
	num += 12;
	snprintf(b, sizeof(b), "%08X", target_dev_id);
	memcpy(arr + num, b, 8);
	num += 8;
	memset(arr + num, 0, 4);
	num += 4;
	return num;
}
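/*
 * Worked example: with dev_id_num == 5, the logical unit designator above
 * becomes naa3_comp_b + 5 = 0x3333333000000005, and the SCSI name string is
 * "naa.32222220" followed by target_dev_id rendered as 8 hex digits.
 */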

static unsigned char vpd84_data[] = {
/* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
    0x22,0x22,0x22,0x0,0xbb,0x1,
    0x22,0x22,0x22,0x0,0xbb,0x2,
};

/*  Software interface identification VPD page */
static int inquiry_vpd_84(unsigned char *arr)
{
	memcpy(arr, vpd84_data, sizeof(vpd84_data));
	return sizeof(vpd84_data);
}

/* Management network addresses VPD page */
static int inquiry_vpd_85(unsigned char *arr)
{
	int num = 0;
	const char *na1 = "https://www.kernel.org/config";
	const char *na2 = "http://www.kernel.org/log";
	int plen, olen;

	arr[num++] = 0x1;	/* lu, storage config */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;
	olen = strlen(na1);
	plen = olen + 1;
	if (plen % 4)
		plen = ((plen / 4) + 1) * 4;
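	/* e.g. strlen(na1) == 29, so plen = 30, rounded up to 32 */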
	arr[num++] = plen;	/* length, null terminated, padded */
	memcpy(arr + num, na1, olen);
	memset(arr + num + olen, 0, plen - olen);
	num += plen;

	arr[num++] = 0x4;	/* lu, logging */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;
	olen = strlen(na2);
	plen = olen + 1;
	if (plen % 4)
		plen = ((plen / 4) + 1) * 4;
	arr[num++] = plen;	/* length, null terminated, padded */
	memcpy(arr + num, na2, olen);
	memset(arr + num + olen, 0, plen - olen);
	num += plen;

	return num;
}

/* SCSI ports VPD page */
static int inquiry_vpd_88(unsigned char *arr, int target_dev_id)
{
	int num = 0;
	int port_a, port_b;

	port_a = target_dev_id + 1;
	port_b = port_a + 1;
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;
	arr[num++] = 0x1;	/* relative port 1 (primary) */
	memset(arr + num, 0, 6);
	num += 6;
	arr[num++] = 0x0;
	arr[num++] = 12;	/* length tp descriptor */
	/* naa-5 target port identifier (A) */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x8;	/* length */
	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
	num += 8;
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;
	arr[num++] = 0x2;	/* relative port 2 (secondary) */
	memset(arr + num, 0, 6);
	num += 6;
	arr[num++] = 0x0;
	arr[num++] = 12;	/* length tp descriptor */
	/* naa-5 target port identifier (B) */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x8;	/* length */
	put_unaligned_be64(naa3_comp_a + port_b, arr + num);
	num += 8;

	return num;
}


static unsigned char vpd89_data[] = {
/* from 4th byte */ 0,0,0,0,
'l','i','n','u','x',' ',' ',' ',
'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
'1','2','3','4',
0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
0xec,0,0,0,
0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
0x53,0x41,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
0x20,0x20,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
0x10,0x80,
0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
};

/* ATA Information VPD page */
static int inquiry_vpd_89(unsigned char *arr)
{
	memcpy(arr, vpd89_data, sizeof(vpd89_data));
	return sizeof(vpd89_data);
}


static unsigned char vpdb0_data[] = {
	/* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
};

/* Block limits VPD page (SBC-3) */
static int inquiry_vpd_b0(unsigned char *arr)
{
	unsigned int gran;

	memcpy(arr, vpdb0_data, sizeof(vpdb0_data));

	/* Optimal transfer length granularity */
	if (sdebug_opt_xferlen_exp != 0 &&
	    sdebug_physblk_exp < sdebug_opt_xferlen_exp)
		gran = 1 << sdebug_opt_xferlen_exp;
	else
		gran = 1 << sdebug_physblk_exp;
	put_unaligned_be16(gran, arr + 2);

	/* Maximum Transfer Length */
	if (sdebug_store_sectors > 0x400)
		put_unaligned_be32(sdebug_store_sectors, arr + 4);

	/* Optimal Transfer Length */
	put_unaligned_be32(sdebug_opt_blks, &arr[8]);

	if (sdebug_lbpu) {
		/* Maximum Unmap LBA Count */
		put_unaligned_be32(sdebug_unmap_max_blocks, &arr[16]);

		/* Maximum Unmap Block Descriptor Count */
		put_unaligned_be32(sdebug_unmap_max_desc, &arr[20]);
	}

	/* Unmap Granularity Alignment */
	if (sdebug_unmap_alignment) {
		put_unaligned_be32(sdebug_unmap_alignment, &arr[28]);
		arr[28] |= 0x80; /* UGAVALID */
	}

	/* Optimal Unmap Granularity */
	put_unaligned_be32(sdebug_unmap_granularity, &arr[24]);

	/* Maximum WRITE SAME Length */
	put_unaligned_be64(sdebug_write_same_length, &arr[32]);

	return 0x3c; /* Mandatory page length for Logical Block Provisioning */
}

/* Block device characteristics VPD page (SBC-3) */
static int inquiry_vpd_b1(struct sdebug_dev_info *devip, unsigned char *arr)
{
	memset(arr, 0, 0x3c);
	arr[0] = 0;
	arr[1] = 1;	/* non rotating medium (e.g. solid state) */
	arr[2] = 0;
	arr[3] = 5;	/* less than 1.8" */
	if (devip->zmodel == BLK_ZONED_HA
1525 		arr[4] = 1 << 4;	/* zoned field = 01b */
1526 
1527 	return 0x3c;
1528 }
1529 
1530 /* Logical block provisioning VPD page (SBC-4) */
1531 static int inquiry_vpd_b2(unsigned char *arr)
1532 {
1533 	memset(arr, 0, 0x4);
1534 	arr[0] = 0;			/* threshold exponent */
1535 	if (sdebug_lbpu)
1536 		arr[1] = 1 << 7;
1537 	if (sdebug_lbpws)
1538 		arr[1] |= 1 << 6;
1539 	if (sdebug_lbpws10)
1540 		arr[1] |= 1 << 5;
1541 	if (sdebug_lbprz && scsi_debug_lbp())
1542 		arr[1] |= (sdebug_lbprz & 0x7) << 2;  /* sbc4r07 and later */
1543 	/* anc_sup=0; dp=0 (no provisioning group descriptor) */
1544 	/* minimum_percentage=0; provisioning_type=0 (unknown) */
1545 	/* threshold_percentage=0 */
1546 	return 0x4;
1547 }
1548 
1549 /* Zoned block device characteristics VPD page (ZBC mandatory) */
1550 static int inquiry_vpd_b6(struct sdebug_dev_info *devip, unsigned char *arr)
1551 {
1552 	memset(arr, 0, 0x3c);
1553 	arr[0] = 0x1; /* set URSWRZ (unrestricted read in seq. wr req zone) */
1554 	/*
1555 	 * Set Optimal number of open sequential write preferred zones and
1556 	 * Optimal number of non-sequentially written sequential write
1557 	 * preferred zones fields to 'not reported' (0xffffffff). Leave other
1558 	 * fields set to zero, apart from Max. number of open swrz_s field.
1559 	 */
1560 	put_unaligned_be32(0xffffffff, &arr[4]);
1561 	put_unaligned_be32(0xffffffff, &arr[8]);
1562 	if (sdeb_zbc_model == BLK_ZONED_HM && devip->max_open)
1563 		put_unaligned_be32(devip->max_open, &arr[12]);
1564 	else
1565 		put_unaligned_be32(0xffffffff, &arr[12]);
1566 	if (devip->zcap < devip->zsize) {
1567 		arr[19] = ZBC_CONSTANT_ZONE_START_OFFSET;
1568 		put_unaligned_be64(devip->zsize, &arr[20]);
1569 	} else {
1570 		arr[19] = 0;
1571 	}
1572 	return 0x3c;
1573 }
1574 
1575 #define SDEBUG_LONG_INQ_SZ 96
1576 #define SDEBUG_MAX_INQ_ARR_SZ 584
1577 
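/*
 * Respond to the INQUIRY command: either the standard INQUIRY data or,
 * when the EVPD bit is set, one of the VPD pages built by the
 * inquiry_vpd_*() helpers above. The obsolete CmdDt bit is rejected
 * with ILLEGAL REQUEST.
 */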
1578 static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1579 {
1580 	unsigned char pq_pdt;
1581 	unsigned char *arr;
1582 	unsigned char *cmd = scp->cmnd;
1583 	u32 alloc_len, n;
1584 	int ret;
1585 	bool have_wlun, is_disk, is_zbc, is_disk_zbc;
1586 
1587 	alloc_len = get_unaligned_be16(cmd + 3);
1588 	arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
1589 	if (!arr)
1590 		return DID_REQUEUE << 16;
1591 	is_disk = (sdebug_ptype == TYPE_DISK);
1592 	is_zbc = (devip->zmodel != BLK_ZONED_NONE);
1593 	is_disk_zbc = (is_disk || is_zbc);
1594 	have_wlun = scsi_is_wlun(scp->device->lun);
1595 	if (have_wlun)
1596 		pq_pdt = TYPE_WLUN;	/* present, wlun */
1597 	else if (sdebug_no_lun_0 && (devip->lun == SDEBUG_LUN_0_VAL))
1598 		pq_pdt = 0x7f;	/* not present, PQ=3, PDT=0x1f */
1599 	else
1600 		pq_pdt = (sdebug_ptype & 0x1f);
1601 	arr[0] = pq_pdt;
1602 	if (0x2 & cmd[1]) {  /* CMDDT bit set */
1603 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
1604 		kfree(arr);
1605 		return check_condition_result;
1606 	} else if (0x1 & cmd[1]) {  /* EVPD bit set */
1607 		int lu_id_num, port_group_id, target_dev_id;
1608 		u32 len;
1609 		char lu_id_str[6];
1610 		int host_no = devip->sdbg_host->shost->host_no;
1611 
1612 		port_group_id = (((host_no + 1) & 0x7f) << 8) +
1613 		    (devip->channel & 0x7f);
1614 		if (sdebug_vpd_use_hostno == 0)
1615 			host_no = 0;
1616 		lu_id_num = have_wlun ? -1 : (((host_no + 1) * 2000) +
1617 			    (devip->target * 1000) + devip->lun);
1618 		target_dev_id = ((host_no + 1) * 2000) +
1619 				 (devip->target * 1000) - 3;
1620 		len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
1621 		if (0 == cmd[2]) { /* supported vital product data pages */
1622 			arr[1] = cmd[2];	/*sanity */
1623 			n = 4;
1624 			arr[n++] = 0x0;   /* this page */
1625 			arr[n++] = 0x80;  /* unit serial number */
1626 			arr[n++] = 0x83;  /* device identification */
1627 			arr[n++] = 0x84;  /* software interface ident. */
1628 			arr[n++] = 0x85;  /* management network addresses */
1629 			arr[n++] = 0x86;  /* extended inquiry */
1630 			arr[n++] = 0x87;  /* mode page policy */
1631 			arr[n++] = 0x88;  /* SCSI ports */
1632 			if (is_disk_zbc) {	  /* SBC or ZBC */
1633 				arr[n++] = 0x89;  /* ATA information */
1634 				arr[n++] = 0xb0;  /* Block limits */
1635 				arr[n++] = 0xb1;  /* Block characteristics */
1636 				if (is_disk)
1637 					arr[n++] = 0xb2;  /* LB Provisioning */
1638 				if (is_zbc)
1639 					arr[n++] = 0xb6;  /* ZB dev. char. */
1640 			}
1641 			arr[3] = n - 4;	  /* number of supported VPD pages */
1642 		} else if (0x80 == cmd[2]) { /* unit serial number */
1643 			arr[1] = cmd[2];	/*sanity */
1644 			arr[3] = len;
1645 			memcpy(&arr[4], lu_id_str, len);
1646 		} else if (0x83 == cmd[2]) { /* device identification */
1647 			arr[1] = cmd[2];	/*sanity */
1648 			arr[3] = inquiry_vpd_83(&arr[4], port_group_id,
1649 						target_dev_id, lu_id_num,
1650 						lu_id_str, len,
1651 						&devip->lu_name);
1652 		} else if (0x84 == cmd[2]) { /* Software interface ident. */
1653 			arr[1] = cmd[2];	/*sanity */
1654 			arr[3] = inquiry_vpd_84(&arr[4]);
1655 		} else if (0x85 == cmd[2]) { /* Management network addresses */
1656 			arr[1] = cmd[2];	/*sanity */
1657 			arr[3] = inquiry_vpd_85(&arr[4]);
1658 		} else if (0x86 == cmd[2]) { /* extended inquiry */
1659 			arr[1] = cmd[2];	/*sanity */
1660 			arr[3] = 0x3c;	/* number of following entries */
1661 			if (sdebug_dif == T10_PI_TYPE3_PROTECTION)
1662 				arr[4] = 0x4;	/* SPT: GRD_CHK:1 */
1663 			else if (have_dif_prot)
1664 				arr[4] = 0x5;   /* SPT: GRD_CHK:1, REF_CHK:1 */
1665 			else
1666 				arr[4] = 0x0;   /* no protection stuff */
1667 			arr[5] = 0x7;   /* head of q, ordered + simple q's */
1668 		} else if (0x87 == cmd[2]) { /* mode page policy */
1669 			arr[1] = cmd[2];	/*sanity */
1670 			arr[3] = 0x8;	/* number of following entries */
1671 			arr[4] = 0x2;	/* disconnect-reconnect mp */
1672 			arr[6] = 0x80;	/* mlus, shared */
1673 			arr[8] = 0x18;	 /* protocol specific lu */
1674 			arr[10] = 0x82;	 /* mlus, per initiator port */
1675 		} else if (0x88 == cmd[2]) { /* SCSI Ports */
1676 			arr[1] = cmd[2];	/*sanity */
1677 			arr[3] = inquiry_vpd_88(&arr[4], target_dev_id);
1678 		} else if (is_disk_zbc && 0x89 == cmd[2]) { /* ATA info */
1679 			arr[1] = cmd[2];        /*sanity */
1680 			n = inquiry_vpd_89(&arr[4]);
1681 			put_unaligned_be16(n, arr + 2);
1682 		} else if (is_disk_zbc && 0xb0 == cmd[2]) { /* Block limits */
1683 			arr[1] = cmd[2];        /*sanity */
1684 			arr[3] = inquiry_vpd_b0(&arr[4]);
1685 		} else if (is_disk_zbc && 0xb1 == cmd[2]) { /* Block char. */
1686 			arr[1] = cmd[2];        /*sanity */
1687 			arr[3] = inquiry_vpd_b1(devip, &arr[4]);
1688 		} else if (is_disk && 0xb2 == cmd[2]) { /* LB Prov. */
1689 			arr[1] = cmd[2];        /*sanity */
1690 			arr[3] = inquiry_vpd_b2(&arr[4]);
1691 		} else if (is_zbc && cmd[2] == 0xb6) { /* ZB dev. charact. */
1692 			arr[1] = cmd[2];        /*sanity */
1693 			arr[3] = inquiry_vpd_b6(devip, &arr[4]);
1694 		} else {
1695 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
1696 			kfree(arr);
1697 			return check_condition_result;
1698 		}
1699 		len = min_t(u32, get_unaligned_be16(arr + 2) + 4, alloc_len);
1700 		ret = fill_from_dev_buffer(scp, arr,
1701 			    min_t(u32, len, SDEBUG_MAX_INQ_ARR_SZ));
1702 		kfree(arr);
1703 		return ret;
1704 	}
1705 	/* drops through here for a standard inquiry */
1706 	arr[1] = sdebug_removable ? 0x80 : 0;	/* Removable disk */
1707 	arr[2] = sdebug_scsi_level;
1708 	arr[3] = 2;    /* response_data_format==2 */
1709 	arr[4] = SDEBUG_LONG_INQ_SZ - 5;
1710 	arr[5] = (int)have_dif_prot;	/* PROTECT bit */
1711 	if (sdebug_vpd_use_hostno == 0)
1712 		arr[5] |= 0x10; /* claim: implicit TPGS */
1713 	arr[6] = 0x10; /* claim: MultiP */
1714 	/* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
1715 	arr[7] = 0xa; /* claim: LINKED + CMDQUE */
1716 	memcpy(&arr[8], sdebug_inq_vendor_id, 8);
1717 	memcpy(&arr[16], sdebug_inq_product_id, 16);
1718 	memcpy(&arr[32], sdebug_inq_product_rev, 4);
1719 	/* Use Vendor Specific area to place driver date in ASCII */
1720 	memcpy(&arr[36], sdebug_version_date, 8);
1721 	/* version descriptors (2 bytes each) follow */
1722 	put_unaligned_be16(0xc0, arr + 58);   /* SAM-6 no version claimed */
1723 	put_unaligned_be16(0x5c0, arr + 60);  /* SPC-5 no version claimed */
1724 	n = 62;
1725 	if (is_disk) {		/* SBC-4 no version claimed */
1726 		put_unaligned_be16(0x600, arr + n);
1727 		n += 2;
1728 	} else if (sdebug_ptype == TYPE_TAPE) {	/* SSC-4 rev 3 */
1729 		put_unaligned_be16(0x525, arr + n);
1730 		n += 2;
1731 	} else if (is_zbc) {	/* ZBC BSR INCITS 536 revision 05 */
1732 		put_unaligned_be16(0x624, arr + n);
1733 		n += 2;
1734 	}
1735 	put_unaligned_be16(0x2100, arr + n);	/* SPL-4 no version claimed */
1736 	ret = fill_from_dev_buffer(scp, arr,
1737 			    min_t(u32, alloc_len, SDEBUG_LONG_INQ_SZ));
1738 	kfree(arr);
1739 	return ret;
1740 }
1741 
1742 /* See resp_iec_m_pg() for how this data is manipulated */
1743 static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
1744 				   0, 0, 0x0, 0x0};
1745 
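/*
 * Respond to the REQUEST SENSE command. Sense data is normally returned
 * with the failed command (autosense), so this mainly reports "pollable"
 * conditions: a stopped/not-ready state, or THRESHOLD EXCEEDED when the
 * IEC mode page has TEST=1 and MRIE=6. Descriptor (0x72) or fixed (0x70)
 * format is chosen by the DESC bit in the CDB.
 */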
1746 static int resp_requests(struct scsi_cmnd *scp,
1747 			 struct sdebug_dev_info *devip)
1748 {
1749 	unsigned char *cmd = scp->cmnd;
1750 	unsigned char arr[SCSI_SENSE_BUFFERSIZE];	/* assume >= 18 bytes */
1751 	bool dsense = !!(cmd[1] & 1);
1752 	u32 alloc_len = cmd[4];
1753 	u32 len = 18;
1754 	int stopped_state = atomic_read(&devip->stopped);
1755 
1756 	memset(arr, 0, sizeof(arr));
1757 	if (stopped_state > 0) {	/* some "pollable" data [spc6r02: 5.12.2] */
1758 		if (dsense) {
1759 			arr[0] = 0x72;
1760 			arr[1] = NOT_READY;
1761 			arr[2] = LOGICAL_UNIT_NOT_READY;
1762 			arr[3] = (stopped_state == 2) ? 0x1 : 0x2;
1763 			len = 8;
1764 		} else {
1765 			arr[0] = 0x70;
1766 			arr[2] = NOT_READY;		/* NOT_READY in sense_key */
1767 			arr[7] = 0xa;			/* 18 byte sense buffer */
1768 			arr[12] = LOGICAL_UNIT_NOT_READY;
1769 			arr[13] = (stopped_state == 2) ? 0x1 : 0x2;
1770 		}
1771 	} else if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
1772 		/* Information exceptions control mode page: TEST=1, MRIE=6 */
1773 		if (dsense) {
1774 			arr[0] = 0x72;
1775 			arr[1] = 0x0;		/* NO_SENSE in sense_key */
1776 			arr[2] = THRESHOLD_EXCEEDED;
1777 			arr[3] = 0xff;		/* Failure prediction(false) */
1778 			len = 8;
1779 		} else {
1780 			arr[0] = 0x70;
1781 			arr[2] = 0x0;		/* NO_SENSE in sense_key */
1782 			arr[7] = 0xa;		/* 18 byte sense buffer */
1783 			arr[12] = THRESHOLD_EXCEEDED;
1784 			arr[13] = 0xff;		/* Failure prediction(false) */
1785 		}
1786 	} else {	/* nothing to report */
1787 		if (dsense) {
1788 			len = 8;
1789 			memset(arr, 0, len);
1790 			arr[0] = 0x72;
1791 		} else {
1792 			memset(arr, 0, len);
1793 			arr[0] = 0x70;
1794 			arr[7] = 0xa;
1795 		}
1796 	}
1797 	return fill_from_dev_buffer(scp, arr, min_t(u32, len, alloc_len));
1798 }
1799 
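/*
 * Respond to the START STOP UNIT command. A POWER CONDITION field other
 * than 0 is rejected. While the tur_ms_to_ready delay is still running
 * (stopped == 2), a START is also rejected so the delay cannot be
 * bypassed.
 */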
1800 static int resp_start_stop(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1801 {
1802 	unsigned char *cmd = scp->cmnd;
1803 	int power_cond, want_stop, stopped_state;
1804 	bool changing;
1805 
1806 	power_cond = (cmd[4] & 0xf0) >> 4;
1807 	if (power_cond) {
1808 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 7);
1809 		return check_condition_result;
1810 	}
1811 	want_stop = !(cmd[4] & 1);
1812 	stopped_state = atomic_read(&devip->stopped);
1813 	if (stopped_state == 2) {
1814 		ktime_t now_ts = ktime_get_boottime();
1815 
1816 		if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
1817 			u64 diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));
1818 
1819 			if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
1820 				/* tur_ms_to_ready timer expired */
1821 				atomic_set(&devip->stopped, 0);
1822 				stopped_state = 0;
1823 			}
1824 		}
1825 		if (stopped_state == 2) {
1826 			if (want_stop) {
1827 				stopped_state = 1;	/* dummy up success */
1828 			} else {	/* Disallow tur_ms_to_ready delay to be overridden */
1829 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 0 /* START bit */);
1830 				return check_condition_result;
1831 			}
1832 		}
1833 	}
1834 	changing = (stopped_state != want_stop);
1835 	if (changing)
1836 		atomic_xchg(&devip->stopped, want_stop);
1837 	if (!changing || (cmd[1] & 0x1))  /* state unchanged or IMMED bit set in cdb */
1838 		return SDEG_RES_IMMED_MASK;
1839 	else
1840 		return 0;
1841 }
1842 
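/*
 * Number of addressable blocks: derived from the virtual_gb parameter
 * when it is set (the result may exceed, and then wrap around, the
 * actual store), otherwise the number of sectors in the store.
 */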
1843 static sector_t get_sdebug_capacity(void)
1844 {
1845 	static const unsigned int gibibyte = 1073741824;
1846 
1847 	if (sdebug_virtual_gb > 0)
1848 		return (sector_t)sdebug_virtual_gb *
1849 			(gibibyte / sdebug_sector_size);
1850 	else
1851 		return sdebug_store_sectors;
1852 }
1853 
1854 #define SDEBUG_READCAP_ARR_SZ 8
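/*
 * Respond to READ CAPACITY(10). The returned last LBA saturates at
 * 0xffffffff, directing larger devices to READ CAPACITY(16).
 */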
1855 static int resp_readcap(struct scsi_cmnd *scp,
1856 			struct sdebug_dev_info *devip)
1857 {
1858 	unsigned char arr[SDEBUG_READCAP_ARR_SZ];
1859 	unsigned int capac;
1860 
1861 	/* following just in case virtual_gb changed */
1862 	sdebug_capacity = get_sdebug_capacity();
1863 	memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
1864 	if (sdebug_capacity < 0xffffffff) {
1865 		capac = (unsigned int)sdebug_capacity - 1;
1866 		put_unaligned_be32(capac, arr + 0);
1867 	} else
1868 		put_unaligned_be32(0xffffffff, arr + 0);
1869 	put_unaligned_be16(sdebug_sector_size, arr + 6);
1870 	return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
1871 }
1872 
1873 #define SDEBUG_READCAP16_ARR_SZ 32
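/*
 * Respond to READ CAPACITY(16). Besides the last LBA and block size,
 * this conveys the physical block exponent, lowest aligned LBA,
 * protection type (P_TYPE/PROT_EN), provisioning (LBPME/LBPRZ) and,
 * for host-managed ZBC devices, the RC BASIS field.
 */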
1874 static int resp_readcap16(struct scsi_cmnd *scp,
1875 			  struct sdebug_dev_info *devip)
1876 {
1877 	unsigned char *cmd = scp->cmnd;
1878 	unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
1879 	u32 alloc_len;
1880 
1881 	alloc_len = get_unaligned_be32(cmd + 10);
1882 	/* following just in case virtual_gb changed */
1883 	sdebug_capacity = get_sdebug_capacity();
1884 	memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
1885 	put_unaligned_be64((u64)(sdebug_capacity - 1), arr + 0);
1886 	put_unaligned_be32(sdebug_sector_size, arr + 8);
1887 	arr[13] = sdebug_physblk_exp & 0xf;
1888 	arr[14] = (sdebug_lowest_aligned >> 8) & 0x3f;
1889 
1890 	if (scsi_debug_lbp()) {
1891 		arr[14] |= 0x80; /* LBPME */
1892 		/* from sbc4r07, this LBPRZ field is 1 bit, but the LBPRZ in
1893 		 * the LB Provisioning VPD page is 3 bits. Note that lbprz=2
1894 		 * in the wider field maps to 0 in this field.
1895 		 */
1896 		if (sdebug_lbprz & 1)	/* precisely what the draft requires */
1897 			arr[14] |= 0x40;
1898 	}
1899 
1900 	/*
1901 	 * Since the scsi_debug READ CAPACITY implementation always reports the
1902 	 * total disk capacity, set RC BASIS = 1 for host-managed ZBC devices.
1903 	 */
1904 	if (devip->zmodel == BLK_ZONED_HM)
1905 		arr[12] |= 1 << 4;
1906 
1907 	arr[15] = sdebug_lowest_aligned & 0xff;
1908 
1909 	if (have_dif_prot) {
1910 		arr[12] = (sdebug_dif - 1) << 1; /* P_TYPE */
1911 		arr[12] |= 1; /* PROT_EN */
1912 	}
1913 
1914 	return fill_from_dev_buffer(scp, arr,
1915 			    min_t(u32, alloc_len, SDEBUG_READCAP16_ARR_SZ));
1916 }
1917 
1918 #define SDEBUG_MAX_TGTPGS_ARR_SZ 1412
1919 
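/*
 * Respond to REPORT TARGET PORT GROUPS (MAINTENANCE IN). The two port
 * groups built here match the two relative target ports claimed by the
 * SCSI ports VPD page (0x88).
 */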
1920 static int resp_report_tgtpgs(struct scsi_cmnd *scp,
1921 			      struct sdebug_dev_info *devip)
1922 {
1923 	unsigned char *cmd = scp->cmnd;
1924 	unsigned char *arr;
1925 	int host_no = devip->sdbg_host->shost->host_no;
1926 	int port_group_a, port_group_b, port_a, port_b;
1927 	u32 alen, n, rlen;
1928 	int ret;
1929 
1930 	alen = get_unaligned_be32(cmd + 6);
1931 	arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
1932 	if (!arr)
1933 		return DID_REQUEUE << 16;
1934 	/*
1935 	 * EVPD page 0x88 states we have two ports, one
1936 	 * real and a fake port with no device connected.
1937 	 * So we create two port groups with one port each
1938 	 * and set the group with port B to unavailable.
1939 	 */
1940 	port_a = 0x1; /* relative port A */
1941 	port_b = 0x2; /* relative port B */
1942 	port_group_a = (((host_no + 1) & 0x7f) << 8) +
1943 			(devip->channel & 0x7f);
1944 	port_group_b = (((host_no + 1) & 0x7f) << 8) +
1945 			(devip->channel & 0x7f) + 0x80;
1946 
1947 	/*
1948 	 * The asymmetric access state is cycled according to the host_id.
1949 	 */
1950 	n = 4;
1951 	if (sdebug_vpd_use_hostno == 0) {
1952 		arr[n++] = host_no % 3; /* Asymm access state */
1953 		arr[n++] = 0x0F; /* claim: all states are supported */
1954 	} else {
1955 		arr[n++] = 0x0; /* Active/Optimized path */
1956 		arr[n++] = 0x01; /* only support active/optimized paths */
1957 	}
1958 	put_unaligned_be16(port_group_a, arr + n);
1959 	n += 2;
1960 	arr[n++] = 0;    /* Reserved */
1961 	arr[n++] = 0;    /* Status code */
1962 	arr[n++] = 0;    /* Vendor unique */
1963 	arr[n++] = 0x1;  /* One port per group */
1964 	arr[n++] = 0;    /* Reserved */
1965 	arr[n++] = 0;    /* Reserved */
1966 	put_unaligned_be16(port_a, arr + n);
1967 	n += 2;
1968 	arr[n++] = 3;    /* Port unavailable */
1969 	arr[n++] = 0x08; /* claim: only unavailable paths are supported */
1970 	put_unaligned_be16(port_group_b, arr + n);
1971 	n += 2;
1972 	arr[n++] = 0;    /* Reserved */
1973 	arr[n++] = 0;    /* Status code */
1974 	arr[n++] = 0;    /* Vendor unique */
1975 	arr[n++] = 0x1;  /* One port per group */
1976 	arr[n++] = 0;    /* Reserved */
1977 	arr[n++] = 0;    /* Reserved */
1978 	put_unaligned_be16(port_b, arr + n);
1979 	n += 2;
1980 
1981 	rlen = n - 4;
1982 	put_unaligned_be32(rlen, arr + 0);
1983 
1984 	/*
1985 	 * Return the smallest of:
1986 	 * - The allocated length
1987 	 * - The constructed response length
1988 	 * - The maximum array size
1989 	 */
1990 	rlen = min(alen, n);
1991 	ret = fill_from_dev_buffer(scp, arr,
1992 			   min_t(u32, rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
1993 	kfree(arr);
1994 	return ret;
1995 }
1996 
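/*
 * Respond to REPORT SUPPORTED OPERATION CODES. Reporting options 0 (all
 * commands) and 1 to 3 (one command, with or without service action) are
 * answered from the parser tables (opcode_info_arr), so the response
 * tracks what the command parser will actually accept.
 */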
1997 static int resp_rsup_opcodes(struct scsi_cmnd *scp,
1998 			     struct sdebug_dev_info *devip)
1999 {
2000 	bool rctd;
2001 	u8 reporting_opts, req_opcode, sdeb_i, supp;
2002 	u16 req_sa, u;
2003 	u32 alloc_len, a_len;
2004 	int k, offset, len, errsts, count, bump, na;
2005 	const struct opcode_info_t *oip;
2006 	const struct opcode_info_t *r_oip;
2007 	u8 *arr;
2008 	u8 *cmd = scp->cmnd;
2009 
2010 	rctd = !!(cmd[2] & 0x80);
2011 	reporting_opts = cmd[2] & 0x7;
2012 	req_opcode = cmd[3];
2013 	req_sa = get_unaligned_be16(cmd + 4);
2014 	alloc_len = get_unaligned_be32(cmd + 6);
2015 	if (alloc_len < 4 || alloc_len > 0xffff) {
2016 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
2017 		return check_condition_result;
2018 	}
2019 	if (alloc_len > 8192)
2020 		a_len = 8192;
2021 	else
2022 		a_len = alloc_len;
2023 	arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_ATOMIC);
2024 	if (NULL == arr) {
2025 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
2026 				INSUFF_RES_ASCQ);
2027 		return check_condition_result;
2028 	}
2029 	switch (reporting_opts) {
2030 	case 0:	/* all commands */
2031 		/* count number of commands */
2032 		for (count = 0, oip = opcode_info_arr;
2033 		     oip->num_attached != 0xff; ++oip) {
2034 			if (F_INV_OP & oip->flags)
2035 				continue;
2036 			count += (oip->num_attached + 1);
2037 		}
2038 		bump = rctd ? 20 : 8;
2039 		put_unaligned_be32(count * bump, arr);
2040 		for (offset = 4, oip = opcode_info_arr;
2041 		     oip->num_attached != 0xff && offset < a_len; ++oip) {
2042 			if (F_INV_OP & oip->flags)
2043 				continue;
2044 			na = oip->num_attached;
2045 			arr[offset] = oip->opcode;
2046 			put_unaligned_be16(oip->sa, arr + offset + 2);
2047 			if (rctd)
2048 				arr[offset + 5] |= 0x2;
2049 			if (FF_SA & oip->flags)
2050 				arr[offset + 5] |= 0x1;
2051 			put_unaligned_be16(oip->len_mask[0], arr + offset + 6);
2052 			if (rctd)
2053 				put_unaligned_be16(0xa, arr + offset + 8);
2054 			r_oip = oip;
2055 			for (k = 0, oip = oip->arrp; k < na; ++k, ++oip) {
2056 				if (F_INV_OP & oip->flags)
2057 					continue;
2058 				offset += bump;
2059 				arr[offset] = oip->opcode;
2060 				put_unaligned_be16(oip->sa, arr + offset + 2);
2061 				if (rctd)
2062 					arr[offset + 5] |= 0x2;
2063 				if (FF_SA & oip->flags)
2064 					arr[offset + 5] |= 0x1;
2065 				put_unaligned_be16(oip->len_mask[0],
2066 						   arr + offset + 6);
2067 				if (rctd)
2068 					put_unaligned_be16(0xa,
2069 							   arr + offset + 8);
2070 			}
2071 			oip = r_oip;
2072 			offset += bump;
2073 		}
2074 		break;
2075 	case 1:	/* one command: opcode only */
2076 	case 2:	/* one command: opcode plus service action */
2077 	case 3:	/* one command: if sa==0 then opcode only else opcode+sa */
2078 		sdeb_i = opcode_ind_arr[req_opcode];
2079 		oip = &opcode_info_arr[sdeb_i];
2080 		if (F_INV_OP & oip->flags) {
2081 			supp = 1;
2082 			offset = 4;
2083 		} else {
2084 			if (1 == reporting_opts) {
2085 				if (FF_SA & oip->flags) {
2086 					mk_sense_invalid_fld(scp, SDEB_IN_CDB,
2087 							     2, 2);
2088 					kfree(arr);
2089 					return check_condition_result;
2090 				}
2091 				req_sa = 0;
2092 			} else if (2 == reporting_opts &&
2093 				   0 == (FF_SA & oip->flags)) {
2094 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1);	/* point at requested sa */
2095 				kfree(arr);
2096 				return check_condition_result;
2097 			}
2098 			if (0 == (FF_SA & oip->flags) &&
2099 			    req_opcode == oip->opcode)
2100 				supp = 3;
2101 			else if (0 == (FF_SA & oip->flags)) {
2102 				na = oip->num_attached;
2103 				for (k = 0, oip = oip->arrp; k < na;
2104 				     ++k, ++oip) {
2105 					if (req_opcode == oip->opcode)
2106 						break;
2107 				}
2108 				supp = (k >= na) ? 1 : 3;
2109 			} else if (req_sa != oip->sa) {
2110 				na = oip->num_attached;
2111 				for (k = 0, oip = oip->arrp; k < na;
2112 				     ++k, ++oip) {
2113 					if (req_sa == oip->sa)
2114 						break;
2115 				}
2116 				supp = (k >= na) ? 1 : 3;
2117 			} else
2118 				supp = 3;
2119 			if (3 == supp) {
2120 				u = oip->len_mask[0];
2121 				put_unaligned_be16(u, arr + 2);
2122 				arr[4] = oip->opcode;
2123 				for (k = 1; k < u; ++k)
2124 					arr[4 + k] = (k < 16) ?
2125 						 oip->len_mask[k] : 0xff;
2126 				offset = 4 + u;
2127 			} else
2128 				offset = 4;
2129 		}
2130 		arr[1] = (rctd ? 0x80 : 0) | supp;
2131 		if (rctd) {
2132 			put_unaligned_be16(0xa, arr + offset);
2133 			offset += 12;
2134 		}
2135 		break;
2136 	default:
2137 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
2138 		kfree(arr);
2139 		return check_condition_result;
2140 	}
2141 	offset = (offset < a_len) ? offset : a_len;
2142 	len = (offset < alloc_len) ? offset : alloc_len;
2143 	errsts = fill_from_dev_buffer(scp, arr, len);
2144 	kfree(arr);
2145 	return errsts;
2146 }
2147 
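/*
 * Respond to REPORT SUPPORTED TASK MANAGEMENT FUNCTIONS. Claims support
 * for ABORT TASK, ABORT TASK SET, LOGICAL UNIT RESET and I_T NEXUS
 * RESET; when REPD is set, the 16 byte extended response is returned.
 */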
2148 static int resp_rsup_tmfs(struct scsi_cmnd *scp,
2149 			  struct sdebug_dev_info *devip)
2150 {
2151 	bool repd;
2152 	u32 alloc_len, len;
2153 	u8 arr[16];
2154 	u8 *cmd = scp->cmnd;
2155 
2156 	memset(arr, 0, sizeof(arr));
2157 	repd = !!(cmd[2] & 0x80);
2158 	alloc_len = get_unaligned_be32(cmd + 6);
2159 	if (alloc_len < 4) {
2160 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
2161 		return check_condition_result;
2162 	}
2163 	arr[0] = 0xc8;		/* ATS | ATSS | LURS */
2164 	arr[1] = 0x1;		/* ITNRS */
2165 	if (repd) {
2166 		arr[3] = 0xc;
2167 		len = 16;
2168 	} else
2169 		len = 4;
2170 
2171 	len = (len < alloc_len) ? len : alloc_len;
2172 	return fill_from_dev_buffer(scp, arr, len);
2173 }
2174 
2175 /* <<Following mode page info copied from ST318451LW>> */
2176 
2177 static int resp_err_recov_pg(unsigned char *p, int pcontrol, int target)
2178 {	/* Read-Write Error Recovery page for mode_sense */
2179 	unsigned char err_recov_pg[] = {0x1, 0xa, 0xc0, 11, 240, 0, 0, 0,
2180 					5, 0, 0xff, 0xff};
2181 
2182 	memcpy(p, err_recov_pg, sizeof(err_recov_pg));
2183 	if (1 == pcontrol)
2184 		memset(p + 2, 0, sizeof(err_recov_pg) - 2);
2185 	return sizeof(err_recov_pg);
2186 }
2187 
2188 static int resp_disconnect_pg(unsigned char *p, int pcontrol, int target)
2189 { 	/* Disconnect-Reconnect page for mode_sense */
2190 	unsigned char disconnect_pg[] = {0x2, 0xe, 128, 128, 0, 10, 0, 0,
2191 					 0, 0, 0, 0, 0, 0, 0, 0};
2192 
2193 	memcpy(p, disconnect_pg, sizeof(disconnect_pg));
2194 	if (1 == pcontrol)
2195 		memset(p + 2, 0, sizeof(disconnect_pg) - 2);
2196 	return sizeof(disconnect_pg);
2197 }
2198 
2199 static int resp_format_pg(unsigned char *p, int pcontrol, int target)
2200 {       /* Format device page for mode_sense */
2201 	unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
2202 				     0, 0, 0, 0, 0, 0, 0, 0,
2203 				     0, 0, 0, 0, 0x40, 0, 0, 0};
2204 
2205 	memcpy(p, format_pg, sizeof(format_pg));
2206 	put_unaligned_be16(sdebug_sectors_per, p + 10);
2207 	put_unaligned_be16(sdebug_sector_size, p + 12);
2208 	if (sdebug_removable)
2209 		p[20] |= 0x20; /* should agree with INQUIRY */
2210 	if (1 == pcontrol)
2211 		memset(p + 2, 0, sizeof(format_pg) - 2);
2212 	return sizeof(format_pg);
2213 }
2214 
2215 static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
2216 				     0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,
2217 				     0, 0, 0, 0};
2218 
2219 static int resp_caching_pg(unsigned char *p, int pcontrol, int target)
2220 { 	/* Caching page for mode_sense */
2221 	unsigned char ch_caching_pg[] = {/* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0,
2222 		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
2223 	unsigned char d_caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
2224 		0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,     0, 0, 0, 0};
2225 
2226 	if (SDEBUG_OPT_N_WCE & sdebug_opts)
2227 		caching_pg[2] &= ~0x4;	/* set WCE=0 (default WCE=1) */
2228 	memcpy(p, caching_pg, sizeof(caching_pg));
2229 	if (1 == pcontrol)
2230 		memcpy(p + 2, ch_caching_pg, sizeof(ch_caching_pg));
2231 	else if (2 == pcontrol)
2232 		memcpy(p, d_caching_pg, sizeof(d_caching_pg));
2233 	return sizeof(caching_pg);
2234 }
2235 
2236 static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
2237 				    0, 0, 0x2, 0x4b};
2238 
2239 static int resp_ctrl_m_pg(unsigned char *p, int pcontrol, int target)
2240 { 	/* Control mode page for mode_sense */
2241 	unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
2242 					0, 0, 0, 0};
2243 	unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
2244 				     0, 0, 0x2, 0x4b};
2245 
2246 	if (sdebug_dsense)
2247 		ctrl_m_pg[2] |= 0x4;
2248 	else
2249 		ctrl_m_pg[2] &= ~0x4;
2250 
2251 	if (sdebug_ato)
2252 		ctrl_m_pg[5] |= 0x80; /* ATO=1 */
2253 
2254 	memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
2255 	if (1 == pcontrol)
2256 		memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
2257 	else if (2 == pcontrol)
2258 		memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
2259 	return sizeof(ctrl_m_pg);
2260 }
2261 
2262 
2263 static int resp_iec_m_pg(unsigned char *p, int pcontrol, int target)
2264 {	/* Informational Exceptions control mode page for mode_sense */
2265 	unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
2266 				       0, 0, 0x0, 0x0};
2267 	unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
2268 				      0, 0, 0x0, 0x0};
2269 
2270 	memcpy(p, iec_m_pg, sizeof(iec_m_pg));
2271 	if (1 == pcontrol)
2272 		memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
2273 	else if (2 == pcontrol)
2274 		memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
2275 	return sizeof(iec_m_pg);
2276 }
2277 
2278 static int resp_sas_sf_m_pg(unsigned char *p, int pcontrol, int target)
2279 {	/* SAS SSP mode page - short format for mode_sense */
2280 	unsigned char sas_sf_m_pg[] = {0x19, 0x6,
2281 		0x6, 0x0, 0x7, 0xd0, 0x0, 0x0};
2282 
2283 	memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg));
2284 	if (1 == pcontrol)
2285 		memset(p + 2, 0, sizeof(sas_sf_m_pg) - 2);
2286 	return sizeof(sas_sf_m_pg);
2287 }
2288 
2289 
2290 static int resp_sas_pcd_m_spg(unsigned char *p, int pcontrol, int target,
2291 			      int target_dev_id)
2292 {	/* SAS phy control and discover mode page for mode_sense */
2293 	unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
2294 		    0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
2295 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2296 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2297 		    0x2, 0, 0, 0, 0, 0, 0, 0,
2298 		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
2299 		    0, 0, 0, 0, 0, 0, 0, 0,
2300 		    0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
2301 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2302 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2303 		    0x3, 0, 0, 0, 0, 0, 0, 0,
2304 		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
2305 		    0, 0, 0, 0, 0, 0, 0, 0,
2306 		};
2307 	int port_a, port_b;
2308 
2309 	put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 16);
2310 	put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 24);
2311 	put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 64);
2312 	put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 72);
2313 	port_a = target_dev_id + 1;
2314 	port_b = port_a + 1;
2315 	memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
2316 	put_unaligned_be32(port_a, p + 20);
2317 	put_unaligned_be32(port_b, p + 48 + 20);
2318 	if (1 == pcontrol)
2319 		memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
2320 	return sizeof(sas_pcd_m_pg);
2321 }
2322 
2323 static int resp_sas_sha_m_spg(unsigned char *p, int pcontrol)
2324 {	/* SAS SSP shared protocol specific port mode subpage */
2325 	unsigned char sas_sha_m_pg[] = {0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
2326 		    0, 0, 0, 0, 0, 0, 0, 0,
2327 		};
2328 
2329 	memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg));
2330 	if (1 == pcontrol)
2331 		memset(p + 4, 0, sizeof(sas_sha_m_pg) - 4);
2332 	return sizeof(sas_sha_m_pg);
2333 }
2334 
2335 #define SDEBUG_MAX_MSENSE_SZ 256
2336 
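/*
 * Respond to MODE SENSE(6) and MODE SENSE(10): build the mode parameter
 * header, optional block descriptor(s), then the requested page(s) from
 * the resp_*_pg() helpers above. Page code 0x3f returns all supported
 * pages.
 */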
2337 static int resp_mode_sense(struct scsi_cmnd *scp,
2338 			   struct sdebug_dev_info *devip)
2339 {
2340 	int pcontrol, pcode, subpcode, bd_len;
2341 	unsigned char dev_spec;
2342 	u32 alloc_len, offset, len;
2343 	int target_dev_id;
2344 	int target = scp->device->id;
2345 	unsigned char *ap;
2346 	unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
2347 	unsigned char *cmd = scp->cmnd;
2348 	bool dbd, llbaa, msense_6, is_disk, is_zbc, bad_pcode;
2349 
2350 	dbd = !!(cmd[1] & 0x8);		/* disable block descriptors */
2351 	pcontrol = (cmd[2] & 0xc0) >> 6;
2352 	pcode = cmd[2] & 0x3f;
2353 	subpcode = cmd[3];
2354 	msense_6 = (MODE_SENSE == cmd[0]);
2355 	llbaa = msense_6 ? false : !!(cmd[1] & 0x10);
2356 	is_disk = (sdebug_ptype == TYPE_DISK);
2357 	is_zbc = (devip->zmodel != BLK_ZONED_NONE);
2358 	if ((is_disk || is_zbc) && !dbd)
2359 		bd_len = llbaa ? 16 : 8;
2360 	else
2361 		bd_len = 0;
2362 	alloc_len = msense_6 ? cmd[4] : get_unaligned_be16(cmd + 7);
2363 	memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
2364 	if (0x3 == pcontrol) {  /* Saving values not supported */
2365 		mk_sense_buffer(scp, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, 0);
2366 		return check_condition_result;
2367 	}
2368 	target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
2369 			(devip->target * 1000) - 3;
2370 	/* for disks+zbc set DPOFUA bit and clear write protect (WP) bit */
2371 	if (is_disk || is_zbc) {
2372 		dev_spec = 0x10;	/* =0x90 if WP=1 implies read-only */
2373 		if (sdebug_wp)
2374 			dev_spec |= 0x80;
2375 	} else
2376 		dev_spec = 0x0;
2377 	if (msense_6) {
2378 		arr[2] = dev_spec;
2379 		arr[3] = bd_len;
2380 		offset = 4;
2381 	} else {
2382 		arr[3] = dev_spec;
2383 		if (16 == bd_len)
2384 			arr[4] = 0x1;	/* set LONGLBA bit */
2385 		arr[7] = bd_len;	/* assume 255 or less */
2386 		offset = 8;
2387 	}
2388 	ap = arr + offset;
2389 	if ((bd_len > 0) && (!sdebug_capacity))
2390 		sdebug_capacity = get_sdebug_capacity();
2391 
2392 	if (8 == bd_len) {
2393 		if (sdebug_capacity > 0xfffffffe)
2394 			put_unaligned_be32(0xffffffff, ap + 0);
2395 		else
2396 			put_unaligned_be32(sdebug_capacity, ap + 0);
2397 		put_unaligned_be16(sdebug_sector_size, ap + 6);
2398 		offset += bd_len;
2399 		ap = arr + offset;
2400 	} else if (16 == bd_len) {
2401 		put_unaligned_be64((u64)sdebug_capacity, ap + 0);
2402 		put_unaligned_be32(sdebug_sector_size, ap + 12);
2403 		offset += bd_len;
2404 		ap = arr + offset;
2405 	}
2406 
2407 	if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) {
2408 		/* TODO: Control Extension page */
2409 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2410 		return check_condition_result;
2411 	}
2412 	bad_pcode = false;
2413 
2414 	switch (pcode) {
2415 	case 0x1:	/* Read-Write error recovery page, direct access */
2416 		len = resp_err_recov_pg(ap, pcontrol, target);
2417 		offset += len;
2418 		break;
2419 	case 0x2:	/* Disconnect-Reconnect page, all devices */
2420 		len = resp_disconnect_pg(ap, pcontrol, target);
2421 		offset += len;
2422 		break;
2423 	case 0x3:       /* Format device page, direct access */
2424 		if (is_disk) {
2425 			len = resp_format_pg(ap, pcontrol, target);
2426 			offset += len;
2427 		} else
2428 			bad_pcode = true;
2429 		break;
2430 	case 0x8:	/* Caching page, direct access */
2431 		if (is_disk || is_zbc) {
2432 			len = resp_caching_pg(ap, pcontrol, target);
2433 			offset += len;
2434 		} else
2435 			bad_pcode = true;
2436 		break;
2437 	case 0xa:	/* Control Mode page, all devices */
2438 		len = resp_ctrl_m_pg(ap, pcontrol, target);
2439 		offset += len;
2440 		break;
2441 	case 0x19:	/* if spc==1 then sas phy, control+discover */
2442 		if ((subpcode > 0x2) && (subpcode < 0xff)) {
2443 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2444 			return check_condition_result;
2445 		}
2446 		len = 0;
2447 		if ((0x0 == subpcode) || (0xff == subpcode))
2448 			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2449 		if ((0x1 == subpcode) || (0xff == subpcode))
2450 			len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
2451 						  target_dev_id);
2452 		if ((0x2 == subpcode) || (0xff == subpcode))
2453 			len += resp_sas_sha_m_spg(ap + len, pcontrol);
2454 		offset += len;
2455 		break;
2456 	case 0x1c:	/* Informational Exceptions Mode page, all devices */
2457 		len = resp_iec_m_pg(ap, pcontrol, target);
2458 		offset += len;
2459 		break;
2460 	case 0x3f:	/* Read all Mode pages */
2461 		if ((0 == subpcode) || (0xff == subpcode)) {
2462 			len = resp_err_recov_pg(ap, pcontrol, target);
2463 			len += resp_disconnect_pg(ap + len, pcontrol, target);
2464 			if (is_disk) {
2465 				len += resp_format_pg(ap + len, pcontrol,
2466 						      target);
2467 				len += resp_caching_pg(ap + len, pcontrol,
2468 						       target);
2469 			} else if (is_zbc) {
2470 				len += resp_caching_pg(ap + len, pcontrol,
2471 						       target);
2472 			}
2473 			len += resp_ctrl_m_pg(ap + len, pcontrol, target);
2474 			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2475 			if (0xff == subpcode) {
2476 				len += resp_sas_pcd_m_spg(ap + len, pcontrol,
2477 						  target, target_dev_id);
2478 				len += resp_sas_sha_m_spg(ap + len, pcontrol);
2479 			}
2480 			len += resp_iec_m_pg(ap + len, pcontrol, target);
2481 			offset += len;
2482 		} else {
2483 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2484 			return check_condition_result;
2485 		}
2486 		break;
2487 	default:
2488 		bad_pcode = true;
2489 		break;
2490 	}
2491 	if (bad_pcode) {
2492 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2493 		return check_condition_result;
2494 	}
2495 	if (msense_6)
2496 		arr[0] = offset - 1;
2497 	else
2498 		put_unaligned_be16((offset - 2), arr + 0);
2499 	return fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, offset));
2500 }
2501 
2502 #define SDEBUG_MAX_MSELECT_SZ 512
2503 
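/*
 * Respond to MODE SELECT(6) and MODE SELECT(10). Only the Caching (0x8),
 * Control (0xa) and Informational Exceptions (0x1c) mode pages may be
 * changed; a successful change raises a MODE PARAMETERS CHANGED unit
 * attention.
 */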
2504 static int resp_mode_select(struct scsi_cmnd *scp,
2505 			    struct sdebug_dev_info *devip)
2506 {
2507 	int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
2508 	int param_len, res, mpage;
2509 	unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
2510 	unsigned char *cmd = scp->cmnd;
2511 	int mselect6 = (MODE_SELECT == cmd[0]);
2512 
2513 	memset(arr, 0, sizeof(arr));
2514 	pf = cmd[1] & 0x10;
2515 	sp = cmd[1] & 0x1;
2516 	param_len = mselect6 ? cmd[4] : get_unaligned_be16(cmd + 7);
2517 	if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
2518 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1);
2519 		return check_condition_result;
2520 	}
2521 	res = fetch_to_dev_buffer(scp, arr, param_len);
2522 	if (-1 == res)
2523 		return DID_ERROR << 16;
2524 	else if (sdebug_verbose && (res < param_len))
2525 		sdev_printk(KERN_INFO, scp->device,
2526 			    "%s: cdb indicated=%d, IO sent=%d bytes\n",
2527 			    __func__, param_len, res);
2528 	md_len = mselect6 ? (arr[0] + 1) : (get_unaligned_be16(arr + 0) + 2);
2529 	bd_len = mselect6 ? arr[3] : get_unaligned_be16(arr + 6);
2530 	off = bd_len + (mselect6 ? 4 : 8);
2531 	if (md_len > 2 || off >= res) {
2532 		mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
2533 		return check_condition_result;
2534 	}
2535 	mpage = arr[off] & 0x3f;
2536 	ps = !!(arr[off] & 0x80);
2537 	if (ps) {
2538 		mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 7);
2539 		return check_condition_result;
2540 	}
2541 	spf = !!(arr[off] & 0x40);
2542 	pg_len = spf ? (get_unaligned_be16(arr + off + 2) + 4) :
2543 		       (arr[off + 1] + 2);
2544 	if ((pg_len + off) > param_len) {
2545 		mk_sense_buffer(scp, ILLEGAL_REQUEST,
2546 				PARAMETER_LIST_LENGTH_ERR, 0);
2547 		return check_condition_result;
2548 	}
2549 	switch (mpage) {
2550 	case 0x8:      /* Caching Mode page */
2551 		if (caching_pg[1] == arr[off + 1]) {
2552 			memcpy(caching_pg + 2, arr + off + 2,
2553 			       sizeof(caching_pg) - 2);
2554 			goto set_mode_changed_ua;
2555 		}
2556 		break;
2557 	case 0xa:      /* Control Mode page */
2558 		if (ctrl_m_pg[1] == arr[off + 1]) {
2559 			memcpy(ctrl_m_pg + 2, arr + off + 2,
2560 			       sizeof(ctrl_m_pg) - 2);
2561 			if (ctrl_m_pg[4] & 0x8)
2562 				sdebug_wp = true;
2563 			else
2564 				sdebug_wp = false;
2565 			sdebug_dsense = !!(ctrl_m_pg[2] & 0x4);
2566 			goto set_mode_changed_ua;
2567 		}
2568 		break;
2569 	case 0x1c:      /* Informational Exceptions Mode page */
2570 		if (iec_m_pg[1] == arr[off + 1]) {
2571 			memcpy(iec_m_pg + 2, arr + off + 2,
2572 			       sizeof(iec_m_pg) - 2);
2573 			goto set_mode_changed_ua;
2574 		}
2575 		break;
2576 	default:
2577 		break;
2578 	}
2579 	mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 5);
2580 	return check_condition_result;
2581 set_mode_changed_ua:
2582 	set_bit(SDEBUG_UA_MODE_CHANGED, devip->uas_bm);
2583 	return 0;
2584 }
2585 
2586 static int resp_temp_l_pg(unsigned char *arr)
2587 {
2588 	unsigned char temp_l_pg[] = {0x0, 0x0, 0x3, 0x2, 0x0, 38,
2589 				     0x0, 0x1, 0x3, 0x2, 0x0, 65,
2590 		};
2591 
2592 	memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
2593 	return sizeof(temp_l_pg);
2594 }
2595 
2596 static int resp_ie_l_pg(unsigned char *arr)
2597 {
2598 	unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
2599 		};
2600 
2601 	memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
2602 	if (iec_m_pg[2] & 0x4) {	/* TEST bit set */
2603 		arr[4] = THRESHOLD_EXCEEDED;
2604 		arr[5] = 0xff;
2605 	}
2606 	return sizeof(ie_l_pg);
2607 }
2608 
2609 static int resp_env_rep_l_spg(unsigned char *arr)
2610 {
2611 	unsigned char env_rep_l_spg[] = {0x0, 0x0, 0x23, 0x8,
2612 					 0x0, 40, 72, 0xff, 45, 18, 0, 0,
2613 					 0x1, 0x0, 0x23, 0x8,
2614 					 0x0, 55, 72, 35, 55, 45, 0, 0,
2615 		};
2616 
2617 	memcpy(arr, env_rep_l_spg, sizeof(env_rep_l_spg));
2618 	return sizeof(env_rep_l_spg);
2619 }
2620 
2621 #define SDEBUG_MAX_LSENSE_SZ 512
2622 
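/*
 * Respond to LOG SENSE. Supports the Temperature (0xd), Environment
 * reporting (0xd,0x1) and Informational exceptions (0x2f) pages plus the
 * supported pages/subpages lists; PPC=1 and SP=1 are rejected.
 */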
2623 static int resp_log_sense(struct scsi_cmnd *scp,
2624 			  struct sdebug_dev_info *devip)
2625 {
2626 	int ppc, sp, pcode, subpcode;
2627 	u32 alloc_len, len, n;
2628 	unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
2629 	unsigned char *cmd = scp->cmnd;
2630 
2631 	memset(arr, 0, sizeof(arr));
2632 	ppc = cmd[1] & 0x2;
2633 	sp = cmd[1] & 0x1;
2634 	if (ppc || sp) {
2635 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, ppc ? 1 : 0);
2636 		return check_condition_result;
2637 	}
2638 	pcode = cmd[2] & 0x3f;
2639 	subpcode = cmd[3] & 0xff;
2640 	alloc_len = get_unaligned_be16(cmd + 7);
2641 	arr[0] = pcode;
2642 	if (0 == subpcode) {
2643 		switch (pcode) {
2644 		case 0x0:	/* Supported log pages log page */
2645 			n = 4;
2646 			arr[n++] = 0x0;		/* this page */
2647 			arr[n++] = 0xd;		/* Temperature */
2648 			arr[n++] = 0x2f;	/* Informational exceptions */
2649 			arr[3] = n - 4;
2650 			break;
2651 		case 0xd:	/* Temperature log page */
2652 			arr[3] = resp_temp_l_pg(arr + 4);
2653 			break;
2654 		case 0x2f:	/* Informational exceptions log page */
2655 			arr[3] = resp_ie_l_pg(arr + 4);
2656 			break;
2657 		default:
2658 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2659 			return check_condition_result;
2660 		}
2661 	} else if (0xff == subpcode) {
2662 		arr[0] |= 0x40;
2663 		arr[1] = subpcode;
2664 		switch (pcode) {
2665 		case 0x0:	/* Supported log pages and subpages log page */
2666 			n = 4;
2667 			arr[n++] = 0x0;
2668 			arr[n++] = 0x0;		/* 0,0 page */
2669 			arr[n++] = 0x0;
2670 			arr[n++] = 0xff;	/* this page */
2671 			arr[n++] = 0xd;
2672 			arr[n++] = 0x0;		/* Temperature */
2673 			arr[n++] = 0xd;
2674 			arr[n++] = 0x1;		/* Environment reporting */
2675 			arr[n++] = 0xd;
2676 			arr[n++] = 0xff;	/* all 0xd subpages */
2677 			arr[n++] = 0x2f;
2678 			arr[n++] = 0x0;	/* Informational exceptions */
2679 			arr[n++] = 0x2f;
2680 			arr[n++] = 0xff;	/* all 0x2f subpages */
2681 			arr[3] = n - 4;
2682 			break;
2683 		case 0xd:	/* Temperature subpages */
2684 			n = 4;
2685 			arr[n++] = 0xd;
2686 			arr[n++] = 0x0;		/* Temperature */
2687 			arr[n++] = 0xd;
2688 			arr[n++] = 0x1;		/* Environment reporting */
2689 			arr[n++] = 0xd;
2690 			arr[n++] = 0xff;	/* these subpages */
2691 			arr[3] = n - 4;
2692 			break;
2693 		case 0x2f:	/* Informational exceptions subpages */
2694 			n = 4;
2695 			arr[n++] = 0x2f;
2696 			arr[n++] = 0x0;		/* Informational exceptions */
2697 			arr[n++] = 0x2f;
2698 			arr[n++] = 0xff;	/* these subpages */
2699 			arr[3] = n - 4;
2700 			break;
2701 		default:
2702 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2703 			return check_condition_result;
2704 		}
2705 	} else if (subpcode > 0) {
2706 		arr[0] |= 0x40;
2707 		arr[1] = subpcode;
2708 		if (pcode == 0xd && subpcode == 1)
2709 			arr[3] = resp_env_rep_l_spg(arr + 4);
2710 		else {
2711 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2712 			return check_condition_result;
2713 		}
2714 	} else {
2715 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2716 		return check_condition_result;
2717 	}
2718 	len = min_t(u32, get_unaligned_be16(arr + 2) + 4, alloc_len);
2719 	return fill_from_dev_buffer(scp, arr,
2720 		    min_t(u32, len, SDEBUG_MAX_LSENSE_SZ));
2721 }
2722 
2723 static inline bool sdebug_dev_is_zoned(struct sdebug_dev_info *devip)
2724 {
2725 	return devip->nr_zones != 0;
2726 }
2727 
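/*
 * Return the state of the zone containing @lba. When the zone capacity
 * is smaller than the zone size, each sequential zone is followed by a
 * gap zone and the index must be adjusted accordingly.
 */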
2728 static struct sdeb_zone_state *zbc_zone(struct sdebug_dev_info *devip,
2729 					unsigned long long lba)
2730 {
2731 	u32 zno = lba >> devip->zsize_shift;
2732 	struct sdeb_zone_state *zsp;
2733 
2734 	if (devip->zcap == devip->zsize || zno < devip->nr_conv_zones)
2735 		return &devip->zstate[zno];
2736 
2737 	/*
2738 	 * If the zone capacity is less than the zone size, adjust for gap
2739 	 * zones.
2740 	 */
2741 	zno = 2 * zno - devip->nr_conv_zones;
2742 	WARN_ONCE(zno >= devip->nr_zones, "%u >= %u\n", zno, devip->nr_zones);
2743 	zsp = &devip->zstate[zno];
2744 	if (lba >= zsp->z_start + zsp->z_size)
2745 		zsp++;
2746 	WARN_ON_ONCE(lba >= zsp->z_start + zsp->z_size);
2747 	return zsp;
2748 }
2749 
2750 static inline bool zbc_zone_is_conv(struct sdeb_zone_state *zsp)
2751 {
2752 	return zsp->z_type == ZBC_ZTYPE_CNV;
2753 }
2754 
2755 static inline bool zbc_zone_is_gap(struct sdeb_zone_state *zsp)
2756 {
2757 	return zsp->z_type == ZBC_ZTYPE_GAP;
2758 }
2759 
2760 static inline bool zbc_zone_is_seq(struct sdeb_zone_state *zsp)
2761 {
2762 	return !zbc_zone_is_conv(zsp) && !zbc_zone_is_gap(zsp);
2763 }
2764 
2765 static void zbc_close_zone(struct sdebug_dev_info *devip,
2766 			   struct sdeb_zone_state *zsp)
2767 {
2768 	enum sdebug_z_cond zc;
2769 
2770 	if (!zbc_zone_is_seq(zsp))
2771 		return;
2772 
2773 	zc = zsp->z_cond;
2774 	if (!(zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN))
2775 		return;
2776 
2777 	if (zc == ZC2_IMPLICIT_OPEN)
2778 		devip->nr_imp_open--;
2779 	else
2780 		devip->nr_exp_open--;
2781 
2782 	if (zsp->z_wp == zsp->z_start) {
2783 		zsp->z_cond = ZC1_EMPTY;
2784 	} else {
2785 		zsp->z_cond = ZC4_CLOSED;
2786 		devip->nr_closed++;
2787 	}
2788 }
2789 
2790 static void zbc_close_imp_open_zone(struct sdebug_dev_info *devip)
2791 {
2792 	struct sdeb_zone_state *zsp = &devip->zstate[0];
2793 	unsigned int i;
2794 
2795 	for (i = 0; i < devip->nr_zones; i++, zsp++) {
2796 		if (zsp->z_cond == ZC2_IMPLICIT_OPEN) {
2797 			zbc_close_zone(devip, zsp);
2798 			return;
2799 		}
2800 	}
2801 }
2802 
2803 static void zbc_open_zone(struct sdebug_dev_info *devip,
2804 			  struct sdeb_zone_state *zsp, bool explicit)
2805 {
2806 	enum sdebug_z_cond zc;
2807 
2808 	if (!zbc_zone_is_seq(zsp))
2809 		return;
2810 
2811 	zc = zsp->z_cond;
2812 	if ((explicit && zc == ZC3_EXPLICIT_OPEN) ||
2813 	    (!explicit && zc == ZC2_IMPLICIT_OPEN))
2814 		return;
2815 
2816 	/* Close an implicit open zone if necessary */
2817 	if (explicit && zsp->z_cond == ZC2_IMPLICIT_OPEN)
2818 		zbc_close_zone(devip, zsp);
2819 	else if (devip->max_open &&
2820 		 devip->nr_imp_open + devip->nr_exp_open >= devip->max_open)
2821 		zbc_close_imp_open_zone(devip);
2822 
2823 	if (zsp->z_cond == ZC4_CLOSED)
2824 		devip->nr_closed--;
2825 	if (explicit) {
2826 		zsp->z_cond = ZC3_EXPLICIT_OPEN;
2827 		devip->nr_exp_open++;
2828 	} else {
2829 		zsp->z_cond = ZC2_IMPLICIT_OPEN;
2830 		devip->nr_imp_open++;
2831 	}
2832 }
2833 
2834 static inline void zbc_set_zone_full(struct sdebug_dev_info *devip,
2835 				     struct sdeb_zone_state *zsp)
2836 {
2837 	switch (zsp->z_cond) {
2838 	case ZC2_IMPLICIT_OPEN:
2839 		devip->nr_imp_open--;
2840 		break;
2841 	case ZC3_EXPLICIT_OPEN:
2842 		devip->nr_exp_open--;
2843 		break;
2844 	default:
2845 		WARN_ONCE(true, "Invalid zone %llu condition %x\n",
2846 			  zsp->z_start, zsp->z_cond);
2847 		break;
2848 	}
2849 	zsp->z_cond = ZC5_FULL;
2850 }
2851 
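/*
 * Advance the write pointer(s) for a write of @num blocks at @lba.
 * Sequential write required zones advance in place; for sequential
 * write preferred zones the write may land beyond the WP (marking the
 * zone as a non-sequential-write resource) and may spill into the
 * following zone(s).
 */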
2852 static void zbc_inc_wp(struct sdebug_dev_info *devip,
2853 		       unsigned long long lba, unsigned int num)
2854 {
2855 	struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
2856 	unsigned long long n, end, zend = zsp->z_start + zsp->z_size;
2857 
2858 	if (!zbc_zone_is_seq(zsp))
2859 		return;
2860 
2861 	if (zsp->z_type == ZBC_ZTYPE_SWR) {
2862 		zsp->z_wp += num;
2863 		if (zsp->z_wp >= zend)
2864 			zbc_set_zone_full(devip, zsp);
2865 		return;
2866 	}
2867 
2868 	while (num) {
2869 		if (lba != zsp->z_wp)
2870 			zsp->z_non_seq_resource = true;
2871 
2872 		end = lba + num;
2873 		if (end >= zend) {
2874 			n = zend - lba;
2875 			zsp->z_wp = zend;
2876 		} else if (end > zsp->z_wp) {
2877 			n = num;
2878 			zsp->z_wp = end;
2879 		} else {
2880 			n = num;
2881 		}
2882 		if (zsp->z_wp >= zend)
2883 			zbc_set_zone_full(devip, zsp);
2884 
2885 		num -= n;
2886 		lba += n;
2887 		if (num) {
2888 			zsp++;
2889 			zend = zsp->z_start + zsp->z_size;
2890 		}
2891 	}
2892 }
2893 
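/*
 * Enforce the ZBC access rules for [lba, lba + num): host-managed reads
 * must not cross zone type boundaries; writes must avoid gap zones and,
 * for sequential write required zones, must not cross a zone boundary,
 * target a full zone, or start anywhere but the write pointer. Empty or
 * closed zones are implicitly opened when the open limit allows it.
 */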
2894 static int check_zbc_access_params(struct scsi_cmnd *scp,
2895 			unsigned long long lba, unsigned int num, bool write)
2896 {
2897 	struct scsi_device *sdp = scp->device;
2898 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
2899 	struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
2900 	struct sdeb_zone_state *zsp_end = zbc_zone(devip, lba + num - 1);
2901 
2902 	if (!write) {
2903 		if (devip->zmodel == BLK_ZONED_HA)
2904 			return 0;
2905 		/* For host-managed, reads cannot cross zone type boundaries */
2906 		if (zsp->z_type != zsp_end->z_type) {
2907 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
2908 					LBA_OUT_OF_RANGE,
2909 					READ_INVDATA_ASCQ);
2910 			return check_condition_result;
2911 		}
2912 		return 0;
2913 	}
2914 
2915 	/* Writing into a gap zone is not allowed */
2916 	if (zbc_zone_is_gap(zsp)) {
2917 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE,
2918 				ATTEMPT_ACCESS_GAP);
2919 		return check_condition_result;
2920 	}
2921 
2922 	/* No restrictions for writes within conventional zones */
2923 	if (zbc_zone_is_conv(zsp)) {
2924 		if (!zbc_zone_is_conv(zsp_end)) {
2925 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
2926 					LBA_OUT_OF_RANGE,
2927 					WRITE_BOUNDARY_ASCQ);
2928 			return check_condition_result;
2929 		}
2930 		return 0;
2931 	}
2932 
2933 	if (zsp->z_type == ZBC_ZTYPE_SWR) {
2934 		/* Writes cannot cross sequential zone boundaries */
2935 		if (zsp_end != zsp) {
2936 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
2937 					LBA_OUT_OF_RANGE,
2938 					WRITE_BOUNDARY_ASCQ);
2939 			return check_condition_result;
2940 		}
2941 		/* Cannot write full zones */
2942 		if (zsp->z_cond == ZC5_FULL) {
2943 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
2944 					INVALID_FIELD_IN_CDB, 0);
2945 			return check_condition_result;
2946 		}
2947 		/* Writes must be aligned to the zone WP */
2948 		if (lba != zsp->z_wp) {
2949 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
2950 					LBA_OUT_OF_RANGE,
2951 					UNALIGNED_WRITE_ASCQ);
2952 			return check_condition_result;
2953 		}
2954 	}
2955 
2956 	/* Handle implicit open of closed and empty zones */
2957 	if (zsp->z_cond == ZC1_EMPTY || zsp->z_cond == ZC4_CLOSED) {
2958 		if (devip->max_open &&
2959 		    devip->nr_exp_open >= devip->max_open) {
2960 			mk_sense_buffer(scp, DATA_PROTECT,
2961 					INSUFF_RES_ASC,
2962 					INSUFF_ZONE_ASCQ);
2963 			return check_condition_result;
2964 		}
2965 		zbc_open_zone(devip, zsp, false);
2966 	}
2967 
2968 	return 0;
2969 }
2970 
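/*
 * Common checks for any media access: the range must fit within the
 * reported capacity, the transfer length must not exceed the store,
 * writes fail when soft write protect is set, and zoned devices get the
 * additional ZBC checks above.
 */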
2971 static inline int check_device_access_params
2972 			(struct scsi_cmnd *scp, unsigned long long lba,
2973 			 unsigned int num, bool write)
2974 {
2975 	struct scsi_device *sdp = scp->device;
2976 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
2977 
2978 	if (lba + num > sdebug_capacity) {
2979 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
2980 		return check_condition_result;
2981 	}
2982 	/* transfer length excessive (tie in to block limits VPD page) */
2983 	if (num > sdebug_store_sectors) {
2984 		/* needs work to find which cdb byte 'num' comes from */
2985 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
2986 		return check_condition_result;
2987 	}
2988 	if (write && unlikely(sdebug_wp)) {
2989 		mk_sense_buffer(scp, DATA_PROTECT, WRITE_PROTECTED, 0x2);
2990 		return check_condition_result;
2991 	}
2992 	if (sdebug_dev_is_zoned(devip))
2993 		return check_zbc_access_params(scp, lba, num, write);
2994 
2995 	return 0;
2996 }
2997 
2998 /*
2999  * Note: if BUG_ON() fires it usually indicates a problem with the parser
3000  * tables. Perhaps a missing F_FAKE_RW or FF_MEDIA_IO flag. Response functions
3001  * that access any of the "stores" in struct sdeb_store_info should call this
3002  * function with bug_if_fake_rw set to true.
3003  */
3004 static inline struct sdeb_store_info *devip2sip(struct sdebug_dev_info *devip,
3005 						bool bug_if_fake_rw)
3006 {
3007 	if (sdebug_fake_rw) {
3008 		BUG_ON(bug_if_fake_rw);	/* See note above */
3009 		return NULL;
3010 	}
3011 	return xa_load(per_store_ap, devip->sdbg_host->si_idx);
3012 }
3013 
3014 /* Returns number of bytes copied or -1 if error. */
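/* Accesses are modulo the store size, wrapping to the start if needed. */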
3015 static int do_device_access(struct sdeb_store_info *sip, struct scsi_cmnd *scp,
3016 			    u32 sg_skip, u64 lba, u32 num, bool do_write)
3017 {
3018 	int ret;
3019 	u64 block, rest = 0;
3020 	enum dma_data_direction dir;
3021 	struct scsi_data_buffer *sdb = &scp->sdb;
3022 	u8 *fsp;
3023 
3024 	if (do_write) {
3025 		dir = DMA_TO_DEVICE;
3026 		write_since_sync = true;
3027 	} else {
3028 		dir = DMA_FROM_DEVICE;
3029 	}
3030 
3031 	if (!sdb->length || !sip)
3032 		return 0;
3033 	if (scp->sc_data_direction != dir)
3034 		return -1;
3035 	fsp = sip->storep;
3036 
3037 	block = do_div(lba, sdebug_store_sectors);
3038 	if (block + num > sdebug_store_sectors)
3039 		rest = block + num - sdebug_store_sectors;
3040 
3041 	ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
3042 		   fsp + (block * sdebug_sector_size),
3043 		   (num - rest) * sdebug_sector_size, sg_skip, do_write);
3044 	if (ret != (num - rest) * sdebug_sector_size)
3045 		return ret;
3046 
3047 	if (rest) {
3048 		ret += sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
3049 			    fsp, rest * sdebug_sector_size,
3050 			    sg_skip + ((num - rest) * sdebug_sector_size),
3051 			    do_write);
3052 	}
3053 
3054 	return ret;
3055 }
3056 
3057 /* Returns number of bytes copied or -1 if error. */
3058 static int do_dout_fetch(struct scsi_cmnd *scp, u32 num, u8 *doutp)
3059 {
3060 	struct scsi_data_buffer *sdb = &scp->sdb;
3061 
3062 	if (!sdb->length)
3063 		return 0;
3064 	if (scp->sc_data_direction != DMA_TO_DEVICE)
3065 		return -1;
3066 	return sg_copy_buffer(sdb->table.sgl, sdb->table.nents, doutp,
3067 			      num * sdebug_sector_size, 0, true);
3068 }
3069 
3070 /* If the store at lba compares equal to the first num blocks of arr,
3071  * copy the second num blocks of arr into the store (skipped when
3072  * compare_only is set) and return true, else return false. */
3073 static bool comp_write_worker(struct sdeb_store_info *sip, u64 lba, u32 num,
3074 			      const u8 *arr, bool compare_only)
3075 {
3076 	bool res;
3077 	u64 block, rest = 0;
3078 	u32 store_blks = sdebug_store_sectors;
3079 	u32 lb_size = sdebug_sector_size;
3080 	u8 *fsp = sip->storep;
3081 
3082 	block = do_div(lba, store_blks);
3083 	if (block + num > store_blks)
3084 		rest = block + num - store_blks;
3085 
3086 	res = !memcmp(fsp + (block * lb_size), arr, (num - rest) * lb_size);
3087 	if (!res)
3088 		return res;
3089 	if (rest)
3090 		res = !memcmp(fsp, arr + ((num - rest) * lb_size),
3091 			      rest * lb_size);
3092 	if (!res)
3093 		return res;
3094 	if (compare_only)
3095 		return true;
3096 	arr += num * lb_size;
3097 	memcpy(fsp + (block * lb_size), arr, (num - rest) * lb_size);
3098 	if (rest)
3099 		memcpy(fsp, arr + ((num - rest) * lb_size), rest * lb_size);
3100 	return res;
3101 }
3102 
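/* Guard tag is an IP checksum when sdebug_guard is set, else CRC16 (T10 DIF) */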
3103 static __be16 dif_compute_csum(const void *buf, int len)
3104 {
3105 	__be16 csum;
3106 
3107 	if (sdebug_guard)
3108 		csum = (__force __be16)ip_compute_csum(buf, len);
3109 	else
3110 		csum = cpu_to_be16(crc_t10dif(buf, len));
3111 
3112 	return csum;
3113 }
3114 
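/*
 * Returns 0 if the tuple verifies. A guard tag mismatch yields 0x01 and a
 * reference tag mismatch yields 0x03; callers reuse these values as the ASCQ
 * of the "LOGICAL BLOCK ... CHECK FAILED" sense codes (ASC 0x10).
 */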
3115 static int dif_verify(struct t10_pi_tuple *sdt, const void *data,
3116 		      sector_t sector, u32 ei_lba)
3117 {
3118 	__be16 csum = dif_compute_csum(data, sdebug_sector_size);
3119 
3120 	if (sdt->guard_tag != csum) {
3121 		pr_err("GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
3122 			(unsigned long)sector,
3123 			be16_to_cpu(sdt->guard_tag),
3124 			be16_to_cpu(csum));
3125 		return 0x01;
3126 	}
3127 	if (sdebug_dif == T10_PI_TYPE1_PROTECTION &&
3128 	    be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
3129 		pr_err("REF check failed on sector %lu\n",
3130 			(unsigned long)sector);
3131 		return 0x03;
3132 	}
3133 	if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3134 	    be32_to_cpu(sdt->ref_tag) != ei_lba) {
3135 		pr_err("REF check failed on sector %lu\n",
3136 			(unsigned long)sector);
3137 		return 0x03;
3138 	}
3139 	return 0;
3140 }
3141 
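/*
 * Copy protection information (PI tuples) for 'sectors' sectors between the
 * command's protection sg list and dif_storep, wrapping around the end of
 * the store when needed. read==true copies store-->sgl, else sgl-->store.
 */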
3142 static void dif_copy_prot(struct scsi_cmnd *scp, sector_t sector,
3143 			  unsigned int sectors, bool read)
3144 {
3145 	size_t resid;
3146 	void *paddr;
3147 	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3148 						scp->device->hostdata, true);
3149 	struct t10_pi_tuple *dif_storep = sip->dif_storep;
3150 	const void *dif_store_end = dif_storep + sdebug_store_sectors;
3151 	struct sg_mapping_iter miter;
3152 
3153 	/* Bytes of protection data to copy into sgl */
3154 	resid = sectors * sizeof(*dif_storep);
3155 
3156 	sg_miter_start(&miter, scsi_prot_sglist(scp),
3157 		       scsi_prot_sg_count(scp), SG_MITER_ATOMIC |
3158 		       (read ? SG_MITER_TO_SG : SG_MITER_FROM_SG));
3159 
3160 	while (sg_miter_next(&miter) && resid > 0) {
3161 		size_t len = min_t(size_t, miter.length, resid);
3162 		void *start = dif_store(sip, sector);
3163 		size_t rest = 0;
3164 
3165 		if (dif_store_end < start + len)
3166 			rest = start + len - dif_store_end;
3167 
3168 		paddr = miter.addr;
3169 
3170 		if (read)
3171 			memcpy(paddr, start, len - rest);
3172 		else
3173 			memcpy(start, paddr, len - rest);
3174 
3175 		if (rest) {
3176 			if (read)
3177 				memcpy(paddr + len - rest, dif_storep, rest);
3178 			else
3179 				memcpy(dif_storep, paddr + len - rest, rest);
3180 		}
3181 
3182 		sector += len / sizeof(*dif_storep);
3183 		resid -= len;
3184 	}
3185 	sg_miter_stop(&miter);
3186 }
3187 
3188 static int prot_verify_read(struct scsi_cmnd *scp, sector_t start_sec,
3189 			    unsigned int sectors, u32 ei_lba)
3190 {
3191 	int ret = 0;
3192 	unsigned int i;
3193 	sector_t sector;
3194 	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3195 						scp->device->hostdata, true);
3196 	struct t10_pi_tuple *sdt;
3197 
3198 	for (i = 0; i < sectors; i++, ei_lba++) {
3199 		sector = start_sec + i;
3200 		sdt = dif_store(sip, sector);
3201 
3202 		if (sdt->app_tag == cpu_to_be16(0xffff))
3203 			continue;
3204 
3205 		/*
3206 		 * Because scsi_debug acts as both initiator and
3207 		 * target we proceed to verify the PI even if
3208 		 * RDPROTECT=3. This is done so the "initiator" knows
3209 		 * which type of error to return. Otherwise we would
3210 		 * have to iterate over the PI twice.
3211 		 */
3212 		if (scp->cmnd[1] >> 5) { /* RDPROTECT */
3213 			ret = dif_verify(sdt, lba2fake_store(sip, sector),
3214 					 sector, ei_lba);
3215 			if (ret) {
3216 				dif_errors++;
3217 				break;
3218 			}
3219 		}
3220 	}
3221 
3222 	dif_copy_prot(scp, start_sec, sectors, true);
3223 	dix_reads++;
3224 
3225 	return ret;
3226 }
3227 
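/*
 * Lock helpers for the per-store rwlock (macc_lck); the global
 * sdeb_fake_rw_lck is used when there is no store (sip is NULL). With
 * sdebug_no_rwlock set, no real locking is done and __acquire()/__release()
 * only keep sparse's context tracking balanced.
 */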
3228 static inline void
3229 sdeb_read_lock(struct sdeb_store_info *sip)
3230 {
3231 	if (sdebug_no_rwlock) {
3232 		if (sip)
3233 			__acquire(&sip->macc_lck);
3234 		else
3235 			__acquire(&sdeb_fake_rw_lck);
3236 	} else {
3237 		if (sip)
3238 			read_lock(&sip->macc_lck);
3239 		else
3240 			read_lock(&sdeb_fake_rw_lck);
3241 	}
3242 }
3243 
3244 static inline void
3245 sdeb_read_unlock(struct sdeb_store_info *sip)
3246 {
3247 	if (sdebug_no_rwlock) {
3248 		if (sip)
3249 			__release(&sip->macc_lck);
3250 		else
3251 			__release(&sdeb_fake_rw_lck);
3252 	} else {
3253 		if (sip)
3254 			read_unlock(&sip->macc_lck);
3255 		else
3256 			read_unlock(&sdeb_fake_rw_lck);
3257 	}
3258 }
3259 
3260 static inline void
3261 sdeb_write_lock(struct sdeb_store_info *sip)
3262 {
3263 	if (sdebug_no_rwlock) {
3264 		if (sip)
3265 			__acquire(&sip->macc_lck);
3266 		else
3267 			__acquire(&sdeb_fake_rw_lck);
3268 	} else {
3269 		if (sip)
3270 			write_lock(&sip->macc_lck);
3271 		else
3272 			write_lock(&sdeb_fake_rw_lck);
3273 	}
3274 }
3275 
3276 static inline void
3277 sdeb_write_unlock(struct sdeb_store_info *sip)
3278 {
3279 	if (sdebug_no_rwlock) {
3280 		if (sip)
3281 			__release(&sip->macc_lck);
3282 		else
3283 			__release(&sdeb_fake_rw_lck);
3284 	} else {
3285 		if (sip)
3286 			write_unlock(&sip->macc_lck);
3287 		else
3288 			write_unlock(&sdeb_fake_rw_lck);
3289 	}
3290 }
3291 
3292 static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3293 {
3294 	bool check_prot;
3295 	u32 num;
3296 	u32 ei_lba;
3297 	int ret;
3298 	u64 lba;
3299 	struct sdeb_store_info *sip = devip2sip(devip, true);
3300 	u8 *cmd = scp->cmnd;
3301 
3302 	switch (cmd[0]) {
3303 	case READ_16:
3304 		ei_lba = 0;
3305 		lba = get_unaligned_be64(cmd + 2);
3306 		num = get_unaligned_be32(cmd + 10);
3307 		check_prot = true;
3308 		break;
3309 	case READ_10:
3310 		ei_lba = 0;
3311 		lba = get_unaligned_be32(cmd + 2);
3312 		num = get_unaligned_be16(cmd + 7);
3313 		check_prot = true;
3314 		break;
3315 	case READ_6:
3316 		ei_lba = 0;
3317 		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
3318 		      (u32)(cmd[1] & 0x1f) << 16;
3319 		num = (0 == cmd[4]) ? 256 : cmd[4];
3320 		check_prot = true;
3321 		break;
3322 	case READ_12:
3323 		ei_lba = 0;
3324 		lba = get_unaligned_be32(cmd + 2);
3325 		num = get_unaligned_be32(cmd + 6);
3326 		check_prot = true;
3327 		break;
3328 	case XDWRITEREAD_10:
3329 		ei_lba = 0;
3330 		lba = get_unaligned_be32(cmd + 2);
3331 		num = get_unaligned_be16(cmd + 7);
3332 		check_prot = false;
3333 		break;
3334 	default:	/* assume READ(32) */
3335 		lba = get_unaligned_be64(cmd + 12);
3336 		ei_lba = get_unaligned_be32(cmd + 20);
3337 		num = get_unaligned_be32(cmd + 28);
3338 		check_prot = false;
3339 		break;
3340 	}
3341 	if (unlikely(have_dif_prot && check_prot)) {
3342 		if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3343 		    (cmd[1] & 0xe0)) {
3344 			mk_sense_invalid_opcode(scp);
3345 			return check_condition_result;
3346 		}
3347 		if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3348 		     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3349 		    (cmd[1] & 0xe0) == 0)
3350 			sdev_printk(KERN_ERR, scp->device, "Unprotected RD "
3351 				    "to DIF device\n");
3352 	}
3353 	if (unlikely((sdebug_opts & SDEBUG_OPT_SHORT_TRANSFER) &&
3354 		     atomic_read(&sdeb_inject_pending))) {
3355 		num /= 2;
3356 		atomic_set(&sdeb_inject_pending, 0);
3357 	}
3358 
3359 	ret = check_device_access_params(scp, lba, num, false);
3360 	if (ret)
3361 		return ret;
3362 	if (unlikely((SDEBUG_OPT_MEDIUM_ERR & sdebug_opts) &&
3363 		     (lba <= (sdebug_medium_error_start + sdebug_medium_error_count - 1)) &&
3364 		     ((lba + num) > sdebug_medium_error_start))) {
3365 		/* claim unrecoverable read error */
3366 		mk_sense_buffer(scp, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
3367 		/* set info field and valid bit for fixed descriptor */
3368 		if (0x70 == (scp->sense_buffer[0] & 0x7f)) {
3369 			scp->sense_buffer[0] |= 0x80;	/* Valid bit */
3370 			ret = (lba < OPT_MEDIUM_ERR_ADDR)
3371 			      ? OPT_MEDIUM_ERR_ADDR : (int)lba;
3372 			put_unaligned_be32(ret, scp->sense_buffer + 3);
3373 		}
3374 		scsi_set_resid(scp, scsi_bufflen(scp));
3375 		return check_condition_result;
3376 	}
3377 
3378 	sdeb_read_lock(sip);
3379 
3380 	/* DIX + T10 DIF */
3381 	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3382 		switch (prot_verify_read(scp, lba, num, ei_lba)) {
3383 		case 1: /* Guard tag error */
3384 			if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */
3385 				sdeb_read_unlock(sip);
3386 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3387 				return check_condition_result;
3388 			} else if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) {
3389 				sdeb_read_unlock(sip);
3390 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3391 				return illegal_condition_result;
3392 			}
3393 			break;
3394 		case 3: /* Reference tag error */
3395 			if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */
3396 				sdeb_read_unlock(sip);
3397 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3);
3398 				return check_condition_result;
3399 			} else if (scp->prot_flags & SCSI_PROT_REF_CHECK) {
3400 				sdeb_read_unlock(sip);
3401 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3);
3402 				return illegal_condition_result;
3403 			}
3404 			break;
3405 		}
3406 	}
3407 
3408 	ret = do_device_access(sip, scp, 0, lba, num, false);
3409 	sdeb_read_unlock(sip);
3410 	if (unlikely(ret == -1))
3411 		return DID_ERROR << 16;
3412 
3413 	scsi_set_resid(scp, scsi_bufflen(scp) - ret);
3414 
3415 	if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
3416 		     atomic_read(&sdeb_inject_pending))) {
3417 		if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
3418 			mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
3419 			atomic_set(&sdeb_inject_pending, 0);
3420 			return check_condition_result;
3421 		} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
3422 			/* Logical block guard check failed */
3423 			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3424 			atomic_set(&sdeb_inject_pending, 0);
3425 			return illegal_condition_result;
3426 		} else if (SDEBUG_OPT_DIX_ERR & sdebug_opts) {
3427 			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3428 			atomic_set(&sdeb_inject_pending, 0);
3429 			return illegal_condition_result;
3430 		}
3431 	}
3432 	return 0;
3433 }
3434 
3435 static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
3436 			     unsigned int sectors, u32 ei_lba)
3437 {
3438 	int ret;
3439 	struct t10_pi_tuple *sdt;
3440 	void *daddr;
3441 	sector_t sector = start_sec;
3442 	int ppage_offset;
3443 	int dpage_offset;
3444 	struct sg_mapping_iter diter;
3445 	struct sg_mapping_iter piter;
3446 
3447 	BUG_ON(scsi_sg_count(SCpnt) == 0);
3448 	BUG_ON(scsi_prot_sg_count(SCpnt) == 0);
3449 
3450 	sg_miter_start(&piter, scsi_prot_sglist(SCpnt),
3451 			scsi_prot_sg_count(SCpnt),
3452 			SG_MITER_ATOMIC | SG_MITER_FROM_SG);
3453 	sg_miter_start(&diter, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
3454 			SG_MITER_ATOMIC | SG_MITER_FROM_SG);
3455 
3456 	/* For each protection page */
3457 	while (sg_miter_next(&piter)) {
3458 		dpage_offset = 0;
3459 		if (WARN_ON(!sg_miter_next(&diter))) {
3460 			ret = 0x01;
3461 			goto out;
3462 		}
3463 
3464 		for (ppage_offset = 0; ppage_offset < piter.length;
3465 		     ppage_offset += sizeof(struct t10_pi_tuple)) {
3466 			/* If we're at the end of the current
3467 			 * data page advance to the next one
3468 			 */
3469 			if (dpage_offset >= diter.length) {
3470 				if (WARN_ON(!sg_miter_next(&diter))) {
3471 					ret = 0x01;
3472 					goto out;
3473 				}
3474 				dpage_offset = 0;
3475 			}
3476 
3477 			sdt = piter.addr + ppage_offset;
3478 			daddr = diter.addr + dpage_offset;
3479 
3480 			if (SCpnt->cmnd[1] >> 5 != 3) { /* WRPROTECT */
3481 				ret = dif_verify(sdt, daddr, sector, ei_lba);
3482 				if (ret)
3483 					goto out;
3484 			}
3485 
3486 			sector++;
3487 			ei_lba++;
3488 			dpage_offset += sdebug_sector_size;
3489 		}
3490 		diter.consumed = dpage_offset;
3491 		sg_miter_stop(&diter);
3492 	}
3493 	sg_miter_stop(&piter);
3494 
3495 	dif_copy_prot(SCpnt, start_sec, sectors, false);
3496 	dix_writes++;
3497 
3498 	return 0;
3499 
3500 out:
3501 	dif_errors++;
3502 	sg_miter_stop(&diter);
3503 	sg_miter_stop(&piter);
3504 	return ret;
3505 }
3506 
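/*
 * Logical block provisioning (UNMAP) support: map_storep is a bitmap with
 * one bit per sdebug_unmap_granularity blocks, shifted to honour
 * sdebug_unmap_alignment. The two helpers below convert between an LBA and
 * its bitmap index.
 */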
3507 static unsigned long lba_to_map_index(sector_t lba)
3508 {
3509 	if (sdebug_unmap_alignment)
3510 		lba += sdebug_unmap_granularity - sdebug_unmap_alignment;
3511 	sector_div(lba, sdebug_unmap_granularity);
3512 	return lba;
3513 }
3514 
3515 static sector_t map_index_to_lba(unsigned long index)
3516 {
3517 	sector_t lba = index * sdebug_unmap_granularity;
3518 
3519 	if (sdebug_unmap_alignment)
3520 		lba -= sdebug_unmap_granularity - sdebug_unmap_alignment;
3521 	return lba;
3522 }
3523 
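/*
 * Returns whether lba is mapped and sets *num to the number of blocks,
 * starting at lba and clamped to the store size, that share that state.
 */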
3524 static unsigned int map_state(struct sdeb_store_info *sip, sector_t lba,
3525 			      unsigned int *num)
3526 {
3527 	sector_t end;
3528 	unsigned int mapped;
3529 	unsigned long index;
3530 	unsigned long next;
3531 
3532 	index = lba_to_map_index(lba);
3533 	mapped = test_bit(index, sip->map_storep);
3534 
3535 	if (mapped)
3536 		next = find_next_zero_bit(sip->map_storep, map_size, index);
3537 	else
3538 		next = find_next_bit(sip->map_storep, map_size, index);
3539 
3540 	end = min_t(sector_t, sdebug_store_sectors,  map_index_to_lba(next));
3541 	*num = end - lba;
3542 	return mapped;
3543 }
3544 
3545 static void map_region(struct sdeb_store_info *sip, sector_t lba,
3546 		       unsigned int len)
3547 {
3548 	sector_t end = lba + len;
3549 
3550 	while (lba < end) {
3551 		unsigned long index = lba_to_map_index(lba);
3552 
3553 		if (index < map_size)
3554 			set_bit(index, sip->map_storep);
3555 
3556 		lba = map_index_to_lba(index + 1);
3557 	}
3558 }
3559 
3560 static void unmap_region(struct sdeb_store_info *sip, sector_t lba,
3561 			 unsigned int len)
3562 {
3563 	sector_t end = lba + len;
3564 	u8 *fsp = sip->storep;
3565 
3566 	while (lba < end) {
3567 		unsigned long index = lba_to_map_index(lba);
3568 
3569 		if (lba == map_index_to_lba(index) &&
3570 		    lba + sdebug_unmap_granularity <= end &&
3571 		    index < map_size) {
3572 			clear_bit(index, sip->map_storep);
3573 			if (sdebug_lbprz) {  /* LBPRZ=1: zero fill; LBPRZ=2: 0xff fill */
3574 				memset(fsp + lba * sdebug_sector_size,
3575 				       (sdebug_lbprz & 1) ? 0 : 0xff,
3576 				       sdebug_sector_size *
3577 				       sdebug_unmap_granularity);
3578 			}
3579 			if (sip->dif_storep) {
3580 				memset(sip->dif_storep + lba, 0xff,
3581 				       sizeof(*sip->dif_storep) *
3582 				       sdebug_unmap_granularity);
3583 			}
3584 		}
3585 		lba = map_index_to_lba(index + 1);
3586 	}
3587 }
3588 
3589 static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3590 {
3591 	bool check_prot;
3592 	u32 num;
3593 	u32 ei_lba;
3594 	int ret;
3595 	u64 lba;
3596 	struct sdeb_store_info *sip = devip2sip(devip, true);
3597 	u8 *cmd = scp->cmnd;
3598 
3599 	switch (cmd[0]) {
3600 	case WRITE_16:
3601 		ei_lba = 0;
3602 		lba = get_unaligned_be64(cmd + 2);
3603 		num = get_unaligned_be32(cmd + 10);
3604 		check_prot = true;
3605 		break;
3606 	case WRITE_10:
3607 		ei_lba = 0;
3608 		lba = get_unaligned_be32(cmd + 2);
3609 		num = get_unaligned_be16(cmd + 7);
3610 		check_prot = true;
3611 		break;
3612 	case WRITE_6:
3613 		ei_lba = 0;
3614 		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
3615 		      (u32)(cmd[1] & 0x1f) << 16;
3616 		num = (0 == cmd[4]) ? 256 : cmd[4];
3617 		check_prot = true;
3618 		break;
3619 	case WRITE_12:
3620 		ei_lba = 0;
3621 		lba = get_unaligned_be32(cmd + 2);
3622 		num = get_unaligned_be32(cmd + 6);
3623 		check_prot = true;
3624 		break;
3625 	case XDWRITEREAD_10:	/* 0x53 */
3626 		ei_lba = 0;
3627 		lba = get_unaligned_be32(cmd + 2);
3628 		num = get_unaligned_be16(cmd + 7);
3629 		check_prot = false;
3630 		break;
3631 	default:	/* assume WRITE(32) */
3632 		lba = get_unaligned_be64(cmd + 12);
3633 		ei_lba = get_unaligned_be32(cmd + 20);
3634 		num = get_unaligned_be32(cmd + 28);
3635 		check_prot = false;
3636 		break;
3637 	}
3638 	if (unlikely(have_dif_prot && check_prot)) {
3639 		if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3640 		    (cmd[1] & 0xe0)) {
3641 			mk_sense_invalid_opcode(scp);
3642 			return check_condition_result;
3643 		}
3644 		if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3645 		     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3646 		    (cmd[1] & 0xe0) == 0)
3647 			sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
3648 				    "to DIF device\n");
3649 	}
3650 
3651 	sdeb_write_lock(sip);
3652 	ret = check_device_access_params(scp, lba, num, true);
3653 	if (ret) {
3654 		sdeb_write_unlock(sip);
3655 		return ret;
3656 	}
3657 
3658 	/* DIX + T10 DIF */
3659 	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3660 		switch (prot_verify_write(scp, lba, num, ei_lba)) {
3661 		case 1: /* Guard tag error */
3662 			if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) {
3663 				sdeb_write_unlock(sip);
3664 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3665 				return illegal_condition_result;
3666 			} else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */
3667 				sdeb_write_unlock(sip);
3668 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3669 				return check_condition_result;
3670 			}
3671 			break;
3672 		case 3: /* Reference tag error */
3673 			if (scp->prot_flags & SCSI_PROT_REF_CHECK) {
3674 				sdeb_write_unlock(sip);
3675 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3);
3676 				return illegal_condition_result;
3677 			} else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */
3678 				sdeb_write_unlock(sip);
3679 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3);
3680 				return check_condition_result;
3681 			}
3682 			break;
3683 		}
3684 	}
3685 
3686 	ret = do_device_access(sip, scp, 0, lba, num, true);
3687 	if (unlikely(scsi_debug_lbp()))
3688 		map_region(sip, lba, num);
3689 	/* If ZBC zone then bump its write pointer */
3690 	if (sdebug_dev_is_zoned(devip))
3691 		zbc_inc_wp(devip, lba, num);
3692 	sdeb_write_unlock(sip);
3693 	if (unlikely(-1 == ret))
3694 		return DID_ERROR << 16;
3695 	else if (unlikely(sdebug_verbose &&
3696 			  (ret < (num * sdebug_sector_size))))
3697 		sdev_printk(KERN_INFO, scp->device,
3698 			    "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
3699 			    my_name, num * sdebug_sector_size, ret);
3700 
3701 	if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
3702 		     atomic_read(&sdeb_inject_pending))) {
3703 		if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
3704 			mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
3705 			atomic_set(&sdeb_inject_pending, 0);
3706 			return check_condition_result;
3707 		} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
3708 			/* Logical block guard check failed */
3709 			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3710 			atomic_set(&sdeb_inject_pending, 0);
3711 			return illegal_condition_result;
3712 		} else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
3713 			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3714 			atomic_set(&sdeb_inject_pending, 0);
3715 			return illegal_condition_result;
3716 		}
3717 	}
3718 	return 0;
3719 }
3720 
3721 /*
3722  * T10 has only specified WRITE SCATTERED(16) and WRITE SCATTERED(32).
3723  * No READ GATHERED yet (requires bidi or long cdb holding gather list).
3724  */
3725 static int resp_write_scat(struct scsi_cmnd *scp,
3726 			   struct sdebug_dev_info *devip)
3727 {
3728 	u8 *cmd = scp->cmnd;
3729 	u8 *lrdp = NULL;
3730 	u8 *up;
3731 	struct sdeb_store_info *sip = devip2sip(devip, true);
3732 	u8 wrprotect;
3733 	u16 lbdof, num_lrd, k;
3734 	u32 num, num_by, bt_len, lbdof_blen, sg_off, cum_lb;
3735 	u32 lb_size = sdebug_sector_size;
3736 	u32 ei_lba;
3737 	u64 lba;
3738 	int ret, res;
3739 	bool is_16;
3740 	static const u32 lrd_size = 32; /* descriptor size; header is also 32 bytes */
3741 
3742 	if (cmd[0] == VARIABLE_LENGTH_CMD) {
3743 		is_16 = false;
3744 		wrprotect = (cmd[10] >> 5) & 0x7;
3745 		lbdof = get_unaligned_be16(cmd + 12);
3746 		num_lrd = get_unaligned_be16(cmd + 16);
3747 		bt_len = get_unaligned_be32(cmd + 28);
3748 	} else {        /* that leaves WRITE SCATTERED(16) */
3749 		is_16 = true;
3750 		wrprotect = (cmd[2] >> 5) & 0x7;
3751 		lbdof = get_unaligned_be16(cmd + 4);
3752 		num_lrd = get_unaligned_be16(cmd + 8);
3753 		bt_len = get_unaligned_be32(cmd + 10);
3754 		if (unlikely(have_dif_prot)) {
3755 			if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3756 			    wrprotect) {
3757 				mk_sense_invalid_opcode(scp);
3758 				return illegal_condition_result;
3759 			}
3760 			if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3761 			     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3762 			     wrprotect == 0)
3763 				sdev_printk(KERN_ERR, scp->device,
3764 					    "Unprotected WR to DIF device\n");
3765 		}
3766 	}
3767 	if ((num_lrd == 0) || (bt_len == 0))
3768 		return 0;       /* T10 says these do-nothings are not errors */
3769 	if (lbdof == 0) {
3770 		if (sdebug_verbose)
3771 			sdev_printk(KERN_INFO, scp->device,
3772 				"%s: %s: LB Data Offset field bad\n",
3773 				my_name, __func__);
3774 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3775 		return illegal_condition_result;
3776 	}
3777 	lbdof_blen = lbdof * lb_size;
3778 	if ((lrd_size + (num_lrd * lrd_size)) > lbdof_blen) {
3779 		if (sdebug_verbose)
3780 			sdev_printk(KERN_INFO, scp->device,
3781 				"%s: %s: LBA range descriptors don't fit\n",
3782 				my_name, __func__);
3783 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3784 		return illegal_condition_result;
3785 	}
3786 	lrdp = kzalloc(lbdof_blen, GFP_ATOMIC | __GFP_NOWARN);
3787 	if (lrdp == NULL)
3788 		return SCSI_MLQUEUE_HOST_BUSY;
3789 	if (sdebug_verbose)
3790 		sdev_printk(KERN_INFO, scp->device,
3791 			"%s: %s: Fetch header+scatter_list, lbdof_blen=%u\n",
3792 			my_name, __func__, lbdof_blen);
3793 	res = fetch_to_dev_buffer(scp, lrdp, lbdof_blen);
3794 	if (res == -1) {
3795 		ret = DID_ERROR << 16;
3796 		goto err_out;
3797 	}
3798 
3799 	sdeb_write_lock(sip);
3800 	sg_off = lbdof_blen;
3801 	/* Spec says the Buffer Transfer Length field counts logical blocks in dout */
3802 	cum_lb = 0;
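	/*
	 * Each 32 byte LBA range descriptor carries the LBA at offset 0 and
	 * the number of logical blocks at offset 8; the 32 byte cdb variant
	 * also carries the expected initial LBA reference tag (ei_lba) at
	 * offset 12.
	 */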
3803 	for (k = 0, up = lrdp + lrd_size; k < num_lrd; ++k, up += lrd_size) {
3804 		lba = get_unaligned_be64(up + 0);
3805 		num = get_unaligned_be32(up + 8);
3806 		if (sdebug_verbose)
3807 			sdev_printk(KERN_INFO, scp->device,
3808 				"%s: %s: k=%d  LBA=0x%llx num=%u  sg_off=%u\n",
3809 				my_name, __func__, k, lba, num, sg_off);
3810 		if (num == 0)
3811 			continue;
3812 		ret = check_device_access_params(scp, lba, num, true);
3813 		if (ret)
3814 			goto err_out_unlock;
3815 		num_by = num * lb_size;
3816 		ei_lba = is_16 ? 0 : get_unaligned_be32(up + 12);
3817 
3818 		if ((cum_lb + num) > bt_len) {
3819 			if (sdebug_verbose)
3820 				sdev_printk(KERN_INFO, scp->device,
3821 				    "%s: %s: sum of blocks > data provided\n",
3822 				    my_name, __func__);
3823 			mk_sense_buffer(scp, ILLEGAL_REQUEST, WRITE_ERROR_ASC,
3824 					0);
3825 			ret = illegal_condition_result;
3826 			goto err_out_unlock;
3827 		}
3828 
3829 		/* DIX + T10 DIF */
3830 		if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3831 			int prot_ret = prot_verify_write(scp, lba, num,
3832 							 ei_lba);
3833 
3834 			if (prot_ret) {
3835 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10,
3836 						prot_ret);
3837 				ret = illegal_condition_result;
3838 				goto err_out_unlock;
3839 			}
3840 		}
3841 
3842 		ret = do_device_access(sip, scp, sg_off, lba, num, true);
3843 		/* If ZBC zone then bump its write pointer */
3844 		if (sdebug_dev_is_zoned(devip))
3845 			zbc_inc_wp(devip, lba, num);
3846 		if (unlikely(scsi_debug_lbp()))
3847 			map_region(sip, lba, num);
3848 		if (unlikely(-1 == ret)) {
3849 			ret = DID_ERROR << 16;
3850 			goto err_out_unlock;
3851 		} else if (unlikely(sdebug_verbose && (ret < num_by)))
3852 			sdev_printk(KERN_INFO, scp->device,
3853 			    "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
3854 			    my_name, num_by, ret);
3855 
3856 		if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
3857 			     atomic_read(&sdeb_inject_pending))) {
3858 			if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
3859 				mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
3860 				atomic_set(&sdeb_inject_pending, 0);
3861 				ret = check_condition_result;
3862 				goto err_out_unlock;
3863 			} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
3864 				/* Logical block guard check failed */
3865 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3866 				atomic_set(&sdeb_inject_pending, 0);
3867 				ret = illegal_condition_result;
3868 				goto err_out_unlock;
3869 			} else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
3870 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3871 				atomic_set(&sdeb_inject_pending, 0);
3872 				ret = illegal_condition_result;
3873 				goto err_out_unlock;
3874 			}
3875 		}
3876 		sg_off += num_by;
3877 		cum_lb += num;
3878 	}
3879 	ret = 0;
3880 err_out_unlock:
3881 	sdeb_write_unlock(sip);
3882 err_out:
3883 	kfree(lrdp);
3884 	return ret;
3885 }
3886 
3887 static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
3888 			   u32 ei_lba, bool unmap, bool ndob)
3889 {
3890 	struct scsi_device *sdp = scp->device;
3891 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
3892 	unsigned long long i;
3893 	u64 block, lbaa;
3894 	u32 lb_size = sdebug_sector_size;
3895 	int ret;
3896 	struct sdeb_store_info *sip = devip2sip(devip, true);
3898 	u8 *fs1p;
3899 	u8 *fsp;
3900 
3901 	sdeb_write_lock(sip);
3902 
3903 	ret = check_device_access_params(scp, lba, num, true);
3904 	if (ret) {
3905 		sdeb_write_unlock(sip);
3906 		return ret;
3907 	}
3908 
3909 	if (unmap && scsi_debug_lbp()) {
3910 		unmap_region(sip, lba, num);
3911 		goto out;
3912 	}
3913 	lbaa = lba;
3914 	block = do_div(lbaa, sdebug_store_sectors);
3915 	/* if ndob then zero 1 logical block, else fetch 1 logical block */
3916 	fsp = sip->storep;
3917 	fs1p = fsp + (block * lb_size);
3918 	if (ndob) {
3919 		memset(fs1p, 0, lb_size);
3920 		ret = 0;
3921 	} else
3922 		ret = fetch_to_dev_buffer(scp, fs1p, lb_size);
3923 
3924 	if (-1 == ret) {
3925 		sdeb_write_unlock(sip);
3926 		return DID_ERROR << 16;
3927 	} else if (sdebug_verbose && !ndob && (ret < lb_size))
3928 		sdev_printk(KERN_INFO, scp->device,
3929 			    "%s: %s: lb size=%u, IO sent=%d bytes\n",
3930 			    my_name, "write same", lb_size, ret);
3931 
3932 	/* Copy first sector to remaining blocks */
3933 	for (i = 1 ; i < num ; i++) {
3934 		lbaa = lba + i;
3935 		block = do_div(lbaa, sdebug_store_sectors);
3936 		memmove(fsp + (block * lb_size), fs1p, lb_size);
3937 	}
3938 	if (scsi_debug_lbp())
3939 		map_region(sip, lba, num);
3940 	/* If ZBC zone then bump its write pointer */
3941 	if (sdebug_dev_is_zoned(devip))
3942 		zbc_inc_wp(devip, lba, num);
3943 out:
3944 	sdeb_write_unlock(sip);
3945 
3946 	return 0;
3947 }
3948 
3949 static int resp_write_same_10(struct scsi_cmnd *scp,
3950 			      struct sdebug_dev_info *devip)
3951 {
3952 	u8 *cmd = scp->cmnd;
3953 	u32 lba;
3954 	u16 num;
3955 	u32 ei_lba = 0;
3956 	bool unmap = false;
3957 
3958 	if (cmd[1] & 0x8) {
3959 		if (sdebug_lbpws10 == 0) {
3960 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3961 			return check_condition_result;
3962 		} else
3963 			unmap = true;
3964 	}
3965 	lba = get_unaligned_be32(cmd + 2);
3966 	num = get_unaligned_be16(cmd + 7);
3967 	if (num > sdebug_write_same_length) {
3968 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3969 		return check_condition_result;
3970 	}
3971 	return resp_write_same(scp, lba, num, ei_lba, unmap, false);
3972 }
3973 
3974 static int resp_write_same_16(struct scsi_cmnd *scp,
3975 			      struct sdebug_dev_info *devip)
3976 {
3977 	u8 *cmd = scp->cmnd;
3978 	u64 lba;
3979 	u32 num;
3980 	u32 ei_lba = 0;
3981 	bool unmap = false;
3982 	bool ndob = false;
3983 
3984 	if (cmd[1] & 0x8) {	/* UNMAP */
3985 		if (sdebug_lbpws == 0) {
3986 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3987 			return check_condition_result;
3988 		} else
3989 			unmap = true;
3990 	}
3991 	if (cmd[1] & 0x1)  /* NDOB (no data-out buffer, assumes zeroes) */
3992 		ndob = true;
3993 	lba = get_unaligned_be64(cmd + 2);
3994 	num = get_unaligned_be32(cmd + 10);
3995 	if (num > sdebug_write_same_length) {
3996 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
3997 		return check_condition_result;
3998 	}
3999 	return resp_write_same(scp, lba, num, ei_lba, unmap, ndob);
4000 }
4001 
4002 /* Note the mode field is in the same position as the (lower) service action
4003  * field. For the REPORT SUPPORTED OPERATION CODES command, SPC-4 suggests
4004  * each mode of this command should be reported separately; left for future. */
4005 static int resp_write_buffer(struct scsi_cmnd *scp,
4006 			     struct sdebug_dev_info *devip)
4007 {
4008 	u8 *cmd = scp->cmnd;
4009 	struct scsi_device *sdp = scp->device;
4010 	struct sdebug_dev_info *dp;
4011 	u8 mode;
4012 
4013 	mode = cmd[1] & 0x1f;
4014 	switch (mode) {
4015 	case 0x4:	/* download microcode (MC) and activate (ACT) */
4016 		/* set UAs on this device only */
4017 		set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
4018 		set_bit(SDEBUG_UA_MICROCODE_CHANGED, devip->uas_bm);
4019 		break;
4020 	case 0x5:	/* download MC, save and ACT */
4021 		set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET, devip->uas_bm);
4022 		break;
4023 	case 0x6:	/* download MC with offsets and ACT */
4024 		/* set UAs on most devices (LUs) in this target */
4025 		list_for_each_entry(dp,
4026 				    &devip->sdbg_host->dev_info_list,
4027 				    dev_list)
4028 			if (dp->target == sdp->id) {
4029 				set_bit(SDEBUG_UA_BUS_RESET, dp->uas_bm);
4030 				if (devip != dp)
4031 					set_bit(SDEBUG_UA_MICROCODE_CHANGED,
4032 						dp->uas_bm);
4033 			}
4034 		break;
4035 	case 0x7:	/* download MC with offsets, save, and ACT */
4036 		/* set UA on all devices (LUs) in this target */
4037 		list_for_each_entry(dp,
4038 				    &devip->sdbg_host->dev_info_list,
4039 				    dev_list)
4040 			if (dp->target == sdp->id)
4041 				set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET,
4042 					dp->uas_bm);
4043 		break;
4044 	default:
4045 		/* do nothing for this command for other mode values */
4046 		break;
4047 	}
4048 	return 0;
4049 }
4050 
4051 static int resp_comp_write(struct scsi_cmnd *scp,
4052 			   struct sdebug_dev_info *devip)
4053 {
4054 	u8 *cmd = scp->cmnd;
4055 	u8 *arr;
4056 	struct sdeb_store_info *sip = devip2sip(devip, true);
4057 	u64 lba;
4058 	u32 dnum;
4059 	u32 lb_size = sdebug_sector_size;
4060 	u8 num;
4061 	int ret;
4062 	int retval = 0;
4063 
4064 	lba = get_unaligned_be64(cmd + 2);
4065 	num = cmd[13];		/* 1 to a maximum of 255 logical blocks */
4066 	if (0 == num)
4067 		return 0;	/* degenerate case, not an error */
4068 	if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
4069 	    (cmd[1] & 0xe0)) {
4070 		mk_sense_invalid_opcode(scp);
4071 		return check_condition_result;
4072 	}
4073 	if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
4074 	     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
4075 	    (cmd[1] & 0xe0) == 0)
4076 		sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
4077 			    "to DIF device\n");
4078 	ret = check_device_access_params(scp, lba, num, false);
4079 	if (ret)
4080 		return ret;
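	/* dout carries num blocks to verify followed by num blocks to write */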
4081 	dnum = 2 * num;
4082 	arr = kcalloc(lb_size, dnum, GFP_ATOMIC);
4083 	if (NULL == arr) {
4084 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4085 				INSUFF_RES_ASCQ);
4086 		return check_condition_result;
4087 	}
4088 
4089 	sdeb_write_lock(sip);
4090 
4091 	ret = do_dout_fetch(scp, dnum, arr);
4092 	if (ret == -1) {
4093 		retval = DID_ERROR << 16;
4094 		goto cleanup;
4095 	} else if (sdebug_verbose && (ret < (dnum * lb_size)))
4096 		sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb "
4097 			    "indicated=%u, IO sent=%d bytes\n", my_name,
4098 			    dnum * lb_size, ret);
4099 	if (!comp_write_worker(sip, lba, num, arr, false)) {
4100 		mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
4101 		retval = check_condition_result;
4102 		goto cleanup;
4103 	}
4104 	if (scsi_debug_lbp())
4105 		map_region(sip, lba, num);
4106 cleanup:
4107 	sdeb_write_unlock(sip);
4108 	kfree(arr);
4109 	return retval;
4110 }
4111 
4112 struct unmap_block_desc {
4113 	__be64	lba;
4114 	__be32	blocks;
4115 	__be32	__reserved;
4116 };
4117 
4118 static int resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4119 {
4120 	unsigned char *buf;
4121 	struct unmap_block_desc *desc;
4122 	struct sdeb_store_info *sip = devip2sip(devip, true);
4123 	unsigned int i, payload_len, descriptors;
4124 	int ret;
4125 
4126 	if (!scsi_debug_lbp())
4127 		return 0;	/* fib and say it's done */
4128 	payload_len = get_unaligned_be16(scp->cmnd + 7);
4129 	BUG_ON(scsi_bufflen(scp) != payload_len);
4130 
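	/* parameter list: 8 byte header then 16 byte block descriptors */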
4131 	descriptors = (payload_len - 8) / 16;
4132 	if (descriptors > sdebug_unmap_max_desc) {
4133 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
4134 		return check_condition_result;
4135 	}
4136 
4137 	buf = kzalloc(scsi_bufflen(scp), GFP_ATOMIC);
4138 	if (!buf) {
4139 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4140 				INSUFF_RES_ASCQ);
4141 		return check_condition_result;
4142 	}
4143 
4144 	scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
4145 
4146 	BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
4147 	BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);
4148 
4149 	desc = (void *)&buf[8];
4150 
4151 	sdeb_write_lock(sip);
4152 
4153 	for (i = 0 ; i < descriptors ; i++) {
4154 		unsigned long long lba = get_unaligned_be64(&desc[i].lba);
4155 		unsigned int num = get_unaligned_be32(&desc[i].blocks);
4156 
4157 		ret = check_device_access_params(scp, lba, num, true);
4158 		if (ret)
4159 			goto out;
4160 
4161 		unmap_region(sip, lba, num);
4162 	}
4163 
4164 	ret = 0;
4165 
4166 out:
4167 	sdeb_write_unlock(sip);
4168 	kfree(buf);
4169 
4170 	return ret;
4171 }
4172 
4173 #define SDEBUG_GET_LBA_STATUS_LEN 32
4174 
4175 static int resp_get_lba_status(struct scsi_cmnd *scp,
4176 			       struct sdebug_dev_info *devip)
4177 {
4178 	u8 *cmd = scp->cmnd;
4179 	u64 lba;
4180 	u32 alloc_len, mapped, num;
4181 	int ret;
4182 	u8 arr[SDEBUG_GET_LBA_STATUS_LEN];
4183 
4184 	lba = get_unaligned_be64(cmd + 2);
4185 	alloc_len = get_unaligned_be32(cmd + 10);
4186 
4187 	if (alloc_len < 24)
4188 		return 0;
4189 
4190 	ret = check_device_access_params(scp, lba, 1, false);
4191 	if (ret)
4192 		return ret;
4193 
4194 	if (scsi_debug_lbp()) {
4195 		struct sdeb_store_info *sip = devip2sip(devip, true);
4196 
4197 		mapped = map_state(sip, lba, &num);
4198 	} else {
4199 		mapped = 1;
4200 		/* following just in case virtual_gb changed */
4201 		sdebug_capacity = get_sdebug_capacity();
4202 		if (sdebug_capacity - lba <= 0xffffffff)
4203 			num = sdebug_capacity - lba;
4204 		else
4205 			num = 0xffffffff;
4206 	}
4207 
4208 	memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
4209 	put_unaligned_be32(20, arr);		/* Parameter Data Length */
4210 	put_unaligned_be64(lba, arr + 8);	/* LBA */
4211 	put_unaligned_be32(num, arr + 16);	/* Number of blocks */
4212 	arr[20] = !mapped;		/* prov_stat=0: mapped; 1: dealloc */
4213 
4214 	return fill_from_dev_buffer(scp, arr, SDEBUG_GET_LBA_STATUS_LEN);
4215 }
4216 
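/*
 * SYNCHRONIZE CACHE(10) and (16). Nothing needs flushing from the RAM
 * store; respond immediately (SDEG_RES_IMMED_MASK) if the IMMED bit is set
 * or nothing has been written since the last sync, else model a flush delay.
 */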
4217 static int resp_sync_cache(struct scsi_cmnd *scp,
4218 			   struct sdebug_dev_info *devip)
4219 {
4220 	int res = 0;
4221 	u64 lba;
4222 	u32 num_blocks;
4223 	u8 *cmd = scp->cmnd;
4224 
4225 	if (cmd[0] == SYNCHRONIZE_CACHE) {	/* 10 byte cdb */
4226 		lba = get_unaligned_be32(cmd + 2);
4227 		num_blocks = get_unaligned_be16(cmd + 7);
4228 	} else {				/* SYNCHRONIZE_CACHE(16) */
4229 		lba = get_unaligned_be64(cmd + 2);
4230 		num_blocks = get_unaligned_be32(cmd + 10);
4231 	}
4232 	if (lba + num_blocks > sdebug_capacity) {
4233 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4234 		return check_condition_result;
4235 	}
4236 	if (!write_since_sync || (cmd[1] & 0x2))
4237 		res = SDEG_RES_IMMED_MASK;
4238 	else		/* delay if write_since_sync and IMMED clear */
4239 		write_since_sync = false;
4240 	return res;
4241 }
4242 
4243 /*
4244  * Assuming LBA+num_blocks is not out-of-range, this function returns
4245  * CONDITION MET if the specified blocks will fit (or already sit) in the
4246  * cache, and GOOD status otherwise. Model a disk with a big cache and
4247  * always yield CONDITION MET. As a side effect, try to bring the range in
4248  * main memory into the cache associated with the CPU(s).
4249  */
4250 static int resp_pre_fetch(struct scsi_cmnd *scp,
4251 			  struct sdebug_dev_info *devip)
4252 {
4253 	int res = 0;
4254 	u64 lba;
4255 	u64 block, rest = 0;
4256 	u32 nblks;
4257 	u8 *cmd = scp->cmnd;
4258 	struct sdeb_store_info *sip = devip2sip(devip, true);
4259 	u8 *fsp = sip->storep;
4260 
4261 	if (cmd[0] == PRE_FETCH) {	/* 10 byte cdb */
4262 		lba = get_unaligned_be32(cmd + 2);
4263 		nblks = get_unaligned_be16(cmd + 7);
4264 	} else {			/* PRE-FETCH(16) */
4265 		lba = get_unaligned_be64(cmd + 2);
4266 		nblks = get_unaligned_be32(cmd + 10);
4267 	}
4268 	if (lba + nblks > sdebug_capacity) {
4269 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4270 		return check_condition_result;
4271 	}
4272 	if (!fsp)
4273 		goto fini;
4274 	/* PRE-FETCH spec says nothing about LBP or PI so skip them */
4275 	block = do_div(lba, sdebug_store_sectors);
4276 	if (block + nblks > sdebug_store_sectors)
4277 		rest = block + nblks - sdebug_store_sectors;
4278 
4279 	/* Try to bring the PRE-FETCH range into CPU's cache */
4280 	sdeb_read_lock(sip);
4281 	prefetch_range(fsp + (sdebug_sector_size * block),
4282 		       (nblks - rest) * sdebug_sector_size);
4283 	if (rest)
4284 		prefetch_range(fsp, rest * sdebug_sector_size);
4285 	sdeb_read_unlock(sip);
4286 fini:
4287 	if (cmd[1] & 0x2)
4288 		res = SDEG_RES_IMMED_MASK;
4289 	return res | condition_met_result;
4290 }
4291 
4292 #define RL_BUCKET_ELEMS 8
4293 
4294 /* Even though each pseudo target has a REPORT LUNS "well known logical unit"
4295  * (W-LUN), the normal Linux scanning logic does not associate it with a
4296  * device (e.g. /dev/sg7). The following magic will make that association:
4297  *   "cd /sys/class/scsi_host/host<n> ; echo '- - 49409' > scan"
4298  * where <n> is a host number. If there are multiple targets in a host then
4299  * the above will associate a W-LUN with each target. To get a W-LUN for
4300  * target 2 only, use "echo '- 2 49409' > scan".
4301  */
4302 static int resp_report_luns(struct scsi_cmnd *scp,
4303 			    struct sdebug_dev_info *devip)
4304 {
4305 	unsigned char *cmd = scp->cmnd;
4306 	unsigned int alloc_len;
4307 	unsigned char select_report;
4308 	u64 lun;
4309 	struct scsi_lun *lun_p;
4310 	u8 arr[RL_BUCKET_ELEMS * sizeof(struct scsi_lun)];
4311 	unsigned int lun_cnt;	/* normal LUN count (max: 256) */
4312 	unsigned int wlun_cnt;	/* report luns W-LUN count */
4313 	unsigned int tlun_cnt;	/* total LUN count */
4314 	unsigned int rlen;	/* response length (in bytes) */
4315 	int k, j, n, res;
4316 	unsigned int off_rsp = 0;
4317 	const int sz_lun = sizeof(struct scsi_lun);
4318 
4319 	clear_luns_changed_on_target(devip);
4320 
4321 	select_report = cmd[2];
4322 	alloc_len = get_unaligned_be32(cmd + 6);
4323 
4324 	if (alloc_len < 4) {
4325 		pr_err("alloc len too small %d\n", alloc_len);
4326 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
4327 		return check_condition_result;
4328 	}
4329 
4330 	switch (select_report) {
4331 	case 0:		/* all LUNs apart from W-LUNs */
4332 		lun_cnt = sdebug_max_luns;
4333 		wlun_cnt = 0;
4334 		break;
4335 	case 1:		/* only W-LUNs */
4336 		lun_cnt = 0;
4337 		wlun_cnt = 1;
4338 		break;
4339 	case 2:		/* all LUNs */
4340 		lun_cnt = sdebug_max_luns;
4341 		wlun_cnt = 1;
4342 		break;
4343 	case 0x10:	/* only administrative LUs */
4344 	case 0x11:	/* see SPC-5 */
4345 	case 0x12:	/* only subsidiary LUs owned by referenced LU */
4346 	default:
4347 		pr_debug("select report invalid %d\n", select_report);
4348 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
4349 		return check_condition_result;
4350 	}
4351 
4352 	if (sdebug_no_lun_0 && (lun_cnt > 0))
4353 		--lun_cnt;
4354 
4355 	tlun_cnt = lun_cnt + wlun_cnt;
4356 	rlen = tlun_cnt * sz_lun;	/* excluding 8 byte header */
4357 	scsi_set_resid(scp, scsi_bufflen(scp));
4358 	pr_debug("select_report %d luns = %d wluns = %d no_lun0 %d\n",
4359 		 select_report, lun_cnt, wlun_cnt, sdebug_no_lun_0);
4360 
4361 	/* loops rely on sizeof response header same as sizeof lun (both 8) */
4362 	lun = sdebug_no_lun_0 ? 1 : 0;
4363 	for (k = 0, j = 0, res = 0; true; ++k, j = 0) {
4364 		memset(arr, 0, sizeof(arr));
4365 		lun_p = (struct scsi_lun *)&arr[0];
4366 		if (k == 0) {
4367 			put_unaligned_be32(rlen, &arr[0]);
4368 			++lun_p;
4369 			j = 1;
4370 		}
4371 		for ( ; j < RL_BUCKET_ELEMS; ++j, ++lun_p) {
4372 			if ((k * RL_BUCKET_ELEMS) + j > lun_cnt)
4373 				break;
4374 			int_to_scsilun(lun++, lun_p);
4375 			if (lun > 1 && sdebug_lun_am == SAM_LUN_AM_FLAT)
4376 				lun_p->scsi_lun[0] |= 0x40;
4377 		}
4378 		if (j < RL_BUCKET_ELEMS)
4379 			break;
4380 		n = j * sz_lun;
4381 		res = p_fill_from_dev_buffer(scp, arr, n, off_rsp);
4382 		if (res)
4383 			return res;
4384 		off_rsp += n;
4385 	}
4386 	if (wlun_cnt) {
4387 		int_to_scsilun(SCSI_W_LUN_REPORT_LUNS, lun_p);
4388 		++j;
4389 	}
4390 	if (j > 0)
4391 		res = p_fill_from_dev_buffer(scp, arr, j * sz_lun, off_rsp);
4392 	return res;
4393 }
4394 
4395 static int resp_verify(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4396 {
4397 	bool is_bytchk3 = false;
4398 	u8 bytchk;
4399 	int ret, j;
4400 	u32 vnum, a_num, off;
4401 	const u32 lb_size = sdebug_sector_size;
4402 	u64 lba;
4403 	u8 *arr;
4404 	u8 *cmd = scp->cmnd;
4405 	struct sdeb_store_info *sip = devip2sip(devip, true);
4406 
4407 	bytchk = (cmd[1] >> 1) & 0x3;
4408 	if (bytchk == 0) {
4409 		return 0;	/* always claim internal verify okay */
4410 	} else if (bytchk == 2) {
4411 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
4412 		return check_condition_result;
4413 	} else if (bytchk == 3) {
4414 		is_bytchk3 = true;	/* 1 block sent, compared repeatedly */
4415 	}
4416 	switch (cmd[0]) {
4417 	case VERIFY_16:
4418 		lba = get_unaligned_be64(cmd + 2);
4419 		vnum = get_unaligned_be32(cmd + 10);
4420 		break;
4421 	case VERIFY:		/* is VERIFY(10) */
4422 		lba = get_unaligned_be32(cmd + 2);
4423 		vnum = get_unaligned_be16(cmd + 7);
4424 		break;
4425 	default:
4426 		mk_sense_invalid_opcode(scp);
4427 		return check_condition_result;
4428 	}
4429 	if (vnum == 0)
4430 		return 0;	/* not an error */
4431 	a_num = is_bytchk3 ? 1 : vnum;
4432 	/* Treat following check like one for read (i.e. no write) access */
4433 	ret = check_device_access_params(scp, lba, a_num, false);
4434 	if (ret)
4435 		return ret;
4436 
4437 	arr = kcalloc(lb_size, vnum, GFP_ATOMIC | __GFP_NOWARN);
4438 	if (!arr) {
4439 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4440 				INSUFF_RES_ASCQ);
4441 		return check_condition_result;
4442 	}
4443 	/* Not changing store, so only need read access */
4444 	sdeb_read_lock(sip);
4445 
4446 	ret = do_dout_fetch(scp, a_num, arr);
4447 	if (ret == -1) {
4448 		ret = DID_ERROR << 16;
4449 		goto cleanup;
4450 	} else if (sdebug_verbose && (ret < (a_num * lb_size))) {
4451 		sdev_printk(KERN_INFO, scp->device,
4452 			    "%s: %s: cdb indicated=%u, IO sent=%d bytes\n",
4453 			    my_name, __func__, a_num * lb_size, ret);
4454 	}
4455 	if (is_bytchk3) {
4456 		for (j = 1, off = lb_size; j < vnum; ++j, off += lb_size)
4457 			memcpy(arr + off, arr, lb_size);
4458 	}
4459 	ret = 0;
4460 	if (!comp_write_worker(sip, lba, vnum, arr, true)) {
4461 		mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
4462 		ret = check_condition_result;
4463 		goto cleanup;
4464 	}
4465 cleanup:
4466 	sdeb_read_unlock(sip);
4467 	kfree(arr);
4468 	return ret;
4469 }
4470 
4471 #define RZONES_DESC_HD 64
4472 
4473 /* Report zones depending on start LBA and reporting options */
4474 static int resp_report_zones(struct scsi_cmnd *scp,
4475 			     struct sdebug_dev_info *devip)
4476 {
4477 	unsigned int rep_max_zones, nrz = 0;
4478 	int ret = 0;
4479 	u32 alloc_len, rep_opts, rep_len;
4480 	bool partial;
4481 	u64 lba, zs_lba;
4482 	u8 *arr = NULL, *desc;
4483 	u8 *cmd = scp->cmnd;
4484 	struct sdeb_zone_state *zsp = NULL;
4485 	struct sdeb_store_info *sip = devip2sip(devip, false);
4486 
4487 	if (!sdebug_dev_is_zoned(devip)) {
4488 		mk_sense_invalid_opcode(scp);
4489 		return check_condition_result;
4490 	}
4491 	zs_lba = get_unaligned_be64(cmd + 2);
4492 	alloc_len = get_unaligned_be32(cmd + 10);
4493 	if (alloc_len == 0)
4494 		return 0;	/* not an error */
4495 	rep_opts = cmd[14] & 0x3f;
4496 	partial = cmd[14] & 0x80;
4497 
4498 	if (zs_lba >= sdebug_capacity) {
4499 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4500 		return check_condition_result;
4501 	}
4502 
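	/* each zone descriptor, like the report header, is 64 bytes long */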
4503 	rep_max_zones = (alloc_len - 64) >> ilog2(RZONES_DESC_HD);
4504 
4505 	arr = kzalloc(alloc_len, GFP_ATOMIC | __GFP_NOWARN);
4506 	if (!arr) {
4507 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4508 				INSUFF_RES_ASCQ);
4509 		return check_condition_result;
4510 	}
4511 
4512 	sdeb_read_lock(sip);
4513 
4514 	desc = arr + 64;
4515 	for (lba = zs_lba; lba < sdebug_capacity;
4516 	     lba = zsp->z_start + zsp->z_size) {
4517 		if (WARN_ONCE(zbc_zone(devip, lba) == zsp, "lba = %llu\n", lba))
4518 			break;
4519 		zsp = zbc_zone(devip, lba);
4520 		switch (rep_opts) {
4521 		case 0x00:
4522 			/* All zones */
4523 			break;
4524 		case 0x01:
4525 			/* Empty zones */
4526 			if (zsp->z_cond != ZC1_EMPTY)
4527 				continue;
4528 			break;
4529 		case 0x02:
4530 			/* Implicit open zones */
4531 			if (zsp->z_cond != ZC2_IMPLICIT_OPEN)
4532 				continue;
4533 			break;
4534 		case 0x03:
4535 			/* Explicit open zones */
4536 			if (zsp->z_cond != ZC3_EXPLICIT_OPEN)
4537 				continue;
4538 			break;
4539 		case 0x04:
4540 			/* Closed zones */
4541 			if (zsp->z_cond != ZC4_CLOSED)
4542 				continue;
4543 			break;
4544 		case 0x05:
4545 			/* Full zones */
4546 			if (zsp->z_cond != ZC5_FULL)
4547 				continue;
4548 			break;
4549 		case 0x06:
4550 		case 0x07:
4551 		case 0x10:
4552 			/*
4553 			 * Read-only, offline, and reset-WP-recommended zones
4554 			 * are not emulated: no zones to report.
4555 			 */
4556 			continue;
4557 		case 0x11:
4558 			/* non-seq-resource set */
4559 			if (!zsp->z_non_seq_resource)
4560 				continue;
4561 			break;
4562 		case 0x3e:
4563 			/* All zones except gap zones. */
4564 			if (zbc_zone_is_gap(zsp))
4565 				continue;
4566 			break;
4567 		case 0x3f:
4568 			/* Not write pointer (conventional) zones */
4569 			if (zbc_zone_is_seq(zsp))
4570 				continue;
4571 			break;
4572 		default:
4573 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
4574 					INVALID_FIELD_IN_CDB, 0);
4575 			ret = check_condition_result;
4576 			goto fini;
4577 		}
4578 
4579 		if (nrz < rep_max_zones) {
4580 			/* Fill zone descriptor */
4581 			desc[0] = zsp->z_type;
4582 			desc[1] = zsp->z_cond << 4;
4583 			if (zsp->z_non_seq_resource)
4584 				desc[1] |= 1 << 1;
4585 			put_unaligned_be64((u64)zsp->z_size, desc + 8);
4586 			put_unaligned_be64((u64)zsp->z_start, desc + 16);
4587 			put_unaligned_be64((u64)zsp->z_wp, desc + 24);
4588 			desc += 64;
4589 		}
4590 
4591 		if (partial && nrz >= rep_max_zones)
4592 			break;
4593 
4594 		nrz++;
4595 	}
4596 
4597 	/* Report header */
4598 	/* Zone list length. */
4599 	put_unaligned_be32(nrz * RZONES_DESC_HD, arr + 0);
4600 	/* Maximum LBA */
4601 	put_unaligned_be64(sdebug_capacity - 1, arr + 8);
4602 	/* Zone starting LBA granularity. */
4603 	if (devip->zcap < devip->zsize)
4604 		put_unaligned_be64(devip->zsize, arr + 16);
4605 
4606 	rep_len = (unsigned long)desc - (unsigned long)arr;
4607 	ret = fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, rep_len));
4608 
4609 fini:
4610 	sdeb_read_unlock(sip);
4611 	kfree(arr);
4612 	return ret;
4613 }
4614 
4615 /* Logic transplanted from tcmu-runner, file_zbc.c */
4616 static void zbc_open_all(struct sdebug_dev_info *devip)
4617 {
4618 	struct sdeb_zone_state *zsp = &devip->zstate[0];
4619 	unsigned int i;
4620 
4621 	for (i = 0; i < devip->nr_zones; i++, zsp++) {
4622 		if (zsp->z_cond == ZC4_CLOSED)
4623 			zbc_open_zone(devip, &devip->zstate[i], true);
4624 	}
4625 }
4626 
4627 static int resp_open_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4628 {
4629 	int res = 0;
4630 	u64 z_id;
4631 	enum sdebug_z_cond zc;
4632 	u8 *cmd = scp->cmnd;
4633 	struct sdeb_zone_state *zsp;
4634 	bool all = cmd[14] & 0x01;
4635 	struct sdeb_store_info *sip = devip2sip(devip, false);
4636 
4637 	if (!sdebug_dev_is_zoned(devip)) {
4638 		mk_sense_invalid_opcode(scp);
4639 		return check_condition_result;
4640 	}
4641 
4642 	sdeb_write_lock(sip);
4643 
4644 	if (all) {
4645 		/* Check if all closed zones can be opened */
4646 		if (devip->max_open &&
4647 		    devip->nr_exp_open + devip->nr_closed > devip->max_open) {
4648 			mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
4649 					INSUFF_ZONE_ASCQ);
4650 			res = check_condition_result;
4651 			goto fini;
4652 		}
4653 		/* Open all closed zones */
4654 		zbc_open_all(devip);
4655 		goto fini;
4656 	}
4657 
4658 	/* Open the specified zone */
4659 	z_id = get_unaligned_be64(cmd + 2);
4660 	if (z_id >= sdebug_capacity) {
4661 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4662 		res = check_condition_result;
4663 		goto fini;
4664 	}
4665 
4666 	zsp = zbc_zone(devip, z_id);
4667 	if (z_id != zsp->z_start) {
4668 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4669 		res = check_condition_result;
4670 		goto fini;
4671 	}
4672 	if (zbc_zone_is_conv(zsp)) {
4673 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4674 		res = check_condition_result;
4675 		goto fini;
4676 	}
4677 
4678 	zc = zsp->z_cond;
4679 	if (zc == ZC3_EXPLICIT_OPEN || zc == ZC5_FULL)
4680 		goto fini;
4681 
4682 	if (devip->max_open && devip->nr_exp_open >= devip->max_open) {
4683 		mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
4684 				INSUFF_ZONE_ASCQ);
4685 		res = check_condition_result;
4686 		goto fini;
4687 	}
4688 
4689 	zbc_open_zone(devip, zsp, true);
4690 fini:
4691 	sdeb_write_unlock(sip);
4692 	return res;
4693 }
4694 
4695 static void zbc_close_all(struct sdebug_dev_info *devip)
4696 {
4697 	unsigned int i;
4698 
4699 	for (i = 0; i < devip->nr_zones; i++)
4700 		zbc_close_zone(devip, &devip->zstate[i]);
4701 }
4702 
4703 static int resp_close_zone(struct scsi_cmnd *scp,
4704 			   struct sdebug_dev_info *devip)
4705 {
4706 	int res = 0;
4707 	u64 z_id;
4708 	u8 *cmd = scp->cmnd;
4709 	struct sdeb_zone_state *zsp;
4710 	bool all = cmd[14] & 0x01;
4711 	struct sdeb_store_info *sip = devip2sip(devip, false);
4712 
4713 	if (!sdebug_dev_is_zoned(devip)) {
4714 		mk_sense_invalid_opcode(scp);
4715 		return check_condition_result;
4716 	}
4717 
4718 	sdeb_write_lock(sip);
4719 
4720 	if (all) {
4721 		zbc_close_all(devip);
4722 		goto fini;
4723 	}
4724 
4725 	/* Close specified zone */
4726 	z_id = get_unaligned_be64(cmd + 2);
4727 	if (z_id >= sdebug_capacity) {
4728 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4729 		res = check_condition_result;
4730 		goto fini;
4731 	}
4732 
4733 	zsp = zbc_zone(devip, z_id);
4734 	if (z_id != zsp->z_start) {
4735 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4736 		res = check_condition_result;
4737 		goto fini;
4738 	}
4739 	if (zbc_zone_is_conv(zsp)) {
4740 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4741 		res = check_condition_result;
4742 		goto fini;
4743 	}
4744 
4745 	zbc_close_zone(devip, zsp);
4746 fini:
4747 	sdeb_write_unlock(sip);
4748 	return res;
4749 }
4750 
4751 static void zbc_finish_zone(struct sdebug_dev_info *devip,
4752 			    struct sdeb_zone_state *zsp, bool empty)
4753 {
4754 	enum sdebug_z_cond zc = zsp->z_cond;
4755 
4756 	if (zc == ZC4_CLOSED || zc == ZC2_IMPLICIT_OPEN ||
4757 	    zc == ZC3_EXPLICIT_OPEN || (empty && zc == ZC1_EMPTY)) {
4758 		if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
4759 			zbc_close_zone(devip, zsp);
4760 		if (zsp->z_cond == ZC4_CLOSED)
4761 			devip->nr_closed--;
4762 		zsp->z_wp = zsp->z_start + zsp->z_size;
4763 		zsp->z_cond = ZC5_FULL;
4764 	}
4765 }
4766 
4767 static void zbc_finish_all(struct sdebug_dev_info *devip)
4768 {
4769 	unsigned int i;
4770 
4771 	for (i = 0; i < devip->nr_zones; i++)
4772 		zbc_finish_zone(devip, &devip->zstate[i], false);
4773 }
4774 
4775 static int resp_finish_zone(struct scsi_cmnd *scp,
4776 			    struct sdebug_dev_info *devip)
4777 {
4778 	struct sdeb_zone_state *zsp;
4779 	int res = 0;
4780 	u64 z_id;
4781 	u8 *cmd = scp->cmnd;
4782 	bool all = cmd[14] & 0x01;
4783 	struct sdeb_store_info *sip = devip2sip(devip, false);
4784 
4785 	if (!sdebug_dev_is_zoned(devip)) {
4786 		mk_sense_invalid_opcode(scp);
4787 		return check_condition_result;
4788 	}
4789 
4790 	sdeb_write_lock(sip);
4791 
4792 	if (all) {
4793 		zbc_finish_all(devip);
4794 		goto fini;
4795 	}
4796 
4797 	/* Finish the specified zone */
4798 	z_id = get_unaligned_be64(cmd + 2);
4799 	if (z_id >= sdebug_capacity) {
4800 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4801 		res = check_condition_result;
4802 		goto fini;
4803 	}
4804 
4805 	zsp = zbc_zone(devip, z_id);
4806 	if (z_id != zsp->z_start) {
4807 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4808 		res = check_condition_result;
4809 		goto fini;
4810 	}
4811 	if (zbc_zone_is_conv(zsp)) {
4812 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4813 		res = check_condition_result;
4814 		goto fini;
4815 	}
4816 
4817 	zbc_finish_zone(devip, zsp, true);
4818 fini:
4819 	sdeb_write_unlock(sip);
4820 	return res;
4821 }
4822 
4823 static void zbc_rwp_zone(struct sdebug_dev_info *devip,
4824 			 struct sdeb_zone_state *zsp)
4825 {
4826 	enum sdebug_z_cond zc;
4827 	struct sdeb_store_info *sip = devip2sip(devip, false);
4828 
4829 	if (!zbc_zone_is_seq(zsp))
4830 		return;
4831 
4832 	zc = zsp->z_cond;
4833 	if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
4834 		zbc_close_zone(devip, zsp);
4835 
4836 	if (zsp->z_cond == ZC4_CLOSED)
4837 		devip->nr_closed--;
4838 
4839 	if (zsp->z_wp > zsp->z_start)
4840 		memset(sip->storep + zsp->z_start * sdebug_sector_size, 0,
4841 		       (zsp->z_wp - zsp->z_start) * sdebug_sector_size);
4842 
4843 	zsp->z_non_seq_resource = false;
4844 	zsp->z_wp = zsp->z_start;
4845 	zsp->z_cond = ZC1_EMPTY;
4846 }
4847 
4848 static void zbc_rwp_all(struct sdebug_dev_info *devip)
4849 {
4850 	unsigned int i;
4851 
4852 	for (i = 0; i < devip->nr_zones; i++)
4853 		zbc_rwp_zone(devip, &devip->zstate[i]);
4854 }
4855 
4856 static int resp_rwp_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4857 {
4858 	struct sdeb_zone_state *zsp;
4859 	int res = 0;
4860 	u64 z_id;
4861 	u8 *cmd = scp->cmnd;
4862 	bool all = cmd[14] & 0x01;
4863 	struct sdeb_store_info *sip = devip2sip(devip, false);
4864 
4865 	if (!sdebug_dev_is_zoned(devip)) {
4866 		mk_sense_invalid_opcode(scp);
4867 		return check_condition_result;
4868 	}
4869 
4870 	sdeb_write_lock(sip);
4871 
4872 	if (all) {
4873 		zbc_rwp_all(devip);
4874 		goto fini;
4875 	}
4876 
4877 	z_id = get_unaligned_be64(cmd + 2);
4878 	if (z_id >= sdebug_capacity) {
4879 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4880 		res = check_condition_result;
4881 		goto fini;
4882 	}
4883 
4884 	zsp = zbc_zone(devip, z_id);
4885 	if (z_id != zsp->z_start) {
4886 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4887 		res = check_condition_result;
4888 		goto fini;
4889 	}
4890 	if (zbc_zone_is_conv(zsp)) {
4891 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4892 		res = check_condition_result;
4893 		goto fini;
4894 	}
4895 
4896 	zbc_rwp_zone(devip, zsp);
4897 fini:
4898 	sdeb_write_unlock(sip);
4899 	return res;
4900 }
4901 
4902 static struct sdebug_queue *get_queue(struct scsi_cmnd *cmnd)
4903 {
4904 	u16 hwq;
4905 	u32 tag = blk_mq_unique_tag(scsi_cmd_to_rq(cmnd));
4906 
4907 	hwq = blk_mq_unique_tag_to_hwq(tag);
4908 
4909 	pr_debug("tag=%#x, hwq=%d\n", tag, hwq);
4910 	if (WARN_ON_ONCE(hwq >= submit_queues))
4911 		hwq = 0;
4912 
4913 	return sdebug_q_arr + hwq;
4914 }
4915 
4916 static u32 get_tag(struct scsi_cmnd *cmnd)
4917 {
4918 	return blk_mq_unique_tag(scsi_cmd_to_rq(cmnd));
4919 }
4920 
4921 /* Queued (deferred) command completions converge here. */
4922 static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
4923 {
4924 	bool aborted = sd_dp->aborted;
4925 	int qc_idx;
4926 	int retiring = 0;
4927 	unsigned long iflags;
4928 	struct sdebug_queue *sqp;
4929 	struct sdebug_queued_cmd *sqcp;
4930 	struct scsi_cmnd *scp;
4931 	struct sdebug_dev_info *devip;
4932 
4933 	if (unlikely(aborted))
4934 		sd_dp->aborted = false;
4935 	qc_idx = sd_dp->qc_idx;
4936 	sqp = sdebug_q_arr + sd_dp->sqa_idx;
4937 	if (sdebug_statistics) {
4938 		atomic_inc(&sdebug_completions);
4939 		if (raw_smp_processor_id() != sd_dp->issuing_cpu)
4940 			atomic_inc(&sdebug_miss_cpus);
4941 	}
4942 	if (unlikely((qc_idx < 0) || (qc_idx >= SDEBUG_CANQUEUE))) {
4943 		pr_err("wild qc_idx=%d\n", qc_idx);
4944 		return;
4945 	}
4946 	spin_lock_irqsave(&sqp->qc_lock, iflags);
4947 	WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_NONE);
4948 	sqcp = &sqp->qc_arr[qc_idx];
4949 	scp = sqcp->a_cmnd;
4950 	if (unlikely(scp == NULL)) {
4951 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4952 		pr_err("scp is NULL, sqa_idx=%d, qc_idx=%d, hc_idx=%d\n",
4953 		       sd_dp->sqa_idx, qc_idx, sd_dp->hc_idx);
4954 		return;
4955 	}
4956 	devip = (struct sdebug_dev_info *)scp->device->hostdata;
4957 	if (likely(devip))
4958 		atomic_dec(&devip->num_in_q);
4959 	else
4960 		pr_err("devip=NULL\n");
4961 	if (unlikely(atomic_read(&retired_max_queue) > 0))
4962 		retiring = 1;
4963 
4964 	sqcp->a_cmnd = NULL;
4965 	if (unlikely(!test_and_clear_bit(qc_idx, sqp->in_use_bm))) {
4966 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4967 		pr_err("Unexpected completion\n");
4968 		return;
4969 	}
4970 
4971 	if (unlikely(retiring)) {	/* user has reduced max_queue */
4972 		int k, retval;
4973 
4974 		retval = atomic_read(&retired_max_queue);
4975 		if (qc_idx >= retval) {
4976 			spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4977 			pr_err("index %d too large\n", retval);
4978 			return;
4979 		}
4980 		k = find_last_bit(sqp->in_use_bm, retval);
4981 		if ((k < sdebug_max_queue) || (k == retval))
4982 			atomic_set(&retired_max_queue, 0);
4983 		else
4984 			atomic_set(&retired_max_queue, k + 1);
4985 	}
4986 	spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4987 	if (unlikely(aborted)) {
4988 		if (sdebug_verbose)
4989 			pr_info("bypassing scsi_done() due to aborted cmd\n");
4990 		return;
4991 	}
4992 	scsi_done(scp); /* callback to mid level */
4993 }
4994 
4995 /* Called when the high-resolution timer fires. */
4996 static enum hrtimer_restart sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
4997 {
4998 	struct sdebug_defer *sd_dp = container_of(timer, struct sdebug_defer,
4999 						  hrt);
5000 	sdebug_q_cmd_complete(sd_dp);
5001 	return HRTIMER_NORESTART;
5002 }
5003 
5004 /* Called from the workqueue when the scheduled work item runs. */
5005 static void sdebug_q_cmd_wq_complete(struct work_struct *work)
5006 {
5007 	struct sdebug_defer *sd_dp = container_of(work, struct sdebug_defer,
5008 						  ew.work);
5009 	sdebug_q_cmd_complete(sd_dp);
5010 }
5011 
5012 static bool got_shared_uuid;
5013 static uuid_t shared_uuid;
5014 
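/* Lay out the simulated zones of one device: optional conventional zones
 * first, then sequential write required (or preferred) zones, with gap
 * zones interleaved when the zone capacity is less than the zone size.
 */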
5015 static int sdebug_device_create_zones(struct sdebug_dev_info *devip)
5016 {
5017 	struct sdeb_zone_state *zsp;
5018 	sector_t capacity = get_sdebug_capacity();
5019 	sector_t conv_capacity;
5020 	sector_t zstart = 0;
5021 	unsigned int i;
5022 
5023 	/*
5024 	 * Set the zone size: if sdeb_zbc_zone_size_mb is not set, figure out
5025 	 * a zone size allowing for at least 4 zones on the device. Otherwise,
5026 	 * use the specified zone size checking that at least 2 zones can be
5027 	 * created for the device.
5028 	 */
5029 	if (!sdeb_zbc_zone_size_mb) {
5030 		devip->zsize = (DEF_ZBC_ZONE_SIZE_MB * SZ_1M)
5031 			>> ilog2(sdebug_sector_size);
5032 		while (capacity < devip->zsize << 2 && devip->zsize >= 2)
5033 			devip->zsize >>= 1;
5034 		if (devip->zsize < 2) {
5035 			pr_err("Device capacity too small\n");
5036 			return -EINVAL;
5037 		}
5038 	} else {
5039 		if (!is_power_of_2(sdeb_zbc_zone_size_mb)) {
5040 			pr_err("Zone size is not a power of 2\n");
5041 			return -EINVAL;
5042 		}
5043 		devip->zsize = (sdeb_zbc_zone_size_mb * SZ_1M)
5044 			>> ilog2(sdebug_sector_size);
5045 		if (devip->zsize >= capacity) {
5046 			pr_err("Zone size too large for device capacity\n");
5047 			return -EINVAL;
5048 		}
5049 	}
5050 
5051 	devip->zsize_shift = ilog2(devip->zsize);
5052 	devip->nr_zones = (capacity + devip->zsize - 1) >> devip->zsize_shift;
5053 
5054 	if (sdeb_zbc_zone_cap_mb == 0) {
5055 		devip->zcap = devip->zsize;
5056 	} else {
5057 		devip->zcap = (sdeb_zbc_zone_cap_mb * SZ_1M) >>
5058 			      ilog2(sdebug_sector_size);
5059 		if (devip->zcap > devip->zsize) {
5060 			pr_err("Zone capacity too large\n");
5061 			return -EINVAL;
5062 		}
5063 	}
5064 
5065 	conv_capacity = (sector_t)sdeb_zbc_nr_conv << devip->zsize_shift;
5066 	if (conv_capacity >= capacity) {
5067 		pr_err("Number of conventional zones too large\n");
5068 		return -EINVAL;
5069 	}
5070 	devip->nr_conv_zones = sdeb_zbc_nr_conv;
5071 	devip->nr_seq_zones = ALIGN(capacity - conv_capacity, devip->zsize) >>
5072 			      devip->zsize_shift;
5073 	devip->nr_zones = devip->nr_conv_zones + devip->nr_seq_zones;
5074 
5075 	/* Add gap zones if zone capacity is smaller than the zone size */
5076 	if (devip->zcap < devip->zsize)
5077 		devip->nr_zones += devip->nr_seq_zones;
5078 
5079 	if (devip->zmodel == BLK_ZONED_HM) {
5080 		/* sdeb_zbc_max_open can be 0, meaning "not reported" */
5081 		if (sdeb_zbc_max_open >= devip->nr_zones - 1)
5082 			devip->max_open = (devip->nr_zones - 1) / 2;
5083 		else
5084 			devip->max_open = sdeb_zbc_max_open;
5085 	}
5086 
5087 	devip->zstate = kcalloc(devip->nr_zones,
5088 				sizeof(struct sdeb_zone_state), GFP_KERNEL);
5089 	if (!devip->zstate)
5090 		return -ENOMEM;
5091 
5092 	for (i = 0; i < devip->nr_zones; i++) {
5093 		zsp = &devip->zstate[i];
5094 
5095 		zsp->z_start = zstart;
5096 
5097 		if (i < devip->nr_conv_zones) {
5098 			zsp->z_type = ZBC_ZTYPE_CNV;
5099 			zsp->z_cond = ZBC_NOT_WRITE_POINTER;
5100 			zsp->z_wp = (sector_t)-1;
5101 			zsp->z_size =
5102 				min_t(u64, devip->zsize, capacity - zstart);
5103 		} else if ((zstart & (devip->zsize - 1)) == 0) {
5104 			if (devip->zmodel == BLK_ZONED_HM)
5105 				zsp->z_type = ZBC_ZTYPE_SWR;
5106 			else
5107 				zsp->z_type = ZBC_ZTYPE_SWP;
5108 			zsp->z_cond = ZC1_EMPTY;
5109 			zsp->z_wp = zsp->z_start;
5110 			zsp->z_size =
5111 				min_t(u64, devip->zcap, capacity - zstart);
5112 		} else {
5113 			zsp->z_type = ZBC_ZTYPE_GAP;
5114 			zsp->z_cond = ZBC_NOT_WRITE_POINTER;
5115 			zsp->z_wp = (sector_t)-1;
5116 			zsp->z_size = min_t(u64, devip->zsize - devip->zcap,
5117 					    capacity - zstart);
5118 		}
5119 
5120 		WARN_ON_ONCE((int)zsp->z_size <= 0);
5121 		zstart += zsp->z_size;
5122 	}
5123 
5124 	return 0;
5125 }
5126 
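/* Allocate and initialize the per-LUN state, optionally giving the LU a
 * (possibly shared) UUID name and creating zone state for ZBC devices.
 */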
5127 static struct sdebug_dev_info *sdebug_device_create(
5128 			struct sdebug_host_info *sdbg_host, gfp_t flags)
5129 {
5130 	struct sdebug_dev_info *devip;
5131 
5132 	devip = kzalloc(sizeof(*devip), flags);
5133 	if (devip) {
5134 		if (sdebug_uuid_ctl == 1)
5135 			uuid_gen(&devip->lu_name);
5136 		else if (sdebug_uuid_ctl == 2) {
5137 			if (got_shared_uuid)
5138 				devip->lu_name = shared_uuid;
5139 			else {
5140 				uuid_gen(&shared_uuid);
5141 				got_shared_uuid = true;
5142 				devip->lu_name = shared_uuid;
5143 			}
5144 		}
5145 		devip->sdbg_host = sdbg_host;
5146 		if (sdeb_zbc_in_use) {
5147 			devip->zmodel = sdeb_zbc_model;
5148 			if (sdebug_device_create_zones(devip)) {
5149 				kfree(devip);
5150 				return NULL;
5151 			}
5152 		} else {
5153 			devip->zmodel = BLK_ZONED_NONE;
5154 		}
5156 		devip->create_ts = ktime_get_boottime();
5157 		atomic_set(&devip->stopped, (sdeb_tur_ms_to_ready > 0 ? 2 : 0));
5158 		list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
5159 	}
5160 	return devip;
5161 }
5162 
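/* Return the sdebug_dev_info matching @sdev; otherwise claim an unused
 * slot, creating a new one if needed, and flag a power-on-occurred unit
 * attention on it.
 */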
5163 static struct sdebug_dev_info *find_build_dev_info(struct scsi_device *sdev)
5164 {
5165 	struct sdebug_host_info *sdbg_host;
5166 	struct sdebug_dev_info *open_devip = NULL;
5167 	struct sdebug_dev_info *devip;
5168 
5169 	sdbg_host = *(struct sdebug_host_info **)shost_priv(sdev->host);
5170 	if (!sdbg_host) {
5171 		pr_err("Host info NULL\n");
5172 		return NULL;
5173 	}
5174 
5175 	list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
5176 		if ((devip->used) && (devip->channel == sdev->channel) &&
5177 		    (devip->target == sdev->id) &&
5178 		    (devip->lun == sdev->lun))
5179 			return devip;
5180 		else {
5181 			if ((!devip->used) && (!open_devip))
5182 				open_devip = devip;
5183 		}
5184 	}
5185 	if (!open_devip) { /* try and make a new one */
5186 		open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
5187 		if (!open_devip) {
5188 			pr_err("out of memory at line %d\n", __LINE__);
5189 			return NULL;
5190 		}
5191 	}
5192 
5193 	open_devip->channel = sdev->channel;
5194 	open_devip->target = sdev->id;
5195 	open_devip->lun = sdev->lun;
5196 	open_devip->sdbg_host = sdbg_host;
5197 	atomic_set(&open_devip->num_in_q, 0);
5198 	set_bit(SDEBUG_UA_POOCCUR, open_devip->uas_bm);
5199 	open_devip->used = true;
5200 	return open_devip;
5201 }
5202 
5203 static int scsi_debug_slave_alloc(struct scsi_device *sdp)
5204 {
5205 	if (sdebug_verbose)
5206 		pr_info("slave_alloc <%u %u %u %llu>\n",
5207 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5208 	return 0;
5209 }
5210 
5211 static int scsi_debug_slave_configure(struct scsi_device *sdp)
5212 {
5213 	struct sdebug_dev_info *devip =
5214 			(struct sdebug_dev_info *)sdp->hostdata;
5215 
5216 	if (sdebug_verbose)
5217 		pr_info("slave_configure <%u %u %u %llu>\n",
5218 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5219 	if (sdp->host->max_cmd_len != SDEBUG_MAX_CMD_LEN)
5220 		sdp->host->max_cmd_len = SDEBUG_MAX_CMD_LEN;
5221 	if (devip == NULL) {
5222 		devip = find_build_dev_info(sdp);
5223 		if (devip == NULL)
5224 			return 1;  /* no resources, will be marked offline */
5225 	}
5226 	sdp->hostdata = devip;
5227 	if (sdebug_no_uld)
5228 		sdp->no_uld_attach = 1;
5229 	config_cdb_len(sdp);
5230 	return 0;
5231 }
5232 
5233 static void scsi_debug_slave_destroy(struct scsi_device *sdp)
5234 {
5235 	struct sdebug_dev_info *devip =
5236 		(struct sdebug_dev_info *)sdp->hostdata;
5237 
5238 	if (sdebug_verbose)
5239 		pr_info("slave_destroy <%u %u %u %llu>\n",
5240 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5241 	if (devip) {
5242 		/* make this slot available for re-use */
5243 		devip->used = false;
5244 		sdp->hostdata = NULL;
5245 	}
5246 }
5247 
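/* Cancel the deferred completion of a queued command. Called after qc_lock
 * has been dropped since cancel_work_sync() may sleep.
 */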
5248 static void stop_qc_helper(struct sdebug_defer *sd_dp,
5249 			   enum sdeb_defer_type defer_t)
5250 {
5251 	if (!sd_dp)
5252 		return;
5253 	if (defer_t == SDEB_DEFER_HRT)
5254 		hrtimer_cancel(&sd_dp->hrt);
5255 	else if (defer_t == SDEB_DEFER_WQ)
5256 		cancel_work_sync(&sd_dp->ew.work);
5257 }
5258 
5259 /* If @cmnd is found, deletes its timer or work queue and returns true;
5260  * otherwise returns false. */
5261 static bool stop_queued_cmnd(struct scsi_cmnd *cmnd)
5262 {
5263 	unsigned long iflags;
5264 	int j, k, qmax, r_qmax;
5265 	enum sdeb_defer_type l_defer_t;
5266 	struct sdebug_queue *sqp;
5267 	struct sdebug_queued_cmd *sqcp;
5268 	struct sdebug_dev_info *devip;
5269 	struct sdebug_defer *sd_dp;
5270 
5271 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5272 		spin_lock_irqsave(&sqp->qc_lock, iflags);
5273 		qmax = sdebug_max_queue;
5274 		r_qmax = atomic_read(&retired_max_queue);
5275 		if (r_qmax > qmax)
5276 			qmax = r_qmax;
5277 		for (k = 0; k < qmax; ++k) {
5278 			if (test_bit(k, sqp->in_use_bm)) {
5279 				sqcp = &sqp->qc_arr[k];
5280 				if (cmnd != sqcp->a_cmnd)
5281 					continue;
5282 				/* found */
5283 				devip = (struct sdebug_dev_info *)
5284 						cmnd->device->hostdata;
5285 				if (devip)
5286 					atomic_dec(&devip->num_in_q);
5287 				sqcp->a_cmnd = NULL;
5288 				sd_dp = sqcp->sd_dp;
5289 				if (sd_dp) {
5290 					l_defer_t = READ_ONCE(sd_dp->defer_t);
5291 					WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_NONE);
5292 				} else
5293 					l_defer_t = SDEB_DEFER_NONE;
5294 				spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5295 				stop_qc_helper(sd_dp, l_defer_t);
5296 				clear_bit(k, sqp->in_use_bm);
5297 				return true;
5298 			}
5299 		}
5300 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5301 	}
5302 	return false;
5303 }
5304 
5305 /* Deletes (stops) timers or work queues of all queued commands */
5306 static void stop_all_queued(void)
5307 {
5308 	unsigned long iflags;
5309 	int j, k;
5310 	enum sdeb_defer_type l_defer_t;
5311 	struct sdebug_queue *sqp;
5312 	struct sdebug_queued_cmd *sqcp;
5313 	struct sdebug_dev_info *devip;
5314 	struct sdebug_defer *sd_dp;
5315 
5316 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5317 		spin_lock_irqsave(&sqp->qc_lock, iflags);
5318 		for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
5319 			if (test_bit(k, sqp->in_use_bm)) {
5320 				sqcp = &sqp->qc_arr[k];
5321 				if (sqcp->a_cmnd == NULL)
5322 					continue;
5323 				devip = (struct sdebug_dev_info *)
5324 					sqcp->a_cmnd->device->hostdata;
5325 				if (devip)
5326 					atomic_dec(&devip->num_in_q);
5327 				sqcp->a_cmnd = NULL;
5328 				sd_dp = sqcp->sd_dp;
5329 				if (sd_dp) {
5330 					l_defer_t = READ_ONCE(sd_dp->defer_t);
5331 					WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_NONE);
5332 				} else
5333 					l_defer_t = SDEB_DEFER_NONE;
5334 				spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5335 				stop_qc_helper(sd_dp, l_defer_t);
5336 				clear_bit(k, sqp->in_use_bm);
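				/* re-take qc_lock dropped around stop_qc_helper() */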
5337 				spin_lock_irqsave(&sqp->qc_lock, iflags);
5338 			}
5339 		}
5340 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5341 	}
5342 }
5343 
5344 /* Free queued command memory on heap */
5345 static void free_all_queued(void)
5346 {
5347 	int j, k;
5348 	struct sdebug_queue *sqp;
5349 	struct sdebug_queued_cmd *sqcp;
5350 
5351 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5352 		for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
5353 			sqcp = &sqp->qc_arr[k];
5354 			kfree(sqcp->sd_dp);
5355 			sqcp->sd_dp = NULL;
5356 		}
5357 	}
5358 }
5359 
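/* Error handler callback: stop the command's deferred completion if it is
 * still queued; always reports SUCCESS to the mid level.
 */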
5360 static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
5361 {
5362 	bool ok;
5363 
5364 	++num_aborts;
5365 	if (SCpnt) {
5366 		ok = stop_queued_cmnd(SCpnt);
5367 		if (SCpnt->device && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
5368 			sdev_printk(KERN_INFO, SCpnt->device,
5369 				    "%s: command%s found\n", __func__,
5370 				    ok ? "" : " not");
5371 	}
5372 	return SUCCESS;
5373 }
5374 
5375 static int scsi_debug_device_reset(struct scsi_cmnd *SCpnt)
5376 {
5377 	++num_dev_resets;
5378 	if (SCpnt && SCpnt->device) {
5379 		struct scsi_device *sdp = SCpnt->device;
5380 		struct sdebug_dev_info *devip =
5381 				(struct sdebug_dev_info *)sdp->hostdata;
5382 
5383 		if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5384 			sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5385 		if (devip)
5386 			set_bit(SDEBUG_UA_POR, devip->uas_bm);
5387 	}
5388 	return SUCCESS;
5389 }
5390 
5391 static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt)
5392 {
5393 	struct sdebug_host_info *sdbg_host;
5394 	struct sdebug_dev_info *devip;
5395 	struct scsi_device *sdp;
5396 	struct Scsi_Host *hp;
5397 	int k = 0;
5398 
5399 	++num_target_resets;
5400 	if (!SCpnt)
5401 		goto lie;
5402 	sdp = SCpnt->device;
5403 	if (!sdp)
5404 		goto lie;
5405 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5406 		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5407 	hp = sdp->host;
5408 	if (!hp)
5409 		goto lie;
5410 	sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
5411 	if (sdbg_host) {
5412 		list_for_each_entry(devip,
5413 				    &sdbg_host->dev_info_list,
5414 				    dev_list)
5415 			if (devip->target == sdp->id) {
5416 				set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5417 				++k;
5418 			}
5419 	}
5420 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5421 		sdev_printk(KERN_INFO, sdp,
5422 			    "%s: %d device(s) found in target\n", __func__, k);
5423 lie:
5424 	return SUCCESS;
5425 }
5426 
5427 static int scsi_debug_bus_reset(struct scsi_cmnd *SCpnt)
5428 {
5429 	struct sdebug_host_info *sdbg_host;
5430 	struct sdebug_dev_info *devip;
5431 	struct scsi_device *sdp;
5432 	struct Scsi_Host *hp;
5433 	int k = 0;
5434 
5435 	++num_bus_resets;
5436 	if (!(SCpnt && SCpnt->device))
5437 		goto lie;
5438 	sdp = SCpnt->device;
5439 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5440 		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5441 	hp = sdp->host;
5442 	if (hp) {
5443 		sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
5444 		if (sdbg_host) {
5445 			list_for_each_entry(devip,
5446 					    &sdbg_host->dev_info_list,
5447 					    dev_list) {
5448 				set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5449 				++k;
5450 			}
5451 		}
5452 	}
5453 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5454 		sdev_printk(KERN_INFO, sdp,
5455 			    "%s: %d device(s) found in host\n", __func__, k);
5456 lie:
5457 	return SUCCESS;
5458 }
5459 
5460 static int scsi_debug_host_reset(struct scsi_cmnd *SCpnt)
5461 {
5462 	struct sdebug_host_info *sdbg_host;
5463 	struct sdebug_dev_info *devip;
5464 	int k = 0;
5465 
5466 	++num_host_resets;
5467 	if ((SCpnt->device) && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
5468 		sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__);
5469 	spin_lock(&sdebug_host_list_lock);
5470 	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
5471 		list_for_each_entry(devip, &sdbg_host->dev_info_list,
5472 				    dev_list) {
5473 			set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5474 			++k;
5475 		}
5476 	}
5477 	spin_unlock(&sdebug_host_list_lock);
5478 	stop_all_queued();
5479 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5480 		sdev_printk(KERN_INFO, SCpnt->device,
5481 			    "%s: %d device(s) found\n", __func__, k);
5482 	return SUCCESS;
5483 }
5484 
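/* Write an MS-DOS (MBR) partition table at the start of the ram store,
 * carving the capacity into sdebug_num_parts partitions aligned on
 * cylinder boundaries.
 */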
5485 static void sdebug_build_parts(unsigned char *ramp, unsigned long store_size)
5486 {
5487 	struct msdos_partition *pp;
5488 	int starts[SDEBUG_MAX_PARTS + 2], max_part_secs;
5489 	int sectors_per_part, num_sectors, k;
5490 	int heads_by_sects, start_sec, end_sec;
5491 
5492 	/* assume partition table already zeroed */
5493 	if ((sdebug_num_parts < 1) || (store_size < 1048576))
5494 		return;
5495 	if (sdebug_num_parts > SDEBUG_MAX_PARTS) {
5496 		sdebug_num_parts = SDEBUG_MAX_PARTS;
5497 		pr_warn("reducing partitions to %d\n", SDEBUG_MAX_PARTS);
5498 	}
5499 	num_sectors = (int)get_sdebug_capacity();
5500 	sectors_per_part = (num_sectors - sdebug_sectors_per)
5501 			   / sdebug_num_parts;
5502 	heads_by_sects = sdebug_heads * sdebug_sectors_per;
5503 	starts[0] = sdebug_sectors_per;
5504 	max_part_secs = sectors_per_part;
5505 	for (k = 1; k < sdebug_num_parts; ++k) {
5506 		starts[k] = ((k * sectors_per_part) / heads_by_sects)
5507 			    * heads_by_sects;
5508 		if (starts[k] - starts[k - 1] < max_part_secs)
5509 			max_part_secs = starts[k] - starts[k - 1];
5510 	}
5511 	starts[sdebug_num_parts] = num_sectors;
5512 	starts[sdebug_num_parts + 1] = 0;
5513 
5514 	ramp[510] = 0x55;	/* magic partition markings */
5515 	ramp[511] = 0xAA;
5516 	pp = (struct msdos_partition *)(ramp + 0x1be);
5517 	for (k = 0; starts[k + 1]; ++k, ++pp) {
5518 		start_sec = starts[k];
5519 		end_sec = starts[k] + max_part_secs - 1;
5520 		pp->boot_ind = 0;
5521 
5522 		pp->cyl = start_sec / heads_by_sects;
5523 		pp->head = (start_sec - (pp->cyl * heads_by_sects))
5524 			   / sdebug_sectors_per;
5525 		pp->sector = (start_sec % sdebug_sectors_per) + 1;
5526 
5527 		pp->end_cyl = end_sec / heads_by_sects;
5528 		pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
5529 			       / sdebug_sectors_per;
5530 		pp->end_sector = (end_sec % sdebug_sectors_per) + 1;
5531 
5532 		pp->start_sect = cpu_to_le32(start_sec);
5533 		pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1);
5534 		pp->sys_ind = 0x83;	/* plain Linux partition */
5535 	}
5536 }
5537 
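/* Pause (block=true) or resume queueing on all submission queues; used to
 * obtain a stable view while parameters are being changed.
 */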
5538 static void block_unblock_all_queues(bool block)
5539 {
5540 	int j;
5541 	struct sdebug_queue *sqp;
5542 
5543 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp)
5544 		atomic_set(&sqp->blocked, (int)block);
5545 }
5546 
5547 /* Adjust (by rounding down) the sdebug_cmnd_count so abs(every_nth)-1
5548  * commands will be processed normally before triggers occur.
5549  */
5550 static void tweak_cmnd_count(void)
5551 {
5552 	int count, modulo;
5553 
5554 	modulo = abs(sdebug_every_nth);
5555 	if (modulo < 2)
5556 		return;
5557 	block_unblock_all_queues(true);
5558 	count = atomic_read(&sdebug_cmnd_count);
5559 	atomic_set(&sdebug_cmnd_count, (count / modulo) * modulo);
5560 	block_unblock_all_queues(false);
5561 }
5562 
5563 static void clear_queue_stats(void)
5564 {
5565 	atomic_set(&sdebug_cmnd_count, 0);
5566 	atomic_set(&sdebug_completions, 0);
5567 	atomic_set(&sdebug_miss_cpus, 0);
5568 	atomic_set(&sdebug_a_tsf, 0);
5569 }
5570 
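/* True when an injected error should fire for this command, i.e. when the
 * running command count has reached a multiple of |every_nth|.
 */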
5571 static bool inject_on_this_cmd(void)
5572 {
5573 	if (sdebug_every_nth == 0)
5574 		return false;
5575 	return (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) == 0;
5576 }
5577 
5578 #define INCLUSIVE_TIMING_MAX_NS 1000000		/* 1 millisecond */
5579 
5580 /* Complete the processing of the thread that queued a SCSI command to this
5581  * driver. It either completes the command by calling scsi_done() or
5582  * schedules an hrtimer or work queue and then returns 0. Returns
5583  * SCSI_MLQUEUE_HOST_BUSY if temporarily out of resources.
5584  */
5585 static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
5586 			 int scsi_result,
5587 			 int (*pfp)(struct scsi_cmnd *,
5588 				    struct sdebug_dev_info *),
5589 			 int delta_jiff, int ndelay)
5590 {
5591 	bool new_sd_dp;
5592 	bool inject = false;
5593 	bool polled = scsi_cmd_to_rq(cmnd)->cmd_flags & REQ_POLLED;
5594 	int k, num_in_q, qdepth;
5595 	unsigned long iflags;
5596 	u64 ns_from_boot = 0;
5597 	struct sdebug_queue *sqp;
5598 	struct sdebug_queued_cmd *sqcp;
5599 	struct scsi_device *sdp;
5600 	struct sdebug_defer *sd_dp;
5601 
5602 	if (unlikely(devip == NULL)) {
5603 		if (scsi_result == 0)
5604 			scsi_result = DID_NO_CONNECT << 16;
5605 		goto respond_in_thread;
5606 	}
5607 	sdp = cmnd->device;
5608 
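	/* a delay of 0 means respond immediately in the submitting thread */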
5609 	if (delta_jiff == 0)
5610 		goto respond_in_thread;
5611 
5612 	sqp = get_queue(cmnd);
5613 	spin_lock_irqsave(&sqp->qc_lock, iflags);
5614 	if (unlikely(atomic_read(&sqp->blocked))) {
5615 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5616 		return SCSI_MLQUEUE_HOST_BUSY;
5617 	}
5618 	num_in_q = atomic_read(&devip->num_in_q);
5619 	qdepth = cmnd->device->queue_depth;
5620 	if (unlikely((qdepth > 0) && (num_in_q >= qdepth))) {
5621 		if (scsi_result) {
5622 			spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5623 			goto respond_in_thread;
5624 		} else
5625 			scsi_result = device_qfull_result;
5626 	} else if (unlikely(sdebug_every_nth &&
5627 			    (SDEBUG_OPT_RARE_TSF & sdebug_opts) &&
5628 			    (scsi_result == 0))) {
5629 		if ((num_in_q == (qdepth - 1)) &&
5630 		    (atomic_inc_return(&sdebug_a_tsf) >=
5631 		     abs(sdebug_every_nth))) {
5632 			atomic_set(&sdebug_a_tsf, 0);
5633 			inject = true;
5634 			scsi_result = device_qfull_result;
5635 		}
5636 	}
5637 
5638 	k = find_first_zero_bit(sqp->in_use_bm, sdebug_max_queue);
5639 	if (unlikely(k >= sdebug_max_queue)) {
5640 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5641 		if (scsi_result)
5642 			goto respond_in_thread;
5643 		scsi_result = device_qfull_result;
5644 		if (SDEBUG_OPT_Q_NOISE & sdebug_opts)
5645 			sdev_printk(KERN_INFO, sdp, "%s: max_queue=%d exceeded: TASK SET FULL\n",
5646 				    __func__, sdebug_max_queue);
5647 		goto respond_in_thread;
5648 	}
5649 	set_bit(k, sqp->in_use_bm);
5650 	atomic_inc(&devip->num_in_q);
5651 	sqcp = &sqp->qc_arr[k];
5652 	sqcp->a_cmnd = cmnd;
5653 	cmnd->host_scribble = (unsigned char *)sqcp;
5654 	sd_dp = sqcp->sd_dp;
5655 	spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5656 
5657 	if (!sd_dp) {
5658 		sd_dp = kzalloc(sizeof(*sd_dp), GFP_ATOMIC);
5659 		if (!sd_dp) {
5660 			atomic_dec(&devip->num_in_q);
5661 			clear_bit(k, sqp->in_use_bm);
5662 			return SCSI_MLQUEUE_HOST_BUSY;
5663 		}
5664 		new_sd_dp = true;
5665 	} else {
5666 		new_sd_dp = false;
5667 	}
5668 
5669 	/* Set the hostwide tag */
5670 	if (sdebug_host_max_queue)
5671 		sd_dp->hc_idx = get_tag(cmnd);
5672 
5673 	if (polled)
5674 		ns_from_boot = ktime_get_boottime_ns();
5675 
5676 	/* one of the resp_*() response functions is called here */
5677 	cmnd->result = pfp ? pfp(cmnd, devip) : 0;
5678 	if (cmnd->result & SDEG_RES_IMMED_MASK) {
5679 		cmnd->result &= ~SDEG_RES_IMMED_MASK;
5680 		delta_jiff = ndelay = 0;
5681 	}
5682 	if (cmnd->result == 0 && scsi_result != 0)
5683 		cmnd->result = scsi_result;
5684 	if (cmnd->result == 0 && unlikely(sdebug_opts & SDEBUG_OPT_TRANSPORT_ERR)) {
5685 		if (atomic_read(&sdeb_inject_pending)) {
5686 			mk_sense_buffer(cmnd, ABORTED_COMMAND, TRANSPORT_PROBLEM, ACK_NAK_TO);
5687 			atomic_set(&sdeb_inject_pending, 0);
5688 			cmnd->result = check_condition_result;
5689 		}
5690 	}
5691 
5692 	if (unlikely(sdebug_verbose && cmnd->result))
5693 		sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n",
5694 			    __func__, cmnd->result);
5695 
5696 	if (delta_jiff > 0 || ndelay > 0) {
5697 		ktime_t kt;
5698 
5699 		if (delta_jiff > 0) {
5700 			u64 ns = jiffies_to_nsecs(delta_jiff);
5701 
5702 			if (sdebug_random && ns < U32_MAX) {
5703 				ns = get_random_u32_below((u32)ns);
5704 			} else if (sdebug_random) {
5705 				ns >>= 12;	/* scale to 4 usec precision */
5706 				if (ns < U32_MAX)	/* over 4 hours max */
5707 					ns = get_random_u32_below((u32)ns);
5708 				ns <<= 12;
5709 			}
5710 			kt = ns_to_ktime(ns);
5711 		} else {	/* ndelay has a 4.2 second max */
5712 			kt = sdebug_random ? get_random_u32_below((u32)ndelay) :
5713 					     (u32)ndelay;
5714 			if (ndelay < INCLUSIVE_TIMING_MAX_NS) {
5715 				u64 d = ktime_get_boottime_ns() - ns_from_boot;
5716 
5717 				if (kt <= d) {	/* elapsed duration >= kt */
5718 					spin_lock_irqsave(&sqp->qc_lock, iflags);
5719 					sqcp->a_cmnd = NULL;
5720 					atomic_dec(&devip->num_in_q);
5721 					clear_bit(k, sqp->in_use_bm);
5722 					spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5723 					if (new_sd_dp)
5724 						kfree(sd_dp);
5725 					/* call scsi_done() from this thread */
5726 					scsi_done(cmnd);
5727 					return 0;
5728 				}
5729 				/* otherwise reduce kt by elapsed time */
5730 				kt -= d;
5731 			}
5732 		}
5733 		if (polled) {
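			/* polled request: record the earliest completion time;
			 * the poll path reaps it once cmpl_ts has passed */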
5734 			sd_dp->cmpl_ts = ktime_add(ns_to_ktime(ns_from_boot), kt);
5735 			spin_lock_irqsave(&sqp->qc_lock, iflags);
5736 			if (!sd_dp->init_poll) {
5737 				sd_dp->init_poll = true;
5738 				sqcp->sd_dp = sd_dp;
5739 				sd_dp->sqa_idx = sqp - sdebug_q_arr;
5740 				sd_dp->qc_idx = k;
5741 			}
5742 			WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_POLL);
5743 			spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5744 		} else {
5745 			if (!sd_dp->init_hrt) {
5746 				sd_dp->init_hrt = true;
5747 				sqcp->sd_dp = sd_dp;
5748 				hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC,
5749 					     HRTIMER_MODE_REL_PINNED);
5750 				sd_dp->hrt.function = sdebug_q_cmd_hrt_complete;
5751 				sd_dp->sqa_idx = sqp - sdebug_q_arr;
5752 				sd_dp->qc_idx = k;
5753 			}
5754 			WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_HRT);
5755 			/* schedule the invocation of scsi_done() for a later time */
5756 			hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED);
5757 		}
5758 		if (sdebug_statistics)
5759 			sd_dp->issuing_cpu = raw_smp_processor_id();
5760 	} else {	/* jdelay < 0, use work queue */
5761 		if (unlikely((sdebug_opts & SDEBUG_OPT_CMD_ABORT) &&
5762 			     atomic_read(&sdeb_inject_pending)))
5763 			sd_dp->aborted = true;
5764 		if (polled) {
5765 			sd_dp->cmpl_ts = ns_to_ktime(ns_from_boot);
5766 			spin_lock_irqsave(&sqp->qc_lock, iflags);
5767 			if (!sd_dp->init_poll) {
5768 				sd_dp->init_poll = true;
5769 				sqcp->sd_dp = sd_dp;
5770 				sd_dp->sqa_idx = sqp - sdebug_q_arr;
5771 				sd_dp->qc_idx = k;
5772 			}
5773 			WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_POLL);
5774 			spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5775 		} else {
5776 			if (!sd_dp->init_wq) {
5777 				sd_dp->init_wq = true;
5778 				sqcp->sd_dp = sd_dp;
5779 				sd_dp->sqa_idx = sqp - sdebug_q_arr;
5780 				sd_dp->qc_idx = k;
5781 				INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);
5782 			}
5783 			WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_WQ);
5784 			schedule_work(&sd_dp->ew.work);
5785 		}
5786 		if (sdebug_statistics)
5787 			sd_dp->issuing_cpu = raw_smp_processor_id();
5788 		if (unlikely(sd_dp->aborted)) {
5789 			sdev_printk(KERN_INFO, sdp, "abort request tag %d\n",
5790 				    scsi_cmd_to_rq(cmnd)->tag);
5791 			blk_abort_request(scsi_cmd_to_rq(cmnd));
5792 			atomic_set(&sdeb_inject_pending, 0);
5793 			sd_dp->aborted = false;
5794 		}
5795 	}
5796 	if (unlikely((SDEBUG_OPT_Q_NOISE & sdebug_opts) && scsi_result == device_qfull_result))
5797 		sdev_printk(KERN_INFO, sdp, "%s: num_in_q=%d +1, %s%s\n", __func__,
5798 			    num_in_q, (inject ? "<inject> " : ""), "status: TASK SET FULL");
5799 	return 0;
5800 
5801 respond_in_thread:	/* call back to mid-layer using invocation thread */
5802 	cmnd->result = pfp != NULL ? pfp(cmnd, devip) : 0;
5803 	cmnd->result &= ~SDEG_RES_IMMED_MASK;
5804 	if (cmnd->result == 0 && scsi_result != 0)
5805 		cmnd->result = scsi_result;
5806 	scsi_done(cmnd);
5807 	return 0;
5808 }
5809 
5810 /* Note: The following macros create attribute files in the
5811    /sys/module/scsi_debug/parameters directory. Unfortunately this
5812    driver is not notified when one of those parameters is changed, so
5813    it cannot trigger auxiliary actions as it can when the corresponding
5814    attribute in the /sys/bus/pseudo/drivers/scsi_debug directory is changed.
5815  */
5816 module_param_named(add_host, sdebug_add_host, int, S_IRUGO | S_IWUSR);
5817 module_param_named(ato, sdebug_ato, int, S_IRUGO);
5818 module_param_named(cdb_len, sdebug_cdb_len, int, 0644);
5819 module_param_named(clustering, sdebug_clustering, bool, S_IRUGO | S_IWUSR);
5820 module_param_named(delay, sdebug_jdelay, int, S_IRUGO | S_IWUSR);
5821 module_param_named(dev_size_mb, sdebug_dev_size_mb, int, S_IRUGO);
5822 module_param_named(dif, sdebug_dif, int, S_IRUGO);
5823 module_param_named(dix, sdebug_dix, int, S_IRUGO);
5824 module_param_named(dsense, sdebug_dsense, int, S_IRUGO | S_IWUSR);
5825 module_param_named(every_nth, sdebug_every_nth, int, S_IRUGO | S_IWUSR);
5826 module_param_named(fake_rw, sdebug_fake_rw, int, S_IRUGO | S_IWUSR);
5827 module_param_named(guard, sdebug_guard, uint, S_IRUGO);
5828 module_param_named(host_lock, sdebug_host_lock, bool, S_IRUGO | S_IWUSR);
5829 module_param_named(host_max_queue, sdebug_host_max_queue, int, S_IRUGO);
5830 module_param_string(inq_product, sdebug_inq_product_id,
5831 		    sizeof(sdebug_inq_product_id), S_IRUGO | S_IWUSR);
5832 module_param_string(inq_rev, sdebug_inq_product_rev,
5833 		    sizeof(sdebug_inq_product_rev), S_IRUGO | S_IWUSR);
5834 module_param_string(inq_vendor, sdebug_inq_vendor_id,
5835 		    sizeof(sdebug_inq_vendor_id), S_IRUGO | S_IWUSR);
5836 module_param_named(lbprz, sdebug_lbprz, int, S_IRUGO);
5837 module_param_named(lbpu, sdebug_lbpu, int, S_IRUGO);
5838 module_param_named(lbpws, sdebug_lbpws, int, S_IRUGO);
5839 module_param_named(lbpws10, sdebug_lbpws10, int, S_IRUGO);
5840 module_param_named(lowest_aligned, sdebug_lowest_aligned, int, S_IRUGO);
5841 module_param_named(lun_format, sdebug_lun_am_i, int, S_IRUGO | S_IWUSR);
5842 module_param_named(max_luns, sdebug_max_luns, int, S_IRUGO | S_IWUSR);
5843 module_param_named(max_queue, sdebug_max_queue, int, S_IRUGO | S_IWUSR);
5844 module_param_named(medium_error_count, sdebug_medium_error_count, int,
5845 		   S_IRUGO | S_IWUSR);
5846 module_param_named(medium_error_start, sdebug_medium_error_start, int,
5847 		   S_IRUGO | S_IWUSR);
5848 module_param_named(ndelay, sdebug_ndelay, int, S_IRUGO | S_IWUSR);
5849 module_param_named(no_lun_0, sdebug_no_lun_0, int, S_IRUGO | S_IWUSR);
5850 module_param_named(no_rwlock, sdebug_no_rwlock, bool, S_IRUGO | S_IWUSR);
5851 module_param_named(no_uld, sdebug_no_uld, int, S_IRUGO);
5852 module_param_named(num_parts, sdebug_num_parts, int, S_IRUGO);
5853 module_param_named(num_tgts, sdebug_num_tgts, int, S_IRUGO | S_IWUSR);
5854 module_param_named(opt_blks, sdebug_opt_blks, int, S_IRUGO);
5855 module_param_named(opt_xferlen_exp, sdebug_opt_xferlen_exp, int, S_IRUGO);
5856 module_param_named(opts, sdebug_opts, int, S_IRUGO | S_IWUSR);
5857 module_param_named(per_host_store, sdebug_per_host_store, bool,
5858 		   S_IRUGO | S_IWUSR);
5859 module_param_named(physblk_exp, sdebug_physblk_exp, int, S_IRUGO);
5860 module_param_named(ptype, sdebug_ptype, int, S_IRUGO | S_IWUSR);
5861 module_param_named(random, sdebug_random, bool, S_IRUGO | S_IWUSR);
5862 module_param_named(removable, sdebug_removable, bool, S_IRUGO | S_IWUSR);
5863 module_param_named(scsi_level, sdebug_scsi_level, int, S_IRUGO);
5864 module_param_named(sector_size, sdebug_sector_size, int, S_IRUGO);
5865 module_param_named(statistics, sdebug_statistics, bool, S_IRUGO | S_IWUSR);
5866 module_param_named(strict, sdebug_strict, bool, S_IRUGO | S_IWUSR);
5867 module_param_named(submit_queues, submit_queues, int, S_IRUGO);
5868 module_param_named(poll_queues, poll_queues, int, S_IRUGO);
5869 module_param_named(tur_ms_to_ready, sdeb_tur_ms_to_ready, int, S_IRUGO);
5870 module_param_named(unmap_alignment, sdebug_unmap_alignment, int, S_IRUGO);
5871 module_param_named(unmap_granularity, sdebug_unmap_granularity, int, S_IRUGO);
5872 module_param_named(unmap_max_blocks, sdebug_unmap_max_blocks, int, S_IRUGO);
5873 module_param_named(unmap_max_desc, sdebug_unmap_max_desc, int, S_IRUGO);
5874 module_param_named(uuid_ctl, sdebug_uuid_ctl, int, S_IRUGO);
5875 module_param_named(virtual_gb, sdebug_virtual_gb, int, S_IRUGO | S_IWUSR);
5876 module_param_named(vpd_use_hostno, sdebug_vpd_use_hostno, int,
5877 		   S_IRUGO | S_IWUSR);
5878 module_param_named(wp, sdebug_wp, bool, S_IRUGO | S_IWUSR);
5879 module_param_named(write_same_length, sdebug_write_same_length, int,
5880 		   S_IRUGO | S_IWUSR);
5881 module_param_named(zbc, sdeb_zbc_model_s, charp, S_IRUGO);
5882 module_param_named(zone_cap_mb, sdeb_zbc_zone_cap_mb, int, S_IRUGO);
5883 module_param_named(zone_max_open, sdeb_zbc_max_open, int, S_IRUGO);
5884 module_param_named(zone_nr_conv, sdeb_zbc_nr_conv, int, S_IRUGO);
5885 module_param_named(zone_size_mb, sdeb_zbc_zone_size_mb, int, S_IRUGO);
5886 
5887 MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
5888 MODULE_DESCRIPTION("SCSI debug adapter driver");
5889 MODULE_LICENSE("GPL");
5890 MODULE_VERSION(SDEBUG_VERSION);
5891 
5892 MODULE_PARM_DESC(add_host, "add n hosts, in sysfs if negative remove host(s) (def=1)");
5893 MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
5894 MODULE_PARM_DESC(cdb_len, "suggest CDB lengths to drivers (def=10)");
5895 MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
5896 MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny");
5897 MODULE_PARM_DESC(dev_size_mb, "size in MiB of ram shared by devs(def=8)");
5898 MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
5899 MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
5900 MODULE_PARM_DESC(dsense, "use descriptor sense format (def=0 -> fixed)");
5901 MODULE_PARM_DESC(every_nth, "timeout every nth command (def=0)");
5902 MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
5903 MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
5904 MODULE_PARM_DESC(host_lock, "host_lock is ignored (def=0)");
5905 MODULE_PARM_DESC(host_max_queue,
5906 		 "host max # of queued cmds (0 to max(def) [max_queue fixed equal for !0])");
5907 MODULE_PARM_DESC(inq_product, "SCSI INQUIRY product string (def=\"scsi_debug\")");
5908 MODULE_PARM_DESC(inq_rev, "SCSI INQUIRY revision string (def=\""
5909 		 SDEBUG_VERSION "\")");
5910 MODULE_PARM_DESC(inq_vendor, "SCSI INQUIRY vendor string (def=\"Linux\")");
5911 MODULE_PARM_DESC(lbprz,
5912 		 "on read unmapped LBs return 0 when 1 (def), return 0xff when 2");
5913 MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
5914 MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
5915 MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
5916 MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
5917 MODULE_PARM_DESC(lun_format, "LUN format: 0 -> peripheral (def); 1 -> flat address method");
5918 MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate (def=1)");
5919 MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to max(def))");
5920 MODULE_PARM_DESC(medium_error_count, "count of sectors to return follow on MEDIUM error");
5921 MODULE_PARM_DESC(medium_error_start, "starting sector number to return MEDIUM error");
5922 MODULE_PARM_DESC(ndelay, "response delay in nanoseconds (def=0 -> ignore)");
5923 MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
5924 MODULE_PARM_DESC(no_rwlock, "don't protect user data reads+writes (def=0)");
5925 MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0)");
5926 MODULE_PARM_DESC(num_parts, "number of partitions (def=0)");
5927 MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate (def=1)");
5928 MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=1024)");
5929 MODULE_PARM_DESC(opt_xferlen_exp, "optimal transfer length granularity exponent (def=physblk_exp)");
5930 MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
5931 MODULE_PARM_DESC(per_host_store, "If set, next positive add_host will get new store (def=0)");
5932 MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
5933 MODULE_PARM_DESC(poll_queues, "support for iouring iopoll queues (1 to max(submit_queues - 1))");
5934 MODULE_PARM_DESC(ptype, "SCSI peripheral type (def=0[disk])");
5935 MODULE_PARM_DESC(random, "If set, uniformly randomize command duration between 0 and delay_in_ns");
5936 MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
5937 MODULE_PARM_DESC(scsi_level, "SCSI level to simulate (def=7[SPC-5])");
5938 MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
5939 MODULE_PARM_DESC(statistics, "collect statistics on commands, queues (def=0)");
5940 MODULE_PARM_DESC(strict, "stricter checks: reserved field in cdb (def=0)");
5941 MODULE_PARM_DESC(submit_queues, "support for block multi-queue (def=1)");
5942 MODULE_PARM_DESC(tur_ms_to_ready, "TEST UNIT READY millisecs before initial good status (def=0)");
5943 MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
5944 MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
5945 MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)");
5946 MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
5947 MODULE_PARM_DESC(uuid_ctl,
5948 		 "1->use uuid for lu name, 0->don't, 2->all use same (def=0)");
5949 MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)");
5950 MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
5951 MODULE_PARM_DESC(wp, "Write Protect (def=0)");
5952 MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
5953 MODULE_PARM_DESC(zbc, "'none' [0]; 'aware' [1]; 'managed' [2] (def=0). Can have 'host-' prefix");
5954 MODULE_PARM_DESC(zone_cap_mb, "Zone capacity in MiB (def=zone size)");
5955 MODULE_PARM_DESC(zone_max_open, "Maximum number of open zones; [0] for no limit (def=auto)");
5956 MODULE_PARM_DESC(zone_nr_conv, "Number of conventional zones (def=1)");
5957 MODULE_PARM_DESC(zone_size_mb, "Zone size in MiB (def=auto)");
5958 
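/* Illustrative load-time usage (parameters as documented above):
 *   modprobe scsi_debug dev_size_mb=256 num_tgts=2 max_luns=2
 *   modprobe scsi_debug zbc=managed zone_size_mb=128 zone_nr_conv=4
 * Writable parameters can also be changed later via the sysfs attributes
 * under /sys/bus/pseudo/drivers/scsi_debug/.
 */
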
5959 #define SDEBUG_INFO_LEN 256
5960 static char sdebug_info[SDEBUG_INFO_LEN];
5961 
5962 static const char *scsi_debug_info(struct Scsi_Host *shp)
5963 {
5964 	int k;
5965 
5966 	k = scnprintf(sdebug_info, SDEBUG_INFO_LEN, "%s: version %s [%s]\n",
5967 		      my_name, SDEBUG_VERSION, sdebug_version_date);
5968 	if (k >= (SDEBUG_INFO_LEN - 1))
5969 		return sdebug_info;
5970 	scnprintf(sdebug_info + k, SDEBUG_INFO_LEN - k,
5971 		  "  dev_size_mb=%d, opts=0x%x, submit_queues=%d, %s=%d",
5972 		  sdebug_dev_size_mb, sdebug_opts, submit_queues,
5973 		  "statistics", (int)sdebug_statistics);
5974 	return sdebug_info;
5975 }
5976 
5977 /* 'echo <val> > /proc/scsi/scsi_debug/<host_id>' writes to opts */
5978 static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer,
5979 				 int length)
5980 {
5981 	char arr[16];
5982 	int opts;
5983 	int min_len = length > 15 ? 15 : length;
5984 
5985 	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
5986 		return -EACCES;
5987 	memcpy(arr, buffer, min_len);
5988 	arr[min_len] = '\0';
5989 	if (1 != sscanf(arr, "%d", &opts))
5990 		return -EINVAL;
5991 	sdebug_opts = opts;
5992 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
5993 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
5994 	if (sdebug_every_nth != 0)
5995 		tweak_cmnd_count();
5996 	return length;
5997 }
5998 
5999 /* Output seen with 'cat /proc/scsi/scsi_debug/<host_id>'. It will be the
6000  * same for each scsi_debug host (if more than one). Some of the counters
6001  * shown are not atomic, so they may be inaccurate on a busy system. */
6002 static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
6003 {
6004 	int f, j, l;
6005 	struct sdebug_queue *sqp;
6006 	struct sdebug_host_info *sdhp;
6007 
6008 	seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n",
6009 		   SDEBUG_VERSION, sdebug_version_date);
6010 	seq_printf(m, "num_tgts=%d, %ssize=%d MB, opts=0x%x, every_nth=%d\n",
6011 		   sdebug_num_tgts, "shared (ram) ", sdebug_dev_size_mb,
6012 		   sdebug_opts, sdebug_every_nth);
6013 	seq_printf(m, "delay=%d, ndelay=%d, max_luns=%d, sector_size=%d %s\n",
6014 		   sdebug_jdelay, sdebug_ndelay, sdebug_max_luns,
6015 		   sdebug_sector_size, "bytes");
6016 	seq_printf(m, "cylinders=%d, heads=%d, sectors=%d, command aborts=%d\n",
6017 		   sdebug_cylinders_per, sdebug_heads, sdebug_sectors_per,
6018 		   num_aborts);
6019 	seq_printf(m, "RESETs: device=%d, target=%d, bus=%d, host=%d\n",
6020 		   num_dev_resets, num_target_resets, num_bus_resets,
6021 		   num_host_resets);
6022 	seq_printf(m, "dix_reads=%d, dix_writes=%d, dif_errors=%d\n",
6023 		   dix_reads, dix_writes, dif_errors);
6024 	seq_printf(m, "usec_in_jiffy=%lu, statistics=%d\n", TICK_NSEC / 1000,
6025 		   sdebug_statistics);
6026 	seq_printf(m, "cmnd_count=%d, completions=%d, %s=%d, a_tsf=%d, mq_polls=%d\n",
6027 		   atomic_read(&sdebug_cmnd_count),
6028 		   atomic_read(&sdebug_completions),
6029 		   "miss_cpus", atomic_read(&sdebug_miss_cpus),
6030 		   atomic_read(&sdebug_a_tsf),
6031 		   atomic_read(&sdeb_mq_poll_count));
6032 
6033 	seq_printf(m, "submit_queues=%d\n", submit_queues);
6034 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
6035 		seq_printf(m, "  queue %d:\n", j);
6036 		f = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
6037 		if (f != sdebug_max_queue) {
6038 			l = find_last_bit(sqp->in_use_bm, sdebug_max_queue);
6039 			seq_printf(m, "    in_use_bm BUSY: %s: %d,%d\n",
6040 				   "first,last bits", f, l);
6041 		}
6042 	}
6043 
6044 	seq_printf(m, "this host_no=%d\n", host->host_no);
6045 	if (!xa_empty(per_store_ap)) {
6046 		bool niu;
6047 		int idx;
6048 		unsigned long l_idx;
6049 		struct sdeb_store_info *sip;
6050 
6051 		seq_puts(m, "\nhost list:\n");
6052 		j = 0;
6053 		list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
6054 			idx = sdhp->si_idx;
6055 			seq_printf(m, "  %d: host_no=%d, si_idx=%d\n", j,
6056 				   sdhp->shost->host_no, idx);
6057 			++j;
6058 		}
6059 		seq_printf(m, "\nper_store array [most_recent_idx=%d]:\n",
6060 			   sdeb_most_recent_idx);
6061 		j = 0;
6062 		xa_for_each(per_store_ap, l_idx, sip) {
6063 			niu = xa_get_mark(per_store_ap, l_idx,
6064 					  SDEB_XA_NOT_IN_USE);
6065 			idx = (int)l_idx;
6066 			seq_printf(m, "  %d: idx=%d%s\n", j, idx,
6067 				   (niu ? "  not_in_use" : ""));
6068 			++j;
6069 		}
6070 	}
6071 	return 0;
6072 }
6073 
6074 static ssize_t delay_show(struct device_driver *ddp, char *buf)
6075 {
6076 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_jdelay);
6077 }
6078 /* Returns -EBUSY if jdelay is being changed and commands are queued. The unit
6079  * of delay is jiffies.
6080  */
6081 static ssize_t delay_store(struct device_driver *ddp, const char *buf,
6082 			   size_t count)
6083 {
6084 	int jdelay, res;
6085 
6086 	if (count > 0 && sscanf(buf, "%d", &jdelay) == 1) {
6087 		res = count;
6088 		if (sdebug_jdelay != jdelay) {
6089 			int j, k;
6090 			struct sdebug_queue *sqp;
6091 
6092 			block_unblock_all_queues(true);
6093 			for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
6094 			     ++j, ++sqp) {
6095 				k = find_first_bit(sqp->in_use_bm,
6096 						   sdebug_max_queue);
6097 				if (k != sdebug_max_queue) {
6098 					res = -EBUSY;   /* queued commands */
6099 					break;
6100 				}
6101 			}
6102 			if (res > 0) {
6103 				sdebug_jdelay = jdelay;
6104 				sdebug_ndelay = 0;
6105 			}
6106 			block_unblock_all_queues(false);
6107 		}
6108 		return res;
6109 	}
6110 	return -EINVAL;
6111 }
6112 static DRIVER_ATTR_RW(delay);
6113 
6114 static ssize_t ndelay_show(struct device_driver *ddp, char *buf)
6115 {
6116 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ndelay);
6117 }
6118 /* Returns -EBUSY if ndelay is being changed and commands are queued.
6119  * If > 0 and accepted then sdebug_jdelay is set to JDELAY_OVERRIDDEN. */
6120 static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
6121 			    size_t count)
6122 {
6123 	int ndelay, res;
6124 
6125 	if ((count > 0) && (1 == sscanf(buf, "%d", &ndelay)) &&
6126 	    (ndelay >= 0) && (ndelay < (1000 * 1000 * 1000))) {
6127 		res = count;
6128 		if (sdebug_ndelay != ndelay) {
6129 			int j, k;
6130 			struct sdebug_queue *sqp;
6131 
6132 			block_unblock_all_queues(true);
6133 			for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
6134 			     ++j, ++sqp) {
6135 				k = find_first_bit(sqp->in_use_bm,
6136 						   sdebug_max_queue);
6137 				if (k != sdebug_max_queue) {
6138 					res = -EBUSY;   /* queued commands */
6139 					break;
6140 				}
6141 			}
6142 			if (res > 0) {
6143 				sdebug_ndelay = ndelay;
6144 				sdebug_jdelay = ndelay  ? JDELAY_OVERRIDDEN
6145 							: DEF_JDELAY;
6146 			}
6147 			block_unblock_all_queues(false);
6148 		}
6149 		return res;
6150 	}
6151 	return -EINVAL;
6152 }
6153 static DRIVER_ATTR_RW(ndelay);
6154 
6155 static ssize_t opts_show(struct device_driver *ddp, char *buf)
6156 {
6157 	return scnprintf(buf, PAGE_SIZE, "0x%x\n", sdebug_opts);
6158 }
6159 
6160 static ssize_t opts_store(struct device_driver *ddp, const char *buf,
6161 			  size_t count)
6162 {
6163 	int opts;
6164 	char work[20];
6165 
6166 	if (sscanf(buf, "%10s", work) == 1) {
6167 		if (strncasecmp(work, "0x", 2) == 0) {
6168 			if (kstrtoint(work + 2, 16, &opts) == 0)
6169 				goto opts_done;
6170 		} else {
6171 			if (kstrtoint(work, 10, &opts) == 0)
6172 				goto opts_done;
6173 		}
6174 	}
6175 	return -EINVAL;
6176 opts_done:
6177 	sdebug_opts = opts;
6178 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
6179 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
6180 	tweak_cmnd_count();
6181 	return count;
6182 }
6183 static DRIVER_ATTR_RW(opts);
6184 
6185 static ssize_t ptype_show(struct device_driver *ddp, char *buf)
6186 {
6187 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ptype);
6188 }
6189 static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
6190 			   size_t count)
6191 {
6192 	int n;
6193 
6194 	/* Cannot change from or to TYPE_ZBC with sysfs */
6195 	if (sdebug_ptype == TYPE_ZBC)
6196 		return -EINVAL;
6197 
6198 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6199 		if (n == TYPE_ZBC)
6200 			return -EINVAL;
6201 		sdebug_ptype = n;
6202 		return count;
6203 	}
6204 	return -EINVAL;
6205 }
6206 static DRIVER_ATTR_RW(ptype);
6207 
6208 static ssize_t dsense_show(struct device_driver *ddp, char *buf)
6209 {
6210 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dsense);
6211 }
6212 static ssize_t dsense_store(struct device_driver *ddp, const char *buf,
6213 			    size_t count)
6214 {
6215 	int n;
6216 
6217 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6218 		sdebug_dsense = n;
6219 		return count;
6220 	}
6221 	return -EINVAL;
6222 }
6223 static DRIVER_ATTR_RW(dsense);
6224 
6225 static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
6226 {
6227 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_fake_rw);
6228 }
6229 static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
6230 			     size_t count)
6231 {
6232 	int n, idx;
6233 
6234 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6235 		bool want_store = (n == 0);
6236 		struct sdebug_host_info *sdhp;
6237 
6238 		n = (n > 0);
6239 		sdebug_fake_rw = (sdebug_fake_rw > 0);
6240 		if (sdebug_fake_rw == n)
6241 			return count;	/* not transitioning so do nothing */
6242 
6243 		if (want_store) {	/* 1 --> 0 transition, set up store */
6244 			if (sdeb_first_idx < 0) {
6245 				idx = sdebug_add_store();
6246 				if (idx < 0)
6247 					return idx;
6248 			} else {
6249 				idx = sdeb_first_idx;
6250 				xa_clear_mark(per_store_ap, idx,
6251 					      SDEB_XA_NOT_IN_USE);
6252 			}
6253 			/* make all hosts use same store */
6254 			list_for_each_entry(sdhp, &sdebug_host_list,
6255 					    host_list) {
6256 				if (sdhp->si_idx != idx) {
6257 					xa_set_mark(per_store_ap, sdhp->si_idx,
6258 						    SDEB_XA_NOT_IN_USE);
6259 					sdhp->si_idx = idx;
6260 				}
6261 			}
6262 			sdeb_most_recent_idx = idx;
6263 		} else {	/* 0 --> 1 transition is trigger for shrink */
6264 			sdebug_erase_all_stores(true /* apart from first */);
6265 		}
6266 		sdebug_fake_rw = n;
6267 		return count;
6268 	}
6269 	return -EINVAL;
6270 }
6271 static DRIVER_ATTR_RW(fake_rw);
6272 
6273 static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf)
6274 {
6275 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_lun_0);
6276 }
6277 static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf,
6278 			      size_t count)
6279 {
6280 	int n;
6281 
6282 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6283 		sdebug_no_lun_0 = n;
6284 		return count;
6285 	}
6286 	return -EINVAL;
6287 }
6288 static DRIVER_ATTR_RW(no_lun_0);
6289 
6290 static ssize_t num_tgts_show(struct device_driver *ddp, char *buf)
6291 {
6292 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_tgts);
6293 }
6294 static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf,
6295 			      size_t count)
6296 {
6297 	int n;
6298 
6299 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6300 		sdebug_num_tgts = n;
6301 		sdebug_max_tgts_luns();
6302 		return count;
6303 	}
6304 	return -EINVAL;
6305 }
6306 static DRIVER_ATTR_RW(num_tgts);
6307 
6308 static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf)
6309 {
6310 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dev_size_mb);
6311 }
6312 static DRIVER_ATTR_RO(dev_size_mb);
6313 
6314 static ssize_t per_host_store_show(struct device_driver *ddp, char *buf)
6315 {
6316 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_per_host_store);
6317 }
6318 
6319 static ssize_t per_host_store_store(struct device_driver *ddp, const char *buf,
6320 				    size_t count)
6321 {
6322 	bool v;
6323 
6324 	if (kstrtobool(buf, &v))
6325 		return -EINVAL;
6326 
6327 	sdebug_per_host_store = v;
6328 	return count;
6329 }
6330 static DRIVER_ATTR_RW(per_host_store);
6331 
6332 static ssize_t num_parts_show(struct device_driver *ddp, char *buf)
6333 {
6334 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_parts);
6335 }
6336 static DRIVER_ATTR_RO(num_parts);
6337 
6338 static ssize_t every_nth_show(struct device_driver *ddp, char *buf)
6339 {
6340 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_every_nth);
6341 }
6342 static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
6343 			       size_t count)
6344 {
6345 	int nth;
6346 	char work[20];
6347 
6348 	if (sscanf(buf, "%10s", work) == 1) {
6349 		if (strncasecmp(work, "0x", 2) == 0) {
6350 			if (kstrtoint(work + 2, 16, &nth) == 0)
6351 				goto every_nth_done;
6352 		} else {
6353 			if (kstrtoint(work, 10, &nth) == 0)
6354 				goto every_nth_done;
6355 		}
6356 	}
6357 	return -EINVAL;
6358 
6359 every_nth_done:
6360 	sdebug_every_nth = nth;
6361 	if (nth && !sdebug_statistics) {
6362 		pr_info("every_nth needs statistics=1, setting it\n");
6363 		sdebug_statistics = true;
6364 	}
6365 	tweak_cmnd_count();
6366 	return count;
6367 }
6368 static DRIVER_ATTR_RW(every_nth);
6369 
6370 static ssize_t lun_format_show(struct device_driver *ddp, char *buf)
6371 {
6372 	return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_lun_am);
6373 }
6374 static ssize_t lun_format_store(struct device_driver *ddp, const char *buf,
6375 				size_t count)
6376 {
6377 	int n;
6378 	bool changed;
6379 
6380 	if (kstrtoint(buf, 0, &n))
6381 		return -EINVAL;
6382 	if (n >= 0) {
6383 		if (n > (int)SAM_LUN_AM_FLAT) {
6384 			pr_warn("only LUN address methods 0 and 1 are supported\n");
6385 			return -EINVAL;
6386 		}
6387 		changed = ((int)sdebug_lun_am != n);
6388 		sdebug_lun_am = n;
6389 		if (changed && sdebug_scsi_level >= 5) {	/* >= SPC-3 */
6390 			struct sdebug_host_info *sdhp;
6391 			struct sdebug_dev_info *dp;
6392 
6393 			spin_lock(&sdebug_host_list_lock);
6394 			list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
6395 				list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
6396 					set_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
6397 				}
6398 			}
6399 			spin_unlock(&sdebug_host_list_lock);
6400 		}
6401 		return count;
6402 	}
6403 	return -EINVAL;
6404 }
6405 static DRIVER_ATTR_RW(lun_format);
6406 
6407 static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
6408 {
6409 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_luns);
6410 }
6411 static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
6412 			      size_t count)
6413 {
6414 	int n;
6415 	bool changed;
6416 
6417 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6418 		if (n > 256) {
6419 			pr_warn("max_luns can be no more than 256\n");
6420 			return -EINVAL;
6421 		}
6422 		changed = (sdebug_max_luns != n);
6423 		sdebug_max_luns = n;
6424 		sdebug_max_tgts_luns();
6425 		if (changed && (sdebug_scsi_level >= 5)) {	/* >= SPC-3 */
6426 			struct sdebug_host_info *sdhp;
6427 			struct sdebug_dev_info *dp;
6428 
6429 			spin_lock(&sdebug_host_list_lock);
6430 			list_for_each_entry(sdhp, &sdebug_host_list,
6431 					    host_list) {
6432 				list_for_each_entry(dp, &sdhp->dev_info_list,
6433 						    dev_list) {
6434 					set_bit(SDEBUG_UA_LUNS_CHANGED,
6435 						dp->uas_bm);
6436 				}
6437 			}
6438 			spin_unlock(&sdebug_host_list_lock);
6439 		}
6440 		return count;
6441 	}
6442 	return -EINVAL;
6443 }
6444 static DRIVER_ATTR_RW(max_luns);
6445 
6446 static ssize_t max_queue_show(struct device_driver *ddp, char *buf)
6447 {
6448 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_queue);
6449 }
6450 /* N.B. max_queue can be changed while there are queued commands. In-flight
6451  * commands beyond the new max_queue will still be completed. */
6452 static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
6453 			       size_t count)
6454 {
6455 	int j, n, k, a;
6456 	struct sdebug_queue *sqp;
6457 
6458 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
6459 	    (n <= SDEBUG_CANQUEUE) &&
6460 	    (sdebug_host_max_queue == 0)) {
6461 		block_unblock_all_queues(true);
6462 		k = 0;
6463 		for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
6464 		     ++j, ++sqp) {
6465 			a = find_last_bit(sqp->in_use_bm, SDEBUG_CANQUEUE);
6466 			if (a > k)
6467 				k = a;
6468 		}
6469 		sdebug_max_queue = n;
6470 		if (k == SDEBUG_CANQUEUE)
6471 			atomic_set(&retired_max_queue, 0);
6472 		else if (k >= n)
6473 			atomic_set(&retired_max_queue, k + 1);
6474 		else
6475 			atomic_set(&retired_max_queue, 0);
6476 		block_unblock_all_queues(false);
6477 		return count;
6478 	}
6479 	return -EINVAL;
6480 }
6481 static DRIVER_ATTR_RW(max_queue);
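
/*
 * Usage sketch (a hypothetical shell session; the default depth is
 * SDEBUG_CANQUEUE, e.g. 192 on a 64-bit build). Per the N.B. comment above,
 * shrinking the depth while commands are in flight is allowed; completions
 * above the new limit are drained via the retired_max_queue bookkeeping:
 *
 *   echo 64 > /sys/bus/pseudo/drivers/scsi_debug/max_queue
 */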
6482 
6483 static ssize_t host_max_queue_show(struct device_driver *ddp, char *buf)
6484 {
6485 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_host_max_queue);
6486 }
6487 
6488 static ssize_t no_rwlock_show(struct device_driver *ddp, char *buf)
6489 {
6490 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_rwlock);
6491 }
6492 
6493 static ssize_t no_rwlock_store(struct device_driver *ddp, const char *buf, size_t count)
6494 {
6495 	bool v;
6496 
6497 	if (kstrtobool(buf, &v))
6498 		return -EINVAL;
6499 
6500 	sdebug_no_rwlock = v;
6501 	return count;
6502 }
6503 static DRIVER_ATTR_RW(no_rwlock);
6504 
6505 /*
6506  * Since this is used for .can_queue, and we get the hc_idx tag from the bitmap
6507  * in range [0, sdebug_host_max_queue), we can't change it.
6508  */
6509 static DRIVER_ATTR_RO(host_max_queue);
6510 
6511 static ssize_t no_uld_show(struct device_driver *ddp, char *buf)
6512 {
6513 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_uld);
6514 }
6515 static DRIVER_ATTR_RO(no_uld);
6516 
6517 static ssize_t scsi_level_show(struct device_driver *ddp, char *buf)
6518 {
6519 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_scsi_level);
6520 }
6521 static DRIVER_ATTR_RO(scsi_level);
6522 
6523 static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf)
6524 {
6525 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_virtual_gb);
6526 }
6527 static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
6528 				size_t count)
6529 {
6530 	int n;
6531 	bool changed;
6532 
6533 	/* Reject capacity changes for ZBC drives for now */
6534 	if (sdeb_zbc_in_use)
6535 		return -EOPNOTSUPP;
6536 
6537 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6538 		changed = (sdebug_virtual_gb != n);
6539 		sdebug_virtual_gb = n;
6540 		sdebug_capacity = get_sdebug_capacity();
6541 		if (changed) {
6542 			struct sdebug_host_info *sdhp;
6543 			struct sdebug_dev_info *dp;
6544 
6545 			spin_lock(&sdebug_host_list_lock);
6546 			list_for_each_entry(sdhp, &sdebug_host_list,
6547 					    host_list) {
6548 				list_for_each_entry(dp, &sdhp->dev_info_list,
6549 						    dev_list) {
6550 					set_bit(SDEBUG_UA_CAPACITY_CHANGED,
6551 						dp->uas_bm);
6552 				}
6553 			}
6554 			spin_unlock(&sdebug_host_list_lock);
6555 		}
6556 		return count;
6557 	}
6558 	return -EINVAL;
6559 }
6560 static DRIVER_ATTR_RW(virtual_gb);
6561 
6562 static ssize_t add_host_show(struct device_driver *ddp, char *buf)
6563 {
6564 	/* show the absolute number of active hosts, not the last delta written */
6565 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_hosts);
6566 }
6567 
6568 static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
6569 			      size_t count)
6570 {
6571 	bool found;
6572 	unsigned long idx;
6573 	struct sdeb_store_info *sip;
6574 	bool want_phs = (sdebug_fake_rw == 0) && sdebug_per_host_store;
6575 	int delta_hosts;
6576 
6577 	if (sscanf(buf, "%d", &delta_hosts) != 1)
6578 		return -EINVAL;
6579 	if (delta_hosts > 0) {
6580 		do {
6581 			found = false;
6582 			if (want_phs) {
6583 				xa_for_each_marked(per_store_ap, idx, sip,
6584 						   SDEB_XA_NOT_IN_USE) {
6585 					sdeb_most_recent_idx = (int)idx;
6586 					found = true;
6587 					break;
6588 				}
6589 				if (found)	/* re-use case */
6590 					sdebug_add_host_helper((int)idx);
6591 				else
6592 					sdebug_do_add_host(true);
6593 			} else {
6594 				sdebug_do_add_host(false);
6595 			}
6596 		} while (--delta_hosts);
6597 	} else if (delta_hosts < 0) {
6598 		do {
6599 			sdebug_do_remove_host(false);
6600 		} while (++delta_hosts);
6601 	}
6602 	return count;
6603 }
6604 static DRIVER_ATTR_RW(add_host);
6605 
6606 static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf)
6607 {
6608 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_vpd_use_hostno);
6609 }
6610 static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf,
6611 				    size_t count)
6612 {
6613 	int n;
6614 
6615 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6616 		sdebug_vpd_use_hostno = n;
6617 		return count;
6618 	}
6619 	return -EINVAL;
6620 }
6621 static DRIVER_ATTR_RW(vpd_use_hostno);
6622 
6623 static ssize_t statistics_show(struct device_driver *ddp, char *buf)
6624 {
6625 	return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_statistics);
6626 }
6627 static ssize_t statistics_store(struct device_driver *ddp, const char *buf,
6628 				size_t count)
6629 {
6630 	int n;
6631 
6632 	if ((count > 0) && (sscanf(buf, "%d", &n) == 1) && (n >= 0)) {
6633 		if (n > 0)
6634 			sdebug_statistics = true;
6635 		else {
6636 			clear_queue_stats();
6637 			sdebug_statistics = false;
6638 		}
6639 		return count;
6640 	}
6641 	return -EINVAL;
6642 }
6643 static DRIVER_ATTR_RW(statistics);
6644 
6645 static ssize_t sector_size_show(struct device_driver *ddp, char *buf)
6646 {
6647 	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_sector_size);
6648 }
6649 static DRIVER_ATTR_RO(sector_size);
6650 
6651 static ssize_t submit_queues_show(struct device_driver *ddp, char *buf)
6652 {
6653 	return scnprintf(buf, PAGE_SIZE, "%d\n", submit_queues);
6654 }
6655 static DRIVER_ATTR_RO(submit_queues);
6656 
6657 static ssize_t dix_show(struct device_driver *ddp, char *buf)
6658 {
6659 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dix);
6660 }
6661 static DRIVER_ATTR_RO(dix);
6662 
6663 static ssize_t dif_show(struct device_driver *ddp, char *buf)
6664 {
6665 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dif);
6666 }
6667 static DRIVER_ATTR_RO(dif);
6668 
6669 static ssize_t guard_show(struct device_driver *ddp, char *buf)
6670 {
6671 	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_guard);
6672 }
6673 static DRIVER_ATTR_RO(guard);
6674 
6675 static ssize_t ato_show(struct device_driver *ddp, char *buf)
6676 {
6677 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ato);
6678 }
6679 static DRIVER_ATTR_RO(ato);
6680 
6681 static ssize_t map_show(struct device_driver *ddp, char *buf)
6682 {
6683 	ssize_t count = 0;
6684 
6685 	if (!scsi_debug_lbp())
6686 		return scnprintf(buf, PAGE_SIZE, "0-%u\n",
6687 				 sdebug_store_sectors);
6688 
6689 	if (sdebug_fake_rw == 0 && !xa_empty(per_store_ap)) {
6690 		struct sdeb_store_info *sip = xa_load(per_store_ap, 0);
6691 
6692 		if (sip)
6693 			count = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
6694 					  (int)map_size, sip->map_storep);
6695 	}
6696 	buf[count++] = '\n';
6697 	buf[count] = '\0';
6698 
6699 	return count;
6700 }
6701 static DRIVER_ATTR_RO(map);
6702 
6703 static ssize_t random_show(struct device_driver *ddp, char *buf)
6704 {
6705 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_random);
6706 }
6707 
6708 static ssize_t random_store(struct device_driver *ddp, const char *buf,
6709 			    size_t count)
6710 {
6711 	bool v;
6712 
6713 	if (kstrtobool(buf, &v))
6714 		return -EINVAL;
6715 
6716 	sdebug_random = v;
6717 	return count;
6718 }
6719 static DRIVER_ATTR_RW(random);
6720 
6721 static ssize_t removable_show(struct device_driver *ddp, char *buf)
6722 {
6723 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_removable ? 1 : 0);
6724 }
6725 static ssize_t removable_store(struct device_driver *ddp, const char *buf,
6726 			       size_t count)
6727 {
6728 	int n;
6729 
6730 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6731 		sdebug_removable = (n > 0);
6732 		return count;
6733 	}
6734 	return -EINVAL;
6735 }
6736 static DRIVER_ATTR_RW(removable);
6737 
6738 static ssize_t host_lock_show(struct device_driver *ddp, char *buf)
6739 {
6740 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_host_lock);
6741 }
6742 /* N.B. sdebug_host_lock does nothing, kept for backward compatibility */
6743 static ssize_t host_lock_store(struct device_driver *ddp, const char *buf,
6744 			       size_t count)
6745 {
6746 	int n;
6747 
6748 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6749 		sdebug_host_lock = (n > 0);
6750 		return count;
6751 	}
6752 	return -EINVAL;
6753 }
6754 static DRIVER_ATTR_RW(host_lock);
6755 
6756 static ssize_t strict_show(struct device_driver *ddp, char *buf)
6757 {
6758 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_strict);
6759 }
6760 static ssize_t strict_store(struct device_driver *ddp, const char *buf,
6761 			    size_t count)
6762 {
6763 	int n;
6764 
6765 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6766 		sdebug_strict = (n > 0);
6767 		return count;
6768 	}
6769 	return -EINVAL;
6770 }
6771 static DRIVER_ATTR_RW(strict);
6772 
6773 static ssize_t uuid_ctl_show(struct device_driver *ddp, char *buf)
6774 {
6775 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_uuid_ctl);
6776 }
6777 static DRIVER_ATTR_RO(uuid_ctl);
6778 
6779 static ssize_t cdb_len_show(struct device_driver *ddp, char *buf)
6780 {
6781 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_cdb_len);
6782 }
6783 static ssize_t cdb_len_store(struct device_driver *ddp, const char *buf,
6784 			     size_t count)
6785 {
6786 	int ret, n;
6787 
6788 	ret = kstrtoint(buf, 0, &n);
6789 	if (ret)
6790 		return ret;
6791 	sdebug_cdb_len = n;
6792 	all_config_cdb_len();
6793 	return count;
6794 }
6795 static DRIVER_ATTR_RW(cdb_len);
6796 
6797 static const char * const zbc_model_strs_a[] = {
6798 	[BLK_ZONED_NONE] = "none",
6799 	[BLK_ZONED_HA]   = "host-aware",
6800 	[BLK_ZONED_HM]   = "host-managed",
6801 };
6802 
6803 static const char * const zbc_model_strs_b[] = {
6804 	[BLK_ZONED_NONE] = "no",
6805 	[BLK_ZONED_HA]   = "aware",
6806 	[BLK_ZONED_HM]   = "managed",
6807 };
6808 
6809 static const char * const zbc_model_strs_c[] = {
6810 	[BLK_ZONED_NONE] = "0",
6811 	[BLK_ZONED_HA]   = "1",
6812 	[BLK_ZONED_HM]   = "2",
6813 };
6814 
6815 static int sdeb_zbc_model_str(const char *cp)
6816 {
6817 	int res = sysfs_match_string(zbc_model_strs_a, cp);
6818 
6819 	if (res < 0) {
6820 		res = sysfs_match_string(zbc_model_strs_b, cp);
6821 		if (res < 0) {
6822 			res = sysfs_match_string(zbc_model_strs_c, cp);
6823 			if (res < 0)
6824 				return -EINVAL;
6825 		}
6826 	}
6827 	return res;
6828 }
6829 
6830 static ssize_t zbc_show(struct device_driver *ddp, char *buf)
6831 {
6832 	return scnprintf(buf, PAGE_SIZE, "%s\n",
6833 			 zbc_model_strs_a[sdeb_zbc_model]);
6834 }
6835 static DRIVER_ATTR_RO(zbc);
6836 
6837 static ssize_t tur_ms_to_ready_show(struct device_driver *ddp, char *buf)
6838 {
6839 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdeb_tur_ms_to_ready);
6840 }
6841 static DRIVER_ATTR_RO(tur_ms_to_ready);
6842 
6843 /*
6844  * Note: the following array creates attribute files in the
6845  * /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
6846  * files (over those found in the /sys/module/scsi_debug/parameters
6847  * directory) is that auxiliary actions can be triggered when an attribute
6848  * is changed; see add_host_store() above, for example.
 */
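
/*
 * Usage sketch (a hypothetical shell session, assuming the module is loaded
 * and sysfs is mounted at /sys):
 *
 *   # add two more pseudo hosts, then expose 4 LUNs per target on all hosts
 *   echo 2 > /sys/bus/pseudo/drivers/scsi_debug/add_host
 *   echo 4 > /sys/bus/pseudo/drivers/scsi_debug/max_luns
 *
 * Writing max_luns through this attribute also raises a LUNS_CHANGED unit
 * attention on each device when applicable (see max_luns_store() above),
 * a side effect the raw module parameter file would not trigger.
 */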
6849 
6850 static struct attribute *sdebug_drv_attrs[] = {
6851 	&driver_attr_delay.attr,
6852 	&driver_attr_opts.attr,
6853 	&driver_attr_ptype.attr,
6854 	&driver_attr_dsense.attr,
6855 	&driver_attr_fake_rw.attr,
6856 	&driver_attr_host_max_queue.attr,
6857 	&driver_attr_no_lun_0.attr,
6858 	&driver_attr_num_tgts.attr,
6859 	&driver_attr_dev_size_mb.attr,
6860 	&driver_attr_num_parts.attr,
6861 	&driver_attr_every_nth.attr,
6862 	&driver_attr_lun_format.attr,
6863 	&driver_attr_max_luns.attr,
6864 	&driver_attr_max_queue.attr,
6865 	&driver_attr_no_rwlock.attr,
6866 	&driver_attr_no_uld.attr,
6867 	&driver_attr_scsi_level.attr,
6868 	&driver_attr_virtual_gb.attr,
6869 	&driver_attr_add_host.attr,
6870 	&driver_attr_per_host_store.attr,
6871 	&driver_attr_vpd_use_hostno.attr,
6872 	&driver_attr_sector_size.attr,
6873 	&driver_attr_statistics.attr,
6874 	&driver_attr_submit_queues.attr,
6875 	&driver_attr_dix.attr,
6876 	&driver_attr_dif.attr,
6877 	&driver_attr_guard.attr,
6878 	&driver_attr_ato.attr,
6879 	&driver_attr_map.attr,
6880 	&driver_attr_random.attr,
6881 	&driver_attr_removable.attr,
6882 	&driver_attr_host_lock.attr,
6883 	&driver_attr_ndelay.attr,
6884 	&driver_attr_strict.attr,
6885 	&driver_attr_uuid_ctl.attr,
6886 	&driver_attr_cdb_len.attr,
6887 	&driver_attr_tur_ms_to_ready.attr,
6888 	&driver_attr_zbc.attr,
6889 	NULL,
6890 };
6891 ATTRIBUTE_GROUPS(sdebug_drv);
6892 
6893 static struct device *pseudo_primary;
6894 
6895 static int __init scsi_debug_init(void)
6896 {
6897 	bool want_store = (sdebug_fake_rw == 0);
6898 	unsigned long sz;
6899 	int k, ret, hosts_to_add;
6900 	int idx = -1;
6901 
6902 	ramdisk_lck_a[0] = &atomic_rw;
6903 	ramdisk_lck_a[1] = &atomic_rw2;
6904 	atomic_set(&retired_max_queue, 0);
6905 
6906 	if (sdebug_ndelay >= 1000 * 1000 * 1000) {
6907 		pr_warn("ndelay must be less than 1 second, ignored\n");
6908 		sdebug_ndelay = 0;
6909 	} else if (sdebug_ndelay > 0)
6910 		sdebug_jdelay = JDELAY_OVERRIDDEN;
6911 
6912 	switch (sdebug_sector_size) {
6913 	case  512:
6914 	case 1024:
6915 	case 2048:
6916 	case 4096:
6917 		break;
6918 	default:
6919 		pr_err("invalid sector_size %d\n", sdebug_sector_size);
6920 		return -EINVAL;
6921 	}
6922 
6923 	switch (sdebug_dif) {
6924 	case T10_PI_TYPE0_PROTECTION:
6925 		break;
6926 	case T10_PI_TYPE1_PROTECTION:
6927 	case T10_PI_TYPE2_PROTECTION:
6928 	case T10_PI_TYPE3_PROTECTION:
6929 		have_dif_prot = true;
6930 		break;
6931 
6932 	default:
6933 		pr_err("dif must be 0, 1, 2 or 3\n");
6934 		return -EINVAL;
6935 	}
6936 
6937 	if (sdebug_num_tgts < 0) {
6938 		pr_err("num_tgts must be >= 0\n");
6939 		return -EINVAL;
6940 	}
6941 
6942 	if (sdebug_guard > 1) {
6943 		pr_err("guard must be 0 or 1\n");
6944 		return -EINVAL;
6945 	}
6946 
6947 	if (sdebug_ato > 1) {
6948 		pr_err("ato must be 0 or 1\n");
6949 		return -EINVAL;
6950 	}
6951 
6952 	if (sdebug_physblk_exp > 15) {
6953 		pr_err("invalid physblk_exp %u\n", sdebug_physblk_exp);
6954 		return -EINVAL;
6955 	}
6956 
6957 	sdebug_lun_am = sdebug_lun_am_i;
6958 	if (sdebug_lun_am > SAM_LUN_AM_FLAT) {
6959 		pr_warn("Invalid LUN format %u, using default\n", (int)sdebug_lun_am);
6960 		sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
6961 	}
6962 
6963 	if (sdebug_max_luns > 256) {
6964 		if (sdebug_max_luns > 16384) {
6965 			pr_warn("max_luns can be no more than 16384, using default\n");
6966 			sdebug_max_luns = DEF_MAX_LUNS;
6967 		}
6968 		sdebug_lun_am = SAM_LUN_AM_FLAT;
6969 	}
6970 
6971 	if (sdebug_lowest_aligned > 0x3fff) {
6972 		pr_err("lowest_aligned too big: %u\n", sdebug_lowest_aligned);
6973 		return -EINVAL;
6974 	}
6975 
6976 	if (submit_queues < 1) {
6977 		pr_err("submit_queues must be 1 or more\n");
6978 		return -EINVAL;
6979 	}
6980 
6981 	if ((sdebug_max_queue > SDEBUG_CANQUEUE) || (sdebug_max_queue < 1)) {
6982 		pr_err("max_queue must be in range [1, %d]\n", SDEBUG_CANQUEUE);
6983 		return -EINVAL;
6984 	}
6985 
6986 	if ((sdebug_host_max_queue > SDEBUG_CANQUEUE) ||
6987 	    (sdebug_host_max_queue < 0)) {
6988 		pr_err("host_max_queue must be in range [0, %d]\n",
6989 		       SDEBUG_CANQUEUE);
6990 		return -EINVAL;
6991 	}
6992 
6993 	if (sdebug_host_max_queue &&
6994 	    (sdebug_max_queue != sdebug_host_max_queue)) {
6995 		sdebug_max_queue = sdebug_host_max_queue;
6996 		pr_warn("fixing max submit queue depth to host max queue depth, %d\n",
6997 			sdebug_max_queue);
6998 	}
6999 
7000 	sdebug_q_arr = kcalloc(submit_queues, sizeof(struct sdebug_queue),
7001 			       GFP_KERNEL);
7002 	if (sdebug_q_arr == NULL)
7003 		return -ENOMEM;
7004 	for (k = 0; k < submit_queues; ++k)
7005 		spin_lock_init(&sdebug_q_arr[k].qc_lock);
7006 
7007 	/*
7008 	 * check for host managed zoned block device specified with
7009 	 * ptype=0x14 or zbc=XXX.
7010 	 */
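	/*
	 * For example (hypothetical invocations), either of the following
	 * module loads yields a host-managed ZBC disk; "managed" matches
	 * zbc_model_strs_b[] above:
	 *
	 *   modprobe scsi_debug ptype=0x14
	 *   modprobe scsi_debug zbc=managed
	 */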
7011 	if (sdebug_ptype == TYPE_ZBC) {
7012 		sdeb_zbc_model = BLK_ZONED_HM;
7013 	} else if (sdeb_zbc_model_s && *sdeb_zbc_model_s) {
7014 		k = sdeb_zbc_model_str(sdeb_zbc_model_s);
7015 		if (k < 0) {
7016 			ret = k;
7017 			goto free_q_arr;
7018 		}
7019 		sdeb_zbc_model = k;
7020 		switch (sdeb_zbc_model) {
7021 		case BLK_ZONED_NONE:
7022 		case BLK_ZONED_HA:
7023 			sdebug_ptype = TYPE_DISK;
7024 			break;
7025 		case BLK_ZONED_HM:
7026 			sdebug_ptype = TYPE_ZBC;
7027 			break;
7028 		default:
7029 			pr_err("Invalid ZBC model\n");
7030 			ret = -EINVAL;
7031 			goto free_q_arr;
7032 		}
7033 	}
7034 	if (sdeb_zbc_model != BLK_ZONED_NONE) {
7035 		sdeb_zbc_in_use = true;
7036 		if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
7037 			sdebug_dev_size_mb = DEF_ZBC_DEV_SIZE_MB;
7038 	}
7039 
7040 	if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
7041 		sdebug_dev_size_mb = DEF_DEV_SIZE_MB;
7042 	if (sdebug_dev_size_mb < 1)
7043 		sdebug_dev_size_mb = 1;  /* force minimum 1 MB ramdisk */
7044 	sz = (unsigned long)sdebug_dev_size_mb * 1048576;
7045 	sdebug_store_sectors = sz / sdebug_sector_size;
7046 	sdebug_capacity = get_sdebug_capacity();
7047 
7048 	/* play around with geometry, don't waste too much on track 0 */
7049 	sdebug_heads = 8;
7050 	sdebug_sectors_per = 32;
7051 	if (sdebug_dev_size_mb >= 256)
7052 		sdebug_heads = 64;
7053 	else if (sdebug_dev_size_mb >= 16)
7054 		sdebug_heads = 32;
7055 	sdebug_cylinders_per = (unsigned long)sdebug_capacity /
7056 			       (sdebug_sectors_per * sdebug_heads);
7057 	if (sdebug_cylinders_per >= 1024) {
7058 		/* other LLDs do this; implies >= 1GB ram disk ... */
7059 		sdebug_heads = 255;
7060 		sdebug_sectors_per = 63;
7061 		sdebug_cylinders_per = (unsigned long)sdebug_capacity /
7062 			       (sdebug_sectors_per * sdebug_heads);
7063 	}
7064 	if (scsi_debug_lbp()) {
7065 		sdebug_unmap_max_blocks =
7066 			clamp(sdebug_unmap_max_blocks, 0U, 0xffffffffU);
7067 
7068 		sdebug_unmap_max_desc =
7069 			clamp(sdebug_unmap_max_desc, 0U, 256U);
7070 
7071 		sdebug_unmap_granularity =
7072 			clamp(sdebug_unmap_granularity, 1U, 0xffffffffU);
7073 
7074 		if (sdebug_unmap_alignment &&
7075 		    sdebug_unmap_granularity <=
7076 		    sdebug_unmap_alignment) {
7077 			pr_err("unmap_granularity <= unmap_alignment\n");
7078 			ret = -EINVAL;
7079 			goto free_q_arr;
7080 		}
7081 	}
7082 	xa_init_flags(per_store_ap, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
7083 	if (want_store) {
7084 		idx = sdebug_add_store();
7085 		if (idx < 0) {
7086 			ret = idx;
7087 			goto free_q_arr;
7088 		}
7089 	}
7090 
7091 	pseudo_primary = root_device_register("pseudo_0");
7092 	if (IS_ERR(pseudo_primary)) {
7093 		pr_warn("root_device_register() error\n");
7094 		ret = PTR_ERR(pseudo_primary);
7095 		goto free_vm;
7096 	}
7097 	ret = bus_register(&pseudo_lld_bus);
7098 	if (ret < 0) {
7099 		pr_warn("bus_register error: %d\n", ret);
7100 		goto dev_unreg;
7101 	}
7102 	ret = driver_register(&sdebug_driverfs_driver);
7103 	if (ret < 0) {
7104 		pr_warn("driver_register error: %d\n", ret);
7105 		goto bus_unreg;
7106 	}
7107 
7108 	hosts_to_add = sdebug_add_host;
7109 	sdebug_add_host = 0;
7110 
7111 	for (k = 0; k < hosts_to_add; k++) {
7112 		if (want_store && k == 0) {
7113 			ret = sdebug_add_host_helper(idx);
7114 			if (ret < 0) {
7115 				pr_err("add_host_helper k=%d, error=%d\n",
7116 				       k, -ret);
7117 				break;
7118 			}
7119 		} else {
7120 			ret = sdebug_do_add_host(want_store &&
7121 						 sdebug_per_host_store);
7122 			if (ret < 0) {
7123 				pr_err("add_host k=%d error=%d\n", k, -ret);
7124 				break;
7125 			}
7126 		}
7127 	}
7128 	if (sdebug_verbose)
7129 		pr_info("built %d host(s)\n", sdebug_num_hosts);
7130 
7131 	return 0;
7132 
7133 bus_unreg:
7134 	bus_unregister(&pseudo_lld_bus);
7135 dev_unreg:
7136 	root_device_unregister(pseudo_primary);
7137 free_vm:
7138 	sdebug_erase_store(idx, NULL);
7139 free_q_arr:
7140 	kfree(sdebug_q_arr);
7141 	return ret;
7142 }
7143 
7144 static void __exit scsi_debug_exit(void)
7145 {
7146 	int k = sdebug_num_hosts;
7147 
7148 	stop_all_queued();
7149 	for (; k; k--)
7150 		sdebug_do_remove_host(true);
7151 	free_all_queued();
7152 	driver_unregister(&sdebug_driverfs_driver);
7153 	bus_unregister(&pseudo_lld_bus);
7154 	root_device_unregister(pseudo_primary);
7155 
7156 	sdebug_erase_all_stores(false);
7157 	xa_destroy(per_store_ap);
7158 	kfree(sdebug_q_arr);
7159 }
7160 
7161 device_initcall(scsi_debug_init);
7162 module_exit(scsi_debug_exit);
7163 
7164 static void sdebug_release_adapter(struct device *dev)
7165 {
7166 	struct sdebug_host_info *sdbg_host;
7167 
7168 	sdbg_host = to_sdebug_host(dev);
7169 	kfree(sdbg_host);
7170 }
7171 
7172 /* idx must be valid; if sip is NULL then it will be obtained using idx */
7173 static void sdebug_erase_store(int idx, struct sdeb_store_info *sip)
7174 {
7175 	if (idx < 0)
7176 		return;
7177 	if (!sip) {
7178 		if (xa_empty(per_store_ap))
7179 			return;
7180 		sip = xa_load(per_store_ap, idx);
7181 		if (!sip)
7182 			return;
7183 	}
7184 	vfree(sip->map_storep);
7185 	vfree(sip->dif_storep);
7186 	vfree(sip->storep);
7187 	xa_erase(per_store_ap, idx);
7188 	kfree(sip);
7189 }
7190 
7191 /* Callers pass apart_from_first==false only in the shutdown case. */
7192 static void sdebug_erase_all_stores(bool apart_from_first)
7193 {
7194 	unsigned long idx;
7195 	struct sdeb_store_info *sip = NULL;
7196 
7197 	xa_for_each(per_store_ap, idx, sip) {
7198 		if (apart_from_first)
7199 			apart_from_first = false;
7200 		else
7201 			sdebug_erase_store(idx, sip);
7202 	}
7203 	if (apart_from_first)
7204 		sdeb_most_recent_idx = sdeb_first_idx;
7205 }
7206 
7207 /*
7208  * Returns the new element's index (idx) in the store xarray if >= 0,
7209  * else a negated errno. The number of stores is limited to 65536.
7210  */
7211 static int sdebug_add_store(void)
7212 {
7213 	int res;
7214 	u32 n_idx;
7215 	unsigned long iflags;
7216 	unsigned long sz = (unsigned long)sdebug_dev_size_mb * 1048576;
7217 	struct sdeb_store_info *sip = NULL;
7218 	struct xa_limit xal = { .max = 1 << 16, .min = 0 };
7219 
7220 	sip = kzalloc(sizeof(*sip), GFP_KERNEL);
7221 	if (!sip)
7222 		return -ENOMEM;
7223 
7224 	xa_lock_irqsave(per_store_ap, iflags);
7225 	res = __xa_alloc(per_store_ap, &n_idx, sip, xal, GFP_ATOMIC);
7226 	if (unlikely(res < 0)) {
7227 		xa_unlock_irqrestore(per_store_ap, iflags);
7228 		kfree(sip);
7229 		pr_warn("%s: xa_alloc() errno=%d\n", __func__, -res);
7230 		return res;
7231 	}
7232 	sdeb_most_recent_idx = n_idx;
7233 	if (sdeb_first_idx < 0)
7234 		sdeb_first_idx = n_idx;
7235 	xa_unlock_irqrestore(per_store_ap, iflags);
7236 
7237 	res = -ENOMEM;
7238 	sip->storep = vzalloc(sz);
7239 	if (!sip->storep) {
7240 		pr_err("user data oom\n");
7241 		goto err;
7242 	}
7243 	if (sdebug_num_parts > 0)
7244 		sdebug_build_parts(sip->storep, sz);
7245 
7246 	/* DIF/DIX: what T10 calls Protection Information (PI) */
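	/*
	 * For example (hypothetical invocation), a Type 1 protected ramdisk
	 * could be created with:
	 *
	 *   modprobe scsi_debug dif=1 dix=1 guard=0 dev_size_mb=64
	 *
	 * giving 131072 sectors, each with an 8-byte PI tuple, so dif_storep
	 * below would hold 1 MiB of protection information.
	 */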
7247 	if (sdebug_dix) {
7248 		int dif_size;
7249 
7250 		dif_size = sdebug_store_sectors * sizeof(struct t10_pi_tuple);
7251 		sip->dif_storep = vmalloc(dif_size);
7252 
7253 		pr_info("dif_storep %u bytes @ %pK\n", dif_size,
7254 			sip->dif_storep);
7255 
7256 		if (!sip->dif_storep) {
7257 			pr_err("DIX oom\n");
7258 			goto err;
7259 		}
7260 		memset(sip->dif_storep, 0xff, dif_size);
7261 	}
7262 	/* Logical Block Provisioning */
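	/*
	 * For example (hypothetical invocation), thin provisioning with UNMAP
	 * support and, with the default 512-byte sectors, roughly one map bit
	 * per 1 MiB of store:
	 *
	 *   modprobe scsi_debug lbpu=1 unmap_granularity=2048 dev_size_mb=256
	 */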
7263 	if (scsi_debug_lbp()) {
7264 		map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
7265 		sip->map_storep = vmalloc(array_size(sizeof(long),
7266 						     BITS_TO_LONGS(map_size)));
7267 
7268 		pr_info("%lu provisioning blocks\n", map_size);
7269 
7270 		if (!sip->map_storep) {
7271 			pr_err("LBP map oom\n");
7272 			goto err;
7273 		}
7274 
7275 		bitmap_zero(sip->map_storep, map_size);
7276 
7277 		/* Map first 1KB for partition table */
7278 		if (sdebug_num_parts)
7279 			map_region(sip, 0, 2);
7280 	}
7281 
7282 	rwlock_init(&sip->macc_lck);
7283 	return (int)n_idx;
7284 err:
7285 	sdebug_erase_store((int)n_idx, sip);
7286 	pr_warn("%s: failed, errno=%d\n", __func__, -res);
7287 	return res;
7288 }
7289 
7290 static int sdebug_add_host_helper(int per_host_idx)
7291 {
7292 	int k, devs_per_host, idx;
7293 	int error = -ENOMEM;
7294 	struct sdebug_host_info *sdbg_host;
7295 	struct sdebug_dev_info *sdbg_devinfo, *tmp;
7296 
7297 	sdbg_host = kzalloc(sizeof(*sdbg_host), GFP_KERNEL);
7298 	if (!sdbg_host)
7299 		return -ENOMEM;
7300 	idx = (per_host_idx < 0) ? sdeb_first_idx : per_host_idx;
7301 	if (xa_get_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE))
7302 		xa_clear_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
7303 	sdbg_host->si_idx = idx;
7304 
7305 	INIT_LIST_HEAD(&sdbg_host->dev_info_list);
7306 
7307 	devs_per_host = sdebug_num_tgts * sdebug_max_luns;
7308 	for (k = 0; k < devs_per_host; k++) {
7309 		sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
7310 		if (!sdbg_devinfo)
7311 			goto clean;
7312 	}
7313 
7314 	spin_lock(&sdebug_host_list_lock);
7315 	list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
7316 	spin_unlock(&sdebug_host_list_lock);
7317 
7318 	sdbg_host->dev.bus = &pseudo_lld_bus;
7319 	sdbg_host->dev.parent = pseudo_primary;
7320 	sdbg_host->dev.release = &sdebug_release_adapter;
7321 	dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_num_hosts);
7322 
7323 	error = device_register(&sdbg_host->dev);
7324 	if (error) {
7325 		spin_lock(&sdebug_host_list_lock);
7326 		list_del(&sdbg_host->host_list);
7327 		spin_unlock(&sdebug_host_list_lock);
7328 		goto clean;
7329 	}
7330 
7331 	++sdebug_num_hosts;
7332 	return 0;
7333 
7334 clean:
7335 	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
7336 				 dev_list) {
7337 		list_del(&sdbg_devinfo->dev_list);
7338 		kfree(sdbg_devinfo->zstate);
7339 		kfree(sdbg_devinfo);
7340 	}
7341 	if (sdbg_host->dev.release)
7342 		put_device(&sdbg_host->dev);
7343 	else
7344 		kfree(sdbg_host);
7345 	pr_warn("%s: failed, errno=%d\n", __func__, -error);
7346 	return error;
7347 }
7348 
7349 static int sdebug_do_add_host(bool mk_new_store)
7350 {
7351 	int ph_idx = sdeb_most_recent_idx;
7352 
7353 	if (mk_new_store) {
7354 		ph_idx = sdebug_add_store();
7355 		if (ph_idx < 0)
7356 			return ph_idx;
7357 	}
7358 	return sdebug_add_host_helper(ph_idx);
7359 }
7360 
7361 static void sdebug_do_remove_host(bool the_end)
7362 {
7363 	int idx = -1;
7364 	struct sdebug_host_info *sdbg_host = NULL;
7365 	struct sdebug_host_info *sdbg_host2;
7366 
7367 	spin_lock(&sdebug_host_list_lock);
7368 	if (!list_empty(&sdebug_host_list)) {
7369 		sdbg_host = list_entry(sdebug_host_list.prev,
7370 				       struct sdebug_host_info, host_list);
7371 		idx = sdbg_host->si_idx;
7372 	}
7373 	if (!the_end && idx >= 0) {
7374 		bool unique = true;
7375 
7376 		list_for_each_entry(sdbg_host2, &sdebug_host_list, host_list) {
7377 			if (sdbg_host2 == sdbg_host)
7378 				continue;
7379 			if (idx == sdbg_host2->si_idx) {
7380 				unique = false;
7381 				break;
7382 			}
7383 		}
7384 		if (unique) {
7385 			xa_set_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
7386 			if (idx == sdeb_most_recent_idx)
7387 				--sdeb_most_recent_idx;
7388 		}
7389 	}
7390 	if (sdbg_host)
7391 		list_del(&sdbg_host->host_list);
7392 	spin_unlock(&sdebug_host_list_lock);
7393 
7394 	if (!sdbg_host)
7395 		return;
7396 
7397 	device_unregister(&sdbg_host->dev);
7398 	--sdebug_num_hosts;
7399 }
7400 
7401 static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
7402 {
7403 	int num_in_q = 0;
7404 	struct sdebug_dev_info *devip;
7405 
7406 	block_unblock_all_queues(true);
7407 	devip = (struct sdebug_dev_info *)sdev->hostdata;
7408 	if (NULL == devip) {
7409 		block_unblock_all_queues(false);
7410 		return	-ENODEV;
7411 	}
7412 	num_in_q = atomic_read(&devip->num_in_q);
7413 
7414 	if (qdepth > SDEBUG_CANQUEUE) {
7415 		qdepth = SDEBUG_CANQUEUE;
7416 		pr_warn("%s: requested qdepth [%d] exceeds canqueue [%d], trimming\n", __func__,
7417 			qdepth, SDEBUG_CANQUEUE);
7418 	}
7419 	if (qdepth < 1)
7420 		qdepth = 1;
7421 	if (qdepth != sdev->queue_depth)
7422 		scsi_change_queue_depth(sdev, qdepth);
7423 
7424 	if (SDEBUG_OPT_Q_NOISE & sdebug_opts) {
7425 		sdev_printk(KERN_INFO, sdev, "%s: qdepth=%d, num_in_q=%d\n",
7426 			    __func__, qdepth, num_in_q);
7427 	}
7428 	block_unblock_all_queues(false);
7429 	return sdev->queue_depth;
7430 }
7431 
7432 static bool fake_timeout(struct scsi_cmnd *scp)
7433 {
7434 	if (0 == (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth))) {
7435 		if (sdebug_every_nth < -1)
7436 			sdebug_every_nth = -1;
7437 		if (SDEBUG_OPT_TIMEOUT & sdebug_opts)
7438 			return true; /* ignore command causing timeout */
7439 		else if (SDEBUG_OPT_MAC_TIMEOUT & sdebug_opts &&
7440 			 scsi_medium_access_command(scp))
7441 			return true; /* time out reads and writes */
7442 	}
7443 	return false;
7444 }
7445 
7446 /* Response to TUR or media access command when device stopped */
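/*
 * For example (hypothetical invocation), a device that reports itself as
 * "becoming ready" for its first two seconds could be created with:
 *
 *   modprobe scsi_debug tur_ms_to_ready=2000
 *
 * During that window, TEST UNIT READY sense data carries the remaining
 * milliseconds in its INFORMATION field, as computed below.
 */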
7447 static int resp_not_ready(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
7448 {
7449 	int stopped_state;
7450 	u64 diff_ns = 0;
7451 	ktime_t now_ts = ktime_get_boottime();
7452 	struct scsi_device *sdp = scp->device;
7453 
7454 	stopped_state = atomic_read(&devip->stopped);
7455 	if (stopped_state == 2) {
7456 		if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
7457 			diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));
7458 			if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
7459 				/* tur_ms_to_ready timer extinguished */
7460 				atomic_set(&devip->stopped, 0);
7461 				return 0;
7462 			}
7463 		}
7464 		mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x1);
7465 		if (sdebug_verbose)
7466 			sdev_printk(KERN_INFO, sdp,
7467 				    "%s: Not ready: in process of becoming ready\n", my_name);
7468 		if (scp->cmnd[0] == TEST_UNIT_READY) {
7469 			u64 tur_nanosecs_to_ready = (u64)sdeb_tur_ms_to_ready * 1000000;
7470 
7471 			if (diff_ns <= tur_nanosecs_to_ready)
7472 				diff_ns = tur_nanosecs_to_ready - diff_ns;
7473 			else
7474 				diff_ns = tur_nanosecs_to_ready;
7475 			/* As per 20-061r2 approved for spc6 by T10 on 20200716 */
7476 			do_div(diff_ns, 1000000);	/* diff_ns becomes milliseconds */
7477 			scsi_set_sense_information(scp->sense_buffer, SCSI_SENSE_BUFFERSIZE,
7478 						   diff_ns);
7479 			return check_condition_result;
7480 		}
7481 	}
7482 	mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x2);
7483 	if (sdebug_verbose)
7484 		sdev_printk(KERN_INFO, sdp, "%s: Not ready: initializing command required\n",
7485 			    my_name);
7486 	return check_condition_result;
7487 }
7488 
7489 static void sdebug_map_queues(struct Scsi_Host *shost)
7490 {
7491 	int i, qoff;
7492 
7493 	if (shost->nr_hw_queues == 1)
7494 		return;
7495 
7496 	for (i = 0, qoff = 0; i < HCTX_MAX_TYPES; i++) {
7497 		struct blk_mq_queue_map *map = &shost->tag_set.map[i];
7498 
7499 		map->nr_queues  = 0;
7500 
7501 		if (i == HCTX_TYPE_DEFAULT)
7502 			map->nr_queues = submit_queues - poll_queues;
7503 		else if (i == HCTX_TYPE_POLL)
7504 			map->nr_queues = poll_queues;
7505 
7506 		if (!map->nr_queues) {
7507 			BUG_ON(i == HCTX_TYPE_DEFAULT);
7508 			continue;
7509 		}
7510 
7511 		map->queue_offset = qoff;
7512 		blk_mq_map_queues(map);
7513 
7514 		qoff += map->nr_queues;
7515 	}
7516 }
7517 
7518 static int sdebug_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
7519 {
7520 	bool first;
7521 	bool retiring = false;
7522 	int num_entries = 0;
7523 	unsigned int qc_idx = 0;
7524 	unsigned long iflags;
7525 	ktime_t kt_from_boot = ktime_get_boottime();
7526 	struct sdebug_queue *sqp;
7527 	struct sdebug_queued_cmd *sqcp;
7528 	struct scsi_cmnd *scp;
7529 	struct sdebug_dev_info *devip;
7530 	struct sdebug_defer *sd_dp;
7531 
7532 	sqp = sdebug_q_arr + queue_num;
7533 
7534 	spin_lock_irqsave(&sqp->qc_lock, iflags);
7535 
7536 	qc_idx = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
7537 	if (qc_idx >= sdebug_max_queue)
7538 		goto unlock;
7539 
7540 	for (first = true; first || qc_idx + 1 < sdebug_max_queue; ) {
7541 		if (first) {
7542 			first = false;
7543 			if (!test_bit(qc_idx, sqp->in_use_bm))
7544 				continue;
7545 		} else {
7546 			qc_idx = find_next_bit(sqp->in_use_bm, sdebug_max_queue, qc_idx + 1);
7547 		}
7548 		if (qc_idx >= sdebug_max_queue)
7549 			break;
7550 
7551 		sqcp = &sqp->qc_arr[qc_idx];
7552 		sd_dp = sqcp->sd_dp;
7553 		if (unlikely(!sd_dp))
7554 			continue;
7555 		scp = sqcp->a_cmnd;
7556 		if (unlikely(scp == NULL)) {
7557 			pr_err("scp is NULL, queue_num=%d, qc_idx=%u from %s\n",
7558 			       queue_num, qc_idx, __func__);
7559 			break;
7560 		}
7561 		if (READ_ONCE(sd_dp->defer_t) == SDEB_DEFER_POLL) {
7562 			if (kt_from_boot < sd_dp->cmpl_ts)
7563 				continue;
7564 
7565 		} else		/* ignoring non-REQ_POLLED requests */
7566 			continue;
7567 		devip = (struct sdebug_dev_info *)scp->device->hostdata;
7568 		if (likely(devip))
7569 			atomic_dec(&devip->num_in_q);
7570 		else
7571 			pr_err("devip=NULL from %s\n", __func__);
7572 		if (unlikely(atomic_read(&retired_max_queue) > 0))
7573 			retiring = true;
7574 
7575 		sqcp->a_cmnd = NULL;
7576 		if (unlikely(!test_and_clear_bit(qc_idx, sqp->in_use_bm))) {
7577 			pr_err("Unexpected completion sqp %p queue_num=%d qc_idx=%u from %s\n",
7578 				sqp, queue_num, qc_idx, __func__);
7579 			break;
7580 		}
7581 		if (unlikely(retiring)) {	/* user has reduced max_queue */
7582 			int k, retval;
7583 
7584 			retval = atomic_read(&retired_max_queue);
7585 			if (qc_idx >= retval) {
7586 				pr_err("index %u too large\n", qc_idx);
7587 				break;
7588 			}
7589 			k = find_last_bit(sqp->in_use_bm, retval);
7590 			if ((k < sdebug_max_queue) || (k == retval))
7591 				atomic_set(&retired_max_queue, 0);
7592 			else
7593 				atomic_set(&retired_max_queue, k + 1);
7594 		}
7595 		WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_NONE);
7596 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
7597 		scsi_done(scp); /* callback to mid level */
7598 		num_entries++;
7599 		spin_lock_irqsave(&sqp->qc_lock, iflags);
7600 		if (find_first_bit(sqp->in_use_bm, sdebug_max_queue) >= sdebug_max_queue)
7601 			break;
7602 	}
7603 
7604 unlock:
7605 	spin_unlock_irqrestore(&sqp->qc_lock, iflags);
7606 
7607 	if (num_entries > 0)
7608 		atomic_add(num_entries, &sdeb_mq_poll_count);
7609 	return num_entries;
7610 }
7611 
7612 static int scsi_debug_queuecommand(struct Scsi_Host *shost,
7613 				   struct scsi_cmnd *scp)
7614 {
7615 	u8 sdeb_i;
7616 	struct scsi_device *sdp = scp->device;
7617 	const struct opcode_info_t *oip;
7618 	const struct opcode_info_t *r_oip;
7619 	struct sdebug_dev_info *devip;
7620 	u8 *cmd = scp->cmnd;
7621 	int (*r_pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
7622 	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *) = NULL;
7623 	int k, na;
7624 	int errsts = 0;
7625 	u64 lun_index = sdp->lun & 0x3FFF;
7626 	u32 flags;
7627 	u16 sa;
7628 	u8 opcode = cmd[0];
7629 	bool has_wlun_rl;
7630 	bool inject_now;
7631 
7632 	scsi_set_resid(scp, 0);
7633 	if (sdebug_statistics) {
7634 		atomic_inc(&sdebug_cmnd_count);
7635 		inject_now = inject_on_this_cmd();
7636 	} else {
7637 		inject_now = false;
7638 	}
7639 	if (unlikely(sdebug_verbose &&
7640 		     !(SDEBUG_OPT_NO_CDB_NOISE & sdebug_opts))) {
7641 		char b[120];
7642 		int n, len, sb;
7643 
7644 		len = scp->cmd_len;
7645 		sb = (int)sizeof(b);
7646 		if (len > 32)
7647 			strcpy(b, "too long, over 32 bytes");
7648 		else {
7649 			for (k = 0, n = 0; k < len && n < sb; ++k)
7650 				n += scnprintf(b + n, sb - n, "%02x ",
7651 					       (u32)cmd[k]);
7652 		}
7653 		sdev_printk(KERN_INFO, sdp, "%s: tag=%#x, cmd %s\n", my_name,
7654 			    blk_mq_unique_tag(scsi_cmd_to_rq(scp)), b);
7655 	}
7656 	if (unlikely(inject_now && (sdebug_opts & SDEBUG_OPT_HOST_BUSY)))
7657 		return SCSI_MLQUEUE_HOST_BUSY;
7658 	has_wlun_rl = (sdp->lun == SCSI_W_LUN_REPORT_LUNS);
7659 	if (unlikely(lun_index >= sdebug_max_luns && !has_wlun_rl))
7660 		goto err_out;
7661 
7662 	sdeb_i = opcode_ind_arr[opcode];	/* fully mapped */
7663 	oip = &opcode_info_arr[sdeb_i];		/* safe if table consistent */
7664 	devip = (struct sdebug_dev_info *)sdp->hostdata;
7665 	if (unlikely(!devip)) {
7666 		devip = find_build_dev_info(sdp);
7667 		if (NULL == devip)
7668 			goto err_out;
7669 	}
7670 	if (unlikely(inject_now && !atomic_read(&sdeb_inject_pending)))
7671 		atomic_set(&sdeb_inject_pending, 1);
7672 
7673 	na = oip->num_attached;
7674 	r_pfp = oip->pfp;
7675 	if (na) {	/* multiple commands with this opcode */
7676 		r_oip = oip;
7677 		if (FF_SA & r_oip->flags) {
7678 			if (F_SA_LOW & oip->flags)
7679 				sa = 0x1f & cmd[1];
7680 			else
7681 				sa = get_unaligned_be16(cmd + 8);
7682 			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
7683 				if (opcode == oip->opcode && sa == oip->sa)
7684 					break;
7685 			}
7686 		} else {   /* since no service action only check opcode */
7687 			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
7688 				if (opcode == oip->opcode)
7689 					break;
7690 			}
7691 		}
7692 		if (k > na) {
7693 			if (F_SA_LOW & r_oip->flags)
7694 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 4);
7695 			else if (F_SA_HIGH & r_oip->flags)
7696 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, 7);
7697 			else
7698 				mk_sense_invalid_opcode(scp);
7699 			goto check_cond;
7700 		}
7701 	}	/* else (when na==0) we assume the oip is a match */
7702 	flags = oip->flags;
7703 	if (unlikely(F_INV_OP & flags)) {
7704 		mk_sense_invalid_opcode(scp);
7705 		goto check_cond;
7706 	}
7707 	if (unlikely(has_wlun_rl && !(F_RL_WLUN_OK & flags))) {
7708 		if (sdebug_verbose)
7709 			sdev_printk(KERN_INFO, sdp, "%s: Opcode 0x%x not supported for wlun\n",
7710 				    my_name, opcode);
7711 		mk_sense_invalid_opcode(scp);
7712 		goto check_cond;
7713 	}
7714 	if (unlikely(sdebug_strict)) {	/* check cdb against mask */
7715 		u8 rem;
7716 		int j;
7717 
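		/*
		 * For each CDB byte past the opcode, any bit that is set in
		 * the command but clear in the opcode's len_mask is invalid;
		 * the inner loop leaves j at the highest offending bit so the
		 * sense data can point at byte k, bit j.
		 */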
7718 		for (k = 1; k < oip->len_mask[0] && k < 16; ++k) {
7719 			rem = ~oip->len_mask[k] & cmd[k];
7720 			if (rem) {
7721 				for (j = 7; j >= 0; --j, rem <<= 1) {
7722 					if (0x80 & rem)
7723 						break;
7724 				}
7725 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, k, j);
7726 				goto check_cond;
7727 			}
7728 		}
7729 	}
7730 	if (unlikely(!(F_SKIP_UA & flags) &&
7731 		     find_first_bit(devip->uas_bm,
7732 				    SDEBUG_NUM_UAS) != SDEBUG_NUM_UAS)) {
7733 		errsts = make_ua(scp, devip);
7734 		if (errsts)
7735 			goto check_cond;
7736 	}
7737 	if (unlikely(((F_M_ACCESS & flags) || scp->cmnd[0] == TEST_UNIT_READY) &&
7738 		     atomic_read(&devip->stopped))) {
7739 		errsts = resp_not_ready(scp, devip);
7740 		if (errsts)
7741 			goto fini;
7742 	}
7743 	if (sdebug_fake_rw && (F_FAKE_RW & flags))
7744 		goto fini;
7745 	if (unlikely(sdebug_every_nth)) {
7746 		if (fake_timeout(scp))
7747 			return 0;	/* ignore command: make trouble */
7748 	}
7749 	if (likely(oip->pfp))
7750 		pfp = oip->pfp;	/* calls a resp_* function */
7751 	else
7752 		pfp = r_pfp;    /* if leaf function ptr NULL, try the root's */
7753 
7754 fini:
7755 	if (F_DELAY_OVERR & flags)	/* cmds like INQUIRY respond asap */
7756 		return schedule_resp(scp, devip, errsts, pfp, 0, 0);
7757 	else if ((flags & F_LONG_DELAY) && (sdebug_jdelay > 0 ||
7758 					    sdebug_ndelay > 10000)) {
7759 		/*
7760 		 * Skip long delays if ndelay <= 10 microseconds. Otherwise
7761 		 * for Start Stop Unit (SSU) want at least 1 second delay and
7762 		 * if sdebug_jdelay>1 want a long delay of that many seconds.
7763 		 * For Synchronize Cache want 1/20 of SSU's delay.
7764 		 */
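		/*
		 * Worked example (assuming HZ=250 and USER_HZ=100): for a
		 * SYNCHRONIZE CACHE with sdebug_jdelay < 2, jdelay starts at 1
		 * and denom is 20, so mult_frac(100 * 1, 250, 20 * 100) = 12
		 * jiffies, about 48 ms, i.e. ~1/20 of the 1 s SSU delay.
		 */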
7765 		int jdelay = (sdebug_jdelay < 2) ? 1 : sdebug_jdelay;
7766 		int denom = (flags & F_SYNC_DELAY) ? 20 : 1;
7767 
7768 		jdelay = mult_frac(USER_HZ * jdelay, HZ, denom * USER_HZ);
7769 		return schedule_resp(scp, devip, errsts, pfp, jdelay, 0);
7770 	} else
7771 		return schedule_resp(scp, devip, errsts, pfp, sdebug_jdelay,
7772 				     sdebug_ndelay);
7773 check_cond:
7774 	return schedule_resp(scp, devip, check_condition_result, NULL, 0, 0);
7775 err_out:
7776 	return schedule_resp(scp, NULL, DID_NO_CONNECT << 16, NULL, 0, 0);
7777 }
7778 
7779 static struct scsi_host_template sdebug_driver_template = {
7780 	.show_info =		scsi_debug_show_info,
7781 	.write_info =		scsi_debug_write_info,
7782 	.proc_name =		sdebug_proc_name,
7783 	.name =			"SCSI DEBUG",
7784 	.info =			scsi_debug_info,
7785 	.slave_alloc =		scsi_debug_slave_alloc,
7786 	.slave_configure =	scsi_debug_slave_configure,
7787 	.slave_destroy =	scsi_debug_slave_destroy,
7788 	.ioctl =		scsi_debug_ioctl,
7789 	.queuecommand =		scsi_debug_queuecommand,
7790 	.change_queue_depth =	sdebug_change_qdepth,
7791 	.map_queues =		sdebug_map_queues,
7792 	.mq_poll =		sdebug_blk_mq_poll,
7793 	.eh_abort_handler =	scsi_debug_abort,
7794 	.eh_device_reset_handler = scsi_debug_device_reset,
7795 	.eh_target_reset_handler = scsi_debug_target_reset,
7796 	.eh_bus_reset_handler = scsi_debug_bus_reset,
7797 	.eh_host_reset_handler = scsi_debug_host_reset,
7798 	.can_queue =		SDEBUG_CANQUEUE,
7799 	.this_id =		7,
7800 	.sg_tablesize =		SG_MAX_SEGMENTS,
7801 	.cmd_per_lun =		DEF_CMD_PER_LUN,
7802 	.max_sectors =		-1U,
7803 	.max_segment_size =	-1U,
7804 	.module =		THIS_MODULE,
7805 	.track_queue_depth =	1,
7806 };
7807 
7808 static int sdebug_driver_probe(struct device *dev)
7809 {
7810 	int error = 0;
7811 	struct sdebug_host_info *sdbg_host;
7812 	struct Scsi_Host *hpnt;
7813 	int hprot;
7814 
7815 	sdbg_host = to_sdebug_host(dev);
7816 
7817 	sdebug_driver_template.can_queue = sdebug_max_queue;
7818 	sdebug_driver_template.cmd_per_lun = sdebug_max_queue;
7819 	if (!sdebug_clustering)
7820 		sdebug_driver_template.dma_boundary = PAGE_SIZE - 1;
7821 
7822 	hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
7823 	if (NULL == hpnt) {
7824 		pr_err("scsi_host_alloc failed\n");
7825 		error = -ENODEV;
7826 		return error;
7827 	}
7828 	if (submit_queues > nr_cpu_ids) {
7829 		pr_warn("%s: trim submit_queues (was %d) to nr_cpu_ids=%u\n",
7830 			my_name, submit_queues, nr_cpu_ids);
7831 		submit_queues = nr_cpu_ids;
7832 	}
7833 	/*
7834 	 * Decide whether to tell scsi subsystem that we want mq. The
7835 	 * following should give the same answer for each host.
7836 	 */
7837 	hpnt->nr_hw_queues = submit_queues;
7838 	if (sdebug_host_max_queue)
7839 		hpnt->host_tagset = 1;
7840 
7841 	/* poll queues are possible for nr_hw_queues > 1 */
7842 	if (hpnt->nr_hw_queues == 1 || (poll_queues < 1)) {
7843 		pr_warn("%s: trim poll_queues to 0. poll_q/nr_hw = (%d/%d)\n",
7844 			 my_name, poll_queues, hpnt->nr_hw_queues);
7845 		poll_queues = 0;
7846 	}
7847 
7848 	/*
7849 	 * Poll queues don't need interrupts, but we need at least one I/O queue
7850 	 * left over for non-polled I/O.
7851 	 * If condition not met, trim poll_queues to 1 (just for simplicity).
7852 	 */
7853 	if (poll_queues >= submit_queues) {
7854 		if (submit_queues < 3)
7855 			pr_warn("%s: trim poll_queues to 1\n", my_name);
7856 		else
7857 			pr_warn("%s: trim poll_queues to 1. Perhaps try poll_queues=%d\n",
7858 				my_name, submit_queues - 1);
7859 		poll_queues = 1;
7860 	}
7861 	if (poll_queues)
7862 		hpnt->nr_maps = 3;
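
	/*
	 * For example (hypothetical invocation), four submit queues with one
	 * of them reserved for polled (REQ_POLLED) I/O:
	 *
	 *   modprobe scsi_debug submit_queues=4 poll_queues=1
	 *
	 * Polled completions are then reaped by sdebug_blk_mq_poll() above.
	 */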
7863 
7864 	sdbg_host->shost = hpnt;
7865 	*((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host;
7866 	if ((hpnt->this_id >= 0) && (sdebug_num_tgts > hpnt->this_id))
7867 		hpnt->max_id = sdebug_num_tgts + 1;
7868 	else
7869 		hpnt->max_id = sdebug_num_tgts;
7870 	/* = sdebug_max_luns; */
7871 	hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
7872 
7873 	hprot = 0;
7874 
7875 	switch (sdebug_dif) {
7876 
7877 	case T10_PI_TYPE1_PROTECTION:
7878 		hprot = SHOST_DIF_TYPE1_PROTECTION;
7879 		if (sdebug_dix)
7880 			hprot |= SHOST_DIX_TYPE1_PROTECTION;
7881 		break;
7882 
7883 	case T10_PI_TYPE2_PROTECTION:
7884 		hprot = SHOST_DIF_TYPE2_PROTECTION;
7885 		if (sdebug_dix)
7886 			hprot |= SHOST_DIX_TYPE2_PROTECTION;
7887 		break;
7888 
7889 	case T10_PI_TYPE3_PROTECTION:
7890 		hprot = SHOST_DIF_TYPE3_PROTECTION;
7891 		if (sdebug_dix)
7892 			hprot |= SHOST_DIX_TYPE3_PROTECTION;
7893 		break;
7894 
7895 	default:
7896 		if (sdebug_dix)
7897 			hprot |= SHOST_DIX_TYPE0_PROTECTION;
7898 		break;
7899 	}
7900 
7901 	scsi_host_set_prot(hpnt, hprot);
7902 
7903 	if (have_dif_prot || sdebug_dix)
7904 		pr_info("host protection%s%s%s%s%s%s%s\n",
7905 			(hprot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
7906 			(hprot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
7907 			(hprot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
7908 			(hprot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
7909 			(hprot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
7910 			(hprot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
7911 			(hprot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");
7912 
7913 	if (sdebug_guard == 1)
7914 		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
7915 	else
7916 		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);
7917 
7918 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & sdebug_opts);
7919 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & sdebug_opts);
7920 	if (sdebug_every_nth)	/* need stats counters for every_nth */
7921 		sdebug_statistics = true;
7922 	error = scsi_add_host(hpnt, &sdbg_host->dev);
7923 	if (error) {
7924 		pr_err("scsi_add_host failed\n");
7925 		error = -ENODEV;
7926 		scsi_host_put(hpnt);
7927 	} else {
7928 		scsi_scan_host(hpnt);
7929 	}
7930 
7931 	return error;
7932 }
7933 
7934 static void sdebug_driver_remove(struct device *dev)
7935 {
7936 	struct sdebug_host_info *sdbg_host;
7937 	struct sdebug_dev_info *sdbg_devinfo, *tmp;
7938 
7939 	sdbg_host = to_sdebug_host(dev);
7940 
7941 	scsi_remove_host(sdbg_host->shost);
7942 
7943 	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
7944 				 dev_list) {
7945 		list_del(&sdbg_devinfo->dev_list);
7946 		kfree(sdbg_devinfo->zstate);
7947 		kfree(sdbg_devinfo);
7948 	}
7949 
7950 	scsi_host_put(sdbg_host->shost);
7951 }
7952 
7953 static int pseudo_lld_bus_match(struct device *dev,
7954 				struct device_driver *dev_driver)
7955 {
7956 	return 1;
7957 }
7958 
7959 static struct bus_type pseudo_lld_bus = {
7960 	.name = "pseudo",
7961 	.match = pseudo_lld_bus_match,
7962 	.probe = sdebug_driver_probe,
7963 	.remove = sdebug_driver_remove,
7964 	.drv_groups = sdebug_drv_groups,
7965 };
7966