1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
4  *  Copyright (C) 1992  Eric Youngdale
5  *  Simulate a host adapter with 2 disks attached.  Do a lot of checking
6  *  to make sure that we are not getting blocks mixed up, and PANIC if
7  *  anything out of the ordinary is seen.
8  * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
9  *
10  * Copyright (C) 2001 - 2020 Douglas Gilbert
11  *
12  *  For documentation see http://sg.danny.cz/sg/scsi_debug.html
13  */
14 
15 
16 #define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
17 
18 #include <linux/module.h>
19 
20 #include <linux/kernel.h>
21 #include <linux/errno.h>
22 #include <linux/jiffies.h>
23 #include <linux/slab.h>
24 #include <linux/types.h>
25 #include <linux/string.h>
26 #include <linux/fs.h>
27 #include <linux/init.h>
28 #include <linux/proc_fs.h>
29 #include <linux/vmalloc.h>
30 #include <linux/moduleparam.h>
31 #include <linux/scatterlist.h>
32 #include <linux/blkdev.h>
33 #include <linux/crc-t10dif.h>
34 #include <linux/spinlock.h>
35 #include <linux/interrupt.h>
36 #include <linux/atomic.h>
37 #include <linux/hrtimer.h>
38 #include <linux/uuid.h>
39 #include <linux/t10-pi.h>
40 #include <linux/msdos_partition.h>
41 #include <linux/random.h>
42 #include <linux/xarray.h>
43 #include <linux/prefetch.h>
44 
45 #include <net/checksum.h>
46 
47 #include <asm/unaligned.h>
48 
49 #include <scsi/scsi.h>
50 #include <scsi/scsi_cmnd.h>
51 #include <scsi/scsi_device.h>
52 #include <scsi/scsi_host.h>
53 #include <scsi/scsicam.h>
54 #include <scsi/scsi_eh.h>
55 #include <scsi/scsi_tcq.h>
56 #include <scsi/scsi_dbg.h>
57 
58 #include "sd.h"
59 #include "scsi_logging.h"
60 
61 /* make sure inq_product_rev string corresponds to this version */
62 #define SDEBUG_VERSION "0190"	/* format to fit INQUIRY revision field */
63 static const char *sdebug_version_date = "20200710";
64 
65 #define MY_NAME "scsi_debug"
66 
67 /* Additional Sense Code (ASC) */
68 #define NO_ADDITIONAL_SENSE 0x0
69 #define LOGICAL_UNIT_NOT_READY 0x4
70 #define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
71 #define UNRECOVERED_READ_ERR 0x11
72 #define PARAMETER_LIST_LENGTH_ERR 0x1a
73 #define INVALID_OPCODE 0x20
74 #define LBA_OUT_OF_RANGE 0x21
75 #define INVALID_FIELD_IN_CDB 0x24
76 #define INVALID_FIELD_IN_PARAM_LIST 0x26
77 #define WRITE_PROTECTED 0x27
78 #define UA_RESET_ASC 0x29
79 #define UA_CHANGED_ASC 0x2a
80 #define TARGET_CHANGED_ASC 0x3f
81 #define LUNS_CHANGED_ASCQ 0x0e
82 #define INSUFF_RES_ASC 0x55
83 #define INSUFF_RES_ASCQ 0x3
84 #define POWER_ON_RESET_ASCQ 0x0
85 #define BUS_RESET_ASCQ 0x2	/* scsi bus reset occurred */
86 #define MODE_CHANGED_ASCQ 0x1	/* mode parameters changed */
87 #define CAPACITY_CHANGED_ASCQ 0x9
88 #define SAVING_PARAMS_UNSUP 0x39
89 #define TRANSPORT_PROBLEM 0x4b
90 #define THRESHOLD_EXCEEDED 0x5d
91 #define LOW_POWER_COND_ON 0x5e
92 #define MISCOMPARE_VERIFY_ASC 0x1d
93 #define MICROCODE_CHANGED_ASCQ 0x1	/* with TARGET_CHANGED_ASC */
94 #define MICROCODE_CHANGED_WO_RESET_ASCQ 0x16
95 #define WRITE_ERROR_ASC 0xc
96 #define UNALIGNED_WRITE_ASCQ 0x4
97 #define WRITE_BOUNDARY_ASCQ 0x5
98 #define READ_INVDATA_ASCQ 0x6
99 #define READ_BOUNDARY_ASCQ 0x7
100 #define INSUFF_ZONE_ASCQ 0xe
101 
102 /* Additional Sense Code Qualifier (ASCQ) */
103 #define ACK_NAK_TO 0x3
104 
105 /* Default values for driver parameters */
106 #define DEF_NUM_HOST   1
107 #define DEF_NUM_TGTS   1
108 #define DEF_MAX_LUNS   1
109 /* With these defaults, this driver will make 1 host with 1 target
110  * (id 0) containing 1 logical unit (lun 0). That is 1 device.
111  */
112 #define DEF_ATO 1
113 #define DEF_CDB_LEN 10
114 #define DEF_JDELAY   1		/* if > 0 unit is a jiffy */
115 #define DEF_DEV_SIZE_PRE_INIT   0
116 #define DEF_DEV_SIZE_MB   8
117 #define DEF_ZBC_DEV_SIZE_MB   128
118 #define DEF_DIF 0
119 #define DEF_DIX 0
120 #define DEF_PER_HOST_STORE false
121 #define DEF_D_SENSE   0
122 #define DEF_EVERY_NTH   0
123 #define DEF_FAKE_RW	0
124 #define DEF_GUARD 0
125 #define DEF_HOST_LOCK 0
126 #define DEF_LBPU 0
127 #define DEF_LBPWS 0
128 #define DEF_LBPWS10 0
129 #define DEF_LBPRZ 1
130 #define DEF_LOWEST_ALIGNED 0
131 #define DEF_NDELAY   0		/* if > 0 unit is a nanosecond */
132 #define DEF_NO_LUN_0   0
133 #define DEF_NUM_PARTS   0
134 #define DEF_OPTS   0
135 #define DEF_OPT_BLKS 1024
136 #define DEF_PHYSBLK_EXP 0
137 #define DEF_OPT_XFERLEN_EXP 0
138 #define DEF_PTYPE   TYPE_DISK
139 #define DEF_RANDOM false
140 #define DEF_REMOVABLE false
141 #define DEF_SCSI_LEVEL   7    /* INQUIRY, byte2 [6->SPC-4; 7->SPC-5] */
142 #define DEF_SECTOR_SIZE 512
143 #define DEF_UNMAP_ALIGNMENT 0
144 #define DEF_UNMAP_GRANULARITY 1
145 #define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
146 #define DEF_UNMAP_MAX_DESC 256
147 #define DEF_VIRTUAL_GB   0
148 #define DEF_VPD_USE_HOSTNO 1
149 #define DEF_WRITESAME_LENGTH 0xFFFF
150 #define DEF_STRICT 0
151 #define DEF_STATISTICS false
152 #define DEF_SUBMIT_QUEUES 1
153 #define DEF_TUR_MS_TO_READY 0
154 #define DEF_UUID_CTL 0
155 #define JDELAY_OVERRIDDEN -9999
156 
157 /* Default parameters for ZBC drives */
158 #define DEF_ZBC_ZONE_SIZE_MB	128
159 #define DEF_ZBC_MAX_OPEN_ZONES	8
160 #define DEF_ZBC_NR_CONV_ZONES	1
161 
162 #define SDEBUG_LUN_0_VAL 0
163 
164 /* bit mask values for sdebug_opts */
165 #define SDEBUG_OPT_NOISE		1
166 #define SDEBUG_OPT_MEDIUM_ERR		2
167 #define SDEBUG_OPT_TIMEOUT		4
168 #define SDEBUG_OPT_RECOVERED_ERR	8
169 #define SDEBUG_OPT_TRANSPORT_ERR	16
170 #define SDEBUG_OPT_DIF_ERR		32
171 #define SDEBUG_OPT_DIX_ERR		64
172 #define SDEBUG_OPT_MAC_TIMEOUT		128
173 #define SDEBUG_OPT_SHORT_TRANSFER	0x100
174 #define SDEBUG_OPT_Q_NOISE		0x200
175 #define SDEBUG_OPT_ALL_TSF		0x400
176 #define SDEBUG_OPT_RARE_TSF		0x800
177 #define SDEBUG_OPT_N_WCE		0x1000
178 #define SDEBUG_OPT_RESET_NOISE		0x2000
179 #define SDEBUG_OPT_NO_CDB_NOISE		0x4000
180 #define SDEBUG_OPT_HOST_BUSY		0x8000
181 #define SDEBUG_OPT_CMD_ABORT		0x10000
182 #define SDEBUG_OPT_ALL_NOISE (SDEBUG_OPT_NOISE | SDEBUG_OPT_Q_NOISE | \
183 			      SDEBUG_OPT_RESET_NOISE)
184 #define SDEBUG_OPT_ALL_INJECTING (SDEBUG_OPT_RECOVERED_ERR | \
185 				  SDEBUG_OPT_TRANSPORT_ERR | \
186 				  SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR | \
187 				  SDEBUG_OPT_SHORT_TRANSFER | \
188 				  SDEBUG_OPT_HOST_BUSY | \
189 				  SDEBUG_OPT_CMD_ABORT)
190 #define SDEBUG_OPT_RECOV_DIF_DIX (SDEBUG_OPT_RECOVERED_ERR | \
191 				  SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR)
192 
/* As indicated in SAM-5 and SPC-4, Unit Attentions (UAs) are returned in
 * priority order. In the subset implemented here, lower numbers have higher
 * priority. The UA numbers should be a sequence starting from 0 with
 * SDEBUG_NUM_UAS being 1 higher than the highest numbered UA. */
197 #define SDEBUG_UA_POR 0		/* Power on, reset, or bus device reset */
198 #define SDEBUG_UA_BUS_RESET 1
199 #define SDEBUG_UA_MODE_CHANGED 2
200 #define SDEBUG_UA_CAPACITY_CHANGED 3
201 #define SDEBUG_UA_LUNS_CHANGED 4
202 #define SDEBUG_UA_MICROCODE_CHANGED 5	/* simulate firmware change */
203 #define SDEBUG_UA_MICROCODE_CHANGED_WO_RESET 6
204 #define SDEBUG_NUM_UAS 7
205 
/* When SDEBUG_OPT_MEDIUM_ERR is set in sdebug_opts, a medium error is
 * simulated at this sector on read commands: */
208 #define OPT_MEDIUM_ERR_ADDR   0x1234 /* that's sector 4660 in decimal */
209 #define OPT_MEDIUM_ERR_NUM    10     /* number of consecutive medium errs */
210 
/* SDEBUG_CANQUEUE is the maximum number of commands that can be queued
 * (for response) per submit queue at one time. Can be reduced by the
 * max_queue option. Command responses are not queued when jdelay=0 and
 * ndelay=0. The per-device DEF_CMD_PER_LUN can be changed via sysfs:
 * /sys/class/scsi_device/<h:c:t:l>/device/queue_depth
 * but cannot exceed SDEBUG_CANQUEUE.
 */
#define SDEBUG_CANQUEUE_WORDS  3	/* a "word" here is BITS_PER_LONG bits */
219 #define SDEBUG_CANQUEUE  (SDEBUG_CANQUEUE_WORDS * BITS_PER_LONG)
220 #define DEF_CMD_PER_LUN  SDEBUG_CANQUEUE
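
/*
 * For example (h:c:t:l value illustrative), the queue depth of the first
 * scsi_debug device could be lowered at run time with:
 *     echo 4 > /sys/class/scsi_device/0:0:0:0/device/queue_depth
 */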
221 
222 /* UA - Unit Attention; SA - Service Action; SSU - Start Stop Unit */
223 #define F_D_IN			1	/* Data-in command (e.g. READ) */
224 #define F_D_OUT			2	/* Data-out command (e.g. WRITE) */
225 #define F_D_OUT_MAYBE		4	/* WRITE SAME, NDOB bit */
226 #define F_D_UNKN		8
227 #define F_RL_WLUN_OK		0x10	/* allowed with REPORT LUNS W-LUN */
228 #define F_SKIP_UA		0x20	/* bypass UAs (e.g. INQUIRY command) */
229 #define F_DELAY_OVERR		0x40	/* for commands like INQUIRY */
230 #define F_SA_LOW		0x80	/* SA is in cdb byte 1, bits 4 to 0 */
231 #define F_SA_HIGH		0x100	/* SA is in cdb bytes 8 and 9 */
232 #define F_INV_OP		0x200	/* invalid opcode (not supported) */
233 #define F_FAKE_RW		0x400	/* bypass resp_*() when fake_rw set */
234 #define F_M_ACCESS		0x800	/* media access, reacts to SSU state */
235 #define F_SSU_DELAY		0x1000	/* SSU command delay (long-ish) */
236 #define F_SYNC_DELAY		0x2000	/* SYNCHRONIZE CACHE delay */
237 
238 /* Useful combinations of the above flags */
239 #define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
240 #define FF_MEDIA_IO (F_M_ACCESS | F_FAKE_RW)
241 #define FF_SA (F_SA_HIGH | F_SA_LOW)
242 #define F_LONG_DELAY		(F_SSU_DELAY | F_SYNC_DELAY)
243 
244 #define SDEBUG_MAX_PARTS 4
245 
246 #define SDEBUG_MAX_CMD_LEN 32
247 
248 #define SDEB_XA_NOT_IN_USE XA_MARK_1
249 
250 /* Zone types (zbcr05 table 25) */
251 enum sdebug_z_type {
252 	ZBC_ZONE_TYPE_CNV	= 0x1,
253 	ZBC_ZONE_TYPE_SWR	= 0x2,
254 	ZBC_ZONE_TYPE_SWP	= 0x3,
255 };
256 
257 /* enumeration names taken from table 26, zbcr05 */
258 enum sdebug_z_cond {
259 	ZBC_NOT_WRITE_POINTER	= 0x0,
260 	ZC1_EMPTY		= 0x1,
261 	ZC2_IMPLICIT_OPEN	= 0x2,
262 	ZC3_EXPLICIT_OPEN	= 0x3,
263 	ZC4_CLOSED		= 0x4,
264 	ZC6_READ_ONLY		= 0xd,
265 	ZC5_FULL		= 0xe,
266 	ZC7_OFFLINE		= 0xf,
267 };
268 
269 struct sdeb_zone_state {	/* ZBC: per zone state */
270 	enum sdebug_z_type z_type;
271 	enum sdebug_z_cond z_cond;
272 	bool z_non_seq_resource;
273 	unsigned int z_size;
274 	sector_t z_start;
275 	sector_t z_wp;
276 };
277 
278 struct sdebug_dev_info {
279 	struct list_head dev_list;
280 	unsigned int channel;
281 	unsigned int target;
282 	u64 lun;
283 	uuid_t lu_name;
284 	struct sdebug_host_info *sdbg_host;
285 	unsigned long uas_bm[1];
286 	atomic_t num_in_q;
287 	atomic_t stopped;	/* 1: by SSU, 2: device start */
288 	bool used;
289 
290 	/* For ZBC devices */
291 	enum blk_zoned_model zmodel;
292 	unsigned int zsize;
293 	unsigned int zsize_shift;
294 	unsigned int nr_zones;
295 	unsigned int nr_conv_zones;
296 	unsigned int nr_imp_open;
297 	unsigned int nr_exp_open;
298 	unsigned int nr_closed;
299 	unsigned int max_open;
300 	ktime_t create_ts;	/* time since bootup that this device was created */
301 	struct sdeb_zone_state *zstate;
302 };
303 
304 struct sdebug_host_info {
305 	struct list_head host_list;
306 	int si_idx;	/* sdeb_store_info (per host) xarray index */
307 	struct Scsi_Host *shost;
308 	struct device dev;
309 	struct list_head dev_info_list;
310 };
311 
312 /* There is an xarray of pointers to this struct's objects, one per host */
313 struct sdeb_store_info {
314 	rwlock_t macc_lck;	/* for atomic media access on this store */
315 	u8 *storep;		/* user data storage (ram) */
316 	struct t10_pi_tuple *dif_storep; /* protection info */
317 	void *map_storep;	/* provisioning map */
318 };
319 
320 #define to_sdebug_host(d)	\
321 	container_of(d, struct sdebug_host_info, dev)
322 
323 enum sdeb_defer_type {SDEB_DEFER_NONE = 0, SDEB_DEFER_HRT = 1,
324 		      SDEB_DEFER_WQ = 2, SDEB_DEFER_POLL = 3};
325 
326 struct sdebug_defer {
327 	struct hrtimer hrt;
328 	struct execute_work ew;
329 	ktime_t cmpl_ts;/* time since boot to complete this cmd */
330 	int sqa_idx;	/* index of sdebug_queue array */
331 	int qc_idx;	/* index of sdebug_queued_cmd array within sqa_idx */
332 	int hc_idx;	/* hostwide tag index */
333 	int issuing_cpu;
334 	bool init_hrt;
335 	bool init_wq;
336 	bool init_poll;
337 	bool aborted;	/* true when blk_abort_request() already called */
338 	enum sdeb_defer_type defer_t;
339 };
340 
341 struct sdebug_queued_cmd {
342 	/* corresponding bit set in in_use_bm[] in owning struct sdebug_queue
343 	 * instance indicates this slot is in use.
344 	 */
345 	struct sdebug_defer *sd_dp;
346 	struct scsi_cmnd *a_cmnd;
347 };
348 
349 struct sdebug_queue {
350 	struct sdebug_queued_cmd qc_arr[SDEBUG_CANQUEUE];
351 	unsigned long in_use_bm[SDEBUG_CANQUEUE_WORDS];
352 	spinlock_t qc_lock;
353 	atomic_t blocked;	/* to temporarily stop more being queued */
354 };
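
/*
 * Sizing note: with SDEBUG_CANQUEUE_WORDS set to 3 on a 64-bit build,
 * SDEBUG_CANQUEUE is 3 * 64 = 192, so qc_arr[] holds up to 192 in-flight
 * commands per submit queue and in_use_bm[] tracks one bit per slot.
 */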
355 
356 static atomic_t sdebug_cmnd_count;   /* number of incoming commands */
357 static atomic_t sdebug_completions;  /* count of deferred completions */
358 static atomic_t sdebug_miss_cpus;    /* submission + completion cpus differ */
359 static atomic_t sdebug_a_tsf;	     /* 'almost task set full' counter */
360 static atomic_t sdeb_inject_pending;
361 static atomic_t sdeb_mq_poll_count;  /* bumped when mq_poll returns > 0 */
362 
struct opcode_info_t {
	u8 num_attached;	/* 0 if this is a leaf (i.e. no overflow */
				/* array); 0xff marks a terminating element */
	u8 opcode;		/* if num_attached > 0, the preferred opcode */
	u16 sa;			/* service action */
	u32 flags;		/* OR-ed set of F_* flags defined above */
	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
	const struct opcode_info_t *arrp;  /* num_attached elements or NULL */
	u8 len_mask[16];	/* len_mask[0]-->cdb_len, then mask for cdb */
				/* bytes 1 to min(cdb_len, 15); cdb[15...] */
				/* are ignored */
};
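
/*
 * Worked example (taken from the INQUIRY entry in opcode_info_arr below):
 * its len_mask is {6, 0xe3, 0xff, 0xff, 0xff, 0xc7, 0, ...}. len_mask[0]=6
 * says the cdb is 6 bytes long; the 0xe3 mask for cdb byte 1 permits only
 * bits 7..5, 1 and 0 (e.g. the EVPD bit) to be set. With the strict
 * parameter set, a cdb bit that is clear in its mask draws a sense of
 * INVALID FIELD IN CDB.
 */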
374 
375 /* SCSI opcodes (first byte of cdb) of interest mapped onto these indexes */
376 enum sdeb_opcode_index {
377 	SDEB_I_INVALID_OPCODE =	0,
378 	SDEB_I_INQUIRY = 1,
379 	SDEB_I_REPORT_LUNS = 2,
380 	SDEB_I_REQUEST_SENSE = 3,
381 	SDEB_I_TEST_UNIT_READY = 4,
382 	SDEB_I_MODE_SENSE = 5,		/* 6, 10 */
383 	SDEB_I_MODE_SELECT = 6,		/* 6, 10 */
384 	SDEB_I_LOG_SENSE = 7,
385 	SDEB_I_READ_CAPACITY = 8,	/* 10; 16 is in SA_IN(16) */
386 	SDEB_I_READ = 9,		/* 6, 10, 12, 16 */
387 	SDEB_I_WRITE = 10,		/* 6, 10, 12, 16 */
388 	SDEB_I_START_STOP = 11,
389 	SDEB_I_SERV_ACT_IN_16 = 12,	/* add ...SERV_ACT_IN_12 if needed */
390 	SDEB_I_SERV_ACT_OUT_16 = 13,	/* add ...SERV_ACT_OUT_12 if needed */
391 	SDEB_I_MAINT_IN = 14,
392 	SDEB_I_MAINT_OUT = 15,
393 	SDEB_I_VERIFY = 16,		/* VERIFY(10), VERIFY(16) */
394 	SDEB_I_VARIABLE_LEN = 17,	/* READ(32), WRITE(32), WR_SCAT(32) */
395 	SDEB_I_RESERVE = 18,		/* 6, 10 */
396 	SDEB_I_RELEASE = 19,		/* 6, 10 */
397 	SDEB_I_ALLOW_REMOVAL = 20,	/* PREVENT ALLOW MEDIUM REMOVAL */
398 	SDEB_I_REZERO_UNIT = 21,	/* REWIND in SSC */
399 	SDEB_I_ATA_PT = 22,		/* 12, 16 */
400 	SDEB_I_SEND_DIAG = 23,
401 	SDEB_I_UNMAP = 24,
402 	SDEB_I_WRITE_BUFFER = 25,
403 	SDEB_I_WRITE_SAME = 26,		/* 10, 16 */
404 	SDEB_I_SYNC_CACHE = 27,		/* 10, 16 */
405 	SDEB_I_COMP_WRITE = 28,
406 	SDEB_I_PRE_FETCH = 29,		/* 10, 16 */
407 	SDEB_I_ZONE_OUT = 30,		/* 0x94+SA; includes no data xfer */
408 	SDEB_I_ZONE_IN = 31,		/* 0x95+SA; all have data-in */
409 	SDEB_I_LAST_ELEM_P1 = 32,	/* keep this last (previous + 1) */
410 };
411 
412 
413 static const unsigned char opcode_ind_arr[256] = {
414 /* 0x0; 0x0->0x1f: 6 byte cdbs */
415 	SDEB_I_TEST_UNIT_READY, SDEB_I_REZERO_UNIT, 0, SDEB_I_REQUEST_SENSE,
416 	    0, 0, 0, 0,
417 	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, 0,
418 	0, 0, SDEB_I_INQUIRY, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
419 	    SDEB_I_RELEASE,
420 	0, 0, SDEB_I_MODE_SENSE, SDEB_I_START_STOP, 0, SDEB_I_SEND_DIAG,
421 	    SDEB_I_ALLOW_REMOVAL, 0,
422 /* 0x20; 0x20->0x3f: 10 byte cdbs */
423 	0, 0, 0, 0, 0, SDEB_I_READ_CAPACITY, 0, 0,
424 	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, SDEB_I_VERIFY,
425 	0, 0, 0, 0, SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, 0,
426 	0, 0, 0, SDEB_I_WRITE_BUFFER, 0, 0, 0, 0,
427 /* 0x40; 0x40->0x5f: 10 byte cdbs */
428 	0, SDEB_I_WRITE_SAME, SDEB_I_UNMAP, 0, 0, 0, 0, 0,
429 	0, 0, 0, 0, 0, SDEB_I_LOG_SENSE, 0, 0,
430 	0, 0, 0, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
431 	    SDEB_I_RELEASE,
432 	0, 0, SDEB_I_MODE_SENSE, 0, 0, 0, 0, 0,
433 /* 0x60; 0x60->0x7d are reserved, 0x7e is "extended cdb" */
434 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
435 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
436 	0, SDEB_I_VARIABLE_LEN,
437 /* 0x80; 0x80->0x9f: 16 byte cdbs */
438 	0, 0, 0, 0, 0, SDEB_I_ATA_PT, 0, 0,
439 	SDEB_I_READ, SDEB_I_COMP_WRITE, SDEB_I_WRITE, 0,
440 	0, 0, 0, SDEB_I_VERIFY,
441 	SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, SDEB_I_WRITE_SAME,
442 	SDEB_I_ZONE_OUT, SDEB_I_ZONE_IN, 0, 0,
443 	0, 0, 0, 0, 0, 0, SDEB_I_SERV_ACT_IN_16, SDEB_I_SERV_ACT_OUT_16,
444 /* 0xa0; 0xa0->0xbf: 12 byte cdbs */
445 	SDEB_I_REPORT_LUNS, SDEB_I_ATA_PT, 0, SDEB_I_MAINT_IN,
446 	     SDEB_I_MAINT_OUT, 0, 0, 0,
447 	SDEB_I_READ, 0 /* SDEB_I_SERV_ACT_OUT_12 */, SDEB_I_WRITE,
448 	     0 /* SDEB_I_SERV_ACT_IN_12 */, 0, 0, 0, 0,
449 	0, 0, 0, 0, 0, 0, 0, 0,
450 	0, 0, 0, 0, 0, 0, 0, 0,
451 /* 0xc0; 0xc0->0xff: vendor specific */
452 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
453 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
454 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
455 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
456 };
457 
/*
 * The following "response" functions return the SCSI mid-level's 4 byte
 * tuple-in-an-int. To handle commands with an IMMED bit, for faster
 * command completion, they can OR their return value with
 * SDEG_RES_IMMED_MASK.
 */
464 #define SDEG_RES_IMMED_MASK 0x40000000
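
/*
 * For instance (sketch only), a handler that honours a set IMMED bit
 * might "return res | SDEG_RES_IMMED_MASK;" so that its status is
 * reported without the usual simulated delay.
 */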
465 
466 static int resp_inquiry(struct scsi_cmnd *, struct sdebug_dev_info *);
467 static int resp_report_luns(struct scsi_cmnd *, struct sdebug_dev_info *);
468 static int resp_requests(struct scsi_cmnd *, struct sdebug_dev_info *);
469 static int resp_mode_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
470 static int resp_mode_select(struct scsi_cmnd *, struct sdebug_dev_info *);
471 static int resp_log_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
472 static int resp_readcap(struct scsi_cmnd *, struct sdebug_dev_info *);
473 static int resp_read_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
474 static int resp_write_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
475 static int resp_write_scat(struct scsi_cmnd *, struct sdebug_dev_info *);
476 static int resp_start_stop(struct scsi_cmnd *, struct sdebug_dev_info *);
477 static int resp_readcap16(struct scsi_cmnd *, struct sdebug_dev_info *);
478 static int resp_get_lba_status(struct scsi_cmnd *, struct sdebug_dev_info *);
479 static int resp_report_tgtpgs(struct scsi_cmnd *, struct sdebug_dev_info *);
480 static int resp_unmap(struct scsi_cmnd *, struct sdebug_dev_info *);
481 static int resp_rsup_opcodes(struct scsi_cmnd *, struct sdebug_dev_info *);
482 static int resp_rsup_tmfs(struct scsi_cmnd *, struct sdebug_dev_info *);
483 static int resp_verify(struct scsi_cmnd *, struct sdebug_dev_info *);
484 static int resp_write_same_10(struct scsi_cmnd *, struct sdebug_dev_info *);
485 static int resp_write_same_16(struct scsi_cmnd *, struct sdebug_dev_info *);
486 static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *);
487 static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *);
488 static int resp_sync_cache(struct scsi_cmnd *, struct sdebug_dev_info *);
489 static int resp_pre_fetch(struct scsi_cmnd *, struct sdebug_dev_info *);
490 static int resp_report_zones(struct scsi_cmnd *, struct sdebug_dev_info *);
491 static int resp_open_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
492 static int resp_close_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
493 static int resp_finish_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
494 static int resp_rwp_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
495 
496 static int sdebug_do_add_host(bool mk_new_store);
497 static int sdebug_add_host_helper(int per_host_idx);
498 static void sdebug_do_remove_host(bool the_end);
499 static int sdebug_add_store(void);
500 static void sdebug_erase_store(int idx, struct sdeb_store_info *sip);
501 static void sdebug_erase_all_stores(bool apart_from_first);
502 
503 /*
504  * The following are overflow arrays for cdbs that "hit" the same index in
505  * the opcode_info_arr array. The most time sensitive (or commonly used) cdb
506  * should be placed in opcode_info_arr[], the others should be placed here.
507  */
508 static const struct opcode_info_t msense_iarr[] = {
509 	{0, 0x1a, 0, F_D_IN, NULL, NULL,
510 	    {6,  0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
511 };
512 
513 static const struct opcode_info_t mselect_iarr[] = {
514 	{0, 0x15, 0, F_D_OUT, NULL, NULL,
515 	    {6,  0xf1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
516 };
517 
518 static const struct opcode_info_t read_iarr[] = {
519 	{0, 0x28, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(10) */
520 	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
521 	     0, 0, 0, 0} },
522 	{0, 0x8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL, /* READ(6) */
523 	    {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
524 	{0, 0xa8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(12) */
525 	    {12,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf,
526 	     0xc7, 0, 0, 0, 0} },
527 };
528 
529 static const struct opcode_info_t write_iarr[] = {
530 	{0, 0x2a, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(10) */
531 	    NULL, {10,  0xfb, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7,
532 		   0, 0, 0, 0, 0, 0} },
533 	{0, 0xa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,   /* WRITE(6) */
534 	    NULL, {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0,
535 		   0, 0, 0} },
536 	{0, 0xaa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(12) */
537 	    NULL, {12,  0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
538 		   0xbf, 0xc7, 0, 0, 0, 0} },
539 };
540 
541 static const struct opcode_info_t verify_iarr[] = {
542 	{0, 0x2f, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,/* VERIFY(10) */
543 	    NULL, {10,  0xf7, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xff, 0xff, 0xc7,
544 		   0, 0, 0, 0, 0, 0} },
545 };
546 
547 static const struct opcode_info_t sa_in_16_iarr[] = {
548 	{0, 0x9e, 0x12, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL,
549 	    {16,  0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
550 	     0xff, 0xff, 0xff, 0, 0xc7} },	/* GET LBA STATUS(16) */
551 };
552 
553 static const struct opcode_info_t vl_iarr[] = {	/* VARIABLE LENGTH */
554 	{0, 0x7f, 0xb, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_dt0,
555 	    NULL, {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0xb, 0xfa,
556 		   0, 0xff, 0xff, 0xff, 0xff} },	/* WRITE(32) */
557 	{0, 0x7f, 0x11, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
558 	    NULL, {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x11, 0xf8,
559 		   0, 0xff, 0xff, 0x0, 0x0} },	/* WRITE SCATTERED(32) */
560 };
561 
562 static const struct opcode_info_t maint_in_iarr[] = {	/* MAINT IN */
563 	{0, 0xa3, 0xc, F_SA_LOW | F_D_IN, resp_rsup_opcodes, NULL,
564 	    {12,  0xc, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
565 	     0xc7, 0, 0, 0, 0} }, /* REPORT SUPPORTED OPERATION CODES */
566 	{0, 0xa3, 0xd, F_SA_LOW | F_D_IN, resp_rsup_tmfs, NULL,
567 	    {12,  0xd, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
	     0, 0} },	/* REPORT SUPPORTED TASK MANAGEMENT FUNCTIONS */
569 };
570 
571 static const struct opcode_info_t write_same_iarr[] = {
572 	{0, 0x93, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_write_same_16, NULL,
573 	    {16,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
574 	     0xff, 0xff, 0xff, 0x3f, 0xc7} },		/* WRITE SAME(16) */
575 };
576 
577 static const struct opcode_info_t reserve_iarr[] = {
578 	{0, 0x16, 0, F_D_OUT, NULL, NULL,		/* RESERVE(6) */
579 	    {6,  0x1f, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
580 };
581 
582 static const struct opcode_info_t release_iarr[] = {
583 	{0, 0x17, 0, F_D_OUT, NULL, NULL,		/* RELEASE(6) */
584 	    {6,  0x1f, 0xff, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
585 };
586 
587 static const struct opcode_info_t sync_cache_iarr[] = {
588 	{0, 0x91, 0, F_SYNC_DELAY | F_M_ACCESS, resp_sync_cache, NULL,
589 	    {16,  0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
590 	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* SYNC_CACHE (16) */
591 };
592 
593 static const struct opcode_info_t pre_fetch_iarr[] = {
594 	{0, 0x90, 0, F_SYNC_DELAY | FF_MEDIA_IO, resp_pre_fetch, NULL,
595 	    {16,  0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
596 	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* PRE-FETCH (16) */
597 };
598 
599 static const struct opcode_info_t zone_out_iarr[] = {	/* ZONE OUT(16) */
600 	{0, 0x94, 0x1, F_SA_LOW | F_M_ACCESS, resp_close_zone, NULL,
601 	    {16, 0x1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
602 	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },	/* CLOSE ZONE */
603 	{0, 0x94, 0x2, F_SA_LOW | F_M_ACCESS, resp_finish_zone, NULL,
604 	    {16, 0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
605 	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },	/* FINISH ZONE */
606 	{0, 0x94, 0x4, F_SA_LOW | F_M_ACCESS, resp_rwp_zone, NULL,
607 	    {16, 0x4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
608 	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },  /* RESET WRITE POINTER */
609 };
610 
611 static const struct opcode_info_t zone_in_iarr[] = {	/* ZONE IN(16) */
612 	{0, 0x95, 0x6, F_SA_LOW | F_D_IN | F_M_ACCESS, NULL, NULL,
613 	    {16, 0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
614 	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* REPORT ZONES */
615 };
616 
617 
/* This array is accessed via SDEB_I_* values. Make sure all SDEB_I_*
 * values are mapped, plus the terminating element, for logic that scans
 * this table such as REPORT SUPPORTED OPERATION CODES. */
621 static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEM_P1 + 1] = {
622 /* 0 */
623 	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL,	/* unknown opcodes */
624 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
625 	{0, 0x12, 0, FF_RESPOND | F_D_IN, resp_inquiry, NULL, /* INQUIRY */
626 	    {6,  0xe3, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
627 	{0, 0xa0, 0, FF_RESPOND | F_D_IN, resp_report_luns, NULL,
628 	    {12,  0xe3, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
629 	     0, 0} },					/* REPORT LUNS */
630 	{0, 0x3, 0, FF_RESPOND | F_D_IN, resp_requests, NULL,
631 	    {6,  0xe1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
632 	{0, 0x0, 0, F_M_ACCESS | F_RL_WLUN_OK, NULL, NULL,/* TEST UNIT READY */
633 	    {6,  0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
634 /* 5 */
635 	{ARRAY_SIZE(msense_iarr), 0x5a, 0, F_D_IN,	/* MODE SENSE(10) */
636 	    resp_mode_sense, msense_iarr, {10,  0xf8, 0xff, 0xff, 0, 0, 0,
637 		0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
638 	{ARRAY_SIZE(mselect_iarr), 0x55, 0, F_D_OUT,	/* MODE SELECT(10) */
639 	    resp_mode_select, mselect_iarr, {10,  0xf1, 0, 0, 0, 0, 0, 0xff,
640 		0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
641 	{0, 0x4d, 0, F_D_IN, resp_log_sense, NULL,	/* LOG SENSE */
642 	    {10,  0xe3, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0,
643 	     0, 0, 0} },
644 	{0, 0x25, 0, F_D_IN, resp_readcap, NULL,    /* READ CAPACITY(10) */
645 	    {10,  0xe1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0x1, 0xc7, 0, 0, 0, 0,
646 	     0, 0} },
647 	{ARRAY_SIZE(read_iarr), 0x88, 0, F_D_IN | FF_MEDIA_IO, /* READ(16) */
648 	    resp_read_dt0, read_iarr, {16,  0xfe, 0xff, 0xff, 0xff, 0xff,
649 	    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
650 /* 10 */
651 	{ARRAY_SIZE(write_iarr), 0x8a, 0, F_D_OUT | FF_MEDIA_IO,
652 	    resp_write_dt0, write_iarr,			/* WRITE(16) */
653 		{16,  0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
654 		 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
655 	{0, 0x1b, 0, F_SSU_DELAY, resp_start_stop, NULL,/* START STOP UNIT */
656 	    {6,  0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
657 	{ARRAY_SIZE(sa_in_16_iarr), 0x9e, 0x10, F_SA_LOW | F_D_IN,
658 	    resp_readcap16, sa_in_16_iarr, /* SA_IN(16), READ CAPACITY(16) */
659 		{16,  0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
660 		 0xff, 0xff, 0xff, 0xff, 0x1, 0xc7} },
661 	{0, 0x9f, 0x12, F_SA_LOW | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
662 	    NULL, {16,  0x12, 0xf9, 0x0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff,
663 	    0xff, 0xff, 0xff, 0xff, 0xc7} },  /* SA_OUT(16), WRITE SCAT(16) */
664 	{ARRAY_SIZE(maint_in_iarr), 0xa3, 0xa, F_SA_LOW | F_D_IN,
665 	    resp_report_tgtpgs,	/* MAINT IN, REPORT TARGET PORT GROUPS */
666 		maint_in_iarr, {12,  0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff,
667 				0xff, 0, 0xc7, 0, 0, 0, 0} },
668 /* 15 */
669 	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */
670 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
671 	{ARRAY_SIZE(verify_iarr), 0x8f, 0,
672 	    F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,	/* VERIFY(16) */
673 	    verify_iarr, {16,  0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
674 			  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },
675 	{ARRAY_SIZE(vl_iarr), 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_MEDIA_IO,
676 	    resp_read_dt0, vl_iarr,	/* VARIABLE LENGTH, READ(32) */
677 	    {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x9, 0xfe, 0, 0xff, 0xff,
678 	     0xff, 0xff} },
679 	{ARRAY_SIZE(reserve_iarr), 0x56, 0, F_D_OUT,
680 	    NULL, reserve_iarr,	/* RESERVE(10) <no response function> */
681 	    {10,  0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
682 	     0} },
683 	{ARRAY_SIZE(release_iarr), 0x57, 0, F_D_OUT,
684 	    NULL, release_iarr, /* RELEASE(10) <no response function> */
685 	    {10,  0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
686 	     0} },
687 /* 20 */
688 	{0, 0x1e, 0, 0, NULL, NULL, /* ALLOW REMOVAL */
689 	    {6,  0, 0, 0, 0x3, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
690 	{0, 0x1, 0, 0, resp_start_stop, NULL, /* REWIND ?? */
691 	    {6,  0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
692 	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */
693 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x1d, 0, F_D_OUT, NULL, NULL,	/* SEND DIAGNOSTIC */
695 	    {6,  0xf7, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
696 	{0, 0x42, 0, F_D_OUT | FF_MEDIA_IO, resp_unmap, NULL, /* UNMAP */
697 	    {10,  0x1, 0, 0, 0, 0, 0x3f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
698 /* 25 */
699 	{0, 0x3b, 0, F_D_OUT_MAYBE, resp_write_buffer, NULL,
700 	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0,
701 	     0, 0, 0, 0} },			/* WRITE_BUFFER */
702 	{ARRAY_SIZE(write_same_iarr), 0x41, 0, F_D_OUT_MAYBE | FF_MEDIA_IO,
703 	    resp_write_same_10, write_same_iarr,	/* WRITE SAME(10) */
704 		{10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0,
705 		 0, 0, 0, 0, 0} },
706 	{ARRAY_SIZE(sync_cache_iarr), 0x35, 0, F_SYNC_DELAY | F_M_ACCESS,
707 	    resp_sync_cache, sync_cache_iarr,
708 	    {10,  0x7, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
709 	     0, 0, 0, 0} },			/* SYNC_CACHE (10) */
710 	{0, 0x89, 0, F_D_OUT | FF_MEDIA_IO, resp_comp_write, NULL,
711 	    {16,  0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0,
712 	     0, 0xff, 0x3f, 0xc7} },		/* COMPARE AND WRITE */
713 	{ARRAY_SIZE(pre_fetch_iarr), 0x34, 0, F_SYNC_DELAY | FF_MEDIA_IO,
714 	    resp_pre_fetch, pre_fetch_iarr,
715 	    {10,  0x2, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
716 	     0, 0, 0, 0} },			/* PRE-FETCH (10) */
717 
718 /* 30 */
719 	{ARRAY_SIZE(zone_out_iarr), 0x94, 0x3, F_SA_LOW | F_M_ACCESS,
720 	    resp_open_zone, zone_out_iarr, /* ZONE_OUT(16), OPEN ZONE) */
721 		{16,  0x3 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
722 		 0xff, 0xff, 0x0, 0x0, 0xff, 0xff, 0x1, 0xc7} },
723 	{ARRAY_SIZE(zone_in_iarr), 0x95, 0x0, F_SA_LOW | F_M_ACCESS,
724 	    resp_report_zones, zone_in_iarr, /* ZONE_IN(16), REPORT ZONES) */
725 		{16,  0x0 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
726 		 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xc7} },
727 /* sentinel */
728 	{0xff, 0, 0, 0, NULL, NULL,		/* terminating element */
729 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
730 };
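
/*
 * Illustrative sketch (hypothetical helper, not called by this driver) of
 * the two-level decode applied to an incoming cdb: the opcode (cdb[0])
 * indexes opcode_ind_arr[] to yield a SDEB_I_* value, which in turn
 * indexes opcode_info_arr[]. Commands with service actions then search
 * the entry's arrp[] overflow array for a matching sa field.
 */
static inline const struct opcode_info_t *
sdeb_example_decode(const u8 *cdb)
{
	int idx = opcode_ind_arr[cdb[0]];

	return &opcode_info_arr[idx];	/* leaf entry or head of iarr chain */
}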
731 
732 static int sdebug_num_hosts;
733 static int sdebug_add_host = DEF_NUM_HOST;  /* in sysfs this is relative */
734 static int sdebug_ato = DEF_ATO;
735 static int sdebug_cdb_len = DEF_CDB_LEN;
736 static int sdebug_jdelay = DEF_JDELAY;	/* if > 0 then unit is jiffies */
737 static int sdebug_dev_size_mb = DEF_DEV_SIZE_PRE_INIT;
738 static int sdebug_dif = DEF_DIF;
739 static int sdebug_dix = DEF_DIX;
740 static int sdebug_dsense = DEF_D_SENSE;
741 static int sdebug_every_nth = DEF_EVERY_NTH;
742 static int sdebug_fake_rw = DEF_FAKE_RW;
743 static unsigned int sdebug_guard = DEF_GUARD;
744 static int sdebug_host_max_queue;	/* per host */
745 static int sdebug_lowest_aligned = DEF_LOWEST_ALIGNED;
746 static int sdebug_max_luns = DEF_MAX_LUNS;
747 static int sdebug_max_queue = SDEBUG_CANQUEUE;	/* per submit queue */
748 static unsigned int sdebug_medium_error_start = OPT_MEDIUM_ERR_ADDR;
749 static int sdebug_medium_error_count = OPT_MEDIUM_ERR_NUM;
750 static atomic_t retired_max_queue;	/* if > 0 then was prior max_queue */
751 static int sdebug_ndelay = DEF_NDELAY;	/* if > 0 then unit is nanoseconds */
752 static int sdebug_no_lun_0 = DEF_NO_LUN_0;
753 static int sdebug_no_uld;
754 static int sdebug_num_parts = DEF_NUM_PARTS;
755 static int sdebug_num_tgts = DEF_NUM_TGTS; /* targets per host */
756 static int sdebug_opt_blks = DEF_OPT_BLKS;
757 static int sdebug_opts = DEF_OPTS;
758 static int sdebug_physblk_exp = DEF_PHYSBLK_EXP;
759 static int sdebug_opt_xferlen_exp = DEF_OPT_XFERLEN_EXP;
760 static int sdebug_ptype = DEF_PTYPE; /* SCSI peripheral device type */
761 static int sdebug_scsi_level = DEF_SCSI_LEVEL;
762 static int sdebug_sector_size = DEF_SECTOR_SIZE;
763 static int sdeb_tur_ms_to_ready = DEF_TUR_MS_TO_READY;
764 static int sdebug_virtual_gb = DEF_VIRTUAL_GB;
765 static int sdebug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
766 static unsigned int sdebug_lbpu = DEF_LBPU;
767 static unsigned int sdebug_lbpws = DEF_LBPWS;
768 static unsigned int sdebug_lbpws10 = DEF_LBPWS10;
769 static unsigned int sdebug_lbprz = DEF_LBPRZ;
770 static unsigned int sdebug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
771 static unsigned int sdebug_unmap_granularity = DEF_UNMAP_GRANULARITY;
772 static unsigned int sdebug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
773 static unsigned int sdebug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
774 static unsigned int sdebug_write_same_length = DEF_WRITESAME_LENGTH;
775 static int sdebug_uuid_ctl = DEF_UUID_CTL;
776 static bool sdebug_random = DEF_RANDOM;
777 static bool sdebug_per_host_store = DEF_PER_HOST_STORE;
778 static bool sdebug_removable = DEF_REMOVABLE;
779 static bool sdebug_clustering;
780 static bool sdebug_host_lock = DEF_HOST_LOCK;
781 static bool sdebug_strict = DEF_STRICT;
782 static bool sdebug_any_injecting_opt;
783 static bool sdebug_verbose;
784 static bool have_dif_prot;
785 static bool write_since_sync;
786 static bool sdebug_statistics = DEF_STATISTICS;
787 static bool sdebug_wp;
/* Following enum: 0: no zbc (default); 1: host aware; 2: host managed */
789 static enum blk_zoned_model sdeb_zbc_model = BLK_ZONED_NONE;
790 static char *sdeb_zbc_model_s;
791 
792 enum sam_lun_addr_method {SAM_LUN_AM_PERIPHERAL = 0x0,
793 			  SAM_LUN_AM_FLAT = 0x1,
794 			  SAM_LUN_AM_LOGICAL_UNIT = 0x2,
795 			  SAM_LUN_AM_EXTENDED = 0x3};
796 static enum sam_lun_addr_method sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
797 static int sdebug_lun_am_i = (int)SAM_LUN_AM_PERIPHERAL;
798 
799 static unsigned int sdebug_store_sectors;
800 static sector_t sdebug_capacity;	/* in sectors */
801 
/* old BIOS stuff; the kernel may get rid of these but some mode sense
 * pages may still need them */
804 static int sdebug_heads;		/* heads per disk */
805 static int sdebug_cylinders_per;	/* cylinders per surface */
806 static int sdebug_sectors_per;		/* sectors per cylinder */
807 
808 static LIST_HEAD(sdebug_host_list);
809 static DEFINE_SPINLOCK(sdebug_host_list_lock);
810 
811 static struct xarray per_store_arr;
812 static struct xarray *per_store_ap = &per_store_arr;
813 static int sdeb_first_idx = -1;		/* invalid index ==> none created */
814 static int sdeb_most_recent_idx = -1;
815 static DEFINE_RWLOCK(sdeb_fake_rw_lck);	/* need a RW lock when fake_rw=1 */
816 
817 static unsigned long map_size;
818 static int num_aborts;
819 static int num_dev_resets;
820 static int num_target_resets;
821 static int num_bus_resets;
822 static int num_host_resets;
823 static int dix_writes;
824 static int dix_reads;
825 static int dif_errors;
826 
827 /* ZBC global data */
828 static bool sdeb_zbc_in_use;	/* true for host-aware and host-managed disks */
829 static int sdeb_zbc_zone_size_mb;
830 static int sdeb_zbc_max_open = DEF_ZBC_MAX_OPEN_ZONES;
831 static int sdeb_zbc_nr_conv = DEF_ZBC_NR_CONV_ZONES;
832 
833 static int submit_queues = DEF_SUBMIT_QUEUES;  /* > 1 for multi-queue (mq) */
static int poll_queues; /* io_uring iopoll interface */
835 static struct sdebug_queue *sdebug_q_arr;  /* ptr to array of submit queues */
836 
837 static DEFINE_RWLOCK(atomic_rw);
838 static DEFINE_RWLOCK(atomic_rw2);
839 
840 static rwlock_t *ramdisk_lck_a[2];
841 
842 static char sdebug_proc_name[] = MY_NAME;
843 static const char *my_name = MY_NAME;
844 
845 static struct bus_type pseudo_lld_bus;
846 
847 static struct device_driver sdebug_driverfs_driver = {
848 	.name 		= sdebug_proc_name,
849 	.bus		= &pseudo_lld_bus,
850 };
851 
852 static const int check_condition_result =
853 	SAM_STAT_CHECK_CONDITION;
854 
855 static const int illegal_condition_result =
856 	(DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;
857 
858 static const int device_qfull_result =
859 	(DID_OK << 16) | SAM_STAT_TASK_SET_FULL;
860 
861 static const int condition_met_result = SAM_STAT_CONDITION_MET;
862 
863 
864 /* Only do the extra work involved in logical block provisioning if one or
865  * more of the lbpu, lbpws or lbpws10 parameters are given and we are doing
866  * real reads and writes (i.e. not skipping them for speed).
867  */
868 static inline bool scsi_debug_lbp(void)
869 {
870 	return 0 == sdebug_fake_rw &&
871 		(sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10);
872 }
873 
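/*
 * Map an LBA onto the (possibly smaller) backing store. When virtual_gb
 * is greater than 0 the advertised capacity can exceed the store, so the
 * LBA is first reduced modulo sdebug_store_sectors.
 */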
874 static void *lba2fake_store(struct sdeb_store_info *sip,
875 			    unsigned long long lba)
876 {
877 	struct sdeb_store_info *lsip = sip;
878 
879 	lba = do_div(lba, sdebug_store_sectors);
880 	if (!sip || !sip->storep) {
881 		WARN_ON_ONCE(true);
882 		lsip = xa_load(per_store_ap, 0);  /* should never be NULL */
883 	}
884 	return lsip->storep + lba * sdebug_sector_size;
885 }
886 
887 static struct t10_pi_tuple *dif_store(struct sdeb_store_info *sip,
888 				      sector_t sector)
889 {
890 	sector = sector_div(sector, sdebug_store_sectors);
891 
892 	return sip->dif_storep + sector;
893 }
894 
895 static void sdebug_max_tgts_luns(void)
896 {
897 	struct sdebug_host_info *sdbg_host;
898 	struct Scsi_Host *hpnt;
899 
900 	spin_lock(&sdebug_host_list_lock);
901 	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
902 		hpnt = sdbg_host->shost;
903 		if ((hpnt->this_id >= 0) &&
904 		    (sdebug_num_tgts > hpnt->this_id))
905 			hpnt->max_id = sdebug_num_tgts + 1;
906 		else
907 			hpnt->max_id = sdebug_num_tgts;
908 		/* sdebug_max_luns; */
909 		hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
910 	}
911 	spin_unlock(&sdebug_host_list_lock);
912 }
913 
914 enum sdeb_cmd_data {SDEB_IN_DATA = 0, SDEB_IN_CDB = 1};
915 
916 /* Set in_bit to -1 to indicate no bit position of invalid field */
917 static void mk_sense_invalid_fld(struct scsi_cmnd *scp,
918 				 enum sdeb_cmd_data c_d,
919 				 int in_byte, int in_bit)
920 {
921 	unsigned char *sbuff;
922 	u8 sks[4];
923 	int sl, asc;
924 
925 	sbuff = scp->sense_buffer;
926 	if (!sbuff) {
927 		sdev_printk(KERN_ERR, scp->device,
928 			    "%s: sense_buffer is NULL\n", __func__);
929 		return;
930 	}
931 	asc = c_d ? INVALID_FIELD_IN_CDB : INVALID_FIELD_IN_PARAM_LIST;
932 	memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
933 	scsi_build_sense(scp, sdebug_dsense, ILLEGAL_REQUEST, asc, 0);
934 	memset(sks, 0, sizeof(sks));
935 	sks[0] = 0x80;
936 	if (c_d)
937 		sks[0] |= 0x40;
938 	if (in_bit >= 0) {
939 		sks[0] |= 0x8;
940 		sks[0] |= 0x7 & in_bit;
941 	}
942 	put_unaligned_be16(in_byte, sks + 1);
943 	if (sdebug_dsense) {
944 		sl = sbuff[7] + 8;
945 		sbuff[7] = sl;
946 		sbuff[sl] = 0x2;
947 		sbuff[sl + 1] = 0x6;
948 		memcpy(sbuff + sl + 4, sks, 3);
949 	} else
950 		memcpy(sbuff + 15, sks, 3);
951 	if (sdebug_verbose)
952 		sdev_printk(KERN_INFO, scp->device, "%s:  [sense_key,asc,ascq"
953 			    "]: [0x5,0x%x,0x0] %c byte=%d, bit=%d\n",
954 			    my_name, asc, c_d ? 'C' : 'D', in_byte, in_bit);
955 }
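
/*
 * Worked example of the above: with fixed format sense (sdebug_dsense=0),
 * c_d=SDEB_IN_CDB, in_byte=1 and in_bit=3 give sks[] = {0xcb, 0x0, 0x1},
 * i.e. SKSV=1, C/D=1, BPV=1, bit pointer 3 and field pointer 1, placed in
 * sense bytes 15..17.
 */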
956 
957 static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq)
958 {
959 	if (!scp->sense_buffer) {
960 		sdev_printk(KERN_ERR, scp->device,
961 			    "%s: sense_buffer is NULL\n", __func__);
962 		return;
963 	}
964 	memset(scp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
965 
966 	scsi_build_sense(scp, sdebug_dsense, key, asc, asq);
967 
968 	if (sdebug_verbose)
969 		sdev_printk(KERN_INFO, scp->device,
970 			    "%s:  [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
971 			    my_name, key, asc, asq);
972 }
973 
974 static void mk_sense_invalid_opcode(struct scsi_cmnd *scp)
975 {
976 	mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
977 }
978 
979 static int scsi_debug_ioctl(struct scsi_device *dev, unsigned int cmd,
980 			    void __user *arg)
981 {
982 	if (sdebug_verbose) {
983 		if (0x1261 == cmd)
984 			sdev_printk(KERN_INFO, dev,
985 				    "%s: BLKFLSBUF [0x1261]\n", __func__);
986 		else if (0x5331 == cmd)
987 			sdev_printk(KERN_INFO, dev,
988 				    "%s: CDROM_GET_CAPABILITY [0x5331]\n",
989 				    __func__);
990 		else
991 			sdev_printk(KERN_INFO, dev, "%s: cmd=0x%x\n",
992 				    __func__, cmd);
993 	}
994 	return -EINVAL;
995 	/* return -ENOTTY; // correct return but upsets fdisk */
996 }
997 
998 static void config_cdb_len(struct scsi_device *sdev)
999 {
1000 	switch (sdebug_cdb_len) {
1001 	case 6:	/* suggest 6 byte READ, WRITE and MODE SENSE/SELECT */
1002 		sdev->use_10_for_rw = false;
1003 		sdev->use_16_for_rw = false;
1004 		sdev->use_10_for_ms = false;
1005 		break;
1006 	case 10: /* suggest 10 byte RWs and 6 byte MODE SENSE/SELECT */
1007 		sdev->use_10_for_rw = true;
1008 		sdev->use_16_for_rw = false;
1009 		sdev->use_10_for_ms = false;
1010 		break;
1011 	case 12: /* suggest 10 byte RWs and 10 byte MODE SENSE/SELECT */
1012 		sdev->use_10_for_rw = true;
1013 		sdev->use_16_for_rw = false;
1014 		sdev->use_10_for_ms = true;
1015 		break;
1016 	case 16:
1017 		sdev->use_10_for_rw = false;
1018 		sdev->use_16_for_rw = true;
1019 		sdev->use_10_for_ms = true;
1020 		break;
1021 	case 32: /* No knobs to suggest this so same as 16 for now */
1022 		sdev->use_10_for_rw = false;
1023 		sdev->use_16_for_rw = true;
1024 		sdev->use_10_for_ms = true;
1025 		break;
1026 	default:
1027 		pr_warn("unexpected cdb_len=%d, force to 10\n",
1028 			sdebug_cdb_len);
1029 		sdev->use_10_for_rw = true;
1030 		sdev->use_16_for_rw = false;
1031 		sdev->use_10_for_ms = false;
1032 		sdebug_cdb_len = 10;
1033 		break;
1034 	}
1035 }
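
/*
 * Example (parameter name as used by this driver): loading the module with
 *     modprobe scsi_debug cdb_len=16
 * makes attached devices prefer READ(16)/WRITE(16) and 10 byte
 * MODE SENSE/SELECT, per the 16 case above.
 */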
1036 
1037 static void all_config_cdb_len(void)
1038 {
1039 	struct sdebug_host_info *sdbg_host;
1040 	struct Scsi_Host *shost;
1041 	struct scsi_device *sdev;
1042 
1043 	spin_lock(&sdebug_host_list_lock);
1044 	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
1045 		shost = sdbg_host->shost;
1046 		shost_for_each_device(sdev, shost) {
1047 			config_cdb_len(sdev);
1048 		}
1049 	}
1050 	spin_unlock(&sdebug_host_list_lock);
1051 }
1052 
1053 static void clear_luns_changed_on_target(struct sdebug_dev_info *devip)
1054 {
1055 	struct sdebug_host_info *sdhp;
1056 	struct sdebug_dev_info *dp;
1057 
1058 	spin_lock(&sdebug_host_list_lock);
1059 	list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
1060 		list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
1061 			if ((devip->sdbg_host == dp->sdbg_host) &&
1062 			    (devip->target == dp->target))
1063 				clear_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
1064 		}
1065 	}
1066 	spin_unlock(&sdebug_host_list_lock);
1067 }
1068 
1069 static int make_ua(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1070 {
1071 	int k;
1072 
1073 	k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS);
1074 	if (k != SDEBUG_NUM_UAS) {
1075 		const char *cp = NULL;
1076 
1077 		switch (k) {
1078 		case SDEBUG_UA_POR:
1079 			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
1080 					POWER_ON_RESET_ASCQ);
1081 			if (sdebug_verbose)
1082 				cp = "power on reset";
1083 			break;
1084 		case SDEBUG_UA_BUS_RESET:
1085 			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
1086 					BUS_RESET_ASCQ);
1087 			if (sdebug_verbose)
1088 				cp = "bus reset";
1089 			break;
1090 		case SDEBUG_UA_MODE_CHANGED:
1091 			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
1092 					MODE_CHANGED_ASCQ);
1093 			if (sdebug_verbose)
1094 				cp = "mode parameters changed";
1095 			break;
1096 		case SDEBUG_UA_CAPACITY_CHANGED:
1097 			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
1098 					CAPACITY_CHANGED_ASCQ);
1099 			if (sdebug_verbose)
1100 				cp = "capacity data changed";
1101 			break;
1102 		case SDEBUG_UA_MICROCODE_CHANGED:
1103 			mk_sense_buffer(scp, UNIT_ATTENTION,
1104 					TARGET_CHANGED_ASC,
1105 					MICROCODE_CHANGED_ASCQ);
1106 			if (sdebug_verbose)
1107 				cp = "microcode has been changed";
1108 			break;
1109 		case SDEBUG_UA_MICROCODE_CHANGED_WO_RESET:
1110 			mk_sense_buffer(scp, UNIT_ATTENTION,
1111 					TARGET_CHANGED_ASC,
1112 					MICROCODE_CHANGED_WO_RESET_ASCQ);
1113 			if (sdebug_verbose)
1114 				cp = "microcode has been changed without reset";
1115 			break;
1116 		case SDEBUG_UA_LUNS_CHANGED:
1117 			/*
1118 			 * SPC-3 behavior is to report a UNIT ATTENTION with
1119 			 * ASC/ASCQ REPORTED LUNS DATA HAS CHANGED on every LUN
1120 			 * on the target, until a REPORT LUNS command is
1121 			 * received.  SPC-4 behavior is to report it only once.
1122 			 * NOTE:  sdebug_scsi_level does not use the same
1123 			 * values as struct scsi_device->scsi_level.
1124 			 */
1125 			if (sdebug_scsi_level >= 6)	/* SPC-4 and above */
1126 				clear_luns_changed_on_target(devip);
1127 			mk_sense_buffer(scp, UNIT_ATTENTION,
1128 					TARGET_CHANGED_ASC,
1129 					LUNS_CHANGED_ASCQ);
1130 			if (sdebug_verbose)
1131 				cp = "reported luns data has changed";
1132 			break;
1133 		default:
1134 			pr_warn("unexpected unit attention code=%d\n", k);
1135 			if (sdebug_verbose)
1136 				cp = "unknown";
1137 			break;
1138 		}
1139 		clear_bit(k, devip->uas_bm);
1140 		if (sdebug_verbose)
1141 			sdev_printk(KERN_INFO, scp->device,
1142 				   "%s reports: Unit attention: %s\n",
1143 				   my_name, cp);
1144 		return check_condition_result;
1145 	}
1146 	return 0;
1147 }
1148 
1149 /* Build SCSI "data-in" buffer. Returns 0 if ok else (DID_ERROR << 16). */
1150 static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
1151 				int arr_len)
1152 {
1153 	int act_len;
1154 	struct scsi_data_buffer *sdb = &scp->sdb;
1155 
1156 	if (!sdb->length)
1157 		return 0;
1158 	if (scp->sc_data_direction != DMA_FROM_DEVICE)
1159 		return DID_ERROR << 16;
1160 
1161 	act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
1162 				      arr, arr_len);
1163 	scsi_set_resid(scp, scsi_bufflen(scp) - act_len);
1164 
1165 	return 0;
1166 }
1167 
/* Partial build of SCSI "data-in" buffer. Returns 0 if ok else
 * (DID_ERROR << 16). Can write to an offset in the data-in buffer. If
 * called multiple times, the calls need not write in ascending offset
 * order. Assumes resid is set to scsi_bufflen() prior to any calls.
 */
1173 static int p_fill_from_dev_buffer(struct scsi_cmnd *scp, const void *arr,
1174 				  int arr_len, unsigned int off_dst)
1175 {
1176 	unsigned int act_len, n;
1177 	struct scsi_data_buffer *sdb = &scp->sdb;
1178 	off_t skip = off_dst;
1179 
1180 	if (sdb->length <= off_dst)
1181 		return 0;
1182 	if (scp->sc_data_direction != DMA_FROM_DEVICE)
1183 		return DID_ERROR << 16;
1184 
1185 	act_len = sg_pcopy_from_buffer(sdb->table.sgl, sdb->table.nents,
1186 				       arr, arr_len, skip);
1187 	pr_debug("%s: off_dst=%u, scsi_bufflen=%u, act_len=%u, resid=%d\n",
1188 		 __func__, off_dst, scsi_bufflen(scp), act_len,
1189 		 scsi_get_resid(scp));
1190 	n = scsi_bufflen(scp) - (off_dst + act_len);
1191 	scsi_set_resid(scp, min_t(u32, scsi_get_resid(scp), n));
1192 	return 0;
1193 }
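
/*
 * Example of the resid logic above (numbers illustrative): with
 * scsi_bufflen=512 and a prior resid of 512, copying act_len=96 bytes at
 * off_dst=0 leaves n = 512 - 96 = 416, so resid becomes min(512, 416) =
 * 416; a later copy of 416 bytes at off_dst=96 drops resid to 0.
 */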
1194 
1195 /* Fetches from SCSI "data-out" buffer. Returns number of bytes fetched into
1196  * 'arr' or -1 if error.
1197  */
1198 static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
1199 			       int arr_len)
1200 {
1201 	if (!scsi_bufflen(scp))
1202 		return 0;
1203 	if (scp->sc_data_direction != DMA_TO_DEVICE)
1204 		return -1;
1205 
1206 	return scsi_sg_copy_to_buffer(scp, arr, arr_len);
1207 }
1208 
1209 
1210 static char sdebug_inq_vendor_id[9] = "Linux   ";
1211 static char sdebug_inq_product_id[17] = "scsi_debug      ";
1212 static char sdebug_inq_product_rev[5] = SDEBUG_VERSION;
1213 /* Use some locally assigned NAAs for SAS addresses. */
1214 static const u64 naa3_comp_a = 0x3222222000000000ULL;
1215 static const u64 naa3_comp_b = 0x3333333000000000ULL;
1216 static const u64 naa3_comp_c = 0x3111111000000000ULL;
1217 
1218 /* Device identification VPD page. Returns number of bytes placed in arr */
1219 static int inquiry_vpd_83(unsigned char *arr, int port_group_id,
1220 			  int target_dev_id, int dev_id_num,
1221 			  const char *dev_id_str, int dev_id_str_len,
1222 			  const uuid_t *lu_name)
1223 {
1224 	int num, port_a;
1225 	char b[32];
1226 
1227 	port_a = target_dev_id + 1;
1228 	/* T10 vendor identifier field format (faked) */
1229 	arr[0] = 0x2;	/* ASCII */
1230 	arr[1] = 0x1;
1231 	arr[2] = 0x0;
1232 	memcpy(&arr[4], sdebug_inq_vendor_id, 8);
1233 	memcpy(&arr[12], sdebug_inq_product_id, 16);
1234 	memcpy(&arr[28], dev_id_str, dev_id_str_len);
1235 	num = 8 + 16 + dev_id_str_len;
1236 	arr[3] = num;
1237 	num += 4;
1238 	if (dev_id_num >= 0) {
1239 		if (sdebug_uuid_ctl) {
1240 			/* Locally assigned UUID */
1241 			arr[num++] = 0x1;  /* binary (not necessarily sas) */
			arr[num++] = 0xa;  /* PIV=0, lu, designator type: UUID */
1243 			arr[num++] = 0x0;
1244 			arr[num++] = 0x12;
1245 			arr[num++] = 0x10; /* uuid type=1, locally assigned */
1246 			arr[num++] = 0x0;
1247 			memcpy(arr + num, lu_name, 16);
1248 			num += 16;
1249 		} else {
1250 			/* NAA-3, Logical unit identifier (binary) */
1251 			arr[num++] = 0x1;  /* binary (not necessarily sas) */
1252 			arr[num++] = 0x3;  /* PIV=0, lu, naa */
1253 			arr[num++] = 0x0;
1254 			arr[num++] = 0x8;
1255 			put_unaligned_be64(naa3_comp_b + dev_id_num, arr + num);
1256 			num += 8;
1257 		}
1258 		/* Target relative port number */
1259 		arr[num++] = 0x61;	/* proto=sas, binary */
1260 		arr[num++] = 0x94;	/* PIV=1, target port, rel port */
1261 		arr[num++] = 0x0;	/* reserved */
1262 		arr[num++] = 0x4;	/* length */
1263 		arr[num++] = 0x0;	/* reserved */
1264 		arr[num++] = 0x0;	/* reserved */
1265 		arr[num++] = 0x0;
1266 		arr[num++] = 0x1;	/* relative port A */
1267 	}
1268 	/* NAA-3, Target port identifier */
1269 	arr[num++] = 0x61;	/* proto=sas, binary */
1270 	arr[num++] = 0x93;	/* piv=1, target port, naa */
1271 	arr[num++] = 0x0;
1272 	arr[num++] = 0x8;
1273 	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
1274 	num += 8;
1275 	/* NAA-3, Target port group identifier */
1276 	arr[num++] = 0x61;	/* proto=sas, binary */
1277 	arr[num++] = 0x95;	/* piv=1, target port group id */
1278 	arr[num++] = 0x0;
1279 	arr[num++] = 0x4;
1280 	arr[num++] = 0;
1281 	arr[num++] = 0;
1282 	put_unaligned_be16(port_group_id, arr + num);
1283 	num += 2;
1284 	/* NAA-3, Target device identifier */
1285 	arr[num++] = 0x61;	/* proto=sas, binary */
1286 	arr[num++] = 0xa3;	/* piv=1, target device, naa */
1287 	arr[num++] = 0x0;
1288 	arr[num++] = 0x8;
1289 	put_unaligned_be64(naa3_comp_a + target_dev_id, arr + num);
1290 	num += 8;
1291 	/* SCSI name string: Target device identifier */
1292 	arr[num++] = 0x63;	/* proto=sas, UTF-8 */
1293 	arr[num++] = 0xa8;	/* piv=1, target device, SCSI name string */
1294 	arr[num++] = 0x0;
1295 	arr[num++] = 24;
1296 	memcpy(arr + num, "naa.32222220", 12);
1297 	num += 12;
1298 	snprintf(b, sizeof(b), "%08X", target_dev_id);
1299 	memcpy(arr + num, b, 8);
1300 	num += 8;
1301 	memset(arr + num, 0, 4);
1302 	num += 4;
1303 	return num;
1304 }
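
/*
 * For example (dev_id_num illustrative): with dev_id_num 0x123 and uuid_ctl
 * off, the NAA-3 logical unit designator built above is the 8 byte
 * big-endian value naa3_comp_b + 0x123 = 0x3333333000000123.
 */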
1305 
1306 static unsigned char vpd84_data[] = {
1307 /* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
1308     0x22,0x22,0x22,0x0,0xbb,0x1,
1309     0x22,0x22,0x22,0x0,0xbb,0x2,
1310 };
1311 
1312 /*  Software interface identification VPD page */
1313 static int inquiry_vpd_84(unsigned char *arr)
1314 {
1315 	memcpy(arr, vpd84_data, sizeof(vpd84_data));
1316 	return sizeof(vpd84_data);
1317 }
1318 
1319 /* Management network addresses VPD page */
1320 static int inquiry_vpd_85(unsigned char *arr)
1321 {
1322 	int num = 0;
1323 	const char *na1 = "https://www.kernel.org/config";
1324 	const char *na2 = "http://www.kernel.org/log";
1325 	int plen, olen;
1326 
1327 	arr[num++] = 0x1;	/* lu, storage config */
1328 	arr[num++] = 0x0;	/* reserved */
1329 	arr[num++] = 0x0;
1330 	olen = strlen(na1);
1331 	plen = olen + 1;
1332 	if (plen % 4)
1333 		plen = ((plen / 4) + 1) * 4;
	arr[num++] = plen;	/* length, null terminated, padded */
1335 	memcpy(arr + num, na1, olen);
1336 	memset(arr + num + olen, 0, plen - olen);
1337 	num += plen;
1338 
1339 	arr[num++] = 0x4;	/* lu, logging */
1340 	arr[num++] = 0x0;	/* reserved */
1341 	arr[num++] = 0x0;
1342 	olen = strlen(na2);
1343 	plen = olen + 1;
1344 	if (plen % 4)
1345 		plen = ((plen / 4) + 1) * 4;
1346 	arr[num++] = plen;	/* length, null terminated, padded */
1347 	memcpy(arr + num, na2, olen);
1348 	memset(arr + num + olen, 0, plen - olen);
1349 	num += plen;
1350 
1351 	return num;
1352 }
1353 
1354 /* SCSI ports VPD page */
1355 static int inquiry_vpd_88(unsigned char *arr, int target_dev_id)
1356 {
1357 	int num = 0;
1358 	int port_a, port_b;
1359 
1360 	port_a = target_dev_id + 1;
1361 	port_b = port_a + 1;
1362 	arr[num++] = 0x0;	/* reserved */
1363 	arr[num++] = 0x0;	/* reserved */
1364 	arr[num++] = 0x0;
1365 	arr[num++] = 0x1;	/* relative port 1 (primary) */
1366 	memset(arr + num, 0, 6);
1367 	num += 6;
1368 	arr[num++] = 0x0;
1369 	arr[num++] = 12;	/* length tp descriptor */
	/* naa-3 target port identifier (A) */
1371 	arr[num++] = 0x61;	/* proto=sas, binary */
1372 	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
1373 	arr[num++] = 0x0;	/* reserved */
1374 	arr[num++] = 0x8;	/* length */
1375 	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
1376 	num += 8;
1377 	arr[num++] = 0x0;	/* reserved */
1378 	arr[num++] = 0x0;	/* reserved */
1379 	arr[num++] = 0x0;
1380 	arr[num++] = 0x2;	/* relative port 2 (secondary) */
1381 	memset(arr + num, 0, 6);
1382 	num += 6;
1383 	arr[num++] = 0x0;
1384 	arr[num++] = 12;	/* length tp descriptor */
	/* naa-3 target port identifier (B) */
1386 	arr[num++] = 0x61;	/* proto=sas, binary */
1387 	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
1388 	arr[num++] = 0x0;	/* reserved */
1389 	arr[num++] = 0x8;	/* length */
1390 	put_unaligned_be64(naa3_comp_a + port_b, arr + num);
1391 	num += 8;
1392 
1393 	return num;
1394 }
1395 
1396 
1397 static unsigned char vpd89_data[] = {
1398 /* from 4th byte */ 0,0,0,0,
1399 'l','i','n','u','x',' ',' ',' ',
1400 'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
1401 '1','2','3','4',
1402 0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
1403 0xec,0,0,0,
1404 0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
1405 0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
1406 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
1407 0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
1408 0x53,0x41,
1409 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
1410 0x20,0x20,
1411 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
1412 0x10,0x80,
1413 0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
1414 0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
1415 0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
1416 0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
1417 0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
1418 0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
1419 0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
1420 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1421 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1422 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1423 0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
1424 0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
1425 0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
1426 0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
1427 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1428 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1429 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1430 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1431 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1432 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1433 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1434 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1435 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1436 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1437 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1438 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
1439 };
1440 
1441 /* ATA Information VPD page */
1442 static int inquiry_vpd_89(unsigned char *arr)
1443 {
1444 	memcpy(arr, vpd89_data, sizeof(vpd89_data));
1445 	return sizeof(vpd89_data);
1446 }
1447 
1448 
1449 static unsigned char vpdb0_data[] = {
1450 	/* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
1451 	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1452 	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1453 	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1454 };
1455 
1456 /* Block limits VPD page (SBC-3) */
1457 static int inquiry_vpd_b0(unsigned char *arr)
1458 {
1459 	unsigned int gran;
1460 
1461 	memcpy(arr, vpdb0_data, sizeof(vpdb0_data));
1462 
1463 	/* Optimal transfer length granularity */
1464 	if (sdebug_opt_xferlen_exp != 0 &&
1465 	    sdebug_physblk_exp < sdebug_opt_xferlen_exp)
1466 		gran = 1 << sdebug_opt_xferlen_exp;
1467 	else
1468 		gran = 1 << sdebug_physblk_exp;
1469 	put_unaligned_be16(gran, arr + 2);
1470 
1471 	/* Maximum Transfer Length */
1472 	if (sdebug_store_sectors > 0x400)
1473 		put_unaligned_be32(sdebug_store_sectors, arr + 4);
1474 
1475 	/* Optimal Transfer Length */
1476 	put_unaligned_be32(sdebug_opt_blks, &arr[8]);
1477 
1478 	if (sdebug_lbpu) {
1479 		/* Maximum Unmap LBA Count */
1480 		put_unaligned_be32(sdebug_unmap_max_blocks, &arr[16]);
1481 
1482 		/* Maximum Unmap Block Descriptor Count */
1483 		put_unaligned_be32(sdebug_unmap_max_desc, &arr[20]);
1484 	}
1485 
1486 	/* Unmap Granularity Alignment */
1487 	if (sdebug_unmap_alignment) {
1488 		put_unaligned_be32(sdebug_unmap_alignment, &arr[28]);
1489 		arr[28] |= 0x80; /* UGAVALID */
1490 	}
1491 
1492 	/* Optimal Unmap Granularity */
1493 	put_unaligned_be32(sdebug_unmap_granularity, &arr[24]);
1494 
1495 	/* Maximum WRITE SAME Length */
1496 	put_unaligned_be64(sdebug_write_same_length, &arr[32]);
1497 
1498 	return 0x3c; /* Mandatory page length for Logical Block Provisioning */
1501 }
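
/*
 * Worked example for the granularity selection above, assuming module
 * parameters physblk_exp=3 and opt_xferlen_exp=0: the else branch runs
 * and gran = 1 << 3 = 8, so the page reports an optimal transfer length
 * granularity of 8 logical blocks (one physical block).
 */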
1502 
1503 /* Block device characteristics VPD page (SBC-3) */
1504 static int inquiry_vpd_b1(struct sdebug_dev_info *devip, unsigned char *arr)
1505 {
1506 	memset(arr, 0, 0x3c);
1507 	arr[0] = 0;
1508 	arr[1] = 1;	/* non rotating medium (e.g. solid state) */
1509 	arr[2] = 0;
1510 	arr[3] = 5;	/* less than 1.8" */
1511 	if (devip->zmodel == BLK_ZONED_HA)
1512 		arr[4] = 1 << 4;	/* zoned field = 01b */
1513 
1514 	return 0x3c;
1515 }
1516 
1517 /* Logical block provisioning VPD page (SBC-4) */
1518 static int inquiry_vpd_b2(unsigned char *arr)
1519 {
1520 	memset(arr, 0, 0x4);
1521 	arr[0] = 0;			/* threshold exponent */
1522 	if (sdebug_lbpu)
1523 		arr[1] = 1 << 7;
1524 	if (sdebug_lbpws)
1525 		arr[1] |= 1 << 6;
1526 	if (sdebug_lbpws10)
1527 		arr[1] |= 1 << 5;
1528 	if (sdebug_lbprz && scsi_debug_lbp())
1529 		arr[1] |= (sdebug_lbprz & 0x7) << 2;  /* sbc4r07 and later */
1530 	/* anc_sup=0; dp=0 (no provisioning group descriptor) */
1531 	/* minimum_percentage=0; provisioning_type=0 (unknown) */
1532 	/* threshold_percentage=0 */
1533 	return 0x4;
1534 }
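
/*
 * Layout of byte 1 as built above (Logical Block Provisioning VPD page,
 * SBC-4): bit 7 LBPU (UNMAP supported), bit 6 LBPWS (WRITE SAME(16) with
 * UNMAP), bit 5 LBPWS10 (WRITE SAME(10) with UNMAP), bits 4:2 LBPRZ,
 * bit 1 ANC_SUP, bit 0 DP.
 */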
1535 
1536 /* Zoned block device characteristics VPD page (ZBC mandatory) */
1537 static int inquiry_vpd_b6(struct sdebug_dev_info *devip, unsigned char *arr)
1538 {
1539 	memset(arr, 0, 0x3c);
1540 	arr[0] = 0x1; /* set URSWRZ (unrestricted read in seq. wr req zone) */
1541 	/*
1542 	 * Set Optimal number of open sequential write preferred zones and
1543 	 * Optimal number of non-sequentially written sequential write
1544 	 * preferred zones fields to 'not reported' (0xffffffff). Leave other
1545 	 * fields set to zero, apart from Max. number of open swrz_s field.
1546 	 */
1547 	put_unaligned_be32(0xffffffff, &arr[4]);
1548 	put_unaligned_be32(0xffffffff, &arr[8]);
1549 	if (sdeb_zbc_model == BLK_ZONED_HM && devip->max_open)
1550 		put_unaligned_be32(devip->max_open, &arr[12]);
1551 	else
1552 		put_unaligned_be32(0xffffffff, &arr[12]);
1553 	return 0x3c;
1554 }
1555 
1556 #define SDEBUG_LONG_INQ_SZ 96
1557 #define SDEBUG_MAX_INQ_ARR_SZ 584
1558 
1559 static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1560 {
1561 	unsigned char pq_pdt;
1562 	unsigned char *arr;
1563 	unsigned char *cmd = scp->cmnd;
1564 	u32 alloc_len, n;
1565 	int ret;
1566 	bool have_wlun, is_disk, is_zbc, is_disk_zbc;
1567 
1568 	alloc_len = get_unaligned_be16(cmd + 3);
1569 	arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
1570 	if (!arr)
1571 		return DID_REQUEUE << 16;
1572 	is_disk = (sdebug_ptype == TYPE_DISK);
1573 	is_zbc = (devip->zmodel != BLK_ZONED_NONE);
1574 	is_disk_zbc = (is_disk || is_zbc);
1575 	have_wlun = scsi_is_wlun(scp->device->lun);
1576 	if (have_wlun)
1577 		pq_pdt = TYPE_WLUN;	/* present, wlun */
1578 	else if (sdebug_no_lun_0 && (devip->lun == SDEBUG_LUN_0_VAL))
1579 		pq_pdt = 0x7f;	/* not present, PQ=3, PDT=0x1f */
1580 	else
1581 		pq_pdt = (sdebug_ptype & 0x1f);
1582 	arr[0] = pq_pdt;
1583 	if (0x2 & cmd[1]) {  /* CMDDT bit set */
1584 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
1585 		kfree(arr);
1586 		return check_condition_result;
1587 	} else if (0x1 & cmd[1]) {  /* EVPD bit set */
1588 		int lu_id_num, port_group_id, target_dev_id;
1589 		u32 len;
1590 		char lu_id_str[6];
1591 		int host_no = devip->sdbg_host->shost->host_no;
1592 
1593 		port_group_id = (((host_no + 1) & 0x7f) << 8) +
1594 		    (devip->channel & 0x7f);
1595 		if (sdebug_vpd_use_hostno == 0)
1596 			host_no = 0;
1597 		lu_id_num = have_wlun ? -1 : (((host_no + 1) * 2000) +
1598 			    (devip->target * 1000) + devip->lun);
1599 		target_dev_id = ((host_no + 1) * 2000) +
1600 				 (devip->target * 1000) - 3;
1601 		len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
1602 		if (0 == cmd[2]) { /* supported vital product data pages */
1603 			arr[1] = cmd[2];	/*sanity */
1604 			n = 4;
1605 			arr[n++] = 0x0;   /* this page */
1606 			arr[n++] = 0x80;  /* unit serial number */
1607 			arr[n++] = 0x83;  /* device identification */
1608 			arr[n++] = 0x84;  /* software interface ident. */
1609 			arr[n++] = 0x85;  /* management network addresses */
1610 			arr[n++] = 0x86;  /* extended inquiry */
1611 			arr[n++] = 0x87;  /* mode page policy */
1612 			arr[n++] = 0x88;  /* SCSI ports */
1613 			if (is_disk_zbc) {	  /* SBC or ZBC */
1614 				arr[n++] = 0x89;  /* ATA information */
1615 				arr[n++] = 0xb0;  /* Block limits */
1616 				arr[n++] = 0xb1;  /* Block characteristics */
1617 				if (is_disk)
1618 					arr[n++] = 0xb2;  /* LB Provisioning */
1619 				if (is_zbc)
1620 					arr[n++] = 0xb6;  /* ZB dev. char. */
1621 			}
1622 			arr[3] = n - 4;	  /* number of supported VPD pages */
1623 		} else if (0x80 == cmd[2]) { /* unit serial number */
1624 			arr[1] = cmd[2];	/*sanity */
1625 			arr[3] = len;
1626 			memcpy(&arr[4], lu_id_str, len);
1627 		} else if (0x83 == cmd[2]) { /* device identification */
1628 			arr[1] = cmd[2];	/*sanity */
1629 			arr[3] = inquiry_vpd_83(&arr[4], port_group_id,
1630 						target_dev_id, lu_id_num,
1631 						lu_id_str, len,
1632 						&devip->lu_name);
1633 		} else if (0x84 == cmd[2]) { /* Software interface ident. */
1634 			arr[1] = cmd[2];	/*sanity */
1635 			arr[3] = inquiry_vpd_84(&arr[4]);
1636 		} else if (0x85 == cmd[2]) { /* Management network addresses */
1637 			arr[1] = cmd[2];	/*sanity */
1638 			arr[3] = inquiry_vpd_85(&arr[4]);
1639 		} else if (0x86 == cmd[2]) { /* extended inquiry */
1640 			arr[1] = cmd[2];	/*sanity */
1641 			arr[3] = 0x3c;	/* number of following entries */
1642 			if (sdebug_dif == T10_PI_TYPE3_PROTECTION)
1643 				arr[4] = 0x4;	/* SPT: GRD_CHK:1 */
1644 			else if (have_dif_prot)
1645 				arr[4] = 0x5;   /* SPT: GRD_CHK:1, REF_CHK:1 */
1646 			else
1647 				arr[4] = 0x0;   /* no protection stuff */
1648 			arr[5] = 0x7;   /* head of q, ordered + simple q's */
1649 		} else if (0x87 == cmd[2]) { /* mode page policy */
1650 			arr[1] = cmd[2];	/*sanity */
1651 			arr[3] = 0x8;	/* number of following entries */
1652 			arr[4] = 0x2;	/* disconnect-reconnect mp */
1653 			arr[6] = 0x80;	/* mlus, shared */
1654 			arr[8] = 0x18;	 /* protocol specific lu */
1655 			arr[10] = 0x82;	 /* mlus, per initiator port */
1656 		} else if (0x88 == cmd[2]) { /* SCSI Ports */
1657 			arr[1] = cmd[2];	/*sanity */
1658 			arr[3] = inquiry_vpd_88(&arr[4], target_dev_id);
1659 		} else if (is_disk_zbc && 0x89 == cmd[2]) { /* ATA info */
1660 			arr[1] = cmd[2];        /*sanity */
1661 			n = inquiry_vpd_89(&arr[4]);
1662 			put_unaligned_be16(n, arr + 2);
1663 		} else if (is_disk_zbc && 0xb0 == cmd[2]) { /* Block limits */
1664 			arr[1] = cmd[2];        /*sanity */
1665 			arr[3] = inquiry_vpd_b0(&arr[4]);
1666 		} else if (is_disk_zbc && 0xb1 == cmd[2]) { /* Block char. */
1667 			arr[1] = cmd[2];        /*sanity */
1668 			arr[3] = inquiry_vpd_b1(devip, &arr[4]);
1669 		} else if (is_disk && 0xb2 == cmd[2]) { /* LB Prov. */
1670 			arr[1] = cmd[2];        /*sanity */
1671 			arr[3] = inquiry_vpd_b2(&arr[4]);
1672 		} else if (is_zbc && cmd[2] == 0xb6) { /* ZB dev. charact. */
1673 			arr[1] = cmd[2];        /*sanity */
1674 			arr[3] = inquiry_vpd_b6(devip, &arr[4]);
1675 		} else {
1676 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
1677 			kfree(arr);
1678 			return check_condition_result;
1679 		}
1680 		len = min_t(u32, get_unaligned_be16(arr + 2) + 4, alloc_len);
1681 		ret = fill_from_dev_buffer(scp, arr,
1682 			    min_t(u32, len, SDEBUG_MAX_INQ_ARR_SZ));
1683 		kfree(arr);
1684 		return ret;
1685 	}
1686 	/* drops through here for a standard inquiry */
1687 	arr[1] = sdebug_removable ? 0x80 : 0;	/* Removable disk */
1688 	arr[2] = sdebug_scsi_level;
1689 	arr[3] = 2;    /* response_data_format==2 */
1690 	arr[4] = SDEBUG_LONG_INQ_SZ - 5;
1691 	arr[5] = (int)have_dif_prot;	/* PROTECT bit */
1692 	if (sdebug_vpd_use_hostno == 0)
1693 		arr[5] |= 0x10; /* claim: implicit TPGS */
1694 	arr[6] = 0x10; /* claim: MultiP */
1695 	/* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
1696 	arr[7] = 0xa; /* claim: LINKED + CMDQUE */
1697 	memcpy(&arr[8], sdebug_inq_vendor_id, 8);
1698 	memcpy(&arr[16], sdebug_inq_product_id, 16);
1699 	memcpy(&arr[32], sdebug_inq_product_rev, 4);
1700 	/* Use Vendor Specific area to place driver date in ASCII hex */
1701 	memcpy(&arr[36], sdebug_version_date, 8);
1702 	/* version descriptors (2 bytes each) follow */
1703 	put_unaligned_be16(0xc0, arr + 58);   /* SAM-6 no version claimed */
1704 	put_unaligned_be16(0x5c0, arr + 60);  /* SPC-5 no version claimed */
1705 	n = 62;
1706 	if (is_disk) {		/* SBC-4 no version claimed */
1707 		put_unaligned_be16(0x600, arr + n);
1708 		n += 2;
1709 	} else if (sdebug_ptype == TYPE_TAPE) {	/* SSC-4 rev 3 */
1710 		put_unaligned_be16(0x525, arr + n);
1711 		n += 2;
1712 	} else if (is_zbc) {	/* ZBC BSR INCITS 536 revision 05 */
1713 		put_unaligned_be16(0x624, arr + n);
1714 		n += 2;
1715 	}
1716 	put_unaligned_be16(0x2100, arr + n);	/* SPL-4 no version claimed */
1717 	ret = fill_from_dev_buffer(scp, arr,
1718 			    min_t(u32, alloc_len, SDEBUG_LONG_INQ_SZ));
1719 	kfree(arr);
1720 	return ret;
1721 }
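
/*
 * One way to exercise the INQUIRY paths above from user space, assuming
 * the sg3_utils package is installed (illustrative only):
 *
 *	sg_inq /dev/sdX			# standard INQUIRY
 *	sg_inq --page=0x83 /dev/sdX	# device identification VPD page
 */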
1722 
1723 /* See resp_iec_m_pg() for how this data is manipulated */
1724 static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
1725 				   0, 0, 0x0, 0x0};
1726 
1727 static int resp_requests(struct scsi_cmnd *scp,
1728 			 struct sdebug_dev_info *devip)
1729 {
1730 	unsigned char *cmd = scp->cmnd;
1731 	unsigned char arr[SCSI_SENSE_BUFFERSIZE];	/* assume >= 18 bytes */
1732 	bool dsense = !!(cmd[1] & 1);
1733 	u32 alloc_len = cmd[4];
1734 	u32 len = 18;
1735 	int stopped_state = atomic_read(&devip->stopped);
1736 
1737 	memset(arr, 0, sizeof(arr));
1738 	if (stopped_state > 0) {	/* some "pollable" data [spc6r02: 5.12.2] */
1739 		if (dsense) {
1740 			arr[0] = 0x72;
1741 			arr[1] = NOT_READY;
1742 			arr[2] = LOGICAL_UNIT_NOT_READY;
1743 			arr[3] = (stopped_state == 2) ? 0x1 : 0x2;
1744 			len = 8;
1745 		} else {
1746 			arr[0] = 0x70;
1747 			arr[2] = NOT_READY;		/* NOT_READY in sense_key */
1748 			arr[7] = 0xa;			/* 18 byte sense buffer */
1749 			arr[12] = LOGICAL_UNIT_NOT_READY;
1750 			arr[13] = (stopped_state == 2) ? 0x1 : 0x2;
1751 		}
1752 	} else if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
1753 		/* Information exceptions control mode page: TEST=1, MRIE=6 */
1754 		if (dsense) {
1755 			arr[0] = 0x72;
1756 			arr[1] = 0x0;		/* NO_SENSE in sense_key */
1757 			arr[2] = THRESHOLD_EXCEEDED;
1758 			arr[3] = 0xff;		/* Failure prediction (false) */
1759 			len = 8;
1760 		} else {
1761 			arr[0] = 0x70;
1762 			arr[2] = 0x0;		/* NO_SENSE in sense_key */
1763 			arr[7] = 0xa;   	/* 18 byte sense buffer */
1764 			arr[12] = THRESHOLD_EXCEEDED;
1765 			arr[13] = 0xff;		/* Failure prediction (false) */
1766 		}
1767 	} else {	/* nothing to report */
1768 		if (dsense) {
1769 			len = 8;
1770 			memset(arr, 0, len);
1771 			arr[0] = 0x72;
1772 		} else {
1773 			memset(arr, 0, len);
1774 			arr[0] = 0x70;
1775 			arr[7] = 0xa;
1776 		}
1777 	}
1778 	return fill_from_dev_buffer(scp, arr, min_t(u32, len, alloc_len));
1779 }
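
/*
 * The two sense formats built above differ in layout: fixed format
 * (response code 0x70) carries the sense key in byte 2, the additional
 * length (0xa for an 18-byte buffer) in byte 7 and the ASC/ASCQ pair in
 * bytes 12 and 13; descriptor format (0x72) carries the sense key, ASC
 * and ASCQ in bytes 1, 2 and 3 of its 8-byte header.
 */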
1780 
1781 static int resp_start_stop(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1782 {
1783 	unsigned char *cmd = scp->cmnd;
1784 	int power_cond, want_stop, stopped_state;
1785 	bool changing;
1786 
1787 	power_cond = (cmd[4] & 0xf0) >> 4;
1788 	if (power_cond) {
1789 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 7);
1790 		return check_condition_result;
1791 	}
1792 	want_stop = !(cmd[4] & 1);
1793 	stopped_state = atomic_read(&devip->stopped);
1794 	if (stopped_state == 2) {
1795 		ktime_t now_ts = ktime_get_boottime();
1796 
1797 		if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
1798 			u64 diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));
1799 
1800 			if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
1801 				/* tur_ms_to_ready timer expired */
1802 				atomic_set(&devip->stopped, 0);
1803 				stopped_state = 0;
1804 			}
1805 		}
1806 		if (stopped_state == 2) {
1807 			if (want_stop) {
1808 				stopped_state = 1;	/* dummy up success */
1809 			} else {	/* Disallow tur_ms_to_ready delay to be overridden */
1810 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 0 /* START bit */);
1811 				return check_condition_result;
1812 			}
1813 		}
1814 	}
1815 	changing = (stopped_state != want_stop);
1816 	if (changing)
1817 		atomic_xchg(&devip->stopped, want_stop);
1818 	if (!changing || (cmd[1] & 0x1))  /* state unchanged or IMMED bit set in cdb */
1819 		return SDEG_RES_IMMED_MASK;
1820 	else
1821 		return 0;
1822 }
1823 
1824 static sector_t get_sdebug_capacity(void)
1825 {
1826 	static const unsigned int gibibyte = 1073741824;
1827 
1828 	if (sdebug_virtual_gb > 0)
1829 		return (sector_t)sdebug_virtual_gb *
1830 			(gibibyte / sdebug_sector_size);
1831 	else
1832 		return sdebug_store_sectors;
1833 }
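
/*
 * Worked example: with the virtual_gb module parameter set to 4 and
 * 512-byte logical blocks this reports 4 * (1073741824 / 512) = 8388608
 * sectors, regardless of the (possibly smaller) backing store; accesses
 * beyond the store then wrap, see do_device_access().
 */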
1834 
1835 #define SDEBUG_READCAP_ARR_SZ 8
1836 static int resp_readcap(struct scsi_cmnd *scp,
1837 			struct sdebug_dev_info *devip)
1838 {
1839 	unsigned char arr[SDEBUG_READCAP_ARR_SZ];
1840 	unsigned int capac;
1841 
1842 	/* following just in case virtual_gb changed */
1843 	sdebug_capacity = get_sdebug_capacity();
1844 	memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
1845 	if (sdebug_capacity < 0xffffffff) {
1846 		capac = (unsigned int)sdebug_capacity - 1;
1847 		put_unaligned_be32(capac, arr + 0);
1848 	} else
1849 		put_unaligned_be32(0xffffffff, arr + 0);
1850 	put_unaligned_be16(sdebug_sector_size, arr + 6);
1851 	return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
1852 }
1853 
1854 #define SDEBUG_READCAP16_ARR_SZ 32
1855 static int resp_readcap16(struct scsi_cmnd *scp,
1856 			  struct sdebug_dev_info *devip)
1857 {
1858 	unsigned char *cmd = scp->cmnd;
1859 	unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
1860 	u32 alloc_len;
1861 
1862 	alloc_len = get_unaligned_be32(cmd + 10);
1863 	/* following just in case virtual_gb changed */
1864 	sdebug_capacity = get_sdebug_capacity();
1865 	memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
1866 	put_unaligned_be64((u64)(sdebug_capacity - 1), arr + 0);
1867 	put_unaligned_be32(sdebug_sector_size, arr + 8);
1868 	arr[13] = sdebug_physblk_exp & 0xf;
1869 	arr[14] = (sdebug_lowest_aligned >> 8) & 0x3f;
1870 
1871 	if (scsi_debug_lbp()) {
1872 		arr[14] |= 0x80; /* LBPME */
1873 		/* from sbc4r07, this LBPRZ field is 1 bit, but the LBPRZ in
1874 		 * the LB Provisioning VPD page is 3 bits. Note that lbprz=2
1875 		 * in the wider field maps to 0 in this field.
1876 		 */
1877 		if (sdebug_lbprz & 1)	/* precisely what the draft requires */
1878 			arr[14] |= 0x40;
1879 	}
1880 
1881 	arr[15] = sdebug_lowest_aligned & 0xff;
1882 
1883 	if (have_dif_prot) {
1884 		arr[12] = (sdebug_dif - 1) << 1; /* P_TYPE */
1885 		arr[12] |= 1; /* PROT_EN */
1886 	}
1887 
1888 	return fill_from_dev_buffer(scp, arr,
1889 			    min_t(u32, alloc_len, SDEBUG_READCAP16_ARR_SZ));
1890 }
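
/*
 * READ CAPACITY(16) is what exposes capacities of 2**32 or more logical
 * blocks (resp_readcap() above saturates at 0xffffffff) as well as the
 * protection (PROT_EN, P_TYPE) and provisioning (LBPME, LBPRZ) fields,
 * none of which fit in the 8-byte READ CAPACITY(10) response.
 */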
1891 
1892 #define SDEBUG_MAX_TGTPGS_ARR_SZ 1412
1893 
1894 static int resp_report_tgtpgs(struct scsi_cmnd *scp,
1895 			      struct sdebug_dev_info *devip)
1896 {
1897 	unsigned char *cmd = scp->cmnd;
1898 	unsigned char *arr;
1899 	int host_no = devip->sdbg_host->shost->host_no;
1900 	int port_group_a, port_group_b, port_a, port_b;
1901 	u32 alen, n, rlen;
1902 	int ret;
1903 
1904 	alen = get_unaligned_be32(cmd + 6);
1905 	arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
1906 	if (!arr)
1907 		return DID_REQUEUE << 16;
1908 	/*
1909 	 * EVPD page 0x88 states we have two ports, one
1910 	 * real and a fake port with no device connected.
1911 	 * So we create two port groups with one port each
1912 	 * and set the group with port B to unavailable.
1913 	 */
1914 	port_a = 0x1; /* relative port A */
1915 	port_b = 0x2; /* relative port B */
1916 	port_group_a = (((host_no + 1) & 0x7f) << 8) +
1917 			(devip->channel & 0x7f);
1918 	port_group_b = (((host_no + 1) & 0x7f) << 8) +
1919 			(devip->channel & 0x7f) + 0x80;
1920 
1921 	/*
1922 	 * The asymmetric access state is cycled according to the host_id.
1923 	 */
1924 	n = 4;
1925 	if (sdebug_vpd_use_hostno == 0) {
1926 		arr[n++] = host_no % 3; /* Asymm access state */
1927 		arr[n++] = 0x0F; /* claim: all states are supported */
1928 	} else {
1929 		arr[n++] = 0x0; /* Active/Optimized path */
1930 		arr[n++] = 0x01; /* only support active/optimized paths */
1931 	}
1932 	put_unaligned_be16(port_group_a, arr + n);
1933 	n += 2;
1934 	arr[n++] = 0;    /* Reserved */
1935 	arr[n++] = 0;    /* Status code */
1936 	arr[n++] = 0;    /* Vendor unique */
1937 	arr[n++] = 0x1;  /* One port per group */
1938 	arr[n++] = 0;    /* Reserved */
1939 	arr[n++] = 0;    /* Reserved */
1940 	put_unaligned_be16(port_a, arr + n);
1941 	n += 2;
1942 	arr[n++] = 3;    /* Port unavailable */
1943 	arr[n++] = 0x08; /* claim: only unavailable paths are supported */
1944 	put_unaligned_be16(port_group_b, arr + n);
1945 	n += 2;
1946 	arr[n++] = 0;    /* Reserved */
1947 	arr[n++] = 0;    /* Status code */
1948 	arr[n++] = 0;    /* Vendor unique */
1949 	arr[n++] = 0x1;  /* One port per group */
1950 	arr[n++] = 0;    /* Reserved */
1951 	arr[n++] = 0;    /* Reserved */
1952 	put_unaligned_be16(port_b, arr + n);
1953 	n += 2;
1954 
1955 	rlen = n - 4;
1956 	put_unaligned_be32(rlen, arr + 0);
1957 
1958 	/*
1959 	 * Return the smallest of:
1960 	 * - the allocation length from the CDB
1961 	 * - the constructed response length
1962 	 * - the maximum array size
1963 	 */
1964 	rlen = min(alen, n);
1965 	ret = fill_from_dev_buffer(scp, arr,
1966 			   min_t(u32, rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
1967 	kfree(arr);
1968 	return ret;
1969 }
1970 
1971 static int resp_rsup_opcodes(struct scsi_cmnd *scp,
1972 			     struct sdebug_dev_info *devip)
1973 {
1974 	bool rctd;
1975 	u8 reporting_opts, req_opcode, sdeb_i, supp;
1976 	u16 req_sa, u;
1977 	u32 alloc_len, a_len;
1978 	int k, offset, len, errsts, count, bump, na;
1979 	const struct opcode_info_t *oip;
1980 	const struct opcode_info_t *r_oip;
1981 	u8 *arr;
1982 	u8 *cmd = scp->cmnd;
1983 
1984 	rctd = !!(cmd[2] & 0x80);
1985 	reporting_opts = cmd[2] & 0x7;
1986 	req_opcode = cmd[3];
1987 	req_sa = get_unaligned_be16(cmd + 4);
1988 	alloc_len = get_unaligned_be32(cmd + 6);
1989 	if (alloc_len < 4 || alloc_len > 0xffff) {
1990 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
1991 		return check_condition_result;
1992 	}
1993 	if (alloc_len > 8192)
1994 		a_len = 8192;
1995 	else
1996 		a_len = alloc_len;
1997 	arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_ATOMIC);
1998 	if (NULL == arr) {
1999 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
2000 				INSUFF_RES_ASCQ);
2001 		return check_condition_result;
2002 	}
2003 	switch (reporting_opts) {
2004 	case 0:	/* all commands */
2005 		/* count number of commands */
2006 		for (count = 0, oip = opcode_info_arr;
2007 		     oip->num_attached != 0xff; ++oip) {
2008 			if (F_INV_OP & oip->flags)
2009 				continue;
2010 			count += (oip->num_attached + 1);
2011 		}
2012 		bump = rctd ? 20 : 8;
2013 		put_unaligned_be32(count * bump, arr);
2014 		for (offset = 4, oip = opcode_info_arr;
2015 		     oip->num_attached != 0xff && offset < a_len; ++oip) {
2016 			if (F_INV_OP & oip->flags)
2017 				continue;
2018 			na = oip->num_attached;
2019 			arr[offset] = oip->opcode;
2020 			put_unaligned_be16(oip->sa, arr + offset + 2);
2021 			if (rctd)
2022 				arr[offset + 5] |= 0x2;
2023 			if (FF_SA & oip->flags)
2024 				arr[offset + 5] |= 0x1;
2025 			put_unaligned_be16(oip->len_mask[0], arr + offset + 6);
2026 			if (rctd)
2027 				put_unaligned_be16(0xa, arr + offset + 8);
2028 			r_oip = oip;
2029 			for (k = 0, oip = oip->arrp; k < na; ++k, ++oip) {
2030 				if (F_INV_OP & oip->flags)
2031 					continue;
2032 				offset += bump;
2033 				arr[offset] = oip->opcode;
2034 				put_unaligned_be16(oip->sa, arr + offset + 2);
2035 				if (rctd)
2036 					arr[offset + 5] |= 0x2;
2037 				if (FF_SA & oip->flags)
2038 					arr[offset + 5] |= 0x1;
2039 				put_unaligned_be16(oip->len_mask[0],
2040 						   arr + offset + 6);
2041 				if (rctd)
2042 					put_unaligned_be16(0xa,
2043 							   arr + offset + 8);
2044 			}
2045 			oip = r_oip;
2046 			offset += bump;
2047 		}
2048 		break;
2049 	case 1:	/* one command: opcode only */
2050 	case 2:	/* one command: opcode plus service action */
2051 	case 3:	/* one command: if sa==0 then opcode only else opcode+sa */
2052 		sdeb_i = opcode_ind_arr[req_opcode];
2053 		oip = &opcode_info_arr[sdeb_i];
2054 		if (F_INV_OP & oip->flags) {
2055 			supp = 1;
2056 			offset = 4;
2057 		} else {
2058 			if (1 == reporting_opts) {
2059 				if (FF_SA & oip->flags) {
2060 					mk_sense_invalid_fld(scp, SDEB_IN_CDB,
2061 							     2, 2);
2062 					kfree(arr);
2063 					return check_condition_result;
2064 				}
2065 				req_sa = 0;
2066 			} else if (2 == reporting_opts &&
2067 				   0 == (FF_SA & oip->flags)) {
2068 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1); /* point at requested sa */
2069 				kfree(arr);
2070 				return check_condition_result;
2071 			}
2072 			if (0 == (FF_SA & oip->flags) &&
2073 			    req_opcode == oip->opcode)
2074 				supp = 3;
2075 			else if (0 == (FF_SA & oip->flags)) {
2076 				na = oip->num_attached;
2077 				for (k = 0, oip = oip->arrp; k < na;
2078 				     ++k, ++oip) {
2079 					if (req_opcode == oip->opcode)
2080 						break;
2081 				}
2082 				supp = (k >= na) ? 1 : 3;
2083 			} else if (req_sa != oip->sa) {
2084 				na = oip->num_attached;
2085 				for (k = 0, oip = oip->arrp; k < na;
2086 				     ++k, ++oip) {
2087 					if (req_sa == oip->sa)
2088 						break;
2089 				}
2090 				supp = (k >= na) ? 1 : 3;
2091 			} else
2092 				supp = 3;
2093 			if (3 == supp) {
2094 				u = oip->len_mask[0];
2095 				put_unaligned_be16(u, arr + 2);
2096 				arr[4] = oip->opcode;
2097 				for (k = 1; k < u; ++k)
2098 					arr[4 + k] = (k < 16) ?
2099 						 oip->len_mask[k] : 0xff;
2100 				offset = 4 + u;
2101 			} else
2102 				offset = 4;
2103 		}
2104 		arr[1] = (rctd ? 0x80 : 0) | supp;
2105 		if (rctd) {
2106 			put_unaligned_be16(0xa, arr + offset);
2107 			offset += 12;
2108 		}
2109 		break;
2110 	default:
2111 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
2112 		kfree(arr);
2113 		return check_condition_result;
2114 	}
2115 	offset = (offset < a_len) ? offset : a_len;
2116 	len = (offset < alloc_len) ? offset : alloc_len;
2117 	errsts = fill_from_dev_buffer(scp, arr, len);
2118 	kfree(arr);
2119 	return errsts;
2120 }
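
/*
 * On the descriptor sizes in the "all commands" case above: each command
 * descriptor is 8 bytes and, when RCTD is set, a 12-byte command timeouts
 * descriptor is appended to each, hence bump = rctd ? 20 : 8. The 0xa
 * written at offset + 8 is the timeouts descriptor length field, which
 * excludes its own two bytes.
 */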
2121 
2122 static int resp_rsup_tmfs(struct scsi_cmnd *scp,
2123 			  struct sdebug_dev_info *devip)
2124 {
2125 	bool repd;
2126 	u32 alloc_len, len;
2127 	u8 arr[16];
2128 	u8 *cmd = scp->cmnd;
2129 
2130 	memset(arr, 0, sizeof(arr));
2131 	repd = !!(cmd[2] & 0x80);
2132 	alloc_len = get_unaligned_be32(cmd + 6);
2133 	if (alloc_len < 4) {
2134 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
2135 		return check_condition_result;
2136 	}
2137 	arr[0] = 0xc8;		/* ATS | ATSS | LURS */
2138 	arr[1] = 0x1;		/* ITNRS */
2139 	if (repd) {
2140 		arr[3] = 0xc;
2141 		len = 16;
2142 	} else
2143 		len = 4;
2144 
2145 	len = (len < alloc_len) ? len : alloc_len;
2146 	return fill_from_dev_buffer(scp, arr, len);
2147 }
2148 
2149 /* <<Following mode page info copied from ST318451LW>> */
2150 
2151 static int resp_err_recov_pg(unsigned char *p, int pcontrol, int target)
2152 {	/* Read-Write Error Recovery page for mode_sense */
2153 	unsigned char err_recov_pg[] = {0x1, 0xa, 0xc0, 11, 240, 0, 0, 0,
2154 					5, 0, 0xff, 0xff};
2155 
2156 	memcpy(p, err_recov_pg, sizeof(err_recov_pg));
2157 	if (1 == pcontrol)
2158 		memset(p + 2, 0, sizeof(err_recov_pg) - 2);
2159 	return sizeof(err_recov_pg);
2160 }
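
/*
 * Note on the pcontrol argument taken by the mode page helpers in this
 * file: 0 requests current values, 1 changeable values (hence the zeroing
 * or the ch_* masks copied over bytes 2 and up), 2 default values and 3
 * saved values; pcontrol == 3 is rejected up front in resp_mode_sense()
 * with SAVING_PARAMS_UNSUP.
 */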
2161 
2162 static int resp_disconnect_pg(unsigned char *p, int pcontrol, int target)
2163 { 	/* Disconnect-Reconnect page for mode_sense */
2164 	unsigned char disconnect_pg[] = {0x2, 0xe, 128, 128, 0, 10, 0, 0,
2165 					 0, 0, 0, 0, 0, 0, 0, 0};
2166 
2167 	memcpy(p, disconnect_pg, sizeof(disconnect_pg));
2168 	if (1 == pcontrol)
2169 		memset(p + 2, 0, sizeof(disconnect_pg) - 2);
2170 	return sizeof(disconnect_pg);
2171 }
2172 
2173 static int resp_format_pg(unsigned char *p, int pcontrol, int target)
2174 {       /* Format device page for mode_sense */
2175 	unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
2176 				     0, 0, 0, 0, 0, 0, 0, 0,
2177 				     0, 0, 0, 0, 0x40, 0, 0, 0};
2178 
2179 	memcpy(p, format_pg, sizeof(format_pg));
2180 	put_unaligned_be16(sdebug_sectors_per, p + 10);
2181 	put_unaligned_be16(sdebug_sector_size, p + 12);
2182 	if (sdebug_removable)
2183 		p[20] |= 0x20; /* should agree with INQUIRY */
2184 	if (1 == pcontrol)
2185 		memset(p + 2, 0, sizeof(format_pg) - 2);
2186 	return sizeof(format_pg);
2187 }
2188 
2189 static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
2190 				     0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,
2191 				     0, 0, 0, 0};
2192 
2193 static int resp_caching_pg(unsigned char *p, int pcontrol, int target)
2194 { 	/* Caching page for mode_sense */
2195 	unsigned char ch_caching_pg[] = {/* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0,
2196 		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
2197 	unsigned char d_caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
2198 		0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,     0, 0, 0, 0};
2199 
2200 	if (SDEBUG_OPT_N_WCE & sdebug_opts)
2201 		caching_pg[2] &= ~0x4;	/* set WCE=0 (default WCE=1) */
2202 	memcpy(p, caching_pg, sizeof(caching_pg));
2203 	if (1 == pcontrol)
2204 		memcpy(p + 2, ch_caching_pg, sizeof(ch_caching_pg));
2205 	else if (2 == pcontrol)
2206 		memcpy(p, d_caching_pg, sizeof(d_caching_pg));
2207 	return sizeof(caching_pg);
2208 }
2209 
2210 static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
2211 				    0, 0, 0x2, 0x4b};
2212 
2213 static int resp_ctrl_m_pg(unsigned char *p, int pcontrol, int target)
2214 { 	/* Control mode page for mode_sense */
2215 	unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
2216 					0, 0, 0, 0};
2217 	unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
2218 				     0, 0, 0x2, 0x4b};
2219 
2220 	if (sdebug_dsense)
2221 		ctrl_m_pg[2] |= 0x4;
2222 	else
2223 		ctrl_m_pg[2] &= ~0x4;
2224 
2225 	if (sdebug_ato)
2226 		ctrl_m_pg[5] |= 0x80; /* ATO=1 */
2227 
2228 	memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
2229 	if (1 == pcontrol)
2230 		memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
2231 	else if (2 == pcontrol)
2232 		memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
2233 	return sizeof(ctrl_m_pg);
2234 }
2235 
2236 
2237 static int resp_iec_m_pg(unsigned char *p, int pcontrol, int target)
2238 {	/* Informational Exceptions control mode page for mode_sense */
2239 	unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
2240 				       0, 0, 0x0, 0x0};
2241 	unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
2242 				      0, 0, 0x0, 0x0};
2243 
2244 	memcpy(p, iec_m_pg, sizeof(iec_m_pg));
2245 	if (1 == pcontrol)
2246 		memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
2247 	else if (2 == pcontrol)
2248 		memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
2249 	return sizeof(iec_m_pg);
2250 }
2251 
2252 static int resp_sas_sf_m_pg(unsigned char *p, int pcontrol, int target)
2253 {	/* SAS SSP mode page - short format for mode_sense */
2254 	unsigned char sas_sf_m_pg[] = {0x19, 0x6,
2255 		0x6, 0x0, 0x7, 0xd0, 0x0, 0x0};
2256 
2257 	memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg));
2258 	if (1 == pcontrol)
2259 		memset(p + 2, 0, sizeof(sas_sf_m_pg) - 2);
2260 	return sizeof(sas_sf_m_pg);
2261 }
2262 
2263 
2264 static int resp_sas_pcd_m_spg(unsigned char *p, int pcontrol, int target,
2265 			      int target_dev_id)
2266 {	/* SAS phy control and discover mode page for mode_sense */
2267 	unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
2268 		    0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
2269 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2270 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2271 		    0x2, 0, 0, 0, 0, 0, 0, 0,
2272 		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
2273 		    0, 0, 0, 0, 0, 0, 0, 0,
2274 		    0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
2275 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2276 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2277 		    0x3, 0, 0, 0, 0, 0, 0, 0,
2278 		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
2279 		    0, 0, 0, 0, 0, 0, 0, 0,
2280 		};
2281 	int port_a, port_b;
2282 
2283 	put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 16);
2284 	put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 24);
2285 	put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 64);
2286 	put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 72);
2287 	port_a = target_dev_id + 1;
2288 	port_b = port_a + 1;
2289 	memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
2290 	put_unaligned_be32(port_a, p + 20);
2291 	put_unaligned_be32(port_b, p + 48 + 20);
2292 	if (1 == pcontrol)
2293 		memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
2294 	return sizeof(sas_pcd_m_pg);
2295 }
2296 
2297 static int resp_sas_sha_m_spg(unsigned char *p, int pcontrol)
2298 {	/* SAS SSP shared protocol specific port mode subpage */
2299 	unsigned char sas_sha_m_pg[] = {0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
2300 		    0, 0, 0, 0, 0, 0, 0, 0,
2301 		};
2302 
2303 	memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg));
2304 	if (1 == pcontrol)
2305 		memset(p + 4, 0, sizeof(sas_sha_m_pg) - 4);
2306 	return sizeof(sas_sha_m_pg);
2307 }
2308 
2309 #define SDEBUG_MAX_MSENSE_SZ 256
2310 
2311 static int resp_mode_sense(struct scsi_cmnd *scp,
2312 			   struct sdebug_dev_info *devip)
2313 {
2314 	int pcontrol, pcode, subpcode, bd_len;
2315 	unsigned char dev_spec;
2316 	u32 alloc_len, offset, len;
2317 	int target_dev_id;
2318 	int target = scp->device->id;
2319 	unsigned char *ap;
2320 	unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
2321 	unsigned char *cmd = scp->cmnd;
2322 	bool dbd, llbaa, msense_6, is_disk, is_zbc, bad_pcode;
2323 
2324 	dbd = !!(cmd[1] & 0x8);		/* disable block descriptors */
2325 	pcontrol = (cmd[2] & 0xc0) >> 6;
2326 	pcode = cmd[2] & 0x3f;
2327 	subpcode = cmd[3];
2328 	msense_6 = (MODE_SENSE == cmd[0]);
2329 	llbaa = msense_6 ? false : !!(cmd[1] & 0x10);
2330 	is_disk = (sdebug_ptype == TYPE_DISK);
2331 	is_zbc = (devip->zmodel != BLK_ZONED_NONE);
2332 	if ((is_disk || is_zbc) && !dbd)
2333 		bd_len = llbaa ? 16 : 8;
2334 	else
2335 		bd_len = 0;
2336 	alloc_len = msense_6 ? cmd[4] : get_unaligned_be16(cmd + 7);
2337 	memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
2338 	if (0x3 == pcontrol) {  /* Saving values not supported */
2339 		mk_sense_buffer(scp, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, 0);
2340 		return check_condition_result;
2341 	}
2342 	target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
2343 			(devip->target * 1000) - 3;
2344 	/* for disks+zbc set DPOFUA bit and clear write protect (WP) bit */
2345 	if (is_disk || is_zbc) {
2346 		dev_spec = 0x10;	/* =0x90 if WP=1 implies read-only */
2347 		if (sdebug_wp)
2348 			dev_spec |= 0x80;
2349 	} else
2350 		dev_spec = 0x0;
2351 	if (msense_6) {
2352 		arr[2] = dev_spec;
2353 		arr[3] = bd_len;
2354 		offset = 4;
2355 	} else {
2356 		arr[3] = dev_spec;
2357 		if (16 == bd_len)
2358 			arr[4] = 0x1;	/* set LONGLBA bit */
2359 		arr[7] = bd_len;	/* assume 255 or less */
2360 		offset = 8;
2361 	}
2362 	ap = arr + offset;
2363 	if ((bd_len > 0) && (!sdebug_capacity))
2364 		sdebug_capacity = get_sdebug_capacity();
2365 
2366 	if (8 == bd_len) {
2367 		if (sdebug_capacity > 0xfffffffe)
2368 			put_unaligned_be32(0xffffffff, ap + 0);
2369 		else
2370 			put_unaligned_be32(sdebug_capacity, ap + 0);
2371 		put_unaligned_be16(sdebug_sector_size, ap + 6);
2372 		offset += bd_len;
2373 		ap = arr + offset;
2374 	} else if (16 == bd_len) {
2375 		put_unaligned_be64((u64)sdebug_capacity, ap + 0);
2376 		put_unaligned_be32(sdebug_sector_size, ap + 12);
2377 		offset += bd_len;
2378 		ap = arr + offset;
2379 	}
2380 
2381 	if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) {
2382 		/* TODO: Control Extension page */
2383 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2384 		return check_condition_result;
2385 	}
2386 	bad_pcode = false;
2387 
2388 	switch (pcode) {
2389 	case 0x1:	/* Read-Write error recovery page, direct access */
2390 		len = resp_err_recov_pg(ap, pcontrol, target);
2391 		offset += len;
2392 		break;
2393 	case 0x2:	/* Disconnect-Reconnect page, all devices */
2394 		len = resp_disconnect_pg(ap, pcontrol, target);
2395 		offset += len;
2396 		break;
2397 	case 0x3:       /* Format device page, direct access */
2398 		if (is_disk) {
2399 			len = resp_format_pg(ap, pcontrol, target);
2400 			offset += len;
2401 		} else
2402 			bad_pcode = true;
2403 		break;
2404 	case 0x8:	/* Caching page, direct access */
2405 		if (is_disk || is_zbc) {
2406 			len = resp_caching_pg(ap, pcontrol, target);
2407 			offset += len;
2408 		} else
2409 			bad_pcode = true;
2410 		break;
2411 	case 0xa:	/* Control Mode page, all devices */
2412 		len = resp_ctrl_m_pg(ap, pcontrol, target);
2413 		offset += len;
2414 		break;
2415 	case 0x19:	/* if spc==1 then sas phy, control+discover */
2416 		if ((subpcode > 0x2) && (subpcode < 0xff)) {
2417 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2418 			return check_condition_result;
2419 		}
2420 		len = 0;
2421 		if ((0x0 == subpcode) || (0xff == subpcode))
2422 			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2423 		if ((0x1 == subpcode) || (0xff == subpcode))
2424 			len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
2425 						  target_dev_id);
2426 		if ((0x2 == subpcode) || (0xff == subpcode))
2427 			len += resp_sas_sha_m_spg(ap + len, pcontrol);
2428 		offset += len;
2429 		break;
2430 	case 0x1c:	/* Informational Exceptions Mode page, all devices */
2431 		len = resp_iec_m_pg(ap, pcontrol, target);
2432 		offset += len;
2433 		break;
2434 	case 0x3f:	/* Read all Mode pages */
2435 		if ((0 == subpcode) || (0xff == subpcode)) {
2436 			len = resp_err_recov_pg(ap, pcontrol, target);
2437 			len += resp_disconnect_pg(ap + len, pcontrol, target);
2438 			if (is_disk) {
2439 				len += resp_format_pg(ap + len, pcontrol,
2440 						      target);
2441 				len += resp_caching_pg(ap + len, pcontrol,
2442 						       target);
2443 			} else if (is_zbc) {
2444 				len += resp_caching_pg(ap + len, pcontrol,
2445 						       target);
2446 			}
2447 			len += resp_ctrl_m_pg(ap + len, pcontrol, target);
2448 			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2449 			if (0xff == subpcode) {
2450 				len += resp_sas_pcd_m_spg(ap + len, pcontrol,
2451 						  target, target_dev_id);
2452 				len += resp_sas_sha_m_spg(ap + len, pcontrol);
2453 			}
2454 			len += resp_iec_m_pg(ap + len, pcontrol, target);
2455 			offset += len;
2456 		} else {
2457 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2458 			return check_condition_result;
2459 		}
2460 		break;
2461 	default:
2462 		bad_pcode = true;
2463 		break;
2464 	}
2465 	if (bad_pcode) {
2466 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2467 		return check_condition_result;
2468 	}
2469 	if (msense_6)
2470 		arr[0] = offset - 1;
2471 	else
2472 		put_unaligned_be16((offset - 2), arr + 0);
2473 	return fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, offset));
2474 }
2475 
2476 #define SDEBUG_MAX_MSELECT_SZ 512
2477 
2478 static int resp_mode_select(struct scsi_cmnd *scp,
2479 			    struct sdebug_dev_info *devip)
2480 {
2481 	int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
2482 	int param_len, res, mpage;
2483 	unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
2484 	unsigned char *cmd = scp->cmnd;
2485 	int mselect6 = (MODE_SELECT == cmd[0]);
2486 
2487 	memset(arr, 0, sizeof(arr));
2488 	pf = cmd[1] & 0x10;
2489 	sp = cmd[1] & 0x1;
2490 	param_len = mselect6 ? cmd[4] : get_unaligned_be16(cmd + 7);
2491 	if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
2492 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1);
2493 		return check_condition_result;
2494 	}
2495 	res = fetch_to_dev_buffer(scp, arr, param_len);
2496 	if (-1 == res)
2497 		return DID_ERROR << 16;
2498 	else if (sdebug_verbose && (res < param_len))
2499 		sdev_printk(KERN_INFO, scp->device,
2500 			    "%s: cdb indicated=%d, IO sent=%d bytes\n",
2501 			    __func__, param_len, res);
2502 	md_len = mselect6 ? (arr[0] + 1) : (get_unaligned_be16(arr + 0) + 2);
2503 	bd_len = mselect6 ? arr[3] : get_unaligned_be16(arr + 6);
2504 	off = bd_len + (mselect6 ? 4 : 8);
2505 	if (md_len > 2 || off >= res) {
2506 		mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
2507 		return check_condition_result;
2508 	}
2509 	mpage = arr[off] & 0x3f;
2510 	ps = !!(arr[off] & 0x80);
2511 	if (ps) {
2512 		mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 7);
2513 		return check_condition_result;
2514 	}
2515 	spf = !!(arr[off] & 0x40);
2516 	pg_len = spf ? (get_unaligned_be16(arr + off + 2) + 4) :
2517 		       (arr[off + 1] + 2);
2518 	if ((pg_len + off) > param_len) {
2519 		mk_sense_buffer(scp, ILLEGAL_REQUEST,
2520 				PARAMETER_LIST_LENGTH_ERR, 0);
2521 		return check_condition_result;
2522 	}
2523 	switch (mpage) {
2524 	case 0x8:      /* Caching Mode page */
2525 		if (caching_pg[1] == arr[off + 1]) {
2526 			memcpy(caching_pg + 2, arr + off + 2,
2527 			       sizeof(caching_pg) - 2);
2528 			goto set_mode_changed_ua;
2529 		}
2530 		break;
2531 	case 0xa:      /* Control Mode page */
2532 		if (ctrl_m_pg[1] == arr[off + 1]) {
2533 			memcpy(ctrl_m_pg + 2, arr + off + 2,
2534 			       sizeof(ctrl_m_pg) - 2);
2535 			sdebug_wp = !!(ctrl_m_pg[4] & 0x8);
2539 			sdebug_dsense = !!(ctrl_m_pg[2] & 0x4);
2540 			goto set_mode_changed_ua;
2541 		}
2542 		break;
2543 	case 0x1c:      /* Informational Exceptions Mode page */
2544 		if (iec_m_pg[1] == arr[off + 1]) {
2545 			memcpy(iec_m_pg + 2, arr + off + 2,
2546 			       sizeof(iec_m_pg) - 2);
2547 			goto set_mode_changed_ua;
2548 		}
2549 		break;
2550 	default:
2551 		break;
2552 	}
2553 	mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 5);
2554 	return check_condition_result;
2555 set_mode_changed_ua:
2556 	set_bit(SDEBUG_UA_MODE_CHANGED, devip->uas_bm);
2557 	return 0;
2558 }
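
/*
 * Illustrative user-space round trip for the MODE SELECT handling above,
 * assuming the sdparm utility is available:
 *
 *	sdparm --set WCE /dev/sdX
 *
 * rewrites the caching mode page, which lands in the 0x8 case and raises
 * the MODE PARAMETERS CHANGED unit attention via set_mode_changed_ua.
 */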
2559 
2560 static int resp_temp_l_pg(unsigned char *arr)
2561 {
2562 	unsigned char temp_l_pg[] = {0x0, 0x0, 0x3, 0x2, 0x0, 38,
2563 				     0x0, 0x1, 0x3, 0x2, 0x0, 65,
2564 		};
2565 
2566 	memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
2567 	return sizeof(temp_l_pg);
2568 }
2569 
2570 static int resp_ie_l_pg(unsigned char *arr)
2571 {
2572 	unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
2573 		};
2574 
2575 	memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
2576 	if (iec_m_pg[2] & 0x4) {	/* TEST bit set */
2577 		arr[4] = THRESHOLD_EXCEEDED;
2578 		arr[5] = 0xff;
2579 	}
2580 	return sizeof(ie_l_pg);
2581 }
2582 
2583 #define SDEBUG_MAX_LSENSE_SZ 512
2584 
2585 static int resp_log_sense(struct scsi_cmnd *scp,
2586 			  struct sdebug_dev_info *devip)
2587 {
2588 	int ppc, sp, pcode, subpcode;
2589 	u32 alloc_len, len, n;
2590 	unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
2591 	unsigned char *cmd = scp->cmnd;
2592 
2593 	memset(arr, 0, sizeof(arr));
2594 	ppc = cmd[1] & 0x2;
2595 	sp = cmd[1] & 0x1;
2596 	if (ppc || sp) {
2597 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, ppc ? 1 : 0);
2598 		return check_condition_result;
2599 	}
2600 	pcode = cmd[2] & 0x3f;
2601 	subpcode = cmd[3] & 0xff;
2602 	alloc_len = get_unaligned_be16(cmd + 7);
2603 	arr[0] = pcode;
2604 	if (0 == subpcode) {
2605 		switch (pcode) {
2606 		case 0x0:	/* Supported log pages log page */
2607 			n = 4;
2608 			arr[n++] = 0x0;		/* this page */
2609 			arr[n++] = 0xd;		/* Temperature */
2610 			arr[n++] = 0x2f;	/* Informational exceptions */
2611 			arr[3] = n - 4;
2612 			break;
2613 		case 0xd:	/* Temperature log page */
2614 			arr[3] = resp_temp_l_pg(arr + 4);
2615 			break;
2616 		case 0x2f:	/* Informational exceptions log page */
2617 			arr[3] = resp_ie_l_pg(arr + 4);
2618 			break;
2619 		default:
2620 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2621 			return check_condition_result;
2622 		}
2623 	} else if (0xff == subpcode) {
2624 		arr[0] |= 0x40;
2625 		arr[1] = subpcode;
2626 		switch (pcode) {
2627 		case 0x0:	/* Supported log pages and subpages log page */
2628 			n = 4;
2629 			arr[n++] = 0x0;
2630 			arr[n++] = 0x0;		/* 0,0 page */
2631 			arr[n++] = 0x0;
2632 			arr[n++] = 0xff;	/* this page */
2633 			arr[n++] = 0xd;
2634 			arr[n++] = 0x0;		/* Temperature */
2635 			arr[n++] = 0x2f;
2636 			arr[n++] = 0x0;	/* Informational exceptions */
2637 			arr[3] = n - 4;
2638 			break;
2639 		case 0xd:	/* Temperature subpages */
2640 			n = 4;
2641 			arr[n++] = 0xd;
2642 			arr[n++] = 0x0;		/* Temperature */
2643 			arr[3] = n - 4;
2644 			break;
2645 		case 0x2f:	/* Informational exceptions subpages */
2646 			n = 4;
2647 			arr[n++] = 0x2f;
2648 			arr[n++] = 0x0;		/* Informational exceptions */
2649 			arr[3] = n - 4;
2650 			break;
2651 		default:
2652 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2653 			return check_condition_result;
2654 		}
2655 	} else {
2656 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2657 		return check_condition_result;
2658 	}
2659 	len = min_t(u32, get_unaligned_be16(arr + 2) + 4, alloc_len);
2660 	return fill_from_dev_buffer(scp, arr,
2661 		    min_t(u32, len, SDEBUG_MAX_LSENSE_SZ));
2662 }
2663 
2664 static inline bool sdebug_dev_is_zoned(struct sdebug_dev_info *devip)
2665 {
2666 	return devip->nr_zones != 0;
2667 }
2668 
2669 static struct sdeb_zone_state *zbc_zone(struct sdebug_dev_info *devip,
2670 					unsigned long long lba)
2671 {
2672 	return &devip->zstate[lba >> devip->zsize_shift];
2673 }
2674 
2675 static inline bool zbc_zone_is_conv(struct sdeb_zone_state *zsp)
2676 {
2677 	return zsp->z_type == ZBC_ZONE_TYPE_CNV;
2678 }
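
/*
 * Sketch of the zone condition transitions implemented by the helpers
 * below (conventional zones are excluded up front by zbc_zone_is_conv()):
 *
 *	EMPTY --open/write--> IMPLICIT or EXPLICIT OPEN --close--> CLOSED
 *	CLOSED --open/write--> IMPLICIT or EXPLICIT OPEN
 *	open zone, write pointer reaches zone end --> FULL
 *	close with the write pointer still at z_start --> back to EMPTY
 */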
2679 
2680 static void zbc_close_zone(struct sdebug_dev_info *devip,
2681 			   struct sdeb_zone_state *zsp)
2682 {
2683 	enum sdebug_z_cond zc;
2684 
2685 	if (zbc_zone_is_conv(zsp))
2686 		return;
2687 
2688 	zc = zsp->z_cond;
2689 	if (!(zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN))
2690 		return;
2691 
2692 	if (zc == ZC2_IMPLICIT_OPEN)
2693 		devip->nr_imp_open--;
2694 	else
2695 		devip->nr_exp_open--;
2696 
2697 	if (zsp->z_wp == zsp->z_start) {
2698 		zsp->z_cond = ZC1_EMPTY;
2699 	} else {
2700 		zsp->z_cond = ZC4_CLOSED;
2701 		devip->nr_closed++;
2702 	}
2703 }
2704 
2705 static void zbc_close_imp_open_zone(struct sdebug_dev_info *devip)
2706 {
2707 	struct sdeb_zone_state *zsp = &devip->zstate[0];
2708 	unsigned int i;
2709 
2710 	for (i = 0; i < devip->nr_zones; i++, zsp++) {
2711 		if (zsp->z_cond == ZC2_IMPLICIT_OPEN) {
2712 			zbc_close_zone(devip, zsp);
2713 			return;
2714 		}
2715 	}
2716 }
2717 
2718 static void zbc_open_zone(struct sdebug_dev_info *devip,
2719 			  struct sdeb_zone_state *zsp, bool explicit)
2720 {
2721 	enum sdebug_z_cond zc;
2722 
2723 	if (zbc_zone_is_conv(zsp))
2724 		return;
2725 
2726 	zc = zsp->z_cond;
2727 	if ((explicit && zc == ZC3_EXPLICIT_OPEN) ||
2728 	    (!explicit && zc == ZC2_IMPLICIT_OPEN))
2729 		return;
2730 
2731 	/* Close an implicit open zone if necessary */
2732 	if (explicit && zsp->z_cond == ZC2_IMPLICIT_OPEN)
2733 		zbc_close_zone(devip, zsp);
2734 	else if (devip->max_open &&
2735 		 devip->nr_imp_open + devip->nr_exp_open >= devip->max_open)
2736 		zbc_close_imp_open_zone(devip);
2737 
2738 	if (zsp->z_cond == ZC4_CLOSED)
2739 		devip->nr_closed--;
2740 	if (explicit) {
2741 		zsp->z_cond = ZC3_EXPLICIT_OPEN;
2742 		devip->nr_exp_open++;
2743 	} else {
2744 		zsp->z_cond = ZC2_IMPLICIT_OPEN;
2745 		devip->nr_imp_open++;
2746 	}
2747 }
2748 
2749 static void zbc_inc_wp(struct sdebug_dev_info *devip,
2750 		       unsigned long long lba, unsigned int num)
2751 {
2752 	struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
2753 	unsigned long long n, end, zend = zsp->z_start + zsp->z_size;
2754 
2755 	if (zbc_zone_is_conv(zsp))
2756 		return;
2757 
2758 	if (zsp->z_type == ZBC_ZONE_TYPE_SWR) {
2759 		zsp->z_wp += num;
2760 		if (zsp->z_wp >= zend)
2761 			zsp->z_cond = ZC5_FULL;
2762 		return;
2763 	}
2764 
2765 	while (num) {
2766 		if (lba != zsp->z_wp)
2767 			zsp->z_non_seq_resource = true;
2768 
2769 		end = lba + num;
2770 		if (end >= zend) {
2771 			n = zend - lba;
2772 			zsp->z_wp = zend;
2773 		} else if (end > zsp->z_wp) {
2774 			n = num;
2775 			zsp->z_wp = end;
2776 		} else {
2777 			n = num;
2778 		}
2779 		if (zsp->z_wp >= zend)
2780 			zsp->z_cond = ZC5_FULL;
2781 
2782 		num -= n;
2783 		lba += n;
2784 		if (num) {
2785 			zsp++;
2786 			zend = zsp->z_start + zsp->z_size;
2787 		}
2788 	}
2789 }
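
/*
 * Worked example for zbc_inc_wp() above, assuming 128-block zones: a
 * 16-block write at the write pointer of a sequential-write-required
 * zone moves z_wp from, say, 112 to 128 == zend and the zone becomes
 * ZC5_FULL. The while loop only runs for the sequential write preferred
 * zone type, where writes may land away from the write pointer and may
 * span zones; any write not starting at z_wp marks the zone as a
 * non-sequential-write resource.
 */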
2790 
2791 static int check_zbc_access_params(struct scsi_cmnd *scp,
2792 			unsigned long long lba, unsigned int num, bool write)
2793 {
2794 	struct scsi_device *sdp = scp->device;
2795 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
2796 	struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
2797 	struct sdeb_zone_state *zsp_end = zbc_zone(devip, lba + num - 1);
2798 
2799 	if (!write) {
2800 		if (devip->zmodel == BLK_ZONED_HA)
2801 			return 0;
2802 		/* For host-managed, reads cannot cross zone types boundaries */
2803 		if (zsp_end != zsp &&
2804 		    zbc_zone_is_conv(zsp) &&
2805 		    !zbc_zone_is_conv(zsp_end)) {
2806 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
2807 					LBA_OUT_OF_RANGE,
2808 					READ_INVDATA_ASCQ);
2809 			return check_condition_result;
2810 		}
2811 		return 0;
2812 	}
2813 
2814 	/* No restrictions for writes within conventional zones */
2815 	if (zbc_zone_is_conv(zsp)) {
2816 		if (!zbc_zone_is_conv(zsp_end)) {
2817 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
2818 					LBA_OUT_OF_RANGE,
2819 					WRITE_BOUNDARY_ASCQ);
2820 			return check_condition_result;
2821 		}
2822 		return 0;
2823 	}
2824 
2825 	if (zsp->z_type == ZBC_ZONE_TYPE_SWR) {
2826 		/* Writes cannot cross sequential zone boundaries */
2827 		if (zsp_end != zsp) {
2828 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
2829 					LBA_OUT_OF_RANGE,
2830 					WRITE_BOUNDARY_ASCQ);
2831 			return check_condition_result;
2832 		}
2833 		/* Cannot write full zones */
2834 		if (zsp->z_cond == ZC5_FULL) {
2835 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
2836 					INVALID_FIELD_IN_CDB, 0);
2837 			return check_condition_result;
2838 		}
2839 		/* Writes must be aligned to the zone WP */
2840 		if (lba != zsp->z_wp) {
2841 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
2842 					LBA_OUT_OF_RANGE,
2843 					UNALIGNED_WRITE_ASCQ);
2844 			return check_condition_result;
2845 		}
2846 	}
2847 
2848 	/* Handle implicit open of closed and empty zones */
2849 	if (zsp->z_cond == ZC1_EMPTY || zsp->z_cond == ZC4_CLOSED) {
2850 		if (devip->max_open &&
2851 		    devip->nr_exp_open >= devip->max_open) {
2852 			mk_sense_buffer(scp, DATA_PROTECT,
2853 					INSUFF_RES_ASC,
2854 					INSUFF_ZONE_ASCQ);
2855 			return check_condition_result;
2856 		}
2857 		zbc_open_zone(devip, zsp, false);
2858 	}
2859 
2860 	return 0;
2861 }
2862 
2863 static inline int check_device_access_params
2864 			(struct scsi_cmnd *scp, unsigned long long lba,
2865 			 unsigned int num, bool write)
2866 {
2867 	struct scsi_device *sdp = scp->device;
2868 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
2869 
2870 	if (lba + num > sdebug_capacity) {
2871 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
2872 		return check_condition_result;
2873 	}
2874 	/* transfer length excessive (tie in to block limits VPD page) */
2875 	if (num > sdebug_store_sectors) {
2876 		/* needs work to find which cdb byte 'num' comes from */
2877 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
2878 		return check_condition_result;
2879 	}
2880 	if (write && unlikely(sdebug_wp)) {
2881 		mk_sense_buffer(scp, DATA_PROTECT, WRITE_PROTECTED, 0x2);
2882 		return check_condition_result;
2883 	}
2884 	if (sdebug_dev_is_zoned(devip))
2885 		return check_zbc_access_params(scp, lba, num, write);
2886 
2887 	return 0;
2888 }
2889 
2890 /*
2891  * Note: if BUG_ON() fires it usually indicates a problem with the parser
2892  * tables. Perhaps a missing F_FAKE_RW or FF_MEDIA_IO flag. Response functions
2893  * that access any of the "stores" in struct sdeb_store_info should call this
2894  * function with bug_if_fake_rw set to true.
2895  */
2896 static inline struct sdeb_store_info *devip2sip(struct sdebug_dev_info *devip,
2897 						bool bug_if_fake_rw)
2898 {
2899 	if (sdebug_fake_rw) {
2900 		BUG_ON(bug_if_fake_rw);	/* See note above */
2901 		return NULL;
2902 	}
2903 	return xa_load(per_store_ap, devip->sdbg_host->si_idx);
2904 }
2905 
2906 /* Returns number of bytes copied or -1 if error. */
2907 static int do_device_access(struct sdeb_store_info *sip, struct scsi_cmnd *scp,
2908 			    u32 sg_skip, u64 lba, u32 num, bool do_write)
2909 {
2910 	int ret;
2911 	u64 block, rest = 0;
2912 	enum dma_data_direction dir;
2913 	struct scsi_data_buffer *sdb = &scp->sdb;
2914 	u8 *fsp;
2915 
2916 	if (do_write) {
2917 		dir = DMA_TO_DEVICE;
2918 		write_since_sync = true;
2919 	} else {
2920 		dir = DMA_FROM_DEVICE;
2921 	}
2922 
2923 	if (!sdb->length || !sip)
2924 		return 0;
2925 	if (scp->sc_data_direction != dir)
2926 		return -1;
2927 	fsp = sip->storep;
2928 
2929 	block = do_div(lba, sdebug_store_sectors);
2930 	if (block + num > sdebug_store_sectors)
2931 		rest = block + num - sdebug_store_sectors;
2932 
2933 	ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
2934 		   fsp + (block * sdebug_sector_size),
2935 		   (num - rest) * sdebug_sector_size, sg_skip, do_write);
2936 	if (ret != (num - rest) * sdebug_sector_size)
2937 		return ret;
2938 
2939 	if (rest) {
2940 		ret += sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
2941 			    fsp, rest * sdebug_sector_size,
2942 			    sg_skip + ((num - rest) * sdebug_sector_size),
2943 			    do_write);
2944 	}
2945 
2946 	return ret;
2947 }
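
/*
 * Wraparound example for do_device_access() above: do_div() reduces lba
 * modulo sdebug_store_sectors (the remainder becomes the block offset),
 * so with a 1000-sector store, block == 990 and num == 20 give
 * rest == 10: sectors 990..999 are copied first, then the remaining 10
 * sectors from the start of the store.
 */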
2948 
2949 /* Returns number of bytes copied or -1 if error. */
2950 static int do_dout_fetch(struct scsi_cmnd *scp, u32 num, u8 *doutp)
2951 {
2952 	struct scsi_data_buffer *sdb = &scp->sdb;
2953 
2954 	if (!sdb->length)
2955 		return 0;
2956 	if (scp->sc_data_direction != DMA_TO_DEVICE)
2957 		return -1;
2958 	return sg_copy_buffer(sdb->table.sgl, sdb->table.nents, doutp,
2959 			      num * sdebug_sector_size, 0, true);
2960 }
2961 
2962 /* If the store at lba compares equal to the first num blocks of arr, then
2963  * copy the top half of arr (the second num blocks) into the store at lba
2964  * and return true. If the comparison fails then return false. */
2965 static bool comp_write_worker(struct sdeb_store_info *sip, u64 lba, u32 num,
2966 			      const u8 *arr, bool compare_only)
2967 {
2968 	bool res;
2969 	u64 block, rest = 0;
2970 	u32 store_blks = sdebug_store_sectors;
2971 	u32 lb_size = sdebug_sector_size;
2972 	u8 *fsp = sip->storep;
2973 
2974 	block = do_div(lba, store_blks);
2975 	if (block + num > store_blks)
2976 		rest = block + num - store_blks;
2977 
2978 	res = !memcmp(fsp + (block * lb_size), arr, (num - rest) * lb_size);
2979 	if (!res)
2980 		return res;
2981 	if (rest)
2982 		res = !memcmp(fsp, arr + ((num - rest) * lb_size),
2983 			      rest * lb_size);
2984 	if (!res)
2985 		return res;
2986 	if (compare_only)
2987 		return true;
2988 	arr += num * lb_size;
2989 	memcpy(fsp + (block * lb_size), arr, (num - rest) * lb_size);
2990 	if (rest)
2991 		memcpy(fsp, arr + ((num - rest) * lb_size), rest * lb_size);
2992 	return res;
2993 }
2994 
2995 static __be16 dif_compute_csum(const void *buf, int len)
2996 {
2997 	__be16 csum;
2998 
2999 	if (sdebug_guard)
3000 		csum = (__force __be16)ip_compute_csum(buf, len);
3001 	else
3002 		csum = cpu_to_be16(crc_t10dif(buf, len));
3003 
3004 	return csum;
3005 }
3006 
3007 static int dif_verify(struct t10_pi_tuple *sdt, const void *data,
3008 		      sector_t sector, u32 ei_lba)
3009 {
3010 	__be16 csum = dif_compute_csum(data, sdebug_sector_size);
3011 
3012 	if (sdt->guard_tag != csum) {
3013 		pr_err("GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
3014 			(unsigned long)sector,
3015 			be16_to_cpu(sdt->guard_tag),
3016 			be16_to_cpu(csum));
3017 		return 0x01;
3018 	}
3019 	if (sdebug_dif == T10_PI_TYPE1_PROTECTION &&
3020 	    be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
3021 		pr_err("REF check failed on sector %lu\n",
3022 			(unsigned long)sector);
3023 		return 0x03;
3024 	}
3025 	if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3026 	    be32_to_cpu(sdt->ref_tag) != ei_lba) {
3027 		pr_err("REF check failed on sector %lu\n",
3028 			(unsigned long)sector);
3029 		return 0x03;
3030 	}
3031 	return 0;
3032 }
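
/*
 * The 0x01 and 0x03 values returned above correspond to the ASCQ codes
 * of ASC 0x10 (LOGICAL BLOCK GUARD CHECK FAILED and LOGICAL BLOCK
 * REFERENCE TAG CHECK FAILED, respectively), which callers pass to
 * mk_sense_buffer() when a protection information check fails.
 */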
3033 
3034 static void dif_copy_prot(struct scsi_cmnd *scp, sector_t sector,
3035 			  unsigned int sectors, bool read)
3036 {
3037 	size_t resid;
3038 	void *paddr;
3039 	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3040 						scp->device->hostdata, true);
3041 	struct t10_pi_tuple *dif_storep = sip->dif_storep;
3042 	const void *dif_store_end = dif_storep + sdebug_store_sectors;
3043 	struct sg_mapping_iter miter;
3044 
3045 	/* Bytes of protection data to copy into sgl */
3046 	resid = sectors * sizeof(*dif_storep);
3047 
3048 	sg_miter_start(&miter, scsi_prot_sglist(scp),
3049 		       scsi_prot_sg_count(scp), SG_MITER_ATOMIC |
3050 		       (read ? SG_MITER_TO_SG : SG_MITER_FROM_SG));
3051 
3052 	while (sg_miter_next(&miter) && resid > 0) {
3053 		size_t len = min_t(size_t, miter.length, resid);
3054 		void *start = dif_store(sip, sector);
3055 		size_t rest = 0;
3056 
3057 		if (dif_store_end < start + len)
3058 			rest = start + len - dif_store_end;
3059 
3060 		paddr = miter.addr;
3061 
3062 		if (read)
3063 			memcpy(paddr, start, len - rest);
3064 		else
3065 			memcpy(start, paddr, len - rest);
3066 
3067 		if (rest) {
3068 			if (read)
3069 				memcpy(paddr + len - rest, dif_storep, rest);
3070 			else
3071 				memcpy(dif_storep, paddr + len - rest, rest);
3072 		}
3073 
3074 		sector += len / sizeof(*dif_storep);
3075 		resid -= len;
3076 	}
3077 	sg_miter_stop(&miter);
3078 }
3079 
3080 static int prot_verify_read(struct scsi_cmnd *scp, sector_t start_sec,
3081 			    unsigned int sectors, u32 ei_lba)
3082 {
3083 	int ret = 0;
3084 	unsigned int i;
3085 	sector_t sector;
3086 	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3087 						scp->device->hostdata, true);
3088 	struct t10_pi_tuple *sdt;
3089 
3090 	for (i = 0; i < sectors; i++, ei_lba++) {
3091 		sector = start_sec + i;
3092 		sdt = dif_store(sip, sector);
3093 
3094 		if (sdt->app_tag == cpu_to_be16(0xffff))
3095 			continue;
3096 
3097 		/*
3098 		 * Because scsi_debug acts as both initiator and
3099 		 * target we proceed to verify the PI even if
3100 		 * RDPROTECT=3. This is done so the "initiator" knows
3101 		 * which type of error to return. Otherwise we would
3102 		 * have to iterate over the PI twice.
3103 		 */
3104 		if (scp->cmnd[1] >> 5) { /* RDPROTECT */
3105 			ret = dif_verify(sdt, lba2fake_store(sip, sector),
3106 					 sector, ei_lba);
3107 			if (ret) {
3108 				dif_errors++;
3109 				break;
3110 			}
3111 		}
3112 	}
3113 
3114 	dif_copy_prot(scp, start_sec, sectors, true);
3115 	dix_reads++;
3116 
3117 	return ret;
3118 }
3119 
3120 static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3121 {
3122 	bool check_prot;
3123 	u32 num;
3124 	u32 ei_lba;
3125 	int ret;
3126 	u64 lba;
3127 	struct sdeb_store_info *sip = devip2sip(devip, true);
3128 	rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
3129 	u8 *cmd = scp->cmnd;
3130 
3131 	switch (cmd[0]) {
3132 	case READ_16:
3133 		ei_lba = 0;
3134 		lba = get_unaligned_be64(cmd + 2);
3135 		num = get_unaligned_be32(cmd + 10);
3136 		check_prot = true;
3137 		break;
3138 	case READ_10:
3139 		ei_lba = 0;
3140 		lba = get_unaligned_be32(cmd + 2);
3141 		num = get_unaligned_be16(cmd + 7);
3142 		check_prot = true;
3143 		break;
3144 	case READ_6:
3145 		ei_lba = 0;
3146 		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
3147 		      (u32)(cmd[1] & 0x1f) << 16;
3148 		num = (0 == cmd[4]) ? 256 : cmd[4];
3149 		check_prot = true;
3150 		break;
3151 	case READ_12:
3152 		ei_lba = 0;
3153 		lba = get_unaligned_be32(cmd + 2);
3154 		num = get_unaligned_be32(cmd + 6);
3155 		check_prot = true;
3156 		break;
3157 	case XDWRITEREAD_10:
3158 		ei_lba = 0;
3159 		lba = get_unaligned_be32(cmd + 2);
3160 		num = get_unaligned_be16(cmd + 7);
3161 		check_prot = false;
3162 		break;
3163 	default:	/* assume READ(32) */
3164 		lba = get_unaligned_be64(cmd + 12);
3165 		ei_lba = get_unaligned_be32(cmd + 20);
3166 		num = get_unaligned_be32(cmd + 28);
3167 		check_prot = false;
3168 		break;
3169 	}
3170 	if (unlikely(have_dif_prot && check_prot)) {
3171 		if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3172 		    (cmd[1] & 0xe0)) {
3173 			mk_sense_invalid_opcode(scp);
3174 			return check_condition_result;
3175 		}
3176 		if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3177 		     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3178 		    (cmd[1] & 0xe0) == 0)
3179 			sdev_printk(KERN_ERR, scp->device, "Unprotected RD "
3180 				    "to DIF device\n");
3181 	}
3182 	if (unlikely((sdebug_opts & SDEBUG_OPT_SHORT_TRANSFER) &&
3183 		     atomic_read(&sdeb_inject_pending))) {
3184 		num /= 2;
3185 		atomic_set(&sdeb_inject_pending, 0);
3186 	}
3187 
3188 	ret = check_device_access_params(scp, lba, num, false);
3189 	if (ret)
3190 		return ret;
3191 	if (unlikely((SDEBUG_OPT_MEDIUM_ERR & sdebug_opts) &&
3192 		     (lba <= (sdebug_medium_error_start + sdebug_medium_error_count - 1)) &&
3193 		     ((lba + num) > sdebug_medium_error_start))) {
3194 		/* claim unrecoverable read error */
3195 		mk_sense_buffer(scp, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
3196 		/* set info field and valid bit for fixed descriptor */
3197 		if (0x70 == (scp->sense_buffer[0] & 0x7f)) {
3198 			scp->sense_buffer[0] |= 0x80;	/* Valid bit */
3199 			ret = (lba < OPT_MEDIUM_ERR_ADDR)
3200 			      ? OPT_MEDIUM_ERR_ADDR : (int)lba;
3201 			put_unaligned_be32(ret, scp->sense_buffer + 3);
3202 		}
3203 		scsi_set_resid(scp, scsi_bufflen(scp));
3204 		return check_condition_result;
3205 	}
3206 
3207 	read_lock(macc_lckp);
3208 
3209 	/* DIX + T10 DIF */
3210 	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3211 		switch (prot_verify_read(scp, lba, num, ei_lba)) {
3212 		case 1: /* Guard tag error */
3213 			if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */
3214 				read_unlock(macc_lckp);
3215 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3216 				return check_condition_result;
3217 			} else if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) {
3218 				read_unlock(macc_lckp);
3219 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3220 				return illegal_condition_result;
3221 			}
3222 			break;
3223 		case 3: /* Reference tag error */
3224 			if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */
3225 				read_unlock(macc_lckp);
3226 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3);
3227 				return check_condition_result;
3228 			} else if (scp->prot_flags & SCSI_PROT_REF_CHECK) {
3229 				read_unlock(macc_lckp);
3230 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3);
3231 				return illegal_condition_result;
3232 			}
3233 			break;
3234 		}
3235 	}
3236 
3237 	ret = do_device_access(sip, scp, 0, lba, num, false);
3238 	read_unlock(macc_lckp);
3239 	if (unlikely(ret == -1))
3240 		return DID_ERROR << 16;
3241 
3242 	scsi_set_resid(scp, scsi_bufflen(scp) - ret);
3243 
3244 	if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
3245 		     atomic_read(&sdeb_inject_pending))) {
3246 		if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
3247 			mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
3248 			atomic_set(&sdeb_inject_pending, 0);
3249 			return check_condition_result;
3250 		} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
3251 			/* Logical block guard check failed */
3252 			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3253 			atomic_set(&sdeb_inject_pending, 0);
3254 			return illegal_condition_result;
3255 		} else if (SDEBUG_OPT_DIX_ERR & sdebug_opts) {
3256 			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3257 			atomic_set(&sdeb_inject_pending, 0);
3258 			return illegal_condition_result;
3259 		}
3260 	}
3261 	return 0;
3262 }
3263 
3264 static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
3265 			     unsigned int sectors, u32 ei_lba)
3266 {
3267 	int ret;
3268 	struct t10_pi_tuple *sdt;
3269 	void *daddr;
3270 	sector_t sector = start_sec;
3271 	int ppage_offset;
3272 	int dpage_offset;
3273 	struct sg_mapping_iter diter;
3274 	struct sg_mapping_iter piter;
3275 
3276 	BUG_ON(scsi_sg_count(SCpnt) == 0);
3277 	BUG_ON(scsi_prot_sg_count(SCpnt) == 0);
3278 
3279 	sg_miter_start(&piter, scsi_prot_sglist(SCpnt),
3280 			scsi_prot_sg_count(SCpnt),
3281 			SG_MITER_ATOMIC | SG_MITER_FROM_SG);
3282 	sg_miter_start(&diter, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
3283 			SG_MITER_ATOMIC | SG_MITER_FROM_SG);
3284 
3285 	/* For each protection page */
3286 	while (sg_miter_next(&piter)) {
3287 		dpage_offset = 0;
3288 		if (WARN_ON(!sg_miter_next(&diter))) {
3289 			ret = 0x01;
3290 			goto out;
3291 		}
3292 
3293 		for (ppage_offset = 0; ppage_offset < piter.length;
3294 		     ppage_offset += sizeof(struct t10_pi_tuple)) {
3295 			/* If we're at the end of the current
3296 			 * data page, advance to the next one.
3297 			 */
3298 			if (dpage_offset >= diter.length) {
3299 				if (WARN_ON(!sg_miter_next(&diter))) {
3300 					ret = 0x01;
3301 					goto out;
3302 				}
3303 				dpage_offset = 0;
3304 			}
3305 
3306 			sdt = piter.addr + ppage_offset;
3307 			daddr = diter.addr + dpage_offset;
3308 
3309 			if (SCpnt->cmnd[1] >> 5 != 3) { /* WRPROTECT */
3310 				ret = dif_verify(sdt, daddr, sector, ei_lba);
3311 				if (ret)
3312 					goto out;
3313 			}
3314 
3315 			sector++;
3316 			ei_lba++;
3317 			dpage_offset += sdebug_sector_size;
3318 		}
3319 		diter.consumed = dpage_offset;
3320 		sg_miter_stop(&diter);
3321 	}
3322 	sg_miter_stop(&piter);
3323 
3324 	dif_copy_prot(SCpnt, start_sec, sectors, false);
3325 	dix_writes++;
3326 
3327 	return 0;
3328 
3329 out:
3330 	dif_errors++;
3331 	sg_miter_stop(&diter);
3332 	sg_miter_stop(&piter);
3333 	return ret;
3334 }
3335 
3336 static unsigned long lba_to_map_index(sector_t lba)
3337 {
3338 	if (sdebug_unmap_alignment)
3339 		lba += sdebug_unmap_granularity - sdebug_unmap_alignment;
3340 	sector_div(lba, sdebug_unmap_granularity);
3341 	return lba;
3342 }
3343 
3344 static sector_t map_index_to_lba(unsigned long index)
3345 {
3346 	sector_t lba = index * sdebug_unmap_granularity;
3347 
3348 	if (sdebug_unmap_alignment)
3349 		lba -= sdebug_unmap_granularity - sdebug_unmap_alignment;
3350 	return lba;
3351 }
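
/*
 * Worked example for the two helpers above (illustrative only): with
 * sdebug_unmap_granularity == 4 and sdebug_unmap_alignment == 1,
 * lba_to_map_index() computes (lba + 3) / 4, so LBA 0 falls in map
 * index 0, LBAs 1..4 in index 1 and LBAs 5..8 in index 2; in the
 * other direction, map_index_to_lba(2) == 2 * 4 - 3 == 5, the first
 * LBA of that provisioning block.
 */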
3352 
3353 static unsigned int map_state(struct sdeb_store_info *sip, sector_t lba,
3354 			      unsigned int *num)
3355 {
3356 	sector_t end;
3357 	unsigned int mapped;
3358 	unsigned long index;
3359 	unsigned long next;
3360 
3361 	index = lba_to_map_index(lba);
3362 	mapped = test_bit(index, sip->map_storep);
3363 
3364 	if (mapped)
3365 		next = find_next_zero_bit(sip->map_storep, map_size, index);
3366 	else
3367 		next = find_next_bit(sip->map_storep, map_size, index);
3368 
3369 	end = min_t(sector_t, sdebug_store_sectors,  map_index_to_lba(next));
3370 	*num = end - lba;
3371 	return mapped;
3372 }
3373 
3374 static void map_region(struct sdeb_store_info *sip, sector_t lba,
3375 		       unsigned int len)
3376 {
3377 	sector_t end = lba + len;
3378 
3379 	while (lba < end) {
3380 		unsigned long index = lba_to_map_index(lba);
3381 
3382 		if (index < map_size)
3383 			set_bit(index, sip->map_storep);
3384 
3385 		lba = map_index_to_lba(index + 1);
3386 	}
3387 }
3388 
3389 static void unmap_region(struct sdeb_store_info *sip, sector_t lba,
3390 			 unsigned int len)
3391 {
3392 	sector_t end = lba + len;
3393 	u8 *fsp = sip->storep;
3394 
3395 	while (lba < end) {
3396 		unsigned long index = lba_to_map_index(lba);
3397 
3398 		if (lba == map_index_to_lba(index) &&
3399 		    lba + sdebug_unmap_granularity <= end &&
3400 		    index < map_size) {
3401 			clear_bit(index, sip->map_storep);
3402 			if (sdebug_lbprz) {  /* LBPRZ=1: zero fill; LBPRZ=2: fill with 0xff bytes */
3403 				memset(fsp + lba * sdebug_sector_size,
3404 				       (sdebug_lbprz & 1) ? 0 : 0xff,
3405 				       sdebug_sector_size *
3406 				       sdebug_unmap_granularity);
3407 			}
3408 			if (sip->dif_storep) {
3409 				memset(sip->dif_storep + lba, 0xff,
3410 				       sizeof(*sip->dif_storep) *
3411 				       sdebug_unmap_granularity);
3412 			}
3413 		}
3414 		lba = map_index_to_lba(index + 1);
3415 	}
3416 }
3417 
3418 static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3419 {
3420 	bool check_prot;
3421 	u32 num;
3422 	u32 ei_lba;
3423 	int ret;
3424 	u64 lba;
3425 	struct sdeb_store_info *sip = devip2sip(devip, true);
3426 	rwlock_t *macc_lckp = &sip->macc_lck;
3427 	u8 *cmd = scp->cmnd;
3428 
3429 	switch (cmd[0]) {
3430 	case WRITE_16:
3431 		ei_lba = 0;
3432 		lba = get_unaligned_be64(cmd + 2);
3433 		num = get_unaligned_be32(cmd + 10);
3434 		check_prot = true;
3435 		break;
3436 	case WRITE_10:
3437 		ei_lba = 0;
3438 		lba = get_unaligned_be32(cmd + 2);
3439 		num = get_unaligned_be16(cmd + 7);
3440 		check_prot = true;
3441 		break;
3442 	case WRITE_6:
3443 		ei_lba = 0;
3444 		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
3445 		      (u32)(cmd[1] & 0x1f) << 16;
3446 		num = (0 == cmd[4]) ? 256 : cmd[4];
3447 		check_prot = true;
3448 		break;
3449 	case WRITE_12:
3450 		ei_lba = 0;
3451 		lba = get_unaligned_be32(cmd + 2);
3452 		num = get_unaligned_be32(cmd + 6);
3453 		check_prot = true;
3454 		break;
3455 	case XDWRITEREAD_10:	/* 0x53 */
3456 		ei_lba = 0;
3457 		lba = get_unaligned_be32(cmd + 2);
3458 		num = get_unaligned_be16(cmd + 7);
3459 		check_prot = false;
3460 		break;
3461 	default:	/* assume WRITE(32) */
3462 		lba = get_unaligned_be64(cmd + 12);
3463 		ei_lba = get_unaligned_be32(cmd + 20);
3464 		num = get_unaligned_be32(cmd + 28);
3465 		check_prot = false;
3466 		break;
3467 	}
3468 	if (unlikely(have_dif_prot && check_prot)) {
3469 		if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3470 		    (cmd[1] & 0xe0)) {
3471 			mk_sense_invalid_opcode(scp);
3472 			return check_condition_result;
3473 		}
3474 		if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3475 		     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3476 		    (cmd[1] & 0xe0) == 0)
3477 			sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
3478 				    "to DIF device\n");
3479 	}
3480 
3481 	write_lock(macc_lckp);
3482 	ret = check_device_access_params(scp, lba, num, true);
3483 	if (ret) {
3484 		write_unlock(macc_lckp);
3485 		return ret;
3486 	}
3487 
3488 	/* DIX + T10 DIF */
3489 	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3490 		switch (prot_verify_write(scp, lba, num, ei_lba)) {
3491 		case 1: /* Guard tag error */
3492 			if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) {
3493 				write_unlock(macc_lckp);
3494 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3495 				return illegal_condition_result;
3496 			} else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */
3497 				write_unlock(macc_lckp);
3498 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3499 				return check_condition_result;
3500 			}
3501 			break;
3502 		case 3: /* Reference tag error */
3503 			if (scp->prot_flags & SCSI_PROT_REF_CHECK) {
3504 				write_unlock(macc_lckp);
3505 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3);
3506 				return illegal_condition_result;
3507 			} else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */
3508 				write_unlock(macc_lckp);
3509 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3);
3510 				return check_condition_result;
3511 			}
3512 			break;
3513 		}
3514 	}
3515 
3516 	ret = do_device_access(sip, scp, 0, lba, num, true);
3517 	if (unlikely(scsi_debug_lbp()))
3518 		map_region(sip, lba, num);
3519 	/* If ZBC zone then bump its write pointer */
3520 	if (sdebug_dev_is_zoned(devip))
3521 		zbc_inc_wp(devip, lba, num);
3522 	write_unlock(macc_lckp);
3523 	if (unlikely(-1 == ret))
3524 		return DID_ERROR << 16;
3525 	else if (unlikely(sdebug_verbose &&
3526 			  (ret < (num * sdebug_sector_size))))
3527 		sdev_printk(KERN_INFO, scp->device,
3528 			    "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
3529 			    my_name, num * sdebug_sector_size, ret);
3530 
3531 	if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
3532 		     atomic_read(&sdeb_inject_pending))) {
3533 		if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
3534 			mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
3535 			atomic_set(&sdeb_inject_pending, 0);
3536 			return check_condition_result;
3537 		} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
3538 			/* Logical block guard check failed */
3539 			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3540 			atomic_set(&sdeb_inject_pending, 0);
3541 			return illegal_condition_result;
3542 		} else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
3543 			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3544 			atomic_set(&sdeb_inject_pending, 0);
3545 			return illegal_condition_result;
3546 		}
3547 	}
3548 	return 0;
3549 }
3550 
3551 /*
3552  * T10 has only specified WRITE SCATTERED(16) and WRITE SCATTERED(32).
3553  * No READ GATHERED yet (requires bidi or long cdb holding gather list).
3554  */
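/*
 * A hypothetical sketch (the code below parses the raw bytes with
 * get_unaligned_be*() instead) of the 32-byte LBA range descriptor
 * that resp_write_scat() expects, repeated num_lrd times after the
 * 32-byte parameter list header:
 */
struct sdeb_lba_range_desc_example {
	__be64 lba;			/* bytes 0..7 */
	__be32 num_blocks;		/* bytes 8..11 */
	__be32 exp_init_ref_tag;	/* bytes 12..15, 32-byte cdb variant only */
	u8 reserved[16];		/* pad to 32 bytes */
};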
3555 static int resp_write_scat(struct scsi_cmnd *scp,
3556 			   struct sdebug_dev_info *devip)
3557 {
3558 	u8 *cmd = scp->cmnd;
3559 	u8 *lrdp = NULL;
3560 	u8 *up;
3561 	struct sdeb_store_info *sip = devip2sip(devip, true);
3562 	rwlock_t *macc_lckp = &sip->macc_lck;
3563 	u8 wrprotect;
3564 	u16 lbdof, num_lrd, k;
3565 	u32 num, num_by, bt_len, lbdof_blen, sg_off, cum_lb;
3566 	u32 lb_size = sdebug_sector_size;
3567 	u32 ei_lba;
3568 	u64 lba;
3569 	int ret, res;
3570 	bool is_16;
3571 	static const u32 lrd_size = 32; /* descriptor size; parameter list header is also 32 bytes */
3572 
3573 	if (cmd[0] == VARIABLE_LENGTH_CMD) {
3574 		is_16 = false;
3575 		wrprotect = (cmd[10] >> 5) & 0x7;
3576 		lbdof = get_unaligned_be16(cmd + 12);
3577 		num_lrd = get_unaligned_be16(cmd + 16);
3578 		bt_len = get_unaligned_be32(cmd + 28);
3579 	} else {        /* that leaves WRITE SCATTERED(16) */
3580 		is_16 = true;
3581 		wrprotect = (cmd[2] >> 5) & 0x7;
3582 		lbdof = get_unaligned_be16(cmd + 4);
3583 		num_lrd = get_unaligned_be16(cmd + 8);
3584 		bt_len = get_unaligned_be32(cmd + 10);
3585 		if (unlikely(have_dif_prot)) {
3586 			if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3587 			    wrprotect) {
3588 				mk_sense_invalid_opcode(scp);
3589 				return illegal_condition_result;
3590 			}
3591 			if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3592 			     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3593 			     wrprotect == 0)
3594 				sdev_printk(KERN_ERR, scp->device,
3595 					    "Unprotected WR to DIF device\n");
3596 		}
3597 	}
3598 	if ((num_lrd == 0) || (bt_len == 0))
3599 		return 0;       /* T10 says these do-nothings are not errors */
3600 	if (lbdof == 0) {
3601 		if (sdebug_verbose)
3602 			sdev_printk(KERN_INFO, scp->device,
3603 				"%s: %s: LB Data Offset field bad\n",
3604 				my_name, __func__);
3605 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3606 		return illegal_condition_result;
3607 	}
3608 	lbdof_blen = lbdof * lb_size;
3609 	if ((lrd_size + (num_lrd * lrd_size)) > lbdof_blen) {
3610 		if (sdebug_verbose)
3611 			sdev_printk(KERN_INFO, scp->device,
3612 				"%s: %s: LBA range descriptors don't fit\n",
3613 				my_name, __func__);
3614 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3615 		return illegal_condition_result;
3616 	}
3617 	lrdp = kzalloc(lbdof_blen, GFP_ATOMIC);
3618 	if (lrdp == NULL)
3619 		return SCSI_MLQUEUE_HOST_BUSY;
3620 	if (sdebug_verbose)
3621 		sdev_printk(KERN_INFO, scp->device,
3622 			"%s: %s: Fetch header+scatter_list, lbdof_blen=%u\n",
3623 			my_name, __func__, lbdof_blen);
3624 	res = fetch_to_dev_buffer(scp, lrdp, lbdof_blen);
3625 	if (res == -1) {
3626 		ret = DID_ERROR << 16;
3627 		goto err_out;
3628 	}
3629 
3630 	write_lock(macc_lckp);
3631 	sg_off = lbdof_blen;
3632 	/* Spec says the Buffer Transfer Length field counts logical blocks in the data-out buffer */
3633 	cum_lb = 0;
3634 	for (k = 0, up = lrdp + lrd_size; k < num_lrd; ++k, up += lrd_size) {
3635 		lba = get_unaligned_be64(up + 0);
3636 		num = get_unaligned_be32(up + 8);
3637 		if (sdebug_verbose)
3638 			sdev_printk(KERN_INFO, scp->device,
3639 				"%s: %s: k=%d  LBA=0x%llx num=%u  sg_off=%u\n",
3640 				my_name, __func__, k, lba, num, sg_off);
3641 		if (num == 0)
3642 			continue;
3643 		ret = check_device_access_params(scp, lba, num, true);
3644 		if (ret)
3645 			goto err_out_unlock;
3646 		num_by = num * lb_size;
3647 		ei_lba = is_16 ? 0 : get_unaligned_be32(up + 12);
3648 
3649 		if ((cum_lb + num) > bt_len) {
3650 			if (sdebug_verbose)
3651 				sdev_printk(KERN_INFO, scp->device,
3652 				    "%s: %s: sum of blocks > data provided\n",
3653 				    my_name, __func__);
3654 			mk_sense_buffer(scp, ILLEGAL_REQUEST, WRITE_ERROR_ASC,
3655 					0);
3656 			ret = illegal_condition_result;
3657 			goto err_out_unlock;
3658 		}
3659 
3660 		/* DIX + T10 DIF */
3661 		if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3662 			int prot_ret = prot_verify_write(scp, lba, num,
3663 							 ei_lba);
3664 
3665 			if (prot_ret) {
3666 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10,
3667 						prot_ret);
3668 				ret = illegal_condition_result;
3669 				goto err_out_unlock;
3670 			}
3671 		}
3672 
3673 		ret = do_device_access(sip, scp, sg_off, lba, num, true);
3674 		/* If ZBC zone then bump its write pointer */
3675 		if (sdebug_dev_is_zoned(devip))
3676 			zbc_inc_wp(devip, lba, num);
3677 		if (unlikely(scsi_debug_lbp()))
3678 			map_region(sip, lba, num);
3679 		if (unlikely(-1 == ret)) {
3680 			ret = DID_ERROR << 16;
3681 			goto err_out_unlock;
3682 		} else if (unlikely(sdebug_verbose && (ret < num_by)))
3683 			sdev_printk(KERN_INFO, scp->device,
3684 			    "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
3685 			    my_name, num_by, ret);
3686 
3687 		if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
3688 			     atomic_read(&sdeb_inject_pending))) {
3689 			if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
3690 				mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
3691 				atomic_set(&sdeb_inject_pending, 0);
3692 				ret = check_condition_result;
3693 				goto err_out_unlock;
3694 			} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
3695 				/* Logical block guard check failed */
3696 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3697 				atomic_set(&sdeb_inject_pending, 0);
3698 				ret = illegal_condition_result;
3699 				goto err_out_unlock;
3700 			} else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
3701 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3702 				atomic_set(&sdeb_inject_pending, 0);
3703 				ret = illegal_condition_result;
3704 				goto err_out_unlock;
3705 			}
3706 		}
3707 		sg_off += num_by;
3708 		cum_lb += num;
3709 	}
3710 	ret = 0;
3711 err_out_unlock:
3712 	write_unlock(macc_lckp);
3713 err_out:
3714 	kfree(lrdp);
3715 	return ret;
3716 }
3717 
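/*
 * Strategy used by resp_write_same() below: materialize a single
 * logical block at the starting LBA (zeroed for NDOB, otherwise
 * fetched from the data-out buffer), then replicate that block across
 * the remaining num - 1 blocks, wrapping around the virtual store as
 * needed. UNMAP requests on an LBP device bypass this and simply
 * unmap the region.
 */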
3718 static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
3719 			   u32 ei_lba, bool unmap, bool ndob)
3720 {
3721 	struct scsi_device *sdp = scp->device;
3722 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
3723 	unsigned long long i;
3724 	u64 block, lbaa;
3725 	u32 lb_size = sdebug_sector_size;
3726 	int ret;
3727 	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3728 						scp->device->hostdata, true);
3729 	rwlock_t *macc_lckp = &sip->macc_lck;
3730 	u8 *fs1p;
3731 	u8 *fsp;
3732 
3733 	write_lock(macc_lckp);
3734 
3735 	ret = check_device_access_params(scp, lba, num, true);
3736 	if (ret) {
3737 		write_unlock(macc_lckp);
3738 		return ret;
3739 	}
3740 
3741 	if (unmap && scsi_debug_lbp()) {
3742 		unmap_region(sip, lba, num);
3743 		goto out;
3744 	}
3745 	lbaa = lba;
3746 	block = do_div(lbaa, sdebug_store_sectors);
3747 	/* if ndob then zero 1 logical block, else fetch 1 logical block */
3748 	fsp = sip->storep;
3749 	fs1p = fsp + (block * lb_size);
3750 	if (ndob) {
3751 		memset(fs1p, 0, lb_size);
3752 		ret = 0;
3753 	} else
3754 		ret = fetch_to_dev_buffer(scp, fs1p, lb_size);
3755 
3756 	if (-1 == ret) {
3757 		write_unlock(macc_lckp);
3758 		return DID_ERROR << 16;
3759 	} else if (sdebug_verbose && !ndob && (ret < lb_size))
3760 		sdev_printk(KERN_INFO, scp->device,
3761 			    "%s: %s: lb size=%u, IO sent=%d bytes\n",
3762 			    my_name, "write same", lb_size, ret);
3763 
3764 	/* Copy first sector to remaining blocks */
3765 	for (i = 1 ; i < num ; i++) {
3766 		lbaa = lba + i;
3767 		block = do_div(lbaa, sdebug_store_sectors);
3768 		memmove(fsp + (block * lb_size), fs1p, lb_size);
3769 	}
3770 	if (scsi_debug_lbp())
3771 		map_region(sip, lba, num);
3772 	/* If ZBC zone then bump its write pointer */
3773 	if (sdebug_dev_is_zoned(devip))
3774 		zbc_inc_wp(devip, lba, num);
3775 out:
3776 	write_unlock(macc_lckp);
3777 
3778 	return 0;
3779 }
3780 
3781 static int resp_write_same_10(struct scsi_cmnd *scp,
3782 			      struct sdebug_dev_info *devip)
3783 {
3784 	u8 *cmd = scp->cmnd;
3785 	u32 lba;
3786 	u16 num;
3787 	u32 ei_lba = 0;
3788 	bool unmap = false;
3789 
3790 	if (cmd[1] & 0x8) {
3791 		if (sdebug_lbpws10 == 0) {
3792 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3793 			return check_condition_result;
3794 		} else
3795 			unmap = true;
3796 	}
3797 	lba = get_unaligned_be32(cmd + 2);
3798 	num = get_unaligned_be16(cmd + 7);
3799 	if (num > sdebug_write_same_length) {
3800 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3801 		return check_condition_result;
3802 	}
3803 	return resp_write_same(scp, lba, num, ei_lba, unmap, false);
3804 }
3805 
3806 static int resp_write_same_16(struct scsi_cmnd *scp,
3807 			      struct sdebug_dev_info *devip)
3808 {
3809 	u8 *cmd = scp->cmnd;
3810 	u64 lba;
3811 	u32 num;
3812 	u32 ei_lba = 0;
3813 	bool unmap = false;
3814 	bool ndob = false;
3815 
3816 	if (cmd[1] & 0x8) {	/* UNMAP */
3817 		if (sdebug_lbpws == 0) {
3818 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3819 			return check_condition_result;
3820 		} else
3821 			unmap = true;
3822 	}
3823 	if (cmd[1] & 0x1)  /* NDOB (no data-out buffer, assumes zeroes) */
3824 		ndob = true;
3825 	lba = get_unaligned_be64(cmd + 2);
3826 	num = get_unaligned_be32(cmd + 10);
3827 	if (num > sdebug_write_same_length) {
3828 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
3829 		return check_condition_result;
3830 	}
3831 	return resp_write_same(scp, lba, num, ei_lba, unmap, ndob);
3832 }
3833 
3834 /* Note the mode field is in the same position as the (lower) service action
3835  * field. For the REPORT SUPPORTED OPERATION CODES command, SPC-4 suggests
3836  * each mode of this command should be reported separately; left as a future enhancement. */
3837 static int resp_write_buffer(struct scsi_cmnd *scp,
3838 			     struct sdebug_dev_info *devip)
3839 {
3840 	u8 *cmd = scp->cmnd;
3841 	struct scsi_device *sdp = scp->device;
3842 	struct sdebug_dev_info *dp;
3843 	u8 mode;
3844 
3845 	mode = cmd[1] & 0x1f;
3846 	switch (mode) {
3847 	case 0x4:	/* download microcode (MC) and activate (ACT) */
3848 		/* set UAs on this device only */
3849 		set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
3850 		set_bit(SDEBUG_UA_MICROCODE_CHANGED, devip->uas_bm);
3851 		break;
3852 	case 0x5:	/* download MC, save and ACT */
3853 		set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET, devip->uas_bm);
3854 		break;
3855 	case 0x6:	/* download MC with offsets and ACT */
3856 		/* set UAs on most devices (LUs) in this target */
3857 		list_for_each_entry(dp,
3858 				    &devip->sdbg_host->dev_info_list,
3859 				    dev_list)
3860 			if (dp->target == sdp->id) {
3861 				set_bit(SDEBUG_UA_BUS_RESET, dp->uas_bm);
3862 				if (devip != dp)
3863 					set_bit(SDEBUG_UA_MICROCODE_CHANGED,
3864 						dp->uas_bm);
3865 			}
3866 		break;
3867 	case 0x7:	/* download MC with offsets, save, and ACT */
3868 		/* set UA on all devices (LUs) in this target */
3869 		list_for_each_entry(dp,
3870 				    &devip->sdbg_host->dev_info_list,
3871 				    dev_list)
3872 			if (dp->target == sdp->id)
3873 				set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET,
3874 					dp->uas_bm);
3875 		break;
3876 	default:
3877 		/* do nothing for this command for other mode values */
3878 		break;
3879 	}
3880 	return 0;
3881 }
3882 
3883 static int resp_comp_write(struct scsi_cmnd *scp,
3884 			   struct sdebug_dev_info *devip)
3885 {
3886 	u8 *cmd = scp->cmnd;
3887 	u8 *arr;
3888 	struct sdeb_store_info *sip = devip2sip(devip, true);
3889 	rwlock_t *macc_lckp = &sip->macc_lck;
3890 	u64 lba;
3891 	u32 dnum;
3892 	u32 lb_size = sdebug_sector_size;
3893 	u8 num;
3894 	int ret;
3895 	int retval = 0;
3896 
3897 	lba = get_unaligned_be64(cmd + 2);
3898 	num = cmd[13];		/* 1 to a maximum of 255 logical blocks */
3899 	if (0 == num)
3900 		return 0;	/* degenerate case, not an error */
3901 	if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3902 	    (cmd[1] & 0xe0)) {
3903 		mk_sense_invalid_opcode(scp);
3904 		return check_condition_result;
3905 	}
3906 	if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3907 	     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3908 	    (cmd[1] & 0xe0) == 0)
3909 		sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
3910 			    "to DIF device\n");
3911 	ret = check_device_access_params(scp, lba, num, false);
3912 	if (ret)
3913 		return ret;
3914 	dnum = 2 * num;
3915 	arr = kcalloc(lb_size, dnum, GFP_ATOMIC);
3916 	if (NULL == arr) {
3917 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3918 				INSUFF_RES_ASCQ);
3919 		return check_condition_result;
3920 	}
3921 
3922 	write_lock(macc_lckp);
3923 
3924 	ret = do_dout_fetch(scp, dnum, arr);
3925 	if (ret == -1) {
3926 		retval = DID_ERROR << 16;
3927 		goto cleanup;
3928 	} else if (sdebug_verbose && (ret < (dnum * lb_size)))
3929 		sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb "
3930 			    "indicated=%u, IO sent=%d bytes\n", my_name,
3931 			    dnum * lb_size, ret);
3932 	if (!comp_write_worker(sip, lba, num, arr, false)) {
3933 		mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
3934 		retval = check_condition_result;
3935 		goto cleanup;
3936 	}
3937 	if (scsi_debug_lbp())
3938 		map_region(sip, lba, num);
3939 cleanup:
3940 	write_unlock(macc_lckp);
3941 	kfree(arr);
3942 	return retval;
3943 }
3944 
3945 struct unmap_block_desc {
3946 	__be64	lba;
3947 	__be32	blocks;
3948 	__be32	__reserved;
3949 };
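
/*
 * Illustrative companion to the descriptor above (a sketch; resp_unmap()
 * below checks the raw header bytes directly): the 8-byte UNMAP
 * parameter list header that precedes the block descriptors.
 */
struct unmap_param_list_hdr_example {
	__be16 data_len;		/* payload_len - 2 */
	__be16 block_desc_data_len;	/* descriptors * 16 */
	u8 reserved[4];
};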
3950 
3951 static int resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3952 {
3953 	unsigned char *buf;
3954 	struct unmap_block_desc *desc;
3955 	struct sdeb_store_info *sip = devip2sip(devip, true);
3956 	rwlock_t *macc_lckp = &sip->macc_lck;
3957 	unsigned int i, payload_len, descriptors;
3958 	int ret;
3959 
3960 	if (!scsi_debug_lbp())
3961 		return 0;	/* fib and say it's done */
3962 	payload_len = get_unaligned_be16(scp->cmnd + 7);
3963 	BUG_ON(scsi_bufflen(scp) != payload_len);
3964 
3965 	descriptors = (payload_len - 8) / 16;
3966 	if (descriptors > sdebug_unmap_max_desc) {
3967 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3968 		return check_condition_result;
3969 	}
3970 
3971 	buf = kzalloc(scsi_bufflen(scp), GFP_ATOMIC);
3972 	if (!buf) {
3973 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3974 				INSUFF_RES_ASCQ);
3975 		return check_condition_result;
3976 	}
3977 
3978 	scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
3979 
3980 	BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
3981 	BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);
3982 
3983 	desc = (void *)&buf[8];
3984 
3985 	write_lock(macc_lckp);
3986 
3987 	for (i = 0 ; i < descriptors ; i++) {
3988 		unsigned long long lba = get_unaligned_be64(&desc[i].lba);
3989 		unsigned int num = get_unaligned_be32(&desc[i].blocks);
3990 
3991 		ret = check_device_access_params(scp, lba, num, true);
3992 		if (ret)
3993 			goto out;
3994 
3995 		unmap_region(sip, lba, num);
3996 	}
3997 
3998 	ret = 0;
3999 
4000 out:
4001 	write_unlock(macc_lckp);
4002 	kfree(buf);
4003 
4004 	return ret;
4005 }
4006 
4007 #define SDEBUG_GET_LBA_STATUS_LEN 32
4008 
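/*
 * Layout of the response built below: a 4-byte parameter data length
 * (20), 4 reserved bytes, then a single LBA status descriptor holding
 * the 8-byte starting LBA, a 4-byte block count and a provisioning
 * status byte (0 == mapped, 1 == deallocated).
 */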
4009 static int resp_get_lba_status(struct scsi_cmnd *scp,
4010 			       struct sdebug_dev_info *devip)
4011 {
4012 	u8 *cmd = scp->cmnd;
4013 	u64 lba;
4014 	u32 alloc_len, mapped, num;
4015 	int ret;
4016 	u8 arr[SDEBUG_GET_LBA_STATUS_LEN];
4017 
4018 	lba = get_unaligned_be64(cmd + 2);
4019 	alloc_len = get_unaligned_be32(cmd + 10);
4020 
4021 	if (alloc_len < 24)
4022 		return 0;
4023 
4024 	ret = check_device_access_params(scp, lba, 1, false);
4025 	if (ret)
4026 		return ret;
4027 
4028 	if (scsi_debug_lbp()) {
4029 		struct sdeb_store_info *sip = devip2sip(devip, true);
4030 
4031 		mapped = map_state(sip, lba, &num);
4032 	} else {
4033 		mapped = 1;
4034 		/* following just in case virtual_gb changed */
4035 		sdebug_capacity = get_sdebug_capacity();
4036 		if (sdebug_capacity - lba <= 0xffffffff)
4037 			num = sdebug_capacity - lba;
4038 		else
4039 			num = 0xffffffff;
4040 	}
4041 
4042 	memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
4043 	put_unaligned_be32(20, arr);		/* Parameter Data Length */
4044 	put_unaligned_be64(lba, arr + 8);	/* LBA */
4045 	put_unaligned_be32(num, arr + 16);	/* Number of blocks */
4046 	arr[20] = !mapped;		/* prov_stat=0: mapped; 1: dealloc */
4047 
4048 	return fill_from_dev_buffer(scp, arr, SDEBUG_GET_LBA_STATUS_LEN);
4049 }
4050 
4051 static int resp_sync_cache(struct scsi_cmnd *scp,
4052 			   struct sdebug_dev_info *devip)
4053 {
4054 	int res = 0;
4055 	u64 lba;
4056 	u32 num_blocks;
4057 	u8 *cmd = scp->cmnd;
4058 
4059 	if (cmd[0] == SYNCHRONIZE_CACHE) {	/* 10 byte cdb */
4060 		lba = get_unaligned_be32(cmd + 2);
4061 		num_blocks = get_unaligned_be16(cmd + 7);
4062 	} else {				/* SYNCHRONIZE_CACHE(16) */
4063 		lba = get_unaligned_be64(cmd + 2);
4064 		num_blocks = get_unaligned_be32(cmd + 10);
4065 	}
4066 	if (lba + num_blocks > sdebug_capacity) {
4067 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4068 		return check_condition_result;
4069 	}
4070 	if (!write_since_sync || (cmd[1] & 0x2))
4071 		res = SDEG_RES_IMMED_MASK;
4072 	else		/* delay if write_since_sync and IMMED clear */
4073 		write_since_sync = false;
4074 	return res;
4075 }
4076 
4077 /*
4078  * Assuming LBA+num_blocks is not out-of-range, PRE-FETCH returns CONDITION
4079  * MET if the specified blocks will fit (or already sit) in the cache, and
4080  * GOOD status otherwise. Model a disk with a big cache and always yield
4081  * CONDITION MET. As a side effect, try to bring the addressed range of the
4082  * store in main memory into the cache associated with the CPU(s).
4083  */
4084 static int resp_pre_fetch(struct scsi_cmnd *scp,
4085 			  struct sdebug_dev_info *devip)
4086 {
4087 	int res = 0;
4088 	u64 lba;
4089 	u64 block, rest = 0;
4090 	u32 nblks;
4091 	u8 *cmd = scp->cmnd;
4092 	struct sdeb_store_info *sip = devip2sip(devip, true);
4093 	rwlock_t *macc_lckp = &sip->macc_lck;
4094 	u8 *fsp = sip->storep;
4095 
4096 	if (cmd[0] == PRE_FETCH) {	/* 10 byte cdb */
4097 		lba = get_unaligned_be32(cmd + 2);
4098 		nblks = get_unaligned_be16(cmd + 7);
4099 	} else {			/* PRE-FETCH(16) */
4100 		lba = get_unaligned_be64(cmd + 2);
4101 		nblks = get_unaligned_be32(cmd + 10);
4102 	}
4103 	if (lba + nblks > sdebug_capacity) {
4104 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4105 		return check_condition_result;
4106 	}
4107 	if (!fsp)
4108 		goto fini;
4109 	/* PRE-FETCH spec says nothing about LBP or PI so skip them */
4110 	block = do_div(lba, sdebug_store_sectors);
4111 	if (block + nblks > sdebug_store_sectors)
4112 		rest = block + nblks - sdebug_store_sectors;
4113 
4114 	/* Try to bring the PRE-FETCH range into CPU's cache */
4115 	read_lock(macc_lckp);
4116 	prefetch_range(fsp + (sdebug_sector_size * block),
4117 		       (nblks - rest) * sdebug_sector_size);
4118 	if (rest)
4119 		prefetch_range(fsp, rest * sdebug_sector_size);
4120 	read_unlock(macc_lckp);
4121 fini:
4122 	if (cmd[1] & 0x2)
4123 		res = SDEG_RES_IMMED_MASK;
4124 	return res | condition_met_result;
4125 }
4126 
4127 #define RL_BUCKET_ELEMS 8
4128 
4129 /* Even though each pseudo target has a REPORT LUNS "well known logical unit"
4130  * (W-LUN), the normal Linux scanning logic does not associate it with a
4131  * device (e.g. /dev/sg7). The following magic will make that association:
4132  *   "cd /sys/class/scsi_host/host<n> ; echo '- - 49409' > scan"
4133  * where <n> is a host number. If there are multiple targets in a host then
4134  * the above will associate a W-LUN to each target. To only get a W-LUN
4135  * for target 2, then use "echo '- 2 49409' > scan" .
4136  */
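/*
 * Bucketing example (illustrative): with sdebug_max_luns == 3,
 * sdebug_no_lun_0 == 0 and SELECT REPORT == 2, tlun_cnt == 4 and
 * rlen == 32. Bucket k == 0 carries the 8-byte response header in its
 * first slot, LUNs 0..2 in the next three slots and the W-LUN in the
 * fifth, so a single 40-byte p_fill_from_dev_buffer() call covers the
 * whole response.
 */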
4137 static int resp_report_luns(struct scsi_cmnd *scp,
4138 			    struct sdebug_dev_info *devip)
4139 {
4140 	unsigned char *cmd = scp->cmnd;
4141 	unsigned int alloc_len;
4142 	unsigned char select_report;
4143 	u64 lun;
4144 	struct scsi_lun *lun_p;
4145 	u8 arr[RL_BUCKET_ELEMS * sizeof(struct scsi_lun)];
4146 	unsigned int lun_cnt;	/* normal LUN count (max: 256) */
4147 	unsigned int wlun_cnt;	/* report luns W-LUN count */
4148 	unsigned int tlun_cnt;	/* total LUN count */
4149 	unsigned int rlen;	/* response length (in bytes) */
4150 	int k, j, n, res;
4151 	unsigned int off_rsp = 0;
4152 	const int sz_lun = sizeof(struct scsi_lun);
4153 
4154 	clear_luns_changed_on_target(devip);
4155 
4156 	select_report = cmd[2];
4157 	alloc_len = get_unaligned_be32(cmd + 6);
4158 
4159 	if (alloc_len < 4) {
4160 		pr_err("alloc len too small %d\n", alloc_len);
4161 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
4162 		return check_condition_result;
4163 	}
4164 
4165 	switch (select_report) {
4166 	case 0:		/* all LUNs apart from W-LUNs */
4167 		lun_cnt = sdebug_max_luns;
4168 		wlun_cnt = 0;
4169 		break;
4170 	case 1:		/* only W-LUNs */
4171 		lun_cnt = 0;
4172 		wlun_cnt = 1;
4173 		break;
4174 	case 2:		/* all LUNs */
4175 		lun_cnt = sdebug_max_luns;
4176 		wlun_cnt = 1;
4177 		break;
4178 	case 0x10:	/* only administrative LUs */
4179 	case 0x11:	/* see SPC-5 */
4180 	case 0x12:	/* only subsidiary LUs owned by referenced LU */
4181 	default:
4182 		pr_debug("select report invalid %d\n", select_report);
4183 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
4184 		return check_condition_result;
4185 	}
4186 
4187 	if (sdebug_no_lun_0 && (lun_cnt > 0))
4188 		--lun_cnt;
4189 
4190 	tlun_cnt = lun_cnt + wlun_cnt;
4191 	rlen = tlun_cnt * sz_lun;	/* excluding 8 byte header */
4192 	scsi_set_resid(scp, scsi_bufflen(scp));
4193 	pr_debug("select_report %d luns = %d wluns = %d no_lun0 %d\n",
4194 		 select_report, lun_cnt, wlun_cnt, sdebug_no_lun_0);
4195 
4196 	/* loops rely on sizeof response header same as sizeof lun (both 8) */
4197 	lun = sdebug_no_lun_0 ? 1 : 0;
4198 	for (k = 0, j = 0, res = 0; true; ++k, j = 0) {
4199 		memset(arr, 0, sizeof(arr));
4200 		lun_p = (struct scsi_lun *)&arr[0];
4201 		if (k == 0) {
4202 			put_unaligned_be32(rlen, &arr[0]);
4203 			++lun_p;
4204 			j = 1;
4205 		}
4206 		for ( ; j < RL_BUCKET_ELEMS; ++j, ++lun_p) {
4207 			if ((k * RL_BUCKET_ELEMS) + j > lun_cnt)
4208 				break;
4209 			int_to_scsilun(lun++, lun_p);
4210 			if (lun > 1 && sdebug_lun_am == SAM_LUN_AM_FLAT)
4211 				lun_p->scsi_lun[0] |= 0x40;
4212 		}
4213 		if (j < RL_BUCKET_ELEMS)
4214 			break;
4215 		n = j * sz_lun;
4216 		res = p_fill_from_dev_buffer(scp, arr, n, off_rsp);
4217 		if (res)
4218 			return res;
4219 		off_rsp += n;
4220 	}
4221 	if (wlun_cnt) {
4222 		int_to_scsilun(SCSI_W_LUN_REPORT_LUNS, lun_p);
4223 		++j;
4224 	}
4225 	if (j > 0)
4226 		res = p_fill_from_dev_buffer(scp, arr, j * sz_lun, off_rsp);
4227 	return res;
4228 }
4229 
4230 static int resp_verify(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4231 {
4232 	bool is_bytchk3 = false;
4233 	u8 bytchk;
4234 	int ret, j;
4235 	u32 vnum, a_num, off;
4236 	const u32 lb_size = sdebug_sector_size;
4237 	u64 lba;
4238 	u8 *arr;
4239 	u8 *cmd = scp->cmnd;
4240 	struct sdeb_store_info *sip = devip2sip(devip, true);
4241 	rwlock_t *macc_lckp = &sip->macc_lck;
4242 
4243 	bytchk = (cmd[1] >> 1) & 0x3;
4244 	if (bytchk == 0) {
4245 		return 0;	/* always claim internal verify okay */
4246 	} else if (bytchk == 2) {
4247 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
4248 		return check_condition_result;
4249 	} else if (bytchk == 3) {
4250 		is_bytchk3 = true;	/* 1 block sent, compared repeatedly */
4251 	}
4252 	switch (cmd[0]) {
4253 	case VERIFY_16:
4254 		lba = get_unaligned_be64(cmd + 2);
4255 		vnum = get_unaligned_be32(cmd + 10);
4256 		break;
4257 	case VERIFY:		/* is VERIFY(10) */
4258 		lba = get_unaligned_be32(cmd + 2);
4259 		vnum = get_unaligned_be16(cmd + 7);
4260 		break;
4261 	default:
4262 		mk_sense_invalid_opcode(scp);
4263 		return check_condition_result;
4264 	}
4265 	if (vnum == 0)
4266 		return 0;	/* not an error */
4267 	a_num = is_bytchk3 ? 1 : vnum;
4268 	/* Treat following check like one for read (i.e. no write) access */
4269 	ret = check_device_access_params(scp, lba, a_num, false);
4270 	if (ret)
4271 		return ret;
4272 
4273 	arr = kcalloc(lb_size, vnum, GFP_ATOMIC);
4274 	if (!arr) {
4275 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4276 				INSUFF_RES_ASCQ);
4277 		return check_condition_result;
4278 	}
4279 	/* Not changing store, so only need read access */
4280 	read_lock(macc_lckp);
4281 
4282 	ret = do_dout_fetch(scp, a_num, arr);
4283 	if (ret == -1) {
4284 		ret = DID_ERROR << 16;
4285 		goto cleanup;
4286 	} else if (sdebug_verbose && (ret < (a_num * lb_size))) {
4287 		sdev_printk(KERN_INFO, scp->device,
4288 			    "%s: %s: cdb indicated=%u, IO sent=%d bytes\n",
4289 			    my_name, __func__, a_num * lb_size, ret);
4290 	}
4291 	if (is_bytchk3) {
4292 		for (j = 1, off = lb_size; j < vnum; ++j, off += lb_size)
4293 			memcpy(arr + off, arr, lb_size);
4294 	}
4295 	ret = 0;
4296 	if (!comp_write_worker(sip, lba, vnum, arr, true)) {
4297 		mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
4298 		ret = check_condition_result;
4299 		goto cleanup;
4300 	}
4301 cleanup:
4302 	read_unlock(macc_lckp);
4303 	kfree(arr);
4304 	return ret;
4305 }
4306 
4307 #define RZONES_DESC_HD 64
4308 
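/*
 * Response layout used below: a 64-byte header carrying the zone list
 * length (in bytes) and the maximum LBA, followed by one 64-byte
 * descriptor per reported zone with the zone type, condition, size,
 * start LBA and write pointer at byte offsets 0, 1, 8, 16 and 24
 * respectively.
 */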
4309 /* Report zones depending on start LBA and reporting options */
4310 static int resp_report_zones(struct scsi_cmnd *scp,
4311 			     struct sdebug_dev_info *devip)
4312 {
4313 	unsigned int i, max_zones, rep_max_zones, nrz = 0;
4314 	int ret = 0;
4315 	u32 alloc_len, rep_opts, rep_len;
4316 	bool partial;
4317 	u64 lba, zs_lba;
4318 	u8 *arr = NULL, *desc;
4319 	u8 *cmd = scp->cmnd;
4320 	struct sdeb_zone_state *zsp;
4321 	struct sdeb_store_info *sip = devip2sip(devip, false);
4322 	rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
4323 
4324 	if (!sdebug_dev_is_zoned(devip)) {
4325 		mk_sense_invalid_opcode(scp);
4326 		return check_condition_result;
4327 	}
4328 	zs_lba = get_unaligned_be64(cmd + 2);
4329 	alloc_len = get_unaligned_be32(cmd + 10);
4330 	if (alloc_len == 0)
4331 		return 0;	/* not an error */
4332 	rep_opts = cmd[14] & 0x3f;
4333 	partial = cmd[14] & 0x80;
4334 
4335 	if (zs_lba >= sdebug_capacity) {
4336 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4337 		return check_condition_result;
4338 	}
4339 
4340 	max_zones = devip->nr_zones - (zs_lba >> devip->zsize_shift);
4341 	rep_max_zones = min((alloc_len - 64) >> ilog2(RZONES_DESC_HD),
4342 			    max_zones);
4343 
4344 	arr = kzalloc(alloc_len, GFP_ATOMIC);
4345 	if (!arr) {
4346 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4347 				INSUFF_RES_ASCQ);
4348 		return check_condition_result;
4349 	}
4350 
4351 	read_lock(macc_lckp);
4352 
4353 	desc = arr + 64;
4354 	for (i = 0; i < max_zones; i++) {
4355 		lba = zs_lba + devip->zsize * i;
4356 		if (lba > sdebug_capacity)
4357 			break;
4358 		zsp = zbc_zone(devip, lba);
4359 		switch (rep_opts) {
4360 		case 0x00:
4361 			/* All zones */
4362 			break;
4363 		case 0x01:
4364 			/* Empty zones */
4365 			if (zsp->z_cond != ZC1_EMPTY)
4366 				continue;
4367 			break;
4368 		case 0x02:
4369 			/* Implicit open zones */
4370 			if (zsp->z_cond != ZC2_IMPLICIT_OPEN)
4371 				continue;
4372 			break;
4373 		case 0x03:
4374 			/* Explicit open zones */
4375 			if (zsp->z_cond != ZC3_EXPLICIT_OPEN)
4376 				continue;
4377 			break;
4378 		case 0x04:
4379 			/* Closed zones */
4380 			if (zsp->z_cond != ZC4_CLOSED)
4381 				continue;
4382 			break;
4383 		case 0x05:
4384 			/* Full zones */
4385 			if (zsp->z_cond != ZC5_FULL)
4386 				continue;
4387 			break;
4388 		case 0x06:
4389 		case 0x07:
4390 		case 0x10:
4391 			/*
4392 			 * Read-only, offline, reset WP recommended are
4393 			 * not emulated: no zones to report.
4394 			 */
4395 			continue;
4396 		case 0x11:
4397 			/* non-seq-resource set */
4398 			if (!zsp->z_non_seq_resource)
4399 				continue;
4400 			break;
4401 		case 0x3f:
4402 			/* Not write pointer (conventional) zones */
4403 			if (!zbc_zone_is_conv(zsp))
4404 				continue;
4405 			break;
4406 		default:
4407 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
4408 					INVALID_FIELD_IN_CDB, 0);
4409 			ret = check_condition_result;
4410 			goto fini;
4411 		}
4412 
4413 		if (nrz < rep_max_zones) {
4414 			/* Fill zone descriptor */
4415 			desc[0] = zsp->z_type;
4416 			desc[1] = zsp->z_cond << 4;
4417 			if (zsp->z_non_seq_resource)
4418 				desc[1] |= 1 << 1;
4419 			put_unaligned_be64((u64)zsp->z_size, desc + 8);
4420 			put_unaligned_be64((u64)zsp->z_start, desc + 16);
4421 			put_unaligned_be64((u64)zsp->z_wp, desc + 24);
4422 			desc += 64;
4423 		}
4424 
4425 		if (partial && nrz >= rep_max_zones)
4426 			break;
4427 
4428 		nrz++;
4429 	}
4430 
4431 	/* Report header */
4432 	put_unaligned_be32(nrz * RZONES_DESC_HD, arr + 0);
4433 	put_unaligned_be64(sdebug_capacity - 1, arr + 8);
4434 
4435 	rep_len = (unsigned long)desc - (unsigned long)arr;
4436 	ret = fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, rep_len));
4437 
4438 fini:
4439 	read_unlock(macc_lckp);
4440 	kfree(arr);
4441 	return ret;
4442 }
4443 
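/*
 * Summary of the zone condition transitions driven by the helpers
 * below: EMPTY and CLOSED zones move to IMPLICIT or EXPLICIT OPEN via
 * zbc_open_zone(), OPEN zones fall back to CLOSED via
 * zbc_close_zone(), FINISH ZONE forces a zone to FULL with its write
 * pointer at the end of the zone, and RESET WRITE POINTER returns a
 * zone to EMPTY with the write pointer rewound to the zone start.
 */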
4444 /* Logic transplanted from tcmu-runner, file_zbc.c */
4445 static void zbc_open_all(struct sdebug_dev_info *devip)
4446 {
4447 	struct sdeb_zone_state *zsp = &devip->zstate[0];
4448 	unsigned int i;
4449 
4450 	for (i = 0; i < devip->nr_zones; i++, zsp++) {
4451 		if (zsp->z_cond == ZC4_CLOSED)
4452 			zbc_open_zone(devip, &devip->zstate[i], true);
4453 	}
4454 }
4455 
4456 static int resp_open_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4457 {
4458 	int res = 0;
4459 	u64 z_id;
4460 	enum sdebug_z_cond zc;
4461 	u8 *cmd = scp->cmnd;
4462 	struct sdeb_zone_state *zsp;
4463 	bool all = cmd[14] & 0x01;
4464 	struct sdeb_store_info *sip = devip2sip(devip, false);
4465 	rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
4466 
4467 	if (!sdebug_dev_is_zoned(devip)) {
4468 		mk_sense_invalid_opcode(scp);
4469 		return check_condition_result;
4470 	}
4471 
4472 	write_lock(macc_lckp);
4473 
4474 	if (all) {
4475 		/* Check if all closed zones can be opened */
4476 		if (devip->max_open &&
4477 		    devip->nr_exp_open + devip->nr_closed > devip->max_open) {
4478 			mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
4479 					INSUFF_ZONE_ASCQ);
4480 			res = check_condition_result;
4481 			goto fini;
4482 		}
4483 		/* Open all closed zones */
4484 		zbc_open_all(devip);
4485 		goto fini;
4486 	}
4487 
4488 	/* Open the specified zone */
4489 	z_id = get_unaligned_be64(cmd + 2);
4490 	if (z_id >= sdebug_capacity) {
4491 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4492 		res = check_condition_result;
4493 		goto fini;
4494 	}
4495 
4496 	zsp = zbc_zone(devip, z_id);
4497 	if (z_id != zsp->z_start) {
4498 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4499 		res = check_condition_result;
4500 		goto fini;
4501 	}
4502 	if (zbc_zone_is_conv(zsp)) {
4503 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4504 		res = check_condition_result;
4505 		goto fini;
4506 	}
4507 
4508 	zc = zsp->z_cond;
4509 	if (zc == ZC3_EXPLICIT_OPEN || zc == ZC5_FULL)
4510 		goto fini;
4511 
4512 	if (devip->max_open && devip->nr_exp_open >= devip->max_open) {
4513 		mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
4514 				INSUFF_ZONE_ASCQ);
4515 		res = check_condition_result;
4516 		goto fini;
4517 	}
4518 
4519 	zbc_open_zone(devip, zsp, true);
4520 fini:
4521 	write_unlock(macc_lckp);
4522 	return res;
4523 }
4524 
4525 static void zbc_close_all(struct sdebug_dev_info *devip)
4526 {
4527 	unsigned int i;
4528 
4529 	for (i = 0; i < devip->nr_zones; i++)
4530 		zbc_close_zone(devip, &devip->zstate[i]);
4531 }
4532 
4533 static int resp_close_zone(struct scsi_cmnd *scp,
4534 			   struct sdebug_dev_info *devip)
4535 {
4536 	int res = 0;
4537 	u64 z_id;
4538 	u8 *cmd = scp->cmnd;
4539 	struct sdeb_zone_state *zsp;
4540 	bool all = cmd[14] & 0x01;
4541 	struct sdeb_store_info *sip = devip2sip(devip, false);
4542 	rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
4543 
4544 	if (!sdebug_dev_is_zoned(devip)) {
4545 		mk_sense_invalid_opcode(scp);
4546 		return check_condition_result;
4547 	}
4548 
4549 	write_lock(macc_lckp);
4550 
4551 	if (all) {
4552 		zbc_close_all(devip);
4553 		goto fini;
4554 	}
4555 
4556 	/* Close specified zone */
4557 	z_id = get_unaligned_be64(cmd + 2);
4558 	if (z_id >= sdebug_capacity) {
4559 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4560 		res = check_condition_result;
4561 		goto fini;
4562 	}
4563 
4564 	zsp = zbc_zone(devip, z_id);
4565 	if (z_id != zsp->z_start) {
4566 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4567 		res = check_condition_result;
4568 		goto fini;
4569 	}
4570 	if (zbc_zone_is_conv(zsp)) {
4571 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4572 		res = check_condition_result;
4573 		goto fini;
4574 	}
4575 
4576 	zbc_close_zone(devip, zsp);
4577 fini:
4578 	write_unlock(macc_lckp);
4579 	return res;
4580 }
4581 
4582 static void zbc_finish_zone(struct sdebug_dev_info *devip,
4583 			    struct sdeb_zone_state *zsp, bool empty)
4584 {
4585 	enum sdebug_z_cond zc = zsp->z_cond;
4586 
4587 	if (zc == ZC4_CLOSED || zc == ZC2_IMPLICIT_OPEN ||
4588 	    zc == ZC3_EXPLICIT_OPEN || (empty && zc == ZC1_EMPTY)) {
4589 		if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
4590 			zbc_close_zone(devip, zsp);
4591 		if (zsp->z_cond == ZC4_CLOSED)
4592 			devip->nr_closed--;
4593 		zsp->z_wp = zsp->z_start + zsp->z_size;
4594 		zsp->z_cond = ZC5_FULL;
4595 	}
4596 }
4597 
4598 static void zbc_finish_all(struct sdebug_dev_info *devip)
4599 {
4600 	unsigned int i;
4601 
4602 	for (i = 0; i < devip->nr_zones; i++)
4603 		zbc_finish_zone(devip, &devip->zstate[i], false);
4604 }
4605 
4606 static int resp_finish_zone(struct scsi_cmnd *scp,
4607 			    struct sdebug_dev_info *devip)
4608 {
4609 	struct sdeb_zone_state *zsp;
4610 	int res = 0;
4611 	u64 z_id;
4612 	u8 *cmd = scp->cmnd;
4613 	bool all = cmd[14] & 0x01;
4614 	struct sdeb_store_info *sip = devip2sip(devip, false);
4615 	rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
4616 
4617 	if (!sdebug_dev_is_zoned(devip)) {
4618 		mk_sense_invalid_opcode(scp);
4619 		return check_condition_result;
4620 	}
4621 
4622 	write_lock(macc_lckp);
4623 
4624 	if (all) {
4625 		zbc_finish_all(devip);
4626 		goto fini;
4627 	}
4628 
4629 	/* Finish the specified zone */
4630 	z_id = get_unaligned_be64(cmd + 2);
4631 	if (z_id >= sdebug_capacity) {
4632 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4633 		res = check_condition_result;
4634 		goto fini;
4635 	}
4636 
4637 	zsp = zbc_zone(devip, z_id);
4638 	if (z_id != zsp->z_start) {
4639 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4640 		res = check_condition_result;
4641 		goto fini;
4642 	}
4643 	if (zbc_zone_is_conv(zsp)) {
4644 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4645 		res = check_condition_result;
4646 		goto fini;
4647 	}
4648 
4649 	zbc_finish_zone(devip, zsp, true);
4650 fini:
4651 	write_unlock(macc_lckp);
4652 	return res;
4653 }
4654 
4655 static void zbc_rwp_zone(struct sdebug_dev_info *devip,
4656 			 struct sdeb_zone_state *zsp)
4657 {
4658 	enum sdebug_z_cond zc;
4659 	struct sdeb_store_info *sip = devip2sip(devip, false);
4660 
4661 	if (zbc_zone_is_conv(zsp))
4662 		return;
4663 
4664 	zc = zsp->z_cond;
4665 	if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
4666 		zbc_close_zone(devip, zsp);
4667 
4668 	if (zsp->z_cond == ZC4_CLOSED)
4669 		devip->nr_closed--;
4670 
4671 	if (zsp->z_wp > zsp->z_start)
4672 		memset(sip->storep + zsp->z_start * sdebug_sector_size, 0,
4673 		       (zsp->z_wp - zsp->z_start) * sdebug_sector_size);
4674 
4675 	zsp->z_non_seq_resource = false;
4676 	zsp->z_wp = zsp->z_start;
4677 	zsp->z_cond = ZC1_EMPTY;
4678 }
4679 
4680 static void zbc_rwp_all(struct sdebug_dev_info *devip)
4681 {
4682 	unsigned int i;
4683 
4684 	for (i = 0; i < devip->nr_zones; i++)
4685 		zbc_rwp_zone(devip, &devip->zstate[i]);
4686 }
4687 
4688 static int resp_rwp_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4689 {
4690 	struct sdeb_zone_state *zsp;
4691 	int res = 0;
4692 	u64 z_id;
4693 	u8 *cmd = scp->cmnd;
4694 	bool all = cmd[14] & 0x01;
4695 	struct sdeb_store_info *sip = devip2sip(devip, false);
4696 	rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
4697 
4698 	if (!sdebug_dev_is_zoned(devip)) {
4699 		mk_sense_invalid_opcode(scp);
4700 		return check_condition_result;
4701 	}
4702 
4703 	write_lock(macc_lckp);
4704 
4705 	if (all) {
4706 		zbc_rwp_all(devip);
4707 		goto fini;
4708 	}
4709 
4710 	z_id = get_unaligned_be64(cmd + 2);
4711 	if (z_id >= sdebug_capacity) {
4712 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4713 		res = check_condition_result;
4714 		goto fini;
4715 	}
4716 
4717 	zsp = zbc_zone(devip, z_id);
4718 	if (z_id != zsp->z_start) {
4719 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4720 		res = check_condition_result;
4721 		goto fini;
4722 	}
4723 	if (zbc_zone_is_conv(zsp)) {
4724 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4725 		res = check_condition_result;
4726 		goto fini;
4727 	}
4728 
4729 	zbc_rwp_zone(devip, zsp);
4730 fini:
4731 	write_unlock(macc_lckp);
4732 	return res;
4733 }
4734 
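/*
 * blk_mq_unique_tag() encodes the hardware queue index in the upper
 * 16 bits of the returned value and the per-queue tag in the lower 16
 * bits; blk_mq_unique_tag_to_hwq() extracts that index, which
 * get_queue() uses to select the matching sdebug_queue.
 */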
4735 static struct sdebug_queue *get_queue(struct scsi_cmnd *cmnd)
4736 {
4737 	u16 hwq;
4738 	u32 tag = blk_mq_unique_tag(scsi_cmd_to_rq(cmnd));
4739 
4740 	hwq = blk_mq_unique_tag_to_hwq(tag);
4741 
4742 	pr_debug("tag=%#x, hwq=%d\n", tag, hwq);
4743 	if (WARN_ON_ONCE(hwq >= submit_queues))
4744 		hwq = 0;
4745 
4746 	return sdebug_q_arr + hwq;
4747 }
4748 
4749 static u32 get_tag(struct scsi_cmnd *cmnd)
4750 {
4751 	return blk_mq_unique_tag(scsi_cmd_to_rq(cmnd));
4752 }
4753 
4754 /* Queued (deferred) command completions converge here. */
4755 static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
4756 {
4757 	bool aborted = sd_dp->aborted;
4758 	int qc_idx;
4759 	int retiring = 0;
4760 	unsigned long iflags;
4761 	struct sdebug_queue *sqp;
4762 	struct sdebug_queued_cmd *sqcp;
4763 	struct scsi_cmnd *scp;
4764 	struct sdebug_dev_info *devip;
4765 
4766 	if (unlikely(aborted))
4767 		sd_dp->aborted = false;
4768 	qc_idx = sd_dp->qc_idx;
4769 	sqp = sdebug_q_arr + sd_dp->sqa_idx;
4770 	if (sdebug_statistics) {
4771 		atomic_inc(&sdebug_completions);
4772 		if (raw_smp_processor_id() != sd_dp->issuing_cpu)
4773 			atomic_inc(&sdebug_miss_cpus);
4774 	}
4775 	if (unlikely((qc_idx < 0) || (qc_idx >= SDEBUG_CANQUEUE))) {
4776 		pr_err("wild qc_idx=%d\n", qc_idx);
4777 		return;
4778 	}
4779 	spin_lock_irqsave(&sqp->qc_lock, iflags);
4780 	sd_dp->defer_t = SDEB_DEFER_NONE;
4781 	sqcp = &sqp->qc_arr[qc_idx];
4782 	scp = sqcp->a_cmnd;
4783 	if (unlikely(scp == NULL)) {
4784 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4785 		pr_err("scp is NULL, sqa_idx=%d, qc_idx=%d, hc_idx=%d\n",
4786 		       sd_dp->sqa_idx, qc_idx, sd_dp->hc_idx);
4787 		return;
4788 	}
4789 	devip = (struct sdebug_dev_info *)scp->device->hostdata;
4790 	if (likely(devip))
4791 		atomic_dec(&devip->num_in_q);
4792 	else
4793 		pr_err("devip=NULL\n");
4794 	if (unlikely(atomic_read(&retired_max_queue) > 0))
4795 		retiring = 1;
4796 
4797 	sqcp->a_cmnd = NULL;
4798 	if (unlikely(!test_and_clear_bit(qc_idx, sqp->in_use_bm))) {
4799 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4800 		pr_err("Unexpected completion\n");
4801 		return;
4802 	}
4803 
4804 	if (unlikely(retiring)) {	/* user has reduced max_queue */
4805 		int k, retval;
4806 
4807 		retval = atomic_read(&retired_max_queue);
4808 		if (qc_idx >= retval) {
4809 			spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4810 			pr_err("index %d too large\n", retval);
4811 			return;
4812 		}
4813 		k = find_last_bit(sqp->in_use_bm, retval);
4814 		if ((k < sdebug_max_queue) || (k == retval))
4815 			atomic_set(&retired_max_queue, 0);
4816 		else
4817 			atomic_set(&retired_max_queue, k + 1);
4818 	}
4819 	spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4820 	if (unlikely(aborted)) {
4821 		if (sdebug_verbose)
4822 			pr_info("bypassing scsi_done() due to aborted cmd\n");
4823 		return;
4824 	}
4825 	scsi_done(scp); /* callback to mid level */
4826 }
4827 
4828 /* When high resolution timer goes off this function is called. */
4829 static enum hrtimer_restart sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
4830 {
4831 	struct sdebug_defer *sd_dp = container_of(timer, struct sdebug_defer,
4832 						  hrt);
4833 	sdebug_q_cmd_complete(sd_dp);
4834 	return HRTIMER_NORESTART;
4835 }
4836 
4837 /* When work queue schedules work, it calls this function. */
4838 static void sdebug_q_cmd_wq_complete(struct work_struct *work)
4839 {
4840 	struct sdebug_defer *sd_dp = container_of(work, struct sdebug_defer,
4841 						  ew.work);
4842 	sdebug_q_cmd_complete(sd_dp);
4843 }
4844 
4845 static bool got_shared_uuid;
4846 static uuid_t shared_uuid;
4847 
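/*
 * Sizing example for the helper below (illustrative, assuming the
 * 128 MB default zone size and 512-byte logical blocks): the default
 * zsize is 262144 blocks. A 256 MB device (524288 blocks) cannot hold
 * four such zones, so zsize is halved once to 131072 blocks, giving
 * nr_zones == 4.
 */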
4848 static int sdebug_device_create_zones(struct sdebug_dev_info *devip)
4849 {
4850 	struct sdeb_zone_state *zsp;
4851 	sector_t capacity = get_sdebug_capacity();
4852 	sector_t zstart = 0;
4853 	unsigned int i;
4854 
4855 	/*
4856 	 * Set the zone size: if sdeb_zbc_zone_size_mb is not set, figure out
4857 	 * a zone size allowing for at least 4 zones on the device. Otherwise,
4858 	 * use the specified zone size checking that at least 2 zones can be
4859 	 * created for the device.
4860 	 */
4861 	if (!sdeb_zbc_zone_size_mb) {
4862 		devip->zsize = (DEF_ZBC_ZONE_SIZE_MB * SZ_1M)
4863 			>> ilog2(sdebug_sector_size);
4864 		while (capacity < devip->zsize << 2 && devip->zsize >= 2)
4865 			devip->zsize >>= 1;
4866 		if (devip->zsize < 2) {
4867 			pr_err("Device capacity too small\n");
4868 			return -EINVAL;
4869 		}
4870 	} else {
4871 		if (!is_power_of_2(sdeb_zbc_zone_size_mb)) {
4872 			pr_err("Zone size is not a power of 2\n");
4873 			return -EINVAL;
4874 		}
4875 		devip->zsize = (sdeb_zbc_zone_size_mb * SZ_1M)
4876 			>> ilog2(sdebug_sector_size);
4877 		if (devip->zsize >= capacity) {
4878 			pr_err("Zone size too large for device capacity\n");
4879 			return -EINVAL;
4880 		}
4881 	}
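
	/*
	 * Illustrative sizing (hypothetical values): with a 512 byte sector
	 * size, a 128 MiB zone size is 262144 sectors; the default path
	 * above halves devip->zsize until at least 4 zones fit within the
	 * capacity.
	 */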
4882 
4883 	devip->zsize_shift = ilog2(devip->zsize);
4884 	devip->nr_zones = (capacity + devip->zsize - 1) >> devip->zsize_shift;
4885 
4886 	if (sdeb_zbc_nr_conv >= devip->nr_zones) {
4887 		pr_err("Number of conventional zones too large\n");
4888 		return -EINVAL;
4889 	}
4890 	devip->nr_conv_zones = sdeb_zbc_nr_conv;
4891 
4892 	if (devip->zmodel == BLK_ZONED_HM) {
		/* sdeb_zbc_max_open can be 0, meaning "not reported" */
4894 		if (sdeb_zbc_max_open >= devip->nr_zones - 1)
4895 			devip->max_open = (devip->nr_zones - 1) / 2;
4896 		else
4897 			devip->max_open = sdeb_zbc_max_open;
4898 	}
4899 
4900 	devip->zstate = kcalloc(devip->nr_zones,
4901 				sizeof(struct sdeb_zone_state), GFP_KERNEL);
4902 	if (!devip->zstate)
4903 		return -ENOMEM;
4904 
4905 	for (i = 0; i < devip->nr_zones; i++) {
4906 		zsp = &devip->zstate[i];
4907 
4908 		zsp->z_start = zstart;
4909 
4910 		if (i < devip->nr_conv_zones) {
4911 			zsp->z_type = ZBC_ZONE_TYPE_CNV;
4912 			zsp->z_cond = ZBC_NOT_WRITE_POINTER;
4913 			zsp->z_wp = (sector_t)-1;
4914 		} else {
4915 			if (devip->zmodel == BLK_ZONED_HM)
4916 				zsp->z_type = ZBC_ZONE_TYPE_SWR;
4917 			else
4918 				zsp->z_type = ZBC_ZONE_TYPE_SWP;
4919 			zsp->z_cond = ZC1_EMPTY;
4920 			zsp->z_wp = zsp->z_start;
4921 		}
4922 
4923 		if (zsp->z_start + devip->zsize < capacity)
4924 			zsp->z_size = devip->zsize;
4925 		else
4926 			zsp->z_size = capacity - zsp->z_start;
4927 
4928 		zstart += zsp->z_size;
4929 	}
4930 
4931 	return 0;
4932 }
4933 
4934 static struct sdebug_dev_info *sdebug_device_create(
4935 			struct sdebug_host_info *sdbg_host, gfp_t flags)
4936 {
4937 	struct sdebug_dev_info *devip;
4938 
4939 	devip = kzalloc(sizeof(*devip), flags);
4940 	if (devip) {
4941 		if (sdebug_uuid_ctl == 1)
4942 			uuid_gen(&devip->lu_name);
4943 		else if (sdebug_uuid_ctl == 2) {
4944 			if (got_shared_uuid)
4945 				devip->lu_name = shared_uuid;
4946 			else {
4947 				uuid_gen(&shared_uuid);
4948 				got_shared_uuid = true;
4949 				devip->lu_name = shared_uuid;
4950 			}
4951 		}
4952 		devip->sdbg_host = sdbg_host;
4953 		if (sdeb_zbc_in_use) {
4954 			devip->zmodel = sdeb_zbc_model;
4955 			if (sdebug_device_create_zones(devip)) {
4956 				kfree(devip);
4957 				return NULL;
4958 			}
4959 		} else {
4960 			devip->zmodel = BLK_ZONED_NONE;
4961 		}
4963 		devip->create_ts = ktime_get_boottime();
4964 		atomic_set(&devip->stopped, (sdeb_tur_ms_to_ready > 0 ? 2 : 0));
4965 		list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
4966 	}
4967 	return devip;
4968 }
4969 
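/*
 * Find the sdebug_dev_info matching @sdev's <channel, id, lun> on its host;
 * if none exists, reuse an unused entry or allocate a new one.
 */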
4970 static struct sdebug_dev_info *find_build_dev_info(struct scsi_device *sdev)
4971 {
4972 	struct sdebug_host_info *sdbg_host;
4973 	struct sdebug_dev_info *open_devip = NULL;
4974 	struct sdebug_dev_info *devip;
4975 
4976 	sdbg_host = *(struct sdebug_host_info **)shost_priv(sdev->host);
4977 	if (!sdbg_host) {
4978 		pr_err("Host info NULL\n");
4979 		return NULL;
4980 	}
4981 
	list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
		if (devip->used && devip->channel == sdev->channel &&
		    devip->target == sdev->id && devip->lun == sdev->lun)
			return devip;
		if (!devip->used && !open_devip)
			open_devip = devip;
	}
4992 	if (!open_devip) { /* try and make a new one */
4993 		open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
4994 		if (!open_devip) {
4995 			pr_err("out of memory at line %d\n", __LINE__);
4996 			return NULL;
4997 		}
4998 	}
4999 
5000 	open_devip->channel = sdev->channel;
5001 	open_devip->target = sdev->id;
5002 	open_devip->lun = sdev->lun;
5003 	open_devip->sdbg_host = sdbg_host;
5004 	atomic_set(&open_devip->num_in_q, 0);
5005 	set_bit(SDEBUG_UA_POR, open_devip->uas_bm);
5006 	open_devip->used = true;
5007 	return open_devip;
5008 }
5009 
5010 static int scsi_debug_slave_alloc(struct scsi_device *sdp)
5011 {
5012 	if (sdebug_verbose)
5013 		pr_info("slave_alloc <%u %u %u %llu>\n",
5014 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5015 	return 0;
5016 }
5017 
5018 static int scsi_debug_slave_configure(struct scsi_device *sdp)
5019 {
5020 	struct sdebug_dev_info *devip =
5021 			(struct sdebug_dev_info *)sdp->hostdata;
5022 
5023 	if (sdebug_verbose)
5024 		pr_info("slave_configure <%u %u %u %llu>\n",
5025 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5026 	if (sdp->host->max_cmd_len != SDEBUG_MAX_CMD_LEN)
5027 		sdp->host->max_cmd_len = SDEBUG_MAX_CMD_LEN;
5028 	if (devip == NULL) {
5029 		devip = find_build_dev_info(sdp);
5030 		if (devip == NULL)
5031 			return 1;  /* no resources, will be marked offline */
5032 	}
5033 	sdp->hostdata = devip;
5034 	if (sdebug_no_uld)
5035 		sdp->no_uld_attach = 1;
5036 	config_cdb_len(sdp);
5037 	return 0;
5038 }
5039 
5040 static void scsi_debug_slave_destroy(struct scsi_device *sdp)
5041 {
5042 	struct sdebug_dev_info *devip =
5043 		(struct sdebug_dev_info *)sdp->hostdata;
5044 
5045 	if (sdebug_verbose)
5046 		pr_info("slave_destroy <%u %u %u %llu>\n",
5047 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5048 	if (devip) {
5049 		/* make this slot available for re-use */
5050 		devip->used = false;
5051 		sdp->hostdata = NULL;
5052 	}
5053 }
5054 
5055 static void stop_qc_helper(struct sdebug_defer *sd_dp,
5056 			   enum sdeb_defer_type defer_t)
5057 {
5058 	if (!sd_dp)
5059 		return;
5060 	if (defer_t == SDEB_DEFER_HRT)
5061 		hrtimer_cancel(&sd_dp->hrt);
5062 	else if (defer_t == SDEB_DEFER_WQ)
5063 		cancel_work_sync(&sd_dp->ew.work);
5064 }
5065 
/* If @cmnd is found, deletes its timer or work queue and returns true;
   else returns false */
5068 static bool stop_queued_cmnd(struct scsi_cmnd *cmnd)
5069 {
5070 	unsigned long iflags;
5071 	int j, k, qmax, r_qmax;
5072 	enum sdeb_defer_type l_defer_t;
5073 	struct sdebug_queue *sqp;
5074 	struct sdebug_queued_cmd *sqcp;
5075 	struct sdebug_dev_info *devip;
5076 	struct sdebug_defer *sd_dp;
5077 
5078 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5079 		spin_lock_irqsave(&sqp->qc_lock, iflags);
5080 		qmax = sdebug_max_queue;
5081 		r_qmax = atomic_read(&retired_max_queue);
5082 		if (r_qmax > qmax)
5083 			qmax = r_qmax;
5084 		for (k = 0; k < qmax; ++k) {
5085 			if (test_bit(k, sqp->in_use_bm)) {
5086 				sqcp = &sqp->qc_arr[k];
5087 				if (cmnd != sqcp->a_cmnd)
5088 					continue;
5089 				/* found */
5090 				devip = (struct sdebug_dev_info *)
5091 						cmnd->device->hostdata;
5092 				if (devip)
5093 					atomic_dec(&devip->num_in_q);
5094 				sqcp->a_cmnd = NULL;
5095 				sd_dp = sqcp->sd_dp;
5096 				if (sd_dp) {
5097 					l_defer_t = sd_dp->defer_t;
5098 					sd_dp->defer_t = SDEB_DEFER_NONE;
5099 				} else
5100 					l_defer_t = SDEB_DEFER_NONE;
5101 				spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5102 				stop_qc_helper(sd_dp, l_defer_t);
5103 				clear_bit(k, sqp->in_use_bm);
5104 				return true;
5105 			}
5106 		}
5107 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5108 	}
5109 	return false;
5110 }
5111 
5112 /* Deletes (stops) timers or work queues of all queued commands */
5113 static void stop_all_queued(void)
5114 {
5115 	unsigned long iflags;
5116 	int j, k;
5117 	enum sdeb_defer_type l_defer_t;
5118 	struct sdebug_queue *sqp;
5119 	struct sdebug_queued_cmd *sqcp;
5120 	struct sdebug_dev_info *devip;
5121 	struct sdebug_defer *sd_dp;
5122 
5123 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5124 		spin_lock_irqsave(&sqp->qc_lock, iflags);
5125 		for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
5126 			if (test_bit(k, sqp->in_use_bm)) {
5127 				sqcp = &sqp->qc_arr[k];
5128 				if (sqcp->a_cmnd == NULL)
5129 					continue;
5130 				devip = (struct sdebug_dev_info *)
5131 					sqcp->a_cmnd->device->hostdata;
5132 				if (devip)
5133 					atomic_dec(&devip->num_in_q);
5134 				sqcp->a_cmnd = NULL;
5135 				sd_dp = sqcp->sd_dp;
5136 				if (sd_dp) {
5137 					l_defer_t = sd_dp->defer_t;
5138 					sd_dp->defer_t = SDEB_DEFER_NONE;
5139 				} else
5140 					l_defer_t = SDEB_DEFER_NONE;
5141 				spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5142 				stop_qc_helper(sd_dp, l_defer_t);
5143 				clear_bit(k, sqp->in_use_bm);
5144 				spin_lock_irqsave(&sqp->qc_lock, iflags);
5145 			}
5146 		}
5147 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5148 	}
5149 }
5150 
/* Free each queued command's heap-allocated sdebug_defer object */
5152 static void free_all_queued(void)
5153 {
5154 	int j, k;
5155 	struct sdebug_queue *sqp;
5156 	struct sdebug_queued_cmd *sqcp;
5157 
5158 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5159 		for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
5160 			sqcp = &sqp->qc_arr[k];
5161 			kfree(sqcp->sd_dp);
5162 			sqcp->sd_dp = NULL;
5163 		}
5164 	}
5165 }
5166 
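/*
 * SCSI error handling (eh) callbacks. Each handler claims SUCCESS whether
 * or not a matching command or device was found (hence the "lie" labels
 * in the reset handlers below).
 */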
5167 static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
5168 {
5169 	bool ok;
5170 
5171 	++num_aborts;
5172 	if (SCpnt) {
5173 		ok = stop_queued_cmnd(SCpnt);
5174 		if (SCpnt->device && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
5175 			sdev_printk(KERN_INFO, SCpnt->device,
5176 				    "%s: command%s found\n", __func__,
5177 				    ok ? "" : " not");
5178 	}
5179 	return SUCCESS;
5180 }
5181 
5182 static int scsi_debug_device_reset(struct scsi_cmnd *SCpnt)
5183 {
5184 	++num_dev_resets;
5185 	if (SCpnt && SCpnt->device) {
5186 		struct scsi_device *sdp = SCpnt->device;
5187 		struct sdebug_dev_info *devip =
5188 				(struct sdebug_dev_info *)sdp->hostdata;
5189 
5190 		if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5191 			sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5192 		if (devip)
5193 			set_bit(SDEBUG_UA_POR, devip->uas_bm);
5194 	}
5195 	return SUCCESS;
5196 }
5197 
5198 static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt)
5199 {
5200 	struct sdebug_host_info *sdbg_host;
5201 	struct sdebug_dev_info *devip;
5202 	struct scsi_device *sdp;
5203 	struct Scsi_Host *hp;
5204 	int k = 0;
5205 
5206 	++num_target_resets;
5207 	if (!SCpnt)
5208 		goto lie;
5209 	sdp = SCpnt->device;
5210 	if (!sdp)
5211 		goto lie;
5212 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5213 		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5214 	hp = sdp->host;
5215 	if (!hp)
5216 		goto lie;
5217 	sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
5218 	if (sdbg_host) {
5219 		list_for_each_entry(devip,
5220 				    &sdbg_host->dev_info_list,
5221 				    dev_list)
5222 			if (devip->target == sdp->id) {
5223 				set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5224 				++k;
5225 			}
5226 	}
5227 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5228 		sdev_printk(KERN_INFO, sdp,
5229 			    "%s: %d device(s) found in target\n", __func__, k);
5230 lie:
5231 	return SUCCESS;
5232 }
5233 
5234 static int scsi_debug_bus_reset(struct scsi_cmnd *SCpnt)
5235 {
5236 	struct sdebug_host_info *sdbg_host;
5237 	struct sdebug_dev_info *devip;
5238 	struct scsi_device *sdp;
5239 	struct Scsi_Host *hp;
5240 	int k = 0;
5241 
5242 	++num_bus_resets;
5243 	if (!(SCpnt && SCpnt->device))
5244 		goto lie;
5245 	sdp = SCpnt->device;
5246 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5247 		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5248 	hp = sdp->host;
5249 	if (hp) {
5250 		sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
5251 		if (sdbg_host) {
5252 			list_for_each_entry(devip,
5253 					    &sdbg_host->dev_info_list,
5254 					    dev_list) {
5255 				set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5256 				++k;
5257 			}
5258 		}
5259 	}
5260 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5261 		sdev_printk(KERN_INFO, sdp,
5262 			    "%s: %d device(s) found in host\n", __func__, k);
5263 lie:
5264 	return SUCCESS;
5265 }
5266 
5267 static int scsi_debug_host_reset(struct scsi_cmnd *SCpnt)
5268 {
5269 	struct sdebug_host_info *sdbg_host;
5270 	struct sdebug_dev_info *devip;
5271 	int k = 0;
5272 
5273 	++num_host_resets;
5274 	if ((SCpnt->device) && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
5275 		sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__);
5276 	spin_lock(&sdebug_host_list_lock);
5277 	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
5278 		list_for_each_entry(devip, &sdbg_host->dev_info_list,
5279 				    dev_list) {
5280 			set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5281 			++k;
5282 		}
5283 	}
5284 	spin_unlock(&sdebug_host_list_lock);
5285 	stop_all_queued();
5286 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5287 		sdev_printk(KERN_INFO, SCpnt->device,
5288 			    "%s: %d device(s) found\n", __func__, k);
5289 	return SUCCESS;
5290 }
5291 
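/*
 * Write an MSDOS (MBR) partition table at the start of the ram store.
 * Partition boundaries are aligned to cylinder boundaries (heads *
 * sectors per track), as legacy CHS tooling expects.
 */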
5292 static void sdebug_build_parts(unsigned char *ramp, unsigned long store_size)
5293 {
5294 	struct msdos_partition *pp;
5295 	int starts[SDEBUG_MAX_PARTS + 2], max_part_secs;
5296 	int sectors_per_part, num_sectors, k;
5297 	int heads_by_sects, start_sec, end_sec;
5298 
5299 	/* assume partition table already zeroed */
5300 	if ((sdebug_num_parts < 1) || (store_size < 1048576))
5301 		return;
5302 	if (sdebug_num_parts > SDEBUG_MAX_PARTS) {
5303 		sdebug_num_parts = SDEBUG_MAX_PARTS;
5304 		pr_warn("reducing partitions to %d\n", SDEBUG_MAX_PARTS);
5305 	}
5306 	num_sectors = (int)get_sdebug_capacity();
5307 	sectors_per_part = (num_sectors - sdebug_sectors_per)
5308 			   / sdebug_num_parts;
5309 	heads_by_sects = sdebug_heads * sdebug_sectors_per;
5310 	starts[0] = sdebug_sectors_per;
5311 	max_part_secs = sectors_per_part;
5312 	for (k = 1; k < sdebug_num_parts; ++k) {
5313 		starts[k] = ((k * sectors_per_part) / heads_by_sects)
5314 			    * heads_by_sects;
5315 		if (starts[k] - starts[k - 1] < max_part_secs)
5316 			max_part_secs = starts[k] - starts[k - 1];
5317 	}
5318 	starts[sdebug_num_parts] = num_sectors;
5319 	starts[sdebug_num_parts + 1] = 0;
5320 
	ramp[510] = 0x55;	/* MBR boot signature in bytes 510-511 */
5322 	ramp[511] = 0xAA;
5323 	pp = (struct msdos_partition *)(ramp + 0x1be);
5324 	for (k = 0; starts[k + 1]; ++k, ++pp) {
5325 		start_sec = starts[k];
5326 		end_sec = starts[k] + max_part_secs - 1;
5327 		pp->boot_ind = 0;
5328 
5329 		pp->cyl = start_sec / heads_by_sects;
5330 		pp->head = (start_sec - (pp->cyl * heads_by_sects))
5331 			   / sdebug_sectors_per;
5332 		pp->sector = (start_sec % sdebug_sectors_per) + 1;
5333 
5334 		pp->end_cyl = end_sec / heads_by_sects;
5335 		pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
5336 			       / sdebug_sectors_per;
5337 		pp->end_sector = (end_sec % sdebug_sectors_per) + 1;
5338 
5339 		pp->start_sect = cpu_to_le32(start_sec);
5340 		pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1);
5341 		pp->sys_ind = 0x83;	/* plain Linux partition */
5342 	}
5343 }
5344 
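/*
 * Set or clear the blocked flag on every submission queue; used to briefly
 * quiesce command queuing while a global parameter is changed.
 */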
5345 static void block_unblock_all_queues(bool block)
5346 {
5347 	int j;
5348 	struct sdebug_queue *sqp;
5349 
5350 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp)
5351 		atomic_set(&sqp->blocked, (int)block);
5352 }
5353 
5354 /* Adjust (by rounding down) the sdebug_cmnd_count so abs(every_nth)-1
5355  * commands will be processed normally before triggers occur.
5356  */
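/* Example: with every_nth=100 and sdebug_cmnd_count=250, the count is set
 * to 200, so 99 more commands complete normally before the next trigger.
 */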
5357 static void tweak_cmnd_count(void)
5358 {
5359 	int count, modulo;
5360 
5361 	modulo = abs(sdebug_every_nth);
5362 	if (modulo < 2)
5363 		return;
5364 	block_unblock_all_queues(true);
5365 	count = atomic_read(&sdebug_cmnd_count);
5366 	atomic_set(&sdebug_cmnd_count, (count / modulo) * modulo);
5367 	block_unblock_all_queues(false);
5368 }
5369 
5370 static void clear_queue_stats(void)
5371 {
5372 	atomic_set(&sdebug_cmnd_count, 0);
5373 	atomic_set(&sdebug_completions, 0);
5374 	atomic_set(&sdebug_miss_cpus, 0);
5375 	atomic_set(&sdebug_a_tsf, 0);
5376 }
5377 
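/* Returns true for one in every abs(every_nth) commands, i.e. whenever the
 * running command count is a multiple of abs(every_nth). */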
5378 static bool inject_on_this_cmd(void)
5379 {
5380 	if (sdebug_every_nth == 0)
5381 		return false;
5382 	return (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) == 0;
5383 }
5384 
5385 #define INCLUSIVE_TIMING_MAX_NS 1000000		/* 1 millisecond */
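/* For ndelay values below this limit, the time already spent processing
 * the command is subtracted from the requested delay; if the delay has
 * already elapsed, scsi_done() is called directly (see schedule_resp()). */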
5386 
/* Complete the processing of the thread that queued a SCSI command to this
 * driver. It either completes the command by calling scsi_done() or
 * schedules an hrtimer or work queue item and then returns 0. Returns
 * SCSI_MLQUEUE_HOST_BUSY if temporarily out of resources.
 */
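/* Deferral paths: positive delays arm an hrtimer, a negative jdelay uses
 * the system workqueue, and REQ_POLLED commands are parked with
 * SDEB_DEFER_POLL until the block layer polls for completion. */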
5392 static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
5393 			 int scsi_result,
5394 			 int (*pfp)(struct scsi_cmnd *,
5395 				    struct sdebug_dev_info *),
5396 			 int delta_jiff, int ndelay)
5397 {
5398 	bool new_sd_dp;
5399 	bool inject = false;
5400 	bool polled = scsi_cmd_to_rq(cmnd)->cmd_flags & REQ_POLLED;
5401 	int k, num_in_q, qdepth;
5402 	unsigned long iflags;
5403 	u64 ns_from_boot = 0;
5404 	struct sdebug_queue *sqp;
5405 	struct sdebug_queued_cmd *sqcp;
5406 	struct scsi_device *sdp;
5407 	struct sdebug_defer *sd_dp;
5408 
5409 	if (unlikely(devip == NULL)) {
5410 		if (scsi_result == 0)
5411 			scsi_result = DID_NO_CONNECT << 16;
5412 		goto respond_in_thread;
5413 	}
5414 	sdp = cmnd->device;
5415 
5416 	if (delta_jiff == 0)
5417 		goto respond_in_thread;
5418 
5419 	sqp = get_queue(cmnd);
5420 	spin_lock_irqsave(&sqp->qc_lock, iflags);
5421 	if (unlikely(atomic_read(&sqp->blocked))) {
5422 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5423 		return SCSI_MLQUEUE_HOST_BUSY;
5424 	}
5425 	num_in_q = atomic_read(&devip->num_in_q);
5426 	qdepth = cmnd->device->queue_depth;
5427 	if (unlikely((qdepth > 0) && (num_in_q >= qdepth))) {
5428 		if (scsi_result) {
5429 			spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5430 			goto respond_in_thread;
5431 		} else
5432 			scsi_result = device_qfull_result;
5433 	} else if (unlikely(sdebug_every_nth &&
5434 			    (SDEBUG_OPT_RARE_TSF & sdebug_opts) &&
5435 			    (scsi_result == 0))) {
5436 		if ((num_in_q == (qdepth - 1)) &&
5437 		    (atomic_inc_return(&sdebug_a_tsf) >=
5438 		     abs(sdebug_every_nth))) {
5439 			atomic_set(&sdebug_a_tsf, 0);
5440 			inject = true;
5441 			scsi_result = device_qfull_result;
5442 		}
5443 	}
5444 
5445 	k = find_first_zero_bit(sqp->in_use_bm, sdebug_max_queue);
5446 	if (unlikely(k >= sdebug_max_queue)) {
5447 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5448 		if (scsi_result)
5449 			goto respond_in_thread;
5450 		else if (SDEBUG_OPT_ALL_TSF & sdebug_opts)
5451 			scsi_result = device_qfull_result;
5452 		if (SDEBUG_OPT_Q_NOISE & sdebug_opts)
5453 			sdev_printk(KERN_INFO, sdp,
5454 				    "%s: max_queue=%d exceeded, %s\n",
5455 				    __func__, sdebug_max_queue,
5456 				    (scsi_result ?  "status: TASK SET FULL" :
5457 						    "report: host busy"));
5458 		if (scsi_result)
5459 			goto respond_in_thread;
5460 		else
5461 			return SCSI_MLQUEUE_HOST_BUSY;
5462 	}
5463 	set_bit(k, sqp->in_use_bm);
5464 	atomic_inc(&devip->num_in_q);
5465 	sqcp = &sqp->qc_arr[k];
5466 	sqcp->a_cmnd = cmnd;
5467 	cmnd->host_scribble = (unsigned char *)sqcp;
5468 	sd_dp = sqcp->sd_dp;
5469 	spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5470 
5471 	if (!sd_dp) {
5472 		sd_dp = kzalloc(sizeof(*sd_dp), GFP_ATOMIC);
5473 		if (!sd_dp) {
5474 			atomic_dec(&devip->num_in_q);
5475 			clear_bit(k, sqp->in_use_bm);
5476 			return SCSI_MLQUEUE_HOST_BUSY;
5477 		}
5478 		new_sd_dp = true;
5479 	} else {
5480 		new_sd_dp = false;
5481 	}
5482 
5483 	/* Set the hostwide tag */
5484 	if (sdebug_host_max_queue)
5485 		sd_dp->hc_idx = get_tag(cmnd);
5486 
5487 	if (polled)
5488 		ns_from_boot = ktime_get_boottime_ns();
5489 
5490 	/* one of the resp_*() response functions is called here */
5491 	cmnd->result = pfp ? pfp(cmnd, devip) : 0;
5492 	if (cmnd->result & SDEG_RES_IMMED_MASK) {
5493 		cmnd->result &= ~SDEG_RES_IMMED_MASK;
5494 		delta_jiff = ndelay = 0;
5495 	}
5496 	if (cmnd->result == 0 && scsi_result != 0)
5497 		cmnd->result = scsi_result;
5498 	if (cmnd->result == 0 && unlikely(sdebug_opts & SDEBUG_OPT_TRANSPORT_ERR)) {
5499 		if (atomic_read(&sdeb_inject_pending)) {
5500 			mk_sense_buffer(cmnd, ABORTED_COMMAND, TRANSPORT_PROBLEM, ACK_NAK_TO);
5501 			atomic_set(&sdeb_inject_pending, 0);
5502 			cmnd->result = check_condition_result;
5503 		}
5504 	}
5505 
5506 	if (unlikely(sdebug_verbose && cmnd->result))
5507 		sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n",
5508 			    __func__, cmnd->result);
5509 
5510 	if (delta_jiff > 0 || ndelay > 0) {
5511 		ktime_t kt;
5512 
5513 		if (delta_jiff > 0) {
5514 			u64 ns = jiffies_to_nsecs(delta_jiff);
5515 
5516 			if (sdebug_random && ns < U32_MAX) {
5517 				ns = prandom_u32_max((u32)ns);
5518 			} else if (sdebug_random) {
5519 				ns >>= 12;	/* scale to 4 usec precision */
5520 				if (ns < U32_MAX)	/* over 4 hours max */
5521 					ns = prandom_u32_max((u32)ns);
5522 				ns <<= 12;
5523 			}
5524 			kt = ns_to_ktime(ns);
5525 		} else {	/* ndelay has a 4.2 second max */
5526 			kt = sdebug_random ? prandom_u32_max((u32)ndelay) :
5527 					     (u32)ndelay;
5528 			if (ndelay < INCLUSIVE_TIMING_MAX_NS) {
5529 				u64 d = ktime_get_boottime_ns() - ns_from_boot;
5530 
5531 				if (kt <= d) {	/* elapsed duration >= kt */
5532 					spin_lock_irqsave(&sqp->qc_lock, iflags);
5533 					sqcp->a_cmnd = NULL;
5534 					atomic_dec(&devip->num_in_q);
5535 					clear_bit(k, sqp->in_use_bm);
5536 					spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5537 					if (new_sd_dp)
5538 						kfree(sd_dp);
5539 					/* call scsi_done() from this thread */
5540 					scsi_done(cmnd);
5541 					return 0;
5542 				}
5543 				/* otherwise reduce kt by elapsed time */
5544 				kt -= d;
5545 			}
5546 		}
5547 		if (polled) {
5548 			sd_dp->cmpl_ts = ktime_add(ns_to_ktime(ns_from_boot), kt);
5549 			spin_lock_irqsave(&sqp->qc_lock, iflags);
5550 			if (!sd_dp->init_poll) {
5551 				sd_dp->init_poll = true;
5552 				sqcp->sd_dp = sd_dp;
5553 				sd_dp->sqa_idx = sqp - sdebug_q_arr;
5554 				sd_dp->qc_idx = k;
5555 			}
5556 			sd_dp->defer_t = SDEB_DEFER_POLL;
5557 			spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5558 		} else {
5559 			if (!sd_dp->init_hrt) {
5560 				sd_dp->init_hrt = true;
5561 				sqcp->sd_dp = sd_dp;
5562 				hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC,
5563 					     HRTIMER_MODE_REL_PINNED);
5564 				sd_dp->hrt.function = sdebug_q_cmd_hrt_complete;
5565 				sd_dp->sqa_idx = sqp - sdebug_q_arr;
5566 				sd_dp->qc_idx = k;
5567 			}
5568 			sd_dp->defer_t = SDEB_DEFER_HRT;
5569 			/* schedule the invocation of scsi_done() for a later time */
5570 			hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED);
5571 		}
5572 		if (sdebug_statistics)
5573 			sd_dp->issuing_cpu = raw_smp_processor_id();
5574 	} else {	/* jdelay < 0, use work queue */
5575 		if (unlikely((sdebug_opts & SDEBUG_OPT_CMD_ABORT) &&
5576 			     atomic_read(&sdeb_inject_pending)))
5577 			sd_dp->aborted = true;
5578 		if (polled) {
5579 			sd_dp->cmpl_ts = ns_to_ktime(ns_from_boot);
5580 			spin_lock_irqsave(&sqp->qc_lock, iflags);
5581 			if (!sd_dp->init_poll) {
5582 				sd_dp->init_poll = true;
5583 				sqcp->sd_dp = sd_dp;
5584 				sd_dp->sqa_idx = sqp - sdebug_q_arr;
5585 				sd_dp->qc_idx = k;
5586 			}
5587 			sd_dp->defer_t = SDEB_DEFER_POLL;
5588 			spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5589 		} else {
5590 			if (!sd_dp->init_wq) {
5591 				sd_dp->init_wq = true;
5592 				sqcp->sd_dp = sd_dp;
5593 				sd_dp->sqa_idx = sqp - sdebug_q_arr;
5594 				sd_dp->qc_idx = k;
5595 				INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);
5596 			}
5597 			sd_dp->defer_t = SDEB_DEFER_WQ;
5598 			schedule_work(&sd_dp->ew.work);
5599 		}
5600 		if (sdebug_statistics)
5601 			sd_dp->issuing_cpu = raw_smp_processor_id();
5602 		if (unlikely(sd_dp->aborted)) {
5603 			sdev_printk(KERN_INFO, sdp, "abort request tag %d\n",
5604 				    scsi_cmd_to_rq(cmnd)->tag);
5605 			blk_abort_request(scsi_cmd_to_rq(cmnd));
5606 			atomic_set(&sdeb_inject_pending, 0);
5607 			sd_dp->aborted = false;
5608 		}
5609 	}
5610 	if (unlikely((SDEBUG_OPT_Q_NOISE & sdebug_opts) && scsi_result == device_qfull_result))
5611 		sdev_printk(KERN_INFO, sdp, "%s: num_in_q=%d +1, %s%s\n", __func__,
5612 			    num_in_q, (inject ? "<inject> " : ""), "status: TASK SET FULL");
5613 	return 0;
5614 
5615 respond_in_thread:	/* call back to mid-layer using invocation thread */
5616 	cmnd->result = pfp != NULL ? pfp(cmnd, devip) : 0;
5617 	cmnd->result &= ~SDEG_RES_IMMED_MASK;
5618 	if (cmnd->result == 0 && scsi_result != 0)
5619 		cmnd->result = scsi_result;
5620 	scsi_done(cmnd);
5621 	return 0;
5622 }
5623 
5624 /* Note: The following macros create attribute files in the
5625    /sys/module/scsi_debug/parameters directory. Unfortunately this
5626    driver is unaware of a change and cannot trigger auxiliary actions
5627    as it can when the corresponding attribute in the
5628    /sys/bus/pseudo/drivers/scsi_debug directory is changed.
5629  */
5630 module_param_named(add_host, sdebug_add_host, int, S_IRUGO | S_IWUSR);
5631 module_param_named(ato, sdebug_ato, int, S_IRUGO);
5632 module_param_named(cdb_len, sdebug_cdb_len, int, 0644);
5633 module_param_named(clustering, sdebug_clustering, bool, S_IRUGO | S_IWUSR);
5634 module_param_named(delay, sdebug_jdelay, int, S_IRUGO | S_IWUSR);
5635 module_param_named(dev_size_mb, sdebug_dev_size_mb, int, S_IRUGO);
5636 module_param_named(dif, sdebug_dif, int, S_IRUGO);
5637 module_param_named(dix, sdebug_dix, int, S_IRUGO);
5638 module_param_named(dsense, sdebug_dsense, int, S_IRUGO | S_IWUSR);
5639 module_param_named(every_nth, sdebug_every_nth, int, S_IRUGO | S_IWUSR);
5640 module_param_named(fake_rw, sdebug_fake_rw, int, S_IRUGO | S_IWUSR);
5641 module_param_named(guard, sdebug_guard, uint, S_IRUGO);
5642 module_param_named(host_lock, sdebug_host_lock, bool, S_IRUGO | S_IWUSR);
5643 module_param_named(host_max_queue, sdebug_host_max_queue, int, S_IRUGO);
5644 module_param_string(inq_product, sdebug_inq_product_id,
5645 		    sizeof(sdebug_inq_product_id), S_IRUGO | S_IWUSR);
5646 module_param_string(inq_rev, sdebug_inq_product_rev,
5647 		    sizeof(sdebug_inq_product_rev), S_IRUGO | S_IWUSR);
5648 module_param_string(inq_vendor, sdebug_inq_vendor_id,
5649 		    sizeof(sdebug_inq_vendor_id), S_IRUGO | S_IWUSR);
5650 module_param_named(lbprz, sdebug_lbprz, int, S_IRUGO);
5651 module_param_named(lbpu, sdebug_lbpu, int, S_IRUGO);
5652 module_param_named(lbpws, sdebug_lbpws, int, S_IRUGO);
5653 module_param_named(lbpws10, sdebug_lbpws10, int, S_IRUGO);
5654 module_param_named(lowest_aligned, sdebug_lowest_aligned, int, S_IRUGO);
5655 module_param_named(lun_format, sdebug_lun_am_i, int, S_IRUGO | S_IWUSR);
5656 module_param_named(max_luns, sdebug_max_luns, int, S_IRUGO | S_IWUSR);
5657 module_param_named(max_queue, sdebug_max_queue, int, S_IRUGO | S_IWUSR);
5658 module_param_named(medium_error_count, sdebug_medium_error_count, int,
5659 		   S_IRUGO | S_IWUSR);
5660 module_param_named(medium_error_start, sdebug_medium_error_start, int,
5661 		   S_IRUGO | S_IWUSR);
5662 module_param_named(ndelay, sdebug_ndelay, int, S_IRUGO | S_IWUSR);
5663 module_param_named(no_lun_0, sdebug_no_lun_0, int, S_IRUGO | S_IWUSR);
5664 module_param_named(no_uld, sdebug_no_uld, int, S_IRUGO);
5665 module_param_named(num_parts, sdebug_num_parts, int, S_IRUGO);
5666 module_param_named(num_tgts, sdebug_num_tgts, int, S_IRUGO | S_IWUSR);
5667 module_param_named(opt_blks, sdebug_opt_blks, int, S_IRUGO);
5668 module_param_named(opt_xferlen_exp, sdebug_opt_xferlen_exp, int, S_IRUGO);
5669 module_param_named(opts, sdebug_opts, int, S_IRUGO | S_IWUSR);
5670 module_param_named(per_host_store, sdebug_per_host_store, bool,
5671 		   S_IRUGO | S_IWUSR);
5672 module_param_named(physblk_exp, sdebug_physblk_exp, int, S_IRUGO);
5673 module_param_named(ptype, sdebug_ptype, int, S_IRUGO | S_IWUSR);
5674 module_param_named(random, sdebug_random, bool, S_IRUGO | S_IWUSR);
5675 module_param_named(removable, sdebug_removable, bool, S_IRUGO | S_IWUSR);
5676 module_param_named(scsi_level, sdebug_scsi_level, int, S_IRUGO);
5677 module_param_named(sector_size, sdebug_sector_size, int, S_IRUGO);
5678 module_param_named(statistics, sdebug_statistics, bool, S_IRUGO | S_IWUSR);
5679 module_param_named(strict, sdebug_strict, bool, S_IRUGO | S_IWUSR);
5680 module_param_named(submit_queues, submit_queues, int, S_IRUGO);
5681 module_param_named(poll_queues, poll_queues, int, S_IRUGO);
5682 module_param_named(tur_ms_to_ready, sdeb_tur_ms_to_ready, int, S_IRUGO);
5683 module_param_named(unmap_alignment, sdebug_unmap_alignment, int, S_IRUGO);
5684 module_param_named(unmap_granularity, sdebug_unmap_granularity, int, S_IRUGO);
5685 module_param_named(unmap_max_blocks, sdebug_unmap_max_blocks, int, S_IRUGO);
5686 module_param_named(unmap_max_desc, sdebug_unmap_max_desc, int, S_IRUGO);
5687 module_param_named(uuid_ctl, sdebug_uuid_ctl, int, S_IRUGO);
5688 module_param_named(virtual_gb, sdebug_virtual_gb, int, S_IRUGO | S_IWUSR);
5689 module_param_named(vpd_use_hostno, sdebug_vpd_use_hostno, int,
5690 		   S_IRUGO | S_IWUSR);
5691 module_param_named(wp, sdebug_wp, bool, S_IRUGO | S_IWUSR);
5692 module_param_named(write_same_length, sdebug_write_same_length, int,
5693 		   S_IRUGO | S_IWUSR);
5694 module_param_named(zbc, sdeb_zbc_model_s, charp, S_IRUGO);
5695 module_param_named(zone_max_open, sdeb_zbc_max_open, int, S_IRUGO);
5696 module_param_named(zone_nr_conv, sdeb_zbc_nr_conv, int, S_IRUGO);
5697 module_param_named(zone_size_mb, sdeb_zbc_zone_size_mb, int, S_IRUGO);
5698 
5699 MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
5700 MODULE_DESCRIPTION("SCSI debug adapter driver");
5701 MODULE_LICENSE("GPL");
5702 MODULE_VERSION(SDEBUG_VERSION);
5703 
5704 MODULE_PARM_DESC(add_host, "add n hosts, in sysfs if negative remove host(s) (def=1)");
5705 MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
5706 MODULE_PARM_DESC(cdb_len, "suggest CDB lengths to drivers (def=10)");
5707 MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
5708 MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny");
5709 MODULE_PARM_DESC(dev_size_mb, "size in MiB of ram shared by devs(def=8)");
5710 MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
5711 MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
5712 MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
5713 MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
5714 MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
5715 MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
5716 MODULE_PARM_DESC(host_lock, "host_lock is ignored (def=0)");
MODULE_PARM_DESC(host_max_queue,
		 "host max # of queued cmds (0 to max(def)); if non-0, max_queue is fixed to the same value");
5719 MODULE_PARM_DESC(inq_product, "SCSI INQUIRY product string (def=\"scsi_debug\")");
5720 MODULE_PARM_DESC(inq_rev, "SCSI INQUIRY revision string (def=\""
5721 		 SDEBUG_VERSION "\")");
5722 MODULE_PARM_DESC(inq_vendor, "SCSI INQUIRY vendor string (def=\"Linux\")");
5723 MODULE_PARM_DESC(lbprz,
5724 		 "on read unmapped LBs return 0 when 1 (def), return 0xff when 2");
5725 MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
5726 MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
5727 MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
5728 MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
MODULE_PARM_DESC(lun_format, "LUN format: 0 -> peripheral (def); 1 -> flat address method");
5730 MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
5731 MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to max(def))");
MODULE_PARM_DESC(medium_error_count, "count of sectors returning MEDIUM error, starting at medium_error_start");
5733 MODULE_PARM_DESC(medium_error_start, "starting sector number to return MEDIUM error");
5734 MODULE_PARM_DESC(ndelay, "response delay in nanoseconds (def=0 -> ignore)");
5735 MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0)");
5737 MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
5738 MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
5739 MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=1024)");
5740 MODULE_PARM_DESC(opt_xferlen_exp, "optimal transfer length granularity exponent (def=physblk_exp)");
5741 MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
5742 MODULE_PARM_DESC(per_host_store, "If set, next positive add_host will get new store (def=0)");
5743 MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
MODULE_PARM_DESC(poll_queues, "support for io_uring iopoll queues (1 to max(submit_queues - 1))");
5745 MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
5746 MODULE_PARM_DESC(random, "If set, uniformly randomize command duration between 0 and delay_in_ns");
5747 MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
5748 MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=7[SPC-5])");
5749 MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
5750 MODULE_PARM_DESC(statistics, "collect statistics on commands, queues (def=0)");
5751 MODULE_PARM_DESC(strict, "stricter checks: reserved field in cdb (def=0)");
5752 MODULE_PARM_DESC(submit_queues, "support for block multi-queue (def=1)");
5753 MODULE_PARM_DESC(tur_ms_to_ready, "TEST UNIT READY millisecs before initial good status (def=0)");
5754 MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
5755 MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
5756 MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)");
5757 MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
5758 MODULE_PARM_DESC(uuid_ctl,
5759 		 "1->use uuid for lu name, 0->don't, 2->all use same (def=0)");
5760 MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)");
5761 MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
5762 MODULE_PARM_DESC(wp, "Write Protect (def=0)");
5763 MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
5764 MODULE_PARM_DESC(zbc, "'none' [0]; 'aware' [1]; 'managed' [2] (def=0). Can have 'host-' prefix");
5765 MODULE_PARM_DESC(zone_max_open, "Maximum number of open zones; [0] for no limit (def=auto)");
5766 MODULE_PARM_DESC(zone_nr_conv, "Number of conventional zones (def=1)");
5767 MODULE_PARM_DESC(zone_size_mb, "Zone size in MiB (def=auto)");
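
/*
 * Example invocation (hypothetical values):
 *   modprobe scsi_debug dev_size_mb=256 num_tgts=2 max_luns=4 delay=0
 * creates one host with two targets of four LUNs each, sharing a 256 MiB
 * ram store, and completes commands in the submitting thread.
 */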
5768 
5769 #define SDEBUG_INFO_LEN 256
5770 static char sdebug_info[SDEBUG_INFO_LEN];
5771 
5772 static const char *scsi_debug_info(struct Scsi_Host *shp)
5773 {
5774 	int k;
5775 
5776 	k = scnprintf(sdebug_info, SDEBUG_INFO_LEN, "%s: version %s [%s]\n",
5777 		      my_name, SDEBUG_VERSION, sdebug_version_date);
5778 	if (k >= (SDEBUG_INFO_LEN - 1))
5779 		return sdebug_info;
5780 	scnprintf(sdebug_info + k, SDEBUG_INFO_LEN - k,
5781 		  "  dev_size_mb=%d, opts=0x%x, submit_queues=%d, %s=%d",
5782 		  sdebug_dev_size_mb, sdebug_opts, submit_queues,
5783 		  "statistics", (int)sdebug_statistics);
5784 	return sdebug_info;
5785 }
5786 
5787 /* 'echo <val> > /proc/scsi/scsi_debug/<host_id>' writes to opts */
5788 static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer,
5789 				 int length)
5790 {
5791 	char arr[16];
5792 	int opts;
5793 	int minLen = length > 15 ? 15 : length;
5794 
5795 	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
5796 		return -EACCES;
5797 	memcpy(arr, buffer, minLen);
5798 	arr[minLen] = '\0';
5799 	if (1 != sscanf(arr, "%d", &opts))
5800 		return -EINVAL;
5801 	sdebug_opts = opts;
5802 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
5803 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
5804 	if (sdebug_every_nth != 0)
5805 		tweak_cmnd_count();
5806 	return length;
5807 }
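
/* Example (hypothetical host_id 0): 'echo 1 > /proc/scsi/scsi_debug/0'
 * sets SDEBUG_OPT_NOISE, which also enables sdebug_verbose. Note the value
 * is parsed with "%d", so it must be decimal. */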
5808 
/* Output seen with 'cat /proc/scsi/scsi_debug/<host_id>'. It will be the
 * same for each scsi_debug host (if more than one). Some of the counters
 * output are not atomic so they may be inaccurate on a busy system. */
5812 static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
5813 {
5814 	int f, j, l;
5815 	struct sdebug_queue *sqp;
5816 	struct sdebug_host_info *sdhp;
5817 
5818 	seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n",
5819 		   SDEBUG_VERSION, sdebug_version_date);
5820 	seq_printf(m, "num_tgts=%d, %ssize=%d MB, opts=0x%x, every_nth=%d\n",
5821 		   sdebug_num_tgts, "shared (ram) ", sdebug_dev_size_mb,
5822 		   sdebug_opts, sdebug_every_nth);
5823 	seq_printf(m, "delay=%d, ndelay=%d, max_luns=%d, sector_size=%d %s\n",
5824 		   sdebug_jdelay, sdebug_ndelay, sdebug_max_luns,
5825 		   sdebug_sector_size, "bytes");
5826 	seq_printf(m, "cylinders=%d, heads=%d, sectors=%d, command aborts=%d\n",
5827 		   sdebug_cylinders_per, sdebug_heads, sdebug_sectors_per,
5828 		   num_aborts);
5829 	seq_printf(m, "RESETs: device=%d, target=%d, bus=%d, host=%d\n",
5830 		   num_dev_resets, num_target_resets, num_bus_resets,
5831 		   num_host_resets);
5832 	seq_printf(m, "dix_reads=%d, dix_writes=%d, dif_errors=%d\n",
5833 		   dix_reads, dix_writes, dif_errors);
5834 	seq_printf(m, "usec_in_jiffy=%lu, statistics=%d\n", TICK_NSEC / 1000,
5835 		   sdebug_statistics);
5836 	seq_printf(m, "cmnd_count=%d, completions=%d, %s=%d, a_tsf=%d, mq_polls=%d\n",
5837 		   atomic_read(&sdebug_cmnd_count),
5838 		   atomic_read(&sdebug_completions),
5839 		   "miss_cpus", atomic_read(&sdebug_miss_cpus),
5840 		   atomic_read(&sdebug_a_tsf),
5841 		   atomic_read(&sdeb_mq_poll_count));
5842 
5843 	seq_printf(m, "submit_queues=%d\n", submit_queues);
5844 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5845 		seq_printf(m, "  queue %d:\n", j);
5846 		f = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
5847 		if (f != sdebug_max_queue) {
5848 			l = find_last_bit(sqp->in_use_bm, sdebug_max_queue);
5849 			seq_printf(m, "    in_use_bm BUSY: %s: %d,%d\n",
5850 				   "first,last bits", f, l);
5851 		}
5852 	}
5853 
5854 	seq_printf(m, "this host_no=%d\n", host->host_no);
5855 	if (!xa_empty(per_store_ap)) {
5856 		bool niu;
5857 		int idx;
5858 		unsigned long l_idx;
5859 		struct sdeb_store_info *sip;
5860 
5861 		seq_puts(m, "\nhost list:\n");
5862 		j = 0;
5863 		list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
5864 			idx = sdhp->si_idx;
5865 			seq_printf(m, "  %d: host_no=%d, si_idx=%d\n", j,
5866 				   sdhp->shost->host_no, idx);
5867 			++j;
5868 		}
5869 		seq_printf(m, "\nper_store array [most_recent_idx=%d]:\n",
5870 			   sdeb_most_recent_idx);
5871 		j = 0;
5872 		xa_for_each(per_store_ap, l_idx, sip) {
5873 			niu = xa_get_mark(per_store_ap, l_idx,
5874 					  SDEB_XA_NOT_IN_USE);
5875 			idx = (int)l_idx;
5876 			seq_printf(m, "  %d: idx=%d%s\n", j, idx,
5877 				   (niu ? "  not_in_use" : ""));
5878 			++j;
5879 		}
5880 	}
5881 	return 0;
5882 }
5883 
5884 static ssize_t delay_show(struct device_driver *ddp, char *buf)
5885 {
5886 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_jdelay);
5887 }
5888 /* Returns -EBUSY if jdelay is being changed and commands are queued. The unit
5889  * of delay is jiffies.
5890  */
5891 static ssize_t delay_store(struct device_driver *ddp, const char *buf,
5892 			   size_t count)
5893 {
5894 	int jdelay, res;
5895 
5896 	if (count > 0 && sscanf(buf, "%d", &jdelay) == 1) {
5897 		res = count;
5898 		if (sdebug_jdelay != jdelay) {
5899 			int j, k;
5900 			struct sdebug_queue *sqp;
5901 
5902 			block_unblock_all_queues(true);
5903 			for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
5904 			     ++j, ++sqp) {
5905 				k = find_first_bit(sqp->in_use_bm,
5906 						   sdebug_max_queue);
5907 				if (k != sdebug_max_queue) {
5908 					res = -EBUSY;   /* queued commands */
5909 					break;
5910 				}
5911 			}
5912 			if (res > 0) {
5913 				sdebug_jdelay = jdelay;
5914 				sdebug_ndelay = 0;
5915 			}
5916 			block_unblock_all_queues(false);
5917 		}
5918 		return res;
5919 	}
5920 	return -EINVAL;
5921 }
5922 static DRIVER_ATTR_RW(delay);
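
/* Example (hypothetical): 'echo 0 > /sys/bus/pseudo/drivers/scsi_debug/delay'
 * selects immediate (in-thread) responses; the write fails with EBUSY while
 * commands are still queued. */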
5923 
5924 static ssize_t ndelay_show(struct device_driver *ddp, char *buf)
5925 {
5926 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ndelay);
5927 }
5928 /* Returns -EBUSY if ndelay is being changed and commands are queued */
5929 /* If > 0 and accepted then sdebug_jdelay is set to JDELAY_OVERRIDDEN */
5930 static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
5931 			    size_t count)
5932 {
5933 	int ndelay, res;
5934 
5935 	if ((count > 0) && (1 == sscanf(buf, "%d", &ndelay)) &&
5936 	    (ndelay >= 0) && (ndelay < (1000 * 1000 * 1000))) {
5937 		res = count;
5938 		if (sdebug_ndelay != ndelay) {
5939 			int j, k;
5940 			struct sdebug_queue *sqp;
5941 
5942 			block_unblock_all_queues(true);
5943 			for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
5944 			     ++j, ++sqp) {
5945 				k = find_first_bit(sqp->in_use_bm,
5946 						   sdebug_max_queue);
5947 				if (k != sdebug_max_queue) {
5948 					res = -EBUSY;   /* queued commands */
5949 					break;
5950 				}
5951 			}
5952 			if (res > 0) {
5953 				sdebug_ndelay = ndelay;
				sdebug_jdelay = ndelay ? JDELAY_OVERRIDDEN
						       : DEF_JDELAY;
5956 			}
5957 			block_unblock_all_queues(false);
5958 		}
5959 		return res;
5960 	}
5961 	return -EINVAL;
5962 }
5963 static DRIVER_ATTR_RW(ndelay);
5964 
5965 static ssize_t opts_show(struct device_driver *ddp, char *buf)
5966 {
5967 	return scnprintf(buf, PAGE_SIZE, "0x%x\n", sdebug_opts);
5968 }
5969 
5970 static ssize_t opts_store(struct device_driver *ddp, const char *buf,
5971 			  size_t count)
5972 {
5973 	int opts;
5974 	char work[20];
5975 
5976 	if (sscanf(buf, "%10s", work) == 1) {
5977 		if (strncasecmp(work, "0x", 2) == 0) {
5978 			if (kstrtoint(work + 2, 16, &opts) == 0)
5979 				goto opts_done;
5980 		} else {
5981 			if (kstrtoint(work, 10, &opts) == 0)
5982 				goto opts_done;
5983 		}
5984 	}
5985 	return -EINVAL;
5986 opts_done:
5987 	sdebug_opts = opts;
5988 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
5989 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
5990 	tweak_cmnd_count();
5991 	return count;
5992 }
5993 static DRIVER_ATTR_RW(opts);
5994 
5995 static ssize_t ptype_show(struct device_driver *ddp, char *buf)
5996 {
5997 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ptype);
5998 }
5999 static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
6000 			   size_t count)
6001 {
6002 	int n;
6003 
6004 	/* Cannot change from or to TYPE_ZBC with sysfs */
6005 	if (sdebug_ptype == TYPE_ZBC)
6006 		return -EINVAL;
6007 
6008 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6009 		if (n == TYPE_ZBC)
6010 			return -EINVAL;
6011 		sdebug_ptype = n;
6012 		return count;
6013 	}
6014 	return -EINVAL;
6015 }
6016 static DRIVER_ATTR_RW(ptype);
6017 
6018 static ssize_t dsense_show(struct device_driver *ddp, char *buf)
6019 {
6020 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dsense);
6021 }
6022 static ssize_t dsense_store(struct device_driver *ddp, const char *buf,
6023 			    size_t count)
6024 {
6025 	int n;
6026 
6027 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6028 		sdebug_dsense = n;
6029 		return count;
6030 	}
6031 	return -EINVAL;
6032 }
6033 static DRIVER_ATTR_RW(dsense);
6034 
6035 static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
6036 {
6037 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_fake_rw);
6038 }
6039 static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
6040 			     size_t count)
6041 {
6042 	int n, idx;
6043 
6044 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6045 		bool want_store = (n == 0);
6046 		struct sdebug_host_info *sdhp;
6047 
6048 		n = (n > 0);
6049 		sdebug_fake_rw = (sdebug_fake_rw > 0);
6050 		if (sdebug_fake_rw == n)
6051 			return count;	/* not transitioning so do nothing */
6052 
6053 		if (want_store) {	/* 1 --> 0 transition, set up store */
6054 			if (sdeb_first_idx < 0) {
6055 				idx = sdebug_add_store();
6056 				if (idx < 0)
6057 					return idx;
6058 			} else {
6059 				idx = sdeb_first_idx;
6060 				xa_clear_mark(per_store_ap, idx,
6061 					      SDEB_XA_NOT_IN_USE);
6062 			}
6063 			/* make all hosts use same store */
6064 			list_for_each_entry(sdhp, &sdebug_host_list,
6065 					    host_list) {
6066 				if (sdhp->si_idx != idx) {
6067 					xa_set_mark(per_store_ap, sdhp->si_idx,
6068 						    SDEB_XA_NOT_IN_USE);
6069 					sdhp->si_idx = idx;
6070 				}
6071 			}
6072 			sdeb_most_recent_idx = idx;
6073 		} else {	/* 0 --> 1 transition is trigger for shrink */
6074 			sdebug_erase_all_stores(true /* apart from first */);
6075 		}
6076 		sdebug_fake_rw = n;
6077 		return count;
6078 	}
6079 	return -EINVAL;
6080 }
6081 static DRIVER_ATTR_RW(fake_rw);
6082 
6083 static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf)
6084 {
6085 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_lun_0);
6086 }
6087 static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf,
6088 			      size_t count)
6089 {
6090 	int n;
6091 
6092 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6093 		sdebug_no_lun_0 = n;
6094 		return count;
6095 	}
6096 	return -EINVAL;
6097 }
6098 static DRIVER_ATTR_RW(no_lun_0);
6099 
6100 static ssize_t num_tgts_show(struct device_driver *ddp, char *buf)
6101 {
6102 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_tgts);
6103 }
6104 static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf,
6105 			      size_t count)
6106 {
6107 	int n;
6108 
6109 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6110 		sdebug_num_tgts = n;
6111 		sdebug_max_tgts_luns();
6112 		return count;
6113 	}
6114 	return -EINVAL;
6115 }
6116 static DRIVER_ATTR_RW(num_tgts);
6117 
6118 static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf)
6119 {
6120 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dev_size_mb);
6121 }
6122 static DRIVER_ATTR_RO(dev_size_mb);
6123 
6124 static ssize_t per_host_store_show(struct device_driver *ddp, char *buf)
6125 {
6126 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_per_host_store);
6127 }
6128 
6129 static ssize_t per_host_store_store(struct device_driver *ddp, const char *buf,
6130 				    size_t count)
6131 {
6132 	bool v;
6133 
6134 	if (kstrtobool(buf, &v))
6135 		return -EINVAL;
6136 
6137 	sdebug_per_host_store = v;
6138 	return count;
6139 }
6140 static DRIVER_ATTR_RW(per_host_store);
6141 
6142 static ssize_t num_parts_show(struct device_driver *ddp, char *buf)
6143 {
6144 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_parts);
6145 }
6146 static DRIVER_ATTR_RO(num_parts);
6147 
6148 static ssize_t every_nth_show(struct device_driver *ddp, char *buf)
6149 {
6150 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_every_nth);
6151 }
6152 static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
6153 			       size_t count)
6154 {
6155 	int nth;
6156 	char work[20];
6157 
6158 	if (sscanf(buf, "%10s", work) == 1) {
6159 		if (strncasecmp(work, "0x", 2) == 0) {
6160 			if (kstrtoint(work + 2, 16, &nth) == 0)
6161 				goto every_nth_done;
6162 		} else {
6163 			if (kstrtoint(work, 10, &nth) == 0)
6164 				goto every_nth_done;
6165 		}
6166 	}
6167 	return -EINVAL;
6168 
6169 every_nth_done:
6170 	sdebug_every_nth = nth;
6171 	if (nth && !sdebug_statistics) {
6172 		pr_info("every_nth needs statistics=1, set it\n");
6173 		sdebug_statistics = true;
6174 	}
6175 	tweak_cmnd_count();
6176 	return count;
6177 }
6178 static DRIVER_ATTR_RW(every_nth);
6179 
6180 static ssize_t lun_format_show(struct device_driver *ddp, char *buf)
6181 {
6182 	return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_lun_am);
6183 }
6184 static ssize_t lun_format_store(struct device_driver *ddp, const char *buf,
6185 				size_t count)
6186 {
6187 	int n;
6188 	bool changed;
6189 
6190 	if (kstrtoint(buf, 0, &n))
6191 		return -EINVAL;
6192 	if (n >= 0) {
6193 		if (n > (int)SAM_LUN_AM_FLAT) {
6194 			pr_warn("only LUN address methods 0 and 1 are supported\n");
6195 			return -EINVAL;
6196 		}
6197 		changed = ((int)sdebug_lun_am != n);
6198 		sdebug_lun_am = n;
6199 		if (changed && sdebug_scsi_level >= 5) {	/* >= SPC-3 */
6200 			struct sdebug_host_info *sdhp;
6201 			struct sdebug_dev_info *dp;
6202 
6203 			spin_lock(&sdebug_host_list_lock);
6204 			list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
6205 				list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
6206 					set_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
6207 				}
6208 			}
6209 			spin_unlock(&sdebug_host_list_lock);
6210 		}
6211 		return count;
6212 	}
6213 	return -EINVAL;
6214 }
6215 static DRIVER_ATTR_RW(lun_format);
6216 
6217 static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
6218 {
6219 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_luns);
6220 }
6221 static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
6222 			      size_t count)
6223 {
6224 	int n;
6225 	bool changed;
6226 
6227 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6228 		if (n > 256) {
6229 			pr_warn("max_luns can be no more than 256\n");
6230 			return -EINVAL;
6231 		}
6232 		changed = (sdebug_max_luns != n);
6233 		sdebug_max_luns = n;
6234 		sdebug_max_tgts_luns();
6235 		if (changed && (sdebug_scsi_level >= 5)) {	/* >= SPC-3 */
6236 			struct sdebug_host_info *sdhp;
6237 			struct sdebug_dev_info *dp;
6238 
6239 			spin_lock(&sdebug_host_list_lock);
6240 			list_for_each_entry(sdhp, &sdebug_host_list,
6241 					    host_list) {
6242 				list_for_each_entry(dp, &sdhp->dev_info_list,
6243 						    dev_list) {
6244 					set_bit(SDEBUG_UA_LUNS_CHANGED,
6245 						dp->uas_bm);
6246 				}
6247 			}
6248 			spin_unlock(&sdebug_host_list_lock);
6249 		}
6250 		return count;
6251 	}
6252 	return -EINVAL;
6253 }
6254 static DRIVER_ATTR_RW(max_luns);
6255 
6256 static ssize_t max_queue_show(struct device_driver *ddp, char *buf)
6257 {
6258 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_queue);
6259 }
/* N.B. max_queue can be changed while there are queued commands. In-flight
 * commands beyond the new max_queue will still be completed. */
6262 static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
6263 			       size_t count)
6264 {
6265 	int j, n, k, a;
6266 	struct sdebug_queue *sqp;
6267 
6268 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
6269 	    (n <= SDEBUG_CANQUEUE) &&
6270 	    (sdebug_host_max_queue == 0)) {
6271 		block_unblock_all_queues(true);
6272 		k = 0;
6273 		for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
6274 		     ++j, ++sqp) {
6275 			a = find_last_bit(sqp->in_use_bm, SDEBUG_CANQUEUE);
6276 			if (a > k)
6277 				k = a;
6278 		}
6279 		sdebug_max_queue = n;
6280 		if (k == SDEBUG_CANQUEUE)
6281 			atomic_set(&retired_max_queue, 0);
6282 		else if (k >= n)
6283 			atomic_set(&retired_max_queue, k + 1);
6284 		else
6285 			atomic_set(&retired_max_queue, 0);
6286 		block_unblock_all_queues(false);
6287 		return count;
6288 	}
6289 	return -EINVAL;
6290 }
6291 static DRIVER_ATTR_RW(max_queue);
6292 
6293 static ssize_t host_max_queue_show(struct device_driver *ddp, char *buf)
6294 {
6295 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_host_max_queue);
6296 }
6297 
6298 /*
6299  * Since this is used for .can_queue, and we get the hc_idx tag from the bitmap
6300  * in range [0, sdebug_host_max_queue), we can't change it.
6301  */
6302 static DRIVER_ATTR_RO(host_max_queue);
6303 
6304 static ssize_t no_uld_show(struct device_driver *ddp, char *buf)
6305 {
6306 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_uld);
6307 }
6308 static DRIVER_ATTR_RO(no_uld);
6309 
6310 static ssize_t scsi_level_show(struct device_driver *ddp, char *buf)
6311 {
6312 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_scsi_level);
6313 }
6314 static DRIVER_ATTR_RO(scsi_level);
6315 
6316 static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf)
6317 {
6318 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_virtual_gb);
6319 }
6320 static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
6321 				size_t count)
6322 {
6323 	int n;
6324 	bool changed;
6325 
6326 	/* Ignore capacity change for ZBC drives for now */
6327 	if (sdeb_zbc_in_use)
6328 		return -ENOTSUPP;
6329 
6330 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6331 		changed = (sdebug_virtual_gb != n);
6332 		sdebug_virtual_gb = n;
6333 		sdebug_capacity = get_sdebug_capacity();
6334 		if (changed) {
6335 			struct sdebug_host_info *sdhp;
6336 			struct sdebug_dev_info *dp;
6337 
6338 			spin_lock(&sdebug_host_list_lock);
6339 			list_for_each_entry(sdhp, &sdebug_host_list,
6340 					    host_list) {
6341 				list_for_each_entry(dp, &sdhp->dev_info_list,
6342 						    dev_list) {
6343 					set_bit(SDEBUG_UA_CAPACITY_CHANGED,
6344 						dp->uas_bm);
6345 				}
6346 			}
6347 			spin_unlock(&sdebug_host_list_lock);
6348 		}
6349 		return count;
6350 	}
6351 	return -EINVAL;
6352 }
6353 static DRIVER_ATTR_RW(virtual_gb);
6354 
6355 static ssize_t add_host_show(struct device_driver *ddp, char *buf)
6356 {
	/* the absolute number of currently active hosts is shown */
6358 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_hosts);
6359 }
6360 
6361 static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
6362 			      size_t count)
6363 {
6364 	bool found;
6365 	unsigned long idx;
6366 	struct sdeb_store_info *sip;
6367 	bool want_phs = (sdebug_fake_rw == 0) && sdebug_per_host_store;
6368 	int delta_hosts;
6369 
6370 	if (sscanf(buf, "%d", &delta_hosts) != 1)
6371 		return -EINVAL;
6372 	if (delta_hosts > 0) {
6373 		do {
6374 			found = false;
6375 			if (want_phs) {
6376 				xa_for_each_marked(per_store_ap, idx, sip,
6377 						   SDEB_XA_NOT_IN_USE) {
6378 					sdeb_most_recent_idx = (int)idx;
6379 					found = true;
6380 					break;
6381 				}
6382 				if (found)	/* re-use case */
6383 					sdebug_add_host_helper((int)idx);
6384 				else
6385 					sdebug_do_add_host(true);
6386 			} else {
6387 				sdebug_do_add_host(false);
6388 			}
6389 		} while (--delta_hosts);
6390 	} else if (delta_hosts < 0) {
6391 		do {
6392 			sdebug_do_remove_host(false);
6393 		} while (++delta_hosts);
6394 	}
6395 	return count;
6396 }
6397 static DRIVER_ATTR_RW(add_host);
6398 
6399 static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf)
6400 {
6401 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_vpd_use_hostno);
6402 }
6403 static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf,
6404 				    size_t count)
6405 {
6406 	int n;
6407 
6408 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6409 		sdebug_vpd_use_hostno = n;
6410 		return count;
6411 	}
6412 	return -EINVAL;
6413 }
6414 static DRIVER_ATTR_RW(vpd_use_hostno);
6415 
6416 static ssize_t statistics_show(struct device_driver *ddp, char *buf)
6417 {
6418 	return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_statistics);
6419 }
6420 static ssize_t statistics_store(struct device_driver *ddp, const char *buf,
6421 				size_t count)
6422 {
6423 	int n;
6424 
6425 	if ((count > 0) && (sscanf(buf, "%d", &n) == 1) && (n >= 0)) {
6426 		if (n > 0)
6427 			sdebug_statistics = true;
6428 		else {
6429 			clear_queue_stats();
6430 			sdebug_statistics = false;
6431 		}
6432 		return count;
6433 	}
6434 	return -EINVAL;
6435 }
6436 static DRIVER_ATTR_RW(statistics);
6437 
6438 static ssize_t sector_size_show(struct device_driver *ddp, char *buf)
6439 {
6440 	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_sector_size);
6441 }
6442 static DRIVER_ATTR_RO(sector_size);
6443 
6444 static ssize_t submit_queues_show(struct device_driver *ddp, char *buf)
6445 {
6446 	return scnprintf(buf, PAGE_SIZE, "%d\n", submit_queues);
6447 }
6448 static DRIVER_ATTR_RO(submit_queues);
6449 
6450 static ssize_t dix_show(struct device_driver *ddp, char *buf)
6451 {
6452 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dix);
6453 }
6454 static DRIVER_ATTR_RO(dix);
6455 
6456 static ssize_t dif_show(struct device_driver *ddp, char *buf)
6457 {
6458 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dif);
6459 }
6460 static DRIVER_ATTR_RO(dif);
6461 
6462 static ssize_t guard_show(struct device_driver *ddp, char *buf)
6463 {
6464 	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_guard);
6465 }
6466 static DRIVER_ATTR_RO(guard);
6467 
6468 static ssize_t ato_show(struct device_driver *ddp, char *buf)
6469 {
6470 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ato);
6471 }
6472 static DRIVER_ATTR_RO(ato);
6473 
6474 static ssize_t map_show(struct device_driver *ddp, char *buf)
6475 {
6476 	ssize_t count = 0;
6477 
6478 	if (!scsi_debug_lbp())
6479 		return scnprintf(buf, PAGE_SIZE, "0-%u\n",
6480 				 sdebug_store_sectors);
6481 
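	/*
	 * %*pbl prints the provisioning bitmap as a ranged list, e.g.
	 * "0-1,42,100-107" (hypothetical output meaning those blocks are
	 * mapped).
	 */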
6482 	if (sdebug_fake_rw == 0 && !xa_empty(per_store_ap)) {
6483 		struct sdeb_store_info *sip = xa_load(per_store_ap, 0);
6484 
6485 		if (sip)
6486 			count = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
6487 					  (int)map_size, sip->map_storep);
6488 	}
6489 	buf[count++] = '\n';
6490 	buf[count] = '\0';
6491 
6492 	return count;
6493 }
6494 static DRIVER_ATTR_RO(map);
6495 
6496 static ssize_t random_show(struct device_driver *ddp, char *buf)
6497 {
6498 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_random);
6499 }
6500 
6501 static ssize_t random_store(struct device_driver *ddp, const char *buf,
6502 			    size_t count)
6503 {
6504 	bool v;
6505 
6506 	if (kstrtobool(buf, &v))
6507 		return -EINVAL;
6508 
6509 	sdebug_random = v;
6510 	return count;
6511 }
6512 static DRIVER_ATTR_RW(random);
6513 
6514 static ssize_t removable_show(struct device_driver *ddp, char *buf)
6515 {
6516 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_removable ? 1 : 0);
6517 }
6518 static ssize_t removable_store(struct device_driver *ddp, const char *buf,
6519 			       size_t count)
6520 {
6521 	int n;
6522 
6523 	if ((count > 0) && (sscanf(buf, "%d", &n) == 1) && (n >= 0)) {
6524 		sdebug_removable = (n > 0);
6525 		return count;
6526 	}
6527 	return -EINVAL;
6528 }
6529 static DRIVER_ATTR_RW(removable);
6530 
6531 static ssize_t host_lock_show(struct device_driver *ddp, char *buf)
6532 {
6533 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_host_lock);
6534 }
6535 /* N.B. sdebug_host_lock does nothing, kept for backward compatibility */
6536 static ssize_t host_lock_store(struct device_driver *ddp, const char *buf,
6537 			       size_t count)
6538 {
6539 	int n;
6540 
6541 	if ((count > 0) && (sscanf(buf, "%d", &n) == 1) && (n >= 0)) {
6542 		sdebug_host_lock = (n > 0);
6543 		return count;
6544 	}
6545 	return -EINVAL;
6546 }
6547 static DRIVER_ATTR_RW(host_lock);
6548 
6549 static ssize_t strict_show(struct device_driver *ddp, char *buf)
6550 {
6551 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_strict);
6552 }
6553 static ssize_t strict_store(struct device_driver *ddp, const char *buf,
6554 			    size_t count)
6555 {
6556 	int n;
6557 
6558 	if ((count > 0) && (sscanf(buf, "%d", &n) == 1) && (n >= 0)) {
6559 		sdebug_strict = (n > 0);
6560 		return count;
6561 	}
6562 	return -EINVAL;
6563 }
6564 static DRIVER_ATTR_RW(strict);
6565 
6566 static ssize_t uuid_ctl_show(struct device_driver *ddp, char *buf)
6567 {
6568 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_uuid_ctl);
6569 }
6570 static DRIVER_ATTR_RO(uuid_ctl);
6571 
6572 static ssize_t cdb_len_show(struct device_driver *ddp, char *buf)
6573 {
6574 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_cdb_len);
6575 }
6576 static ssize_t cdb_len_store(struct device_driver *ddp, const char *buf,
6577 			     size_t count)
6578 {
6579 	int ret, n;
6580 
6581 	ret = kstrtoint(buf, 0, &n);
6582 	if (ret)
6583 		return ret;
6584 	sdebug_cdb_len = n;
6585 	all_config_cdb_len();
6586 	return count;
6587 }
6588 static DRIVER_ATTR_RW(cdb_len);
6589 
6590 static const char * const zbc_model_strs_a[] = {
6591 	[BLK_ZONED_NONE] = "none",
6592 	[BLK_ZONED_HA]   = "host-aware",
6593 	[BLK_ZONED_HM]   = "host-managed",
6594 };
6595 
6596 static const char * const zbc_model_strs_b[] = {
6597 	[BLK_ZONED_NONE] = "no",
6598 	[BLK_ZONED_HA]   = "aware",
6599 	[BLK_ZONED_HM]   = "managed",
6600 };
6601 
6602 static const char * const zbc_model_strs_c[] = {
6603 	[BLK_ZONED_NONE] = "0",
6604 	[BLK_ZONED_HA]   = "1",
6605 	[BLK_ZONED_HM]   = "2",
6606 };
6607 
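/*
 * Accepts any of the three spellings above, so for instance
 * zbc=host-managed, zbc=managed and zbc=2 all select BLK_ZONED_HM.
 */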
6608 static int sdeb_zbc_model_str(const char *cp)
6609 {
6610 	int res = sysfs_match_string(zbc_model_strs_a, cp);
6611 
6612 	if (res < 0) {
6613 		res = sysfs_match_string(zbc_model_strs_b, cp);
6614 		if (res < 0) {
6615 			res = sysfs_match_string(zbc_model_strs_c, cp);
6616 			if (res < 0)
6617 				return -EINVAL;
6618 		}
6619 	}
6620 	return res;
6621 }
6622 
6623 static ssize_t zbc_show(struct device_driver *ddp, char *buf)
6624 {
6625 	return scnprintf(buf, PAGE_SIZE, "%s\n",
6626 			 zbc_model_strs_a[sdeb_zbc_model]);
6627 }
6628 static DRIVER_ATTR_RO(zbc);
6629 
6630 static ssize_t tur_ms_to_ready_show(struct device_driver *ddp, char *buf)
6631 {
6632 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdeb_tur_ms_to_ready);
6633 }
6634 static DRIVER_ATTR_RO(tur_ms_to_ready);
6635 
6636 /* Note: The following array creates attribute files in the
6637    /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
6638    files (over those found in the /sys/module/scsi_debug/parameters
6639    directory) is that auxiliary actions can be triggered when an attribute
6640    is changed. For example, see add_host_store() above.
6641  */
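/*
 * A minimal (hypothetical) shell session exercising one of these
 * attribute files, assuming the module is loaded with its defaults:
 *
 *   # cat /sys/bus/pseudo/drivers/scsi_debug/add_host
 *   1
 *   # echo 2 > /sys/bus/pseudo/drivers/scsi_debug/add_host
 *   # echo -1 > /sys/bus/pseudo/drivers/scsi_debug/add_host
 *   # cat /sys/bus/pseudo/drivers/scsi_debug/add_host
 *   2
 *
 * Each write lands in add_host_store() which adds or removes hosts as
 * a side effect, something the module parameter files cannot do.
 */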
6642 
6643 static struct attribute *sdebug_drv_attrs[] = {
6644 	&driver_attr_delay.attr,
6645 	&driver_attr_opts.attr,
6646 	&driver_attr_ptype.attr,
6647 	&driver_attr_dsense.attr,
6648 	&driver_attr_fake_rw.attr,
6649 	&driver_attr_host_max_queue.attr,
6650 	&driver_attr_no_lun_0.attr,
6651 	&driver_attr_num_tgts.attr,
6652 	&driver_attr_dev_size_mb.attr,
6653 	&driver_attr_num_parts.attr,
6654 	&driver_attr_every_nth.attr,
6655 	&driver_attr_lun_format.attr,
6656 	&driver_attr_max_luns.attr,
6657 	&driver_attr_max_queue.attr,
6658 	&driver_attr_no_uld.attr,
6659 	&driver_attr_scsi_level.attr,
6660 	&driver_attr_virtual_gb.attr,
6661 	&driver_attr_add_host.attr,
6662 	&driver_attr_per_host_store.attr,
6663 	&driver_attr_vpd_use_hostno.attr,
6664 	&driver_attr_sector_size.attr,
6665 	&driver_attr_statistics.attr,
6666 	&driver_attr_submit_queues.attr,
6667 	&driver_attr_dix.attr,
6668 	&driver_attr_dif.attr,
6669 	&driver_attr_guard.attr,
6670 	&driver_attr_ato.attr,
6671 	&driver_attr_map.attr,
6672 	&driver_attr_random.attr,
6673 	&driver_attr_removable.attr,
6674 	&driver_attr_host_lock.attr,
6675 	&driver_attr_ndelay.attr,
6676 	&driver_attr_strict.attr,
6677 	&driver_attr_uuid_ctl.attr,
6678 	&driver_attr_cdb_len.attr,
6679 	&driver_attr_tur_ms_to_ready.attr,
6680 	&driver_attr_zbc.attr,
6681 	NULL,
6682 };
6683 ATTRIBUTE_GROUPS(sdebug_drv);
6684 
6685 static struct device *pseudo_primary;
6686 
6687 static int __init scsi_debug_init(void)
6688 {
6689 	bool want_store = (sdebug_fake_rw == 0);
6690 	unsigned long sz;
6691 	int k, ret, hosts_to_add;
6692 	int idx = -1;
6693 
6694 	ramdisk_lck_a[0] = &atomic_rw;
6695 	ramdisk_lck_a[1] = &atomic_rw2;
6696 	atomic_set(&retired_max_queue, 0);
6697 
6698 	if (sdebug_ndelay >= 1000 * 1000 * 1000) {
6699 		pr_warn("ndelay must be less than 1 second, ignored\n");
6700 		sdebug_ndelay = 0;
6701 	} else if (sdebug_ndelay > 0)
6702 		sdebug_jdelay = JDELAY_OVERRIDDEN;
6703 
6704 	switch (sdebug_sector_size) {
6705 	case  512:
6706 	case 1024:
6707 	case 2048:
6708 	case 4096:
6709 		break;
6710 	default:
6711 		pr_err("invalid sector_size %d\n", sdebug_sector_size);
6712 		return -EINVAL;
6713 	}
6714 
6715 	switch (sdebug_dif) {
6716 	case T10_PI_TYPE0_PROTECTION:
6717 		break;
6718 	case T10_PI_TYPE1_PROTECTION:
6719 	case T10_PI_TYPE2_PROTECTION:
6720 	case T10_PI_TYPE3_PROTECTION:
6721 		have_dif_prot = true;
6722 		break;
6723 
6724 	default:
6725 		pr_err("dif must be 0, 1, 2 or 3\n");
6726 		return -EINVAL;
6727 	}
6728 
6729 	if (sdebug_num_tgts < 0) {
6730 		pr_err("num_tgts must be >= 0\n");
6731 		return -EINVAL;
6732 	}
6733 
6734 	if (sdebug_guard > 1) {
6735 		pr_err("guard must be 0 or 1\n");
6736 		return -EINVAL;
6737 	}
6738 
6739 	if (sdebug_ato > 1) {
6740 		pr_err("ato must be 0 or 1\n");
6741 		return -EINVAL;
6742 	}
6743 
6744 	if (sdebug_physblk_exp > 15) {
6745 		pr_err("invalid physblk_exp %u\n", sdebug_physblk_exp);
6746 		return -EINVAL;
6747 	}
6748 
6749 	sdebug_lun_am = sdebug_lun_am_i;
6750 	if (sdebug_lun_am > SAM_LUN_AM_FLAT) {
6751 		pr_warn("Invalid LUN format %d, using default\n", (int)sdebug_lun_am);
6752 		sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
6753 	}
6754 
6755 	if (sdebug_max_luns > 256) {
6756 		if (sdebug_max_luns > 16384) {
6756 			pr_warn("max_luns can be no more than 16384, using default\n");
6758 			sdebug_max_luns = DEF_MAX_LUNS;
6759 		}
6760 		sdebug_lun_am = SAM_LUN_AM_FLAT;
6761 	}
6762 
6763 	if (sdebug_lowest_aligned > 0x3fff) {
6764 		pr_err("lowest_aligned too big: %u\n", sdebug_lowest_aligned);
6765 		return -EINVAL;
6766 	}
6767 
6768 	if (submit_queues < 1) {
6769 		pr_err("submit_queues must be 1 or more\n");
6770 		return -EINVAL;
6771 	}
6772 
6773 	if ((sdebug_max_queue > SDEBUG_CANQUEUE) || (sdebug_max_queue < 1)) {
6774 		pr_err("max_queue must be in range [1, %d]\n", SDEBUG_CANQUEUE);
6775 		return -EINVAL;
6776 	}
6777 
6778 	if ((sdebug_host_max_queue > SDEBUG_CANQUEUE) ||
6779 	    (sdebug_host_max_queue < 0)) {
6780 		pr_err("host_max_queue must be in range [0, %d]\n",
6781 		       SDEBUG_CANQUEUE);
6782 		return -EINVAL;
6783 	}
6784 
6785 	if (sdebug_host_max_queue &&
6786 	    (sdebug_max_queue != sdebug_host_max_queue)) {
6787 		sdebug_max_queue = sdebug_host_max_queue;
6788 		pr_warn("fixing max submit queue depth to host max queue depth, %d\n",
6789 			sdebug_max_queue);
6790 	}
6791 
6792 	sdebug_q_arr = kcalloc(submit_queues, sizeof(struct sdebug_queue),
6793 			       GFP_KERNEL);
6794 	if (!sdebug_q_arr)
6795 		return -ENOMEM;
6796 	for (k = 0; k < submit_queues; ++k)
6797 		spin_lock_init(&sdebug_q_arr[k].qc_lock);
6798 
6799 	/*
6800 	 * Check for a host-managed zoned block device specified with
6801 	 * ptype=0x14 or zbc=XXX.
6802 	 */
6803 	if (sdebug_ptype == TYPE_ZBC) {
6804 		sdeb_zbc_model = BLK_ZONED_HM;
6805 	} else if (sdeb_zbc_model_s && *sdeb_zbc_model_s) {
6806 		k = sdeb_zbc_model_str(sdeb_zbc_model_s);
6807 		if (k < 0) {
6808 			ret = k;
6809 			goto free_q_arr;
6810 		}
6811 		sdeb_zbc_model = k;
6812 		switch (sdeb_zbc_model) {
6813 		case BLK_ZONED_NONE:
6814 		case BLK_ZONED_HA:
6815 			sdebug_ptype = TYPE_DISK;
6816 			break;
6817 		case BLK_ZONED_HM:
6818 			sdebug_ptype = TYPE_ZBC;
6819 			break;
6820 		default:
6821 			pr_err("Invalid ZBC model\n");
6822 			ret = -EINVAL;
6823 			goto free_q_arr;
6824 		}
6825 	}
6826 	if (sdeb_zbc_model != BLK_ZONED_NONE) {
6827 		sdeb_zbc_in_use = true;
6828 		if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
6829 			sdebug_dev_size_mb = DEF_ZBC_DEV_SIZE_MB;
6830 	}
6831 
6832 	if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
6833 		sdebug_dev_size_mb = DEF_DEV_SIZE_MB;
6834 	if (sdebug_dev_size_mb < 1)
6835 		sdebug_dev_size_mb = 1;  /* force minimum 1 MB ramdisk */
6836 	sz = (unsigned long)sdebug_dev_size_mb * 1048576;
6837 	sdebug_store_sectors = sz / sdebug_sector_size;
6838 	sdebug_capacity = get_sdebug_capacity();
6839 
6840 	/* play around with geometry, don't waste too much on track 0 */
6841 	sdebug_heads = 8;
6842 	sdebug_sectors_per = 32;
6843 	if (sdebug_dev_size_mb >= 256)
6844 		sdebug_heads = 64;
6845 	else if (sdebug_dev_size_mb >= 16)
6846 		sdebug_heads = 32;
6847 	sdebug_cylinders_per = (unsigned long)sdebug_capacity /
6848 			       (sdebug_sectors_per * sdebug_heads);
6849 	if (sdebug_cylinders_per >= 1024) {
6850 		/* other LLDs do this; implies >= 1GB ram disk ... */
6851 		sdebug_heads = 255;
6852 		sdebug_sectors_per = 63;
6853 		sdebug_cylinders_per = (unsigned long)sdebug_capacity /
6854 			       (sdebug_sectors_per * sdebug_heads);
6855 	}
6856 	if (scsi_debug_lbp()) {
6857 		sdebug_unmap_max_blocks =
6858 			clamp(sdebug_unmap_max_blocks, 0U, 0xffffffffU);
6859 
6860 		sdebug_unmap_max_desc =
6861 			clamp(sdebug_unmap_max_desc, 0U, 256U);
6862 
6863 		sdebug_unmap_granularity =
6864 			clamp(sdebug_unmap_granularity, 1U, 0xffffffffU);
6865 
6866 		if (sdebug_unmap_alignment &&
6867 		    sdebug_unmap_granularity <=
6868 		    sdebug_unmap_alignment) {
6869 			pr_err("unmap_granularity <= unmap_alignment\n");
6870 			ret = -EINVAL;
6871 			goto free_q_arr;
6872 		}
6873 	}
6874 	xa_init_flags(per_store_ap, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
6875 	if (want_store) {
6876 		idx = sdebug_add_store();
6877 		if (idx < 0) {
6878 			ret = idx;
6879 			goto free_q_arr;
6880 		}
6881 	}
6882 
6883 	pseudo_primary = root_device_register("pseudo_0");
6884 	if (IS_ERR(pseudo_primary)) {
6885 		pr_warn("root_device_register() error\n");
6886 		ret = PTR_ERR(pseudo_primary);
6887 		goto free_vm;
6888 	}
6889 	ret = bus_register(&pseudo_lld_bus);
6890 	if (ret < 0) {
6891 		pr_warn("bus_register error: %d\n", ret);
6892 		goto dev_unreg;
6893 	}
6894 	ret = driver_register(&sdebug_driverfs_driver);
6895 	if (ret < 0) {
6896 		pr_warn("driver_register error: %d\n", ret);
6897 		goto bus_unreg;
6898 	}
6899 
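	/*
	 * Create the hosts requested via the add_host module parameter.
	 * The first host re-uses the store allocated above (idx) when a
	 * store is wanted; later hosts get their own store only if
	 * per_host_store is also set.
	 */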
6900 	hosts_to_add = sdebug_add_host;
6901 	sdebug_add_host = 0;
6902 
6903 	for (k = 0; k < hosts_to_add; k++) {
6904 		if (want_store && k == 0) {
6905 			ret = sdebug_add_host_helper(idx);
6906 			if (ret < 0) {
6907 				pr_err("add_host_helper k=%d, error=%d\n",
6908 				       k, -ret);
6909 				break;
6910 			}
6911 		} else {
6912 			ret = sdebug_do_add_host(want_store &&
6913 						 sdebug_per_host_store);
6914 			if (ret < 0) {
6915 				pr_err("add_host k=%d error=%d\n", k, -ret);
6916 				break;
6917 			}
6918 		}
6919 	}
6920 	if (sdebug_verbose)
6921 		pr_info("built %d host(s)\n", sdebug_num_hosts);
6922 
6923 	return 0;
6924 
6925 bus_unreg:
6926 	bus_unregister(&pseudo_lld_bus);
6927 dev_unreg:
6928 	root_device_unregister(pseudo_primary);
6929 free_vm:
6930 	sdebug_erase_store(idx, NULL);
6931 free_q_arr:
6932 	kfree(sdebug_q_arr);
6933 	return ret;
6934 }
6935 
6936 static void __exit scsi_debug_exit(void)
6937 {
6938 	int k = sdebug_num_hosts;
6939 
6940 	stop_all_queued();
6941 	for (; k; k--)
6942 		sdebug_do_remove_host(true);
6943 	free_all_queued();
6944 	driver_unregister(&sdebug_driverfs_driver);
6945 	bus_unregister(&pseudo_lld_bus);
6946 	root_device_unregister(pseudo_primary);
6947 
6948 	sdebug_erase_all_stores(false);
6949 	xa_destroy(per_store_ap);
6950 	kfree(sdebug_q_arr);
6951 }
6952 
6953 device_initcall(scsi_debug_init);
6954 module_exit(scsi_debug_exit);
6955 
6956 static void sdebug_release_adapter(struct device *dev)
6957 {
6958 	struct sdebug_host_info *sdbg_host;
6959 
6960 	sdbg_host = to_sdebug_host(dev);
6961 	kfree(sdbg_host);
6962 }
6963 
6964 /* idx must be valid; if sip is NULL then it will be obtained using idx */
6965 static void sdebug_erase_store(int idx, struct sdeb_store_info *sip)
6966 {
6967 	if (idx < 0)
6968 		return;
6969 	if (!sip) {
6970 		if (xa_empty(per_store_ap))
6971 			return;
6972 		sip = xa_load(per_store_ap, idx);
6973 		if (!sip)
6974 			return;
6975 	}
6976 	vfree(sip->map_storep);
6977 	vfree(sip->dif_storep);
6978 	vfree(sip->storep);
6979 	xa_erase(per_store_ap, idx);
6980 	kfree(sip);
6981 }
6982 
6983 /* apart_from_first==false is assumed to occur only in the shutdown case. */
6984 static void sdebug_erase_all_stores(bool apart_from_first)
6985 {
6986 	unsigned long idx;
6987 	struct sdeb_store_info *sip = NULL;
6988 
6989 	xa_for_each(per_store_ap, idx, sip) {
6990 		if (apart_from_first)
6991 			apart_from_first = false;
6992 		else
6993 			sdebug_erase_store(idx, sip);
6994 	}
6995 	if (apart_from_first)
6996 		sdeb_most_recent_idx = sdeb_first_idx;
6997 }
6998 
6999 /*
7000  * Returns the new element's index (idx) in the store xarray if >= 0,
7001  * else a negated errno. The number of stores is limited to 65536.
7002  */
7003 static int sdebug_add_store(void)
7004 {
7005 	int res;
7006 	u32 n_idx;
7007 	unsigned long iflags;
7008 	unsigned long sz = (unsigned long)sdebug_dev_size_mb * 1048576;
7009 	struct sdeb_store_info *sip = NULL;
7010 	struct xa_limit xal = { .max = (1 << 16) - 1, .min = 0 };	/* inclusive */
7011 
7012 	sip = kzalloc(sizeof(*sip), GFP_KERNEL);
7013 	if (!sip)
7014 		return -ENOMEM;
7015 
7016 	xa_lock_irqsave(per_store_ap, iflags);
7017 	res = __xa_alloc(per_store_ap, &n_idx, sip, xal, GFP_ATOMIC);
7018 	if (unlikely(res < 0)) {
7019 		xa_unlock_irqrestore(per_store_ap, iflags);
7020 		kfree(sip);
7021 		pr_warn("%s: xa_alloc() errno=%d\n", __func__, -res);
7022 		return res;
7023 	}
7024 	sdeb_most_recent_idx = n_idx;
7025 	if (sdeb_first_idx < 0)
7026 		sdeb_first_idx = n_idx;
7027 	xa_unlock_irqrestore(per_store_ap, iflags);
7028 
7029 	res = -ENOMEM;
7030 	sip->storep = vzalloc(sz);
7031 	if (!sip->storep) {
7032 		pr_err("user data oom\n");
7033 		goto err;
7034 	}
7035 	if (sdebug_num_parts > 0)
7036 		sdebug_build_parts(sip->storep, sz);
7037 
7038 	/* DIF/DIX: what T10 calls Protection Information (PI) */
7039 	if (sdebug_dix) {
7040 		int dif_size;
7041 
7042 		dif_size = sdebug_store_sectors * sizeof(struct t10_pi_tuple);
7043 		sip->dif_storep = vmalloc(dif_size);
7044 
7045 		pr_info("dif_storep %u bytes @ %pK\n", dif_size,
7046 			sip->dif_storep);
7047 
7048 		if (!sip->dif_storep) {
7049 			pr_err("DIX oom\n");
7050 			goto err;
7051 		}
7052 		memset(sip->dif_storep, 0xff, dif_size);
7053 	}
7054 	/* Logical Block Provisioning */
7055 	if (scsi_debug_lbp()) {
7056 		map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
7057 		sip->map_storep = vmalloc(array_size(sizeof(long),
7058 						     BITS_TO_LONGS(map_size)));
7059 
7060 		pr_info("%lu provisioning blocks\n", map_size);
7061 
7062 		if (!sip->map_storep) {
7063 			pr_err("LBP map oom\n");
7064 			goto err;
7065 		}
7066 
7067 		bitmap_zero(sip->map_storep, map_size);
7068 
7069 		/* Map first 1KB for partition table */
7070 		if (sdebug_num_parts)
7071 			map_region(sip, 0, 2);
7072 	}
7073 
7074 	rwlock_init(&sip->macc_lck);
7075 	return (int)n_idx;
7076 err:
7077 	sdebug_erase_store((int)n_idx, sip);
7078 	pr_warn("%s: failed, errno=%d\n", __func__, -res);
7079 	return res;
7080 }
7081 
7082 static int sdebug_add_host_helper(int per_host_idx)
7083 {
7084 	int k, devs_per_host, idx;
7085 	int error = -ENOMEM;
7086 	struct sdebug_host_info *sdbg_host;
7087 	struct sdebug_dev_info *sdbg_devinfo, *tmp;
7088 
7089 	sdbg_host = kzalloc(sizeof(*sdbg_host), GFP_KERNEL);
7090 	if (!sdbg_host)
7091 		return -ENOMEM;
7092 	idx = (per_host_idx < 0) ? sdeb_first_idx : per_host_idx;
7093 	if (xa_get_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE))
7094 		xa_clear_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
7095 	sdbg_host->si_idx = idx;
7096 
7097 	INIT_LIST_HEAD(&sdbg_host->dev_info_list);
7098 
7099 	devs_per_host = sdebug_num_tgts * sdebug_max_luns;
7100 	for (k = 0; k < devs_per_host; k++) {
7101 		sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
7102 		if (!sdbg_devinfo)
7103 			goto clean;
7104 	}
7105 
7106 	spin_lock(&sdebug_host_list_lock);
7107 	list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
7108 	spin_unlock(&sdebug_host_list_lock);
7109 
7110 	sdbg_host->dev.bus = &pseudo_lld_bus;
7111 	sdbg_host->dev.parent = pseudo_primary;
7112 	sdbg_host->dev.release = &sdebug_release_adapter;
7113 	dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_num_hosts);
7114 
7115 	error = device_register(&sdbg_host->dev);
7116 	if (error)
7117 		goto clean;
7118 
7119 	++sdebug_num_hosts;
7120 	return 0;
7121 
7122 clean:
7123 	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
7124 				 dev_list) {
7125 		list_del(&sdbg_devinfo->dev_list);
7126 		kfree(sdbg_devinfo->zstate);
7127 		kfree(sdbg_devinfo);
7128 	}
7129 	kfree(sdbg_host);
7130 	pr_warn("%s: failed, errno=%d\n", __func__, -error);
7131 	return error;
7132 }
7133 
7134 static int sdebug_do_add_host(bool mk_new_store)
7135 {
7136 	int ph_idx = sdeb_most_recent_idx;
7137 
7138 	if (mk_new_store) {
7139 		ph_idx = sdebug_add_store();
7140 		if (ph_idx < 0)
7141 			return ph_idx;
7142 	}
7143 	return sdebug_add_host_helper(ph_idx);
7144 }
7145 
7146 static void sdebug_do_remove_host(bool the_end)
7147 {
7148 	int idx = -1;
7149 	struct sdebug_host_info *sdbg_host = NULL;
7150 	struct sdebug_host_info *sdbg_host2;
7151 
7152 	spin_lock(&sdebug_host_list_lock);
7153 	if (!list_empty(&sdebug_host_list)) {
7154 		sdbg_host = list_entry(sdebug_host_list.prev,
7155 				       struct sdebug_host_info, host_list);
7156 		idx = sdbg_host->si_idx;
7157 	}
7158 	if (!the_end && idx >= 0) {
7159 		bool unique = true;
7160 
7161 		list_for_each_entry(sdbg_host2, &sdebug_host_list, host_list) {
7162 			if (sdbg_host2 == sdbg_host)
7163 				continue;
7164 			if (idx == sdbg_host2->si_idx) {
7165 				unique = false;
7166 				break;
7167 			}
7168 		}
7169 		if (unique) {
7170 			xa_set_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
7171 			if (idx == sdeb_most_recent_idx)
7172 				--sdeb_most_recent_idx;
7173 		}
7174 	}
7175 	if (sdbg_host)
7176 		list_del(&sdbg_host->host_list);
7177 	spin_unlock(&sdebug_host_list_lock);
7178 
7179 	if (!sdbg_host)
7180 		return;
7181 
7182 	device_unregister(&sdbg_host->dev);
7183 	--sdebug_num_hosts;
7184 }
7185 
7186 static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
7187 {
7188 	int num_in_q = 0;
7189 	struct sdebug_dev_info *devip;
7190 
7191 	block_unblock_all_queues(true);
7192 	devip = (struct sdebug_dev_info *)sdev->hostdata;
7193 	if (!devip) {
7194 		block_unblock_all_queues(false);
7195 		return -ENODEV;
7196 	}
7197 	num_in_q = atomic_read(&devip->num_in_q);
7198 
7199 	if (qdepth > SDEBUG_CANQUEUE) {
7200 		qdepth = SDEBUG_CANQUEUE;
7201 		pr_warn("%s: requested qdepth [%d] exceeds canqueue [%d], trim\n", __func__,
7202 			qdepth, SDEBUG_CANQUEUE);
7203 	}
7204 	if (qdepth < 1)
7205 		qdepth = 1;
7206 	if (qdepth != sdev->queue_depth)
7207 		scsi_change_queue_depth(sdev, qdepth);
7208 
7209 	if (SDEBUG_OPT_Q_NOISE & sdebug_opts) {
7210 		sdev_printk(KERN_INFO, sdev, "%s: qdepth=%d, num_in_q=%d\n",
7211 			    __func__, qdepth, num_in_q);
7212 	}
7213 	block_unblock_all_queues(false);
7214 	return sdev->queue_depth;
7215 }
7216 
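/*
 * For example (hypothetical settings): with every_nth=100 and
 * SDEBUG_OPT_TIMEOUT set in opts, every 100th command is silently
 * ignored so that the mid level sees it time out.
 */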
7217 static bool fake_timeout(struct scsi_cmnd *scp)
7218 {
7219 	if ((atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) == 0) {
7220 		if (sdebug_every_nth < -1)
7221 			sdebug_every_nth = -1;
7222 		if (SDEBUG_OPT_TIMEOUT & sdebug_opts)
7223 			return true; /* ignore command causing timeout */
7224 		else if (SDEBUG_OPT_MAC_TIMEOUT & sdebug_opts &&
7225 			 scsi_medium_access_command(scp))
7226 			return true; /* time out reads and writes */
7227 	}
7228 	return false;
7229 }
7230 
7231 /* Response to TUR or media access command when device stopped */
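/*
 * Worked example (hypothetical numbers): with tur_ms_to_ready=2000 and
 * the device created 1500 ms ago, stopped_state 2 stays in force and
 * diff_ns is 1.5e9; the TEST UNIT READY branch below then reports
 * 2e9 - 1.5e9 = 0.5e9 ns, i.e. 500 ms remaining, in the sense-data
 * information field.
 */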
7232 static int resp_not_ready(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
7233 {
7234 	int stopped_state;
7235 	u64 diff_ns = 0;
7236 	ktime_t now_ts = ktime_get_boottime();
7237 	struct scsi_device *sdp = scp->device;
7238 
7239 	stopped_state = atomic_read(&devip->stopped);
7240 	if (stopped_state == 2) {
7241 		if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
7242 			diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));
7243 			if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
7244 				/* tur_ms_to_ready timer extinguished */
7245 				atomic_set(&devip->stopped, 0);
7246 				return 0;
7247 			}
7248 		}
7249 		mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x1);
7250 		if (sdebug_verbose)
7251 			sdev_printk(KERN_INFO, sdp,
7252 				    "%s: Not ready: in process of becoming ready\n", my_name);
7253 		if (scp->cmnd[0] == TEST_UNIT_READY) {
7254 			u64 tur_nanosecs_to_ready = (u64)sdeb_tur_ms_to_ready * 1000000;
7255 
7256 			if (diff_ns <= tur_nanosecs_to_ready)
7257 				diff_ns = tur_nanosecs_to_ready - diff_ns;
7258 			else
7259 				diff_ns = tur_nanosecs_to_ready;
7260 			/* As per 20-061r2 approved for spc6 by T10 on 20200716 */
7261 			do_div(diff_ns, 1000000);	/* diff_ns becomes milliseconds */
7262 			scsi_set_sense_information(scp->sense_buffer, SCSI_SENSE_BUFFERSIZE,
7263 						   diff_ns);
7264 			return check_condition_result;
7265 		}
7266 	}
7267 	mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x2);
7268 	if (sdebug_verbose)
7269 		sdev_printk(KERN_INFO, sdp, "%s: Not ready: initializing command required\n",
7270 			    my_name);
7271 	return check_condition_result;
7272 }
7273 
7274 static int sdebug_map_queues(struct Scsi_Host *shost)
7275 {
7276 	int i, qoff;
7277 
7278 	if (shost->nr_hw_queues == 1)
7279 		return 0;
7280 
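	/*
	 * Example split (hypothetical values): submit_queues=4 with
	 * poll_queues=1 gives HCTX_TYPE_DEFAULT 3 queues at offset 0 and
	 * HCTX_TYPE_POLL 1 queue at offset 3; HCTX_TYPE_READ gets none.
	 */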
7281 	for (i = 0, qoff = 0; i < HCTX_MAX_TYPES; i++) {
7282 		struct blk_mq_queue_map *map = &shost->tag_set.map[i];
7283 
7284 		map->nr_queues  = 0;
7285 
7286 		if (i == HCTX_TYPE_DEFAULT)
7287 			map->nr_queues = submit_queues - poll_queues;
7288 		else if (i == HCTX_TYPE_POLL)
7289 			map->nr_queues = poll_queues;
7290 
7291 		if (!map->nr_queues) {
7292 			BUG_ON(i == HCTX_TYPE_DEFAULT);
7293 			continue;
7294 		}
7295 
7296 		map->queue_offset = qoff;
7297 		blk_mq_map_queues(map);
7298 
7299 		qoff += map->nr_queues;
7300 	}
7301 
7302 	return 0;
7303 
7304 }
7305 
7306 static int sdebug_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
7307 {
7308 	bool first;
7309 	bool retiring = false;
7310 	int num_entries = 0;
7311 	unsigned int qc_idx = 0;
7312 	unsigned long iflags;
7313 	ktime_t kt_from_boot = ktime_get_boottime();
7314 	struct sdebug_queue *sqp;
7315 	struct sdebug_queued_cmd *sqcp;
7316 	struct scsi_cmnd *scp;
7317 	struct sdebug_dev_info *devip;
7318 	struct sdebug_defer *sd_dp;
7319 
7320 	sqp = sdebug_q_arr + queue_num;
7321 	spin_lock_irqsave(&sqp->qc_lock, iflags);
7322 
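	/*
	 * Walk the in-use bitmap under the queue lock; only commands
	 * deferred with SDEB_DEFER_POLL whose completion time has already
	 * passed are completed here, everything else is left alone.
	 */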
7323 	for (first = true; first || qc_idx + 1 < sdebug_max_queue; ) {
7324 		if (first) {
7325 			qc_idx = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
7326 			first = false;
7327 		} else {
7328 			qc_idx = find_next_bit(sqp->in_use_bm, sdebug_max_queue, qc_idx + 1);
7329 		}
7330 		if (unlikely(qc_idx >= sdebug_max_queue))
7331 			break;
7332 
7333 		sqcp = &sqp->qc_arr[qc_idx];
7334 		sd_dp = sqcp->sd_dp;
7335 		if (unlikely(!sd_dp))
7336 			continue;
7337 		scp = sqcp->a_cmnd;
7338 		if (unlikely(!scp)) {
7339 			pr_err("scp is NULL, queue_num=%d, qc_idx=%u from %s\n",
7340 			       queue_num, qc_idx, __func__);
7341 			break;
7342 		}
7343 		if (sd_dp->defer_t == SDEB_DEFER_POLL) {
7344 			if (kt_from_boot < sd_dp->cmpl_ts)
7345 				continue;
7346 		} else {	/* ignore non-REQ_POLLED requests */
7347 			continue;
7348 		}
7349 		devip = (struct sdebug_dev_info *)scp->device->hostdata;
7350 		if (likely(devip))
7351 			atomic_dec(&devip->num_in_q);
7352 		else
7353 			pr_err("devip=NULL from %s\n", __func__);
7354 		if (unlikely(atomic_read(&retired_max_queue) > 0))
7355 			retiring = true;
7356 
7357 		sqcp->a_cmnd = NULL;
7358 		if (unlikely(!test_and_clear_bit(qc_idx, sqp->in_use_bm))) {
7359 			pr_err("Unexpected completion sqp %p queue_num=%d qc_idx=%u from %s\n",
7360 				sqp, queue_num, qc_idx, __func__);
7361 			break;
7362 		}
7363 		if (unlikely(retiring)) {	/* user has reduced max_queue */
7364 			int k, retval;
7365 
7366 			retval = atomic_read(&retired_max_queue);
7367 			if (qc_idx >= retval) {
7368 				pr_err("index %u too large\n", qc_idx);
7369 				break;
7370 			}
7371 			k = find_last_bit(sqp->in_use_bm, retval);
7372 			if ((k < sdebug_max_queue) || (k == retval))
7373 				atomic_set(&retired_max_queue, 0);
7374 			else
7375 				atomic_set(&retired_max_queue, k + 1);
7376 		}
7377 		sd_dp->defer_t = SDEB_DEFER_NONE;
7378 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
7379 		scsi_done(scp); /* callback to mid level */
7380 		spin_lock_irqsave(&sqp->qc_lock, iflags);
7381 		num_entries++;
7382 	}
7383 	spin_unlock_irqrestore(&sqp->qc_lock, iflags);
7384 	if (num_entries > 0)
7385 		atomic_add(num_entries, &sdeb_mq_poll_count);
7386 	return num_entries;
7387 }
7388 
7389 static int scsi_debug_queuecommand(struct Scsi_Host *shost,
7390 				   struct scsi_cmnd *scp)
7391 {
7392 	u8 sdeb_i;
7393 	struct scsi_device *sdp = scp->device;
7394 	const struct opcode_info_t *oip;
7395 	const struct opcode_info_t *r_oip;
7396 	struct sdebug_dev_info *devip;
7397 	u8 *cmd = scp->cmnd;
7398 	int (*r_pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
7399 	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *) = NULL;
7400 	int k, na;
7401 	int errsts = 0;
7402 	u64 lun_index = sdp->lun & 0x3FFF;
7403 	u32 flags;
7404 	u16 sa;
7405 	u8 opcode = cmd[0];
7406 	bool has_wlun_rl;
7407 	bool inject_now;
7408 
7409 	scsi_set_resid(scp, 0);
7410 	if (sdebug_statistics) {
7411 		atomic_inc(&sdebug_cmnd_count);
7412 		inject_now = inject_on_this_cmd();
7413 	} else {
7414 		inject_now = false;
7415 	}
7416 	if (unlikely(sdebug_verbose &&
7417 		     !(SDEBUG_OPT_NO_CDB_NOISE & sdebug_opts))) {
7418 		char b[120];
7419 		int n, len, sb;
7420 
7421 		len = scp->cmd_len;
7422 		sb = (int)sizeof(b);
7423 		if (len > 32)
7424 			strcpy(b, "too long, over 32 bytes");
7425 		else {
7426 			for (k = 0, n = 0; k < len && n < sb; ++k)
7427 				n += scnprintf(b + n, sb - n, "%02x ",
7428 					       (u32)cmd[k]);
7429 		}
7430 		sdev_printk(KERN_INFO, sdp, "%s: tag=%#x, cmd %s\n", my_name,
7431 			    blk_mq_unique_tag(scsi_cmd_to_rq(scp)), b);
7432 	}
7433 	if (unlikely(inject_now && (sdebug_opts & SDEBUG_OPT_HOST_BUSY)))
7434 		return SCSI_MLQUEUE_HOST_BUSY;
7435 	has_wlun_rl = (sdp->lun == SCSI_W_LUN_REPORT_LUNS);
7436 	if (unlikely(lun_index >= sdebug_max_luns && !has_wlun_rl))
7437 		goto err_out;
7438 
7439 	sdeb_i = opcode_ind_arr[opcode];	/* fully mapped */
7440 	oip = &opcode_info_arr[sdeb_i];		/* safe if table consistent */
7441 	devip = (struct sdebug_dev_info *)sdp->hostdata;
7442 	if (unlikely(!devip)) {
7443 		devip = find_build_dev_info(sdp);
7444 		if (!devip)
7445 			goto err_out;
7446 	}
7447 	if (unlikely(inject_now && !atomic_read(&sdeb_inject_pending)))
7448 		atomic_set(&sdeb_inject_pending, 1);
7449 
7450 	na = oip->num_attached;
7451 	r_pfp = oip->pfp;
7452 	if (na) {	/* multiple commands with this opcode */
7453 		r_oip = oip;
7454 		if (FF_SA & r_oip->flags) {
7455 			if (F_SA_LOW & oip->flags)
7456 				sa = 0x1f & cmd[1];
7457 			else
7458 				sa = get_unaligned_be16(cmd + 8);
7459 			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
7460 				if (opcode == oip->opcode && sa == oip->sa)
7461 					break;
7462 			}
7463 		} else {   /* since no service action only check opcode */
7464 			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
7465 				if (opcode == oip->opcode)
7466 					break;
7467 			}
7468 		}
7469 		if (k > na) {
7470 			if (F_SA_LOW & r_oip->flags)
7471 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 4);
7472 			else if (F_SA_HIGH & r_oip->flags)
7473 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, 7);
7474 			else
7475 				mk_sense_invalid_opcode(scp);
7476 			goto check_cond;
7477 		}
7478 	}	/* else (when na==0) we assume the oip is a match */
7479 	flags = oip->flags;
7480 	if (unlikely(F_INV_OP & flags)) {
7481 		mk_sense_invalid_opcode(scp);
7482 		goto check_cond;
7483 	}
7484 	if (unlikely(has_wlun_rl && !(F_RL_WLUN_OK & flags))) {
7485 		if (sdebug_verbose)
7486 			sdev_printk(KERN_INFO, sdp,
7487 				    "%s: Opcode 0x%x not supported for wlun\n", my_name, opcode);
7488 		mk_sense_invalid_opcode(scp);
7489 		goto check_cond;
7490 	}
7491 	if (unlikely(sdebug_strict)) {	/* check cdb against mask */
7492 		u8 rem;
7493 		int j;
7494 
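		/*
		 * For instance, if len_mask[k] is 0x7 and cmd[k] is 0x1f
		 * then rem is 0x18 and the inner loop stops at j=4, the
		 * highest offending bit reported in the sense data.
		 */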
7495 		for (k = 1; k < oip->len_mask[0] && k < 16; ++k) {
7496 			rem = ~oip->len_mask[k] & cmd[k];
7497 			if (rem) {
7498 				for (j = 7; j >= 0; --j, rem <<= 1) {
7499 					if (0x80 & rem)
7500 						break;
7501 				}
7502 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, k, j);
7503 				goto check_cond;
7504 			}
7505 		}
7506 	}
7507 	if (unlikely(!(F_SKIP_UA & flags) &&
7508 		     find_first_bit(devip->uas_bm,
7509 				    SDEBUG_NUM_UAS) != SDEBUG_NUM_UAS)) {
7510 		errsts = make_ua(scp, devip);
7511 		if (errsts)
7512 			goto check_cond;
7513 	}
7514 	if (unlikely(((F_M_ACCESS & flags) || scp->cmnd[0] == TEST_UNIT_READY) &&
7515 		     atomic_read(&devip->stopped))) {
7516 		errsts = resp_not_ready(scp, devip);
7517 		if (errsts)
7518 			goto fini;
7519 	}
7520 	if (sdebug_fake_rw && (F_FAKE_RW & flags))
7521 		goto fini;
7522 	if (unlikely(sdebug_every_nth)) {
7523 		if (fake_timeout(scp))
7524 			return 0;	/* ignore command: make trouble */
7525 	}
7526 	if (likely(oip->pfp))
7527 		pfp = oip->pfp;	/* calls a resp_* function */
7528 	else
7529 		pfp = r_pfp;    /* if leaf function ptr NULL, try the root's */
7530 
7531 fini:
7532 	if (F_DELAY_OVERR & flags)	/* cmds like INQUIRY respond asap */
7533 		return schedule_resp(scp, devip, errsts, pfp, 0, 0);
7534 	else if ((flags & F_LONG_DELAY) && (sdebug_jdelay > 0 ||
7535 					    sdebug_ndelay > 10000)) {
7536 		/*
7537 		 * Skip long delays if ndelay <= 10 microseconds. Otherwise
7538 		 * for Start Stop Unit (SSU) want at least 1 second delay and
7539 		 * if sdebug_jdelay>1 want a long delay of that many seconds.
7540 		 * For Synchronize Cache want 1/20 of SSU's delay.
7541 		 */
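		/*
		 * E.g. (assuming HZ=1000 and USER_HZ=100): sdebug_jdelay=2
		 * gives SSU mult_frac(200, 1000, 200) = 2000 jiffies (2 s)
		 * and Synchronize Cache mult_frac(200, 1000, 20 * 100) =
		 * 100 jiffies (0.1 s).
		 */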
7542 		int jdelay = (sdebug_jdelay < 2) ? 1 : sdebug_jdelay;
7543 		int denom = (flags & F_SYNC_DELAY) ? 20 : 1;
7544 
7545 		jdelay = mult_frac(USER_HZ * jdelay, HZ, denom * USER_HZ);
7546 		return schedule_resp(scp, devip, errsts, pfp, jdelay, 0);
7547 	} else
7548 		return schedule_resp(scp, devip, errsts, pfp, sdebug_jdelay,
7549 				     sdebug_ndelay);
7550 check_cond:
7551 	return schedule_resp(scp, devip, check_condition_result, NULL, 0, 0);
7552 err_out:
7553 	return schedule_resp(scp, NULL, DID_NO_CONNECT << 16, NULL, 0, 0);
7554 }
7555 
7556 static struct scsi_host_template sdebug_driver_template = {
7557 	.show_info =		scsi_debug_show_info,
7558 	.write_info =		scsi_debug_write_info,
7559 	.proc_name =		sdebug_proc_name,
7560 	.name =			"SCSI DEBUG",
7561 	.info =			scsi_debug_info,
7562 	.slave_alloc =		scsi_debug_slave_alloc,
7563 	.slave_configure =	scsi_debug_slave_configure,
7564 	.slave_destroy =	scsi_debug_slave_destroy,
7565 	.ioctl =		scsi_debug_ioctl,
7566 	.queuecommand =		scsi_debug_queuecommand,
7567 	.change_queue_depth =	sdebug_change_qdepth,
7568 	.map_queues =		sdebug_map_queues,
7569 	.mq_poll =		sdebug_blk_mq_poll,
7570 	.eh_abort_handler =	scsi_debug_abort,
7571 	.eh_device_reset_handler = scsi_debug_device_reset,
7572 	.eh_target_reset_handler = scsi_debug_target_reset,
7573 	.eh_bus_reset_handler = scsi_debug_bus_reset,
7574 	.eh_host_reset_handler = scsi_debug_host_reset,
7575 	.can_queue =		SDEBUG_CANQUEUE,
7576 	.this_id =		7,
7577 	.sg_tablesize =		SG_MAX_SEGMENTS,
7578 	.cmd_per_lun =		DEF_CMD_PER_LUN,
7579 	.max_sectors =		-1U,
7580 	.max_segment_size =	-1U,
7581 	.module =		THIS_MODULE,
7582 	.track_queue_depth =	1,
7583 };
7584 
7585 static int sdebug_driver_probe(struct device *dev)
7586 {
7587 	int error = 0;
7588 	struct sdebug_host_info *sdbg_host;
7589 	struct Scsi_Host *hpnt;
7590 	int hprot;
7591 
7592 	sdbg_host = to_sdebug_host(dev);
7593 
7594 	sdebug_driver_template.can_queue = sdebug_max_queue;
7595 	sdebug_driver_template.cmd_per_lun = sdebug_max_queue;
7596 	if (!sdebug_clustering)
7597 		sdebug_driver_template.dma_boundary = PAGE_SIZE - 1;
7598 
7599 	hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
7600 	if (!hpnt) {
7601 		pr_err("scsi_host_alloc failed\n");
7602 		error = -ENODEV;
7603 		return error;
7604 	}
7605 	if (submit_queues > nr_cpu_ids) {
7606 		pr_warn("%s: trim submit_queues (was %d) to nr_cpu_ids=%u\n",
7607 			my_name, submit_queues, nr_cpu_ids);
7608 		submit_queues = nr_cpu_ids;
7609 	}
7610 	/*
7611 	 * Decide whether to tell scsi subsystem that we want mq. The
7612 	 * following should give the same answer for each host.
7613 	 */
7614 	hpnt->nr_hw_queues = submit_queues;
7615 	if (sdebug_host_max_queue)
7616 		hpnt->host_tagset = 1;
7617 
7618 	/* poll queues are possible for nr_hw_queues > 1 */
7619 	if (hpnt->nr_hw_queues == 1 || (poll_queues < 1)) {
7620 		pr_warn("%s: trim poll_queues to 0. poll_q/nr_hw = (%d/%d)\n",
7621 			 my_name, poll_queues, hpnt->nr_hw_queues);
7622 		poll_queues = 0;
7623 	}
7624 
7625 	/*
7626 	 * Poll queues don't need interrupts, but we need at least one I/O queue
7627 	 * left over for non-polled I/O.
7628 	 * If condition not met, trim poll_queues to 1 (just for simplicity).
7629 	 */
7630 	if (poll_queues >= submit_queues) {
7631 		if (submit_queues < 3)
7632 			pr_warn("%s: trim poll_queues to 1\n", my_name);
7633 		else
7634 			pr_warn("%s: trim poll_queues to 1. Perhaps try poll_queues=%d\n",
7635 				my_name, submit_queues - 1);
7636 		poll_queues = 1;
7637 	}
7638 	if (poll_queues)
7639 		hpnt->nr_maps = 3;
7640 
7641 	sdbg_host->shost = hpnt;
7642 	*((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host;
7643 	if ((hpnt->this_id >= 0) && (sdebug_num_tgts > hpnt->this_id))
7644 		hpnt->max_id = sdebug_num_tgts + 1;
7645 	else
7646 		hpnt->max_id = sdebug_num_tgts;
7647 	/* = sdebug_max_luns; */
7648 	hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
7649 
7650 	hprot = 0;
7651 
7652 	switch (sdebug_dif) {
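	/*
	 * Map the dif/dix module parameters onto the host's protection
	 * capability bits; e.g. dif=1 with dix=1 yields DIF1|DIX1.
	 */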
7653 
7654 	case T10_PI_TYPE1_PROTECTION:
7655 		hprot = SHOST_DIF_TYPE1_PROTECTION;
7656 		if (sdebug_dix)
7657 			hprot |= SHOST_DIX_TYPE1_PROTECTION;
7658 		break;
7659 
7660 	case T10_PI_TYPE2_PROTECTION:
7661 		hprot = SHOST_DIF_TYPE2_PROTECTION;
7662 		if (sdebug_dix)
7663 			hprot |= SHOST_DIX_TYPE2_PROTECTION;
7664 		break;
7665 
7666 	case T10_PI_TYPE3_PROTECTION:
7667 		hprot = SHOST_DIF_TYPE3_PROTECTION;
7668 		if (sdebug_dix)
7669 			hprot |= SHOST_DIX_TYPE3_PROTECTION;
7670 		break;
7671 
7672 	default:
7673 		if (sdebug_dix)
7674 			hprot |= SHOST_DIX_TYPE0_PROTECTION;
7675 		break;
7676 	}
7677 
7678 	scsi_host_set_prot(hpnt, hprot);
7679 
7680 	if (have_dif_prot || sdebug_dix)
7681 		pr_info("host protection%s%s%s%s%s%s%s\n",
7682 			(hprot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
7683 			(hprot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
7684 			(hprot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
7685 			(hprot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
7686 			(hprot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
7687 			(hprot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
7688 			(hprot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");
7689 
7690 	if (sdebug_guard == 1)
7691 		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
7692 	else
7693 		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);
7694 
7695 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & sdebug_opts);
7696 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & sdebug_opts);
7697 	if (sdebug_every_nth)	/* need stats counters for every_nth */
7698 		sdebug_statistics = true;
7699 	error = scsi_add_host(hpnt, &sdbg_host->dev);
7700 	if (error) {
7701 		pr_err("scsi_add_host failed\n");
7702 		error = -ENODEV;
7703 		scsi_host_put(hpnt);
7704 	} else {
7705 		scsi_scan_host(hpnt);
7706 	}
7707 
7708 	return error;
7709 }
7710 
7711 static void sdebug_driver_remove(struct device *dev)
7712 {
7713 	struct sdebug_host_info *sdbg_host;
7714 	struct sdebug_dev_info *sdbg_devinfo, *tmp;
7715 
7716 	sdbg_host = to_sdebug_host(dev);
7717 
7718 	scsi_remove_host(sdbg_host->shost);
7719 
7720 	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
7721 				 dev_list) {
7722 		list_del(&sdbg_devinfo->dev_list);
7723 		kfree(sdbg_devinfo->zstate);
7724 		kfree(sdbg_devinfo);
7725 	}
7726 
7727 	scsi_host_put(sdbg_host->shost);
7728 }
7729 
7730 static int pseudo_lld_bus_match(struct device *dev,
7731 				struct device_driver *dev_driver)
7732 {
7733 	return 1;
7734 }
7735 
7736 static struct bus_type pseudo_lld_bus = {
7737 	.name = "pseudo",
7738 	.match = pseudo_lld_bus_match,
7739 	.probe = sdebug_driver_probe,
7740 	.remove = sdebug_driver_remove,
7741 	.drv_groups = sdebug_drv_groups,
7742 };
7743