1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
4  *  Copyright (C) 1992  Eric Youngdale
5  *  Simulate a host adapter with 2 disks attached.  Do a lot of checking
6  *  to make sure that we are not getting blocks mixed up, and PANIC if
7  *  anything out of the ordinary is seen.
8  * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
9  *
10  * Copyright (C) 2001 - 2021 Douglas Gilbert
11  *
12  *  For documentation see http://sg.danny.cz/sg/scsi_debug.html
13  */
14 
15 
16 #define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
17 
18 #include <linux/module.h>
19 #include <linux/align.h>
20 #include <linux/kernel.h>
21 #include <linux/errno.h>
22 #include <linux/jiffies.h>
23 #include <linux/slab.h>
24 #include <linux/types.h>
25 #include <linux/string.h>
26 #include <linux/fs.h>
27 #include <linux/init.h>
28 #include <linux/proc_fs.h>
29 #include <linux/vmalloc.h>
30 #include <linux/moduleparam.h>
31 #include <linux/scatterlist.h>
32 #include <linux/blkdev.h>
33 #include <linux/crc-t10dif.h>
34 #include <linux/spinlock.h>
35 #include <linux/interrupt.h>
36 #include <linux/atomic.h>
37 #include <linux/hrtimer.h>
38 #include <linux/uuid.h>
39 #include <linux/t10-pi.h>
40 #include <linux/msdos_partition.h>
41 #include <linux/random.h>
42 #include <linux/xarray.h>
43 #include <linux/prefetch.h>
44 #include <linux/debugfs.h>
45 #include <linux/async.h>
46 
47 #include <net/checksum.h>
48 
49 #include <asm/unaligned.h>
50 
51 #include <scsi/scsi.h>
52 #include <scsi/scsi_cmnd.h>
53 #include <scsi/scsi_device.h>
54 #include <scsi/scsi_host.h>
55 #include <scsi/scsicam.h>
56 #include <scsi/scsi_eh.h>
57 #include <scsi/scsi_tcq.h>
58 #include <scsi/scsi_dbg.h>
59 
60 #include "sd.h"
61 #include "scsi_logging.h"
62 
63 /* make sure inq_product_rev string corresponds to this version */
64 #define SDEBUG_VERSION "0191"	/* format to fit INQUIRY revision field */
65 static const char *sdebug_version_date = "20210520";
66 
67 #define MY_NAME "scsi_debug"
68 
69 /* Additional Sense Code (ASC) */
70 #define NO_ADDITIONAL_SENSE 0x0
71 #define LOGICAL_UNIT_NOT_READY 0x4
72 #define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
73 #define UNRECOVERED_READ_ERR 0x11
74 #define PARAMETER_LIST_LENGTH_ERR 0x1a
75 #define INVALID_OPCODE 0x20
76 #define LBA_OUT_OF_RANGE 0x21
77 #define INVALID_FIELD_IN_CDB 0x24
78 #define INVALID_FIELD_IN_PARAM_LIST 0x26
79 #define WRITE_PROTECTED 0x27
80 #define UA_RESET_ASC 0x29
81 #define UA_CHANGED_ASC 0x2a
82 #define TARGET_CHANGED_ASC 0x3f
83 #define LUNS_CHANGED_ASCQ 0x0e
84 #define INSUFF_RES_ASC 0x55
85 #define INSUFF_RES_ASCQ 0x3
86 #define POWER_ON_RESET_ASCQ 0x0
87 #define POWER_ON_OCCURRED_ASCQ 0x1
88 #define BUS_RESET_ASCQ 0x2	/* scsi bus reset occurred */
89 #define MODE_CHANGED_ASCQ 0x1	/* mode parameters changed */
90 #define CAPACITY_CHANGED_ASCQ 0x9
91 #define SAVING_PARAMS_UNSUP 0x39
92 #define TRANSPORT_PROBLEM 0x4b
93 #define THRESHOLD_EXCEEDED 0x5d
94 #define LOW_POWER_COND_ON 0x5e
95 #define MISCOMPARE_VERIFY_ASC 0x1d
96 #define MICROCODE_CHANGED_ASCQ 0x1	/* with TARGET_CHANGED_ASC */
97 #define MICROCODE_CHANGED_WO_RESET_ASCQ 0x16
98 #define WRITE_ERROR_ASC 0xc
99 #define UNALIGNED_WRITE_ASCQ 0x4
100 #define WRITE_BOUNDARY_ASCQ 0x5
101 #define READ_INVDATA_ASCQ 0x6
102 #define READ_BOUNDARY_ASCQ 0x7
103 #define ATTEMPT_ACCESS_GAP 0x9
104 #define INSUFF_ZONE_ASCQ 0xe
105 
106 /* Additional Sense Code Qualifier (ASCQ) */
107 #define ACK_NAK_TO 0x3
108 
109 /* Default values for driver parameters */
110 #define DEF_NUM_HOST   1
111 #define DEF_NUM_TGTS   1
112 #define DEF_MAX_LUNS   1
113 /* With these defaults, this driver will make 1 host with 1 target
114  * (id 0) containing 1 logical unit (lun 0). That is 1 device.
115  */
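/*
 * Usage sketch (illustrative; the option values are assumed for the
 * example, they are not recommendations): with the defaults just
 * described, a bare
 *	modprobe scsi_debug
 * creates 1 host with 1 target and 1 LUN, while something like
 *	modprobe scsi_debug num_tgts=2 max_luns=4 dev_size_mb=64
 * presents 2 targets, each with 4 LUNs, backed by a 64 MiB ram store.
 */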
116 #define DEF_ATO 1
117 #define DEF_CDB_LEN 10
118 #define DEF_JDELAY   1		/* if > 0 unit is a jiffy */
119 #define DEF_DEV_SIZE_PRE_INIT   0
120 #define DEF_DEV_SIZE_MB   8
121 #define DEF_ZBC_DEV_SIZE_MB   128
122 #define DEF_DIF 0
123 #define DEF_DIX 0
124 #define DEF_PER_HOST_STORE false
125 #define DEF_D_SENSE   0
126 #define DEF_EVERY_NTH   0
127 #define DEF_FAKE_RW	0
128 #define DEF_GUARD 0
129 #define DEF_HOST_LOCK 0
130 #define DEF_LBPU 0
131 #define DEF_LBPWS 0
132 #define DEF_LBPWS10 0
133 #define DEF_LBPRZ 1
134 #define DEF_LOWEST_ALIGNED 0
135 #define DEF_NDELAY   0		/* if > 0 unit is a nanosecond */
136 #define DEF_NO_LUN_0   0
137 #define DEF_NUM_PARTS   0
138 #define DEF_OPTS   0
139 #define DEF_OPT_BLKS 1024
140 #define DEF_PHYSBLK_EXP 0
141 #define DEF_OPT_XFERLEN_EXP 0
142 #define DEF_PTYPE   TYPE_DISK
143 #define DEF_RANDOM false
144 #define DEF_REMOVABLE false
145 #define DEF_SCSI_LEVEL   7    /* INQUIRY, byte2 [6->SPC-4; 7->SPC-5] */
146 #define DEF_SECTOR_SIZE 512
147 #define DEF_UNMAP_ALIGNMENT 0
148 #define DEF_UNMAP_GRANULARITY 1
149 #define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
150 #define DEF_UNMAP_MAX_DESC 256
151 #define DEF_VIRTUAL_GB   0
152 #define DEF_VPD_USE_HOSTNO 1
153 #define DEF_WRITESAME_LENGTH 0xFFFF
154 #define DEF_STRICT 0
155 #define DEF_STATISTICS false
156 #define DEF_SUBMIT_QUEUES 1
157 #define DEF_TUR_MS_TO_READY 0
158 #define DEF_UUID_CTL 0
159 #define JDELAY_OVERRIDDEN -9999
160 
161 /* Default parameters for ZBC drives */
162 #define DEF_ZBC_ZONE_SIZE_MB	128
163 #define DEF_ZBC_MAX_OPEN_ZONES	8
164 #define DEF_ZBC_NR_CONV_ZONES	1
165 
166 #define SDEBUG_LUN_0_VAL 0
167 
168 /* bit mask values for sdebug_opts */
169 #define SDEBUG_OPT_NOISE		1
170 #define SDEBUG_OPT_MEDIUM_ERR		2
171 #define SDEBUG_OPT_TIMEOUT		4
172 #define SDEBUG_OPT_RECOVERED_ERR	8
173 #define SDEBUG_OPT_TRANSPORT_ERR	16
174 #define SDEBUG_OPT_DIF_ERR		32
175 #define SDEBUG_OPT_DIX_ERR		64
176 #define SDEBUG_OPT_MAC_TIMEOUT		128
177 #define SDEBUG_OPT_SHORT_TRANSFER	0x100
178 #define SDEBUG_OPT_Q_NOISE		0x200
179 #define SDEBUG_OPT_ALL_TSF		0x400	/* ignore */
180 #define SDEBUG_OPT_RARE_TSF		0x800
181 #define SDEBUG_OPT_N_WCE		0x1000
182 #define SDEBUG_OPT_RESET_NOISE		0x2000
183 #define SDEBUG_OPT_NO_CDB_NOISE		0x4000
184 #define SDEBUG_OPT_HOST_BUSY		0x8000
185 #define SDEBUG_OPT_CMD_ABORT		0x10000
186 #define SDEBUG_OPT_ALL_NOISE (SDEBUG_OPT_NOISE | SDEBUG_OPT_Q_NOISE | \
187 			      SDEBUG_OPT_RESET_NOISE)
188 #define SDEBUG_OPT_ALL_INJECTING (SDEBUG_OPT_RECOVERED_ERR | \
189 				  SDEBUG_OPT_TRANSPORT_ERR | \
190 				  SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR | \
191 				  SDEBUG_OPT_SHORT_TRANSFER | \
192 				  SDEBUG_OPT_HOST_BUSY | \
193 				  SDEBUG_OPT_CMD_ABORT)
194 #define SDEBUG_OPT_RECOV_DIF_DIX (SDEBUG_OPT_RECOVERED_ERR | \
195 				  SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR)
196 
197 /* As indicated in SAM-5 and SPC-4, Unit Attentions (UAs) are returned in
198  * priority order. In the subset implemented here, lower numbers have higher
199  * priority. The UA numbers should be a sequence starting from 0 with
200  * SDEBUG_NUM_UAS being 1 higher than the highest numbered UA. */
201 #define SDEBUG_UA_POR 0		/* Power on, reset, or bus device reset */
202 #define SDEBUG_UA_POOCCUR 1	/* Power on occurred */
203 #define SDEBUG_UA_BUS_RESET 2
204 #define SDEBUG_UA_MODE_CHANGED 3
205 #define SDEBUG_UA_CAPACITY_CHANGED 4
206 #define SDEBUG_UA_LUNS_CHANGED 5
207 #define SDEBUG_UA_MICROCODE_CHANGED 6	/* simulate firmware change */
208 #define SDEBUG_UA_MICROCODE_CHANGED_WO_RESET 7
209 #define SDEBUG_NUM_UAS 8
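/*
 * Worked example: if both SDEBUG_UA_POR (bit 0) and SDEBUG_UA_LUNS_CHANGED
 * (bit 5) are pending in a device's uas_bm, make_ua() below (which scans
 * with find_first_bit()) reports the power on/reset UA first, since lower
 * bit numbers have higher priority.
 */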
210 
211 /* When SDEBUG_OPT_MEDIUM_ERR is set in sdebug_opts, a medium error is
212  * simulated at this sector on read commands: */
213 #define OPT_MEDIUM_ERR_ADDR   0x1234 /* that's sector 4660 in decimal */
214 #define OPT_MEDIUM_ERR_NUM    10     /* number of consecutive medium errs */
215 
216 /* SDEBUG_CANQUEUE is the maximum number of commands that can be queued
217  * (for response) per submit queue at one time. Can be reduced by max_queue
218  * option. Command responses are not queued when jdelay=0 and ndelay=0. The
219  * per-device DEF_CMD_PER_LUN can be changed via sysfs:
220  * /sys/class/scsi_device/<h:c:t:l>/device/queue_depth
221  * but cannot exceed SDEBUG_CANQUEUE.
222  */
223 #define SDEBUG_CANQUEUE_WORDS  3	/* a WORD here is BITS_PER_LONG bits */
224 #define SDEBUG_CANQUEUE  (SDEBUG_CANQUEUE_WORDS * BITS_PER_LONG)
225 #define DEF_CMD_PER_LUN  SDEBUG_CANQUEUE
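/*
 * Worked numbers (illustrative): with SDEBUG_CANQUEUE_WORDS == 3, a 64-bit
 * build (BITS_PER_LONG == 64) yields SDEBUG_CANQUEUE == 192; a 32-bit build
 * yields 96. A compile-time restatement of that relationship, shown only as
 * a sketch:
 */
#if 0
static inline void sdebug_canqueue_example(void)
{
	BUILD_BUG_ON(SDEBUG_CANQUEUE != SDEBUG_CANQUEUE_WORDS * BITS_PER_LONG);
}
#endif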
226 
227 /* UA - Unit Attention; SA - Service Action; SSU - Start Stop Unit */
228 #define F_D_IN			1	/* Data-in command (e.g. READ) */
229 #define F_D_OUT			2	/* Data-out command (e.g. WRITE) */
230 #define F_D_OUT_MAYBE		4	/* WRITE SAME, NDOB bit */
231 #define F_D_UNKN		8
232 #define F_RL_WLUN_OK		0x10	/* allowed with REPORT LUNS W-LUN */
233 #define F_SKIP_UA		0x20	/* bypass UAs (e.g. INQUIRY command) */
234 #define F_DELAY_OVERR		0x40	/* for commands like INQUIRY */
235 #define F_SA_LOW		0x80	/* SA is in cdb byte 1, bits 4 to 0 */
236 #define F_SA_HIGH		0x100	/* SA is in cdb bytes 8 and 9 */
237 #define F_INV_OP		0x200	/* invalid opcode (not supported) */
238 #define F_FAKE_RW		0x400	/* bypass resp_*() when fake_rw set */
239 #define F_M_ACCESS		0x800	/* media access, reacts to SSU state */
240 #define F_SSU_DELAY		0x1000	/* SSU command delay (long-ish) */
241 #define F_SYNC_DELAY		0x2000	/* SYNCHRONIZE CACHE delay */
242 
243 /* Useful combinations of the above flags */
244 #define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
245 #define FF_MEDIA_IO (F_M_ACCESS | F_FAKE_RW)
246 #define FF_SA (F_SA_HIGH | F_SA_LOW)
247 #define F_LONG_DELAY		(F_SSU_DELAY | F_SYNC_DELAY)
248 
249 #define SDEBUG_MAX_PARTS 4
250 
251 #define SDEBUG_MAX_CMD_LEN 32
252 
253 #define SDEB_XA_NOT_IN_USE XA_MARK_1
254 
255 static struct kmem_cache *queued_cmd_cache;
256 
257 #define TO_QUEUED_CMD(scmd)  ((void *)(scmd)->host_scribble)
258 #define ASSIGN_QUEUED_CMD(scmnd, qc) { (scmnd)->host_scribble = (void *) qc; }
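/*
 * Round-trip sketch (hypothetical helper, not driver code): the two macros
 * above stash a struct sdebug_queued_cmd pointer in the mid-level's
 * host_scribble field and later fetch it back.
 */
#if 0
static struct sdebug_queued_cmd *example_roundtrip(struct scsi_cmnd *scmnd,
						   struct sdebug_queued_cmd *qc)
{
	ASSIGN_QUEUED_CMD(scmnd, qc);	/* store in scmnd->host_scribble */
	return TO_QUEUED_CMD(scmnd);	/* read it back; yields qc */
}
#endif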
259 
260 /* Zone types (zbcr05 table 25) */
261 enum sdebug_z_type {
262 	ZBC_ZTYPE_CNV	= 0x1,
263 	ZBC_ZTYPE_SWR	= 0x2,
264 	ZBC_ZTYPE_SWP	= 0x3,
265 	/* ZBC_ZTYPE_SOBR = 0x4, */
266 	ZBC_ZTYPE_GAP	= 0x5,
267 };
268 
269 /* enumeration names taken from table 26, zbcr05 */
270 enum sdebug_z_cond {
271 	ZBC_NOT_WRITE_POINTER	= 0x0,
272 	ZC1_EMPTY		= 0x1,
273 	ZC2_IMPLICIT_OPEN	= 0x2,
274 	ZC3_EXPLICIT_OPEN	= 0x3,
275 	ZC4_CLOSED		= 0x4,
276 	ZC6_READ_ONLY		= 0xd,
277 	ZC5_FULL		= 0xe,
278 	ZC7_OFFLINE		= 0xf,
279 };
280 
281 struct sdeb_zone_state {	/* ZBC: per zone state */
282 	enum sdebug_z_type z_type;
283 	enum sdebug_z_cond z_cond;
284 	bool z_non_seq_resource;
285 	unsigned int z_size;
286 	sector_t z_start;
287 	sector_t z_wp;
288 };
289 
290 enum sdebug_err_type {
291 	ERR_TMOUT_CMD		= 0,	/* make specific scsi command timeout */
292 	ERR_FAIL_QUEUE_CMD	= 1,	/* make specific scsi command's */
293 					/* queuecmd return failed */
294 	ERR_FAIL_CMD		= 2,	/* make specific scsi command's */
295 					/* queuecmd return succeed but */
296 					/* with errors set in scsi_cmnd */
297 	ERR_ABORT_CMD_FAILED	= 3,	/* control return FAILED from */
298 					/* scsi_debug_abort() */
299 	ERR_LUN_RESET_FAILED	= 4,	/* control return FAILED from */
300 					/* scsi_debug_device_reset() */
301 };
302 
303 struct sdebug_err_inject {
304 	int type;
305 	struct list_head list;
306 	int cnt;
307 	unsigned char cmd;
308 	struct rcu_head rcu;
309 
310 	union {
311 		/*
312 		 * For ERR_FAIL_QUEUE_CMD
313 		 */
314 		int queuecmd_ret;
315 
316 		/*
317 		 * For ERR_FAIL_CMD
318 		 */
319 		struct {
320 			unsigned char host_byte;
321 			unsigned char driver_byte;
322 			unsigned char status_byte;
323 			unsigned char sense_key;
324 			unsigned char asc;
325 			unsigned char asq;
326 		};
327 	};
328 };
329 
330 struct sdebug_dev_info {
331 	struct list_head dev_list;
332 	unsigned int channel;
333 	unsigned int target;
334 	u64 lun;
335 	uuid_t lu_name;
336 	struct sdebug_host_info *sdbg_host;
337 	unsigned long uas_bm[1];
338 	atomic_t stopped;	/* 1: by SSU, 2: device start */
339 	bool used;
340 
341 	/* For ZBC devices */
342 	bool zoned;
343 	unsigned int zcap;
344 	unsigned int zsize;
345 	unsigned int zsize_shift;
346 	unsigned int nr_zones;
347 	unsigned int nr_conv_zones;
348 	unsigned int nr_seq_zones;
349 	unsigned int nr_imp_open;
350 	unsigned int nr_exp_open;
351 	unsigned int nr_closed;
352 	unsigned int max_open;
353 	ktime_t create_ts;	/* time since bootup that this device was created */
354 	struct sdeb_zone_state *zstate;
355 
356 	struct dentry *debugfs_entry;
357 	struct spinlock list_lock;
358 	struct list_head inject_err_list;
359 };
360 
361 struct sdebug_target_info {
362 	bool reset_fail;
363 	struct dentry *debugfs_entry;
364 };
365 
366 struct sdebug_host_info {
367 	struct list_head host_list;
368 	int si_idx;	/* sdeb_store_info (per host) xarray index */
369 	struct Scsi_Host *shost;
370 	struct device dev;
371 	struct list_head dev_info_list;
372 };
373 
374 /* There is an xarray of pointers to this struct's objects, one per host */
375 struct sdeb_store_info {
376 	rwlock_t macc_lck;	/* for atomic media access on this store */
377 	u8 *storep;		/* user data storage (ram) */
378 	struct t10_pi_tuple *dif_storep; /* protection info */
379 	void *map_storep;	/* provisioning map */
380 };
381 
382 #define dev_to_sdebug_host(d)	\
383 	container_of(d, struct sdebug_host_info, dev)
384 
385 #define shost_to_sdebug_host(shost)	\
386 	dev_to_sdebug_host(shost->dma_dev)
387 
388 enum sdeb_defer_type {SDEB_DEFER_NONE = 0, SDEB_DEFER_HRT = 1,
389 		      SDEB_DEFER_WQ = 2, SDEB_DEFER_POLL = 3};
390 
391 struct sdebug_defer {
392 	struct hrtimer hrt;
393 	struct execute_work ew;
394 	ktime_t cmpl_ts;	/* time since boot to complete this cmd */
395 	int issuing_cpu;
396 	bool aborted;	/* true when blk_abort_request() already called */
397 	enum sdeb_defer_type defer_t;
398 };
399 
400 struct sdebug_queued_cmd {
401 	/* One of these is allocated per queued command and hung off
402 	 * scmd->host_scribble (see TO_QUEUED_CMD()) while awaiting completion.
403 	 */
404 	struct sdebug_defer sd_dp;
405 	struct scsi_cmnd *scmd;
406 };
407 
408 struct sdebug_scsi_cmd {
409 	spinlock_t   lock;
410 };
411 
412 static atomic_t sdebug_cmnd_count;   /* number of incoming commands */
413 static atomic_t sdebug_completions;  /* count of deferred completions */
414 static atomic_t sdebug_miss_cpus;    /* submission + completion cpus differ */
415 static atomic_t sdebug_a_tsf;	     /* 'almost task set full' counter */
416 static atomic_t sdeb_inject_pending;
417 static atomic_t sdeb_mq_poll_count;  /* bumped when mq_poll returns > 0 */
418 
419 struct opcode_info_t {
420 	u8 num_attached;	/* 0 if this is it (i.e. a leaf); use 0xff */
421 				/* for terminating element */
422 	u8 opcode;		/* if num_attached > 0, preferred */
423 	u16 sa;			/* service action */
424 	u32 flags;		/* OR-ed set of SDEB_F_* */
425 	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
426 	const struct opcode_info_t *arrp;  /* num_attached elements or NULL */
427 	u8 len_mask[16];	/* len_mask[0]-->cdb_len, then mask for cdb */
428 				/* 1 to min(cdb_len, 15); ignore cdb[15...] */
429 };
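/*
 * Validation sketch (hypothetical helper): how len_mask[] is meant to be
 * read. len_mask[0] holds the expected cdb length and len_mask[1..15] flag
 * which bits of each cdb byte may legitimately be set, so a strict check
 * can reject a cdb with reserved bits set.
 */
#if 0
static bool example_cdb_bits_ok(const struct opcode_info_t *oip, const u8 *cdb)
{
	int k;

	for (k = 1; k < oip->len_mask[0] && k < 16; ++k)
		if (cdb[k] & ~oip->len_mask[k])
			return false;	/* reserved bit set in cdb byte k */
	return true;
}
#endif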
430 
431 /* SCSI opcodes (first byte of cdb) of interest mapped onto these indexes */
432 enum sdeb_opcode_index {
433 	SDEB_I_INVALID_OPCODE =	0,
434 	SDEB_I_INQUIRY = 1,
435 	SDEB_I_REPORT_LUNS = 2,
436 	SDEB_I_REQUEST_SENSE = 3,
437 	SDEB_I_TEST_UNIT_READY = 4,
438 	SDEB_I_MODE_SENSE = 5,		/* 6, 10 */
439 	SDEB_I_MODE_SELECT = 6,		/* 6, 10 */
440 	SDEB_I_LOG_SENSE = 7,
441 	SDEB_I_READ_CAPACITY = 8,	/* 10; 16 is in SA_IN(16) */
442 	SDEB_I_READ = 9,		/* 6, 10, 12, 16 */
443 	SDEB_I_WRITE = 10,		/* 6, 10, 12, 16 */
444 	SDEB_I_START_STOP = 11,
445 	SDEB_I_SERV_ACT_IN_16 = 12,	/* add ...SERV_ACT_IN_12 if needed */
446 	SDEB_I_SERV_ACT_OUT_16 = 13,	/* add ...SERV_ACT_OUT_12 if needed */
447 	SDEB_I_MAINT_IN = 14,
448 	SDEB_I_MAINT_OUT = 15,
449 	SDEB_I_VERIFY = 16,		/* VERIFY(10), VERIFY(16) */
450 	SDEB_I_VARIABLE_LEN = 17,	/* READ(32), WRITE(32), WR_SCAT(32) */
451 	SDEB_I_RESERVE = 18,		/* 6, 10 */
452 	SDEB_I_RELEASE = 19,		/* 6, 10 */
453 	SDEB_I_ALLOW_REMOVAL = 20,	/* PREVENT ALLOW MEDIUM REMOVAL */
454 	SDEB_I_REZERO_UNIT = 21,	/* REWIND in SSC */
455 	SDEB_I_ATA_PT = 22,		/* 12, 16 */
456 	SDEB_I_SEND_DIAG = 23,
457 	SDEB_I_UNMAP = 24,
458 	SDEB_I_WRITE_BUFFER = 25,
459 	SDEB_I_WRITE_SAME = 26,		/* 10, 16 */
460 	SDEB_I_SYNC_CACHE = 27,		/* 10, 16 */
461 	SDEB_I_COMP_WRITE = 28,
462 	SDEB_I_PRE_FETCH = 29,		/* 10, 16 */
463 	SDEB_I_ZONE_OUT = 30,		/* 0x94+SA; includes no data xfer */
464 	SDEB_I_ZONE_IN = 31,		/* 0x95+SA; all have data-in */
465 	SDEB_I_LAST_ELEM_P1 = 32,	/* keep this last (previous + 1) */
466 };
467 
468 
469 static const unsigned char opcode_ind_arr[256] = {
470 /* 0x0; 0x0->0x1f: 6 byte cdbs */
471 	SDEB_I_TEST_UNIT_READY, SDEB_I_REZERO_UNIT, 0, SDEB_I_REQUEST_SENSE,
472 	    0, 0, 0, 0,
473 	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, 0,
474 	0, 0, SDEB_I_INQUIRY, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
475 	    SDEB_I_RELEASE,
476 	0, 0, SDEB_I_MODE_SENSE, SDEB_I_START_STOP, 0, SDEB_I_SEND_DIAG,
477 	    SDEB_I_ALLOW_REMOVAL, 0,
478 /* 0x20; 0x20->0x3f: 10 byte cdbs */
479 	0, 0, 0, 0, 0, SDEB_I_READ_CAPACITY, 0, 0,
480 	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, SDEB_I_VERIFY,
481 	0, 0, 0, 0, SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, 0,
482 	0, 0, 0, SDEB_I_WRITE_BUFFER, 0, 0, 0, 0,
483 /* 0x40; 0x40->0x5f: 10 byte cdbs */
484 	0, SDEB_I_WRITE_SAME, SDEB_I_UNMAP, 0, 0, 0, 0, 0,
485 	0, 0, 0, 0, 0, SDEB_I_LOG_SENSE, 0, 0,
486 	0, 0, 0, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
487 	    SDEB_I_RELEASE,
488 	0, 0, SDEB_I_MODE_SENSE, 0, 0, 0, 0, 0,
489 /* 0x60; 0x60->0x7d are reserved, 0x7e is "extended cdb" */
490 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
491 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
492 	0, SDEB_I_VARIABLE_LEN,
493 /* 0x80; 0x80->0x9f: 16 byte cdbs */
494 	0, 0, 0, 0, 0, SDEB_I_ATA_PT, 0, 0,
495 	SDEB_I_READ, SDEB_I_COMP_WRITE, SDEB_I_WRITE, 0,
496 	0, 0, 0, SDEB_I_VERIFY,
497 	SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, SDEB_I_WRITE_SAME,
498 	SDEB_I_ZONE_OUT, SDEB_I_ZONE_IN, 0, 0,
499 	0, 0, 0, 0, 0, 0, SDEB_I_SERV_ACT_IN_16, SDEB_I_SERV_ACT_OUT_16,
500 /* 0xa0; 0xa0->0xbf: 12 byte cdbs */
501 	SDEB_I_REPORT_LUNS, SDEB_I_ATA_PT, 0, SDEB_I_MAINT_IN,
502 	     SDEB_I_MAINT_OUT, 0, 0, 0,
503 	SDEB_I_READ, 0 /* SDEB_I_SERV_ACT_OUT_12 */, SDEB_I_WRITE,
504 	     0 /* SDEB_I_SERV_ACT_IN_12 */, 0, 0, 0, 0,
505 	0, 0, 0, 0, 0, 0, 0, 0,
506 	0, 0, 0, 0, 0, 0, 0, 0,
507 /* 0xc0; 0xc0->0xff: vendor specific */
508 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
509 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
510 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
511 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
512 };
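/*
 * Dispatch sketch (illustrative): the first cdb byte indexes the table
 * above to get an SDEB_I_* value, which in turn indexes opcode_info_arr[]
 * defined further below; entries sharing an index are told apart by
 * service action via their arrp overflow arrays.
 */
#if 0
static const struct opcode_info_t *example_lookup(const u8 *cdb)
{
	return &opcode_info_arr[opcode_ind_arr[cdb[0]]];
}
#endif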
513 
514 /*
515  * The following "response" functions return the SCSI mid-level's 4-byte
516  * tuple-in-an-int. To handle commands with an IMMED bit, for faster
517  * command completion, they can mask their return value with
518  * SDEG_RES_IMMED_MASK.
519  */
520 #define SDEG_RES_IMMED_MASK 0x40000000
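/*
 * Sketch (illustrative): a resp_*() handler that honours an IMMED bit in
 * its cdb could end with
 *	return res | SDEG_RES_IMMED_MASK;
 * so that the result is reported without the usual command delay.
 */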
521 
522 static int resp_inquiry(struct scsi_cmnd *, struct sdebug_dev_info *);
523 static int resp_report_luns(struct scsi_cmnd *, struct sdebug_dev_info *);
524 static int resp_requests(struct scsi_cmnd *, struct sdebug_dev_info *);
525 static int resp_mode_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
526 static int resp_mode_select(struct scsi_cmnd *, struct sdebug_dev_info *);
527 static int resp_log_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
528 static int resp_readcap(struct scsi_cmnd *, struct sdebug_dev_info *);
529 static int resp_read_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
530 static int resp_write_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
531 static int resp_write_scat(struct scsi_cmnd *, struct sdebug_dev_info *);
532 static int resp_start_stop(struct scsi_cmnd *, struct sdebug_dev_info *);
533 static int resp_readcap16(struct scsi_cmnd *, struct sdebug_dev_info *);
534 static int resp_get_lba_status(struct scsi_cmnd *, struct sdebug_dev_info *);
535 static int resp_report_tgtpgs(struct scsi_cmnd *, struct sdebug_dev_info *);
536 static int resp_unmap(struct scsi_cmnd *, struct sdebug_dev_info *);
537 static int resp_rsup_opcodes(struct scsi_cmnd *, struct sdebug_dev_info *);
538 static int resp_rsup_tmfs(struct scsi_cmnd *, struct sdebug_dev_info *);
539 static int resp_verify(struct scsi_cmnd *, struct sdebug_dev_info *);
540 static int resp_write_same_10(struct scsi_cmnd *, struct sdebug_dev_info *);
541 static int resp_write_same_16(struct scsi_cmnd *, struct sdebug_dev_info *);
542 static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *);
543 static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *);
544 static int resp_sync_cache(struct scsi_cmnd *, struct sdebug_dev_info *);
545 static int resp_pre_fetch(struct scsi_cmnd *, struct sdebug_dev_info *);
546 static int resp_report_zones(struct scsi_cmnd *, struct sdebug_dev_info *);
547 static int resp_open_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
548 static int resp_close_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
549 static int resp_finish_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
550 static int resp_rwp_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
551 
552 static int sdebug_do_add_host(bool mk_new_store);
553 static int sdebug_add_host_helper(int per_host_idx);
554 static void sdebug_do_remove_host(bool the_end);
555 static int sdebug_add_store(void);
556 static void sdebug_erase_store(int idx, struct sdeb_store_info *sip);
557 static void sdebug_erase_all_stores(bool apart_from_first);
558 
559 static void sdebug_free_queued_cmd(struct sdebug_queued_cmd *sqcp);
560 
561 /*
562  * The following are overflow arrays for cdbs that "hit" the same index in
563  * the opcode_info_arr array. The most time sensitive (or commonly used) cdb
564  * should be placed in opcode_info_arr[], the others should be placed here.
565  */
566 static const struct opcode_info_t msense_iarr[] = {
567 	{0, 0x1a, 0, F_D_IN, NULL, NULL,
568 	    {6,  0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
569 };
570 
571 static const struct opcode_info_t mselect_iarr[] = {
572 	{0, 0x15, 0, F_D_OUT, NULL, NULL,
573 	    {6,  0xf1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
574 };
575 
576 static const struct opcode_info_t read_iarr[] = {
577 	{0, 0x28, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(10) */
578 	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
579 	     0, 0, 0, 0} },
580 	{0, 0x8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL, /* READ(6) */
581 	    {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
582 	{0, 0xa8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(12) */
583 	    {12,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf,
584 	     0xc7, 0, 0, 0, 0} },
585 };
586 
587 static const struct opcode_info_t write_iarr[] = {
588 	{0, 0x2a, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(10) */
589 	    NULL, {10,  0xfb, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7,
590 		   0, 0, 0, 0, 0, 0} },
591 	{0, 0xa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,   /* WRITE(6) */
592 	    NULL, {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0,
593 		   0, 0, 0} },
594 	{0, 0xaa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(12) */
595 	    NULL, {12,  0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
596 		   0xbf, 0xc7, 0, 0, 0, 0} },
597 };
598 
599 static const struct opcode_info_t verify_iarr[] = {
600 	{0, 0x2f, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,/* VERIFY(10) */
601 	    NULL, {10,  0xf7, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xff, 0xff, 0xc7,
602 		   0, 0, 0, 0, 0, 0} },
603 };
604 
605 static const struct opcode_info_t sa_in_16_iarr[] = {
606 	{0, 0x9e, 0x12, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL,
607 	    {16,  0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
608 	     0xff, 0xff, 0xff, 0, 0xc7} },	/* GET LBA STATUS(16) */
609 };
610 
611 static const struct opcode_info_t vl_iarr[] = {	/* VARIABLE LENGTH */
612 	{0, 0x7f, 0xb, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_dt0,
613 	    NULL, {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0xb, 0xfa,
614 		   0, 0xff, 0xff, 0xff, 0xff} },	/* WRITE(32) */
615 	{0, 0x7f, 0x11, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
616 	    NULL, {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x11, 0xf8,
617 		   0, 0xff, 0xff, 0x0, 0x0} },	/* WRITE SCATTERED(32) */
618 };
619 
620 static const struct opcode_info_t maint_in_iarr[] = {	/* MAINT IN */
621 	{0, 0xa3, 0xc, F_SA_LOW | F_D_IN, resp_rsup_opcodes, NULL,
622 	    {12,  0xc, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
623 	     0xc7, 0, 0, 0, 0} }, /* REPORT SUPPORTED OPERATION CODES */
624 	{0, 0xa3, 0xd, F_SA_LOW | F_D_IN, resp_rsup_tmfs, NULL,
625 	    {12,  0xd, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
626 	     0, 0} },	/* REPORT SUPPORTED TASK MANAGEMENT FUNCTIONS */
627 };
628 
629 static const struct opcode_info_t write_same_iarr[] = {
630 	{0, 0x93, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_write_same_16, NULL,
631 	    {16,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
632 	     0xff, 0xff, 0xff, 0x3f, 0xc7} },		/* WRITE SAME(16) */
633 };
634 
635 static const struct opcode_info_t reserve_iarr[] = {
636 	{0, 0x16, 0, F_D_OUT, NULL, NULL,		/* RESERVE(6) */
637 	    {6,  0x1f, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
638 };
639 
640 static const struct opcode_info_t release_iarr[] = {
641 	{0, 0x17, 0, F_D_OUT, NULL, NULL,		/* RELEASE(6) */
642 	    {6,  0x1f, 0xff, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
643 };
644 
645 static const struct opcode_info_t sync_cache_iarr[] = {
646 	{0, 0x91, 0, F_SYNC_DELAY | F_M_ACCESS, resp_sync_cache, NULL,
647 	    {16,  0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
648 	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* SYNC_CACHE (16) */
649 };
650 
651 static const struct opcode_info_t pre_fetch_iarr[] = {
652 	{0, 0x90, 0, F_SYNC_DELAY | FF_MEDIA_IO, resp_pre_fetch, NULL,
653 	    {16,  0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
654 	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* PRE-FETCH (16) */
655 };
656 
657 static const struct opcode_info_t zone_out_iarr[] = {	/* ZONE OUT(16) */
658 	{0, 0x94, 0x1, F_SA_LOW | F_M_ACCESS, resp_close_zone, NULL,
659 	    {16, 0x1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
660 	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },	/* CLOSE ZONE */
661 	{0, 0x94, 0x2, F_SA_LOW | F_M_ACCESS, resp_finish_zone, NULL,
662 	    {16, 0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
663 	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },	/* FINISH ZONE */
664 	{0, 0x94, 0x4, F_SA_LOW | F_M_ACCESS, resp_rwp_zone, NULL,
665 	    {16, 0x4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
666 	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },  /* RESET WRITE POINTER */
667 };
668 
669 static const struct opcode_info_t zone_in_iarr[] = {	/* ZONE IN(16) */
670 	{0, 0x95, 0x6, F_SA_LOW | F_D_IN | F_M_ACCESS, NULL, NULL,
671 	    {16, 0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
672 	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* REPORT ZONES */
673 };
674 
675 
676 /* This array is accessed via SDEB_I_* values. Make sure all are mapped,
677  * plus the terminating element needed by logic that scans this table,
678  * such as REPORT SUPPORTED OPERATION CODES. */
679 static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEM_P1 + 1] = {
680 /* 0 */
681 	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL,	/* unknown opcodes */
682 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
683 	{0, 0x12, 0, FF_RESPOND | F_D_IN, resp_inquiry, NULL, /* INQUIRY */
684 	    {6,  0xe3, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
685 	{0, 0xa0, 0, FF_RESPOND | F_D_IN, resp_report_luns, NULL,
686 	    {12,  0xe3, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
687 	     0, 0} },					/* REPORT LUNS */
688 	{0, 0x3, 0, FF_RESPOND | F_D_IN, resp_requests, NULL,
689 	    {6,  0xe1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
690 	{0, 0x0, 0, F_M_ACCESS | F_RL_WLUN_OK, NULL, NULL,/* TEST UNIT READY */
691 	    {6,  0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
692 /* 5 */
693 	{ARRAY_SIZE(msense_iarr), 0x5a, 0, F_D_IN,	/* MODE SENSE(10) */
694 	    resp_mode_sense, msense_iarr, {10,  0xf8, 0xff, 0xff, 0, 0, 0,
695 		0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
696 	{ARRAY_SIZE(mselect_iarr), 0x55, 0, F_D_OUT,	/* MODE SELECT(10) */
697 	    resp_mode_select, mselect_iarr, {10,  0xf1, 0, 0, 0, 0, 0, 0xff,
698 		0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
699 	{0, 0x4d, 0, F_D_IN, resp_log_sense, NULL,	/* LOG SENSE */
700 	    {10,  0xe3, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0,
701 	     0, 0, 0} },
702 	{0, 0x25, 0, F_D_IN, resp_readcap, NULL,    /* READ CAPACITY(10) */
703 	    {10,  0xe1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0x1, 0xc7, 0, 0, 0, 0,
704 	     0, 0} },
705 	{ARRAY_SIZE(read_iarr), 0x88, 0, F_D_IN | FF_MEDIA_IO, /* READ(16) */
706 	    resp_read_dt0, read_iarr, {16,  0xfe, 0xff, 0xff, 0xff, 0xff,
707 	    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
708 /* 10 */
709 	{ARRAY_SIZE(write_iarr), 0x8a, 0, F_D_OUT | FF_MEDIA_IO,
710 	    resp_write_dt0, write_iarr,			/* WRITE(16) */
711 		{16,  0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
712 		 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
713 	{0, 0x1b, 0, F_SSU_DELAY, resp_start_stop, NULL,/* START STOP UNIT */
714 	    {6,  0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
715 	{ARRAY_SIZE(sa_in_16_iarr), 0x9e, 0x10, F_SA_LOW | F_D_IN,
716 	    resp_readcap16, sa_in_16_iarr, /* SA_IN(16), READ CAPACITY(16) */
717 		{16,  0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
718 		 0xff, 0xff, 0xff, 0xff, 0x1, 0xc7} },
719 	{0, 0x9f, 0x12, F_SA_LOW | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
720 	    NULL, {16,  0x12, 0xf9, 0x0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff,
721 	    0xff, 0xff, 0xff, 0xff, 0xc7} },  /* SA_OUT(16), WRITE SCAT(16) */
722 	{ARRAY_SIZE(maint_in_iarr), 0xa3, 0xa, F_SA_LOW | F_D_IN,
723 	    resp_report_tgtpgs,	/* MAINT IN, REPORT TARGET PORT GROUPS */
724 		maint_in_iarr, {12,  0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff,
725 				0xff, 0, 0xc7, 0, 0, 0, 0} },
726 /* 15 */
727 	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */
728 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
729 	{ARRAY_SIZE(verify_iarr), 0x8f, 0,
730 	    F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,	/* VERIFY(16) */
731 	    verify_iarr, {16,  0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
732 			  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },
733 	{ARRAY_SIZE(vl_iarr), 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_MEDIA_IO,
734 	    resp_read_dt0, vl_iarr,	/* VARIABLE LENGTH, READ(32) */
735 	    {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x9, 0xfe, 0, 0xff, 0xff,
736 	     0xff, 0xff} },
737 	{ARRAY_SIZE(reserve_iarr), 0x56, 0, F_D_OUT,
738 	    NULL, reserve_iarr,	/* RESERVE(10) <no response function> */
739 	    {10,  0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
740 	     0} },
741 	{ARRAY_SIZE(release_iarr), 0x57, 0, F_D_OUT,
742 	    NULL, release_iarr, /* RELEASE(10) <no response function> */
743 	    {10,  0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
744 	     0} },
745 /* 20 */
746 	{0, 0x1e, 0, 0, NULL, NULL, /* ALLOW REMOVAL */
747 	    {6,  0, 0, 0, 0x3, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
748 	{0, 0x1, 0, 0, resp_start_stop, NULL, /* REWIND ?? */
749 	    {6,  0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
750 	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */
751 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
752 	{0, 0x1d, 0, F_D_OUT, NULL, NULL,	/* SEND DIAGNOSTIC */
753 	    {6,  0xf7, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
754 	{0, 0x42, 0, F_D_OUT | FF_MEDIA_IO, resp_unmap, NULL, /* UNMAP */
755 	    {10,  0x1, 0, 0, 0, 0, 0x3f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
756 /* 25 */
757 	{0, 0x3b, 0, F_D_OUT_MAYBE, resp_write_buffer, NULL,
758 	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0,
759 	     0, 0, 0, 0} },			/* WRITE_BUFFER */
760 	{ARRAY_SIZE(write_same_iarr), 0x41, 0, F_D_OUT_MAYBE | FF_MEDIA_IO,
761 	    resp_write_same_10, write_same_iarr,	/* WRITE SAME(10) */
762 		{10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0,
763 		 0, 0, 0, 0, 0} },
764 	{ARRAY_SIZE(sync_cache_iarr), 0x35, 0, F_SYNC_DELAY | F_M_ACCESS,
765 	    resp_sync_cache, sync_cache_iarr,
766 	    {10,  0x7, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
767 	     0, 0, 0, 0} },			/* SYNC_CACHE (10) */
768 	{0, 0x89, 0, F_D_OUT | FF_MEDIA_IO, resp_comp_write, NULL,
769 	    {16,  0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0,
770 	     0, 0xff, 0x3f, 0xc7} },		/* COMPARE AND WRITE */
771 	{ARRAY_SIZE(pre_fetch_iarr), 0x34, 0, F_SYNC_DELAY | FF_MEDIA_IO,
772 	    resp_pre_fetch, pre_fetch_iarr,
773 	    {10,  0x2, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
774 	     0, 0, 0, 0} },			/* PRE-FETCH (10) */
775 
776 /* 30 */
777 	{ARRAY_SIZE(zone_out_iarr), 0x94, 0x3, F_SA_LOW | F_M_ACCESS,
778 	    resp_open_zone, zone_out_iarr, /* ZONE_OUT(16), OPEN ZONE */
779 		{16,  0x3 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
780 		 0xff, 0xff, 0x0, 0x0, 0xff, 0xff, 0x1, 0xc7} },
781 	{ARRAY_SIZE(zone_in_iarr), 0x95, 0x0, F_SA_LOW | F_M_ACCESS,
782 	    resp_report_zones, zone_in_iarr, /* ZONE_IN(16), REPORT ZONES */
783 		{16,  0x0 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
784 		 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xc7} },
785 /* sentinel */
786 	{0xff, 0, 0, 0, NULL, NULL,		/* terminating element */
787 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
788 };
789 
790 static int sdebug_num_hosts;
791 static int sdebug_add_host = DEF_NUM_HOST;  /* in sysfs this is relative */
792 static int sdebug_ato = DEF_ATO;
793 static int sdebug_cdb_len = DEF_CDB_LEN;
794 static int sdebug_jdelay = DEF_JDELAY;	/* if > 0 then unit is jiffies */
795 static int sdebug_dev_size_mb = DEF_DEV_SIZE_PRE_INIT;
796 static int sdebug_dif = DEF_DIF;
797 static int sdebug_dix = DEF_DIX;
798 static int sdebug_dsense = DEF_D_SENSE;
799 static int sdebug_every_nth = DEF_EVERY_NTH;
800 static int sdebug_fake_rw = DEF_FAKE_RW;
801 static unsigned int sdebug_guard = DEF_GUARD;
802 static int sdebug_host_max_queue;	/* per host */
803 static int sdebug_lowest_aligned = DEF_LOWEST_ALIGNED;
804 static int sdebug_max_luns = DEF_MAX_LUNS;
805 static int sdebug_max_queue = SDEBUG_CANQUEUE;	/* per submit queue */
806 static unsigned int sdebug_medium_error_start = OPT_MEDIUM_ERR_ADDR;
807 static int sdebug_medium_error_count = OPT_MEDIUM_ERR_NUM;
808 static int sdebug_ndelay = DEF_NDELAY;	/* if > 0 then unit is nanoseconds */
809 static int sdebug_no_lun_0 = DEF_NO_LUN_0;
810 static int sdebug_no_uld;
811 static int sdebug_num_parts = DEF_NUM_PARTS;
812 static int sdebug_num_tgts = DEF_NUM_TGTS; /* targets per host */
813 static int sdebug_opt_blks = DEF_OPT_BLKS;
814 static int sdebug_opts = DEF_OPTS;
815 static int sdebug_physblk_exp = DEF_PHYSBLK_EXP;
816 static int sdebug_opt_xferlen_exp = DEF_OPT_XFERLEN_EXP;
817 static int sdebug_ptype = DEF_PTYPE; /* SCSI peripheral device type */
818 static int sdebug_scsi_level = DEF_SCSI_LEVEL;
819 static int sdebug_sector_size = DEF_SECTOR_SIZE;
820 static int sdeb_tur_ms_to_ready = DEF_TUR_MS_TO_READY;
821 static int sdebug_virtual_gb = DEF_VIRTUAL_GB;
822 static int sdebug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
823 static unsigned int sdebug_lbpu = DEF_LBPU;
824 static unsigned int sdebug_lbpws = DEF_LBPWS;
825 static unsigned int sdebug_lbpws10 = DEF_LBPWS10;
826 static unsigned int sdebug_lbprz = DEF_LBPRZ;
827 static unsigned int sdebug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
828 static unsigned int sdebug_unmap_granularity = DEF_UNMAP_GRANULARITY;
829 static unsigned int sdebug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
830 static unsigned int sdebug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
831 static unsigned int sdebug_write_same_length = DEF_WRITESAME_LENGTH;
832 static int sdebug_uuid_ctl = DEF_UUID_CTL;
833 static bool sdebug_random = DEF_RANDOM;
834 static bool sdebug_per_host_store = DEF_PER_HOST_STORE;
835 static bool sdebug_removable = DEF_REMOVABLE;
836 static bool sdebug_clustering;
837 static bool sdebug_host_lock = DEF_HOST_LOCK;
838 static bool sdebug_strict = DEF_STRICT;
839 static bool sdebug_any_injecting_opt;
840 static bool sdebug_no_rwlock;
841 static bool sdebug_verbose;
842 static bool have_dif_prot;
843 static bool write_since_sync;
844 static bool sdebug_statistics = DEF_STATISTICS;
845 static bool sdebug_wp;
846 static bool sdebug_allow_restart;
847 static enum {
848 	BLK_ZONED_NONE	= 0,
849 	BLK_ZONED_HA	= 1,
850 	BLK_ZONED_HM	= 2,
851 } sdeb_zbc_model = BLK_ZONED_NONE;
852 static char *sdeb_zbc_model_s;
853 
854 enum sam_lun_addr_method {SAM_LUN_AM_PERIPHERAL = 0x0,
855 			  SAM_LUN_AM_FLAT = 0x1,
856 			  SAM_LUN_AM_LOGICAL_UNIT = 0x2,
857 			  SAM_LUN_AM_EXTENDED = 0x3};
858 static enum sam_lun_addr_method sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
859 static int sdebug_lun_am_i = (int)SAM_LUN_AM_PERIPHERAL;
860 
861 static unsigned int sdebug_store_sectors;
862 static sector_t sdebug_capacity;	/* in sectors */
863 
864 /* old BIOS stuff, kernel may get rid of them but some mode sense pages
865    may still need them */
866 static int sdebug_heads;		/* heads per disk */
867 static int sdebug_cylinders_per;	/* cylinders per surface */
868 static int sdebug_sectors_per;		/* sectors per cylinder */
869 
870 static LIST_HEAD(sdebug_host_list);
871 static DEFINE_MUTEX(sdebug_host_list_mutex);
872 
873 static struct xarray per_store_arr;
874 static struct xarray *per_store_ap = &per_store_arr;
875 static int sdeb_first_idx = -1;		/* invalid index ==> none created */
876 static int sdeb_most_recent_idx = -1;
877 static DEFINE_RWLOCK(sdeb_fake_rw_lck);	/* need a RW lock when fake_rw=1 */
878 
879 static unsigned long map_size;
880 static int num_aborts;
881 static int num_dev_resets;
882 static int num_target_resets;
883 static int num_bus_resets;
884 static int num_host_resets;
885 static int dix_writes;
886 static int dix_reads;
887 static int dif_errors;
888 
889 /* ZBC global data */
890 static bool sdeb_zbc_in_use;	/* true for host-aware and host-managed disks */
891 static int sdeb_zbc_zone_cap_mb;
892 static int sdeb_zbc_zone_size_mb;
893 static int sdeb_zbc_max_open = DEF_ZBC_MAX_OPEN_ZONES;
894 static int sdeb_zbc_nr_conv = DEF_ZBC_NR_CONV_ZONES;
895 
896 static int submit_queues = DEF_SUBMIT_QUEUES;  /* > 1 for multi-queue (mq) */
897 static int poll_queues; /* io_uring iopoll interface */
898 
899 static char sdebug_proc_name[] = MY_NAME;
900 static const char *my_name = MY_NAME;
901 
902 static struct bus_type pseudo_lld_bus;
903 
904 static struct device_driver sdebug_driverfs_driver = {
905 	.name 		= sdebug_proc_name,
906 	.bus		= &pseudo_lld_bus,
907 };
908 
909 static const int check_condition_result =
910 	SAM_STAT_CHECK_CONDITION;
911 
912 static const int illegal_condition_result =
913 	(DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;
914 
915 static const int device_qfull_result =
916 	(DID_ABORT << 16) | SAM_STAT_TASK_SET_FULL;
917 
918 static const int condition_met_result = SAM_STAT_CONDITION_MET;
919 
920 static struct dentry *sdebug_debugfs_root;
921 
922 static void sdebug_err_free(struct rcu_head *head)
923 {
924 	struct sdebug_err_inject *inject =
925 		container_of(head, typeof(*inject), rcu);
926 
927 	kfree(inject);
928 }
929 
930 static void sdebug_err_add(struct scsi_device *sdev, struct sdebug_err_inject *new)
931 {
932 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdev->hostdata;
933 	struct sdebug_err_inject *err;
934 
935 	spin_lock(&devip->list_lock);
936 	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
937 		if (err->type == new->type && err->cmd == new->cmd) {
938 			list_del_rcu(&err->list);
939 			call_rcu(&err->rcu, sdebug_err_free);
940 		}
941 	}
942 
943 	list_add_tail_rcu(&new->list, &devip->inject_err_list);
944 	spin_unlock(&devip->list_lock);
945 }
946 
947 static int sdebug_err_remove(struct scsi_device *sdev, const char *buf, size_t count)
948 {
949 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdev->hostdata;
950 	struct sdebug_err_inject *err;
951 	int type;
952 	unsigned char cmd;
953 
954 	if (sscanf(buf, "- %d %hhx", &type, &cmd) != 2) {
955 		kfree(buf);
956 		return -EINVAL;
957 	}
958 
959 	spin_lock(&devip->list_lock);
960 	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
961 		if (err->type == type && err->cmd == cmd) {
962 			list_del_rcu(&err->list);
963 			call_rcu(&err->rcu, sdebug_err_free);
964 			spin_unlock(&devip->list_lock);
965 			kfree(buf);
966 			return count;
967 		}
968 	}
969 	spin_unlock(&devip->list_lock);
970 
971 	kfree(buf);
972 	return -EINVAL;
973 }
974 
975 static int sdebug_error_show(struct seq_file *m, void *p)
976 {
977 	struct scsi_device *sdev = (struct scsi_device *)m->private;
978 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdev->hostdata;
979 	struct sdebug_err_inject *err;
980 
981 	seq_puts(m, "Type\tCount\tCommand\n");
982 
983 	rcu_read_lock();
984 	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
985 		switch (err->type) {
986 		case ERR_TMOUT_CMD:
987 		case ERR_ABORT_CMD_FAILED:
988 		case ERR_LUN_RESET_FAILED:
989 			seq_printf(m, "%d\t%d\t0x%x\n", err->type, err->cnt,
990 				err->cmd);
991 		break;
992 
993 		case ERR_FAIL_QUEUE_CMD:
994 			seq_printf(m, "%d\t%d\t0x%x\t0x%x\n", err->type,
995 				err->cnt, err->cmd, err->queuecmd_ret);
996 		break;
997 
998 		case ERR_FAIL_CMD:
999 			seq_printf(m, "%d\t%d\t0x%x\t0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
1000 				err->type, err->cnt, err->cmd,
1001 				err->host_byte, err->driver_byte,
1002 				err->status_byte, err->sense_key,
1003 				err->asc, err->asq);
1004 		break;
1005 		}
1006 	}
1007 	rcu_read_unlock();
1008 
1009 	return 0;
1010 }
1011 
1012 static int sdebug_error_open(struct inode *inode, struct file *file)
1013 {
1014 	return single_open(file, sdebug_error_show, inode->i_private);
1015 }
1016 
1017 static ssize_t sdebug_error_write(struct file *file, const char __user *ubuf,
1018 		size_t count, loff_t *ppos)
1019 {
1020 	char *buf;
1021 	int inject_type;	/* matches the "%d" conversion below */
1022 	struct sdebug_err_inject *inject;
1023 	struct scsi_device *sdev = (struct scsi_device *)file->f_inode->i_private;
1024 
1025 	buf = kzalloc(count + 1, GFP_KERNEL);
1026 	if (!buf)
1027 		return -ENOMEM;
1028 
1029 	if (copy_from_user(buf, ubuf, count)) {
1030 		kfree(buf);
1031 		return -EFAULT;
1032 	}
1033 
1034 	if (buf[0] == '-')
1035 		return sdebug_err_remove(sdev, buf, count);
1036 
1037 	if (sscanf(buf, "%d", &inject_type) != 1) {
1038 		kfree(buf);
1039 		return -EINVAL;
1040 	}
1041 
1042 	inject = kzalloc(sizeof(struct sdebug_err_inject), GFP_KERNEL);
1043 	if (!inject) {
1044 		kfree(buf);
1045 		return -ENOMEM;
1046 	}
1047 
1048 	switch (inject_type) {
1049 	case ERR_TMOUT_CMD:
1050 	case ERR_ABORT_CMD_FAILED:
1051 	case ERR_LUN_RESET_FAILED:
1052 		if (sscanf(buf, "%d %d %hhx", &inject->type, &inject->cnt,
1053 			   &inject->cmd) != 3)
1054 			goto out_error;
1055 	break;
1056 
1057 	case ERR_FAIL_QUEUE_CMD:
1058 		if (sscanf(buf, "%d %d %hhx %x", &inject->type, &inject->cnt,
1059 			   &inject->cmd, &inject->queuecmd_ret) != 4)
1060 			goto out_error;
1061 	break;
1062 
1063 	case ERR_FAIL_CMD:
1064 		if (sscanf(buf, "%d %d %hhx %hhx %hhx %hhx %hhx %hhx %hhx",
1065 			   &inject->type, &inject->cnt, &inject->cmd,
1066 			   &inject->host_byte, &inject->driver_byte,
1067 			   &inject->status_byte, &inject->sense_key,
1068 			   &inject->asc, &inject->asq) != 9)
1069 			goto out_error;
1070 	break;
1071 
1072 	default:
1073 		goto out_error;
1075 	}
1076 
1077 	kfree(buf);
1078 	sdebug_err_add(sdev, inject);
1079 
1080 	return count;
1081 
1082 out_error:
1083 	kfree(buf);
1084 	kfree(inject);
1085 	return -EINVAL;
1086 }
1087 
1088 static const struct file_operations sdebug_error_fops = {
1089 	.open	= sdebug_error_open,
1090 	.read	= seq_read,
1091 	.write	= sdebug_error_write,
1092 	.release = single_release,
1093 };
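/*
 * Usage example (illustrative; the debugfs mount point and the h:c:t:l
 * device directory name are assumed): to make INQUIRY (opcode 0x12) on one
 * device time out once, then remove that rule again:
 *
 *	echo "0 1 0x12" > /sys/kernel/debug/scsi_debug/0:0:0:0/error
 *	echo "- 0 0x12" > /sys/kernel/debug/scsi_debug/0:0:0:0/error
 *
 * The leading fields are <type> <count> <opcode> as parsed by
 * sdebug_error_write() above; ERR_FAIL_QUEUE_CMD and ERR_FAIL_CMD take the
 * extra fields shown in their sscanf() formats.
 */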
1094 
1095 static int sdebug_target_reset_fail_show(struct seq_file *m, void *p)
1096 {
1097 	struct scsi_target *starget = (struct scsi_target *)m->private;
1098 	struct sdebug_target_info *targetip =
1099 		(struct sdebug_target_info *)starget->hostdata;
1100 
1101 	if (targetip)
1102 		seq_printf(m, "%c\n", targetip->reset_fail ? 'Y' : 'N');
1103 
1104 	return 0;
1105 }
1106 
1107 static int sdebug_target_reset_fail_open(struct inode *inode, struct file *file)
1108 {
1109 	return single_open(file, sdebug_target_reset_fail_show, inode->i_private);
1110 }
1111 
1112 static ssize_t sdebug_target_reset_fail_write(struct file *file,
1113 		const char __user *ubuf, size_t count, loff_t *ppos)
1114 {
1115 	int ret;
1116 	struct scsi_target *starget =
1117 		(struct scsi_target *)file->f_inode->i_private;
1118 	struct sdebug_target_info *targetip =
1119 		(struct sdebug_target_info *)starget->hostdata;
1120 
1121 	if (targetip) {
1122 		ret = kstrtobool_from_user(ubuf, count, &targetip->reset_fail);
1123 		return ret < 0 ? ret : count;
1124 	}
1125 	return -ENODEV;
1126 }
1127 
1128 static const struct file_operations sdebug_target_reset_fail_fops = {
1129 	.open	= sdebug_target_reset_fail_open,
1130 	.read	= seq_read,
1131 	.write	= sdebug_target_reset_fail_write,
1132 	.release = single_release,
1133 };
1134 
1135 static int sdebug_target_alloc(struct scsi_target *starget)
1136 {
1137 	struct sdebug_target_info *targetip;
1138 
1139 	targetip = kzalloc(sizeof(struct sdebug_target_info), GFP_KERNEL);
1140 	if (!targetip)
1141 		return -ENOMEM;
1142 
1143 	targetip->debugfs_entry = debugfs_create_dir(dev_name(&starget->dev),
1144 				sdebug_debugfs_root);
1145 
1146 	debugfs_create_file("fail_reset", 0600, targetip->debugfs_entry, starget,
1147 				&sdebug_target_reset_fail_fops);
1148 
1149 	starget->hostdata = targetip;
1150 
1151 	return 0;
1152 }
1153 
1154 static void sdebug_target_cleanup_async(void *data, async_cookie_t cookie)
1155 {
1156 	struct sdebug_target_info *targetip = data;
1157 
1158 	debugfs_remove(targetip->debugfs_entry);
1159 	kfree(targetip);
1160 }
1161 
1162 static void sdebug_target_destroy(struct scsi_target *starget)
1163 {
1164 	struct sdebug_target_info *targetip;
1165 
1166 	targetip = (struct sdebug_target_info *)starget->hostdata;
1167 	if (targetip) {
1168 		starget->hostdata = NULL;
1169 		async_schedule(sdebug_target_cleanup_async, targetip);
1170 	}
1171 }
1172 
1173 /* Only do the extra work involved in logical block provisioning if one or
1174  * more of the lbpu, lbpws or lbpws10 parameters are given and we are doing
1175  * real reads and writes (i.e. not skipping them for speed).
1176  */
1177 static inline bool scsi_debug_lbp(void)
1178 {
1179 	return 0 == sdebug_fake_rw &&
1180 		(sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10);
1181 }
1182 
1183 static void *lba2fake_store(struct sdeb_store_info *sip,
1184 			    unsigned long long lba)
1185 {
1186 	struct sdeb_store_info *lsip = sip;
1187 
1188 	lba = do_div(lba, sdebug_store_sectors);
1189 	if (!sip || !sip->storep) {
1190 		WARN_ON_ONCE(true);
1191 		lsip = xa_load(per_store_ap, 0);  /* should never be NULL */
1192 	}
1193 	return lsip->storep + lba * sdebug_sector_size;
1194 }
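/*
 * Worked example: with sdebug_store_sectors == 0x8000, a request for
 * lba 0x12345 wraps to 0x12345 % 0x8000 == 0x2345, so a store smaller than
 * the advertised capacity is reused modulo its size; the returned address
 * is that sector times sdebug_sector_size into storep.
 */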
1195 
1196 static struct t10_pi_tuple *dif_store(struct sdeb_store_info *sip,
1197 				      sector_t sector)
1198 {
1199 	sector = sector_div(sector, sdebug_store_sectors);
1200 
1201 	return sip->dif_storep + sector;
1202 }
1203 
1204 static void sdebug_max_tgts_luns(void)
1205 {
1206 	struct sdebug_host_info *sdbg_host;
1207 	struct Scsi_Host *hpnt;
1208 
1209 	mutex_lock(&sdebug_host_list_mutex);
1210 	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
1211 		hpnt = sdbg_host->shost;
1212 		if ((hpnt->this_id >= 0) &&
1213 		    (sdebug_num_tgts > hpnt->this_id))
1214 			hpnt->max_id = sdebug_num_tgts + 1;
1215 		else
1216 			hpnt->max_id = sdebug_num_tgts;
1217 		/* sdebug_max_luns; */
1218 		hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
1219 	}
1220 	mutex_unlock(&sdebug_host_list_mutex);
1221 }
1222 
1223 enum sdeb_cmd_data {SDEB_IN_DATA = 0, SDEB_IN_CDB = 1};
1224 
1225 /* Set in_bit to -1 to indicate no bit position of invalid field */
1226 static void mk_sense_invalid_fld(struct scsi_cmnd *scp,
1227 				 enum sdeb_cmd_data c_d,
1228 				 int in_byte, int in_bit)
1229 {
1230 	unsigned char *sbuff;
1231 	u8 sks[4];
1232 	int sl, asc;
1233 
1234 	sbuff = scp->sense_buffer;
1235 	if (!sbuff) {
1236 		sdev_printk(KERN_ERR, scp->device,
1237 			    "%s: sense_buffer is NULL\n", __func__);
1238 		return;
1239 	}
1240 	asc = c_d ? INVALID_FIELD_IN_CDB : INVALID_FIELD_IN_PARAM_LIST;
1241 	memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
1242 	scsi_build_sense(scp, sdebug_dsense, ILLEGAL_REQUEST, asc, 0);
1243 	memset(sks, 0, sizeof(sks));
1244 	sks[0] = 0x80;
1245 	if (c_d)
1246 		sks[0] |= 0x40;
1247 	if (in_bit >= 0) {
1248 		sks[0] |= 0x8;
1249 		sks[0] |= 0x7 & in_bit;
1250 	}
1251 	put_unaligned_be16(in_byte, sks + 1);
1252 	if (sdebug_dsense) {
1253 		sl = sbuff[7] + 8;
1254 		sbuff[7] = sl;
1255 		sbuff[sl] = 0x2;
1256 		sbuff[sl + 1] = 0x6;
1257 		memcpy(sbuff + sl + 4, sks, 3);
1258 	} else
1259 		memcpy(sbuff + 15, sks, 3);
1260 	if (sdebug_verbose)
1261 		sdev_printk(KERN_INFO, scp->device, "%s:  [sense_key,asc,ascq"
1262 			    "]: [0x5,0x%x,0x0] %c byte=%d, bit=%d\n",
1263 			    my_name, asc, c_d ? 'C' : 'D', in_byte, in_bit);
1264 }
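/*
 * Worked example: for an invalid field at cdb byte 2, bit 5, the sense-key
 * specific bytes built above become sks[0] == 0x80|0x40|0x8|0x5 == 0xcd and
 * sks[1..2] == 0x00 0x02, i.e. SKSV and C/D set, bit pointer valid, pointing
 * at bit 5 of cdb byte 2.
 */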
1265 
1266 static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq)
1267 {
1268 	if (!scp->sense_buffer) {
1269 		sdev_printk(KERN_ERR, scp->device,
1270 			    "%s: sense_buffer is NULL\n", __func__);
1271 		return;
1272 	}
1273 	memset(scp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
1274 
1275 	scsi_build_sense(scp, sdebug_dsense, key, asc, asq);
1276 
1277 	if (sdebug_verbose)
1278 		sdev_printk(KERN_INFO, scp->device,
1279 			    "%s:  [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
1280 			    my_name, key, asc, asq);
1281 }
1282 
1283 static void mk_sense_invalid_opcode(struct scsi_cmnd *scp)
1284 {
1285 	mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
1286 }
1287 
1288 static int scsi_debug_ioctl(struct scsi_device *dev, unsigned int cmd,
1289 			    void __user *arg)
1290 {
1291 	if (sdebug_verbose) {
1292 		if (0x1261 == cmd)
1293 			sdev_printk(KERN_INFO, dev,
1294 				    "%s: BLKFLSBUF [0x1261]\n", __func__);
1295 		else if (0x5331 == cmd)
1296 			sdev_printk(KERN_INFO, dev,
1297 				    "%s: CDROM_GET_CAPABILITY [0x5331]\n",
1298 				    __func__);
1299 		else
1300 			sdev_printk(KERN_INFO, dev, "%s: cmd=0x%x\n",
1301 				    __func__, cmd);
1302 	}
1303 	return -EINVAL;
1304 	/* return -ENOTTY; // correct return but upsets fdisk */
1305 }
1306 
1307 static void config_cdb_len(struct scsi_device *sdev)
1308 {
1309 	switch (sdebug_cdb_len) {
1310 	case 6:	/* suggest 6 byte READ, WRITE and MODE SENSE/SELECT */
1311 		sdev->use_10_for_rw = false;
1312 		sdev->use_16_for_rw = false;
1313 		sdev->use_10_for_ms = false;
1314 		break;
1315 	case 10: /* suggest 10 byte RWs and 6 byte MODE SENSE/SELECT */
1316 		sdev->use_10_for_rw = true;
1317 		sdev->use_16_for_rw = false;
1318 		sdev->use_10_for_ms = false;
1319 		break;
1320 	case 12: /* suggest 10 byte RWs and 10 byte MODE SENSE/SELECT */
1321 		sdev->use_10_for_rw = true;
1322 		sdev->use_16_for_rw = false;
1323 		sdev->use_10_for_ms = true;
1324 		break;
1325 	case 16:
1326 		sdev->use_10_for_rw = false;
1327 		sdev->use_16_for_rw = true;
1328 		sdev->use_10_for_ms = true;
1329 		break;
1330 	case 32: /* No knobs to suggest this so same as 16 for now */
1331 		sdev->use_10_for_rw = false;
1332 		sdev->use_16_for_rw = true;
1333 		sdev->use_10_for_ms = true;
1334 		break;
1335 	default:
1336 		pr_warn("unexpected cdb_len=%d, force to 10\n",
1337 			sdebug_cdb_len);
1338 		sdev->use_10_for_rw = true;
1339 		sdev->use_16_for_rw = false;
1340 		sdev->use_10_for_ms = false;
1341 		sdebug_cdb_len = 10;
1342 		break;
1343 	}
1344 }
1345 
1346 static void all_config_cdb_len(void)
1347 {
1348 	struct sdebug_host_info *sdbg_host;
1349 	struct Scsi_Host *shost;
1350 	struct scsi_device *sdev;
1351 
1352 	mutex_lock(&sdebug_host_list_mutex);
1353 	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
1354 		shost = sdbg_host->shost;
1355 		shost_for_each_device(sdev, shost) {
1356 			config_cdb_len(sdev);
1357 		}
1358 	}
1359 	mutex_unlock(&sdebug_host_list_mutex);
1360 }
1361 
1362 static void clear_luns_changed_on_target(struct sdebug_dev_info *devip)
1363 {
1364 	struct sdebug_host_info *sdhp = devip->sdbg_host;
1365 	struct sdebug_dev_info *dp;
1366 
1367 	list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
1368 		if ((devip->sdbg_host == dp->sdbg_host) &&
1369 		    (devip->target == dp->target)) {
1370 			clear_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
1371 		}
1372 	}
1373 }
1374 
1375 static int make_ua(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1376 {
1377 	int k;
1378 
1379 	k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS);
1380 	if (k != SDEBUG_NUM_UAS) {
1381 		const char *cp = NULL;
1382 
1383 		switch (k) {
1384 		case SDEBUG_UA_POR:
1385 			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
1386 					POWER_ON_RESET_ASCQ);
1387 			if (sdebug_verbose)
1388 				cp = "power on reset";
1389 			break;
1390 		case SDEBUG_UA_POOCCUR:
1391 			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
1392 					POWER_ON_OCCURRED_ASCQ);
1393 			if (sdebug_verbose)
1394 				cp = "power on occurred";
1395 			break;
1396 		case SDEBUG_UA_BUS_RESET:
1397 			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
1398 					BUS_RESET_ASCQ);
1399 			if (sdebug_verbose)
1400 				cp = "bus reset";
1401 			break;
1402 		case SDEBUG_UA_MODE_CHANGED:
1403 			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
1404 					MODE_CHANGED_ASCQ);
1405 			if (sdebug_verbose)
1406 				cp = "mode parameters changed";
1407 			break;
1408 		case SDEBUG_UA_CAPACITY_CHANGED:
1409 			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
1410 					CAPACITY_CHANGED_ASCQ);
1411 			if (sdebug_verbose)
1412 				cp = "capacity data changed";
1413 			break;
1414 		case SDEBUG_UA_MICROCODE_CHANGED:
1415 			mk_sense_buffer(scp, UNIT_ATTENTION,
1416 					TARGET_CHANGED_ASC,
1417 					MICROCODE_CHANGED_ASCQ);
1418 			if (sdebug_verbose)
1419 				cp = "microcode has been changed";
1420 			break;
1421 		case SDEBUG_UA_MICROCODE_CHANGED_WO_RESET:
1422 			mk_sense_buffer(scp, UNIT_ATTENTION,
1423 					TARGET_CHANGED_ASC,
1424 					MICROCODE_CHANGED_WO_RESET_ASCQ);
1425 			if (sdebug_verbose)
1426 				cp = "microcode has been changed without reset";
1427 			break;
1428 		case SDEBUG_UA_LUNS_CHANGED:
1429 			/*
1430 			 * SPC-3 behavior is to report a UNIT ATTENTION with
1431 			 * ASC/ASCQ REPORTED LUNS DATA HAS CHANGED on every LUN
1432 			 * on the target, until a REPORT LUNS command is
1433 			 * received.  SPC-4 behavior is to report it only once.
1434 			 * NOTE:  sdebug_scsi_level does not use the same
1435 			 * values as struct scsi_device->scsi_level.
1436 			 */
1437 			if (sdebug_scsi_level >= 6)	/* SPC-4 and above */
1438 				clear_luns_changed_on_target(devip);
1439 			mk_sense_buffer(scp, UNIT_ATTENTION,
1440 					TARGET_CHANGED_ASC,
1441 					LUNS_CHANGED_ASCQ);
1442 			if (sdebug_verbose)
1443 				cp = "reported luns data has changed";
1444 			break;
1445 		default:
1446 			pr_warn("unexpected unit attention code=%d\n", k);
1447 			if (sdebug_verbose)
1448 				cp = "unknown";
1449 			break;
1450 		}
1451 		clear_bit(k, devip->uas_bm);
1452 		if (sdebug_verbose)
1453 			sdev_printk(KERN_INFO, scp->device,
1454 				   "%s reports: Unit attention: %s\n",
1455 				   my_name, cp);
1456 		return check_condition_result;
1457 	}
1458 	return 0;
1459 }
1460 
1461 /* Build SCSI "data-in" buffer. Returns 0 if ok else (DID_ERROR << 16). */
1462 static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
1463 				int arr_len)
1464 {
1465 	int act_len;
1466 	struct scsi_data_buffer *sdb = &scp->sdb;
1467 
1468 	if (!sdb->length)
1469 		return 0;
1470 	if (scp->sc_data_direction != DMA_FROM_DEVICE)
1471 		return DID_ERROR << 16;
1472 
1473 	act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
1474 				      arr, arr_len);
1475 	scsi_set_resid(scp, scsi_bufflen(scp) - act_len);
1476 
1477 	return 0;
1478 }
1479 
1480 /* Partial build of SCSI "data-in" buffer. Returns 0 if ok else
1481  * (DID_ERROR << 16). Can write to an offset within the data-in buffer;
1482  * multiple calls need not write in ascending offset order. Assumes resid
1483  * is set to scsi_bufflen() prior to any calls.
1484  */
1485 static int p_fill_from_dev_buffer(struct scsi_cmnd *scp, const void *arr,
1486 				  int arr_len, unsigned int off_dst)
1487 {
1488 	unsigned int act_len, n;
1489 	struct scsi_data_buffer *sdb = &scp->sdb;
1490 	off_t skip = off_dst;
1491 
1492 	if (sdb->length <= off_dst)
1493 		return 0;
1494 	if (scp->sc_data_direction != DMA_FROM_DEVICE)
1495 		return DID_ERROR << 16;
1496 
1497 	act_len = sg_pcopy_from_buffer(sdb->table.sgl, sdb->table.nents,
1498 				       arr, arr_len, skip);
1499 	pr_debug("off_dst=%u, scsi_bufflen=%u, act_len=%u, resid=%d\n",
1500 		 off_dst, scsi_bufflen(scp), act_len, scsi_get_resid(scp));
1502 	n = scsi_bufflen(scp) - (off_dst + act_len);
1503 	scsi_set_resid(scp, min_t(u32, scsi_get_resid(scp), n));
1504 	return 0;
1505 }
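
/*
 * Example of the resid rule above: with scsi_bufflen() == 512, writing 64
 * bytes at off_dst 128 sets resid to 512 - (128 + 64) = 320; a later write
 * at off_dst 0 computes n = 448 but takes the min() with the current
 * resid, so resid only ever shrinks toward the highest byte written.
 */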
1506 
1507 /* Fetches from SCSI "data-out" buffer. Returns number of bytes fetched into
1508  * 'arr' or -1 if error.
1509  */
1510 static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
1511 			       int arr_len)
1512 {
1513 	if (!scsi_bufflen(scp))
1514 		return 0;
1515 	if (scp->sc_data_direction != DMA_TO_DEVICE)
1516 		return -1;
1517 
1518 	return scsi_sg_copy_to_buffer(scp, arr, arr_len);
1519 }
1520 
1521 
1522 static char sdebug_inq_vendor_id[9] = "Linux   ";
1523 static char sdebug_inq_product_id[17] = "scsi_debug      ";
1524 static char sdebug_inq_product_rev[5] = SDEBUG_VERSION;
1525 /* Use some locally assigned NAAs for SAS addresses. */
1526 static const u64 naa3_comp_a = 0x3222222000000000ULL;
1527 static const u64 naa3_comp_b = 0x3333333000000000ULL;
1528 static const u64 naa3_comp_c = 0x3111111000000000ULL;
1529 
1530 /* Device identification VPD page. Returns number of bytes placed in arr */
1531 static int inquiry_vpd_83(unsigned char *arr, int port_group_id,
1532 			  int target_dev_id, int dev_id_num,
1533 			  const char *dev_id_str, int dev_id_str_len,
1534 			  const uuid_t *lu_name)
1535 {
1536 	int num, port_a;
1537 	char b[32];
1538 
1539 	port_a = target_dev_id + 1;
1540 	/* T10 vendor identifier field format (faked) */
1541 	arr[0] = 0x2;	/* ASCII */
1542 	arr[1] = 0x1;
1543 	arr[2] = 0x0;
1544 	memcpy(&arr[4], sdebug_inq_vendor_id, 8);
1545 	memcpy(&arr[12], sdebug_inq_product_id, 16);
1546 	memcpy(&arr[28], dev_id_str, dev_id_str_len);
1547 	num = 8 + 16 + dev_id_str_len;
1548 	arr[3] = num;
1549 	num += 4;
1550 	if (dev_id_num >= 0) {
1551 		if (sdebug_uuid_ctl) {
1552 			/* Locally assigned UUID */
1553 			arr[num++] = 0x1;  /* binary (not necessarily sas) */
1554 			arr[num++] = 0xa;  /* PIV=0, lu, uuid */
1555 			arr[num++] = 0x0;
1556 			arr[num++] = 0x12;
1557 			arr[num++] = 0x10; /* uuid type=1, locally assigned */
1558 			arr[num++] = 0x0;
1559 			memcpy(arr + num, lu_name, 16);
1560 			num += 16;
1561 		} else {
1562 			/* NAA-3, Logical unit identifier (binary) */
1563 			arr[num++] = 0x1;  /* binary (not necessarily sas) */
1564 			arr[num++] = 0x3;  /* PIV=0, lu, naa */
1565 			arr[num++] = 0x0;
1566 			arr[num++] = 0x8;
1567 			put_unaligned_be64(naa3_comp_b + dev_id_num, arr + num);
1568 			num += 8;
1569 		}
1570 		/* Target relative port number */
1571 		arr[num++] = 0x61;	/* proto=sas, binary */
1572 		arr[num++] = 0x94;	/* PIV=1, target port, rel port */
1573 		arr[num++] = 0x0;	/* reserved */
1574 		arr[num++] = 0x4;	/* length */
1575 		arr[num++] = 0x0;	/* reserved */
1576 		arr[num++] = 0x0;	/* reserved */
1577 		arr[num++] = 0x0;
1578 		arr[num++] = 0x1;	/* relative port A */
1579 	}
1580 	/* NAA-3, Target port identifier */
1581 	arr[num++] = 0x61;	/* proto=sas, binary */
1582 	arr[num++] = 0x93;	/* piv=1, target port, naa */
1583 	arr[num++] = 0x0;
1584 	arr[num++] = 0x8;
1585 	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
1586 	num += 8;
1587 	/* NAA-3, Target port group identifier */
1588 	arr[num++] = 0x61;	/* proto=sas, binary */
1589 	arr[num++] = 0x95;	/* piv=1, target port group id */
1590 	arr[num++] = 0x0;
1591 	arr[num++] = 0x4;
1592 	arr[num++] = 0;
1593 	arr[num++] = 0;
1594 	put_unaligned_be16(port_group_id, arr + num);
1595 	num += 2;
1596 	/* NAA-3, Target device identifier */
1597 	arr[num++] = 0x61;	/* proto=sas, binary */
1598 	arr[num++] = 0xa3;	/* piv=1, target device, naa */
1599 	arr[num++] = 0x0;
1600 	arr[num++] = 0x8;
1601 	put_unaligned_be64(naa3_comp_a + target_dev_id, arr + num);
1602 	num += 8;
1603 	/* SCSI name string: Target device identifier */
1604 	arr[num++] = 0x63;	/* proto=sas, UTF-8 */
1605 	arr[num++] = 0xa8;	/* piv=1, target device, SCSI name string */
1606 	arr[num++] = 0x0;
1607 	arr[num++] = 24;
1608 	memcpy(arr + num, "naa.32222220", 12);
1609 	num += 12;
1610 	snprintf(b, sizeof(b), "%08X", target_dev_id);
1611 	memcpy(arr + num, b, 8);
1612 	num += 8;
1613 	memset(arr + num, 0, 4);
1614 	num += 4;
1615 	return num;
1616 }
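
/*
 * The SCSI name string above is "naa.32222220" plus target_dev_id rendered
 * as eight uppercase hex digits, NUL-padded to the declared 24 byte
 * designator length (12 + 8 + 4). E.g. target_dev_id 1997 (0x7cd) yields
 * "naa.32222220000007CD".
 */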
1617 
1618 static unsigned char vpd84_data[] = {
1619 /* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
1620     0x22,0x22,0x22,0x0,0xbb,0x1,
1621     0x22,0x22,0x22,0x0,0xbb,0x2,
1622 };
1623 
1624 /*  Software interface identification VPD page */
1625 static int inquiry_vpd_84(unsigned char *arr)
1626 {
1627 	memcpy(arr, vpd84_data, sizeof(vpd84_data));
1628 	return sizeof(vpd84_data);
1629 }
1630 
1631 /* Management network addresses VPD page */
1632 static int inquiry_vpd_85(unsigned char *arr)
1633 {
1634 	int num = 0;
1635 	const char *na1 = "https://www.kernel.org/config";
1636 	const char *na2 = "http://www.kernel.org/log";
1637 	int plen, olen;
1638 
1639 	arr[num++] = 0x1;	/* lu, storage config */
1640 	arr[num++] = 0x0;	/* reserved */
1641 	arr[num++] = 0x0;
1642 	olen = strlen(na1);
1643 	plen = olen + 1;
1644 	if (plen % 4)
1645 		plen = ((plen / 4) + 1) * 4;
1646 	arr[num++] = plen;	/* length, null terminated, padded */
1647 	memcpy(arr + num, na1, olen);
1648 	memset(arr + num + olen, 0, plen - olen);
1649 	num += plen;
1650 
1651 	arr[num++] = 0x4;	/* lu, logging */
1652 	arr[num++] = 0x0;	/* reserved */
1653 	arr[num++] = 0x0;
1654 	olen = strlen(na2);
1655 	plen = olen + 1;
1656 	if (plen % 4)
1657 		plen = ((plen / 4) + 1) * 4;
1658 	arr[num++] = plen;	/* length, null terminated, padded */
1659 	memcpy(arr + num, na2, olen);
1660 	memset(arr + num + olen, 0, plen - olen);
1661 	num += plen;
1662 
1663 	return num;
1664 }
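
/*
 * The padding above rounds the NUL-terminated address length up to a
 * multiple of four: strlen(na1) == 29, so plen goes 30 -> 32.
 * ALIGN(plen, 4) from <linux/align.h> would compute the same value.
 */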
1665 
1666 /* SCSI ports VPD page */
1667 static int inquiry_vpd_88(unsigned char *arr, int target_dev_id)
1668 {
1669 	int num = 0;
1670 	int port_a, port_b;
1671 
1672 	port_a = target_dev_id + 1;
1673 	port_b = port_a + 1;
1674 	arr[num++] = 0x0;	/* reserved */
1675 	arr[num++] = 0x0;	/* reserved */
1676 	arr[num++] = 0x0;
1677 	arr[num++] = 0x1;	/* relative port 1 (primary) */
1678 	memset(arr + num, 0, 6);
1679 	num += 6;
1680 	arr[num++] = 0x0;
1681 	arr[num++] = 12;	/* length tp descriptor */
1682 	/* naa-5 target port identifier (A) */
1683 	arr[num++] = 0x61;	/* proto=sas, binary */
1684 	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
1685 	arr[num++] = 0x0;	/* reserved */
1686 	arr[num++] = 0x8;	/* length */
1687 	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
1688 	num += 8;
1689 	arr[num++] = 0x0;	/* reserved */
1690 	arr[num++] = 0x0;	/* reserved */
1691 	arr[num++] = 0x0;
1692 	arr[num++] = 0x2;	/* relative port 2 (secondary) */
1693 	memset(arr + num, 0, 6);
1694 	num += 6;
1695 	arr[num++] = 0x0;
1696 	arr[num++] = 12;	/* length tp descriptor */
1697 	/* naa-5 target port identifier (B) */
1698 	arr[num++] = 0x61;	/* proto=sas, binary */
1699 	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
1700 	arr[num++] = 0x0;	/* reserved */
1701 	arr[num++] = 0x8;	/* length */
1702 	put_unaligned_be64(naa3_comp_a + port_b, arr + num);
1703 	num += 8;
1704 
1705 	return num;
1706 }
1707 
1708 
1709 static unsigned char vpd89_data[] = {
1710 /* from 4th byte */ 0,0,0,0,
1711 'l','i','n','u','x',' ',' ',' ',
1712 'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
1713 '1','2','3','4',
1714 0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
1715 0xec,0,0,0,
1716 0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
1717 0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
1718 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
1719 0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
1720 0x53,0x41,
1721 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
1722 0x20,0x20,
1723 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
1724 0x10,0x80,
1725 0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
1726 0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
1727 0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
1728 0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
1729 0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
1730 0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
1731 0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
1732 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1733 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1734 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1735 0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
1736 0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
1737 0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
1738 0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
1739 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1740 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1741 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1742 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1743 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1744 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1745 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1746 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1747 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1748 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1749 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1750 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
1751 };
1752 
1753 /* ATA Information VPD page */
1754 static int inquiry_vpd_89(unsigned char *arr)
1755 {
1756 	memcpy(arr, vpd89_data, sizeof(vpd89_data));
1757 	return sizeof(vpd89_data);
1758 }
1759 
1760 
1761 static unsigned char vpdb0_data[] = {
1762 	/* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
1763 	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1764 	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1765 	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1766 };
1767 
1768 /* Block limits VPD page (SBC-3) */
1769 static int inquiry_vpd_b0(unsigned char *arr)
1770 {
1771 	unsigned int gran;
1772 
1773 	memcpy(arr, vpdb0_data, sizeof(vpdb0_data));
1774 
1775 	/* Optimal transfer length granularity */
1776 	if (sdebug_opt_xferlen_exp != 0 &&
1777 	    sdebug_physblk_exp < sdebug_opt_xferlen_exp)
1778 		gran = 1 << sdebug_opt_xferlen_exp;
1779 	else
1780 		gran = 1 << sdebug_physblk_exp;
1781 	put_unaligned_be16(gran, arr + 2);
1782 
1783 	/* Maximum Transfer Length */
1784 	if (sdebug_store_sectors > 0x400)
1785 		put_unaligned_be32(sdebug_store_sectors, arr + 4);
1786 
1787 	/* Optimal Transfer Length */
1788 	put_unaligned_be32(sdebug_opt_blks, &arr[8]);
1789 
1790 	if (sdebug_lbpu) {
1791 		/* Maximum Unmap LBA Count */
1792 		put_unaligned_be32(sdebug_unmap_max_blocks, &arr[16]);
1793 
1794 		/* Maximum Unmap Block Descriptor Count */
1795 		put_unaligned_be32(sdebug_unmap_max_desc, &arr[20]);
1796 	}
1797 
1798 	/* Unmap Granularity Alignment */
1799 	if (sdebug_unmap_alignment) {
1800 		put_unaligned_be32(sdebug_unmap_alignment, &arr[28]);
1801 		arr[28] |= 0x80; /* UGAVALID */
1802 	}
1803 
1804 	/* Optimal Unmap Granularity */
1805 	put_unaligned_be32(sdebug_unmap_granularity, &arr[24]);
1806 
1807 	/* Maximum WRITE SAME Length */
1808 	put_unaligned_be64(sdebug_write_same_length, &arr[32]);
1809 
1810 	return 0x3c; /* Mandatory page length for Logical Block Provisioning */
1811 }
1812 
1813 /* Block device characteristics VPD page (SBC-3) */
1814 static int inquiry_vpd_b1(struct sdebug_dev_info *devip, unsigned char *arr)
1815 {
1816 	memset(arr, 0, 0x3c);
1817 	arr[0] = 0;
1818 	arr[1] = 1;	/* non rotating medium (e.g. solid state) */
1819 	arr[2] = 0;
1820 	arr[3] = 5;	/* less than 1.8" */
1821 
1822 	return 0x3c;
1823 }
1824 
1825 /* Logical block provisioning VPD page (SBC-4) */
1826 static int inquiry_vpd_b2(unsigned char *arr)
1827 {
1828 	memset(arr, 0, 0x4);
1829 	arr[0] = 0;			/* threshold exponent */
1830 	if (sdebug_lbpu)
1831 		arr[1] = 1 << 7;
1832 	if (sdebug_lbpws)
1833 		arr[1] |= 1 << 6;
1834 	if (sdebug_lbpws10)
1835 		arr[1] |= 1 << 5;
1836 	if (sdebug_lbprz && scsi_debug_lbp())
1837 		arr[1] |= (sdebug_lbprz & 0x7) << 2;  /* sbc4r07 and later */
1838 	/* anc_sup=0; dp=0 (no provisioning group descriptor) */
1839 	/* minimum_percentage=0; provisioning_type=0 (unknown) */
1840 	/* threshold_percentage=0 */
1841 	return 0x4;
1842 }
1843 
1844 /* Zoned block device characteristics VPD page (ZBC mandatory) */
1845 static int inquiry_vpd_b6(struct sdebug_dev_info *devip, unsigned char *arr)
1846 {
1847 	memset(arr, 0, 0x3c);
1848 	arr[0] = 0x1; /* set URSWRZ (unrestricted read in seq. wr req zone) */
1849 	/*
1850 	 * Set Optimal number of open sequential write preferred zones and
1851 	 * Optimal number of non-sequentially written sequential write
1852 	 * preferred zones fields to 'not reported' (0xffffffff). Leave other
1853 	 * fields set to zero, apart from Max. number of open swrz_s field.
1854 	 */
1855 	put_unaligned_be32(0xffffffff, &arr[4]);
1856 	put_unaligned_be32(0xffffffff, &arr[8]);
1857 	if (sdeb_zbc_model == BLK_ZONED_HM && devip->max_open)
1858 		put_unaligned_be32(devip->max_open, &arr[12]);
1859 	else
1860 		put_unaligned_be32(0xffffffff, &arr[12]);
1861 	if (devip->zcap < devip->zsize) {
1862 		arr[19] = ZBC_CONSTANT_ZONE_START_OFFSET;
1863 		put_unaligned_be64(devip->zsize, &arr[20]);
1864 	} else {
1865 		arr[19] = 0;
1866 	}
1867 	return 0x3c;
1868 }
1869 
1870 #define SDEBUG_LONG_INQ_SZ 96
1871 #define SDEBUG_MAX_INQ_ARR_SZ 584
1872 
1873 static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1874 {
1875 	unsigned char pq_pdt;
1876 	unsigned char *arr;
1877 	unsigned char *cmd = scp->cmnd;
1878 	u32 alloc_len, n;
1879 	int ret;
1880 	bool have_wlun, is_disk, is_zbc, is_disk_zbc;
1881 
1882 	alloc_len = get_unaligned_be16(cmd + 3);
1883 	arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
1884 	if (!arr)
1885 		return DID_REQUEUE << 16;
1886 	is_disk = (sdebug_ptype == TYPE_DISK);
1887 	is_zbc = devip->zoned;
1888 	is_disk_zbc = (is_disk || is_zbc);
1889 	have_wlun = scsi_is_wlun(scp->device->lun);
1890 	if (have_wlun)
1891 		pq_pdt = TYPE_WLUN;	/* present, wlun */
1892 	else if (sdebug_no_lun_0 && (devip->lun == SDEBUG_LUN_0_VAL))
1893 		pq_pdt = 0x7f;	/* not present, PQ=3, PDT=0x1f */
1894 	else
1895 		pq_pdt = (sdebug_ptype & 0x1f);
1896 	arr[0] = pq_pdt;
1897 	if (0x2 & cmd[1]) {  /* CMDDT bit set */
1898 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
1899 		kfree(arr);
1900 		return check_condition_result;
1901 	} else if (0x1 & cmd[1]) {  /* EVPD bit set */
1902 		int lu_id_num, port_group_id, target_dev_id;
1903 		u32 len;
1904 		char lu_id_str[6];
1905 		int host_no = devip->sdbg_host->shost->host_no;
1906 
1907 		port_group_id = (((host_no + 1) & 0x7f) << 8) +
1908 		    (devip->channel & 0x7f);
1909 		if (sdebug_vpd_use_hostno == 0)
1910 			host_no = 0;
1911 		lu_id_num = have_wlun ? -1 : (((host_no + 1) * 2000) +
1912 			    (devip->target * 1000) + devip->lun);
1913 		target_dev_id = ((host_no + 1) * 2000) +
1914 				 (devip->target * 1000) - 3;
1915 		len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
1916 		if (0 == cmd[2]) { /* supported vital product data pages */
1917 			arr[1] = cmd[2];	/*sanity */
1918 			n = 4;
1919 			arr[n++] = 0x0;   /* this page */
1920 			arr[n++] = 0x80;  /* unit serial number */
1921 			arr[n++] = 0x83;  /* device identification */
1922 			arr[n++] = 0x84;  /* software interface ident. */
1923 			arr[n++] = 0x85;  /* management network addresses */
1924 			arr[n++] = 0x86;  /* extended inquiry */
1925 			arr[n++] = 0x87;  /* mode page policy */
1926 			arr[n++] = 0x88;  /* SCSI ports */
1927 			if (is_disk_zbc) {	  /* SBC or ZBC */
1928 				arr[n++] = 0x89;  /* ATA information */
1929 				arr[n++] = 0xb0;  /* Block limits */
1930 				arr[n++] = 0xb1;  /* Block characteristics */
1931 				if (is_disk)
1932 					arr[n++] = 0xb2;  /* LB Provisioning */
1933 				if (is_zbc)
1934 					arr[n++] = 0xb6;  /* ZB dev. char. */
1935 			}
1936 			arr[3] = n - 4;	  /* number of supported VPD pages */
1937 		} else if (0x80 == cmd[2]) { /* unit serial number */
1938 			arr[1] = cmd[2];	/*sanity */
1939 			arr[3] = len;
1940 			memcpy(&arr[4], lu_id_str, len);
1941 		} else if (0x83 == cmd[2]) { /* device identification */
1942 			arr[1] = cmd[2];	/*sanity */
1943 			arr[3] = inquiry_vpd_83(&arr[4], port_group_id,
1944 						target_dev_id, lu_id_num,
1945 						lu_id_str, len,
1946 						&devip->lu_name);
1947 		} else if (0x84 == cmd[2]) { /* Software interface ident. */
1948 			arr[1] = cmd[2];	/*sanity */
1949 			arr[3] = inquiry_vpd_84(&arr[4]);
1950 		} else if (0x85 == cmd[2]) { /* Management network addresses */
1951 			arr[1] = cmd[2];	/*sanity */
1952 			arr[3] = inquiry_vpd_85(&arr[4]);
1953 		} else if (0x86 == cmd[2]) { /* extended inquiry */
1954 			arr[1] = cmd[2];	/*sanity */
1955 			arr[3] = 0x3c;	/* number of following entries */
1956 			if (sdebug_dif == T10_PI_TYPE3_PROTECTION)
1957 				arr[4] = 0x4;	/* SPT: GRD_CHK:1 */
1958 			else if (have_dif_prot)
1959 				arr[4] = 0x5;   /* SPT: GRD_CHK:1, REF_CHK:1 */
1960 			else
1961 				arr[4] = 0x0;   /* no protection stuff */
1962 			arr[5] = 0x7;   /* head of q, ordered + simple q's */
1963 		} else if (0x87 == cmd[2]) { /* mode page policy */
1964 			arr[1] = cmd[2];	/*sanity */
1965 			arr[3] = 0x8;	/* number of following entries */
1966 			arr[4] = 0x2;	/* disconnect-reconnect mp */
1967 			arr[6] = 0x80;	/* mlus, shared */
1968 			arr[8] = 0x18;	 /* protocol specific lu */
1969 			arr[10] = 0x82;	 /* mlus, per initiator port */
1970 		} else if (0x88 == cmd[2]) { /* SCSI Ports */
1971 			arr[1] = cmd[2];	/*sanity */
1972 			arr[3] = inquiry_vpd_88(&arr[4], target_dev_id);
1973 		} else if (is_disk_zbc && 0x89 == cmd[2]) { /* ATA info */
1974 			arr[1] = cmd[2];        /*sanity */
1975 			n = inquiry_vpd_89(&arr[4]);
1976 			put_unaligned_be16(n, arr + 2);
1977 		} else if (is_disk_zbc && 0xb0 == cmd[2]) { /* Block limits */
1978 			arr[1] = cmd[2];        /*sanity */
1979 			arr[3] = inquiry_vpd_b0(&arr[4]);
1980 		} else if (is_disk_zbc && 0xb1 == cmd[2]) { /* Block char. */
1981 			arr[1] = cmd[2];        /*sanity */
1982 			arr[3] = inquiry_vpd_b1(devip, &arr[4]);
1983 		} else if (is_disk && 0xb2 == cmd[2]) { /* LB Prov. */
1984 			arr[1] = cmd[2];        /*sanity */
1985 			arr[3] = inquiry_vpd_b2(&arr[4]);
1986 		} else if (is_zbc && cmd[2] == 0xb6) { /* ZB dev. charact. */
1987 			arr[1] = cmd[2];        /*sanity */
1988 			arr[3] = inquiry_vpd_b6(devip, &arr[4]);
1989 		} else {
1990 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
1991 			kfree(arr);
1992 			return check_condition_result;
1993 		}
1994 		len = min_t(u32, get_unaligned_be16(arr + 2) + 4, alloc_len);
1995 		ret = fill_from_dev_buffer(scp, arr,
1996 			    min_t(u32, len, SDEBUG_MAX_INQ_ARR_SZ));
1997 		kfree(arr);
1998 		return ret;
1999 	}
2000 	/* drops through here for a standard inquiry */
2001 	arr[1] = sdebug_removable ? 0x80 : 0;	/* Removable disk */
2002 	arr[2] = sdebug_scsi_level;
2003 	arr[3] = 2;    /* response_data_format==2 */
2004 	arr[4] = SDEBUG_LONG_INQ_SZ - 5;
2005 	arr[5] = (int)have_dif_prot;	/* PROTECT bit */
2006 	if (sdebug_vpd_use_hostno == 0)
2007 		arr[5] |= 0x10; /* claim: implicit TPGS */
2008 	arr[6] = 0x10; /* claim: MultiP */
2009 	/* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
2010 	arr[7] = 0xa; /* claim: LINKED + CMDQUE */
2011 	memcpy(&arr[8], sdebug_inq_vendor_id, 8);
2012 	memcpy(&arr[16], sdebug_inq_product_id, 16);
2013 	memcpy(&arr[32], sdebug_inq_product_rev, 4);
2014 	/* Use Vendor Specific area to place driver date in ASCII hex */
2015 	memcpy(&arr[36], sdebug_version_date, 8);
2016 	/* version descriptors (2 bytes each) follow */
2017 	put_unaligned_be16(0xc0, arr + 58);   /* SAM-6 no version claimed */
2018 	put_unaligned_be16(0x5c0, arr + 60);  /* SPC-5 no version claimed */
2019 	n = 62;
2020 	if (is_disk) {		/* SBC-4 no version claimed */
2021 		put_unaligned_be16(0x600, arr + n);
2022 		n += 2;
2023 	} else if (sdebug_ptype == TYPE_TAPE) {	/* SSC-4 rev 3 */
2024 		put_unaligned_be16(0x525, arr + n);
2025 		n += 2;
2026 	} else if (is_zbc) {	/* ZBC BSR INCITS 536 revision 05 */
2027 		put_unaligned_be16(0x624, arr + n);
2028 		n += 2;
2029 	}
2030 	put_unaligned_be16(0x2100, arr + n);	/* SPL-4 no version claimed */
2031 	ret = fill_from_dev_buffer(scp, arr,
2032 			    min_t(u32, alloc_len, SDEBUG_LONG_INQ_SZ));
2033 	kfree(arr);
2034 	return ret;
2035 }
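
/*
 * Worked example of the EVPD identifiers above: host_no 0, channel 0,
 * target 0, LUN 0 gives port_group_id 0x100, lu_id_num 2000 and
 * target_dev_id 1997, so VPD page 0x83 reports the LU designator
 * naa3_comp_b + 2000 = 0x33333330000007d0 and the target port designator
 * naa3_comp_a + 1998 = 0x32222220000007ce.
 */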
2036 
2037 /* See resp_iec_m_pg() for how this data is manipulated */
2038 static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
2039 				   0, 0, 0x0, 0x0};
2040 
2041 static int resp_requests(struct scsi_cmnd *scp,
2042 			 struct sdebug_dev_info *devip)
2043 {
2044 	unsigned char *cmd = scp->cmnd;
2045 	unsigned char arr[SCSI_SENSE_BUFFERSIZE];	/* assume >= 18 bytes */
2046 	bool dsense = !!(cmd[1] & 1);
2047 	u32 alloc_len = cmd[4];
2048 	u32 len = 18;
2049 	int stopped_state = atomic_read(&devip->stopped);
2050 
2051 	memset(arr, 0, sizeof(arr));
2052 	if (stopped_state > 0) {	/* some "pollable" data [spc6r02: 5.12.2] */
2053 		if (dsense) {
2054 			arr[0] = 0x72;
2055 			arr[1] = NOT_READY;
2056 			arr[2] = LOGICAL_UNIT_NOT_READY;
2057 			arr[3] = (stopped_state == 2) ? 0x1 : 0x2;
2058 			len = 8;
2059 		} else {
2060 			arr[0] = 0x70;
2061 			arr[2] = NOT_READY;		/* NOT_READY in sense_key */
2062 			arr[7] = 0xa;			/* 18 byte sense buffer */
2063 			arr[12] = LOGICAL_UNIT_NOT_READY;
2064 			arr[13] = (stopped_state == 2) ? 0x1 : 0x2;
2065 		}
2066 	} else if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
2067 		/* Information exceptions control mode page: TEST=1, MRIE=6 */
2068 		if (dsense) {
2069 			arr[0] = 0x72;
2070 			arr[1] = 0x0;		/* NO_SENSE in sense_key */
2071 			arr[2] = THRESHOLD_EXCEEDED;
2072 			arr[3] = 0xff;		/* Failure prediction (false) */
2073 			len = 8;
2074 		} else {
2075 			arr[0] = 0x70;
2076 			arr[2] = 0x0;		/* NO_SENSE in sense_key */
2077 			arr[7] = 0xa;		/* 18 byte sense buffer */
2078 			arr[12] = THRESHOLD_EXCEEDED;
2079 			arr[13] = 0xff;		/* Failure prediction (false) */
2080 		}
2081 	} else {	/* nothing to report */
2082 		if (dsense) {
2083 			len = 8;
2084 			memset(arr, 0, len);
2085 			arr[0] = 0x72;
2086 		} else {
2087 			memset(arr, 0, len);
2088 			arr[0] = 0x70;
2089 			arr[7] = 0xa;
2090 		}
2091 	}
2092 	return fill_from_dev_buffer(scp, arr, min_t(u32, len, alloc_len));
2093 }
2094 
2095 static int resp_start_stop(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
2096 {
2097 	unsigned char *cmd = scp->cmnd;
2098 	int power_cond, want_stop, stopped_state;
2099 	bool changing;
2100 
2101 	power_cond = (cmd[4] & 0xf0) >> 4;
2102 	if (power_cond) {
2103 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 7);
2104 		return check_condition_result;
2105 	}
2106 	want_stop = !(cmd[4] & 1);
2107 	stopped_state = atomic_read(&devip->stopped);
2108 	if (stopped_state == 2) {
2109 		ktime_t now_ts = ktime_get_boottime();
2110 
2111 		if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
2112 			u64 diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));
2113 
2114 			if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
2115 				/* tur_ms_to_ready timer extinguished */
2116 				atomic_set(&devip->stopped, 0);
2117 				stopped_state = 0;
2118 			}
2119 		}
2120 		if (stopped_state == 2) {
2121 			if (want_stop) {
2122 				stopped_state = 1;	/* dummy up success */
2123 			} else {	/* Disallow tur_ms_to_ready delay to be overridden */
2124 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 0 /* START bit */);
2125 				return check_condition_result;
2126 			}
2127 		}
2128 	}
2129 	changing = (stopped_state != want_stop);
2130 	if (changing)
2131 		atomic_xchg(&devip->stopped, want_stop);
2132 	if (!changing || (cmd[1] & 0x1))  /* state unchanged or IMMED bit set in cdb */
2133 		return SDEG_RES_IMMED_MASK;
2134 	else
2135 		return 0;
2136 }
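
/*
 * stopped_state == 2 models the power-on delay: until sdeb_tur_ms_to_ready
 * milliseconds have elapsed since create_ts, a STOP is dummied up as
 * success while a START is rejected with INVALID FIELD IN CDB, so the
 * configured delay cannot be short-circuited; once the timer expires the
 * state drops to 0 (started) by itself.
 */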
2137 
2138 static sector_t get_sdebug_capacity(void)
2139 {
2140 	static const unsigned int gibibyte = 1073741824;
2141 
2142 	if (sdebug_virtual_gb > 0)
2143 		return (sector_t)sdebug_virtual_gb *
2144 			(gibibyte / sdebug_sector_size);
2145 	else
2146 		return sdebug_store_sectors;
2147 }
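
/*
 * E.g. virtual_gb=1 with the default 512 byte sectors reports
 * 1073741824 / 512 = 2097152 sectors, regardless of how many sectors the
 * backing store (sdebug_store_sectors) actually holds.
 */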
2148 
2149 #define SDEBUG_READCAP_ARR_SZ 8
2150 static int resp_readcap(struct scsi_cmnd *scp,
2151 			struct sdebug_dev_info *devip)
2152 {
2153 	unsigned char arr[SDEBUG_READCAP_ARR_SZ];
2154 	unsigned int capac;
2155 
2156 	/* following just in case virtual_gb changed */
2157 	sdebug_capacity = get_sdebug_capacity();
2158 	memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
2159 	if (sdebug_capacity < 0xffffffff) {
2160 		capac = (unsigned int)sdebug_capacity - 1;
2161 		put_unaligned_be32(capac, arr + 0);
2162 	} else
2163 		put_unaligned_be32(0xffffffff, arr + 0);
2164 	put_unaligned_be16(sdebug_sector_size, arr + 6);
2165 	return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
2166 }
2167 
2168 #define SDEBUG_READCAP16_ARR_SZ 32
2169 static int resp_readcap16(struct scsi_cmnd *scp,
2170 			  struct sdebug_dev_info *devip)
2171 {
2172 	unsigned char *cmd = scp->cmnd;
2173 	unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
2174 	u32 alloc_len;
2175 
2176 	alloc_len = get_unaligned_be32(cmd + 10);
2177 	/* following just in case virtual_gb changed */
2178 	sdebug_capacity = get_sdebug_capacity();
2179 	memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
2180 	put_unaligned_be64((u64)(sdebug_capacity - 1), arr + 0);
2181 	put_unaligned_be32(sdebug_sector_size, arr + 8);
2182 	arr[13] = sdebug_physblk_exp & 0xf;
2183 	arr[14] = (sdebug_lowest_aligned >> 8) & 0x3f;
2184 
2185 	if (scsi_debug_lbp()) {
2186 		arr[14] |= 0x80; /* LBPME */
2187 		/* from sbc4r07, this LBPRZ field is 1 bit, but the LBPRZ in
2188 		 * the LB Provisioning VPD page is 3 bits. Note that lbprz=2
2189 		 * in the wider field maps to 0 in this field.
2190 		 */
2191 		if (sdebug_lbprz & 1)	/* precisely what the draft requires */
2192 			arr[14] |= 0x40;
2193 	}
2194 
2195 	/*
2196 	 * Since the scsi_debug READ CAPACITY implementation always reports the
2197 	 * total disk capacity, set RC BASIS = 1 for host-managed ZBC devices.
2198 	 */
2199 	if (devip->zoned)
2200 		arr[12] |= 1 << 4;
2201 
2202 	arr[15] = sdebug_lowest_aligned & 0xff;
2203 
2204 	if (have_dif_prot) {
2205 		arr[12] |= (sdebug_dif - 1) << 1; /* P_TYPE; keep RC BASIS bit */
2206 		arr[12] |= 1; /* PROT_EN */
2207 	}
2208 
2209 	return fill_from_dev_buffer(scp, arr,
2210 			    min_t(u32, alloc_len, SDEBUG_READCAP16_ARR_SZ));
2211 }
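
/*
 * E.g. lbprz=1 sets both the 3 bit field in inquiry_vpd_b2() and the
 * single LBPRZ bit here, while lbprz=2 (read back the provisioning
 * initialization pattern) maps to 0 in this 1 bit field, per the sbc4r07
 * narrowing described above.
 */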
2212 
2213 #define SDEBUG_MAX_TGTPGS_ARR_SZ 1412
2214 
2215 static int resp_report_tgtpgs(struct scsi_cmnd *scp,
2216 			      struct sdebug_dev_info *devip)
2217 {
2218 	unsigned char *cmd = scp->cmnd;
2219 	unsigned char *arr;
2220 	int host_no = devip->sdbg_host->shost->host_no;
2221 	int port_group_a, port_group_b, port_a, port_b;
2222 	u32 alen, n, rlen;
2223 	int ret;
2224 
2225 	alen = get_unaligned_be32(cmd + 6);
2226 	arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
2227 	if (!arr)
2228 		return DID_REQUEUE << 16;
2229 	/*
2230 	 * EVPD page 0x88 states we have two ports, one
2231 	 * real and a fake port with no device connected.
2232 	 * So we create two port groups with one port each
2233 	 * and set the group with port B to unavailable.
2234 	 */
2235 	port_a = 0x1; /* relative port A */
2236 	port_b = 0x2; /* relative port B */
2237 	port_group_a = (((host_no + 1) & 0x7f) << 8) +
2238 			(devip->channel & 0x7f);
2239 	port_group_b = (((host_no + 1) & 0x7f) << 8) +
2240 			(devip->channel & 0x7f) + 0x80;
2241 
2242 	/*
2243 	 * The asymmetric access state is cycled according to the host_id.
2244 	 */
2245 	n = 4;
2246 	if (sdebug_vpd_use_hostno == 0) {
2247 		arr[n++] = host_no % 3; /* Asymm access state */
2248 		arr[n++] = 0x0F; /* claim: all states are supported */
2249 	} else {
2250 		arr[n++] = 0x0; /* Active/Optimized path */
2251 		arr[n++] = 0x01; /* only support active/optimized paths */
2252 	}
2253 	put_unaligned_be16(port_group_a, arr + n);
2254 	n += 2;
2255 	arr[n++] = 0;    /* Reserved */
2256 	arr[n++] = 0;    /* Status code */
2257 	arr[n++] = 0;    /* Vendor unique */
2258 	arr[n++] = 0x1;  /* One port per group */
2259 	arr[n++] = 0;    /* Reserved */
2260 	arr[n++] = 0;    /* Reserved */
2261 	put_unaligned_be16(port_a, arr + n);
2262 	n += 2;
2263 	arr[n++] = 3;    /* Port unavailable */
2264 	arr[n++] = 0x08; /* claim: only unavailable paths are supported */
2265 	put_unaligned_be16(port_group_b, arr + n);
2266 	n += 2;
2267 	arr[n++] = 0;    /* Reserved */
2268 	arr[n++] = 0;    /* Status code */
2269 	arr[n++] = 0;    /* Vendor unique */
2270 	arr[n++] = 0x1;  /* One port per group */
2271 	arr[n++] = 0;    /* Reserved */
2272 	arr[n++] = 0;    /* Reserved */
2273 	put_unaligned_be16(port_b, arr + n);
2274 	n += 2;
2275 
2276 	rlen = n - 4;
2277 	put_unaligned_be32(rlen, arr + 0);
2278 
2279 	/*
2280 	 * Return the smallest value of either
2281 	 * - The allocated length
2282 	 * - The constructed command length
2283 	 * - The maximum array size
2284 	 */
2285 	rlen = min(alen, n);
2286 	ret = fill_from_dev_buffer(scp, arr,
2287 			   min_t(u32, rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
2288 	kfree(arr);
2289 	return ret;
2290 }
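
/*
 * With sdebug_vpd_use_hostno == 0 the primary group's asymmetric access
 * state cycles with host_no % 3: 0 active/optimized, 1 active/non-optimized,
 * 2 standby; the secondary group is always reported as unavailable (3),
 * matching the fake port B described above.
 */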
2291 
2292 static int resp_rsup_opcodes(struct scsi_cmnd *scp,
2293 			     struct sdebug_dev_info *devip)
2294 {
2295 	bool rctd;
2296 	u8 reporting_opts, req_opcode, sdeb_i, supp;
2297 	u16 req_sa, u;
2298 	u32 alloc_len, a_len;
2299 	int k, offset, len, errsts, count, bump, na;
2300 	const struct opcode_info_t *oip;
2301 	const struct opcode_info_t *r_oip;
2302 	u8 *arr;
2303 	u8 *cmd = scp->cmnd;
2304 
2305 	rctd = !!(cmd[2] & 0x80);
2306 	reporting_opts = cmd[2] & 0x7;
2307 	req_opcode = cmd[3];
2308 	req_sa = get_unaligned_be16(cmd + 4);
2309 	alloc_len = get_unaligned_be32(cmd + 6);
2310 	if (alloc_len < 4 || alloc_len > 0xffff) {
2311 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
2312 		return check_condition_result;
2313 	}
2314 	if (alloc_len > 8192)
2315 		a_len = 8192;
2316 	else
2317 		a_len = alloc_len;
2318 	arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_ATOMIC);
2319 	if (NULL == arr) {
2320 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
2321 				INSUFF_RES_ASCQ);
2322 		return check_condition_result;
2323 	}
2324 	switch (reporting_opts) {
2325 	case 0:	/* all commands */
2326 		/* count number of commands */
2327 		for (count = 0, oip = opcode_info_arr;
2328 		     oip->num_attached != 0xff; ++oip) {
2329 			if (F_INV_OP & oip->flags)
2330 				continue;
2331 			count += (oip->num_attached + 1);
2332 		}
2333 		bump = rctd ? 20 : 8;
2334 		put_unaligned_be32(count * bump, arr);
2335 		for (offset = 4, oip = opcode_info_arr;
2336 		     oip->num_attached != 0xff && offset < a_len; ++oip) {
2337 			if (F_INV_OP & oip->flags)
2338 				continue;
2339 			na = oip->num_attached;
2340 			arr[offset] = oip->opcode;
2341 			put_unaligned_be16(oip->sa, arr + offset + 2);
2342 			if (rctd)
2343 				arr[offset + 5] |= 0x2;
2344 			if (FF_SA & oip->flags)
2345 				arr[offset + 5] |= 0x1;
2346 			put_unaligned_be16(oip->len_mask[0], arr + offset + 6);
2347 			if (rctd)
2348 				put_unaligned_be16(0xa, arr + offset + 8);
2349 			r_oip = oip;
2350 			for (k = 0, oip = oip->arrp; k < na; ++k, ++oip) {
2351 				if (F_INV_OP & oip->flags)
2352 					continue;
2353 				offset += bump;
2354 				arr[offset] = oip->opcode;
2355 				put_unaligned_be16(oip->sa, arr + offset + 2);
2356 				if (rctd)
2357 					arr[offset + 5] |= 0x2;
2358 				if (FF_SA & oip->flags)
2359 					arr[offset + 5] |= 0x1;
2360 				put_unaligned_be16(oip->len_mask[0],
2361 						   arr + offset + 6);
2362 				if (rctd)
2363 					put_unaligned_be16(0xa,
2364 							   arr + offset + 8);
2365 			}
2366 			oip = r_oip;
2367 			offset += bump;
2368 		}
2369 		break;
2370 	case 1:	/* one command: opcode only */
2371 	case 2:	/* one command: opcode plus service action */
2372 	case 3:	/* one command: if sa==0 then opcode only else opcode+sa */
2373 		sdeb_i = opcode_ind_arr[req_opcode];
2374 		oip = &opcode_info_arr[sdeb_i];
2375 		if (F_INV_OP & oip->flags) {
2376 			supp = 1;
2377 			offset = 4;
2378 		} else {
2379 			if (1 == reporting_opts) {
2380 				if (FF_SA & oip->flags) {
2381 					mk_sense_invalid_fld(scp, SDEB_IN_CDB,
2382 							     2, 2);
2383 					kfree(arr);
2384 					return check_condition_result;
2385 				}
2386 				req_sa = 0;
2387 			} else if (2 == reporting_opts &&
2388 				   0 == (FF_SA & oip->flags)) {
2389 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1);	/* point at requested sa */
2390 				kfree(arr);
2391 				return check_condition_result;
2392 			}
2393 			if (0 == (FF_SA & oip->flags) &&
2394 			    req_opcode == oip->opcode)
2395 				supp = 3;
2396 			else if (0 == (FF_SA & oip->flags)) {
2397 				na = oip->num_attached;
2398 				for (k = 0, oip = oip->arrp; k < na;
2399 				     ++k, ++oip) {
2400 					if (req_opcode == oip->opcode)
2401 						break;
2402 				}
2403 				supp = (k >= na) ? 1 : 3;
2404 			} else if (req_sa != oip->sa) {
2405 				na = oip->num_attached;
2406 				for (k = 0, oip = oip->arrp; k < na;
2407 				     ++k, ++oip) {
2408 					if (req_sa == oip->sa)
2409 						break;
2410 				}
2411 				supp = (k >= na) ? 1 : 3;
2412 			} else
2413 				supp = 3;
2414 			if (3 == supp) {
2415 				u = oip->len_mask[0];
2416 				put_unaligned_be16(u, arr + 2);
2417 				arr[4] = oip->opcode;
2418 				for (k = 1; k < u; ++k)
2419 					arr[4 + k] = (k < 16) ?
2420 						 oip->len_mask[k] : 0xff;
2421 				offset = 4 + u;
2422 			} else
2423 				offset = 4;
2424 		}
2425 		arr[1] = (rctd ? 0x80 : 0) | supp;
2426 		if (rctd) {
2427 			put_unaligned_be16(0xa, arr + offset);
2428 			offset += 12;
2429 		}
2430 		break;
2431 	default:
2432 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
2433 		kfree(arr);
2434 		return check_condition_result;
2435 	}
2436 	offset = (offset < a_len) ? offset : a_len;
2437 	len = (offset < alloc_len) ? offset : alloc_len;
2438 	errsts = fill_from_dev_buffer(scp, arr, len);
2439 	kfree(arr);
2440 	return errsts;
2441 }
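
/*
 * Descriptor sizing in the report-all case above: each command descriptor
 * is 8 bytes, or 20 with RCTD set (8 plus a 12 byte command timeouts
 * descriptor whose length field is 0xa, i.e. 10 bytes after the 2 byte
 * length field itself).
 */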
2442 
2443 static int resp_rsup_tmfs(struct scsi_cmnd *scp,
2444 			  struct sdebug_dev_info *devip)
2445 {
2446 	bool repd;
2447 	u32 alloc_len, len;
2448 	u8 arr[16];
2449 	u8 *cmd = scp->cmnd;
2450 
2451 	memset(arr, 0, sizeof(arr));
2452 	repd = !!(cmd[2] & 0x80);
2453 	alloc_len = get_unaligned_be32(cmd + 6);
2454 	if (alloc_len < 4) {
2455 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
2456 		return check_condition_result;
2457 	}
2458 	arr[0] = 0xc8;		/* ATS | ATSS | LURS */
2459 	arr[1] = 0x1;		/* ITNRS */
2460 	if (repd) {
2461 		arr[3] = 0xc;
2462 		len = 16;
2463 	} else
2464 		len = 4;
2465 
2466 	len = (len < alloc_len) ? len : alloc_len;
2467 	return fill_from_dev_buffer(scp, arr, len);
2468 }
2469 
2470 /* <<Following mode page info copied from ST318451LW>> */
2471 
2472 static int resp_err_recov_pg(unsigned char *p, int pcontrol, int target)
2473 {	/* Read-Write Error Recovery page for mode_sense */
2474 	unsigned char err_recov_pg[] = {0x1, 0xa, 0xc0, 11, 240, 0, 0, 0,
2475 					5, 0, 0xff, 0xff};
2476 
2477 	memcpy(p, err_recov_pg, sizeof(err_recov_pg));
2478 	if (1 == pcontrol)
2479 		memset(p + 2, 0, sizeof(err_recov_pg) - 2);
2480 	return sizeof(err_recov_pg);
2481 }
2482 
2483 static int resp_disconnect_pg(unsigned char *p, int pcontrol, int target)
2484 { 	/* Disconnect-Reconnect page for mode_sense */
2485 	unsigned char disconnect_pg[] = {0x2, 0xe, 128, 128, 0, 10, 0, 0,
2486 					 0, 0, 0, 0, 0, 0, 0, 0};
2487 
2488 	memcpy(p, disconnect_pg, sizeof(disconnect_pg));
2489 	if (1 == pcontrol)
2490 		memset(p + 2, 0, sizeof(disconnect_pg) - 2);
2491 	return sizeof(disconnect_pg);
2492 }
2493 
2494 static int resp_format_pg(unsigned char *p, int pcontrol, int target)
2495 {       /* Format device page for mode_sense */
2496 	unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
2497 				     0, 0, 0, 0, 0, 0, 0, 0,
2498 				     0, 0, 0, 0, 0x40, 0, 0, 0};
2499 
2500 	memcpy(p, format_pg, sizeof(format_pg));
2501 	put_unaligned_be16(sdebug_sectors_per, p + 10);
2502 	put_unaligned_be16(sdebug_sector_size, p + 12);
2503 	if (sdebug_removable)
2504 		p[20] |= 0x20; /* should agree with INQUIRY */
2505 	if (1 == pcontrol)
2506 		memset(p + 2, 0, sizeof(format_pg) - 2);
2507 	return sizeof(format_pg);
2508 }
2509 
2510 static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
2511 				     0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,
2512 				     0, 0, 0, 0};
2513 
2514 static int resp_caching_pg(unsigned char *p, int pcontrol, int target)
2515 { 	/* Caching page for mode_sense */
2516 	unsigned char ch_caching_pg[] = {/* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0,
2517 		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
2518 	unsigned char d_caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
2519 		0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,     0, 0, 0, 0};
2520 
2521 	if (SDEBUG_OPT_N_WCE & sdebug_opts)
2522 		caching_pg[2] &= ~0x4;	/* set WCE=0 (default WCE=1) */
2523 	memcpy(p, caching_pg, sizeof(caching_pg));
2524 	if (1 == pcontrol)
2525 		memcpy(p + 2, ch_caching_pg, sizeof(ch_caching_pg));
2526 	else if (2 == pcontrol)
2527 		memcpy(p, d_caching_pg, sizeof(d_caching_pg));
2528 	return sizeof(caching_pg);
2529 }
2530 
2531 static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
2532 				    0, 0, 0x2, 0x4b};
2533 
2534 static int resp_ctrl_m_pg(unsigned char *p, int pcontrol, int target)
2535 { 	/* Control mode page for mode_sense */
2536 	unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
2537 					0, 0, 0, 0};
2538 	unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
2539 				     0, 0, 0x2, 0x4b};
2540 
2541 	if (sdebug_dsense)
2542 		ctrl_m_pg[2] |= 0x4;
2543 	else
2544 		ctrl_m_pg[2] &= ~0x4;
2545 
2546 	if (sdebug_ato)
2547 		ctrl_m_pg[5] |= 0x80; /* ATO=1 */
2548 
2549 	memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
2550 	if (1 == pcontrol)
2551 		memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
2552 	else if (2 == pcontrol)
2553 		memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
2554 	return sizeof(ctrl_m_pg);
2555 }
2556 
2557 
2558 static int resp_iec_m_pg(unsigned char *p, int pcontrol, int target)
2559 {	/* Informational Exceptions control mode page for mode_sense */
2560 	unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
2561 				       0, 0, 0x0, 0x0};
2562 	unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
2563 				      0, 0, 0x0, 0x0};
2564 
2565 	memcpy(p, iec_m_pg, sizeof(iec_m_pg));
2566 	if (1 == pcontrol)
2567 		memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
2568 	else if (2 == pcontrol)
2569 		memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
2570 	return sizeof(iec_m_pg);
2571 }
2572 
2573 static int resp_sas_sf_m_pg(unsigned char *p, int pcontrol, int target)
2574 {	/* SAS SSP mode page - short format for mode_sense */
2575 	unsigned char sas_sf_m_pg[] = {0x19, 0x6,
2576 		0x6, 0x0, 0x7, 0xd0, 0x0, 0x0};
2577 
2578 	memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg));
2579 	if (1 == pcontrol)
2580 		memset(p + 2, 0, sizeof(sas_sf_m_pg) - 2);
2581 	return sizeof(sas_sf_m_pg);
2582 }
2583 
2584 
2585 static int resp_sas_pcd_m_spg(unsigned char *p, int pcontrol, int target,
2586 			      int target_dev_id)
2587 {	/* SAS phy control and discover mode page for mode_sense */
2588 	unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
2589 		    0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
2590 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2591 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2592 		    0x2, 0, 0, 0, 0, 0, 0, 0,
2593 		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
2594 		    0, 0, 0, 0, 0, 0, 0, 0,
2595 		    0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
2596 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2597 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2598 		    0x3, 0, 0, 0, 0, 0, 0, 0,
2599 		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
2600 		    0, 0, 0, 0, 0, 0, 0, 0,
2601 		};
2602 	int port_a, port_b;
2603 
2604 	put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 16);
2605 	put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 24);
2606 	put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 64);
2607 	put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 72);
2608 	port_a = target_dev_id + 1;
2609 	port_b = port_a + 1;
2610 	memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
2611 	put_unaligned_be32(port_a, p + 20);
2612 	put_unaligned_be32(port_b, p + 48 + 20);
2613 	if (1 == pcontrol)
2614 		memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
2615 	return sizeof(sas_pcd_m_pg);
2616 }
2617 
2618 static int resp_sas_sha_m_spg(unsigned char *p, int pcontrol)
2619 {	/* SAS SSP shared protocol specific port mode subpage */
2620 	unsigned char sas_sha_m_pg[] = {0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
2621 		    0, 0, 0, 0, 0, 0, 0, 0,
2622 		};
2623 
2624 	memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg));
2625 	if (1 == pcontrol)
2626 		memset(p + 4, 0, sizeof(sas_sha_m_pg) - 4);
2627 	return sizeof(sas_sha_m_pg);
2628 }
2629 
2630 #define SDEBUG_MAX_MSENSE_SZ 256
2631 
2632 static int resp_mode_sense(struct scsi_cmnd *scp,
2633 			   struct sdebug_dev_info *devip)
2634 {
2635 	int pcontrol, pcode, subpcode, bd_len;
2636 	unsigned char dev_spec;
2637 	u32 alloc_len, offset, len;
2638 	int target_dev_id;
2639 	int target = scp->device->id;
2640 	unsigned char *ap;
2641 	unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
2642 	unsigned char *cmd = scp->cmnd;
2643 	bool dbd, llbaa, msense_6, is_disk, is_zbc, bad_pcode;
2644 
2645 	dbd = !!(cmd[1] & 0x8);		/* disable block descriptors */
2646 	pcontrol = (cmd[2] & 0xc0) >> 6;
2647 	pcode = cmd[2] & 0x3f;
2648 	subpcode = cmd[3];
2649 	msense_6 = (MODE_SENSE == cmd[0]);
2650 	llbaa = msense_6 ? false : !!(cmd[1] & 0x10);
2651 	is_disk = (sdebug_ptype == TYPE_DISK);
2652 	is_zbc = devip->zoned;
2653 	if ((is_disk || is_zbc) && !dbd)
2654 		bd_len = llbaa ? 16 : 8;
2655 	else
2656 		bd_len = 0;
2657 	alloc_len = msense_6 ? cmd[4] : get_unaligned_be16(cmd + 7);
2658 	memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
2659 	if (0x3 == pcontrol) {  /* Saving values not supported */
2660 		mk_sense_buffer(scp, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, 0);
2661 		return check_condition_result;
2662 	}
2663 	target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
2664 			(devip->target * 1000) - 3;
2665 	/* for disks+zbc set DPOFUA bit and clear write protect (WP) bit */
2666 	if (is_disk || is_zbc) {
2667 		dev_spec = 0x10;	/* =0x90 if WP=1 implies read-only */
2668 		if (sdebug_wp)
2669 			dev_spec |= 0x80;
2670 	} else
2671 		dev_spec = 0x0;
2672 	if (msense_6) {
2673 		arr[2] = dev_spec;
2674 		arr[3] = bd_len;
2675 		offset = 4;
2676 	} else {
2677 		arr[3] = dev_spec;
2678 		if (16 == bd_len)
2679 			arr[4] = 0x1;	/* set LONGLBA bit */
2680 		arr[7] = bd_len;	/* assume 255 or less */
2681 		offset = 8;
2682 	}
2683 	ap = arr + offset;
2684 	if ((bd_len > 0) && (!sdebug_capacity))
2685 		sdebug_capacity = get_sdebug_capacity();
2686 
2687 	if (8 == bd_len) {
2688 		if (sdebug_capacity > 0xfffffffe)
2689 			put_unaligned_be32(0xffffffff, ap + 0);
2690 		else
2691 			put_unaligned_be32(sdebug_capacity, ap + 0);
2692 		put_unaligned_be16(sdebug_sector_size, ap + 6);
2693 		offset += bd_len;
2694 		ap = arr + offset;
2695 	} else if (16 == bd_len) {
2696 		put_unaligned_be64((u64)sdebug_capacity, ap + 0);
2697 		put_unaligned_be32(sdebug_sector_size, ap + 12);
2698 		offset += bd_len;
2699 		ap = arr + offset;
2700 	}
2701 
2702 	if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) {
2703 		/* TODO: Control Extension page */
2704 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2705 		return check_condition_result;
2706 	}
2707 	bad_pcode = false;
2708 
2709 	switch (pcode) {
2710 	case 0x1:	/* Read-Write error recovery page, direct access */
2711 		len = resp_err_recov_pg(ap, pcontrol, target);
2712 		offset += len;
2713 		break;
2714 	case 0x2:	/* Disconnect-Reconnect page, all devices */
2715 		len = resp_disconnect_pg(ap, pcontrol, target);
2716 		offset += len;
2717 		break;
2718 	case 0x3:       /* Format device page, direct access */
2719 		if (is_disk) {
2720 			len = resp_format_pg(ap, pcontrol, target);
2721 			offset += len;
2722 		} else
2723 			bad_pcode = true;
2724 		break;
2725 	case 0x8:	/* Caching page, direct access */
2726 		if (is_disk || is_zbc) {
2727 			len = resp_caching_pg(ap, pcontrol, target);
2728 			offset += len;
2729 		} else
2730 			bad_pcode = true;
2731 		break;
2732 	case 0xa:	/* Control Mode page, all devices */
2733 		len = resp_ctrl_m_pg(ap, pcontrol, target);
2734 		offset += len;
2735 		break;
2736 	case 0x19:	/* if spc==1 then sas phy, control+discover */
2737 		if ((subpcode > 0x2) && (subpcode < 0xff)) {
2738 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2739 			return check_condition_result;
2740 		}
2741 		len = 0;
2742 		if ((0x0 == subpcode) || (0xff == subpcode))
2743 			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2744 		if ((0x1 == subpcode) || (0xff == subpcode))
2745 			len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
2746 						  target_dev_id);
2747 		if ((0x2 == subpcode) || (0xff == subpcode))
2748 			len += resp_sas_sha_m_spg(ap + len, pcontrol);
2749 		offset += len;
2750 		break;
2751 	case 0x1c:	/* Informational Exceptions Mode page, all devices */
2752 		len = resp_iec_m_pg(ap, pcontrol, target);
2753 		offset += len;
2754 		break;
2755 	case 0x3f:	/* Read all Mode pages */
2756 		if ((0 == subpcode) || (0xff == subpcode)) {
2757 			len = resp_err_recov_pg(ap, pcontrol, target);
2758 			len += resp_disconnect_pg(ap + len, pcontrol, target);
2759 			if (is_disk) {
2760 				len += resp_format_pg(ap + len, pcontrol,
2761 						      target);
2762 				len += resp_caching_pg(ap + len, pcontrol,
2763 						       target);
2764 			} else if (is_zbc) {
2765 				len += resp_caching_pg(ap + len, pcontrol,
2766 						       target);
2767 			}
2768 			len += resp_ctrl_m_pg(ap + len, pcontrol, target);
2769 			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2770 			if (0xff == subpcode) {
2771 				len += resp_sas_pcd_m_spg(ap + len, pcontrol,
2772 						  target, target_dev_id);
2773 				len += resp_sas_sha_m_spg(ap + len, pcontrol);
2774 			}
2775 			len += resp_iec_m_pg(ap + len, pcontrol, target);
2776 			offset += len;
2777 		} else {
2778 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2779 			return check_condition_result;
2780 		}
2781 		break;
2782 	default:
2783 		bad_pcode = true;
2784 		break;
2785 	}
2786 	if (bad_pcode) {
2787 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2788 		return check_condition_result;
2789 	}
2790 	if (msense_6)
2791 		arr[0] = offset - 1;
2792 	else
2793 		put_unaligned_be16((offset - 2), arr + 0);
2794 	return fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, offset));
2795 }
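
/*
 * Length bookkeeping above: for MODE SENSE(6) the MODE DATA LENGTH byte
 * excludes itself, so a 4 byte header, an 8 byte block descriptor and the
 * 12 byte control page (0xa) give offset 24 and arr[0] = 23; the 10 byte
 * variant excludes its 2 byte length field instead.
 */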
2796 
2797 #define SDEBUG_MAX_MSELECT_SZ 512
2798 
2799 static int resp_mode_select(struct scsi_cmnd *scp,
2800 			    struct sdebug_dev_info *devip)
2801 {
2802 	int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
2803 	int param_len, res, mpage;
2804 	unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
2805 	unsigned char *cmd = scp->cmnd;
2806 	int mselect6 = (MODE_SELECT == cmd[0]);
2807 
2808 	memset(arr, 0, sizeof(arr));
2809 	pf = cmd[1] & 0x10;
2810 	sp = cmd[1] & 0x1;
2811 	param_len = mselect6 ? cmd[4] : get_unaligned_be16(cmd + 7);
2812 	if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
2813 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1);
2814 		return check_condition_result;
2815 	}
2816 	res = fetch_to_dev_buffer(scp, arr, param_len);
2817 	if (-1 == res)
2818 		return DID_ERROR << 16;
2819 	else if (sdebug_verbose && (res < param_len))
2820 		sdev_printk(KERN_INFO, scp->device,
2821 			    "%s: cdb indicated=%d, IO sent=%d bytes\n",
2822 			    __func__, param_len, res);
2823 	md_len = mselect6 ? (arr[0] + 1) : (get_unaligned_be16(arr + 0) + 2);
2824 	bd_len = mselect6 ? arr[3] : get_unaligned_be16(arr + 6);
2825 	off = bd_len + (mselect6 ? 4 : 8);
2826 	if (md_len > 2 || off >= res) {
2827 		mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
2828 		return check_condition_result;
2829 	}
2830 	mpage = arr[off] & 0x3f;
2831 	ps = !!(arr[off] & 0x80);
2832 	if (ps) {
2833 		mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 7);
2834 		return check_condition_result;
2835 	}
2836 	spf = !!(arr[off] & 0x40);
2837 	pg_len = spf ? (get_unaligned_be16(arr + off + 2) + 4) :
2838 		       (arr[off + 1] + 2);
2839 	if ((pg_len + off) > param_len) {
2840 		mk_sense_buffer(scp, ILLEGAL_REQUEST,
2841 				PARAMETER_LIST_LENGTH_ERR, 0);
2842 		return check_condition_result;
2843 	}
2844 	switch (mpage) {
2845 	case 0x8:      /* Caching Mode page */
2846 		if (caching_pg[1] == arr[off + 1]) {
2847 			memcpy(caching_pg + 2, arr + off + 2,
2848 			       sizeof(caching_pg) - 2);
2849 			goto set_mode_changed_ua;
2850 		}
2851 		break;
2852 	case 0xa:      /* Control Mode page */
2853 		if (ctrl_m_pg[1] == arr[off + 1]) {
2854 			memcpy(ctrl_m_pg + 2, arr + off + 2,
2855 			       sizeof(ctrl_m_pg) - 2);
2856 			sdebug_wp = !!(ctrl_m_pg[4] & 0x8);	/* SWP bit */
2860 			sdebug_dsense = !!(ctrl_m_pg[2] & 0x4);
2861 			goto set_mode_changed_ua;
2862 		}
2863 		break;
2864 	case 0x1c:      /* Informational Exceptions Mode page */
2865 		if (iec_m_pg[1] == arr[off + 1]) {
2866 			memcpy(iec_m_pg + 2, arr + off + 2,
2867 			       sizeof(iec_m_pg) - 2);
2868 			goto set_mode_changed_ua;
2869 		}
2870 		break;
2871 	default:
2872 		break;
2873 	}
2874 	mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 5);
2875 	return check_condition_result;
2876 set_mode_changed_ua:
2877 	set_bit(SDEBUG_UA_MODE_CHANGED, devip->uas_bm);
2878 	return 0;
2879 }
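
/*
 * Offset arithmetic above: for MODE SELECT(6) with an 8 byte block
 * descriptor the first mode page starts at off = 8 + 4 = 12; a non-SPF
 * page then spans arr[off + 1] + 2 bytes, e.g. 20 for the caching page
 * (page length 18), and that length byte must match the stored page's
 * before the write is accepted.
 */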
2880 
2881 static int resp_temp_l_pg(unsigned char *arr)
2882 {
2883 	unsigned char temp_l_pg[] = {0x0, 0x0, 0x3, 0x2, 0x0, 38,
2884 				     0x0, 0x1, 0x3, 0x2, 0x0, 65,
2885 		};
2886 
2887 	memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
2888 	return sizeof(temp_l_pg);
2889 }
2890 
2891 static int resp_ie_l_pg(unsigned char *arr)
2892 {
2893 	unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
2894 		};
2895 
2896 	memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
2897 	if (iec_m_pg[2] & 0x4) {	/* TEST bit set */
2898 		arr[4] = THRESHOLD_EXCEEDED;
2899 		arr[5] = 0xff;
2900 	}
2901 	return sizeof(ie_l_pg);
2902 }
2903 
2904 static int resp_env_rep_l_spg(unsigned char *arr)
2905 {
2906 	unsigned char env_rep_l_spg[] = {0x0, 0x0, 0x23, 0x8,
2907 					 0x0, 40, 72, 0xff, 45, 18, 0, 0,
2908 					 0x1, 0x0, 0x23, 0x8,
2909 					 0x0, 55, 72, 35, 55, 45, 0, 0,
2910 		};
2911 
2912 	memcpy(arr, env_rep_l_spg, sizeof(env_rep_l_spg));
2913 	return sizeof(env_rep_l_spg);
2914 }
2915 
2916 #define SDEBUG_MAX_LSENSE_SZ 512
2917 
2918 static int resp_log_sense(struct scsi_cmnd *scp,
2919 			  struct sdebug_dev_info *devip)
2920 {
2921 	int ppc, sp, pcode, subpcode;
2922 	u32 alloc_len, len, n;
2923 	unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
2924 	unsigned char *cmd = scp->cmnd;
2925 
2926 	memset(arr, 0, sizeof(arr));
2927 	ppc = cmd[1] & 0x2;
2928 	sp = cmd[1] & 0x1;
2929 	if (ppc || sp) {
2930 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, ppc ? 1 : 0);
2931 		return check_condition_result;
2932 	}
2933 	pcode = cmd[2] & 0x3f;
2934 	subpcode = cmd[3] & 0xff;
2935 	alloc_len = get_unaligned_be16(cmd + 7);
2936 	arr[0] = pcode;
2937 	if (0 == subpcode) {
2938 		switch (pcode) {
2939 		case 0x0:	/* Supported log pages log page */
2940 			n = 4;
2941 			arr[n++] = 0x0;		/* this page */
2942 			arr[n++] = 0xd;		/* Temperature */
2943 			arr[n++] = 0x2f;	/* Informational exceptions */
2944 			arr[3] = n - 4;
2945 			break;
2946 		case 0xd:	/* Temperature log page */
2947 			arr[3] = resp_temp_l_pg(arr + 4);
2948 			break;
2949 		case 0x2f:	/* Informational exceptions log page */
2950 			arr[3] = resp_ie_l_pg(arr + 4);
2951 			break;
2952 		default:
2953 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2954 			return check_condition_result;
2955 		}
2956 	} else if (0xff == subpcode) {
2957 		arr[0] |= 0x40;
2958 		arr[1] = subpcode;
2959 		switch (pcode) {
2960 		case 0x0:	/* Supported log pages and subpages log page */
2961 			n = 4;
2962 			arr[n++] = 0x0;
2963 			arr[n++] = 0x0;		/* 0,0 page */
2964 			arr[n++] = 0x0;
2965 			arr[n++] = 0xff;	/* this page */
2966 			arr[n++] = 0xd;
2967 			arr[n++] = 0x0;		/* Temperature */
2968 			arr[n++] = 0xd;
2969 			arr[n++] = 0x1;		/* Environment reporting */
2970 			arr[n++] = 0xd;
2971 			arr[n++] = 0xff;	/* all 0xd subpages */
2972 			arr[n++] = 0x2f;
2973 			arr[n++] = 0x0;	/* Informational exceptions */
2974 			arr[n++] = 0x2f;
2975 			arr[n++] = 0xff;	/* all 0x2f subpages */
2976 			arr[3] = n - 4;
2977 			break;
2978 		case 0xd:	/* Temperature subpages */
2979 			n = 4;
2980 			arr[n++] = 0xd;
2981 			arr[n++] = 0x0;		/* Temperature */
2982 			arr[n++] = 0xd;
2983 			arr[n++] = 0x1;		/* Environment reporting */
2984 			arr[n++] = 0xd;
2985 			arr[n++] = 0xff;	/* these subpages */
2986 			arr[3] = n - 4;
2987 			break;
2988 		case 0x2f:	/* Informational exceptions subpages */
2989 			n = 4;
2990 			arr[n++] = 0x2f;
2991 			arr[n++] = 0x0;		/* Informational exceptions */
2992 			arr[n++] = 0x2f;
2993 			arr[n++] = 0xff;	/* these subpages */
2994 			arr[3] = n - 4;
2995 			break;
2996 		default:
2997 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2998 			return check_condition_result;
2999 		}
3000 	} else if (subpcode > 0) {
3001 		arr[0] |= 0x40;
3002 		arr[1] = subpcode;
3003 		if (pcode == 0xd && subpcode == 1) {
3004 			arr[3] = resp_env_rep_l_spg(arr + 4);
3005 		} else {
3006 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
3007 			return check_condition_result;
3008 		}
3009 	} else {
3010 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
3011 		return check_condition_result;
3012 	}
3013 	len = min_t(u32, get_unaligned_be16(arr + 2) + 4, alloc_len);
3014 	return fill_from_dev_buffer(scp, arr,
3015 		    min_t(u32, len, SDEBUG_MAX_LSENSE_SZ));
3016 }
3017 
3018 static inline bool sdebug_dev_is_zoned(struct sdebug_dev_info *devip)
3019 {
3020 	return devip->nr_zones != 0;
3021 }
3022 
3023 static struct sdeb_zone_state *zbc_zone(struct sdebug_dev_info *devip,
3024 					unsigned long long lba)
3025 {
3026 	u32 zno = lba >> devip->zsize_shift;
3027 	struct sdeb_zone_state *zsp;
3028 
3029 	if (devip->zcap == devip->zsize || zno < devip->nr_conv_zones)
3030 		return &devip->zstate[zno];
3031 
3032 	/*
3033 	 * If the zone capacity is less than the zone size, adjust for gap
3034 	 * zones.
3035 	 */
3036 	zno = 2 * zno - devip->nr_conv_zones;
3037 	WARN_ONCE(zno >= devip->nr_zones, "%u >= %u\n", zno, devip->nr_zones);
3038 	zsp = &devip->zstate[zno];
3039 	if (lba >= zsp->z_start + zsp->z_size)
3040 		zsp++;
3041 	WARN_ON_ONCE(lba >= zsp->z_start + zsp->z_size);
3042 	return zsp;
3043 }
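
/*
 * Example of the gap zone mapping above: with zcap < zsize, each zone-sized
 * LBA range past the conventional zones holds a sequential zone followed by
 * a gap zone, so LBA-range zone number zno maps to state index
 * 2 * zno - nr_conv_zones. With nr_conv_zones == 1, zno 1 maps to index 1
 * (the sequential zone) and the lba >= z_start + z_size test bumps to
 * index 2, its trailing gap zone.
 */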
3044 
3045 static inline bool zbc_zone_is_conv(struct sdeb_zone_state *zsp)
3046 {
3047 	return zsp->z_type == ZBC_ZTYPE_CNV;
3048 }
3049 
3050 static inline bool zbc_zone_is_gap(struct sdeb_zone_state *zsp)
3051 {
3052 	return zsp->z_type == ZBC_ZTYPE_GAP;
3053 }
3054 
3055 static inline bool zbc_zone_is_seq(struct sdeb_zone_state *zsp)
3056 {
3057 	return !zbc_zone_is_conv(zsp) && !zbc_zone_is_gap(zsp);
3058 }
3059 
3060 static void zbc_close_zone(struct sdebug_dev_info *devip,
3061 			   struct sdeb_zone_state *zsp)
3062 {
3063 	enum sdebug_z_cond zc;
3064 
3065 	if (!zbc_zone_is_seq(zsp))
3066 		return;
3067 
3068 	zc = zsp->z_cond;
3069 	if (!(zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN))
3070 		return;
3071 
3072 	if (zc == ZC2_IMPLICIT_OPEN)
3073 		devip->nr_imp_open--;
3074 	else
3075 		devip->nr_exp_open--;
3076 
3077 	if (zsp->z_wp == zsp->z_start) {
3078 		zsp->z_cond = ZC1_EMPTY;
3079 	} else {
3080 		zsp->z_cond = ZC4_CLOSED;
3081 		devip->nr_closed++;
3082 	}
3083 }
3084 
3085 static void zbc_close_imp_open_zone(struct sdebug_dev_info *devip)
3086 {
3087 	struct sdeb_zone_state *zsp = &devip->zstate[0];
3088 	unsigned int i;
3089 
3090 	for (i = 0; i < devip->nr_zones; i++, zsp++) {
3091 		if (zsp->z_cond == ZC2_IMPLICIT_OPEN) {
3092 			zbc_close_zone(devip, zsp);
3093 			return;
3094 		}
3095 	}
3096 }
3097 
3098 static void zbc_open_zone(struct sdebug_dev_info *devip,
3099 			  struct sdeb_zone_state *zsp, bool explicit)
3100 {
3101 	enum sdebug_z_cond zc;
3102 
3103 	if (!zbc_zone_is_seq(zsp))
3104 		return;
3105 
3106 	zc = zsp->z_cond;
3107 	if ((explicit && zc == ZC3_EXPLICIT_OPEN) ||
3108 	    (!explicit && zc == ZC2_IMPLICIT_OPEN))
3109 		return;
3110 
3111 	/* Close an implicit open zone if necessary */
3112 	if (explicit && zsp->z_cond == ZC2_IMPLICIT_OPEN)
3113 		zbc_close_zone(devip, zsp);
3114 	else if (devip->max_open &&
3115 		 devip->nr_imp_open + devip->nr_exp_open >= devip->max_open)
3116 		zbc_close_imp_open_zone(devip);
3117 
3118 	if (zsp->z_cond == ZC4_CLOSED)
3119 		devip->nr_closed--;
3120 	if (explicit) {
3121 		zsp->z_cond = ZC3_EXPLICIT_OPEN;
3122 		devip->nr_exp_open++;
3123 	} else {
3124 		zsp->z_cond = ZC2_IMPLICIT_OPEN;
3125 		devip->nr_imp_open++;
3126 	}
3127 }
3128 
3129 static inline void zbc_set_zone_full(struct sdebug_dev_info *devip,
3130 				     struct sdeb_zone_state *zsp)
3131 {
3132 	switch (zsp->z_cond) {
3133 	case ZC2_IMPLICIT_OPEN:
3134 		devip->nr_imp_open--;
3135 		break;
3136 	case ZC3_EXPLICIT_OPEN:
3137 		devip->nr_exp_open--;
3138 		break;
3139 	default:
3140 		WARN_ONCE(true, "Invalid zone %llu condition %x\n",
3141 			  zsp->z_start, zsp->z_cond);
3142 		break;
3143 	}
3144 	zsp->z_cond = ZC5_FULL;
3145 }
3146 
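/*
 * Advance the write pointer of the zone(s) covering lba..lba+num-1. For
 * sequential-write-required (SWR) zones the WP simply advances by num;
 * for other sequential (write-preferred) zones the loop below also handles
 * out-of-WP-order writes and zone crossings, flagging zones written out of
 * order as using a non-sequential resource.
 */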
3147 static void zbc_inc_wp(struct sdebug_dev_info *devip,
3148 		       unsigned long long lba, unsigned int num)
3149 {
3150 	struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
3151 	unsigned long long n, end, zend = zsp->z_start + zsp->z_size;
3152 
3153 	if (!zbc_zone_is_seq(zsp))
3154 		return;
3155 
3156 	if (zsp->z_type == ZBC_ZTYPE_SWR) {
3157 		zsp->z_wp += num;
3158 		if (zsp->z_wp >= zend)
3159 			zbc_set_zone_full(devip, zsp);
3160 		return;
3161 	}
3162 
3163 	while (num) {
3164 		if (lba != zsp->z_wp)
3165 			zsp->z_non_seq_resource = true;
3166 
3167 		end = lba + num;
3168 		if (end >= zend) {
3169 			n = zend - lba;
3170 			zsp->z_wp = zend;
3171 		} else if (end > zsp->z_wp) {
3172 			n = num;
3173 			zsp->z_wp = end;
3174 		} else {
3175 			n = num;
3176 		}
3177 		if (zsp->z_wp >= zend)
3178 			zbc_set_zone_full(devip, zsp);
3179 
3180 		num -= n;
3181 		lba += n;
3182 		if (num) {
3183 			zsp++;
3184 			zend = zsp->z_start + zsp->z_size;
3185 		}
3186 	}
3187 }
3188 
3189 static int check_zbc_access_params(struct scsi_cmnd *scp,
3190 			unsigned long long lba, unsigned int num, bool write)
3191 {
3192 	struct scsi_device *sdp = scp->device;
3193 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
3194 	struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
3195 	struct sdeb_zone_state *zsp_end = zbc_zone(devip, lba + num - 1);
3196 
3197 	if (!write) {
3198 		/* For host-managed, reads cannot cross zone type boundaries */
3199 		if (zsp->z_type != zsp_end->z_type) {
3200 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
3201 					LBA_OUT_OF_RANGE,
3202 					READ_INVDATA_ASCQ);
3203 			return check_condition_result;
3204 		}
3205 		return 0;
3206 	}
3207 
3208 	/* Writing into a gap zone is not allowed */
3209 	if (zbc_zone_is_gap(zsp)) {
3210 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE,
3211 				ATTEMPT_ACCESS_GAP);
3212 		return check_condition_result;
3213 	}
3214 
3215 	/* Writes wholly within conventional zones are unrestricted */
3216 	if (zbc_zone_is_conv(zsp)) {
3217 		if (!zbc_zone_is_conv(zsp_end)) {
3218 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
3219 					LBA_OUT_OF_RANGE,
3220 					WRITE_BOUNDARY_ASCQ);
3221 			return check_condition_result;
3222 		}
3223 		return 0;
3224 	}
3225 
3226 	if (zsp->z_type == ZBC_ZTYPE_SWR) {
3227 		/* Writes cannot cross sequential zone boundaries */
3228 		if (zsp_end != zsp) {
3229 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
3230 					LBA_OUT_OF_RANGE,
3231 					WRITE_BOUNDARY_ASCQ);
3232 			return check_condition_result;
3233 		}
3234 		/* Cannot write to full zones */
3235 		if (zsp->z_cond == ZC5_FULL) {
3236 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
3237 					INVALID_FIELD_IN_CDB, 0);
3238 			return check_condition_result;
3239 		}
3240 		/* Writes must be aligned to the zone WP */
3241 		if (lba != zsp->z_wp) {
3242 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
3243 					LBA_OUT_OF_RANGE,
3244 					UNALIGNED_WRITE_ASCQ);
3245 			return check_condition_result;
3246 		}
3247 	}
3248 
3249 	/* Handle implicit open of closed and empty zones */
3250 	if (zsp->z_cond == ZC1_EMPTY || zsp->z_cond == ZC4_CLOSED) {
3251 		if (devip->max_open &&
3252 		    devip->nr_exp_open >= devip->max_open) {
3253 			mk_sense_buffer(scp, DATA_PROTECT,
3254 					INSUFF_RES_ASC,
3255 					INSUFF_ZONE_ASCQ);
3256 			return check_condition_result;
3257 		}
3258 		zbc_open_zone(devip, zsp, false);
3259 	}
3260 
3261 	return 0;
3262 }
3263 
3264 static inline int check_device_access_params
3265 			(struct scsi_cmnd *scp, unsigned long long lba,
3266 			 unsigned int num, bool write)
3267 {
3268 	struct scsi_device *sdp = scp->device;
3269 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
3270 
3271 	if (lba + num > sdebug_capacity) {
3272 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
3273 		return check_condition_result;
3274 	}
3275 	/* transfer length excessive (tie in to block limits VPD page) */
3276 	if (num > sdebug_store_sectors) {
3277 		/* needs work to find which cdb byte 'num' comes from */
3278 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3279 		return check_condition_result;
3280 	}
3281 	if (write && unlikely(sdebug_wp)) {
3282 		mk_sense_buffer(scp, DATA_PROTECT, WRITE_PROTECTED, 0x2);
3283 		return check_condition_result;
3284 	}
3285 	if (sdebug_dev_is_zoned(devip))
3286 		return check_zbc_access_params(scp, lba, num, write);
3287 
3288 	return 0;
3289 }
3290 
3291 /*
3292  * Note: if BUG_ON() fires it usually indicates a problem with the parser
3293  * tables. Perhaps a missing F_FAKE_RW or FF_MEDIA_IO flag. Response functions
3294  * that access any of the "stores" in struct sdeb_store_info should call this
3295  * function with bug_if_fake_rw set to true.
3296  */
3297 static inline struct sdeb_store_info *devip2sip(struct sdebug_dev_info *devip,
3298 						bool bug_if_fake_rw)
3299 {
3300 	if (sdebug_fake_rw) {
3301 		BUG_ON(bug_if_fake_rw);	/* See note above */
3302 		return NULL;
3303 	}
3304 	return xa_load(per_store_ap, devip->sdbg_host->si_idx);
3305 }
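/*
 * Usage sketch: response functions that really access the store call
 * devip2sip(devip, true) and rely on the BUG_ON to catch parser-table
 * mistakes; functions that may legitimately run in fake_rw mode pass
 * false and must handle the resulting NULL (e.g. do_device_access()
 * treats a NULL sip as a zero-length transfer).
 */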
3306 
3307 /* Returns number of bytes copied or -1 if error. */
3308 static int do_device_access(struct sdeb_store_info *sip, struct scsi_cmnd *scp,
3309 			    u32 sg_skip, u64 lba, u32 num, bool do_write)
3310 {
3311 	int ret;
3312 	u64 block, rest = 0;
3313 	enum dma_data_direction dir;
3314 	struct scsi_data_buffer *sdb = &scp->sdb;
3315 	u8 *fsp;
3316 
3317 	if (do_write) {
3318 		dir = DMA_TO_DEVICE;
3319 		write_since_sync = true;
3320 	} else {
3321 		dir = DMA_FROM_DEVICE;
3322 	}
3323 
3324 	if (!sdb->length || !sip)
3325 		return 0;
3326 	if (scp->sc_data_direction != dir)
3327 		return -1;
3328 	fsp = sip->storep;
3329 
3330 	block = do_div(lba, sdebug_store_sectors);
3331 	if (block + num > sdebug_store_sectors)
3332 		rest = block + num - sdebug_store_sectors;
3333 
3334 	ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
3335 		   fsp + (block * sdebug_sector_size),
3336 		   (num - rest) * sdebug_sector_size, sg_skip, do_write);
3337 	if (ret != (num - rest) * sdebug_sector_size)
3338 		return ret;
3339 
3340 	if (rest) {
3341 		ret += sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
3342 			    fsp, rest * sdebug_sector_size,
3343 			    sg_skip + ((num - rest) * sdebug_sector_size),
3344 			    do_write);
3345 	}
3346 
3347 	return ret;
3348 }
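/*
 * When the virtual capacity exceeds the backing store (sdebug_capacity >
 * sdebug_store_sectors, e.g. with virtual_gb set), LBAs wrap modulo the
 * store size; 'rest' above covers a transfer that spans the wrap point.
 */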
3349 
3350 /* Returns number of bytes copied or -1 if error. */
3351 static int do_dout_fetch(struct scsi_cmnd *scp, u32 num, u8 *doutp)
3352 {
3353 	struct scsi_data_buffer *sdb = &scp->sdb;
3354 
3355 	if (!sdb->length)
3356 		return 0;
3357 	if (scp->sc_data_direction != DMA_TO_DEVICE)
3358 		return -1;
3359 	return sg_copy_buffer(sdb->table.sgl, sdb->table.nents, doutp,
3360 			      num * sdebug_sector_size, 0, true);
3361 }
3362 
3363 /* If the first num blocks at sip->storep+lba compare equal to the first
3364  * half of arr, then copy the second half of arr into sip->storep+lba and
3365  * return true. If the comparison fails then return false. */
3366 static bool comp_write_worker(struct sdeb_store_info *sip, u64 lba, u32 num,
3367 			      const u8 *arr, bool compare_only)
3368 {
3369 	bool res;
3370 	u64 block, rest = 0;
3371 	u32 store_blks = sdebug_store_sectors;
3372 	u32 lb_size = sdebug_sector_size;
3373 	u8 *fsp = sip->storep;
3374 
3375 	block = do_div(lba, store_blks);
3376 	if (block + num > store_blks)
3377 		rest = block + num - store_blks;
3378 
3379 	res = !memcmp(fsp + (block * lb_size), arr, (num - rest) * lb_size);
3380 	if (!res)
3381 		return res;
3382 	if (rest)
3383 		res = !memcmp(fsp, arr + ((num - rest) * lb_size),
3384 			      rest * lb_size);
3385 	if (!res)
3386 		return res;
3387 	if (compare_only)
3388 		return true;
3389 	arr += num * lb_size;
3390 	memcpy(fsp + (block * lb_size), arr, (num - rest) * lb_size);
3391 	if (rest)
3392 		memcpy(fsp, arr + ((num - rest) * lb_size), rest * lb_size);
3393 	return res;
3394 }
3395 
3396 static __be16 dif_compute_csum(const void *buf, int len)
3397 {
3398 	__be16 csum;
3399 
3400 	if (sdebug_guard)
3401 		csum = (__force __be16)ip_compute_csum(buf, len);
3402 	else
3403 		csum = cpu_to_be16(crc_t10dif(buf, len));
3404 
3405 	return csum;
3406 }
3407 
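/*
 * Verify one sector's PI tuple. Returns 0 on success, 0x01 on a guard tag
 * mismatch and 0x03 on a reference tag mismatch; callers feed the non-zero
 * value straight into the sense ASCQ (ASC 0x10).
 */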
3408 static int dif_verify(struct t10_pi_tuple *sdt, const void *data,
3409 		      sector_t sector, u32 ei_lba)
3410 {
3411 	__be16 csum = dif_compute_csum(data, sdebug_sector_size);
3412 
3413 	if (sdt->guard_tag != csum) {
3414 		pr_err("GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
3415 			(unsigned long)sector,
3416 			be16_to_cpu(sdt->guard_tag),
3417 			be16_to_cpu(csum));
3418 		return 0x01;
3419 	}
3420 	if (sdebug_dif == T10_PI_TYPE1_PROTECTION &&
3421 	    be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
3422 		pr_err("REF check failed on sector %lu\n",
3423 			(unsigned long)sector);
3424 		return 0x03;
3425 	}
3426 	if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3427 	    be32_to_cpu(sdt->ref_tag) != ei_lba) {
3428 		pr_err("REF check failed on sector %lu\n",
3429 			(unsigned long)sector);
3430 		return 0x03;
3431 	}
3432 	return 0;
3433 }
3434 
3435 static void dif_copy_prot(struct scsi_cmnd *scp, sector_t sector,
3436 			  unsigned int sectors, bool read)
3437 {
3438 	size_t resid;
3439 	void *paddr;
3440 	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3441 						scp->device->hostdata, true);
3442 	struct t10_pi_tuple *dif_storep = sip->dif_storep;
3443 	const void *dif_store_end = dif_storep + sdebug_store_sectors;
3444 	struct sg_mapping_iter miter;
3445 
3446 	/* Bytes of protection data to copy into sgl */
3447 	resid = sectors * sizeof(*dif_storep);
3448 
3449 	sg_miter_start(&miter, scsi_prot_sglist(scp),
3450 		       scsi_prot_sg_count(scp), SG_MITER_ATOMIC |
3451 		       (read ? SG_MITER_TO_SG : SG_MITER_FROM_SG));
3452 
3453 	while (sg_miter_next(&miter) && resid > 0) {
3454 		size_t len = min_t(size_t, miter.length, resid);
3455 		void *start = dif_store(sip, sector);
3456 		size_t rest = 0;
3457 
3458 		if (dif_store_end < start + len)
3459 			rest = start + len - dif_store_end;
3460 
3461 		paddr = miter.addr;
3462 
3463 		if (read)
3464 			memcpy(paddr, start, len - rest);
3465 		else
3466 			memcpy(start, paddr, len - rest);
3467 
3468 		if (rest) {
3469 			if (read)
3470 				memcpy(paddr + len - rest, dif_storep, rest);
3471 			else
3472 				memcpy(dif_storep, paddr + len - rest, rest);
3473 		}
3474 
3475 		sector += len / sizeof(*dif_storep);
3476 		resid -= len;
3477 	}
3478 	sg_miter_stop(&miter);
3479 }
3480 
3481 static int prot_verify_read(struct scsi_cmnd *scp, sector_t start_sec,
3482 			    unsigned int sectors, u32 ei_lba)
3483 {
3484 	int ret = 0;
3485 	unsigned int i;
3486 	sector_t sector;
3487 	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3488 						scp->device->hostdata, true);
3489 	struct t10_pi_tuple *sdt;
3490 
3491 	for (i = 0; i < sectors; i++, ei_lba++) {
3492 		sector = start_sec + i;
3493 		sdt = dif_store(sip, sector);
3494 
3495 		if (sdt->app_tag == cpu_to_be16(0xffff))
3496 			continue;
3497 
3498 		/*
3499 		 * Because scsi_debug acts as both initiator and
3500 		 * target we proceed to verify the PI even if
3501 		 * RDPROTECT=3. This is done so the "initiator" knows
3502 		 * which type of error to return. Otherwise we would
3503 		 * have to iterate over the PI twice.
3504 		 */
3505 		if (scp->cmnd[1] >> 5) { /* RDPROTECT */
3506 			ret = dif_verify(sdt, lba2fake_store(sip, sector),
3507 					 sector, ei_lba);
3508 			if (ret) {
3509 				dif_errors++;
3510 				break;
3511 			}
3512 		}
3513 	}
3514 
3515 	dif_copy_prot(scp, start_sec, sectors, true);
3516 	dix_reads++;
3517 
3518 	return ret;
3519 }
3520 
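/*
 * With sdebug_no_rwlock set these helpers take no real lock; the
 * __acquire()/__release() calls are sparse context annotations only,
 * kept so static analysis still sees balanced lock/unlock pairs.
 */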
3521 static inline void
3522 sdeb_read_lock(struct sdeb_store_info *sip)
3523 {
3524 	if (sdebug_no_rwlock) {
3525 		if (sip)
3526 			__acquire(&sip->macc_lck);
3527 		else
3528 			__acquire(&sdeb_fake_rw_lck);
3529 	} else {
3530 		if (sip)
3531 			read_lock(&sip->macc_lck);
3532 		else
3533 			read_lock(&sdeb_fake_rw_lck);
3534 	}
3535 }
3536 
3537 static inline void
3538 sdeb_read_unlock(struct sdeb_store_info *sip)
3539 {
3540 	if (sdebug_no_rwlock) {
3541 		if (sip)
3542 			__release(&sip->macc_lck);
3543 		else
3544 			__release(&sdeb_fake_rw_lck);
3545 	} else {
3546 		if (sip)
3547 			read_unlock(&sip->macc_lck);
3548 		else
3549 			read_unlock(&sdeb_fake_rw_lck);
3550 	}
3551 }
3552 
3553 static inline void
3554 sdeb_write_lock(struct sdeb_store_info *sip)
3555 {
3556 	if (sdebug_no_rwlock) {
3557 		if (sip)
3558 			__acquire(&sip->macc_lck);
3559 		else
3560 			__acquire(&sdeb_fake_rw_lck);
3561 	} else {
3562 		if (sip)
3563 			write_lock(&sip->macc_lck);
3564 		else
3565 			write_lock(&sdeb_fake_rw_lck);
3566 	}
3567 }
3568 
3569 static inline void
3570 sdeb_write_unlock(struct sdeb_store_info *sip)
3571 {
3572 	if (sdebug_no_rwlock) {
3573 		if (sip)
3574 			__release(&sip->macc_lck);
3575 		else
3576 			__release(&sdeb_fake_rw_lck);
3577 	} else {
3578 		if (sip)
3579 			write_unlock(&sip->macc_lck);
3580 		else
3581 			write_unlock(&sdeb_fake_rw_lck);
3582 	}
3583 }
3584 
3585 static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3586 {
3587 	bool check_prot;
3588 	u32 num;
3589 	u32 ei_lba;
3590 	int ret;
3591 	u64 lba;
3592 	struct sdeb_store_info *sip = devip2sip(devip, true);
3593 	u8 *cmd = scp->cmnd;
3594 
3595 	switch (cmd[0]) {
3596 	case READ_16:
3597 		ei_lba = 0;
3598 		lba = get_unaligned_be64(cmd + 2);
3599 		num = get_unaligned_be32(cmd + 10);
3600 		check_prot = true;
3601 		break;
3602 	case READ_10:
3603 		ei_lba = 0;
3604 		lba = get_unaligned_be32(cmd + 2);
3605 		num = get_unaligned_be16(cmd + 7);
3606 		check_prot = true;
3607 		break;
3608 	case READ_6:
3609 		ei_lba = 0;
3610 		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
3611 		      (u32)(cmd[1] & 0x1f) << 16;
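		/* in READ(6) and WRITE(6) a transfer length of 0 means 256 blocks */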
3612 		num = (0 == cmd[4]) ? 256 : cmd[4];
3613 		check_prot = true;
3614 		break;
3615 	case READ_12:
3616 		ei_lba = 0;
3617 		lba = get_unaligned_be32(cmd + 2);
3618 		num = get_unaligned_be32(cmd + 6);
3619 		check_prot = true;
3620 		break;
3621 	case XDWRITEREAD_10:
3622 		ei_lba = 0;
3623 		lba = get_unaligned_be32(cmd + 2);
3624 		num = get_unaligned_be16(cmd + 7);
3625 		check_prot = false;
3626 		break;
3627 	default:	/* assume READ(32) */
3628 		lba = get_unaligned_be64(cmd + 12);
3629 		ei_lba = get_unaligned_be32(cmd + 20);
3630 		num = get_unaligned_be32(cmd + 28);
3631 		check_prot = false;
3632 		break;
3633 	}
3634 	if (unlikely(have_dif_prot && check_prot)) {
3635 		if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3636 		    (cmd[1] & 0xe0)) {
3637 			mk_sense_invalid_opcode(scp);
3638 			return check_condition_result;
3639 		}
3640 		if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3641 		     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3642 		    (cmd[1] & 0xe0) == 0)
3643 			sdev_printk(KERN_ERR, scp->device, "Unprotected RD "
3644 				    "to DIF device\n");
3645 	}
3646 	if (unlikely((sdebug_opts & SDEBUG_OPT_SHORT_TRANSFER) &&
3647 		     atomic_read(&sdeb_inject_pending))) {
3648 		num /= 2;
3649 		atomic_set(&sdeb_inject_pending, 0);
3650 	}
3651 
3652 	ret = check_device_access_params(scp, lba, num, false);
3653 	if (ret)
3654 		return ret;
3655 	if (unlikely((SDEBUG_OPT_MEDIUM_ERR & sdebug_opts) &&
3656 		     (lba <= (sdebug_medium_error_start + sdebug_medium_error_count - 1)) &&
3657 		     ((lba + num) > sdebug_medium_error_start))) {
3658 		/* claim unrecoverable read error */
3659 		mk_sense_buffer(scp, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
3660 		/* set info field and valid bit for fixed descriptor */
3661 		if (0x70 == (scp->sense_buffer[0] & 0x7f)) {
3662 			scp->sense_buffer[0] |= 0x80;	/* Valid bit */
3663 			ret = (lba < OPT_MEDIUM_ERR_ADDR)
3664 			      ? OPT_MEDIUM_ERR_ADDR : (int)lba;
3665 			put_unaligned_be32(ret, scp->sense_buffer + 3);
3666 		}
3667 		scsi_set_resid(scp, scsi_bufflen(scp));
3668 		return check_condition_result;
3669 	}
3670 
3671 	sdeb_read_lock(sip);
3672 
3673 	/* DIX + T10 DIF */
3674 	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3675 		switch (prot_verify_read(scp, lba, num, ei_lba)) {
3676 		case 1: /* Guard tag error */
3677 			if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */
3678 				sdeb_read_unlock(sip);
3679 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3680 				return check_condition_result;
3681 			} else if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) {
3682 				sdeb_read_unlock(sip);
3683 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3684 				return illegal_condition_result;
3685 			}
3686 			break;
3687 		case 3: /* Reference tag error */
3688 			if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */
3689 				sdeb_read_unlock(sip);
3690 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3);
3691 				return check_condition_result;
3692 			} else if (scp->prot_flags & SCSI_PROT_REF_CHECK) {
3693 				sdeb_read_unlock(sip);
3694 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3);
3695 				return illegal_condition_result;
3696 			}
3697 			break;
3698 		}
3699 	}
3700 
3701 	ret = do_device_access(sip, scp, 0, lba, num, false);
3702 	sdeb_read_unlock(sip);
3703 	if (unlikely(ret == -1))
3704 		return DID_ERROR << 16;
3705 
3706 	scsi_set_resid(scp, scsi_bufflen(scp) - ret);
3707 
3708 	if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
3709 		     atomic_read(&sdeb_inject_pending))) {
3710 		if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
3711 			mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
3712 			atomic_set(&sdeb_inject_pending, 0);
3713 			return check_condition_result;
3714 		} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
3715 			/* Logical block guard check failed */
3716 			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3717 			atomic_set(&sdeb_inject_pending, 0);
3718 			return illegal_condition_result;
3719 		} else if (SDEBUG_OPT_DIX_ERR & sdebug_opts) {
3720 			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3721 			atomic_set(&sdeb_inject_pending, 0);
3722 			return illegal_condition_result;
3723 		}
3724 	}
3725 	return 0;
3726 }
3727 
3728 static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
3729 			     unsigned int sectors, u32 ei_lba)
3730 {
3731 	int ret;
3732 	struct t10_pi_tuple *sdt;
3733 	void *daddr;
3734 	sector_t sector = start_sec;
3735 	int ppage_offset;
3736 	int dpage_offset;
3737 	struct sg_mapping_iter diter;
3738 	struct sg_mapping_iter piter;
3739 
3740 	BUG_ON(scsi_sg_count(SCpnt) == 0);
3741 	BUG_ON(scsi_prot_sg_count(SCpnt) == 0);
3742 
3743 	sg_miter_start(&piter, scsi_prot_sglist(SCpnt),
3744 			scsi_prot_sg_count(SCpnt),
3745 			SG_MITER_ATOMIC | SG_MITER_FROM_SG);
3746 	sg_miter_start(&diter, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
3747 			SG_MITER_ATOMIC | SG_MITER_FROM_SG);
3748 
3749 	/* For each protection page */
3750 	while (sg_miter_next(&piter)) {
3751 		dpage_offset = 0;
3752 		if (WARN_ON(!sg_miter_next(&diter))) {
3753 			ret = 0x01;
3754 			goto out;
3755 		}
3756 
3757 		for (ppage_offset = 0; ppage_offset < piter.length;
3758 		     ppage_offset += sizeof(struct t10_pi_tuple)) {
3759 			/* If we're at the end of the current
3760 			 * data page advance to the next one
3761 			 */
3762 			if (dpage_offset >= diter.length) {
3763 				if (WARN_ON(!sg_miter_next(&diter))) {
3764 					ret = 0x01;
3765 					goto out;
3766 				}
3767 				dpage_offset = 0;
3768 			}
3769 
3770 			sdt = piter.addr + ppage_offset;
3771 			daddr = diter.addr + dpage_offset;
3772 
3773 			if (SCpnt->cmnd[1] >> 5 != 3) { /* WRPROTECT */
3774 				ret = dif_verify(sdt, daddr, sector, ei_lba);
3775 				if (ret)
3776 					goto out;
3777 			}
3778 
3779 			sector++;
3780 			ei_lba++;
3781 			dpage_offset += sdebug_sector_size;
3782 		}
3783 		diter.consumed = dpage_offset;
3784 		sg_miter_stop(&diter);
3785 	}
3786 	sg_miter_stop(&piter);
3787 
3788 	dif_copy_prot(SCpnt, start_sec, sectors, false);
3789 	dix_writes++;
3790 
3791 	return 0;
3792 
3793 out:
3794 	dif_errors++;
3795 	sg_miter_stop(&diter);
3796 	sg_miter_stop(&piter);
3797 	return ret;
3798 }
3799 
3800 static unsigned long lba_to_map_index(sector_t lba)
3801 {
3802 	if (sdebug_unmap_alignment)
3803 		lba += sdebug_unmap_granularity - sdebug_unmap_alignment;
3804 	sector_div(lba, sdebug_unmap_granularity);
3805 	return lba;
3806 }
3807 
3808 static sector_t map_index_to_lba(unsigned long index)
3809 {
3810 	sector_t lba = index * sdebug_unmap_granularity;
3811 
3812 	if (sdebug_unmap_alignment)
3813 		lba -= sdebug_unmap_granularity - sdebug_unmap_alignment;
3814 	return lba;
3815 }
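/*
 * Worked example (assumed values): with sdebug_unmap_granularity = 8 and
 * sdebug_unmap_alignment = 2, lba_to_map_index(2) = (2 + 6) / 8 = 1 and
 * map_index_to_lba(1) = 8 - 6 = 2, so map bit 1 tracks LBAs 2..9.
 */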
3816 
3817 static unsigned int map_state(struct sdeb_store_info *sip, sector_t lba,
3818 			      unsigned int *num)
3819 {
3820 	sector_t end;
3821 	unsigned int mapped;
3822 	unsigned long index;
3823 	unsigned long next;
3824 
3825 	index = lba_to_map_index(lba);
3826 	mapped = test_bit(index, sip->map_storep);
3827 
3828 	if (mapped)
3829 		next = find_next_zero_bit(sip->map_storep, map_size, index);
3830 	else
3831 		next = find_next_bit(sip->map_storep, map_size, index);
3832 
3833 	end = min_t(sector_t, sdebug_store_sectors,  map_index_to_lba(next));
3834 	*num = end - lba;
3835 	return mapped;
3836 }
3837 
3838 static void map_region(struct sdeb_store_info *sip, sector_t lba,
3839 		       unsigned int len)
3840 {
3841 	sector_t end = lba + len;
3842 
3843 	while (lba < end) {
3844 		unsigned long index = lba_to_map_index(lba);
3845 
3846 		if (index < map_size)
3847 			set_bit(index, sip->map_storep);
3848 
3849 		lba = map_index_to_lba(index + 1);
3850 	}
3851 }
3852 
3853 static void unmap_region(struct sdeb_store_info *sip, sector_t lba,
3854 			 unsigned int len)
3855 {
3856 	sector_t end = lba + len;
3857 	u8 *fsp = sip->storep;
3858 
3859 	while (lba < end) {
3860 		unsigned long index = lba_to_map_index(lba);
3861 
3862 		if (lba == map_index_to_lba(index) &&
3863 		    lba + sdebug_unmap_granularity <= end &&
3864 		    index < map_size) {
3865 			clear_bit(index, sip->map_storep);
3866 			if (sdebug_lbprz) {  /* LBPRZ=1: read back 0s; LBPRZ=2: 0xff bytes */
3867 				memset(fsp + lba * sdebug_sector_size,
3868 				       (sdebug_lbprz & 1) ? 0 : 0xff,
3869 				       sdebug_sector_size *
3870 				       sdebug_unmap_granularity);
3871 			}
3872 			if (sip->dif_storep) {
3873 				memset(sip->dif_storep + lba, 0xff,
3874 				       sizeof(*sip->dif_storep) *
3875 				       sdebug_unmap_granularity);
3876 			}
3877 		}
3878 		lba = map_index_to_lba(index + 1);
3879 	}
3880 }
3881 
3882 static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3883 {
3884 	bool check_prot;
3885 	u32 num;
3886 	u32 ei_lba;
3887 	int ret;
3888 	u64 lba;
3889 	struct sdeb_store_info *sip = devip2sip(devip, true);
3890 	u8 *cmd = scp->cmnd;
3891 
3892 	switch (cmd[0]) {
3893 	case WRITE_16:
3894 		ei_lba = 0;
3895 		lba = get_unaligned_be64(cmd + 2);
3896 		num = get_unaligned_be32(cmd + 10);
3897 		check_prot = true;
3898 		break;
3899 	case WRITE_10:
3900 		ei_lba = 0;
3901 		lba = get_unaligned_be32(cmd + 2);
3902 		num = get_unaligned_be16(cmd + 7);
3903 		check_prot = true;
3904 		break;
3905 	case WRITE_6:
3906 		ei_lba = 0;
3907 		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
3908 		      (u32)(cmd[1] & 0x1f) << 16;
3909 		num = (0 == cmd[4]) ? 256 : cmd[4];
3910 		check_prot = true;
3911 		break;
3912 	case WRITE_12:
3913 		ei_lba = 0;
3914 		lba = get_unaligned_be32(cmd + 2);
3915 		num = get_unaligned_be32(cmd + 6);
3916 		check_prot = true;
3917 		break;
3918 	case XDWRITEREAD_10:
3919 		ei_lba = 0;
3920 		lba = get_unaligned_be32(cmd + 2);
3921 		num = get_unaligned_be16(cmd + 7);
3922 		check_prot = false;
3923 		break;
3924 	default:	/* assume WRITE(32) */
3925 		lba = get_unaligned_be64(cmd + 12);
3926 		ei_lba = get_unaligned_be32(cmd + 20);
3927 		num = get_unaligned_be32(cmd + 28);
3928 		check_prot = false;
3929 		break;
3930 	}
3931 	if (unlikely(have_dif_prot && check_prot)) {
3932 		if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3933 		    (cmd[1] & 0xe0)) {
3934 			mk_sense_invalid_opcode(scp);
3935 			return check_condition_result;
3936 		}
3937 		if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3938 		     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3939 		    (cmd[1] & 0xe0) == 0)
3940 			sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
3941 				    "to DIF device\n");
3942 	}
3943 
3944 	sdeb_write_lock(sip);
3945 	ret = check_device_access_params(scp, lba, num, true);
3946 	if (ret) {
3947 		sdeb_write_unlock(sip);
3948 		return ret;
3949 	}
3950 
3951 	/* DIX + T10 DIF */
3952 	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3953 		switch (prot_verify_write(scp, lba, num, ei_lba)) {
3954 		case 1: /* Guard tag error */
3955 			if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) {
3956 				sdeb_write_unlock(sip);
3957 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3958 				return illegal_condition_result;
3959 			} else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */
3960 				sdeb_write_unlock(sip);
3961 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3962 				return check_condition_result;
3963 			}
3964 			break;
3965 		case 3: /* Reference tag error */
3966 			if (scp->prot_flags & SCSI_PROT_REF_CHECK) {
3967 				sdeb_write_unlock(sip);
3968 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3);
3969 				return illegal_condition_result;
3970 			} else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */
3971 				sdeb_write_unlock(sip);
3972 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3);
3973 				return check_condition_result;
3974 			}
3975 			break;
3976 		}
3977 	}
3978 
3979 	ret = do_device_access(sip, scp, 0, lba, num, true);
3980 	if (unlikely(scsi_debug_lbp()))
3981 		map_region(sip, lba, num);
3982 	/* If ZBC zone then bump its write pointer */
3983 	if (sdebug_dev_is_zoned(devip))
3984 		zbc_inc_wp(devip, lba, num);
3985 	sdeb_write_unlock(sip);
3986 	if (unlikely(-1 == ret))
3987 		return DID_ERROR << 16;
3988 	else if (unlikely(sdebug_verbose &&
3989 			  (ret < (num * sdebug_sector_size))))
3990 		sdev_printk(KERN_INFO, scp->device,
3991 			    "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
3992 			    my_name, num * sdebug_sector_size, ret);
3993 
3994 	if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
3995 		     atomic_read(&sdeb_inject_pending))) {
3996 		if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
3997 			mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
3998 			atomic_set(&sdeb_inject_pending, 0);
3999 			return check_condition_result;
4000 		} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
4001 			/* Logical block guard check failed */
4002 			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
4003 			atomic_set(&sdeb_inject_pending, 0);
4004 			return illegal_condition_result;
4005 		} else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
4006 			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
4007 			atomic_set(&sdeb_inject_pending, 0);
4008 			return illegal_condition_result;
4009 		}
4010 	}
4011 	return 0;
4012 }
4013 
4014 /*
4015  * T10 has only specified WRITE SCATTERED(16) and WRITE SCATTERED(32).
4016  * No READ GATHERED yet (requires bidi or long cdb holding gather list).
4017  */
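/*
 * Layout sketch of the data-out buffer, as parsed below: a 32-byte
 * parameter list header, then num_lrd 32-byte LBA range descriptors (LBA
 * at offset 0, number of blocks at offset 8 and, for the 32-byte cdb, the
 * expected initial LBA at offset 12), then the write data itself starting
 * at lbdof * block size.
 */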
4018 static int resp_write_scat(struct scsi_cmnd *scp,
4019 			   struct sdebug_dev_info *devip)
4020 {
4021 	u8 *cmd = scp->cmnd;
4022 	u8 *lrdp = NULL;
4023 	u8 *up;
4024 	struct sdeb_store_info *sip = devip2sip(devip, true);
4025 	u8 wrprotect;
4026 	u16 lbdof, num_lrd, k;
4027 	u32 num, num_by, bt_len, lbdof_blen, sg_off, cum_lb;
4028 	u32 lb_size = sdebug_sector_size;
4029 	u32 ei_lba;
4030 	u64 lba;
4031 	int ret, res;
4032 	bool is_16;
4033 	static const u32 lrd_size = 32; /* LBA range descriptor size; also the parameter list header size */
4034 
4035 	if (cmd[0] == VARIABLE_LENGTH_CMD) {
4036 		is_16 = false;
4037 		wrprotect = (cmd[10] >> 5) & 0x7;
4038 		lbdof = get_unaligned_be16(cmd + 12);
4039 		num_lrd = get_unaligned_be16(cmd + 16);
4040 		bt_len = get_unaligned_be32(cmd + 28);
4041 	} else {        /* that leaves WRITE SCATTERED(16) */
4042 		is_16 = true;
4043 		wrprotect = (cmd[2] >> 5) & 0x7;
4044 		lbdof = get_unaligned_be16(cmd + 4);
4045 		num_lrd = get_unaligned_be16(cmd + 8);
4046 		bt_len = get_unaligned_be32(cmd + 10);
4047 		if (unlikely(have_dif_prot)) {
4048 			if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
4049 			    wrprotect) {
4050 				mk_sense_invalid_opcode(scp);
4051 				return illegal_condition_result;
4052 			}
4053 			if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
4054 			     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
4055 			     wrprotect == 0)
4056 				sdev_printk(KERN_ERR, scp->device,
4057 					    "Unprotected WR to DIF device\n");
4058 		}
4059 	}
4060 	if ((num_lrd == 0) || (bt_len == 0))
4061 		return 0;       /* T10 says these do-nothings are not errors */
4062 	if (lbdof == 0) {
4063 		if (sdebug_verbose)
4064 			sdev_printk(KERN_INFO, scp->device,
4065 				"%s: %s: LB Data Offset field bad\n",
4066 				my_name, __func__);
4067 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4068 		return illegal_condition_result;
4069 	}
4070 	lbdof_blen = lbdof * lb_size;
4071 	if ((lrd_size + (num_lrd * lrd_size)) > lbdof_blen) {
4072 		if (sdebug_verbose)
4073 			sdev_printk(KERN_INFO, scp->device,
4074 				"%s: %s: LBA range descriptors don't fit\n",
4075 				my_name, __func__);
4076 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4077 		return illegal_condition_result;
4078 	}
4079 	lrdp = kzalloc(lbdof_blen, GFP_ATOMIC | __GFP_NOWARN);
4080 	if (lrdp == NULL)
4081 		return SCSI_MLQUEUE_HOST_BUSY;
4082 	if (sdebug_verbose)
4083 		sdev_printk(KERN_INFO, scp->device,
4084 			"%s: %s: Fetch header+scatter_list, lbdof_blen=%u\n",
4085 			my_name, __func__, lbdof_blen);
4086 	res = fetch_to_dev_buffer(scp, lrdp, lbdof_blen);
4087 	if (res == -1) {
4088 		ret = DID_ERROR << 16;
4089 		goto err_out;
4090 	}
4091 
4092 	sdeb_write_lock(sip);
4093 	sg_off = lbdof_blen;
4094 	/* Spec says the Buffer Transfer Length field counts LBs in the data-out */
4095 	cum_lb = 0;
4096 	for (k = 0, up = lrdp + lrd_size; k < num_lrd; ++k, up += lrd_size) {
4097 		lba = get_unaligned_be64(up + 0);
4098 		num = get_unaligned_be32(up + 8);
4099 		if (sdebug_verbose)
4100 			sdev_printk(KERN_INFO, scp->device,
4101 				"%s: %s: k=%d  LBA=0x%llx num=%u  sg_off=%u\n",
4102 				my_name, __func__, k, lba, num, sg_off);
4103 		if (num == 0)
4104 			continue;
4105 		ret = check_device_access_params(scp, lba, num, true);
4106 		if (ret)
4107 			goto err_out_unlock;
4108 		num_by = num * lb_size;
4109 		ei_lba = is_16 ? 0 : get_unaligned_be32(up + 12);
4110 
4111 		if ((cum_lb + num) > bt_len) {
4112 			if (sdebug_verbose)
4113 				sdev_printk(KERN_INFO, scp->device,
4114 				    "%s: %s: sum of blocks > data provided\n",
4115 				    my_name, __func__);
4116 			mk_sense_buffer(scp, ILLEGAL_REQUEST, WRITE_ERROR_ASC,
4117 					0);
4118 			ret = illegal_condition_result;
4119 			goto err_out_unlock;
4120 		}
4121 
4122 		/* DIX + T10 DIF */
4123 		if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
4124 			int prot_ret = prot_verify_write(scp, lba, num,
4125 							 ei_lba);
4126 
4127 			if (prot_ret) {
4128 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10,
4129 						prot_ret);
4130 				ret = illegal_condition_result;
4131 				goto err_out_unlock;
4132 			}
4133 		}
4134 
4135 		ret = do_device_access(sip, scp, sg_off, lba, num, true);
4136 		/* If ZBC zone then bump its write pointer */
4137 		if (sdebug_dev_is_zoned(devip))
4138 			zbc_inc_wp(devip, lba, num);
4139 		if (unlikely(scsi_debug_lbp()))
4140 			map_region(sip, lba, num);
4141 		if (unlikely(-1 == ret)) {
4142 			ret = DID_ERROR << 16;
4143 			goto err_out_unlock;
4144 		} else if (unlikely(sdebug_verbose && (ret < num_by)))
4145 			sdev_printk(KERN_INFO, scp->device,
4146 			    "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
4147 			    my_name, num_by, ret);
4148 
4149 		if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
4150 			     atomic_read(&sdeb_inject_pending))) {
4151 			if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
4152 				mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
4153 				atomic_set(&sdeb_inject_pending, 0);
4154 				ret = check_condition_result;
4155 				goto err_out_unlock;
4156 			} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
4157 				/* Logical block guard check failed */
4158 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
4159 				atomic_set(&sdeb_inject_pending, 0);
4160 				ret = illegal_condition_result;
4161 				goto err_out_unlock;
4162 			} else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
4163 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
4164 				atomic_set(&sdeb_inject_pending, 0);
4165 				ret = illegal_condition_result;
4166 				goto err_out_unlock;
4167 			}
4168 		}
4169 		sg_off += num_by;
4170 		cum_lb += num;
4171 	}
4172 	ret = 0;
4173 err_out_unlock:
4174 	sdeb_write_unlock(sip);
4175 err_out:
4176 	kfree(lrdp);
4177 	return ret;
4178 }
4179 
4180 static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
4181 			   u32 ei_lba, bool unmap, bool ndob)
4182 {
4183 	struct scsi_device *sdp = scp->device;
4184 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
4185 	unsigned long long i;
4186 	u64 block, lbaa;
4187 	u32 lb_size = sdebug_sector_size;
4188 	int ret;
4189 	struct sdeb_store_info *sip = devip2sip(devip, true);
4191 	u8 *fs1p;
4192 	u8 *fsp;
4193 
4194 	sdeb_write_lock(sip);
4195 
4196 	ret = check_device_access_params(scp, lba, num, true);
4197 	if (ret) {
4198 		sdeb_write_unlock(sip);
4199 		return ret;
4200 	}
4201 
4202 	if (unmap && scsi_debug_lbp()) {
4203 		unmap_region(sip, lba, num);
4204 		goto out;
4205 	}
4206 	lbaa = lba;
4207 	block = do_div(lbaa, sdebug_store_sectors);
4208 	/* if ndob then zero 1 logical block, else fetch 1 logical block */
4209 	fsp = sip->storep;
4210 	fs1p = fsp + (block * lb_size);
4211 	if (ndob) {
4212 		memset(fs1p, 0, lb_size);
4213 		ret = 0;
4214 	} else
4215 		ret = fetch_to_dev_buffer(scp, fs1p, lb_size);
4216 
4217 	if (-1 == ret) {
4218 		sdeb_write_unlock(sip);
4219 		return DID_ERROR << 16;
4220 	} else if (sdebug_verbose && !ndob && (ret < lb_size))
4221 		sdev_printk(KERN_INFO, scp->device,
4222 			    "%s: %s: lb size=%u, IO sent=%d bytes\n",
4223 			    my_name, "write same", lb_size, ret);
4224 
4225 	/* Copy first sector to remaining blocks */
4226 	for (i = 1 ; i < num ; i++) {
4227 		lbaa = lba + i;
4228 		block = do_div(lbaa, sdebug_store_sectors);
4229 		memmove(fsp + (block * lb_size), fs1p, lb_size);
4230 	}
4231 	if (scsi_debug_lbp())
4232 		map_region(sip, lba, num);
4233 	/* If ZBC zone then bump its write pointer */
4234 	if (sdebug_dev_is_zoned(devip))
4235 		zbc_inc_wp(devip, lba, num);
4236 out:
4237 	sdeb_write_unlock(sip);
4238 
4239 	return 0;
4240 }
4241 
4242 static int resp_write_same_10(struct scsi_cmnd *scp,
4243 			      struct sdebug_dev_info *devip)
4244 {
4245 	u8 *cmd = scp->cmnd;
4246 	u32 lba;
4247 	u16 num;
4248 	u32 ei_lba = 0;
4249 	bool unmap = false;
4250 
4251 	if (cmd[1] & 0x8) {
4252 		if (sdebug_lbpws10 == 0) {
4253 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
4254 			return check_condition_result;
4255 		} else
4256 			unmap = true;
4257 	}
4258 	lba = get_unaligned_be32(cmd + 2);
4259 	num = get_unaligned_be16(cmd + 7);
4260 	if (num > sdebug_write_same_length) {
4261 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
4262 		return check_condition_result;
4263 	}
4264 	return resp_write_same(scp, lba, num, ei_lba, unmap, false);
4265 }
4266 
4267 static int resp_write_same_16(struct scsi_cmnd *scp,
4268 			      struct sdebug_dev_info *devip)
4269 {
4270 	u8 *cmd = scp->cmnd;
4271 	u64 lba;
4272 	u32 num;
4273 	u32 ei_lba = 0;
4274 	bool unmap = false;
4275 	bool ndob = false;
4276 
4277 	if (cmd[1] & 0x8) {	/* UNMAP */
4278 		if (sdebug_lbpws == 0) {
4279 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
4280 			return check_condition_result;
4281 		} else
4282 			unmap = true;
4283 	}
4284 	if (cmd[1] & 0x1)  /* NDOB (no data-out buffer, assumes zeroes) */
4285 		ndob = true;
4286 	lba = get_unaligned_be64(cmd + 2);
4287 	num = get_unaligned_be32(cmd + 10);
4288 	if (num > sdebug_write_same_length) {
4289 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
4290 		return check_condition_result;
4291 	}
4292 	return resp_write_same(scp, lba, num, ei_lba, unmap, ndob);
4293 }
4294 
4295 /* Note the mode field is in the same position as the (lower) service action
4296  * field. For the REPORT SUPPORTED OPERATION CODES command, SPC-4 suggests
4297  * each mode of this command should be reported separately; left for the future. */
4298 static int resp_write_buffer(struct scsi_cmnd *scp,
4299 			     struct sdebug_dev_info *devip)
4300 {
4301 	u8 *cmd = scp->cmnd;
4302 	struct scsi_device *sdp = scp->device;
4303 	struct sdebug_dev_info *dp;
4304 	u8 mode;
4305 
4306 	mode = cmd[1] & 0x1f;
4307 	switch (mode) {
4308 	case 0x4:	/* download microcode (MC) and activate (ACT) */
4309 		/* set UAs on this device only */
4310 		set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
4311 		set_bit(SDEBUG_UA_MICROCODE_CHANGED, devip->uas_bm);
4312 		break;
4313 	case 0x5:	/* download MC, save and ACT */
4314 		set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET, devip->uas_bm);
4315 		break;
4316 	case 0x6:	/* download MC with offsets and ACT */
4317 		/* set UAs on most devices (LUs) in this target */
4318 		list_for_each_entry(dp,
4319 				    &devip->sdbg_host->dev_info_list,
4320 				    dev_list)
4321 			if (dp->target == sdp->id) {
4322 				set_bit(SDEBUG_UA_BUS_RESET, dp->uas_bm);
4323 				if (devip != dp)
4324 					set_bit(SDEBUG_UA_MICROCODE_CHANGED,
4325 						dp->uas_bm);
4326 			}
4327 		break;
4328 	case 0x7:	/* download MC with offsets, save, and ACT */
4329 		/* set UA on all devices (LUs) in this target */
4330 		list_for_each_entry(dp,
4331 				    &devip->sdbg_host->dev_info_list,
4332 				    dev_list)
4333 			if (dp->target == sdp->id)
4334 				set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET,
4335 					dp->uas_bm);
4336 		break;
4337 	default:
4338 		/* do nothing for this command for other mode values */
4339 		break;
4340 	}
4341 	return 0;
4342 }
4343 
4344 static int resp_comp_write(struct scsi_cmnd *scp,
4345 			   struct sdebug_dev_info *devip)
4346 {
4347 	u8 *cmd = scp->cmnd;
4348 	u8 *arr;
4349 	struct sdeb_store_info *sip = devip2sip(devip, true);
4350 	u64 lba;
4351 	u32 dnum;
4352 	u32 lb_size = sdebug_sector_size;
4353 	u8 num;
4354 	int ret;
4355 	int retval = 0;
4356 
4357 	lba = get_unaligned_be64(cmd + 2);
4358 	num = cmd[13];		/* 1 to a maximum of 255 logical blocks */
4359 	if (0 == num)
4360 		return 0;	/* degenerate case, not an error */
4361 	if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
4362 	    (cmd[1] & 0xe0)) {
4363 		mk_sense_invalid_opcode(scp);
4364 		return check_condition_result;
4365 	}
4366 	if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
4367 	     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
4368 	    (cmd[1] & 0xe0) == 0)
4369 		sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
4370 			    "to DIF device\n");
4371 	ret = check_device_access_params(scp, lba, num, false);
4372 	if (ret)
4373 		return ret;
4374 	dnum = 2 * num;
4375 	arr = kcalloc(lb_size, dnum, GFP_ATOMIC);
4376 	if (NULL == arr) {
4377 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4378 				INSUFF_RES_ASCQ);
4379 		return check_condition_result;
4380 	}
4381 
4382 	sdeb_write_lock(sip);
4383 
4384 	ret = do_dout_fetch(scp, dnum, arr);
4385 	if (ret == -1) {
4386 		retval = DID_ERROR << 16;
4387 		goto cleanup;
4388 	} else if (sdebug_verbose && (ret < (dnum * lb_size)))
4389 		sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb "
4390 			    "indicated=%u, IO sent=%d bytes\n", my_name,
4391 			    dnum * lb_size, ret);
4392 	if (!comp_write_worker(sip, lba, num, arr, false)) {
4393 		mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
4394 		retval = check_condition_result;
4395 		goto cleanup;
4396 	}
4397 	if (scsi_debug_lbp())
4398 		map_region(sip, lba, num);
4399 cleanup:
4400 	sdeb_write_unlock(sip);
4401 	kfree(arr);
4402 	return retval;
4403 }
4404 
4405 struct unmap_block_desc {
4406 	__be64	lba;
4407 	__be32	blocks;
4408 	__be32	__reserved;
4409 };
4410 
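/*
 * The UNMAP parameter list is an 8-byte header (unmap data length at
 * offset 0, block descriptor data length at offset 2) followed by the
 * 16-byte descriptors above; the BUG_ONs below cross-check both lengths.
 */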
4411 static int resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4412 {
4413 	unsigned char *buf;
4414 	struct unmap_block_desc *desc;
4415 	struct sdeb_store_info *sip = devip2sip(devip, true);
4416 	unsigned int i, payload_len, descriptors;
4417 	int ret;
4418 
4419 	if (!scsi_debug_lbp())
4420 		return 0;	/* fib and say it's done */
4421 	payload_len = get_unaligned_be16(scp->cmnd + 7);
4422 	BUG_ON(scsi_bufflen(scp) != payload_len);
4423 
4424 	descriptors = (payload_len - 8) / 16;
4425 	if (descriptors > sdebug_unmap_max_desc) {
4426 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
4427 		return check_condition_result;
4428 	}
4429 
4430 	buf = kzalloc(scsi_bufflen(scp), GFP_ATOMIC);
4431 	if (!buf) {
4432 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4433 				INSUFF_RES_ASCQ);
4434 		return check_condition_result;
4435 	}
4436 
4437 	scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
4438 
4439 	BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
4440 	BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);
4441 
4442 	desc = (void *)&buf[8];
4443 
4444 	sdeb_write_lock(sip);
4445 
4446 	for (i = 0 ; i < descriptors ; i++) {
4447 		unsigned long long lba = get_unaligned_be64(&desc[i].lba);
4448 		unsigned int num = get_unaligned_be32(&desc[i].blocks);
4449 
4450 		ret = check_device_access_params(scp, lba, num, true);
4451 		if (ret)
4452 			goto out;
4453 
4454 		unmap_region(sip, lba, num);
4455 	}
4456 
4457 	ret = 0;
4458 
4459 out:
4460 	sdeb_write_unlock(sip);
4461 	kfree(buf);
4462 
4463 	return ret;
4464 }
4465 
4466 #define SDEBUG_GET_LBA_STATUS_LEN 32
4467 
4468 static int resp_get_lba_status(struct scsi_cmnd *scp,
4469 			       struct sdebug_dev_info *devip)
4470 {
4471 	u8 *cmd = scp->cmnd;
4472 	u64 lba;
4473 	u32 alloc_len, mapped, num;
4474 	int ret;
4475 	u8 arr[SDEBUG_GET_LBA_STATUS_LEN];
4476 
4477 	lba = get_unaligned_be64(cmd + 2);
4478 	alloc_len = get_unaligned_be32(cmd + 10);
4479 
4480 	if (alloc_len < 24)
4481 		return 0;
4482 
4483 	ret = check_device_access_params(scp, lba, 1, false);
4484 	if (ret)
4485 		return ret;
4486 
4487 	if (scsi_debug_lbp()) {
4488 		struct sdeb_store_info *sip = devip2sip(devip, true);
4489 
4490 		mapped = map_state(sip, lba, &num);
4491 	} else {
4492 		mapped = 1;
4493 		/* following just in case virtual_gb changed */
4494 		sdebug_capacity = get_sdebug_capacity();
4495 		if (sdebug_capacity - lba <= 0xffffffff)
4496 			num = sdebug_capacity - lba;
4497 		else
4498 			num = 0xffffffff;
4499 	}
4500 
4501 	memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
4502 	put_unaligned_be32(20, arr);		/* Parameter Data Length */
4503 	put_unaligned_be64(lba, arr + 8);	/* LBA */
4504 	put_unaligned_be32(num, arr + 16);	/* Number of blocks */
4505 	arr[20] = !mapped;		/* prov_stat=0: mapped; 1: dealloc */
4506 
4507 	return fill_from_dev_buffer(scp, arr, SDEBUG_GET_LBA_STATUS_LEN);
4508 }
4509 
4510 static int resp_sync_cache(struct scsi_cmnd *scp,
4511 			   struct sdebug_dev_info *devip)
4512 {
4513 	int res = 0;
4514 	u64 lba;
4515 	u32 num_blocks;
4516 	u8 *cmd = scp->cmnd;
4517 
4518 	if (cmd[0] == SYNCHRONIZE_CACHE) {	/* 10 byte cdb */
4519 		lba = get_unaligned_be32(cmd + 2);
4520 		num_blocks = get_unaligned_be16(cmd + 7);
4521 	} else {				/* SYNCHRONIZE_CACHE(16) */
4522 		lba = get_unaligned_be64(cmd + 2);
4523 		num_blocks = get_unaligned_be32(cmd + 10);
4524 	}
4525 	if (lba + num_blocks > sdebug_capacity) {
4526 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4527 		return check_condition_result;
4528 	}
4529 	if (!write_since_sync || (cmd[1] & 0x2))
4530 		res = SDEG_RES_IMMED_MASK;
4531 	else		/* delay if write_since_sync and IMMED clear */
4532 		write_since_sync = false;
4533 	return res;
4534 }
4535 
4536 /*
4537  * Assuming LBA+num_blocks is not out-of-range, this function returns
4538  * CONDITION MET if the specified blocks will fit (or already fit) in the
4539  * cache, and GOOD status otherwise. We model a disk with a big cache, so
4540  * always yield CONDITION MET. As a side effect, try to bring the backing
4541  * range of main memory into the cache associated with the CPU(s).
4542  */
4543 static int resp_pre_fetch(struct scsi_cmnd *scp,
4544 			  struct sdebug_dev_info *devip)
4545 {
4546 	int res = 0;
4547 	u64 lba;
4548 	u64 block, rest = 0;
4549 	u32 nblks;
4550 	u8 *cmd = scp->cmnd;
4551 	struct sdeb_store_info *sip = devip2sip(devip, true);
4552 	u8 *fsp = sip->storep;
4553 
4554 	if (cmd[0] == PRE_FETCH) {	/* 10 byte cdb */
4555 		lba = get_unaligned_be32(cmd + 2);
4556 		nblks = get_unaligned_be16(cmd + 7);
4557 	} else {			/* PRE-FETCH(16) */
4558 		lba = get_unaligned_be64(cmd + 2);
4559 		nblks = get_unaligned_be32(cmd + 10);
4560 	}
4561 	if (lba + nblks > sdebug_capacity) {
4562 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4563 		return check_condition_result;
4564 	}
4565 	if (!fsp)
4566 		goto fini;
4567 	/* PRE-FETCH spec says nothing about LBP or PI so skip them */
4568 	block = do_div(lba, sdebug_store_sectors);
4569 	if (block + nblks > sdebug_store_sectors)
4570 		rest = block + nblks - sdebug_store_sectors;
4571 
4572 	/* Try to bring the PRE-FETCH range into CPU's cache */
4573 	sdeb_read_lock(sip);
4574 	prefetch_range(fsp + (sdebug_sector_size * block),
4575 		       (nblks - rest) * sdebug_sector_size);
4576 	if (rest)
4577 		prefetch_range(fsp, rest * sdebug_sector_size);
4578 	sdeb_read_unlock(sip);
4579 fini:
4580 	if (cmd[1] & 0x2)
4581 		res = SDEG_RES_IMMED_MASK;
4582 	return res | condition_met_result;
4583 }
4584 
4585 #define RL_BUCKET_ELEMS 8
4586 
4587 /* Even though each pseudo target has a REPORT LUNS "well known logical unit"
4588  * (W-LUN), the normal Linux scanning logic does not associate it with a
4589  * device (e.g. /dev/sg7). The following magic will make that association:
4590  *   "cd /sys/class/scsi_host/host<n> ; echo '- - 49409' > scan"
4591  * where <n> is a host number. If there are multiple targets in a host then
4592  * the above will associate a W-LUN with each target. To get a W-LUN for
4593  * target 2 only, use "echo '- 2 49409' > scan".
4594  */
4595 static int resp_report_luns(struct scsi_cmnd *scp,
4596 			    struct sdebug_dev_info *devip)
4597 {
4598 	unsigned char *cmd = scp->cmnd;
4599 	unsigned int alloc_len;
4600 	unsigned char select_report;
4601 	u64 lun;
4602 	struct scsi_lun *lun_p;
4603 	u8 arr[RL_BUCKET_ELEMS * sizeof(struct scsi_lun)];
4604 	unsigned int lun_cnt;	/* normal LUN count (max: 256) */
4605 	unsigned int wlun_cnt;	/* report luns W-LUN count */
4606 	unsigned int tlun_cnt;	/* total LUN count */
4607 	unsigned int rlen;	/* response length (in bytes) */
4608 	int k, j, n, res;
4609 	unsigned int off_rsp = 0;
4610 	const int sz_lun = sizeof(struct scsi_lun);
4611 
4612 	clear_luns_changed_on_target(devip);
4613 
4614 	select_report = cmd[2];
4615 	alloc_len = get_unaligned_be32(cmd + 6);
4616 
4617 	if (alloc_len < 4) {
4618 		pr_err("alloc len too small %d\n", alloc_len);
4619 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
4620 		return check_condition_result;
4621 	}
4622 
4623 	switch (select_report) {
4624 	case 0:		/* all LUNs apart from W-LUNs */
4625 		lun_cnt = sdebug_max_luns;
4626 		wlun_cnt = 0;
4627 		break;
4628 	case 1:		/* only W-LUNs */
4629 		lun_cnt = 0;
4630 		wlun_cnt = 1;
4631 		break;
4632 	case 2:		/* all LUNs */
4633 		lun_cnt = sdebug_max_luns;
4634 		wlun_cnt = 1;
4635 		break;
4636 	case 0x10:	/* only administrative LUs */
4637 	case 0x11:	/* see SPC-5 */
4638 	case 0x12:	/* only subsidiary LUs owned by referenced LU */
4639 	default:
4640 		pr_debug("select report invalid %d\n", select_report);
4641 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
4642 		return check_condition_result;
4643 	}
4644 
4645 	if (sdebug_no_lun_0 && (lun_cnt > 0))
4646 		--lun_cnt;
4647 
4648 	tlun_cnt = lun_cnt + wlun_cnt;
4649 	rlen = tlun_cnt * sz_lun;	/* excluding 8 byte header */
4650 	scsi_set_resid(scp, scsi_bufflen(scp));
4651 	pr_debug("select_report %d luns = %d wluns = %d no_lun0 %d\n",
4652 		 select_report, lun_cnt, wlun_cnt, sdebug_no_lun_0);
4653 
4654 	/* loops rely on the response header being the same size as a LUN entry (both 8 bytes) */
4655 	lun = sdebug_no_lun_0 ? 1 : 0;
4656 	for (k = 0, j = 0, res = 0; true; ++k, j = 0) {
4657 		memset(arr, 0, sizeof(arr));
4658 		lun_p = (struct scsi_lun *)&arr[0];
4659 		if (k == 0) {
4660 			put_unaligned_be32(rlen, &arr[0]);
4661 			++lun_p;
4662 			j = 1;
4663 		}
4664 		for ( ; j < RL_BUCKET_ELEMS; ++j, ++lun_p) {
4665 			if ((k * RL_BUCKET_ELEMS) + j > lun_cnt)
4666 				break;
4667 			int_to_scsilun(lun++, lun_p);
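			/* SAM-5 flat space addressing: address method 01b in the top bits of byte 0 */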
4668 			if (lun > 1 && sdebug_lun_am == SAM_LUN_AM_FLAT)
4669 				lun_p->scsi_lun[0] |= 0x40;
4670 		}
4671 		if (j < RL_BUCKET_ELEMS)
4672 			break;
4673 		n = j * sz_lun;
4674 		res = p_fill_from_dev_buffer(scp, arr, n, off_rsp);
4675 		if (res)
4676 			return res;
4677 		off_rsp += n;
4678 	}
4679 	if (wlun_cnt) {
4680 		int_to_scsilun(SCSI_W_LUN_REPORT_LUNS, lun_p);
4681 		++j;
4682 	}
4683 	if (j > 0)
4684 		res = p_fill_from_dev_buffer(scp, arr, j * sz_lun, off_rsp);
4685 	return res;
4686 }
4687 
4688 static int resp_verify(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4689 {
4690 	bool is_bytchk3 = false;
4691 	u8 bytchk;
4692 	int ret, j;
4693 	u32 vnum, a_num, off;
4694 	const u32 lb_size = sdebug_sector_size;
4695 	u64 lba;
4696 	u8 *arr;
4697 	u8 *cmd = scp->cmnd;
4698 	struct sdeb_store_info *sip = devip2sip(devip, true);
4699 
4700 	bytchk = (cmd[1] >> 1) & 0x3;
4701 	if (bytchk == 0) {
4702 		return 0;	/* always claim internal verify okay */
4703 	} else if (bytchk == 2) {
4704 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
4705 		return check_condition_result;
4706 	} else if (bytchk == 3) {
4707 		is_bytchk3 = true;	/* 1 block sent, compared repeatedly */
4708 	}
4709 	switch (cmd[0]) {
4710 	case VERIFY_16:
4711 		lba = get_unaligned_be64(cmd + 2);
4712 		vnum = get_unaligned_be32(cmd + 10);
4713 		break;
4714 	case VERIFY:		/* is VERIFY(10) */
4715 		lba = get_unaligned_be32(cmd + 2);
4716 		vnum = get_unaligned_be16(cmd + 7);
4717 		break;
4718 	default:
4719 		mk_sense_invalid_opcode(scp);
4720 		return check_condition_result;
4721 	}
4722 	if (vnum == 0)
4723 		return 0;	/* not an error */
4724 	a_num = is_bytchk3 ? 1 : vnum;
4725 	/* Treat following check like one for read (i.e. no write) access */
4726 	ret = check_device_access_params(scp, lba, a_num, false);
4727 	if (ret)
4728 		return ret;
4729 
4730 	arr = kcalloc(lb_size, vnum, GFP_ATOMIC | __GFP_NOWARN);
4731 	if (!arr) {
4732 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4733 				INSUFF_RES_ASCQ);
4734 		return check_condition_result;
4735 	}
4736 	/* Not changing store, so only need read access */
4737 	sdeb_read_lock(sip);
4738 
4739 	ret = do_dout_fetch(scp, a_num, arr);
4740 	if (ret == -1) {
4741 		ret = DID_ERROR << 16;
4742 		goto cleanup;
4743 	} else if (sdebug_verbose && (ret < (a_num * lb_size))) {
4744 		sdev_printk(KERN_INFO, scp->device,
4745 			    "%s: %s: cdb indicated=%u, IO sent=%d bytes\n",
4746 			    my_name, __func__, a_num * lb_size, ret);
4747 	}
4748 	if (is_bytchk3) {
4749 		for (j = 1, off = lb_size; j < vnum; ++j, off += lb_size)
4750 			memcpy(arr + off, arr, lb_size);
4751 	}
4752 	ret = 0;
4753 	if (!comp_write_worker(sip, lba, vnum, arr, true)) {
4754 		mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
4755 		ret = check_condition_result;
4756 		goto cleanup;
4757 	}
4758 cleanup:
4759 	sdeb_read_unlock(sip);
4760 	kfree(arr);
4761 	return ret;
4762 }
4763 
4764 #define RZONES_DESC_HD 64
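/* both the report header and each zone descriptor are 64 bytes long */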
4765 
4766 /* Report zones depending on start LBA and reporting options */
4767 static int resp_report_zones(struct scsi_cmnd *scp,
4768 			     struct sdebug_dev_info *devip)
4769 {
4770 	unsigned int rep_max_zones, nrz = 0;
4771 	int ret = 0;
4772 	u32 alloc_len, rep_opts, rep_len;
4773 	bool partial;
4774 	u64 lba, zs_lba;
4775 	u8 *arr = NULL, *desc;
4776 	u8 *cmd = scp->cmnd;
4777 	struct sdeb_zone_state *zsp = NULL;
4778 	struct sdeb_store_info *sip = devip2sip(devip, false);
4779 
4780 	if (!sdebug_dev_is_zoned(devip)) {
4781 		mk_sense_invalid_opcode(scp);
4782 		return check_condition_result;
4783 	}
4784 	zs_lba = get_unaligned_be64(cmd + 2);
4785 	alloc_len = get_unaligned_be32(cmd + 10);
4786 	if (alloc_len == 0)
4787 		return 0;	/* not an error */
4788 	rep_opts = cmd[14] & 0x3f;
4789 	partial = cmd[14] & 0x80;
4790 
4791 	if (zs_lba >= sdebug_capacity) {
4792 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4793 		return check_condition_result;
4794 	}
4795 
4796 	rep_max_zones = (alloc_len - 64) >> ilog2(RZONES_DESC_HD);
4797 
4798 	arr = kzalloc(alloc_len, GFP_ATOMIC | __GFP_NOWARN);
4799 	if (!arr) {
4800 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4801 				INSUFF_RES_ASCQ);
4802 		return check_condition_result;
4803 	}
4804 
4805 	sdeb_read_lock(sip);
4806 
4807 	desc = arr + 64;
4808 	for (lba = zs_lba; lba < sdebug_capacity;
4809 	     lba = zsp->z_start + zsp->z_size) {
4810 		if (WARN_ONCE(zbc_zone(devip, lba) == zsp, "lba = %llu\n", lba))
4811 			break;
4812 		zsp = zbc_zone(devip, lba);
4813 		switch (rep_opts) {
4814 		case 0x00:
4815 			/* All zones */
4816 			break;
4817 		case 0x01:
4818 			/* Empty zones */
4819 			if (zsp->z_cond != ZC1_EMPTY)
4820 				continue;
4821 			break;
4822 		case 0x02:
4823 			/* Implicit open zones */
4824 			if (zsp->z_cond != ZC2_IMPLICIT_OPEN)
4825 				continue;
4826 			break;
4827 		case 0x03:
4828 			/* Explicit open zones */
4829 			if (zsp->z_cond != ZC3_EXPLICIT_OPEN)
4830 				continue;
4831 			break;
4832 		case 0x04:
4833 			/* Closed zones */
4834 			if (zsp->z_cond != ZC4_CLOSED)
4835 				continue;
4836 			break;
4837 		case 0x05:
4838 			/* Full zones */
4839 			if (zsp->z_cond != ZC5_FULL)
4840 				continue;
4841 			break;
4842 		case 0x06:
4843 		case 0x07:
4844 		case 0x10:
4845 			/*
4846 			 * Read-only, offline, reset WP recommended are
4847 			 * not emulated: no zones to report.
4848 			 */
4849 			continue;
4850 		case 0x11:
4851 			/* non-seq-resource set */
4852 			if (!zsp->z_non_seq_resource)
4853 				continue;
4854 			break;
4855 		case 0x3e:
4856 			/* All zones except gap zones. */
4857 			if (zbc_zone_is_gap(zsp))
4858 				continue;
4859 			break;
4860 		case 0x3f:
4861 			/* Not write pointer (conventional) zones */
4862 			if (zbc_zone_is_seq(zsp))
4863 				continue;
4864 			break;
4865 		default:
4866 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
4867 					INVALID_FIELD_IN_CDB, 0);
4868 			ret = check_condition_result;
4869 			goto fini;
4870 		}
4871 
4872 		if (nrz < rep_max_zones) {
4873 			/* Fill zone descriptor */
4874 			desc[0] = zsp->z_type;
4875 			desc[1] = zsp->z_cond << 4;
4876 			if (zsp->z_non_seq_resource)
4877 				desc[1] |= 1 << 1;
4878 			put_unaligned_be64((u64)zsp->z_size, desc + 8);
4879 			put_unaligned_be64((u64)zsp->z_start, desc + 16);
4880 			put_unaligned_be64((u64)zsp->z_wp, desc + 24);
4881 			desc += 64;
4882 		}
4883 
4884 		if (partial && nrz >= rep_max_zones)
4885 			break;
4886 
4887 		nrz++;
4888 	}
4889 
4890 	/* Report header */
4891 	/* Zone list length. */
4892 	put_unaligned_be32(nrz * RZONES_DESC_HD, arr + 0);
4893 	/* Maximum LBA */
4894 	put_unaligned_be64(sdebug_capacity - 1, arr + 8);
4895 	/* Zone starting LBA granularity. */
4896 	if (devip->zcap < devip->zsize)
4897 		put_unaligned_be64(devip->zsize, arr + 16);
4898 
4899 	rep_len = (unsigned long)desc - (unsigned long)arr;
4900 	ret = fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, rep_len));
4901 
4902 fini:
4903 	sdeb_read_unlock(sip);
4904 	kfree(arr);
4905 	return ret;
4906 }
4907 
4908 /* Logic transplanted from tcmu-runner, file_zbc.c */
4909 static void zbc_open_all(struct sdebug_dev_info *devip)
4910 {
4911 	struct sdeb_zone_state *zsp = &devip->zstate[0];
4912 	unsigned int i;
4913 
4914 	for (i = 0; i < devip->nr_zones; i++, zsp++) {
4915 		if (zsp->z_cond == ZC4_CLOSED)
4916 			zbc_open_zone(devip, &devip->zstate[i], true);
4917 	}
4918 }
4919 
4920 static int resp_open_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4921 {
4922 	int res = 0;
4923 	u64 z_id;
4924 	enum sdebug_z_cond zc;
4925 	u8 *cmd = scp->cmnd;
4926 	struct sdeb_zone_state *zsp;
4927 	bool all = cmd[14] & 0x01;
4928 	struct sdeb_store_info *sip = devip2sip(devip, false);
4929 
4930 	if (!sdebug_dev_is_zoned(devip)) {
4931 		mk_sense_invalid_opcode(scp);
4932 		return check_condition_result;
4933 	}
4934 
4935 	sdeb_write_lock(sip);
4936 
4937 	if (all) {
4938 		/* Check if all closed zones can be opened */
4939 		if (devip->max_open &&
4940 		    devip->nr_exp_open + devip->nr_closed > devip->max_open) {
4941 			mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
4942 					INSUFF_ZONE_ASCQ);
4943 			res = check_condition_result;
4944 			goto fini;
4945 		}
4946 		/* Open all closed zones */
4947 		zbc_open_all(devip);
4948 		goto fini;
4949 	}
4950 
4951 	/* Open the specified zone */
4952 	z_id = get_unaligned_be64(cmd + 2);
4953 	if (z_id >= sdebug_capacity) {
4954 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4955 		res = check_condition_result;
4956 		goto fini;
4957 	}
4958 
4959 	zsp = zbc_zone(devip, z_id);
4960 	if (z_id != zsp->z_start) {
4961 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4962 		res = check_condition_result;
4963 		goto fini;
4964 	}
4965 	if (zbc_zone_is_conv(zsp)) {
4966 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4967 		res = check_condition_result;
4968 		goto fini;
4969 	}
4970 
4971 	zc = zsp->z_cond;
4972 	if (zc == ZC3_EXPLICIT_OPEN || zc == ZC5_FULL)
4973 		goto fini;
4974 
4975 	if (devip->max_open && devip->nr_exp_open >= devip->max_open) {
4976 		mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
4977 				INSUFF_ZONE_ASCQ);
4978 		res = check_condition_result;
4979 		goto fini;
4980 	}
4981 
4982 	zbc_open_zone(devip, zsp, true);
4983 fini:
4984 	sdeb_write_unlock(sip);
4985 	return res;
4986 }
4987 
4988 static void zbc_close_all(struct sdebug_dev_info *devip)
4989 {
4990 	unsigned int i;
4991 
4992 	for (i = 0; i < devip->nr_zones; i++)
4993 		zbc_close_zone(devip, &devip->zstate[i]);
4994 }
4995 
4996 static int resp_close_zone(struct scsi_cmnd *scp,
4997 			   struct sdebug_dev_info *devip)
4998 {
4999 	int res = 0;
5000 	u64 z_id;
5001 	u8 *cmd = scp->cmnd;
5002 	struct sdeb_zone_state *zsp;
5003 	bool all = cmd[14] & 0x01;
5004 	struct sdeb_store_info *sip = devip2sip(devip, false);
5005 
5006 	if (!sdebug_dev_is_zoned(devip)) {
5007 		mk_sense_invalid_opcode(scp);
5008 		return check_condition_result;
5009 	}
5010 
5011 	sdeb_write_lock(sip);
5012 
5013 	if (all) {
5014 		zbc_close_all(devip);
5015 		goto fini;
5016 	}
5017 
5018 	/* Close the specified zone */
5019 	z_id = get_unaligned_be64(cmd + 2);
5020 	if (z_id >= sdebug_capacity) {
5021 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
5022 		res = check_condition_result;
5023 		goto fini;
5024 	}
5025 
5026 	zsp = zbc_zone(devip, z_id);
5027 	if (z_id != zsp->z_start) {
5028 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
5029 		res = check_condition_result;
5030 		goto fini;
5031 	}
5032 	if (zbc_zone_is_conv(zsp)) {
5033 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
5034 		res = check_condition_result;
5035 		goto fini;
5036 	}
5037 
5038 	zbc_close_zone(devip, zsp);
5039 fini:
5040 	sdeb_write_unlock(sip);
5041 	return res;
5042 }
5043 
5044 static void zbc_finish_zone(struct sdebug_dev_info *devip,
5045 			    struct sdeb_zone_state *zsp, bool empty)
5046 {
5047 	enum sdebug_z_cond zc = zsp->z_cond;
5048 
5049 	if (zc == ZC4_CLOSED || zc == ZC2_IMPLICIT_OPEN ||
5050 	    zc == ZC3_EXPLICIT_OPEN || (empty && zc == ZC1_EMPTY)) {
5051 		if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
5052 			zbc_close_zone(devip, zsp);
5053 		if (zsp->z_cond == ZC4_CLOSED)
5054 			devip->nr_closed--;
5055 		zsp->z_wp = zsp->z_start + zsp->z_size;
5056 		zsp->z_cond = ZC5_FULL;
5057 	}
5058 }
5059 
5060 static void zbc_finish_all(struct sdebug_dev_info *devip)
5061 {
5062 	unsigned int i;
5063 
5064 	for (i = 0; i < devip->nr_zones; i++)
5065 		zbc_finish_zone(devip, &devip->zstate[i], false);
5066 }
5067 
5068 static int resp_finish_zone(struct scsi_cmnd *scp,
5069 			    struct sdebug_dev_info *devip)
5070 {
5071 	struct sdeb_zone_state *zsp;
5072 	int res = 0;
5073 	u64 z_id;
5074 	u8 *cmd = scp->cmnd;
5075 	bool all = cmd[14] & 0x01;
5076 	struct sdeb_store_info *sip = devip2sip(devip, false);
5077 
5078 	if (!sdebug_dev_is_zoned(devip)) {
5079 		mk_sense_invalid_opcode(scp);
5080 		return check_condition_result;
5081 	}
5082 
5083 	sdeb_write_lock(sip);
5084 
5085 	if (all) {
5086 		zbc_finish_all(devip);
5087 		goto fini;
5088 	}
5089 
5090 	/* Finish the specified zone */
5091 	z_id = get_unaligned_be64(cmd + 2);
5092 	if (z_id >= sdebug_capacity) {
5093 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
5094 		res = check_condition_result;
5095 		goto fini;
5096 	}
5097 
5098 	zsp = zbc_zone(devip, z_id);
5099 	if (z_id != zsp->z_start) {
5100 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
5101 		res = check_condition_result;
5102 		goto fini;
5103 	}
5104 	if (zbc_zone_is_conv(zsp)) {
5105 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
5106 		res = check_condition_result;
5107 		goto fini;
5108 	}
5109 
5110 	zbc_finish_zone(devip, zsp, true);
5111 fini:
5112 	sdeb_write_unlock(sip);
5113 	return res;
5114 }
5115 
5116 static void zbc_rwp_zone(struct sdebug_dev_info *devip,
5117 			 struct sdeb_zone_state *zsp)
5118 {
5119 	enum sdebug_z_cond zc;
5120 	struct sdeb_store_info *sip = devip2sip(devip, false);
5121 
5122 	if (!zbc_zone_is_seq(zsp))
5123 		return;
5124 
5125 	zc = zsp->z_cond;
5126 	if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
5127 		zbc_close_zone(devip, zsp);
5128 
5129 	if (zsp->z_cond == ZC4_CLOSED)
5130 		devip->nr_closed--;
5131 
5132 	if (zsp->z_wp > zsp->z_start)
5133 		memset(sip->storep + zsp->z_start * sdebug_sector_size, 0,
5134 		       (zsp->z_wp - zsp->z_start) * sdebug_sector_size);
5135 
5136 	zsp->z_non_seq_resource = false;
5137 	zsp->z_wp = zsp->z_start;
5138 	zsp->z_cond = ZC1_EMPTY;
5139 }
5140 
5141 static void zbc_rwp_all(struct sdebug_dev_info *devip)
5142 {
5143 	unsigned int i;
5144 
5145 	for (i = 0; i < devip->nr_zones; i++)
5146 		zbc_rwp_zone(devip, &devip->zstate[i]);
5147 }
5148 
5149 static int resp_rwp_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
5150 {
5151 	struct sdeb_zone_state *zsp;
5152 	int res = 0;
5153 	u64 z_id;
5154 	u8 *cmd = scp->cmnd;
5155 	bool all = cmd[14] & 0x01;
5156 	struct sdeb_store_info *sip = devip2sip(devip, false);
5157 
5158 	if (!sdebug_dev_is_zoned(devip)) {
5159 		mk_sense_invalid_opcode(scp);
5160 		return check_condition_result;
5161 	}
5162 
5163 	sdeb_write_lock(sip);
5164 
5165 	if (all) {
5166 		zbc_rwp_all(devip);
5167 		goto fini;
5168 	}
5169 
5170 	z_id = get_unaligned_be64(cmd + 2);
5171 	if (z_id >= sdebug_capacity) {
5172 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
5173 		res = check_condition_result;
5174 		goto fini;
5175 	}
5176 
5177 	zsp = zbc_zone(devip, z_id);
5178 	if (z_id != zsp->z_start) {
5179 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
5180 		res = check_condition_result;
5181 		goto fini;
5182 	}
5183 	if (zbc_zone_is_conv(zsp)) {
5184 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
5185 		res = check_condition_result;
5186 		goto fini;
5187 	}
5188 
5189 	zbc_rwp_zone(devip, zsp);
5190 fini:
5191 	sdeb_write_unlock(sip);
5192 	return res;
5193 }
5194 
5195 static u32 get_tag(struct scsi_cmnd *cmnd)
5196 {
5197 	return blk_mq_unique_tag(scsi_cmd_to_rq(cmnd));
5198 }
5199 
5200 /* Queued (deferred) command completions converge here. */
5201 static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
5202 {
5203 	struct sdebug_queued_cmd *sqcp = container_of(sd_dp, struct sdebug_queued_cmd, sd_dp);
5204 	unsigned long flags;
5205 	struct scsi_cmnd *scp = sqcp->scmd;
5206 	struct sdebug_scsi_cmd *sdsc;
5207 	bool aborted;
5208 
5209 	if (sdebug_statistics) {
5210 		atomic_inc(&sdebug_completions);
5211 		if (raw_smp_processor_id() != sd_dp->issuing_cpu)
5212 			atomic_inc(&sdebug_miss_cpus);
5213 	}
5214 
5215 	if (!scp) {
5216 		pr_err("scmd=NULL\n");
5217 		goto out;
5218 	}
5219 
5220 	sdsc = scsi_cmd_priv(scp);
5221 	spin_lock_irqsave(&sdsc->lock, flags);
5222 	aborted = sd_dp->aborted;
5223 	if (unlikely(aborted))
5224 		sd_dp->aborted = false;
5225 	ASSIGN_QUEUED_CMD(scp, NULL);
5226 
5227 	spin_unlock_irqrestore(&sdsc->lock, flags);
5228 
5229 	if (aborted) {
5230 		pr_info("bypassing scsi_done() due to aborted cmd, kicking-off EH\n");
5231 		blk_abort_request(scsi_cmd_to_rq(scp));
5232 		goto out;
5233 	}
5234 
5235 	scsi_done(scp); /* callback to mid level */
5236 out:
5237 	sdebug_free_queued_cmd(sqcp);
5238 }
5239 
5240 /* When high resolution timer goes off this function is called. */
5241 static enum hrtimer_restart sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
5242 {
5243 	struct sdebug_defer *sd_dp = container_of(timer, struct sdebug_defer,
5244 						  hrt);
5245 	sdebug_q_cmd_complete(sd_dp);
5246 	return HRTIMER_NORESTART;
5247 }
5248 
5249 /* When work queue schedules work, it calls this function. */
5250 static void sdebug_q_cmd_wq_complete(struct work_struct *work)
5251 {
5252 	struct sdebug_defer *sd_dp = container_of(work, struct sdebug_defer,
5253 						  ew.work);
5254 	sdebug_q_cmd_complete(sd_dp);
5255 }
5256 
5257 static bool got_shared_uuid;
5258 static uuid_t shared_uuid;
5259 
5260 static int sdebug_device_create_zones(struct sdebug_dev_info *devip)
5261 {
5262 	struct sdeb_zone_state *zsp;
5263 	sector_t capacity = get_sdebug_capacity();
5264 	sector_t conv_capacity;
5265 	sector_t zstart = 0;
5266 	unsigned int i;
5267 
5268 	/*
5269 	 * Set the zone size: if sdeb_zbc_zone_size_mb is not set, figure out
5270 	 * a zone size allowing for at least 4 zones on the device. Otherwise,
5271 	 * use the specified zone size, checking that at least 2 zones can be
5272 	 * created for the device.
5273 	 */
5274 	if (!sdeb_zbc_zone_size_mb) {
5275 		devip->zsize = (DEF_ZBC_ZONE_SIZE_MB * SZ_1M)
5276 			>> ilog2(sdebug_sector_size);
5277 		while (capacity < devip->zsize << 2 && devip->zsize >= 2)
5278 			devip->zsize >>= 1;
5279 		if (devip->zsize < 2) {
5280 			pr_err("Device capacity too small\n");
5281 			return -EINVAL;
5282 		}
5283 	} else {
5284 		if (!is_power_of_2(sdeb_zbc_zone_size_mb)) {
5285 			pr_err("Zone size is not a power of 2\n");
5286 			return -EINVAL;
5287 		}
5288 		devip->zsize = (sdeb_zbc_zone_size_mb * SZ_1M)
5289 			>> ilog2(sdebug_sector_size);
5290 		if (devip->zsize >= capacity) {
5291 			pr_err("Zone size too large for device capacity\n");
5292 			return -EINVAL;
5293 		}
5294 	}
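	/*
	 * Worked example (values illustrative, not defaults): with
	 * zone_size_mb=4 and sdebug_sector_size=512, zsize becomes
	 * (4 * SZ_1M) >> 9 = 8192 logical blocks, so a 64 MiB capacity
	 * yields 16 zones.
	 */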
5295 
5296 	devip->zsize_shift = ilog2(devip->zsize);
5297 	devip->nr_zones = (capacity + devip->zsize - 1) >> devip->zsize_shift;
5298 
5299 	if (sdeb_zbc_zone_cap_mb == 0) {
5300 		devip->zcap = devip->zsize;
5301 	} else {
5302 		devip->zcap = (sdeb_zbc_zone_cap_mb * SZ_1M) >>
5303 			      ilog2(sdebug_sector_size);
5304 		if (devip->zcap > devip->zsize) {
5305 			pr_err("Zone capacity too large\n");
5306 			return -EINVAL;
5307 		}
5308 	}
5309 
5310 	conv_capacity = (sector_t)sdeb_zbc_nr_conv << devip->zsize_shift;
5311 	if (conv_capacity >= capacity) {
5312 		pr_err("Number of conventional zones too large\n");
5313 		return -EINVAL;
5314 	}
5315 	devip->nr_conv_zones = sdeb_zbc_nr_conv;
5316 	devip->nr_seq_zones = ALIGN(capacity - conv_capacity, devip->zsize) >>
5317 			      devip->zsize_shift;
5318 	devip->nr_zones = devip->nr_conv_zones + devip->nr_seq_zones;
5319 
5320 	/* Add gap zones if zone capacity is smaller than the zone size */
5321 	if (devip->zcap < devip->zsize)
5322 		devip->nr_zones += devip->nr_seq_zones;
5323 
5324 	if (devip->zoned) {
5325 		/* sdeb_zbc_max_open can be 0, meaning "not reported" (no open zone limit) */
5326 		if (sdeb_zbc_max_open >= devip->nr_zones - 1)
5327 			devip->max_open = (devip->nr_zones - 1) / 2;
5328 		else
5329 			devip->max_open = sdeb_zbc_max_open;
5330 	}
5331 
5332 	devip->zstate = kcalloc(devip->nr_zones,
5333 				sizeof(struct sdeb_zone_state), GFP_KERNEL);
5334 	if (!devip->zstate)
5335 		return -ENOMEM;
5336 
5337 	for (i = 0; i < devip->nr_zones; i++) {
5338 		zsp = &devip->zstate[i];
5339 
5340 		zsp->z_start = zstart;
5341 
5342 		if (i < devip->nr_conv_zones) {
5343 			zsp->z_type = ZBC_ZTYPE_CNV;
5344 			zsp->z_cond = ZBC_NOT_WRITE_POINTER;
5345 			zsp->z_wp = (sector_t)-1;
5346 			zsp->z_size =
5347 				min_t(u64, devip->zsize, capacity - zstart);
5348 		} else if ((zstart & (devip->zsize - 1)) == 0) {
5349 			if (devip->zoned)
5350 				zsp->z_type = ZBC_ZTYPE_SWR;
5351 			else
5352 				zsp->z_type = ZBC_ZTYPE_SWP;
5353 			zsp->z_cond = ZC1_EMPTY;
5354 			zsp->z_wp = zsp->z_start;
5355 			zsp->z_size =
5356 				min_t(u64, devip->zcap, capacity - zstart);
5357 		} else {
5358 			zsp->z_type = ZBC_ZTYPE_GAP;
5359 			zsp->z_cond = ZBC_NOT_WRITE_POINTER;
5360 			zsp->z_wp = (sector_t)-1;
5361 			zsp->z_size = min_t(u64, devip->zsize - devip->zcap,
5362 					    capacity - zstart);
5363 		}
5364 
5365 		WARN_ON_ONCE((int)zsp->z_size <= 0);
5366 		zstart += zsp->z_size;
5367 	}
5368 
5369 	return 0;
5370 }
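/*
 * Illustrative layout built by sdebug_device_create_zones() when gap zones
 * are in use (parameter values hypothetical): with zsize=8192 blocks,
 * zcap=6144 blocks and one conventional zone, zone 0 is conventional
 * (8192 blocks); every following zsize-aligned start then gets a
 * sequential zone of zcap blocks followed by a gap zone of
 * zsize - zcap = 2048 blocks, until the capacity is covered.
 */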
5371 
5372 static struct sdebug_dev_info *sdebug_device_create(
5373 			struct sdebug_host_info *sdbg_host, gfp_t flags)
5374 {
5375 	struct sdebug_dev_info *devip;
5376 
5377 	devip = kzalloc(sizeof(*devip), flags);
5378 	if (devip) {
5379 		if (sdebug_uuid_ctl == 1)
5380 			uuid_gen(&devip->lu_name);
5381 		else if (sdebug_uuid_ctl == 2) {
5382 			if (got_shared_uuid)
5383 				devip->lu_name = shared_uuid;
5384 			else {
5385 				uuid_gen(&shared_uuid);
5386 				got_shared_uuid = true;
5387 				devip->lu_name = shared_uuid;
5388 			}
5389 		}
5390 		devip->sdbg_host = sdbg_host;
5391 		if (sdeb_zbc_in_use) {
5392 			devip->zoned = sdeb_zbc_model == BLK_ZONED_HM;
5393 			if (sdebug_device_create_zones(devip)) {
5394 				kfree(devip);
5395 				return NULL;
5396 			}
5397 		} else {
5398 			devip->zoned = false;
5399 		}
5400 		devip->create_ts = ktime_get_boottime();
5401 		atomic_set(&devip->stopped, (sdeb_tur_ms_to_ready > 0 ? 2 : 0));
5402 		spin_lock_init(&devip->list_lock);
5403 		INIT_LIST_HEAD(&devip->inject_err_list);
5404 		list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
5405 	}
5406 	return devip;
5407 }
5408 
5409 static struct sdebug_dev_info *find_build_dev_info(struct scsi_device *sdev)
5410 {
5411 	struct sdebug_host_info *sdbg_host;
5412 	struct sdebug_dev_info *open_devip = NULL;
5413 	struct sdebug_dev_info *devip;
5414 
5415 	sdbg_host = shost_to_sdebug_host(sdev->host);
5416 
5417 	list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
5418 		if (devip->used && devip->channel == sdev->channel &&
5419 		    devip->target == sdev->id &&
5420 		    devip->lun == sdev->lun)
5421 			return devip;
5422 
5423 		/* remember the first unused slot for possible reuse */
5424 		if (!devip->used && !open_devip)
5425 			open_devip = devip;
5426 	}
5427 	if (!open_devip) { /* try to make a new one */
5428 		open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
5429 		if (!open_devip) {
5430 			pr_err("out of memory at line %d\n", __LINE__);
5431 			return NULL;
5432 		}
5433 	}
5434 
5435 	open_devip->channel = sdev->channel;
5436 	open_devip->target = sdev->id;
5437 	open_devip->lun = sdev->lun;
5438 	open_devip->sdbg_host = sdbg_host;
5439 	set_bit(SDEBUG_UA_POOCCUR, open_devip->uas_bm);
5440 	open_devip->used = true;
5441 	return open_devip;
5442 }
5443 
5444 static int scsi_debug_slave_alloc(struct scsi_device *sdp)
5445 {
5446 	if (sdebug_verbose)
5447 		pr_info("slave_alloc <%u %u %u %llu>\n",
5448 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5449 
5450 	return 0;
5451 }
5452 
5453 static int scsi_debug_slave_configure(struct scsi_device *sdp)
5454 {
5455 	struct sdebug_dev_info *devip =
5456 			(struct sdebug_dev_info *)sdp->hostdata;
5457 	struct dentry *dentry;
5458 
5459 	if (sdebug_verbose)
5460 		pr_info("slave_configure <%u %u %u %llu>\n",
5461 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5462 	if (sdp->host->max_cmd_len != SDEBUG_MAX_CMD_LEN)
5463 		sdp->host->max_cmd_len = SDEBUG_MAX_CMD_LEN;
5464 	if (devip == NULL) {
5465 		devip = find_build_dev_info(sdp);
5466 		if (devip == NULL)
5467 			return 1;  /* no resources, will be marked offline */
5468 	}
5469 	sdp->hostdata = devip;
5470 	if (sdebug_no_uld)
5471 		sdp->no_uld_attach = 1;
5472 	config_cdb_len(sdp);
5473 
5474 	if (sdebug_allow_restart)
5475 		sdp->allow_restart = 1;
5476 
5477 	devip->debugfs_entry = debugfs_create_dir(dev_name(&sdp->sdev_dev),
5478 				sdebug_debugfs_root);
5479 	if (IS_ERR_OR_NULL(devip->debugfs_entry))
5480 		pr_info("%s: failed to create debugfs directory for device %s\n",
5481 			__func__, dev_name(&sdp->sdev_gendev));
5482 
5483 	dentry = debugfs_create_file("error", 0600, devip->debugfs_entry, sdp,
5484 				&sdebug_error_fops);
5485 	if (IS_ERR_OR_NULL(dentry))
5486 		pr_info("%s: failed to create error file for device %s\n",
5487 			__func__, dev_name(&sdp->sdev_gendev));
5488 
5489 	return 0;
5490 }
5491 
5492 static void scsi_debug_slave_destroy(struct scsi_device *sdp)
5493 {
5494 	struct sdebug_dev_info *devip =
5495 		(struct sdebug_dev_info *)sdp->hostdata;
5496 	struct sdebug_err_inject *err;
5497 
5498 	if (sdebug_verbose)
5499 		pr_info("slave_destroy <%u %u %u %llu>\n",
5500 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5501 
5502 	if (!devip)
5503 		return;
5504 
5505 	spin_lock(&devip->list_lock);
5506 	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
5507 		list_del_rcu(&err->list);
5508 		call_rcu(&err->rcu, sdebug_err_free);
5509 	}
5510 	spin_unlock(&devip->list_lock);
5511 
5512 	debugfs_remove(devip->debugfs_entry);
5513 
5514 	/* make this slot available for re-use */
5515 	devip->used = false;
5516 	sdp->hostdata = NULL;
5517 }
5518 
5519 /* Returns true if we require the queued memory to be freed by the caller. */
5520 static bool stop_qc_helper(struct sdebug_defer *sd_dp,
5521 			   enum sdeb_defer_type defer_t)
5522 {
5523 	if (defer_t == SDEB_DEFER_HRT) {
5524 		int res = hrtimer_try_to_cancel(&sd_dp->hrt);
5525 
5526 		switch (res) {
5527 		case 0: /* Not active, it must have already run */
5528 		case -1: /* the timer is executing its callback */
5529 			return false;
5530 		case 1: /* Was active, we've now cancelled */
5531 		default:
5532 			return true;
5533 		}
5534 	} else if (defer_t == SDEB_DEFER_WQ) {
5535 		/* Cancel if pending */
5536 		if (cancel_work_sync(&sd_dp->ew.work))
5537 			return true;
5538 		/* Was not pending, so it must have run */
5539 		return false;
5540 	} else if (defer_t == SDEB_DEFER_POLL) {
5541 		return true;
5542 	}
5543 
5544 	return false;
5545 }
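/*
 * For reference, the hrtimer_try_to_cancel() return values handled above:
 * 1 means the timer was queued and is now cancelled (the caller must free
 * the queued command), 0 means the timer had already expired and -1 means
 * its callback is currently executing; in the latter two cases the
 * completion path owns, and will free, the memory.
 */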
5546 
5547 
5548 static bool scsi_debug_stop_cmnd(struct scsi_cmnd *cmnd)
5549 {
5550 	enum sdeb_defer_type l_defer_t;
5551 	struct sdebug_defer *sd_dp;
5552 	struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmnd);
5553 	struct sdebug_queued_cmd *sqcp = TO_QUEUED_CMD(cmnd);
5554 
5555 	lockdep_assert_held(&sdsc->lock);
5556 
5557 	if (!sqcp)
5558 		return false;
5559 	sd_dp = &sqcp->sd_dp;
5560 	l_defer_t = READ_ONCE(sd_dp->defer_t);
5561 	ASSIGN_QUEUED_CMD(cmnd, NULL);
5562 
5563 	if (stop_qc_helper(sd_dp, l_defer_t))
5564 		sdebug_free_queued_cmd(sqcp);
5565 
5566 	return true;
5567 }
5568 
5569 /*
5570  * Called from scsi_debug_abort() only, which handles a timed-out cmd.
5571  */
5572 static bool scsi_debug_abort_cmnd(struct scsi_cmnd *cmnd)
5573 {
5574 	struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmnd);
5575 	unsigned long flags;
5576 	bool res;
5577 
5578 	spin_lock_irqsave(&sdsc->lock, flags);
5579 	res = scsi_debug_stop_cmnd(cmnd);
5580 	spin_unlock_irqrestore(&sdsc->lock, flags);
5581 
5582 	return res;
5583 }
5584 
5585 /*
5586  * All we can do is set the cmnd as internally aborted and wait for it to
5587  * finish. We cannot call scsi_done() as the normal completion path may do that.
5588  */
5589 static bool sdebug_stop_cmnd(struct request *rq, void *data)
5590 {
5591 	scsi_debug_abort_cmnd(blk_mq_rq_to_pdu(rq));
5592 
5593 	return true;
5594 }
5595 
5596 /* Deletes (stops) timers or work queues of all queued commands */
5597 static void stop_all_queued(void)
5598 {
5599 	struct sdebug_host_info *sdhp;
5600 
5601 	mutex_lock(&sdebug_host_list_mutex);
5602 	list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
5603 		struct Scsi_Host *shost = sdhp->shost;
5604 
5605 		blk_mq_tagset_busy_iter(&shost->tag_set, sdebug_stop_cmnd, NULL);
5606 	}
5607 	mutex_unlock(&sdebug_host_list_mutex);
5608 }
5609 
5610 static int sdebug_fail_abort(struct scsi_cmnd *cmnd)
5611 {
5612 	struct scsi_device *sdp = cmnd->device;
5613 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
5614 	struct sdebug_err_inject *err;
5615 	unsigned char *cmd = cmnd->cmnd;
5616 	int ret = 0;
5617 
5618 	if (devip == NULL)
5619 		return 0;
5620 
5621 	rcu_read_lock();
5622 	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
5623 		if (err->type == ERR_ABORT_CMD_FAILED &&
5624 		    (err->cmd == cmd[0] || err->cmd == 0xff)) {
5625 			ret = !!err->cnt;
5626 			if (err->cnt < 0)
5627 				err->cnt++;
5628 
5629 			rcu_read_unlock();
5630 			return ret;
5631 		}
5632 	}
5633 	rcu_read_unlock();
5634 
5635 	return 0;
5636 }
5637 
5638 static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
5639 {
5640 	bool ok = scsi_debug_abort_cmnd(SCpnt);
5641 	u8 *cmd = SCpnt->cmnd;
5642 	u8 opcode = cmd[0];
5643 
5644 	++num_aborts;
5645 
5646 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5647 		sdev_printk(KERN_INFO, SCpnt->device,
5648 			    "%s: command%s found\n", __func__,
5649 			    ok ? "" : " not");
5650 
5651 	if (sdebug_fail_abort(SCpnt)) {
5652 		scmd_printk(KERN_INFO, SCpnt, "fail abort command 0x%x\n",
5653 			    opcode);
5654 		return FAILED;
5655 	}
5656 
5657 	return SUCCESS;
5658 }
5659 
5660 static bool scsi_debug_stop_all_queued_iter(struct request *rq, void *data)
5661 {
5662 	struct scsi_device *sdp = data;
5663 	struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
5664 
5665 	if (scmd->device == sdp)
5666 		scsi_debug_abort_cmnd(scmd);
5667 
5668 	return true;
5669 }
5670 
5671 /* Deletes (stops) timers or work queues of all queued commands per sdev */
5672 static void scsi_debug_stop_all_queued(struct scsi_device *sdp)
5673 {
5674 	struct Scsi_Host *shost = sdp->host;
5675 
5676 	blk_mq_tagset_busy_iter(&shost->tag_set,
5677 				scsi_debug_stop_all_queued_iter, sdp);
5678 }
5679 
5680 static int sdebug_fail_lun_reset(struct scsi_cmnd *cmnd)
5681 {
5682 	struct scsi_device *sdp = cmnd->device;
5683 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
5684 	struct sdebug_err_inject *err;
5685 	unsigned char *cmd = cmnd->cmnd;
5686 	int ret = 0;
5687 
5688 	if (devip == NULL)
5689 		return 0;
5690 
5691 	rcu_read_lock();
5692 	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
5693 		if (err->type == ERR_LUN_RESET_FAILED &&
5694 		    (err->cmd == cmd[0] || err->cmd == 0xff)) {
5695 			ret = !!err->cnt;
5696 			if (err->cnt < 0)
5697 				err->cnt++;
5698 
5699 			rcu_read_unlock();
5700 			return ret;
5701 		}
5702 	}
5703 	rcu_read_unlock();
5704 
5705 	return 0;
5706 }
5707 
5708 static int scsi_debug_device_reset(struct scsi_cmnd *SCpnt)
5709 {
5710 	struct scsi_device *sdp = SCpnt->device;
5711 	struct sdebug_dev_info *devip = sdp->hostdata;
5712 	u8 *cmd = SCpnt->cmnd;
5713 	u8 opcode = cmd[0];
5714 
5715 	++num_dev_resets;
5716 
5717 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5718 		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5719 
5720 	scsi_debug_stop_all_queued(sdp);
5721 	if (devip)
5722 		set_bit(SDEBUG_UA_POR, devip->uas_bm);
5723 
5724 	if (sdebug_fail_lun_reset(SCpnt)) {
5725 		scmd_printk(KERN_INFO, SCpnt, "fail lun reset 0x%x\n", opcode);
5726 		return FAILED;
5727 	}
5728 
5729 	return SUCCESS;
5730 }
5731 
5732 static int sdebug_fail_target_reset(struct scsi_cmnd *cmnd)
5733 {
5734 	struct scsi_target *starget = scsi_target(cmnd->device);
5735 	struct sdebug_target_info *targetip =
5736 		(struct sdebug_target_info *)starget->hostdata;
5737 
5738 	if (targetip)
5739 		return targetip->reset_fail;
5740 
5741 	return 0;
5742 }
5743 
5744 static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt)
5745 {
5746 	struct scsi_device *sdp = SCpnt->device;
5747 	struct sdebug_host_info *sdbg_host = shost_to_sdebug_host(sdp->host);
5748 	struct sdebug_dev_info *devip;
5749 	u8 *cmd = SCpnt->cmnd;
5750 	u8 opcode = cmd[0];
5751 	int k = 0;
5752 
5753 	++num_target_resets;
5754 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5755 		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5756 
5757 	list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
5758 		if (devip->target == sdp->id) {
5759 			set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5760 			++k;
5761 		}
5762 	}
5763 
5764 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5765 		sdev_printk(KERN_INFO, sdp,
5766 			    "%s: %d device(s) found in target\n", __func__, k);
5767 
5768 	if (sdebug_fail_target_reset(SCpnt)) {
5769 		scmd_printk(KERN_INFO, SCpnt, "fail target reset 0x%x\n",
5770 			    opcode);
5771 		return FAILED;
5772 	}
5773 
5774 	return SUCCESS;
5775 }
5776 
5777 static int scsi_debug_bus_reset(struct scsi_cmnd *SCpnt)
5778 {
5779 	struct scsi_device *sdp = SCpnt->device;
5780 	struct sdebug_host_info *sdbg_host = shost_to_sdebug_host(sdp->host);
5781 	struct sdebug_dev_info *devip;
5782 	int k = 0;
5783 
5784 	++num_bus_resets;
5785 
5786 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5787 		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5788 
5789 	list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
5790 		set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5791 		++k;
5792 	}
5793 
5794 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5795 		sdev_printk(KERN_INFO, sdp,
5796 			    "%s: %d device(s) found in host\n", __func__, k);
5797 	return SUCCESS;
5798 }
5799 
5800 static int scsi_debug_host_reset(struct scsi_cmnd *SCpnt)
5801 {
5802 	struct sdebug_host_info *sdbg_host;
5803 	struct sdebug_dev_info *devip;
5804 	int k = 0;
5805 
5806 	++num_host_resets;
5807 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5808 		sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__);
5809 	mutex_lock(&sdebug_host_list_mutex);
5810 	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
5811 		list_for_each_entry(devip, &sdbg_host->dev_info_list,
5812 				    dev_list) {
5813 			set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5814 			++k;
5815 		}
5816 	}
5817 	mutex_unlock(&sdebug_host_list_mutex);
5818 	stop_all_queued();
5819 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5820 		sdev_printk(KERN_INFO, SCpnt->device,
5821 			    "%s: %d device(s) found\n", __func__, k);
5822 	return SUCCESS;
5823 }
5824 
5825 static void sdebug_build_parts(unsigned char *ramp, unsigned long store_size)
5826 {
5827 	struct msdos_partition *pp;
5828 	int starts[SDEBUG_MAX_PARTS + 2], max_part_secs;
5829 	int sectors_per_part, num_sectors, k;
5830 	int heads_by_sects, start_sec, end_sec;
5831 
5832 	/* assume partition table already zeroed */
5833 	if ((sdebug_num_parts < 1) || (store_size < 1048576))
5834 		return;
5835 	if (sdebug_num_parts > SDEBUG_MAX_PARTS) {
5836 		sdebug_num_parts = SDEBUG_MAX_PARTS;
5837 		pr_warn("reducing partitions to %d\n", SDEBUG_MAX_PARTS);
5838 	}
5839 	num_sectors = (int)get_sdebug_capacity();
5840 	sectors_per_part = (num_sectors - sdebug_sectors_per)
5841 			   / sdebug_num_parts;
5842 	heads_by_sects = sdebug_heads * sdebug_sectors_per;
5843 	starts[0] = sdebug_sectors_per;
5844 	max_part_secs = sectors_per_part;
5845 	for (k = 1; k < sdebug_num_parts; ++k) {
5846 		starts[k] = ((k * sectors_per_part) / heads_by_sects)
5847 			    * heads_by_sects;
5848 		if (starts[k] - starts[k - 1] < max_part_secs)
5849 			max_part_secs = starts[k] - starts[k - 1];
5850 	}
5851 	starts[sdebug_num_parts] = num_sectors;
5852 	starts[sdebug_num_parts + 1] = 0;
5853 
5854 	ramp[510] = 0x55;	/* magic partition markings */
5855 	ramp[511] = 0xAA;
5856 	pp = (struct msdos_partition *)(ramp + 0x1be);
5857 	for (k = 0; starts[k + 1]; ++k, ++pp) {
5858 		start_sec = starts[k];
5859 		end_sec = starts[k] + max_part_secs - 1;
5860 		pp->boot_ind = 0;
5861 
5862 		pp->cyl = start_sec / heads_by_sects;
5863 		pp->head = (start_sec - (pp->cyl * heads_by_sects))
5864 			   / sdebug_sectors_per;
5865 		pp->sector = (start_sec % sdebug_sectors_per) + 1;
5866 
5867 		pp->end_cyl = end_sec / heads_by_sects;
5868 		pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
5869 			       / sdebug_sectors_per;
5870 		pp->end_sector = (end_sec % sdebug_sectors_per) + 1;
5871 
5872 		pp->start_sect = cpu_to_le32(start_sec);
5873 		pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1);
5874 		pp->sys_ind = 0x83;	/* plain Linux partition */
5875 	}
5876 }
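/*
 * CHS arithmetic example for the mapping above (geometry values are
 * illustrative, not defaults): with sdebug_heads=64 and
 * sdebug_sectors_per=32, heads_by_sects is 2048; a partition starting at
 * sector 2080 gets cyl = 2080 / 2048 = 1, head = (2080 - 2048) / 32 = 1
 * and sector = (2080 % 32) + 1 = 1.
 */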
5877 
5878 static void block_unblock_all_queues(bool block)
5879 {
5880 	struct sdebug_host_info *sdhp;
5881 
5882 	lockdep_assert_held(&sdebug_host_list_mutex);
5883 
5884 	list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
5885 		struct Scsi_Host *shost = sdhp->shost;
5886 
5887 		if (block)
5888 			scsi_block_requests(shost);
5889 		else
5890 			scsi_unblock_requests(shost);
5891 	}
5892 }
5893 
5894 /* Adjust (by rounding down) the sdebug_cmnd_count so abs(every_nth)-1
5895  * commands will be processed normally before triggers occur.
5896  */
5897 static void tweak_cmnd_count(void)
5898 {
5899 	int count, modulo;
5900 
5901 	modulo = abs(sdebug_every_nth);
5902 	if (modulo < 2)
5903 		return;
5904 
5905 	mutex_lock(&sdebug_host_list_mutex);
5906 	block_unblock_all_queues(true);
5907 	count = atomic_read(&sdebug_cmnd_count);
5908 	atomic_set(&sdebug_cmnd_count, (count / modulo) * modulo);
5909 	block_unblock_all_queues(false);
5910 	mutex_unlock(&sdebug_host_list_mutex);
5911 }
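/*
 * Worked example (numbers illustrative): with every_nth=5 and
 * sdebug_cmnd_count=17, the count is rounded down to (17 / 5) * 5 = 15,
 * so four more commands are processed normally before the next multiple
 * of 5 makes inject_on_this_cmd() return true.
 */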
5912 
5913 static void clear_queue_stats(void)
5914 {
5915 	atomic_set(&sdebug_cmnd_count, 0);
5916 	atomic_set(&sdebug_completions, 0);
5917 	atomic_set(&sdebug_miss_cpus, 0);
5918 	atomic_set(&sdebug_a_tsf, 0);
5919 }
5920 
5921 static bool inject_on_this_cmd(void)
5922 {
5923 	if (sdebug_every_nth == 0)
5924 		return false;
5925 	return (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) == 0;
5926 }
5927 
5928 #define INCLUSIVE_TIMING_MAX_NS 1000000		/* 1 millisecond */
5929 
5930 
5931 void sdebug_free_queued_cmd(struct sdebug_queued_cmd *sqcp)
5932 {
5933 	if (sqcp)
5934 		kmem_cache_free(queued_cmd_cache, sqcp);
5935 }
5936 
5937 static struct sdebug_queued_cmd *sdebug_alloc_queued_cmd(struct scsi_cmnd *scmd)
5938 {
5939 	struct sdebug_queued_cmd *sqcp;
5940 	struct sdebug_defer *sd_dp;
5941 
5942 	sqcp = kmem_cache_zalloc(queued_cmd_cache, GFP_ATOMIC);
5943 	if (!sqcp)
5944 		return NULL;
5945 
5946 	sd_dp = &sqcp->sd_dp;
5947 
5948 	hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
5949 	sd_dp->hrt.function = sdebug_q_cmd_hrt_complete;
5950 	INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);
5951 
5952 	sqcp->scmd = scmd;
5953 
5954 	return sqcp;
5955 }
5956 
5957 /* Complete the processing of the thread that queued a SCSI command to this
5958  * driver. It either completes the command by calling scsi_done() or
5959  * schedules an hrtimer or work item and then returns 0. Returns
5960  * SCSI_MLQUEUE_HOST_BUSY if temporarily out of resources.
5961  */
5962 static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
5963 			 int scsi_result,
5964 			 int (*pfp)(struct scsi_cmnd *,
5965 				    struct sdebug_dev_info *),
5966 			 int delta_jiff, int ndelay)
5967 {
5968 	struct request *rq = scsi_cmd_to_rq(cmnd);
5969 	bool polled = rq->cmd_flags & REQ_POLLED;
5970 	struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmnd);
5971 	unsigned long flags;
5972 	u64 ns_from_boot = 0;
5973 	struct sdebug_queued_cmd *sqcp;
5974 	struct scsi_device *sdp;
5975 	struct sdebug_defer *sd_dp;
5976 
5977 	if (unlikely(devip == NULL)) {
5978 		if (scsi_result == 0)
5979 			scsi_result = DID_NO_CONNECT << 16;
5980 		goto respond_in_thread;
5981 	}
5982 	sdp = cmnd->device;
5983 
5984 	if (delta_jiff == 0)
5985 		goto respond_in_thread;
5986 
5987 
5988 	if (unlikely(sdebug_every_nth && (SDEBUG_OPT_RARE_TSF & sdebug_opts) &&
5989 		     (scsi_result == 0))) {
5990 		int num_in_q = scsi_device_busy(sdp);
5991 		int qdepth = cmnd->device->queue_depth;
5992 
5993 		if ((num_in_q == qdepth) &&
5994 		    (atomic_inc_return(&sdebug_a_tsf) >=
5995 		     abs(sdebug_every_nth))) {
5996 			atomic_set(&sdebug_a_tsf, 0);
5997 			scsi_result = device_qfull_result;
5998 
5999 			if (unlikely(SDEBUG_OPT_Q_NOISE & sdebug_opts))
6000 				sdev_printk(KERN_INFO, sdp, "%s: num_in_q=%d +1, <inject> status: TASK SET FULL\n",
6001 					    __func__, num_in_q);
6002 		}
6003 	}
6004 
6005 	sqcp = sdebug_alloc_queued_cmd(cmnd);
6006 	if (!sqcp) {
6007 		pr_err("%s no alloc\n", __func__);
6008 		return SCSI_MLQUEUE_HOST_BUSY;
6009 	}
6010 	sd_dp = &sqcp->sd_dp;
6011 
6012 	if (polled)
6013 		ns_from_boot = ktime_get_boottime_ns();
6014 
6015 	/* one of the resp_*() response functions is called here */
6016 	cmnd->result = pfp ? pfp(cmnd, devip) : 0;
6017 	if (cmnd->result & SDEG_RES_IMMED_MASK) {
6018 		cmnd->result &= ~SDEG_RES_IMMED_MASK;
6019 		delta_jiff = ndelay = 0;
6020 	}
6021 	if (cmnd->result == 0 && scsi_result != 0)
6022 		cmnd->result = scsi_result;
6023 	if (cmnd->result == 0 && unlikely(sdebug_opts & SDEBUG_OPT_TRANSPORT_ERR)) {
6024 		if (atomic_read(&sdeb_inject_pending)) {
6025 			mk_sense_buffer(cmnd, ABORTED_COMMAND, TRANSPORT_PROBLEM, ACK_NAK_TO);
6026 			atomic_set(&sdeb_inject_pending, 0);
6027 			cmnd->result = check_condition_result;
6028 		}
6029 	}
6030 
6031 	if (unlikely(sdebug_verbose && cmnd->result))
6032 		sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n",
6033 			    __func__, cmnd->result);
6034 
6035 	if (delta_jiff > 0 || ndelay > 0) {
6036 		ktime_t kt;
6037 
6038 		if (delta_jiff > 0) {
6039 			u64 ns = jiffies_to_nsecs(delta_jiff);
6040 
6041 			if (sdebug_random && ns < U32_MAX) {
6042 				ns = get_random_u32_below((u32)ns);
6043 			} else if (sdebug_random) {
6044 				ns >>= 12;	/* scale to 4 usec precision */
6045 				if (ns < U32_MAX)	/* over 4 hours max */
6046 					ns = get_random_u32_below((u32)ns);
6047 				ns <<= 12;
6048 			}
6049 			kt = ns_to_ktime(ns);
6050 		} else {	/* ndelay has a 4.2 second max */
6051 			kt = sdebug_random ? get_random_u32_below((u32)ndelay) :
6052 					     (u32)ndelay;
6053 			if (ndelay < INCLUSIVE_TIMING_MAX_NS) {
6054 				u64 d = ktime_get_boottime_ns() - ns_from_boot;
6055 
6056 				if (kt <= d) {	/* elapsed duration >= kt */
6057 					/* call scsi_done() from this thread */
6058 					sdebug_free_queued_cmd(sqcp);
6059 					scsi_done(cmnd);
6060 					return 0;
6061 				}
6062 				/* otherwise reduce kt by elapsed time */
6063 				kt -= d;
6064 			}
6065 		}
6066 		if (sdebug_statistics)
6067 			sd_dp->issuing_cpu = raw_smp_processor_id();
6068 		if (polled) {
6069 			spin_lock_irqsave(&sdsc->lock, flags);
6070 			sd_dp->cmpl_ts = ktime_add(ns_to_ktime(ns_from_boot), kt);
6071 			ASSIGN_QUEUED_CMD(cmnd, sqcp);
6072 			WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_POLL);
6073 			spin_unlock_irqrestore(&sdsc->lock, flags);
6074 		} else {
6075 			/* schedule the invocation of scsi_done() for a later time */
6076 			spin_lock_irqsave(&sdsc->lock, flags);
6077 			ASSIGN_QUEUED_CMD(cmnd, sqcp);
6078 			WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_HRT);
6079 			hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED);
6080 			/*
6081 			 * The completion handler will try to grab sqcp->lock,
6082 			 * so there is no chance that the completion handler
6083 			 * will call scsi_done() until we release the lock
6084 			 * here (so ok to keep referencing sdsc).
6085 			 */
6086 			spin_unlock_irqrestore(&sdsc->lock, flags);
6087 		}
6088 	} else {	/* jdelay < 0, use work queue */
6089 		if (unlikely((sdebug_opts & SDEBUG_OPT_CMD_ABORT) &&
6090 			     atomic_read(&sdeb_inject_pending))) {
6091 			sd_dp->aborted = true;
6092 			atomic_set(&sdeb_inject_pending, 0);
6093 			sdev_printk(KERN_INFO, sdp, "abort request tag=%#x\n",
6094 				    blk_mq_unique_tag_to_tag(get_tag(cmnd)));
6095 		}
6096 
6097 		if (sdebug_statistics)
6098 			sd_dp->issuing_cpu = raw_smp_processor_id();
6099 		if (polled) {
6100 			spin_lock_irqsave(&sdsc->lock, flags);
6101 			ASSIGN_QUEUED_CMD(cmnd, sqcp);
6102 			sd_dp->cmpl_ts = ns_to_ktime(ns_from_boot);
6103 			WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_POLL);
6104 			spin_unlock_irqrestore(&sdsc->lock, flags);
6105 		} else {
6106 			spin_lock_irqsave(&sdsc->lock, flags);
6107 			ASSIGN_QUEUED_CMD(cmnd, sqcp);
6108 			WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_WQ);
6109 			schedule_work(&sd_dp->ew.work);
6110 			spin_unlock_irqrestore(&sdsc->lock, flags);
6111 		}
6112 	}
6113 
6114 	return 0;
6115 
6116 respond_in_thread:	/* call back to mid-layer using invocation thread */
6117 	cmnd->result = pfp != NULL ? pfp(cmnd, devip) : 0;
6118 	cmnd->result &= ~SDEG_RES_IMMED_MASK;
6119 	if (cmnd->result == 0 && scsi_result != 0)
6120 		cmnd->result = scsi_result;
6121 	scsi_done(cmnd);
6122 	return 0;
6123 }
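/*
 * A summary of the completion paths chosen in schedule_resp() (derived
 * from the code above, not extra behaviour): delta_jiff == 0 responds in
 * the submitting thread via scsi_done(); a positive delta_jiff or ndelay
 * arms an hrtimer (SDEB_DEFER_HRT); a negative delta_jiff defers to the
 * work queue (SDEB_DEFER_WQ); and REQ_POLLED requests are instead parked
 * with SDEB_DEFER_POLL until the poll path completes them.
 */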
6124 
6125 /* Note: The following macros create attribute files in the
6126    /sys/module/scsi_debug/parameters directory. Unfortunately this
6127    driver is not notified of a change and cannot trigger auxiliary actions
6128    as it can when the corresponding attribute in the
6129    /sys/bus/pseudo/drivers/scsi_debug directory is changed.
6130  */
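/*
 * Typical usage (illustrative values): read-only parameters are set at
 * load time, e.g.
 *
 *   modprobe scsi_debug dev_size_mb=256 sector_size=4096 num_tgts=2
 *
 * while writable ones can be changed at run time either through
 * /sys/module/scsi_debug/parameters/ or, preferably (see the note above),
 * through the driver attributes, e.g.
 *
 *   echo 0x1 > /sys/bus/pseudo/drivers/scsi_debug/opts
 */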
6131 module_param_named(add_host, sdebug_add_host, int, S_IRUGO | S_IWUSR);
6132 module_param_named(ato, sdebug_ato, int, S_IRUGO);
6133 module_param_named(cdb_len, sdebug_cdb_len, int, 0644);
6134 module_param_named(clustering, sdebug_clustering, bool, S_IRUGO | S_IWUSR);
6135 module_param_named(delay, sdebug_jdelay, int, S_IRUGO | S_IWUSR);
6136 module_param_named(dev_size_mb, sdebug_dev_size_mb, int, S_IRUGO);
6137 module_param_named(dif, sdebug_dif, int, S_IRUGO);
6138 module_param_named(dix, sdebug_dix, int, S_IRUGO);
6139 module_param_named(dsense, sdebug_dsense, int, S_IRUGO | S_IWUSR);
6140 module_param_named(every_nth, sdebug_every_nth, int, S_IRUGO | S_IWUSR);
6141 module_param_named(fake_rw, sdebug_fake_rw, int, S_IRUGO | S_IWUSR);
6142 module_param_named(guard, sdebug_guard, uint, S_IRUGO);
6143 module_param_named(host_lock, sdebug_host_lock, bool, S_IRUGO | S_IWUSR);
6144 module_param_named(host_max_queue, sdebug_host_max_queue, int, S_IRUGO);
6145 module_param_string(inq_product, sdebug_inq_product_id,
6146 		    sizeof(sdebug_inq_product_id), S_IRUGO | S_IWUSR);
6147 module_param_string(inq_rev, sdebug_inq_product_rev,
6148 		    sizeof(sdebug_inq_product_rev), S_IRUGO | S_IWUSR);
6149 module_param_string(inq_vendor, sdebug_inq_vendor_id,
6150 		    sizeof(sdebug_inq_vendor_id), S_IRUGO | S_IWUSR);
6151 module_param_named(lbprz, sdebug_lbprz, int, S_IRUGO);
6152 module_param_named(lbpu, sdebug_lbpu, int, S_IRUGO);
6153 module_param_named(lbpws, sdebug_lbpws, int, S_IRUGO);
6154 module_param_named(lbpws10, sdebug_lbpws10, int, S_IRUGO);
6155 module_param_named(lowest_aligned, sdebug_lowest_aligned, int, S_IRUGO);
6156 module_param_named(lun_format, sdebug_lun_am_i, int, S_IRUGO | S_IWUSR);
6157 module_param_named(max_luns, sdebug_max_luns, int, S_IRUGO | S_IWUSR);
6158 module_param_named(max_queue, sdebug_max_queue, int, S_IRUGO | S_IWUSR);
6159 module_param_named(medium_error_count, sdebug_medium_error_count, int,
6160 		   S_IRUGO | S_IWUSR);
6161 module_param_named(medium_error_start, sdebug_medium_error_start, int,
6162 		   S_IRUGO | S_IWUSR);
6163 module_param_named(ndelay, sdebug_ndelay, int, S_IRUGO | S_IWUSR);
6164 module_param_named(no_lun_0, sdebug_no_lun_0, int, S_IRUGO | S_IWUSR);
6165 module_param_named(no_rwlock, sdebug_no_rwlock, bool, S_IRUGO | S_IWUSR);
6166 module_param_named(no_uld, sdebug_no_uld, int, S_IRUGO);
6167 module_param_named(num_parts, sdebug_num_parts, int, S_IRUGO);
6168 module_param_named(num_tgts, sdebug_num_tgts, int, S_IRUGO | S_IWUSR);
6169 module_param_named(opt_blks, sdebug_opt_blks, int, S_IRUGO);
6170 module_param_named(opt_xferlen_exp, sdebug_opt_xferlen_exp, int, S_IRUGO);
6171 module_param_named(opts, sdebug_opts, int, S_IRUGO | S_IWUSR);
6172 module_param_named(per_host_store, sdebug_per_host_store, bool,
6173 		   S_IRUGO | S_IWUSR);
6174 module_param_named(physblk_exp, sdebug_physblk_exp, int, S_IRUGO);
6175 module_param_named(ptype, sdebug_ptype, int, S_IRUGO | S_IWUSR);
6176 module_param_named(random, sdebug_random, bool, S_IRUGO | S_IWUSR);
6177 module_param_named(removable, sdebug_removable, bool, S_IRUGO | S_IWUSR);
6178 module_param_named(scsi_level, sdebug_scsi_level, int, S_IRUGO);
6179 module_param_named(sector_size, sdebug_sector_size, int, S_IRUGO);
6180 module_param_named(statistics, sdebug_statistics, bool, S_IRUGO | S_IWUSR);
6181 module_param_named(strict, sdebug_strict, bool, S_IRUGO | S_IWUSR);
6182 module_param_named(submit_queues, submit_queues, int, S_IRUGO);
6183 module_param_named(poll_queues, poll_queues, int, S_IRUGO);
6184 module_param_named(tur_ms_to_ready, sdeb_tur_ms_to_ready, int, S_IRUGO);
6185 module_param_named(unmap_alignment, sdebug_unmap_alignment, int, S_IRUGO);
6186 module_param_named(unmap_granularity, sdebug_unmap_granularity, int, S_IRUGO);
6187 module_param_named(unmap_max_blocks, sdebug_unmap_max_blocks, int, S_IRUGO);
6188 module_param_named(unmap_max_desc, sdebug_unmap_max_desc, int, S_IRUGO);
6189 module_param_named(uuid_ctl, sdebug_uuid_ctl, int, S_IRUGO);
6190 module_param_named(virtual_gb, sdebug_virtual_gb, int, S_IRUGO | S_IWUSR);
6191 module_param_named(vpd_use_hostno, sdebug_vpd_use_hostno, int,
6192 		   S_IRUGO | S_IWUSR);
6193 module_param_named(wp, sdebug_wp, bool, S_IRUGO | S_IWUSR);
6194 module_param_named(write_same_length, sdebug_write_same_length, int,
6195 		   S_IRUGO | S_IWUSR);
6196 module_param_named(zbc, sdeb_zbc_model_s, charp, S_IRUGO);
6197 module_param_named(zone_cap_mb, sdeb_zbc_zone_cap_mb, int, S_IRUGO);
6198 module_param_named(zone_max_open, sdeb_zbc_max_open, int, S_IRUGO);
6199 module_param_named(zone_nr_conv, sdeb_zbc_nr_conv, int, S_IRUGO);
6200 module_param_named(zone_size_mb, sdeb_zbc_zone_size_mb, int, S_IRUGO);
6201 module_param_named(allow_restart, sdebug_allow_restart, bool, S_IRUGO | S_IWUSR);
6202 
6203 MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
6204 MODULE_DESCRIPTION("SCSI debug adapter driver");
6205 MODULE_LICENSE("GPL");
6206 MODULE_VERSION(SDEBUG_VERSION);
6207 
6208 MODULE_PARM_DESC(add_host, "add n hosts, in sysfs if negative remove host(s) (def=1)");
6209 MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
6210 MODULE_PARM_DESC(cdb_len, "suggest CDB lengths to drivers (def=10)");
6211 MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
6212 MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny");
6213 MODULE_PARM_DESC(dev_size_mb, "size in MiB of ram shared by devs(def=8)");
6214 MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
6215 MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
6216 MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
6217 MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
6218 MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
6219 MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
6220 MODULE_PARM_DESC(host_lock, "host_lock is ignored (def=0)");
6221 MODULE_PARM_DESC(host_max_queue,
6222 		 "host max # of queued cmds (0 to max(def) [max_queue fixed equal for !0])");
6223 MODULE_PARM_DESC(inq_product, "SCSI INQUIRY product string (def=\"scsi_debug\")");
6224 MODULE_PARM_DESC(inq_rev, "SCSI INQUIRY revision string (def=\""
6225 		 SDEBUG_VERSION "\")");
6226 MODULE_PARM_DESC(inq_vendor, "SCSI INQUIRY vendor string (def=\"Linux\")");
6227 MODULE_PARM_DESC(lbprz,
6228 		 "on read unmapped LBs return 0 when 1 (def), return 0xff when 2");
6229 MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
6230 MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
6231 MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
6232 MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
6233 MODULE_PARM_DESC(lun_format, "LUN format: 0 -> peripheral (def); 1 -> flat address method");
6234 MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
6235 MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to max(def))");
6236 MODULE_PARM_DESC(medium_error_count, "count of sectors to return follow on MEDIUM error");
6237 MODULE_PARM_DESC(medium_error_start, "starting sector number to return MEDIUM error");
6238 MODULE_PARM_DESC(ndelay, "response delay in nanoseconds (def=0 -> ignore)");
6239 MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
6240 MODULE_PARM_DESC(no_rwlock, "don't protect user data reads+writes (def=0)");
6241 MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0)");
6242 MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
6243 MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
6244 MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=1024)");
6245 MODULE_PARM_DESC(opt_xferlen_exp, "optimal transfer length granularity exponent (def=physblk_exp)");
6246 MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
6247 MODULE_PARM_DESC(per_host_store, "If set, next positive add_host will get new store (def=0)");
6248 MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
6249 MODULE_PARM_DESC(poll_queues, "support for io_uring iopoll queues (1 to max(submit_queues - 1))");
6250 MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
6251 MODULE_PARM_DESC(random, "If set, uniformly randomize command duration between 0 and delay_in_ns");
6252 MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
6253 MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=7[SPC-5])");
6254 MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
6255 MODULE_PARM_DESC(statistics, "collect statistics on commands, queues (def=0)");
6256 MODULE_PARM_DESC(strict, "stricter checks: reserved field in cdb (def=0)");
6257 MODULE_PARM_DESC(submit_queues, "support for block multi-queue (def=1)");
6258 MODULE_PARM_DESC(tur_ms_to_ready, "TEST UNIT READY millisecs before initial good status (def=0)");
6259 MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
6260 MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
6261 MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)");
6262 MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
6263 MODULE_PARM_DESC(uuid_ctl,
6264 		 "1->use uuid for lu name, 0->don't, 2->all use same (def=0)");
6265 MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)");
6266 MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
6267 MODULE_PARM_DESC(wp, "Write Protect (def=0)");
6268 MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
6269 MODULE_PARM_DESC(zbc, "'none' [0]; 'aware' [1]; 'managed' [2] (def=0). Can have 'host-' prefix");
6270 MODULE_PARM_DESC(zone_cap_mb, "Zone capacity in MiB (def=zone size)");
6271 MODULE_PARM_DESC(zone_max_open, "Maximum number of open zones; [0] for no limit (def=auto)");
6272 MODULE_PARM_DESC(zone_nr_conv, "Number of conventional zones (def=1)");
6273 MODULE_PARM_DESC(zone_size_mb, "Zone size in MiB (def=auto)");
6274 MODULE_PARM_DESC(allow_restart, "Set scsi_device's allow_restart flag(def=0)");
6275 
6276 #define SDEBUG_INFO_LEN 256
6277 static char sdebug_info[SDEBUG_INFO_LEN];
6278 
6279 static const char *scsi_debug_info(struct Scsi_Host *shp)
6280 {
6281 	int k;
6282 
6283 	k = scnprintf(sdebug_info, SDEBUG_INFO_LEN, "%s: version %s [%s]\n",
6284 		      my_name, SDEBUG_VERSION, sdebug_version_date);
6285 	if (k >= (SDEBUG_INFO_LEN - 1))
6286 		return sdebug_info;
6287 	scnprintf(sdebug_info + k, SDEBUG_INFO_LEN - k,
6288 		  "  dev_size_mb=%d, opts=0x%x, submit_queues=%d, %s=%d",
6289 		  sdebug_dev_size_mb, sdebug_opts, submit_queues,
6290 		  "statistics", (int)sdebug_statistics);
6291 	return sdebug_info;
6292 }
6293 
6294 /* 'echo <val> > /proc/scsi/scsi_debug/<host_id>' writes to opts */
6295 static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer,
6296 				 int length)
6297 {
6298 	char arr[16];
6299 	int opts;
6300 	int min_len = length > 15 ? 15 : length;
6301 
6302 	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
6303 		return -EACCES;
6304 	memcpy(arr, buffer, min_len);
6305 	arr[min_len] = '\0';
6306 	if (1 != sscanf(arr, "%d", &opts))
6307 		return -EINVAL;
6308 	sdebug_opts = opts;
6309 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
6310 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
6311 	if (sdebug_every_nth != 0)
6312 		tweak_cmnd_count();
6313 	return length;
6314 }
6315 
6316 struct sdebug_submit_queue_data {
6317 	int *first;
6318 	int *last;
6319 	int queue_num;
6320 };
6321 
6322 static bool sdebug_submit_queue_iter(struct request *rq, void *opaque)
6323 {
6324 	struct sdebug_submit_queue_data *data = opaque;
6325 	u32 unique_tag = blk_mq_unique_tag(rq);
6326 	u16 hwq = blk_mq_unique_tag_to_hwq(unique_tag);
6327 	u16 tag = blk_mq_unique_tag_to_tag(unique_tag);
6328 	int queue_num = data->queue_num;
6329 
6330 	if (hwq != queue_num)
6331 		return true;
6332 
6333 	/* Rely on iter'ing in ascending tag order */
6334 	if (*data->first == -1)
6335 		*data->first = *data->last = tag;
6336 	else
6337 		*data->last = tag;
6338 
6339 	return true;
6340 }
6341 
6342 /* Output seen with 'cat /proc/scsi/scsi_debug/<host_id>'. It will be the
6343  * same for each scsi_debug host (if more than one). Some of the counters
6344  * output are not atomics so they might be inaccurate in a busy system. */
6345 static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
6346 {
6347 	struct sdebug_host_info *sdhp;
6348 	int j;
6349 
6350 	seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n",
6351 		   SDEBUG_VERSION, sdebug_version_date);
6352 	seq_printf(m, "num_tgts=%d, %ssize=%d MB, opts=0x%x, every_nth=%d\n",
6353 		   sdebug_num_tgts, "shared (ram) ", sdebug_dev_size_mb,
6354 		   sdebug_opts, sdebug_every_nth);
6355 	seq_printf(m, "delay=%d, ndelay=%d, max_luns=%d, sector_size=%d %s\n",
6356 		   sdebug_jdelay, sdebug_ndelay, sdebug_max_luns,
6357 		   sdebug_sector_size, "bytes");
6358 	seq_printf(m, "cylinders=%d, heads=%d, sectors=%d, command aborts=%d\n",
6359 		   sdebug_cylinders_per, sdebug_heads, sdebug_sectors_per,
6360 		   num_aborts);
6361 	seq_printf(m, "RESETs: device=%d, target=%d, bus=%d, host=%d\n",
6362 		   num_dev_resets, num_target_resets, num_bus_resets,
6363 		   num_host_resets);
6364 	seq_printf(m, "dix_reads=%d, dix_writes=%d, dif_errors=%d\n",
6365 		   dix_reads, dix_writes, dif_errors);
6366 	seq_printf(m, "usec_in_jiffy=%lu, statistics=%d\n", TICK_NSEC / 1000,
6367 		   sdebug_statistics);
6368 	seq_printf(m, "cmnd_count=%d, completions=%d, %s=%d, a_tsf=%d, mq_polls=%d\n",
6369 		   atomic_read(&sdebug_cmnd_count),
6370 		   atomic_read(&sdebug_completions),
6371 		   "miss_cpus", atomic_read(&sdebug_miss_cpus),
6372 		   atomic_read(&sdebug_a_tsf),
6373 		   atomic_read(&sdeb_mq_poll_count));
6374 
6375 	seq_printf(m, "submit_queues=%d\n", submit_queues);
6376 	for (j = 0; j < submit_queues; ++j) {
6377 		int f = -1, l = -1;
6378 		struct sdebug_submit_queue_data data = {
6379 			.queue_num = j,
6380 			.first = &f,
6381 			.last = &l,
6382 		};
6383 		seq_printf(m, "  queue %d:\n", j);
6384 		blk_mq_tagset_busy_iter(&host->tag_set, sdebug_submit_queue_iter,
6385 					&data);
6386 		if (f >= 0) {
6387 			seq_printf(m, "    in_use_bm BUSY: %s: %d,%d\n",
6388 				   "first,last bits", f, l);
6389 		}
6390 	}
6391 
6392 	seq_printf(m, "this host_no=%d\n", host->host_no);
6393 	if (!xa_empty(per_store_ap)) {
6394 		bool niu;
6395 		int idx;
6396 		unsigned long l_idx;
6397 		struct sdeb_store_info *sip;
6398 
6399 		seq_puts(m, "\nhost list:\n");
6400 		j = 0;
6401 		list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
6402 			idx = sdhp->si_idx;
6403 			seq_printf(m, "  %d: host_no=%d, si_idx=%d\n", j,
6404 				   sdhp->shost->host_no, idx);
6405 			++j;
6406 		}
6407 		seq_printf(m, "\nper_store array [most_recent_idx=%d]:\n",
6408 			   sdeb_most_recent_idx);
6409 		j = 0;
6410 		xa_for_each(per_store_ap, l_idx, sip) {
6411 			niu = xa_get_mark(per_store_ap, l_idx,
6412 					  SDEB_XA_NOT_IN_USE);
6413 			idx = (int)l_idx;
6414 			seq_printf(m, "  %d: idx=%d%s\n", j, idx,
6415 				   (niu ? "  not_in_use" : ""));
6416 			++j;
6417 		}
6418 	}
6419 	return 0;
6420 }
6421 
6422 static ssize_t delay_show(struct device_driver *ddp, char *buf)
6423 {
6424 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_jdelay);
6425 }
6426 /* Returns -EBUSY if jdelay is being changed and commands are queued. The unit
6427  * of delay is jiffies.
6428  */
6429 static ssize_t delay_store(struct device_driver *ddp, const char *buf,
6430 			   size_t count)
6431 {
6432 	int jdelay, res;
6433 
6434 	if (count > 0 && sscanf(buf, "%d", &jdelay) == 1) {
6435 		res = count;
6436 		if (sdebug_jdelay != jdelay) {
6437 			struct sdebug_host_info *sdhp;
6438 
6439 			mutex_lock(&sdebug_host_list_mutex);
6440 			block_unblock_all_queues(true);
6441 
6442 			list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
6443 				struct Scsi_Host *shost = sdhp->shost;
6444 
6445 				if (scsi_host_busy(shost)) {
6446 					res = -EBUSY;   /* queued commands */
6447 					break;
6448 				}
6449 			}
6450 			if (res > 0) {
6451 				sdebug_jdelay = jdelay;
6452 				sdebug_ndelay = 0;
6453 			}
6454 			block_unblock_all_queues(false);
6455 			mutex_unlock(&sdebug_host_list_mutex);
6456 		}
6457 		return res;
6458 	}
6459 	return -EINVAL;
6460 }
6461 static DRIVER_ATTR_RW(delay);
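/*
 * Usage example (hypothetical value): make responses immediate. The write
 * fails with -EBUSY if any commands are queued at the time:
 *
 *   echo 0 > /sys/bus/pseudo/drivers/scsi_debug/delay
 */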
6462 
6463 static ssize_t ndelay_show(struct device_driver *ddp, char *buf)
6464 {
6465 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ndelay);
6466 }
6467 /* Returns -EBUSY if ndelay is being changed and commands are queued */
6468 /* If > 0 and accepted then sdebug_jdelay is set to JDELAY_OVERRIDDEN */
6469 static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
6470 			    size_t count)
6471 {
6472 	int ndelay, res;
6473 
6474 	if ((count > 0) && (1 == sscanf(buf, "%d", &ndelay)) &&
6475 	    (ndelay >= 0) && (ndelay < (1000 * 1000 * 1000))) {
6476 		res = count;
6477 		if (sdebug_ndelay != ndelay) {
6478 			struct sdebug_host_info *sdhp;
6479 
6480 			mutex_lock(&sdebug_host_list_mutex);
6481 			block_unblock_all_queues(true);
6482 
6483 			list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
6484 				struct Scsi_Host *shost = sdhp->shost;
6485 
6486 				if (scsi_host_busy(shost)) {
6487 					res = -EBUSY;   /* queued commands */
6488 					break;
6489 				}
6490 			}
6491 
6492 			if (res > 0) {
6493 				sdebug_ndelay = ndelay;
6494 				sdebug_ndelay = ndelay ? JDELAY_OVERRIDDEN
6495 							: DEF_JDELAY;
6496 			}
6497 			block_unblock_all_queues(false);
6498 			mutex_unlock(&sdebug_host_list_mutex);
6499 		}
6500 		return res;
6501 	}
6502 	return -EINVAL;
6503 }
6504 static DRIVER_ATTR_RW(ndelay);
6505 
6506 static ssize_t opts_show(struct device_driver *ddp, char *buf)
6507 {
6508 	return scnprintf(buf, PAGE_SIZE, "0x%x\n", sdebug_opts);
6509 }
6510 
6511 static ssize_t opts_store(struct device_driver *ddp, const char *buf,
6512 			  size_t count)
6513 {
6514 	int opts;
6515 	char work[20];
6516 
6517 	if (sscanf(buf, "%10s", work) == 1) {
6518 		if (strncasecmp(work, "0x", 2) == 0) {
6519 			if (kstrtoint(work + 2, 16, &opts) == 0)
6520 				goto opts_done;
6521 		} else {
6522 			if (kstrtoint(work, 10, &opts) == 0)
6523 				goto opts_done;
6524 		}
6525 	}
6526 	return -EINVAL;
6527 opts_done:
6528 	sdebug_opts = opts;
6529 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
6530 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
6531 	tweak_cmnd_count();
6532 	return count;
6533 }
6534 static DRIVER_ATTR_RW(opts);
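
/*
 * Usage sketch (illustrative): opts accepts decimal or "0x"-prefixed
 * hex, so the following two writes are equivalent:
 *
 *	echo 16 > /sys/bus/pseudo/drivers/scsi_debug/opts
 *	echo 0x10 > /sys/bus/pseudo/drivers/scsi_debug/opts
 *
 * Verbose logging tracks the SDEBUG_OPT_NOISE bit, per the
 * sdebug_verbose assignment in opts_store() above.
 */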
6535 
6536 static ssize_t ptype_show(struct device_driver *ddp, char *buf)
6537 {
6538 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ptype);
6539 }
6540 static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
6541 			   size_t count)
6542 {
6543 	int n;
6544 
6545 	/* Cannot change from or to TYPE_ZBC with sysfs */
6546 	if (sdebug_ptype == TYPE_ZBC)
6547 		return -EINVAL;
6548 
6549 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6550 		if (n == TYPE_ZBC)
6551 			return -EINVAL;
6552 		sdebug_ptype = n;
6553 		return count;
6554 	}
6555 	return -EINVAL;
6556 }
6557 static DRIVER_ATTR_RW(ptype);
6558 
6559 static ssize_t dsense_show(struct device_driver *ddp, char *buf)
6560 {
6561 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dsense);
6562 }
6563 static ssize_t dsense_store(struct device_driver *ddp, const char *buf,
6564 			    size_t count)
6565 {
6566 	int n;
6567 
6568 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6569 		sdebug_dsense = n;
6570 		return count;
6571 	}
6572 	return -EINVAL;
6573 }
6574 static DRIVER_ATTR_RW(dsense);
6575 
6576 static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
6577 {
6578 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_fake_rw);
6579 }
6580 static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
6581 			     size_t count)
6582 {
6583 	int n, idx;
6584 
6585 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6586 		bool want_store = (n == 0);
6587 		struct sdebug_host_info *sdhp;
6588 
6589 		n = (n > 0);
6590 		sdebug_fake_rw = (sdebug_fake_rw > 0);
6591 		if (sdebug_fake_rw == n)
6592 			return count;	/* not transitioning so do nothing */
6593 
6594 		if (want_store) {	/* 1 --> 0 transition, set up store */
6595 			if (sdeb_first_idx < 0) {
6596 				idx = sdebug_add_store();
6597 				if (idx < 0)
6598 					return idx;
6599 			} else {
6600 				idx = sdeb_first_idx;
6601 				xa_clear_mark(per_store_ap, idx,
6602 					      SDEB_XA_NOT_IN_USE);
6603 			}
6604 			/* make all hosts use same store */
6605 			list_for_each_entry(sdhp, &sdebug_host_list,
6606 					    host_list) {
6607 				if (sdhp->si_idx != idx) {
6608 					xa_set_mark(per_store_ap, sdhp->si_idx,
6609 						    SDEB_XA_NOT_IN_USE);
6610 					sdhp->si_idx = idx;
6611 				}
6612 			}
6613 			sdeb_most_recent_idx = idx;
6614 		} else {	/* 0 --> 1 transition is trigger for shrink */
6615 			sdebug_erase_all_stores(true /* apart from first */);
6616 		}
6617 		sdebug_fake_rw = n;
6618 		return count;
6619 	}
6620 	return -EINVAL;
6621 }
6622 static DRIVER_ATTR_RW(fake_rw);
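
/*
 * Summary of the transitions handled by fake_rw_store() above: writing
 * 0 when fake_rw was 1 attaches a backing store (re-using the first
 * store if one exists, otherwise allocating a new one) and points every
 * host at it; writing 1 when fake_rw was 0 erases every store apart
 * from the first.
 */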
6623 
6624 static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf)
6625 {
6626 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_lun_0);
6627 }
6628 static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf,
6629 			      size_t count)
6630 {
6631 	int n;
6632 
6633 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6634 		sdebug_no_lun_0 = n;
6635 		return count;
6636 	}
6637 	return -EINVAL;
6638 }
6639 static DRIVER_ATTR_RW(no_lun_0);
6640 
6641 static ssize_t num_tgts_show(struct device_driver *ddp, char *buf)
6642 {
6643 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_tgts);
6644 }
6645 static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf,
6646 			      size_t count)
6647 {
6648 	int n;
6649 
6650 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6651 		sdebug_num_tgts = n;
6652 		sdebug_max_tgts_luns();
6653 		return count;
6654 	}
6655 	return -EINVAL;
6656 }
6657 static DRIVER_ATTR_RW(num_tgts);
6658 
6659 static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf)
6660 {
6661 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dev_size_mb);
6662 }
6663 static DRIVER_ATTR_RO(dev_size_mb);
6664 
6665 static ssize_t per_host_store_show(struct device_driver *ddp, char *buf)
6666 {
6667 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_per_host_store);
6668 }
6669 
6670 static ssize_t per_host_store_store(struct device_driver *ddp, const char *buf,
6671 				    size_t count)
6672 {
6673 	bool v;
6674 
6675 	if (kstrtobool(buf, &v))
6676 		return -EINVAL;
6677 
6678 	sdebug_per_host_store = v;
6679 	return count;
6680 }
6681 static DRIVER_ATTR_RW(per_host_store);
6682 
6683 static ssize_t num_parts_show(struct device_driver *ddp, char *buf)
6684 {
6685 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_parts);
6686 }
6687 static DRIVER_ATTR_RO(num_parts);
6688 
6689 static ssize_t every_nth_show(struct device_driver *ddp, char *buf)
6690 {
6691 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_every_nth);
6692 }
6693 static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
6694 			       size_t count)
6695 {
6696 	int nth;
6697 	char work[20];
6698 
6699 	if (sscanf(buf, "%10s", work) == 1) {
6700 		if (strncasecmp(work, "0x", 2) == 0) {
6701 			if (kstrtoint(work + 2, 16, &nth) == 0)
6702 				goto every_nth_done;
6703 		} else {
6704 			if (kstrtoint(work, 10, &nth) == 0)
6705 				goto every_nth_done;
6706 		}
6707 	}
6708 	return -EINVAL;
6709 
6710 every_nth_done:
6711 	sdebug_every_nth = nth;
6712 	if (nth && !sdebug_statistics) {
6713 		pr_info("every_nth needs statistics=1, setting it\n");
6714 		sdebug_statistics = true;
6715 	}
6716 	tweak_cmnd_count();
6717 	return count;
6718 }
6719 static DRIVER_ATTR_RW(every_nth);
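
/*
 * Usage sketch (illustrative): act on every 100th command:
 *
 *	echo 100 > /sys/bus/pseudo/drivers/scsi_debug/every_nth
 *
 * As a side effect sdebug_statistics is forced on, since every_nth
 * depends on the command counter that is only kept when statistics=1.
 */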
6720 
6721 static ssize_t lun_format_show(struct device_driver *ddp, char *buf)
6722 {
6723 	return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_lun_am);
6724 }
6725 static ssize_t lun_format_store(struct device_driver *ddp, const char *buf,
6726 				size_t count)
6727 {
6728 	int n;
6729 	bool changed;
6730 
6731 	if (kstrtoint(buf, 0, &n))
6732 		return -EINVAL;
6733 	if (n >= 0) {
6734 		if (n > (int)SAM_LUN_AM_FLAT) {
6735 			pr_warn("only LUN address methods 0 and 1 are supported\n");
6736 			return -EINVAL;
6737 		}
6738 		changed = ((int)sdebug_lun_am != n);
6739 		sdebug_lun_am = n;
6740 		if (changed && sdebug_scsi_level >= 5) {	/* >= SPC-3 */
6741 			struct sdebug_host_info *sdhp;
6742 			struct sdebug_dev_info *dp;
6743 
6744 			mutex_lock(&sdebug_host_list_mutex);
6745 			list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
6746 				list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
6747 					set_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
6748 				}
6749 			}
6750 			mutex_unlock(&sdebug_host_list_mutex);
6751 		}
6752 		return count;
6753 	}
6754 	return -EINVAL;
6755 }
6756 static DRIVER_ATTR_RW(lun_format);
6757 
6758 static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
6759 {
6760 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_luns);
6761 }
6762 static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
6763 			      size_t count)
6764 {
6765 	int n;
6766 	bool changed;
6767 
6768 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6769 		if (n > 256) {
6770 			pr_warn("max_luns can be no more than 256\n");
6771 			return -EINVAL;
6772 		}
6773 		changed = (sdebug_max_luns != n);
6774 		sdebug_max_luns = n;
6775 		sdebug_max_tgts_luns();
6776 		if (changed && (sdebug_scsi_level >= 5)) {	/* >= SPC-3 */
6777 			struct sdebug_host_info *sdhp;
6778 			struct sdebug_dev_info *dp;
6779 
6780 			mutex_lock(&sdebug_host_list_mutex);
6781 			list_for_each_entry(sdhp, &sdebug_host_list,
6782 					    host_list) {
6783 				list_for_each_entry(dp, &sdhp->dev_info_list,
6784 						    dev_list) {
6785 					set_bit(SDEBUG_UA_LUNS_CHANGED,
6786 						dp->uas_bm);
6787 				}
6788 			}
6789 			mutex_unlock(&sdebug_host_list_mutex);
6790 		}
6791 		return count;
6792 	}
6793 	return -EINVAL;
6794 }
6795 static DRIVER_ATTR_RW(max_luns);
6796 
6797 static ssize_t max_queue_show(struct device_driver *ddp, char *buf)
6798 {
6799 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_queue);
6800 }
6801 /* N.B. max_queue may only be changed while no shosts are attached; the
6802  * write is rejected with -EBUSY otherwise (see the check below). */
6803 static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
6804 			       size_t count)
6805 {
6806 	int n;
6807 
6808 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
6809 	    (n <= SDEBUG_CANQUEUE) &&
6810 	    (sdebug_host_max_queue == 0)) {
6811 		mutex_lock(&sdebug_host_list_mutex);
6812 
6813 		/* We may only change sdebug_max_queue when we have no shosts */
6814 		if (list_empty(&sdebug_host_list))
6815 			sdebug_max_queue = n;
6816 		else
6817 			count = -EBUSY;
6818 		mutex_unlock(&sdebug_host_list_mutex);
6819 		return count;
6820 	}
6821 	return -EINVAL;
6822 }
6823 static DRIVER_ATTR_RW(max_queue);
6824 
6825 static ssize_t host_max_queue_show(struct device_driver *ddp, char *buf)
6826 {
6827 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_host_max_queue);
6828 }
6829 
6830 static ssize_t no_rwlock_show(struct device_driver *ddp, char *buf)
6831 {
6832 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_rwlock);
6833 }
6834 
6835 static ssize_t no_rwlock_store(struct device_driver *ddp, const char *buf, size_t count)
6836 {
6837 	bool v;
6838 
6839 	if (kstrtobool(buf, &v))
6840 		return -EINVAL;
6841 
6842 	sdebug_no_rwlock = v;
6843 	return count;
6844 }
6845 static DRIVER_ATTR_RW(no_rwlock);
6846 
6847 /*
6848  * Since this is used for .can_queue, and we get the hc_idx tag from the bitmap
6849  * in range [0, sdebug_host_max_queue), we can't change it.
6850  */
6851 static DRIVER_ATTR_RO(host_max_queue);
6852 
6853 static ssize_t no_uld_show(struct device_driver *ddp, char *buf)
6854 {
6855 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_uld);
6856 }
6857 static DRIVER_ATTR_RO(no_uld);
6858 
6859 static ssize_t scsi_level_show(struct device_driver *ddp, char *buf)
6860 {
6861 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_scsi_level);
6862 }
6863 static DRIVER_ATTR_RO(scsi_level);
6864 
6865 static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf)
6866 {
6867 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_virtual_gb);
6868 }
6869 static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
6870 				size_t count)
6871 {
6872 	int n;
6873 	bool changed;
6874 
6875 	/* Ignore capacity change for ZBC drives for now */
6876 	if (sdeb_zbc_in_use)
6877 		return -ENOTSUPP;
6878 
6879 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6880 		changed = (sdebug_virtual_gb != n);
6881 		sdebug_virtual_gb = n;
6882 		sdebug_capacity = get_sdebug_capacity();
6883 		if (changed) {
6884 			struct sdebug_host_info *sdhp;
6885 			struct sdebug_dev_info *dp;
6886 
6887 			mutex_lock(&sdebug_host_list_mutex);
6888 			list_for_each_entry(sdhp, &sdebug_host_list,
6889 					    host_list) {
6890 				list_for_each_entry(dp, &sdhp->dev_info_list,
6891 						    dev_list) {
6892 					set_bit(SDEBUG_UA_CAPACITY_CHANGED,
6893 						dp->uas_bm);
6894 				}
6895 			}
6896 			mutex_unlock(&sdebug_host_list_mutex);
6897 		}
6898 		return count;
6899 	}
6900 	return -EINVAL;
6901 }
6902 static DRIVER_ATTR_RW(virtual_gb);
6903 
6904 static ssize_t add_host_show(struct device_driver *ddp, char *buf)
6905 {
6906 	/* show the absolute number of hosts currently active */
6907 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_hosts);
6908 }
6909 
6910 static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
6911 			      size_t count)
6912 {
6913 	bool found;
6914 	unsigned long idx;
6915 	struct sdeb_store_info *sip;
6916 	bool want_phs = (sdebug_fake_rw == 0) && sdebug_per_host_store;
6917 	int delta_hosts;
6918 
6919 	if (sscanf(buf, "%d", &delta_hosts) != 1)
6920 		return -EINVAL;
6921 	if (delta_hosts > 0) {
6922 		do {
6923 			found = false;
6924 			if (want_phs) {
6925 				xa_for_each_marked(per_store_ap, idx, sip,
6926 						   SDEB_XA_NOT_IN_USE) {
6927 					sdeb_most_recent_idx = (int)idx;
6928 					found = true;
6929 					break;
6930 				}
6931 				if (found)	/* re-use case */
6932 					sdebug_add_host_helper((int)idx);
6933 				else
6934 					sdebug_do_add_host(true);
6935 			} else {
6936 				sdebug_do_add_host(false);
6937 			}
6938 		} while (--delta_hosts);
6939 	} else if (delta_hosts < 0) {
6940 		do {
6941 			sdebug_do_remove_host(false);
6942 		} while (++delta_hosts);
6943 	}
6944 	return count;
6945 }
6946 static DRIVER_ATTR_RW(add_host);
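
/*
 * Usage sketch (illustrative): a positive value adds that many pseudo
 * hosts and a negative value removes that many, e.g.:
 *
 *	echo 2 > /sys/bus/pseudo/drivers/scsi_debug/add_host
 *	echo -1 > /sys/bus/pseudo/drivers/scsi_debug/add_host
 *
 * Reading the attribute reports the absolute number of active hosts.
 */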
6947 
6948 static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf)
6949 {
6950 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_vpd_use_hostno);
6951 }
6952 static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf,
6953 				    size_t count)
6954 {
6955 	int n;
6956 
6957 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6958 		sdebug_vpd_use_hostno = n;
6959 		return count;
6960 	}
6961 	return -EINVAL;
6962 }
6963 static DRIVER_ATTR_RW(vpd_use_hostno);
6964 
6965 static ssize_t statistics_show(struct device_driver *ddp, char *buf)
6966 {
6967 	return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_statistics);
6968 }
6969 static ssize_t statistics_store(struct device_driver *ddp, const char *buf,
6970 				size_t count)
6971 {
6972 	int n;
6973 
6974 	if ((count > 0) && (sscanf(buf, "%d", &n) == 1) && (n >= 0)) {
6975 		if (n > 0)
6976 			sdebug_statistics = true;
6977 		else {
6978 			clear_queue_stats();
6979 			sdebug_statistics = false;
6980 		}
6981 		return count;
6982 	}
6983 	return -EINVAL;
6984 }
6985 static DRIVER_ATTR_RW(statistics);
6986 
6987 static ssize_t sector_size_show(struct device_driver *ddp, char *buf)
6988 {
6989 	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_sector_size);
6990 }
6991 static DRIVER_ATTR_RO(sector_size);
6992 
6993 static ssize_t submit_queues_show(struct device_driver *ddp, char *buf)
6994 {
6995 	return scnprintf(buf, PAGE_SIZE, "%d\n", submit_queues);
6996 }
6997 static DRIVER_ATTR_RO(submit_queues);
6998 
6999 static ssize_t dix_show(struct device_driver *ddp, char *buf)
7000 {
7001 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dix);
7002 }
7003 static DRIVER_ATTR_RO(dix);
7004 
7005 static ssize_t dif_show(struct device_driver *ddp, char *buf)
7006 {
7007 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dif);
7008 }
7009 static DRIVER_ATTR_RO(dif);
7010 
7011 static ssize_t guard_show(struct device_driver *ddp, char *buf)
7012 {
7013 	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_guard);
7014 }
7015 static DRIVER_ATTR_RO(guard);
7016 
7017 static ssize_t ato_show(struct device_driver *ddp, char *buf)
7018 {
7019 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ato);
7020 }
7021 static DRIVER_ATTR_RO(ato);
7022 
7023 static ssize_t map_show(struct device_driver *ddp, char *buf)
7024 {
7025 	ssize_t count = 0;
7026 
7027 	if (!scsi_debug_lbp())
7028 		return scnprintf(buf, PAGE_SIZE, "0-%u\n",
7029 				 sdebug_store_sectors);
7030 
7031 	if (sdebug_fake_rw == 0 && !xa_empty(per_store_ap)) {
7032 		struct sdeb_store_info *sip = xa_load(per_store_ap, 0);
7033 
7034 		if (sip)
7035 			count = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
7036 					  (int)map_size, sip->map_storep);
7037 	}
7038 	buf[count++] = '\n';
7039 	buf[count] = '\0';
7040 
7041 	return count;
7042 }
7043 static DRIVER_ATTR_RO(map);
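
/*
 * Example output (illustrative): with logical block provisioning
 * active, reading "map" yields the mapped (provisioned) blocks as a
 * bitmap range list, e.g. "0-1,32-47"; without LBP the whole range
 * "0-<sdebug_store_sectors>" is reported.
 */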
7044 
7045 static ssize_t random_show(struct device_driver *ddp, char *buf)
7046 {
7047 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_random);
7048 }
7049 
7050 static ssize_t random_store(struct device_driver *ddp, const char *buf,
7051 			    size_t count)
7052 {
7053 	bool v;
7054 
7055 	if (kstrtobool(buf, &v))
7056 		return -EINVAL;
7057 
7058 	sdebug_random = v;
7059 	return count;
7060 }
7061 static DRIVER_ATTR_RW(random);
7062 
7063 static ssize_t removable_show(struct device_driver *ddp, char *buf)
7064 {
7065 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_removable ? 1 : 0);
7066 }
7067 static ssize_t removable_store(struct device_driver *ddp, const char *buf,
7068 			       size_t count)
7069 {
7070 	int n;
7071 
7072 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
7073 		sdebug_removable = (n > 0);
7074 		return count;
7075 	}
7076 	return -EINVAL;
7077 }
7078 static DRIVER_ATTR_RW(removable);
7079 
7080 static ssize_t host_lock_show(struct device_driver *ddp, char *buf)
7081 {
7082 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_host_lock);
7083 }
7084 /* N.B. sdebug_host_lock does nothing, kept for backward compatibility */
7085 static ssize_t host_lock_store(struct device_driver *ddp, const char *buf,
7086 			       size_t count)
7087 {
7088 	int n;
7089 
7090 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
7091 		sdebug_host_lock = (n > 0);
7092 		return count;
7093 	}
7094 	return -EINVAL;
7095 }
7096 static DRIVER_ATTR_RW(host_lock);
7097 
7098 static ssize_t strict_show(struct device_driver *ddp, char *buf)
7099 {
7100 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_strict);
7101 }
7102 static ssize_t strict_store(struct device_driver *ddp, const char *buf,
7103 			    size_t count)
7104 {
7105 	int n;
7106 
7107 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
7108 		sdebug_strict = (n > 0);
7109 		return count;
7110 	}
7111 	return -EINVAL;
7112 }
7113 static DRIVER_ATTR_RW(strict);
7114 
7115 static ssize_t uuid_ctl_show(struct device_driver *ddp, char *buf)
7116 {
7117 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_uuid_ctl);
7118 }
7119 static DRIVER_ATTR_RO(uuid_ctl);
7120 
7121 static ssize_t cdb_len_show(struct device_driver *ddp, char *buf)
7122 {
7123 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_cdb_len);
7124 }
7125 static ssize_t cdb_len_store(struct device_driver *ddp, const char *buf,
7126 			     size_t count)
7127 {
7128 	int ret, n;
7129 
7130 	ret = kstrtoint(buf, 0, &n);
7131 	if (ret)
7132 		return ret;
7133 	sdebug_cdb_len = n;
7134 	all_config_cdb_len();
7135 	return count;
7136 }
7137 static DRIVER_ATTR_RW(cdb_len);
7138 
7139 static const char * const zbc_model_strs_a[] = {
7140 	[BLK_ZONED_NONE] = "none",
7141 	[BLK_ZONED_HA]   = "host-aware",
7142 	[BLK_ZONED_HM]   = "host-managed",
7143 };
7144 
7145 static const char * const zbc_model_strs_b[] = {
7146 	[BLK_ZONED_NONE] = "no",
7147 	[BLK_ZONED_HA]   = "aware",
7148 	[BLK_ZONED_HM]   = "managed",
7149 };
7150 
7151 static const char * const zbc_model_strs_c[] = {
7152 	[BLK_ZONED_NONE] = "0",
7153 	[BLK_ZONED_HA]   = "1",
7154 	[BLK_ZONED_HM]   = "2",
7155 };
7156 
7157 static int sdeb_zbc_model_str(const char *cp)
7158 {
7159 	int res = sysfs_match_string(zbc_model_strs_a, cp);
7160 
7161 	if (res < 0) {
7162 		res = sysfs_match_string(zbc_model_strs_b, cp);
7163 		if (res < 0) {
7164 			res = sysfs_match_string(zbc_model_strs_c, cp);
7165 			if (res < 0)
7166 				return -EINVAL;
7167 		}
7168 	}
7169 	return res;
7170 }
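
/*
 * The three string tables are tried in order, so e.g. "host-managed",
 * "managed" and "2" all resolve to BLK_ZONED_HM; the returned index
 * doubles as the zoned model value.
 */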
7171 
7172 static ssize_t zbc_show(struct device_driver *ddp, char *buf)
7173 {
7174 	return scnprintf(buf, PAGE_SIZE, "%s\n",
7175 			 zbc_model_strs_a[sdeb_zbc_model]);
7176 }
7177 static DRIVER_ATTR_RO(zbc);
7178 
7179 static ssize_t tur_ms_to_ready_show(struct device_driver *ddp, char *buf)
7180 {
7181 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdeb_tur_ms_to_ready);
7182 }
7183 static DRIVER_ATTR_RO(tur_ms_to_ready);
7184 
7185 /*
7186  * Note: the following array creates attribute files in the
7187  * /sys/bus/pseudo/drivers/scsi_debug directory. Unlike the files in
7188  * /sys/module/scsi_debug/parameters, writing them can trigger auxiliary
7189  * actions; for an example see add_host_store() above.
7190  */
7191 
7192 static struct attribute *sdebug_drv_attrs[] = {
7193 	&driver_attr_delay.attr,
7194 	&driver_attr_opts.attr,
7195 	&driver_attr_ptype.attr,
7196 	&driver_attr_dsense.attr,
7197 	&driver_attr_fake_rw.attr,
7198 	&driver_attr_host_max_queue.attr,
7199 	&driver_attr_no_lun_0.attr,
7200 	&driver_attr_num_tgts.attr,
7201 	&driver_attr_dev_size_mb.attr,
7202 	&driver_attr_num_parts.attr,
7203 	&driver_attr_every_nth.attr,
7204 	&driver_attr_lun_format.attr,
7205 	&driver_attr_max_luns.attr,
7206 	&driver_attr_max_queue.attr,
7207 	&driver_attr_no_rwlock.attr,
7208 	&driver_attr_no_uld.attr,
7209 	&driver_attr_scsi_level.attr,
7210 	&driver_attr_virtual_gb.attr,
7211 	&driver_attr_add_host.attr,
7212 	&driver_attr_per_host_store.attr,
7213 	&driver_attr_vpd_use_hostno.attr,
7214 	&driver_attr_sector_size.attr,
7215 	&driver_attr_statistics.attr,
7216 	&driver_attr_submit_queues.attr,
7217 	&driver_attr_dix.attr,
7218 	&driver_attr_dif.attr,
7219 	&driver_attr_guard.attr,
7220 	&driver_attr_ato.attr,
7221 	&driver_attr_map.attr,
7222 	&driver_attr_random.attr,
7223 	&driver_attr_removable.attr,
7224 	&driver_attr_host_lock.attr,
7225 	&driver_attr_ndelay.attr,
7226 	&driver_attr_strict.attr,
7227 	&driver_attr_uuid_ctl.attr,
7228 	&driver_attr_cdb_len.attr,
7229 	&driver_attr_tur_ms_to_ready.attr,
7230 	&driver_attr_zbc.attr,
7231 	NULL,
7232 };
7233 ATTRIBUTE_GROUPS(sdebug_drv);
7234 
7235 static struct device *pseudo_primary;
7236 
7237 static int __init scsi_debug_init(void)
7238 {
7239 	bool want_store = (sdebug_fake_rw == 0);
7240 	unsigned long sz;
7241 	int k, ret, hosts_to_add;
7242 	int idx = -1;
7243 
7244 	if (sdebug_ndelay >= 1000 * 1000 * 1000) {
7245 		pr_warn("ndelay must be less than 1 second, ignored\n");
7246 		sdebug_ndelay = 0;
7247 	} else if (sdebug_ndelay > 0)
7248 		sdebug_jdelay = JDELAY_OVERRIDDEN;
7249 
7250 	switch (sdebug_sector_size) {
7251 	case  512:
7252 	case 1024:
7253 	case 2048:
7254 	case 4096:
7255 		break;
7256 	default:
7257 		pr_err("invalid sector_size %d\n", sdebug_sector_size);
7258 		return -EINVAL;
7259 	}
7260 
7261 	switch (sdebug_dif) {
7262 	case T10_PI_TYPE0_PROTECTION:
7263 		break;
7264 	case T10_PI_TYPE1_PROTECTION:
7265 	case T10_PI_TYPE2_PROTECTION:
7266 	case T10_PI_TYPE3_PROTECTION:
7267 		have_dif_prot = true;
7268 		break;
7269 
7270 	default:
7271 		pr_err("dif must be 0, 1, 2 or 3\n");
7272 		return -EINVAL;
7273 	}
7274 
7275 	if (sdebug_num_tgts < 0) {
7276 		pr_err("num_tgts must be >= 0\n");
7277 		return -EINVAL;
7278 	}
7279 
7280 	if (sdebug_guard > 1) {
7281 		pr_err("guard must be 0 or 1\n");
7282 		return -EINVAL;
7283 	}
7284 
7285 	if (sdebug_ato > 1) {
7286 		pr_err("ato must be 0 or 1\n");
7287 		return -EINVAL;
7288 	}
7289 
7290 	if (sdebug_physblk_exp > 15) {
7291 		pr_err("invalid physblk_exp %u\n", sdebug_physblk_exp);
7292 		return -EINVAL;
7293 	}
7294 
7295 	sdebug_lun_am = sdebug_lun_am_i;
7296 	if (sdebug_lun_am > SAM_LUN_AM_FLAT) {
7297 		pr_warn("Invalid LUN format %u, using default\n", (int)sdebug_lun_am);
7298 		sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
7299 	}
7300 
7301 	if (sdebug_max_luns > 256) {
7302 		if (sdebug_max_luns > 16384) {
7303 			pr_warn("max_luns can be no more than 16384, using default\n");
7304 			sdebug_max_luns = DEF_MAX_LUNS;
7305 		}
7306 		sdebug_lun_am = SAM_LUN_AM_FLAT;
7307 	}
7308 
7309 	if (sdebug_lowest_aligned > 0x3fff) {
7310 		pr_err("lowest_aligned too big: %u\n", sdebug_lowest_aligned);
7311 		return -EINVAL;
7312 	}
7313 
7314 	if (submit_queues < 1) {
7315 		pr_err("submit_queues must be 1 or more\n");
7316 		return -EINVAL;
7317 	}
7318 
7319 	if ((sdebug_max_queue > SDEBUG_CANQUEUE) || (sdebug_max_queue < 1)) {
7320 		pr_err("max_queue must be in range [1, %d]\n", SDEBUG_CANQUEUE);
7321 		return -EINVAL;
7322 	}
7323 
7324 	if ((sdebug_host_max_queue > SDEBUG_CANQUEUE) ||
7325 	    (sdebug_host_max_queue < 0)) {
7326 		pr_err("host_max_queue must be in range [0 %d]\n",
7327 		       SDEBUG_CANQUEUE);
7328 		return -EINVAL;
7329 	}
7330 
7331 	if (sdebug_host_max_queue &&
7332 	    (sdebug_max_queue != sdebug_host_max_queue)) {
7333 		sdebug_max_queue = sdebug_host_max_queue;
7334 		pr_warn("fixing max submit queue depth to host max queue depth, %d\n",
7335 			sdebug_max_queue);
7336 	}
7337 
7338 	/*
7339 	 * check for host managed zoned block device specified with
7340 	 * ptype=0x14 or zbc=XXX.
7341 	 */
7342 	if (sdebug_ptype == TYPE_ZBC) {
7343 		sdeb_zbc_model = BLK_ZONED_HM;
7344 	} else if (sdeb_zbc_model_s && *sdeb_zbc_model_s) {
7345 		k = sdeb_zbc_model_str(sdeb_zbc_model_s);
7346 		if (k < 0)
7347 			return k;
7348 		sdeb_zbc_model = k;
7349 		switch (sdeb_zbc_model) {
7350 		case BLK_ZONED_NONE:
7351 		case BLK_ZONED_HA:
7352 			sdebug_ptype = TYPE_DISK;
7353 			break;
7354 		case BLK_ZONED_HM:
7355 			sdebug_ptype = TYPE_ZBC;
7356 			break;
7357 		default:
7358 			pr_err("Invalid ZBC model\n");
7359 			return -EINVAL;
7360 		}
7361 	}
7362 	if (sdeb_zbc_model != BLK_ZONED_NONE) {
7363 		sdeb_zbc_in_use = true;
7364 		if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
7365 			sdebug_dev_size_mb = DEF_ZBC_DEV_SIZE_MB;
7366 	}
7367 
7368 	if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
7369 		sdebug_dev_size_mb = DEF_DEV_SIZE_MB;
7370 	if (sdebug_dev_size_mb < 1)
7371 		sdebug_dev_size_mb = 1;  /* force minimum 1 MB ramdisk */
7372 	sz = (unsigned long)sdebug_dev_size_mb * 1048576;
7373 	sdebug_store_sectors = sz / sdebug_sector_size;
7374 	sdebug_capacity = get_sdebug_capacity();
7375 
7376 	/* play around with geometry, don't waste too much on track 0 */
7377 	sdebug_heads = 8;
7378 	sdebug_sectors_per = 32;
7379 	if (sdebug_dev_size_mb >= 256)
7380 		sdebug_heads = 64;
7381 	else if (sdebug_dev_size_mb >= 16)
7382 		sdebug_heads = 32;
7383 	sdebug_cylinders_per = (unsigned long)sdebug_capacity /
7384 			       (sdebug_sectors_per * sdebug_heads);
7385 	if (sdebug_cylinders_per >= 1024) {
7386 		/* other LLDs do this; implies >= 1GB ram disk ... */
7387 		sdebug_heads = 255;
7388 		sdebug_sectors_per = 63;
7389 		sdebug_cylinders_per = (unsigned long)sdebug_capacity /
7390 			       (sdebug_sectors_per * sdebug_heads);
7391 	}
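	/*
	 * Worked example: dev_size_mb=256 with 512 byte sectors gives
	 * 524288 sectors; heads=64 and sectors_per=32 then yield
	 * 524288 / (64 * 32) = 256 cylinders, below the 1024 limit, so
	 * the 255/63 fallback above is not taken.
	 */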
7392 	if (scsi_debug_lbp()) {
7393 		sdebug_unmap_max_blocks =
7394 			clamp(sdebug_unmap_max_blocks, 0U, 0xffffffffU);
7395 
7396 		sdebug_unmap_max_desc =
7397 			clamp(sdebug_unmap_max_desc, 0U, 256U);
7398 
7399 		sdebug_unmap_granularity =
7400 			clamp(sdebug_unmap_granularity, 1U, 0xffffffffU);
7401 
7402 		if (sdebug_unmap_alignment &&
7403 		    sdebug_unmap_granularity <=
7404 		    sdebug_unmap_alignment) {
7405 			pr_err("ERR: unmap_granularity <= unmap_alignment\n");
7406 			return -EINVAL;
7407 		}
7408 	}
7409 	xa_init_flags(per_store_ap, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
7410 	if (want_store) {
7411 		idx = sdebug_add_store();
7412 		if (idx < 0)
7413 			return idx;
7414 	}
7415 
7416 	pseudo_primary = root_device_register("pseudo_0");
7417 	if (IS_ERR(pseudo_primary)) {
7418 		pr_warn("root_device_register() error\n");
7419 		ret = PTR_ERR(pseudo_primary);
7420 		goto free_vm;
7421 	}
7422 	ret = bus_register(&pseudo_lld_bus);
7423 	if (ret < 0) {
7424 		pr_warn("bus_register error: %d\n", ret);
7425 		goto dev_unreg;
7426 	}
7427 	ret = driver_register(&sdebug_driverfs_driver);
7428 	if (ret < 0) {
7429 		pr_warn("driver_register error: %d\n", ret);
7430 		goto bus_unreg;
7431 	}
7432 
7433 	hosts_to_add = sdebug_add_host;
7434 	sdebug_add_host = 0;
7435 
7436 	queued_cmd_cache = KMEM_CACHE(sdebug_queued_cmd, SLAB_HWCACHE_ALIGN);
7437 	if (!queued_cmd_cache) {
7438 		ret = -ENOMEM;
7439 		goto driver_unreg;
7440 	}
7441 
7442 	sdebug_debugfs_root = debugfs_create_dir("scsi_debug", NULL);
7443 	if (IS_ERR_OR_NULL(sdebug_debugfs_root))
7444 		pr_info("%s: failed to create initial debugfs directory\n", __func__);
7445 
7446 	for (k = 0; k < hosts_to_add; k++) {
7447 		if (want_store && k == 0) {
7448 			ret = sdebug_add_host_helper(idx);
7449 			if (ret < 0) {
7450 				pr_err("add_host_helper k=%d, error=%d\n",
7451 				       k, -ret);
7452 				break;
7453 			}
7454 		} else {
7455 			ret = sdebug_do_add_host(want_store &&
7456 						 sdebug_per_host_store);
7457 			if (ret < 0) {
7458 				pr_err("add_host k=%d error=%d\n", k, -ret);
7459 				break;
7460 			}
7461 		}
7462 	}
7463 	if (sdebug_verbose)
7464 		pr_info("built %d host(s)\n", sdebug_num_hosts);
7465 
7466 	return 0;
7467 
7468 driver_unreg:
7469 	driver_unregister(&sdebug_driverfs_driver);
7470 bus_unreg:
7471 	bus_unregister(&pseudo_lld_bus);
7472 dev_unreg:
7473 	root_device_unregister(pseudo_primary);
7474 free_vm:
7475 	sdebug_erase_store(idx, NULL);
7476 	return ret;
7477 }
7478 
7479 static void __exit scsi_debug_exit(void)
7480 {
7481 	int k = sdebug_num_hosts;
7482 
7483 	for (; k; k--)
7484 		sdebug_do_remove_host(true);
7485 	kmem_cache_destroy(queued_cmd_cache);
7486 	driver_unregister(&sdebug_driverfs_driver);
7487 	bus_unregister(&pseudo_lld_bus);
7488 	root_device_unregister(pseudo_primary);
7489 
7490 	sdebug_erase_all_stores(false);
7491 	xa_destroy(per_store_ap);
7492 	debugfs_remove(sdebug_debugfs_root);
7493 }
7494 
7495 device_initcall(scsi_debug_init);
7496 module_exit(scsi_debug_exit);
7497 
7498 static void sdebug_release_adapter(struct device *dev)
7499 {
7500 	struct sdebug_host_info *sdbg_host;
7501 
7502 	sdbg_host = dev_to_sdebug_host(dev);
7503 	kfree(sdbg_host);
7504 }
7505 
7506 /* idx must be valid; if sip is NULL then it will be looked up using idx */
7507 static void sdebug_erase_store(int idx, struct sdeb_store_info *sip)
7508 {
7509 	if (idx < 0)
7510 		return;
7511 	if (!sip) {
7512 		if (xa_empty(per_store_ap))
7513 			return;
7514 		sip = xa_load(per_store_ap, idx);
7515 		if (!sip)
7516 			return;
7517 	}
7518 	vfree(sip->map_storep);
7519 	vfree(sip->dif_storep);
7520 	vfree(sip->storep);
7521 	xa_erase(per_store_ap, idx);
7522 	kfree(sip);
7523 }
7524 
7525 /* apart_from_first is expected to be false only in the shutdown case. */
7526 static void sdebug_erase_all_stores(bool apart_from_first)
7527 {
7528 	unsigned long idx;
7529 	struct sdeb_store_info *sip = NULL;
7530 
7531 	xa_for_each(per_store_ap, idx, sip) {
7532 		if (apart_from_first)
7533 			apart_from_first = false;
7534 		else
7535 			sdebug_erase_store(idx, sip);
7536 	}
7537 	if (apart_from_first)
7538 		sdeb_most_recent_idx = sdeb_first_idx;
7539 }
7540 
7541 /*
7542  * Returns the new element's index (idx) in the store xarray if >= 0,
7543  * else a negated errno. The number of stores is limited to 65536.
7544  */
7545 static int sdebug_add_store(void)
7546 {
7547 	int res;
7548 	u32 n_idx;
7549 	unsigned long iflags;
7550 	unsigned long sz = (unsigned long)sdebug_dev_size_mb * 1048576;
7551 	struct sdeb_store_info *sip = NULL;
7552 	struct xa_limit xal = { .max = 1 << 16, .min = 0 };
7553 
7554 	sip = kzalloc(sizeof(*sip), GFP_KERNEL);
7555 	if (!sip)
7556 		return -ENOMEM;
7557 
7558 	xa_lock_irqsave(per_store_ap, iflags);
7559 	res = __xa_alloc(per_store_ap, &n_idx, sip, xal, GFP_ATOMIC);
7560 	if (unlikely(res < 0)) {
7561 		xa_unlock_irqrestore(per_store_ap, iflags);
7562 		kfree(sip);
7563 		pr_warn("%s: xa_alloc() errno=%d\n", __func__, -res);
7564 		return res;
7565 	}
7566 	sdeb_most_recent_idx = n_idx;
7567 	if (sdeb_first_idx < 0)
7568 		sdeb_first_idx = n_idx;
7569 	xa_unlock_irqrestore(per_store_ap, iflags);
7570 
7571 	res = -ENOMEM;
7572 	sip->storep = vzalloc(sz);
7573 	if (!sip->storep) {
7574 		pr_err("user data oom\n");
7575 		goto err;
7576 	}
7577 	if (sdebug_num_parts > 0)
7578 		sdebug_build_parts(sip->storep, sz);
7579 
7580 	/* DIF/DIX: what T10 calls Protection Information (PI) */
7581 	if (sdebug_dix) {
7582 		int dif_size;
7583 
7584 		dif_size = sdebug_store_sectors * sizeof(struct t10_pi_tuple);
7585 		sip->dif_storep = vmalloc(dif_size);
7586 
7587 		pr_info("dif_storep %u bytes @ %pK\n", dif_size,
7588 			sip->dif_storep);
7589 
7590 		if (!sip->dif_storep) {
7591 			pr_err("DIX oom\n");
7592 			goto err;
7593 		}
7594 		memset(sip->dif_storep, 0xff, dif_size);
7595 	}
7596 	/* Logical Block Provisioning */
7597 	if (scsi_debug_lbp()) {
7598 		map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
7599 		sip->map_storep = vmalloc(array_size(sizeof(long),
7600 						     BITS_TO_LONGS(map_size)));
7601 
7602 		pr_info("%lu provisioning blocks\n", map_size);
7603 
7604 		if (!sip->map_storep) {
7605 			pr_err("LBP map oom\n");
7606 			goto err;
7607 		}
7608 
7609 		bitmap_zero(sip->map_storep, map_size);
7610 
7611 		/* Map first 1KB for partition table */
7612 		if (sdebug_num_parts)
7613 			map_region(sip, 0, 2);
7614 	}
7615 
7616 	rwlock_init(&sip->macc_lck);
7617 	return (int)n_idx;
7618 err:
7619 	sdebug_erase_store((int)n_idx, sip);
7620 	pr_warn("%s: failed, errno=%d\n", __func__, -res);
7621 	return res;
7622 }
7623 
7624 static int sdebug_add_host_helper(int per_host_idx)
7625 {
7626 	int k, devs_per_host, idx;
7627 	int error = -ENOMEM;
7628 	struct sdebug_host_info *sdbg_host;
7629 	struct sdebug_dev_info *sdbg_devinfo, *tmp;
7630 
7631 	sdbg_host = kzalloc(sizeof(*sdbg_host), GFP_KERNEL);
7632 	if (!sdbg_host)
7633 		return -ENOMEM;
7634 	idx = (per_host_idx < 0) ? sdeb_first_idx : per_host_idx;
7635 	if (xa_get_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE))
7636 		xa_clear_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
7637 	sdbg_host->si_idx = idx;
7638 
7639 	INIT_LIST_HEAD(&sdbg_host->dev_info_list);
7640 
7641 	devs_per_host = sdebug_num_tgts * sdebug_max_luns;
7642 	for (k = 0; k < devs_per_host; k++) {
7643 		sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
7644 		if (!sdbg_devinfo)
7645 			goto clean;
7646 	}
7647 
7648 	mutex_lock(&sdebug_host_list_mutex);
7649 	list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
7650 	mutex_unlock(&sdebug_host_list_mutex);
7651 
7652 	sdbg_host->dev.bus = &pseudo_lld_bus;
7653 	sdbg_host->dev.parent = pseudo_primary;
7654 	sdbg_host->dev.release = &sdebug_release_adapter;
7655 	dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_num_hosts);
7656 
7657 	error = device_register(&sdbg_host->dev);
7658 	if (error) {
7659 		mutex_lock(&sdebug_host_list_mutex);
7660 		list_del(&sdbg_host->host_list);
7661 		mutex_unlock(&sdebug_host_list_mutex);
7662 		goto clean;
7663 	}
7664 
7665 	++sdebug_num_hosts;
7666 	return 0;
7667 
7668 clean:
7669 	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
7670 				 dev_list) {
7671 		list_del(&sdbg_devinfo->dev_list);
7672 		kfree(sdbg_devinfo->zstate);
7673 		kfree(sdbg_devinfo);
7674 	}
7675 	if (sdbg_host->dev.release)
7676 		put_device(&sdbg_host->dev);
7677 	else
7678 		kfree(sdbg_host);
7679 	pr_warn("%s: failed, errno=%d\n", __func__, -error);
7680 	return error;
7681 }
7682 
7683 static int sdebug_do_add_host(bool mk_new_store)
7684 {
7685 	int ph_idx = sdeb_most_recent_idx;
7686 
7687 	if (mk_new_store) {
7688 		ph_idx = sdebug_add_store();
7689 		if (ph_idx < 0)
7690 			return ph_idx;
7691 	}
7692 	return sdebug_add_host_helper(ph_idx);
7693 }
7694 
7695 static void sdebug_do_remove_host(bool the_end)
7696 {
7697 	int idx = -1;
7698 	struct sdebug_host_info *sdbg_host = NULL;
7699 	struct sdebug_host_info *sdbg_host2;
7700 
7701 	mutex_lock(&sdebug_host_list_mutex);
7702 	if (!list_empty(&sdebug_host_list)) {
7703 		sdbg_host = list_entry(sdebug_host_list.prev,
7704 				       struct sdebug_host_info, host_list);
7705 		idx = sdbg_host->si_idx;
7706 	}
7707 	if (!the_end && idx >= 0) {
7708 		bool unique = true;
7709 
7710 		list_for_each_entry(sdbg_host2, &sdebug_host_list, host_list) {
7711 			if (sdbg_host2 == sdbg_host)
7712 				continue;
7713 			if (idx == sdbg_host2->si_idx) {
7714 				unique = false;
7715 				break;
7716 			}
7717 		}
7718 		if (unique) {
7719 			xa_set_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
7720 			if (idx == sdeb_most_recent_idx)
7721 				--sdeb_most_recent_idx;
7722 		}
7723 	}
7724 	if (sdbg_host)
7725 		list_del(&sdbg_host->host_list);
7726 	mutex_unlock(&sdebug_host_list_mutex);
7727 
7728 	if (!sdbg_host)
7729 		return;
7730 
7731 	device_unregister(&sdbg_host->dev);
7732 	--sdebug_num_hosts;
7733 }
7734 
7735 static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
7736 {
7737 	struct sdebug_dev_info *devip = sdev->hostdata;
7738 
7739 	if (!devip)
7740 		return	-ENODEV;
7741 
7742 	mutex_lock(&sdebug_host_list_mutex);
7743 	block_unblock_all_queues(true);
7744 
7745 	if (qdepth > SDEBUG_CANQUEUE) {
7746 		qdepth = SDEBUG_CANQUEUE;
7747 		pr_warn("%s: requested qdepth [%d] exceeds canqueue [%d], trimming\n", __func__,
7748 			qdepth, SDEBUG_CANQUEUE);
7749 	}
7750 	if (qdepth < 1)
7751 		qdepth = 1;
7752 	if (qdepth != sdev->queue_depth)
7753 		scsi_change_queue_depth(sdev, qdepth);
7754 
7755 	block_unblock_all_queues(false);
7756 	mutex_unlock(&sdebug_host_list_mutex);
7757 
7758 	if (SDEBUG_OPT_Q_NOISE & sdebug_opts)
7759 		sdev_printk(KERN_INFO, sdev, "%s: qdepth=%d\n", __func__, qdepth);
7760 
7761 	return sdev->queue_depth;
7762 }
7763 
7764 static bool fake_timeout(struct scsi_cmnd *scp)
7765 {
7766 	if (0 == (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth))) {
7767 		if (sdebug_every_nth < -1)
7768 			sdebug_every_nth = -1;
7769 		if (SDEBUG_OPT_TIMEOUT & sdebug_opts)
7770 			return true; /* ignore command causing timeout */
7771 		else if (SDEBUG_OPT_MAC_TIMEOUT & sdebug_opts &&
7772 			 scsi_medium_access_command(scp))
7773 			return true; /* time out reads and writes */
7774 	}
7775 	return false;
7776 }
7777 
7778 /* Response to TUR or media access command when device stopped */
7779 static int resp_not_ready(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
7780 {
7781 	int stopped_state;
7782 	u64 diff_ns = 0;
7783 	ktime_t now_ts = ktime_get_boottime();
7784 	struct scsi_device *sdp = scp->device;
7785 
7786 	stopped_state = atomic_read(&devip->stopped);
7787 	if (stopped_state == 2) {
7788 		if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
7789 			diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));
7790 			if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
7791 				/* tur_ms_to_ready timer extinguished */
7792 				atomic_set(&devip->stopped, 0);
7793 				return 0;
7794 			}
7795 		}
7796 		mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x1);
7797 		if (sdebug_verbose)
7798 			sdev_printk(KERN_INFO, sdp,
7799 				    "%s: Not ready: in process of becoming ready\n", my_name);
7800 		if (scp->cmnd[0] == TEST_UNIT_READY) {
7801 			u64 tur_nanosecs_to_ready = (u64)sdeb_tur_ms_to_ready * 1000000;
7802 
7803 			if (diff_ns <= tur_nanosecs_to_ready)
7804 				diff_ns = tur_nanosecs_to_ready - diff_ns;
7805 			else
7806 				diff_ns = tur_nanosecs_to_ready;
7807 			/* As per 20-061r2 approved for spc6 by T10 on 20200716 */
7808 			do_div(diff_ns, 1000000);	/* diff_ns becomes milliseconds */
7809 			scsi_set_sense_information(scp->sense_buffer, SCSI_SENSE_BUFFERSIZE,
7810 						   diff_ns);
7811 			return check_condition_result;
7812 		}
7813 	}
7814 	mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x2);
7815 	if (sdebug_verbose)
7816 		sdev_printk(KERN_INFO, sdp, "%s: Not ready: initializing command required\n",
7817 			    my_name);
7818 	return check_condition_result;
7819 }
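
/*
 * Worked example: with tur_ms_to_ready=2000 and a TEST UNIT READY
 * arriving 1500 ms after device creation (stopped state 2), the code
 * above reports 2000 - 1500 = 500 milliseconds remaining in the
 * sense-data information field, per the 20-061r2 proposal it cites.
 */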
7820 
7821 static void sdebug_map_queues(struct Scsi_Host *shost)
7822 {
7823 	int i, qoff;
7824 
7825 	if (shost->nr_hw_queues == 1)
7826 		return;
7827 
7828 	for (i = 0, qoff = 0; i < HCTX_MAX_TYPES; i++) {
7829 		struct blk_mq_queue_map *map = &shost->tag_set.map[i];
7830 
7831 		map->nr_queues  = 0;
7832 
7833 		if (i == HCTX_TYPE_DEFAULT)
7834 			map->nr_queues = submit_queues - poll_queues;
7835 		else if (i == HCTX_TYPE_POLL)
7836 			map->nr_queues = poll_queues;
7837 
7838 		if (!map->nr_queues) {
7839 			BUG_ON(i == HCTX_TYPE_DEFAULT);
7840 			continue;
7841 		}
7842 
7843 		map->queue_offset = qoff;
7844 		blk_mq_map_queues(map);
7845 
7846 		qoff += map->nr_queues;
7847 	}
7848 }
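
/*
 * Worked example: with submit_queues=4 and poll_queues=1, the loop
 * above gives HCTX_TYPE_DEFAULT 3 queues at offset 0 and
 * HCTX_TYPE_POLL 1 queue at offset 3; HCTX_TYPE_READ gets no queues
 * and is skipped.
 */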
7849 
7850 struct sdebug_blk_mq_poll_data {
7851 	unsigned int queue_num;
7852 	int *num_entries;
7853 };
7854 
7855 /*
7856  * We don't handle aborted commands here, but it does not seem possible
7857  * for schedule_resp() to produce aborted polled commands.
7858  */
7859 static bool sdebug_blk_mq_poll_iter(struct request *rq, void *opaque)
7860 {
7861 	struct sdebug_blk_mq_poll_data *data = opaque;
7862 	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
7863 	struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmd);
7864 	struct sdebug_defer *sd_dp;
7865 	u32 unique_tag = blk_mq_unique_tag(rq);
7866 	u16 hwq = blk_mq_unique_tag_to_hwq(unique_tag);
7867 	struct sdebug_queued_cmd *sqcp;
7868 	unsigned long flags;
7869 	int queue_num = data->queue_num;
7870 	ktime_t time;
7871 
7872 	/* We're only interested in one queue for this iteration */
7873 	if (hwq != queue_num)
7874 		return true;
7875 
7876 	/* Subsequent checks would fail if this failed, but check anyway */
7877 	if (!test_bit(SCMD_STATE_INFLIGHT, &cmd->state))
7878 		return true;
7879 
7880 	time = ktime_get_boottime();
7881 
7882 	spin_lock_irqsave(&sdsc->lock, flags);
7883 	sqcp = TO_QUEUED_CMD(cmd);
7884 	if (!sqcp) {
7885 		spin_unlock_irqrestore(&sdsc->lock, flags);
7886 		return true;
7887 	}
7888 
7889 	sd_dp = &sqcp->sd_dp;
7890 	if (READ_ONCE(sd_dp->defer_t) != SDEB_DEFER_POLL) {
7891 		spin_unlock_irqrestore(&sdsc->lock, flags);
7892 		return true;
7893 	}
7894 
7895 	if (time < sd_dp->cmpl_ts) {
7896 		spin_unlock_irqrestore(&sdsc->lock, flags);
7897 		return true;
7898 	}
7899 
7900 	ASSIGN_QUEUED_CMD(cmd, NULL);
7901 	spin_unlock_irqrestore(&sdsc->lock, flags);
7902 
7903 	if (sdebug_statistics) {
7904 		atomic_inc(&sdebug_completions);
7905 		if (raw_smp_processor_id() != sd_dp->issuing_cpu)
7906 			atomic_inc(&sdebug_miss_cpus);
7907 	}
7908 
7909 	sdebug_free_queued_cmd(sqcp);
7910 
7911 	scsi_done(cmd); /* callback to mid level */
7912 	(*data->num_entries)++;
7913 	return true;
7914 }
7915 
7916 static int sdebug_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
7917 {
7918 	int num_entries = 0;
7919 	struct sdebug_blk_mq_poll_data data = {
7920 		.queue_num = queue_num,
7921 		.num_entries = &num_entries,
7922 	};
7923 
7924 	blk_mq_tagset_busy_iter(&shost->tag_set, sdebug_blk_mq_poll_iter,
7925 				&data);
7926 
7927 	if (num_entries > 0)
7928 		atomic_add(num_entries, &sdeb_mq_poll_count);
7929 	return num_entries;
7930 }
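
/*
 * sdebug_blk_mq_poll() above is the host's mq_poll hook: it reaps
 * commands whose deferral type is SDEB_DEFER_POLL and whose completion
 * time has passed, returning the number completed on this pass.
 */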
7931 
7932 static int sdebug_timeout_cmd(struct scsi_cmnd *cmnd)
7933 {
7934 	struct scsi_device *sdp = cmnd->device;
7935 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
7936 	struct sdebug_err_inject *err;
7937 	unsigned char *cmd = cmnd->cmnd;
7938 	int ret = 0;
7939 
7940 	if (devip == NULL)
7941 		return 0;
7942 
7943 	rcu_read_lock();
7944 	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
7945 		if (err->type == ERR_TMOUT_CMD &&
7946 		    (err->cmd == cmd[0] || err->cmd == 0xff)) {
7947 			ret = !!err->cnt;
7948 			if (err->cnt < 0)
7949 				err->cnt++;
7950 
7951 			rcu_read_unlock();
7952 			return ret;
7953 		}
7954 	}
7955 	rcu_read_unlock();
7956 
7957 	return 0;
7958 }
7959 
7960 static int sdebug_fail_queue_cmd(struct scsi_cmnd *cmnd)
7961 {
7962 	struct scsi_device *sdp = cmnd->device;
7963 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
7964 	struct sdebug_err_inject *err;
7965 	unsigned char *cmd = cmnd->cmnd;
7966 	int ret = 0;
7967 
7968 	if (devip == NULL)
7969 		return 0;
7970 
7971 	rcu_read_lock();
7972 	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
7973 		if (err->type == ERR_FAIL_QUEUE_CMD &&
7974 		    (err->cmd == cmd[0] || err->cmd == 0xff)) {
7975 			ret = err->cnt ? err->queuecmd_ret : 0;
7976 			if (err->cnt < 0)
7977 				err->cnt++;
7978 
7979 			rcu_read_unlock();
7980 			return ret;
7981 		}
7982 	}
7983 	rcu_read_unlock();
7984 
7985 	return 0;
7986 }
7987 
7988 static int sdebug_fail_cmd(struct scsi_cmnd *cmnd, int *retval,
7989 			   struct sdebug_err_inject *info)
7990 {
7991 	struct scsi_device *sdp = cmnd->device;
7992 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
7993 	struct sdebug_err_inject *err;
7994 	unsigned char *cmd = cmnd->cmnd;
7995 	int ret = 0;
7996 	int result;
7997 
7998 	if (devip == NULL)
7999 		return 0;
8000 
8001 	rcu_read_lock();
8002 	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
8003 		if (err->type == ERR_FAIL_CMD &&
8004 		    (err->cmd == cmd[0] || err->cmd == 0xff)) {
8005 			if (!err->cnt) {
8006 				rcu_read_unlock();
8007 				return 0;
8008 			}
8009 
8010 			ret = !!err->cnt;
8011 			rcu_read_unlock();
8012 			goto out_handle;
8013 		}
8014 	}
8015 	rcu_read_unlock();
8016 
8017 	return 0;
8018 
8019 out_handle:
8020 	if (err->cnt < 0)
8021 		err->cnt++;
8022 	mk_sense_buffer(cmnd, err->sense_key, err->asc, err->asq);
8023 	result = err->status_byte | err->host_byte << 16 | err->driver_byte << 24;
8024 	*info = *err;
8025 	*retval = schedule_resp(cmnd, devip, result, NULL, 0, 0);
8026 
8027 	return ret;
8028 }
8029 
8030 static int scsi_debug_queuecommand(struct Scsi_Host *shost,
8031 				   struct scsi_cmnd *scp)
8032 {
8033 	u8 sdeb_i;
8034 	struct scsi_device *sdp = scp->device;
8035 	const struct opcode_info_t *oip;
8036 	const struct opcode_info_t *r_oip;
8037 	struct sdebug_dev_info *devip;
8038 	u8 *cmd = scp->cmnd;
8039 	int (*r_pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
8040 	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *) = NULL;
8041 	int k, na;
8042 	int errsts = 0;
8043 	u64 lun_index = sdp->lun & 0x3FFF;
8044 	u32 flags;
8045 	u16 sa;
8046 	u8 opcode = cmd[0];
8047 	bool has_wlun_rl;
8048 	bool inject_now;
8049 	int ret = 0;
8050 	struct sdebug_err_inject err;
8051 
8052 	scsi_set_resid(scp, 0);
8053 	if (sdebug_statistics) {
8054 		atomic_inc(&sdebug_cmnd_count);
8055 		inject_now = inject_on_this_cmd();
8056 	} else {
8057 		inject_now = false;
8058 	}
8059 	if (unlikely(sdebug_verbose &&
8060 		     !(SDEBUG_OPT_NO_CDB_NOISE & sdebug_opts))) {
8061 		char b[120];
8062 		int n, len, sb;
8063 
8064 		len = scp->cmd_len;
8065 		sb = (int)sizeof(b);
8066 		if (len > 32)
8067 			strcpy(b, "too long, over 32 bytes");
8068 		else {
8069 			for (k = 0, n = 0; k < len && n < sb; ++k)
8070 				n += scnprintf(b + n, sb - n, "%02x ",
8071 					       (u32)cmd[k]);
8072 		}
8073 		sdev_printk(KERN_INFO, sdp, "%s: tag=%#x, cmd %s\n", my_name,
8074 			    blk_mq_unique_tag(scsi_cmd_to_rq(scp)), b);
8075 	}
8076 	if (unlikely(inject_now && (sdebug_opts & SDEBUG_OPT_HOST_BUSY)))
8077 		return SCSI_MLQUEUE_HOST_BUSY;
8078 	has_wlun_rl = (sdp->lun == SCSI_W_LUN_REPORT_LUNS);
8079 	if (unlikely(lun_index >= sdebug_max_luns && !has_wlun_rl))
8080 		goto err_out;
8081 
8082 	sdeb_i = opcode_ind_arr[opcode];	/* fully mapped */
8083 	oip = &opcode_info_arr[sdeb_i];		/* safe if table consistent */
8084 	devip = (struct sdebug_dev_info *)sdp->hostdata;
8085 	if (unlikely(!devip)) {
8086 		devip = find_build_dev_info(sdp);
8087 		if (NULL == devip)
8088 			goto err_out;
8089 	}
8090 
8091 	if (sdebug_timeout_cmd(scp)) {
8092 		scmd_printk(KERN_INFO, scp, "timeout command 0x%x\n", opcode);
8093 		return 0;
8094 	}
8095 
8096 	ret = sdebug_fail_queue_cmd(scp);
8097 	if (ret) {
8098 		scmd_printk(KERN_INFO, scp, "fail queue command 0x%x with 0x%x\n",
8099 				opcode, ret);
8100 		return ret;
8101 	}
8102 
8103 	if (sdebug_fail_cmd(scp, &ret, &err)) {
8104 		scmd_printk(KERN_INFO, scp,
8105 			"fail command 0x%x with hostbyte=0x%x, "
8106 			"driverbyte=0x%x, statusbyte=0x%x, "
8107 			"sense_key=0x%x, asc=0x%x, asq=0x%x\n",
8108 			opcode, err.host_byte, err.driver_byte,
8109 			err.status_byte, err.sense_key, err.asc, err.asq);
8110 		return ret;
8111 	}
8112 
8113 	if (unlikely(inject_now && !atomic_read(&sdeb_inject_pending)))
8114 		atomic_set(&sdeb_inject_pending, 1);
8115 
8116 	na = oip->num_attached;
8117 	r_pfp = oip->pfp;
8118 	if (na) {	/* multiple commands with this opcode */
8119 		r_oip = oip;
8120 		if (FF_SA & r_oip->flags) {
8121 			if (F_SA_LOW & oip->flags)
8122 				sa = 0x1f & cmd[1];
8123 			else
8124 				sa = get_unaligned_be16(cmd + 8);
8125 			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
8126 				if (opcode == oip->opcode && sa == oip->sa)
8127 					break;
8128 			}
8129 		} else {   /* since no service action only check opcode */
8130 			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
8131 				if (opcode == oip->opcode)
8132 					break;
8133 			}
8134 		}
8135 		if (k > na) {
8136 			if (F_SA_LOW & r_oip->flags)
8137 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 4);
8138 			else if (F_SA_HIGH & r_oip->flags)
8139 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, 7);
8140 			else
8141 				mk_sense_invalid_opcode(scp);
8142 			goto check_cond;
8143 		}
8144 	}	/* else (when na==0) we assume the oip is a match */
8145 	flags = oip->flags;
8146 	if (unlikely(F_INV_OP & flags)) {
8147 		mk_sense_invalid_opcode(scp);
8148 		goto check_cond;
8149 	}
8150 	if (unlikely(has_wlun_rl && !(F_RL_WLUN_OK & flags))) {
8151 		if (sdebug_verbose)
8152 			sdev_printk(KERN_INFO, sdp, "%s: Opcode 0x%x not supported for wlun\n",
8153 				    my_name, opcode);
8154 		mk_sense_invalid_opcode(scp);
8155 		goto check_cond;
8156 	}
8157 	if (unlikely(sdebug_strict)) {	/* check cdb against mask */
8158 		u8 rem;
8159 		int j;
8160 
8161 		for (k = 1; k < oip->len_mask[0] && k < 16; ++k) {
8162 			rem = ~oip->len_mask[k] & cmd[k];
8163 			if (rem) {
8164 				for (j = 7; j >= 0; --j, rem <<= 1) {
8165 					if (0x80 & rem)
8166 						break;
8167 				}
8168 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, k, j);
8169 				goto check_cond;
8170 			}
8171 		}
8172 	}
8173 	if (unlikely(!(F_SKIP_UA & flags) &&
8174 		     find_first_bit(devip->uas_bm,
8175 				    SDEBUG_NUM_UAS) != SDEBUG_NUM_UAS)) {
8176 		errsts = make_ua(scp, devip);
8177 		if (errsts)
8178 			goto check_cond;
8179 	}
8180 	if (unlikely(((F_M_ACCESS & flags) || scp->cmnd[0] == TEST_UNIT_READY) &&
8181 		     atomic_read(&devip->stopped))) {
8182 		errsts = resp_not_ready(scp, devip);
8183 		if (errsts)
8184 			goto fini;
8185 	}
8186 	if (sdebug_fake_rw && (F_FAKE_RW & flags))
8187 		goto fini;
8188 	if (unlikely(sdebug_every_nth)) {
8189 		if (fake_timeout(scp))
8190 			return 0;	/* ignore command: make trouble */
8191 	}
8192 	if (likely(oip->pfp))
8193 		pfp = oip->pfp;	/* calls a resp_* function */
8194 	else
8195 		pfp = r_pfp;    /* if leaf function ptr NULL, try the root's */
8196 
8197 fini:
8198 	if (F_DELAY_OVERR & flags)	/* cmds like INQUIRY respond asap */
8199 		return schedule_resp(scp, devip, errsts, pfp, 0, 0);
8200 	else if ((flags & F_LONG_DELAY) && (sdebug_jdelay > 0 ||
8201 					    sdebug_ndelay > 10000)) {
8202 		/*
8203 		 * Skip long delays if ndelay <= 10 microseconds. Otherwise
8204 		 * for Start Stop Unit (SSU) want at least 1 second delay and
8205 		 * if sdebug_jdelay>1 want a long delay of that many seconds.
8206 		 * For Synchronize Cache want 1/20 of SSU's delay.
8207 		 */
8208 		int jdelay = (sdebug_jdelay < 2) ? 1 : sdebug_jdelay;
8209 		int denom = (flags & F_SYNC_DELAY) ? 20 : 1;
8210 
8211 		jdelay = mult_frac(USER_HZ * jdelay, HZ, denom * USER_HZ);
8212 		return schedule_resp(scp, devip, errsts, pfp, jdelay, 0);
8213 	} else
8214 		return schedule_resp(scp, devip, errsts, pfp, sdebug_jdelay,
8215 				     sdebug_ndelay);
8216 check_cond:
8217 	return schedule_resp(scp, devip, check_condition_result, NULL, 0, 0);
8218 err_out:
8219 	return schedule_resp(scp, NULL, DID_NO_CONNECT << 16, NULL, 0, 0);
8220 }
8221 
8222 static int sdebug_init_cmd_priv(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
8223 {
8224 	struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmd);
8225 
8226 	spin_lock_init(&sdsc->lock);
8227 
8228 	return 0;
8229 }
8230 
8231 static struct scsi_host_template sdebug_driver_template = {
8232 	.show_info =		scsi_debug_show_info,
8233 	.write_info =		scsi_debug_write_info,
8234 	.proc_name =		sdebug_proc_name,
8235 	.name =			"SCSI DEBUG",
8236 	.info =			scsi_debug_info,
8237 	.slave_alloc =		scsi_debug_slave_alloc,
8238 	.slave_configure =	scsi_debug_slave_configure,
8239 	.slave_destroy =	scsi_debug_slave_destroy,
8240 	.ioctl =		scsi_debug_ioctl,
8241 	.queuecommand =		scsi_debug_queuecommand,
8242 	.change_queue_depth =	sdebug_change_qdepth,
8243 	.map_queues =		sdebug_map_queues,
8244 	.mq_poll =		sdebug_blk_mq_poll,
8245 	.eh_abort_handler =	scsi_debug_abort,
8246 	.eh_device_reset_handler = scsi_debug_device_reset,
8247 	.eh_target_reset_handler = scsi_debug_target_reset,
8248 	.eh_bus_reset_handler = scsi_debug_bus_reset,
8249 	.eh_host_reset_handler = scsi_debug_host_reset,
8250 	.can_queue =		SDEBUG_CANQUEUE,
8251 	.this_id =		7,
8252 	.sg_tablesize =		SG_MAX_SEGMENTS,
8253 	.cmd_per_lun =		DEF_CMD_PER_LUN,
8254 	.max_sectors =		-1U,
8255 	.max_segment_size =	-1U,
8256 	.module =		THIS_MODULE,
8257 	.track_queue_depth =	1,
8258 	.cmd_size = sizeof(struct sdebug_scsi_cmd),
8259 	.init_cmd_priv = sdebug_init_cmd_priv,
8260 	.target_alloc =		sdebug_target_alloc,
8261 	.target_destroy =	sdebug_target_destroy,
8262 };
8263 
8264 static int sdebug_driver_probe(struct device *dev)
8265 {
8266 	int error = 0;
8267 	struct sdebug_host_info *sdbg_host;
8268 	struct Scsi_Host *hpnt;
8269 	int hprot;
8270 
8271 	sdbg_host = dev_to_sdebug_host(dev);
8272 
8273 	sdebug_driver_template.can_queue = sdebug_max_queue;
8274 	sdebug_driver_template.cmd_per_lun = sdebug_max_queue;
8275 	if (!sdebug_clustering)
8276 		sdebug_driver_template.dma_boundary = PAGE_SIZE - 1;
8277 
8278 	hpnt = scsi_host_alloc(&sdebug_driver_template, 0);
8279 	if (NULL == hpnt) {
8280 		pr_err("scsi_host_alloc failed\n");
8281 		error = -ENODEV;
8282 		return error;
8283 	}
8284 	if (submit_queues > nr_cpu_ids) {
8285 		pr_warn("%s: trim submit_queues (was %d) to nr_cpu_ids=%u\n",
8286 			my_name, submit_queues, nr_cpu_ids);
8287 		submit_queues = nr_cpu_ids;
8288 	}
8289 	/*
8290 	 * Decide whether to tell scsi subsystem that we want mq. The
8291 	 * following should give the same answer for each host.
8292 	 */
8293 	hpnt->nr_hw_queues = submit_queues;
8294 	if (sdebug_host_max_queue)
8295 		hpnt->host_tagset = 1;
8296 
8297 	/* poll queues are possible for nr_hw_queues > 1 */
8298 	if (hpnt->nr_hw_queues == 1 || (poll_queues < 1)) {
8299 		pr_warn("%s: trim poll_queues to 0. poll_q/nr_hw = (%d/%d)\n",
8300 			 my_name, poll_queues, hpnt->nr_hw_queues);
8301 		poll_queues = 0;
8302 	}
8303 
8304 	/*
8305 	 * Poll queues don't need interrupts, but we need at least one I/O queue
8306 	 * left over for non-polled I/O.
8307 	 * If condition not met, trim poll_queues to 1 (just for simplicity).
8308 	 */
8309 	if (poll_queues >= submit_queues) {
8310 		if (submit_queues < 3)
8311 			pr_warn("%s: trim poll_queues to 1\n", my_name);
8312 		else
8313 			pr_warn("%s: trim poll_queues to 1. Perhaps try poll_queues=%d\n",
8314 				my_name, submit_queues - 1);
8315 		poll_queues = 1;
8316 	}
8317 	if (poll_queues)
8318 		hpnt->nr_maps = 3;
8319 
8320 	sdbg_host->shost = hpnt;
8321 	if ((hpnt->this_id >= 0) && (sdebug_num_tgts > hpnt->this_id))
8322 		hpnt->max_id = sdebug_num_tgts + 1;
8323 	else
8324 		hpnt->max_id = sdebug_num_tgts;
8325 	/* = sdebug_max_luns; */
8326 	hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
8327 
8328 	hprot = 0;
8329 
8330 	switch (sdebug_dif) {
8331 
8332 	case T10_PI_TYPE1_PROTECTION:
8333 		hprot = SHOST_DIF_TYPE1_PROTECTION;
8334 		if (sdebug_dix)
8335 			hprot |= SHOST_DIX_TYPE1_PROTECTION;
8336 		break;
8337 
8338 	case T10_PI_TYPE2_PROTECTION:
8339 		hprot = SHOST_DIF_TYPE2_PROTECTION;
8340 		if (sdebug_dix)
8341 			hprot |= SHOST_DIX_TYPE2_PROTECTION;
8342 		break;
8343 
8344 	case T10_PI_TYPE3_PROTECTION:
8345 		hprot = SHOST_DIF_TYPE3_PROTECTION;
8346 		if (sdebug_dix)
8347 			hprot |= SHOST_DIX_TYPE3_PROTECTION;
8348 		break;
8349 
8350 	default:
8351 		if (sdebug_dix)
8352 			hprot |= SHOST_DIX_TYPE0_PROTECTION;
8353 		break;
8354 	}
8355 
8356 	scsi_host_set_prot(hpnt, hprot);
8357 
8358 	if (have_dif_prot || sdebug_dix)
8359 		pr_info("host protection%s%s%s%s%s%s%s\n",
8360 			(hprot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
8361 			(hprot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
8362 			(hprot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
8363 			(hprot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
8364 			(hprot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
8365 			(hprot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
8366 			(hprot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");
8367 
8368 	if (sdebug_guard == 1)
8369 		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
8370 	else
8371 		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);
8372 
8373 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & sdebug_opts);
8374 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & sdebug_opts);
8375 	if (sdebug_every_nth)	/* need stats counters for every_nth */
8376 		sdebug_statistics = true;
8377 	error = scsi_add_host(hpnt, &sdbg_host->dev);
8378 	if (error) {
8379 		pr_err("scsi_add_host failed\n");
8380 		error = -ENODEV;
8381 		scsi_host_put(hpnt);
8382 	} else {
8383 		scsi_scan_host(hpnt);
8384 	}
8385 
8386 	return error;
8387 }
8388 
8389 static void sdebug_driver_remove(struct device *dev)
8390 {
8391 	struct sdebug_host_info *sdbg_host;
8392 	struct sdebug_dev_info *sdbg_devinfo, *tmp;
8393 
8394 	sdbg_host = dev_to_sdebug_host(dev);
8395 
8396 	scsi_remove_host(sdbg_host->shost);
8397 
8398 	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
8399 				 dev_list) {
8400 		list_del(&sdbg_devinfo->dev_list);
8401 		kfree(sdbg_devinfo->zstate);
8402 		kfree(sdbg_devinfo);
8403 	}
8404 
8405 	scsi_host_put(sdbg_host->shost);
8406 }
8407 
8408 static struct bus_type pseudo_lld_bus = {
8409 	.name = "pseudo",
8410 	.probe = sdebug_driver_probe,
8411 	.remove = sdebug_driver_remove,
8412 	.drv_groups = sdebug_drv_groups,
8413 };
8414