xref: /linux/drivers/scsi/scsi_debug.c (revision 67f9c312b0a7f4bc869376d2a68308e673235954)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
4  *  Copyright (C) 1992  Eric Youngdale
5  *  Simulate a host adapter with 2 disks attached.  Do a lot of checking
6  *  to make sure that we are not getting blocks mixed up, and PANIC if
7  *  anything out of the ordinary is seen.
8  * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
9  *
10  * Copyright (C) 2001 - 2021 Douglas Gilbert
11  *
12  *  For documentation see http://sg.danny.cz/sg/scsi_debug.html
13  */
14 
15 
16 #define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
17 
18 #include <linux/module.h>
19 #include <linux/align.h>
20 #include <linux/kernel.h>
21 #include <linux/errno.h>
22 #include <linux/jiffies.h>
23 #include <linux/slab.h>
24 #include <linux/types.h>
25 #include <linux/string.h>
26 #include <linux/fs.h>
27 #include <linux/init.h>
28 #include <linux/proc_fs.h>
29 #include <linux/vmalloc.h>
30 #include <linux/moduleparam.h>
31 #include <linux/scatterlist.h>
32 #include <linux/blkdev.h>
33 #include <linux/crc-t10dif.h>
34 #include <linux/spinlock.h>
35 #include <linux/interrupt.h>
36 #include <linux/atomic.h>
37 #include <linux/hrtimer.h>
38 #include <linux/uuid.h>
39 #include <linux/t10-pi.h>
40 #include <linux/msdos_partition.h>
41 #include <linux/random.h>
42 #include <linux/xarray.h>
43 #include <linux/prefetch.h>
44 #include <linux/debugfs.h>
45 #include <linux/async.h>
46 #include <linux/cleanup.h>
47 
48 #include <net/checksum.h>
49 
50 #include <asm/unaligned.h>
51 
52 #include <scsi/scsi.h>
53 #include <scsi/scsi_cmnd.h>
54 #include <scsi/scsi_device.h>
55 #include <scsi/scsi_host.h>
56 #include <scsi/scsicam.h>
57 #include <scsi/scsi_eh.h>
58 #include <scsi/scsi_tcq.h>
59 #include <scsi/scsi_dbg.h>
60 
61 #include "sd.h"
62 #include "scsi_logging.h"
63 
64 /* make sure inq_product_rev string corresponds to this version */
65 #define SDEBUG_VERSION "0191"	/* format to fit INQUIRY revision field */
66 static const char *sdebug_version_date = "20210520";
67 
68 #define MY_NAME "scsi_debug"
69 
70 /* Additional Sense Code (ASC) */
71 #define NO_ADDITIONAL_SENSE 0x0
72 #define LOGICAL_UNIT_NOT_READY 0x4
73 #define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
74 #define UNRECOVERED_READ_ERR 0x11
75 #define PARAMETER_LIST_LENGTH_ERR 0x1a
76 #define INVALID_OPCODE 0x20
77 #define LBA_OUT_OF_RANGE 0x21
78 #define INVALID_FIELD_IN_CDB 0x24
79 #define INVALID_FIELD_IN_PARAM_LIST 0x26
80 #define WRITE_PROTECTED 0x27
81 #define UA_RESET_ASC 0x29
82 #define UA_CHANGED_ASC 0x2a
83 #define TARGET_CHANGED_ASC 0x3f
84 #define LUNS_CHANGED_ASCQ 0x0e
85 #define INSUFF_RES_ASC 0x55
86 #define INSUFF_RES_ASCQ 0x3
87 #define POWER_ON_RESET_ASCQ 0x0
88 #define POWER_ON_OCCURRED_ASCQ 0x1
89 #define BUS_RESET_ASCQ 0x2	/* scsi bus reset occurred */
90 #define MODE_CHANGED_ASCQ 0x1	/* mode parameters changed */
91 #define CAPACITY_CHANGED_ASCQ 0x9
92 #define SAVING_PARAMS_UNSUP 0x39
93 #define TRANSPORT_PROBLEM 0x4b
94 #define THRESHOLD_EXCEEDED 0x5d
95 #define LOW_POWER_COND_ON 0x5e
96 #define MISCOMPARE_VERIFY_ASC 0x1d
97 #define MICROCODE_CHANGED_ASCQ 0x1	/* with TARGET_CHANGED_ASC */
98 #define MICROCODE_CHANGED_WO_RESET_ASCQ 0x16
99 #define WRITE_ERROR_ASC 0xc
100 #define UNALIGNED_WRITE_ASCQ 0x4
101 #define WRITE_BOUNDARY_ASCQ 0x5
102 #define READ_INVDATA_ASCQ 0x6
103 #define READ_BOUNDARY_ASCQ 0x7
104 #define ATTEMPT_ACCESS_GAP 0x9
105 #define INSUFF_ZONE_ASCQ 0xe
106 
107 /* Additional Sense Code Qualifier (ASCQ) */
108 #define ACK_NAK_TO 0x3
109 
110 /* Default values for driver parameters */
111 #define DEF_NUM_HOST   1
112 #define DEF_NUM_TGTS   1
113 #define DEF_MAX_LUNS   1
114 /* With these defaults, this driver will make 1 host with 1 target
115  * (id 0) containing 1 logical unit (lun 0). That is 1 device.
116  */
117 #define DEF_ATO 1
118 #define DEF_CDB_LEN 10
119 #define DEF_JDELAY   1		/* if > 0 unit is a jiffy */
120 #define DEF_DEV_SIZE_PRE_INIT   0
121 #define DEF_DEV_SIZE_MB   8
122 #define DEF_ZBC_DEV_SIZE_MB   128
123 #define DEF_DIF 0
124 #define DEF_DIX 0
125 #define DEF_PER_HOST_STORE false
126 #define DEF_D_SENSE   0
127 #define DEF_EVERY_NTH   0
128 #define DEF_FAKE_RW	0
129 #define DEF_GUARD 0
130 #define DEF_HOST_LOCK 0
131 #define DEF_LBPU 0
132 #define DEF_LBPWS 0
133 #define DEF_LBPWS10 0
134 #define DEF_LBPRZ 1
135 #define DEF_LOWEST_ALIGNED 0
136 #define DEF_NDELAY   0		/* if > 0 unit is a nanosecond */
137 #define DEF_NO_LUN_0   0
138 #define DEF_NUM_PARTS   0
139 #define DEF_OPTS   0
140 #define DEF_OPT_BLKS 1024
141 #define DEF_PHYSBLK_EXP 0
142 #define DEF_OPT_XFERLEN_EXP 0
143 #define DEF_PTYPE   TYPE_DISK
144 #define DEF_RANDOM false
145 #define DEF_REMOVABLE false
146 #define DEF_SCSI_LEVEL   7    /* INQUIRY, byte2 [6->SPC-4; 7->SPC-5] */
147 #define DEF_SECTOR_SIZE 512
148 #define DEF_UNMAP_ALIGNMENT 0
149 #define DEF_UNMAP_GRANULARITY 1
150 #define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
151 #define DEF_UNMAP_MAX_DESC 256
152 #define DEF_VIRTUAL_GB   0
153 #define DEF_VPD_USE_HOSTNO 1
154 #define DEF_WRITESAME_LENGTH 0xFFFF
155 #define DEF_STRICT 0
156 #define DEF_STATISTICS false
157 #define DEF_SUBMIT_QUEUES 1
158 #define DEF_TUR_MS_TO_READY 0
159 #define DEF_UUID_CTL 0
160 #define JDELAY_OVERRIDDEN -9999
161 
162 /* Default parameters for ZBC drives */
163 #define DEF_ZBC_ZONE_SIZE_MB	128
164 #define DEF_ZBC_MAX_OPEN_ZONES	8
165 #define DEF_ZBC_NR_CONV_ZONES	1
166 
167 #define SDEBUG_LUN_0_VAL 0
168 
169 /* bit mask values for sdebug_opts */
170 #define SDEBUG_OPT_NOISE		1
171 #define SDEBUG_OPT_MEDIUM_ERR		2
172 #define SDEBUG_OPT_TIMEOUT		4
173 #define SDEBUG_OPT_RECOVERED_ERR	8
174 #define SDEBUG_OPT_TRANSPORT_ERR	16
175 #define SDEBUG_OPT_DIF_ERR		32
176 #define SDEBUG_OPT_DIX_ERR		64
177 #define SDEBUG_OPT_MAC_TIMEOUT		128
178 #define SDEBUG_OPT_SHORT_TRANSFER	0x100
179 #define SDEBUG_OPT_Q_NOISE		0x200
180 #define SDEBUG_OPT_ALL_TSF		0x400	/* ignore */
181 #define SDEBUG_OPT_RARE_TSF		0x800
182 #define SDEBUG_OPT_N_WCE		0x1000
183 #define SDEBUG_OPT_RESET_NOISE		0x2000
184 #define SDEBUG_OPT_NO_CDB_NOISE		0x4000
185 #define SDEBUG_OPT_HOST_BUSY		0x8000
186 #define SDEBUG_OPT_CMD_ABORT		0x10000
187 #define SDEBUG_OPT_ALL_NOISE (SDEBUG_OPT_NOISE | SDEBUG_OPT_Q_NOISE | \
188 			      SDEBUG_OPT_RESET_NOISE)
189 #define SDEBUG_OPT_ALL_INJECTING (SDEBUG_OPT_RECOVERED_ERR | \
190 				  SDEBUG_OPT_TRANSPORT_ERR | \
191 				  SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR | \
192 				  SDEBUG_OPT_SHORT_TRANSFER | \
193 				  SDEBUG_OPT_HOST_BUSY | \
194 				  SDEBUG_OPT_CMD_ABORT)
195 #define SDEBUG_OPT_RECOV_DIF_DIX (SDEBUG_OPT_RECOVERED_ERR | \
196 				  SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR)
197 
198 /* As indicated in SAM-5 and SPC-4 Unit Attentions (UAs) are returned in
199  * priority order. In the subset implemented here lower numbers have higher
200  * priority. The UA numbers should be a sequence starting from 0 with
201  * SDEBUG_NUM_UAS being 1 higher than the highest numbered UA. */
202 #define SDEBUG_UA_POR 0		/* Power on, reset, or bus device reset */
203 #define SDEBUG_UA_POOCCUR 1	/* Power on occurred */
204 #define SDEBUG_UA_BUS_RESET 2
205 #define SDEBUG_UA_MODE_CHANGED 3
206 #define SDEBUG_UA_CAPACITY_CHANGED 4
207 #define SDEBUG_UA_LUNS_CHANGED 5
208 #define SDEBUG_UA_MICROCODE_CHANGED 6	/* simulate firmware change */
209 #define SDEBUG_UA_MICROCODE_CHANGED_WO_RESET 7
210 #define SDEBUG_NUM_UAS 8
211 
212 /* when 1==SDEBUG_OPT_MEDIUM_ERR, a medium error is simulated at this
213  * sector on read commands: */
214 #define OPT_MEDIUM_ERR_ADDR   0x1234 /* that's sector 4660 in decimal */
215 #define OPT_MEDIUM_ERR_NUM    10     /* number of consecutive medium errs */
216 
217 /* SDEBUG_CANQUEUE is the maximum number of commands that can be queued
218  * (for response) per submit queue at one time. Can be reduced by max_queue
219  * option. Command responses are not queued when jdelay=0 and ndelay=0. The
220  * per-device DEF_CMD_PER_LUN can be changed via sysfs:
221  * /sys/class/scsi_device/<h:c:t:l>/device/queue_depth
222  * but cannot exceed SDEBUG_CANQUEUE .
223  */
224 #define SDEBUG_CANQUEUE_WORDS  3	/* a WORD is bits in a long */
225 #define SDEBUG_CANQUEUE  (SDEBUG_CANQUEUE_WORDS * BITS_PER_LONG)
226 #define DEF_CMD_PER_LUN  SDEBUG_CANQUEUE
227 
228 /* UA - Unit Attention; SA - Service Action; SSU - Start Stop Unit */
229 #define F_D_IN			1	/* Data-in command (e.g. READ) */
230 #define F_D_OUT			2	/* Data-out command (e.g. WRITE) */
231 #define F_D_OUT_MAYBE		4	/* WRITE SAME, NDOB bit */
232 #define F_D_UNKN		8
233 #define F_RL_WLUN_OK		0x10	/* allowed with REPORT LUNS W-LUN */
234 #define F_SKIP_UA		0x20	/* bypass UAs (e.g. INQUIRY command) */
235 #define F_DELAY_OVERR		0x40	/* for commands like INQUIRY */
236 #define F_SA_LOW		0x80	/* SA is in cdb byte 1, bits 4 to 0 */
237 #define F_SA_HIGH		0x100	/* SA is in cdb bytes 8 and 9 */
238 #define F_INV_OP		0x200	/* invalid opcode (not supported) */
239 #define F_FAKE_RW		0x400	/* bypass resp_*() when fake_rw set */
240 #define F_M_ACCESS		0x800	/* media access, reacts to SSU state */
241 #define F_SSU_DELAY		0x1000	/* SSU command delay (long-ish) */
242 #define F_SYNC_DELAY		0x2000	/* SYNCHRONIZE CACHE delay */
243 
244 /* Useful combinations of the above flags */
245 #define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
246 #define FF_MEDIA_IO (F_M_ACCESS | F_FAKE_RW)
247 #define FF_SA (F_SA_HIGH | F_SA_LOW)
248 #define F_LONG_DELAY		(F_SSU_DELAY | F_SYNC_DELAY)
249 
250 #define SDEBUG_MAX_PARTS 4
251 
252 #define SDEBUG_MAX_CMD_LEN 32
253 
254 #define SDEB_XA_NOT_IN_USE XA_MARK_1
255 
256 static struct kmem_cache *queued_cmd_cache;
257 
258 #define TO_QUEUED_CMD(scmd)  ((void *)(scmd)->host_scribble)
259 #define ASSIGN_QUEUED_CMD(scmnd, qc) { (scmnd)->host_scribble = (void *) qc; }
260 
261 /* Zone types (zbcr05 table 25) */
262 enum sdebug_z_type {
263 	ZBC_ZTYPE_CNV	= 0x1,	/* conventional zone */
264 	ZBC_ZTYPE_SWR	= 0x2,	/* sequential write required zone */
265 	ZBC_ZTYPE_SWP	= 0x3,	/* sequential write preferred zone */
266 	/* ZBC_ZTYPE_SOBR = 0x4, */
267 	ZBC_ZTYPE_GAP	= 0x5,	/* gap zone (cf. ATTEMPT_ACCESS_GAP ASCQ above) */
268 };
269 
270 /* enumeration names taken from table 26, zbcr05 */
271 enum sdebug_z_cond {
272 	ZBC_NOT_WRITE_POINTER	= 0x0,	/* zone has no write pointer */
273 	ZC1_EMPTY		= 0x1,
274 	ZC2_IMPLICIT_OPEN	= 0x2,
275 	ZC3_EXPLICIT_OPEN	= 0x3,
276 	ZC4_CLOSED		= 0x4,
	/* The ZCn prefixes follow the spec's condition numbering, so the
	 * remaining names intentionally appear out of numerical order. */
277 	ZC6_READ_ONLY		= 0xd,
278 	ZC5_FULL		= 0xe,
279 	ZC7_OFFLINE		= 0xf,
280 };
281 
282 struct sdeb_zone_state {	/* ZBC: per zone state */
283 	enum sdebug_z_type z_type;
284 	enum sdebug_z_cond z_cond;
285 	bool z_non_seq_resource;	/* "non sequential write resources active" attribute */
286 	unsigned int z_size;	/* zone size; presumably same units as z_start/z_wp — confirm */
287 	sector_t z_start;	/* first block of this zone */
288 	sector_t z_wp;		/* write pointer position */
289 };
290 
/* Types of error injection requests (carried by struct sdebug_err_inject) */
291 enum sdebug_err_type {
292 	ERR_TMOUT_CMD		= 0,	/* make specific scsi command timeout */
293 	ERR_FAIL_QUEUE_CMD	= 1,	/* make specific scsi command's */
294 					/* queuecmd return failed */
295 	ERR_FAIL_CMD		= 2,	/* make specific scsi command's */
296 					/* queuecmd return succeed but */
297 					/* with errors set in scsi_cmnd */
298 	ERR_ABORT_CMD_FAILED	= 3,	/* control return FAILED from */
299 					/* scsi_debug_abort() */
300 	ERR_LUN_RESET_FAILED	= 4,	/* control return FAILED from */
301 					/* scsi_debug_device_reset() */
302 };
303 
304 struct sdebug_err_inject {
305 	int type;		/* an enum sdebug_err_type value */
306 	struct list_head list;	/* presumably linked on sdebug_dev_info.inject_err_list — confirm */
307 	int cnt;		/* injection count — TODO confirm exact decrement semantics */
308 	unsigned char cmd;	/* SCSI opcode (cdb[0]) this injection applies to */
309 	struct rcu_head rcu;	/* for RCU-deferred freeing of this object */
310 
311 	union {
312 		/*
313 		 * For ERR_FAIL_QUEUE_CMD
314 		 */
315 		int queuecmd_ret;	/* value queuecommand should return */
316 
317 		/*
318 		 * For ERR_FAIL_CMD: pieces of the result/sense to set on the
319 		 * scsi_cmnd before completing it.
320 		 */
321 		struct {
322 			unsigned char host_byte;
323 			unsigned char driver_byte;
324 			unsigned char status_byte;
325 			unsigned char sense_key;
326 			unsigned char asc;
327 			unsigned char asq;	/* ASCQ (name abbreviated) */
328 		};
329 	};
330 };
330 
/* Per logical unit (device) state for this pseudo driver */
331 struct sdebug_dev_info {
332 	struct list_head dev_list;	/* entry on owning host's dev_info_list */
333 	unsigned int channel;		/* SCSI nexus: channel */
334 	unsigned int target;		/* SCSI nexus: target id */
335 	u64 lun;			/* SCSI nexus: logical unit number */
336 	uuid_t lu_name;			/* logical unit name (UUID) */
337 	struct sdebug_host_info *sdbg_host;	/* back pointer to owning host */
338 	unsigned long uas_bm[1];	/* pending Unit Attentions; bit numbers are SDEBUG_UA_* */
339 	atomic_t stopped;	/* 1: by SSU, 2: device start */
340 	bool used;
341 
342 	/* For ZBC devices */
343 	bool zoned;
344 	unsigned int zcap;		/* zone capacity — NOTE(review): presumably <= zsize; confirm */
345 	unsigned int zsize;		/* zone size */
346 	unsigned int zsize_shift;	/* log2 of zsize — assumes power-of-2 zone size; confirm */
347 	unsigned int nr_zones;
348 	unsigned int nr_conv_zones;	/* conventional zones */
349 	unsigned int nr_seq_zones;	/* sequential write zones */
350 	unsigned int nr_imp_open;	/* implicitly open zones */
351 	unsigned int nr_exp_open;	/* explicitly open zones */
352 	unsigned int nr_closed;
353 	unsigned int max_open;		/* limit on concurrently open zones */
354 	ktime_t create_ts;	/* time since bootup that this device was created */
355 	struct sdeb_zone_state *zstate;	/* per-zone state; presumably nr_zones entries — confirm */
356 
357 	struct dentry *debugfs_entry;
358 	struct spinlock list_lock;	/* presumably protects inject_err_list — confirm */
359 	struct list_head inject_err_list;	/* struct sdebug_err_inject entries */
360 };
361 
/* Per target state */
362 struct sdebug_target_info {
363 	bool reset_fail;	/* when true, target reset is made to fail — TODO confirm setter (debugfs?) */
364 	struct dentry *debugfs_entry;
365 };
366 
/* Per simulated host adapter state */
367 struct sdebug_host_info {
368 	struct list_head host_list;	/* entry on the driver-wide host list */
369 	int si_idx;	/* sdeb_store_info (per host) xarray index */
370 	struct Scsi_Host *shost;
371 	struct device dev;	/* embedded device; see dev_to_sdebug_host() */
372 	struct list_head dev_info_list;	/* struct sdebug_dev_info instances on this host */
373 };
374 
375 /* There is an xarray of pointers to this struct's objects, one per host */
376 struct sdeb_store_info {
377 	rwlock_t macc_lck;	/* for atomic media access on this store */
378 	u8 *storep;		/* user data storage (ram) */
379 	struct t10_pi_tuple *dif_storep; /* protection info */
380 	void *map_storep;	/* provisioning map */
	/* NOTE(review): dif_storep/map_storep presumably NULL when DIF or
	 * thin provisioning is disabled — confirm against allocation code. */
381 };
382 
383 #define dev_to_sdebug_host(d)	\
384 	container_of(d, struct sdebug_host_info, dev)
385 
386 #define shost_to_sdebug_host(shost)	\
387 	dev_to_sdebug_host(shost->dma_dev)
388 
389 enum sdeb_defer_type {SDEB_DEFER_NONE = 0, SDEB_DEFER_HRT = 1,
390 		      SDEB_DEFER_WQ = 2, SDEB_DEFER_POLL = 3};
391 
/* State for a deferred (delayed) command completion */
392 struct sdebug_defer {
393 	struct hrtimer hrt;	/* presumably used when defer_t == SDEB_DEFER_HRT */
394 	struct execute_work ew;	/* presumably used when defer_t == SDEB_DEFER_WQ */
395 	ktime_t cmpl_ts;/* time since boot to complete this cmd */
396 	int issuing_cpu;	/* cpu the command was submitted on (cf. sdebug_miss_cpus) */
397 	bool aborted;	/* true when blk_abort_request() already called */
398 	enum sdeb_defer_type defer_t;	/* which deferral mechanism is in use */
399 };
400 
401 struct sdebug_queued_cmd {
402 	/* corresponding bit set in in_use_bm[] in owning struct sdebug_queue
403 	 * instance indicates this slot is in use.
	 *
	 * NOTE(review): no struct sdebug_queue or in_use_bm[] is visible in
	 * this revision of the file; the comment above may be stale.
404 	 */
405 	struct sdebug_defer sd_dp;	/* deferred-completion state for this command */
406 	struct scsi_cmnd *scmd;		/* the command this entry tracks */
407 };
408 
/* Per scsi_cmnd private data (see TO_QUEUED_CMD/ASSIGN_QUEUED_CMD above) */
409 struct sdebug_scsi_cmd {
410 	spinlock_t   lock;	/* per-command lock; presumably serializes completion vs abort — confirm */
411 };
412 
413 static atomic_t sdebug_cmnd_count;   /* number of incoming commands */
414 static atomic_t sdebug_completions;  /* count of deferred completions */
415 static atomic_t sdebug_miss_cpus;    /* submission + completion cpus differ */
416 static atomic_t sdebug_a_tsf;	     /* 'almost task set full' counter */
417 static atomic_t sdeb_inject_pending;
418 static atomic_t sdeb_mq_poll_count;  /* bumped when mq_poll returns > 0 */
419 
/* Describes one supported SCSI opcode (or opcode + service action) */
420 struct opcode_info_t {
421 	u8 num_attached;	/* 0 if this is it (i.e. a leaf); use 0xff */
422 				/* for terminating element */
423 	u8 opcode;		/* if num_attached > 0, preferred */
424 	u16 sa;			/* service action */
425 	u32 flags;		/* OR-ed set of SDEB_F_* */
	/* response function invoked for this command; NULL when no specific
	 * handler applies (e.g. F_INV_OP entries) */
426 	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
427 	const struct opcode_info_t *arrp;  /* num_attached elements or NULL */
428 	u8 len_mask[16];	/* len_mask[0]-->cdb_len, then mask for cdb */
429 				/* 1 to min(cdb_len, 15); ignore cdb[15...] */
430 };
431 
432 /* SCSI opcodes (first byte of cdb) of interest mapped onto these indexes */
/* Keep in sync with opcode_ind_arr[] and opcode_info_arr[] below. */
433 enum sdeb_opcode_index {
434 	SDEB_I_INVALID_OPCODE =	0,
435 	SDEB_I_INQUIRY = 1,
436 	SDEB_I_REPORT_LUNS = 2,
437 	SDEB_I_REQUEST_SENSE = 3,
438 	SDEB_I_TEST_UNIT_READY = 4,
439 	SDEB_I_MODE_SENSE = 5,		/* 6, 10 */
440 	SDEB_I_MODE_SELECT = 6,		/* 6, 10 */
441 	SDEB_I_LOG_SENSE = 7,
442 	SDEB_I_READ_CAPACITY = 8,	/* 10; 16 is in SA_IN(16) */
443 	SDEB_I_READ = 9,		/* 6, 10, 12, 16 */
444 	SDEB_I_WRITE = 10,		/* 6, 10, 12, 16 */
445 	SDEB_I_START_STOP = 11,
446 	SDEB_I_SERV_ACT_IN_16 = 12,	/* add ...SERV_ACT_IN_12 if needed */
447 	SDEB_I_SERV_ACT_OUT_16 = 13,	/* add ...SERV_ACT_OUT_12 if needed */
448 	SDEB_I_MAINT_IN = 14,
449 	SDEB_I_MAINT_OUT = 15,
450 	SDEB_I_VERIFY = 16,		/* VERIFY(10), VERIFY(16) */
451 	SDEB_I_VARIABLE_LEN = 17,	/* READ(32), WRITE(32), WR_SCAT(32) */
452 	SDEB_I_RESERVE = 18,		/* 6, 10 */
453 	SDEB_I_RELEASE = 19,		/* 6, 10 */
454 	SDEB_I_ALLOW_REMOVAL = 20,	/* PREVENT ALLOW MEDIUM REMOVAL */
455 	SDEB_I_REZERO_UNIT = 21,	/* REWIND in SSC */
456 	SDEB_I_ATA_PT = 22,		/* 12, 16 */
457 	SDEB_I_SEND_DIAG = 23,
458 	SDEB_I_UNMAP = 24,
459 	SDEB_I_WRITE_BUFFER = 25,
460 	SDEB_I_WRITE_SAME = 26,		/* 10, 16 */
461 	SDEB_I_SYNC_CACHE = 27,		/* 10, 16 */
462 	SDEB_I_COMP_WRITE = 28,
463 	SDEB_I_PRE_FETCH = 29,		/* 10, 16 */
464 	SDEB_I_ZONE_OUT = 30,		/* 0x94+SA; includes no data xfer */
465 	SDEB_I_ZONE_IN = 31,		/* 0x95+SA; all have data-in */
466 	SDEB_I_LAST_ELEM_P1 = 32,	/* keep this last (previous + 1) */
467 };
468 
469 
/* Maps cdb[0] (the SCSI opcode) to an enum sdeb_opcode_index value.
 * Entries of 0 mean SDEB_I_INVALID_OPCODE, i.e. not supported. */
470 static const unsigned char opcode_ind_arr[256] = {
471 /* 0x0; 0x0->0x1f: 6 byte cdbs */
472 	SDEB_I_TEST_UNIT_READY, SDEB_I_REZERO_UNIT, 0, SDEB_I_REQUEST_SENSE,
473 	    0, 0, 0, 0,
474 	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, 0,
475 	0, 0, SDEB_I_INQUIRY, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
476 	    SDEB_I_RELEASE,
477 	0, 0, SDEB_I_MODE_SENSE, SDEB_I_START_STOP, 0, SDEB_I_SEND_DIAG,
478 	    SDEB_I_ALLOW_REMOVAL, 0,
479 /* 0x20; 0x20->0x3f: 10 byte cdbs */
480 	0, 0, 0, 0, 0, SDEB_I_READ_CAPACITY, 0, 0,
481 	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, SDEB_I_VERIFY,
482 	0, 0, 0, 0, SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, 0,
483 	0, 0, 0, SDEB_I_WRITE_BUFFER, 0, 0, 0, 0,
484 /* 0x40; 0x40->0x5f: 10 byte cdbs */
485 	0, SDEB_I_WRITE_SAME, SDEB_I_UNMAP, 0, 0, 0, 0, 0,
486 	0, 0, 0, 0, 0, SDEB_I_LOG_SENSE, 0, 0,
487 	0, 0, 0, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
488 	    SDEB_I_RELEASE,
489 	0, 0, SDEB_I_MODE_SENSE, 0, 0, 0, 0, 0,
490 /* 0x60; 0x60->0x7d are reserved, 0x7e is "extended cdb" */
491 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
492 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
493 	0, SDEB_I_VARIABLE_LEN,
494 /* 0x80; 0x80->0x9f: 16 byte cdbs */
495 	0, 0, 0, 0, 0, SDEB_I_ATA_PT, 0, 0,
496 	SDEB_I_READ, SDEB_I_COMP_WRITE, SDEB_I_WRITE, 0,
497 	0, 0, 0, SDEB_I_VERIFY,
498 	SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, SDEB_I_WRITE_SAME,
499 	SDEB_I_ZONE_OUT, SDEB_I_ZONE_IN, 0, 0,
500 	0, 0, 0, 0, 0, 0, SDEB_I_SERV_ACT_IN_16, SDEB_I_SERV_ACT_OUT_16,
501 /* 0xa0; 0xa0->0xbf: 12 byte cdbs */
502 	SDEB_I_REPORT_LUNS, SDEB_I_ATA_PT, 0, SDEB_I_MAINT_IN,
503 	     SDEB_I_MAINT_OUT, 0, 0, 0,
504 	SDEB_I_READ, 0 /* SDEB_I_SERV_ACT_OUT_12 */, SDEB_I_WRITE,
505 	     0 /* SDEB_I_SERV_ACT_IN_12 */, 0, 0, 0, 0,
506 	0, 0, 0, 0, 0, 0, 0, 0,
507 	0, 0, 0, 0, 0, 0, 0, 0,
508 /* 0xc0; 0xc0->0xff: vendor specific */
509 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
510 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
511 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
512 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
513 };
514 
515 /*
516  * The following "response" functions return the SCSI mid-level's 4 byte
517  * tuple-in-an-int. To handle commands with an IMMED bit, for a faster
518  * command completion, they can mask their return value with
519  * SDEG_RES_IMMED_MASK .
520  */
521 #define SDEG_RES_IMMED_MASK 0x40000000
522 
523 static int resp_inquiry(struct scsi_cmnd *, struct sdebug_dev_info *);
524 static int resp_report_luns(struct scsi_cmnd *, struct sdebug_dev_info *);
525 static int resp_requests(struct scsi_cmnd *, struct sdebug_dev_info *);
526 static int resp_mode_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
527 static int resp_mode_select(struct scsi_cmnd *, struct sdebug_dev_info *);
528 static int resp_log_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
529 static int resp_readcap(struct scsi_cmnd *, struct sdebug_dev_info *);
530 static int resp_read_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
531 static int resp_write_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
532 static int resp_write_scat(struct scsi_cmnd *, struct sdebug_dev_info *);
533 static int resp_start_stop(struct scsi_cmnd *, struct sdebug_dev_info *);
534 static int resp_readcap16(struct scsi_cmnd *, struct sdebug_dev_info *);
535 static int resp_get_lba_status(struct scsi_cmnd *, struct sdebug_dev_info *);
536 static int resp_get_stream_status(struct scsi_cmnd *scp,
537 				  struct sdebug_dev_info *devip);
538 static int resp_report_tgtpgs(struct scsi_cmnd *, struct sdebug_dev_info *);
539 static int resp_unmap(struct scsi_cmnd *, struct sdebug_dev_info *);
540 static int resp_rsup_opcodes(struct scsi_cmnd *, struct sdebug_dev_info *);
541 static int resp_rsup_tmfs(struct scsi_cmnd *, struct sdebug_dev_info *);
542 static int resp_verify(struct scsi_cmnd *, struct sdebug_dev_info *);
543 static int resp_write_same_10(struct scsi_cmnd *, struct sdebug_dev_info *);
544 static int resp_write_same_16(struct scsi_cmnd *, struct sdebug_dev_info *);
545 static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *);
546 static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *);
547 static int resp_sync_cache(struct scsi_cmnd *, struct sdebug_dev_info *);
548 static int resp_pre_fetch(struct scsi_cmnd *, struct sdebug_dev_info *);
549 static int resp_report_zones(struct scsi_cmnd *, struct sdebug_dev_info *);
550 static int resp_open_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
551 static int resp_close_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
552 static int resp_finish_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
553 static int resp_rwp_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
554 
555 static int sdebug_do_add_host(bool mk_new_store);
556 static int sdebug_add_host_helper(int per_host_idx);
557 static void sdebug_do_remove_host(bool the_end);
558 static int sdebug_add_store(void);
559 static void sdebug_erase_store(int idx, struct sdeb_store_info *sip);
560 static void sdebug_erase_all_stores(bool apart_from_first);
561 
562 static void sdebug_free_queued_cmd(struct sdebug_queued_cmd *sqcp);
563 
564 /*
565  * The following are overflow arrays for cdbs that "hit" the same index in
566  * the opcode_info_arr array. The most time sensitive (or commonly used) cdb
567  * should be placed in opcode_info_arr[], the others should be placed here.
568  */
569 static const struct opcode_info_t msense_iarr[] = {
570 	{0, 0x1a, 0, F_D_IN, NULL, NULL,	/* MODE SENSE(6) */
571 	    {6,  0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
572 };
573 
574 static const struct opcode_info_t mselect_iarr[] = {
575 	{0, 0x15, 0, F_D_OUT, NULL, NULL,	/* MODE SELECT(6) */
576 	    {6,  0xf1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
577 };
578 
/* Overflow entries for SDEB_I_READ; READ(16) is the primary entry */
579 static const struct opcode_info_t read_iarr[] = {
580 	{0, 0x28, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(10) */
581 	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
582 	     0, 0, 0, 0} },
583 	{0, 0x8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL, /* READ(6) */
584 	    {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
585 	{0, 0xa8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(12) */
586 	    {12,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf,
587 	     0xc7, 0, 0, 0, 0} },
588 };
589 
/* Overflow entries for SDEB_I_WRITE; WRITE(16) is the primary entry */
590 static const struct opcode_info_t write_iarr[] = {
591 	{0, 0x2a, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(10) */
592 	    NULL, {10,  0xfb, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7,
593 		   0, 0, 0, 0, 0, 0} },
594 	{0, 0xa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,   /* WRITE(6) */
595 	    NULL, {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0,
596 		   0, 0, 0} },
597 	{0, 0xaa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(12) */
598 	    NULL, {12,  0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
599 		   0xbf, 0xc7, 0, 0, 0, 0} },
600 };
601 
/* Overflow entry for SDEB_I_VERIFY; VERIFY(16) is the primary entry */
602 static const struct opcode_info_t verify_iarr[] = {
603 	{0, 0x2f, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,/* VERIFY(10) */
604 	    NULL, {10,  0xf7, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xff, 0xff, 0xc7,
605 		   0, 0, 0, 0, 0, 0} },
606 };
607 
/* SERVICE ACTION IN(16) overflow entries; service action is in cdb[1] (F_SA_LOW) */
608 static const struct opcode_info_t sa_in_16_iarr[] = {
609 	{0, 0x9e, 0x12, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL,
610 	    {16,  0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
611 	     0xff, 0xff, 0xff, 0, 0xc7} },	/* GET LBA STATUS(16) */
612 	{0, 0x9e, 0x16, F_SA_LOW | F_D_IN, resp_get_stream_status, NULL,
613 	    {16, 0x16, 0, 0, 0xff, 0xff, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff,
614 	     0, 0} },	/* GET STREAM STATUS */
615 };
616 
/* Opcode 0x7f; service action lives in cdb bytes 8 and 9 (F_SA_HIGH) */
617 static const struct opcode_info_t vl_iarr[] = {	/* VARIABLE LENGTH */
618 	{0, 0x7f, 0xb, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_dt0,
619 	    NULL, {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0xb, 0xfa,
620 		   0, 0xff, 0xff, 0xff, 0xff} },	/* WRITE(32) */
621 	{0, 0x7f, 0x11, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
622 	    NULL, {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x11, 0xf8,
623 		   0, 0xff, 0xff, 0x0, 0x0} },	/* WRITE SCATTERED(32) */
624 };
625 
/* MAINTENANCE IN overflow entries; service action in cdb[1] (F_SA_LOW) */
626 static const struct opcode_info_t maint_in_iarr[] = {	/* MAINT IN */
627 	{0, 0xa3, 0xc, F_SA_LOW | F_D_IN, resp_rsup_opcodes, NULL,
628 	    {12,  0xc, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
629 	     0xc7, 0, 0, 0, 0} }, /* REPORT SUPPORTED OPERATION CODES */
630 	{0, 0xa3, 0xd, F_SA_LOW | F_D_IN, resp_rsup_tmfs, NULL,
631 	    {12,  0xd, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
632 	     0, 0} },	/* REPORTED SUPPORTED TASK MANAGEMENT FUNCTIONS */
633 };
634 
/* Overflow entry for SDEB_I_WRITE_SAME; WRITE SAME(10) is the primary entry */
635 static const struct opcode_info_t write_same_iarr[] = {
636 	{0, 0x93, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_write_same_16, NULL,
637 	    {16,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
638 	     0xff, 0xff, 0xff, 0x3f, 0xc7} },		/* WRITE SAME(16) */
639 };
640 
/* Overflow entry for SDEB_I_RESERVE; RESERVE(10) is the primary entry */
641 static const struct opcode_info_t reserve_iarr[] = {
642 	{0, 0x16, 0, F_D_OUT, NULL, NULL,		/* RESERVE(6) */
643 	    {6,  0x1f, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
644 };
645 
/* Overflow entry for SDEB_I_RELEASE; RELEASE(10) is the primary entry */
646 static const struct opcode_info_t release_iarr[] = {
647 	{0, 0x17, 0, F_D_OUT, NULL, NULL,		/* RELEASE(6) */
648 	    {6,  0x1f, 0xff, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
649 };
650 
/* Overflow entry for SDEB_I_SYNC_CACHE; SYNCHRONIZE CACHE(10) is primary */
651 static const struct opcode_info_t sync_cache_iarr[] = {
652 	{0, 0x91, 0, F_SYNC_DELAY | F_M_ACCESS, resp_sync_cache, NULL,
653 	    {16,  0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
654 	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* SYNC_CACHE (16) */
655 };
656 
/* Overflow entry for SDEB_I_PRE_FETCH; PRE-FETCH(10) is the primary entry */
657 static const struct opcode_info_t pre_fetch_iarr[] = {
658 	{0, 0x90, 0, F_SYNC_DELAY | FF_MEDIA_IO, resp_pre_fetch, NULL,
659 	    {16,  0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
660 	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* PRE-FETCH (16) */
661 };
662 
/* ZONE OUT(16) overflow entries; service action in cdb[1] (F_SA_LOW).
 * OPEN ZONE (SA 0x3) is the primary entry. */
663 static const struct opcode_info_t zone_out_iarr[] = {	/* ZONE OUT(16) */
664 	{0, 0x94, 0x1, F_SA_LOW | F_M_ACCESS, resp_close_zone, NULL,
665 	    {16, 0x1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
666 	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },	/* CLOSE ZONE */
667 	{0, 0x94, 0x2, F_SA_LOW | F_M_ACCESS, resp_finish_zone, NULL,
668 	    {16, 0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
669 	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },	/* FINISH ZONE */
670 	{0, 0x94, 0x4, F_SA_LOW | F_M_ACCESS, resp_rwp_zone, NULL,
671 	    {16, 0x4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
672 	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },  /* RESET WRITE POINTER */
673 };
674 
/* ZONE IN(16) overflow entries; service action in cdb[1] (F_SA_LOW).
 * NOTE(review): this entry (SA 0x6) has a NULL response function yet is
 * labelled REPORT ZONES; the primary REPORT ZONES entry (SA 0x0, with
 * resp_report_zones) lives in opcode_info_arr[] — confirm the trailing
 * comment/service-action pairing. */
675 static const struct opcode_info_t zone_in_iarr[] = {	/* ZONE IN(16) */
676 	{0, 0x95, 0x6, F_SA_LOW | F_D_IN | F_M_ACCESS, NULL, NULL,
677 	    {16, 0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
678 	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* REPORT ZONES */
679 };
680 
681 
682 /* This array is accessed via SDEB_I_* values. Make sure all are mapped,
683  * plus the terminating elements for logic that scans this table such as
684  * REPORT SUPPORTED OPERATION CODES. */
685 static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEM_P1 + 1] = {
686 /* 0 */
687 	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL,	/* unknown opcodes */
688 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
689 	{0, 0x12, 0, FF_RESPOND | F_D_IN, resp_inquiry, NULL, /* INQUIRY */
690 	    {6,  0xe3, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
691 	{0, 0xa0, 0, FF_RESPOND | F_D_IN, resp_report_luns, NULL,
692 	    {12,  0xe3, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
693 	     0, 0} },					/* REPORT LUNS */
694 	{0, 0x3, 0, FF_RESPOND | F_D_IN, resp_requests, NULL,
695 	    {6,  0xe1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
696 	{0, 0x0, 0, F_M_ACCESS | F_RL_WLUN_OK, NULL, NULL,/* TEST UNIT READY */
697 	    {6,  0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
698 /* 5 */
699 	{ARRAY_SIZE(msense_iarr), 0x5a, 0, F_D_IN,	/* MODE SENSE(10) */
700 	    resp_mode_sense, msense_iarr, {10,  0xf8, 0xff, 0xff, 0, 0, 0,
701 		0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
702 	{ARRAY_SIZE(mselect_iarr), 0x55, 0, F_D_OUT,	/* MODE SELECT(10) */
703 	    resp_mode_select, mselect_iarr, {10,  0xf1, 0, 0, 0, 0, 0, 0xff,
704 		0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
705 	{0, 0x4d, 0, F_D_IN, resp_log_sense, NULL,	/* LOG SENSE */
706 	    {10,  0xe3, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0,
707 	     0, 0, 0} },
708 	{0, 0x25, 0, F_D_IN, resp_readcap, NULL,    /* READ CAPACITY(10) */
709 	    {10,  0xe1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0x1, 0xc7, 0, 0, 0, 0,
710 	     0, 0} },
711 	{ARRAY_SIZE(read_iarr), 0x88, 0, F_D_IN | FF_MEDIA_IO, /* READ(16) */
712 	    resp_read_dt0, read_iarr, {16,  0xfe, 0xff, 0xff, 0xff, 0xff,
713 	    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
714 /* 10 */
715 	{ARRAY_SIZE(write_iarr), 0x8a, 0, F_D_OUT | FF_MEDIA_IO,
716 	    resp_write_dt0, write_iarr,			/* WRITE(16) */
717 		{16,  0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
718 		 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
719 	{0, 0x1b, 0, F_SSU_DELAY, resp_start_stop, NULL,/* START STOP UNIT */
720 	    {6,  0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
721 	{ARRAY_SIZE(sa_in_16_iarr), 0x9e, 0x10, F_SA_LOW | F_D_IN,
722 	    resp_readcap16, sa_in_16_iarr, /* SA_IN(16), READ CAPACITY(16) */
723 		{16,  0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
724 		 0xff, 0xff, 0xff, 0xff, 0x1, 0xc7} },
725 	{0, 0x9f, 0x12, F_SA_LOW | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
726 	    NULL, {16,  0x12, 0xf9, 0x0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff,
727 	    0xff, 0xff, 0xff, 0xff, 0xc7} },  /* SA_OUT(16), WRITE SCAT(16) */
728 	{ARRAY_SIZE(maint_in_iarr), 0xa3, 0xa, F_SA_LOW | F_D_IN,
729 	    resp_report_tgtpgs,	/* MAINT IN, REPORT TARGET PORT GROUPS */
730 		maint_in_iarr, {12,  0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff,
731 				0xff, 0, 0xc7, 0, 0, 0, 0} },
732 /* 15 */
733 	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */
734 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
735 	{ARRAY_SIZE(verify_iarr), 0x8f, 0,
736 	    F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,	/* VERIFY(16) */
737 	    verify_iarr, {16,  0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
738 			  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },
739 	{ARRAY_SIZE(vl_iarr), 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_MEDIA_IO,
740 	    resp_read_dt0, vl_iarr,	/* VARIABLE LENGTH, READ(32) */
741 	    {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x9, 0xfe, 0, 0xff, 0xff,
742 	     0xff, 0xff} },
743 	{ARRAY_SIZE(reserve_iarr), 0x56, 0, F_D_OUT,
744 	    NULL, reserve_iarr,	/* RESERVE(10) <no response function> */
745 	    {10,  0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
746 	     0} },
747 	{ARRAY_SIZE(release_iarr), 0x57, 0, F_D_OUT,
748 	    NULL, release_iarr, /* RELEASE(10) <no response function> */
749 	    {10,  0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
750 	     0} },
751 /* 20 */
752 	{0, 0x1e, 0, 0, NULL, NULL, /* ALLOW REMOVAL */
753 	    {6,  0, 0, 0, 0x3, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
754 	{0, 0x1, 0, 0, resp_start_stop, NULL, /* REWIND ?? */
755 	    {6,  0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
756 	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */
757 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
758 	{0, 0x1d, F_D_OUT, 0, NULL, NULL,	/* SEND DIAGNOSTIC */
759 	    {6,  0xf7, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
760 	{0, 0x42, 0, F_D_OUT | FF_MEDIA_IO, resp_unmap, NULL, /* UNMAP */
761 	    {10,  0x1, 0, 0, 0, 0, 0x3f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
762 /* 25 */
763 	{0, 0x3b, 0, F_D_OUT_MAYBE, resp_write_buffer, NULL,
764 	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0,
765 	     0, 0, 0, 0} },			/* WRITE_BUFFER */
766 	{ARRAY_SIZE(write_same_iarr), 0x41, 0, F_D_OUT_MAYBE | FF_MEDIA_IO,
767 	    resp_write_same_10, write_same_iarr,	/* WRITE SAME(10) */
768 		{10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0,
769 		 0, 0, 0, 0, 0} },
770 	{ARRAY_SIZE(sync_cache_iarr), 0x35, 0, F_SYNC_DELAY | F_M_ACCESS,
771 	    resp_sync_cache, sync_cache_iarr,
772 	    {10,  0x7, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
773 	     0, 0, 0, 0} },			/* SYNC_CACHE (10) */
774 	{0, 0x89, 0, F_D_OUT | FF_MEDIA_IO, resp_comp_write, NULL,
775 	    {16,  0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0,
776 	     0, 0xff, 0x3f, 0xc7} },		/* COMPARE AND WRITE */
777 	{ARRAY_SIZE(pre_fetch_iarr), 0x34, 0, F_SYNC_DELAY | FF_MEDIA_IO,
778 	    resp_pre_fetch, pre_fetch_iarr,
779 	    {10,  0x2, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
780 	     0, 0, 0, 0} },			/* PRE-FETCH (10) */
781 
782 /* 30 */
783 	{ARRAY_SIZE(zone_out_iarr), 0x94, 0x3, F_SA_LOW | F_M_ACCESS,
784 	    resp_open_zone, zone_out_iarr, /* ZONE_OUT(16), OPEN ZONE) */
785 		{16,  0x3 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
786 		 0xff, 0xff, 0x0, 0x0, 0xff, 0xff, 0x1, 0xc7} },
787 	{ARRAY_SIZE(zone_in_iarr), 0x95, 0x0, F_SA_LOW | F_M_ACCESS,
788 	    resp_report_zones, zone_in_iarr, /* ZONE_IN(16), REPORT ZONES) */
789 		{16,  0x0 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
790 		 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xc7} },
791 /* sentinel */
792 	{0xff, 0, 0, 0, NULL, NULL,		/* terminating element */
793 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
794 };
795 
/*
 * Working values of the module parameters (defaults come from the DEF_*
 * macros defined earlier in this file).
 */
static int sdebug_num_hosts;
static int sdebug_add_host = DEF_NUM_HOST;  /* in sysfs this is relative */
static int sdebug_ato = DEF_ATO;
static int sdebug_cdb_len = DEF_CDB_LEN;
static int sdebug_jdelay = DEF_JDELAY;	/* if > 0 then unit is jiffies */
static int sdebug_dev_size_mb = DEF_DEV_SIZE_PRE_INIT;
static int sdebug_dif = DEF_DIF;
static int sdebug_dix = DEF_DIX;
static int sdebug_dsense = DEF_D_SENSE;
static int sdebug_every_nth = DEF_EVERY_NTH;
static int sdebug_fake_rw = DEF_FAKE_RW;
static unsigned int sdebug_guard = DEF_GUARD;
static int sdebug_host_max_queue;	/* per host */
static int sdebug_lowest_aligned = DEF_LOWEST_ALIGNED;
static int sdebug_max_luns = DEF_MAX_LUNS;
static int sdebug_max_queue = SDEBUG_CANQUEUE;	/* per submit queue */
static unsigned int sdebug_medium_error_start = OPT_MEDIUM_ERR_ADDR;
static int sdebug_medium_error_count = OPT_MEDIUM_ERR_NUM;
static int sdebug_ndelay = DEF_NDELAY;	/* if > 0 then unit is nanoseconds */
static int sdebug_no_lun_0 = DEF_NO_LUN_0;
static int sdebug_no_uld;
static int sdebug_num_parts = DEF_NUM_PARTS;
static int sdebug_num_tgts = DEF_NUM_TGTS; /* targets per host */
static int sdebug_opt_blks = DEF_OPT_BLKS;
static int sdebug_opts = DEF_OPTS;
static int sdebug_physblk_exp = DEF_PHYSBLK_EXP;
static int sdebug_opt_xferlen_exp = DEF_OPT_XFERLEN_EXP;
static int sdebug_ptype = DEF_PTYPE; /* SCSI peripheral device type */
static int sdebug_scsi_level = DEF_SCSI_LEVEL;
static int sdebug_sector_size = DEF_SECTOR_SIZE;
static int sdeb_tur_ms_to_ready = DEF_TUR_MS_TO_READY;
static int sdebug_virtual_gb = DEF_VIRTUAL_GB;
static int sdebug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
/* logical block provisioning (thin provisioning / UNMAP) controls */
static unsigned int sdebug_lbpu = DEF_LBPU;
static unsigned int sdebug_lbpws = DEF_LBPWS;
static unsigned int sdebug_lbpws10 = DEF_LBPWS10;
static unsigned int sdebug_lbprz = DEF_LBPRZ;
static unsigned int sdebug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
static unsigned int sdebug_unmap_granularity = DEF_UNMAP_GRANULARITY;
static unsigned int sdebug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
static unsigned int sdebug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
static unsigned int sdebug_write_same_length = DEF_WRITESAME_LENGTH;
static int sdebug_uuid_ctl = DEF_UUID_CTL;
static bool sdebug_random = DEF_RANDOM;
static bool sdebug_per_host_store = DEF_PER_HOST_STORE;
static bool sdebug_removable = DEF_REMOVABLE;
static bool sdebug_clustering;
static bool sdebug_host_lock = DEF_HOST_LOCK;
static bool sdebug_strict = DEF_STRICT;
static bool sdebug_any_injecting_opt;
static bool sdebug_no_rwlock;
static bool sdebug_verbose;
static bool have_dif_prot;
static bool write_since_sync;
static bool sdebug_statistics = DEF_STATISTICS;
static bool sdebug_wp;
static bool sdebug_allow_restart;
/* Zoned block device (ZBC) emulation model, chosen via sdeb_zbc_model_s */
static enum {
	BLK_ZONED_NONE	= 0,
	BLK_ZONED_HA	= 1,	/* host-aware */
	BLK_ZONED_HM	= 2,	/* host-managed */
} sdeb_zbc_model = BLK_ZONED_NONE;
static char *sdeb_zbc_model_s;
859 
/* SAM LUN addressing methods; selected via sdebug_lun_am_i */
enum sam_lun_addr_method {SAM_LUN_AM_PERIPHERAL = 0x0,
			  SAM_LUN_AM_FLAT = 0x1,
			  SAM_LUN_AM_LOGICAL_UNIT = 0x2,
			  SAM_LUN_AM_EXTENDED = 0x3};
static enum sam_lun_addr_method sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
/* integer shadow of sdebug_lun_am (module parameters are plain ints) */
static int sdebug_lun_am_i = (int)SAM_LUN_AM_PERIPHERAL;

static unsigned int sdebug_store_sectors;
static sector_t sdebug_capacity;	/* in sectors */

/* old BIOS stuff, kernel may get rid of them but some mode sense pages
   may still need them */
static int sdebug_heads;		/* heads per disk */
static int sdebug_cylinders_per;	/* cylinders per surface */
static int sdebug_sectors_per;		/* sectors per cylinder */

/* list of all emulated hosts, guarded by sdebug_host_list_mutex */
static LIST_HEAD(sdebug_host_list);
static DEFINE_MUTEX(sdebug_host_list_mutex);

/* xarray holding the backing store(s); index 0 is the first store created */
static struct xarray per_store_arr;
static struct xarray *per_store_ap = &per_store_arr;
static int sdeb_first_idx = -1;		/* invalid index ==> none created */
static int sdeb_most_recent_idx = -1;
static DEFINE_RWLOCK(sdeb_fake_rw_lck);	/* need a RW lock when fake_rw=1 */

/* counters for error-handling / protection-information events */
static unsigned long map_size;
static int num_aborts;
static int num_dev_resets;
static int num_target_resets;
static int num_bus_resets;
static int num_host_resets;
static int dix_writes;
static int dix_reads;
static int dif_errors;

/* ZBC global data */
static bool sdeb_zbc_in_use;	/* true for host-aware and host-managed disks */
static int sdeb_zbc_zone_cap_mb;
static int sdeb_zbc_zone_size_mb;
static int sdeb_zbc_max_open = DEF_ZBC_MAX_OPEN_ZONES;
static int sdeb_zbc_nr_conv = DEF_ZBC_NR_CONV_ZONES;

static int submit_queues = DEF_SUBMIT_QUEUES;  /* > 1 for multi-queue (mq) */
static int poll_queues; /* iouring iopoll interface.*/

/* write counters indexed by the CDB GROUP NUMBER field (0..63) */
static atomic_long_t writes_by_group_number[64];

static char sdebug_proc_name[] = MY_NAME;
static const char *my_name = MY_NAME;

static const struct bus_type pseudo_lld_bus;

static struct device_driver sdebug_driverfs_driver = {
	.name 		= sdebug_proc_name,
	.bus		= &pseudo_lld_bus,
};

/* canned scsi_cmnd result values used throughout this driver */
static const int check_condition_result =
	SAM_STAT_CHECK_CONDITION;

static const int illegal_condition_result =
	(DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;

static const int device_qfull_result =
	(DID_ABORT << 16) | SAM_STAT_TASK_SET_FULL;

static const int condition_met_result = SAM_STAT_CONDITION_MET;

static struct dentry *sdebug_debugfs_root;
/* async domain used to tear down per-target debugfs entries */
static ASYNC_DOMAIN_EXCLUSIVE(sdebug_async_domain);
930 
931 static void sdebug_err_free(struct rcu_head *head)
932 {
933 	struct sdebug_err_inject *inject =
934 		container_of(head, typeof(*inject), rcu);
935 
936 	kfree(inject);
937 }
938 
/*
 * Install the error-injection entry @new for @sdev, replacing any existing
 * entry with the same type and command opcode.  Writers serialize on
 * devip->list_lock; lockless readers traverse the list under RCU, so a
 * replaced entry is freed via call_rcu() after the grace period.
 */
static void sdebug_err_add(struct scsi_device *sdev, struct sdebug_err_inject *new)
{
	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdev->hostdata;
	struct sdebug_err_inject *err;

	spin_lock(&devip->list_lock);
	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
		/* remove any older entry for the same type/opcode pair */
		if (err->type == new->type && err->cmd == new->cmd) {
			list_del_rcu(&err->list);
			call_rcu(&err->rcu, sdebug_err_free);
		}
	}

	list_add_tail_rcu(&new->list, &devip->inject_err_list);
	spin_unlock(&devip->list_lock);
}
955 
/*
 * Handle a "- <type> <cmd>" directive from the debugfs "error" file:
 * remove the matching injection entry.  Takes ownership of @buf and frees
 * it on every return path.  Returns @count on success, otherwise -EINVAL
 * (parse failure or no matching entry).
 */
static int sdebug_err_remove(struct scsi_device *sdev, const char *buf, size_t count)
{
	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdev->hostdata;
	struct sdebug_err_inject *err;
	int type;
	unsigned char cmd;

	if (sscanf(buf, "- %d %hhx", &type, &cmd) != 2) {
		kfree(buf);
		return -EINVAL;
	}

	spin_lock(&devip->list_lock);
	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
		if (err->type == type && err->cmd == cmd) {
			/* unlink now; free after the RCU grace period */
			list_del_rcu(&err->list);
			call_rcu(&err->rcu, sdebug_err_free);
			spin_unlock(&devip->list_lock);
			kfree(buf);
			return count;
		}
	}
	spin_unlock(&devip->list_lock);

	kfree(buf);
	return -EINVAL;
}
983 
/*
 * seq_file show for the debugfs "error" file: list the device's active
 * error-injection entries, one per line.  Column layout depends on the
 * entry type (mirrors the formats accepted by sdebug_error_write()).
 */
static int sdebug_error_show(struct seq_file *m, void *p)
{
	struct scsi_device *sdev = (struct scsi_device *)m->private;
	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdev->hostdata;
	struct sdebug_err_inject *err;

	seq_puts(m, "Type\tCount\tCommand\n");

	rcu_read_lock();
	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
		switch (err->type) {
		case ERR_TMOUT_CMD:
		case ERR_ABORT_CMD_FAILED:
		case ERR_LUN_RESET_FAILED:
			seq_printf(m, "%d\t%d\t0x%x\n", err->type, err->cnt,
				err->cmd);
		break;

		case ERR_FAIL_QUEUE_CMD:
			/* extra column: the queuecommand() return to inject */
			seq_printf(m, "%d\t%d\t0x%x\t0x%x\n", err->type,
				err->cnt, err->cmd, err->queuecmd_ret);
		break;

		case ERR_FAIL_CMD:
			/* extra columns: host/driver/status bytes and sense triple */
			seq_printf(m, "%d\t%d\t0x%x\t0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
				err->type, err->cnt, err->cmd,
				err->host_byte, err->driver_byte,
				err->status_byte, err->sense_key,
				err->asc, err->asq);
		break;
		}
	}
	rcu_read_unlock();

	return 0;
}
1020 
/* debugfs open: bind sdebug_error_show() to the sdev stored in i_private */
static int sdebug_error_open(struct inode *inode, struct file *file)
{
	return single_open(file, sdebug_error_show, inode->i_private);
}
1025 
1026 static ssize_t sdebug_error_write(struct file *file, const char __user *ubuf,
1027 		size_t count, loff_t *ppos)
1028 {
1029 	char *buf;
1030 	unsigned int inject_type;
1031 	struct sdebug_err_inject *inject;
1032 	struct scsi_device *sdev = (struct scsi_device *)file->f_inode->i_private;
1033 
1034 	buf = kzalloc(count + 1, GFP_KERNEL);
1035 	if (!buf)
1036 		return -ENOMEM;
1037 
1038 	if (copy_from_user(buf, ubuf, count)) {
1039 		kfree(buf);
1040 		return -EFAULT;
1041 	}
1042 
1043 	if (buf[0] == '-')
1044 		return sdebug_err_remove(sdev, buf, count);
1045 
1046 	if (sscanf(buf, "%d", &inject_type) != 1) {
1047 		kfree(buf);
1048 		return -EINVAL;
1049 	}
1050 
1051 	inject = kzalloc(sizeof(struct sdebug_err_inject), GFP_KERNEL);
1052 	if (!inject) {
1053 		kfree(buf);
1054 		return -ENOMEM;
1055 	}
1056 
1057 	switch (inject_type) {
1058 	case ERR_TMOUT_CMD:
1059 	case ERR_ABORT_CMD_FAILED:
1060 	case ERR_LUN_RESET_FAILED:
1061 		if (sscanf(buf, "%d %d %hhx", &inject->type, &inject->cnt,
1062 			   &inject->cmd) != 3)
1063 			goto out_error;
1064 	break;
1065 
1066 	case ERR_FAIL_QUEUE_CMD:
1067 		if (sscanf(buf, "%d %d %hhx %x", &inject->type, &inject->cnt,
1068 			   &inject->cmd, &inject->queuecmd_ret) != 4)
1069 			goto out_error;
1070 	break;
1071 
1072 	case ERR_FAIL_CMD:
1073 		if (sscanf(buf, "%d %d %hhx %hhx %hhx %hhx %hhx %hhx %hhx",
1074 			   &inject->type, &inject->cnt, &inject->cmd,
1075 			   &inject->host_byte, &inject->driver_byte,
1076 			   &inject->status_byte, &inject->sense_key,
1077 			   &inject->asc, &inject->asq) != 9)
1078 			goto out_error;
1079 	break;
1080 
1081 	default:
1082 		goto out_error;
1083 	break;
1084 	}
1085 
1086 	kfree(buf);
1087 	sdebug_err_add(sdev, inject);
1088 
1089 	return count;
1090 
1091 out_error:
1092 	kfree(buf);
1093 	kfree(inject);
1094 	return -EINVAL;
1095 }
1096 
/* debugfs "error" file: seq_file reader plus the injection-directive writer */
static const struct file_operations sdebug_error_fops = {
	.open	= sdebug_error_open,
	.read	= seq_read,
	.write	= sdebug_error_write,
	.release = single_release,
};
1103 
1104 static int sdebug_target_reset_fail_show(struct seq_file *m, void *p)
1105 {
1106 	struct scsi_target *starget = (struct scsi_target *)m->private;
1107 	struct sdebug_target_info *targetip =
1108 		(struct sdebug_target_info *)starget->hostdata;
1109 
1110 	if (targetip)
1111 		seq_printf(m, "%c\n", targetip->reset_fail ? 'Y' : 'N');
1112 
1113 	return 0;
1114 }
1115 
/* debugfs open: bind the show routine to the target stored in i_private */
static int sdebug_target_reset_fail_open(struct inode *inode, struct file *file)
{
	return single_open(file, sdebug_target_reset_fail_show, inode->i_private);
}
1120 
1121 static ssize_t sdebug_target_reset_fail_write(struct file *file,
1122 		const char __user *ubuf, size_t count, loff_t *ppos)
1123 {
1124 	int ret;
1125 	struct scsi_target *starget =
1126 		(struct scsi_target *)file->f_inode->i_private;
1127 	struct sdebug_target_info *targetip =
1128 		(struct sdebug_target_info *)starget->hostdata;
1129 
1130 	if (targetip) {
1131 		ret = kstrtobool_from_user(ubuf, count, &targetip->reset_fail);
1132 		return ret < 0 ? ret : count;
1133 	}
1134 	return -ENODEV;
1135 }
1136 
/* debugfs "fail_reset" file: read current flag, write to arm/disarm it */
static const struct file_operations sdebug_target_reset_fail_fops = {
	.open	= sdebug_target_reset_fail_open,
	.read	= seq_read,
	.write	= sdebug_target_reset_fail_write,
	.release = single_release,
};
1143 
/*
 * Host-template target_alloc: allocate per-target private data and create
 * its debugfs directory with a "fail_reset" control file.
 */
static int sdebug_target_alloc(struct scsi_target *starget)
{
	struct sdebug_target_info *targetip;

	targetip = kzalloc(sizeof(struct sdebug_target_info), GFP_KERNEL);
	if (!targetip)
		return -ENOMEM;

	/*
	 * Wait for any teardown work queued by sdebug_target_destroy() on
	 * sdebug_async_domain so previous debugfs entries are fully removed
	 * before new ones are created.
	 */
	async_synchronize_full_domain(&sdebug_async_domain);

	targetip->debugfs_entry = debugfs_create_dir(dev_name(&starget->dev),
				sdebug_debugfs_root);

	debugfs_create_file("fail_reset", 0600, targetip->debugfs_entry, starget,
				&sdebug_target_reset_fail_fops);

	starget->hostdata = targetip;

	return 0;
}
1164 
/*
 * Async-domain callback (note: historic "tartget" spelling kept to avoid
 * churning callers): remove the target's debugfs directory and free its
 * private data.
 */
static void sdebug_tartget_cleanup_async(void *data, async_cookie_t cookie)
{
	struct sdebug_target_info *targetip = data;

	debugfs_remove(targetip->debugfs_entry);
	kfree(targetip);
}
1172 
1173 static void sdebug_target_destroy(struct scsi_target *starget)
1174 {
1175 	struct sdebug_target_info *targetip;
1176 
1177 	targetip = (struct sdebug_target_info *)starget->hostdata;
1178 	if (targetip) {
1179 		starget->hostdata = NULL;
1180 		async_schedule_domain(sdebug_tartget_cleanup_async, targetip,
1181 				&sdebug_async_domain);
1182 	}
1183 }
1184 
1185 /* Only do the extra work involved in logical block provisioning if one or
1186  * more of the lbpu, lbpws or lbpws10 parameters are given and we are doing
1187  * real reads and writes (i.e. not skipping them for speed).
1188  */
1189 static inline bool scsi_debug_lbp(void)
1190 {
1191 	return 0 == sdebug_fake_rw &&
1192 		(sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10);
1193 }
1194 
/*
 * Map @lba to its byte address inside @sip's backing store.  The LBA is
 * reduced modulo sdebug_store_sectors (do_div() returns the remainder),
 * so a virtual capacity larger than the store wraps around.  If @sip is
 * missing or has no storage, warn once and fall back to store index 0.
 */
static void *lba2fake_store(struct sdeb_store_info *sip,
			    unsigned long long lba)
{
	struct sdeb_store_info *lsip = sip;

	lba = do_div(lba, sdebug_store_sectors);	/* lba %= store sectors */
	if (!sip || !sip->storep) {
		WARN_ON_ONCE(true);
		lsip = xa_load(per_store_ap, 0);  /* should never be NULL */
	}
	return lsip->storep + lba * sdebug_sector_size;
}
1207 
/*
 * Return the T10 protection-information tuple for @sector within @sip's
 * PI store.  sector_div() returns the remainder, so the sector wraps
 * modulo sdebug_store_sectors just like the data store does.
 */
static struct t10_pi_tuple *dif_store(struct sdeb_store_info *sip,
				      sector_t sector)
{
	sector = sector_div(sector, sdebug_store_sectors);

	return sip->dif_storep + sector;
}
1215 
1216 static void sdebug_max_tgts_luns(void)
1217 {
1218 	struct sdebug_host_info *sdbg_host;
1219 	struct Scsi_Host *hpnt;
1220 
1221 	mutex_lock(&sdebug_host_list_mutex);
1222 	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
1223 		hpnt = sdbg_host->shost;
1224 		if ((hpnt->this_id >= 0) &&
1225 		    (sdebug_num_tgts > hpnt->this_id))
1226 			hpnt->max_id = sdebug_num_tgts + 1;
1227 		else
1228 			hpnt->max_id = sdebug_num_tgts;
1229 		/* sdebug_max_luns; */
1230 		hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
1231 	}
1232 	mutex_unlock(&sdebug_host_list_mutex);
1233 }
1234 
1235 enum sdeb_cmd_data {SDEB_IN_DATA = 0, SDEB_IN_CDB = 1};
1236 
/* Set in_bit to -1 to indicate no bit position of invalid field */
/*
 * Build ILLEGAL REQUEST sense data with a SENSE KEY SPECIFIC field that
 * points at the offending byte (and optionally bit) of either the CDB
 * (c_d == SDEB_IN_CDB) or the data-out parameter list (SDEB_IN_DATA).
 */
static void mk_sense_invalid_fld(struct scsi_cmnd *scp,
				 enum sdeb_cmd_data c_d,
				 int in_byte, int in_bit)
{
	unsigned char *sbuff;
	u8 sks[4];	/* sense-key specific bytes */
	int sl, asc;

	sbuff = scp->sense_buffer;
	if (!sbuff) {
		sdev_printk(KERN_ERR, scp->device,
			    "%s: sense_buffer is NULL\n", __func__);
		return;
	}
	asc = c_d ? INVALID_FIELD_IN_CDB : INVALID_FIELD_IN_PARAM_LIST;
	memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
	scsi_build_sense(scp, sdebug_dsense, ILLEGAL_REQUEST, asc, 0);
	memset(sks, 0, sizeof(sks));
	sks[0] = 0x80;		/* SKSV: field pointer is valid */
	if (c_d)
		sks[0] |= 0x40;	/* C/D: error is in the CDB */
	if (in_bit >= 0) {
		sks[0] |= 0x8;	/* BPV: bit pointer is valid */
		sks[0] |= 0x7 & in_bit;
	}
	put_unaligned_be16(in_byte, sks + 1);
	if (sdebug_dsense) {
		/* descriptor format: append a sense-key specific descriptor */
		sl = sbuff[7] + 8;
		sbuff[7] = sl;
		sbuff[sl] = 0x2;	/* descriptor type: SKS */
		sbuff[sl + 1] = 0x6;	/* additional length */
		memcpy(sbuff + sl + 4, sks, 3);
	} else
		/* fixed format: SKS occupies bytes 15..17 */
		memcpy(sbuff + 15, sks, 3);
	if (sdebug_verbose)
		sdev_printk(KERN_INFO, scp->device, "%s:  [sense_key,asc,ascq"
			    "]: [0x5,0x%x,0x0] %c byte=%d, bit=%d\n",
			    my_name, asc, c_d ? 'C' : 'D', in_byte, in_bit);
}
1277 
1278 static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq)
1279 {
1280 	if (!scp->sense_buffer) {
1281 		sdev_printk(KERN_ERR, scp->device,
1282 			    "%s: sense_buffer is NULL\n", __func__);
1283 		return;
1284 	}
1285 	memset(scp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
1286 
1287 	scsi_build_sense(scp, sdebug_dsense, key, asc, asq);
1288 
1289 	if (sdebug_verbose)
1290 		sdev_printk(KERN_INFO, scp->device,
1291 			    "%s:  [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
1292 			    my_name, key, asc, asq);
1293 }
1294 
/* Convenience wrapper: ILLEGAL REQUEST with INVALID COMMAND OPERATION CODE */
static void mk_sense_invalid_opcode(struct scsi_cmnd *scp)
{
	mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
}
1299 
1300 static int scsi_debug_ioctl(struct scsi_device *dev, unsigned int cmd,
1301 			    void __user *arg)
1302 {
1303 	if (sdebug_verbose) {
1304 		if (0x1261 == cmd)
1305 			sdev_printk(KERN_INFO, dev,
1306 				    "%s: BLKFLSBUF [0x1261]\n", __func__);
1307 		else if (0x5331 == cmd)
1308 			sdev_printk(KERN_INFO, dev,
1309 				    "%s: CDROM_GET_CAPABILITY [0x5331]\n",
1310 				    __func__);
1311 		else
1312 			sdev_printk(KERN_INFO, dev, "%s: cmd=0x%x\n",
1313 				    __func__, cmd);
1314 	}
1315 	return -EINVAL;
1316 	/* return -ENOTTY; // correct return but upsets fdisk */
1317 }
1318 
1319 static void config_cdb_len(struct scsi_device *sdev)
1320 {
1321 	switch (sdebug_cdb_len) {
1322 	case 6:	/* suggest 6 byte READ, WRITE and MODE SENSE/SELECT */
1323 		sdev->use_10_for_rw = false;
1324 		sdev->use_16_for_rw = false;
1325 		sdev->use_10_for_ms = false;
1326 		break;
1327 	case 10: /* suggest 10 byte RWs and 6 byte MODE SENSE/SELECT */
1328 		sdev->use_10_for_rw = true;
1329 		sdev->use_16_for_rw = false;
1330 		sdev->use_10_for_ms = false;
1331 		break;
1332 	case 12: /* suggest 10 byte RWs and 10 byte MODE SENSE/SELECT */
1333 		sdev->use_10_for_rw = true;
1334 		sdev->use_16_for_rw = false;
1335 		sdev->use_10_for_ms = true;
1336 		break;
1337 	case 16:
1338 		sdev->use_10_for_rw = false;
1339 		sdev->use_16_for_rw = true;
1340 		sdev->use_10_for_ms = true;
1341 		break;
1342 	case 32: /* No knobs to suggest this so same as 16 for now */
1343 		sdev->use_10_for_rw = false;
1344 		sdev->use_16_for_rw = true;
1345 		sdev->use_10_for_ms = true;
1346 		break;
1347 	default:
1348 		pr_warn("unexpected cdb_len=%d, force to 10\n",
1349 			sdebug_cdb_len);
1350 		sdev->use_10_for_rw = true;
1351 		sdev->use_16_for_rw = false;
1352 		sdev->use_10_for_ms = false;
1353 		sdebug_cdb_len = 10;
1354 		break;
1355 	}
1356 }
1357 
1358 static void all_config_cdb_len(void)
1359 {
1360 	struct sdebug_host_info *sdbg_host;
1361 	struct Scsi_Host *shost;
1362 	struct scsi_device *sdev;
1363 
1364 	mutex_lock(&sdebug_host_list_mutex);
1365 	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
1366 		shost = sdbg_host->shost;
1367 		shost_for_each_device(sdev, shost) {
1368 			config_cdb_len(sdev);
1369 		}
1370 	}
1371 	mutex_unlock(&sdebug_host_list_mutex);
1372 }
1373 
1374 static void clear_luns_changed_on_target(struct sdebug_dev_info *devip)
1375 {
1376 	struct sdebug_host_info *sdhp = devip->sdbg_host;
1377 	struct sdebug_dev_info *dp;
1378 
1379 	list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
1380 		if ((devip->sdbg_host == dp->sdbg_host) &&
1381 		    (devip->target == dp->target)) {
1382 			clear_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
1383 		}
1384 	}
1385 }
1386 
/*
 * Report the highest-priority pending unit attention for @devip, if any.
 * The lowest set bit in devip->uas_bm wins and is cleared once reported.
 * Returns check_condition_result when a UA was placed in @scp's sense
 * buffer, otherwise 0.
 */
static int make_ua(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	int k;

	k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS);
	if (k != SDEBUG_NUM_UAS) {
		const char *cp = NULL;

		switch (k) {
		case SDEBUG_UA_POR:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
					POWER_ON_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "power on reset";
			break;
		case SDEBUG_UA_POOCCUR:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
					POWER_ON_OCCURRED_ASCQ);
			if (sdebug_verbose)
				cp = "power on occurred";
			break;
		case SDEBUG_UA_BUS_RESET:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
					BUS_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "bus reset";
			break;
		case SDEBUG_UA_MODE_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
					MODE_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "mode parameters changed";
			break;
		case SDEBUG_UA_CAPACITY_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
					CAPACITY_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "capacity data changed";
			break;
		case SDEBUG_UA_MICROCODE_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					MICROCODE_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "microcode has been changed";
			break;
		case SDEBUG_UA_MICROCODE_CHANGED_WO_RESET:
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					MICROCODE_CHANGED_WO_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "microcode has been changed without reset";
			break;
		case SDEBUG_UA_LUNS_CHANGED:
			/*
			 * SPC-3 behavior is to report a UNIT ATTENTION with
			 * ASC/ASCQ REPORTED LUNS DATA HAS CHANGED on every LUN
			 * on the target, until a REPORT LUNS command is
			 * received.  SPC-4 behavior is to report it only once.
			 * NOTE:  sdebug_scsi_level does not use the same
			 * values as struct scsi_device->scsi_level.
			 */
			if (sdebug_scsi_level >= 6)	/* SPC-4 and above */
				clear_luns_changed_on_target(devip);
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					LUNS_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "reported luns data has changed";
			break;
		default:
			pr_warn("unexpected unit attention code=%d\n", k);
			if (sdebug_verbose)
				cp = "unknown";
			break;
		}
		clear_bit(k, devip->uas_bm);
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, scp->device,
				   "%s reports: Unit attention: %s\n",
				   my_name, cp);
		return check_condition_result;
	}
	return 0;
}
1472 
1473 /* Build SCSI "data-in" buffer. Returns 0 if ok else (DID_ERROR << 16). */
1474 static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
1475 				int arr_len)
1476 {
1477 	int act_len;
1478 	struct scsi_data_buffer *sdb = &scp->sdb;
1479 
1480 	if (!sdb->length)
1481 		return 0;
1482 	if (scp->sc_data_direction != DMA_FROM_DEVICE)
1483 		return DID_ERROR << 16;
1484 
1485 	act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
1486 				      arr, arr_len);
1487 	scsi_set_resid(scp, scsi_bufflen(scp) - act_len);
1488 
1489 	return 0;
1490 }
1491 
/* Partial build of SCSI "data-in" buffer. Returns 0 if ok else
 * (DID_ERROR << 16). Can write to offset in data-in buffer. If multiple
 * calls, not required to write in ascending offset order. Assumes resid
 * set to scsi_bufflen() prior to any calls.
 */
static int p_fill_from_dev_buffer(struct scsi_cmnd *scp, const void *arr,
				  int arr_len, unsigned int off_dst)
{
	unsigned int act_len, n;
	struct scsi_data_buffer *sdb = &scp->sdb;
	off_t skip = off_dst;

	if (sdb->length <= off_dst)
		return 0;	/* offset beyond the buffer: nothing to copy */
	if (scp->sc_data_direction != DMA_FROM_DEVICE)
		return DID_ERROR << 16;

	act_len = sg_pcopy_from_buffer(sdb->table.sgl, sdb->table.nents,
				       arr, arr_len, skip);
	pr_debug("%s: off_dst=%u, scsi_bufflen=%u, act_len=%u, resid=%d\n",
		 __func__, off_dst, scsi_bufflen(scp), act_len,
		 scsi_get_resid(scp));
	/* resid only shrinks across calls: keep the smallest value so far */
	n = scsi_bufflen(scp) - (off_dst + act_len);
	scsi_set_resid(scp, min_t(u32, scsi_get_resid(scp), n));
	return 0;
}
1518 
1519 /* Fetches from SCSI "data-out" buffer. Returns number of bytes fetched into
1520  * 'arr' or -1 if error.
1521  */
1522 static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
1523 			       int arr_len)
1524 {
1525 	if (!scsi_bufflen(scp))
1526 		return 0;
1527 	if (scp->sc_data_direction != DMA_TO_DEVICE)
1528 		return -1;
1529 
1530 	return scsi_sg_copy_to_buffer(scp, arr, arr_len);
1531 }
1532 
1533 
/* INQUIRY identification strings (array sizes leave room for the NUL) */
static char sdebug_inq_vendor_id[9] = "Linux   ";
static char sdebug_inq_product_id[17] = "scsi_debug      ";
static char sdebug_inq_product_rev[5] = SDEBUG_VERSION;
/* Use some locally assigned NAAs for SAS addresses. */
static const u64 naa3_comp_a = 0x3222222000000000ULL;
static const u64 naa3_comp_b = 0x3333333000000000ULL;
static const u64 naa3_comp_c = 0x3111111000000000ULL;
1541 
/* Device identification VPD page. Returns number of bytes placed in arr */
/*
 * Emits, in order: a T10 vendor-id descriptor, an optional logical unit
 * designator (UUID or NAA-3, plus a relative target port descriptor when
 * dev_id_num >= 0), then target port / port group / target device NAA-3
 * descriptors and a SCSI name string for the target device.
 */
static int inquiry_vpd_83(unsigned char *arr, int port_group_id,
			  int target_dev_id, int dev_id_num,
			  const char *dev_id_str, int dev_id_str_len,
			  const uuid_t *lu_name)
{
	int num, port_a;
	char b[32];

	port_a = target_dev_id + 1;
	/* T10 vendor identifier field format (faked) */
	arr[0] = 0x2;	/* ASCII */
	arr[1] = 0x1;
	arr[2] = 0x0;
	memcpy(&arr[4], sdebug_inq_vendor_id, 8);
	memcpy(&arr[12], sdebug_inq_product_id, 16);
	memcpy(&arr[28], dev_id_str, dev_id_str_len);
	num = 8 + 16 + dev_id_str_len;
	arr[3] = num;
	num += 4;
	if (dev_id_num >= 0) {
		if (sdebug_uuid_ctl) {
			/* Locally assigned UUID */
			arr[num++] = 0x1;  /* binary (not necessarily sas) */
			arr[num++] = 0xa;  /* PIV=0, lu, naa */
			arr[num++] = 0x0;
			arr[num++] = 0x12;
			arr[num++] = 0x10; /* uuid type=1, locally assigned */
			arr[num++] = 0x0;
			memcpy(arr + num, lu_name, 16);
			num += 16;
		} else {
			/* NAA-3, Logical unit identifier (binary) */
			arr[num++] = 0x1;  /* binary (not necessarily sas) */
			arr[num++] = 0x3;  /* PIV=0, lu, naa */
			arr[num++] = 0x0;
			arr[num++] = 0x8;
			put_unaligned_be64(naa3_comp_b + dev_id_num, arr + num);
			num += 8;
		}
		/* Target relative port number */
		arr[num++] = 0x61;	/* proto=sas, binary */
		arr[num++] = 0x94;	/* PIV=1, target port, rel port */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x4;	/* length */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x0;
		arr[num++] = 0x1;	/* relative port A */
	}
	/* NAA-3, Target port identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x93;	/* piv=1, target port, naa */
	arr[num++] = 0x0;
	arr[num++] = 0x8;
	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
	num += 8;
	/* NAA-3, Target port group identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x95;	/* piv=1, target port group id */
	arr[num++] = 0x0;
	arr[num++] = 0x4;
	arr[num++] = 0;
	arr[num++] = 0;
	put_unaligned_be16(port_group_id, arr + num);
	num += 2;
	/* NAA-3, Target device identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0xa3;	/* piv=1, target device, naa */
	arr[num++] = 0x0;
	arr[num++] = 0x8;
	put_unaligned_be64(naa3_comp_a + target_dev_id, arr + num);
	num += 8;
	/* SCSI name string: Target device identifier */
	arr[num++] = 0x63;	/* proto=sas, UTF-8 */
	arr[num++] = 0xa8;	/* piv=1, target device, SCSI name string */
	arr[num++] = 0x0;
	arr[num++] = 24;
	memcpy(arr + num, "naa.32222220", 12);
	num += 12;
	snprintf(b, sizeof(b), "%08X", target_dev_id);
	memcpy(arr + num, b, 8);
	num += 8;
	memset(arr + num, 0, 4);	/* NUL padding of the name string */
	num += 4;
	return num;
}
1629 
/* Canned payload for the Software interface identification VPD page (0x84) */
static unsigned char vpd84_data[] = {
/* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
    0x22,0x22,0x22,0x0,0xbb,0x1,
    0x22,0x22,0x22,0x0,0xbb,0x2,
};
1635 
1636 /*  Software interface identification VPD page */
1637 static int inquiry_vpd_84(unsigned char *arr)
1638 {
1639 	memcpy(arr, vpd84_data, sizeof(vpd84_data));
1640 	return sizeof(vpd84_data);
1641 }
1642 
/*
 * Append one network-services descriptor to @arr: a 4 byte header
 * (association/service type byte, two reserved bytes, padded length)
 * followed by @url, NUL terminated and zero padded to a multiple of 4.
 * Returns the number of bytes written.
 */
static int inquiry_vpd_85_descr(unsigned char *arr, int assoc_svc,
				const char *url)
{
	int olen = strlen(url);
	int plen = olen + 1;		/* include the NUL terminator */

	if (plen % 4)
		plen = ((plen / 4) + 1) * 4;	/* pad to 4-byte multiple */
	arr[0] = assoc_svc;
	arr[1] = 0x0;			/* reserved */
	arr[2] = 0x0;
	arr[3] = plen;			/* length, null terminated, padded */
	memcpy(arr + 4, url, olen);
	memset(arr + 4 + olen, 0, plen - olen);
	return 4 + plen;
}

/* Management network addresses VPD page. Returns number of bytes in arr */
static int inquiry_vpd_85(unsigned char *arr)
{
	int num = 0;

	/* 0x1: lu association, storage configuration service */
	num += inquiry_vpd_85_descr(arr + num, 0x1,
				    "https://www.kernel.org/config");
	/* 0x4: lu association, logging service */
	num += inquiry_vpd_85_descr(arr + num, 0x4,
				    "http://www.kernel.org/log");
	return num;
}
1677 
/* SCSI ports VPD page */
/*
 * Emits two relative-port descriptors (ports 1 and 2), each carrying a
 * single NAA-3 target port identifier derived from @target_dev_id.
 * Returns the number of bytes placed in @arr.
 */
static int inquiry_vpd_88(unsigned char *arr, int target_dev_id)
{
	int num = 0;
	int port_a, port_b;

	port_a = target_dev_id + 1;
	port_b = port_a + 1;
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;
	arr[num++] = 0x1;	/* relative port 1 (primary) */
	memset(arr + num, 0, 6);
	num += 6;
	arr[num++] = 0x0;
	arr[num++] = 12;	/* length tp descriptor */
	/* naa-5 target port identifier (A) */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x8;	/* length */
	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
	num += 8;
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;
	arr[num++] = 0x2;	/* relative port 2 (secondary) */
	memset(arr + num, 0, 6);
	num += 6;
	arr[num++] = 0x0;
	arr[num++] = 12;	/* length tp descriptor */
	/* naa-5 target port identifier (B) */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x8;	/* length */
	put_unaligned_be64(naa3_comp_a + port_b, arr + num);
	num += 8;

	return num;
}
1719 
1720 
/* Canned payload for the ATA Information VPD page (0x89); presumably a
 * captured SAT identify-data blob — see inquiry_vpd_89() below. */
static unsigned char vpd89_data[] = {
/* from 4th byte */ 0,0,0,0,
'l','i','n','u','x',' ',' ',' ',
'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
'1','2','3','4',
0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
0xec,0,0,0,
0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
0x53,0x41,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
0x20,0x20,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
0x10,0x80,
0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
};
1764 
1765 /* ATA Information VPD page */
1766 static int inquiry_vpd_89(unsigned char *arr)
1767 {
1768 	memcpy(arr, vpd89_data, sizeof(vpd89_data));
1769 	return sizeof(vpd89_data);
1770 }
1771 
1772 
/* Template for the Block limits VPD page (0xb0); the variable fields are
 * overwritten by inquiry_vpd_b0() below. */
static unsigned char vpdb0_data[] = {
	/* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
};
1779 
1780 /* Block limits VPD page (SBC-3) */
1781 static int inquiry_vpd_b0(unsigned char *arr)
1782 {
1783 	unsigned int gran;
1784 
1785 	memcpy(arr, vpdb0_data, sizeof(vpdb0_data));
1786 
1787 	/* Optimal transfer length granularity */
1788 	if (sdebug_opt_xferlen_exp != 0 &&
1789 	    sdebug_physblk_exp < sdebug_opt_xferlen_exp)
1790 		gran = 1 << sdebug_opt_xferlen_exp;
1791 	else
1792 		gran = 1 << sdebug_physblk_exp;
1793 	put_unaligned_be16(gran, arr + 2);
1794 
1795 	/* Maximum Transfer Length */
1796 	if (sdebug_store_sectors > 0x400)
1797 		put_unaligned_be32(sdebug_store_sectors, arr + 4);
1798 
1799 	/* Optimal Transfer Length */
1800 	put_unaligned_be32(sdebug_opt_blks, &arr[8]);
1801 
1802 	if (sdebug_lbpu) {
1803 		/* Maximum Unmap LBA Count */
1804 		put_unaligned_be32(sdebug_unmap_max_blocks, &arr[16]);
1805 
1806 		/* Maximum Unmap Block Descriptor Count */
1807 		put_unaligned_be32(sdebug_unmap_max_desc, &arr[20]);
1808 	}
1809 
1810 	/* Unmap Granularity Alignment */
1811 	if (sdebug_unmap_alignment) {
1812 		put_unaligned_be32(sdebug_unmap_alignment, &arr[28]);
1813 		arr[28] |= 0x80; /* UGAVALID */
1814 	}
1815 
1816 	/* Optimal Unmap Granularity */
1817 	put_unaligned_be32(sdebug_unmap_granularity, &arr[24]);
1818 
1819 	/* Maximum WRITE SAME Length */
1820 	put_unaligned_be64(sdebug_write_same_length, &arr[32]);
1821 
1822 	return 0x3c; /* Mandatory page length for Logical Block Provisioning */
1823 }
1824 
/* Block device characteristics VPD page (SBC-3) */
static int inquiry_vpd_b1(struct sdebug_dev_info *devip, unsigned char *arr)
{
	memset(arr, 0, 0x3c);
	/* memset() already cleared the high bytes of both 16-bit fields */
	arr[1] = 1;	/* rotation rate 1: non rotating (e.g. solid state) */
	arr[3] = 5;	/* nominal form factor: less than 1.8" */

	return 0x3c;
}
1836 
1837 /* Logical block provisioning VPD page (SBC-4) */
1838 static int inquiry_vpd_b2(unsigned char *arr)
1839 {
1840 	memset(arr, 0, 0x4);
1841 	arr[0] = 0;			/* threshold exponent */
1842 	if (sdebug_lbpu)
1843 		arr[1] = 1 << 7;
1844 	if (sdebug_lbpws)
1845 		arr[1] |= 1 << 6;
1846 	if (sdebug_lbpws10)
1847 		arr[1] |= 1 << 5;
1848 	if (sdebug_lbprz && scsi_debug_lbp())
1849 		arr[1] |= (sdebug_lbprz & 0x7) << 2;  /* sbc4r07 and later */
1850 	/* anc_sup=0; dp=0 (no provisioning group descriptor) */
1851 	/* minimum_percentage=0; provisioning_type=0 (unknown) */
1852 	/* threshold_percentage=0 */
1853 	return 0x4;
1854 }
1855 
/* Zoned block device characteristics VPD page (ZBC mandatory) */
static int inquiry_vpd_b6(struct sdebug_dev_info *devip, unsigned char *arr)
{
	memset(arr, 0, 0x3c);
	arr[0] = 0x1; /* set URSWRZ (unrestricted read in seq. wr req zone) */
	/*
	 * Set Optimal number of open sequential write preferred zones and
	 * Optimal number of non-sequentially written sequential write
	 * preferred zones fields to 'not reported' (0xffffffff). Leave other
	 * fields set to zero, apart from Max. number of open swrz_s field.
	 */
	put_unaligned_be32(0xffffffff, &arr[4]);
	put_unaligned_be32(0xffffffff, &arr[8]);
	/* Max open zones only meaningful for host-managed with a limit set */
	if (sdeb_zbc_model == BLK_ZONED_HM && devip->max_open)
		put_unaligned_be32(devip->max_open, &arr[12]);
	else
		put_unaligned_be32(0xffffffff, &arr[12]);
	/* When zone capacity is smaller than zone size, report the constant
	 * zone start offset method and the zone size (bytes 20..27). */
	if (devip->zcap < devip->zsize) {
		arr[19] = ZBC_CONSTANT_ZONE_START_OFFSET;
		put_unaligned_be64(devip->zsize, &arr[20]);
	} else {
		arr[19] = 0;
	}
	return 0x3c;
}
1881 
#define SDEBUG_BLE_LEN_AFTER_B4 28	/* thus vpage 32 bytes long */

/* Stream limits: MAXIMUM_NUMBER_OF_STREAMS is reported by VPD page 0xb7 and
 * sizes the IO advice hints grouping mode page descriptors. */
enum { MAXIMUM_NUMBER_OF_STREAMS = 6, PERMANENT_STREAM_COUNT = 5 };
1885 
1886 /* Block limits extension VPD page (SBC-4) */
1887 static int inquiry_vpd_b7(unsigned char *arrb4)
1888 {
1889 	memset(arrb4, 0, SDEBUG_BLE_LEN_AFTER_B4);
1890 	arrb4[1] = 1; /* Reduced stream control support (RSCS) */
1891 	put_unaligned_be16(MAXIMUM_NUMBER_OF_STREAMS, &arrb4[2]);
1892 	return SDEBUG_BLE_LEN_AFTER_B4;
1893 }
1894 
#define SDEBUG_LONG_INQ_SZ 96		/* length of standard INQUIRY response */
#define SDEBUG_MAX_INQ_ARR_SZ 584	/* scratch buffer size for VPD pages */
1897 
/*
 * Respond to the INQUIRY command: either one of the VPD pages built by the
 * inquiry_vpd_*() helpers above (when the EVPD bit is set) or the standard
 * inquiry data. Returns 0 on success, check_condition_result for an illegal
 * request, or DID_REQUEUE << 16 if the response buffer cannot be allocated.
 */
static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	unsigned char pq_pdt;
	unsigned char *arr;
	unsigned char *cmd = scp->cmnd;
	u32 alloc_len, n;
	int ret;
	bool have_wlun, is_disk, is_zbc, is_disk_zbc;

	alloc_len = get_unaligned_be16(cmd + 3);
	arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
	if (! arr)
		return DID_REQUEUE << 16;
	is_disk = (sdebug_ptype == TYPE_DISK);
	is_zbc = devip->zoned;
	is_disk_zbc = (is_disk || is_zbc);
	/* PQ/PDT byte: wlun, "not present" for a suppressed lun 0, or ptype */
	have_wlun = scsi_is_wlun(scp->device->lun);
	if (have_wlun)
		pq_pdt = TYPE_WLUN;	/* present, wlun */
	else if (sdebug_no_lun_0 && (devip->lun == SDEBUG_LUN_0_VAL))
		pq_pdt = 0x7f;	/* not present, PQ=3, PDT=0x1f */
	else
		pq_pdt = (sdebug_ptype & 0x1f);
	arr[0] = pq_pdt;
	if (0x2 & cmd[1]) {  /* CMDDT bit set */
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
		kfree(arr);
		return check_condition_result;
	} else if (0x1 & cmd[1]) {  /* EVPD bit set */
		int lu_id_num, port_group_id, target_dev_id;
		u32 len;
		char lu_id_str[6];
		int host_no = devip->sdbg_host->shost->host_no;

		arr[1] = cmd[2];
		port_group_id = (((host_no + 1) & 0x7f) << 8) +
		    (devip->channel & 0x7f);
		if (sdebug_vpd_use_hostno == 0)
			host_no = 0;
		lu_id_num = have_wlun ? -1 : (((host_no + 1) * 2000) +
			    (devip->target * 1000) + devip->lun);
		target_dev_id = ((host_no + 1) * 2000) +
				 (devip->target * 1000) - 3;
		len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
		if (0 == cmd[2]) { /* supported vital product data pages */
			n = 4;
			arr[n++] = 0x0;   /* this page */
			arr[n++] = 0x80;  /* unit serial number */
			arr[n++] = 0x83;  /* device identification */
			arr[n++] = 0x84;  /* software interface ident. */
			arr[n++] = 0x85;  /* management network addresses */
			arr[n++] = 0x86;  /* extended inquiry */
			arr[n++] = 0x87;  /* mode page policy */
			arr[n++] = 0x88;  /* SCSI ports */
			if (is_disk_zbc) {	  /* SBC or ZBC */
				arr[n++] = 0x89;  /* ATA information */
				arr[n++] = 0xb0;  /* Block limits */
				arr[n++] = 0xb1;  /* Block characteristics */
				if (is_disk)
					arr[n++] = 0xb2;  /* LB Provisioning */
				if (is_zbc)
					arr[n++] = 0xb6;  /* ZB dev. char. */
				arr[n++] = 0xb7;  /* Block limits extension */
			}
			arr[3] = n - 4;	  /* number of supported VPD pages */
		} else if (0x80 == cmd[2]) { /* unit serial number */
			arr[3] = len;
			memcpy(&arr[4], lu_id_str, len);
		} else if (0x83 == cmd[2]) { /* device identification */
			arr[3] = inquiry_vpd_83(&arr[4], port_group_id,
						target_dev_id, lu_id_num,
						lu_id_str, len,
						&devip->lu_name);
		} else if (0x84 == cmd[2]) { /* Software interface ident. */
			arr[3] = inquiry_vpd_84(&arr[4]);
		} else if (0x85 == cmd[2]) { /* Management network addresses */
			arr[3] = inquiry_vpd_85(&arr[4]);
		} else if (0x86 == cmd[2]) { /* extended inquiry */
			arr[3] = 0x3c;	/* number of following entries */
			if (sdebug_dif == T10_PI_TYPE3_PROTECTION)
				arr[4] = 0x4;	/* SPT: GRD_CHK:1 */
			else if (have_dif_prot)
				arr[4] = 0x5;   /* SPT: GRD_CHK:1, REF_CHK:1 */
			else
				arr[4] = 0x0;   /* no protection stuff */
			/*
			 * GROUP_SUP=1; HEADSUP=1 (HEAD OF QUEUE); ORDSUP=1
			 * (ORDERED queuing); SIMPSUP=1 (SIMPLE queuing).
			 */
			arr[5] = 0x17;
		} else if (0x87 == cmd[2]) { /* mode page policy */
			arr[3] = 0x8;	/* number of following entries */
			arr[4] = 0x2;	/* disconnect-reconnect mp */
			arr[6] = 0x80;	/* mlus, shared */
			arr[8] = 0x18;	 /* protocol specific lu */
			arr[10] = 0x82;	 /* mlus, per initiator port */
		} else if (0x88 == cmd[2]) { /* SCSI Ports */
			arr[3] = inquiry_vpd_88(&arr[4], target_dev_id);
		} else if (is_disk_zbc && 0x89 == cmd[2]) { /* ATA info */
			n = inquiry_vpd_89(&arr[4]);
			put_unaligned_be16(n, arr + 2);
		} else if (is_disk_zbc && 0xb0 == cmd[2]) { /* Block limits */
			arr[3] = inquiry_vpd_b0(&arr[4]);
		} else if (is_disk_zbc && 0xb1 == cmd[2]) { /* Block char. */
			arr[3] = inquiry_vpd_b1(devip, &arr[4]);
		} else if (is_disk && 0xb2 == cmd[2]) { /* LB Prov. */
			arr[3] = inquiry_vpd_b2(&arr[4]);
		} else if (is_zbc && cmd[2] == 0xb6) { /* ZB dev. charact. */
			arr[3] = inquiry_vpd_b6(devip, &arr[4]);
		} else if (cmd[2] == 0xb7) { /* block limits extension page */
			arr[3] = inquiry_vpd_b7(&arr[4]);
		} else {
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
			kfree(arr);
			return check_condition_result;
		}
		/* page length is in arr[2..3]; pages that set only arr[3]
		 * left arr[2] zeroed by kzalloc, so the be16 read is right */
		len = min_t(u32, get_unaligned_be16(arr + 2) + 4, alloc_len);
		ret = fill_from_dev_buffer(scp, arr,
			    min_t(u32, len, SDEBUG_MAX_INQ_ARR_SZ));
		kfree(arr);
		return ret;
	}
	/* drops through here for a standard inquiry */
	arr[1] = sdebug_removable ? 0x80 : 0;	/* Removable disk */
	arr[2] = sdebug_scsi_level;
	arr[3] = 2;    /* response_data_format==2 */
	arr[4] = SDEBUG_LONG_INQ_SZ - 5;
	arr[5] = (int)have_dif_prot;	/* PROTECT bit */
	if (sdebug_vpd_use_hostno == 0)
		arr[5] |= 0x10; /* claim: implicit TPGS */
	arr[6] = 0x10; /* claim: MultiP */
	/* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
	arr[7] = 0xa; /* claim: LINKED + CMDQUE */
	memcpy(&arr[8], sdebug_inq_vendor_id, 8);
	memcpy(&arr[16], sdebug_inq_product_id, 16);
	memcpy(&arr[32], sdebug_inq_product_rev, 4);
	/* Use Vendor Specific area to place driver date in ASCII hex */
	memcpy(&arr[36], sdebug_version_date, 8);
	/* version descriptors (2 bytes each) follow */
	put_unaligned_be16(0xc0, arr + 58);   /* SAM-6 no version claimed */
	put_unaligned_be16(0x5c0, arr + 60);  /* SPC-5 no version claimed */
	n = 62;
	if (is_disk) {		/* SBC-4 no version claimed */
		put_unaligned_be16(0x600, arr + n);
		n += 2;
	} else if (sdebug_ptype == TYPE_TAPE) {	/* SSC-4 rev 3 */
		put_unaligned_be16(0x525, arr + n);
		n += 2;
	} else if (is_zbc) {	/* ZBC BSR INCITS 536 revision 05 */
		put_unaligned_be16(0x624, arr + n);
		n += 2;
	}
	put_unaligned_be16(0x2100, arr + n);	/* SPL-4 no version claimed */
	ret = fill_from_dev_buffer(scp, arr,
			    min_t(u32, alloc_len, SDEBUG_LONG_INQ_SZ));
	kfree(arr);
	return ret;
}
2056 
/* See resp_iec_m_pg() for how this data is manipulated */
/* Informational exceptions control mode page (0x1c) current values; byte 2
 * bit 2 (TEST) and byte 3 low nibble (MRIE) are checked by resp_requests(). */
static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
				   0, 0, 0x0, 0x0};
2060 
2061 static int resp_requests(struct scsi_cmnd *scp,
2062 			 struct sdebug_dev_info *devip)
2063 {
2064 	unsigned char *cmd = scp->cmnd;
2065 	unsigned char arr[SCSI_SENSE_BUFFERSIZE];	/* assume >= 18 bytes */
2066 	bool dsense = !!(cmd[1] & 1);
2067 	u32 alloc_len = cmd[4];
2068 	u32 len = 18;
2069 	int stopped_state = atomic_read(&devip->stopped);
2070 
2071 	memset(arr, 0, sizeof(arr));
2072 	if (stopped_state > 0) {	/* some "pollable" data [spc6r02: 5.12.2] */
2073 		if (dsense) {
2074 			arr[0] = 0x72;
2075 			arr[1] = NOT_READY;
2076 			arr[2] = LOGICAL_UNIT_NOT_READY;
2077 			arr[3] = (stopped_state == 2) ? 0x1 : 0x2;
2078 			len = 8;
2079 		} else {
2080 			arr[0] = 0x70;
2081 			arr[2] = NOT_READY;		/* NO_SENSE in sense_key */
2082 			arr[7] = 0xa;			/* 18 byte sense buffer */
2083 			arr[12] = LOGICAL_UNIT_NOT_READY;
2084 			arr[13] = (stopped_state == 2) ? 0x1 : 0x2;
2085 		}
2086 	} else if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
2087 		/* Information exceptions control mode page: TEST=1, MRIE=6 */
2088 		if (dsense) {
2089 			arr[0] = 0x72;
2090 			arr[1] = 0x0;		/* NO_SENSE in sense_key */
2091 			arr[2] = THRESHOLD_EXCEEDED;
2092 			arr[3] = 0xff;		/* Failure prediction(false) */
2093 			len = 8;
2094 		} else {
2095 			arr[0] = 0x70;
2096 			arr[2] = 0x0;		/* NO_SENSE in sense_key */
2097 			arr[7] = 0xa;   	/* 18 byte sense buffer */
2098 			arr[12] = THRESHOLD_EXCEEDED;
2099 			arr[13] = 0xff;		/* Failure prediction(false) */
2100 		}
2101 	} else {	/* nothing to report */
2102 		if (dsense) {
2103 			len = 8;
2104 			memset(arr, 0, len);
2105 			arr[0] = 0x72;
2106 		} else {
2107 			memset(arr, 0, len);
2108 			arr[0] = 0x70;
2109 			arr[7] = 0xa;
2110 		}
2111 	}
2112 	return fill_from_dev_buffer(scp, arr, min_t(u32, len, alloc_len));
2113 }
2114 
/*
 * Respond to START STOP UNIT. Only power condition 0 is accepted. The
 * initial stopped state "2" models the tur_ms_to_ready delay and may not be
 * overridden by a START before that delay expires. Returns
 * SDEG_RES_IMMED_MASK when the state did not change or IMMED was set.
 */
static int resp_start_stop(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	unsigned char *cmd = scp->cmnd;
	int power_cond, want_stop, stopped_state;
	bool changing;

	power_cond = (cmd[4] & 0xf0) >> 4;
	if (power_cond) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 7);
		return check_condition_result;
	}
	want_stop = !(cmd[4] & 1);	/* START bit clear means stop */
	stopped_state = atomic_read(&devip->stopped);
	if (stopped_state == 2) {
		ktime_t now_ts = ktime_get_boottime();

		if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
			u64 diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));

			if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
				/* tur_ms_to_ready timer extinguished */
				atomic_set(&devip->stopped, 0);
				stopped_state = 0;
			}
		}
		if (stopped_state == 2) {
			if (want_stop) {
				stopped_state = 1;	/* dummy up success */
			} else {	/* Disallow tur_ms_to_ready delay to be overridden */
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 0 /* START bit */);
				return check_condition_result;
			}
		}
	}
	changing = (stopped_state != want_stop);
	if (changing)
		atomic_xchg(&devip->stopped, want_stop);
	if (!changing || (cmd[1] & 0x1))  /* state unchanged or IMMED bit set in cdb */
		return SDEG_RES_IMMED_MASK;
	else
		return 0;
}
2157 
2158 static sector_t get_sdebug_capacity(void)
2159 {
2160 	static const unsigned int gibibyte = 1073741824;
2161 
2162 	if (sdebug_virtual_gb > 0)
2163 		return (sector_t)sdebug_virtual_gb *
2164 			(gibibyte / sdebug_sector_size);
2165 	else
2166 		return sdebug_store_sectors;
2167 }
2168 
2169 #define SDEBUG_READCAP_ARR_SZ 8
2170 static int resp_readcap(struct scsi_cmnd *scp,
2171 			struct sdebug_dev_info *devip)
2172 {
2173 	unsigned char arr[SDEBUG_READCAP_ARR_SZ];
2174 	unsigned int capac;
2175 
2176 	/* following just in case virtual_gb changed */
2177 	sdebug_capacity = get_sdebug_capacity();
2178 	memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
2179 	if (sdebug_capacity < 0xffffffff) {
2180 		capac = (unsigned int)sdebug_capacity - 1;
2181 		put_unaligned_be32(capac, arr + 0);
2182 	} else
2183 		put_unaligned_be32(0xffffffff, arr + 0);
2184 	put_unaligned_be16(sdebug_sector_size, arr + 6);
2185 	return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
2186 }
2187 
#define SDEBUG_READCAP16_ARR_SZ 32
/*
 * Respond to READ CAPACITY(16): last LBA, block size, physical block
 * exponent, lowest aligned LBA, plus logical block provisioning (LBPME,
 * LBPRZ), RC BASIS and protection (P_TYPE/PROT_EN) fields.
 */
static int resp_readcap16(struct scsi_cmnd *scp,
			  struct sdebug_dev_info *devip)
{
	unsigned char *cmd = scp->cmnd;
	unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
	u32 alloc_len;

	alloc_len = get_unaligned_be32(cmd + 10);
	/* following just in case virtual_gb changed */
	sdebug_capacity = get_sdebug_capacity();
	memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
	put_unaligned_be64((u64)(sdebug_capacity - 1), arr + 0);
	put_unaligned_be32(sdebug_sector_size, arr + 8);
	arr[13] = sdebug_physblk_exp & 0xf;
	arr[14] = (sdebug_lowest_aligned >> 8) & 0x3f;

	if (scsi_debug_lbp()) {
		arr[14] |= 0x80; /* LBPME */
		/* from sbc4r07, this LBPRZ field is 1 bit, but the LBPRZ in
		 * the LB Provisioning VPD page is 3 bits. Note that lbprz=2
		 * in the wider field maps to 0 in this field.
		 */
		if (sdebug_lbprz & 1)	/* precisely what the draft requires */
			arr[14] |= 0x40;
	}

	/*
	 * Since the scsi_debug READ CAPACITY implementation always reports the
	 * total disk capacity, set RC BASIS = 1 for host-managed ZBC devices.
	 */
	if (devip->zoned)
		arr[12] |= 1 << 4;

	arr[15] = sdebug_lowest_aligned & 0xff;

	if (have_dif_prot) {
		arr[12] = (sdebug_dif - 1) << 1; /* P_TYPE */
		arr[12] |= 1; /* PROT_EN */
	}

	return fill_from_dev_buffer(scp, arr,
			    min_t(u32, alloc_len, SDEBUG_READCAP16_ARR_SZ));
}
2232 
#define SDEBUG_MAX_TGTPGS_ARR_SZ 1412

/*
 * Respond to REPORT TARGET PORT GROUPS: two one-port groups matching the
 * two relative ports advertised by VPD page 0x88; group B is reported
 * unavailable.
 */
static int resp_report_tgtpgs(struct scsi_cmnd *scp,
			      struct sdebug_dev_info *devip)
{
	unsigned char *cmd = scp->cmnd;
	unsigned char *arr;
	int host_no = devip->sdbg_host->shost->host_no;
	int port_group_a, port_group_b, port_a, port_b;
	u32 alen, n, rlen;
	int ret;

	alen = get_unaligned_be32(cmd + 6);
	arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
	if (! arr)
		return DID_REQUEUE << 16;
	/*
	 * EVPD page 0x88 states we have two ports, one
	 * real and a fake port with no device connected.
	 * So we create two port groups with one port each
	 * and set the group with port B to unavailable.
	 */
	port_a = 0x1; /* relative port A */
	port_b = 0x2; /* relative port B */
	port_group_a = (((host_no + 1) & 0x7f) << 8) +
			(devip->channel & 0x7f);
	port_group_b = (((host_no + 1) & 0x7f) << 8) +
			(devip->channel & 0x7f) + 0x80;

	/*
	 * The asymmetric access state is cycled according to the host_id.
	 */
	n = 4;
	if (sdebug_vpd_use_hostno == 0) {
		arr[n++] = host_no % 3; /* Asymm access state */
		arr[n++] = 0x0F; /* claim: all states are supported */
	} else {
		arr[n++] = 0x0; /* Active/Optimized path */
		arr[n++] = 0x01; /* only support active/optimized paths */
	}
	put_unaligned_be16(port_group_a, arr + n);
	n += 2;
	arr[n++] = 0;    /* Reserved */
	arr[n++] = 0;    /* Status code */
	arr[n++] = 0;    /* Vendor unique */
	arr[n++] = 0x1;  /* One port per group */
	arr[n++] = 0;    /* Reserved */
	arr[n++] = 0;    /* Reserved */
	put_unaligned_be16(port_a, arr + n);
	n += 2;
	arr[n++] = 3;    /* Port unavailable */
	arr[n++] = 0x08; /* claim: only unavailable paths are supported */
	put_unaligned_be16(port_group_b, arr + n);
	n += 2;
	arr[n++] = 0;    /* Reserved */
	arr[n++] = 0;    /* Status code */
	arr[n++] = 0;    /* Vendor unique */
	arr[n++] = 0x1;  /* One port per group */
	arr[n++] = 0;    /* Reserved */
	arr[n++] = 0;    /* Reserved */
	put_unaligned_be16(port_b, arr + n);
	n += 2;

	rlen = n - 4;
	put_unaligned_be32(rlen, arr + 0);

	/*
	 * Return the smallest value of either
	 * - The allocated length
	 * - The constructed command length
	 * - The maximum array size
	 */
	rlen = min(alen, n);
	ret = fill_from_dev_buffer(scp, arr,
			   min_t(u32, rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
	kfree(arr);
	return ret;
}
2311 
/*
 * Respond to REPORT SUPPORTED OPERATION CODES. reporting_opts 0 lists every
 * supported command from opcode_info_arr (optionally with command timeout
 * descriptors when RCTD is set); 1..3 report one command's support status
 * and cdb usage mask.
 */
static int resp_rsup_opcodes(struct scsi_cmnd *scp,
			     struct sdebug_dev_info *devip)
{
	bool rctd;
	u8 reporting_opts, req_opcode, sdeb_i, supp;
	u16 req_sa, u;
	u32 alloc_len, a_len;
	int k, offset, len, errsts, count, bump, na;
	const struct opcode_info_t *oip;
	const struct opcode_info_t *r_oip;
	u8 *arr;
	u8 *cmd = scp->cmnd;

	rctd = !!(cmd[2] & 0x80);
	reporting_opts = cmd[2] & 0x7;
	req_opcode = cmd[3];
	req_sa = get_unaligned_be16(cmd + 4);
	alloc_len = get_unaligned_be32(cmd + 6);
	if (alloc_len < 4 || alloc_len > 0xffff) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
		return check_condition_result;
	}
	if (alloc_len > 8192)
		a_len = 8192;
	else
		a_len = alloc_len;
	arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_ATOMIC);
	if (NULL == arr) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
				INSUFF_RES_ASCQ);
		return check_condition_result;
	}
	switch (reporting_opts) {
	case 0:	/* all commands */
		/* count number of commands */
		for (count = 0, oip = opcode_info_arr;
		     oip->num_attached != 0xff; ++oip) {
			if (F_INV_OP & oip->flags)
				continue;
			count += (oip->num_attached + 1);
		}
		/* descriptor size: 8 bytes, or 20 with a timeouts descriptor */
		bump = rctd ? 20 : 8;
		put_unaligned_be32(count * bump, arr);
		for (offset = 4, oip = opcode_info_arr;
		     oip->num_attached != 0xff && offset < a_len; ++oip) {
			if (F_INV_OP & oip->flags)
				continue;
			na = oip->num_attached;
			arr[offset] = oip->opcode;
			put_unaligned_be16(oip->sa, arr + offset + 2);
			if (rctd)
				arr[offset + 5] |= 0x2;	/* CTDP */
			if (FF_SA & oip->flags)
				arr[offset + 5] |= 0x1;	/* SERVACTV */
			put_unaligned_be16(oip->len_mask[0], arr + offset + 6);
			if (rctd)
				put_unaligned_be16(0xa, arr + offset + 8);
			r_oip = oip;
			/* emit the attached (same opcode, other sa) entries */
			for (k = 0, oip = oip->arrp; k < na; ++k, ++oip) {
				if (F_INV_OP & oip->flags)
					continue;
				offset += bump;
				arr[offset] = oip->opcode;
				put_unaligned_be16(oip->sa, arr + offset + 2);
				if (rctd)
					arr[offset + 5] |= 0x2;
				if (FF_SA & oip->flags)
					arr[offset + 5] |= 0x1;
				put_unaligned_be16(oip->len_mask[0],
						   arr + offset + 6);
				if (rctd)
					put_unaligned_be16(0xa,
							   arr + offset + 8);
			}
			oip = r_oip;
			offset += bump;
		}
		break;
	case 1:	/* one command: opcode only */
	case 2:	/* one command: opcode plus service action */
	case 3:	/* one command: if sa==0 then opcode only else opcode+sa */
		sdeb_i = opcode_ind_arr[req_opcode];
		oip = &opcode_info_arr[sdeb_i];
		if (F_INV_OP & oip->flags) {
			supp = 1;	/* not supported */
			offset = 4;
		} else {
			if (1 == reporting_opts) {
				if (FF_SA & oip->flags) {
					mk_sense_invalid_fld(scp, SDEB_IN_CDB,
							     2, 2);
					kfree(arr);
					return check_condition_result;
				}
				req_sa = 0;
			} else if (2 == reporting_opts &&
				   0 == (FF_SA & oip->flags)) {
				/* point at requested sa */
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1);
				kfree(arr);
				return check_condition_result;
			}
			if (0 == (FF_SA & oip->flags) &&
			    req_opcode == oip->opcode)
				supp = 3;
			else if (0 == (FF_SA & oip->flags)) {
				na = oip->num_attached;
				for (k = 0, oip = oip->arrp; k < na;
				     ++k, ++oip) {
					if (req_opcode == oip->opcode)
						break;
				}
				supp = (k >= na) ? 1 : 3;
			} else if (req_sa != oip->sa) {
				na = oip->num_attached;
				for (k = 0, oip = oip->arrp; k < na;
				     ++k, ++oip) {
					if (req_sa == oip->sa)
						break;
				}
				supp = (k >= na) ? 1 : 3;
			} else
				supp = 3;
			if (3 == supp) {	/* supported: emit cdb usage data */
				u = oip->len_mask[0];
				put_unaligned_be16(u, arr + 2);
				arr[4] = oip->opcode;
				for (k = 1; k < u; ++k)
					arr[4 + k] = (k < 16) ?
						 oip->len_mask[k] : 0xff;
				offset = 4 + u;
			} else
				offset = 4;
		}
		arr[1] = (rctd ? 0x80 : 0) | supp;
		if (rctd) {
			put_unaligned_be16(0xa, arr + offset);
			offset += 12;
		}
		break;
	default:
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
		kfree(arr);
		return check_condition_result;
	}
	offset = (offset < a_len) ? offset : a_len;
	len = (offset < alloc_len) ? offset : alloc_len;
	errsts = fill_from_dev_buffer(scp, arr, len);
	kfree(arr);
	return errsts;
}
2462 
2463 static int resp_rsup_tmfs(struct scsi_cmnd *scp,
2464 			  struct sdebug_dev_info *devip)
2465 {
2466 	bool repd;
2467 	u32 alloc_len, len;
2468 	u8 arr[16];
2469 	u8 *cmd = scp->cmnd;
2470 
2471 	memset(arr, 0, sizeof(arr));
2472 	repd = !!(cmd[2] & 0x80);
2473 	alloc_len = get_unaligned_be32(cmd + 6);
2474 	if (alloc_len < 4) {
2475 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
2476 		return check_condition_result;
2477 	}
2478 	arr[0] = 0xc8;		/* ATS | ATSS | LURS */
2479 	arr[1] = 0x1;		/* ITNRS */
2480 	if (repd) {
2481 		arr[3] = 0xc;
2482 		len = 16;
2483 	} else
2484 		len = 4;
2485 
2486 	len = (len < alloc_len) ? len : alloc_len;
2487 	return fill_from_dev_buffer(scp, arr, len);
2488 }
2489 
2490 /* <<Following mode page info copied from ST318451LW>> */
2491 
/*
 * Read-Write Error Recovery mode page (0x1) for MODE SENSE. Copies the
 * page into p; pcontrol==1 (changeable values) zeroes everything after the
 * two-byte header. Returns the page length. target is unused.
 */
static int resp_err_recov_pg(unsigned char *p, int pcontrol, int target)
{	/* Read-Write Error Recovery page for mode_sense */
	/* static const: no need to rebuild the template on every call */
	static const unsigned char err_recov_pg[] = {0x1, 0xa, 0xc0, 11, 240,
						     0, 0, 0, 5, 0,
						     0xff, 0xff};

	memcpy(p, err_recov_pg, sizeof(err_recov_pg));
	if (1 == pcontrol)
		memset(p + 2, 0, sizeof(err_recov_pg) - 2);
	return sizeof(err_recov_pg);
}
2502 
/*
 * Disconnect-Reconnect mode page (0x2) for MODE SENSE. Copies the page
 * into p; pcontrol==1 (changeable values) zeroes everything after the
 * two-byte header. Returns the page length. target is unused.
 */
static int resp_disconnect_pg(unsigned char *p, int pcontrol, int target)
{ 	/* Disconnect-Reconnect page for mode_sense */
	/* static const: no need to rebuild the template on every call */
	static const unsigned char disconnect_pg[] = {0x2, 0xe, 128, 128,
						      0, 10, 0, 0,
						      0, 0, 0, 0,
						      0, 0, 0, 0};

	memcpy(p, disconnect_pg, sizeof(disconnect_pg));
	if (1 == pcontrol)
		memset(p + 2, 0, sizeof(disconnect_pg) - 2);
	return sizeof(disconnect_pg);
}
2513 
2514 static int resp_format_pg(unsigned char *p, int pcontrol, int target)
2515 {       /* Format device page for mode_sense */
2516 	unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
2517 				     0, 0, 0, 0, 0, 0, 0, 0,
2518 				     0, 0, 0, 0, 0x40, 0, 0, 0};
2519 
2520 	memcpy(p, format_pg, sizeof(format_pg));
2521 	put_unaligned_be16(sdebug_sectors_per, p + 10);
2522 	put_unaligned_be16(sdebug_sector_size, p + 12);
2523 	if (sdebug_removable)
2524 		p[20] |= 0x20; /* should agree with INQUIRY */
2525 	if (1 == pcontrol)
2526 		memset(p + 2, 0, sizeof(format_pg) - 2);
2527 	return sizeof(format_pg);
2528 }
2529 
/* Caching mode page (0x8) current values; the WCE bit (byte 2 bit 2) may be
 * cleared by resp_caching_pg() when SDEBUG_OPT_N_WCE is set. */
static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
				     0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,
				     0, 0, 0, 0};
2533 
static int resp_caching_pg(unsigned char *p, int pcontrol, int target)
{ 	/* Caching page for mode_sense */
	/* changeable-values mask: only WCE (byte 2 bit 2) is changeable */
	unsigned char ch_caching_pg[] = {/* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
	/* default values */
	unsigned char d_caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
		0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,     0, 0, 0, 0};

	/* NOTE(review): this clears WCE in the shared caching_pg template but
	 * never restores it if the option were later cleared — presumably
	 * sdebug_opts is fixed after load; confirm. */
	if (SDEBUG_OPT_N_WCE & sdebug_opts)
		caching_pg[2] &= ~0x4;	/* set WCE=0 (default WCE=1) */
	memcpy(p, caching_pg, sizeof(caching_pg));
	if (1 == pcontrol)
		memcpy(p + 2, ch_caching_pg, sizeof(ch_caching_pg));
	else if (2 == pcontrol)
		memcpy(p, d_caching_pg, sizeof(d_caching_pg));
	return sizeof(caching_pg);
}
2550 
/* Control mode page (0xa) current values; the D_SENSE and ATO bits are
 * refreshed from module parameters in resp_ctrl_m_pg(). */
static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
				    0, 0, 0x2, 0x4b};
2553 
static int resp_ctrl_m_pg(unsigned char *p, int pcontrol, int target)
{ 	/* Control mode page for mode_sense */
	/* changeable-values mask: D_SENSE and TST reported changeable */
	unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
					0, 0, 0, 0};
	/* default values */
	unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
				     0, 0, 0x2, 0x4b};

	/* refresh D_SENSE (descriptor sense) from the module parameter */
	if (sdebug_dsense)
		ctrl_m_pg[2] |= 0x4;
	else
		ctrl_m_pg[2] &= ~0x4;

	if (sdebug_ato)
		ctrl_m_pg[5] |= 0x80; /* ATO=1 */

	memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
	if (1 == pcontrol)
		memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
	else if (2 == pcontrol)
		memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
	return sizeof(ctrl_m_pg);
}
2576 
2577 /* IO Advice Hints Grouping mode page */
static int resp_grouping_m_pg(unsigned char *p, int pcontrol, int target)
{
	/* IO Advice Hints Grouping mode page */
	struct grouping_m_pg {
		u8 page_code;	/* OR 0x40 when subpage_code > 0 */
		u8 subpage_code;
		__be16 page_length;
		u8 reserved[12];
		struct scsi_io_group_descriptor descr[MAXIMUM_NUMBER_OF_STREAMS];
	};
	/* Page 0xa, subpage 0x5: all but the last stream descriptor enabled. */
	static const struct grouping_m_pg gr_m_pg = {
		.page_code = 0xa | 0x40,	/* 0x40 = SPF (subpage format) */
		.subpage_code = 5,
		.page_length = cpu_to_be16(sizeof(gr_m_pg) - 4),
		.descr = {
			{ .st_enble = 1 },
			{ .st_enble = 1 },
			{ .st_enble = 1 },
			{ .st_enble = 1 },
			{ .st_enble = 1 },
			{ .st_enble = 0 },
		}
	};

	/* Catch layout drift between the local struct and the descriptors. */
	BUILD_BUG_ON(sizeof(struct grouping_m_pg) !=
		     16 + MAXIMUM_NUMBER_OF_STREAMS * 16);
	memcpy(p, &gr_m_pg, sizeof(gr_m_pg));
	if (1 == pcontrol) {
		/* There are no changeable values so clear from byte 4 on. */
		memset(p + 4, 0, sizeof(gr_m_pg) - 4);
	}
	return sizeof(gr_m_pg);
}
2611 
2612 static int resp_iec_m_pg(unsigned char *p, int pcontrol, int target)
2613 {	/* Informational Exceptions control mode page for mode_sense */
2614 	unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
2615 				       0, 0, 0x0, 0x0};
2616 	unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
2617 				      0, 0, 0x0, 0x0};
2618 
2619 	memcpy(p, iec_m_pg, sizeof(iec_m_pg));
2620 	if (1 == pcontrol)
2621 		memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
2622 	else if (2 == pcontrol)
2623 		memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
2624 	return sizeof(iec_m_pg);
2625 }
2626 
static int resp_sas_sf_m_pg(unsigned char *p, int pcontrol, int target)
{	/* SAS SSP mode page - short format for mode_sense */
	static const unsigned char sas_sf_m_pg[] = {
		0x19, 0x6, 0x6, 0x0, 0x7, 0xd0, 0x0, 0x0,
	};
	const int pg_len = sizeof(sas_sf_m_pg);

	memcpy(p, sas_sf_m_pg, pg_len);
	if (pcontrol == 1)
		memset(p + 2, 0, pg_len - 2);	/* no changeable fields */
	return pg_len;
}
2637 
2638 
static int resp_sas_pcd_m_spg(unsigned char *p, int pcontrol, int target,
			      int target_dev_id)
{	/* SAS phy control and discover mode page for mode_sense */
	/* Template for page 0x19 subpage 0x1 with two phy descriptors;
	 * SAS addresses and phy identifiers are patched in below. */
	unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
		    0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
		    0x2, 0, 0, 0, 0, 0, 0, 0,
		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
		    0, 0, 0, 0, 0, 0, 0, 0,
		    0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
		    0x3, 0, 0, 0, 0, 0, 0, 0,
		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
		    0, 0, 0, 0, 0, 0, 0, 0,
		};
	int port_a, port_b;

	/* Fill in the SAS address fields of both phy descriptors. */
	put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 16);
	put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 24);
	put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 64);
	put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 72);
	/* Derive per-phy "attached" identifiers from the target device id. */
	port_a = target_dev_id + 1;
	port_b = port_a + 1;
	memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
	put_unaligned_be32(port_a, p + 20);
	put_unaligned_be32(port_b, p + 48 + 20);
	if (1 == pcontrol)
		/* no changeable fields: clear everything after the header */
		memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
	return sizeof(sas_pcd_m_pg);
}
2671 
static int resp_sas_sha_m_spg(unsigned char *p, int pcontrol)
{	/* SAS SSP shared protocol specific port mode subpage */
	static const unsigned char sas_sha_m_pg[] = {
		0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
		0, 0, 0, 0, 0, 0, 0, 0,
	};
	const int pg_len = sizeof(sas_sha_m_pg);

	memcpy(p, sas_sha_m_pg, pg_len);
	if (pcontrol == 1)
		memset(p + 4, 0, pg_len - 4);	/* no changeable fields */
	return pg_len;
}
2683 
2684 /* PAGE_SIZE is more than necessary but provides room for future expansion. */
2685 #define SDEBUG_MAX_MSENSE_SZ PAGE_SIZE
2686 
2687 static int resp_mode_sense(struct scsi_cmnd *scp,
2688 			   struct sdebug_dev_info *devip)
2689 {
2690 	int pcontrol, pcode, subpcode, bd_len;
2691 	unsigned char dev_spec;
2692 	u32 alloc_len, offset, len;
2693 	int target_dev_id;
2694 	int target = scp->device->id;
2695 	unsigned char *ap;
2696 	unsigned char *arr __free(kfree);
2697 	unsigned char *cmd = scp->cmnd;
2698 	bool dbd, llbaa, msense_6, is_disk, is_zbc;
2699 
2700 	arr = kzalloc(SDEBUG_MAX_MSENSE_SZ, GFP_ATOMIC);
2701 	if (!arr)
2702 		return -ENOMEM;
2703 	dbd = !!(cmd[1] & 0x8);		/* disable block descriptors */
2704 	pcontrol = (cmd[2] & 0xc0) >> 6;
2705 	pcode = cmd[2] & 0x3f;
2706 	subpcode = cmd[3];
2707 	msense_6 = (MODE_SENSE == cmd[0]);
2708 	llbaa = msense_6 ? false : !!(cmd[1] & 0x10);
2709 	is_disk = (sdebug_ptype == TYPE_DISK);
2710 	is_zbc = devip->zoned;
2711 	if ((is_disk || is_zbc) && !dbd)
2712 		bd_len = llbaa ? 16 : 8;
2713 	else
2714 		bd_len = 0;
2715 	alloc_len = msense_6 ? cmd[4] : get_unaligned_be16(cmd + 7);
2716 	memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
2717 	if (0x3 == pcontrol) {  /* Saving values not supported */
2718 		mk_sense_buffer(scp, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, 0);
2719 		return check_condition_result;
2720 	}
2721 	target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
2722 			(devip->target * 1000) - 3;
2723 	/* for disks+zbc set DPOFUA bit and clear write protect (WP) bit */
2724 	if (is_disk || is_zbc) {
2725 		dev_spec = 0x10;	/* =0x90 if WP=1 implies read-only */
2726 		if (sdebug_wp)
2727 			dev_spec |= 0x80;
2728 	} else
2729 		dev_spec = 0x0;
2730 	if (msense_6) {
2731 		arr[2] = dev_spec;
2732 		arr[3] = bd_len;
2733 		offset = 4;
2734 	} else {
2735 		arr[3] = dev_spec;
2736 		if (16 == bd_len)
2737 			arr[4] = 0x1;	/* set LONGLBA bit */
2738 		arr[7] = bd_len;	/* assume 255 or less */
2739 		offset = 8;
2740 	}
2741 	ap = arr + offset;
2742 	if ((bd_len > 0) && (!sdebug_capacity))
2743 		sdebug_capacity = get_sdebug_capacity();
2744 
2745 	if (8 == bd_len) {
2746 		if (sdebug_capacity > 0xfffffffe)
2747 			put_unaligned_be32(0xffffffff, ap + 0);
2748 		else
2749 			put_unaligned_be32(sdebug_capacity, ap + 0);
2750 		put_unaligned_be16(sdebug_sector_size, ap + 6);
2751 		offset += bd_len;
2752 		ap = arr + offset;
2753 	} else if (16 == bd_len) {
2754 		put_unaligned_be64((u64)sdebug_capacity, ap + 0);
2755 		put_unaligned_be32(sdebug_sector_size, ap + 12);
2756 		offset += bd_len;
2757 		ap = arr + offset;
2758 	}
2759 
2760 	/*
2761 	 * N.B. If len>0 before resp_*_pg() call, then form of that call should be:
2762 	 *        len += resp_*_pg(ap + len, pcontrol, target);
2763 	 */
2764 	switch (pcode) {
2765 	case 0x1:	/* Read-Write error recovery page, direct access */
2766 		if (subpcode > 0x0 && subpcode < 0xff)
2767 			goto bad_subpcode;
2768 		len = resp_err_recov_pg(ap, pcontrol, target);
2769 		offset += len;
2770 		break;
2771 	case 0x2:	/* Disconnect-Reconnect page, all devices */
2772 		if (subpcode > 0x0 && subpcode < 0xff)
2773 			goto bad_subpcode;
2774 		len = resp_disconnect_pg(ap, pcontrol, target);
2775 		offset += len;
2776 		break;
2777 	case 0x3:       /* Format device page, direct access */
2778 		if (subpcode > 0x0 && subpcode < 0xff)
2779 			goto bad_subpcode;
2780 		if (is_disk) {
2781 			len = resp_format_pg(ap, pcontrol, target);
2782 			offset += len;
2783 		} else {
2784 			goto bad_pcode;
2785 		}
2786 		break;
2787 	case 0x8:	/* Caching page, direct access */
2788 		if (subpcode > 0x0 && subpcode < 0xff)
2789 			goto bad_subpcode;
2790 		if (is_disk || is_zbc) {
2791 			len = resp_caching_pg(ap, pcontrol, target);
2792 			offset += len;
2793 		} else {
2794 			goto bad_pcode;
2795 		}
2796 		break;
2797 	case 0xa:	/* Control Mode page, all devices */
2798 		switch (subpcode) {
2799 		case 0:
2800 			len = resp_ctrl_m_pg(ap, pcontrol, target);
2801 			break;
2802 		case 0x05:
2803 			len = resp_grouping_m_pg(ap, pcontrol, target);
2804 			break;
2805 		case 0xff:
2806 			len = resp_ctrl_m_pg(ap, pcontrol, target);
2807 			len += resp_grouping_m_pg(ap + len, pcontrol, target);
2808 			break;
2809 		default:
2810 			goto bad_subpcode;
2811 		}
2812 		offset += len;
2813 		break;
2814 	case 0x19:	/* if spc==1 then sas phy, control+discover */
2815 		if (subpcode > 0x2 && subpcode < 0xff)
2816 			goto bad_subpcode;
2817 		len = 0;
2818 		if ((0x0 == subpcode) || (0xff == subpcode))
2819 			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2820 		if ((0x1 == subpcode) || (0xff == subpcode))
2821 			len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
2822 						  target_dev_id);
2823 		if ((0x2 == subpcode) || (0xff == subpcode))
2824 			len += resp_sas_sha_m_spg(ap + len, pcontrol);
2825 		offset += len;
2826 		break;
2827 	case 0x1c:	/* Informational Exceptions Mode page, all devices */
2828 		if (subpcode > 0x0 && subpcode < 0xff)
2829 			goto bad_subpcode;
2830 		len = resp_iec_m_pg(ap, pcontrol, target);
2831 		offset += len;
2832 		break;
2833 	case 0x3f:	/* Read all Mode pages */
2834 		if (subpcode > 0x0 && subpcode < 0xff)
2835 			goto bad_subpcode;
2836 		len = resp_err_recov_pg(ap, pcontrol, target);
2837 		len += resp_disconnect_pg(ap + len, pcontrol, target);
2838 		if (is_disk) {
2839 			len += resp_format_pg(ap + len, pcontrol, target);
2840 			len += resp_caching_pg(ap + len, pcontrol, target);
2841 		} else if (is_zbc) {
2842 			len += resp_caching_pg(ap + len, pcontrol, target);
2843 		}
2844 		len += resp_ctrl_m_pg(ap + len, pcontrol, target);
2845 		if (0xff == subpcode)
2846 			len += resp_grouping_m_pg(ap + len, pcontrol, target);
2847 		len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2848 		if (0xff == subpcode) {
2849 			len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
2850 						  target_dev_id);
2851 			len += resp_sas_sha_m_spg(ap + len, pcontrol);
2852 		}
2853 		len += resp_iec_m_pg(ap + len, pcontrol, target);
2854 		offset += len;
2855 		break;
2856 	default:
2857 		goto bad_pcode;
2858 	}
2859 	if (msense_6)
2860 		arr[0] = offset - 1;
2861 	else
2862 		put_unaligned_be16((offset - 2), arr + 0);
2863 	return fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, offset));
2864 
2865 bad_pcode:
2866 	mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2867 	return check_condition_result;
2868 
2869 bad_subpcode:
2870 	mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2871 	return check_condition_result;
2872 }
2873 
2874 #define SDEBUG_MAX_MSELECT_SZ 512
2875 
/*
 * Respond to MODE SELECT(6) and MODE SELECT(10).  Fetches the parameter
 * list from the initiator, validates the header and the single mode page
 * it carries, and applies changes to the caching (0x08), control (0x0a)
 * or informational exceptions (0x1c) module-global page images.
 */
static int resp_mode_select(struct scsi_cmnd *scp,
			    struct sdebug_dev_info *devip)
{
	int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
	int param_len, res, mpage;
	unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
	unsigned char *cmd = scp->cmnd;
	int mselect6 = (MODE_SELECT == cmd[0]);

	memset(arr, 0, sizeof(arr));
	pf = cmd[1] & 0x10;	/* page format: must be set */
	sp = cmd[1] & 0x1;	/* save pages: not supported */
	param_len = mselect6 ? cmd[4] : get_unaligned_be16(cmd + 7);
	if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1);
		return check_condition_result;
	}
	res = fetch_to_dev_buffer(scp, arr, param_len);
	if (-1 == res)
		return DID_ERROR << 16;
	else if (sdebug_verbose && (res < param_len))
		sdev_printk(KERN_INFO, scp->device,
			    "%s: cdb indicated=%d, IO sent=%d bytes\n",
			    __func__, param_len, res);
	/* Parse the mode parameter header (4 bytes for (6), 8 for (10)). */
	md_len = mselect6 ? (arr[0] + 1) : (get_unaligned_be16(arr + 0) + 2);
	bd_len = mselect6 ? arr[3] : get_unaligned_be16(arr + 6);
	off = bd_len + (mselect6 ? 4 : 8);
	/* NOTE(review): MODE DATA LENGTH is reserved (zero) for MODE SELECT,
	 * hence md_len > 2 is rejected; off must fall inside received data. */
	if (md_len > 2 || off >= res) {
		mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
		return check_condition_result;
	}
	mpage = arr[off] & 0x3f;
	ps = !!(arr[off] & 0x80);	/* PS bit is reserved in MODE SELECT */
	if (ps) {
		mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 7);
		return check_condition_result;
	}
	spf = !!(arr[off] & 0x40);	/* subpage format */
	pg_len = spf ? (get_unaligned_be16(arr + off + 2) + 4) :
		       (arr[off + 1] + 2);
	if ((pg_len + off) > param_len) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST,
				PARAMETER_LIST_LENGTH_ERR, 0);
		return check_condition_result;
	}
	/* Apply the page if its length matches the current page image. */
	switch (mpage) {
	case 0x8:      /* Caching Mode page */
		if (caching_pg[1] == arr[off + 1]) {
			memcpy(caching_pg + 2, arr + off + 2,
			       sizeof(caching_pg) - 2);
			goto set_mode_changed_ua;
		}
		break;
	case 0xa:      /* Control Mode page */
		if (ctrl_m_pg[1] == arr[off + 1]) {
			memcpy(ctrl_m_pg + 2, arr + off + 2,
			       sizeof(ctrl_m_pg) - 2);
			if (ctrl_m_pg[4] & 0x8)
				sdebug_wp = true;
			else
				sdebug_wp = false;
			sdebug_dsense = !!(ctrl_m_pg[2] & 0x4);
			goto set_mode_changed_ua;
		}
		break;
	case 0x1c:      /* Informational Exceptions Mode page */
		if (iec_m_pg[1] == arr[off + 1]) {
			memcpy(iec_m_pg + 2, arr + off + 2,
			       sizeof(iec_m_pg) - 2);
			goto set_mode_changed_ua;
		}
		break;
	default:
		break;
	}
	mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 5);
	return check_condition_result;
set_mode_changed_ua:
	/* Report a "mode parameters changed" unit attention to this device. */
	set_bit(SDEBUG_UA_MODE_CHANGED, devip->uas_bm);
	return 0;
}
2957 
static int resp_temp_l_pg(unsigned char *arr)
{
	/*
	 * Temperature log page body: two fixed log parameters,
	 * parameter 0x0000 (current temperature, 38 C) and
	 * parameter 0x0001 (reference temperature, 65 C).
	 */
	static const unsigned char temp_l_pg[] = {
		0x0, 0x0, 0x3, 0x2, 0x0, 38,	/* current temperature */
		0x0, 0x1, 0x3, 0x2, 0x0, 65,	/* reference temperature */
	};

	memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
	return sizeof(temp_l_pg);
}
2967 
2968 static int resp_ie_l_pg(unsigned char *arr)
2969 {
2970 	unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
2971 		};
2972 
2973 	memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
2974 	if (iec_m_pg[2] & 0x4) {	/* TEST bit set */
2975 		arr[4] = THRESHOLD_EXCEEDED;
2976 		arr[5] = 0xff;
2977 	}
2978 	return sizeof(ie_l_pg);
2979 }
2980 
static int resp_env_rep_l_spg(unsigned char *arr)
{
	/*
	 * Environmental reporting subpage body: one temperature report
	 * descriptor followed by one relative-humidity report descriptor,
	 * both with fixed sample values.
	 */
	static const unsigned char env_rep_l_spg[] = {
		0x0, 0x0, 0x23, 0x8,
		0x0, 40, 72, 0xff, 45, 18, 0, 0,
		0x1, 0x0, 0x23, 0x8,
		0x0, 55, 72, 35, 55, 45, 0, 0,
	};

	memcpy(arr, env_rep_l_spg, sizeof(env_rep_l_spg));
	return sizeof(env_rep_l_spg);
}
2992 
2993 #define SDEBUG_MAX_LSENSE_SZ 512
2994 
/*
 * Respond to LOG SENSE.  Supports the supported-pages page (0x0),
 * temperature (0xd), informational exceptions (0x2f), the matching
 * supported-subpages lists (subpage 0xff) and the environment reporting
 * subpage (0xd/0x1).  PPC and SP bits are rejected.
 */
static int resp_log_sense(struct scsi_cmnd *scp,
			  struct sdebug_dev_info *devip)
{
	int ppc, sp, pcode, subpcode;
	u32 alloc_len, len, n;
	unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
	unsigned char *cmd = scp->cmnd;

	memset(arr, 0, sizeof(arr));
	ppc = cmd[1] & 0x2;	/* parameter pointer control: unsupported */
	sp = cmd[1] & 0x1;	/* save parameters: unsupported */
	if (ppc || sp) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, ppc ? 1 : 0);
		return check_condition_result;
	}
	pcode = cmd[2] & 0x3f;
	subpcode = cmd[3] & 0xff;
	alloc_len = get_unaligned_be16(cmd + 7);
	arr[0] = pcode;
	if (0 == subpcode) {
		switch (pcode) {
		case 0x0:	/* Supported log pages log page */
			n = 4;
			arr[n++] = 0x0;		/* this page */
			arr[n++] = 0xd;		/* Temperature */
			arr[n++] = 0x2f;	/* Informational exceptions */
			arr[3] = n - 4;		/* page length */
			break;
		case 0xd:	/* Temperature log page */
			arr[3] = resp_temp_l_pg(arr + 4);
			break;
		case 0x2f:	/* Informational exceptions log page */
			arr[3] = resp_ie_l_pg(arr + 4);
			break;
		default:
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
			return check_condition_result;
		}
	} else if (0xff == subpcode) {
		arr[0] |= 0x40;		/* SPF bit: subpage format */
		arr[1] = subpcode;
		switch (pcode) {
		case 0x0:	/* Supported log pages and subpages log page */
			n = 4;
			arr[n++] = 0x0;
			arr[n++] = 0x0;		/* 0,0 page */
			arr[n++] = 0x0;
			arr[n++] = 0xff;	/* this page */
			arr[n++] = 0xd;
			arr[n++] = 0x0;		/* Temperature */
			arr[n++] = 0xd;
			arr[n++] = 0x1;		/* Environment reporting */
			arr[n++] = 0xd;
			arr[n++] = 0xff;	/* all 0xd subpages */
			arr[n++] = 0x2f;
			arr[n++] = 0x0;	/* Informational exceptions */
			arr[n++] = 0x2f;
			arr[n++] = 0xff;	/* all 0x2f subpages */
			arr[3] = n - 4;
			break;
		case 0xd:	/* Temperature subpages */
			n = 4;
			arr[n++] = 0xd;
			arr[n++] = 0x0;		/* Temperature */
			arr[n++] = 0xd;
			arr[n++] = 0x1;		/* Environment reporting */
			arr[n++] = 0xd;
			arr[n++] = 0xff;	/* these subpages */
			arr[3] = n - 4;
			break;
		case 0x2f:	/* Informational exceptions subpages */
			n = 4;
			arr[n++] = 0x2f;
			arr[n++] = 0x0;		/* Informational exceptions */
			arr[n++] = 0x2f;
			arr[n++] = 0xff;	/* these subpages */
			arr[3] = n - 4;
			break;
		default:
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
			return check_condition_result;
		}
	} else if (subpcode > 0) {
		arr[0] |= 0x40;
		arr[1] = subpcode;
		if (pcode == 0xd && subpcode == 1)
			arr[3] = resp_env_rep_l_spg(arr + 4);
		else {
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
			return check_condition_result;
		}
	} else {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
		return check_condition_result;
	}
	/* header (4) + page length field gives total bytes available */
	len = min_t(u32, get_unaligned_be16(arr + 2) + 4, alloc_len);
	/* NOTE(review): clamp uses SDEBUG_MAX_INQ_ARR_SZ rather than
	 * SDEBUG_MAX_LSENSE_SZ — looks like a copy-over from the INQUIRY
	 * path; harmless if INQ size <= LSENSE size, but worth confirming. */
	return fill_from_dev_buffer(scp, arr,
		    min_t(u32, len, SDEBUG_MAX_INQ_ARR_SZ));
}
3094 
3095 static inline bool sdebug_dev_is_zoned(struct sdebug_dev_info *devip)
3096 {
3097 	return devip->nr_zones != 0;
3098 }
3099 
/* Map an LBA to the sdeb_zone_state slot that tracks its zone. */
static struct sdeb_zone_state *zbc_zone(struct sdebug_dev_info *devip,
					unsigned long long lba)
{
	u32 zno = lba >> devip->zsize_shift;
	struct sdeb_zone_state *zsp;

	/* With zcap == zsize there are no gap zones: direct index. */
	if (devip->zcap == devip->zsize || zno < devip->nr_conv_zones)
		return &devip->zstate[zno];

	/*
	 * If the zone capacity is less than the zone size, adjust for gap
	 * zones.
	 */
	/* NOTE(review): the doubling assumes each sequential zone beyond the
	 * conventional ones is paired with a gap zone in zstate[]. */
	zno = 2 * zno - devip->nr_conv_zones;
	WARN_ONCE(zno >= devip->nr_zones, "%u > %u\n", zno, devip->nr_zones);
	zsp = &devip->zstate[zno];
	/* LBA past the sequential zone's end falls in the following gap zone. */
	if (lba >= zsp->z_start + zsp->z_size)
		zsp++;
	WARN_ON_ONCE(lba >= zsp->z_start + zsp->z_size);
	return zsp;
}
3121 
3122 static inline bool zbc_zone_is_conv(struct sdeb_zone_state *zsp)
3123 {
3124 	return zsp->z_type == ZBC_ZTYPE_CNV;
3125 }
3126 
3127 static inline bool zbc_zone_is_gap(struct sdeb_zone_state *zsp)
3128 {
3129 	return zsp->z_type == ZBC_ZTYPE_GAP;
3130 }
3131 
3132 static inline bool zbc_zone_is_seq(struct sdeb_zone_state *zsp)
3133 {
3134 	return !zbc_zone_is_conv(zsp) && !zbc_zone_is_gap(zsp);
3135 }
3136 
3137 static void zbc_close_zone(struct sdebug_dev_info *devip,
3138 			   struct sdeb_zone_state *zsp)
3139 {
3140 	enum sdebug_z_cond zc;
3141 
3142 	if (!zbc_zone_is_seq(zsp))
3143 		return;
3144 
3145 	zc = zsp->z_cond;
3146 	if (!(zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN))
3147 		return;
3148 
3149 	if (zc == ZC2_IMPLICIT_OPEN)
3150 		devip->nr_imp_open--;
3151 	else
3152 		devip->nr_exp_open--;
3153 
3154 	if (zsp->z_wp == zsp->z_start) {
3155 		zsp->z_cond = ZC1_EMPTY;
3156 	} else {
3157 		zsp->z_cond = ZC4_CLOSED;
3158 		devip->nr_closed++;
3159 	}
3160 }
3161 
3162 static void zbc_close_imp_open_zone(struct sdebug_dev_info *devip)
3163 {
3164 	struct sdeb_zone_state *zsp = &devip->zstate[0];
3165 	unsigned int i;
3166 
3167 	for (i = 0; i < devip->nr_zones; i++, zsp++) {
3168 		if (zsp->z_cond == ZC2_IMPLICIT_OPEN) {
3169 			zbc_close_zone(devip, zsp);
3170 			return;
3171 		}
3172 	}
3173 }
3174 
/*
 * Open a sequential zone, implicitly (on write) or explicitly.  May first
 * close another zone to respect the device's max_open limit.
 */
static void zbc_open_zone(struct sdebug_dev_info *devip,
			  struct sdeb_zone_state *zsp, bool explicit)
{
	enum sdebug_z_cond zc;

	/* Only sequential zones can be opened. */
	if (!zbc_zone_is_seq(zsp))
		return;

	zc = zsp->z_cond;
	/* Already open in the requested mode: nothing to do. */
	if ((explicit && zc == ZC3_EXPLICIT_OPEN) ||
	    (!explicit && zc == ZC2_IMPLICIT_OPEN))
		return;

	/* Close an implicit open zone if necessary */
	if (explicit && zsp->z_cond == ZC2_IMPLICIT_OPEN)
		zbc_close_zone(devip, zsp);
	else if (devip->max_open &&
		 devip->nr_imp_open + devip->nr_exp_open >= devip->max_open)
		zbc_close_imp_open_zone(devip);

	/* Fix up counters before changing the zone's condition. */
	if (zsp->z_cond == ZC4_CLOSED)
		devip->nr_closed--;
	if (explicit) {
		zsp->z_cond = ZC3_EXPLICIT_OPEN;
		devip->nr_exp_open++;
	} else {
		zsp->z_cond = ZC2_IMPLICIT_OPEN;
		devip->nr_imp_open++;
	}
}
3205 
3206 static inline void zbc_set_zone_full(struct sdebug_dev_info *devip,
3207 				     struct sdeb_zone_state *zsp)
3208 {
3209 	switch (zsp->z_cond) {
3210 	case ZC2_IMPLICIT_OPEN:
3211 		devip->nr_imp_open--;
3212 		break;
3213 	case ZC3_EXPLICIT_OPEN:
3214 		devip->nr_exp_open--;
3215 		break;
3216 	default:
3217 		WARN_ONCE(true, "Invalid zone %llu condition %x\n",
3218 			  zsp->z_start, zsp->z_cond);
3219 		break;
3220 	}
3221 	zsp->z_cond = ZC5_FULL;
3222 }
3223 
/*
 * Advance the write pointer after a write of num blocks starting at lba,
 * marking zones FULL as their write pointers reach the zone end.
 */
static void zbc_inc_wp(struct sdebug_dev_info *devip,
		       unsigned long long lba, unsigned int num)
{
	struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
	unsigned long long n, end, zend = zsp->z_start + zsp->z_size;

	/* Conventional and gap zones have no write pointer. */
	if (!zbc_zone_is_seq(zsp))
		return;

	/* Sequential-write-required: writes are WP-aligned and stay within
	 * one zone (enforced in check_zbc_access_params()), so a simple
	 * advance suffices. */
	if (zsp->z_type == ZBC_ZTYPE_SWR) {
		zsp->z_wp += num;
		if (zsp->z_wp >= zend)
			zbc_set_zone_full(devip, zsp);
		return;
	}

	/* Otherwise the write may be unaligned and may span zones; walk the
	 * affected zones, advancing each write pointer as needed. */
	while (num) {
		if (lba != zsp->z_wp)
			zsp->z_non_seq_resource = true;

		end = lba + num;
		if (end >= zend) {
			/* write runs past this zone: consume up to zend */
			n = zend - lba;
			zsp->z_wp = zend;
		} else if (end > zsp->z_wp) {
			n = num;
			zsp->z_wp = end;
		} else {
			/* entirely below the WP: no pointer movement */
			n = num;
		}
		if (zsp->z_wp >= zend)
			zbc_set_zone_full(devip, zsp);

		num -= n;
		lba += n;
		if (num) {
			zsp++;
			zend = zsp->z_start + zsp->z_size;
		}
	}
}
3265 
/*
 * Validate a read or write against ZBC zone rules.  Returns 0 when the
 * access is permitted, otherwise sets sense data on scp and returns
 * check_condition_result.  May implicitly open the target zone on write.
 */
static int check_zbc_access_params(struct scsi_cmnd *scp,
			unsigned long long lba, unsigned int num, bool write)
{
	struct scsi_device *sdp = scp->device;
	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
	struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
	struct sdeb_zone_state *zsp_end = zbc_zone(devip, lba + num - 1);

	if (!write) {
		/* For host-managed, reads cannot cross zone types boundaries */
		if (zsp->z_type != zsp_end->z_type) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST,
					LBA_OUT_OF_RANGE,
					READ_INVDATA_ASCQ);
			return check_condition_result;
		}
		return 0;
	}

	/* Writing into a gap zone is not allowed */
	if (zbc_zone_is_gap(zsp)) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE,
				ATTEMPT_ACCESS_GAP);
		return check_condition_result;
	}

	/* No restrictions for writes within conventional zones */
	if (zbc_zone_is_conv(zsp)) {
		/* ...but they must not spill into a non-conventional zone. */
		if (!zbc_zone_is_conv(zsp_end)) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST,
					LBA_OUT_OF_RANGE,
					WRITE_BOUNDARY_ASCQ);
			return check_condition_result;
		}
		return 0;
	}

	if (zsp->z_type == ZBC_ZTYPE_SWR) {
		/* Writes cannot cross sequential zone boundaries */
		if (zsp_end != zsp) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST,
					LBA_OUT_OF_RANGE,
					WRITE_BOUNDARY_ASCQ);
			return check_condition_result;
		}
		/* Cannot write full zones */
		if (zsp->z_cond == ZC5_FULL) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST,
					INVALID_FIELD_IN_CDB, 0);
			return check_condition_result;
		}
		/* Writes must be aligned to the zone WP */
		if (lba != zsp->z_wp) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST,
					LBA_OUT_OF_RANGE,
					UNALIGNED_WRITE_ASCQ);
			return check_condition_result;
		}
	}

	/* Handle implicit open of closed and empty zones */
	if (zsp->z_cond == ZC1_EMPTY || zsp->z_cond == ZC4_CLOSED) {
		if (devip->max_open &&
		    devip->nr_exp_open >= devip->max_open) {
			mk_sense_buffer(scp, DATA_PROTECT,
					INSUFF_RES_ASC,
					INSUFF_ZONE_ASCQ);
			return check_condition_result;
		}
		zbc_open_zone(devip, zsp, false);
	}

	return 0;
}
3340 
/*
 * Common validation for media-access commands: range check against the
 * capacity, transfer-length check, write-protect check, and (for zoned
 * devices) the ZBC zone rules.  Returns 0 or check_condition_result.
 */
static inline int check_device_access_params
			(struct scsi_cmnd *scp, unsigned long long lba,
			 unsigned int num, bool write)
{
	struct scsi_device *sdp = scp->device;
	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;

	if (lba + num > sdebug_capacity) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		return check_condition_result;
	}
	/* transfer length excessive (tie in to block limits VPD page) */
	if (num > sdebug_store_sectors) {
		/* needs work to find which cdb byte 'num' comes from */
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		return check_condition_result;
	}
	if (write && unlikely(sdebug_wp)) {
		mk_sense_buffer(scp, DATA_PROTECT, WRITE_PROTECTED, 0x2);
		return check_condition_result;
	}
	if (sdebug_dev_is_zoned(devip))
		return check_zbc_access_params(scp, lba, num, write);

	return 0;
}
3367 
3368 /*
3369  * Note: if BUG_ON() fires it usually indicates a problem with the parser
3370  * tables. Perhaps a missing F_FAKE_RW or FF_MEDIA_IO flag. Response functions
3371  * that access any of the "stores" in struct sdeb_store_info should call this
3372  * function with bug_if_fake_rw set to true.
3373  */
3374 static inline struct sdeb_store_info *devip2sip(struct sdebug_dev_info *devip,
3375 						bool bug_if_fake_rw)
3376 {
3377 	if (sdebug_fake_rw) {
3378 		BUG_ON(bug_if_fake_rw);	/* See note above */
3379 		return NULL;
3380 	}
3381 	return xa_load(per_store_ap, devip->sdbg_host->si_idx);
3382 }
3383 
3384 /* Returns number of bytes copied or -1 if error. */
/* Returns number of bytes copied or -1 if error. */
static int do_device_access(struct sdeb_store_info *sip, struct scsi_cmnd *scp,
			    u32 sg_skip, u64 lba, u32 num, bool do_write,
			    u8 group_number)
{
	int ret;
	u64 block, rest = 0;
	enum dma_data_direction dir;
	struct scsi_data_buffer *sdb = &scp->sdb;
	u8 *fsp;

	if (do_write) {
		dir = DMA_TO_DEVICE;
		write_since_sync = true;
	} else {
		dir = DMA_FROM_DEVICE;
	}

	/* Nothing to transfer, or no backing store (fake_rw). */
	if (!sdb->length || !sip)
		return 0;
	if (scp->sc_data_direction != dir)
		return -1;

	/* Per-group write statistics (exposed via debugfs). */
	if (do_write && group_number < ARRAY_SIZE(writes_by_group_number))
		atomic_long_inc(&writes_by_group_number[group_number]);

	fsp = sip->storep;

	/* LBAs wrap modulo the store size; 'rest' is the wrapped tail. */
	block = do_div(lba, sdebug_store_sectors);
	if (block + num > sdebug_store_sectors)
		rest = block + num - sdebug_store_sectors;

	/* First segment: from 'block' up to the end of the store. */
	ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
		   fsp + (block * sdebug_sector_size),
		   (num - rest) * sdebug_sector_size, sg_skip, do_write);
	if (ret != (num - rest) * sdebug_sector_size)
		return ret;

	/* Second segment: wrapped around to the start of the store. */
	if (rest) {
		ret += sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
			    fsp, rest * sdebug_sector_size,
			    sg_skip + ((num - rest) * sdebug_sector_size),
			    do_write);
	}

	return ret;
}
3431 
3432 /* Returns number of bytes copied or -1 if error. */
3433 static int do_dout_fetch(struct scsi_cmnd *scp, u32 num, u8 *doutp)
3434 {
3435 	struct scsi_data_buffer *sdb = &scp->sdb;
3436 
3437 	if (!sdb->length)
3438 		return 0;
3439 	if (scp->sc_data_direction != DMA_TO_DEVICE)
3440 		return -1;
3441 	return sg_copy_buffer(sdb->table.sgl, sdb->table.nents, doutp,
3442 			      num * sdebug_sector_size, 0, true);
3443 }
3444 
3445 /* If sip->storep+lba compares equal to arr(num), then copy top half of
3446  * arr into sip->storep+lba and return true. If comparison fails then
3447  * return false. */
3448 static bool comp_write_worker(struct sdeb_store_info *sip, u64 lba, u32 num,
3449 			      const u8 *arr, bool compare_only)
3450 {
3451 	bool res;
3452 	u64 block, rest = 0;
3453 	u32 store_blks = sdebug_store_sectors;
3454 	u32 lb_size = sdebug_sector_size;
3455 	u8 *fsp = sip->storep;
3456 
3457 	block = do_div(lba, store_blks);
3458 	if (block + num > store_blks)
3459 		rest = block + num - store_blks;
3460 
3461 	res = !memcmp(fsp + (block * lb_size), arr, (num - rest) * lb_size);
3462 	if (!res)
3463 		return res;
3464 	if (rest)
3465 		res = memcmp(fsp, arr + ((num - rest) * lb_size),
3466 			     rest * lb_size);
3467 	if (!res)
3468 		return res;
3469 	if (compare_only)
3470 		return true;
3471 	arr += num * lb_size;
3472 	memcpy(fsp + (block * lb_size), arr, (num - rest) * lb_size);
3473 	if (rest)
3474 		memcpy(fsp, arr + ((num - rest) * lb_size), rest * lb_size);
3475 	return res;
3476 }
3477 
3478 static __be16 dif_compute_csum(const void *buf, int len)
3479 {
3480 	__be16 csum;
3481 
3482 	if (sdebug_guard)
3483 		csum = (__force __be16)ip_compute_csum(buf, len);
3484 	else
3485 		csum = cpu_to_be16(crc_t10dif(buf, len));
3486 
3487 	return csum;
3488 }
3489 
/*
 * Verify one protection-information tuple against the sector's data.
 * Returns 0 on success, 0x01 on guard-tag mismatch, 0x03 on reference-tag
 * mismatch (values feed the sense data built by the callers).
 */
static int dif_verify(struct t10_pi_tuple *sdt, const void *data,
		      sector_t sector, u32 ei_lba)
{
	__be16 csum = dif_compute_csum(data, sdebug_sector_size);

	if (sdt->guard_tag != csum) {
		pr_err("GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
			(unsigned long)sector,
			be16_to_cpu(sdt->guard_tag),
			be16_to_cpu(csum));
		return 0x01;
	}
	/* Type 1: reference tag is the low 32 bits of the sector number. */
	if (sdebug_dif == T10_PI_TYPE1_PROTECTION &&
	    be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
		pr_err("REF check failed on sector %lu\n",
			(unsigned long)sector);
		return 0x03;
	}
	/* Type 2: reference tag is the expected initial LBA from the CDB. */
	if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
	    be32_to_cpu(sdt->ref_tag) != ei_lba) {
		pr_err("REF check failed on sector %lu\n",
			(unsigned long)sector);
		return 0x03;
	}
	return 0;
}
3516 
/*
 * Copy protection-information tuples between the command's protection
 * scatter list and the store's dif_storep area, in either direction.
 * Handles wrap-around at the end of the store.
 */
static void dif_copy_prot(struct scsi_cmnd *scp, sector_t sector,
			  unsigned int sectors, bool read)
{
	size_t resid;
	void *paddr;
	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
						scp->device->hostdata, true);
	struct t10_pi_tuple *dif_storep = sip->dif_storep;
	const void *dif_store_end = dif_storep + sdebug_store_sectors;
	struct sg_mapping_iter miter;

	/* Bytes of protection data to copy into sgl */
	resid = sectors * sizeof(*dif_storep);

	sg_miter_start(&miter, scsi_prot_sglist(scp),
		       scsi_prot_sg_count(scp), SG_MITER_ATOMIC |
		       (read ? SG_MITER_TO_SG : SG_MITER_FROM_SG));

	while (sg_miter_next(&miter) && resid > 0) {
		size_t len = min_t(size_t, miter.length, resid);
		void *start = dif_store(sip, sector);
		size_t rest = 0;

		/* 'rest' is the part that wraps past the end of dif_storep. */
		if (dif_store_end < start + len)
			rest = start + len - dif_store_end;

		paddr = miter.addr;

		if (read)
			memcpy(paddr, start, len - rest);
		else
			memcpy(start, paddr, len - rest);

		/* Wrapped tail continues at the start of dif_storep. */
		if (rest) {
			if (read)
				memcpy(paddr + len - rest, dif_storep, rest);
			else
				memcpy(dif_storep, paddr + len - rest, rest);
		}

		sector += len / sizeof(*dif_storep);
		resid -= len;
	}
	sg_miter_stop(&miter);
}
3562 
/*
 * Verify the stored T10 PI tuples for @sectors sectors starting at
 * @start_sec, then copy them into the command's protection sgl.
 * Returns 0 on success, or the dif_verify() error code (0x01 guard
 * tag error, 0x03 reference tag error).  Tuples whose app_tag is
 * 0xffff are skipped (escape value).
 */
static int prot_verify_read(struct scsi_cmnd *scp, sector_t start_sec,
			    unsigned int sectors, u32 ei_lba)
{
	int ret = 0;
	unsigned int i;
	sector_t sector;
	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
						scp->device->hostdata, true);
	struct t10_pi_tuple *sdt;

	for (i = 0; i < sectors; i++, ei_lba++) {
		sector = start_sec + i;
		sdt = dif_store(sip, sector);

		if (sdt->app_tag == cpu_to_be16(0xffff))
			continue;

		/*
		 * Because scsi_debug acts as both initiator and
		 * target we proceed to verify the PI even if
		 * RDPROTECT=3. This is done so the "initiator" knows
		 * which type of error to return. Otherwise we would
		 * have to iterate over the PI twice.
		 */
		if (scp->cmnd[1] >> 5) { /* RDPROTECT */
			ret = dif_verify(sdt, lba2fake_store(sip, sector),
					 sector, ei_lba);
			if (ret) {
				dif_errors++;
				break;
			}
		}
	}

	/* always hand PI back to the initiator, even after a failure */
	dif_copy_prot(scp, start_sec, sectors, true);
	dix_reads++;

	return ret;
}
3602 
3603 static inline void
3604 sdeb_read_lock(struct sdeb_store_info *sip)
3605 {
3606 	if (sdebug_no_rwlock) {
3607 		if (sip)
3608 			__acquire(&sip->macc_lck);
3609 		else
3610 			__acquire(&sdeb_fake_rw_lck);
3611 	} else {
3612 		if (sip)
3613 			read_lock(&sip->macc_lck);
3614 		else
3615 			read_lock(&sdeb_fake_rw_lck);
3616 	}
3617 }
3618 
3619 static inline void
3620 sdeb_read_unlock(struct sdeb_store_info *sip)
3621 {
3622 	if (sdebug_no_rwlock) {
3623 		if (sip)
3624 			__release(&sip->macc_lck);
3625 		else
3626 			__release(&sdeb_fake_rw_lck);
3627 	} else {
3628 		if (sip)
3629 			read_unlock(&sip->macc_lck);
3630 		else
3631 			read_unlock(&sdeb_fake_rw_lck);
3632 	}
3633 }
3634 
3635 static inline void
3636 sdeb_write_lock(struct sdeb_store_info *sip)
3637 {
3638 	if (sdebug_no_rwlock) {
3639 		if (sip)
3640 			__acquire(&sip->macc_lck);
3641 		else
3642 			__acquire(&sdeb_fake_rw_lck);
3643 	} else {
3644 		if (sip)
3645 			write_lock(&sip->macc_lck);
3646 		else
3647 			write_lock(&sdeb_fake_rw_lck);
3648 	}
3649 }
3650 
3651 static inline void
3652 sdeb_write_unlock(struct sdeb_store_info *sip)
3653 {
3654 	if (sdebug_no_rwlock) {
3655 		if (sip)
3656 			__release(&sip->macc_lck);
3657 		else
3658 			__release(&sdeb_fake_rw_lck);
3659 	} else {
3660 		if (sip)
3661 			write_unlock(&sip->macc_lck);
3662 		else
3663 			write_unlock(&sdeb_fake_rw_lck);
3664 	}
3665 }
3666 
/*
 * Respond to READ(6/10/12/16/32) and the read half of XDWRITEREAD(10).
 * Decodes LBA/transfer length from the CDB, validates access
 * parameters, optionally verifies T10 PI (DIX+DIF), then copies data
 * from the backing store into the data-in buffer.  Returns 0 on
 * success or a SCSI result/check-condition code.
 */
static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	bool check_prot;
	u32 num;
	u32 ei_lba;	/* expected initial LBA / ref tag seed (READ(32)) */
	int ret;
	u64 lba;
	struct sdeb_store_info *sip = devip2sip(devip, true);
	u8 *cmd = scp->cmnd;

	switch (cmd[0]) {
	case READ_16:
		ei_lba = 0;
		lba = get_unaligned_be64(cmd + 2);
		num = get_unaligned_be32(cmd + 10);
		check_prot = true;
		break;
	case READ_10:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be16(cmd + 7);
		check_prot = true;
		break;
	case READ_6:
		ei_lba = 0;
		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
		      (u32)(cmd[1] & 0x1f) << 16;
		/* READ(6): transfer length 0 means 256 blocks */
		num = (0 == cmd[4]) ? 256 : cmd[4];
		check_prot = true;
		break;
	case READ_12:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be32(cmd + 6);
		check_prot = true;
		break;
	case XDWRITEREAD_10:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be16(cmd + 7);
		check_prot = false;
		break;
	default:	/* assume READ(32) */
		lba = get_unaligned_be64(cmd + 12);
		ei_lba = get_unaligned_be32(cmd + 20);
		num = get_unaligned_be32(cmd + 28);
		check_prot = false;
		break;
	}
	if (unlikely(have_dif_prot && check_prot)) {
		/* type 2 protection forbids RDPROTECT in these CDBs */
		if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
		    (cmd[1] & 0xe0)) {
			mk_sense_invalid_opcode(scp);
			return check_condition_result;
		}
		if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
		     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
		    (cmd[1] & 0xe0) == 0)
			sdev_printk(KERN_ERR, scp->device, "Unprotected RD "
				    "to DIF device\n");
	}
	/* error injection: halve the transfer to force a short read */
	if (unlikely((sdebug_opts & SDEBUG_OPT_SHORT_TRANSFER) &&
		     atomic_read(&sdeb_inject_pending))) {
		num /= 2;
		atomic_set(&sdeb_inject_pending, 0);
	}

	ret = check_device_access_params(scp, lba, num, false);
	if (ret)
		return ret;
	/* error injection: medium error within the configured LBA window */
	if (unlikely((SDEBUG_OPT_MEDIUM_ERR & sdebug_opts) &&
		     (lba <= (sdebug_medium_error_start + sdebug_medium_error_count - 1)) &&
		     ((lba + num) > sdebug_medium_error_start))) {
		/* claim unrecoverable read error */
		mk_sense_buffer(scp, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
		/* set info field and valid bit for fixed descriptor */
		if (0x70 == (scp->sense_buffer[0] & 0x7f)) {
			scp->sense_buffer[0] |= 0x80;	/* Valid bit */
			ret = (lba < OPT_MEDIUM_ERR_ADDR)
			      ? OPT_MEDIUM_ERR_ADDR : (int)lba;
			put_unaligned_be32(ret, scp->sense_buffer + 3);
		}
		scsi_set_resid(scp, scsi_bufflen(scp));
		return check_condition_result;
	}

	sdeb_read_lock(sip);

	/* DIX + T10 DIF */
	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
		switch (prot_verify_read(scp, lba, num, ei_lba)) {
		case 1: /* Guard tag error */
			if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */
				sdeb_read_unlock(sip);
				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
				return check_condition_result;
			} else if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) {
				sdeb_read_unlock(sip);
				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
				return illegal_condition_result;
			}
			break;
		case 3: /* Reference tag error */
			if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */
				sdeb_read_unlock(sip);
				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3);
				return check_condition_result;
			} else if (scp->prot_flags & SCSI_PROT_REF_CHECK) {
				sdeb_read_unlock(sip);
				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3);
				return illegal_condition_result;
			}
			break;
		}
	}

	ret = do_device_access(sip, scp, 0, lba, num, false, 0);
	sdeb_read_unlock(sip);
	if (unlikely(ret == -1))
		return DID_ERROR << 16;

	scsi_set_resid(scp, scsi_bufflen(scp) - ret);

	/* error injection: recovered / DIF / DIX errors after the read */
	if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
		     atomic_read(&sdeb_inject_pending))) {
		if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
			mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
			atomic_set(&sdeb_inject_pending, 0);
			return check_condition_result;
		} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
			/* Logical block guard check failed */
			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
			atomic_set(&sdeb_inject_pending, 0);
			return illegal_condition_result;
		} else if (SDEBUG_OPT_DIX_ERR & sdebug_opts) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
			atomic_set(&sdeb_inject_pending, 0);
			return illegal_condition_result;
		}
	}
	return 0;
}
3809 
/*
 * Verify the T10 PI tuples supplied with a protected WRITE against the
 * data-out buffer, then commit them to the dif store.  Walks the
 * protection and data scatter-gather lists in lockstep (one PI tuple
 * per logical block).  Returns 0 on success, 0x01 on guard tag error
 * (also used for an sgl underrun), 0x03 on reference tag error.
 */
static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
			     unsigned int sectors, u32 ei_lba)
{
	int ret;
	struct t10_pi_tuple *sdt;
	void *daddr;
	sector_t sector = start_sec;
	int ppage_offset;	/* offset within current protection page */
	int dpage_offset;	/* offset within current data page */
	struct sg_mapping_iter diter;
	struct sg_mapping_iter piter;

	BUG_ON(scsi_sg_count(SCpnt) == 0);
	BUG_ON(scsi_prot_sg_count(SCpnt) == 0);

	sg_miter_start(&piter, scsi_prot_sglist(SCpnt),
			scsi_prot_sg_count(SCpnt),
			SG_MITER_ATOMIC | SG_MITER_FROM_SG);
	sg_miter_start(&diter, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
			SG_MITER_ATOMIC | SG_MITER_FROM_SG);

	/* For each protection page */
	while (sg_miter_next(&piter)) {
		dpage_offset = 0;
		if (WARN_ON(!sg_miter_next(&diter))) {
			ret = 0x01;
			goto out;
		}

		for (ppage_offset = 0; ppage_offset < piter.length;
		     ppage_offset += sizeof(struct t10_pi_tuple)) {
			/* If we're at the end of the current
			 * data page advance to the next one
			 */
			if (dpage_offset >= diter.length) {
				if (WARN_ON(!sg_miter_next(&diter))) {
					ret = 0x01;
					goto out;
				}
				dpage_offset = 0;
			}

			sdt = piter.addr + ppage_offset;
			daddr = diter.addr + dpage_offset;

			if (SCpnt->cmnd[1] >> 5 != 3) { /* WRPROTECT */
				ret = dif_verify(sdt, daddr, sector, ei_lba);
				if (ret)
					goto out;
			}

			sector++;
			ei_lba++;
			dpage_offset += sdebug_sector_size;
		}
		diter.consumed = dpage_offset;
		sg_miter_stop(&diter);
	}
	sg_miter_stop(&piter);

	/* all tuples verified (or WRPROTECT==3): persist them */
	dif_copy_prot(SCpnt, start_sec, sectors, false);
	dix_writes++;

	return 0;

out:
	dif_errors++;
	sg_miter_stop(&diter);
	sg_miter_stop(&piter);
	return ret;
}
3881 
3882 static unsigned long lba_to_map_index(sector_t lba)
3883 {
3884 	if (sdebug_unmap_alignment)
3885 		lba += sdebug_unmap_granularity - sdebug_unmap_alignment;
3886 	sector_div(lba, sdebug_unmap_granularity);
3887 	return lba;
3888 }
3889 
3890 static sector_t map_index_to_lba(unsigned long index)
3891 {
3892 	sector_t lba = index * sdebug_unmap_granularity;
3893 
3894 	if (sdebug_unmap_alignment)
3895 		lba -= sdebug_unmap_granularity - sdebug_unmap_alignment;
3896 	return lba;
3897 }
3898 
3899 static unsigned int map_state(struct sdeb_store_info *sip, sector_t lba,
3900 			      unsigned int *num)
3901 {
3902 	sector_t end;
3903 	unsigned int mapped;
3904 	unsigned long index;
3905 	unsigned long next;
3906 
3907 	index = lba_to_map_index(lba);
3908 	mapped = test_bit(index, sip->map_storep);
3909 
3910 	if (mapped)
3911 		next = find_next_zero_bit(sip->map_storep, map_size, index);
3912 	else
3913 		next = find_next_bit(sip->map_storep, map_size, index);
3914 
3915 	end = min_t(sector_t, sdebug_store_sectors,  map_index_to_lba(next));
3916 	*num = end - lba;
3917 	return mapped;
3918 }
3919 
3920 static void map_region(struct sdeb_store_info *sip, sector_t lba,
3921 		       unsigned int len)
3922 {
3923 	sector_t end = lba + len;
3924 
3925 	while (lba < end) {
3926 		unsigned long index = lba_to_map_index(lba);
3927 
3928 		if (index < map_size)
3929 			set_bit(index, sip->map_storep);
3930 
3931 		lba = map_index_to_lba(index + 1);
3932 	}
3933 }
3934 
/*
 * Clear provisioning-map bits for [lba, lba+len).  Only granules that
 * are fully covered by the range are unmapped (partial granules keep
 * their data).  With sdebug_lbprz set, the freed data blocks are
 * filled per LBPRZ semantics (zeros for LBPRZ=1, 0xff for LBPRZ=2),
 * and any PI tuples are invalidated with 0xff.
 */
static void unmap_region(struct sdeb_store_info *sip, sector_t lba,
			 unsigned int len)
{
	sector_t end = lba + len;
	u8 *fsp = sip->storep;

	while (lba < end) {
		unsigned long index = lba_to_map_index(lba);

		/* only unmap a granule that lies entirely inside the range */
		if (lba == map_index_to_lba(index) &&
		    lba + sdebug_unmap_granularity <= end &&
		    index < map_size) {
			clear_bit(index, sip->map_storep);
			if (sdebug_lbprz) {  /* for LBPRZ=2 return 0xff_s */
				memset(fsp + lba * sdebug_sector_size,
				       (sdebug_lbprz & 1) ? 0 : 0xff,
				       sdebug_sector_size *
				       sdebug_unmap_granularity);
			}
			if (sip->dif_storep) {
				memset(sip->dif_storep + lba, 0xff,
				       sizeof(*sip->dif_storep) *
				       sdebug_unmap_granularity);
			}
		}
		lba = map_index_to_lba(index + 1);
	}
}
3963 
3964 static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3965 {
3966 	bool check_prot;
3967 	u32 num;
3968 	u8 group = 0;
3969 	u32 ei_lba;
3970 	int ret;
3971 	u64 lba;
3972 	struct sdeb_store_info *sip = devip2sip(devip, true);
3973 	u8 *cmd = scp->cmnd;
3974 
3975 	switch (cmd[0]) {
3976 	case WRITE_16:
3977 		ei_lba = 0;
3978 		lba = get_unaligned_be64(cmd + 2);
3979 		num = get_unaligned_be32(cmd + 10);
3980 		group = cmd[14] & 0x3f;
3981 		check_prot = true;
3982 		break;
3983 	case WRITE_10:
3984 		ei_lba = 0;
3985 		lba = get_unaligned_be32(cmd + 2);
3986 		group = cmd[6] & 0x3f;
3987 		num = get_unaligned_be16(cmd + 7);
3988 		check_prot = true;
3989 		break;
3990 	case WRITE_6:
3991 		ei_lba = 0;
3992 		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
3993 		      (u32)(cmd[1] & 0x1f) << 16;
3994 		num = (0 == cmd[4]) ? 256 : cmd[4];
3995 		check_prot = true;
3996 		break;
3997 	case WRITE_12:
3998 		ei_lba = 0;
3999 		lba = get_unaligned_be32(cmd + 2);
4000 		num = get_unaligned_be32(cmd + 6);
4001 		group = cmd[6] & 0x3f;
4002 		check_prot = true;
4003 		break;
4004 	case 0x53:	/* XDWRITEREAD(10) */
4005 		ei_lba = 0;
4006 		lba = get_unaligned_be32(cmd + 2);
4007 		group = cmd[6] & 0x1f;
4008 		num = get_unaligned_be16(cmd + 7);
4009 		check_prot = false;
4010 		break;
4011 	default:	/* assume WRITE(32) */
4012 		group = cmd[6] & 0x3f;
4013 		lba = get_unaligned_be64(cmd + 12);
4014 		ei_lba = get_unaligned_be32(cmd + 20);
4015 		num = get_unaligned_be32(cmd + 28);
4016 		check_prot = false;
4017 		break;
4018 	}
4019 	if (unlikely(have_dif_prot && check_prot)) {
4020 		if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
4021 		    (cmd[1] & 0xe0)) {
4022 			mk_sense_invalid_opcode(scp);
4023 			return check_condition_result;
4024 		}
4025 		if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
4026 		     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
4027 		    (cmd[1] & 0xe0) == 0)
4028 			sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
4029 				    "to DIF device\n");
4030 	}
4031 
4032 	sdeb_write_lock(sip);
4033 	ret = check_device_access_params(scp, lba, num, true);
4034 	if (ret) {
4035 		sdeb_write_unlock(sip);
4036 		return ret;
4037 	}
4038 
4039 	/* DIX + T10 DIF */
4040 	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
4041 		switch (prot_verify_write(scp, lba, num, ei_lba)) {
4042 		case 1: /* Guard tag error */
4043 			if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) {
4044 				sdeb_write_unlock(sip);
4045 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
4046 				return illegal_condition_result;
4047 			} else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */
4048 				sdeb_write_unlock(sip);
4049 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
4050 				return check_condition_result;
4051 			}
4052 			break;
4053 		case 3: /* Reference tag error */
4054 			if (scp->prot_flags & SCSI_PROT_REF_CHECK) {
4055 				sdeb_write_unlock(sip);
4056 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3);
4057 				return illegal_condition_result;
4058 			} else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */
4059 				sdeb_write_unlock(sip);
4060 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3);
4061 				return check_condition_result;
4062 			}
4063 			break;
4064 		}
4065 	}
4066 
4067 	ret = do_device_access(sip, scp, 0, lba, num, true, group);
4068 	if (unlikely(scsi_debug_lbp()))
4069 		map_region(sip, lba, num);
4070 	/* If ZBC zone then bump its write pointer */
4071 	if (sdebug_dev_is_zoned(devip))
4072 		zbc_inc_wp(devip, lba, num);
4073 	sdeb_write_unlock(sip);
4074 	if (unlikely(-1 == ret))
4075 		return DID_ERROR << 16;
4076 	else if (unlikely(sdebug_verbose &&
4077 			  (ret < (num * sdebug_sector_size))))
4078 		sdev_printk(KERN_INFO, scp->device,
4079 			    "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
4080 			    my_name, num * sdebug_sector_size, ret);
4081 
4082 	if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
4083 		     atomic_read(&sdeb_inject_pending))) {
4084 		if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
4085 			mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
4086 			atomic_set(&sdeb_inject_pending, 0);
4087 			return check_condition_result;
4088 		} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
4089 			/* Logical block guard check failed */
4090 			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
4091 			atomic_set(&sdeb_inject_pending, 0);
4092 			return illegal_condition_result;
4093 		} else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
4094 			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
4095 			atomic_set(&sdeb_inject_pending, 0);
4096 			return illegal_condition_result;
4097 		}
4098 	}
4099 	return 0;
4100 }
4101 
4102 /*
4103  * T10 has only specified WRITE SCATTERED(16) and WRITE SCATTERED(32).
4104  * No READ GATHERED yet (requires bidi or long cdb holding gather list).
4105  */
/*
 * Respond to WRITE SCATTERED(16/32).  The data-out buffer begins with
 * lbdof blocks holding the parameter-list header plus num_lrd LBA
 * range descriptors (32 bytes each); the blocks to be written follow.
 * Each descriptor is validated, PI-verified when applicable, and
 * written in turn.  Returns 0 or a SCSI result/check-condition code.
 */
static int resp_write_scat(struct scsi_cmnd *scp,
			   struct sdebug_dev_info *devip)
{
	u8 *cmd = scp->cmnd;
	u8 *lrdp = NULL;	/* kernel copy of header + descriptors */
	u8 *up;
	struct sdeb_store_info *sip = devip2sip(devip, true);
	u8 wrprotect;
	u16 lbdof, num_lrd, k;
	u32 num, num_by, bt_len, lbdof_blen, sg_off, cum_lb;
	u32 lb_size = sdebug_sector_size;
	u32 ei_lba;
	u64 lba;
	u8 group;
	int ret, res;
	bool is_16;
	static const u32 lrd_size = 32; /* + parameter list header size */

	if (cmd[0] == VARIABLE_LENGTH_CMD) {
		is_16 = false;
		group = cmd[6] & 0x3f;
		wrprotect = (cmd[10] >> 5) & 0x7;
		lbdof = get_unaligned_be16(cmd + 12);
		num_lrd = get_unaligned_be16(cmd + 16);
		bt_len = get_unaligned_be32(cmd + 28);
	} else {        /* that leaves WRITE SCATTERED(16) */
		is_16 = true;
		wrprotect = (cmd[2] >> 5) & 0x7;
		lbdof = get_unaligned_be16(cmd + 4);
		num_lrd = get_unaligned_be16(cmd + 8);
		bt_len = get_unaligned_be32(cmd + 10);
		group = cmd[14] & 0x3f;
		if (unlikely(have_dif_prot)) {
			if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
			    wrprotect) {
				mk_sense_invalid_opcode(scp);
				return illegal_condition_result;
			}
			if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
			     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
			     wrprotect == 0)
				sdev_printk(KERN_ERR, scp->device,
					    "Unprotected WR to DIF device\n");
		}
	}
	if ((num_lrd == 0) || (bt_len == 0))
		return 0;       /* T10 says these do-nothings are not errors */
	if (lbdof == 0) {
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, scp->device,
				"%s: %s: LB Data Offset field bad\n",
				my_name, __func__);
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		return illegal_condition_result;
	}
	lbdof_blen = lbdof * lb_size;
	/* header (lrd_size bytes) + descriptors must fit in lbdof blocks */
	if ((lrd_size + (num_lrd * lrd_size)) > lbdof_blen) {
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, scp->device,
				"%s: %s: LBA range descriptors don't fit\n",
				my_name, __func__);
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		return illegal_condition_result;
	}
	lrdp = kzalloc(lbdof_blen, GFP_ATOMIC | __GFP_NOWARN);
	if (lrdp == NULL)
		return SCSI_MLQUEUE_HOST_BUSY;
	if (sdebug_verbose)
		sdev_printk(KERN_INFO, scp->device,
			"%s: %s: Fetch header+scatter_list, lbdof_blen=%u\n",
			my_name, __func__, lbdof_blen);
	res = fetch_to_dev_buffer(scp, lrdp, lbdof_blen);
	if (res == -1) {
		ret = DID_ERROR << 16;
		goto err_out;
	}

	sdeb_write_lock(sip);
	sg_off = lbdof_blen;
	/* Spec says Buffer xfer Length field in number of LBs in dout */
	cum_lb = 0;
	/* walk each LBA range descriptor; data follows at sg_off */
	for (k = 0, up = lrdp + lrd_size; k < num_lrd; ++k, up += lrd_size) {
		lba = get_unaligned_be64(up + 0);
		num = get_unaligned_be32(up + 8);
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, scp->device,
				"%s: %s: k=%d  LBA=0x%llx num=%u  sg_off=%u\n",
				my_name, __func__, k, lba, num, sg_off);
		if (num == 0)
			continue;
		ret = check_device_access_params(scp, lba, num, true);
		if (ret)
			goto err_out_unlock;
		num_by = num * lb_size;
		ei_lba = is_16 ? 0 : get_unaligned_be32(up + 12);

		if ((cum_lb + num) > bt_len) {
			if (sdebug_verbose)
				sdev_printk(KERN_INFO, scp->device,
				    "%s: %s: sum of blocks > data provided\n",
				    my_name, __func__);
			mk_sense_buffer(scp, ILLEGAL_REQUEST, WRITE_ERROR_ASC,
					0);
			ret = illegal_condition_result;
			goto err_out_unlock;
		}

		/* DIX + T10 DIF */
		if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
			int prot_ret = prot_verify_write(scp, lba, num,
							 ei_lba);

			if (prot_ret) {
				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10,
						prot_ret);
				ret = illegal_condition_result;
				goto err_out_unlock;
			}
		}

		ret = do_device_access(sip, scp, sg_off, lba, num, true, group);
		/* If ZBC zone then bump its write pointer */
		if (sdebug_dev_is_zoned(devip))
			zbc_inc_wp(devip, lba, num);
		if (unlikely(scsi_debug_lbp()))
			map_region(sip, lba, num);
		if (unlikely(-1 == ret)) {
			ret = DID_ERROR << 16;
			goto err_out_unlock;
		} else if (unlikely(sdebug_verbose && (ret < num_by)))
			sdev_printk(KERN_INFO, scp->device,
			    "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
			    my_name, num_by, ret);

		/* error injection: recovered / DIF / DIX errors */
		if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
			     atomic_read(&sdeb_inject_pending))) {
			if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
				mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
				atomic_set(&sdeb_inject_pending, 0);
				ret = check_condition_result;
				goto err_out_unlock;
			} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
				/* Logical block guard check failed */
				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
				atomic_set(&sdeb_inject_pending, 0);
				ret = illegal_condition_result;
				goto err_out_unlock;
			} else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
				atomic_set(&sdeb_inject_pending, 0);
				ret = illegal_condition_result;
				goto err_out_unlock;
			}
		}
		sg_off += num_by;
		cum_lb += num;
	}
	ret = 0;
err_out_unlock:
	sdeb_write_unlock(sip);
err_out:
	kfree(lrdp);
	return ret;
}
4270 
/*
 * Common backend for WRITE SAME(10/16).  With @unmap and LBP enabled
 * the range is deallocated; otherwise one logical block (zeroes when
 * @ndob, else fetched from the data-out buffer) is replicated across
 * [lba, lba+num).  Note: @ei_lba is currently unused here —
 * presumably reserved for PI-aware WRITE SAME; confirm before relying
 * on it.  Returns 0 or a SCSI result/check-condition code.
 */
static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
			   u32 ei_lba, bool unmap, bool ndob)
{
	struct scsi_device *sdp = scp->device;
	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
	unsigned long long i;
	u64 block, lbaa;
	u32 lb_size = sdebug_sector_size;
	int ret;
	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
						scp->device->hostdata, true);
	u8 *fs1p;	/* first (template) sector in the store */
	u8 *fsp;
	sdeb_write_lock(sip);

	ret = check_device_access_params(scp, lba, num, true);
	if (ret) {
		sdeb_write_unlock(sip);
		return ret;
	}

	if (unmap && scsi_debug_lbp()) {
		unmap_region(sip, lba, num);
		goto out;
	}
	lbaa = lba;
	block = do_div(lbaa, sdebug_store_sectors);
	/* if ndob then zero 1 logical block, else fetch 1 logical block */
	fsp = sip->storep;
	fs1p = fsp + (block * lb_size);
	if (ndob) {
		memset(fs1p, 0, lb_size);
		ret = 0;
	} else
		ret = fetch_to_dev_buffer(scp, fs1p, lb_size);

	if (-1 == ret) {
		sdeb_write_unlock(sip);
		return DID_ERROR << 16;
	} else if (sdebug_verbose && !ndob && (ret < lb_size))
		sdev_printk(KERN_INFO, scp->device,
			    "%s: %s: lb size=%u, IO sent=%d bytes\n",
			    my_name, "write same", lb_size, ret);

	/* Copy first sector to remaining blocks */
	for (i = 1 ; i < num ; i++) {
		lbaa = lba + i;
		block = do_div(lbaa, sdebug_store_sectors);
		memmove(fsp + (block * lb_size), fs1p, lb_size);
	}
	if (scsi_debug_lbp())
		map_region(sip, lba, num);
	/* If ZBC zone then bump its write pointer */
	if (sdebug_dev_is_zoned(devip))
		zbc_inc_wp(devip, lba, num);
out:
	sdeb_write_unlock(sip);

	return 0;
}
4332 
4333 static int resp_write_same_10(struct scsi_cmnd *scp,
4334 			      struct sdebug_dev_info *devip)
4335 {
4336 	u8 *cmd = scp->cmnd;
4337 	u32 lba;
4338 	u16 num;
4339 	u32 ei_lba = 0;
4340 	bool unmap = false;
4341 
4342 	if (cmd[1] & 0x8) {
4343 		if (sdebug_lbpws10 == 0) {
4344 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
4345 			return check_condition_result;
4346 		} else
4347 			unmap = true;
4348 	}
4349 	lba = get_unaligned_be32(cmd + 2);
4350 	num = get_unaligned_be16(cmd + 7);
4351 	if (num > sdebug_write_same_length) {
4352 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
4353 		return check_condition_result;
4354 	}
4355 	return resp_write_same(scp, lba, num, ei_lba, unmap, false);
4356 }
4357 
4358 static int resp_write_same_16(struct scsi_cmnd *scp,
4359 			      struct sdebug_dev_info *devip)
4360 {
4361 	u8 *cmd = scp->cmnd;
4362 	u64 lba;
4363 	u32 num;
4364 	u32 ei_lba = 0;
4365 	bool unmap = false;
4366 	bool ndob = false;
4367 
4368 	if (cmd[1] & 0x8) {	/* UNMAP */
4369 		if (sdebug_lbpws == 0) {
4370 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
4371 			return check_condition_result;
4372 		} else
4373 			unmap = true;
4374 	}
4375 	if (cmd[1] & 0x1)  /* NDOB (no data-out buffer, assumes zeroes) */
4376 		ndob = true;
4377 	lba = get_unaligned_be64(cmd + 2);
4378 	num = get_unaligned_be32(cmd + 10);
4379 	if (num > sdebug_write_same_length) {
4380 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
4381 		return check_condition_result;
4382 	}
4383 	return resp_write_same(scp, lba, num, ei_lba, unmap, ndob);
4384 }
4385 
4386 /* Note the mode field is in the same position as the (lower) service action
4387  * field. For the Report supported operation codes command, SPC-4 suggests
4388  * each mode of this command should be reported separately; for future. */
4389 static int resp_write_buffer(struct scsi_cmnd *scp,
4390 			     struct sdebug_dev_info *devip)
4391 {
4392 	u8 *cmd = scp->cmnd;
4393 	struct scsi_device *sdp = scp->device;
4394 	struct sdebug_dev_info *dp;
4395 	u8 mode;
4396 
4397 	mode = cmd[1] & 0x1f;
4398 	switch (mode) {
4399 	case 0x4:	/* download microcode (MC) and activate (ACT) */
4400 		/* set UAs on this device only */
4401 		set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
4402 		set_bit(SDEBUG_UA_MICROCODE_CHANGED, devip->uas_bm);
4403 		break;
4404 	case 0x5:	/* download MC, save and ACT */
4405 		set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET, devip->uas_bm);
4406 		break;
4407 	case 0x6:	/* download MC with offsets and ACT */
4408 		/* set UAs on most devices (LUs) in this target */
4409 		list_for_each_entry(dp,
4410 				    &devip->sdbg_host->dev_info_list,
4411 				    dev_list)
4412 			if (dp->target == sdp->id) {
4413 				set_bit(SDEBUG_UA_BUS_RESET, dp->uas_bm);
4414 				if (devip != dp)
4415 					set_bit(SDEBUG_UA_MICROCODE_CHANGED,
4416 						dp->uas_bm);
4417 			}
4418 		break;
4419 	case 0x7:	/* download MC with offsets, save, and ACT */
4420 		/* set UA on all devices (LUs) in this target */
4421 		list_for_each_entry(dp,
4422 				    &devip->sdbg_host->dev_info_list,
4423 				    dev_list)
4424 			if (dp->target == sdp->id)
4425 				set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET,
4426 					dp->uas_bm);
4427 		break;
4428 	default:
4429 		/* do nothing for this command for other mode values */
4430 		break;
4431 	}
4432 	return 0;
4433 }
4434 
/*
 * Respond to COMPARE AND WRITE(16).  Fetches 2*num blocks from the
 * data-out buffer (compare data followed by write data), compares the
 * first half against the store under the write lock, and on a match
 * commits the second half.  A mismatch returns MISCOMPARE sense.
 */
static int resp_comp_write(struct scsi_cmnd *scp,
			   struct sdebug_dev_info *devip)
{
	u8 *cmd = scp->cmnd;
	u8 *arr;	/* holds compare data + write data */
	struct sdeb_store_info *sip = devip2sip(devip, true);
	u64 lba;
	u32 dnum;	/* total blocks in data-out: 2 * num */
	u32 lb_size = sdebug_sector_size;
	u8 num;
	int ret;
	int retval = 0;

	lba = get_unaligned_be64(cmd + 2);
	num = cmd[13];		/* 1 to a maximum of 255 logical blocks */
	if (0 == num)
		return 0;	/* degenerate case, not an error */
	if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
	    (cmd[1] & 0xe0)) {
		mk_sense_invalid_opcode(scp);
		return check_condition_result;
	}
	if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
	     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
	    (cmd[1] & 0xe0) == 0)
		sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
			    "to DIF device\n");
	ret = check_device_access_params(scp, lba, num, false);
	if (ret)
		return ret;
	dnum = 2 * num;
	arr = kcalloc(lb_size, dnum, GFP_ATOMIC);
	if (NULL == arr) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
				INSUFF_RES_ASCQ);
		return check_condition_result;
	}

	sdeb_write_lock(sip);

	ret = do_dout_fetch(scp, dnum, arr);
	if (ret == -1) {
		retval = DID_ERROR << 16;
		goto cleanup;
	} else if (sdebug_verbose && (ret < (dnum * lb_size)))
		sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb "
			    "indicated=%u, IO sent=%d bytes\n", my_name,
			    dnum * lb_size, ret);
	/* compare first half; on match the worker writes the second half */
	if (!comp_write_worker(sip, lba, num, arr, false)) {
		mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
		retval = check_condition_result;
		goto cleanup;
	}
	if (scsi_debug_lbp())
		map_region(sip, lba, num);
cleanup:
	sdeb_write_unlock(sip);
	kfree(arr);
	return retval;
}
4495 
/* One 16-byte big-endian UNMAP block descriptor from the parameter list */
struct unmap_block_desc {
	__be64	lba;		/* starting LBA of the range to unmap */
	__be32	blocks;		/* number of logical blocks */
	__be32	__reserved;
};
4501 
/*
 * Respond to UNMAP.  Copies the parameter list out of the data-out
 * buffer, validates each block descriptor, and deallocates the ranges
 * under the write lock.  When LBP is disabled the command is accepted
 * and silently ignored.
 *
 * NOTE(review): the BUG_ON()s below fire on values derived from the
 * incoming CDB/parameter list; a malformed command would crash the
 * kernel rather than return sense data — consider verifying whether a
 * CHECK CONDITION response would be more appropriate here.
 */
static int resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	unsigned char *buf;
	struct unmap_block_desc *desc;
	struct sdeb_store_info *sip = devip2sip(devip, true);
	unsigned int i, payload_len, descriptors;
	int ret;

	if (!scsi_debug_lbp())
		return 0;	/* fib and say its done */
	payload_len = get_unaligned_be16(scp->cmnd + 7);
	BUG_ON(scsi_bufflen(scp) != payload_len);

	/* 8-byte parameter list header, then 16 bytes per descriptor */
	descriptors = (payload_len - 8) / 16;
	if (descriptors > sdebug_unmap_max_desc) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
		return check_condition_result;
	}

	buf = kzalloc(scsi_bufflen(scp), GFP_ATOMIC);
	if (!buf) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
				INSUFF_RES_ASCQ);
		return check_condition_result;
	}

	scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));

	BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
	BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);

	desc = (void *)&buf[8];

	sdeb_write_lock(sip);

	for (i = 0 ; i < descriptors ; i++) {
		unsigned long long lba = get_unaligned_be64(&desc[i].lba);
		unsigned int num = get_unaligned_be32(&desc[i].blocks);

		ret = check_device_access_params(scp, lba, num, true);
		if (ret)
			goto out;

		unmap_region(sip, lba, num);
	}

	ret = 0;

out:
	sdeb_write_unlock(sip);
	kfree(buf);

	return ret;
}
4556 
4557 #define SDEBUG_GET_LBA_STATUS_LEN 32
4558 
4559 static int resp_get_lba_status(struct scsi_cmnd *scp,
4560 			       struct sdebug_dev_info *devip)
4561 {
4562 	u8 *cmd = scp->cmnd;
4563 	u64 lba;
4564 	u32 alloc_len, mapped, num;
4565 	int ret;
4566 	u8 arr[SDEBUG_GET_LBA_STATUS_LEN];
4567 
4568 	lba = get_unaligned_be64(cmd + 2);
4569 	alloc_len = get_unaligned_be32(cmd + 10);
4570 
4571 	if (alloc_len < 24)
4572 		return 0;
4573 
4574 	ret = check_device_access_params(scp, lba, 1, false);
4575 	if (ret)
4576 		return ret;
4577 
4578 	if (scsi_debug_lbp()) {
4579 		struct sdeb_store_info *sip = devip2sip(devip, true);
4580 
4581 		mapped = map_state(sip, lba, &num);
4582 	} else {
4583 		mapped = 1;
4584 		/* following just in case virtual_gb changed */
4585 		sdebug_capacity = get_sdebug_capacity();
4586 		if (sdebug_capacity - lba <= 0xffffffff)
4587 			num = sdebug_capacity - lba;
4588 		else
4589 			num = 0xffffffff;
4590 	}
4591 
4592 	memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
4593 	put_unaligned_be32(20, arr);		/* Parameter Data Length */
4594 	put_unaligned_be64(lba, arr + 8);	/* LBA */
4595 	put_unaligned_be32(num, arr + 16);	/* Number of blocks */
4596 	arr[20] = !mapped;		/* prov_stat=0: mapped; 1: dealloc */
4597 
4598 	return fill_from_dev_buffer(scp, arr, SDEBUG_GET_LBA_STATUS_LEN);
4599 }
4600 
/*
 * GET STREAM STATUS: emit one 8 byte status descriptor per stream,
 * starting at the stream id taken from the CDB, preceded by an 8 byte
 * header. Output is clamped to both the CDB allocation length and the
 * local 256 byte buffer.
 */
static int resp_get_stream_status(struct scsi_cmnd *scp,
				  struct sdebug_dev_info *devip)
{
	u16 starting_stream_id, stream_id;
	const u8 *cmd = scp->cmnd;
	u32 alloc_len, offset;
	u8 arr[256] = {};
	struct scsi_stream_status_header *h = (void *)arr;

	starting_stream_id = get_unaligned_be16(cmd + 4);
	alloc_len = get_unaligned_be32(cmd + 10);

	/* the allocation length must at least cover the 8 byte header */
	if (alloc_len < 8) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
		return check_condition_result;
	}

	if (starting_stream_id >= MAXIMUM_NUMBER_OF_STREAMS) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1);
		return check_condition_result;
	}

	/*
	 * The GET STREAM STATUS command only reports status information
	 * about open streams. Treat the non-permanent stream as open.
	 */
	put_unaligned_be16(MAXIMUM_NUMBER_OF_STREAMS,
			   &h->number_of_open_streams);

	/*
	 * Descriptors start after the header (offset 8); stop when the
	 * next 8 byte descriptor would overflow either limit or when the
	 * last stream id has been reported.
	 */
	for (offset = 8, stream_id = starting_stream_id;
	     offset + 8 <= min_t(u32, alloc_len, sizeof(arr)) &&
		     stream_id < MAXIMUM_NUMBER_OF_STREAMS;
	     offset += 8, stream_id++) {
		struct scsi_stream_status *stream_status = (void *)arr + offset;

		/* the first PERMANENT_STREAM_COUNT streams are permanent */
		stream_status->perm = stream_id < PERMANENT_STREAM_COUNT;
		put_unaligned_be16(stream_id,
				   &stream_status->stream_identifier);
		stream_status->rel_lifetime = stream_id + 1;
	}
	put_unaligned_be32(offset - 8, &h->len); /* PARAMETER DATA LENGTH */

	return fill_from_dev_buffer(scp, arr, min(offset, alloc_len));
}
4645 
4646 static int resp_sync_cache(struct scsi_cmnd *scp,
4647 			   struct sdebug_dev_info *devip)
4648 {
4649 	int res = 0;
4650 	u64 lba;
4651 	u32 num_blocks;
4652 	u8 *cmd = scp->cmnd;
4653 
4654 	if (cmd[0] == SYNCHRONIZE_CACHE) {	/* 10 byte cdb */
4655 		lba = get_unaligned_be32(cmd + 2);
4656 		num_blocks = get_unaligned_be16(cmd + 7);
4657 	} else {				/* SYNCHRONIZE_CACHE(16) */
4658 		lba = get_unaligned_be64(cmd + 2);
4659 		num_blocks = get_unaligned_be32(cmd + 10);
4660 	}
4661 	if (lba + num_blocks > sdebug_capacity) {
4662 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4663 		return check_condition_result;
4664 	}
4665 	if (!write_since_sync || (cmd[1] & 0x2))
4666 		res = SDEG_RES_IMMED_MASK;
4667 	else		/* delay if write_since_sync and IMMED clear */
4668 		write_since_sync = false;
4669 	return res;
4670 }
4671 
4672 /*
4673  * Assuming the LBA+num_blocks is not out-of-range, this function will return
4674  * CONDITION MET if the specified blocks will/have fitted in the cache, and
4675  * a GOOD status otherwise. Model a disk with a big cache and yield
4676  * CONDITION MET. Actually tries to bring range in main memory into the
4677  * cache associated with the CPU(s).
4678  */
static int resp_pre_fetch(struct scsi_cmnd *scp,
			  struct sdebug_dev_info *devip)
{
	int res = 0;
	u64 lba;
	u64 block, rest = 0;
	u32 nblks;
	u8 *cmd = scp->cmnd;
	struct sdeb_store_info *sip = devip2sip(devip, true);
	u8 *fsp = sip->storep;

	if (cmd[0] == PRE_FETCH) {	/* 10 byte cdb */
		lba = get_unaligned_be32(cmd + 2);
		nblks = get_unaligned_be16(cmd + 7);
	} else {			/* PRE-FETCH(16) */
		lba = get_unaligned_be64(cmd + 2);
		nblks = get_unaligned_be32(cmd + 10);
	}
	if (lba + nblks > sdebug_capacity) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		return check_condition_result;
	}
	/* no backing store: nothing to bring into the CPU cache */
	if (!fsp)
		goto fini;
	/* PRE-FETCH spec says nothing about LBP or PI so skip them */
	/* do_div() leaves the quotient in lba and returns the remainder */
	block = do_div(lba, sdebug_store_sectors);
	/* the store is used modulo its size; split a range that wraps */
	if (block + nblks > sdebug_store_sectors)
		rest = block + nblks - sdebug_store_sectors;

	/* Try to bring the PRE-FETCH range into CPU's cache */
	sdeb_read_lock(sip);
	prefetch_range(fsp + (sdebug_sector_size * block),
		       (nblks - rest) * sdebug_sector_size);
	/* wrapped tail continues from the start of the store */
	if (rest)
		prefetch_range(fsp, rest * sdebug_sector_size);
	sdeb_read_unlock(sip);
fini:
	/* IMMED bit set: report completion immediately */
	if (cmd[1] & 0x2)
		res = SDEG_RES_IMMED_MASK;
	return res | condition_met_result;
}
4720 
4721 #define RL_BUCKET_ELEMS 8
4722 
4723 /* Even though each pseudo target has a REPORT LUNS "well known logical unit"
4724  * (W-LUN), the normal Linux scanning logic does not associate it with a
4725  * device (e.g. /dev/sg7). The following magic will make that association:
4726  *   "cd /sys/class/scsi_host/host<n> ; echo '- - 49409' > scan"
4727  * where <n> is a host number. If there are multiple targets in a host then
4728  * the above will associate a W-LUN to each target. To only get a W-LUN
4729  * for target 2, then use "echo '- 2 49409' > scan" .
4730  */
/*
 * REPORT LUNS: build the LUN list in RL_BUCKET_ELEMS-sized chunks in a
 * small stack buffer and stream each chunk to the data-in buffer with
 * p_fill_from_dev_buffer() at increasing offsets, so an arbitrarily
 * large LUN count never needs a large allocation.
 */
static int resp_report_luns(struct scsi_cmnd *scp,
			    struct sdebug_dev_info *devip)
{
	unsigned char *cmd = scp->cmnd;
	unsigned int alloc_len;
	unsigned char select_report;
	u64 lun;
	struct scsi_lun *lun_p;
	u8 arr[RL_BUCKET_ELEMS * sizeof(struct scsi_lun)];
	unsigned int lun_cnt;	/* normal LUN count (max: 256) */
	unsigned int wlun_cnt;	/* report luns W-LUN count */
	unsigned int tlun_cnt;	/* total LUN count */
	unsigned int rlen;	/* response length (in bytes) */
	int k, j, n, res;
	unsigned int off_rsp = 0;
	const int sz_lun = sizeof(struct scsi_lun);

	clear_luns_changed_on_target(devip);

	select_report = cmd[2];
	alloc_len = get_unaligned_be32(cmd + 6);

	/* SPC requires an allocation length of at least 4 bytes */
	if (alloc_len < 4) {
		pr_err("alloc len too small %d\n", alloc_len);
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
		return check_condition_result;
	}

	/* SELECT REPORT field decides which LUN classes are included */
	switch (select_report) {
	case 0:		/* all LUNs apart from W-LUNs */
		lun_cnt = sdebug_max_luns;
		wlun_cnt = 0;
		break;
	case 1:		/* only W-LUNs */
		lun_cnt = 0;
		wlun_cnt = 1;
		break;
	case 2:		/* all LUNs */
		lun_cnt = sdebug_max_luns;
		wlun_cnt = 1;
		break;
	case 0x10:	/* only administrative LUs */
	case 0x11:	/* see SPC-5 */
	case 0x12:	/* only subsiduary LUs owned by referenced LU */
	default:
		pr_debug("select report invalid %d\n", select_report);
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
		return check_condition_result;
	}

	if (sdebug_no_lun_0 && (lun_cnt > 0))
		--lun_cnt;

	tlun_cnt = lun_cnt + wlun_cnt;
	rlen = tlun_cnt * sz_lun;	/* excluding 8 byte header */
	scsi_set_resid(scp, scsi_bufflen(scp));
	pr_debug("select_report %d luns = %d wluns = %d no_lun0 %d\n",
		 select_report, lun_cnt, wlun_cnt, sdebug_no_lun_0);

	/* loops rely on sizeof response header same as sizeof lun (both 8) */
	lun = sdebug_no_lun_0 ? 1 : 0;
	for (k = 0, j = 0, res = 0; true; ++k, j = 0) {
		memset(arr, 0, sizeof(arr));
		lun_p = (struct scsi_lun *)&arr[0];
		/* the first bucket starts with the 8 byte response header */
		if (k == 0) {
			put_unaligned_be32(rlen, &arr[0]);
			++lun_p;
			j = 1;
		}
		/* fill the rest of the bucket with consecutive LUNs */
		for ( ; j < RL_BUCKET_ELEMS; ++j, ++lun_p) {
			if ((k * RL_BUCKET_ELEMS) + j > lun_cnt)
				break;
			int_to_scsilun(lun++, lun_p);
			/* flat addressing: set address-method bits to 01b */
			if (lun > 1 && sdebug_lun_am == SAM_LUN_AM_FLAT)
				lun_p->scsi_lun[0] |= 0x40;
		}
		/* partially filled bucket: the last one, flush it below */
		if (j < RL_BUCKET_ELEMS)
			break;
		n = j * sz_lun;
		res = p_fill_from_dev_buffer(scp, arr, n, off_rsp);
		if (res)
			return res;
		off_rsp += n;
	}
	/* append the W-LUN (if any) to the final, partial bucket */
	if (wlun_cnt) {
		int_to_scsilun(SCSI_W_LUN_REPORT_LUNS, lun_p);
		++j;
	}
	if (j > 0)
		res = p_fill_from_dev_buffer(scp, arr, j * sz_lun, off_rsp);
	return res;
}
4823 
/*
 * VERIFY(10)/VERIFY(16) with BYTCHK=1 or 3: fetch the data-out buffer
 * and compare it against the store. BYTCHK=0 (medium verify only) is
 * always reported good; BYTCHK=2 is not supported.
 */
static int resp_verify(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	bool is_bytchk3 = false;
	u8 bytchk;
	int ret, j;
	u32 vnum, a_num, off;
	const u32 lb_size = sdebug_sector_size;
	u64 lba;
	u8 *arr;
	u8 *cmd = scp->cmnd;
	struct sdeb_store_info *sip = devip2sip(devip, true);

	bytchk = (cmd[1] >> 1) & 0x3;
	if (bytchk == 0) {
		return 0;	/* always claim internal verify okay */
	} else if (bytchk == 2) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
		return check_condition_result;
	} else if (bytchk == 3) {
		is_bytchk3 = true;	/* 1 block sent, compared repeatedly */
	}
	switch (cmd[0]) {
	case VERIFY_16:
		lba = get_unaligned_be64(cmd + 2);
		vnum = get_unaligned_be32(cmd + 10);
		break;
	case VERIFY:		/* is VERIFY(10) */
		lba = get_unaligned_be32(cmd + 2);
		vnum = get_unaligned_be16(cmd + 7);
		break;
	default:
		mk_sense_invalid_opcode(scp);
		return check_condition_result;
	}
	if (vnum == 0)
		return 0;	/* not an error */
	/* BYTCHK=3 transfers only a single block from the initiator */
	a_num = is_bytchk3 ? 1 : vnum;
	/* Treat following check like one for read (i.e. no write) access */
	ret = check_device_access_params(scp, lba, a_num, false);
	if (ret)
		return ret;

	/* buffer sized for the whole range so BYTCHK=3 can replicate */
	arr = kcalloc(lb_size, vnum, GFP_ATOMIC | __GFP_NOWARN);
	if (!arr) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
				INSUFF_RES_ASCQ);
		return check_condition_result;
	}
	/* Not changing store, so only need read access */
	sdeb_read_lock(sip);

	ret = do_dout_fetch(scp, a_num, arr);
	if (ret == -1) {
		ret = DID_ERROR << 16;
		goto cleanup;
	} else if (sdebug_verbose && (ret < (a_num * lb_size))) {
		sdev_printk(KERN_INFO, scp->device,
			    "%s: %s: cdb indicated=%u, IO sent=%d bytes\n",
			    my_name, __func__, a_num * lb_size, ret);
	}
	/* BYTCHK=3: replicate the received block across the whole range */
	if (is_bytchk3) {
		for (j = 1, off = lb_size; j < vnum; ++j, off += lb_size)
			memcpy(arr + off, arr, lb_size);
	}
	ret = 0;
	/* compare-only pass (read_only=true): store is never modified */
	if (!comp_write_worker(sip, lba, vnum, arr, true)) {
		mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
		ret = check_condition_result;
		goto cleanup;
	}
cleanup:
	sdeb_read_unlock(sip);
	kfree(arr);
	return ret;
}
4899 
4900 #define RZONES_DESC_HD 64
4901 
4902 /* Report zones depending on start LBA and reporting options */
static int resp_report_zones(struct scsi_cmnd *scp,
			     struct sdebug_dev_info *devip)
{
	unsigned int rep_max_zones, nrz = 0;
	int ret = 0;
	u32 alloc_len, rep_opts, rep_len;
	bool partial;
	u64 lba, zs_lba;
	u8 *arr = NULL, *desc;
	u8 *cmd = scp->cmnd;
	struct sdeb_zone_state *zsp = NULL;
	struct sdeb_store_info *sip = devip2sip(devip, false);

	if (!sdebug_dev_is_zoned(devip)) {
		/* REPORT ZONES only makes sense on a zoned pseudo device */
		mk_sense_invalid_opcode(scp);
		return check_condition_result;
	}
	zs_lba = get_unaligned_be64(cmd + 2);
	alloc_len = get_unaligned_be32(cmd + 10);
	if (alloc_len == 0)
		return 0;	/* not an error */
	rep_opts = cmd[14] & 0x3f;	/* REPORTING OPTIONS field */
	partial = cmd[14] & 0x80;	/* PARTIAL bit */

	if (zs_lba >= sdebug_capacity) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		return check_condition_result;
	}

	/* how many 64 byte descriptors fit after the 64 byte header */
	rep_max_zones = (alloc_len - 64) >> ilog2(RZONES_DESC_HD);

	arr = kzalloc(alloc_len, GFP_ATOMIC | __GFP_NOWARN);
	if (!arr) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
				INSUFF_RES_ASCQ);
		return check_condition_result;
	}

	sdeb_read_lock(sip);

	desc = arr + 64;
	/* walk zones from zs_lba, filtering on the reporting options */
	for (lba = zs_lba; lba < sdebug_capacity;
	     lba = zsp->z_start + zsp->z_size) {
		/* guard against a stuck iteration (lookup returning same zone) */
		if (WARN_ONCE(zbc_zone(devip, lba) == zsp, "lba = %llu\n", lba))
			break;
		zsp = zbc_zone(devip, lba);
		switch (rep_opts) {
		case 0x00:
			/* All zones */
			break;
		case 0x01:
			/* Empty zones */
			if (zsp->z_cond != ZC1_EMPTY)
				continue;
			break;
		case 0x02:
			/* Implicit open zones */
			if (zsp->z_cond != ZC2_IMPLICIT_OPEN)
				continue;
			break;
		case 0x03:
			/* Explicit open zones */
			if (zsp->z_cond != ZC3_EXPLICIT_OPEN)
				continue;
			break;
		case 0x04:
			/* Closed zones */
			if (zsp->z_cond != ZC4_CLOSED)
				continue;
			break;
		case 0x05:
			/* Full zones */
			if (zsp->z_cond != ZC5_FULL)
				continue;
			break;
		case 0x06:
		case 0x07:
		case 0x10:
			/*
			 * Read-only, offline, reset WP recommended are
			 * not emulated: no zones to report;
			 */
			continue;
		case 0x11:
			/* non-seq-resource set */
			if (!zsp->z_non_seq_resource)
				continue;
			break;
		case 0x3e:
			/* All zones except gap zones. */
			if (zbc_zone_is_gap(zsp))
				continue;
			break;
		case 0x3f:
			/* Not write pointer (conventional) zones */
			if (zbc_zone_is_seq(zsp))
				continue;
			break;
		default:
			mk_sense_buffer(scp, ILLEGAL_REQUEST,
					INVALID_FIELD_IN_CDB, 0);
			ret = check_condition_result;
			goto fini;
		}

		if (nrz < rep_max_zones) {
			/* Fill zone descriptor */
			desc[0] = zsp->z_type;
			desc[1] = zsp->z_cond << 4;
			if (zsp->z_non_seq_resource)
				desc[1] |= 1 << 1;
			put_unaligned_be64((u64)zsp->z_size, desc + 8);
			put_unaligned_be64((u64)zsp->z_start, desc + 16);
			put_unaligned_be64((u64)zsp->z_wp, desc + 24);
			desc += 64;
		}

		/*
		 * With PARTIAL set, stop counting once the buffer is full;
		 * otherwise keep counting so the header reports the total
		 * number of matching zones.
		 */
		if (partial && nrz >= rep_max_zones)
			break;

		nrz++;
	}

	/* Report header */
	/* Zone list length. */
	put_unaligned_be32(nrz * RZONES_DESC_HD, arr + 0);
	/* Maximum LBA */
	put_unaligned_be64(sdebug_capacity - 1, arr + 8);
	/* Zone starting LBA granularity. */
	if (devip->zcap < devip->zsize)
		put_unaligned_be64(devip->zsize, arr + 16);

	rep_len = (unsigned long)desc - (unsigned long)arr;
	ret = fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, rep_len));

fini:
	sdeb_read_unlock(sip);
	kfree(arr);
	return ret;
}
5043 
5044 /* Logic transplanted from tcmu-runner, file_zbc.c */
5045 static void zbc_open_all(struct sdebug_dev_info *devip)
5046 {
5047 	struct sdeb_zone_state *zsp = &devip->zstate[0];
5048 	unsigned int i;
5049 
5050 	for (i = 0; i < devip->nr_zones; i++, zsp++) {
5051 		if (zsp->z_cond == ZC4_CLOSED)
5052 			zbc_open_zone(devip, &devip->zstate[i], true);
5053 	}
5054 }
5055 
/*
 * OPEN ZONE: explicitly open the zone named by the CDB's ZONE ID field,
 * or (with the ALL bit set) open every closed zone, enforcing the
 * device's max_open limit in both cases.
 */
static int resp_open_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	int res = 0;
	u64 z_id;
	enum sdebug_z_cond zc;
	u8 *cmd = scp->cmnd;
	struct sdeb_zone_state *zsp;
	bool all = cmd[14] & 0x01;
	struct sdeb_store_info *sip = devip2sip(devip, false);

	if (!sdebug_dev_is_zoned(devip)) {
		mk_sense_invalid_opcode(scp);
		return check_condition_result;
	}

	sdeb_write_lock(sip);

	if (all) {
		/* Check if all closed zones can be open */
		if (devip->max_open &&
		    devip->nr_exp_open + devip->nr_closed > devip->max_open) {
			mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
					INSUFF_ZONE_ASCQ);
			res = check_condition_result;
			goto fini;
		}
		/* Open all closed zones */
		zbc_open_all(devip);
		goto fini;
	}

	/* Open the specified zone */
	z_id = get_unaligned_be64(cmd + 2);
	if (z_id >= sdebug_capacity) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		res = check_condition_result;
		goto fini;
	}

	zsp = zbc_zone(devip, z_id);
	/* the ZONE ID must be the zone's start LBA */
	if (z_id != zsp->z_start) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		res = check_condition_result;
		goto fini;
	}
	/* conventional zones have no open/closed state */
	if (zbc_zone_is_conv(zsp)) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		res = check_condition_result;
		goto fini;
	}

	zc = zsp->z_cond;
	/* already explicitly open or full: nothing to do */
	if (zc == ZC3_EXPLICIT_OPEN || zc == ZC5_FULL)
		goto fini;

	if (devip->max_open && devip->nr_exp_open >= devip->max_open) {
		mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
				INSUFF_ZONE_ASCQ);
		res = check_condition_result;
		goto fini;
	}

	zbc_open_zone(devip, zsp, true);
fini:
	sdeb_write_unlock(sip);
	return res;
}
5123 
5124 static void zbc_close_all(struct sdebug_dev_info *devip)
5125 {
5126 	unsigned int i;
5127 
5128 	for (i = 0; i < devip->nr_zones; i++)
5129 		zbc_close_zone(devip, &devip->zstate[i]);
5130 }
5131 
5132 static int resp_close_zone(struct scsi_cmnd *scp,
5133 			   struct sdebug_dev_info *devip)
5134 {
5135 	int res = 0;
5136 	u64 z_id;
5137 	u8 *cmd = scp->cmnd;
5138 	struct sdeb_zone_state *zsp;
5139 	bool all = cmd[14] & 0x01;
5140 	struct sdeb_store_info *sip = devip2sip(devip, false);
5141 
5142 	if (!sdebug_dev_is_zoned(devip)) {
5143 		mk_sense_invalid_opcode(scp);
5144 		return check_condition_result;
5145 	}
5146 
5147 	sdeb_write_lock(sip);
5148 
5149 	if (all) {
5150 		zbc_close_all(devip);
5151 		goto fini;
5152 	}
5153 
5154 	/* Close specified zone */
5155 	z_id = get_unaligned_be64(cmd + 2);
5156 	if (z_id >= sdebug_capacity) {
5157 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
5158 		res = check_condition_result;
5159 		goto fini;
5160 	}
5161 
5162 	zsp = zbc_zone(devip, z_id);
5163 	if (z_id != zsp->z_start) {
5164 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
5165 		res = check_condition_result;
5166 		goto fini;
5167 	}
5168 	if (zbc_zone_is_conv(zsp)) {
5169 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
5170 		res = check_condition_result;
5171 		goto fini;
5172 	}
5173 
5174 	zbc_close_zone(devip, zsp);
5175 fini:
5176 	sdeb_write_unlock(sip);
5177 	return res;
5178 }
5179 
/*
 * Transition a zone to the FULL condition by moving its write pointer to
 * the end of the zone. Open zones are closed first so the open/closed
 * accounting stays consistent. An EMPTY zone is only finished when
 * @empty is true (set for an explicit FINISH ZONE of that zone).
 */
static void zbc_finish_zone(struct sdebug_dev_info *devip,
			    struct sdeb_zone_state *zsp, bool empty)
{
	enum sdebug_z_cond zc = zsp->z_cond;

	if (zc == ZC4_CLOSED || zc == ZC2_IMPLICIT_OPEN ||
	    zc == ZC3_EXPLICIT_OPEN || (empty && zc == ZC1_EMPTY)) {
		if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
			zbc_close_zone(devip, zsp);
		/* re-read z_cond: zbc_close_zone() may have changed it */
		if (zsp->z_cond == ZC4_CLOSED)
			devip->nr_closed--;
		zsp->z_wp = zsp->z_start + zsp->z_size;
		zsp->z_cond = ZC5_FULL;
	}
}
5195 
5196 static void zbc_finish_all(struct sdebug_dev_info *devip)
5197 {
5198 	unsigned int i;
5199 
5200 	for (i = 0; i < devip->nr_zones; i++)
5201 		zbc_finish_zone(devip, &devip->zstate[i], false);
5202 }
5203 
/*
 * FINISH ZONE: move the write pointer of the zone named by the CDB's
 * ZONE ID field to the end of the zone (or of all zones when the ALL
 * bit is set), transitioning it to the FULL condition.
 */
static int resp_finish_zone(struct scsi_cmnd *scp,
			    struct sdebug_dev_info *devip)
{
	struct sdeb_zone_state *zsp;
	int res = 0;
	u64 z_id;
	u8 *cmd = scp->cmnd;
	bool all = cmd[14] & 0x01;
	struct sdeb_store_info *sip = devip2sip(devip, false);

	if (!sdebug_dev_is_zoned(devip)) {
		mk_sense_invalid_opcode(scp);
		return check_condition_result;
	}

	sdeb_write_lock(sip);

	if (all) {
		zbc_finish_all(devip);
		goto fini;
	}

	/* Finish the specified zone */
	z_id = get_unaligned_be64(cmd + 2);
	if (z_id >= sdebug_capacity) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		res = check_condition_result;
		goto fini;
	}

	zsp = zbc_zone(devip, z_id);
	/* the ZONE ID must be the zone's start LBA */
	if (z_id != zsp->z_start) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		res = check_condition_result;
		goto fini;
	}
	/* conventional zones have no write pointer to move */
	if (zbc_zone_is_conv(zsp)) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		res = check_condition_result;
		goto fini;
	}

	zbc_finish_zone(devip, zsp, true);
fini:
	sdeb_write_unlock(sip);
	return res;
}
5251 
/*
 * Reset a zone's write pointer: close the zone if it is open, zero the
 * part of the backing store that was written, and return the zone to
 * the EMPTY condition. Non sequential zones are left untouched.
 */
static void zbc_rwp_zone(struct sdebug_dev_info *devip,
			 struct sdeb_zone_state *zsp)
{
	enum sdebug_z_cond zc;
	struct sdeb_store_info *sip = devip2sip(devip, false);

	if (!zbc_zone_is_seq(zsp))
		return;

	zc = zsp->z_cond;
	if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
		zbc_close_zone(devip, zsp);

	/* re-read z_cond: zbc_close_zone() may have changed it */
	if (zsp->z_cond == ZC4_CLOSED)
		devip->nr_closed--;

	/* zero data written so far so reads see a reset zone */
	if (zsp->z_wp > zsp->z_start)
		memset(sip->storep + zsp->z_start * sdebug_sector_size, 0,
		       (zsp->z_wp - zsp->z_start) * sdebug_sector_size);

	zsp->z_non_seq_resource = false;
	zsp->z_wp = zsp->z_start;
	zsp->z_cond = ZC1_EMPTY;
}
5276 
5277 static void zbc_rwp_all(struct sdebug_dev_info *devip)
5278 {
5279 	unsigned int i;
5280 
5281 	for (i = 0; i < devip->nr_zones; i++)
5282 		zbc_rwp_zone(devip, &devip->zstate[i]);
5283 }
5284 
/*
 * RESET WRITE POINTER: reset the zone named by the CDB's ZONE ID field,
 * or every zone when the ALL bit is set.
 */
static int resp_rwp_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	struct sdeb_zone_state *zsp;
	int res = 0;
	u64 z_id;
	u8 *cmd = scp->cmnd;
	bool all = cmd[14] & 0x01;
	struct sdeb_store_info *sip = devip2sip(devip, false);

	if (!sdebug_dev_is_zoned(devip)) {
		mk_sense_invalid_opcode(scp);
		return check_condition_result;
	}

	sdeb_write_lock(sip);

	if (all) {
		zbc_rwp_all(devip);
		goto fini;
	}

	z_id = get_unaligned_be64(cmd + 2);
	if (z_id >= sdebug_capacity) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		res = check_condition_result;
		goto fini;
	}

	zsp = zbc_zone(devip, z_id);
	/* the ZONE ID must be the zone's start LBA */
	if (z_id != zsp->z_start) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		res = check_condition_result;
		goto fini;
	}
	/* conventional zones have no write pointer to reset */
	if (zbc_zone_is_conv(zsp)) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		res = check_condition_result;
		goto fini;
	}

	zbc_rwp_zone(devip, zsp);
fini:
	sdeb_write_unlock(sip);
	return res;
}
5330 
5331 static u32 get_tag(struct scsi_cmnd *cmnd)
5332 {
5333 	return blk_mq_unique_tag(scsi_cmd_to_rq(cmnd));
5334 }
5335 
5336 /* Queued (deferred) command completions converge here. */
static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
{
	struct sdebug_queued_cmd *sqcp = container_of(sd_dp, struct sdebug_queued_cmd, sd_dp);
	unsigned long flags;
	struct scsi_cmnd *scp = sqcp->scmd;
	struct sdebug_scsi_cmd *sdsc;
	bool aborted;

	/* completion statistics; note completions on a different CPU */
	if (sdebug_statistics) {
		atomic_inc(&sdebug_completions);
		if (raw_smp_processor_id() != sd_dp->issuing_cpu)
			atomic_inc(&sdebug_miss_cpus);
	}

	if (!scp) {
		pr_err("scmd=NULL\n");
		goto out;
	}

	/*
	 * Under the per-command lock: consume the aborted flag and detach
	 * the queued command from the scsi_cmnd.
	 */
	sdsc = scsi_cmd_priv(scp);
	spin_lock_irqsave(&sdsc->lock, flags);
	aborted = sd_dp->aborted;
	if (unlikely(aborted))
		sd_dp->aborted = false;
	ASSIGN_QUEUED_CMD(scp, NULL);

	spin_unlock_irqrestore(&sdsc->lock, flags);

	/* an aborted command goes to error handling, not scsi_done() */
	if (aborted) {
		pr_info("bypassing scsi_done() due to aborted cmd, kicking-off EH\n");
		blk_abort_request(scsi_cmd_to_rq(scp));
		goto out;
	}

	scsi_done(scp); /* callback to mid level */
out:
	sdebug_free_queued_cmd(sqcp);
}
5375 
5376 /* When high resolution timer goes off this function is called. */
5377 static enum hrtimer_restart sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
5378 {
5379 	struct sdebug_defer *sd_dp = container_of(timer, struct sdebug_defer,
5380 						  hrt);
5381 	sdebug_q_cmd_complete(sd_dp);
5382 	return HRTIMER_NORESTART;
5383 }
5384 
5385 /* When work queue schedules work, it calls this function. */
5386 static void sdebug_q_cmd_wq_complete(struct work_struct *work)
5387 {
5388 	struct sdebug_defer *sd_dp = container_of(work, struct sdebug_defer,
5389 						  ew.work);
5390 	sdebug_q_cmd_complete(sd_dp);
5391 }
5392 
5393 static bool got_shared_uuid;
5394 static uuid_t shared_uuid;
5395 
/*
 * Compute the zone geometry for a (pseudo) zoned device from the module
 * parameters and allocate/initialize the per-zone state array. Returns
 * 0 on success or a negative errno on invalid geometry / allocation
 * failure.
 */
static int sdebug_device_create_zones(struct sdebug_dev_info *devip)
{
	struct sdeb_zone_state *zsp;
	sector_t capacity = get_sdebug_capacity();
	sector_t conv_capacity;
	sector_t zstart = 0;
	unsigned int i;

	/*
	 * Set the zone size: if sdeb_zbc_zone_size_mb is not set, figure out
	 * a zone size allowing for at least 4 zones on the device. Otherwise,
	 * use the specified zone size checking that at least 2 zones can be
	 * created for the device.
	 */
	if (!sdeb_zbc_zone_size_mb) {
		devip->zsize = (DEF_ZBC_ZONE_SIZE_MB * SZ_1M)
			>> ilog2(sdebug_sector_size);
		/* halve the zone size until at least 4 zones fit */
		while (capacity < devip->zsize << 2 && devip->zsize >= 2)
			devip->zsize >>= 1;
		if (devip->zsize < 2) {
			pr_err("Device capacity too small\n");
			return -EINVAL;
		}
	} else {
		if (!is_power_of_2(sdeb_zbc_zone_size_mb)) {
			pr_err("Zone size is not a power of 2\n");
			return -EINVAL;
		}
		devip->zsize = (sdeb_zbc_zone_size_mb * SZ_1M)
			>> ilog2(sdebug_sector_size);
		if (devip->zsize >= capacity) {
			pr_err("Zone size too large for device capacity\n");
			return -EINVAL;
		}
	}

	devip->zsize_shift = ilog2(devip->zsize);
	devip->nr_zones = (capacity + devip->zsize - 1) >> devip->zsize_shift;

	/* zone capacity defaults to the zone size and may not exceed it */
	if (sdeb_zbc_zone_cap_mb == 0) {
		devip->zcap = devip->zsize;
	} else {
		devip->zcap = (sdeb_zbc_zone_cap_mb * SZ_1M) >>
			      ilog2(sdebug_sector_size);
		if (devip->zcap > devip->zsize) {
			pr_err("Zone capacity too large\n");
			return -EINVAL;
		}
	}

	/* split capacity into conventional and sequential zones */
	conv_capacity = (sector_t)sdeb_zbc_nr_conv << devip->zsize_shift;
	if (conv_capacity >= capacity) {
		pr_err("Number of conventional zones too large\n");
		return -EINVAL;
	}
	devip->nr_conv_zones = sdeb_zbc_nr_conv;
	devip->nr_seq_zones = ALIGN(capacity - conv_capacity, devip->zsize) >>
			      devip->zsize_shift;
	devip->nr_zones = devip->nr_conv_zones + devip->nr_seq_zones;

	/* Add gap zones if zone capacity is smaller than the zone size */
	if (devip->zcap < devip->zsize)
		devip->nr_zones += devip->nr_seq_zones;

	if (devip->zoned) {
		/* zbc_max_open_zones can be 0, meaning "not reported" */
		if (sdeb_zbc_max_open >= devip->nr_zones - 1)
			devip->max_open = (devip->nr_zones - 1) / 2;
		else
			devip->max_open = sdeb_zbc_max_open;
	}

	devip->zstate = kcalloc(devip->nr_zones,
				sizeof(struct sdeb_zone_state), GFP_KERNEL);
	if (!devip->zstate)
		return -ENOMEM;

	/*
	 * Lay out zones back to back: first the conventional zones, then
	 * for each zone-size chunk a sequential zone of zcap sectors
	 * followed (when zcap < zsize) by a gap zone for the remainder.
	 */
	for (i = 0; i < devip->nr_zones; i++) {
		zsp = &devip->zstate[i];

		zsp->z_start = zstart;

		if (i < devip->nr_conv_zones) {
			zsp->z_type = ZBC_ZTYPE_CNV;
			zsp->z_cond = ZBC_NOT_WRITE_POINTER;
			zsp->z_wp = (sector_t)-1;
			zsp->z_size =
				min_t(u64, devip->zsize, capacity - zstart);
		} else if ((zstart & (devip->zsize - 1)) == 0) {
			/* zone-size aligned: a sequential zone (SWR or SWP) */
			if (devip->zoned)
				zsp->z_type = ZBC_ZTYPE_SWR;
			else
				zsp->z_type = ZBC_ZTYPE_SWP;
			zsp->z_cond = ZC1_EMPTY;
			zsp->z_wp = zsp->z_start;
			zsp->z_size =
				min_t(u64, devip->zcap, capacity - zstart);
		} else {
			/* unaligned: the gap filling out the zone size */
			zsp->z_type = ZBC_ZTYPE_GAP;
			zsp->z_cond = ZBC_NOT_WRITE_POINTER;
			zsp->z_wp = (sector_t)-1;
			zsp->z_size = min_t(u64, devip->zsize - devip->zcap,
					    capacity - zstart);
		}

		WARN_ON_ONCE((int)zsp->z_size <= 0);
		zstart += zsp->z_size;
	}

	return 0;
}
5507 
/*
 * Allocate and initialize a pseudo device and link it onto the host's
 * device list. Returns NULL on allocation or zone-setup failure.
 */
static struct sdebug_dev_info *sdebug_device_create(
			struct sdebug_host_info *sdbg_host, gfp_t flags)
{
	struct sdebug_dev_info *devip;

	devip = kzalloc(sizeof(*devip), flags);
	if (devip) {
		/* uuid_ctl==1: fresh LU name per device; ==2: one shared */
		if (sdebug_uuid_ctl == 1)
			uuid_gen(&devip->lu_name);
		else if (sdebug_uuid_ctl == 2) {
			if (got_shared_uuid)
				devip->lu_name = shared_uuid;
			else {
				uuid_gen(&shared_uuid);
				got_shared_uuid = true;
				devip->lu_name = shared_uuid;
			}
		}
		devip->sdbg_host = sdbg_host;
		if (sdeb_zbc_in_use) {
			/* host-managed vs host-aware zoned model */
			devip->zoned = sdeb_zbc_model == BLK_ZONED_HM;
			if (sdebug_device_create_zones(devip)) {
				kfree(devip);
				return NULL;
			}
		} else {
			devip->zoned = false;
		}
		devip->create_ts = ktime_get_boottime();
		/* stopped==2: not ready until sdeb_tur_ms_to_ready elapses */
		atomic_set(&devip->stopped, (sdeb_tur_ms_to_ready > 0 ? 2 : 0));
		spin_lock_init(&devip->list_lock);
		INIT_LIST_HEAD(&devip->inject_err_list);
		list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
	}
	return devip;
}
5544 
5545 static struct sdebug_dev_info *find_build_dev_info(struct scsi_device *sdev)
5546 {
5547 	struct sdebug_host_info *sdbg_host;
5548 	struct sdebug_dev_info *open_devip = NULL;
5549 	struct sdebug_dev_info *devip;
5550 
5551 	sdbg_host = shost_to_sdebug_host(sdev->host);
5552 
5553 	list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
5554 		if ((devip->used) && (devip->channel == sdev->channel) &&
5555 		    (devip->target == sdev->id) &&
5556 		    (devip->lun == sdev->lun))
5557 			return devip;
5558 		else {
5559 			if ((!devip->used) && (!open_devip))
5560 				open_devip = devip;
5561 		}
5562 	}
5563 	if (!open_devip) { /* try and make a new one */
5564 		open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
5565 		if (!open_devip) {
5566 			pr_err("out of memory at line %d\n", __LINE__);
5567 			return NULL;
5568 		}
5569 	}
5570 
5571 	open_devip->channel = sdev->channel;
5572 	open_devip->target = sdev->id;
5573 	open_devip->lun = sdev->lun;
5574 	open_devip->sdbg_host = sdbg_host;
5575 	set_bit(SDEBUG_UA_POOCCUR, open_devip->uas_bm);
5576 	open_devip->used = true;
5577 	return open_devip;
5578 }
5579 
5580 static int scsi_debug_slave_alloc(struct scsi_device *sdp)
5581 {
5582 	if (sdebug_verbose)
5583 		pr_info("slave_alloc <%u %u %u %llu>\n",
5584 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5585 
5586 	return 0;
5587 }
5588 
/*
 * Attach (or find/build) the pseudo-device state for a scsi_device and
 * create its debugfs entries. Returns 0 on success, 1 when no device
 * info can be obtained (the device will be marked offline).
 */
static int scsi_debug_slave_configure(struct scsi_device *sdp)
{
	struct sdebug_dev_info *devip =
			(struct sdebug_dev_info *)sdp->hostdata;
	struct dentry *dentry;

	if (sdebug_verbose)
		pr_info("slave_configure <%u %u %u %llu>\n",
		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
	if (sdp->host->max_cmd_len != SDEBUG_MAX_CMD_LEN)
		sdp->host->max_cmd_len = SDEBUG_MAX_CMD_LEN;
	if (devip == NULL) {
		devip = find_build_dev_info(sdp);
		if (devip == NULL)
			return 1;  /* no resources, will be marked offline */
	}
	sdp->hostdata = devip;
	if (sdebug_no_uld)
		sdp->no_uld_attach = 1;
	config_cdb_len(sdp);

	if (sdebug_allow_restart)
		sdp->allow_restart = 1;

	/* debugfs failures are logged but never fail the configure */
	devip->debugfs_entry = debugfs_create_dir(dev_name(&sdp->sdev_dev),
				sdebug_debugfs_root);
	if (IS_ERR_OR_NULL(devip->debugfs_entry))
		pr_info("%s: failed to create debugfs directory for device %s\n",
			__func__, dev_name(&sdp->sdev_gendev));

	/* "error" file allows error injection for this device */
	dentry = debugfs_create_file("error", 0600, devip->debugfs_entry, sdp,
				&sdebug_error_fops);
	if (IS_ERR_OR_NULL(dentry))
		pr_info("%s: failed to create error file for device %s\n",
			__func__, dev_name(&sdp->sdev_gendev));

	return 0;
}
5627 
/*
 * Per-device teardown callback: drop any injected-error entries, remove the
 * device's debugfs directory and release the per-LUN slot for re-use.
 */
static void scsi_debug_slave_destroy(struct scsi_device *sdp)
{
	struct sdebug_dev_info *devip =
		(struct sdebug_dev_info *)sdp->hostdata;
	struct sdebug_err_inject *err;

	if (sdebug_verbose)
		pr_info("slave_destroy <%u %u %u %llu>\n",
		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);

	if (!devip)
		return;

	/* unlink under the list lock; free after RCU readers are done */
	spin_lock(&devip->list_lock);
	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
		list_del_rcu(&err->list);
		call_rcu(&err->rcu, sdebug_err_free);
	}
	spin_unlock(&devip->list_lock);

	debugfs_remove(devip->debugfs_entry);

	/* make this slot available for re-use */
	devip->used = false;
	sdp->hostdata = NULL;
}
5654 
5655 /* Returns true if we require the queued memory to be freed by the caller. */
5656 static bool stop_qc_helper(struct sdebug_defer *sd_dp,
5657 			   enum sdeb_defer_type defer_t)
5658 {
5659 	if (defer_t == SDEB_DEFER_HRT) {
5660 		int res = hrtimer_try_to_cancel(&sd_dp->hrt);
5661 
5662 		switch (res) {
5663 		case 0: /* Not active, it must have already run */
5664 		case -1: /* -1 It's executing the CB */
5665 			return false;
5666 		case 1: /* Was active, we've now cancelled */
5667 		default:
5668 			return true;
5669 		}
5670 	} else if (defer_t == SDEB_DEFER_WQ) {
5671 		/* Cancel if pending */
5672 		if (cancel_work_sync(&sd_dp->ew.work))
5673 			return true;
5674 		/* Was not pending, so it must have run */
5675 		return false;
5676 	} else if (defer_t == SDEB_DEFER_POLL) {
5677 		return true;
5678 	}
5679 
5680 	return false;
5681 }
5682 
5683 
/*
 * Detach and stop the deferred completion attached to @cmnd. Must be called
 * with sdsc->lock held. Returns true if a queued command was found (whether
 * or not its timer/work had already fired), false if none was attached.
 */
static bool scsi_debug_stop_cmnd(struct scsi_cmnd *cmnd)
{
	enum sdeb_defer_type l_defer_t;
	struct sdebug_defer *sd_dp;
	struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmnd);
	struct sdebug_queued_cmd *sqcp = TO_QUEUED_CMD(cmnd);

	lockdep_assert_held(&sdsc->lock);

	if (!sqcp)
		return false;
	sd_dp = &sqcp->sd_dp;
	l_defer_t = READ_ONCE(sd_dp->defer_t);
	/* detach first so the completion path no longer sees sqcp */
	ASSIGN_QUEUED_CMD(cmnd, NULL);

	/* free only if the cancel gave us ownership of the memory */
	if (stop_qc_helper(sd_dp, l_defer_t))
		sdebug_free_queued_cmd(sqcp);

	return true;
}
5704 
5705 /*
5706  * Called from scsi_debug_abort() only, which is for timed-out cmd.
5707  */
5708 static bool scsi_debug_abort_cmnd(struct scsi_cmnd *cmnd)
5709 {
5710 	struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmnd);
5711 	unsigned long flags;
5712 	bool res;
5713 
5714 	spin_lock_irqsave(&sdsc->lock, flags);
5715 	res = scsi_debug_stop_cmnd(cmnd);
5716 	spin_unlock_irqrestore(&sdsc->lock, flags);
5717 
5718 	return res;
5719 }
5720 
5721 /*
5722  * All we can do is set the cmnd as internally aborted and wait for it to
5723  * finish. We cannot call scsi_done() as normal completion path may do that.
5724  */
5725 static bool sdebug_stop_cmnd(struct request *rq, void *data)
5726 {
5727 	scsi_debug_abort_cmnd(blk_mq_rq_to_pdu(rq));
5728 
5729 	return true;
5730 }
5731 
5732 /* Deletes (stops) timers or work queues of all queued commands */
5733 static void stop_all_queued(void)
5734 {
5735 	struct sdebug_host_info *sdhp;
5736 
5737 	mutex_lock(&sdebug_host_list_mutex);
5738 	list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
5739 		struct Scsi_Host *shost = sdhp->shost;
5740 
5741 		blk_mq_tagset_busy_iter(&shost->tag_set, sdebug_stop_cmnd, NULL);
5742 	}
5743 	mutex_unlock(&sdebug_host_list_mutex);
5744 }
5745 
5746 static int sdebug_fail_abort(struct scsi_cmnd *cmnd)
5747 {
5748 	struct scsi_device *sdp = cmnd->device;
5749 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
5750 	struct sdebug_err_inject *err;
5751 	unsigned char *cmd = cmnd->cmnd;
5752 	int ret = 0;
5753 
5754 	if (devip == NULL)
5755 		return 0;
5756 
5757 	rcu_read_lock();
5758 	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
5759 		if (err->type == ERR_ABORT_CMD_FAILED &&
5760 		    (err->cmd == cmd[0] || err->cmd == 0xff)) {
5761 			ret = !!err->cnt;
5762 			if (err->cnt < 0)
5763 				err->cnt++;
5764 
5765 			rcu_read_unlock();
5766 			return ret;
5767 		}
5768 	}
5769 	rcu_read_unlock();
5770 
5771 	return 0;
5772 }
5773 
5774 static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
5775 {
5776 	bool ok = scsi_debug_abort_cmnd(SCpnt);
5777 	u8 *cmd = SCpnt->cmnd;
5778 	u8 opcode = cmd[0];
5779 
5780 	++num_aborts;
5781 
5782 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5783 		sdev_printk(KERN_INFO, SCpnt->device,
5784 			    "%s: command%s found\n", __func__,
5785 			    ok ? "" : " not");
5786 
5787 	if (sdebug_fail_abort(SCpnt)) {
5788 		scmd_printk(KERN_INFO, SCpnt, "fail abort command 0x%x\n",
5789 			    opcode);
5790 		return FAILED;
5791 	}
5792 
5793 	return SUCCESS;
5794 }
5795 
5796 static bool scsi_debug_stop_all_queued_iter(struct request *rq, void *data)
5797 {
5798 	struct scsi_device *sdp = data;
5799 	struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
5800 
5801 	if (scmd->device == sdp)
5802 		scsi_debug_abort_cmnd(scmd);
5803 
5804 	return true;
5805 }
5806 
5807 /* Deletes (stops) timers or work queues of all queued commands per sdev */
5808 static void scsi_debug_stop_all_queued(struct scsi_device *sdp)
5809 {
5810 	struct Scsi_Host *shost = sdp->host;
5811 
5812 	blk_mq_tagset_busy_iter(&shost->tag_set,
5813 				scsi_debug_stop_all_queued_iter, sdp);
5814 }
5815 
5816 static int sdebug_fail_lun_reset(struct scsi_cmnd *cmnd)
5817 {
5818 	struct scsi_device *sdp = cmnd->device;
5819 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
5820 	struct sdebug_err_inject *err;
5821 	unsigned char *cmd = cmnd->cmnd;
5822 	int ret = 0;
5823 
5824 	if (devip == NULL)
5825 		return 0;
5826 
5827 	rcu_read_lock();
5828 	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
5829 		if (err->type == ERR_LUN_RESET_FAILED &&
5830 		    (err->cmd == cmd[0] || err->cmd == 0xff)) {
5831 			ret = !!err->cnt;
5832 			if (err->cnt < 0)
5833 				err->cnt++;
5834 
5835 			rcu_read_unlock();
5836 			return ret;
5837 		}
5838 	}
5839 	rcu_read_unlock();
5840 
5841 	return 0;
5842 }
5843 
5844 static int scsi_debug_device_reset(struct scsi_cmnd *SCpnt)
5845 {
5846 	struct scsi_device *sdp = SCpnt->device;
5847 	struct sdebug_dev_info *devip = sdp->hostdata;
5848 	u8 *cmd = SCpnt->cmnd;
5849 	u8 opcode = cmd[0];
5850 
5851 	++num_dev_resets;
5852 
5853 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5854 		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5855 
5856 	scsi_debug_stop_all_queued(sdp);
5857 	if (devip)
5858 		set_bit(SDEBUG_UA_POR, devip->uas_bm);
5859 
5860 	if (sdebug_fail_lun_reset(SCpnt)) {
5861 		scmd_printk(KERN_INFO, SCpnt, "fail lun reset 0x%x\n", opcode);
5862 		return FAILED;
5863 	}
5864 
5865 	return SUCCESS;
5866 }
5867 
5868 static int sdebug_fail_target_reset(struct scsi_cmnd *cmnd)
5869 {
5870 	struct scsi_target *starget = scsi_target(cmnd->device);
5871 	struct sdebug_target_info *targetip =
5872 		(struct sdebug_target_info *)starget->hostdata;
5873 
5874 	if (targetip)
5875 		return targetip->reset_fail;
5876 
5877 	return 0;
5878 }
5879 
5880 static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt)
5881 {
5882 	struct scsi_device *sdp = SCpnt->device;
5883 	struct sdebug_host_info *sdbg_host = shost_to_sdebug_host(sdp->host);
5884 	struct sdebug_dev_info *devip;
5885 	u8 *cmd = SCpnt->cmnd;
5886 	u8 opcode = cmd[0];
5887 	int k = 0;
5888 
5889 	++num_target_resets;
5890 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5891 		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5892 
5893 	list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
5894 		if (devip->target == sdp->id) {
5895 			set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5896 			++k;
5897 		}
5898 	}
5899 
5900 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5901 		sdev_printk(KERN_INFO, sdp,
5902 			    "%s: %d device(s) found in target\n", __func__, k);
5903 
5904 	if (sdebug_fail_target_reset(SCpnt)) {
5905 		scmd_printk(KERN_INFO, SCpnt, "fail target reset 0x%x\n",
5906 			    opcode);
5907 		return FAILED;
5908 	}
5909 
5910 	return SUCCESS;
5911 }
5912 
5913 static int scsi_debug_bus_reset(struct scsi_cmnd *SCpnt)
5914 {
5915 	struct scsi_device *sdp = SCpnt->device;
5916 	struct sdebug_host_info *sdbg_host = shost_to_sdebug_host(sdp->host);
5917 	struct sdebug_dev_info *devip;
5918 	int k = 0;
5919 
5920 	++num_bus_resets;
5921 
5922 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5923 		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5924 
5925 	list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
5926 		set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5927 		++k;
5928 	}
5929 
5930 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5931 		sdev_printk(KERN_INFO, sdp,
5932 			    "%s: %d device(s) found in host\n", __func__, k);
5933 	return SUCCESS;
5934 }
5935 
5936 static int scsi_debug_host_reset(struct scsi_cmnd *SCpnt)
5937 {
5938 	struct sdebug_host_info *sdbg_host;
5939 	struct sdebug_dev_info *devip;
5940 	int k = 0;
5941 
5942 	++num_host_resets;
5943 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5944 		sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__);
5945 	mutex_lock(&sdebug_host_list_mutex);
5946 	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
5947 		list_for_each_entry(devip, &sdbg_host->dev_info_list,
5948 				    dev_list) {
5949 			set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5950 			++k;
5951 		}
5952 	}
5953 	mutex_unlock(&sdebug_host_list_mutex);
5954 	stop_all_queued();
5955 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5956 		sdev_printk(KERN_INFO, SCpnt->device,
5957 			    "%s: %d device(s) found\n", __func__, k);
5958 	return SUCCESS;
5959 }
5960 
/*
 * Write an MSDOS/MBR partition table into the first sector of the ramdisk
 * image @ramp, dividing the simulated capacity into sdebug_num_parts
 * cylinder-aligned Linux (0x83) partitions. No-op for tiny stores or when
 * no partitions were requested.
 */
static void sdebug_build_parts(unsigned char *ramp, unsigned long store_size)
{
	struct msdos_partition *pp;
	int starts[SDEBUG_MAX_PARTS + 2], max_part_secs;
	int sectors_per_part, num_sectors, k;
	int heads_by_sects, start_sec, end_sec;

	/* assume partition table already zeroed */
	if ((sdebug_num_parts < 1) || (store_size < 1048576))
		return;
	if (sdebug_num_parts > SDEBUG_MAX_PARTS) {
		sdebug_num_parts = SDEBUG_MAX_PARTS;
		pr_warn("reducing partitions to %d\n", SDEBUG_MAX_PARTS);
	}
	num_sectors = (int)get_sdebug_capacity();
	/* first track is reserved for the MBR itself */
	sectors_per_part = (num_sectors - sdebug_sectors_per)
			   / sdebug_num_parts;
	heads_by_sects = sdebug_heads * sdebug_sectors_per;
	starts[0] = sdebug_sectors_per;
	max_part_secs = sectors_per_part;
	/* round each start down to a cylinder boundary; all partitions get
	 * the smallest resulting size so none overlaps its successor */
	for (k = 1; k < sdebug_num_parts; ++k) {
		starts[k] = ((k * sectors_per_part) / heads_by_sects)
			    * heads_by_sects;
		if (starts[k] - starts[k - 1] < max_part_secs)
			max_part_secs = starts[k] - starts[k - 1];
	}
	starts[sdebug_num_parts] = num_sectors;
	starts[sdebug_num_parts + 1] = 0;	/* sentinel ends loop below */

	ramp[510] = 0x55;	/* magic partition markings */
	ramp[511] = 0xAA;
	pp = (struct msdos_partition *)(ramp + 0x1be);
	for (k = 0; starts[k + 1]; ++k, ++pp) {
		start_sec = starts[k];
		end_sec = starts[k] + max_part_secs - 1;
		pp->boot_ind = 0;

		/* legacy CHS address of the first sector (sector is 1-based) */
		pp->cyl = start_sec / heads_by_sects;
		pp->head = (start_sec - (pp->cyl * heads_by_sects))
			   / sdebug_sectors_per;
		pp->sector = (start_sec % sdebug_sectors_per) + 1;

		/* legacy CHS address of the last sector */
		pp->end_cyl = end_sec / heads_by_sects;
		pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
			       / sdebug_sectors_per;
		pp->end_sector = (end_sec % sdebug_sectors_per) + 1;

		/* LBA start and length, little-endian on disk */
		pp->start_sect = cpu_to_le32(start_sec);
		pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1);
		pp->sys_ind = 0x83;	/* plain Linux partition */
	}
}
6013 
6014 static void block_unblock_all_queues(bool block)
6015 {
6016 	struct sdebug_host_info *sdhp;
6017 
6018 	lockdep_assert_held(&sdebug_host_list_mutex);
6019 
6020 	list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
6021 		struct Scsi_Host *shost = sdhp->shost;
6022 
6023 		if (block)
6024 			scsi_block_requests(shost);
6025 		else
6026 			scsi_unblock_requests(shost);
6027 	}
6028 }
6029 
6030 /* Adjust (by rounding down) the sdebug_cmnd_count so abs(every_nth)-1
6031  * commands will be processed normally before triggers occur.
6032  */
6033 static void tweak_cmnd_count(void)
6034 {
6035 	int count, modulo;
6036 
6037 	modulo = abs(sdebug_every_nth);
6038 	if (modulo < 2)
6039 		return;
6040 
6041 	mutex_lock(&sdebug_host_list_mutex);
6042 	block_unblock_all_queues(true);
6043 	count = atomic_read(&sdebug_cmnd_count);
6044 	atomic_set(&sdebug_cmnd_count, (count / modulo) * modulo);
6045 	block_unblock_all_queues(false);
6046 	mutex_unlock(&sdebug_host_list_mutex);
6047 }
6048 
6049 static void clear_queue_stats(void)
6050 {
6051 	atomic_set(&sdebug_cmnd_count, 0);
6052 	atomic_set(&sdebug_completions, 0);
6053 	atomic_set(&sdebug_miss_cpus, 0);
6054 	atomic_set(&sdebug_a_tsf, 0);
6055 }
6056 
6057 static bool inject_on_this_cmd(void)
6058 {
6059 	if (sdebug_every_nth == 0)
6060 		return false;
6061 	return (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) == 0;
6062 }
6063 
6064 #define INCLUSIVE_TIMING_MAX_NS 1000000		/* 1 millisecond */
6065 
6066 
6067 void sdebug_free_queued_cmd(struct sdebug_queued_cmd *sqcp)
6068 {
6069 	if (sqcp)
6070 		kmem_cache_free(queued_cmd_cache, sqcp);
6071 }
6072 
6073 static struct sdebug_queued_cmd *sdebug_alloc_queued_cmd(struct scsi_cmnd *scmd)
6074 {
6075 	struct sdebug_queued_cmd *sqcp;
6076 	struct sdebug_defer *sd_dp;
6077 
6078 	sqcp = kmem_cache_zalloc(queued_cmd_cache, GFP_ATOMIC);
6079 	if (!sqcp)
6080 		return NULL;
6081 
6082 	sd_dp = &sqcp->sd_dp;
6083 
6084 	hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
6085 	sd_dp->hrt.function = sdebug_q_cmd_hrt_complete;
6086 	INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);
6087 
6088 	sqcp->scmd = scmd;
6089 
6090 	return sqcp;
6091 }
6092 
6093 /* Complete the processing of the thread that queued a SCSI command to this
6094  * driver. It either completes the command by calling cmnd_done() or
6095  * schedules a hr timer or work queue then returns 0. Returns
6096  * SCSI_MLQUEUE_HOST_BUSY if temporarily out of resources.
6097  */
static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
			 int scsi_result,
			 int (*pfp)(struct scsi_cmnd *,
				    struct sdebug_dev_info *),
			 int delta_jiff, int ndelay)
{
	struct request *rq = scsi_cmd_to_rq(cmnd);
	bool polled = rq->cmd_flags & REQ_POLLED;
	struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmnd);
	unsigned long flags;
	u64 ns_from_boot = 0;
	struct sdebug_queued_cmd *sqcp;
	struct scsi_device *sdp;
	struct sdebug_defer *sd_dp;

	/* no per-LUN state: answer immediately with DID_NO_CONNECT */
	if (unlikely(devip == NULL)) {
		if (scsi_result == 0)
			scsi_result = DID_NO_CONNECT << 16;
		goto respond_in_thread;
	}
	sdp = cmnd->device;

	/* delay of 0 means respond synchronously in the caller's thread */
	if (delta_jiff == 0)
		goto respond_in_thread;


	/* SDEBUG_OPT_RARE_TSF: occasionally inject TASK SET FULL when the
	 * queue is exactly at its depth, every abs(every_nth) occurrences
	 */
	if (unlikely(sdebug_every_nth && (SDEBUG_OPT_RARE_TSF & sdebug_opts) &&
		     (scsi_result == 0))) {
		int num_in_q = scsi_device_busy(sdp);
		int qdepth = cmnd->device->queue_depth;

		if ((num_in_q == qdepth) &&
		    (atomic_inc_return(&sdebug_a_tsf) >=
		     abs(sdebug_every_nth))) {
			atomic_set(&sdebug_a_tsf, 0);
			scsi_result = device_qfull_result;

			if (unlikely(SDEBUG_OPT_Q_NOISE & sdebug_opts))
				sdev_printk(KERN_INFO, sdp, "%s: num_in_q=%d +1, <inject> status: TASK SET FULL\n",
					    __func__, num_in_q);
		}
	}

	sqcp = sdebug_alloc_queued_cmd(cmnd);
	if (!sqcp) {
		pr_err("%s no alloc\n", __func__);
		return SCSI_MLQUEUE_HOST_BUSY;
	}
	sd_dp = &sqcp->sd_dp;

	if (polled)
		ns_from_boot = ktime_get_boottime_ns();

	/* one of the resp_*() response functions is called here */
	cmnd->result = pfp ? pfp(cmnd, devip) : 0;
	if (cmnd->result & SDEG_RES_IMMED_MASK) {
		/* response function asked for an immediate completion */
		cmnd->result &= ~SDEG_RES_IMMED_MASK;
		delta_jiff = ndelay = 0;
	}
	if (cmnd->result == 0 && scsi_result != 0)
		cmnd->result = scsi_result;
	/* optional one-shot transport error injection */
	if (cmnd->result == 0 && unlikely(sdebug_opts & SDEBUG_OPT_TRANSPORT_ERR)) {
		if (atomic_read(&sdeb_inject_pending)) {
			mk_sense_buffer(cmnd, ABORTED_COMMAND, TRANSPORT_PROBLEM, ACK_NAK_TO);
			atomic_set(&sdeb_inject_pending, 0);
			cmnd->result = check_condition_result;
		}
	}

	if (unlikely(sdebug_verbose && cmnd->result))
		sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n",
			    __func__, cmnd->result);

	if (delta_jiff > 0 || ndelay > 0) {
		ktime_t kt;

		if (delta_jiff > 0) {
			u64 ns = jiffies_to_nsecs(delta_jiff);

			if (sdebug_random && ns < U32_MAX) {
				ns = get_random_u32_below((u32)ns);
			} else if (sdebug_random) {
				ns >>= 12;	/* scale to 4 usec precision */
				if (ns < U32_MAX)	/* over 4 hours max */
					ns = get_random_u32_below((u32)ns);
				ns <<= 12;
			}
			kt = ns_to_ktime(ns);
		} else {	/* ndelay has a 4.2 second max */
			kt = sdebug_random ? get_random_u32_below((u32)ndelay) :
					     (u32)ndelay;
			if (ndelay < INCLUSIVE_TIMING_MAX_NS) {
				u64 d = ktime_get_boottime_ns() - ns_from_boot;

				if (kt <= d) {	/* elapsed duration >= kt */
					/* call scsi_done() from this thread */
					sdebug_free_queued_cmd(sqcp);
					scsi_done(cmnd);
					return 0;
				}
				/* otherwise reduce kt by elapsed time */
				kt -= d;
			}
		}
		if (sdebug_statistics)
			sd_dp->issuing_cpu = raw_smp_processor_id();
		if (polled) {
			/* leave completion to the mq_poll path */
			spin_lock_irqsave(&sdsc->lock, flags);
			sd_dp->cmpl_ts = ktime_add(ns_to_ktime(ns_from_boot), kt);
			ASSIGN_QUEUED_CMD(cmnd, sqcp);
			WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_POLL);
			spin_unlock_irqrestore(&sdsc->lock, flags);
		} else {
			/* schedule the invocation of scsi_done() for a later time */
			spin_lock_irqsave(&sdsc->lock, flags);
			ASSIGN_QUEUED_CMD(cmnd, sqcp);
			WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_HRT);
			hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED);
			/*
			 * The completion handler will try to grab sqcp->lock,
			 * so there is no chance that the completion handler
			 * will call scsi_done() until we release the lock
			 * here (so ok to keep referencing sdsc).
			 */
			spin_unlock_irqrestore(&sdsc->lock, flags);
		}
	} else {	/* jdelay < 0, use work queue */
		/* SDEBUG_OPT_CMD_ABORT: mark this command for injected abort */
		if (unlikely((sdebug_opts & SDEBUG_OPT_CMD_ABORT) &&
			     atomic_read(&sdeb_inject_pending))) {
			sd_dp->aborted = true;
			atomic_set(&sdeb_inject_pending, 0);
			sdev_printk(KERN_INFO, sdp, "abort request tag=%#x\n",
				    blk_mq_unique_tag_to_tag(get_tag(cmnd)));
		}

		if (sdebug_statistics)
			sd_dp->issuing_cpu = raw_smp_processor_id();
		if (polled) {
			spin_lock_irqsave(&sdsc->lock, flags);
			ASSIGN_QUEUED_CMD(cmnd, sqcp);
			sd_dp->cmpl_ts = ns_to_ktime(ns_from_boot);
			WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_POLL);
			spin_unlock_irqrestore(&sdsc->lock, flags);
		} else {
			spin_lock_irqsave(&sdsc->lock, flags);
			ASSIGN_QUEUED_CMD(cmnd, sqcp);
			WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_WQ);
			schedule_work(&sd_dp->ew.work);
			spin_unlock_irqrestore(&sdsc->lock, flags);
		}
	}

	return 0;

respond_in_thread:	/* call back to mid-layer using invocation thread */
	cmnd->result = pfp != NULL ? pfp(cmnd, devip) : 0;
	cmnd->result &= ~SDEG_RES_IMMED_MASK;
	if (cmnd->result == 0 && scsi_result != 0)
		cmnd->result = scsi_result;
	scsi_done(cmnd);
	return 0;
}
6260 
6261 /* Note: The following macros create attribute files in the
6262    /sys/module/scsi_debug/parameters directory. Unfortunately this
6263    driver is unaware of a change and cannot trigger auxiliary actions
6264    as it can when the corresponding attribute in the
6265    /sys/bus/pseudo/drivers/scsi_debug directory is changed.
6266  */
/* Parameter registrations (mostly alphabetical by parameter name). */
module_param_named(add_host, sdebug_add_host, int, S_IRUGO | S_IWUSR);
module_param_named(ato, sdebug_ato, int, S_IRUGO);
module_param_named(cdb_len, sdebug_cdb_len, int, 0644);
module_param_named(clustering, sdebug_clustering, bool, S_IRUGO | S_IWUSR);
module_param_named(delay, sdebug_jdelay, int, S_IRUGO | S_IWUSR);
module_param_named(dev_size_mb, sdebug_dev_size_mb, int, S_IRUGO);
module_param_named(dif, sdebug_dif, int, S_IRUGO);
module_param_named(dix, sdebug_dix, int, S_IRUGO);
module_param_named(dsense, sdebug_dsense, int, S_IRUGO | S_IWUSR);
module_param_named(every_nth, sdebug_every_nth, int, S_IRUGO | S_IWUSR);
module_param_named(fake_rw, sdebug_fake_rw, int, S_IRUGO | S_IWUSR);
module_param_named(guard, sdebug_guard, uint, S_IRUGO);
module_param_named(host_lock, sdebug_host_lock, bool, S_IRUGO | S_IWUSR);
module_param_named(host_max_queue, sdebug_host_max_queue, int, S_IRUGO);
module_param_string(inq_product, sdebug_inq_product_id,
		    sizeof(sdebug_inq_product_id), S_IRUGO | S_IWUSR);
module_param_string(inq_rev, sdebug_inq_product_rev,
		    sizeof(sdebug_inq_product_rev), S_IRUGO | S_IWUSR);
module_param_string(inq_vendor, sdebug_inq_vendor_id,
		    sizeof(sdebug_inq_vendor_id), S_IRUGO | S_IWUSR);
module_param_named(lbprz, sdebug_lbprz, int, S_IRUGO);
module_param_named(lbpu, sdebug_lbpu, int, S_IRUGO);
module_param_named(lbpws, sdebug_lbpws, int, S_IRUGO);
module_param_named(lbpws10, sdebug_lbpws10, int, S_IRUGO);
module_param_named(lowest_aligned, sdebug_lowest_aligned, int, S_IRUGO);
module_param_named(lun_format, sdebug_lun_am_i, int, S_IRUGO | S_IWUSR);
module_param_named(max_luns, sdebug_max_luns, int, S_IRUGO | S_IWUSR);
module_param_named(max_queue, sdebug_max_queue, int, S_IRUGO | S_IWUSR);
module_param_named(medium_error_count, sdebug_medium_error_count, int,
		   S_IRUGO | S_IWUSR);
module_param_named(medium_error_start, sdebug_medium_error_start, int,
		   S_IRUGO | S_IWUSR);
module_param_named(ndelay, sdebug_ndelay, int, S_IRUGO | S_IWUSR);
module_param_named(no_lun_0, sdebug_no_lun_0, int, S_IRUGO | S_IWUSR);
module_param_named(no_rwlock, sdebug_no_rwlock, bool, S_IRUGO | S_IWUSR);
module_param_named(no_uld, sdebug_no_uld, int, S_IRUGO);
module_param_named(num_parts, sdebug_num_parts, int, S_IRUGO);
module_param_named(num_tgts, sdebug_num_tgts, int, S_IRUGO | S_IWUSR);
module_param_named(opt_blks, sdebug_opt_blks, int, S_IRUGO);
module_param_named(opt_xferlen_exp, sdebug_opt_xferlen_exp, int, S_IRUGO);
module_param_named(opts, sdebug_opts, int, S_IRUGO | S_IWUSR);
module_param_named(per_host_store, sdebug_per_host_store, bool,
		   S_IRUGO | S_IWUSR);
module_param_named(physblk_exp, sdebug_physblk_exp, int, S_IRUGO);
module_param_named(ptype, sdebug_ptype, int, S_IRUGO | S_IWUSR);
module_param_named(random, sdebug_random, bool, S_IRUGO | S_IWUSR);
module_param_named(removable, sdebug_removable, bool, S_IRUGO | S_IWUSR);
module_param_named(scsi_level, sdebug_scsi_level, int, S_IRUGO);
module_param_named(sector_size, sdebug_sector_size, int, S_IRUGO);
module_param_named(statistics, sdebug_statistics, bool, S_IRUGO | S_IWUSR);
module_param_named(strict, sdebug_strict, bool, S_IRUGO | S_IWUSR);
module_param_named(submit_queues, submit_queues, int, S_IRUGO);
module_param_named(poll_queues, poll_queues, int, S_IRUGO);
module_param_named(tur_ms_to_ready, sdeb_tur_ms_to_ready, int, S_IRUGO);
module_param_named(unmap_alignment, sdebug_unmap_alignment, int, S_IRUGO);
module_param_named(unmap_granularity, sdebug_unmap_granularity, int, S_IRUGO);
module_param_named(unmap_max_blocks, sdebug_unmap_max_blocks, int, S_IRUGO);
module_param_named(unmap_max_desc, sdebug_unmap_max_desc, int, S_IRUGO);
module_param_named(uuid_ctl, sdebug_uuid_ctl, int, S_IRUGO);
module_param_named(virtual_gb, sdebug_virtual_gb, int, S_IRUGO | S_IWUSR);
module_param_named(vpd_use_hostno, sdebug_vpd_use_hostno, int,
		   S_IRUGO | S_IWUSR);
module_param_named(wp, sdebug_wp, bool, S_IRUGO | S_IWUSR);
module_param_named(write_same_length, sdebug_write_same_length, int,
		   S_IRUGO | S_IWUSR);
module_param_named(zbc, sdeb_zbc_model_s, charp, S_IRUGO);
module_param_named(zone_cap_mb, sdeb_zbc_zone_cap_mb, int, S_IRUGO);
module_param_named(zone_max_open, sdeb_zbc_max_open, int, S_IRUGO);
module_param_named(zone_nr_conv, sdeb_zbc_nr_conv, int, S_IRUGO);
module_param_named(zone_size_mb, sdeb_zbc_zone_size_mb, int, S_IRUGO);
module_param_named(allow_restart, sdebug_allow_restart, bool, S_IRUGO | S_IWUSR);

MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
MODULE_DESCRIPTION("SCSI debug adapter driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(SDEBUG_VERSION);

/* One-line description per parameter, shown by modinfo. */
MODULE_PARM_DESC(add_host, "add n hosts, in sysfs if negative remove host(s) (def=1)");
MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
MODULE_PARM_DESC(cdb_len, "suggest CDB lengths to drivers (def=10)");
MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny");
MODULE_PARM_DESC(dev_size_mb, "size in MiB of ram shared by devs(def=8)");
MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
MODULE_PARM_DESC(host_lock, "host_lock is ignored (def=0)");
MODULE_PARM_DESC(host_max_queue,
		 "host max # of queued cmds (0 to max(def) [max_queue fixed equal for !0])");
MODULE_PARM_DESC(inq_product, "SCSI INQUIRY product string (def=\"scsi_debug\")");
MODULE_PARM_DESC(inq_rev, "SCSI INQUIRY revision string (def=\""
		 SDEBUG_VERSION "\")");
MODULE_PARM_DESC(inq_vendor, "SCSI INQUIRY vendor string (def=\"Linux\")");
MODULE_PARM_DESC(lbprz,
		 "on read unmapped LBs return 0 when 1 (def), return 0xff when 2");
MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
MODULE_PARM_DESC(lun_format, "LUN format: 0->peripheral (def); 1 --> flat address method");
MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to max(def))");
MODULE_PARM_DESC(medium_error_count, "count of sectors to return follow on MEDIUM error");
MODULE_PARM_DESC(medium_error_start, "starting sector number to return MEDIUM error");
MODULE_PARM_DESC(ndelay, "response delay in nanoseconds (def=0 -> ignore)");
MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
MODULE_PARM_DESC(no_rwlock, "don't protect user data reads+writes (def=0)");
MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0))");
MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=1024)");
MODULE_PARM_DESC(opt_xferlen_exp, "optimal transfer length granularity exponent (def=physblk_exp)");
MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
MODULE_PARM_DESC(per_host_store, "If set, next positive add_host will get new store (def=0)");
MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
MODULE_PARM_DESC(poll_queues, "support for iouring iopoll queues (1 to max(submit_queues - 1))");
MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
MODULE_PARM_DESC(random, "If set, uniformly randomize command duration between 0 and delay_in_ns");
MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=7[SPC-5])");
MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
MODULE_PARM_DESC(statistics, "collect statistics on commands, queues (def=0)");
MODULE_PARM_DESC(strict, "stricter checks: reserved field in cdb (def=0)");
MODULE_PARM_DESC(submit_queues, "support for block multi-queue (def=1)");
MODULE_PARM_DESC(tur_ms_to_ready, "TEST UNIT READY millisecs before initial good status (def=0)");
MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)");
MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
MODULE_PARM_DESC(uuid_ctl,
		 "1->use uuid for lu name, 0->don't, 2->all use same (def=0)");
MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)");
MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
MODULE_PARM_DESC(wp, "Write Protect (def=0)");
MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
MODULE_PARM_DESC(zbc, "'none' [0]; 'aware' [1]; 'managed' [2] (def=0). Can have 'host-' prefix");
MODULE_PARM_DESC(zone_cap_mb, "Zone capacity in MiB (def=zone size)");
MODULE_PARM_DESC(zone_max_open, "Maximum number of open zones; [0] for no limit (def=auto)");
MODULE_PARM_DESC(zone_nr_conv, "Number of conventional zones (def=1)");
MODULE_PARM_DESC(zone_size_mb, "Zone size in MiB (def=auto)");
MODULE_PARM_DESC(allow_restart, "Set scsi_device's allow_restart flag(def=0)");
6411 
6412 #define SDEBUG_INFO_LEN 256
6413 static char sdebug_info[SDEBUG_INFO_LEN];
6414 
6415 static const char *scsi_debug_info(struct Scsi_Host *shp)
6416 {
6417 	int k;
6418 
6419 	k = scnprintf(sdebug_info, SDEBUG_INFO_LEN, "%s: version %s [%s]\n",
6420 		      my_name, SDEBUG_VERSION, sdebug_version_date);
6421 	if (k >= (SDEBUG_INFO_LEN - 1))
6422 		return sdebug_info;
6423 	scnprintf(sdebug_info + k, SDEBUG_INFO_LEN - k,
6424 		  "  dev_size_mb=%d, opts=0x%x, submit_queues=%d, %s=%d",
6425 		  sdebug_dev_size_mb, sdebug_opts, submit_queues,
6426 		  "statistics", (int)sdebug_statistics);
6427 	return sdebug_info;
6428 }
6429 
6430 /* 'echo <val> > /proc/scsi/scsi_debug/<host_id>' writes to opts */
6431 static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer,
6432 				 int length)
6433 {
6434 	char arr[16];
6435 	int opts;
6436 	int minLen = length > 15 ? 15 : length;
6437 
6438 	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
6439 		return -EACCES;
6440 	memcpy(arr, buffer, minLen);
6441 	arr[minLen] = '\0';
6442 	if (1 != sscanf(arr, "%d", &opts))
6443 		return -EINVAL;
6444 	sdebug_opts = opts;
6445 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
6446 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
6447 	if (sdebug_every_nth != 0)
6448 		tweak_cmnd_count();
6449 	return length;
6450 }
6451 
/* Per-iteration state for sdebug_submit_queue_iter() below. */
struct sdebug_submit_queue_data {
	int *first;	/* out: lowest busy tag seen (-1 if none) */
	int *last;	/* out: highest busy tag seen */
	int queue_num;	/* in: hardware queue to match against */
};
6457 
6458 static bool sdebug_submit_queue_iter(struct request *rq, void *opaque)
6459 {
6460 	struct sdebug_submit_queue_data *data = opaque;
6461 	u32 unique_tag = blk_mq_unique_tag(rq);
6462 	u16 hwq = blk_mq_unique_tag_to_hwq(unique_tag);
6463 	u16 tag = blk_mq_unique_tag_to_tag(unique_tag);
6464 	int queue_num = data->queue_num;
6465 
6466 	if (hwq != queue_num)
6467 		return true;
6468 
6469 	/* Rely on iter'ing in ascending tag order */
6470 	if (*data->first == -1)
6471 		*data->first = *data->last = tag;
6472 	else
6473 		*data->last = tag;
6474 
6475 	return true;
6476 }
6477 
/* Output seen with 'cat /proc/scsi/scsi_debug/<host_id>'. It will be the
 * same for each scsi_debug host (if more than one). Some of the counters
 * output are not atomics so might be inaccurate in a busy system. */
static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
{
	struct sdebug_host_info *sdhp;
	int j;

	/* global settings and event counters */
	seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n",
		   SDEBUG_VERSION, sdebug_version_date);
	seq_printf(m, "num_tgts=%d, %ssize=%d MB, opts=0x%x, every_nth=%d\n",
		   sdebug_num_tgts, "shared (ram) ", sdebug_dev_size_mb,
		   sdebug_opts, sdebug_every_nth);
	seq_printf(m, "delay=%d, ndelay=%d, max_luns=%d, sector_size=%d %s\n",
		   sdebug_jdelay, sdebug_ndelay, sdebug_max_luns,
		   sdebug_sector_size, "bytes");
	seq_printf(m, "cylinders=%d, heads=%d, sectors=%d, command aborts=%d\n",
		   sdebug_cylinders_per, sdebug_heads, sdebug_sectors_per,
		   num_aborts);
	seq_printf(m, "RESETs: device=%d, target=%d, bus=%d, host=%d\n",
		   num_dev_resets, num_target_resets, num_bus_resets,
		   num_host_resets);
	seq_printf(m, "dix_reads=%d, dix_writes=%d, dif_errors=%d\n",
		   dix_reads, dix_writes, dif_errors);
	seq_printf(m, "usec_in_jiffy=%lu, statistics=%d\n", TICK_NSEC / 1000,
		   sdebug_statistics);
	seq_printf(m, "cmnd_count=%d, completions=%d, %s=%d, a_tsf=%d, mq_polls=%d\n",
		   atomic_read(&sdebug_cmnd_count),
		   atomic_read(&sdebug_completions),
		   "miss_cpus", atomic_read(&sdebug_miss_cpus),
		   atomic_read(&sdebug_a_tsf),
		   atomic_read(&sdeb_mq_poll_count));

	/* per hardware queue: first/last busy tag, found via blk-mq iter */
	seq_printf(m, "submit_queues=%d\n", submit_queues);
	for (j = 0; j < submit_queues; ++j) {
		int f = -1, l = -1;
		struct sdebug_submit_queue_data data = {
			.queue_num = j,
			.first = &f,
			.last = &l,
		};
		seq_printf(m, "  queue %d:\n", j);
		blk_mq_tagset_busy_iter(&host->tag_set, sdebug_submit_queue_iter,
					&data);
		if (f >= 0) {
			seq_printf(m, "    in_use_bm BUSY: %s: %d,%d\n",
				   "first,last bits", f, l);
		}
	}

	/* host list and per-store xarray (marked entries are idle stores) */
	seq_printf(m, "this host_no=%d\n", host->host_no);
	if (!xa_empty(per_store_ap)) {
		bool niu;
		int idx;
		unsigned long l_idx;
		struct sdeb_store_info *sip;

		seq_puts(m, "\nhost list:\n");
		j = 0;
		list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
			idx = sdhp->si_idx;
			seq_printf(m, "  %d: host_no=%d, si_idx=%d\n", j,
				   sdhp->shost->host_no, idx);
			++j;
		}
		seq_printf(m, "\nper_store array [most_recent_idx=%d]:\n",
			   sdeb_most_recent_idx);
		j = 0;
		xa_for_each(per_store_ap, l_idx, sip) {
			niu = xa_get_mark(per_store_ap, l_idx,
					  SDEB_XA_NOT_IN_USE);
			idx = (int)l_idx;
			seq_printf(m, "  %d: idx=%d%s\n", j, idx,
				   (niu ? "  not_in_use" : ""));
			++j;
		}
	}
	return 0;
}
6557 
/* Show the current command response delay in jiffies (sdebug_jdelay). */
static ssize_t delay_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_jdelay);
}
/* Returns -EBUSY if jdelay is being changed and commands are queued. The unit
 * of delay is jiffies.
 */
static ssize_t delay_store(struct device_driver *ddp, const char *buf,
			   size_t count)
{
	int jdelay, res;

	if (count > 0 && sscanf(buf, "%d", &jdelay) == 1) {
		res = count;
		if (sdebug_jdelay != jdelay) {
			struct sdebug_host_info *sdhp;

			/* freeze all queues while the delay is switched */
			mutex_lock(&sdebug_host_list_mutex);
			block_unblock_all_queues(true);

			/* refuse if any host still has commands in flight */
			list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
				struct Scsi_Host *shost = sdhp->shost;

				if (scsi_host_busy(shost)) {
					res = -EBUSY;   /* queued commands */
					break;
				}
			}
			if (res > 0) {
				sdebug_jdelay = jdelay;
				sdebug_ndelay = 0;	/* jiffy delay takes over */
			}
			block_unblock_all_queues(false);
			mutex_unlock(&sdebug_host_list_mutex);
		}
		return res;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(delay);
6598 
/* Show the current command response delay in nanoseconds (sdebug_ndelay). */
static ssize_t ndelay_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ndelay);
}
/* Returns -EBUSY if ndelay is being changed and commands are queued */
/* If > 0 and accepted then sdebug_jdelay is set to JDELAY_OVERRIDDEN */
static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
			    size_t count)
{
	int ndelay, res;

	/* accept only 0 <= ndelay < 1 second (in nanoseconds) */
	if ((count > 0) && (1 == sscanf(buf, "%d", &ndelay)) &&
	    (ndelay >= 0) && (ndelay < (1000 * 1000 * 1000))) {
		res = count;
		if (sdebug_ndelay != ndelay) {
			struct sdebug_host_info *sdhp;

			/* freeze all queues while the delay is switched */
			mutex_lock(&sdebug_host_list_mutex);
			block_unblock_all_queues(true);

			/* refuse if any host still has commands in flight */
			list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
				struct Scsi_Host *shost = sdhp->shost;

				if (scsi_host_busy(shost)) {
					res = -EBUSY;   /* queued commands */
					break;
				}
			}

			if (res > 0) {
				sdebug_ndelay = ndelay;
				sdebug_jdelay = ndelay  ? JDELAY_OVERRIDDEN
							: DEF_JDELAY;
			}
			block_unblock_all_queues(false);
			mutex_unlock(&sdebug_host_list_mutex);
		}
		return res;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(ndelay);
6641 
6642 static ssize_t opts_show(struct device_driver *ddp, char *buf)
6643 {
6644 	return scnprintf(buf, PAGE_SIZE, "0x%x\n", sdebug_opts);
6645 }
6646 
6647 static ssize_t opts_store(struct device_driver *ddp, const char *buf,
6648 			  size_t count)
6649 {
6650 	int opts;
6651 	char work[20];
6652 
6653 	if (sscanf(buf, "%10s", work) == 1) {
6654 		if (strncasecmp(work, "0x", 2) == 0) {
6655 			if (kstrtoint(work + 2, 16, &opts) == 0)
6656 				goto opts_done;
6657 		} else {
6658 			if (kstrtoint(work, 10, &opts) == 0)
6659 				goto opts_done;
6660 		}
6661 	}
6662 	return -EINVAL;
6663 opts_done:
6664 	sdebug_opts = opts;
6665 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
6666 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
6667 	tweak_cmnd_count();
6668 	return count;
6669 }
6670 static DRIVER_ATTR_RW(opts);
6671 
6672 static ssize_t ptype_show(struct device_driver *ddp, char *buf)
6673 {
6674 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ptype);
6675 }
6676 static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
6677 			   size_t count)
6678 {
6679 	int n;
6680 
6681 	/* Cannot change from or to TYPE_ZBC with sysfs */
6682 	if (sdebug_ptype == TYPE_ZBC)
6683 		return -EINVAL;
6684 
6685 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6686 		if (n == TYPE_ZBC)
6687 			return -EINVAL;
6688 		sdebug_ptype = n;
6689 		return count;
6690 	}
6691 	return -EINVAL;
6692 }
6693 static DRIVER_ATTR_RW(ptype);
6694 
6695 static ssize_t dsense_show(struct device_driver *ddp, char *buf)
6696 {
6697 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dsense);
6698 }
6699 static ssize_t dsense_store(struct device_driver *ddp, const char *buf,
6700 			    size_t count)
6701 {
6702 	int n;
6703 
6704 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6705 		sdebug_dsense = n;
6706 		return count;
6707 	}
6708 	return -EINVAL;
6709 }
6710 static DRIVER_ATTR_RW(dsense);
6711 
/* Show whether READ/WRITE data transfer is being skipped (sdebug_fake_rw). */
static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_fake_rw);
}
/*
 * Toggling fake_rw also manages the backing ram store(s): 1 --> 0 sets up
 * (or re-uses) a store shared by all hosts, 0 --> 1 erases all stores apart
 * from the first.  Non-transitions are accepted but do nothing.
 */
static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
			     size_t count)
{
	int n, idx;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		bool want_store = (n == 0);
		struct sdebug_host_info *sdhp;

		/* normalize both values to 0/1 before comparing */
		n = (n > 0);
		sdebug_fake_rw = (sdebug_fake_rw > 0);
		if (sdebug_fake_rw == n)
			return count;	/* not transitioning so do nothing */

		if (want_store) {	/* 1 --> 0 transition, set up store */
			if (sdeb_first_idx < 0) {
				idx = sdebug_add_store();
				if (idx < 0)
					return idx;
			} else {
				idx = sdeb_first_idx;
				xa_clear_mark(per_store_ap, idx,
					      SDEB_XA_NOT_IN_USE);
			}
			/* make all hosts use same store */
			list_for_each_entry(sdhp, &sdebug_host_list,
					    host_list) {
				if (sdhp->si_idx != idx) {
					xa_set_mark(per_store_ap, sdhp->si_idx,
						    SDEB_XA_NOT_IN_USE);
					sdhp->si_idx = idx;
				}
			}
			sdeb_most_recent_idx = idx;
		} else {	/* 0 --> 1 transition is trigger for shrink */
			sdebug_erase_all_stores(true /* apart from first */);
		}
		sdebug_fake_rw = n;
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(fake_rw);
6759 
6760 static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf)
6761 {
6762 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_lun_0);
6763 }
6764 static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf,
6765 			      size_t count)
6766 {
6767 	int n;
6768 
6769 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6770 		sdebug_no_lun_0 = n;
6771 		return count;
6772 	}
6773 	return -EINVAL;
6774 }
6775 static DRIVER_ATTR_RW(no_lun_0);
6776 
6777 static ssize_t num_tgts_show(struct device_driver *ddp, char *buf)
6778 {
6779 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_tgts);
6780 }
6781 static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf,
6782 			      size_t count)
6783 {
6784 	int n;
6785 
6786 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6787 		sdebug_num_tgts = n;
6788 		sdebug_max_tgts_luns();
6789 		return count;
6790 	}
6791 	return -EINVAL;
6792 }
6793 static DRIVER_ATTR_RW(num_tgts);
6794 
6795 static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf)
6796 {
6797 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dev_size_mb);
6798 }
6799 static DRIVER_ATTR_RO(dev_size_mb);
6800 
6801 static ssize_t per_host_store_show(struct device_driver *ddp, char *buf)
6802 {
6803 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_per_host_store);
6804 }
6805 
6806 static ssize_t per_host_store_store(struct device_driver *ddp, const char *buf,
6807 				    size_t count)
6808 {
6809 	bool v;
6810 
6811 	if (kstrtobool(buf, &v))
6812 		return -EINVAL;
6813 
6814 	sdebug_per_host_store = v;
6815 	return count;
6816 }
6817 static DRIVER_ATTR_RW(per_host_store);
6818 
6819 static ssize_t num_parts_show(struct device_driver *ddp, char *buf)
6820 {
6821 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_parts);
6822 }
6823 static DRIVER_ATTR_RO(num_parts);
6824 
6825 static ssize_t every_nth_show(struct device_driver *ddp, char *buf)
6826 {
6827 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_every_nth);
6828 }
6829 static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
6830 			       size_t count)
6831 {
6832 	int nth;
6833 	char work[20];
6834 
6835 	if (sscanf(buf, "%10s", work) == 1) {
6836 		if (strncasecmp(work, "0x", 2) == 0) {
6837 			if (kstrtoint(work + 2, 16, &nth) == 0)
6838 				goto every_nth_done;
6839 		} else {
6840 			if (kstrtoint(work, 10, &nth) == 0)
6841 				goto every_nth_done;
6842 		}
6843 	}
6844 	return -EINVAL;
6845 
6846 every_nth_done:
6847 	sdebug_every_nth = nth;
6848 	if (nth && !sdebug_statistics) {
6849 		pr_info("every_nth needs statistics=1, set it\n");
6850 		sdebug_statistics = true;
6851 	}
6852 	tweak_cmnd_count();
6853 	return count;
6854 }
6855 static DRIVER_ATTR_RW(every_nth);
6856 
/* Show the current LUN address method (sdebug_lun_am). */
static ssize_t lun_format_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_lun_am);
}
/*
 * Accepts 0 (peripheral) or 1 (flat) only.  On an actual change, raise the
 * LUNS_CHANGED unit attention on every device of every host.
 */
static ssize_t lun_format_store(struct device_driver *ddp, const char *buf,
				size_t count)
{
	int n;
	bool changed;

	if (kstrtoint(buf, 0, &n))
		return -EINVAL;
	if (n >= 0) {
		if (n > (int)SAM_LUN_AM_FLAT) {
			pr_warn("only LUN address methods 0 and 1 are supported\n");
			return -EINVAL;
		}
		changed = ((int)sdebug_lun_am != n);
		sdebug_lun_am = n;
		if (changed && sdebug_scsi_level >= 5) {	/* >= SPC-3 */
			struct sdebug_host_info *sdhp;
			struct sdebug_dev_info *dp;

			mutex_lock(&sdebug_host_list_mutex);
			list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
				list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
					set_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
				}
			}
			mutex_unlock(&sdebug_host_list_mutex);
		}
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(lun_format);
6893 
/* Show the maximum number of LUNs per target (sdebug_max_luns). */
static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_luns);
}
/*
 * Accepts 0..256.  On an actual change, raise the LUNS_CHANGED unit
 * attention on every device of every host.
 */
static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
			      size_t count)
{
	int n;
	bool changed;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		if (n > 256) {
			pr_warn("max_luns can be no more than 256\n");
			return -EINVAL;
		}
		changed = (sdebug_max_luns != n);
		sdebug_max_luns = n;
		sdebug_max_tgts_luns();
		if (changed && (sdebug_scsi_level >= 5)) {	/* >= SPC-3 */
			struct sdebug_host_info *sdhp;
			struct sdebug_dev_info *dp;

			mutex_lock(&sdebug_host_list_mutex);
			list_for_each_entry(sdhp, &sdebug_host_list,
					    host_list) {
				list_for_each_entry(dp, &sdhp->dev_info_list,
						    dev_list) {
					set_bit(SDEBUG_UA_LUNS_CHANGED,
						dp->uas_bm);
				}
			}
			mutex_unlock(&sdebug_host_list_mutex);
		}
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(max_luns);
6932 
6933 static ssize_t max_queue_show(struct device_driver *ddp, char *buf)
6934 {
6935 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_queue);
6936 }
6937 /* N.B. max_queue can be changed while there are queued commands. In flight
6938  * commands beyond the new max_queue will be completed. */
6939 static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
6940 			       size_t count)
6941 {
6942 	int n;
6943 
6944 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
6945 	    (n <= SDEBUG_CANQUEUE) &&
6946 	    (sdebug_host_max_queue == 0)) {
6947 		mutex_lock(&sdebug_host_list_mutex);
6948 
6949 		/* We may only change sdebug_max_queue when we have no shosts */
6950 		if (list_empty(&sdebug_host_list))
6951 			sdebug_max_queue = n;
6952 		else
6953 			count = -EBUSY;
6954 		mutex_unlock(&sdebug_host_list_mutex);
6955 		return count;
6956 	}
6957 	return -EINVAL;
6958 }
6959 static DRIVER_ATTR_RW(max_queue);
6960 
6961 static ssize_t host_max_queue_show(struct device_driver *ddp, char *buf)
6962 {
6963 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_host_max_queue);
6964 }
6965 
6966 static ssize_t no_rwlock_show(struct device_driver *ddp, char *buf)
6967 {
6968 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_rwlock);
6969 }
6970 
6971 static ssize_t no_rwlock_store(struct device_driver *ddp, const char *buf, size_t count)
6972 {
6973 	bool v;
6974 
6975 	if (kstrtobool(buf, &v))
6976 		return -EINVAL;
6977 
6978 	sdebug_no_rwlock = v;
6979 	return count;
6980 }
6981 static DRIVER_ATTR_RW(no_rwlock);
6982 
6983 /*
6984  * Since this is used for .can_queue, and we get the hc_idx tag from the bitmap
6985  * in range [0, sdebug_host_max_queue), we can't change it.
6986  */
6987 static DRIVER_ATTR_RO(host_max_queue);
6988 
6989 static ssize_t no_uld_show(struct device_driver *ddp, char *buf)
6990 {
6991 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_uld);
6992 }
6993 static DRIVER_ATTR_RO(no_uld);
6994 
6995 static ssize_t scsi_level_show(struct device_driver *ddp, char *buf)
6996 {
6997 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_scsi_level);
6998 }
6999 static DRIVER_ATTR_RO(scsi_level);
7000 
/* Show the virtual capacity in GiB (sdebug_virtual_gb). */
static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_virtual_gb);
}
/*
 * Change the virtual capacity and recompute sdebug_capacity.  On an actual
 * change, raise the CAPACITY_CHANGED unit attention on every device.
 */
static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
				size_t count)
{
	int n;
	bool changed;

	/* Ignore capacity change for ZBC drives for now */
	if (sdeb_zbc_in_use)
		return -ENOTSUPP;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		changed = (sdebug_virtual_gb != n);
		sdebug_virtual_gb = n;
		sdebug_capacity = get_sdebug_capacity();
		if (changed) {
			struct sdebug_host_info *sdhp;
			struct sdebug_dev_info *dp;

			mutex_lock(&sdebug_host_list_mutex);
			list_for_each_entry(sdhp, &sdebug_host_list,
					    host_list) {
				list_for_each_entry(dp, &sdhp->dev_info_list,
						    dev_list) {
					set_bit(SDEBUG_UA_CAPACITY_CHANGED,
						dp->uas_bm);
				}
			}
			mutex_unlock(&sdebug_host_list_mutex);
		}
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(virtual_gb);
7039 
static ssize_t add_host_show(struct device_driver *ddp, char *buf)
{
	/* absolute number of hosts currently active is what is shown */
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_hosts);
}

/*
 * Add (positive value) or remove (negative value) that many pseudo hosts.
 * With per-host stores enabled, adding prefers re-using an existing store
 * that is marked not-in-use before allocating a new one.
 */
static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
			      size_t count)
{
	bool found;
	unsigned long idx;
	struct sdeb_store_info *sip;
	bool want_phs = (sdebug_fake_rw == 0) && sdebug_per_host_store;
	int delta_hosts;

	if (sscanf(buf, "%d", &delta_hosts) != 1)
		return -EINVAL;
	if (delta_hosts > 0) {
		do {
			found = false;
			if (want_phs) {
				/* look for an idle store to re-use */
				xa_for_each_marked(per_store_ap, idx, sip,
						   SDEB_XA_NOT_IN_USE) {
					sdeb_most_recent_idx = (int)idx;
					found = true;
					break;
				}
				if (found)	/* re-use case */
					sdebug_add_host_helper((int)idx);
				else
					sdebug_do_add_host(true);
			} else {
				sdebug_do_add_host(false);
			}
		} while (--delta_hosts);
	} else if (delta_hosts < 0) {
		do {
			sdebug_do_remove_host(false);
		} while (++delta_hosts);
	}
	return count;
}
static DRIVER_ATTR_RW(add_host);
7083 
7084 static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf)
7085 {
7086 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_vpd_use_hostno);
7087 }
7088 static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf,
7089 				    size_t count)
7090 {
7091 	int n;
7092 
7093 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
7094 		sdebug_vpd_use_hostno = n;
7095 		return count;
7096 	}
7097 	return -EINVAL;
7098 }
7099 static DRIVER_ATTR_RW(vpd_use_hostno);
7100 
7101 static ssize_t statistics_show(struct device_driver *ddp, char *buf)
7102 {
7103 	return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_statistics);
7104 }
7105 static ssize_t statistics_store(struct device_driver *ddp, const char *buf,
7106 				size_t count)
7107 {
7108 	int n;
7109 
7110 	if ((count > 0) && (sscanf(buf, "%d", &n) == 1) && (n >= 0)) {
7111 		if (n > 0)
7112 			sdebug_statistics = true;
7113 		else {
7114 			clear_queue_stats();
7115 			sdebug_statistics = false;
7116 		}
7117 		return count;
7118 	}
7119 	return -EINVAL;
7120 }
7121 static DRIVER_ATTR_RW(statistics);
7122 
7123 static ssize_t sector_size_show(struct device_driver *ddp, char *buf)
7124 {
7125 	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_sector_size);
7126 }
7127 static DRIVER_ATTR_RO(sector_size);
7128 
7129 static ssize_t submit_queues_show(struct device_driver *ddp, char *buf)
7130 {
7131 	return scnprintf(buf, PAGE_SIZE, "%d\n", submit_queues);
7132 }
7133 static DRIVER_ATTR_RO(submit_queues);
7134 
7135 static ssize_t dix_show(struct device_driver *ddp, char *buf)
7136 {
7137 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dix);
7138 }
7139 static DRIVER_ATTR_RO(dix);
7140 
7141 static ssize_t dif_show(struct device_driver *ddp, char *buf)
7142 {
7143 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dif);
7144 }
7145 static DRIVER_ATTR_RO(dif);
7146 
7147 static ssize_t guard_show(struct device_driver *ddp, char *buf)
7148 {
7149 	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_guard);
7150 }
7151 static DRIVER_ATTR_RO(guard);
7152 
7153 static ssize_t ato_show(struct device_driver *ddp, char *buf)
7154 {
7155 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ato);
7156 }
7157 static DRIVER_ATTR_RO(ato);
7158 
/*
 * Show the provisioning map of store 0 as a bitmap range list ("%*pbl").
 * Without logical block provisioning the whole LBA range is reported.
 */
static ssize_t map_show(struct device_driver *ddp, char *buf)
{
	ssize_t count = 0;

	if (!scsi_debug_lbp())
		return scnprintf(buf, PAGE_SIZE, "0-%u\n",
				 sdebug_store_sectors);

	if (sdebug_fake_rw == 0 && !xa_empty(per_store_ap)) {
		struct sdeb_store_info *sip = xa_load(per_store_ap, 0);

		/* PAGE_SIZE - 1 leaves room for the '\n' appended below */
		if (sip)
			count = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
					  (int)map_size, sip->map_storep);
	}
	buf[count++] = '\n';
	buf[count] = '\0';

	return count;
}
static DRIVER_ATTR_RO(map);
7180 
7181 static ssize_t random_show(struct device_driver *ddp, char *buf)
7182 {
7183 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_random);
7184 }
7185 
7186 static ssize_t random_store(struct device_driver *ddp, const char *buf,
7187 			    size_t count)
7188 {
7189 	bool v;
7190 
7191 	if (kstrtobool(buf, &v))
7192 		return -EINVAL;
7193 
7194 	sdebug_random = v;
7195 	return count;
7196 }
7197 static DRIVER_ATTR_RW(random);
7198 
7199 static ssize_t removable_show(struct device_driver *ddp, char *buf)
7200 {
7201 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_removable ? 1 : 0);
7202 }
7203 static ssize_t removable_store(struct device_driver *ddp, const char *buf,
7204 			       size_t count)
7205 {
7206 	int n;
7207 
7208 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
7209 		sdebug_removable = (n > 0);
7210 		return count;
7211 	}
7212 	return -EINVAL;
7213 }
7214 static DRIVER_ATTR_RW(removable);
7215 
7216 static ssize_t host_lock_show(struct device_driver *ddp, char *buf)
7217 {
7218 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_host_lock);
7219 }
7220 /* N.B. sdebug_host_lock does nothing, kept for backward compatibility */
7221 static ssize_t host_lock_store(struct device_driver *ddp, const char *buf,
7222 			       size_t count)
7223 {
7224 	int n;
7225 
7226 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
7227 		sdebug_host_lock = (n > 0);
7228 		return count;
7229 	}
7230 	return -EINVAL;
7231 }
7232 static DRIVER_ATTR_RW(host_lock);
7233 
7234 static ssize_t strict_show(struct device_driver *ddp, char *buf)
7235 {
7236 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_strict);
7237 }
7238 static ssize_t strict_store(struct device_driver *ddp, const char *buf,
7239 			    size_t count)
7240 {
7241 	int n;
7242 
7243 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
7244 		sdebug_strict = (n > 0);
7245 		return count;
7246 	}
7247 	return -EINVAL;
7248 }
7249 static DRIVER_ATTR_RW(strict);
7250 
7251 static ssize_t uuid_ctl_show(struct device_driver *ddp, char *buf)
7252 {
7253 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_uuid_ctl);
7254 }
7255 static DRIVER_ATTR_RO(uuid_ctl);
7256 
7257 static ssize_t cdb_len_show(struct device_driver *ddp, char *buf)
7258 {
7259 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_cdb_len);
7260 }
7261 static ssize_t cdb_len_store(struct device_driver *ddp, const char *buf,
7262 			     size_t count)
7263 {
7264 	int ret, n;
7265 
7266 	ret = kstrtoint(buf, 0, &n);
7267 	if (ret)
7268 		return ret;
7269 	sdebug_cdb_len = n;
7270 	all_config_cdb_len();
7271 	return count;
7272 }
7273 static DRIVER_ATTR_RW(cdb_len);
7274 
/*
 * Three accepted spellings for the zbc model, each table indexed by the
 * same BLK_ZONED_* value; tried in order by sdeb_zbc_model_str() below.
 */
static const char * const zbc_model_strs_a[] = {
	[BLK_ZONED_NONE] = "none",
	[BLK_ZONED_HA]   = "host-aware",
	[BLK_ZONED_HM]   = "host-managed",
};

static const char * const zbc_model_strs_b[] = {
	[BLK_ZONED_NONE] = "no",
	[BLK_ZONED_HA]   = "aware",
	[BLK_ZONED_HM]   = "managed",
};

static const char * const zbc_model_strs_c[] = {
	[BLK_ZONED_NONE] = "0",
	[BLK_ZONED_HA]   = "1",
	[BLK_ZONED_HM]   = "2",
};
7292 
7293 static int sdeb_zbc_model_str(const char *cp)
7294 {
7295 	int res = sysfs_match_string(zbc_model_strs_a, cp);
7296 
7297 	if (res < 0) {
7298 		res = sysfs_match_string(zbc_model_strs_b, cp);
7299 		if (res < 0) {
7300 			res = sysfs_match_string(zbc_model_strs_c, cp);
7301 			if (res < 0)
7302 				return -EINVAL;
7303 		}
7304 	}
7305 	return res;
7306 }
7307 
7308 static ssize_t zbc_show(struct device_driver *ddp, char *buf)
7309 {
7310 	return scnprintf(buf, PAGE_SIZE, "%s\n",
7311 			 zbc_model_strs_a[sdeb_zbc_model]);
7312 }
7313 static DRIVER_ATTR_RO(zbc);
7314 
7315 static ssize_t tur_ms_to_ready_show(struct device_driver *ddp, char *buf)
7316 {
7317 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdeb_tur_ms_to_ready);
7318 }
7319 static DRIVER_ATTR_RO(tur_ms_to_ready);
7320 
7321 static ssize_t group_number_stats_show(struct device_driver *ddp, char *buf)
7322 {
7323 	char *p = buf, *end = buf + PAGE_SIZE;
7324 	int i;
7325 
7326 	for (i = 0; i < ARRAY_SIZE(writes_by_group_number); i++)
7327 		p += scnprintf(p, end - p, "%d %ld\n", i,
7328 			       atomic_long_read(&writes_by_group_number[i]));
7329 
7330 	return p - buf;
7331 }
7332 
7333 static ssize_t group_number_stats_store(struct device_driver *ddp,
7334 					const char *buf, size_t count)
7335 {
7336 	int i;
7337 
7338 	for (i = 0; i < ARRAY_SIZE(writes_by_group_number); i++)
7339 		atomic_long_set(&writes_by_group_number[i], 0);
7340 
7341 	return count;
7342 }
7343 static DRIVER_ATTR_RW(group_number_stats);
7344 
/* Note: The following array creates attribute files in the
   /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
   files (over those found in the /sys/module/scsi_debug/parameters
   directory) is that auxiliary actions can be triggered when an attribute
   is changed. For example see: add_host_store() above.
 */

static struct attribute *sdebug_drv_attrs[] = {
	&driver_attr_delay.attr,
	&driver_attr_opts.attr,
	&driver_attr_ptype.attr,
	&driver_attr_dsense.attr,
	&driver_attr_fake_rw.attr,
	&driver_attr_host_max_queue.attr,
	&driver_attr_no_lun_0.attr,
	&driver_attr_num_tgts.attr,
	&driver_attr_dev_size_mb.attr,
	&driver_attr_num_parts.attr,
	&driver_attr_every_nth.attr,
	&driver_attr_lun_format.attr,
	&driver_attr_max_luns.attr,
	&driver_attr_max_queue.attr,
	&driver_attr_no_rwlock.attr,
	&driver_attr_no_uld.attr,
	&driver_attr_scsi_level.attr,
	&driver_attr_virtual_gb.attr,
	&driver_attr_add_host.attr,
	&driver_attr_per_host_store.attr,
	&driver_attr_vpd_use_hostno.attr,
	&driver_attr_sector_size.attr,
	&driver_attr_statistics.attr,
	&driver_attr_submit_queues.attr,
	&driver_attr_dix.attr,
	&driver_attr_dif.attr,
	&driver_attr_guard.attr,
	&driver_attr_ato.attr,
	&driver_attr_map.attr,
	&driver_attr_random.attr,
	&driver_attr_removable.attr,
	&driver_attr_host_lock.attr,
	&driver_attr_ndelay.attr,
	&driver_attr_strict.attr,
	&driver_attr_uuid_ctl.attr,
	&driver_attr_cdb_len.attr,
	&driver_attr_tur_ms_to_ready.attr,
	&driver_attr_zbc.attr,
	&driver_attr_group_number_stats.attr,
	NULL,	/* sysfs requires NULL termination */
};
ATTRIBUTE_GROUPS(sdebug_drv);
7395 
7396 static struct device *pseudo_primary;
7397 
/*
 * Module initialization: validate module parameters, size the ram disk,
 * optionally create the first backing store, register the pseudo root
 * device, bus and driver, then build the requested number of hosts.
 * Returns 0 on success, negated errno on fatal setup failure.
 */
static int __init scsi_debug_init(void)
{
	bool want_store = (sdebug_fake_rw == 0);
	unsigned long sz;
	int k, ret, hosts_to_add;
	int idx = -1;

	/* a nanosecond delay in (0, 1s) overrides the jiffies delay */
	if (sdebug_ndelay >= 1000 * 1000 * 1000) {
		pr_warn("ndelay must be less than 1 second, ignored\n");
		sdebug_ndelay = 0;
	} else if (sdebug_ndelay > 0)
		sdebug_jdelay = JDELAY_OVERRIDDEN;

	switch (sdebug_sector_size) {
	case  512:
	case 1024:
	case 2048:
	case 4096:
		break;
	default:
		pr_err("invalid sector_size %d\n", sdebug_sector_size);
		return -EINVAL;
	}

	switch (sdebug_dif) {
	case T10_PI_TYPE0_PROTECTION:
		break;
	case T10_PI_TYPE1_PROTECTION:
	case T10_PI_TYPE2_PROTECTION:
	case T10_PI_TYPE3_PROTECTION:
		have_dif_prot = true;
		break;

	default:
		pr_err("dif must be 0, 1, 2 or 3\n");
		return -EINVAL;
	}

	if (sdebug_num_tgts < 0) {
		pr_err("num_tgts must be >= 0\n");
		return -EINVAL;
	}

	if (sdebug_guard > 1) {
		pr_err("guard must be 0 or 1\n");
		return -EINVAL;
	}

	if (sdebug_ato > 1) {
		pr_err("ato must be 0 or 1\n");
		return -EINVAL;
	}

	if (sdebug_physblk_exp > 15) {
		pr_err("invalid physblk_exp %u\n", sdebug_physblk_exp);
		return -EINVAL;
	}

	sdebug_lun_am = sdebug_lun_am_i;
	if (sdebug_lun_am > SAM_LUN_AM_FLAT) {
		pr_warn("Invalid LUN format %u, using default\n", (int)sdebug_lun_am);
		sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
	}

	/* LUN numbers above 256 require the flat addressing method */
	if (sdebug_max_luns > 256) {
		if (sdebug_max_luns > 16384) {
			pr_warn("max_luns can be no more than 16384, use default\n");
			sdebug_max_luns = DEF_MAX_LUNS;
		}
		sdebug_lun_am = SAM_LUN_AM_FLAT;
	}

	if (sdebug_lowest_aligned > 0x3fff) {
		pr_err("lowest_aligned too big: %u\n", sdebug_lowest_aligned);
		return -EINVAL;
	}

	if (submit_queues < 1) {
		pr_err("submit_queues must be 1 or more\n");
		return -EINVAL;
	}

	if ((sdebug_max_queue > SDEBUG_CANQUEUE) || (sdebug_max_queue < 1)) {
		pr_err("max_queue must be in range [1, %d]\n", SDEBUG_CANQUEUE);
		return -EINVAL;
	}

	if ((sdebug_host_max_queue > SDEBUG_CANQUEUE) ||
	    (sdebug_host_max_queue < 0)) {
		pr_err("host_max_queue must be in range [0 %d]\n",
		       SDEBUG_CANQUEUE);
		return -EINVAL;
	}

	/* a non-zero host_max_queue forces max_queue to agree with it */
	if (sdebug_host_max_queue &&
	    (sdebug_max_queue != sdebug_host_max_queue)) {
		sdebug_max_queue = sdebug_host_max_queue;
		pr_warn("fixing max submit queue depth to host max queue depth, %d\n",
			sdebug_max_queue);
	}

	/*
	 * check for host managed zoned block device specified with
	 * ptype=0x14 or zbc=XXX.
	 */
	if (sdebug_ptype == TYPE_ZBC) {
		sdeb_zbc_model = BLK_ZONED_HM;
	} else if (sdeb_zbc_model_s && *sdeb_zbc_model_s) {
		k = sdeb_zbc_model_str(sdeb_zbc_model_s);
		if (k < 0)
			return k;
		sdeb_zbc_model = k;
		switch (sdeb_zbc_model) {
		case BLK_ZONED_NONE:
		case BLK_ZONED_HA:
			sdebug_ptype = TYPE_DISK;
			break;
		case BLK_ZONED_HM:
			sdebug_ptype = TYPE_ZBC;
			break;
		default:
			pr_err("Invalid ZBC model\n");
			return -EINVAL;
		}
	}
	if (sdeb_zbc_model != BLK_ZONED_NONE) {
		sdeb_zbc_in_use = true;
		if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
			sdebug_dev_size_mb = DEF_ZBC_DEV_SIZE_MB;
	}

	if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
		sdebug_dev_size_mb = DEF_DEV_SIZE_MB;
	if (sdebug_dev_size_mb < 1)
		sdebug_dev_size_mb = 1;  /* force minimum 1 MB ramdisk */
	sz = (unsigned long)sdebug_dev_size_mb * 1048576;
	sdebug_store_sectors = sz / sdebug_sector_size;
	sdebug_capacity = get_sdebug_capacity();

	/* play around with geometry, don't waste too much on track 0 */
	sdebug_heads = 8;
	sdebug_sectors_per = 32;
	if (sdebug_dev_size_mb >= 256)
		sdebug_heads = 64;
	else if (sdebug_dev_size_mb >= 16)
		sdebug_heads = 32;
	sdebug_cylinders_per = (unsigned long)sdebug_capacity /
			       (sdebug_sectors_per * sdebug_heads);
	if (sdebug_cylinders_per >= 1024) {
		/* other LLDs do this; implies >= 1GB ram disk ... */
		sdebug_heads = 255;
		sdebug_sectors_per = 63;
		sdebug_cylinders_per = (unsigned long)sdebug_capacity /
			       (sdebug_sectors_per * sdebug_heads);
	}
	if (scsi_debug_lbp()) {
		/* clamp logical block provisioning (UNMAP) parameters */
		sdebug_unmap_max_blocks =
			clamp(sdebug_unmap_max_blocks, 0U, 0xffffffffU);

		sdebug_unmap_max_desc =
			clamp(sdebug_unmap_max_desc, 0U, 256U);

		sdebug_unmap_granularity =
			clamp(sdebug_unmap_granularity, 1U, 0xffffffffU);

		if (sdebug_unmap_alignment &&
		    sdebug_unmap_granularity <=
		    sdebug_unmap_alignment) {
			pr_err("ERR: unmap_granularity <= unmap_alignment\n");
			return -EINVAL;
		}
	}
	xa_init_flags(per_store_ap, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
	if (want_store) {
		idx = sdebug_add_store();
		if (idx < 0)
			return idx;
	}

	pseudo_primary = root_device_register("pseudo_0");
	if (IS_ERR(pseudo_primary)) {
		pr_warn("root_device_register() error\n");
		ret = PTR_ERR(pseudo_primary);
		goto free_vm;
	}
	ret = bus_register(&pseudo_lld_bus);
	if (ret < 0) {
		pr_warn("bus_register error: %d\n", ret);
		goto dev_unreg;
	}
	ret = driver_register(&sdebug_driverfs_driver);
	if (ret < 0) {
		pr_warn("driver_register error: %d\n", ret);
		goto bus_unreg;
	}

	hosts_to_add = sdebug_add_host;
	sdebug_add_host = 0;

	queued_cmd_cache = KMEM_CACHE(sdebug_queued_cmd, SLAB_HWCACHE_ALIGN);
	if (!queued_cmd_cache) {
		ret = -ENOMEM;
		goto driver_unreg;
	}

	/* debugfs failure is non-fatal: only diagnostics are lost */
	sdebug_debugfs_root = debugfs_create_dir("scsi_debug", NULL);
	if (IS_ERR_OR_NULL(sdebug_debugfs_root))
		pr_info("%s: failed to create initial debugfs directory\n", __func__);

	/*
	 * The first host reuses the store created above (if any). A host
	 * add failure stops the loop but init still returns 0 with the
	 * hosts that were built.
	 */
	for (k = 0; k < hosts_to_add; k++) {
		if (want_store && k == 0) {
			ret = sdebug_add_host_helper(idx);
			if (ret < 0) {
				pr_err("add_host_helper k=%d, error=%d\n",
				       k, -ret);
				break;
			}
		} else {
			ret = sdebug_do_add_host(want_store &&
						 sdebug_per_host_store);
			if (ret < 0) {
				pr_err("add_host k=%d error=%d\n", k, -ret);
				break;
			}
		}
	}
	if (sdebug_verbose)
		pr_info("built %d host(s)\n", sdebug_num_hosts);

	return 0;

driver_unreg:
	driver_unregister(&sdebug_driverfs_driver);
bus_unreg:
	bus_unregister(&pseudo_lld_bus);
dev_unreg:
	root_device_unregister(pseudo_primary);
free_vm:
	sdebug_erase_store(idx, NULL);
	return ret;
}
7639 
7640 static void __exit scsi_debug_exit(void)
7641 {
7642 	int k = sdebug_num_hosts;
7643 
7644 	for (; k; k--)
7645 		sdebug_do_remove_host(true);
7646 	kmem_cache_destroy(queued_cmd_cache);
7647 	driver_unregister(&sdebug_driverfs_driver);
7648 	bus_unregister(&pseudo_lld_bus);
7649 	root_device_unregister(pseudo_primary);
7650 
7651 	sdebug_erase_all_stores(false);
7652 	xa_destroy(per_store_ap);
7653 	debugfs_remove(sdebug_debugfs_root);
7654 }
7655 
/* device_initcall: when built in, initialize after core device infrastructure */
device_initcall(scsi_debug_init);
module_exit(scsi_debug_exit);
7658 
/*
 * struct device release callback: frees the sdebug_host_info that
 * embeds @dev once its last reference is dropped.
 */
static void sdebug_release_adapter(struct device *dev)
{
	kfree(dev_to_sdebug_host(dev));
}
7666 
7667 /* idx must be valid, if sip is NULL then it will be obtained using idx */
7668 static void sdebug_erase_store(int idx, struct sdeb_store_info *sip)
7669 {
7670 	if (idx < 0)
7671 		return;
7672 	if (!sip) {
7673 		if (xa_empty(per_store_ap))
7674 			return;
7675 		sip = xa_load(per_store_ap, idx);
7676 		if (!sip)
7677 			return;
7678 	}
7679 	vfree(sip->map_storep);
7680 	vfree(sip->dif_storep);
7681 	vfree(sip->storep);
7682 	xa_erase(per_store_ap, idx);
7683 	kfree(sip);
7684 }
7685 
7686 /* Assume apart_from_first==false only in shutdown case. */
7687 static void sdebug_erase_all_stores(bool apart_from_first)
7688 {
7689 	unsigned long idx;
7690 	struct sdeb_store_info *sip = NULL;
7691 
7692 	xa_for_each(per_store_ap, idx, sip) {
7693 		if (apart_from_first)
7694 			apart_from_first = false;
7695 		else
7696 			sdebug_erase_store(idx, sip);
7697 	}
7698 	if (apart_from_first)
7699 		sdeb_most_recent_idx = sdeb_first_idx;
7700 }
7701 
/*
 * Returns store xarray new element index (idx) if >=0 else negated errno.
 * Limit the number of stores to 65536.
 */
static int sdebug_add_store(void)
{
	int res;
	u32 n_idx;
	unsigned long iflags;
	unsigned long sz = (unsigned long)sdebug_dev_size_mb * 1048576;
	struct sdeb_store_info *sip = NULL;
	struct xa_limit xal = { .max = 1 << 16, .min = 0 };

	sip = kzalloc(sizeof(*sip), GFP_KERNEL);
	if (!sip)
		return -ENOMEM;

	/* reserve an index under the xa lock so concurrent adders can't collide */
	xa_lock_irqsave(per_store_ap, iflags);
	res = __xa_alloc(per_store_ap, &n_idx, sip, xal, GFP_ATOMIC);
	if (unlikely(res < 0)) {
		xa_unlock_irqrestore(per_store_ap, iflags);
		kfree(sip);
		pr_warn("%s: xa_alloc() errno=%d\n", __func__, -res);
		return res;
	}
	sdeb_most_recent_idx = n_idx;
	if (sdeb_first_idx < 0)
		sdeb_first_idx = n_idx;
	xa_unlock_irqrestore(per_store_ap, iflags);

	res = -ENOMEM;
	/* zero-filled ram disk backing this store */
	sip->storep = vzalloc(sz);
	if (!sip->storep) {
		pr_err("user data oom\n");
		goto err;
	}
	if (sdebug_num_parts > 0)
		sdebug_build_parts(sip->storep, sz);

	/* DIF/DIX: what T10 calls Protection Information (PI) */
	if (sdebug_dix) {
		int dif_size;

		dif_size = sdebug_store_sectors * sizeof(struct t10_pi_tuple);
		sip->dif_storep = vmalloc(dif_size);

		pr_info("dif_storep %u bytes @ %pK\n", dif_size,
			sip->dif_storep);

		if (!sip->dif_storep) {
			pr_err("DIX oom\n");
			goto err;
		}
		/* 0xff fill marks all PI tuples as not-yet-written */
		memset(sip->dif_storep, 0xff, dif_size);
	}
	/* Logical Block Provisioning */
	if (scsi_debug_lbp()) {
		map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
		sip->map_storep = vmalloc(array_size(sizeof(long),
						     BITS_TO_LONGS(map_size)));

		pr_info("%lu provisioning blocks\n", map_size);

		if (!sip->map_storep) {
			pr_err("LBP map oom\n");
			goto err;
		}

		bitmap_zero(sip->map_storep, map_size);

		/* Map first 1KB for partition table */
		if (sdebug_num_parts)
			map_region(sip, 0, 2);
	}

	rwlock_init(&sip->macc_lck);
	return (int)n_idx;
err:
	/* sdebug_erase_store() frees sip and removes the xarray entry */
	sdebug_erase_store((int)n_idx, sip);
	pr_warn("%s: failed, errno=%d\n", __func__, -res);
	return res;
}
7784 
/*
 * Create one simulated adapter bound to per_store index @per_host_idx
 * (the first store is used when negative), pre-allocate its device info
 * entries and register it on the pseudo bus (which triggers probe).
 * Returns 0 on success, negated errno on failure.
 */
static int sdebug_add_host_helper(int per_host_idx)
{
	int k, devs_per_host, idx;
	int error = -ENOMEM;
	struct sdebug_host_info *sdbg_host;
	struct sdebug_dev_info *sdbg_devinfo, *tmp;

	sdbg_host = kzalloc(sizeof(*sdbg_host), GFP_KERNEL);
	if (!sdbg_host)
		return -ENOMEM;
	idx = (per_host_idx < 0) ? sdeb_first_idx : per_host_idx;
	/* the chosen store is (back) in use now */
	if (xa_get_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE))
		xa_clear_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
	sdbg_host->si_idx = idx;

	INIT_LIST_HEAD(&sdbg_host->dev_info_list);

	devs_per_host = sdebug_num_tgts * sdebug_max_luns;
	for (k = 0; k < devs_per_host; k++) {
		sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
		if (!sdbg_devinfo)
			goto clean;
	}

	mutex_lock(&sdebug_host_list_mutex);
	list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
	mutex_unlock(&sdebug_host_list_mutex);

	sdbg_host->dev.bus = &pseudo_lld_bus;
	sdbg_host->dev.parent = pseudo_primary;
	sdbg_host->dev.release = &sdebug_release_adapter;
	dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_num_hosts);

	error = device_register(&sdbg_host->dev);
	if (error) {
		/* undo the list insertion before dropping our reference */
		mutex_lock(&sdebug_host_list_mutex);
		list_del(&sdbg_host->host_list);
		mutex_unlock(&sdebug_host_list_mutex);
		goto clean;
	}

	++sdebug_num_hosts;
	return 0;

clean:
	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
				 dev_list) {
		list_del(&sdbg_devinfo->dev_list);
		kfree(sdbg_devinfo->zstate);
		kfree(sdbg_devinfo);
	}
	/*
	 * Once a release callback is set, the device core owns the final
	 * kfree via put_device(); before that we must free directly.
	 */
	if (sdbg_host->dev.release)
		put_device(&sdbg_host->dev);
	else
		kfree(sdbg_host);
	pr_warn("%s: failed, errno=%d\n", __func__, -error);
	return error;
}
7843 
7844 static int sdebug_do_add_host(bool mk_new_store)
7845 {
7846 	int ph_idx = sdeb_most_recent_idx;
7847 
7848 	if (mk_new_store) {
7849 		ph_idx = sdebug_add_store();
7850 		if (ph_idx < 0)
7851 			return ph_idx;
7852 	}
7853 	return sdebug_add_host_helper(ph_idx);
7854 }
7855 
/*
 * Remove the most recently added host. When @the_end is false (normal
 * runtime removal), mark the host's backing store as unused if no other
 * host still references it; at module exit the stores are torn down
 * separately so no marking is needed.
 */
static void sdebug_do_remove_host(bool the_end)
{
	int idx = -1;
	struct sdebug_host_info *sdbg_host = NULL;
	struct sdebug_host_info *sdbg_host2;

	mutex_lock(&sdebug_host_list_mutex);
	if (!list_empty(&sdebug_host_list)) {
		/* newest host is at the tail of the list */
		sdbg_host = list_entry(sdebug_host_list.prev,
				       struct sdebug_host_info, host_list);
		idx = sdbg_host->si_idx;
	}
	if (!the_end && idx >= 0) {
		bool unique = true;

		/* check whether any other host shares store idx */
		list_for_each_entry(sdbg_host2, &sdebug_host_list, host_list) {
			if (sdbg_host2 == sdbg_host)
				continue;
			if (idx == sdbg_host2->si_idx) {
				unique = false;
				break;
			}
		}
		if (unique) {
			xa_set_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
			if (idx == sdeb_most_recent_idx)
				--sdeb_most_recent_idx;
		}
	}
	if (sdbg_host)
		list_del(&sdbg_host->host_list);
	mutex_unlock(&sdebug_host_list_mutex);

	if (!sdbg_host)
		return;

	device_unregister(&sdbg_host->dev);
	--sdebug_num_hosts;
}
7895 
7896 static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
7897 {
7898 	struct sdebug_dev_info *devip = sdev->hostdata;
7899 
7900 	if (!devip)
7901 		return	-ENODEV;
7902 
7903 	mutex_lock(&sdebug_host_list_mutex);
7904 	block_unblock_all_queues(true);
7905 
7906 	if (qdepth > SDEBUG_CANQUEUE) {
7907 		qdepth = SDEBUG_CANQUEUE;
7908 		pr_warn("%s: requested qdepth [%d] exceeds canqueue [%d], trim\n", __func__,
7909 			qdepth, SDEBUG_CANQUEUE);
7910 	}
7911 	if (qdepth < 1)
7912 		qdepth = 1;
7913 	if (qdepth != sdev->queue_depth)
7914 		scsi_change_queue_depth(sdev, qdepth);
7915 
7916 	block_unblock_all_queues(false);
7917 	mutex_unlock(&sdebug_host_list_mutex);
7918 
7919 	if (SDEBUG_OPT_Q_NOISE & sdebug_opts)
7920 		sdev_printk(KERN_INFO, sdev, "%s: qdepth=%d\n", __func__, qdepth);
7921 
7922 	return sdev->queue_depth;
7923 }
7924 
7925 static bool fake_timeout(struct scsi_cmnd *scp)
7926 {
7927 	if (0 == (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth))) {
7928 		if (sdebug_every_nth < -1)
7929 			sdebug_every_nth = -1;
7930 		if (SDEBUG_OPT_TIMEOUT & sdebug_opts)
7931 			return true; /* ignore command causing timeout */
7932 		else if (SDEBUG_OPT_MAC_TIMEOUT & sdebug_opts &&
7933 			 scsi_medium_access_command(scp))
7934 			return true; /* time out reads and writes */
7935 	}
7936 	return false;
7937 }
7938 
/* Response to TUR or media access command when device stopped */
static int resp_not_ready(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	int stopped_state;
	u64 diff_ns = 0;
	ktime_t now_ts = ktime_get_boottime();
	struct scsi_device *sdp = scp->device;

	stopped_state = atomic_read(&devip->stopped);
	/* state 2: device is "becoming ready" until tur_ms_to_ready elapses */
	if (stopped_state == 2) {
		if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
			diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));
			if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
				/* tur_ms_to_ready timer extinguished */
				atomic_set(&devip->stopped, 0);
				return 0;
			}
		}
		mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x1);
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, sdp,
				    "%s: Not ready: in process of becoming ready\n", my_name);
		/* for TUR, report the remaining time in the sense data */
		if (scp->cmnd[0] == TEST_UNIT_READY) {
			u64 tur_nanosecs_to_ready = (u64)sdeb_tur_ms_to_ready * 1000000;

			if (diff_ns <= tur_nanosecs_to_ready)
				diff_ns = tur_nanosecs_to_ready - diff_ns;
			else
				diff_ns = tur_nanosecs_to_ready;
			/* As per 20-061r2 approved for spc6 by T10 on 20200716 */
			do_div(diff_ns, 1000000);	/* diff_ns becomes milliseconds */
			scsi_set_sense_information(scp->sense_buffer, SCSI_SENSE_BUFFERSIZE,
						   diff_ns);
			return check_condition_result;
		}
	}
	/* any other stopped state: an initializing command (SSU) is required */
	mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x2);
	if (sdebug_verbose)
		sdev_printk(KERN_INFO, sdp, "%s: Not ready: initializing command required\n",
			    my_name);
	return check_condition_result;
}
7981 
7982 static void sdebug_map_queues(struct Scsi_Host *shost)
7983 {
7984 	int i, qoff;
7985 
7986 	if (shost->nr_hw_queues == 1)
7987 		return;
7988 
7989 	for (i = 0, qoff = 0; i < HCTX_MAX_TYPES; i++) {
7990 		struct blk_mq_queue_map *map = &shost->tag_set.map[i];
7991 
7992 		map->nr_queues  = 0;
7993 
7994 		if (i == HCTX_TYPE_DEFAULT)
7995 			map->nr_queues = submit_queues - poll_queues;
7996 		else if (i == HCTX_TYPE_POLL)
7997 			map->nr_queues = poll_queues;
7998 
7999 		if (!map->nr_queues) {
8000 			BUG_ON(i == HCTX_TYPE_DEFAULT);
8001 			continue;
8002 		}
8003 
8004 		map->queue_offset = qoff;
8005 		blk_mq_map_queues(map);
8006 
8007 		qoff += map->nr_queues;
8008 	}
8009 }
8010 
/* Context handed through blk_mq_tagset_busy_iter() to the poll iterator. */
struct sdebug_blk_mq_poll_data {
	unsigned int queue_num;	/* hw queue being polled */
	int *num_entries;	/* out: commands completed this poll */
};
8015 
/*
 * We don't handle aborted commands here, but it does not seem possible to have
 * aborted polled commands from schedule_resp()
 */
static bool sdebug_blk_mq_poll_iter(struct request *rq, void *opaque)
{
	struct sdebug_blk_mq_poll_data *data = opaque;
	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
	struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmd);
	struct sdebug_defer *sd_dp;
	u32 unique_tag = blk_mq_unique_tag(rq);
	u16 hwq = blk_mq_unique_tag_to_hwq(unique_tag);
	struct sdebug_queued_cmd *sqcp;
	unsigned long flags;
	int queue_num = data->queue_num;
	ktime_t time;

	/* We're only interested in one queue for this iteration */
	if (hwq != queue_num)
		return true;

	/* Subsequent checks would fail if this failed, but check anyway */
	if (!test_bit(SCMD_STATE_INFLIGHT, &cmd->state))
		return true;

	time = ktime_get_boottime();

	/* sdsc->lock guards the command <-> queued-cmd association */
	spin_lock_irqsave(&sdsc->lock, flags);
	sqcp = TO_QUEUED_CMD(cmd);
	if (!sqcp) {
		spin_unlock_irqrestore(&sdsc->lock, flags);
		return true;
	}

	sd_dp = &sqcp->sd_dp;
	/* only commands deferred for polling belong to us */
	if (READ_ONCE(sd_dp->defer_t) != SDEB_DEFER_POLL) {
		spin_unlock_irqrestore(&sdsc->lock, flags);
		return true;
	}

	/* not yet due: leave it for a later poll */
	if (time < sd_dp->cmpl_ts) {
		spin_unlock_irqrestore(&sdsc->lock, flags);
		return true;
	}

	/* claim the command: no other completer may touch sqcp now */
	ASSIGN_QUEUED_CMD(cmd, NULL);
	spin_unlock_irqrestore(&sdsc->lock, flags);

	if (sdebug_statistics) {
		atomic_inc(&sdebug_completions);
		if (raw_smp_processor_id() != sd_dp->issuing_cpu)
			atomic_inc(&sdebug_miss_cpus);
	}

	sdebug_free_queued_cmd(sqcp);

	scsi_done(cmd); /* callback to mid level */
	(*data->num_entries)++;
	return true;
}
8076 
8077 static int sdebug_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
8078 {
8079 	int num_entries = 0;
8080 	struct sdebug_blk_mq_poll_data data = {
8081 		.queue_num = queue_num,
8082 		.num_entries = &num_entries,
8083 	};
8084 
8085 	blk_mq_tagset_busy_iter(&shost->tag_set, sdebug_blk_mq_poll_iter,
8086 				&data);
8087 
8088 	if (num_entries > 0)
8089 		atomic_add(num_entries, &sdeb_mq_poll_count);
8090 	return num_entries;
8091 }
8092 
8093 static int sdebug_timeout_cmd(struct scsi_cmnd *cmnd)
8094 {
8095 	struct scsi_device *sdp = cmnd->device;
8096 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
8097 	struct sdebug_err_inject *err;
8098 	unsigned char *cmd = cmnd->cmnd;
8099 	int ret = 0;
8100 
8101 	if (devip == NULL)
8102 		return 0;
8103 
8104 	rcu_read_lock();
8105 	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
8106 		if (err->type == ERR_TMOUT_CMD &&
8107 		    (err->cmd == cmd[0] || err->cmd == 0xff)) {
8108 			ret = !!err->cnt;
8109 			if (err->cnt < 0)
8110 				err->cnt++;
8111 
8112 			rcu_read_unlock();
8113 			return ret;
8114 		}
8115 	}
8116 	rcu_read_unlock();
8117 
8118 	return 0;
8119 }
8120 
8121 static int sdebug_fail_queue_cmd(struct scsi_cmnd *cmnd)
8122 {
8123 	struct scsi_device *sdp = cmnd->device;
8124 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
8125 	struct sdebug_err_inject *err;
8126 	unsigned char *cmd = cmnd->cmnd;
8127 	int ret = 0;
8128 
8129 	if (devip == NULL)
8130 		return 0;
8131 
8132 	rcu_read_lock();
8133 	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
8134 		if (err->type == ERR_FAIL_QUEUE_CMD &&
8135 		    (err->cmd == cmd[0] || err->cmd == 0xff)) {
8136 			ret = err->cnt ? err->queuecmd_ret : 0;
8137 			if (err->cnt < 0)
8138 				err->cnt++;
8139 
8140 			rcu_read_unlock();
8141 			return ret;
8142 		}
8143 	}
8144 	rcu_read_unlock();
8145 
8146 	return 0;
8147 }
8148 
8149 static int sdebug_fail_cmd(struct scsi_cmnd *cmnd, int *retval,
8150 			   struct sdebug_err_inject *info)
8151 {
8152 	struct scsi_device *sdp = cmnd->device;
8153 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
8154 	struct sdebug_err_inject *err;
8155 	unsigned char *cmd = cmnd->cmnd;
8156 	int ret = 0;
8157 	int result;
8158 
8159 	if (devip == NULL)
8160 		return 0;
8161 
8162 	rcu_read_lock();
8163 	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
8164 		if (err->type == ERR_FAIL_CMD &&
8165 		    (err->cmd == cmd[0] || err->cmd == 0xff)) {
8166 			if (!err->cnt) {
8167 				rcu_read_unlock();
8168 				return 0;
8169 			}
8170 
8171 			ret = !!err->cnt;
8172 			rcu_read_unlock();
8173 			goto out_handle;
8174 		}
8175 	}
8176 	rcu_read_unlock();
8177 
8178 	return 0;
8179 
8180 out_handle:
8181 	if (err->cnt < 0)
8182 		err->cnt++;
8183 	mk_sense_buffer(cmnd, err->sense_key, err->asc, err->asq);
8184 	result = err->status_byte | err->host_byte << 16 | err->driver_byte << 24;
8185 	*info = *err;
8186 	*retval = schedule_resp(cmnd, devip, result, NULL, 0, 0);
8187 
8188 	return ret;
8189 }
8190 
/*
 * queuecommand callback from the SCSI midlayer. Decodes the CDB via
 * opcode_ind_arr[]/opcode_info_arr[], applies injected errors, unit
 * attention and not-ready processing, then hands off to schedule_resp()
 * which runs the selected resp_* function and completes the command
 * (possibly after a configured delay).
 */
static int scsi_debug_queuecommand(struct Scsi_Host *shost,
				   struct scsi_cmnd *scp)
{
	u8 sdeb_i;
	struct scsi_device *sdp = scp->device;
	const struct opcode_info_t *oip;
	const struct opcode_info_t *r_oip;
	struct sdebug_dev_info *devip;
	u8 *cmd = scp->cmnd;
	int (*r_pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *) = NULL;
	int k, na;
	int errsts = 0;
	u64 lun_index = sdp->lun & 0x3FFF;
	u32 flags;
	u16 sa;
	u8 opcode = cmd[0];
	bool has_wlun_rl;
	bool inject_now;
	int ret = 0;
	struct sdebug_err_inject err;

	scsi_set_resid(scp, 0);
	if (sdebug_statistics) {
		atomic_inc(&sdebug_cmnd_count);
		inject_now = inject_on_this_cmd();
	} else {
		inject_now = false;
	}
	/* optionally hex-dump the CDB bytes to the log */
	if (unlikely(sdebug_verbose &&
		     !(SDEBUG_OPT_NO_CDB_NOISE & sdebug_opts))) {
		char b[120];
		int n, len, sb;

		len = scp->cmd_len;
		sb = (int)sizeof(b);
		if (len > 32)
			strcpy(b, "too long, over 32 bytes");
		else {
			for (k = 0, n = 0; k < len && n < sb; ++k)
				n += scnprintf(b + n, sb - n, "%02x ",
					       (u32)cmd[k]);
		}
		sdev_printk(KERN_INFO, sdp, "%s: tag=%#x, cmd %s\n", my_name,
			    blk_mq_unique_tag(scsi_cmd_to_rq(scp)), b);
	}
	if (unlikely(inject_now && (sdebug_opts & SDEBUG_OPT_HOST_BUSY)))
		return SCSI_MLQUEUE_HOST_BUSY;
	has_wlun_rl = (sdp->lun == SCSI_W_LUN_REPORT_LUNS);
	if (unlikely(lun_index >= sdebug_max_luns && !has_wlun_rl))
		goto err_out;

	sdeb_i = opcode_ind_arr[opcode];	/* fully mapped */
	oip = &opcode_info_arr[sdeb_i];		/* safe if table consistent */
	devip = (struct sdebug_dev_info *)sdp->hostdata;
	if (unlikely(!devip)) {
		devip = find_build_dev_info(sdp);
		if (NULL == devip)
			goto err_out;
	}

	/* configured error injection: timeout, queue failure, CHECK CONDITION */
	if (sdebug_timeout_cmd(scp)) {
		scmd_printk(KERN_INFO, scp, "timeout command 0x%x\n", opcode);
		return 0;
	}

	ret = sdebug_fail_queue_cmd(scp);
	if (ret) {
		scmd_printk(KERN_INFO, scp, "fail queue command 0x%x with 0x%x\n",
				opcode, ret);
		return ret;
	}

	if (sdebug_fail_cmd(scp, &ret, &err)) {
		scmd_printk(KERN_INFO, scp,
			"fail command 0x%x with hostbyte=0x%x, "
			"driverbyte=0x%x, statusbyte=0x%x, "
			"sense_key=0x%x, asc=0x%x, asq=0x%x\n",
			opcode, err.host_byte, err.driver_byte,
			err.status_byte, err.sense_key, err.asc, err.asq);
		return ret;
	}

	if (unlikely(inject_now && !atomic_read(&sdeb_inject_pending)))
		atomic_set(&sdeb_inject_pending, 1);

	na = oip->num_attached;
	r_pfp = oip->pfp;
	if (na) {	/* multiple commands with this opcode */
		r_oip = oip;
		if (FF_SA & r_oip->flags) {
			/* match on opcode plus service action */
			if (F_SA_LOW & oip->flags)
				sa = 0x1f & cmd[1];
			else
				sa = get_unaligned_be16(cmd + 8);
			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
				if (opcode == oip->opcode && sa == oip->sa)
					break;
			}
		} else {   /* since no service action only check opcode */
			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
				if (opcode == oip->opcode)
					break;
			}
		}
		if (k > na) {
			if (F_SA_LOW & r_oip->flags)
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 4);
			else if (F_SA_HIGH & r_oip->flags)
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, 7);
			else
				mk_sense_invalid_opcode(scp);
			goto check_cond;
		}
	}	/* else (when na==0) we assume the oip is a match */
	flags = oip->flags;
	if (unlikely(F_INV_OP & flags)) {
		mk_sense_invalid_opcode(scp);
		goto check_cond;
	}
	if (unlikely(has_wlun_rl && !(F_RL_WLUN_OK & flags))) {
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, sdp, "%s: Opcode 0x%x not%s\n",
				    my_name, opcode, " supported for wlun");
		mk_sense_invalid_opcode(scp);
		goto check_cond;
	}
	if (unlikely(sdebug_strict)) {	/* check cdb against mask */
		u8 rem;
		int j;

		for (k = 1; k < oip->len_mask[0] && k < 16; ++k) {
			rem = ~oip->len_mask[k] & cmd[k];
			if (rem) {
				/* locate highest offending bit for the sense data */
				for (j = 7; j >= 0; --j, rem <<= 1) {
					if (0x80 & rem)
						break;
				}
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, k, j);
				goto check_cond;
			}
		}
	}
	/* report any pending unit attention unless the cmd is exempt */
	if (unlikely(!(F_SKIP_UA & flags) &&
		     find_first_bit(devip->uas_bm,
				    SDEBUG_NUM_UAS) != SDEBUG_NUM_UAS)) {
		errsts = make_ua(scp, devip);
		if (errsts)
			goto check_cond;
	}
	if (unlikely(((F_M_ACCESS & flags) || scp->cmnd[0] == TEST_UNIT_READY) &&
		     atomic_read(&devip->stopped))) {
		errsts = resp_not_ready(scp, devip);
		if (errsts)
			goto fini;
	}
	if (sdebug_fake_rw && (F_FAKE_RW & flags))
		goto fini;
	if (unlikely(sdebug_every_nth)) {
		if (fake_timeout(scp))
			return 0;	/* ignore command: make trouble */
	}
	if (likely(oip->pfp))
		pfp = oip->pfp;	/* calls a resp_* function */
	else
		pfp = r_pfp;    /* if leaf function ptr NULL, try the root's */

fini:
	if (F_DELAY_OVERR & flags)	/* cmds like INQUIRY respond asap */
		return schedule_resp(scp, devip, errsts, pfp, 0, 0);
	else if ((flags & F_LONG_DELAY) && (sdebug_jdelay > 0 ||
					    sdebug_ndelay > 10000)) {
		/*
		 * Skip long delays if ndelay <= 10 microseconds. Otherwise
		 * for Start Stop Unit (SSU) want at least 1 second delay and
		 * if sdebug_jdelay>1 want a long delay of that many seconds.
		 * For Synchronize Cache want 1/20 of SSU's delay.
		 */
		int jdelay = (sdebug_jdelay < 2) ? 1 : sdebug_jdelay;
		int denom = (flags & F_SYNC_DELAY) ? 20 : 1;

		jdelay = mult_frac(USER_HZ * jdelay, HZ, denom * USER_HZ);
		return schedule_resp(scp, devip, errsts, pfp, jdelay, 0);
	} else
		return schedule_resp(scp, devip, errsts, pfp, sdebug_jdelay,
				     sdebug_ndelay);
check_cond:
	return schedule_resp(scp, devip, check_condition_result, NULL, 0, 0);
err_out:
	return schedule_resp(scp, NULL, DID_NO_CONNECT << 16, NULL, 0, 0);
}
8382 
8383 static int sdebug_init_cmd_priv(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
8384 {
8385 	struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmd);
8386 
8387 	spin_lock_init(&sdsc->lock);
8388 
8389 	return 0;
8390 }
8391 
/* Host template shared by every simulated adapter created by this driver. */
static struct scsi_host_template sdebug_driver_template = {
	.show_info =		scsi_debug_show_info,
	.write_info =		scsi_debug_write_info,
	.proc_name =		sdebug_proc_name,
	.name =			"SCSI DEBUG",
	.info =			scsi_debug_info,
	.slave_alloc =		scsi_debug_slave_alloc,
	.slave_configure =	scsi_debug_slave_configure,
	.slave_destroy =	scsi_debug_slave_destroy,
	.ioctl =		scsi_debug_ioctl,
	.queuecommand =		scsi_debug_queuecommand,
	.change_queue_depth =	sdebug_change_qdepth,
	.map_queues =		sdebug_map_queues,
	.mq_poll =		sdebug_blk_mq_poll,
	.eh_abort_handler =	scsi_debug_abort,
	.eh_device_reset_handler = scsi_debug_device_reset,
	.eh_target_reset_handler = scsi_debug_target_reset,
	.eh_bus_reset_handler = scsi_debug_bus_reset,
	.eh_host_reset_handler = scsi_debug_host_reset,
	.can_queue =		SDEBUG_CANQUEUE,	/* may be lowered in probe */
	.this_id =		7,
	.sg_tablesize =		SG_MAX_SEGMENTS,
	.cmd_per_lun =		DEF_CMD_PER_LUN,
	.max_sectors =		-1U,
	.max_segment_size =	-1U,
	.module =		THIS_MODULE,
	.track_queue_depth =	1,
	.cmd_size = sizeof(struct sdebug_scsi_cmd),
	.init_cmd_priv = sdebug_init_cmd_priv,
	.target_alloc =		sdebug_target_alloc,
	.target_destroy =	sdebug_target_destroy,
};
8424 
/*
 * Bus probe callback: allocate and configure a Scsi_Host for one
 * simulated adapter, size its queues, set up DIF/DIX protection
 * capabilities, and register it with the SCSI midlayer (which then
 * scans it). Returns 0 on success, negated errno on failure.
 */
static int sdebug_driver_probe(struct device *dev)
{
	int error = 0;
	struct sdebug_host_info *sdbg_host;
	struct Scsi_Host *hpnt;
	int hprot;

	sdbg_host = dev_to_sdebug_host(dev);

	sdebug_driver_template.can_queue = sdebug_max_queue;
	sdebug_driver_template.cmd_per_lun = sdebug_max_queue;
	if (!sdebug_clustering)
		sdebug_driver_template.dma_boundary = PAGE_SIZE - 1;

	hpnt = scsi_host_alloc(&sdebug_driver_template, 0);
	if (NULL == hpnt) {
		pr_err("scsi_host_alloc failed\n");
		error = -ENODEV;
		return error;
	}
	if (submit_queues > nr_cpu_ids) {
		pr_warn("%s: trim submit_queues (was %d) to nr_cpu_ids=%u\n",
			my_name, submit_queues, nr_cpu_ids);
		submit_queues = nr_cpu_ids;
	}
	/*
	 * Decide whether to tell scsi subsystem that we want mq. The
	 * following should give the same answer for each host.
	 */
	hpnt->nr_hw_queues = submit_queues;
	if (sdebug_host_max_queue)
		hpnt->host_tagset = 1;

	/* poll queues are possible for nr_hw_queues > 1 */
	if (hpnt->nr_hw_queues == 1 || (poll_queues < 1)) {
		pr_warn("%s: trim poll_queues to 0. poll_q/nr_hw = (%d/%d)\n",
			 my_name, poll_queues, hpnt->nr_hw_queues);
		poll_queues = 0;
	}

	/*
	 * Poll queues don't need interrupts, but we need at least one I/O queue
	 * left over for non-polled I/O.
	 * If condition not met, trim poll_queues to 1 (just for simplicity).
	 */
	if (poll_queues >= submit_queues) {
		if (submit_queues < 3)
			pr_warn("%s: trim poll_queues to 1\n", my_name);
		else
			pr_warn("%s: trim poll_queues to 1. Perhaps try poll_queues=%d\n",
				my_name, submit_queues - 1);
		poll_queues = 1;
	}
	if (poll_queues)
		hpnt->nr_maps = 3;

	sdbg_host->shost = hpnt;
	/* leave room for this_id, which must not clash with a target id */
	if ((hpnt->this_id >= 0) && (sdebug_num_tgts > hpnt->this_id))
		hpnt->max_id = sdebug_num_tgts + 1;
	else
		hpnt->max_id = sdebug_num_tgts;
	/* = sdebug_max_luns; */
	hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;

	hprot = 0;

	/* translate the dif/dix module parameters into SHOST_* capabilities */
	switch (sdebug_dif) {

	case T10_PI_TYPE1_PROTECTION:
		hprot = SHOST_DIF_TYPE1_PROTECTION;
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE1_PROTECTION;
		break;

	case T10_PI_TYPE2_PROTECTION:
		hprot = SHOST_DIF_TYPE2_PROTECTION;
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE2_PROTECTION;
		break;

	case T10_PI_TYPE3_PROTECTION:
		hprot = SHOST_DIF_TYPE3_PROTECTION;
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE3_PROTECTION;
		break;

	default:
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE0_PROTECTION;
		break;
	}

	scsi_host_set_prot(hpnt, hprot);

	if (have_dif_prot || sdebug_dix)
		pr_info("host protection%s%s%s%s%s%s%s\n",
			(hprot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
			(hprot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
			(hprot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
			(hprot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
			(hprot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
			(hprot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
			(hprot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");

	if (sdebug_guard == 1)
		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
	else
		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);

	sdebug_verbose = !!(SDEBUG_OPT_NOISE & sdebug_opts);
	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & sdebug_opts);
	if (sdebug_every_nth)	/* need stats counters for every_nth */
		sdebug_statistics = true;
	error = scsi_add_host(hpnt, &sdbg_host->dev);
	if (error) {
		pr_err("scsi_add_host failed\n");
		error = -ENODEV;
		scsi_host_put(hpnt);
	} else {
		scsi_scan_host(hpnt);
	}

	return error;
}
8549 
8550 static void sdebug_driver_remove(struct device *dev)
8551 {
8552 	struct sdebug_host_info *sdbg_host;
8553 	struct sdebug_dev_info *sdbg_devinfo, *tmp;
8554 
8555 	sdbg_host = dev_to_sdebug_host(dev);
8556 
8557 	scsi_remove_host(sdbg_host->shost);
8558 
8559 	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
8560 				 dev_list) {
8561 		list_del(&sdbg_devinfo->dev_list);
8562 		kfree(sdbg_devinfo->zstate);
8563 		kfree(sdbg_devinfo);
8564 	}
8565 
8566 	scsi_host_put(sdbg_host->shost);
8567 }
8568 
/*
 * Pseudo bus that the scsi_debug fake adapters hang off.  probe/remove
 * are invoked by the driver core to bring emulated SCSI hosts up and
 * tear them down (see sdebug_driver_remove above).
 */
static const struct bus_type pseudo_lld_bus = {
	.name = "pseudo",
	.probe = sdebug_driver_probe,
	.remove = sdebug_driver_remove,
	.drv_groups = sdebug_drv_groups,
};
8575