xref: /linux/drivers/scsi/scsi_debug.c (revision ba3193fa8fc8910f724b67a523ec67ee24997d3e)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
4  *  Copyright (C) 1992  Eric Youngdale
5  *  Simulate a host adapter with 2 disks attached.  Do a lot of checking
6  *  to make sure that we are not getting blocks mixed up, and PANIC if
7  *  anything out of the ordinary is seen.
8  * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
9  *
10  * Copyright (C) 2001 - 2021 Douglas Gilbert
11  *
12  *  For documentation see http://sg.danny.cz/sg/scsi_debug.html
13  */
14 
15 
16 #define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
17 
18 #include <linux/module.h>
19 #include <linux/align.h>
20 #include <linux/kernel.h>
21 #include <linux/errno.h>
22 #include <linux/jiffies.h>
23 #include <linux/slab.h>
24 #include <linux/types.h>
25 #include <linux/string.h>
26 #include <linux/fs.h>
27 #include <linux/init.h>
28 #include <linux/proc_fs.h>
29 #include <linux/vmalloc.h>
30 #include <linux/moduleparam.h>
31 #include <linux/scatterlist.h>
32 #include <linux/blkdev.h>
33 #include <linux/crc-t10dif.h>
34 #include <linux/spinlock.h>
35 #include <linux/interrupt.h>
36 #include <linux/atomic.h>
37 #include <linux/hrtimer.h>
38 #include <linux/uuid.h>
39 #include <linux/t10-pi.h>
40 #include <linux/msdos_partition.h>
41 #include <linux/random.h>
42 #include <linux/xarray.h>
43 #include <linux/prefetch.h>
44 #include <linux/debugfs.h>
45 #include <linux/async.h>
46 #include <linux/cleanup.h>
47 
48 #include <net/checksum.h>
49 
50 #include <asm/unaligned.h>
51 
52 #include <scsi/scsi.h>
53 #include <scsi/scsi_cmnd.h>
54 #include <scsi/scsi_device.h>
55 #include <scsi/scsi_host.h>
56 #include <scsi/scsicam.h>
57 #include <scsi/scsi_eh.h>
58 #include <scsi/scsi_tcq.h>
59 #include <scsi/scsi_dbg.h>
60 
61 #include "sd.h"
62 #include "scsi_logging.h"
63 
64 /* make sure inq_product_rev string corresponds to this version */
65 #define SDEBUG_VERSION "0191"	/* format to fit INQUIRY revision field */
66 static const char *sdebug_version_date = "20210520";
67 
68 #define MY_NAME "scsi_debug"
69 
70 /* Additional Sense Code (ASC) */
71 #define NO_ADDITIONAL_SENSE 0x0
72 #define LOGICAL_UNIT_NOT_READY 0x4
73 #define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
74 #define UNRECOVERED_READ_ERR 0x11
75 #define PARAMETER_LIST_LENGTH_ERR 0x1a
76 #define INVALID_OPCODE 0x20
77 #define LBA_OUT_OF_RANGE 0x21
78 #define INVALID_FIELD_IN_CDB 0x24
79 #define INVALID_FIELD_IN_PARAM_LIST 0x26
80 #define WRITE_PROTECTED 0x27
81 #define UA_RESET_ASC 0x29
82 #define UA_CHANGED_ASC 0x2a
83 #define TARGET_CHANGED_ASC 0x3f
84 #define LUNS_CHANGED_ASCQ 0x0e
85 #define INSUFF_RES_ASC 0x55
86 #define INSUFF_RES_ASCQ 0x3
87 #define POWER_ON_RESET_ASCQ 0x0
88 #define POWER_ON_OCCURRED_ASCQ 0x1
89 #define BUS_RESET_ASCQ 0x2	/* scsi bus reset occurred */
90 #define MODE_CHANGED_ASCQ 0x1	/* mode parameters changed */
91 #define CAPACITY_CHANGED_ASCQ 0x9
92 #define SAVING_PARAMS_UNSUP 0x39
93 #define TRANSPORT_PROBLEM 0x4b
94 #define THRESHOLD_EXCEEDED 0x5d
95 #define LOW_POWER_COND_ON 0x5e
96 #define MISCOMPARE_VERIFY_ASC 0x1d
97 #define MICROCODE_CHANGED_ASCQ 0x1	/* with TARGET_CHANGED_ASC */
98 #define MICROCODE_CHANGED_WO_RESET_ASCQ 0x16
99 #define WRITE_ERROR_ASC 0xc
100 #define UNALIGNED_WRITE_ASCQ 0x4
101 #define WRITE_BOUNDARY_ASCQ 0x5
102 #define READ_INVDATA_ASCQ 0x6
103 #define READ_BOUNDARY_ASCQ 0x7
104 #define ATTEMPT_ACCESS_GAP 0x9
105 #define INSUFF_ZONE_ASCQ 0xe
106 
107 /* Additional Sense Code Qualifier (ASCQ) */
108 #define ACK_NAK_TO 0x3
109 
110 /* Default values for driver parameters */
111 #define DEF_NUM_HOST   1
112 #define DEF_NUM_TGTS   1
113 #define DEF_MAX_LUNS   1
114 /* With these defaults, this driver will make 1 host with 1 target
115  * (id 0) containing 1 logical unit (lun 0). That is 1 device.
116  */
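/* A hedged usage sketch (module parameter names from this driver): the
 * defaults above can be overridden at load time, for example:
 *   modprobe scsi_debug add_host=2 num_tgts=2 max_luns=4
 * which yields 2 hosts, each with 2 targets of 4 LUNs: 16 devices.
 */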
117 #define DEF_ATO 1
118 #define DEF_CDB_LEN 10
119 #define DEF_JDELAY   1		/* if > 0 unit is a jiffy */
120 #define DEF_DEV_SIZE_PRE_INIT   0
121 #define DEF_DEV_SIZE_MB   8
122 #define DEF_ZBC_DEV_SIZE_MB   128
123 #define DEF_DIF 0
124 #define DEF_DIX 0
125 #define DEF_PER_HOST_STORE false
126 #define DEF_D_SENSE   0
127 #define DEF_EVERY_NTH   0
128 #define DEF_FAKE_RW	0
129 #define DEF_GUARD 0
130 #define DEF_HOST_LOCK 0
131 #define DEF_LBPU 0
132 #define DEF_LBPWS 0
133 #define DEF_LBPWS10 0
134 #define DEF_LBPRZ 1
135 #define DEF_LOWEST_ALIGNED 0
136 #define DEF_NDELAY   0		/* if > 0 unit is a nanosecond */
137 #define DEF_NO_LUN_0   0
138 #define DEF_NUM_PARTS   0
139 #define DEF_OPTS   0
140 #define DEF_OPT_BLKS 1024
141 #define DEF_PHYSBLK_EXP 0
142 #define DEF_OPT_XFERLEN_EXP 0
143 #define DEF_PTYPE   TYPE_DISK
144 #define DEF_RANDOM false
145 #define DEF_REMOVABLE false
146 #define DEF_SCSI_LEVEL   7    /* INQUIRY, byte2 [6->SPC-4; 7->SPC-5] */
147 #define DEF_SECTOR_SIZE 512
148 #define DEF_UNMAP_ALIGNMENT 0
149 #define DEF_UNMAP_GRANULARITY 1
150 #define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
151 #define DEF_UNMAP_MAX_DESC 256
152 #define DEF_VIRTUAL_GB   0
153 #define DEF_VPD_USE_HOSTNO 1
154 #define DEF_WRITESAME_LENGTH 0xFFFF
155 #define DEF_STRICT 0
156 #define DEF_STATISTICS false
157 #define DEF_SUBMIT_QUEUES 1
158 #define DEF_TUR_MS_TO_READY 0
159 #define DEF_UUID_CTL 0
160 #define JDELAY_OVERRIDDEN -9999
161 
162 /* Default parameters for ZBC drives */
163 #define DEF_ZBC_ZONE_SIZE_MB	128
164 #define DEF_ZBC_MAX_OPEN_ZONES	8
165 #define DEF_ZBC_NR_CONV_ZONES	1
166 
167 #define SDEBUG_LUN_0_VAL 0
168 
169 /* bit mask values for sdebug_opts */
170 #define SDEBUG_OPT_NOISE		1
171 #define SDEBUG_OPT_MEDIUM_ERR		2
172 #define SDEBUG_OPT_TIMEOUT		4
173 #define SDEBUG_OPT_RECOVERED_ERR	8
174 #define SDEBUG_OPT_TRANSPORT_ERR	16
175 #define SDEBUG_OPT_DIF_ERR		32
176 #define SDEBUG_OPT_DIX_ERR		64
177 #define SDEBUG_OPT_MAC_TIMEOUT		128
178 #define SDEBUG_OPT_SHORT_TRANSFER	0x100
179 #define SDEBUG_OPT_Q_NOISE		0x200
180 #define SDEBUG_OPT_ALL_TSF		0x400	/* ignore */
181 #define SDEBUG_OPT_RARE_TSF		0x800
182 #define SDEBUG_OPT_N_WCE		0x1000
183 #define SDEBUG_OPT_RESET_NOISE		0x2000
184 #define SDEBUG_OPT_NO_CDB_NOISE		0x4000
185 #define SDEBUG_OPT_HOST_BUSY		0x8000
186 #define SDEBUG_OPT_CMD_ABORT		0x10000
187 #define SDEBUG_OPT_ALL_NOISE (SDEBUG_OPT_NOISE | SDEBUG_OPT_Q_NOISE | \
188 			      SDEBUG_OPT_RESET_NOISE)
189 #define SDEBUG_OPT_ALL_INJECTING (SDEBUG_OPT_RECOVERED_ERR | \
190 				  SDEBUG_OPT_TRANSPORT_ERR | \
191 				  SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR | \
192 				  SDEBUG_OPT_SHORT_TRANSFER | \
193 				  SDEBUG_OPT_HOST_BUSY | \
194 				  SDEBUG_OPT_CMD_ABORT)
195 #define SDEBUG_OPT_RECOV_DIF_DIX (SDEBUG_OPT_RECOVERED_ERR | \
196 				  SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR)
197 
198 /* As indicated in SAM-5 and SPC-4, Unit Attentions (UAs) are returned in
199  * priority order. In the subset implemented here, lower numbers have higher
200  * priority. The UA numbers should be a sequence starting from 0, with
201  * SDEBUG_NUM_UAS being 1 higher than the highest numbered UA. */
202 #define SDEBUG_UA_POR 0		/* Power on, reset, or bus device reset */
203 #define SDEBUG_UA_POOCCUR 1	/* Power on occurred */
204 #define SDEBUG_UA_BUS_RESET 2
205 #define SDEBUG_UA_MODE_CHANGED 3
206 #define SDEBUG_UA_CAPACITY_CHANGED 4
207 #define SDEBUG_UA_LUNS_CHANGED 5
208 #define SDEBUG_UA_MICROCODE_CHANGED 6	/* simulate firmware change */
209 #define SDEBUG_UA_MICROCODE_CHANGED_WO_RESET 7
210 #define SDEBUG_NUM_UAS 8
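/* Worked example of the priority rule above: if both SDEBUG_UA_POR (0) and
 * SDEBUG_UA_LUNS_CHANGED (5) are pending in a device's uas_bm, then
 * find_first_bit() in make_ua() returns 0, so the power on/reset UA is
 * reported first and the LUNS_CHANGED UA waits for a later command.
 */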
211 
212 /* when the SDEBUG_OPT_MEDIUM_ERR bit is set in sdebug_opts, a medium error
213  * is simulated at this sector on read commands: */
214 #define OPT_MEDIUM_ERR_ADDR   0x1234 /* that's sector 4660 in decimal */
215 #define OPT_MEDIUM_ERR_NUM    10     /* number of consecutive medium errs */
216 
217 /* SDEBUG_CANQUEUE is the maximum number of commands that can be queued
218  * (for response) per submit queue at one time. Can be reduced by max_queue
219  * option. Command responses are not queued when jdelay=0 and ndelay=0. The
220  * per-device DEF_CMD_PER_LUN can be changed via sysfs:
221  * /sys/class/scsi_device/<h:c:t:l>/device/queue_depth
222  * but cannot exceed SDEBUG_CANQUEUE.
223  */
224 #define SDEBUG_CANQUEUE_WORDS  3	/* a WORD here is BITS_PER_LONG bits */
225 #define SDEBUG_CANQUEUE  (SDEBUG_CANQUEUE_WORDS * BITS_PER_LONG)
226 #define DEF_CMD_PER_LUN  SDEBUG_CANQUEUE
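/* Usage sketch (device address 0:0:0:0 assumed) of the sysfs knob noted
 * above, lowering the per-device queue depth at run time:
 *   echo 4 > /sys/class/scsi_device/0:0:0:0/device/queue_depth
 */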
227 
228 /* UA - Unit Attention; SA - Service Action; SSU - Start Stop Unit */
229 #define F_D_IN			1	/* Data-in command (e.g. READ) */
230 #define F_D_OUT			2	/* Data-out command (e.g. WRITE) */
231 #define F_D_OUT_MAYBE		4	/* WRITE SAME, NDOB bit */
232 #define F_D_UNKN		8
233 #define F_RL_WLUN_OK		0x10	/* allowed with REPORT LUNS W-LUN */
234 #define F_SKIP_UA		0x20	/* bypass UAs (e.g. INQUIRY command) */
235 #define F_DELAY_OVERR		0x40	/* for commands like INQUIRY */
236 #define F_SA_LOW		0x80	/* SA is in cdb byte 1, bits 4 to 0 */
237 #define F_SA_HIGH		0x100	/* SA is in cdb bytes 8 and 9 */
238 #define F_INV_OP		0x200	/* invalid opcode (not supported) */
239 #define F_FAKE_RW		0x400	/* bypass resp_*() when fake_rw set */
240 #define F_M_ACCESS		0x800	/* media access, reacts to SSU state */
241 #define F_SSU_DELAY		0x1000	/* SSU command delay (long-ish) */
242 #define F_SYNC_DELAY		0x2000	/* SYNCHRONIZE CACHE delay */
243 
244 /* Useful combinations of the above flags */
245 #define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
246 #define FF_MEDIA_IO (F_M_ACCESS | F_FAKE_RW)
247 #define FF_SA (F_SA_HIGH | F_SA_LOW)
248 #define F_LONG_DELAY		(F_SSU_DELAY | F_SYNC_DELAY)
249 
250 #define SDEBUG_MAX_PARTS 4
251 
252 #define SDEBUG_MAX_CMD_LEN 32
253 
254 #define SDEB_XA_NOT_IN_USE XA_MARK_1
255 
256 static struct kmem_cache *queued_cmd_cache;
257 
258 #define TO_QUEUED_CMD(scmd)  ((void *)(scmd)->host_scribble)
259 #define ASSIGN_QUEUED_CMD(scmnd, qc) { (scmnd)->host_scribble = (void *) qc; }
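/* Usage sketch of the accessors above: the in-flight state of a command is
 * stashed in scsi_cmnd->host_scribble, e.g.
 *	struct sdebug_queued_cmd *sqcp = TO_QUEUED_CMD(scmd);
 *	...
 *	ASSIGN_QUEUED_CMD(scmd, NULL);	// detach before freeing sqcp
 */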
260 
261 /* Zone types (zbcr05 table 25) */
262 enum sdebug_z_type {
263 	ZBC_ZTYPE_CNV	= 0x1,
264 	ZBC_ZTYPE_SWR	= 0x2,
265 	ZBC_ZTYPE_SWP	= 0x3,
266 	/* ZBC_ZTYPE_SOBR = 0x4, */
267 	ZBC_ZTYPE_GAP	= 0x5,
268 };
269 
270 /* enumeration names taken from table 26, zbcr05 */
271 enum sdebug_z_cond {
272 	ZBC_NOT_WRITE_POINTER	= 0x0,
273 	ZC1_EMPTY		= 0x1,
274 	ZC2_IMPLICIT_OPEN	= 0x2,
275 	ZC3_EXPLICIT_OPEN	= 0x3,
276 	ZC4_CLOSED		= 0x4,
277 	ZC6_READ_ONLY		= 0xd,
278 	ZC5_FULL		= 0xe,
279 	ZC7_OFFLINE		= 0xf,
280 };
281 
282 struct sdeb_zone_state {	/* ZBC: per zone state */
283 	enum sdebug_z_type z_type;
284 	enum sdebug_z_cond z_cond;
285 	bool z_non_seq_resource;
286 	unsigned int z_size;
287 	sector_t z_start;
288 	sector_t z_wp;
289 };
290 
291 enum sdebug_err_type {
292 	ERR_TMOUT_CMD		= 0,	/* make a specific scsi command time out */
293 	ERR_FAIL_QUEUE_CMD	= 1,	/* make a specific scsi command's */
294 					/* queuecommand() return failed */
295 	ERR_FAIL_CMD		= 2,	/* make a specific scsi command's */
296 					/* queuecommand() succeed, but with */
297 					/* errors set in the scsi_cmnd */
298 	ERR_ABORT_CMD_FAILED	= 3,	/* make scsi_debug_abort() */
299 					/* return FAILED */
300 	ERR_LUN_RESET_FAILED	= 4,	/* make scsi_debug_device_reset() */
301 					/* return FAILED */
302 };
303 
304 struct sdebug_err_inject {
305 	int type;
306 	struct list_head list;
307 	int cnt;
308 	unsigned char cmd;
309 	struct rcu_head rcu;
310 
311 	union {
312 		/*
313 		 * For ERR_FAIL_QUEUE_CMD
314 		 */
315 		int queuecmd_ret;
316 
317 		/*
318 		 * For ERR_FAIL_CMD
319 		 */
320 		struct {
321 			unsigned char host_byte;
322 			unsigned char driver_byte;
323 			unsigned char status_byte;
324 			unsigned char sense_key;
325 			unsigned char asc;
326 			unsigned char asq;
327 		};
328 	};
329 };
330 
331 struct sdebug_dev_info {
332 	struct list_head dev_list;
333 	unsigned int channel;
334 	unsigned int target;
335 	u64 lun;
336 	uuid_t lu_name;
337 	struct sdebug_host_info *sdbg_host;
338 	unsigned long uas_bm[1];
339 	atomic_t stopped;	/* 1: by SSU, 2: device start */
340 	bool used;
341 
342 	/* For ZBC devices */
343 	bool zoned;
344 	unsigned int zcap;
345 	unsigned int zsize;
346 	unsigned int zsize_shift;
347 	unsigned int nr_zones;
348 	unsigned int nr_conv_zones;
349 	unsigned int nr_seq_zones;
350 	unsigned int nr_imp_open;
351 	unsigned int nr_exp_open;
352 	unsigned int nr_closed;
353 	unsigned int max_open;
354 	ktime_t create_ts;	/* time since bootup that this device was created */
355 	struct sdeb_zone_state *zstate;
356 
357 	struct dentry *debugfs_entry;
358 	struct spinlock list_lock;
359 	struct list_head inject_err_list;
360 };
361 
362 struct sdebug_target_info {
363 	bool reset_fail;
364 	struct dentry *debugfs_entry;
365 };
366 
367 struct sdebug_host_info {
368 	struct list_head host_list;
369 	int si_idx;	/* sdeb_store_info (per host) xarray index */
370 	struct Scsi_Host *shost;
371 	struct device dev;
372 	struct list_head dev_info_list;
373 };
374 
375 /* There is an xarray of pointers to this struct's objects, one per host */
376 struct sdeb_store_info {
377 	rwlock_t macc_lck;	/* for atomic media access on this store */
378 	u8 *storep;		/* user data storage (ram) */
379 	struct t10_pi_tuple *dif_storep; /* protection info */
380 	void *map_storep;	/* provisioning map */
381 };
382 
383 #define dev_to_sdebug_host(d)	\
384 	container_of(d, struct sdebug_host_info, dev)
385 
386 #define shost_to_sdebug_host(shost)	\
387 	dev_to_sdebug_host(shost->dma_dev)
388 
389 enum sdeb_defer_type {SDEB_DEFER_NONE = 0, SDEB_DEFER_HRT = 1,
390 		      SDEB_DEFER_WQ = 2, SDEB_DEFER_POLL = 3};
391 
392 struct sdebug_defer {
393 	struct hrtimer hrt;
394 	struct execute_work ew;
395 	ktime_t cmpl_ts;/* time since boot to complete this cmd */
396 	int issuing_cpu;
397 	bool aborted;	/* true when blk_abort_request() already called */
398 	enum sdeb_defer_type defer_t;
399 };
400 
401 struct sdebug_queued_cmd {
402 	/* Allocated from queued_cmd_cache; attached to its scsi_cmnd via
403 	 * host_scribble (see TO_QUEUED_CMD/ASSIGN_QUEUED_CMD above).
404 	 */
405 	struct sdebug_defer sd_dp;
406 	struct scsi_cmnd *scmd;
407 };
408 
409 struct sdebug_scsi_cmd {
410 	spinlock_t   lock;
411 };
412 
413 static atomic_t sdebug_cmnd_count;   /* number of incoming commands */
414 static atomic_t sdebug_completions;  /* count of deferred completions */
415 static atomic_t sdebug_miss_cpus;    /* submission + completion cpus differ */
416 static atomic_t sdebug_a_tsf;	     /* 'almost task set full' counter */
417 static atomic_t sdeb_inject_pending;
418 static atomic_t sdeb_mq_poll_count;  /* bumped when mq_poll returns > 0 */
419 
420 struct opcode_info_t {
421 	u8 num_attached;	/* 0 if this is a leaf (no overflow array); */
422 				/* 0xff marks the terminating element */
423 	u8 opcode;		/* if num_attached > 0, preferred */
424 	u16 sa;			/* service action */
425 	u32 flags;		/* OR-ed set of SDEB_F_* */
426 	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
427 	const struct opcode_info_t *arrp;  /* num_attached elements or NULL */
428 	u8 len_mask[16];	/* len_mask[0]-->cdb_len, then mask for cdb */
429 				/* bytes 1 to min(cdb_len, 15); ignore cdb[16...] */
430 };
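/* Worked example of len_mask[]: the INQUIRY entry in opcode_info_arr[]
 * below is {6, 0xe3, 0xff, 0xff, 0xff, 0xc7, 0, ...}. len_mask[0] = 6 says
 * the cdb is 6 bytes long; 0xe3 is the set of bits permitted in cdb[1],
 * 0xff in cdb[2] (any page code) and 0xc7 in cdb[5] (the CONTROL byte).
 * Bits set in the cdb outside these masks are liable to be rejected when
 * the strict parameter is set.
 */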
431 
432 /* SCSI opcodes (first byte of cdb) of interest mapped onto these indexes */
433 enum sdeb_opcode_index {
434 	SDEB_I_INVALID_OPCODE =	0,
435 	SDEB_I_INQUIRY = 1,
436 	SDEB_I_REPORT_LUNS = 2,
437 	SDEB_I_REQUEST_SENSE = 3,
438 	SDEB_I_TEST_UNIT_READY = 4,
439 	SDEB_I_MODE_SENSE = 5,		/* 6, 10 */
440 	SDEB_I_MODE_SELECT = 6,		/* 6, 10 */
441 	SDEB_I_LOG_SENSE = 7,
442 	SDEB_I_READ_CAPACITY = 8,	/* 10; 16 is in SA_IN(16) */
443 	SDEB_I_READ = 9,		/* 6, 10, 12, 16 */
444 	SDEB_I_WRITE = 10,		/* 6, 10, 12, 16 */
445 	SDEB_I_START_STOP = 11,
446 	SDEB_I_SERV_ACT_IN_16 = 12,	/* add ...SERV_ACT_IN_12 if needed */
447 	SDEB_I_SERV_ACT_OUT_16 = 13,	/* add ...SERV_ACT_OUT_12 if needed */
448 	SDEB_I_MAINT_IN = 14,
449 	SDEB_I_MAINT_OUT = 15,
450 	SDEB_I_VERIFY = 16,		/* VERIFY(10), VERIFY(16) */
451 	SDEB_I_VARIABLE_LEN = 17,	/* READ(32), WRITE(32), WR_SCAT(32) */
452 	SDEB_I_RESERVE = 18,		/* 6, 10 */
453 	SDEB_I_RELEASE = 19,		/* 6, 10 */
454 	SDEB_I_ALLOW_REMOVAL = 20,	/* PREVENT ALLOW MEDIUM REMOVAL */
455 	SDEB_I_REZERO_UNIT = 21,	/* REWIND in SSC */
456 	SDEB_I_ATA_PT = 22,		/* 12, 16 */
457 	SDEB_I_SEND_DIAG = 23,
458 	SDEB_I_UNMAP = 24,
459 	SDEB_I_WRITE_BUFFER = 25,
460 	SDEB_I_WRITE_SAME = 26,		/* 10, 16 */
461 	SDEB_I_SYNC_CACHE = 27,		/* 10, 16 */
462 	SDEB_I_COMP_WRITE = 28,
463 	SDEB_I_PRE_FETCH = 29,		/* 10, 16 */
464 	SDEB_I_ZONE_OUT = 30,		/* 0x94+SA; includes no data xfer */
465 	SDEB_I_ZONE_IN = 31,		/* 0x95+SA; all have data-in */
466 	SDEB_I_LAST_ELEM_P1 = 32,	/* keep this last (previous + 1) */
467 };
468 
469 
470 static const unsigned char opcode_ind_arr[256] = {
471 /* 0x0; 0x0->0x1f: 6 byte cdbs */
472 	SDEB_I_TEST_UNIT_READY, SDEB_I_REZERO_UNIT, 0, SDEB_I_REQUEST_SENSE,
473 	    0, 0, 0, 0,
474 	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, 0,
475 	0, 0, SDEB_I_INQUIRY, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
476 	    SDEB_I_RELEASE,
477 	0, 0, SDEB_I_MODE_SENSE, SDEB_I_START_STOP, 0, SDEB_I_SEND_DIAG,
478 	    SDEB_I_ALLOW_REMOVAL, 0,
479 /* 0x20; 0x20->0x3f: 10 byte cdbs */
480 	0, 0, 0, 0, 0, SDEB_I_READ_CAPACITY, 0, 0,
481 	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, SDEB_I_VERIFY,
482 	0, 0, 0, 0, SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, 0,
483 	0, 0, 0, SDEB_I_WRITE_BUFFER, 0, 0, 0, 0,
484 /* 0x40; 0x40->0x5f: 10 byte cdbs */
485 	0, SDEB_I_WRITE_SAME, SDEB_I_UNMAP, 0, 0, 0, 0, 0,
486 	0, 0, 0, 0, 0, SDEB_I_LOG_SENSE, 0, 0,
487 	0, 0, 0, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
488 	    SDEB_I_RELEASE,
489 	0, 0, SDEB_I_MODE_SENSE, 0, 0, 0, 0, 0,
490 /* 0x60; 0x60->0x7d are reserved, 0x7e is "extended cdb" */
491 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
492 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
493 	0, SDEB_I_VARIABLE_LEN,
494 /* 0x80; 0x80->0x9f: 16 byte cdbs */
495 	0, 0, 0, 0, 0, SDEB_I_ATA_PT, 0, 0,
496 	SDEB_I_READ, SDEB_I_COMP_WRITE, SDEB_I_WRITE, 0,
497 	0, 0, 0, SDEB_I_VERIFY,
498 	SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, SDEB_I_WRITE_SAME,
499 	SDEB_I_ZONE_OUT, SDEB_I_ZONE_IN, 0, 0,
500 	0, 0, 0, 0, 0, 0, SDEB_I_SERV_ACT_IN_16, SDEB_I_SERV_ACT_OUT_16,
501 /* 0xa0; 0xa0->0xbf: 12 byte cdbs */
502 	SDEB_I_REPORT_LUNS, SDEB_I_ATA_PT, 0, SDEB_I_MAINT_IN,
503 	     SDEB_I_MAINT_OUT, 0, 0, 0,
504 	SDEB_I_READ, 0 /* SDEB_I_SERV_ACT_OUT_12 */, SDEB_I_WRITE,
505 	     0 /* SDEB_I_SERV_ACT_IN_12 */, 0, 0, 0, 0,
506 	0, 0, 0, 0, 0, 0, 0, 0,
507 	0, 0, 0, 0, 0, 0, 0, 0,
508 /* 0xc0; 0xc0->0xff: vendor specific */
509 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
510 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
511 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
512 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
513 };
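/* Worked example: opcode 0x28 (READ(10)) maps above to SDEB_I_READ (9);
 * opcode_info_arr[SDEB_I_READ] below carries READ(16) as the preferred
 * form, with READ(10) found in its read_iarr overflow array.
 */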
514 
515 /*
516  * The following "response" functions return the SCSI mid-level's 4 byte
517  * tuple-in-an-int. To handle commands with an IMMED bit, for a faster
518  * command completion, they can OR their return value with
519  * SDEG_RES_IMMED_MASK.
520  */
521 #define SDEG_RES_IMMED_MASK 0x40000000
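/* Hedged sketch of the convention above: a response function that honours
 * an IMMED bit can end with something like
 *	return res | SDEG_RES_IMMED_MASK;
 * to request the shorter (immediate) completion delay.
 */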
522 
523 static int resp_inquiry(struct scsi_cmnd *, struct sdebug_dev_info *);
524 static int resp_report_luns(struct scsi_cmnd *, struct sdebug_dev_info *);
525 static int resp_requests(struct scsi_cmnd *, struct sdebug_dev_info *);
526 static int resp_mode_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
527 static int resp_mode_select(struct scsi_cmnd *, struct sdebug_dev_info *);
528 static int resp_log_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
529 static int resp_readcap(struct scsi_cmnd *, struct sdebug_dev_info *);
530 static int resp_read_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
531 static int resp_write_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
532 static int resp_write_scat(struct scsi_cmnd *, struct sdebug_dev_info *);
533 static int resp_start_stop(struct scsi_cmnd *, struct sdebug_dev_info *);
534 static int resp_readcap16(struct scsi_cmnd *, struct sdebug_dev_info *);
535 static int resp_get_lba_status(struct scsi_cmnd *, struct sdebug_dev_info *);
536 static int resp_get_stream_status(struct scsi_cmnd *scp,
537 				  struct sdebug_dev_info *devip);
538 static int resp_report_tgtpgs(struct scsi_cmnd *, struct sdebug_dev_info *);
539 static int resp_unmap(struct scsi_cmnd *, struct sdebug_dev_info *);
540 static int resp_rsup_opcodes(struct scsi_cmnd *, struct sdebug_dev_info *);
541 static int resp_rsup_tmfs(struct scsi_cmnd *, struct sdebug_dev_info *);
542 static int resp_verify(struct scsi_cmnd *, struct sdebug_dev_info *);
543 static int resp_write_same_10(struct scsi_cmnd *, struct sdebug_dev_info *);
544 static int resp_write_same_16(struct scsi_cmnd *, struct sdebug_dev_info *);
545 static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *);
546 static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *);
547 static int resp_sync_cache(struct scsi_cmnd *, struct sdebug_dev_info *);
548 static int resp_pre_fetch(struct scsi_cmnd *, struct sdebug_dev_info *);
549 static int resp_report_zones(struct scsi_cmnd *, struct sdebug_dev_info *);
550 static int resp_open_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
551 static int resp_close_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
552 static int resp_finish_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
553 static int resp_rwp_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
554 
555 static int sdebug_do_add_host(bool mk_new_store);
556 static int sdebug_add_host_helper(int per_host_idx);
557 static void sdebug_do_remove_host(bool the_end);
558 static int sdebug_add_store(void);
559 static void sdebug_erase_store(int idx, struct sdeb_store_info *sip);
560 static void sdebug_erase_all_stores(bool apart_from_first);
561 
562 static void sdebug_free_queued_cmd(struct sdebug_queued_cmd *sqcp);
563 
564 /*
565  * The following are overflow arrays for cdbs that "hit" the same index in
566  * the opcode_info_arr array. The most time sensitive (or commonly used) cdb
567  * should be placed in opcode_info_arr[], the others should be placed here.
568  */
569 static const struct opcode_info_t msense_iarr[] = {
570 	{0, 0x1a, 0, F_D_IN, NULL, NULL,
571 	    {6,  0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
572 };
573 
574 static const struct opcode_info_t mselect_iarr[] = {
575 	{0, 0x15, 0, F_D_OUT, NULL, NULL,
576 	    {6,  0xf1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
577 };
578 
579 static const struct opcode_info_t read_iarr[] = {
580 	{0, 0x28, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(10) */
581 	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
582 	     0, 0, 0, 0} },
583 	{0, 0x8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL, /* READ(6) */
584 	    {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
585 	{0, 0xa8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(12) */
586 	    {12,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf,
587 	     0xc7, 0, 0, 0, 0} },
588 };
589 
590 static const struct opcode_info_t write_iarr[] = {
591 	{0, 0x2a, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(10) */
592 	    NULL, {10,  0xfb, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7,
593 		   0, 0, 0, 0, 0, 0} },
594 	{0, 0xa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,   /* WRITE(6) */
595 	    NULL, {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0,
596 		   0, 0, 0} },
597 	{0, 0xaa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(12) */
598 	    NULL, {12,  0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
599 		   0xbf, 0xc7, 0, 0, 0, 0} },
600 };
601 
602 static const struct opcode_info_t verify_iarr[] = {
603 	{0, 0x2f, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,/* VERIFY(10) */
604 	    NULL, {10,  0xf7, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xff, 0xff, 0xc7,
605 		   0, 0, 0, 0, 0, 0} },
606 };
607 
608 static const struct opcode_info_t sa_in_16_iarr[] = {
609 	{0, 0x9e, 0x12, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL,
610 	    {16,  0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
611 	     0xff, 0xff, 0xff, 0, 0xc7} },	/* GET LBA STATUS(16) */
612 	{0, 0x9e, 0x16, F_SA_LOW | F_D_IN, resp_get_stream_status, NULL,
613 	    {16, 0x16, 0, 0, 0xff, 0xff, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff,
614 	     0, 0} },	/* GET STREAM STATUS */
615 };
616 
617 static const struct opcode_info_t vl_iarr[] = {	/* VARIABLE LENGTH */
618 	{0, 0x7f, 0xb, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_dt0,
619 	    NULL, {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0xb, 0xfa,
620 		   0, 0xff, 0xff, 0xff, 0xff} },	/* WRITE(32) */
621 	{0, 0x7f, 0x11, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
622 	    NULL, {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x11, 0xf8,
623 		   0, 0xff, 0xff, 0x0, 0x0} },	/* WRITE SCATTERED(32) */
624 };
625 
626 static const struct opcode_info_t maint_in_iarr[] = {	/* MAINT IN */
627 	{0, 0xa3, 0xc, F_SA_LOW | F_D_IN, resp_rsup_opcodes, NULL,
628 	    {12,  0xc, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
629 	     0xc7, 0, 0, 0, 0} }, /* REPORT SUPPORTED OPERATION CODES */
630 	{0, 0xa3, 0xd, F_SA_LOW | F_D_IN, resp_rsup_tmfs, NULL,
631 	    {12,  0xd, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
632 	     0, 0} },	/* REPORT SUPPORTED TASK MANAGEMENT FUNCTIONS */
633 };
634 
635 static const struct opcode_info_t write_same_iarr[] = {
636 	{0, 0x93, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_write_same_16, NULL,
637 	    {16,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
638 	     0xff, 0xff, 0xff, 0x3f, 0xc7} },		/* WRITE SAME(16) */
639 };
640 
641 static const struct opcode_info_t reserve_iarr[] = {
642 	{0, 0x16, 0, F_D_OUT, NULL, NULL,		/* RESERVE(6) */
643 	    {6,  0x1f, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
644 };
645 
646 static const struct opcode_info_t release_iarr[] = {
647 	{0, 0x17, 0, F_D_OUT, NULL, NULL,		/* RELEASE(6) */
648 	    {6,  0x1f, 0xff, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
649 };
650 
651 static const struct opcode_info_t sync_cache_iarr[] = {
652 	{0, 0x91, 0, F_SYNC_DELAY | F_M_ACCESS, resp_sync_cache, NULL,
653 	    {16,  0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
654 	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* SYNC_CACHE (16) */
655 };
656 
657 static const struct opcode_info_t pre_fetch_iarr[] = {
658 	{0, 0x90, 0, F_SYNC_DELAY | FF_MEDIA_IO, resp_pre_fetch, NULL,
659 	    {16,  0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
660 	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* PRE-FETCH (16) */
661 };
662 
663 static const struct opcode_info_t zone_out_iarr[] = {	/* ZONE OUT(16) */
664 	{0, 0x94, 0x1, F_SA_LOW | F_M_ACCESS, resp_close_zone, NULL,
665 	    {16, 0x1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
666 	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },	/* CLOSE ZONE */
667 	{0, 0x94, 0x2, F_SA_LOW | F_M_ACCESS, resp_finish_zone, NULL,
668 	    {16, 0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
669 	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },	/* FINISH ZONE */
670 	{0, 0x94, 0x4, F_SA_LOW | F_M_ACCESS, resp_rwp_zone, NULL,
671 	    {16, 0x4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
672 	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },  /* RESET WRITE POINTER */
673 };
674 
675 static const struct opcode_info_t zone_in_iarr[] = {	/* ZONE IN(16) */
676 	{0, 0x95, 0x6, F_SA_LOW | F_D_IN | F_M_ACCESS, NULL, NULL,
677 	    {16, 0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
678 	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* REPORT ZONES */
679 };
680 
681 
682 /* This array is accessed via SDEB_I_* values. Make sure all are mapped,
683  * plus the terminating element, for logic that scans this table such as
684  * REPORT SUPPORTED OPERATION CODES. */
685 static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEM_P1 + 1] = {
686 /* 0 */
687 	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL,	/* unknown opcodes */
688 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
689 	{0, 0x12, 0, FF_RESPOND | F_D_IN, resp_inquiry, NULL, /* INQUIRY */
690 	    {6,  0xe3, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
691 	{0, 0xa0, 0, FF_RESPOND | F_D_IN, resp_report_luns, NULL,
692 	    {12,  0xe3, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
693 	     0, 0} },					/* REPORT LUNS */
694 	{0, 0x3, 0, FF_RESPOND | F_D_IN, resp_requests, NULL,
695 	    {6,  0xe1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
696 	{0, 0x0, 0, F_M_ACCESS | F_RL_WLUN_OK, NULL, NULL,/* TEST UNIT READY */
697 	    {6,  0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
698 /* 5 */
699 	{ARRAY_SIZE(msense_iarr), 0x5a, 0, F_D_IN,	/* MODE SENSE(10) */
700 	    resp_mode_sense, msense_iarr, {10,  0xf8, 0xff, 0xff, 0, 0, 0,
701 		0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
702 	{ARRAY_SIZE(mselect_iarr), 0x55, 0, F_D_OUT,	/* MODE SELECT(10) */
703 	    resp_mode_select, mselect_iarr, {10,  0xf1, 0, 0, 0, 0, 0, 0xff,
704 		0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
705 	{0, 0x4d, 0, F_D_IN, resp_log_sense, NULL,	/* LOG SENSE */
706 	    {10,  0xe3, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0,
707 	     0, 0, 0} },
708 	{0, 0x25, 0, F_D_IN, resp_readcap, NULL,    /* READ CAPACITY(10) */
709 	    {10,  0xe1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0x1, 0xc7, 0, 0, 0, 0,
710 	     0, 0} },
711 	{ARRAY_SIZE(read_iarr), 0x88, 0, F_D_IN | FF_MEDIA_IO, /* READ(16) */
712 	    resp_read_dt0, read_iarr, {16,  0xfe, 0xff, 0xff, 0xff, 0xff,
713 	    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
714 /* 10 */
715 	{ARRAY_SIZE(write_iarr), 0x8a, 0, F_D_OUT | FF_MEDIA_IO,
716 	    resp_write_dt0, write_iarr,			/* WRITE(16) */
717 		{16,  0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
718 		 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
719 	{0, 0x1b, 0, F_SSU_DELAY, resp_start_stop, NULL,/* START STOP UNIT */
720 	    {6,  0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
721 	{ARRAY_SIZE(sa_in_16_iarr), 0x9e, 0x10, F_SA_LOW | F_D_IN,
722 	    resp_readcap16, sa_in_16_iarr, /* SA_IN(16), READ CAPACITY(16) */
723 		{16,  0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
724 		 0xff, 0xff, 0xff, 0xff, 0x1, 0xc7} },
725 	{0, 0x9f, 0x12, F_SA_LOW | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
726 	    NULL, {16,  0x12, 0xf9, 0x0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff,
727 	    0xff, 0xff, 0xff, 0xff, 0xc7} },  /* SA_OUT(16), WRITE SCAT(16) */
728 	{ARRAY_SIZE(maint_in_iarr), 0xa3, 0xa, F_SA_LOW | F_D_IN,
729 	    resp_report_tgtpgs,	/* MAINT IN, REPORT TARGET PORT GROUPS */
730 		maint_in_iarr, {12,  0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff,
731 				0xff, 0, 0xc7, 0, 0, 0, 0} },
732 /* 15 */
733 	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */
734 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
735 	{ARRAY_SIZE(verify_iarr), 0x8f, 0,
736 	    F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,	/* VERIFY(16) */
737 	    verify_iarr, {16,  0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
738 			  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },
739 	{ARRAY_SIZE(vl_iarr), 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_MEDIA_IO,
740 	    resp_read_dt0, vl_iarr,	/* VARIABLE LENGTH, READ(32) */
741 	    {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x9, 0xfe, 0, 0xff, 0xff,
742 	     0xff, 0xff} },
743 	{ARRAY_SIZE(reserve_iarr), 0x56, 0, F_D_OUT,
744 	    NULL, reserve_iarr,	/* RESERVE(10) <no response function> */
745 	    {10,  0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
746 	     0} },
747 	{ARRAY_SIZE(release_iarr), 0x57, 0, F_D_OUT,
748 	    NULL, release_iarr, /* RELEASE(10) <no response function> */
749 	    {10,  0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
750 	     0} },
751 /* 20 */
752 	{0, 0x1e, 0, 0, NULL, NULL, /* ALLOW REMOVAL */
753 	    {6,  0, 0, 0, 0x3, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
754 	{0, 0x1, 0, 0, resp_start_stop, NULL, /* REWIND ?? */
755 	    {6,  0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
756 	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */
757 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
758 	{0, 0x1d, 0, F_D_OUT, NULL, NULL,	/* SEND DIAGNOSTIC */
759 	    {6,  0xf7, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
760 	{0, 0x42, 0, F_D_OUT | FF_MEDIA_IO, resp_unmap, NULL, /* UNMAP */
761 	    {10,  0x1, 0, 0, 0, 0, 0x3f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
762 /* 25 */
763 	{0, 0x3b, 0, F_D_OUT_MAYBE, resp_write_buffer, NULL,
764 	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0,
765 	     0, 0, 0, 0} },			/* WRITE_BUFFER */
766 	{ARRAY_SIZE(write_same_iarr), 0x41, 0, F_D_OUT_MAYBE | FF_MEDIA_IO,
767 	    resp_write_same_10, write_same_iarr,	/* WRITE SAME(10) */
768 		{10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0,
769 		 0, 0, 0, 0, 0} },
770 	{ARRAY_SIZE(sync_cache_iarr), 0x35, 0, F_SYNC_DELAY | F_M_ACCESS,
771 	    resp_sync_cache, sync_cache_iarr,
772 	    {10,  0x7, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
773 	     0, 0, 0, 0} },			/* SYNC_CACHE (10) */
774 	{0, 0x89, 0, F_D_OUT | FF_MEDIA_IO, resp_comp_write, NULL,
775 	    {16,  0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0,
776 	     0, 0xff, 0x3f, 0xc7} },		/* COMPARE AND WRITE */
777 	{ARRAY_SIZE(pre_fetch_iarr), 0x34, 0, F_SYNC_DELAY | FF_MEDIA_IO,
778 	    resp_pre_fetch, pre_fetch_iarr,
779 	    {10,  0x2, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
780 	     0, 0, 0, 0} },			/* PRE-FETCH (10) */
781 
782 /* 30 */
783 	{ARRAY_SIZE(zone_out_iarr), 0x94, 0x3, F_SA_LOW | F_M_ACCESS,
784 	    resp_open_zone, zone_out_iarr, /* ZONE_OUT(16), OPEN ZONE) */
785 		{16,  0x3 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
786 		 0xff, 0xff, 0x0, 0x0, 0xff, 0xff, 0x1, 0xc7} },
787 	{ARRAY_SIZE(zone_in_iarr), 0x95, 0x0, F_SA_LOW | F_M_ACCESS,
788 	    resp_report_zones, zone_in_iarr, /* ZONE_IN(16), REPORT ZONES) */
789 		{16,  0x0 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
790 		 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xc7} },
791 /* sentinel */
792 	{0xff, 0, 0, 0, NULL, NULL,		/* terminating element */
793 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
794 };
795 
796 static int sdebug_num_hosts;
797 static int sdebug_add_host = DEF_NUM_HOST;  /* in sysfs this is relative */
798 static int sdebug_ato = DEF_ATO;
799 static int sdebug_cdb_len = DEF_CDB_LEN;
800 static int sdebug_jdelay = DEF_JDELAY;	/* if > 0 then unit is jiffies */
801 static int sdebug_dev_size_mb = DEF_DEV_SIZE_PRE_INIT;
802 static int sdebug_dif = DEF_DIF;
803 static int sdebug_dix = DEF_DIX;
804 static int sdebug_dsense = DEF_D_SENSE;
805 static int sdebug_every_nth = DEF_EVERY_NTH;
806 static int sdebug_fake_rw = DEF_FAKE_RW;
807 static unsigned int sdebug_guard = DEF_GUARD;
808 static int sdebug_host_max_queue;	/* per host */
809 static int sdebug_lowest_aligned = DEF_LOWEST_ALIGNED;
810 static int sdebug_max_luns = DEF_MAX_LUNS;
811 static int sdebug_max_queue = SDEBUG_CANQUEUE;	/* per submit queue */
812 static unsigned int sdebug_medium_error_start = OPT_MEDIUM_ERR_ADDR;
813 static int sdebug_medium_error_count = OPT_MEDIUM_ERR_NUM;
814 static int sdebug_ndelay = DEF_NDELAY;	/* if > 0 then unit is nanoseconds */
815 static int sdebug_no_lun_0 = DEF_NO_LUN_0;
816 static int sdebug_no_uld;
817 static int sdebug_num_parts = DEF_NUM_PARTS;
818 static int sdebug_num_tgts = DEF_NUM_TGTS; /* targets per host */
819 static int sdebug_opt_blks = DEF_OPT_BLKS;
820 static int sdebug_opts = DEF_OPTS;
821 static int sdebug_physblk_exp = DEF_PHYSBLK_EXP;
822 static int sdebug_opt_xferlen_exp = DEF_OPT_XFERLEN_EXP;
823 static int sdebug_ptype = DEF_PTYPE; /* SCSI peripheral device type */
824 static int sdebug_scsi_level = DEF_SCSI_LEVEL;
825 static int sdebug_sector_size = DEF_SECTOR_SIZE;
826 static int sdeb_tur_ms_to_ready = DEF_TUR_MS_TO_READY;
827 static int sdebug_virtual_gb = DEF_VIRTUAL_GB;
828 static int sdebug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
829 static unsigned int sdebug_lbpu = DEF_LBPU;
830 static unsigned int sdebug_lbpws = DEF_LBPWS;
831 static unsigned int sdebug_lbpws10 = DEF_LBPWS10;
832 static unsigned int sdebug_lbprz = DEF_LBPRZ;
833 static unsigned int sdebug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
834 static unsigned int sdebug_unmap_granularity = DEF_UNMAP_GRANULARITY;
835 static unsigned int sdebug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
836 static unsigned int sdebug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
837 static unsigned int sdebug_write_same_length = DEF_WRITESAME_LENGTH;
838 static int sdebug_uuid_ctl = DEF_UUID_CTL;
839 static bool sdebug_random = DEF_RANDOM;
840 static bool sdebug_per_host_store = DEF_PER_HOST_STORE;
841 static bool sdebug_removable = DEF_REMOVABLE;
842 static bool sdebug_clustering;
843 static bool sdebug_host_lock = DEF_HOST_LOCK;
844 static bool sdebug_strict = DEF_STRICT;
845 static bool sdebug_any_injecting_opt;
846 static bool sdebug_no_rwlock;
847 static bool sdebug_verbose;
848 static bool have_dif_prot;
849 static bool write_since_sync;
850 static bool sdebug_statistics = DEF_STATISTICS;
851 static bool sdebug_wp;
852 static bool sdebug_allow_restart;
853 static enum {
854 	BLK_ZONED_NONE	= 0,
855 	BLK_ZONED_HA	= 1,
856 	BLK_ZONED_HM	= 2,
857 } sdeb_zbc_model = BLK_ZONED_NONE;
858 static char *sdeb_zbc_model_s;
859 
860 enum sam_lun_addr_method {SAM_LUN_AM_PERIPHERAL = 0x0,
861 			  SAM_LUN_AM_FLAT = 0x1,
862 			  SAM_LUN_AM_LOGICAL_UNIT = 0x2,
863 			  SAM_LUN_AM_EXTENDED = 0x3};
864 static enum sam_lun_addr_method sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
865 static int sdebug_lun_am_i = (int)SAM_LUN_AM_PERIPHERAL;
866 
867 static unsigned int sdebug_store_sectors;
868 static sector_t sdebug_capacity;	/* in sectors */
869 
870 /* old BIOS stuff; the kernel may get rid of these but some mode sense
871  * pages may still need them */
872 static int sdebug_heads;		/* heads per disk */
873 static int sdebug_cylinders_per;	/* cylinders per surface */
874 static int sdebug_sectors_per;		/* sectors per cylinder */
875 
876 static LIST_HEAD(sdebug_host_list);
877 static DEFINE_MUTEX(sdebug_host_list_mutex);
878 
879 static struct xarray per_store_arr;
880 static struct xarray *per_store_ap = &per_store_arr;
881 static int sdeb_first_idx = -1;		/* invalid index ==> none created */
882 static int sdeb_most_recent_idx = -1;
883 static DEFINE_RWLOCK(sdeb_fake_rw_lck);	/* need a RW lock when fake_rw=1 */
884 
885 static unsigned long map_size;
886 static int num_aborts;
887 static int num_dev_resets;
888 static int num_target_resets;
889 static int num_bus_resets;
890 static int num_host_resets;
891 static int dix_writes;
892 static int dix_reads;
893 static int dif_errors;
894 
895 /* ZBC global data */
896 static bool sdeb_zbc_in_use;	/* true for host-aware and host-managed disks */
897 static int sdeb_zbc_zone_cap_mb;
898 static int sdeb_zbc_zone_size_mb;
899 static int sdeb_zbc_max_open = DEF_ZBC_MAX_OPEN_ZONES;
900 static int sdeb_zbc_nr_conv = DEF_ZBC_NR_CONV_ZONES;
901 
902 static int submit_queues = DEF_SUBMIT_QUEUES;  /* > 1 for multi-queue (mq) */
903 static int poll_queues; /* io_uring iopoll interface */
904 
905 static atomic_long_t writes_by_group_number[64];
906 
907 static char sdebug_proc_name[] = MY_NAME;
908 static const char *my_name = MY_NAME;
909 
910 static const struct bus_type pseudo_lld_bus;
911 
912 static struct device_driver sdebug_driverfs_driver = {
913 	.name 		= sdebug_proc_name,
914 	.bus		= &pseudo_lld_bus,
915 };
916 
917 static const int check_condition_result =
918 	SAM_STAT_CHECK_CONDITION;
919 
920 static const int illegal_condition_result =
921 	(DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;
922 
923 static const int device_qfull_result =
924 	(DID_ABORT << 16) | SAM_STAT_TASK_SET_FULL;
925 
926 static const int condition_met_result = SAM_STAT_CONDITION_MET;
927 
928 static struct dentry *sdebug_debugfs_root;
929 
930 static void sdebug_err_free(struct rcu_head *head)
931 {
932 	struct sdebug_err_inject *inject =
933 		container_of(head, typeof(*inject), rcu);
934 
935 	kfree(inject);
936 }
937 
938 static void sdebug_err_add(struct scsi_device *sdev, struct sdebug_err_inject *new)
939 {
940 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdev->hostdata;
941 	struct sdebug_err_inject *err;
942 
943 	spin_lock(&devip->list_lock);
944 	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
945 		if (err->type == new->type && err->cmd == new->cmd) {
946 			list_del_rcu(&err->list);
947 			call_rcu(&err->rcu, sdebug_err_free);
948 		}
949 	}
950 
951 	list_add_tail_rcu(&new->list, &devip->inject_err_list);
952 	spin_unlock(&devip->list_lock);
953 }
954 
955 static int sdebug_err_remove(struct scsi_device *sdev, const char *buf, size_t count)
956 {
957 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdev->hostdata;
958 	struct sdebug_err_inject *err;
959 	int type;
960 	unsigned char cmd;
961 
962 	if (sscanf(buf, "- %d %hhx", &type, &cmd) != 2) {
963 		kfree(buf);
964 		return -EINVAL;
965 	}
966 
967 	spin_lock(&devip->list_lock);
968 	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
969 		if (err->type == type && err->cmd == cmd) {
970 			list_del_rcu(&err->list);
971 			call_rcu(&err->rcu, sdebug_err_free);
972 			spin_unlock(&devip->list_lock);
973 			kfree(buf);
974 			return count;
975 		}
976 	}
977 	spin_unlock(&devip->list_lock);
978 
979 	kfree(buf);
980 	return -EINVAL;
981 }
982 
983 static int sdebug_error_show(struct seq_file *m, void *p)
984 {
985 	struct scsi_device *sdev = (struct scsi_device *)m->private;
986 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdev->hostdata;
987 	struct sdebug_err_inject *err;
988 
989 	seq_puts(m, "Type\tCount\tCommand\n");
990 
991 	rcu_read_lock();
992 	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
993 		switch (err->type) {
994 		case ERR_TMOUT_CMD:
995 		case ERR_ABORT_CMD_FAILED:
996 		case ERR_LUN_RESET_FAILED:
997 			seq_printf(m, "%d\t%d\t0x%x\n", err->type, err->cnt,
998 				err->cmd);
999 		break;
1000 
1001 		case ERR_FAIL_QUEUE_CMD:
1002 			seq_printf(m, "%d\t%d\t0x%x\t0x%x\n", err->type,
1003 				err->cnt, err->cmd, err->queuecmd_ret);
1004 		break;
1005 
1006 		case ERR_FAIL_CMD:
1007 			seq_printf(m, "%d\t%d\t0x%x\t0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
1008 				err->type, err->cnt, err->cmd,
1009 				err->host_byte, err->driver_byte,
1010 				err->status_byte, err->sense_key,
1011 				err->asc, err->asq);
1012 		break;
1013 		}
1014 	}
1015 	rcu_read_unlock();
1016 
1017 	return 0;
1018 }
1019 
1020 static int sdebug_error_open(struct inode *inode, struct file *file)
1021 {
1022 	return single_open(file, sdebug_error_show, inode->i_private);
1023 }
1024 
1025 static ssize_t sdebug_error_write(struct file *file, const char __user *ubuf,
1026 		size_t count, loff_t *ppos)
1027 {
1028 	char *buf;
1029 	unsigned int inject_type;
1030 	struct sdebug_err_inject *inject;
1031 	struct scsi_device *sdev = (struct scsi_device *)file->f_inode->i_private;
1032 
1033 	buf = kzalloc(count + 1, GFP_KERNEL);
1034 	if (!buf)
1035 		return -ENOMEM;
1036 
1037 	if (copy_from_user(buf, ubuf, count)) {
1038 		kfree(buf);
1039 		return -EFAULT;
1040 	}
1041 
1042 	if (buf[0] == '-')
1043 		return sdebug_err_remove(sdev, buf, count);
1044 
1045 	if (sscanf(buf, "%u", &inject_type) != 1) {
1046 		kfree(buf);
1047 		return -EINVAL;
1048 	}
1049 
1050 	inject = kzalloc(sizeof(struct sdebug_err_inject), GFP_KERNEL);
1051 	if (!inject) {
1052 		kfree(buf);
1053 		return -ENOMEM;
1054 	}
1055 
1056 	switch (inject_type) {
1057 	case ERR_TMOUT_CMD:
1058 	case ERR_ABORT_CMD_FAILED:
1059 	case ERR_LUN_RESET_FAILED:
1060 		if (sscanf(buf, "%d %d %hhx", &inject->type, &inject->cnt,
1061 			   &inject->cmd) != 3)
1062 			goto out_error;
1063 	break;
1064 
1065 	case ERR_FAIL_QUEUE_CMD:
1066 		if (sscanf(buf, "%d %d %hhx %x", &inject->type, &inject->cnt,
1067 			   &inject->cmd, &inject->queuecmd_ret) != 4)
1068 			goto out_error;
1069 	break;
1070 
1071 	case ERR_FAIL_CMD:
1072 		if (sscanf(buf, "%d %d %hhx %hhx %hhx %hhx %hhx %hhx %hhx",
1073 			   &inject->type, &inject->cnt, &inject->cmd,
1074 			   &inject->host_byte, &inject->driver_byte,
1075 			   &inject->status_byte, &inject->sense_key,
1076 			   &inject->asc, &inject->asq) != 9)
1077 			goto out_error;
1078 	break;
1079 
1080 	default:
1081 		goto out_error;
1082 	break;
1083 	}
1084 
1085 	kfree(buf);
1086 	sdebug_err_add(sdev, inject);
1087 
1088 	return count;
1089 
1090 out_error:
1091 	kfree(buf);
1092 	kfree(inject);
1093 	return -EINVAL;
1094 }
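/* Hedged examples of writes the parser above accepts (field order follows
 * the sscanf formats; how cnt is consumed lies outside this excerpt):
 *   "0 -1 0x12"                       inject ERR_TMOUT_CMD for INQUIRY
 *   "2 -1 0x88 0 0 0x2 0x3 0x11 0x0"  inject ERR_FAIL_CMD for READ(16):
 *                                     CHECK CONDITION, sense 0x3/0x11/0x0
 *   "- 0 0x12"                        remove the ERR_TMOUT_CMD injection
 */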
1095 
1096 static const struct file_operations sdebug_error_fops = {
1097 	.open	= sdebug_error_open,
1098 	.read	= seq_read,
1099 	.write	= sdebug_error_write,
1100 	.release = single_release,
1101 };
1102 
1103 static int sdebug_target_reset_fail_show(struct seq_file *m, void *p)
1104 {
1105 	struct scsi_target *starget = (struct scsi_target *)m->private;
1106 	struct sdebug_target_info *targetip =
1107 		(struct sdebug_target_info *)starget->hostdata;
1108 
1109 	if (targetip)
1110 		seq_printf(m, "%c\n", targetip->reset_fail ? 'Y' : 'N');
1111 
1112 	return 0;
1113 }
1114 
1115 static int sdebug_target_reset_fail_open(struct inode *inode, struct file *file)
1116 {
1117 	return single_open(file, sdebug_target_reset_fail_show, inode->i_private);
1118 }
1119 
1120 static ssize_t sdebug_target_reset_fail_write(struct file *file,
1121 		const char __user *ubuf, size_t count, loff_t *ppos)
1122 {
1123 	int ret;
1124 	struct scsi_target *starget =
1125 		(struct scsi_target *)file->f_inode->i_private;
1126 	struct sdebug_target_info *targetip =
1127 		(struct sdebug_target_info *)starget->hostdata;
1128 
1129 	if (targetip) {
1130 		ret = kstrtobool_from_user(ubuf, count, &targetip->reset_fail);
1131 		return ret < 0 ? ret : count;
1132 	}
1133 	return -ENODEV;
1134 }
1135 
1136 static const struct file_operations sdebug_target_reset_fail_fops = {
1137 	.open	= sdebug_target_reset_fail_open,
1138 	.read	= seq_read,
1139 	.write	= sdebug_target_reset_fail_write,
1140 	.release = single_release,
1141 };
1142 
1143 static int sdebug_target_alloc(struct scsi_target *starget)
1144 {
1145 	struct sdebug_target_info *targetip;
1146 
1147 	targetip = kzalloc(sizeof(struct sdebug_target_info), GFP_KERNEL);
1148 	if (!targetip)
1149 		return -ENOMEM;
1150 
1151 	targetip->debugfs_entry = debugfs_create_dir(dev_name(&starget->dev),
1152 				sdebug_debugfs_root);
1153 
1154 	debugfs_create_file("fail_reset", 0600, targetip->debugfs_entry, starget,
1155 				&sdebug_target_reset_fail_fops);
1156 
1157 	starget->hostdata = targetip;
1158 
1159 	return 0;
1160 }
1161 
1162 static void sdebug_target_cleanup_async(void *data, async_cookie_t cookie)
1163 {
1164 	struct sdebug_target_info *targetip = data;
1165 
1166 	debugfs_remove(targetip->debugfs_entry);
1167 	kfree(targetip);
1168 }
1169 
1170 static void sdebug_target_destroy(struct scsi_target *starget)
1171 {
1172 	struct sdebug_target_info *targetip;
1173 
1174 	targetip = (struct sdebug_target_info *)starget->hostdata;
1175 	if (targetip) {
1176 		starget->hostdata = NULL;
1177 		async_schedule(sdebug_target_cleanup_async, targetip);
1178 	}
1179 }
1180 
1181 /* Only do the extra work involved in logical block provisioning if one or
1182  * more of the lbpu, lbpws or lbpws10 parameters are given and we are doing
1183  * real reads and writes (i.e. not skipping them for speed).
1184  */
1185 static inline bool scsi_debug_lbp(void)
1186 {
1187 	return 0 == sdebug_fake_rw &&
1188 		(sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10);
1189 }
1190 
1191 static void *lba2fake_store(struct sdeb_store_info *sip,
1192 			    unsigned long long lba)
1193 {
1194 	struct sdeb_store_info *lsip = sip;
1195 
1196 	lba = do_div(lba, sdebug_store_sectors);
1197 	if (!sip || !sip->storep) {
1198 		WARN_ON_ONCE(true);
1199 		lsip = xa_load(per_store_ap, 0);  /* should never be NULL */
1200 	}
1201 	return lsip->storep + lba * sdebug_sector_size;
1202 }
1203 
1204 static struct t10_pi_tuple *dif_store(struct sdeb_store_info *sip,
1205 				      sector_t sector)
1206 {
1207 	sector = sector_div(sector, sdebug_store_sectors);
1208 
1209 	return sip->dif_storep + sector;
1210 }
1211 
1212 static void sdebug_max_tgts_luns(void)
1213 {
1214 	struct sdebug_host_info *sdbg_host;
1215 	struct Scsi_Host *hpnt;
1216 
1217 	mutex_lock(&sdebug_host_list_mutex);
1218 	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
1219 		hpnt = sdbg_host->shost;
1220 		if ((hpnt->this_id >= 0) &&
1221 		    (sdebug_num_tgts > hpnt->this_id))
1222 			hpnt->max_id = sdebug_num_tgts + 1;
1223 		else
1224 			hpnt->max_id = sdebug_num_tgts;
1225 		/* sdebug_max_luns; */
1226 		hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
1227 	}
1228 	mutex_unlock(&sdebug_host_list_mutex);
1229 }
1230 
1231 enum sdeb_cmd_data {SDEB_IN_DATA = 0, SDEB_IN_CDB = 1};
1232 
1233 /* Set in_bit to -1 to indicate no bit position of invalid field */
1234 static void mk_sense_invalid_fld(struct scsi_cmnd *scp,
1235 				 enum sdeb_cmd_data c_d,
1236 				 int in_byte, int in_bit)
1237 {
1238 	unsigned char *sbuff;
1239 	u8 sks[4];
1240 	int sl, asc;
1241 
1242 	sbuff = scp->sense_buffer;
1243 	if (!sbuff) {
1244 		sdev_printk(KERN_ERR, scp->device,
1245 			    "%s: sense_buffer is NULL\n", __func__);
1246 		return;
1247 	}
1248 	asc = c_d ? INVALID_FIELD_IN_CDB : INVALID_FIELD_IN_PARAM_LIST;
1249 	memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
1250 	scsi_build_sense(scp, sdebug_dsense, ILLEGAL_REQUEST, asc, 0);
1251 	memset(sks, 0, sizeof(sks));
1252 	sks[0] = 0x80;
1253 	if (c_d)
1254 		sks[0] |= 0x40;
1255 	if (in_bit >= 0) {
1256 		sks[0] |= 0x8;
1257 		sks[0] |= 0x7 & in_bit;
1258 	}
1259 	put_unaligned_be16(in_byte, sks + 1);
1260 	if (sdebug_dsense) {
1261 		sl = sbuff[7] + 8;
1262 		sbuff[7] = sl;
1263 		sbuff[sl] = 0x2;
1264 		sbuff[sl + 1] = 0x6;
1265 		memcpy(sbuff + sl + 4, sks, 3);
1266 	} else
1267 		memcpy(sbuff + 15, sks, 3);
1268 	if (sdebug_verbose)
1269 		sdev_printk(KERN_INFO, scp->device,
1270 			    "%s:  [sense_key,asc,ascq]: [0x5,0x%x,0x0] %c byte=%d, bit=%d\n",
1271 			    my_name, asc, c_d ? 'C' : 'D', in_byte, in_bit);
1272 }
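/* Worked example: mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 4) builds an
 * ILLEGAL REQUEST sense with ASC INVALID_FIELD_IN_CDB (0x24) plus a sense
 * key specific field whose C/D bit is set, pointing at bit 4 of cdb
 * byte 1.
 */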
1273 
1274 static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq)
1275 {
1276 	if (!scp->sense_buffer) {
1277 		sdev_printk(KERN_ERR, scp->device,
1278 			    "%s: sense_buffer is NULL\n", __func__);
1279 		return;
1280 	}
1281 	memset(scp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
1282 
1283 	scsi_build_sense(scp, sdebug_dsense, key, asc, asq);
1284 
1285 	if (sdebug_verbose)
1286 		sdev_printk(KERN_INFO, scp->device,
1287 			    "%s:  [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
1288 			    my_name, key, asc, asq);
1289 }
1290 
1291 static void mk_sense_invalid_opcode(struct scsi_cmnd *scp)
1292 {
1293 	mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
1294 }
1295 
1296 static int scsi_debug_ioctl(struct scsi_device *dev, unsigned int cmd,
1297 			    void __user *arg)
1298 {
1299 	if (sdebug_verbose) {
1300 		if (0x1261 == cmd)
1301 			sdev_printk(KERN_INFO, dev,
1302 				    "%s: BLKFLSBUF [0x1261]\n", __func__);
1303 		else if (0x5331 == cmd)
1304 			sdev_printk(KERN_INFO, dev,
1305 				    "%s: CDROM_GET_CAPABILITY [0x5331]\n",
1306 				    __func__);
1307 		else
1308 			sdev_printk(KERN_INFO, dev, "%s: cmd=0x%x\n",
1309 				    __func__, cmd);
1310 	}
1311 	return -EINVAL;
1312 	/* return -ENOTTY; // correct return but upsets fdisk */
1313 }
1314 
1315 static void config_cdb_len(struct scsi_device *sdev)
1316 {
1317 	switch (sdebug_cdb_len) {
1318 	case 6:	/* suggest 6 byte READ, WRITE and MODE SENSE/SELECT */
1319 		sdev->use_10_for_rw = false;
1320 		sdev->use_16_for_rw = false;
1321 		sdev->use_10_for_ms = false;
1322 		break;
1323 	case 10: /* suggest 10 byte RWs and 6 byte MODE SENSE/SELECT */
1324 		sdev->use_10_for_rw = true;
1325 		sdev->use_16_for_rw = false;
1326 		sdev->use_10_for_ms = false;
1327 		break;
1328 	case 12: /* suggest 10 byte RWs and 10 byte MODE SENSE/SELECT */
1329 		sdev->use_10_for_rw = true;
1330 		sdev->use_16_for_rw = false;
1331 		sdev->use_10_for_ms = true;
1332 		break;
1333 	case 16:
1334 		sdev->use_10_for_rw = false;
1335 		sdev->use_16_for_rw = true;
1336 		sdev->use_10_for_ms = true;
1337 		break;
1338 	case 32: /* No knobs to suggest this so same as 16 for now */
1339 		sdev->use_10_for_rw = false;
1340 		sdev->use_16_for_rw = true;
1341 		sdev->use_10_for_ms = true;
1342 		break;
1343 	default:
1344 		pr_warn("unexpected cdb_len=%d, force to 10\n",
1345 			sdebug_cdb_len);
1346 		sdev->use_10_for_rw = true;
1347 		sdev->use_16_for_rw = false;
1348 		sdev->use_10_for_ms = false;
1349 		sdebug_cdb_len = 10;
1350 		break;
1351 	}
1352 }
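/* Usage sketch (parameter name from this driver): loading with e.g.
 *   modprobe scsi_debug cdb_len=16
 * routes through the case above, steering the mid-level toward 16 byte
 * READ/WRITE and 10 byte MODE SENSE/SELECT.
 */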
1353 
1354 static void all_config_cdb_len(void)
1355 {
1356 	struct sdebug_host_info *sdbg_host;
1357 	struct Scsi_Host *shost;
1358 	struct scsi_device *sdev;
1359 
1360 	mutex_lock(&sdebug_host_list_mutex);
1361 	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
1362 		shost = sdbg_host->shost;
1363 		shost_for_each_device(sdev, shost) {
1364 			config_cdb_len(sdev);
1365 		}
1366 	}
1367 	mutex_unlock(&sdebug_host_list_mutex);
1368 }
1369 
1370 static void clear_luns_changed_on_target(struct sdebug_dev_info *devip)
1371 {
1372 	struct sdebug_host_info *sdhp = devip->sdbg_host;
1373 	struct sdebug_dev_info *dp;
1374 
1375 	list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
1376 		if ((devip->sdbg_host == dp->sdbg_host) &&
1377 		    (devip->target == dp->target)) {
1378 			clear_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
1379 		}
1380 	}
1381 }
1382 
1383 static int make_ua(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1384 {
1385 	int k;
1386 
1387 	k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS);
1388 	if (k != SDEBUG_NUM_UAS) {
1389 		const char *cp = NULL;
1390 
1391 		switch (k) {
1392 		case SDEBUG_UA_POR:
1393 			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
1394 					POWER_ON_RESET_ASCQ);
1395 			if (sdebug_verbose)
1396 				cp = "power on reset";
1397 			break;
1398 		case SDEBUG_UA_POOCCUR:
1399 			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
1400 					POWER_ON_OCCURRED_ASCQ);
1401 			if (sdebug_verbose)
1402 				cp = "power on occurred";
1403 			break;
1404 		case SDEBUG_UA_BUS_RESET:
1405 			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
1406 					BUS_RESET_ASCQ);
1407 			if (sdebug_verbose)
1408 				cp = "bus reset";
1409 			break;
1410 		case SDEBUG_UA_MODE_CHANGED:
1411 			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
1412 					MODE_CHANGED_ASCQ);
1413 			if (sdebug_verbose)
1414 				cp = "mode parameters changed";
1415 			break;
1416 		case SDEBUG_UA_CAPACITY_CHANGED:
1417 			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
1418 					CAPACITY_CHANGED_ASCQ);
1419 			if (sdebug_verbose)
1420 				cp = "capacity data changed";
1421 			break;
1422 		case SDEBUG_UA_MICROCODE_CHANGED:
1423 			mk_sense_buffer(scp, UNIT_ATTENTION,
1424 					TARGET_CHANGED_ASC,
1425 					MICROCODE_CHANGED_ASCQ);
1426 			if (sdebug_verbose)
1427 				cp = "microcode has been changed";
1428 			break;
1429 		case SDEBUG_UA_MICROCODE_CHANGED_WO_RESET:
1430 			mk_sense_buffer(scp, UNIT_ATTENTION,
1431 					TARGET_CHANGED_ASC,
1432 					MICROCODE_CHANGED_WO_RESET_ASCQ);
1433 			if (sdebug_verbose)
1434 				cp = "microcode has been changed without reset";
1435 			break;
1436 		case SDEBUG_UA_LUNS_CHANGED:
1437 			/*
1438 			 * SPC-3 behavior is to report a UNIT ATTENTION with
1439 			 * ASC/ASCQ REPORTED LUNS DATA HAS CHANGED on every LUN
1440 			 * on the target, until a REPORT LUNS command is
1441 			 * received.  SPC-4 behavior is to report it only once.
1442 			 * NOTE:  sdebug_scsi_level does not use the same
1443 			 * values as struct scsi_device->scsi_level.
1444 			 */
1445 			if (sdebug_scsi_level >= 6)	/* SPC-4 and above */
1446 				clear_luns_changed_on_target(devip);
1447 			mk_sense_buffer(scp, UNIT_ATTENTION,
1448 					TARGET_CHANGED_ASC,
1449 					LUNS_CHANGED_ASCQ);
1450 			if (sdebug_verbose)
1451 				cp = "reported luns data has changed";
1452 			break;
1453 		default:
1454 			pr_warn("unexpected unit attention code=%d\n", k);
1455 			if (sdebug_verbose)
1456 				cp = "unknown";
1457 			break;
1458 		}
1459 		clear_bit(k, devip->uas_bm);
1460 		if (sdebug_verbose)
1461 			sdev_printk(KERN_INFO, scp->device,
1462 				   "%s reports: Unit attention: %s\n",
1463 				   my_name, cp);
1464 		return check_condition_result;
1465 	}
1466 	return 0;
1467 }
1468 
1469 /* Build SCSI "data-in" buffer. Returns 0 if ok else (DID_ERROR << 16). */
1470 static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
1471 				int arr_len)
1472 {
1473 	int act_len;
1474 	struct scsi_data_buffer *sdb = &scp->sdb;
1475 
1476 	if (!sdb->length)
1477 		return 0;
1478 	if (scp->sc_data_direction != DMA_FROM_DEVICE)
1479 		return DID_ERROR << 16;
1480 
1481 	act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
1482 				      arr, arr_len);
1483 	scsi_set_resid(scp, scsi_bufflen(scp) - act_len);
1484 
1485 	return 0;
1486 }
1487 
1488 /* Partial build of SCSI "data-in" buffer. Returns 0 if ok else
1489  * (DID_ERROR << 16). Can write to offset in data-in buffer. If multiple
1490  * calls, not required to write in ascending offset order. Assumes resid
1491  * set to scsi_bufflen() prior to any calls.
1492  */
1493 static int p_fill_from_dev_buffer(struct scsi_cmnd *scp, const void *arr,
1494 				  int arr_len, unsigned int off_dst)
1495 {
1496 	unsigned int act_len, n;
1497 	struct scsi_data_buffer *sdb = &scp->sdb;
1498 	off_t skip = off_dst;
1499 
1500 	if (sdb->length <= off_dst)
1501 		return 0;
1502 	if (scp->sc_data_direction != DMA_FROM_DEVICE)
1503 		return DID_ERROR << 16;
1504 
1505 	act_len = sg_pcopy_from_buffer(sdb->table.sgl, sdb->table.nents,
1506 				       arr, arr_len, skip);
1507 	pr_debug("%s: off_dst=%u, scsi_bufflen=%u, act_len=%u, resid=%d\n",
1508 		 __func__, off_dst, scsi_bufflen(scp), act_len,
1509 		 scsi_get_resid(scp));
1510 	n = scsi_bufflen(scp) - (off_dst + act_len);
1511 	scsi_set_resid(scp, min_t(u32, scsi_get_resid(scp), n));
1512 	return 0;
1513 }
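
/*
 * Worked example of the residual update above, assuming a 512 byte
 * data-in buffer and two partial copies arriving out of order: after
 * copying 100 bytes at off_dst=400, n = 512 - 500 = 12; a later copy of
 * 200 bytes at off_dst=0 gives n = 512 - 200 = 312, but the min_t()
 * keeps resid at 12, since resid may only shrink toward the highest
 * byte actually written.
 */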
1514 
1515 /* Fetches from SCSI "data-out" buffer. Returns number of bytes fetched into
1516  * 'arr' or -1 if error.
1517  */
1518 static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
1519 			       int arr_len)
1520 {
1521 	if (!scsi_bufflen(scp))
1522 		return 0;
1523 	if (scp->sc_data_direction != DMA_TO_DEVICE)
1524 		return -1;
1525 
1526 	return scsi_sg_copy_to_buffer(scp, arr, arr_len);
1527 }
1528 
1529 
1530 static char sdebug_inq_vendor_id[9] = "Linux   ";
1531 static char sdebug_inq_product_id[17] = "scsi_debug      ";
1532 static char sdebug_inq_product_rev[5] = SDEBUG_VERSION;
1533 /* Use some locally assigned NAAs for SAS addresses. */
1534 static const u64 naa3_comp_a = 0x3222222000000000ULL;
1535 static const u64 naa3_comp_b = 0x3333333000000000ULL;
1536 static const u64 naa3_comp_c = 0x3111111000000000ULL;
1537 
1538 /* Device identification VPD page. Returns number of bytes placed in arr */
1539 static int inquiry_vpd_83(unsigned char *arr, int port_group_id,
1540 			  int target_dev_id, int dev_id_num,
1541 			  const char *dev_id_str, int dev_id_str_len,
1542 			  const uuid_t *lu_name)
1543 {
1544 	int num, port_a;
1545 	char b[32];
1546 
1547 	port_a = target_dev_id + 1;
1548 	/* T10 vendor identifier field format (faked) */
1549 	arr[0] = 0x2;	/* ASCII */
1550 	arr[1] = 0x1;
1551 	arr[2] = 0x0;
1552 	memcpy(&arr[4], sdebug_inq_vendor_id, 8);
1553 	memcpy(&arr[12], sdebug_inq_product_id, 16);
1554 	memcpy(&arr[28], dev_id_str, dev_id_str_len);
1555 	num = 8 + 16 + dev_id_str_len;
1556 	arr[3] = num;
1557 	num += 4;
1558 	if (dev_id_num >= 0) {
1559 		if (sdebug_uuid_ctl) {
1560 			/* Locally assigned UUID */
1561 			arr[num++] = 0x1;  /* binary (not necessarily sas) */
1562 			arr[num++] = 0xa;  /* PIV=0, lu, naa */
1563 			arr[num++] = 0x0;
1564 			arr[num++] = 0x12;
1565 			arr[num++] = 0x10; /* uuid type=1, locally assigned */
1566 			arr[num++] = 0x0;
1567 			memcpy(arr + num, lu_name, 16);
1568 			num += 16;
1569 		} else {
1570 			/* NAA-3, Logical unit identifier (binary) */
1571 			arr[num++] = 0x1;  /* binary (not necessarily sas) */
1572 			arr[num++] = 0x3;  /* PIV=0, lu, naa */
1573 			arr[num++] = 0x0;
1574 			arr[num++] = 0x8;
1575 			put_unaligned_be64(naa3_comp_b + dev_id_num, arr + num);
1576 			num += 8;
1577 		}
1578 		/* Target relative port number */
1579 		arr[num++] = 0x61;	/* proto=sas, binary */
1580 		arr[num++] = 0x94;	/* PIV=1, target port, rel port */
1581 		arr[num++] = 0x0;	/* reserved */
1582 		arr[num++] = 0x4;	/* length */
1583 		arr[num++] = 0x0;	/* reserved */
1584 		arr[num++] = 0x0;	/* reserved */
1585 		arr[num++] = 0x0;
1586 		arr[num++] = 0x1;	/* relative port A */
1587 	}
1588 	/* NAA-3, Target port identifier */
1589 	arr[num++] = 0x61;	/* proto=sas, binary */
1590 	arr[num++] = 0x93;	/* piv=1, target port, naa */
1591 	arr[num++] = 0x0;
1592 	arr[num++] = 0x8;
1593 	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
1594 	num += 8;
1595 	/* NAA-3, Target port group identifier */
1596 	arr[num++] = 0x61;	/* proto=sas, binary */
1597 	arr[num++] = 0x95;	/* piv=1, target port group id */
1598 	arr[num++] = 0x0;
1599 	arr[num++] = 0x4;
1600 	arr[num++] = 0;
1601 	arr[num++] = 0;
1602 	put_unaligned_be16(port_group_id, arr + num);
1603 	num += 2;
1604 	/* NAA-3, Target device identifier */
1605 	arr[num++] = 0x61;	/* proto=sas, binary */
1606 	arr[num++] = 0xa3;	/* piv=1, target device, naa */
1607 	arr[num++] = 0x0;
1608 	arr[num++] = 0x8;
1609 	put_unaligned_be64(naa3_comp_a + target_dev_id, arr + num);
1610 	num += 8;
1611 	/* SCSI name string: Target device identifier */
1612 	arr[num++] = 0x63;	/* proto=sas, UTF-8 */
1613 	arr[num++] = 0xa8;	/* piv=1, target device, SCSI name string */
1614 	arr[num++] = 0x0;
1615 	arr[num++] = 24;
1616 	memcpy(arr + num, "naa.32222220", 12);
1617 	num += 12;
1618 	snprintf(b, sizeof(b), "%08X", target_dev_id);
1619 	memcpy(arr + num, b, 8);
1620 	num += 8;
1621 	memset(arr + num, 0, 4);
1622 	num += 4;
1623 	return num;
1624 }
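
/*
 * Every designation descriptor emitted above starts with the same 4-byte
 * header, so the page can be walked descriptor by descriptor. A minimal
 * decoding sketch (hypothetical helper; field positions per the SPC
 * designation descriptor format), not part of the driver:
 */
#if 0	/* illustrative sketch only */
static void example_parse_desig_hdr(const unsigned char *d)
{
	unsigned int code_set = d[0] & 0xf;	/* 1=binary, 2=ASCII, 3=UTF-8 */
	unsigned int proto_id = (d[0] >> 4) & 0xf; /* 6=SAS, valid when PIV=1 */
	unsigned int piv = (d[1] >> 7) & 0x1;
	unsigned int assoc = (d[1] >> 4) & 0x3;	/* 0=lu, 1=port, 2=target */
	unsigned int dtype = d[1] & 0xf;	/* 1=T10, 3=NAA, 8=SCSI name */
	unsigned int dlen = d[3];	/* next descriptor at d + 4 + dlen */
}
#endif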
1625 
1626 static unsigned char vpd84_data[] = {
1627 /* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
1628     0x22,0x22,0x22,0x0,0xbb,0x1,
1629     0x22,0x22,0x22,0x0,0xbb,0x2,
1630 };
1631 
1632 /*  Software interface identification VPD page */
1633 static int inquiry_vpd_84(unsigned char *arr)
1634 {
1635 	memcpy(arr, vpd84_data, sizeof(vpd84_data));
1636 	return sizeof(vpd84_data);
1637 }
1638 
1639 /* Management network addresses VPD page */
1640 static int inquiry_vpd_85(unsigned char *arr)
1641 {
1642 	int num = 0;
1643 	const char *na1 = "https://www.kernel.org/config";
1644 	const char *na2 = "http://www.kernel.org/log";
1645 	int plen, olen;
1646 
1647 	arr[num++] = 0x1;	/* lu, storage config */
1648 	arr[num++] = 0x0;	/* reserved */
1649 	arr[num++] = 0x0;
1650 	olen = strlen(na1);
1651 	plen = olen + 1;
1652 	if (plen % 4)
1653 		plen = ((plen / 4) + 1) * 4;
1654 	arr[num++] = plen;	/* length, null terminated, padded */
1655 	memcpy(arr + num, na1, olen);
1656 	memset(arr + num + olen, 0, plen - olen);
1657 	num += plen;
1658 
1659 	arr[num++] = 0x4;	/* lu, logging */
1660 	arr[num++] = 0x0;	/* reserved */
1661 	arr[num++] = 0x0;
1662 	olen = strlen(na2);
1663 	plen = olen + 1;
1664 	if (plen % 4)
1665 		plen = ((plen / 4) + 1) * 4;
1666 	arr[num++] = plen;	/* length, null terminated, padded */
1667 	memcpy(arr + num, na2, olen);
1668 	memset(arr + num + olen, 0, plen - olen);
1669 	num += plen;
1670 
1671 	return num;
1672 }
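
/*
 * The padding arithmetic above rounds the NUL-terminated string length
 * up to the next multiple of four, as the network address descriptor
 * format requires. With <linux/align.h> already included, the same
 * result is ALIGN(olen + 1, 4); e.g. olen = 29 gives ALIGN(30, 4) = 32.
 */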
1673 
1674 /* SCSI ports VPD page */
1675 static int inquiry_vpd_88(unsigned char *arr, int target_dev_id)
1676 {
1677 	int num = 0;
1678 	int port_a, port_b;
1679 
1680 	port_a = target_dev_id + 1;
1681 	port_b = port_a + 1;
1682 	arr[num++] = 0x0;	/* reserved */
1683 	arr[num++] = 0x0;	/* reserved */
1684 	arr[num++] = 0x0;
1685 	arr[num++] = 0x1;	/* relative port 1 (primary) */
1686 	memset(arr + num, 0, 6);
1687 	num += 6;
1688 	arr[num++] = 0x0;
1689 	arr[num++] = 12;	/* length tp descriptor */
1690 	/* naa-5 target port identifier (A) */
1691 	arr[num++] = 0x61;	/* proto=sas, binary */
1692 	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
1693 	arr[num++] = 0x0;	/* reserved */
1694 	arr[num++] = 0x8;	/* length */
1695 	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
1696 	num += 8;
1697 	arr[num++] = 0x0;	/* reserved */
1698 	arr[num++] = 0x0;	/* reserved */
1699 	arr[num++] = 0x0;
1700 	arr[num++] = 0x2;	/* relative port 2 (secondary) */
1701 	memset(arr + num, 0, 6);
1702 	num += 6;
1703 	arr[num++] = 0x0;
1704 	arr[num++] = 12;	/* length tp descriptor */
1705 	/* naa-5 target port identifier (B) */
1706 	arr[num++] = 0x61;	/* proto=sas, binary */
1707 	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
1708 	arr[num++] = 0x0;	/* reserved */
1709 	arr[num++] = 0x8;	/* length */
1710 	put_unaligned_be64(naa3_comp_a + port_b, arr + num);
1711 	num += 8;
1712 
1713 	return num;
1714 }
1715 
1716 
1717 static unsigned char vpd89_data[] = {
1718 /* from 4th byte */ 0,0,0,0,
1719 'l','i','n','u','x',' ',' ',' ',
1720 'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
1721 '1','2','3','4',
1722 0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
1723 0xec,0,0,0,
1724 0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
1725 0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
1726 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
1727 0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
1728 0x53,0x41,
1729 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
1730 0x20,0x20,
1731 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
1732 0x10,0x80,
1733 0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
1734 0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
1735 0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
1736 0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
1737 0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
1738 0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
1739 0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
1740 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1741 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1742 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1743 0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
1744 0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
1745 0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
1746 0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
1747 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1748 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1749 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1750 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1751 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1752 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1753 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1754 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1755 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1756 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1757 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1758 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
1759 };
1760 
1761 /* ATA Information VPD page */
1762 static int inquiry_vpd_89(unsigned char *arr)
1763 {
1764 	memcpy(arr, vpd89_data, sizeof(vpd89_data));
1765 	return sizeof(vpd89_data);
1766 }
1767 
1768 
1769 static unsigned char vpdb0_data[] = {
1770 	/* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
1771 	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1772 	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1773 	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1774 };
1775 
1776 /* Block limits VPD page (SBC-3) */
1777 static int inquiry_vpd_b0(unsigned char *arr)
1778 {
1779 	unsigned int gran;
1780 
1781 	memcpy(arr, vpdb0_data, sizeof(vpdb0_data));
1782 
1783 	/* Optimal transfer length granularity */
1784 	if (sdebug_opt_xferlen_exp != 0 &&
1785 	    sdebug_physblk_exp < sdebug_opt_xferlen_exp)
1786 		gran = 1 << sdebug_opt_xferlen_exp;
1787 	else
1788 		gran = 1 << sdebug_physblk_exp;
1789 	put_unaligned_be16(gran, arr + 2);
1790 
1791 	/* Maximum Transfer Length */
1792 	if (sdebug_store_sectors > 0x400)
1793 		put_unaligned_be32(sdebug_store_sectors, arr + 4);
1794 
1795 	/* Optimal Transfer Length */
1796 	put_unaligned_be32(sdebug_opt_blks, &arr[8]);
1797 
1798 	if (sdebug_lbpu) {
1799 		/* Maximum Unmap LBA Count */
1800 		put_unaligned_be32(sdebug_unmap_max_blocks, &arr[16]);
1801 
1802 		/* Maximum Unmap Block Descriptor Count */
1803 		put_unaligned_be32(sdebug_unmap_max_desc, &arr[20]);
1804 	}
1805 
1806 	/* Unmap Granularity Alignment */
1807 	if (sdebug_unmap_alignment) {
1808 		put_unaligned_be32(sdebug_unmap_alignment, &arr[28]);
1809 		arr[28] |= 0x80; /* UGAVALID */
1810 	}
1811 
1812 	/* Optimal Unmap Granularity */
1813 	put_unaligned_be32(sdebug_unmap_granularity, &arr[24]);
1814 
1815 	/* Maximum WRITE SAME Length */
1816 	put_unaligned_be64(sdebug_write_same_length, &arr[32]);
1817 
1818 	return 0x3c; /* Mandatory page length for Logical Block Provisioning */
1819 }
1820 
1821 /* Block device characteristics VPD page (SBC-3) */
1822 static int inquiry_vpd_b1(struct sdebug_dev_info *devip, unsigned char *arr)
1823 {
1824 	memset(arr, 0, 0x3c);
1825 	arr[0] = 0;
1826 	arr[1] = 1;	/* non rotating medium (e.g. solid state) */
1827 	arr[2] = 0;
1828 	arr[3] = 5;	/* less than 1.8" */
1829 
1830 	return 0x3c;
1831 }
1832 
1833 /* Logical block provisioning VPD page (SBC-4) */
1834 static int inquiry_vpd_b2(unsigned char *arr)
1835 {
1836 	memset(arr, 0, 0x4);
1837 	arr[0] = 0;			/* threshold exponent */
1838 	if (sdebug_lbpu)
1839 		arr[1] = 1 << 7;
1840 	if (sdebug_lbpws)
1841 		arr[1] |= 1 << 6;
1842 	if (sdebug_lbpws10)
1843 		arr[1] |= 1 << 5;
1844 	if (sdebug_lbprz && scsi_debug_lbp())
1845 		arr[1] |= (sdebug_lbprz & 0x7) << 2;  /* sbc4r07 and later */
1846 	/* anc_sup=0; dp=0 (no provisioning group descriptor) */
1847 	/* minimum_percentage=0; provisioning_type=0 (unknown) */
1848 	/* threshold_percentage=0 */
1849 	return 0x4;
1850 }
1851 
1852 /* Zoned block device characteristics VPD page (ZBC mandatory) */
1853 static int inquiry_vpd_b6(struct sdebug_dev_info *devip, unsigned char *arr)
1854 {
1855 	memset(arr, 0, 0x3c);
1856 	arr[0] = 0x1; /* set URSWRZ (unrestricted read in seq. wr req zone) */
1857 	/*
1858 	 * Set Optimal number of open sequential write preferred zones and
1859 	 * Optimal number of non-sequentially written sequential write
1860 	 * preferred zones fields to 'not reported' (0xffffffff). Leave other
1861 	 * fields set to zero, apart from Max. number of open swrz_s field.
1862 	 */
1863 	put_unaligned_be32(0xffffffff, &arr[4]);
1864 	put_unaligned_be32(0xffffffff, &arr[8]);
1865 	if (sdeb_zbc_model == BLK_ZONED_HM && devip->max_open)
1866 		put_unaligned_be32(devip->max_open, &arr[12]);
1867 	else
1868 		put_unaligned_be32(0xffffffff, &arr[12]);
1869 	if (devip->zcap < devip->zsize) {
1870 		arr[19] = ZBC_CONSTANT_ZONE_START_OFFSET;
1871 		put_unaligned_be64(devip->zsize, &arr[20]);
1872 	} else {
1873 		arr[19] = 0;
1874 	}
1875 	return 0x3c;
1876 }
1877 
1878 #define SDEBUG_BLE_LEN_AFTER_B4 28	/* thus vpage 32 bytes long */
1879 
1880 enum { MAXIMUM_NUMBER_OF_STREAMS = 6, PERMANENT_STREAM_COUNT = 5 };
1881 
1882 /* Block limits extension VPD page (SBC-4) */
1883 static int inquiry_vpd_b7(unsigned char *arrb4)
1884 {
1885 	memset(arrb4, 0, SDEBUG_BLE_LEN_AFTER_B4);
1886 	arrb4[1] = 1; /* Reduced stream control support (RSCS) */
1887 	put_unaligned_be16(MAXIMUM_NUMBER_OF_STREAMS, &arrb4[2]);
1888 	return SDEBUG_BLE_LEN_AFTER_B4;
1889 }
1890 
1891 #define SDEBUG_LONG_INQ_SZ 96
1892 #define SDEBUG_MAX_INQ_ARR_SZ 584
1893 
1894 static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1895 {
1896 	unsigned char pq_pdt;
1897 	unsigned char *arr;
1898 	unsigned char *cmd = scp->cmnd;
1899 	u32 alloc_len, n;
1900 	int ret;
1901 	bool have_wlun, is_disk, is_zbc, is_disk_zbc;
1902 
1903 	alloc_len = get_unaligned_be16(cmd + 3);
1904 	arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
1905 	if (!arr)
1906 		return DID_REQUEUE << 16;
1907 	is_disk = (sdebug_ptype == TYPE_DISK);
1908 	is_zbc = devip->zoned;
1909 	is_disk_zbc = (is_disk || is_zbc);
1910 	have_wlun = scsi_is_wlun(scp->device->lun);
1911 	if (have_wlun)
1912 		pq_pdt = TYPE_WLUN;	/* present, wlun */
1913 	else if (sdebug_no_lun_0 && (devip->lun == SDEBUG_LUN_0_VAL))
1914 		pq_pdt = 0x7f;	/* not present, PQ=3, PDT=0x1f */
1915 	else
1916 		pq_pdt = (sdebug_ptype & 0x1f);
1917 	arr[0] = pq_pdt;
1918 	if (0x2 & cmd[1]) {  /* CMDDT bit set */
1919 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
1920 		kfree(arr);
1921 		return check_condition_result;
1922 	} else if (0x1 & cmd[1]) {  /* EVPD bit set */
1923 		int lu_id_num, port_group_id, target_dev_id;
1924 		u32 len;
1925 		char lu_id_str[6];
1926 		int host_no = devip->sdbg_host->shost->host_no;
1927 
1928 		arr[1] = cmd[2];
1929 		port_group_id = (((host_no + 1) & 0x7f) << 8) +
1930 		    (devip->channel & 0x7f);
1931 		if (sdebug_vpd_use_hostno == 0)
1932 			host_no = 0;
1933 		lu_id_num = have_wlun ? -1 : (((host_no + 1) * 2000) +
1934 			    (devip->target * 1000) + devip->lun);
1935 		target_dev_id = ((host_no + 1) * 2000) +
1936 				 (devip->target * 1000) - 3;
1937 		len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
1938 		if (0 == cmd[2]) { /* supported vital product data pages */
1939 			n = 4;
1940 			arr[n++] = 0x0;   /* this page */
1941 			arr[n++] = 0x80;  /* unit serial number */
1942 			arr[n++] = 0x83;  /* device identification */
1943 			arr[n++] = 0x84;  /* software interface ident. */
1944 			arr[n++] = 0x85;  /* management network addresses */
1945 			arr[n++] = 0x86;  /* extended inquiry */
1946 			arr[n++] = 0x87;  /* mode page policy */
1947 			arr[n++] = 0x88;  /* SCSI ports */
1948 			if (is_disk_zbc) {	  /* SBC or ZBC */
1949 				arr[n++] = 0x89;  /* ATA information */
1950 				arr[n++] = 0xb0;  /* Block limits */
1951 				arr[n++] = 0xb1;  /* Block characteristics */
1952 				if (is_disk)
1953 					arr[n++] = 0xb2;  /* LB Provisioning */
1954 				if (is_zbc)
1955 					arr[n++] = 0xb6;  /* ZB dev. char. */
1956 				arr[n++] = 0xb7;  /* Block limits extension */
1957 			}
1958 			arr[3] = n - 4;	  /* number of supported VPD pages */
1959 		} else if (0x80 == cmd[2]) { /* unit serial number */
1960 			arr[3] = len;
1961 			memcpy(&arr[4], lu_id_str, len);
1962 		} else if (0x83 == cmd[2]) { /* device identification */
1963 			arr[3] = inquiry_vpd_83(&arr[4], port_group_id,
1964 						target_dev_id, lu_id_num,
1965 						lu_id_str, len,
1966 						&devip->lu_name);
1967 		} else if (0x84 == cmd[2]) { /* Software interface ident. */
1968 			arr[3] = inquiry_vpd_84(&arr[4]);
1969 		} else if (0x85 == cmd[2]) { /* Management network addresses */
1970 			arr[3] = inquiry_vpd_85(&arr[4]);
1971 		} else if (0x86 == cmd[2]) { /* extended inquiry */
1972 			arr[3] = 0x3c;	/* number of following entries */
1973 			if (sdebug_dif == T10_PI_TYPE3_PROTECTION)
1974 				arr[4] = 0x4;	/* SPT: GRD_CHK:1 */
1975 			else if (have_dif_prot)
1976 				arr[4] = 0x5;   /* SPT: GRD_CHK:1, REF_CHK:1 */
1977 			else
1978 				arr[4] = 0x0;   /* no protection stuff */
1979 			/*
1980 			 * GROUP_SUP=1; HEADSUP=1 (HEAD OF QUEUE); ORDSUP=1
1981 			 * (ORDERED queuing); SIMPSUP=1 (SIMPLE queuing).
1982 			 */
1983 			arr[5] = 0x17;
1984 		} else if (0x87 == cmd[2]) { /* mode page policy */
1985 			arr[3] = 0x8;	/* number of following entries */
1986 			arr[4] = 0x2;	/* disconnect-reconnect mp */
1987 			arr[6] = 0x80;	/* mlus, shared */
1988 			arr[8] = 0x18;	 /* protocol specific lu */
1989 			arr[10] = 0x82;	 /* mlus, per initiator port */
1990 		} else if (0x88 == cmd[2]) { /* SCSI Ports */
1991 			arr[3] = inquiry_vpd_88(&arr[4], target_dev_id);
1992 		} else if (is_disk_zbc && 0x89 == cmd[2]) { /* ATA info */
1993 			n = inquiry_vpd_89(&arr[4]);
1994 			put_unaligned_be16(n, arr + 2);
1995 		} else if (is_disk_zbc && 0xb0 == cmd[2]) { /* Block limits */
1996 			arr[3] = inquiry_vpd_b0(&arr[4]);
1997 		} else if (is_disk_zbc && 0xb1 == cmd[2]) { /* Block char. */
1998 			arr[3] = inquiry_vpd_b1(devip, &arr[4]);
1999 		} else if (is_disk && 0xb2 == cmd[2]) { /* LB Prov. */
2000 			arr[3] = inquiry_vpd_b2(&arr[4]);
2001 		} else if (is_zbc && cmd[2] == 0xb6) { /* ZB dev. charact. */
2002 			arr[3] = inquiry_vpd_b6(devip, &arr[4]);
2003 		} else if (cmd[2] == 0xb7) { /* block limits extension page */
2004 			arr[3] = inquiry_vpd_b7(&arr[4]);
2005 		} else {
2006 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
2007 			kfree(arr);
2008 			return check_condition_result;
2009 		}
2010 		len = min_t(u32, get_unaligned_be16(arr + 2) + 4, alloc_len);
2011 		ret = fill_from_dev_buffer(scp, arr,
2012 			    min_t(u32, len, SDEBUG_MAX_INQ_ARR_SZ));
2013 		kfree(arr);
2014 		return ret;
2015 	}
2016 	/* drops through here for a standard inquiry */
2017 	arr[1] = sdebug_removable ? 0x80 : 0;	/* Removable disk */
2018 	arr[2] = sdebug_scsi_level;
2019 	arr[3] = 2;    /* response_data_format==2 */
2020 	arr[4] = SDEBUG_LONG_INQ_SZ - 5;
2021 	arr[5] = (int)have_dif_prot;	/* PROTECT bit */
2022 	if (sdebug_vpd_use_hostno == 0)
2023 		arr[5] |= 0x10; /* claim: implicit TPGS */
2024 	arr[6] = 0x10; /* claim: MultiP */
2025 	/* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
2026 	arr[7] = 0xa; /* claim: LINKED + CMDQUE */
2027 	memcpy(&arr[8], sdebug_inq_vendor_id, 8);
2028 	memcpy(&arr[16], sdebug_inq_product_id, 16);
2029 	memcpy(&arr[32], sdebug_inq_product_rev, 4);
2030 	/* Use Vendor Specific area to place driver date in ASCII hex */
2031 	memcpy(&arr[36], sdebug_version_date, 8);
2032 	/* version descriptors (2 bytes each) follow */
2033 	put_unaligned_be16(0xc0, arr + 58);   /* SAM-6 no version claimed */
2034 	put_unaligned_be16(0x5c0, arr + 60);  /* SPC-5 no version claimed */
2035 	n = 62;
2036 	if (is_disk) {		/* SBC-4 no version claimed */
2037 		put_unaligned_be16(0x600, arr + n);
2038 		n += 2;
2039 	} else if (sdebug_ptype == TYPE_TAPE) {	/* SSC-4 rev 3 */
2040 		put_unaligned_be16(0x525, arr + n);
2041 		n += 2;
2042 	} else if (is_zbc) {	/* ZBC BSR INCITS 536 revision 05 */
2043 		put_unaligned_be16(0x624, arr + n);
2044 		n += 2;
2045 	}
2046 	put_unaligned_be16(0x2100, arr + n);	/* SPL-4 no version claimed */
2047 	ret = fill_from_dev_buffer(scp, arr,
2048 			    min_t(u32, alloc_len, SDEBUG_LONG_INQ_SZ));
2049 	kfree(arr);
2050 	return ret;
2051 }
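
/*
 * The INQUIRY/VPD paths above are easy to exercise from user space via
 * the SG_IO ioctl. A hedged user-space sketch (not driver code; the
 * helper name and device node are arbitrary) requesting the Device
 * Identification page (0x83):
 */
#if 0	/* illustrative user-space sketch only */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <scsi/sg.h>

static int example_fetch_vpd83(const char *dev, unsigned char *buf, int len)
{
	unsigned char cdb[6] = { 0x12, 0x1, 0x83, len >> 8, len & 0xff, 0 };
	unsigned char sense[32];
	struct sg_io_hdr io;
	int fd, ret;

	fd = open(dev, O_RDWR);
	if (fd < 0)
		return -1;
	memset(&io, 0, sizeof(io));
	io.interface_id = 'S';
	io.cmdp = cdb;
	io.cmd_len = sizeof(cdb);
	io.dxfer_direction = SG_DXFER_FROM_DEV;
	io.dxferp = buf;
	io.dxfer_len = len;
	io.sbp = sense;
	io.mx_sb_len = sizeof(sense);
	io.timeout = 5000;		/* milliseconds */
	ret = ioctl(fd, SG_IO, &io);	/* 0 on success */
	close(fd);
	return ret;
}
#endif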
2052 
2053 /* See resp_iec_m_pg() for how this data is manipulated */
2054 static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
2055 				   0, 0, 0x0, 0x0};
2056 
2057 static int resp_requests(struct scsi_cmnd *scp,
2058 			 struct sdebug_dev_info *devip)
2059 {
2060 	unsigned char *cmd = scp->cmnd;
2061 	unsigned char arr[SCSI_SENSE_BUFFERSIZE];	/* assume >= 18 bytes */
2062 	bool dsense = !!(cmd[1] & 1);
2063 	u32 alloc_len = cmd[4];
2064 	u32 len = 18;
2065 	int stopped_state = atomic_read(&devip->stopped);
2066 
2067 	memset(arr, 0, sizeof(arr));
2068 	if (stopped_state > 0) {	/* some "pollable" data [spc6r02: 5.12.2] */
2069 		if (dsense) {
2070 			arr[0] = 0x72;
2071 			arr[1] = NOT_READY;
2072 			arr[2] = LOGICAL_UNIT_NOT_READY;
2073 			arr[3] = (stopped_state == 2) ? 0x1 : 0x2;
2074 			len = 8;
2075 		} else {
2076 			arr[0] = 0x70;
2077 			arr[2] = NOT_READY;		/* NOT_READY in sense_key */
2078 			arr[7] = 0xa;			/* 18 byte sense buffer */
2079 			arr[12] = LOGICAL_UNIT_NOT_READY;
2080 			arr[13] = (stopped_state == 2) ? 0x1 : 0x2;
2081 		}
2082 	} else if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
2083 		/* Information exceptions control mode page: TEST=1, MRIE=6 */
2084 		if (dsense) {
2085 			arr[0] = 0x72;
2086 			arr[1] = 0x0;		/* NO_SENSE in sense_key */
2087 			arr[2] = THRESHOLD_EXCEEDED;
2088 			arr[3] = 0xff;		/* Failure prediction(false) */
2089 			len = 8;
2090 		} else {
2091 			arr[0] = 0x70;
2092 			arr[2] = 0x0;		/* NO_SENSE in sense_key */
2093 			arr[7] = 0xa;	/* 18 byte sense buffer */
2094 			arr[12] = THRESHOLD_EXCEEDED;
2095 			arr[13] = 0xff;		/* Failure prediction(false) */
2096 		}
2097 	} else {	/* nothing to report */
2098 		if (dsense) {
2099 			len = 8;
2100 			memset(arr, 0, len);
2101 			arr[0] = 0x72;
2102 		} else {
2103 			memset(arr, 0, len);
2104 			arr[0] = 0x70;
2105 			arr[7] = 0xa;
2106 		}
2107 	}
2108 	return fill_from_dev_buffer(scp, arr, min_t(u32, len, alloc_len));
2109 }
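
/*
 * The two sense formats produced above put the key fields at different
 * offsets: descriptor format (0x72) uses bytes 1..3 for key/ASC/ASCQ,
 * fixed format (0x70) uses bytes 2, 12 and 13. A minimal decoding
 * sketch (hypothetical helper), not part of the driver:
 */
#if 0	/* illustrative sketch only */
static void example_decode_sense(const unsigned char *s,
				 int *key, int *asc, int *ascq)
{
	if ((s[0] & 0x7f) >= 0x72) {	/* descriptor format (0x72/0x73) */
		*key = s[1] & 0xf;
		*asc = s[2];
		*ascq = s[3];
	} else {			/* fixed format (0x70/0x71) */
		*key = s[2] & 0xf;
		*asc = s[12];
		*ascq = s[13];
	}
}
#endif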
2110 
2111 static int resp_start_stop(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
2112 {
2113 	unsigned char *cmd = scp->cmnd;
2114 	int power_cond, want_stop, stopped_state;
2115 	bool changing;
2116 
2117 	power_cond = (cmd[4] & 0xf0) >> 4;
2118 	if (power_cond) {
2119 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 7);
2120 		return check_condition_result;
2121 	}
2122 	want_stop = !(cmd[4] & 1);
2123 	stopped_state = atomic_read(&devip->stopped);
2124 	if (stopped_state == 2) {
2125 		ktime_t now_ts = ktime_get_boottime();
2126 
2127 		if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
2128 			u64 diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));
2129 
2130 			if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
2131 				/* tur_ms_to_ready timer extinguished */
2132 				atomic_set(&devip->stopped, 0);
2133 				stopped_state = 0;
2134 			}
2135 		}
2136 		if (stopped_state == 2) {
2137 			if (want_stop) {
2138 				stopped_state = 1;	/* dummy up success */
2139 			} else {	/* Disallow tur_ms_to_ready delay to be overridden */
2140 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 0 /* START bit */);
2141 				return check_condition_result;
2142 			}
2143 		}
2144 	}
2145 	changing = (stopped_state != want_stop);
2146 	if (changing)
2147 		atomic_xchg(&devip->stopped, want_stop);
2148 	if (!changing || (cmd[1] & 0x1))  /* state unchanged or IMMED bit set in cdb */
2149 		return SDEG_RES_IMMED_MASK;
2150 	else
2151 		return 0;
2152 }
2153 
2154 static sector_t get_sdebug_capacity(void)
2155 {
2156 	static const unsigned int gibibyte = 1073741824;
2157 
2158 	if (sdebug_virtual_gb > 0)
2159 		return (sector_t)sdebug_virtual_gb *
2160 			(gibibyte / sdebug_sector_size);
2161 	else
2162 		return sdebug_store_sectors;
2163 }
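
/*
 * Worked example: with virtual_gb=4 and a 512 byte logical block size,
 * get_sdebug_capacity() reports 4 * (1073741824 / 512) = 8388608
 * blocks, regardless of how much backing store (sdebug_store_sectors)
 * was actually allocated.
 */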
2164 
2165 #define SDEBUG_READCAP_ARR_SZ 8
2166 static int resp_readcap(struct scsi_cmnd *scp,
2167 			struct sdebug_dev_info *devip)
2168 {
2169 	unsigned char arr[SDEBUG_READCAP_ARR_SZ];
2170 	unsigned int capac;
2171 
2172 	/* following just in case virtual_gb changed */
2173 	sdebug_capacity = get_sdebug_capacity();
2174 	memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
2175 	if (sdebug_capacity < 0xffffffff) {
2176 		capac = (unsigned int)sdebug_capacity - 1;
2177 		put_unaligned_be32(capac, arr + 0);
2178 	} else
2179 		put_unaligned_be32(0xffffffff, arr + 0);
2180 	put_unaligned_be16(sdebug_sector_size, arr + 6);
2181 	return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
2182 }
2183 
2184 #define SDEBUG_READCAP16_ARR_SZ 32
2185 static int resp_readcap16(struct scsi_cmnd *scp,
2186 			  struct sdebug_dev_info *devip)
2187 {
2188 	unsigned char *cmd = scp->cmnd;
2189 	unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
2190 	u32 alloc_len;
2191 
2192 	alloc_len = get_unaligned_be32(cmd + 10);
2193 	/* following just in case virtual_gb changed */
2194 	sdebug_capacity = get_sdebug_capacity();
2195 	memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
2196 	put_unaligned_be64((u64)(sdebug_capacity - 1), arr + 0);
2197 	put_unaligned_be32(sdebug_sector_size, arr + 8);
2198 	arr[13] = sdebug_physblk_exp & 0xf;
2199 	arr[14] = (sdebug_lowest_aligned >> 8) & 0x3f;
2200 
2201 	if (scsi_debug_lbp()) {
2202 		arr[14] |= 0x80; /* LBPME */
2203 		/* from sbc4r07, this LBPRZ field is 1 bit, but the LBPRZ in
2204 		 * the LB Provisioning VPD page is 3 bits. Note that lbprz=2
2205 		 * in the wider field maps to 0 in this field.
2206 		 */
2207 		if (sdebug_lbprz & 1)	/* precisely what the draft requires */
2208 			arr[14] |= 0x40;
2209 	}
2210 
2211 	/*
2212 	 * Since the scsi_debug READ CAPACITY implementation always reports the
2213 	 * total disk capacity, set RC BASIS = 1 for host-managed ZBC devices.
2214 	 */
2215 	if (devip->zoned)
2216 		arr[12] |= 1 << 4;
2217 
2218 	arr[15] = sdebug_lowest_aligned & 0xff;
2219 
2220 	if (have_dif_prot) {
2221 		arr[12] = (sdebug_dif - 1) << 1; /* P_TYPE */
2222 		arr[12] |= 1; /* PROT_EN */
2223 	}
2224 
2225 	return fill_from_dev_buffer(scp, arr,
2226 			    min_t(u32, alloc_len, SDEBUG_READCAP16_ARR_SZ));
2227 }
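
/*
 * A minimal decoding sketch (hypothetical helper) for the fields of the
 * 32-byte READ CAPACITY(16) parameter data assembled above:
 */
#if 0	/* illustrative sketch only */
static void example_decode_readcap16(const unsigned char *p)
{
	u64 last_lba = get_unaligned_be64(p);	/* capacity - 1 */
	u32 block_len = get_unaligned_be32(p + 8);
	int prot_en = p[12] & 0x1;
	int p_type = (p[12] >> 1) & 0x7;	/* sdebug_dif - 1 when PROT_EN */
	int rc_basis = (p[12] >> 4) & 0x3;	/* 1 for host-managed ZBC here */
	int physblk_exp = p[13] & 0xf;		/* log2(physical/logical) */
	int lbpme = !!(p[14] & 0x80);
	int lbprz = !!(p[14] & 0x40);
	u32 lowest_aligned = ((p[14] & 0x3f) << 8) | p[15];
}
#endif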
2228 
2229 #define SDEBUG_MAX_TGTPGS_ARR_SZ 1412
2230 
2231 static int resp_report_tgtpgs(struct scsi_cmnd *scp,
2232 			      struct sdebug_dev_info *devip)
2233 {
2234 	unsigned char *cmd = scp->cmnd;
2235 	unsigned char *arr;
2236 	int host_no = devip->sdbg_host->shost->host_no;
2237 	int port_group_a, port_group_b, port_a, port_b;
2238 	u32 alen, n, rlen;
2239 	int ret;
2240 
2241 	alen = get_unaligned_be32(cmd + 6);
2242 	arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
2243 	if (!arr)
2244 		return DID_REQUEUE << 16;
2245 	/*
2246 	 * EVPD page 0x88 states we have two ports, one
2247 	 * real and a fake port with no device connected.
2248 	 * So we create two port groups with one port each
2249 	 * and set the group with port B to unavailable.
2250 	 */
2251 	port_a = 0x1; /* relative port A */
2252 	port_b = 0x2; /* relative port B */
2253 	port_group_a = (((host_no + 1) & 0x7f) << 8) +
2254 			(devip->channel & 0x7f);
2255 	port_group_b = (((host_no + 1) & 0x7f) << 8) +
2256 			(devip->channel & 0x7f) + 0x80;
2257 
2258 	/*
2259 	 * The asymmetric access state is cycled according to the host_id.
2260 	 */
2261 	n = 4;
2262 	if (sdebug_vpd_use_hostno == 0) {
2263 		arr[n++] = host_no % 3; /* Asymm access state */
2264 		arr[n++] = 0x0F; /* claim: all states are supported */
2265 	} else {
2266 		arr[n++] = 0x0; /* Active/Optimized path */
2267 		arr[n++] = 0x01; /* only support active/optimized paths */
2268 	}
2269 	put_unaligned_be16(port_group_a, arr + n);
2270 	n += 2;
2271 	arr[n++] = 0;    /* Reserved */
2272 	arr[n++] = 0;    /* Status code */
2273 	arr[n++] = 0;    /* Vendor unique */
2274 	arr[n++] = 0x1;  /* One port per group */
2275 	arr[n++] = 0;    /* Reserved */
2276 	arr[n++] = 0;    /* Reserved */
2277 	put_unaligned_be16(port_a, arr + n);
2278 	n += 2;
2279 	arr[n++] = 3;    /* Port unavailable */
2280 	arr[n++] = 0x08; /* claim: only unavailable paths are supported */
2281 	put_unaligned_be16(port_group_b, arr + n);
2282 	n += 2;
2283 	arr[n++] = 0;    /* Reserved */
2284 	arr[n++] = 0;    /* Status code */
2285 	arr[n++] = 0;    /* Vendor unique */
2286 	arr[n++] = 0x1;  /* One port per group */
2287 	arr[n++] = 0;    /* Reserved */
2288 	arr[n++] = 0;    /* Reserved */
2289 	put_unaligned_be16(port_b, arr + n);
2290 	n += 2;
2291 
2292 	rlen = n - 4;
2293 	put_unaligned_be32(rlen, arr + 0);
2294 
2295 	/*
2296 	 * Return the smallest value of either
2297 	 * - The allocated length
2298 	 * - The constructed response length
2299 	 * - The maximum array size
2300 	 */
2301 	rlen = min(alen, n);
2302 	ret = fill_from_dev_buffer(scp, arr,
2303 			   min_t(u32, rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
2304 	kfree(arr);
2305 	return ret;
2306 }
2307 
2308 static int resp_rsup_opcodes(struct scsi_cmnd *scp,
2309 			     struct sdebug_dev_info *devip)
2310 {
2311 	bool rctd;
2312 	u8 reporting_opts, req_opcode, sdeb_i, supp;
2313 	u16 req_sa, u;
2314 	u32 alloc_len, a_len;
2315 	int k, offset, len, errsts, count, bump, na;
2316 	const struct opcode_info_t *oip;
2317 	const struct opcode_info_t *r_oip;
2318 	u8 *arr;
2319 	u8 *cmd = scp->cmnd;
2320 
2321 	rctd = !!(cmd[2] & 0x80);
2322 	reporting_opts = cmd[2] & 0x7;
2323 	req_opcode = cmd[3];
2324 	req_sa = get_unaligned_be16(cmd + 4);
2325 	alloc_len = get_unaligned_be32(cmd + 6);
2326 	if (alloc_len < 4 || alloc_len > 0xffff) {
2327 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
2328 		return check_condition_result;
2329 	}
2330 	if (alloc_len > 8192)
2331 		a_len = 8192;
2332 	else
2333 		a_len = alloc_len;
2334 	arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_ATOMIC);
2335 	if (NULL == arr) {
2336 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
2337 				INSUFF_RES_ASCQ);
2338 		return check_condition_result;
2339 	}
2340 	switch (reporting_opts) {
2341 	case 0:	/* all commands */
2342 		/* count number of commands */
2343 		for (count = 0, oip = opcode_info_arr;
2344 		     oip->num_attached != 0xff; ++oip) {
2345 			if (F_INV_OP & oip->flags)
2346 				continue;
2347 			count += (oip->num_attached + 1);
2348 		}
2349 		bump = rctd ? 20 : 8;
2350 		put_unaligned_be32(count * bump, arr);
2351 		for (offset = 4, oip = opcode_info_arr;
2352 		     oip->num_attached != 0xff && offset < a_len; ++oip) {
2353 			if (F_INV_OP & oip->flags)
2354 				continue;
2355 			na = oip->num_attached;
2356 			arr[offset] = oip->opcode;
2357 			put_unaligned_be16(oip->sa, arr + offset + 2);
2358 			if (rctd)
2359 				arr[offset + 5] |= 0x2;
2360 			if (FF_SA & oip->flags)
2361 				arr[offset + 5] |= 0x1;
2362 			put_unaligned_be16(oip->len_mask[0], arr + offset + 6);
2363 			if (rctd)
2364 				put_unaligned_be16(0xa, arr + offset + 8);
2365 			r_oip = oip;
2366 			for (k = 0, oip = oip->arrp; k < na; ++k, ++oip) {
2367 				if (F_INV_OP & oip->flags)
2368 					continue;
2369 				offset += bump;
2370 				arr[offset] = oip->opcode;
2371 				put_unaligned_be16(oip->sa, arr + offset + 2);
2372 				if (rctd)
2373 					arr[offset + 5] |= 0x2;
2374 				if (FF_SA & oip->flags)
2375 					arr[offset + 5] |= 0x1;
2376 				put_unaligned_be16(oip->len_mask[0],
2377 						   arr + offset + 6);
2378 				if (rctd)
2379 					put_unaligned_be16(0xa,
2380 							   arr + offset + 8);
2381 			}
2382 			oip = r_oip;
2383 			offset += bump;
2384 		}
2385 		break;
2386 	case 1:	/* one command: opcode only */
2387 	case 2:	/* one command: opcode plus service action */
2388 	case 3:	/* one command: if sa==0 then opcode only else opcode+sa */
2389 		sdeb_i = opcode_ind_arr[req_opcode];
2390 		oip = &opcode_info_arr[sdeb_i];
2391 		if (F_INV_OP & oip->flags) {
2392 			supp = 1;
2393 			offset = 4;
2394 		} else {
2395 			if (1 == reporting_opts) {
2396 				if (FF_SA & oip->flags) {
2397 					mk_sense_invalid_fld(scp, SDEB_IN_CDB,
2398 							     2, 2);
2399 					kfree(arr);
2400 					return check_condition_result;
2401 				}
2402 				req_sa = 0;
2403 			} else if (2 == reporting_opts &&
2404 				   0 == (FF_SA & oip->flags)) {
2405 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1); /* point at requested sa */
2406 				kfree(arr);
2407 				return check_condition_result;
2408 			}
2409 			if (0 == (FF_SA & oip->flags) &&
2410 			    req_opcode == oip->opcode)
2411 				supp = 3;
2412 			else if (0 == (FF_SA & oip->flags)) {
2413 				na = oip->num_attached;
2414 				for (k = 0, oip = oip->arrp; k < na;
2415 				     ++k, ++oip) {
2416 					if (req_opcode == oip->opcode)
2417 						break;
2418 				}
2419 				supp = (k >= na) ? 1 : 3;
2420 			} else if (req_sa != oip->sa) {
2421 				na = oip->num_attached;
2422 				for (k = 0, oip = oip->arrp; k < na;
2423 				     ++k, ++oip) {
2424 					if (req_sa == oip->sa)
2425 						break;
2426 				}
2427 				supp = (k >= na) ? 1 : 3;
2428 			} else
2429 				supp = 3;
2430 			if (3 == supp) {
2431 				u = oip->len_mask[0];
2432 				put_unaligned_be16(u, arr + 2);
2433 				arr[4] = oip->opcode;
2434 				for (k = 1; k < u; ++k)
2435 					arr[4 + k] = (k < 16) ?
2436 						 oip->len_mask[k] : 0xff;
2437 				offset = 4 + u;
2438 			} else
2439 				offset = 4;
2440 		}
2441 		arr[1] = (rctd ? 0x80 : 0) | supp;
2442 		if (rctd) {
2443 			put_unaligned_be16(0xa, arr + offset);
2444 			offset += 12;
2445 		}
2446 		break;
2447 	default:
2448 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
2449 		kfree(arr);
2450 		return check_condition_result;
2451 	}
2452 	offset = (offset < a_len) ? offset : a_len;
2453 	len = (offset < alloc_len) ? offset : alloc_len;
2454 	errsts = fill_from_dev_buffer(scp, arr, len);
2455 	kfree(arr);
2456 	return errsts;
2457 }
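
/*
 * Sizing note for the "all commands" case above: each command descriptor
 * is 8 bytes, or 20 when RCTD is set, i.e. 8 plus a 12-byte command
 * timeouts descriptor; the be16 0xa written at offset 8 is that
 * descriptor's length field, which excludes its own two bytes.
 */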
2458 
2459 static int resp_rsup_tmfs(struct scsi_cmnd *scp,
2460 			  struct sdebug_dev_info *devip)
2461 {
2462 	bool repd;
2463 	u32 alloc_len, len;
2464 	u8 arr[16];
2465 	u8 *cmd = scp->cmnd;
2466 
2467 	memset(arr, 0, sizeof(arr));
2468 	repd = !!(cmd[2] & 0x80);
2469 	alloc_len = get_unaligned_be32(cmd + 6);
2470 	if (alloc_len < 4) {
2471 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
2472 		return check_condition_result;
2473 	}
2474 	arr[0] = 0xc8;		/* ATS | ATSS | LURS */
2475 	arr[1] = 0x1;		/* ITNRS */
2476 	if (repd) {
2477 		arr[3] = 0xc;
2478 		len = 16;
2479 	} else
2480 		len = 4;
2481 
2482 	len = (len < alloc_len) ? len : alloc_len;
2483 	return fill_from_dev_buffer(scp, arr, len);
2484 }
2485 
2486 /* <<Following mode page info copied from ST318451LW>> */
2487 
2488 static int resp_err_recov_pg(unsigned char *p, int pcontrol, int target)
2489 {	/* Read-Write Error Recovery page for mode_sense */
2490 	unsigned char err_recov_pg[] = {0x1, 0xa, 0xc0, 11, 240, 0, 0, 0,
2491 					5, 0, 0xff, 0xff};
2492 
2493 	memcpy(p, err_recov_pg, sizeof(err_recov_pg));
2494 	if (1 == pcontrol)
2495 		memset(p + 2, 0, sizeof(err_recov_pg) - 2);
2496 	return sizeof(err_recov_pg);
2497 }
2498 
2499 static int resp_disconnect_pg(unsigned char *p, int pcontrol, int target)
2500 { 	/* Disconnect-Reconnect page for mode_sense */
2501 	unsigned char disconnect_pg[] = {0x2, 0xe, 128, 128, 0, 10, 0, 0,
2502 					 0, 0, 0, 0, 0, 0, 0, 0};
2503 
2504 	memcpy(p, disconnect_pg, sizeof(disconnect_pg));
2505 	if (1 == pcontrol)
2506 		memset(p + 2, 0, sizeof(disconnect_pg) - 2);
2507 	return sizeof(disconnect_pg);
2508 }
2509 
2510 static int resp_format_pg(unsigned char *p, int pcontrol, int target)
2511 {       /* Format device page for mode_sense */
2512 	unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
2513 				     0, 0, 0, 0, 0, 0, 0, 0,
2514 				     0, 0, 0, 0, 0x40, 0, 0, 0};
2515 
2516 	memcpy(p, format_pg, sizeof(format_pg));
2517 	put_unaligned_be16(sdebug_sectors_per, p + 10);
2518 	put_unaligned_be16(sdebug_sector_size, p + 12);
2519 	if (sdebug_removable)
2520 		p[20] |= 0x20; /* should agree with INQUIRY */
2521 	if (1 == pcontrol)
2522 		memset(p + 2, 0, sizeof(format_pg) - 2);
2523 	return sizeof(format_pg);
2524 }
2525 
2526 static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
2527 				     0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,
2528 				     0, 0, 0, 0};
2529 
2530 static int resp_caching_pg(unsigned char *p, int pcontrol, int target)
2531 { 	/* Caching page for mode_sense */
2532 	unsigned char ch_caching_pg[] = {/* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0,
2533 		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
2534 	unsigned char d_caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
2535 		0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,     0, 0, 0, 0};
2536 
2537 	if (SDEBUG_OPT_N_WCE & sdebug_opts)
2538 		caching_pg[2] &= ~0x4;	/* set WCE=0 (default WCE=1) */
2539 	memcpy(p, caching_pg, sizeof(caching_pg));
2540 	if (1 == pcontrol)
2541 		memcpy(p + 2, ch_caching_pg, sizeof(ch_caching_pg));
2542 	else if (2 == pcontrol)
2543 		memcpy(p, d_caching_pg, sizeof(d_caching_pg));
2544 	return sizeof(caching_pg);
2545 }
2546 
2547 static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
2548 				    0, 0, 0x2, 0x4b};
2549 
2550 static int resp_ctrl_m_pg(unsigned char *p, int pcontrol, int target)
2551 { 	/* Control mode page for mode_sense */
2552 	unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
2553 					0, 0, 0, 0};
2554 	unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
2555 				     0, 0, 0x2, 0x4b};
2556 
2557 	if (sdebug_dsense)
2558 		ctrl_m_pg[2] |= 0x4;
2559 	else
2560 		ctrl_m_pg[2] &= ~0x4;
2561 
2562 	if (sdebug_ato)
2563 		ctrl_m_pg[5] |= 0x80; /* ATO=1 */
2564 
2565 	memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
2566 	if (1 == pcontrol)
2567 		memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
2568 	else if (2 == pcontrol)
2569 		memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
2570 	return sizeof(ctrl_m_pg);
2571 }
2572 
2573 /* IO Advice Hints Grouping mode page */
2574 static int resp_grouping_m_pg(unsigned char *p, int pcontrol, int target)
2575 {
2576 	/* IO Advice Hints Grouping mode page */
2577 	struct grouping_m_pg {
2578 		u8 page_code;	/* OR 0x40 when subpage_code > 0 */
2579 		u8 subpage_code;
2580 		__be16 page_length;
2581 		u8 reserved[12];
2582 		struct scsi_io_group_descriptor descr[MAXIMUM_NUMBER_OF_STREAMS];
2583 	};
2584 	static const struct grouping_m_pg gr_m_pg = {
2585 		.page_code = 0xa | 0x40,
2586 		.subpage_code = 5,
2587 		.page_length = cpu_to_be16(sizeof(gr_m_pg) - 4),
2588 		.descr = {
2589 			{ .st_enble = 1 },
2590 			{ .st_enble = 1 },
2591 			{ .st_enble = 1 },
2592 			{ .st_enble = 1 },
2593 			{ .st_enble = 1 },
2594 			{ .st_enble = 0 },
2595 		}
2596 	};
2597 
2598 	BUILD_BUG_ON(sizeof(struct grouping_m_pg) !=
2599 		     16 + MAXIMUM_NUMBER_OF_STREAMS * 16);
2600 	memcpy(p, &gr_m_pg, sizeof(gr_m_pg));
2601 	if (1 == pcontrol) {
2602 		/* There are no changeable values so clear from byte 4 on. */
2603 		memset(p + 4, 0, sizeof(gr_m_pg) - 4);
2604 	}
2605 	return sizeof(gr_m_pg);
2606 }
2607 
2608 static int resp_iec_m_pg(unsigned char *p, int pcontrol, int target)
2609 {	/* Informational Exceptions control mode page for mode_sense */
2610 	unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
2611 				       0, 0, 0x0, 0x0};
2612 	unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
2613 				      0, 0, 0x0, 0x0};
2614 
2615 	memcpy(p, iec_m_pg, sizeof(iec_m_pg));
2616 	if (1 == pcontrol)
2617 		memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
2618 	else if (2 == pcontrol)
2619 		memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
2620 	return sizeof(iec_m_pg);
2621 }
2622 
2623 static int resp_sas_sf_m_pg(unsigned char *p, int pcontrol, int target)
2624 {	/* SAS SSP mode page - short format for mode_sense */
2625 	unsigned char sas_sf_m_pg[] = {0x19, 0x6,
2626 		0x6, 0x0, 0x7, 0xd0, 0x0, 0x0};
2627 
2628 	memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg));
2629 	if (1 == pcontrol)
2630 		memset(p + 2, 0, sizeof(sas_sf_m_pg) - 2);
2631 	return sizeof(sas_sf_m_pg);
2632 }
2633 
2634 
2635 static int resp_sas_pcd_m_spg(unsigned char *p, int pcontrol, int target,
2636 			      int target_dev_id)
2637 {	/* SAS phy control and discover mode page for mode_sense */
2638 	unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
2639 		    0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
2640 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2641 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2642 		    0x2, 0, 0, 0, 0, 0, 0, 0,
2643 		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
2644 		    0, 0, 0, 0, 0, 0, 0, 0,
2645 		    0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
2646 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2647 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2648 		    0x3, 0, 0, 0, 0, 0, 0, 0,
2649 		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
2650 		    0, 0, 0, 0, 0, 0, 0, 0,
2651 		};
2652 	int port_a, port_b;
2653 
2654 	put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 16);
2655 	put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 24);
2656 	put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 64);
2657 	put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 72);
2658 	port_a = target_dev_id + 1;
2659 	port_b = port_a + 1;
2660 	memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
2661 	put_unaligned_be32(port_a, p + 20);
2662 	put_unaligned_be32(port_b, p + 48 + 20);
2663 	if (1 == pcontrol)
2664 		memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
2665 	return sizeof(sas_pcd_m_pg);
2666 }
2667 
2668 static int resp_sas_sha_m_spg(unsigned char *p, int pcontrol)
2669 {	/* SAS SSP shared protocol specific port mode subpage */
2670 	unsigned char sas_sha_m_pg[] = {0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
2671 		    0, 0, 0, 0, 0, 0, 0, 0,
2672 		};
2673 
2674 	memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg));
2675 	if (1 == pcontrol)
2676 		memset(p + 4, 0, sizeof(sas_sha_m_pg) - 4);
2677 	return sizeof(sas_sha_m_pg);
2678 }
2679 
2680 /* PAGE_SIZE is more than necessary but provides room for future expansion. */
2681 #define SDEBUG_MAX_MSENSE_SZ PAGE_SIZE
2682 
2683 static int resp_mode_sense(struct scsi_cmnd *scp,
2684 			   struct sdebug_dev_info *devip)
2685 {
2686 	int pcontrol, pcode, subpcode, bd_len;
2687 	unsigned char dev_spec;
2688 	u32 alloc_len, offset, len;
2689 	int target_dev_id;
2690 	int target = scp->device->id;
2691 	unsigned char *ap;
2692 	unsigned char *arr __free(kfree);
2693 	unsigned char *cmd = scp->cmnd;
2694 	bool dbd, llbaa, msense_6, is_disk, is_zbc;
2695 
2696 	arr = kzalloc(SDEBUG_MAX_MSENSE_SZ, GFP_ATOMIC);
2697 	if (!arr)
2698 		return -ENOMEM;
2699 	dbd = !!(cmd[1] & 0x8);		/* disable block descriptors */
2700 	pcontrol = (cmd[2] & 0xc0) >> 6;
2701 	pcode = cmd[2] & 0x3f;
2702 	subpcode = cmd[3];
2703 	msense_6 = (MODE_SENSE == cmd[0]);
2704 	llbaa = msense_6 ? false : !!(cmd[1] & 0x10);
2705 	is_disk = (sdebug_ptype == TYPE_DISK);
2706 	is_zbc = devip->zoned;
2707 	if ((is_disk || is_zbc) && !dbd)
2708 		bd_len = llbaa ? 16 : 8;
2709 	else
2710 		bd_len = 0;
2711 	alloc_len = msense_6 ? cmd[4] : get_unaligned_be16(cmd + 7);
2712 	memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
2713 	if (0x3 == pcontrol) {  /* Saving values not supported */
2714 		mk_sense_buffer(scp, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, 0);
2715 		return check_condition_result;
2716 	}
2717 	target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
2718 			(devip->target * 1000) - 3;
2719 	/* for disks+zbc set DPOFUA bit and clear write protect (WP) bit */
2720 	if (is_disk || is_zbc) {
2721 		dev_spec = 0x10;	/* =0x90 if WP=1 implies read-only */
2722 		if (sdebug_wp)
2723 			dev_spec |= 0x80;
2724 	} else
2725 		dev_spec = 0x0;
2726 	if (msense_6) {
2727 		arr[2] = dev_spec;
2728 		arr[3] = bd_len;
2729 		offset = 4;
2730 	} else {
2731 		arr[3] = dev_spec;
2732 		if (16 == bd_len)
2733 			arr[4] = 0x1;	/* set LONGLBA bit */
2734 		arr[7] = bd_len;	/* assume 255 or less */
2735 		offset = 8;
2736 	}
2737 	ap = arr + offset;
2738 	if ((bd_len > 0) && (!sdebug_capacity))
2739 		sdebug_capacity = get_sdebug_capacity();
2740 
2741 	if (8 == bd_len) {
2742 		if (sdebug_capacity > 0xfffffffe)
2743 			put_unaligned_be32(0xffffffff, ap + 0);
2744 		else
2745 			put_unaligned_be32(sdebug_capacity, ap + 0);
2746 		put_unaligned_be16(sdebug_sector_size, ap + 6);
2747 		offset += bd_len;
2748 		ap = arr + offset;
2749 	} else if (16 == bd_len) {
2750 		put_unaligned_be64((u64)sdebug_capacity, ap + 0);
2751 		put_unaligned_be32(sdebug_sector_size, ap + 12);
2752 		offset += bd_len;
2753 		ap = arr + offset;
2754 	}
2755 
2756 	/*
2757 	 * N.B. If len>0 before resp_*_pg() call, then form of that call should be:
2758 	 *        len += resp_*_pg(ap + len, pcontrol, target);
2759 	 */
2760 	switch (pcode) {
2761 	case 0x1:	/* Read-Write error recovery page, direct access */
2762 		if (subpcode > 0x0 && subpcode < 0xff)
2763 			goto bad_subpcode;
2764 		len = resp_err_recov_pg(ap, pcontrol, target);
2765 		offset += len;
2766 		break;
2767 	case 0x2:	/* Disconnect-Reconnect page, all devices */
2768 		if (subpcode > 0x0 && subpcode < 0xff)
2769 			goto bad_subpcode;
2770 		len = resp_disconnect_pg(ap, pcontrol, target);
2771 		offset += len;
2772 		break;
2773 	case 0x3:       /* Format device page, direct access */
2774 		if (subpcode > 0x0 && subpcode < 0xff)
2775 			goto bad_subpcode;
2776 		if (is_disk) {
2777 			len = resp_format_pg(ap, pcontrol, target);
2778 			offset += len;
2779 		} else {
2780 			goto bad_pcode;
2781 		}
2782 		break;
2783 	case 0x8:	/* Caching page, direct access */
2784 		if (subpcode > 0x0 && subpcode < 0xff)
2785 			goto bad_subpcode;
2786 		if (is_disk || is_zbc) {
2787 			len = resp_caching_pg(ap, pcontrol, target);
2788 			offset += len;
2789 		} else {
2790 			goto bad_pcode;
2791 		}
2792 		break;
2793 	case 0xa:	/* Control Mode page, all devices */
2794 		switch (subpcode) {
2795 		case 0:
2796 			len = resp_ctrl_m_pg(ap, pcontrol, target);
2797 			break;
2798 		case 0x05:
2799 			len = resp_grouping_m_pg(ap, pcontrol, target);
2800 			break;
2801 		case 0xff:
2802 			len = resp_ctrl_m_pg(ap, pcontrol, target);
2803 			len += resp_grouping_m_pg(ap + len, pcontrol, target);
2804 			break;
2805 		default:
2806 			goto bad_subpcode;
2807 		}
2808 		offset += len;
2809 		break;
2810 	case 0x19:	/* if spc==1 then sas phy, control+discover */
2811 		if (subpcode > 0x2 && subpcode < 0xff)
2812 			goto bad_subpcode;
2813 		len = 0;
2814 		if ((0x0 == subpcode) || (0xff == subpcode))
2815 			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2816 		if ((0x1 == subpcode) || (0xff == subpcode))
2817 			len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
2818 						  target_dev_id);
2819 		if ((0x2 == subpcode) || (0xff == subpcode))
2820 			len += resp_sas_sha_m_spg(ap + len, pcontrol);
2821 		offset += len;
2822 		break;
2823 	case 0x1c:	/* Informational Exceptions Mode page, all devices */
2824 		if (subpcode > 0x0 && subpcode < 0xff)
2825 			goto bad_subpcode;
2826 		len = resp_iec_m_pg(ap, pcontrol, target);
2827 		offset += len;
2828 		break;
2829 	case 0x3f:	/* Read all Mode pages */
2830 		if (subpcode > 0x0 && subpcode < 0xff)
2831 			goto bad_subpcode;
2832 		len = resp_err_recov_pg(ap, pcontrol, target);
2833 		len += resp_disconnect_pg(ap + len, pcontrol, target);
2834 		if (is_disk) {
2835 			len += resp_format_pg(ap + len, pcontrol, target);
2836 			len += resp_caching_pg(ap + len, pcontrol, target);
2837 		} else if (is_zbc) {
2838 			len += resp_caching_pg(ap + len, pcontrol, target);
2839 		}
2840 		len += resp_ctrl_m_pg(ap + len, pcontrol, target);
2841 		if (0xff == subpcode)
2842 			len += resp_grouping_m_pg(ap + len, pcontrol, target);
2843 		len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2844 		if (0xff == subpcode) {
2845 			len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
2846 						  target_dev_id);
2847 			len += resp_sas_sha_m_spg(ap + len, pcontrol);
2848 		}
2849 		len += resp_iec_m_pg(ap + len, pcontrol, target);
2850 		offset += len;
2851 		break;
2852 	default:
2853 		goto bad_pcode;
2854 	}
2855 	if (msense_6)
2856 		arr[0] = offset - 1;
2857 	else
2858 		put_unaligned_be16((offset - 2), arr + 0);
2859 	return fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, offset));
2860 
2861 bad_pcode:
2862 	mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2863 	return check_condition_result;
2864 
2865 bad_subpcode:
2866 	mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2867 	return check_condition_result;
2868 }
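
/*
 * The 6- and 10-byte MODE SENSE headers built above differ in width and
 * offsets. A minimal decoding sketch (hypothetical helper) matching the
 * parsing that resp_mode_select() performs on the way back in:
 */
#if 0	/* illustrative sketch only */
static int example_mode_hdr(const unsigned char *p, bool msense_6,
			    int *bd_len)
{
	if (msense_6) {
		*bd_len = p[3];
		return p[0] + 1;	/* mode data length excludes itself */
	}
	*bd_len = get_unaligned_be16(p + 6);	/* LONGLBA is p[4] bit 0 */
	return get_unaligned_be16(p) + 2;
}
#endif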
2869 
2870 #define SDEBUG_MAX_MSELECT_SZ 512
2871 
2872 static int resp_mode_select(struct scsi_cmnd *scp,
2873 			    struct sdebug_dev_info *devip)
2874 {
2875 	int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
2876 	int param_len, res, mpage;
2877 	unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
2878 	unsigned char *cmd = scp->cmnd;
2879 	int mselect6 = (MODE_SELECT == cmd[0]);
2880 
2881 	memset(arr, 0, sizeof(arr));
2882 	pf = cmd[1] & 0x10;
2883 	sp = cmd[1] & 0x1;
2884 	param_len = mselect6 ? cmd[4] : get_unaligned_be16(cmd + 7);
2885 	if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
2886 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1);
2887 		return check_condition_result;
2888 	}
2889 	res = fetch_to_dev_buffer(scp, arr, param_len);
2890 	if (-1 == res)
2891 		return DID_ERROR << 16;
2892 	else if (sdebug_verbose && (res < param_len))
2893 		sdev_printk(KERN_INFO, scp->device,
2894 			    "%s: cdb indicated=%d, IO sent=%d bytes\n",
2895 			    __func__, param_len, res);
2896 	md_len = mselect6 ? (arr[0] + 1) : (get_unaligned_be16(arr + 0) + 2);
2897 	bd_len = mselect6 ? arr[3] : get_unaligned_be16(arr + 6);
2898 	off = bd_len + (mselect6 ? 4 : 8);
2899 	if (md_len > 2 || off >= res) {
2900 		mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
2901 		return check_condition_result;
2902 	}
2903 	mpage = arr[off] & 0x3f;
2904 	ps = !!(arr[off] & 0x80);
2905 	if (ps) {
2906 		mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 7);
2907 		return check_condition_result;
2908 	}
2909 	spf = !!(arr[off] & 0x40);
2910 	pg_len = spf ? (get_unaligned_be16(arr + off + 2) + 4) :
2911 		       (arr[off + 1] + 2);
2912 	if ((pg_len + off) > param_len) {
2913 		mk_sense_buffer(scp, ILLEGAL_REQUEST,
2914 				PARAMETER_LIST_LENGTH_ERR, 0);
2915 		return check_condition_result;
2916 	}
2917 	switch (mpage) {
2918 	case 0x8:      /* Caching Mode page */
2919 		if (caching_pg[1] == arr[off + 1]) {
2920 			memcpy(caching_pg + 2, arr + off + 2,
2921 			       sizeof(caching_pg) - 2);
2922 			goto set_mode_changed_ua;
2923 		}
2924 		break;
2925 	case 0xa:      /* Control Mode page */
2926 		if (ctrl_m_pg[1] == arr[off + 1]) {
2927 			memcpy(ctrl_m_pg + 2, arr + off + 2,
2928 			       sizeof(ctrl_m_pg) - 2);
2929 			if (ctrl_m_pg[4] & 0x8)
2930 				sdebug_wp = true;
2931 			else
2932 				sdebug_wp = false;
2933 			sdebug_dsense = !!(ctrl_m_pg[2] & 0x4);
2934 			goto set_mode_changed_ua;
2935 		}
2936 		break;
2937 	case 0x1c:      /* Informational Exceptions Mode page */
2938 		if (iec_m_pg[1] == arr[off + 1]) {
2939 			memcpy(iec_m_pg + 2, arr + off + 2,
2940 			       sizeof(iec_m_pg) - 2);
2941 			goto set_mode_changed_ua;
2942 		}
2943 		break;
2944 	default:
2945 		break;
2946 	}
2947 	mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 5);
2948 	return check_condition_result;
2949 set_mode_changed_ua:
2950 	set_bit(SDEBUG_UA_MODE_CHANGED, devip->uas_bm);
2951 	return 0;
2952 }
2953 
2954 static int resp_temp_l_pg(unsigned char *arr)
2955 {
2956 	unsigned char temp_l_pg[] = {0x0, 0x0, 0x3, 0x2, 0x0, 38,
2957 				     0x0, 0x1, 0x3, 0x2, 0x0, 65,
2958 		};
2959 
2960 	memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
2961 	return sizeof(temp_l_pg);
2962 }
2963 
2964 static int resp_ie_l_pg(unsigned char *arr)
2965 {
2966 	unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
2967 		};
2968 
2969 	memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
2970 	if (iec_m_pg[2] & 0x4) {	/* TEST bit set */
2971 		arr[4] = THRESHOLD_EXCEEDED;
2972 		arr[5] = 0xff;
2973 	}
2974 	return sizeof(ie_l_pg);
2975 }
2976 
2977 static int resp_env_rep_l_spg(unsigned char *arr)
2978 {
2979 	unsigned char env_rep_l_spg[] = {0x0, 0x0, 0x23, 0x8,
2980 					 0x0, 40, 72, 0xff, 45, 18, 0, 0,
2981 					 0x1, 0x0, 0x23, 0x8,
2982 					 0x0, 55, 72, 35, 55, 45, 0, 0,
2983 		};
2984 
2985 	memcpy(arr, env_rep_l_spg, sizeof(env_rep_l_spg));
2986 	return sizeof(env_rep_l_spg);
2987 }
2988 
2989 #define SDEBUG_MAX_LSENSE_SZ 512
2990 
2991 static int resp_log_sense(struct scsi_cmnd *scp,
2992 			  struct sdebug_dev_info *devip)
2993 {
2994 	int ppc, sp, pcode, subpcode;
2995 	u32 alloc_len, len, n;
2996 	unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
2997 	unsigned char *cmd = scp->cmnd;
2998 
2999 	memset(arr, 0, sizeof(arr));
3000 	ppc = cmd[1] & 0x2;
3001 	sp = cmd[1] & 0x1;
3002 	if (ppc || sp) {
3003 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, ppc ? 1 : 0);
3004 		return check_condition_result;
3005 	}
3006 	pcode = cmd[2] & 0x3f;
3007 	subpcode = cmd[3] & 0xff;
3008 	alloc_len = get_unaligned_be16(cmd + 7);
3009 	arr[0] = pcode;
3010 	if (0 == subpcode) {
3011 		switch (pcode) {
3012 		case 0x0:	/* Supported log pages log page */
3013 			n = 4;
3014 			arr[n++] = 0x0;		/* this page */
3015 			arr[n++] = 0xd;		/* Temperature */
3016 			arr[n++] = 0x2f;	/* Informational exceptions */
3017 			arr[3] = n - 4;
3018 			break;
3019 		case 0xd:	/* Temperature log page */
3020 			arr[3] = resp_temp_l_pg(arr + 4);
3021 			break;
3022 		case 0x2f:	/* Informational exceptions log page */
3023 			arr[3] = resp_ie_l_pg(arr + 4);
3024 			break;
3025 		default:
3026 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
3027 			return check_condition_result;
3028 		}
3029 	} else if (0xff == subpcode) {
3030 		arr[0] |= 0x40;
3031 		arr[1] = subpcode;
3032 		switch (pcode) {
3033 		case 0x0:	/* Supported log pages and subpages log page */
3034 			n = 4;
3035 			arr[n++] = 0x0;
3036 			arr[n++] = 0x0;		/* 0,0 page */
3037 			arr[n++] = 0x0;
3038 			arr[n++] = 0xff;	/* this page */
3039 			arr[n++] = 0xd;
3040 			arr[n++] = 0x0;		/* Temperature */
3041 			arr[n++] = 0xd;
3042 			arr[n++] = 0x1;		/* Environment reporting */
3043 			arr[n++] = 0xd;
3044 			arr[n++] = 0xff;	/* all 0xd subpages */
3045 			arr[n++] = 0x2f;
3046 			arr[n++] = 0x0;	/* Informational exceptions */
3047 			arr[n++] = 0x2f;
3048 			arr[n++] = 0xff;	/* all 0x2f subpages */
3049 			arr[3] = n - 4;
3050 			break;
3051 		case 0xd:	/* Temperature subpages */
3052 			n = 4;
3053 			arr[n++] = 0xd;
3054 			arr[n++] = 0x0;		/* Temperature */
3055 			arr[n++] = 0xd;
3056 			arr[n++] = 0x1;		/* Environment reporting */
3057 			arr[n++] = 0xd;
3058 			arr[n++] = 0xff;	/* these subpages */
3059 			arr[3] = n - 4;
3060 			break;
3061 		case 0x2f:	/* Informational exceptions subpages */
3062 			n = 4;
3063 			arr[n++] = 0x2f;
3064 			arr[n++] = 0x0;		/* Informational exceptions */
3065 			arr[n++] = 0x2f;
3066 			arr[n++] = 0xff;	/* these subpages */
3067 			arr[3] = n - 4;
3068 			break;
3069 		default:
3070 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
3071 			return check_condition_result;
3072 		}
3073 	} else if (subpcode > 0) {
3074 		arr[0] |= 0x40;
3075 		arr[1] = subpcode;
3076 		if (pcode == 0xd && subpcode == 1)
3077 			arr[3] = resp_env_rep_l_spg(arr + 4);
3078 		else {
3079 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
3080 			return check_condition_result;
3081 		}
3082 	} else {
3083 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
3084 		return check_condition_result;
3085 	}
3086 	len = min_t(u32, get_unaligned_be16(arr + 2) + 4, alloc_len);
3087 	return fill_from_dev_buffer(scp, arr,
3088 		    min_t(u32, len, SDEBUG_MAX_LSENSE_SZ));
3089 }
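
/*
 * Every log page returned above shares a 4-byte header: byte 0 carries
 * the page code (bit 6, SPF, is set when a subpage is addressed), byte 1
 * the subpage code, and bytes 2..3 the big-endian page length that the
 * final min_t() clamp relies on. E.g. arr[0] = 0x4d, arr[1] = 0x01
 * identifies the Temperature page's Environment reporting subpage.
 */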
3090 
3091 static inline bool sdebug_dev_is_zoned(struct sdebug_dev_info *devip)
3092 {
3093 	return devip->nr_zones != 0;
3094 }
3095 
3096 static struct sdeb_zone_state *zbc_zone(struct sdebug_dev_info *devip,
3097 					unsigned long long lba)
3098 {
3099 	u32 zno = lba >> devip->zsize_shift;
3100 	struct sdeb_zone_state *zsp;
3101 
3102 	if (devip->zcap == devip->zsize || zno < devip->nr_conv_zones)
3103 		return &devip->zstate[zno];
3104 
3105 	/*
3106 	 * If the zone capacity is less than the zone size, adjust for gap
3107 	 * zones.
3108 	 */
3109 	zno = 2 * zno - devip->nr_conv_zones;
3110 	WARN_ONCE(zno >= devip->nr_zones, "%u >= %u\n", zno, devip->nr_zones);
3111 	zsp = &devip->zstate[zno];
3112 	if (lba >= zsp->z_start + zsp->z_size)
3113 		zsp++;
3114 	WARN_ON_ONCE(lba >= zsp->z_start + zsp->z_size);
3115 	return zsp;
3116 }
3117 
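/*
 * Worked example for the gap-zone mapping in zbc_zone() (illustration
 * only): with nr_conv_zones = 2 and zone capacity < zone size, zstate[]
 * interleaves sequential and gap zones after the conventional ones:
 *
 *	zstate[]: conv0 conv1 seq0 gap0 seq1 gap1 ...
 *
 * An lba in logical zone zno = 3 (the second sequential zone) maps to
 * index 2 * 3 - 2 = 4, i.e. seq1; if lba falls past that zone's zcap
 * sectors, the zsp++ above steps to the trailing gap zone (gap1).
 */
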
3118 static inline bool zbc_zone_is_conv(struct sdeb_zone_state *zsp)
3119 {
3120 	return zsp->z_type == ZBC_ZTYPE_CNV;
3121 }
3122 
3123 static inline bool zbc_zone_is_gap(struct sdeb_zone_state *zsp)
3124 {
3125 	return zsp->z_type == ZBC_ZTYPE_GAP;
3126 }
3127 
3128 static inline bool zbc_zone_is_seq(struct sdeb_zone_state *zsp)
3129 {
3130 	return !zbc_zone_is_conv(zsp) && !zbc_zone_is_gap(zsp);
3131 }
3132 
3133 static void zbc_close_zone(struct sdebug_dev_info *devip,
3134 			   struct sdeb_zone_state *zsp)
3135 {
3136 	enum sdebug_z_cond zc;
3137 
3138 	if (!zbc_zone_is_seq(zsp))
3139 		return;
3140 
3141 	zc = zsp->z_cond;
3142 	if (!(zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN))
3143 		return;
3144 
3145 	if (zc == ZC2_IMPLICIT_OPEN)
3146 		devip->nr_imp_open--;
3147 	else
3148 		devip->nr_exp_open--;
3149 
3150 	if (zsp->z_wp == zsp->z_start) {
3151 		zsp->z_cond = ZC1_EMPTY;
3152 	} else {
3153 		zsp->z_cond = ZC4_CLOSED;
3154 		devip->nr_closed++;
3155 	}
3156 }
3157 
3158 static void zbc_close_imp_open_zone(struct sdebug_dev_info *devip)
3159 {
3160 	struct sdeb_zone_state *zsp = &devip->zstate[0];
3161 	unsigned int i;
3162 
3163 	for (i = 0; i < devip->nr_zones; i++, zsp++) {
3164 		if (zsp->z_cond == ZC2_IMPLICIT_OPEN) {
3165 			zbc_close_zone(devip, zsp);
3166 			return;
3167 		}
3168 	}
3169 }
3170 
3171 static void zbc_open_zone(struct sdebug_dev_info *devip,
3172 			  struct sdeb_zone_state *zsp, bool explicit)
3173 {
3174 	enum sdebug_z_cond zc;
3175 
3176 	if (!zbc_zone_is_seq(zsp))
3177 		return;
3178 
3179 	zc = zsp->z_cond;
3180 	if ((explicit && zc == ZC3_EXPLICIT_OPEN) ||
3181 	    (!explicit && zc == ZC2_IMPLICIT_OPEN))
3182 		return;
3183 
3184 	/* Close an implicit open zone if necessary */
3185 	if (explicit && zsp->z_cond == ZC2_IMPLICIT_OPEN)
3186 		zbc_close_zone(devip, zsp);
3187 	else if (devip->max_open &&
3188 		 devip->nr_imp_open + devip->nr_exp_open >= devip->max_open)
3189 		zbc_close_imp_open_zone(devip);
3190 
3191 	if (zsp->z_cond == ZC4_CLOSED)
3192 		devip->nr_closed--;
3193 	if (explicit) {
3194 		zsp->z_cond = ZC3_EXPLICIT_OPEN;
3195 		devip->nr_exp_open++;
3196 	} else {
3197 		zsp->z_cond = ZC2_IMPLICIT_OPEN;
3198 		devip->nr_imp_open++;
3199 	}
3200 }
3201 
3202 static inline void zbc_set_zone_full(struct sdebug_dev_info *devip,
3203 				     struct sdeb_zone_state *zsp)
3204 {
3205 	switch (zsp->z_cond) {
3206 	case ZC2_IMPLICIT_OPEN:
3207 		devip->nr_imp_open--;
3208 		break;
3209 	case ZC3_EXPLICIT_OPEN:
3210 		devip->nr_exp_open--;
3211 		break;
3212 	default:
3213 		WARN_ONCE(true, "Invalid zone %llu condition %x\n",
3214 			  zsp->z_start, zsp->z_cond);
3215 		break;
3216 	}
3217 	zsp->z_cond = ZC5_FULL;
3218 }
3219 
3220 static void zbc_inc_wp(struct sdebug_dev_info *devip,
3221 		       unsigned long long lba, unsigned int num)
3222 {
3223 	struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
3224 	unsigned long long n, end, zend = zsp->z_start + zsp->z_size;
3225 
3226 	if (!zbc_zone_is_seq(zsp))
3227 		return;
3228 
3229 	if (zsp->z_type == ZBC_ZTYPE_SWR) {
3230 		zsp->z_wp += num;
3231 		if (zsp->z_wp >= zend)
3232 			zbc_set_zone_full(devip, zsp);
3233 		return;
3234 	}
3235 
3236 	while (num) {
3237 		if (lba != zsp->z_wp)
3238 			zsp->z_non_seq_resource = true;
3239 
3240 		end = lba + num;
3241 		if (end >= zend) {
3242 			n = zend - lba;
3243 			zsp->z_wp = zend;
3244 		} else if (end > zsp->z_wp) {
3245 			n = num;
3246 			zsp->z_wp = end;
3247 		} else {
3248 			n = num;
3249 		}
3250 		if (zsp->z_wp >= zend)
3251 			zbc_set_zone_full(devip, zsp);
3252 
3253 		num -= n;
3254 		lba += n;
3255 		if (num) {
3256 			zsp++;
3257 			zend = zsp->z_start + zsp->z_size;
3258 		}
3259 	}
3260 }
3261 
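/*
 * Example for the sequential-write-preferred (SWP) loop above, purely
 * illustrative: a zone with z_start = 0, z_size = 8 and z_wp = 4 that
 * receives a 2-block write at lba = 6 is written non-sequentially, so
 * z_non_seq_resource is set; end = 8 equals zend, so z_wp jumps to zend
 * and the zone is marked full. SWR zones never reach the loop because
 * check_zbc_access_params() has already forced lba == z_wp, hence the
 * simple "z_wp += num" fast path.
 */
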
3262 static int check_zbc_access_params(struct scsi_cmnd *scp,
3263 			unsigned long long lba, unsigned int num, bool write)
3264 {
3265 	struct scsi_device *sdp = scp->device;
3266 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
3267 	struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
3268 	struct sdeb_zone_state *zsp_end = zbc_zone(devip, lba + num - 1);
3269 
3270 	if (!write) {
3271 		/* For host-managed, reads cannot cross zone type boundaries */
3272 		if (zsp->z_type != zsp_end->z_type) {
3273 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
3274 					LBA_OUT_OF_RANGE,
3275 					READ_INVDATA_ASCQ);
3276 			return check_condition_result;
3277 		}
3278 		return 0;
3279 	}
3280 
3281 	/* Writing into a gap zone is not allowed */
3282 	if (zbc_zone_is_gap(zsp)) {
3283 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE,
3284 				ATTEMPT_ACCESS_GAP);
3285 		return check_condition_result;
3286 	}
3287 
3288 	/* No restrictions for writes within conventional zones */
3289 	if (zbc_zone_is_conv(zsp)) {
3290 		if (!zbc_zone_is_conv(zsp_end)) {
3291 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
3292 					LBA_OUT_OF_RANGE,
3293 					WRITE_BOUNDARY_ASCQ);
3294 			return check_condition_result;
3295 		}
3296 		return 0;
3297 	}
3298 
3299 	if (zsp->z_type == ZBC_ZTYPE_SWR) {
3300 		/* Writes cannot cross sequential zone boundaries */
3301 		if (zsp_end != zsp) {
3302 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
3303 					LBA_OUT_OF_RANGE,
3304 					WRITE_BOUNDARY_ASCQ);
3305 			return check_condition_result;
3306 		}
3307 		/* Cannot write full zones */
3308 		if (zsp->z_cond == ZC5_FULL) {
3309 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
3310 					INVALID_FIELD_IN_CDB, 0);
3311 			return check_condition_result;
3312 		}
3313 		/* Writes must be aligned to the zone WP */
3314 		if (lba != zsp->z_wp) {
3315 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
3316 					LBA_OUT_OF_RANGE,
3317 					UNALIGNED_WRITE_ASCQ);
3318 			return check_condition_result;
3319 		}
3320 	}
3321 
3322 	/* Handle implicit open of closed and empty zones */
3323 	if (zsp->z_cond == ZC1_EMPTY || zsp->z_cond == ZC4_CLOSED) {
3324 		if (devip->max_open &&
3325 		    devip->nr_exp_open >= devip->max_open) {
3326 			mk_sense_buffer(scp, DATA_PROTECT,
3327 					INSUFF_RES_ASC,
3328 					INSUFF_ZONE_ASCQ);
3329 			return check_condition_result;
3330 		}
3331 		zbc_open_zone(devip, zsp, false);
3332 	}
3333 
3334 	return 0;
3335 }
3336 
3337 static inline int
3338 check_device_access_params(struct scsi_cmnd *scp, unsigned long long lba,
3339 			   unsigned int num, bool write)
3340 {
3341 	struct scsi_device *sdp = scp->device;
3342 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
3343 
3344 	if (lba + num > sdebug_capacity) {
3345 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
3346 		return check_condition_result;
3347 	}
3348 	/* transfer length excessive (tie in to block limits VPD page) */
3349 	if (num > sdebug_store_sectors) {
3350 		/* needs work to find which cdb byte 'num' comes from */
3351 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3352 		return check_condition_result;
3353 	}
3354 	if (write && unlikely(sdebug_wp)) {
3355 		mk_sense_buffer(scp, DATA_PROTECT, WRITE_PROTECTED, 0x2);
3356 		return check_condition_result;
3357 	}
3358 	if (sdebug_dev_is_zoned(devip))
3359 		return check_zbc_access_params(scp, lba, num, write);
3360 
3361 	return 0;
3362 }
3363 
3364 /*
3365  * Note: if BUG_ON() fires it usually indicates a problem with the parser
3366  * tables. Perhaps a missing F_FAKE_RW or FF_MEDIA_IO flag. Response functions
3367  * that access any of the "stores" in struct sdeb_store_info should call this
3368  * function with bug_if_fake_rw set to true.
3369  */
3370 static inline struct sdeb_store_info *devip2sip(struct sdebug_dev_info *devip,
3371 						bool bug_if_fake_rw)
3372 {
3373 	if (sdebug_fake_rw) {
3374 		BUG_ON(bug_if_fake_rw);	/* See note above */
3375 		return NULL;
3376 	}
3377 	return xa_load(per_store_ap, devip->sdbg_host->si_idx);
3378 }
3379 
3380 /* Returns number of bytes copied or -1 if error. */
3381 static int do_device_access(struct sdeb_store_info *sip, struct scsi_cmnd *scp,
3382 			    u32 sg_skip, u64 lba, u32 num, bool do_write,
3383 			    u8 group_number)
3384 {
3385 	int ret;
3386 	u64 block, rest = 0;
3387 	enum dma_data_direction dir;
3388 	struct scsi_data_buffer *sdb = &scp->sdb;
3389 	u8 *fsp;
3390 
3391 	if (do_write) {
3392 		dir = DMA_TO_DEVICE;
3393 		write_since_sync = true;
3394 	} else {
3395 		dir = DMA_FROM_DEVICE;
3396 	}
3397 
3398 	if (!sdb->length || !sip)
3399 		return 0;
3400 	if (scp->sc_data_direction != dir)
3401 		return -1;
3402 
3403 	if (do_write && group_number < ARRAY_SIZE(writes_by_group_number))
3404 		atomic_long_inc(&writes_by_group_number[group_number]);
3405 
3406 	fsp = sip->storep;
3407 
3408 	block = do_div(lba, sdebug_store_sectors);
3409 	if (block + num > sdebug_store_sectors)
3410 		rest = block + num - sdebug_store_sectors;
3411 
3412 	ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
3413 		   fsp + (block * sdebug_sector_size),
3414 		   (num - rest) * sdebug_sector_size, sg_skip, do_write);
3415 	if (ret != (num - rest) * sdebug_sector_size)
3416 		return ret;
3417 
3418 	if (rest) {
3419 		ret += sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
3420 			    fsp, rest * sdebug_sector_size,
3421 			    sg_skip + ((num - rest) * sdebug_sector_size),
3422 			    do_write);
3423 	}
3424 
3425 	return ret;
3426 }
3427 
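/*
 * Illustration of the wrap-around arithmetic above (no extra logic):
 * with sdebug_store_sectors = 1000, a 16-sector access at lba = 2990
 * gives block = do_div(lba, 1000) = 990 and rest = 990 + 16 - 1000 = 6,
 * so 10 sectors are copied at the tail of the store and the remaining 6
 * wrap to its start. comp_write_worker() and resp_write_same() below
 * use the same scheme.
 */
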
3428 /* Returns number of bytes copied or -1 if error. */
3429 static int do_dout_fetch(struct scsi_cmnd *scp, u32 num, u8 *doutp)
3430 {
3431 	struct scsi_data_buffer *sdb = &scp->sdb;
3432 
3433 	if (!sdb->length)
3434 		return 0;
3435 	if (scp->sc_data_direction != DMA_TO_DEVICE)
3436 		return -1;
3437 	return sg_copy_buffer(sdb->table.sgl, sdb->table.nents, doutp,
3438 			      num * sdebug_sector_size, 0, true);
3439 }
3440 
3441 /* If the first num blocks at sip->storep+lba compare equal to the first
3442  * num blocks of arr, then copy the second num blocks of arr into
3443  * sip->storep+lba and return true. If the comparison fails, return false. */
3444 static bool comp_write_worker(struct sdeb_store_info *sip, u64 lba, u32 num,
3445 			      const u8 *arr, bool compare_only)
3446 {
3447 	bool res;
3448 	u64 block, rest = 0;
3449 	u32 store_blks = sdebug_store_sectors;
3450 	u32 lb_size = sdebug_sector_size;
3451 	u8 *fsp = sip->storep;
3452 
3453 	block = do_div(lba, store_blks);
3454 	if (block + num > store_blks)
3455 		rest = block + num - store_blks;
3456 
3457 	res = !memcmp(fsp + (block * lb_size), arr, (num - rest) * lb_size);
3458 	if (!res)
3459 		return res;
3460 	if (rest)
3461 		res = !memcmp(fsp, arr + ((num - rest) * lb_size),
3462 			      rest * lb_size);
3463 	if (!res)
3464 		return res;
3465 	if (compare_only)
3466 		return true;
3467 	arr += num * lb_size;
3468 	memcpy(fsp + (block * lb_size), arr, (num - rest) * lb_size);
3469 	if (rest)
3470 		memcpy(fsp, arr + ((num - rest) * lb_size), rest * lb_size);
3471 	return res;
3472 }
3473 
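/*
 * The arr layout expected by comp_write_worker() mirrors the COMPARE
 * AND WRITE data-out buffer: the first num blocks hold the verify data
 * and the second num blocks the data to write on a successful compare.
 * Sketch of a one-block (lb_size byte) invocation, for illustration:
 *
 *	u8 buf[2 * lb_size];
 *	memcpy(buf, expected, lb_size);			(compare half)
 *	memcpy(buf + lb_size, new_data, lb_size);	(write half)
 *	ok = comp_write_worker(sip, lba, 1, buf, false);
 */
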
3474 static __be16 dif_compute_csum(const void *buf, int len)
3475 {
3476 	__be16 csum;
3477 
3478 	if (sdebug_guard)
3479 		csum = (__force __be16)ip_compute_csum(buf, len);
3480 	else
3481 		csum = cpu_to_be16(crc_t10dif(buf, len));
3482 
3483 	return csum;
3484 }
3485 
3486 static int dif_verify(struct t10_pi_tuple *sdt, const void *data,
3487 		      sector_t sector, u32 ei_lba)
3488 {
3489 	__be16 csum = dif_compute_csum(data, sdebug_sector_size);
3490 
3491 	if (sdt->guard_tag != csum) {
3492 		pr_err("GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
3493 			(unsigned long)sector,
3494 			be16_to_cpu(sdt->guard_tag),
3495 			be16_to_cpu(csum));
3496 		return 0x01;
3497 	}
3498 	if (sdebug_dif == T10_PI_TYPE1_PROTECTION &&
3499 	    be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
3500 		pr_err("REF check failed on sector %lu\n",
3501 			(unsigned long)sector);
3502 		return 0x03;
3503 	}
3504 	if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3505 	    be32_to_cpu(sdt->ref_tag) != ei_lba) {
3506 		pr_err("REF check failed on sector %lu\n",
3507 			(unsigned long)sector);
3508 		return 0x03;
3509 	}
3510 	return 0;
3511 }
3512 
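/*
 * Each protection interval carries an 8-byte tuple (struct t10_pi_tuple
 * from <linux/t10-pi.h>): a 2-byte guard tag (CRC16-T10DIF, or the IP
 * checksum when sdebug_guard is set), a 2-byte application tag (0xffff
 * means "do not check", see prot_verify_read() below) and a 4-byte
 * reference tag (low 32 bits of the LBA for Type 1, the expected
 * initial LBA for Type 2, unchecked for Type 3). Schematically, for a
 * Type 1 sector:
 *
 *	sdt->guard_tag = dif_compute_csum(data, sdebug_sector_size);
 *	sdt->app_tag = cpu_to_be16(0);
 *	sdt->ref_tag = cpu_to_be32(lba & 0xffffffff);
 */
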
3513 static void dif_copy_prot(struct scsi_cmnd *scp, sector_t sector,
3514 			  unsigned int sectors, bool read)
3515 {
3516 	size_t resid;
3517 	void *paddr;
3518 	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3519 						scp->device->hostdata, true);
3520 	struct t10_pi_tuple *dif_storep = sip->dif_storep;
3521 	const void *dif_store_end = dif_storep + sdebug_store_sectors;
3522 	struct sg_mapping_iter miter;
3523 
3524 	/* Bytes of protection data to copy into sgl */
3525 	resid = sectors * sizeof(*dif_storep);
3526 
3527 	sg_miter_start(&miter, scsi_prot_sglist(scp),
3528 		       scsi_prot_sg_count(scp), SG_MITER_ATOMIC |
3529 		       (read ? SG_MITER_TO_SG : SG_MITER_FROM_SG));
3530 
3531 	while (sg_miter_next(&miter) && resid > 0) {
3532 		size_t len = min_t(size_t, miter.length, resid);
3533 		void *start = dif_store(sip, sector);
3534 		size_t rest = 0;
3535 
3536 		if (dif_store_end < start + len)
3537 			rest = start + len - dif_store_end;
3538 
3539 		paddr = miter.addr;
3540 
3541 		if (read)
3542 			memcpy(paddr, start, len - rest);
3543 		else
3544 			memcpy(start, paddr, len - rest);
3545 
3546 		if (rest) {
3547 			if (read)
3548 				memcpy(paddr + len - rest, dif_storep, rest);
3549 			else
3550 				memcpy(dif_storep, paddr + len - rest, rest);
3551 		}
3552 
3553 		sector += len / sizeof(*dif_storep);
3554 		resid -= len;
3555 	}
3556 	sg_miter_stop(&miter);
3557 }
3558 
3559 static int prot_verify_read(struct scsi_cmnd *scp, sector_t start_sec,
3560 			    unsigned int sectors, u32 ei_lba)
3561 {
3562 	int ret = 0;
3563 	unsigned int i;
3564 	sector_t sector;
3565 	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3566 						scp->device->hostdata, true);
3567 	struct t10_pi_tuple *sdt;
3568 
3569 	for (i = 0; i < sectors; i++, ei_lba++) {
3570 		sector = start_sec + i;
3571 		sdt = dif_store(sip, sector);
3572 
3573 		if (sdt->app_tag == cpu_to_be16(0xffff))
3574 			continue;
3575 
3576 		/*
3577 		 * Because scsi_debug acts as both initiator and
3578 		 * target we proceed to verify the PI even if
3579 		 * RDPROTECT=3. This is done so the "initiator" knows
3580 		 * which type of error to return. Otherwise we would
3581 		 * have to iterate over the PI twice.
3582 		 */
3583 		if (scp->cmnd[1] >> 5) { /* RDPROTECT */
3584 			ret = dif_verify(sdt, lba2fake_store(sip, sector),
3585 					 sector, ei_lba);
3586 			if (ret) {
3587 				dif_errors++;
3588 				break;
3589 			}
3590 		}
3591 	}
3592 
3593 	dif_copy_prot(scp, start_sec, sectors, true);
3594 	dix_reads++;
3595 
3596 	return ret;
3597 }
3598 
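/*
 * The four helpers below guard access to a store. With the
 * sdebug_no_rwlock module parameter set, real locking is skipped for
 * speed but __acquire()/__release() are still issued so that sparse's
 * lock-context checking stays balanced. Readers of a store take the
 * read side; anything that may modify it (writes, UNMAP, COMPARE AND
 * WRITE) takes the write side. When there is no backing store
 * (fake_rw), the global sdeb_fake_rw_lck keeps the annotations
 * consistent.
 */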
3599 static inline void
3600 sdeb_read_lock(struct sdeb_store_info *sip)
3601 {
3602 	if (sdebug_no_rwlock) {
3603 		if (sip)
3604 			__acquire(&sip->macc_lck);
3605 		else
3606 			__acquire(&sdeb_fake_rw_lck);
3607 	} else {
3608 		if (sip)
3609 			read_lock(&sip->macc_lck);
3610 		else
3611 			read_lock(&sdeb_fake_rw_lck);
3612 	}
3613 }
3614 
3615 static inline void
3616 sdeb_read_unlock(struct sdeb_store_info *sip)
3617 {
3618 	if (sdebug_no_rwlock) {
3619 		if (sip)
3620 			__release(&sip->macc_lck);
3621 		else
3622 			__release(&sdeb_fake_rw_lck);
3623 	} else {
3624 		if (sip)
3625 			read_unlock(&sip->macc_lck);
3626 		else
3627 			read_unlock(&sdeb_fake_rw_lck);
3628 	}
3629 }
3630 
3631 static inline void
3632 sdeb_write_lock(struct sdeb_store_info *sip)
3633 {
3634 	if (sdebug_no_rwlock) {
3635 		if (sip)
3636 			__acquire(&sip->macc_lck);
3637 		else
3638 			__acquire(&sdeb_fake_rw_lck);
3639 	} else {
3640 		if (sip)
3641 			write_lock(&sip->macc_lck);
3642 		else
3643 			write_lock(&sdeb_fake_rw_lck);
3644 	}
3645 }
3646 
3647 static inline void
3648 sdeb_write_unlock(struct sdeb_store_info *sip)
3649 {
3650 	if (sdebug_no_rwlock) {
3651 		if (sip)
3652 			__release(&sip->macc_lck);
3653 		else
3654 			__release(&sdeb_fake_rw_lck);
3655 	} else {
3656 		if (sip)
3657 			write_unlock(&sip->macc_lck);
3658 		else
3659 			write_unlock(&sdeb_fake_rw_lck);
3660 	}
3661 }
3662 
3663 static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3664 {
3665 	bool check_prot;
3666 	u32 num;
3667 	u32 ei_lba;
3668 	int ret;
3669 	u64 lba;
3670 	struct sdeb_store_info *sip = devip2sip(devip, true);
3671 	u8 *cmd = scp->cmnd;
3672 
3673 	switch (cmd[0]) {
3674 	case READ_16:
3675 		ei_lba = 0;
3676 		lba = get_unaligned_be64(cmd + 2);
3677 		num = get_unaligned_be32(cmd + 10);
3678 		check_prot = true;
3679 		break;
3680 	case READ_10:
3681 		ei_lba = 0;
3682 		lba = get_unaligned_be32(cmd + 2);
3683 		num = get_unaligned_be16(cmd + 7);
3684 		check_prot = true;
3685 		break;
3686 	case READ_6:
3687 		ei_lba = 0;
3688 		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
3689 		      (u32)(cmd[1] & 0x1f) << 16;
3690 		num = (0 == cmd[4]) ? 256 : cmd[4];
3691 		check_prot = true;
3692 		break;
3693 	case READ_12:
3694 		ei_lba = 0;
3695 		lba = get_unaligned_be32(cmd + 2);
3696 		num = get_unaligned_be32(cmd + 6);
3697 		check_prot = true;
3698 		break;
3699 	case XDWRITEREAD_10:
3700 		ei_lba = 0;
3701 		lba = get_unaligned_be32(cmd + 2);
3702 		num = get_unaligned_be16(cmd + 7);
3703 		check_prot = false;
3704 		break;
3705 	default:	/* assume READ(32) */
3706 		lba = get_unaligned_be64(cmd + 12);
3707 		ei_lba = get_unaligned_be32(cmd + 20);
3708 		num = get_unaligned_be32(cmd + 28);
3709 		check_prot = false;
3710 		break;
3711 	}
3712 	if (unlikely(have_dif_prot && check_prot)) {
3713 		if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3714 		    (cmd[1] & 0xe0)) {
3715 			mk_sense_invalid_opcode(scp);
3716 			return check_condition_result;
3717 		}
3718 		if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3719 		     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3720 		    (cmd[1] & 0xe0) == 0)
3721 			sdev_printk(KERN_ERR, scp->device, "Unprotected RD "
3722 				    "to DIF device\n");
3723 	}
3724 	if (unlikely((sdebug_opts & SDEBUG_OPT_SHORT_TRANSFER) &&
3725 		     atomic_read(&sdeb_inject_pending))) {
3726 		num /= 2;
3727 		atomic_set(&sdeb_inject_pending, 0);
3728 	}
3729 
3730 	ret = check_device_access_params(scp, lba, num, false);
3731 	if (ret)
3732 		return ret;
3733 	if (unlikely((SDEBUG_OPT_MEDIUM_ERR & sdebug_opts) &&
3734 		     (lba <= (sdebug_medium_error_start + sdebug_medium_error_count - 1)) &&
3735 		     ((lba + num) > sdebug_medium_error_start))) {
3736 		/* claim unrecoverable read error */
3737 		mk_sense_buffer(scp, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
3738 		/* set info field and valid bit for fixed descriptor */
3739 		if (0x70 == (scp->sense_buffer[0] & 0x7f)) {
3740 			scp->sense_buffer[0] |= 0x80;	/* Valid bit */
3741 			ret = (lba < OPT_MEDIUM_ERR_ADDR)
3742 			      ? OPT_MEDIUM_ERR_ADDR : (int)lba;
3743 			put_unaligned_be32(ret, scp->sense_buffer + 3);
3744 		}
3745 		scsi_set_resid(scp, scsi_bufflen(scp));
3746 		return check_condition_result;
3747 	}
3748 
3749 	sdeb_read_lock(sip);
3750 
3751 	/* DIX + T10 DIF */
3752 	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3753 		switch (prot_verify_read(scp, lba, num, ei_lba)) {
3754 		case 1: /* Guard tag error */
3755 			if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */
3756 				sdeb_read_unlock(sip);
3757 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3758 				return check_condition_result;
3759 			} else if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) {
3760 				sdeb_read_unlock(sip);
3761 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3762 				return illegal_condition_result;
3763 			}
3764 			break;
3765 		case 3: /* Reference tag error */
3766 			if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */
3767 				sdeb_read_unlock(sip);
3768 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3);
3769 				return check_condition_result;
3770 			} else if (scp->prot_flags & SCSI_PROT_REF_CHECK) {
3771 				sdeb_read_unlock(sip);
3772 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3);
3773 				return illegal_condition_result;
3774 			}
3775 			break;
3776 		}
3777 	}
3778 
3779 	ret = do_device_access(sip, scp, 0, lba, num, false, 0);
3780 	sdeb_read_unlock(sip);
3781 	if (unlikely(ret == -1))
3782 		return DID_ERROR << 16;
3783 
3784 	scsi_set_resid(scp, scsi_bufflen(scp) - ret);
3785 
3786 	if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
3787 		     atomic_read(&sdeb_inject_pending))) {
3788 		if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
3789 			mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
3790 			atomic_set(&sdeb_inject_pending, 0);
3791 			return check_condition_result;
3792 		} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
3793 			/* Logical block guard check failed */
3794 			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3795 			atomic_set(&sdeb_inject_pending, 0);
3796 			return illegal_condition_result;
3797 		} else if (SDEBUG_OPT_DIX_ERR & sdebug_opts) {
3798 			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3799 			atomic_set(&sdeb_inject_pending, 0);
3800 			return illegal_condition_result;
3801 		}
3802 	}
3803 	return 0;
3804 }
3805 
3806 static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
3807 			     unsigned int sectors, u32 ei_lba)
3808 {
3809 	int ret;
3810 	struct t10_pi_tuple *sdt;
3811 	void *daddr;
3812 	sector_t sector = start_sec;
3813 	int ppage_offset;
3814 	int dpage_offset;
3815 	struct sg_mapping_iter diter;
3816 	struct sg_mapping_iter piter;
3817 
3818 	BUG_ON(scsi_sg_count(SCpnt) == 0);
3819 	BUG_ON(scsi_prot_sg_count(SCpnt) == 0);
3820 
3821 	sg_miter_start(&piter, scsi_prot_sglist(SCpnt),
3822 			scsi_prot_sg_count(SCpnt),
3823 			SG_MITER_ATOMIC | SG_MITER_FROM_SG);
3824 	sg_miter_start(&diter, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
3825 			SG_MITER_ATOMIC | SG_MITER_FROM_SG);
3826 
3827 	/* For each protection page */
3828 	while (sg_miter_next(&piter)) {
3829 		dpage_offset = 0;
3830 		if (WARN_ON(!sg_miter_next(&diter))) {
3831 			ret = 0x01;
3832 			goto out;
3833 		}
3834 
3835 		for (ppage_offset = 0; ppage_offset < piter.length;
3836 		     ppage_offset += sizeof(struct t10_pi_tuple)) {
3837 			/* If we're at the end of the current
3838 			 * data page advance to the next one
3839 			 */
3840 			if (dpage_offset >= diter.length) {
3841 				if (WARN_ON(!sg_miter_next(&diter))) {
3842 					ret = 0x01;
3843 					goto out;
3844 				}
3845 				dpage_offset = 0;
3846 			}
3847 
3848 			sdt = piter.addr + ppage_offset;
3849 			daddr = diter.addr + dpage_offset;
3850 
3851 			if (SCpnt->cmnd[1] >> 5 != 3) { /* WRPROTECT */
3852 				ret = dif_verify(sdt, daddr, sector, ei_lba);
3853 				if (ret)
3854 					goto out;
3855 			}
3856 
3857 			sector++;
3858 			ei_lba++;
3859 			dpage_offset += sdebug_sector_size;
3860 		}
3861 		diter.consumed = dpage_offset;
3862 		sg_miter_stop(&diter);
3863 	}
3864 	sg_miter_stop(&piter);
3865 
3866 	dif_copy_prot(SCpnt, start_sec, sectors, false);
3867 	dix_writes++;
3868 
3869 	return 0;
3870 
3871 out:
3872 	dif_errors++;
3873 	sg_miter_stop(&diter);
3874 	sg_miter_stop(&piter);
3875 	return ret;
3876 }
3877 
3878 static unsigned long lba_to_map_index(sector_t lba)
3879 {
3880 	if (sdebug_unmap_alignment)
3881 		lba += sdebug_unmap_granularity - sdebug_unmap_alignment;
3882 	sector_div(lba, sdebug_unmap_granularity);
3883 	return lba;
3884 }
3885 
3886 static sector_t map_index_to_lba(unsigned long index)
3887 {
3888 	sector_t lba = index * sdebug_unmap_granularity;
3889 
3890 	if (sdebug_unmap_alignment)
3891 		lba -= sdebug_unmap_granularity - sdebug_unmap_alignment;
3892 	return lba;
3893 }
3894 
3895 static unsigned int map_state(struct sdeb_store_info *sip, sector_t lba,
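/*
 * Worked example for the two mappings above (illustration only): with
 * sdebug_unmap_granularity = 8 and sdebug_unmap_alignment = 2, bit n of
 * the provisioning bitmap covers LBAs 8n - 6 through 8n + 1:
 *
 *	lba_to_map_index(2) == (2 + 8 - 2) / 8 == 1
 *	lba_to_map_index(9) == (9 + 6) / 8     == 1
 *	map_index_to_lba(1) == 1 * 8 - 6       == 2
 *
 * so index 1 spans LBAs 2..9 and maps back to the first LBA it covers.
 */
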
3896 			      unsigned int *num)
3897 {
3898 	sector_t end;
3899 	unsigned int mapped;
3900 	unsigned long index;
3901 	unsigned long next;
3902 
3903 	index = lba_to_map_index(lba);
3904 	mapped = test_bit(index, sip->map_storep);
3905 
3906 	if (mapped)
3907 		next = find_next_zero_bit(sip->map_storep, map_size, index);
3908 	else
3909 		next = find_next_bit(sip->map_storep, map_size, index);
3910 
3911 	end = min_t(sector_t, sdebug_store_sectors,  map_index_to_lba(next));
3912 	*num = end - lba;
3913 	return mapped;
3914 }
3915 
3916 static void map_region(struct sdeb_store_info *sip, sector_t lba,
3917 		       unsigned int len)
3918 {
3919 	sector_t end = lba + len;
3920 
3921 	while (lba < end) {
3922 		unsigned long index = lba_to_map_index(lba);
3923 
3924 		if (index < map_size)
3925 			set_bit(index, sip->map_storep);
3926 
3927 		lba = map_index_to_lba(index + 1);
3928 	}
3929 }
3930 
3931 static void unmap_region(struct sdeb_store_info *sip, sector_t lba,
3932 			 unsigned int len)
3933 {
3934 	sector_t end = lba + len;
3935 	u8 *fsp = sip->storep;
3936 
3937 	while (lba < end) {
3938 		unsigned long index = lba_to_map_index(lba);
3939 
3940 		if (lba == map_index_to_lba(index) &&
3941 		    lba + sdebug_unmap_granularity <= end &&
3942 		    index < map_size) {
3943 			clear_bit(index, sip->map_storep);
3944 			if (sdebug_lbprz) {  /* for LBPRZ=2 return 0xff_s */
3945 				memset(fsp + lba * sdebug_sector_size,
3946 				       (sdebug_lbprz & 1) ? 0 : 0xff,
3947 				       sdebug_sector_size *
3948 				       sdebug_unmap_granularity);
3949 			}
3950 			if (sip->dif_storep) {
3951 				memset(sip->dif_storep + lba, 0xff,
3952 				       sizeof(*sip->dif_storep) *
3953 				       sdebug_unmap_granularity);
3954 			}
3955 		}
3956 		lba = map_index_to_lba(index + 1);
3957 	}
3958 }
3959 
3960 static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3961 {
3962 	bool check_prot;
3963 	u32 num;
3964 	u8 group = 0;
3965 	u32 ei_lba;
3966 	int ret;
3967 	u64 lba;
3968 	struct sdeb_store_info *sip = devip2sip(devip, true);
3969 	u8 *cmd = scp->cmnd;
3970 
3971 	switch (cmd[0]) {
3972 	case WRITE_16:
3973 		ei_lba = 0;
3974 		lba = get_unaligned_be64(cmd + 2);
3975 		num = get_unaligned_be32(cmd + 10);
3976 		group = cmd[14] & 0x3f;
3977 		check_prot = true;
3978 		break;
3979 	case WRITE_10:
3980 		ei_lba = 0;
3981 		lba = get_unaligned_be32(cmd + 2);
3982 		group = cmd[6] & 0x3f;
3983 		num = get_unaligned_be16(cmd + 7);
3984 		check_prot = true;
3985 		break;
3986 	case WRITE_6:
3987 		ei_lba = 0;
3988 		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
3989 		      (u32)(cmd[1] & 0x1f) << 16;
3990 		num = (0 == cmd[4]) ? 256 : cmd[4];
3991 		check_prot = true;
3992 		break;
3993 	case WRITE_12:
3994 		ei_lba = 0;
3995 		lba = get_unaligned_be32(cmd + 2);
3996 		num = get_unaligned_be32(cmd + 6);
3997 		group = cmd[10] & 0x3f;	/* group number is byte 10 in WRITE(12) */
3998 		check_prot = true;
3999 		break;
4000 	case 0x53:	/* XDWRITEREAD(10) */
4001 		ei_lba = 0;
4002 		lba = get_unaligned_be32(cmd + 2);
4003 		group = cmd[6] & 0x1f;
4004 		num = get_unaligned_be16(cmd + 7);
4005 		check_prot = false;
4006 		break;
4007 	default:	/* assume WRITE(32) */
4008 		group = cmd[6] & 0x3f;
4009 		lba = get_unaligned_be64(cmd + 12);
4010 		ei_lba = get_unaligned_be32(cmd + 20);
4011 		num = get_unaligned_be32(cmd + 28);
4012 		check_prot = false;
4013 		break;
4014 	}
4015 	if (unlikely(have_dif_prot && check_prot)) {
4016 		if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
4017 		    (cmd[1] & 0xe0)) {
4018 			mk_sense_invalid_opcode(scp);
4019 			return check_condition_result;
4020 		}
4021 		if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
4022 		     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
4023 		    (cmd[1] & 0xe0) == 0)
4024 			sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
4025 				    "to DIF device\n");
4026 	}
4027 
4028 	sdeb_write_lock(sip);
4029 	ret = check_device_access_params(scp, lba, num, true);
4030 	if (ret) {
4031 		sdeb_write_unlock(sip);
4032 		return ret;
4033 	}
4034 
4035 	/* DIX + T10 DIF */
4036 	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
4037 		switch (prot_verify_write(scp, lba, num, ei_lba)) {
4038 		case 1: /* Guard tag error */
4039 			if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) {
4040 				sdeb_write_unlock(sip);
4041 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
4042 				return illegal_condition_result;
4043 			} else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */
4044 				sdeb_write_unlock(sip);
4045 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
4046 				return check_condition_result;
4047 			}
4048 			break;
4049 		case 3: /* Reference tag error */
4050 			if (scp->prot_flags & SCSI_PROT_REF_CHECK) {
4051 				sdeb_write_unlock(sip);
4052 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3);
4053 				return illegal_condition_result;
4054 			} else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */
4055 				sdeb_write_unlock(sip);
4056 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3);
4057 				return check_condition_result;
4058 			}
4059 			break;
4060 		}
4061 	}
4062 
4063 	ret = do_device_access(sip, scp, 0, lba, num, true, group);
4064 	if (unlikely(scsi_debug_lbp()))
4065 		map_region(sip, lba, num);
4066 	/* If ZBC zone then bump its write pointer */
4067 	if (sdebug_dev_is_zoned(devip))
4068 		zbc_inc_wp(devip, lba, num);
4069 	sdeb_write_unlock(sip);
4070 	if (unlikely(-1 == ret))
4071 		return DID_ERROR << 16;
4072 	else if (unlikely(sdebug_verbose &&
4073 			  (ret < (num * sdebug_sector_size))))
4074 		sdev_printk(KERN_INFO, scp->device,
4075 			    "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
4076 			    my_name, num * sdebug_sector_size, ret);
4077 
4078 	if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
4079 		     atomic_read(&sdeb_inject_pending))) {
4080 		if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
4081 			mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
4082 			atomic_set(&sdeb_inject_pending, 0);
4083 			return check_condition_result;
4084 		} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
4085 			/* Logical block guard check failed */
4086 			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
4087 			atomic_set(&sdeb_inject_pending, 0);
4088 			return illegal_condition_result;
4089 		} else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
4090 			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
4091 			atomic_set(&sdeb_inject_pending, 0);
4092 			return illegal_condition_result;
4093 		}
4094 	}
4095 	return 0;
4096 }
4097 
4098 /*
4099  * T10 has only specified WRITE SCATTERED(16) and WRITE SCATTERED(32).
4100  * No READ GATHERED yet (requires bidi or long cdb holding gather list).
4101  */
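/*
 * The data-out buffer parsed below starts with lbdof logical blocks of
 * header space: a 32-byte parameter list header followed by 32-byte LBA
 * range descriptors (LRDs), with the write data after that. Each LRD,
 * as consumed in the loop below:
 *
 *	bytes  0..7	LOGICAL BLOCK ADDRESS (big endian)
 *	bytes  8..11	NUMBER OF LOGICAL BLOCKS
 *	bytes 12..15	EXPECTED INITIAL LOGICAL BLOCK REFERENCE TAG
 *			(used by WRITE SCATTERED(32) only)
 *	bytes 16..31	reserved
 */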
4102 static int resp_write_scat(struct scsi_cmnd *scp,
4103 			   struct sdebug_dev_info *devip)
4104 {
4105 	u8 *cmd = scp->cmnd;
4106 	u8 *lrdp = NULL;
4107 	u8 *up;
4108 	struct sdeb_store_info *sip = devip2sip(devip, true);
4109 	u8 wrprotect;
4110 	u16 lbdof, num_lrd, k;
4111 	u32 num, num_by, bt_len, lbdof_blen, sg_off, cum_lb;
4112 	u32 lb_size = sdebug_sector_size;
4113 	u32 ei_lba;
4114 	u64 lba;
4115 	u8 group;
4116 	int ret, res;
4117 	bool is_16;
4118 	static const u32 lrd_size = 32; /* LRD size; the list header is the same size */
4119 
4120 	if (cmd[0] == VARIABLE_LENGTH_CMD) {
4121 		is_16 = false;
4122 		group = cmd[6] & 0x3f;
4123 		wrprotect = (cmd[10] >> 5) & 0x7;
4124 		lbdof = get_unaligned_be16(cmd + 12);
4125 		num_lrd = get_unaligned_be16(cmd + 16);
4126 		bt_len = get_unaligned_be32(cmd + 28);
4127 	} else {        /* that leaves WRITE SCATTERED(16) */
4128 		is_16 = true;
4129 		wrprotect = (cmd[2] >> 5) & 0x7;
4130 		lbdof = get_unaligned_be16(cmd + 4);
4131 		num_lrd = get_unaligned_be16(cmd + 8);
4132 		bt_len = get_unaligned_be32(cmd + 10);
4133 		group = cmd[14] & 0x3f;
4134 		if (unlikely(have_dif_prot)) {
4135 			if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
4136 			    wrprotect) {
4137 				mk_sense_invalid_opcode(scp);
4138 				return illegal_condition_result;
4139 			}
4140 			if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
4141 			     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
4142 			     wrprotect == 0)
4143 				sdev_printk(KERN_ERR, scp->device,
4144 					    "Unprotected WR to DIF device\n");
4145 		}
4146 	}
4147 	if ((num_lrd == 0) || (bt_len == 0))
4148 		return 0;       /* T10 says these do-nothings are not errors */
4149 	if (lbdof == 0) {
4150 		if (sdebug_verbose)
4151 			sdev_printk(KERN_INFO, scp->device,
4152 				"%s: %s: LB Data Offset field bad\n",
4153 				my_name, __func__);
4154 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4155 		return illegal_condition_result;
4156 	}
4157 	lbdof_blen = lbdof * lb_size;
4158 	if ((lrd_size + (num_lrd * lrd_size)) > lbdof_blen) {
4159 		if (sdebug_verbose)
4160 			sdev_printk(KERN_INFO, scp->device,
4161 				"%s: %s: LBA range descriptors don't fit\n",
4162 				my_name, __func__);
4163 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4164 		return illegal_condition_result;
4165 	}
4166 	lrdp = kzalloc(lbdof_blen, GFP_ATOMIC | __GFP_NOWARN);
4167 	if (lrdp == NULL)
4168 		return SCSI_MLQUEUE_HOST_BUSY;
4169 	if (sdebug_verbose)
4170 		sdev_printk(KERN_INFO, scp->device,
4171 			"%s: %s: Fetch header+scatter_list, lbdof_blen=%u\n",
4172 			my_name, __func__, lbdof_blen);
4173 	res = fetch_to_dev_buffer(scp, lrdp, lbdof_blen);
4174 	if (res == -1) {
4175 		ret = DID_ERROR << 16;
4176 		goto err_out;
4177 	}
4178 
4179 	sdeb_write_lock(sip);
4180 	sg_off = lbdof_blen;
4181 	/* Spec: the Buffer Transfer Length field is in logical blocks, not bytes */
4182 	cum_lb = 0;
4183 	for (k = 0, up = lrdp + lrd_size; k < num_lrd; ++k, up += lrd_size) {
4184 		lba = get_unaligned_be64(up + 0);
4185 		num = get_unaligned_be32(up + 8);
4186 		if (sdebug_verbose)
4187 			sdev_printk(KERN_INFO, scp->device,
4188 				"%s: %s: k=%d  LBA=0x%llx num=%u  sg_off=%u\n",
4189 				my_name, __func__, k, lba, num, sg_off);
4190 		if (num == 0)
4191 			continue;
4192 		ret = check_device_access_params(scp, lba, num, true);
4193 		if (ret)
4194 			goto err_out_unlock;
4195 		num_by = num * lb_size;
4196 		ei_lba = is_16 ? 0 : get_unaligned_be32(up + 12);
4197 
4198 		if ((cum_lb + num) > bt_len) {
4199 			if (sdebug_verbose)
4200 				sdev_printk(KERN_INFO, scp->device,
4201 				    "%s: %s: sum of blocks > data provided\n",
4202 				    my_name, __func__);
4203 			mk_sense_buffer(scp, ILLEGAL_REQUEST, WRITE_ERROR_ASC,
4204 					0);
4205 			ret = illegal_condition_result;
4206 			goto err_out_unlock;
4207 		}
4208 
4209 		/* DIX + T10 DIF */
4210 		if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
4211 			int prot_ret = prot_verify_write(scp, lba, num,
4212 							 ei_lba);
4213 
4214 			if (prot_ret) {
4215 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10,
4216 						prot_ret);
4217 				ret = illegal_condition_result;
4218 				goto err_out_unlock;
4219 			}
4220 		}
4221 
4222 		ret = do_device_access(sip, scp, sg_off, lba, num, true, group);
4223 		/* If ZBC zone then bump its write pointer */
4224 		if (sdebug_dev_is_zoned(devip))
4225 			zbc_inc_wp(devip, lba, num);
4226 		if (unlikely(scsi_debug_lbp()))
4227 			map_region(sip, lba, num);
4228 		if (unlikely(-1 == ret)) {
4229 			ret = DID_ERROR << 16;
4230 			goto err_out_unlock;
4231 		} else if (unlikely(sdebug_verbose && (ret < num_by)))
4232 			sdev_printk(KERN_INFO, scp->device,
4233 			    "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
4234 			    my_name, num_by, ret);
4235 
4236 		if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
4237 			     atomic_read(&sdeb_inject_pending))) {
4238 			if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
4239 				mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
4240 				atomic_set(&sdeb_inject_pending, 0);
4241 				ret = check_condition_result;
4242 				goto err_out_unlock;
4243 			} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
4244 				/* Logical block guard check failed */
4245 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
4246 				atomic_set(&sdeb_inject_pending, 0);
4247 				ret = illegal_condition_result;
4248 				goto err_out_unlock;
4249 			} else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
4250 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
4251 				atomic_set(&sdeb_inject_pending, 0);
4252 				ret = illegal_condition_result;
4253 				goto err_out_unlock;
4254 			}
4255 		}
4256 		sg_off += num_by;
4257 		cum_lb += num;
4258 	}
4259 	ret = 0;
4260 err_out_unlock:
4261 	sdeb_write_unlock(sip);
4262 err_out:
4263 	kfree(lrdp);
4264 	return ret;
4265 }
4266 
4267 static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
4268 			   u32 ei_lba, bool unmap, bool ndob)
4269 {
4270 	struct scsi_device *sdp = scp->device;
4271 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
4272 	unsigned long long i;
4273 	u64 block, lbaa;
4274 	u32 lb_size = sdebug_sector_size;
4275 	int ret;
4276 	struct sdeb_store_info *sip = devip2sip(devip, true);
4278 	u8 *fs1p;
4279 	u8 *fsp;
4280 
4281 	sdeb_write_lock(sip);
4282 
4283 	ret = check_device_access_params(scp, lba, num, true);
4284 	if (ret) {
4285 		sdeb_write_unlock(sip);
4286 		return ret;
4287 	}
4288 
4289 	if (unmap && scsi_debug_lbp()) {
4290 		unmap_region(sip, lba, num);
4291 		goto out;
4292 	}
4293 	lbaa = lba;
4294 	block = do_div(lbaa, sdebug_store_sectors);
4295 	/* if ndob then zero 1 logical block, else fetch 1 logical block */
4296 	fsp = sip->storep;
4297 	fs1p = fsp + (block * lb_size);
4298 	if (ndob) {
4299 		memset(fs1p, 0, lb_size);
4300 		ret = 0;
4301 	} else
4302 		ret = fetch_to_dev_buffer(scp, fs1p, lb_size);
4303 
4304 	if (-1 == ret) {
4305 		sdeb_write_unlock(sip);
4306 		return DID_ERROR << 16;
4307 	} else if (sdebug_verbose && !ndob && (ret < lb_size))
4308 		sdev_printk(KERN_INFO, scp->device,
4309 			    "%s: %s: lb size=%u, IO sent=%d bytes\n",
4310 			    my_name, "write same", lb_size, ret);
4311 
4312 	/* Copy first sector to remaining blocks */
4313 	for (i = 1 ; i < num ; i++) {
4314 		lbaa = lba + i;
4315 		block = do_div(lbaa, sdebug_store_sectors);
4316 		memmove(fsp + (block * lb_size), fs1p, lb_size);
4317 	}
4318 	if (scsi_debug_lbp())
4319 		map_region(sip, lba, num);
4320 	/* If ZBC zone then bump its write pointer */
4321 	if (sdebug_dev_is_zoned(devip))
4322 		zbc_inc_wp(devip, lba, num);
4323 out:
4324 	sdeb_write_unlock(sip);
4325 
4326 	return 0;
4327 }
4328 
4329 static int resp_write_same_10(struct scsi_cmnd *scp,
4330 			      struct sdebug_dev_info *devip)
4331 {
4332 	u8 *cmd = scp->cmnd;
4333 	u32 lba;
4334 	u16 num;
4335 	u32 ei_lba = 0;
4336 	bool unmap = false;
4337 
4338 	if (cmd[1] & 0x8) {
4339 		if (sdebug_lbpws10 == 0) {
4340 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
4341 			return check_condition_result;
4342 		} else
4343 			unmap = true;
4344 	}
4345 	lba = get_unaligned_be32(cmd + 2);
4346 	num = get_unaligned_be16(cmd + 7);
4347 	if (num > sdebug_write_same_length) {
4348 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
4349 		return check_condition_result;
4350 	}
4351 	return resp_write_same(scp, lba, num, ei_lba, unmap, false);
4352 }
4353 
4354 static int resp_write_same_16(struct scsi_cmnd *scp,
4355 			      struct sdebug_dev_info *devip)
4356 {
4357 	u8 *cmd = scp->cmnd;
4358 	u64 lba;
4359 	u32 num;
4360 	u32 ei_lba = 0;
4361 	bool unmap = false;
4362 	bool ndob = false;
4363 
4364 	if (cmd[1] & 0x8) {	/* UNMAP */
4365 		if (sdebug_lbpws == 0) {
4366 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
4367 			return check_condition_result;
4368 		} else
4369 			unmap = true;
4370 	}
4371 	if (cmd[1] & 0x1)  /* NDOB (no data-out buffer, assumes zeroes) */
4372 		ndob = true;
4373 	lba = get_unaligned_be64(cmd + 2);
4374 	num = get_unaligned_be32(cmd + 10);
4375 	if (num > sdebug_write_same_length) {
4376 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
4377 		return check_condition_result;
4378 	}
4379 	return resp_write_same(scp, lba, num, ei_lba, unmap, ndob);
4380 }
4381 
4382 /* Note the mode field is in the same position as the (lower) service action
4383  * field. For the REPORT SUPPORTED OPERATION CODES command, SPC-4 suggests
4384  * each mode of this command should be reported separately; left as future work. */
4385 static int resp_write_buffer(struct scsi_cmnd *scp,
4386 			     struct sdebug_dev_info *devip)
4387 {
4388 	u8 *cmd = scp->cmnd;
4389 	struct scsi_device *sdp = scp->device;
4390 	struct sdebug_dev_info *dp;
4391 	u8 mode;
4392 
4393 	mode = cmd[1] & 0x1f;
4394 	switch (mode) {
4395 	case 0x4:	/* download microcode (MC) and activate (ACT) */
4396 		/* set UAs on this device only */
4397 		set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
4398 		set_bit(SDEBUG_UA_MICROCODE_CHANGED, devip->uas_bm);
4399 		break;
4400 	case 0x5:	/* download MC, save and ACT */
4401 		set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET, devip->uas_bm);
4402 		break;
4403 	case 0x6:	/* download MC with offsets and ACT */
4404 		/* set UAs on most devices (LUs) in this target */
4405 		list_for_each_entry(dp,
4406 				    &devip->sdbg_host->dev_info_list,
4407 				    dev_list)
4408 			if (dp->target == sdp->id) {
4409 				set_bit(SDEBUG_UA_BUS_RESET, dp->uas_bm);
4410 				if (devip != dp)
4411 					set_bit(SDEBUG_UA_MICROCODE_CHANGED,
4412 						dp->uas_bm);
4413 			}
4414 		break;
4415 	case 0x7:	/* download MC with offsets, save, and ACT */
4416 		/* set UA on all devices (LUs) in this target */
4417 		list_for_each_entry(dp,
4418 				    &devip->sdbg_host->dev_info_list,
4419 				    dev_list)
4420 			if (dp->target == sdp->id)
4421 				set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET,
4422 					dp->uas_bm);
4423 		break;
4424 	default:
4425 		/* do nothing for this command for other mode values */
4426 		break;
4427 	}
4428 	return 0;
4429 }
4430 
4431 static int resp_comp_write(struct scsi_cmnd *scp,
4432 			   struct sdebug_dev_info *devip)
4433 {
4434 	u8 *cmd = scp->cmnd;
4435 	u8 *arr;
4436 	struct sdeb_store_info *sip = devip2sip(devip, true);
4437 	u64 lba;
4438 	u32 dnum;
4439 	u32 lb_size = sdebug_sector_size;
4440 	u8 num;
4441 	int ret;
4442 	int retval = 0;
4443 
4444 	lba = get_unaligned_be64(cmd + 2);
4445 	num = cmd[13];		/* 1 to a maximum of 255 logical blocks */
4446 	if (0 == num)
4447 		return 0;	/* degenerate case, not an error */
4448 	if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
4449 	    (cmd[1] & 0xe0)) {
4450 		mk_sense_invalid_opcode(scp);
4451 		return check_condition_result;
4452 	}
4453 	if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
4454 	     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
4455 	    (cmd[1] & 0xe0) == 0)
4456 		sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
4457 			    "to DIF device\n");
4458 	ret = check_device_access_params(scp, lba, num, false);
4459 	if (ret)
4460 		return ret;
4461 	dnum = 2 * num;
4462 	arr = kcalloc(lb_size, dnum, GFP_ATOMIC);
4463 	if (NULL == arr) {
4464 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4465 				INSUFF_RES_ASCQ);
4466 		return check_condition_result;
4467 	}
4468 
4469 	sdeb_write_lock(sip);
4470 
4471 	ret = do_dout_fetch(scp, dnum, arr);
4472 	if (ret == -1) {
4473 		retval = DID_ERROR << 16;
4474 		goto cleanup;
4475 	} else if (sdebug_verbose && (ret < (dnum * lb_size)))
4476 		sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb "
4477 			    "indicated=%u, IO sent=%d bytes\n", my_name,
4478 			    dnum * lb_size, ret);
4479 	if (!comp_write_worker(sip, lba, num, arr, false)) {
4480 		mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
4481 		retval = check_condition_result;
4482 		goto cleanup;
4483 	}
4484 	if (scsi_debug_lbp())
4485 		map_region(sip, lba, num);
4486 cleanup:
4487 	sdeb_write_unlock(sip);
4488 	kfree(arr);
4489 	return retval;
4490 }
4491 
4492 struct unmap_block_desc {
4493 	__be64	lba;
4494 	__be32	blocks;
4495 	__be32	__reserved;
4496 };
4497 
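/*
 * The UNMAP parameter list handled below is an 8-byte header followed
 * by the 16-byte descriptors above. For example (all fields big
 * endian), a 40-byte payload unmapping 8 blocks at LBA 0x1000 and 16
 * blocks at LBA 0x2000 carries:
 *
 *	header: UNMAP data length = 38, block descriptor data length = 32
 *	desc 0: lba = 0x1000, blocks = 8
 *	desc 1: lba = 0x2000, blocks = 16
 */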
4498 static int resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4499 {
4500 	unsigned char *buf;
4501 	struct unmap_block_desc *desc;
4502 	struct sdeb_store_info *sip = devip2sip(devip, true);
4503 	unsigned int i, payload_len, descriptors;
4504 	int ret;
4505 
4506 	if (!scsi_debug_lbp())
4507 		return 0;	/* fib and say it's done */
4508 	payload_len = get_unaligned_be16(scp->cmnd + 7);
4509 	BUG_ON(scsi_bufflen(scp) != payload_len);
4510 
4511 	descriptors = (payload_len - 8) / 16;
4512 	if (descriptors > sdebug_unmap_max_desc) {
4513 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
4514 		return check_condition_result;
4515 	}
4516 
4517 	buf = kzalloc(scsi_bufflen(scp), GFP_ATOMIC);
4518 	if (!buf) {
4519 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4520 				INSUFF_RES_ASCQ);
4521 		return check_condition_result;
4522 	}
4523 
4524 	scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
4525 
4526 	BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
4527 	BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);
4528 
4529 	desc = (void *)&buf[8];
4530 
4531 	sdeb_write_lock(sip);
4532 
4533 	for (i = 0 ; i < descriptors ; i++) {
4534 		unsigned long long lba = get_unaligned_be64(&desc[i].lba);
4535 		unsigned int num = get_unaligned_be32(&desc[i].blocks);
4536 
4537 		ret = check_device_access_params(scp, lba, num, true);
4538 		if (ret)
4539 			goto out;
4540 
4541 		unmap_region(sip, lba, num);
4542 	}
4543 
4544 	ret = 0;
4545 
4546 out:
4547 	sdeb_write_unlock(sip);
4548 	kfree(buf);
4549 
4550 	return ret;
4551 }
4552 
4553 #define SDEBUG_GET_LBA_STATUS_LEN 32
4554 
4555 static int resp_get_lba_status(struct scsi_cmnd *scp,
4556 			       struct sdebug_dev_info *devip)
4557 {
4558 	u8 *cmd = scp->cmnd;
4559 	u64 lba;
4560 	u32 alloc_len, mapped, num;
4561 	int ret;
4562 	u8 arr[SDEBUG_GET_LBA_STATUS_LEN];
4563 
4564 	lba = get_unaligned_be64(cmd + 2);
4565 	alloc_len = get_unaligned_be32(cmd + 10);
4566 
4567 	if (alloc_len < 24)
4568 		return 0;
4569 
4570 	ret = check_device_access_params(scp, lba, 1, false);
4571 	if (ret)
4572 		return ret;
4573 
4574 	if (scsi_debug_lbp()) {
4575 		struct sdeb_store_info *sip = devip2sip(devip, true);
4576 
4577 		mapped = map_state(sip, lba, &num);
4578 	} else {
4579 		mapped = 1;
4580 		/* following just in case virtual_gb changed */
4581 		sdebug_capacity = get_sdebug_capacity();
4582 		if (sdebug_capacity - lba <= 0xffffffff)
4583 			num = sdebug_capacity - lba;
4584 		else
4585 			num = 0xffffffff;
4586 	}
4587 
4588 	memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
4589 	put_unaligned_be32(20, arr);		/* Parameter Data Length */
4590 	put_unaligned_be64(lba, arr + 8);	/* LBA */
4591 	put_unaligned_be32(num, arr + 16);	/* Number of blocks */
4592 	arr[20] = !mapped;		/* prov_stat=0: mapped; 1: dealloc */
4593 
4594 	return fill_from_dev_buffer(scp, arr, SDEBUG_GET_LBA_STATUS_LEN);
4595 }
4596 
4597 static int resp_get_stream_status(struct scsi_cmnd *scp,
4598 				  struct sdebug_dev_info *devip)
4599 {
4600 	u16 starting_stream_id, stream_id;
4601 	const u8 *cmd = scp->cmnd;
4602 	u32 alloc_len, offset;
4603 	u8 arr[256] = {};
4604 	struct scsi_stream_status_header *h = (void *)arr;
4605 
4606 	starting_stream_id = get_unaligned_be16(cmd + 4);
4607 	alloc_len = get_unaligned_be32(cmd + 10);
4608 
4609 	if (alloc_len < 8) {
4610 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
4611 		return check_condition_result;
4612 	}
4613 
4614 	if (starting_stream_id >= MAXIMUM_NUMBER_OF_STREAMS) {
4615 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1);
4616 		return check_condition_result;
4617 	}
4618 
4619 	/*
4620 	 * The GET STREAM STATUS command only reports status information
4621 	 * about open streams. Treat the non-permanent stream as open.
4622 	 */
4623 	put_unaligned_be16(MAXIMUM_NUMBER_OF_STREAMS,
4624 			   &h->number_of_open_streams);
4625 
4626 	for (offset = 8, stream_id = starting_stream_id;
4627 	     offset + 8 <= min_t(u32, alloc_len, sizeof(arr)) &&
4628 		     stream_id < MAXIMUM_NUMBER_OF_STREAMS;
4629 	     offset += 8, stream_id++) {
4630 		struct scsi_stream_status *stream_status = (void *)arr + offset;
4631 
4632 		stream_status->perm = stream_id < PERMANENT_STREAM_COUNT;
4633 		put_unaligned_be16(stream_id,
4634 				   &stream_status->stream_identifier);
4635 		stream_status->rel_lifetime = stream_id + 1;
4636 	}
4637 	put_unaligned_be32(offset - 8, &h->len); /* PARAMETER DATA LENGTH */
4638 
4639 	return fill_from_dev_buffer(scp, arr, min(offset, alloc_len));
4640 }
4641 
4642 static int resp_sync_cache(struct scsi_cmnd *scp,
4643 			   struct sdebug_dev_info *devip)
4644 {
4645 	int res = 0;
4646 	u64 lba;
4647 	u32 num_blocks;
4648 	u8 *cmd = scp->cmnd;
4649 
4650 	if (cmd[0] == SYNCHRONIZE_CACHE) {	/* 10 byte cdb */
4651 		lba = get_unaligned_be32(cmd + 2);
4652 		num_blocks = get_unaligned_be16(cmd + 7);
4653 	} else {				/* SYNCHRONIZE_CACHE(16) */
4654 		lba = get_unaligned_be64(cmd + 2);
4655 		num_blocks = get_unaligned_be32(cmd + 10);
4656 	}
4657 	if (lba + num_blocks > sdebug_capacity) {
4658 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4659 		return check_condition_result;
4660 	}
4661 	if (!write_since_sync || (cmd[1] & 0x2))
4662 		res = SDEG_RES_IMMED_MASK;
4663 	else		/* delay if write_since_sync and IMMED clear */
4664 		write_since_sync = false;
4665 	return res;
4666 }
4667 
4668 /*
4669  * Assuming the LBA+num_blocks is not out-of-range, this function returns
4670  * CONDITION MET if the specified blocks will fit in (or have already been
4671  * brought into) the cache, and GOOD status otherwise. Since scsi_debug
4672  * models a disk with a big cache, it always yields CONDITION MET. It also
4673  * tries to bring the addressed range of the store into the CPU cache(s).
4674  */
4675 static int resp_pre_fetch(struct scsi_cmnd *scp,
4676 			  struct sdebug_dev_info *devip)
4677 {
4678 	int res = 0;
4679 	u64 lba;
4680 	u64 block, rest = 0;
4681 	u32 nblks;
4682 	u8 *cmd = scp->cmnd;
4683 	struct sdeb_store_info *sip = devip2sip(devip, true);
4684 	u8 *fsp = sip->storep;
4685 
4686 	if (cmd[0] == PRE_FETCH) {	/* 10 byte cdb */
4687 		lba = get_unaligned_be32(cmd + 2);
4688 		nblks = get_unaligned_be16(cmd + 7);
4689 	} else {			/* PRE-FETCH(16) */
4690 		lba = get_unaligned_be64(cmd + 2);
4691 		nblks = get_unaligned_be32(cmd + 10);
4692 	}
4693 	if (lba + nblks > sdebug_capacity) {
4694 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4695 		return check_condition_result;
4696 	}
4697 	if (!fsp)
4698 		goto fini;
4699 	/* PRE-FETCH spec says nothing about LBP or PI so skip them */
4700 	block = do_div(lba, sdebug_store_sectors);
4701 	if (block + nblks > sdebug_store_sectors)
4702 		rest = block + nblks - sdebug_store_sectors;
4703 
4704 	/* Try to bring the PRE-FETCH range into CPU's cache */
4705 	sdeb_read_lock(sip);
4706 	prefetch_range(fsp + (sdebug_sector_size * block),
4707 		       (nblks - rest) * sdebug_sector_size);
4708 	if (rest)
4709 		prefetch_range(fsp, rest * sdebug_sector_size);
4710 	sdeb_read_unlock(sip);
4711 fini:
4712 	if (cmd[1] & 0x2)
4713 		res = SDEG_RES_IMMED_MASK;
4714 	return res | condition_met_result;
4715 }
4716 
4717 #define RL_BUCKET_ELEMS 8
4718 
4719 /* Even though each pseudo target has a REPORT LUNS "well known logical unit"
4720  * (W-LUN), the normal Linux scanning logic does not associate it with a
4721  * device (e.g. /dev/sg7). The following magic will make that association:
4722  *   "cd /sys/class/scsi_host/host<n> ; echo '- - 49409' > scan"
4723  * where <n> is a host number. If there are multiple targets in a host then
4724  * the above will associate a W-LUN to each target. To only get a W-LUN
4725  * for target 2, then use "echo '- 2 49409' > scan" .
4726  */
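/*
 * The response below is assembled in RL_BUCKET_ELEMS (8) scsi_lun sized
 * chunks so the stack buffer stays small. Illustration (not additional
 * logic): with sdebug_max_luns = 20, no_lun0 clear and SELECT REPORT 2,
 * rlen = 21 * 8 = 168 bytes; bucket 0 carries the 8-byte header plus
 * LUNs 0..6, bucket 1 LUNs 7..14, and the final partial bucket LUNs
 * 15..19 followed by the REPORT LUNS W-LUN (0xc101, i.e. 49409).
 */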
4727 static int resp_report_luns(struct scsi_cmnd *scp,
4728 			    struct sdebug_dev_info *devip)
4729 {
4730 	unsigned char *cmd = scp->cmnd;
4731 	unsigned int alloc_len;
4732 	unsigned char select_report;
4733 	u64 lun;
4734 	struct scsi_lun *lun_p;
4735 	u8 arr[RL_BUCKET_ELEMS * sizeof(struct scsi_lun)];
4736 	unsigned int lun_cnt;	/* normal LUN count (max: 256) */
4737 	unsigned int wlun_cnt;	/* report luns W-LUN count */
4738 	unsigned int tlun_cnt;	/* total LUN count */
4739 	unsigned int rlen;	/* response length (in bytes) */
4740 	int k, j, n, res;
4741 	unsigned int off_rsp = 0;
4742 	const int sz_lun = sizeof(struct scsi_lun);
4743 
4744 	clear_luns_changed_on_target(devip);
4745 
4746 	select_report = cmd[2];
4747 	alloc_len = get_unaligned_be32(cmd + 6);
4748 
4749 	if (alloc_len < 4) {
4750 		pr_err("alloc len too small %d\n", alloc_len);
4751 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
4752 		return check_condition_result;
4753 	}
4754 
4755 	switch (select_report) {
4756 	case 0:		/* all LUNs apart from W-LUNs */
4757 		lun_cnt = sdebug_max_luns;
4758 		wlun_cnt = 0;
4759 		break;
4760 	case 1:		/* only W-LUNs */
4761 		lun_cnt = 0;
4762 		wlun_cnt = 1;
4763 		break;
4764 	case 2:		/* all LUNs */
4765 		lun_cnt = sdebug_max_luns;
4766 		wlun_cnt = 1;
4767 		break;
4768 	case 0x10:	/* only administrative LUs */
4769 	case 0x11:	/* see SPC-5 */
4770 	case 0x12:	/* only subsidiary LUs owned by referenced LU */
4771 	default:
4772 		pr_debug("select report invalid %d\n", select_report);
4773 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
4774 		return check_condition_result;
4775 	}
4776 
4777 	if (sdebug_no_lun_0 && (lun_cnt > 0))
4778 		--lun_cnt;
4779 
4780 	tlun_cnt = lun_cnt + wlun_cnt;
4781 	rlen = tlun_cnt * sz_lun;	/* excluding 8 byte header */
4782 	scsi_set_resid(scp, scsi_bufflen(scp));
4783 	pr_debug("select_report %d luns = %d wluns = %d no_lun0 %d\n",
4784 		 select_report, lun_cnt, wlun_cnt, sdebug_no_lun_0);
4785 
4786 	/* loops rely on sizeof response header same as sizeof lun (both 8) */
4787 	lun = sdebug_no_lun_0 ? 1 : 0;
4788 	for (k = 0, j = 0, res = 0; true; ++k, j = 0) {
4789 		memset(arr, 0, sizeof(arr));
4790 		lun_p = (struct scsi_lun *)&arr[0];
4791 		if (k == 0) {
4792 			put_unaligned_be32(rlen, &arr[0]);
4793 			++lun_p;
4794 			j = 1;
4795 		}
4796 		for ( ; j < RL_BUCKET_ELEMS; ++j, ++lun_p) {
4797 			if ((k * RL_BUCKET_ELEMS) + j > lun_cnt)
4798 				break;
4799 			int_to_scsilun(lun++, lun_p);
4800 			if (lun > 1 && sdebug_lun_am == SAM_LUN_AM_FLAT)
4801 				lun_p->scsi_lun[0] |= 0x40;
4802 		}
4803 		if (j < RL_BUCKET_ELEMS)
4804 			break;
4805 		n = j * sz_lun;
4806 		res = p_fill_from_dev_buffer(scp, arr, n, off_rsp);
4807 		if (res)
4808 			return res;
4809 		off_rsp += n;
4810 	}
4811 	if (wlun_cnt) {
4812 		int_to_scsilun(SCSI_W_LUN_REPORT_LUNS, lun_p);
4813 		++j;
4814 	}
4815 	if (j > 0)
4816 		res = p_fill_from_dev_buffer(scp, arr, j * sz_lun, off_rsp);
4817 	return res;
4818 }
4819 
4820 static int resp_verify(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4821 {
4822 	bool is_bytchk3 = false;
4823 	u8 bytchk;
4824 	int ret, j;
4825 	u32 vnum, a_num, off;
4826 	const u32 lb_size = sdebug_sector_size;
4827 	u64 lba;
4828 	u8 *arr;
4829 	u8 *cmd = scp->cmnd;
4830 	struct sdeb_store_info *sip = devip2sip(devip, true);
4831 
4832 	bytchk = (cmd[1] >> 1) & 0x3;
4833 	if (bytchk == 0) {
4834 		return 0;	/* always claim internal verify okay */
4835 	} else if (bytchk == 2) {
4836 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
4837 		return check_condition_result;
4838 	} else if (bytchk == 3) {
4839 		is_bytchk3 = true;	/* 1 block sent, compared repeatedly */
4840 	}
4841 	switch (cmd[0]) {
4842 	case VERIFY_16:
4843 		lba = get_unaligned_be64(cmd + 2);
4844 		vnum = get_unaligned_be32(cmd + 10);
4845 		break;
4846 	case VERIFY:		/* is VERIFY(10) */
4847 		lba = get_unaligned_be32(cmd + 2);
4848 		vnum = get_unaligned_be16(cmd + 7);
4849 		break;
4850 	default:
4851 		mk_sense_invalid_opcode(scp);
4852 		return check_condition_result;
4853 	}
4854 	if (vnum == 0)
4855 		return 0;	/* not an error */
4856 	a_num = is_bytchk3 ? 1 : vnum;
4857 	/* Treat following check like one for read (i.e. no write) access */
4858 	ret = check_device_access_params(scp, lba, a_num, false);
4859 	if (ret)
4860 		return ret;
4861 
4862 	arr = kcalloc(lb_size, vnum, GFP_ATOMIC | __GFP_NOWARN);
4863 	if (!arr) {
4864 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4865 				INSUFF_RES_ASCQ);
4866 		return check_condition_result;
4867 	}
4868 	/* Not changing store, so only need read access */
4869 	sdeb_read_lock(sip);
4870 
4871 	ret = do_dout_fetch(scp, a_num, arr);
4872 	if (ret == -1) {
4873 		ret = DID_ERROR << 16;
4874 		goto cleanup;
4875 	} else if (sdebug_verbose && (ret < (a_num * lb_size))) {
4876 		sdev_printk(KERN_INFO, scp->device,
4877 			    "%s: %s: cdb indicated=%u, IO sent=%d bytes\n",
4878 			    my_name, __func__, a_num * lb_size, ret);
4879 	}
4880 	if (is_bytchk3) {
4881 		for (j = 1, off = lb_size; j < vnum; ++j, off += lb_size)
4882 			memcpy(arr + off, arr, lb_size);
4883 	}
4884 	ret = 0;
4885 	if (!comp_write_worker(sip, lba, vnum, arr, true)) {
4886 		mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
4887 		ret = check_condition_result;
4888 		goto cleanup;
4889 	}
4890 cleanup:
4891 	sdeb_read_unlock(sip);
4892 	kfree(arr);
4893 	return ret;
4894 }
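
/*
 * Illustrative sketch (hypothetical helper, not part of the driver):
 * packing a VERIFY(16) CDB with BYTCHK=1, the mirror image of the
 * decode in resp_verify() above: BYTCHK in bits 2:1 of byte 1, LBA in
 * bytes 2..9 and the verification length in bytes 10..13.
 */
static inline void example_build_verify16(u8 *cdb, u64 lba, u32 vnum)
{
	memset(cdb, 0, 16);
	cdb[0] = VERIFY_16;
	cdb[1] = 0x1 << 1;	/* BYTCHK=01b: data-out is compared */
	put_unaligned_be64(lba, cdb + 2);
	put_unaligned_be32(vnum, cdb + 10);
}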
4895 
4896 #define RZONES_DESC_HD 64
4897 
4898 /* Report zones depending on start LBA and reporting options */
4899 static int resp_report_zones(struct scsi_cmnd *scp,
4900 			     struct sdebug_dev_info *devip)
4901 {
4902 	unsigned int rep_max_zones, nrz = 0;
4903 	int ret = 0;
4904 	u32 alloc_len, rep_opts, rep_len;
4905 	bool partial;
4906 	u64 lba, zs_lba;
4907 	u8 *arr = NULL, *desc;
4908 	u8 *cmd = scp->cmnd;
4909 	struct sdeb_zone_state *zsp = NULL;
4910 	struct sdeb_store_info *sip = devip2sip(devip, false);
4911 
4912 	if (!sdebug_dev_is_zoned(devip)) {
4913 		mk_sense_invalid_opcode(scp);
4914 		return check_condition_result;
4915 	}
4916 	zs_lba = get_unaligned_be64(cmd + 2);
4917 	alloc_len = get_unaligned_be32(cmd + 10);
4918 	if (alloc_len == 0)
4919 		return 0;	/* not an error */
4920 	rep_opts = cmd[14] & 0x3f;
4921 	partial = cmd[14] & 0x80;
4922 
4923 	if (zs_lba >= sdebug_capacity) {
4924 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4925 		return check_condition_result;
4926 	}
4927 
4928 	rep_max_zones = alloc_len > 64 ? (alloc_len - 64) / RZONES_DESC_HD : 0;
4929 
4930 	arr = kzalloc(max_t(u32, alloc_len, 64), GFP_ATOMIC | __GFP_NOWARN);
4931 	if (!arr) {
4932 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4933 				INSUFF_RES_ASCQ);
4934 		return check_condition_result;
4935 	}
4936 
4937 	sdeb_read_lock(sip);
4938 
4939 	desc = arr + 64;
4940 	for (lba = zs_lba; lba < sdebug_capacity;
4941 	     lba = zsp->z_start + zsp->z_size) {
4942 		if (WARN_ONCE(zbc_zone(devip, lba) == zsp, "lba = %llu\n", lba))
4943 			break;
4944 		zsp = zbc_zone(devip, lba);
4945 		switch (rep_opts) {
4946 		case 0x00:
4947 			/* All zones */
4948 			break;
4949 		case 0x01:
4950 			/* Empty zones */
4951 			if (zsp->z_cond != ZC1_EMPTY)
4952 				continue;
4953 			break;
4954 		case 0x02:
4955 			/* Implicit open zones */
4956 			if (zsp->z_cond != ZC2_IMPLICIT_OPEN)
4957 				continue;
4958 			break;
4959 		case 0x03:
4960 			/* Explicit open zones */
4961 			if (zsp->z_cond != ZC3_EXPLICIT_OPEN)
4962 				continue;
4963 			break;
4964 		case 0x04:
4965 			/* Closed zones */
4966 			if (zsp->z_cond != ZC4_CLOSED)
4967 				continue;
4968 			break;
4969 		case 0x05:
4970 			/* Full zones */
4971 			if (zsp->z_cond != ZC5_FULL)
4972 				continue;
4973 			break;
4974 		case 0x06:
4975 		case 0x07:
4976 		case 0x10:
4977 			/*
4978 			 * Read-only, offline and reset WP recommended are
4979 			 * not emulated: no zones to report.
4980 			 */
4981 			continue;
4982 		case 0x11:
4983 			/* non-seq-resource set */
4984 			if (!zsp->z_non_seq_resource)
4985 				continue;
4986 			break;
4987 		case 0x3e:
4988 			/* All zones except gap zones. */
4989 			if (zbc_zone_is_gap(zsp))
4990 				continue;
4991 			break;
4992 		case 0x3f:
4993 			/* Not write pointer (conventional) zones */
4994 			if (zbc_zone_is_seq(zsp))
4995 				continue;
4996 			break;
4997 		default:
4998 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
4999 					INVALID_FIELD_IN_CDB, 0);
5000 			ret = check_condition_result;
5001 			goto fini;
5002 		}
5003 
5004 		if (nrz < rep_max_zones) {
5005 			/* Fill zone descriptor */
5006 			desc[0] = zsp->z_type;
5007 			desc[1] = zsp->z_cond << 4;
5008 			if (zsp->z_non_seq_resource)
5009 				desc[1] |= 1 << 1;
5010 			put_unaligned_be64((u64)zsp->z_size, desc + 8);
5011 			put_unaligned_be64((u64)zsp->z_start, desc + 16);
5012 			put_unaligned_be64((u64)zsp->z_wp, desc + 24);
5013 			desc += 64;
5014 		}
5015 
5016 		if (partial && nrz >= rep_max_zones)
5017 			break;
5018 
5019 		nrz++;
5020 	}
5021 
5022 	/* Report header */
5023 	/* Zone list length. */
5024 	put_unaligned_be32(nrz * RZONES_DESC_HD, arr + 0);
5025 	/* Maximum LBA */
5026 	put_unaligned_be64(sdebug_capacity - 1, arr + 8);
5027 	/* Zone starting LBA granularity. */
5028 	if (devip->zcap < devip->zsize)
5029 		put_unaligned_be64(devip->zsize, arr + 16);
5030 
5031 	rep_len = (unsigned long)desc - (unsigned long)arr;
5032 	ret = fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, rep_len));
5033 
5034 fini:
5035 	sdeb_read_unlock(sip);
5036 	kfree(arr);
5037 	return ret;
5038 }
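
/*
 * Illustrative sketch (assumed helper, not called by the driver):
 * reading back one 64-byte zone descriptor as filled in above: byte 0
 * holds the zone type, the upper nibble of byte 1 the zone condition,
 * and bytes 8-15, 16-23 and 24-31 the zone length, zone start LBA and
 * write pointer respectively.
 */
static inline u64 example_zone_desc_wp(const u8 *desc)
{
	return get_unaligned_be64(desc + 24);	/* write pointer LBA */
}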
5039 
5040 /* Logic transplanted from tcmu-runner, file_zbc.c */
5041 static void zbc_open_all(struct sdebug_dev_info *devip)
5042 {
5043 	struct sdeb_zone_state *zsp = &devip->zstate[0];
5044 	unsigned int i;
5045 
5046 	for (i = 0; i < devip->nr_zones; i++, zsp++) {
5047 		if (zsp->z_cond == ZC4_CLOSED)
5048 			zbc_open_zone(devip, &devip->zstate[i], true);
5049 	}
5050 }
5051 
5052 static int resp_open_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
5053 {
5054 	int res = 0;
5055 	u64 z_id;
5056 	enum sdebug_z_cond zc;
5057 	u8 *cmd = scp->cmnd;
5058 	struct sdeb_zone_state *zsp;
5059 	bool all = cmd[14] & 0x01;
5060 	struct sdeb_store_info *sip = devip2sip(devip, false);
5061 
5062 	if (!sdebug_dev_is_zoned(devip)) {
5063 		mk_sense_invalid_opcode(scp);
5064 		return check_condition_result;
5065 	}
5066 
5067 	sdeb_write_lock(sip);
5068 
5069 	if (all) {
5070 		/* Check if all closed zones can be opened */
5071 		if (devip->max_open &&
5072 		    devip->nr_exp_open + devip->nr_closed > devip->max_open) {
5073 			mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
5074 					INSUFF_ZONE_ASCQ);
5075 			res = check_condition_result;
5076 			goto fini;
5077 		}
5078 		/* Open all closed zones */
5079 		zbc_open_all(devip);
5080 		goto fini;
5081 	}
5082 
5083 	/* Open the specified zone */
5084 	z_id = get_unaligned_be64(cmd + 2);
5085 	if (z_id >= sdebug_capacity) {
5086 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
5087 		res = check_condition_result;
5088 		goto fini;
5089 	}
5090 
5091 	zsp = zbc_zone(devip, z_id);
5092 	if (z_id != zsp->z_start) {
5093 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
5094 		res = check_condition_result;
5095 		goto fini;
5096 	}
5097 	if (zbc_zone_is_conv(zsp)) {
5098 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
5099 		res = check_condition_result;
5100 		goto fini;
5101 	}
5102 
5103 	zc = zsp->z_cond;
5104 	if (zc == ZC3_EXPLICIT_OPEN || zc == ZC5_FULL)
5105 		goto fini;
5106 
5107 	if (devip->max_open && devip->nr_exp_open >= devip->max_open) {
5108 		mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
5109 				INSUFF_ZONE_ASCQ);
5110 		res = check_condition_result;
5111 		goto fini;
5112 	}
5113 
5114 	zbc_open_zone(devip, zsp, true);
5115 fini:
5116 	sdeb_write_unlock(sip);
5117 	return res;
5118 }
5119 
5120 static void zbc_close_all(struct sdebug_dev_info *devip)
5121 {
5122 	unsigned int i;
5123 
5124 	for (i = 0; i < devip->nr_zones; i++)
5125 		zbc_close_zone(devip, &devip->zstate[i]);
5126 }
5127 
5128 static int resp_close_zone(struct scsi_cmnd *scp,
5129 			   struct sdebug_dev_info *devip)
5130 {
5131 	int res = 0;
5132 	u64 z_id;
5133 	u8 *cmd = scp->cmnd;
5134 	struct sdeb_zone_state *zsp;
5135 	bool all = cmd[14] & 0x01;
5136 	struct sdeb_store_info *sip = devip2sip(devip, false);
5137 
5138 	if (!sdebug_dev_is_zoned(devip)) {
5139 		mk_sense_invalid_opcode(scp);
5140 		return check_condition_result;
5141 	}
5142 
5143 	sdeb_write_lock(sip);
5144 
5145 	if (all) {
5146 		zbc_close_all(devip);
5147 		goto fini;
5148 	}
5149 
5150 	/* Close the specified zone */
5151 	z_id = get_unaligned_be64(cmd + 2);
5152 	if (z_id >= sdebug_capacity) {
5153 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
5154 		res = check_condition_result;
5155 		goto fini;
5156 	}
5157 
5158 	zsp = zbc_zone(devip, z_id);
5159 	if (z_id != zsp->z_start) {
5160 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
5161 		res = check_condition_result;
5162 		goto fini;
5163 	}
5164 	if (zbc_zone_is_conv(zsp)) {
5165 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
5166 		res = check_condition_result;
5167 		goto fini;
5168 	}
5169 
5170 	zbc_close_zone(devip, zsp);
5171 fini:
5172 	sdeb_write_unlock(sip);
5173 	return res;
5174 }
5175 
5176 static void zbc_finish_zone(struct sdebug_dev_info *devip,
5177 			    struct sdeb_zone_state *zsp, bool empty)
5178 {
5179 	enum sdebug_z_cond zc = zsp->z_cond;
5180 
5181 	if (zc == ZC4_CLOSED || zc == ZC2_IMPLICIT_OPEN ||
5182 	    zc == ZC3_EXPLICIT_OPEN || (empty && zc == ZC1_EMPTY)) {
5183 		if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
5184 			zbc_close_zone(devip, zsp);
5185 		if (zsp->z_cond == ZC4_CLOSED)
5186 			devip->nr_closed--;
5187 		zsp->z_wp = zsp->z_start + zsp->z_size;
5188 		zsp->z_cond = ZC5_FULL;
5189 	}
5190 }
5191 
5192 static void zbc_finish_all(struct sdebug_dev_info *devip)
5193 {
5194 	unsigned int i;
5195 
5196 	for (i = 0; i < devip->nr_zones; i++)
5197 		zbc_finish_zone(devip, &devip->zstate[i], false);
5198 }
5199 
5200 static int resp_finish_zone(struct scsi_cmnd *scp,
5201 			    struct sdebug_dev_info *devip)
5202 {
5203 	struct sdeb_zone_state *zsp;
5204 	int res = 0;
5205 	u64 z_id;
5206 	u8 *cmd = scp->cmnd;
5207 	bool all = cmd[14] & 0x01;
5208 	struct sdeb_store_info *sip = devip2sip(devip, false);
5209 
5210 	if (!sdebug_dev_is_zoned(devip)) {
5211 		mk_sense_invalid_opcode(scp);
5212 		return check_condition_result;
5213 	}
5214 
5215 	sdeb_write_lock(sip);
5216 
5217 	if (all) {
5218 		zbc_finish_all(devip);
5219 		goto fini;
5220 	}
5221 
5222 	/* Finish the specified zone */
5223 	z_id = get_unaligned_be64(cmd + 2);
5224 	if (z_id >= sdebug_capacity) {
5225 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
5226 		res = check_condition_result;
5227 		goto fini;
5228 	}
5229 
5230 	zsp = zbc_zone(devip, z_id);
5231 	if (z_id != zsp->z_start) {
5232 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
5233 		res = check_condition_result;
5234 		goto fini;
5235 	}
5236 	if (zbc_zone_is_conv(zsp)) {
5237 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
5238 		res = check_condition_result;
5239 		goto fini;
5240 	}
5241 
5242 	zbc_finish_zone(devip, zsp, true);
5243 fini:
5244 	sdeb_write_unlock(sip);
5245 	return res;
5246 }
5247 
5248 static void zbc_rwp_zone(struct sdebug_dev_info *devip,
5249 			 struct sdeb_zone_state *zsp)
5250 {
5251 	enum sdebug_z_cond zc;
5252 	struct sdeb_store_info *sip = devip2sip(devip, false);
5253 
5254 	if (!zbc_zone_is_seq(zsp))
5255 		return;
5256 
5257 	zc = zsp->z_cond;
5258 	if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
5259 		zbc_close_zone(devip, zsp);
5260 
5261 	if (zsp->z_cond == ZC4_CLOSED)
5262 		devip->nr_closed--;
5263 
5264 	if (zsp->z_wp > zsp->z_start)
5265 		memset(sip->storep + zsp->z_start * sdebug_sector_size, 0,
5266 		       (zsp->z_wp - zsp->z_start) * sdebug_sector_size);
5267 
5268 	zsp->z_non_seq_resource = false;
5269 	zsp->z_wp = zsp->z_start;
5270 	zsp->z_cond = ZC1_EMPTY;
5271 }
5272 
5273 static void zbc_rwp_all(struct sdebug_dev_info *devip)
5274 {
5275 	unsigned int i;
5276 
5277 	for (i = 0; i < devip->nr_zones; i++)
5278 		zbc_rwp_zone(devip, &devip->zstate[i]);
5279 }
5280 
5281 static int resp_rwp_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
5282 {
5283 	struct sdeb_zone_state *zsp;
5284 	int res = 0;
5285 	u64 z_id;
5286 	u8 *cmd = scp->cmnd;
5287 	bool all = cmd[14] & 0x01;
5288 	struct sdeb_store_info *sip = devip2sip(devip, false);
5289 
5290 	if (!sdebug_dev_is_zoned(devip)) {
5291 		mk_sense_invalid_opcode(scp);
5292 		return check_condition_result;
5293 	}
5294 
5295 	sdeb_write_lock(sip);
5296 
5297 	if (all) {
5298 		zbc_rwp_all(devip);
5299 		goto fini;
5300 	}
5301 
5302 	z_id = get_unaligned_be64(cmd + 2);
5303 	if (z_id >= sdebug_capacity) {
5304 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
5305 		res = check_condition_result;
5306 		goto fini;
5307 	}
5308 
5309 	zsp = zbc_zone(devip, z_id);
5310 	if (z_id != zsp->z_start) {
5311 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
5312 		res = check_condition_result;
5313 		goto fini;
5314 	}
5315 	if (zbc_zone_is_conv(zsp)) {
5316 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
5317 		res = check_condition_result;
5318 		goto fini;
5319 	}
5320 
5321 	zbc_rwp_zone(devip, zsp);
5322 fini:
5323 	sdeb_write_unlock(sip);
5324 	return res;
5325 }
5326 
5327 static u32 get_tag(struct scsi_cmnd *cmnd)
5328 {
5329 	return blk_mq_unique_tag(scsi_cmd_to_rq(cmnd));
5330 }
5331 
5332 /* Queued (deferred) command completions converge here. */
5333 static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
5334 {
5335 	struct sdebug_queued_cmd *sqcp = container_of(sd_dp, struct sdebug_queued_cmd, sd_dp);
5336 	unsigned long flags;
5337 	struct scsi_cmnd *scp = sqcp->scmd;
5338 	struct sdebug_scsi_cmd *sdsc;
5339 	bool aborted;
5340 
5341 	if (sdebug_statistics) {
5342 		atomic_inc(&sdebug_completions);
5343 		if (raw_smp_processor_id() != sd_dp->issuing_cpu)
5344 			atomic_inc(&sdebug_miss_cpus);
5345 	}
5346 
5347 	if (!scp) {
5348 		pr_err("scmd=NULL\n");
5349 		goto out;
5350 	}
5351 
5352 	sdsc = scsi_cmd_priv(scp);
5353 	spin_lock_irqsave(&sdsc->lock, flags);
5354 	aborted = sd_dp->aborted;
5355 	if (unlikely(aborted))
5356 		sd_dp->aborted = false;
5357 	ASSIGN_QUEUED_CMD(scp, NULL);
5358 
5359 	spin_unlock_irqrestore(&sdsc->lock, flags);
5360 
5361 	if (aborted) {
5362 		pr_info("bypassing scsi_done() due to aborted cmd, kicking-off EH\n");
5363 		blk_abort_request(scsi_cmd_to_rq(scp));
5364 		goto out;
5365 	}
5366 
5367 	scsi_done(scp); /* callback to mid level */
5368 out:
5369 	sdebug_free_queued_cmd(sqcp);
5370 }
5371 
5372 /* When high resolution timer goes off this function is called. */
5373 static enum hrtimer_restart sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
5374 {
5375 	struct sdebug_defer *sd_dp = container_of(timer, struct sdebug_defer,
5376 						  hrt);
5377 	sdebug_q_cmd_complete(sd_dp);
5378 	return HRTIMER_NORESTART;
5379 }
5380 
5381 /* When work queue schedules work, it calls this function. */
5382 static void sdebug_q_cmd_wq_complete(struct work_struct *work)
5383 {
5384 	struct sdebug_defer *sd_dp = container_of(work, struct sdebug_defer,
5385 						  ew.work);
5386 	sdebug_q_cmd_complete(sd_dp);
5387 }
5388 
5389 static bool got_shared_uuid;
5390 static uuid_t shared_uuid;
5391 
5392 static int sdebug_device_create_zones(struct sdebug_dev_info *devip)
5393 {
5394 	struct sdeb_zone_state *zsp;
5395 	sector_t capacity = get_sdebug_capacity();
5396 	sector_t conv_capacity;
5397 	sector_t zstart = 0;
5398 	unsigned int i;
5399 
5400 	/*
5401 	 * Set the zone size: if sdeb_zbc_zone_size_mb is not set, figure out
5402 	 * a zone size allowing for at least 4 zones on the device. Otherwise,
5403 	 * use the specified zone size checking that at least 2 zones can be
5404 	 * created for the device.
5405 	 */
5406 	if (!sdeb_zbc_zone_size_mb) {
5407 		devip->zsize = (DEF_ZBC_ZONE_SIZE_MB * SZ_1M)
5408 			>> ilog2(sdebug_sector_size);
5409 		while (capacity < devip->zsize << 2 && devip->zsize >= 2)
5410 			devip->zsize >>= 1;
5411 		if (devip->zsize < 2) {
5412 			pr_err("Device capacity too small\n");
5413 			return -EINVAL;
5414 		}
5415 	} else {
5416 		if (!is_power_of_2(sdeb_zbc_zone_size_mb)) {
5417 			pr_err("Zone size is not a power of 2\n");
5418 			return -EINVAL;
5419 		}
5420 		devip->zsize = (sdeb_zbc_zone_size_mb * SZ_1M)
5421 			>> ilog2(sdebug_sector_size);
5422 		if (devip->zsize >= capacity) {
5423 			pr_err("Zone size too large for device capacity\n");
5424 			return -EINVAL;
5425 		}
5426 	}
5427 
5428 	devip->zsize_shift = ilog2(devip->zsize);
5429 	devip->nr_zones = (capacity + devip->zsize - 1) >> devip->zsize_shift;
5430 
5431 	if (sdeb_zbc_zone_cap_mb == 0) {
5432 		devip->zcap = devip->zsize;
5433 	} else {
5434 		devip->zcap = (sdeb_zbc_zone_cap_mb * SZ_1M) >>
5435 			      ilog2(sdebug_sector_size);
5436 		if (devip->zcap > devip->zsize) {
5437 			pr_err("Zone capacity too large\n");
5438 			return -EINVAL;
5439 		}
5440 	}
5441 
5442 	conv_capacity = (sector_t)sdeb_zbc_nr_conv << devip->zsize_shift;
5443 	if (conv_capacity >= capacity) {
5444 		pr_err("Number of conventional zones too large\n");
5445 		return -EINVAL;
5446 	}
5447 	devip->nr_conv_zones = sdeb_zbc_nr_conv;
5448 	devip->nr_seq_zones = ALIGN(capacity - conv_capacity, devip->zsize) >>
5449 			      devip->zsize_shift;
5450 	devip->nr_zones = devip->nr_conv_zones + devip->nr_seq_zones;
5451 
5452 	/* Add gap zones if zone capacity is smaller than the zone size */
5453 	if (devip->zcap < devip->zsize)
5454 		devip->nr_zones += devip->nr_seq_zones;
5455 
5456 	if (devip->zoned) {
5457 		/* zbc_max_open_zones can be 0, meaning "not reported" */
5458 		if (sdeb_zbc_max_open >= devip->nr_zones - 1)
5459 			devip->max_open = (devip->nr_zones - 1) / 2;
5460 		else
5461 			devip->max_open = sdeb_zbc_max_open;
5462 	}
5463 
5464 	devip->zstate = kcalloc(devip->nr_zones,
5465 				sizeof(struct sdeb_zone_state), GFP_KERNEL);
5466 	if (!devip->zstate)
5467 		return -ENOMEM;
5468 
5469 	for (i = 0; i < devip->nr_zones; i++) {
5470 		zsp = &devip->zstate[i];
5471 
5472 		zsp->z_start = zstart;
5473 
5474 		if (i < devip->nr_conv_zones) {
5475 			zsp->z_type = ZBC_ZTYPE_CNV;
5476 			zsp->z_cond = ZBC_NOT_WRITE_POINTER;
5477 			zsp->z_wp = (sector_t)-1;
5478 			zsp->z_size =
5479 				min_t(u64, devip->zsize, capacity - zstart);
5480 		} else if ((zstart & (devip->zsize - 1)) == 0) {
5481 			if (devip->zoned)
5482 				zsp->z_type = ZBC_ZTYPE_SWR;
5483 			else
5484 				zsp->z_type = ZBC_ZTYPE_SWP;
5485 			zsp->z_cond = ZC1_EMPTY;
5486 			zsp->z_wp = zsp->z_start;
5487 			zsp->z_size =
5488 				min_t(u64, devip->zcap, capacity - zstart);
5489 		} else {
5490 			zsp->z_type = ZBC_ZTYPE_GAP;
5491 			zsp->z_cond = ZBC_NOT_WRITE_POINTER;
5492 			zsp->z_wp = (sector_t)-1;
5493 			zsp->z_size = min_t(u64, devip->zsize - devip->zcap,
5494 					    capacity - zstart);
5495 		}
5496 
5497 		WARN_ON_ONCE((int)zsp->z_size <= 0);
5498 		zstart += zsp->z_size;
5499 	}
5500 
5501 	return 0;
5502 }
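
/*
 * Worked example (illustrative figures): with 512-byte sectors and a
 * 128 MiB zone size, zsize = (128 << 20) >> 9 = 262144 blocks, so a
 * 1 GiB capacity of 2097152 blocks yields 8 zones. With zone_nr_conv=1
 * that is 1 conventional plus 7 sequential zones; gap zones are
 * interleaved only when zone_cap_mb is set below zone_size_mb.
 */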
5503 
5504 static struct sdebug_dev_info *sdebug_device_create(
5505 			struct sdebug_host_info *sdbg_host, gfp_t flags)
5506 {
5507 	struct sdebug_dev_info *devip;
5508 
5509 	devip = kzalloc(sizeof(*devip), flags);
5510 	if (devip) {
5511 		if (sdebug_uuid_ctl == 1)
5512 			uuid_gen(&devip->lu_name);
5513 		else if (sdebug_uuid_ctl == 2) {
5514 			if (got_shared_uuid)
5515 				devip->lu_name = shared_uuid;
5516 			else {
5517 				uuid_gen(&shared_uuid);
5518 				got_shared_uuid = true;
5519 				devip->lu_name = shared_uuid;
5520 			}
5521 		}
5522 		devip->sdbg_host = sdbg_host;
5523 		if (sdeb_zbc_in_use) {
5524 			devip->zoned = sdeb_zbc_model == BLK_ZONED_HM;
5525 			if (sdebug_device_create_zones(devip)) {
5526 				kfree(devip);
5527 				return NULL;
5528 			}
5529 		} else {
5530 			devip->zoned = false;
5531 		}
5532 		devip->create_ts = ktime_get_boottime();
5533 		atomic_set(&devip->stopped, (sdeb_tur_ms_to_ready > 0 ? 2 : 0));
5534 		spin_lock_init(&devip->list_lock);
5535 		INIT_LIST_HEAD(&devip->inject_err_list);
5536 		list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
5537 	}
5538 	return devip;
5539 }
5540 
5541 static struct sdebug_dev_info *find_build_dev_info(struct scsi_device *sdev)
5542 {
5543 	struct sdebug_host_info *sdbg_host;
5544 	struct sdebug_dev_info *open_devip = NULL;
5545 	struct sdebug_dev_info *devip;
5546 
5547 	sdbg_host = shost_to_sdebug_host(sdev->host);
5548 
5549 	list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
5550 		if (devip->used && devip->channel == sdev->channel &&
5551 		    devip->target == sdev->id && devip->lun == sdev->lun)
5552 			return devip;
5553 		if (!devip->used && !open_devip)
5554 			open_devip = devip;
5555 	}
5559 	if (!open_devip) { /* try and make a new one */
5560 		open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
5561 		if (!open_devip) {
5562 			pr_err("out of memory at line %d\n", __LINE__);
5563 			return NULL;
5564 		}
5565 	}
5566 
5567 	open_devip->channel = sdev->channel;
5568 	open_devip->target = sdev->id;
5569 	open_devip->lun = sdev->lun;
5570 	open_devip->sdbg_host = sdbg_host;
5571 	set_bit(SDEBUG_UA_POOCCUR, open_devip->uas_bm);
5572 	open_devip->used = true;
5573 	return open_devip;
5574 }
5575 
5576 static int scsi_debug_slave_alloc(struct scsi_device *sdp)
5577 {
5578 	if (sdebug_verbose)
5579 		pr_info("slave_alloc <%u %u %u %llu>\n",
5580 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5581 
5582 	return 0;
5583 }
5584 
5585 static int scsi_debug_slave_configure(struct scsi_device *sdp)
5586 {
5587 	struct sdebug_dev_info *devip =
5588 			(struct sdebug_dev_info *)sdp->hostdata;
5589 	struct dentry *dentry;
5590 
5591 	if (sdebug_verbose)
5592 		pr_info("slave_configure <%u %u %u %llu>\n",
5593 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5594 	if (sdp->host->max_cmd_len != SDEBUG_MAX_CMD_LEN)
5595 		sdp->host->max_cmd_len = SDEBUG_MAX_CMD_LEN;
5596 	if (devip == NULL) {
5597 		devip = find_build_dev_info(sdp);
5598 		if (devip == NULL)
5599 			return 1;  /* no resources, will be marked offline */
5600 	}
5601 	sdp->hostdata = devip;
5602 	if (sdebug_no_uld)
5603 		sdp->no_uld_attach = 1;
5604 	config_cdb_len(sdp);
5605 
5606 	if (sdebug_allow_restart)
5607 		sdp->allow_restart = 1;
5608 
5609 	devip->debugfs_entry = debugfs_create_dir(dev_name(&sdp->sdev_dev),
5610 				sdebug_debugfs_root);
5611 	if (IS_ERR_OR_NULL(devip->debugfs_entry))
5612 		pr_info("%s: failed to create debugfs directory for device %s\n",
5613 			__func__, dev_name(&sdp->sdev_gendev));
5614 
5615 	dentry = debugfs_create_file("error", 0600, devip->debugfs_entry, sdp,
5616 				&sdebug_error_fops);
5617 	if (IS_ERR_OR_NULL(dentry))
5618 		pr_info("%s: failed to create error file for device %s\n",
5619 			__func__, dev_name(&sdp->sdev_gendev));
5620 
5621 	return 0;
5622 }
5623 
5624 static void scsi_debug_slave_destroy(struct scsi_device *sdp)
5625 {
5626 	struct sdebug_dev_info *devip =
5627 		(struct sdebug_dev_info *)sdp->hostdata;
5628 	struct sdebug_err_inject *err;
5629 
5630 	if (sdebug_verbose)
5631 		pr_info("slave_destroy <%u %u %u %llu>\n",
5632 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5633 
5634 	if (!devip)
5635 		return;
5636 
5637 	spin_lock(&devip->list_lock);
5638 	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
5639 		list_del_rcu(&err->list);
5640 		call_rcu(&err->rcu, sdebug_err_free);
5641 	}
5642 	spin_unlock(&devip->list_lock);
5643 
5644 	debugfs_remove(devip->debugfs_entry);
5645 
5646 	/* make this slot available for re-use */
5647 	devip->used = false;
5648 	sdp->hostdata = NULL;
5649 }
5650 
5651 /* Returns true if the caller must free the queued command memory. */
5652 static bool stop_qc_helper(struct sdebug_defer *sd_dp,
5653 			   enum sdeb_defer_type defer_t)
5654 {
5655 	if (defer_t == SDEB_DEFER_HRT) {
5656 		int res = hrtimer_try_to_cancel(&sd_dp->hrt);
5657 
5658 		switch (res) {
5659 		case 0: /* Not active, it must have already run */
5660 		case -1: /* It's executing the callback */
5661 			return false;
5662 		case 1: /* Was active, we've now cancelled */
5663 		default:
5664 			return true;
5665 		}
5666 	} else if (defer_t == SDEB_DEFER_WQ) {
5667 		/* Cancel if pending */
5668 		if (cancel_work_sync(&sd_dp->ew.work))
5669 			return true;
5670 		/* Was not pending, so it must have run */
5671 		return false;
5672 	} else if (defer_t == SDEB_DEFER_POLL) {
5673 		return true;
5674 	}
5675 
5676 	return false;
5677 }
5678 
5679 
5680 static bool scsi_debug_stop_cmnd(struct scsi_cmnd *cmnd)
5681 {
5682 	enum sdeb_defer_type l_defer_t;
5683 	struct sdebug_defer *sd_dp;
5684 	struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmnd);
5685 	struct sdebug_queued_cmd *sqcp = TO_QUEUED_CMD(cmnd);
5686 
5687 	lockdep_assert_held(&sdsc->lock);
5688 
5689 	if (!sqcp)
5690 		return false;
5691 	sd_dp = &sqcp->sd_dp;
5692 	l_defer_t = READ_ONCE(sd_dp->defer_t);
5693 	ASSIGN_QUEUED_CMD(cmnd, NULL);
5694 
5695 	if (stop_qc_helper(sd_dp, l_defer_t))
5696 		sdebug_free_queued_cmd(sqcp);
5697 
5698 	return true;
5699 }
5700 
5701 /*
5702  * Called only from scsi_debug_abort(), which handles a timed-out command.
5703  */
5704 static bool scsi_debug_abort_cmnd(struct scsi_cmnd *cmnd)
5705 {
5706 	struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmnd);
5707 	unsigned long flags;
5708 	bool res;
5709 
5710 	spin_lock_irqsave(&sdsc->lock, flags);
5711 	res = scsi_debug_stop_cmnd(cmnd);
5712 	spin_unlock_irqrestore(&sdsc->lock, flags);
5713 
5714 	return res;
5715 }
5716 
5717 /*
5718  * All we can do is set the cmnd as internally aborted and wait for it to
5719  * finish. We cannot call scsi_done() as the normal completion path may do that.
5720  */
5721 static bool sdebug_stop_cmnd(struct request *rq, void *data)
5722 {
5723 	scsi_debug_abort_cmnd(blk_mq_rq_to_pdu(rq));
5724 
5725 	return true;
5726 }
5727 
5728 /* Deletes (stops) timers or work queues of all queued commands */
5729 static void stop_all_queued(void)
5730 {
5731 	struct sdebug_host_info *sdhp;
5732 
5733 	mutex_lock(&sdebug_host_list_mutex);
5734 	list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
5735 		struct Scsi_Host *shost = sdhp->shost;
5736 
5737 		blk_mq_tagset_busy_iter(&shost->tag_set, sdebug_stop_cmnd, NULL);
5738 	}
5739 	mutex_unlock(&sdebug_host_list_mutex);
5740 }
5741 
5742 static int sdebug_fail_abort(struct scsi_cmnd *cmnd)
5743 {
5744 	struct scsi_device *sdp = cmnd->device;
5745 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
5746 	struct sdebug_err_inject *err;
5747 	unsigned char *cmd = cmnd->cmnd;
5748 	int ret = 0;
5749 
5750 	if (devip == NULL)
5751 		return 0;
5752 
5753 	rcu_read_lock();
5754 	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
5755 		if (err->type == ERR_ABORT_CMD_FAILED &&
5756 		    (err->cmd == cmd[0] || err->cmd == 0xff)) {
5757 			ret = !!err->cnt;
5758 			if (err->cnt < 0)
5759 				err->cnt++;
5760 
5761 			rcu_read_unlock();
5762 			return ret;
5763 		}
5764 	}
5765 	rcu_read_unlock();
5766 
5767 	return 0;
5768 }
5769 
5770 static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
5771 {
5772 	bool ok = scsi_debug_abort_cmnd(SCpnt);
5773 	u8 *cmd = SCpnt->cmnd;
5774 	u8 opcode = cmd[0];
5775 
5776 	++num_aborts;
5777 
5778 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5779 		sdev_printk(KERN_INFO, SCpnt->device,
5780 			    "%s: command%s found\n", __func__,
5781 			    ok ? "" : " not");
5782 
5783 	if (sdebug_fail_abort(SCpnt)) {
5784 		scmd_printk(KERN_INFO, SCpnt, "fail abort command 0x%x\n",
5785 			    opcode);
5786 		return FAILED;
5787 	}
5788 
5789 	return SUCCESS;
5790 }
5791 
5792 static bool scsi_debug_stop_all_queued_iter(struct request *rq, void *data)
5793 {
5794 	struct scsi_device *sdp = data;
5795 	struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
5796 
5797 	if (scmd->device == sdp)
5798 		scsi_debug_abort_cmnd(scmd);
5799 
5800 	return true;
5801 }
5802 
5803 /* Deletes (stops) timers or work queues of all queued commands per sdev */
5804 static void scsi_debug_stop_all_queued(struct scsi_device *sdp)
5805 {
5806 	struct Scsi_Host *shost = sdp->host;
5807 
5808 	blk_mq_tagset_busy_iter(&shost->tag_set,
5809 				scsi_debug_stop_all_queued_iter, sdp);
5810 }
5811 
5812 static int sdebug_fail_lun_reset(struct scsi_cmnd *cmnd)
5813 {
5814 	struct scsi_device *sdp = cmnd->device;
5815 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
5816 	struct sdebug_err_inject *err;
5817 	unsigned char *cmd = cmnd->cmnd;
5818 	int ret = 0;
5819 
5820 	if (devip == NULL)
5821 		return 0;
5822 
5823 	rcu_read_lock();
5824 	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
5825 		if (err->type == ERR_LUN_RESET_FAILED &&
5826 		    (err->cmd == cmd[0] || err->cmd == 0xff)) {
5827 			ret = !!err->cnt;
5828 			if (err->cnt < 0)
5829 				err->cnt++;
5830 
5831 			rcu_read_unlock();
5832 			return ret;
5833 		}
5834 	}
5835 	rcu_read_unlock();
5836 
5837 	return 0;
5838 }
5839 
5840 static int scsi_debug_device_reset(struct scsi_cmnd *SCpnt)
5841 {
5842 	struct scsi_device *sdp = SCpnt->device;
5843 	struct sdebug_dev_info *devip = sdp->hostdata;
5844 	u8 *cmd = SCpnt->cmnd;
5845 	u8 opcode = cmd[0];
5846 
5847 	++num_dev_resets;
5848 
5849 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5850 		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5851 
5852 	scsi_debug_stop_all_queued(sdp);
5853 	if (devip)
5854 		set_bit(SDEBUG_UA_POR, devip->uas_bm);
5855 
5856 	if (sdebug_fail_lun_reset(SCpnt)) {
5857 		scmd_printk(KERN_INFO, SCpnt, "fail lun reset 0x%x\n", opcode);
5858 		return FAILED;
5859 	}
5860 
5861 	return SUCCESS;
5862 }
5863 
5864 static int sdebug_fail_target_reset(struct scsi_cmnd *cmnd)
5865 {
5866 	struct scsi_target *starget = scsi_target(cmnd->device);
5867 	struct sdebug_target_info *targetip =
5868 		(struct sdebug_target_info *)starget->hostdata;
5869 
5870 	if (targetip)
5871 		return targetip->reset_fail;
5872 
5873 	return 0;
5874 }
5875 
5876 static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt)
5877 {
5878 	struct scsi_device *sdp = SCpnt->device;
5879 	struct sdebug_host_info *sdbg_host = shost_to_sdebug_host(sdp->host);
5880 	struct sdebug_dev_info *devip;
5881 	u8 *cmd = SCpnt->cmnd;
5882 	u8 opcode = cmd[0];
5883 	int k = 0;
5884 
5885 	++num_target_resets;
5886 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5887 		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5888 
5889 	list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
5890 		if (devip->target == sdp->id) {
5891 			set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5892 			++k;
5893 		}
5894 	}
5895 
5896 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5897 		sdev_printk(KERN_INFO, sdp,
5898 			    "%s: %d device(s) found in target\n", __func__, k);
5899 
5900 	if (sdebug_fail_target_reset(SCpnt)) {
5901 		scmd_printk(KERN_INFO, SCpnt, "fail target reset 0x%x\n",
5902 			    opcode);
5903 		return FAILED;
5904 	}
5905 
5906 	return SUCCESS;
5907 }
5908 
5909 static int scsi_debug_bus_reset(struct scsi_cmnd *SCpnt)
5910 {
5911 	struct scsi_device *sdp = SCpnt->device;
5912 	struct sdebug_host_info *sdbg_host = shost_to_sdebug_host(sdp->host);
5913 	struct sdebug_dev_info *devip;
5914 	int k = 0;
5915 
5916 	++num_bus_resets;
5917 
5918 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5919 		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5920 
5921 	list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
5922 		set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5923 		++k;
5924 	}
5925 
5926 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5927 		sdev_printk(KERN_INFO, sdp,
5928 			    "%s: %d device(s) found in host\n", __func__, k);
5929 	return SUCCESS;
5930 }
5931 
5932 static int scsi_debug_host_reset(struct scsi_cmnd *SCpnt)
5933 {
5934 	struct sdebug_host_info *sdbg_host;
5935 	struct sdebug_dev_info *devip;
5936 	int k = 0;
5937 
5938 	++num_host_resets;
5939 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5940 		sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__);
5941 	mutex_lock(&sdebug_host_list_mutex);
5942 	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
5943 		list_for_each_entry(devip, &sdbg_host->dev_info_list,
5944 				    dev_list) {
5945 			set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5946 			++k;
5947 		}
5948 	}
5949 	mutex_unlock(&sdebug_host_list_mutex);
5950 	stop_all_queued();
5951 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5952 		sdev_printk(KERN_INFO, SCpnt->device,
5953 			    "%s: %d device(s) found\n", __func__, k);
5954 	return SUCCESS;
5955 }
5956 
5957 static void sdebug_build_parts(unsigned char *ramp, unsigned long store_size)
5958 {
5959 	struct msdos_partition *pp;
5960 	int starts[SDEBUG_MAX_PARTS + 2], max_part_secs;
5961 	int sectors_per_part, num_sectors, k;
5962 	int heads_by_sects, start_sec, end_sec;
5963 
5964 	/* assume partition table already zeroed */
5965 	if ((sdebug_num_parts < 1) || (store_size < 1048576))
5966 		return;
5967 	if (sdebug_num_parts > SDEBUG_MAX_PARTS) {
5968 		sdebug_num_parts = SDEBUG_MAX_PARTS;
5969 		pr_warn("reducing partitions to %d\n", SDEBUG_MAX_PARTS);
5970 	}
5971 	num_sectors = (int)get_sdebug_capacity();
5972 	sectors_per_part = (num_sectors - sdebug_sectors_per)
5973 			   / sdebug_num_parts;
5974 	heads_by_sects = sdebug_heads * sdebug_sectors_per;
5975 	starts[0] = sdebug_sectors_per;
5976 	max_part_secs = sectors_per_part;
5977 	for (k = 1; k < sdebug_num_parts; ++k) {
5978 		starts[k] = ((k * sectors_per_part) / heads_by_sects)
5979 			    * heads_by_sects;
5980 		if (starts[k] - starts[k - 1] < max_part_secs)
5981 			max_part_secs = starts[k] - starts[k - 1];
5982 	}
5983 	starts[sdebug_num_parts] = num_sectors;
5984 	starts[sdebug_num_parts + 1] = 0;
5985 
5986 	ramp[510] = 0x55;	/* magic partition markings */
5987 	ramp[511] = 0xAA;
5988 	pp = (struct msdos_partition *)(ramp + 0x1be);
5989 	for (k = 0; starts[k + 1]; ++k, ++pp) {
5990 		start_sec = starts[k];
5991 		end_sec = starts[k] + max_part_secs - 1;
5992 		pp->boot_ind = 0;
5993 
5994 		pp->cyl = start_sec / heads_by_sects;
5995 		pp->head = (start_sec - (pp->cyl * heads_by_sects))
5996 			   / sdebug_sectors_per;
5997 		pp->sector = (start_sec % sdebug_sectors_per) + 1;
5998 
5999 		pp->end_cyl = end_sec / heads_by_sects;
6000 		pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
6001 			       / sdebug_sectors_per;
6002 		pp->end_sector = (end_sec % sdebug_sectors_per) + 1;
6003 
6004 		pp->start_sect = cpu_to_le32(start_sec);
6005 		pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1);
6006 		pp->sys_ind = 0x83;	/* plain Linux partition */
6007 	}
6008 }
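
/*
 * Worked example of the CHS arithmetic above (illustrative figures):
 * with 16 heads and 32 sectors per track, heads_by_sects = 512, so
 * start_sec = 1040 maps to cyl = 1040 / 512 = 2,
 * head = (1040 - 2 * 512) / 32 = 0 and sector = (1040 % 32) + 1 = 17.
 */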
6009 
6010 static void block_unblock_all_queues(bool block)
6011 {
6012 	struct sdebug_host_info *sdhp;
6013 
6014 	lockdep_assert_held(&sdebug_host_list_mutex);
6015 
6016 	list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
6017 		struct Scsi_Host *shost = sdhp->shost;
6018 
6019 		if (block)
6020 			scsi_block_requests(shost);
6021 		else
6022 			scsi_unblock_requests(shost);
6023 	}
6024 }
6025 
6026 /* Adjust (by rounding down) the sdebug_cmnd_count so abs(every_nth)-1
6027  * commands will be processed normally before triggers occur.
6028  */
6029 static void tweak_cmnd_count(void)
6030 {
6031 	int count, modulo;
6032 
6033 	modulo = abs(sdebug_every_nth);
6034 	if (modulo < 2)
6035 		return;
6036 
6037 	mutex_lock(&sdebug_host_list_mutex);
6038 	block_unblock_all_queues(true);
6039 	count = atomic_read(&sdebug_cmnd_count);
6040 	atomic_set(&sdebug_cmnd_count, (count / modulo) * modulo);
6041 	block_unblock_all_queues(false);
6042 	mutex_unlock(&sdebug_host_list_mutex);
6043 }
6044 
6045 static void clear_queue_stats(void)
6046 {
6047 	atomic_set(&sdebug_cmnd_count, 0);
6048 	atomic_set(&sdebug_completions, 0);
6049 	atomic_set(&sdebug_miss_cpus, 0);
6050 	atomic_set(&sdebug_a_tsf, 0);
6051 }
6052 
6053 static bool inject_on_this_cmd(void)
6054 {
6055 	if (sdebug_every_nth == 0)
6056 		return false;
6057 	return (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) == 0;
6058 }
6059 
6060 #define INCLUSIVE_TIMING_MAX_NS 1000000		/* 1 millisecond */
6061 
6062 
6063 void sdebug_free_queued_cmd(struct sdebug_queued_cmd *sqcp)
6064 {
6065 	if (sqcp)
6066 		kmem_cache_free(queued_cmd_cache, sqcp);
6067 }
6068 
6069 static struct sdebug_queued_cmd *sdebug_alloc_queued_cmd(struct scsi_cmnd *scmd)
6070 {
6071 	struct sdebug_queued_cmd *sqcp;
6072 	struct sdebug_defer *sd_dp;
6073 
6074 	sqcp = kmem_cache_zalloc(queued_cmd_cache, GFP_ATOMIC);
6075 	if (!sqcp)
6076 		return NULL;
6077 
6078 	sd_dp = &sqcp->sd_dp;
6079 
6080 	hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
6081 	sd_dp->hrt.function = sdebug_q_cmd_hrt_complete;
6082 	INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);
6083 
6084 	sqcp->scmd = scmd;
6085 
6086 	return sqcp;
6087 }
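
/*
 * Note on the deferral plumbing above: each queued command embeds one
 * struct sdebug_defer that can be armed either as a pinned relative
 * hrtimer (completing via sdebug_q_cmd_hrt_complete()) or as a work
 * item (sdebug_q_cmd_wq_complete()); both paths converge on
 * sdebug_q_cmd_complete().
 */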
6088 
6089 /* Complete the processing of the thread that queued a SCSI command to this
6090  * driver. It either completes the command by calling scsi_done() or
6091  * schedules an hrtimer or a work item and then returns 0. Returns
6092  * SCSI_MLQUEUE_HOST_BUSY if temporarily out of resources.
6093  */
6094 static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
6095 			 int scsi_result,
6096 			 int (*pfp)(struct scsi_cmnd *,
6097 				    struct sdebug_dev_info *),
6098 			 int delta_jiff, int ndelay)
6099 {
6100 	struct request *rq = scsi_cmd_to_rq(cmnd);
6101 	bool polled = rq->cmd_flags & REQ_POLLED;
6102 	struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmnd);
6103 	unsigned long flags;
6104 	u64 ns_from_boot = 0;
6105 	struct sdebug_queued_cmd *sqcp;
6106 	struct scsi_device *sdp;
6107 	struct sdebug_defer *sd_dp;
6108 
6109 	if (unlikely(devip == NULL)) {
6110 		if (scsi_result == 0)
6111 			scsi_result = DID_NO_CONNECT << 16;
6112 		goto respond_in_thread;
6113 	}
6114 	sdp = cmnd->device;
6115 
6116 	if (delta_jiff == 0)
6117 		goto respond_in_thread;
6118 
6119 
6120 	if (unlikely(sdebug_every_nth && (SDEBUG_OPT_RARE_TSF & sdebug_opts) &&
6121 		     (scsi_result == 0))) {
6122 		int num_in_q = scsi_device_busy(sdp);
6123 		int qdepth = cmnd->device->queue_depth;
6124 
6125 		if ((num_in_q == qdepth) &&
6126 		    (atomic_inc_return(&sdebug_a_tsf) >=
6127 		     abs(sdebug_every_nth))) {
6128 			atomic_set(&sdebug_a_tsf, 0);
6129 			scsi_result = device_qfull_result;
6130 
6131 			if (unlikely(SDEBUG_OPT_Q_NOISE & sdebug_opts))
6132 				sdev_printk(KERN_INFO, sdp, "%s: num_in_q=%d +1, <inject> status: TASK SET FULL\n",
6133 					    __func__, num_in_q);
6134 		}
6135 	}
6136 
6137 	sqcp = sdebug_alloc_queued_cmd(cmnd);
6138 	if (!sqcp) {
6139 		pr_err("%s no alloc\n", __func__);
6140 		return SCSI_MLQUEUE_HOST_BUSY;
6141 	}
6142 	sd_dp = &sqcp->sd_dp;
6143 
6144 	if (polled)
6145 		ns_from_boot = ktime_get_boottime_ns();
6146 
6147 	/* one of the resp_*() response functions is called here */
6148 	cmnd->result = pfp ? pfp(cmnd, devip) : 0;
6149 	if (cmnd->result & SDEG_RES_IMMED_MASK) {
6150 		cmnd->result &= ~SDEG_RES_IMMED_MASK;
6151 		delta_jiff = ndelay = 0;
6152 	}
6153 	if (cmnd->result == 0 && scsi_result != 0)
6154 		cmnd->result = scsi_result;
6155 	if (cmnd->result == 0 && unlikely(sdebug_opts & SDEBUG_OPT_TRANSPORT_ERR)) {
6156 		if (atomic_read(&sdeb_inject_pending)) {
6157 			mk_sense_buffer(cmnd, ABORTED_COMMAND, TRANSPORT_PROBLEM, ACK_NAK_TO);
6158 			atomic_set(&sdeb_inject_pending, 0);
6159 			cmnd->result = check_condition_result;
6160 		}
6161 	}
6162 
6163 	if (unlikely(sdebug_verbose && cmnd->result))
6164 		sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n",
6165 			    __func__, cmnd->result);
6166 
6167 	if (delta_jiff > 0 || ndelay > 0) {
6168 		ktime_t kt;
6169 
6170 		if (delta_jiff > 0) {
6171 			u64 ns = jiffies_to_nsecs(delta_jiff);
6172 
6173 			if (sdebug_random && ns < U32_MAX) {
6174 				ns = get_random_u32_below((u32)ns);
6175 			} else if (sdebug_random) {
6176 				ns >>= 12;	/* scale to 4 usec precision */
6177 				if (ns < U32_MAX)	/* over 4 hours max */
6178 					ns = get_random_u32_below((u32)ns);
6179 				ns <<= 12;
6180 			}
6181 			kt = ns_to_ktime(ns);
6182 		} else {	/* ndelay has a 4.2 second max */
6183 			kt = sdebug_random ? get_random_u32_below((u32)ndelay) :
6184 					     (u32)ndelay;
6185 			if (ndelay < INCLUSIVE_TIMING_MAX_NS) {
6186 				u64 d = ktime_get_boottime_ns() - ns_from_boot;
6187 
6188 				if (kt <= d) {	/* elapsed duration >= kt */
6189 					/* call scsi_done() from this thread */
6190 					sdebug_free_queued_cmd(sqcp);
6191 					scsi_done(cmnd);
6192 					return 0;
6193 				}
6194 				/* otherwise reduce kt by elapsed time */
6195 				kt -= d;
6196 			}
6197 		}
6198 		if (sdebug_statistics)
6199 			sd_dp->issuing_cpu = raw_smp_processor_id();
6200 		if (polled) {
6201 			spin_lock_irqsave(&sdsc->lock, flags);
6202 			sd_dp->cmpl_ts = ktime_add(ns_to_ktime(ns_from_boot), kt);
6203 			ASSIGN_QUEUED_CMD(cmnd, sqcp);
6204 			WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_POLL);
6205 			spin_unlock_irqrestore(&sdsc->lock, flags);
6206 		} else {
6207 			/* schedule the invocation of scsi_done() for a later time */
6208 			spin_lock_irqsave(&sdsc->lock, flags);
6209 			ASSIGN_QUEUED_CMD(cmnd, sqcp);
6210 			WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_HRT);
6211 			hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED);
6212 			/*
6213 			 * The completion handler will try to grab sqcp->lock,
6214 			 * so there is no chance that the completion handler
6215 			 * will call scsi_done() until we release the lock
6216 			 * here (so ok to keep referencing sdsc).
6217 			 */
6218 			spin_unlock_irqrestore(&sdsc->lock, flags);
6219 		}
6220 	} else {	/* delta_jiff < 0, use work queue */
6221 		if (unlikely((sdebug_opts & SDEBUG_OPT_CMD_ABORT) &&
6222 			     atomic_read(&sdeb_inject_pending))) {
6223 			sd_dp->aborted = true;
6224 			atomic_set(&sdeb_inject_pending, 0);
6225 			sdev_printk(KERN_INFO, sdp, "abort request tag=%#x\n",
6226 				    blk_mq_unique_tag_to_tag(get_tag(cmnd)));
6227 		}
6228 
6229 		if (sdebug_statistics)
6230 			sd_dp->issuing_cpu = raw_smp_processor_id();
6231 		if (polled) {
6232 			spin_lock_irqsave(&sdsc->lock, flags);
6233 			ASSIGN_QUEUED_CMD(cmnd, sqcp);
6234 			sd_dp->cmpl_ts = ns_to_ktime(ns_from_boot);
6235 			WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_POLL);
6236 			spin_unlock_irqrestore(&sdsc->lock, flags);
6237 		} else {
6238 			spin_lock_irqsave(&sdsc->lock, flags);
6239 			ASSIGN_QUEUED_CMD(cmnd, sqcp);
6240 			WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_WQ);
6241 			schedule_work(&sd_dp->ew.work);
6242 			spin_unlock_irqrestore(&sdsc->lock, flags);
6243 		}
6244 	}
6245 
6246 	return 0;
6247 
6248 respond_in_thread:	/* call back to mid-layer using invocation thread */
6249 	cmnd->result = pfp != NULL ? pfp(cmnd, devip) : 0;
6250 	cmnd->result &= ~SDEG_RES_IMMED_MASK;
6251 	if (cmnd->result == 0 && scsi_result != 0)
6252 		cmnd->result = scsi_result;
6253 	scsi_done(cmnd);
6254 	return 0;
6255 }
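
/*
 * Summary of the completion paths chosen above: delta_jiff == 0 (and a
 * NULL devip) completes in the submitting thread via respond_in_thread;
 * a positive delta_jiff or ndelay arms a pinned hrtimer
 * (SDEB_DEFER_HRT); a negative delta_jiff schedules a work item
 * (SDEB_DEFER_WQ); and REQ_POLLED requests are instead parked as
 * SDEB_DEFER_POLL for the driver's blk-mq poll handler to reap.
 */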
6256 
6257 /* Note: The following macros create attribute files in the
6258    /sys/module/scsi_debug/parameters directory. Unfortunately this
6259    driver is unaware of changes made there and cannot trigger auxiliary actions
6260    as it can when the corresponding attribute in the
6261    /sys/bus/pseudo/drivers/scsi_debug directory is changed.
6262  */
6263 module_param_named(add_host, sdebug_add_host, int, S_IRUGO | S_IWUSR);
6264 module_param_named(ato, sdebug_ato, int, S_IRUGO);
6265 module_param_named(cdb_len, sdebug_cdb_len, int, 0644);
6266 module_param_named(clustering, sdebug_clustering, bool, S_IRUGO | S_IWUSR);
6267 module_param_named(delay, sdebug_jdelay, int, S_IRUGO | S_IWUSR);
6268 module_param_named(dev_size_mb, sdebug_dev_size_mb, int, S_IRUGO);
6269 module_param_named(dif, sdebug_dif, int, S_IRUGO);
6270 module_param_named(dix, sdebug_dix, int, S_IRUGO);
6271 module_param_named(dsense, sdebug_dsense, int, S_IRUGO | S_IWUSR);
6272 module_param_named(every_nth, sdebug_every_nth, int, S_IRUGO | S_IWUSR);
6273 module_param_named(fake_rw, sdebug_fake_rw, int, S_IRUGO | S_IWUSR);
6274 module_param_named(guard, sdebug_guard, uint, S_IRUGO);
6275 module_param_named(host_lock, sdebug_host_lock, bool, S_IRUGO | S_IWUSR);
6276 module_param_named(host_max_queue, sdebug_host_max_queue, int, S_IRUGO);
6277 module_param_string(inq_product, sdebug_inq_product_id,
6278 		    sizeof(sdebug_inq_product_id), S_IRUGO | S_IWUSR);
6279 module_param_string(inq_rev, sdebug_inq_product_rev,
6280 		    sizeof(sdebug_inq_product_rev), S_IRUGO | S_IWUSR);
6281 module_param_string(inq_vendor, sdebug_inq_vendor_id,
6282 		    sizeof(sdebug_inq_vendor_id), S_IRUGO | S_IWUSR);
6283 module_param_named(lbprz, sdebug_lbprz, int, S_IRUGO);
6284 module_param_named(lbpu, sdebug_lbpu, int, S_IRUGO);
6285 module_param_named(lbpws, sdebug_lbpws, int, S_IRUGO);
6286 module_param_named(lbpws10, sdebug_lbpws10, int, S_IRUGO);
6287 module_param_named(lowest_aligned, sdebug_lowest_aligned, int, S_IRUGO);
6288 module_param_named(lun_format, sdebug_lun_am_i, int, S_IRUGO | S_IWUSR);
6289 module_param_named(max_luns, sdebug_max_luns, int, S_IRUGO | S_IWUSR);
6290 module_param_named(max_queue, sdebug_max_queue, int, S_IRUGO | S_IWUSR);
6291 module_param_named(medium_error_count, sdebug_medium_error_count, int,
6292 		   S_IRUGO | S_IWUSR);
6293 module_param_named(medium_error_start, sdebug_medium_error_start, int,
6294 		   S_IRUGO | S_IWUSR);
6295 module_param_named(ndelay, sdebug_ndelay, int, S_IRUGO | S_IWUSR);
6296 module_param_named(no_lun_0, sdebug_no_lun_0, int, S_IRUGO | S_IWUSR);
6297 module_param_named(no_rwlock, sdebug_no_rwlock, bool, S_IRUGO | S_IWUSR);
6298 module_param_named(no_uld, sdebug_no_uld, int, S_IRUGO);
6299 module_param_named(num_parts, sdebug_num_parts, int, S_IRUGO);
6300 module_param_named(num_tgts, sdebug_num_tgts, int, S_IRUGO | S_IWUSR);
6301 module_param_named(opt_blks, sdebug_opt_blks, int, S_IRUGO);
6302 module_param_named(opt_xferlen_exp, sdebug_opt_xferlen_exp, int, S_IRUGO);
6303 module_param_named(opts, sdebug_opts, int, S_IRUGO | S_IWUSR);
6304 module_param_named(per_host_store, sdebug_per_host_store, bool,
6305 		   S_IRUGO | S_IWUSR);
6306 module_param_named(physblk_exp, sdebug_physblk_exp, int, S_IRUGO);
6307 module_param_named(ptype, sdebug_ptype, int, S_IRUGO | S_IWUSR);
6308 module_param_named(random, sdebug_random, bool, S_IRUGO | S_IWUSR);
6309 module_param_named(removable, sdebug_removable, bool, S_IRUGO | S_IWUSR);
6310 module_param_named(scsi_level, sdebug_scsi_level, int, S_IRUGO);
6311 module_param_named(sector_size, sdebug_sector_size, int, S_IRUGO);
6312 module_param_named(statistics, sdebug_statistics, bool, S_IRUGO | S_IWUSR);
6313 module_param_named(strict, sdebug_strict, bool, S_IRUGO | S_IWUSR);
6314 module_param_named(submit_queues, submit_queues, int, S_IRUGO);
6315 module_param_named(poll_queues, poll_queues, int, S_IRUGO);
6316 module_param_named(tur_ms_to_ready, sdeb_tur_ms_to_ready, int, S_IRUGO);
6317 module_param_named(unmap_alignment, sdebug_unmap_alignment, int, S_IRUGO);
6318 module_param_named(unmap_granularity, sdebug_unmap_granularity, int, S_IRUGO);
6319 module_param_named(unmap_max_blocks, sdebug_unmap_max_blocks, int, S_IRUGO);
6320 module_param_named(unmap_max_desc, sdebug_unmap_max_desc, int, S_IRUGO);
6321 module_param_named(uuid_ctl, sdebug_uuid_ctl, int, S_IRUGO);
6322 module_param_named(virtual_gb, sdebug_virtual_gb, int, S_IRUGO | S_IWUSR);
6323 module_param_named(vpd_use_hostno, sdebug_vpd_use_hostno, int,
6324 		   S_IRUGO | S_IWUSR);
6325 module_param_named(wp, sdebug_wp, bool, S_IRUGO | S_IWUSR);
6326 module_param_named(write_same_length, sdebug_write_same_length, int,
6327 		   S_IRUGO | S_IWUSR);
6328 module_param_named(zbc, sdeb_zbc_model_s, charp, S_IRUGO);
6329 module_param_named(zone_cap_mb, sdeb_zbc_zone_cap_mb, int, S_IRUGO);
6330 module_param_named(zone_max_open, sdeb_zbc_max_open, int, S_IRUGO);
6331 module_param_named(zone_nr_conv, sdeb_zbc_nr_conv, int, S_IRUGO);
6332 module_param_named(zone_size_mb, sdeb_zbc_zone_size_mb, int, S_IRUGO);
6333 module_param_named(allow_restart, sdebug_allow_restart, bool, S_IRUGO | S_IWUSR);
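
/*
 * Example usage (hypothetical values): load the driver with a 256 MiB
 * shared store, two targets and a fixed 5 jiffy response delay:
 *
 *   modprobe scsi_debug dev_size_mb=256 num_tgts=2 delay=5
 */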
6334 
6335 MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
6336 MODULE_DESCRIPTION("SCSI debug adapter driver");
6337 MODULE_LICENSE("GPL");
6338 MODULE_VERSION(SDEBUG_VERSION);
6339 
6340 MODULE_PARM_DESC(add_host, "add n hosts; if negative (via sysfs) remove host(s) (def=1)");
6341 MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
6342 MODULE_PARM_DESC(cdb_len, "suggest CDB lengths to drivers (def=10)");
6343 MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
6344 MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny");
6345 MODULE_PARM_DESC(dev_size_mb, "size in MiB of ram shared by devs(def=8)");
6346 MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
6347 MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
6348 MODULE_PARM_DESC(dsense, "use descriptor sense format (def=0 -> fixed)");
6349 MODULE_PARM_DESC(every_nth, "timeout every nth command (def=0)");
6350 MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
6351 MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
6352 MODULE_PARM_DESC(host_lock, "host_lock is ignored (def=0)");
6353 MODULE_PARM_DESC(host_max_queue,
6354 		 "host max # of queued cmds (0 to max(def) [max_queue fixed equal for !0])");
6355 MODULE_PARM_DESC(inq_product, "SCSI INQUIRY product string (def=\"scsi_debug\")");
6356 MODULE_PARM_DESC(inq_rev, "SCSI INQUIRY revision string (def=\""
6357 		 SDEBUG_VERSION "\")");
6358 MODULE_PARM_DESC(inq_vendor, "SCSI INQUIRY vendor string (def=\"Linux\")");
6359 MODULE_PARM_DESC(lbprz,
6360 		 "on read of unmapped LBs return 0 when 1 (def) and 0xff when 2");
6361 MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
6362 MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
6363 MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
6364 MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
6365 MODULE_PARM_DESC(lun_format, "LUN format: 0 -> peripheral (def); 1 -> flat address method");
6366 MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate (def=1)");
6367 MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to max(def))");
6368 MODULE_PARM_DESC(medium_error_count, "count of consecutive sectors on which to return MEDIUM error");
6369 MODULE_PARM_DESC(medium_error_start, "starting sector number to return MEDIUM error");
6370 MODULE_PARM_DESC(ndelay, "response delay in nanoseconds (def=0 -> ignore)");
6371 MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
6372 MODULE_PARM_DESC(no_rwlock, "don't protect user data reads+writes (def=0)");
6373 MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0)");
6374 MODULE_PARM_DESC(num_parts, "number of partitions (def=0)");
6375 MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate (def=1)");
6376 MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=1024)");
6377 MODULE_PARM_DESC(opt_xferlen_exp, "optimal transfer length granularity exponent (def=physblk_exp)");
6378 MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
6379 MODULE_PARM_DESC(per_host_store, "If set, next positive add_host will get new store (def=0)");
6380 MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
6381 MODULE_PARM_DESC(poll_queues, "support for io_uring iopoll queues (1 to max(submit_queues - 1))");
6382 MODULE_PARM_DESC(ptype, "SCSI peripheral type (def=0 [disk])");
6383 MODULE_PARM_DESC(random, "If set, uniformly randomize command duration between 0 and delay_in_ns");
6384 MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
6385 MODULE_PARM_DESC(scsi_level, "SCSI level to simulate (def=7 [SPC-5])");
6386 MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
6387 MODULE_PARM_DESC(statistics, "collect statistics on commands, queues (def=0)");
6388 MODULE_PARM_DESC(strict, "stricter checks: reserved field in cdb (def=0)");
6389 MODULE_PARM_DESC(submit_queues, "support for block multi-queue (def=1)");
6390 MODULE_PARM_DESC(tur_ms_to_ready, "TEST UNIT READY millisecs before initial good status (def=0)");
6391 MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
6392 MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
6393 MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)");
6394 MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
6395 MODULE_PARM_DESC(uuid_ctl,
6396 		 "1->use uuid for lu name, 0->don't, 2->all use same (def=0)");
6397 MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)");
6398 MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
6399 MODULE_PARM_DESC(wp, "Write Protect (def=0)");
6400 MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
6401 MODULE_PARM_DESC(zbc, "'none' [0]; 'aware' [1]; 'managed' [2] (def=0). Can have 'host-' prefix");
6402 MODULE_PARM_DESC(zone_cap_mb, "Zone capacity in MiB (def=zone size)");
6403 MODULE_PARM_DESC(zone_max_open, "Maximum number of open zones; [0] for no limit (def=auto)");
6404 MODULE_PARM_DESC(zone_nr_conv, "Number of conventional zones (def=1)");
6405 MODULE_PARM_DESC(zone_size_mb, "Zone size in MiB (def=auto)");
6406 MODULE_PARM_DESC(allow_restart, "Set scsi_device's allow_restart flag (def=0)");
6407 
6408 #define SDEBUG_INFO_LEN 256
6409 static char sdebug_info[SDEBUG_INFO_LEN];
6410 
6411 static const char *scsi_debug_info(struct Scsi_Host *shp)
6412 {
6413 	int k;
6414 
6415 	k = scnprintf(sdebug_info, SDEBUG_INFO_LEN, "%s: version %s [%s]\n",
6416 		      my_name, SDEBUG_VERSION, sdebug_version_date);
6417 	if (k >= (SDEBUG_INFO_LEN - 1))
6418 		return sdebug_info;
6419 	scnprintf(sdebug_info + k, SDEBUG_INFO_LEN - k,
6420 		  "  dev_size_mb=%d, opts=0x%x, submit_queues=%d, %s=%d",
6421 		  sdebug_dev_size_mb, sdebug_opts, submit_queues,
6422 		  "statistics", (int)sdebug_statistics);
6423 	return sdebug_info;
6424 }
6425 
6426 /* 'echo <val> > /proc/scsi/scsi_debug/<host_id>' writes to opts */
6427 static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer,
6428 				 int length)
6429 {
6430 	char arr[16];
6431 	int opts;
6432 	int minLen = length > 15 ? 15 : length;
6433 
6434 	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
6435 		return -EACCES;
6436 	memcpy(arr, buffer, minLen);
6437 	arr[minLen] = '\0';
6438 	if (1 != sscanf(arr, "%d", &opts))
6439 		return -EINVAL;
6440 	sdebug_opts = opts;
6441 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
6442 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
6443 	if (sdebug_every_nth != 0)
6444 		tweak_cmnd_count();
6445 	return length;
6446 }
6447 
6448 struct sdebug_submit_queue_data {
6449 	int *first;
6450 	int *last;
6451 	int queue_num;
6452 };
6453 
6454 static bool sdebug_submit_queue_iter(struct request *rq, void *opaque)
6455 {
6456 	struct sdebug_submit_queue_data *data = opaque;
6457 	u32 unique_tag = blk_mq_unique_tag(rq);
6458 	u16 hwq = blk_mq_unique_tag_to_hwq(unique_tag);
6459 	u16 tag = blk_mq_unique_tag_to_tag(unique_tag);
6460 	int queue_num = data->queue_num;
6461 
6462 	if (hwq != queue_num)
6463 		return true;
6464 
6465 	/* Rely on iterating in ascending tag order */
6466 	if (*data->first == -1)
6467 		*data->first = *data->last = tag;
6468 	else
6469 		*data->last = tag;
6470 
6471 	return true;
6472 }
6473 
6474 /* Output seen with 'cat /proc/scsi/scsi_debug/<host_id>'. It will be the
6475  * same for each scsi_debug host (if more than one). Some of the counters
6476  * in the output are not atomic, so they may be inaccurate on a busy system. */
6477 static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
6478 {
6479 	struct sdebug_host_info *sdhp;
6480 	int j;
6481 
6482 	seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n",
6483 		   SDEBUG_VERSION, sdebug_version_date);
6484 	seq_printf(m, "num_tgts=%d, %ssize=%d MB, opts=0x%x, every_nth=%d\n",
6485 		   sdebug_num_tgts, "shared (ram) ", sdebug_dev_size_mb,
6486 		   sdebug_opts, sdebug_every_nth);
6487 	seq_printf(m, "delay=%d, ndelay=%d, max_luns=%d, sector_size=%d %s\n",
6488 		   sdebug_jdelay, sdebug_ndelay, sdebug_max_luns,
6489 		   sdebug_sector_size, "bytes");
6490 	seq_printf(m, "cylinders=%d, heads=%d, sectors=%d, command aborts=%d\n",
6491 		   sdebug_cylinders_per, sdebug_heads, sdebug_sectors_per,
6492 		   num_aborts);
6493 	seq_printf(m, "RESETs: device=%d, target=%d, bus=%d, host=%d\n",
6494 		   num_dev_resets, num_target_resets, num_bus_resets,
6495 		   num_host_resets);
6496 	seq_printf(m, "dix_reads=%d, dix_writes=%d, dif_errors=%d\n",
6497 		   dix_reads, dix_writes, dif_errors);
6498 	seq_printf(m, "usec_in_jiffy=%lu, statistics=%d\n", TICK_NSEC / 1000,
6499 		   sdebug_statistics);
6500 	seq_printf(m, "cmnd_count=%d, completions=%d, %s=%d, a_tsf=%d, mq_polls=%d\n",
6501 		   atomic_read(&sdebug_cmnd_count),
6502 		   atomic_read(&sdebug_completions),
6503 		   "miss_cpus", atomic_read(&sdebug_miss_cpus),
6504 		   atomic_read(&sdebug_a_tsf),
6505 		   atomic_read(&sdeb_mq_poll_count));
6506 
6507 	seq_printf(m, "submit_queues=%d\n", submit_queues);
6508 	for (j = 0; j < submit_queues; ++j) {
6509 		int f = -1, l = -1;
6510 		struct sdebug_submit_queue_data data = {
6511 			.queue_num = j,
6512 			.first = &f,
6513 			.last = &l,
6514 		};
6515 		seq_printf(m, "  queue %d:\n", j);
6516 		blk_mq_tagset_busy_iter(&host->tag_set, sdebug_submit_queue_iter,
6517 					&data);
6518 		if (f >= 0) {
6519 			seq_printf(m, "    in_use_bm BUSY: first,last bits: %d,%d\n",
6520 				   f, l);
6521 		}
6522 	}
6523 
6524 	seq_printf(m, "this host_no=%d\n", host->host_no);
6525 	if (!xa_empty(per_store_ap)) {
6526 		bool niu;
6527 		int idx;
6528 		unsigned long l_idx;
6529 		struct sdeb_store_info *sip;
6530 
6531 		seq_puts(m, "\nhost list:\n");
6532 		j = 0;
6533 		list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
6534 			idx = sdhp->si_idx;
6535 			seq_printf(m, "  %d: host_no=%d, si_idx=%d\n", j,
6536 				   sdhp->shost->host_no, idx);
6537 			++j;
6538 		}
6539 		seq_printf(m, "\nper_store array [most_recent_idx=%d]:\n",
6540 			   sdeb_most_recent_idx);
6541 		j = 0;
6542 		xa_for_each(per_store_ap, l_idx, sip) {
6543 			niu = xa_get_mark(per_store_ap, l_idx,
6544 					  SDEB_XA_NOT_IN_USE);
6545 			idx = (int)l_idx;
6546 			seq_printf(m, "  %d: idx=%d%s\n", j, idx,
6547 				   (niu ? "  not_in_use" : ""));
6548 			++j;
6549 		}
6550 	}
6551 	return 0;
6552 }
6553 
6554 static ssize_t delay_show(struct device_driver *ddp, char *buf)
6555 {
6556 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_jdelay);
6557 }
6558 /* Returns -EBUSY if jdelay is being changed and commands are queued. The unit
6559  * of delay is jiffies.
6560  */
6561 static ssize_t delay_store(struct device_driver *ddp, const char *buf,
6562 			   size_t count)
6563 {
6564 	int jdelay, res;
6565 
6566 	if (count > 0 && sscanf(buf, "%d", &jdelay) == 1) {
6567 		res = count;
6568 		if (sdebug_jdelay != jdelay) {
6569 			struct sdebug_host_info *sdhp;
6570 
6571 			mutex_lock(&sdebug_host_list_mutex);
6572 			block_unblock_all_queues(true);
6573 
6574 			list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
6575 				struct Scsi_Host *shost = sdhp->shost;
6576 
6577 				if (scsi_host_busy(shost)) {
6578 					res = -EBUSY;   /* queued commands */
6579 					break;
6580 				}
6581 			}
6582 			if (res > 0) {
6583 				sdebug_jdelay = jdelay;
6584 				sdebug_ndelay = 0;
6585 			}
6586 			block_unblock_all_queues(false);
6587 			mutex_unlock(&sdebug_host_list_mutex);
6588 		}
6589 		return res;
6590 	}
6591 	return -EINVAL;
6592 }
6593 static DRIVER_ATTR_RW(delay);
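/*
 * Illustrative usage (sysfs path per the attribute table note further
 * below; the value is an example only). Delay each response by 4 jiffies:
 *     echo 4 > /sys/bus/pseudo/drivers/scsi_debug/delay
 * The write fails with EBUSY while any scsi_debug host has queued commands.
 */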
6594 
6595 static ssize_t ndelay_show(struct device_driver *ddp, char *buf)
6596 {
6597 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ndelay);
6598 }
6599 /* Returns -EBUSY if ndelay is being changed and commands are queued */
6600 /* If > 0 and accepted then sdebug_jdelay is set to JDELAY_OVERRIDDEN */
6601 static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
6602 			    size_t count)
6603 {
6604 	int ndelay, res;
6605 
6606 	if ((count > 0) && (1 == sscanf(buf, "%d", &ndelay)) &&
6607 	    (ndelay >= 0) && (ndelay < (1000 * 1000 * 1000))) {
6608 		res = count;
6609 		if (sdebug_ndelay != ndelay) {
6610 			struct sdebug_host_info *sdhp;
6611 
6612 			mutex_lock(&sdebug_host_list_mutex);
6613 			block_unblock_all_queues(true);
6614 
6615 			list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
6616 				struct Scsi_Host *shost = sdhp->shost;
6617 
6618 				if (scsi_host_busy(shost)) {
6619 					res = -EBUSY;   /* queued commands */
6620 					break;
6621 				}
6622 			}
6623 
6624 			if (res > 0) {
6625 				sdebug_ndelay = ndelay;
6626 				sdebug_jdelay = ndelay ? JDELAY_OVERRIDDEN
6627 							: DEF_JDELAY;
6628 			}
6629 			block_unblock_all_queues(false);
6630 			mutex_unlock(&sdebug_host_list_mutex);
6631 		}
6632 		return res;
6633 	}
6634 	return -EINVAL;
6635 }
6636 static DRIVER_ATTR_RW(ndelay);
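/*
 * Illustrative usage: a 1 millisecond response delay, expressed in
 * nanoseconds (the range check above requires a value below 10^9):
 *     echo 1000000 > /sys/bus/pseudo/drivers/scsi_debug/ndelay
 * A non-zero ndelay overrides delay (sdebug_jdelay becomes
 * JDELAY_OVERRIDDEN).
 */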
6637 
6638 static ssize_t opts_show(struct device_driver *ddp, char *buf)
6639 {
6640 	return scnprintf(buf, PAGE_SIZE, "0x%x\n", sdebug_opts);
6641 }
6642 
6643 static ssize_t opts_store(struct device_driver *ddp, const char *buf,
6644 			  size_t count)
6645 {
6646 	int opts;
6647 	char work[20];
6648 
6649 	if (sscanf(buf, "%10s", work) == 1) {
6650 		if (strncasecmp(work, "0x", 2) == 0) {
6651 			if (kstrtoint(work + 2, 16, &opts) == 0)
6652 				goto opts_done;
6653 		} else {
6654 			if (kstrtoint(work, 10, &opts) == 0)
6655 				goto opts_done;
6656 		}
6657 	}
6658 	return -EINVAL;
6659 opts_done:
6660 	sdebug_opts = opts;
6661 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
6662 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
6663 	tweak_cmnd_count();
6664 	return count;
6665 }
6666 static DRIVER_ATTR_RW(opts);
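/*
 * Since opts_store() accepts decimal or "0x"-prefixed hex, these two
 * illustrative writes are equivalent:
 *     echo 16 > /sys/bus/pseudo/drivers/scsi_debug/opts
 *     echo 0x10 > /sys/bus/pseudo/drivers/scsi_debug/opts
 */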
6667 
6668 static ssize_t ptype_show(struct device_driver *ddp, char *buf)
6669 {
6670 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ptype);
6671 }
6672 static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
6673 			   size_t count)
6674 {
6675 	int n;
6676 
6677 	/* Cannot change from or to TYPE_ZBC with sysfs */
6678 	if (sdebug_ptype == TYPE_ZBC)
6679 		return -EINVAL;
6680 
6681 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6682 		if (n == TYPE_ZBC)
6683 			return -EINVAL;
6684 		sdebug_ptype = n;
6685 		return count;
6686 	}
6687 	return -EINVAL;
6688 }
6689 static DRIVER_ATTR_RW(ptype);
6690 
6691 static ssize_t dsense_show(struct device_driver *ddp, char *buf)
6692 {
6693 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dsense);
6694 }
6695 static ssize_t dsense_store(struct device_driver *ddp, const char *buf,
6696 			    size_t count)
6697 {
6698 	int n;
6699 
6700 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6701 		sdebug_dsense = n;
6702 		return count;
6703 	}
6704 	return -EINVAL;
6705 }
6706 static DRIVER_ATTR_RW(dsense);
6707 
6708 static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
6709 {
6710 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_fake_rw);
6711 }
6712 static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
6713 			     size_t count)
6714 {
6715 	int n, idx;
6716 
6717 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6718 		bool want_store = (n == 0);
6719 		struct sdebug_host_info *sdhp;
6720 
6721 		n = (n > 0);
6722 		sdebug_fake_rw = (sdebug_fake_rw > 0);
6723 		if (sdebug_fake_rw == n)
6724 			return count;	/* not transitioning so do nothing */
6725 
6726 		if (want_store) {	/* 1 --> 0 transition, set up store */
6727 			if (sdeb_first_idx < 0) {
6728 				idx = sdebug_add_store();
6729 				if (idx < 0)
6730 					return idx;
6731 			} else {
6732 				idx = sdeb_first_idx;
6733 				xa_clear_mark(per_store_ap, idx,
6734 					      SDEB_XA_NOT_IN_USE);
6735 			}
6736 			/* make all hosts use same store */
6737 			list_for_each_entry(sdhp, &sdebug_host_list,
6738 					    host_list) {
6739 				if (sdhp->si_idx != idx) {
6740 					xa_set_mark(per_store_ap, sdhp->si_idx,
6741 						    SDEB_XA_NOT_IN_USE);
6742 					sdhp->si_idx = idx;
6743 				}
6744 			}
6745 			sdeb_most_recent_idx = idx;
6746 		} else {	/* 0 --> 1 transition is trigger for shrink */
6747 			sdebug_erase_all_stores(true /* apart from first */);
6748 		}
6749 		sdebug_fake_rw = n;
6750 		return count;
6751 	}
6752 	return -EINVAL;
6753 }
6754 static DRIVER_ATTR_RW(fake_rw);
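/*
 * Illustrative semantics of the store above: writing 0 when fake_rw was 1
 * (re)attaches a shared ram store to every host, while writing 1 when it
 * was 0 erases all stores apart from the first:
 *     echo 0 > /sys/bus/pseudo/drivers/scsi_debug/fake_rw
 */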
6755 
6756 static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf)
6757 {
6758 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_lun_0);
6759 }
6760 static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf,
6761 			      size_t count)
6762 {
6763 	int n;
6764 
6765 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6766 		sdebug_no_lun_0 = n;
6767 		return count;
6768 	}
6769 	return -EINVAL;
6770 }
6771 static DRIVER_ATTR_RW(no_lun_0);
6772 
6773 static ssize_t num_tgts_show(struct device_driver *ddp, char *buf)
6774 {
6775 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_tgts);
6776 }
6777 static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf,
6778 			      size_t count)
6779 {
6780 	int n;
6781 
6782 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6783 		sdebug_num_tgts = n;
6784 		sdebug_max_tgts_luns();
6785 		return count;
6786 	}
6787 	return -EINVAL;
6788 }
6789 static DRIVER_ATTR_RW(num_tgts);
6790 
6791 static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf)
6792 {
6793 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dev_size_mb);
6794 }
6795 static DRIVER_ATTR_RO(dev_size_mb);
6796 
6797 static ssize_t per_host_store_show(struct device_driver *ddp, char *buf)
6798 {
6799 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_per_host_store);
6800 }
6801 
6802 static ssize_t per_host_store_store(struct device_driver *ddp, const char *buf,
6803 				    size_t count)
6804 {
6805 	bool v;
6806 
6807 	if (kstrtobool(buf, &v))
6808 		return -EINVAL;
6809 
6810 	sdebug_per_host_store = v;
6811 	return count;
6812 }
6813 static DRIVER_ATTR_RW(per_host_store);
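/*
 * kstrtobool() above accepts the usual boolean spellings, so either of
 * these illustrative writes enables per-host stores:
 *     echo 1 > /sys/bus/pseudo/drivers/scsi_debug/per_host_store
 *     echo on > /sys/bus/pseudo/drivers/scsi_debug/per_host_store
 * The new setting only affects hosts added afterwards (see
 * add_host_store() below).
 */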
6814 
6815 static ssize_t num_parts_show(struct device_driver *ddp, char *buf)
6816 {
6817 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_parts);
6818 }
6819 static DRIVER_ATTR_RO(num_parts);
6820 
6821 static ssize_t every_nth_show(struct device_driver *ddp, char *buf)
6822 {
6823 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_every_nth);
6824 }
6825 static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
6826 			       size_t count)
6827 {
6828 	int nth;
6829 	char work[20];
6830 
6831 	if (sscanf(buf, "%10s", work) == 1) {
6832 		if (strncasecmp(work, "0x", 2) == 0) {
6833 			if (kstrtoint(work + 2, 16, &nth) == 0)
6834 				goto every_nth_done;
6835 		} else {
6836 			if (kstrtoint(work, 10, &nth) == 0)
6837 				goto every_nth_done;
6838 		}
6839 	}
6840 	return -EINVAL;
6841 
6842 every_nth_done:
6843 	sdebug_every_nth = nth;
6844 	if (nth && !sdebug_statistics) {
6845 		pr_info("every_nth needs statistics=1, setting it\n");
6846 		sdebug_statistics = true;
6847 	}
6848 	tweak_cmnd_count();
6849 	return count;
6850 }
6851 static DRIVER_ATTR_RW(every_nth);
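/*
 * Illustrative usage: act on every 100th command (what that action is
 * depends on the opts flags currently set):
 *     echo 100 > /sys/bus/pseudo/drivers/scsi_debug/every_nth
 * As the code above shows, statistics are switched on automatically when
 * needed.
 */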
6852 
6853 static ssize_t lun_format_show(struct device_driver *ddp, char *buf)
6854 {
6855 	return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_lun_am);
6856 }
6857 static ssize_t lun_format_store(struct device_driver *ddp, const char *buf,
6858 				size_t count)
6859 {
6860 	int n;
6861 	bool changed;
6862 
6863 	if (kstrtoint(buf, 0, &n))
6864 		return -EINVAL;
6865 	if (n >= 0) {
6866 		if (n > (int)SAM_LUN_AM_FLAT) {
6867 			pr_warn("only LUN address methods 0 and 1 are supported\n");
6868 			return -EINVAL;
6869 		}
6870 		changed = ((int)sdebug_lun_am != n);
6871 		sdebug_lun_am = n;
6872 		if (changed && sdebug_scsi_level >= 5) {	/* >= SPC-3 */
6873 			struct sdebug_host_info *sdhp;
6874 			struct sdebug_dev_info *dp;
6875 
6876 			mutex_lock(&sdebug_host_list_mutex);
6877 			list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
6878 				list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
6879 					set_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
6880 				}
6881 			}
6882 			mutex_unlock(&sdebug_host_list_mutex);
6883 		}
6884 		return count;
6885 	}
6886 	return -EINVAL;
6887 }
6888 static DRIVER_ATTR_RW(lun_format);
6889 
6890 static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
6891 {
6892 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_luns);
6893 }
6894 static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
6895 			      size_t count)
6896 {
6897 	int n;
6898 	bool changed;
6899 
6900 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6901 		if (n > 256) {
6902 			pr_warn("max_luns can be no more than 256\n");
6903 			return -EINVAL;
6904 		}
6905 		changed = (sdebug_max_luns != n);
6906 		sdebug_max_luns = n;
6907 		sdebug_max_tgts_luns();
6908 		if (changed && (sdebug_scsi_level >= 5)) {	/* >= SPC-3 */
6909 			struct sdebug_host_info *sdhp;
6910 			struct sdebug_dev_info *dp;
6911 
6912 			mutex_lock(&sdebug_host_list_mutex);
6913 			list_for_each_entry(sdhp, &sdebug_host_list,
6914 					    host_list) {
6915 				list_for_each_entry(dp, &sdhp->dev_info_list,
6916 						    dev_list) {
6917 					set_bit(SDEBUG_UA_LUNS_CHANGED,
6918 						dp->uas_bm);
6919 				}
6920 			}
6921 			mutex_unlock(&sdebug_host_list_mutex);
6922 		}
6923 		return count;
6924 	}
6925 	return -EINVAL;
6926 }
6927 static DRIVER_ATTR_RW(max_luns);
6928 
6929 static ssize_t max_queue_show(struct device_driver *ddp, char *buf)
6930 {
6931 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_queue);
6932 }
6933 /* N.B. max_queue can only be changed while no shosts are present. If any
6934  * exist, -EBUSY is returned and the depth is left unchanged. */
6935 static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
6936 			       size_t count)
6937 {
6938 	int n;
6939 
6940 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
6941 	    (n <= SDEBUG_CANQUEUE) &&
6942 	    (sdebug_host_max_queue == 0)) {
6943 		mutex_lock(&sdebug_host_list_mutex);
6944 
6945 		/* We may only change sdebug_max_queue when we have no shosts */
6946 		if (list_empty(&sdebug_host_list))
6947 			sdebug_max_queue = n;
6948 		else
6949 			count = -EBUSY;
6950 		mutex_unlock(&sdebug_host_list_mutex);
6951 		return count;
6952 	}
6953 	return -EINVAL;
6954 }
6955 static DRIVER_ATTR_RW(max_queue);
6956 
6957 static ssize_t host_max_queue_show(struct device_driver *ddp, char *buf)
6958 {
6959 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_host_max_queue);
6960 }
6961 
6962 static ssize_t no_rwlock_show(struct device_driver *ddp, char *buf)
6963 {
6964 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_rwlock);
6965 }
6966 
6967 static ssize_t no_rwlock_store(struct device_driver *ddp, const char *buf, size_t count)
6968 {
6969 	bool v;
6970 
6971 	if (kstrtobool(buf, &v))
6972 		return -EINVAL;
6973 
6974 	sdebug_no_rwlock = v;
6975 	return count;
6976 }
6977 static DRIVER_ATTR_RW(no_rwlock);
6978 
6979 /*
6980  * Since this is used for .can_queue, and we get the hc_idx tag from the bitmap
6981  * in range [0, sdebug_host_max_queue), we can't change it.
6982  */
6983 static DRIVER_ATTR_RO(host_max_queue);
6984 
6985 static ssize_t no_uld_show(struct device_driver *ddp, char *buf)
6986 {
6987 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_uld);
6988 }
6989 static DRIVER_ATTR_RO(no_uld);
6990 
6991 static ssize_t scsi_level_show(struct device_driver *ddp, char *buf)
6992 {
6993 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_scsi_level);
6994 }
6995 static DRIVER_ATTR_RO(scsi_level);
6996 
6997 static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf)
6998 {
6999 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_virtual_gb);
7000 }
7001 static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
7002 				size_t count)
7003 {
7004 	int n;
7005 	bool changed;
7006 
7007 	/* Ignore capacity change for ZBC drives for now */
7008 	if (sdeb_zbc_in_use)
7009 		return -EOPNOTSUPP;
7010 
7011 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
7012 		changed = (sdebug_virtual_gb != n);
7013 		sdebug_virtual_gb = n;
7014 		sdebug_capacity = get_sdebug_capacity();
7015 		if (changed) {
7016 			struct sdebug_host_info *sdhp;
7017 			struct sdebug_dev_info *dp;
7018 
7019 			mutex_lock(&sdebug_host_list_mutex);
7020 			list_for_each_entry(sdhp, &sdebug_host_list,
7021 					    host_list) {
7022 				list_for_each_entry(dp, &sdhp->dev_info_list,
7023 						    dev_list) {
7024 					set_bit(SDEBUG_UA_CAPACITY_CHANGED,
7025 						dp->uas_bm);
7026 				}
7027 			}
7028 			mutex_unlock(&sdebug_host_list_mutex);
7029 		}
7030 		return count;
7031 	}
7032 	return -EINVAL;
7033 }
7034 static DRIVER_ATTR_RW(virtual_gb);
7035 
7036 static ssize_t add_host_show(struct device_driver *ddp, char *buf)
7037 {
7038 	/* show the absolute number of currently active hosts */
7039 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_hosts);
7040 }
7041 
7042 static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
7043 			      size_t count)
7044 {
7045 	bool found;
7046 	unsigned long idx;
7047 	struct sdeb_store_info *sip;
7048 	bool want_phs = (sdebug_fake_rw == 0) && sdebug_per_host_store;
7049 	int delta_hosts;
7050 
7051 	if (sscanf(buf, "%d", &delta_hosts) != 1)
7052 		return -EINVAL;
7053 	if (delta_hosts > 0) {
7054 		do {
7055 			found = false;
7056 			if (want_phs) {
7057 				xa_for_each_marked(per_store_ap, idx, sip,
7058 						   SDEB_XA_NOT_IN_USE) {
7059 					sdeb_most_recent_idx = (int)idx;
7060 					found = true;
7061 					break;
7062 				}
7063 				if (found)	/* re-use case */
7064 					sdebug_add_host_helper((int)idx);
7065 				else
7066 					sdebug_do_add_host(true);
7067 			} else {
7068 				sdebug_do_add_host(false);
7069 			}
7070 		} while (--delta_hosts);
7071 	} else if (delta_hosts < 0) {
7072 		do {
7073 			sdebug_do_remove_host(false);
7074 		} while (++delta_hosts);
7075 	}
7076 	return count;
7077 }
7078 static DRIVER_ATTR_RW(add_host);
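/*
 * Illustrative usage: the value written to add_host is a delta, not an
 * absolute count:
 *     echo 2 > /sys/bus/pseudo/drivers/scsi_debug/add_host	# add 2 hosts
 *     echo -1 > /sys/bus/pseudo/drivers/scsi_debug/add_host	# remove 1
 * Reading the file back reports the absolute number of active hosts.
 */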
7079 
7080 static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf)
7081 {
7082 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_vpd_use_hostno);
7083 }
7084 static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf,
7085 				    size_t count)
7086 {
7087 	int n;
7088 
7089 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
7090 		sdebug_vpd_use_hostno = n;
7091 		return count;
7092 	}
7093 	return -EINVAL;
7094 }
7095 static DRIVER_ATTR_RW(vpd_use_hostno);
7096 
7097 static ssize_t statistics_show(struct device_driver *ddp, char *buf)
7098 {
7099 	return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_statistics);
7100 }
7101 static ssize_t statistics_store(struct device_driver *ddp, const char *buf,
7102 				size_t count)
7103 {
7104 	int n;
7105 
7106 	if ((count > 0) && (sscanf(buf, "%d", &n) == 1) && (n >= 0)) {
7107 		if (n > 0)
7108 			sdebug_statistics = true;
7109 		else {
7110 			clear_queue_stats();
7111 			sdebug_statistics = false;
7112 		}
7113 		return count;
7114 	}
7115 	return -EINVAL;
7116 }
7117 static DRIVER_ATTR_RW(statistics);
7118 
7119 static ssize_t sector_size_show(struct device_driver *ddp, char *buf)
7120 {
7121 	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_sector_size);
7122 }
7123 static DRIVER_ATTR_RO(sector_size);
7124 
7125 static ssize_t submit_queues_show(struct device_driver *ddp, char *buf)
7126 {
7127 	return scnprintf(buf, PAGE_SIZE, "%d\n", submit_queues);
7128 }
7129 static DRIVER_ATTR_RO(submit_queues);
7130 
7131 static ssize_t dix_show(struct device_driver *ddp, char *buf)
7132 {
7133 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dix);
7134 }
7135 static DRIVER_ATTR_RO(dix);
7136 
7137 static ssize_t dif_show(struct device_driver *ddp, char *buf)
7138 {
7139 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dif);
7140 }
7141 static DRIVER_ATTR_RO(dif);
7142 
7143 static ssize_t guard_show(struct device_driver *ddp, char *buf)
7144 {
7145 	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_guard);
7146 }
7147 static DRIVER_ATTR_RO(guard);
7148 
7149 static ssize_t ato_show(struct device_driver *ddp, char *buf)
7150 {
7151 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ato);
7152 }
7153 static DRIVER_ATTR_RO(ato);
7154 
7155 static ssize_t map_show(struct device_driver *ddp, char *buf)
7156 {
7157 	ssize_t count = 0;
7158 
7159 	if (!scsi_debug_lbp())
7160 		return scnprintf(buf, PAGE_SIZE, "0-%u\n",
7161 				 sdebug_store_sectors);
7162 
7163 	if (sdebug_fake_rw == 0 && !xa_empty(per_store_ap)) {
7164 		struct sdeb_store_info *sip = xa_load(per_store_ap, 0);
7165 
7166 		if (sip)
7167 			count = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
7168 					  (int)map_size, sip->map_storep);
7169 	}
7170 	buf[count++] = '\n';
7171 	buf[count] = '\0';
7172 
7173 	return count;
7174 }
7175 static DRIVER_ATTR_RO(map);
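/*
 * The "%*pbl" format above renders the provisioning bitmap as a range
 * list, so a read might look like this (output is illustrative only):
 *     $ cat /sys/bus/pseudo/drivers/scsi_debug/map
 *     0-1,64-79
 * meaning those provisioning blocks are currently mapped.
 */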
7176 
7177 static ssize_t random_show(struct device_driver *ddp, char *buf)
7178 {
7179 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_random);
7180 }
7181 
7182 static ssize_t random_store(struct device_driver *ddp, const char *buf,
7183 			    size_t count)
7184 {
7185 	bool v;
7186 
7187 	if (kstrtobool(buf, &v))
7188 		return -EINVAL;
7189 
7190 	sdebug_random = v;
7191 	return count;
7192 }
7193 static DRIVER_ATTR_RW(random);
7194 
7195 static ssize_t removable_show(struct device_driver *ddp, char *buf)
7196 {
7197 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_removable ? 1 : 0);
7198 }
7199 static ssize_t removable_store(struct device_driver *ddp, const char *buf,
7200 			       size_t count)
7201 {
7202 	int n;
7203 
7204 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
7205 		sdebug_removable = (n > 0);
7206 		return count;
7207 	}
7208 	return -EINVAL;
7209 }
7210 static DRIVER_ATTR_RW(removable);
7211 
7212 static ssize_t host_lock_show(struct device_driver *ddp, char *buf)
7213 {
7214 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_host_lock);
7215 }
7216 /* N.B. sdebug_host_lock does nothing, kept for backward compatibility */
7217 static ssize_t host_lock_store(struct device_driver *ddp, const char *buf,
7218 			       size_t count)
7219 {
7220 	int n;
7221 
7222 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
7223 		sdebug_host_lock = (n > 0);
7224 		return count;
7225 	}
7226 	return -EINVAL;
7227 }
7228 static DRIVER_ATTR_RW(host_lock);
7229 
7230 static ssize_t strict_show(struct device_driver *ddp, char *buf)
7231 {
7232 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_strict);
7233 }
7234 static ssize_t strict_store(struct device_driver *ddp, const char *buf,
7235 			    size_t count)
7236 {
7237 	int n;
7238 
7239 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
7240 		sdebug_strict = (n > 0);
7241 		return count;
7242 	}
7243 	return -EINVAL;
7244 }
7245 static DRIVER_ATTR_RW(strict);
7246 
7247 static ssize_t uuid_ctl_show(struct device_driver *ddp, char *buf)
7248 {
7249 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_uuid_ctl);
7250 }
7251 static DRIVER_ATTR_RO(uuid_ctl);
7252 
7253 static ssize_t cdb_len_show(struct device_driver *ddp, char *buf)
7254 {
7255 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_cdb_len);
7256 }
7257 static ssize_t cdb_len_store(struct device_driver *ddp, const char *buf,
7258 			     size_t count)
7259 {
7260 	int ret, n;
7261 
7262 	ret = kstrtoint(buf, 0, &n);
7263 	if (ret)
7264 		return ret;
7265 	sdebug_cdb_len = n;
7266 	all_config_cdb_len();
7267 	return count;
7268 }
7269 static DRIVER_ATTR_RW(cdb_len);
7270 
7271 static const char * const zbc_model_strs_a[] = {
7272 	[BLK_ZONED_NONE] = "none",
7273 	[BLK_ZONED_HA]   = "host-aware",
7274 	[BLK_ZONED_HM]   = "host-managed",
7275 };
7276 
7277 static const char * const zbc_model_strs_b[] = {
7278 	[BLK_ZONED_NONE] = "no",
7279 	[BLK_ZONED_HA]   = "aware",
7280 	[BLK_ZONED_HM]   = "managed",
7281 };
7282 
7283 static const char * const zbc_model_strs_c[] = {
7284 	[BLK_ZONED_NONE] = "0",
7285 	[BLK_ZONED_HA]   = "1",
7286 	[BLK_ZONED_HM]   = "2",
7287 };
7288 
7289 static int sdeb_zbc_model_str(const char *cp)
7290 {
7291 	int res = sysfs_match_string(zbc_model_strs_a, cp);
7292 
7293 	if (res < 0) {
7294 		res = sysfs_match_string(zbc_model_strs_b, cp);
7295 		if (res < 0) {
7296 			res = sysfs_match_string(zbc_model_strs_c, cp);
7297 			if (res < 0)
7298 				return -EINVAL;
7299 		}
7300 	}
7301 	return res;
7302 }
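/*
 * Per the three string tables above, the zbc= module parameter accepts any
 * of these spellings for a host-managed model (illustrative):
 *     zbc=host-managed	zbc=managed	zbc=2
 * and similarly "none"/"no"/"0" and "host-aware"/"aware"/"1".
 */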
7303 
7304 static ssize_t zbc_show(struct device_driver *ddp, char *buf)
7305 {
7306 	return scnprintf(buf, PAGE_SIZE, "%s\n",
7307 			 zbc_model_strs_a[sdeb_zbc_model]);
7308 }
7309 static DRIVER_ATTR_RO(zbc);
7310 
7311 static ssize_t tur_ms_to_ready_show(struct device_driver *ddp, char *buf)
7312 {
7313 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdeb_tur_ms_to_ready);
7314 }
7315 static DRIVER_ATTR_RO(tur_ms_to_ready);
7316 
7317 static ssize_t group_number_stats_show(struct device_driver *ddp, char *buf)
7318 {
7319 	char *p = buf, *end = buf + PAGE_SIZE;
7320 	int i;
7321 
7322 	for (i = 0; i < ARRAY_SIZE(writes_by_group_number); i++)
7323 		p += scnprintf(p, end - p, "%d %ld\n", i,
7324 			       atomic_long_read(&writes_by_group_number[i]));
7325 
7326 	return p - buf;
7327 }
7328 
7329 static ssize_t group_number_stats_store(struct device_driver *ddp,
7330 					const char *buf, size_t count)
7331 {
7332 	int i;
7333 
7334 	for (i = 0; i < ARRAY_SIZE(writes_by_group_number); i++)
7335 		atomic_long_set(&writes_by_group_number[i], 0);
7336 
7337 	return count;
7338 }
7339 static DRIVER_ATTR_RW(group_number_stats);
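/*
 * Illustrative usage: reading dumps one "<group> <write count>" line per
 * group number; writing any value resets all of the counters:
 *     cat /sys/bus/pseudo/drivers/scsi_debug/group_number_stats
 *     echo 0 > /sys/bus/pseudo/drivers/scsi_debug/group_number_stats
 */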
7340 
7341 /* Note: The following array creates attribute files in the
7342  * /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
7343  * files (over those found in the /sys/module/scsi_debug/parameters
7344  * directory) is that auxiliary actions can be triggered when an attribute
7345  * is changed. For example see: add_host_store() above.
7346  */
7347 
7348 static struct attribute *sdebug_drv_attrs[] = {
7349 	&driver_attr_delay.attr,
7350 	&driver_attr_opts.attr,
7351 	&driver_attr_ptype.attr,
7352 	&driver_attr_dsense.attr,
7353 	&driver_attr_fake_rw.attr,
7354 	&driver_attr_host_max_queue.attr,
7355 	&driver_attr_no_lun_0.attr,
7356 	&driver_attr_num_tgts.attr,
7357 	&driver_attr_dev_size_mb.attr,
7358 	&driver_attr_num_parts.attr,
7359 	&driver_attr_every_nth.attr,
7360 	&driver_attr_lun_format.attr,
7361 	&driver_attr_max_luns.attr,
7362 	&driver_attr_max_queue.attr,
7363 	&driver_attr_no_rwlock.attr,
7364 	&driver_attr_no_uld.attr,
7365 	&driver_attr_scsi_level.attr,
7366 	&driver_attr_virtual_gb.attr,
7367 	&driver_attr_add_host.attr,
7368 	&driver_attr_per_host_store.attr,
7369 	&driver_attr_vpd_use_hostno.attr,
7370 	&driver_attr_sector_size.attr,
7371 	&driver_attr_statistics.attr,
7372 	&driver_attr_submit_queues.attr,
7373 	&driver_attr_dix.attr,
7374 	&driver_attr_dif.attr,
7375 	&driver_attr_guard.attr,
7376 	&driver_attr_ato.attr,
7377 	&driver_attr_map.attr,
7378 	&driver_attr_random.attr,
7379 	&driver_attr_removable.attr,
7380 	&driver_attr_host_lock.attr,
7381 	&driver_attr_ndelay.attr,
7382 	&driver_attr_strict.attr,
7383 	&driver_attr_uuid_ctl.attr,
7384 	&driver_attr_cdb_len.attr,
7385 	&driver_attr_tur_ms_to_ready.attr,
7386 	&driver_attr_zbc.attr,
7387 	&driver_attr_group_number_stats.attr,
7388 	NULL,
7389 };
7390 ATTRIBUTE_GROUPS(sdebug_drv);
7391 
7392 static struct device *pseudo_primary;
7393 
7394 static int __init scsi_debug_init(void)
7395 {
7396 	bool want_store = (sdebug_fake_rw == 0);
7397 	unsigned long sz;
7398 	int k, ret, hosts_to_add;
7399 	int idx = -1;
7400 
7401 	if (sdebug_ndelay >= 1000 * 1000 * 1000) {
7402 		pr_warn("ndelay must be less than 1 second, ignored\n");
7403 		sdebug_ndelay = 0;
7404 	} else if (sdebug_ndelay > 0)
7405 		sdebug_jdelay = JDELAY_OVERRIDDEN;
7406 
7407 	switch (sdebug_sector_size) {
7408 	case  512:
7409 	case 1024:
7410 	case 2048:
7411 	case 4096:
7412 		break;
7413 	default:
7414 		pr_err("invalid sector_size %d\n", sdebug_sector_size);
7415 		return -EINVAL;
7416 	}
7417 
7418 	switch (sdebug_dif) {
7419 	case T10_PI_TYPE0_PROTECTION:
7420 		break;
7421 	case T10_PI_TYPE1_PROTECTION:
7422 	case T10_PI_TYPE2_PROTECTION:
7423 	case T10_PI_TYPE3_PROTECTION:
7424 		have_dif_prot = true;
7425 		break;
7426 
7427 	default:
7428 		pr_err("dif must be 0, 1, 2 or 3\n");
7429 		return -EINVAL;
7430 	}
7431 
7432 	if (sdebug_num_tgts < 0) {
7433 		pr_err("num_tgts must be >= 0\n");
7434 		return -EINVAL;
7435 	}
7436 
7437 	if (sdebug_guard > 1) {
7438 		pr_err("guard must be 0 or 1\n");
7439 		return -EINVAL;
7440 	}
7441 
7442 	if (sdebug_ato > 1) {
7443 		pr_err("ato must be 0 or 1\n");
7444 		return -EINVAL;
7445 	}
7446 
7447 	if (sdebug_physblk_exp > 15) {
7448 		pr_err("invalid physblk_exp %u\n", sdebug_physblk_exp);
7449 		return -EINVAL;
7450 	}
7451 
7452 	sdebug_lun_am = sdebug_lun_am_i;
7453 	if (sdebug_lun_am > SAM_LUN_AM_FLAT) {
7454 		pr_warn("Invalid LUN format %u, using default\n", (int)sdebug_lun_am);
7455 		sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
7456 	}
7457 
7458 	if (sdebug_max_luns > 256) {
7459 		if (sdebug_max_luns > 16384) {
7460 			pr_warn("max_luns can be no more than 16384, using default\n");
7461 			sdebug_max_luns = DEF_MAX_LUNS;
7462 		}
7463 		sdebug_lun_am = SAM_LUN_AM_FLAT;
7464 	}
7465 
7466 	if (sdebug_lowest_aligned > 0x3fff) {
7467 		pr_err("lowest_aligned too big: %u\n", sdebug_lowest_aligned);
7468 		return -EINVAL;
7469 	}
7470 
7471 	if (submit_queues < 1) {
7472 		pr_err("submit_queues must be 1 or more\n");
7473 		return -EINVAL;
7474 	}
7475 
7476 	if ((sdebug_max_queue > SDEBUG_CANQUEUE) || (sdebug_max_queue < 1)) {
7477 		pr_err("max_queue must be in range [1, %d]\n", SDEBUG_CANQUEUE);
7478 		return -EINVAL;
7479 	}
7480 
7481 	if ((sdebug_host_max_queue > SDEBUG_CANQUEUE) ||
7482 	    (sdebug_host_max_queue < 0)) {
7483 		pr_err("host_max_queue must be in range [0, %d]\n",
7484 		       SDEBUG_CANQUEUE);
7485 		return -EINVAL;
7486 	}
7487 
7488 	if (sdebug_host_max_queue &&
7489 	    (sdebug_max_queue != sdebug_host_max_queue)) {
7490 		sdebug_max_queue = sdebug_host_max_queue;
7491 		pr_warn("fixing max submit queue depth to host max queue depth, %d\n",
7492 			sdebug_max_queue);
7493 	}
7494 
7495 	/*
7496 	 * check for host managed zoned block device specified with
7497 	 * ptype=0x14 or zbc=XXX.
7498 	 */
7499 	if (sdebug_ptype == TYPE_ZBC) {
7500 		sdeb_zbc_model = BLK_ZONED_HM;
7501 	} else if (sdeb_zbc_model_s && *sdeb_zbc_model_s) {
7502 		k = sdeb_zbc_model_str(sdeb_zbc_model_s);
7503 		if (k < 0)
7504 			return k;
7505 		sdeb_zbc_model = k;
7506 		switch (sdeb_zbc_model) {
7507 		case BLK_ZONED_NONE:
7508 		case BLK_ZONED_HA:
7509 			sdebug_ptype = TYPE_DISK;
7510 			break;
7511 		case BLK_ZONED_HM:
7512 			sdebug_ptype = TYPE_ZBC;
7513 			break;
7514 		default:
7515 			pr_err("Invalid ZBC model\n");
7516 			return -EINVAL;
7517 		}
7518 	}
7519 	if (sdeb_zbc_model != BLK_ZONED_NONE) {
7520 		sdeb_zbc_in_use = true;
7521 		if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
7522 			sdebug_dev_size_mb = DEF_ZBC_DEV_SIZE_MB;
7523 	}
7524 
7525 	if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
7526 		sdebug_dev_size_mb = DEF_DEV_SIZE_MB;
7527 	if (sdebug_dev_size_mb < 1)
7528 		sdebug_dev_size_mb = 1;  /* force minimum 1 MB ramdisk */
7529 	sz = (unsigned long)sdebug_dev_size_mb * 1048576;
7530 	sdebug_store_sectors = sz / sdebug_sector_size;
7531 	sdebug_capacity = get_sdebug_capacity();
7532 
7533 	/* play around with geometry, don't waste too much on track 0 */
7534 	sdebug_heads = 8;
7535 	sdebug_sectors_per = 32;
7536 	if (sdebug_dev_size_mb >= 256)
7537 		sdebug_heads = 64;
7538 	else if (sdebug_dev_size_mb >= 16)
7539 		sdebug_heads = 32;
7540 	sdebug_cylinders_per = (unsigned long)sdebug_capacity /
7541 			       (sdebug_sectors_per * sdebug_heads);
7542 	if (sdebug_cylinders_per >= 1024) {
7543 		/* other LLDs do this; implies >= 1GB ram disk ... */
7544 		sdebug_heads = 255;
7545 		sdebug_sectors_per = 63;
7546 		sdebug_cylinders_per = (unsigned long)sdebug_capacity /
7547 			       (sdebug_sectors_per * sdebug_heads);
7548 	}
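	/*
	 * Worked example of the geometry above, assuming virtual_gb=0 so
	 * capacity equals store_sectors: dev_size_mb=8 and sector_size=512
	 * give sdebug_store_sectors=16384; with heads=8 and sectors_per=32
	 * that yields 16384 / (32 * 8) = 64 cylinders.
	 */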
7549 	if (scsi_debug_lbp()) {
7550 		sdebug_unmap_max_blocks =
7551 			clamp(sdebug_unmap_max_blocks, 0U, 0xffffffffU);
7552 
7553 		sdebug_unmap_max_desc =
7554 			clamp(sdebug_unmap_max_desc, 0U, 256U);
7555 
7556 		sdebug_unmap_granularity =
7557 			clamp(sdebug_unmap_granularity, 1U, 0xffffffffU);
7558 
7559 		if (sdebug_unmap_alignment &&
7560 		    sdebug_unmap_granularity <=
7561 		    sdebug_unmap_alignment) {
7562 			pr_err("unmap_granularity <= unmap_alignment\n");
7563 			return -EINVAL;
7564 		}
7565 	}
7566 	xa_init_flags(per_store_ap, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
7567 	if (want_store) {
7568 		idx = sdebug_add_store();
7569 		if (idx < 0)
7570 			return idx;
7571 	}
7572 
7573 	pseudo_primary = root_device_register("pseudo_0");
7574 	if (IS_ERR(pseudo_primary)) {
7575 		pr_warn("root_device_register() error\n");
7576 		ret = PTR_ERR(pseudo_primary);
7577 		goto free_vm;
7578 	}
7579 	ret = bus_register(&pseudo_lld_bus);
7580 	if (ret < 0) {
7581 		pr_warn("bus_register error: %d\n", ret);
7582 		goto dev_unreg;
7583 	}
7584 	ret = driver_register(&sdebug_driverfs_driver);
7585 	if (ret < 0) {
7586 		pr_warn("driver_register error: %d\n", ret);
7587 		goto bus_unreg;
7588 	}
7589 
7590 	hosts_to_add = sdebug_add_host;
7591 	sdebug_add_host = 0;
7592 
7593 	queued_cmd_cache = KMEM_CACHE(sdebug_queued_cmd, SLAB_HWCACHE_ALIGN);
7594 	if (!queued_cmd_cache) {
7595 		ret = -ENOMEM;
7596 		goto driver_unreg;
7597 	}
7598 
7599 	sdebug_debugfs_root = debugfs_create_dir("scsi_debug", NULL);
7600 	if (IS_ERR_OR_NULL(sdebug_debugfs_root))
7601 		pr_info("failed to create initial debugfs directory\n");
7602 
7603 	for (k = 0; k < hosts_to_add; k++) {
7604 		if (want_store && k == 0) {
7605 			ret = sdebug_add_host_helper(idx);
7606 			if (ret < 0) {
7607 				pr_err("add_host_helper k=%d, error=%d\n",
7608 				       k, -ret);
7609 				break;
7610 			}
7611 		} else {
7612 			ret = sdebug_do_add_host(want_store &&
7613 						 sdebug_per_host_store);
7614 			if (ret < 0) {
7615 				pr_err("add_host k=%d error=%d\n", k, -ret);
7616 				break;
7617 			}
7618 		}
7619 	}
7620 	if (sdebug_verbose)
7621 		pr_info("built %d host(s)\n", sdebug_num_hosts);
7622 
7623 	return 0;
7624 
7625 driver_unreg:
7626 	driver_unregister(&sdebug_driverfs_driver);
7627 bus_unreg:
7628 	bus_unregister(&pseudo_lld_bus);
7629 dev_unreg:
7630 	root_device_unregister(pseudo_primary);
7631 free_vm:
7632 	sdebug_erase_store(idx, NULL);
7633 	return ret;
7634 }
7635 
7636 static void __exit scsi_debug_exit(void)
7637 {
7638 	int k = sdebug_num_hosts;
7639 
7640 	for (; k; k--)
7641 		sdebug_do_remove_host(true);
7642 	kmem_cache_destroy(queued_cmd_cache);
7643 	driver_unregister(&sdebug_driverfs_driver);
7644 	bus_unregister(&pseudo_lld_bus);
7645 	root_device_unregister(pseudo_primary);
7646 
7647 	sdebug_erase_all_stores(false);
7648 	xa_destroy(per_store_ap);
7649 	debugfs_remove(sdebug_debugfs_root);
7650 }
7651 
7652 device_initcall(scsi_debug_init);
7653 module_exit(scsi_debug_exit);
7654 
7655 static void sdebug_release_adapter(struct device *dev)
7656 {
7657 	struct sdebug_host_info *sdbg_host;
7658 
7659 	sdbg_host = dev_to_sdebug_host(dev);
7660 	kfree(sdbg_host);
7661 }
7662 
7663 /* idx must be valid; if sip is NULL then it will be obtained using idx */
7664 static void sdebug_erase_store(int idx, struct sdeb_store_info *sip)
7665 {
7666 	if (idx < 0)
7667 		return;
7668 	if (!sip) {
7669 		if (xa_empty(per_store_ap))
7670 			return;
7671 		sip = xa_load(per_store_ap, idx);
7672 		if (!sip)
7673 			return;
7674 	}
7675 	vfree(sip->map_storep);
7676 	vfree(sip->dif_storep);
7677 	vfree(sip->storep);
7678 	xa_erase(per_store_ap, idx);
7679 	kfree(sip);
7680 }
7681 
7682 /* Assume apart_from_first==false only in shutdown case. */
7683 static void sdebug_erase_all_stores(bool apart_from_first)
7684 {
7685 	unsigned long idx;
7686 	struct sdeb_store_info *sip = NULL;
7687 
7688 	xa_for_each(per_store_ap, idx, sip) {
7689 		if (apart_from_first)
7690 			apart_from_first = false;
7691 		else
7692 			sdebug_erase_store(idx, sip);
7693 	}
7694 	if (apart_from_first)
7695 		sdeb_most_recent_idx = sdeb_first_idx;
7696 }
7697 
7698 /*
7699  * Returns the new store's xarray index (idx, >= 0) on success, else a negated errno.
7700  * Limit the number of stores to 65536.
7701  */
7702 static int sdebug_add_store(void)
7703 {
7704 	int res;
7705 	u32 n_idx;
7706 	unsigned long iflags;
7707 	unsigned long sz = (unsigned long)sdebug_dev_size_mb * 1048576;
7708 	struct sdeb_store_info *sip = NULL;
7709 	struct xa_limit xal = { .max = 1 << 16, .min = 0 };
7710 
7711 	sip = kzalloc(sizeof(*sip), GFP_KERNEL);
7712 	if (!sip)
7713 		return -ENOMEM;
7714 
7715 	xa_lock_irqsave(per_store_ap, iflags);
7716 	res = __xa_alloc(per_store_ap, &n_idx, sip, xal, GFP_ATOMIC);
7717 	if (unlikely(res < 0)) {
7718 		xa_unlock_irqrestore(per_store_ap, iflags);
7719 		kfree(sip);
7720 		pr_warn("xa_alloc() errno=%d\n", -res);
7721 		return res;
7722 	}
7723 	sdeb_most_recent_idx = n_idx;
7724 	if (sdeb_first_idx < 0)
7725 		sdeb_first_idx = n_idx;
7726 	xa_unlock_irqrestore(per_store_ap, iflags);
7727 
7728 	res = -ENOMEM;
7729 	sip->storep = vzalloc(sz);
7730 	if (!sip->storep) {
7731 		pr_err("user data oom\n");
7732 		goto err;
7733 	}
7734 	if (sdebug_num_parts > 0)
7735 		sdebug_build_parts(sip->storep, sz);
7736 
7737 	/* DIF/DIX: what T10 calls Protection Information (PI) */
7738 	if (sdebug_dix) {
7739 		int dif_size;
7740 
7741 		dif_size = sdebug_store_sectors * sizeof(struct t10_pi_tuple);
7742 		sip->dif_storep = vmalloc(dif_size);
7743 
7744 		pr_info("dif_storep %u bytes @ %pK\n", dif_size,
7745 			sip->dif_storep);
7746 
7747 		if (!sip->dif_storep) {
7748 			pr_err("DIX oom\n");
7749 			goto err;
7750 		}
7751 		memset(sip->dif_storep, 0xff, dif_size);
7752 	}
7753 	/* Logical Block Provisioning */
7754 	if (scsi_debug_lbp()) {
7755 		map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
7756 		sip->map_storep = vmalloc(array_size(sizeof(long),
7757 						     BITS_TO_LONGS(map_size)));
7758 
7759 		pr_info("%lu provisioning blocks\n", map_size);
7760 
7761 		if (!sip->map_storep) {
7762 			pr_err("LBP map oom\n");
7763 			goto err;
7764 		}
7765 
7766 		bitmap_zero(sip->map_storep, map_size);
7767 
7768 		/* Map first 1KB for partition table */
7769 		if (sdebug_num_parts)
7770 			map_region(sip, 0, 2);
7771 	}
7772 
7773 	rwlock_init(&sip->macc_lck);
7774 	return (int)n_idx;
7775 err:
7776 	sdebug_erase_store((int)n_idx, sip);
7777 	pr_warn("failed, errno=%d\n", -res);
7778 	return res;
7779 }
7780 
7781 static int sdebug_add_host_helper(int per_host_idx)
7782 {
7783 	int k, devs_per_host, idx;
7784 	int error = -ENOMEM;
7785 	struct sdebug_host_info *sdbg_host;
7786 	struct sdebug_dev_info *sdbg_devinfo, *tmp;
7787 
7788 	sdbg_host = kzalloc(sizeof(*sdbg_host), GFP_KERNEL);
7789 	if (!sdbg_host)
7790 		return -ENOMEM;
7791 	idx = (per_host_idx < 0) ? sdeb_first_idx : per_host_idx;
7792 	if (xa_get_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE))
7793 		xa_clear_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
7794 	sdbg_host->si_idx = idx;
7795 
7796 	INIT_LIST_HEAD(&sdbg_host->dev_info_list);
7797 
7798 	devs_per_host = sdebug_num_tgts * sdebug_max_luns;
7799 	for (k = 0; k < devs_per_host; k++) {
7800 		sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
7801 		if (!sdbg_devinfo)
7802 			goto clean;
7803 	}
7804 
7805 	mutex_lock(&sdebug_host_list_mutex);
7806 	list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
7807 	mutex_unlock(&sdebug_host_list_mutex);
7808 
7809 	sdbg_host->dev.bus = &pseudo_lld_bus;
7810 	sdbg_host->dev.parent = pseudo_primary;
7811 	sdbg_host->dev.release = &sdebug_release_adapter;
7812 	dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_num_hosts);
7813 
7814 	error = device_register(&sdbg_host->dev);
7815 	if (error) {
7816 		mutex_lock(&sdebug_host_list_mutex);
7817 		list_del(&sdbg_host->host_list);
7818 		mutex_unlock(&sdebug_host_list_mutex);
7819 		goto clean;
7820 	}
7821 
7822 	++sdebug_num_hosts;
7823 	return 0;
7824 
7825 clean:
7826 	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
7827 				 dev_list) {
7828 		list_del(&sdbg_devinfo->dev_list);
7829 		kfree(sdbg_devinfo->zstate);
7830 		kfree(sdbg_devinfo);
7831 	}
7832 	if (sdbg_host->dev.release)
7833 		put_device(&sdbg_host->dev);
7834 	else
7835 		kfree(sdbg_host);
7836 	pr_warn("failed, errno=%d\n", -error);
7837 	return error;
7838 }
7839 
7840 static int sdebug_do_add_host(bool mk_new_store)
7841 {
7842 	int ph_idx = sdeb_most_recent_idx;
7843 
7844 	if (mk_new_store) {
7845 		ph_idx = sdebug_add_store();
7846 		if (ph_idx < 0)
7847 			return ph_idx;
7848 	}
7849 	return sdebug_add_host_helper(ph_idx);
7850 }
7851 
7852 static void sdebug_do_remove_host(bool the_end)
7853 {
7854 	int idx = -1;
7855 	struct sdebug_host_info *sdbg_host = NULL;
7856 	struct sdebug_host_info *sdbg_host2;
7857 
7858 	mutex_lock(&sdebug_host_list_mutex);
7859 	if (!list_empty(&sdebug_host_list)) {
7860 		sdbg_host = list_entry(sdebug_host_list.prev,
7861 				       struct sdebug_host_info, host_list);
7862 		idx = sdbg_host->si_idx;
7863 	}
7864 	if (!the_end && idx >= 0) {
7865 		bool unique = true;
7866 
7867 		list_for_each_entry(sdbg_host2, &sdebug_host_list, host_list) {
7868 			if (sdbg_host2 == sdbg_host)
7869 				continue;
7870 			if (idx == sdbg_host2->si_idx) {
7871 				unique = false;
7872 				break;
7873 			}
7874 		}
7875 		if (unique) {
7876 			xa_set_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
7877 			if (idx == sdeb_most_recent_idx)
7878 				--sdeb_most_recent_idx;
7879 		}
7880 	}
7881 	if (sdbg_host)
7882 		list_del(&sdbg_host->host_list);
7883 	mutex_unlock(&sdebug_host_list_mutex);
7884 
7885 	if (!sdbg_host)
7886 		return;
7887 
7888 	device_unregister(&sdbg_host->dev);
7889 	--sdebug_num_hosts;
7890 }
7891 
7892 static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
7893 {
7894 	struct sdebug_dev_info *devip = sdev->hostdata;
7895 
7896 	if (!devip)
7897 		return	-ENODEV;
7898 
7899 	mutex_lock(&sdebug_host_list_mutex);
7900 	block_unblock_all_queues(true);
7901 
7902 	if (qdepth > SDEBUG_CANQUEUE) {
7903 		qdepth = SDEBUG_CANQUEUE;
7904 		pr_warn("requested qdepth [%d] exceeds canqueue [%d], trimming\n",
7905 			qdepth, SDEBUG_CANQUEUE);
7906 	}
7907 	if (qdepth < 1)
7908 		qdepth = 1;
7909 	if (qdepth != sdev->queue_depth)
7910 		scsi_change_queue_depth(sdev, qdepth);
7911 
7912 	block_unblock_all_queues(false);
7913 	mutex_unlock(&sdebug_host_list_mutex);
7914 
7915 	if (SDEBUG_OPT_Q_NOISE & sdebug_opts)
7916 		sdev_printk(KERN_INFO, sdev, "%s: qdepth=%d\n", __func__, qdepth);
7917 
7918 	return sdev->queue_depth;
7919 }
7920 
7921 static bool fake_timeout(struct scsi_cmnd *scp)
7922 {
7923 	if (0 == (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth))) {
7924 		if (sdebug_every_nth < -1)
7925 			sdebug_every_nth = -1;
7926 		if (SDEBUG_OPT_TIMEOUT & sdebug_opts)
7927 			return true; /* ignore command causing timeout */
7928 		else if (SDEBUG_OPT_MAC_TIMEOUT & sdebug_opts &&
7929 			 scsi_medium_access_command(scp))
7930 			return true; /* time out reads and writes */
7931 	}
7932 	return false;
7933 }
7934 
7935 /* Response to TUR or media access command when device stopped */
7936 static int resp_not_ready(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
7937 {
7938 	int stopped_state;
7939 	u64 diff_ns = 0;
7940 	ktime_t now_ts = ktime_get_boottime();
7941 	struct scsi_device *sdp = scp->device;
7942 
7943 	stopped_state = atomic_read(&devip->stopped);
7944 	if (stopped_state == 2) {
7945 		if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
7946 			diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));
7947 			if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
7948 				/* tur_ms_to_ready timer extinguished */
7949 				atomic_set(&devip->stopped, 0);
7950 				return 0;
7951 			}
7952 		}
7953 		mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x1);
7954 		if (sdebug_verbose)
7955 			sdev_printk(KERN_INFO, sdp,
7956 				    "%s: Not ready: in process of becoming ready\n", my_name);
7957 		if (scp->cmnd[0] == TEST_UNIT_READY) {
7958 			u64 tur_nanosecs_to_ready = (u64)sdeb_tur_ms_to_ready * 1000000;
7959 
7960 			if (diff_ns <= tur_nanosecs_to_ready)
7961 				diff_ns = tur_nanosecs_to_ready - diff_ns;
7962 			else
7963 				diff_ns = tur_nanosecs_to_ready;
7964 			/* As per 20-061r2 approved for spc6 by T10 on 20200716 */
7965 			do_div(diff_ns, 1000000);	/* diff_ns becomes milliseconds */
7966 			scsi_set_sense_information(scp->sense_buffer, SCSI_SENSE_BUFFERSIZE,
7967 						   diff_ns);
7968 			return check_condition_result;
7969 		}
7970 	}
7971 	mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x2);
7972 	if (sdebug_verbose)
7973 		sdev_printk(KERN_INFO, sdp, "%s: Not ready: initializing command required\n",
7974 			    my_name);
7975 	return check_condition_result;
7976 }
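/*
 * Worked example of the sense information above: with tur_ms_to_ready=2000
 * and a TEST UNIT READY arriving 500 ms after device creation, diff_ns is
 * 5 * 10^8, so the reported time to ready is
 * (2 * 10^9 - 5 * 10^8) / 10^6 = 1500 milliseconds.
 */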
7977 
7978 static void sdebug_map_queues(struct Scsi_Host *shost)
7979 {
7980 	int i, qoff;
7981 
7982 	if (shost->nr_hw_queues == 1)
7983 		return;
7984 
7985 	for (i = 0, qoff = 0; i < HCTX_MAX_TYPES; i++) {
7986 		struct blk_mq_queue_map *map = &shost->tag_set.map[i];
7987 
7988 		map->nr_queues  = 0;
7989 
7990 		if (i == HCTX_TYPE_DEFAULT)
7991 			map->nr_queues = submit_queues - poll_queues;
7992 		else if (i == HCTX_TYPE_POLL)
7993 			map->nr_queues = poll_queues;
7994 
7995 		if (!map->nr_queues) {
7996 			BUG_ON(i == HCTX_TYPE_DEFAULT);
7997 			continue;
7998 		}
7999 
8000 		map->queue_offset = qoff;
8001 		blk_mq_map_queues(map);
8002 
8003 		qoff += map->nr_queues;
8004 	}
8005 }
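/*
 * Worked example of the mapping above: with submit_queues=4 and
 * poll_queues=1, HCTX_TYPE_DEFAULT gets 3 queues at offset 0 and
 * HCTX_TYPE_POLL gets 1 queue at offset 3; HCTX_TYPE_READ gets none.
 */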
8006 
8007 struct sdebug_blk_mq_poll_data {
8008 	unsigned int queue_num;
8009 	int *num_entries;
8010 };
8011 
8012 /*
8013  * We don't handle aborted commands here, but it does not seem possible to have
8014  * aborted polled commands from schedule_resp()
8015  */
8016 static bool sdebug_blk_mq_poll_iter(struct request *rq, void *opaque)
8017 {
8018 	struct sdebug_blk_mq_poll_data *data = opaque;
8019 	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
8020 	struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmd);
8021 	struct sdebug_defer *sd_dp;
8022 	u32 unique_tag = blk_mq_unique_tag(rq);
8023 	u16 hwq = blk_mq_unique_tag_to_hwq(unique_tag);
8024 	struct sdebug_queued_cmd *sqcp;
8025 	unsigned long flags;
8026 	int queue_num = data->queue_num;
8027 	ktime_t time;
8028 
8029 	/* We're only interested in one queue for this iteration */
8030 	if (hwq != queue_num)
8031 		return true;
8032 
8033 	/* Subsequent checks would fail if this failed, but check anyway */
8034 	if (!test_bit(SCMD_STATE_INFLIGHT, &cmd->state))
8035 		return true;
8036 
8037 	time = ktime_get_boottime();
8038 
8039 	spin_lock_irqsave(&sdsc->lock, flags);
8040 	sqcp = TO_QUEUED_CMD(cmd);
8041 	if (!sqcp) {
8042 		spin_unlock_irqrestore(&sdsc->lock, flags);
8043 		return true;
8044 	}
8045 
8046 	sd_dp = &sqcp->sd_dp;
8047 	if (READ_ONCE(sd_dp->defer_t) != SDEB_DEFER_POLL) {
8048 		spin_unlock_irqrestore(&sdsc->lock, flags);
8049 		return true;
8050 	}
8051 
8052 	if (time < sd_dp->cmpl_ts) {
8053 		spin_unlock_irqrestore(&sdsc->lock, flags);
8054 		return true;
8055 	}
8056 
8057 	ASSIGN_QUEUED_CMD(cmd, NULL);
8058 	spin_unlock_irqrestore(&sdsc->lock, flags);
8059 
8060 	if (sdebug_statistics) {
8061 		atomic_inc(&sdebug_completions);
8062 		if (raw_smp_processor_id() != sd_dp->issuing_cpu)
8063 			atomic_inc(&sdebug_miss_cpus);
8064 	}
8065 
8066 	sdebug_free_queued_cmd(sqcp);
8067 
8068 	scsi_done(cmd); /* callback to mid level */
8069 	(*data->num_entries)++;
8070 	return true;
8071 }
8072 
8073 static int sdebug_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
8074 {
8075 	int num_entries = 0;
8076 	struct sdebug_blk_mq_poll_data data = {
8077 		.queue_num = queue_num,
8078 		.num_entries = &num_entries,
8079 	};
8080 
8081 	blk_mq_tagset_busy_iter(&shost->tag_set, sdebug_blk_mq_poll_iter,
8082 				&data);
8083 
8084 	if (num_entries > 0)
8085 		atomic_add(num_entries, &sdeb_mq_poll_count);
8086 	return num_entries;
8087 }
8088 
8089 static int sdebug_timeout_cmd(struct scsi_cmnd *cmnd)
8090 {
8091 	struct scsi_device *sdp = cmnd->device;
8092 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
8093 	struct sdebug_err_inject *err;
8094 	unsigned char *cmd = cmnd->cmnd;
8095 	int ret = 0;
8096 
8097 	if (devip == NULL)
8098 		return 0;
8099 
8100 	rcu_read_lock();
8101 	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
8102 		if (err->type == ERR_TMOUT_CMD &&
8103 		    (err->cmd == cmd[0] || err->cmd == 0xff)) {
8104 			ret = !!err->cnt;
8105 			if (err->cnt < 0)
8106 				err->cnt++;
8107 
8108 			rcu_read_unlock();
8109 			return ret;
8110 		}
8111 	}
8112 	rcu_read_unlock();
8113 
8114 	return 0;
8115 }
8116 
8117 static int sdebug_fail_queue_cmd(struct scsi_cmnd *cmnd)
8118 {
8119 	struct scsi_device *sdp = cmnd->device;
8120 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
8121 	struct sdebug_err_inject *err;
8122 	unsigned char *cmd = cmnd->cmnd;
8123 	int ret = 0;
8124 
8125 	if (devip == NULL)
8126 		return 0;
8127 
8128 	rcu_read_lock();
8129 	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
8130 		if (err->type == ERR_FAIL_QUEUE_CMD &&
8131 		    (err->cmd == cmd[0] || err->cmd == 0xff)) {
8132 			ret = err->cnt ? err->queuecmd_ret : 0;
8133 			if (err->cnt < 0)
8134 				err->cnt++;
8135 
8136 			rcu_read_unlock();
8137 			return ret;
8138 		}
8139 	}
8140 	rcu_read_unlock();
8141 
8142 	return 0;
8143 }
8144 
8145 static int sdebug_fail_cmd(struct scsi_cmnd *cmnd, int *retval,
8146 			   struct sdebug_err_inject *info)
8147 {
8148 	struct scsi_device *sdp = cmnd->device;
8149 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
8150 	struct sdebug_err_inject *err;
8151 	unsigned char *cmd = cmnd->cmnd;
8152 	int ret = 0;
8153 	int result;
8154 
8155 	if (devip == NULL)
8156 		return 0;
8157 
8158 	rcu_read_lock();
8159 	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
8160 		if (err->type == ERR_FAIL_CMD &&
8161 		    (err->cmd == cmd[0] || err->cmd == 0xff)) {
8162 			if (!err->cnt) {
8163 				rcu_read_unlock();
8164 				return 0;
8165 			}
8166 
8167 			ret = !!err->cnt;
8168 			rcu_read_unlock();
8169 			goto out_handle;
8170 		}
8171 	}
8172 	rcu_read_unlock();
8173 
8174 	return 0;
8175 
8176 out_handle:
8177 	if (err->cnt < 0)
8178 		err->cnt++;
8179 	mk_sense_buffer(cmnd, err->sense_key, err->asc, err->asq);
8180 	result = err->status_byte | err->host_byte << 16 | err->driver_byte << 24;
8181 	*info = *err;
8182 	*retval = schedule_resp(cmnd, devip, result, NULL, 0, 0);
8183 
8184 	return ret;
8185 }
8186 
8187 static int scsi_debug_queuecommand(struct Scsi_Host *shost,
8188 				   struct scsi_cmnd *scp)
8189 {
8190 	u8 sdeb_i;
8191 	struct scsi_device *sdp = scp->device;
8192 	const struct opcode_info_t *oip;
8193 	const struct opcode_info_t *r_oip;
8194 	struct sdebug_dev_info *devip;
8195 	u8 *cmd = scp->cmnd;
8196 	int (*r_pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
8197 	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *) = NULL;
8198 	int k, na;
8199 	int errsts = 0;
8200 	u64 lun_index = sdp->lun & 0x3FFF;
8201 	u32 flags;
8202 	u16 sa;
8203 	u8 opcode = cmd[0];
8204 	bool has_wlun_rl;
8205 	bool inject_now;
8206 	int ret = 0;
8207 	struct sdebug_err_inject err;
8208 
8209 	scsi_set_resid(scp, 0);
8210 	if (sdebug_statistics) {
8211 		atomic_inc(&sdebug_cmnd_count);
8212 		inject_now = inject_on_this_cmd();
8213 	} else {
8214 		inject_now = false;
8215 	}
8216 	if (unlikely(sdebug_verbose &&
8217 		     !(SDEBUG_OPT_NO_CDB_NOISE & sdebug_opts))) {
8218 		char b[120];
8219 		int n, len, sb;
8220 
8221 		len = scp->cmd_len;
8222 		sb = (int)sizeof(b);
8223 		if (len > 32)
8224 			strcpy(b, "too long, over 32 bytes");
8225 		else {
8226 			for (k = 0, n = 0; k < len && n < sb; ++k)
8227 				n += scnprintf(b + n, sb - n, "%02x ",
8228 					       (u32)cmd[k]);
8229 		}
8230 		sdev_printk(KERN_INFO, sdp, "%s: tag=%#x, cmd %s\n", my_name,
8231 			    blk_mq_unique_tag(scsi_cmd_to_rq(scp)), b);
8232 	}
8233 	if (unlikely(inject_now && (sdebug_opts & SDEBUG_OPT_HOST_BUSY)))
8234 		return SCSI_MLQUEUE_HOST_BUSY;
8235 	has_wlun_rl = (sdp->lun == SCSI_W_LUN_REPORT_LUNS);
8236 	if (unlikely(lun_index >= sdebug_max_luns && !has_wlun_rl))
8237 		goto err_out;
8238 
8239 	sdeb_i = opcode_ind_arr[opcode];	/* fully mapped */
8240 	oip = &opcode_info_arr[sdeb_i];		/* safe if table consistent */
8241 	devip = (struct sdebug_dev_info *)sdp->hostdata;
8242 	if (unlikely(!devip)) {
8243 		devip = find_build_dev_info(sdp);
8244 		if (NULL == devip)
8245 			goto err_out;
8246 	}
8247 
8248 	if (sdebug_timeout_cmd(scp)) {
8249 		scmd_printk(KERN_INFO, scp, "timeout command 0x%x\n", opcode);
8250 		return 0;
8251 	}
8252 
8253 	ret = sdebug_fail_queue_cmd(scp);
8254 	if (ret) {
8255 		scmd_printk(KERN_INFO, scp, "fail queue command 0x%x with 0x%x\n",
8256 				opcode, ret);
8257 		return ret;
8258 	}
8259 
8260 	if (sdebug_fail_cmd(scp, &ret, &err)) {
8261 		scmd_printk(KERN_INFO, scp,
8262 			"fail command 0x%x with hostbyte=0x%x, "
8263 			"driverbyte=0x%x, statusbyte=0x%x, "
8264 			"sense_key=0x%x, asc=0x%x, asq=0x%x\n",
8265 			opcode, err.host_byte, err.driver_byte,
8266 			err.status_byte, err.sense_key, err.asc, err.asq);
8267 		return ret;
8268 	}
8269 
8270 	if (unlikely(inject_now && !atomic_read(&sdeb_inject_pending)))
8271 		atomic_set(&sdeb_inject_pending, 1);
8272 
8273 	na = oip->num_attached;
8274 	r_pfp = oip->pfp;
8275 	if (na) {	/* multiple commands with this opcode */
8276 		r_oip = oip;
8277 		if (FF_SA & r_oip->flags) {
8278 			if (F_SA_LOW & oip->flags)
8279 				sa = 0x1f & cmd[1];
8280 			else
8281 				sa = get_unaligned_be16(cmd + 8);
8282 			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
8283 				if (opcode == oip->opcode && sa == oip->sa)
8284 					break;
8285 			}
8286 		} else {   /* since no service action only check opcode */
8287 			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
8288 				if (opcode == oip->opcode)
8289 					break;
8290 			}
8291 		}
8292 		if (k > na) {
8293 			if (F_SA_LOW & r_oip->flags)
8294 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 4);
8295 			else if (F_SA_HIGH & r_oip->flags)
8296 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, 7);
8297 			else
8298 				mk_sense_invalid_opcode(scp);
8299 			goto check_cond;
8300 		}
8301 	}	/* else (when na==0) we assume the oip is a match */
8302 	flags = oip->flags;
8303 	if (unlikely(F_INV_OP & flags)) {
8304 		mk_sense_invalid_opcode(scp);
8305 		goto check_cond;
8306 	}
8307 	if (unlikely(has_wlun_rl && !(F_RL_WLUN_OK & flags))) {
8308 		if (sdebug_verbose)
8309 			sdev_printk(KERN_INFO, sdp, "%s: Opcode 0x%x not supported for wlun\n",
8310 				    my_name, opcode);
8311 		mk_sense_invalid_opcode(scp);
8312 		goto check_cond;
8313 	}
8314 	if (unlikely(sdebug_strict)) {	/* check cdb against mask */
8315 		u8 rem;
8316 		int j;
8317 
8318 		for (k = 1; k < oip->len_mask[0] && k < 16; ++k) {
8319 			rem = ~oip->len_mask[k] & cmd[k];
8320 			if (rem) {
8321 				for (j = 7; j >= 0; --j, rem <<= 1) {
8322 					if (0x80 & rem)
8323 						break;
8324 				}
8325 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, k, j);
8326 				goto check_cond;
8327 			}
8328 		}
8329 	}
8330 	if (unlikely(!(F_SKIP_UA & flags) &&
8331 		     find_first_bit(devip->uas_bm,
8332 				    SDEBUG_NUM_UAS) != SDEBUG_NUM_UAS)) {
8333 		errsts = make_ua(scp, devip);
8334 		if (errsts)
8335 			goto check_cond;
8336 	}
8337 	if (unlikely(((F_M_ACCESS & flags) || scp->cmnd[0] == TEST_UNIT_READY) &&
8338 		     atomic_read(&devip->stopped))) {
8339 		errsts = resp_not_ready(scp, devip);
8340 		if (errsts)
8341 			goto fini;
8342 	}
8343 	if (sdebug_fake_rw && (F_FAKE_RW & flags))
8344 		goto fini;
8345 	if (unlikely(sdebug_every_nth)) {
8346 		if (fake_timeout(scp))
8347 			return 0;	/* ignore command: make trouble */
8348 	}
8349 	if (likely(oip->pfp))
8350 		pfp = oip->pfp;	/* calls a resp_* function */
8351 	else
8352 		pfp = r_pfp;    /* if leaf function ptr NULL, try the root's */
8353 
8354 fini:
8355 	if (F_DELAY_OVERR & flags)	/* cmds like INQUIRY respond asap */
8356 		return schedule_resp(scp, devip, errsts, pfp, 0, 0);
8357 	else if ((flags & F_LONG_DELAY) && (sdebug_jdelay > 0 ||
8358 					    sdebug_ndelay > 10000)) {
8359 		/*
8360 		 * Skip long delays if ndelay <= 10 microseconds. Otherwise
8361 		 * for Start Stop Unit (SSU) want at least 1 second delay and
8362 		 * if sdebug_jdelay>1 want a long delay of that many seconds.
8363 		 * For Synchronize Cache want 1/20 of SSU's delay.
8364 		 */
8365 		int jdelay = (sdebug_jdelay < 2) ? 1 : sdebug_jdelay;
8366 		int denom = (flags & F_SYNC_DELAY) ? 20 : 1;
8367 
8368 		jdelay = mult_frac(USER_HZ * jdelay, HZ, denom * USER_HZ);
8369 		return schedule_resp(scp, devip, errsts, pfp, jdelay, 0);
8370 	} else
8371 		return schedule_resp(scp, devip, errsts, pfp, sdebug_jdelay,
8372 				     sdebug_ndelay);
8373 check_cond:
8374 	return schedule_resp(scp, devip, check_condition_result, NULL, 0, 0);
8375 err_out:
8376 	return schedule_resp(scp, NULL, DID_NO_CONNECT << 16, NULL, 0, 0);
8377 }
8378 
static int sdebug_init_cmd_priv(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
{
	struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmd);

	spin_lock_init(&sdsc->lock);

	return 0;
}

static struct scsi_host_template sdebug_driver_template = {
	.show_info =		scsi_debug_show_info,
	.write_info =		scsi_debug_write_info,
	.proc_name =		sdebug_proc_name,
	.name =			"SCSI DEBUG",
	.info =			scsi_debug_info,
	.slave_alloc =		scsi_debug_slave_alloc,
	.slave_configure =	scsi_debug_slave_configure,
	.slave_destroy =	scsi_debug_slave_destroy,
	.ioctl =		scsi_debug_ioctl,
	.queuecommand =		scsi_debug_queuecommand,
	.change_queue_depth =	sdebug_change_qdepth,
	.map_queues =		sdebug_map_queues,
	.mq_poll =		sdebug_blk_mq_poll,
	.eh_abort_handler =	scsi_debug_abort,
	.eh_device_reset_handler = scsi_debug_device_reset,
	.eh_target_reset_handler = scsi_debug_target_reset,
	.eh_bus_reset_handler = scsi_debug_bus_reset,
	.eh_host_reset_handler = scsi_debug_host_reset,
	.can_queue =		SDEBUG_CANQUEUE,
	.this_id =		7,
	.sg_tablesize =		SG_MAX_SEGMENTS,
	.cmd_per_lun =		DEF_CMD_PER_LUN,
	.max_sectors =		-1U,
	.max_segment_size =	-1U,
	.module =		THIS_MODULE,
	.track_queue_depth =	1,
	.cmd_size =		sizeof(struct sdebug_scsi_cmd),
	.init_cmd_priv =	sdebug_init_cmd_priv,
	.target_alloc =		sdebug_target_alloc,
	.target_destroy =	sdebug_target_destroy,
};

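/*
 * Illustrative usage (the parameter names are module parameters defined
 * elsewhere in this file, and the values are only an example):
 *
 *     modprobe scsi_debug dev_size_mb=256 num_tgts=1 max_luns=4
 *
 * creates one pseudo host with a single target exposing four logical
 * units backed by a 256 MiB ramdisk store.
 */
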
static int sdebug_driver_probe(struct device *dev)
{
	int error = 0;
	struct sdebug_host_info *sdbg_host;
	struct Scsi_Host *hpnt;
	int hprot;

	sdbg_host = dev_to_sdebug_host(dev);

	sdebug_driver_template.can_queue = sdebug_max_queue;
	sdebug_driver_template.cmd_per_lun = sdebug_max_queue;
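	/*
	 * A dma_boundary of PAGE_SIZE - 1 keeps each DMA segment within a
	 * single page, which is what disabling clustering means here.
	 */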
	if (!sdebug_clustering)
		sdebug_driver_template.dma_boundary = PAGE_SIZE - 1;

	hpnt = scsi_host_alloc(&sdebug_driver_template, 0);
	if (!hpnt) {
		pr_err("scsi_host_alloc failed\n");
		error = -ENODEV;
		return error;
	}
	if (submit_queues > nr_cpu_ids) {
		pr_warn("%s: trim submit_queues (was %d) to nr_cpu_ids=%u\n",
			my_name, submit_queues, nr_cpu_ids);
		submit_queues = nr_cpu_ids;
	}
	/*
	 * Decide whether to tell the SCSI subsystem that we want mq. The
	 * following should give the same answer for each host.
	 */
	hpnt->nr_hw_queues = submit_queues;
	if (sdebug_host_max_queue)
		hpnt->host_tagset = 1;

	/* poll queues are only possible when nr_hw_queues > 1 */
	if (hpnt->nr_hw_queues == 1 || (poll_queues < 1)) {
		pr_warn("%s: trim poll_queues to 0. poll_q/nr_hw = (%d/%d)\n",
			my_name, poll_queues, hpnt->nr_hw_queues);
		poll_queues = 0;
	}

	/*
	 * Poll queues don't need interrupts, but we need at least one I/O
	 * queue left over for non-polled I/O.
	 * If that condition is not met, trim poll_queues to 1 (for
	 * simplicity).
	 */
	if (poll_queues >= submit_queues) {
		if (submit_queues < 3)
			pr_warn("%s: trim poll_queues to 1\n", my_name);
		else
			pr_warn("%s: trim poll_queues to 1. Perhaps try poll_queues=%d\n",
				my_name, submit_queues - 1);
		poll_queues = 1;
	}
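	/*
	 * With poll queues enabled there is one blk-mq queue map each for
	 * the default, read and poll hctx types, hence nr_maps = 3.
	 */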
	if (poll_queues)
		hpnt->nr_maps = 3;

	sdbg_host->shost = hpnt;
	if ((hpnt->this_id >= 0) && (sdebug_num_tgts > hpnt->this_id))
		hpnt->max_id = sdebug_num_tgts + 1;
	else
		hpnt->max_id = sdebug_num_tgts;
	/* allow the REPORT LUNS well-known lun rather than just sdebug_max_luns */
	hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;

	hprot = 0;

	switch (sdebug_dif) {

	case T10_PI_TYPE1_PROTECTION:
		hprot = SHOST_DIF_TYPE1_PROTECTION;
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE1_PROTECTION;
		break;

	case T10_PI_TYPE2_PROTECTION:
		hprot = SHOST_DIF_TYPE2_PROTECTION;
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE2_PROTECTION;
		break;

	case T10_PI_TYPE3_PROTECTION:
		hprot = SHOST_DIF_TYPE3_PROTECTION;
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE3_PROTECTION;
		break;

	default:
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE0_PROTECTION;
		break;
	}

	scsi_host_set_prot(hpnt, hprot);

	if (have_dif_prot || sdebug_dix)
		pr_info("host protection%s%s%s%s%s%s%s\n",
			(hprot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
			(hprot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
			(hprot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
			(hprot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
			(hprot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
			(hprot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
			(hprot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");

	if (sdebug_guard == 1)
		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
	else
		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);
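	/*
	 * Illustrative example (dif, dix and guard are module parameters
	 * defined elsewhere in this file): loading with "dif=1 dix=1
	 * guard=0" reaches this point with Type 1 DIF+DIX protection and
	 * a T10 CRC guard tag, while guard=1 selects an IP checksum guard
	 * instead.
	 */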

	sdebug_verbose = !!(SDEBUG_OPT_NOISE & sdebug_opts);
	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & sdebug_opts);
	if (sdebug_every_nth)	/* need stats counters for every_nth */
		sdebug_statistics = true;
	error = scsi_add_host(hpnt, &sdbg_host->dev);
	if (error) {
		pr_err("scsi_add_host failed\n");
		error = -ENODEV;
		scsi_host_put(hpnt);
	} else {
		scsi_scan_host(hpnt);
	}

	return error;
}

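/*
 * Teardown mirrors probe: remove the Scsi_Host first so no new commands
 * arrive, then free the per-device state, and drop the final host
 * reference last.
 */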
static void sdebug_driver_remove(struct device *dev)
{
	struct sdebug_host_info *sdbg_host;
	struct sdebug_dev_info *sdbg_devinfo, *tmp;

	sdbg_host = dev_to_sdebug_host(dev);

	scsi_remove_host(sdbg_host->shost);

	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
				 dev_list) {
		list_del(&sdbg_devinfo->dev_list);
		kfree(sdbg_devinfo->zstate);
		kfree(sdbg_devinfo);
	}

	scsi_host_put(sdbg_host->shost);
}

static const struct bus_type pseudo_lld_bus = {
	.name = "pseudo",
	.probe = sdebug_driver_probe,
	.remove = sdebug_driver_remove,
	.drv_groups = sdebug_drv_groups,
};
