xref: /linux/drivers/scsi/scsi_debug.c (revision f6154d8babbb8a98f0d3ea325aafae2e33bfd8be)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
4  *  Copyright (C) 1992  Eric Youngdale
5  *  Simulate a host adapter with 2 disks attached.  Do a lot of checking
6  *  to make sure that we are not getting blocks mixed up, and PANIC if
7  *  anything out of the ordinary is seen.
8  * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
9  *
10  * Copyright (C) 2001 - 2021 Douglas Gilbert
11  *
12  *  For documentation see http://sg.danny.cz/sg/scsi_debug.html
13  */
14 
15 
16 #define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
17 
18 #include <linux/module.h>
19 #include <linux/align.h>
20 #include <linux/kernel.h>
21 #include <linux/errno.h>
22 #include <linux/jiffies.h>
23 #include <linux/slab.h>
24 #include <linux/types.h>
25 #include <linux/string.h>
26 #include <linux/fs.h>
27 #include <linux/init.h>
28 #include <linux/proc_fs.h>
29 #include <linux/vmalloc.h>
30 #include <linux/moduleparam.h>
31 #include <linux/scatterlist.h>
32 #include <linux/blkdev.h>
33 #include <linux/crc-t10dif.h>
34 #include <linux/spinlock.h>
35 #include <linux/interrupt.h>
36 #include <linux/atomic.h>
37 #include <linux/hrtimer.h>
38 #include <linux/uuid.h>
39 #include <linux/t10-pi.h>
40 #include <linux/msdos_partition.h>
41 #include <linux/random.h>
42 #include <linux/xarray.h>
43 #include <linux/prefetch.h>
44 #include <linux/debugfs.h>
45 #include <linux/async.h>
46 
47 #include <net/checksum.h>
48 
49 #include <asm/unaligned.h>
50 
51 #include <scsi/scsi.h>
52 #include <scsi/scsi_cmnd.h>
53 #include <scsi/scsi_device.h>
54 #include <scsi/scsi_host.h>
55 #include <scsi/scsicam.h>
56 #include <scsi/scsi_eh.h>
57 #include <scsi/scsi_tcq.h>
58 #include <scsi/scsi_dbg.h>
59 
60 #include "sd.h"
61 #include "scsi_logging.h"
62 
63 /* make sure inq_product_rev string corresponds to this version */
64 #define SDEBUG_VERSION "0191"	/* format to fit INQUIRY revision field */
65 static const char *sdebug_version_date = "20210520";
66 
67 #define MY_NAME "scsi_debug"
68 
69 /* Additional Sense Code (ASC) */
70 #define NO_ADDITIONAL_SENSE 0x0
71 #define LOGICAL_UNIT_NOT_READY 0x4
72 #define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
73 #define UNRECOVERED_READ_ERR 0x11
74 #define PARAMETER_LIST_LENGTH_ERR 0x1a
75 #define INVALID_OPCODE 0x20
76 #define LBA_OUT_OF_RANGE 0x21
77 #define INVALID_FIELD_IN_CDB 0x24
78 #define INVALID_FIELD_IN_PARAM_LIST 0x26
79 #define WRITE_PROTECTED 0x27
80 #define UA_RESET_ASC 0x29
81 #define UA_CHANGED_ASC 0x2a
82 #define TARGET_CHANGED_ASC 0x3f
83 #define LUNS_CHANGED_ASCQ 0x0e
84 #define INSUFF_RES_ASC 0x55
85 #define INSUFF_RES_ASCQ 0x3
86 #define POWER_ON_RESET_ASCQ 0x0
87 #define POWER_ON_OCCURRED_ASCQ 0x1
88 #define BUS_RESET_ASCQ 0x2	/* scsi bus reset occurred */
89 #define MODE_CHANGED_ASCQ 0x1	/* mode parameters changed */
90 #define CAPACITY_CHANGED_ASCQ 0x9
91 #define SAVING_PARAMS_UNSUP 0x39
92 #define TRANSPORT_PROBLEM 0x4b
93 #define THRESHOLD_EXCEEDED 0x5d
94 #define LOW_POWER_COND_ON 0x5e
95 #define MISCOMPARE_VERIFY_ASC 0x1d
96 #define MICROCODE_CHANGED_ASCQ 0x1	/* with TARGET_CHANGED_ASC */
97 #define MICROCODE_CHANGED_WO_RESET_ASCQ 0x16
98 #define WRITE_ERROR_ASC 0xc
99 #define UNALIGNED_WRITE_ASCQ 0x4
100 #define WRITE_BOUNDARY_ASCQ 0x5
101 #define READ_INVDATA_ASCQ 0x6
102 #define READ_BOUNDARY_ASCQ 0x7
103 #define ATTEMPT_ACCESS_GAP 0x9
104 #define INSUFF_ZONE_ASCQ 0xe
105 
106 /* Additional Sense Code Qualifier (ASCQ) */
107 #define ACK_NAK_TO 0x3
108 
109 /* Default values for driver parameters */
110 #define DEF_NUM_HOST   1
111 #define DEF_NUM_TGTS   1
112 #define DEF_MAX_LUNS   1
113 /* With these defaults, this driver will make 1 host with 1 target
114  * (id 0) containing 1 logical unit (lun 0). That is 1 device.
115  */
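/*
 * Illustration (not from this file): the defaults can be overridden at
 * load time, e.g. "modprobe scsi_debug add_host=2 num_tgts=2 max_luns=4"
 * should yield 2 hosts x 2 targets x 4 LUNs = 16 simulated devices.
 */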
116 #define DEF_ATO 1
117 #define DEF_CDB_LEN 10
118 #define DEF_JDELAY   1		/* if > 0 unit is a jiffy */
119 #define DEF_DEV_SIZE_PRE_INIT   0
120 #define DEF_DEV_SIZE_MB   8
121 #define DEF_ZBC_DEV_SIZE_MB   128
122 #define DEF_DIF 0
123 #define DEF_DIX 0
124 #define DEF_PER_HOST_STORE false
125 #define DEF_D_SENSE   0
126 #define DEF_EVERY_NTH   0
127 #define DEF_FAKE_RW	0
128 #define DEF_GUARD 0
129 #define DEF_HOST_LOCK 0
130 #define DEF_LBPU 0
131 #define DEF_LBPWS 0
132 #define DEF_LBPWS10 0
133 #define DEF_LBPRZ 1
134 #define DEF_LOWEST_ALIGNED 0
135 #define DEF_NDELAY   0		/* if > 0 unit is a nanosecond */
136 #define DEF_NO_LUN_0   0
137 #define DEF_NUM_PARTS   0
138 #define DEF_OPTS   0
139 #define DEF_OPT_BLKS 1024
140 #define DEF_PHYSBLK_EXP 0
141 #define DEF_OPT_XFERLEN_EXP 0
142 #define DEF_PTYPE   TYPE_DISK
143 #define DEF_RANDOM false
144 #define DEF_REMOVABLE false
145 #define DEF_SCSI_LEVEL   7    /* INQUIRY, byte2 [6->SPC-4; 7->SPC-5] */
146 #define DEF_SECTOR_SIZE 512
147 #define DEF_UNMAP_ALIGNMENT 0
148 #define DEF_UNMAP_GRANULARITY 1
149 #define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
150 #define DEF_UNMAP_MAX_DESC 256
151 #define DEF_VIRTUAL_GB   0
152 #define DEF_VPD_USE_HOSTNO 1
153 #define DEF_WRITESAME_LENGTH 0xFFFF
154 #define DEF_STRICT 0
155 #define DEF_STATISTICS false
156 #define DEF_SUBMIT_QUEUES 1
157 #define DEF_TUR_MS_TO_READY 0
158 #define DEF_UUID_CTL 0
159 #define JDELAY_OVERRIDDEN -9999
160 
161 /* Default parameters for ZBC drives */
162 #define DEF_ZBC_ZONE_SIZE_MB	128
163 #define DEF_ZBC_MAX_OPEN_ZONES	8
164 #define DEF_ZBC_NR_CONV_ZONES	1
165 
166 #define SDEBUG_LUN_0_VAL 0
167 
168 /* bit mask values for sdebug_opts */
169 #define SDEBUG_OPT_NOISE		1
170 #define SDEBUG_OPT_MEDIUM_ERR		2
171 #define SDEBUG_OPT_TIMEOUT		4
172 #define SDEBUG_OPT_RECOVERED_ERR	8
173 #define SDEBUG_OPT_TRANSPORT_ERR	16
174 #define SDEBUG_OPT_DIF_ERR		32
175 #define SDEBUG_OPT_DIX_ERR		64
176 #define SDEBUG_OPT_MAC_TIMEOUT		128
177 #define SDEBUG_OPT_SHORT_TRANSFER	0x100
178 #define SDEBUG_OPT_Q_NOISE		0x200
179 #define SDEBUG_OPT_ALL_TSF		0x400	/* ignore */
180 #define SDEBUG_OPT_RARE_TSF		0x800
181 #define SDEBUG_OPT_N_WCE		0x1000
182 #define SDEBUG_OPT_RESET_NOISE		0x2000
183 #define SDEBUG_OPT_NO_CDB_NOISE		0x4000
184 #define SDEBUG_OPT_HOST_BUSY		0x8000
185 #define SDEBUG_OPT_CMD_ABORT		0x10000
186 #define SDEBUG_OPT_ALL_NOISE (SDEBUG_OPT_NOISE | SDEBUG_OPT_Q_NOISE | \
187 			      SDEBUG_OPT_RESET_NOISE)
188 #define SDEBUG_OPT_ALL_INJECTING (SDEBUG_OPT_RECOVERED_ERR | \
189 				  SDEBUG_OPT_TRANSPORT_ERR | \
190 				  SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR | \
191 				  SDEBUG_OPT_SHORT_TRANSFER | \
192 				  SDEBUG_OPT_HOST_BUSY | \
193 				  SDEBUG_OPT_CMD_ABORT)
194 #define SDEBUG_OPT_RECOV_DIF_DIX (SDEBUG_OPT_RECOVERED_ERR | \
195 				  SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR)
196 
197 /* As indicated in SAM-5 and SPC-4, Unit Attentions (UAs) are returned in
198  * priority order. In the subset implemented here lower numbers have higher
199  * priority. The UA numbers should be a sequence starting from 0 with
200  * SDEBUG_NUM_UAS being 1 higher than the highest numbered UA. */
201 #define SDEBUG_UA_POR 0		/* Power on, reset, or bus device reset */
202 #define SDEBUG_UA_POOCCUR 1	/* Power on occurred */
203 #define SDEBUG_UA_BUS_RESET 2
204 #define SDEBUG_UA_MODE_CHANGED 3
205 #define SDEBUG_UA_CAPACITY_CHANGED 4
206 #define SDEBUG_UA_LUNS_CHANGED 5
207 #define SDEBUG_UA_MICROCODE_CHANGED 6	/* simulate firmware change */
208 #define SDEBUG_UA_MICROCODE_CHANGED_WO_RESET 7
209 #define SDEBUG_NUM_UAS 8
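/*
 * Sketch of how this bitmap is consumed (see make_ua() further down):
 * a reset handler queues a UA with, for example,
 *	set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
 * and the next command for that device picks the highest priority
 * pending UA via
 *	k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS);
 * reporting it as a CHECK CONDITION.
 */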
210 
211 /* When the SDEBUG_OPT_MEDIUM_ERR bit is set in sdebug_opts, a medium
212  * error is simulated at this sector on read commands: */
213 #define OPT_MEDIUM_ERR_ADDR   0x1234 /* that's sector 4660 in decimal */
214 #define OPT_MEDIUM_ERR_NUM    10     /* number of consecutive medium errs */
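/*
 * Usage example (hedged): "modprobe scsi_debug opts=0x2" sets
 * SDEBUG_OPT_MEDIUM_ERR, after which reads touching sectors
 * 0x1234..0x123d (4660..4669) report a MEDIUM ERROR.
 */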
215 
216 /* SDEBUG_CANQUEUE is the maximum number of commands that can be queued
217  * (for response) per submit queue at one time. Can be reduced by max_queue
218  * option. Command responses are not queued when jdelay=0 and ndelay=0. The
219  * per-device DEF_CMD_PER_LUN can be changed via sysfs:
220  * /sys/class/scsi_device/<h:c:t:l>/device/queue_depth
221  * but cannot exceed SDEBUG_CANQUEUE .
222  */
223 #define SDEBUG_CANQUEUE_WORDS  3	/* a WORD here is BITS_PER_LONG bits */
224 #define SDEBUG_CANQUEUE  (SDEBUG_CANQUEUE_WORDS * BITS_PER_LONG)
225 #define DEF_CMD_PER_LUN  SDEBUG_CANQUEUE
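/*
 * Worked example: with 64 bit longs, SDEBUG_CANQUEUE = 3 * 64 = 192
 * queued commands per submit queue (3 * 32 = 96 with 32 bit longs).
 */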
226 
227 /* UA - Unit Attention; SA - Service Action; SSU - Start Stop Unit */
228 #define F_D_IN			1	/* Data-in command (e.g. READ) */
229 #define F_D_OUT			2	/* Data-out command (e.g. WRITE) */
230 #define F_D_OUT_MAYBE		4	/* WRITE SAME, NDOB bit */
231 #define F_D_UNKN		8
232 #define F_RL_WLUN_OK		0x10	/* allowed with REPORT LUNS W-LUN */
233 #define F_SKIP_UA		0x20	/* bypass UAs (e.g. INQUIRY command) */
234 #define F_DELAY_OVERR		0x40	/* for commands like INQUIRY */
235 #define F_SA_LOW		0x80	/* SA is in cdb byte 1, bits 4 to 0 */
236 #define F_SA_HIGH		0x100	/* SA is in cdb bytes 8 and 9 */
237 #define F_INV_OP		0x200	/* invalid opcode (not supported) */
238 #define F_FAKE_RW		0x400	/* bypass resp_*() when fake_rw set */
239 #define F_M_ACCESS		0x800	/* media access, reacts to SSU state */
240 #define F_SSU_DELAY		0x1000	/* SSU command delay (long-ish) */
241 #define F_SYNC_DELAY		0x2000	/* SYNCHRONIZE CACHE delay */
242 
243 /* Useful combinations of the above flags */
244 #define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
245 #define FF_MEDIA_IO (F_M_ACCESS | F_FAKE_RW)
246 #define FF_SA (F_SA_HIGH | F_SA_LOW)
247 #define F_LONG_DELAY		(F_SSU_DELAY | F_SYNC_DELAY)
248 
249 #define SDEBUG_MAX_PARTS 4
250 
251 #define SDEBUG_MAX_CMD_LEN 32
252 
253 #define SDEB_XA_NOT_IN_USE XA_MARK_1
254 
255 static struct kmem_cache *queued_cmd_cache;
256 
257 #define TO_QUEUED_CMD(scmd)  ((void *)(scmd)->host_scribble)
258 #define ASSIGN_QUEUED_CMD(scmnd, qc) { (scmnd)->host_scribble = (void *) qc; }
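/*
 * Usage sketch: per-command state rides in the mid-level's
 * host_scribble pointer, e.g.
 *	ASSIGN_QUEUED_CMD(scmd, sqcp);
 *	...
 *	struct sdebug_queued_cmd *sqcp = TO_QUEUED_CMD(scmd);
 */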
259 
260 /* Zone types (zbcr05 table 25) */
261 enum sdebug_z_type {
262 	ZBC_ZTYPE_CNV	= 0x1,
263 	ZBC_ZTYPE_SWR	= 0x2,
264 	ZBC_ZTYPE_SWP	= 0x3,
265 	/* ZBC_ZTYPE_SOBR = 0x4, */
266 	ZBC_ZTYPE_GAP	= 0x5,
267 };
268 
269 /* enumeration names taken from table 26, zbcr05 */
270 enum sdebug_z_cond {
271 	ZBC_NOT_WRITE_POINTER	= 0x0,
272 	ZC1_EMPTY		= 0x1,
273 	ZC2_IMPLICIT_OPEN	= 0x2,
274 	ZC3_EXPLICIT_OPEN	= 0x3,
275 	ZC4_CLOSED		= 0x4,
276 	ZC6_READ_ONLY		= 0xd,
277 	ZC5_FULL		= 0xe,
278 	ZC7_OFFLINE		= 0xf,
279 };
280 
281 struct sdeb_zone_state {	/* ZBC: per zone state */
282 	enum sdebug_z_type z_type;
283 	enum sdebug_z_cond z_cond;
284 	bool z_non_seq_resource;
285 	unsigned int z_size;
286 	sector_t z_start;
287 	sector_t z_wp;
288 };
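/*
 * Sketch of the write pointer bookkeeping these fields imply (the real
 * helpers appear later in this file): a write to a sequential zone
 * advances z_wp, and the zone goes full when it reaches the zone end:
 *
 *	zsp->z_wp += num;
 *	if (zsp->z_wp >= zsp->z_start + zsp->z_size)
 *		zsp->z_cond = ZC5_FULL;
 */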
289 
290 enum sdebug_err_type {
291 	ERR_TMOUT_CMD		= 0,	/* make specific scsi command timeout */
292 	ERR_FAIL_QUEUE_CMD	= 1,	/* make specific scsi command's */
293 					/* queuecmd return failed */
294 	ERR_FAIL_CMD		= 2,	/* make specific scsi command's */
295 					/* queuecmd return succeed but */
296 					/* with errors set in scsi_cmnd */
297 	ERR_ABORT_CMD_FAILED	= 3,	/* control return FAILED from */
298 					/* scsi_debug_abort() */
299 	ERR_LUN_RESET_FAILED	= 4,	/* control return FAILED from */
300 					/* scsi_debug_device_reset() */
301 };
302 
303 struct sdebug_err_inject {
304 	int type;
305 	struct list_head list;
306 	int cnt;
307 	unsigned char cmd;
308 	struct rcu_head rcu;
309 
310 	union {
311 		/*
312 		 * For ERR_FAIL_QUEUE_CMD
313 		 */
314 		int queuecmd_ret;
315 
316 		/*
317 		 * For ERR_FAIL_CMD
318 		 */
319 		struct {
320 			unsigned char host_byte;
321 			unsigned char driver_byte;
322 			unsigned char status_byte;
323 			unsigned char sense_key;
324 			unsigned char asc;
325 			unsigned char asq;
326 		};
327 	};
328 };
329 
330 struct sdebug_dev_info {
331 	struct list_head dev_list;
332 	unsigned int channel;
333 	unsigned int target;
334 	u64 lun;
335 	uuid_t lu_name;
336 	struct sdebug_host_info *sdbg_host;
337 	unsigned long uas_bm[1];
338 	atomic_t stopped;	/* 1: by SSU, 2: device start */
339 	bool used;
340 
341 	/* For ZBC devices */
342 	enum blk_zoned_model zmodel;
343 	unsigned int zcap;
344 	unsigned int zsize;
345 	unsigned int zsize_shift;
346 	unsigned int nr_zones;
347 	unsigned int nr_conv_zones;
348 	unsigned int nr_seq_zones;
349 	unsigned int nr_imp_open;
350 	unsigned int nr_exp_open;
351 	unsigned int nr_closed;
352 	unsigned int max_open;
353 	ktime_t create_ts;	/* time since bootup that this device was created */
354 	struct sdeb_zone_state *zstate;
355 
356 	struct dentry *debugfs_entry;
357 	struct spinlock list_lock;
358 	struct list_head inject_err_list;
359 };
360 
361 struct sdebug_target_info {
362 	bool reset_fail;
363 	struct dentry *debugfs_entry;
364 };
365 
366 struct sdebug_host_info {
367 	struct list_head host_list;
368 	int si_idx;	/* sdeb_store_info (per host) xarray index */
369 	struct Scsi_Host *shost;
370 	struct device dev;
371 	struct list_head dev_info_list;
372 };
373 
374 /* There is an xarray of pointers to this struct's objects, one per host */
375 struct sdeb_store_info {
376 	rwlock_t macc_lck;	/* for atomic media access on this store */
377 	u8 *storep;		/* user data storage (ram) */
378 	struct t10_pi_tuple *dif_storep; /* protection info */
379 	void *map_storep;	/* provisioning map */
380 };
381 
382 #define dev_to_sdebug_host(d)	\
383 	container_of(d, struct sdebug_host_info, dev)
384 
385 #define shost_to_sdebug_host(shost)	\
386 	dev_to_sdebug_host(shost->dma_dev)
387 
388 enum sdeb_defer_type {SDEB_DEFER_NONE = 0, SDEB_DEFER_HRT = 1,
389 		      SDEB_DEFER_WQ = 2, SDEB_DEFER_POLL = 3};
390 
391 struct sdebug_defer {
392 	struct hrtimer hrt;
393 	struct execute_work ew;
394 	ktime_t cmpl_ts;/* time since boot to complete this cmd */
395 	int issuing_cpu;
396 	bool aborted;	/* true when blk_abort_request() already called */
397 	enum sdeb_defer_type defer_t;
398 };
399 
400 struct sdebug_queued_cmd {
401 	/* corresponding bit set in in_use_bm[] in owning struct sdebug_queue
402 	 * instance indicates this slot is in use.
403 	 */
404 	struct sdebug_defer sd_dp;
405 	struct scsi_cmnd *scmd;
406 };
407 
408 struct sdebug_scsi_cmd {
409 	spinlock_t   lock;
410 };
411 
412 static atomic_t sdebug_cmnd_count;   /* number of incoming commands */
413 static atomic_t sdebug_completions;  /* count of deferred completions */
414 static atomic_t sdebug_miss_cpus;    /* submission + completion cpus differ */
415 static atomic_t sdebug_a_tsf;	     /* 'almost task set full' counter */
416 static atomic_t sdeb_inject_pending;
417 static atomic_t sdeb_mq_poll_count;  /* bumped when mq_poll returns > 0 */
418 
419 struct opcode_info_t {
420 	u8 num_attached;	/* 0 if this is it (i.e. a leaf); use 0xff */
421 				/* for terminating element */
422 	u8 opcode;		/* if num_attached > 0, preferred */
423 	u16 sa;			/* service action */
424 	u32 flags;		/* OR-ed set of SDEB_F_* */
425 	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
426 	const struct opcode_info_t *arrp;  /* num_attached elements or NULL */
427 	u8 len_mask[16];	/* len_mask[0]-->cdb_len, then mask for cdb */
428 				/* 1 to min(cdb_len, 15); ignore cdb[15...] */
429 };
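/*
 * len_mask example, using the INQUIRY entry from opcode_info_arr[]
 * below: {6, 0xe3, 0xff, 0xff, 0xff, 0xc7, ...} means the cdb is
 * 6 bytes long and (when strict checking is enabled) only the bits set
 * in each mask may be set in the corresponding cdb byte, e.g. 0xc7 for
 * the control byte.
 */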
430 
431 /* SCSI opcodes (first byte of cdb) of interest mapped onto these indexes */
432 enum sdeb_opcode_index {
433 	SDEB_I_INVALID_OPCODE =	0,
434 	SDEB_I_INQUIRY = 1,
435 	SDEB_I_REPORT_LUNS = 2,
436 	SDEB_I_REQUEST_SENSE = 3,
437 	SDEB_I_TEST_UNIT_READY = 4,
438 	SDEB_I_MODE_SENSE = 5,		/* 6, 10 */
439 	SDEB_I_MODE_SELECT = 6,		/* 6, 10 */
440 	SDEB_I_LOG_SENSE = 7,
441 	SDEB_I_READ_CAPACITY = 8,	/* 10; 16 is in SA_IN(16) */
442 	SDEB_I_READ = 9,		/* 6, 10, 12, 16 */
443 	SDEB_I_WRITE = 10,		/* 6, 10, 12, 16 */
444 	SDEB_I_START_STOP = 11,
445 	SDEB_I_SERV_ACT_IN_16 = 12,	/* add ...SERV_ACT_IN_12 if needed */
446 	SDEB_I_SERV_ACT_OUT_16 = 13,	/* add ...SERV_ACT_OUT_12 if needed */
447 	SDEB_I_MAINT_IN = 14,
448 	SDEB_I_MAINT_OUT = 15,
449 	SDEB_I_VERIFY = 16,		/* VERIFY(10), VERIFY(16) */
450 	SDEB_I_VARIABLE_LEN = 17,	/* READ(32), WRITE(32), WR_SCAT(32) */
451 	SDEB_I_RESERVE = 18,		/* 6, 10 */
452 	SDEB_I_RELEASE = 19,		/* 6, 10 */
453 	SDEB_I_ALLOW_REMOVAL = 20,	/* PREVENT ALLOW MEDIUM REMOVAL */
454 	SDEB_I_REZERO_UNIT = 21,	/* REWIND in SSC */
455 	SDEB_I_ATA_PT = 22,		/* 12, 16 */
456 	SDEB_I_SEND_DIAG = 23,
457 	SDEB_I_UNMAP = 24,
458 	SDEB_I_WRITE_BUFFER = 25,
459 	SDEB_I_WRITE_SAME = 26,		/* 10, 16 */
460 	SDEB_I_SYNC_CACHE = 27,		/* 10, 16 */
461 	SDEB_I_COMP_WRITE = 28,
462 	SDEB_I_PRE_FETCH = 29,		/* 10, 16 */
463 	SDEB_I_ZONE_OUT = 30,		/* 0x94+SA; includes no data xfer */
464 	SDEB_I_ZONE_IN = 31,		/* 0x95+SA; all have data-in */
465 	SDEB_I_LAST_ELEM_P1 = 32,	/* keep this last (previous + 1) */
466 };
467 
468 
469 static const unsigned char opcode_ind_arr[256] = {
470 /* 0x0; 0x0->0x1f: 6 byte cdbs */
471 	SDEB_I_TEST_UNIT_READY, SDEB_I_REZERO_UNIT, 0, SDEB_I_REQUEST_SENSE,
472 	    0, 0, 0, 0,
473 	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, 0,
474 	0, 0, SDEB_I_INQUIRY, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
475 	    SDEB_I_RELEASE,
476 	0, 0, SDEB_I_MODE_SENSE, SDEB_I_START_STOP, 0, SDEB_I_SEND_DIAG,
477 	    SDEB_I_ALLOW_REMOVAL, 0,
478 /* 0x20; 0x20->0x3f: 10 byte cdbs */
479 	0, 0, 0, 0, 0, SDEB_I_READ_CAPACITY, 0, 0,
480 	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, SDEB_I_VERIFY,
481 	0, 0, 0, 0, SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, 0,
482 	0, 0, 0, SDEB_I_WRITE_BUFFER, 0, 0, 0, 0,
483 /* 0x40; 0x40->0x5f: 10 byte cdbs */
484 	0, SDEB_I_WRITE_SAME, SDEB_I_UNMAP, 0, 0, 0, 0, 0,
485 	0, 0, 0, 0, 0, SDEB_I_LOG_SENSE, 0, 0,
486 	0, 0, 0, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
487 	    SDEB_I_RELEASE,
488 	0, 0, SDEB_I_MODE_SENSE, 0, 0, 0, 0, 0,
489 /* 0x60; 0x60->0x7d are reserved, 0x7e is "extended cdb" */
490 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
491 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
492 	0, SDEB_I_VARIABLE_LEN,
493 /* 0x80; 0x80->0x9f: 16 byte cdbs */
494 	0, 0, 0, 0, 0, SDEB_I_ATA_PT, 0, 0,
495 	SDEB_I_READ, SDEB_I_COMP_WRITE, SDEB_I_WRITE, 0,
496 	0, 0, 0, SDEB_I_VERIFY,
497 	SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, SDEB_I_WRITE_SAME,
498 	SDEB_I_ZONE_OUT, SDEB_I_ZONE_IN, 0, 0,
499 	0, 0, 0, 0, 0, 0, SDEB_I_SERV_ACT_IN_16, SDEB_I_SERV_ACT_OUT_16,
500 /* 0xa0; 0xa0->0xbf: 12 byte cdbs */
501 	SDEB_I_REPORT_LUNS, SDEB_I_ATA_PT, 0, SDEB_I_MAINT_IN,
502 	     SDEB_I_MAINT_OUT, 0, 0, 0,
503 	SDEB_I_READ, 0 /* SDEB_I_SERV_ACT_OUT_12 */, SDEB_I_WRITE,
504 	     0 /* SDEB_I_SERV_ACT_IN_12 */, 0, 0, 0, 0,
505 	0, 0, 0, 0, 0, 0, 0, 0,
506 	0, 0, 0, 0, 0, 0, 0, 0,
507 /* 0xc0; 0xc0->0xff: vendor specific */
508 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
509 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
510 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
511 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
512 };
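/*
 * Lookup sketch: command dispatch goes through this table first, then
 * into opcode_info_arr[] (and possibly an overflow array), roughly:
 *
 *	int idx = opcode_ind_arr[cmd[0]];
 *	const struct opcode_info_t *oip = &opcode_info_arr[idx];
 */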
513 
514 /*
515  * The following "response" functions return the SCSI mid-level's 4 byte
516  * tuple-in-an-int. To handle commands with an IMMED bit, for a faster
517  * command completion, they can mask their return value with
518  * SDEG_RES_IMMED_MASK .
519  */
520 #define SDEG_RES_IMMED_MASK 0x40000000
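/*
 * Example: a response function honoring an IMMED bit can signal early
 * completion by OR-ing the mask into its normal result:
 *
 *	return res | SDEG_RES_IMMED_MASK;
 */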
521 
522 static int resp_inquiry(struct scsi_cmnd *, struct sdebug_dev_info *);
523 static int resp_report_luns(struct scsi_cmnd *, struct sdebug_dev_info *);
524 static int resp_requests(struct scsi_cmnd *, struct sdebug_dev_info *);
525 static int resp_mode_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
526 static int resp_mode_select(struct scsi_cmnd *, struct sdebug_dev_info *);
527 static int resp_log_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
528 static int resp_readcap(struct scsi_cmnd *, struct sdebug_dev_info *);
529 static int resp_read_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
530 static int resp_write_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
531 static int resp_write_scat(struct scsi_cmnd *, struct sdebug_dev_info *);
532 static int resp_start_stop(struct scsi_cmnd *, struct sdebug_dev_info *);
533 static int resp_readcap16(struct scsi_cmnd *, struct sdebug_dev_info *);
534 static int resp_get_lba_status(struct scsi_cmnd *, struct sdebug_dev_info *);
535 static int resp_report_tgtpgs(struct scsi_cmnd *, struct sdebug_dev_info *);
536 static int resp_unmap(struct scsi_cmnd *, struct sdebug_dev_info *);
537 static int resp_rsup_opcodes(struct scsi_cmnd *, struct sdebug_dev_info *);
538 static int resp_rsup_tmfs(struct scsi_cmnd *, struct sdebug_dev_info *);
539 static int resp_verify(struct scsi_cmnd *, struct sdebug_dev_info *);
540 static int resp_write_same_10(struct scsi_cmnd *, struct sdebug_dev_info *);
541 static int resp_write_same_16(struct scsi_cmnd *, struct sdebug_dev_info *);
542 static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *);
543 static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *);
544 static int resp_sync_cache(struct scsi_cmnd *, struct sdebug_dev_info *);
545 static int resp_pre_fetch(struct scsi_cmnd *, struct sdebug_dev_info *);
546 static int resp_report_zones(struct scsi_cmnd *, struct sdebug_dev_info *);
547 static int resp_open_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
548 static int resp_close_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
549 static int resp_finish_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
550 static int resp_rwp_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
551 
552 static int sdebug_do_add_host(bool mk_new_store);
553 static int sdebug_add_host_helper(int per_host_idx);
554 static void sdebug_do_remove_host(bool the_end);
555 static int sdebug_add_store(void);
556 static void sdebug_erase_store(int idx, struct sdeb_store_info *sip);
557 static void sdebug_erase_all_stores(bool apart_from_first);
558 
559 static void sdebug_free_queued_cmd(struct sdebug_queued_cmd *sqcp);
560 
561 /*
562  * The following are overflow arrays for cdbs that "hit" the same index in
563  * the opcode_info_arr array. The most time sensitive (or commonly used) cdb
564  * should be placed in opcode_info_arr[], the others should be placed here.
565  */
566 static const struct opcode_info_t msense_iarr[] = {
567 	{0, 0x1a, 0, F_D_IN, NULL, NULL,
568 	    {6,  0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
569 };
570 
571 static const struct opcode_info_t mselect_iarr[] = {
572 	{0, 0x15, 0, F_D_OUT, NULL, NULL,
573 	    {6,  0xf1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
574 };
575 
576 static const struct opcode_info_t read_iarr[] = {
577 	{0, 0x28, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(10) */
578 	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
579 	     0, 0, 0, 0} },
580 	{0, 0x8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL, /* READ(6) */
581 	    {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
582 	{0, 0xa8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(12) */
583 	    {12,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf,
584 	     0xc7, 0, 0, 0, 0} },
585 };
586 
587 static const struct opcode_info_t write_iarr[] = {
588 	{0, 0x2a, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(10) */
589 	    NULL, {10,  0xfb, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7,
590 		   0, 0, 0, 0, 0, 0} },
591 	{0, 0xa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,   /* WRITE(6) */
592 	    NULL, {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0,
593 		   0, 0, 0} },
594 	{0, 0xaa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(12) */
595 	    NULL, {12,  0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
596 		   0xbf, 0xc7, 0, 0, 0, 0} },
597 };
598 
599 static const struct opcode_info_t verify_iarr[] = {
600 	{0, 0x2f, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,/* VERIFY(10) */
601 	    NULL, {10,  0xf7, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xff, 0xff, 0xc7,
602 		   0, 0, 0, 0, 0, 0} },
603 };
604 
605 static const struct opcode_info_t sa_in_16_iarr[] = {
606 	{0, 0x9e, 0x12, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL,
607 	    {16,  0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
608 	     0xff, 0xff, 0xff, 0, 0xc7} },	/* GET LBA STATUS(16) */
609 };
610 
611 static const struct opcode_info_t vl_iarr[] = {	/* VARIABLE LENGTH */
612 	{0, 0x7f, 0xb, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_dt0,
613 	    NULL, {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0xb, 0xfa,
614 		   0, 0xff, 0xff, 0xff, 0xff} },	/* WRITE(32) */
615 	{0, 0x7f, 0x11, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
616 	    NULL, {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x11, 0xf8,
617 		   0, 0xff, 0xff, 0x0, 0x0} },	/* WRITE SCATTERED(32) */
618 };
619 
620 static const struct opcode_info_t maint_in_iarr[] = {	/* MAINT IN */
621 	{0, 0xa3, 0xc, F_SA_LOW | F_D_IN, resp_rsup_opcodes, NULL,
622 	    {12,  0xc, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
623 	     0xc7, 0, 0, 0, 0} }, /* REPORT SUPPORTED OPERATION CODES */
624 	{0, 0xa3, 0xd, F_SA_LOW | F_D_IN, resp_rsup_tmfs, NULL,
625 	    {12,  0xd, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
626 	     0, 0} },	/* REPORT SUPPORTED TASK MANAGEMENT FUNCTIONS */
627 };
628 
629 static const struct opcode_info_t write_same_iarr[] = {
630 	{0, 0x93, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_write_same_16, NULL,
631 	    {16,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
632 	     0xff, 0xff, 0xff, 0x3f, 0xc7} },		/* WRITE SAME(16) */
633 };
634 
635 static const struct opcode_info_t reserve_iarr[] = {
636 	{0, 0x16, 0, F_D_OUT, NULL, NULL,		/* RESERVE(6) */
637 	    {6,  0x1f, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
638 };
639 
640 static const struct opcode_info_t release_iarr[] = {
641 	{0, 0x17, 0, F_D_OUT, NULL, NULL,		/* RELEASE(6) */
642 	    {6,  0x1f, 0xff, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
643 };
644 
645 static const struct opcode_info_t sync_cache_iarr[] = {
646 	{0, 0x91, 0, F_SYNC_DELAY | F_M_ACCESS, resp_sync_cache, NULL,
647 	    {16,  0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
648 	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* SYNC_CACHE (16) */
649 };
650 
651 static const struct opcode_info_t pre_fetch_iarr[] = {
652 	{0, 0x90, 0, F_SYNC_DELAY | FF_MEDIA_IO, resp_pre_fetch, NULL,
653 	    {16,  0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
654 	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* PRE-FETCH (16) */
655 };
656 
657 static const struct opcode_info_t zone_out_iarr[] = {	/* ZONE OUT(16) */
658 	{0, 0x94, 0x1, F_SA_LOW | F_M_ACCESS, resp_close_zone, NULL,
659 	    {16, 0x1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
660 	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },	/* CLOSE ZONE */
661 	{0, 0x94, 0x2, F_SA_LOW | F_M_ACCESS, resp_finish_zone, NULL,
662 	    {16, 0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
663 	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },	/* FINISH ZONE */
664 	{0, 0x94, 0x4, F_SA_LOW | F_M_ACCESS, resp_rwp_zone, NULL,
665 	    {16, 0x4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
666 	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },  /* RESET WRITE POINTER */
667 };
668 
669 static const struct opcode_info_t zone_in_iarr[] = {	/* ZONE IN(16) */
670 	{0, 0x95, 0x6, F_SA_LOW | F_D_IN | F_M_ACCESS, NULL, NULL,
671 	    {16, 0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
672 	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* REPORT ZONES */
673 };
674 
675 
676 /* This array is accessed via SDEB_I_* values. Make sure all are mapped,
677  * plus the terminating elements for logic that scans this table such as
678  * REPORT SUPPORTED OPERATION CODES. */
679 static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEM_P1 + 1] = {
680 /* 0 */
681 	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL,	/* unknown opcodes */
682 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
683 	{0, 0x12, 0, FF_RESPOND | F_D_IN, resp_inquiry, NULL, /* INQUIRY */
684 	    {6,  0xe3, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
685 	{0, 0xa0, 0, FF_RESPOND | F_D_IN, resp_report_luns, NULL,
686 	    {12,  0xe3, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
687 	     0, 0} },					/* REPORT LUNS */
688 	{0, 0x3, 0, FF_RESPOND | F_D_IN, resp_requests, NULL,
689 	    {6,  0xe1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
690 	{0, 0x0, 0, F_M_ACCESS | F_RL_WLUN_OK, NULL, NULL,/* TEST UNIT READY */
691 	    {6,  0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
692 /* 5 */
693 	{ARRAY_SIZE(msense_iarr), 0x5a, 0, F_D_IN,	/* MODE SENSE(10) */
694 	    resp_mode_sense, msense_iarr, {10,  0xf8, 0xff, 0xff, 0, 0, 0,
695 		0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
696 	{ARRAY_SIZE(mselect_iarr), 0x55, 0, F_D_OUT,	/* MODE SELECT(10) */
697 	    resp_mode_select, mselect_iarr, {10,  0xf1, 0, 0, 0, 0, 0, 0xff,
698 		0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
699 	{0, 0x4d, 0, F_D_IN, resp_log_sense, NULL,	/* LOG SENSE */
700 	    {10,  0xe3, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0,
701 	     0, 0, 0} },
702 	{0, 0x25, 0, F_D_IN, resp_readcap, NULL,    /* READ CAPACITY(10) */
703 	    {10,  0xe1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0x1, 0xc7, 0, 0, 0, 0,
704 	     0, 0} },
705 	{ARRAY_SIZE(read_iarr), 0x88, 0, F_D_IN | FF_MEDIA_IO, /* READ(16) */
706 	    resp_read_dt0, read_iarr, {16,  0xfe, 0xff, 0xff, 0xff, 0xff,
707 	    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
708 /* 10 */
709 	{ARRAY_SIZE(write_iarr), 0x8a, 0, F_D_OUT | FF_MEDIA_IO,
710 	    resp_write_dt0, write_iarr,			/* WRITE(16) */
711 		{16,  0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
712 		 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
713 	{0, 0x1b, 0, F_SSU_DELAY, resp_start_stop, NULL,/* START STOP UNIT */
714 	    {6,  0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
715 	{ARRAY_SIZE(sa_in_16_iarr), 0x9e, 0x10, F_SA_LOW | F_D_IN,
716 	    resp_readcap16, sa_in_16_iarr, /* SA_IN(16), READ CAPACITY(16) */
717 		{16,  0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
718 		 0xff, 0xff, 0xff, 0xff, 0x1, 0xc7} },
719 	{0, 0x9f, 0x12, F_SA_LOW | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
720 	    NULL, {16,  0x12, 0xf9, 0x0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff,
721 	    0xff, 0xff, 0xff, 0xff, 0xc7} },  /* SA_OUT(16), WRITE SCAT(16) */
722 	{ARRAY_SIZE(maint_in_iarr), 0xa3, 0xa, F_SA_LOW | F_D_IN,
723 	    resp_report_tgtpgs,	/* MAINT IN, REPORT TARGET PORT GROUPS */
724 		maint_in_iarr, {12,  0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff,
725 				0xff, 0, 0xc7, 0, 0, 0, 0} },
726 /* 15 */
727 	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */
728 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
729 	{ARRAY_SIZE(verify_iarr), 0x8f, 0,
730 	    F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,	/* VERIFY(16) */
731 	    verify_iarr, {16,  0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
732 			  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },
733 	{ARRAY_SIZE(vl_iarr), 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_MEDIA_IO,
734 	    resp_read_dt0, vl_iarr,	/* VARIABLE LENGTH, READ(32) */
735 	    {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x9, 0xfe, 0, 0xff, 0xff,
736 	     0xff, 0xff} },
737 	{ARRAY_SIZE(reserve_iarr), 0x56, 0, F_D_OUT,
738 	    NULL, reserve_iarr,	/* RESERVE(10) <no response function> */
739 	    {10,  0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
740 	     0} },
741 	{ARRAY_SIZE(release_iarr), 0x57, 0, F_D_OUT,
742 	    NULL, release_iarr, /* RELEASE(10) <no response function> */
743 	    {10,  0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
744 	     0} },
745 /* 20 */
746 	{0, 0x1e, 0, 0, NULL, NULL, /* ALLOW REMOVAL */
747 	    {6,  0, 0, 0, 0x3, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
748 	{0, 0x1, 0, 0, resp_start_stop, NULL, /* REWIND ?? */
749 	    {6,  0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
750 	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */
751 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
752 	{0, 0x1d, F_D_OUT, 0, NULL, NULL,	/* SEND DIAGNOSTIC */
753 	    {6,  0xf7, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
754 	{0, 0x42, 0, F_D_OUT | FF_MEDIA_IO, resp_unmap, NULL, /* UNMAP */
755 	    {10,  0x1, 0, 0, 0, 0, 0x3f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
756 /* 25 */
757 	{0, 0x3b, 0, F_D_OUT_MAYBE, resp_write_buffer, NULL,
758 	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0,
759 	     0, 0, 0, 0} },			/* WRITE_BUFFER */
760 	{ARRAY_SIZE(write_same_iarr), 0x41, 0, F_D_OUT_MAYBE | FF_MEDIA_IO,
761 	    resp_write_same_10, write_same_iarr,	/* WRITE SAME(10) */
762 		{10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0,
763 		 0, 0, 0, 0, 0} },
764 	{ARRAY_SIZE(sync_cache_iarr), 0x35, 0, F_SYNC_DELAY | F_M_ACCESS,
765 	    resp_sync_cache, sync_cache_iarr,
766 	    {10,  0x7, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
767 	     0, 0, 0, 0} },			/* SYNC_CACHE (10) */
768 	{0, 0x89, 0, F_D_OUT | FF_MEDIA_IO, resp_comp_write, NULL,
769 	    {16,  0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0,
770 	     0, 0xff, 0x3f, 0xc7} },		/* COMPARE AND WRITE */
771 	{ARRAY_SIZE(pre_fetch_iarr), 0x34, 0, F_SYNC_DELAY | FF_MEDIA_IO,
772 	    resp_pre_fetch, pre_fetch_iarr,
773 	    {10,  0x2, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
774 	     0, 0, 0, 0} },			/* PRE-FETCH (10) */
775 
776 /* 30 */
777 	{ARRAY_SIZE(zone_out_iarr), 0x94, 0x3, F_SA_LOW | F_M_ACCESS,
778 	    resp_open_zone, zone_out_iarr, /* ZONE_OUT(16), OPEN ZONE) */
779 		{16,  0x3 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
780 		 0xff, 0xff, 0x0, 0x0, 0xff, 0xff, 0x1, 0xc7} },
781 	{ARRAY_SIZE(zone_in_iarr), 0x95, 0x0, F_SA_LOW | F_M_ACCESS,
782 	    resp_report_zones, zone_in_iarr, /* ZONE_IN(16), REPORT ZONES) */
783 		{16,  0x0 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
784 		 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xc7} },
785 /* sentinel */
786 	{0xff, 0, 0, 0, NULL, NULL,		/* terminating element */
787 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
788 };
789 
790 static int sdebug_num_hosts;
791 static int sdebug_add_host = DEF_NUM_HOST;  /* in sysfs this is relative */
792 static int sdebug_ato = DEF_ATO;
793 static int sdebug_cdb_len = DEF_CDB_LEN;
794 static int sdebug_jdelay = DEF_JDELAY;	/* if > 0 then unit is jiffies */
795 static int sdebug_dev_size_mb = DEF_DEV_SIZE_PRE_INIT;
796 static int sdebug_dif = DEF_DIF;
797 static int sdebug_dix = DEF_DIX;
798 static int sdebug_dsense = DEF_D_SENSE;
799 static int sdebug_every_nth = DEF_EVERY_NTH;
800 static int sdebug_fake_rw = DEF_FAKE_RW;
801 static unsigned int sdebug_guard = DEF_GUARD;
802 static int sdebug_host_max_queue;	/* per host */
803 static int sdebug_lowest_aligned = DEF_LOWEST_ALIGNED;
804 static int sdebug_max_luns = DEF_MAX_LUNS;
805 static int sdebug_max_queue = SDEBUG_CANQUEUE;	/* per submit queue */
806 static unsigned int sdebug_medium_error_start = OPT_MEDIUM_ERR_ADDR;
807 static int sdebug_medium_error_count = OPT_MEDIUM_ERR_NUM;
808 static int sdebug_ndelay = DEF_NDELAY;	/* if > 0 then unit is nanoseconds */
809 static int sdebug_no_lun_0 = DEF_NO_LUN_0;
810 static int sdebug_no_uld;
811 static int sdebug_num_parts = DEF_NUM_PARTS;
812 static int sdebug_num_tgts = DEF_NUM_TGTS; /* targets per host */
813 static int sdebug_opt_blks = DEF_OPT_BLKS;
814 static int sdebug_opts = DEF_OPTS;
815 static int sdebug_physblk_exp = DEF_PHYSBLK_EXP;
816 static int sdebug_opt_xferlen_exp = DEF_OPT_XFERLEN_EXP;
817 static int sdebug_ptype = DEF_PTYPE; /* SCSI peripheral device type */
818 static int sdebug_scsi_level = DEF_SCSI_LEVEL;
819 static int sdebug_sector_size = DEF_SECTOR_SIZE;
820 static int sdeb_tur_ms_to_ready = DEF_TUR_MS_TO_READY;
821 static int sdebug_virtual_gb = DEF_VIRTUAL_GB;
822 static int sdebug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
823 static unsigned int sdebug_lbpu = DEF_LBPU;
824 static unsigned int sdebug_lbpws = DEF_LBPWS;
825 static unsigned int sdebug_lbpws10 = DEF_LBPWS10;
826 static unsigned int sdebug_lbprz = DEF_LBPRZ;
827 static unsigned int sdebug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
828 static unsigned int sdebug_unmap_granularity = DEF_UNMAP_GRANULARITY;
829 static unsigned int sdebug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
830 static unsigned int sdebug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
831 static unsigned int sdebug_write_same_length = DEF_WRITESAME_LENGTH;
832 static int sdebug_uuid_ctl = DEF_UUID_CTL;
833 static bool sdebug_random = DEF_RANDOM;
834 static bool sdebug_per_host_store = DEF_PER_HOST_STORE;
835 static bool sdebug_removable = DEF_REMOVABLE;
836 static bool sdebug_clustering;
837 static bool sdebug_host_lock = DEF_HOST_LOCK;
838 static bool sdebug_strict = DEF_STRICT;
839 static bool sdebug_any_injecting_opt;
840 static bool sdebug_no_rwlock;
841 static bool sdebug_verbose;
842 static bool have_dif_prot;
843 static bool write_since_sync;
844 static bool sdebug_statistics = DEF_STATISTICS;
845 static bool sdebug_wp;
846 static bool sdebug_allow_restart;
847 /* Following enum: 0: no zbc, def; 1: host aware; 2: host managed */
848 static enum blk_zoned_model sdeb_zbc_model = BLK_ZONED_NONE;
849 static char *sdeb_zbc_model_s;
850 
851 enum sam_lun_addr_method {SAM_LUN_AM_PERIPHERAL = 0x0,
852 			  SAM_LUN_AM_FLAT = 0x1,
853 			  SAM_LUN_AM_LOGICAL_UNIT = 0x2,
854 			  SAM_LUN_AM_EXTENDED = 0x3};
855 static enum sam_lun_addr_method sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
856 static int sdebug_lun_am_i = (int)SAM_LUN_AM_PERIPHERAL;
857 
858 static unsigned int sdebug_store_sectors;
859 static sector_t sdebug_capacity;	/* in sectors */
860 
861 /* old BIOS-style geometry; the kernel may get rid of these but some mode
862    sense pages may still need them */
863 static int sdebug_heads;		/* heads per disk */
864 static int sdebug_cylinders_per;	/* cylinders per surface */
865 static int sdebug_sectors_per;		/* sectors per cylinder */
866 
867 static LIST_HEAD(sdebug_host_list);
868 static DEFINE_MUTEX(sdebug_host_list_mutex);
869 
870 static struct xarray per_store_arr;
871 static struct xarray *per_store_ap = &per_store_arr;
872 static int sdeb_first_idx = -1;		/* invalid index ==> none created */
873 static int sdeb_most_recent_idx = -1;
874 static DEFINE_RWLOCK(sdeb_fake_rw_lck);	/* need a RW lock when fake_rw=1 */
875 
876 static unsigned long map_size;
877 static int num_aborts;
878 static int num_dev_resets;
879 static int num_target_resets;
880 static int num_bus_resets;
881 static int num_host_resets;
882 static int dix_writes;
883 static int dix_reads;
884 static int dif_errors;
885 
886 /* ZBC global data */
887 static bool sdeb_zbc_in_use;	/* true for host-aware and host-managed disks */
888 static int sdeb_zbc_zone_cap_mb;
889 static int sdeb_zbc_zone_size_mb;
890 static int sdeb_zbc_max_open = DEF_ZBC_MAX_OPEN_ZONES;
891 static int sdeb_zbc_nr_conv = DEF_ZBC_NR_CONV_ZONES;
892 
893 static int submit_queues = DEF_SUBMIT_QUEUES;  /* > 1 for multi-queue (mq) */
894 static int poll_queues; /* io_uring iopoll interface */
895 
896 static char sdebug_proc_name[] = MY_NAME;
897 static const char *my_name = MY_NAME;
898 
899 static struct bus_type pseudo_lld_bus;
900 
901 static struct device_driver sdebug_driverfs_driver = {
902 	.name 		= sdebug_proc_name,
903 	.bus		= &pseudo_lld_bus,
904 };
905 
906 static const int check_condition_result =
907 	SAM_STAT_CHECK_CONDITION;
908 
909 static const int illegal_condition_result =
910 	(DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;
911 
912 static const int device_qfull_result =
913 	(DID_ABORT << 16) | SAM_STAT_TASK_SET_FULL;
914 
915 static const int condition_met_result = SAM_STAT_CONDITION_MET;
916 
917 static struct dentry *sdebug_debugfs_root;
918 
919 static void sdebug_err_free(struct rcu_head *head)
920 {
921 	struct sdebug_err_inject *inject =
922 		container_of(head, typeof(*inject), rcu);
923 
924 	kfree(inject);
925 }
926 
927 static void sdebug_err_add(struct scsi_device *sdev, struct sdebug_err_inject *new)
928 {
929 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdev->hostdata;
930 	struct sdebug_err_inject *err;
931 
932 	spin_lock(&devip->list_lock);
933 	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
934 		if (err->type == new->type && err->cmd == new->cmd) {
935 			list_del_rcu(&err->list);
936 			call_rcu(&err->rcu, sdebug_err_free);
937 		}
938 	}
939 
940 	list_add_tail_rcu(&new->list, &devip->inject_err_list);
941 	spin_unlock(&devip->list_lock);
942 }
943 
944 static int sdebug_err_remove(struct scsi_device *sdev, const char *buf, size_t count)
945 {
946 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdev->hostdata;
947 	struct sdebug_err_inject *err;
948 	int type;
949 	unsigned char cmd;
950 
951 	if (sscanf(buf, "- %d %hhx", &type, &cmd) != 2) {
952 		kfree(buf);
953 		return -EINVAL;
954 	}
955 
956 	spin_lock(&devip->list_lock);
957 	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
958 		if (err->type == type && err->cmd == cmd) {
959 			list_del_rcu(&err->list);
960 			call_rcu(&err->rcu, sdebug_err_free);
961 			spin_unlock(&devip->list_lock);
962 			kfree(buf);
963 			return count;
964 		}
965 	}
966 	spin_unlock(&devip->list_lock);
967 
968 	kfree(buf);
969 	return -EINVAL;
970 }
971 
972 static int sdebug_error_show(struct seq_file *m, void *p)
973 {
974 	struct scsi_device *sdev = (struct scsi_device *)m->private;
975 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdev->hostdata;
976 	struct sdebug_err_inject *err;
977 
978 	seq_puts(m, "Type\tCount\tCommand\n");
979 
980 	rcu_read_lock();
981 	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
982 		switch (err->type) {
983 		case ERR_TMOUT_CMD:
984 		case ERR_ABORT_CMD_FAILED:
985 		case ERR_LUN_RESET_FAILED:
986 			seq_printf(m, "%d\t%d\t0x%x\n", err->type, err->cnt,
987 				err->cmd);
988 		break;
989 
990 		case ERR_FAIL_QUEUE_CMD:
991 			seq_printf(m, "%d\t%d\t0x%x\t0x%x\n", err->type,
992 				err->cnt, err->cmd, err->queuecmd_ret);
993 		break;
994 
995 		case ERR_FAIL_CMD:
996 			seq_printf(m, "%d\t%d\t0x%x\t0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
997 				err->type, err->cnt, err->cmd,
998 				err->host_byte, err->driver_byte,
999 				err->status_byte, err->sense_key,
1000 				err->asc, err->asq);
1001 		break;
1002 		}
1003 	}
1004 	rcu_read_unlock();
1005 
1006 	return 0;
1007 }
1008 
1009 static int sdebug_error_open(struct inode *inode, struct file *file)
1010 {
1011 	return single_open(file, sdebug_error_show, inode->i_private);
1012 }
1013 
1014 static ssize_t sdebug_error_write(struct file *file, const char __user *ubuf,
1015 		size_t count, loff_t *ppos)
1016 {
1017 	char *buf;
1018 	unsigned int inject_type;
1019 	struct sdebug_err_inject *inject;
1020 	struct scsi_device *sdev = (struct scsi_device *)file->f_inode->i_private;
1021 
1022 	buf = kzalloc(count + 1, GFP_KERNEL);
1023 	if (!buf)
1024 		return -ENOMEM;
1025 
1026 	if (copy_from_user(buf, ubuf, count)) {
1027 		kfree(buf);
1028 		return -EFAULT;
1029 	}
1030 
1031 	if (buf[0] == '-')
1032 		return sdebug_err_remove(sdev, buf, count);
1033 
1034 	if (sscanf(buf, "%d", &inject_type) != 1) {
1035 		kfree(buf);
1036 		return -EINVAL;
1037 	}
1038 
1039 	inject = kzalloc(sizeof(struct sdebug_err_inject), GFP_KERNEL);
1040 	if (!inject) {
1041 		kfree(buf);
1042 		return -ENOMEM;
1043 	}
1044 
1045 	switch (inject_type) {
1046 	case ERR_TMOUT_CMD:
1047 	case ERR_ABORT_CMD_FAILED:
1048 	case ERR_LUN_RESET_FAILED:
1049 		if (sscanf(buf, "%d %d %hhx", &inject->type, &inject->cnt,
1050 			   &inject->cmd) != 3)
1051 			goto out_error;
1052 	break;
1053 
1054 	case ERR_FAIL_QUEUE_CMD:
1055 		if (sscanf(buf, "%d %d %hhx %x", &inject->type, &inject->cnt,
1056 			   &inject->cmd, &inject->queuecmd_ret) != 4)
1057 			goto out_error;
1058 	break;
1059 
1060 	case ERR_FAIL_CMD:
1061 		if (sscanf(buf, "%d %d %hhx %hhx %hhx %hhx %hhx %hhx %hhx",
1062 			   &inject->type, &inject->cnt, &inject->cmd,
1063 			   &inject->host_byte, &inject->driver_byte,
1064 			   &inject->status_byte, &inject->sense_key,
1065 			   &inject->asc, &inject->asq) != 9)
1066 			goto out_error;
1067 	break;
1068 
1069 	default:
1070 		goto out_error;
1071 	break;
1072 	}
1073 
1074 	kfree(buf);
1075 	sdebug_err_add(sdev, inject);
1076 
1077 	return count;
1078 
1079 out_error:
1080 	kfree(buf);
1081 	kfree(inject);
1082 	return -EINVAL;
1083 }
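/*
 * Input format accepted above, one rule per write (values illustrative;
 * fields are <type> <cnt> <cmd> plus type-specific arguments, matching
 * the sscanf() calls in sdebug_error_write()):
 *
 *	"0 1 0x12"			ERR_TMOUT_CMD for INQUIRY (0x12)
 *	"1 1 0x12 0x1055"		ERR_FAIL_QUEUE_CMD: queuecommand
 *					returns 0x1055 (SCSI_MLQUEUE_HOST_BUSY)
 *	"2 1 0x12 0 0 0x2 0x5 0x24 0"	ERR_FAIL_CMD: CHECK CONDITION with
 *					ILLEGAL REQUEST, asc 0x24
 *	"- 0 0x12"			remove the matching rule ("- <type> <cmd>")
 */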
1084 
1085 static const struct file_operations sdebug_error_fops = {
1086 	.open	= sdebug_error_open,
1087 	.read	= seq_read,
1088 	.write	= sdebug_error_write,
1089 	.release = single_release,
1090 };
1091 
1092 static int sdebug_target_reset_fail_show(struct seq_file *m, void *p)
1093 {
1094 	struct scsi_target *starget = (struct scsi_target *)m->private;
1095 	struct sdebug_target_info *targetip =
1096 		(struct sdebug_target_info *)starget->hostdata;
1097 
1098 	if (targetip)
1099 		seq_printf(m, "%c\n", targetip->reset_fail ? 'Y' : 'N');
1100 
1101 	return 0;
1102 }
1103 
1104 static int sdebug_target_reset_fail_open(struct inode *inode, struct file *file)
1105 {
1106 	return single_open(file, sdebug_target_reset_fail_show, inode->i_private);
1107 }
1108 
1109 static ssize_t sdebug_target_reset_fail_write(struct file *file,
1110 		const char __user *ubuf, size_t count, loff_t *ppos)
1111 {
1112 	int ret;
1113 	struct scsi_target *starget =
1114 		(struct scsi_target *)file->f_inode->i_private;
1115 	struct sdebug_target_info *targetip =
1116 		(struct sdebug_target_info *)starget->hostdata;
1117 
1118 	if (targetip) {
1119 		ret = kstrtobool_from_user(ubuf, count, &targetip->reset_fail);
1120 		return ret < 0 ? ret : count;
1121 	}
1122 	return -ENODEV;
1123 }
1124 
1125 static const struct file_operations sdebug_target_reset_fail_fops = {
1126 	.open	= sdebug_target_reset_fail_open,
1127 	.read	= seq_read,
1128 	.write	= sdebug_target_reset_fail_write,
1129 	.release = single_release,
1130 };
1131 
1132 static int sdebug_target_alloc(struct scsi_target *starget)
1133 {
1134 	struct sdebug_target_info *targetip;
1135 
1136 	targetip = kzalloc(sizeof(struct sdebug_target_info), GFP_KERNEL);
1137 	if (!targetip)
1138 		return -ENOMEM;
1139 
1140 	targetip->debugfs_entry = debugfs_create_dir(dev_name(&starget->dev),
1141 				sdebug_debugfs_root);
1142 
1143 	debugfs_create_file("fail_reset", 0600, targetip->debugfs_entry, starget,
1144 				&sdebug_target_reset_fail_fops);
1145 
1146 	starget->hostdata = targetip;
1147 
1148 	return 0;
1149 }
1150 
1151 static void sdebug_target_cleanup_async(void *data, async_cookie_t cookie)
1152 {
1153 	struct sdebug_target_info *targetip = data;
1154 
1155 	debugfs_remove(targetip->debugfs_entry);
1156 	kfree(targetip);
1157 }
1158 
1159 static void sdebug_target_destroy(struct scsi_target *starget)
1160 {
1161 	struct sdebug_target_info *targetip;
1162 
1163 	targetip = (struct sdebug_target_info *)starget->hostdata;
1164 	if (targetip) {
1165 		starget->hostdata = NULL;
1166 		async_schedule(sdebug_target_cleanup_async, targetip);
1167 	}
1168 }
1169 
1170 /* Only do the extra work involved in logical block provisioning if one or
1171  * more of the lbpu, lbpws or lbpws10 parameters are given and we are doing
1172  * real reads and writes (i.e. not skipping them for speed).
1173  */
1174 static inline bool scsi_debug_lbp(void)
1175 {
1176 	return 0 == sdebug_fake_rw &&
1177 		(sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10);
1178 }
1179 
1180 static void *lba2fake_store(struct sdeb_store_info *sip,
1181 			    unsigned long long lba)
1182 {
1183 	struct sdeb_store_info *lsip = sip;
1184 
1185 	lba = do_div(lba, sdebug_store_sectors);
1186 	if (!sip || !sip->storep) {
1187 		WARN_ON_ONCE(true);
1188 		lsip = xa_load(per_store_ap, 0);  /* should never be NULL */
1189 	}
1190 	return lsip->storep + lba * sdebug_sector_size;
1191 }
1192 
1193 static struct t10_pi_tuple *dif_store(struct sdeb_store_info *sip,
1194 				      sector_t sector)
1195 {
1196 	sector = sector_div(sector, sdebug_store_sectors);
1197 
1198 	return sip->dif_storep + sector;
1199 }
1200 
1201 static void sdebug_max_tgts_luns(void)
1202 {
1203 	struct sdebug_host_info *sdbg_host;
1204 	struct Scsi_Host *hpnt;
1205 
1206 	mutex_lock(&sdebug_host_list_mutex);
1207 	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
1208 		hpnt = sdbg_host->shost;
1209 		if ((hpnt->this_id >= 0) &&
1210 		    (sdebug_num_tgts > hpnt->this_id))
1211 			hpnt->max_id = sdebug_num_tgts + 1;
1212 		else
1213 			hpnt->max_id = sdebug_num_tgts;
1214 		/* sdebug_max_luns; */
1215 		hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
1216 	}
1217 	mutex_unlock(&sdebug_host_list_mutex);
1218 }
1219 
1220 enum sdeb_cmd_data {SDEB_IN_DATA = 0, SDEB_IN_CDB = 1};
1221 
1222 /* Set in_bit to -1 to indicate no bit position of invalid field */
1223 static void mk_sense_invalid_fld(struct scsi_cmnd *scp,
1224 				 enum sdeb_cmd_data c_d,
1225 				 int in_byte, int in_bit)
1226 {
1227 	unsigned char *sbuff;
1228 	u8 sks[4];
1229 	int sl, asc;
1230 
1231 	sbuff = scp->sense_buffer;
1232 	if (!sbuff) {
1233 		sdev_printk(KERN_ERR, scp->device,
1234 			    "%s: sense_buffer is NULL\n", __func__);
1235 		return;
1236 	}
1237 	asc = c_d ? INVALID_FIELD_IN_CDB : INVALID_FIELD_IN_PARAM_LIST;
1238 	memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
1239 	scsi_build_sense(scp, sdebug_dsense, ILLEGAL_REQUEST, asc, 0);
1240 	memset(sks, 0, sizeof(sks));
1241 	sks[0] = 0x80;
1242 	if (c_d)
1243 		sks[0] |= 0x40;
1244 	if (in_bit >= 0) {
1245 		sks[0] |= 0x8;
1246 		sks[0] |= 0x7 & in_bit;
1247 	}
1248 	put_unaligned_be16(in_byte, sks + 1);
1249 	if (sdebug_dsense) {
1250 		sl = sbuff[7] + 8;
1251 		sbuff[7] = sl;
1252 		sbuff[sl] = 0x2;
1253 		sbuff[sl + 1] = 0x6;
1254 		memcpy(sbuff + sl + 4, sks, 3);
1255 	} else
1256 		memcpy(sbuff + 15, sks, 3);
1257 	if (sdebug_verbose)
1258 		sdev_printk(KERN_INFO, scp->device, "%s:  [sense_key,asc,ascq"
1259 			    "]: [0x5,0x%x,0x0] %c byte=%d, bit=%d\n",
1260 			    my_name, asc, c_d ? 'C' : 'D', in_byte, in_bit);
1261 }
1262 
1263 static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq)
1264 {
1265 	if (!scp->sense_buffer) {
1266 		sdev_printk(KERN_ERR, scp->device,
1267 			    "%s: sense_buffer is NULL\n", __func__);
1268 		return;
1269 	}
1270 	memset(scp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
1271 
1272 	scsi_build_sense(scp, sdebug_dsense, key, asc, asq);
1273 
1274 	if (sdebug_verbose)
1275 		sdev_printk(KERN_INFO, scp->device,
1276 			    "%s:  [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
1277 			    my_name, key, asc, asq);
1278 }
1279 
1280 static void mk_sense_invalid_opcode(struct scsi_cmnd *scp)
1281 {
1282 	mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
1283 }
1284 
1285 static int scsi_debug_ioctl(struct scsi_device *dev, unsigned int cmd,
1286 			    void __user *arg)
1287 {
1288 	if (sdebug_verbose) {
1289 		if (0x1261 == cmd)
1290 			sdev_printk(KERN_INFO, dev,
1291 				    "%s: BLKFLSBUF [0x1261]\n", __func__);
1292 		else if (0x5331 == cmd)
1293 			sdev_printk(KERN_INFO, dev,
1294 				    "%s: CDROM_GET_CAPABILITY [0x5331]\n",
1295 				    __func__);
1296 		else
1297 			sdev_printk(KERN_INFO, dev, "%s: cmd=0x%x\n",
1298 				    __func__, cmd);
1299 	}
1300 	return -EINVAL;
1301 	/* return -ENOTTY; // correct return but upsets fdisk */
1302 }
1303 
1304 static void config_cdb_len(struct scsi_device *sdev)
1305 {
1306 	switch (sdebug_cdb_len) {
1307 	case 6:	/* suggest 6 byte READ, WRITE and MODE SENSE/SELECT */
1308 		sdev->use_10_for_rw = false;
1309 		sdev->use_16_for_rw = false;
1310 		sdev->use_10_for_ms = false;
1311 		break;
1312 	case 10: /* suggest 10 byte RWs and 6 byte MODE SENSE/SELECT */
1313 		sdev->use_10_for_rw = true;
1314 		sdev->use_16_for_rw = false;
1315 		sdev->use_10_for_ms = false;
1316 		break;
1317 	case 12: /* suggest 10 byte RWs and 10 byte MODE SENSE/SELECT */
1318 		sdev->use_10_for_rw = true;
1319 		sdev->use_16_for_rw = false;
1320 		sdev->use_10_for_ms = true;
1321 		break;
1322 	case 16:
1323 		sdev->use_10_for_rw = false;
1324 		sdev->use_16_for_rw = true;
1325 		sdev->use_10_for_ms = true;
1326 		break;
1327 	case 32: /* No knobs to suggest this so same as 16 for now */
1328 		sdev->use_10_for_rw = false;
1329 		sdev->use_16_for_rw = true;
1330 		sdev->use_10_for_ms = true;
1331 		break;
1332 	default:
1333 		pr_warn("unexpected cdb_len=%d, force to 10\n",
1334 			sdebug_cdb_len);
1335 		sdev->use_10_for_rw = true;
1336 		sdev->use_16_for_rw = false;
1337 		sdev->use_10_for_ms = false;
1338 		sdebug_cdb_len = 10;
1339 		break;
1340 	}
1341 }
1342 
1343 static void all_config_cdb_len(void)
1344 {
1345 	struct sdebug_host_info *sdbg_host;
1346 	struct Scsi_Host *shost;
1347 	struct scsi_device *sdev;
1348 
1349 	mutex_lock(&sdebug_host_list_mutex);
1350 	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
1351 		shost = sdbg_host->shost;
1352 		shost_for_each_device(sdev, shost) {
1353 			config_cdb_len(sdev);
1354 		}
1355 	}
1356 	mutex_unlock(&sdebug_host_list_mutex);
1357 }
1358 
1359 static void clear_luns_changed_on_target(struct sdebug_dev_info *devip)
1360 {
1361 	struct sdebug_host_info *sdhp = devip->sdbg_host;
1362 	struct sdebug_dev_info *dp;
1363 
1364 	list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
1365 		if ((devip->sdbg_host == dp->sdbg_host) &&
1366 		    (devip->target == dp->target)) {
1367 			clear_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
1368 		}
1369 	}
1370 }
1371 
1372 static int make_ua(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1373 {
1374 	int k;
1375 
1376 	k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS);
1377 	if (k != SDEBUG_NUM_UAS) {
1378 		const char *cp = NULL;
1379 
1380 		switch (k) {
1381 		case SDEBUG_UA_POR:
1382 			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
1383 					POWER_ON_RESET_ASCQ);
1384 			if (sdebug_verbose)
1385 				cp = "power on reset";
1386 			break;
1387 		case SDEBUG_UA_POOCCUR:
1388 			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
1389 					POWER_ON_OCCURRED_ASCQ);
1390 			if (sdebug_verbose)
1391 				cp = "power on occurred";
1392 			break;
1393 		case SDEBUG_UA_BUS_RESET:
1394 			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
1395 					BUS_RESET_ASCQ);
1396 			if (sdebug_verbose)
1397 				cp = "bus reset";
1398 			break;
1399 		case SDEBUG_UA_MODE_CHANGED:
1400 			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
1401 					MODE_CHANGED_ASCQ);
1402 			if (sdebug_verbose)
1403 				cp = "mode parameters changed";
1404 			break;
1405 		case SDEBUG_UA_CAPACITY_CHANGED:
1406 			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
1407 					CAPACITY_CHANGED_ASCQ);
1408 			if (sdebug_verbose)
1409 				cp = "capacity data changed";
1410 			break;
1411 		case SDEBUG_UA_MICROCODE_CHANGED:
1412 			mk_sense_buffer(scp, UNIT_ATTENTION,
1413 					TARGET_CHANGED_ASC,
1414 					MICROCODE_CHANGED_ASCQ);
1415 			if (sdebug_verbose)
1416 				cp = "microcode has been changed";
1417 			break;
1418 		case SDEBUG_UA_MICROCODE_CHANGED_WO_RESET:
1419 			mk_sense_buffer(scp, UNIT_ATTENTION,
1420 					TARGET_CHANGED_ASC,
1421 					MICROCODE_CHANGED_WO_RESET_ASCQ);
1422 			if (sdebug_verbose)
1423 				cp = "microcode has been changed without reset";
1424 			break;
1425 		case SDEBUG_UA_LUNS_CHANGED:
1426 			/*
1427 			 * SPC-3 behavior is to report a UNIT ATTENTION with
1428 			 * ASC/ASCQ REPORTED LUNS DATA HAS CHANGED on every LUN
1429 			 * on the target, until a REPORT LUNS command is
1430 			 * received.  SPC-4 behavior is to report it only once.
1431 			 * NOTE:  sdebug_scsi_level does not use the same
1432 			 * values as struct scsi_device->scsi_level.
1433 			 */
1434 			if (sdebug_scsi_level >= 6)	/* SPC-4 and above */
1435 				clear_luns_changed_on_target(devip);
1436 			mk_sense_buffer(scp, UNIT_ATTENTION,
1437 					TARGET_CHANGED_ASC,
1438 					LUNS_CHANGED_ASCQ);
1439 			if (sdebug_verbose)
1440 				cp = "reported luns data has changed";
1441 			break;
1442 		default:
1443 			pr_warn("unexpected unit attention code=%d\n", k);
1444 			if (sdebug_verbose)
1445 				cp = "unknown";
1446 			break;
1447 		}
1448 		clear_bit(k, devip->uas_bm);
1449 		if (sdebug_verbose)
1450 			sdev_printk(KERN_INFO, scp->device,
1451 				   "%s reports: Unit attention: %s\n",
1452 				   my_name, cp);
1453 		return check_condition_result;
1454 	}
1455 	return 0;
1456 }
1457 
1458 /* Build SCSI "data-in" buffer. Returns 0 if ok else (DID_ERROR << 16). */
1459 static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
1460 				int arr_len)
1461 {
1462 	int act_len;
1463 	struct scsi_data_buffer *sdb = &scp->sdb;
1464 
1465 	if (!sdb->length)
1466 		return 0;
1467 	if (scp->sc_data_direction != DMA_FROM_DEVICE)
1468 		return DID_ERROR << 16;
1469 
1470 	act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
1471 				      arr, arr_len);
1472 	scsi_set_resid(scp, scsi_bufflen(scp) - act_len);
1473 
1474 	return 0;
1475 }
1476 
1477 /* Partial build of SCSI "data-in" buffer. Returns 0 if ok else
1478  * (DID_ERROR << 16). Can write to offset in data-in buffer. If multiple
1479  * calls, not required to write in ascending offset order. Assumes resid
1480  * set to scsi_bufflen() prior to any calls.
1481  */
1482 static int p_fill_from_dev_buffer(struct scsi_cmnd *scp, const void *arr,
1483 				  int arr_len, unsigned int off_dst)
1484 {
1485 	unsigned int act_len, n;
1486 	struct scsi_data_buffer *sdb = &scp->sdb;
1487 	off_t skip = off_dst;
1488 
1489 	if (sdb->length <= off_dst)
1490 		return 0;
1491 	if (scp->sc_data_direction != DMA_FROM_DEVICE)
1492 		return DID_ERROR << 16;
1493 
1494 	act_len = sg_pcopy_from_buffer(sdb->table.sgl, sdb->table.nents,
1495 				       arr, arr_len, skip);
1496 	pr_debug("%s: off_dst=%u, scsi_bufflen=%u, act_len=%u, resid=%d\n",
1497 		 __func__, off_dst, scsi_bufflen(scp), act_len,
1498 		 scsi_get_resid(scp));
1499 	n = scsi_bufflen(scp) - (off_dst + act_len);
1500 	scsi_set_resid(scp, min_t(u32, scsi_get_resid(scp), n));
1501 	return 0;
1502 }
1503 
1504 /* Fetches from SCSI "data-out" buffer. Returns number of bytes fetched into
1505  * 'arr' or -1 if error.
1506  */
1507 static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
1508 			       int arr_len)
1509 {
1510 	if (!scsi_bufflen(scp))
1511 		return 0;
1512 	if (scp->sc_data_direction != DMA_TO_DEVICE)
1513 		return -1;
1514 
1515 	return scsi_sg_copy_to_buffer(scp, arr, arr_len);
1516 }
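
/*
 * The three helpers above are the only bridge between a response
 * handler's local array and the command's scatter-gather list.  The
 * usual data-in pattern, visible throughout this file, is to build the
 * response locally and clamp to the CDB's ALLOCATION LENGTH:
 *
 *	len = min_t(u32, alloc_len, sizeof(arr));
 *	return fill_from_dev_buffer(scp, arr, len);
 */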
1517 
1518 
1519 static char sdebug_inq_vendor_id[9] = "Linux   ";
1520 static char sdebug_inq_product_id[17] = "scsi_debug      ";
1521 static char sdebug_inq_product_rev[5] = SDEBUG_VERSION;
1522 /* Use some locally assigned NAAs for SAS addresses. */
1523 static const u64 naa3_comp_a = 0x3222222000000000ULL;
1524 static const u64 naa3_comp_b = 0x3333333000000000ULL;
1525 static const u64 naa3_comp_c = 0x3111111000000000ULL;
1526 
1527 /* Device identification VPD page. Returns number of bytes placed in arr */
1528 static int inquiry_vpd_83(unsigned char *arr, int port_group_id,
1529 			  int target_dev_id, int dev_id_num,
1530 			  const char *dev_id_str, int dev_id_str_len,
1531 			  const uuid_t *lu_name)
1532 {
1533 	int num, port_a;
1534 	char b[32];
1535 
1536 	port_a = target_dev_id + 1;
1537 	/* T10 vendor identifier field format (faked) */
1538 	arr[0] = 0x2;	/* ASCII */
1539 	arr[1] = 0x1;
1540 	arr[2] = 0x0;
1541 	memcpy(&arr[4], sdebug_inq_vendor_id, 8);
1542 	memcpy(&arr[12], sdebug_inq_product_id, 16);
1543 	memcpy(&arr[28], dev_id_str, dev_id_str_len);
1544 	num = 8 + 16 + dev_id_str_len;
1545 	arr[3] = num;
1546 	num += 4;
1547 	if (dev_id_num >= 0) {
1548 		if (sdebug_uuid_ctl) {
1549 			/* Locally assigned UUID */
1550 			arr[num++] = 0x1;  /* binary (not necessarily sas) */
1551 			arr[num++] = 0xa;  /* PIV=0, lu, naa */
1552 			arr[num++] = 0x0;
1553 			arr[num++] = 0x12;
1554 			arr[num++] = 0x10; /* uuid type=1, locally assigned */
1555 			arr[num++] = 0x0;
1556 			memcpy(arr + num, lu_name, 16);
1557 			num += 16;
1558 		} else {
1559 			/* NAA-3, Logical unit identifier (binary) */
1560 			arr[num++] = 0x1;  /* binary (not necessarily sas) */
1561 			arr[num++] = 0x3;  /* PIV=0, lu, naa */
1562 			arr[num++] = 0x0;
1563 			arr[num++] = 0x8;
1564 			put_unaligned_be64(naa3_comp_b + dev_id_num, arr + num);
1565 			num += 8;
1566 		}
1567 		/* Target relative port number */
1568 		arr[num++] = 0x61;	/* proto=sas, binary */
1569 		arr[num++] = 0x94;	/* PIV=1, target port, rel port */
1570 		arr[num++] = 0x0;	/* reserved */
1571 		arr[num++] = 0x4;	/* length */
1572 		arr[num++] = 0x0;	/* reserved */
1573 		arr[num++] = 0x0;	/* reserved */
1574 		arr[num++] = 0x0;
1575 		arr[num++] = 0x1;	/* relative port A */
1576 	}
1577 	/* NAA-3, Target port identifier */
1578 	arr[num++] = 0x61;	/* proto=sas, binary */
1579 	arr[num++] = 0x93;	/* piv=1, target port, naa */
1580 	arr[num++] = 0x0;
1581 	arr[num++] = 0x8;
1582 	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
1583 	num += 8;
1584 	/* NAA-3, Target port group identifier */
1585 	arr[num++] = 0x61;	/* proto=sas, binary */
1586 	arr[num++] = 0x95;	/* piv=1, target port group id */
1587 	arr[num++] = 0x0;
1588 	arr[num++] = 0x4;
1589 	arr[num++] = 0;
1590 	arr[num++] = 0;
1591 	put_unaligned_be16(port_group_id, arr + num);
1592 	num += 2;
1593 	/* NAA-3, Target device identifier */
1594 	arr[num++] = 0x61;	/* proto=sas, binary */
1595 	arr[num++] = 0xa3;	/* piv=1, target device, naa */
1596 	arr[num++] = 0x0;
1597 	arr[num++] = 0x8;
1598 	put_unaligned_be64(naa3_comp_a + target_dev_id, arr + num);
1599 	num += 8;
1600 	/* SCSI name string: Target device identifier */
1601 	arr[num++] = 0x63;	/* proto=sas, UTF-8 */
1602 	arr[num++] = 0xa8;	/* piv=1, target device, SCSI name string */
1603 	arr[num++] = 0x0;
1604 	arr[num++] = 24;
1605 	memcpy(arr + num, "naa.32222220", 12);
1606 	num += 12;
1607 	snprintf(b, sizeof(b), "%08X", target_dev_id);
1608 	memcpy(arr + num, b, 8);
1609 	num += 8;
1610 	memset(arr + num, 0, 4);
1611 	num += 4;
1612 	return num;
1613 }
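
/*
 * Each designation descriptor built above starts with the same 4 byte
 * header (SPC-4): byte 0 holds the protocol identifier (upper nibble)
 * and the code set (lower nibble), byte 1 holds PIV, the association
 * and the designator type, and byte 3 is the designator length.  So
 * 0x61/0x93 decodes as protocol=SAS, code set=binary, PIV=1,
 * association=target port, designator type=NAA.
 */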
1614 
1615 static unsigned char vpd84_data[] = {
1616 /* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
1617     0x22,0x22,0x22,0x0,0xbb,0x1,
1618     0x22,0x22,0x22,0x0,0xbb,0x2,
1619 };
1620 
1621 /*  Software interface identification VPD page */
1622 static int inquiry_vpd_84(unsigned char *arr)
1623 {
1624 	memcpy(arr, vpd84_data, sizeof(vpd84_data));
1625 	return sizeof(vpd84_data);
1626 }
1627 
1628 /* Management network addresses VPD page */
1629 static int inquiry_vpd_85(unsigned char *arr)
1630 {
1631 	int num = 0;
1632 	const char *na1 = "https://www.kernel.org/config";
1633 	const char *na2 = "http://www.kernel.org/log";
1634 	int plen, olen;
1635 
1636 	arr[num++] = 0x1;	/* lu, storage config */
1637 	arr[num++] = 0x0;	/* reserved */
1638 	arr[num++] = 0x0;
1639 	olen = strlen(na1);
1640 	plen = olen + 1;
1641 	if (plen % 4)
1642 		plen = ((plen / 4) + 1) * 4;
1643 	arr[num++] = plen;	/* length, null terminated, padded */
1644 	memcpy(arr + num, na1, olen);
1645 	memset(arr + num + olen, 0, plen - olen);
1646 	num += plen;
1647 
1648 	arr[num++] = 0x4;	/* lu, logging */
1649 	arr[num++] = 0x0;	/* reserved */
1650 	arr[num++] = 0x0;
1651 	olen = strlen(na2);
1652 	plen = olen + 1;
1653 	if (plen % 4)
1654 		plen = ((plen / 4) + 1) * 4;
1655 	arr[num++] = plen;	/* length, null terminated, padded */
1656 	memcpy(arr + num, na2, olen);
1657 	memset(arr + num + olen, 0, plen - olen);
1658 	num += plen;
1659 
1660 	return num;
1661 }
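
/*
 * The rounding above pads each null terminated address out to a
 * multiple of four bytes, as the page format requires.  For na1, olen
 * is 29, so plen becomes 30 and is rounded up to 32; the memset() then
 * writes the three zero bytes that include the terminating NUL.
 */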
1662 
1663 /* SCSI ports VPD page */
1664 static int inquiry_vpd_88(unsigned char *arr, int target_dev_id)
1665 {
1666 	int num = 0;
1667 	int port_a, port_b;
1668 
1669 	port_a = target_dev_id + 1;
1670 	port_b = port_a + 1;
1671 	arr[num++] = 0x0;	/* reserved */
1672 	arr[num++] = 0x0;	/* reserved */
1673 	arr[num++] = 0x0;
1674 	arr[num++] = 0x1;	/* relative port 1 (primary) */
1675 	memset(arr + num, 0, 6);
1676 	num += 6;
1677 	arr[num++] = 0x0;
1678 	arr[num++] = 12;	/* length tp descriptor */
1679 	/* naa-5 target port identifier (A) */
1680 	arr[num++] = 0x61;	/* proto=sas, binary */
1681 	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
1682 	arr[num++] = 0x0;	/* reserved */
1683 	arr[num++] = 0x8;	/* length */
1684 	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
1685 	num += 8;
1686 	arr[num++] = 0x0;	/* reserved */
1687 	arr[num++] = 0x0;	/* reserved */
1688 	arr[num++] = 0x0;
1689 	arr[num++] = 0x2;	/* relative port 2 (secondary) */
1690 	memset(arr + num, 0, 6);
1691 	num += 6;
1692 	arr[num++] = 0x0;
1693 	arr[num++] = 12;	/* length tp descriptor */
1694 	/* naa-5 target port identifier (B) */
1695 	arr[num++] = 0x61;	/* proto=sas, binary */
1696 	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
1697 	arr[num++] = 0x0;	/* reserved */
1698 	arr[num++] = 0x8;	/* length */
1699 	put_unaligned_be64(naa3_comp_a + port_b, arr + num);
1700 	num += 8;
1701 
1702 	return num;
1703 }
1704 
1705 
1706 static unsigned char vpd89_data[] = {
1707 /* from 4th byte */ 0,0,0,0,
1708 'l','i','n','u','x',' ',' ',' ',
1709 'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
1710 '1','2','3','4',
1711 0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
1712 0xec,0,0,0,
1713 0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
1714 0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
1715 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
1716 0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
1717 0x53,0x41,
1718 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
1719 0x20,0x20,
1720 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
1721 0x10,0x80,
1722 0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
1723 0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
1724 0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
1725 0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
1726 0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
1727 0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
1728 0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
1729 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1730 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1731 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1732 0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
1733 0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
1734 0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
1735 0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
1736 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1737 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1738 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1739 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1740 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1741 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1742 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1743 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1744 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1745 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1746 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1747 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
1748 };
1749 
1750 /* ATA Information VPD page */
1751 static int inquiry_vpd_89(unsigned char *arr)
1752 {
1753 	memcpy(arr, vpd89_data, sizeof(vpd89_data));
1754 	return sizeof(vpd89_data);
1755 }
1756 
1757 
1758 static unsigned char vpdb0_data[] = {
1759 	/* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
1760 	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1761 	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1762 	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1763 };
1764 
1765 /* Block limits VPD page (SBC-3) */
1766 static int inquiry_vpd_b0(unsigned char *arr)
1767 {
1768 	unsigned int gran;
1769 
1770 	memcpy(arr, vpdb0_data, sizeof(vpdb0_data));
1771 
1772 	/* Optimal transfer length granularity */
1773 	if (sdebug_opt_xferlen_exp != 0 &&
1774 	    sdebug_physblk_exp < sdebug_opt_xferlen_exp)
1775 		gran = 1 << sdebug_opt_xferlen_exp;
1776 	else
1777 		gran = 1 << sdebug_physblk_exp;
1778 	put_unaligned_be16(gran, arr + 2);
1779 
1780 	/* Maximum Transfer Length */
1781 	if (sdebug_store_sectors > 0x400)
1782 		put_unaligned_be32(sdebug_store_sectors, arr + 4);
1783 
1784 	/* Optimal Transfer Length */
1785 	put_unaligned_be32(sdebug_opt_blks, &arr[8]);
1786 
1787 	if (sdebug_lbpu) {
1788 		/* Maximum Unmap LBA Count */
1789 		put_unaligned_be32(sdebug_unmap_max_blocks, &arr[16]);
1790 
1791 		/* Maximum Unmap Block Descriptor Count */
1792 		put_unaligned_be32(sdebug_unmap_max_desc, &arr[20]);
1793 	}
1794 
1795 	/* Unmap Granularity Alignment */
1796 	if (sdebug_unmap_alignment) {
1797 		put_unaligned_be32(sdebug_unmap_alignment, &arr[28]);
1798 		arr[28] |= 0x80; /* UGAVALID */
1799 	}
1800 
1801 	/* Optimal Unmap Granularity */
1802 	put_unaligned_be32(sdebug_unmap_granularity, &arr[24]);
1803 
1804 	/* Maximum WRITE SAME Length */
1805 	put_unaligned_be64(sdebug_write_same_length, &arr[32]);
1806 
1807 	return 0x3c; /* Mandatory page length of the SBC-3 Block Limits VPD page */
1808 }
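
/*
 * The resulting page can be checked from user space; with sg3_utils
 * installed, something like "sg_vpd -p bl /dev/sdX" is expected to
 * decode the limits filled in above (the exact invocation depends on
 * the sg_vpd version installed).
 */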
1809 
1810 /* Block device characteristics VPD page (SBC-3) */
1811 static int inquiry_vpd_b1(struct sdebug_dev_info *devip, unsigned char *arr)
1812 {
1813 	memset(arr, 0, 0x3c);
1814 	arr[0] = 0;
1815 	arr[1] = 1;	/* non-rotating medium (e.g. solid state) */
1816 	arr[2] = 0;
1817 	arr[3] = 5;	/* less than 1.8" */
1818 	if (devip->zmodel == BLK_ZONED_HA)
1819 		arr[4] = 1 << 4;	/* zoned field = 01b */
1820 
1821 	return 0x3c;
1822 }
1823 
1824 /* Logical block provisioning VPD page (SBC-4) */
1825 static int inquiry_vpd_b2(unsigned char *arr)
1826 {
1827 	memset(arr, 0, 0x4);
1828 	arr[0] = 0;			/* threshold exponent */
1829 	if (sdebug_lbpu)
1830 		arr[1] = 1 << 7;
1831 	if (sdebug_lbpws)
1832 		arr[1] |= 1 << 6;
1833 	if (sdebug_lbpws10)
1834 		arr[1] |= 1 << 5;
1835 	if (sdebug_lbprz && scsi_debug_lbp())
1836 		arr[1] |= (sdebug_lbprz & 0x7) << 2;  /* sbc4r07 and later */
1837 	/* anc_sup=0; dp=0 (no provisioning group descriptor) */
1838 	/* minimum_percentage=0; provisioning_type=0 (unknown) */
1839 	/* threshold_percentage=0 */
1840 	return 0x4;
1841 }
1842 
1843 /* Zoned block device characteristics VPD page (ZBC mandatory) */
1844 static int inquiry_vpd_b6(struct sdebug_dev_info *devip, unsigned char *arr)
1845 {
1846 	memset(arr, 0, 0x3c);
1847 	arr[0] = 0x1; /* set URSWRZ (unrestricted read in seq. wr req zone) */
1848 	/*
1849 	 * Set Optimal number of open sequential write preferred zones and
1850 	 * Optimal number of non-sequentially written sequential write
1851 	 * preferred zones fields to 'not reported' (0xffffffff). Leave other
1852 	 * fields set to zero, apart from Max. number of open swrz_s field.
1853 	 */
1854 	put_unaligned_be32(0xffffffff, &arr[4]);
1855 	put_unaligned_be32(0xffffffff, &arr[8]);
1856 	if (sdeb_zbc_model == BLK_ZONED_HM && devip->max_open)
1857 		put_unaligned_be32(devip->max_open, &arr[12]);
1858 	else
1859 		put_unaligned_be32(0xffffffff, &arr[12]);
1860 	if (devip->zcap < devip->zsize) {
1861 		arr[19] = ZBC_CONSTANT_ZONE_START_OFFSET;
1862 		put_unaligned_be64(devip->zsize, &arr[20]);
1863 	} else {
1864 		arr[19] = 0;
1865 	}
1866 	return 0x3c;
1867 }
1868 
1869 #define SDEBUG_LONG_INQ_SZ 96
1870 #define SDEBUG_MAX_INQ_ARR_SZ 584
1871 
1872 static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1873 {
1874 	unsigned char pq_pdt;
1875 	unsigned char *arr;
1876 	unsigned char *cmd = scp->cmnd;
1877 	u32 alloc_len, n;
1878 	int ret;
1879 	bool have_wlun, is_disk, is_zbc, is_disk_zbc;
1880 
1881 	alloc_len = get_unaligned_be16(cmd + 3);
1882 	arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
1883 	if (!arr)
1884 		return DID_REQUEUE << 16;
1885 	is_disk = (sdebug_ptype == TYPE_DISK);
1886 	is_zbc = (devip->zmodel != BLK_ZONED_NONE);
1887 	is_disk_zbc = (is_disk || is_zbc);
1888 	have_wlun = scsi_is_wlun(scp->device->lun);
1889 	if (have_wlun)
1890 		pq_pdt = TYPE_WLUN;	/* present, wlun */
1891 	else if (sdebug_no_lun_0 && (devip->lun == SDEBUG_LUN_0_VAL))
1892 		pq_pdt = 0x7f;	/* not present, PQ=3, PDT=0x1f */
1893 	else
1894 		pq_pdt = (sdebug_ptype & 0x1f);
1895 	arr[0] = pq_pdt;
1896 	if (0x2 & cmd[1]) {  /* CMDDT bit set */
1897 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
1898 		kfree(arr);
1899 		return check_condition_result;
1900 	} else if (0x1 & cmd[1]) {  /* EVPD bit set */
1901 		int lu_id_num, port_group_id, target_dev_id;
1902 		u32 len;
1903 		char lu_id_str[6];
1904 		int host_no = devip->sdbg_host->shost->host_no;
1905 
1906 		port_group_id = (((host_no + 1) & 0x7f) << 8) +
1907 		    (devip->channel & 0x7f);
1908 		if (sdebug_vpd_use_hostno == 0)
1909 			host_no = 0;
1910 		lu_id_num = have_wlun ? -1 : (((host_no + 1) * 2000) +
1911 			    (devip->target * 1000) + devip->lun);
1912 		target_dev_id = ((host_no + 1) * 2000) +
1913 				 (devip->target * 1000) - 3;
1914 		len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
1915 		if (0 == cmd[2]) { /* supported vital product data pages */
1916 			arr[1] = cmd[2];	/* sanity */
1917 			n = 4;
1918 			arr[n++] = 0x0;   /* this page */
1919 			arr[n++] = 0x80;  /* unit serial number */
1920 			arr[n++] = 0x83;  /* device identification */
1921 			arr[n++] = 0x84;  /* software interface ident. */
1922 			arr[n++] = 0x85;  /* management network addresses */
1923 			arr[n++] = 0x86;  /* extended inquiry */
1924 			arr[n++] = 0x87;  /* mode page policy */
1925 			arr[n++] = 0x88;  /* SCSI ports */
1926 			if (is_disk_zbc) {	  /* SBC or ZBC */
1927 				arr[n++] = 0x89;  /* ATA information */
1928 				arr[n++] = 0xb0;  /* Block limits */
1929 				arr[n++] = 0xb1;  /* Block characteristics */
1930 				if (is_disk)
1931 					arr[n++] = 0xb2;  /* LB Provisioning */
1932 				if (is_zbc)
1933 					arr[n++] = 0xb6;  /* ZB dev. char. */
1934 			}
1935 			arr[3] = n - 4;	  /* number of supported VPD pages */
1936 		} else if (0x80 == cmd[2]) { /* unit serial number */
1937 			arr[1] = cmd[2];	/* sanity */
1938 			arr[3] = len;
1939 			memcpy(&arr[4], lu_id_str, len);
1940 		} else if (0x83 == cmd[2]) { /* device identification */
1941 			arr[1] = cmd[2];	/* sanity */
1942 			arr[3] = inquiry_vpd_83(&arr[4], port_group_id,
1943 						target_dev_id, lu_id_num,
1944 						lu_id_str, len,
1945 						&devip->lu_name);
1946 		} else if (0x84 == cmd[2]) { /* Software interface ident. */
1947 			arr[1] = cmd[2];	/* sanity */
1948 			arr[3] = inquiry_vpd_84(&arr[4]);
1949 		} else if (0x85 == cmd[2]) { /* Management network addresses */
1950 			arr[1] = cmd[2];	/* sanity */
1951 			arr[3] = inquiry_vpd_85(&arr[4]);
1952 		} else if (0x86 == cmd[2]) { /* extended inquiry */
1953 			arr[1] = cmd[2];	/* sanity */
1954 			arr[3] = 0x3c;	/* number of following entries */
1955 			if (sdebug_dif == T10_PI_TYPE3_PROTECTION)
1956 				arr[4] = 0x4;	/* SPT: GRD_CHK:1 */
1957 			else if (have_dif_prot)
1958 				arr[4] = 0x5;   /* SPT: GRD_CHK:1, REF_CHK:1 */
1959 			else
1960 				arr[4] = 0x0;   /* no protection stuff */
1961 			arr[5] = 0x7;   /* head of q, ordered + simple q's */
1962 		} else if (0x87 == cmd[2]) { /* mode page policy */
1963 			arr[1] = cmd[2];	/* sanity */
1964 			arr[3] = 0x8;	/* number of following entries */
1965 			arr[4] = 0x2;	/* disconnect-reconnect mp */
1966 			arr[6] = 0x80;	/* mlus, shared */
1967 			arr[8] = 0x18;	 /* protocol specific lu */
1968 			arr[10] = 0x82;	 /* mlus, per initiator port */
1969 		} else if (0x88 == cmd[2]) { /* SCSI Ports */
1970 			arr[1] = cmd[2];	/* sanity */
1971 			arr[3] = inquiry_vpd_88(&arr[4], target_dev_id);
1972 		} else if (is_disk_zbc && 0x89 == cmd[2]) { /* ATA info */
1973 			arr[1] = cmd[2];        /* sanity */
1974 			n = inquiry_vpd_89(&arr[4]);
1975 			put_unaligned_be16(n, arr + 2);
1976 		} else if (is_disk_zbc && 0xb0 == cmd[2]) { /* Block limits */
1977 			arr[1] = cmd[2];        /* sanity */
1978 			arr[3] = inquiry_vpd_b0(&arr[4]);
1979 		} else if (is_disk_zbc && 0xb1 == cmd[2]) { /* Block char. */
1980 			arr[1] = cmd[2];        /* sanity */
1981 			arr[3] = inquiry_vpd_b1(devip, &arr[4]);
1982 		} else if (is_disk && 0xb2 == cmd[2]) { /* LB Prov. */
1983 			arr[1] = cmd[2];        /* sanity */
1984 			arr[3] = inquiry_vpd_b2(&arr[4]);
1985 		} else if (is_zbc && cmd[2] == 0xb6) { /* ZB dev. charact. */
1986 			arr[1] = cmd[2];        /* sanity */
1987 			arr[3] = inquiry_vpd_b6(devip, &arr[4]);
1988 		} else {
1989 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
1990 			kfree(arr);
1991 			return check_condition_result;
1992 		}
1993 		len = min_t(u32, get_unaligned_be16(arr + 2) + 4, alloc_len);
1994 		ret = fill_from_dev_buffer(scp, arr,
1995 			    min_t(u32, len, SDEBUG_MAX_INQ_ARR_SZ));
1996 		kfree(arr);
1997 		return ret;
1998 	}
1999 	/* drops through here for a standard inquiry */
2000 	arr[1] = sdebug_removable ? 0x80 : 0;	/* Removable disk */
2001 	arr[2] = sdebug_scsi_level;
2002 	arr[3] = 2;    /* response_data_format==2 */
2003 	arr[4] = SDEBUG_LONG_INQ_SZ - 5;
2004 	arr[5] = (int)have_dif_prot;	/* PROTECT bit */
2005 	if (sdebug_vpd_use_hostno == 0)
2006 		arr[5] |= 0x10; /* claim: implicit TPGS */
2007 	arr[6] = 0x10; /* claim: MultiP */
2008 	/* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
2009 	arr[7] = 0xa; /* claim: LINKED + CMDQUE */
2010 	memcpy(&arr[8], sdebug_inq_vendor_id, 8);
2011 	memcpy(&arr[16], sdebug_inq_product_id, 16);
2012 	memcpy(&arr[32], sdebug_inq_product_rev, 4);
2013 	/* Use Vendor Specific area to place driver date in ASCII */
2014 	memcpy(&arr[36], sdebug_version_date, 8);
2015 	/* version descriptors (2 bytes each) follow */
2016 	put_unaligned_be16(0xc0, arr + 58);   /* SAM-6 no version claimed */
2017 	put_unaligned_be16(0x5c0, arr + 60);  /* SPC-5 no version claimed */
2018 	n = 62;
2019 	if (is_disk) {		/* SBC-4 no version claimed */
2020 		put_unaligned_be16(0x600, arr + n);
2021 		n += 2;
2022 	} else if (sdebug_ptype == TYPE_TAPE) {	/* SSC-4 rev 3 */
2023 		put_unaligned_be16(0x525, arr + n);
2024 		n += 2;
2025 	} else if (is_zbc) {	/* ZBC BSR INCITS 536 revision 05 */
2026 		put_unaligned_be16(0x624, arr + n);
2027 		n += 2;
2028 	}
2029 	put_unaligned_be16(0x2100, arr + n);	/* SPL-4 no version claimed */
2030 	ret = fill_from_dev_buffer(scp, arr,
2031 			    min_t(u32, alloc_len, SDEBUG_LONG_INQ_SZ));
2032 	kfree(arr);
2033 	return ret;
2034 }
2035 
2036 /* See resp_iec_m_pg() for how this data is manipulated */
2037 static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
2038 				   0, 0, 0x0, 0x0};
2039 
2040 static int resp_requests(struct scsi_cmnd *scp,
2041 			 struct sdebug_dev_info *devip)
2042 {
2043 	unsigned char *cmd = scp->cmnd;
2044 	unsigned char arr[SCSI_SENSE_BUFFERSIZE];	/* assume >= 18 bytes */
2045 	bool dsense = !!(cmd[1] & 1);
2046 	u32 alloc_len = cmd[4];
2047 	u32 len = 18;
2048 	int stopped_state = atomic_read(&devip->stopped);
2049 
2050 	memset(arr, 0, sizeof(arr));
2051 	if (stopped_state > 0) {	/* some "pollable" data [spc6r02: 5.12.2] */
2052 		if (dsense) {
2053 			arr[0] = 0x72;
2054 			arr[1] = NOT_READY;
2055 			arr[2] = LOGICAL_UNIT_NOT_READY;
2056 			arr[3] = (stopped_state == 2) ? 0x1 : 0x2;
2057 			len = 8;
2058 		} else {
2059 			arr[0] = 0x70;
2060 			arr[2] = NOT_READY;		/* NOT_READY in sense_key */
2061 			arr[7] = 0xa;			/* 18 byte sense buffer */
2062 			arr[12] = LOGICAL_UNIT_NOT_READY;
2063 			arr[13] = (stopped_state == 2) ? 0x1 : 0x2;
2064 		}
2065 	} else if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
2066 		/* Information exceptions control mode page: TEST=1, MRIE=6 */
2067 		if (dsense) {
2068 			arr[0] = 0x72;
2069 			arr[1] = 0x0;		/* NO_SENSE in sense_key */
2070 			arr[2] = THRESHOLD_EXCEEDED;
2071 			arr[3] = 0xff;		/* Failure prediction(false) */
2072 			len = 8;
2073 		} else {
2074 			arr[0] = 0x70;
2075 			arr[2] = 0x0;		/* NO_SENSE in sense_key */
2076 			arr[7] = 0xa;   	/* 18 byte sense buffer */
2077 			arr[12] = THRESHOLD_EXCEEDED;
2078 			arr[13] = 0xff;		/* Failure prediction(false) */
2079 		}
2080 	} else {	/* nothing to report */
2081 		if (dsense) {
2082 			len = 8;
2083 			memset(arr, 0, len);
2084 			arr[0] = 0x72;
2085 		} else {
2086 			memset(arr, 0, len);
2087 			arr[0] = 0x70;
2088 			arr[7] = 0xa;
2089 		}
2090 	}
2091 	return fill_from_dev_buffer(scp, arr, min_t(u32, len, alloc_len));
2092 }
2093 
2094 static int resp_start_stop(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
2095 {
2096 	unsigned char *cmd = scp->cmnd;
2097 	int power_cond, want_stop, stopped_state;
2098 	bool changing;
2099 
2100 	power_cond = (cmd[4] & 0xf0) >> 4;
2101 	if (power_cond) {
2102 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 7);
2103 		return check_condition_result;
2104 	}
2105 	want_stop = !(cmd[4] & 1);
2106 	stopped_state = atomic_read(&devip->stopped);
2107 	if (stopped_state == 2) {
2108 		ktime_t now_ts = ktime_get_boottime();
2109 
2110 		if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
2111 			u64 diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));
2112 
2113 			if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
2114 				/* tur_ms_to_ready delay has expired */
2115 				atomic_set(&devip->stopped, 0);
2116 				stopped_state = 0;
2117 			}
2118 		}
2119 		if (stopped_state == 2) {
2120 			if (want_stop) {
2121 				stopped_state = 1;	/* dummy up success */
2122 			} else {	/* Disallow tur_ms_to_ready delay to be overridden */
2123 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 0 /* START bit */);
2124 				return check_condition_result;
2125 			}
2126 		}
2127 	}
2128 	changing = (stopped_state != want_stop);
2129 	if (changing)
2130 		atomic_xchg(&devip->stopped, want_stop);
2131 	if (!changing || (cmd[1] & 0x1))  /* state unchanged or IMMED bit set in cdb */
2132 		return SDEG_RES_IMMED_MASK;
2133 	else
2134 		return 0;
2135 }
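
/*
 * A stopped_state of 2 models a device that is still becoming ready: it
 * only transitions to ready once tur_ms_to_ready milliseconds (scaled
 * to nanoseconds above) have elapsed since the device was created.  A
 * STOP is accepted at any time, but a START cannot shortcut the
 * remaining delay.
 */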
2136 
2137 static sector_t get_sdebug_capacity(void)
2138 {
2139 	static const unsigned int gibibyte = 1073741824;
2140 
2141 	if (sdebug_virtual_gb > 0)
2142 		return (sector_t)sdebug_virtual_gb *
2143 			(gibibyte / sdebug_sector_size);
2144 	else
2145 		return sdebug_store_sectors;
2146 }
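
/*
 * Worked example: with virtual_gb=4 and the default 512 byte sector
 * size, this returns 4 * (1073741824 / 512) = 8388608 sectors (4 GiB),
 * independent of how many sectors of backing store were allocated.
 */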
2147 
2148 #define SDEBUG_READCAP_ARR_SZ 8
2149 static int resp_readcap(struct scsi_cmnd *scp,
2150 			struct sdebug_dev_info *devip)
2151 {
2152 	unsigned char arr[SDEBUG_READCAP_ARR_SZ];
2153 	unsigned int capac;
2154 
2155 	/* following just in case virtual_gb changed */
2156 	sdebug_capacity = get_sdebug_capacity();
2157 	memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
2158 	if (sdebug_capacity < 0xffffffff) {
2159 		capac = (unsigned int)sdebug_capacity - 1;
2160 		put_unaligned_be32(capac, arr + 0);
2161 	} else
2162 		put_unaligned_be32(0xffffffff, arr + 0);
2163 	put_unaligned_be16(sdebug_sector_size, arr + 6);
2164 	return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
2165 }
2166 
2167 #define SDEBUG_READCAP16_ARR_SZ 32
2168 static int resp_readcap16(struct scsi_cmnd *scp,
2169 			  struct sdebug_dev_info *devip)
2170 {
2171 	unsigned char *cmd = scp->cmnd;
2172 	unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
2173 	u32 alloc_len;
2174 
2175 	alloc_len = get_unaligned_be32(cmd + 10);
2176 	/* following just in case virtual_gb changed */
2177 	sdebug_capacity = get_sdebug_capacity();
2178 	memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
2179 	put_unaligned_be64((u64)(sdebug_capacity - 1), arr + 0);
2180 	put_unaligned_be32(sdebug_sector_size, arr + 8);
2181 	arr[13] = sdebug_physblk_exp & 0xf;
2182 	arr[14] = (sdebug_lowest_aligned >> 8) & 0x3f;
2183 
2184 	if (scsi_debug_lbp()) {
2185 		arr[14] |= 0x80; /* LBPME */
2186 		/* from sbc4r07, this LBPRZ field is 1 bit, but the LBPRZ in
2187 		 * the LB Provisioning VPD page is 3 bits. Note that lbprz=2
2188 		 * in the wider field maps to 0 in this field.
2189 		 */
2190 		if (sdebug_lbprz & 1)	/* precisely what the draft requires */
2191 			arr[14] |= 0x40;
2192 	}
2193 
2194 	/*
2195 	 * Since the scsi_debug READ CAPACITY implementation always reports the
2196 	 * total disk capacity, set RC BASIS = 1 for host-managed ZBC devices.
2197 	 */
2198 	if (devip->zmodel == BLK_ZONED_HM)
2199 		arr[12] |= 1 << 4;
2200 
2201 	arr[15] = sdebug_lowest_aligned & 0xff;
2202 
2203 	if (have_dif_prot) {
2204 		arr[12] = (sdebug_dif - 1) << 1; /* P_TYPE */
2205 		arr[12] |= 1; /* PROT_EN */
2206 	}
2207 
2208 	return fill_from_dev_buffer(scp, arr,
2209 			    min_t(u32, alloc_len, SDEBUG_READCAP16_ARR_SZ));
2210 }
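
/*
 * Example of the protection byte set above: with dif=2 (type 2
 * protection) arr[12] becomes ((2 - 1) << 1) | 1 = 0x03, i.e. P_TYPE
 * 001b with PROT_EN set, which is how READ CAPACITY(16) advertises
 * type 2 protection.
 */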
2211 
2212 #define SDEBUG_MAX_TGTPGS_ARR_SZ 1412
2213 
2214 static int resp_report_tgtpgs(struct scsi_cmnd *scp,
2215 			      struct sdebug_dev_info *devip)
2216 {
2217 	unsigned char *cmd = scp->cmnd;
2218 	unsigned char *arr;
2219 	int host_no = devip->sdbg_host->shost->host_no;
2220 	int port_group_a, port_group_b, port_a, port_b;
2221 	u32 alen, n, rlen;
2222 	int ret;
2223 
2224 	alen = get_unaligned_be32(cmd + 6);
2225 	arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
2226 	if (!arr)
2227 		return DID_REQUEUE << 16;
2228 	/*
2229 	 * VPD page 0x88 states we have two ports, one
2230 	 * real and a fake port with no device connected.
2231 	 * So we create two port groups with one port each
2232 	 * and set the group with port B to unavailable.
2233 	 */
2234 	port_a = 0x1; /* relative port A */
2235 	port_b = 0x2; /* relative port B */
2236 	port_group_a = (((host_no + 1) & 0x7f) << 8) +
2237 			(devip->channel & 0x7f);
2238 	port_group_b = (((host_no + 1) & 0x7f) << 8) +
2239 			(devip->channel & 0x7f) + 0x80;
2240 
2241 	/*
2242 	 * The asymmetric access state is cycled according to the host_no.
2243 	 */
2244 	n = 4;
2245 	if (sdebug_vpd_use_hostno == 0) {
2246 		arr[n++] = host_no % 3; /* Asymm access state */
2247 		arr[n++] = 0x0F; /* claim: all states are supported */
2248 	} else {
2249 		arr[n++] = 0x0; /* Active/Optimized path */
2250 		arr[n++] = 0x01; /* only support active/optimized paths */
2251 	}
2252 	put_unaligned_be16(port_group_a, arr + n);
2253 	n += 2;
2254 	arr[n++] = 0;    /* Reserved */
2255 	arr[n++] = 0;    /* Status code */
2256 	arr[n++] = 0;    /* Vendor unique */
2257 	arr[n++] = 0x1;  /* One port per group */
2258 	arr[n++] = 0;    /* Reserved */
2259 	arr[n++] = 0;    /* Reserved */
2260 	put_unaligned_be16(port_a, arr + n);
2261 	n += 2;
2262 	arr[n++] = 3;    /* Port unavailable */
2263 	arr[n++] = 0x08; /* claim: only unavailable paths are supported */
2264 	put_unaligned_be16(port_group_b, arr + n);
2265 	n += 2;
2266 	arr[n++] = 0;    /* Reserved */
2267 	arr[n++] = 0;    /* Status code */
2268 	arr[n++] = 0;    /* Vendor unique */
2269 	arr[n++] = 0x1;  /* One port per group */
2270 	arr[n++] = 0;    /* Reserved */
2271 	arr[n++] = 0;    /* Reserved */
2272 	put_unaligned_be16(port_b, arr + n);
2273 	n += 2;
2274 
2275 	rlen = n - 4;
2276 	put_unaligned_be32(rlen, arr + 0);
2277 
2278 	/*
2279 	 * Return the smallest value of either
2280 	 * - The allocated length
2281 	 * - The constructed response length
2282 	 * - The maximum array size
2283 	 */
2284 	rlen = min(alen, n);
2285 	ret = fill_from_dev_buffer(scp, arr,
2286 			   min_t(u32, rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
2287 	kfree(arr);
2288 	return ret;
2289 }
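
/*
 * Size check for the response above: each target port group descriptor
 * is 8 bytes plus 4 bytes per port, so two single-port groups yield
 * 4 + 2 * (8 + 4) = 28 bytes in total and rlen is set to 24.
 */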
2290 
2291 static int resp_rsup_opcodes(struct scsi_cmnd *scp,
2292 			     struct sdebug_dev_info *devip)
2293 {
2294 	bool rctd;
2295 	u8 reporting_opts, req_opcode, sdeb_i, supp;
2296 	u16 req_sa, u;
2297 	u32 alloc_len, a_len;
2298 	int k, offset, len, errsts, count, bump, na;
2299 	const struct opcode_info_t *oip;
2300 	const struct opcode_info_t *r_oip;
2301 	u8 *arr;
2302 	u8 *cmd = scp->cmnd;
2303 
2304 	rctd = !!(cmd[2] & 0x80);
2305 	reporting_opts = cmd[2] & 0x7;
2306 	req_opcode = cmd[3];
2307 	req_sa = get_unaligned_be16(cmd + 4);
2308 	alloc_len = get_unaligned_be32(cmd + 6);
2309 	if (alloc_len < 4 || alloc_len > 0xffff) {
2310 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
2311 		return check_condition_result;
2312 	}
2313 	if (alloc_len > 8192)
2314 		a_len = 8192;
2315 	else
2316 		a_len = alloc_len;
2317 	arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_ATOMIC);
2318 	if (NULL == arr) {
2319 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
2320 				INSUFF_RES_ASCQ);
2321 		return check_condition_result;
2322 	}
2323 	switch (reporting_opts) {
2324 	case 0:	/* all commands */
2325 		/* count number of commands */
2326 		for (count = 0, oip = opcode_info_arr;
2327 		     oip->num_attached != 0xff; ++oip) {
2328 			if (F_INV_OP & oip->flags)
2329 				continue;
2330 			count += (oip->num_attached + 1);
2331 		}
2332 		bump = rctd ? 20 : 8;
2333 		put_unaligned_be32(count * bump, arr);
2334 		for (offset = 4, oip = opcode_info_arr;
2335 		     oip->num_attached != 0xff && offset < a_len; ++oip) {
2336 			if (F_INV_OP & oip->flags)
2337 				continue;
2338 			na = oip->num_attached;
2339 			arr[offset] = oip->opcode;
2340 			put_unaligned_be16(oip->sa, arr + offset + 2);
2341 			if (rctd)
2342 				arr[offset + 5] |= 0x2;
2343 			if (FF_SA & oip->flags)
2344 				arr[offset + 5] |= 0x1;
2345 			put_unaligned_be16(oip->len_mask[0], arr + offset + 6);
2346 			if (rctd)
2347 				put_unaligned_be16(0xa, arr + offset + 8);
2348 			r_oip = oip;
2349 			for (k = 0, oip = oip->arrp; k < na; ++k, ++oip) {
2350 				if (F_INV_OP & oip->flags)
2351 					continue;
2352 				offset += bump;
2353 				arr[offset] = oip->opcode;
2354 				put_unaligned_be16(oip->sa, arr + offset + 2);
2355 				if (rctd)
2356 					arr[offset + 5] |= 0x2;
2357 				if (FF_SA & oip->flags)
2358 					arr[offset + 5] |= 0x1;
2359 				put_unaligned_be16(oip->len_mask[0],
2360 						   arr + offset + 6);
2361 				if (rctd)
2362 					put_unaligned_be16(0xa,
2363 							   arr + offset + 8);
2364 			}
2365 			oip = r_oip;
2366 			offset += bump;
2367 		}
2368 		break;
2369 	case 1:	/* one command: opcode only */
2370 	case 2:	/* one command: opcode plus service action */
2371 	case 3:	/* one command: if sa==0 then opcode only else opcode+sa */
2372 		sdeb_i = opcode_ind_arr[req_opcode];
2373 		oip = &opcode_info_arr[sdeb_i];
2374 		if (F_INV_OP & oip->flags) {
2375 			supp = 1;
2376 			offset = 4;
2377 		} else {
2378 			if (1 == reporting_opts) {
2379 				if (FF_SA & oip->flags) {
2380 					mk_sense_invalid_fld(scp, SDEB_IN_CDB,
2381 							     2, 2);
2382 					kfree(arr);
2383 					return check_condition_result;
2384 				}
2385 				req_sa = 0;
2386 			} else if (2 == reporting_opts &&
2387 				   0 == (FF_SA & oip->flags)) {
2388 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1); /* point at requested sa */
2389 				kfree(arr);
2390 				return check_condition_result;
2391 			}
2392 			if (0 == (FF_SA & oip->flags) &&
2393 			    req_opcode == oip->opcode)
2394 				supp = 3;
2395 			else if (0 == (FF_SA & oip->flags)) {
2396 				na = oip->num_attached;
2397 				for (k = 0, oip = oip->arrp; k < na;
2398 				     ++k, ++oip) {
2399 					if (req_opcode == oip->opcode)
2400 						break;
2401 				}
2402 				supp = (k >= na) ? 1 : 3;
2403 			} else if (req_sa != oip->sa) {
2404 				na = oip->num_attached;
2405 				for (k = 0, oip = oip->arrp; k < na;
2406 				     ++k, ++oip) {
2407 					if (req_sa == oip->sa)
2408 						break;
2409 				}
2410 				supp = (k >= na) ? 1 : 3;
2411 			} else
2412 				supp = 3;
2413 			if (3 == supp) {
2414 				u = oip->len_mask[0];
2415 				put_unaligned_be16(u, arr + 2);
2416 				arr[4] = oip->opcode;
2417 				for (k = 1; k < u; ++k)
2418 					arr[4 + k] = (k < 16) ?
2419 						 oip->len_mask[k] : 0xff;
2420 				offset = 4 + u;
2421 			} else
2422 				offset = 4;
2423 		}
2424 		arr[1] = (rctd ? 0x80 : 0) | supp;
2425 		if (rctd) {
2426 			put_unaligned_be16(0xa, arr + offset);
2427 			offset += 12;
2428 		}
2429 		break;
2430 	default:
2431 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
2432 		kfree(arr);
2433 		return check_condition_result;
2434 	}
2435 	offset = (offset < a_len) ? offset : a_len;
2436 	len = (offset < alloc_len) ? offset : alloc_len;
2437 	errsts = fill_from_dev_buffer(scp, arr, len);
2438 	kfree(arr);
2439 	return errsts;
2440 }
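
/*
 * In the all-commands report each command descriptor is 8 bytes, or 20
 * when RCTD is set: the extra 12 bytes are the command timeouts
 * descriptor, whose own length (0x0a) is what gets stored just before
 * it above.
 */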
2441 
2442 static int resp_rsup_tmfs(struct scsi_cmnd *scp,
2443 			  struct sdebug_dev_info *devip)
2444 {
2445 	bool repd;
2446 	u32 alloc_len, len;
2447 	u8 arr[16];
2448 	u8 *cmd = scp->cmnd;
2449 
2450 	memset(arr, 0, sizeof(arr));
2451 	repd = !!(cmd[2] & 0x80);
2452 	alloc_len = get_unaligned_be32(cmd + 6);
2453 	if (alloc_len < 4) {
2454 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
2455 		return check_condition_result;
2456 	}
2457 	arr[0] = 0xc8;		/* ATS | ATSS | LURS */
2458 	arr[1] = 0x1;		/* ITNRS */
2459 	if (repd) {
2460 		arr[3] = 0xc;
2461 		len = 16;
2462 	} else
2463 		len = 4;
2464 
2465 	len = (len < alloc_len) ? len : alloc_len;
2466 	return fill_from_dev_buffer(scp, arr, len);
2467 }
2468 
2469 /* <<Following mode page info copied from ST318451LW>> */
2470 
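/*
 * The pcontrol argument of the resp_*_pg() helpers below is the PC
 * field of the MODE SENSE CDB: 0 requests current values, 1 the
 * changeable-bits mask, 2 the defaults; 3 (saved values) is rejected
 * by resp_mode_sense() with SAVING PARAMETERS NOT SUPPORTED before
 * these helpers are reached.
 */
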
2471 static int resp_err_recov_pg(unsigned char *p, int pcontrol, int target)
2472 {	/* Read-Write Error Recovery page for mode_sense */
2473 	unsigned char err_recov_pg[] = {0x1, 0xa, 0xc0, 11, 240, 0, 0, 0,
2474 					5, 0, 0xff, 0xff};
2475 
2476 	memcpy(p, err_recov_pg, sizeof(err_recov_pg));
2477 	if (1 == pcontrol)
2478 		memset(p + 2, 0, sizeof(err_recov_pg) - 2);
2479 	return sizeof(err_recov_pg);
2480 }
2481 
2482 static int resp_disconnect_pg(unsigned char *p, int pcontrol, int target)
2483 { 	/* Disconnect-Reconnect page for mode_sense */
2484 	unsigned char disconnect_pg[] = {0x2, 0xe, 128, 128, 0, 10, 0, 0,
2485 					 0, 0, 0, 0, 0, 0, 0, 0};
2486 
2487 	memcpy(p, disconnect_pg, sizeof(disconnect_pg));
2488 	if (1 == pcontrol)
2489 		memset(p + 2, 0, sizeof(disconnect_pg) - 2);
2490 	return sizeof(disconnect_pg);
2491 }
2492 
2493 static int resp_format_pg(unsigned char *p, int pcontrol, int target)
2494 {       /* Format device page for mode_sense */
2495 	unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
2496 				     0, 0, 0, 0, 0, 0, 0, 0,
2497 				     0, 0, 0, 0, 0x40, 0, 0, 0};
2498 
2499 	memcpy(p, format_pg, sizeof(format_pg));
2500 	put_unaligned_be16(sdebug_sectors_per, p + 10);
2501 	put_unaligned_be16(sdebug_sector_size, p + 12);
2502 	if (sdebug_removable)
2503 		p[20] |= 0x20; /* should agree with INQUIRY */
2504 	if (1 == pcontrol)
2505 		memset(p + 2, 0, sizeof(format_pg) - 2);
2506 	return sizeof(format_pg);
2507 }
2508 
2509 static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
2510 				     0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,
2511 				     0, 0, 0, 0};
2512 
2513 static int resp_caching_pg(unsigned char *p, int pcontrol, int target)
2514 { 	/* Caching page for mode_sense */
2515 	unsigned char ch_caching_pg[] = {/* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0,
2516 		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
2517 	unsigned char d_caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
2518 		0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,     0, 0, 0, 0};
2519 
2520 	if (SDEBUG_OPT_N_WCE & sdebug_opts)
2521 		caching_pg[2] &= ~0x4;	/* set WCE=0 (default WCE=1) */
2522 	memcpy(p, caching_pg, sizeof(caching_pg));
2523 	if (1 == pcontrol)
2524 		memcpy(p + 2, ch_caching_pg, sizeof(ch_caching_pg));
2525 	else if (2 == pcontrol)
2526 		memcpy(p, d_caching_pg, sizeof(d_caching_pg));
2527 	return sizeof(caching_pg);
2528 }
2529 
2530 static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
2531 				    0, 0, 0x2, 0x4b};
2532 
2533 static int resp_ctrl_m_pg(unsigned char *p, int pcontrol, int target)
2534 { 	/* Control mode page for mode_sense */
2535 	unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
2536 					0, 0, 0, 0};
2537 	unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
2538 				     0, 0, 0x2, 0x4b};
2539 
2540 	if (sdebug_dsense)
2541 		ctrl_m_pg[2] |= 0x4;
2542 	else
2543 		ctrl_m_pg[2] &= ~0x4;
2544 
2545 	if (sdebug_ato)
2546 		ctrl_m_pg[5] |= 0x80; /* ATO=1 */
2547 
2548 	memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
2549 	if (1 == pcontrol)
2550 		memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
2551 	else if (2 == pcontrol)
2552 		memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
2553 	return sizeof(ctrl_m_pg);
2554 }
2555 
2556 
2557 static int resp_iec_m_pg(unsigned char *p, int pcontrol, int target)
2558 {	/* Informational Exceptions control mode page for mode_sense */
2559 	unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
2560 				       0, 0, 0x0, 0x0};
2561 	unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
2562 				      0, 0, 0x0, 0x0};
2563 
2564 	memcpy(p, iec_m_pg, sizeof(iec_m_pg));
2565 	if (1 == pcontrol)
2566 		memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
2567 	else if (2 == pcontrol)
2568 		memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
2569 	return sizeof(iec_m_pg);
2570 }
2571 
2572 static int resp_sas_sf_m_pg(unsigned char *p, int pcontrol, int target)
2573 {	/* SAS SSP mode page - short format for mode_sense */
2574 	unsigned char sas_sf_m_pg[] = {0x19, 0x6,
2575 		0x6, 0x0, 0x7, 0xd0, 0x0, 0x0};
2576 
2577 	memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg));
2578 	if (1 == pcontrol)
2579 		memset(p + 2, 0, sizeof(sas_sf_m_pg) - 2);
2580 	return sizeof(sas_sf_m_pg);
2581 }
2582 
2583 
2584 static int resp_sas_pcd_m_spg(unsigned char *p, int pcontrol, int target,
2585 			      int target_dev_id)
2586 {	/* SAS phy control and discover mode page for mode_sense */
2587 	unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
2588 		    0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
2589 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2590 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2591 		    0x2, 0, 0, 0, 0, 0, 0, 0,
2592 		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
2593 		    0, 0, 0, 0, 0, 0, 0, 0,
2594 		    0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
2595 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2596 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2597 		    0x3, 0, 0, 0, 0, 0, 0, 0,
2598 		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
2599 		    0, 0, 0, 0, 0, 0, 0, 0,
2600 		};
2601 	int port_a, port_b;
2602 
2603 	put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 16);
2604 	put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 24);
2605 	put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 64);
2606 	put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 72);
2607 	port_a = target_dev_id + 1;
2608 	port_b = port_a + 1;
2609 	memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
2610 	put_unaligned_be32(port_a, p + 20);
2611 	put_unaligned_be32(port_b, p + 48 + 20);
2612 	if (1 == pcontrol)
2613 		memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
2614 	return sizeof(sas_pcd_m_pg);
2615 }
2616 
2617 static int resp_sas_sha_m_spg(unsigned char *p, int pcontrol)
2618 {	/* SAS SSP shared protocol specific port mode subpage */
2619 	unsigned char sas_sha_m_pg[] = {0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
2620 		    0, 0, 0, 0, 0, 0, 0, 0,
2621 		};
2622 
2623 	memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg));
2624 	if (1 == pcontrol)
2625 		memset(p + 4, 0, sizeof(sas_sha_m_pg) - 4);
2626 	return sizeof(sas_sha_m_pg);
2627 }
2628 
2629 #define SDEBUG_MAX_MSENSE_SZ 256
2630 
2631 static int resp_mode_sense(struct scsi_cmnd *scp,
2632 			   struct sdebug_dev_info *devip)
2633 {
2634 	int pcontrol, pcode, subpcode, bd_len;
2635 	unsigned char dev_spec;
2636 	u32 alloc_len, offset, len;
2637 	int target_dev_id;
2638 	int target = scp->device->id;
2639 	unsigned char *ap;
2640 	unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
2641 	unsigned char *cmd = scp->cmnd;
2642 	bool dbd, llbaa, msense_6, is_disk, is_zbc, bad_pcode;
2643 
2644 	dbd = !!(cmd[1] & 0x8);		/* disable block descriptors */
2645 	pcontrol = (cmd[2] & 0xc0) >> 6;
2646 	pcode = cmd[2] & 0x3f;
2647 	subpcode = cmd[3];
2648 	msense_6 = (MODE_SENSE == cmd[0]);
2649 	llbaa = msense_6 ? false : !!(cmd[1] & 0x10);
2650 	is_disk = (sdebug_ptype == TYPE_DISK);
2651 	is_zbc = (devip->zmodel != BLK_ZONED_NONE);
2652 	if ((is_disk || is_zbc) && !dbd)
2653 		bd_len = llbaa ? 16 : 8;
2654 	else
2655 		bd_len = 0;
2656 	alloc_len = msense_6 ? cmd[4] : get_unaligned_be16(cmd + 7);
2657 	memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
2658 	if (0x3 == pcontrol) {  /* Saving values not supported */
2659 		mk_sense_buffer(scp, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, 0);
2660 		return check_condition_result;
2661 	}
2662 	target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
2663 			(devip->target * 1000) - 3;
2664 	/* for disks+zbc set DPOFUA bit; set WP bit iff sdebug_wp is true */
2665 	if (is_disk || is_zbc) {
2666 		dev_spec = 0x10;	/* =0x90 if WP=1 implies read-only */
2667 		if (sdebug_wp)
2668 			dev_spec |= 0x80;
2669 	} else
2670 		dev_spec = 0x0;
2671 	if (msense_6) {
2672 		arr[2] = dev_spec;
2673 		arr[3] = bd_len;
2674 		offset = 4;
2675 	} else {
2676 		arr[3] = dev_spec;
2677 		if (16 == bd_len)
2678 			arr[4] = 0x1;	/* set LONGLBA bit */
2679 		arr[7] = bd_len;	/* assume 255 or less */
2680 		offset = 8;
2681 	}
2682 	ap = arr + offset;
2683 	if ((bd_len > 0) && (!sdebug_capacity))
2684 		sdebug_capacity = get_sdebug_capacity();
2685 
2686 	if (8 == bd_len) {
2687 		if (sdebug_capacity > 0xfffffffe)
2688 			put_unaligned_be32(0xffffffff, ap + 0);
2689 		else
2690 			put_unaligned_be32(sdebug_capacity, ap + 0);
2691 		put_unaligned_be16(sdebug_sector_size, ap + 6);
2692 		offset += bd_len;
2693 		ap = arr + offset;
2694 	} else if (16 == bd_len) {
2695 		put_unaligned_be64((u64)sdebug_capacity, ap + 0);
2696 		put_unaligned_be32(sdebug_sector_size, ap + 12);
2697 		offset += bd_len;
2698 		ap = arr + offset;
2699 	}
2700 
2701 	if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) {
2702 		/* TODO: Control Extension page */
2703 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2704 		return check_condition_result;
2705 	}
2706 	bad_pcode = false;
2707 
2708 	switch (pcode) {
2709 	case 0x1:	/* Read-Write error recovery page, direct access */
2710 		len = resp_err_recov_pg(ap, pcontrol, target);
2711 		offset += len;
2712 		break;
2713 	case 0x2:	/* Disconnect-Reconnect page, all devices */
2714 		len = resp_disconnect_pg(ap, pcontrol, target);
2715 		offset += len;
2716 		break;
2717 	case 0x3:       /* Format device page, direct access */
2718 		if (is_disk) {
2719 			len = resp_format_pg(ap, pcontrol, target);
2720 			offset += len;
2721 		} else
2722 			bad_pcode = true;
2723 		break;
2724 	case 0x8:	/* Caching page, direct access */
2725 		if (is_disk || is_zbc) {
2726 			len = resp_caching_pg(ap, pcontrol, target);
2727 			offset += len;
2728 		} else
2729 			bad_pcode = true;
2730 		break;
2731 	case 0xa:	/* Control Mode page, all devices */
2732 		len = resp_ctrl_m_pg(ap, pcontrol, target);
2733 		offset += len;
2734 		break;
2735 	case 0x19:	/* Protocol specific port: SAS phy control+discover */
2736 		if ((subpcode > 0x2) && (subpcode < 0xff)) {
2737 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2738 			return check_condition_result;
2739 		}
2740 		len = 0;
2741 		if ((0x0 == subpcode) || (0xff == subpcode))
2742 			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2743 		if ((0x1 == subpcode) || (0xff == subpcode))
2744 			len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
2745 						  target_dev_id);
2746 		if ((0x2 == subpcode) || (0xff == subpcode))
2747 			len += resp_sas_sha_m_spg(ap + len, pcontrol);
2748 		offset += len;
2749 		break;
2750 	case 0x1c:	/* Informational Exceptions Mode page, all devices */
2751 		len = resp_iec_m_pg(ap, pcontrol, target);
2752 		offset += len;
2753 		break;
2754 	case 0x3f:	/* Read all Mode pages */
2755 		if ((0 == subpcode) || (0xff == subpcode)) {
2756 			len = resp_err_recov_pg(ap, pcontrol, target);
2757 			len += resp_disconnect_pg(ap + len, pcontrol, target);
2758 			if (is_disk) {
2759 				len += resp_format_pg(ap + len, pcontrol,
2760 						      target);
2761 				len += resp_caching_pg(ap + len, pcontrol,
2762 						       target);
2763 			} else if (is_zbc) {
2764 				len += resp_caching_pg(ap + len, pcontrol,
2765 						       target);
2766 			}
2767 			len += resp_ctrl_m_pg(ap + len, pcontrol, target);
2768 			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2769 			if (0xff == subpcode) {
2770 				len += resp_sas_pcd_m_spg(ap + len, pcontrol,
2771 						  target, target_dev_id);
2772 				len += resp_sas_sha_m_spg(ap + len, pcontrol);
2773 			}
2774 			len += resp_iec_m_pg(ap + len, pcontrol, target);
2775 			offset += len;
2776 		} else {
2777 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2778 			return check_condition_result;
2779 		}
2780 		break;
2781 	default:
2782 		bad_pcode = true;
2783 		break;
2784 	}
2785 	if (bad_pcode) {
2786 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2787 		return check_condition_result;
2788 	}
2789 	if (msense_6)
2790 		arr[0] = offset - 1;
2791 	else
2792 		put_unaligned_be16((offset - 2), arr + 0);
2793 	return fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, offset));
2794 }
2795 
2796 #define SDEBUG_MAX_MSELECT_SZ 512
2797 
2798 static int resp_mode_select(struct scsi_cmnd *scp,
2799 			    struct sdebug_dev_info *devip)
2800 {
2801 	int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
2802 	int param_len, res, mpage;
2803 	unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
2804 	unsigned char *cmd = scp->cmnd;
2805 	int mselect6 = (MODE_SELECT == cmd[0]);
2806 
2807 	memset(arr, 0, sizeof(arr));
2808 	pf = cmd[1] & 0x10;
2809 	sp = cmd[1] & 0x1;
2810 	param_len = mselect6 ? cmd[4] : get_unaligned_be16(cmd + 7);
2811 	if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
2812 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1);
2813 		return check_condition_result;
2814 	}
2815 	res = fetch_to_dev_buffer(scp, arr, param_len);
2816 	if (-1 == res)
2817 		return DID_ERROR << 16;
2818 	else if (sdebug_verbose && (res < param_len))
2819 		sdev_printk(KERN_INFO, scp->device,
2820 			    "%s: cdb indicated=%d, IO sent=%d bytes\n",
2821 			    __func__, param_len, res);
2822 	md_len = mselect6 ? (arr[0] + 1) : (get_unaligned_be16(arr + 0) + 2);
2823 	bd_len = mselect6 ? arr[3] : get_unaligned_be16(arr + 6);
2824 	off = bd_len + (mselect6 ? 4 : 8);
2825 	if (md_len > 2 || off >= res) {
2826 		mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
2827 		return check_condition_result;
2828 	}
2829 	mpage = arr[off] & 0x3f;
2830 	ps = !!(arr[off] & 0x80);
2831 	if (ps) {
2832 		mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 7);
2833 		return check_condition_result;
2834 	}
2835 	spf = !!(arr[off] & 0x40);
2836 	pg_len = spf ? (get_unaligned_be16(arr + off + 2) + 4) :
2837 		       (arr[off + 1] + 2);
2838 	if ((pg_len + off) > param_len) {
2839 		mk_sense_buffer(scp, ILLEGAL_REQUEST,
2840 				PARAMETER_LIST_LENGTH_ERR, 0);
2841 		return check_condition_result;
2842 	}
2843 	switch (mpage) {
2844 	case 0x8:      /* Caching Mode page */
2845 		if (caching_pg[1] == arr[off + 1]) {
2846 			memcpy(caching_pg + 2, arr + off + 2,
2847 			       sizeof(caching_pg) - 2);
2848 			goto set_mode_changed_ua;
2849 		}
2850 		break;
2851 	case 0xa:      /* Control Mode page */
2852 		if (ctrl_m_pg[1] == arr[off + 1]) {
2853 			memcpy(ctrl_m_pg + 2, arr + off + 2,
2854 			       sizeof(ctrl_m_pg) - 2);
2855 			if (ctrl_m_pg[4] & 0x8)
2856 				sdebug_wp = true;
2857 			else
2858 				sdebug_wp = false;
2859 			sdebug_dsense = !!(ctrl_m_pg[2] & 0x4);
2860 			goto set_mode_changed_ua;
2861 		}
2862 		break;
2863 	case 0x1c:      /* Informational Exceptions Mode page */
2864 		if (iec_m_pg[1] == arr[off + 1]) {
2865 			memcpy(iec_m_pg + 2, arr + off + 2,
2866 			       sizeof(iec_m_pg) - 2);
2867 			goto set_mode_changed_ua;
2868 		}
2869 		break;
2870 	default:
2871 		break;
2872 	}
2873 	mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 5);
2874 	return check_condition_result;
2875 set_mode_changed_ua:
2876 	set_bit(SDEBUG_UA_MODE_CHANGED, devip->uas_bm);
2877 	return 0;
2878 }
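
/*
 * Only the caching, control and informational exceptions mode pages are
 * writable above.  A successful MODE SELECT copies the new parameter
 * bytes into the static page image and queues SDEBUG_UA_MODE_CHANGED,
 * so the next command on the device reports the "mode parameters
 * changed" unit attention.
 */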
2879 
2880 static int resp_temp_l_pg(unsigned char *arr)
2881 {
2882 	unsigned char temp_l_pg[] = {0x0, 0x0, 0x3, 0x2, 0x0, 38,
2883 				     0x0, 0x1, 0x3, 0x2, 0x0, 65,
2884 		};
2885 
2886 	memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
2887 	return sizeof(temp_l_pg);
2888 }
2889 
2890 static int resp_ie_l_pg(unsigned char *arr)
2891 {
2892 	unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
2893 		};
2894 
2895 	memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
2896 	if (iec_m_pg[2] & 0x4) {	/* TEST bit set */
2897 		arr[4] = THRESHOLD_EXCEEDED;
2898 		arr[5] = 0xff;
2899 	}
2900 	return sizeof(ie_l_pg);
2901 }
2902 
2903 static int resp_env_rep_l_spg(unsigned char *arr)
2904 {
2905 	unsigned char env_rep_l_spg[] = {0x0, 0x0, 0x23, 0x8,
2906 					 0x0, 40, 72, 0xff, 45, 18, 0, 0,
2907 					 0x1, 0x0, 0x23, 0x8,
2908 					 0x0, 55, 72, 35, 55, 45, 0, 0,
2909 		};
2910 
2911 	memcpy(arr, env_rep_l_spg, sizeof(env_rep_l_spg));
2912 	return sizeof(env_rep_l_spg);
2913 }
2914 
2915 #define SDEBUG_MAX_LSENSE_SZ 512
2916 
2917 static int resp_log_sense(struct scsi_cmnd *scp,
2918 			  struct sdebug_dev_info *devip)
2919 {
2920 	int ppc, sp, pcode, subpcode;
2921 	u32 alloc_len, len, n;
2922 	unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
2923 	unsigned char *cmd = scp->cmnd;
2924 
2925 	memset(arr, 0, sizeof(arr));
2926 	ppc = cmd[1] & 0x2;
2927 	sp = cmd[1] & 0x1;
2928 	if (ppc || sp) {
2929 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, ppc ? 1 : 0);
2930 		return check_condition_result;
2931 	}
2932 	pcode = cmd[2] & 0x3f;
2933 	subpcode = cmd[3] & 0xff;
2934 	alloc_len = get_unaligned_be16(cmd + 7);
2935 	arr[0] = pcode;
2936 	if (0 == subpcode) {
2937 		switch (pcode) {
2938 		case 0x0:	/* Supported log pages log page */
2939 			n = 4;
2940 			arr[n++] = 0x0;		/* this page */
2941 			arr[n++] = 0xd;		/* Temperature */
2942 			arr[n++] = 0x2f;	/* Informational exceptions */
2943 			arr[3] = n - 4;
2944 			break;
2945 		case 0xd:	/* Temperature log page */
2946 			arr[3] = resp_temp_l_pg(arr + 4);
2947 			break;
2948 		case 0x2f:	/* Informational exceptions log page */
2949 			arr[3] = resp_ie_l_pg(arr + 4);
2950 			break;
2951 		default:
2952 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2953 			return check_condition_result;
2954 		}
2955 	} else if (0xff == subpcode) {
2956 		arr[0] |= 0x40;
2957 		arr[1] = subpcode;
2958 		switch (pcode) {
2959 		case 0x0:	/* Supported log pages and subpages log page */
2960 			n = 4;
2961 			arr[n++] = 0x0;
2962 			arr[n++] = 0x0;		/* 0,0 page */
2963 			arr[n++] = 0x0;
2964 			arr[n++] = 0xff;	/* this page */
2965 			arr[n++] = 0xd;
2966 			arr[n++] = 0x0;		/* Temperature */
2967 			arr[n++] = 0xd;
2968 			arr[n++] = 0x1;		/* Environment reporting */
2969 			arr[n++] = 0xd;
2970 			arr[n++] = 0xff;	/* all 0xd subpages */
2971 			arr[n++] = 0x2f;
2972 			arr[n++] = 0x0;	/* Informational exceptions */
2973 			arr[n++] = 0x2f;
2974 			arr[n++] = 0xff;	/* all 0x2f subpages */
2975 			arr[3] = n - 4;
2976 			break;
2977 		case 0xd:	/* Temperature subpages */
2978 			n = 4;
2979 			arr[n++] = 0xd;
2980 			arr[n++] = 0x0;		/* Temperature */
2981 			arr[n++] = 0xd;
2982 			arr[n++] = 0x1;		/* Environment reporting */
2983 			arr[n++] = 0xd;
2984 			arr[n++] = 0xff;	/* these subpages */
2985 			arr[3] = n - 4;
2986 			break;
2987 		case 0x2f:	/* Informational exceptions subpages */
2988 			n = 4;
2989 			arr[n++] = 0x2f;
2990 			arr[n++] = 0x0;		/* Informational exceptions */
2991 			arr[n++] = 0x2f;
2992 			arr[n++] = 0xff;	/* these subpages */
2993 			arr[3] = n - 4;
2994 			break;
2995 		default:
2996 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2997 			return check_condition_result;
2998 		}
2999 	} else if (subpcode > 0) {
3000 		arr[0] |= 0x40;
3001 		arr[1] = subpcode;
3002 		if (pcode == 0xd && subpcode == 1) {
3003 			arr[3] = resp_env_rep_l_spg(arr + 4);
3004 		} else {
3005 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
3006 			return check_condition_result;
3007 		}
3008 	} else {
3009 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
3010 		return check_condition_result;
3011 	}
3012 	len = min_t(u32, get_unaligned_be16(arr + 2) + 4, alloc_len);
3013 	return fill_from_dev_buffer(scp, arr,
3014 		    min_t(u32, len, SDEBUG_MAX_LSENSE_SZ));
3015 }
3016 
3017 static inline bool sdebug_dev_is_zoned(struct sdebug_dev_info *devip)
3018 {
3019 	return devip->nr_zones != 0;
3020 }
3021 
3022 static struct sdeb_zone_state *zbc_zone(struct sdebug_dev_info *devip,
3023 					unsigned long long lba)
3024 {
3025 	u32 zno = lba >> devip->zsize_shift;
3026 	struct sdeb_zone_state *zsp;
3027 
3028 	if (devip->zcap == devip->zsize || zno < devip->nr_conv_zones)
3029 		return &devip->zstate[zno];
3030 
3031 	/*
3032 	 * If the zone capacity is less than the zone size, adjust for gap
3033 	 * zones.
3034 	 */
3035 	zno = 2 * zno - devip->nr_conv_zones;
3036 	WARN_ONCE(zno >= devip->nr_zones, "%u >= %u\n", zno, devip->nr_zones);
3037 	zsp = &devip->zstate[zno];
3038 	if (lba >= zsp->z_start + zsp->z_size)
3039 		zsp++;
3040 	WARN_ON_ONCE(lba >= zsp->z_start + zsp->z_size);
3041 	return zsp;
3042 }
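
/*
 * Worked example of the gap-zone mapping above: with zcap < zsize and
 * nr_conv_zones = 4, the LBA-derived zone number 6 maps to array index
 * 2 * 6 - 4 = 8; the entry that follows (index 9) is the gap zone that
 * the zsp++ step selects when the LBA lies beyond the zone's capacity.
 */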
3043 
3044 static inline bool zbc_zone_is_conv(struct sdeb_zone_state *zsp)
3045 {
3046 	return zsp->z_type == ZBC_ZTYPE_CNV;
3047 }
3048 
3049 static inline bool zbc_zone_is_gap(struct sdeb_zone_state *zsp)
3050 {
3051 	return zsp->z_type == ZBC_ZTYPE_GAP;
3052 }
3053 
3054 static inline bool zbc_zone_is_seq(struct sdeb_zone_state *zsp)
3055 {
3056 	return !zbc_zone_is_conv(zsp) && !zbc_zone_is_gap(zsp);
3057 }
3058 
3059 static void zbc_close_zone(struct sdebug_dev_info *devip,
3060 			   struct sdeb_zone_state *zsp)
3061 {
3062 	enum sdebug_z_cond zc;
3063 
3064 	if (!zbc_zone_is_seq(zsp))
3065 		return;
3066 
3067 	zc = zsp->z_cond;
3068 	if (!(zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN))
3069 		return;
3070 
3071 	if (zc == ZC2_IMPLICIT_OPEN)
3072 		devip->nr_imp_open--;
3073 	else
3074 		devip->nr_exp_open--;
3075 
3076 	if (zsp->z_wp == zsp->z_start) {
3077 		zsp->z_cond = ZC1_EMPTY;
3078 	} else {
3079 		zsp->z_cond = ZC4_CLOSED;
3080 		devip->nr_closed++;
3081 	}
3082 }
3083 
3084 static void zbc_close_imp_open_zone(struct sdebug_dev_info *devip)
3085 {
3086 	struct sdeb_zone_state *zsp = &devip->zstate[0];
3087 	unsigned int i;
3088 
3089 	for (i = 0; i < devip->nr_zones; i++, zsp++) {
3090 		if (zsp->z_cond == ZC2_IMPLICIT_OPEN) {
3091 			zbc_close_zone(devip, zsp);
3092 			return;
3093 		}
3094 	}
3095 }
3096 
3097 static void zbc_open_zone(struct sdebug_dev_info *devip,
3098 			  struct sdeb_zone_state *zsp, bool explicit)
3099 {
3100 	enum sdebug_z_cond zc;
3101 
3102 	if (!zbc_zone_is_seq(zsp))
3103 		return;
3104 
3105 	zc = zsp->z_cond;
3106 	if ((explicit && zc == ZC3_EXPLICIT_OPEN) ||
3107 	    (!explicit && zc == ZC2_IMPLICIT_OPEN))
3108 		return;
3109 
3110 	/* Close an implicit open zone if necessary */
3111 	if (explicit && zsp->z_cond == ZC2_IMPLICIT_OPEN)
3112 		zbc_close_zone(devip, zsp);
3113 	else if (devip->max_open &&
3114 		 devip->nr_imp_open + devip->nr_exp_open >= devip->max_open)
3115 		zbc_close_imp_open_zone(devip);
3116 
3117 	if (zsp->z_cond == ZC4_CLOSED)
3118 		devip->nr_closed--;
3119 	if (explicit) {
3120 		zsp->z_cond = ZC3_EXPLICIT_OPEN;
3121 		devip->nr_exp_open++;
3122 	} else {
3123 		zsp->z_cond = ZC2_IMPLICIT_OPEN;
3124 		devip->nr_imp_open++;
3125 	}
3126 }
3127 
3128 static inline void zbc_set_zone_full(struct sdebug_dev_info *devip,
3129 				     struct sdeb_zone_state *zsp)
3130 {
3131 	switch (zsp->z_cond) {
3132 	case ZC2_IMPLICIT_OPEN:
3133 		devip->nr_imp_open--;
3134 		break;
3135 	case ZC3_EXPLICIT_OPEN:
3136 		devip->nr_exp_open--;
3137 		break;
3138 	default:
3139 		WARN_ONCE(true, "Invalid zone %llu condition %x\n",
3140 			  zsp->z_start, zsp->z_cond);
3141 		break;
3142 	}
3143 	zsp->z_cond = ZC5_FULL;
3144 }
3145 
3146 static void zbc_inc_wp(struct sdebug_dev_info *devip,
3147 		       unsigned long long lba, unsigned int num)
3148 {
3149 	struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
3150 	unsigned long long n, end, zend = zsp->z_start + zsp->z_size;
3151 
3152 	if (!zbc_zone_is_seq(zsp))
3153 		return;
3154 
3155 	if (zsp->z_type == ZBC_ZTYPE_SWR) {
3156 		zsp->z_wp += num;
3157 		if (zsp->z_wp >= zend)
3158 			zbc_set_zone_full(devip, zsp);
3159 		return;
3160 	}
3161 
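	/*
	 * Sequential write preferred (SWP) zone: writes may land anywhere,
	 * so walk all zones the write touches, advancing each write pointer
	 * only when the write extends past it, and flag writes that do not
	 * start at the write pointer as using a non-sequential resource.
	 */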
3162 	while (num) {
3163 		if (lba != zsp->z_wp)
3164 			zsp->z_non_seq_resource = true;
3165 
3166 		end = lba + num;
3167 		if (end >= zend) {
3168 			n = zend - lba;
3169 			zsp->z_wp = zend;
3170 		} else if (end > zsp->z_wp) {
3171 			n = num;
3172 			zsp->z_wp = end;
3173 		} else {
3174 			n = num;
3175 		}
3176 		if (zsp->z_wp >= zend)
3177 			zbc_set_zone_full(devip, zsp);
3178 
3179 		num -= n;
3180 		lba += n;
3181 		if (num) {
3182 			zsp++;
3183 			zend = zsp->z_start + zsp->z_size;
3184 		}
3185 	}
3186 }
3187 
3188 static int check_zbc_access_params(struct scsi_cmnd *scp,
3189 			unsigned long long lba, unsigned int num, bool write)
3190 {
3191 	struct scsi_device *sdp = scp->device;
3192 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
3193 	struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
3194 	struct sdeb_zone_state *zsp_end = zbc_zone(devip, lba + num - 1);
3195 
3196 	if (!write) {
3197 		if (devip->zmodel == BLK_ZONED_HA)
3198 			return 0;
3199 		/* For host-managed, reads cannot cross zone type boundaries */
3200 		if (zsp->z_type != zsp_end->z_type) {
3201 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
3202 					LBA_OUT_OF_RANGE,
3203 					READ_INVDATA_ASCQ);
3204 			return check_condition_result;
3205 		}
3206 		return 0;
3207 	}
3208 
3209 	/* Writing into a gap zone is not allowed */
3210 	if (zbc_zone_is_gap(zsp)) {
3211 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE,
3212 				ATTEMPT_ACCESS_GAP);
3213 		return check_condition_result;
3214 	}
3215 
3216 	/* Writes in conventional zones are unrestricted, but must not cross into other zone types */
3217 	if (zbc_zone_is_conv(zsp)) {
3218 		if (!zbc_zone_is_conv(zsp_end)) {
3219 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
3220 					LBA_OUT_OF_RANGE,
3221 					WRITE_BOUNDARY_ASCQ);
3222 			return check_condition_result;
3223 		}
3224 		return 0;
3225 	}
3226 
3227 	if (zsp->z_type == ZBC_ZTYPE_SWR) {
3228 		/* Writes cannot cross sequential zone boundaries */
3229 		if (zsp_end != zsp) {
3230 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
3231 					LBA_OUT_OF_RANGE,
3232 					WRITE_BOUNDARY_ASCQ);
3233 			return check_condition_result;
3234 		}
3235 		/* Cannot write full zones */
3236 		if (zsp->z_cond == ZC5_FULL) {
3237 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
3238 					INVALID_FIELD_IN_CDB, 0);
3239 			return check_condition_result;
3240 		}
3241 		/* Writes must be aligned to the zone WP */
3242 		if (lba != zsp->z_wp) {
3243 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
3244 					LBA_OUT_OF_RANGE,
3245 					UNALIGNED_WRITE_ASCQ);
3246 			return check_condition_result;
3247 		}
3248 	}
3249 
3250 	/* Handle implicit open of closed and empty zones */
3251 	if (zsp->z_cond == ZC1_EMPTY || zsp->z_cond == ZC4_CLOSED) {
3252 		if (devip->max_open &&
3253 		    devip->nr_exp_open >= devip->max_open) {
3254 			mk_sense_buffer(scp, DATA_PROTECT,
3255 					INSUFF_RES_ASC,
3256 					INSUFF_ZONE_ASCQ);
3257 			return check_condition_result;
3258 		}
3259 		zbc_open_zone(devip, zsp, false);
3260 	}
3261 
3262 	return 0;
3263 }
3264 
3265 static inline int check_device_access_params
3266 			(struct scsi_cmnd *scp, unsigned long long lba,
3267 			 unsigned int num, bool write)
3268 {
3269 	struct scsi_device *sdp = scp->device;
3270 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
3271 
3272 	if (lba + num > sdebug_capacity) {
3273 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
3274 		return check_condition_result;
3275 	}
3276 	/* transfer length excessive (tie in to block limits VPD page) */
3277 	if (num > sdebug_store_sectors) {
3278 		/* needs work to find which cdb byte 'num' comes from */
3279 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3280 		return check_condition_result;
3281 	}
3282 	if (write && unlikely(sdebug_wp)) {
3283 		mk_sense_buffer(scp, DATA_PROTECT, WRITE_PROTECTED, 0x2);
3284 		return check_condition_result;
3285 	}
3286 	if (sdebug_dev_is_zoned(devip))
3287 		return check_zbc_access_params(scp, lba, num, write);
3288 
3289 	return 0;
3290 }
3291 
3292 /*
3293  * Note: if BUG_ON() fires it usually indicates a problem with the parser
3294  * tables. Perhaps a missing F_FAKE_RW or FF_MEDIA_IO flag. Response functions
3295  * that access any of the "stores" in struct sdeb_store_info should call this
3296  * function with bug_if_fake_rw set to true.
3297  */
3298 static inline struct sdeb_store_info *devip2sip(struct sdebug_dev_info *devip,
3299 						bool bug_if_fake_rw)
3300 {
3301 	if (sdebug_fake_rw) {
3302 		BUG_ON(bug_if_fake_rw);	/* See note above */
3303 		return NULL;
3304 	}
3305 	return xa_load(per_store_ap, devip->sdbg_host->si_idx);
3306 }
3307 
3308 /* Returns number of bytes copied or -1 if error. */
3309 static int do_device_access(struct sdeb_store_info *sip, struct scsi_cmnd *scp,
3310 			    u32 sg_skip, u64 lba, u32 num, bool do_write)
3311 {
3312 	int ret;
3313 	u64 block, rest = 0;
3314 	enum dma_data_direction dir;
3315 	struct scsi_data_buffer *sdb = &scp->sdb;
3316 	u8 *fsp;
3317 
3318 	if (do_write) {
3319 		dir = DMA_TO_DEVICE;
3320 		write_since_sync = true;
3321 	} else {
3322 		dir = DMA_FROM_DEVICE;
3323 	}
3324 
3325 	if (!sdb->length || !sip)
3326 		return 0;
3327 	if (scp->sc_data_direction != dir)
3328 		return -1;
3329 	fsp = sip->storep;
3330 
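	/*
	 * The backing store may be smaller than the advertised capacity
	 * (e.g. with virtual_gb set), so the LBA is reduced modulo
	 * sdebug_store_sectors and an access running off the end of the
	 * store wraps to its start; "rest" counts the wrapped-around blocks.
	 */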
3331 	block = do_div(lba, sdebug_store_sectors);
3332 	if (block + num > sdebug_store_sectors)
3333 		rest = block + num - sdebug_store_sectors;
3334 
3335 	ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
3336 		   fsp + (block * sdebug_sector_size),
3337 		   (num - rest) * sdebug_sector_size, sg_skip, do_write);
3338 	if (ret != (num - rest) * sdebug_sector_size)
3339 		return ret;
3340 
3341 	if (rest) {
3342 		ret += sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
3343 			    fsp, rest * sdebug_sector_size,
3344 			    sg_skip + ((num - rest) * sdebug_sector_size),
3345 			    do_write);
3346 	}
3347 
3348 	return ret;
3349 }
3350 
3351 /* Returns number of bytes copied or -1 if error. */
3352 static int do_dout_fetch(struct scsi_cmnd *scp, u32 num, u8 *doutp)
3353 {
3354 	struct scsi_data_buffer *sdb = &scp->sdb;
3355 
3356 	if (!sdb->length)
3357 		return 0;
3358 	if (scp->sc_data_direction != DMA_TO_DEVICE)
3359 		return -1;
3360 	return sg_copy_buffer(sdb->table.sgl, sdb->table.nents, doutp,
3361 			      num * sdebug_sector_size, 0, true);
3362 }
3363 
3364 /* If the first num blocks at sip->storep+lba compare equal to the first
3365  * half of arr, return true after (unless compare_only is set) copying the
3366  * second half of arr into sip->storep+lba. If the comparison fails then
3367  * return false. */
3367 static bool comp_write_worker(struct sdeb_store_info *sip, u64 lba, u32 num,
3368 			      const u8 *arr, bool compare_only)
3369 {
3370 	bool res;
3371 	u64 block, rest = 0;
3372 	u32 store_blks = sdebug_store_sectors;
3373 	u32 lb_size = sdebug_sector_size;
3374 	u8 *fsp = sip->storep;
3375 
3376 	block = do_div(lba, store_blks);
3377 	if (block + num > store_blks)
3378 		rest = block + num - store_blks;
3379 
3380 	res = !memcmp(fsp + (block * lb_size), arr, (num - rest) * lb_size);
3381 	if (!res)
3382 		return res;
3383 	if (rest)
3384 		res = memcmp(fsp, arr + ((num - rest) * lb_size),
3385 		res = !memcmp(fsp, arr + ((num - rest) * lb_size),
3386 			      rest * lb_size);
3387 		return res;
3388 	if (compare_only)
3389 		return true;
3390 	arr += num * lb_size;
3391 	memcpy(fsp + (block * lb_size), arr, (num - rest) * lb_size);
3392 	if (rest)
3393 		memcpy(fsp, arr + ((num - rest) * lb_size), rest * lb_size);
3394 	return res;
3395 }
3396 
3397 static __be16 dif_compute_csum(const void *buf, int len)
3398 {
3399 	__be16 csum;
3400 
3401 	if (sdebug_guard)
3402 		csum = (__force __be16)ip_compute_csum(buf, len);
3403 	else
3404 		csum = cpu_to_be16(crc_t10dif(buf, len));
3405 
3406 	return csum;
3407 }
3408 
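/*
 * Returns 0 if the protection information checks out, 0x01 on a guard tag
 * mismatch and 0x03 on a reference tag mismatch; callers use these values
 * as the ASCQ paired with ASC 0x10 (LOGICAL BLOCK GUARD/REFERENCE TAG
 * CHECK FAILED) when building sense data.
 */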
3409 static int dif_verify(struct t10_pi_tuple *sdt, const void *data,
3410 		      sector_t sector, u32 ei_lba)
3411 {
3412 	__be16 csum = dif_compute_csum(data, sdebug_sector_size);
3413 
3414 	if (sdt->guard_tag != csum) {
3415 		pr_err("GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
3416 			(unsigned long)sector,
3417 			be16_to_cpu(sdt->guard_tag),
3418 			be16_to_cpu(csum));
3419 		return 0x01;
3420 	}
3421 	if (sdebug_dif == T10_PI_TYPE1_PROTECTION &&
3422 	    be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
3423 		pr_err("REF check failed on sector %lu\n",
3424 			(unsigned long)sector);
3425 		return 0x03;
3426 	}
3427 	if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3428 	    be32_to_cpu(sdt->ref_tag) != ei_lba) {
3429 		pr_err("REF check failed on sector %lu\n",
3430 			(unsigned long)sector);
3431 		return 0x03;
3432 	}
3433 	return 0;
3434 }
3435 
3436 static void dif_copy_prot(struct scsi_cmnd *scp, sector_t sector,
3437 			  unsigned int sectors, bool read)
3438 {
3439 	size_t resid;
3440 	void *paddr;
3441 	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3442 						scp->device->hostdata, true);
3443 	struct t10_pi_tuple *dif_storep = sip->dif_storep;
3444 	const void *dif_store_end = dif_storep + sdebug_store_sectors;
3445 	struct sg_mapping_iter miter;
3446 
3447 	/* Bytes of protection data to copy into sgl */
3448 	resid = sectors * sizeof(*dif_storep);
3449 
3450 	sg_miter_start(&miter, scsi_prot_sglist(scp),
3451 		       scsi_prot_sg_count(scp), SG_MITER_ATOMIC |
3452 		       (read ? SG_MITER_TO_SG : SG_MITER_FROM_SG));
3453 
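	/* Like the data store, the PI store wraps at sdebug_store_sectors */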
3454 	while (sg_miter_next(&miter) && resid > 0) {
3455 		size_t len = min_t(size_t, miter.length, resid);
3456 		void *start = dif_store(sip, sector);
3457 		size_t rest = 0;
3458 
3459 		if (dif_store_end < start + len)
3460 			rest = start + len - dif_store_end;
3461 
3462 		paddr = miter.addr;
3463 
3464 		if (read)
3465 			memcpy(paddr, start, len - rest);
3466 		else
3467 			memcpy(start, paddr, len - rest);
3468 
3469 		if (rest) {
3470 			if (read)
3471 				memcpy(paddr + len - rest, dif_storep, rest);
3472 			else
3473 				memcpy(dif_storep, paddr + len - rest, rest);
3474 		}
3475 
3476 		sector += len / sizeof(*dif_storep);
3477 		resid -= len;
3478 	}
3479 	sg_miter_stop(&miter);
3480 }
3481 
3482 static int prot_verify_read(struct scsi_cmnd *scp, sector_t start_sec,
3483 			    unsigned int sectors, u32 ei_lba)
3484 {
3485 	int ret = 0;
3486 	unsigned int i;
3487 	sector_t sector;
3488 	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3489 						scp->device->hostdata, true);
3490 	struct t10_pi_tuple *sdt;
3491 
3492 	for (i = 0; i < sectors; i++, ei_lba++) {
3493 		sector = start_sec + i;
3494 		sdt = dif_store(sip, sector);
3495 
3496 		if (sdt->app_tag == cpu_to_be16(0xffff))
3497 			continue;
3498 
3499 		/*
3500 		 * Because scsi_debug acts as both initiator and
3501 		 * target we proceed to verify the PI even if
3502 		 * RDPROTECT=3. This is done so the "initiator" knows
3503 		 * which type of error to return. Otherwise we would
3504 		 * have to iterate over the PI twice.
3505 		 */
3506 		if (scp->cmnd[1] >> 5) { /* RDPROTECT */
3507 			ret = dif_verify(sdt, lba2fake_store(sip, sector),
3508 					 sector, ei_lba);
3509 			if (ret) {
3510 				dif_errors++;
3511 				break;
3512 			}
3513 		}
3514 	}
3515 
3516 	dif_copy_prot(scp, start_sec, sectors, true);
3517 	dix_reads++;
3518 
3519 	return ret;
3520 }
3521 
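/*
 * With sdebug_no_rwlock set, the store locks are skipped; the __acquire()
 * and __release() calls are no-ops that only keep sparse's lock context
 * tracking balanced.
 */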
3522 static inline void
3523 sdeb_read_lock(struct sdeb_store_info *sip)
3524 {
3525 	if (sdebug_no_rwlock) {
3526 		if (sip)
3527 			__acquire(&sip->macc_lck);
3528 		else
3529 			__acquire(&sdeb_fake_rw_lck);
3530 	} else {
3531 		if (sip)
3532 			read_lock(&sip->macc_lck);
3533 		else
3534 			read_lock(&sdeb_fake_rw_lck);
3535 	}
3536 }
3537 
3538 static inline void
3539 sdeb_read_unlock(struct sdeb_store_info *sip)
3540 {
3541 	if (sdebug_no_rwlock) {
3542 		if (sip)
3543 			__release(&sip->macc_lck);
3544 		else
3545 			__release(&sdeb_fake_rw_lck);
3546 	} else {
3547 		if (sip)
3548 			read_unlock(&sip->macc_lck);
3549 		else
3550 			read_unlock(&sdeb_fake_rw_lck);
3551 	}
3552 }
3553 
3554 static inline void
3555 sdeb_write_lock(struct sdeb_store_info *sip)
3556 {
3557 	if (sdebug_no_rwlock) {
3558 		if (sip)
3559 			__acquire(&sip->macc_lck);
3560 		else
3561 			__acquire(&sdeb_fake_rw_lck);
3562 	} else {
3563 		if (sip)
3564 			write_lock(&sip->macc_lck);
3565 		else
3566 			write_lock(&sdeb_fake_rw_lck);
3567 	}
3568 }
3569 
3570 static inline void
3571 sdeb_write_unlock(struct sdeb_store_info *sip)
3572 {
3573 	if (sdebug_no_rwlock) {
3574 		if (sip)
3575 			__release(&sip->macc_lck);
3576 		else
3577 			__release(&sdeb_fake_rw_lck);
3578 	} else {
3579 		if (sip)
3580 			write_unlock(&sip->macc_lck);
3581 		else
3582 			write_unlock(&sdeb_fake_rw_lck);
3583 	}
3584 }
3585 
3586 static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3587 {
3588 	bool check_prot;
3589 	u32 num;
3590 	u32 ei_lba;
3591 	int ret;
3592 	u64 lba;
3593 	struct sdeb_store_info *sip = devip2sip(devip, true);
3594 	u8 *cmd = scp->cmnd;
3595 
3596 	switch (cmd[0]) {
3597 	case READ_16:
3598 		ei_lba = 0;
3599 		lba = get_unaligned_be64(cmd + 2);
3600 		num = get_unaligned_be32(cmd + 10);
3601 		check_prot = true;
3602 		break;
3603 	case READ_10:
3604 		ei_lba = 0;
3605 		lba = get_unaligned_be32(cmd + 2);
3606 		num = get_unaligned_be16(cmd + 7);
3607 		check_prot = true;
3608 		break;
3609 	case READ_6:
3610 		ei_lba = 0;
3611 		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
3612 		      (u32)(cmd[1] & 0x1f) << 16;
3613 		num = (0 == cmd[4]) ? 256 : cmd[4];
3614 		check_prot = true;
3615 		break;
3616 	case READ_12:
3617 		ei_lba = 0;
3618 		lba = get_unaligned_be32(cmd + 2);
3619 		num = get_unaligned_be32(cmd + 6);
3620 		check_prot = true;
3621 		break;
3622 	case XDWRITEREAD_10:
3623 		ei_lba = 0;
3624 		lba = get_unaligned_be32(cmd + 2);
3625 		num = get_unaligned_be16(cmd + 7);
3626 		check_prot = false;
3627 		break;
3628 	default:	/* assume READ(32) */
3629 		lba = get_unaligned_be64(cmd + 12);
3630 		ei_lba = get_unaligned_be32(cmd + 20);
3631 		num = get_unaligned_be32(cmd + 28);
3632 		check_prot = false;
3633 		break;
3634 	}
3635 	if (unlikely(have_dif_prot && check_prot)) {
3636 		if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3637 		    (cmd[1] & 0xe0)) {
3638 			mk_sense_invalid_opcode(scp);
3639 			return check_condition_result;
3640 		}
3641 		if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3642 		     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3643 		    (cmd[1] & 0xe0) == 0)
3644 			sdev_printk(KERN_ERR, scp->device,
3645 				    "Unprotected RD to DIF device\n");
3646 	}
3647 	if (unlikely((sdebug_opts & SDEBUG_OPT_SHORT_TRANSFER) &&
3648 		     atomic_read(&sdeb_inject_pending))) {
3649 		num /= 2;
3650 		atomic_set(&sdeb_inject_pending, 0);
3651 	}
3652 
3653 	ret = check_device_access_params(scp, lba, num, false);
3654 	if (ret)
3655 		return ret;
3656 	if (unlikely((SDEBUG_OPT_MEDIUM_ERR & sdebug_opts) &&
3657 		     (lba <= (sdebug_medium_error_start + sdebug_medium_error_count - 1)) &&
3658 		     ((lba + num) > sdebug_medium_error_start))) {
3659 		/* claim unrecoverable read error */
3660 		mk_sense_buffer(scp, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
3661 		/* set info field and valid bit for fixed descriptor */
3662 		if (0x70 == (scp->sense_buffer[0] & 0x7f)) {
3663 			scp->sense_buffer[0] |= 0x80;	/* Valid bit */
3664 			ret = (lba < OPT_MEDIUM_ERR_ADDR)
3665 			      ? OPT_MEDIUM_ERR_ADDR : (int)lba;
3666 			put_unaligned_be32(ret, scp->sense_buffer + 3);
3667 		}
3668 		scsi_set_resid(scp, scsi_bufflen(scp));
3669 		return check_condition_result;
3670 	}
3671 
3672 	sdeb_read_lock(sip);
3673 
3674 	/* DIX + T10 DIF */
3675 	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3676 		switch (prot_verify_read(scp, lba, num, ei_lba)) {
3677 		case 1: /* Guard tag error */
3678 			if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */
3679 				sdeb_read_unlock(sip);
3680 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3681 				return check_condition_result;
3682 			} else if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) {
3683 				sdeb_read_unlock(sip);
3684 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3685 				return illegal_condition_result;
3686 			}
3687 			break;
3688 		case 3: /* Reference tag error */
3689 			if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */
3690 				sdeb_read_unlock(sip);
3691 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3);
3692 				return check_condition_result;
3693 			} else if (scp->prot_flags & SCSI_PROT_REF_CHECK) {
3694 				sdeb_read_unlock(sip);
3695 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3);
3696 				return illegal_condition_result;
3697 			}
3698 			break;
3699 		}
3700 	}
3701 
3702 	ret = do_device_access(sip, scp, 0, lba, num, false);
3703 	sdeb_read_unlock(sip);
3704 	if (unlikely(ret == -1))
3705 		return DID_ERROR << 16;
3706 
3707 	scsi_set_resid(scp, scsi_bufflen(scp) - ret);
3708 
3709 	if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
3710 		     atomic_read(&sdeb_inject_pending))) {
3711 		if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
3712 			mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
3713 			atomic_set(&sdeb_inject_pending, 0);
3714 			return check_condition_result;
3715 		} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
3716 			/* Logical block guard check failed */
3717 			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3718 			atomic_set(&sdeb_inject_pending, 0);
3719 			return illegal_condition_result;
3720 		} else if (SDEBUG_OPT_DIX_ERR & sdebug_opts) {
3721 			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3722 			atomic_set(&sdeb_inject_pending, 0);
3723 			return illegal_condition_result;
3724 		}
3725 	}
3726 	return 0;
3727 }
3728 
3729 static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
3730 			     unsigned int sectors, u32 ei_lba)
3731 {
3732 	int ret;
3733 	struct t10_pi_tuple *sdt;
3734 	void *daddr;
3735 	sector_t sector = start_sec;
3736 	int ppage_offset;
3737 	int dpage_offset;
3738 	struct sg_mapping_iter diter;
3739 	struct sg_mapping_iter piter;
3740 
3741 	BUG_ON(scsi_sg_count(SCpnt) == 0);
3742 	BUG_ON(scsi_prot_sg_count(SCpnt) == 0);
3743 
3744 	sg_miter_start(&piter, scsi_prot_sglist(SCpnt),
3745 			scsi_prot_sg_count(SCpnt),
3746 			SG_MITER_ATOMIC | SG_MITER_FROM_SG);
3747 	sg_miter_start(&diter, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
3748 			SG_MITER_ATOMIC | SG_MITER_FROM_SG);
3749 
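	/*
	 * Each 8 byte PI tuple guards one logical block, so the data
	 * iterator advances one sector per tuple and is stepped to its
	 * next page whenever the current one is exhausted.
	 */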
3750 	/* For each protection page */
3751 	while (sg_miter_next(&piter)) {
3752 		dpage_offset = 0;
3753 		if (WARN_ON(!sg_miter_next(&diter))) {
3754 			ret = 0x01;
3755 			goto out;
3756 		}
3757 
3758 		for (ppage_offset = 0; ppage_offset < piter.length;
3759 		     ppage_offset += sizeof(struct t10_pi_tuple)) {
3760 			/* If we're at the end of the current
3761 			 * data page advance to the next one
3762 			 */
3763 			if (dpage_offset >= diter.length) {
3764 				if (WARN_ON(!sg_miter_next(&diter))) {
3765 					ret = 0x01;
3766 					goto out;
3767 				}
3768 				dpage_offset = 0;
3769 			}
3770 
3771 			sdt = piter.addr + ppage_offset;
3772 			daddr = diter.addr + dpage_offset;
3773 
3774 			if (SCpnt->cmnd[1] >> 5 != 3) { /* WRPROTECT */
3775 				ret = dif_verify(sdt, daddr, sector, ei_lba);
3776 				if (ret)
3777 					goto out;
3778 			}
3779 
3780 			sector++;
3781 			ei_lba++;
3782 			dpage_offset += sdebug_sector_size;
3783 		}
3784 		diter.consumed = dpage_offset;
3785 		sg_miter_stop(&diter);
3786 	}
3787 	sg_miter_stop(&piter);
3788 
3789 	dif_copy_prot(SCpnt, start_sec, sectors, false);
3790 	dix_writes++;
3791 
3792 	return 0;
3793 
3794 out:
3795 	dif_errors++;
3796 	sg_miter_stop(&diter);
3797 	sg_miter_stop(&piter);
3798 	return ret;
3799 }
3800 
3801 static unsigned long lba_to_map_index(sector_t lba)
3802 {
3803 	if (sdebug_unmap_alignment)
3804 		lba += sdebug_unmap_granularity - sdebug_unmap_alignment;
3805 	sector_div(lba, sdebug_unmap_granularity);
3806 	return lba;
3807 }
3808 
3809 static sector_t map_index_to_lba(unsigned long index)
3810 {
3811 	sector_t lba = index * sdebug_unmap_granularity;
3812 
3813 	if (sdebug_unmap_alignment)
3814 		lba -= sdebug_unmap_granularity - sdebug_unmap_alignment;
3815 	return lba;
3816 }
3817 
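/*
 * Worked example with illustrative numbers: with unmap_granularity = 8 and
 * unmap_alignment = 4, provisioning block 0 covers LBAs below 4, block 1
 * covers LBAs [4, 12), and so on. lba_to_map_index(5) first shifts the LBA
 * by 8 - 4 = 4, giving 9 / 8 = 1; map_index_to_lba(1) undoes the shift:
 * 1 * 8 - 4 = 4, the first LBA of that provisioning block.
 */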
3818 static unsigned int map_state(struct sdeb_store_info *sip, sector_t lba,
3819 			      unsigned int *num)
3820 {
3821 	sector_t end;
3822 	unsigned int mapped;
3823 	unsigned long index;
3824 	unsigned long next;
3825 
3826 	index = lba_to_map_index(lba);
3827 	mapped = test_bit(index, sip->map_storep);
3828 
3829 	if (mapped)
3830 		next = find_next_zero_bit(sip->map_storep, map_size, index);
3831 	else
3832 		next = find_next_bit(sip->map_storep, map_size, index);
3833 
3834 	end = min_t(sector_t, sdebug_store_sectors,  map_index_to_lba(next));
3835 	*num = end - lba;
3836 	return mapped;
3837 }
3838 
3839 static void map_region(struct sdeb_store_info *sip, sector_t lba,
3840 		       unsigned int len)
3841 {
3842 	sector_t end = lba + len;
3843 
3844 	while (lba < end) {
3845 		unsigned long index = lba_to_map_index(lba);
3846 
3847 		if (index < map_size)
3848 			set_bit(index, sip->map_storep);
3849 
3850 		lba = map_index_to_lba(index + 1);
3851 	}
3852 }
3853 
3854 static void unmap_region(struct sdeb_store_info *sip, sector_t lba,
3855 			 unsigned int len)
3856 {
3857 	sector_t end = lba + len;
3858 	u8 *fsp = sip->storep;
3859 
3860 	while (lba < end) {
3861 		unsigned long index = lba_to_map_index(lba);
3862 
3863 		if (lba == map_index_to_lba(index) &&
3864 		    lba + sdebug_unmap_granularity <= end &&
3865 		    index < map_size) {
3866 			clear_bit(index, sip->map_storep);
3867 			if (sdebug_lbprz) {  /* for LBPRZ=2 fill with 0xff bytes */
3868 				memset(fsp + lba * sdebug_sector_size,
3869 				       (sdebug_lbprz & 1) ? 0 : 0xff,
3870 				       sdebug_sector_size *
3871 				       sdebug_unmap_granularity);
3872 			}
3873 			if (sip->dif_storep) {
3874 				memset(sip->dif_storep + lba, 0xff,
3875 				       sizeof(*sip->dif_storep) *
3876 				       sdebug_unmap_granularity);
3877 			}
3878 		}
3879 		lba = map_index_to_lba(index + 1);
3880 	}
3881 }
3882 
3883 static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3884 {
3885 	bool check_prot;
3886 	u32 num;
3887 	u32 ei_lba;
3888 	int ret;
3889 	u64 lba;
3890 	struct sdeb_store_info *sip = devip2sip(devip, true);
3891 	u8 *cmd = scp->cmnd;
3892 
3893 	switch (cmd[0]) {
3894 	case WRITE_16:
3895 		ei_lba = 0;
3896 		lba = get_unaligned_be64(cmd + 2);
3897 		num = get_unaligned_be32(cmd + 10);
3898 		check_prot = true;
3899 		break;
3900 	case WRITE_10:
3901 		ei_lba = 0;
3902 		lba = get_unaligned_be32(cmd + 2);
3903 		num = get_unaligned_be16(cmd + 7);
3904 		check_prot = true;
3905 		break;
3906 	case WRITE_6:
3907 		ei_lba = 0;
3908 		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
3909 		      (u32)(cmd[1] & 0x1f) << 16;
3910 		num = (0 == cmd[4]) ? 256 : cmd[4];
3911 		check_prot = true;
3912 		break;
3913 	case WRITE_12:
3914 		ei_lba = 0;
3915 		lba = get_unaligned_be32(cmd + 2);
3916 		num = get_unaligned_be32(cmd + 6);
3917 		check_prot = true;
3918 		break;
3919 	case XDWRITEREAD_10:
3920 		ei_lba = 0;
3921 		lba = get_unaligned_be32(cmd + 2);
3922 		num = get_unaligned_be16(cmd + 7);
3923 		check_prot = false;
3924 		break;
3925 	default:	/* assume WRITE(32) */
3926 		lba = get_unaligned_be64(cmd + 12);
3927 		ei_lba = get_unaligned_be32(cmd + 20);
3928 		num = get_unaligned_be32(cmd + 28);
3929 		check_prot = false;
3930 		break;
3931 	}
3932 	if (unlikely(have_dif_prot && check_prot)) {
3933 		if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3934 		    (cmd[1] & 0xe0)) {
3935 			mk_sense_invalid_opcode(scp);
3936 			return check_condition_result;
3937 		}
3938 		if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3939 		     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3940 		    (cmd[1] & 0xe0) == 0)
3941 			sdev_printk(KERN_ERR, scp->device,
3942 				    "Unprotected WR to DIF device\n");
3943 	}
3944 
3945 	sdeb_write_lock(sip);
3946 	ret = check_device_access_params(scp, lba, num, true);
3947 	if (ret) {
3948 		sdeb_write_unlock(sip);
3949 		return ret;
3950 	}
3951 
3952 	/* DIX + T10 DIF */
3953 	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3954 		switch (prot_verify_write(scp, lba, num, ei_lba)) {
3955 		case 1: /* Guard tag error */
3956 			if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) {
3957 				sdeb_write_unlock(sip);
3958 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3959 				return illegal_condition_result;
3960 			} else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */
3961 				sdeb_write_unlock(sip);
3962 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3963 				return check_condition_result;
3964 			}
3965 			break;
3966 		case 3: /* Reference tag error */
3967 			if (scp->prot_flags & SCSI_PROT_REF_CHECK) {
3968 				sdeb_write_unlock(sip);
3969 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3);
3970 				return illegal_condition_result;
3971 			} else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */
3972 				sdeb_write_unlock(sip);
3973 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3);
3974 				return check_condition_result;
3975 			}
3976 			break;
3977 		}
3978 	}
3979 
3980 	ret = do_device_access(sip, scp, 0, lba, num, true);
3981 	if (unlikely(scsi_debug_lbp()))
3982 		map_region(sip, lba, num);
3983 	/* If ZBC zone then bump its write pointer */
3984 	if (sdebug_dev_is_zoned(devip))
3985 		zbc_inc_wp(devip, lba, num);
3986 	sdeb_write_unlock(sip);
3987 	if (unlikely(-1 == ret))
3988 		return DID_ERROR << 16;
3989 	else if (unlikely(sdebug_verbose &&
3990 			  (ret < (num * sdebug_sector_size))))
3991 		sdev_printk(KERN_INFO, scp->device,
3992 			    "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
3993 			    my_name, num * sdebug_sector_size, ret);
3994 
3995 	if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
3996 		     atomic_read(&sdeb_inject_pending))) {
3997 		if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
3998 			mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
3999 			atomic_set(&sdeb_inject_pending, 0);
4000 			return check_condition_result;
4001 		} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
4002 			/* Logical block guard check failed */
4003 			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
4004 			atomic_set(&sdeb_inject_pending, 0);
4005 			return illegal_condition_result;
4006 		} else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
4007 			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
4008 			atomic_set(&sdeb_inject_pending, 0);
4009 			return illegal_condition_result;
4010 		}
4011 	}
4012 	return 0;
4013 }
4014 
4015 /*
4016  * T10 has only specified WRITE SCATTERED(16) and WRITE SCATTERED(32).
4017  * No READ GATHERED yet (requires bidi or long cdb holding gather list).
4018  */
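/*
 * Data-out buffer layout, as consumed below: the first lbdof logical
 * blocks hold a 32 byte parameter list header followed by num_lrd LBA
 * range descriptors, each 32 bytes (8 byte LBA, 4 byte number of blocks
 * and, for the 32 byte cdb variant, a 4 byte expected initial reference
 * tag); the blocks to be written follow, in descriptor order.
 */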
4019 static int resp_write_scat(struct scsi_cmnd *scp,
4020 			   struct sdebug_dev_info *devip)
4021 {
4022 	u8 *cmd = scp->cmnd;
4023 	u8 *lrdp = NULL;
4024 	u8 *up;
4025 	struct sdeb_store_info *sip = devip2sip(devip, true);
4026 	u8 wrprotect;
4027 	u16 lbdof, num_lrd, k;
4028 	u32 num, num_by, bt_len, lbdof_blen, sg_off, cum_lb;
4029 	u32 lb_size = sdebug_sector_size;
4030 	u32 ei_lba;
4031 	u64 lba;
4032 	int ret, res;
4033 	bool is_16;
4034 	static const u32 lrd_size = 32; /* LRD size; parameter list header is also 32 bytes */
4035 
4036 	if (cmd[0] == VARIABLE_LENGTH_CMD) {
4037 		is_16 = false;
4038 		wrprotect = (cmd[10] >> 5) & 0x7;
4039 		lbdof = get_unaligned_be16(cmd + 12);
4040 		num_lrd = get_unaligned_be16(cmd + 16);
4041 		bt_len = get_unaligned_be32(cmd + 28);
4042 	} else {        /* that leaves WRITE SCATTERED(16) */
4043 		is_16 = true;
4044 		wrprotect = (cmd[2] >> 5) & 0x7;
4045 		lbdof = get_unaligned_be16(cmd + 4);
4046 		num_lrd = get_unaligned_be16(cmd + 8);
4047 		bt_len = get_unaligned_be32(cmd + 10);
4048 		if (unlikely(have_dif_prot)) {
4049 			if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
4050 			    wrprotect) {
4051 				mk_sense_invalid_opcode(scp);
4052 				return illegal_condition_result;
4053 			}
4054 			if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
4055 			     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
4056 			     wrprotect == 0)
4057 				sdev_printk(KERN_ERR, scp->device,
4058 					    "Unprotected WR to DIF device\n");
4059 		}
4060 	}
4061 	if ((num_lrd == 0) || (bt_len == 0))
4062 		return 0;       /* T10 says these do-nothings are not errors */
4063 	if (lbdof == 0) {
4064 		if (sdebug_verbose)
4065 			sdev_printk(KERN_INFO, scp->device,
4066 				"%s: %s: LB Data Offset field bad\n",
4067 				my_name, __func__);
4068 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4069 		return illegal_condition_result;
4070 	}
4071 	lbdof_blen = lbdof * lb_size;
4072 	if ((lrd_size + (num_lrd * lrd_size)) > lbdof_blen) {
4073 		if (sdebug_verbose)
4074 			sdev_printk(KERN_INFO, scp->device,
4075 				"%s: %s: LBA range descriptors don't fit\n",
4076 				my_name, __func__);
4077 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4078 		return illegal_condition_result;
4079 	}
4080 	lrdp = kzalloc(lbdof_blen, GFP_ATOMIC | __GFP_NOWARN);
4081 	if (lrdp == NULL)
4082 		return SCSI_MLQUEUE_HOST_BUSY;
4083 	if (sdebug_verbose)
4084 		sdev_printk(KERN_INFO, scp->device,
4085 			"%s: %s: Fetch header+scatter_list, lbdof_blen=%u\n",
4086 			my_name, __func__, lbdof_blen);
4087 	res = fetch_to_dev_buffer(scp, lrdp, lbdof_blen);
4088 	if (res == -1) {
4089 		ret = DID_ERROR << 16;
4090 		goto err_out;
4091 	}
4092 
4093 	sdeb_write_lock(sip);
4094 	sg_off = lbdof_blen;
4095 	/* Spec: the Buffer Transfer Length field counts logical blocks in the data-out buffer */
4096 	cum_lb = 0;
4097 	for (k = 0, up = lrdp + lrd_size; k < num_lrd; ++k, up += lrd_size) {
4098 		lba = get_unaligned_be64(up + 0);
4099 		num = get_unaligned_be32(up + 8);
4100 		if (sdebug_verbose)
4101 			sdev_printk(KERN_INFO, scp->device,
4102 				"%s: %s: k=%d  LBA=0x%llx num=%u  sg_off=%u\n",
4103 				my_name, __func__, k, lba, num, sg_off);
4104 		if (num == 0)
4105 			continue;
4106 		ret = check_device_access_params(scp, lba, num, true);
4107 		if (ret)
4108 			goto err_out_unlock;
4109 		num_by = num * lb_size;
4110 		ei_lba = is_16 ? 0 : get_unaligned_be32(up + 12);
4111 
4112 		if ((cum_lb + num) > bt_len) {
4113 			if (sdebug_verbose)
4114 				sdev_printk(KERN_INFO, scp->device,
4115 				    "%s: %s: sum of blocks > data provided\n",
4116 				    my_name, __func__);
4117 			mk_sense_buffer(scp, ILLEGAL_REQUEST, WRITE_ERROR_ASC,
4118 					0);
4119 			ret = illegal_condition_result;
4120 			goto err_out_unlock;
4121 		}
4122 
4123 		/* DIX + T10 DIF */
4124 		if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
4125 			int prot_ret = prot_verify_write(scp, lba, num,
4126 							 ei_lba);
4127 
4128 			if (prot_ret) {
4129 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10,
4130 						prot_ret);
4131 				ret = illegal_condition_result;
4132 				goto err_out_unlock;
4133 			}
4134 		}
4135 
4136 		ret = do_device_access(sip, scp, sg_off, lba, num, true);
4137 		/* If ZBC zone then bump its write pointer */
4138 		if (sdebug_dev_is_zoned(devip))
4139 			zbc_inc_wp(devip, lba, num);
4140 		if (unlikely(scsi_debug_lbp()))
4141 			map_region(sip, lba, num);
4142 		if (unlikely(-1 == ret)) {
4143 			ret = DID_ERROR << 16;
4144 			goto err_out_unlock;
4145 		} else if (unlikely(sdebug_verbose && (ret < num_by)))
4146 			sdev_printk(KERN_INFO, scp->device,
4147 			    "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
4148 			    my_name, num_by, ret);
4149 
4150 		if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
4151 			     atomic_read(&sdeb_inject_pending))) {
4152 			if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
4153 				mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
4154 				atomic_set(&sdeb_inject_pending, 0);
4155 				ret = check_condition_result;
4156 				goto err_out_unlock;
4157 			} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
4158 				/* Logical block guard check failed */
4159 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
4160 				atomic_set(&sdeb_inject_pending, 0);
4161 				ret = illegal_condition_result;
4162 				goto err_out_unlock;
4163 			} else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
4164 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
4165 				atomic_set(&sdeb_inject_pending, 0);
4166 				ret = illegal_condition_result;
4167 				goto err_out_unlock;
4168 			}
4169 		}
4170 		sg_off += num_by;
4171 		cum_lb += num;
4172 	}
4173 	ret = 0;
4174 err_out_unlock:
4175 	sdeb_write_unlock(sip);
4176 err_out:
4177 	kfree(lrdp);
4178 	return ret;
4179 }
4180 
4181 static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
4182 			   u32 ei_lba, bool unmap, bool ndob)
4183 {
4184 	struct scsi_device *sdp = scp->device;
4185 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
4186 	unsigned long long i;
4187 	u64 block, lbaa;
4188 	u32 lb_size = sdebug_sector_size;
4189 	int ret;
4190 	struct sdeb_store_info *sip = devip2sip(devip, true);
4192 	u8 *fs1p;
4193 	u8 *fsp;
4194 
4195 	sdeb_write_lock(sip);
4196 
4197 	ret = check_device_access_params(scp, lba, num, true);
4198 	if (ret) {
4199 		sdeb_write_unlock(sip);
4200 		return ret;
4201 	}
4202 
4203 	if (unmap && scsi_debug_lbp()) {
4204 		unmap_region(sip, lba, num);
4205 		goto out;
4206 	}
4207 	lbaa = lba;
4208 	block = do_div(lbaa, sdebug_store_sectors);
4209 	/* if ndob then zero 1 logical block, else fetch 1 logical block */
4210 	fsp = sip->storep;
4211 	fs1p = fsp + (block * lb_size);
4212 	if (ndob) {
4213 		memset(fs1p, 0, lb_size);
4214 		ret = 0;
4215 	} else
4216 		ret = fetch_to_dev_buffer(scp, fs1p, lb_size);
4217 
4218 	if (-1 == ret) {
4219 		sdeb_write_unlock(sip);
4220 		return DID_ERROR << 16;
4221 	} else if (sdebug_verbose && !ndob && (ret < lb_size))
4222 		sdev_printk(KERN_INFO, scp->device,
4223 			    "%s: %s: lb size=%u, IO sent=%d bytes\n",
4224 			    my_name, "write same", lb_size, ret);
4225 
4226 	/* Copy first sector to remaining blocks */
4227 	for (i = 1 ; i < num ; i++) {
4228 		lbaa = lba + i;
4229 		block = do_div(lbaa, sdebug_store_sectors);
4230 		memmove(fsp + (block * lb_size), fs1p, lb_size);
4231 	}
4232 	if (scsi_debug_lbp())
4233 		map_region(sip, lba, num);
4234 	/* If ZBC zone then bump its write pointer */
4235 	if (sdebug_dev_is_zoned(devip))
4236 		zbc_inc_wp(devip, lba, num);
4237 out:
4238 	sdeb_write_unlock(sip);
4239 
4240 	return 0;
4241 }
4242 
4243 static int resp_write_same_10(struct scsi_cmnd *scp,
4244 			      struct sdebug_dev_info *devip)
4245 {
4246 	u8 *cmd = scp->cmnd;
4247 	u32 lba;
4248 	u16 num;
4249 	u32 ei_lba = 0;
4250 	bool unmap = false;
4251 
4252 	if (cmd[1] & 0x8) {
4253 		if (sdebug_lbpws10 == 0) {
4254 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
4255 			return check_condition_result;
4256 		} else
4257 			unmap = true;
4258 	}
4259 	lba = get_unaligned_be32(cmd + 2);
4260 	num = get_unaligned_be16(cmd + 7);
4261 	if (num > sdebug_write_same_length) {
4262 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
4263 		return check_condition_result;
4264 	}
4265 	return resp_write_same(scp, lba, num, ei_lba, unmap, false);
4266 }
4267 
4268 static int resp_write_same_16(struct scsi_cmnd *scp,
4269 			      struct sdebug_dev_info *devip)
4270 {
4271 	u8 *cmd = scp->cmnd;
4272 	u64 lba;
4273 	u32 num;
4274 	u32 ei_lba = 0;
4275 	bool unmap = false;
4276 	bool ndob = false;
4277 
4278 	if (cmd[1] & 0x8) {	/* UNMAP */
4279 		if (sdebug_lbpws == 0) {
4280 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
4281 			return check_condition_result;
4282 		} else
4283 			unmap = true;
4284 	}
4285 	if (cmd[1] & 0x1)  /* NDOB (no data-out buffer, assumes zeroes) */
4286 		ndob = true;
4287 	lba = get_unaligned_be64(cmd + 2);
4288 	num = get_unaligned_be32(cmd + 10);
4289 	if (num > sdebug_write_same_length) {
4290 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
4291 		return check_condition_result;
4292 	}
4293 	return resp_write_same(scp, lba, num, ei_lba, unmap, ndob);
4294 }
4295 
4296 /* Note the mode field is in the same position as the (lower) service action
4297  * field. For the Report supported operation codes command, SPC-4 suggests
4298  * each mode of this command should be reported separately; left for future work. */
4299 static int resp_write_buffer(struct scsi_cmnd *scp,
4300 			     struct sdebug_dev_info *devip)
4301 {
4302 	u8 *cmd = scp->cmnd;
4303 	struct scsi_device *sdp = scp->device;
4304 	struct sdebug_dev_info *dp;
4305 	u8 mode;
4306 
4307 	mode = cmd[1] & 0x1f;
4308 	switch (mode) {
4309 	case 0x4:	/* download microcode (MC) and activate (ACT) */
4310 		/* set UAs on this device only */
4311 		set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
4312 		set_bit(SDEBUG_UA_MICROCODE_CHANGED, devip->uas_bm);
4313 		break;
4314 	case 0x5:	/* download MC, save and ACT */
4315 		set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET, devip->uas_bm);
4316 		break;
4317 	case 0x6:	/* download MC with offsets and ACT */
4318 		/* set UAs on most devices (LUs) in this target */
4319 		list_for_each_entry(dp,
4320 				    &devip->sdbg_host->dev_info_list,
4321 				    dev_list)
4322 			if (dp->target == sdp->id) {
4323 				set_bit(SDEBUG_UA_BUS_RESET, dp->uas_bm);
4324 				if (devip != dp)
4325 					set_bit(SDEBUG_UA_MICROCODE_CHANGED,
4326 						dp->uas_bm);
4327 			}
4328 		break;
4329 	case 0x7:	/* download MC with offsets, save, and ACT */
4330 		/* set UA on all devices (LUs) in this target */
4331 		list_for_each_entry(dp,
4332 				    &devip->sdbg_host->dev_info_list,
4333 				    dev_list)
4334 			if (dp->target == sdp->id)
4335 				set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET,
4336 					dp->uas_bm);
4337 		break;
4338 	default:
4339 		/* do nothing for this command for other mode values */
4340 		break;
4341 	}
4342 	return 0;
4343 }
4344 
4345 static int resp_comp_write(struct scsi_cmnd *scp,
4346 			   struct sdebug_dev_info *devip)
4347 {
4348 	u8 *cmd = scp->cmnd;
4349 	u8 *arr;
4350 	struct sdeb_store_info *sip = devip2sip(devip, true);
4351 	u64 lba;
4352 	u32 dnum;
4353 	u32 lb_size = sdebug_sector_size;
4354 	u8 num;
4355 	int ret;
4356 	int retval = 0;
4357 
4358 	lba = get_unaligned_be64(cmd + 2);
4359 	num = cmd[13];		/* 1 to a maximum of 255 logical blocks */
4360 	if (0 == num)
4361 		return 0;	/* degenerate case, not an error */
4362 	if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
4363 	    (cmd[1] & 0xe0)) {
4364 		mk_sense_invalid_opcode(scp);
4365 		return check_condition_result;
4366 	}
4367 	if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
4368 	     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
4369 	    (cmd[1] & 0xe0) == 0)
4370 		sdev_printk(KERN_ERR, scp->device,
4371 			    "Unprotected WR to DIF device\n");
4372 	ret = check_device_access_params(scp, lba, num, false);
4373 	if (ret)
4374 		return ret;
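	/* data-out carries num blocks to verify followed by num blocks to write */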
4375 	dnum = 2 * num;
4376 	arr = kcalloc(lb_size, dnum, GFP_ATOMIC);
4377 	if (NULL == arr) {
4378 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4379 				INSUFF_RES_ASCQ);
4380 		return check_condition_result;
4381 	}
4382 
4383 	sdeb_write_lock(sip);
4384 
4385 	ret = do_dout_fetch(scp, dnum, arr);
4386 	if (ret == -1) {
4387 		retval = DID_ERROR << 16;
4388 		goto cleanup;
4389 	} else if (sdebug_verbose && (ret < (dnum * lb_size)))
4390 		sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb "
4391 			    "indicated=%u, IO sent=%d bytes\n", my_name,
4392 			    dnum * lb_size, ret);
4393 	if (!comp_write_worker(sip, lba, num, arr, false)) {
4394 		mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
4395 		retval = check_condition_result;
4396 		goto cleanup;
4397 	}
4398 	if (scsi_debug_lbp())
4399 		map_region(sip, lba, num);
4400 cleanup:
4401 	sdeb_write_unlock(sip);
4402 	kfree(arr);
4403 	return retval;
4404 }
4405 
4406 struct unmap_block_desc {
4407 	__be64	lba;
4408 	__be32	blocks;
4409 	__be32	__reserved;
4410 };
4411 
4412 static int resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4413 {
4414 	unsigned char *buf;
4415 	struct unmap_block_desc *desc;
4416 	struct sdeb_store_info *sip = devip2sip(devip, true);
4417 	unsigned int i, payload_len, descriptors;
4418 	int ret;
4419 
4420 	if (!scsi_debug_lbp())
4421 		return 0;	/* fib and say it's done */
4422 	payload_len = get_unaligned_be16(scp->cmnd + 7);
4423 	BUG_ON(scsi_bufflen(scp) != payload_len);
4424 
4425 	descriptors = (payload_len - 8) / 16;
4426 	if (descriptors > sdebug_unmap_max_desc) {
4427 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
4428 		return check_condition_result;
4429 	}
4430 
4431 	buf = kzalloc(scsi_bufflen(scp), GFP_ATOMIC);
4432 	if (!buf) {
4433 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4434 				INSUFF_RES_ASCQ);
4435 		return check_condition_result;
4436 	}
4437 
4438 	scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
4439 
4440 	BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
4441 	BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);
4442 
4443 	desc = (void *)&buf[8];
4444 
4445 	sdeb_write_lock(sip);
4446 
4447 	for (i = 0 ; i < descriptors ; i++) {
4448 		unsigned long long lba = get_unaligned_be64(&desc[i].lba);
4449 		unsigned int num = get_unaligned_be32(&desc[i].blocks);
4450 
4451 		ret = check_device_access_params(scp, lba, num, true);
4452 		if (ret)
4453 			goto out;
4454 
4455 		unmap_region(sip, lba, num);
4456 	}
4457 
4458 	ret = 0;
4459 
4460 out:
4461 	sdeb_write_unlock(sip);
4462 	kfree(buf);
4463 
4464 	return ret;
4465 }
4466 
4467 #define SDEBUG_GET_LBA_STATUS_LEN 32
4468 
4469 static int resp_get_lba_status(struct scsi_cmnd *scp,
4470 			       struct sdebug_dev_info *devip)
4471 {
4472 	u8 *cmd = scp->cmnd;
4473 	u64 lba;
4474 	u32 alloc_len, mapped, num;
4475 	int ret;
4476 	u8 arr[SDEBUG_GET_LBA_STATUS_LEN];
4477 
4478 	lba = get_unaligned_be64(cmd + 2);
4479 	alloc_len = get_unaligned_be32(cmd + 10);
4480 
4481 	if (alloc_len < 24)
4482 		return 0;
4483 
4484 	ret = check_device_access_params(scp, lba, 1, false);
4485 	if (ret)
4486 		return ret;
4487 
4488 	if (scsi_debug_lbp()) {
4489 		struct sdeb_store_info *sip = devip2sip(devip, true);
4490 
4491 		mapped = map_state(sip, lba, &num);
4492 	} else {
4493 		mapped = 1;
4494 		/* following just in case virtual_gb changed */
4495 		sdebug_capacity = get_sdebug_capacity();
4496 		if (sdebug_capacity - lba <= 0xffffffff)
4497 			num = sdebug_capacity - lba;
4498 		else
4499 			num = 0xffffffff;
4500 	}
4501 
4502 	memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
4503 	put_unaligned_be32(20, arr);		/* Parameter Data Length */
4504 	put_unaligned_be64(lba, arr + 8);	/* LBA */
4505 	put_unaligned_be32(num, arr + 16);	/* Number of blocks */
4506 	arr[20] = !mapped;		/* prov_stat=0: mapped; 1: dealloc */
4507 
4508 	return fill_from_dev_buffer(scp, arr, SDEBUG_GET_LBA_STATUS_LEN);
4509 }
4510 
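/*
 * A set SDEG_RES_IMMED_MASK bit in the return value asks the command
 * scheduling code to complete the command immediately, bypassing any
 * configured command delay.
 */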
4511 static int resp_sync_cache(struct scsi_cmnd *scp,
4512 			   struct sdebug_dev_info *devip)
4513 {
4514 	int res = 0;
4515 	u64 lba;
4516 	u32 num_blocks;
4517 	u8 *cmd = scp->cmnd;
4518 
4519 	if (cmd[0] == SYNCHRONIZE_CACHE) {	/* 10 byte cdb */
4520 		lba = get_unaligned_be32(cmd + 2);
4521 		num_blocks = get_unaligned_be16(cmd + 7);
4522 	} else {				/* SYNCHRONIZE_CACHE(16) */
4523 		lba = get_unaligned_be64(cmd + 2);
4524 		num_blocks = get_unaligned_be32(cmd + 10);
4525 	}
4526 	if (lba + num_blocks > sdebug_capacity) {
4527 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4528 		return check_condition_result;
4529 	}
4530 	if (!write_since_sync || (cmd[1] & 0x2))
4531 		res = SDEG_RES_IMMED_MASK;
4532 	else		/* delay if write_since_sync and IMMED clear */
4533 		write_since_sync = false;
4534 	return res;
4535 }
4536 
4537 /*
4538  * Assuming LBA+num_blocks is not out-of-range, this function returns
4539  * CONDITION MET if the specified blocks will fit (or have already been
4540  * fetched) into the cache, and GOOD status otherwise. Model a disk with a
4541  * big cache and always yield CONDITION MET. Actually tries to bring the
4542  * range of the store that is in main memory into the CPU cache(s).
4543  */
4544 static int resp_pre_fetch(struct scsi_cmnd *scp,
4545 			  struct sdebug_dev_info *devip)
4546 {
4547 	int res = 0;
4548 	u64 lba;
4549 	u64 block, rest = 0;
4550 	u32 nblks;
4551 	u8 *cmd = scp->cmnd;
4552 	struct sdeb_store_info *sip = devip2sip(devip, true);
4553 	u8 *fsp = sip->storep;
4554 
4555 	if (cmd[0] == PRE_FETCH) {	/* 10 byte cdb */
4556 		lba = get_unaligned_be32(cmd + 2);
4557 		nblks = get_unaligned_be16(cmd + 7);
4558 	} else {			/* PRE-FETCH(16) */
4559 		lba = get_unaligned_be64(cmd + 2);
4560 		nblks = get_unaligned_be32(cmd + 10);
4561 	}
4562 	if (lba + nblks > sdebug_capacity) {
4563 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4564 		return check_condition_result;
4565 	}
4566 	if (!fsp)
4567 		goto fini;
4568 	/* PRE-FETCH spec says nothing about LBP or PI so skip them */
4569 	block = do_div(lba, sdebug_store_sectors);
4570 	if (block + nblks > sdebug_store_sectors)
4571 		rest = block + nblks - sdebug_store_sectors;
4572 
4573 	/* Try to bring the PRE-FETCH range into CPU's cache */
4574 	sdeb_read_lock(sip);
4575 	prefetch_range(fsp + (sdebug_sector_size * block),
4576 		       (nblks - rest) * sdebug_sector_size);
4577 	if (rest)
4578 		prefetch_range(fsp, rest * sdebug_sector_size);
4579 	sdeb_read_unlock(sip);
4580 fini:
4581 	if (cmd[1] & 0x2)
4582 		res = SDEG_RES_IMMED_MASK;
4583 	return res | condition_met_result;
4584 }
4585 
4586 #define RL_BUCKET_ELEMS 8
4587 
4588 /* Even though each pseudo target has a REPORT LUNS "well known logical unit"
4589  * (W-LUN), the normal Linux scanning logic does not associate it with a
4590  * device (e.g. /dev/sg7). The following magic will make that association:
4591  *   "cd /sys/class/scsi_host/host<n> ; echo '- - 49409' > scan"
4592  * where <n> is a host number. If there are multiple targets in a host then
4593  * the above will associate a W-LUN to each target. To only get a W-LUN
4594  * for target 2, then use "echo '- 2 49409' > scan" .
4595  */
4596 static int resp_report_luns(struct scsi_cmnd *scp,
4597 			    struct sdebug_dev_info *devip)
4598 {
4599 	unsigned char *cmd = scp->cmnd;
4600 	unsigned int alloc_len;
4601 	unsigned char select_report;
4602 	u64 lun;
4603 	struct scsi_lun *lun_p;
4604 	u8 arr[RL_BUCKET_ELEMS * sizeof(struct scsi_lun)];
4605 	unsigned int lun_cnt;	/* normal LUN count (max: 256) */
4606 	unsigned int wlun_cnt;	/* report luns W-LUN count */
4607 	unsigned int tlun_cnt;	/* total LUN count */
4608 	unsigned int rlen;	/* response length (in bytes) */
4609 	int k, j, n, res;
4610 	unsigned int off_rsp = 0;
4611 	const int sz_lun = sizeof(struct scsi_lun);
4612 
4613 	clear_luns_changed_on_target(devip);
4614 
4615 	select_report = cmd[2];
4616 	alloc_len = get_unaligned_be32(cmd + 6);
4617 
4618 	if (alloc_len < 4) {
4619 		pr_err("alloc len too small %u\n", alloc_len);
4620 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
4621 		return check_condition_result;
4622 	}
4623 
4624 	switch (select_report) {
4625 	case 0:		/* all LUNs apart from W-LUNs */
4626 		lun_cnt = sdebug_max_luns;
4627 		wlun_cnt = 0;
4628 		break;
4629 	case 1:		/* only W-LUNs */
4630 		lun_cnt = 0;
4631 		wlun_cnt = 1;
4632 		break;
4633 	case 2:		/* all LUNs */
4634 		lun_cnt = sdebug_max_luns;
4635 		wlun_cnt = 1;
4636 		break;
4637 	case 0x10:	/* only administrative LUs */
4638 	case 0x11:	/* see SPC-5 */
4639 	case 0x12:	/* only subsidiary LUs owned by referenced LU */
4640 	default:
4641 		pr_debug("select report invalid %d\n", select_report);
4642 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
4643 		return check_condition_result;
4644 	}
4645 
4646 	if (sdebug_no_lun_0 && (lun_cnt > 0))
4647 		--lun_cnt;
4648 
4649 	tlun_cnt = lun_cnt + wlun_cnt;
4650 	rlen = tlun_cnt * sz_lun;	/* excluding 8 byte header */
4651 	scsi_set_resid(scp, scsi_bufflen(scp));
4652 	pr_debug("select_report %d luns = %d wluns = %d no_lun0 %d\n",
4653 		 select_report, lun_cnt, wlun_cnt, sdebug_no_lun_0);
4654 
4655 	/* loops rely on sizeof response header same as sizeof lun (both 8) */
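	/*
	 * Build and send the response in buckets of RL_BUCKET_ELEMS 8 byte
	 * entries so that arr can stay small; bucket 0 donates its first
	 * slot to the 8 byte response header.
	 */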
4656 	lun = sdebug_no_lun_0 ? 1 : 0;
4657 	for (k = 0, j = 0, res = 0; true; ++k, j = 0) {
4658 		memset(arr, 0, sizeof(arr));
4659 		lun_p = (struct scsi_lun *)&arr[0];
4660 		if (k == 0) {
4661 			put_unaligned_be32(rlen, &arr[0]);
4662 			++lun_p;
4663 			j = 1;
4664 		}
4665 		for ( ; j < RL_BUCKET_ELEMS; ++j, ++lun_p) {
4666 			if ((k * RL_BUCKET_ELEMS) + j > lun_cnt)
4667 				break;
4668 			int_to_scsilun(lun++, lun_p);
4669 			if (lun > 1 && sdebug_lun_am == SAM_LUN_AM_FLAT)
4670 				lun_p->scsi_lun[0] |= 0x40;
4671 		}
4672 		if (j < RL_BUCKET_ELEMS)
4673 			break;
4674 		n = j * sz_lun;
4675 		res = p_fill_from_dev_buffer(scp, arr, n, off_rsp);
4676 		if (res)
4677 			return res;
4678 		off_rsp += n;
4679 	}
4680 	if (wlun_cnt) {
4681 		int_to_scsilun(SCSI_W_LUN_REPORT_LUNS, lun_p);
4682 		++j;
4683 	}
4684 	if (j > 0)
4685 		res = p_fill_from_dev_buffer(scp, arr, j * sz_lun, off_rsp);
4686 	return res;
4687 }
4688 
4689 static int resp_verify(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4690 {
4691 	bool is_bytchk3 = false;
4692 	u8 bytchk;
4693 	int ret, j;
4694 	u32 vnum, a_num, off;
4695 	const u32 lb_size = sdebug_sector_size;
4696 	u64 lba;
4697 	u8 *arr;
4698 	u8 *cmd = scp->cmnd;
4699 	struct sdeb_store_info *sip = devip2sip(devip, true);
4700 
4701 	bytchk = (cmd[1] >> 1) & 0x3;
4702 	if (bytchk == 0) {
4703 		return 0;	/* always claim internal verify okay */
4704 	} else if (bytchk == 2) {
4705 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
4706 		return check_condition_result;
4707 	} else if (bytchk == 3) {
4708 		is_bytchk3 = true;	/* 1 block sent, compared repeatedly */
4709 	}
4710 	switch (cmd[0]) {
4711 	case VERIFY_16:
4712 		lba = get_unaligned_be64(cmd + 2);
4713 		vnum = get_unaligned_be32(cmd + 10);
4714 		break;
4715 	case VERIFY:		/* is VERIFY(10) */
4716 		lba = get_unaligned_be32(cmd + 2);
4717 		vnum = get_unaligned_be16(cmd + 7);
4718 		break;
4719 	default:
4720 		mk_sense_invalid_opcode(scp);
4721 		return check_condition_result;
4722 	}
4723 	if (vnum == 0)
4724 		return 0;	/* not an error */
4725 	a_num = is_bytchk3 ? 1 : vnum;
4726 	/* Treat the following check like one for read (i.e. no write) access */
4727 	ret = check_device_access_params(scp, lba, a_num, false);
4728 	if (ret)
4729 		return ret;
4730 
4731 	arr = kcalloc(lb_size, vnum, GFP_ATOMIC | __GFP_NOWARN);
4732 	if (!arr) {
4733 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4734 				INSUFF_RES_ASCQ);
4735 		return check_condition_result;
4736 	}
4737 	/* Not changing store, so only need read access */
4738 	sdeb_read_lock(sip);
4739 
4740 	ret = do_dout_fetch(scp, a_num, arr);
4741 	if (ret == -1) {
4742 		ret = DID_ERROR << 16;
4743 		goto cleanup;
4744 	} else if (sdebug_verbose && (ret < (a_num * lb_size))) {
4745 		sdev_printk(KERN_INFO, scp->device,
4746 			    "%s: %s: cdb indicated=%u, IO sent=%d bytes\n",
4747 			    my_name, __func__, a_num * lb_size, ret);
4748 	}
4749 	if (is_bytchk3) {
4750 		for (j = 1, off = lb_size; j < vnum; ++j, off += lb_size)
4751 			memcpy(arr + off, arr, lb_size);
4752 	}
4753 	ret = 0;
4754 	if (!comp_write_worker(sip, lba, vnum, arr, true)) {
4755 		mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
4756 		ret = check_condition_result;
4757 		goto cleanup;
4758 	}
4759 cleanup:
4760 	sdeb_read_unlock(sip);
4761 	kfree(arr);
4762 	return ret;
4763 }
4764 
4765 #define RZONES_DESC_HD 64
4766 
4767 /* Report zones depending on start LBA and reporting options */
4768 static int resp_report_zones(struct scsi_cmnd *scp,
4769 			     struct sdebug_dev_info *devip)
4770 {
4771 	unsigned int rep_max_zones, nrz = 0;
4772 	int ret = 0;
4773 	u32 alloc_len, rep_opts, rep_len;
4774 	bool partial;
4775 	u64 lba, zs_lba;
4776 	u8 *arr = NULL, *desc;
4777 	u8 *cmd = scp->cmnd;
4778 	struct sdeb_zone_state *zsp = NULL;
4779 	struct sdeb_store_info *sip = devip2sip(devip, false);
4780 
4781 	if (!sdebug_dev_is_zoned(devip)) {
4782 		mk_sense_invalid_opcode(scp);
4783 		return check_condition_result;
4784 	}
4785 	zs_lba = get_unaligned_be64(cmd + 2);
4786 	alloc_len = get_unaligned_be32(cmd + 10);
4787 	if (alloc_len == 0)
4788 		return 0;	/* not an error */
4789 	rep_opts = cmd[14] & 0x3f;
4790 	partial = cmd[14] & 0x80;
4791 
4792 	if (zs_lba >= sdebug_capacity) {
4793 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4794 		return check_condition_result;
4795 	}
4796 
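	/* a 64 byte report header precedes the 64 byte zone descriptors */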
4797 	rep_max_zones = (alloc_len - 64) >> ilog2(RZONES_DESC_HD);
4798 
4799 	arr = kzalloc(alloc_len, GFP_ATOMIC | __GFP_NOWARN);
4800 	if (!arr) {
4801 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4802 				INSUFF_RES_ASCQ);
4803 		return check_condition_result;
4804 	}
4805 
4806 	sdeb_read_lock(sip);
4807 
4808 	desc = arr + 64;
4809 	for (lba = zs_lba; lba < sdebug_capacity;
4810 	     lba = zsp->z_start + zsp->z_size) {
4811 		if (WARN_ONCE(zbc_zone(devip, lba) == zsp, "lba = %llu\n", lba))
4812 			break;
4813 		zsp = zbc_zone(devip, lba);
4814 		switch (rep_opts) {
4815 		case 0x00:
4816 			/* All zones */
4817 			break;
4818 		case 0x01:
4819 			/* Empty zones */
4820 			if (zsp->z_cond != ZC1_EMPTY)
4821 				continue;
4822 			break;
4823 		case 0x02:
4824 			/* Implicit open zones */
4825 			if (zsp->z_cond != ZC2_IMPLICIT_OPEN)
4826 				continue;
4827 			break;
4828 		case 0x03:
4829 			/* Explicit open zones */
4830 			if (zsp->z_cond != ZC3_EXPLICIT_OPEN)
4831 				continue;
4832 			break;
4833 		case 0x04:
4834 			/* Closed zones */
4835 			if (zsp->z_cond != ZC4_CLOSED)
4836 				continue;
4837 			break;
4838 		case 0x05:
4839 			/* Full zones */
4840 			if (zsp->z_cond != ZC5_FULL)
4841 				continue;
4842 			break;
4843 		case 0x06:
4844 		case 0x07:
4845 		case 0x10:
4846 			/*
4847 			 * Read-only, offline and reset WP recommended are
4848 			 * not emulated: no zones to report.
4849 			 */
4850 			continue;
4851 		case 0x11:
4852 			/* non-seq-resource set */
4853 			if (!zsp->z_non_seq_resource)
4854 				continue;
4855 			break;
4856 		case 0x3e:
4857 			/* All zones except gap zones. */
4858 			if (zbc_zone_is_gap(zsp))
4859 				continue;
4860 			break;
4861 		case 0x3f:
4862 			/* Not write pointer (conventional) zones */
4863 			if (zbc_zone_is_seq(zsp))
4864 				continue;
4865 			break;
4866 		default:
4867 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
4868 					INVALID_FIELD_IN_CDB, 0);
4869 			ret = check_condition_result;
4870 			goto fini;
4871 		}
4872 
4873 		if (nrz < rep_max_zones) {
4874 			/* Fill zone descriptor */
4875 			desc[0] = zsp->z_type;
4876 			desc[1] = zsp->z_cond << 4;
4877 			if (zsp->z_non_seq_resource)
4878 				desc[1] |= 1 << 1;
4879 			put_unaligned_be64((u64)zsp->z_size, desc + 8);
4880 			put_unaligned_be64((u64)zsp->z_start, desc + 16);
4881 			put_unaligned_be64((u64)zsp->z_wp, desc + 24);
4882 			desc += 64;
4883 		}
4884 
4885 		if (partial && nrz >= rep_max_zones)
4886 			break;
4887 
4888 		nrz++;
4889 	}
4890 
4891 	/* Report header */
4892 	/* Zone list length. */
4893 	put_unaligned_be32(nrz * RZONES_DESC_HD, arr + 0);
4894 	/* Maximum LBA */
4895 	put_unaligned_be64(sdebug_capacity - 1, arr + 8);
4896 	/* Zone starting LBA granularity. */
4897 	if (devip->zcap < devip->zsize)
4898 		put_unaligned_be64(devip->zsize, arr + 16);
4899 
4900 	rep_len = (unsigned long)desc - (unsigned long)arr;
4901 	ret = fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, rep_len));
4902 
4903 fini:
4904 	sdeb_read_unlock(sip);
4905 	kfree(arr);
4906 	return ret;
4907 }
4908 
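/*
 * Editor's illustration (added; not part of the original driver): the CDB
 * decoding above matches ZBC IN with the REPORT ZONES service action. A
 * hedged sketch of filling such a 16-byte CDB, assuming opcode 0x95 and
 * service action 0x00 as defined by the ZBC spec:
 *
 *	u8 cdb[16] = { 0x95, 0x00 };		// ZBC IN, REPORT ZONES
 *	put_unaligned_be64(zs_lba, cdb + 2);	// zone start LBA
 *	put_unaligned_be32(buf_len, cdb + 10);	// allocation length
 *	cdb[14] = (partial ? 0x80 : 0x00) | (rep_opts & 0x3f);
 */
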
4909 /* Logic transplanted from tcmu-runner, file_zbc.c */
4910 static void zbc_open_all(struct sdebug_dev_info *devip)
4911 {
4912 	struct sdeb_zone_state *zsp = &devip->zstate[0];
4913 	unsigned int i;
4914 
4915 	for (i = 0; i < devip->nr_zones; i++, zsp++) {
4916 		if (zsp->z_cond == ZC4_CLOSED)
4917 			zbc_open_zone(devip, &devip->zstate[i], true);
4918 	}
4919 }
4920 
4921 static int resp_open_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4922 {
4923 	int res = 0;
4924 	u64 z_id;
4925 	enum sdebug_z_cond zc;
4926 	u8 *cmd = scp->cmnd;
4927 	struct sdeb_zone_state *zsp;
4928 	bool all = cmd[14] & 0x01;
4929 	struct sdeb_store_info *sip = devip2sip(devip, false);
4930 
4931 	if (!sdebug_dev_is_zoned(devip)) {
4932 		mk_sense_invalid_opcode(scp);
4933 		return check_condition_result;
4934 	}
4935 
4936 	sdeb_write_lock(sip);
4937 
4938 	if (all) {
4939 		/* Check if all closed zones can be opened */
4940 		if (devip->max_open &&
4941 		    devip->nr_exp_open + devip->nr_closed > devip->max_open) {
4942 			mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
4943 					INSUFF_ZONE_ASCQ);
4944 			res = check_condition_result;
4945 			goto fini;
4946 		}
4947 		/* Open all closed zones */
4948 		zbc_open_all(devip);
4949 		goto fini;
4950 	}
4951 
4952 	/* Open the specified zone */
4953 	z_id = get_unaligned_be64(cmd + 2);
4954 	if (z_id >= sdebug_capacity) {
4955 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4956 		res = check_condition_result;
4957 		goto fini;
4958 	}
4959 
4960 	zsp = zbc_zone(devip, z_id);
4961 	if (z_id != zsp->z_start) {
4962 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4963 		res = check_condition_result;
4964 		goto fini;
4965 	}
4966 	if (zbc_zone_is_conv(zsp)) {
4967 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4968 		res = check_condition_result;
4969 		goto fini;
4970 	}
4971 
4972 	zc = zsp->z_cond;
4973 	if (zc == ZC3_EXPLICIT_OPEN || zc == ZC5_FULL)
4974 		goto fini;
4975 
4976 	if (devip->max_open && devip->nr_exp_open >= devip->max_open) {
4977 		mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
4978 				INSUFF_ZONE_ASCQ);
4979 		res = check_condition_result;
4980 		goto fini;
4981 	}
4982 
4983 	zbc_open_zone(devip, zsp, true);
4984 fini:
4985 	sdeb_write_unlock(sip);
4986 	return res;
4987 }
4988 
4989 static void zbc_close_all(struct sdebug_dev_info *devip)
4990 {
4991 	unsigned int i;
4992 
4993 	for (i = 0; i < devip->nr_zones; i++)
4994 		zbc_close_zone(devip, &devip->zstate[i]);
4995 }
4996 
4997 static int resp_close_zone(struct scsi_cmnd *scp,
4998 			   struct sdebug_dev_info *devip)
4999 {
5000 	int res = 0;
5001 	u64 z_id;
5002 	u8 *cmd = scp->cmnd;
5003 	struct sdeb_zone_state *zsp;
5004 	bool all = cmd[14] & 0x01;
5005 	struct sdeb_store_info *sip = devip2sip(devip, false);
5006 
5007 	if (!sdebug_dev_is_zoned(devip)) {
5008 		mk_sense_invalid_opcode(scp);
5009 		return check_condition_result;
5010 	}
5011 
5012 	sdeb_write_lock(sip);
5013 
5014 	if (all) {
5015 		zbc_close_all(devip);
5016 		goto fini;
5017 	}
5018 
5019 	/* Close specified zone */
5020 	z_id = get_unaligned_be64(cmd + 2);
5021 	if (z_id >= sdebug_capacity) {
5022 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
5023 		res = check_condition_result;
5024 		goto fini;
5025 	}
5026 
5027 	zsp = zbc_zone(devip, z_id);
5028 	if (z_id != zsp->z_start) {
5029 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
5030 		res = check_condition_result;
5031 		goto fini;
5032 	}
5033 	if (zbc_zone_is_conv(zsp)) {
5034 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
5035 		res = check_condition_result;
5036 		goto fini;
5037 	}
5038 
5039 	zbc_close_zone(devip, zsp);
5040 fini:
5041 	sdeb_write_unlock(sip);
5042 	return res;
5043 }
5044 
5045 static void zbc_finish_zone(struct sdebug_dev_info *devip,
5046 			    struct sdeb_zone_state *zsp, bool empty)
5047 {
5048 	enum sdebug_z_cond zc = zsp->z_cond;
5049 
5050 	if (zc == ZC4_CLOSED || zc == ZC2_IMPLICIT_OPEN ||
5051 	    zc == ZC3_EXPLICIT_OPEN || (empty && zc == ZC1_EMPTY)) {
5052 		if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
5053 			zbc_close_zone(devip, zsp);
5054 		if (zsp->z_cond == ZC4_CLOSED)
5055 			devip->nr_closed--;
5056 		zsp->z_wp = zsp->z_start + zsp->z_size;
5057 		zsp->z_cond = ZC5_FULL;
5058 	}
5059 }
5060 
5061 static void zbc_finish_all(struct sdebug_dev_info *devip)
5062 {
5063 	unsigned int i;
5064 
5065 	for (i = 0; i < devip->nr_zones; i++)
5066 		zbc_finish_zone(devip, &devip->zstate[i], false);
5067 }
5068 
5069 static int resp_finish_zone(struct scsi_cmnd *scp,
5070 			    struct sdebug_dev_info *devip)
5071 {
5072 	struct sdeb_zone_state *zsp;
5073 	int res = 0;
5074 	u64 z_id;
5075 	u8 *cmd = scp->cmnd;
5076 	bool all = cmd[14] & 0x01;
5077 	struct sdeb_store_info *sip = devip2sip(devip, false);
5078 
5079 	if (!sdebug_dev_is_zoned(devip)) {
5080 		mk_sense_invalid_opcode(scp);
5081 		return check_condition_result;
5082 	}
5083 
5084 	sdeb_write_lock(sip);
5085 
5086 	if (all) {
5087 		zbc_finish_all(devip);
5088 		goto fini;
5089 	}
5090 
5091 	/* Finish the specified zone */
5092 	z_id = get_unaligned_be64(cmd + 2);
5093 	if (z_id >= sdebug_capacity) {
5094 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
5095 		res = check_condition_result;
5096 		goto fini;
5097 	}
5098 
5099 	zsp = zbc_zone(devip, z_id);
5100 	if (z_id != zsp->z_start) {
5101 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
5102 		res = check_condition_result;
5103 		goto fini;
5104 	}
5105 	if (zbc_zone_is_conv(zsp)) {
5106 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
5107 		res = check_condition_result;
5108 		goto fini;
5109 	}
5110 
5111 	zbc_finish_zone(devip, zsp, true);
5112 fini:
5113 	sdeb_write_unlock(sip);
5114 	return res;
5115 }
5116 
5117 static void zbc_rwp_zone(struct sdebug_dev_info *devip,
5118 			 struct sdeb_zone_state *zsp)
5119 {
5120 	enum sdebug_z_cond zc;
5121 	struct sdeb_store_info *sip = devip2sip(devip, false);
5122 
5123 	if (!zbc_zone_is_seq(zsp))
5124 		return;
5125 
5126 	zc = zsp->z_cond;
5127 	if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
5128 		zbc_close_zone(devip, zsp);
5129 
5130 	if (zsp->z_cond == ZC4_CLOSED)
5131 		devip->nr_closed--;
5132 
5133 	if (zsp->z_wp > zsp->z_start)
5134 		memset(sip->storep + zsp->z_start * sdebug_sector_size, 0,
5135 		       (zsp->z_wp - zsp->z_start) * sdebug_sector_size);
5136 
5137 	zsp->z_non_seq_resource = false;
5138 	zsp->z_wp = zsp->z_start;
5139 	zsp->z_cond = ZC1_EMPTY;
5140 }
5141 
5142 static void zbc_rwp_all(struct sdebug_dev_info *devip)
5143 {
5144 	unsigned int i;
5145 
5146 	for (i = 0; i < devip->nr_zones; i++)
5147 		zbc_rwp_zone(devip, &devip->zstate[i]);
5148 }
5149 
5150 static int resp_rwp_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
5151 {
5152 	struct sdeb_zone_state *zsp;
5153 	int res = 0;
5154 	u64 z_id;
5155 	u8 *cmd = scp->cmnd;
5156 	bool all = cmd[14] & 0x01;
5157 	struct sdeb_store_info *sip = devip2sip(devip, false);
5158 
5159 	if (!sdebug_dev_is_zoned(devip)) {
5160 		mk_sense_invalid_opcode(scp);
5161 		return check_condition_result;
5162 	}
5163 
5164 	sdeb_write_lock(sip);
5165 
5166 	if (all) {
5167 		zbc_rwp_all(devip);
5168 		goto fini;
5169 	}
5170 
5171 	z_id = get_unaligned_be64(cmd + 2);
5172 	if (z_id >= sdebug_capacity) {
5173 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
5174 		res = check_condition_result;
5175 		goto fini;
5176 	}
5177 
5178 	zsp = zbc_zone(devip, z_id);
5179 	if (z_id != zsp->z_start) {
5180 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
5181 		res = check_condition_result;
5182 		goto fini;
5183 	}
5184 	if (zbc_zone_is_conv(zsp)) {
5185 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
5186 		res = check_condition_result;
5187 		goto fini;
5188 	}
5189 
5190 	zbc_rwp_zone(devip, zsp);
5191 fini:
5192 	sdeb_write_unlock(sip);
5193 	return res;
5194 }
5195 
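/*
 * Editor's summary (added for clarity; derived from the zone handlers
 * above and from zbc_open_zone()/zbc_close_zone() earlier in this file):
 *
 *	OPEN ZONE:   EMPTY / IMPLICIT OPEN / CLOSED -> EXPLICIT OPEN
 *	CLOSE ZONE:  IMPLICIT / EXPLICIT OPEN -> CLOSED
 *	             (or EMPTY when the write pointer is still at z_start)
 *	FINISH ZONE: OPEN / CLOSED (and EMPTY when a single zone is
 *	             finished directly) -> FULL, with z_wp = z_start + z_size
 *	RESET WP:    any sequential zone -> EMPTY, with z_wp reset to
 *	             z_start and the zone's backing store zeroed
 */
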
5196 static u32 get_tag(struct scsi_cmnd *cmnd)
5197 {
5198 	return blk_mq_unique_tag(scsi_cmd_to_rq(cmnd));
5199 }
5200 
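/*
 * Editor's note (added): blk_mq_unique_tag() encodes the hardware queue
 * index in the upper 16 bits and the per-queue tag in the lower 16 bits;
 * blk_mq_unique_tag_to_hwq() and blk_mq_unique_tag_to_tag() recover the
 * two halves where this driver needs them (e.g. sdebug_submit_queue_iter()).
 */
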
5201 /* Queued (deferred) command completions converge here. */
5202 static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
5203 {
5204 	struct sdebug_queued_cmd *sqcp = container_of(sd_dp, struct sdebug_queued_cmd, sd_dp);
5205 	unsigned long flags;
5206 	struct scsi_cmnd *scp = sqcp->scmd;
5207 	struct sdebug_scsi_cmd *sdsc;
5208 	bool aborted;
5209 
5210 	if (sdebug_statistics) {
5211 		atomic_inc(&sdebug_completions);
5212 		if (raw_smp_processor_id() != sd_dp->issuing_cpu)
5213 			atomic_inc(&sdebug_miss_cpus);
5214 	}
5215 
5216 	if (!scp) {
5217 		pr_err("scmd=NULL\n");
5218 		goto out;
5219 	}
5220 
5221 	sdsc = scsi_cmd_priv(scp);
5222 	spin_lock_irqsave(&sdsc->lock, flags);
5223 	aborted = sd_dp->aborted;
5224 	if (unlikely(aborted))
5225 		sd_dp->aborted = false;
5226 	ASSIGN_QUEUED_CMD(scp, NULL);
5227 
5228 	spin_unlock_irqrestore(&sdsc->lock, flags);
5229 
5230 	if (aborted) {
5231 		pr_info("bypassing scsi_done() due to aborted cmd, kicking-off EH\n");
5232 		blk_abort_request(scsi_cmd_to_rq(scp));
5233 		goto out;
5234 	}
5235 
5236 	scsi_done(scp); /* callback to mid level */
5237 out:
5238 	sdebug_free_queued_cmd(sqcp);
5239 }
5240 
5241 /* When the high-resolution timer fires, this function is called. */
5242 static enum hrtimer_restart sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
5243 {
5244 	struct sdebug_defer *sd_dp = container_of(timer, struct sdebug_defer,
5245 						  hrt);
5246 	sdebug_q_cmd_complete(sd_dp);
5247 	return HRTIMER_NORESTART;
5248 }
5249 
5250 /* When the work queue runs the queued work item, it calls this function. */
5251 static void sdebug_q_cmd_wq_complete(struct work_struct *work)
5252 {
5253 	struct sdebug_defer *sd_dp = container_of(work, struct sdebug_defer,
5254 						  ew.work);
5255 	sdebug_q_cmd_complete(sd_dp);
5256 }
5257 
5258 static bool got_shared_uuid;
5259 static uuid_t shared_uuid;
5260 
5261 static int sdebug_device_create_zones(struct sdebug_dev_info *devip)
5262 {
5263 	struct sdeb_zone_state *zsp;
5264 	sector_t capacity = get_sdebug_capacity();
5265 	sector_t conv_capacity;
5266 	sector_t zstart = 0;
5267 	unsigned int i;
5268 
5269 	/*
5270 	 * Set the zone size: if sdeb_zbc_zone_size_mb is not set, figure out
5271 	 * a zone size allowing for at least 4 zones on the device. Otherwise,
5272 	 * use the specified zone size, checking that at least 2 zones can be
5273 	 * created for the device.
5274 	 */
5275 	if (!sdeb_zbc_zone_size_mb) {
5276 		devip->zsize = (DEF_ZBC_ZONE_SIZE_MB * SZ_1M)
5277 			>> ilog2(sdebug_sector_size);
5278 		while (capacity < devip->zsize << 2 && devip->zsize >= 2)
5279 			devip->zsize >>= 1;
5280 		if (devip->zsize < 2) {
5281 			pr_err("Device capacity too small\n");
5282 			return -EINVAL;
5283 		}
5284 	} else {
5285 		if (!is_power_of_2(sdeb_zbc_zone_size_mb)) {
5286 			pr_err("Zone size is not a power of 2\n");
5287 			return -EINVAL;
5288 		}
5289 		devip->zsize = (sdeb_zbc_zone_size_mb * SZ_1M)
5290 			>> ilog2(sdebug_sector_size);
5291 		if (devip->zsize >= capacity) {
5292 			pr_err("Zone size too large for device capacity\n");
5293 			return -EINVAL;
5294 		}
5295 	}
5296 
5297 	devip->zsize_shift = ilog2(devip->zsize);
5298 	devip->nr_zones = (capacity + devip->zsize - 1) >> devip->zsize_shift;
5299 
5300 	if (sdeb_zbc_zone_cap_mb == 0) {
5301 		devip->zcap = devip->zsize;
5302 	} else {
5303 		devip->zcap = (sdeb_zbc_zone_cap_mb * SZ_1M) >>
5304 			      ilog2(sdebug_sector_size);
5305 		if (devip->zcap > devip->zsize) {
5306 			pr_err("Zone capacity too large\n");
5307 			return -EINVAL;
5308 		}
5309 	}
5310 
5311 	conv_capacity = (sector_t)sdeb_zbc_nr_conv << devip->zsize_shift;
5312 	if (conv_capacity >= capacity) {
5313 		pr_err("Number of conventional zones too large\n");
5314 		return -EINVAL;
5315 	}
5316 	devip->nr_conv_zones = sdeb_zbc_nr_conv;
5317 	devip->nr_seq_zones = ALIGN(capacity - conv_capacity, devip->zsize) >>
5318 			      devip->zsize_shift;
5319 	devip->nr_zones = devip->nr_conv_zones + devip->nr_seq_zones;
5320 
5321 	/* Add gap zones if zone capacity is smaller than the zone size */
5322 	if (devip->zcap < devip->zsize)
5323 		devip->nr_zones += devip->nr_seq_zones;
5324 
5325 	if (devip->zmodel == BLK_ZONED_HM) {
5326 		/* zbc_max_open_zones can be 0, meaning "not reported" */
5327 		if (sdeb_zbc_max_open >= devip->nr_zones - 1)
5328 			devip->max_open = (devip->nr_zones - 1) / 2;
5329 		else
5330 			devip->max_open = sdeb_zbc_max_open;
5331 	}
5332 
5333 	devip->zstate = kcalloc(devip->nr_zones,
5334 				sizeof(struct sdeb_zone_state), GFP_KERNEL);
5335 	if (!devip->zstate)
5336 		return -ENOMEM;
5337 
5338 	for (i = 0; i < devip->nr_zones; i++) {
5339 		zsp = &devip->zstate[i];
5340 
5341 		zsp->z_start = zstart;
5342 
5343 		if (i < devip->nr_conv_zones) {
5344 			zsp->z_type = ZBC_ZTYPE_CNV;
5345 			zsp->z_cond = ZBC_NOT_WRITE_POINTER;
5346 			zsp->z_wp = (sector_t)-1;
5347 			zsp->z_size =
5348 				min_t(u64, devip->zsize, capacity - zstart);
5349 		} else if ((zstart & (devip->zsize - 1)) == 0) {
5350 			if (devip->zmodel == BLK_ZONED_HM)
5351 				zsp->z_type = ZBC_ZTYPE_SWR;
5352 			else
5353 				zsp->z_type = ZBC_ZTYPE_SWP;
5354 			zsp->z_cond = ZC1_EMPTY;
5355 			zsp->z_wp = zsp->z_start;
5356 			zsp->z_size =
5357 				min_t(u64, devip->zcap, capacity - zstart);
5358 		} else {
5359 			zsp->z_type = ZBC_ZTYPE_GAP;
5360 			zsp->z_cond = ZBC_NOT_WRITE_POINTER;
5361 			zsp->z_wp = (sector_t)-1;
5362 			zsp->z_size = min_t(u64, devip->zsize - devip->zcap,
5363 					    capacity - zstart);
5364 		}
5365 
5366 		WARN_ON_ONCE((int)zsp->z_size <= 0);
5367 		zstart += zsp->z_size;
5368 	}
5369 
5370 	return 0;
5371 }
5372 
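/*
 * Editor's worked example of the geometry computed above (illustrative
 * values, not from the source): with sector_size=512, a capacity of
 * 262144 sectors (128 MiB), zone_nr_conv=0, zone_size_mb=4 and
 * zone_cap_mb=3:
 *
 *	zsize = (4 * SZ_1M) >> 9 = 8192 sectors, zsize_shift = 13
 *	zcap  = (3 * SZ_1M) >> 9 = 6144 sectors
 *	nr_seq_zones = ALIGN(262144, 8192) >> 13 = 32
 *	nr_zones = 32 + 32 = 64, since zcap < zsize adds one gap zone
 *	(8192 - 6144 = 2048 sectors) after each sequential zone
 */
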
5373 static struct sdebug_dev_info *sdebug_device_create(
5374 			struct sdebug_host_info *sdbg_host, gfp_t flags)
5375 {
5376 	struct sdebug_dev_info *devip;
5377 
5378 	devip = kzalloc(sizeof(*devip), flags);
5379 	if (devip) {
5380 		if (sdebug_uuid_ctl == 1)
5381 			uuid_gen(&devip->lu_name);
5382 		else if (sdebug_uuid_ctl == 2) {
5383 			if (got_shared_uuid)
5384 				devip->lu_name = shared_uuid;
5385 			else {
5386 				uuid_gen(&shared_uuid);
5387 				got_shared_uuid = true;
5388 				devip->lu_name = shared_uuid;
5389 			}
5390 		}
5391 		devip->sdbg_host = sdbg_host;
5392 		if (sdeb_zbc_in_use) {
5393 			devip->zmodel = sdeb_zbc_model;
5394 			if (sdebug_device_create_zones(devip)) {
5395 				kfree(devip);
5396 				return NULL;
5397 			}
5398 		} else {
5399 			devip->zmodel = BLK_ZONED_NONE;
5400 		}
5401 		devip->create_ts = ktime_get_boottime();
5402 		atomic_set(&devip->stopped, (sdeb_tur_ms_to_ready > 0 ? 2 : 0));
5403 		spin_lock_init(&devip->list_lock);
5404 		INIT_LIST_HEAD(&devip->inject_err_list);
5405 		list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
5406 	}
5407 	return devip;
5408 }
5409 
5410 static struct sdebug_dev_info *find_build_dev_info(struct scsi_device *sdev)
5411 {
5412 	struct sdebug_host_info *sdbg_host;
5413 	struct sdebug_dev_info *open_devip = NULL;
5414 	struct sdebug_dev_info *devip;
5415 
5416 	sdbg_host = shost_to_sdebug_host(sdev->host);
5417 
5418 	list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
5419 		if ((devip->used) && (devip->channel == sdev->channel) &&
5420 		    (devip->target == sdev->id) &&
5421 		    (devip->lun == sdev->lun))
5422 			return devip;
5423 		else {
5424 			if ((!devip->used) && (!open_devip))
5425 				open_devip = devip;
5426 		}
5427 	}
5428 	if (!open_devip) { /* try and make a new one */
5429 		open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
5430 		if (!open_devip) {
5431 			pr_err("out of memory at line %d\n", __LINE__);
5432 			return NULL;
5433 		}
5434 	}
5435 
5436 	open_devip->channel = sdev->channel;
5437 	open_devip->target = sdev->id;
5438 	open_devip->lun = sdev->lun;
5439 	open_devip->sdbg_host = sdbg_host;
5440 	set_bit(SDEBUG_UA_POOCCUR, open_devip->uas_bm);
5441 	open_devip->used = true;
5442 	return open_devip;
5443 }
5444 
5445 static int scsi_debug_slave_alloc(struct scsi_device *sdp)
5446 {
5447 	if (sdebug_verbose)
5448 		pr_info("slave_alloc <%u %u %u %llu>\n",
5449 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5450 
5451 	return 0;
5452 }
5453 
5454 static int scsi_debug_slave_configure(struct scsi_device *sdp)
5455 {
5456 	struct sdebug_dev_info *devip =
5457 			(struct sdebug_dev_info *)sdp->hostdata;
5458 	struct dentry *dentry;
5459 
5460 	if (sdebug_verbose)
5461 		pr_info("slave_configure <%u %u %u %llu>\n",
5462 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5463 	if (sdp->host->max_cmd_len != SDEBUG_MAX_CMD_LEN)
5464 		sdp->host->max_cmd_len = SDEBUG_MAX_CMD_LEN;
5465 	if (devip == NULL) {
5466 		devip = find_build_dev_info(sdp);
5467 		if (devip == NULL)
5468 			return 1;  /* no resources, will be marked offline */
5469 	}
5470 	sdp->hostdata = devip;
5471 	if (sdebug_no_uld)
5472 		sdp->no_uld_attach = 1;
5473 	config_cdb_len(sdp);
5474 
5475 	if (sdebug_allow_restart)
5476 		sdp->allow_restart = 1;
5477 
5478 	devip->debugfs_entry = debugfs_create_dir(dev_name(&sdp->sdev_dev),
5479 				sdebug_debugfs_root);
5480 	if (IS_ERR_OR_NULL(devip->debugfs_entry))
5481 		pr_info("%s: failed to create debugfs directory for device %s\n",
5482 			__func__, dev_name(&sdp->sdev_gendev));
5483 
5484 	dentry = debugfs_create_file("error", 0600, devip->debugfs_entry, sdp,
5485 				&sdebug_error_fops);
5486 	if (IS_ERR_OR_NULL(dentry))
5487 		pr_info("%s: failed to create error file for device %s\n",
5488 			__func__, dev_name(&sdp->sdev_gendev));
5489 
5490 	return 0;
5491 }
5492 
5493 static void scsi_debug_slave_destroy(struct scsi_device *sdp)
5494 {
5495 	struct sdebug_dev_info *devip =
5496 		(struct sdebug_dev_info *)sdp->hostdata;
5497 	struct sdebug_err_inject *err;
5498 
5499 	if (sdebug_verbose)
5500 		pr_info("slave_destroy <%u %u %u %llu>\n",
5501 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5502 
5503 	if (!devip)
5504 		return;
5505 
5506 	spin_lock(&devip->list_lock);
5507 	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
5508 		list_del_rcu(&err->list);
5509 		call_rcu(&err->rcu, sdebug_err_free);
5510 	}
5511 	spin_unlock(&devip->list_lock);
5512 
5513 	debugfs_remove(devip->debugfs_entry);
5514 
5515 	/* make this slot available for re-use */
5516 	devip->used = false;
5517 	sdp->hostdata = NULL;
5518 }
5519 
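/*
 * Editor's note (added): slave_destroy above removes each injection rule
 * with list_del_rcu() and defers the actual free to sdebug_err_free()
 * via call_rcu(), so lockless readers walking inject_err_list under
 * rcu_read_lock() cannot observe freed memory.
 */
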
5520 /* Returns true if we require the queued memory to be freed by the caller. */
5521 static bool stop_qc_helper(struct sdebug_defer *sd_dp,
5522 			   enum sdeb_defer_type defer_t)
5523 {
5524 	if (defer_t == SDEB_DEFER_HRT) {
5525 		int res = hrtimer_try_to_cancel(&sd_dp->hrt);
5526 
5527 		switch (res) {
5528 		case 0: /* Not active, it must have already run */
5529 		case -1: /* The callback is currently executing */
5530 			return false;
5531 		case 1: /* Was active, we've now cancelled */
5532 		default:
5533 			return true;
5534 		}
5535 	} else if (defer_t == SDEB_DEFER_WQ) {
5536 		/* Cancel if pending */
5537 		if (cancel_work_sync(&sd_dp->ew.work))
5538 			return true;
5539 		/* Was not pending, so it must have run */
5540 		return false;
5541 	} else if (defer_t == SDEB_DEFER_POLL) {
5542 		return true;
5543 	}
5544 
5545 	return false;
5546 }
5547 
5548 
5549 static bool scsi_debug_stop_cmnd(struct scsi_cmnd *cmnd)
5550 {
5551 	enum sdeb_defer_type l_defer_t;
5552 	struct sdebug_defer *sd_dp;
5553 	struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmnd);
5554 	struct sdebug_queued_cmd *sqcp = TO_QUEUED_CMD(cmnd);
5555 
5556 	lockdep_assert_held(&sdsc->lock);
5557 
5558 	if (!sqcp)
5559 		return false;
5560 	sd_dp = &sqcp->sd_dp;
5561 	l_defer_t = READ_ONCE(sd_dp->defer_t);
5562 	ASSIGN_QUEUED_CMD(cmnd, NULL);
5563 
5564 	if (stop_qc_helper(sd_dp, l_defer_t))
5565 		sdebug_free_queued_cmd(sqcp);
5566 
5567 	return true;
5568 }
5569 
5570 /*
5571  * Called from scsi_debug_abort() only, which is for a timed-out cmd.
5572  */
5573 static bool scsi_debug_abort_cmnd(struct scsi_cmnd *cmnd)
5574 {
5575 	struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmnd);
5576 	unsigned long flags;
5577 	bool res;
5578 
5579 	spin_lock_irqsave(&sdsc->lock, flags);
5580 	res = scsi_debug_stop_cmnd(cmnd);
5581 	spin_unlock_irqrestore(&sdsc->lock, flags);
5582 
5583 	return res;
5584 }
5585 
5586 /*
5587  * All we can do is set the cmnd as internally aborted and wait for it to
5588  * finish. We cannot call scsi_done() as the normal completion path may do so.
5589  */
5590 static bool sdebug_stop_cmnd(struct request *rq, void *data)
5591 {
5592 	scsi_debug_abort_cmnd(blk_mq_rq_to_pdu(rq));
5593 
5594 	return true;
5595 }
5596 
5597 /* Deletes (stops) timers or work queues of all queued commands */
5598 static void stop_all_queued(void)
5599 {
5600 	struct sdebug_host_info *sdhp;
5601 
5602 	mutex_lock(&sdebug_host_list_mutex);
5603 	list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
5604 		struct Scsi_Host *shost = sdhp->shost;
5605 
5606 		blk_mq_tagset_busy_iter(&shost->tag_set, sdebug_stop_cmnd, NULL);
5607 	}
5608 	mutex_unlock(&sdebug_host_list_mutex);
5609 }
5610 
5611 static int sdebug_fail_abort(struct scsi_cmnd *cmnd)
5612 {
5613 	struct scsi_device *sdp = cmnd->device;
5614 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
5615 	struct sdebug_err_inject *err;
5616 	unsigned char *cmd = cmnd->cmnd;
5617 	int ret = 0;
5618 
5619 	if (devip == NULL)
5620 		return 0;
5621 
5622 	rcu_read_lock();
5623 	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
5624 		if (err->type == ERR_ABORT_CMD_FAILED &&
5625 		    (err->cmd == cmd[0] || err->cmd == 0xff)) {
5626 			ret = !!err->cnt;
5627 			if (err->cnt < 0)
5628 				err->cnt++;
5629 
5630 			rcu_read_unlock();
5631 			return ret;
5632 		}
5633 	}
5634 	rcu_read_unlock();
5635 
5636 	return 0;
5637 }
5638 
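/*
 * Editor's note on the cnt handling above (added for clarity): with
 * ERR_ABORT_CMD_FAILED rules, a positive err->cnt keeps forcing the
 * abort to fail indefinitely, a negative cnt fails it |cnt| times
 * (cnt is stepped toward zero on each hit) and then stops, and
 * cnt == 0 means the rule no longer fires.
 */
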
5639 static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
5640 {
5641 	bool ok = scsi_debug_abort_cmnd(SCpnt);
5642 	u8 *cmd = SCpnt->cmnd;
5643 	u8 opcode = cmd[0];
5644 
5645 	++num_aborts;
5646 
5647 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5648 		sdev_printk(KERN_INFO, SCpnt->device,
5649 			    "%s: command%s found\n", __func__,
5650 			    ok ? "" : " not");
5651 
5652 	if (sdebug_fail_abort(SCpnt)) {
5653 		scmd_printk(KERN_INFO, SCpnt, "fail abort command 0x%x\n",
5654 			    opcode);
5655 		return FAILED;
5656 	}
5657 
5658 	return SUCCESS;
5659 }
5660 
5661 static bool scsi_debug_stop_all_queued_iter(struct request *rq, void *data)
5662 {
5663 	struct scsi_device *sdp = data;
5664 	struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
5665 
5666 	if (scmd->device == sdp)
5667 		scsi_debug_abort_cmnd(scmd);
5668 
5669 	return true;
5670 }
5671 
5672 /* Deletes (stops) timers or work queues of all queued commands per sdev */
5673 static void scsi_debug_stop_all_queued(struct scsi_device *sdp)
5674 {
5675 	struct Scsi_Host *shost = sdp->host;
5676 
5677 	blk_mq_tagset_busy_iter(&shost->tag_set,
5678 				scsi_debug_stop_all_queued_iter, sdp);
5679 }
5680 
5681 static int sdebug_fail_lun_reset(struct scsi_cmnd *cmnd)
5682 {
5683 	struct scsi_device *sdp = cmnd->device;
5684 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
5685 	struct sdebug_err_inject *err;
5686 	unsigned char *cmd = cmnd->cmnd;
5687 	int ret = 0;
5688 
5689 	if (devip == NULL)
5690 		return 0;
5691 
5692 	rcu_read_lock();
5693 	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
5694 		if (err->type == ERR_LUN_RESET_FAILED &&
5695 		    (err->cmd == cmd[0] || err->cmd == 0xff)) {
5696 			ret = !!err->cnt;
5697 			if (err->cnt < 0)
5698 				err->cnt++;
5699 
5700 			rcu_read_unlock();
5701 			return ret;
5702 		}
5703 	}
5704 	rcu_read_unlock();
5705 
5706 	return 0;
5707 }
5708 
5709 static int scsi_debug_device_reset(struct scsi_cmnd *SCpnt)
5710 {
5711 	struct scsi_device *sdp = SCpnt->device;
5712 	struct sdebug_dev_info *devip = sdp->hostdata;
5713 	u8 *cmd = SCpnt->cmnd;
5714 	u8 opcode = cmd[0];
5715 
5716 	++num_dev_resets;
5717 
5718 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5719 		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5720 
5721 	scsi_debug_stop_all_queued(sdp);
5722 	if (devip)
5723 		set_bit(SDEBUG_UA_POR, devip->uas_bm);
5724 
5725 	if (sdebug_fail_lun_reset(SCpnt)) {
5726 		scmd_printk(KERN_INFO, SCpnt, "fail lun reset 0x%x\n", opcode);
5727 		return FAILED;
5728 	}
5729 
5730 	return SUCCESS;
5731 }
5732 
5733 static int sdebug_fail_target_reset(struct scsi_cmnd *cmnd)
5734 {
5735 	struct scsi_target *starget = scsi_target(cmnd->device);
5736 	struct sdebug_target_info *targetip =
5737 		(struct sdebug_target_info *)starget->hostdata;
5738 
5739 	if (targetip)
5740 		return targetip->reset_fail;
5741 
5742 	return 0;
5743 }
5744 
5745 static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt)
5746 {
5747 	struct scsi_device *sdp = SCpnt->device;
5748 	struct sdebug_host_info *sdbg_host = shost_to_sdebug_host(sdp->host);
5749 	struct sdebug_dev_info *devip;
5750 	u8 *cmd = SCpnt->cmnd;
5751 	u8 opcode = cmd[0];
5752 	int k = 0;
5753 
5754 	++num_target_resets;
5755 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5756 		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5757 
5758 	list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
5759 		if (devip->target == sdp->id) {
5760 			set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5761 			++k;
5762 		}
5763 	}
5764 
5765 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5766 		sdev_printk(KERN_INFO, sdp,
5767 			    "%s: %d device(s) found in target\n", __func__, k);
5768 
5769 	if (sdebug_fail_target_reset(SCpnt)) {
5770 		scmd_printk(KERN_INFO, SCpnt, "fail target reset 0x%x\n",
5771 			    opcode);
5772 		return FAILED;
5773 	}
5774 
5775 	return SUCCESS;
5776 }
5777 
5778 static int scsi_debug_bus_reset(struct scsi_cmnd *SCpnt)
5779 {
5780 	struct scsi_device *sdp = SCpnt->device;
5781 	struct sdebug_host_info *sdbg_host = shost_to_sdebug_host(sdp->host);
5782 	struct sdebug_dev_info *devip;
5783 	int k = 0;
5784 
5785 	++num_bus_resets;
5786 
5787 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5788 		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5789 
5790 	list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
5791 		set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5792 		++k;
5793 	}
5794 
5795 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5796 		sdev_printk(KERN_INFO, sdp,
5797 			    "%s: %d device(s) found in host\n", __func__, k);
5798 	return SUCCESS;
5799 }
5800 
5801 static int scsi_debug_host_reset(struct scsi_cmnd *SCpnt)
5802 {
5803 	struct sdebug_host_info *sdbg_host;
5804 	struct sdebug_dev_info *devip;
5805 	int k = 0;
5806 
5807 	++num_host_resets;
5808 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5809 		sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__);
5810 	mutex_lock(&sdebug_host_list_mutex);
5811 	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
5812 		list_for_each_entry(devip, &sdbg_host->dev_info_list,
5813 				    dev_list) {
5814 			set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5815 			++k;
5816 		}
5817 	}
5818 	mutex_unlock(&sdebug_host_list_mutex);
5819 	stop_all_queued();
5820 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5821 		sdev_printk(KERN_INFO, SCpnt->device,
5822 			    "%s: %d device(s) found\n", __func__, k);
5823 	return SUCCESS;
5824 }
5825 
5826 static void sdebug_build_parts(unsigned char *ramp, unsigned long store_size)
5827 {
5828 	struct msdos_partition *pp;
5829 	int starts[SDEBUG_MAX_PARTS + 2], max_part_secs;
5830 	int sectors_per_part, num_sectors, k;
5831 	int heads_by_sects, start_sec, end_sec;
5832 
5833 	/* assume partition table already zeroed */
5834 	if ((sdebug_num_parts < 1) || (store_size < 1048576))
5835 		return;
5836 	if (sdebug_num_parts > SDEBUG_MAX_PARTS) {
5837 		sdebug_num_parts = SDEBUG_MAX_PARTS;
5838 		pr_warn("reducing partitions to %d\n", SDEBUG_MAX_PARTS);
5839 	}
5840 	num_sectors = (int)get_sdebug_capacity();
5841 	sectors_per_part = (num_sectors - sdebug_sectors_per)
5842 			   / sdebug_num_parts;
5843 	heads_by_sects = sdebug_heads * sdebug_sectors_per;
5844 	starts[0] = sdebug_sectors_per;
5845 	max_part_secs = sectors_per_part;
5846 	for (k = 1; k < sdebug_num_parts; ++k) {
5847 		starts[k] = ((k * sectors_per_part) / heads_by_sects)
5848 			    * heads_by_sects;
5849 		if (starts[k] - starts[k - 1] < max_part_secs)
5850 			max_part_secs = starts[k] - starts[k - 1];
5851 	}
5852 	starts[sdebug_num_parts] = num_sectors;
5853 	starts[sdebug_num_parts + 1] = 0;
5854 
5855 	ramp[510] = 0x55;	/* magic partition markings */
5856 	ramp[511] = 0xAA;
5857 	pp = (struct msdos_partition *)(ramp + 0x1be);
5858 	for (k = 0; starts[k + 1]; ++k, ++pp) {
5859 		start_sec = starts[k];
5860 		end_sec = starts[k] + max_part_secs - 1;
5861 		pp->boot_ind = 0;
5862 
5863 		pp->cyl = start_sec / heads_by_sects;
5864 		pp->head = (start_sec - (pp->cyl * heads_by_sects))
5865 			   / sdebug_sectors_per;
5866 		pp->sector = (start_sec % sdebug_sectors_per) + 1;
5867 
5868 		pp->end_cyl = end_sec / heads_by_sects;
5869 		pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
5870 			       / sdebug_sectors_per;
5871 		pp->end_sector = (end_sec % sdebug_sectors_per) + 1;
5872 
5873 		pp->start_sect = cpu_to_le32(start_sec);
5874 		pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1);
5875 		pp->sys_ind = 0x83;	/* plain Linux partition */
5876 	}
5877 }
5878 
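/*
 * Editor's worked example of the CHS arithmetic above (illustrative
 * numbers): with sdebug_heads = 8 and sdebug_sectors_per = 32,
 * heads_by_sects = 256. For start_sec = 4000:
 *
 *	cyl    = 4000 / 256              = 15
 *	head   = (4000 - 15 * 256) / 32  = 5
 *	sector = (4000 % 32) + 1         = 1
 */
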
5879 static void block_unblock_all_queues(bool block)
5880 {
5881 	struct sdebug_host_info *sdhp;
5882 
5883 	lockdep_assert_held(&sdebug_host_list_mutex);
5884 
5885 	list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
5886 		struct Scsi_Host *shost = sdhp->shost;
5887 
5888 		if (block)
5889 			scsi_block_requests(shost);
5890 		else
5891 			scsi_unblock_requests(shost);
5892 	}
5893 }
5894 
5895 /* Adjust (by rounding down) the sdebug_cmnd_count so abs(every_nth)-1
5896  * commands will be processed normally before triggers occur.
5897  */
5898 static void tweak_cmnd_count(void)
5899 {
5900 	int count, modulo;
5901 
5902 	modulo = abs(sdebug_every_nth);
5903 	if (modulo < 2)
5904 		return;
5905 
5906 	mutex_lock(&sdebug_host_list_mutex);
5907 	block_unblock_all_queues(true);
5908 	count = atomic_read(&sdebug_cmnd_count);
5909 	atomic_set(&sdebug_cmnd_count, (count / modulo) * modulo);
5910 	block_unblock_all_queues(false);
5911 	mutex_unlock(&sdebug_host_list_mutex);
5912 }
5913 
5914 static void clear_queue_stats(void)
5915 {
5916 	atomic_set(&sdebug_cmnd_count, 0);
5917 	atomic_set(&sdebug_completions, 0);
5918 	atomic_set(&sdebug_miss_cpus, 0);
5919 	atomic_set(&sdebug_a_tsf, 0);
5920 }
5921 
5922 static bool inject_on_this_cmd(void)
5923 {
5924 	if (sdebug_every_nth == 0)
5925 		return false;
5926 	return (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) == 0;
5927 }
5928 
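/*
 * Editor's example (added): with every_nth = 100, inject_on_this_cmd()
 * fires whenever sdebug_cmnd_count is a multiple of 100;
 * tweak_cmnd_count() rounds the running count down to such a multiple
 * so that abs(every_nth) - 1 commands complete normally before the
 * next trigger.
 */
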
5929 #define INCLUSIVE_TIMING_MAX_NS 1000000		/* 1 millisecond */
5930 
5931 
5932 void sdebug_free_queued_cmd(struct sdebug_queued_cmd *sqcp)
5933 {
5934 	if (sqcp)
5935 		kmem_cache_free(queued_cmd_cache, sqcp);
5936 }
5937 
5938 static struct sdebug_queued_cmd *sdebug_alloc_queued_cmd(struct scsi_cmnd *scmd)
5939 {
5940 	struct sdebug_queued_cmd *sqcp;
5941 	struct sdebug_defer *sd_dp;
5942 
5943 	sqcp = kmem_cache_zalloc(queued_cmd_cache, GFP_ATOMIC);
5944 	if (!sqcp)
5945 		return NULL;
5946 
5947 	sd_dp = &sqcp->sd_dp;
5948 
5949 	hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
5950 	sd_dp->hrt.function = sdebug_q_cmd_hrt_complete;
5951 	INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);
5952 
5953 	sqcp->scmd = scmd;
5954 
5955 	return sqcp;
5956 }
5957 
5958 /* Complete the processing of the thread that queued a SCSI command to this
5959  * driver. It either completes the command by calling scsi_done() or
5960  * schedules an hrtimer or work queue callback and then returns 0. Returns
5961  * SCSI_MLQUEUE_HOST_BUSY if temporarily out of resources.
5962  */
5963 static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
5964 			 int scsi_result,
5965 			 int (*pfp)(struct scsi_cmnd *,
5966 				    struct sdebug_dev_info *),
5967 			 int delta_jiff, int ndelay)
5968 {
5969 	struct request *rq = scsi_cmd_to_rq(cmnd);
5970 	bool polled = rq->cmd_flags & REQ_POLLED;
5971 	struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmnd);
5972 	unsigned long flags;
5973 	u64 ns_from_boot = 0;
5974 	struct sdebug_queued_cmd *sqcp;
5975 	struct scsi_device *sdp;
5976 	struct sdebug_defer *sd_dp;
5977 
5978 	if (unlikely(devip == NULL)) {
5979 		if (scsi_result == 0)
5980 			scsi_result = DID_NO_CONNECT << 16;
5981 		goto respond_in_thread;
5982 	}
5983 	sdp = cmnd->device;
5984 
5985 	if (delta_jiff == 0)
5986 		goto respond_in_thread;
5987 
5988 
5989 	if (unlikely(sdebug_every_nth && (SDEBUG_OPT_RARE_TSF & sdebug_opts) &&
5990 		     (scsi_result == 0))) {
5991 		int num_in_q = scsi_device_busy(sdp);
5992 		int qdepth = cmnd->device->queue_depth;
5993 
5994 		if ((num_in_q == qdepth) &&
5995 		    (atomic_inc_return(&sdebug_a_tsf) >=
5996 		     abs(sdebug_every_nth))) {
5997 			atomic_set(&sdebug_a_tsf, 0);
5998 			scsi_result = device_qfull_result;
5999 
6000 			if (unlikely(SDEBUG_OPT_Q_NOISE & sdebug_opts))
6001 				sdev_printk(KERN_INFO, sdp, "%s: num_in_q=%d +1, <inject> status: TASK SET FULL\n",
6002 					    __func__, num_in_q);
6003 		}
6004 	}
6005 
6006 	sqcp = sdebug_alloc_queued_cmd(cmnd);
6007 	if (!sqcp) {
6008 		pr_err("%s no alloc\n", __func__);
6009 		return SCSI_MLQUEUE_HOST_BUSY;
6010 	}
6011 	sd_dp = &sqcp->sd_dp;
6012 
6013 	if (polled)
6014 		ns_from_boot = ktime_get_boottime_ns();
6015 
6016 	/* one of the resp_*() response functions is called here */
6017 	cmnd->result = pfp ? pfp(cmnd, devip) : 0;
6018 	if (cmnd->result & SDEG_RES_IMMED_MASK) {
6019 		cmnd->result &= ~SDEG_RES_IMMED_MASK;
6020 		delta_jiff = ndelay = 0;
6021 	}
6022 	if (cmnd->result == 0 && scsi_result != 0)
6023 		cmnd->result = scsi_result;
6024 	if (cmnd->result == 0 && unlikely(sdebug_opts & SDEBUG_OPT_TRANSPORT_ERR)) {
6025 		if (atomic_read(&sdeb_inject_pending)) {
6026 			mk_sense_buffer(cmnd, ABORTED_COMMAND, TRANSPORT_PROBLEM, ACK_NAK_TO);
6027 			atomic_set(&sdeb_inject_pending, 0);
6028 			cmnd->result = check_condition_result;
6029 		}
6030 	}
6031 
6032 	if (unlikely(sdebug_verbose && cmnd->result))
6033 		sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n",
6034 			    __func__, cmnd->result);
6035 
6036 	if (delta_jiff > 0 || ndelay > 0) {
6037 		ktime_t kt;
6038 
6039 		if (delta_jiff > 0) {
6040 			u64 ns = jiffies_to_nsecs(delta_jiff);
6041 
6042 			if (sdebug_random && ns < U32_MAX) {
6043 				ns = get_random_u32_below((u32)ns);
6044 			} else if (sdebug_random) {
6045 				ns >>= 12;	/* scale to 4 usec precision */
6046 				if (ns < U32_MAX)	/* over 4 hours max */
6047 					ns = get_random_u32_below((u32)ns);
6048 				ns <<= 12;
6049 			}
6050 			kt = ns_to_ktime(ns);
6051 		} else {	/* ndelay has a 4.2 second max */
6052 			kt = sdebug_random ? get_random_u32_below((u32)ndelay) :
6053 					     (u32)ndelay;
6054 			if (ndelay < INCLUSIVE_TIMING_MAX_NS) {
6055 				u64 d = ktime_get_boottime_ns() - ns_from_boot;
6056 
6057 				if (kt <= d) {	/* elapsed duration >= kt */
6058 					/* call scsi_done() from this thread */
6059 					sdebug_free_queued_cmd(sqcp);
6060 					scsi_done(cmnd);
6061 					return 0;
6062 				}
6063 				/* otherwise reduce kt by elapsed time */
6064 				kt -= d;
6065 			}
6066 		}
6067 		if (sdebug_statistics)
6068 			sd_dp->issuing_cpu = raw_smp_processor_id();
6069 		if (polled) {
6070 			spin_lock_irqsave(&sdsc->lock, flags);
6071 			sd_dp->cmpl_ts = ktime_add(ns_to_ktime(ns_from_boot), kt);
6072 			ASSIGN_QUEUED_CMD(cmnd, sqcp);
6073 			WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_POLL);
6074 			spin_unlock_irqrestore(&sdsc->lock, flags);
6075 		} else {
6076 			/* schedule the invocation of scsi_done() for a later time */
6077 			spin_lock_irqsave(&sdsc->lock, flags);
6078 			ASSIGN_QUEUED_CMD(cmnd, sqcp);
6079 			WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_HRT);
6080 			hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED);
6081 			/*
6082 			 * The completion handler will try to grab sdsc->lock,
6083 			 * so there is no chance that the completion handler
6084 			 * will call scsi_done() until we release the lock
6085 			 * here (so ok to keep referencing sdsc).
6086 			 */
6087 			spin_unlock_irqrestore(&sdsc->lock, flags);
6088 		}
6089 	} else {	/* jdelay < 0, use work queue */
6090 		if (unlikely((sdebug_opts & SDEBUG_OPT_CMD_ABORT) &&
6091 			     atomic_read(&sdeb_inject_pending))) {
6092 			sd_dp->aborted = true;
6093 			atomic_set(&sdeb_inject_pending, 0);
6094 			sdev_printk(KERN_INFO, sdp, "abort request tag=%#x\n",
6095 				    blk_mq_unique_tag_to_tag(get_tag(cmnd)));
6096 		}
6097 
6098 		if (sdebug_statistics)
6099 			sd_dp->issuing_cpu = raw_smp_processor_id();
6100 		if (polled) {
6101 			spin_lock_irqsave(&sdsc->lock, flags);
6102 			ASSIGN_QUEUED_CMD(cmnd, sqcp);
6103 			sd_dp->cmpl_ts = ns_to_ktime(ns_from_boot);
6104 			WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_POLL);
6105 			spin_unlock_irqrestore(&sdsc->lock, flags);
6106 		} else {
6107 			spin_lock_irqsave(&sdsc->lock, flags);
6108 			ASSIGN_QUEUED_CMD(cmnd, sqcp);
6109 			WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_WQ);
6110 			schedule_work(&sd_dp->ew.work);
6111 			spin_unlock_irqrestore(&sdsc->lock, flags);
6112 		}
6113 	}
6114 
6115 	return 0;
6116 
6117 respond_in_thread:	/* call back to mid-layer using invocation thread */
6118 	cmnd->result = pfp != NULL ? pfp(cmnd, devip) : 0;
6119 	cmnd->result &= ~SDEG_RES_IMMED_MASK;
6120 	if (cmnd->result == 0 && scsi_result != 0)
6121 		cmnd->result = scsi_result;
6122 	scsi_done(cmnd);
6123 	return 0;
6124 }
6125 
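/*
 * Editor's summary of schedule_resp() above (added for clarity):
 *
 *	delta_jiff == 0              -> respond in the submitting thread
 *	delta_jiff > 0 or ndelay > 0 -> hrtimer completion (SDEB_DEFER_HRT)
 *	delta_jiff < 0               -> work queue completion (SDEB_DEFER_WQ)
 *	REQ_POLLED requests          -> left for the poll path to complete
 *	                                (SDEB_DEFER_POLL)
 */
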
6126 /* Note: The following macros create attribute files in the
6127    /sys/module/scsi_debug/parameters directory. Unfortunately this
6128    driver is not notified when one of those files changes, so it cannot
6129    trigger auxiliary actions as it can when the corresponding attribute
6130    in the /sys/bus/pseudo/drivers/scsi_debug directory is changed.
6131  */
6132 module_param_named(add_host, sdebug_add_host, int, S_IRUGO | S_IWUSR);
6133 module_param_named(ato, sdebug_ato, int, S_IRUGO);
6134 module_param_named(cdb_len, sdebug_cdb_len, int, 0644);
6135 module_param_named(clustering, sdebug_clustering, bool, S_IRUGO | S_IWUSR);
6136 module_param_named(delay, sdebug_jdelay, int, S_IRUGO | S_IWUSR);
6137 module_param_named(dev_size_mb, sdebug_dev_size_mb, int, S_IRUGO);
6138 module_param_named(dif, sdebug_dif, int, S_IRUGO);
6139 module_param_named(dix, sdebug_dix, int, S_IRUGO);
6140 module_param_named(dsense, sdebug_dsense, int, S_IRUGO | S_IWUSR);
6141 module_param_named(every_nth, sdebug_every_nth, int, S_IRUGO | S_IWUSR);
6142 module_param_named(fake_rw, sdebug_fake_rw, int, S_IRUGO | S_IWUSR);
6143 module_param_named(guard, sdebug_guard, uint, S_IRUGO);
6144 module_param_named(host_lock, sdebug_host_lock, bool, S_IRUGO | S_IWUSR);
6145 module_param_named(host_max_queue, sdebug_host_max_queue, int, S_IRUGO);
6146 module_param_string(inq_product, sdebug_inq_product_id,
6147 		    sizeof(sdebug_inq_product_id), S_IRUGO | S_IWUSR);
6148 module_param_string(inq_rev, sdebug_inq_product_rev,
6149 		    sizeof(sdebug_inq_product_rev), S_IRUGO | S_IWUSR);
6150 module_param_string(inq_vendor, sdebug_inq_vendor_id,
6151 		    sizeof(sdebug_inq_vendor_id), S_IRUGO | S_IWUSR);
6152 module_param_named(lbprz, sdebug_lbprz, int, S_IRUGO);
6153 module_param_named(lbpu, sdebug_lbpu, int, S_IRUGO);
6154 module_param_named(lbpws, sdebug_lbpws, int, S_IRUGO);
6155 module_param_named(lbpws10, sdebug_lbpws10, int, S_IRUGO);
6156 module_param_named(lowest_aligned, sdebug_lowest_aligned, int, S_IRUGO);
6157 module_param_named(lun_format, sdebug_lun_am_i, int, S_IRUGO | S_IWUSR);
6158 module_param_named(max_luns, sdebug_max_luns, int, S_IRUGO | S_IWUSR);
6159 module_param_named(max_queue, sdebug_max_queue, int, S_IRUGO | S_IWUSR);
6160 module_param_named(medium_error_count, sdebug_medium_error_count, int,
6161 		   S_IRUGO | S_IWUSR);
6162 module_param_named(medium_error_start, sdebug_medium_error_start, int,
6163 		   S_IRUGO | S_IWUSR);
6164 module_param_named(ndelay, sdebug_ndelay, int, S_IRUGO | S_IWUSR);
6165 module_param_named(no_lun_0, sdebug_no_lun_0, int, S_IRUGO | S_IWUSR);
6166 module_param_named(no_rwlock, sdebug_no_rwlock, bool, S_IRUGO | S_IWUSR);
6167 module_param_named(no_uld, sdebug_no_uld, int, S_IRUGO);
6168 module_param_named(num_parts, sdebug_num_parts, int, S_IRUGO);
6169 module_param_named(num_tgts, sdebug_num_tgts, int, S_IRUGO | S_IWUSR);
6170 module_param_named(opt_blks, sdebug_opt_blks, int, S_IRUGO);
6171 module_param_named(opt_xferlen_exp, sdebug_opt_xferlen_exp, int, S_IRUGO);
6172 module_param_named(opts, sdebug_opts, int, S_IRUGO | S_IWUSR);
6173 module_param_named(per_host_store, sdebug_per_host_store, bool,
6174 		   S_IRUGO | S_IWUSR);
6175 module_param_named(physblk_exp, sdebug_physblk_exp, int, S_IRUGO);
6176 module_param_named(ptype, sdebug_ptype, int, S_IRUGO | S_IWUSR);
6177 module_param_named(random, sdebug_random, bool, S_IRUGO | S_IWUSR);
6178 module_param_named(removable, sdebug_removable, bool, S_IRUGO | S_IWUSR);
6179 module_param_named(scsi_level, sdebug_scsi_level, int, S_IRUGO);
6180 module_param_named(sector_size, sdebug_sector_size, int, S_IRUGO);
6181 module_param_named(statistics, sdebug_statistics, bool, S_IRUGO | S_IWUSR);
6182 module_param_named(strict, sdebug_strict, bool, S_IRUGO | S_IWUSR);
6183 module_param_named(submit_queues, submit_queues, int, S_IRUGO);
6184 module_param_named(poll_queues, poll_queues, int, S_IRUGO);
6185 module_param_named(tur_ms_to_ready, sdeb_tur_ms_to_ready, int, S_IRUGO);
6186 module_param_named(unmap_alignment, sdebug_unmap_alignment, int, S_IRUGO);
6187 module_param_named(unmap_granularity, sdebug_unmap_granularity, int, S_IRUGO);
6188 module_param_named(unmap_max_blocks, sdebug_unmap_max_blocks, int, S_IRUGO);
6189 module_param_named(unmap_max_desc, sdebug_unmap_max_desc, int, S_IRUGO);
6190 module_param_named(uuid_ctl, sdebug_uuid_ctl, int, S_IRUGO);
6191 module_param_named(virtual_gb, sdebug_virtual_gb, int, S_IRUGO | S_IWUSR);
6192 module_param_named(vpd_use_hostno, sdebug_vpd_use_hostno, int,
6193 		   S_IRUGO | S_IWUSR);
6194 module_param_named(wp, sdebug_wp, bool, S_IRUGO | S_IWUSR);
6195 module_param_named(write_same_length, sdebug_write_same_length, int,
6196 		   S_IRUGO | S_IWUSR);
6197 module_param_named(zbc, sdeb_zbc_model_s, charp, S_IRUGO);
6198 module_param_named(zone_cap_mb, sdeb_zbc_zone_cap_mb, int, S_IRUGO);
6199 module_param_named(zone_max_open, sdeb_zbc_max_open, int, S_IRUGO);
6200 module_param_named(zone_nr_conv, sdeb_zbc_nr_conv, int, S_IRUGO);
6201 module_param_named(zone_size_mb, sdeb_zbc_zone_size_mb, int, S_IRUGO);
6202 module_param_named(allow_restart, sdebug_allow_restart, bool, S_IRUGO | S_IWUSR);
6203 
6204 MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
6205 MODULE_DESCRIPTION("SCSI debug adapter driver");
6206 MODULE_LICENSE("GPL");
6207 MODULE_VERSION(SDEBUG_VERSION);
6208 
6209 MODULE_PARM_DESC(add_host, "add n hosts; a negative value (via sysfs) removes host(s) (def=1)");
6210 MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
6211 MODULE_PARM_DESC(cdb_len, "suggest CDB lengths to drivers (def=10)");
6212 MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
6213 MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny");
6214 MODULE_PARM_DESC(dev_size_mb, "size in MiB of ram shared by devs(def=8)");
6215 MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
6216 MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
6217 MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
6218 MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
6219 MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
6220 MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
6221 MODULE_PARM_DESC(host_lock, "host_lock is ignored (def=0)");
6222 MODULE_PARM_DESC(host_max_queue,
6223 		 "host max # of queued cmds (0 to max(def) [max_queue fixed equal for !0])");
6224 MODULE_PARM_DESC(inq_product, "SCSI INQUIRY product string (def=\"scsi_debug\")");
6225 MODULE_PARM_DESC(inq_rev, "SCSI INQUIRY revision string (def=\""
6226 		 SDEBUG_VERSION "\")");
6227 MODULE_PARM_DESC(inq_vendor, "SCSI INQUIRY vendor string (def=\"Linux\")");
6228 MODULE_PARM_DESC(lbprz,
6229 		 "on read unmapped LBs return 0 when 1 (def), return 0xff when 2");
6230 MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
6231 MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
6232 MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
6233 MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
6234 MODULE_PARM_DESC(lun_format, "LUN format: 0 -> peripheral (def); 1 -> flat address method");
6235 MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
6236 MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to max(def))");
6237 MODULE_PARM_DESC(medium_error_count, "count of sectors for which to return MEDIUM error");
6238 MODULE_PARM_DESC(medium_error_start, "starting sector number to return MEDIUM error");
6239 MODULE_PARM_DESC(ndelay, "response delay in nanoseconds (def=0 -> ignore)");
6240 MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
6241 MODULE_PARM_DESC(no_rwlock, "don't protect user data reads+writes (def=0)");
6242 MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0)");
6243 MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
6244 MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
6245 MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=1024)");
6246 MODULE_PARM_DESC(opt_xferlen_exp, "optimal transfer length granularity exponent (def=physblk_exp)");
6247 MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
6248 MODULE_PARM_DESC(per_host_store, "If set, next positive add_host will get new store (def=0)");
6249 MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
6250 MODULE_PARM_DESC(poll_queues, "support for iouring iopoll queues (1 to max(submit_queues - 1))");
6251 MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
6252 MODULE_PARM_DESC(random, "If set, uniformly randomize command duration between 0 and delay_in_ns");
6253 MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
6254 MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=7[SPC-5])");
6255 MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
6256 MODULE_PARM_DESC(statistics, "collect statistics on commands, queues (def=0)");
6257 MODULE_PARM_DESC(strict, "stricter checks: reserved field in cdb (def=0)");
6258 MODULE_PARM_DESC(submit_queues, "support for block multi-queue (def=1)");
6259 MODULE_PARM_DESC(tur_ms_to_ready, "TEST UNIT READY millisecs before initial good status (def=0)");
6260 MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
6261 MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
6262 MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)");
6263 MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
6264 MODULE_PARM_DESC(uuid_ctl,
6265 		 "1->use uuid for lu name, 0->don't, 2->all use same (def=0)");
6266 MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)");
6267 MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
6268 MODULE_PARM_DESC(wp, "Write Protect (def=0)");
6269 MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
6270 MODULE_PARM_DESC(zbc, "'none' [0]; 'aware' [1]; 'managed' [2] (def=0). Can have 'host-' prefix");
6271 MODULE_PARM_DESC(zone_cap_mb, "Zone capacity in MiB (def=zone size)");
6272 MODULE_PARM_DESC(zone_max_open, "Maximum number of open zones; [0] for no limit (def=auto)");
6273 MODULE_PARM_DESC(zone_nr_conv, "Number of conventional zones (def=1)");
6274 MODULE_PARM_DESC(zone_size_mb, "Zone size in MiB (def=auto)");
6275 MODULE_PARM_DESC(allow_restart, "Set scsi_device's allow_restart flag(def=0)");
6276 
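/*
 * Editor's usage illustration (hypothetical invocation, added): the
 * parameters described above are supplied at load time, e.g.:
 *
 *	modprobe scsi_debug dev_size_mb=256 num_tgts=2 max_luns=4 \
 *		sector_size=4096 zbc=managed zone_size_mb=16
 *
 * Writable parameters can later be changed through the driver's own
 * /sys/bus/pseudo/drivers/scsi_debug/<name> attributes (preferred, see
 * the note before the module_param_named() block) or through
 * /sys/module/scsi_debug/parameters/<name>.
 */
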
6277 #define SDEBUG_INFO_LEN 256
6278 static char sdebug_info[SDEBUG_INFO_LEN];
6279 
6280 static const char *scsi_debug_info(struct Scsi_Host *shp)
6281 {
6282 	int k;
6283 
6284 	k = scnprintf(sdebug_info, SDEBUG_INFO_LEN, "%s: version %s [%s]\n",
6285 		      my_name, SDEBUG_VERSION, sdebug_version_date);
6286 	if (k >= (SDEBUG_INFO_LEN - 1))
6287 		return sdebug_info;
6288 	scnprintf(sdebug_info + k, SDEBUG_INFO_LEN - k,
6289 		  "  dev_size_mb=%d, opts=0x%x, submit_queues=%d, %s=%d",
6290 		  sdebug_dev_size_mb, sdebug_opts, submit_queues,
6291 		  "statistics", (int)sdebug_statistics);
6292 	return sdebug_info;
6293 }
6294 
6295 /* 'echo <val> > /proc/scsi/scsi_debug/<host_id>' writes to opts */
6296 static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer,
6297 				 int length)
6298 {
6299 	char arr[16];
6300 	int opts;
6301 	int minLen = length > 15 ? 15 : length;
6302 
6303 	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
6304 		return -EACCES;
6305 	memcpy(arr, buffer, minLen);
6306 	arr[minLen] = '\0';
6307 	if (1 != sscanf(arr, "%d", &opts))
6308 		return -EINVAL;
6309 	sdebug_opts = opts;
6310 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
6311 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
6312 	if (sdebug_every_nth != 0)
6313 		tweak_cmnd_count();
6314 	return length;
6315 }
6316 
6317 struct sdebug_submit_queue_data {
6318 	int *first;
6319 	int *last;
6320 	int queue_num;
6321 };
6322 
6323 static bool sdebug_submit_queue_iter(struct request *rq, void *opaque)
6324 {
6325 	struct sdebug_submit_queue_data *data = opaque;
6326 	u32 unique_tag = blk_mq_unique_tag(rq);
6327 	u16 hwq = blk_mq_unique_tag_to_hwq(unique_tag);
6328 	u16 tag = blk_mq_unique_tag_to_tag(unique_tag);
6329 	int queue_num = data->queue_num;
6330 
6331 	if (hwq != queue_num)
6332 		return true;
6333 
6334 	/* Rely on iterating in ascending tag order */
6335 	if (*data->first == -1)
6336 		*data->first = *data->last = tag;
6337 	else
6338 		*data->last = tag;
6339 
6340 	return true;
6341 }
6342 
6343 /* Output seen with 'cat /proc/scsi/scsi_debug/<host_id>'. It will be the
6344  * same for each scsi_debug host (if more than one). Some of the counters
6345  * output are not atomic, so they might be inaccurate on a busy system. */
6346 static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
6347 {
6348 	struct sdebug_host_info *sdhp;
6349 	int j;
6350 
6351 	seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n",
6352 		   SDEBUG_VERSION, sdebug_version_date);
6353 	seq_printf(m, "num_tgts=%d, %ssize=%d MB, opts=0x%x, every_nth=%d\n",
6354 		   sdebug_num_tgts, "shared (ram) ", sdebug_dev_size_mb,
6355 		   sdebug_opts, sdebug_every_nth);
6356 	seq_printf(m, "delay=%d, ndelay=%d, max_luns=%d, sector_size=%d %s\n",
6357 		   sdebug_jdelay, sdebug_ndelay, sdebug_max_luns,
6358 		   sdebug_sector_size, "bytes");
6359 	seq_printf(m, "cylinders=%d, heads=%d, sectors=%d, command aborts=%d\n",
6360 		   sdebug_cylinders_per, sdebug_heads, sdebug_sectors_per,
6361 		   num_aborts);
6362 	seq_printf(m, "RESETs: device=%d, target=%d, bus=%d, host=%d\n",
6363 		   num_dev_resets, num_target_resets, num_bus_resets,
6364 		   num_host_resets);
6365 	seq_printf(m, "dix_reads=%d, dix_writes=%d, dif_errors=%d\n",
6366 		   dix_reads, dix_writes, dif_errors);
6367 	seq_printf(m, "usec_in_jiffy=%lu, statistics=%d\n", TICK_NSEC / 1000,
6368 		   sdebug_statistics);
6369 	seq_printf(m, "cmnd_count=%d, completions=%d, %s=%d, a_tsf=%d, mq_polls=%d\n",
6370 		   atomic_read(&sdebug_cmnd_count),
6371 		   atomic_read(&sdebug_completions),
6372 		   "miss_cpus", atomic_read(&sdebug_miss_cpus),
6373 		   atomic_read(&sdebug_a_tsf),
6374 		   atomic_read(&sdeb_mq_poll_count));
6375 
6376 	seq_printf(m, "submit_queues=%d\n", submit_queues);
6377 	for (j = 0; j < submit_queues; ++j) {
6378 		int f = -1, l = -1;
6379 		struct sdebug_submit_queue_data data = {
6380 			.queue_num = j,
6381 			.first = &f,
6382 			.last = &l,
6383 		};
6384 		seq_printf(m, "  queue %d:\n", j);
6385 		blk_mq_tagset_busy_iter(&host->tag_set, sdebug_submit_queue_iter,
6386 					&data);
6387 		if (f >= 0) {
6388 			seq_printf(m, "    in_use_bm BUSY: %s: %d,%d\n",
6389 				   "first,last bits", f, l);
6390 		}
6391 	}
6392 
6393 	seq_printf(m, "this host_no=%d\n", host->host_no);
6394 	if (!xa_empty(per_store_ap)) {
6395 		bool niu;
6396 		int idx;
6397 		unsigned long l_idx;
6398 		struct sdeb_store_info *sip;
6399 
6400 		seq_puts(m, "\nhost list:\n");
6401 		j = 0;
6402 		list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
6403 			idx = sdhp->si_idx;
6404 			seq_printf(m, "  %d: host_no=%d, si_idx=%d\n", j,
6405 				   sdhp->shost->host_no, idx);
6406 			++j;
6407 		}
6408 		seq_printf(m, "\nper_store array [most_recent_idx=%d]:\n",
6409 			   sdeb_most_recent_idx);
6410 		j = 0;
6411 		xa_for_each(per_store_ap, l_idx, sip) {
6412 			niu = xa_get_mark(per_store_ap, l_idx,
6413 					  SDEB_XA_NOT_IN_USE);
6414 			idx = (int)l_idx;
6415 			seq_printf(m, "  %d: idx=%d%s\n", j, idx,
6416 				   (niu ? "  not_in_use" : ""));
6417 			++j;
6418 		}
6419 	}
6420 	return 0;
6421 }
6422 
6423 static ssize_t delay_show(struct device_driver *ddp, char *buf)
6424 {
6425 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_jdelay);
6426 }
6427 /* Returns -EBUSY if jdelay is being changed and commands are queued. The unit
6428  * of delay is jiffies.
6429  */
6430 static ssize_t delay_store(struct device_driver *ddp, const char *buf,
6431 			   size_t count)
6432 {
6433 	int jdelay, res;
6434 
6435 	if (count > 0 && sscanf(buf, "%d", &jdelay) == 1) {
6436 		res = count;
6437 		if (sdebug_jdelay != jdelay) {
6438 			struct sdebug_host_info *sdhp;
6439 
6440 			mutex_lock(&sdebug_host_list_mutex);
6441 			block_unblock_all_queues(true);
6442 
6443 			list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
6444 				struct Scsi_Host *shost = sdhp->shost;
6445 
6446 				if (scsi_host_busy(shost)) {
6447 					res = -EBUSY;   /* queued commands */
6448 					break;
6449 				}
6450 			}
6451 			if (res > 0) {
6452 				sdebug_jdelay = jdelay;
6453 				sdebug_ndelay = 0;
6454 			}
6455 			block_unblock_all_queues(false);
6456 			mutex_unlock(&sdebug_host_list_mutex);
6457 		}
6458 		return res;
6459 	}
6460 	return -EINVAL;
6461 }
6462 static DRIVER_ATTR_RW(delay);
6463 
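/*
 * Editor's example (added): switching every response to immediate
 * completion from user space (hypothetical shell session):
 *
 *	echo 0 > /sys/bus/pseudo/drivers/scsi_debug/delay
 *
 * The store above returns -EBUSY while any command is still queued, as
 * enforced via scsi_host_busy() on each host.
 */
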
6464 static ssize_t ndelay_show(struct device_driver *ddp, char *buf)
6465 {
6466 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ndelay);
6467 }
6468 /* Returns -EBUSY if ndelay is being changed and commands are queued */
6469 /* If > 0 and accepted then sdebug_jdelay is set to JDELAY_OVERRIDDEN */
6470 static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
6471 			    size_t count)
6472 {
6473 	int ndelay, res;
6474 
6475 	if ((count > 0) && (1 == sscanf(buf, "%d", &ndelay)) &&
6476 	    (ndelay >= 0) && (ndelay < (1000 * 1000 * 1000))) {
6477 		res = count;
6478 		if (sdebug_ndelay != ndelay) {
6479 			struct sdebug_host_info *sdhp;
6480 
6481 			mutex_lock(&sdebug_host_list_mutex);
6482 			block_unblock_all_queues(true);
6483 
6484 			list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
6485 				struct Scsi_Host *shost = sdhp->shost;
6486 
6487 				if (scsi_host_busy(shost)) {
6488 					res = -EBUSY;   /* queued commands */
6489 					break;
6490 				}
6491 			}
6492 
6493 			if (res > 0) {
6494 				sdebug_ndelay = ndelay;
6495 				sdebug_jdelay = ndelay  ? JDELAY_OVERRIDDEN
6496 							: DEF_JDELAY;
6497 			}
6498 			block_unblock_all_queues(false);
6499 			mutex_unlock(&sdebug_host_list_mutex);
6500 		}
6501 		return res;
6502 	}
6503 	return -EINVAL;
6504 }
6505 static DRIVER_ATTR_RW(ndelay);
6506 
6507 static ssize_t opts_show(struct device_driver *ddp, char *buf)
6508 {
6509 	return scnprintf(buf, PAGE_SIZE, "0x%x\n", sdebug_opts);
6510 }
6511 
6512 static ssize_t opts_store(struct device_driver *ddp, const char *buf,
6513 			  size_t count)
6514 {
6515 	int opts;
6516 	char work[20];
6517 
6518 	if (sscanf(buf, "%10s", work) == 1) {
6519 		if (strncasecmp(work, "0x", 2) == 0) {
6520 			if (kstrtoint(work + 2, 16, &opts) == 0)
6521 				goto opts_done;
6522 		} else {
6523 			if (kstrtoint(work, 10, &opts) == 0)
6524 				goto opts_done;
6525 		}
6526 	}
6527 	return -EINVAL;
6528 opts_done:
6529 	sdebug_opts = opts;
6530 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
6531 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
6532 	tweak_cmnd_count();
6533 	return count;
6534 }
6535 static DRIVER_ATTR_RW(opts);
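
/*
 * Usage sketch: opts accepts decimal or "0x"-prefixed hex, so the two
 * writes below are equivalent:
 *
 *   echo 5 > /sys/bus/pseudo/drivers/scsi_debug/opts
 *   echo 0x5 > /sys/bus/pseudo/drivers/scsi_debug/opts
 */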
6536 
6537 static ssize_t ptype_show(struct device_driver *ddp, char *buf)
6538 {
6539 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ptype);
6540 }
6541 static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
6542 			   size_t count)
6543 {
6544 	int n;
6545 
6546 	/* Cannot change from or to TYPE_ZBC with sysfs */
6547 	if (sdebug_ptype == TYPE_ZBC)
6548 		return -EINVAL;
6549 
6550 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6551 		if (n == TYPE_ZBC)
6552 			return -EINVAL;
6553 		sdebug_ptype = n;
6554 		return count;
6555 	}
6556 	return -EINVAL;
6557 }
6558 static DRIVER_ATTR_RW(ptype);
6559 
6560 static ssize_t dsense_show(struct device_driver *ddp, char *buf)
6561 {
6562 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dsense);
6563 }
6564 static ssize_t dsense_store(struct device_driver *ddp, const char *buf,
6565 			    size_t count)
6566 {
6567 	int n;
6568 
6569 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6570 		sdebug_dsense = n;
6571 		return count;
6572 	}
6573 	return -EINVAL;
6574 }
6575 static DRIVER_ATTR_RW(dsense);
6576 
6577 static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
6578 {
6579 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_fake_rw);
6580 }
6581 static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
6582 			     size_t count)
6583 {
6584 	int n, idx;
6585 
6586 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6587 		bool want_store = (n == 0);
6588 		struct sdebug_host_info *sdhp;
6589 
6590 		n = (n > 0);
6591 		sdebug_fake_rw = (sdebug_fake_rw > 0);
6592 		if (sdebug_fake_rw == n)
6593 			return count;	/* not transitioning so do nothing */
6594 
6595 		if (want_store) {	/* 1 --> 0 transition, set up store */
6596 			if (sdeb_first_idx < 0) {
6597 				idx = sdebug_add_store();
6598 				if (idx < 0)
6599 					return idx;
6600 			} else {
6601 				idx = sdeb_first_idx;
6602 				xa_clear_mark(per_store_ap, idx,
6603 					      SDEB_XA_NOT_IN_USE);
6604 			}
6605 			/* make all hosts use same store */
6606 			list_for_each_entry(sdhp, &sdebug_host_list,
6607 					    host_list) {
6608 				if (sdhp->si_idx != idx) {
6609 					xa_set_mark(per_store_ap, sdhp->si_idx,
6610 						    SDEB_XA_NOT_IN_USE);
6611 					sdhp->si_idx = idx;
6612 				}
6613 			}
6614 			sdeb_most_recent_idx = idx;
6615 		} else {	/* 0 --> 1 transition triggers a shrink */
6616 			sdebug_erase_all_stores(true /* apart from first */);
6617 		}
6618 		sdebug_fake_rw = n;
6619 		return count;
6620 	}
6621 	return -EINVAL;
6622 }
6623 static DRIVER_ATTR_RW(fake_rw);
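
/*
 * Transition sketch for fake_rw, following the code above: a 1 --> 0 write
 * attaches the same backing store to every host (creating one if needed),
 * while a 0 --> 1 write erases all stores apart from the first:
 *
 *   echo 0 > /sys/bus/pseudo/drivers/scsi_debug/fake_rw   # set up store
 *   echo 1 > /sys/bus/pseudo/drivers/scsi_debug/fake_rw   # shrink stores
 */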
6624 
6625 static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf)
6626 {
6627 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_lun_0);
6628 }
6629 static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf,
6630 			      size_t count)
6631 {
6632 	int n;
6633 
6634 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6635 		sdebug_no_lun_0 = n;
6636 		return count;
6637 	}
6638 	return -EINVAL;
6639 }
6640 static DRIVER_ATTR_RW(no_lun_0);
6641 
6642 static ssize_t num_tgts_show(struct device_driver *ddp, char *buf)
6643 {
6644 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_tgts);
6645 }
6646 static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf,
6647 			      size_t count)
6648 {
6649 	int n;
6650 
6651 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6652 		sdebug_num_tgts = n;
6653 		sdebug_max_tgts_luns();
6654 		return count;
6655 	}
6656 	return -EINVAL;
6657 }
6658 static DRIVER_ATTR_RW(num_tgts);
6659 
6660 static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf)
6661 {
6662 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dev_size_mb);
6663 }
6664 static DRIVER_ATTR_RO(dev_size_mb);
6665 
6666 static ssize_t per_host_store_show(struct device_driver *ddp, char *buf)
6667 {
6668 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_per_host_store);
6669 }
6670 
6671 static ssize_t per_host_store_store(struct device_driver *ddp, const char *buf,
6672 				    size_t count)
6673 {
6674 	bool v;
6675 
6676 	if (kstrtobool(buf, &v))
6677 		return -EINVAL;
6678 
6679 	sdebug_per_host_store = v;
6680 	return count;
6681 }
6682 static DRIVER_ATTR_RW(per_host_store);
6683 
6684 static ssize_t num_parts_show(struct device_driver *ddp, char *buf)
6685 {
6686 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_parts);
6687 }
6688 static DRIVER_ATTR_RO(num_parts);
6689 
6690 static ssize_t every_nth_show(struct device_driver *ddp, char *buf)
6691 {
6692 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_every_nth);
6693 }
6694 static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
6695 			       size_t count)
6696 {
6697 	int nth;
6698 	char work[20];
6699 
6700 	if (sscanf(buf, "%10s", work) == 1) {
6701 		if (strncasecmp(work, "0x", 2) == 0) {
6702 			if (kstrtoint(work + 2, 16, &nth) == 0)
6703 				goto every_nth_done;
6704 		} else {
6705 			if (kstrtoint(work, 10, &nth) == 0)
6706 				goto every_nth_done;
6707 		}
6708 	}
6709 	return -EINVAL;
6710 
6711 every_nth_done:
6712 	sdebug_every_nth = nth;
6713 	if (nth && !sdebug_statistics) {
6714 		pr_info("every_nth needs statistics=1, setting it\n");
6715 		sdebug_statistics = true;
6716 	}
6717 	tweak_cmnd_count();
6718 	return count;
6719 }
6720 static DRIVER_ATTR_RW(every_nth);
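
/*
 * Usage sketch: a non-zero every_nth arms the periodic actions selected by
 * bits in opts (e.g. fake timeouts, see fake_timeout() below) and forces
 * statistics on, since the command counter is required:
 *
 *   echo 100 > /sys/bus/pseudo/drivers/scsi_debug/every_nth
 */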
6721 
6722 static ssize_t lun_format_show(struct device_driver *ddp, char *buf)
6723 {
6724 	return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_lun_am);
6725 }
6726 static ssize_t lun_format_store(struct device_driver *ddp, const char *buf,
6727 				size_t count)
6728 {
6729 	int n;
6730 	bool changed;
6731 
6732 	if (kstrtoint(buf, 0, &n))
6733 		return -EINVAL;
6734 	if (n >= 0) {
6735 		if (n > (int)SAM_LUN_AM_FLAT) {
6736 			pr_warn("only LUN address methods 0 and 1 are supported\n");
6737 			return -EINVAL;
6738 		}
6739 		changed = ((int)sdebug_lun_am != n);
6740 		sdebug_lun_am = n;
6741 		if (changed && sdebug_scsi_level >= 5) {	/* >= SPC-3 */
6742 			struct sdebug_host_info *sdhp;
6743 			struct sdebug_dev_info *dp;
6744 
6745 			mutex_lock(&sdebug_host_list_mutex);
6746 			list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
6747 				list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
6748 					set_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
6749 				}
6750 			}
6751 			mutex_unlock(&sdebug_host_list_mutex);
6752 		}
6753 		return count;
6754 	}
6755 	return -EINVAL;
6756 }
6757 static DRIVER_ATTR_RW(lun_format);
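
/*
 * Background sketch (SAM-5 terms; encodings shown for illustration): with
 * the peripheral address method (0), LUN 5 is encoded as 0x0005 in the
 * first two LUN bytes; with the flat space method (1) the top two bits of
 * the first byte become 01b, giving 0x4005.
 */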
6758 
6759 static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
6760 {
6761 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_luns);
6762 }
6763 static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
6764 			      size_t count)
6765 {
6766 	int n;
6767 	bool changed;
6768 
6769 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6770 		if (n > 256) {
6771 			pr_warn("max_luns can be no more than 256\n");
6772 			return -EINVAL;
6773 		}
6774 		changed = (sdebug_max_luns != n);
6775 		sdebug_max_luns = n;
6776 		sdebug_max_tgts_luns();
6777 		if (changed && (sdebug_scsi_level >= 5)) {	/* >= SPC-3 */
6778 			struct sdebug_host_info *sdhp;
6779 			struct sdebug_dev_info *dp;
6780 
6781 			mutex_lock(&sdebug_host_list_mutex);
6782 			list_for_each_entry(sdhp, &sdebug_host_list,
6783 					    host_list) {
6784 				list_for_each_entry(dp, &sdhp->dev_info_list,
6785 						    dev_list) {
6786 					set_bit(SDEBUG_UA_LUNS_CHANGED,
6787 						dp->uas_bm);
6788 				}
6789 			}
6790 			mutex_unlock(&sdebug_host_list_mutex);
6791 		}
6792 		return count;
6793 	}
6794 	return -EINVAL;
6795 }
6796 static DRIVER_ATTR_RW(max_luns);
6797 
6798 static ssize_t max_queue_show(struct device_driver *ddp, char *buf)
6799 {
6800 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_queue);
6801 }
6802 /* N.B. max_queue can only be changed while no shosts are present; if any
6803  * host exists the write fails with -EBUSY. */
6804 static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
6805 			       size_t count)
6806 {
6807 	int n;
6808 
6809 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
6810 	    (n <= SDEBUG_CANQUEUE) &&
6811 	    (sdebug_host_max_queue == 0)) {
6812 		mutex_lock(&sdebug_host_list_mutex);
6813 
6814 		/* We may only change sdebug_max_queue when we have no shosts */
6815 		if (list_empty(&sdebug_host_list))
6816 			sdebug_max_queue = n;
6817 		else
6818 			count = -EBUSY;
6819 		mutex_unlock(&sdebug_host_list_mutex);
6820 		return count;
6821 	}
6822 	return -EINVAL;
6823 }
6824 static DRIVER_ATTR_RW(max_queue);
6825 
6826 static ssize_t host_max_queue_show(struct device_driver *ddp, char *buf)
6827 {
6828 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_host_max_queue);
6829 }
6830 
6831 static ssize_t no_rwlock_show(struct device_driver *ddp, char *buf)
6832 {
6833 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_rwlock);
6834 }
6835 
6836 static ssize_t no_rwlock_store(struct device_driver *ddp, const char *buf, size_t count)
6837 {
6838 	bool v;
6839 
6840 	if (kstrtobool(buf, &v))
6841 		return -EINVAL;
6842 
6843 	sdebug_no_rwlock = v;
6844 	return count;
6845 }
6846 static DRIVER_ATTR_RW(no_rwlock);
6847 
6848 /*
6849  * Since this is used for .can_queue, and we get the hc_idx tag from the bitmap
6850  * in range [0, sdebug_host_max_queue), we can't change it.
6851  */
6852 static DRIVER_ATTR_RO(host_max_queue);
6853 
6854 static ssize_t no_uld_show(struct device_driver *ddp, char *buf)
6855 {
6856 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_uld);
6857 }
6858 static DRIVER_ATTR_RO(no_uld);
6859 
6860 static ssize_t scsi_level_show(struct device_driver *ddp, char *buf)
6861 {
6862 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_scsi_level);
6863 }
6864 static DRIVER_ATTR_RO(scsi_level);
6865 
6866 static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf)
6867 {
6868 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_virtual_gb);
6869 }
6870 static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
6871 				size_t count)
6872 {
6873 	int n;
6874 	bool changed;
6875 
6876 	/* Ignore capacity change for ZBC drives for now */
6877 	if (sdeb_zbc_in_use)
6878 		return -EOPNOTSUPP;
6879 
6880 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6881 		changed = (sdebug_virtual_gb != n);
6882 		sdebug_virtual_gb = n;
6883 		sdebug_capacity = get_sdebug_capacity();
6884 		if (changed) {
6885 			struct sdebug_host_info *sdhp;
6886 			struct sdebug_dev_info *dp;
6887 
6888 			mutex_lock(&sdebug_host_list_mutex);
6889 			list_for_each_entry(sdhp, &sdebug_host_list,
6890 					    host_list) {
6891 				list_for_each_entry(dp, &sdhp->dev_info_list,
6892 						    dev_list) {
6893 					set_bit(SDEBUG_UA_CAPACITY_CHANGED,
6894 						dp->uas_bm);
6895 				}
6896 			}
6897 			mutex_unlock(&sdebug_host_list_mutex);
6898 		}
6899 		return count;
6900 	}
6901 	return -EINVAL;
6902 }
6903 static DRIVER_ATTR_RW(virtual_gb);
6904 
6905 static ssize_t add_host_show(struct device_driver *ddp, char *buf)
6906 {
6907 	/* show the absolute number of hosts currently active */
6908 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_hosts);
6909 }
6910 
6911 static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
6912 			      size_t count)
6913 {
6914 	bool found;
6915 	unsigned long idx;
6916 	struct sdeb_store_info *sip;
6917 	bool want_phs = (sdebug_fake_rw == 0) && sdebug_per_host_store;
6918 	int delta_hosts;
6919 
6920 	if (sscanf(buf, "%d", &delta_hosts) != 1)
6921 		return -EINVAL;
6922 	if (delta_hosts > 0) {
6923 		do {
6924 			found = false;
6925 			if (want_phs) {
6926 				xa_for_each_marked(per_store_ap, idx, sip,
6927 						   SDEB_XA_NOT_IN_USE) {
6928 					sdeb_most_recent_idx = (int)idx;
6929 					found = true;
6930 					break;
6931 				}
6932 				if (found)	/* re-use case */
6933 					sdebug_add_host_helper((int)idx);
6934 				else
6935 					sdebug_do_add_host(true);
6936 			} else {
6937 				sdebug_do_add_host(false);
6938 			}
6939 		} while (--delta_hosts);
6940 	} else if (delta_hosts < 0) {
6941 		do {
6942 			sdebug_do_remove_host(false);
6943 		} while (++delta_hosts);
6944 	}
6945 	return count;
6946 }
6947 static DRIVER_ATTR_RW(add_host);
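
/*
 * Usage sketch: add_host takes a signed delta, so hosts can be added or
 * removed in bulk:
 *
 *   echo 2 > /sys/bus/pseudo/drivers/scsi_debug/add_host    # add 2 hosts
 *   echo -1 > /sys/bus/pseudo/drivers/scsi_debug/add_host   # remove 1
 */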
6948 
6949 static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf)
6950 {
6951 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_vpd_use_hostno);
6952 }
6953 static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf,
6954 				    size_t count)
6955 {
6956 	int n;
6957 
6958 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6959 		sdebug_vpd_use_hostno = n;
6960 		return count;
6961 	}
6962 	return -EINVAL;
6963 }
6964 static DRIVER_ATTR_RW(vpd_use_hostno);
6965 
6966 static ssize_t statistics_show(struct device_driver *ddp, char *buf)
6967 {
6968 	return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_statistics);
6969 }
6970 static ssize_t statistics_store(struct device_driver *ddp, const char *buf,
6971 				size_t count)
6972 {
6973 	int n;
6974 
6975 	if ((count > 0) && (sscanf(buf, "%d", &n) == 1) && (n >= 0)) {
6976 		if (n > 0)
6977 			sdebug_statistics = true;
6978 		else {
6979 			clear_queue_stats();
6980 			sdebug_statistics = false;
6981 		}
6982 		return count;
6983 	}
6984 	return -EINVAL;
6985 }
6986 static DRIVER_ATTR_RW(statistics);
6987 
6988 static ssize_t sector_size_show(struct device_driver *ddp, char *buf)
6989 {
6990 	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_sector_size);
6991 }
6992 static DRIVER_ATTR_RO(sector_size);
6993 
6994 static ssize_t submit_queues_show(struct device_driver *ddp, char *buf)
6995 {
6996 	return scnprintf(buf, PAGE_SIZE, "%d\n", submit_queues);
6997 }
6998 static DRIVER_ATTR_RO(submit_queues);
6999 
7000 static ssize_t dix_show(struct device_driver *ddp, char *buf)
7001 {
7002 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dix);
7003 }
7004 static DRIVER_ATTR_RO(dix);
7005 
7006 static ssize_t dif_show(struct device_driver *ddp, char *buf)
7007 {
7008 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dif);
7009 }
7010 static DRIVER_ATTR_RO(dif);
7011 
7012 static ssize_t guard_show(struct device_driver *ddp, char *buf)
7013 {
7014 	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_guard);
7015 }
7016 static DRIVER_ATTR_RO(guard);
7017 
7018 static ssize_t ato_show(struct device_driver *ddp, char *buf)
7019 {
7020 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ato);
7021 }
7022 static DRIVER_ATTR_RO(ato);
7023 
7024 static ssize_t map_show(struct device_driver *ddp, char *buf)
7025 {
7026 	ssize_t count = 0;
7027 
7028 	if (!scsi_debug_lbp())
7029 		return scnprintf(buf, PAGE_SIZE, "0-%u\n",
7030 				 sdebug_store_sectors);
7031 
7032 	if (sdebug_fake_rw == 0 && !xa_empty(per_store_ap)) {
7033 		struct sdeb_store_info *sip = xa_load(per_store_ap, 0);
7034 
7035 		if (sip)
7036 			count = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
7037 					  (int)map_size, sip->map_storep);
7038 	}
7039 	buf[count++] = '\n';
7040 	buf[count] = '\0';
7041 
7042 	return count;
7043 }
7044 static DRIVER_ATTR_RO(map);
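
/*
 * Output sketch: "%*pbl" renders the provisioning bitmap as a ranged list,
 * so reading map on an LBP-enabled store may produce something like
 * "0-1,42,512-1023" (the currently mapped block ranges).
 */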
7045 
7046 static ssize_t random_show(struct device_driver *ddp, char *buf)
7047 {
7048 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_random);
7049 }
7050 
7051 static ssize_t random_store(struct device_driver *ddp, const char *buf,
7052 			    size_t count)
7053 {
7054 	bool v;
7055 
7056 	if (kstrtobool(buf, &v))
7057 		return -EINVAL;
7058 
7059 	sdebug_random = v;
7060 	return count;
7061 }
7062 static DRIVER_ATTR_RW(random);
7063 
7064 static ssize_t removable_show(struct device_driver *ddp, char *buf)
7065 {
7066 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_removable ? 1 : 0);
7067 }
7068 static ssize_t removable_store(struct device_driver *ddp, const char *buf,
7069 			       size_t count)
7070 {
7071 	int n;
7072 
7073 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
7074 		sdebug_removable = (n > 0);
7075 		return count;
7076 	}
7077 	return -EINVAL;
7078 }
7079 static DRIVER_ATTR_RW(removable);
7080 
7081 static ssize_t host_lock_show(struct device_driver *ddp, char *buf)
7082 {
7083 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_host_lock);
7084 }
7085 /* N.B. sdebug_host_lock does nothing, kept for backward compatibility */
7086 static ssize_t host_lock_store(struct device_driver *ddp, const char *buf,
7087 			       size_t count)
7088 {
7089 	int n;
7090 
7091 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
7092 		sdebug_host_lock = (n > 0);
7093 		return count;
7094 	}
7095 	return -EINVAL;
7096 }
7097 static DRIVER_ATTR_RW(host_lock);
7098 
7099 static ssize_t strict_show(struct device_driver *ddp, char *buf)
7100 {
7101 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_strict);
7102 }
7103 static ssize_t strict_store(struct device_driver *ddp, const char *buf,
7104 			    size_t count)
7105 {
7106 	int n;
7107 
7108 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
7109 		sdebug_strict = (n > 0);
7110 		return count;
7111 	}
7112 	return -EINVAL;
7113 }
7114 static DRIVER_ATTR_RW(strict);
7115 
7116 static ssize_t uuid_ctl_show(struct device_driver *ddp, char *buf)
7117 {
7118 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_uuid_ctl);
7119 }
7120 static DRIVER_ATTR_RO(uuid_ctl);
7121 
7122 static ssize_t cdb_len_show(struct device_driver *ddp, char *buf)
7123 {
7124 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_cdb_len);
7125 }
7126 static ssize_t cdb_len_store(struct device_driver *ddp, const char *buf,
7127 			     size_t count)
7128 {
7129 	int ret, n;
7130 
7131 	ret = kstrtoint(buf, 0, &n);
7132 	if (ret)
7133 		return ret;
7134 	sdebug_cdb_len = n;
7135 	all_config_cdb_len();
7136 	return count;
7137 }
7138 static DRIVER_ATTR_RW(cdb_len);
7139 
7140 static const char * const zbc_model_strs_a[] = {
7141 	[BLK_ZONED_NONE] = "none",
7142 	[BLK_ZONED_HA]   = "host-aware",
7143 	[BLK_ZONED_HM]   = "host-managed",
7144 };
7145 
7146 static const char * const zbc_model_strs_b[] = {
7147 	[BLK_ZONED_NONE] = "no",
7148 	[BLK_ZONED_HA]   = "aware",
7149 	[BLK_ZONED_HM]   = "managed",
7150 };
7151 
7152 static const char * const zbc_model_strs_c[] = {
7153 	[BLK_ZONED_NONE] = "0",
7154 	[BLK_ZONED_HA]   = "1",
7155 	[BLK_ZONED_HM]   = "2",
7156 };
7157 
7158 static int sdeb_zbc_model_str(const char *cp)
7159 {
7160 	int res = sysfs_match_string(zbc_model_strs_a, cp);
7161 
7162 	if (res < 0) {
7163 		res = sysfs_match_string(zbc_model_strs_b, cp);
7164 		if (res < 0) {
7165 			res = sysfs_match_string(zbc_model_strs_c, cp);
7166 			if (res < 0)
7167 				return -EINVAL;
7168 		}
7169 	}
7170 	return res;
7171 }
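
/*
 * Example: "host-managed", "managed" and "2" all map to BLK_ZONED_HM;
 * sysfs_match_string() also tolerates a trailing newline in the input.
 */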
7172 
7173 static ssize_t zbc_show(struct device_driver *ddp, char *buf)
7174 {
7175 	return scnprintf(buf, PAGE_SIZE, "%s\n",
7176 			 zbc_model_strs_a[sdeb_zbc_model]);
7177 }
7178 static DRIVER_ATTR_RO(zbc);
7179 
7180 static ssize_t tur_ms_to_ready_show(struct device_driver *ddp, char *buf)
7181 {
7182 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdeb_tur_ms_to_ready);
7183 }
7184 static DRIVER_ATTR_RO(tur_ms_to_ready);
7185 
7186 /* Note: The following array creates attribute files in the
7187  * /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
7188  * files (over those found in the /sys/module/scsi_debug/parameters
7189  * directory) is that auxiliary actions can be triggered when an attribute
7190  * is changed. For example see: add_host_store() above.
7191  */
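
/*
 * For example, with the usual sysfs layout:
 *
 *   /sys/bus/pseudo/drivers/scsi_debug/every_nth     # triggers side effects
 *   /sys/module/scsi_debug/parameters/every_nth      # updates the value only
 */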
7192 
7193 static struct attribute *sdebug_drv_attrs[] = {
7194 	&driver_attr_delay.attr,
7195 	&driver_attr_opts.attr,
7196 	&driver_attr_ptype.attr,
7197 	&driver_attr_dsense.attr,
7198 	&driver_attr_fake_rw.attr,
7199 	&driver_attr_host_max_queue.attr,
7200 	&driver_attr_no_lun_0.attr,
7201 	&driver_attr_num_tgts.attr,
7202 	&driver_attr_dev_size_mb.attr,
7203 	&driver_attr_num_parts.attr,
7204 	&driver_attr_every_nth.attr,
7205 	&driver_attr_lun_format.attr,
7206 	&driver_attr_max_luns.attr,
7207 	&driver_attr_max_queue.attr,
7208 	&driver_attr_no_rwlock.attr,
7209 	&driver_attr_no_uld.attr,
7210 	&driver_attr_scsi_level.attr,
7211 	&driver_attr_virtual_gb.attr,
7212 	&driver_attr_add_host.attr,
7213 	&driver_attr_per_host_store.attr,
7214 	&driver_attr_vpd_use_hostno.attr,
7215 	&driver_attr_sector_size.attr,
7216 	&driver_attr_statistics.attr,
7217 	&driver_attr_submit_queues.attr,
7218 	&driver_attr_dix.attr,
7219 	&driver_attr_dif.attr,
7220 	&driver_attr_guard.attr,
7221 	&driver_attr_ato.attr,
7222 	&driver_attr_map.attr,
7223 	&driver_attr_random.attr,
7224 	&driver_attr_removable.attr,
7225 	&driver_attr_host_lock.attr,
7226 	&driver_attr_ndelay.attr,
7227 	&driver_attr_strict.attr,
7228 	&driver_attr_uuid_ctl.attr,
7229 	&driver_attr_cdb_len.attr,
7230 	&driver_attr_tur_ms_to_ready.attr,
7231 	&driver_attr_zbc.attr,
7232 	NULL,
7233 };
7234 ATTRIBUTE_GROUPS(sdebug_drv);
7235 
7236 static struct device *pseudo_primary;
7237 
7238 static int __init scsi_debug_init(void)
7239 {
7240 	bool want_store = (sdebug_fake_rw == 0);
7241 	unsigned long sz;
7242 	int k, ret, hosts_to_add;
7243 	int idx = -1;
7244 
7245 	if (sdebug_ndelay >= 1000 * 1000 * 1000) {
7246 		pr_warn("ndelay must be less than 1 second, ignored\n");
7247 		sdebug_ndelay = 0;
7248 	} else if (sdebug_ndelay > 0)
7249 		sdebug_jdelay = JDELAY_OVERRIDDEN;
7250 
7251 	switch (sdebug_sector_size) {
7252 	case  512:
7253 	case 1024:
7254 	case 2048:
7255 	case 4096:
7256 		break;
7257 	default:
7258 		pr_err("invalid sector_size %d\n", sdebug_sector_size);
7259 		return -EINVAL;
7260 	}
7261 
7262 	switch (sdebug_dif) {
7263 	case T10_PI_TYPE0_PROTECTION:
7264 		break;
7265 	case T10_PI_TYPE1_PROTECTION:
7266 	case T10_PI_TYPE2_PROTECTION:
7267 	case T10_PI_TYPE3_PROTECTION:
7268 		have_dif_prot = true;
7269 		break;
7270 
7271 	default:
7272 		pr_err("dif must be 0, 1, 2 or 3\n");
7273 		return -EINVAL;
7274 	}
7275 
7276 	if (sdebug_num_tgts < 0) {
7277 		pr_err("num_tgts must be >= 0\n");
7278 		return -EINVAL;
7279 	}
7280 
7281 	if (sdebug_guard > 1) {
7282 		pr_err("guard must be 0 or 1\n");
7283 		return -EINVAL;
7284 	}
7285 
7286 	if (sdebug_ato > 1) {
7287 		pr_err("ato must be 0 or 1\n");
7288 		return -EINVAL;
7289 	}
7290 
7291 	if (sdebug_physblk_exp > 15) {
7292 		pr_err("invalid physblk_exp %u\n", sdebug_physblk_exp);
7293 		return -EINVAL;
7294 	}
7295 
7296 	sdebug_lun_am = sdebug_lun_am_i;
7297 	if (sdebug_lun_am > SAM_LUN_AM_FLAT) {
7298 		pr_warn("Invalid LUN format %d, using default\n", (int)sdebug_lun_am);
7299 		sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
7300 	}
7301 
7302 	if (sdebug_max_luns > 256) {
7303 		if (sdebug_max_luns > 16384) {
7304 			pr_warn("max_luns can be no more than 16384, using default\n");
7305 			sdebug_max_luns = DEF_MAX_LUNS;
7306 		}
7307 		sdebug_lun_am = SAM_LUN_AM_FLAT;
7308 	}
7309 
7310 	if (sdebug_lowest_aligned > 0x3fff) {
7311 		pr_err("lowest_aligned too big: %u\n", sdebug_lowest_aligned);
7312 		return -EINVAL;
7313 	}
7314 
7315 	if (submit_queues < 1) {
7316 		pr_err("submit_queues must be 1 or more\n");
7317 		return -EINVAL;
7318 	}
7319 
7320 	if ((sdebug_max_queue > SDEBUG_CANQUEUE) || (sdebug_max_queue < 1)) {
7321 		pr_err("max_queue must be in range [1, %d]\n", SDEBUG_CANQUEUE);
7322 		return -EINVAL;
7323 	}
7324 
7325 	if ((sdebug_host_max_queue > SDEBUG_CANQUEUE) ||
7326 	    (sdebug_host_max_queue < 0)) {
7327 		pr_err("host_max_queue must be in range [0, %d]\n",
7328 		       SDEBUG_CANQUEUE);
7329 		return -EINVAL;
7330 	}
7331 
7332 	if (sdebug_host_max_queue &&
7333 	    (sdebug_max_queue != sdebug_host_max_queue)) {
7334 		sdebug_max_queue = sdebug_host_max_queue;
7335 		pr_warn("fixing max submit queue depth to host max queue depth, %d\n",
7336 			sdebug_max_queue);
7337 	}
7338 
7339 	/*
7340 	 * check for host managed zoned block device specified with
7341 	 * ptype=0x14 or zbc=XXX.
7342 	 */
7343 	if (sdebug_ptype == TYPE_ZBC) {
7344 		sdeb_zbc_model = BLK_ZONED_HM;
7345 	} else if (sdeb_zbc_model_s && *sdeb_zbc_model_s) {
7346 		k = sdeb_zbc_model_str(sdeb_zbc_model_s);
7347 		if (k < 0)
7348 			return k;
7349 		sdeb_zbc_model = k;
7350 		switch (sdeb_zbc_model) {
7351 		case BLK_ZONED_NONE:
7352 		case BLK_ZONED_HA:
7353 			sdebug_ptype = TYPE_DISK;
7354 			break;
7355 		case BLK_ZONED_HM:
7356 			sdebug_ptype = TYPE_ZBC;
7357 			break;
7358 		default:
7359 			pr_err("Invalid ZBC model\n");
7360 			return -EINVAL;
7361 		}
7362 	}
7363 	if (sdeb_zbc_model != BLK_ZONED_NONE) {
7364 		sdeb_zbc_in_use = true;
7365 		if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
7366 			sdebug_dev_size_mb = DEF_ZBC_DEV_SIZE_MB;
7367 	}
7368 
7369 	if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
7370 		sdebug_dev_size_mb = DEF_DEV_SIZE_MB;
7371 	if (sdebug_dev_size_mb < 1)
7372 		sdebug_dev_size_mb = 1;  /* force minimum 1 MB ramdisk */
7373 	sz = (unsigned long)sdebug_dev_size_mb * 1048576;
7374 	sdebug_store_sectors = sz / sdebug_sector_size;
7375 	sdebug_capacity = get_sdebug_capacity();
7376 
7377 	/* play around with geometry, don't waste too much on track 0 */
7378 	sdebug_heads = 8;
7379 	sdebug_sectors_per = 32;
7380 	if (sdebug_dev_size_mb >= 256)
7381 		sdebug_heads = 64;
7382 	else if (sdebug_dev_size_mb >= 16)
7383 		sdebug_heads = 32;
7384 	sdebug_cylinders_per = (unsigned long)sdebug_capacity /
7385 			       (sdebug_sectors_per * sdebug_heads);
7386 	if (sdebug_cylinders_per >= 1024) {
7387 		/* other LLDs do this; implies >= 1GB ram disk ... */
7388 		sdebug_heads = 255;
7389 		sdebug_sectors_per = 63;
7390 		sdebug_cylinders_per = (unsigned long)sdebug_capacity /
7391 			       (sdebug_sectors_per * sdebug_heads);
7392 	}
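
	/*
	 * Worked example (assuming virtual_gb=0 so capacity equals the store
	 * size): dev_size_mb=16 with 512 byte sectors gives 32768 sectors;
	 * 32 heads and 32 sectors per track yield 32768 / (32 * 32) = 32
	 * cylinders, well under the 1024 limit handled above.
	 */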
7393 	if (scsi_debug_lbp()) {
7394 		sdebug_unmap_max_blocks =
7395 			clamp(sdebug_unmap_max_blocks, 0U, 0xffffffffU);
7396 
7397 		sdebug_unmap_max_desc =
7398 			clamp(sdebug_unmap_max_desc, 0U, 256U);
7399 
7400 		sdebug_unmap_granularity =
7401 			clamp(sdebug_unmap_granularity, 1U, 0xffffffffU);
7402 
7403 		if (sdebug_unmap_alignment &&
7404 		    sdebug_unmap_granularity <=
7405 		    sdebug_unmap_alignment) {
7406 			pr_err("ERR: unmap_granularity <= unmap_alignment\n");
7407 			return -EINVAL;
7408 		}
7409 	}
7410 	xa_init_flags(per_store_ap, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
7411 	if (want_store) {
7412 		idx = sdebug_add_store();
7413 		if (idx < 0)
7414 			return idx;
7415 	}
7416 
7417 	pseudo_primary = root_device_register("pseudo_0");
7418 	if (IS_ERR(pseudo_primary)) {
7419 		pr_warn("root_device_register() error\n");
7420 		ret = PTR_ERR(pseudo_primary);
7421 		goto free_vm;
7422 	}
7423 	ret = bus_register(&pseudo_lld_bus);
7424 	if (ret < 0) {
7425 		pr_warn("bus_register error: %d\n", ret);
7426 		goto dev_unreg;
7427 	}
7428 	ret = driver_register(&sdebug_driverfs_driver);
7429 	if (ret < 0) {
7430 		pr_warn("driver_register error: %d\n", ret);
7431 		goto bus_unreg;
7432 	}
7433 
7434 	hosts_to_add = sdebug_add_host;
7435 	sdebug_add_host = 0;
7436 
7437 	queued_cmd_cache = KMEM_CACHE(sdebug_queued_cmd, SLAB_HWCACHE_ALIGN);
7438 	if (!queued_cmd_cache) {
7439 		ret = -ENOMEM;
7440 		goto driver_unreg;
7441 	}
7442 
7443 	sdebug_debugfs_root = debugfs_create_dir("scsi_debug", NULL);
7444 	if (IS_ERR_OR_NULL(sdebug_debugfs_root))
7445 		pr_info("failed to create initial debugfs directory\n");
7446 
7447 	for (k = 0; k < hosts_to_add; k++) {
7448 		if (want_store && k == 0) {
7449 			ret = sdebug_add_host_helper(idx);
7450 			if (ret < 0) {
7451 				pr_err("add_host_helper k=%d, error=%d\n",
7452 				       k, -ret);
7453 				break;
7454 			}
7455 		} else {
7456 			ret = sdebug_do_add_host(want_store &&
7457 						 sdebug_per_host_store);
7458 			if (ret < 0) {
7459 				pr_err("add_host k=%d error=%d\n", k, -ret);
7460 				break;
7461 			}
7462 		}
7463 	}
7464 	if (sdebug_verbose)
7465 		pr_info("built %d host(s)\n", sdebug_num_hosts);
7466 
7467 	return 0;
7468 
7469 driver_unreg:
7470 	driver_unregister(&sdebug_driverfs_driver);
7471 bus_unreg:
7472 	bus_unregister(&pseudo_lld_bus);
7473 dev_unreg:
7474 	root_device_unregister(pseudo_primary);
7475 free_vm:
7476 	sdebug_erase_store(idx, NULL);
7477 	return ret;
7478 }
7479 
7480 static void __exit scsi_debug_exit(void)
7481 {
7482 	int k = sdebug_num_hosts;
7483 
7484 	for (; k; k--)
7485 		sdebug_do_remove_host(true);
7486 	kmem_cache_destroy(queued_cmd_cache);
7487 	driver_unregister(&sdebug_driverfs_driver);
7488 	bus_unregister(&pseudo_lld_bus);
7489 	root_device_unregister(pseudo_primary);
7490 
7491 	sdebug_erase_all_stores(false);
7492 	xa_destroy(per_store_ap);
7493 	debugfs_remove(sdebug_debugfs_root);
7494 }
7495 
7496 device_initcall(scsi_debug_init);
7497 module_exit(scsi_debug_exit);
7498 
7499 static void sdebug_release_adapter(struct device *dev)
7500 {
7501 	struct sdebug_host_info *sdbg_host;
7502 
7503 	sdbg_host = dev_to_sdebug_host(dev);
7504 	kfree(sdbg_host);
7505 }
7506 
7507 /* idx must be valid; if sip is NULL then it will be obtained using idx */
7508 static void sdebug_erase_store(int idx, struct sdeb_store_info *sip)
7509 {
7510 	if (idx < 0)
7511 		return;
7512 	if (!sip) {
7513 		if (xa_empty(per_store_ap))
7514 			return;
7515 		sip = xa_load(per_store_ap, idx);
7516 		if (!sip)
7517 			return;
7518 	}
7519 	vfree(sip->map_storep);
7520 	vfree(sip->dif_storep);
7521 	vfree(sip->storep);
7522 	xa_erase(per_store_ap, idx);
7523 	kfree(sip);
7524 }
7525 
7526 /* Assume apart_from_first==false only in shutdown case. */
7527 static void sdebug_erase_all_stores(bool apart_from_first)
7528 {
7529 	unsigned long idx;
7530 	struct sdeb_store_info *sip = NULL;
7531 
7532 	xa_for_each(per_store_ap, idx, sip) {
7533 		if (apart_from_first)
7534 			apart_from_first = false;
7535 		else
7536 			sdebug_erase_store(idx, sip);
7537 	}
7538 	if (apart_from_first)
7539 		sdeb_most_recent_idx = sdeb_first_idx;
7540 }
7541 
7542 /*
7543  * Returns the new store's xarray element index (idx) if >= 0, else a
7544  * negated errno. The number of stores is limited to 65536.
7545  */
7546 static int sdebug_add_store(void)
7547 {
7548 	int res;
7549 	u32 n_idx;
7550 	unsigned long iflags;
7551 	unsigned long sz = (unsigned long)sdebug_dev_size_mb * 1048576;
7552 	struct sdeb_store_info *sip = NULL;
7553 	struct xa_limit xal = { .max = 1 << 16, .min = 0 };
7554 
7555 	sip = kzalloc(sizeof(*sip), GFP_KERNEL);
7556 	if (!sip)
7557 		return -ENOMEM;
7558 
7559 	xa_lock_irqsave(per_store_ap, iflags);
7560 	res = __xa_alloc(per_store_ap, &n_idx, sip, xal, GFP_ATOMIC);
7561 	if (unlikely(res < 0)) {
7562 		xa_unlock_irqrestore(per_store_ap, iflags);
7563 		kfree(sip);
7564 		pr_warn("xa_alloc() errno=%d\n", -res);
7565 		return res;
7566 	}
7567 	sdeb_most_recent_idx = n_idx;
7568 	if (sdeb_first_idx < 0)
7569 		sdeb_first_idx = n_idx;
7570 	xa_unlock_irqrestore(per_store_ap, iflags);
7571 
7572 	res = -ENOMEM;
7573 	sip->storep = vzalloc(sz);
7574 	if (!sip->storep) {
7575 		pr_err("user data oom\n");
7576 		goto err;
7577 	}
7578 	if (sdebug_num_parts > 0)
7579 		sdebug_build_parts(sip->storep, sz);
7580 
7581 	/* DIF/DIX: what T10 calls Protection Information (PI) */
7582 	if (sdebug_dix) {
7583 		int dif_size;
7584 
7585 		dif_size = sdebug_store_sectors * sizeof(struct t10_pi_tuple);
7586 		sip->dif_storep = vmalloc(dif_size);
7587 
7588 		pr_info("dif_storep %u bytes @ %pK\n", dif_size,
7589 			sip->dif_storep);
7590 
7591 		if (!sip->dif_storep) {
7592 			pr_err("DIX oom\n");
7593 			goto err;
7594 		}
7595 		memset(sip->dif_storep, 0xff, dif_size);
7596 	}
7597 	/* Logical Block Provisioning */
7598 	if (scsi_debug_lbp()) {
7599 		map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
7600 		sip->map_storep = vmalloc(array_size(sizeof(long),
7601 						     BITS_TO_LONGS(map_size)));
7602 
7603 		pr_info("%lu provisioning blocks\n", map_size);
7604 
7605 		if (!sip->map_storep) {
7606 			pr_err("LBP map oom\n");
7607 			goto err;
7608 		}
7609 
7610 		bitmap_zero(sip->map_storep, map_size);
7611 
7612 		/* Map first 1KB for partition table */
7613 		if (sdebug_num_parts)
7614 			map_region(sip, 0, 2);
7615 	}
7616 
7617 	rwlock_init(&sip->macc_lck);
7618 	return (int)n_idx;
7619 err:
7620 	sdebug_erase_store((int)n_idx, sip);
7621 	pr_warn("failed, errno=%d\n", -res);
7622 	return res;
7623 }
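
/*
 * Size sketch (assuming the default 512 byte sectors): dev_size_mb=8
 * yields an 8 MiB data store of 16384 sectors; with dix enabled the PI
 * store adds sizeof(struct t10_pi_tuple) (8 bytes) per sector, i.e.
 * another 128 KiB.
 */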
7624 
7625 static int sdebug_add_host_helper(int per_host_idx)
7626 {
7627 	int k, devs_per_host, idx;
7628 	int error = -ENOMEM;
7629 	struct sdebug_host_info *sdbg_host;
7630 	struct sdebug_dev_info *sdbg_devinfo, *tmp;
7631 
7632 	sdbg_host = kzalloc(sizeof(*sdbg_host), GFP_KERNEL);
7633 	if (!sdbg_host)
7634 		return -ENOMEM;
7635 	idx = (per_host_idx < 0) ? sdeb_first_idx : per_host_idx;
7636 	if (xa_get_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE))
7637 		xa_clear_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
7638 	sdbg_host->si_idx = idx;
7639 
7640 	INIT_LIST_HEAD(&sdbg_host->dev_info_list);
7641 
7642 	devs_per_host = sdebug_num_tgts * sdebug_max_luns;
7643 	for (k = 0; k < devs_per_host; k++) {
7644 		sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
7645 		if (!sdbg_devinfo)
7646 			goto clean;
7647 	}
7648 
7649 	mutex_lock(&sdebug_host_list_mutex);
7650 	list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
7651 	mutex_unlock(&sdebug_host_list_mutex);
7652 
7653 	sdbg_host->dev.bus = &pseudo_lld_bus;
7654 	sdbg_host->dev.parent = pseudo_primary;
7655 	sdbg_host->dev.release = &sdebug_release_adapter;
7656 	dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_num_hosts);
7657 
7658 	error = device_register(&sdbg_host->dev);
7659 	if (error) {
7660 		mutex_lock(&sdebug_host_list_mutex);
7661 		list_del(&sdbg_host->host_list);
7662 		mutex_unlock(&sdebug_host_list_mutex);
7663 		goto clean;
7664 	}
7665 
7666 	++sdebug_num_hosts;
7667 	return 0;
7668 
7669 clean:
7670 	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
7671 				 dev_list) {
7672 		list_del(&sdbg_devinfo->dev_list);
7673 		kfree(sdbg_devinfo->zstate);
7674 		kfree(sdbg_devinfo);
7675 	}
7676 	if (sdbg_host->dev.release)
7677 		put_device(&sdbg_host->dev);
7678 	else
7679 		kfree(sdbg_host);
7680 	pr_warn("failed, errno=%d\n", -error);
7681 	return error;
7682 }
7683 
7684 static int sdebug_do_add_host(bool mk_new_store)
7685 {
7686 	int ph_idx = sdeb_most_recent_idx;
7687 
7688 	if (mk_new_store) {
7689 		ph_idx = sdebug_add_store();
7690 		if (ph_idx < 0)
7691 			return ph_idx;
7692 	}
7693 	return sdebug_add_host_helper(ph_idx);
7694 }
7695 
7696 static void sdebug_do_remove_host(bool the_end)
7697 {
7698 	int idx = -1;
7699 	struct sdebug_host_info *sdbg_host = NULL;
7700 	struct sdebug_host_info *sdbg_host2;
7701 
7702 	mutex_lock(&sdebug_host_list_mutex);
7703 	if (!list_empty(&sdebug_host_list)) {
7704 		sdbg_host = list_entry(sdebug_host_list.prev,
7705 				       struct sdebug_host_info, host_list);
7706 		idx = sdbg_host->si_idx;
7707 	}
7708 	if (!the_end && idx >= 0) {
7709 		bool unique = true;
7710 
7711 		list_for_each_entry(sdbg_host2, &sdebug_host_list, host_list) {
7712 			if (sdbg_host2 == sdbg_host)
7713 				continue;
7714 			if (idx == sdbg_host2->si_idx) {
7715 				unique = false;
7716 				break;
7717 			}
7718 		}
7719 		if (unique) {
7720 			xa_set_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
7721 			if (idx == sdeb_most_recent_idx)
7722 				--sdeb_most_recent_idx;
7723 		}
7724 	}
7725 	if (sdbg_host)
7726 		list_del(&sdbg_host->host_list);
7727 	mutex_unlock(&sdebug_host_list_mutex);
7728 
7729 	if (!sdbg_host)
7730 		return;
7731 
7732 	device_unregister(&sdbg_host->dev);
7733 	--sdebug_num_hosts;
7734 }
7735 
7736 static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
7737 {
7738 	struct sdebug_dev_info *devip = sdev->hostdata;
7739 
7740 	if (!devip)
7741 		return	-ENODEV;
7742 
7743 	mutex_lock(&sdebug_host_list_mutex);
7744 	block_unblock_all_queues(true);
7745 
7746 	if (qdepth > SDEBUG_CANQUEUE) {
7747 		qdepth = SDEBUG_CANQUEUE;
7748 		pr_warn("requested qdepth [%d] exceeds canqueue [%d], trim\n",
7749 			qdepth, SDEBUG_CANQUEUE);
7750 	}
7751 	if (qdepth < 1)
7752 		qdepth = 1;
7753 	if (qdepth != sdev->queue_depth)
7754 		scsi_change_queue_depth(sdev, qdepth);
7755 
7756 	block_unblock_all_queues(false);
7757 	mutex_unlock(&sdebug_host_list_mutex);
7758 
7759 	if (SDEBUG_OPT_Q_NOISE & sdebug_opts)
7760 		sdev_printk(KERN_INFO, sdev, "%s: qdepth=%d\n", __func__, qdepth);
7761 
7762 	return sdev->queue_depth;
7763 }
7764 
7765 static bool fake_timeout(struct scsi_cmnd *scp)
7766 {
7767 	if (0 == (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth))) {
7768 		if (sdebug_every_nth < -1)
7769 			sdebug_every_nth = -1;
7770 		if (SDEBUG_OPT_TIMEOUT & sdebug_opts)
7771 			return true; /* ignore command causing timeout */
7772 		else if (SDEBUG_OPT_MAC_TIMEOUT & sdebug_opts &&
7773 			 scsi_medium_access_command(scp))
7774 			return true; /* time out reads and writes */
7775 	}
7776 	return false;
7777 }
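
/*
 * Sketch: with every_nth=100 and SDEBUG_OPT_TIMEOUT set in opts, every
 * 100th command is silently dropped so the mid level's error handling can
 * be exercised; SDEBUG_OPT_MAC_TIMEOUT restricts the drops to medium
 * access commands.
 */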
7778 
7779 /* Response to TUR or media access command when device stopped */
7780 static int resp_not_ready(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
7781 {
7782 	int stopped_state;
7783 	u64 diff_ns = 0;
7784 	ktime_t now_ts = ktime_get_boottime();
7785 	struct scsi_device *sdp = scp->device;
7786 
7787 	stopped_state = atomic_read(&devip->stopped);
7788 	if (stopped_state == 2) {
7789 		if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
7790 			diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));
7791 			if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
7792 				/* tur_ms_to_ready timer extinguished */
7793 				atomic_set(&devip->stopped, 0);
7794 				return 0;
7795 			}
7796 		}
7797 		mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x1);
7798 		if (sdebug_verbose)
7799 			sdev_printk(KERN_INFO, sdp,
7800 				    "%s: Not ready: in process of becoming ready\n", my_name);
7801 		if (scp->cmnd[0] == TEST_UNIT_READY) {
7802 			u64 tur_nanosecs_to_ready = (u64)sdeb_tur_ms_to_ready * 1000000;
7803 
7804 			if (diff_ns <= tur_nanosecs_to_ready)
7805 				diff_ns = tur_nanosecs_to_ready - diff_ns;
7806 			else
7807 				diff_ns = tur_nanosecs_to_ready;
7808 			/* As per 20-061r2 approved for spc6 by T10 on 20200716 */
7809 			do_div(diff_ns, 1000000);	/* diff_ns becomes milliseconds */
7810 			scsi_set_sense_information(scp->sense_buffer, SCSI_SENSE_BUFFERSIZE,
7811 						   diff_ns);
7812 			return check_condition_result;
7813 		}
7814 	}
7815 	mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x2);
7816 	if (sdebug_verbose)
7817 		sdev_printk(KERN_INFO, sdp, "%s: Not ready: initializing command required\n",
7818 			    my_name);
7819 	return check_condition_result;
7820 }
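
/*
 * Worked example: with tur_ms_to_ready=2000, a TEST UNIT READY arriving
 * 500 ms after device creation sees diff_ns=5e8 against a 2e9 threshold,
 * so the sense information field reports (2e9 - 5e8) / 1e6 = 1500 ms
 * remaining until ready.
 */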
7821 
7822 static void sdebug_map_queues(struct Scsi_Host *shost)
7823 {
7824 	int i, qoff;
7825 
7826 	if (shost->nr_hw_queues == 1)
7827 		return;
7828 
7829 	for (i = 0, qoff = 0; i < HCTX_MAX_TYPES; i++) {
7830 		struct blk_mq_queue_map *map = &shost->tag_set.map[i];
7831 
7832 		map->nr_queues  = 0;
7833 
7834 		if (i == HCTX_TYPE_DEFAULT)
7835 			map->nr_queues = submit_queues - poll_queues;
7836 		else if (i == HCTX_TYPE_POLL)
7837 			map->nr_queues = poll_queues;
7838 
7839 		if (!map->nr_queues) {
7840 			BUG_ON(i == HCTX_TYPE_DEFAULT);
7841 			continue;
7842 		}
7843 
7844 		map->queue_offset = qoff;
7845 		blk_mq_map_queues(map);
7846 
7847 		qoff += map->nr_queues;
7848 	}
7849 }
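
/*
 * Mapping sketch: with submit_queues=4 and poll_queues=1, HCTX_TYPE_DEFAULT
 * receives 3 queues at offset 0 and HCTX_TYPE_POLL 1 queue at offset 3;
 * HCTX_TYPE_READ gets no queues and is skipped.
 */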
7850 
7851 struct sdebug_blk_mq_poll_data {
7852 	unsigned int queue_num;
7853 	int *num_entries;
7854 };
7855 
7856 /*
7857  * We don't handle aborted commands here, but it does not seem possible to have
7858  * aborted polled commands from schedule_resp()
7859  */
7860 static bool sdebug_blk_mq_poll_iter(struct request *rq, void *opaque)
7861 {
7862 	struct sdebug_blk_mq_poll_data *data = opaque;
7863 	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
7864 	struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmd);
7865 	struct sdebug_defer *sd_dp;
7866 	u32 unique_tag = blk_mq_unique_tag(rq);
7867 	u16 hwq = blk_mq_unique_tag_to_hwq(unique_tag);
7868 	struct sdebug_queued_cmd *sqcp;
7869 	unsigned long flags;
7870 	int queue_num = data->queue_num;
7871 	ktime_t time;
7872 
7873 	/* We're only interested in one queue for this iteration */
7874 	if (hwq != queue_num)
7875 		return true;
7876 
7877 	/* Subsequent checks would fail if this failed, but check anyway */
7878 	if (!test_bit(SCMD_STATE_INFLIGHT, &cmd->state))
7879 		return true;
7880 
7881 	time = ktime_get_boottime();
7882 
7883 	spin_lock_irqsave(&sdsc->lock, flags);
7884 	sqcp = TO_QUEUED_CMD(cmd);
7885 	if (!sqcp) {
7886 		spin_unlock_irqrestore(&sdsc->lock, flags);
7887 		return true;
7888 	}
7889 
7890 	sd_dp = &sqcp->sd_dp;
7891 	if (READ_ONCE(sd_dp->defer_t) != SDEB_DEFER_POLL) {
7892 		spin_unlock_irqrestore(&sdsc->lock, flags);
7893 		return true;
7894 	}
7895 
7896 	if (time < sd_dp->cmpl_ts) {
7897 		spin_unlock_irqrestore(&sdsc->lock, flags);
7898 		return true;
7899 	}
7900 
7901 	ASSIGN_QUEUED_CMD(cmd, NULL);
7902 	spin_unlock_irqrestore(&sdsc->lock, flags);
7903 
7904 	if (sdebug_statistics) {
7905 		atomic_inc(&sdebug_completions);
7906 		if (raw_smp_processor_id() != sd_dp->issuing_cpu)
7907 			atomic_inc(&sdebug_miss_cpus);
7908 	}
7909 
7910 	sdebug_free_queued_cmd(sqcp);
7911 
7912 	scsi_done(cmd); /* callback to mid level */
7913 	(*data->num_entries)++;
7914 	return true;
7915 }
7916 
7917 static int sdebug_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
7918 {
7919 	int num_entries = 0;
7920 	struct sdebug_blk_mq_poll_data data = {
7921 		.queue_num = queue_num,
7922 		.num_entries = &num_entries,
7923 	};
7924 
7925 	blk_mq_tagset_busy_iter(&shost->tag_set, sdebug_blk_mq_poll_iter,
7926 				&data);
7927 
7928 	if (num_entries > 0)
7929 		atomic_add(num_entries, &sdeb_mq_poll_count);
7930 	return num_entries;
7931 }
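
/*
 * Flow sketch: blk-mq invokes the above for each poll queue; commands that
 * schedule_resp() deferred with SDEB_DEFER_POLL complete here once their
 * cmpl_ts has passed, and the tally feeds sdeb_mq_poll_count.
 */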
7932 
7933 static int sdebug_timeout_cmd(struct scsi_cmnd *cmnd)
7934 {
7935 	struct scsi_device *sdp = cmnd->device;
7936 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
7937 	struct sdebug_err_inject *err;
7938 	unsigned char *cmd = cmnd->cmnd;
7939 	int ret = 0;
7940 
7941 	if (devip == NULL)
7942 		return 0;
7943 
7944 	rcu_read_lock();
7945 	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
7946 		if (err->type == ERR_TMOUT_CMD &&
7947 		    (err->cmd == cmd[0] || err->cmd == 0xff)) {
7948 			ret = !!err->cnt;
7949 			if (err->cnt < 0)
7950 				err->cnt++;
7951 
7952 			rcu_read_unlock();
7953 			return ret;
7954 		}
7955 	}
7956 	rcu_read_unlock();
7957 
7958 	return 0;
7959 }
7960 
7961 static int sdebug_fail_queue_cmd(struct scsi_cmnd *cmnd)
7962 {
7963 	struct scsi_device *sdp = cmnd->device;
7964 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
7965 	struct sdebug_err_inject *err;
7966 	unsigned char *cmd = cmnd->cmnd;
7967 	int ret = 0;
7968 
7969 	if (devip == NULL)
7970 		return 0;
7971 
7972 	rcu_read_lock();
7973 	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
7974 		if (err->type == ERR_FAIL_QUEUE_CMD &&
7975 		    (err->cmd == cmd[0] || err->cmd == 0xff)) {
7976 			ret = err->cnt ? err->queuecmd_ret : 0;
7977 			if (err->cnt < 0)
7978 				err->cnt++;
7979 
7980 			rcu_read_unlock();
7981 			return ret;
7982 		}
7983 	}
7984 	rcu_read_unlock();
7985 
7986 	return 0;
7987 }
7988 
7989 static int sdebug_fail_cmd(struct scsi_cmnd *cmnd, int *retval,
7990 			   struct sdebug_err_inject *info)
7991 {
7992 	struct scsi_device *sdp = cmnd->device;
7993 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
7994 	struct sdebug_err_inject *err;
7995 	unsigned char *cmd = cmnd->cmnd;
7996 	int ret = 0;
7997 	int result;
7998 
7999 	if (devip == NULL)
8000 		return 0;
8001 
8002 	rcu_read_lock();
8003 	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
8004 		if (err->type == ERR_FAIL_CMD &&
8005 		    (err->cmd == cmd[0] || err->cmd == 0xff)) {
8006 			if (!err->cnt) {
8007 				rcu_read_unlock();
8008 				return 0;
8009 			}
8010 
8011 			ret = !!err->cnt;
8012 			rcu_read_unlock();
8013 			goto out_handle;
8014 		}
8015 	}
8016 	rcu_read_unlock();
8017 
8018 	return 0;
8019 
8020 out_handle:
8021 	if (err->cnt < 0)
8022 		err->cnt++;
8023 	mk_sense_buffer(cmnd, err->sense_key, err->asc, err->asq);
8024 	result = err->status_byte | err->host_byte << 16 | err->driver_byte << 24;
8025 	*info = *err;
8026 	*retval = schedule_resp(cmnd, devip, result, NULL, 0, 0);
8027 
8028 	return ret;
8029 }
8030 
8031 static int scsi_debug_queuecommand(struct Scsi_Host *shost,
8032 				   struct scsi_cmnd *scp)
8033 {
8034 	u8 sdeb_i;
8035 	struct scsi_device *sdp = scp->device;
8036 	const struct opcode_info_t *oip;
8037 	const struct opcode_info_t *r_oip;
8038 	struct sdebug_dev_info *devip;
8039 	u8 *cmd = scp->cmnd;
8040 	int (*r_pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
8041 	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *) = NULL;
8042 	int k, na;
8043 	int errsts = 0;
8044 	u64 lun_index = sdp->lun & 0x3FFF;
8045 	u32 flags;
8046 	u16 sa;
8047 	u8 opcode = cmd[0];
8048 	bool has_wlun_rl;
8049 	bool inject_now;
8050 	int ret = 0;
8051 	struct sdebug_err_inject err;
8052 
8053 	scsi_set_resid(scp, 0);
8054 	if (sdebug_statistics) {
8055 		atomic_inc(&sdebug_cmnd_count);
8056 		inject_now = inject_on_this_cmd();
8057 	} else {
8058 		inject_now = false;
8059 	}
8060 	if (unlikely(sdebug_verbose &&
8061 		     !(SDEBUG_OPT_NO_CDB_NOISE & sdebug_opts))) {
8062 		char b[120];
8063 		int n, len, sb;
8064 
8065 		len = scp->cmd_len;
8066 		sb = (int)sizeof(b);
8067 		if (len > 32)
8068 			strcpy(b, "too long, over 32 bytes");
8069 		else {
8070 			for (k = 0, n = 0; k < len && n < sb; ++k)
8071 				n += scnprintf(b + n, sb - n, "%02x ",
8072 					       (u32)cmd[k]);
8073 		}
8074 		sdev_printk(KERN_INFO, sdp, "%s: tag=%#x, cmd %s\n", my_name,
8075 			    blk_mq_unique_tag(scsi_cmd_to_rq(scp)), b);
8076 	}
8077 	if (unlikely(inject_now && (sdebug_opts & SDEBUG_OPT_HOST_BUSY)))
8078 		return SCSI_MLQUEUE_HOST_BUSY;
8079 	has_wlun_rl = (sdp->lun == SCSI_W_LUN_REPORT_LUNS);
8080 	if (unlikely(lun_index >= sdebug_max_luns && !has_wlun_rl))
8081 		goto err_out;
8082 
8083 	sdeb_i = opcode_ind_arr[opcode];	/* fully mapped */
8084 	oip = &opcode_info_arr[sdeb_i];		/* safe if table consistent */
8085 	devip = (struct sdebug_dev_info *)sdp->hostdata;
8086 	if (unlikely(!devip)) {
8087 		devip = find_build_dev_info(sdp);
8088 		if (NULL == devip)
8089 			goto err_out;
8090 	}
8091 
8092 	if (sdebug_timeout_cmd(scp)) {
8093 		scmd_printk(KERN_INFO, scp, "timeout command 0x%x\n", opcode);
8094 		return 0;
8095 	}
8096 
8097 	ret = sdebug_fail_queue_cmd(scp);
8098 	if (ret) {
8099 		scmd_printk(KERN_INFO, scp, "fail queue command 0x%x with 0x%x\n",
8100 				opcode, ret);
8101 		return ret;
8102 	}
8103 
8104 	if (sdebug_fail_cmd(scp, &ret, &err)) {
8105 		scmd_printk(KERN_INFO, scp,
8106 			"fail command 0x%x with hostbyte=0x%x, "
8107 			"driverbyte=0x%x, statusbyte=0x%x, "
8108 			"sense_key=0x%x, asc=0x%x, asq=0x%x\n",
8109 			opcode, err.host_byte, err.driver_byte,
8110 			err.status_byte, err.sense_key, err.asc, err.asq);
8111 		return ret;
8112 	}
8113 
8114 	if (unlikely(inject_now && !atomic_read(&sdeb_inject_pending)))
8115 		atomic_set(&sdeb_inject_pending, 1);
8116 
8117 	na = oip->num_attached;
8118 	r_pfp = oip->pfp;
8119 	if (na) {	/* multiple commands with this opcode */
8120 		r_oip = oip;
8121 		if (FF_SA & r_oip->flags) {
8122 			if (F_SA_LOW & oip->flags)
8123 				sa = 0x1f & cmd[1];
8124 			else
8125 				sa = get_unaligned_be16(cmd + 8);
8126 			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
8127 				if (opcode == oip->opcode && sa == oip->sa)
8128 					break;
8129 			}
8130 		} else {   /* since no service action only check opcode */
8131 			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
8132 				if (opcode == oip->opcode)
8133 					break;
8134 			}
8135 		}
8136 		if (k > na) {
8137 			if (F_SA_LOW & r_oip->flags)
8138 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 4);
8139 			else if (F_SA_HIGH & r_oip->flags)
8140 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, 7);
8141 			else
8142 				mk_sense_invalid_opcode(scp);
8143 			goto check_cond;
8144 		}
8145 	}	/* else (when na==0) we assume the oip is a match */
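	/*
	 * Lookup sketch: for MAINTENANCE IN (opcode 0xa3) the service action
	 * in cdb byte 1 (bits 4:0) picks among the attached oip entries;
	 * running off the end (k > na) yields an invalid-field or
	 * invalid-opcode CHECK CONDITION, as handled above.
	 */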
8146 	flags = oip->flags;
8147 	if (unlikely(F_INV_OP & flags)) {
8148 		mk_sense_invalid_opcode(scp);
8149 		goto check_cond;
8150 	}
8151 	if (unlikely(has_wlun_rl && !(F_RL_WLUN_OK & flags))) {
8152 		if (sdebug_verbose)
8153 			sdev_printk(KERN_INFO, sdp, "%s: Opcode 0x%x not supported for wlun\n",
8154 				    my_name, opcode);
8155 		mk_sense_invalid_opcode(scp);
8156 		goto check_cond;
8157 	}
8158 	if (unlikely(sdebug_strict)) {	/* check cdb against mask */
8159 		u8 rem;
8160 		int j;
8161 
8162 		for (k = 1; k < oip->len_mask[0] && k < 16; ++k) {
8163 			rem = ~oip->len_mask[k] & cmd[k];
8164 			if (rem) {
8165 				for (j = 7; j >= 0; --j, rem <<= 1) {
8166 					if (0x80 & rem)
8167 						break;
8168 				}
8169 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, k, j);
8170 				goto check_cond;
8171 			}
8172 		}
8173 	}
8174 	if (unlikely(!(F_SKIP_UA & flags) &&
8175 		     find_first_bit(devip->uas_bm,
8176 				    SDEBUG_NUM_UAS) != SDEBUG_NUM_UAS)) {
8177 		errsts = make_ua(scp, devip);
8178 		if (errsts)
8179 			goto check_cond;
8180 	}
8181 	if (unlikely(((F_M_ACCESS & flags) || scp->cmnd[0] == TEST_UNIT_READY) &&
8182 		     atomic_read(&devip->stopped))) {
8183 		errsts = resp_not_ready(scp, devip);
8184 		if (errsts)
8185 			goto fini;
8186 	}
8187 	if (sdebug_fake_rw && (F_FAKE_RW & flags))
8188 		goto fini;
8189 	if (unlikely(sdebug_every_nth)) {
8190 		if (fake_timeout(scp))
8191 			return 0;	/* ignore command: make trouble */
8192 	}
8193 	if (likely(oip->pfp))
8194 		pfp = oip->pfp;	/* calls a resp_* function */
8195 	else
8196 		pfp = r_pfp;    /* if leaf function ptr NULL, try the root's */
8197 
8198 fini:
8199 	if (F_DELAY_OVERR & flags)	/* cmds like INQUIRY respond asap */
8200 		return schedule_resp(scp, devip, errsts, pfp, 0, 0);
8201 	else if ((flags & F_LONG_DELAY) && (sdebug_jdelay > 0 ||
8202 					    sdebug_ndelay > 10000)) {
8203 		/*
8204 		 * Skip long delays if ndelay <= 10 microseconds. Otherwise
8205 		 * for Start Stop Unit (SSU) want at least 1 second delay and
8206 		 * if sdebug_jdelay>1 want a long delay of that many seconds.
8207 		 * For Synchronize Cache want 1/20 of SSU's delay.
8208 		 */
8209 		int jdelay = (sdebug_jdelay < 2) ? 1 : sdebug_jdelay;
8210 		int denom = (flags & F_SYNC_DELAY) ? 20 : 1;
8211 
8212 		jdelay = mult_frac(USER_HZ * jdelay, HZ, denom * USER_HZ);
8213 		return schedule_resp(scp, devip, errsts, pfp, jdelay, 0);
8214 	} else
8215 		return schedule_resp(scp, devip, errsts, pfp, sdebug_jdelay,
8216 				     sdebug_ndelay);
8217 check_cond:
8218 	return schedule_resp(scp, devip, check_condition_result, NULL, 0, 0);
8219 err_out:
8220 	return schedule_resp(scp, NULL, DID_NO_CONNECT << 16, NULL, 0, 0);
8221 }
8222 
8223 static int sdebug_init_cmd_priv(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
8224 {
8225 	struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmd);
8226 
8227 	spin_lock_init(&sdsc->lock);
8228 
8229 	return 0;
8230 }
8231 
8232 static struct scsi_host_template sdebug_driver_template = {
8233 	.show_info =		scsi_debug_show_info,
8234 	.write_info =		scsi_debug_write_info,
8235 	.proc_name =		sdebug_proc_name,
8236 	.name =			"SCSI DEBUG",
8237 	.info =			scsi_debug_info,
8238 	.slave_alloc =		scsi_debug_slave_alloc,
8239 	.slave_configure =	scsi_debug_slave_configure,
8240 	.slave_destroy =	scsi_debug_slave_destroy,
8241 	.ioctl =		scsi_debug_ioctl,
8242 	.queuecommand =		scsi_debug_queuecommand,
8243 	.change_queue_depth =	sdebug_change_qdepth,
8244 	.map_queues =		sdebug_map_queues,
8245 	.mq_poll =		sdebug_blk_mq_poll,
8246 	.eh_abort_handler =	scsi_debug_abort,
8247 	.eh_device_reset_handler = scsi_debug_device_reset,
8248 	.eh_target_reset_handler = scsi_debug_target_reset,
8249 	.eh_bus_reset_handler = scsi_debug_bus_reset,
8250 	.eh_host_reset_handler = scsi_debug_host_reset,
8251 	.can_queue =		SDEBUG_CANQUEUE,
8252 	.this_id =		7,
8253 	.sg_tablesize =		SG_MAX_SEGMENTS,
8254 	.cmd_per_lun =		DEF_CMD_PER_LUN,
8255 	.max_sectors =		-1U,
8256 	.max_segment_size =	-1U,
8257 	.module =		THIS_MODULE,
8258 	.track_queue_depth =	1,
8259 	.cmd_size = sizeof(struct sdebug_scsi_cmd),
8260 	.init_cmd_priv = sdebug_init_cmd_priv,
8261 	.target_alloc =		sdebug_target_alloc,
8262 	.target_destroy =	sdebug_target_destroy,
8263 };
8264 
8265 static int sdebug_driver_probe(struct device *dev)
8266 {
8267 	int error = 0;
8268 	struct sdebug_host_info *sdbg_host;
8269 	struct Scsi_Host *hpnt;
8270 	int hprot;
8271 
8272 	sdbg_host = dev_to_sdebug_host(dev);
8273 
8274 	sdebug_driver_template.can_queue = sdebug_max_queue;
8275 	sdebug_driver_template.cmd_per_lun = sdebug_max_queue;
8276 	if (!sdebug_clustering)
8277 		sdebug_driver_template.dma_boundary = PAGE_SIZE - 1;
8278 
8279 	hpnt = scsi_host_alloc(&sdebug_driver_template, 0);
8280 	if (NULL == hpnt) {
8281 		pr_err("scsi_host_alloc failed\n");
8282 		error = -ENODEV;
8283 		return error;
8284 	}
8285 	if (submit_queues > nr_cpu_ids) {
8286 		pr_warn("%s: trim submit_queues (was %d) to nr_cpu_ids=%u\n",
8287 			my_name, submit_queues, nr_cpu_ids);
8288 		submit_queues = nr_cpu_ids;
8289 	}
8290 	/*
8291 	 * Decide whether to tell scsi subsystem that we want mq. The
8292 	 * following should give the same answer for each host.
8293 	 */
8294 	hpnt->nr_hw_queues = submit_queues;
8295 	if (sdebug_host_max_queue)
8296 		hpnt->host_tagset = 1;
8297 
8298 	/* poll queues are possible for nr_hw_queues > 1 */
8299 	if (hpnt->nr_hw_queues == 1 || (poll_queues < 1)) {
8300 		pr_warn("%s: trim poll_queues to 0. poll_q/nr_hw = (%d/%d)\n",
8301 			 my_name, poll_queues, hpnt->nr_hw_queues);
8302 		poll_queues = 0;
8303 	}
8304 
8305 	/*
8306 	 * Poll queues don't need interrupts, but we need at least one I/O queue
8307 	 * left over for non-polled I/O.
8308 	 * If condition not met, trim poll_queues to 1 (just for simplicity).
8309 	 */
8310 	if (poll_queues >= submit_queues) {
8311 		if (submit_queues < 3)
8312 			pr_warn("%s: trim poll_queues to 1\n", my_name);
8313 		else
8314 			pr_warn("%s: trim poll_queues to 1. Perhaps try poll_queues=%d\n",
8315 				my_name, submit_queues - 1);
8316 		poll_queues = 1;
8317 	}
8318 	if (poll_queues)
8319 		hpnt->nr_maps = 3;
8320 
8321 	sdbg_host->shost = hpnt;
8322 	if ((hpnt->this_id >= 0) && (sdebug_num_tgts > hpnt->this_id))
8323 		hpnt->max_id = sdebug_num_tgts + 1;
8324 	else
8325 		hpnt->max_id = sdebug_num_tgts;
8326 	/* = sdebug_max_luns; */
8327 	hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
8328 
8329 	hprot = 0;
8330 
8331 	switch (sdebug_dif) {
8332 
8333 	case T10_PI_TYPE1_PROTECTION:
8334 		hprot = SHOST_DIF_TYPE1_PROTECTION;
8335 		if (sdebug_dix)
8336 			hprot |= SHOST_DIX_TYPE1_PROTECTION;
8337 		break;
8338 
8339 	case T10_PI_TYPE2_PROTECTION:
8340 		hprot = SHOST_DIF_TYPE2_PROTECTION;
8341 		if (sdebug_dix)
8342 			hprot |= SHOST_DIX_TYPE2_PROTECTION;
8343 		break;
8344 
8345 	case T10_PI_TYPE3_PROTECTION:
8346 		hprot = SHOST_DIF_TYPE3_PROTECTION;
8347 		if (sdebug_dix)
8348 			hprot |= SHOST_DIX_TYPE3_PROTECTION;
8349 		break;
8350 
8351 	default:
8352 		if (sdebug_dix)
8353 			hprot |= SHOST_DIX_TYPE0_PROTECTION;
8354 		break;
8355 	}
8356 
8357 	scsi_host_set_prot(hpnt, hprot);
8358 
8359 	if (have_dif_prot || sdebug_dix)
8360 		pr_info("host protection%s%s%s%s%s%s%s\n",
8361 			(hprot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
8362 			(hprot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
8363 			(hprot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
8364 			(hprot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
8365 			(hprot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
8366 			(hprot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
8367 			(hprot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");
8368 
8369 	if (sdebug_guard == 1)
8370 		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
8371 	else
8372 		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);
8373 
8374 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & sdebug_opts);
8375 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & sdebug_opts);
8376 	if (sdebug_every_nth)	/* need stats counters for every_nth */
8377 		sdebug_statistics = true;
8378 	error = scsi_add_host(hpnt, &sdbg_host->dev);
8379 	if (error) {
8380 		pr_err("scsi_add_host failed\n");
8381 		error = -ENODEV;
8382 		scsi_host_put(hpnt);
8383 	} else {
8384 		scsi_scan_host(hpnt);
8385 	}
8386 
8387 	return error;
8388 }
8389 
8390 static void sdebug_driver_remove(struct device *dev)
8391 {
8392 	struct sdebug_host_info *sdbg_host;
8393 	struct sdebug_dev_info *sdbg_devinfo, *tmp;
8394 
8395 	sdbg_host = dev_to_sdebug_host(dev);
8396 
8397 	scsi_remove_host(sdbg_host->shost);
8398 
8399 	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
8400 				 dev_list) {
8401 		list_del(&sdbg_devinfo->dev_list);
8402 		kfree(sdbg_devinfo->zstate);
8403 		kfree(sdbg_devinfo);
8404 	}
8405 
8406 	scsi_host_put(sdbg_host->shost);
8407 }
8408 
8409 static struct bus_type pseudo_lld_bus = {
8410 	.name = "pseudo",
8411 	.probe = sdebug_driver_probe,
8412 	.remove = sdebug_driver_remove,
8413 	.drv_groups = sdebug_drv_groups,
8414 };
8415