xref: /linux/drivers/scsi/scsi_debug.c (revision e721eb0616f62e766882b80fd3433b80635abd5f)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
4  *  Copyright (C) 1992  Eric Youngdale
5  *  Simulate a host adapter with 2 disks attached.  Do a lot of checking
6  *  to make sure that we are not getting blocks mixed up, and PANIC if
7  *  anything out of the ordinary is seen.
8  * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
9  *
10  * Copyright (C) 2001 - 2020 Douglas Gilbert
11  *
12  *  For documentation see http://sg.danny.cz/sg/scsi_debug.html
13  */
14 
15 
16 #define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
17 
18 #include <linux/module.h>
19 
20 #include <linux/kernel.h>
21 #include <linux/errno.h>
22 #include <linux/jiffies.h>
23 #include <linux/slab.h>
24 #include <linux/types.h>
25 #include <linux/string.h>
26 #include <linux/genhd.h>
27 #include <linux/fs.h>
28 #include <linux/init.h>
29 #include <linux/proc_fs.h>
30 #include <linux/vmalloc.h>
31 #include <linux/moduleparam.h>
32 #include <linux/scatterlist.h>
33 #include <linux/blkdev.h>
34 #include <linux/crc-t10dif.h>
35 #include <linux/spinlock.h>
36 #include <linux/interrupt.h>
37 #include <linux/atomic.h>
38 #include <linux/hrtimer.h>
39 #include <linux/uuid.h>
40 #include <linux/t10-pi.h>
41 #include <linux/msdos_partition.h>
42 #include <linux/random.h>
43 #include <linux/xarray.h>
44 #include <linux/prefetch.h>
45 
46 #include <net/checksum.h>
47 
48 #include <asm/unaligned.h>
49 
50 #include <scsi/scsi.h>
51 #include <scsi/scsi_cmnd.h>
52 #include <scsi/scsi_device.h>
53 #include <scsi/scsi_host.h>
54 #include <scsi/scsicam.h>
55 #include <scsi/scsi_eh.h>
56 #include <scsi/scsi_tcq.h>
57 #include <scsi/scsi_dbg.h>
58 
59 #include "sd.h"
60 #include "scsi_logging.h"
61 
62 /* make sure inq_product_rev string corresponds to this version */
63 #define SDEBUG_VERSION "0190"	/* format to fit INQUIRY revision field */
64 static const char *sdebug_version_date = "20200710";
65 
66 #define MY_NAME "scsi_debug"
67 
68 /* Additional Sense Code (ASC) */
69 #define NO_ADDITIONAL_SENSE 0x0
70 #define LOGICAL_UNIT_NOT_READY 0x4
71 #define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
72 #define UNRECOVERED_READ_ERR 0x11
73 #define PARAMETER_LIST_LENGTH_ERR 0x1a
74 #define INVALID_OPCODE 0x20
75 #define LBA_OUT_OF_RANGE 0x21
76 #define INVALID_FIELD_IN_CDB 0x24
77 #define INVALID_FIELD_IN_PARAM_LIST 0x26
78 #define WRITE_PROTECTED 0x27
79 #define UA_RESET_ASC 0x29
80 #define UA_CHANGED_ASC 0x2a
81 #define TARGET_CHANGED_ASC 0x3f
82 #define LUNS_CHANGED_ASCQ 0x0e
83 #define INSUFF_RES_ASC 0x55
84 #define INSUFF_RES_ASCQ 0x3
85 #define POWER_ON_RESET_ASCQ 0x0
86 #define BUS_RESET_ASCQ 0x2	/* scsi bus reset occurred */
87 #define MODE_CHANGED_ASCQ 0x1	/* mode parameters changed */
88 #define CAPACITY_CHANGED_ASCQ 0x9
89 #define SAVING_PARAMS_UNSUP 0x39
90 #define TRANSPORT_PROBLEM 0x4b
91 #define THRESHOLD_EXCEEDED 0x5d
92 #define LOW_POWER_COND_ON 0x5e
93 #define MISCOMPARE_VERIFY_ASC 0x1d
94 #define MICROCODE_CHANGED_ASCQ 0x1	/* with TARGET_CHANGED_ASC */
95 #define MICROCODE_CHANGED_WO_RESET_ASCQ 0x16
96 #define WRITE_ERROR_ASC 0xc
97 #define UNALIGNED_WRITE_ASCQ 0x4
98 #define WRITE_BOUNDARY_ASCQ 0x5
99 #define READ_INVDATA_ASCQ 0x6
100 #define READ_BOUNDARY_ASCQ 0x7
101 #define INSUFF_ZONE_ASCQ 0xe
102 
103 /* Additional Sense Code Qualifier (ASCQ) */
104 #define ACK_NAK_TO 0x3
105 
106 /* Default values for driver parameters */
107 #define DEF_NUM_HOST   1
108 #define DEF_NUM_TGTS   1
109 #define DEF_MAX_LUNS   1
110 /* With these defaults, this driver will make 1 host with 1 target
111  * (id 0) containing 1 logical unit (lun 0). That is 1 device.
112  */
113 #define DEF_ATO 1
114 #define DEF_CDB_LEN 10
115 #define DEF_JDELAY   1		/* if > 0 unit is a jiffy */
116 #define DEF_DEV_SIZE_PRE_INIT   0
117 #define DEF_DEV_SIZE_MB   8
118 #define DEF_ZBC_DEV_SIZE_MB   128
119 #define DEF_DIF 0
120 #define DEF_DIX 0
121 #define DEF_PER_HOST_STORE false
122 #define DEF_D_SENSE   0
123 #define DEF_EVERY_NTH   0
124 #define DEF_FAKE_RW	0
125 #define DEF_GUARD 0
126 #define DEF_HOST_LOCK 0
127 #define DEF_LBPU 0
128 #define DEF_LBPWS 0
129 #define DEF_LBPWS10 0
130 #define DEF_LBPRZ 1
131 #define DEF_LOWEST_ALIGNED 0
132 #define DEF_NDELAY   0		/* if > 0 unit is a nanosecond */
133 #define DEF_NO_LUN_0   0
134 #define DEF_NUM_PARTS   0
135 #define DEF_OPTS   0
136 #define DEF_OPT_BLKS 1024
137 #define DEF_PHYSBLK_EXP 0
138 #define DEF_OPT_XFERLEN_EXP 0
139 #define DEF_PTYPE   TYPE_DISK
140 #define DEF_RANDOM false
141 #define DEF_REMOVABLE false
142 #define DEF_SCSI_LEVEL   7    /* INQUIRY, byte2 [6->SPC-4; 7->SPC-5] */
143 #define DEF_SECTOR_SIZE 512
144 #define DEF_UNMAP_ALIGNMENT 0
145 #define DEF_UNMAP_GRANULARITY 1
146 #define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
147 #define DEF_UNMAP_MAX_DESC 256
148 #define DEF_VIRTUAL_GB   0
149 #define DEF_VPD_USE_HOSTNO 1
150 #define DEF_WRITESAME_LENGTH 0xFFFF
151 #define DEF_STRICT 0
152 #define DEF_STATISTICS false
153 #define DEF_SUBMIT_QUEUES 1
154 #define DEF_UUID_CTL 0
155 #define JDELAY_OVERRIDDEN -9999
156 
157 /* Default parameters for ZBC drives */
158 #define DEF_ZBC_ZONE_SIZE_MB	128
159 #define DEF_ZBC_MAX_OPEN_ZONES	8
160 #define DEF_ZBC_NR_CONV_ZONES	1
161 
162 #define SDEBUG_LUN_0_VAL 0
163 
164 /* bit mask values for sdebug_opts */
165 #define SDEBUG_OPT_NOISE		1
166 #define SDEBUG_OPT_MEDIUM_ERR		2
167 #define SDEBUG_OPT_TIMEOUT		4
168 #define SDEBUG_OPT_RECOVERED_ERR	8
169 #define SDEBUG_OPT_TRANSPORT_ERR	16
170 #define SDEBUG_OPT_DIF_ERR		32
171 #define SDEBUG_OPT_DIX_ERR		64
172 #define SDEBUG_OPT_MAC_TIMEOUT		128
173 #define SDEBUG_OPT_SHORT_TRANSFER	0x100
174 #define SDEBUG_OPT_Q_NOISE		0x200
175 #define SDEBUG_OPT_ALL_TSF		0x400
176 #define SDEBUG_OPT_RARE_TSF		0x800
177 #define SDEBUG_OPT_N_WCE		0x1000
178 #define SDEBUG_OPT_RESET_NOISE		0x2000
179 #define SDEBUG_OPT_NO_CDB_NOISE		0x4000
180 #define SDEBUG_OPT_HOST_BUSY		0x8000
181 #define SDEBUG_OPT_CMD_ABORT		0x10000
182 #define SDEBUG_OPT_ALL_NOISE (SDEBUG_OPT_NOISE | SDEBUG_OPT_Q_NOISE | \
183 			      SDEBUG_OPT_RESET_NOISE)
184 #define SDEBUG_OPT_ALL_INJECTING (SDEBUG_OPT_RECOVERED_ERR | \
185 				  SDEBUG_OPT_TRANSPORT_ERR | \
186 				  SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR | \
187 				  SDEBUG_OPT_SHORT_TRANSFER | \
188 				  SDEBUG_OPT_HOST_BUSY | \
189 				  SDEBUG_OPT_CMD_ABORT)
190 #define SDEBUG_OPT_RECOV_DIF_DIX (SDEBUG_OPT_RECOVERED_ERR | \
191 				  SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR)
192 
193 /* As indicated in SAM-5 and SPC-4 Unit Attentions (UAs) are returned in
194  * priority order. In the subset implemented here lower numbers have higher
195  * priority. The UA numbers should be a sequence starting from 0 with
196  * SDEBUG_NUM_UAS being 1 higher than the highest numbered UA. */
197 #define SDEBUG_UA_POR 0		/* Power on, reset, or bus device reset */
198 #define SDEBUG_UA_BUS_RESET 1
199 #define SDEBUG_UA_MODE_CHANGED 2
200 #define SDEBUG_UA_CAPACITY_CHANGED 3
201 #define SDEBUG_UA_LUNS_CHANGED 4
202 #define SDEBUG_UA_MICROCODE_CHANGED 5	/* simulate firmware change */
203 #define SDEBUG_UA_MICROCODE_CHANGED_WO_RESET 6
204 #define SDEBUG_NUM_UAS 7
205 
206 /* when 1==SDEBUG_OPT_MEDIUM_ERR, a medium error is simulated at this
207  * sector on read commands: */
208 #define OPT_MEDIUM_ERR_ADDR   0x1234 /* that's sector 4660 in decimal */
209 #define OPT_MEDIUM_ERR_NUM    10     /* number of consecutive medium errs */
210 
211 /* If REPORT LUNS has luns >= 256 it can choose "flat space" (value 1)
212  * or "peripheral device" addressing (value 0) */
213 #define SAM2_LUN_ADDRESS_METHOD 0
214 
215 /* SDEBUG_CANQUEUE is the maximum number of commands that can be queued
216  * (for response) per submit queue at one time. Can be reduced by max_queue
217  * option. Command responses are not queued when jdelay=0 and ndelay=0. The
218  * per-device DEF_CMD_PER_LUN can be changed via sysfs:
219  * /sys/class/scsi_device/<h:c:t:l>/device/queue_depth
220  * but cannot exceed SDEBUG_CANQUEUE .
221  */
222 #define SDEBUG_CANQUEUE_WORDS  3	/* a WORD is bits in a long */
223 #define SDEBUG_CANQUEUE  (SDEBUG_CANQUEUE_WORDS * BITS_PER_LONG)
224 #define DEF_CMD_PER_LUN  255
225 
226 /* UA - Unit Attention; SA - Service Action; SSU - Start Stop Unit */
227 #define F_D_IN			1	/* Data-in command (e.g. READ) */
228 #define F_D_OUT			2	/* Data-out command (e.g. WRITE) */
229 #define F_D_OUT_MAYBE		4	/* WRITE SAME, NDOB bit */
230 #define F_D_UNKN		8
231 #define F_RL_WLUN_OK		0x10	/* allowed with REPORT LUNS W-LUN */
232 #define F_SKIP_UA		0x20	/* bypass UAs (e.g. INQUIRY command) */
233 #define F_DELAY_OVERR		0x40	/* for commands like INQUIRY */
234 #define F_SA_LOW		0x80	/* SA is in cdb byte 1, bits 4 to 0 */
235 #define F_SA_HIGH		0x100	/* SA is in cdb bytes 8 and 9 */
236 #define F_INV_OP		0x200	/* invalid opcode (not supported) */
237 #define F_FAKE_RW		0x400	/* bypass resp_*() when fake_rw set */
238 #define F_M_ACCESS		0x800	/* media access, reacts to SSU state */
239 #define F_SSU_DELAY		0x1000	/* SSU command delay (long-ish) */
240 #define F_SYNC_DELAY		0x2000	/* SYNCHRONIZE CACHE delay */
241 
242 /* Useful combinations of the above flags */
243 #define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
244 #define FF_MEDIA_IO (F_M_ACCESS | F_FAKE_RW)
245 #define FF_SA (F_SA_HIGH | F_SA_LOW)
246 #define F_LONG_DELAY		(F_SSU_DELAY | F_SYNC_DELAY)
247 
248 #define SDEBUG_MAX_PARTS 4
249 
250 #define SDEBUG_MAX_CMD_LEN 32
251 
252 #define SDEB_XA_NOT_IN_USE XA_MARK_1
253 
254 /* Zone types (zbcr05 table 25) */
enum sdebug_z_type {
	ZBC_ZONE_TYPE_CNV	= 0x1,	/* conventional */
	ZBC_ZONE_TYPE_SWR	= 0x2,	/* sequential write required */
	ZBC_ZONE_TYPE_SWP	= 0x3,	/* sequential write preferred */
};
260 
261 /* enumeration names taken from table 26, zbcr05 */
/* Numeric values are the ZONE CONDITION codes from zbcr05 table 26 */
enum sdebug_z_cond {
	ZBC_NOT_WRITE_POINTER	= 0x0,
	ZC1_EMPTY		= 0x1,
	ZC2_IMPLICIT_OPEN	= 0x2,
	ZC3_EXPLICIT_OPEN	= 0x3,
	ZC4_CLOSED		= 0x4,
	ZC6_READ_ONLY		= 0xd,
	ZC5_FULL		= 0xe,
	ZC7_OFFLINE		= 0xf,
};
272 
struct sdeb_zone_state {	/* ZBC: per zone state */
	enum sdebug_z_type z_type;
	enum sdebug_z_cond z_cond;
	bool z_non_seq_resource;	/* NOTE(review): presumably the non-sequential-write-resources-active attribute; confirm at usage */
	unsigned int z_size;		/* zone size; units presumed logical blocks - TODO confirm */
	sector_t z_start;		/* first sector of the zone */
	sector_t z_wp;			/* write pointer position */
};
281 
/* Per logical-unit state; one instance per simulated device. */
struct sdebug_dev_info {
	struct list_head dev_list;	/* node on sdbg_host->dev_info_list */
	unsigned int channel;
	unsigned int target;
	u64 lun;
	uuid_t lu_name;			/* logical unit name (a UUID) */
	struct sdebug_host_info *sdbg_host;	/* owning pseudo host */
	unsigned long uas_bm[1];	/* pending Unit Attentions; bit numbers are SDEBUG_UA_* */
	atomic_t num_in_q;		/* NOTE(review): presumably commands queued on this LU; confirm at call sites */
	atomic_t stopped;		/* NOTE(review): presumably START STOP UNIT state; confirm */
	bool used;			/* slot in use */

	/* For ZBC devices */
	enum blk_zoned_model zmodel;
	unsigned int zsize;		/* zone size - TODO confirm units (blocks vs sectors) */
	unsigned int zsize_shift;
	unsigned int nr_zones;
	unsigned int nr_conv_zones;	/* leading conventional zones */
	unsigned int nr_imp_open;	/* zones in implicit-open condition */
	unsigned int nr_exp_open;	/* zones in explicit-open condition */
	unsigned int nr_closed;
	unsigned int max_open;		/* limit on concurrently open zones */
	struct sdeb_zone_state *zstate;	/* per-zone state array */
};
306 
/* Per simulated host adapter. */
struct sdebug_host_info {
	struct list_head host_list;	/* linkage for the list of hosts */
	int si_idx;	/* sdeb_store_info (per host) xarray index */
	struct Scsi_Host *shost;
	struct device dev;		/* embedded; see to_sdebug_host() */
	struct list_head dev_info_list;	/* child sdebug_dev_info objects */
};
314 
315 /* There is an xarray of pointers to this struct's objects, one per host */
/* RAM backing store; created/destroyed via sdebug_add_store()/sdebug_erase_store() */
struct sdeb_store_info {
	rwlock_t macc_lck;	/* for atomic media access on this store */
	u8 *storep;		/* user data storage (ram) */
	struct t10_pi_tuple *dif_storep; /* protection info */
	void *map_storep;	/* provisioning map */
};
322 
323 #define to_sdebug_host(d)	\
324 	container_of(d, struct sdebug_host_info, dev)
325 
/* How a queued command's completion is deferred (see struct sdebug_defer) */
enum sdeb_defer_type {SDEB_DEFER_NONE = 0, SDEB_DEFER_HRT = 1,
		      SDEB_DEFER_WQ = 2};
328 
/*
 * Bookkeeping for one deferred command response. The completion is driven
 * either by the hrtimer (SDEB_DEFER_HRT) or the workqueue item
 * (SDEB_DEFER_WQ), as recorded in defer_t.
 */
struct sdebug_defer {
	struct hrtimer hrt;
	struct execute_work ew;
	int sqa_idx;	/* index of sdebug_queue array */
	int qc_idx;	/* index of sdebug_queued_cmd array within sqa_idx */
	int hc_idx;	/* hostwide tag index */
	int issuing_cpu;
	bool init_hrt;
	bool init_wq;
	bool aborted;	/* true when blk_abort_request() already called */
	enum sdeb_defer_type defer_t;
};
341 
/* One slot of sdebug_queue.qc_arr[]. */
struct sdebug_queued_cmd {
	/* corresponding bit set in in_use_bm[] in owning struct sdebug_queue
	 * instance indicates this slot is in use.
	 */
	struct sdebug_defer *sd_dp;	/* deferred completion state, if any */
	struct scsi_cmnd *a_cmnd;	/* the mid-level command being serviced */
};
349 
/* One submit queue's worth of in-flight commands (up to SDEBUG_CANQUEUE). */
struct sdebug_queue {
	struct sdebug_queued_cmd qc_arr[SDEBUG_CANQUEUE];
	unsigned long in_use_bm[SDEBUG_CANQUEUE_WORDS];	/* bit set <=> qc_arr slot in use */
	spinlock_t qc_lock;	/* NOTE(review): presumably guards qc_arr/in_use_bm; confirm */
	atomic_t blocked;	/* to temporarily stop more being queued */
};
356 
/* Driver-wide statistics and error-injection state */
static atomic_t sdebug_cmnd_count;   /* number of incoming commands */
static atomic_t sdebug_completions;  /* count of deferred completions */
static atomic_t sdebug_miss_cpus;    /* submission + completion cpus differ */
static atomic_t sdebug_a_tsf;	     /* 'almost task set full' counter */
static atomic_t sdeb_inject_pending; /* NOTE(review): presumably armed by every_nth error injection; confirm */
362 
/*
 * Describes one supported SCSI opcode (or opcode + service action): how to
 * validate its cdb (len_mask) and which resp_*() function services it.
 */
struct opcode_info_t {
	u8 num_attached;	/* 0 if this is it (i.e. a leaf); use 0xff */
				/* for terminating element */
	u8 opcode;		/* if num_attached > 0, preferred */
	u16 sa;			/* service action */
	u32 flags;		/* OR-ed set of the F_* flags defined above */
	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
	const struct opcode_info_t *arrp;  /* num_attached elements or NULL */
	u8 len_mask[16];	/* len_mask[0]-->cdb_len, then mask for cdb */
				/* 1 to min(cdb_len, 15); ignore cdb[15...] */
};
374 
375 /* SCSI opcodes (first byte of cdb) of interest mapped onto these indexes */
/*
 * Indexes into opcode_info_arr[]; opcode_ind_arr[] maps cdb[0] to one of
 * these values.
 */
enum sdeb_opcode_index {
	SDEB_I_INVALID_OPCODE =	0,
	SDEB_I_INQUIRY = 1,
	SDEB_I_REPORT_LUNS = 2,
	SDEB_I_REQUEST_SENSE = 3,
	SDEB_I_TEST_UNIT_READY = 4,
	SDEB_I_MODE_SENSE = 5,		/* 6, 10 */
	SDEB_I_MODE_SELECT = 6,		/* 6, 10 */
	SDEB_I_LOG_SENSE = 7,
	SDEB_I_READ_CAPACITY = 8,	/* 10; 16 is in SA_IN(16) */
	SDEB_I_READ = 9,		/* 6, 10, 12, 16 */
	SDEB_I_WRITE = 10,		/* 6, 10, 12, 16 */
	SDEB_I_START_STOP = 11,
	SDEB_I_SERV_ACT_IN_16 = 12,	/* add ...SERV_ACT_IN_12 if needed */
	SDEB_I_SERV_ACT_OUT_16 = 13,	/* add ...SERV_ACT_OUT_12 if needed */
	SDEB_I_MAINT_IN = 14,
	SDEB_I_MAINT_OUT = 15,
	SDEB_I_VERIFY = 16,		/* VERIFY(10), VERIFY(16) */
	SDEB_I_VARIABLE_LEN = 17,	/* READ(32), WRITE(32), WR_SCAT(32) */
	SDEB_I_RESERVE = 18,		/* 6, 10 */
	SDEB_I_RELEASE = 19,		/* 6, 10 */
	SDEB_I_ALLOW_REMOVAL = 20,	/* PREVENT ALLOW MEDIUM REMOVAL */
	SDEB_I_REZERO_UNIT = 21,	/* REWIND in SSC */
	SDEB_I_ATA_PT = 22,		/* 12, 16 */
	SDEB_I_SEND_DIAG = 23,
	SDEB_I_UNMAP = 24,
	SDEB_I_WRITE_BUFFER = 25,
	SDEB_I_WRITE_SAME = 26,		/* 10, 16 */
	SDEB_I_SYNC_CACHE = 27,		/* 10, 16 */
	SDEB_I_COMP_WRITE = 28,
	SDEB_I_PRE_FETCH = 29,		/* 10, 16 */
	SDEB_I_ZONE_OUT = 30,		/* 0x94+SA; includes no data xfer */
	SDEB_I_ZONE_IN = 31,		/* 0x95+SA; all have data-in */
	SDEB_I_LAST_ELEM_P1 = 32,	/* keep this last (previous + 1) */
};
411 
412 
/*
 * Maps cdb[0] (the SCSI opcode byte) to an SDEB_I_* index into
 * opcode_info_arr[]. 0 means SDEB_I_INVALID_OPCODE (not supported).
 */
static const unsigned char opcode_ind_arr[256] = {
/* 0x0; 0x0->0x1f: 6 byte cdbs */
	SDEB_I_TEST_UNIT_READY, SDEB_I_REZERO_UNIT, 0, SDEB_I_REQUEST_SENSE,
	    0, 0, 0, 0,
	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, 0,
	0, 0, SDEB_I_INQUIRY, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
	    SDEB_I_RELEASE,
	0, 0, SDEB_I_MODE_SENSE, SDEB_I_START_STOP, 0, SDEB_I_SEND_DIAG,
	    SDEB_I_ALLOW_REMOVAL, 0,
/* 0x20; 0x20->0x3f: 10 byte cdbs */
	0, 0, 0, 0, 0, SDEB_I_READ_CAPACITY, 0, 0,
	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, SDEB_I_VERIFY,
	0, 0, 0, 0, SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, 0,
	0, 0, 0, SDEB_I_WRITE_BUFFER, 0, 0, 0, 0,
/* 0x40; 0x40->0x5f: 10 byte cdbs */
	0, SDEB_I_WRITE_SAME, SDEB_I_UNMAP, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, SDEB_I_LOG_SENSE, 0, 0,
	0, 0, 0, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
	    SDEB_I_RELEASE,
	0, 0, SDEB_I_MODE_SENSE, 0, 0, 0, 0, 0,
/* 0x60; 0x60->0x7d are reserved, 0x7e is "extended cdb" */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, SDEB_I_VARIABLE_LEN,
/* 0x80; 0x80->0x9f: 16 byte cdbs */
	0, 0, 0, 0, 0, SDEB_I_ATA_PT, 0, 0,
	SDEB_I_READ, SDEB_I_COMP_WRITE, SDEB_I_WRITE, 0,
	0, 0, 0, SDEB_I_VERIFY,
	SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, SDEB_I_WRITE_SAME,
	SDEB_I_ZONE_OUT, SDEB_I_ZONE_IN, 0, 0,
	0, 0, 0, 0, 0, 0, SDEB_I_SERV_ACT_IN_16, SDEB_I_SERV_ACT_OUT_16,
/* 0xa0; 0xa0->0xbf: 12 byte cdbs */
	SDEB_I_REPORT_LUNS, SDEB_I_ATA_PT, 0, SDEB_I_MAINT_IN,
	     SDEB_I_MAINT_OUT, 0, 0, 0,
	SDEB_I_READ, 0 /* SDEB_I_SERV_ACT_OUT_12 */, SDEB_I_WRITE,
	     0 /* SDEB_I_SERV_ACT_IN_12 */, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
/* 0xc0; 0xc0->0xff: vendor specific */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
};
457 
458 /*
459  * The following "response" functions return the SCSI mid-level's 4 byte
460  * tuple-in-an-int. To handle commands with an IMMED bit, for a faster
461  * command completion, they can mask their return value with
462  * SDEG_RES_IMMED_MASK .
463  */
464 #define SDEG_RES_IMMED_MASK 0x40000000
465 
466 static int resp_inquiry(struct scsi_cmnd *, struct sdebug_dev_info *);
467 static int resp_report_luns(struct scsi_cmnd *, struct sdebug_dev_info *);
468 static int resp_requests(struct scsi_cmnd *, struct sdebug_dev_info *);
469 static int resp_mode_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
470 static int resp_mode_select(struct scsi_cmnd *, struct sdebug_dev_info *);
471 static int resp_log_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
472 static int resp_readcap(struct scsi_cmnd *, struct sdebug_dev_info *);
473 static int resp_read_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
474 static int resp_write_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
475 static int resp_write_scat(struct scsi_cmnd *, struct sdebug_dev_info *);
476 static int resp_start_stop(struct scsi_cmnd *, struct sdebug_dev_info *);
477 static int resp_readcap16(struct scsi_cmnd *, struct sdebug_dev_info *);
478 static int resp_get_lba_status(struct scsi_cmnd *, struct sdebug_dev_info *);
479 static int resp_report_tgtpgs(struct scsi_cmnd *, struct sdebug_dev_info *);
480 static int resp_unmap(struct scsi_cmnd *, struct sdebug_dev_info *);
481 static int resp_rsup_opcodes(struct scsi_cmnd *, struct sdebug_dev_info *);
482 static int resp_rsup_tmfs(struct scsi_cmnd *, struct sdebug_dev_info *);
483 static int resp_verify(struct scsi_cmnd *, struct sdebug_dev_info *);
484 static int resp_write_same_10(struct scsi_cmnd *, struct sdebug_dev_info *);
485 static int resp_write_same_16(struct scsi_cmnd *, struct sdebug_dev_info *);
486 static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *);
487 static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *);
488 static int resp_sync_cache(struct scsi_cmnd *, struct sdebug_dev_info *);
489 static int resp_pre_fetch(struct scsi_cmnd *, struct sdebug_dev_info *);
490 static int resp_report_zones(struct scsi_cmnd *, struct sdebug_dev_info *);
491 static int resp_open_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
492 static int resp_close_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
493 static int resp_finish_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
494 static int resp_rwp_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
495 
496 static int sdebug_do_add_host(bool mk_new_store);
497 static int sdebug_add_host_helper(int per_host_idx);
498 static void sdebug_do_remove_host(bool the_end);
499 static int sdebug_add_store(void);
500 static void sdebug_erase_store(int idx, struct sdeb_store_info *sip);
501 static void sdebug_erase_all_stores(bool apart_from_first);
502 
503 /*
504  * The following are overflow arrays for cdbs that "hit" the same index in
505  * the opcode_info_arr array. The most time sensitive (or commonly used) cdb
506  * should be placed in opcode_info_arr[], the others should be placed here.
507  */
static const struct opcode_info_t msense_iarr[] = {
	{0, 0x1a, 0, F_D_IN, NULL, NULL,	/* MODE SENSE(6) */
	    {6,  0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t mselect_iarr[] = {
	{0, 0x15, 0, F_D_OUT, NULL, NULL,	/* MODE SELECT(6) */
	    {6,  0xf1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t read_iarr[] = {
	{0, 0x28, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(10) */
	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },
	{0, 0x8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL, /* READ(6) */
	    {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0xa8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(12) */
	    {12,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf,
	     0xc7, 0, 0, 0, 0} },
};

static const struct opcode_info_t write_iarr[] = {
	{0, 0x2a, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(10) */
	    NULL, {10,  0xfb, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7,
		   0, 0, 0, 0, 0, 0} },
	{0, 0xa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,   /* WRITE(6) */
	    NULL, {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0,
		   0, 0, 0} },
	{0, 0xaa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(12) */
	    NULL, {12,  0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		   0xbf, 0xc7, 0, 0, 0, 0} },
};

static const struct opcode_info_t verify_iarr[] = {
	{0, 0x2f, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,/* VERIFY(10) */
	    NULL, {10,  0xf7, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xff, 0xff, 0xc7,
		   0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t sa_in_16_iarr[] = {
	{0, 0x9e, 0x12, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL,
	    {16,  0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0, 0xc7} },	/* GET LBA STATUS(16) */
};

static const struct opcode_info_t vl_iarr[] = {	/* VARIABLE LENGTH */
	{0, 0x7f, 0xb, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_dt0,
	    NULL, {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0xb, 0xfa,
		   0, 0xff, 0xff, 0xff, 0xff} },	/* WRITE(32) */
	{0, 0x7f, 0x11, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
	    NULL, {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x11, 0xf8,
		   0, 0xff, 0xff, 0x0, 0x0} },	/* WRITE SCATTERED(32) */
};

static const struct opcode_info_t maint_in_iarr[] = {	/* MAINT IN */
	{0, 0xa3, 0xc, F_SA_LOW | F_D_IN, resp_rsup_opcodes, NULL,
	    {12,  0xc, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
	     0xc7, 0, 0, 0, 0} }, /* REPORT SUPPORTED OPERATION CODES */
	{0, 0xa3, 0xd, F_SA_LOW | F_D_IN, resp_rsup_tmfs, NULL,
	    {12,  0xd, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
	     0, 0} },	/* REPORTED SUPPORTED TASK MANAGEMENT FUNCTIONS */
};

static const struct opcode_info_t write_same_iarr[] = {
	{0, 0x93, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_write_same_16, NULL,
	    {16,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0x3f, 0xc7} },		/* WRITE SAME(16) */
};

static const struct opcode_info_t reserve_iarr[] = {
	{0, 0x16, 0, F_D_OUT, NULL, NULL,		/* RESERVE(6) */
	    {6,  0x1f, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t release_iarr[] = {
	{0, 0x17, 0, F_D_OUT, NULL, NULL,		/* RELEASE(6) */
	    {6,  0x1f, 0xff, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t sync_cache_iarr[] = {
	{0, 0x91, 0, F_SYNC_DELAY | F_M_ACCESS, resp_sync_cache, NULL,
	    {16,  0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* SYNC_CACHE (16) */
};

static const struct opcode_info_t pre_fetch_iarr[] = {
	{0, 0x90, 0, F_SYNC_DELAY | FF_MEDIA_IO, resp_pre_fetch, NULL,
	    {16,  0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* PRE-FETCH (16) */
};

static const struct opcode_info_t zone_out_iarr[] = {	/* ZONE OUT(16) */
	{0, 0x94, 0x1, F_SA_LOW | F_M_ACCESS, resp_close_zone, NULL,
	    {16, 0x1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },	/* CLOSE ZONE */
	{0, 0x94, 0x2, F_SA_LOW | F_M_ACCESS, resp_finish_zone, NULL,
	    {16, 0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },	/* FINISH ZONE */
	{0, 0x94, 0x4, F_SA_LOW | F_M_ACCESS, resp_rwp_zone, NULL,
	    {16, 0x4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },  /* RESET WRITE POINTER */
};

static const struct opcode_info_t zone_in_iarr[] = {	/* ZONE IN(16) */
	{0, 0x95, 0x6, F_SA_LOW | F_D_IN | F_M_ACCESS, NULL, NULL,
	    {16, 0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* REPORT ZONES */
};
616 
617 
618 /* This array is accessed via SDEB_I_* values. Make sure all are mapped,
619  * plus the terminating elements for logic that scans this table such as
620  * REPORT SUPPORTED OPERATION CODES. */
621 static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEM_P1 + 1] = {
622 /* 0 */
623 	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL,	/* unknown opcodes */
624 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
625 	{0, 0x12, 0, FF_RESPOND | F_D_IN, resp_inquiry, NULL, /* INQUIRY */
626 	    {6,  0xe3, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
627 	{0, 0xa0, 0, FF_RESPOND | F_D_IN, resp_report_luns, NULL,
628 	    {12,  0xe3, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
629 	     0, 0} },					/* REPORT LUNS */
630 	{0, 0x3, 0, FF_RESPOND | F_D_IN, resp_requests, NULL,
631 	    {6,  0xe1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
632 	{0, 0x0, 0, F_M_ACCESS | F_RL_WLUN_OK, NULL, NULL,/* TEST UNIT READY */
633 	    {6,  0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
634 /* 5 */
635 	{ARRAY_SIZE(msense_iarr), 0x5a, 0, F_D_IN,	/* MODE SENSE(10) */
636 	    resp_mode_sense, msense_iarr, {10,  0xf8, 0xff, 0xff, 0, 0, 0,
637 		0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
638 	{ARRAY_SIZE(mselect_iarr), 0x55, 0, F_D_OUT,	/* MODE SELECT(10) */
639 	    resp_mode_select, mselect_iarr, {10,  0xf1, 0, 0, 0, 0, 0, 0xff,
640 		0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
641 	{0, 0x4d, 0, F_D_IN, resp_log_sense, NULL,	/* LOG SENSE */
642 	    {10,  0xe3, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0,
643 	     0, 0, 0} },
644 	{0, 0x25, 0, F_D_IN, resp_readcap, NULL,    /* READ CAPACITY(10) */
645 	    {10,  0xe1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0x1, 0xc7, 0, 0, 0, 0,
646 	     0, 0} },
647 	{ARRAY_SIZE(read_iarr), 0x88, 0, F_D_IN | FF_MEDIA_IO, /* READ(16) */
648 	    resp_read_dt0, read_iarr, {16,  0xfe, 0xff, 0xff, 0xff, 0xff,
649 	    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
650 /* 10 */
651 	{ARRAY_SIZE(write_iarr), 0x8a, 0, F_D_OUT | FF_MEDIA_IO,
652 	    resp_write_dt0, write_iarr,			/* WRITE(16) */
653 		{16,  0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
654 		 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
655 	{0, 0x1b, 0, F_SSU_DELAY, resp_start_stop, NULL,/* START STOP UNIT */
656 	    {6,  0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
657 	{ARRAY_SIZE(sa_in_16_iarr), 0x9e, 0x10, F_SA_LOW | F_D_IN,
658 	    resp_readcap16, sa_in_16_iarr, /* SA_IN(16), READ CAPACITY(16) */
659 		{16,  0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
660 		 0xff, 0xff, 0xff, 0xff, 0x1, 0xc7} },
661 	{0, 0x9f, 0x12, F_SA_LOW | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
662 	    NULL, {16,  0x12, 0xf9, 0x0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff,
663 	    0xff, 0xff, 0xff, 0xff, 0xc7} },  /* SA_OUT(16), WRITE SCAT(16) */
664 	{ARRAY_SIZE(maint_in_iarr), 0xa3, 0xa, F_SA_LOW | F_D_IN,
665 	    resp_report_tgtpgs,	/* MAINT IN, REPORT TARGET PORT GROUPS */
666 		maint_in_iarr, {12,  0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff,
667 				0xff, 0, 0xc7, 0, 0, 0, 0} },
668 /* 15 */
669 	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */
670 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
671 	{ARRAY_SIZE(verify_iarr), 0x8f, 0,
672 	    F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,	/* VERIFY(16) */
673 	    verify_iarr, {16,  0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
674 			  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },
675 	{ARRAY_SIZE(vl_iarr), 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_MEDIA_IO,
676 	    resp_read_dt0, vl_iarr,	/* VARIABLE LENGTH, READ(32) */
677 	    {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x9, 0xfe, 0, 0xff, 0xff,
678 	     0xff, 0xff} },
679 	{ARRAY_SIZE(reserve_iarr), 0x56, 0, F_D_OUT,
680 	    NULL, reserve_iarr,	/* RESERVE(10) <no response function> */
681 	    {10,  0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
682 	     0} },
683 	{ARRAY_SIZE(release_iarr), 0x57, 0, F_D_OUT,
684 	    NULL, release_iarr, /* RELEASE(10) <no response function> */
685 	    {10,  0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
686 	     0} },
687 /* 20 */
688 	{0, 0x1e, 0, 0, NULL, NULL, /* ALLOW REMOVAL */
689 	    {6,  0, 0, 0, 0x3, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
690 	{0, 0x1, 0, 0, resp_start_stop, NULL, /* REWIND ?? */
691 	    {6,  0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
692 	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */
693 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
694 	{0, 0x1d, F_D_OUT, 0, NULL, NULL,	/* SEND DIAGNOSTIC */
695 	    {6,  0xf7, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
696 	{0, 0x42, 0, F_D_OUT | FF_MEDIA_IO, resp_unmap, NULL, /* UNMAP */
697 	    {10,  0x1, 0, 0, 0, 0, 0x3f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
698 /* 25 */
699 	{0, 0x3b, 0, F_D_OUT_MAYBE, resp_write_buffer, NULL,
700 	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0,
701 	     0, 0, 0, 0} },			/* WRITE_BUFFER */
702 	{ARRAY_SIZE(write_same_iarr), 0x41, 0, F_D_OUT_MAYBE | FF_MEDIA_IO,
703 	    resp_write_same_10, write_same_iarr,	/* WRITE SAME(10) */
704 		{10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0,
705 		 0, 0, 0, 0, 0} },
706 	{ARRAY_SIZE(sync_cache_iarr), 0x35, 0, F_SYNC_DELAY | F_M_ACCESS,
707 	    resp_sync_cache, sync_cache_iarr,
708 	    {10,  0x7, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
709 	     0, 0, 0, 0} },			/* SYNC_CACHE (10) */
710 	{0, 0x89, 0, F_D_OUT | FF_MEDIA_IO, resp_comp_write, NULL,
711 	    {16,  0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0,
712 	     0, 0xff, 0x3f, 0xc7} },		/* COMPARE AND WRITE */
713 	{ARRAY_SIZE(pre_fetch_iarr), 0x34, 0, F_SYNC_DELAY | FF_MEDIA_IO,
714 	    resp_pre_fetch, pre_fetch_iarr,
715 	    {10,  0x2, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
716 	     0, 0, 0, 0} },			/* PRE-FETCH (10) */
717 
718 /* 30 */
719 	{ARRAY_SIZE(zone_out_iarr), 0x94, 0x3, F_SA_LOW | F_M_ACCESS,
720 	    resp_open_zone, zone_out_iarr, /* ZONE_OUT(16), OPEN ZONE) */
721 		{16,  0x3 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
722 		 0xff, 0xff, 0x0, 0x0, 0xff, 0xff, 0x1, 0xc7} },
723 	{ARRAY_SIZE(zone_in_iarr), 0x95, 0x0, F_SA_LOW | F_M_ACCESS,
724 	    resp_report_zones, zone_in_iarr, /* ZONE_IN(16), REPORT ZONES) */
725 		{16,  0x0 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
726 		 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xc7} },
727 /* sentinel */
728 	{0xff, 0, 0, 0, NULL, NULL,		/* terminating element */
729 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
730 };
731 
/*
 * Backing variables for the driver's tunables. Most are initialized from
 * DEF_* constants and (per the sysfs note below) may be updated at runtime.
 */
static int sdebug_num_hosts;
static int sdebug_add_host = DEF_NUM_HOST;  /* in sysfs this is relative */
static int sdebug_ato = DEF_ATO;
static int sdebug_cdb_len = DEF_CDB_LEN;
static int sdebug_jdelay = DEF_JDELAY;	/* if > 0 then unit is jiffies */
static int sdebug_dev_size_mb = DEF_DEV_SIZE_PRE_INIT;
static int sdebug_dif = DEF_DIF;
static int sdebug_dix = DEF_DIX;
static int sdebug_dsense = DEF_D_SENSE;
static int sdebug_every_nth = DEF_EVERY_NTH;
static int sdebug_fake_rw = DEF_FAKE_RW;
static unsigned int sdebug_guard = DEF_GUARD;
static int sdebug_host_max_queue;	/* per host */
static int sdebug_lowest_aligned = DEF_LOWEST_ALIGNED;
static int sdebug_max_luns = DEF_MAX_LUNS;
static int sdebug_max_queue = SDEBUG_CANQUEUE;	/* per submit queue */
static unsigned int sdebug_medium_error_start = OPT_MEDIUM_ERR_ADDR;
static int sdebug_medium_error_count = OPT_MEDIUM_ERR_NUM;
static atomic_t retired_max_queue;	/* if > 0 then was prior max_queue */
static int sdebug_ndelay = DEF_NDELAY;	/* if > 0 then unit is nanoseconds */
static int sdebug_no_lun_0 = DEF_NO_LUN_0;
static int sdebug_no_uld;
static int sdebug_num_parts = DEF_NUM_PARTS;
static int sdebug_num_tgts = DEF_NUM_TGTS; /* targets per host */
static int sdebug_opt_blks = DEF_OPT_BLKS;
static int sdebug_opts = DEF_OPTS;
static int sdebug_physblk_exp = DEF_PHYSBLK_EXP;
static int sdebug_opt_xferlen_exp = DEF_OPT_XFERLEN_EXP;
static int sdebug_ptype = DEF_PTYPE; /* SCSI peripheral device type */
static int sdebug_scsi_level = DEF_SCSI_LEVEL;
static int sdebug_sector_size = DEF_SECTOR_SIZE;
static int sdebug_virtual_gb = DEF_VIRTUAL_GB;
static int sdebug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
static unsigned int sdebug_lbpu = DEF_LBPU;
static unsigned int sdebug_lbpws = DEF_LBPWS;
static unsigned int sdebug_lbpws10 = DEF_LBPWS10;
static unsigned int sdebug_lbprz = DEF_LBPRZ;
static unsigned int sdebug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
static unsigned int sdebug_unmap_granularity = DEF_UNMAP_GRANULARITY;
static unsigned int sdebug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
static unsigned int sdebug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
static unsigned int sdebug_write_same_length = DEF_WRITESAME_LENGTH;
static int sdebug_uuid_ctl = DEF_UUID_CTL;
static bool sdebug_random = DEF_RANDOM;
static bool sdebug_per_host_store = DEF_PER_HOST_STORE;
static bool sdebug_removable = DEF_REMOVABLE;
static bool sdebug_clustering;
static bool sdebug_host_lock = DEF_HOST_LOCK;
static bool sdebug_strict = DEF_STRICT;
static bool sdebug_any_injecting_opt;
static bool sdebug_verbose;
static bool have_dif_prot;
static bool write_since_sync;
static bool sdebug_statistics = DEF_STATISTICS;
static bool sdebug_wp;
/* Following enum: 0: no zbc, def; 1: host aware; 2: host managed */
static enum blk_zoned_model sdeb_zbc_model = BLK_ZONED_NONE;
static char *sdeb_zbc_model_s;
790 
static unsigned int sdebug_store_sectors;
static sector_t sdebug_capacity;	/* in sectors */

/* Old BIOS-style disk geometry; the kernel may get rid of these but some
 * mode sense pages may still need them. */
static int sdebug_heads;		/* heads per disk */
static int sdebug_cylinders_per;	/* cylinders per surface */
static int sdebug_sectors_per;		/* sectors per cylinder */

/* All simulated hosts, guarded by the spinlock below */
static LIST_HEAD(sdebug_host_list);
static DEFINE_SPINLOCK(sdebug_host_list_lock);

/* Per-store (ramdisk) state, keyed by store index */
static struct xarray per_store_arr;
static struct xarray *per_store_ap = &per_store_arr;
static int sdeb_first_idx = -1;		/* invalid index ==> none created */
static int sdeb_most_recent_idx = -1;
static DEFINE_RWLOCK(sdeb_fake_rw_lck);	/* need a RW lock when fake_rw=1 */

/* Simple counters reported for diagnostics */
static unsigned long map_size;
static int num_aborts;
static int num_dev_resets;
static int num_target_resets;
static int num_bus_resets;
static int num_host_resets;
static int dix_writes;
static int dix_reads;
static int dif_errors;

/* ZBC global data */
static bool sdeb_zbc_in_use;	/* true for host-aware and host-managed disks */
static int sdeb_zbc_zone_size_mb;
static int sdeb_zbc_max_open = DEF_ZBC_MAX_OPEN_ZONES;
static int sdeb_zbc_nr_conv = DEF_ZBC_NR_CONV_ZONES;

static int submit_queues = DEF_SUBMIT_QUEUES;  /* > 1 for multi-queue (mq) */
static struct sdebug_queue *sdebug_q_arr;  /* ptr to array of submit queues */

static DEFINE_RWLOCK(atomic_rw);
static DEFINE_RWLOCK(atomic_rw2);

static rwlock_t *ramdisk_lck_a[2];

static char sdebug_proc_name[] = MY_NAME;
static const char *my_name = MY_NAME;

static struct bus_type pseudo_lld_bus;

static struct device_driver sdebug_driverfs_driver = {
	.name 		= sdebug_proc_name,
	.bus		= &pseudo_lld_bus,
};

/* Pre-built scsi_cmnd result values returned by the command responders */
static const int check_condition_result =
		(DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;

static const int illegal_condition_result =
	(DRIVER_SENSE << 24) | (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;

static const int device_qfull_result =
	(DID_OK << 16) | (COMMAND_COMPLETE << 8) | SAM_STAT_TASK_SET_FULL;

static const int condition_met_result = SAM_STAT_CONDITION_MET;
853 
854 
855 /* Only do the extra work involved in logical block provisioning if one or
856  * more of the lbpu, lbpws or lbpws10 parameters are given and we are doing
857  * real reads and writes (i.e. not skipping them for speed).
858  */
859 static inline bool scsi_debug_lbp(void)
860 {
861 	return 0 == sdebug_fake_rw &&
862 		(sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10);
863 }
864 
865 static void *lba2fake_store(struct sdeb_store_info *sip,
866 			    unsigned long long lba)
867 {
868 	struct sdeb_store_info *lsip = sip;
869 
870 	lba = do_div(lba, sdebug_store_sectors);
871 	if (!sip || !sip->storep) {
872 		WARN_ON_ONCE(true);
873 		lsip = xa_load(per_store_ap, 0);  /* should never be NULL */
874 	}
875 	return lsip->storep + lba * sdebug_sector_size;
876 }
877 
878 static struct t10_pi_tuple *dif_store(struct sdeb_store_info *sip,
879 				      sector_t sector)
880 {
881 	sector = sector_div(sector, sdebug_store_sectors);
882 
883 	return sip->dif_storep + sector;
884 }
885 
886 static void sdebug_max_tgts_luns(void)
887 {
888 	struct sdebug_host_info *sdbg_host;
889 	struct Scsi_Host *hpnt;
890 
891 	spin_lock(&sdebug_host_list_lock);
892 	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
893 		hpnt = sdbg_host->shost;
894 		if ((hpnt->this_id >= 0) &&
895 		    (sdebug_num_tgts > hpnt->this_id))
896 			hpnt->max_id = sdebug_num_tgts + 1;
897 		else
898 			hpnt->max_id = sdebug_num_tgts;
899 		/* sdebug_max_luns; */
900 		hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
901 	}
902 	spin_unlock(&sdebug_host_list_lock);
903 }
904 
/* Says whether an invalid field was found in the data-out buffer or the CDB */
enum sdeb_cmd_data {SDEB_IN_DATA = 0, SDEB_IN_CDB = 1};
906 
907 /* Set in_bit to -1 to indicate no bit position of invalid field */
908 static void mk_sense_invalid_fld(struct scsi_cmnd *scp,
909 				 enum sdeb_cmd_data c_d,
910 				 int in_byte, int in_bit)
911 {
912 	unsigned char *sbuff;
913 	u8 sks[4];
914 	int sl, asc;
915 
916 	sbuff = scp->sense_buffer;
917 	if (!sbuff) {
918 		sdev_printk(KERN_ERR, scp->device,
919 			    "%s: sense_buffer is NULL\n", __func__);
920 		return;
921 	}
922 	asc = c_d ? INVALID_FIELD_IN_CDB : INVALID_FIELD_IN_PARAM_LIST;
923 	memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
924 	scsi_build_sense_buffer(sdebug_dsense, sbuff, ILLEGAL_REQUEST, asc, 0);
925 	memset(sks, 0, sizeof(sks));
926 	sks[0] = 0x80;
927 	if (c_d)
928 		sks[0] |= 0x40;
929 	if (in_bit >= 0) {
930 		sks[0] |= 0x8;
931 		sks[0] |= 0x7 & in_bit;
932 	}
933 	put_unaligned_be16(in_byte, sks + 1);
934 	if (sdebug_dsense) {
935 		sl = sbuff[7] + 8;
936 		sbuff[7] = sl;
937 		sbuff[sl] = 0x2;
938 		sbuff[sl + 1] = 0x6;
939 		memcpy(sbuff + sl + 4, sks, 3);
940 	} else
941 		memcpy(sbuff + 15, sks, 3);
942 	if (sdebug_verbose)
943 		sdev_printk(KERN_INFO, scp->device, "%s:  [sense_key,asc,ascq"
944 			    "]: [0x5,0x%x,0x0] %c byte=%d, bit=%d\n",
945 			    my_name, asc, c_d ? 'C' : 'D', in_byte, in_bit);
946 }
947 
948 static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq)
949 {
950 	unsigned char *sbuff;
951 
952 	sbuff = scp->sense_buffer;
953 	if (!sbuff) {
954 		sdev_printk(KERN_ERR, scp->device,
955 			    "%s: sense_buffer is NULL\n", __func__);
956 		return;
957 	}
958 	memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
959 
960 	scsi_build_sense_buffer(sdebug_dsense, sbuff, key, asc, asq);
961 
962 	if (sdebug_verbose)
963 		sdev_printk(KERN_INFO, scp->device,
964 			    "%s:  [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
965 			    my_name, key, asc, asq);
966 }
967 
/* Report CHECK CONDITION: ILLEGAL REQUEST with INVALID COMMAND OPCODE asc */
static void mk_sense_invalid_opcode(struct scsi_cmnd *scp)
{
	mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
}
972 
973 static int scsi_debug_ioctl(struct scsi_device *dev, unsigned int cmd,
974 			    void __user *arg)
975 {
976 	if (sdebug_verbose) {
977 		if (0x1261 == cmd)
978 			sdev_printk(KERN_INFO, dev,
979 				    "%s: BLKFLSBUF [0x1261]\n", __func__);
980 		else if (0x5331 == cmd)
981 			sdev_printk(KERN_INFO, dev,
982 				    "%s: CDROM_GET_CAPABILITY [0x5331]\n",
983 				    __func__);
984 		else
985 			sdev_printk(KERN_INFO, dev, "%s: cmd=0x%x\n",
986 				    __func__, cmd);
987 	}
988 	return -EINVAL;
989 	/* return -ENOTTY; // correct return but upsets fdisk */
990 }
991 
992 static void config_cdb_len(struct scsi_device *sdev)
993 {
994 	switch (sdebug_cdb_len) {
995 	case 6:	/* suggest 6 byte READ, WRITE and MODE SENSE/SELECT */
996 		sdev->use_10_for_rw = false;
997 		sdev->use_16_for_rw = false;
998 		sdev->use_10_for_ms = false;
999 		break;
1000 	case 10: /* suggest 10 byte RWs and 6 byte MODE SENSE/SELECT */
1001 		sdev->use_10_for_rw = true;
1002 		sdev->use_16_for_rw = false;
1003 		sdev->use_10_for_ms = false;
1004 		break;
1005 	case 12: /* suggest 10 byte RWs and 10 byte MODE SENSE/SELECT */
1006 		sdev->use_10_for_rw = true;
1007 		sdev->use_16_for_rw = false;
1008 		sdev->use_10_for_ms = true;
1009 		break;
1010 	case 16:
1011 		sdev->use_10_for_rw = false;
1012 		sdev->use_16_for_rw = true;
1013 		sdev->use_10_for_ms = true;
1014 		break;
1015 	case 32: /* No knobs to suggest this so same as 16 for now */
1016 		sdev->use_10_for_rw = false;
1017 		sdev->use_16_for_rw = true;
1018 		sdev->use_10_for_ms = true;
1019 		break;
1020 	default:
1021 		pr_warn("unexpected cdb_len=%d, force to 10\n",
1022 			sdebug_cdb_len);
1023 		sdev->use_10_for_rw = true;
1024 		sdev->use_16_for_rw = false;
1025 		sdev->use_10_for_ms = false;
1026 		sdebug_cdb_len = 10;
1027 		break;
1028 	}
1029 }
1030 
1031 static void all_config_cdb_len(void)
1032 {
1033 	struct sdebug_host_info *sdbg_host;
1034 	struct Scsi_Host *shost;
1035 	struct scsi_device *sdev;
1036 
1037 	spin_lock(&sdebug_host_list_lock);
1038 	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
1039 		shost = sdbg_host->shost;
1040 		shost_for_each_device(sdev, shost) {
1041 			config_cdb_len(sdev);
1042 		}
1043 	}
1044 	spin_unlock(&sdebug_host_list_lock);
1045 }
1046 
1047 static void clear_luns_changed_on_target(struct sdebug_dev_info *devip)
1048 {
1049 	struct sdebug_host_info *sdhp;
1050 	struct sdebug_dev_info *dp;
1051 
1052 	spin_lock(&sdebug_host_list_lock);
1053 	list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
1054 		list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
1055 			if ((devip->sdbg_host == dp->sdbg_host) &&
1056 			    (devip->target == dp->target))
1057 				clear_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
1058 		}
1059 	}
1060 	spin_unlock(&sdebug_host_list_lock);
1061 }
1062 
1063 static int make_ua(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1064 {
1065 	int k;
1066 
1067 	k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS);
1068 	if (k != SDEBUG_NUM_UAS) {
1069 		const char *cp = NULL;
1070 
1071 		switch (k) {
1072 		case SDEBUG_UA_POR:
1073 			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
1074 					POWER_ON_RESET_ASCQ);
1075 			if (sdebug_verbose)
1076 				cp = "power on reset";
1077 			break;
1078 		case SDEBUG_UA_BUS_RESET:
1079 			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
1080 					BUS_RESET_ASCQ);
1081 			if (sdebug_verbose)
1082 				cp = "bus reset";
1083 			break;
1084 		case SDEBUG_UA_MODE_CHANGED:
1085 			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
1086 					MODE_CHANGED_ASCQ);
1087 			if (sdebug_verbose)
1088 				cp = "mode parameters changed";
1089 			break;
1090 		case SDEBUG_UA_CAPACITY_CHANGED:
1091 			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
1092 					CAPACITY_CHANGED_ASCQ);
1093 			if (sdebug_verbose)
1094 				cp = "capacity data changed";
1095 			break;
1096 		case SDEBUG_UA_MICROCODE_CHANGED:
1097 			mk_sense_buffer(scp, UNIT_ATTENTION,
1098 					TARGET_CHANGED_ASC,
1099 					MICROCODE_CHANGED_ASCQ);
1100 			if (sdebug_verbose)
1101 				cp = "microcode has been changed";
1102 			break;
1103 		case SDEBUG_UA_MICROCODE_CHANGED_WO_RESET:
1104 			mk_sense_buffer(scp, UNIT_ATTENTION,
1105 					TARGET_CHANGED_ASC,
1106 					MICROCODE_CHANGED_WO_RESET_ASCQ);
1107 			if (sdebug_verbose)
1108 				cp = "microcode has been changed without reset";
1109 			break;
1110 		case SDEBUG_UA_LUNS_CHANGED:
1111 			/*
1112 			 * SPC-3 behavior is to report a UNIT ATTENTION with
1113 			 * ASC/ASCQ REPORTED LUNS DATA HAS CHANGED on every LUN
1114 			 * on the target, until a REPORT LUNS command is
1115 			 * received.  SPC-4 behavior is to report it only once.
1116 			 * NOTE:  sdebug_scsi_level does not use the same
1117 			 * values as struct scsi_device->scsi_level.
1118 			 */
1119 			if (sdebug_scsi_level >= 6)	/* SPC-4 and above */
1120 				clear_luns_changed_on_target(devip);
1121 			mk_sense_buffer(scp, UNIT_ATTENTION,
1122 					TARGET_CHANGED_ASC,
1123 					LUNS_CHANGED_ASCQ);
1124 			if (sdebug_verbose)
1125 				cp = "reported luns data has changed";
1126 			break;
1127 		default:
1128 			pr_warn("unexpected unit attention code=%d\n", k);
1129 			if (sdebug_verbose)
1130 				cp = "unknown";
1131 			break;
1132 		}
1133 		clear_bit(k, devip->uas_bm);
1134 		if (sdebug_verbose)
1135 			sdev_printk(KERN_INFO, scp->device,
1136 				   "%s reports: Unit attention: %s\n",
1137 				   my_name, cp);
1138 		return check_condition_result;
1139 	}
1140 	return 0;
1141 }
1142 
1143 /* Build SCSI "data-in" buffer. Returns 0 if ok else (DID_ERROR << 16). */
1144 static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
1145 				int arr_len)
1146 {
1147 	int act_len;
1148 	struct scsi_data_buffer *sdb = &scp->sdb;
1149 
1150 	if (!sdb->length)
1151 		return 0;
1152 	if (scp->sc_data_direction != DMA_FROM_DEVICE)
1153 		return DID_ERROR << 16;
1154 
1155 	act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
1156 				      arr, arr_len);
1157 	scsi_set_resid(scp, scsi_bufflen(scp) - act_len);
1158 
1159 	return 0;
1160 }
1161 
1162 /* Partial build of SCSI "data-in" buffer. Returns 0 if ok else
1163  * (DID_ERROR << 16). Can write to offset in data-in buffer. If multiple
1164  * calls, not required to write in ascending offset order. Assumes resid
1165  * set to scsi_bufflen() prior to any calls.
1166  */
1167 static int p_fill_from_dev_buffer(struct scsi_cmnd *scp, const void *arr,
1168 				  int arr_len, unsigned int off_dst)
1169 {
1170 	unsigned int act_len, n;
1171 	struct scsi_data_buffer *sdb = &scp->sdb;
1172 	off_t skip = off_dst;
1173 
1174 	if (sdb->length <= off_dst)
1175 		return 0;
1176 	if (scp->sc_data_direction != DMA_FROM_DEVICE)
1177 		return DID_ERROR << 16;
1178 
1179 	act_len = sg_pcopy_from_buffer(sdb->table.sgl, sdb->table.nents,
1180 				       arr, arr_len, skip);
1181 	pr_debug("%s: off_dst=%u, scsi_bufflen=%u, act_len=%u, resid=%d\n",
1182 		 __func__, off_dst, scsi_bufflen(scp), act_len,
1183 		 scsi_get_resid(scp));
1184 	n = scsi_bufflen(scp) - (off_dst + act_len);
1185 	scsi_set_resid(scp, min_t(int, scsi_get_resid(scp), n));
1186 	return 0;
1187 }
1188 
1189 /* Fetches from SCSI "data-out" buffer. Returns number of bytes fetched into
1190  * 'arr' or -1 if error.
1191  */
1192 static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
1193 			       int arr_len)
1194 {
1195 	if (!scsi_bufflen(scp))
1196 		return 0;
1197 	if (scp->sc_data_direction != DMA_TO_DEVICE)
1198 		return -1;
1199 
1200 	return scsi_sg_copy_to_buffer(scp, arr, arr_len);
1201 }
1202 
1203 
1204 static char sdebug_inq_vendor_id[9] = "Linux   ";
1205 static char sdebug_inq_product_id[17] = "scsi_debug      ";
1206 static char sdebug_inq_product_rev[5] = SDEBUG_VERSION;
1207 /* Use some locally assigned NAAs for SAS addresses. */
1208 static const u64 naa3_comp_a = 0x3222222000000000ULL;
1209 static const u64 naa3_comp_b = 0x3333333000000000ULL;
1210 static const u64 naa3_comp_c = 0x3111111000000000ULL;
1211 
/*
 * Device identification VPD page (0x83). Emits, in order: a T10 vendor id
 * descriptor; when dev_id_num >= 0, a logical unit designator (locally
 * assigned UUID if sdebug_uuid_ctl, else NAA-3) plus a relative target port
 * descriptor; then NAA-3 target port, target port group, target device
 * designators and a SCSI name string for the target device.
 * Returns number of bytes placed in arr.
 */
static int inquiry_vpd_83(unsigned char *arr, int port_group_id,
			  int target_dev_id, int dev_id_num,
			  const char *dev_id_str, int dev_id_str_len,
			  const uuid_t *lu_name)
{
	int num, port_a;
	char b[32];

	port_a = target_dev_id + 1;
	/* T10 vendor identifier field format (faked) */
	arr[0] = 0x2;	/* ASCII */
	arr[1] = 0x1;
	arr[2] = 0x0;
	memcpy(&arr[4], sdebug_inq_vendor_id, 8);
	memcpy(&arr[12], sdebug_inq_product_id, 16);
	memcpy(&arr[28], dev_id_str, dev_id_str_len);
	num = 8 + 16 + dev_id_str_len;
	arr[3] = num;	/* designator length (excludes 4 byte header) */
	num += 4;
	if (dev_id_num >= 0) {
		if (sdebug_uuid_ctl) {
			/* Locally assigned UUID */
			arr[num++] = 0x1;  /* binary (not necessarily sas) */
			arr[num++] = 0xa;  /* PIV=0, lu, naa */
			arr[num++] = 0x0;
			arr[num++] = 0x12;
			arr[num++] = 0x10; /* uuid type=1, locally assigned */
			arr[num++] = 0x0;
			memcpy(arr + num, lu_name, 16);
			num += 16;
		} else {
			/* NAA-3, Logical unit identifier (binary) */
			arr[num++] = 0x1;  /* binary (not necessarily sas) */
			arr[num++] = 0x3;  /* PIV=0, lu, naa */
			arr[num++] = 0x0;
			arr[num++] = 0x8;
			put_unaligned_be64(naa3_comp_b + dev_id_num, arr + num);
			num += 8;
		}
		/* Target relative port number */
		arr[num++] = 0x61;	/* proto=sas, binary */
		arr[num++] = 0x94;	/* PIV=1, target port, rel port */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x4;	/* length */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x0;
		arr[num++] = 0x1;	/* relative port A */
	}
	/* NAA-3, Target port identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x93;	/* piv=1, target port, naa */
	arr[num++] = 0x0;
	arr[num++] = 0x8;
	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
	num += 8;
	/* NAA-3, Target port group identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x95;	/* piv=1, target port group id */
	arr[num++] = 0x0;
	arr[num++] = 0x4;
	arr[num++] = 0;
	arr[num++] = 0;
	put_unaligned_be16(port_group_id, arr + num);
	num += 2;
	/* NAA-3, Target device identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0xa3;	/* piv=1, target device, naa */
	arr[num++] = 0x0;
	arr[num++] = 0x8;
	put_unaligned_be64(naa3_comp_a + target_dev_id, arr + num);
	num += 8;
	/* SCSI name string: Target device identifier */
	arr[num++] = 0x63;	/* proto=sas, UTF-8 */
	arr[num++] = 0xa8;	/* piv=1, target device, SCSI name string */
	arr[num++] = 0x0;
	arr[num++] = 24;
	/* "naa." + 16 hex digits: prefix matches naa3_comp_a above */
	memcpy(arr + num, "naa.32222220", 12);
	num += 12;
	snprintf(b, sizeof(b), "%08X", target_dev_id);
	memcpy(arr + num, b, 8);
	num += 8;
	memset(arr + num, 0, 4);	/* pad name string to 24 bytes */
	num += 4;
	return num;
}
1299 
/* Canned payload for the Software interface identification VPD page (0x84);
 * bytes land at offset 4 of the response (after the page header). */
static unsigned char vpd84_data[] = {
	/* from 4th byte */
	0x22, 0x22, 0x22, 0x0, 0xbb, 0x0,
	0x22, 0x22, 0x22, 0x0, 0xbb, 0x1,
	0x22, 0x22, 0x22, 0x0, 0xbb, 0x2,
};

/* Software interface identification VPD page */
static int inquiry_vpd_84(unsigned char *arr)
{
	const int len = sizeof(vpd84_data);

	memcpy(arr, vpd84_data, len);
	return len;
}
1312 
/*
 * Append one network services descriptor to 'arr': a 4 byte header followed
 * by the NUL terminated address, zero padded to a 4 byte multiple.
 * 'assoc_service' goes in the first header byte (association + service type).
 * Returns the number of bytes written.
 */
static int inquiry_vpd_85_descr(unsigned char *arr, int assoc_service,
				const char *addr)
{
	int olen = strlen(addr);
	int plen = olen + 1;		/* include trailing NUL */

	if (plen % 4)
		plen = ((plen / 4) + 1) * 4;	/* pad to 4 byte multiple */
	arr[0] = assoc_service;
	arr[1] = 0x0;	/* reserved */
	arr[2] = 0x0;
	arr[3] = plen;	/* length, null terminated, padded */
	memcpy(arr + 4, addr, olen);
	memset(arr + 4 + olen, 0, plen - olen);
	return 4 + plen;
}

/* Management network addresses VPD page. Returns bytes placed in arr. */
static int inquiry_vpd_85(unsigned char *arr)
{
	int num = 0;

	/* lu, storage config */
	num += inquiry_vpd_85_descr(arr + num, 0x1,
				    "https://www.kernel.org/config");
	/* lu, logging */
	num += inquiry_vpd_85_descr(arr + num, 0x4,
				    "http://www.kernel.org/log");
	return num;
}
1347 
1348 /* SCSI ports VPD page */
1349 static int inquiry_vpd_88(unsigned char *arr, int target_dev_id)
1350 {
1351 	int num = 0;
1352 	int port_a, port_b;
1353 
1354 	port_a = target_dev_id + 1;
1355 	port_b = port_a + 1;
1356 	arr[num++] = 0x0;	/* reserved */
1357 	arr[num++] = 0x0;	/* reserved */
1358 	arr[num++] = 0x0;
1359 	arr[num++] = 0x1;	/* relative port 1 (primary) */
1360 	memset(arr + num, 0, 6);
1361 	num += 6;
1362 	arr[num++] = 0x0;
1363 	arr[num++] = 12;	/* length tp descriptor */
1364 	/* naa-5 target port identifier (A) */
1365 	arr[num++] = 0x61;	/* proto=sas, binary */
1366 	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
1367 	arr[num++] = 0x0;	/* reserved */
1368 	arr[num++] = 0x8;	/* length */
1369 	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
1370 	num += 8;
1371 	arr[num++] = 0x0;	/* reserved */
1372 	arr[num++] = 0x0;	/* reserved */
1373 	arr[num++] = 0x0;
1374 	arr[num++] = 0x2;	/* relative port 2 (secondary) */
1375 	memset(arr + num, 0, 6);
1376 	num += 6;
1377 	arr[num++] = 0x0;
1378 	arr[num++] = 12;	/* length tp descriptor */
1379 	/* naa-5 target port identifier (B) */
1380 	arr[num++] = 0x61;	/* proto=sas, binary */
1381 	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
1382 	arr[num++] = 0x0;	/* reserved */
1383 	arr[num++] = 0x8;	/* length */
1384 	put_unaligned_be64(naa3_comp_a + port_b, arr + num);
1385 	num += 8;
1386 
1387 	return num;
1388 }
1389 
1390 
/* Canned payload for the ATA Information VPD page (0x89): SAT vendor and
 * product strings followed by a fixed ATA IDENTIFY-style image; bytes land
 * at offset 4 of the response (after the page header). */
static unsigned char vpd89_data[] = {
/* from 4th byte */ 0,0,0,0,
'l','i','n','u','x',' ',' ',' ',
'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
'1','2','3','4',
0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
0xec,0,0,0,
0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
0x53,0x41,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
0x20,0x20,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
0x10,0x80,
0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
};

/* ATA Information VPD page: copies the canned image, returns its length */
static int inquiry_vpd_89(unsigned char *arr)
{
	memcpy(arr, vpd89_data, sizeof(vpd89_data));
	return sizeof(vpd89_data);
}
1441 
1442 
1443 static unsigned char vpdb0_data[] = {
1444 	/* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
1445 	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1446 	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1447 	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1448 };
1449 
1450 /* Block limits VPD page (SBC-3) */
1451 static int inquiry_vpd_b0(unsigned char *arr)
1452 {
1453 	unsigned int gran;
1454 
1455 	memcpy(arr, vpdb0_data, sizeof(vpdb0_data));
1456 
1457 	/* Optimal transfer length granularity */
1458 	if (sdebug_opt_xferlen_exp != 0 &&
1459 	    sdebug_physblk_exp < sdebug_opt_xferlen_exp)
1460 		gran = 1 << sdebug_opt_xferlen_exp;
1461 	else
1462 		gran = 1 << sdebug_physblk_exp;
1463 	put_unaligned_be16(gran, arr + 2);
1464 
1465 	/* Maximum Transfer Length */
1466 	if (sdebug_store_sectors > 0x400)
1467 		put_unaligned_be32(sdebug_store_sectors, arr + 4);
1468 
1469 	/* Optimal Transfer Length */
1470 	put_unaligned_be32(sdebug_opt_blks, &arr[8]);
1471 
1472 	if (sdebug_lbpu) {
1473 		/* Maximum Unmap LBA Count */
1474 		put_unaligned_be32(sdebug_unmap_max_blocks, &arr[16]);
1475 
1476 		/* Maximum Unmap Block Descriptor Count */
1477 		put_unaligned_be32(sdebug_unmap_max_desc, &arr[20]);
1478 	}
1479 
1480 	/* Unmap Granularity Alignment */
1481 	if (sdebug_unmap_alignment) {
1482 		put_unaligned_be32(sdebug_unmap_alignment, &arr[28]);
1483 		arr[28] |= 0x80; /* UGAVALID */
1484 	}
1485 
1486 	/* Optimal Unmap Granularity */
1487 	put_unaligned_be32(sdebug_unmap_granularity, &arr[24]);
1488 
1489 	/* Maximum WRITE SAME Length */
1490 	put_unaligned_be64(sdebug_write_same_length, &arr[32]);
1491 
1492 	return 0x3c; /* Mandatory page length for Logical Block Provisioning */
1493 
1494 	return sizeof(vpdb0_data);
1495 }
1496 
1497 /* Block device characteristics VPD page (SBC-3) */
1498 static int inquiry_vpd_b1(struct sdebug_dev_info *devip, unsigned char *arr)
1499 {
1500 	memset(arr, 0, 0x3c);
1501 	arr[0] = 0;
1502 	arr[1] = 1;	/* non rotating medium (e.g. solid state) */
1503 	arr[2] = 0;
1504 	arr[3] = 5;	/* less than 1.8" */
1505 	if (devip->zmodel == BLK_ZONED_HA)
1506 		arr[4] = 1 << 4;	/* zoned field = 01b */
1507 
1508 	return 0x3c;
1509 }
1510 
1511 /* Logical block provisioning VPD page (SBC-4) */
1512 static int inquiry_vpd_b2(unsigned char *arr)
1513 {
1514 	memset(arr, 0, 0x4);
1515 	arr[0] = 0;			/* threshold exponent */
1516 	if (sdebug_lbpu)
1517 		arr[1] = 1 << 7;
1518 	if (sdebug_lbpws)
1519 		arr[1] |= 1 << 6;
1520 	if (sdebug_lbpws10)
1521 		arr[1] |= 1 << 5;
1522 	if (sdebug_lbprz && scsi_debug_lbp())
1523 		arr[1] |= (sdebug_lbprz & 0x7) << 2;  /* sbc4r07 and later */
1524 	/* anc_sup=0; dp=0 (no provisioning group descriptor) */
1525 	/* minimum_percentage=0; provisioning_type=0 (unknown) */
1526 	/* threshold_percentage=0 */
1527 	return 0x4;
1528 }
1529 
1530 /* Zoned block device characteristics VPD page (ZBC mandatory) */
1531 static int inquiry_vpd_b6(struct sdebug_dev_info *devip, unsigned char *arr)
1532 {
1533 	memset(arr, 0, 0x3c);
1534 	arr[0] = 0x1; /* set URSWRZ (unrestricted read in seq. wr req zone) */
1535 	/*
1536 	 * Set Optimal number of open sequential write preferred zones and
1537 	 * Optimal number of non-sequentially written sequential write
1538 	 * preferred zones fields to 'not reported' (0xffffffff). Leave other
1539 	 * fields set to zero, apart from Max. number of open swrz_s field.
1540 	 */
1541 	put_unaligned_be32(0xffffffff, &arr[4]);
1542 	put_unaligned_be32(0xffffffff, &arr[8]);
1543 	if (sdeb_zbc_model == BLK_ZONED_HM && devip->max_open)
1544 		put_unaligned_be32(devip->max_open, &arr[12]);
1545 	else
1546 		put_unaligned_be32(0xffffffff, &arr[12]);
1547 	return 0x3c;
1548 }
1549 
1550 #define SDEBUG_LONG_INQ_SZ 96
1551 #define SDEBUG_MAX_INQ_ARR_SZ 584
1552 
1553 static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1554 {
1555 	unsigned char pq_pdt;
1556 	unsigned char *arr;
1557 	unsigned char *cmd = scp->cmnd;
1558 	int alloc_len, n, ret;
1559 	bool have_wlun, is_disk, is_zbc, is_disk_zbc;
1560 
1561 	alloc_len = get_unaligned_be16(cmd + 3);
1562 	arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
1563 	if (! arr)
1564 		return DID_REQUEUE << 16;
1565 	is_disk = (sdebug_ptype == TYPE_DISK);
1566 	is_zbc = (devip->zmodel != BLK_ZONED_NONE);
1567 	is_disk_zbc = (is_disk || is_zbc);
1568 	have_wlun = scsi_is_wlun(scp->device->lun);
1569 	if (have_wlun)
1570 		pq_pdt = TYPE_WLUN;	/* present, wlun */
1571 	else if (sdebug_no_lun_0 && (devip->lun == SDEBUG_LUN_0_VAL))
1572 		pq_pdt = 0x7f;	/* not present, PQ=3, PDT=0x1f */
1573 	else
1574 		pq_pdt = (sdebug_ptype & 0x1f);
1575 	arr[0] = pq_pdt;
1576 	if (0x2 & cmd[1]) {  /* CMDDT bit set */
1577 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
1578 		kfree(arr);
1579 		return check_condition_result;
1580 	} else if (0x1 & cmd[1]) {  /* EVPD bit set */
1581 		int lu_id_num, port_group_id, target_dev_id, len;
1582 		char lu_id_str[6];
1583 		int host_no = devip->sdbg_host->shost->host_no;
1584 
1585 		port_group_id = (((host_no + 1) & 0x7f) << 8) +
1586 		    (devip->channel & 0x7f);
1587 		if (sdebug_vpd_use_hostno == 0)
1588 			host_no = 0;
1589 		lu_id_num = have_wlun ? -1 : (((host_no + 1) * 2000) +
1590 			    (devip->target * 1000) + devip->lun);
1591 		target_dev_id = ((host_no + 1) * 2000) +
1592 				 (devip->target * 1000) - 3;
1593 		len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
1594 		if (0 == cmd[2]) { /* supported vital product data pages */
1595 			arr[1] = cmd[2];	/*sanity */
1596 			n = 4;
1597 			arr[n++] = 0x0;   /* this page */
1598 			arr[n++] = 0x80;  /* unit serial number */
1599 			arr[n++] = 0x83;  /* device identification */
1600 			arr[n++] = 0x84;  /* software interface ident. */
1601 			arr[n++] = 0x85;  /* management network addresses */
1602 			arr[n++] = 0x86;  /* extended inquiry */
1603 			arr[n++] = 0x87;  /* mode page policy */
1604 			arr[n++] = 0x88;  /* SCSI ports */
1605 			if (is_disk_zbc) {	  /* SBC or ZBC */
1606 				arr[n++] = 0x89;  /* ATA information */
1607 				arr[n++] = 0xb0;  /* Block limits */
1608 				arr[n++] = 0xb1;  /* Block characteristics */
1609 				if (is_disk)
1610 					arr[n++] = 0xb2;  /* LB Provisioning */
1611 				if (is_zbc)
1612 					arr[n++] = 0xb6;  /* ZB dev. char. */
1613 			}
1614 			arr[3] = n - 4;	  /* number of supported VPD pages */
1615 		} else if (0x80 == cmd[2]) { /* unit serial number */
1616 			arr[1] = cmd[2];	/*sanity */
1617 			arr[3] = len;
1618 			memcpy(&arr[4], lu_id_str, len);
1619 		} else if (0x83 == cmd[2]) { /* device identification */
1620 			arr[1] = cmd[2];	/*sanity */
1621 			arr[3] = inquiry_vpd_83(&arr[4], port_group_id,
1622 						target_dev_id, lu_id_num,
1623 						lu_id_str, len,
1624 						&devip->lu_name);
1625 		} else if (0x84 == cmd[2]) { /* Software interface ident. */
1626 			arr[1] = cmd[2];	/*sanity */
1627 			arr[3] = inquiry_vpd_84(&arr[4]);
1628 		} else if (0x85 == cmd[2]) { /* Management network addresses */
1629 			arr[1] = cmd[2];	/*sanity */
1630 			arr[3] = inquiry_vpd_85(&arr[4]);
1631 		} else if (0x86 == cmd[2]) { /* extended inquiry */
1632 			arr[1] = cmd[2];	/*sanity */
1633 			arr[3] = 0x3c;	/* number of following entries */
1634 			if (sdebug_dif == T10_PI_TYPE3_PROTECTION)
1635 				arr[4] = 0x4;	/* SPT: GRD_CHK:1 */
1636 			else if (have_dif_prot)
1637 				arr[4] = 0x5;   /* SPT: GRD_CHK:1, REF_CHK:1 */
1638 			else
1639 				arr[4] = 0x0;   /* no protection stuff */
1640 			arr[5] = 0x7;   /* head of q, ordered + simple q's */
1641 		} else if (0x87 == cmd[2]) { /* mode page policy */
1642 			arr[1] = cmd[2];	/*sanity */
1643 			arr[3] = 0x8;	/* number of following entries */
1644 			arr[4] = 0x2;	/* disconnect-reconnect mp */
1645 			arr[6] = 0x80;	/* mlus, shared */
1646 			arr[8] = 0x18;	 /* protocol specific lu */
1647 			arr[10] = 0x82;	 /* mlus, per initiator port */
1648 		} else if (0x88 == cmd[2]) { /* SCSI Ports */
1649 			arr[1] = cmd[2];	/*sanity */
1650 			arr[3] = inquiry_vpd_88(&arr[4], target_dev_id);
1651 		} else if (is_disk_zbc && 0x89 == cmd[2]) { /* ATA info */
1652 			arr[1] = cmd[2];        /*sanity */
1653 			n = inquiry_vpd_89(&arr[4]);
1654 			put_unaligned_be16(n, arr + 2);
1655 		} else if (is_disk_zbc && 0xb0 == cmd[2]) { /* Block limits */
1656 			arr[1] = cmd[2];        /*sanity */
1657 			arr[3] = inquiry_vpd_b0(&arr[4]);
1658 		} else if (is_disk_zbc && 0xb1 == cmd[2]) { /* Block char. */
1659 			arr[1] = cmd[2];        /*sanity */
1660 			arr[3] = inquiry_vpd_b1(devip, &arr[4]);
1661 		} else if (is_disk && 0xb2 == cmd[2]) { /* LB Prov. */
1662 			arr[1] = cmd[2];        /*sanity */
1663 			arr[3] = inquiry_vpd_b2(&arr[4]);
1664 		} else if (is_zbc && cmd[2] == 0xb6) { /* ZB dev. charact. */
1665 			arr[1] = cmd[2];        /*sanity */
1666 			arr[3] = inquiry_vpd_b6(devip, &arr[4]);
1667 		} else {
1668 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
1669 			kfree(arr);
1670 			return check_condition_result;
1671 		}
1672 		len = min(get_unaligned_be16(arr + 2) + 4, alloc_len);
1673 		ret = fill_from_dev_buffer(scp, arr,
1674 			    min(len, SDEBUG_MAX_INQ_ARR_SZ));
1675 		kfree(arr);
1676 		return ret;
1677 	}
1678 	/* drops through here for a standard inquiry */
1679 	arr[1] = sdebug_removable ? 0x80 : 0;	/* Removable disk */
1680 	arr[2] = sdebug_scsi_level;
1681 	arr[3] = 2;    /* response_data_format==2 */
1682 	arr[4] = SDEBUG_LONG_INQ_SZ - 5;
1683 	arr[5] = (int)have_dif_prot;	/* PROTECT bit */
1684 	if (sdebug_vpd_use_hostno == 0)
1685 		arr[5] |= 0x10; /* claim: implicit TPGS */
1686 	arr[6] = 0x10; /* claim: MultiP */
1687 	/* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
1688 	arr[7] = 0xa; /* claim: LINKED + CMDQUE */
1689 	memcpy(&arr[8], sdebug_inq_vendor_id, 8);
1690 	memcpy(&arr[16], sdebug_inq_product_id, 16);
1691 	memcpy(&arr[32], sdebug_inq_product_rev, 4);
1692 	/* Use Vendor Specific area to place driver date in ASCII hex */
1693 	memcpy(&arr[36], sdebug_version_date, 8);
1694 	/* version descriptors (2 bytes each) follow */
1695 	put_unaligned_be16(0xc0, arr + 58);   /* SAM-6 no version claimed */
1696 	put_unaligned_be16(0x5c0, arr + 60);  /* SPC-5 no version claimed */
1697 	n = 62;
1698 	if (is_disk) {		/* SBC-4 no version claimed */
1699 		put_unaligned_be16(0x600, arr + n);
1700 		n += 2;
1701 	} else if (sdebug_ptype == TYPE_TAPE) {	/* SSC-4 rev 3 */
1702 		put_unaligned_be16(0x525, arr + n);
1703 		n += 2;
1704 	} else if (is_zbc) {	/* ZBC BSR INCITS 536 revision 05 */
1705 		put_unaligned_be16(0x624, arr + n);
1706 		n += 2;
1707 	}
1708 	put_unaligned_be16(0x2100, arr + n);	/* SPL-4 no version claimed */
1709 	ret = fill_from_dev_buffer(scp, arr,
1710 			    min_t(int, alloc_len, SDEBUG_LONG_INQ_SZ));
1711 	kfree(arr);
1712 	return ret;
1713 }
1714 
/*
 * Informational Exceptions Control mode page (0x1c) - current values.
 * Byte 2 bit 0x4 is the TEST bit and byte 3's low nibble is MRIE; both are
 * consulted by resp_requests() and resp_ie_l_pg(), and bytes 2.. can be
 * rewritten by MODE SELECT (see resp_mode_select()).
 */
static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
				   0, 0, 0x0, 0x0};
1717 
1718 static int resp_requests(struct scsi_cmnd *scp,
1719 			 struct sdebug_dev_info *devip)
1720 {
1721 	unsigned char *sbuff;
1722 	unsigned char *cmd = scp->cmnd;
1723 	unsigned char arr[SCSI_SENSE_BUFFERSIZE];
1724 	bool dsense;
1725 	int len = 18;
1726 
1727 	memset(arr, 0, sizeof(arr));
1728 	dsense = !!(cmd[1] & 1);
1729 	sbuff = scp->sense_buffer;
1730 	if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
1731 		if (dsense) {
1732 			arr[0] = 0x72;
1733 			arr[1] = 0x0;		/* NO_SENSE in sense_key */
1734 			arr[2] = THRESHOLD_EXCEEDED;
1735 			arr[3] = 0xff;		/* TEST set and MRIE==6 */
1736 			len = 8;
1737 		} else {
1738 			arr[0] = 0x70;
1739 			arr[2] = 0x0;		/* NO_SENSE in sense_key */
1740 			arr[7] = 0xa;   	/* 18 byte sense buffer */
1741 			arr[12] = THRESHOLD_EXCEEDED;
1742 			arr[13] = 0xff;		/* TEST set and MRIE==6 */
1743 		}
1744 	} else {
1745 		memcpy(arr, sbuff, SCSI_SENSE_BUFFERSIZE);
1746 		if (arr[0] >= 0x70 && dsense == sdebug_dsense)
1747 			;	/* have sense and formats match */
1748 		else if (arr[0] <= 0x70) {
1749 			if (dsense) {
1750 				memset(arr, 0, 8);
1751 				arr[0] = 0x72;
1752 				len = 8;
1753 			} else {
1754 				memset(arr, 0, 18);
1755 				arr[0] = 0x70;
1756 				arr[7] = 0xa;
1757 			}
1758 		} else if (dsense) {
1759 			memset(arr, 0, 8);
1760 			arr[0] = 0x72;
1761 			arr[1] = sbuff[2];     /* sense key */
1762 			arr[2] = sbuff[12];    /* asc */
1763 			arr[3] = sbuff[13];    /* ascq */
1764 			len = 8;
1765 		} else {
1766 			memset(arr, 0, 18);
1767 			arr[0] = 0x70;
1768 			arr[2] = sbuff[1];
1769 			arr[7] = 0xa;
1770 			arr[12] = sbuff[1];
1771 			arr[13] = sbuff[3];
1772 		}
1773 
1774 	}
1775 	mk_sense_buffer(scp, 0, NO_ADDITIONAL_SENSE, 0);
1776 	return fill_from_dev_buffer(scp, arr, len);
1777 }
1778 
static int resp_start_stop(struct scsi_cmnd *scp,
			   struct sdebug_dev_info *devip)
{
	/*
	 * START STOP UNIT: only POWER CONDITION 0 is accepted; flips
	 * devip->stopped according to the START bit. Returns the IMMED
	 * mask when the state did not change or IMMED was requested.
	 */
	unsigned char *cmd = scp->cmnd;
	int power_cond, stop;
	bool changing;

	power_cond = (cmd[4] & 0xf0) >> 4;
	if (power_cond) {	/* reject any non-zero power condition */
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 7);
		return check_condition_result;
	}
	stop = !(cmd[4] & 1);	/* START bit clear => stop the unit */
	/* NOTE(review): the read below and the xchg are not atomic as a
	 * pair, so 'changing' can be stale under concurrent commands -
	 * confirm this race is acceptable for this debug driver */
	changing = atomic_read(&devip->stopped) == !stop;
	atomic_xchg(&devip->stopped, stop);
	if (!changing || cmd[1] & 0x1)  /* state unchanged or IMMED set */
		return SDEG_RES_IMMED_MASK;
	else
		return 0;
}
1799 
1800 static sector_t get_sdebug_capacity(void)
1801 {
1802 	static const unsigned int gibibyte = 1073741824;
1803 
1804 	if (sdebug_virtual_gb > 0)
1805 		return (sector_t)sdebug_virtual_gb *
1806 			(gibibyte / sdebug_sector_size);
1807 	else
1808 		return sdebug_store_sectors;
1809 }
1810 
1811 #define SDEBUG_READCAP_ARR_SZ 8
1812 static int resp_readcap(struct scsi_cmnd *scp,
1813 			struct sdebug_dev_info *devip)
1814 {
1815 	unsigned char arr[SDEBUG_READCAP_ARR_SZ];
1816 	unsigned int capac;
1817 
1818 	/* following just in case virtual_gb changed */
1819 	sdebug_capacity = get_sdebug_capacity();
1820 	memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
1821 	if (sdebug_capacity < 0xffffffff) {
1822 		capac = (unsigned int)sdebug_capacity - 1;
1823 		put_unaligned_be32(capac, arr + 0);
1824 	} else
1825 		put_unaligned_be32(0xffffffff, arr + 0);
1826 	put_unaligned_be16(sdebug_sector_size, arr + 6);
1827 	return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
1828 }
1829 
1830 #define SDEBUG_READCAP16_ARR_SZ 32
1831 static int resp_readcap16(struct scsi_cmnd *scp,
1832 			  struct sdebug_dev_info *devip)
1833 {
1834 	unsigned char *cmd = scp->cmnd;
1835 	unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
1836 	int alloc_len;
1837 
1838 	alloc_len = get_unaligned_be32(cmd + 10);
1839 	/* following just in case virtual_gb changed */
1840 	sdebug_capacity = get_sdebug_capacity();
1841 	memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
1842 	put_unaligned_be64((u64)(sdebug_capacity - 1), arr + 0);
1843 	put_unaligned_be32(sdebug_sector_size, arr + 8);
1844 	arr[13] = sdebug_physblk_exp & 0xf;
1845 	arr[14] = (sdebug_lowest_aligned >> 8) & 0x3f;
1846 
1847 	if (scsi_debug_lbp()) {
1848 		arr[14] |= 0x80; /* LBPME */
1849 		/* from sbc4r07, this LBPRZ field is 1 bit, but the LBPRZ in
1850 		 * the LB Provisioning VPD page is 3 bits. Note that lbprz=2
1851 		 * in the wider field maps to 0 in this field.
1852 		 */
1853 		if (sdebug_lbprz & 1)	/* precisely what the draft requires */
1854 			arr[14] |= 0x40;
1855 	}
1856 
1857 	arr[15] = sdebug_lowest_aligned & 0xff;
1858 
1859 	if (have_dif_prot) {
1860 		arr[12] = (sdebug_dif - 1) << 1; /* P_TYPE */
1861 		arr[12] |= 1; /* PROT_EN */
1862 	}
1863 
1864 	return fill_from_dev_buffer(scp, arr,
1865 			    min_t(int, alloc_len, SDEBUG_READCAP16_ARR_SZ));
1866 }
1867 
1868 #define SDEBUG_MAX_TGTPGS_ARR_SZ 1412
1869 
1870 static int resp_report_tgtpgs(struct scsi_cmnd *scp,
1871 			      struct sdebug_dev_info *devip)
1872 {
1873 	unsigned char *cmd = scp->cmnd;
1874 	unsigned char *arr;
1875 	int host_no = devip->sdbg_host->shost->host_no;
1876 	int n, ret, alen, rlen;
1877 	int port_group_a, port_group_b, port_a, port_b;
1878 
1879 	alen = get_unaligned_be32(cmd + 6);
1880 	arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
1881 	if (! arr)
1882 		return DID_REQUEUE << 16;
1883 	/*
1884 	 * EVPD page 0x88 states we have two ports, one
1885 	 * real and a fake port with no device connected.
1886 	 * So we create two port groups with one port each
1887 	 * and set the group with port B to unavailable.
1888 	 */
1889 	port_a = 0x1; /* relative port A */
1890 	port_b = 0x2; /* relative port B */
1891 	port_group_a = (((host_no + 1) & 0x7f) << 8) +
1892 			(devip->channel & 0x7f);
1893 	port_group_b = (((host_no + 1) & 0x7f) << 8) +
1894 			(devip->channel & 0x7f) + 0x80;
1895 
1896 	/*
1897 	 * The asymmetric access state is cycled according to the host_id.
1898 	 */
1899 	n = 4;
1900 	if (sdebug_vpd_use_hostno == 0) {
1901 		arr[n++] = host_no % 3; /* Asymm access state */
1902 		arr[n++] = 0x0F; /* claim: all states are supported */
1903 	} else {
1904 		arr[n++] = 0x0; /* Active/Optimized path */
1905 		arr[n++] = 0x01; /* only support active/optimized paths */
1906 	}
1907 	put_unaligned_be16(port_group_a, arr + n);
1908 	n += 2;
1909 	arr[n++] = 0;    /* Reserved */
1910 	arr[n++] = 0;    /* Status code */
1911 	arr[n++] = 0;    /* Vendor unique */
1912 	arr[n++] = 0x1;  /* One port per group */
1913 	arr[n++] = 0;    /* Reserved */
1914 	arr[n++] = 0;    /* Reserved */
1915 	put_unaligned_be16(port_a, arr + n);
1916 	n += 2;
1917 	arr[n++] = 3;    /* Port unavailable */
1918 	arr[n++] = 0x08; /* claim: only unavailalbe paths are supported */
1919 	put_unaligned_be16(port_group_b, arr + n);
1920 	n += 2;
1921 	arr[n++] = 0;    /* Reserved */
1922 	arr[n++] = 0;    /* Status code */
1923 	arr[n++] = 0;    /* Vendor unique */
1924 	arr[n++] = 0x1;  /* One port per group */
1925 	arr[n++] = 0;    /* Reserved */
1926 	arr[n++] = 0;    /* Reserved */
1927 	put_unaligned_be16(port_b, arr + n);
1928 	n += 2;
1929 
1930 	rlen = n - 4;
1931 	put_unaligned_be32(rlen, arr + 0);
1932 
1933 	/*
1934 	 * Return the smallest value of either
1935 	 * - The allocated length
1936 	 * - The constructed command length
1937 	 * - The maximum array size
1938 	 */
1939 	rlen = min_t(int, alen, n);
1940 	ret = fill_from_dev_buffer(scp, arr,
1941 			   min_t(int, rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
1942 	kfree(arr);
1943 	return ret;
1944 }
1945 
static int resp_rsup_opcodes(struct scsi_cmnd *scp,
			     struct sdebug_dev_info *devip)
{
	/*
	 * REPORT SUPPORTED OPERATION CODES (MAINTENANCE IN, sa 0xc).
	 * Depending on REPORTING OPTIONS, builds either the "all commands"
	 * list or a single-command descriptor from opcode_info_arr,
	 * honouring the RCTD (return command timeout descriptors) bit.
	 */
	bool rctd;
	u8 reporting_opts, req_opcode, sdeb_i, supp;
	u16 req_sa, u;
	u32 alloc_len, a_len;
	int k, offset, len, errsts, count, bump, na;
	const struct opcode_info_t *oip;
	const struct opcode_info_t *r_oip;
	u8 *arr;
	u8 *cmd = scp->cmnd;

	rctd = !!(cmd[2] & 0x80);
	reporting_opts = cmd[2] & 0x7;
	req_opcode = cmd[3];
	req_sa = get_unaligned_be16(cmd + 4);
	alloc_len = get_unaligned_be32(cmd + 6);
	if (alloc_len < 4 || alloc_len > 0xffff) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
		return check_condition_result;
	}
	if (alloc_len > 8192)
		a_len = 8192;	/* cap the working buffer size */
	else
		a_len = alloc_len;
	arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_ATOMIC);
	if (NULL == arr) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
				INSUFF_RES_ASCQ);
		return check_condition_result;
	}
	switch (reporting_opts) {
	case 0:	/* all commands */
		/* count number of commands */
		for (count = 0, oip = opcode_info_arr;
		     oip->num_attached != 0xff; ++oip) {
			if (F_INV_OP & oip->flags)
				continue;
			count += (oip->num_attached + 1);
		}
		/* per-descriptor size: 20 bytes with timeouts, else 8 */
		bump = rctd ? 20 : 8;
		put_unaligned_be32(count * bump, arr);
		for (offset = 4, oip = opcode_info_arr;
		     oip->num_attached != 0xff && offset < a_len; ++oip) {
			if (F_INV_OP & oip->flags)
				continue;
			na = oip->num_attached;
			arr[offset] = oip->opcode;
			put_unaligned_be16(oip->sa, arr + offset + 2);
			if (rctd)
				arr[offset + 5] |= 0x2;	/* CTDP */
			if (FF_SA & oip->flags)
				arr[offset + 5] |= 0x1;	/* SERVACTV */
			put_unaligned_be16(oip->len_mask[0], arr + offset + 6);
			if (rctd)
				put_unaligned_be16(0xa, arr + offset + 8);
			r_oip = oip;
			/* also emit the commands attached to this opcode */
			for (k = 0, oip = oip->arrp; k < na; ++k, ++oip) {
				if (F_INV_OP & oip->flags)
					continue;
				offset += bump;
				arr[offset] = oip->opcode;
				put_unaligned_be16(oip->sa, arr + offset + 2);
				if (rctd)
					arr[offset + 5] |= 0x2;
				if (FF_SA & oip->flags)
					arr[offset + 5] |= 0x1;
				put_unaligned_be16(oip->len_mask[0],
						   arr + offset + 6);
				if (rctd)
					put_unaligned_be16(0xa,
							   arr + offset + 8);
			}
			oip = r_oip;
			offset += bump;
		}
		break;
	case 1:	/* one command: opcode only */
	case 2:	/* one command: opcode plus service action */
	case 3:	/* one command: if sa==0 then opcode only else opcode+sa */
		sdeb_i = opcode_ind_arr[req_opcode];
		oip = &opcode_info_arr[sdeb_i];
		if (F_INV_OP & oip->flags) {
			supp = 1;	/* not supported */
			offset = 4;
		} else {
			if (1 == reporting_opts) {
				if (FF_SA & oip->flags) {
					mk_sense_invalid_fld(scp, SDEB_IN_CDB,
							     2, 2);
					kfree(arr);
					return check_condition_result;
				}
				req_sa = 0;
			} else if (2 == reporting_opts &&
				   0 == (FF_SA & oip->flags)) {
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1);
				kfree(arr);	/* point at requested sa */
				return check_condition_result;
			}
			if (0 == (FF_SA & oip->flags) &&
			    req_opcode == oip->opcode)
				supp = 3;	/* supported as standard */
			else if (0 == (FF_SA & oip->flags)) {
				/* search attached commands by opcode */
				na = oip->num_attached;
				for (k = 0, oip = oip->arrp; k < na;
				     ++k, ++oip) {
					if (req_opcode == oip->opcode)
						break;
				}
				supp = (k >= na) ? 1 : 3;
			} else if (req_sa != oip->sa) {
				/* search attached commands by service action */
				na = oip->num_attached;
				for (k = 0, oip = oip->arrp; k < na;
				     ++k, ++oip) {
					if (req_sa == oip->sa)
						break;
				}
				supp = (k >= na) ? 1 : 3;
			} else
				supp = 3;
			if (3 == supp) {
				/* copy out this command's CDB usage mask */
				u = oip->len_mask[0];
				put_unaligned_be16(u, arr + 2);
				arr[4] = oip->opcode;
				for (k = 1; k < u; ++k)
					arr[4 + k] = (k < 16) ?
						 oip->len_mask[k] : 0xff;
				offset = 4 + u;
			} else
				offset = 4;
		}
		arr[1] = (rctd ? 0x80 : 0) | supp;
		if (rctd) {
			put_unaligned_be16(0xa, arr + offset);
			offset += 12;
		}
		break;
	default:
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
		kfree(arr);
		return check_condition_result;
	}
	offset = (offset < a_len) ? offset : a_len;
	len = (offset < alloc_len) ? offset : alloc_len;
	errsts = fill_from_dev_buffer(scp, arr, len);
	kfree(arr);
	return errsts;
}
2096 
2097 static int resp_rsup_tmfs(struct scsi_cmnd *scp,
2098 			  struct sdebug_dev_info *devip)
2099 {
2100 	bool repd;
2101 	u32 alloc_len, len;
2102 	u8 arr[16];
2103 	u8 *cmd = scp->cmnd;
2104 
2105 	memset(arr, 0, sizeof(arr));
2106 	repd = !!(cmd[2] & 0x80);
2107 	alloc_len = get_unaligned_be32(cmd + 6);
2108 	if (alloc_len < 4) {
2109 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
2110 		return check_condition_result;
2111 	}
2112 	arr[0] = 0xc8;		/* ATS | ATSS | LURS */
2113 	arr[1] = 0x1;		/* ITNRS */
2114 	if (repd) {
2115 		arr[3] = 0xc;
2116 		len = 16;
2117 	} else
2118 		len = 4;
2119 
2120 	len = (len < alloc_len) ? len : alloc_len;
2121 	return fill_from_dev_buffer(scp, arr, len);
2122 }
2123 
2124 /* <<Following mode page info copied from ST318451LW>> */
2125 
static int resp_err_recov_pg(unsigned char *p, int pcontrol, int target)
{	/* Read-Write Error Recovery mode page (0x1) for MODE SENSE */
	static const unsigned char pg[] = {0x1, 0xa, 0xc0, 11, 240, 0, 0, 0,
					   5, 0, 0xff, 0xff};

	memcpy(p, pg, sizeof(pg));
	if (pcontrol == 1)	/* changeable values: report none */
		memset(p + 2, 0, sizeof(pg) - 2);
	return sizeof(pg);
}
2136 
static int resp_disconnect_pg(unsigned char *p, int pcontrol, int target)
{	/* Disconnect-Reconnect mode page (0x2) for MODE SENSE */
	static const unsigned char pg[] = {0x2, 0xe, 128, 128, 0, 10, 0, 0,
					   0, 0, 0, 0, 0, 0, 0, 0};

	memcpy(p, pg, sizeof(pg));
	if (pcontrol == 1)	/* changeable values: report none */
		memset(p + 2, 0, sizeof(pg) - 2);
	return sizeof(pg);
}
2147 
2148 static int resp_format_pg(unsigned char *p, int pcontrol, int target)
2149 {       /* Format device page for mode_sense */
2150 	unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
2151 				     0, 0, 0, 0, 0, 0, 0, 0,
2152 				     0, 0, 0, 0, 0x40, 0, 0, 0};
2153 
2154 	memcpy(p, format_pg, sizeof(format_pg));
2155 	put_unaligned_be16(sdebug_sectors_per, p + 10);
2156 	put_unaligned_be16(sdebug_sector_size, p + 12);
2157 	if (sdebug_removable)
2158 		p[20] |= 0x20; /* should agree with INQUIRY */
2159 	if (1 == pcontrol)
2160 		memset(p + 2, 0, sizeof(format_pg) - 2);
2161 	return sizeof(format_pg);
2162 }
2163 
/*
 * Caching mode page (0x8) - current values. Bit 0x4 of byte 2 is WCE;
 * resp_caching_pg() may clear it and MODE SELECT (resp_mode_select())
 * can rewrite bytes 2 onward.
 */
static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
				     0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,
				     0, 0, 0, 0};
2167 
2168 static int resp_caching_pg(unsigned char *p, int pcontrol, int target)
2169 { 	/* Caching page for mode_sense */
2170 	unsigned char ch_caching_pg[] = {/* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0,
2171 		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
2172 	unsigned char d_caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
2173 		0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,     0, 0, 0, 0};
2174 
2175 	if (SDEBUG_OPT_N_WCE & sdebug_opts)
2176 		caching_pg[2] &= ~0x4;	/* set WCE=0 (default WCE=1) */
2177 	memcpy(p, caching_pg, sizeof(caching_pg));
2178 	if (1 == pcontrol)
2179 		memcpy(p + 2, ch_caching_pg, sizeof(ch_caching_pg));
2180 	else if (2 == pcontrol)
2181 		memcpy(p, d_caching_pg, sizeof(d_caching_pg));
2182 	return sizeof(caching_pg);
2183 }
2184 
/*
 * Control mode page (0xa) - current values. Bit 0x4 of byte 2 is D_SENSE
 * and bit 0x80 of byte 5 is ATO; resp_ctrl_m_pg() keeps them in sync with
 * the module parameters and MODE SELECT can rewrite bytes 2 onward.
 */
static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
				    0, 0, 0x2, 0x4b};
2187 
2188 static int resp_ctrl_m_pg(unsigned char *p, int pcontrol, int target)
2189 { 	/* Control mode page for mode_sense */
2190 	unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
2191 					0, 0, 0, 0};
2192 	unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
2193 				     0, 0, 0x2, 0x4b};
2194 
2195 	if (sdebug_dsense)
2196 		ctrl_m_pg[2] |= 0x4;
2197 	else
2198 		ctrl_m_pg[2] &= ~0x4;
2199 
2200 	if (sdebug_ato)
2201 		ctrl_m_pg[5] |= 0x80; /* ATO=1 */
2202 
2203 	memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
2204 	if (1 == pcontrol)
2205 		memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
2206 	else if (2 == pcontrol)
2207 		memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
2208 	return sizeof(ctrl_m_pg);
2209 }
2210 
2211 
2212 static int resp_iec_m_pg(unsigned char *p, int pcontrol, int target)
2213 {	/* Informational Exceptions control mode page for mode_sense */
2214 	unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
2215 				       0, 0, 0x0, 0x0};
2216 	unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
2217 				      0, 0, 0x0, 0x0};
2218 
2219 	memcpy(p, iec_m_pg, sizeof(iec_m_pg));
2220 	if (1 == pcontrol)
2221 		memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
2222 	else if (2 == pcontrol)
2223 		memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
2224 	return sizeof(iec_m_pg);
2225 }
2226 
static int resp_sas_sf_m_pg(unsigned char *p, int pcontrol, int target)
{	/* SAS SSP protocol-specific port mode page, short format (0x19) */
	static const unsigned char pg[] = {0x19, 0x6,
		0x6, 0x0, 0x7, 0xd0, 0x0, 0x0};

	memcpy(p, pg, sizeof(pg));
	if (pcontrol == 1)	/* changeable values: report none */
		memset(p + 2, 0, sizeof(pg) - 2);
	return sizeof(pg);
}
2237 
2238 
2239 static int resp_sas_pcd_m_spg(unsigned char *p, int pcontrol, int target,
2240 			      int target_dev_id)
2241 {	/* SAS phy control and discover mode page for mode_sense */
2242 	unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
2243 		    0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
2244 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2245 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2246 		    0x2, 0, 0, 0, 0, 0, 0, 0,
2247 		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
2248 		    0, 0, 0, 0, 0, 0, 0, 0,
2249 		    0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
2250 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2251 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2252 		    0x3, 0, 0, 0, 0, 0, 0, 0,
2253 		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
2254 		    0, 0, 0, 0, 0, 0, 0, 0,
2255 		};
2256 	int port_a, port_b;
2257 
2258 	put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 16);
2259 	put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 24);
2260 	put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 64);
2261 	put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 72);
2262 	port_a = target_dev_id + 1;
2263 	port_b = port_a + 1;
2264 	memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
2265 	put_unaligned_be32(port_a, p + 20);
2266 	put_unaligned_be32(port_b, p + 48 + 20);
2267 	if (1 == pcontrol)
2268 		memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
2269 	return sizeof(sas_pcd_m_pg);
2270 }
2271 
static int resp_sas_sha_m_spg(unsigned char *p, int pcontrol)
{	/* SAS SSP shared protocol-specific port mode subpage (0x19/0x2) */
	static const unsigned char pg[] = {0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
		    0, 0, 0, 0, 0, 0, 0, 0,
		};

	memcpy(p, pg, sizeof(pg));
	if (pcontrol == 1)	/* changeable: zero all but the header */
		memset(p + 4, 0, sizeof(pg) - 4);
	return sizeof(pg);
}
2283 
#define SDEBUG_MAX_MSENSE_SZ 256

static int resp_mode_sense(struct scsi_cmnd *scp,
			   struct sdebug_dev_info *devip)
{
	/*
	 * MODE SENSE(6/10): build the mode parameter header, an optional
	 * block descriptor, then the requested mode page(s) using the
	 * resp_*_pg() helpers above.
	 */
	int pcontrol, pcode, subpcode, bd_len;
	unsigned char dev_spec;
	int alloc_len, offset, len, target_dev_id;
	int target = scp->device->id;
	unsigned char *ap;
	unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
	unsigned char *cmd = scp->cmnd;
	bool dbd, llbaa, msense_6, is_disk, is_zbc, bad_pcode;

	dbd = !!(cmd[1] & 0x8);		/* disable block descriptors */
	pcontrol = (cmd[2] & 0xc0) >> 6;	/* 0=current 1=changeable 2=default 3=saved */
	pcode = cmd[2] & 0x3f;
	subpcode = cmd[3];
	msense_6 = (MODE_SENSE == cmd[0]);
	llbaa = msense_6 ? false : !!(cmd[1] & 0x10);	/* LLBAA only in 10-byte CDB */
	is_disk = (sdebug_ptype == TYPE_DISK);
	is_zbc = (devip->zmodel != BLK_ZONED_NONE);
	if ((is_disk || is_zbc) && !dbd)
		bd_len = llbaa ? 16 : 8;
	else
		bd_len = 0;
	alloc_len = msense_6 ? cmd[4] : get_unaligned_be16(cmd + 7);
	memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
	if (0x3 == pcontrol) {  /* Saving values not supported */
		mk_sense_buffer(scp, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, 0);
		return check_condition_result;
	}
	target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
			(devip->target * 1000) - 3;
	/* for disks+zbc set DPOFUA bit and clear write protect (WP) bit */
	if (is_disk || is_zbc) {
		dev_spec = 0x10;	/* =0x90 if WP=1 implies read-only */
		if (sdebug_wp)
			dev_spec |= 0x80;
	} else
		dev_spec = 0x0;
	if (msense_6) {
		arr[2] = dev_spec;
		arr[3] = bd_len;
		offset = 4;
	} else {
		arr[3] = dev_spec;
		if (16 == bd_len)
			arr[4] = 0x1;	/* set LONGLBA bit */
		arr[7] = bd_len;	/* assume 255 or less */
		offset = 8;
	}
	ap = arr + offset;
	if ((bd_len > 0) && (!sdebug_capacity))
		sdebug_capacity = get_sdebug_capacity();

	if (8 == bd_len) {		/* short block descriptor */
		if (sdebug_capacity > 0xfffffffe)
			put_unaligned_be32(0xffffffff, ap + 0);
		else
			put_unaligned_be32(sdebug_capacity, ap + 0);
		put_unaligned_be16(sdebug_sector_size, ap + 6);
		offset += bd_len;
		ap = arr + offset;
	} else if (16 == bd_len) {	/* long LBA block descriptor */
		put_unaligned_be64((u64)sdebug_capacity, ap + 0);
		put_unaligned_be32(sdebug_sector_size, ap + 12);
		offset += bd_len;
		ap = arr + offset;
	}

	if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) {
		/* TODO: Control Extension page */
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
		return check_condition_result;
	}
	bad_pcode = false;

	switch (pcode) {
	case 0x1:	/* Read-Write error recovery page, direct access */
		len = resp_err_recov_pg(ap, pcontrol, target);
		offset += len;
		break;
	case 0x2:	/* Disconnect-Reconnect page, all devices */
		len = resp_disconnect_pg(ap, pcontrol, target);
		offset += len;
		break;
	case 0x3:       /* Format device page, direct access */
		if (is_disk) {
			len = resp_format_pg(ap, pcontrol, target);
			offset += len;
		} else
			bad_pcode = true;
		break;
	case 0x8:	/* Caching page, direct access */
		if (is_disk || is_zbc) {
			len = resp_caching_pg(ap, pcontrol, target);
			offset += len;
		} else
			bad_pcode = true;
		break;
	case 0xa:	/* Control Mode page, all devices */
		len = resp_ctrl_m_pg(ap, pcontrol, target);
		offset += len;
		break;
	case 0x19:	/* if spc==1 then sas phy, control+discover */
		if ((subpcode > 0x2) && (subpcode < 0xff)) {
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
			return check_condition_result;
		}
		len = 0;
		if ((0x0 == subpcode) || (0xff == subpcode))
			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
		if ((0x1 == subpcode) || (0xff == subpcode))
			len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
						  target_dev_id);
		if ((0x2 == subpcode) || (0xff == subpcode))
			len += resp_sas_sha_m_spg(ap + len, pcontrol);
		offset += len;
		break;
	case 0x1c:	/* Informational Exceptions Mode page, all devices */
		len = resp_iec_m_pg(ap, pcontrol, target);
		offset += len;
		break;
	case 0x3f:	/* Read all Mode pages */
		if ((0 == subpcode) || (0xff == subpcode)) {
			len = resp_err_recov_pg(ap, pcontrol, target);
			len += resp_disconnect_pg(ap + len, pcontrol, target);
			if (is_disk) {
				len += resp_format_pg(ap + len, pcontrol,
						      target);
				len += resp_caching_pg(ap + len, pcontrol,
						       target);
			} else if (is_zbc) {
				len += resp_caching_pg(ap + len, pcontrol,
						       target);
			}
			len += resp_ctrl_m_pg(ap + len, pcontrol, target);
			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
			if (0xff == subpcode) {
				len += resp_sas_pcd_m_spg(ap + len, pcontrol,
						  target, target_dev_id);
				len += resp_sas_sha_m_spg(ap + len, pcontrol);
			}
			len += resp_iec_m_pg(ap + len, pcontrol, target);
			offset += len;
		} else {
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
			return check_condition_result;
		}
		break;
	default:
		bad_pcode = true;
		break;
	}
	if (bad_pcode) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
		return check_condition_result;
	}
	/* mode data length excludes itself: 1 byte in the 6-byte header,
	 * 2 bytes in the 10-byte header */
	if (msense_6)
		arr[0] = offset - 1;
	else
		put_unaligned_be16((offset - 2), arr + 0);
	return fill_from_dev_buffer(scp, arr, min_t(int, alloc_len, offset));
}
2449 
2450 #define SDEBUG_MAX_MSELECT_SZ 512
2451 
2452 static int resp_mode_select(struct scsi_cmnd *scp,
2453 			    struct sdebug_dev_info *devip)
2454 {
2455 	int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
2456 	int param_len, res, mpage;
2457 	unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
2458 	unsigned char *cmd = scp->cmnd;
2459 	int mselect6 = (MODE_SELECT == cmd[0]);
2460 
2461 	memset(arr, 0, sizeof(arr));
2462 	pf = cmd[1] & 0x10;
2463 	sp = cmd[1] & 0x1;
2464 	param_len = mselect6 ? cmd[4] : get_unaligned_be16(cmd + 7);
2465 	if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
2466 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1);
2467 		return check_condition_result;
2468 	}
2469 	res = fetch_to_dev_buffer(scp, arr, param_len);
2470 	if (-1 == res)
2471 		return DID_ERROR << 16;
2472 	else if (sdebug_verbose && (res < param_len))
2473 		sdev_printk(KERN_INFO, scp->device,
2474 			    "%s: cdb indicated=%d, IO sent=%d bytes\n",
2475 			    __func__, param_len, res);
2476 	md_len = mselect6 ? (arr[0] + 1) : (get_unaligned_be16(arr + 0) + 2);
2477 	bd_len = mselect6 ? arr[3] : get_unaligned_be16(arr + 6);
2478 	if (md_len > 2) {
2479 		mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
2480 		return check_condition_result;
2481 	}
2482 	off = bd_len + (mselect6 ? 4 : 8);
2483 	mpage = arr[off] & 0x3f;
2484 	ps = !!(arr[off] & 0x80);
2485 	if (ps) {
2486 		mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 7);
2487 		return check_condition_result;
2488 	}
2489 	spf = !!(arr[off] & 0x40);
2490 	pg_len = spf ? (get_unaligned_be16(arr + off + 2) + 4) :
2491 		       (arr[off + 1] + 2);
2492 	if ((pg_len + off) > param_len) {
2493 		mk_sense_buffer(scp, ILLEGAL_REQUEST,
2494 				PARAMETER_LIST_LENGTH_ERR, 0);
2495 		return check_condition_result;
2496 	}
2497 	switch (mpage) {
2498 	case 0x8:      /* Caching Mode page */
2499 		if (caching_pg[1] == arr[off + 1]) {
2500 			memcpy(caching_pg + 2, arr + off + 2,
2501 			       sizeof(caching_pg) - 2);
2502 			goto set_mode_changed_ua;
2503 		}
2504 		break;
2505 	case 0xa:      /* Control Mode page */
2506 		if (ctrl_m_pg[1] == arr[off + 1]) {
2507 			memcpy(ctrl_m_pg + 2, arr + off + 2,
2508 			       sizeof(ctrl_m_pg) - 2);
2509 			if (ctrl_m_pg[4] & 0x8)
2510 				sdebug_wp = true;
2511 			else
2512 				sdebug_wp = false;
2513 			sdebug_dsense = !!(ctrl_m_pg[2] & 0x4);
2514 			goto set_mode_changed_ua;
2515 		}
2516 		break;
2517 	case 0x1c:      /* Informational Exceptions Mode page */
2518 		if (iec_m_pg[1] == arr[off + 1]) {
2519 			memcpy(iec_m_pg + 2, arr + off + 2,
2520 			       sizeof(iec_m_pg) - 2);
2521 			goto set_mode_changed_ua;
2522 		}
2523 		break;
2524 	default:
2525 		break;
2526 	}
2527 	mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 5);
2528 	return check_condition_result;
2529 set_mode_changed_ua:
2530 	set_bit(SDEBUG_UA_MODE_CHANGED, devip->uas_bm);
2531 	return 0;
2532 }
2533 
/* Fill in the Temperature log page (0xd) parameters; returns byte count. */
static int resp_temp_l_pg(unsigned char *arr)
{
	static const unsigned char temp_l_pg[] = {
		0x0, 0x0, 0x3, 0x2, 0x0, 38,	/* current temperature */
		0x0, 0x1, 0x3, 0x2, 0x0, 65,	/* reference temperature */
	};

	memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
	return sizeof(temp_l_pg);
}
2543 
2544 static int resp_ie_l_pg(unsigned char *arr)
2545 {
2546 	unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
2547 		};
2548 
2549 	memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
2550 	if (iec_m_pg[2] & 0x4) {	/* TEST bit set */
2551 		arr[4] = THRESHOLD_EXCEEDED;
2552 		arr[5] = 0xff;
2553 	}
2554 	return sizeof(ie_l_pg);
2555 }
2556 
2557 #define SDEBUG_MAX_LSENSE_SZ 512
2558 
2559 static int resp_log_sense(struct scsi_cmnd *scp,
2560 			  struct sdebug_dev_info *devip)
2561 {
2562 	int ppc, sp, pcode, subpcode, alloc_len, len, n;
2563 	unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
2564 	unsigned char *cmd = scp->cmnd;
2565 
2566 	memset(arr, 0, sizeof(arr));
2567 	ppc = cmd[1] & 0x2;
2568 	sp = cmd[1] & 0x1;
2569 	if (ppc || sp) {
2570 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, ppc ? 1 : 0);
2571 		return check_condition_result;
2572 	}
2573 	pcode = cmd[2] & 0x3f;
2574 	subpcode = cmd[3] & 0xff;
2575 	alloc_len = get_unaligned_be16(cmd + 7);
2576 	arr[0] = pcode;
2577 	if (0 == subpcode) {
2578 		switch (pcode) {
2579 		case 0x0:	/* Supported log pages log page */
2580 			n = 4;
2581 			arr[n++] = 0x0;		/* this page */
2582 			arr[n++] = 0xd;		/* Temperature */
2583 			arr[n++] = 0x2f;	/* Informational exceptions */
2584 			arr[3] = n - 4;
2585 			break;
2586 		case 0xd:	/* Temperature log page */
2587 			arr[3] = resp_temp_l_pg(arr + 4);
2588 			break;
2589 		case 0x2f:	/* Informational exceptions log page */
2590 			arr[3] = resp_ie_l_pg(arr + 4);
2591 			break;
2592 		default:
2593 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2594 			return check_condition_result;
2595 		}
2596 	} else if (0xff == subpcode) {
2597 		arr[0] |= 0x40;
2598 		arr[1] = subpcode;
2599 		switch (pcode) {
2600 		case 0x0:	/* Supported log pages and subpages log page */
2601 			n = 4;
2602 			arr[n++] = 0x0;
2603 			arr[n++] = 0x0;		/* 0,0 page */
2604 			arr[n++] = 0x0;
2605 			arr[n++] = 0xff;	/* this page */
2606 			arr[n++] = 0xd;
2607 			arr[n++] = 0x0;		/* Temperature */
2608 			arr[n++] = 0x2f;
2609 			arr[n++] = 0x0;	/* Informational exceptions */
2610 			arr[3] = n - 4;
2611 			break;
2612 		case 0xd:	/* Temperature subpages */
2613 			n = 4;
2614 			arr[n++] = 0xd;
2615 			arr[n++] = 0x0;		/* Temperature */
2616 			arr[3] = n - 4;
2617 			break;
2618 		case 0x2f:	/* Informational exceptions subpages */
2619 			n = 4;
2620 			arr[n++] = 0x2f;
2621 			arr[n++] = 0x0;		/* Informational exceptions */
2622 			arr[3] = n - 4;
2623 			break;
2624 		default:
2625 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2626 			return check_condition_result;
2627 		}
2628 	} else {
2629 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2630 		return check_condition_result;
2631 	}
2632 	len = min_t(int, get_unaligned_be16(arr + 2) + 4, alloc_len);
2633 	return fill_from_dev_buffer(scp, arr,
2634 		    min_t(int, len, SDEBUG_MAX_INQ_ARR_SZ));
2635 }
2636 
2637 static inline bool sdebug_dev_is_zoned(struct sdebug_dev_info *devip)
2638 {
2639 	return devip->nr_zones != 0;
2640 }
2641 
2642 static struct sdeb_zone_state *zbc_zone(struct sdebug_dev_info *devip,
2643 					unsigned long long lba)
2644 {
2645 	return &devip->zstate[lba >> devip->zsize_shift];
2646 }
2647 
/* True for conventional zones (ZBC type CNV), which have no write pointer. */
static inline bool zbc_zone_is_conv(struct sdeb_zone_state *zsp)
{
	return zsp->z_type == ZBC_ZONE_TYPE_CNV;
}
2652 
2653 static void zbc_close_zone(struct sdebug_dev_info *devip,
2654 			   struct sdeb_zone_state *zsp)
2655 {
2656 	enum sdebug_z_cond zc;
2657 
2658 	if (zbc_zone_is_conv(zsp))
2659 		return;
2660 
2661 	zc = zsp->z_cond;
2662 	if (!(zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN))
2663 		return;
2664 
2665 	if (zc == ZC2_IMPLICIT_OPEN)
2666 		devip->nr_imp_open--;
2667 	else
2668 		devip->nr_exp_open--;
2669 
2670 	if (zsp->z_wp == zsp->z_start) {
2671 		zsp->z_cond = ZC1_EMPTY;
2672 	} else {
2673 		zsp->z_cond = ZC4_CLOSED;
2674 		devip->nr_closed++;
2675 	}
2676 }
2677 
2678 static void zbc_close_imp_open_zone(struct sdebug_dev_info *devip)
2679 {
2680 	struct sdeb_zone_state *zsp = &devip->zstate[0];
2681 	unsigned int i;
2682 
2683 	for (i = 0; i < devip->nr_zones; i++, zsp++) {
2684 		if (zsp->z_cond == ZC2_IMPLICIT_OPEN) {
2685 			zbc_close_zone(devip, zsp);
2686 			return;
2687 		}
2688 	}
2689 }
2690 
/*
 * Transition a zone to implicit or explicit open, updating the per-device
 * open/closed counters. If the open-zone limit (max_open) would be
 * exceeded, an implicitly open zone is closed first to make room.
 */
static void zbc_open_zone(struct sdebug_dev_info *devip,
			  struct sdeb_zone_state *zsp, bool explicit)
{
	enum sdebug_z_cond zc;

	/* Conventional zones are never "open" */
	if (zbc_zone_is_conv(zsp))
		return;

	/* Nothing to do if the zone is already open in the requested way */
	zc = zsp->z_cond;
	if ((explicit && zc == ZC3_EXPLICIT_OPEN) ||
	    (!explicit && zc == ZC2_IMPLICIT_OPEN))
		return;

	/* Close an implicit open zone if necessary */
	if (explicit && zsp->z_cond == ZC2_IMPLICIT_OPEN)
		zbc_close_zone(devip, zsp);
	else if (devip->max_open &&
		 devip->nr_imp_open + devip->nr_exp_open >= devip->max_open)
		zbc_close_imp_open_zone(devip);

	/* The zone leaves the CLOSED state, so drop the closed count */
	if (zsp->z_cond == ZC4_CLOSED)
		devip->nr_closed--;
	if (explicit) {
		zsp->z_cond = ZC3_EXPLICIT_OPEN;
		devip->nr_exp_open++;
	} else {
		zsp->z_cond = ZC2_IMPLICIT_OPEN;
		devip->nr_imp_open++;
	}
}
2721 
/*
 * Advance the write pointer of the zone(s) covered by a write of 'num'
 * blocks starting at 'lba'. Sequential-write-required zones advance in a
 * single step (boundary checks were done earlier); sequential-write-
 * preferred zones may span several zones and may write behind the WP.
 */
static void zbc_inc_wp(struct sdebug_dev_info *devip,
		       unsigned long long lba, unsigned int num)
{
	struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
	unsigned long long n, end, zend = zsp->z_start + zsp->z_size;

	/* Conventional zones have no write pointer */
	if (zbc_zone_is_conv(zsp))
		return;

	if (zsp->z_type == ZBC_ZONE_TYPE_SWR) {
		zsp->z_wp += num;
		if (zsp->z_wp >= zend)
			zsp->z_cond = ZC5_FULL;
		return;
	}

	/* Host-aware SWP zone: walk the write across zone boundaries */
	while (num) {
		/* A write not at the WP makes the zone a non-seq resource */
		if (lba != zsp->z_wp)
			zsp->z_non_seq_resource = true;

		end = lba + num;
		if (end >= zend) {
			/* Consume up to the zone boundary */
			n = zend - lba;
			zsp->z_wp = zend;
		} else if (end > zsp->z_wp) {
			n = num;
			zsp->z_wp = end;
		} else {
			/* Entirely behind the WP: nothing to advance */
			n = num;
		}
		if (zsp->z_wp >= zend)
			zsp->z_cond = ZC5_FULL;

		num -= n;
		lba += n;
		if (num) {
			/* Continue into the next zone */
			zsp++;
			zend = zsp->z_start + zsp->z_size;
		}
	}
}
2763 
/*
 * Enforce ZBC access rules for a read or write of 'num' blocks at 'lba'.
 * Returns 0 when the access is allowed, otherwise sets up sense data and
 * returns check_condition_result. Writes that land in an empty or closed
 * sequential zone implicitly open it.
 */
static int check_zbc_access_params(struct scsi_cmnd *scp,
			unsigned long long lba, unsigned int num, bool write)
{
	struct scsi_device *sdp = scp->device;
	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
	struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
	struct sdeb_zone_state *zsp_end = zbc_zone(devip, lba + num - 1);

	if (!write) {
		/* Host-aware devices place no restrictions on reads */
		if (devip->zmodel == BLK_ZONED_HA)
			return 0;
		/* For host-managed, reads cannot cross zone types boundaries */
		if (zsp_end != zsp &&
		    zbc_zone_is_conv(zsp) &&
		    !zbc_zone_is_conv(zsp_end)) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST,
					LBA_OUT_OF_RANGE,
					READ_INVDATA_ASCQ);
			return check_condition_result;
		}
		return 0;
	}

	/* No restrictions for writes within conventional zones */
	if (zbc_zone_is_conv(zsp)) {
		/* ... but they must not spill into a sequential zone */
		if (!zbc_zone_is_conv(zsp_end)) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST,
					LBA_OUT_OF_RANGE,
					WRITE_BOUNDARY_ASCQ);
			return check_condition_result;
		}
		return 0;
	}

	if (zsp->z_type == ZBC_ZONE_TYPE_SWR) {
		/* Writes cannot cross sequential zone boundaries */
		if (zsp_end != zsp) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST,
					LBA_OUT_OF_RANGE,
					WRITE_BOUNDARY_ASCQ);
			return check_condition_result;
		}
		/* Cannot write full zones */
		if (zsp->z_cond == ZC5_FULL) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST,
					INVALID_FIELD_IN_CDB, 0);
			return check_condition_result;
		}
		/* Writes must be aligned to the zone WP */
		if (lba != zsp->z_wp) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST,
					LBA_OUT_OF_RANGE,
					UNALIGNED_WRITE_ASCQ);
			return check_condition_result;
		}
	}

	/* Handle implicit open of closed and empty zones */
	if (zsp->z_cond == ZC1_EMPTY || zsp->z_cond == ZC4_CLOSED) {
		/* Explicitly open zones saturating max_open block the write */
		if (devip->max_open &&
		    devip->nr_exp_open >= devip->max_open) {
			mk_sense_buffer(scp, DATA_PROTECT,
					INSUFF_RES_ASC,
					INSUFF_ZONE_ASCQ);
			return check_condition_result;
		}
		zbc_open_zone(devip, zsp, false);
	}

	return 0;
}
2835 
2836 static inline int check_device_access_params
2837 			(struct scsi_cmnd *scp, unsigned long long lba,
2838 			 unsigned int num, bool write)
2839 {
2840 	struct scsi_device *sdp = scp->device;
2841 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
2842 
2843 	if (lba + num > sdebug_capacity) {
2844 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
2845 		return check_condition_result;
2846 	}
2847 	/* transfer length excessive (tie in to block limits VPD page) */
2848 	if (num > sdebug_store_sectors) {
2849 		/* needs work to find which cdb byte 'num' comes from */
2850 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
2851 		return check_condition_result;
2852 	}
2853 	if (write && unlikely(sdebug_wp)) {
2854 		mk_sense_buffer(scp, DATA_PROTECT, WRITE_PROTECTED, 0x2);
2855 		return check_condition_result;
2856 	}
2857 	if (sdebug_dev_is_zoned(devip))
2858 		return check_zbc_access_params(scp, lba, num, write);
2859 
2860 	return 0;
2861 }
2862 
2863 /*
2864  * Note: if BUG_ON() fires it usually indicates a problem with the parser
2865  * tables. Perhaps a missing F_FAKE_RW or FF_MEDIA_IO flag. Response functions
2866  * that access any of the "stores" in struct sdeb_store_info should call this
2867  * function with bug_if_fake_rw set to true.
2868  */
2869 static inline struct sdeb_store_info *devip2sip(struct sdebug_dev_info *devip,
2870 						bool bug_if_fake_rw)
2871 {
2872 	if (sdebug_fake_rw) {
2873 		BUG_ON(bug_if_fake_rw);	/* See note above */
2874 		return NULL;
2875 	}
2876 	return xa_load(per_store_ap, devip->sdbg_host->si_idx);
2877 }
2878 
/* Returns number of bytes copied or -1 if error. */
static int do_device_access(struct sdeb_store_info *sip, struct scsi_cmnd *scp,
			    u32 sg_skip, u64 lba, u32 num, bool do_write)
{
	int ret;
	u64 block, rest = 0;
	enum dma_data_direction dir;
	struct scsi_data_buffer *sdb = &scp->sdb;
	u8 *fsp;

	if (do_write) {
		dir = DMA_TO_DEVICE;
		/* Remembered so SYNCHRONIZE CACHE handling knows dirty data exists */
		write_since_sync = true;
	} else {
		dir = DMA_FROM_DEVICE;
	}

	/* Nothing to move for a zero-length buffer or a missing store */
	if (!sdb->length || !sip)
		return 0;
	/* The command's data direction must match the requested transfer */
	if (scp->sc_data_direction != dir)
		return -1;
	fsp = sip->storep;

	/* do_div() reduces lba in place; 'block' is lba % store size */
	block = do_div(lba, sdebug_store_sectors);
	/* 'rest' is the part of the access that wraps past the store end */
	if (block + num > sdebug_store_sectors)
		rest = block + num - sdebug_store_sectors;

	ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
		   fsp + (block * sdebug_sector_size),
		   (num - rest) * sdebug_sector_size, sg_skip, do_write);
	if (ret != (num - rest) * sdebug_sector_size)
		return ret;

	/* Copy the wrapped tail from/to the start of the store */
	if (rest) {
		ret += sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
			    fsp, rest * sdebug_sector_size,
			    sg_skip + ((num - rest) * sdebug_sector_size),
			    do_write);
	}

	return ret;
}
2921 
2922 /* Returns number of bytes copied or -1 if error. */
2923 static int do_dout_fetch(struct scsi_cmnd *scp, u32 num, u8 *doutp)
2924 {
2925 	struct scsi_data_buffer *sdb = &scp->sdb;
2926 
2927 	if (!sdb->length)
2928 		return 0;
2929 	if (scp->sc_data_direction != DMA_TO_DEVICE)
2930 		return -1;
2931 	return sg_copy_buffer(sdb->table.sgl, sdb->table.nents, doutp,
2932 			      num * sdebug_sector_size, 0, true);
2933 }
2934 
2935 /* If sip->storep+lba compares equal to arr(num), then copy top half of
2936  * arr into sip->storep+lba and return true. If comparison fails then
2937  * return false. */
2938 static bool comp_write_worker(struct sdeb_store_info *sip, u64 lba, u32 num,
2939 			      const u8 *arr, bool compare_only)
2940 {
2941 	bool res;
2942 	u64 block, rest = 0;
2943 	u32 store_blks = sdebug_store_sectors;
2944 	u32 lb_size = sdebug_sector_size;
2945 	u8 *fsp = sip->storep;
2946 
2947 	block = do_div(lba, store_blks);
2948 	if (block + num > store_blks)
2949 		rest = block + num - store_blks;
2950 
2951 	res = !memcmp(fsp + (block * lb_size), arr, (num - rest) * lb_size);
2952 	if (!res)
2953 		return res;
2954 	if (rest)
2955 		res = memcmp(fsp, arr + ((num - rest) * lb_size),
2956 			     rest * lb_size);
2957 	if (!res)
2958 		return res;
2959 	if (compare_only)
2960 		return true;
2961 	arr += num * lb_size;
2962 	memcpy(fsp + (block * lb_size), arr, (num - rest) * lb_size);
2963 	if (rest)
2964 		memcpy(fsp, arr + ((num - rest) * lb_size), rest * lb_size);
2965 	return res;
2966 }
2967 
2968 static __be16 dif_compute_csum(const void *buf, int len)
2969 {
2970 	__be16 csum;
2971 
2972 	if (sdebug_guard)
2973 		csum = (__force __be16)ip_compute_csum(buf, len);
2974 	else
2975 		csum = cpu_to_be16(crc_t10dif(buf, len));
2976 
2977 	return csum;
2978 }
2979 
2980 static int dif_verify(struct t10_pi_tuple *sdt, const void *data,
2981 		      sector_t sector, u32 ei_lba)
2982 {
2983 	__be16 csum = dif_compute_csum(data, sdebug_sector_size);
2984 
2985 	if (sdt->guard_tag != csum) {
2986 		pr_err("GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
2987 			(unsigned long)sector,
2988 			be16_to_cpu(sdt->guard_tag),
2989 			be16_to_cpu(csum));
2990 		return 0x01;
2991 	}
2992 	if (sdebug_dif == T10_PI_TYPE1_PROTECTION &&
2993 	    be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
2994 		pr_err("REF check failed on sector %lu\n",
2995 			(unsigned long)sector);
2996 		return 0x03;
2997 	}
2998 	if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
2999 	    be32_to_cpu(sdt->ref_tag) != ei_lba) {
3000 		pr_err("REF check failed on sector %lu\n",
3001 			(unsigned long)sector);
3002 		return 0x03;
3003 	}
3004 	return 0;
3005 }
3006 
/*
 * Copy protection tuples between the command's protection scatterlist and
 * the per-store dif_storep array (direction chosen by 'read'). Accesses
 * wrap around at the end of dif_storep.
 */
static void dif_copy_prot(struct scsi_cmnd *scp, sector_t sector,
			  unsigned int sectors, bool read)
{
	size_t resid;
	void *paddr;
	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
						scp->device->hostdata, true);
	struct t10_pi_tuple *dif_storep = sip->dif_storep;
	const void *dif_store_end = dif_storep + sdebug_store_sectors;
	struct sg_mapping_iter miter;

	/* Bytes of protection data to copy into sgl */
	resid = sectors * sizeof(*dif_storep);

	sg_miter_start(&miter, scsi_prot_sglist(scp),
		       scsi_prot_sg_count(scp), SG_MITER_ATOMIC |
		       (read ? SG_MITER_TO_SG : SG_MITER_FROM_SG));

	while (sg_miter_next(&miter) && resid > 0) {
		size_t len = min_t(size_t, miter.length, resid);
		void *start = dif_store(sip, sector);
		size_t rest = 0;

		/* 'rest' is the part of this chunk past the end of the store */
		if (dif_store_end < start + len)
			rest = start + len - dif_store_end;

		paddr = miter.addr;

		if (read)
			memcpy(paddr, start, len - rest);
		else
			memcpy(start, paddr, len - rest);

		/* Wrapped portion continues at the start of dif_storep */
		if (rest) {
			if (read)
				memcpy(paddr + len - rest, dif_storep, rest);
			else
				memcpy(dif_storep, paddr + len - rest, rest);
		}

		sector += len / sizeof(*dif_storep);
		resid -= len;
	}
	sg_miter_stop(&miter);
}
3052 
/*
 * Verify the stored protection tuples for a read of 'sectors' sectors
 * starting at start_sec, then copy them to the command's protection
 * scatterlist. Returns 0 on success or a dif_verify() error code.
 */
static int prot_verify_read(struct scsi_cmnd *scp, sector_t start_sec,
			    unsigned int sectors, u32 ei_lba)
{
	unsigned int i;
	sector_t sector;
	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
						scp->device->hostdata, true);
	struct t10_pi_tuple *sdt;

	for (i = 0; i < sectors; i++, ei_lba++) {
		int ret;

		sector = start_sec + i;
		sdt = dif_store(sip, sector);

		/* App tag of 0xffff means "do not check this sector" */
		if (sdt->app_tag == cpu_to_be16(0xffff))
			continue;

		ret = dif_verify(sdt, lba2fake_store(sip, sector), sector,
				 ei_lba);
		if (ret) {
			dif_errors++;
			return ret;
		}
	}

	dif_copy_prot(scp, start_sec, sectors, true);
	dix_reads++;

	return 0;
}
3084 
/*
 * Service READ(6/10/12/16/32) and the read half of XDWRITEREAD(10) by
 * copying from the (optionally shared) backing store into the command's
 * data-in buffer. Returns 0, a check condition result or DID_ERROR.
 */
static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	bool check_prot;
	u32 num;
	u32 ei_lba;
	int ret;
	u64 lba;
	struct sdeb_store_info *sip = devip2sip(devip, true);
	/* Fall back to a dummy lock when there is no backing store */
	rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
	u8 *cmd = scp->cmnd;

	/* Decode LBA, transfer length and expected initial LBA per opcode */
	switch (cmd[0]) {
	case READ_16:
		ei_lba = 0;
		lba = get_unaligned_be64(cmd + 2);
		num = get_unaligned_be32(cmd + 10);
		check_prot = true;
		break;
	case READ_10:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be16(cmd + 7);
		check_prot = true;
		break;
	case READ_6:
		ei_lba = 0;
		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
		      (u32)(cmd[1] & 0x1f) << 16;
		/* In READ(6) a transfer length of 0 means 256 blocks */
		num = (0 == cmd[4]) ? 256 : cmd[4];
		check_prot = true;
		break;
	case READ_12:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be32(cmd + 6);
		check_prot = true;
		break;
	case XDWRITEREAD_10:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be16(cmd + 7);
		check_prot = false;
		break;
	default:	/* assume READ(32) */
		lba = get_unaligned_be64(cmd + 12);
		ei_lba = get_unaligned_be32(cmd + 20);
		num = get_unaligned_be32(cmd + 28);
		check_prot = false;
		break;
	}
	/* Validate the RDPROTECT field against the configured DIF type */
	if (unlikely(have_dif_prot && check_prot)) {
		if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
		    (cmd[1] & 0xe0)) {
			mk_sense_invalid_opcode(scp);
			return check_condition_result;
		}
		if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
		     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
		    (cmd[1] & 0xe0) == 0)
			sdev_printk(KERN_ERR, scp->device, "Unprotected RD "
				    "to DIF device\n");
	}
	/* Optional injection: pretend only half the blocks transferred */
	if (unlikely((sdebug_opts & SDEBUG_OPT_SHORT_TRANSFER) &&
		     atomic_read(&sdeb_inject_pending))) {
		num /= 2;
		atomic_set(&sdeb_inject_pending, 0);
	}

	ret = check_device_access_params(scp, lba, num, false);
	if (ret)
		return ret;
	/* Optional injection: unrecovered read error in a configured range */
	if (unlikely((SDEBUG_OPT_MEDIUM_ERR & sdebug_opts) &&
		     (lba <= (sdebug_medium_error_start + sdebug_medium_error_count - 1)) &&
		     ((lba + num) > sdebug_medium_error_start))) {
		/* claim unrecoverable read error */
		mk_sense_buffer(scp, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
		/* set info field and valid bit for fixed descriptor */
		if (0x70 == (scp->sense_buffer[0] & 0x7f)) {
			scp->sense_buffer[0] |= 0x80;	/* Valid bit */
			ret = (lba < OPT_MEDIUM_ERR_ADDR)
			      ? OPT_MEDIUM_ERR_ADDR : (int)lba;
			put_unaligned_be32(ret, scp->sense_buffer + 3);
		}
		scsi_set_resid(scp, scsi_bufflen(scp));
		return check_condition_result;
	}

	read_lock(macc_lckp);

	/* DIX + T10 DIF */
	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
		int prot_ret = prot_verify_read(scp, lba, num, ei_lba);

		if (prot_ret) {
			read_unlock(macc_lckp);
			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, prot_ret);
			return illegal_condition_result;
		}
	}

	ret = do_device_access(sip, scp, 0, lba, num, false);
	read_unlock(macc_lckp);
	if (unlikely(ret == -1))
		return DID_ERROR << 16;

	scsi_set_resid(scp, scsi_bufflen(scp) - ret);

	/* Optional injection: report a DIF/DIX error after a good read */
	if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
		     atomic_read(&sdeb_inject_pending))) {
		if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
			mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
			atomic_set(&sdeb_inject_pending, 0);
			return check_condition_result;
		} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
			/* Logical block guard check failed */
			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
			atomic_set(&sdeb_inject_pending, 0);
			return illegal_condition_result;
		} else if (SDEBUG_OPT_DIX_ERR & sdebug_opts) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
			atomic_set(&sdeb_inject_pending, 0);
			return illegal_condition_result;
		}
	}
	return 0;
}
3211 
/* Dump a sector to the kernel log, 16 bytes per row, hex or ASCII. */
static void dump_sector(unsigned char *buf, int len)
{
	int row, col, n;

	pr_err(">>> Sector Dump <<<\n");
	for (row = 0; row < len; row += 16) {
		char line[128];

		n = 0;
		for (col = 0; col < 16; col++) {
			unsigned char c = buf[row + col];

			/* Printable ASCII as-is, everything else as hex */
			if (c >= 0x20 && c < 0x7e)
				n += scnprintf(line + n, sizeof(line) - n,
					       " %c ", buf[row + col]);
			else
				n += scnprintf(line + n, sizeof(line) - n,
					       "%02x ", buf[row + col]);
		}
		pr_err("%04d: %s\n", row, line);
	}
}
3233 
/*
 * Verify the protection tuples accompanying a write by walking the data
 * and protection scatterlists in lockstep, then store the tuples into
 * dif_storep. Returns 0 on success or a dif_verify() error code.
 */
static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
			     unsigned int sectors, u32 ei_lba)
{
	int ret;
	struct t10_pi_tuple *sdt;
	void *daddr;
	sector_t sector = start_sec;
	int ppage_offset;
	int dpage_offset;
	struct sg_mapping_iter diter;
	struct sg_mapping_iter piter;

	BUG_ON(scsi_sg_count(SCpnt) == 0);
	BUG_ON(scsi_prot_sg_count(SCpnt) == 0);

	sg_miter_start(&piter, scsi_prot_sglist(SCpnt),
			scsi_prot_sg_count(SCpnt),
			SG_MITER_ATOMIC | SG_MITER_FROM_SG);
	sg_miter_start(&diter, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
			SG_MITER_ATOMIC | SG_MITER_FROM_SG);

	/* For each protection page */
	while (sg_miter_next(&piter)) {
		dpage_offset = 0;
		if (WARN_ON(!sg_miter_next(&diter))) {
			/* Data ran out before protection: guard error */
			ret = 0x01;
			goto out;
		}

		/* One tuple per sector within this protection page */
		for (ppage_offset = 0; ppage_offset < piter.length;
		     ppage_offset += sizeof(struct t10_pi_tuple)) {
			/* If we're at the end of the current
			 * data page advance to the next one
			 */
			if (dpage_offset >= diter.length) {
				if (WARN_ON(!sg_miter_next(&diter))) {
					ret = 0x01;
					goto out;
				}
				dpage_offset = 0;
			}

			sdt = piter.addr + ppage_offset;
			daddr = diter.addr + dpage_offset;

			ret = dif_verify(sdt, daddr, sector, ei_lba);
			if (ret) {
				dump_sector(daddr, sdebug_sector_size);
				goto out;
			}

			sector++;
			ei_lba++;
			dpage_offset += sdebug_sector_size;
		}
		diter.consumed = dpage_offset;
		sg_miter_stop(&diter);
	}
	sg_miter_stop(&piter);

	/* All tuples verified: persist them into the store */
	dif_copy_prot(SCpnt, start_sec, sectors, false);
	dix_writes++;

	return 0;

out:
	dif_errors++;
	sg_miter_stop(&diter);
	sg_miter_stop(&piter);
	return ret;
}
3305 
/* Convert an LBA to its index in the thin-provisioning bitmap. */
static unsigned long lba_to_map_index(sector_t lba)
{
	/* Bias by the alignment so granule 0 starts at the alignment offset */
	if (sdebug_unmap_alignment)
		lba += sdebug_unmap_granularity - sdebug_unmap_alignment;
	/* sector_div() divides lba in place, leaving the quotient in lba */
	sector_div(lba, sdebug_unmap_granularity);
	return lba;
}
3313 
/* Inverse of lba_to_map_index(): first LBA of a provisioning granule. */
static sector_t map_index_to_lba(unsigned long index)
{
	sector_t lba = index * sdebug_unmap_granularity;

	/* Undo the alignment bias applied by lba_to_map_index() */
	if (sdebug_unmap_alignment)
		lba -= sdebug_unmap_granularity - sdebug_unmap_alignment;
	return lba;
}
3322 
3323 static unsigned int map_state(struct sdeb_store_info *sip, sector_t lba,
3324 			      unsigned int *num)
3325 {
3326 	sector_t end;
3327 	unsigned int mapped;
3328 	unsigned long index;
3329 	unsigned long next;
3330 
3331 	index = lba_to_map_index(lba);
3332 	mapped = test_bit(index, sip->map_storep);
3333 
3334 	if (mapped)
3335 		next = find_next_zero_bit(sip->map_storep, map_size, index);
3336 	else
3337 		next = find_next_bit(sip->map_storep, map_size, index);
3338 
3339 	end = min_t(sector_t, sdebug_store_sectors,  map_index_to_lba(next));
3340 	*num = end - lba;
3341 	return mapped;
3342 }
3343 
3344 static void map_region(struct sdeb_store_info *sip, sector_t lba,
3345 		       unsigned int len)
3346 {
3347 	sector_t end = lba + len;
3348 
3349 	while (lba < end) {
3350 		unsigned long index = lba_to_map_index(lba);
3351 
3352 		if (index < map_size)
3353 			set_bit(index, sip->map_storep);
3354 
3355 		lba = map_index_to_lba(index + 1);
3356 	}
3357 }
3358 
/*
 * Unmap the provisioning granules fully contained in [lba, lba + len).
 * Partially covered granules are left mapped. Depending on sdebug_lbprz
 * the backing data is zeroed or 0xff-filled, and any protection tuples
 * are invalidated.
 */
static void unmap_region(struct sdeb_store_info *sip, sector_t lba,
			 unsigned int len)
{
	sector_t end = lba + len;
	u8 *fsp = sip->storep;

	while (lba < end) {
		unsigned long index = lba_to_map_index(lba);

		/* Only unmap granules that start and end inside the range */
		if (lba == map_index_to_lba(index) &&
		    lba + sdebug_unmap_granularity <= end &&
		    index < map_size) {
			clear_bit(index, sip->map_storep);
			if (sdebug_lbprz) {  /* for LBPRZ=2 return 0xff_s */
				memset(fsp + lba * sdebug_sector_size,
				       (sdebug_lbprz & 1) ? 0 : 0xff,
				       sdebug_sector_size *
				       sdebug_unmap_granularity);
			}
			/* Invalidate protection info for the unmapped blocks */
			if (sip->dif_storep) {
				memset(sip->dif_storep + lba, 0xff,
				       sizeof(*sip->dif_storep) *
				       sdebug_unmap_granularity);
			}
		}
		lba = map_index_to_lba(index + 1);
	}
}
3387 
3388 static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3389 {
3390 	bool check_prot;
3391 	u32 num;
3392 	u32 ei_lba;
3393 	int ret;
3394 	u64 lba;
3395 	struct sdeb_store_info *sip = devip2sip(devip, true);
3396 	rwlock_t *macc_lckp = &sip->macc_lck;
3397 	u8 *cmd = scp->cmnd;
3398 
3399 	switch (cmd[0]) {
3400 	case WRITE_16:
3401 		ei_lba = 0;
3402 		lba = get_unaligned_be64(cmd + 2);
3403 		num = get_unaligned_be32(cmd + 10);
3404 		check_prot = true;
3405 		break;
3406 	case WRITE_10:
3407 		ei_lba = 0;
3408 		lba = get_unaligned_be32(cmd + 2);
3409 		num = get_unaligned_be16(cmd + 7);
3410 		check_prot = true;
3411 		break;
3412 	case WRITE_6:
3413 		ei_lba = 0;
3414 		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
3415 		      (u32)(cmd[1] & 0x1f) << 16;
3416 		num = (0 == cmd[4]) ? 256 : cmd[4];
3417 		check_prot = true;
3418 		break;
3419 	case WRITE_12:
3420 		ei_lba = 0;
3421 		lba = get_unaligned_be32(cmd + 2);
3422 		num = get_unaligned_be32(cmd + 6);
3423 		check_prot = true;
3424 		break;
3425 	case 0x53:	/* XDWRITEREAD(10) */
3426 		ei_lba = 0;
3427 		lba = get_unaligned_be32(cmd + 2);
3428 		num = get_unaligned_be16(cmd + 7);
3429 		check_prot = false;
3430 		break;
3431 	default:	/* assume WRITE(32) */
3432 		lba = get_unaligned_be64(cmd + 12);
3433 		ei_lba = get_unaligned_be32(cmd + 20);
3434 		num = get_unaligned_be32(cmd + 28);
3435 		check_prot = false;
3436 		break;
3437 	}
3438 	if (unlikely(have_dif_prot && check_prot)) {
3439 		if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3440 		    (cmd[1] & 0xe0)) {
3441 			mk_sense_invalid_opcode(scp);
3442 			return check_condition_result;
3443 		}
3444 		if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3445 		     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3446 		    (cmd[1] & 0xe0) == 0)
3447 			sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
3448 				    "to DIF device\n");
3449 	}
3450 
3451 	write_lock(macc_lckp);
3452 	ret = check_device_access_params(scp, lba, num, true);
3453 	if (ret) {
3454 		write_unlock(macc_lckp);
3455 		return ret;
3456 	}
3457 
3458 	/* DIX + T10 DIF */
3459 	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3460 		int prot_ret = prot_verify_write(scp, lba, num, ei_lba);
3461 
3462 		if (prot_ret) {
3463 			write_unlock(macc_lckp);
3464 			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, prot_ret);
3465 			return illegal_condition_result;
3466 		}
3467 	}
3468 
3469 	ret = do_device_access(sip, scp, 0, lba, num, true);
3470 	if (unlikely(scsi_debug_lbp()))
3471 		map_region(sip, lba, num);
3472 	/* If ZBC zone then bump its write pointer */
3473 	if (sdebug_dev_is_zoned(devip))
3474 		zbc_inc_wp(devip, lba, num);
3475 	write_unlock(macc_lckp);
3476 	if (unlikely(-1 == ret))
3477 		return DID_ERROR << 16;
3478 	else if (unlikely(sdebug_verbose &&
3479 			  (ret < (num * sdebug_sector_size))))
3480 		sdev_printk(KERN_INFO, scp->device,
3481 			    "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
3482 			    my_name, num * sdebug_sector_size, ret);
3483 
3484 	if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
3485 		     atomic_read(&sdeb_inject_pending))) {
3486 		if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
3487 			mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
3488 			atomic_set(&sdeb_inject_pending, 0);
3489 			return check_condition_result;
3490 		} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
3491 			/* Logical block guard check failed */
3492 			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3493 			atomic_set(&sdeb_inject_pending, 0);
3494 			return illegal_condition_result;
3495 		} else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
3496 			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3497 			atomic_set(&sdeb_inject_pending, 0);
3498 			return illegal_condition_result;
3499 		}
3500 	}
3501 	return 0;
3502 }
3503 
3504 /*
3505  * T10 has only specified WRITE SCATTERED(16) and WRITE SCATTERED(32).
3506  * No READ GATHERED yet (requires bidi or long cdb holding gather list).
3507  */
/*
 * WRITE SCATTERED(16/32): the data-out buffer begins with a parameter list
 * header plus num_lrd LBA range descriptors (LRDs), and at offset
 * lbdof * lb_size the write data for each descriptor follows back-to-back.
 * The whole transfer happens under the store's write lock.
 */
static int resp_write_scat(struct scsi_cmnd *scp,
			   struct sdebug_dev_info *devip)
{
	u8 *cmd = scp->cmnd;
	u8 *lrdp = NULL;
	u8 *up;
	struct sdeb_store_info *sip = devip2sip(devip, true);
	rwlock_t *macc_lckp = &sip->macc_lck;
	u8 wrprotect;
	u16 lbdof, num_lrd, k;
	u32 num, num_by, bt_len, lbdof_blen, sg_off, cum_lb;
	u32 lb_size = sdebug_sector_size;
	u32 ei_lba;
	u64 lba;
	int ret, res;
	bool is_16;
	static const u32 lrd_size = 32; /* + parameter list header size */

	/* Field layout differs between the 32-byte variable-length CDB and
	 * the 16-byte form; only the 16-byte form carries DIF checks here.
	 */
	if (cmd[0] == VARIABLE_LENGTH_CMD) {
		is_16 = false;
		wrprotect = (cmd[10] >> 5) & 0x7;
		lbdof = get_unaligned_be16(cmd + 12);
		num_lrd = get_unaligned_be16(cmd + 16);
		bt_len = get_unaligned_be32(cmd + 28);
	} else {        /* that leaves WRITE SCATTERED(16) */
		is_16 = true;
		wrprotect = (cmd[2] >> 5) & 0x7;
		lbdof = get_unaligned_be16(cmd + 4);
		num_lrd = get_unaligned_be16(cmd + 8);
		bt_len = get_unaligned_be32(cmd + 10);
		if (unlikely(have_dif_prot)) {
			if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
			    wrprotect) {
				mk_sense_invalid_opcode(scp);
				return illegal_condition_result;
			}
			if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
			     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
			     wrprotect == 0)
				sdev_printk(KERN_ERR, scp->device,
					    "Unprotected WR to DIF device\n");
		}
	}
	if ((num_lrd == 0) || (bt_len == 0))
		return 0;       /* T10 says these do-nothings are not errors */
	if (lbdof == 0) {
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, scp->device,
				"%s: %s: LB Data Offset field bad\n",
				my_name, __func__);
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		return illegal_condition_result;
	}
	lbdof_blen = lbdof * lb_size;
	/* Header (lrd_size bytes) plus all descriptors must fit before the
	 * data area that starts at lbdof blocks into the data-out buffer.
	 */
	if ((lrd_size + (num_lrd * lrd_size)) > lbdof_blen) {
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, scp->device,
				"%s: %s: LBA range descriptors don't fit\n",
				my_name, __func__);
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		return illegal_condition_result;
	}
	lrdp = kzalloc(lbdof_blen, GFP_ATOMIC);
	if (lrdp == NULL)
		return SCSI_MLQUEUE_HOST_BUSY;
	if (sdebug_verbose)
		sdev_printk(KERN_INFO, scp->device,
			"%s: %s: Fetch header+scatter_list, lbdof_blen=%u\n",
			my_name, __func__, lbdof_blen);
	res = fetch_to_dev_buffer(scp, lrdp, lbdof_blen);
	if (res == -1) {
		ret = DID_ERROR << 16;
		goto err_out;
	}

	write_lock(macc_lckp);
	/* sg_off tracks where the next range's data starts in the dout
	 * buffer; it begins right after the header + descriptor area.
	 */
	sg_off = lbdof_blen;
	/* Spec says Buffer xfer Length field in number of LBs in dout */
	cum_lb = 0;
	for (k = 0, up = lrdp + lrd_size; k < num_lrd; ++k, up += lrd_size) {
		lba = get_unaligned_be64(up + 0);
		num = get_unaligned_be32(up + 8);
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, scp->device,
				"%s: %s: k=%d  LBA=0x%llx num=%u  sg_off=%u\n",
				my_name, __func__, k, lba, num, sg_off);
		if (num == 0)
			continue;
		ret = check_device_access_params(scp, lba, num, true);
		if (ret)
			goto err_out_unlock;
		num_by = num * lb_size;
		/* 32-byte CDB descriptors carry an expected initial LBA for PI */
		ei_lba = is_16 ? 0 : get_unaligned_be32(up + 12);

		if ((cum_lb + num) > bt_len) {
			if (sdebug_verbose)
				sdev_printk(KERN_INFO, scp->device,
				    "%s: %s: sum of blocks > data provided\n",
				    my_name, __func__);
			mk_sense_buffer(scp, ILLEGAL_REQUEST, WRITE_ERROR_ASC,
					0);
			ret = illegal_condition_result;
			goto err_out_unlock;
		}

		/* DIX + T10 DIF */
		if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
			int prot_ret = prot_verify_write(scp, lba, num,
							 ei_lba);

			if (prot_ret) {
				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10,
						prot_ret);
				ret = illegal_condition_result;
				goto err_out_unlock;
			}
		}

		ret = do_device_access(sip, scp, sg_off, lba, num, true);
		/* If ZBC zone then bump its write pointer */
		if (sdebug_dev_is_zoned(devip))
			zbc_inc_wp(devip, lba, num);
		if (unlikely(scsi_debug_lbp()))
			map_region(sip, lba, num);
		if (unlikely(-1 == ret)) {
			ret = DID_ERROR << 16;
			goto err_out_unlock;
		} else if (unlikely(sdebug_verbose && (ret < num_by)))
			sdev_printk(KERN_INFO, scp->device,
			    "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
			    my_name, num_by, ret);

		/* Optional one-shot error injection, armed via sdeb_inject_pending */
		if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
			     atomic_read(&sdeb_inject_pending))) {
			if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
				mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
				atomic_set(&sdeb_inject_pending, 0);
				ret = check_condition_result;
				goto err_out_unlock;
			} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
				/* Logical block guard check failed */
				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
				atomic_set(&sdeb_inject_pending, 0);
				ret = illegal_condition_result;
				goto err_out_unlock;
			} else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
				atomic_set(&sdeb_inject_pending, 0);
				ret = illegal_condition_result;
				goto err_out_unlock;
			}
		}
		sg_off += num_by;
		cum_lb += num;
	}
	ret = 0;
err_out_unlock:
	write_unlock(macc_lckp);
err_out:
	kfree(lrdp);
	return ret;
}
3670 
3671 static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
3672 			   u32 ei_lba, bool unmap, bool ndob)
3673 {
3674 	struct scsi_device *sdp = scp->device;
3675 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
3676 	unsigned long long i;
3677 	u64 block, lbaa;
3678 	u32 lb_size = sdebug_sector_size;
3679 	int ret;
3680 	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3681 						scp->device->hostdata, true);
3682 	rwlock_t *macc_lckp = &sip->macc_lck;
3683 	u8 *fs1p;
3684 	u8 *fsp;
3685 
3686 	write_lock(macc_lckp);
3687 
3688 	ret = check_device_access_params(scp, lba, num, true);
3689 	if (ret) {
3690 		write_unlock(macc_lckp);
3691 		return ret;
3692 	}
3693 
3694 	if (unmap && scsi_debug_lbp()) {
3695 		unmap_region(sip, lba, num);
3696 		goto out;
3697 	}
3698 	lbaa = lba;
3699 	block = do_div(lbaa, sdebug_store_sectors);
3700 	/* if ndob then zero 1 logical block, else fetch 1 logical block */
3701 	fsp = sip->storep;
3702 	fs1p = fsp + (block * lb_size);
3703 	if (ndob) {
3704 		memset(fs1p, 0, lb_size);
3705 		ret = 0;
3706 	} else
3707 		ret = fetch_to_dev_buffer(scp, fs1p, lb_size);
3708 
3709 	if (-1 == ret) {
3710 		write_unlock(&sip->macc_lck);
3711 		return DID_ERROR << 16;
3712 	} else if (sdebug_verbose && !ndob && (ret < lb_size))
3713 		sdev_printk(KERN_INFO, scp->device,
3714 			    "%s: %s: lb size=%u, IO sent=%d bytes\n",
3715 			    my_name, "write same", lb_size, ret);
3716 
3717 	/* Copy first sector to remaining blocks */
3718 	for (i = 1 ; i < num ; i++) {
3719 		lbaa = lba + i;
3720 		block = do_div(lbaa, sdebug_store_sectors);
3721 		memmove(fsp + (block * lb_size), fs1p, lb_size);
3722 	}
3723 	if (scsi_debug_lbp())
3724 		map_region(sip, lba, num);
3725 	/* If ZBC zone then bump its write pointer */
3726 	if (sdebug_dev_is_zoned(devip))
3727 		zbc_inc_wp(devip, lba, num);
3728 out:
3729 	write_unlock(macc_lckp);
3730 
3731 	return 0;
3732 }
3733 
3734 static int resp_write_same_10(struct scsi_cmnd *scp,
3735 			      struct sdebug_dev_info *devip)
3736 {
3737 	u8 *cmd = scp->cmnd;
3738 	u32 lba;
3739 	u16 num;
3740 	u32 ei_lba = 0;
3741 	bool unmap = false;
3742 
3743 	if (cmd[1] & 0x8) {
3744 		if (sdebug_lbpws10 == 0) {
3745 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3746 			return check_condition_result;
3747 		} else
3748 			unmap = true;
3749 	}
3750 	lba = get_unaligned_be32(cmd + 2);
3751 	num = get_unaligned_be16(cmd + 7);
3752 	if (num > sdebug_write_same_length) {
3753 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3754 		return check_condition_result;
3755 	}
3756 	return resp_write_same(scp, lba, num, ei_lba, unmap, false);
3757 }
3758 
3759 static int resp_write_same_16(struct scsi_cmnd *scp,
3760 			      struct sdebug_dev_info *devip)
3761 {
3762 	u8 *cmd = scp->cmnd;
3763 	u64 lba;
3764 	u32 num;
3765 	u32 ei_lba = 0;
3766 	bool unmap = false;
3767 	bool ndob = false;
3768 
3769 	if (cmd[1] & 0x8) {	/* UNMAP */
3770 		if (sdebug_lbpws == 0) {
3771 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3772 			return check_condition_result;
3773 		} else
3774 			unmap = true;
3775 	}
3776 	if (cmd[1] & 0x1)  /* NDOB (no data-out buffer, assumes zeroes) */
3777 		ndob = true;
3778 	lba = get_unaligned_be64(cmd + 2);
3779 	num = get_unaligned_be32(cmd + 10);
3780 	if (num > sdebug_write_same_length) {
3781 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
3782 		return check_condition_result;
3783 	}
3784 	return resp_write_same(scp, lba, num, ei_lba, unmap, ndob);
3785 }
3786 
3787 /* Note the mode field is in the same position as the (lower) service action
3788  * field. For the Report supported operation codes command, SPC-4 suggests
3789  * each mode of this command should be reported separately; for future. */
static int resp_write_buffer(struct scsi_cmnd *scp,
			     struct sdebug_dev_info *devip)
{
	u8 *cmd = scp->cmnd;
	struct scsi_device *sdp = scp->device;
	struct sdebug_dev_info *dp;
	u8 mode;

	/* Only the microcode download+activate modes are emulated; they
	 * merely raise the appropriate unit attentions, no data is kept.
	 */
	mode = cmd[1] & 0x1f;
	switch (mode) {
	case 0x4:	/* download microcode (MC) and activate (ACT) */
		/* set UAs on this device only */
		set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
		set_bit(SDEBUG_UA_MICROCODE_CHANGED, devip->uas_bm);
		break;
	case 0x5:	/* download MC, save and ACT */
		set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET, devip->uas_bm);
		break;
	case 0x6:	/* download MC with offsets and ACT */
		/* set UAs on most devices (LUs) in this target */
		list_for_each_entry(dp,
				    &devip->sdbg_host->dev_info_list,
				    dev_list)
			if (dp->target == sdp->id) {
				set_bit(SDEBUG_UA_BUS_RESET, dp->uas_bm);
				/* the issuing LU gets only the bus-reset UA */
				if (devip != dp)
					set_bit(SDEBUG_UA_MICROCODE_CHANGED,
						dp->uas_bm);
			}
		break;
	case 0x7:	/* download MC with offsets, save, and ACT */
		/* set UA on all devices (LUs) in this target */
		list_for_each_entry(dp,
				    &devip->sdbg_host->dev_info_list,
				    dev_list)
			if (dp->target == sdp->id)
				set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET,
					dp->uas_bm);
		break;
	default:
		/* do nothing for this command for other mode values */
		break;
	}
	return 0;
}
3835 
/*
 * COMPARE AND WRITE: the data-out buffer carries 2 * num blocks (compare
 * data followed by write data).  The compare+write is done atomically
 * with respect to other store accesses by holding the write lock.
 */
static int resp_comp_write(struct scsi_cmnd *scp,
			   struct sdebug_dev_info *devip)
{
	u8 *cmd = scp->cmnd;
	u8 *arr;
	struct sdeb_store_info *sip = devip2sip(devip, true);
	rwlock_t *macc_lckp = &sip->macc_lck;
	u64 lba;
	u32 dnum;
	u32 lb_size = sdebug_sector_size;
	u8 num;
	int ret;
	int retval = 0;

	lba = get_unaligned_be64(cmd + 2);
	num = cmd[13];		/* 1 to a maximum of 255 logical blocks */
	if (0 == num)
		return 0;	/* degenerate case, not an error */
	/* cmd[1] & 0xe0 is the WRPROTECT field */
	if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
	    (cmd[1] & 0xe0)) {
		mk_sense_invalid_opcode(scp);
		return check_condition_result;
	}
	if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
	     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
	    (cmd[1] & 0xe0) == 0)
		sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
			    "to DIF device\n");
	ret = check_device_access_params(scp, lba, num, false);
	if (ret)
		return ret;
	/* dout holds both halves: num blocks to compare + num blocks to write */
	dnum = 2 * num;
	arr = kcalloc(lb_size, dnum, GFP_ATOMIC);
	if (NULL == arr) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
				INSUFF_RES_ASCQ);
		return check_condition_result;
	}

	write_lock(macc_lckp);

	ret = do_dout_fetch(scp, dnum, arr);
	if (ret == -1) {
		retval = DID_ERROR << 16;
		goto cleanup;
	} else if (sdebug_verbose && (ret < (dnum * lb_size)))
		sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb "
			    "indicated=%u, IO sent=%d bytes\n", my_name,
			    dnum * lb_size, ret);
	/* compares first num blocks; writes second half on match */
	if (!comp_write_worker(sip, lba, num, arr, false)) {
		mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
		retval = check_condition_result;
		goto cleanup;
	}
	if (scsi_debug_lbp())
		map_region(sip, lba, num);
cleanup:
	write_unlock(macc_lckp);
	kfree(arr);
	return retval;
}
3897 
/* One UNMAP parameter-list block descriptor (16 bytes on the wire, SBC). */
struct unmap_block_desc {
	__be64	lba;		/* first LBA of range to unmap */
	__be32	blocks;		/* number of logical blocks in range */
	__be32	__reserved;
};
3903 
/*
 * UNMAP: parse the parameter list of block descriptors and unmap each
 * range under the store's write lock.
 */
static int resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	unsigned char *buf;
	struct unmap_block_desc *desc;
	struct sdeb_store_info *sip = devip2sip(devip, true);
	rwlock_t *macc_lckp = &sip->macc_lck;
	unsigned int i, payload_len, descriptors;
	int ret;

	if (!scsi_debug_lbp())
		return 0;	/* fib and say its done */
	payload_len = get_unaligned_be16(scp->cmnd + 7);
	/* NOTE(review): these BUG_ONs act on initiator-supplied lengths and
	 * will panic on a malformed parameter list; presumably acceptable
	 * for a debug driver — confirm this is intentional.
	 */
	BUG_ON(scsi_bufflen(scp) != payload_len);

	descriptors = (payload_len - 8) / 16;
	if (descriptors > sdebug_unmap_max_desc) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
		return check_condition_result;
	}

	buf = kzalloc(scsi_bufflen(scp), GFP_ATOMIC);
	if (!buf) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
				INSUFF_RES_ASCQ);
		return check_condition_result;
	}

	scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));

	BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
	BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);

	/* descriptors start after the 8-byte parameter list header */
	desc = (void *)&buf[8];

	write_lock(macc_lckp);

	for (i = 0 ; i < descriptors ; i++) {
		unsigned long long lba = get_unaligned_be64(&desc[i].lba);
		unsigned int num = get_unaligned_be32(&desc[i].blocks);

		ret = check_device_access_params(scp, lba, num, true);
		if (ret)
			goto out;

		unmap_region(sip, lba, num);
	}

	ret = 0;

out:
	write_unlock(macc_lckp);
	kfree(buf);

	return ret;
}
3959 
3960 #define SDEBUG_GET_LBA_STATUS_LEN 32
3961 
/*
 * GET LBA STATUS: report whether the block at the given LBA is mapped or
 * deallocated, and how many following blocks share that state.  Only a
 * single 16-byte LBA status descriptor is returned.
 */
static int resp_get_lba_status(struct scsi_cmnd *scp,
			       struct sdebug_dev_info *devip)
{
	u8 *cmd = scp->cmnd;
	u64 lba;
	u32 alloc_len, mapped, num;
	int ret;
	u8 arr[SDEBUG_GET_LBA_STATUS_LEN];

	lba = get_unaligned_be64(cmd + 2);
	alloc_len = get_unaligned_be32(cmd + 10);

	/* less than header (8) + one descriptor (16): nothing to report */
	if (alloc_len < 24)
		return 0;

	ret = check_device_access_params(scp, lba, 1, false);
	if (ret)
		return ret;

	if (scsi_debug_lbp()) {
		struct sdeb_store_info *sip = devip2sip(devip, true);

		mapped = map_state(sip, lba, &num);
	} else {
		/* without provisioning everything counts as mapped */
		mapped = 1;
		/* following just in case virtual_gb changed */
		sdebug_capacity = get_sdebug_capacity();
		if (sdebug_capacity - lba <= 0xffffffff)
			num = sdebug_capacity - lba;
		else
			num = 0xffffffff;
	}

	memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
	put_unaligned_be32(20, arr);		/* Parameter Data Length */
	put_unaligned_be64(lba, arr + 8);	/* LBA */
	put_unaligned_be32(num, arr + 16);	/* Number of blocks */
	arr[20] = !mapped;		/* prov_stat=0: mapped; 1: dealloc */

	return fill_from_dev_buffer(scp, arr, SDEBUG_GET_LBA_STATUS_LEN);
}
4003 
/*
 * SYNCHRONIZE CACHE(10/16): nothing to flush in this emulation; the only
 * effect is choosing whether to complete immediately (IMMED bit set, or
 * no write since the last sync) or after the configured delay.
 */
static int resp_sync_cache(struct scsi_cmnd *scp,
			   struct sdebug_dev_info *devip)
{
	int res = 0;
	u64 lba;
	u32 num_blocks;
	u8 *cmd = scp->cmnd;

	if (cmd[0] == SYNCHRONIZE_CACHE) {	/* 10 byte cdb */
		lba = get_unaligned_be32(cmd + 2);
		num_blocks = get_unaligned_be16(cmd + 7);
	} else {				/* SYNCHRONIZE_CACHE(16) */
		lba = get_unaligned_be64(cmd + 2);
		num_blocks = get_unaligned_be32(cmd + 10);
	}
	if (lba + num_blocks > sdebug_capacity) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		return check_condition_result;
	}
	/* cmd[1] & 0x2 is the IMMED bit */
	if (!write_since_sync || cmd[1] & 0x2)
		res = SDEG_RES_IMMED_MASK;
	else		/* delay if write_since_sync and IMMED clear */
		write_since_sync = false;
	return res;
}
4029 
4030 /*
4031  * Assuming the LBA+num_blocks is not out-of-range, this function will return
4032  * CONDITION MET if the specified blocks will/have fitted in the cache, and
4033  * a GOOD status otherwise. Model a disk with a big cache and yield
4034  * CONDITION MET. Actually tries to bring range in main memory into the
4035  * cache associated with the CPU(s).
4036  */
static int resp_pre_fetch(struct scsi_cmnd *scp,
			  struct sdebug_dev_info *devip)
{
	int res = 0;
	u64 lba;
	u64 block, rest = 0;
	u32 nblks;
	u8 *cmd = scp->cmnd;
	struct sdeb_store_info *sip = devip2sip(devip, true);
	rwlock_t *macc_lckp = &sip->macc_lck;
	u8 *fsp = sip->storep;

	if (cmd[0] == PRE_FETCH) {	/* 10 byte cdb */
		lba = get_unaligned_be32(cmd + 2);
		nblks = get_unaligned_be16(cmd + 7);
	} else {			/* PRE-FETCH(16) */
		lba = get_unaligned_be64(cmd + 2);
		nblks = get_unaligned_be32(cmd + 10);
	}
	if (lba + nblks > sdebug_capacity) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		return check_condition_result;
	}
	/* no backing store (fake_rw): nothing to prefetch */
	if (!fsp)
		goto fini;
	/* PRE-FETCH spec says nothing about LBP or PI so skip them */
	block = do_div(lba, sdebug_store_sectors);
	/* range may wrap around the end of the (smaller) backing store */
	if (block + nblks > sdebug_store_sectors)
		rest = block + nblks - sdebug_store_sectors;

	/* Try to bring the PRE-FETCH range into CPU's cache */
	read_lock(macc_lckp);
	prefetch_range(fsp + (sdebug_sector_size * block),
		       (nblks - rest) * sdebug_sector_size);
	if (rest)
		prefetch_range(fsp, rest * sdebug_sector_size);
	read_unlock(macc_lckp);
fini:
	/* cmd[1] & 0x2 is the IMMED bit */
	if (cmd[1] & 0x2)
		res = SDEG_RES_IMMED_MASK;
	return res | condition_met_result;
}
4079 
4080 #define RL_BUCKET_ELEMS 8
4081 
4082 /* Even though each pseudo target has a REPORT LUNS "well known logical unit"
4083  * (W-LUN), the normal Linux scanning logic does not associate it with a
4084  * device (e.g. /dev/sg7). The following magic will make that association:
4085  *   "cd /sys/class/scsi_host/host<n> ; echo '- - 49409' > scan"
4086  * where <n> is a host number. If there are multiple targets in a host then
4087  * the above will associate a W-LUN to each target. To only get a W-LUN
4088  * for target 2, then use "echo '- 2 49409' > scan" .
4089  */
static int resp_report_luns(struct scsi_cmnd *scp,
			    struct sdebug_dev_info *devip)
{
	unsigned char *cmd = scp->cmnd;
	unsigned int alloc_len;
	unsigned char select_report;
	u64 lun;
	struct scsi_lun *lun_p;
	u8 arr[RL_BUCKET_ELEMS * sizeof(struct scsi_lun)];
	unsigned int lun_cnt;	/* normal LUN count (max: 256) */
	unsigned int wlun_cnt;	/* report luns W-LUN count */
	unsigned int tlun_cnt;	/* total LUN count */
	unsigned int rlen;	/* response length (in bytes) */
	int k, j, n, res;
	unsigned int off_rsp = 0;
	const int sz_lun = sizeof(struct scsi_lun);

	clear_luns_changed_on_target(devip);

	select_report = cmd[2];
	alloc_len = get_unaligned_be32(cmd + 6);

	if (alloc_len < 4) {
		pr_err("alloc len too small %d\n", alloc_len);
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
		return check_condition_result;
	}

	/* SELECT REPORT decides which mix of normal LUNs and W-LUNs to list */
	switch (select_report) {
	case 0:		/* all LUNs apart from W-LUNs */
		lun_cnt = sdebug_max_luns;
		wlun_cnt = 0;
		break;
	case 1:		/* only W-LUNs */
		lun_cnt = 0;
		wlun_cnt = 1;
		break;
	case 2:		/* all LUNs */
		lun_cnt = sdebug_max_luns;
		wlun_cnt = 1;
		break;
	case 0x10:	/* only administrative LUs */
	case 0x11:	/* see SPC-5 */
	case 0x12:	/* only subsiduary LUs owned by referenced LU */
	default:
		pr_debug("select report invalid %d\n", select_report);
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
		return check_condition_result;
	}

	if (sdebug_no_lun_0 && (lun_cnt > 0))
		--lun_cnt;

	tlun_cnt = lun_cnt + wlun_cnt;
	rlen = tlun_cnt * sz_lun;	/* excluding 8 byte header */
	scsi_set_resid(scp, scsi_bufflen(scp));
	pr_debug("select_report %d luns = %d wluns = %d no_lun0 %d\n",
		 select_report, lun_cnt, wlun_cnt, sdebug_no_lun_0);

	/* loops rely on sizeof response header same as sizeof lun (both 8) */
	lun = sdebug_no_lun_0 ? 1 : 0;
	/* The response is streamed out in RL_BUCKET_ELEMS-entry buckets;
	 * bucket 0 spends its first slot (j = 1) on the 8-byte header.
	 */
	for (k = 0, j = 0, res = 0; true; ++k, j = 0) {
		memset(arr, 0, sizeof(arr));
		lun_p = (struct scsi_lun *)&arr[0];
		if (k == 0) {
			put_unaligned_be32(rlen, &arr[0]);
			++lun_p;
			j = 1;
		}
		for ( ; j < RL_BUCKET_ELEMS; ++j, ++lun_p) {
			if ((k * RL_BUCKET_ELEMS) + j > lun_cnt)
				break;
			int_to_scsilun(lun++, lun_p);
		}
		/* a partially filled bucket means the normal LUNs are done */
		if (j < RL_BUCKET_ELEMS)
			break;
		n = j * sz_lun;
		res = p_fill_from_dev_buffer(scp, arr, n, off_rsp);
		if (res)
			return res;
		off_rsp += n;
	}
	/* append the W-LUN (if any) into the leftover slot of the last bucket */
	if (wlun_cnt) {
		int_to_scsilun(SCSI_W_LUN_REPORT_LUNS, lun_p);
		++j;
	}
	if (j > 0)
		res = p_fill_from_dev_buffer(scp, arr, j * sz_lun, off_rsp);
	return res;
}
4180 
/*
 * VERIFY(10/16) with BYTCHK: compare initiator-supplied data against the
 * store.  BYTCHK=0 claims success without checking, BYTCHK=1 compares
 * vnum blocks, BYTCHK=3 receives one block and compares it repeatedly.
 */
static int resp_verify(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	bool is_bytchk3 = false;
	u8 bytchk;
	int ret, j;
	u32 vnum, a_num, off;
	const u32 lb_size = sdebug_sector_size;
	u64 lba;
	u8 *arr;
	u8 *cmd = scp->cmnd;
	struct sdeb_store_info *sip = devip2sip(devip, true);
	rwlock_t *macc_lckp = &sip->macc_lck;

	bytchk = (cmd[1] >> 1) & 0x3;
	if (bytchk == 0) {
		return 0;	/* always claim internal verify okay */
	} else if (bytchk == 2) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
		return check_condition_result;
	} else if (bytchk == 3) {
		is_bytchk3 = true;	/* 1 block sent, compared repeatedly */
	}
	switch (cmd[0]) {
	case VERIFY_16:
		lba = get_unaligned_be64(cmd + 2);
		vnum = get_unaligned_be32(cmd + 10);
		break;
	case VERIFY:		/* is VERIFY(10) */
		lba = get_unaligned_be32(cmd + 2);
		vnum = get_unaligned_be16(cmd + 7);
		break;
	default:
		mk_sense_invalid_opcode(scp);
		return check_condition_result;
	}
	/* a_num is how many blocks arrive in the data-out buffer */
	a_num = is_bytchk3 ? 1 : vnum;
	/* Treat following check like one for read (i.e. no write) access */
	/* NOTE(review): for BYTCHK=3 only a_num (1) block is range-checked
	 * here while vnum blocks are compared below — confirm vnum is
	 * bounded elsewhere for this path.
	 */
	ret = check_device_access_params(scp, lba, a_num, false);
	if (ret)
		return ret;

	arr = kcalloc(lb_size, vnum, GFP_ATOMIC);
	if (!arr) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
				INSUFF_RES_ASCQ);
		return check_condition_result;
	}
	/* Not changing store, so only need read access */
	read_lock(macc_lckp);

	ret = do_dout_fetch(scp, a_num, arr);
	if (ret == -1) {
		ret = DID_ERROR << 16;
		goto cleanup;
	} else if (sdebug_verbose && (ret < (a_num * lb_size))) {
		sdev_printk(KERN_INFO, scp->device,
			    "%s: %s: cdb indicated=%u, IO sent=%d bytes\n",
			    my_name, __func__, a_num * lb_size, ret);
	}
	/* BYTCHK=3: replicate the single received block across the buffer */
	if (is_bytchk3) {
		for (j = 1, off = lb_size; j < vnum; ++j, off += lb_size)
			memcpy(arr + off, arr, lb_size);
	}
	ret = 0;
	if (!comp_write_worker(sip, lba, vnum, arr, true)) {
		mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
		ret = check_condition_result;
		goto cleanup;
	}
cleanup:
	read_unlock(macc_lckp);
	kfree(arr);
	return ret;
}
4255 
4256 #define RZONES_DESC_HD 64
4257 
4258 /* Report zones depending on start LBA nad reporting options */
4259 static int resp_report_zones(struct scsi_cmnd *scp,
4260 			     struct sdebug_dev_info *devip)
4261 {
4262 	unsigned int i, max_zones, rep_max_zones, nrz = 0;
4263 	int ret = 0;
4264 	u32 alloc_len, rep_opts, rep_len;
4265 	bool partial;
4266 	u64 lba, zs_lba;
4267 	u8 *arr = NULL, *desc;
4268 	u8 *cmd = scp->cmnd;
4269 	struct sdeb_zone_state *zsp;
4270 	struct sdeb_store_info *sip = devip2sip(devip, false);
4271 	rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
4272 
4273 	if (!sdebug_dev_is_zoned(devip)) {
4274 		mk_sense_invalid_opcode(scp);
4275 		return check_condition_result;
4276 	}
4277 	zs_lba = get_unaligned_be64(cmd + 2);
4278 	alloc_len = get_unaligned_be32(cmd + 10);
4279 	rep_opts = cmd[14] & 0x3f;
4280 	partial = cmd[14] & 0x80;
4281 
4282 	if (zs_lba >= sdebug_capacity) {
4283 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4284 		return check_condition_result;
4285 	}
4286 
4287 	max_zones = devip->nr_zones - (zs_lba >> devip->zsize_shift);
4288 	rep_max_zones = min((alloc_len - 64) >> ilog2(RZONES_DESC_HD),
4289 			    max_zones);
4290 
4291 	arr = kcalloc(RZONES_DESC_HD, alloc_len, GFP_ATOMIC);
4292 	if (!arr) {
4293 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4294 				INSUFF_RES_ASCQ);
4295 		return check_condition_result;
4296 	}
4297 
4298 	read_lock(macc_lckp);
4299 
4300 	desc = arr + 64;
4301 	for (i = 0; i < max_zones; i++) {
4302 		lba = zs_lba + devip->zsize * i;
4303 		if (lba > sdebug_capacity)
4304 			break;
4305 		zsp = zbc_zone(devip, lba);
4306 		switch (rep_opts) {
4307 		case 0x00:
4308 			/* All zones */
4309 			break;
4310 		case 0x01:
4311 			/* Empty zones */
4312 			if (zsp->z_cond != ZC1_EMPTY)
4313 				continue;
4314 			break;
4315 		case 0x02:
4316 			/* Implicit open zones */
4317 			if (zsp->z_cond != ZC2_IMPLICIT_OPEN)
4318 				continue;
4319 			break;
4320 		case 0x03:
4321 			/* Explicit open zones */
4322 			if (zsp->z_cond != ZC3_EXPLICIT_OPEN)
4323 				continue;
4324 			break;
4325 		case 0x04:
4326 			/* Closed zones */
4327 			if (zsp->z_cond != ZC4_CLOSED)
4328 				continue;
4329 			break;
4330 		case 0x05:
4331 			/* Full zones */
4332 			if (zsp->z_cond != ZC5_FULL)
4333 				continue;
4334 			break;
4335 		case 0x06:
4336 		case 0x07:
4337 		case 0x10:
4338 			/*
4339 			 * Read-only, offline, reset WP recommended are
4340 			 * not emulated: no zones to report;
4341 			 */
4342 			continue;
4343 		case 0x11:
4344 			/* non-seq-resource set */
4345 			if (!zsp->z_non_seq_resource)
4346 				continue;
4347 			break;
4348 		case 0x3f:
4349 			/* Not write pointer (conventional) zones */
4350 			if (!zbc_zone_is_conv(zsp))
4351 				continue;
4352 			break;
4353 		default:
4354 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
4355 					INVALID_FIELD_IN_CDB, 0);
4356 			ret = check_condition_result;
4357 			goto fini;
4358 		}
4359 
4360 		if (nrz < rep_max_zones) {
4361 			/* Fill zone descriptor */
4362 			desc[0] = zsp->z_type;
4363 			desc[1] = zsp->z_cond << 4;
4364 			if (zsp->z_non_seq_resource)
4365 				desc[1] |= 1 << 1;
4366 			put_unaligned_be64((u64)zsp->z_size, desc + 8);
4367 			put_unaligned_be64((u64)zsp->z_start, desc + 16);
4368 			put_unaligned_be64((u64)zsp->z_wp, desc + 24);
4369 			desc += 64;
4370 		}
4371 
4372 		if (partial && nrz >= rep_max_zones)
4373 			break;
4374 
4375 		nrz++;
4376 	}
4377 
4378 	/* Report header */
4379 	put_unaligned_be32(nrz * RZONES_DESC_HD, arr + 0);
4380 	put_unaligned_be64(sdebug_capacity - 1, arr + 8);
4381 
4382 	rep_len = (unsigned long)desc - (unsigned long)arr;
4383 	ret = fill_from_dev_buffer(scp, arr, min_t(int, alloc_len, rep_len));
4384 
4385 fini:
4386 	read_unlock(macc_lckp);
4387 	kfree(arr);
4388 	return ret;
4389 }
4390 
4391 /* Logic transplanted from tcmu-runner, file_zbc.c */
4392 static void zbc_open_all(struct sdebug_dev_info *devip)
4393 {
4394 	struct sdeb_zone_state *zsp = &devip->zstate[0];
4395 	unsigned int i;
4396 
4397 	for (i = 0; i < devip->nr_zones; i++, zsp++) {
4398 		if (zsp->z_cond == ZC4_CLOSED)
4399 			zbc_open_zone(devip, &devip->zstate[i], true);
4400 	}
4401 }
4402 
/*
 * OPEN ZONE: explicitly open the zone at the given zone-start LBA, or all
 * closed zones when the ALL bit is set, subject to the max_open limit.
 */
static int resp_open_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	int res = 0;
	u64 z_id;
	enum sdebug_z_cond zc;
	u8 *cmd = scp->cmnd;
	struct sdeb_zone_state *zsp;
	bool all = cmd[14] & 0x01;
	struct sdeb_store_info *sip = devip2sip(devip, false);
	rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;

	if (!sdebug_dev_is_zoned(devip)) {
		mk_sense_invalid_opcode(scp);
		return check_condition_result;
	}

	write_lock(macc_lckp);

	if (all) {
		/* Check if all closed zones can be open */
		if (devip->max_open &&
		    devip->nr_exp_open + devip->nr_closed > devip->max_open) {
			mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
					INSUFF_ZONE_ASCQ);
			res = check_condition_result;
			goto fini;
		}
		/* Open all closed zones */
		zbc_open_all(devip);
		goto fini;
	}

	/* Open the specified zone */
	z_id = get_unaligned_be64(cmd + 2);
	if (z_id >= sdebug_capacity) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		res = check_condition_result;
		goto fini;
	}

	zsp = zbc_zone(devip, z_id);
	/* z_id must be exactly a zone start LBA */
	if (z_id != zsp->z_start) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		res = check_condition_result;
		goto fini;
	}
	if (zbc_zone_is_conv(zsp)) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		res = check_condition_result;
		goto fini;
	}

	/* already explicitly open or full: nothing to do */
	zc = zsp->z_cond;
	if (zc == ZC3_EXPLICIT_OPEN || zc == ZC5_FULL)
		goto fini;

	if (devip->max_open && devip->nr_exp_open >= devip->max_open) {
		mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
				INSUFF_ZONE_ASCQ);
		res = check_condition_result;
		goto fini;
	}

	/* implicit -> explicit open goes through the closed state */
	if (zc == ZC2_IMPLICIT_OPEN)
		zbc_close_zone(devip, zsp);
	zbc_open_zone(devip, zsp, true);
fini:
	write_unlock(macc_lckp);
	return res;
}
4473 
4474 static void zbc_close_all(struct sdebug_dev_info *devip)
4475 {
4476 	unsigned int i;
4477 
4478 	for (i = 0; i < devip->nr_zones; i++)
4479 		zbc_close_zone(devip, &devip->zstate[i]);
4480 }
4481 
/*
 * CLOSE ZONE: close the zone at the given zone-start LBA, or all zones
 * when the ALL bit is set.
 */
static int resp_close_zone(struct scsi_cmnd *scp,
			   struct sdebug_dev_info *devip)
{
	int res = 0;
	u64 z_id;
	u8 *cmd = scp->cmnd;
	struct sdeb_zone_state *zsp;
	bool all = cmd[14] & 0x01;
	struct sdeb_store_info *sip = devip2sip(devip, false);
	rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;

	if (!sdebug_dev_is_zoned(devip)) {
		mk_sense_invalid_opcode(scp);
		return check_condition_result;
	}

	write_lock(macc_lckp);

	if (all) {
		zbc_close_all(devip);
		goto fini;
	}

	/* Close specified zone */
	z_id = get_unaligned_be64(cmd + 2);
	if (z_id >= sdebug_capacity) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		res = check_condition_result;
		goto fini;
	}

	zsp = zbc_zone(devip, z_id);
	/* z_id must be exactly a zone start LBA */
	if (z_id != zsp->z_start) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		res = check_condition_result;
		goto fini;
	}
	if (zbc_zone_is_conv(zsp)) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		res = check_condition_result;
		goto fini;
	}

	zbc_close_zone(devip, zsp);
fini:
	write_unlock(macc_lckp);
	return res;
}
4530 
4531 static void zbc_finish_zone(struct sdebug_dev_info *devip,
4532 			    struct sdeb_zone_state *zsp, bool empty)
4533 {
4534 	enum sdebug_z_cond zc = zsp->z_cond;
4535 
4536 	if (zc == ZC4_CLOSED || zc == ZC2_IMPLICIT_OPEN ||
4537 	    zc == ZC3_EXPLICIT_OPEN || (empty && zc == ZC1_EMPTY)) {
4538 		if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
4539 			zbc_close_zone(devip, zsp);
4540 		if (zsp->z_cond == ZC4_CLOSED)
4541 			devip->nr_closed--;
4542 		zsp->z_wp = zsp->z_start + zsp->z_size;
4543 		zsp->z_cond = ZC5_FULL;
4544 	}
4545 }
4546 
4547 static void zbc_finish_all(struct sdebug_dev_info *devip)
4548 {
4549 	unsigned int i;
4550 
4551 	for (i = 0; i < devip->nr_zones; i++)
4552 		zbc_finish_zone(devip, &devip->zstate[i], false);
4553 }
4554 
/*
 * FINISH ZONE: move the zone at the given zone-start LBA (or all zones,
 * when the ALL bit is set) to the FULL condition.
 */
static int resp_finish_zone(struct scsi_cmnd *scp,
			    struct sdebug_dev_info *devip)
{
	struct sdeb_zone_state *zsp;
	int res = 0;
	u64 z_id;
	u8 *cmd = scp->cmnd;
	bool all = cmd[14] & 0x01;
	struct sdeb_store_info *sip = devip2sip(devip, false);
	rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;

	if (!sdebug_dev_is_zoned(devip)) {
		mk_sense_invalid_opcode(scp);
		return check_condition_result;
	}

	write_lock(macc_lckp);

	if (all) {
		zbc_finish_all(devip);
		goto fini;
	}

	/* Finish the specified zone */
	z_id = get_unaligned_be64(cmd + 2);
	if (z_id >= sdebug_capacity) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		res = check_condition_result;
		goto fini;
	}

	zsp = zbc_zone(devip, z_id);
	/* z_id must be exactly a zone start LBA */
	if (z_id != zsp->z_start) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		res = check_condition_result;
		goto fini;
	}
	if (zbc_zone_is_conv(zsp)) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		res = check_condition_result;
		goto fini;
	}

	/* empty == true: an empty zone may be finished directly */
	zbc_finish_zone(devip, zsp, true);
fini:
	write_unlock(macc_lckp);
	return res;
}
4603 
4604 static void zbc_rwp_zone(struct sdebug_dev_info *devip,
4605 			 struct sdeb_zone_state *zsp)
4606 {
4607 	enum sdebug_z_cond zc;
4608 
4609 	if (zbc_zone_is_conv(zsp))
4610 		return;
4611 
4612 	zc = zsp->z_cond;
4613 	if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
4614 		zbc_close_zone(devip, zsp);
4615 
4616 	if (zsp->z_cond == ZC4_CLOSED)
4617 		devip->nr_closed--;
4618 
4619 	zsp->z_non_seq_resource = false;
4620 	zsp->z_wp = zsp->z_start;
4621 	zsp->z_cond = ZC1_EMPTY;
4622 }
4623 
4624 static void zbc_rwp_all(struct sdebug_dev_info *devip)
4625 {
4626 	unsigned int i;
4627 
4628 	for (i = 0; i < devip->nr_zones; i++)
4629 		zbc_rwp_zone(devip, &devip->zstate[i]);
4630 }
4631 
4632 static int resp_rwp_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4633 {
4634 	struct sdeb_zone_state *zsp;
4635 	int res = 0;
4636 	u64 z_id;
4637 	u8 *cmd = scp->cmnd;
4638 	bool all = cmd[14] & 0x01;
4639 	struct sdeb_store_info *sip = devip2sip(devip, false);
4640 	rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
4641 
4642 	if (!sdebug_dev_is_zoned(devip)) {
4643 		mk_sense_invalid_opcode(scp);
4644 		return check_condition_result;
4645 	}
4646 
4647 	write_lock(macc_lckp);
4648 
4649 	if (all) {
4650 		zbc_rwp_all(devip);
4651 		goto fini;
4652 	}
4653 
4654 	z_id = get_unaligned_be64(cmd + 2);
4655 	if (z_id >= sdebug_capacity) {
4656 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4657 		res = check_condition_result;
4658 		goto fini;
4659 	}
4660 
4661 	zsp = zbc_zone(devip, z_id);
4662 	if (z_id != zsp->z_start) {
4663 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4664 		res = check_condition_result;
4665 		goto fini;
4666 	}
4667 	if (zbc_zone_is_conv(zsp)) {
4668 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4669 		res = check_condition_result;
4670 		goto fini;
4671 	}
4672 
4673 	zbc_rwp_zone(devip, zsp);
4674 fini:
4675 	write_unlock(macc_lckp);
4676 	return res;
4677 }
4678 
4679 static struct sdebug_queue *get_queue(struct scsi_cmnd *cmnd)
4680 {
4681 	u16 hwq;
4682 
4683 	if (sdebug_host_max_queue) {
4684 		/* Provide a simple method to choose the hwq */
4685 		hwq = smp_processor_id() % submit_queues;
4686 	} else {
4687 		u32 tag = blk_mq_unique_tag(cmnd->request);
4688 
4689 		hwq = blk_mq_unique_tag_to_hwq(tag);
4690 
4691 		pr_debug("tag=%#x, hwq=%d\n", tag, hwq);
4692 		if (WARN_ON_ONCE(hwq >= submit_queues))
4693 			hwq = 0;
4694 	}
4695 	return sdebug_q_arr + hwq;
4696 }
4697 
/* Return the block layer's unique tag for @cmnd; stored in hc_idx when
 * hostwide tags (sdebug_host_max_queue) are in use.
 */
static u32 get_tag(struct scsi_cmnd *cmnd)
{
	return blk_mq_unique_tag(cmnd->request);
}
4702 
/* Queued (deferred) command completions converge here: both the hrtimer
 * and the work-queue deferral paths end up in this function. It validates
 * the queue slot, releases it under qc_lock, then invokes scsi_done()
 * (unless the command was marked aborted).
 */
static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
{
	bool aborted = sd_dp->aborted;
	int qc_idx;
	int retiring = 0;
	unsigned long iflags;
	struct sdebug_queue *sqp;
	struct sdebug_queued_cmd *sqcp;
	struct scsi_cmnd *scp;
	struct sdebug_dev_info *devip;

	/* mark the deferred work consumed before touching the queue */
	sd_dp->defer_t = SDEB_DEFER_NONE;
	if (unlikely(aborted))
		sd_dp->aborted = false;
	qc_idx = sd_dp->qc_idx;
	sqp = sdebug_q_arr + sd_dp->sqa_idx;
	if (sdebug_statistics) {
		atomic_inc(&sdebug_completions);
		/* completion ran on a different CPU than the submission */
		if (raw_smp_processor_id() != sd_dp->issuing_cpu)
			atomic_inc(&sdebug_miss_cpus);
	}
	/* sanity check the slot index before indexing qc_arr */
	if (unlikely((qc_idx < 0) || (qc_idx >= SDEBUG_CANQUEUE))) {
		pr_err("wild qc_idx=%d\n", qc_idx);
		return;
	}
	spin_lock_irqsave(&sqp->qc_lock, iflags);
	sqcp = &sqp->qc_arr[qc_idx];
	scp = sqcp->a_cmnd;
	if (unlikely(scp == NULL)) {
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
		pr_err("scp is NULL, sqa_idx=%d, qc_idx=%d, hc_idx=%d\n",
		       sd_dp->sqa_idx, qc_idx, sd_dp->hc_idx);
		return;
	}
	devip = (struct sdebug_dev_info *)scp->device->hostdata;
	if (likely(devip))
		atomic_dec(&devip->num_in_q);
	else
		pr_err("devip=NULL\n");
	/* a positive retired_max_queue means max_queue was reduced */
	if (unlikely(atomic_read(&retired_max_queue) > 0))
		retiring = 1;

	sqcp->a_cmnd = NULL;
	if (unlikely(!test_and_clear_bit(qc_idx, sqp->in_use_bm))) {
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
		pr_err("Unexpected completion\n");
		return;
	}

	if (unlikely(retiring)) {	/* user has reduced max_queue */
		int k, retval;

		retval = atomic_read(&retired_max_queue);
		if (qc_idx >= retval) {
			spin_unlock_irqrestore(&sqp->qc_lock, iflags);
			pr_err("index %d too large\n", retval);
			return;
		}
		/* lower retired_max_queue once the high slots have drained */
		k = find_last_bit(sqp->in_use_bm, retval);
		if ((k < sdebug_max_queue) || (k == retval))
			atomic_set(&retired_max_queue, 0);
		else
			atomic_set(&retired_max_queue, k + 1);
	}
	spin_unlock_irqrestore(&sqp->qc_lock, iflags);
	if (unlikely(aborted)) {
		if (sdebug_verbose)
			pr_info("bypassing scsi_done() due to aborted cmd\n");
		return;
	}
	scp->scsi_done(scp); /* callback to mid level */
}
4776 
/* When high resolution timer goes off this function is called. One-shot:
 * completes the deferred command and does not rearm the timer.
 */
static enum hrtimer_restart sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
{
	struct sdebug_defer *sd_dp = container_of(timer, struct sdebug_defer,
						  hrt);
	sdebug_q_cmd_complete(sd_dp);
	return HRTIMER_NORESTART;
}
4785 
/* When work queue schedules work, it calls this function; it funnels the
 * work-queue deferral path into the common completion handler.
 */
static void sdebug_q_cmd_wq_complete(struct work_struct *work)
{
	struct sdebug_defer *sd_dp = container_of(work, struct sdebug_defer,
						  ew.work);
	sdebug_q_cmd_complete(sd_dp);
}
4793 
/* UUID shared by all LUs when sdebug_uuid_ctl == 2; generated once on
 * first device creation (see sdebug_device_create()).
 */
static bool got_shared_uuid;
static uuid_t shared_uuid;
4796 
/* Set up the zone geometry and per-zone state array for a zoned (ZBC)
 * pseudo-device. Returns 0 on success, -EINVAL on bad geometry or
 * -ENOMEM if the zone state array cannot be allocated.
 */
static int sdebug_device_create_zones(struct sdebug_dev_info *devip)
{
	struct sdeb_zone_state *zsp;
	sector_t capacity = get_sdebug_capacity();
	sector_t zstart = 0;
	unsigned int i;

	/*
	 * Set the zone size: if sdeb_zbc_zone_size_mb is not set, figure out
	 * a zone size allowing for at least 4 zones on the device. Otherwise,
	 * use the specified zone size checking that at least 2 zones can be
	 * created for the device.
	 */
	if (!sdeb_zbc_zone_size_mb) {
		/* zone size in sectors, halved until >= 4 zones fit */
		devip->zsize = (DEF_ZBC_ZONE_SIZE_MB * SZ_1M)
			>> ilog2(sdebug_sector_size);
		while (capacity < devip->zsize << 2 && devip->zsize >= 2)
			devip->zsize >>= 1;
		if (devip->zsize < 2) {
			pr_err("Device capacity too small\n");
			return -EINVAL;
		}
	} else {
		if (!is_power_of_2(sdeb_zbc_zone_size_mb)) {
			pr_err("Zone size is not a power of 2\n");
			return -EINVAL;
		}
		devip->zsize = (sdeb_zbc_zone_size_mb * SZ_1M)
			>> ilog2(sdebug_sector_size);
		if (devip->zsize >= capacity) {
			pr_err("Zone size too large for device capacity\n");
			return -EINVAL;
		}
	}

	devip->zsize_shift = ilog2(devip->zsize);
	/* round up so a final runt zone covers the capacity remainder */
	devip->nr_zones = (capacity + devip->zsize - 1) >> devip->zsize_shift;

	if (sdeb_zbc_nr_conv >= devip->nr_zones) {
		pr_err("Number of conventional zones too large\n");
		return -EINVAL;
	}
	devip->nr_conv_zones = sdeb_zbc_nr_conv;

	if (devip->zmodel == BLK_ZONED_HM) {
		/* zbc_max_open_zones can be 0, meaning "not reported" */
		if (sdeb_zbc_max_open >= devip->nr_zones - 1)
			devip->max_open = (devip->nr_zones - 1) / 2;
		else
			devip->max_open = sdeb_zbc_max_open;
	}

	devip->zstate = kcalloc(devip->nr_zones,
				sizeof(struct sdeb_zone_state), GFP_KERNEL);
	if (!devip->zstate)
		return -ENOMEM;

	/* initialize per-zone state: conventional zones first, then
	 * sequential-write-required (HM) or -preferred (HA) zones */
	for (i = 0; i < devip->nr_zones; i++) {
		zsp = &devip->zstate[i];

		zsp->z_start = zstart;

		if (i < devip->nr_conv_zones) {
			zsp->z_type = ZBC_ZONE_TYPE_CNV;
			zsp->z_cond = ZBC_NOT_WRITE_POINTER;
			zsp->z_wp = (sector_t)-1;
		} else {
			if (devip->zmodel == BLK_ZONED_HM)
				zsp->z_type = ZBC_ZONE_TYPE_SWR;
			else
				zsp->z_type = ZBC_ZONE_TYPE_SWP;
			zsp->z_cond = ZC1_EMPTY;
			zsp->z_wp = zsp->z_start;
		}

		/* last zone may be smaller than the nominal zone size */
		if (zsp->z_start + devip->zsize < capacity)
			zsp->z_size = devip->zsize;
		else
			zsp->z_size = capacity - zsp->z_start;

		zstart += zsp->z_size;
	}

	return 0;
}
4882 
4883 static struct sdebug_dev_info *sdebug_device_create(
4884 			struct sdebug_host_info *sdbg_host, gfp_t flags)
4885 {
4886 	struct sdebug_dev_info *devip;
4887 
4888 	devip = kzalloc(sizeof(*devip), flags);
4889 	if (devip) {
4890 		if (sdebug_uuid_ctl == 1)
4891 			uuid_gen(&devip->lu_name);
4892 		else if (sdebug_uuid_ctl == 2) {
4893 			if (got_shared_uuid)
4894 				devip->lu_name = shared_uuid;
4895 			else {
4896 				uuid_gen(&shared_uuid);
4897 				got_shared_uuid = true;
4898 				devip->lu_name = shared_uuid;
4899 			}
4900 		}
4901 		devip->sdbg_host = sdbg_host;
4902 		if (sdeb_zbc_in_use) {
4903 			devip->zmodel = sdeb_zbc_model;
4904 			if (sdebug_device_create_zones(devip)) {
4905 				kfree(devip);
4906 				return NULL;
4907 			}
4908 		} else {
4909 			devip->zmodel = BLK_ZONED_NONE;
4910 		}
4911 		devip->sdbg_host = sdbg_host;
4912 		list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
4913 	}
4914 	return devip;
4915 }
4916 
4917 static struct sdebug_dev_info *find_build_dev_info(struct scsi_device *sdev)
4918 {
4919 	struct sdebug_host_info *sdbg_host;
4920 	struct sdebug_dev_info *open_devip = NULL;
4921 	struct sdebug_dev_info *devip;
4922 
4923 	sdbg_host = *(struct sdebug_host_info **)shost_priv(sdev->host);
4924 	if (!sdbg_host) {
4925 		pr_err("Host info NULL\n");
4926 		return NULL;
4927 	}
4928 	list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
4929 		if ((devip->used) && (devip->channel == sdev->channel) &&
4930 		    (devip->target == sdev->id) &&
4931 		    (devip->lun == sdev->lun))
4932 			return devip;
4933 		else {
4934 			if ((!devip->used) && (!open_devip))
4935 				open_devip = devip;
4936 		}
4937 	}
4938 	if (!open_devip) { /* try and make a new one */
4939 		open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
4940 		if (!open_devip) {
4941 			pr_err("out of memory at line %d\n", __LINE__);
4942 			return NULL;
4943 		}
4944 	}
4945 
4946 	open_devip->channel = sdev->channel;
4947 	open_devip->target = sdev->id;
4948 	open_devip->lun = sdev->lun;
4949 	open_devip->sdbg_host = sdbg_host;
4950 	atomic_set(&open_devip->num_in_q, 0);
4951 	set_bit(SDEBUG_UA_POR, open_devip->uas_bm);
4952 	open_devip->used = true;
4953 	return open_devip;
4954 }
4955 
/* slave_alloc host template hook: nothing to allocate, optionally log. */
static int scsi_debug_slave_alloc(struct scsi_device *sdp)
{
	if (sdebug_verbose)
		pr_info("slave_alloc <%u %u %u %llu>\n",
		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
	return 0;
}
4963 
4964 static int scsi_debug_slave_configure(struct scsi_device *sdp)
4965 {
4966 	struct sdebug_dev_info *devip =
4967 			(struct sdebug_dev_info *)sdp->hostdata;
4968 
4969 	if (sdebug_verbose)
4970 		pr_info("slave_configure <%u %u %u %llu>\n",
4971 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
4972 	if (sdp->host->max_cmd_len != SDEBUG_MAX_CMD_LEN)
4973 		sdp->host->max_cmd_len = SDEBUG_MAX_CMD_LEN;
4974 	if (devip == NULL) {
4975 		devip = find_build_dev_info(sdp);
4976 		if (devip == NULL)
4977 			return 1;  /* no resources, will be marked offline */
4978 	}
4979 	sdp->hostdata = devip;
4980 	if (sdebug_no_uld)
4981 		sdp->no_uld_attach = 1;
4982 	config_cdb_len(sdp);
4983 	return 0;
4984 }
4985 
4986 static void scsi_debug_slave_destroy(struct scsi_device *sdp)
4987 {
4988 	struct sdebug_dev_info *devip =
4989 		(struct sdebug_dev_info *)sdp->hostdata;
4990 
4991 	if (sdebug_verbose)
4992 		pr_info("slave_destroy <%u %u %u %llu>\n",
4993 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
4994 	if (devip) {
4995 		/* make this slot available for re-use */
4996 		devip->used = false;
4997 		sdp->hostdata = NULL;
4998 	}
4999 }
5000 
5001 static void stop_qc_helper(struct sdebug_defer *sd_dp,
5002 			   enum sdeb_defer_type defer_t)
5003 {
5004 	if (!sd_dp)
5005 		return;
5006 	if (defer_t == SDEB_DEFER_HRT)
5007 		hrtimer_cancel(&sd_dp->hrt);
5008 	else if (defer_t == SDEB_DEFER_WQ)
5009 		cancel_work_sync(&sd_dp->ew.work);
5010 }
5011 
/* If @cmnd found deletes its timer or work queue and returns true; else
   returns false. Scans every submission queue; qc_lock is dropped before
   stop_qc_helper() because cancel_work_sync()/hrtimer_cancel() may sleep
   or wait for the completion handler, which takes the same lock. */
static bool stop_queued_cmnd(struct scsi_cmnd *cmnd)
{
	unsigned long iflags;
	int j, k, qmax, r_qmax;
	enum sdeb_defer_type l_defer_t;
	struct sdebug_queue *sqp;
	struct sdebug_queued_cmd *sqcp;
	struct sdebug_dev_info *devip;
	struct sdebug_defer *sd_dp;

	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
		spin_lock_irqsave(&sqp->qc_lock, iflags);
		/* scan up to the retired maximum if it exceeds max_queue */
		qmax = sdebug_max_queue;
		r_qmax = atomic_read(&retired_max_queue);
		if (r_qmax > qmax)
			qmax = r_qmax;
		for (k = 0; k < qmax; ++k) {
			if (test_bit(k, sqp->in_use_bm)) {
				sqcp = &sqp->qc_arr[k];
				if (cmnd != sqcp->a_cmnd)
					continue;
				/* found */
				devip = (struct sdebug_dev_info *)
						cmnd->device->hostdata;
				if (devip)
					atomic_dec(&devip->num_in_q);
				sqcp->a_cmnd = NULL;
				/* snapshot defer type under the lock */
				sd_dp = sqcp->sd_dp;
				if (sd_dp) {
					l_defer_t = sd_dp->defer_t;
					sd_dp->defer_t = SDEB_DEFER_NONE;
				} else
					l_defer_t = SDEB_DEFER_NONE;
				spin_unlock_irqrestore(&sqp->qc_lock, iflags);
				stop_qc_helper(sd_dp, l_defer_t);
				clear_bit(k, sqp->in_use_bm);
				return true;
			}
		}
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
	}
	return false;
}
5057 
/* Deletes (stops) timers or work queues of all queued commands. qc_lock
 * is dropped around stop_qc_helper() (it may sleep) and re-taken before
 * continuing the scan of the same queue. */
static void stop_all_queued(void)
{
	unsigned long iflags;
	int j, k;
	enum sdeb_defer_type l_defer_t;
	struct sdebug_queue *sqp;
	struct sdebug_queued_cmd *sqcp;
	struct sdebug_dev_info *devip;
	struct sdebug_defer *sd_dp;

	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
		spin_lock_irqsave(&sqp->qc_lock, iflags);
		for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
			if (test_bit(k, sqp->in_use_bm)) {
				sqcp = &sqp->qc_arr[k];
				if (sqcp->a_cmnd == NULL)
					continue;
				devip = (struct sdebug_dev_info *)
					sqcp->a_cmnd->device->hostdata;
				if (devip)
					atomic_dec(&devip->num_in_q);
				sqcp->a_cmnd = NULL;
				/* snapshot defer type under the lock */
				sd_dp = sqcp->sd_dp;
				if (sd_dp) {
					l_defer_t = sd_dp->defer_t;
					sd_dp->defer_t = SDEB_DEFER_NONE;
				} else
					l_defer_t = SDEB_DEFER_NONE;
				spin_unlock_irqrestore(&sqp->qc_lock, iflags);
				stop_qc_helper(sd_dp, l_defer_t);
				clear_bit(k, sqp->in_use_bm);
				spin_lock_irqsave(&sqp->qc_lock, iflags);
			}
		}
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
	}
}
5096 
5097 /* Free queued command memory on heap */
5098 static void free_all_queued(void)
5099 {
5100 	int j, k;
5101 	struct sdebug_queue *sqp;
5102 	struct sdebug_queued_cmd *sqcp;
5103 
5104 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5105 		for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
5106 			sqcp = &sqp->qc_arr[k];
5107 			kfree(sqcp->sd_dp);
5108 			sqcp->sd_dp = NULL;
5109 		}
5110 	}
5111 }
5112 
5113 static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
5114 {
5115 	bool ok;
5116 
5117 	++num_aborts;
5118 	if (SCpnt) {
5119 		ok = stop_queued_cmnd(SCpnt);
5120 		if (SCpnt->device && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
5121 			sdev_printk(KERN_INFO, SCpnt->device,
5122 				    "%s: command%s found\n", __func__,
5123 				    ok ? "" : " not");
5124 	}
5125 	return SUCCESS;
5126 }
5127 
5128 static int scsi_debug_device_reset(struct scsi_cmnd *SCpnt)
5129 {
5130 	++num_dev_resets;
5131 	if (SCpnt && SCpnt->device) {
5132 		struct scsi_device *sdp = SCpnt->device;
5133 		struct sdebug_dev_info *devip =
5134 				(struct sdebug_dev_info *)sdp->hostdata;
5135 
5136 		if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5137 			sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5138 		if (devip)
5139 			set_bit(SDEBUG_UA_POR, devip->uas_bm);
5140 	}
5141 	return SUCCESS;
5142 }
5143 
5144 static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt)
5145 {
5146 	struct sdebug_host_info *sdbg_host;
5147 	struct sdebug_dev_info *devip;
5148 	struct scsi_device *sdp;
5149 	struct Scsi_Host *hp;
5150 	int k = 0;
5151 
5152 	++num_target_resets;
5153 	if (!SCpnt)
5154 		goto lie;
5155 	sdp = SCpnt->device;
5156 	if (!sdp)
5157 		goto lie;
5158 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5159 		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5160 	hp = sdp->host;
5161 	if (!hp)
5162 		goto lie;
5163 	sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
5164 	if (sdbg_host) {
5165 		list_for_each_entry(devip,
5166 				    &sdbg_host->dev_info_list,
5167 				    dev_list)
5168 			if (devip->target == sdp->id) {
5169 				set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5170 				++k;
5171 			}
5172 	}
5173 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5174 		sdev_printk(KERN_INFO, sdp,
5175 			    "%s: %d device(s) found in target\n", __func__, k);
5176 lie:
5177 	return SUCCESS;
5178 }
5179 
5180 static int scsi_debug_bus_reset(struct scsi_cmnd *SCpnt)
5181 {
5182 	struct sdebug_host_info *sdbg_host;
5183 	struct sdebug_dev_info *devip;
5184 	struct scsi_device *sdp;
5185 	struct Scsi_Host *hp;
5186 	int k = 0;
5187 
5188 	++num_bus_resets;
5189 	if (!(SCpnt && SCpnt->device))
5190 		goto lie;
5191 	sdp = SCpnt->device;
5192 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5193 		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5194 	hp = sdp->host;
5195 	if (hp) {
5196 		sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
5197 		if (sdbg_host) {
5198 			list_for_each_entry(devip,
5199 					    &sdbg_host->dev_info_list,
5200 					    dev_list) {
5201 				set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5202 				++k;
5203 			}
5204 		}
5205 	}
5206 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5207 		sdev_printk(KERN_INFO, sdp,
5208 			    "%s: %d device(s) found in host\n", __func__, k);
5209 lie:
5210 	return SUCCESS;
5211 }
5212 
5213 static int scsi_debug_host_reset(struct scsi_cmnd *SCpnt)
5214 {
5215 	struct sdebug_host_info *sdbg_host;
5216 	struct sdebug_dev_info *devip;
5217 	int k = 0;
5218 
5219 	++num_host_resets;
5220 	if ((SCpnt->device) && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
5221 		sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__);
5222 	spin_lock(&sdebug_host_list_lock);
5223 	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
5224 		list_for_each_entry(devip, &sdbg_host->dev_info_list,
5225 				    dev_list) {
5226 			set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5227 			++k;
5228 		}
5229 	}
5230 	spin_unlock(&sdebug_host_list_lock);
5231 	stop_all_queued();
5232 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5233 		sdev_printk(KERN_INFO, SCpnt->device,
5234 			    "%s: %d device(s) found\n", __func__, k);
5235 	return SUCCESS;
5236 }
5237 
/* Write an MSDOS (MBR) partition table into the first sector of the ram
 * store @ramp, dividing the store into sdebug_num_parts equal partitions
 * aligned to cylinder (heads * sectors-per-track) boundaries. No-op for
 * stores smaller than 1 MiB or when no partitions were requested.
 */
static void sdebug_build_parts(unsigned char *ramp, unsigned long store_size)
{
	struct msdos_partition *pp;
	int starts[SDEBUG_MAX_PARTS + 2];
	int sectors_per_part, num_sectors, k;
	int heads_by_sects, start_sec, end_sec;

	/* assume partition table already zeroed */
	if ((sdebug_num_parts < 1) || (store_size < 1048576))
		return;
	if (sdebug_num_parts > SDEBUG_MAX_PARTS) {
		sdebug_num_parts = SDEBUG_MAX_PARTS;
		pr_warn("reducing partitions to %d\n", SDEBUG_MAX_PARTS);
	}
	num_sectors = (int)sdebug_store_sectors;
	/* first track is reserved for the partition table itself */
	sectors_per_part = (num_sectors - sdebug_sectors_per)
			   / sdebug_num_parts;
	heads_by_sects = sdebug_heads * sdebug_sectors_per;
	starts[0] = sdebug_sectors_per;
	/* each partition start is rounded down to a cylinder boundary */
	for (k = 1; k < sdebug_num_parts; ++k)
		starts[k] = ((k * sectors_per_part) / heads_by_sects)
			    * heads_by_sects;
	starts[sdebug_num_parts] = num_sectors;
	starts[sdebug_num_parts + 1] = 0;	/* sentinel ends the loop below */

	ramp[510] = 0x55;	/* magic partition markings */
	ramp[511] = 0xAA;
	pp = (struct msdos_partition *)(ramp + 0x1be);
	for (k = 0; starts[k + 1]; ++k, ++pp) {
		start_sec = starts[k];
		end_sec = starts[k + 1] - 1;
		pp->boot_ind = 0;

		/* convert linear sector numbers to CHS for the entry */
		pp->cyl = start_sec / heads_by_sects;
		pp->head = (start_sec - (pp->cyl * heads_by_sects))
			   / sdebug_sectors_per;
		pp->sector = (start_sec % sdebug_sectors_per) + 1;

		pp->end_cyl = end_sec / heads_by_sects;
		pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
			       / sdebug_sectors_per;
		pp->end_sector = (end_sec % sdebug_sectors_per) + 1;

		pp->start_sect = cpu_to_le32(start_sec);
		pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1);
		pp->sys_ind = 0x83;	/* plain Linux partition */
	}
}
5286 
5287 static void block_unblock_all_queues(bool block)
5288 {
5289 	int j;
5290 	struct sdebug_queue *sqp;
5291 
5292 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp)
5293 		atomic_set(&sqp->blocked, (int)block);
5294 }
5295 
5296 /* Adjust (by rounding down) the sdebug_cmnd_count so abs(every_nth)-1
5297  * commands will be processed normally before triggers occur.
5298  */
5299 static void tweak_cmnd_count(void)
5300 {
5301 	int count, modulo;
5302 
5303 	modulo = abs(sdebug_every_nth);
5304 	if (modulo < 2)
5305 		return;
5306 	block_unblock_all_queues(true);
5307 	count = atomic_read(&sdebug_cmnd_count);
5308 	atomic_set(&sdebug_cmnd_count, (count / modulo) * modulo);
5309 	block_unblock_all_queues(false);
5310 }
5311 
/* Reset the driver's command/completion statistics counters. */
static void clear_queue_stats(void)
{
	atomic_set(&sdebug_cmnd_count, 0);
	atomic_set(&sdebug_completions, 0);
	atomic_set(&sdebug_miss_cpus, 0);
	atomic_set(&sdebug_a_tsf, 0);
}
5319 
5320 static bool inject_on_this_cmd(void)
5321 {
5322 	if (sdebug_every_nth == 0)
5323 		return false;
5324 	return (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) == 0;
5325 }
5326 
5327 #define INCLUSIVE_TIMING_MAX_NS 1000000		/* 1 millisecond */
5328 
/* Complete the processing of the thread that queued a SCSI command to this
 * driver. It either completes the command by calling cmnd_done() or
 * schedules a hr timer or work queue then returns 0. Returns
 * SCSI_MLQUEUE_HOST_BUSY if temporarily out of resources.
 */
static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
			 int scsi_result,
			 int (*pfp)(struct scsi_cmnd *,
				    struct sdebug_dev_info *),
			 int delta_jiff, int ndelay)
{
	bool new_sd_dp;
	bool inject = false;
	int k, num_in_q, qdepth;
	unsigned long iflags;
	u64 ns_from_boot = 0;
	struct sdebug_queue *sqp;
	struct sdebug_queued_cmd *sqcp;
	struct scsi_device *sdp;
	struct sdebug_defer *sd_dp;

	if (unlikely(devip == NULL)) {
		if (scsi_result == 0)
			scsi_result = DID_NO_CONNECT << 16;
		goto respond_in_thread;
	}
	sdp = cmnd->device;

	/* delay of 0 means respond synchronously in this thread */
	if (delta_jiff == 0)
		goto respond_in_thread;

	sqp = get_queue(cmnd);
	spin_lock_irqsave(&sqp->qc_lock, iflags);
	if (unlikely(atomic_read(&sqp->blocked))) {
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
		return SCSI_MLQUEUE_HOST_BUSY;
	}
	/* emulate TASK SET FULL when the device queue depth is exceeded */
	num_in_q = atomic_read(&devip->num_in_q);
	qdepth = cmnd->device->queue_depth;
	if (unlikely((qdepth > 0) && (num_in_q >= qdepth))) {
		if (scsi_result) {
			spin_unlock_irqrestore(&sqp->qc_lock, iflags);
			goto respond_in_thread;
		} else
			scsi_result = device_qfull_result;
	} else if (unlikely(sdebug_every_nth &&
			    (SDEBUG_OPT_RARE_TSF & sdebug_opts) &&
			    (scsi_result == 0))) {
		/* occasionally inject TASK SET FULL near the queue limit */
		if ((num_in_q == (qdepth - 1)) &&
		    (atomic_inc_return(&sdebug_a_tsf) >=
		     abs(sdebug_every_nth))) {
			atomic_set(&sdebug_a_tsf, 0);
			inject = true;
			scsi_result = device_qfull_result;
		}
	}

	/* claim a free queue slot while still holding qc_lock */
	k = find_first_zero_bit(sqp->in_use_bm, sdebug_max_queue);
	if (unlikely(k >= sdebug_max_queue)) {
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
		if (scsi_result)
			goto respond_in_thread;
		else if (SDEBUG_OPT_ALL_TSF & sdebug_opts)
			scsi_result = device_qfull_result;
		if (SDEBUG_OPT_Q_NOISE & sdebug_opts)
			sdev_printk(KERN_INFO, sdp,
				    "%s: max_queue=%d exceeded, %s\n",
				    __func__, sdebug_max_queue,
				    (scsi_result ?  "status: TASK SET FULL" :
						    "report: host busy"));
		if (scsi_result)
			goto respond_in_thread;
		else
			return SCSI_MLQUEUE_HOST_BUSY;
	}
	set_bit(k, sqp->in_use_bm);
	atomic_inc(&devip->num_in_q);
	sqcp = &sqp->qc_arr[k];
	sqcp->a_cmnd = cmnd;
	cmnd->host_scribble = (unsigned char *)sqcp;
	sd_dp = sqcp->sd_dp;
	spin_unlock_irqrestore(&sqp->qc_lock, iflags);
	/* lazily allocate the deferral state for this slot */
	if (!sd_dp) {
		sd_dp = kzalloc(sizeof(*sd_dp), GFP_ATOMIC);
		if (!sd_dp) {
			atomic_dec(&devip->num_in_q);
			clear_bit(k, sqp->in_use_bm);
			return SCSI_MLQUEUE_HOST_BUSY;
		}
		new_sd_dp = true;
	} else {
		new_sd_dp = false;
	}

	/* Set the hostwide tag */
	if (sdebug_host_max_queue)
		sd_dp->hc_idx = get_tag(cmnd);

	/* snapshot start time so short ndelays can account for work done */
	if (ndelay > 0 && ndelay < INCLUSIVE_TIMING_MAX_NS)
		ns_from_boot = ktime_get_boottime_ns();

	/* one of the resp_*() response functions is called here */
	cmnd->result = pfp ? pfp(cmnd, devip) : 0;
	if (cmnd->result & SDEG_RES_IMMED_MASK) {
		/* response function asked for an immediate completion */
		cmnd->result &= ~SDEG_RES_IMMED_MASK;
		delta_jiff = ndelay = 0;
	}
	if (cmnd->result == 0 && scsi_result != 0)
		cmnd->result = scsi_result;
	if (cmnd->result == 0 && unlikely(sdebug_opts & SDEBUG_OPT_TRANSPORT_ERR)) {
		if (atomic_read(&sdeb_inject_pending)) {
			mk_sense_buffer(cmnd, ABORTED_COMMAND, TRANSPORT_PROBLEM, ACK_NAK_TO);
			atomic_set(&sdeb_inject_pending, 0);
			cmnd->result = check_condition_result;
		}
	}

	if (unlikely(sdebug_verbose && cmnd->result))
		sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n",
			    __func__, cmnd->result);

	if (delta_jiff > 0 || ndelay > 0) {
		ktime_t kt;

		if (delta_jiff > 0) {
			u64 ns = jiffies_to_nsecs(delta_jiff);

			/* optionally randomize the delay within [0, ns) */
			if (sdebug_random && ns < U32_MAX) {
				ns = prandom_u32_max((u32)ns);
			} else if (sdebug_random) {
				ns >>= 12;	/* scale to 4 usec precision */
				if (ns < U32_MAX)	/* over 4 hours max */
					ns = prandom_u32_max((u32)ns);
				ns <<= 12;
			}
			kt = ns_to_ktime(ns);
		} else {	/* ndelay has a 4.2 second max */
			kt = sdebug_random ? prandom_u32_max((u32)ndelay) :
					     (u32)ndelay;
			if (ndelay < INCLUSIVE_TIMING_MAX_NS) {
				u64 d = ktime_get_boottime_ns() - ns_from_boot;

				if (kt <= d) {	/* elapsed duration >= kt */
					/* deadline already passed: complete
					 * synchronously, releasing the slot */
					sqcp->a_cmnd = NULL;
					atomic_dec(&devip->num_in_q);
					clear_bit(k, sqp->in_use_bm);
					if (new_sd_dp)
						kfree(sd_dp);
					/* call scsi_done() from this thread */
					cmnd->scsi_done(cmnd);
					return 0;
				}
				/* otherwise reduce kt by elapsed time */
				kt -= d;
			}
		}
		/* hrtimer is set up once per slot, then reused */
		if (!sd_dp->init_hrt) {
			sd_dp->init_hrt = true;
			sqcp->sd_dp = sd_dp;
			hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC,
				     HRTIMER_MODE_REL_PINNED);
			sd_dp->hrt.function = sdebug_q_cmd_hrt_complete;
			sd_dp->sqa_idx = sqp - sdebug_q_arr;
			sd_dp->qc_idx = k;
		}
		if (sdebug_statistics)
			sd_dp->issuing_cpu = raw_smp_processor_id();
		sd_dp->defer_t = SDEB_DEFER_HRT;
		/* schedule the invocation of scsi_done() for a later time */
		hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED);
	} else {	/* jdelay < 0, use work queue */
		/* work item is set up once per slot, then reused */
		if (!sd_dp->init_wq) {
			sd_dp->init_wq = true;
			sqcp->sd_dp = sd_dp;
			sd_dp->sqa_idx = sqp - sdebug_q_arr;
			sd_dp->qc_idx = k;
			INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);
		}
		if (sdebug_statistics)
			sd_dp->issuing_cpu = raw_smp_processor_id();
		sd_dp->defer_t = SDEB_DEFER_WQ;
		if (unlikely((sdebug_opts & SDEBUG_OPT_CMD_ABORT) &&
			     atomic_read(&sdeb_inject_pending)))
			sd_dp->aborted = true;
		schedule_work(&sd_dp->ew.work);
		if (unlikely((sdebug_opts & SDEBUG_OPT_CMD_ABORT) &&
			     atomic_read(&sdeb_inject_pending))) {
			sdev_printk(KERN_INFO, sdp, "abort request tag %d\n", cmnd->request->tag);
			blk_abort_request(cmnd->request);
			atomic_set(&sdeb_inject_pending, 0);
		}
	}
	if (unlikely((SDEBUG_OPT_Q_NOISE & sdebug_opts) && scsi_result == device_qfull_result))
		sdev_printk(KERN_INFO, sdp, "%s: num_in_q=%d +1, %s%s\n", __func__,
			    num_in_q, (inject ? "<inject> " : ""), "status: TASK SET FULL");
	return 0;

respond_in_thread:	/* call back to mid-layer using invocation thread */
	cmnd->result = pfp != NULL ? pfp(cmnd, devip) : 0;
	cmnd->result &= ~SDEG_RES_IMMED_MASK;
	if (cmnd->result == 0 && scsi_result != 0)
		cmnd->result = scsi_result;
	cmnd->scsi_done(cmnd);
	return 0;
}
5534 
/* Note: The following macros create attribute files in the
   /sys/module/scsi_debug/parameters directory. Unfortunately this
   driver is unaware of a change and cannot trigger auxiliary actions
   as it can when the corresponding attribute in the
   /sys/bus/pseudo/drivers/scsi_debug directory is changed.
 */
/* Parameters marked S_IWUSR may be changed after module load; the rest are
 * load-time only.  Each maps a sysfs name to a sdebug_* file-scope variable. */
module_param_named(add_host, sdebug_add_host, int, S_IRUGO | S_IWUSR);
module_param_named(ato, sdebug_ato, int, S_IRUGO);
module_param_named(cdb_len, sdebug_cdb_len, int, 0644);
module_param_named(clustering, sdebug_clustering, bool, S_IRUGO | S_IWUSR);
/* "delay" is in jiffies and is stored in sdebug_jdelay */
module_param_named(delay, sdebug_jdelay, int, S_IRUGO | S_IWUSR);
module_param_named(dev_size_mb, sdebug_dev_size_mb, int, S_IRUGO);
module_param_named(dif, sdebug_dif, int, S_IRUGO);
module_param_named(dix, sdebug_dix, int, S_IRUGO);
module_param_named(dsense, sdebug_dsense, int, S_IRUGO | S_IWUSR);
module_param_named(every_nth, sdebug_every_nth, int, S_IRUGO | S_IWUSR);
module_param_named(fake_rw, sdebug_fake_rw, int, S_IRUGO | S_IWUSR);
module_param_named(guard, sdebug_guard, uint, S_IRUGO);
module_param_named(host_lock, sdebug_host_lock, bool, S_IRUGO | S_IWUSR);
module_param_named(host_max_queue, sdebug_host_max_queue, int, S_IRUGO);
/* INQUIRY response strings are fixed-size char arrays, hence param_string */
module_param_string(inq_product, sdebug_inq_product_id,
		    sizeof(sdebug_inq_product_id), S_IRUGO | S_IWUSR);
module_param_string(inq_rev, sdebug_inq_product_rev,
		    sizeof(sdebug_inq_product_rev), S_IRUGO | S_IWUSR);
module_param_string(inq_vendor, sdebug_inq_vendor_id,
		    sizeof(sdebug_inq_vendor_id), S_IRUGO | S_IWUSR);
module_param_named(lbprz, sdebug_lbprz, int, S_IRUGO);
module_param_named(lbpu, sdebug_lbpu, int, S_IRUGO);
module_param_named(lbpws, sdebug_lbpws, int, S_IRUGO);
module_param_named(lbpws10, sdebug_lbpws10, int, S_IRUGO);
module_param_named(lowest_aligned, sdebug_lowest_aligned, int, S_IRUGO);
module_param_named(max_luns, sdebug_max_luns, int, S_IRUGO | S_IWUSR);
module_param_named(max_queue, sdebug_max_queue, int, S_IRUGO | S_IWUSR);
module_param_named(medium_error_count, sdebug_medium_error_count, int,
		   S_IRUGO | S_IWUSR);
module_param_named(medium_error_start, sdebug_medium_error_start, int,
		   S_IRUGO | S_IWUSR);
module_param_named(ndelay, sdebug_ndelay, int, S_IRUGO | S_IWUSR);
module_param_named(no_lun_0, sdebug_no_lun_0, int, S_IRUGO | S_IWUSR);
module_param_named(no_uld, sdebug_no_uld, int, S_IRUGO);
module_param_named(num_parts, sdebug_num_parts, int, S_IRUGO);
module_param_named(num_tgts, sdebug_num_tgts, int, S_IRUGO | S_IWUSR);
module_param_named(opt_blks, sdebug_opt_blks, int, S_IRUGO);
module_param_named(opt_xferlen_exp, sdebug_opt_xferlen_exp, int, S_IRUGO);
module_param_named(opts, sdebug_opts, int, S_IRUGO | S_IWUSR);
module_param_named(per_host_store, sdebug_per_host_store, bool,
		   S_IRUGO | S_IWUSR);
module_param_named(physblk_exp, sdebug_physblk_exp, int, S_IRUGO);
module_param_named(ptype, sdebug_ptype, int, S_IRUGO | S_IWUSR);
module_param_named(random, sdebug_random, bool, S_IRUGO | S_IWUSR);
module_param_named(removable, sdebug_removable, bool, S_IRUGO | S_IWUSR);
module_param_named(scsi_level, sdebug_scsi_level, int, S_IRUGO);
module_param_named(sector_size, sdebug_sector_size, int, S_IRUGO);
module_param_named(statistics, sdebug_statistics, bool, S_IRUGO | S_IWUSR);
module_param_named(strict, sdebug_strict, bool, S_IRUGO | S_IWUSR);
module_param_named(submit_queues, submit_queues, int, S_IRUGO);
module_param_named(unmap_alignment, sdebug_unmap_alignment, int, S_IRUGO);
module_param_named(unmap_granularity, sdebug_unmap_granularity, int, S_IRUGO);
module_param_named(unmap_max_blocks, sdebug_unmap_max_blocks, int, S_IRUGO);
module_param_named(unmap_max_desc, sdebug_unmap_max_desc, int, S_IRUGO);
module_param_named(uuid_ctl, sdebug_uuid_ctl, int, S_IRUGO);
module_param_named(virtual_gb, sdebug_virtual_gb, int, S_IRUGO | S_IWUSR);
module_param_named(vpd_use_hostno, sdebug_vpd_use_hostno, int,
		   S_IRUGO | S_IWUSR);
module_param_named(wp, sdebug_wp, bool, S_IRUGO | S_IWUSR);
module_param_named(write_same_length, sdebug_write_same_length, int,
		   S_IRUGO | S_IWUSR);
/* Zoned block (ZBC) emulation parameters; zbc takes a model name string */
module_param_named(zbc, sdeb_zbc_model_s, charp, S_IRUGO);
module_param_named(zone_max_open, sdeb_zbc_max_open, int, S_IRUGO);
module_param_named(zone_nr_conv, sdeb_zbc_nr_conv, int, S_IRUGO);
module_param_named(zone_size_mb, sdeb_zbc_zone_size_mb, int, S_IRUGO);

MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
MODULE_DESCRIPTION("SCSI debug adapter driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(SDEBUG_VERSION);

/* One-line help text per parameter, shown by modinfo(8) */
MODULE_PARM_DESC(add_host, "add n hosts, in sysfs if negative remove host(s) (def=1)");
MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
MODULE_PARM_DESC(cdb_len, "suggest CDB lengths to drivers (def=10)");
MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny");
MODULE_PARM_DESC(dev_size_mb, "size in MiB of ram shared by devs(def=8)");
MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
MODULE_PARM_DESC(host_lock, "host_lock is ignored (def=0)");
MODULE_PARM_DESC(host_max_queue,
		 "host max # of queued cmds (0 to max(def) [max_queue fixed equal for !0])");
MODULE_PARM_DESC(inq_product, "SCSI INQUIRY product string (def=\"scsi_debug\")");
MODULE_PARM_DESC(inq_rev, "SCSI INQUIRY revision string (def=\""
		 SDEBUG_VERSION "\")");
MODULE_PARM_DESC(inq_vendor, "SCSI INQUIRY vendor string (def=\"Linux\")");
MODULE_PARM_DESC(lbprz,
		 "on read unmapped LBs return 0 when 1 (def), return 0xff when 2");
MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to max(def))");
MODULE_PARM_DESC(medium_error_count, "count of sectors to return follow on MEDIUM error");
MODULE_PARM_DESC(medium_error_start, "starting sector number to return MEDIUM error");
MODULE_PARM_DESC(ndelay, "response delay in nanoseconds (def=0 -> ignore)");
MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0))");
MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=1024)");
MODULE_PARM_DESC(opt_xferlen_exp, "optimal transfer length granularity exponent (def=physblk_exp)");
MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
MODULE_PARM_DESC(per_host_store, "If set, next positive add_host will get new store (def=0)");
MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
MODULE_PARM_DESC(random, "If set, uniformly randomize command duration between 0 and delay_in_ns");
MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=7[SPC-5])");
MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
MODULE_PARM_DESC(statistics, "collect statistics on commands, queues (def=0)");
MODULE_PARM_DESC(strict, "stricter checks: reserved field in cdb (def=0)");
MODULE_PARM_DESC(submit_queues, "support for block multi-queue (def=1)");
MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)");
MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
MODULE_PARM_DESC(uuid_ctl,
		 "1->use uuid for lu name, 0->don't, 2->all use same (def=0)");
MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)");
MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
MODULE_PARM_DESC(wp, "Write Protect (def=0)");
MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
MODULE_PARM_DESC(zbc, "'none' [0]; 'aware' [1]; 'managed' [2] (def=0). Can have 'host-' prefix");
MODULE_PARM_DESC(zone_max_open, "Maximum number of open zones; [0] for no limit (def=auto)");
MODULE_PARM_DESC(zone_nr_conv, "Number of conventional zones (def=1)");
MODULE_PARM_DESC(zone_size_mb, "Zone size in MiB (def=auto)");
5673 
5674 #define SDEBUG_INFO_LEN 256
5675 static char sdebug_info[SDEBUG_INFO_LEN];
5676 
5677 static const char *scsi_debug_info(struct Scsi_Host *shp)
5678 {
5679 	int k;
5680 
5681 	k = scnprintf(sdebug_info, SDEBUG_INFO_LEN, "%s: version %s [%s]\n",
5682 		      my_name, SDEBUG_VERSION, sdebug_version_date);
5683 	if (k >= (SDEBUG_INFO_LEN - 1))
5684 		return sdebug_info;
5685 	scnprintf(sdebug_info + k, SDEBUG_INFO_LEN - k,
5686 		  "  dev_size_mb=%d, opts=0x%x, submit_queues=%d, %s=%d",
5687 		  sdebug_dev_size_mb, sdebug_opts, submit_queues,
5688 		  "statistics", (int)sdebug_statistics);
5689 	return sdebug_info;
5690 }
5691 
5692 /* 'echo <val> > /proc/scsi/scsi_debug/<host_id>' writes to opts */
5693 static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer,
5694 				 int length)
5695 {
5696 	char arr[16];
5697 	int opts;
5698 	int minLen = length > 15 ? 15 : length;
5699 
5700 	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
5701 		return -EACCES;
5702 	memcpy(arr, buffer, minLen);
5703 	arr[minLen] = '\0';
5704 	if (1 != sscanf(arr, "%d", &opts))
5705 		return -EINVAL;
5706 	sdebug_opts = opts;
5707 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
5708 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
5709 	if (sdebug_every_nth != 0)
5710 		tweak_cmnd_count();
5711 	return length;
5712 }
5713 
/* Output seen with 'cat /proc/scsi/scsi_debug/<host_id>'. It will be the
 * same for each scsi_debug host (if more than one). Some of the counters
 * output are not atomics so might be inaccurate in a busy system. */
static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
{
	int f, j, l;
	struct sdebug_queue *sqp;
	struct sdebug_host_info *sdhp;

	/* Current configuration: mostly module/sysfs parameter values */
	seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n",
		   SDEBUG_VERSION, sdebug_version_date);
	seq_printf(m, "num_tgts=%d, %ssize=%d MB, opts=0x%x, every_nth=%d\n",
		   sdebug_num_tgts, "shared (ram) ", sdebug_dev_size_mb,
		   sdebug_opts, sdebug_every_nth);
	seq_printf(m, "delay=%d, ndelay=%d, max_luns=%d, sector_size=%d %s\n",
		   sdebug_jdelay, sdebug_ndelay, sdebug_max_luns,
		   sdebug_sector_size, "bytes");
	seq_printf(m, "cylinders=%d, heads=%d, sectors=%d, command aborts=%d\n",
		   sdebug_cylinders_per, sdebug_heads, sdebug_sectors_per,
		   num_aborts);
	/* Error-handling and protection-information counters */
	seq_printf(m, "RESETs: device=%d, target=%d, bus=%d, host=%d\n",
		   num_dev_resets, num_target_resets, num_bus_resets,
		   num_host_resets);
	seq_printf(m, "dix_reads=%d, dix_writes=%d, dif_errors=%d\n",
		   dix_reads, dix_writes, dif_errors);
	seq_printf(m, "usec_in_jiffy=%lu, statistics=%d\n", TICK_NSEC / 1000,
		   sdebug_statistics);
	seq_printf(m, "cmnd_count=%d, completions=%d, %s=%d, a_tsf=%d\n",
		   atomic_read(&sdebug_cmnd_count),
		   atomic_read(&sdebug_completions),
		   "miss_cpus", atomic_read(&sdebug_miss_cpus),
		   atomic_read(&sdebug_a_tsf));

	/* Per-submit-queue occupancy: show first/last busy bit if any in use */
	seq_printf(m, "submit_queues=%d\n", submit_queues);
	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
		seq_printf(m, "  queue %d:\n", j);
		f = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
		if (f != sdebug_max_queue) {
			l = find_last_bit(sqp->in_use_bm, sdebug_max_queue);
			seq_printf(m, "    in_use_bm BUSY: %s: %d,%d\n",
				   "first,last bits", f, l);
		}
	}

	seq_printf(m, "this host_no=%d\n", host->host_no);
	/* When per-host stores exist, map hosts to their store indices */
	if (!xa_empty(per_store_ap)) {
		bool niu;
		int idx;
		unsigned long l_idx;
		struct sdeb_store_info *sip;

		seq_puts(m, "\nhost list:\n");
		j = 0;
		list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
			idx = sdhp->si_idx;
			seq_printf(m, "  %d: host_no=%d, si_idx=%d\n", j,
				   sdhp->shost->host_no, idx);
			++j;
		}
		seq_printf(m, "\nper_store array [most_recent_idx=%d]:\n",
			   sdeb_most_recent_idx);
		j = 0;
		/* Each store is flagged not_in_use via an xarray mark */
		xa_for_each(per_store_ap, l_idx, sip) {
			niu = xa_get_mark(per_store_ap, l_idx,
					  SDEB_XA_NOT_IN_USE);
			idx = (int)l_idx;
			seq_printf(m, "  %d: idx=%d%s\n", j, idx,
				   (niu ? "  not_in_use" : ""));
			++j;
		}
	}
	return 0;
}
5787 
5788 static ssize_t delay_show(struct device_driver *ddp, char *buf)
5789 {
5790 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_jdelay);
5791 }
5792 /* Returns -EBUSY if jdelay is being changed and commands are queued. The unit
5793  * of delay is jiffies.
5794  */
5795 static ssize_t delay_store(struct device_driver *ddp, const char *buf,
5796 			   size_t count)
5797 {
5798 	int jdelay, res;
5799 
5800 	if (count > 0 && sscanf(buf, "%d", &jdelay) == 1) {
5801 		res = count;
5802 		if (sdebug_jdelay != jdelay) {
5803 			int j, k;
5804 			struct sdebug_queue *sqp;
5805 
5806 			block_unblock_all_queues(true);
5807 			for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
5808 			     ++j, ++sqp) {
5809 				k = find_first_bit(sqp->in_use_bm,
5810 						   sdebug_max_queue);
5811 				if (k != sdebug_max_queue) {
5812 					res = -EBUSY;   /* queued commands */
5813 					break;
5814 				}
5815 			}
5816 			if (res > 0) {
5817 				sdebug_jdelay = jdelay;
5818 				sdebug_ndelay = 0;
5819 			}
5820 			block_unblock_all_queues(false);
5821 		}
5822 		return res;
5823 	}
5824 	return -EINVAL;
5825 }
5826 static DRIVER_ATTR_RW(delay);
5827 
5828 static ssize_t ndelay_show(struct device_driver *ddp, char *buf)
5829 {
5830 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ndelay);
5831 }
5832 /* Returns -EBUSY if ndelay is being changed and commands are queued */
5833 /* If > 0 and accepted then sdebug_jdelay is set to JDELAY_OVERRIDDEN */
5834 static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
5835 			    size_t count)
5836 {
5837 	int ndelay, res;
5838 
5839 	if ((count > 0) && (1 == sscanf(buf, "%d", &ndelay)) &&
5840 	    (ndelay >= 0) && (ndelay < (1000 * 1000 * 1000))) {
5841 		res = count;
5842 		if (sdebug_ndelay != ndelay) {
5843 			int j, k;
5844 			struct sdebug_queue *sqp;
5845 
5846 			block_unblock_all_queues(true);
5847 			for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
5848 			     ++j, ++sqp) {
5849 				k = find_first_bit(sqp->in_use_bm,
5850 						   sdebug_max_queue);
5851 				if (k != sdebug_max_queue) {
5852 					res = -EBUSY;   /* queued commands */
5853 					break;
5854 				}
5855 			}
5856 			if (res > 0) {
5857 				sdebug_ndelay = ndelay;
5858 				sdebug_jdelay = ndelay  ? JDELAY_OVERRIDDEN
5859 							: DEF_JDELAY;
5860 			}
5861 			block_unblock_all_queues(false);
5862 		}
5863 		return res;
5864 	}
5865 	return -EINVAL;
5866 }
5867 static DRIVER_ATTR_RW(ndelay);
5868 
5869 static ssize_t opts_show(struct device_driver *ddp, char *buf)
5870 {
5871 	return scnprintf(buf, PAGE_SIZE, "0x%x\n", sdebug_opts);
5872 }
5873 
5874 static ssize_t opts_store(struct device_driver *ddp, const char *buf,
5875 			  size_t count)
5876 {
5877 	int opts;
5878 	char work[20];
5879 
5880 	if (sscanf(buf, "%10s", work) == 1) {
5881 		if (strncasecmp(work, "0x", 2) == 0) {
5882 			if (kstrtoint(work + 2, 16, &opts) == 0)
5883 				goto opts_done;
5884 		} else {
5885 			if (kstrtoint(work, 10, &opts) == 0)
5886 				goto opts_done;
5887 		}
5888 	}
5889 	return -EINVAL;
5890 opts_done:
5891 	sdebug_opts = opts;
5892 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
5893 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
5894 	tweak_cmnd_count();
5895 	return count;
5896 }
5897 static DRIVER_ATTR_RW(opts);
5898 
5899 static ssize_t ptype_show(struct device_driver *ddp, char *buf)
5900 {
5901 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ptype);
5902 }
5903 static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
5904 			   size_t count)
5905 {
5906 	int n;
5907 
5908 	/* Cannot change from or to TYPE_ZBC with sysfs */
5909 	if (sdebug_ptype == TYPE_ZBC)
5910 		return -EINVAL;
5911 
5912 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
5913 		if (n == TYPE_ZBC)
5914 			return -EINVAL;
5915 		sdebug_ptype = n;
5916 		return count;
5917 	}
5918 	return -EINVAL;
5919 }
5920 static DRIVER_ATTR_RW(ptype);
5921 
5922 static ssize_t dsense_show(struct device_driver *ddp, char *buf)
5923 {
5924 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dsense);
5925 }
5926 static ssize_t dsense_store(struct device_driver *ddp, const char *buf,
5927 			    size_t count)
5928 {
5929 	int n;
5930 
5931 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
5932 		sdebug_dsense = n;
5933 		return count;
5934 	}
5935 	return -EINVAL;
5936 }
5937 static DRIVER_ATTR_RW(dsense);
5938 
static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_fake_rw);
}
/* Toggling fake_rw also creates (1->0) or shrinks (0->1) the backing
 * store(s), so the transitions below are order-sensitive. */
static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
			     size_t count)
{
	int n, idx;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		bool want_store = (n == 0);
		struct sdebug_host_info *sdhp;

		/* normalize both values to 0/1 before comparing */
		n = (n > 0);
		sdebug_fake_rw = (sdebug_fake_rw > 0);
		if (sdebug_fake_rw == n)
			return count;	/* not transitioning so do nothing */

		if (want_store) {	/* 1 --> 0 transition, set up store */
			if (sdeb_first_idx < 0) {
				/* no store yet: allocate a new one */
				idx = sdebug_add_store();
				if (idx < 0)
					return idx;
			} else {
				/* re-activate the first (retained) store */
				idx = sdeb_first_idx;
				xa_clear_mark(per_store_ap, idx,
					      SDEB_XA_NOT_IN_USE);
			}
			/* make all hosts use same store */
			list_for_each_entry(sdhp, &sdebug_host_list,
					    host_list) {
				if (sdhp->si_idx != idx) {
					xa_set_mark(per_store_ap, sdhp->si_idx,
						    SDEB_XA_NOT_IN_USE);
					sdhp->si_idx = idx;
				}
			}
			sdeb_most_recent_idx = idx;
		} else {	/* 0 --> 1 transition is trigger for shrink */
			sdebug_erase_all_stores(true /* apart from first */);
		}
		sdebug_fake_rw = n;
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(fake_rw);
5986 
5987 static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf)
5988 {
5989 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_lun_0);
5990 }
5991 static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf,
5992 			      size_t count)
5993 {
5994 	int n;
5995 
5996 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
5997 		sdebug_no_lun_0 = n;
5998 		return count;
5999 	}
6000 	return -EINVAL;
6001 }
6002 static DRIVER_ATTR_RW(no_lun_0);
6003 
6004 static ssize_t num_tgts_show(struct device_driver *ddp, char *buf)
6005 {
6006 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_tgts);
6007 }
6008 static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf,
6009 			      size_t count)
6010 {
6011 	int n;
6012 
6013 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6014 		sdebug_num_tgts = n;
6015 		sdebug_max_tgts_luns();
6016 		return count;
6017 	}
6018 	return -EINVAL;
6019 }
6020 static DRIVER_ATTR_RW(num_tgts);
6021 
/* RAM store size is fixed at module load, hence read-only */
static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dev_size_mb);
}
static DRIVER_ATTR_RO(dev_size_mb);

static ssize_t per_host_store_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_per_host_store);
}

/* Only affects hosts added after the change (see add_host_store) */
static ssize_t per_host_store_store(struct device_driver *ddp, const char *buf,
				    size_t count)
{
	bool v;

	if (kstrtobool(buf, &v))
		return -EINVAL;

	sdebug_per_host_store = v;
	return count;
}
static DRIVER_ATTR_RW(per_host_store);

static ssize_t num_parts_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_parts);
}
static DRIVER_ATTR_RO(num_parts);
6051 
6052 static ssize_t every_nth_show(struct device_driver *ddp, char *buf)
6053 {
6054 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_every_nth);
6055 }
6056 static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
6057 			       size_t count)
6058 {
6059 	int nth;
6060 	char work[20];
6061 
6062 	if (sscanf(buf, "%10s", work) == 1) {
6063 		if (strncasecmp(work, "0x", 2) == 0) {
6064 			if (kstrtoint(work + 2, 16, &nth) == 0)
6065 				goto every_nth_done;
6066 		} else {
6067 			if (kstrtoint(work, 10, &nth) == 0)
6068 				goto every_nth_done;
6069 		}
6070 	}
6071 	return -EINVAL;
6072 
6073 every_nth_done:
6074 	sdebug_every_nth = nth;
6075 	if (nth && !sdebug_statistics) {
6076 		pr_info("every_nth needs statistics=1, set it\n");
6077 		sdebug_statistics = true;
6078 	}
6079 	tweak_cmnd_count();
6080 	return count;
6081 }
6082 static DRIVER_ATTR_RW(every_nth);
6083 
6084 static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
6085 {
6086 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_luns);
6087 }
6088 static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
6089 			      size_t count)
6090 {
6091 	int n;
6092 	bool changed;
6093 
6094 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6095 		if (n > 256) {
6096 			pr_warn("max_luns can be no more than 256\n");
6097 			return -EINVAL;
6098 		}
6099 		changed = (sdebug_max_luns != n);
6100 		sdebug_max_luns = n;
6101 		sdebug_max_tgts_luns();
6102 		if (changed && (sdebug_scsi_level >= 5)) {	/* >= SPC-3 */
6103 			struct sdebug_host_info *sdhp;
6104 			struct sdebug_dev_info *dp;
6105 
6106 			spin_lock(&sdebug_host_list_lock);
6107 			list_for_each_entry(sdhp, &sdebug_host_list,
6108 					    host_list) {
6109 				list_for_each_entry(dp, &sdhp->dev_info_list,
6110 						    dev_list) {
6111 					set_bit(SDEBUG_UA_LUNS_CHANGED,
6112 						dp->uas_bm);
6113 				}
6114 			}
6115 			spin_unlock(&sdebug_host_list_lock);
6116 		}
6117 		return count;
6118 	}
6119 	return -EINVAL;
6120 }
6121 static DRIVER_ATTR_RW(max_luns);
6122 
static ssize_t max_queue_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_queue);
}
/* N.B. max_queue can be changed while there are queued commands. In flight
 * commands beyond the new max_queue will be completed. */
static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
			       size_t count)
{
	int j, n, k, a;
	struct sdebug_queue *sqp;

	/* rejected when host_max_queue is active (it fixes max_queue) */
	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
	    (n <= SDEBUG_CANQUEUE) &&
	    (sdebug_host_max_queue == 0)) {
		block_unblock_all_queues(true);
		k = 0;
		/* k = highest in-use slot index across all submit queues */
		for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
		     ++j, ++sqp) {
			a = find_last_bit(sqp->in_use_bm, SDEBUG_CANQUEUE);
			if (a > k)
				k = a;
		}
		sdebug_max_queue = n;
		/* retired_max_queue tracks in-flight slots above the new
		 * limit; 0 means nothing outstanding beyond it */
		if (k == SDEBUG_CANQUEUE)
			atomic_set(&retired_max_queue, 0);
		else if (k >= n)
			atomic_set(&retired_max_queue, k + 1);
		else
			atomic_set(&retired_max_queue, 0);
		block_unblock_all_queues(false);
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(max_queue);
6159 
static ssize_t host_max_queue_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_host_max_queue);
}

/*
 * Since this is used for .can_queue, and we get the hc_idx tag from the bitmap
 * in range [0, sdebug_host_max_queue), we can't change it.
 */
static DRIVER_ATTR_RO(host_max_queue);

/* no_uld stops upper-level drivers (e.g. sd) from attaching; load-time only */
static ssize_t no_uld_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_uld);
}
static DRIVER_ATTR_RO(no_uld);

static ssize_t scsi_level_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_scsi_level);
}
static DRIVER_ATTR_RO(scsi_level);
6182 
6183 static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf)
6184 {
6185 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_virtual_gb);
6186 }
6187 static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
6188 				size_t count)
6189 {
6190 	int n;
6191 	bool changed;
6192 
6193 	/* Ignore capacity change for ZBC drives for now */
6194 	if (sdeb_zbc_in_use)
6195 		return -ENOTSUPP;
6196 
6197 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6198 		changed = (sdebug_virtual_gb != n);
6199 		sdebug_virtual_gb = n;
6200 		sdebug_capacity = get_sdebug_capacity();
6201 		if (changed) {
6202 			struct sdebug_host_info *sdhp;
6203 			struct sdebug_dev_info *dp;
6204 
6205 			spin_lock(&sdebug_host_list_lock);
6206 			list_for_each_entry(sdhp, &sdebug_host_list,
6207 					    host_list) {
6208 				list_for_each_entry(dp, &sdhp->dev_info_list,
6209 						    dev_list) {
6210 					set_bit(SDEBUG_UA_CAPACITY_CHANGED,
6211 						dp->uas_bm);
6212 				}
6213 			}
6214 			spin_unlock(&sdebug_host_list_lock);
6215 		}
6216 		return count;
6217 	}
6218 	return -EINVAL;
6219 }
6220 static DRIVER_ATTR_RW(virtual_gb);
6221 
static ssize_t add_host_show(struct device_driver *ddp, char *buf)
{
	/* absolute number of hosts currently active is what is shown */
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_hosts);
}

/* Positive value adds that many hosts; negative removes that many.
 * With per_host_store active, a retired (not_in_use) store is re-used
 * for a new host before a fresh one is allocated. */
static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
			      size_t count)
{
	bool found;
	unsigned long idx;
	struct sdeb_store_info *sip;
	bool want_phs = (sdebug_fake_rw == 0) && sdebug_per_host_store;
	int delta_hosts;

	if (sscanf(buf, "%d", &delta_hosts) != 1)
		return -EINVAL;
	if (delta_hosts > 0) {
		do {
			found = false;
			if (want_phs) {
				/* scan for a store marked not-in-use */
				xa_for_each_marked(per_store_ap, idx, sip,
						   SDEB_XA_NOT_IN_USE) {
					sdeb_most_recent_idx = (int)idx;
					found = true;
					break;
				}
				if (found)	/* re-use case */
					sdebug_add_host_helper((int)idx);
				else
					sdebug_do_add_host(true);
			} else {
				sdebug_do_add_host(false);
			}
		} while (--delta_hosts);
	} else if (delta_hosts < 0) {
		do {
			sdebug_do_remove_host(false);
		} while (++delta_hosts);
	}
	return count;
}
static DRIVER_ATTR_RW(add_host);
6265 
6266 static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf)
6267 {
6268 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_vpd_use_hostno);
6269 }
6270 static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf,
6271 				    size_t count)
6272 {
6273 	int n;
6274 
6275 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6276 		sdebug_vpd_use_hostno = n;
6277 		return count;
6278 	}
6279 	return -EINVAL;
6280 }
6281 static DRIVER_ATTR_RW(vpd_use_hostno);
6282 
6283 static ssize_t statistics_show(struct device_driver *ddp, char *buf)
6284 {
6285 	return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_statistics);
6286 }
6287 static ssize_t statistics_store(struct device_driver *ddp, const char *buf,
6288 				size_t count)
6289 {
6290 	int n;
6291 
6292 	if ((count > 0) && (sscanf(buf, "%d", &n) == 1) && (n >= 0)) {
6293 		if (n > 0)
6294 			sdebug_statistics = true;
6295 		else {
6296 			clear_queue_stats();
6297 			sdebug_statistics = false;
6298 		}
6299 		return count;
6300 	}
6301 	return -EINVAL;
6302 }
6303 static DRIVER_ATTR_RW(statistics);
6304 
/* The following attributes reflect load-time-only parameters: read-only */
static ssize_t sector_size_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_sector_size);
}
static DRIVER_ATTR_RO(sector_size);

static ssize_t submit_queues_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", submit_queues);
}
static DRIVER_ATTR_RO(submit_queues);

static ssize_t dix_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dix);
}
static DRIVER_ATTR_RO(dix);

static ssize_t dif_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dif);
}
static DRIVER_ATTR_RO(dif);

static ssize_t guard_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_guard);
}
static DRIVER_ATTR_RO(guard);

static ssize_t ato_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ato);
}
static DRIVER_ATTR_RO(ato);
6340 
/* Show the provisioning map (which LBAs are mapped) of the first store
 * as a bit-list; without LBP, report the whole range as mapped. */
static ssize_t map_show(struct device_driver *ddp, char *buf)
{
	ssize_t count = 0;

	if (!scsi_debug_lbp())
		return scnprintf(buf, PAGE_SIZE, "0-%u\n",
				 sdebug_store_sectors);

	if (sdebug_fake_rw == 0 && !xa_empty(per_store_ap)) {
		struct sdeb_store_info *sip = xa_load(per_store_ap, 0);

		/* PAGE_SIZE - 1 leaves room for the '\n' appended below */
		if (sip)
			count = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
					  (int)map_size, sip->map_storep);
	}
	buf[count++] = '\n';
	buf[count] = '\0';

	return count;
}
static DRIVER_ATTR_RO(map);
6362 
6363 static ssize_t random_show(struct device_driver *ddp, char *buf)
6364 {
6365 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_random);
6366 }
6367 
6368 static ssize_t random_store(struct device_driver *ddp, const char *buf,
6369 			    size_t count)
6370 {
6371 	bool v;
6372 
6373 	if (kstrtobool(buf, &v))
6374 		return -EINVAL;
6375 
6376 	sdebug_random = v;
6377 	return count;
6378 }
6379 static DRIVER_ATTR_RW(random);
6380 
6381 static ssize_t removable_show(struct device_driver *ddp, char *buf)
6382 {
6383 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_removable ? 1 : 0);
6384 }
6385 static ssize_t removable_store(struct device_driver *ddp, const char *buf,
6386 			       size_t count)
6387 {
6388 	int n;
6389 
6390 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6391 		sdebug_removable = (n > 0);
6392 		return count;
6393 	}
6394 	return -EINVAL;
6395 }
6396 static DRIVER_ATTR_RW(removable);
6397 
6398 static ssize_t host_lock_show(struct device_driver *ddp, char *buf)
6399 {
6400 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_host_lock);
6401 }
6402 /* N.B. sdebug_host_lock does nothing, kept for backward compatibility */
6403 static ssize_t host_lock_store(struct device_driver *ddp, const char *buf,
6404 			       size_t count)
6405 {
6406 	int n;
6407 
6408 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6409 		sdebug_host_lock = (n > 0);
6410 		return count;
6411 	}
6412 	return -EINVAL;
6413 }
6414 static DRIVER_ATTR_RW(host_lock);
6415 
6416 static ssize_t strict_show(struct device_driver *ddp, char *buf)
6417 {
6418 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_strict);
6419 }
6420 static ssize_t strict_store(struct device_driver *ddp, const char *buf,
6421 			    size_t count)
6422 {
6423 	int n;
6424 
6425 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6426 		sdebug_strict = (n > 0);
6427 		return count;
6428 	}
6429 	return -EINVAL;
6430 }
6431 static DRIVER_ATTR_RW(strict);
6432 
6433 static ssize_t uuid_ctl_show(struct device_driver *ddp, char *buf)
6434 {
6435 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_uuid_ctl);
6436 }
6437 static DRIVER_ATTR_RO(uuid_ctl);
6438 
6439 static ssize_t cdb_len_show(struct device_driver *ddp, char *buf)
6440 {
6441 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_cdb_len);
6442 }
6443 static ssize_t cdb_len_store(struct device_driver *ddp, const char *buf,
6444 			     size_t count)
6445 {
6446 	int ret, n;
6447 
6448 	ret = kstrtoint(buf, 0, &n);
6449 	if (ret)
6450 		return ret;
6451 	sdebug_cdb_len = n;
6452 	all_config_cdb_len();
6453 	return count;
6454 }
6455 static DRIVER_ATTR_RW(cdb_len);
6456 
/* Long-form names accepted for the zbc= parameter (index = BLK_ZONED_*). */
static const char * const zbc_model_strs_a[] = {
	[BLK_ZONED_NONE] = "none",
	[BLK_ZONED_HA]   = "host-aware",
	[BLK_ZONED_HM]   = "host-managed",
};

/* Short-form aliases for the same models. */
static const char * const zbc_model_strs_b[] = {
	[BLK_ZONED_NONE] = "no",
	[BLK_ZONED_HA]   = "aware",
	[BLK_ZONED_HM]   = "managed",
};

/* Numeric aliases for the same models. */
static const char * const zbc_model_strs_c[] = {
	[BLK_ZONED_NONE] = "0",
	[BLK_ZONED_HA]   = "1",
	[BLK_ZONED_HM]   = "2",
};
6474 
6475 static int sdeb_zbc_model_str(const char *cp)
6476 {
6477 	int res = sysfs_match_string(zbc_model_strs_a, cp);
6478 
6479 	if (res < 0) {
6480 		res = sysfs_match_string(zbc_model_strs_b, cp);
6481 		if (res < 0) {
6482 			res = sysfs_match_string(zbc_model_strs_c, cp);
6483 			if (res < 0)
6484 				return -EINVAL;
6485 		}
6486 	}
6487 	return res;
6488 }
6489 
6490 static ssize_t zbc_show(struct device_driver *ddp, char *buf)
6491 {
6492 	return scnprintf(buf, PAGE_SIZE, "%s\n",
6493 			 zbc_model_strs_a[sdeb_zbc_model]);
6494 }
6495 static DRIVER_ATTR_RO(zbc);
6496 
6497 /* Note: The following array creates attribute files in the
6498    /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
6499    files (over those found in the /sys/module/scsi_debug/parameters
6500    directory) is that auxiliary actions can be triggered when an attribute
6501    is changed. For example see: add_host_store() above.
6502  */
6503 
static struct attribute *sdebug_drv_attrs[] = {
	&driver_attr_delay.attr,
	&driver_attr_opts.attr,
	&driver_attr_ptype.attr,
	&driver_attr_dsense.attr,
	&driver_attr_fake_rw.attr,
	&driver_attr_host_max_queue.attr,
	&driver_attr_no_lun_0.attr,
	&driver_attr_num_tgts.attr,
	&driver_attr_dev_size_mb.attr,
	&driver_attr_num_parts.attr,
	&driver_attr_every_nth.attr,
	&driver_attr_max_luns.attr,
	&driver_attr_max_queue.attr,
	&driver_attr_no_uld.attr,
	&driver_attr_scsi_level.attr,
	&driver_attr_virtual_gb.attr,
	&driver_attr_add_host.attr,
	&driver_attr_per_host_store.attr,
	&driver_attr_vpd_use_hostno.attr,
	&driver_attr_sector_size.attr,
	&driver_attr_statistics.attr,
	&driver_attr_submit_queues.attr,
	&driver_attr_dix.attr,
	&driver_attr_dif.attr,
	&driver_attr_guard.attr,
	&driver_attr_ato.attr,
	&driver_attr_map.attr,
	&driver_attr_random.attr,
	&driver_attr_removable.attr,
	&driver_attr_host_lock.attr,
	&driver_attr_ndelay.attr,
	&driver_attr_strict.attr,
	&driver_attr_uuid_ctl.attr,
	&driver_attr_cdb_len.attr,
	&driver_attr_zbc.attr,
	NULL,	/* sentinel: attribute array must be NULL terminated */
};
ATTRIBUTE_GROUPS(sdebug_drv);
6543 
6544 static struct device *pseudo_primary;
6545 
6546 static int __init scsi_debug_init(void)
6547 {
6548 	bool want_store = (sdebug_fake_rw == 0);
6549 	unsigned long sz;
6550 	int k, ret, hosts_to_add;
6551 	int idx = -1;
6552 
6553 	ramdisk_lck_a[0] = &atomic_rw;
6554 	ramdisk_lck_a[1] = &atomic_rw2;
6555 	atomic_set(&retired_max_queue, 0);
6556 
6557 	if (sdebug_ndelay >= 1000 * 1000 * 1000) {
6558 		pr_warn("ndelay must be less than 1 second, ignored\n");
6559 		sdebug_ndelay = 0;
6560 	} else if (sdebug_ndelay > 0)
6561 		sdebug_jdelay = JDELAY_OVERRIDDEN;
6562 
6563 	switch (sdebug_sector_size) {
6564 	case  512:
6565 	case 1024:
6566 	case 2048:
6567 	case 4096:
6568 		break;
6569 	default:
6570 		pr_err("invalid sector_size %d\n", sdebug_sector_size);
6571 		return -EINVAL;
6572 	}
6573 
6574 	switch (sdebug_dif) {
6575 	case T10_PI_TYPE0_PROTECTION:
6576 		break;
6577 	case T10_PI_TYPE1_PROTECTION:
6578 	case T10_PI_TYPE2_PROTECTION:
6579 	case T10_PI_TYPE3_PROTECTION:
6580 		have_dif_prot = true;
6581 		break;
6582 
6583 	default:
6584 		pr_err("dif must be 0, 1, 2 or 3\n");
6585 		return -EINVAL;
6586 	}
6587 
6588 	if (sdebug_num_tgts < 0) {
6589 		pr_err("num_tgts must be >= 0\n");
6590 		return -EINVAL;
6591 	}
6592 
6593 	if (sdebug_guard > 1) {
6594 		pr_err("guard must be 0 or 1\n");
6595 		return -EINVAL;
6596 	}
6597 
6598 	if (sdebug_ato > 1) {
6599 		pr_err("ato must be 0 or 1\n");
6600 		return -EINVAL;
6601 	}
6602 
6603 	if (sdebug_physblk_exp > 15) {
6604 		pr_err("invalid physblk_exp %u\n", sdebug_physblk_exp);
6605 		return -EINVAL;
6606 	}
6607 	if (sdebug_max_luns > 256) {
6608 		pr_warn("max_luns can be no more than 256, use default\n");
6609 		sdebug_max_luns = DEF_MAX_LUNS;
6610 	}
6611 
6612 	if (sdebug_lowest_aligned > 0x3fff) {
6613 		pr_err("lowest_aligned too big: %u\n", sdebug_lowest_aligned);
6614 		return -EINVAL;
6615 	}
6616 
6617 	if (submit_queues < 1) {
6618 		pr_err("submit_queues must be 1 or more\n");
6619 		return -EINVAL;
6620 	}
6621 
6622 	if ((sdebug_max_queue > SDEBUG_CANQUEUE) || (sdebug_max_queue < 1)) {
6623 		pr_err("max_queue must be in range [1, %d]\n", SDEBUG_CANQUEUE);
6624 		return -EINVAL;
6625 	}
6626 
6627 	if ((sdebug_host_max_queue > SDEBUG_CANQUEUE) ||
6628 	    (sdebug_host_max_queue < 0)) {
6629 		pr_err("host_max_queue must be in range [0 %d]\n",
6630 		       SDEBUG_CANQUEUE);
6631 		return -EINVAL;
6632 	}
6633 
6634 	if (sdebug_host_max_queue &&
6635 	    (sdebug_max_queue != sdebug_host_max_queue)) {
6636 		sdebug_max_queue = sdebug_host_max_queue;
6637 		pr_warn("fixing max submit queue depth to host max queue depth, %d\n",
6638 			sdebug_max_queue);
6639 	}
6640 
6641 	sdebug_q_arr = kcalloc(submit_queues, sizeof(struct sdebug_queue),
6642 			       GFP_KERNEL);
6643 	if (sdebug_q_arr == NULL)
6644 		return -ENOMEM;
6645 	for (k = 0; k < submit_queues; ++k)
6646 		spin_lock_init(&sdebug_q_arr[k].qc_lock);
6647 
6648 	/*
6649 	 * check for host managed zoned block device specified with
6650 	 * ptype=0x14 or zbc=XXX.
6651 	 */
6652 	if (sdebug_ptype == TYPE_ZBC) {
6653 		sdeb_zbc_model = BLK_ZONED_HM;
6654 	} else if (sdeb_zbc_model_s && *sdeb_zbc_model_s) {
6655 		k = sdeb_zbc_model_str(sdeb_zbc_model_s);
6656 		if (k < 0) {
6657 			ret = k;
6658 			goto free_vm;
6659 		}
6660 		sdeb_zbc_model = k;
6661 		switch (sdeb_zbc_model) {
6662 		case BLK_ZONED_NONE:
6663 		case BLK_ZONED_HA:
6664 			sdebug_ptype = TYPE_DISK;
6665 			break;
6666 		case BLK_ZONED_HM:
6667 			sdebug_ptype = TYPE_ZBC;
6668 			break;
6669 		default:
6670 			pr_err("Invalid ZBC model\n");
6671 			return -EINVAL;
6672 		}
6673 	}
6674 	if (sdeb_zbc_model != BLK_ZONED_NONE) {
6675 		sdeb_zbc_in_use = true;
6676 		if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
6677 			sdebug_dev_size_mb = DEF_ZBC_DEV_SIZE_MB;
6678 	}
6679 
6680 	if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
6681 		sdebug_dev_size_mb = DEF_DEV_SIZE_MB;
6682 	if (sdebug_dev_size_mb < 1)
6683 		sdebug_dev_size_mb = 1;  /* force minimum 1 MB ramdisk */
6684 	sz = (unsigned long)sdebug_dev_size_mb * 1048576;
6685 	sdebug_store_sectors = sz / sdebug_sector_size;
6686 	sdebug_capacity = get_sdebug_capacity();
6687 
6688 	/* play around with geometry, don't waste too much on track 0 */
6689 	sdebug_heads = 8;
6690 	sdebug_sectors_per = 32;
6691 	if (sdebug_dev_size_mb >= 256)
6692 		sdebug_heads = 64;
6693 	else if (sdebug_dev_size_mb >= 16)
6694 		sdebug_heads = 32;
6695 	sdebug_cylinders_per = (unsigned long)sdebug_capacity /
6696 			       (sdebug_sectors_per * sdebug_heads);
6697 	if (sdebug_cylinders_per >= 1024) {
6698 		/* other LLDs do this; implies >= 1GB ram disk ... */
6699 		sdebug_heads = 255;
6700 		sdebug_sectors_per = 63;
6701 		sdebug_cylinders_per = (unsigned long)sdebug_capacity /
6702 			       (sdebug_sectors_per * sdebug_heads);
6703 	}
6704 	if (scsi_debug_lbp()) {
6705 		sdebug_unmap_max_blocks =
6706 			clamp(sdebug_unmap_max_blocks, 0U, 0xffffffffU);
6707 
6708 		sdebug_unmap_max_desc =
6709 			clamp(sdebug_unmap_max_desc, 0U, 256U);
6710 
6711 		sdebug_unmap_granularity =
6712 			clamp(sdebug_unmap_granularity, 1U, 0xffffffffU);
6713 
6714 		if (sdebug_unmap_alignment &&
6715 		    sdebug_unmap_granularity <=
6716 		    sdebug_unmap_alignment) {
6717 			pr_err("ERR: unmap_granularity <= unmap_alignment\n");
6718 			ret = -EINVAL;
6719 			goto free_q_arr;
6720 		}
6721 	}
6722 	xa_init_flags(per_store_ap, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
6723 	if (want_store) {
6724 		idx = sdebug_add_store();
6725 		if (idx < 0) {
6726 			ret = idx;
6727 			goto free_q_arr;
6728 		}
6729 	}
6730 
6731 	pseudo_primary = root_device_register("pseudo_0");
6732 	if (IS_ERR(pseudo_primary)) {
6733 		pr_warn("root_device_register() error\n");
6734 		ret = PTR_ERR(pseudo_primary);
6735 		goto free_vm;
6736 	}
6737 	ret = bus_register(&pseudo_lld_bus);
6738 	if (ret < 0) {
6739 		pr_warn("bus_register error: %d\n", ret);
6740 		goto dev_unreg;
6741 	}
6742 	ret = driver_register(&sdebug_driverfs_driver);
6743 	if (ret < 0) {
6744 		pr_warn("driver_register error: %d\n", ret);
6745 		goto bus_unreg;
6746 	}
6747 
6748 	hosts_to_add = sdebug_add_host;
6749 	sdebug_add_host = 0;
6750 
6751 	for (k = 0; k < hosts_to_add; k++) {
6752 		if (want_store && k == 0) {
6753 			ret = sdebug_add_host_helper(idx);
6754 			if (ret < 0) {
6755 				pr_err("add_host_helper k=%d, error=%d\n",
6756 				       k, -ret);
6757 				break;
6758 			}
6759 		} else {
6760 			ret = sdebug_do_add_host(want_store &&
6761 						 sdebug_per_host_store);
6762 			if (ret < 0) {
6763 				pr_err("add_host k=%d error=%d\n", k, -ret);
6764 				break;
6765 			}
6766 		}
6767 	}
6768 	if (sdebug_verbose)
6769 		pr_info("built %d host(s)\n", sdebug_num_hosts);
6770 
6771 	return 0;
6772 
6773 bus_unreg:
6774 	bus_unregister(&pseudo_lld_bus);
6775 dev_unreg:
6776 	root_device_unregister(pseudo_primary);
6777 free_vm:
6778 	sdebug_erase_store(idx, NULL);
6779 free_q_arr:
6780 	kfree(sdebug_q_arr);
6781 	return ret;
6782 }
6783 
/*
 * Module unload: stop in-flight work, remove every simulated host,
 * unregister driver/bus/root device, then free all backing stores.
 */
static void __exit scsi_debug_exit(void)
{
	int k = sdebug_num_hosts;

	stop_all_queued();
	/* tear hosts down one at a time until none remain */
	for (; k; k--)
		sdebug_do_remove_host(true);
	free_all_queued();
	driver_unregister(&sdebug_driverfs_driver);
	bus_unregister(&pseudo_lld_bus);
	root_device_unregister(pseudo_primary);

	/* false: release every store, including the first */
	sdebug_erase_all_stores(false);
	xa_destroy(per_store_ap);
}

device_initcall(scsi_debug_init);
module_exit(scsi_debug_exit);
6802 
/* Device release callback for a pseudo adapter: free its host info. */
static void sdebug_release_adapter(struct device *dev)
{
	kfree(to_sdebug_host(dev));
}
6810 
6811 /* idx must be valid, if sip is NULL then it will be obtained using idx */
6812 static void sdebug_erase_store(int idx, struct sdeb_store_info *sip)
6813 {
6814 	if (idx < 0)
6815 		return;
6816 	if (!sip) {
6817 		if (xa_empty(per_store_ap))
6818 			return;
6819 		sip = xa_load(per_store_ap, idx);
6820 		if (!sip)
6821 			return;
6822 	}
6823 	vfree(sip->map_storep);
6824 	vfree(sip->dif_storep);
6825 	vfree(sip->storep);
6826 	xa_erase(per_store_ap, idx);
6827 	kfree(sip);
6828 }
6829 
/* Assume apart_from_first==false only in shutdown case. */
static void sdebug_erase_all_stores(bool apart_from_first)
{
	unsigned long idx;
	struct sdeb_store_info *sip = NULL;

	xa_for_each(per_store_ap, idx, sip) {
		/* skip the first store visited when asked, erase the rest */
		if (apart_from_first)
			apart_from_first = false;
		else
			sdebug_erase_store(idx, sip);
	}
	/*
	 * NOTE(review): apart_from_first is cleared on the first loop
	 * iteration, so this branch is only reached when the xarray was
	 * empty; it re-syncs the most-recent index — confirm intent.
	 */
	if (apart_from_first)
		sdeb_most_recent_idx = sdeb_first_idx;
}
6845 
6846 /*
6847  * Returns store xarray new element index (idx) if >=0 else negated errno.
6848  * Limit the number of stores to 65536.
6849  */
6850 static int sdebug_add_store(void)
6851 {
6852 	int res;
6853 	u32 n_idx;
6854 	unsigned long iflags;
6855 	unsigned long sz = (unsigned long)sdebug_dev_size_mb * 1048576;
6856 	struct sdeb_store_info *sip = NULL;
6857 	struct xa_limit xal = { .max = 1 << 16, .min = 0 };
6858 
6859 	sip = kzalloc(sizeof(*sip), GFP_KERNEL);
6860 	if (!sip)
6861 		return -ENOMEM;
6862 
6863 	xa_lock_irqsave(per_store_ap, iflags);
6864 	res = __xa_alloc(per_store_ap, &n_idx, sip, xal, GFP_ATOMIC);
6865 	if (unlikely(res < 0)) {
6866 		xa_unlock_irqrestore(per_store_ap, iflags);
6867 		kfree(sip);
6868 		pr_warn("%s: xa_alloc() errno=%d\n", __func__, -res);
6869 		return res;
6870 	}
6871 	sdeb_most_recent_idx = n_idx;
6872 	if (sdeb_first_idx < 0)
6873 		sdeb_first_idx = n_idx;
6874 	xa_unlock_irqrestore(per_store_ap, iflags);
6875 
6876 	res = -ENOMEM;
6877 	sip->storep = vzalloc(sz);
6878 	if (!sip->storep) {
6879 		pr_err("user data oom\n");
6880 		goto err;
6881 	}
6882 	if (sdebug_num_parts > 0)
6883 		sdebug_build_parts(sip->storep, sz);
6884 
6885 	/* DIF/DIX: what T10 calls Protection Information (PI) */
6886 	if (sdebug_dix) {
6887 		int dif_size;
6888 
6889 		dif_size = sdebug_store_sectors * sizeof(struct t10_pi_tuple);
6890 		sip->dif_storep = vmalloc(dif_size);
6891 
6892 		pr_info("dif_storep %u bytes @ %pK\n", dif_size,
6893 			sip->dif_storep);
6894 
6895 		if (!sip->dif_storep) {
6896 			pr_err("DIX oom\n");
6897 			goto err;
6898 		}
6899 		memset(sip->dif_storep, 0xff, dif_size);
6900 	}
6901 	/* Logical Block Provisioning */
6902 	if (scsi_debug_lbp()) {
6903 		map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
6904 		sip->map_storep = vmalloc(array_size(sizeof(long),
6905 						     BITS_TO_LONGS(map_size)));
6906 
6907 		pr_info("%lu provisioning blocks\n", map_size);
6908 
6909 		if (!sip->map_storep) {
6910 			pr_err("LBP map oom\n");
6911 			goto err;
6912 		}
6913 
6914 		bitmap_zero(sip->map_storep, map_size);
6915 
6916 		/* Map first 1KB for partition table */
6917 		if (sdebug_num_parts)
6918 			map_region(sip, 0, 2);
6919 	}
6920 
6921 	rwlock_init(&sip->macc_lck);
6922 	return (int)n_idx;
6923 err:
6924 	sdebug_erase_store((int)n_idx, sip);
6925 	pr_warn("%s: failed, errno=%d\n", __func__, -res);
6926 	return res;
6927 }
6928 
/*
 * Create one simulated host bound to store index per_host_idx (or, when
 * negative, to the first store). Builds its device list, links it into
 * sdebug_host_list and registers it on the pseudo bus. Returns 0 or a
 * negated errno.
 */
static int sdebug_add_host_helper(int per_host_idx)
{
	int k, devs_per_host, idx;
	int error = -ENOMEM;
	struct sdebug_host_info *sdbg_host;
	struct sdebug_dev_info *sdbg_devinfo, *tmp;

	sdbg_host = kzalloc(sizeof(*sdbg_host), GFP_KERNEL);
	if (!sdbg_host)
		return -ENOMEM;
	idx = (per_host_idx < 0) ? sdeb_first_idx : per_host_idx;
	/* mark the chosen store as in-use again if it had been parked */
	if (xa_get_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE))
		xa_clear_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
	sdbg_host->si_idx = idx;

	INIT_LIST_HEAD(&sdbg_host->dev_info_list);

	/* pre-create one dev_info per target/LUN combination */
	devs_per_host = sdebug_num_tgts * sdebug_max_luns;
	for (k = 0; k < devs_per_host; k++) {
		sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
		if (!sdbg_devinfo)
			goto clean;
	}

	spin_lock(&sdebug_host_list_lock);
	list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
	spin_unlock(&sdebug_host_list_lock);

	sdbg_host->dev.bus = &pseudo_lld_bus;
	sdbg_host->dev.parent = pseudo_primary;
	sdbg_host->dev.release = &sdebug_release_adapter;
	dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_num_hosts);

	/* registration triggers sdebug_driver_probe() via the bus match */
	error = device_register(&sdbg_host->dev);
	if (error)
		goto clean;

	++sdebug_num_hosts;
	return 0;

clean:
	/* unwind any dev_info entries created before the failure */
	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
				 dev_list) {
		list_del(&sdbg_devinfo->dev_list);
		kfree(sdbg_devinfo->zstate);
		kfree(sdbg_devinfo);
	}
	kfree(sdbg_host);
	pr_warn("%s: failed, errno=%d\n", __func__, -error);
	return error;
}
6980 
6981 static int sdebug_do_add_host(bool mk_new_store)
6982 {
6983 	int ph_idx = sdeb_most_recent_idx;
6984 
6985 	if (mk_new_store) {
6986 		ph_idx = sdebug_add_store();
6987 		if (ph_idx < 0)
6988 			return ph_idx;
6989 	}
6990 	return sdebug_add_host_helper(ph_idx);
6991 }
6992 
/*
 * Remove the most recently added simulated host. Unless the_end is set
 * (module unload), its backing store is parked (marked NOT_IN_USE) when
 * no other host still references it, rather than freed.
 */
static void sdebug_do_remove_host(bool the_end)
{
	int idx = -1;
	struct sdebug_host_info *sdbg_host = NULL;
	struct sdebug_host_info *sdbg_host2;

	spin_lock(&sdebug_host_list_lock);
	if (!list_empty(&sdebug_host_list)) {
		/* take the tail, i.e. the last host added */
		sdbg_host = list_entry(sdebug_host_list.prev,
				       struct sdebug_host_info, host_list);
		idx = sdbg_host->si_idx;
	}
	if (!the_end && idx >= 0) {
		bool unique = true;

		/* is any other host sharing this store index? */
		list_for_each_entry(sdbg_host2, &sdebug_host_list, host_list) {
			if (sdbg_host2 == sdbg_host)
				continue;
			if (idx == sdbg_host2->si_idx) {
				unique = false;
				break;
			}
		}
		if (unique) {
			/* sole user: park the store for possible reuse */
			xa_set_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
			if (idx == sdeb_most_recent_idx)
				--sdeb_most_recent_idx;
		}
	}
	if (sdbg_host)
		list_del(&sdbg_host->host_list);
	spin_unlock(&sdebug_host_list_lock);

	if (!sdbg_host)
		return;

	/* triggers sdebug_driver_remove() and, eventually, release */
	device_unregister(&sdbg_host->dev);
	--sdebug_num_hosts;
}
7032 
7033 static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
7034 {
7035 	int num_in_q = 0;
7036 	struct sdebug_dev_info *devip;
7037 
7038 	block_unblock_all_queues(true);
7039 	devip = (struct sdebug_dev_info *)sdev->hostdata;
7040 	if (NULL == devip) {
7041 		block_unblock_all_queues(false);
7042 		return	-ENODEV;
7043 	}
7044 	num_in_q = atomic_read(&devip->num_in_q);
7045 
7046 	if (qdepth < 1)
7047 		qdepth = 1;
7048 	/* allow to exceed max host qc_arr elements for testing */
7049 	if (qdepth > SDEBUG_CANQUEUE + 10)
7050 		qdepth = SDEBUG_CANQUEUE + 10;
7051 	scsi_change_queue_depth(sdev, qdepth);
7052 
7053 	if (SDEBUG_OPT_Q_NOISE & sdebug_opts) {
7054 		sdev_printk(KERN_INFO, sdev, "%s: qdepth=%d, num_in_q=%d\n",
7055 			    __func__, qdepth, num_in_q);
7056 	}
7057 	block_unblock_all_queues(false);
7058 	return sdev->queue_depth;
7059 }
7060 
7061 static bool fake_timeout(struct scsi_cmnd *scp)
7062 {
7063 	if (0 == (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth))) {
7064 		if (sdebug_every_nth < -1)
7065 			sdebug_every_nth = -1;
7066 		if (SDEBUG_OPT_TIMEOUT & sdebug_opts)
7067 			return true; /* ignore command causing timeout */
7068 		else if (SDEBUG_OPT_MAC_TIMEOUT & sdebug_opts &&
7069 			 scsi_medium_access_command(scp))
7070 			return true; /* time out reads and writes */
7071 	}
7072 	return false;
7073 }
7074 
/*
 * .queuecommand handler: decode the CDB via opcode_info_arr (optionally
 * disambiguating by service action), run optional strict CDB-mask and
 * unit-attention checks, then hand the matched resp_* handler to
 * schedule_resp() with a delay chosen by the command's flags.
 */
static int scsi_debug_queuecommand(struct Scsi_Host *shost,
				   struct scsi_cmnd *scp)
{
	u8 sdeb_i;
	struct scsi_device *sdp = scp->device;
	const struct opcode_info_t *oip;
	const struct opcode_info_t *r_oip;
	struct sdebug_dev_info *devip;
	u8 *cmd = scp->cmnd;
	int (*r_pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *) = NULL;
	int k, na;
	int errsts = 0;
	u32 flags;
	u16 sa;
	u8 opcode = cmd[0];
	bool has_wlun_rl;
	bool inject_now;

	scsi_set_resid(scp, 0);
	if (sdebug_statistics) {
		atomic_inc(&sdebug_cmnd_count);
		inject_now = inject_on_this_cmd();
	} else {
		inject_now = false;
	}
	/* optionally trace the CDB bytes (up to 32) to the kernel log */
	if (unlikely(sdebug_verbose &&
		     !(SDEBUG_OPT_NO_CDB_NOISE & sdebug_opts))) {
		char b[120];
		int n, len, sb;

		len = scp->cmd_len;
		sb = (int)sizeof(b);
		if (len > 32)
			strcpy(b, "too long, over 32 bytes");
		else {
			for (k = 0, n = 0; k < len && n < sb; ++k)
				n += scnprintf(b + n, sb - n, "%02x ",
					       (u32)cmd[k]);
		}
		sdev_printk(KERN_INFO, sdp, "%s: tag=%#x, cmd %s\n", my_name,
			    blk_mq_unique_tag(scp->request), b);
	}
	if (unlikely(inject_now && (sdebug_opts & SDEBUG_OPT_HOST_BUSY)))
		return SCSI_MLQUEUE_HOST_BUSY;
	has_wlun_rl = (sdp->lun == SCSI_W_LUN_REPORT_LUNS);
	if (unlikely((sdp->lun >= sdebug_max_luns) && !has_wlun_rl))
		goto err_out;

	sdeb_i = opcode_ind_arr[opcode];	/* fully mapped */
	oip = &opcode_info_arr[sdeb_i];		/* safe if table consistent */
	devip = (struct sdebug_dev_info *)sdp->hostdata;
	if (unlikely(!devip)) {
		devip = find_build_dev_info(sdp);
		if (NULL == devip)
			goto err_out;
	}
	if (unlikely(inject_now && !atomic_read(&sdeb_inject_pending)))
		atomic_set(&sdeb_inject_pending, 1);

	na = oip->num_attached;
	r_pfp = oip->pfp;
	if (na) {	/* multiple commands with this opcode */
		r_oip = oip;
		/* narrow down by service action when the table defines one */
		if (FF_SA & r_oip->flags) {
			if (F_SA_LOW & oip->flags)
				sa = 0x1f & cmd[1];
			else
				sa = get_unaligned_be16(cmd + 8);
			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
				if (opcode == oip->opcode && sa == oip->sa)
					break;
			}
		} else {   /* since no service action only check opcode */
			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
				if (opcode == oip->opcode)
					break;
			}
		}
		if (k > na) {
			if (F_SA_LOW & r_oip->flags)
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 4);
			else if (F_SA_HIGH & r_oip->flags)
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, 7);
			else
				mk_sense_invalid_opcode(scp);
			goto check_cond;
		}
	}	/* else (when na==0) we assume the oip is a match */
	flags = oip->flags;
	if (unlikely(F_INV_OP & flags)) {
		mk_sense_invalid_opcode(scp);
		goto check_cond;
	}
	if (unlikely(has_wlun_rl && !(F_RL_WLUN_OK & flags))) {
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, sdp, "%s: Opcode 0x%x not%s\n",
				    my_name, opcode, " supported for wlun");
		mk_sense_invalid_opcode(scp);
		goto check_cond;
	}
	if (unlikely(sdebug_strict)) {	/* check cdb against mask */
		u8 rem;
		int j;

		/* reject any CDB bit set outside the opcode's len_mask */
		for (k = 1; k < oip->len_mask[0] && k < 16; ++k) {
			rem = ~oip->len_mask[k] & cmd[k];
			if (rem) {
				/* locate the highest offending bit for sense */
				for (j = 7; j >= 0; --j, rem <<= 1) {
					if (0x80 & rem)
						break;
				}
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, k, j);
				goto check_cond;
			}
		}
	}
	if (unlikely(!(F_SKIP_UA & flags) &&
		     find_first_bit(devip->uas_bm,
				    SDEBUG_NUM_UAS) != SDEBUG_NUM_UAS)) {
		errsts = make_ua(scp, devip);
		if (errsts)
			goto check_cond;
	}
	if (unlikely((F_M_ACCESS & flags) && atomic_read(&devip->stopped))) {
		mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x2);
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, sdp, "%s reports: Not ready: "
				    "%s\n", my_name, "initializing command "
				    "required");
		errsts = check_condition_result;
		goto fini;
	}
	if (sdebug_fake_rw && (F_FAKE_RW & flags))
		goto fini;
	if (unlikely(sdebug_every_nth)) {
		if (fake_timeout(scp))
			return 0;	/* ignore command: make trouble */
	}
	if (likely(oip->pfp))
		pfp = oip->pfp;	/* calls a resp_* function */
	else
		pfp = r_pfp;    /* if leaf function ptr NULL, try the root's */

fini:
	if (F_DELAY_OVERR & flags)	/* cmds like INQUIRY respond asap */
		return schedule_resp(scp, devip, errsts, pfp, 0, 0);
	else if ((flags & F_LONG_DELAY) && (sdebug_jdelay > 0 ||
					    sdebug_ndelay > 10000)) {
		/*
		 * Skip long delays if ndelay <= 10 microseconds. Otherwise
		 * for Start Stop Unit (SSU) want at least 1 second delay and
		 * if sdebug_jdelay>1 want a long delay of that many seconds.
		 * For Synchronize Cache want 1/20 of SSU's delay.
		 */
		int jdelay = (sdebug_jdelay < 2) ? 1 : sdebug_jdelay;
		int denom = (flags & F_SYNC_DELAY) ? 20 : 1;

		jdelay = mult_frac(USER_HZ * jdelay, HZ, denom * USER_HZ);
		return schedule_resp(scp, devip, errsts, pfp, jdelay, 0);
	} else
		return schedule_resp(scp, devip, errsts, pfp, sdebug_jdelay,
				     sdebug_ndelay);
check_cond:
	return schedule_resp(scp, devip, check_condition_result, NULL, 0, 0);
err_out:
	return schedule_resp(scp, NULL, DID_NO_CONNECT << 16, NULL, 0, 0);
}
7243 
/*
 * Host template shared by all simulated adapters; can_queue and
 * dma_boundary are patched per the module parameters in
 * sdebug_driver_probe() before each host allocation.
 */
static struct scsi_host_template sdebug_driver_template = {
	.show_info =		scsi_debug_show_info,
	.write_info =		scsi_debug_write_info,
	.proc_name =		sdebug_proc_name,
	.name =			"SCSI DEBUG",
	.info =			scsi_debug_info,
	.slave_alloc =		scsi_debug_slave_alloc,
	.slave_configure =	scsi_debug_slave_configure,
	.slave_destroy =	scsi_debug_slave_destroy,
	.ioctl =		scsi_debug_ioctl,
	.queuecommand =		scsi_debug_queuecommand,
	.change_queue_depth =	sdebug_change_qdepth,
	.eh_abort_handler =	scsi_debug_abort,
	.eh_device_reset_handler = scsi_debug_device_reset,
	.eh_target_reset_handler = scsi_debug_target_reset,
	.eh_bus_reset_handler = scsi_debug_bus_reset,
	.eh_host_reset_handler = scsi_debug_host_reset,
	.can_queue =		SDEBUG_CANQUEUE,
	.this_id =		7,
	.sg_tablesize =		SG_MAX_SEGMENTS,
	.cmd_per_lun =		DEF_CMD_PER_LUN,
	.max_sectors =		-1U,
	.max_segment_size =	-1U,
	.module =		THIS_MODULE,
	.track_queue_depth =	1,
};
7270 
/*
 * Bus probe callback: allocate and configure a Scsi_Host for the pseudo
 * adapter device, set up DIF/DIX protection and guard type per module
 * parameters, then add and scan the host. Returns 0 or -ENODEV.
 */
static int sdebug_driver_probe(struct device *dev)
{
	int error = 0;
	struct sdebug_host_info *sdbg_host;
	struct Scsi_Host *hpnt;
	int hprot;

	sdbg_host = to_sdebug_host(dev);

	/* per-host queue limit (when set) overrides the global max */
	if (sdebug_host_max_queue)
		sdebug_driver_template.can_queue = sdebug_host_max_queue;
	else
		sdebug_driver_template.can_queue = sdebug_max_queue;
	if (!sdebug_clustering)
		sdebug_driver_template.dma_boundary = PAGE_SIZE - 1;

	hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
	if (NULL == hpnt) {
		pr_err("scsi_host_alloc failed\n");
		error = -ENODEV;
		return error;
	}
	if (submit_queues > nr_cpu_ids) {
		pr_warn("%s: trim submit_queues (was %d) to nr_cpu_ids=%u\n",
			my_name, submit_queues, nr_cpu_ids);
		submit_queues = nr_cpu_ids;
	}
	/*
	 * Decide whether to tell scsi subsystem that we want mq. The
	 * following should give the same answer for each host. If the host
	 * has a limit of hostwide max commands, then do not set.
	 */
	if (!sdebug_host_max_queue)
		hpnt->nr_hw_queues = submit_queues;

	sdbg_host->shost = hpnt;
	*((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host;
	/* leave room so this_id itself is never a target id */
	if ((hpnt->this_id >= 0) && (sdebug_num_tgts > hpnt->this_id))
		hpnt->max_id = sdebug_num_tgts + 1;
	else
		hpnt->max_id = sdebug_num_tgts;
	/* = sdebug_max_luns; */
	hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;

	hprot = 0;

	/* map the dif= parameter to SHOST_DIF/DIX protection capabilities */
	switch (sdebug_dif) {

	case T10_PI_TYPE1_PROTECTION:
		hprot = SHOST_DIF_TYPE1_PROTECTION;
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE1_PROTECTION;
		break;

	case T10_PI_TYPE2_PROTECTION:
		hprot = SHOST_DIF_TYPE2_PROTECTION;
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE2_PROTECTION;
		break;

	case T10_PI_TYPE3_PROTECTION:
		hprot = SHOST_DIF_TYPE3_PROTECTION;
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE3_PROTECTION;
		break;

	default:
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE0_PROTECTION;
		break;
	}

	scsi_host_set_prot(hpnt, hprot);

	if (have_dif_prot || sdebug_dix)
		pr_info("host protection%s%s%s%s%s%s%s\n",
			(hprot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
			(hprot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
			(hprot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
			(hprot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
			(hprot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
			(hprot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
			(hprot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");

	if (sdebug_guard == 1)
		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
	else
		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);

	sdebug_verbose = !!(SDEBUG_OPT_NOISE & sdebug_opts);
	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & sdebug_opts);
	if (sdebug_every_nth)	/* need stats counters for every_nth */
		sdebug_statistics = true;
	error = scsi_add_host(hpnt, &sdbg_host->dev);
	if (error) {
		pr_err("scsi_add_host failed\n");
		error = -ENODEV;
		scsi_host_put(hpnt);
	} else {
		scsi_scan_host(hpnt);
	}

	return error;
}
7375 
7376 static int sdebug_driver_remove(struct device *dev)
7377 {
7378 	struct sdebug_host_info *sdbg_host;
7379 	struct sdebug_dev_info *sdbg_devinfo, *tmp;
7380 
7381 	sdbg_host = to_sdebug_host(dev);
7382 
7383 	if (!sdbg_host) {
7384 		pr_err("Unable to locate host info\n");
7385 		return -ENODEV;
7386 	}
7387 
7388 	scsi_remove_host(sdbg_host->shost);
7389 
7390 	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
7391 				 dev_list) {
7392 		list_del(&sdbg_devinfo->dev_list);
7393 		kfree(sdbg_devinfo->zstate);
7394 		kfree(sdbg_devinfo);
7395 	}
7396 
7397 	scsi_host_put(sdbg_host->shost);
7398 	return 0;
7399 }
7400 
/* Pseudo bus match: every device matches every driver on this bus. */
static int pseudo_lld_bus_match(struct device *dev,
				struct device_driver *dev_driver)
{
	return 1;	/* unconditional match */
}
7406 
/* The pseudo bus on which simulated adapters are registered. */
static struct bus_type pseudo_lld_bus = {
	.name = "pseudo",
	.match = pseudo_lld_bus_match,
	.probe = sdebug_driver_probe,
	.remove = sdebug_driver_remove,
	.drv_groups = sdebug_drv_groups,
};
7414