xref: /linux/drivers/scsi/scsi_debug.c (revision 88e45067a30918ebb4942120892963e2311330af)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
 *  Copyright (C) 1992  Eric Youngdale
 *  Simulate a host adapter with 2 disks attached.  Do a lot of checking
 *  to make sure that we are not getting blocks mixed up, and PANIC if
 *  anything out of the ordinary is seen.
 * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 *
 * Copyright (C) 2001 - 2021 Douglas Gilbert
 *
 *  For documentation see http://sg.danny.cz/sg/scsi_debug.html
 */
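
/*
 * Editor's note (illustrative; see the web page above for the full
 * parameter list): the driver is normally exercised by loading it with a
 * few module parameters, e.g.
 *
 *	modprobe scsi_debug dev_size_mb=256 num_tgts=2 max_luns=4
 *
 * which should create one pseudo host with two targets of four LUNs
 * each, backed by a 256 MiB ram store.
 */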


#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__

#include <linux/module.h>
#include <linux/align.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/vmalloc.h>
#include <linux/moduleparam.h>
#include <linux/scatterlist.h>
#include <linux/blkdev.h>
#include <linux/crc-t10dif.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/atomic.h>
#include <linux/hrtimer.h>
#include <linux/uuid.h>
#include <linux/t10-pi.h>
#include <linux/msdos_partition.h>
#include <linux/random.h>
#include <linux/xarray.h>
#include <linux/prefetch.h>
#include <linux/debugfs.h>
#include <linux/async.h>
#include <linux/cleanup.h>

#include <net/checksum.h>

#include <linux/unaligned.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsicam.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_dbg.h>

#include "sd.h"
#include "scsi_logging.h"

/* make sure inq_product_rev string corresponds to this version */
#define SDEBUG_VERSION "0191"	/* format to fit INQUIRY revision field */
static const char *sdebug_version_date = "20210520";

#define MY_NAME "scsi_debug"

/* Additional Sense Code (ASC) */
#define NO_ADDITIONAL_SENSE 0x0
#define OVERLAP_ATOMIC_COMMAND_ASC 0x0
#define OVERLAP_ATOMIC_COMMAND_ASCQ 0x23
#define LOGICAL_UNIT_NOT_READY 0x4
#define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
#define UNRECOVERED_READ_ERR 0x11
#define PARAMETER_LIST_LENGTH_ERR 0x1a
#define INVALID_OPCODE 0x20
#define LBA_OUT_OF_RANGE 0x21
#define INVALID_FIELD_IN_CDB 0x24
#define INVALID_FIELD_IN_PARAM_LIST 0x26
#define WRITE_PROTECTED 0x27
#define UA_RESET_ASC 0x29
#define UA_CHANGED_ASC 0x2a
#define TARGET_CHANGED_ASC 0x3f
#define LUNS_CHANGED_ASCQ 0x0e
#define INSUFF_RES_ASC 0x55
#define INSUFF_RES_ASCQ 0x3
#define POWER_ON_RESET_ASCQ 0x0
#define POWER_ON_OCCURRED_ASCQ 0x1
#define BUS_RESET_ASCQ 0x2	/* scsi bus reset occurred */
#define MODE_CHANGED_ASCQ 0x1	/* mode parameters changed */
#define CAPACITY_CHANGED_ASCQ 0x9
#define SAVING_PARAMS_UNSUP 0x39
#define TRANSPORT_PROBLEM 0x4b
#define THRESHOLD_EXCEEDED 0x5d
#define LOW_POWER_COND_ON 0x5e
#define MISCOMPARE_VERIFY_ASC 0x1d
#define MICROCODE_CHANGED_ASCQ 0x1	/* with TARGET_CHANGED_ASC */
#define MICROCODE_CHANGED_WO_RESET_ASCQ 0x16
#define WRITE_ERROR_ASC 0xc
#define UNALIGNED_WRITE_ASCQ 0x4
#define WRITE_BOUNDARY_ASCQ 0x5
#define READ_INVDATA_ASCQ 0x6
#define READ_BOUNDARY_ASCQ 0x7
#define ATTEMPT_ACCESS_GAP 0x9
#define INSUFF_ZONE_ASCQ 0xe
/* see drivers/scsi/sense_codes.h */

/* Additional Sense Code Qualifier (ASCQ) */
#define ACK_NAK_TO 0x3

/* Default values for driver parameters */
#define DEF_NUM_HOST   1
#define DEF_NUM_TGTS   1
#define DEF_MAX_LUNS   1
/* With these defaults, this driver will make 1 host with 1 target
 * (id 0) containing 1 logical unit (lun 0). That is 1 device.
 */
#define DEF_ATO 1
#define DEF_CDB_LEN 10
#define DEF_JDELAY   1		/* if > 0 unit is a jiffy */
#define DEF_DEV_SIZE_PRE_INIT   0
#define DEF_DEV_SIZE_MB   8
#define DEF_ZBC_DEV_SIZE_MB   128
#define DEF_DIF 0
#define DEF_DIX 0
#define DEF_PER_HOST_STORE false
#define DEF_D_SENSE   0
#define DEF_EVERY_NTH   0
#define DEF_FAKE_RW	0
#define DEF_GUARD 0
#define DEF_HOST_LOCK 0
#define DEF_LBPU 0
#define DEF_LBPWS 0
#define DEF_LBPWS10 0
#define DEF_LBPRZ 1
#define DEF_LOWEST_ALIGNED 0
#define DEF_NDELAY   0		/* if > 0 unit is a nanosecond */
#define DEF_NO_LUN_0   0
#define DEF_NUM_PARTS   0
#define DEF_OPTS   0
#define DEF_OPT_BLKS 1024
#define DEF_PHYSBLK_EXP 0
#define DEF_OPT_XFERLEN_EXP 0
#define DEF_PTYPE   TYPE_DISK
#define DEF_RANDOM false
#define DEF_REMOVABLE false
#define DEF_SCSI_LEVEL   7    /* INQUIRY, byte2 [6->SPC-4; 7->SPC-5] */
#define DEF_SECTOR_SIZE 512
#define DEF_UNMAP_ALIGNMENT 0
#define DEF_UNMAP_GRANULARITY 1
#define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
#define DEF_UNMAP_MAX_DESC 256
#define DEF_VIRTUAL_GB   0
#define DEF_VPD_USE_HOSTNO 1
#define DEF_WRITESAME_LENGTH 0xFFFF
#define DEF_ATOMIC_WR 0
#define DEF_ATOMIC_WR_MAX_LENGTH 8192
#define DEF_ATOMIC_WR_ALIGN 2
#define DEF_ATOMIC_WR_GRAN 2
#define DEF_ATOMIC_WR_MAX_LENGTH_BNDRY (DEF_ATOMIC_WR_MAX_LENGTH)
#define DEF_ATOMIC_WR_MAX_BNDRY 128
#define DEF_STRICT 0
#define DEF_STATISTICS false
#define DEF_SUBMIT_QUEUES 1
#define DEF_TUR_MS_TO_READY 0
#define DEF_UUID_CTL 0
#define JDELAY_OVERRIDDEN -9999

/* Default parameters for ZBC drives */
#define DEF_ZBC_ZONE_SIZE_MB	128
#define DEF_ZBC_MAX_OPEN_ZONES	8
#define DEF_ZBC_NR_CONV_ZONES	1

#define SDEBUG_LUN_0_VAL 0

/* bit mask values for sdebug_opts */
#define SDEBUG_OPT_NOISE		1
#define SDEBUG_OPT_MEDIUM_ERR		2
#define SDEBUG_OPT_TIMEOUT		4
#define SDEBUG_OPT_RECOVERED_ERR	8
#define SDEBUG_OPT_TRANSPORT_ERR	16
#define SDEBUG_OPT_DIF_ERR		32
#define SDEBUG_OPT_DIX_ERR		64
#define SDEBUG_OPT_MAC_TIMEOUT		128
#define SDEBUG_OPT_SHORT_TRANSFER	0x100
#define SDEBUG_OPT_Q_NOISE		0x200
#define SDEBUG_OPT_ALL_TSF		0x400	/* ignore */
#define SDEBUG_OPT_RARE_TSF		0x800
#define SDEBUG_OPT_N_WCE		0x1000
#define SDEBUG_OPT_RESET_NOISE		0x2000
#define SDEBUG_OPT_NO_CDB_NOISE		0x4000
#define SDEBUG_OPT_HOST_BUSY		0x8000
#define SDEBUG_OPT_CMD_ABORT		0x10000
#define SDEBUG_OPT_ALL_NOISE (SDEBUG_OPT_NOISE | SDEBUG_OPT_Q_NOISE | \
			      SDEBUG_OPT_RESET_NOISE)
#define SDEBUG_OPT_ALL_INJECTING (SDEBUG_OPT_RECOVERED_ERR | \
				  SDEBUG_OPT_TRANSPORT_ERR | \
				  SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR | \
				  SDEBUG_OPT_SHORT_TRANSFER | \
				  SDEBUG_OPT_HOST_BUSY | \
				  SDEBUG_OPT_CMD_ABORT)
#define SDEBUG_OPT_RECOV_DIF_DIX (SDEBUG_OPT_RECOVERED_ERR | \
				  SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR)
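
/*
 * Editor's example: sdebug_opts is a plain OR of the bits above, so
 * opts=0x5 asks for SDEBUG_OPT_NOISE (1) together with
 * SDEBUG_OPT_TIMEOUT (4).
 */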

/* As indicated in SAM-5 and SPC-4, Unit Attentions (UAs) are returned in
 * priority order. In the subset implemented here lower numbers have higher
 * priority. The UA numbers should be a sequence starting from 0 with
 * SDEBUG_NUM_UAS being 1 higher than the highest numbered UA. */
#define SDEBUG_UA_POR 0		/* Power on, reset, or bus device reset */
#define SDEBUG_UA_POOCCUR 1	/* Power on occurred */
#define SDEBUG_UA_BUS_RESET 2
#define SDEBUG_UA_MODE_CHANGED 3
#define SDEBUG_UA_CAPACITY_CHANGED 4
#define SDEBUG_UA_LUNS_CHANGED 5
#define SDEBUG_UA_MICROCODE_CHANGED 6	/* simulate firmware change */
#define SDEBUG_UA_MICROCODE_CHANGED_WO_RESET 7
#define SDEBUG_NUM_UAS 8
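
/*
 * Editor's note: because lower numbers mean higher priority, the pending
 * UA to report is simply the first set bit in a device's uas_bm, which is
 * how make_ua() further below picks it (via find_first_bit()).
 */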

/* when the SDEBUG_OPT_MEDIUM_ERR flag is set in sdebug_opts, a medium
 * error is simulated at this sector on read commands: */
#define OPT_MEDIUM_ERR_ADDR   0x1234 /* that's sector 4660 in decimal */
#define OPT_MEDIUM_ERR_NUM    10     /* number of consecutive medium errs */

/* SDEBUG_CANQUEUE is the maximum number of commands that can be queued
 * (for response) per submit queue at one time. Can be reduced by max_queue
 * option. Command responses are not queued when jdelay=0 and ndelay=0. The
 * per-device DEF_CMD_PER_LUN can be changed via sysfs:
 * /sys/class/scsi_device/<h:c:t:l>/device/queue_depth
 * but cannot exceed SDEBUG_CANQUEUE.
 */
#define SDEBUG_CANQUEUE_WORDS  3	/* a WORD is BITS_PER_LONG bits */
#define SDEBUG_CANQUEUE  (SDEBUG_CANQUEUE_WORDS * BITS_PER_LONG)
#define DEF_CMD_PER_LUN  SDEBUG_CANQUEUE
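
/*
 * Editor's worked example: on a 64-bit build BITS_PER_LONG is 64, so
 * SDEBUG_CANQUEUE is 3 * 64 = 192 queued commands per submit queue.
 */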

/* UA - Unit Attention; SA - Service Action; SSU - Start Stop Unit */
#define F_D_IN			1	/* Data-in command (e.g. READ) */
#define F_D_OUT			2	/* Data-out command (e.g. WRITE) */
#define F_D_OUT_MAYBE		4	/* WRITE SAME, NDOB bit */
#define F_D_UNKN		8
#define F_RL_WLUN_OK		0x10	/* allowed with REPORT LUNS W-LUN */
#define F_SKIP_UA		0x20	/* bypass UAs (e.g. INQUIRY command) */
#define F_DELAY_OVERR		0x40	/* for commands like INQUIRY */
#define F_SA_LOW		0x80	/* SA is in cdb byte 1, bits 4 to 0 */
#define F_SA_HIGH		0x100	/* SA is in cdb bytes 8 and 9 */
#define F_INV_OP		0x200	/* invalid opcode (not supported) */
#define F_FAKE_RW		0x400	/* bypass resp_*() when fake_rw set */
#define F_M_ACCESS		0x800	/* media access, reacts to SSU state */
#define F_SSU_DELAY		0x1000	/* SSU command delay (long-ish) */
#define F_SYNC_DELAY		0x2000	/* SYNCHRONIZE CACHE delay */

/* Useful combinations of the above flags */
#define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
#define FF_MEDIA_IO (F_M_ACCESS | F_FAKE_RW)
#define FF_SA (F_SA_HIGH | F_SA_LOW)
#define F_LONG_DELAY		(F_SSU_DELAY | F_SYNC_DELAY)

#define SDEBUG_MAX_PARTS 4

#define SDEBUG_MAX_CMD_LEN 32

#define SDEB_XA_NOT_IN_USE XA_MARK_1

static struct kmem_cache *queued_cmd_cache;

#define TO_QUEUED_CMD(scmd)  ((void *)(scmd)->host_scribble)
#define ASSIGN_QUEUED_CMD(scmnd, qc) { (scmnd)->host_scribble = (void *) qc; }

/* Zone types (zbcr05 table 25) */
enum sdebug_z_type {
	ZBC_ZTYPE_CNV	= 0x1,
	ZBC_ZTYPE_SWR	= 0x2,
	ZBC_ZTYPE_SWP	= 0x3,
	/* ZBC_ZTYPE_SOBR = 0x4, */
	ZBC_ZTYPE_GAP	= 0x5,
};

/* enumeration names taken from table 26, zbcr05 */
enum sdebug_z_cond {
	ZBC_NOT_WRITE_POINTER	= 0x0,
	ZC1_EMPTY		= 0x1,
	ZC2_IMPLICIT_OPEN	= 0x2,
	ZC3_EXPLICIT_OPEN	= 0x3,
	ZC4_CLOSED		= 0x4,
	ZC6_READ_ONLY		= 0xd,
	ZC5_FULL		= 0xe,
	ZC7_OFFLINE		= 0xf,
};

struct sdeb_zone_state {	/* ZBC: per zone state */
	enum sdebug_z_type z_type;
	enum sdebug_z_cond z_cond;
	bool z_non_seq_resource;
	unsigned int z_size;
	sector_t z_start;
	sector_t z_wp;
};

enum sdebug_err_type {
	ERR_TMOUT_CMD		= 0,	/* make specific scsi command timeout */
	ERR_FAIL_QUEUE_CMD	= 1,	/* make specific scsi command's */
					/* queuecmd return failed */
	ERR_FAIL_CMD		= 2,	/* make specific scsi command's */
					/* queuecmd return succeed but */
					/* with errors set in scsi_cmnd */
	ERR_ABORT_CMD_FAILED	= 3,	/* control return FAILED from */
					/* scsi_debug_abort() */
	ERR_LUN_RESET_FAILED	= 4,	/* control return FAILED from */
					/* scsi_debug_device_reset() */
};

struct sdebug_err_inject {
	int type;
	struct list_head list;
	int cnt;
	unsigned char cmd;
	struct rcu_head rcu;

	union {
		/*
		 * For ERR_FAIL_QUEUE_CMD
		 */
		int queuecmd_ret;

		/*
		 * For ERR_FAIL_CMD
		 */
		struct {
			unsigned char host_byte;
			unsigned char driver_byte;
			unsigned char status_byte;
			unsigned char sense_key;
			unsigned char asc;
			unsigned char asq;
		};
	};
};

struct sdebug_dev_info {
	struct list_head dev_list;
	unsigned int channel;
	unsigned int target;
	u64 lun;
	uuid_t lu_name;
	struct sdebug_host_info *sdbg_host;
	unsigned long uas_bm[1];
	atomic_t stopped;	/* 1: by SSU, 2: device start */
	bool used;

	/* For ZBC devices */
	bool zoned;
	unsigned int zcap;
	unsigned int zsize;
	unsigned int zsize_shift;
	unsigned int nr_zones;
	unsigned int nr_conv_zones;
	unsigned int nr_seq_zones;
	unsigned int nr_imp_open;
	unsigned int nr_exp_open;
	unsigned int nr_closed;
	unsigned int max_open;
	ktime_t create_ts;	/* time since bootup that this device was created */
	struct sdeb_zone_state *zstate;

	struct dentry *debugfs_entry;
	struct spinlock list_lock;
	struct list_head inject_err_list;
};

struct sdebug_target_info {
	bool reset_fail;
	struct dentry *debugfs_entry;
};

struct sdebug_host_info {
	struct list_head host_list;
	int si_idx;	/* sdeb_store_info (per host) xarray index */
	struct Scsi_Host *shost;
	struct device dev;
	struct list_head dev_info_list;
};

/* There is an xarray of pointers to this struct's objects, one per host */
struct sdeb_store_info {
	rwlock_t macc_data_lck;	/* for media data access on this store */
	rwlock_t macc_meta_lck;	/* for atomic media meta access on this store */
	rwlock_t macc_sector_lck;	/* per-sector media data access on this store */
	u8 *storep;		/* user data storage (ram) */
	struct t10_pi_tuple *dif_storep; /* protection info */
	void *map_storep;	/* provisioning map */
};

#define dev_to_sdebug_host(d)	\
	container_of(d, struct sdebug_host_info, dev)

#define shost_to_sdebug_host(shost)	\
	dev_to_sdebug_host(shost->dma_dev)

enum sdeb_defer_type {SDEB_DEFER_NONE = 0, SDEB_DEFER_HRT = 1,
		      SDEB_DEFER_WQ = 2, SDEB_DEFER_POLL = 3};

struct sdebug_defer {
	struct hrtimer hrt;
	struct execute_work ew;
	ktime_t cmpl_ts;	/* time since boot to complete this cmd */
	int issuing_cpu;
	bool aborted;	/* true when blk_abort_request() already called */
	enum sdeb_defer_type defer_t;
};

struct sdebug_device_access_info {
	bool atomic_write;
	u64 lba;
	u32 num;
	struct scsi_cmnd *self;
};

struct sdebug_queued_cmd {
	/* corresponding bit set in in_use_bm[] in owning struct sdebug_queue
	 * instance indicates this slot is in use.
	 */
	struct sdebug_defer sd_dp;
	struct scsi_cmnd *scmd;
	struct sdebug_device_access_info *i;
};

struct sdebug_scsi_cmd {
	spinlock_t   lock;
};

static atomic_t sdebug_cmnd_count;   /* number of incoming commands */
static atomic_t sdebug_completions;  /* count of deferred completions */
static atomic_t sdebug_miss_cpus;    /* submission + completion cpus differ */
static atomic_t sdebug_a_tsf;	     /* 'almost task set full' counter */
static atomic_t sdeb_inject_pending;
static atomic_t sdeb_mq_poll_count;  /* bumped when mq_poll returns > 0 */

struct opcode_info_t {
	u8 num_attached;	/* 0 if this is it (i.e. a leaf); use 0xff */
				/* for terminating element */
	u8 opcode;		/* if num_attached > 0, preferred */
	u16 sa;			/* service action */
	u32 flags;		/* OR-ed set of SDEB_F_* */
	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
	const struct opcode_info_t *arrp;  /* num_attached elements or NULL */
	u8 len_mask[16];	/* len_mask[0]-->cdb_len, then mask for cdb */
				/* 1 to min(cdb_len, 15); ignore cdb[15...] */
};
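
/*
 * Editor's sketch (an assumption about how len_mask is meant to be used,
 * mirroring the strict-mode CDB check elsewhere in the driver): byte k of
 * a CDB is acceptable when it sets no bits outside len_mask[k].
 */
static inline bool sdeb_example_cdb_byte_ok(const struct opcode_info_t *oip,
					    const u8 *cdb, int k)
{
	return (cdb[k] & ~oip->len_mask[k]) == 0;
}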

/* SCSI opcodes (first byte of cdb) of interest mapped onto these indexes */
enum sdeb_opcode_index {
	SDEB_I_INVALID_OPCODE =	0,
	SDEB_I_INQUIRY = 1,
	SDEB_I_REPORT_LUNS = 2,
	SDEB_I_REQUEST_SENSE = 3,
	SDEB_I_TEST_UNIT_READY = 4,
	SDEB_I_MODE_SENSE = 5,		/* 6, 10 */
	SDEB_I_MODE_SELECT = 6,		/* 6, 10 */
	SDEB_I_LOG_SENSE = 7,
	SDEB_I_READ_CAPACITY = 8,	/* 10; 16 is in SA_IN(16) */
	SDEB_I_READ = 9,		/* 6, 10, 12, 16 */
	SDEB_I_WRITE = 10,		/* 6, 10, 12, 16 */
	SDEB_I_START_STOP = 11,
	SDEB_I_SERV_ACT_IN_16 = 12,	/* add ...SERV_ACT_IN_12 if needed */
	SDEB_I_SERV_ACT_OUT_16 = 13,	/* add ...SERV_ACT_OUT_12 if needed */
	SDEB_I_MAINT_IN = 14,
	SDEB_I_MAINT_OUT = 15,
	SDEB_I_VERIFY = 16,		/* VERIFY(10), VERIFY(16) */
	SDEB_I_VARIABLE_LEN = 17,	/* READ(32), WRITE(32), WR_SCAT(32) */
	SDEB_I_RESERVE = 18,		/* 6, 10 */
	SDEB_I_RELEASE = 19,		/* 6, 10 */
	SDEB_I_ALLOW_REMOVAL = 20,	/* PREVENT ALLOW MEDIUM REMOVAL */
	SDEB_I_REZERO_UNIT = 21,	/* REWIND in SSC */
	SDEB_I_ATA_PT = 22,		/* 12, 16 */
	SDEB_I_SEND_DIAG = 23,
	SDEB_I_UNMAP = 24,
	SDEB_I_WRITE_BUFFER = 25,
	SDEB_I_WRITE_SAME = 26,		/* 10, 16 */
	SDEB_I_SYNC_CACHE = 27,		/* 10, 16 */
	SDEB_I_COMP_WRITE = 28,
	SDEB_I_PRE_FETCH = 29,		/* 10, 16 */
	SDEB_I_ZONE_OUT = 30,		/* 0x94+SA; includes no data xfer */
	SDEB_I_ZONE_IN = 31,		/* 0x95+SA; all have data-in */
	SDEB_I_ATOMIC_WRITE_16 = 32,
	SDEB_I_LAST_ELEM_P1 = 33,	/* keep this last (previous + 1) */
};


static const unsigned char opcode_ind_arr[256] = {
/* 0x0; 0x0->0x1f: 6 byte cdbs */
	SDEB_I_TEST_UNIT_READY, SDEB_I_REZERO_UNIT, 0, SDEB_I_REQUEST_SENSE,
	    0, 0, 0, 0,
	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, 0,
	0, 0, SDEB_I_INQUIRY, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
	    SDEB_I_RELEASE,
	0, 0, SDEB_I_MODE_SENSE, SDEB_I_START_STOP, 0, SDEB_I_SEND_DIAG,
	    SDEB_I_ALLOW_REMOVAL, 0,
/* 0x20; 0x20->0x3f: 10 byte cdbs */
	0, 0, 0, 0, 0, SDEB_I_READ_CAPACITY, 0, 0,
	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, SDEB_I_VERIFY,
	0, 0, 0, 0, SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, 0,
	0, 0, 0, SDEB_I_WRITE_BUFFER, 0, 0, 0, 0,
/* 0x40; 0x40->0x5f: 10 byte cdbs */
	0, SDEB_I_WRITE_SAME, SDEB_I_UNMAP, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, SDEB_I_LOG_SENSE, 0, 0,
	0, 0, 0, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
	    SDEB_I_RELEASE,
	0, 0, SDEB_I_MODE_SENSE, 0, 0, 0, 0, 0,
/* 0x60; 0x60->0x7d are reserved, 0x7e is "extended cdb" */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, SDEB_I_VARIABLE_LEN,
/* 0x80; 0x80->0x9f: 16 byte cdbs */
	0, 0, 0, 0, 0, SDEB_I_ATA_PT, 0, 0,
	SDEB_I_READ, SDEB_I_COMP_WRITE, SDEB_I_WRITE, 0,
	0, 0, 0, SDEB_I_VERIFY,
	SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, SDEB_I_WRITE_SAME,
	SDEB_I_ZONE_OUT, SDEB_I_ZONE_IN, 0, 0,
	0, 0, 0, 0,
	SDEB_I_ATOMIC_WRITE_16, 0, SDEB_I_SERV_ACT_IN_16, SDEB_I_SERV_ACT_OUT_16,
/* 0xa0; 0xa0->0xbf: 12 byte cdbs */
	SDEB_I_REPORT_LUNS, SDEB_I_ATA_PT, 0, SDEB_I_MAINT_IN,
	     SDEB_I_MAINT_OUT, 0, 0, 0,
	SDEB_I_READ, 0 /* SDEB_I_SERV_ACT_OUT_12 */, SDEB_I_WRITE,
	     0 /* SDEB_I_SERV_ACT_IN_12 */, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
/* 0xc0; 0xc0->0xff: vendor specific */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
};
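
/*
 * Editor's example: a READ(10) CDB starts with 0x28, and the 0x20 row
 * above maps opcode_ind_arr[0x28] to SDEB_I_READ; that index then selects
 * the READ entry of opcode_info_arr[] below, whose read_iarr overflow
 * array holds the 6, 10 and 12 byte variants.
 */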

/*
 * The following "response" functions return the SCSI mid-level's 4 byte
 * tuple-in-an-int. To handle commands with an IMMED bit, for a faster
 * command completion, they can OR their return value with
 * SDEG_RES_IMMED_MASK.
 */
#define SDEG_RES_IMMED_MASK 0x40000000
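
/*
 * Editor's example: the low bytes follow the midlevel's usual layout
 * (compare illegal_condition_result further below, which is
 * (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION). A handler that honours
 * an IMMED bit can therefore return SAM_STAT_GOOD | SDEG_RES_IMMED_MASK
 * to ask for completion without the configured command delay (a sketch
 * of the convention; the delay handling itself is outside this excerpt).
 */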

static int resp_inquiry(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_report_luns(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_requests(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_mode_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_mode_select(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_log_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_readcap(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_read_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_scat(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_start_stop(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_readcap16(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_get_lba_status(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_get_stream_status(struct scsi_cmnd *scp,
				  struct sdebug_dev_info *devip);
static int resp_report_tgtpgs(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_unmap(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_rsup_opcodes(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_rsup_tmfs(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_verify(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_same_10(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_same_16(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_sync_cache(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_pre_fetch(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_report_zones(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_atomic_write(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_open_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_close_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_finish_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_rwp_zone(struct scsi_cmnd *, struct sdebug_dev_info *);

static int sdebug_do_add_host(bool mk_new_store);
static int sdebug_add_host_helper(int per_host_idx);
static void sdebug_do_remove_host(bool the_end);
static int sdebug_add_store(void);
static void sdebug_erase_store(int idx, struct sdeb_store_info *sip);
static void sdebug_erase_all_stores(bool apart_from_first);

static void sdebug_free_queued_cmd(struct sdebug_queued_cmd *sqcp);

/*
 * The following are overflow arrays for cdbs that "hit" the same index in
 * the opcode_info_arr array. The most time sensitive (or commonly used) cdb
 * should be placed in opcode_info_arr[], the others should be placed here.
 */
static const struct opcode_info_t msense_iarr[] = {
	{0, 0x1a, 0, F_D_IN, NULL, NULL,
	    {6,  0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t mselect_iarr[] = {
	{0, 0x15, 0, F_D_OUT, NULL, NULL,
	    {6,  0xf1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t read_iarr[] = {
	{0, 0x28, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(10) */
	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },
	{0, 0x8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL, /* READ(6) */
	    {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0xa8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(12) */
	    {12,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf,
	     0xc7, 0, 0, 0, 0} },
};

static const struct opcode_info_t write_iarr[] = {
	{0, 0x2a, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(10) */
	    NULL, {10,  0xfb, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7,
		   0, 0, 0, 0, 0, 0} },
	{0, 0xa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,   /* WRITE(6) */
	    NULL, {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0,
		   0, 0, 0} },
	{0, 0xaa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(12) */
	    NULL, {12,  0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		   0xbf, 0xc7, 0, 0, 0, 0} },
};

static const struct opcode_info_t verify_iarr[] = {
	{0, 0x2f, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,/* VERIFY(10) */
	    NULL, {10,  0xf7, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xff, 0xff, 0xc7,
		   0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t sa_in_16_iarr[] = {
	{0, 0x9e, 0x12, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL,
	    {16,  0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0, 0xc7} },	/* GET LBA STATUS(16) */
	{0, 0x9e, 0x16, F_SA_LOW | F_D_IN, resp_get_stream_status, NULL,
	    {16, 0x16, 0, 0, 0xff, 0xff, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff,
	     0, 0} },	/* GET STREAM STATUS */
};

static const struct opcode_info_t vl_iarr[] = {	/* VARIABLE LENGTH */
	{0, 0x7f, 0xb, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_dt0,
	    NULL, {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0xb, 0xfa,
		   0, 0xff, 0xff, 0xff, 0xff} },	/* WRITE(32) */
	{0, 0x7f, 0x11, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
	    NULL, {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x11, 0xf8,
		   0, 0xff, 0xff, 0x0, 0x0} },	/* WRITE SCATTERED(32) */
};

static const struct opcode_info_t maint_in_iarr[] = {	/* MAINT IN */
	{0, 0xa3, 0xc, F_SA_LOW | F_D_IN, resp_rsup_opcodes, NULL,
	    {12,  0xc, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
	     0xc7, 0, 0, 0, 0} }, /* REPORT SUPPORTED OPERATION CODES */
	{0, 0xa3, 0xd, F_SA_LOW | F_D_IN, resp_rsup_tmfs, NULL,
	    {12,  0xd, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
	     0, 0} },	/* REPORT SUPPORTED TASK MANAGEMENT FUNCTIONS */
};

static const struct opcode_info_t write_same_iarr[] = {
	{0, 0x93, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_write_same_16, NULL,
	    {16,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0x3f, 0xc7} },		/* WRITE SAME(16) */
};

static const struct opcode_info_t reserve_iarr[] = {
	{0, 0x16, 0, F_D_OUT, NULL, NULL,		/* RESERVE(6) */
	    {6,  0x1f, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t release_iarr[] = {
	{0, 0x17, 0, F_D_OUT, NULL, NULL,		/* RELEASE(6) */
	    {6,  0x1f, 0xff, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t sync_cache_iarr[] = {
	{0, 0x91, 0, F_SYNC_DELAY | F_M_ACCESS, resp_sync_cache, NULL,
	    {16,  0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* SYNC_CACHE (16) */
};

static const struct opcode_info_t pre_fetch_iarr[] = {
	{0, 0x90, 0, F_SYNC_DELAY | FF_MEDIA_IO, resp_pre_fetch, NULL,
	    {16,  0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* PRE-FETCH (16) */
};

static const struct opcode_info_t zone_out_iarr[] = {	/* ZONE OUT(16) */
	{0, 0x94, 0x1, F_SA_LOW | F_M_ACCESS, resp_close_zone, NULL,
	    {16, 0x1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },	/* CLOSE ZONE */
	{0, 0x94, 0x2, F_SA_LOW | F_M_ACCESS, resp_finish_zone, NULL,
	    {16, 0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },	/* FINISH ZONE */
	{0, 0x94, 0x4, F_SA_LOW | F_M_ACCESS, resp_rwp_zone, NULL,
	    {16, 0x4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },  /* RESET WRITE POINTER */
};

static const struct opcode_info_t zone_in_iarr[] = {	/* ZONE IN(16) */
	{0, 0x95, 0x6, F_SA_LOW | F_D_IN | F_M_ACCESS, NULL, NULL,
	    {16, 0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* REPORT ZONES */
};


/* This array is accessed via SDEB_I_* values. Make sure all are mapped,
 * plus the terminating elements for logic that scans this table such as
 * REPORT SUPPORTED OPERATION CODES. */
static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEM_P1 + 1] = {
/* 0 */
	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL,	/* unknown opcodes */
	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x12, 0, FF_RESPOND | F_D_IN, resp_inquiry, NULL, /* INQUIRY */
	    {6,  0xe3, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0xa0, 0, FF_RESPOND | F_D_IN, resp_report_luns, NULL,
	    {12,  0xe3, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
	     0, 0} },					/* REPORT LUNS */
	{0, 0x3, 0, FF_RESPOND | F_D_IN, resp_requests, NULL,
	    {6,  0xe1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x0, 0, F_M_ACCESS | F_RL_WLUN_OK, NULL, NULL,/* TEST UNIT READY */
	    {6,  0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
/* 5 */
	{ARRAY_SIZE(msense_iarr), 0x5a, 0, F_D_IN,	/* MODE SENSE(10) */
	    resp_mode_sense, msense_iarr, {10,  0xf8, 0xff, 0xff, 0, 0, 0,
		0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(mselect_iarr), 0x55, 0, F_D_OUT,	/* MODE SELECT(10) */
	    resp_mode_select, mselect_iarr, {10,  0xf1, 0, 0, 0, 0, 0, 0xff,
		0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
	{0, 0x4d, 0, F_D_IN, resp_log_sense, NULL,	/* LOG SENSE */
	    {10,  0xe3, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0,
	     0, 0, 0} },
	{0, 0x25, 0, F_D_IN, resp_readcap, NULL,    /* READ CAPACITY(10) */
	    {10,  0xe1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0x1, 0xc7, 0, 0, 0, 0,
	     0, 0} },
	{ARRAY_SIZE(read_iarr), 0x88, 0, F_D_IN | FF_MEDIA_IO, /* READ(16) */
	    resp_read_dt0, read_iarr, {16,  0xfe, 0xff, 0xff, 0xff, 0xff,
	    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
/* 10 */
	{ARRAY_SIZE(write_iarr), 0x8a, 0, F_D_OUT | FF_MEDIA_IO,
	    resp_write_dt0, write_iarr,			/* WRITE(16) */
		{16,  0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
	{0, 0x1b, 0, F_SSU_DELAY, resp_start_stop, NULL,/* START STOP UNIT */
	    {6,  0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(sa_in_16_iarr), 0x9e, 0x10, F_SA_LOW | F_D_IN,
	    resp_readcap16, sa_in_16_iarr, /* SA_IN(16), READ CAPACITY(16) */
		{16,  0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		 0xff, 0xff, 0xff, 0xff, 0x1, 0xc7} },
	{0, 0x9f, 0x12, F_SA_LOW | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
	    NULL, {16,  0x12, 0xf9, 0x0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff,
	    0xff, 0xff, 0xff, 0xff, 0xc7} },  /* SA_OUT(16), WRITE SCAT(16) */
	{ARRAY_SIZE(maint_in_iarr), 0xa3, 0xa, F_SA_LOW | F_D_IN,
	    resp_report_tgtpgs,	/* MAINT IN, REPORT TARGET PORT GROUPS */
		maint_in_iarr, {12,  0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff,
				0xff, 0, 0xc7, 0, 0, 0, 0} },
/* 15 */
	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */
	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(verify_iarr), 0x8f, 0,
	    F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,	/* VERIFY(16) */
	    verify_iarr, {16,  0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
			  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },
	{ARRAY_SIZE(vl_iarr), 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_MEDIA_IO,
	    resp_read_dt0, vl_iarr,	/* VARIABLE LENGTH, READ(32) */
	    {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x9, 0xfe, 0, 0xff, 0xff,
	     0xff, 0xff} },
	{ARRAY_SIZE(reserve_iarr), 0x56, 0, F_D_OUT,
	    NULL, reserve_iarr,	/* RESERVE(10) <no response function> */
	    {10,  0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
	     0} },
	{ARRAY_SIZE(release_iarr), 0x57, 0, F_D_OUT,
	    NULL, release_iarr, /* RELEASE(10) <no response function> */
	    {10,  0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
	     0} },
/* 20 */
	{0, 0x1e, 0, 0, NULL, NULL, /* ALLOW REMOVAL */
	    {6,  0, 0, 0, 0x3, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x1, 0, 0, resp_start_stop, NULL, /* REWIND ?? */
	    {6,  0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */
	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x1d, 0, F_D_OUT, NULL, NULL,	/* SEND DIAGNOSTIC */
	    {6,  0xf7, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x42, 0, F_D_OUT | FF_MEDIA_IO, resp_unmap, NULL, /* UNMAP */
	    {10,  0x1, 0, 0, 0, 0, 0x3f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
/* 25 */
	{0, 0x3b, 0, F_D_OUT_MAYBE, resp_write_buffer, NULL,
	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },			/* WRITE_BUFFER */
	{ARRAY_SIZE(write_same_iarr), 0x41, 0, F_D_OUT_MAYBE | FF_MEDIA_IO,
	    resp_write_same_10, write_same_iarr,	/* WRITE SAME(10) */
		{10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0,
		 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(sync_cache_iarr), 0x35, 0, F_SYNC_DELAY | F_M_ACCESS,
	    resp_sync_cache, sync_cache_iarr,
	    {10,  0x7, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },			/* SYNC_CACHE (10) */
	{0, 0x89, 0, F_D_OUT | FF_MEDIA_IO, resp_comp_write, NULL,
	    {16,  0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0,
	     0, 0xff, 0x3f, 0xc7} },		/* COMPARE AND WRITE */
	{ARRAY_SIZE(pre_fetch_iarr), 0x34, 0, F_SYNC_DELAY | FF_MEDIA_IO,
	    resp_pre_fetch, pre_fetch_iarr,
	    {10,  0x2, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },			/* PRE-FETCH (10) */

/* 30 */
	{ARRAY_SIZE(zone_out_iarr), 0x94, 0x3, F_SA_LOW | F_M_ACCESS,
	    resp_open_zone, zone_out_iarr, /* ZONE_OUT(16), OPEN ZONE */
		{16,  0x3 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		 0xff, 0xff, 0x0, 0x0, 0xff, 0xff, 0x1, 0xc7} },
	{ARRAY_SIZE(zone_in_iarr), 0x95, 0x0, F_SA_LOW | F_M_ACCESS,
	    resp_report_zones, zone_in_iarr, /* ZONE_IN(16), REPORT ZONES */
		{16,  0x0 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xc7} },
/* 31 */
	{0, 0x9c, 0x0, F_D_OUT | FF_MEDIA_IO,
	    resp_atomic_write, NULL, /* ATOMIC WRITE 16 */
		{16,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff} },
/* sentinel */
	{0xff, 0, 0, 0, NULL, NULL,		/* terminating element */
	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};
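
/*
 * Editor's sketch (a simplified model, not the driver's actual lookup,
 * which lives in the queuecommand path outside this excerpt): how the
 * two-level tables fit together. cdb[0] indexes opcode_ind_arr[]; the
 * selected opcode_info_t is either an exact match or the head of a short
 * chain that is disambiguated by opcode or by service action.
 */
static inline const struct opcode_info_t *
sdeb_example_lookup(const unsigned char *cdb)
{
	const struct opcode_info_t *oip =
		&opcode_info_arr[opcode_ind_arr[cdb[0]]];
	int k;

	if (oip->flags & FF_SA) {	/* disambiguate on service action */
		u16 sa = (oip->flags & F_SA_LOW) ? (cdb[1] & 0x1f) :
					get_unaligned_be16(cdb + 8);

		if (sa == oip->sa)
			return oip;
		for (k = 0; k < oip->num_attached; ++k)
			if (oip->arrp[k].sa == sa)
				return &oip->arrp[k];
	} else {			/* disambiguate on opcode */
		if (cdb[0] == oip->opcode)
			return oip;
		for (k = 0; k < oip->num_attached; ++k)
			if (oip->arrp[k].opcode == cdb[0])
				return &oip->arrp[k];
	}
	return &opcode_info_arr[SDEB_I_INVALID_OPCODE];
}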

static int sdebug_num_hosts;
static int sdebug_add_host = DEF_NUM_HOST;  /* in sysfs this is relative */
static int sdebug_ato = DEF_ATO;
static int sdebug_cdb_len = DEF_CDB_LEN;
static int sdebug_jdelay = DEF_JDELAY;	/* if > 0 then unit is jiffies */
static int sdebug_dev_size_mb = DEF_DEV_SIZE_PRE_INIT;
static int sdebug_dif = DEF_DIF;
static int sdebug_dix = DEF_DIX;
static int sdebug_dsense = DEF_D_SENSE;
static int sdebug_every_nth = DEF_EVERY_NTH;
static int sdebug_fake_rw = DEF_FAKE_RW;
static unsigned int sdebug_guard = DEF_GUARD;
static int sdebug_host_max_queue;	/* per host */
static int sdebug_lowest_aligned = DEF_LOWEST_ALIGNED;
static int sdebug_max_luns = DEF_MAX_LUNS;
static int sdebug_max_queue = SDEBUG_CANQUEUE;	/* per submit queue */
static unsigned int sdebug_medium_error_start = OPT_MEDIUM_ERR_ADDR;
static int sdebug_medium_error_count = OPT_MEDIUM_ERR_NUM;
static int sdebug_ndelay = DEF_NDELAY;	/* if > 0 then unit is nanoseconds */
static int sdebug_no_lun_0 = DEF_NO_LUN_0;
static int sdebug_no_uld;
static int sdebug_num_parts = DEF_NUM_PARTS;
static int sdebug_num_tgts = DEF_NUM_TGTS; /* targets per host */
static int sdebug_opt_blks = DEF_OPT_BLKS;
static int sdebug_opts = DEF_OPTS;
static int sdebug_physblk_exp = DEF_PHYSBLK_EXP;
static int sdebug_opt_xferlen_exp = DEF_OPT_XFERLEN_EXP;
static int sdebug_ptype = DEF_PTYPE; /* SCSI peripheral device type */
static int sdebug_scsi_level = DEF_SCSI_LEVEL;
static int sdebug_sector_size = DEF_SECTOR_SIZE;
static int sdeb_tur_ms_to_ready = DEF_TUR_MS_TO_READY;
static int sdebug_virtual_gb = DEF_VIRTUAL_GB;
static int sdebug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
static unsigned int sdebug_lbpu = DEF_LBPU;
static unsigned int sdebug_lbpws = DEF_LBPWS;
static unsigned int sdebug_lbpws10 = DEF_LBPWS10;
static unsigned int sdebug_lbprz = DEF_LBPRZ;
static unsigned int sdebug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
static unsigned int sdebug_unmap_granularity = DEF_UNMAP_GRANULARITY;
static unsigned int sdebug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
static unsigned int sdebug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
static unsigned int sdebug_write_same_length = DEF_WRITESAME_LENGTH;
static unsigned int sdebug_atomic_wr = DEF_ATOMIC_WR;
static unsigned int sdebug_atomic_wr_max_length = DEF_ATOMIC_WR_MAX_LENGTH;
static unsigned int sdebug_atomic_wr_align = DEF_ATOMIC_WR_ALIGN;
static unsigned int sdebug_atomic_wr_gran = DEF_ATOMIC_WR_GRAN;
static unsigned int sdebug_atomic_wr_max_length_bndry =
			DEF_ATOMIC_WR_MAX_LENGTH_BNDRY;
static unsigned int sdebug_atomic_wr_max_bndry = DEF_ATOMIC_WR_MAX_BNDRY;
static int sdebug_uuid_ctl = DEF_UUID_CTL;
static bool sdebug_random = DEF_RANDOM;
static bool sdebug_per_host_store = DEF_PER_HOST_STORE;
static bool sdebug_removable = DEF_REMOVABLE;
static bool sdebug_clustering;
static bool sdebug_host_lock = DEF_HOST_LOCK;
static bool sdebug_strict = DEF_STRICT;
static bool sdebug_any_injecting_opt;
static bool sdebug_no_rwlock;
static bool sdebug_verbose;
static bool have_dif_prot;
static bool write_since_sync;
static bool sdebug_statistics = DEF_STATISTICS;
static bool sdebug_wp;
static bool sdebug_allow_restart;
static enum {
	BLK_ZONED_NONE	= 0,
	BLK_ZONED_HA	= 1,
	BLK_ZONED_HM	= 2,
} sdeb_zbc_model = BLK_ZONED_NONE;
static char *sdeb_zbc_model_s;

enum sam_lun_addr_method {SAM_LUN_AM_PERIPHERAL = 0x0,
			  SAM_LUN_AM_FLAT = 0x1,
			  SAM_LUN_AM_LOGICAL_UNIT = 0x2,
			  SAM_LUN_AM_EXTENDED = 0x3};
static enum sam_lun_addr_method sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
static int sdebug_lun_am_i = (int)SAM_LUN_AM_PERIPHERAL;

static unsigned int sdebug_store_sectors;
static sector_t sdebug_capacity;	/* in sectors */

/* old BIOS stuff, kernel may get rid of them but some mode sense pages
   may still need them */
static int sdebug_heads;		/* heads per disk */
static int sdebug_cylinders_per;	/* cylinders per surface */
static int sdebug_sectors_per;		/* sectors per cylinder */

static LIST_HEAD(sdebug_host_list);
static DEFINE_MUTEX(sdebug_host_list_mutex);

static struct xarray per_store_arr;
static struct xarray *per_store_ap = &per_store_arr;
static int sdeb_first_idx = -1;		/* invalid index ==> none created */
static int sdeb_most_recent_idx = -1;
static DEFINE_RWLOCK(sdeb_fake_rw_lck);	/* need a RW lock when fake_rw=1 */

static unsigned long map_size;
static int num_aborts;
static int num_dev_resets;
static int num_target_resets;
static int num_bus_resets;
static int num_host_resets;
static int dix_writes;
static int dix_reads;
static int dif_errors;

/* ZBC global data */
static bool sdeb_zbc_in_use;	/* true for host-aware and host-managed disks */
static int sdeb_zbc_zone_cap_mb;
static int sdeb_zbc_zone_size_mb;
static int sdeb_zbc_max_open = DEF_ZBC_MAX_OPEN_ZONES;
static int sdeb_zbc_nr_conv = DEF_ZBC_NR_CONV_ZONES;

static int submit_queues = DEF_SUBMIT_QUEUES;  /* > 1 for multi-queue (mq) */
static int poll_queues; /* io_uring iopoll interface */

static atomic_long_t writes_by_group_number[64];

static char sdebug_proc_name[] = MY_NAME;
static const char *my_name = MY_NAME;

static const struct bus_type pseudo_lld_bus;

static struct device_driver sdebug_driverfs_driver = {
	.name		= sdebug_proc_name,
	.bus		= &pseudo_lld_bus,
};

static const int check_condition_result =
	SAM_STAT_CHECK_CONDITION;

static const int illegal_condition_result =
	(DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;

static const int device_qfull_result =
	(DID_ABORT << 16) | SAM_STAT_TASK_SET_FULL;

static const int condition_met_result = SAM_STAT_CONDITION_MET;

static struct dentry *sdebug_debugfs_root;
static ASYNC_DOMAIN_EXCLUSIVE(sdebug_async_domain);

static void sdebug_err_free(struct rcu_head *head)
{
	struct sdebug_err_inject *inject =
		container_of(head, typeof(*inject), rcu);

	kfree(inject);
}

static void sdebug_err_add(struct scsi_device *sdev, struct sdebug_err_inject *new)
{
	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdev->hostdata;
	struct sdebug_err_inject *err;

	spin_lock(&devip->list_lock);
	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
		if (err->type == new->type && err->cmd == new->cmd) {
			list_del_rcu(&err->list);
			call_rcu(&err->rcu, sdebug_err_free);
		}
	}

	list_add_tail_rcu(&new->list, &devip->inject_err_list);
	spin_unlock(&devip->list_lock);
}

static int sdebug_err_remove(struct scsi_device *sdev, const char *buf, size_t count)
{
	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdev->hostdata;
	struct sdebug_err_inject *err;
	int type;
	unsigned char cmd;

	if (sscanf(buf, "- %d %hhx", &type, &cmd) != 2) {
		kfree(buf);
		return -EINVAL;
	}

	spin_lock(&devip->list_lock);
	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
		if (err->type == type && err->cmd == cmd) {
			list_del_rcu(&err->list);
			call_rcu(&err->rcu, sdebug_err_free);
			spin_unlock(&devip->list_lock);
			kfree(buf);
			return count;
		}
	}
	spin_unlock(&devip->list_lock);

	kfree(buf);
	return -EINVAL;
}

static int sdebug_error_show(struct seq_file *m, void *p)
{
	struct scsi_device *sdev = (struct scsi_device *)m->private;
	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdev->hostdata;
	struct sdebug_err_inject *err;

	seq_puts(m, "Type\tCount\tCommand\n");

	rcu_read_lock();
	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
		switch (err->type) {
		case ERR_TMOUT_CMD:
		case ERR_ABORT_CMD_FAILED:
		case ERR_LUN_RESET_FAILED:
			seq_printf(m, "%d\t%d\t0x%x\n", err->type, err->cnt,
				err->cmd);
		break;

		case ERR_FAIL_QUEUE_CMD:
			seq_printf(m, "%d\t%d\t0x%x\t0x%x\n", err->type,
				err->cnt, err->cmd, err->queuecmd_ret);
		break;

		case ERR_FAIL_CMD:
			seq_printf(m, "%d\t%d\t0x%x\t0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
				err->type, err->cnt, err->cmd,
				err->host_byte, err->driver_byte,
				err->status_byte, err->sense_key,
				err->asc, err->asq);
		break;
		}
	}
	rcu_read_unlock();

	return 0;
}

static int sdebug_error_open(struct inode *inode, struct file *file)
{
	return single_open(file, sdebug_error_show, inode->i_private);
}

static ssize_t sdebug_error_write(struct file *file, const char __user *ubuf,
		size_t count, loff_t *ppos)
{
	char *buf;
	unsigned int inject_type;
	struct sdebug_err_inject *inject;
	struct scsi_device *sdev = (struct scsi_device *)file->f_inode->i_private;

	buf = kzalloc(count + 1, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, ubuf, count)) {
		kfree(buf);
		return -EFAULT;
	}

	if (buf[0] == '-')
		return sdebug_err_remove(sdev, buf, count);

	if (sscanf(buf, "%d", &inject_type) != 1) {
		kfree(buf);
		return -EINVAL;
	}

	inject = kzalloc(sizeof(struct sdebug_err_inject), GFP_KERNEL);
	if (!inject) {
		kfree(buf);
		return -ENOMEM;
	}

	switch (inject_type) {
	case ERR_TMOUT_CMD:
	case ERR_ABORT_CMD_FAILED:
	case ERR_LUN_RESET_FAILED:
		if (sscanf(buf, "%d %d %hhx", &inject->type, &inject->cnt,
			   &inject->cmd) != 3)
			goto out_error;
	break;

	case ERR_FAIL_QUEUE_CMD:
		if (sscanf(buf, "%d %d %hhx %x", &inject->type, &inject->cnt,
			   &inject->cmd, &inject->queuecmd_ret) != 4)
			goto out_error;
	break;

	case ERR_FAIL_CMD:
		if (sscanf(buf, "%d %d %hhx %hhx %hhx %hhx %hhx %hhx %hhx",
			   &inject->type, &inject->cnt, &inject->cmd,
			   &inject->host_byte, &inject->driver_byte,
			   &inject->status_byte, &inject->sense_key,
			   &inject->asc, &inject->asq) != 9)
			goto out_error;
	break;

	default:
		goto out_error;
	break;
	}

	kfree(buf);
	sdebug_err_add(sdev, inject);

	return count;

out_error:
	kfree(buf);
	kfree(inject);
	return -EINVAL;
}

static const struct file_operations sdebug_error_fops = {
	.open	= sdebug_error_open,
	.read	= seq_read,
	.write	= sdebug_error_write,
	.release = single_release,
};
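
/*
 * Editor's summary of the write formats accepted above (derived from the
 * sscanf() calls: type and cnt are decimal, cmd and the error bytes hex):
 *
 *	"<type> <cnt> <cmd>"                 types 0, 3 and 4
 *	"<type> <cnt> <cmd> <queuecmd_ret>"  type 1 (ERR_FAIL_QUEUE_CMD)
 *	"<type> <cnt> <cmd> <host> <driver> <status> <key> <asc> <asq>"
 *	                                     type 2 (ERR_FAIL_CMD)
 *	"- <type> <cmd>"                     removes a matching entry
 */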

static int sdebug_target_reset_fail_show(struct seq_file *m, void *p)
{
	struct scsi_target *starget = (struct scsi_target *)m->private;
	struct sdebug_target_info *targetip =
		(struct sdebug_target_info *)starget->hostdata;

	if (targetip)
		seq_printf(m, "%c\n", targetip->reset_fail ? 'Y' : 'N');

	return 0;
}

static int sdebug_target_reset_fail_open(struct inode *inode, struct file *file)
{
	return single_open(file, sdebug_target_reset_fail_show, inode->i_private);
}

static ssize_t sdebug_target_reset_fail_write(struct file *file,
		const char __user *ubuf, size_t count, loff_t *ppos)
{
	int ret;
	struct scsi_target *starget =
		(struct scsi_target *)file->f_inode->i_private;
	struct sdebug_target_info *targetip =
		(struct sdebug_target_info *)starget->hostdata;

	if (targetip) {
		ret = kstrtobool_from_user(ubuf, count, &targetip->reset_fail);
		return ret < 0 ? ret : count;
	}
	return -ENODEV;
}

static const struct file_operations sdebug_target_reset_fail_fops = {
	.open	= sdebug_target_reset_fail_open,
	.read	= seq_read,
	.write	= sdebug_target_reset_fail_write,
	.release = single_release,
};
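
/*
 * Editor's example: kstrtobool_from_user() accepts the usual boolean
 * spellings, so writing "Y" or "1" to a target's fail_reset file arms
 * the simulated target-reset failure and "N" or "0" disarms it.
 */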

static int sdebug_target_alloc(struct scsi_target *starget)
{
	struct sdebug_target_info *targetip;

	targetip = kzalloc(sizeof(struct sdebug_target_info), GFP_KERNEL);
	if (!targetip)
		return -ENOMEM;

	async_synchronize_full_domain(&sdebug_async_domain);

	targetip->debugfs_entry = debugfs_create_dir(dev_name(&starget->dev),
				sdebug_debugfs_root);

	debugfs_create_file("fail_reset", 0600, targetip->debugfs_entry, starget,
				&sdebug_target_reset_fail_fops);

	starget->hostdata = targetip;

	return 0;
}

static void sdebug_target_cleanup_async(void *data, async_cookie_t cookie)
{
	struct sdebug_target_info *targetip = data;

	debugfs_remove(targetip->debugfs_entry);
	kfree(targetip);
}

static void sdebug_target_destroy(struct scsi_target *starget)
{
	struct sdebug_target_info *targetip;

	targetip = (struct sdebug_target_info *)starget->hostdata;
	if (targetip) {
		starget->hostdata = NULL;
		async_schedule_domain(sdebug_target_cleanup_async, targetip,
				&sdebug_async_domain);
	}
}

/* Only do the extra work involved in logical block provisioning if one or
 * more of the lbpu, lbpws or lbpws10 parameters are given and we are doing
 * real reads and writes (i.e. not skipping them for speed).
 */
static inline bool scsi_debug_lbp(void)
{
	return 0 == sdebug_fake_rw &&
		(sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10);
}

static inline bool scsi_debug_atomic_write(void)
{
	return sdebug_fake_rw == 0 && sdebug_atomic_wr;
}

static void *lba2fake_store(struct sdeb_store_info *sip,
			    unsigned long long lba)
{
	struct sdeb_store_info *lsip = sip;

	lba = do_div(lba, sdebug_store_sectors);
	if (!sip || !sip->storep) {
		WARN_ON_ONCE(true);
		lsip = xa_load(per_store_ap, 0);  /* should never be NULL */
	}
	return lsip->storep + lba * sdebug_sector_size;
}
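
/*
 * Editor's example: do_div() above leaves the remainder in lba, i.e. the
 * LBA is reduced modulo sdebug_store_sectors; with a 16384-sector store,
 * LBA 16389 and LBA 5 share a backing offset. This wrap-around is what
 * lets a small ram store sit behind the larger capacity advertised when
 * virtual_gb is set.
 */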

static struct t10_pi_tuple *dif_store(struct sdeb_store_info *sip,
				      sector_t sector)
{
	sector = sector_div(sector, sdebug_store_sectors);

	return sip->dif_storep + sector;
}

static void sdebug_max_tgts_luns(void)
{
	struct sdebug_host_info *sdbg_host;
	struct Scsi_Host *hpnt;

	mutex_lock(&sdebug_host_list_mutex);
	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
		hpnt = sdbg_host->shost;
		if ((hpnt->this_id >= 0) &&
		    (sdebug_num_tgts > hpnt->this_id))
			hpnt->max_id = sdebug_num_tgts + 1;
		else
			hpnt->max_id = sdebug_num_tgts;
		/* sdebug_max_luns; */
		hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
	}
	mutex_unlock(&sdebug_host_list_mutex);
}

enum sdeb_cmd_data {SDEB_IN_DATA = 0, SDEB_IN_CDB = 1};

/* Set in_bit to -1 to indicate no bit position of invalid field */
static void mk_sense_invalid_fld(struct scsi_cmnd *scp,
				 enum sdeb_cmd_data c_d,
				 int in_byte, int in_bit)
{
	unsigned char *sbuff;
	u8 sks[4];
	int sl, asc;

	sbuff = scp->sense_buffer;
	if (!sbuff) {
		sdev_printk(KERN_ERR, scp->device,
			    "%s: sense_buffer is NULL\n", __func__);
		return;
	}
	asc = c_d ? INVALID_FIELD_IN_CDB : INVALID_FIELD_IN_PARAM_LIST;
	memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
	scsi_build_sense(scp, sdebug_dsense, ILLEGAL_REQUEST, asc, 0);
	memset(sks, 0, sizeof(sks));
	sks[0] = 0x80;
	if (c_d)
		sks[0] |= 0x40;
	if (in_bit >= 0) {
		sks[0] |= 0x8;
		sks[0] |= 0x7 & in_bit;
	}
	put_unaligned_be16(in_byte, sks + 1);
	if (sdebug_dsense) {
		sl = sbuff[7] + 8;
		sbuff[7] = sl;
		sbuff[sl] = 0x2;
		sbuff[sl + 1] = 0x6;
		memcpy(sbuff + sl + 4, sks, 3);
	} else
		memcpy(sbuff + 15, sks, 3);
	if (sdebug_verbose)
		sdev_printk(KERN_INFO, scp->device, "%s:  [sense_key,asc,ascq"
			    "]: [0x5,0x%x,0x0] %c byte=%d, bit=%d\n",
			    my_name, asc, c_d ? 'C' : 'D', in_byte, in_bit);
}
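
/*
 * Editor's worked example: for an invalid field at CDB byte 1, bit 5
 * (c_d == SDEB_IN_CDB, in_byte == 1, in_bit == 5) the code above yields
 * sks[0] = 0x80 | 0x40 | 0x8 | 5 = 0xcd and sks[1..2] = 0x0001, i.e.
 * SENSE-KEY SPECIFIC bytes meaning "C/D=1, BPV=1, bit 5 of byte 1".
 */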

static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq)
{
	if (!scp->sense_buffer) {
		sdev_printk(KERN_ERR, scp->device,
			    "%s: sense_buffer is NULL\n", __func__);
		return;
	}
	memset(scp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);

	scsi_build_sense(scp, sdebug_dsense, key, asc, asq);

	if (sdebug_verbose)
		sdev_printk(KERN_INFO, scp->device,
			    "%s:  [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
			    my_name, key, asc, asq);
}

static void mk_sense_invalid_opcode(struct scsi_cmnd *scp)
{
	mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
}

static int scsi_debug_ioctl(struct scsi_device *dev, unsigned int cmd,
			    void __user *arg)
{
	if (sdebug_verbose) {
		if (0x1261 == cmd)
			sdev_printk(KERN_INFO, dev,
				    "%s: BLKFLSBUF [0x1261]\n", __func__);
		else if (0x5331 == cmd)
			sdev_printk(KERN_INFO, dev,
				    "%s: CDROM_GET_CAPABILITY [0x5331]\n",
				    __func__);
		else
			sdev_printk(KERN_INFO, dev, "%s: cmd=0x%x\n",
				    __func__, cmd);
	}
	return -EINVAL;
	/* return -ENOTTY; // correct return but upsets fdisk */
}

static void config_cdb_len(struct scsi_device *sdev)
{
	switch (sdebug_cdb_len) {
	case 6:	/* suggest 6 byte READ, WRITE and MODE SENSE/SELECT */
		sdev->use_10_for_rw = false;
		sdev->use_16_for_rw = false;
		sdev->use_10_for_ms = false;
		break;
	case 10: /* suggest 10 byte RWs and 6 byte MODE SENSE/SELECT */
		sdev->use_10_for_rw = true;
		sdev->use_16_for_rw = false;
		sdev->use_10_for_ms = false;
		break;
	case 12: /* suggest 10 byte RWs and 10 byte MODE SENSE/SELECT */
		sdev->use_10_for_rw = true;
		sdev->use_16_for_rw = false;
		sdev->use_10_for_ms = true;
		break;
	case 16:
		sdev->use_10_for_rw = false;
		sdev->use_16_for_rw = true;
		sdev->use_10_for_ms = true;
		break;
	case 32: /* No knobs to suggest this so same as 16 for now */
		sdev->use_10_for_rw = false;
		sdev->use_16_for_rw = true;
		sdev->use_10_for_ms = true;
		break;
	default:
		pr_warn("unexpected cdb_len=%d, force to 10\n",
			sdebug_cdb_len);
		sdev->use_10_for_rw = true;
		sdev->use_16_for_rw = false;
		sdev->use_10_for_ms = false;
		sdebug_cdb_len = 10;
		break;
	}
}

static void all_config_cdb_len(void)
{
	struct sdebug_host_info *sdbg_host;
	struct Scsi_Host *shost;
	struct scsi_device *sdev;

	mutex_lock(&sdebug_host_list_mutex);
	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
		shost = sdbg_host->shost;
		shost_for_each_device(sdev, shost) {
			config_cdb_len(sdev);
		}
	}
	mutex_unlock(&sdebug_host_list_mutex);
}

static void clear_luns_changed_on_target(struct sdebug_dev_info *devip)
{
	struct sdebug_host_info *sdhp = devip->sdbg_host;
	struct sdebug_dev_info *dp;

	list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
		if ((devip->sdbg_host == dp->sdbg_host) &&
		    (devip->target == dp->target)) {
			clear_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
		}
	}
}

static int make_ua(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	int k;

	k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS);
	if (k != SDEBUG_NUM_UAS) {
		const char *cp = NULL;

		switch (k) {
		case SDEBUG_UA_POR:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
					POWER_ON_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "power on reset";
			break;
		case SDEBUG_UA_POOCCUR:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
					POWER_ON_OCCURRED_ASCQ);
			if (sdebug_verbose)
				cp = "power on occurred";
			break;
		case SDEBUG_UA_BUS_RESET:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
					BUS_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "bus reset";
			break;
		case SDEBUG_UA_MODE_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
					MODE_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "mode parameters changed";
			break;
		case SDEBUG_UA_CAPACITY_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
					CAPACITY_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "capacity data changed";
			break;
		case SDEBUG_UA_MICROCODE_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					MICROCODE_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "microcode has been changed";
			break;
		case SDEBUG_UA_MICROCODE_CHANGED_WO_RESET:
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					MICROCODE_CHANGED_WO_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "microcode has been changed without reset";
			break;
		case SDEBUG_UA_LUNS_CHANGED:
			/*
			 * SPC-3 behavior is to report a UNIT ATTENTION with
			 * ASC/ASCQ REPORTED LUNS DATA HAS CHANGED on every LUN
			 * on the target, until a REPORT LUNS command is
			 * received.  SPC-4 behavior is to report it only once.
			 * NOTE:  sdebug_scsi_level does not use the same
			 * values as struct scsi_device->scsi_level.
			 */
			if (sdebug_scsi_level >= 6)	/* SPC-4 and above */
				clear_luns_changed_on_target(devip);
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					LUNS_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "reported luns data has changed";
			break;
		default:
			pr_warn("unexpected unit attention code=%d\n", k);
			if (sdebug_verbose)
				cp = "unknown";
			break;
		}
		clear_bit(k, devip->uas_bm);
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, scp->device,
				   "%s reports: Unit attention: %s\n",
				   my_name, cp);
		return check_condition_result;
	}
	return 0;
}
1511 
1512 /* Build SCSI "data-in" buffer. Returns 0 if ok else (DID_ERROR << 16). */
1513 static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
1514 				int arr_len)
1515 {
1516 	int act_len;
1517 	struct scsi_data_buffer *sdb = &scp->sdb;
1518 
1519 	if (!sdb->length)
1520 		return 0;
1521 	if (scp->sc_data_direction != DMA_FROM_DEVICE)
1522 		return DID_ERROR << 16;
1523 
1524 	act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
1525 				      arr, arr_len);
1526 	scsi_set_resid(scp, scsi_bufflen(scp) - act_len);
1527 
1528 	return 0;
1529 }
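/*
 * Example: if the initiator's buffer (scsi_bufflen()) is 252 bytes but
 * only a 36 byte response is supplied, act_len is 36 and resid is set to
 * 216, telling the midlayer how much of the buffer went unused.
 */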
1530 
1531 /* Partial build of SCSI "data-in" buffer. Returns 0 if ok else
1532  * (DID_ERROR << 16). Can write to offset in data-in buffer. If multiple
1533  * calls, not required to write in ascending offset order. Assumes resid
1534  * set to scsi_bufflen() prior to any calls.
1535  */
1536 static int p_fill_from_dev_buffer(struct scsi_cmnd *scp, const void *arr,
1537 				  int arr_len, unsigned int off_dst)
1538 {
1539 	unsigned int act_len, n;
1540 	struct scsi_data_buffer *sdb = &scp->sdb;
1541 	off_t skip = off_dst;
1542 
1543 	if (sdb->length <= off_dst)
1544 		return 0;
1545 	if (scp->sc_data_direction != DMA_FROM_DEVICE)
1546 		return DID_ERROR << 16;
1547 
1548 	act_len = sg_pcopy_from_buffer(sdb->table.sgl, sdb->table.nents,
1549 				       arr, arr_len, skip);
1550 	pr_debug("%s: off_dst=%u, scsi_bufflen=%u, act_len=%u, resid=%d\n",
1551 		 __func__, off_dst, scsi_bufflen(scp), act_len,
1552 		 scsi_get_resid(scp));
1553 	n = scsi_bufflen(scp) - (off_dst + act_len);
1554 	scsi_set_resid(scp, min_t(u32, scsi_get_resid(scp), n));
1555 	return 0;
1556 }
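/*
 * Worked example of the resid logic above, assuming scsi_bufflen() == 512:
 * a call writing 64 bytes at off_dst 448 computes n == 0, so resid drops
 * to 0; a later call writing 64 bytes at off_dst 0 computes n == 448, but
 * the min_t() keeps resid at 0. Out-of-order partial fills can therefore
 * only shrink the residual count, never grow it.
 */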
1557 
1558 /* Fetches from SCSI "data-out" buffer. Returns number of bytes fetched into
1559  * 'arr' or -1 if error.
1560  */
1561 static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
1562 			       int arr_len)
1563 {
1564 	if (!scsi_bufflen(scp))
1565 		return 0;
1566 	if (scp->sc_data_direction != DMA_TO_DEVICE)
1567 		return -1;
1568 
1569 	return scsi_sg_copy_to_buffer(scp, arr, arr_len);
1570 }
1571 
1572 
1573 static char sdebug_inq_vendor_id[9] = "Linux   ";
1574 static char sdebug_inq_product_id[17] = "scsi_debug      ";
1575 static char sdebug_inq_product_rev[5] = SDEBUG_VERSION;
1576 /* Use some locally assigned NAAs for SAS addresses. */
1577 static const u64 naa3_comp_a = 0x3222222000000000ULL;
1578 static const u64 naa3_comp_b = 0x3333333000000000ULL;
1579 static const u64 naa3_comp_c = 0x3111111000000000ULL;
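/* The top nibble (0x3) is the NAA "Locally Assigned" format, leaving the
 * low 60 bits free for the ids added below (e.g. naa3_comp_b + dev_id_num).
 */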
1580 
1581 /* Device identification VPD page. Returns number of bytes placed in arr */
1582 static int inquiry_vpd_83(unsigned char *arr, int port_group_id,
1583 			  int target_dev_id, int dev_id_num,
1584 			  const char *dev_id_str, int dev_id_str_len,
1585 			  const uuid_t *lu_name)
1586 {
1587 	int num, port_a;
1588 	char b[32];
1589 
1590 	port_a = target_dev_id + 1;
1591 	/* T10 vendor identifier field format (faked) */
1592 	arr[0] = 0x2;	/* ASCII */
1593 	arr[1] = 0x1;
1594 	arr[2] = 0x0;
1595 	memcpy(&arr[4], sdebug_inq_vendor_id, 8);
1596 	memcpy(&arr[12], sdebug_inq_product_id, 16);
1597 	memcpy(&arr[28], dev_id_str, dev_id_str_len);
1598 	num = 8 + 16 + dev_id_str_len;
1599 	arr[3] = num;
1600 	num += 4;
1601 	if (dev_id_num >= 0) {
1602 		if (sdebug_uuid_ctl) {
1603 			/* Locally assigned UUID */
1604 			arr[num++] = 0x1;  /* binary (not necessarily sas) */
1605 			arr[num++] = 0xa;  /* PIV=0, lu, naa */
1606 			arr[num++] = 0x0;
1607 			arr[num++] = 0x12;
1608 			arr[num++] = 0x10; /* uuid type=1, locally assigned */
1609 			arr[num++] = 0x0;
1610 			memcpy(arr + num, lu_name, 16);
1611 			num += 16;
1612 		} else {
1613 			/* NAA-3, Logical unit identifier (binary) */
1614 			arr[num++] = 0x1;  /* binary (not necessarily sas) */
1615 			arr[num++] = 0x3;  /* PIV=0, lu, naa */
1616 			arr[num++] = 0x0;
1617 			arr[num++] = 0x8;
1618 			put_unaligned_be64(naa3_comp_b + dev_id_num, arr + num);
1619 			num += 8;
1620 		}
1621 		/* Target relative port number */
1622 		arr[num++] = 0x61;	/* proto=sas, binary */
1623 		arr[num++] = 0x94;	/* PIV=1, target port, rel port */
1624 		arr[num++] = 0x0;	/* reserved */
1625 		arr[num++] = 0x4;	/* length */
1626 		arr[num++] = 0x0;	/* reserved */
1627 		arr[num++] = 0x0;	/* reserved */
1628 		arr[num++] = 0x0;
1629 		arr[num++] = 0x1;	/* relative port A */
1630 	}
1631 	/* NAA-3, Target port identifier */
1632 	arr[num++] = 0x61;	/* proto=sas, binary */
1633 	arr[num++] = 0x93;	/* piv=1, target port, naa */
1634 	arr[num++] = 0x0;
1635 	arr[num++] = 0x8;
1636 	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
1637 	num += 8;
1638 	/* NAA-3, Target port group identifier */
1639 	arr[num++] = 0x61;	/* proto=sas, binary */
1640 	arr[num++] = 0x95;	/* piv=1, target port group id */
1641 	arr[num++] = 0x0;
1642 	arr[num++] = 0x4;
1643 	arr[num++] = 0;
1644 	arr[num++] = 0;
1645 	put_unaligned_be16(port_group_id, arr + num);
1646 	num += 2;
1647 	/* NAA-3, Target device identifier */
1648 	arr[num++] = 0x61;	/* proto=sas, binary */
1649 	arr[num++] = 0xa3;	/* piv=1, target device, naa */
1650 	arr[num++] = 0x0;
1651 	arr[num++] = 0x8;
1652 	put_unaligned_be64(naa3_comp_a + target_dev_id, arr + num);
1653 	num += 8;
1654 	/* SCSI name string: Target device identifier */
1655 	arr[num++] = 0x63;	/* proto=sas, UTF-8 */
1656 	arr[num++] = 0xa8;	/* piv=1, target device, SCSI name string */
1657 	arr[num++] = 0x0;
1658 	arr[num++] = 24;
1659 	memcpy(arr + num, "naa.32222220", 12);
1660 	num += 12;
1661 	snprintf(b, sizeof(b), "%08X", target_dev_id);
1662 	memcpy(arr + num, b, 8);
1663 	num += 8;
1664 	memset(arr + num, 0, 4);
1665 	num += 4;
1666 	return num;
1667 }
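/*
 * Worked example, assuming host_no 0, target 0, lun 0: dev_id_num is
 * 2000, so the logical unit designator above is 0x33333330000007d0,
 * target_dev_id is 1997 (0x7cd), and the SCSI name string comes out as
 * "naa.32222220000007CD" (20 characters plus 4 bytes of zero padding).
 */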
1668 
1669 static unsigned char vpd84_data[] = {
1670 /* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
1671     0x22,0x22,0x22,0x0,0xbb,0x1,
1672     0x22,0x22,0x22,0x0,0xbb,0x2,
1673 };
1674 
1675 /*  Software interface identification VPD page */
1676 static int inquiry_vpd_84(unsigned char *arr)
1677 {
1678 	memcpy(arr, vpd84_data, sizeof(vpd84_data));
1679 	return sizeof(vpd84_data);
1680 }
1681 
1682 /* Management network addresses VPD page */
1683 static int inquiry_vpd_85(unsigned char *arr)
1684 {
1685 	int num = 0;
1686 	const char *na1 = "https://www.kernel.org/config";
1687 	const char *na2 = "http://www.kernel.org/log";
1688 	int plen, olen;
1689 
1690 	arr[num++] = 0x1;	/* lu, storage config */
1691 	arr[num++] = 0x0;	/* reserved */
1692 	arr[num++] = 0x0;
1693 	olen = strlen(na1);
1694 	plen = olen + 1;
1695 	if (plen % 4)
1696 		plen = ((plen / 4) + 1) * 4;
1697 	arr[num++] = plen;	/* length, null terminated, padded */
1698 	memcpy(arr + num, na1, olen);
1699 	memset(arr + num + olen, 0, plen - olen);
1700 	num += plen;
1701 
1702 	arr[num++] = 0x4;	/* lu, logging */
1703 	arr[num++] = 0x0;	/* reserved */
1704 	arr[num++] = 0x0;
1705 	olen = strlen(na2);
1706 	plen = olen + 1;
1707 	if (plen % 4)
1708 		plen = ((plen / 4) + 1) * 4;
1709 	arr[num++] = plen;	/* length, null terminated, padded */
1710 	memcpy(arr + num, na2, olen);
1711 	memset(arr + num + olen, 0, plen - olen);
1712 	num += plen;
1713 
1714 	return num;
1715 }
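/*
 * The rounding above pads each NULL-terminated address to a multiple of
 * four bytes: strlen(na1) is 29, plus the terminator gives 30, which is
 * padded out to a 32 byte descriptor payload.
 */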
1716 
1717 /* SCSI ports VPD page */
1718 static int inquiry_vpd_88(unsigned char *arr, int target_dev_id)
1719 {
1720 	int num = 0;
1721 	int port_a, port_b;
1722 
1723 	port_a = target_dev_id + 1;
1724 	port_b = port_a + 1;
1725 	arr[num++] = 0x0;	/* reserved */
1726 	arr[num++] = 0x0;	/* reserved */
1727 	arr[num++] = 0x0;
1728 	arr[num++] = 0x1;	/* relative port 1 (primary) */
1729 	memset(arr + num, 0, 6);
1730 	num += 6;
1731 	arr[num++] = 0x0;
1732 	arr[num++] = 12;	/* length tp descriptor */
1733 	/* naa-5 target port identifier (A) */
1734 	arr[num++] = 0x61;	/* proto=sas, binary */
1735 	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
1736 	arr[num++] = 0x0;	/* reserved */
1737 	arr[num++] = 0x8;	/* length */
1738 	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
1739 	num += 8;
1740 	arr[num++] = 0x0;	/* reserved */
1741 	arr[num++] = 0x0;	/* reserved */
1742 	arr[num++] = 0x0;
1743 	arr[num++] = 0x2;	/* relative port 2 (secondary) */
1744 	memset(arr + num, 0, 6);
1745 	num += 6;
1746 	arr[num++] = 0x0;
1747 	arr[num++] = 12;	/* length tp descriptor */
1748 	/* naa-5 target port identifier (B) */
1749 	arr[num++] = 0x61;	/* proto=sas, binary */
1750 	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
1751 	arr[num++] = 0x0;	/* reserved */
1752 	arr[num++] = 0x8;	/* length */
1753 	put_unaligned_be64(naa3_comp_a + port_b, arr + num);
1754 	num += 8;
1755 
1756 	return num;
1757 }
1758 
1759 
1760 static unsigned char vpd89_data[] = {
1761 /* from 4th byte */ 0,0,0,0,
1762 'l','i','n','u','x',' ',' ',' ',
1763 'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
1764 '1','2','3','4',
1765 0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
1766 0xec,0,0,0,
1767 0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
1768 0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
1769 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
1770 0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
1771 0x53,0x41,
1772 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
1773 0x20,0x20,
1774 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
1775 0x10,0x80,
1776 0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
1777 0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
1778 0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
1779 0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
1780 0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
1781 0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
1782 0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
1783 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1784 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1785 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1786 0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
1787 0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
1788 0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
1789 0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
1790 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1791 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1792 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1793 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1794 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1795 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1796 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1797 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1798 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1799 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1800 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1801 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
1802 };
1803 
1804 /* ATA Information VPD page */
1805 static int inquiry_vpd_89(unsigned char *arr)
1806 {
1807 	memcpy(arr, vpd89_data, sizeof(vpd89_data));
1808 	return sizeof(vpd89_data);
1809 }
1810 
1811 
1812 static unsigned char vpdb0_data[] = {
1813 	/* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
1814 	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1815 	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1816 	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1817 };
1818 
1819 /* Block limits VPD page (SBC-3) */
1820 static int inquiry_vpd_b0(unsigned char *arr)
1821 {
1822 	unsigned int gran;
1823 
1824 	memcpy(arr, vpdb0_data, sizeof(vpdb0_data));
1825 
1826 	/* Optimal transfer length granularity */
1827 	if (sdebug_opt_xferlen_exp != 0 &&
1828 	    sdebug_physblk_exp < sdebug_opt_xferlen_exp)
1829 		gran = 1 << sdebug_opt_xferlen_exp;
1830 	else
1831 		gran = 1 << sdebug_physblk_exp;
1832 	put_unaligned_be16(gran, arr + 2);
1833 
1834 	/* Maximum Transfer Length */
1835 	if (sdebug_store_sectors > 0x400)
1836 		put_unaligned_be32(sdebug_store_sectors, arr + 4);
1837 
1838 	/* Optimal Transfer Length */
1839 	put_unaligned_be32(sdebug_opt_blks, &arr[8]);
1840 
1841 	if (sdebug_lbpu) {
1842 		/* Maximum Unmap LBA Count */
1843 		put_unaligned_be32(sdebug_unmap_max_blocks, &arr[16]);
1844 
1845 		/* Maximum Unmap Block Descriptor Count */
1846 		put_unaligned_be32(sdebug_unmap_max_desc, &arr[20]);
1847 	}
1848 
1849 	/* Unmap Granularity Alignment */
1850 	if (sdebug_unmap_alignment) {
1851 		put_unaligned_be32(sdebug_unmap_alignment, &arr[28]);
1852 		arr[28] |= 0x80; /* UGAVALID */
1853 	}
1854 
1855 	/* Optimal Unmap Granularity */
1856 	put_unaligned_be32(sdebug_unmap_granularity, &arr[24]);
1857 
1858 	/* Maximum WRITE SAME Length */
1859 	put_unaligned_be64(sdebug_write_same_length, &arr[32]);
1860 
1861 	if (sdebug_atomic_wr) {
1862 		put_unaligned_be32(sdebug_atomic_wr_max_length, &arr[40]);
1863 		put_unaligned_be32(sdebug_atomic_wr_align, &arr[44]);
1864 		put_unaligned_be32(sdebug_atomic_wr_gran, &arr[48]);
1865 		put_unaligned_be32(sdebug_atomic_wr_max_length_bndry, &arr[52]);
1866 		put_unaligned_be32(sdebug_atomic_wr_max_bndry, &arr[56]);
1867 	}
1868 
1869 	return 0x3c; /* Mandatory page length for Logical Block Provisioning */
1870 }
1871 
1872 /* Block device characteristics VPD page (SBC-3) */
1873 static int inquiry_vpd_b1(struct sdebug_dev_info *devip, unsigned char *arr)
1874 {
1875 	memset(arr, 0, 0x3c);
1876 	arr[0] = 0;
1877 	arr[1] = 1;	/* non rotating medium (e.g. solid state) */
1878 	arr[2] = 0;
1879 	arr[3] = 5;	/* less than 1.8" */
1880 
1881 	return 0x3c;
1882 }
1883 
1884 /* Logical block provisioning VPD page (SBC-4) */
1885 static int inquiry_vpd_b2(unsigned char *arr)
1886 {
1887 	memset(arr, 0, 0x4);
1888 	arr[0] = 0;			/* threshold exponent */
1889 	if (sdebug_lbpu)
1890 		arr[1] = 1 << 7;
1891 	if (sdebug_lbpws)
1892 		arr[1] |= 1 << 6;
1893 	if (sdebug_lbpws10)
1894 		arr[1] |= 1 << 5;
1895 	if (sdebug_lbprz && scsi_debug_lbp())
1896 		arr[1] |= (sdebug_lbprz & 0x7) << 2;  /* sbc4r07 and later */
1897 	/* anc_sup=0; dp=0 (no provisioning group descriptor) */
1898 	/* minimum_percentage=0; provisioning_type=0 (unknown) */
1899 	/* threshold_percentage=0 */
1900 	return 0x4;
1901 }
1902 
1903 /* Zoned block device characteristics VPD page (ZBC mandatory) */
1904 static int inquiry_vpd_b6(struct sdebug_dev_info *devip, unsigned char *arr)
1905 {
1906 	memset(arr, 0, 0x3c);
1907 	arr[0] = 0x1; /* set URSWRZ (unrestricted read in seq. wr req zone) */
1908 	/*
1909 	 * Set Optimal number of open sequential write preferred zones and
1910 	 * Optimal number of non-sequentially written sequential write
1911 	 * preferred zones fields to 'not reported' (0xffffffff). Leave other
1912 	 * fields set to zero, apart from Max. number of open swrz_s field.
1913 	 */
1914 	put_unaligned_be32(0xffffffff, &arr[4]);
1915 	put_unaligned_be32(0xffffffff, &arr[8]);
1916 	if (sdeb_zbc_model == BLK_ZONED_HM && devip->max_open)
1917 		put_unaligned_be32(devip->max_open, &arr[12]);
1918 	else
1919 		put_unaligned_be32(0xffffffff, &arr[12]);
1920 	if (devip->zcap < devip->zsize) {
1921 		arr[19] = ZBC_CONSTANT_ZONE_START_OFFSET;
1922 		put_unaligned_be64(devip->zsize, &arr[20]);
1923 	} else {
1924 		arr[19] = 0;
1925 	}
1926 	return 0x3c;
1927 }
1928 
1929 #define SDEBUG_BLE_LEN_AFTER_B4 28	/* thus vpage 32 bytes long */
1930 
1931 enum { MAXIMUM_NUMBER_OF_STREAMS = 6, PERMANENT_STREAM_COUNT = 5 };
1932 
1933 /* Block limits extension VPD page (SBC-4) */
1934 static int inquiry_vpd_b7(unsigned char *arrb4)
1935 {
1936 	memset(arrb4, 0, SDEBUG_BLE_LEN_AFTER_B4);
1937 	arrb4[1] = 1; /* Reduced stream control support (RSCS) */
1938 	put_unaligned_be16(MAXIMUM_NUMBER_OF_STREAMS, &arrb4[2]);
1939 	return SDEBUG_BLE_LEN_AFTER_B4;
1940 }
1941 
1942 #define SDEBUG_LONG_INQ_SZ 96
1943 #define SDEBUG_MAX_INQ_ARR_SZ 584
1944 
1945 static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1946 {
1947 	unsigned char pq_pdt;
1948 	unsigned char *arr;
1949 	unsigned char *cmd = scp->cmnd;
1950 	u32 alloc_len, n;
1951 	int ret;
1952 	bool have_wlun, is_disk, is_zbc, is_disk_zbc;
1953 
1954 	alloc_len = get_unaligned_be16(cmd + 3);
1955 	arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
1956 	if (!arr)
1957 		return DID_REQUEUE << 16;
1958 	is_disk = (sdebug_ptype == TYPE_DISK);
1959 	is_zbc = devip->zoned;
1960 	is_disk_zbc = (is_disk || is_zbc);
1961 	have_wlun = scsi_is_wlun(scp->device->lun);
1962 	if (have_wlun)
1963 		pq_pdt = TYPE_WLUN;	/* present, wlun */
1964 	else if (sdebug_no_lun_0 && (devip->lun == SDEBUG_LUN_0_VAL))
1965 		pq_pdt = 0x7f;	/* not present, PQ=3, PDT=0x1f */
1966 	else
1967 		pq_pdt = (sdebug_ptype & 0x1f);
1968 	arr[0] = pq_pdt;
1969 	if (0x2 & cmd[1]) {  /* CMDDT bit set */
1970 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
1971 		kfree(arr);
1972 		return check_condition_result;
1973 	} else if (0x1 & cmd[1]) {  /* EVPD bit set */
1974 		int lu_id_num, port_group_id, target_dev_id;
1975 		u32 len;
1976 		char lu_id_str[6];
1977 		int host_no = devip->sdbg_host->shost->host_no;
1978 
1979 		arr[1] = cmd[2];
1980 		port_group_id = (((host_no + 1) & 0x7f) << 8) +
1981 		    (devip->channel & 0x7f);
1982 		if (sdebug_vpd_use_hostno == 0)
1983 			host_no = 0;
1984 		lu_id_num = have_wlun ? -1 : (((host_no + 1) * 2000) +
1985 			    (devip->target * 1000) + devip->lun);
1986 		target_dev_id = ((host_no + 1) * 2000) +
1987 				 (devip->target * 1000) - 3;
1988 		len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
1989 		if (0 == cmd[2]) { /* supported vital product data pages */
1990 			n = 4;
1991 			arr[n++] = 0x0;   /* this page */
1992 			arr[n++] = 0x80;  /* unit serial number */
1993 			arr[n++] = 0x83;  /* device identification */
1994 			arr[n++] = 0x84;  /* software interface ident. */
1995 			arr[n++] = 0x85;  /* management network addresses */
1996 			arr[n++] = 0x86;  /* extended inquiry */
1997 			arr[n++] = 0x87;  /* mode page policy */
1998 			arr[n++] = 0x88;  /* SCSI ports */
1999 			if (is_disk_zbc) {	  /* SBC or ZBC */
2000 				arr[n++] = 0x89;  /* ATA information */
2001 				arr[n++] = 0xb0;  /* Block limits */
2002 				arr[n++] = 0xb1;  /* Block characteristics */
2003 				if (is_disk)
2004 					arr[n++] = 0xb2;  /* LB Provisioning */
2005 				if (is_zbc)
2006 					arr[n++] = 0xb6;  /* ZB dev. char. */
2007 				arr[n++] = 0xb7;  /* Block limits extension */
2008 			}
2009 			arr[3] = n - 4;	  /* number of supported VPD pages */
2010 		} else if (0x80 == cmd[2]) { /* unit serial number */
2011 			arr[3] = len;
2012 			memcpy(&arr[4], lu_id_str, len);
2013 		} else if (0x83 == cmd[2]) { /* device identification */
2014 			arr[3] = inquiry_vpd_83(&arr[4], port_group_id,
2015 						target_dev_id, lu_id_num,
2016 						lu_id_str, len,
2017 						&devip->lu_name);
2018 		} else if (0x84 == cmd[2]) { /* Software interface ident. */
2019 			arr[3] = inquiry_vpd_84(&arr[4]);
2020 		} else if (0x85 == cmd[2]) { /* Management network addresses */
2021 			arr[3] = inquiry_vpd_85(&arr[4]);
2022 		} else if (0x86 == cmd[2]) { /* extended inquiry */
2023 			arr[3] = 0x3c;	/* number of following entries */
2024 			if (sdebug_dif == T10_PI_TYPE3_PROTECTION)
2025 				arr[4] = 0x4;	/* SPT: GRD_CHK:1 */
2026 			else if (have_dif_prot)
2027 				arr[4] = 0x5;   /* SPT: GRD_CHK:1, REF_CHK:1 */
2028 			else
2029 				arr[4] = 0x0;   /* no protection stuff */
2030 			/*
2031 			 * GROUP_SUP=1; HEADSUP=1 (HEAD OF QUEUE); ORDSUP=1
2032 			 * (ORDERED queuing); SIMPSUP=1 (SIMPLE queuing).
2033 			 */
2034 			arr[5] = 0x17;
2035 		} else if (0x87 == cmd[2]) { /* mode page policy */
2036 			arr[3] = 0x8;	/* number of following entries */
2037 			arr[4] = 0x2;	/* disconnect-reconnect mp */
2038 			arr[6] = 0x80;	/* mlus, shared */
2039 			arr[8] = 0x18;	 /* protocol specific lu */
2040 			arr[10] = 0x82;	 /* mlus, per initiator port */
2041 		} else if (0x88 == cmd[2]) { /* SCSI Ports */
2042 			arr[3] = inquiry_vpd_88(&arr[4], target_dev_id);
2043 		} else if (is_disk_zbc && 0x89 == cmd[2]) { /* ATA info */
2044 			n = inquiry_vpd_89(&arr[4]);
2045 			put_unaligned_be16(n, arr + 2);
2046 		} else if (is_disk_zbc && 0xb0 == cmd[2]) { /* Block limits */
2047 			arr[3] = inquiry_vpd_b0(&arr[4]);
2048 		} else if (is_disk_zbc && 0xb1 == cmd[2]) { /* Block char. */
2049 			arr[3] = inquiry_vpd_b1(devip, &arr[4]);
2050 		} else if (is_disk && 0xb2 == cmd[2]) { /* LB Prov. */
2051 			arr[3] = inquiry_vpd_b2(&arr[4]);
2052 		} else if (is_zbc && cmd[2] == 0xb6) { /* ZB dev. charact. */
2053 			arr[3] = inquiry_vpd_b6(devip, &arr[4]);
2054 		} else if (cmd[2] == 0xb7) { /* block limits extension page */
2055 			arr[3] = inquiry_vpd_b7(&arr[4]);
2056 		} else {
2057 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
2058 			kfree(arr);
2059 			return check_condition_result;
2060 		}
2061 		len = min_t(u32, get_unaligned_be16(arr + 2) + 4, alloc_len);
2062 		ret = fill_from_dev_buffer(scp, arr,
2063 			    min_t(u32, len, SDEBUG_MAX_INQ_ARR_SZ));
2064 		kfree(arr);
2065 		return ret;
2066 	}
2067 	/* drops through here for a standard inquiry */
2068 	arr[1] = sdebug_removable ? 0x80 : 0;	/* Removable disk */
2069 	arr[2] = sdebug_scsi_level;
2070 	arr[3] = 2;    /* response_data_format==2 */
2071 	arr[4] = SDEBUG_LONG_INQ_SZ - 5;
2072 	arr[5] = (int)have_dif_prot;	/* PROTECT bit */
2073 	if (sdebug_vpd_use_hostno == 0)
2074 		arr[5] |= 0x10; /* claim: implicit TPGS */
2075 	arr[6] = 0x10; /* claim: MultiP */
2076 	/* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
2077 	arr[7] = 0xa; /* claim: LINKED + CMDQUE */
2078 	memcpy(&arr[8], sdebug_inq_vendor_id, 8);
2079 	memcpy(&arr[16], sdebug_inq_product_id, 16);
2080 	memcpy(&arr[32], sdebug_inq_product_rev, 4);
2081 	/* Use Vendor Specific area to place driver date in ASCII hex */
2082 	memcpy(&arr[36], sdebug_version_date, 8);
2083 	/* version descriptors (2 bytes each) follow */
2084 	put_unaligned_be16(0xc0, arr + 58);   /* SAM-6 no version claimed */
2085 	put_unaligned_be16(0x5c0, arr + 60);  /* SPC-5 no version claimed */
2086 	n = 62;
2087 	if (is_disk) {		/* SBC-4 no version claimed */
2088 		put_unaligned_be16(0x600, arr + n);
2089 		n += 2;
2090 	} else if (sdebug_ptype == TYPE_TAPE) {	/* SSC-4 rev 3 */
2091 		put_unaligned_be16(0x525, arr + n);
2092 		n += 2;
2093 	} else if (is_zbc) {	/* ZBC BSR INCITS 536 revision 05 */
2094 		put_unaligned_be16(0x624, arr + n);
2095 		n += 2;
2096 	}
2097 	put_unaligned_be16(0x2100, arr + n);	/* SPL-4 no version claimed */
2098 	ret = fill_from_dev_buffer(scp, arr,
2099 			    min_t(u32, alloc_len, SDEBUG_LONG_INQ_SZ));
2100 	kfree(arr);
2101 	return ret;
2102 }
2103 
2104 /* See resp_iec_m_pg() for how this data is manipulated */
2105 static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
2106 				   0, 0, 0x0, 0x0};
2107 
2108 static int resp_requests(struct scsi_cmnd *scp,
2109 			 struct sdebug_dev_info *devip)
2110 {
2111 	unsigned char *cmd = scp->cmnd;
2112 	unsigned char arr[SCSI_SENSE_BUFFERSIZE];	/* assume >= 18 bytes */
2113 	bool dsense = !!(cmd[1] & 1);
2114 	u32 alloc_len = cmd[4];
2115 	u32 len = 18;
2116 	int stopped_state = atomic_read(&devip->stopped);
2117 
2118 	memset(arr, 0, sizeof(arr));
2119 	if (stopped_state > 0) {	/* some "pollable" data [spc6r02: 5.12.2] */
2120 		if (dsense) {
2121 			arr[0] = 0x72;
2122 			arr[1] = NOT_READY;
2123 			arr[2] = LOGICAL_UNIT_NOT_READY;
2124 			arr[3] = (stopped_state == 2) ? 0x1 : 0x2;
2125 			len = 8;
2126 		} else {
2127 			arr[0] = 0x70;
2128 			arr[2] = NOT_READY;		/* NOT READY in sense_key */
2129 			arr[7] = 0xa;			/* 18 byte sense buffer */
2130 			arr[12] = LOGICAL_UNIT_NOT_READY;
2131 			arr[13] = (stopped_state == 2) ? 0x1 : 0x2;
2132 		}
2133 	} else if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
2134 		/* Information exceptions control mode page: TEST=1, MRIE=6 */
2135 		if (dsense) {
2136 			arr[0] = 0x72;
2137 			arr[1] = 0x0;		/* NO_SENSE in sense_key */
2138 			arr[2] = THRESHOLD_EXCEEDED;
2139 			arr[3] = 0xff;		/* Failure prediction(false) */
2140 			len = 8;
2141 		} else {
2142 			arr[0] = 0x70;
2143 			arr[2] = 0x0;		/* NO_SENSE in sense_key */
2144 			arr[7] = 0xa;		/* 18 byte sense buffer */
2145 			arr[12] = THRESHOLD_EXCEEDED;
2146 			arr[13] = 0xff;		/* Failure prediction(false) */
2147 		}
2148 	} else {	/* nothing to report */
2149 		if (dsense) {
2150 			len = 8;
2151 			memset(arr, 0, len);
2152 			arr[0] = 0x72;
2153 		} else {
2154 			memset(arr, 0, len);
2155 			arr[0] = 0x70;
2156 			arr[7] = 0xa;
2157 		}
2158 	}
2159 	return fill_from_dev_buffer(scp, arr, min_t(u32, len, alloc_len));
2160 }
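/*
 * The arr[0] values above select the sense format requested by the DESC
 * bit in the REQUEST SENSE CDB: 0x70 is fixed format (18 bytes used here,
 * additional sense code at byte 12), 0x72 is descriptor format (8 byte
 * header with the additional sense code at byte 2).
 */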
2161 
2162 static int resp_start_stop(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
2163 {
2164 	unsigned char *cmd = scp->cmnd;
2165 	int power_cond, want_stop, stopped_state;
2166 	bool changing;
2167 
2168 	power_cond = (cmd[4] & 0xf0) >> 4;
2169 	if (power_cond) {
2170 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 7);
2171 		return check_condition_result;
2172 	}
2173 	want_stop = !(cmd[4] & 1);
2174 	stopped_state = atomic_read(&devip->stopped);
2175 	if (stopped_state == 2) {
2176 		ktime_t now_ts = ktime_get_boottime();
2177 
2178 		if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
2179 			u64 diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));
2180 
2181 			if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
2182 				/* tur_ms_to_ready timer extinguished */
2183 				atomic_set(&devip->stopped, 0);
2184 				stopped_state = 0;
2185 			}
2186 		}
2187 		if (stopped_state == 2) {
2188 			if (want_stop) {
2189 				stopped_state = 1;	/* dummy up success */
2190 			} else {	/* Disallow tur_ms_to_ready delay to be overridden */
2191 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 0 /* START bit */);
2192 				return check_condition_result;
2193 			}
2194 		}
2195 	}
2196 	changing = (stopped_state != want_stop);
2197 	if (changing)
2198 		atomic_xchg(&devip->stopped, want_stop);
2199 	if (!changing || (cmd[1] & 0x1))  /* state unchanged or IMMED bit set in cdb */
2200 		return SDEG_RES_IMMED_MASK;
2201 	else
2202 		return 0;
2203 }
2204 
2205 static sector_t get_sdebug_capacity(void)
2206 {
2207 	static const unsigned int gibibyte = 1073741824;
2208 
2209 	if (sdebug_virtual_gb > 0)
2210 		return (sector_t)sdebug_virtual_gb *
2211 			(gibibyte / sdebug_sector_size);
2212 	else
2213 		return sdebug_store_sectors;
2214 }
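/*
 * Example: "virtual_gb=4" with the default 512 byte sectors reports
 * 4 * (1073741824 / 512) == 8388608 sectors, regardless of how much
 * backing store (dev_size_mb) was actually allocated; accesses beyond
 * the store wrap back onto it.
 */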
2215 
2216 #define SDEBUG_READCAP_ARR_SZ 8
2217 static int resp_readcap(struct scsi_cmnd *scp,
2218 			struct sdebug_dev_info *devip)
2219 {
2220 	unsigned char arr[SDEBUG_READCAP_ARR_SZ];
2221 	unsigned int capac;
2222 
2223 	/* following just in case virtual_gb changed */
2224 	sdebug_capacity = get_sdebug_capacity();
2225 	memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
2226 	if (sdebug_capacity < 0xffffffff) {
2227 		capac = (unsigned int)sdebug_capacity - 1;
2228 		put_unaligned_be32(capac, arr + 0);
2229 	} else
2230 		put_unaligned_be32(0xffffffff, arr + 0);
2231 	put_unaligned_be16(sdebug_sector_size, arr + 6);
2232 	return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
2233 }
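/*
 * Per SBC, a returned LBA of 0xffffffff means the capacity does not fit
 * in 32 bits and the initiator should retry with READ CAPACITY(16),
 * handled by resp_readcap16() below.
 */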
2234 
2235 #define SDEBUG_READCAP16_ARR_SZ 32
2236 static int resp_readcap16(struct scsi_cmnd *scp,
2237 			  struct sdebug_dev_info *devip)
2238 {
2239 	unsigned char *cmd = scp->cmnd;
2240 	unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
2241 	u32 alloc_len;
2242 
2243 	alloc_len = get_unaligned_be32(cmd + 10);
2244 	/* following just in case virtual_gb changed */
2245 	sdebug_capacity = get_sdebug_capacity();
2246 	memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
2247 	put_unaligned_be64((u64)(sdebug_capacity - 1), arr + 0);
2248 	put_unaligned_be32(sdebug_sector_size, arr + 8);
2249 	arr[13] = sdebug_physblk_exp & 0xf;
2250 	arr[14] = (sdebug_lowest_aligned >> 8) & 0x3f;
2251 
2252 	if (scsi_debug_lbp()) {
2253 		arr[14] |= 0x80; /* LBPME */
2254 		/* from sbc4r07, this LBPRZ field is 1 bit, but the LBPRZ in
2255 		 * the LB Provisioning VPD page is 3 bits. Note that lbprz=2
2256 		 * in the wider field maps to 0 in this field.
2257 		 */
2258 		if (sdebug_lbprz & 1)	/* precisely what the draft requires */
2259 			arr[14] |= 0x40;
2260 	}
2261 
2262 	/*
2263 	 * Since the scsi_debug READ CAPACITY implementation always reports the
2264 	 * total disk capacity, set RC BASIS = 1 for host-managed ZBC devices.
2265 	 */
2266 	if (devip->zoned)
2267 		arr[12] |= 1 << 4;
2268 
2269 	arr[15] = sdebug_lowest_aligned & 0xff;
2270 
2271 	if (have_dif_prot) {
2272 		arr[12] = (sdebug_dif - 1) << 1; /* P_TYPE */
2273 		arr[12] |= 1; /* PROT_EN */
2274 	}
2275 
2276 	return fill_from_dev_buffer(scp, arr,
2277 			    min_t(u32, alloc_len, SDEBUG_READCAP16_ARR_SZ));
2278 }
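/*
 * Example of the protection encoding above: dif=1 (Type 1) gives
 * arr[12] == 0x01 (P_TYPE 0, PROT_EN 1) and dif=3 (Type 3) gives 0x05
 * (P_TYPE 2, PROT_EN 1), matching the READ CAPACITY(16) definitions.
 */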
2279 
2280 #define SDEBUG_MAX_TGTPGS_ARR_SZ 1412
2281 
2282 static int resp_report_tgtpgs(struct scsi_cmnd *scp,
2283 			      struct sdebug_dev_info *devip)
2284 {
2285 	unsigned char *cmd = scp->cmnd;
2286 	unsigned char *arr;
2287 	int host_no = devip->sdbg_host->shost->host_no;
2288 	int port_group_a, port_group_b, port_a, port_b;
2289 	u32 alen, n, rlen;
2290 	int ret;
2291 
2292 	alen = get_unaligned_be32(cmd + 6);
2293 	arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
2294 	if (!arr)
2295 		return DID_REQUEUE << 16;
2296 	/*
2297 	 * EVPD page 0x88 states we have two ports, one
2298 	 * real and a fake port with no device connected.
2299 	 * So we create two port groups with one port each
2300 	 * and set the group with port B to unavailable.
2301 	 */
2302 	port_a = 0x1; /* relative port A */
2303 	port_b = 0x2; /* relative port B */
2304 	port_group_a = (((host_no + 1) & 0x7f) << 8) +
2305 			(devip->channel & 0x7f);
2306 	port_group_b = (((host_no + 1) & 0x7f) << 8) +
2307 			(devip->channel & 0x7f) + 0x80;
2308 
2309 	/*
2310 	 * The asymmetric access state is cycled according to the host_id.
2311 	 */
2312 	n = 4;
2313 	if (sdebug_vpd_use_hostno == 0) {
2314 		arr[n++] = host_no % 3; /* Asymm access state */
2315 		arr[n++] = 0x0F; /* claim: all states are supported */
2316 	} else {
2317 		arr[n++] = 0x0; /* Active/Optimized path */
2318 		arr[n++] = 0x01; /* only support active/optimized paths */
2319 	}
2320 	put_unaligned_be16(port_group_a, arr + n);
2321 	n += 2;
2322 	arr[n++] = 0;    /* Reserved */
2323 	arr[n++] = 0;    /* Status code */
2324 	arr[n++] = 0;    /* Vendor unique */
2325 	arr[n++] = 0x1;  /* One port per group */
2326 	arr[n++] = 0;    /* Reserved */
2327 	arr[n++] = 0;    /* Reserved */
2328 	put_unaligned_be16(port_a, arr + n);
2329 	n += 2;
2330 	arr[n++] = 3;    /* Port unavailable */
2331 	arr[n++] = 0x08; /* claim: only unavailable paths are supported */
2332 	put_unaligned_be16(port_group_b, arr + n);
2333 	n += 2;
2334 	arr[n++] = 0;    /* Reserved */
2335 	arr[n++] = 0;    /* Status code */
2336 	arr[n++] = 0;    /* Vendor unique */
2337 	arr[n++] = 0x1;  /* One port per group */
2338 	arr[n++] = 0;    /* Reserved */
2339 	arr[n++] = 0;    /* Reserved */
2340 	put_unaligned_be16(port_b, arr + n);
2341 	n += 2;
2342 
2343 	rlen = n - 4;
2344 	put_unaligned_be32(rlen, arr + 0);
2345 
2346 	/*
2347 	 * Return the smallest of:
2348 	 * - the allocated length
2349 	 * - the constructed response length
2350 	 * - the maximum array size
2351 	 */
2352 	rlen = min(alen, n);
2353 	ret = fill_from_dev_buffer(scp, arr,
2354 			   min_t(u32, rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
2355 	kfree(arr);
2356 	return ret;
2357 }
2358 
2359 static int resp_rsup_opcodes(struct scsi_cmnd *scp,
2360 			     struct sdebug_dev_info *devip)
2361 {
2362 	bool rctd;
2363 	u8 reporting_opts, req_opcode, sdeb_i, supp;
2364 	u16 req_sa, u;
2365 	u32 alloc_len, a_len;
2366 	int k, offset, len, errsts, count, bump, na;
2367 	const struct opcode_info_t *oip;
2368 	const struct opcode_info_t *r_oip;
2369 	u8 *arr;
2370 	u8 *cmd = scp->cmnd;
2371 
2372 	rctd = !!(cmd[2] & 0x80);
2373 	reporting_opts = cmd[2] & 0x7;
2374 	req_opcode = cmd[3];
2375 	req_sa = get_unaligned_be16(cmd + 4);
2376 	alloc_len = get_unaligned_be32(cmd + 6);
2377 	if (alloc_len < 4 || alloc_len > 0xffff) {
2378 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
2379 		return check_condition_result;
2380 	}
2381 	if (alloc_len > 8192)
2382 		a_len = 8192;
2383 	else
2384 		a_len = alloc_len;
2385 	arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_ATOMIC);
2386 	if (NULL == arr) {
2387 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
2388 				INSUFF_RES_ASCQ);
2389 		return check_condition_result;
2390 	}
2391 	switch (reporting_opts) {
2392 	case 0:	/* all commands */
2393 		/* count number of commands */
2394 		for (count = 0, oip = opcode_info_arr;
2395 		     oip->num_attached != 0xff; ++oip) {
2396 			if (F_INV_OP & oip->flags)
2397 				continue;
2398 			count += (oip->num_attached + 1);
2399 		}
2400 		bump = rctd ? 20 : 8;
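		/*
		 * Each command descriptor is 8 bytes; when RCTD is set a
		 * 12 byte command timeouts descriptor follows each one,
		 * giving 20 bytes per reported opcode/service action.
		 */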
2401 		put_unaligned_be32(count * bump, arr);
2402 		for (offset = 4, oip = opcode_info_arr;
2403 		     oip->num_attached != 0xff && offset < a_len; ++oip) {
2404 			if (F_INV_OP & oip->flags)
2405 				continue;
2406 			na = oip->num_attached;
2407 			arr[offset] = oip->opcode;
2408 			put_unaligned_be16(oip->sa, arr + offset + 2);
2409 			if (rctd)
2410 				arr[offset + 5] |= 0x2;
2411 			if (FF_SA & oip->flags)
2412 				arr[offset + 5] |= 0x1;
2413 			put_unaligned_be16(oip->len_mask[0], arr + offset + 6);
2414 			if (rctd)
2415 				put_unaligned_be16(0xa, arr + offset + 8);
2416 			r_oip = oip;
2417 			for (k = 0, oip = oip->arrp; k < na; ++k, ++oip) {
2418 				if (F_INV_OP & oip->flags)
2419 					continue;
2420 				offset += bump;
2421 				arr[offset] = oip->opcode;
2422 				put_unaligned_be16(oip->sa, arr + offset + 2);
2423 				if (rctd)
2424 					arr[offset + 5] |= 0x2;
2425 				if (FF_SA & oip->flags)
2426 					arr[offset + 5] |= 0x1;
2427 				put_unaligned_be16(oip->len_mask[0],
2428 						   arr + offset + 6);
2429 				if (rctd)
2430 					put_unaligned_be16(0xa,
2431 							   arr + offset + 8);
2432 			}
2433 			oip = r_oip;
2434 			offset += bump;
2435 		}
2436 		break;
2437 	case 1:	/* one command: opcode only */
2438 	case 2:	/* one command: opcode plus service action */
2439 	case 3:	/* one command: if sa==0 then opcode only else opcode+sa */
2440 		sdeb_i = opcode_ind_arr[req_opcode];
2441 		oip = &opcode_info_arr[sdeb_i];
2442 		if (F_INV_OP & oip->flags) {
2443 			supp = 1;
2444 			offset = 4;
2445 		} else {
2446 			if (1 == reporting_opts) {
2447 				if (FF_SA & oip->flags) {
2448 					mk_sense_invalid_fld(scp, SDEB_IN_CDB,
2449 							     2, 2);
2450 					kfree(arr);
2451 					return check_condition_result;
2452 				}
2453 				req_sa = 0;
2454 			} else if (2 == reporting_opts &&
2455 				   0 == (FF_SA & oip->flags)) {
2456 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1);	/* points at requested sa */
2457 				kfree(arr);
2458 				return check_condition_result;
2459 			}
2460 			if (0 == (FF_SA & oip->flags) &&
2461 			    req_opcode == oip->opcode)
2462 				supp = 3;
2463 			else if (0 == (FF_SA & oip->flags)) {
2464 				na = oip->num_attached;
2465 				for (k = 0, oip = oip->arrp; k < na;
2466 				     ++k, ++oip) {
2467 					if (req_opcode == oip->opcode)
2468 						break;
2469 				}
2470 				supp = (k >= na) ? 1 : 3;
2471 			} else if (req_sa != oip->sa) {
2472 				na = oip->num_attached;
2473 				for (k = 0, oip = oip->arrp; k < na;
2474 				     ++k, ++oip) {
2475 					if (req_sa == oip->sa)
2476 						break;
2477 				}
2478 				supp = (k >= na) ? 1 : 3;
2479 			} else
2480 				supp = 3;
2481 			if (3 == supp) {
2482 				u = oip->len_mask[0];
2483 				put_unaligned_be16(u, arr + 2);
2484 				arr[4] = oip->opcode;
2485 				for (k = 1; k < u; ++k)
2486 					arr[4 + k] = (k < 16) ?
2487 						 oip->len_mask[k] : 0xff;
2488 				offset = 4 + u;
2489 			} else
2490 				offset = 4;
2491 		}
2492 		arr[1] = (rctd ? 0x80 : 0) | supp;
2493 		if (rctd) {
2494 			put_unaligned_be16(0xa, arr + offset);
2495 			offset += 12;
2496 		}
2497 		break;
2498 	default:
2499 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
2500 		kfree(arr);
2501 		return check_condition_result;
2502 	}
2503 	offset = (offset < a_len) ? offset : a_len;
2504 	len = (offset < alloc_len) ? offset : alloc_len;
2505 	errsts = fill_from_dev_buffer(scp, arr, len);
2506 	kfree(arr);
2507 	return errsts;
2508 }
2509 
2510 static int resp_rsup_tmfs(struct scsi_cmnd *scp,
2511 			  struct sdebug_dev_info *devip)
2512 {
2513 	bool repd;
2514 	u32 alloc_len, len;
2515 	u8 arr[16];
2516 	u8 *cmd = scp->cmnd;
2517 
2518 	memset(arr, 0, sizeof(arr));
2519 	repd = !!(cmd[2] & 0x80);
2520 	alloc_len = get_unaligned_be32(cmd + 6);
2521 	if (alloc_len < 4) {
2522 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
2523 		return check_condition_result;
2524 	}
2525 	arr[0] = 0xc8;		/* ATS | ATSS | LURS */
2526 	arr[1] = 0x1;		/* ITNRS */
2527 	if (repd) {
2528 		arr[3] = 0xc;
2529 		len = 16;
2530 	} else
2531 		len = 4;
2532 
2533 	len = (len < alloc_len) ? len : alloc_len;
2534 	return fill_from_dev_buffer(scp, arr, len);
2535 }
2536 
2537 /* <<Following mode page info copied from ST318451LW>> */
2538 
2539 static int resp_err_recov_pg(unsigned char *p, int pcontrol, int target)
2540 {	/* Read-Write Error Recovery page for mode_sense */
2541 	unsigned char err_recov_pg[] = {0x1, 0xa, 0xc0, 11, 240, 0, 0, 0,
2542 					5, 0, 0xff, 0xff};
2543 
2544 	memcpy(p, err_recov_pg, sizeof(err_recov_pg));
2545 	if (1 == pcontrol)
2546 		memset(p + 2, 0, sizeof(err_recov_pg) - 2);
2547 	return sizeof(err_recov_pg);
2548 }
2549 
2550 static int resp_disconnect_pg(unsigned char *p, int pcontrol, int target)
2551 { 	/* Disconnect-Reconnect page for mode_sense */
2552 	unsigned char disconnect_pg[] = {0x2, 0xe, 128, 128, 0, 10, 0, 0,
2553 					 0, 0, 0, 0, 0, 0, 0, 0};
2554 
2555 	memcpy(p, disconnect_pg, sizeof(disconnect_pg));
2556 	if (1 == pcontrol)
2557 		memset(p + 2, 0, sizeof(disconnect_pg) - 2);
2558 	return sizeof(disconnect_pg);
2559 }
2560 
2561 static int resp_format_pg(unsigned char *p, int pcontrol, int target)
2562 {       /* Format device page for mode_sense */
2563 	unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
2564 				     0, 0, 0, 0, 0, 0, 0, 0,
2565 				     0, 0, 0, 0, 0x40, 0, 0, 0};
2566 
2567 	memcpy(p, format_pg, sizeof(format_pg));
2568 	put_unaligned_be16(sdebug_sectors_per, p + 10);
2569 	put_unaligned_be16(sdebug_sector_size, p + 12);
2570 	if (sdebug_removable)
2571 		p[20] |= 0x20; /* should agree with INQUIRY */
2572 	if (1 == pcontrol)
2573 		memset(p + 2, 0, sizeof(format_pg) - 2);
2574 	return sizeof(format_pg);
2575 }
2576 
2577 static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
2578 				     0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,
2579 				     0, 0, 0, 0};
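/*
 * Note: this is module-global "current values" state, not per device;
 * resp_mode_select() below patches it in place, so a MODE SELECT issued
 * through one scsi_debug device changes the page reported by all of them.
 */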
2580 
2581 static int resp_caching_pg(unsigned char *p, int pcontrol, int target)
2582 { 	/* Caching page for mode_sense */
2583 	unsigned char ch_caching_pg[] = {/* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0,
2584 		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
2585 	unsigned char d_caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
2586 		0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,     0, 0, 0, 0};
2587 
2588 	if (SDEBUG_OPT_N_WCE & sdebug_opts)
2589 		caching_pg[2] &= ~0x4;	/* set WCE=0 (default WCE=1) */
2590 	memcpy(p, caching_pg, sizeof(caching_pg));
2591 	if (1 == pcontrol)
2592 		memcpy(p + 2, ch_caching_pg, sizeof(ch_caching_pg));
2593 	else if (2 == pcontrol)
2594 		memcpy(p, d_caching_pg, sizeof(d_caching_pg));
2595 	return sizeof(caching_pg);
2596 }
2597 
2598 static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
2599 				    0, 0, 0x2, 0x4b};
2600 
2601 static int resp_ctrl_m_pg(unsigned char *p, int pcontrol, int target)
2602 { 	/* Control mode page for mode_sense */
2603 	unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
2604 					0, 0, 0, 0};
2605 	unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
2606 				     0, 0, 0x2, 0x4b};
2607 
2608 	if (sdebug_dsense)
2609 		ctrl_m_pg[2] |= 0x4;
2610 	else
2611 		ctrl_m_pg[2] &= ~0x4;
2612 
2613 	if (sdebug_ato)
2614 		ctrl_m_pg[5] |= 0x80; /* ATO=1 */
2615 
2616 	memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
2617 	if (1 == pcontrol)
2618 		memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
2619 	else if (2 == pcontrol)
2620 		memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
2621 	return sizeof(ctrl_m_pg);
2622 }
2623 
2624 /* IO Advice Hints Grouping mode page */
2625 static int resp_grouping_m_pg(unsigned char *p, int pcontrol, int target)
2626 {
2627 	/* IO Advice Hints Grouping mode page */
2628 	struct grouping_m_pg {
2629 		u8 page_code;	/* OR 0x40 when subpage_code > 0 */
2630 		u8 subpage_code;
2631 		__be16 page_length;
2632 		u8 reserved[12];
2633 		struct scsi_io_group_descriptor descr[MAXIMUM_NUMBER_OF_STREAMS];
2634 	};
2635 	static const struct grouping_m_pg gr_m_pg = {
2636 		.page_code = 0xa | 0x40,
2637 		.subpage_code = 5,
2638 		.page_length = cpu_to_be16(sizeof(gr_m_pg) - 4),
2639 		.descr = {
2640 			{ .st_enble = 1 },
2641 			{ .st_enble = 1 },
2642 			{ .st_enble = 1 },
2643 			{ .st_enble = 1 },
2644 			{ .st_enble = 1 },
2645 			{ .st_enble = 0 },
2646 		}
2647 	};
2648 
2649 	BUILD_BUG_ON(sizeof(struct grouping_m_pg) !=
2650 		     16 + MAXIMUM_NUMBER_OF_STREAMS * 16);
2651 	memcpy(p, &gr_m_pg, sizeof(gr_m_pg));
2652 	if (1 == pcontrol) {
2653 		/* There are no changeable values so clear from byte 4 on. */
2654 		memset(p + 4, 0, sizeof(gr_m_pg) - 4);
2655 	}
2656 	return sizeof(gr_m_pg);
2657 }
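/*
 * The five descriptors above with st_enble set correspond to
 * PERMANENT_STREAM_COUNT; the last of the MAXIMUM_NUMBER_OF_STREAMS is
 * left disabled, presumably so stream control commands have one
 * non-permanent stream to open and close.
 */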
2658 
2659 static int resp_iec_m_pg(unsigned char *p, int pcontrol, int target)
2660 {	/* Informational Exceptions control mode page for mode_sense */
2661 	unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
2662 				       0, 0, 0x0, 0x0};
2663 	unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
2664 				      0, 0, 0x0, 0x0};
2665 
2666 	memcpy(p, iec_m_pg, sizeof(iec_m_pg));
2667 	if (1 == pcontrol)
2668 		memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
2669 	else if (2 == pcontrol)
2670 		memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
2671 	return sizeof(iec_m_pg);
2672 }
2673 
2674 static int resp_sas_sf_m_pg(unsigned char *p, int pcontrol, int target)
2675 {	/* SAS SSP mode page - short format for mode_sense */
2676 	unsigned char sas_sf_m_pg[] = {0x19, 0x6,
2677 		0x6, 0x0, 0x7, 0xd0, 0x0, 0x0};
2678 
2679 	memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg));
2680 	if (1 == pcontrol)
2681 		memset(p + 2, 0, sizeof(sas_sf_m_pg) - 2);
2682 	return sizeof(sas_sf_m_pg);
2683 }
2684 
2685 
2686 static int resp_sas_pcd_m_spg(unsigned char *p, int pcontrol, int target,
2687 			      int target_dev_id)
2688 {	/* SAS phy control and discover mode page for mode_sense */
2689 	unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
2690 		    0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
2691 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2692 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2693 		    0x2, 0, 0, 0, 0, 0, 0, 0,
2694 		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
2695 		    0, 0, 0, 0, 0, 0, 0, 0,
2696 		    0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
2697 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2698 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2699 		    0x3, 0, 0, 0, 0, 0, 0, 0,
2700 		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
2701 		    0, 0, 0, 0, 0, 0, 0, 0,
2702 		};
2703 	int port_a, port_b;
2704 
2705 	put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 16);
2706 	put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 24);
2707 	put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 64);
2708 	put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 72);
2709 	port_a = target_dev_id + 1;
2710 	port_b = port_a + 1;
2711 	memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
2712 	put_unaligned_be32(port_a, p + 20);
2713 	put_unaligned_be32(port_b, p + 48 + 20);
2714 	if (1 == pcontrol)
2715 		memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
2716 	return sizeof(sas_pcd_m_pg);
2717 }
2718 
2719 static int resp_sas_sha_m_spg(unsigned char *p, int pcontrol)
2720 {	/* SAS SSP shared protocol specific port mode subpage */
2721 	unsigned char sas_sha_m_pg[] = {0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
2722 		    0, 0, 0, 0, 0, 0, 0, 0,
2723 		};
2724 
2725 	memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg));
2726 	if (1 == pcontrol)
2727 		memset(p + 4, 0, sizeof(sas_sha_m_pg) - 4);
2728 	return sizeof(sas_sha_m_pg);
2729 }
2730 
2731 /* PAGE_SIZE is more than necessary but provides room for future expansion. */
2732 #define SDEBUG_MAX_MSENSE_SZ PAGE_SIZE
2733 
2734 static int resp_mode_sense(struct scsi_cmnd *scp,
2735 			   struct sdebug_dev_info *devip)
2736 {
2737 	int pcontrol, pcode, subpcode, bd_len;
2738 	unsigned char dev_spec;
2739 	u32 alloc_len, offset, len;
2740 	int target_dev_id;
2741 	int target = scp->device->id;
2742 	unsigned char *ap;
2743 	unsigned char *arr __free(kfree);
2744 	unsigned char *cmd = scp->cmnd;
2745 	bool dbd, llbaa, msense_6, is_disk, is_zbc;
2746 
2747 	arr = kzalloc(SDEBUG_MAX_MSENSE_SZ, GFP_ATOMIC);
2748 	if (!arr)
2749 		return -ENOMEM;
2750 	dbd = !!(cmd[1] & 0x8);		/* disable block descriptors */
2751 	pcontrol = (cmd[2] & 0xc0) >> 6;
2752 	pcode = cmd[2] & 0x3f;
2753 	subpcode = cmd[3];
2754 	msense_6 = (MODE_SENSE == cmd[0]);
2755 	llbaa = msense_6 ? false : !!(cmd[1] & 0x10);
2756 	is_disk = (sdebug_ptype == TYPE_DISK);
2757 	is_zbc = devip->zoned;
2758 	if ((is_disk || is_zbc) && !dbd)
2759 		bd_len = llbaa ? 16 : 8;
2760 	else
2761 		bd_len = 0;
2762 	alloc_len = msense_6 ? cmd[4] : get_unaligned_be16(cmd + 7);
2763 	if (0x3 == pcontrol) {  /* Saving values not supported */
2764 		mk_sense_buffer(scp, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, 0);
2765 		return check_condition_result;
2766 	}
2767 	target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
2768 			(devip->target * 1000) - 3;
2769 	/* for disks+zbc set DPOFUA bit and clear write protect (WP) bit */
2770 	if (is_disk || is_zbc) {
2771 		dev_spec = 0x10;	/* =0x90 if WP=1 implies read-only */
2772 		if (sdebug_wp)
2773 			dev_spec |= 0x80;
2774 	} else
2775 		dev_spec = 0x0;
2776 	if (msense_6) {
2777 		arr[2] = dev_spec;
2778 		arr[3] = bd_len;
2779 		offset = 4;
2780 	} else {
2781 		arr[3] = dev_spec;
2782 		if (16 == bd_len)
2783 			arr[4] = 0x1;	/* set LONGLBA bit */
2784 		arr[7] = bd_len;	/* assume 255 or less */
2785 		offset = 8;
2786 	}
2787 	ap = arr + offset;
2788 	if ((bd_len > 0) && (!sdebug_capacity))
2789 		sdebug_capacity = get_sdebug_capacity();
2790 
2791 	if (8 == bd_len) {
2792 		if (sdebug_capacity > 0xfffffffe)
2793 			put_unaligned_be32(0xffffffff, ap + 0);
2794 		else
2795 			put_unaligned_be32(sdebug_capacity, ap + 0);
2796 		put_unaligned_be16(sdebug_sector_size, ap + 6);
2797 		offset += bd_len;
2798 		ap = arr + offset;
2799 	} else if (16 == bd_len) {
2800 		put_unaligned_be64((u64)sdebug_capacity, ap + 0);
2801 		put_unaligned_be32(sdebug_sector_size, ap + 12);
2802 		offset += bd_len;
2803 		ap = arr + offset;
2804 	}
2805 
2806 	/*
2807 	 * N.B. If len>0 before resp_*_pg() call, then form of that call should be:
2808 	 *        len += resp_*_pg(ap + len, pcontrol, target);
2809 	 */
2810 	switch (pcode) {
2811 	case 0x1:	/* Read-Write error recovery page, direct access */
2812 		if (subpcode > 0x0 && subpcode < 0xff)
2813 			goto bad_subpcode;
2814 		len = resp_err_recov_pg(ap, pcontrol, target);
2815 		offset += len;
2816 		break;
2817 	case 0x2:	/* Disconnect-Reconnect page, all devices */
2818 		if (subpcode > 0x0 && subpcode < 0xff)
2819 			goto bad_subpcode;
2820 		len = resp_disconnect_pg(ap, pcontrol, target);
2821 		offset += len;
2822 		break;
2823 	case 0x3:       /* Format device page, direct access */
2824 		if (subpcode > 0x0 && subpcode < 0xff)
2825 			goto bad_subpcode;
2826 		if (is_disk) {
2827 			len = resp_format_pg(ap, pcontrol, target);
2828 			offset += len;
2829 		} else {
2830 			goto bad_pcode;
2831 		}
2832 		break;
2833 	case 0x8:	/* Caching page, direct access */
2834 		if (subpcode > 0x0 && subpcode < 0xff)
2835 			goto bad_subpcode;
2836 		if (is_disk || is_zbc) {
2837 			len = resp_caching_pg(ap, pcontrol, target);
2838 			offset += len;
2839 		} else {
2840 			goto bad_pcode;
2841 		}
2842 		break;
2843 	case 0xa:	/* Control Mode page, all devices */
2844 		switch (subpcode) {
2845 		case 0:
2846 			len = resp_ctrl_m_pg(ap, pcontrol, target);
2847 			break;
2848 		case 0x05:
2849 			len = resp_grouping_m_pg(ap, pcontrol, target);
2850 			break;
2851 		case 0xff:
2852 			len = resp_ctrl_m_pg(ap, pcontrol, target);
2853 			len += resp_grouping_m_pg(ap + len, pcontrol, target);
2854 			break;
2855 		default:
2856 			goto bad_subpcode;
2857 		}
2858 		offset += len;
2859 		break;
2860 	case 0x19:	/* if spc==1 then sas phy, control+discover */
2861 		if (subpcode > 0x2 && subpcode < 0xff)
2862 			goto bad_subpcode;
2863 		len = 0;
2864 		if ((0x0 == subpcode) || (0xff == subpcode))
2865 			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2866 		if ((0x1 == subpcode) || (0xff == subpcode))
2867 			len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
2868 						  target_dev_id);
2869 		if ((0x2 == subpcode) || (0xff == subpcode))
2870 			len += resp_sas_sha_m_spg(ap + len, pcontrol);
2871 		offset += len;
2872 		break;
2873 	case 0x1c:	/* Informational Exceptions Mode page, all devices */
2874 		if (subpcode > 0x0 && subpcode < 0xff)
2875 			goto bad_subpcode;
2876 		len = resp_iec_m_pg(ap, pcontrol, target);
2877 		offset += len;
2878 		break;
2879 	case 0x3f:	/* Read all Mode pages */
2880 		if (subpcode > 0x0 && subpcode < 0xff)
2881 			goto bad_subpcode;
2882 		len = resp_err_recov_pg(ap, pcontrol, target);
2883 		len += resp_disconnect_pg(ap + len, pcontrol, target);
2884 		if (is_disk) {
2885 			len += resp_format_pg(ap + len, pcontrol, target);
2886 			len += resp_caching_pg(ap + len, pcontrol, target);
2887 		} else if (is_zbc) {
2888 			len += resp_caching_pg(ap + len, pcontrol, target);
2889 		}
2890 		len += resp_ctrl_m_pg(ap + len, pcontrol, target);
2891 		if (0xff == subpcode)
2892 			len += resp_grouping_m_pg(ap + len, pcontrol, target);
2893 		len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2894 		if (0xff == subpcode) {
2895 			len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
2896 						  target_dev_id);
2897 			len += resp_sas_sha_m_spg(ap + len, pcontrol);
2898 		}
2899 		len += resp_iec_m_pg(ap + len, pcontrol, target);
2900 		offset += len;
2901 		break;
2902 	default:
2903 		goto bad_pcode;
2904 	}
2905 	if (msense_6)
2906 		arr[0] = offset - 1;
2907 	else
2908 		put_unaligned_be16((offset - 2), arr + 0);
2909 	return fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, offset));
2910 
2911 bad_pcode:
2912 	mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2913 	return check_condition_result;
2914 
2915 bad_subpcode:
2916 	mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2917 	return check_condition_result;
2918 }
2919 
2920 #define SDEBUG_MAX_MSELECT_SZ 512
2921 
2922 static int resp_mode_select(struct scsi_cmnd *scp,
2923 			    struct sdebug_dev_info *devip)
2924 {
2925 	int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
2926 	int param_len, res, mpage;
2927 	unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
2928 	unsigned char *cmd = scp->cmnd;
2929 	int mselect6 = (MODE_SELECT == cmd[0]);
2930 
2931 	memset(arr, 0, sizeof(arr));
2932 	pf = cmd[1] & 0x10;
2933 	sp = cmd[1] & 0x1;
2934 	param_len = mselect6 ? cmd[4] : get_unaligned_be16(cmd + 7);
2935 	if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
2936 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1);
2937 		return check_condition_result;
2938 	}
2939 	res = fetch_to_dev_buffer(scp, arr, param_len);
2940 	if (-1 == res)
2941 		return DID_ERROR << 16;
2942 	else if (sdebug_verbose && (res < param_len))
2943 		sdev_printk(KERN_INFO, scp->device,
2944 			    "%s: cdb indicated=%d, IO sent=%d bytes\n",
2945 			    __func__, param_len, res);
2946 	md_len = mselect6 ? (arr[0] + 1) : (get_unaligned_be16(arr + 0) + 2);
2947 	bd_len = mselect6 ? arr[3] : get_unaligned_be16(arr + 6);
2948 	off = bd_len + (mselect6 ? 4 : 8);
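	/*
	 * In a MODE SELECT parameter list the MODE DATA LENGTH field is
	 * reserved, so a value above the header minimum is rejected below,
	 * as is a parameter list too short to reach the first mode page.
	 */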
2949 	if (md_len > 2 || off >= res) {
2950 		mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
2951 		return check_condition_result;
2952 	}
2953 	mpage = arr[off] & 0x3f;
2954 	ps = !!(arr[off] & 0x80);
2955 	if (ps) {
2956 		mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 7);
2957 		return check_condition_result;
2958 	}
2959 	spf = !!(arr[off] & 0x40);
2960 	pg_len = spf ? (get_unaligned_be16(arr + off + 2) + 4) :
2961 		       (arr[off + 1] + 2);
2962 	if ((pg_len + off) > param_len) {
2963 		mk_sense_buffer(scp, ILLEGAL_REQUEST,
2964 				PARAMETER_LIST_LENGTH_ERR, 0);
2965 		return check_condition_result;
2966 	}
2967 	switch (mpage) {
2968 	case 0x8:      /* Caching Mode page */
2969 		if (caching_pg[1] == arr[off + 1]) {
2970 			memcpy(caching_pg + 2, arr + off + 2,
2971 			       sizeof(caching_pg) - 2);
2972 			goto set_mode_changed_ua;
2973 		}
2974 		break;
2975 	case 0xa:      /* Control Mode page */
2976 		if (ctrl_m_pg[1] == arr[off + 1]) {
2977 			memcpy(ctrl_m_pg + 2, arr + off + 2,
2978 			       sizeof(ctrl_m_pg) - 2);
2979 			if (ctrl_m_pg[4] & 0x8)
2980 				sdebug_wp = true;
2981 			else
2982 				sdebug_wp = false;
2983 			sdebug_dsense = !!(ctrl_m_pg[2] & 0x4);
2984 			goto set_mode_changed_ua;
2985 		}
2986 		break;
2987 	case 0x1c:      /* Informational Exceptions Mode page */
2988 		if (iec_m_pg[1] == arr[off + 1]) {
2989 			memcpy(iec_m_pg + 2, arr + off + 2,
2990 			       sizeof(iec_m_pg) - 2);
2991 			goto set_mode_changed_ua;
2992 		}
2993 		break;
2994 	default:
2995 		break;
2996 	}
2997 	mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 5);
2998 	return check_condition_result;
2999 set_mode_changed_ua:
3000 	set_bit(SDEBUG_UA_MODE_CHANGED, devip->uas_bm);
3001 	return 0;
3002 }
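
/*
 * Illustrative example, not driver output: a minimal MODE SELECT(10)
 * data-out buffer updating the Caching mode page with no block
 * descriptors, as the parser above sees it (page length 0x12 assumed):
 *
 *	arr[0..1]   mode data length, left 0 (reserved for MODE SELECT)
 *	arr[2..7]   medium type, device-specific and bd_len fields, all 0
 *	arr[8]      0x08  page code 0x8 with the PS and SPF bits clear
 *	arr[9]      0x12  page length, so pg_len = 0x12 + 2 = 20
 *	arr[10..27] replacement caching page bytes
 *
 * Hence off = bd_len + 8 = 8 and the (pg_len + off) <= param_len check
 * needs a parameter list of at least 28 bytes.
 */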
3003 
3004 static int resp_temp_l_pg(unsigned char *arr)
3005 {
3006 	unsigned char temp_l_pg[] = {0x0, 0x0, 0x3, 0x2, 0x0, 38,
3007 				     0x0, 0x1, 0x3, 0x2, 0x0, 65,
3008 		};
3009 
3010 	memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
3011 	return sizeof(temp_l_pg);
3012 }
3013 
3014 static int resp_ie_l_pg(unsigned char *arr)
3015 {
3016 	unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
3017 		};
3018 
3019 	memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
3020 	if (iec_m_pg[2] & 0x4) {	/* TEST bit set */
3021 		arr[4] = THRESHOLD_EXCEEDED;
3022 		arr[5] = 0xff;
3023 	}
3024 	return sizeof(ie_l_pg);
3025 }
3026 
3027 static int resp_env_rep_l_spg(unsigned char *arr)
3028 {
3029 	unsigned char env_rep_l_spg[] = {0x0, 0x0, 0x23, 0x8,
3030 					 0x0, 40, 72, 0xff, 45, 18, 0, 0,
3031 					 0x1, 0x0, 0x23, 0x8,
3032 					 0x0, 55, 72, 35, 55, 45, 0, 0,
3033 		};
3034 
3035 	memcpy(arr, env_rep_l_spg, sizeof(env_rep_l_spg));
3036 	return sizeof(env_rep_l_spg);
3037 }
3038 
3039 #define SDEBUG_MAX_LSENSE_SZ 512
3040 
3041 static int resp_log_sense(struct scsi_cmnd *scp,
3042 			  struct sdebug_dev_info *devip)
3043 {
3044 	int ppc, sp, pcode, subpcode;
3045 	u32 alloc_len, len, n;
3046 	unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
3047 	unsigned char *cmd = scp->cmnd;
3048 
3049 	memset(arr, 0, sizeof(arr));
3050 	ppc = cmd[1] & 0x2;
3051 	sp = cmd[1] & 0x1;
3052 	if (ppc || sp) {
3053 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, ppc ? 1 : 0);
3054 		return check_condition_result;
3055 	}
3056 	pcode = cmd[2] & 0x3f;
3057 	subpcode = cmd[3] & 0xff;
3058 	alloc_len = get_unaligned_be16(cmd + 7);
3059 	arr[0] = pcode;
3060 	if (0 == subpcode) {
3061 		switch (pcode) {
3062 		case 0x0:	/* Supported log pages log page */
3063 			n = 4;
3064 			arr[n++] = 0x0;		/* this page */
3065 			arr[n++] = 0xd;		/* Temperature */
3066 			arr[n++] = 0x2f;	/* Informational exceptions */
3067 			arr[3] = n - 4;
3068 			break;
3069 		case 0xd:	/* Temperature log page */
3070 			arr[3] = resp_temp_l_pg(arr + 4);
3071 			break;
3072 		case 0x2f:	/* Informational exceptions log page */
3073 			arr[3] = resp_ie_l_pg(arr + 4);
3074 			break;
3075 		default:
3076 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
3077 			return check_condition_result;
3078 		}
3079 	} else if (0xff == subpcode) {
3080 		arr[0] |= 0x40;
3081 		arr[1] = subpcode;
3082 		switch (pcode) {
3083 		case 0x0:	/* Supported log pages and subpages log page */
3084 			n = 4;
3085 			arr[n++] = 0x0;
3086 			arr[n++] = 0x0;		/* 0,0 page */
3087 			arr[n++] = 0x0;
3088 			arr[n++] = 0xff;	/* this page */
3089 			arr[n++] = 0xd;
3090 			arr[n++] = 0x0;		/* Temperature */
3091 			arr[n++] = 0xd;
3092 			arr[n++] = 0x1;		/* Environment reporting */
3093 			arr[n++] = 0xd;
3094 			arr[n++] = 0xff;	/* all 0xd subpages */
3095 			arr[n++] = 0x2f;
3096 			arr[n++] = 0x0;	/* Informational exceptions */
3097 			arr[n++] = 0x2f;
3098 			arr[n++] = 0xff;	/* all 0x2f subpages */
3099 			arr[3] = n - 4;
3100 			break;
3101 		case 0xd:	/* Temperature subpages */
3102 			n = 4;
3103 			arr[n++] = 0xd;
3104 			arr[n++] = 0x0;		/* Temperature */
3105 			arr[n++] = 0xd;
3106 			arr[n++] = 0x1;		/* Environment reporting */
3107 			arr[n++] = 0xd;
3108 			arr[n++] = 0xff;	/* these subpages */
3109 			arr[3] = n - 4;
3110 			break;
3111 		case 0x2f:	/* Informational exceptions subpages */
3112 			n = 4;
3113 			arr[n++] = 0x2f;
3114 			arr[n++] = 0x0;		/* Informational exceptions */
3115 			arr[n++] = 0x2f;
3116 			arr[n++] = 0xff;	/* these subpages */
3117 			arr[3] = n - 4;
3118 			break;
3119 		default:
3120 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
3121 			return check_condition_result;
3122 		}
3123 	} else if (subpcode > 0) {
3124 		arr[0] |= 0x40;
3125 		arr[1] = subpcode;
3126 		if (pcode == 0xd && subpcode == 1)
3127 			arr[3] = resp_env_rep_l_spg(arr + 4);
3128 		else {
3129 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
3130 			return check_condition_result;
3131 		}
3132 	} else {
3133 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
3134 		return check_condition_result;
3135 	}
3136 	len = min_t(u32, get_unaligned_be16(arr + 2) + 4, alloc_len);
3137 	return fill_from_dev_buffer(scp, arr,
3138 		    min_t(u32, len, SDEBUG_MAX_INQ_ARR_SZ));
3139 }
3140 
3141 static inline bool sdebug_dev_is_zoned(struct sdebug_dev_info *devip)
3142 {
3143 	return devip->nr_zones != 0;
3144 }
3145 
3146 static struct sdeb_zone_state *zbc_zone(struct sdebug_dev_info *devip,
3147 					unsigned long long lba)
3148 {
3149 	u32 zno = lba >> devip->zsize_shift;
3150 	struct sdeb_zone_state *zsp;
3151 
3152 	if (devip->zcap == devip->zsize || zno < devip->nr_conv_zones)
3153 		return &devip->zstate[zno];
3154 
3155 	/*
3156 	 * If the zone capacity is less than the zone size, adjust for gap
3157 	 * zones.
3158 	 */
3159 	zno = 2 * zno - devip->nr_conv_zones;
3160 	WARN_ONCE(zno >= devip->nr_zones, "%u >= %u\n", zno, devip->nr_zones);
3161 	zsp = &devip->zstate[zno];
3162 	if (lba >= zsp->z_start + zsp->z_size)
3163 		zsp++;
3164 	WARN_ON_ONCE(lba >= zsp->z_start + zsp->z_size);
3165 	return zsp;
3166 }
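
/*
 * Worked example (illustrative) of the gap-zone arithmetic above: with
 * nr_conv_zones = 4 and zcap < zsize, zstate[] holds the conventional
 * zones at indices 0..3, then each sequential zone followed by its gap
 * zone. An LBA in device zone number 6 gives zno = 2 * 6 - 4 = 8, the
 * sequential entry; if the LBA lies at or beyond that entry's
 * z_start + z_size, zsp++ selects the trailing gap zone at index 9.
 */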
3167 
3168 static inline bool zbc_zone_is_conv(struct sdeb_zone_state *zsp)
3169 {
3170 	return zsp->z_type == ZBC_ZTYPE_CNV;
3171 }
3172 
3173 static inline bool zbc_zone_is_gap(struct sdeb_zone_state *zsp)
3174 {
3175 	return zsp->z_type == ZBC_ZTYPE_GAP;
3176 }
3177 
3178 static inline bool zbc_zone_is_seq(struct sdeb_zone_state *zsp)
3179 {
3180 	return !zbc_zone_is_conv(zsp) && !zbc_zone_is_gap(zsp);
3181 }
3182 
3183 static void zbc_close_zone(struct sdebug_dev_info *devip,
3184 			   struct sdeb_zone_state *zsp)
3185 {
3186 	enum sdebug_z_cond zc;
3187 
3188 	if (!zbc_zone_is_seq(zsp))
3189 		return;
3190 
3191 	zc = zsp->z_cond;
3192 	if (!(zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN))
3193 		return;
3194 
3195 	if (zc == ZC2_IMPLICIT_OPEN)
3196 		devip->nr_imp_open--;
3197 	else
3198 		devip->nr_exp_open--;
3199 
3200 	if (zsp->z_wp == zsp->z_start) {
3201 		zsp->z_cond = ZC1_EMPTY;
3202 	} else {
3203 		zsp->z_cond = ZC4_CLOSED;
3204 		devip->nr_closed++;
3205 	}
3206 }
3207 
3208 static void zbc_close_imp_open_zone(struct sdebug_dev_info *devip)
3209 {
3210 	struct sdeb_zone_state *zsp = &devip->zstate[0];
3211 	unsigned int i;
3212 
3213 	for (i = 0; i < devip->nr_zones; i++, zsp++) {
3214 		if (zsp->z_cond == ZC2_IMPLICIT_OPEN) {
3215 			zbc_close_zone(devip, zsp);
3216 			return;
3217 		}
3218 	}
3219 }
3220 
3221 static void zbc_open_zone(struct sdebug_dev_info *devip,
3222 			  struct sdeb_zone_state *zsp, bool explicit)
3223 {
3224 	enum sdebug_z_cond zc;
3225 
3226 	if (!zbc_zone_is_seq(zsp))
3227 		return;
3228 
3229 	zc = zsp->z_cond;
3230 	if ((explicit && zc == ZC3_EXPLICIT_OPEN) ||
3231 	    (!explicit && zc == ZC2_IMPLICIT_OPEN))
3232 		return;
3233 
3234 	/* Close an implicit open zone if necessary */
3235 	if (explicit && zsp->z_cond == ZC2_IMPLICIT_OPEN)
3236 		zbc_close_zone(devip, zsp);
3237 	else if (devip->max_open &&
3238 		 devip->nr_imp_open + devip->nr_exp_open >= devip->max_open)
3239 		zbc_close_imp_open_zone(devip);
3240 
3241 	if (zsp->z_cond == ZC4_CLOSED)
3242 		devip->nr_closed--;
3243 	if (explicit) {
3244 		zsp->z_cond = ZC3_EXPLICIT_OPEN;
3245 		devip->nr_exp_open++;
3246 	} else {
3247 		zsp->z_cond = ZC2_IMPLICIT_OPEN;
3248 		devip->nr_imp_open++;
3249 	}
3250 }
3251 
3252 static inline void zbc_set_zone_full(struct sdebug_dev_info *devip,
3253 				     struct sdeb_zone_state *zsp)
3254 {
3255 	switch (zsp->z_cond) {
3256 	case ZC2_IMPLICIT_OPEN:
3257 		devip->nr_imp_open--;
3258 		break;
3259 	case ZC3_EXPLICIT_OPEN:
3260 		devip->nr_exp_open--;
3261 		break;
3262 	default:
3263 		WARN_ONCE(true, "Invalid zone %llu condition %x\n",
3264 			  zsp->z_start, zsp->z_cond);
3265 		break;
3266 	}
3267 	zsp->z_cond = ZC5_FULL;
3268 }
3269 
3270 static void zbc_inc_wp(struct sdebug_dev_info *devip,
3271 		       unsigned long long lba, unsigned int num)
3272 {
3273 	struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
3274 	unsigned long long n, end, zend = zsp->z_start + zsp->z_size;
3275 
3276 	if (!zbc_zone_is_seq(zsp))
3277 		return;
3278 
3279 	if (zsp->z_type == ZBC_ZTYPE_SWR) {
3280 		zsp->z_wp += num;
3281 		if (zsp->z_wp >= zend)
3282 			zbc_set_zone_full(devip, zsp);
3283 		return;
3284 	}
3285 
3286 	while (num) {
3287 		if (lba != zsp->z_wp)
3288 			zsp->z_non_seq_resource = true;
3289 
3290 		end = lba + num;
3291 		if (end >= zend) {
3292 			n = zend - lba;
3293 			zsp->z_wp = zend;
3294 		} else if (end > zsp->z_wp) {
3295 			n = num;
3296 			zsp->z_wp = end;
3297 		} else {
3298 			n = num;
3299 		}
3300 		if (zsp->z_wp >= zend)
3301 			zbc_set_zone_full(devip, zsp);
3302 
3303 		num -= n;
3304 		lba += n;
3305 		if (num) {
3306 			zsp++;
3307 			zend = zsp->z_start + zsp->z_size;
3308 		}
3309 	}
3310 }
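
/*
 * Illustrative example for the sequential-write-preferred loop above:
 * with z_wp at z_start + 8, a 4-block write at z_start + 2 marks the
 * zone as a non-sequential resource and leaves the write pointer alone
 * (end = z_start + 6 stays below z_wp), whereas the same write issued
 * at z_start + 8 advances z_wp to z_start + 12.
 */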
3311 
3312 static int check_zbc_access_params(struct scsi_cmnd *scp,
3313 			unsigned long long lba, unsigned int num, bool write)
3314 {
3315 	struct scsi_device *sdp = scp->device;
3316 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
3317 	struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
3318 	struct sdeb_zone_state *zsp_end = zbc_zone(devip, lba + num - 1);
3319 
3320 	if (!write) {
3321 		/* For host-managed, reads cannot cross zone type boundaries */
3322 		if (zsp->z_type != zsp_end->z_type) {
3323 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
3324 					LBA_OUT_OF_RANGE,
3325 					READ_INVDATA_ASCQ);
3326 			return check_condition_result;
3327 		}
3328 		return 0;
3329 	}
3330 
3331 	/* Writing into a gap zone is not allowed */
3332 	if (zbc_zone_is_gap(zsp)) {
3333 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE,
3334 				ATTEMPT_ACCESS_GAP);
3335 		return check_condition_result;
3336 	}
3337 
3338 	/* No restrictions for writes within conventional zones */
3339 	if (zbc_zone_is_conv(zsp)) {
3340 		if (!zbc_zone_is_conv(zsp_end)) {
3341 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
3342 					LBA_OUT_OF_RANGE,
3343 					WRITE_BOUNDARY_ASCQ);
3344 			return check_condition_result;
3345 		}
3346 		return 0;
3347 	}
3348 
3349 	if (zsp->z_type == ZBC_ZTYPE_SWR) {
3350 		/* Writes cannot cross sequential zone boundaries */
3351 		if (zsp_end != zsp) {
3352 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
3353 					LBA_OUT_OF_RANGE,
3354 					WRITE_BOUNDARY_ASCQ);
3355 			return check_condition_result;
3356 		}
3357 		/* Cannot write full zones */
3358 		if (zsp->z_cond == ZC5_FULL) {
3359 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
3360 					INVALID_FIELD_IN_CDB, 0);
3361 			return check_condition_result;
3362 		}
3363 		/* Writes must be aligned to the zone WP */
3364 		if (lba != zsp->z_wp) {
3365 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
3366 					LBA_OUT_OF_RANGE,
3367 					UNALIGNED_WRITE_ASCQ);
3368 			return check_condition_result;
3369 		}
3370 	}
3371 
3372 	/* Handle implicit open of closed and empty zones */
3373 	if (zsp->z_cond == ZC1_EMPTY || zsp->z_cond == ZC4_CLOSED) {
3374 		if (devip->max_open &&
3375 		    devip->nr_exp_open >= devip->max_open) {
3376 			mk_sense_buffer(scp, DATA_PROTECT,
3377 					INSUFF_RES_ASC,
3378 					INSUFF_ZONE_ASCQ);
3379 			return check_condition_result;
3380 		}
3381 		zbc_open_zone(devip, zsp, false);
3382 	}
3383 
3384 	return 0;
3385 }
3386 
3387 static inline int check_device_access_params
3388 			(struct scsi_cmnd *scp, unsigned long long lba,
3389 			 unsigned int num, bool write)
3390 {
3391 	struct scsi_device *sdp = scp->device;
3392 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
3393 
3394 	if (lba + num > sdebug_capacity) {
3395 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
3396 		return check_condition_result;
3397 	}
3398 	/* transfer length excessive (tie in to block limits VPD page) */
3399 	if (num > sdebug_store_sectors) {
3400 		/* needs work to find which cdb byte 'num' comes from */
3401 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3402 		return check_condition_result;
3403 	}
3404 	if (write && unlikely(sdebug_wp)) {
3405 		mk_sense_buffer(scp, DATA_PROTECT, WRITE_PROTECTED, 0x2);
3406 		return check_condition_result;
3407 	}
3408 	if (sdebug_dev_is_zoned(devip))
3409 		return check_zbc_access_params(scp, lba, num, write);
3410 
3411 	return 0;
3412 }
3413 
3414 /*
3415  * Note: if BUG_ON() fires it usually indicates a problem with the parser
3416  * tables. Perhaps a missing F_FAKE_RW or FF_MEDIA_IO flag. Response functions
3417  * that access any of the "stores" in struct sdeb_store_info should call this
3418  * function with bug_if_fake_rw set to true.
3419  */
3420 static inline struct sdeb_store_info *devip2sip(struct sdebug_dev_info *devip,
3421 						bool bug_if_fake_rw)
3422 {
3423 	if (sdebug_fake_rw) {
3424 		BUG_ON(bug_if_fake_rw);	/* See note above */
3425 		return NULL;
3426 	}
3427 	return xa_load(per_store_ap, devip->sdbg_host->si_idx);
3428 }
3429 
3430 static inline void
3431 sdeb_read_lock(rwlock_t *lock)
3432 {
3433 	if (sdebug_no_rwlock)
3434 		__acquire(lock);
3435 	else
3436 		read_lock(lock);
3437 }
3438 
3439 static inline void
3440 sdeb_read_unlock(rwlock_t *lock)
3441 {
3442 	if (sdebug_no_rwlock)
3443 		__release(lock);
3444 	else
3445 		read_unlock(lock);
3446 }
3447 
3448 static inline void
3449 sdeb_write_lock(rwlock_t *lock)
3450 {
3451 	if (sdebug_no_rwlock)
3452 		__acquire(lock);
3453 	else
3454 		write_lock(lock);
3455 }
3456 
3457 static inline void
3458 sdeb_write_unlock(rwlock_t *lock)
3459 {
3460 	if (sdebug_no_rwlock)
3461 		__release(lock);
3462 	else
3463 		write_unlock(lock);
3464 }
3465 
3466 static inline void
3467 sdeb_data_read_lock(struct sdeb_store_info *sip)
3468 {
3469 	BUG_ON(!sip);
3470 
3471 	sdeb_read_lock(&sip->macc_data_lck);
3472 }
3473 
3474 static inline void
3475 sdeb_data_read_unlock(struct sdeb_store_info *sip)
3476 {
3477 	BUG_ON(!sip);
3478 
3479 	sdeb_read_unlock(&sip->macc_data_lck);
3480 }
3481 
3482 static inline void
3483 sdeb_data_write_lock(struct sdeb_store_info *sip)
3484 {
3485 	BUG_ON(!sip);
3486 
3487 	sdeb_write_lock(&sip->macc_data_lck);
3488 }
3489 
3490 static inline void
3491 sdeb_data_write_unlock(struct sdeb_store_info *sip)
3492 {
3493 	BUG_ON(!sip);
3494 
3495 	sdeb_write_unlock(&sip->macc_data_lck);
3496 }
3497 
3498 static inline void
3499 sdeb_data_sector_read_lock(struct sdeb_store_info *sip)
3500 {
3501 	BUG_ON(!sip);
3502 
3503 	sdeb_read_lock(&sip->macc_sector_lck);
3504 }
3505 
3506 static inline void
3507 sdeb_data_sector_read_unlock(struct sdeb_store_info *sip)
3508 {
3509 	BUG_ON(!sip);
3510 
3511 	sdeb_read_unlock(&sip->macc_sector_lck);
3512 }
3513 
3514 static inline void
3515 sdeb_data_sector_write_lock(struct sdeb_store_info *sip)
3516 {
3517 	BUG_ON(!sip);
3518 
3519 	sdeb_write_lock(&sip->macc_sector_lck);
3520 }
3521 
3522 static inline void
3523 sdeb_data_sector_write_unlock(struct sdeb_store_info *sip)
3524 {
3525 	BUG_ON(!sip);
3526 
3527 	sdeb_write_unlock(&sip->macc_sector_lck);
3528 }
3529 
3530 /*
3531  * Atomic locking:
3532  * We simplify the atomic model to allow only 1x atomic write and many non-
3533  * atomic reads or writes for all LBAs.
3534  *
3535  * A RW lock has a similar behaviour:
3536  * Only 1x writer and many readers.
3537  *
3538  * So use a RW lock for per-device read and write locking:
3539  * An atomic access grabs the lock as a writer and non-atomic grabs the lock
3540  * as a reader.
3541  */
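
/*
 * Minimal usage sketch for the helpers below (illustrative only):
 *
 *	sdeb_data_lock(sip, true);	// atomic write: exclusive
 *	...copy the whole range...
 *	sdeb_data_unlock(sip, true);
 *
 *	sdeb_data_lock(sip, false);	// normal read/write: shared
 *	...copy sector by sector...
 *	sdeb_data_unlock(sip, false);
 *
 * Many non-atomic accesses may hold the lock concurrently; an atomic
 * write excludes them all for its duration.
 */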
3542 
3543 static inline void
3544 sdeb_data_lock(struct sdeb_store_info *sip, bool atomic)
3545 {
3546 	if (atomic)
3547 		sdeb_data_write_lock(sip);
3548 	else
3549 		sdeb_data_read_lock(sip);
3550 }
3551 
3552 static inline void
3553 sdeb_data_unlock(struct sdeb_store_info *sip, bool atomic)
3554 {
3555 	if (atomic)
3556 		sdeb_data_write_unlock(sip);
3557 	else
3558 		sdeb_data_read_unlock(sip);
3559 }
3560 
3561 /* Allow many reads but only 1x write per sector */
3562 static inline void
3563 sdeb_data_sector_lock(struct sdeb_store_info *sip, bool do_write)
3564 {
3565 	if (do_write)
3566 		sdeb_data_sector_write_lock(sip);
3567 	else
3568 		sdeb_data_sector_read_lock(sip);
3569 }
3570 
3571 static inline void
3572 sdeb_data_sector_unlock(struct sdeb_store_info *sip, bool do_write)
3573 {
3574 	if (do_write)
3575 		sdeb_data_sector_write_unlock(sip);
3576 	else
3577 		sdeb_data_sector_read_unlock(sip);
3578 }
3579 
3580 static inline void
3581 sdeb_meta_read_lock(struct sdeb_store_info *sip)
3582 {
3583 	if (sdebug_no_rwlock) {
3584 		if (sip)
3585 			__acquire(&sip->macc_meta_lck);
3586 		else
3587 			__acquire(&sdeb_fake_rw_lck);
3588 	} else {
3589 		if (sip)
3590 			read_lock(&sip->macc_meta_lck);
3591 		else
3592 			read_lock(&sdeb_fake_rw_lck);
3593 	}
3594 }
3595 
3596 static inline void
3597 sdeb_meta_read_unlock(struct sdeb_store_info *sip)
3598 {
3599 	if (sdebug_no_rwlock) {
3600 		if (sip)
3601 			__release(&sip->macc_meta_lck);
3602 		else
3603 			__release(&sdeb_fake_rw_lck);
3604 	} else {
3605 		if (sip)
3606 			read_unlock(&sip->macc_meta_lck);
3607 		else
3608 			read_unlock(&sdeb_fake_rw_lck);
3609 	}
3610 }
3611 
3612 static inline void
3613 sdeb_meta_write_lock(struct sdeb_store_info *sip)
3614 {
3615 	if (sdebug_no_rwlock) {
3616 		if (sip)
3617 			__acquire(&sip->macc_meta_lck);
3618 		else
3619 			__acquire(&sdeb_fake_rw_lck);
3620 	} else {
3621 		if (sip)
3622 			write_lock(&sip->macc_meta_lck);
3623 		else
3624 			write_lock(&sdeb_fake_rw_lck);
3625 	}
3626 }
3627 
3628 static inline void
3629 sdeb_meta_write_unlock(struct sdeb_store_info *sip)
3630 {
3631 	if (sdebug_no_rwlock) {
3632 		if (sip)
3633 			__release(&sip->macc_meta_lck);
3634 		else
3635 			__release(&sdeb_fake_rw_lck);
3636 	} else {
3637 		if (sip)
3638 			write_unlock(&sip->macc_meta_lck);
3639 		else
3640 			write_unlock(&sdeb_fake_rw_lck);
3641 	}
3642 }
3643 
3644 /* Returns number of bytes copied or -1 if error. */
3645 static int do_device_access(struct sdeb_store_info *sip, struct scsi_cmnd *scp,
3646 			    u32 sg_skip, u64 lba, u32 num, u8 group_number,
3647 			    bool do_write, bool atomic)
3648 {
3649 	int ret;
3650 	u64 block;
3651 	enum dma_data_direction dir;
3652 	struct scsi_data_buffer *sdb = &scp->sdb;
3653 	u8 *fsp;
3654 	int i, total = 0;
3655 
3656 	/*
3657 	 * Even though reads are inherently atomic (in this driver), we expect
3658 	 * the atomic flag only for writes.
3659 	 */
3660 	if (!do_write && atomic)
3661 		return -1;
3662 
3663 	if (do_write) {
3664 		dir = DMA_TO_DEVICE;
3665 		write_since_sync = true;
3666 	} else {
3667 		dir = DMA_FROM_DEVICE;
3668 	}
3669 
3670 	if (!sdb->length || !sip)
3671 		return 0;
3672 	if (scp->sc_data_direction != dir)
3673 		return -1;
3674 
3675 	if (do_write && group_number < ARRAY_SIZE(writes_by_group_number))
3676 		atomic_long_inc(&writes_by_group_number[group_number]);
3677 
3678 	fsp = sip->storep;
3679 
3680 	block = do_div(lba, sdebug_store_sectors);
3681 
3682 	/* Only allow 1x atomic write or multiple non-atomic writes at any given time */
3683 	sdeb_data_lock(sip, atomic);
3684 	for (i = 0; i < num; i++) {
3685 		/* We shouldn't need to lock for atomic writes, but do it anyway */
3686 		sdeb_data_sector_lock(sip, do_write);
3687 		ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
3688 		   fsp + (block * sdebug_sector_size),
3689 		   sdebug_sector_size, sg_skip, do_write);
3690 		sdeb_data_sector_unlock(sip, do_write);
3691 		total += ret;
3692 		if (ret != sdebug_sector_size)
3693 			break;
3694 		sg_skip += sdebug_sector_size;
3695 		if (++block >= sdebug_store_sectors)
3696 			block = 0;
3697 	}
3698 	sdeb_data_unlock(sip, atomic);
3699 
3700 	return total;
3701 }
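
/*
 * Illustrative note on the addressing above: do_div() leaves the
 * remainder in 'block', so the store is addressed modulo
 * sdebug_store_sectors and the copy loop wraps to sector 0 off the
 * end. For example, assuming sdebug_store_sectors = 2048, a 4-sector
 * access at lba 2046 touches store sectors 2046, 2047, 0 and 1.
 */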
3702 
3703 /* Returns number of bytes copied or -1 if error. */
3704 static int do_dout_fetch(struct scsi_cmnd *scp, u32 num, u8 *doutp)
3705 {
3706 	struct scsi_data_buffer *sdb = &scp->sdb;
3707 
3708 	if (!sdb->length)
3709 		return 0;
3710 	if (scp->sc_data_direction != DMA_TO_DEVICE)
3711 		return -1;
3712 	return sg_copy_buffer(sdb->table.sgl, sdb->table.nents, doutp,
3713 			      num * sdebug_sector_size, 0, true);
3714 }
3715 
3716 /* If the first num blocks of the store at sip->storep+lba compare equal
3717  * to arr, then copy the top half of arr into sip->storep+lba and return
3718  * true. If the comparison fails then return false. */
3719 static bool comp_write_worker(struct sdeb_store_info *sip, u64 lba, u32 num,
3720 			      const u8 *arr, bool compare_only)
3721 {
3722 	bool res;
3723 	u64 block, rest = 0;
3724 	u32 store_blks = sdebug_store_sectors;
3725 	u32 lb_size = sdebug_sector_size;
3726 	u8 *fsp = sip->storep;
3727 
3728 	block = do_div(lba, store_blks);
3729 	if (block + num > store_blks)
3730 		rest = block + num - store_blks;
3731 
3732 	res = !memcmp(fsp + (block * lb_size), arr, (num - rest) * lb_size);
3733 	if (!res)
3734 		return res;
3735 	if (rest)
3736 		res = !memcmp(fsp, arr + ((num - rest) * lb_size),
3737 			      rest * lb_size);
3738 	if (!res)
3739 		return res;
3740 	if (compare_only)
3741 		return true;
3742 	arr += num * lb_size;
3743 	memcpy(fsp + (block * lb_size), arr, (num - rest) * lb_size);
3744 	if (rest)
3745 		memcpy(fsp, arr + ((num - rest) * lb_size), rest * lb_size);
3746 	return res;
3747 }
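
/*
 * Illustrative layout of 'arr' as consumed above (sizes assumed): for
 * num = 4 and 512-byte sectors the caller supplies 2 * num = 8 blocks;
 * bytes 0..2047 hold the verify data compared against the store and
 * bytes 2048..4095 hold the new data written back on a full match.
 */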
3748 
3749 static __be16 dif_compute_csum(const void *buf, int len)
3750 {
3751 	__be16 csum;
3752 
3753 	if (sdebug_guard)
3754 		csum = (__force __be16)ip_compute_csum(buf, len);
3755 	else
3756 		csum = cpu_to_be16(crc_t10dif(buf, len));
3757 
3758 	return csum;
3759 }
3760 
3761 static int dif_verify(struct t10_pi_tuple *sdt, const void *data,
3762 		      sector_t sector, u32 ei_lba)
3763 {
3764 	__be16 csum = dif_compute_csum(data, sdebug_sector_size);
3765 
3766 	if (sdt->guard_tag != csum) {
3767 		pr_err("GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
3768 			(unsigned long)sector,
3769 			be16_to_cpu(sdt->guard_tag),
3770 			be16_to_cpu(csum));
3771 		return 0x01;
3772 	}
3773 	if (sdebug_dif == T10_PI_TYPE1_PROTECTION &&
3774 	    be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
3775 		pr_err("REF check failed on sector %lu\n",
3776 			(unsigned long)sector);
3777 		return 0x03;
3778 	}
3779 	if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3780 	    be32_to_cpu(sdt->ref_tag) != ei_lba) {
3781 		pr_err("REF check failed on sector %lu\n",
3782 			(unsigned long)sector);
3783 		return 0x03;
3784 	}
3785 	return 0;
3786 }
3787 
3788 static void dif_copy_prot(struct scsi_cmnd *scp, sector_t sector,
3789 			  unsigned int sectors, bool read)
3790 {
3791 	size_t resid;
3792 	void *paddr;
3793 	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3794 						scp->device->hostdata, true);
3795 	struct t10_pi_tuple *dif_storep = sip->dif_storep;
3796 	const void *dif_store_end = dif_storep + sdebug_store_sectors;
3797 	struct sg_mapping_iter miter;
3798 
3799 	/* Bytes of protection data to copy into sgl */
3800 	resid = sectors * sizeof(*dif_storep);
3801 
3802 	sg_miter_start(&miter, scsi_prot_sglist(scp),
3803 		       scsi_prot_sg_count(scp), SG_MITER_ATOMIC |
3804 		       (read ? SG_MITER_TO_SG : SG_MITER_FROM_SG));
3805 
3806 	while (sg_miter_next(&miter) && resid > 0) {
3807 		size_t len = min_t(size_t, miter.length, resid);
3808 		void *start = dif_store(sip, sector);
3809 		size_t rest = 0;
3810 
3811 		if (dif_store_end < start + len)
3812 			rest = start + len - dif_store_end;
3813 
3814 		paddr = miter.addr;
3815 
3816 		if (read)
3817 			memcpy(paddr, start, len - rest);
3818 		else
3819 			memcpy(start, paddr, len - rest);
3820 
3821 		if (rest) {
3822 			if (read)
3823 				memcpy(paddr + len - rest, dif_storep, rest);
3824 			else
3825 				memcpy(dif_storep, paddr + len - rest, rest);
3826 		}
3827 
3828 		sector += len / sizeof(*dif_storep);
3829 		resid -= len;
3830 	}
3831 	sg_miter_stop(&miter);
3832 }
3833 
3834 static int prot_verify_read(struct scsi_cmnd *scp, sector_t start_sec,
3835 			    unsigned int sectors, u32 ei_lba)
3836 {
3837 	int ret = 0;
3838 	unsigned int i;
3839 	sector_t sector;
3840 	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3841 						scp->device->hostdata, true);
3842 	struct t10_pi_tuple *sdt;
3843 
3844 	for (i = 0; i < sectors; i++, ei_lba++) {
3845 		sector = start_sec + i;
3846 		sdt = dif_store(sip, sector);
3847 
3848 		if (sdt->app_tag == cpu_to_be16(0xffff))
3849 			continue;
3850 
3851 		/*
3852 		 * Because scsi_debug acts as both initiator and
3853 		 * target we proceed to verify the PI even if
3854 		 * RDPROTECT=3. This is done so the "initiator" knows
3855 		 * which type of error to return. Otherwise we would
3856 		 * have to iterate over the PI twice.
3857 		 */
3858 		if (scp->cmnd[1] >> 5) { /* RDPROTECT */
3859 			ret = dif_verify(sdt, lba2fake_store(sip, sector),
3860 					 sector, ei_lba);
3861 			if (ret) {
3862 				dif_errors++;
3863 				break;
3864 			}
3865 		}
3866 	}
3867 
3868 	dif_copy_prot(scp, start_sec, sectors, true);
3869 	dix_reads++;
3870 
3871 	return ret;
3872 }
3873 
3874 static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3875 {
3876 	bool check_prot;
3877 	u32 num;
3878 	u32 ei_lba;
3879 	int ret;
3880 	u64 lba;
3881 	struct sdeb_store_info *sip = devip2sip(devip, true);
3882 	u8 *cmd = scp->cmnd;
3883 	bool meta_data_locked = false;
3884 
3885 	switch (cmd[0]) {
3886 	case READ_16:
3887 		ei_lba = 0;
3888 		lba = get_unaligned_be64(cmd + 2);
3889 		num = get_unaligned_be32(cmd + 10);
3890 		check_prot = true;
3891 		break;
3892 	case READ_10:
3893 		ei_lba = 0;
3894 		lba = get_unaligned_be32(cmd + 2);
3895 		num = get_unaligned_be16(cmd + 7);
3896 		check_prot = true;
3897 		break;
3898 	case READ_6:
3899 		ei_lba = 0;
3900 		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
3901 		      (u32)(cmd[1] & 0x1f) << 16;
3902 		num = (0 == cmd[4]) ? 256 : cmd[4];
3903 		check_prot = true;
3904 		break;
3905 	case READ_12:
3906 		ei_lba = 0;
3907 		lba = get_unaligned_be32(cmd + 2);
3908 		num = get_unaligned_be32(cmd + 6);
3909 		check_prot = true;
3910 		break;
3911 	case XDWRITEREAD_10:
3912 		ei_lba = 0;
3913 		lba = get_unaligned_be32(cmd + 2);
3914 		num = get_unaligned_be16(cmd + 7);
3915 		check_prot = false;
3916 		break;
3917 	default:	/* assume READ(32) */
3918 		lba = get_unaligned_be64(cmd + 12);
3919 		ei_lba = get_unaligned_be32(cmd + 20);
3920 		num = get_unaligned_be32(cmd + 28);
3921 		check_prot = false;
3922 		break;
3923 	}
3924 	if (unlikely(have_dif_prot && check_prot)) {
3925 		if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3926 		    (cmd[1] & 0xe0)) {
3927 			mk_sense_invalid_opcode(scp);
3928 			return check_condition_result;
3929 		}
3930 		if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3931 		     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3932 		    (cmd[1] & 0xe0) == 0)
3933 			sdev_printk(KERN_ERR, scp->device, "Unprotected RD "
3934 				    "to DIF device\n");
3935 	}
3936 	if (unlikely((sdebug_opts & SDEBUG_OPT_SHORT_TRANSFER) &&
3937 		     atomic_read(&sdeb_inject_pending))) {
3938 		num /= 2;
3939 		atomic_set(&sdeb_inject_pending, 0);
3940 	}
3941 
3942 	/*
3943 	 * When checking device access params, for reads we only check data
3944 	 * versus what is set at init time, so no need to lock.
3945 	 */
3946 	ret = check_device_access_params(scp, lba, num, false);
3947 	if (ret)
3948 		return ret;
3949 	if (unlikely((SDEBUG_OPT_MEDIUM_ERR & sdebug_opts) &&
3950 		     (lba <= (sdebug_medium_error_start + sdebug_medium_error_count - 1)) &&
3951 		     ((lba + num) > sdebug_medium_error_start))) {
3952 		/* claim unrecoverable read error */
3953 		mk_sense_buffer(scp, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
3954 		/* set info field and valid bit for fixed descriptor */
3955 		if (0x70 == (scp->sense_buffer[0] & 0x7f)) {
3956 			scp->sense_buffer[0] |= 0x80;	/* Valid bit */
3957 			ret = (lba < OPT_MEDIUM_ERR_ADDR)
3958 			      ? OPT_MEDIUM_ERR_ADDR : (int)lba;
3959 			put_unaligned_be32(ret, scp->sense_buffer + 3);
3960 		}
3961 		scsi_set_resid(scp, scsi_bufflen(scp));
3962 		return check_condition_result;
3963 	}
3964 
3965 	if (sdebug_dev_is_zoned(devip) ||
3966 	    (sdebug_dix && scsi_prot_sg_count(scp)))  {
3967 		sdeb_meta_read_lock(sip);
3968 		meta_data_locked = true;
3969 	}
3970 
3971 	/* DIX + T10 DIF */
3972 	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3973 		switch (prot_verify_read(scp, lba, num, ei_lba)) {
3974 		case 1: /* Guard tag error */
3975 			if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */
3976 				sdeb_meta_read_unlock(sip);
3977 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3978 				return check_condition_result;
3979 			} else if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) {
3980 				sdeb_meta_read_unlock(sip);
3981 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3982 				return illegal_condition_result;
3983 			}
3984 			break;
3985 		case 3: /* Reference tag error */
3986 			if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */
3987 				sdeb_meta_read_unlock(sip);
3988 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3);
3989 				return check_condition_result;
3990 			} else if (scp->prot_flags & SCSI_PROT_REF_CHECK) {
3991 				sdeb_meta_read_unlock(sip);
3992 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3);
3993 				return illegal_condition_result;
3994 			}
3995 			break;
3996 		}
3997 	}
3998 
3999 	ret = do_device_access(sip, scp, 0, lba, num, 0, false, false);
4000 	if (meta_data_locked)
4001 		sdeb_meta_read_unlock(sip);
4002 	if (unlikely(ret == -1))
4003 		return DID_ERROR << 16;
4004 
4005 	scsi_set_resid(scp, scsi_bufflen(scp) - ret);
4006 
4007 	if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
4008 		     atomic_read(&sdeb_inject_pending))) {
4009 		if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
4010 			mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
4011 			atomic_set(&sdeb_inject_pending, 0);
4012 			return check_condition_result;
4013 		} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
4014 			/* Logical block guard check failed */
4015 			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
4016 			atomic_set(&sdeb_inject_pending, 0);
4017 			return illegal_condition_result;
4018 		} else if (SDEBUG_OPT_DIX_ERR & sdebug_opts) {
4019 			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
4020 			atomic_set(&sdeb_inject_pending, 0);
4021 			return illegal_condition_result;
4022 		}
4023 	}
4024 	return 0;
4025 }
4026 
4027 static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
4028 			     unsigned int sectors, u32 ei_lba)
4029 {
4030 	int ret;
4031 	struct t10_pi_tuple *sdt;
4032 	void *daddr;
4033 	sector_t sector = start_sec;
4034 	int ppage_offset;
4035 	int dpage_offset;
4036 	struct sg_mapping_iter diter;
4037 	struct sg_mapping_iter piter;
4038 
4039 	BUG_ON(scsi_sg_count(SCpnt) == 0);
4040 	BUG_ON(scsi_prot_sg_count(SCpnt) == 0);
4041 
4042 	sg_miter_start(&piter, scsi_prot_sglist(SCpnt),
4043 			scsi_prot_sg_count(SCpnt),
4044 			SG_MITER_ATOMIC | SG_MITER_FROM_SG);
4045 	sg_miter_start(&diter, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
4046 			SG_MITER_ATOMIC | SG_MITER_FROM_SG);
4047 
4048 	/* For each protection page */
4049 	while (sg_miter_next(&piter)) {
4050 		dpage_offset = 0;
4051 		if (WARN_ON(!sg_miter_next(&diter))) {
4052 			ret = 0x01;
4053 			goto out;
4054 		}
4055 
4056 		for (ppage_offset = 0; ppage_offset < piter.length;
4057 		     ppage_offset += sizeof(struct t10_pi_tuple)) {
4058 			/* If we're at the end of the current
4059 			 * data page advance to the next one
4060 			 */
4061 			if (dpage_offset >= diter.length) {
4062 				if (WARN_ON(!sg_miter_next(&diter))) {
4063 					ret = 0x01;
4064 					goto out;
4065 				}
4066 				dpage_offset = 0;
4067 			}
4068 
4069 			sdt = piter.addr + ppage_offset;
4070 			daddr = diter.addr + dpage_offset;
4071 
4072 			if (SCpnt->cmnd[1] >> 5 != 3) { /* WRPROTECT */
4073 				ret = dif_verify(sdt, daddr, sector, ei_lba);
4074 				if (ret)
4075 					goto out;
4076 			}
4077 
4078 			sector++;
4079 			ei_lba++;
4080 			dpage_offset += sdebug_sector_size;
4081 		}
4082 		diter.consumed = dpage_offset;
4083 		sg_miter_stop(&diter);
4084 	}
4085 	sg_miter_stop(&piter);
4086 
4087 	dif_copy_prot(SCpnt, start_sec, sectors, false);
4088 	dix_writes++;
4089 
4090 	return 0;
4091 
4092 out:
4093 	dif_errors++;
4094 	sg_miter_stop(&diter);
4095 	sg_miter_stop(&piter);
4096 	return ret;
4097 }
4098 
4099 static unsigned long lba_to_map_index(sector_t lba)
4100 {
4101 	if (sdebug_unmap_alignment)
4102 		lba += sdebug_unmap_granularity - sdebug_unmap_alignment;
4103 	sector_div(lba, sdebug_unmap_granularity);
4104 	return lba;
4105 }
4106 
4107 static sector_t map_index_to_lba(unsigned long index)
4108 {
4109 	sector_t lba = index * sdebug_unmap_granularity;
4110 
4111 	if (sdebug_unmap_alignment)
4112 		lba -= sdebug_unmap_granularity - sdebug_unmap_alignment;
4113 	return lba;
4114 }
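
/*
 * Worked example (illustrative, parameters assumed): with
 * sdebug_unmap_granularity = 4 and sdebug_unmap_alignment = 1,
 * lba_to_map_index() maps LBA 0 to index 0, LBAs 1..4 to index 1 and
 * LBAs 5..8 to index 2; map_index_to_lba(1) returns 1, the first LBA
 * of that grain, so the two helpers round-trip on grain boundaries.
 */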
4115 
4116 static unsigned int map_state(struct sdeb_store_info *sip, sector_t lba,
4117 			      unsigned int *num)
4118 {
4119 	sector_t end;
4120 	unsigned int mapped;
4121 	unsigned long index;
4122 	unsigned long next;
4123 
4124 	index = lba_to_map_index(lba);
4125 	mapped = test_bit(index, sip->map_storep);
4126 
4127 	if (mapped)
4128 		next = find_next_zero_bit(sip->map_storep, map_size, index);
4129 	else
4130 		next = find_next_bit(sip->map_storep, map_size, index);
4131 
4132 	end = min_t(sector_t, sdebug_store_sectors,  map_index_to_lba(next));
4133 	*num = end - lba;
4134 	return mapped;
4135 }
4136 
4137 static void map_region(struct sdeb_store_info *sip, sector_t lba,
4138 		       unsigned int len)
4139 {
4140 	sector_t end = lba + len;
4141 
4142 	while (lba < end) {
4143 		unsigned long index = lba_to_map_index(lba);
4144 
4145 		if (index < map_size)
4146 			set_bit(index, sip->map_storep);
4147 
4148 		lba = map_index_to_lba(index + 1);
4149 	}
4150 }
4151 
4152 static void unmap_region(struct sdeb_store_info *sip, sector_t lba,
4153 			 unsigned int len)
4154 {
4155 	sector_t end = lba + len;
4156 	u8 *fsp = sip->storep;
4157 
4158 	while (lba < end) {
4159 		unsigned long index = lba_to_map_index(lba);
4160 
4161 		if (lba == map_index_to_lba(index) &&
4162 		    lba + sdebug_unmap_granularity <= end &&
4163 		    index < map_size) {
4164 			clear_bit(index, sip->map_storep);
4165 			if (sdebug_lbprz) {  /* for LBPRZ=2 return 0xff_s */
4166 				memset(fsp + lba * sdebug_sector_size,
4167 				       (sdebug_lbprz & 1) ? 0 : 0xff,
4168 				       sdebug_sector_size *
4169 				       sdebug_unmap_granularity);
4170 			}
4171 			if (sip->dif_storep) {
4172 				memset(sip->dif_storep + lba, 0xff,
4173 				       sizeof(*sip->dif_storep) *
4174 				       sdebug_unmap_granularity);
4175 			}
4176 		}
4177 		lba = map_index_to_lba(index + 1);
4178 	}
4179 }
4180 
4181 static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4182 {
4183 	bool check_prot;
4184 	u32 num;
4185 	u8 group = 0;
4186 	u32 ei_lba;
4187 	int ret;
4188 	u64 lba;
4189 	struct sdeb_store_info *sip = devip2sip(devip, true);
4190 	u8 *cmd = scp->cmnd;
4191 	bool meta_data_locked = false;
4192 
4193 	switch (cmd[0]) {
4194 	case WRITE_16:
4195 		ei_lba = 0;
4196 		lba = get_unaligned_be64(cmd + 2);
4197 		num = get_unaligned_be32(cmd + 10);
4198 		group = cmd[14] & 0x3f;
4199 		check_prot = true;
4200 		break;
4201 	case WRITE_10:
4202 		ei_lba = 0;
4203 		lba = get_unaligned_be32(cmd + 2);
4204 		group = cmd[6] & 0x3f;
4205 		num = get_unaligned_be16(cmd + 7);
4206 		check_prot = true;
4207 		break;
4208 	case WRITE_6:
4209 		ei_lba = 0;
4210 		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
4211 		      (u32)(cmd[1] & 0x1f) << 16;
4212 		num = (0 == cmd[4]) ? 256 : cmd[4];
4213 		check_prot = true;
4214 		break;
4215 	case WRITE_12:
4216 		ei_lba = 0;
4217 		lba = get_unaligned_be32(cmd + 2);
4218 		num = get_unaligned_be32(cmd + 6);
4219 		group = cmd[10] & 0x3f;	/* group number: CDB byte 10 in WRITE(12) */
4220 		check_prot = true;
4221 		break;
4222 	case 0x53:	/* XDWRITEREAD(10) */
4223 		ei_lba = 0;
4224 		lba = get_unaligned_be32(cmd + 2);
4225 		group = cmd[6] & 0x1f;
4226 		num = get_unaligned_be16(cmd + 7);
4227 		check_prot = false;
4228 		break;
4229 	default:	/* assume WRITE(32) */
4230 		group = cmd[6] & 0x3f;
4231 		lba = get_unaligned_be64(cmd + 12);
4232 		ei_lba = get_unaligned_be32(cmd + 20);
4233 		num = get_unaligned_be32(cmd + 28);
4234 		check_prot = false;
4235 		break;
4236 	}
4237 	if (unlikely(have_dif_prot && check_prot)) {
4238 		if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
4239 		    (cmd[1] & 0xe0)) {
4240 			mk_sense_invalid_opcode(scp);
4241 			return check_condition_result;
4242 		}
4243 		if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
4244 		     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
4245 		    (cmd[1] & 0xe0) == 0)
4246 			sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
4247 				    "to DIF device\n");
4248 	}
4249 
4250 	if (sdebug_dev_is_zoned(devip) ||
4251 	    (sdebug_dix && scsi_prot_sg_count(scp)) ||
4252 	    scsi_debug_lbp())  {
4253 		sdeb_meta_write_lock(sip);
4254 		meta_data_locked = true;
4255 	}
4256 
4257 	ret = check_device_access_params(scp, lba, num, true);
4258 	if (ret) {
4259 		if (meta_data_locked)
4260 			sdeb_meta_write_unlock(sip);
4261 		return ret;
4262 	}
4263 
4264 	/* DIX + T10 DIF */
4265 	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
4266 		switch (prot_verify_write(scp, lba, num, ei_lba)) {
4267 		case 1: /* Guard tag error */
4268 			if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) {
4269 				sdeb_meta_write_unlock(sip);
4270 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
4271 				return illegal_condition_result;
4272 			} else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */
4273 				sdeb_meta_write_unlock(sip);
4274 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
4275 				return check_condition_result;
4276 			}
4277 			break;
4278 		case 3: /* Reference tag error */
4279 			if (scp->prot_flags & SCSI_PROT_REF_CHECK) {
4280 				sdeb_meta_write_unlock(sip);
4281 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3);
4282 				return illegal_condition_result;
4283 			} else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */
4284 				sdeb_meta_write_unlock(sip);
4285 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3);
4286 				return check_condition_result;
4287 			}
4288 			break;
4289 		}
4290 	}
4291 
4292 	ret = do_device_access(sip, scp, 0, lba, num, group, true, false);
4293 	if (unlikely(scsi_debug_lbp()))
4294 		map_region(sip, lba, num);
4295 
4296 	/* If ZBC zone then bump its write pointer */
4297 	if (sdebug_dev_is_zoned(devip))
4298 		zbc_inc_wp(devip, lba, num);
4299 	if (meta_data_locked)
4300 		sdeb_meta_write_unlock(sip);
4301 
4302 	if (unlikely(-1 == ret))
4303 		return DID_ERROR << 16;
4304 	else if (unlikely(sdebug_verbose &&
4305 			  (ret < (num * sdebug_sector_size))))
4306 		sdev_printk(KERN_INFO, scp->device,
4307 			    "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
4308 			    my_name, num * sdebug_sector_size, ret);
4309 
4310 	if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
4311 		     atomic_read(&sdeb_inject_pending))) {
4312 		if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
4313 			mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
4314 			atomic_set(&sdeb_inject_pending, 0);
4315 			return check_condition_result;
4316 		} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
4317 			/* Logical block guard check failed */
4318 			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
4319 			atomic_set(&sdeb_inject_pending, 0);
4320 			return illegal_condition_result;
4321 		} else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
4322 			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
4323 			atomic_set(&sdeb_inject_pending, 0);
4324 			return illegal_condition_result;
4325 		}
4326 	}
4327 	return 0;
4328 }
4329 
4330 /*
4331  * T10 has only specified WRITE SCATTERED(16) and WRITE SCATTERED(32).
4332  * No READ GATHERED yet (requires bidi or long cdb holding gather list).
4333  */
4334 static int resp_write_scat(struct scsi_cmnd *scp,
4335 			   struct sdebug_dev_info *devip)
4336 {
4337 	u8 *cmd = scp->cmnd;
4338 	u8 *lrdp = NULL;
4339 	u8 *up;
4340 	struct sdeb_store_info *sip = devip2sip(devip, true);
4341 	u8 wrprotect;
4342 	u16 lbdof, num_lrd, k;
4343 	u32 num, num_by, bt_len, lbdof_blen, sg_off, cum_lb;
4344 	u32 lb_size = sdebug_sector_size;
4345 	u32 ei_lba;
4346 	u64 lba;
4347 	u8 group;
4348 	int ret, res;
4349 	bool is_16;
4350 	static const u32 lrd_size = 32; /* + parameter list header size */
4351 
4352 	if (cmd[0] == VARIABLE_LENGTH_CMD) {
4353 		is_16 = false;
4354 		group = cmd[6] & 0x3f;
4355 		wrprotect = (cmd[10] >> 5) & 0x7;
4356 		lbdof = get_unaligned_be16(cmd + 12);
4357 		num_lrd = get_unaligned_be16(cmd + 16);
4358 		bt_len = get_unaligned_be32(cmd + 28);
4359 	} else {        /* that leaves WRITE SCATTERED(16) */
4360 		is_16 = true;
4361 		wrprotect = (cmd[2] >> 5) & 0x7;
4362 		lbdof = get_unaligned_be16(cmd + 4);
4363 		num_lrd = get_unaligned_be16(cmd + 8);
4364 		bt_len = get_unaligned_be32(cmd + 10);
4365 		group = cmd[14] & 0x3f;
4366 		if (unlikely(have_dif_prot)) {
4367 			if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
4368 			    wrprotect) {
4369 				mk_sense_invalid_opcode(scp);
4370 				return illegal_condition_result;
4371 			}
4372 			if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
4373 			     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
4374 			     wrprotect == 0)
4375 				sdev_printk(KERN_ERR, scp->device,
4376 					    "Unprotected WR to DIF device\n");
4377 		}
4378 	}
4379 	if ((num_lrd == 0) || (bt_len == 0))
4380 		return 0;       /* T10 says these do-nothings are not errors */
4381 	if (lbdof == 0) {
4382 		if (sdebug_verbose)
4383 			sdev_printk(KERN_INFO, scp->device,
4384 				"%s: %s: LB Data Offset field bad\n",
4385 				my_name, __func__);
4386 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4387 		return illegal_condition_result;
4388 	}
4389 	lbdof_blen = lbdof * lb_size;
4390 	if ((lrd_size + (num_lrd * lrd_size)) > lbdof_blen) {
4391 		if (sdebug_verbose)
4392 			sdev_printk(KERN_INFO, scp->device,
4393 				"%s: %s: LBA range descriptors don't fit\n",
4394 				my_name, __func__);
4395 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4396 		return illegal_condition_result;
4397 	}
4398 	lrdp = kzalloc(lbdof_blen, GFP_ATOMIC | __GFP_NOWARN);
4399 	if (lrdp == NULL)
4400 		return SCSI_MLQUEUE_HOST_BUSY;
4401 	if (sdebug_verbose)
4402 		sdev_printk(KERN_INFO, scp->device,
4403 			"%s: %s: Fetch header+scatter_list, lbdof_blen=%u\n",
4404 			my_name, __func__, lbdof_blen);
4405 	res = fetch_to_dev_buffer(scp, lrdp, lbdof_blen);
4406 	if (res == -1) {
4407 		ret = DID_ERROR << 16;
4408 		goto err_out;
4409 	}
4410 
4411 	/* Just keep it simple and always lock for now */
4412 	sdeb_meta_write_lock(sip);
4413 	sg_off = lbdof_blen;
4414 	/* Spec says the Buffer Transfer Length field counts LBs in the dout */
4415 	cum_lb = 0;
4416 	for (k = 0, up = lrdp + lrd_size; k < num_lrd; ++k, up += lrd_size) {
4417 		lba = get_unaligned_be64(up + 0);
4418 		num = get_unaligned_be32(up + 8);
4419 		if (sdebug_verbose)
4420 			sdev_printk(KERN_INFO, scp->device,
4421 				"%s: %s: k=%d  LBA=0x%llx num=%u  sg_off=%u\n",
4422 				my_name, __func__, k, lba, num, sg_off);
4423 		if (num == 0)
4424 			continue;
4425 		ret = check_device_access_params(scp, lba, num, true);
4426 		if (ret)
4427 			goto err_out_unlock;
4428 		num_by = num * lb_size;
4429 		ei_lba = is_16 ? 0 : get_unaligned_be32(up + 12);
4430 
4431 		if ((cum_lb + num) > bt_len) {
4432 			if (sdebug_verbose)
4433 				sdev_printk(KERN_INFO, scp->device,
4434 				    "%s: %s: sum of blocks > data provided\n",
4435 				    my_name, __func__);
4436 			mk_sense_buffer(scp, ILLEGAL_REQUEST, WRITE_ERROR_ASC,
4437 					0);
4438 			ret = illegal_condition_result;
4439 			goto err_out_unlock;
4440 		}
4441 
4442 		/* DIX + T10 DIF */
4443 		if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
4444 			int prot_ret = prot_verify_write(scp, lba, num,
4445 							 ei_lba);
4446 
4447 			if (prot_ret) {
4448 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10,
4449 						prot_ret);
4450 				ret = illegal_condition_result;
4451 				goto err_out_unlock;
4452 			}
4453 		}
4454 
4455 		/*
4456 		 * Write each range atomically, to keep behaviour as close as
4457 		 * possible to that of the pre-atomic-write code.
4458 		 */
4459 		ret = do_device_access(sip, scp, sg_off, lba, num, group, true, true);
4460 		/* If ZBC zone then bump its write pointer */
4461 		if (sdebug_dev_is_zoned(devip))
4462 			zbc_inc_wp(devip, lba, num);
4463 		if (unlikely(scsi_debug_lbp()))
4464 			map_region(sip, lba, num);
4465 		if (unlikely(-1 == ret)) {
4466 			ret = DID_ERROR << 16;
4467 			goto err_out_unlock;
4468 		} else if (unlikely(sdebug_verbose && (ret < num_by)))
4469 			sdev_printk(KERN_INFO, scp->device,
4470 			    "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
4471 			    my_name, num_by, ret);
4472 
4473 		if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
4474 			     atomic_read(&sdeb_inject_pending))) {
4475 			if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
4476 				mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
4477 				atomic_set(&sdeb_inject_pending, 0);
4478 				ret = check_condition_result;
4479 				goto err_out_unlock;
4480 			} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
4481 				/* Logical block guard check failed */
4482 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
4483 				atomic_set(&sdeb_inject_pending, 0);
4484 				ret = illegal_condition_result;
4485 				goto err_out_unlock;
4486 			} else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
4487 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
4488 				atomic_set(&sdeb_inject_pending, 0);
4489 				ret = illegal_condition_result;
4490 				goto err_out_unlock;
4491 			}
4492 		}
4493 		sg_off += num_by;
4494 		cum_lb += num;
4495 	}
4496 	ret = 0;
4497 err_out_unlock:
4498 	sdeb_meta_write_unlock(sip);
4499 err_out:
4500 	kfree(lrdp);
4501 	return ret;
4502 }
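
/*
 * Worked example (illustrative, sizes assumed): with 512-byte logical
 * blocks and LB data offset lbdof = 1, the first data-out block holds
 * the 32-byte parameter list header followed by at most
 * (512 - 32) / 32 = 15 LBA range descriptors (big-endian 8-byte LBA
 * plus 4-byte block count each), and the write data for the first
 * range starts at byte sg_off = lbdof_blen = 512.
 */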
4503 
4504 static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
4505 			   u32 ei_lba, bool unmap, bool ndob)
4506 {
4507 	struct scsi_device *sdp = scp->device;
4508 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
4509 	unsigned long long i;
4510 	u64 block, lbaa;
4511 	u32 lb_size = sdebug_sector_size;
4512 	int ret;
4513 	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
4514 						scp->device->hostdata, true);
4515 	u8 *fs1p;
4516 	u8 *fsp;
4517 	bool meta_data_locked = false;
4518 
4519 	if (sdebug_dev_is_zoned(devip) || scsi_debug_lbp()) {
4520 		sdeb_meta_write_lock(sip);
4521 		meta_data_locked = true;
4522 	}
4523 
4524 	ret = check_device_access_params(scp, lba, num, true);
4525 	if (ret)
4526 		goto out;
4527 
4528 	if (unmap && scsi_debug_lbp()) {
4529 		unmap_region(sip, lba, num);
4530 		goto out;
4531 	}
4532 	lbaa = lba;
4533 	block = do_div(lbaa, sdebug_store_sectors);
4534 	/* if ndob then zero 1 logical block, else fetch 1 logical block */
4535 	fsp = sip->storep;
4536 	fs1p = fsp + (block * lb_size);
4537 	sdeb_data_write_lock(sip);
4538 	if (ndob) {
4539 		memset(fs1p, 0, lb_size);
4540 		ret = 0;
4541 	} else
4542 		ret = fetch_to_dev_buffer(scp, fs1p, lb_size);
4543 
4544 	if (-1 == ret) {
4545 		ret = DID_ERROR << 16;
4546 		goto out;
4547 	} else if (sdebug_verbose && !ndob && (ret < lb_size))
4548 		sdev_printk(KERN_INFO, scp->device,
4549 			    "%s: %s: lb size=%u, IO sent=%d bytes\n",
4550 			    my_name, "write same", lb_size, ret);
4551 
4552 	/* Copy first sector to remaining blocks */
4553 	for (i = 1 ; i < num ; i++) {
4554 		lbaa = lba + i;
4555 		block = do_div(lbaa, sdebug_store_sectors);
4556 		memmove(fsp + (block * lb_size), fs1p, lb_size);
4557 	}
4558 	if (scsi_debug_lbp())
4559 		map_region(sip, lba, num);
4560 	/* If ZBC zone then bump its write pointer */
4561 	if (sdebug_dev_is_zoned(devip))
4562 		zbc_inc_wp(devip, lba, num);
4563 	sdeb_data_write_unlock(sip);
4564 	ret = 0;
4565 out:
4566 	if (meta_data_locked)
4567 		sdeb_meta_write_unlock(sip);
4568 	return ret;
4569 }
4570 
4571 static int resp_write_same_10(struct scsi_cmnd *scp,
4572 			      struct sdebug_dev_info *devip)
4573 {
4574 	u8 *cmd = scp->cmnd;
4575 	u32 lba;
4576 	u16 num;
4577 	u32 ei_lba = 0;
4578 	bool unmap = false;
4579 
4580 	if (cmd[1] & 0x8) {
4581 		if (sdebug_lbpws10 == 0) {
4582 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
4583 			return check_condition_result;
4584 		} else
4585 			unmap = true;
4586 	}
4587 	lba = get_unaligned_be32(cmd + 2);
4588 	num = get_unaligned_be16(cmd + 7);
4589 	if (num > sdebug_write_same_length) {
4590 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
4591 		return check_condition_result;
4592 	}
4593 	return resp_write_same(scp, lba, num, ei_lba, unmap, false);
4594 }
4595 
4596 static int resp_write_same_16(struct scsi_cmnd *scp,
4597 			      struct sdebug_dev_info *devip)
4598 {
4599 	u8 *cmd = scp->cmnd;
4600 	u64 lba;
4601 	u32 num;
4602 	u32 ei_lba = 0;
4603 	bool unmap = false;
4604 	bool ndob = false;
4605 
4606 	if (cmd[1] & 0x8) {	/* UNMAP */
4607 		if (sdebug_lbpws == 0) {
4608 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
4609 			return check_condition_result;
4610 		} else
4611 			unmap = true;
4612 	}
4613 	if (cmd[1] & 0x1)  /* NDOB (no data-out buffer, assumes zeroes) */
4614 		ndob = true;
4615 	lba = get_unaligned_be64(cmd + 2);
4616 	num = get_unaligned_be32(cmd + 10);
4617 	if (num > sdebug_write_same_length) {
4618 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
4619 		return check_condition_result;
4620 	}
4621 	return resp_write_same(scp, lba, num, ei_lba, unmap, ndob);
4622 }
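
/*
 * Illustrative CDB (not generated by this driver): WRITE SAME(16)
 * zeroing 256 blocks at LBA 0x1000 with no data-out buffer sets opcode
 * 0x93, byte 1 = 0x01 (NDOB), bytes 2..9 = LBA 0x1000 and bytes
 * 10..13 = 0x100; OR-ing 0x08 into byte 1 (UNMAP) asks for the range
 * to be deallocated instead.
 */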
4623 
4624 /* Note the mode field is in the same position as the (lower) service action
4625  * field. For the REPORT SUPPORTED OPERATION CODES command, SPC-4 suggests
4626  * reporting each mode of this command separately; that is left for later. */
4627 static int resp_write_buffer(struct scsi_cmnd *scp,
4628 			     struct sdebug_dev_info *devip)
4629 {
4630 	u8 *cmd = scp->cmnd;
4631 	struct scsi_device *sdp = scp->device;
4632 	struct sdebug_dev_info *dp;
4633 	u8 mode;
4634 
4635 	mode = cmd[1] & 0x1f;
4636 	switch (mode) {
4637 	case 0x4:	/* download microcode (MC) and activate (ACT) */
4638 		/* set UAs on this device only */
4639 		set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
4640 		set_bit(SDEBUG_UA_MICROCODE_CHANGED, devip->uas_bm);
4641 		break;
4642 	case 0x5:	/* download MC, save and ACT */
4643 		set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET, devip->uas_bm);
4644 		break;
4645 	case 0x6:	/* download MC with offsets and ACT */
4646 		/* set UAs on most devices (LUs) in this target */
4647 		list_for_each_entry(dp,
4648 				    &devip->sdbg_host->dev_info_list,
4649 				    dev_list)
4650 			if (dp->target == sdp->id) {
4651 				set_bit(SDEBUG_UA_BUS_RESET, dp->uas_bm);
4652 				if (devip != dp)
4653 					set_bit(SDEBUG_UA_MICROCODE_CHANGED,
4654 						dp->uas_bm);
4655 			}
4656 		break;
4657 	case 0x7:	/* download MC with offsets, save, and ACT */
4658 		/* set UA on all devices (LUs) in this target */
4659 		list_for_each_entry(dp,
4660 				    &devip->sdbg_host->dev_info_list,
4661 				    dev_list)
4662 			if (dp->target == sdp->id)
4663 				set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET,
4664 					dp->uas_bm);
4665 		break;
4666 	default:
4667 		/* do nothing for this command for other mode values */
4668 		break;
4669 	}
4670 	return 0;
4671 }
4672 
4673 static int resp_comp_write(struct scsi_cmnd *scp,
4674 			   struct sdebug_dev_info *devip)
4675 {
4676 	u8 *cmd = scp->cmnd;
4677 	u8 *arr;
4678 	struct sdeb_store_info *sip = devip2sip(devip, true);
4679 	u64 lba;
4680 	u32 dnum;
4681 	u32 lb_size = sdebug_sector_size;
4682 	u8 num;
4683 	int ret;
4684 	int retval = 0;
4685 
4686 	lba = get_unaligned_be64(cmd + 2);
4687 	num = cmd[13];		/* 1 to a maximum of 255 logical blocks */
4688 	if (0 == num)
4689 		return 0;	/* degenerate case, not an error */
4690 	if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
4691 	    (cmd[1] & 0xe0)) {
4692 		mk_sense_invalid_opcode(scp);
4693 		return check_condition_result;
4694 	}
4695 	if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
4696 	     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
4697 	    (cmd[1] & 0xe0) == 0)
4698 		sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
4699 			    "to DIF device\n");
4700 	ret = check_device_access_params(scp, lba, num, false);
4701 	if (ret)
4702 		return ret;
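	/*
	 * COMPARE AND WRITE's data-out buffer carries num blocks to verify
	 * followed by num blocks to write on a successful compare, hence
	 * the doubled block count fetched below.
	 */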
4703 	dnum = 2 * num;
4704 	arr = kcalloc(lb_size, dnum, GFP_ATOMIC);
4705 	if (NULL == arr) {
4706 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4707 				INSUFF_RES_ASCQ);
4708 		return check_condition_result;
4709 	}
4710 
4711 	ret = do_dout_fetch(scp, dnum, arr);
4712 	if (ret == -1) {
4713 		retval = DID_ERROR << 16;
4714 		goto cleanup_free;
4715 	} else if (sdebug_verbose && (ret < (dnum * lb_size)))
4716 		sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb "
4717 			    "indicated=%u, IO sent=%d bytes\n", my_name,
4718 			    dnum * lb_size, ret);
4719 
4720 	sdeb_data_write_lock(sip);
4721 	sdeb_meta_write_lock(sip);
4722 	if (!comp_write_worker(sip, lba, num, arr, false)) {
4723 		mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
4724 		retval = check_condition_result;
4725 		goto cleanup_unlock;
4726 	}
4727 
4728 	/* Cover sip->map_storep (which map_region() sets) with the data lock */
4729 	if (scsi_debug_lbp())
4730 		map_region(sip, lba, num);
4731 cleanup_unlock:
4732 	sdeb_meta_write_unlock(sip);
4733 	sdeb_data_write_unlock(sip);
4734 cleanup_free:
4735 	kfree(arr);
4736 	return retval;
4737 }
4738 
4739 struct unmap_block_desc {
4740 	__be64	lba;
4741 	__be32	blocks;
4742 	__be32	__reserved;
4743 };
4744 
4745 static int resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4746 {
4747 	unsigned char *buf;
4748 	struct unmap_block_desc *desc;
4749 	struct sdeb_store_info *sip = devip2sip(devip, true);
4750 	unsigned int i, payload_len, descriptors;
4751 	int ret;
4752 
4753 	if (!scsi_debug_lbp())
4754 		return 0;	/* fib and say it's done */
4755 	payload_len = get_unaligned_be16(scp->cmnd + 7);
4756 	BUG_ON(scsi_bufflen(scp) != payload_len);
4757 
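	/*
	 * The UNMAP parameter list is an 8 byte header followed by 16 byte
	 * block descriptors (the layout of struct unmap_block_desc above),
	 * which is what the arithmetic below relies on.
	 */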
4758 	descriptors = (payload_len - 8) / 16;
4759 	if (descriptors > sdebug_unmap_max_desc) {
4760 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
4761 		return check_condition_result;
4762 	}
4763 
4764 	buf = kzalloc(scsi_bufflen(scp), GFP_ATOMIC);
4765 	if (!buf) {
4766 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4767 				INSUFF_RES_ASCQ);
4768 		return check_condition_result;
4769 	}
4770 
4771 	scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
4772 
4773 	BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
4774 	BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);
4775 
4776 	desc = (void *)&buf[8];
4777 
4778 	sdeb_meta_write_lock(sip);
4779 
4780 	for (i = 0 ; i < descriptors ; i++) {
4781 		unsigned long long lba = get_unaligned_be64(&desc[i].lba);
4782 		unsigned int num = get_unaligned_be32(&desc[i].blocks);
4783 
4784 		ret = check_device_access_params(scp, lba, num, true);
4785 		if (ret)
4786 			goto out;
4787 
4788 		unmap_region(sip, lba, num);
4789 	}
4790 
4791 	ret = 0;
4792 
4793 out:
4794 	sdeb_meta_write_unlock(sip);
4795 	kfree(buf);
4796 
4797 	return ret;
4798 }
4799 
4800 #define SDEBUG_GET_LBA_STATUS_LEN 32
4801 
4802 static int resp_get_lba_status(struct scsi_cmnd *scp,
4803 			       struct sdebug_dev_info *devip)
4804 {
4805 	u8 *cmd = scp->cmnd;
4806 	u64 lba;
4807 	u32 alloc_len, mapped, num;
4808 	int ret;
4809 	u8 arr[SDEBUG_GET_LBA_STATUS_LEN];
4810 
4811 	lba = get_unaligned_be64(cmd + 2);
4812 	alloc_len = get_unaligned_be32(cmd + 10);
4813 
4814 	if (alloc_len < 24)
4815 		return 0;
4816 
4817 	ret = check_device_access_params(scp, lba, 1, false);
4818 	if (ret)
4819 		return ret;
4820 
4821 	if (scsi_debug_lbp()) {
4822 		struct sdeb_store_info *sip = devip2sip(devip, true);
4823 
4824 		mapped = map_state(sip, lba, &num);
4825 	} else {
4826 		mapped = 1;
4827 		/* following just in case virtual_gb changed */
4828 		sdebug_capacity = get_sdebug_capacity();
4829 		if (sdebug_capacity - lba <= 0xffffffff)
4830 			num = sdebug_capacity - lba;
4831 		else
4832 			num = 0xffffffff;
4833 	}
4834 
4835 	memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
4836 	put_unaligned_be32(20, arr);		/* Parameter Data Length */
4837 	put_unaligned_be64(lba, arr + 8);	/* LBA */
4838 	put_unaligned_be32(num, arr + 16);	/* Number of blocks */
4839 	arr[20] = !mapped;		/* prov_stat=0: mapped; 1: dealloc */
4840 
4841 	return fill_from_dev_buffer(scp, arr, SDEBUG_GET_LBA_STATUS_LEN);
4842 }
4843 
4844 static int resp_get_stream_status(struct scsi_cmnd *scp,
4845 				  struct sdebug_dev_info *devip)
4846 {
4847 	u16 starting_stream_id, stream_id;
4848 	const u8 *cmd = scp->cmnd;
4849 	u32 alloc_len, offset;
4850 	u8 arr[256] = {};
4851 	struct scsi_stream_status_header *h = (void *)arr;
4852 
4853 	starting_stream_id = get_unaligned_be16(cmd + 4);
4854 	alloc_len = get_unaligned_be32(cmd + 10);
4855 
4856 	if (alloc_len < 8) {
4857 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
4858 		return check_condition_result;
4859 	}
4860 
4861 	if (starting_stream_id >= MAXIMUM_NUMBER_OF_STREAMS) {
4862 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1);
4863 		return check_condition_result;
4864 	}
4865 
4866 	/*
4867 	 * The GET STREAM STATUS command only reports status information
4868 	 * about open streams. Treat the non-permanent streams as open.
4869 	 */
4870 	put_unaligned_be16(MAXIMUM_NUMBER_OF_STREAMS,
4871 			   &h->number_of_open_streams);
4872 
4873 	for (offset = 8, stream_id = starting_stream_id;
4874 	     offset + 8 <= min_t(u32, alloc_len, sizeof(arr)) &&
4875 		     stream_id < MAXIMUM_NUMBER_OF_STREAMS;
4876 	     offset += 8, stream_id++) {
4877 		struct scsi_stream_status *stream_status = (void *)arr + offset;
4878 
4879 		stream_status->perm = stream_id < PERMANENT_STREAM_COUNT;
4880 		put_unaligned_be16(stream_id,
4881 				   &stream_status->stream_identifier);
4882 		stream_status->rel_lifetime = stream_id + 1;
4883 	}
4884 	put_unaligned_be32(offset - 8, &h->len); /* PARAMETER DATA LENGTH */
4885 
4886 	return fill_from_dev_buffer(scp, arr, min(offset, alloc_len));
4887 }
4888 
4889 static int resp_sync_cache(struct scsi_cmnd *scp,
4890 			   struct sdebug_dev_info *devip)
4891 {
4892 	int res = 0;
4893 	u64 lba;
4894 	u32 num_blocks;
4895 	u8 *cmd = scp->cmnd;
4896 
4897 	if (cmd[0] == SYNCHRONIZE_CACHE) {	/* 10 byte cdb */
4898 		lba = get_unaligned_be32(cmd + 2);
4899 		num_blocks = get_unaligned_be16(cmd + 7);
4900 	} else {				/* SYNCHRONIZE_CACHE(16) */
4901 		lba = get_unaligned_be64(cmd + 2);
4902 		num_blocks = get_unaligned_be32(cmd + 10);
4903 	}
4904 	if (lba + num_blocks > sdebug_capacity) {
4905 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4906 		return check_condition_result;
4907 	}
4908 	if (!write_since_sync || (cmd[1] & 0x2))
4909 		res = SDEG_RES_IMMED_MASK;
4910 	else		/* delay if write_since_sync and IMMED clear */
4911 		write_since_sync = false;
4912 	return res;
4913 }
4914 
4915 /*
4916  * Assuming lba+num_blocks is not out-of-range, this function returns
4917  * CONDITION MET if the specified blocks will fit (or have already been
4918  * fitted) in the cache, and GOOD status otherwise. Since a disk with a
4919  * big cache is modelled, CONDITION MET is always yielded. It actually
4920  * tries to bring the range in main memory into the cache of the CPU(s).
4921  */
4922 static int resp_pre_fetch(struct scsi_cmnd *scp,
4923 			  struct sdebug_dev_info *devip)
4924 {
4925 	int res = 0;
4926 	u64 lba;
4927 	u64 block, rest = 0;
4928 	u32 nblks;
4929 	u8 *cmd = scp->cmnd;
4930 	struct sdeb_store_info *sip = devip2sip(devip, true);
4931 	u8 *fsp = sip->storep;
4932 
4933 	if (cmd[0] == PRE_FETCH) {	/* 10 byte cdb */
4934 		lba = get_unaligned_be32(cmd + 2);
4935 		nblks = get_unaligned_be16(cmd + 7);
4936 	} else {			/* PRE-FETCH(16) */
4937 		lba = get_unaligned_be64(cmd + 2);
4938 		nblks = get_unaligned_be32(cmd + 10);
4939 	}
4940 	if (lba + nblks > sdebug_capacity) {
4941 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4942 		return check_condition_result;
4943 	}
4944 	if (!fsp)
4945 		goto fini;
4946 	/* PRE-FETCH spec says nothing about LBP or PI so skip them */
4947 	block = do_div(lba, sdebug_store_sectors);
4948 	if (block + nblks > sdebug_store_sectors)
4949 		rest = block + nblks - sdebug_store_sectors;
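	/*
	 * Wrap-around example with hypothetical numbers: if
	 * sdebug_store_sectors == 1000, block == 990 and nblks == 30, then
	 * rest == 20, so sectors 990..999 are prefetched below followed by
	 * sectors 0..19 from the start of the store.
	 */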
4950 
4951 	/* Try to bring the PRE-FETCH range into CPU's cache */
4952 	sdeb_data_read_lock(sip);
4953 	prefetch_range(fsp + (sdebug_sector_size * block),
4954 		       (nblks - rest) * sdebug_sector_size);
4955 	if (rest)
4956 		prefetch_range(fsp, rest * sdebug_sector_size);
4957 
4958 	sdeb_data_read_unlock(sip);
4959 fini:
4960 	if (cmd[1] & 0x2)
4961 		res = SDEG_RES_IMMED_MASK;
4962 	return res | condition_met_result;
4963 }
4964 
4965 #define RL_BUCKET_ELEMS 8
4966 
4967 /* Even though each pseudo target has a REPORT LUNS "well known logical unit"
4968  * (W-LUN), the normal Linux scanning logic does not associate it with a
4969  * device (e.g. /dev/sg7). The following magic will make that association:
4970  *   "cd /sys/class/scsi_host/host<n> ; echo '- - 49409' > scan"
4971  * where <n> is a host number. If there are multiple targets in a host then
4972  * the above will associate a W-LUN with each target. To get a W-LUN
4973  * for target 2 only, use "echo '- 2 49409' > scan".
4974  */
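/* (49409 is 0xc101, i.e. SCSI_W_LUN_REPORT_LUNS flattened to a single
 * level LUN; see also int_to_scsilun() which is used below.)
 */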
4975 static int resp_report_luns(struct scsi_cmnd *scp,
4976 			    struct sdebug_dev_info *devip)
4977 {
4978 	unsigned char *cmd = scp->cmnd;
4979 	unsigned int alloc_len;
4980 	unsigned char select_report;
4981 	u64 lun;
4982 	struct scsi_lun *lun_p;
4983 	u8 arr[RL_BUCKET_ELEMS * sizeof(struct scsi_lun)];
4984 	unsigned int lun_cnt;	/* normal LUN count (max: 256) */
4985 	unsigned int wlun_cnt;	/* report luns W-LUN count */
4986 	unsigned int tlun_cnt;	/* total LUN count */
4987 	unsigned int rlen;	/* response length (in bytes) */
4988 	int k, j, n, res;
4989 	unsigned int off_rsp = 0;
4990 	const int sz_lun = sizeof(struct scsi_lun);
4991 
4992 	clear_luns_changed_on_target(devip);
4993 
4994 	select_report = cmd[2];
4995 	alloc_len = get_unaligned_be32(cmd + 6);
4996 
4997 	if (alloc_len < 4) {
4998 		pr_err("alloc len too small %d\n", alloc_len);
4999 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
5000 		return check_condition_result;
5001 	}
5002 
5003 	switch (select_report) {
5004 	case 0:		/* all LUNs apart from W-LUNs */
5005 		lun_cnt = sdebug_max_luns;
5006 		wlun_cnt = 0;
5007 		break;
5008 	case 1:		/* only W-LUNs */
5009 		lun_cnt = 0;
5010 		wlun_cnt = 1;
5011 		break;
5012 	case 2:		/* all LUNs */
5013 		lun_cnt = sdebug_max_luns;
5014 		wlun_cnt = 1;
5015 		break;
5016 	case 0x10:	/* only administrative LUs */
5017 	case 0x11:	/* see SPC-5 */
5018 	case 0x12:	/* only subsidiary LUs owned by referenced LU */
5019 	default:
5020 		pr_debug("select report invalid %d\n", select_report);
5021 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
5022 		return check_condition_result;
5023 	}
5024 
5025 	if (sdebug_no_lun_0 && (lun_cnt > 0))
5026 		--lun_cnt;
5027 
5028 	tlun_cnt = lun_cnt + wlun_cnt;
5029 	rlen = tlun_cnt * sz_lun;	/* excluding 8 byte header */
5030 	scsi_set_resid(scp, scsi_bufflen(scp));
5031 	pr_debug("select_report %d luns = %d wluns = %d no_lun0 %d\n",
5032 		 select_report, lun_cnt, wlun_cnt, sdebug_no_lun_0);
5033 
5034 	/* loops rely on the response header being the same size as a LUN entry (both 8 bytes) */
5035 	lun = sdebug_no_lun_0 ? 1 : 0;
5036 	for (k = 0, j = 0, res = 0; true; ++k, j = 0) {
5037 		memset(arr, 0, sizeof(arr));
5038 		lun_p = (struct scsi_lun *)&arr[0];
5039 		if (k == 0) {
5040 			put_unaligned_be32(rlen, &arr[0]);
5041 			++lun_p;
5042 			j = 1;
5043 		}
5044 		for ( ; j < RL_BUCKET_ELEMS; ++j, ++lun_p) {
5045 			if ((k * RL_BUCKET_ELEMS) + j > lun_cnt)
5046 				break;
5047 			int_to_scsilun(lun++, lun_p);
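			/* setting 0x40 in byte 0 selects the flat space
			 * LUN addressing method defined in SAM */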
5048 			if (lun > 1 && sdebug_lun_am == SAM_LUN_AM_FLAT)
5049 				lun_p->scsi_lun[0] |= 0x40;
5050 		}
5051 		if (j < RL_BUCKET_ELEMS)
5052 			break;
5053 		n = j * sz_lun;
5054 		res = p_fill_from_dev_buffer(scp, arr, n, off_rsp);
5055 		if (res)
5056 			return res;
5057 		off_rsp += n;
5058 	}
5059 	if (wlun_cnt) {
5060 		int_to_scsilun(SCSI_W_LUN_REPORT_LUNS, lun_p);
5061 		++j;
5062 	}
5063 	if (j > 0)
5064 		res = p_fill_from_dev_buffer(scp, arr, j * sz_lun, off_rsp);
5065 	return res;
5066 }
5067 
5068 static int resp_verify(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
5069 {
5070 	bool is_bytchk3 = false;
5071 	u8 bytchk;
5072 	int ret, j;
5073 	u32 vnum, a_num, off;
5074 	const u32 lb_size = sdebug_sector_size;
5075 	u64 lba;
5076 	u8 *arr;
5077 	u8 *cmd = scp->cmnd;
5078 	struct sdeb_store_info *sip = devip2sip(devip, true);
5079 
5080 	bytchk = (cmd[1] >> 1) & 0x3;
5081 	if (bytchk == 0) {
5082 		return 0;	/* always claim internal verify okay */
5083 	} else if (bytchk == 2) {
5084 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
5085 		return check_condition_result;
5086 	} else if (bytchk == 3) {
5087 		is_bytchk3 = true;	/* 1 block sent, compared repeatedly */
5088 	}
5089 	switch (cmd[0]) {
5090 	case VERIFY_16:
5091 		lba = get_unaligned_be64(cmd + 2);
5092 		vnum = get_unaligned_be32(cmd + 10);
5093 		break;
5094 	case VERIFY:		/* is VERIFY(10) */
5095 		lba = get_unaligned_be32(cmd + 2);
5096 		vnum = get_unaligned_be16(cmd + 7);
5097 		break;
5098 	default:
5099 		mk_sense_invalid_opcode(scp);
5100 		return check_condition_result;
5101 	}
5102 	if (vnum == 0)
5103 		return 0;	/* not an error */
5104 	a_num = is_bytchk3 ? 1 : vnum;
5105 	/* Treat following check like one for read (i.e. no write) access */
5106 	ret = check_device_access_params(scp, lba, a_num, false);
5107 	if (ret)
5108 		return ret;
5109 
5110 	arr = kcalloc(lb_size, vnum, GFP_ATOMIC | __GFP_NOWARN);
5111 	if (!arr) {
5112 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
5113 				INSUFF_RES_ASCQ);
5114 		return check_condition_result;
5115 	}
5116 	/* Not changing store, so only need read access */
5117 	sdeb_data_read_lock(sip);
5118 
5119 	ret = do_dout_fetch(scp, a_num, arr);
5120 	if (ret == -1) {
5121 		ret = DID_ERROR << 16;
5122 		goto cleanup;
5123 	} else if (sdebug_verbose && (ret < (a_num * lb_size))) {
5124 		sdev_printk(KERN_INFO, scp->device,
5125 			    "%s: %s: cdb indicated=%u, IO sent=%d bytes\n",
5126 			    my_name, __func__, a_num * lb_size, ret);
5127 	}
5128 	if (is_bytchk3) {
5129 		for (j = 1, off = lb_size; j < vnum; ++j, off += lb_size)
5130 			memcpy(arr + off, arr, lb_size);
5131 	}
5132 	ret = 0;
5133 	if (!comp_write_worker(sip, lba, vnum, arr, true)) {
5134 		mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
5135 		ret = check_condition_result;
5136 		goto cleanup;
5137 	}
5138 cleanup:
5139 	sdeb_data_read_unlock(sip);
5140 	kfree(arr);
5141 	return ret;
5142 }
5143 
5144 #define RZONES_DESC_HD 64
5145 
5146 /* Report zones depending on start LBA and reporting options */
5147 static int resp_report_zones(struct scsi_cmnd *scp,
5148 			     struct sdebug_dev_info *devip)
5149 {
5150 	unsigned int rep_max_zones, nrz = 0;
5151 	int ret = 0;
5152 	u32 alloc_len, rep_opts, rep_len;
5153 	bool partial;
5154 	u64 lba, zs_lba;
5155 	u8 *arr = NULL, *desc;
5156 	u8 *cmd = scp->cmnd;
5157 	struct sdeb_zone_state *zsp = NULL;
5158 	struct sdeb_store_info *sip = devip2sip(devip, false);
5159 
5160 	if (!sdebug_dev_is_zoned(devip)) {
5161 		mk_sense_invalid_opcode(scp);
5162 		return check_condition_result;
5163 	}
5164 	zs_lba = get_unaligned_be64(cmd + 2);
5165 	alloc_len = get_unaligned_be32(cmd + 10);
5166 	if (alloc_len == 0)
5167 		return 0;	/* not an error */
5168 	rep_opts = cmd[14] & 0x3f;
5169 	partial = cmd[14] & 0x80;
5170 
5171 	if (zs_lba >= sdebug_capacity) {
5172 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
5173 		return check_condition_result;
5174 	}
5175 
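	/* Both the report header and each zone descriptor take 64 bytes, so
	 * at most (alloc_len - 64) / 64 descriptors fit in the response;
	 * ilog2(RZONES_DESC_HD) == 6 makes the shift below a divide by 64.
	 */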
5176 	rep_max_zones = (alloc_len - 64) >> ilog2(RZONES_DESC_HD);
5177 
5178 	arr = kzalloc(alloc_len, GFP_ATOMIC | __GFP_NOWARN);
5179 	if (!arr) {
5180 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
5181 				INSUFF_RES_ASCQ);
5182 		return check_condition_result;
5183 	}
5184 
5185 	sdeb_meta_read_lock(sip);
5186 
5187 	desc = arr + 64;
5188 	for (lba = zs_lba; lba < sdebug_capacity;
5189 	     lba = zsp->z_start + zsp->z_size) {
5190 		if (WARN_ONCE(zbc_zone(devip, lba) == zsp, "lba = %llu\n", lba))
5191 			break;
5192 		zsp = zbc_zone(devip, lba);
5193 		switch (rep_opts) {
5194 		case 0x00:
5195 			/* All zones */
5196 			break;
5197 		case 0x01:
5198 			/* Empty zones */
5199 			if (zsp->z_cond != ZC1_EMPTY)
5200 				continue;
5201 			break;
5202 		case 0x02:
5203 			/* Implicit open zones */
5204 			if (zsp->z_cond != ZC2_IMPLICIT_OPEN)
5205 				continue;
5206 			break;
5207 		case 0x03:
5208 			/* Explicit open zones */
5209 			if (zsp->z_cond != ZC3_EXPLICIT_OPEN)
5210 				continue;
5211 			break;
5212 		case 0x04:
5213 			/* Closed zones */
5214 			if (zsp->z_cond != ZC4_CLOSED)
5215 				continue;
5216 			break;
5217 		case 0x05:
5218 			/* Full zones */
5219 			if (zsp->z_cond != ZC5_FULL)
5220 				continue;
5221 			break;
5222 		case 0x06:
5223 		case 0x07:
5224 		case 0x10:
5225 			/*
5226 			 * Read-only, offline, reset WP recommended are
5227 			 * not emulated: no zones to report.
5228 			 */
5229 			continue;
5230 		case 0x11:
5231 			/* non-seq-resource set */
5232 			if (!zsp->z_non_seq_resource)
5233 				continue;
5234 			break;
5235 		case 0x3e:
5236 			/* All zones except gap zones. */
5237 			if (zbc_zone_is_gap(zsp))
5238 				continue;
5239 			break;
5240 		case 0x3f:
5241 			/* Not write pointer (conventional) zones */
5242 			if (zbc_zone_is_seq(zsp))
5243 				continue;
5244 			break;
5245 		default:
5246 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
5247 					INVALID_FIELD_IN_CDB, 0);
5248 			ret = check_condition_result;
5249 			goto fini;
5250 		}
5251 
5252 		if (nrz < rep_max_zones) {
5253 			/* Fill zone descriptor */
5254 			desc[0] = zsp->z_type;
5255 			desc[1] = zsp->z_cond << 4;
5256 			if (zsp->z_non_seq_resource)
5257 				desc[1] |= 1 << 1;
5258 			put_unaligned_be64((u64)zsp->z_size, desc + 8);
5259 			put_unaligned_be64((u64)zsp->z_start, desc + 16);
5260 			put_unaligned_be64((u64)zsp->z_wp, desc + 24);
5261 			desc += 64;
5262 		}
5263 
5264 		if (partial && nrz >= rep_max_zones)
5265 			break;
5266 
5267 		nrz++;
5268 	}
5269 
5270 	/* Report header */
5271 	/* Zone list length. */
5272 	put_unaligned_be32(nrz * RZONES_DESC_HD, arr + 0);
5273 	/* Maximum LBA */
5274 	put_unaligned_be64(sdebug_capacity - 1, arr + 8);
5275 	/* Zone starting LBA granularity. */
5276 	if (devip->zcap < devip->zsize)
5277 		put_unaligned_be64(devip->zsize, arr + 16);
5278 
5279 	rep_len = (unsigned long)desc - (unsigned long)arr;
5280 	ret = fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, rep_len));
5281 
5282 fini:
5283 	sdeb_meta_read_unlock(sip);
5284 	kfree(arr);
5285 	return ret;
5286 }
5287 
5288 static int resp_atomic_write(struct scsi_cmnd *scp,
5289 			     struct sdebug_dev_info *devip)
5290 {
5291 	struct sdeb_store_info *sip;
5292 	u8 *cmd = scp->cmnd;
5293 	u16 boundary, len;
5294 	u64 lba, lba_tmp;
5295 	int ret;
5296 
5297 	if (!scsi_debug_atomic_write()) {
5298 		mk_sense_invalid_opcode(scp);
5299 		return check_condition_result;
5300 	}
5301 
5302 	sip = devip2sip(devip, true);
5303 
5304 	lba = get_unaligned_be64(cmd + 2);
5305 	boundary = get_unaligned_be16(cmd + 10);
5306 	len = get_unaligned_be16(cmd + 12);
5307 
5308 	lba_tmp = lba;
5309 	if (sdebug_atomic_wr_align &&
5310 	    do_div(lba_tmp, sdebug_atomic_wr_align)) {
5311 		/* Does not meet alignment requirement */
5312 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
5313 		return check_condition_result;
5314 	}
5315 
5316 	if (sdebug_atomic_wr_gran && len % sdebug_atomic_wr_gran) {
5317 		/* Does not meet alignment requirement */
5318 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
5319 		return check_condition_result;
5320 	}
5321 
5322 	if (boundary > 0) {
5323 		if (boundary > sdebug_atomic_wr_max_bndry) {
5324 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 12, -1);
5325 			return check_condition_result;
5326 		}
5327 
5328 		if (len > sdebug_atomic_wr_max_length_bndry) {
5329 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 12, -1);
5330 			return check_condition_result;
5331 		}
5332 	} else {
5333 		if (len > sdebug_atomic_wr_max_length) {
5334 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 12, -1);
5335 			return check_condition_result;
5336 		}
5337 	}
5338 
5339 	ret = do_device_access(sip, scp, 0, lba, len, 0, true, true);
5340 	if (unlikely(ret == -1))
5341 		return DID_ERROR << 16;
5342 	if (unlikely(ret != len * sdebug_sector_size))
5343 		return DID_ERROR << 16;
5344 	return 0;
5345 }
5346 
5347 /* Logic transplanted from tcmu-runner, file_zbc.c */
5348 static void zbc_open_all(struct sdebug_dev_info *devip)
5349 {
5350 	struct sdeb_zone_state *zsp = &devip->zstate[0];
5351 	unsigned int i;
5352 
5353 	for (i = 0; i < devip->nr_zones; i++, zsp++) {
5354 		if (zsp->z_cond == ZC4_CLOSED)
5355 			zbc_open_zone(devip, &devip->zstate[i], true);
5356 	}
5357 }
5358 
5359 static int resp_open_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
5360 {
5361 	int res = 0;
5362 	u64 z_id;
5363 	enum sdebug_z_cond zc;
5364 	u8 *cmd = scp->cmnd;
5365 	struct sdeb_zone_state *zsp;
5366 	bool all = cmd[14] & 0x01;
5367 	struct sdeb_store_info *sip = devip2sip(devip, false);
5368 
5369 	if (!sdebug_dev_is_zoned(devip)) {
5370 		mk_sense_invalid_opcode(scp);
5371 		return check_condition_result;
5372 	}
5373 	sdeb_meta_write_lock(sip);
5374 
5375 	if (all) {
5376 		/* Check if all closed zones can be opened */
5377 		if (devip->max_open &&
5378 		    devip->nr_exp_open + devip->nr_closed > devip->max_open) {
5379 			mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
5380 					INSUFF_ZONE_ASCQ);
5381 			res = check_condition_result;
5382 			goto fini;
5383 		}
5384 		/* Open all closed zones */
5385 		zbc_open_all(devip);
5386 		goto fini;
5387 	}
5388 
5389 	/* Open the specified zone */
5390 	z_id = get_unaligned_be64(cmd + 2);
5391 	if (z_id >= sdebug_capacity) {
5392 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
5393 		res = check_condition_result;
5394 		goto fini;
5395 	}
5396 
5397 	zsp = zbc_zone(devip, z_id);
5398 	if (z_id != zsp->z_start) {
5399 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
5400 		res = check_condition_result;
5401 		goto fini;
5402 	}
5403 	if (zbc_zone_is_conv(zsp)) {
5404 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
5405 		res = check_condition_result;
5406 		goto fini;
5407 	}
5408 
5409 	zc = zsp->z_cond;
5410 	if (zc == ZC3_EXPLICIT_OPEN || zc == ZC5_FULL)
5411 		goto fini;
5412 
5413 	if (devip->max_open && devip->nr_exp_open >= devip->max_open) {
5414 		mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
5415 				INSUFF_ZONE_ASCQ);
5416 		res = check_condition_result;
5417 		goto fini;
5418 	}
5419 
5420 	zbc_open_zone(devip, zsp, true);
5421 fini:
5422 	sdeb_meta_write_unlock(sip);
5423 	return res;
5424 }
5425 
5426 static void zbc_close_all(struct sdebug_dev_info *devip)
5427 {
5428 	unsigned int i;
5429 
5430 	for (i = 0; i < devip->nr_zones; i++)
5431 		zbc_close_zone(devip, &devip->zstate[i]);
5432 }
5433 
5434 static int resp_close_zone(struct scsi_cmnd *scp,
5435 			   struct sdebug_dev_info *devip)
5436 {
5437 	int res = 0;
5438 	u64 z_id;
5439 	u8 *cmd = scp->cmnd;
5440 	struct sdeb_zone_state *zsp;
5441 	bool all = cmd[14] & 0x01;
5442 	struct sdeb_store_info *sip = devip2sip(devip, false);
5443 
5444 	if (!sdebug_dev_is_zoned(devip)) {
5445 		mk_sense_invalid_opcode(scp);
5446 		return check_condition_result;
5447 	}
5448 
5449 	sdeb_meta_write_lock(sip);
5450 
5451 	if (all) {
5452 		zbc_close_all(devip);
5453 		goto fini;
5454 	}
5455 
5456 	/* Close specified zone */
5457 	z_id = get_unaligned_be64(cmd + 2);
5458 	if (z_id >= sdebug_capacity) {
5459 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
5460 		res = check_condition_result;
5461 		goto fini;
5462 	}
5463 
5464 	zsp = zbc_zone(devip, z_id);
5465 	if (z_id != zsp->z_start) {
5466 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
5467 		res = check_condition_result;
5468 		goto fini;
5469 	}
5470 	if (zbc_zone_is_conv(zsp)) {
5471 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
5472 		res = check_condition_result;
5473 		goto fini;
5474 	}
5475 
5476 	zbc_close_zone(devip, zsp);
5477 fini:
5478 	sdeb_meta_write_unlock(sip);
5479 	return res;
5480 }
5481 
5482 static void zbc_finish_zone(struct sdebug_dev_info *devip,
5483 			    struct sdeb_zone_state *zsp, bool empty)
5484 {
5485 	enum sdebug_z_cond zc = zsp->z_cond;
5486 
5487 	if (zc == ZC4_CLOSED || zc == ZC2_IMPLICIT_OPEN ||
5488 	    zc == ZC3_EXPLICIT_OPEN || (empty && zc == ZC1_EMPTY)) {
5489 		if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
5490 			zbc_close_zone(devip, zsp);
5491 		if (zsp->z_cond == ZC4_CLOSED)
5492 			devip->nr_closed--;
5493 		zsp->z_wp = zsp->z_start + zsp->z_size;
5494 		zsp->z_cond = ZC5_FULL;
5495 	}
5496 }
5497 
5498 static void zbc_finish_all(struct sdebug_dev_info *devip)
5499 {
5500 	unsigned int i;
5501 
5502 	for (i = 0; i < devip->nr_zones; i++)
5503 		zbc_finish_zone(devip, &devip->zstate[i], false);
5504 }
5505 
5506 static int resp_finish_zone(struct scsi_cmnd *scp,
5507 			    struct sdebug_dev_info *devip)
5508 {
5509 	struct sdeb_zone_state *zsp;
5510 	int res = 0;
5511 	u64 z_id;
5512 	u8 *cmd = scp->cmnd;
5513 	bool all = cmd[14] & 0x01;
5514 	struct sdeb_store_info *sip = devip2sip(devip, false);
5515 
5516 	if (!sdebug_dev_is_zoned(devip)) {
5517 		mk_sense_invalid_opcode(scp);
5518 		return check_condition_result;
5519 	}
5520 
5521 	sdeb_meta_write_lock(sip);
5522 
5523 	if (all) {
5524 		zbc_finish_all(devip);
5525 		goto fini;
5526 	}
5527 
5528 	/* Finish the specified zone */
5529 	z_id = get_unaligned_be64(cmd + 2);
5530 	if (z_id >= sdebug_capacity) {
5531 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
5532 		res = check_condition_result;
5533 		goto fini;
5534 	}
5535 
5536 	zsp = zbc_zone(devip, z_id);
5537 	if (z_id != zsp->z_start) {
5538 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
5539 		res = check_condition_result;
5540 		goto fini;
5541 	}
5542 	if (zbc_zone_is_conv(zsp)) {
5543 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
5544 		res = check_condition_result;
5545 		goto fini;
5546 	}
5547 
5548 	zbc_finish_zone(devip, zsp, true);
5549 fini:
5550 	sdeb_meta_write_unlock(sip);
5551 	return res;
5552 }
5553 
5554 static void zbc_rwp_zone(struct sdebug_dev_info *devip,
5555 			 struct sdeb_zone_state *zsp)
5556 {
5557 	enum sdebug_z_cond zc;
5558 	struct sdeb_store_info *sip = devip2sip(devip, false);
5559 
5560 	if (!zbc_zone_is_seq(zsp))
5561 		return;
5562 
5563 	zc = zsp->z_cond;
5564 	if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
5565 		zbc_close_zone(devip, zsp);
5566 
5567 	if (zsp->z_cond == ZC4_CLOSED)
5568 		devip->nr_closed--;
5569 
5570 	if (zsp->z_wp > zsp->z_start)
5571 		memset(sip->storep + zsp->z_start * sdebug_sector_size, 0,
5572 		       (zsp->z_wp - zsp->z_start) * sdebug_sector_size);
5573 
5574 	zsp->z_non_seq_resource = false;
5575 	zsp->z_wp = zsp->z_start;
5576 	zsp->z_cond = ZC1_EMPTY;
5577 }
5578 
5579 static void zbc_rwp_all(struct sdebug_dev_info *devip)
5580 {
5581 	unsigned int i;
5582 
5583 	for (i = 0; i < devip->nr_zones; i++)
5584 		zbc_rwp_zone(devip, &devip->zstate[i]);
5585 }
5586 
5587 static int resp_rwp_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
5588 {
5589 	struct sdeb_zone_state *zsp;
5590 	int res = 0;
5591 	u64 z_id;
5592 	u8 *cmd = scp->cmnd;
5593 	bool all = cmd[14] & 0x01;
5594 	struct sdeb_store_info *sip = devip2sip(devip, false);
5595 
5596 	if (!sdebug_dev_is_zoned(devip)) {
5597 		mk_sense_invalid_opcode(scp);
5598 		return check_condition_result;
5599 	}
5600 
5601 	sdeb_meta_write_lock(sip);
5602 
5603 	if (all) {
5604 		zbc_rwp_all(devip);
5605 		goto fini;
5606 	}
5607 
5608 	z_id = get_unaligned_be64(cmd + 2);
5609 	if (z_id >= sdebug_capacity) {
5610 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
5611 		res = check_condition_result;
5612 		goto fini;
5613 	}
5614 
5615 	zsp = zbc_zone(devip, z_id);
5616 	if (z_id != zsp->z_start) {
5617 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
5618 		res = check_condition_result;
5619 		goto fini;
5620 	}
5621 	if (zbc_zone_is_conv(zsp)) {
5622 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
5623 		res = check_condition_result;
5624 		goto fini;
5625 	}
5626 
5627 	zbc_rwp_zone(devip, zsp);
5628 fini:
5629 	sdeb_meta_write_unlock(sip);
5630 	return res;
5631 }
5632 
5633 static u32 get_tag(struct scsi_cmnd *cmnd)
5634 {
5635 	return blk_mq_unique_tag(scsi_cmd_to_rq(cmnd));
5636 }
5637 
5638 /* Queued (deferred) command completions converge here. */
5639 static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
5640 {
5641 	struct sdebug_queued_cmd *sqcp = container_of(sd_dp, struct sdebug_queued_cmd, sd_dp);
5642 	unsigned long flags;
5643 	struct scsi_cmnd *scp = sqcp->scmd;
5644 	struct sdebug_scsi_cmd *sdsc;
5645 	bool aborted;
5646 
5647 	if (sdebug_statistics) {
5648 		atomic_inc(&sdebug_completions);
5649 		if (raw_smp_processor_id() != sd_dp->issuing_cpu)
5650 			atomic_inc(&sdebug_miss_cpus);
5651 	}
5652 
5653 	if (!scp) {
5654 		pr_err("scmd=NULL\n");
5655 		goto out;
5656 	}
5657 
5658 	sdsc = scsi_cmd_priv(scp);
5659 	spin_lock_irqsave(&sdsc->lock, flags);
5660 	aborted = sd_dp->aborted;
5661 	if (unlikely(aborted))
5662 		sd_dp->aborted = false;
5663 	ASSIGN_QUEUED_CMD(scp, NULL);
5664 
5665 	spin_unlock_irqrestore(&sdsc->lock, flags);
5666 
5667 	if (aborted) {
5668 		pr_info("bypassing scsi_done() due to aborted cmd, kicking-off EH\n");
5669 		blk_abort_request(scsi_cmd_to_rq(scp));
5670 		goto out;
5671 	}
5672 
5673 	scsi_done(scp); /* callback to mid level */
5674 out:
5675 	sdebug_free_queued_cmd(sqcp);
5676 }
5677 
5678 /* When high resolution timer goes off this function is called. */
5679 static enum hrtimer_restart sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
5680 {
5681 	struct sdebug_defer *sd_dp = container_of(timer, struct sdebug_defer,
5682 						  hrt);
5683 	sdebug_q_cmd_complete(sd_dp);
5684 	return HRTIMER_NORESTART;
5685 }
5686 
5687 /* When work queue schedules work, it calls this function. */
5688 static void sdebug_q_cmd_wq_complete(struct work_struct *work)
5689 {
5690 	struct sdebug_defer *sd_dp = container_of(work, struct sdebug_defer,
5691 						  ew.work);
5692 	sdebug_q_cmd_complete(sd_dp);
5693 }
5694 
5695 static bool got_shared_uuid;
5696 static uuid_t shared_uuid;
5697 
5698 static int sdebug_device_create_zones(struct sdebug_dev_info *devip)
5699 {
5700 	struct sdeb_zone_state *zsp;
5701 	sector_t capacity = get_sdebug_capacity();
5702 	sector_t conv_capacity;
5703 	sector_t zstart = 0;
5704 	unsigned int i;
5705 
5706 	/*
5707 	 * Set the zone size: if sdeb_zbc_zone_size_mb is not set, figure out
5708 	 * a zone size allowing for at least 4 zones on the device. Otherwise,
5709 	 * use the specified zone size checking that at least 2 zones can be
5710 	 * created for the device.
5711 	 */
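	/*
	 * Worked example (assuming the default DEF_ZBC_ZONE_SIZE_MB of 128
	 * and 512 byte sectors): zsize starts at 262144 blocks and is
	 * halved while fewer than 4 such zones fit in the capacity.
	 */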
5712 	if (!sdeb_zbc_zone_size_mb) {
5713 		devip->zsize = (DEF_ZBC_ZONE_SIZE_MB * SZ_1M)
5714 			>> ilog2(sdebug_sector_size);
5715 		while (capacity < devip->zsize << 2 && devip->zsize >= 2)
5716 			devip->zsize >>= 1;
5717 		if (devip->zsize < 2) {
5718 			pr_err("Device capacity too small\n");
5719 			return -EINVAL;
5720 		}
5721 	} else {
5722 		if (!is_power_of_2(sdeb_zbc_zone_size_mb)) {
5723 			pr_err("Zone size is not a power of 2\n");
5724 			return -EINVAL;
5725 		}
5726 		devip->zsize = (sdeb_zbc_zone_size_mb * SZ_1M)
5727 			>> ilog2(sdebug_sector_size);
5728 		if (devip->zsize >= capacity) {
5729 			pr_err("Zone size too large for device capacity\n");
5730 			return -EINVAL;
5731 		}
5732 	}
5733 
5734 	devip->zsize_shift = ilog2(devip->zsize);
5735 	devip->nr_zones = (capacity + devip->zsize - 1) >> devip->zsize_shift;
5736 
5737 	if (sdeb_zbc_zone_cap_mb == 0) {
5738 		devip->zcap = devip->zsize;
5739 	} else {
5740 		devip->zcap = (sdeb_zbc_zone_cap_mb * SZ_1M) >>
5741 			      ilog2(sdebug_sector_size);
5742 		if (devip->zcap > devip->zsize) {
5743 			pr_err("Zone capacity too large\n");
5744 			return -EINVAL;
5745 		}
5746 	}
5747 
5748 	conv_capacity = (sector_t)sdeb_zbc_nr_conv << devip->zsize_shift;
5749 	if (conv_capacity >= capacity) {
5750 		pr_err("Number of conventional zones too large\n");
5751 		return -EINVAL;
5752 	}
5753 	devip->nr_conv_zones = sdeb_zbc_nr_conv;
5754 	devip->nr_seq_zones = ALIGN(capacity - conv_capacity, devip->zsize) >>
5755 			      devip->zsize_shift;
5756 	devip->nr_zones = devip->nr_conv_zones + devip->nr_seq_zones;
5757 
5758 	/* Add gap zones if zone capacity is smaller than the zone size */
5759 	if (devip->zcap < devip->zsize)
5760 		devip->nr_zones += devip->nr_seq_zones;
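	/*
	 * E.g. (hypothetical numbers) a 128 MiB zone size with a 96 MiB
	 * zone capacity leaves a 32 MiB gap after every sequential zone,
	 * so one extra gap zone is counted per sequential zone.
	 */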
5761 
5762 	if (devip->zoned) {
5763 		/* zbc_max_open_zones can be 0, meaning "not reported" */
5764 		if (sdeb_zbc_max_open >= devip->nr_zones - 1)
5765 			devip->max_open = (devip->nr_zones - 1) / 2;
5766 		else
5767 			devip->max_open = sdeb_zbc_max_open;
5768 	}
5769 
5770 	devip->zstate = kcalloc(devip->nr_zones,
5771 				sizeof(struct sdeb_zone_state), GFP_KERNEL);
5772 	if (!devip->zstate)
5773 		return -ENOMEM;
5774 
5775 	for (i = 0; i < devip->nr_zones; i++) {
5776 		zsp = &devip->zstate[i];
5777 
5778 		zsp->z_start = zstart;
5779 
5780 		if (i < devip->nr_conv_zones) {
5781 			zsp->z_type = ZBC_ZTYPE_CNV;
5782 			zsp->z_cond = ZBC_NOT_WRITE_POINTER;
5783 			zsp->z_wp = (sector_t)-1;
5784 			zsp->z_size =
5785 				min_t(u64, devip->zsize, capacity - zstart);
5786 		} else if ((zstart & (devip->zsize - 1)) == 0) {
5787 			if (devip->zoned)
5788 				zsp->z_type = ZBC_ZTYPE_SWR;
5789 			else
5790 				zsp->z_type = ZBC_ZTYPE_SWP;
5791 			zsp->z_cond = ZC1_EMPTY;
5792 			zsp->z_wp = zsp->z_start;
5793 			zsp->z_size =
5794 				min_t(u64, devip->zcap, capacity - zstart);
5795 		} else {
5796 			zsp->z_type = ZBC_ZTYPE_GAP;
5797 			zsp->z_cond = ZBC_NOT_WRITE_POINTER;
5798 			zsp->z_wp = (sector_t)-1;
5799 			zsp->z_size = min_t(u64, devip->zsize - devip->zcap,
5800 					    capacity - zstart);
5801 		}
5802 
5803 		WARN_ON_ONCE((int)zsp->z_size <= 0);
5804 		zstart += zsp->z_size;
5805 	}
5806 
5807 	return 0;
5808 }
5809 
5810 static struct sdebug_dev_info *sdebug_device_create(
5811 			struct sdebug_host_info *sdbg_host, gfp_t flags)
5812 {
5813 	struct sdebug_dev_info *devip;
5814 
5815 	devip = kzalloc(sizeof(*devip), flags);
5816 	if (devip) {
5817 		if (sdebug_uuid_ctl == 1)
5818 			uuid_gen(&devip->lu_name);
5819 		else if (sdebug_uuid_ctl == 2) {
5820 			if (got_shared_uuid)
5821 				devip->lu_name = shared_uuid;
5822 			else {
5823 				uuid_gen(&shared_uuid);
5824 				got_shared_uuid = true;
5825 				devip->lu_name = shared_uuid;
5826 			}
5827 		}
5828 		devip->sdbg_host = sdbg_host;
5829 		if (sdeb_zbc_in_use) {
5830 			devip->zoned = sdeb_zbc_model == BLK_ZONED_HM;
5831 			if (sdebug_device_create_zones(devip)) {
5832 				kfree(devip);
5833 				return NULL;
5834 			}
5835 		} else {
5836 			devip->zoned = false;
5837 		}
5838 		devip->create_ts = ktime_get_boottime();
5839 		atomic_set(&devip->stopped, (sdeb_tur_ms_to_ready > 0 ? 2 : 0));
5840 		spin_lock_init(&devip->list_lock);
5841 		INIT_LIST_HEAD(&devip->inject_err_list);
5842 		list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
5843 	}
5844 	return devip;
5845 }
5846 
5847 static struct sdebug_dev_info *find_build_dev_info(struct scsi_device *sdev)
5848 {
5849 	struct sdebug_host_info *sdbg_host;
5850 	struct sdebug_dev_info *open_devip = NULL;
5851 	struct sdebug_dev_info *devip;
5852 
5853 	sdbg_host = shost_to_sdebug_host(sdev->host);
5854 
5855 	list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
5856 		if ((devip->used) && (devip->channel == sdev->channel) &&
5857 		    (devip->target == sdev->id) &&
5858 		    (devip->lun == sdev->lun))
5859 			return devip;
5860 		else {
5861 			if ((!devip->used) && (!open_devip))
5862 				open_devip = devip;
5863 		}
5864 	}
5865 	if (!open_devip) { /* try and make a new one */
5866 		open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
5867 		if (!open_devip) {
5868 			pr_err("out of memory at line %d\n", __LINE__);
5869 			return NULL;
5870 		}
5871 	}
5872 
5873 	open_devip->channel = sdev->channel;
5874 	open_devip->target = sdev->id;
5875 	open_devip->lun = sdev->lun;
5876 	open_devip->sdbg_host = sdbg_host;
5877 	set_bit(SDEBUG_UA_POOCCUR, open_devip->uas_bm);
5878 	open_devip->used = true;
5879 	return open_devip;
5880 }
5881 
5882 static int scsi_debug_sdev_init(struct scsi_device *sdp)
5883 {
5884 	if (sdebug_verbose)
5885 		pr_info("sdev_init <%u %u %u %llu>\n",
5886 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5887 
5888 	return 0;
5889 }
5890 
5891 static int scsi_debug_sdev_configure(struct scsi_device *sdp,
5892 				     struct queue_limits *lim)
5893 {
5894 	struct sdebug_dev_info *devip =
5895 			(struct sdebug_dev_info *)sdp->hostdata;
5896 	struct dentry *dentry;
5897 
5898 	if (sdebug_verbose)
5899 		pr_info("sdev_configure <%u %u %u %llu>\n",
5900 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5901 	if (sdp->host->max_cmd_len != SDEBUG_MAX_CMD_LEN)
5902 		sdp->host->max_cmd_len = SDEBUG_MAX_CMD_LEN;
5903 	if (devip == NULL) {
5904 		devip = find_build_dev_info(sdp);
5905 		if (devip == NULL)
5906 			return 1;  /* no resources, will be marked offline */
5907 	}
5908 	sdp->hostdata = devip;
5909 	if (sdebug_no_uld)
5910 		sdp->no_uld_attach = 1;
5911 	config_cdb_len(sdp);
5912 
5913 	if (sdebug_allow_restart)
5914 		sdp->allow_restart = 1;
5915 
5916 	devip->debugfs_entry = debugfs_create_dir(dev_name(&sdp->sdev_dev),
5917 				sdebug_debugfs_root);
5918 	if (IS_ERR_OR_NULL(devip->debugfs_entry))
5919 		pr_info("%s: failed to create debugfs directory for device %s\n",
5920 			__func__, dev_name(&sdp->sdev_gendev));
5921 
5922 	dentry = debugfs_create_file("error", 0600, devip->debugfs_entry, sdp,
5923 				&sdebug_error_fops);
5924 	if (IS_ERR_OR_NULL(dentry))
5925 		pr_info("%s: failed to create error file for device %s\n",
5926 			__func__, dev_name(&sdp->sdev_gendev));
5927 
5928 	return 0;
5929 }
5930 
5931 static void scsi_debug_sdev_destroy(struct scsi_device *sdp)
5932 {
5933 	struct sdebug_dev_info *devip =
5934 		(struct sdebug_dev_info *)sdp->hostdata;
5935 	struct sdebug_err_inject *err;
5936 
5937 	if (sdebug_verbose)
5938 		pr_info("sdev_destroy <%u %u %u %llu>\n",
5939 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5940 
5941 	if (!devip)
5942 		return;
5943 
5944 	spin_lock(&devip->list_lock);
5945 	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
5946 		list_del_rcu(&err->list);
5947 		call_rcu(&err->rcu, sdebug_err_free);
5948 	}
5949 	spin_unlock(&devip->list_lock);
5950 
5951 	debugfs_remove(devip->debugfs_entry);
5952 
5953 	/* make this slot available for re-use */
5954 	devip->used = false;
5955 	sdp->hostdata = NULL;
5956 }
5957 
5958 /* Returns true if we require the queued memory to be freed by the caller. */
5959 static bool stop_qc_helper(struct sdebug_defer *sd_dp,
5960 			   enum sdeb_defer_type defer_t)
5961 {
5962 	if (defer_t == SDEB_DEFER_HRT) {
5963 		int res = hrtimer_try_to_cancel(&sd_dp->hrt);
5964 
5965 		switch (res) {
5966 		case 0: /* Not active, it must have already run */
5967 		case -1: /* it is currently executing its callback */
5968 			return false;
5969 		case 1: /* Was active, we've now cancelled */
5970 		default:
5971 			return true;
5972 		}
5973 	} else if (defer_t == SDEB_DEFER_WQ) {
5974 		/* Cancel if pending */
5975 		if (cancel_work_sync(&sd_dp->ew.work))
5976 			return true;
5977 		/* Was not pending, so it must have run */
5978 		return false;
5979 	} else if (defer_t == SDEB_DEFER_POLL) {
5980 		return true;
5981 	}
5982 
5983 	return false;
5984 }
5985 
5986 
5987 static bool scsi_debug_stop_cmnd(struct scsi_cmnd *cmnd)
5988 {
5989 	enum sdeb_defer_type l_defer_t;
5990 	struct sdebug_defer *sd_dp;
5991 	struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmnd);
5992 	struct sdebug_queued_cmd *sqcp = TO_QUEUED_CMD(cmnd);
5993 
5994 	lockdep_assert_held(&sdsc->lock);
5995 
5996 	if (!sqcp)
5997 		return false;
5998 	sd_dp = &sqcp->sd_dp;
5999 	l_defer_t = READ_ONCE(sd_dp->defer_t);
6000 	ASSIGN_QUEUED_CMD(cmnd, NULL);
6001 
6002 	if (stop_qc_helper(sd_dp, l_defer_t))
6003 		sdebug_free_queued_cmd(sqcp);
6004 
6005 	return true;
6006 }
6007 
6008 /*
6009  * Called from scsi_debug_abort() only, which is for timed-out cmd.
6010  */
6011 static bool scsi_debug_abort_cmnd(struct scsi_cmnd *cmnd)
6012 {
6013 	struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmnd);
6014 	unsigned long flags;
6015 	bool res;
6016 
6017 	spin_lock_irqsave(&sdsc->lock, flags);
6018 	res = scsi_debug_stop_cmnd(cmnd);
6019 	spin_unlock_irqrestore(&sdsc->lock, flags);
6020 
6021 	return res;
6022 }
6023 
6024 /*
6025  * All we can do is set the cmnd as internally aborted and wait for it to
6026  * finish. We cannot call scsi_done() as the normal completion path may do that.
6027  */
6028 static bool sdebug_stop_cmnd(struct request *rq, void *data)
6029 {
6030 	scsi_debug_abort_cmnd(blk_mq_rq_to_pdu(rq));
6031 
6032 	return true;
6033 }
6034 
6035 /* Deletes (stops) timers or work queues of all queued commands */
6036 static void stop_all_queued(void)
6037 {
6038 	struct sdebug_host_info *sdhp;
6039 
6040 	mutex_lock(&sdebug_host_list_mutex);
6041 	list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
6042 		struct Scsi_Host *shost = sdhp->shost;
6043 
6044 		blk_mq_tagset_busy_iter(&shost->tag_set, sdebug_stop_cmnd, NULL);
6045 	}
6046 	mutex_unlock(&sdebug_host_list_mutex);
6047 }
6048 
6049 static int sdebug_fail_abort(struct scsi_cmnd *cmnd)
6050 {
6051 	struct scsi_device *sdp = cmnd->device;
6052 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
6053 	struct sdebug_err_inject *err;
6054 	unsigned char *cmd = cmnd->cmnd;
6055 	int ret = 0;
6056 
6057 	if (devip == NULL)
6058 		return 0;
6059 
6060 	rcu_read_lock();
6061 	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
6062 		if (err->type == ERR_ABORT_CMD_FAILED &&
6063 		    (err->cmd == cmd[0] || err->cmd == 0xff)) {
6064 			ret = !!err->cnt;
6065 			if (err->cnt < 0)
6066 				err->cnt++;
6067 
6068 			rcu_read_unlock();
6069 			return ret;
6070 		}
6071 	}
6072 	rcu_read_unlock();
6073 
6074 	return 0;
6075 }
6076 
6077 static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
6078 {
6079 	bool ok = scsi_debug_abort_cmnd(SCpnt);
6080 	u8 *cmd = SCpnt->cmnd;
6081 	u8 opcode = cmd[0];
6082 
6083 	++num_aborts;
6084 
6085 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
6086 		sdev_printk(KERN_INFO, SCpnt->device,
6087 			    "%s: command%s found\n", __func__,
6088 			    ok ? "" : " not");
6089 
6090 	if (sdebug_fail_abort(SCpnt)) {
6091 		scmd_printk(KERN_INFO, SCpnt, "fail abort command 0x%x\n",
6092 			    opcode);
6093 		return FAILED;
6094 	}
6095 
6096 	return SUCCESS;
6097 }
6098 
6099 static bool scsi_debug_stop_all_queued_iter(struct request *rq, void *data)
6100 {
6101 	struct scsi_device *sdp = data;
6102 	struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
6103 
6104 	if (scmd->device == sdp)
6105 		scsi_debug_abort_cmnd(scmd);
6106 
6107 	return true;
6108 }
6109 
6110 /* Deletes (stops) timers or work queues of all queued commands per sdev */
6111 static void scsi_debug_stop_all_queued(struct scsi_device *sdp)
6112 {
6113 	struct Scsi_Host *shost = sdp->host;
6114 
6115 	blk_mq_tagset_busy_iter(&shost->tag_set,
6116 				scsi_debug_stop_all_queued_iter, sdp);
6117 }
6118 
6119 static int sdebug_fail_lun_reset(struct scsi_cmnd *cmnd)
6120 {
6121 	struct scsi_device *sdp = cmnd->device;
6122 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
6123 	struct sdebug_err_inject *err;
6124 	unsigned char *cmd = cmnd->cmnd;
6125 	int ret = 0;
6126 
6127 	if (devip == NULL)
6128 		return 0;
6129 
6130 	rcu_read_lock();
6131 	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
6132 		if (err->type == ERR_LUN_RESET_FAILED &&
6133 		    (err->cmd == cmd[0] || err->cmd == 0xff)) {
6134 			ret = !!err->cnt;
6135 			if (err->cnt < 0)
6136 				err->cnt++;
6137 
6138 			rcu_read_unlock();
6139 			return ret;
6140 		}
6141 	}
6142 	rcu_read_unlock();
6143 
6144 	return 0;
6145 }
6146 
6147 static int scsi_debug_device_reset(struct scsi_cmnd *SCpnt)
6148 {
6149 	struct scsi_device *sdp = SCpnt->device;
6150 	struct sdebug_dev_info *devip = sdp->hostdata;
6151 	u8 *cmd = SCpnt->cmnd;
6152 	u8 opcode = cmd[0];
6153 
6154 	++num_dev_resets;
6155 
6156 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
6157 		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
6158 
6159 	scsi_debug_stop_all_queued(sdp);
6160 	if (devip)
6161 		set_bit(SDEBUG_UA_POR, devip->uas_bm);
6162 
6163 	if (sdebug_fail_lun_reset(SCpnt)) {
6164 		scmd_printk(KERN_INFO, SCpnt, "fail lun reset 0x%x\n", opcode);
6165 		return FAILED;
6166 	}
6167 
6168 	return SUCCESS;
6169 }
6170 
6171 static int sdebug_fail_target_reset(struct scsi_cmnd *cmnd)
6172 {
6173 	struct scsi_target *starget = scsi_target(cmnd->device);
6174 	struct sdebug_target_info *targetip =
6175 		(struct sdebug_target_info *)starget->hostdata;
6176 
6177 	if (targetip)
6178 		return targetip->reset_fail;
6179 
6180 	return 0;
6181 }
6182 
6183 static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt)
6184 {
6185 	struct scsi_device *sdp = SCpnt->device;
6186 	struct sdebug_host_info *sdbg_host = shost_to_sdebug_host(sdp->host);
6187 	struct sdebug_dev_info *devip;
6188 	u8 *cmd = SCpnt->cmnd;
6189 	u8 opcode = cmd[0];
6190 	int k = 0;
6191 
6192 	++num_target_resets;
6193 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
6194 		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
6195 
6196 	list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
6197 		if (devip->target == sdp->id) {
6198 			set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
6199 			++k;
6200 		}
6201 	}
6202 
6203 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
6204 		sdev_printk(KERN_INFO, sdp,
6205 			    "%s: %d device(s) found in target\n", __func__, k);
6206 
6207 	if (sdebug_fail_target_reset(SCpnt)) {
6208 		scmd_printk(KERN_INFO, SCpnt, "fail target reset 0x%x\n",
6209 			    opcode);
6210 		return FAILED;
6211 	}
6212 
6213 	return SUCCESS;
6214 }
6215 
6216 static int scsi_debug_bus_reset(struct scsi_cmnd *SCpnt)
6217 {
6218 	struct scsi_device *sdp = SCpnt->device;
6219 	struct sdebug_host_info *sdbg_host = shost_to_sdebug_host(sdp->host);
6220 	struct sdebug_dev_info *devip;
6221 	int k = 0;
6222 
6223 	++num_bus_resets;
6224 
6225 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
6226 		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
6227 
6228 	list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
6229 		set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
6230 		++k;
6231 	}
6232 
6233 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
6234 		sdev_printk(KERN_INFO, sdp,
6235 			    "%s: %d device(s) found in host\n", __func__, k);
6236 	return SUCCESS;
6237 }
6238 
6239 static int scsi_debug_host_reset(struct scsi_cmnd *SCpnt)
6240 {
6241 	struct sdebug_host_info *sdbg_host;
6242 	struct sdebug_dev_info *devip;
6243 	int k = 0;
6244 
6245 	++num_host_resets;
6246 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
6247 		sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__);
6248 	mutex_lock(&sdebug_host_list_mutex);
6249 	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
6250 		list_for_each_entry(devip, &sdbg_host->dev_info_list,
6251 				    dev_list) {
6252 			set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
6253 			++k;
6254 		}
6255 	}
6256 	mutex_unlock(&sdebug_host_list_mutex);
6257 	stop_all_queued();
6258 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
6259 		sdev_printk(KERN_INFO, SCpnt->device,
6260 			    "%s: %d device(s) found\n", __func__, k);
6261 	return SUCCESS;
6262 }
6263 
6264 static void sdebug_build_parts(unsigned char *ramp, unsigned long store_size)
6265 {
6266 	struct msdos_partition *pp;
6267 	int starts[SDEBUG_MAX_PARTS + 2], max_part_secs;
6268 	int sectors_per_part, num_sectors, k;
6269 	int heads_by_sects, start_sec, end_sec;
6270 
6271 	/* assume partition table already zeroed */
6272 	if ((sdebug_num_parts < 1) || (store_size < 1048576))
6273 		return;
6274 	if (sdebug_num_parts > SDEBUG_MAX_PARTS) {
6275 		sdebug_num_parts = SDEBUG_MAX_PARTS;
6276 		pr_warn("reducing partitions to %d\n", SDEBUG_MAX_PARTS);
6277 	}
6278 	num_sectors = (int)get_sdebug_capacity();
6279 	sectors_per_part = (num_sectors - sdebug_sectors_per)
6280 			   / sdebug_num_parts;
6281 	heads_by_sects = sdebug_heads * sdebug_sectors_per;
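	/* heads_by_sects is the number of sectors in one cylinder; the
	 * partition boundaries computed below are aligned to cylinders to
	 * match the CHS values written into the MSDOS partition entries. */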
6282 	starts[0] = sdebug_sectors_per;
6283 	max_part_secs = sectors_per_part;
6284 	for (k = 1; k < sdebug_num_parts; ++k) {
6285 		starts[k] = ((k * sectors_per_part) / heads_by_sects)
6286 			    * heads_by_sects;
6287 		if (starts[k] - starts[k - 1] < max_part_secs)
6288 			max_part_secs = starts[k] - starts[k - 1];
6289 	}
6290 	starts[sdebug_num_parts] = num_sectors;
6291 	starts[sdebug_num_parts + 1] = 0;
6292 
6293 	ramp[510] = 0x55;	/* magic partition markings */
6294 	ramp[511] = 0xAA;
6295 	pp = (struct msdos_partition *)(ramp + 0x1be);
6296 	for (k = 0; starts[k + 1]; ++k, ++pp) {
6297 		start_sec = starts[k];
6298 		end_sec = starts[k] + max_part_secs - 1;
6299 		pp->boot_ind = 0;
6300 
6301 		pp->cyl = start_sec / heads_by_sects;
6302 		pp->head = (start_sec - (pp->cyl * heads_by_sects))
6303 			   / sdebug_sectors_per;
6304 		pp->sector = (start_sec % sdebug_sectors_per) + 1;
6305 
6306 		pp->end_cyl = end_sec / heads_by_sects;
6307 		pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
6308 			       / sdebug_sectors_per;
6309 		pp->end_sector = (end_sec % sdebug_sectors_per) + 1;
6310 
6311 		pp->start_sect = cpu_to_le32(start_sec);
6312 		pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1);
6313 		pp->sys_ind = 0x83;	/* plain Linux partition */
6314 	}
6315 }
6316 
6317 static void block_unblock_all_queues(bool block)
6318 {
6319 	struct sdebug_host_info *sdhp;
6320 
6321 	lockdep_assert_held(&sdebug_host_list_mutex);
6322 
6323 	list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
6324 		struct Scsi_Host *shost = sdhp->shost;
6325 
6326 		if (block)
6327 			scsi_block_requests(shost);
6328 		else
6329 			scsi_unblock_requests(shost);
6330 	}
6331 }
6332 
6333 /* Adjust (by rounding down) the sdebug_cmnd_count so abs(every_nth)-1
6334  * commands will be processed normally before triggers occur.
6335  */
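/* E.g. with sdebug_every_nth == 100 (hypothetical), a sdebug_cmnd_count of
 * 250 is rounded down to 200, a multiple of the modulus, which restarts
 * the every-nth cycle cleanly.
 */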
6336 static void tweak_cmnd_count(void)
6337 {
6338 	int count, modulo;
6339 
6340 	modulo = abs(sdebug_every_nth);
6341 	if (modulo < 2)
6342 		return;
6343 
6344 	mutex_lock(&sdebug_host_list_mutex);
6345 	block_unblock_all_queues(true);
6346 	count = atomic_read(&sdebug_cmnd_count);
6347 	atomic_set(&sdebug_cmnd_count, (count / modulo) * modulo);
6348 	block_unblock_all_queues(false);
6349 	mutex_unlock(&sdebug_host_list_mutex);
6350 }
6351 
6352 static void clear_queue_stats(void)
6353 {
6354 	atomic_set(&sdebug_cmnd_count, 0);
6355 	atomic_set(&sdebug_completions, 0);
6356 	atomic_set(&sdebug_miss_cpus, 0);
6357 	atomic_set(&sdebug_a_tsf, 0);
6358 }
6359 
6360 static bool inject_on_this_cmd(void)
6361 {
6362 	if (sdebug_every_nth == 0)
6363 		return false;
6364 	return (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) == 0;
6365 }
6366 
6367 #define INCLUSIVE_TIMING_MAX_NS 1000000		/* 1 millisecond */
6368 
6369 
6370 void sdebug_free_queued_cmd(struct sdebug_queued_cmd *sqcp)
6371 {
6372 	if (sqcp)
6373 		kmem_cache_free(queued_cmd_cache, sqcp);
6374 }
6375 
6376 static struct sdebug_queued_cmd *sdebug_alloc_queued_cmd(struct scsi_cmnd *scmd)
6377 {
6378 	struct sdebug_queued_cmd *sqcp;
6379 	struct sdebug_defer *sd_dp;
6380 
6381 	sqcp = kmem_cache_zalloc(queued_cmd_cache, GFP_ATOMIC);
6382 	if (!sqcp)
6383 		return NULL;
6384 
6385 	sd_dp = &sqcp->sd_dp;
6386 
6387 	hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
6388 	sd_dp->hrt.function = sdebug_q_cmd_hrt_complete;
6389 	INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);
6390 
6391 	sqcp->scmd = scmd;
6392 
6393 	return sqcp;
6394 }
6395 
6396 /* Complete the processing of the thread that queued a SCSI command to this
6397  * driver. It either completes the command immediately by calling scsi_done()
6398  * or schedules an hrtimer or work queue item and then returns 0. Returns
6399  * SCSI_MLQUEUE_HOST_BUSY if temporarily out of resources.
6400  */
6401 static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
6402 			 int scsi_result,
6403 			 int (*pfp)(struct scsi_cmnd *,
6404 				    struct sdebug_dev_info *),
6405 			 int delta_jiff, int ndelay)
6406 {
6407 	struct request *rq = scsi_cmd_to_rq(cmnd);
6408 	bool polled = rq->cmd_flags & REQ_POLLED;
6409 	struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmnd);
6410 	unsigned long flags;
6411 	u64 ns_from_boot = 0;
6412 	struct sdebug_queued_cmd *sqcp;
6413 	struct scsi_device *sdp;
6414 	struct sdebug_defer *sd_dp;
6415 
6416 	if (unlikely(devip == NULL)) {
6417 		if (scsi_result == 0)
6418 			scsi_result = DID_NO_CONNECT << 16;
6419 		goto respond_in_thread;
6420 	}
6421 	sdp = cmnd->device;
6422 
6423 	if (delta_jiff == 0)
6424 		goto respond_in_thread;
6425 
6426 
6427 	if (unlikely(sdebug_every_nth && (SDEBUG_OPT_RARE_TSF & sdebug_opts) &&
6428 		     (scsi_result == 0))) {
6429 		int num_in_q = scsi_device_busy(sdp);
6430 		int qdepth = cmnd->device->queue_depth;
6431 
6432 		if ((num_in_q == qdepth) &&
6433 		    (atomic_inc_return(&sdebug_a_tsf) >=
6434 		     abs(sdebug_every_nth))) {
6435 			atomic_set(&sdebug_a_tsf, 0);
6436 			scsi_result = device_qfull_result;
6437 
6438 			if (unlikely(SDEBUG_OPT_Q_NOISE & sdebug_opts))
6439 				sdev_printk(KERN_INFO, sdp, "%s: num_in_q=%d +1, <inject> status: TASK SET FULL\n",
6440 					    __func__, num_in_q);
6441 		}
6442 	}
6443 
6444 	sqcp = sdebug_alloc_queued_cmd(cmnd);
6445 	if (!sqcp) {
6446 		pr_err("%s no alloc\n", __func__);
6447 		return SCSI_MLQUEUE_HOST_BUSY;
6448 	}
6449 	sd_dp = &sqcp->sd_dp;
6450 
6451 	if (polled || (ndelay > 0 && ndelay < INCLUSIVE_TIMING_MAX_NS))
6452 		ns_from_boot = ktime_get_boottime_ns();
6453 
6454 	/* one of the resp_*() response functions is called here */
6455 	cmnd->result = pfp ? pfp(cmnd, devip) : 0;
6456 	if (cmnd->result & SDEG_RES_IMMED_MASK) {
6457 		cmnd->result &= ~SDEG_RES_IMMED_MASK;
6458 		delta_jiff = ndelay = 0;
6459 	}
6460 	if (cmnd->result == 0 && scsi_result != 0)
6461 		cmnd->result = scsi_result;
6462 	if (cmnd->result == 0 && unlikely(sdebug_opts & SDEBUG_OPT_TRANSPORT_ERR)) {
6463 		if (atomic_read(&sdeb_inject_pending)) {
6464 			mk_sense_buffer(cmnd, ABORTED_COMMAND, TRANSPORT_PROBLEM, ACK_NAK_TO);
6465 			atomic_set(&sdeb_inject_pending, 0);
6466 			cmnd->result = check_condition_result;
6467 		}
6468 	}
6469 
6470 	if (unlikely(sdebug_verbose && cmnd->result))
6471 		sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n",
6472 			    __func__, cmnd->result);
6473 
6474 	if (delta_jiff > 0 || ndelay > 0) {
6475 		ktime_t kt;
6476 
6477 		if (delta_jiff > 0) {
6478 			u64 ns = jiffies_to_nsecs(delta_jiff);
6479 
6480 			if (sdebug_random && ns < U32_MAX) {
6481 				ns = get_random_u32_below((u32)ns);
6482 			} else if (sdebug_random) {
6483 				ns >>= 12;	/* scale to 4 usec precision */
6484 				if (ns < U32_MAX)	/* over 4 hours max */
6485 					ns = get_random_u32_below((u32)ns);
6486 				ns <<= 12;
6487 			}
6488 			kt = ns_to_ktime(ns);
6489 		} else {	/* ndelay has a 4.2 second max */
6490 			kt = sdebug_random ? get_random_u32_below((u32)ndelay) :
6491 					     (u32)ndelay;
6492 			if (ndelay < INCLUSIVE_TIMING_MAX_NS) {
6493 				u64 d = ktime_get_boottime_ns() - ns_from_boot;
6494 
6495 				if (kt <= d) {	/* elapsed duration >= kt */
6496 					/* call scsi_done() from this thread */
6497 					sdebug_free_queued_cmd(sqcp);
6498 					scsi_done(cmnd);
6499 					return 0;
6500 				}
6501 				/* otherwise reduce kt by elapsed time */
6502 				kt -= d;
6503 			}
6504 		}
6505 		if (sdebug_statistics)
6506 			sd_dp->issuing_cpu = raw_smp_processor_id();
6507 		if (polled) {
6508 			spin_lock_irqsave(&sdsc->lock, flags);
6509 			sd_dp->cmpl_ts = ktime_add(ns_to_ktime(ns_from_boot), kt);
6510 			ASSIGN_QUEUED_CMD(cmnd, sqcp);
6511 			WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_POLL);
6512 			spin_unlock_irqrestore(&sdsc->lock, flags);
6513 		} else {
6514 			/* schedule the invocation of scsi_done() for a later time */
6515 			spin_lock_irqsave(&sdsc->lock, flags);
6516 			ASSIGN_QUEUED_CMD(cmnd, sqcp);
6517 			WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_HRT);
6518 			hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED);
6519 			/*
6520 			 * The completion handler will try to grab sdsc->lock,
6521 			 * so there is no chance that the completion handler
6522 			 * will call scsi_done() until we release the lock
6523 			 * here (so ok to keep referencing sdsc).
6524 			 */
6525 			spin_unlock_irqrestore(&sdsc->lock, flags);
6526 		}
6527 	} else {	/* jdelay < 0, use work queue */
6528 		if (unlikely((sdebug_opts & SDEBUG_OPT_CMD_ABORT) &&
6529 			     atomic_read(&sdeb_inject_pending))) {
6530 			sd_dp->aborted = true;
6531 			atomic_set(&sdeb_inject_pending, 0);
6532 			sdev_printk(KERN_INFO, sdp, "abort request tag=%#x\n",
6533 				    blk_mq_unique_tag_to_tag(get_tag(cmnd)));
6534 		}
6535 
6536 		if (sdebug_statistics)
6537 			sd_dp->issuing_cpu = raw_smp_processor_id();
6538 		if (polled) {
6539 			spin_lock_irqsave(&sdsc->lock, flags);
6540 			ASSIGN_QUEUED_CMD(cmnd, sqcp);
6541 			sd_dp->cmpl_ts = ns_to_ktime(ns_from_boot);
6542 			WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_POLL);
6543 			spin_unlock_irqrestore(&sdsc->lock, flags);
6544 		} else {
6545 			spin_lock_irqsave(&sdsc->lock, flags);
6546 			ASSIGN_QUEUED_CMD(cmnd, sqcp);
6547 			WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_WQ);
6548 			schedule_work(&sd_dp->ew.work);
6549 			spin_unlock_irqrestore(&sdsc->lock, flags);
6550 		}
6551 	}
6552 
6553 	return 0;
6554 
6555 respond_in_thread:	/* call back to mid-layer using invocation thread */
6556 	cmnd->result = pfp != NULL ? pfp(cmnd, devip) : 0;
6557 	cmnd->result &= ~SDEG_RES_IMMED_MASK;
6558 	if (cmnd->result == 0 && scsi_result != 0)
6559 		cmnd->result = scsi_result;
6560 	scsi_done(cmnd);
6561 	return 0;
6562 }
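
/*
 * Editor's summary of schedule_resp() completion paths (a sketch, not
 * normative):
 *  - delta_jiff == 0: the command is completed immediately via scsi_done()
 *    in the submitting thread ("respond_in_thread");
 *  - delta_jiff > 0 or ndelay > 0: completion is deferred to an hrtimer,
 *    or merely timestamped for REQ_POLLED requests;
 *  - otherwise: completion is punted to a work queue.
 */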
6563 
6564 /* Note: The following macros create attribute files in the
6565    /sys/module/scsi_debug/parameters directory. Unfortunately this
6566    driver is unaware of changes made via those files and so cannot
6567    trigger auxiliary actions, as it can when the corresponding attribute
6568    in the /sys/bus/pseudo/drivers/scsi_debug directory is changed.
6569  */
6570 module_param_named(add_host, sdebug_add_host, int, S_IRUGO | S_IWUSR);
6571 module_param_named(ato, sdebug_ato, int, S_IRUGO);
6572 module_param_named(cdb_len, sdebug_cdb_len, int, 0644);
6573 module_param_named(clustering, sdebug_clustering, bool, S_IRUGO | S_IWUSR);
6574 module_param_named(delay, sdebug_jdelay, int, S_IRUGO | S_IWUSR);
6575 module_param_named(dev_size_mb, sdebug_dev_size_mb, int, S_IRUGO);
6576 module_param_named(dif, sdebug_dif, int, S_IRUGO);
6577 module_param_named(dix, sdebug_dix, int, S_IRUGO);
6578 module_param_named(dsense, sdebug_dsense, int, S_IRUGO | S_IWUSR);
6579 module_param_named(every_nth, sdebug_every_nth, int, S_IRUGO | S_IWUSR);
6580 module_param_named(fake_rw, sdebug_fake_rw, int, S_IRUGO | S_IWUSR);
6581 module_param_named(guard, sdebug_guard, uint, S_IRUGO);
6582 module_param_named(host_lock, sdebug_host_lock, bool, S_IRUGO | S_IWUSR);
6583 module_param_named(host_max_queue, sdebug_host_max_queue, int, S_IRUGO);
6584 module_param_string(inq_product, sdebug_inq_product_id,
6585 		    sizeof(sdebug_inq_product_id), S_IRUGO | S_IWUSR);
6586 module_param_string(inq_rev, sdebug_inq_product_rev,
6587 		    sizeof(sdebug_inq_product_rev), S_IRUGO | S_IWUSR);
6588 module_param_string(inq_vendor, sdebug_inq_vendor_id,
6589 		    sizeof(sdebug_inq_vendor_id), S_IRUGO | S_IWUSR);
6590 module_param_named(lbprz, sdebug_lbprz, int, S_IRUGO);
6591 module_param_named(lbpu, sdebug_lbpu, int, S_IRUGO);
6592 module_param_named(lbpws, sdebug_lbpws, int, S_IRUGO);
6593 module_param_named(lbpws10, sdebug_lbpws10, int, S_IRUGO);
6594 module_param_named(atomic_wr, sdebug_atomic_wr, int, S_IRUGO);
6595 module_param_named(lowest_aligned, sdebug_lowest_aligned, int, S_IRUGO);
6596 module_param_named(lun_format, sdebug_lun_am_i, int, S_IRUGO | S_IWUSR);
6597 module_param_named(max_luns, sdebug_max_luns, int, S_IRUGO | S_IWUSR);
6598 module_param_named(max_queue, sdebug_max_queue, int, S_IRUGO | S_IWUSR);
6599 module_param_named(medium_error_count, sdebug_medium_error_count, int,
6600 		   S_IRUGO | S_IWUSR);
6601 module_param_named(medium_error_start, sdebug_medium_error_start, int,
6602 		   S_IRUGO | S_IWUSR);
6603 module_param_named(ndelay, sdebug_ndelay, int, S_IRUGO | S_IWUSR);
6604 module_param_named(no_lun_0, sdebug_no_lun_0, int, S_IRUGO | S_IWUSR);
6605 module_param_named(no_rwlock, sdebug_no_rwlock, bool, S_IRUGO | S_IWUSR);
6606 module_param_named(no_uld, sdebug_no_uld, int, S_IRUGO);
6607 module_param_named(num_parts, sdebug_num_parts, int, S_IRUGO);
6608 module_param_named(num_tgts, sdebug_num_tgts, int, S_IRUGO | S_IWUSR);
6609 module_param_named(opt_blks, sdebug_opt_blks, int, S_IRUGO);
6610 module_param_named(opt_xferlen_exp, sdebug_opt_xferlen_exp, int, S_IRUGO);
6611 module_param_named(opts, sdebug_opts, int, S_IRUGO | S_IWUSR);
6612 module_param_named(per_host_store, sdebug_per_host_store, bool,
6613 		   S_IRUGO | S_IWUSR);
6614 module_param_named(physblk_exp, sdebug_physblk_exp, int, S_IRUGO);
6615 module_param_named(ptype, sdebug_ptype, int, S_IRUGO | S_IWUSR);
6616 module_param_named(random, sdebug_random, bool, S_IRUGO | S_IWUSR);
6617 module_param_named(removable, sdebug_removable, bool, S_IRUGO | S_IWUSR);
6618 module_param_named(scsi_level, sdebug_scsi_level, int, S_IRUGO);
6619 module_param_named(sector_size, sdebug_sector_size, int, S_IRUGO);
6620 module_param_named(statistics, sdebug_statistics, bool, S_IRUGO | S_IWUSR);
6621 module_param_named(strict, sdebug_strict, bool, S_IRUGO | S_IWUSR);
6622 module_param_named(submit_queues, submit_queues, int, S_IRUGO);
6623 module_param_named(poll_queues, poll_queues, int, S_IRUGO);
6624 module_param_named(tur_ms_to_ready, sdeb_tur_ms_to_ready, int, S_IRUGO);
6625 module_param_named(unmap_alignment, sdebug_unmap_alignment, int, S_IRUGO);
6626 module_param_named(unmap_granularity, sdebug_unmap_granularity, int, S_IRUGO);
6627 module_param_named(unmap_max_blocks, sdebug_unmap_max_blocks, int, S_IRUGO);
6628 module_param_named(unmap_max_desc, sdebug_unmap_max_desc, int, S_IRUGO);
6629 module_param_named(atomic_wr_max_length, sdebug_atomic_wr_max_length, int, S_IRUGO);
6630 module_param_named(atomic_wr_align, sdebug_atomic_wr_align, int, S_IRUGO);
6631 module_param_named(atomic_wr_gran, sdebug_atomic_wr_gran, int, S_IRUGO);
6632 module_param_named(atomic_wr_max_length_bndry, sdebug_atomic_wr_max_length_bndry, int, S_IRUGO);
6633 module_param_named(atomic_wr_max_bndry, sdebug_atomic_wr_max_bndry, int, S_IRUGO);
6634 module_param_named(uuid_ctl, sdebug_uuid_ctl, int, S_IRUGO);
6635 module_param_named(virtual_gb, sdebug_virtual_gb, int, S_IRUGO | S_IWUSR);
6636 module_param_named(vpd_use_hostno, sdebug_vpd_use_hostno, int,
6637 		   S_IRUGO | S_IWUSR);
6638 module_param_named(wp, sdebug_wp, bool, S_IRUGO | S_IWUSR);
6639 module_param_named(write_same_length, sdebug_write_same_length, int,
6640 		   S_IRUGO | S_IWUSR);
6641 module_param_named(zbc, sdeb_zbc_model_s, charp, S_IRUGO);
6642 module_param_named(zone_cap_mb, sdeb_zbc_zone_cap_mb, int, S_IRUGO);
6643 module_param_named(zone_max_open, sdeb_zbc_max_open, int, S_IRUGO);
6644 module_param_named(zone_nr_conv, sdeb_zbc_nr_conv, int, S_IRUGO);
6645 module_param_named(zone_size_mb, sdeb_zbc_zone_size_mb, int, S_IRUGO);
6646 module_param_named(allow_restart, sdebug_allow_restart, bool, S_IRUGO | S_IWUSR);
6647 
6648 MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
6649 MODULE_DESCRIPTION("SCSI debug adapter driver");
6650 MODULE_LICENSE("GPL");
6651 MODULE_VERSION(SDEBUG_VERSION);
6652 
6653 MODULE_PARM_DESC(add_host, "add n hosts, in sysfs if negative remove host(s) (def=1)");
6654 MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
6655 MODULE_PARM_DESC(cdb_len, "suggest CDB lengths to drivers (def=10)");
6656 MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
6657 MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny");
6658 MODULE_PARM_DESC(dev_size_mb, "size in MiB of ram shared by devs(def=8)");
6659 MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
6660 MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
6661 MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
6662 MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
6663 MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
6664 MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
6665 MODULE_PARM_DESC(host_lock, "host_lock is ignored (def=0)");
6666 MODULE_PARM_DESC(host_max_queue,
6667 		 "host max # of queued cmds (0 to max(def) [max_queue fixed equal for !0])");
6668 MODULE_PARM_DESC(inq_product, "SCSI INQUIRY product string (def=\"scsi_debug\")");
6669 MODULE_PARM_DESC(inq_rev, "SCSI INQUIRY revision string (def=\""
6670 		 SDEBUG_VERSION "\")");
6671 MODULE_PARM_DESC(inq_vendor, "SCSI INQUIRY vendor string (def=\"Linux\")");
6672 MODULE_PARM_DESC(lbprz,
6673 		 "on read unmapped LBs return 0 when 1 (def), return 0xff when 2");
6674 MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
6675 MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
6676 MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
6677 MODULE_PARM_DESC(atomic_wr, "enable ATOMIC WRITE support via WRITE ATOMIC(16) (def=0)");
6678 MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
6679 MODULE_PARM_DESC(lun_format, "LUN format: 0->peripheral (def); 1 --> flat address method");
6680 MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
6681 MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to max(def))");
6682 MODULE_PARM_DESC(medium_error_count, "number of sectors starting at medium_error_start on which to return MEDIUM error");
6683 MODULE_PARM_DESC(medium_error_start, "starting sector number to return MEDIUM error");
6684 MODULE_PARM_DESC(ndelay, "response delay in nanoseconds (def=0 -> ignore)");
6685 MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
6686 MODULE_PARM_DESC(no_rwlock, "don't protect user data reads+writes (def=0)");
6687 MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0)");
6688 MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
6689 MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
6690 MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=1024)");
6691 MODULE_PARM_DESC(opt_xferlen_exp, "optimal transfer length granularity exponent (def=physblk_exp)");
6692 MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
6693 MODULE_PARM_DESC(per_host_store, "If set, next positive add_host will get new store (def=0)");
6694 MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
6695 MODULE_PARM_DESC(poll_queues, "support for io_uring iopoll queues (1 to max(submit_queues - 1))");
6696 MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
6697 MODULE_PARM_DESC(random, "If set, uniformly randomize command duration between 0 and delay_in_ns");
6698 MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
6699 MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=7[SPC-5])");
6700 MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
6701 MODULE_PARM_DESC(statistics, "collect statistics on commands, queues (def=0)");
6702 MODULE_PARM_DESC(strict, "stricter checks: reserved field in cdb (def=0)");
6703 MODULE_PARM_DESC(submit_queues, "support for block multi-queue (def=1)");
6704 MODULE_PARM_DESC(tur_ms_to_ready, "TEST UNIT READY millisecs before initial good status (def=0)");
6705 MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
6706 MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
6707 MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)");
6708 MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
6709 MODULE_PARM_DESC(atomic_wr_max_length, "max # of blocks can be atomically written in one cmd (def=8192)");
6710 MODULE_PARM_DESC(atomic_wr_align, "minimum alignment of atomic write in blocks (def=2)");
6711 MODULE_PARM_DESC(atomic_wr_gran, "minimum granularity of atomic write in blocks (def=2)");
6712 MODULE_PARM_DESC(atomic_wr_max_length_bndry, "max # of blocks can be atomically written in one cmd with boundary set (def=8192)");
6713 MODULE_PARM_DESC(atomic_wr_max_bndry, "max # boundaries per atomic write (def=128)");
6714 MODULE_PARM_DESC(uuid_ctl,
6715 		 "1->use uuid for lu name, 0->don't, 2->all use same (def=0)");
6716 MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)");
6717 MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
6718 MODULE_PARM_DESC(wp, "Write Protect (def=0)");
6719 MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
6720 MODULE_PARM_DESC(zbc, "'none' [0]; 'aware' [1]; 'managed' [2] (def=0). Can have 'host-' prefix");
6721 MODULE_PARM_DESC(zone_cap_mb, "Zone capacity in MiB (def=zone size)");
6722 MODULE_PARM_DESC(zone_max_open, "Maximum number of open zones; [0] for no limit (def=auto)");
6723 MODULE_PARM_DESC(zone_nr_conv, "Number of conventional zones (def=1)");
6724 MODULE_PARM_DESC(zone_size_mb, "Zone size in MiB (def=auto)");
6725 MODULE_PARM_DESC(allow_restart, "Set scsi_device's allow_restart flag(def=0)");
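
/*
 * Example invocation (editor's sketch; the parameter values are arbitrary):
 *
 *   modprobe scsi_debug dev_size_mb=256 num_tgts=2 max_luns=4 delay=0
 *
 * adds one host with 2 targets of 4 LUNs each, backed by a shared 256 MiB
 * ram store, with commands completed in the submitting thread.
 */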
6726 
6727 #define SDEBUG_INFO_LEN 256
6728 static char sdebug_info[SDEBUG_INFO_LEN];
6729 
6730 static const char *scsi_debug_info(struct Scsi_Host *shp)
6731 {
6732 	int k;
6733 
6734 	k = scnprintf(sdebug_info, SDEBUG_INFO_LEN, "%s: version %s [%s]\n",
6735 		      my_name, SDEBUG_VERSION, sdebug_version_date);
6736 	if (k >= (SDEBUG_INFO_LEN - 1))
6737 		return sdebug_info;
6738 	scnprintf(sdebug_info + k, SDEBUG_INFO_LEN - k,
6739 		  "  dev_size_mb=%d, opts=0x%x, submit_queues=%d, %s=%d",
6740 		  sdebug_dev_size_mb, sdebug_opts, submit_queues,
6741 		  "statistics", (int)sdebug_statistics);
6742 	return sdebug_info;
6743 }
6744 
6745 /* 'echo <val> > /proc/scsi/scsi_debug/<host_id>' writes to opts */
6746 static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer,
6747 				 int length)
6748 {
6749 	char arr[16];
6750 	int opts;
6751 	int minLen = length > 15 ? 15 : length;
6752 
6753 	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
6754 		return -EACCES;
6755 	memcpy(arr, buffer, minLen);
6756 	arr[minLen] = '\0';
6757 	if (1 != sscanf(arr, "%d", &opts))
6758 		return -EINVAL;
6759 	sdebug_opts = opts;
6760 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
6761 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
6762 	if (sdebug_every_nth != 0)
6763 		tweak_cmnd_count();
6764 	return length;
6765 }
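
/*
 * Editor's example (sketch): 'echo 1 > /proc/scsi/scsi_debug/<host_id>'
 * sets opts to SDEBUG_OPT_NOISE (1), enabling verbose logging.
 */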
6766 
6767 struct sdebug_submit_queue_data {
6768 	int *first;
6769 	int *last;
6770 	int queue_num;
6771 };
6772 
6773 static bool sdebug_submit_queue_iter(struct request *rq, void *opaque)
6774 {
6775 	struct sdebug_submit_queue_data *data = opaque;
6776 	u32 unique_tag = blk_mq_unique_tag(rq);
6777 	u16 hwq = blk_mq_unique_tag_to_hwq(unique_tag);
6778 	u16 tag = blk_mq_unique_tag_to_tag(unique_tag);
6779 	int queue_num = data->queue_num;
6780 
6781 	if (hwq != queue_num)
6782 		return true;
6783 
6784 	/* Rely on iter'ing in ascending tag order */
6785 	if (*data->first == -1)
6786 		*data->first = *data->last = tag;
6787 	else
6788 		*data->last = tag;
6789 
6790 	return true;
6791 }
6792 
6793 /* Output seen with 'cat /proc/scsi/scsi_debug/<host_id>'. It will be the
6794  * same for each scsi_debug host (if there is more than one). Some of the
6795  * counters output here are not atomic, so they may be inaccurate on a busy system. */
6796 static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
6797 {
6798 	struct sdebug_host_info *sdhp;
6799 	int j;
6800 
6801 	seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n",
6802 		   SDEBUG_VERSION, sdebug_version_date);
6803 	seq_printf(m, "num_tgts=%d, %ssize=%d MB, opts=0x%x, every_nth=%d\n",
6804 		   sdebug_num_tgts, "shared (ram) ", sdebug_dev_size_mb,
6805 		   sdebug_opts, sdebug_every_nth);
6806 	seq_printf(m, "delay=%d, ndelay=%d, max_luns=%d, sector_size=%d %s\n",
6807 		   sdebug_jdelay, sdebug_ndelay, sdebug_max_luns,
6808 		   sdebug_sector_size, "bytes");
6809 	seq_printf(m, "cylinders=%d, heads=%d, sectors=%d, command aborts=%d\n",
6810 		   sdebug_cylinders_per, sdebug_heads, sdebug_sectors_per,
6811 		   num_aborts);
6812 	seq_printf(m, "RESETs: device=%d, target=%d, bus=%d, host=%d\n",
6813 		   num_dev_resets, num_target_resets, num_bus_resets,
6814 		   num_host_resets);
6815 	seq_printf(m, "dix_reads=%d, dix_writes=%d, dif_errors=%d\n",
6816 		   dix_reads, dix_writes, dif_errors);
6817 	seq_printf(m, "usec_in_jiffy=%lu, statistics=%d\n", TICK_NSEC / 1000,
6818 		   sdebug_statistics);
6819 	seq_printf(m, "cmnd_count=%d, completions=%d, %s=%d, a_tsf=%d, mq_polls=%d\n",
6820 		   atomic_read(&sdebug_cmnd_count),
6821 		   atomic_read(&sdebug_completions),
6822 		   "miss_cpus", atomic_read(&sdebug_miss_cpus),
6823 		   atomic_read(&sdebug_a_tsf),
6824 		   atomic_read(&sdeb_mq_poll_count));
6825 
6826 	seq_printf(m, "submit_queues=%d\n", submit_queues);
6827 	for (j = 0; j < submit_queues; ++j) {
6828 		int f = -1, l = -1;
6829 		struct sdebug_submit_queue_data data = {
6830 			.queue_num = j,
6831 			.first = &f,
6832 			.last = &l,
6833 		};
6834 		seq_printf(m, "  queue %d:\n", j);
6835 		blk_mq_tagset_busy_iter(&host->tag_set, sdebug_submit_queue_iter,
6836 					&data);
6837 		if (f >= 0) {
6838 			seq_printf(m, "    in_use_bm BUSY: %s: %d,%d\n",
6839 				   "first,last bits", f, l);
6840 		}
6841 	}
6842 
6843 	seq_printf(m, "this host_no=%d\n", host->host_no);
6844 	if (!xa_empty(per_store_ap)) {
6845 		bool niu;
6846 		int idx;
6847 		unsigned long l_idx;
6848 		struct sdeb_store_info *sip;
6849 
6850 		seq_puts(m, "\nhost list:\n");
6851 		j = 0;
6852 		list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
6853 			idx = sdhp->si_idx;
6854 			seq_printf(m, "  %d: host_no=%d, si_idx=%d\n", j,
6855 				   sdhp->shost->host_no, idx);
6856 			++j;
6857 		}
6858 		seq_printf(m, "\nper_store array [most_recent_idx=%d]:\n",
6859 			   sdeb_most_recent_idx);
6860 		j = 0;
6861 		xa_for_each(per_store_ap, l_idx, sip) {
6862 			niu = xa_get_mark(per_store_ap, l_idx,
6863 					  SDEB_XA_NOT_IN_USE);
6864 			idx = (int)l_idx;
6865 			seq_printf(m, "  %d: idx=%d%s\n", j, idx,
6866 				   (niu ? "  not_in_use" : ""));
6867 			++j;
6868 		}
6869 	}
6870 	return 0;
6871 }
6872 
6873 static ssize_t delay_show(struct device_driver *ddp, char *buf)
6874 {
6875 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_jdelay);
6876 }
6877 /* Returns -EBUSY if jdelay is being changed and commands are queued. The unit
6878  * of delay is jiffies.
6879  */
6880 static ssize_t delay_store(struct device_driver *ddp, const char *buf,
6881 			   size_t count)
6882 {
6883 	int jdelay, res;
6884 
6885 	if (count > 0 && sscanf(buf, "%d", &jdelay) == 1) {
6886 		res = count;
6887 		if (sdebug_jdelay != jdelay) {
6888 			struct sdebug_host_info *sdhp;
6889 
6890 			mutex_lock(&sdebug_host_list_mutex);
6891 			block_unblock_all_queues(true);
6892 
6893 			list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
6894 				struct Scsi_Host *shost = sdhp->shost;
6895 
6896 				if (scsi_host_busy(shost)) {
6897 					res = -EBUSY;   /* queued commands */
6898 					break;
6899 				}
6900 			}
6901 			if (res > 0) {
6902 				sdebug_jdelay = jdelay;
6903 				sdebug_ndelay = 0;
6904 			}
6905 			block_unblock_all_queues(false);
6906 			mutex_unlock(&sdebug_host_list_mutex);
6907 		}
6908 		return res;
6909 	}
6910 	return -EINVAL;
6911 }
6912 static DRIVER_ATTR_RW(delay);
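
/*
 * Editor's note (sketch): per the 'delay' parameter description above,
 * 'echo -1 > /sys/bus/pseudo/drivers/scsi_debug/delay' selects the
 * work-queue completion path in schedule_resp(), while 0 completes
 * commands immediately in the submitting thread.
 */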
6913 
6914 static ssize_t ndelay_show(struct device_driver *ddp, char *buf)
6915 {
6916 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ndelay);
6917 }
6918 /* Returns -EBUSY if ndelay is being changed and commands are queued */
6919 /* If > 0 and accepted then sdebug_jdelay is set to JDELAY_OVERRIDDEN */
6920 static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
6921 			    size_t count)
6922 {
6923 	int ndelay, res;
6924 
6925 	if ((count > 0) && (1 == sscanf(buf, "%d", &ndelay)) &&
6926 	    (ndelay >= 0) && (ndelay < (1000 * 1000 * 1000))) {
6927 		res = count;
6928 		if (sdebug_ndelay != ndelay) {
6929 			struct sdebug_host_info *sdhp;
6930 
6931 			mutex_lock(&sdebug_host_list_mutex);
6932 			block_unblock_all_queues(true);
6933 
6934 			list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
6935 				struct Scsi_Host *shost = sdhp->shost;
6936 
6937 				if (scsi_host_busy(shost)) {
6938 					res = -EBUSY;   /* queued commands */
6939 					break;
6940 				}
6941 			}
6942 
6943 			if (res > 0) {
6944 				sdebug_ndelay = ndelay;
6945 				sdebug_jdelay = ndelay  ? JDELAY_OVERRIDDEN
6946 							: DEF_JDELAY;
6947 			}
6948 			block_unblock_all_queues(false);
6949 			mutex_unlock(&sdebug_host_list_mutex);
6950 		}
6951 		return res;
6952 	}
6953 	return -EINVAL;
6954 }
6955 static DRIVER_ATTR_RW(ndelay);
6956 
6957 static ssize_t opts_show(struct device_driver *ddp, char *buf)
6958 {
6959 	return scnprintf(buf, PAGE_SIZE, "0x%x\n", sdebug_opts);
6960 }
6961 
6962 static ssize_t opts_store(struct device_driver *ddp, const char *buf,
6963 			  size_t count)
6964 {
6965 	int opts;
6966 	char work[20];
6967 
6968 	if (sscanf(buf, "%10s", work) == 1) {
6969 		if (strncasecmp(work, "0x", 2) == 0) {
6970 			if (kstrtoint(work + 2, 16, &opts) == 0)
6971 				goto opts_done;
6972 		} else {
6973 			if (kstrtoint(work, 10, &opts) == 0)
6974 				goto opts_done;
6975 		}
6976 	}
6977 	return -EINVAL;
6978 opts_done:
6979 	sdebug_opts = opts;
6980 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
6981 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
6982 	tweak_cmnd_count();
6983 	return count;
6984 }
6985 static DRIVER_ATTR_RW(opts);
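
/*
 * Editor's note (sketch): opts accepts both bases, so
 * 'echo 0x5 > .../opts' and 'echo 5 > .../opts' are equivalent,
 * setting noise (1) plus timeout (4).
 */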
6986 
6987 static ssize_t ptype_show(struct device_driver *ddp, char *buf)
6988 {
6989 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ptype);
6990 }
6991 static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
6992 			   size_t count)
6993 {
6994 	int n;
6995 
6996 	/* Cannot change from or to TYPE_ZBC with sysfs */
6997 	if (sdebug_ptype == TYPE_ZBC)
6998 		return -EINVAL;
6999 
7000 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
7001 		if (n == TYPE_ZBC)
7002 			return -EINVAL;
7003 		sdebug_ptype = n;
7004 		return count;
7005 	}
7006 	return -EINVAL;
7007 }
7008 static DRIVER_ATTR_RW(ptype);
7009 
7010 static ssize_t dsense_show(struct device_driver *ddp, char *buf)
7011 {
7012 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dsense);
7013 }
7014 static ssize_t dsense_store(struct device_driver *ddp, const char *buf,
7015 			    size_t count)
7016 {
7017 	int n;
7018 
7019 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
7020 		sdebug_dsense = n;
7021 		return count;
7022 	}
7023 	return -EINVAL;
7024 }
7025 static DRIVER_ATTR_RW(dsense);
7026 
7027 static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
7028 {
7029 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_fake_rw);
7030 }
7031 static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
7032 			     size_t count)
7033 {
7034 	int n, idx;
7035 
7036 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
7037 		bool want_store = (n == 0);
7038 		struct sdebug_host_info *sdhp;
7039 
7040 		n = (n > 0);
7041 		sdebug_fake_rw = (sdebug_fake_rw > 0);
7042 		if (sdebug_fake_rw == n)
7043 			return count;	/* not transitioning so do nothing */
7044 
7045 		if (want_store) {	/* 1 --> 0 transition, set up store */
7046 			if (sdeb_first_idx < 0) {
7047 				idx = sdebug_add_store();
7048 				if (idx < 0)
7049 					return idx;
7050 			} else {
7051 				idx = sdeb_first_idx;
7052 				xa_clear_mark(per_store_ap, idx,
7053 					      SDEB_XA_NOT_IN_USE);
7054 			}
7055 			/* make all hosts use same store */
7056 			list_for_each_entry(sdhp, &sdebug_host_list,
7057 					    host_list) {
7058 				if (sdhp->si_idx != idx) {
7059 					xa_set_mark(per_store_ap, sdhp->si_idx,
7060 						    SDEB_XA_NOT_IN_USE);
7061 					sdhp->si_idx = idx;
7062 				}
7063 			}
7064 			sdeb_most_recent_idx = idx;
7065 		} else {	/* 0 --> 1 transition is trigger for shrink */
7066 			sdebug_erase_all_stores(true /* apart from first */);
7067 		}
7068 		sdebug_fake_rw = n;
7069 		return count;
7070 	}
7071 	return -EINVAL;
7072 }
7073 static DRIVER_ATTR_RW(fake_rw);
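
/*
 * Editor's summary (sketch): a 1 -> 0 transition of fake_rw creates or
 * re-uses a backing store and points every host at it; a 0 -> 1
 * transition erases all stores apart from the first, shrinking memory use.
 */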
7074 
7075 static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf)
7076 {
7077 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_lun_0);
7078 }
7079 static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf,
7080 			      size_t count)
7081 {
7082 	int n;
7083 
7084 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
7085 		sdebug_no_lun_0 = n;
7086 		return count;
7087 	}
7088 	return -EINVAL;
7089 }
7090 static DRIVER_ATTR_RW(no_lun_0);
7091 
7092 static ssize_t num_tgts_show(struct device_driver *ddp, char *buf)
7093 {
7094 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_tgts);
7095 }
7096 static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf,
7097 			      size_t count)
7098 {
7099 	int n;
7100 
7101 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
7102 		sdebug_num_tgts = n;
7103 		sdebug_max_tgts_luns();
7104 		return count;
7105 	}
7106 	return -EINVAL;
7107 }
7108 static DRIVER_ATTR_RW(num_tgts);
7109 
7110 static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf)
7111 {
7112 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dev_size_mb);
7113 }
7114 static DRIVER_ATTR_RO(dev_size_mb);
7115 
7116 static ssize_t per_host_store_show(struct device_driver *ddp, char *buf)
7117 {
7118 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_per_host_store);
7119 }
7120 
7121 static ssize_t per_host_store_store(struct device_driver *ddp, const char *buf,
7122 				    size_t count)
7123 {
7124 	bool v;
7125 
7126 	if (kstrtobool(buf, &v))
7127 		return -EINVAL;
7128 
7129 	sdebug_per_host_store = v;
7130 	return count;
7131 }
7132 static DRIVER_ATTR_RW(per_host_store);
7133 
7134 static ssize_t num_parts_show(struct device_driver *ddp, char *buf)
7135 {
7136 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_parts);
7137 }
7138 static DRIVER_ATTR_RO(num_parts);
7139 
7140 static ssize_t every_nth_show(struct device_driver *ddp, char *buf)
7141 {
7142 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_every_nth);
7143 }
7144 static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
7145 			       size_t count)
7146 {
7147 	int nth;
7148 	char work[20];
7149 
7150 	if (sscanf(buf, "%10s", work) == 1) {
7151 		if (strncasecmp(work, "0x", 2) == 0) {
7152 			if (kstrtoint(work + 2, 16, &nth) == 0)
7153 				goto every_nth_done;
7154 		} else {
7155 			if (kstrtoint(work, 10, &nth) == 0)
7156 				goto every_nth_done;
7157 		}
7158 	}
7159 	return -EINVAL;
7160 
7161 every_nth_done:
7162 	sdebug_every_nth = nth;
7163 	if (nth && !sdebug_statistics) {
7164 		pr_info("every_nth needs statistics=1, set it\n");
7165 		sdebug_statistics = true;
7166 	}
7167 	tweak_cmnd_count();
7168 	return count;
7169 }
7170 static DRIVER_ATTR_RW(every_nth);
7171 
7172 static ssize_t lun_format_show(struct device_driver *ddp, char *buf)
7173 {
7174 	return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_lun_am);
7175 }
7176 static ssize_t lun_format_store(struct device_driver *ddp, const char *buf,
7177 				size_t count)
7178 {
7179 	int n;
7180 	bool changed;
7181 
7182 	if (kstrtoint(buf, 0, &n))
7183 		return -EINVAL;
7184 	if (n >= 0) {
7185 		if (n > (int)SAM_LUN_AM_FLAT) {
7186 			pr_warn("only LUN address methods 0 and 1 are supported\n");
7187 			return -EINVAL;
7188 		}
7189 		changed = ((int)sdebug_lun_am != n);
7190 		sdebug_lun_am = n;
7191 		if (changed && sdebug_scsi_level >= 5) {	/* >= SPC-3 */
7192 			struct sdebug_host_info *sdhp;
7193 			struct sdebug_dev_info *dp;
7194 
7195 			mutex_lock(&sdebug_host_list_mutex);
7196 			list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
7197 				list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
7198 					set_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
7199 				}
7200 			}
7201 			mutex_unlock(&sdebug_host_list_mutex);
7202 		}
7203 		return count;
7204 	}
7205 	return -EINVAL;
7206 }
7207 static DRIVER_ATTR_RW(lun_format);
7208 
7209 static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
7210 {
7211 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_luns);
7212 }
7213 static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
7214 			      size_t count)
7215 {
7216 	int n;
7217 	bool changed;
7218 
7219 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
7220 		if (n > 256) {
7221 			pr_warn("max_luns can be no more than 256\n");
7222 			return -EINVAL;
7223 		}
7224 		changed = (sdebug_max_luns != n);
7225 		sdebug_max_luns = n;
7226 		sdebug_max_tgts_luns();
7227 		if (changed && (sdebug_scsi_level >= 5)) {	/* >= SPC-3 */
7228 			struct sdebug_host_info *sdhp;
7229 			struct sdebug_dev_info *dp;
7230 
7231 			mutex_lock(&sdebug_host_list_mutex);
7232 			list_for_each_entry(sdhp, &sdebug_host_list,
7233 					    host_list) {
7234 				list_for_each_entry(dp, &sdhp->dev_info_list,
7235 						    dev_list) {
7236 					set_bit(SDEBUG_UA_LUNS_CHANGED,
7237 						dp->uas_bm);
7238 				}
7239 			}
7240 			mutex_unlock(&sdebug_host_list_mutex);
7241 		}
7242 		return count;
7243 	}
7244 	return -EINVAL;
7245 }
7246 static DRIVER_ATTR_RW(max_luns);
7247 
7248 static ssize_t max_queue_show(struct device_driver *ddp, char *buf)
7249 {
7250 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_queue);
7251 }
7252 /* N.B. max_queue may only be changed while no scsi_debug hosts are attached;
7253  * attempts while hosts exist fail with -EBUSY. */
7254 static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
7255 			       size_t count)
7256 {
7257 	int n;
7258 
7259 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
7260 	    (n <= SDEBUG_CANQUEUE) &&
7261 	    (sdebug_host_max_queue == 0)) {
7262 		mutex_lock(&sdebug_host_list_mutex);
7263 
7264 		/* We may only change sdebug_max_queue when we have no shosts */
7265 		if (list_empty(&sdebug_host_list))
7266 			sdebug_max_queue = n;
7267 		else
7268 			count = -EBUSY;
7269 		mutex_unlock(&sdebug_host_list_mutex);
7270 		return count;
7271 	}
7272 	return -EINVAL;
7273 }
7274 static DRIVER_ATTR_RW(max_queue);
7275 
7276 static ssize_t host_max_queue_show(struct device_driver *ddp, char *buf)
7277 {
7278 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_host_max_queue);
7279 }
7280 
7281 static ssize_t no_rwlock_show(struct device_driver *ddp, char *buf)
7282 {
7283 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_rwlock);
7284 }
7285 
7286 static ssize_t no_rwlock_store(struct device_driver *ddp, const char *buf, size_t count)
7287 {
7288 	bool v;
7289 
7290 	if (kstrtobool(buf, &v))
7291 		return -EINVAL;
7292 
7293 	sdebug_no_rwlock = v;
7294 	return count;
7295 }
7296 static DRIVER_ATTR_RW(no_rwlock);
7297 
7298 /*
7299  * Since this is used for .can_queue, and we get the hc_idx tag from the bitmap
7300  * in range [0, sdebug_host_max_queue), we can't change it.
7301  */
7302 static DRIVER_ATTR_RO(host_max_queue);
7303 
7304 static ssize_t no_uld_show(struct device_driver *ddp, char *buf)
7305 {
7306 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_uld);
7307 }
7308 static DRIVER_ATTR_RO(no_uld);
7309 
7310 static ssize_t scsi_level_show(struct device_driver *ddp, char *buf)
7311 {
7312 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_scsi_level);
7313 }
7314 static DRIVER_ATTR_RO(scsi_level);
7315 
7316 static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf)
7317 {
7318 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_virtual_gb);
7319 }
7320 static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
7321 				size_t count)
7322 {
7323 	int n;
7324 	bool changed;
7325 
7326 	/* Ignore capacity change for ZBC drives for now */
7327 	if (sdeb_zbc_in_use)
7328 		return -ENOTSUPP;
7329 
7330 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
7331 		changed = (sdebug_virtual_gb != n);
7332 		sdebug_virtual_gb = n;
7333 		sdebug_capacity = get_sdebug_capacity();
7334 		if (changed) {
7335 			struct sdebug_host_info *sdhp;
7336 			struct sdebug_dev_info *dp;
7337 
7338 			mutex_lock(&sdebug_host_list_mutex);
7339 			list_for_each_entry(sdhp, &sdebug_host_list,
7340 					    host_list) {
7341 				list_for_each_entry(dp, &sdhp->dev_info_list,
7342 						    dev_list) {
7343 					set_bit(SDEBUG_UA_CAPACITY_CHANGED,
7344 						dp->uas_bm);
7345 				}
7346 			}
7347 			mutex_unlock(&sdebug_host_list_mutex);
7348 		}
7349 		return count;
7350 	}
7351 	return -EINVAL;
7352 }
7353 static DRIVER_ATTR_RW(virtual_gb);
7354 
7355 static ssize_t add_host_show(struct device_driver *ddp, char *buf)
7356 {
7357 	/* absolute number of hosts currently active is what is shown */
7358 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_hosts);
7359 }
7360 
7361 static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
7362 			      size_t count)
7363 {
7364 	bool found;
7365 	unsigned long idx;
7366 	struct sdeb_store_info *sip;
7367 	bool want_phs = (sdebug_fake_rw == 0) && sdebug_per_host_store;
7368 	int delta_hosts;
7369 
7370 	if (sscanf(buf, "%d", &delta_hosts) != 1)
7371 		return -EINVAL;
7372 	if (delta_hosts > 0) {
7373 		do {
7374 			found = false;
7375 			if (want_phs) {
7376 				xa_for_each_marked(per_store_ap, idx, sip,
7377 						   SDEB_XA_NOT_IN_USE) {
7378 					sdeb_most_recent_idx = (int)idx;
7379 					found = true;
7380 					break;
7381 				}
7382 				if (found)	/* re-use case */
7383 					sdebug_add_host_helper((int)idx);
7384 				else
7385 					sdebug_do_add_host(true);
7386 			} else {
7387 				sdebug_do_add_host(false);
7388 			}
7389 		} while (--delta_hosts);
7390 	} else if (delta_hosts < 0) {
7391 		do {
7392 			sdebug_do_remove_host(false);
7393 		} while (++delta_hosts);
7394 	}
7395 	return count;
7396 }
7397 static DRIVER_ATTR_RW(add_host);
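
/*
 * Editor's example (sketch): 'echo 2 > .../add_host' adds two hosts;
 * 'echo -1 > .../add_host' removes the most recently added host.
 */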
7398 
7399 static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf)
7400 {
7401 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_vpd_use_hostno);
7402 }
7403 static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf,
7404 				    size_t count)
7405 {
7406 	int n;
7407 
7408 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
7409 		sdebug_vpd_use_hostno = n;
7410 		return count;
7411 	}
7412 	return -EINVAL;
7413 }
7414 static DRIVER_ATTR_RW(vpd_use_hostno);
7415 
7416 static ssize_t statistics_show(struct device_driver *ddp, char *buf)
7417 {
7418 	return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_statistics);
7419 }
7420 static ssize_t statistics_store(struct device_driver *ddp, const char *buf,
7421 				size_t count)
7422 {
7423 	int n;
7424 
7425 	if ((count > 0) && (sscanf(buf, "%d", &n) == 1) && (n >= 0)) {
7426 		if (n > 0)
7427 			sdebug_statistics = true;
7428 		else {
7429 			clear_queue_stats();
7430 			sdebug_statistics = false;
7431 		}
7432 		return count;
7433 	}
7434 	return -EINVAL;
7435 }
7436 static DRIVER_ATTR_RW(statistics);
7437 
7438 static ssize_t sector_size_show(struct device_driver *ddp, char *buf)
7439 {
7440 	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_sector_size);
7441 }
7442 static DRIVER_ATTR_RO(sector_size);
7443 
7444 static ssize_t submit_queues_show(struct device_driver *ddp, char *buf)
7445 {
7446 	return scnprintf(buf, PAGE_SIZE, "%d\n", submit_queues);
7447 }
7448 static DRIVER_ATTR_RO(submit_queues);
7449 
7450 static ssize_t dix_show(struct device_driver *ddp, char *buf)
7451 {
7452 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dix);
7453 }
7454 static DRIVER_ATTR_RO(dix);
7455 
7456 static ssize_t dif_show(struct device_driver *ddp, char *buf)
7457 {
7458 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dif);
7459 }
7460 static DRIVER_ATTR_RO(dif);
7461 
7462 static ssize_t guard_show(struct device_driver *ddp, char *buf)
7463 {
7464 	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_guard);
7465 }
7466 static DRIVER_ATTR_RO(guard);
7467 
7468 static ssize_t ato_show(struct device_driver *ddp, char *buf)
7469 {
7470 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ato);
7471 }
7472 static DRIVER_ATTR_RO(ato);
7473 
7474 static ssize_t map_show(struct device_driver *ddp, char *buf)
7475 {
7476 	ssize_t count = 0;
7477 
7478 	if (!scsi_debug_lbp())
7479 		return scnprintf(buf, PAGE_SIZE, "0-%u\n",
7480 				 sdebug_store_sectors);
7481 
7482 	if (sdebug_fake_rw == 0 && !xa_empty(per_store_ap)) {
7483 		struct sdeb_store_info *sip = xa_load(per_store_ap, 0);
7484 
7485 		if (sip)
7486 			count = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
7487 					  (int)map_size, sip->map_storep);
7488 	}
7489 	buf[count++] = '\n';
7490 	buf[count] = '\0';
7491 
7492 	return count;
7493 }
7494 static DRIVER_ATTR_RO(map);
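
/*
 * Editor's note: "%*pbl" formats the provisioning bitmap as a ranged
 * list (e.g. "0-7,64-71"), so 'cat map' shows which blocks are mapped.
 */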
7495 
7496 static ssize_t random_show(struct device_driver *ddp, char *buf)
7497 {
7498 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_random);
7499 }
7500 
7501 static ssize_t random_store(struct device_driver *ddp, const char *buf,
7502 			    size_t count)
7503 {
7504 	bool v;
7505 
7506 	if (kstrtobool(buf, &v))
7507 		return -EINVAL;
7508 
7509 	sdebug_random = v;
7510 	return count;
7511 }
7512 static DRIVER_ATTR_RW(random);
7513 
7514 static ssize_t removable_show(struct device_driver *ddp, char *buf)
7515 {
7516 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_removable ? 1 : 0);
7517 }
7518 static ssize_t removable_store(struct device_driver *ddp, const char *buf,
7519 			       size_t count)
7520 {
7521 	int n;
7522 
7523 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
7524 		sdebug_removable = (n > 0);
7525 		return count;
7526 	}
7527 	return -EINVAL;
7528 }
7529 static DRIVER_ATTR_RW(removable);
7530 
7531 static ssize_t host_lock_show(struct device_driver *ddp, char *buf)
7532 {
7533 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_host_lock);
7534 }
7535 /* N.B. sdebug_host_lock does nothing, kept for backward compatibility */
7536 static ssize_t host_lock_store(struct device_driver *ddp, const char *buf,
7537 			       size_t count)
7538 {
7539 	int n;
7540 
7541 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
7542 		sdebug_host_lock = (n > 0);
7543 		return count;
7544 	}
7545 	return -EINVAL;
7546 }
7547 static DRIVER_ATTR_RW(host_lock);
7548 
7549 static ssize_t strict_show(struct device_driver *ddp, char *buf)
7550 {
7551 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_strict);
7552 }
7553 static ssize_t strict_store(struct device_driver *ddp, const char *buf,
7554 			    size_t count)
7555 {
7556 	int n;
7557 
7558 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
7559 		sdebug_strict = (n > 0);
7560 		return count;
7561 	}
7562 	return -EINVAL;
7563 }
7564 static DRIVER_ATTR_RW(strict);
7565 
7566 static ssize_t uuid_ctl_show(struct device_driver *ddp, char *buf)
7567 {
7568 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_uuid_ctl);
7569 }
7570 static DRIVER_ATTR_RO(uuid_ctl);
7571 
7572 static ssize_t cdb_len_show(struct device_driver *ddp, char *buf)
7573 {
7574 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_cdb_len);
7575 }
7576 static ssize_t cdb_len_store(struct device_driver *ddp, const char *buf,
7577 			     size_t count)
7578 {
7579 	int ret, n;
7580 
7581 	ret = kstrtoint(buf, 0, &n);
7582 	if (ret)
7583 		return ret;
7584 	sdebug_cdb_len = n;
7585 	all_config_cdb_len();
7586 	return count;
7587 }
7588 static DRIVER_ATTR_RW(cdb_len);
7589 
7590 static const char * const zbc_model_strs_a[] = {
7591 	[BLK_ZONED_NONE] = "none",
7592 	[BLK_ZONED_HA]   = "host-aware",
7593 	[BLK_ZONED_HM]   = "host-managed",
7594 };
7595 
7596 static const char * const zbc_model_strs_b[] = {
7597 	[BLK_ZONED_NONE] = "no",
7598 	[BLK_ZONED_HA]   = "aware",
7599 	[BLK_ZONED_HM]   = "managed",
7600 };
7601 
7602 static const char * const zbc_model_strs_c[] = {
7603 	[BLK_ZONED_NONE] = "0",
7604 	[BLK_ZONED_HA]   = "1",
7605 	[BLK_ZONED_HM]   = "2",
7606 };
7607 
7608 static int sdeb_zbc_model_str(const char *cp)
7609 {
7610 	int res = sysfs_match_string(zbc_model_strs_a, cp);
7611 
7612 	if (res < 0) {
7613 		res = sysfs_match_string(zbc_model_strs_b, cp);
7614 		if (res < 0) {
7615 			res = sysfs_match_string(zbc_model_strs_c, cp);
7616 			if (res < 0)
7617 				return -EINVAL;
7618 		}
7619 	}
7620 	return res;
7621 }
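
/*
 * Editor's note (sketch): the three tables above make "host-managed",
 * "managed" and "2" all resolve to BLK_ZONED_HM, so zbc=managed and
 * zbc=host-managed are equivalent module options.
 */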
7622 
7623 static ssize_t zbc_show(struct device_driver *ddp, char *buf)
7624 {
7625 	return scnprintf(buf, PAGE_SIZE, "%s\n",
7626 			 zbc_model_strs_a[sdeb_zbc_model]);
7627 }
7628 static DRIVER_ATTR_RO(zbc);
7629 
7630 static ssize_t tur_ms_to_ready_show(struct device_driver *ddp, char *buf)
7631 {
7632 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdeb_tur_ms_to_ready);
7633 }
7634 static DRIVER_ATTR_RO(tur_ms_to_ready);
7635 
7636 static ssize_t group_number_stats_show(struct device_driver *ddp, char *buf)
7637 {
7638 	char *p = buf, *end = buf + PAGE_SIZE;
7639 	int i;
7640 
7641 	for (i = 0; i < ARRAY_SIZE(writes_by_group_number); i++)
7642 		p += scnprintf(p, end - p, "%d %ld\n", i,
7643 			       atomic_long_read(&writes_by_group_number[i]));
7644 
7645 	return p - buf;
7646 }
7647 
7648 static ssize_t group_number_stats_store(struct device_driver *ddp,
7649 					const char *buf, size_t count)
7650 {
7651 	int i;
7652 
7653 	for (i = 0; i < ARRAY_SIZE(writes_by_group_number); i++)
7654 		atomic_long_set(&writes_by_group_number[i], 0);
7655 
7656 	return count;
7657 }
7658 static DRIVER_ATTR_RW(group_number_stats);
7659 
7660 /* Note: The following array creates attribute files in the
7661    /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
7662    files (over those found in the /sys/module/scsi_debug/parameters
7663    directory) is that auxiliary actions can be triggered when an attribute
7664    is changed. For example see: add_host_store() above.
7665  */
7666 
7667 static struct attribute *sdebug_drv_attrs[] = {
7668 	&driver_attr_delay.attr,
7669 	&driver_attr_opts.attr,
7670 	&driver_attr_ptype.attr,
7671 	&driver_attr_dsense.attr,
7672 	&driver_attr_fake_rw.attr,
7673 	&driver_attr_host_max_queue.attr,
7674 	&driver_attr_no_lun_0.attr,
7675 	&driver_attr_num_tgts.attr,
7676 	&driver_attr_dev_size_mb.attr,
7677 	&driver_attr_num_parts.attr,
7678 	&driver_attr_every_nth.attr,
7679 	&driver_attr_lun_format.attr,
7680 	&driver_attr_max_luns.attr,
7681 	&driver_attr_max_queue.attr,
7682 	&driver_attr_no_rwlock.attr,
7683 	&driver_attr_no_uld.attr,
7684 	&driver_attr_scsi_level.attr,
7685 	&driver_attr_virtual_gb.attr,
7686 	&driver_attr_add_host.attr,
7687 	&driver_attr_per_host_store.attr,
7688 	&driver_attr_vpd_use_hostno.attr,
7689 	&driver_attr_sector_size.attr,
7690 	&driver_attr_statistics.attr,
7691 	&driver_attr_submit_queues.attr,
7692 	&driver_attr_dix.attr,
7693 	&driver_attr_dif.attr,
7694 	&driver_attr_guard.attr,
7695 	&driver_attr_ato.attr,
7696 	&driver_attr_map.attr,
7697 	&driver_attr_random.attr,
7698 	&driver_attr_removable.attr,
7699 	&driver_attr_host_lock.attr,
7700 	&driver_attr_ndelay.attr,
7701 	&driver_attr_strict.attr,
7702 	&driver_attr_uuid_ctl.attr,
7703 	&driver_attr_cdb_len.attr,
7704 	&driver_attr_tur_ms_to_ready.attr,
7705 	&driver_attr_zbc.attr,
7706 	&driver_attr_group_number_stats.attr,
7707 	NULL,
7708 };
7709 ATTRIBUTE_GROUPS(sdebug_drv);
7710 
7711 static struct device *pseudo_primary;
7712 
7713 static int __init scsi_debug_init(void)
7714 {
7715 	bool want_store = (sdebug_fake_rw == 0);
7716 	unsigned long sz;
7717 	int k, ret, hosts_to_add;
7718 	int idx = -1;
7719 
7720 	if (sdebug_ndelay >= 1000 * 1000 * 1000) {
7721 		pr_warn("ndelay must be less than 1 second, ignored\n");
7722 		sdebug_ndelay = 0;
7723 	} else if (sdebug_ndelay > 0)
7724 		sdebug_jdelay = JDELAY_OVERRIDDEN;
7725 
7726 	switch (sdebug_sector_size) {
7727 	case  512:
7728 	case 1024:
7729 	case 2048:
7730 	case 4096:
7731 		break;
7732 	default:
7733 		pr_err("invalid sector_size %d\n", sdebug_sector_size);
7734 		return -EINVAL;
7735 	}
7736 
7737 	switch (sdebug_dif) {
7738 	case T10_PI_TYPE0_PROTECTION:
7739 		break;
7740 	case T10_PI_TYPE1_PROTECTION:
7741 	case T10_PI_TYPE2_PROTECTION:
7742 	case T10_PI_TYPE3_PROTECTION:
7743 		have_dif_prot = true;
7744 		break;
7745 
7746 	default:
7747 		pr_err("dif must be 0, 1, 2 or 3\n");
7748 		return -EINVAL;
7749 	}
7750 
7751 	if (sdebug_num_tgts < 0) {
7752 		pr_err("num_tgts must be >= 0\n");
7753 		return -EINVAL;
7754 	}
7755 
7756 	if (sdebug_guard > 1) {
7757 		pr_err("guard must be 0 or 1\n");
7758 		return -EINVAL;
7759 	}
7760 
7761 	if (sdebug_ato > 1) {
7762 		pr_err("ato must be 0 or 1\n");
7763 		return -EINVAL;
7764 	}
7765 
7766 	if (sdebug_physblk_exp > 15) {
7767 		pr_err("invalid physblk_exp %u\n", sdebug_physblk_exp);
7768 		return -EINVAL;
7769 	}
7770 
7771 	sdebug_lun_am = sdebug_lun_am_i;
7772 	if (sdebug_lun_am > SAM_LUN_AM_FLAT) {
7773 		pr_warn("Invalid LUN format %u, using default\n", (int)sdebug_lun_am);
7774 		sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
7775 	}
7776 
7777 	if (sdebug_max_luns > 256) {
7778 		if (sdebug_max_luns > 16384) {
7779 			pr_warn("max_luns can be no more than 16384, use default\n");
7780 			sdebug_max_luns = DEF_MAX_LUNS;
7781 		}
7782 		sdebug_lun_am = SAM_LUN_AM_FLAT;
7783 	}
7784 
7785 	if (sdebug_lowest_aligned > 0x3fff) {
7786 		pr_err("lowest_aligned too big: %u\n", sdebug_lowest_aligned);
7787 		return -EINVAL;
7788 	}
7789 
7790 	if (submit_queues < 1) {
7791 		pr_err("submit_queues must be 1 or more\n");
7792 		return -EINVAL;
7793 	}
7794 
7795 	if ((sdebug_max_queue > SDEBUG_CANQUEUE) || (sdebug_max_queue < 1)) {
7796 		pr_err("max_queue must be in range [1, %d]\n", SDEBUG_CANQUEUE);
7797 		return -EINVAL;
7798 	}
7799 
7800 	if ((sdebug_host_max_queue > SDEBUG_CANQUEUE) ||
7801 	    (sdebug_host_max_queue < 0)) {
7802 		pr_err("host_max_queue must be in range [0 %d]\n",
7803 		       SDEBUG_CANQUEUE);
7804 		return -EINVAL;
7805 	}
7806 
7807 	if (sdebug_host_max_queue &&
7808 	    (sdebug_max_queue != sdebug_host_max_queue)) {
7809 		sdebug_max_queue = sdebug_host_max_queue;
7810 		pr_warn("fixing max submit queue depth to host max queue depth, %d\n",
7811 			sdebug_max_queue);
7812 	}
7813 
7814 	/*
7815 	 * check for host managed zoned block device specified with
7816 	 * ptype=0x14 or zbc=XXX.
7817 	 */
7818 	if (sdebug_ptype == TYPE_ZBC) {
7819 		sdeb_zbc_model = BLK_ZONED_HM;
7820 	} else if (sdeb_zbc_model_s && *sdeb_zbc_model_s) {
7821 		k = sdeb_zbc_model_str(sdeb_zbc_model_s);
7822 		if (k < 0)
7823 			return k;
7824 		sdeb_zbc_model = k;
7825 		switch (sdeb_zbc_model) {
7826 		case BLK_ZONED_NONE:
7827 		case BLK_ZONED_HA:
7828 			sdebug_ptype = TYPE_DISK;
7829 			break;
7830 		case BLK_ZONED_HM:
7831 			sdebug_ptype = TYPE_ZBC;
7832 			break;
7833 		default:
7834 			pr_err("Invalid ZBC model\n");
7835 			return -EINVAL;
7836 		}
7837 	}
7838 	if (sdeb_zbc_model != BLK_ZONED_NONE) {
7839 		sdeb_zbc_in_use = true;
7840 		if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
7841 			sdebug_dev_size_mb = DEF_ZBC_DEV_SIZE_MB;
7842 	}
7843 
7844 	if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
7845 		sdebug_dev_size_mb = DEF_DEV_SIZE_MB;
7846 	if (sdebug_dev_size_mb < 1)
7847 		sdebug_dev_size_mb = 1;  /* force minimum 1 MB ramdisk */
7848 	sz = (unsigned long)sdebug_dev_size_mb * 1048576;
7849 	sdebug_store_sectors = sz / sdebug_sector_size;
7850 	sdebug_capacity = get_sdebug_capacity();
7851 
7852 	/* play around with geometry, don't waste too much on track 0 */
7853 	sdebug_heads = 8;
7854 	sdebug_sectors_per = 32;
7855 	if (sdebug_dev_size_mb >= 256)
7856 		sdebug_heads = 64;
7857 	else if (sdebug_dev_size_mb >= 16)
7858 		sdebug_heads = 32;
7859 	sdebug_cylinders_per = (unsigned long)sdebug_capacity /
7860 			       (sdebug_sectors_per * sdebug_heads);
7861 	if (sdebug_cylinders_per >= 1024) {
7862 		/* other LLDs do this; implies >= 1GB ram disk ... */
7863 		sdebug_heads = 255;
7864 		sdebug_sectors_per = 63;
7865 		sdebug_cylinders_per = (unsigned long)sdebug_capacity /
7866 			       (sdebug_sectors_per * sdebug_heads);
7867 	}
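	/*
	 * Editor's worked example (sketch): dev_size_mb=256 with 512 byte
	 * sectors gives 524288 sectors; heads=64 and sectors_per=32 yield
	 * 524288 / (32 * 64) = 256 cylinders, under the 1024 limit, so the
	 * 255/63 fallback geometry is not used.
	 */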
7868 	if (scsi_debug_lbp()) {
7869 		sdebug_unmap_max_blocks =
7870 			clamp(sdebug_unmap_max_blocks, 0U, 0xffffffffU);
7871 
7872 		sdebug_unmap_max_desc =
7873 			clamp(sdebug_unmap_max_desc, 0U, 256U);
7874 
7875 		sdebug_unmap_granularity =
7876 			clamp(sdebug_unmap_granularity, 1U, 0xffffffffU);
7877 
7878 		if (sdebug_unmap_alignment &&
7879 		    sdebug_unmap_granularity <=
7880 		    sdebug_unmap_alignment) {
7881 			pr_err("ERR: unmap_granularity <= unmap_alignment\n");
7882 			return -EINVAL;
7883 		}
7884 	}
7885 
7886 	xa_init_flags(per_store_ap, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
7887 	if (want_store) {
7888 		idx = sdebug_add_store();
7889 		if (idx < 0)
7890 			return idx;
7891 	}
7892 
7893 	pseudo_primary = root_device_register("pseudo_0");
7894 	if (IS_ERR(pseudo_primary)) {
7895 		pr_warn("root_device_register() error\n");
7896 		ret = PTR_ERR(pseudo_primary);
7897 		goto free_vm;
7898 	}
7899 	ret = bus_register(&pseudo_lld_bus);
7900 	if (ret < 0) {
7901 		pr_warn("bus_register error: %d\n", ret);
7902 		goto dev_unreg;
7903 	}
7904 	ret = driver_register(&sdebug_driverfs_driver);
7905 	if (ret < 0) {
7906 		pr_warn("driver_register error: %d\n", ret);
7907 		goto bus_unreg;
7908 	}
7909 
7910 	hosts_to_add = sdebug_add_host;
7911 	sdebug_add_host = 0;
7912 
7913 	queued_cmd_cache = KMEM_CACHE(sdebug_queued_cmd, SLAB_HWCACHE_ALIGN);
7914 	if (!queued_cmd_cache) {
7915 		ret = -ENOMEM;
7916 		goto driver_unreg;
7917 	}
7918 
7919 	sdebug_debugfs_root = debugfs_create_dir("scsi_debug", NULL);
7920 	if (IS_ERR_OR_NULL(sdebug_debugfs_root))
7921 		pr_info("failed to create initial debugfs directory\n");
7922 
7923 	for (k = 0; k < hosts_to_add; k++) {
7924 		if (want_store && k == 0) {
7925 			ret = sdebug_add_host_helper(idx);
7926 			if (ret < 0) {
7927 				pr_err("add_host_helper k=%d, error=%d\n",
7928 				       k, -ret);
7929 				break;
7930 			}
7931 		} else {
7932 			ret = sdebug_do_add_host(want_store &&
7933 						 sdebug_per_host_store);
7934 			if (ret < 0) {
7935 				pr_err("add_host k=%d error=%d\n", k, -ret);
7936 				break;
7937 			}
7938 		}
7939 	}
7940 	if (sdebug_verbose)
7941 		pr_info("built %d host(s)\n", sdebug_num_hosts);
7942 
7943 	return 0;
7944 
7945 driver_unreg:
7946 	driver_unregister(&sdebug_driverfs_driver);
7947 bus_unreg:
7948 	bus_unregister(&pseudo_lld_bus);
7949 dev_unreg:
7950 	root_device_unregister(pseudo_primary);
7951 free_vm:
7952 	sdebug_erase_store(idx, NULL);
7953 	return ret;
7954 }
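/*
 * Illustrative module loads exercising the initialization above (the
 * parameter names are assumed to match the module_param definitions
 * earlier in this file):
 *   modprobe scsi_debug dev_size_mb=256 num_tgts=2 max_luns=4
 *   modprobe scsi_debug add_host=2 per_host_store=1
 */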
7955 
7956 static void __exit scsi_debug_exit(void)
7957 {
7958 	int k = sdebug_num_hosts;
7959 
7960 	for (; k; k--)
7961 		sdebug_do_remove_host(true);
7962 	kmem_cache_destroy(queued_cmd_cache);
7963 	driver_unregister(&sdebug_driverfs_driver);
7964 	bus_unregister(&pseudo_lld_bus);
7965 	root_device_unregister(pseudo_primary);
7966 
7967 	sdebug_erase_all_stores(false);
7968 	xa_destroy(per_store_ap);
7969 	debugfs_remove(sdebug_debugfs_root);
7970 }
7971 
7972 device_initcall(scsi_debug_init);
7973 module_exit(scsi_debug_exit);
7974 
7975 static void sdebug_release_adapter(struct device *dev)
7976 {
7977 	struct sdebug_host_info *sdbg_host;
7978 
7979 	sdbg_host = dev_to_sdebug_host(dev);
7980 	kfree(sdbg_host);
7981 }
7982 
7983 /* idx must be valid, if sip is NULL then it will be obtained using idx */
7984 static void sdebug_erase_store(int idx, struct sdeb_store_info *sip)
7985 {
7986 	if (idx < 0)
7987 		return;
7988 	if (!sip) {
7989 		if (xa_empty(per_store_ap))
7990 			return;
7991 		sip = xa_load(per_store_ap, idx);
7992 		if (!sip)
7993 			return;
7994 	}
7995 	vfree(sip->map_storep);
7996 	vfree(sip->dif_storep);
7997 	vfree(sip->storep);
7998 	xa_erase(per_store_ap, idx);
7999 	kfree(sip);
8000 }
8001 
8002 /* The apart_from_first==false case is assumed to occur only at shutdown. */
8003 static void sdebug_erase_all_stores(bool apart_from_first)
8004 {
8005 	unsigned long idx;
8006 	struct sdeb_store_info *sip = NULL;
8007 
8008 	xa_for_each(per_store_ap, idx, sip) {
8009 		if (apart_from_first)
8010 			apart_from_first = false;
8011 		else
8012 			sdebug_erase_store(idx, sip);
8013 	}
8014 	if (apart_from_first)
8015 		sdeb_most_recent_idx = sdeb_first_idx;
8016 }
8017 
8018 /*
8019  * Returns the new store's xarray element index (>= 0) on success,
8020  * else a negated errno. The number of stores is limited to 65536.
8021  */
8022 static int sdebug_add_store(void)
8023 {
8024 	int res;
8025 	u32 n_idx;
8026 	unsigned long iflags;
8027 	unsigned long sz = (unsigned long)sdebug_dev_size_mb * 1048576;
8028 	struct sdeb_store_info *sip = NULL;
8029 	struct xa_limit xal = { .max = 1 << 16, .min = 0 };
8030 
8031 	sip = kzalloc(sizeof(*sip), GFP_KERNEL);
8032 	if (!sip)
8033 		return -ENOMEM;
8034 
8035 	xa_lock_irqsave(per_store_ap, iflags);
8036 	res = __xa_alloc(per_store_ap, &n_idx, sip, xal, GFP_ATOMIC);
8037 	if (unlikely(res < 0)) {
8038 		xa_unlock_irqrestore(per_store_ap, iflags);
8039 		kfree(sip);
8040 		pr_warn("xa_alloc() errno=%d\n", -res);
8041 		return res;
8042 	}
8043 	sdeb_most_recent_idx = n_idx;
8044 	if (sdeb_first_idx < 0)
8045 		sdeb_first_idx = n_idx;
8046 	xa_unlock_irqrestore(per_store_ap, iflags);
8047 
8048 	res = -ENOMEM;
8049 	sip->storep = vzalloc(sz);
8050 	if (!sip->storep) {
8051 		pr_err("user data oom\n");
8052 		goto err;
8053 	}
8054 	if (sdebug_num_parts > 0)
8055 		sdebug_build_parts(sip->storep, sz);
8056 
8057 	/* DIF/DIX: what T10 calls Protection Information (PI) */
8058 	if (sdebug_dix) {
8059 		int dif_size;
8060 
8061 		dif_size = sdebug_store_sectors * sizeof(struct t10_pi_tuple);
8062 		sip->dif_storep = vmalloc(dif_size);
8063 
8064 		pr_info("dif_storep %u bytes @ %pK\n", dif_size,
8065 			sip->dif_storep);
8066 
8067 		if (!sip->dif_storep) {
8068 			pr_err("DIX oom\n");
8069 			goto err;
8070 		}
8071 		memset(sip->dif_storep, 0xff, dif_size);
8072 	}
8073 	/* Logical Block Provisioning */
8074 	if (scsi_debug_lbp()) {
8075 		map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
8076 		sip->map_storep = vmalloc(array_size(sizeof(long),
8077 						     BITS_TO_LONGS(map_size)));
8078 
8079 		pr_info("%lu provisioning blocks\n", map_size);
8080 
8081 		if (!sip->map_storep) {
8082 			pr_err("LBP map oom\n");
8083 			goto err;
8084 		}
8085 
8086 		bitmap_zero(sip->map_storep, map_size);
8087 
8088 		/* Map first 1KB for partition table */
8089 		if (sdebug_num_parts)
8090 			map_region(sip, 0, 2);
8091 	}
8092 
8093 	rwlock_init(&sip->macc_data_lck);
8094 	rwlock_init(&sip->macc_meta_lck);
8095 	rwlock_init(&sip->macc_sector_lck);
8096 	return (int)n_idx;
8097 err:
8098 	sdebug_erase_store((int)n_idx, sip);
8099 	pr_warn("failed, errno=%d\n", -res);
8100 	return res;
8101 }
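/*
 * Sizing example for sdebug_add_store() (illustrative): dev_size_mb=1024
 * with 512-byte sectors gives 2097152 store sectors, so storep is 1 GiB
 * of vzalloc()ed ramdisk; with dix set, dif_storep adds 2097152 *
 * sizeof(struct t10_pi_tuple) = 16 MiB of protection information,
 * initialized to 0xff (i.e. unwritten).
 */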
8102 
8103 static int sdebug_add_host_helper(int per_host_idx)
8104 {
8105 	int k, devs_per_host, idx;
8106 	int error = -ENOMEM;
8107 	struct sdebug_host_info *sdbg_host;
8108 	struct sdebug_dev_info *sdbg_devinfo, *tmp;
8109 
8110 	sdbg_host = kzalloc(sizeof(*sdbg_host), GFP_KERNEL);
8111 	if (!sdbg_host)
8112 		return -ENOMEM;
8113 	idx = (per_host_idx < 0) ? sdeb_first_idx : per_host_idx;
8114 	if (xa_get_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE))
8115 		xa_clear_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
8116 	sdbg_host->si_idx = idx;
8117 
8118 	INIT_LIST_HEAD(&sdbg_host->dev_info_list);
8119 
8120 	devs_per_host = sdebug_num_tgts * sdebug_max_luns;
8121 	for (k = 0; k < devs_per_host; k++) {
8122 		sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
8123 		if (!sdbg_devinfo)
8124 			goto clean;
8125 	}
8126 
8127 	mutex_lock(&sdebug_host_list_mutex);
8128 	list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
8129 	mutex_unlock(&sdebug_host_list_mutex);
8130 
8131 	sdbg_host->dev.bus = &pseudo_lld_bus;
8132 	sdbg_host->dev.parent = pseudo_primary;
8133 	sdbg_host->dev.release = &sdebug_release_adapter;
8134 	dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_num_hosts);
8135 
8136 	error = device_register(&sdbg_host->dev);
8137 	if (error) {
8138 		mutex_lock(&sdebug_host_list_mutex);
8139 		list_del(&sdbg_host->host_list);
8140 		mutex_unlock(&sdebug_host_list_mutex);
8141 		goto clean;
8142 	}
8143 
8144 	++sdebug_num_hosts;
8145 	return 0;
8146 
8147 clean:
8148 	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
8149 				 dev_list) {
8150 		list_del(&sdbg_devinfo->dev_list);
8151 		kfree(sdbg_devinfo->zstate);
8152 		kfree(sdbg_devinfo);
8153 	}
8154 	if (sdbg_host->dev.release)
8155 		put_device(&sdbg_host->dev);
8156 	else
8157 		kfree(sdbg_host);
8158 	pr_warn("failed, errno=%d\n", -error);
8159 	return error;
8160 }
8161 
8162 static int sdebug_do_add_host(bool mk_new_store)
8163 {
8164 	int ph_idx = sdeb_most_recent_idx;
8165 
8166 	if (mk_new_store) {
8167 		ph_idx = sdebug_add_store();
8168 		if (ph_idx < 0)
8169 			return ph_idx;
8170 	}
8171 	return sdebug_add_host_helper(ph_idx);
8172 }
8173 
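/*
 * Remove the most recently added host. Its backing store is not freed
 * here; when no remaining host shares the store it is only marked
 * SDEB_XA_NOT_IN_USE, and sdebug_erase_all_stores() reclaims it later.
 */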
8174 static void sdebug_do_remove_host(bool the_end)
8175 {
8176 	int idx = -1;
8177 	struct sdebug_host_info *sdbg_host = NULL;
8178 	struct sdebug_host_info *sdbg_host2;
8179 
8180 	mutex_lock(&sdebug_host_list_mutex);
8181 	if (!list_empty(&sdebug_host_list)) {
8182 		sdbg_host = list_entry(sdebug_host_list.prev,
8183 				       struct sdebug_host_info, host_list);
8184 		idx = sdbg_host->si_idx;
8185 	}
8186 	if (!the_end && idx >= 0) {
8187 		bool unique = true;
8188 
8189 		list_for_each_entry(sdbg_host2, &sdebug_host_list, host_list) {
8190 			if (sdbg_host2 == sdbg_host)
8191 				continue;
8192 			if (idx == sdbg_host2->si_idx) {
8193 				unique = false;
8194 				break;
8195 			}
8196 		}
8197 		if (unique) {
8198 			xa_set_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
8199 			if (idx == sdeb_most_recent_idx)
8200 				--sdeb_most_recent_idx;
8201 		}
8202 	}
8203 	if (sdbg_host)
8204 		list_del(&sdbg_host->host_list);
8205 	mutex_unlock(&sdebug_host_list_mutex);
8206 
8207 	if (!sdbg_host)
8208 		return;
8209 
8210 	device_unregister(&sdbg_host->dev);
8211 	--sdebug_num_hosts;
8212 }
8213 
8214 static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
8215 {
8216 	struct sdebug_dev_info *devip = sdev->hostdata;
8217 
8218 	if (!devip)
8219 		return	-ENODEV;
8220 
8221 	mutex_lock(&sdebug_host_list_mutex);
8222 	block_unblock_all_queues(true);
8223 
8224 	if (qdepth > SDEBUG_CANQUEUE) {
8225 		qdepth = SDEBUG_CANQUEUE;
8226 		pr_warn("requested qdepth [%d] exceeds canqueue [%d], trimming\n",
8227 			qdepth, SDEBUG_CANQUEUE);
8228 	}
8229 	if (qdepth < 1)
8230 		qdepth = 1;
8231 	if (qdepth != sdev->queue_depth)
8232 		scsi_change_queue_depth(sdev, qdepth);
8233 
8234 	block_unblock_all_queues(false);
8235 	mutex_unlock(&sdebug_host_list_mutex);
8236 
8237 	if (SDEBUG_OPT_Q_NOISE & sdebug_opts)
8238 		sdev_printk(KERN_INFO, sdev, "%s: qdepth=%d\n", __func__, qdepth);
8239 
8240 	return sdev->queue_depth;
8241 }
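/*
 * sdebug_change_qdepth() is typically reached when user space writes the
 * scsi_device "queue_depth" sysfs attribute, e.g. (illustrative path):
 *   echo 16 > /sys/bus/scsi/devices/<h:c:t:l>/queue_depth
 */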
8242 
8243 static bool fake_timeout(struct scsi_cmnd *scp)
8244 {
8245 	if (0 == (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth))) {
8246 		if (sdebug_every_nth < -1)
8247 			sdebug_every_nth = -1;
8248 		if (SDEBUG_OPT_TIMEOUT & sdebug_opts)
8249 			return true; /* ignore command causing timeout */
8250 		else if (SDEBUG_OPT_MAC_TIMEOUT & sdebug_opts &&
8251 			 scsi_medium_access_command(scp))
8252 			return true; /* time out reads and writes */
8253 	}
8254 	return false;
8255 }
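/*
 * Example (illustrative values): every_nth=100 with SDEBUG_OPT_TIMEOUT
 * set in opts drops every 100th command without completing it, so the
 * mid layer's timeout and abort handling can be exercised.
 */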
8256 
8257 /* Response to TUR or media access command when device stopped */
8258 static int resp_not_ready(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
8259 {
8260 	int stopped_state;
8261 	u64 diff_ns = 0;
8262 	ktime_t now_ts = ktime_get_boottime();
8263 	struct scsi_device *sdp = scp->device;
8264 
8265 	stopped_state = atomic_read(&devip->stopped);
8266 	if (stopped_state == 2) {
8267 		if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
8268 			diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));
8269 			if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
8270 				/* tur_ms_to_ready timer extinguished */
8271 				atomic_set(&devip->stopped, 0);
8272 				return 0;
8273 			}
8274 		}
8275 		mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x1);
8276 		if (sdebug_verbose)
8277 			sdev_printk(KERN_INFO, sdp,
8278 				    "%s: Not ready: in process of becoming ready\n", my_name);
8279 		if (scp->cmnd[0] == TEST_UNIT_READY) {
8280 			u64 tur_nanosecs_to_ready = (u64)sdeb_tur_ms_to_ready * 1000000;
8281 
8282 			if (diff_ns <= tur_nanosecs_to_ready)
8283 				diff_ns = tur_nanosecs_to_ready - diff_ns;
8284 			else
8285 				diff_ns = tur_nanosecs_to_ready;
8286 			/* As per 20-061r2 approved for spc6 by T10 on 20200716 */
8287 			do_div(diff_ns, 1000000);	/* diff_ns becomes milliseconds */
8288 			scsi_set_sense_information(scp->sense_buffer, SCSI_SENSE_BUFFERSIZE,
8289 						   diff_ns);
8290 			return check_condition_result;
8291 		}
8292 	}
8293 	mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x2);
8294 	if (sdebug_verbose)
8295 		sdev_printk(KERN_INFO, sdp, "%s: Not ready: initializing command required\n",
8296 			    my_name);
8297 	return check_condition_result;
8298 }
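/*
 * Example (illustrative): with tur_ms_to_ready=5000, TEST UNIT READY
 * fails with NOT READY, ASC/ASCQ 0x4/0x1 for the first five seconds
 * after device creation, and the sense INFORMATION field carries the
 * remaining milliseconds as computed above.
 */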
8299 
8300 static void sdebug_map_queues(struct Scsi_Host *shost)
8301 {
8302 	int i, qoff;
8303 
8304 	if (shost->nr_hw_queues == 1)
8305 		return;
8306 
8307 	for (i = 0, qoff = 0; i < HCTX_MAX_TYPES; i++) {
8308 		struct blk_mq_queue_map *map = &shost->tag_set.map[i];
8309 
8310 		map->nr_queues  = 0;
8311 
8312 		if (i == HCTX_TYPE_DEFAULT)
8313 			map->nr_queues = submit_queues - poll_queues;
8314 		else if (i == HCTX_TYPE_POLL)
8315 			map->nr_queues = poll_queues;
8316 
8317 		if (!map->nr_queues) {
8318 			BUG_ON(i == HCTX_TYPE_DEFAULT);
8319 			continue;
8320 		}
8321 
8322 		map->queue_offset = qoff;
8323 		blk_mq_map_queues(map);
8324 
8325 		qoff += map->nr_queues;
8326 	}
8327 }
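/*
 * Worked example (illustrative): submit_queues=4, poll_queues=2 maps
 * hardware queues 0-1 to HCTX_TYPE_DEFAULT and queues 2-3 (queue_offset
 * of 2) to HCTX_TYPE_POLL, while HCTX_TYPE_READ gets no queues.
 */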
8328 
8329 struct sdebug_blk_mq_poll_data {
8330 	unsigned int queue_num;
8331 	int *num_entries;
8332 };
8333 
8334 /*
8335  * Aborted commands are not handled here, but it does not seem possible
8336  * for polled commands issued via schedule_resp() to be aborted.
8337  */
8338 static bool sdebug_blk_mq_poll_iter(struct request *rq, void *opaque)
8339 {
8340 	struct sdebug_blk_mq_poll_data *data = opaque;
8341 	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
8342 	struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmd);
8343 	struct sdebug_defer *sd_dp;
8344 	u32 unique_tag = blk_mq_unique_tag(rq);
8345 	u16 hwq = blk_mq_unique_tag_to_hwq(unique_tag);
8346 	struct sdebug_queued_cmd *sqcp;
8347 	unsigned long flags;
8348 	int queue_num = data->queue_num;
8349 	ktime_t time;
8350 
8351 	/* We're only interested in one queue for this iteration */
8352 	if (hwq != queue_num)
8353 		return true;
8354 
8355 	/* Subsequent checks would fail if this failed, but check anyway */
8356 	if (!test_bit(SCMD_STATE_INFLIGHT, &cmd->state))
8357 		return true;
8358 
8359 	time = ktime_get_boottime();
8360 
8361 	spin_lock_irqsave(&sdsc->lock, flags);
8362 	sqcp = TO_QUEUED_CMD(cmd);
8363 	if (!sqcp) {
8364 		spin_unlock_irqrestore(&sdsc->lock, flags);
8365 		return true;
8366 	}
8367 
8368 	sd_dp = &sqcp->sd_dp;
8369 	if (READ_ONCE(sd_dp->defer_t) != SDEB_DEFER_POLL) {
8370 		spin_unlock_irqrestore(&sdsc->lock, flags);
8371 		return true;
8372 	}
8373 
8374 	if (time < sd_dp->cmpl_ts) {
8375 		spin_unlock_irqrestore(&sdsc->lock, flags);
8376 		return true;
8377 	}
8378 
8379 	ASSIGN_QUEUED_CMD(cmd, NULL);
8380 	spin_unlock_irqrestore(&sdsc->lock, flags);
8381 
8382 	if (sdebug_statistics) {
8383 		atomic_inc(&sdebug_completions);
8384 		if (raw_smp_processor_id() != sd_dp->issuing_cpu)
8385 			atomic_inc(&sdebug_miss_cpus);
8386 	}
8387 
8388 	sdebug_free_queued_cmd(sqcp);
8389 
8390 	scsi_done(cmd); /* callback to mid level */
8391 	(*data->num_entries)++;
8392 	return true;
8393 }
8394 
8395 static int sdebug_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
8396 {
8397 	int num_entries = 0;
8398 	struct sdebug_blk_mq_poll_data data = {
8399 		.queue_num = queue_num,
8400 		.num_entries = &num_entries,
8401 	};
8402 
8403 	blk_mq_tagset_busy_iter(&shost->tag_set, sdebug_blk_mq_poll_iter,
8404 				&data);
8405 
8406 	if (num_entries > 0)
8407 		atomic_add(num_entries, &sdeb_mq_poll_count);
8408 	return num_entries;
8409 }
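/*
 * Only commands deferred with SDEB_DEFER_POLL complete here; a polling
 * context (e.g. io_uring set up with IORING_SETUP_IOPOLL) must drive
 * blk-mq polling on one of the poll queues mapped above.
 */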
8410 
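/*
 * The next three helpers implement per-device error injection. As coded
 * below, a negative err->cnt injects |cnt| times (counting up toward
 * zero), a positive count injects on every match, and zero disables the
 * rule; err->cmd holds a CDB opcode, with 0xff matching any opcode.
 */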
8411 static int sdebug_timeout_cmd(struct scsi_cmnd *cmnd)
8412 {
8413 	struct scsi_device *sdp = cmnd->device;
8414 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
8415 	struct sdebug_err_inject *err;
8416 	unsigned char *cmd = cmnd->cmnd;
8417 	int ret = 0;
8418 
8419 	if (devip == NULL)
8420 		return 0;
8421 
8422 	rcu_read_lock();
8423 	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
8424 		if (err->type == ERR_TMOUT_CMD &&
8425 		    (err->cmd == cmd[0] || err->cmd == 0xff)) {
8426 			ret = !!err->cnt;
8427 			if (err->cnt < 0)
8428 				err->cnt++;
8429 
8430 			rcu_read_unlock();
8431 			return ret;
8432 		}
8433 	}
8434 	rcu_read_unlock();
8435 
8436 	return 0;
8437 }
8438 
8439 static int sdebug_fail_queue_cmd(struct scsi_cmnd *cmnd)
8440 {
8441 	struct scsi_device *sdp = cmnd->device;
8442 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
8443 	struct sdebug_err_inject *err;
8444 	unsigned char *cmd = cmnd->cmnd;
8445 	int ret = 0;
8446 
8447 	if (devip == NULL)
8448 		return 0;
8449 
8450 	rcu_read_lock();
8451 	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
8452 		if (err->type == ERR_FAIL_QUEUE_CMD &&
8453 		    (err->cmd == cmd[0] || err->cmd == 0xff)) {
8454 			ret = err->cnt ? err->queuecmd_ret : 0;
8455 			if (err->cnt < 0)
8456 				err->cnt++;
8457 
8458 			rcu_read_unlock();
8459 			return ret;
8460 		}
8461 	}
8462 	rcu_read_unlock();
8463 
8464 	return 0;
8465 }
8466 
8467 static int sdebug_fail_cmd(struct scsi_cmnd *cmnd, int *retval,
8468 			   struct sdebug_err_inject *info)
8469 {
8470 	struct scsi_device *sdp = cmnd->device;
8471 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
8472 	struct sdebug_err_inject *err;
8473 	unsigned char *cmd = cmnd->cmnd;
8474 	int ret = 0;
8475 	int result;
8476 
8477 	if (devip == NULL)
8478 		return 0;
8479 
8480 	rcu_read_lock();
8481 	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
8482 		if (err->type == ERR_FAIL_CMD &&
8483 		    (err->cmd == cmd[0] || err->cmd == 0xff)) {
8484 			if (!err->cnt) {
8485 				rcu_read_unlock();
8486 				return 0;
8487 			}
8488 
8489 			ret = !!err->cnt;
8490 			rcu_read_unlock();
8491 			goto out_handle;
8492 		}
8493 	}
8494 	rcu_read_unlock();
8495 
8496 	return 0;
8497 
8498 out_handle:
8499 	if (err->cnt < 0)
8500 		err->cnt++;
8501 	mk_sense_buffer(cmnd, err->sense_key, err->asc, err->asq);
8502 	result = err->status_byte | err->host_byte << 16 | err->driver_byte << 24;
8503 	*info = *err;
8504 	*retval = schedule_resp(cmnd, devip, result, NULL, 0, 0);
8505 
8506 	return ret;
8507 }
8508 
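/*
 * queuecommand entry point: decode the CDB against opcode_info_arr
 * (including service-action variants), apply error injection, unit
 * attention and not-ready handling, then hand the selected resp_*
 * handler to schedule_resp() for immediate or deferred completion.
 */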
8509 static int scsi_debug_queuecommand(struct Scsi_Host *shost,
8510 				   struct scsi_cmnd *scp)
8511 {
8512 	u8 sdeb_i;
8513 	struct scsi_device *sdp = scp->device;
8514 	const struct opcode_info_t *oip;
8515 	const struct opcode_info_t *r_oip;
8516 	struct sdebug_dev_info *devip;
8517 	u8 *cmd = scp->cmnd;
8518 	int (*r_pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
8519 	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *) = NULL;
8520 	int k, na;
8521 	int errsts = 0;
8522 	u64 lun_index = sdp->lun & 0x3FFF;
8523 	u32 flags;
8524 	u16 sa;
8525 	u8 opcode = cmd[0];
8526 	bool has_wlun_rl;
8527 	bool inject_now;
8528 	int ret = 0;
8529 	struct sdebug_err_inject err;
8530 
8531 	scsi_set_resid(scp, 0);
8532 	if (sdebug_statistics) {
8533 		atomic_inc(&sdebug_cmnd_count);
8534 		inject_now = inject_on_this_cmd();
8535 	} else {
8536 		inject_now = false;
8537 	}
8538 	if (unlikely(sdebug_verbose &&
8539 		     !(SDEBUG_OPT_NO_CDB_NOISE & sdebug_opts))) {
8540 		char b[120];
8541 		int n, len, sb;
8542 
8543 		len = scp->cmd_len;
8544 		sb = (int)sizeof(b);
8545 		if (len > 32)
8546 			strcpy(b, "too long, over 32 bytes");
8547 		else {
8548 			for (k = 0, n = 0; k < len && n < sb; ++k)
8549 				n += scnprintf(b + n, sb - n, "%02x ",
8550 					       (u32)cmd[k]);
8551 		}
8552 		sdev_printk(KERN_INFO, sdp, "%s: tag=%#x, cmd %s\n", my_name,
8553 			    blk_mq_unique_tag(scsi_cmd_to_rq(scp)), b);
8554 	}
8555 	if (unlikely(inject_now && (sdebug_opts & SDEBUG_OPT_HOST_BUSY)))
8556 		return SCSI_MLQUEUE_HOST_BUSY;
8557 	has_wlun_rl = (sdp->lun == SCSI_W_LUN_REPORT_LUNS);
8558 	if (unlikely(lun_index >= sdebug_max_luns && !has_wlun_rl))
8559 		goto err_out;
8560 
8561 	sdeb_i = opcode_ind_arr[opcode];	/* fully mapped */
8562 	oip = &opcode_info_arr[sdeb_i];		/* safe if table consistent */
8563 	devip = (struct sdebug_dev_info *)sdp->hostdata;
8564 	if (unlikely(!devip)) {
8565 		devip = find_build_dev_info(sdp);
8566 		if (NULL == devip)
8567 			goto err_out;
8568 	}
8569 
8570 	if (sdebug_timeout_cmd(scp)) {
8571 		scmd_printk(KERN_INFO, scp, "timeout command 0x%x\n", opcode);
8572 		return 0;
8573 	}
8574 
8575 	ret = sdebug_fail_queue_cmd(scp);
8576 	if (ret) {
8577 		scmd_printk(KERN_INFO, scp, "fail queue command 0x%x with 0x%x\n",
8578 				opcode, ret);
8579 		return ret;
8580 	}
8581 
8582 	if (sdebug_fail_cmd(scp, &ret, &err)) {
8583 		scmd_printk(KERN_INFO, scp,
8584 			"fail command 0x%x with hostbyte=0x%x, "
8585 			"driverbyte=0x%x, statusbyte=0x%x, "
8586 			"sense_key=0x%x, asc=0x%x, asq=0x%x\n",
8587 			opcode, err.host_byte, err.driver_byte,
8588 			err.status_byte, err.sense_key, err.asc, err.asq);
8589 		return ret;
8590 	}
8591 
8592 	if (unlikely(inject_now && !atomic_read(&sdeb_inject_pending)))
8593 		atomic_set(&sdeb_inject_pending, 1);
8594 
8595 	na = oip->num_attached;
8596 	r_pfp = oip->pfp;
8597 	if (na) {	/* multiple commands with this opcode */
8598 		r_oip = oip;
8599 		if (FF_SA & r_oip->flags) {
8600 			if (F_SA_LOW & oip->flags)
8601 				sa = 0x1f & cmd[1];
8602 			else
8603 				sa = get_unaligned_be16(cmd + 8);
8604 			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
8605 				if (opcode == oip->opcode && sa == oip->sa)
8606 					break;
8607 			}
8608 		} else {   /* since no service action only check opcode */
8609 			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
8610 				if (opcode == oip->opcode)
8611 					break;
8612 			}
8613 		}
8614 		if (k > na) {
8615 			if (F_SA_LOW & r_oip->flags)
8616 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 4);
8617 			else if (F_SA_HIGH & r_oip->flags)
8618 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, 7);
8619 			else
8620 				mk_sense_invalid_opcode(scp);
8621 			goto check_cond;
8622 		}
8623 	}	/* else (when na==0) we assume the oip is a match */
8624 	flags = oip->flags;
8625 	if (unlikely(F_INV_OP & flags)) {
8626 		mk_sense_invalid_opcode(scp);
8627 		goto check_cond;
8628 	}
8629 	if (unlikely(has_wlun_rl && !(F_RL_WLUN_OK & flags))) {
8630 		if (sdebug_verbose)
8631 			sdev_printk(KERN_INFO, sdp, "%s: Opcode 0x%x not supported for wlun\n",
8632 				    my_name, opcode);
8633 		mk_sense_invalid_opcode(scp);
8634 		goto check_cond;
8635 	}
8636 	if (unlikely(sdebug_strict)) {	/* check cdb against mask */
8637 		u8 rem;
8638 		int j;
8639 
8640 		for (k = 1; k < oip->len_mask[0] && k < 16; ++k) {
8641 			rem = ~oip->len_mask[k] & cmd[k];
8642 			if (rem) {
8643 				for (j = 7; j >= 0; --j, rem <<= 1) {
8644 					if (0x80 & rem)
8645 						break;
8646 				}
8647 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, k, j);
8648 				goto check_cond;
8649 			}
8650 		}
8651 	}
8652 	if (unlikely(!(F_SKIP_UA & flags) &&
8653 		     find_first_bit(devip->uas_bm,
8654 				    SDEBUG_NUM_UAS) != SDEBUG_NUM_UAS)) {
8655 		errsts = make_ua(scp, devip);
8656 		if (errsts)
8657 			goto check_cond;
8658 	}
8659 	if (unlikely(((F_M_ACCESS & flags) || scp->cmnd[0] == TEST_UNIT_READY) &&
8660 		     atomic_read(&devip->stopped))) {
8661 		errsts = resp_not_ready(scp, devip);
8662 		if (errsts)
8663 			goto fini;
8664 	}
8665 	if (sdebug_fake_rw && (F_FAKE_RW & flags))
8666 		goto fini;
8667 	if (unlikely(sdebug_every_nth)) {
8668 		if (fake_timeout(scp))
8669 			return 0;	/* ignore command: make trouble */
8670 	}
8671 	if (likely(oip->pfp))
8672 		pfp = oip->pfp;	/* calls a resp_* function */
8673 	else
8674 		pfp = r_pfp;    /* if leaf function ptr NULL, try the root's */
8675 
8676 fini:
8677 	if (F_DELAY_OVERR & flags)	/* cmds like INQUIRY respond asap */
8678 		return schedule_resp(scp, devip, errsts, pfp, 0, 0);
8679 	else if ((flags & F_LONG_DELAY) && (sdebug_jdelay > 0 ||
8680 					    sdebug_ndelay > 10000)) {
8681 		/*
8682 		 * Skip long delays if ndelay <= 10 microseconds. Otherwise
8683 		 * for Start Stop Unit (SSU) want at least 1 second delay and
8684 		 * if sdebug_jdelay>1 want a long delay of that many seconds.
8685 		 * For Synchronize Cache want 1/20 of SSU's delay.
8686 		 */
8687 		int jdelay = (sdebug_jdelay < 2) ? 1 : sdebug_jdelay;
8688 		int denom = (flags & F_SYNC_DELAY) ? 20 : 1;
8689 
8690 		jdelay = mult_frac(USER_HZ * jdelay, HZ, denom * USER_HZ);
8691 		return schedule_resp(scp, devip, errsts, pfp, jdelay, 0);
8692 	} else
8693 		return schedule_resp(scp, devip, errsts, pfp, sdebug_jdelay,
8694 				     sdebug_ndelay);
8695 check_cond:
8696 	return schedule_resp(scp, devip, check_condition_result, NULL, 0, 0);
8697 err_out:
8698 	return schedule_resp(scp, NULL, DID_NO_CONNECT << 16, NULL, 0, 0);
8699 }
8700 
8701 static int sdebug_init_cmd_priv(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
8702 {
8703 	struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmd);
8704 
8705 	spin_lock_init(&sdsc->lock);
8706 
8707 	return 0;
8708 }
8709 
8710 static const struct scsi_host_template sdebug_driver_template = {
8711 	.show_info =		scsi_debug_show_info,
8712 	.write_info =		scsi_debug_write_info,
8713 	.proc_name =		sdebug_proc_name,
8714 	.name =			"SCSI DEBUG",
8715 	.info =			scsi_debug_info,
8716 	.sdev_init =		scsi_debug_sdev_init,
8717 	.sdev_configure =	scsi_debug_sdev_configure,
8718 	.sdev_destroy =		scsi_debug_sdev_destroy,
8719 	.ioctl =		scsi_debug_ioctl,
8720 	.queuecommand =		scsi_debug_queuecommand,
8721 	.change_queue_depth =	sdebug_change_qdepth,
8722 	.map_queues =		sdebug_map_queues,
8723 	.mq_poll =		sdebug_blk_mq_poll,
8724 	.eh_abort_handler =	scsi_debug_abort,
8725 	.eh_device_reset_handler = scsi_debug_device_reset,
8726 	.eh_target_reset_handler = scsi_debug_target_reset,
8727 	.eh_bus_reset_handler = scsi_debug_bus_reset,
8728 	.eh_host_reset_handler = scsi_debug_host_reset,
8729 	.can_queue =		SDEBUG_CANQUEUE,
8730 	.this_id =		7,
8731 	.sg_tablesize =		SG_MAX_SEGMENTS,
8732 	.cmd_per_lun =		DEF_CMD_PER_LUN,
8733 	.max_sectors =		-1U,
8734 	.max_segment_size =	-1U,
8735 	.module =		THIS_MODULE,
8736 	.skip_settle_delay =	1,
8737 	.track_queue_depth =	1,
8738 	.cmd_size = sizeof(struct sdebug_scsi_cmd),
8739 	.init_cmd_priv = sdebug_init_cmd_priv,
8740 	.target_alloc =		sdebug_target_alloc,
8741 	.target_destroy =	sdebug_target_destroy,
8742 };
8743 
8744 static int sdebug_driver_probe(struct device *dev)
8745 {
8746 	int error = 0;
8747 	struct sdebug_host_info *sdbg_host;
8748 	struct Scsi_Host *hpnt;
8749 	int hprot;
8750 
8751 	sdbg_host = dev_to_sdebug_host(dev);
8752 
8753 	hpnt = scsi_host_alloc(&sdebug_driver_template, 0);
8754 	if (NULL == hpnt) {
8755 		pr_err("scsi_host_alloc failed\n");
8756 		error = -ENODEV;
8757 		return error;
8758 	}
8759 	hpnt->can_queue = sdebug_max_queue;
8760 	hpnt->cmd_per_lun = sdebug_max_queue;
8761 	if (!sdebug_clustering)
8762 		hpnt->dma_boundary = PAGE_SIZE - 1;
8763 
8764 	if (submit_queues > nr_cpu_ids) {
8765 		pr_warn("%s: trim submit_queues (was %d) to nr_cpu_ids=%u\n",
8766 			my_name, submit_queues, nr_cpu_ids);
8767 		submit_queues = nr_cpu_ids;
8768 	}
8769 	/*
8770 	 * Decide whether to tell scsi subsystem that we want mq. The
8771 	 * following should give the same answer for each host.
8772 	 */
8773 	hpnt->nr_hw_queues = submit_queues;
8774 	if (sdebug_host_max_queue)
8775 		hpnt->host_tagset = 1;
8776 
8777 	/* poll queues are possible for nr_hw_queues > 1 */
8778 	if (hpnt->nr_hw_queues == 1 || (poll_queues < 1)) {
8779 		pr_warn("%s: trim poll_queues to 0. poll_q/nr_hw = (%d/%d)\n",
8780 			 my_name, poll_queues, hpnt->nr_hw_queues);
8781 		poll_queues = 0;
8782 	}
8783 
8784 	/*
8785 	 * Poll queues don't need interrupts, but we need at least one I/O queue
8786 	 * left over for non-polled I/O.
8787 	 * If condition not met, trim poll_queues to 1 (just for simplicity).
8788 	 */
8789 	if (poll_queues >= submit_queues) {
8790 		if (submit_queues < 3)
8791 			pr_warn("%s: trim poll_queues to 1\n", my_name);
8792 		else
8793 			pr_warn("%s: trim poll_queues to 1. Perhaps try poll_queues=%d\n",
8794 				my_name, submit_queues - 1);
8795 		poll_queues = 1;
8796 	}
8797 	if (poll_queues)
8798 		hpnt->nr_maps = 3;
8799 
8800 	sdbg_host->shost = hpnt;
8801 	if ((hpnt->this_id >= 0) && (sdebug_num_tgts > hpnt->this_id))
8802 		hpnt->max_id = sdebug_num_tgts + 1;
8803 	else
8804 		hpnt->max_id = sdebug_num_tgts;
8805 	/* set past the REPORT LUNS well known lu (rather than sdebug_max_luns) */
8806 	hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
8807 
8808 	hprot = 0;
8809 
8810 	switch (sdebug_dif) {
8811 
8812 	case T10_PI_TYPE1_PROTECTION:
8813 		hprot = SHOST_DIF_TYPE1_PROTECTION;
8814 		if (sdebug_dix)
8815 			hprot |= SHOST_DIX_TYPE1_PROTECTION;
8816 		break;
8817 
8818 	case T10_PI_TYPE2_PROTECTION:
8819 		hprot = SHOST_DIF_TYPE2_PROTECTION;
8820 		if (sdebug_dix)
8821 			hprot |= SHOST_DIX_TYPE2_PROTECTION;
8822 		break;
8823 
8824 	case T10_PI_TYPE3_PROTECTION:
8825 		hprot = SHOST_DIF_TYPE3_PROTECTION;
8826 		if (sdebug_dix)
8827 			hprot |= SHOST_DIX_TYPE3_PROTECTION;
8828 		break;
8829 
8830 	default:
8831 		if (sdebug_dix)
8832 			hprot |= SHOST_DIX_TYPE0_PROTECTION;
8833 		break;
8834 	}
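	/*
	 * Illustrative: loading with dif=1 dix=1 lands in the first case
	 * above, i.e. SHOST_DIF_TYPE1_PROTECTION | SHOST_DIX_TYPE1_PROTECTION.
	 */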
8835 
8836 	scsi_host_set_prot(hpnt, hprot);
8837 
8838 	if (have_dif_prot || sdebug_dix)
8839 		pr_info("host protection%s%s%s%s%s%s%s\n",
8840 			(hprot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
8841 			(hprot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
8842 			(hprot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
8843 			(hprot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
8844 			(hprot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
8845 			(hprot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
8846 			(hprot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");
8847 
8848 	if (sdebug_guard == 1)
8849 		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
8850 	else
8851 		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);
8852 
8853 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & sdebug_opts);
8854 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & sdebug_opts);
8855 	if (sdebug_every_nth)	/* need stats counters for every_nth */
8856 		sdebug_statistics = true;
8857 	error = scsi_add_host(hpnt, &sdbg_host->dev);
8858 	if (error) {
8859 		pr_err("scsi_add_host failed\n");
8860 		error = -ENODEV;
8861 		scsi_host_put(hpnt);
8862 	} else {
8863 		scsi_scan_host(hpnt);
8864 	}
8865 
8866 	return error;
8867 }
8868 
8869 static void sdebug_driver_remove(struct device *dev)
8870 {
8871 	struct sdebug_host_info *sdbg_host;
8872 	struct sdebug_dev_info *sdbg_devinfo, *tmp;
8873 
8874 	sdbg_host = dev_to_sdebug_host(dev);
8875 
8876 	scsi_remove_host(sdbg_host->shost);
8877 
8878 	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
8879 				 dev_list) {
8880 		list_del(&sdbg_devinfo->dev_list);
8881 		kfree(sdbg_devinfo->zstate);
8882 		kfree(sdbg_devinfo);
8883 	}
8884 
8885 	scsi_host_put(sdbg_host->shost);
8886 }
8887 
8888 static const struct bus_type pseudo_lld_bus = {
8889 	.name = "pseudo",
8890 	.probe = sdebug_driver_probe,
8891 	.remove = sdebug_driver_remove,
8892 	.drv_groups = sdebug_drv_groups,
8893 };
8894