xref: /linux/drivers/scsi/scsi_debug.c (revision 4236f913808cebef1b9e078726a4e5d56064f7ad)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
4  *  Copyright (C) 1992  Eric Youngdale
5  *  Simulate a host adapter with 2 disks attached.  Do a lot of checking
6  *  to make sure that we are not getting blocks mixed up, and PANIC if
7  *  anything out of the ordinary is seen.
8  * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
9  *
10  * Copyright (C) 2001 - 2021 Douglas Gilbert
11  *
12  *  For documentation see http://sg.danny.cz/sg/scsi_debug.html
13  */
14 
15 
16 #define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
17 
18 #include <linux/module.h>
19 #include <linux/align.h>
20 #include <linux/kernel.h>
21 #include <linux/errno.h>
22 #include <linux/jiffies.h>
23 #include <linux/slab.h>
24 #include <linux/types.h>
25 #include <linux/string.h>
26 #include <linux/fs.h>
27 #include <linux/init.h>
28 #include <linux/proc_fs.h>
29 #include <linux/vmalloc.h>
30 #include <linux/moduleparam.h>
31 #include <linux/scatterlist.h>
32 #include <linux/blkdev.h>
33 #include <linux/crc-t10dif.h>
34 #include <linux/spinlock.h>
35 #include <linux/interrupt.h>
36 #include <linux/atomic.h>
37 #include <linux/hrtimer.h>
38 #include <linux/uuid.h>
39 #include <linux/t10-pi.h>
40 #include <linux/msdos_partition.h>
41 #include <linux/random.h>
42 #include <linux/xarray.h>
43 #include <linux/prefetch.h>
44 #include <linux/debugfs.h>
45 #include <linux/async.h>
46 #include <linux/cleanup.h>
47 
48 #include <net/checksum.h>
49 
50 #include <linux/unaligned.h>
51 
52 #include <scsi/scsi.h>
53 #include <scsi/scsi_cmnd.h>
54 #include <scsi/scsi_device.h>
55 #include <scsi/scsi_host.h>
56 #include <scsi/scsicam.h>
57 #include <scsi/scsi_eh.h>
58 #include <scsi/scsi_tcq.h>
59 #include <scsi/scsi_dbg.h>
60 
61 #include "sd.h"
62 #include "scsi_logging.h"
63 
64 /* make sure inq_product_rev string corresponds to this version */
65 #define SDEBUG_VERSION "0191"	/* format to fit INQUIRY revision field */
66 static const char *sdebug_version_date = "20210520";
67 
68 #define MY_NAME "scsi_debug"
69 
70 /* Additional Sense Code (ASC) */
71 #define NO_ADDITIONAL_SENSE 0x0
72 #define OVERLAP_ATOMIC_COMMAND_ASC 0x0
73 #define OVERLAP_ATOMIC_COMMAND_ASCQ 0x23
74 #define LOGICAL_UNIT_NOT_READY 0x4
75 #define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
76 #define UNRECOVERED_READ_ERR 0x11
77 #define PARAMETER_LIST_LENGTH_ERR 0x1a
78 #define INVALID_OPCODE 0x20
79 #define LBA_OUT_OF_RANGE 0x21
80 #define INVALID_FIELD_IN_CDB 0x24
81 #define INVALID_FIELD_IN_PARAM_LIST 0x26
82 #define WRITE_PROTECTED 0x27
83 #define UA_RESET_ASC 0x29
84 #define UA_CHANGED_ASC 0x2a
85 #define TARGET_CHANGED_ASC 0x3f
86 #define LUNS_CHANGED_ASCQ 0x0e
87 #define INSUFF_RES_ASC 0x55
88 #define INSUFF_RES_ASCQ 0x3
89 #define POWER_ON_RESET_ASCQ 0x0
90 #define POWER_ON_OCCURRED_ASCQ 0x1
91 #define BUS_RESET_ASCQ 0x2	/* scsi bus reset occurred */
92 #define MODE_CHANGED_ASCQ 0x1	/* mode parameters changed */
93 #define CAPACITY_CHANGED_ASCQ 0x9
94 #define SAVING_PARAMS_UNSUP 0x39
95 #define TRANSPORT_PROBLEM 0x4b
96 #define THRESHOLD_EXCEEDED 0x5d
97 #define LOW_POWER_COND_ON 0x5e
98 #define MISCOMPARE_VERIFY_ASC 0x1d
99 #define MICROCODE_CHANGED_ASCQ 0x1	/* with TARGET_CHANGED_ASC */
100 #define MICROCODE_CHANGED_WO_RESET_ASCQ 0x16
101 #define WRITE_ERROR_ASC 0xc
102 #define UNALIGNED_WRITE_ASCQ 0x4
103 #define WRITE_BOUNDARY_ASCQ 0x5
104 #define READ_INVDATA_ASCQ 0x6
105 #define READ_BOUNDARY_ASCQ 0x7
106 #define ATTEMPT_ACCESS_GAP 0x9
107 #define INSUFF_ZONE_ASCQ 0xe
108 /* see drivers/scsi/sense_codes.h */
109 
110 /* Additional Sense Code Qualifier (ASCQ) */
111 #define ACK_NAK_TO 0x3
112 
113 /* Default values for driver parameters */
114 #define DEF_NUM_HOST   1
115 #define DEF_NUM_TGTS   1
116 #define DEF_MAX_LUNS   1
117 /* With these defaults, this driver will make 1 host with 1 target
118  * (id 0) containing 1 logical unit (lun 0). That is 1 device.
119  */
120 #define DEF_ATO 1
121 #define DEF_CDB_LEN 10
122 #define DEF_JDELAY   1		/* if > 0 unit is a jiffy */
123 #define DEF_DEV_SIZE_PRE_INIT   0
124 #define DEF_DEV_SIZE_MB   8
125 #define DEF_ZBC_DEV_SIZE_MB   128
126 #define DEF_DIF 0
127 #define DEF_DIX 0
128 #define DEF_PER_HOST_STORE false
129 #define DEF_D_SENSE   0
130 #define DEF_EVERY_NTH   0
131 #define DEF_FAKE_RW	0
132 #define DEF_GUARD 0
133 #define DEF_HOST_LOCK 0
134 #define DEF_LBPU 0
135 #define DEF_LBPWS 0
136 #define DEF_LBPWS10 0
137 #define DEF_LBPRZ 1
138 #define DEF_LOWEST_ALIGNED 0
139 #define DEF_NDELAY   0		/* if > 0 unit is a nanosecond */
140 #define DEF_NO_LUN_0   0
141 #define DEF_NUM_PARTS   0
142 #define DEF_OPTS   0
143 #define DEF_OPT_BLKS 1024
144 #define DEF_PHYSBLK_EXP 0
145 #define DEF_OPT_XFERLEN_EXP 0
146 #define DEF_PTYPE   TYPE_DISK
147 #define DEF_RANDOM false
148 #define DEF_REMOVABLE false
149 #define DEF_SCSI_LEVEL   7    /* INQUIRY, byte2 [6->SPC-4; 7->SPC-5] */
150 #define DEF_SECTOR_SIZE 512
151 #define DEF_UNMAP_ALIGNMENT 0
152 #define DEF_UNMAP_GRANULARITY 1
153 #define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
154 #define DEF_UNMAP_MAX_DESC 256
155 #define DEF_VIRTUAL_GB   0
156 #define DEF_VPD_USE_HOSTNO 1
157 #define DEF_WRITESAME_LENGTH 0xFFFF
158 #define DEF_ATOMIC_WR 0
159 #define DEF_ATOMIC_WR_MAX_LENGTH 8192
160 #define DEF_ATOMIC_WR_ALIGN 2
161 #define DEF_ATOMIC_WR_GRAN 2
162 #define DEF_ATOMIC_WR_MAX_LENGTH_BNDRY (DEF_ATOMIC_WR_MAX_LENGTH)
163 #define DEF_ATOMIC_WR_MAX_BNDRY 128
164 #define DEF_STRICT 0
165 #define DEF_STATISTICS false
166 #define DEF_SUBMIT_QUEUES 1
167 #define DEF_TUR_MS_TO_READY 0
168 #define DEF_UUID_CTL 0
169 #define JDELAY_OVERRIDDEN -9999
170 
171 /* Default parameters for ZBC drives */
172 #define DEF_ZBC_ZONE_SIZE_MB	128
173 #define DEF_ZBC_MAX_OPEN_ZONES	8
174 #define DEF_ZBC_NR_CONV_ZONES	1
175 
176 #define SDEBUG_LUN_0_VAL 0
177 
178 /* bit mask values for sdebug_opts */
179 #define SDEBUG_OPT_NOISE		1
180 #define SDEBUG_OPT_MEDIUM_ERR		2
181 #define SDEBUG_OPT_TIMEOUT		4
182 #define SDEBUG_OPT_RECOVERED_ERR	8
183 #define SDEBUG_OPT_TRANSPORT_ERR	16
184 #define SDEBUG_OPT_DIF_ERR		32
185 #define SDEBUG_OPT_DIX_ERR		64
186 #define SDEBUG_OPT_MAC_TIMEOUT		128
187 #define SDEBUG_OPT_SHORT_TRANSFER	0x100
188 #define SDEBUG_OPT_Q_NOISE		0x200
189 #define SDEBUG_OPT_ALL_TSF		0x400	/* ignore */
190 #define SDEBUG_OPT_RARE_TSF		0x800
191 #define SDEBUG_OPT_N_WCE		0x1000
192 #define SDEBUG_OPT_RESET_NOISE		0x2000
193 #define SDEBUG_OPT_NO_CDB_NOISE		0x4000
194 #define SDEBUG_OPT_HOST_BUSY		0x8000
195 #define SDEBUG_OPT_CMD_ABORT		0x10000
196 #define SDEBUG_OPT_ALL_NOISE (SDEBUG_OPT_NOISE | SDEBUG_OPT_Q_NOISE | \
197 			      SDEBUG_OPT_RESET_NOISE)
198 #define SDEBUG_OPT_ALL_INJECTING (SDEBUG_OPT_RECOVERED_ERR | \
199 				  SDEBUG_OPT_TRANSPORT_ERR | \
200 				  SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR | \
201 				  SDEBUG_OPT_SHORT_TRANSFER | \
202 				  SDEBUG_OPT_HOST_BUSY | \
203 				  SDEBUG_OPT_CMD_ABORT)
204 #define SDEBUG_OPT_RECOV_DIF_DIX (SDEBUG_OPT_RECOVERED_ERR | \
205 				  SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR)
206 
207 /* As indicated in SAM-5 and SPC-4 Unit Attentions (UAs) are returned in
208  * priority order. In the subset implemented here lower numbers have higher
209  * priority. The UA numbers should be a sequence starting from 0 with
210  * SDEBUG_NUM_UAS being 1 higher than the highest numbered UA. */
211 #define SDEBUG_UA_POR 0		/* Power on, reset, or bus device reset */
212 #define SDEBUG_UA_POOCCUR 1	/* Power on occurred */
213 #define SDEBUG_UA_BUS_RESET 2
214 #define SDEBUG_UA_MODE_CHANGED 3
215 #define SDEBUG_UA_CAPACITY_CHANGED 4
216 #define SDEBUG_UA_LUNS_CHANGED 5
217 #define SDEBUG_UA_MICROCODE_CHANGED 6	/* simulate firmware change */
218 #define SDEBUG_UA_MICROCODE_CHANGED_WO_RESET 7
219 #define SDEBUG_NUM_UAS 8
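
/*
 * A minimal sketch of how this ordering is consumed (see make_ua() further
 * down): because lower UA numbers have higher priority, the next UA to
 * report is simply the first set bit in the per-device bitmap:
 *
 *	k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS);
 *	if (k != SDEBUG_NUM_UAS) {
 *		... build sense for UA k, then consume it ...
 *		clear_bit(k, devip->uas_bm);
 *	}
 */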
220 
221 /* when 1==SDEBUG_OPT_MEDIUM_ERR, a medium error is simulated at this
222  * sector on read commands: */
223 #define OPT_MEDIUM_ERR_ADDR   0x1234 /* that's sector 4660 in decimal */
224 #define OPT_MEDIUM_ERR_NUM    10     /* number of consecutive medium errs */
225 
226 /* SDEBUG_CANQUEUE is the maximum number of commands that can be queued
227  * (for response) per submit queue at one time. Can be reduced by max_queue
228  * option. Command responses are not queued when jdelay=0 and ndelay=0. The
229  * per-device DEF_CMD_PER_LUN can be changed via sysfs:
230  * /sys/class/scsi_device/<h:c:t:l>/device/queue_depth
231  * but cannot exceed SDEBUG_CANQUEUE .
232  */
233 #define SDEBUG_CANQUEUE_WORDS  3	/* a WORD is the number of bits in a long */
234 #define SDEBUG_CANQUEUE  (SDEBUG_CANQUEUE_WORDS * BITS_PER_LONG)
235 #define DEF_CMD_PER_LUN  SDEBUG_CANQUEUE
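
/*
 * Worked example (assuming a typical 64-bit build where BITS_PER_LONG is
 * 64): SDEBUG_CANQUEUE = 3 * 64 = 192, so up to 192 responses may be
 * queued per submit queue, and DEF_CMD_PER_LUN defaults to the same 192.
 */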
236 
237 /* UA - Unit Attention; SA - Service Action; SSU - Start Stop Unit */
238 #define F_D_IN			1	/* Data-in command (e.g. READ) */
239 #define F_D_OUT			2	/* Data-out command (e.g. WRITE) */
240 #define F_D_OUT_MAYBE		4	/* WRITE SAME, NDOB bit */
241 #define F_D_UNKN		8
242 #define F_RL_WLUN_OK		0x10	/* allowed with REPORT LUNS W-LUN */
243 #define F_SKIP_UA		0x20	/* bypass UAs (e.g. INQUIRY command) */
244 #define F_DELAY_OVERR		0x40	/* for commands like INQUIRY */
245 #define F_SA_LOW		0x80	/* SA is in cdb byte 1, bits 4 to 0 */
246 #define F_SA_HIGH		0x100	/* SA is in cdb bytes 8 and 9 */
247 #define F_INV_OP		0x200	/* invalid opcode (not supported) */
248 #define F_FAKE_RW		0x400	/* bypass resp_*() when fake_rw set */
249 #define F_M_ACCESS		0x800	/* media access, reacts to SSU state */
250 #define F_SSU_DELAY		0x1000	/* SSU command delay (long-ish) */
251 #define F_SYNC_DELAY		0x2000	/* SYNCHRONIZE CACHE delay */
252 
253 /* Useful combinations of the above flags */
254 #define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
255 #define FF_MEDIA_IO (F_M_ACCESS | F_FAKE_RW)
256 #define FF_SA (F_SA_HIGH | F_SA_LOW)
257 #define F_LONG_DELAY		(F_SSU_DELAY | F_SYNC_DELAY)
258 
259 #define SDEBUG_MAX_PARTS 4
260 
261 #define SDEBUG_MAX_CMD_LEN 32
262 
263 #define SDEB_XA_NOT_IN_USE XA_MARK_1
264 
265 static struct kmem_cache *queued_cmd_cache;
266 
267 #define TO_QUEUED_CMD(scmd)  ((void *)(scmd)->host_scribble)
268 #define ASSIGN_QUEUED_CMD(scmnd, qc) { (scmnd)->host_scribble = (void *) qc; }
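
/*
 * Usage sketch for the two scribble macros above (illustrative only, not a
 * call site copied from this driver): a queued command is parked in the
 * scsi_cmnd's host_scribble on submission and recovered at completion:
 *
 *	ASSIGN_QUEUED_CMD(scmd, sqcp);
 *	...
 *	struct sdebug_queued_cmd *sqcp = TO_QUEUED_CMD(scmd);
 */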
269 
270 /* Zone types (zbcr05 table 25) */
271 enum sdebug_z_type {
272 	ZBC_ZTYPE_CNV	= 0x1,
273 	ZBC_ZTYPE_SWR	= 0x2,
274 	ZBC_ZTYPE_SWP	= 0x3,
275 	/* ZBC_ZTYPE_SOBR = 0x4, */
276 	ZBC_ZTYPE_GAP	= 0x5,
277 };
278 
279 /* enumeration names taken from table 26, zbcr05 */
280 enum sdebug_z_cond {
281 	ZBC_NOT_WRITE_POINTER	= 0x0,
282 	ZC1_EMPTY		= 0x1,
283 	ZC2_IMPLICIT_OPEN	= 0x2,
284 	ZC3_EXPLICIT_OPEN	= 0x3,
285 	ZC4_CLOSED		= 0x4,
286 	ZC6_READ_ONLY		= 0xd,
287 	ZC5_FULL		= 0xe,
288 	ZC7_OFFLINE		= 0xf,
289 };
290 
291 struct sdeb_zone_state {	/* ZBC: per zone state */
292 	enum sdebug_z_type z_type;
293 	enum sdebug_z_cond z_cond;
294 	bool z_non_seq_resource;
295 	unsigned int z_size;
296 	sector_t z_start;
297 	sector_t z_wp;
298 };
299 
300 enum sdebug_err_type {
301 	ERR_TMOUT_CMD		= 0,	/* make specific scsi command timeout */
302 	ERR_FAIL_QUEUE_CMD	= 1,	/* make specific scsi command's */
303 					/* queuecmd return failed */
304 	ERR_FAIL_CMD		= 2,	/* make specific scsi command's */
305 					/* queuecmd return succeed but */
306 					/* with errors set in scsi_cmnd */
307 	ERR_ABORT_CMD_FAILED	= 3,	/* control return FAILED from */
308 					/* scsi_debug_abort() */
309 	ERR_LUN_RESET_FAILED	= 4,	/* control return FAILED from */
310 					/* scsi_debug_device_reset() */
311 };
312 
313 struct sdebug_err_inject {
314 	int type;
315 	struct list_head list;
316 	int cnt;
317 	unsigned char cmd;
318 	struct rcu_head rcu;
319 
320 	union {
321 		/*
322 		 * For ERR_FAIL_QUEUE_CMD
323 		 */
324 		int queuecmd_ret;
325 
326 		/*
327 		 * For ERR_FAIL_CMD
328 		 */
329 		struct {
330 			unsigned char host_byte;
331 			unsigned char driver_byte;
332 			unsigned char status_byte;
333 			unsigned char sense_key;
334 			unsigned char asc;
335 			unsigned char asq;
336 		};
337 	};
338 };
339 
340 struct sdebug_dev_info {
341 	struct list_head dev_list;
342 	unsigned int channel;
343 	unsigned int target;
344 	u64 lun;
345 	uuid_t lu_name;
346 	struct sdebug_host_info *sdbg_host;
347 	unsigned long uas_bm[1];
348 	atomic_t stopped;	/* 1: by SSU, 2: device start */
349 	bool used;
350 
351 	/* For ZBC devices */
352 	bool zoned;
353 	unsigned int zcap;
354 	unsigned int zsize;
355 	unsigned int zsize_shift;
356 	unsigned int nr_zones;
357 	unsigned int nr_conv_zones;
358 	unsigned int nr_seq_zones;
359 	unsigned int nr_imp_open;
360 	unsigned int nr_exp_open;
361 	unsigned int nr_closed;
362 	unsigned int max_open;
363 	ktime_t create_ts;	/* time (since boot) at which this device was created */
364 	struct sdeb_zone_state *zstate;
365 
366 	struct dentry *debugfs_entry;
367 	struct spinlock list_lock;
368 	struct list_head inject_err_list;
369 };
370 
371 struct sdebug_target_info {
372 	bool reset_fail;
373 	struct dentry *debugfs_entry;
374 };
375 
376 struct sdebug_host_info {
377 	struct list_head host_list;
378 	int si_idx;	/* sdeb_store_info (per host) xarray index */
379 	struct Scsi_Host *shost;
380 	struct device dev;
381 	struct list_head dev_info_list;
382 };
383 
384 /* There is an xarray of pointers to this struct's objects, one per host */
385 struct sdeb_store_info {
386 	rwlock_t macc_data_lck;	/* for media data access on this store */
387 	rwlock_t macc_meta_lck;	/* for atomic media meta access on this store */
388 	rwlock_t macc_sector_lck;	/* per-sector media data access on this store */
389 	u8 *storep;		/* user data storage (ram) */
390 	struct t10_pi_tuple *dif_storep; /* protection info */
391 	void *map_storep;	/* provisioning map */
392 };
393 
394 #define dev_to_sdebug_host(d)	\
395 	container_of(d, struct sdebug_host_info, dev)
396 
397 #define shost_to_sdebug_host(shost)	\
398 	dev_to_sdebug_host(shost->dma_dev)
399 
400 enum sdeb_defer_type {SDEB_DEFER_NONE = 0, SDEB_DEFER_HRT = 1,
401 		      SDEB_DEFER_WQ = 2, SDEB_DEFER_POLL = 3};
402 
403 struct sdebug_defer {
404 	struct hrtimer hrt;
405 	struct execute_work ew;
406 	ktime_t cmpl_ts;	/* time since boot to complete this cmd */
407 	int issuing_cpu;
408 	bool aborted;	/* true when blk_abort_request() already called */
409 	enum sdeb_defer_type defer_t;
410 };
411 
412 struct sdebug_device_access_info {
413 	bool atomic_write;
414 	u64 lba;
415 	u32 num;
416 	struct scsi_cmnd *self;
417 };
418 
419 struct sdebug_queued_cmd {
420 	/* corresponding bit set in in_use_bm[] in owning struct sdebug_queue
421 	 * instance indicates this slot is in use.
422 	 */
423 	struct sdebug_defer sd_dp;
424 	struct scsi_cmnd *scmd;
425 	struct sdebug_device_access_info *i;
426 };
427 
428 struct sdebug_scsi_cmd {
429 	spinlock_t   lock;
430 };
431 
432 static atomic_t sdebug_cmnd_count;   /* number of incoming commands */
433 static atomic_t sdebug_completions;  /* count of deferred completions */
434 static atomic_t sdebug_miss_cpus;    /* submission + completion cpus differ */
435 static atomic_t sdebug_a_tsf;	     /* 'almost task set full' counter */
436 static atomic_t sdeb_inject_pending;
437 static atomic_t sdeb_mq_poll_count;  /* bumped when mq_poll returns > 0 */
438 
439 struct opcode_info_t {
440 	u8 num_attached;	/* 0 if this is it (i.e. a leaf); use 0xff */
441 				/* for terminating element */
442 	u8 opcode;		/* if num_attached > 0, preferred */
443 	u16 sa;			/* service action */
444 	u32 flags;		/* OR-ed set of SDEB_F_* */
445 	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
446 	const struct opcode_info_t *arrp;  /* num_attached elements or NULL */
447 	u8 len_mask[16];	/* len_mask[0]-->cdb_len, then mask for cdb */
448 				/* 1 to min(cdb_len, 15); ignore cdb[15...] */
449 };
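
/*
 * len_mask example: the INQUIRY entry below uses {6, 0xe3, 0xff, ...}, i.e.
 * a 6 byte cdb where only bits 7..5, 1 and 0 of cdb[1] may be set; with the
 * strict parameter enabled, any other set bit draws INVALID FIELD IN CDB.
 */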
450 
451 /* SCSI opcodes (first byte of cdb) of interest mapped onto these indexes */
452 enum sdeb_opcode_index {
453 	SDEB_I_INVALID_OPCODE =	0,
454 	SDEB_I_INQUIRY = 1,
455 	SDEB_I_REPORT_LUNS = 2,
456 	SDEB_I_REQUEST_SENSE = 3,
457 	SDEB_I_TEST_UNIT_READY = 4,
458 	SDEB_I_MODE_SENSE = 5,		/* 6, 10 */
459 	SDEB_I_MODE_SELECT = 6,		/* 6, 10 */
460 	SDEB_I_LOG_SENSE = 7,
461 	SDEB_I_READ_CAPACITY = 8,	/* 10; 16 is in SA_IN(16) */
462 	SDEB_I_READ = 9,		/* 6, 10, 12, 16 */
463 	SDEB_I_WRITE = 10,		/* 6, 10, 12, 16 */
464 	SDEB_I_START_STOP = 11,
465 	SDEB_I_SERV_ACT_IN_16 = 12,	/* add ...SERV_ACT_IN_12 if needed */
466 	SDEB_I_SERV_ACT_OUT_16 = 13,	/* add ...SERV_ACT_OUT_12 if needed */
467 	SDEB_I_MAINT_IN = 14,
468 	SDEB_I_MAINT_OUT = 15,
469 	SDEB_I_VERIFY = 16,		/* VERIFY(10), VERIFY(16) */
470 	SDEB_I_VARIABLE_LEN = 17,	/* READ(32), WRITE(32), WR_SCAT(32) */
471 	SDEB_I_RESERVE = 18,		/* 6, 10 */
472 	SDEB_I_RELEASE = 19,		/* 6, 10 */
473 	SDEB_I_ALLOW_REMOVAL = 20,	/* PREVENT ALLOW MEDIUM REMOVAL */
474 	SDEB_I_REZERO_UNIT = 21,	/* REWIND in SSC */
475 	SDEB_I_ATA_PT = 22,		/* 12, 16 */
476 	SDEB_I_SEND_DIAG = 23,
477 	SDEB_I_UNMAP = 24,
478 	SDEB_I_WRITE_BUFFER = 25,
479 	SDEB_I_WRITE_SAME = 26,		/* 10, 16 */
480 	SDEB_I_SYNC_CACHE = 27,		/* 10, 16 */
481 	SDEB_I_COMP_WRITE = 28,
482 	SDEB_I_PRE_FETCH = 29,		/* 10, 16 */
483 	SDEB_I_ZONE_OUT = 30,		/* 0x94+SA; includes no data xfer */
484 	SDEB_I_ZONE_IN = 31,		/* 0x95+SA; all have data-in */
485 	SDEB_I_ATOMIC_WRITE_16 = 32,
486 	SDEB_I_LAST_ELEM_P1 = 33,	/* keep this last (previous + 1) */
487 };
488 
489 
490 static const unsigned char opcode_ind_arr[256] = {
491 /* 0x0; 0x0->0x1f: 6 byte cdbs */
492 	SDEB_I_TEST_UNIT_READY, SDEB_I_REZERO_UNIT, 0, SDEB_I_REQUEST_SENSE,
493 	    0, 0, 0, 0,
494 	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, 0,
495 	0, 0, SDEB_I_INQUIRY, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
496 	    SDEB_I_RELEASE,
497 	0, 0, SDEB_I_MODE_SENSE, SDEB_I_START_STOP, 0, SDEB_I_SEND_DIAG,
498 	    SDEB_I_ALLOW_REMOVAL, 0,
499 /* 0x20; 0x20->0x3f: 10 byte cdbs */
500 	0, 0, 0, 0, 0, SDEB_I_READ_CAPACITY, 0, 0,
501 	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, SDEB_I_VERIFY,
502 	0, 0, 0, 0, SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, 0,
503 	0, 0, 0, SDEB_I_WRITE_BUFFER, 0, 0, 0, 0,
504 /* 0x40; 0x40->0x5f: 10 byte cdbs */
505 	0, SDEB_I_WRITE_SAME, SDEB_I_UNMAP, 0, 0, 0, 0, 0,
506 	0, 0, 0, 0, 0, SDEB_I_LOG_SENSE, 0, 0,
507 	0, 0, 0, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
508 	    SDEB_I_RELEASE,
509 	0, 0, SDEB_I_MODE_SENSE, 0, 0, 0, 0, 0,
510 /* 0x60; 0x60->0x7d are reserved, 0x7e is "extended cdb" */
511 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
512 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
513 	0, SDEB_I_VARIABLE_LEN,
514 /* 0x80; 0x80->0x9f: 16 byte cdbs */
515 	0, 0, 0, 0, 0, SDEB_I_ATA_PT, 0, 0,
516 	SDEB_I_READ, SDEB_I_COMP_WRITE, SDEB_I_WRITE, 0,
517 	0, 0, 0, SDEB_I_VERIFY,
518 	SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, SDEB_I_WRITE_SAME,
519 	SDEB_I_ZONE_OUT, SDEB_I_ZONE_IN, 0, 0,
520 	0, 0, 0, 0,
521 	SDEB_I_ATOMIC_WRITE_16, 0, SDEB_I_SERV_ACT_IN_16, SDEB_I_SERV_ACT_OUT_16,
522 /* 0xa0; 0xa0->0xbf: 12 byte cdbs */
523 	SDEB_I_REPORT_LUNS, SDEB_I_ATA_PT, 0, SDEB_I_MAINT_IN,
524 	     SDEB_I_MAINT_OUT, 0, 0, 0,
525 	SDEB_I_READ, 0 /* SDEB_I_SERV_ACT_OUT_12 */, SDEB_I_WRITE,
526 	     0 /* SDEB_I_SERV_ACT_IN_12 */, 0, 0, 0, 0,
527 	0, 0, 0, 0, 0, 0, 0, 0,
528 	0, 0, 0, 0, 0, 0, 0, 0,
529 /* 0xc0; 0xc0->0xff: vendor specific */
530 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
531 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
532 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
533 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
534 };
535 
536 /*
537  * The following "response" functions return the SCSI mid-level's 4 byte
538  * tuple-in-an-int. To handle commands with an IMMED bit, for faster
539  * command completion, they can OR their return value with
540  * SDEG_RES_IMMED_MASK.
541  */
542 #define SDEG_RES_IMMED_MASK 0x40000000
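
/*
 * For example, a handler for a command whose IMMED bit is set could signal
 * an early (immediate) completion like this (a sketch with a hypothetical
 * local result variable, not a quote of any resp_*() below):
 *
 *	return res | SDEG_RES_IMMED_MASK;
 */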
543 
544 static int resp_inquiry(struct scsi_cmnd *, struct sdebug_dev_info *);
545 static int resp_report_luns(struct scsi_cmnd *, struct sdebug_dev_info *);
546 static int resp_requests(struct scsi_cmnd *, struct sdebug_dev_info *);
547 static int resp_mode_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
548 static int resp_mode_select(struct scsi_cmnd *, struct sdebug_dev_info *);
549 static int resp_log_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
550 static int resp_readcap(struct scsi_cmnd *, struct sdebug_dev_info *);
551 static int resp_read_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
552 static int resp_write_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
553 static int resp_write_scat(struct scsi_cmnd *, struct sdebug_dev_info *);
554 static int resp_start_stop(struct scsi_cmnd *, struct sdebug_dev_info *);
555 static int resp_readcap16(struct scsi_cmnd *, struct sdebug_dev_info *);
556 static int resp_get_lba_status(struct scsi_cmnd *, struct sdebug_dev_info *);
557 static int resp_get_stream_status(struct scsi_cmnd *scp,
558 				  struct sdebug_dev_info *devip);
559 static int resp_report_tgtpgs(struct scsi_cmnd *, struct sdebug_dev_info *);
560 static int resp_unmap(struct scsi_cmnd *, struct sdebug_dev_info *);
561 static int resp_rsup_opcodes(struct scsi_cmnd *, struct sdebug_dev_info *);
562 static int resp_rsup_tmfs(struct scsi_cmnd *, struct sdebug_dev_info *);
563 static int resp_verify(struct scsi_cmnd *, struct sdebug_dev_info *);
564 static int resp_write_same_10(struct scsi_cmnd *, struct sdebug_dev_info *);
565 static int resp_write_same_16(struct scsi_cmnd *, struct sdebug_dev_info *);
566 static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *);
567 static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *);
568 static int resp_sync_cache(struct scsi_cmnd *, struct sdebug_dev_info *);
569 static int resp_pre_fetch(struct scsi_cmnd *, struct sdebug_dev_info *);
570 static int resp_report_zones(struct scsi_cmnd *, struct sdebug_dev_info *);
571 static int resp_atomic_write(struct scsi_cmnd *, struct sdebug_dev_info *);
572 static int resp_open_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
573 static int resp_close_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
574 static int resp_finish_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
575 static int resp_rwp_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
576 
577 static int sdebug_do_add_host(bool mk_new_store);
578 static int sdebug_add_host_helper(int per_host_idx);
579 static void sdebug_do_remove_host(bool the_end);
580 static int sdebug_add_store(void);
581 static void sdebug_erase_store(int idx, struct sdeb_store_info *sip);
582 static void sdebug_erase_all_stores(bool apart_from_first);
583 
584 static void sdebug_free_queued_cmd(struct sdebug_queued_cmd *sqcp);
585 
586 /*
587  * The following are overflow arrays for cdbs that "hit" the same index in
588  * the opcode_info_arr array. The most time sensitive (or commonly used) cdb
589  * should be placed in opcode_info_arr[], the others should be placed here.
590  */
591 static const struct opcode_info_t msense_iarr[] = {
592 	{0, 0x1a, 0, F_D_IN, NULL, NULL,
593 	    {6,  0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
594 };
595 
596 static const struct opcode_info_t mselect_iarr[] = {
597 	{0, 0x15, 0, F_D_OUT, NULL, NULL,
598 	    {6,  0xf1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
599 };
600 
601 static const struct opcode_info_t read_iarr[] = {
602 	{0, 0x28, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(10) */
603 	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
604 	     0, 0, 0, 0} },
605 	{0, 0x8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL, /* READ(6) */
606 	    {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
607 	{0, 0xa8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(12) */
608 	    {12,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf,
609 	     0xc7, 0, 0, 0, 0} },
610 };
611 
612 static const struct opcode_info_t write_iarr[] = {
613 	{0, 0x2a, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(10) */
614 	    NULL, {10,  0xfb, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7,
615 		   0, 0, 0, 0, 0, 0} },
616 	{0, 0xa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,   /* WRITE(6) */
617 	    NULL, {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0,
618 		   0, 0, 0} },
619 	{0, 0xaa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(12) */
620 	    NULL, {12,  0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
621 		   0xbf, 0xc7, 0, 0, 0, 0} },
622 };
623 
624 static const struct opcode_info_t verify_iarr[] = {
625 	{0, 0x2f, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,/* VERIFY(10) */
626 	    NULL, {10,  0xf7, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xff, 0xff, 0xc7,
627 		   0, 0, 0, 0, 0, 0} },
628 };
629 
630 static const struct opcode_info_t sa_in_16_iarr[] = {
631 	{0, 0x9e, 0x12, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL,
632 	    {16,  0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
633 	     0xff, 0xff, 0xff, 0, 0xc7} },	/* GET LBA STATUS(16) */
634 	{0, 0x9e, 0x16, F_SA_LOW | F_D_IN, resp_get_stream_status, NULL,
635 	    {16, 0x16, 0, 0, 0xff, 0xff, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff,
636 	     0, 0} },	/* GET STREAM STATUS */
637 };
638 
639 static const struct opcode_info_t vl_iarr[] = {	/* VARIABLE LENGTH */
640 	{0, 0x7f, 0xb, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_dt0,
641 	    NULL, {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0xb, 0xfa,
642 		   0, 0xff, 0xff, 0xff, 0xff} },	/* WRITE(32) */
643 	{0, 0x7f, 0x11, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
644 	    NULL, {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x11, 0xf8,
645 		   0, 0xff, 0xff, 0x0, 0x0} },	/* WRITE SCATTERED(32) */
646 };
647 
648 static const struct opcode_info_t maint_in_iarr[] = {	/* MAINT IN */
649 	{0, 0xa3, 0xc, F_SA_LOW | F_D_IN, resp_rsup_opcodes, NULL,
650 	    {12,  0xc, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
651 	     0xc7, 0, 0, 0, 0} }, /* REPORT SUPPORTED OPERATION CODES */
652 	{0, 0xa3, 0xd, F_SA_LOW | F_D_IN, resp_rsup_tmfs, NULL,
653 	    {12,  0xd, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
654 	     0, 0} },	/* REPORT SUPPORTED TASK MANAGEMENT FUNCTIONS */
655 };
656 
657 static const struct opcode_info_t write_same_iarr[] = {
658 	{0, 0x93, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_write_same_16, NULL,
659 	    {16,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
660 	     0xff, 0xff, 0xff, 0x3f, 0xc7} },		/* WRITE SAME(16) */
661 };
662 
663 static const struct opcode_info_t reserve_iarr[] = {
664 	{0, 0x16, 0, F_D_OUT, NULL, NULL,		/* RESERVE(6) */
665 	    {6,  0x1f, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
666 };
667 
668 static const struct opcode_info_t release_iarr[] = {
669 	{0, 0x17, 0, F_D_OUT, NULL, NULL,		/* RELEASE(6) */
670 	    {6,  0x1f, 0xff, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
671 };
672 
673 static const struct opcode_info_t sync_cache_iarr[] = {
674 	{0, 0x91, 0, F_SYNC_DELAY | F_M_ACCESS, resp_sync_cache, NULL,
675 	    {16,  0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
676 	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* SYNC_CACHE (16) */
677 };
678 
679 static const struct opcode_info_t pre_fetch_iarr[] = {
680 	{0, 0x90, 0, F_SYNC_DELAY | FF_MEDIA_IO, resp_pre_fetch, NULL,
681 	    {16,  0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
682 	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* PRE-FETCH (16) */
683 };
684 
685 static const struct opcode_info_t zone_out_iarr[] = {	/* ZONE OUT(16) */
686 	{0, 0x94, 0x1, F_SA_LOW | F_M_ACCESS, resp_close_zone, NULL,
687 	    {16, 0x1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
688 	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },	/* CLOSE ZONE */
689 	{0, 0x94, 0x2, F_SA_LOW | F_M_ACCESS, resp_finish_zone, NULL,
690 	    {16, 0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
691 	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },	/* FINISH ZONE */
692 	{0, 0x94, 0x4, F_SA_LOW | F_M_ACCESS, resp_rwp_zone, NULL,
693 	    {16, 0x4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
694 	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },  /* RESET WRITE POINTER */
695 };
696 
697 static const struct opcode_info_t zone_in_iarr[] = {	/* ZONE IN(16) */
698 	{0, 0x95, 0x6, F_SA_LOW | F_D_IN | F_M_ACCESS, NULL, NULL,
699 	    {16, 0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
700 	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* REPORT ZONES */
701 };
702 
703 
704 /* This array is accessed via SDEB_I_* values. Make sure all are mapped,
705  * plus the terminating elements for logic that scans this table such as
706  * REPORT SUPPORTED OPERATION CODES. */
707 static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEM_P1 + 1] = {
708 /* 0 */
709 	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL,	/* unknown opcodes */
710 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
711 	{0, 0x12, 0, FF_RESPOND | F_D_IN, resp_inquiry, NULL, /* INQUIRY */
712 	    {6,  0xe3, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
713 	{0, 0xa0, 0, FF_RESPOND | F_D_IN, resp_report_luns, NULL,
714 	    {12,  0xe3, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
715 	     0, 0} },					/* REPORT LUNS */
716 	{0, 0x3, 0, FF_RESPOND | F_D_IN, resp_requests, NULL,
717 	    {6,  0xe1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
718 	{0, 0x0, 0, F_M_ACCESS | F_RL_WLUN_OK, NULL, NULL,/* TEST UNIT READY */
719 	    {6,  0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
720 /* 5 */
721 	{ARRAY_SIZE(msense_iarr), 0x5a, 0, F_D_IN,	/* MODE SENSE(10) */
722 	    resp_mode_sense, msense_iarr, {10,  0xf8, 0xff, 0xff, 0, 0, 0,
723 		0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
724 	{ARRAY_SIZE(mselect_iarr), 0x55, 0, F_D_OUT,	/* MODE SELECT(10) */
725 	    resp_mode_select, mselect_iarr, {10,  0xf1, 0, 0, 0, 0, 0, 0xff,
726 		0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
727 	{0, 0x4d, 0, F_D_IN, resp_log_sense, NULL,	/* LOG SENSE */
728 	    {10,  0xe3, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0,
729 	     0, 0, 0} },
730 	{0, 0x25, 0, F_D_IN, resp_readcap, NULL,    /* READ CAPACITY(10) */
731 	    {10,  0xe1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0x1, 0xc7, 0, 0, 0, 0,
732 	     0, 0} },
733 	{ARRAY_SIZE(read_iarr), 0x88, 0, F_D_IN | FF_MEDIA_IO, /* READ(16) */
734 	    resp_read_dt0, read_iarr, {16,  0xfe, 0xff, 0xff, 0xff, 0xff,
735 	    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
736 /* 10 */
737 	{ARRAY_SIZE(write_iarr), 0x8a, 0, F_D_OUT | FF_MEDIA_IO,
738 	    resp_write_dt0, write_iarr,			/* WRITE(16) */
739 		{16,  0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
740 		 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
741 	{0, 0x1b, 0, F_SSU_DELAY, resp_start_stop, NULL,/* START STOP UNIT */
742 	    {6,  0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
743 	{ARRAY_SIZE(sa_in_16_iarr), 0x9e, 0x10, F_SA_LOW | F_D_IN,
744 	    resp_readcap16, sa_in_16_iarr, /* SA_IN(16), READ CAPACITY(16) */
745 		{16,  0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
746 		 0xff, 0xff, 0xff, 0xff, 0x1, 0xc7} },
747 	{0, 0x9f, 0x12, F_SA_LOW | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
748 	    NULL, {16,  0x12, 0xf9, 0x0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff,
749 	    0xff, 0xff, 0xff, 0xff, 0xc7} },  /* SA_OUT(16), WRITE SCAT(16) */
750 	{ARRAY_SIZE(maint_in_iarr), 0xa3, 0xa, F_SA_LOW | F_D_IN,
751 	    resp_report_tgtpgs,	/* MAINT IN, REPORT TARGET PORT GROUPS */
752 		maint_in_iarr, {12,  0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff,
753 				0xff, 0, 0xc7, 0, 0, 0, 0} },
754 /* 15 */
755 	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */
756 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
757 	{ARRAY_SIZE(verify_iarr), 0x8f, 0,
758 	    F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,	/* VERIFY(16) */
759 	    verify_iarr, {16,  0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
760 			  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },
761 	{ARRAY_SIZE(vl_iarr), 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_MEDIA_IO,
762 	    resp_read_dt0, vl_iarr,	/* VARIABLE LENGTH, READ(32) */
763 	    {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x9, 0xfe, 0, 0xff, 0xff,
764 	     0xff, 0xff} },
765 	{ARRAY_SIZE(reserve_iarr), 0x56, 0, F_D_OUT,
766 	    NULL, reserve_iarr,	/* RESERVE(10) <no response function> */
767 	    {10,  0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
768 	     0} },
769 	{ARRAY_SIZE(release_iarr), 0x57, 0, F_D_OUT,
770 	    NULL, release_iarr, /* RELEASE(10) <no response function> */
771 	    {10,  0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
772 	     0} },
773 /* 20 */
774 	{0, 0x1e, 0, 0, NULL, NULL, /* ALLOW REMOVAL */
775 	    {6,  0, 0, 0, 0x3, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
776 	{0, 0x1, 0, 0, resp_start_stop, NULL, /* REWIND ?? */
777 	    {6,  0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
778 	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */
779 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
780 	{0, 0x1d, F_D_OUT, 0, NULL, NULL,	/* SEND DIAGNOSTIC */
781 	    {6,  0xf7, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
782 	{0, 0x42, 0, F_D_OUT | FF_MEDIA_IO, resp_unmap, NULL, /* UNMAP */
783 	    {10,  0x1, 0, 0, 0, 0, 0x3f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
784 /* 25 */
785 	{0, 0x3b, 0, F_D_OUT_MAYBE, resp_write_buffer, NULL,
786 	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0,
787 	     0, 0, 0, 0} },			/* WRITE_BUFFER */
788 	{ARRAY_SIZE(write_same_iarr), 0x41, 0, F_D_OUT_MAYBE | FF_MEDIA_IO,
789 	    resp_write_same_10, write_same_iarr,	/* WRITE SAME(10) */
790 		{10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0,
791 		 0, 0, 0, 0, 0} },
792 	{ARRAY_SIZE(sync_cache_iarr), 0x35, 0, F_SYNC_DELAY | F_M_ACCESS,
793 	    resp_sync_cache, sync_cache_iarr,
794 	    {10,  0x7, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
795 	     0, 0, 0, 0} },			/* SYNC_CACHE (10) */
796 	{0, 0x89, 0, F_D_OUT | FF_MEDIA_IO, resp_comp_write, NULL,
797 	    {16,  0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0,
798 	     0, 0xff, 0x3f, 0xc7} },		/* COMPARE AND WRITE */
799 	{ARRAY_SIZE(pre_fetch_iarr), 0x34, 0, F_SYNC_DELAY | FF_MEDIA_IO,
800 	    resp_pre_fetch, pre_fetch_iarr,
801 	    {10,  0x2, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
802 	     0, 0, 0, 0} },			/* PRE-FETCH (10) */
803 
804 /* 30 */
805 	{ARRAY_SIZE(zone_out_iarr), 0x94, 0x3, F_SA_LOW | F_M_ACCESS,
806 	    resp_open_zone, zone_out_iarr, /* ZONE_OUT(16), OPEN ZONE */
807 		{16,  0x3 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
808 		 0xff, 0xff, 0x0, 0x0, 0xff, 0xff, 0x1, 0xc7} },
809 	{ARRAY_SIZE(zone_in_iarr), 0x95, 0x0, F_SA_LOW | F_M_ACCESS,
810 	    resp_report_zones, zone_in_iarr, /* ZONE_IN(16), REPORT ZONES */
811 		{16,  0x0 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
812 		 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xc7} },
813 /* 31 */
814 	{0, 0x0, 0x0, F_D_OUT | FF_MEDIA_IO,
815 	    resp_atomic_write, NULL, /* ATOMIC WRITE 16 */
816 		{16,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
817 		 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff} },
818 /* sentinel */
819 	{0xff, 0, 0, 0, NULL, NULL,		/* terminating element */
820 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
821 };
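
/*
 * Lookup sketch showing how the two tables cooperate (the real scan in this
 * driver's command path also honors the 0xff terminating element):
 *
 *	int idx = opcode_ind_arr[cdb[0]];
 *	const struct opcode_info_t *oip = &opcode_info_arr[idx];
 *	if ((FF_SA & oip->flags) && oip->num_attached) {
 *		... fetch the service action from cdb[1] & 0x1f (F_SA_LOW)
 *		... or cdb[8..9] (F_SA_HIGH); if it differs from oip->sa,
 *		... walk oip->arrp[0 .. num_attached - 1] for a match
 *	}
 */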
822 
823 static int sdebug_num_hosts;
824 static int sdebug_add_host = DEF_NUM_HOST;  /* in sysfs this is relative */
825 static int sdebug_ato = DEF_ATO;
826 static int sdebug_cdb_len = DEF_CDB_LEN;
827 static int sdebug_jdelay = DEF_JDELAY;	/* if > 0 then unit is jiffies */
828 static int sdebug_dev_size_mb = DEF_DEV_SIZE_PRE_INIT;
829 static int sdebug_dif = DEF_DIF;
830 static int sdebug_dix = DEF_DIX;
831 static int sdebug_dsense = DEF_D_SENSE;
832 static int sdebug_every_nth = DEF_EVERY_NTH;
833 static int sdebug_fake_rw = DEF_FAKE_RW;
834 static unsigned int sdebug_guard = DEF_GUARD;
835 static int sdebug_host_max_queue;	/* per host */
836 static int sdebug_lowest_aligned = DEF_LOWEST_ALIGNED;
837 static int sdebug_max_luns = DEF_MAX_LUNS;
838 static int sdebug_max_queue = SDEBUG_CANQUEUE;	/* per submit queue */
839 static unsigned int sdebug_medium_error_start = OPT_MEDIUM_ERR_ADDR;
840 static int sdebug_medium_error_count = OPT_MEDIUM_ERR_NUM;
841 static int sdebug_ndelay = DEF_NDELAY;	/* if > 0 then unit is nanoseconds */
842 static int sdebug_no_lun_0 = DEF_NO_LUN_0;
843 static int sdebug_no_uld;
844 static int sdebug_num_parts = DEF_NUM_PARTS;
845 static int sdebug_num_tgts = DEF_NUM_TGTS; /* targets per host */
846 static int sdebug_opt_blks = DEF_OPT_BLKS;
847 static int sdebug_opts = DEF_OPTS;
848 static int sdebug_physblk_exp = DEF_PHYSBLK_EXP;
849 static int sdebug_opt_xferlen_exp = DEF_OPT_XFERLEN_EXP;
850 static int sdebug_ptype = DEF_PTYPE; /* SCSI peripheral device type */
851 static int sdebug_scsi_level = DEF_SCSI_LEVEL;
852 static int sdebug_sector_size = DEF_SECTOR_SIZE;
853 static int sdeb_tur_ms_to_ready = DEF_TUR_MS_TO_READY;
854 static int sdebug_virtual_gb = DEF_VIRTUAL_GB;
855 static int sdebug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
856 static unsigned int sdebug_lbpu = DEF_LBPU;
857 static unsigned int sdebug_lbpws = DEF_LBPWS;
858 static unsigned int sdebug_lbpws10 = DEF_LBPWS10;
859 static unsigned int sdebug_lbprz = DEF_LBPRZ;
860 static unsigned int sdebug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
861 static unsigned int sdebug_unmap_granularity = DEF_UNMAP_GRANULARITY;
862 static unsigned int sdebug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
863 static unsigned int sdebug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
864 static unsigned int sdebug_write_same_length = DEF_WRITESAME_LENGTH;
865 static unsigned int sdebug_atomic_wr = DEF_ATOMIC_WR;
866 static unsigned int sdebug_atomic_wr_max_length = DEF_ATOMIC_WR_MAX_LENGTH;
867 static unsigned int sdebug_atomic_wr_align = DEF_ATOMIC_WR_ALIGN;
868 static unsigned int sdebug_atomic_wr_gran = DEF_ATOMIC_WR_GRAN;
869 static unsigned int sdebug_atomic_wr_max_length_bndry =
870 			DEF_ATOMIC_WR_MAX_LENGTH_BNDRY;
871 static unsigned int sdebug_atomic_wr_max_bndry = DEF_ATOMIC_WR_MAX_BNDRY;
872 static int sdebug_uuid_ctl = DEF_UUID_CTL;
873 static bool sdebug_random = DEF_RANDOM;
874 static bool sdebug_per_host_store = DEF_PER_HOST_STORE;
875 static bool sdebug_removable = DEF_REMOVABLE;
876 static bool sdebug_clustering;
877 static bool sdebug_host_lock = DEF_HOST_LOCK;
878 static bool sdebug_strict = DEF_STRICT;
879 static bool sdebug_any_injecting_opt;
880 static bool sdebug_no_rwlock;
881 static bool sdebug_verbose;
882 static bool have_dif_prot;
883 static bool write_since_sync;
884 static bool sdebug_statistics = DEF_STATISTICS;
885 static bool sdebug_wp;
886 static bool sdebug_allow_restart;
887 static enum {
888 	BLK_ZONED_NONE	= 0,
889 	BLK_ZONED_HA	= 1,
890 	BLK_ZONED_HM	= 2,
891 } sdeb_zbc_model = BLK_ZONED_NONE;
892 static char *sdeb_zbc_model_s;
893 
894 enum sam_lun_addr_method {SAM_LUN_AM_PERIPHERAL = 0x0,
895 			  SAM_LUN_AM_FLAT = 0x1,
896 			  SAM_LUN_AM_LOGICAL_UNIT = 0x2,
897 			  SAM_LUN_AM_EXTENDED = 0x3};
898 static enum sam_lun_addr_method sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
899 static int sdebug_lun_am_i = (int)SAM_LUN_AM_PERIPHERAL;
900 
901 static unsigned int sdebug_store_sectors;
902 static sector_t sdebug_capacity;	/* in sectors */
903 
904 /* old BIOS stuff, kernel may get rid of them but some mode sense pages
905    may still need them */
906 static int sdebug_heads;		/* heads per disk */
907 static int sdebug_cylinders_per;	/* cylinders per surface */
908 static int sdebug_sectors_per;		/* sectors per cylinder */
909 
910 static LIST_HEAD(sdebug_host_list);
911 static DEFINE_MUTEX(sdebug_host_list_mutex);
912 
913 static struct xarray per_store_arr;
914 static struct xarray *per_store_ap = &per_store_arr;
915 static int sdeb_first_idx = -1;		/* invalid index ==> none created */
916 static int sdeb_most_recent_idx = -1;
917 static DEFINE_RWLOCK(sdeb_fake_rw_lck);	/* need a RW lock when fake_rw=1 */
918 
919 static unsigned long map_size;
920 static int num_aborts;
921 static int num_dev_resets;
922 static int num_target_resets;
923 static int num_bus_resets;
924 static int num_host_resets;
925 static int dix_writes;
926 static int dix_reads;
927 static int dif_errors;
928 
929 /* ZBC global data */
930 static bool sdeb_zbc_in_use;	/* true for host-aware and host-managed disks */
931 static int sdeb_zbc_zone_cap_mb;
932 static int sdeb_zbc_zone_size_mb;
933 static int sdeb_zbc_max_open = DEF_ZBC_MAX_OPEN_ZONES;
934 static int sdeb_zbc_nr_conv = DEF_ZBC_NR_CONV_ZONES;
935 
936 static int submit_queues = DEF_SUBMIT_QUEUES;  /* > 1 for multi-queue (mq) */
937 static int poll_queues; /* io_uring iopoll interface */
938 
939 static atomic_long_t writes_by_group_number[64];
940 
941 static char sdebug_proc_name[] = MY_NAME;
942 static const char *my_name = MY_NAME;
943 
944 static const struct bus_type pseudo_lld_bus;
945 
946 static struct device_driver sdebug_driverfs_driver = {
947 	.name 		= sdebug_proc_name,
948 	.bus		= &pseudo_lld_bus,
949 };
950 
951 static const int check_condition_result =
952 	SAM_STAT_CHECK_CONDITION;
953 
954 static const int illegal_condition_result =
955 	(DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;
956 
957 static const int device_qfull_result =
958 	(DID_ABORT << 16) | SAM_STAT_TASK_SET_FULL;
959 
960 static const int condition_met_result = SAM_STAT_CONDITION_MET;
961 
962 static struct dentry *sdebug_debugfs_root;
963 static ASYNC_DOMAIN_EXCLUSIVE(sdebug_async_domain);
964 
965 static void sdebug_err_free(struct rcu_head *head)
966 {
967 	struct sdebug_err_inject *inject =
968 		container_of(head, typeof(*inject), rcu);
969 
970 	kfree(inject);
971 }
972 
973 static void sdebug_err_add(struct scsi_device *sdev, struct sdebug_err_inject *new)
974 {
975 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdev->hostdata;
976 	struct sdebug_err_inject *err;
977 
978 	spin_lock(&devip->list_lock);
979 	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
980 		if (err->type == new->type && err->cmd == new->cmd) {
981 			list_del_rcu(&err->list);
982 			call_rcu(&err->rcu, sdebug_err_free);
983 		}
984 	}
985 
986 	list_add_tail_rcu(&new->list, &devip->inject_err_list);
987 	spin_unlock(&devip->list_lock);
988 }
989 
990 static int sdebug_err_remove(struct scsi_device *sdev, const char *buf, size_t count)
991 {
992 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdev->hostdata;
993 	struct sdebug_err_inject *err;
994 	int type;
995 	unsigned char cmd;
996 
997 	if (sscanf(buf, "- %d %hhx", &type, &cmd) != 2) {
998 		kfree(buf);
999 		return -EINVAL;
1000 	}
1001 
1002 	spin_lock(&devip->list_lock);
1003 	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
1004 		if (err->type == type && err->cmd == cmd) {
1005 			list_del_rcu(&err->list);
1006 			call_rcu(&err->rcu, sdebug_err_free);
1007 			spin_unlock(&devip->list_lock);
1008 			kfree(buf);
1009 			return count;
1010 		}
1011 	}
1012 	spin_unlock(&devip->list_lock);
1013 
1014 	kfree(buf);
1015 	return -EINVAL;
1016 }
1017 
1018 static int sdebug_error_show(struct seq_file *m, void *p)
1019 {
1020 	struct scsi_device *sdev = (struct scsi_device *)m->private;
1021 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdev->hostdata;
1022 	struct sdebug_err_inject *err;
1023 
1024 	seq_puts(m, "Type\tCount\tCommand\n");
1025 
1026 	rcu_read_lock();
1027 	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
1028 		switch (err->type) {
1029 		case ERR_TMOUT_CMD:
1030 		case ERR_ABORT_CMD_FAILED:
1031 		case ERR_LUN_RESET_FAILED:
1032 			seq_printf(m, "%d\t%d\t0x%x\n", err->type, err->cnt,
1033 				err->cmd);
1034 		break;
1035 
1036 		case ERR_FAIL_QUEUE_CMD:
1037 			seq_printf(m, "%d\t%d\t0x%x\t0x%x\n", err->type,
1038 				err->cnt, err->cmd, err->queuecmd_ret);
1039 		break;
1040 
1041 		case ERR_FAIL_CMD:
1042 			seq_printf(m, "%d\t%d\t0x%x\t0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
1043 				err->type, err->cnt, err->cmd,
1044 				err->host_byte, err->driver_byte,
1045 				err->status_byte, err->sense_key,
1046 				err->asc, err->asq);
1047 		break;
1048 		}
1049 	}
1050 	rcu_read_unlock();
1051 
1052 	return 0;
1053 }
1054 
1055 static int sdebug_error_open(struct inode *inode, struct file *file)
1056 {
1057 	return single_open(file, sdebug_error_show, inode->i_private);
1058 }
1059 
1060 static ssize_t sdebug_error_write(struct file *file, const char __user *ubuf,
1061 		size_t count, loff_t *ppos)
1062 {
1063 	char *buf;
1064 	unsigned int inject_type;
1065 	struct sdebug_err_inject *inject;
1066 	struct scsi_device *sdev = (struct scsi_device *)file->f_inode->i_private;
1067 
1068 	buf = kzalloc(count + 1, GFP_KERNEL);
1069 	if (!buf)
1070 		return -ENOMEM;
1071 
1072 	if (copy_from_user(buf, ubuf, count)) {
1073 		kfree(buf);
1074 		return -EFAULT;
1075 	}
1076 
1077 	if (buf[0] == '-')
1078 		return sdebug_err_remove(sdev, buf, count);
1079 
1080 	if (sscanf(buf, "%d", &inject_type) != 1) {
1081 		kfree(buf);
1082 		return -EINVAL;
1083 	}
1084 
1085 	inject = kzalloc(sizeof(struct sdebug_err_inject), GFP_KERNEL);
1086 	if (!inject) {
1087 		kfree(buf);
1088 		return -ENOMEM;
1089 	}
1090 
1091 	switch (inject_type) {
1092 	case ERR_TMOUT_CMD:
1093 	case ERR_ABORT_CMD_FAILED:
1094 	case ERR_LUN_RESET_FAILED:
1095 		if (sscanf(buf, "%d %d %hhx", &inject->type, &inject->cnt,
1096 			   &inject->cmd) != 3)
1097 			goto out_error;
1098 	break;
1099 
1100 	case ERR_FAIL_QUEUE_CMD:
1101 		if (sscanf(buf, "%d %d %hhx %x", &inject->type, &inject->cnt,
1102 			   &inject->cmd, &inject->queuecmd_ret) != 4)
1103 			goto out_error;
1104 	break;
1105 
1106 	case ERR_FAIL_CMD:
1107 		if (sscanf(buf, "%d %d %hhx %hhx %hhx %hhx %hhx %hhx %hhx",
1108 			   &inject->type, &inject->cnt, &inject->cmd,
1109 			   &inject->host_byte, &inject->driver_byte,
1110 			   &inject->status_byte, &inject->sense_key,
1111 			   &inject->asc, &inject->asq) != 9)
1112 			goto out_error;
1113 	break;
1114 
1115 	default:
1116 		goto out_error;
1117 	break;
1118 	}
1119 
1120 	kfree(buf);
1121 	sdebug_err_add(sdev, inject);
1122 
1123 	return count;
1124 
1125 out_error:
1126 	kfree(buf);
1127 	kfree(inject);
1128 	return -EINVAL;
1129 }
1130 
1131 static const struct file_operations sdebug_error_fops = {
1132 	.open	= sdebug_error_open,
1133 	.read	= seq_read,
1134 	.write	= sdebug_error_write,
1135 	.release = single_release,
1136 };
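
/*
 * Write format accepted above, per the sscanf() patterns in
 * sdebug_error_write() (assuming this fops is wired to a per-device debugfs
 * file): e.g. the line "0 1 0x12" requests an ERR_TMOUT_CMD injection
 * (type 0, count 1) for INQUIRY (opcode 0x12), while a leading '-' removes
 * a previously added injection of the given type and opcode.
 */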
1137 
1138 static int sdebug_target_reset_fail_show(struct seq_file *m, void *p)
1139 {
1140 	struct scsi_target *starget = (struct scsi_target *)m->private;
1141 	struct sdebug_target_info *targetip =
1142 		(struct sdebug_target_info *)starget->hostdata;
1143 
1144 	if (targetip)
1145 		seq_printf(m, "%c\n", targetip->reset_fail ? 'Y' : 'N');
1146 
1147 	return 0;
1148 }
1149 
1150 static int sdebug_target_reset_fail_open(struct inode *inode, struct file *file)
1151 {
1152 	return single_open(file, sdebug_target_reset_fail_show, inode->i_private);
1153 }
1154 
1155 static ssize_t sdebug_target_reset_fail_write(struct file *file,
1156 		const char __user *ubuf, size_t count, loff_t *ppos)
1157 {
1158 	int ret;
1159 	struct scsi_target *starget =
1160 		(struct scsi_target *)file->f_inode->i_private;
1161 	struct sdebug_target_info *targetip =
1162 		(struct sdebug_target_info *)starget->hostdata;
1163 
1164 	if (targetip) {
1165 		ret = kstrtobool_from_user(ubuf, count, &targetip->reset_fail);
1166 		return ret < 0 ? ret : count;
1167 	}
1168 	return -ENODEV;
1169 }
1170 
1171 static const struct file_operations sdebug_target_reset_fail_fops = {
1172 	.open	= sdebug_target_reset_fail_open,
1173 	.read	= seq_read,
1174 	.write	= sdebug_target_reset_fail_write,
1175 	.release = single_release,
1176 };
1177 
1178 static int sdebug_target_alloc(struct scsi_target *starget)
1179 {
1180 	struct sdebug_target_info *targetip;
1181 
1182 	targetip = kzalloc(sizeof(struct sdebug_target_info), GFP_KERNEL);
1183 	if (!targetip)
1184 		return -ENOMEM;
1185 
1186 	async_synchronize_full_domain(&sdebug_async_domain);
1187 
1188 	targetip->debugfs_entry = debugfs_create_dir(dev_name(&starget->dev),
1189 				sdebug_debugfs_root);
1190 
1191 	debugfs_create_file("fail_reset", 0600, targetip->debugfs_entry, starget,
1192 				&sdebug_target_reset_fail_fops);
1193 
1194 	starget->hostdata = targetip;
1195 
1196 	return 0;
1197 }
1198 
1199 static void sdebug_tartget_cleanup_async(void *data, async_cookie_t cookie)
1200 {
1201 	struct sdebug_target_info *targetip = data;
1202 
1203 	debugfs_remove(targetip->debugfs_entry);
1204 	kfree(targetip);
1205 }
1206 
1207 static void sdebug_target_destroy(struct scsi_target *starget)
1208 {
1209 	struct sdebug_target_info *targetip;
1210 
1211 	targetip = (struct sdebug_target_info *)starget->hostdata;
1212 	if (targetip) {
1213 		starget->hostdata = NULL;
1214 		async_schedule_domain(sdebug_tartget_cleanup_async, targetip,
1215 				&sdebug_async_domain);
1216 	}
1217 }
1218 
1219 /* Only do the extra work involved in logical block provisioning if one or
1220  * more of the lbpu, lbpws or lbpws10 parameters are given and we are doing
1221  * real reads and writes (i.e. not skipping them for speed).
1222  */
1223 static inline bool scsi_debug_lbp(void)
1224 {
1225 	return 0 == sdebug_fake_rw &&
1226 		(sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10);
1227 }
1228 
1229 static inline bool scsi_debug_atomic_write(void)
1230 {
1231 	return sdebug_fake_rw == 0 && sdebug_atomic_wr;
1232 }
1233 
1234 static void *lba2fake_store(struct sdeb_store_info *sip,
1235 			    unsigned long long lba)
1236 {
1237 	struct sdeb_store_info *lsip = sip;
1238 
1239 	lba = do_div(lba, sdebug_store_sectors);
1240 	if (!sip || !sip->storep) {
1241 		WARN_ON_ONCE(true);
1242 		lsip = xa_load(per_store_ap, 0);  /* should never be NULL */
1243 	}
1244 	return lsip->storep + lba * sdebug_sector_size;
1245 }
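
/*
 * Note the do_div() above: LBAs wrap modulo sdebug_store_sectors, so with
 * virtual_gb > 0 several logical blocks can share one backing sector. For
 * example, with a 16384 sector store, LBA 20000 maps to fake-store sector
 * 20000 % 16384 = 3616.
 */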
1246 
1247 static struct t10_pi_tuple *dif_store(struct sdeb_store_info *sip,
1248 				      sector_t sector)
1249 {
1250 	sector = sector_div(sector, sdebug_store_sectors);
1251 
1252 	return sip->dif_storep + sector;
1253 }
1254 
1255 static void sdebug_max_tgts_luns(void)
1256 {
1257 	struct sdebug_host_info *sdbg_host;
1258 	struct Scsi_Host *hpnt;
1259 
1260 	mutex_lock(&sdebug_host_list_mutex);
1261 	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
1262 		hpnt = sdbg_host->shost;
1263 		if ((hpnt->this_id >= 0) &&
1264 		    (sdebug_num_tgts > hpnt->this_id))
1265 			hpnt->max_id = sdebug_num_tgts + 1;
1266 		else
1267 			hpnt->max_id = sdebug_num_tgts;
1268 		/* sdebug_max_luns; */
1269 		hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
1270 	}
1271 	mutex_unlock(&sdebug_host_list_mutex);
1272 }
1273 
1274 enum sdeb_cmd_data {SDEB_IN_DATA = 0, SDEB_IN_CDB = 1};
1275 
1276 /* Set in_bit to -1 to indicate no bit position of invalid field */
1277 static void mk_sense_invalid_fld(struct scsi_cmnd *scp,
1278 				 enum sdeb_cmd_data c_d,
1279 				 int in_byte, int in_bit)
1280 {
1281 	unsigned char *sbuff;
1282 	u8 sks[4];
1283 	int sl, asc;
1284 
1285 	sbuff = scp->sense_buffer;
1286 	if (!sbuff) {
1287 		sdev_printk(KERN_ERR, scp->device,
1288 			    "%s: sense_buffer is NULL\n", __func__);
1289 		return;
1290 	}
1291 	asc = c_d ? INVALID_FIELD_IN_CDB : INVALID_FIELD_IN_PARAM_LIST;
1292 	memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
1293 	scsi_build_sense(scp, sdebug_dsense, ILLEGAL_REQUEST, asc, 0);
1294 	memset(sks, 0, sizeof(sks));
1295 	sks[0] = 0x80;
1296 	if (c_d)
1297 		sks[0] |= 0x40;
1298 	if (in_bit >= 0) {
1299 		sks[0] |= 0x8;
1300 		sks[0] |= 0x7 & in_bit;
1301 	}
1302 	put_unaligned_be16(in_byte, sks + 1);
1303 	if (sdebug_dsense) {
1304 		sl = sbuff[7] + 8;
1305 		sbuff[7] = sl;
1306 		sbuff[sl] = 0x2;
1307 		sbuff[sl + 1] = 0x6;
1308 		memcpy(sbuff + sl + 4, sks, 3);
1309 	} else
1310 		memcpy(sbuff + 15, sks, 3);
1311 	if (sdebug_verbose)
1312 		sdev_printk(KERN_INFO, scp->device, "%s:  [sense_key,asc,ascq"
1313 			    "]: [0x5,0x%x,0x0] %c byte=%d, bit=%d\n",
1314 			    my_name, asc, c_d ? 'C' : 'D', in_byte, in_bit);
1315 }
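
/*
 * Worked example: flagging bit 5 of CDB byte 4 gives sense-key specific
 * bytes sks[0] = 0x80 (SKSV) | 0x40 (C/D = CDB) | 0x08 (BPV) | 0x05 = 0xcd
 * and sks[1..2] = 0x00 0x04 (field pointer), landing at bytes 15..17 of a
 * fixed-format sense buffer (or inside a descriptor when dsense is set).
 */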
1316 
1317 static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq)
1318 {
1319 	if (!scp->sense_buffer) {
1320 		sdev_printk(KERN_ERR, scp->device,
1321 			    "%s: sense_buffer is NULL\n", __func__);
1322 		return;
1323 	}
1324 	memset(scp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
1325 
1326 	scsi_build_sense(scp, sdebug_dsense, key, asc, asq);
1327 
1328 	if (sdebug_verbose)
1329 		sdev_printk(KERN_INFO, scp->device,
1330 			    "%s:  [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
1331 			    my_name, key, asc, asq);
1332 }
1333 
1334 static void mk_sense_invalid_opcode(struct scsi_cmnd *scp)
1335 {
1336 	mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
1337 }
1338 
1339 static int scsi_debug_ioctl(struct scsi_device *dev, unsigned int cmd,
1340 			    void __user *arg)
1341 {
1342 	if (sdebug_verbose) {
1343 		if (0x1261 == cmd)
1344 			sdev_printk(KERN_INFO, dev,
1345 				    "%s: BLKFLSBUF [0x1261]\n", __func__);
1346 		else if (0x5331 == cmd)
1347 			sdev_printk(KERN_INFO, dev,
1348 				    "%s: CDROM_GET_CAPABILITY [0x5331]\n",
1349 				    __func__);
1350 		else
1351 			sdev_printk(KERN_INFO, dev, "%s: cmd=0x%x\n",
1352 				    __func__, cmd);
1353 	}
1354 	return -EINVAL;
1355 	/* return -ENOTTY; // correct return but upsets fdisk */
1356 }
1357 
1358 static void config_cdb_len(struct scsi_device *sdev)
1359 {
1360 	switch (sdebug_cdb_len) {
1361 	case 6:	/* suggest 6 byte READ, WRITE and MODE SENSE/SELECT */
1362 		sdev->use_10_for_rw = false;
1363 		sdev->use_16_for_rw = false;
1364 		sdev->use_10_for_ms = false;
1365 		break;
1366 	case 10: /* suggest 10 byte RWs and 6 byte MODE SENSE/SELECT */
1367 		sdev->use_10_for_rw = true;
1368 		sdev->use_16_for_rw = false;
1369 		sdev->use_10_for_ms = false;
1370 		break;
1371 	case 12: /* suggest 10 byte RWs and 10 byte MODE SENSE/SELECT */
1372 		sdev->use_10_for_rw = true;
1373 		sdev->use_16_for_rw = false;
1374 		sdev->use_10_for_ms = true;
1375 		break;
1376 	case 16:
1377 		sdev->use_10_for_rw = false;
1378 		sdev->use_16_for_rw = true;
1379 		sdev->use_10_for_ms = true;
1380 		break;
1381 	case 32: /* No knobs to suggest this so same as 16 for now */
1382 		sdev->use_10_for_rw = false;
1383 		sdev->use_16_for_rw = true;
1384 		sdev->use_10_for_ms = true;
1385 		break;
1386 	default:
1387 		pr_warn("unexpected cdb_len=%d, force to 10\n",
1388 			sdebug_cdb_len);
1389 		sdev->use_10_for_rw = true;
1390 		sdev->use_16_for_rw = false;
1391 		sdev->use_10_for_ms = false;
1392 		sdebug_cdb_len = 10;
1393 		break;
1394 	}
1395 }
1396 
1397 static void all_config_cdb_len(void)
1398 {
1399 	struct sdebug_host_info *sdbg_host;
1400 	struct Scsi_Host *shost;
1401 	struct scsi_device *sdev;
1402 
1403 	mutex_lock(&sdebug_host_list_mutex);
1404 	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
1405 		shost = sdbg_host->shost;
1406 		shost_for_each_device(sdev, shost) {
1407 			config_cdb_len(sdev);
1408 		}
1409 	}
1410 	mutex_unlock(&sdebug_host_list_mutex);
1411 }
1412 
1413 static void clear_luns_changed_on_target(struct sdebug_dev_info *devip)
1414 {
1415 	struct sdebug_host_info *sdhp = devip->sdbg_host;
1416 	struct sdebug_dev_info *dp;
1417 
1418 	list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
1419 		if ((devip->sdbg_host == dp->sdbg_host) &&
1420 		    (devip->target == dp->target)) {
1421 			clear_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
1422 		}
1423 	}
1424 }
1425 
1426 static int make_ua(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1427 {
1428 	int k;
1429 
1430 	k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS);
1431 	if (k != SDEBUG_NUM_UAS) {
1432 		const char *cp = NULL;
1433 
1434 		switch (k) {
1435 		case SDEBUG_UA_POR:
1436 			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
1437 					POWER_ON_RESET_ASCQ);
1438 			if (sdebug_verbose)
1439 				cp = "power on reset";
1440 			break;
1441 		case SDEBUG_UA_POOCCUR:
1442 			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
1443 					POWER_ON_OCCURRED_ASCQ);
1444 			if (sdebug_verbose)
1445 				cp = "power on occurred";
1446 			break;
1447 		case SDEBUG_UA_BUS_RESET:
1448 			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
1449 					BUS_RESET_ASCQ);
1450 			if (sdebug_verbose)
1451 				cp = "bus reset";
1452 			break;
1453 		case SDEBUG_UA_MODE_CHANGED:
1454 			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
1455 					MODE_CHANGED_ASCQ);
1456 			if (sdebug_verbose)
1457 				cp = "mode parameters changed";
1458 			break;
1459 		case SDEBUG_UA_CAPACITY_CHANGED:
1460 			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
1461 					CAPACITY_CHANGED_ASCQ);
1462 			if (sdebug_verbose)
1463 				cp = "capacity data changed";
1464 			break;
1465 		case SDEBUG_UA_MICROCODE_CHANGED:
1466 			mk_sense_buffer(scp, UNIT_ATTENTION,
1467 					TARGET_CHANGED_ASC,
1468 					MICROCODE_CHANGED_ASCQ);
1469 			if (sdebug_verbose)
1470 				cp = "microcode has been changed";
1471 			break;
1472 		case SDEBUG_UA_MICROCODE_CHANGED_WO_RESET:
1473 			mk_sense_buffer(scp, UNIT_ATTENTION,
1474 					TARGET_CHANGED_ASC,
1475 					MICROCODE_CHANGED_WO_RESET_ASCQ);
1476 			if (sdebug_verbose)
1477 				cp = "microcode has been changed without reset";
1478 			break;
1479 		case SDEBUG_UA_LUNS_CHANGED:
1480 			/*
1481 			 * SPC-3 behavior is to report a UNIT ATTENTION with
1482 			 * ASC/ASCQ REPORTED LUNS DATA HAS CHANGED on every LUN
1483 			 * on the target, until a REPORT LUNS command is
1484 			 * received.  SPC-4 behavior is to report it only once.
1485 			 * NOTE:  sdebug_scsi_level does not use the same
1486 			 * values as struct scsi_device->scsi_level.
1487 			 */
1488 			if (sdebug_scsi_level >= 6)	/* SPC-4 and above */
1489 				clear_luns_changed_on_target(devip);
1490 			mk_sense_buffer(scp, UNIT_ATTENTION,
1491 					TARGET_CHANGED_ASC,
1492 					LUNS_CHANGED_ASCQ);
1493 			if (sdebug_verbose)
1494 				cp = "reported luns data has changed";
1495 			break;
1496 		default:
1497 			pr_warn("unexpected unit attention code=%d\n", k);
1498 			if (sdebug_verbose)
1499 				cp = "unknown";
1500 			break;
1501 		}
1502 		clear_bit(k, devip->uas_bm);
1503 		if (sdebug_verbose)
1504 			sdev_printk(KERN_INFO, scp->device,
1505 				   "%s reports: Unit attention: %s\n",
1506 				   my_name, cp);
1507 		return check_condition_result;
1508 	}
1509 	return 0;
1510 }
1511 
1512 /* Build SCSI "data-in" buffer. Returns 0 if ok else (DID_ERROR << 16). */
1513 static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
1514 				int arr_len)
1515 {
1516 	int act_len;
1517 	struct scsi_data_buffer *sdb = &scp->sdb;
1518 
1519 	if (!sdb->length)
1520 		return 0;
1521 	if (scp->sc_data_direction != DMA_FROM_DEVICE)
1522 		return DID_ERROR << 16;
1523 
1524 	act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
1525 				      arr, arr_len);
1526 	scsi_set_resid(scp, scsi_bufflen(scp) - act_len);
1527 
1528 	return 0;
1529 }
1530 
1531 /* Partial build of SCSI "data-in" buffer. Returns 0 if ok else
1532  * (DID_ERROR << 16). Can write to offset in data-in buffer. If multiple
1533  * calls, not required to write in ascending offset order. Assumes resid
1534  * set to scsi_bufflen() prior to any calls.
1535  */
1536 static int p_fill_from_dev_buffer(struct scsi_cmnd *scp, const void *arr,
1537 				  int arr_len, unsigned int off_dst)
1538 {
1539 	unsigned int act_len, n;
1540 	struct scsi_data_buffer *sdb = &scp->sdb;
1541 	off_t skip = off_dst;
1542 
1543 	if (sdb->length <= off_dst)
1544 		return 0;
1545 	if (scp->sc_data_direction != DMA_FROM_DEVICE)
1546 		return DID_ERROR << 16;
1547 
1548 	act_len = sg_pcopy_from_buffer(sdb->table.sgl, sdb->table.nents,
1549 				       arr, arr_len, skip);
1550 	pr_debug("%s: off_dst=%u, scsi_bufflen=%u, act_len=%u, resid=%d\n",
1551 		 __func__, off_dst, scsi_bufflen(scp), act_len,
1552 		 scsi_get_resid(scp));
1553 	n = scsi_bufflen(scp) - (off_dst + act_len);
1554 	scsi_set_resid(scp, min_t(u32, scsi_get_resid(scp), n));
1555 	return 0;
1556 }
1557 
1558 /* Fetches from SCSI "data-out" buffer. Returns number of bytes fetched into
1559  * 'arr' or -1 if error.
1560  */
1561 static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
1562 			       int arr_len)
1563 {
1564 	if (!scsi_bufflen(scp))
1565 		return 0;
1566 	if (scp->sc_data_direction != DMA_TO_DEVICE)
1567 		return -1;
1568 
1569 	return scsi_sg_copy_to_buffer(scp, arr, arr_len);
1570 }
1571 
1572 
1573 static char sdebug_inq_vendor_id[9] = "Linux   ";
1574 static char sdebug_inq_product_id[17] = "scsi_debug      ";
1575 static char sdebug_inq_product_rev[5] = SDEBUG_VERSION;
1576 /* Use some locally assigned NAAs for SAS addresses. */
1577 static const u64 naa3_comp_a = 0x3222222000000000ULL;
1578 static const u64 naa3_comp_b = 0x3333333000000000ULL;
1579 static const u64 naa3_comp_c = 0x3111111000000000ULL;
1580 
1581 /* Device identification VPD page. Returns number of bytes placed in arr */
1582 static int inquiry_vpd_83(unsigned char *arr, int port_group_id,
1583 			  int target_dev_id, int dev_id_num,
1584 			  const char *dev_id_str, int dev_id_str_len,
1585 			  const uuid_t *lu_name)
1586 {
1587 	int num, port_a;
1588 	char b[32];
1589 
1590 	port_a = target_dev_id + 1;
1591 	/* T10 vendor identifier field format (faked) */
1592 	arr[0] = 0x2;	/* ASCII */
1593 	arr[1] = 0x1;
1594 	arr[2] = 0x0;
1595 	memcpy(&arr[4], sdebug_inq_vendor_id, 8);
1596 	memcpy(&arr[12], sdebug_inq_product_id, 16);
1597 	memcpy(&arr[28], dev_id_str, dev_id_str_len);
1598 	num = 8 + 16 + dev_id_str_len;
1599 	arr[3] = num;
1600 	num += 4;
1601 	if (dev_id_num >= 0) {
1602 		if (sdebug_uuid_ctl) {
1603 			/* Locally assigned UUID */
1604 			arr[num++] = 0x1;  /* binary (not necessarily sas) */
1605 			arr[num++] = 0xa;  /* PIV=0, lu, uuid */
1606 			arr[num++] = 0x0;
1607 			arr[num++] = 0x12;
1608 			arr[num++] = 0x10; /* uuid type=1, locally assigned */
1609 			arr[num++] = 0x0;
1610 			memcpy(arr + num, lu_name, 16);
1611 			num += 16;
1612 		} else {
1613 			/* NAA-3, Logical unit identifier (binary) */
1614 			arr[num++] = 0x1;  /* binary (not necessarily sas) */
1615 			arr[num++] = 0x3;  /* PIV=0, lu, naa */
1616 			arr[num++] = 0x0;
1617 			arr[num++] = 0x8;
1618 			put_unaligned_be64(naa3_comp_b + dev_id_num, arr + num);
1619 			num += 8;
1620 		}
1621 		/* Target relative port number */
1622 		arr[num++] = 0x61;	/* proto=sas, binary */
1623 		arr[num++] = 0x94;	/* PIV=1, target port, rel port */
1624 		arr[num++] = 0x0;	/* reserved */
1625 		arr[num++] = 0x4;	/* length */
1626 		arr[num++] = 0x0;	/* reserved */
1627 		arr[num++] = 0x0;	/* reserved */
1628 		arr[num++] = 0x0;
1629 		arr[num++] = 0x1;	/* relative port A */
1630 	}
1631 	/* NAA-3, Target port identifier */
1632 	arr[num++] = 0x61;	/* proto=sas, binary */
1633 	arr[num++] = 0x93;	/* piv=1, target port, naa */
1634 	arr[num++] = 0x0;
1635 	arr[num++] = 0x8;
1636 	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
1637 	num += 8;
1638 	/* NAA-3, Target port group identifier */
1639 	arr[num++] = 0x61;	/* proto=sas, binary */
1640 	arr[num++] = 0x95;	/* piv=1, target port group id */
1641 	arr[num++] = 0x0;
1642 	arr[num++] = 0x4;
1643 	arr[num++] = 0;
1644 	arr[num++] = 0;
1645 	put_unaligned_be16(port_group_id, arr + num);
1646 	num += 2;
1647 	/* NAA-3, Target device identifier */
1648 	arr[num++] = 0x61;	/* proto=sas, binary */
1649 	arr[num++] = 0xa3;	/* piv=1, target device, naa */
1650 	arr[num++] = 0x0;
1651 	arr[num++] = 0x8;
1652 	put_unaligned_be64(naa3_comp_a + target_dev_id, arr + num);
1653 	num += 8;
1654 	/* SCSI name string: Target device identifier */
1655 	arr[num++] = 0x63;	/* proto=sas, UTF-8 */
1656 	arr[num++] = 0xa8;	/* piv=1, target device, SCSI name string */
1657 	arr[num++] = 0x0;
1658 	arr[num++] = 24;
1659 	memcpy(arr + num, "naa.32222220", 12);
1660 	num += 12;
1661 	snprintf(b, sizeof(b), "%08X", target_dev_id);
1662 	memcpy(arr + num, b, 8);
1663 	num += 8;
1664 	memset(arr + num, 0, 4);
1665 	num += 4;
1666 	return num;
1667 }
1668 
1669 static unsigned char vpd84_data[] = {
1670 /* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
1671     0x22,0x22,0x22,0x0,0xbb,0x1,
1672     0x22,0x22,0x22,0x0,0xbb,0x2,
1673 };
1674 
1675 /*  Software interface identification VPD page */
1676 static int inquiry_vpd_84(unsigned char *arr)
1677 {
1678 	memcpy(arr, vpd84_data, sizeof(vpd84_data));
1679 	return sizeof(vpd84_data);
1680 }
1681 
1682 /* Management network addresses VPD page */
1683 static int inquiry_vpd_85(unsigned char *arr)
1684 {
1685 	int num = 0;
1686 	const char *na1 = "https://www.kernel.org/config";
1687 	const char *na2 = "http://www.kernel.org/log";
1688 	int plen, olen;
1689 
1690 	arr[num++] = 0x1;	/* lu, storage config */
1691 	arr[num++] = 0x0;	/* reserved */
1692 	arr[num++] = 0x0;
1693 	olen = strlen(na1);
1694 	plen = olen + 1;
1695 	if (plen % 4)
1696 		plen = ((plen / 4) + 1) * 4;
1697 	arr[num++] = plen;	/* length, null terminated, padded */
1698 	memcpy(arr + num, na1, olen);
1699 	memset(arr + num + olen, 0, plen - olen);
1700 	num += plen;
1701 
1702 	arr[num++] = 0x4;	/* lu, logging */
1703 	arr[num++] = 0x0;	/* reserved */
1704 	arr[num++] = 0x0;
1705 	olen = strlen(na2);
1706 	plen = olen + 1;
1707 	if (plen % 4)
1708 		plen = ((plen / 4) + 1) * 4;
1709 	arr[num++] = plen;	/* length, null terminated, padded */
1710 	memcpy(arr + num, na2, olen);
1711 	memset(arr + num + olen, 0, plen - olen);
1712 	num += plen;
1713 
1714 	return num;
1715 }
1716 
1717 /* SCSI ports VPD page */
1718 static int inquiry_vpd_88(unsigned char *arr, int target_dev_id)
1719 {
1720 	int num = 0;
1721 	int port_a, port_b;
1722 
1723 	port_a = target_dev_id + 1;
1724 	port_b = port_a + 1;
1725 	arr[num++] = 0x0;	/* reserved */
1726 	arr[num++] = 0x0;	/* reserved */
1727 	arr[num++] = 0x0;
1728 	arr[num++] = 0x1;	/* relative port 1 (primary) */
1729 	memset(arr + num, 0, 6);
1730 	num += 6;
1731 	arr[num++] = 0x0;
1732 	arr[num++] = 12;	/* length tp descriptor */
1733 	/* naa-5 target port identifier (A) */
1734 	arr[num++] = 0x61;	/* proto=sas, binary */
1735 	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
1736 	arr[num++] = 0x0;	/* reserved */
1737 	arr[num++] = 0x8;	/* length */
1738 	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
1739 	num += 8;
1740 	arr[num++] = 0x0;	/* reserved */
1741 	arr[num++] = 0x0;	/* reserved */
1742 	arr[num++] = 0x0;
1743 	arr[num++] = 0x2;	/* relative port 2 (secondary) */
1744 	memset(arr + num, 0, 6);
1745 	num += 6;
1746 	arr[num++] = 0x0;
1747 	arr[num++] = 12;	/* length tp descriptor */
1748 	/* naa-5 target port identifier (B) */
1749 	arr[num++] = 0x61;	/* proto=sas, binary */
1750 	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
1751 	arr[num++] = 0x0;	/* reserved */
1752 	arr[num++] = 0x8;	/* length */
1753 	put_unaligned_be64(naa3_comp_a + port_b, arr + num);
1754 	num += 8;
1755 
1756 	return num;
1757 }
1758 
1759 
1760 static unsigned char vpd89_data[] = {
1761 /* from 4th byte */ 0,0,0,0,
1762 'l','i','n','u','x',' ',' ',' ',
1763 'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
1764 '1','2','3','4',
1765 0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
1766 0xec,0,0,0,
1767 0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
1768 0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
1769 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
1770 0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
1771 0x53,0x41,
1772 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
1773 0x20,0x20,
1774 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
1775 0x10,0x80,
1776 0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
1777 0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
1778 0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
1779 0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
1780 0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
1781 0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
1782 0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
1783 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1784 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1785 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1786 0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
1787 0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
1788 0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
1789 0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
1790 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1791 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1792 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1793 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1794 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1795 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1796 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1797 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1798 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1799 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1800 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1801 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
1802 };
1803 
1804 /* ATA Information VPD page */
1805 static int inquiry_vpd_89(unsigned char *arr)
1806 {
1807 	memcpy(arr, vpd89_data, sizeof(vpd89_data));
1808 	return sizeof(vpd89_data);
1809 }
1810 
1811 
1812 static unsigned char vpdb0_data[] = {
1813 	/* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
1814 	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1815 	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1816 	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1817 };
1818 
1819 /* Block limits VPD page (SBC-3) */
1820 static int inquiry_vpd_b0(unsigned char *arr)
1821 {
1822 	unsigned int gran;
1823 
1824 	memcpy(arr, vpdb0_data, sizeof(vpdb0_data));
1825 
1826 	/* Optimal transfer length granularity */
1827 	if (sdebug_opt_xferlen_exp != 0 &&
1828 	    sdebug_physblk_exp < sdebug_opt_xferlen_exp)
1829 		gran = 1 << sdebug_opt_xferlen_exp;
1830 	else
1831 		gran = 1 << sdebug_physblk_exp;
1832 	put_unaligned_be16(gran, arr + 2);
1833 
1834 	/* Maximum Transfer Length */
1835 	if (sdebug_store_sectors > 0x400)
1836 		put_unaligned_be32(sdebug_store_sectors, arr + 4);
1837 
1838 	/* Optimal Transfer Length */
1839 	put_unaligned_be32(sdebug_opt_blks, &arr[8]);
1840 
1841 	if (sdebug_lbpu) {
1842 		/* Maximum Unmap LBA Count */
1843 		put_unaligned_be32(sdebug_unmap_max_blocks, &arr[16]);
1844 
1845 		/* Maximum Unmap Block Descriptor Count */
1846 		put_unaligned_be32(sdebug_unmap_max_desc, &arr[20]);
1847 	}
1848 
1849 	/* Unmap Granularity Alignment */
1850 	if (sdebug_unmap_alignment) {
1851 		put_unaligned_be32(sdebug_unmap_alignment, &arr[28]);
1852 		arr[28] |= 0x80; /* UGAVALID */
1853 	}
1854 
1855 	/* Optimal Unmap Granularity */
1856 	put_unaligned_be32(sdebug_unmap_granularity, &arr[24]);
1857 
1858 	/* Maximum WRITE SAME Length */
1859 	put_unaligned_be64(sdebug_write_same_length, &arr[32]);
1860 
1861 	if (sdebug_atomic_wr) {
1862 		put_unaligned_be32(sdebug_atomic_wr_max_length, &arr[40]);
1863 		put_unaligned_be32(sdebug_atomic_wr_align, &arr[44]);
1864 		put_unaligned_be32(sdebug_atomic_wr_gran, &arr[48]);
1865 		put_unaligned_be32(sdebug_atomic_wr_max_length_bndry, &arr[52]);
1866 		put_unaligned_be32(sdebug_atomic_wr_max_bndry, &arr[56]);
1867 	}
1868 
1869 	return 0x3c; /* mandatory Block Limits page length, covers the LBP fields */
1870 }
1871 
1872 /* Block device characteristics VPD page (SBC-3) */
1873 static int inquiry_vpd_b1(struct sdebug_dev_info *devip, unsigned char *arr)
1874 {
1875 	memset(arr, 0, 0x3c);
1876 	arr[0] = 0;
1877 	arr[1] = 1;	/* non rotating medium (e.g. solid state) */
1878 	arr[2] = 0;
1879 	arr[3] = 5;	/* less than 1.8" */
1880 
1881 	return 0x3c;
1882 }
1883 
1884 /* Logical block provisioning VPD page (SBC-4) */
1885 static int inquiry_vpd_b2(unsigned char *arr)
1886 {
1887 	memset(arr, 0, 0x4);
1888 	arr[0] = 0;			/* threshold exponent */
1889 	if (sdebug_lbpu)
1890 		arr[1] = 1 << 7;
1891 	if (sdebug_lbpws)
1892 		arr[1] |= 1 << 6;
1893 	if (sdebug_lbpws10)
1894 		arr[1] |= 1 << 5;
1895 	if (sdebug_lbprz && scsi_debug_lbp())
1896 		arr[1] |= (sdebug_lbprz & 0x7) << 2;  /* sbc4r07 and later */
1897 	/* anc_sup=0; dp=0 (no provisioning group descriptor) */
1898 	/* minimum_percentage=0; provisioning_type=0 (unknown) */
1899 	/* threshold_percentage=0 */
1900 	return 0x4;
1901 }
1902 
1903 /* Zoned block device characteristics VPD page (ZBC mandatory) */
1904 static int inquiry_vpd_b6(struct sdebug_dev_info *devip, unsigned char *arr)
1905 {
1906 	memset(arr, 0, 0x3c);
1907 	arr[0] = 0x1; /* set URSWRZ (unrestricted read in seq. wr req zone) */
1908 	/*
1909 	 * Set Optimal number of open sequential write preferred zones and
1910 	 * Optimal number of non-sequentially written sequential write
1911 	 * preferred zones fields to 'not reported' (0xffffffff). Leave other
1912 	 * fields set to zero, apart from Max. number of open swrz_s field.
1913 	 */
1914 	put_unaligned_be32(0xffffffff, &arr[4]);
1915 	put_unaligned_be32(0xffffffff, &arr[8]);
1916 	if (sdeb_zbc_model == BLK_ZONED_HM && devip->max_open)
1917 		put_unaligned_be32(devip->max_open, &arr[12]);
1918 	else
1919 		put_unaligned_be32(0xffffffff, &arr[12]);
1920 	if (devip->zcap < devip->zsize) {
1921 		arr[19] = ZBC_CONSTANT_ZONE_START_OFFSET;
1922 		put_unaligned_be64(devip->zsize, &arr[20]);
1923 	} else {
1924 		arr[19] = 0;
1925 	}
1926 	return 0x3c;
1927 }
1928 
1929 #define SDEBUG_BLE_LEN_AFTER_B4 28	/* thus vpage 32 bytes long */
1930 
1931 enum { MAXIMUM_NUMBER_OF_STREAMS = 6, PERMANENT_STREAM_COUNT = 5 };
1932 
1933 /* Block limits extension VPD page (SBC-4) */
1934 static int inquiry_vpd_b7(unsigned char *arrb4)
1935 {
1936 	memset(arrb4, 0, SDEBUG_BLE_LEN_AFTER_B4);
1937 	arrb4[1] = 1; /* Reduced stream control support (RSCS) */
1938 	put_unaligned_be16(MAXIMUM_NUMBER_OF_STREAMS, &arrb4[2]);
1939 	return SDEBUG_BLE_LEN_AFTER_B4;
1940 }
1941 
1942 #define SDEBUG_LONG_INQ_SZ 96
1943 #define SDEBUG_MAX_INQ_ARR_SZ 584
1944 
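/*
 * Respond to INQUIRY: either the standard 96 byte response or, when the
 * EVPD bit is set, one of the VPD pages built by the helpers above.
 */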
1945 static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1946 {
1947 	unsigned char pq_pdt;
1948 	unsigned char *arr;
1949 	unsigned char *cmd = scp->cmnd;
1950 	u32 alloc_len, n;
1951 	int ret;
1952 	bool have_wlun, is_disk, is_zbc, is_disk_zbc;
1953 
1954 	alloc_len = get_unaligned_be16(cmd + 3);
1955 	arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
1956 	if (!arr)
1957 		return DID_REQUEUE << 16;
1958 	is_disk = (sdebug_ptype == TYPE_DISK);
1959 	is_zbc = devip->zoned;
1960 	is_disk_zbc = (is_disk || is_zbc);
1961 	have_wlun = scsi_is_wlun(scp->device->lun);
1962 	if (have_wlun)
1963 		pq_pdt = TYPE_WLUN;	/* present, wlun */
1964 	else if (sdebug_no_lun_0 && (devip->lun == SDEBUG_LUN_0_VAL))
1965 		pq_pdt = 0x7f;	/* not present, PQ=3, PDT=0x1f */
1966 	else
1967 		pq_pdt = (sdebug_ptype & 0x1f);
1968 	arr[0] = pq_pdt;
1969 	if (0x2 & cmd[1]) {  /* CMDDT bit set */
1970 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
1971 		kfree(arr);
1972 		return check_condition_result;
1973 	} else if (0x1 & cmd[1]) {  /* EVPD bit set */
1974 		int lu_id_num, port_group_id, target_dev_id;
1975 		u32 len;
1976 		char lu_id_str[6];
1977 		int host_no = devip->sdbg_host->shost->host_no;
1978 
1979 		arr[1] = cmd[2];
1980 		port_group_id = (((host_no + 1) & 0x7f) << 8) +
1981 		    (devip->channel & 0x7f);
1982 		if (sdebug_vpd_use_hostno == 0)
1983 			host_no = 0;
1984 		lu_id_num = have_wlun ? -1 : (((host_no + 1) * 2000) +
1985 			    (devip->target * 1000) + devip->lun);
1986 		target_dev_id = ((host_no + 1) * 2000) +
1987 				 (devip->target * 1000) - 3;
1988 		len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
1989 		if (0 == cmd[2]) { /* supported vital product data pages */
1990 			n = 4;
1991 			arr[n++] = 0x0;   /* this page */
1992 			arr[n++] = 0x80;  /* unit serial number */
1993 			arr[n++] = 0x83;  /* device identification */
1994 			arr[n++] = 0x84;  /* software interface ident. */
1995 			arr[n++] = 0x85;  /* management network addresses */
1996 			arr[n++] = 0x86;  /* extended inquiry */
1997 			arr[n++] = 0x87;  /* mode page policy */
1998 			arr[n++] = 0x88;  /* SCSI ports */
1999 			if (is_disk_zbc) {	  /* SBC or ZBC */
2000 				arr[n++] = 0x89;  /* ATA information */
2001 				arr[n++] = 0xb0;  /* Block limits */
2002 				arr[n++] = 0xb1;  /* Block characteristics */
2003 				if (is_disk)
2004 					arr[n++] = 0xb2;  /* LB Provisioning */
2005 				if (is_zbc)
2006 					arr[n++] = 0xb6;  /* ZB dev. char. */
2007 				arr[n++] = 0xb7;  /* Block limits extension */
2008 			}
2009 			arr[3] = n - 4;	  /* number of supported VPD pages */
2010 		} else if (0x80 == cmd[2]) { /* unit serial number */
2011 			arr[3] = len;
2012 			memcpy(&arr[4], lu_id_str, len);
2013 		} else if (0x83 == cmd[2]) { /* device identification */
2014 			arr[3] = inquiry_vpd_83(&arr[4], port_group_id,
2015 						target_dev_id, lu_id_num,
2016 						lu_id_str, len,
2017 						&devip->lu_name);
2018 		} else if (0x84 == cmd[2]) { /* Software interface ident. */
2019 			arr[3] = inquiry_vpd_84(&arr[4]);
2020 		} else if (0x85 == cmd[2]) { /* Management network addresses */
2021 			arr[3] = inquiry_vpd_85(&arr[4]);
2022 		} else if (0x86 == cmd[2]) { /* extended inquiry */
2023 			arr[3] = 0x3c;	/* number of following entries */
2024 			if (sdebug_dif == T10_PI_TYPE3_PROTECTION)
2025 				arr[4] = 0x4;	/* SPT: GRD_CHK:1 */
2026 			else if (have_dif_prot)
2027 				arr[4] = 0x5;   /* SPT: GRD_CHK:1, REF_CHK:1 */
2028 			else
2029 				arr[4] = 0x0;   /* no protection stuff */
2030 			/*
2031 			 * GROUP_SUP=1; HEADSUP=1 (HEAD OF QUEUE); ORDSUP=1
2032 			 * (ORDERED queuing); SIMPSUP=1 (SIMPLE queuing).
2033 			 */
2034 			arr[5] = 0x17;
2035 		} else if (0x87 == cmd[2]) { /* mode page policy */
2036 			arr[3] = 0x8;	/* number of following entries */
2037 			arr[4] = 0x2;	/* disconnect-reconnect mp */
2038 			arr[6] = 0x80;	/* mlus, shared */
2039 			arr[8] = 0x18;	 /* protocol specific lu */
2040 			arr[10] = 0x82;	 /* mlus, per initiator port */
2041 		} else if (0x88 == cmd[2]) { /* SCSI Ports */
2042 			arr[3] = inquiry_vpd_88(&arr[4], target_dev_id);
2043 		} else if (is_disk_zbc && 0x89 == cmd[2]) { /* ATA info */
2044 			n = inquiry_vpd_89(&arr[4]);
2045 			put_unaligned_be16(n, arr + 2);
2046 		} else if (is_disk_zbc && 0xb0 == cmd[2]) { /* Block limits */
2047 			arr[3] = inquiry_vpd_b0(&arr[4]);
2048 		} else if (is_disk_zbc && 0xb1 == cmd[2]) { /* Block char. */
2049 			arr[3] = inquiry_vpd_b1(devip, &arr[4]);
2050 		} else if (is_disk && 0xb2 == cmd[2]) { /* LB Prov. */
2051 			arr[3] = inquiry_vpd_b2(&arr[4]);
2052 		} else if (is_zbc && cmd[2] == 0xb6) { /* ZB dev. charact. */
2053 			arr[3] = inquiry_vpd_b6(devip, &arr[4]);
2054 		} else if (cmd[2] == 0xb7) { /* block limits extension page */
2055 			arr[3] = inquiry_vpd_b7(&arr[4]);
2056 		} else {
2057 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
2058 			kfree(arr);
2059 			return check_condition_result;
2060 		}
2061 		len = min_t(u32, get_unaligned_be16(arr + 2) + 4, alloc_len);
2062 		ret = fill_from_dev_buffer(scp, arr,
2063 			    min_t(u32, len, SDEBUG_MAX_INQ_ARR_SZ));
2064 		kfree(arr);
2065 		return ret;
2066 	}
2067 	/* drops through here for a standard inquiry */
2068 	arr[1] = sdebug_removable ? 0x80 : 0;	/* Removable disk */
2069 	arr[2] = sdebug_scsi_level;
2070 	arr[3] = 2;    /* response_data_format==2 */
2071 	arr[4] = SDEBUG_LONG_INQ_SZ - 5;
2072 	arr[5] = (int)have_dif_prot;	/* PROTECT bit */
2073 	if (sdebug_vpd_use_hostno == 0)
2074 		arr[5] |= 0x10; /* claim: implicit TPGS */
2075 	arr[6] = 0x10; /* claim: MultiP */
2076 	/* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
2077 	arr[7] = 0xa; /* claim: LINKED + CMDQUE */
2078 	memcpy(&arr[8], sdebug_inq_vendor_id, 8);
2079 	memcpy(&arr[16], sdebug_inq_product_id, 16);
2080 	memcpy(&arr[32], sdebug_inq_product_rev, 4);
2081 	/* Use Vendor Specific area to place driver date in ASCII hex */
2082 	memcpy(&arr[36], sdebug_version_date, 8);
2083 	/* version descriptors (2 bytes each) follow */
2084 	put_unaligned_be16(0xc0, arr + 58);   /* SAM-6 no version claimed */
2085 	put_unaligned_be16(0x5c0, arr + 60);  /* SPC-5 no version claimed */
2086 	n = 62;
2087 	if (is_disk) {		/* SBC-4 no version claimed */
2088 		put_unaligned_be16(0x600, arr + n);
2089 		n += 2;
2090 	} else if (sdebug_ptype == TYPE_TAPE) {	/* SSC-4 rev 3 */
2091 		put_unaligned_be16(0x525, arr + n);
2092 		n += 2;
2093 	} else if (is_zbc) {	/* ZBC BSR INCITS 536 revision 05 */
2094 		put_unaligned_be16(0x624, arr + n);
2095 		n += 2;
2096 	}
2097 	put_unaligned_be16(0x2100, arr + n);	/* SPL-4 no version claimed */
2098 	ret = fill_from_dev_buffer(scp, arr,
2099 			    min_t(u32, alloc_len, SDEBUG_LONG_INQ_SZ));
2100 	kfree(arr);
2101 	return ret;
2102 }
2103 
2104 /* See resp_iec_m_pg() for how this data is manipulated */
2105 static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
2106 				   0, 0, 0x0, 0x0};
2107 
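/*
 * Respond to REQUEST SENSE: reports not-ready sense while stopped (or
 * while the tur_ms_to_ready delay runs), THRESHOLD EXCEEDED when the
 * informational exceptions mode page has TEST=1 and MRIE=6, otherwise
 * "no sense"; both descriptor (dsense) and fixed formats are handled.
 */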
2108 static int resp_requests(struct scsi_cmnd *scp,
2109 			 struct sdebug_dev_info *devip)
2110 {
2111 	unsigned char *cmd = scp->cmnd;
2112 	unsigned char arr[SCSI_SENSE_BUFFERSIZE];	/* assume >= 18 bytes */
2113 	bool dsense = !!(cmd[1] & 1);
2114 	u32 alloc_len = cmd[4];
2115 	u32 len = 18;
2116 	int stopped_state = atomic_read(&devip->stopped);
2117 
2118 	memset(arr, 0, sizeof(arr));
2119 	if (stopped_state > 0) {	/* some "pollable" data [spc6r02: 5.12.2] */
2120 		if (dsense) {
2121 			arr[0] = 0x72;
2122 			arr[1] = NOT_READY;
2123 			arr[2] = LOGICAL_UNIT_NOT_READY;
2124 			arr[3] = (stopped_state == 2) ? 0x1 : 0x2;
2125 			len = 8;
2126 		} else {
2127 			arr[0] = 0x70;
2128 			arr[2] = NOT_READY;		/* NOT_READY in sense_key */
2129 			arr[7] = 0xa;			/* 18 byte sense buffer */
2130 			arr[12] = LOGICAL_UNIT_NOT_READY;
2131 			arr[13] = (stopped_state == 2) ? 0x1 : 0x2;
2132 		}
2133 	} else if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
2134 		/* Information exceptions control mode page: TEST=1, MRIE=6 */
2135 		if (dsense) {
2136 			arr[0] = 0x72;
2137 			arr[1] = 0x0;		/* NO_SENSE in sense_key */
2138 			arr[2] = THRESHOLD_EXCEEDED;
2139 			arr[3] = 0xff;		/* Failure prediction(false) */
2140 			len = 8;
2141 		} else {
2142 			arr[0] = 0x70;
2143 			arr[2] = 0x0;		/* NO_SENSE in sense_key */
2144 			arr[7] = 0xa;		/* 18 byte sense buffer */
2145 			arr[12] = THRESHOLD_EXCEEDED;
2146 			arr[13] = 0xff;		/* Failure prediction(false) */
2147 		}
2148 	} else {	/* nothing to report */
2149 		if (dsense) {
2150 			len = 8;
2151 			memset(arr, 0, len);
2152 			arr[0] = 0x72;
2153 		} else {
2154 			memset(arr, 0, len);
2155 			arr[0] = 0x70;
2156 			arr[7] = 0xa;
2157 		}
2158 	}
2159 	return fill_from_dev_buffer(scp, arr, min_t(u32, len, alloc_len));
2160 }
2161 
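/*
 * Respond to START STOP UNIT. While the tur_ms_to_ready delay is still
 * running (stopped state 2), a START is rejected; otherwise the stopped
 * state is updated and, if the state is unchanged or the IMMED bit is
 * set, the command is completed immediately (SDEG_RES_IMMED_MASK).
 */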
2162 static int resp_start_stop(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
2163 {
2164 	unsigned char *cmd = scp->cmnd;
2165 	int power_cond, want_stop, stopped_state;
2166 	bool changing;
2167 
2168 	power_cond = (cmd[4] & 0xf0) >> 4;
2169 	if (power_cond) {
2170 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 7);
2171 		return check_condition_result;
2172 	}
2173 	want_stop = !(cmd[4] & 1);
2174 	stopped_state = atomic_read(&devip->stopped);
2175 	if (stopped_state == 2) {
2176 		ktime_t now_ts = ktime_get_boottime();
2177 
2178 		if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
2179 			u64 diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));
2180 
2181 			if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
2182 				/* tur_ms_to_ready delay has expired */
2183 				atomic_set(&devip->stopped, 0);
2184 				stopped_state = 0;
2185 			}
2186 		}
2187 		if (stopped_state == 2) {
2188 			if (want_stop) {
2189 				stopped_state = 1;	/* dummy up success */
2190 			} else {	/* Disallow tur_ms_to_ready delay to be overridden */
2191 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 0 /* START bit */);
2192 				return check_condition_result;
2193 			}
2194 		}
2195 	}
2196 	changing = (stopped_state != want_stop);
2197 	if (changing)
2198 		atomic_xchg(&devip->stopped, want_stop);
2199 	if (!changing || (cmd[1] & 0x1))  /* state unchanged or IMMED bit set in cdb */
2200 		return SDEG_RES_IMMED_MASK;
2201 	else
2202 		return 0;
2203 }
2204 
2205 static sector_t get_sdebug_capacity(void)
2206 {
2207 	static const unsigned int gibibyte = 1073741824;
2208 
2209 	if (sdebug_virtual_gb > 0)
2210 		return (sector_t)sdebug_virtual_gb *
2211 			(gibibyte / sdebug_sector_size);
2212 	else
2213 		return sdebug_store_sectors;
2214 }
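/*
 * Example: virtual_gb=4 with sdebug_sector_size=512 reports
 * 4 * (1073741824 / 512) = 8388608 sectors, independent of the number
 * of sectors actually held in the backing store.
 */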
2215 
2216 #define SDEBUG_READCAP_ARR_SZ 8
2217 static int resp_readcap(struct scsi_cmnd *scp,
2218 			struct sdebug_dev_info *devip)
2219 {
2220 	unsigned char arr[SDEBUG_READCAP_ARR_SZ];
2221 	unsigned int capac;
2222 
2223 	/* following just in case virtual_gb changed */
2224 	sdebug_capacity = get_sdebug_capacity();
2225 	memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
2226 	if (sdebug_capacity < 0xffffffff) {
2227 		capac = (unsigned int)sdebug_capacity - 1;
2228 		put_unaligned_be32(capac, arr + 0);
2229 	} else
2230 		put_unaligned_be32(0xffffffff, arr + 0);
2231 	put_unaligned_be16(sdebug_sector_size, arr + 6);
2232 	return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
2233 }
2234 
2235 #define SDEBUG_READCAP16_ARR_SZ 32
2236 static int resp_readcap16(struct scsi_cmnd *scp,
2237 			  struct sdebug_dev_info *devip)
2238 {
2239 	unsigned char *cmd = scp->cmnd;
2240 	unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
2241 	u32 alloc_len;
2242 
2243 	alloc_len = get_unaligned_be32(cmd + 10);
2244 	/* following just in case virtual_gb changed */
2245 	sdebug_capacity = get_sdebug_capacity();
2246 	memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
2247 	put_unaligned_be64((u64)(sdebug_capacity - 1), arr + 0);
2248 	put_unaligned_be32(sdebug_sector_size, arr + 8);
2249 	arr[13] = sdebug_physblk_exp & 0xf;
2250 	arr[14] = (sdebug_lowest_aligned >> 8) & 0x3f;
2251 
2252 	if (scsi_debug_lbp()) {
2253 		arr[14] |= 0x80; /* LBPME */
2254 		/* from sbc4r07, this LBPRZ field is 1 bit, but the LBPRZ in
2255 		 * the LB Provisioning VPD page is 3 bits. Note that lbprz=2
2256 		 * in the wider field maps to 0 in this field.
2257 		 */
2258 		if (sdebug_lbprz & 1)	/* precisely what the draft requires */
2259 			arr[14] |= 0x40;
2260 	}
2261 
2262 	/*
2263 	 * Since the scsi_debug READ CAPACITY implementation always reports the
2264 	 * total disk capacity, set RC BASIS = 1 for host-managed ZBC devices.
2265 	 */
2266 	if (devip->zoned)
2267 		arr[12] |= 1 << 4;
2268 
2269 	arr[15] = sdebug_lowest_aligned & 0xff;
2270 
2271 	if (have_dif_prot) {
2272 		arr[12] = (sdebug_dif - 1) << 1; /* P_TYPE */
2273 		arr[12] |= 1; /* PROT_EN */
2274 	}
2275 
2276 	return fill_from_dev_buffer(scp, arr,
2277 			    min_t(u32, alloc_len, SDEBUG_READCAP16_ARR_SZ));
2278 }
2279 
2280 #define SDEBUG_MAX_TGTPGS_ARR_SZ 1412
2281 
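/*
 * Respond to REPORT TARGET PORT GROUPS: advertises the same two
 * single-port groups as VPD page 0x88, with the group holding port B
 * always reported as unavailable.
 */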
2282 static int resp_report_tgtpgs(struct scsi_cmnd *scp,
2283 			      struct sdebug_dev_info *devip)
2284 {
2285 	unsigned char *cmd = scp->cmnd;
2286 	unsigned char *arr;
2287 	int host_no = devip->sdbg_host->shost->host_no;
2288 	int port_group_a, port_group_b, port_a, port_b;
2289 	u32 alen, n, rlen;
2290 	int ret;
2291 
2292 	alen = get_unaligned_be32(cmd + 6);
2293 	arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
2294 	if (!arr)
2295 		return DID_REQUEUE << 16;
2296 	/*
2297 	 * EVPD page 0x88 states we have two ports, one
2298 	 * real and a fake port with no device connected.
2299 	 * So we create two port groups with one port each
2300 	 * and set the group with port B to unavailable.
2301 	 */
2302 	port_a = 0x1; /* relative port A */
2303 	port_b = 0x2; /* relative port B */
2304 	port_group_a = (((host_no + 1) & 0x7f) << 8) +
2305 			(devip->channel & 0x7f);
2306 	port_group_b = (((host_no + 1) & 0x7f) << 8) +
2307 			(devip->channel & 0x7f) + 0x80;
2308 
2309 	/*
2310 	 * The asymmetric access state is cycled according to the host_no.
2311 	 */
2312 	n = 4;
2313 	if (sdebug_vpd_use_hostno == 0) {
2314 		arr[n++] = host_no % 3; /* Asymm access state */
2315 		arr[n++] = 0x0F; /* claim: all states are supported */
2316 	} else {
2317 		arr[n++] = 0x0; /* Active/Optimized path */
2318 		arr[n++] = 0x01; /* only support active/optimized paths */
2319 	}
2320 	put_unaligned_be16(port_group_a, arr + n);
2321 	n += 2;
2322 	arr[n++] = 0;    /* Reserved */
2323 	arr[n++] = 0;    /* Status code */
2324 	arr[n++] = 0;    /* Vendor unique */
2325 	arr[n++] = 0x1;  /* One port per group */
2326 	arr[n++] = 0;    /* Reserved */
2327 	arr[n++] = 0;    /* Reserved */
2328 	put_unaligned_be16(port_a, arr + n);
2329 	n += 2;
2330 	arr[n++] = 3;    /* Port unavailable */
2331 	arr[n++] = 0x08; /* claim: only unavailable paths are supported */
2332 	put_unaligned_be16(port_group_b, arr + n);
2333 	n += 2;
2334 	arr[n++] = 0;    /* Reserved */
2335 	arr[n++] = 0;    /* Status code */
2336 	arr[n++] = 0;    /* Vendor unique */
2337 	arr[n++] = 0x1;  /* One port per group */
2338 	arr[n++] = 0;    /* Reserved */
2339 	arr[n++] = 0;    /* Reserved */
2340 	put_unaligned_be16(port_b, arr + n);
2341 	n += 2;
2342 
2343 	rlen = n - 4;
2344 	put_unaligned_be32(rlen, arr + 0);
2345 
2346 	/*
2347 	 * Return the smallest value of either
2348 	 * - The allocated length
2349 	 * - The constructed command length
2350 	 * - The maximum array size
2351 	 */
2352 	rlen = min(alen, n);
2353 	ret = fill_from_dev_buffer(scp, arr,
2354 			   min_t(u32, rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
2355 	kfree(arr);
2356 	return ret;
2357 }
2358 
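/*
 * Respond to REPORT SUPPORTED OPERATION CODES (MAINTENANCE IN). Handles
 * reporting options 0 (all commands) and 1 to 3 (one command, selected
 * by opcode and optionally service action), appending command timeouts
 * descriptors when the RCTD bit is set.
 */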
2359 static int resp_rsup_opcodes(struct scsi_cmnd *scp,
2360 			     struct sdebug_dev_info *devip)
2361 {
2362 	bool rctd;
2363 	u8 reporting_opts, req_opcode, sdeb_i, supp;
2364 	u16 req_sa, u;
2365 	u32 alloc_len, a_len;
2366 	int k, offset, len, errsts, count, bump, na;
2367 	const struct opcode_info_t *oip;
2368 	const struct opcode_info_t *r_oip;
2369 	u8 *arr;
2370 	u8 *cmd = scp->cmnd;
2371 
2372 	rctd = !!(cmd[2] & 0x80);
2373 	reporting_opts = cmd[2] & 0x7;
2374 	req_opcode = cmd[3];
2375 	req_sa = get_unaligned_be16(cmd + 4);
2376 	alloc_len = get_unaligned_be32(cmd + 6);
2377 	if (alloc_len < 4 || alloc_len > 0xffff) {
2378 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
2379 		return check_condition_result;
2380 	}
2381 	if (alloc_len > 8192)
2382 		a_len = 8192;
2383 	else
2384 		a_len = alloc_len;
2385 	arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_ATOMIC);
2386 	if (NULL == arr) {
2387 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
2388 				INSUFF_RES_ASCQ);
2389 		return check_condition_result;
2390 	}
2391 	switch (reporting_opts) {
2392 	case 0:	/* all commands */
2393 		/* count number of commands */
2394 		for (count = 0, oip = opcode_info_arr;
2395 		     oip->num_attached != 0xff; ++oip) {
2396 			if (F_INV_OP & oip->flags)
2397 				continue;
2398 			count += (oip->num_attached + 1);
2399 		}
2400 		bump = rctd ? 20 : 8;
2401 		put_unaligned_be32(count * bump, arr);
2402 		for (offset = 4, oip = opcode_info_arr;
2403 		     oip->num_attached != 0xff && offset < a_len; ++oip) {
2404 			if (F_INV_OP & oip->flags)
2405 				continue;
2406 			na = oip->num_attached;
2407 			arr[offset] = oip->opcode;
2408 			put_unaligned_be16(oip->sa, arr + offset + 2);
2409 			if (rctd)
2410 				arr[offset + 5] |= 0x2;
2411 			if (FF_SA & oip->flags)
2412 				arr[offset + 5] |= 0x1;
2413 			put_unaligned_be16(oip->len_mask[0], arr + offset + 6);
2414 			if (rctd)
2415 				put_unaligned_be16(0xa, arr + offset + 8);
2416 			r_oip = oip;
2417 			for (k = 0, oip = oip->arrp; k < na; ++k, ++oip) {
2418 				if (F_INV_OP & oip->flags)
2419 					continue;
2420 				offset += bump;
2421 				arr[offset] = oip->opcode;
2422 				put_unaligned_be16(oip->sa, arr + offset + 2);
2423 				if (rctd)
2424 					arr[offset + 5] |= 0x2;
2425 				if (FF_SA & oip->flags)
2426 					arr[offset + 5] |= 0x1;
2427 				put_unaligned_be16(oip->len_mask[0],
2428 						   arr + offset + 6);
2429 				if (rctd)
2430 					put_unaligned_be16(0xa,
2431 							   arr + offset + 8);
2432 			}
2433 			oip = r_oip;
2434 			offset += bump;
2435 		}
2436 		break;
2437 	case 1:	/* one command: opcode only */
2438 	case 2:	/* one command: opcode plus service action */
2439 	case 3:	/* one command: if sa==0 then opcode only else opcode+sa */
2440 		sdeb_i = opcode_ind_arr[req_opcode];
2441 		oip = &opcode_info_arr[sdeb_i];
2442 		if (F_INV_OP & oip->flags) {
2443 			supp = 1;
2444 			offset = 4;
2445 		} else {
2446 			if (1 == reporting_opts) {
2447 				if (FF_SA & oip->flags) {
2448 					mk_sense_invalid_fld(scp, SDEB_IN_CDB,
2449 							     2, 2);
2450 					kfree(arr);
2451 					return check_condition_result;
2452 				}
2453 				req_sa = 0;
2454 			} else if (2 == reporting_opts &&
2455 				   0 == (FF_SA & oip->flags)) {
2456 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1);	/* point at requested sa */
2457 				kfree(arr);
2458 				return check_condition_result;
2459 			}
2460 			if (0 == (FF_SA & oip->flags) &&
2461 			    req_opcode == oip->opcode)
2462 				supp = 3;
2463 			else if (0 == (FF_SA & oip->flags)) {
2464 				na = oip->num_attached;
2465 				for (k = 0, oip = oip->arrp; k < na;
2466 				     ++k, ++oip) {
2467 					if (req_opcode == oip->opcode)
2468 						break;
2469 				}
2470 				supp = (k >= na) ? 1 : 3;
2471 			} else if (req_sa != oip->sa) {
2472 				na = oip->num_attached;
2473 				for (k = 0, oip = oip->arrp; k < na;
2474 				     ++k, ++oip) {
2475 					if (req_sa == oip->sa)
2476 						break;
2477 				}
2478 				supp = (k >= na) ? 1 : 3;
2479 			} else
2480 				supp = 3;
2481 			if (3 == supp) {
2482 				u = oip->len_mask[0];
2483 				put_unaligned_be16(u, arr + 2);
2484 				arr[4] = oip->opcode;
2485 				for (k = 1; k < u; ++k)
2486 					arr[4 + k] = (k < 16) ?
2487 						 oip->len_mask[k] : 0xff;
2488 				offset = 4 + u;
2489 			} else
2490 				offset = 4;
2491 		}
2492 		arr[1] = (rctd ? 0x80 : 0) | supp;
2493 		if (rctd) {
2494 			put_unaligned_be16(0xa, arr + offset);
2495 			offset += 12;
2496 		}
2497 		break;
2498 	default:
2499 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
2500 		kfree(arr);
2501 		return check_condition_result;
2502 	}
2503 	offset = (offset < a_len) ? offset : a_len;
2504 	len = (offset < alloc_len) ? offset : alloc_len;
2505 	errsts = fill_from_dev_buffer(scp, arr, len);
2506 	kfree(arr);
2507 	return errsts;
2508 }
2509 
2510 static int resp_rsup_tmfs(struct scsi_cmnd *scp,
2511 			  struct sdebug_dev_info *devip)
2512 {
2513 	bool repd;
2514 	u32 alloc_len, len;
2515 	u8 arr[16];
2516 	u8 *cmd = scp->cmnd;
2517 
2518 	memset(arr, 0, sizeof(arr));
2519 	repd = !!(cmd[2] & 0x80);
2520 	alloc_len = get_unaligned_be32(cmd + 6);
2521 	if (alloc_len < 4) {
2522 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
2523 		return check_condition_result;
2524 	}
2525 	arr[0] = 0xc8;		/* ATS | ATSS | LURS */
2526 	arr[1] = 0x1;		/* ITNRS */
2527 	if (repd) {
2528 		arr[3] = 0xc;
2529 		len = 16;
2530 	} else
2531 		len = 4;
2532 
2533 	len = (len < alloc_len) ? len : alloc_len;
2534 	return fill_from_dev_buffer(scp, arr, len);
2535 }
2536 
2537 /* <<Following mode page info copied from ST318451LW>> */
2538 
2539 static int resp_err_recov_pg(unsigned char *p, int pcontrol, int target)
2540 {	/* Read-Write Error Recovery page for mode_sense */
2541 	unsigned char err_recov_pg[] = {0x1, 0xa, 0xc0, 11, 240, 0, 0, 0,
2542 					5, 0, 0xff, 0xff};
2543 
2544 	memcpy(p, err_recov_pg, sizeof(err_recov_pg));
2545 	if (1 == pcontrol)
2546 		memset(p + 2, 0, sizeof(err_recov_pg) - 2);
2547 	return sizeof(err_recov_pg);
2548 }
2549 
2550 static int resp_disconnect_pg(unsigned char *p, int pcontrol, int target)
2551 { 	/* Disconnect-Reconnect page for mode_sense */
2552 	unsigned char disconnect_pg[] = {0x2, 0xe, 128, 128, 0, 10, 0, 0,
2553 					 0, 0, 0, 0, 0, 0, 0, 0};
2554 
2555 	memcpy(p, disconnect_pg, sizeof(disconnect_pg));
2556 	if (1 == pcontrol)
2557 		memset(p + 2, 0, sizeof(disconnect_pg) - 2);
2558 	return sizeof(disconnect_pg);
2559 }
2560 
2561 static int resp_format_pg(unsigned char *p, int pcontrol, int target)
2562 {       /* Format device page for mode_sense */
2563 	unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
2564 				     0, 0, 0, 0, 0, 0, 0, 0,
2565 				     0, 0, 0, 0, 0x40, 0, 0, 0};
2566 
2567 	memcpy(p, format_pg, sizeof(format_pg));
2568 	put_unaligned_be16(sdebug_sectors_per, p + 10);
2569 	put_unaligned_be16(sdebug_sector_size, p + 12);
2570 	if (sdebug_removable)
2571 		p[20] |= 0x20; /* should agree with INQUIRY */
2572 	if (1 == pcontrol)
2573 		memset(p + 2, 0, sizeof(format_pg) - 2);
2574 	return sizeof(format_pg);
2575 }
2576 
2577 static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
2578 				     0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,
2579 				     0, 0, 0, 0};
2580 
2581 static int resp_caching_pg(unsigned char *p, int pcontrol, int target)
2582 { 	/* Caching page for mode_sense */
2583 	unsigned char ch_caching_pg[] = {/* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0,
2584 		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
2585 	unsigned char d_caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
2586 		0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,     0, 0, 0, 0};
2587 
2588 	if (SDEBUG_OPT_N_WCE & sdebug_opts)
2589 		caching_pg[2] &= ~0x4;	/* set WCE=0 (default WCE=1) */
2590 	memcpy(p, caching_pg, sizeof(caching_pg));
2591 	if (1 == pcontrol)
2592 		memcpy(p + 2, ch_caching_pg, sizeof(ch_caching_pg));
2593 	else if (2 == pcontrol)
2594 		memcpy(p, d_caching_pg, sizeof(d_caching_pg));
2595 	return sizeof(caching_pg);
2596 }
2597 
2598 static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
2599 				    0, 0, 0x2, 0x4b};
2600 
2601 static int resp_ctrl_m_pg(unsigned char *p, int pcontrol, int target)
2602 { 	/* Control mode page for mode_sense */
2603 	unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
2604 					0, 0, 0, 0};
2605 	unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
2606 				     0, 0, 0x2, 0x4b};
2607 
2608 	if (sdebug_dsense)
2609 		ctrl_m_pg[2] |= 0x4;
2610 	else
2611 		ctrl_m_pg[2] &= ~0x4;
2612 
2613 	if (sdebug_ato)
2614 		ctrl_m_pg[5] |= 0x80; /* ATO=1 */
2615 
2616 	memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
2617 	if (1 == pcontrol)
2618 		memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
2619 	else if (2 == pcontrol)
2620 		memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
2621 	return sizeof(ctrl_m_pg);
2622 }
2623 
2624 /* IO Advice Hints Grouping mode page */
2625 static int resp_grouping_m_pg(unsigned char *p, int pcontrol, int target)
2626 {
2627 	/* IO Advice Hints Grouping mode page */
2628 	struct grouping_m_pg {
2629 		u8 page_code;	/* OR 0x40 when subpage_code > 0 */
2630 		u8 subpage_code;
2631 		__be16 page_length;
2632 		u8 reserved[12];
2633 		struct scsi_io_group_descriptor descr[MAXIMUM_NUMBER_OF_STREAMS];
2634 	};
2635 	static const struct grouping_m_pg gr_m_pg = {
2636 		.page_code = 0xa | 0x40,
2637 		.subpage_code = 5,
2638 		.page_length = cpu_to_be16(sizeof(gr_m_pg) - 4),
2639 		.descr = {
2640 			{ .st_enble = 1 },
2641 			{ .st_enble = 1 },
2642 			{ .st_enble = 1 },
2643 			{ .st_enble = 1 },
2644 			{ .st_enble = 1 },
2645 			{ .st_enble = 0 },
2646 		}
2647 	};
2648 
2649 	BUILD_BUG_ON(sizeof(struct grouping_m_pg) !=
2650 		     16 + MAXIMUM_NUMBER_OF_STREAMS * 16);
2651 	memcpy(p, &gr_m_pg, sizeof(gr_m_pg));
2652 	if (1 == pcontrol) {
2653 		/* There are no changeable values so clear from byte 4 on. */
2654 		memset(p + 4, 0, sizeof(gr_m_pg) - 4);
2655 	}
2656 	return sizeof(gr_m_pg);
2657 }
2658 
2659 static int resp_iec_m_pg(unsigned char *p, int pcontrol, int target)
2660 {	/* Informational Exceptions control mode page for mode_sense */
2661 	unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
2662 				       0, 0, 0x0, 0x0};
2663 	unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
2664 				      0, 0, 0x0, 0x0};
2665 
2666 	memcpy(p, iec_m_pg, sizeof(iec_m_pg));
2667 	if (1 == pcontrol)
2668 		memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
2669 	else if (2 == pcontrol)
2670 		memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
2671 	return sizeof(iec_m_pg);
2672 }
2673 
2674 static int resp_sas_sf_m_pg(unsigned char *p, int pcontrol, int target)
2675 {	/* SAS SSP mode page - short format for mode_sense */
2676 	unsigned char sas_sf_m_pg[] = {0x19, 0x6,
2677 		0x6, 0x0, 0x7, 0xd0, 0x0, 0x0};
2678 
2679 	memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg));
2680 	if (1 == pcontrol)
2681 		memset(p + 2, 0, sizeof(sas_sf_m_pg) - 2);
2682 	return sizeof(sas_sf_m_pg);
2683 }
2684 
2685 
2686 static int resp_sas_pcd_m_spg(unsigned char *p, int pcontrol, int target,
2687 			      int target_dev_id)
2688 {	/* SAS phy control and discover mode page for mode_sense */
2689 	unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
2690 		    0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
2691 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2692 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2693 		    0x2, 0, 0, 0, 0, 0, 0, 0,
2694 		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
2695 		    0, 0, 0, 0, 0, 0, 0, 0,
2696 		    0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
2697 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2698 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2699 		    0x3, 0, 0, 0, 0, 0, 0, 0,
2700 		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
2701 		    0, 0, 0, 0, 0, 0, 0, 0,
2702 		};
2703 	int port_a, port_b;
2704 
2705 	put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 16);
2706 	put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 24);
2707 	put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 64);
2708 	put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 72);
2709 	port_a = target_dev_id + 1;
2710 	port_b = port_a + 1;
2711 	memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
2712 	put_unaligned_be32(port_a, p + 20);
2713 	put_unaligned_be32(port_b, p + 48 + 20);
2714 	if (1 == pcontrol)
2715 		memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
2716 	return sizeof(sas_pcd_m_pg);
2717 }
2718 
2719 static int resp_sas_sha_m_spg(unsigned char *p, int pcontrol)
2720 {	/* SAS SSP shared protocol specific port mode subpage */
2721 	unsigned char sas_sha_m_pg[] = {0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
2722 		    0, 0, 0, 0, 0, 0, 0, 0,
2723 		};
2724 
2725 	memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg));
2726 	if (1 == pcontrol)
2727 		memset(p + 4, 0, sizeof(sas_sha_m_pg) - 4);
2728 	return sizeof(sas_sha_m_pg);
2729 }
2730 
2731 /* PAGE_SIZE is more than necessary but provides room for future expansion. */
2732 #define SDEBUG_MAX_MSENSE_SZ PAGE_SIZE
2733 
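/*
 * Respond to MODE SENSE(6) and MODE SENSE(10): builds the header, an
 * optional (short or long) block descriptor, then the requested mode
 * page(s) via the resp_*_pg() helpers above.
 */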
2734 static int resp_mode_sense(struct scsi_cmnd *scp,
2735 			   struct sdebug_dev_info *devip)
2736 {
2737 	int pcontrol, pcode, subpcode, bd_len;
2738 	unsigned char dev_spec;
2739 	u32 alloc_len, offset, len;
2740 	int target_dev_id;
2741 	int target = scp->device->id;
2742 	unsigned char *ap;
2743 	unsigned char *arr __free(kfree);
2744 	unsigned char *cmd = scp->cmnd;
2745 	bool dbd, llbaa, msense_6, is_disk, is_zbc;
2746 
2747 	arr = kzalloc(SDEBUG_MAX_MSENSE_SZ, GFP_ATOMIC);
2748 	if (!arr)
2749 		return -ENOMEM;
2750 	dbd = !!(cmd[1] & 0x8);		/* disable block descriptors */
2751 	pcontrol = (cmd[2] & 0xc0) >> 6;
2752 	pcode = cmd[2] & 0x3f;
2753 	subpcode = cmd[3];
2754 	msense_6 = (MODE_SENSE == cmd[0]);
2755 	llbaa = msense_6 ? false : !!(cmd[1] & 0x10);
2756 	is_disk = (sdebug_ptype == TYPE_DISK);
2757 	is_zbc = devip->zoned;
2758 	if ((is_disk || is_zbc) && !dbd)
2759 		bd_len = llbaa ? 16 : 8;
2760 	else
2761 		bd_len = 0;
2762 	alloc_len = msense_6 ? cmd[4] : get_unaligned_be16(cmd + 7);
2763 	if (0x3 == pcontrol) {  /* Saving values not supported */
2764 		mk_sense_buffer(scp, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, 0);
2765 		return check_condition_result;
2766 	}
2767 	target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
2768 			(devip->target * 1000) - 3;
2769 	/* for disks+zbc set DPOFUA bit and clear write protect (WP) bit */
2770 	if (is_disk || is_zbc) {
2771 		dev_spec = 0x10;	/* =0x90 if WP=1 implies read-only */
2772 		if (sdebug_wp)
2773 			dev_spec |= 0x80;
2774 	} else
2775 		dev_spec = 0x0;
2776 	if (msense_6) {
2777 		arr[2] = dev_spec;
2778 		arr[3] = bd_len;
2779 		offset = 4;
2780 	} else {
2781 		arr[3] = dev_spec;
2782 		if (16 == bd_len)
2783 			arr[4] = 0x1;	/* set LONGLBA bit */
2784 		arr[7] = bd_len;	/* assume 255 or less */
2785 		offset = 8;
2786 	}
2787 	ap = arr + offset;
2788 	if ((bd_len > 0) && (!sdebug_capacity))
2789 		sdebug_capacity = get_sdebug_capacity();
2790 
2791 	if (8 == bd_len) {
2792 		if (sdebug_capacity > 0xfffffffe)
2793 			put_unaligned_be32(0xffffffff, ap + 0);
2794 		else
2795 			put_unaligned_be32(sdebug_capacity, ap + 0);
2796 		put_unaligned_be16(sdebug_sector_size, ap + 6);
2797 		offset += bd_len;
2798 		ap = arr + offset;
2799 	} else if (16 == bd_len) {
2800 		put_unaligned_be64((u64)sdebug_capacity, ap + 0);
2801 		put_unaligned_be32(sdebug_sector_size, ap + 12);
2802 		offset += bd_len;
2803 		ap = arr + offset;
2804 	}
2805 
2806 	/*
2807 	 * N.B. If len>0 before resp_*_pg() call, then form of that call should be:
2808 	 *        len += resp_*_pg(ap + len, pcontrol, target);
2809 	 */
2810 	switch (pcode) {
2811 	case 0x1:	/* Read-Write error recovery page, direct access */
2812 		if (subpcode > 0x0 && subpcode < 0xff)
2813 			goto bad_subpcode;
2814 		len = resp_err_recov_pg(ap, pcontrol, target);
2815 		offset += len;
2816 		break;
2817 	case 0x2:	/* Disconnect-Reconnect page, all devices */
2818 		if (subpcode > 0x0 && subpcode < 0xff)
2819 			goto bad_subpcode;
2820 		len = resp_disconnect_pg(ap, pcontrol, target);
2821 		offset += len;
2822 		break;
2823 	case 0x3:       /* Format device page, direct access */
2824 		if (subpcode > 0x0 && subpcode < 0xff)
2825 			goto bad_subpcode;
2826 		if (is_disk) {
2827 			len = resp_format_pg(ap, pcontrol, target);
2828 			offset += len;
2829 		} else {
2830 			goto bad_pcode;
2831 		}
2832 		break;
2833 	case 0x8:	/* Caching page, direct access */
2834 		if (subpcode > 0x0 && subpcode < 0xff)
2835 			goto bad_subpcode;
2836 		if (is_disk || is_zbc) {
2837 			len = resp_caching_pg(ap, pcontrol, target);
2838 			offset += len;
2839 		} else {
2840 			goto bad_pcode;
2841 		}
2842 		break;
2843 	case 0xa:	/* Control Mode page, all devices */
2844 		switch (subpcode) {
2845 		case 0:
2846 			len = resp_ctrl_m_pg(ap, pcontrol, target);
2847 			break;
2848 		case 0x05:
2849 			len = resp_grouping_m_pg(ap, pcontrol, target);
2850 			break;
2851 		case 0xff:
2852 			len = resp_ctrl_m_pg(ap, pcontrol, target);
2853 			len += resp_grouping_m_pg(ap + len, pcontrol, target);
2854 			break;
2855 		default:
2856 			goto bad_subpcode;
2857 		}
2858 		offset += len;
2859 		break;
2860 	case 0x19:	/* if spc==1 then sas phy, control+discover */
2861 		if (subpcode > 0x2 && subpcode < 0xff)
2862 			goto bad_subpcode;
2863 		len = 0;
2864 		if ((0x0 == subpcode) || (0xff == subpcode))
2865 			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2866 		if ((0x1 == subpcode) || (0xff == subpcode))
2867 			len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
2868 						  target_dev_id);
2869 		if ((0x2 == subpcode) || (0xff == subpcode))
2870 			len += resp_sas_sha_m_spg(ap + len, pcontrol);
2871 		offset += len;
2872 		break;
2873 	case 0x1c:	/* Informational Exceptions Mode page, all devices */
2874 		if (subpcode > 0x0 && subpcode < 0xff)
2875 			goto bad_subpcode;
2876 		len = resp_iec_m_pg(ap, pcontrol, target);
2877 		offset += len;
2878 		break;
2879 	case 0x3f:	/* Read all Mode pages */
2880 		if (subpcode > 0x0 && subpcode < 0xff)
2881 			goto bad_subpcode;
2882 		len = resp_err_recov_pg(ap, pcontrol, target);
2883 		len += resp_disconnect_pg(ap + len, pcontrol, target);
2884 		if (is_disk) {
2885 			len += resp_format_pg(ap + len, pcontrol, target);
2886 			len += resp_caching_pg(ap + len, pcontrol, target);
2887 		} else if (is_zbc) {
2888 			len += resp_caching_pg(ap + len, pcontrol, target);
2889 		}
2890 		len += resp_ctrl_m_pg(ap + len, pcontrol, target);
2891 		if (0xff == subpcode)
2892 			len += resp_grouping_m_pg(ap + len, pcontrol, target);
2893 		len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2894 		if (0xff == subpcode) {
2895 			len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
2896 						  target_dev_id);
2897 			len += resp_sas_sha_m_spg(ap + len, pcontrol);
2898 		}
2899 		len += resp_iec_m_pg(ap + len, pcontrol, target);
2900 		offset += len;
2901 		break;
2902 	default:
2903 		goto bad_pcode;
2904 	}
2905 	if (msense_6)
2906 		arr[0] = offset - 1;
2907 	else
2908 		put_unaligned_be16((offset - 2), arr + 0);
2909 	return fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, offset));
2910 
2911 bad_pcode:
2912 	mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2913 	return check_condition_result;
2914 
2915 bad_subpcode:
2916 	mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2917 	return check_condition_result;
2918 }
2919 
2920 #define SDEBUG_MAX_MSELECT_SZ 512
2921 
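/*
 * Respond to MODE SELECT(6)/(10). Only the caching, control and
 * informational exceptions pages are changeable here; a successful
 * change raises a MODE PARAMETERS CHANGED unit attention.
 */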
2922 static int resp_mode_select(struct scsi_cmnd *scp,
2923 			    struct sdebug_dev_info *devip)
2924 {
2925 	int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
2926 	int param_len, res, mpage;
2927 	unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
2928 	unsigned char *cmd = scp->cmnd;
2929 	int mselect6 = (MODE_SELECT == cmd[0]);
2930 
2931 	memset(arr, 0, sizeof(arr));
2932 	pf = cmd[1] & 0x10;
2933 	sp = cmd[1] & 0x1;
2934 	param_len = mselect6 ? cmd[4] : get_unaligned_be16(cmd + 7);
2935 	if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
2936 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1);
2937 		return check_condition_result;
2938 	}
2939 	res = fetch_to_dev_buffer(scp, arr, param_len);
2940 	if (-1 == res)
2941 		return DID_ERROR << 16;
2942 	else if (sdebug_verbose && (res < param_len))
2943 		sdev_printk(KERN_INFO, scp->device,
2944 			    "%s: cdb indicated=%d, IO sent=%d bytes\n",
2945 			    __func__, param_len, res);
2946 	md_len = mselect6 ? (arr[0] + 1) : (get_unaligned_be16(arr + 0) + 2);
2947 	bd_len = mselect6 ? arr[3] : get_unaligned_be16(arr + 6);
2948 	off = bd_len + (mselect6 ? 4 : 8);
2949 	if (md_len > 2 || off >= res) {
2950 		mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
2951 		return check_condition_result;
2952 	}
2953 	mpage = arr[off] & 0x3f;
2954 	ps = !!(arr[off] & 0x80);
2955 	if (ps) {
2956 		mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 7);
2957 		return check_condition_result;
2958 	}
2959 	spf = !!(arr[off] & 0x40);
2960 	pg_len = spf ? (get_unaligned_be16(arr + off + 2) + 4) :
2961 		       (arr[off + 1] + 2);
2962 	if ((pg_len + off) > param_len) {
2963 		mk_sense_buffer(scp, ILLEGAL_REQUEST,
2964 				PARAMETER_LIST_LENGTH_ERR, 0);
2965 		return check_condition_result;
2966 	}
2967 	switch (mpage) {
2968 	case 0x8:      /* Caching Mode page */
2969 		if (caching_pg[1] == arr[off + 1]) {
2970 			memcpy(caching_pg + 2, arr + off + 2,
2971 			       sizeof(caching_pg) - 2);
2972 			goto set_mode_changed_ua;
2973 		}
2974 		break;
2975 	case 0xa:      /* Control Mode page */
2976 		if (ctrl_m_pg[1] == arr[off + 1]) {
2977 			memcpy(ctrl_m_pg + 2, arr + off + 2,
2978 			       sizeof(ctrl_m_pg) - 2);
2979 			if (ctrl_m_pg[4] & 0x8)
2980 				sdebug_wp = true;
2981 			else
2982 				sdebug_wp = false;
2983 			sdebug_dsense = !!(ctrl_m_pg[2] & 0x4);
2984 			goto set_mode_changed_ua;
2985 		}
2986 		break;
2987 	case 0x1c:      /* Informational Exceptions Mode page */
2988 		if (iec_m_pg[1] == arr[off + 1]) {
2989 			memcpy(iec_m_pg + 2, arr + off + 2,
2990 			       sizeof(iec_m_pg) - 2);
2991 			goto set_mode_changed_ua;
2992 		}
2993 		break;
2994 	default:
2995 		break;
2996 	}
2997 	mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 5);
2998 	return check_condition_result;
2999 set_mode_changed_ua:
3000 	set_bit(SDEBUG_UA_MODE_CHANGED, devip->uas_bm);
3001 	return 0;
3002 }
3003 
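/*
 * Editor's sketch (hypothetical helper, not part of the driver): the
 * offset arithmetic used by resp_mode_select() above. The first mode
 * page follows the parameter header (4 bytes for MODE SELECT(6), 8 for
 * MODE SELECT(10)) plus any block descriptors. E.g. MODE SELECT(10)
 * with one 8-byte block descriptor puts the first page at 8 + 8 = 16.
 */
static inline int sdeb_example_first_mpage_off(bool mselect6, int bd_len)
{
	return bd_len + (mselect6 ? 4 : 8);
}
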
3004 static int resp_temp_l_pg(unsigned char *arr)
3005 {
3006 	unsigned char temp_l_pg[] = {0x0, 0x0, 0x3, 0x2, 0x0, 38,
3007 				     0x0, 0x1, 0x3, 0x2, 0x0, 65,
3008 		};
3009 
3010 	memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
3011 	return sizeof(temp_l_pg);
3012 }
3013 
3014 static int resp_ie_l_pg(unsigned char *arr)
3015 {
3016 	unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
3017 		};
3018 
3019 	memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
3020 	if (iec_m_pg[2] & 0x4) {	/* TEST bit set */
3021 		arr[4] = THRESHOLD_EXCEEDED;
3022 		arr[5] = 0xff;
3023 	}
3024 	return sizeof(ie_l_pg);
3025 }
3026 
3027 static int resp_env_rep_l_spg(unsigned char *arr)
3028 {
3029 	unsigned char env_rep_l_spg[] = {0x0, 0x0, 0x23, 0x8,
3030 					 0x0, 40, 72, 0xff, 45, 18, 0, 0,
3031 					 0x1, 0x0, 0x23, 0x8,
3032 					 0x0, 55, 72, 35, 55, 45, 0, 0,
3033 		};
3034 
3035 	memcpy(arr, env_rep_l_spg, sizeof(env_rep_l_spg));
3036 	return sizeof(env_rep_l_spg);
3037 }
3038 
3039 #define SDEBUG_MAX_LSENSE_SZ 512
3040 
3041 static int resp_log_sense(struct scsi_cmnd *scp,
3042 			  struct sdebug_dev_info *devip)
3043 {
3044 	int ppc, sp, pcode, subpcode;
3045 	u32 alloc_len, len, n;
3046 	unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
3047 	unsigned char *cmd = scp->cmnd;
3048 
3049 	memset(arr, 0, sizeof(arr));
3050 	ppc = cmd[1] & 0x2;
3051 	sp = cmd[1] & 0x1;
3052 	if (ppc || sp) {
3053 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, ppc ? 1 : 0);
3054 		return check_condition_result;
3055 	}
3056 	pcode = cmd[2] & 0x3f;
3057 	subpcode = cmd[3] & 0xff;
3058 	alloc_len = get_unaligned_be16(cmd + 7);
3059 	arr[0] = pcode;
3060 	if (0 == subpcode) {
3061 		switch (pcode) {
3062 		case 0x0:	/* Supported log pages log page */
3063 			n = 4;
3064 			arr[n++] = 0x0;		/* this page */
3065 			arr[n++] = 0xd;		/* Temperature */
3066 			arr[n++] = 0x2f;	/* Informational exceptions */
3067 			arr[3] = n - 4;
3068 			break;
3069 		case 0xd:	/* Temperature log page */
3070 			arr[3] = resp_temp_l_pg(arr + 4);
3071 			break;
3072 		case 0x2f:	/* Informational exceptions log page */
3073 			arr[3] = resp_ie_l_pg(arr + 4);
3074 			break;
3075 		default:
3076 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
3077 			return check_condition_result;
3078 		}
3079 	} else if (0xff == subpcode) {
3080 		arr[0] |= 0x40;
3081 		arr[1] = subpcode;
3082 		switch (pcode) {
3083 		case 0x0:	/* Supported log pages and subpages log page */
3084 			n = 4;
3085 			arr[n++] = 0x0;
3086 			arr[n++] = 0x0;		/* 0,0 page */
3087 			arr[n++] = 0x0;
3088 			arr[n++] = 0xff;	/* this page */
3089 			arr[n++] = 0xd;
3090 			arr[n++] = 0x0;		/* Temperature */
3091 			arr[n++] = 0xd;
3092 			arr[n++] = 0x1;		/* Environment reporting */
3093 			arr[n++] = 0xd;
3094 			arr[n++] = 0xff;	/* all 0xd subpages */
3095 			arr[n++] = 0x2f;
3096 			arr[n++] = 0x0;	/* Informational exceptions */
3097 			arr[n++] = 0x2f;
3098 			arr[n++] = 0xff;	/* all 0x2f subpages */
3099 			arr[3] = n - 4;
3100 			break;
3101 		case 0xd:	/* Temperature subpages */
3102 			n = 4;
3103 			arr[n++] = 0xd;
3104 			arr[n++] = 0x0;		/* Temperature */
3105 			arr[n++] = 0xd;
3106 			arr[n++] = 0x1;		/* Environment reporting */
3107 			arr[n++] = 0xd;
3108 			arr[n++] = 0xff;	/* these subpages */
3109 			arr[3] = n - 4;
3110 			break;
3111 		case 0x2f:	/* Informational exceptions subpages */
3112 			n = 4;
3113 			arr[n++] = 0x2f;
3114 			arr[n++] = 0x0;		/* Informational exceptions */
3115 			arr[n++] = 0x2f;
3116 			arr[n++] = 0xff;	/* these subpages */
3117 			arr[3] = n - 4;
3118 			break;
3119 		default:
3120 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
3121 			return check_condition_result;
3122 		}
3123 	} else if (subpcode > 0) {
3124 		arr[0] |= 0x40;
3125 		arr[1] = subpcode;
3126 		if (pcode == 0xd && subpcode == 1)
3127 			arr[3] = resp_env_rep_l_spg(arr + 4);
3128 		else {
3129 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
3130 			return check_condition_result;
3131 		}
3132 	} else {
3133 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
3134 		return check_condition_result;
3135 	}
3136 	len = min_t(u32, get_unaligned_be16(arr + 2) + 4, alloc_len);
3137 	return fill_from_dev_buffer(scp, arr,
3138 		    min_t(u32, len, SDEBUG_MAX_INQ_ARR_SZ));
3139 }
3140 
3141 static inline bool sdebug_dev_is_zoned(struct sdebug_dev_info *devip)
3142 {
3143 	return devip->nr_zones != 0;
3144 }
3145 
3146 static struct sdeb_zone_state *zbc_zone(struct sdebug_dev_info *devip,
3147 					unsigned long long lba)
3148 {
3149 	u32 zno = lba >> devip->zsize_shift;
3150 	struct sdeb_zone_state *zsp;
3151 
3152 	if (devip->zcap == devip->zsize || zno < devip->nr_conv_zones)
3153 		return &devip->zstate[zno];
3154 
3155 	/*
3156 	 * If the zone capacity is less than the zone size, adjust for gap
3157 	 * zones.
3158 	 */
3159 	zno = 2 * zno - devip->nr_conv_zones;
3160 	WARN_ONCE(zno >= devip->nr_zones, "%u >= %u\n", zno, devip->nr_zones);
3161 	zsp = &devip->zstate[zno];
3162 	if (lba >= zsp->z_start + zsp->z_size)
3163 		zsp++;
3164 	WARN_ON_ONCE(lba >= zsp->z_start + zsp->z_size);
3165 	return zsp;
3166 }
3167 
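/*
 * Editor's sketch (worked example, an assumption drawn from the code
 * above): when zcap < zsize, zbc_zone() lays out zstate[] so that each
 * sequential zone after the conventional zones is followed by its gap
 * zone, hence logical zone number n (n >= nr_conv_zones) lands at index
 * 2 * n - nr_conv_zones. With nr_conv_zones == 4, an LBA in logical
 * zone 6 maps to index 2 * 6 - 4 = 8, and index 9 holds the trailing
 * gap zone that the zsp++ step above may advance into.
 */
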
3168 static inline bool zbc_zone_is_conv(struct sdeb_zone_state *zsp)
3169 {
3170 	return zsp->z_type == ZBC_ZTYPE_CNV;
3171 }
3172 
3173 static inline bool zbc_zone_is_gap(struct sdeb_zone_state *zsp)
3174 {
3175 	return zsp->z_type == ZBC_ZTYPE_GAP;
3176 }
3177 
3178 static inline bool zbc_zone_is_seq(struct sdeb_zone_state *zsp)
3179 {
3180 	return !zbc_zone_is_conv(zsp) && !zbc_zone_is_gap(zsp);
3181 }
3182 
3183 static void zbc_close_zone(struct sdebug_dev_info *devip,
3184 			   struct sdeb_zone_state *zsp)
3185 {
3186 	enum sdebug_z_cond zc;
3187 
3188 	if (!zbc_zone_is_seq(zsp))
3189 		return;
3190 
3191 	zc = zsp->z_cond;
3192 	if (!(zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN))
3193 		return;
3194 
3195 	if (zc == ZC2_IMPLICIT_OPEN)
3196 		devip->nr_imp_open--;
3197 	else
3198 		devip->nr_exp_open--;
3199 
3200 	if (zsp->z_wp == zsp->z_start) {
3201 		zsp->z_cond = ZC1_EMPTY;
3202 	} else {
3203 		zsp->z_cond = ZC4_CLOSED;
3204 		devip->nr_closed++;
3205 	}
3206 }
3207 
3208 static void zbc_close_imp_open_zone(struct sdebug_dev_info *devip)
3209 {
3210 	struct sdeb_zone_state *zsp = &devip->zstate[0];
3211 	unsigned int i;
3212 
3213 	for (i = 0; i < devip->nr_zones; i++, zsp++) {
3214 		if (zsp->z_cond == ZC2_IMPLICIT_OPEN) {
3215 			zbc_close_zone(devip, zsp);
3216 			return;
3217 		}
3218 	}
3219 }
3220 
3221 static void zbc_open_zone(struct sdebug_dev_info *devip,
3222 			  struct sdeb_zone_state *zsp, bool explicit)
3223 {
3224 	enum sdebug_z_cond zc;
3225 
3226 	if (!zbc_zone_is_seq(zsp))
3227 		return;
3228 
3229 	zc = zsp->z_cond;
3230 	if ((explicit && zc == ZC3_EXPLICIT_OPEN) ||
3231 	    (!explicit && zc == ZC2_IMPLICIT_OPEN))
3232 		return;
3233 
3234 	/* Close an implicit open zone if necessary */
3235 	if (explicit && zsp->z_cond == ZC2_IMPLICIT_OPEN)
3236 		zbc_close_zone(devip, zsp);
3237 	else if (devip->max_open &&
3238 		 devip->nr_imp_open + devip->nr_exp_open >= devip->max_open)
3239 		zbc_close_imp_open_zone(devip);
3240 
3241 	if (zsp->z_cond == ZC4_CLOSED)
3242 		devip->nr_closed--;
3243 	if (explicit) {
3244 		zsp->z_cond = ZC3_EXPLICIT_OPEN;
3245 		devip->nr_exp_open++;
3246 	} else {
3247 		zsp->z_cond = ZC2_IMPLICIT_OPEN;
3248 		devip->nr_imp_open++;
3249 	}
3250 }
3251 
3252 static inline void zbc_set_zone_full(struct sdebug_dev_info *devip,
3253 				     struct sdeb_zone_state *zsp)
3254 {
3255 	switch (zsp->z_cond) {
3256 	case ZC2_IMPLICIT_OPEN:
3257 		devip->nr_imp_open--;
3258 		break;
3259 	case ZC3_EXPLICIT_OPEN:
3260 		devip->nr_exp_open--;
3261 		break;
3262 	default:
3263 		WARN_ONCE(true, "Invalid zone %llu condition %x\n",
3264 			  zsp->z_start, zsp->z_cond);
3265 		break;
3266 	}
3267 	zsp->z_cond = ZC5_FULL;
3268 }
3269 
3270 static void zbc_inc_wp(struct sdebug_dev_info *devip,
3271 		       unsigned long long lba, unsigned int num)
3272 {
3273 	struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
3274 	unsigned long long n, end, zend = zsp->z_start + zsp->z_size;
3275 
3276 	if (!zbc_zone_is_seq(zsp))
3277 		return;
3278 
3279 	if (zsp->z_type == ZBC_ZTYPE_SWR) {
3280 		zsp->z_wp += num;
3281 		if (zsp->z_wp >= zend)
3282 			zbc_set_zone_full(devip, zsp);
3283 		return;
3284 	}
3285 
3286 	while (num) {
3287 		if (lba != zsp->z_wp)
3288 			zsp->z_non_seq_resource = true;
3289 
3290 		end = lba + num;
3291 		if (end >= zend) {
3292 			n = zend - lba;
3293 			zsp->z_wp = zend;
3294 		} else if (end > zsp->z_wp) {
3295 			n = num;
3296 			zsp->z_wp = end;
3297 		} else {
3298 			n = num;
3299 		}
3300 		if (zsp->z_wp >= zend)
3301 			zbc_set_zone_full(devip, zsp);
3302 
3303 		num -= n;
3304 		lba += n;
3305 		if (num) {
3306 			zsp++;
3307 			zend = zsp->z_start + zsp->z_size;
3308 		}
3309 	}
3310 }
3311 
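/*
 * Editor's note (illustrative summary of zbc_inc_wp() above): for a
 * sequential-write-required (SWR) zone the write pointer simply advances
 * by the number of blocks written, and the zone is marked full when the
 * WP reaches the zone end. For other sequential zones the loop also
 * tolerates writes behind or ahead of the WP, flagging the zone as a
 * non-sequential-write resource when the start LBA is not at the WP.
 */
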
3312 static int check_zbc_access_params(struct scsi_cmnd *scp,
3313 			unsigned long long lba, unsigned int num, bool write)
3314 {
3315 	struct scsi_device *sdp = scp->device;
3316 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
3317 	struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
3318 	struct sdeb_zone_state *zsp_end = zbc_zone(devip, lba + num - 1);
3319 
3320 	if (!write) {
3321 		/* For host-managed, reads cannot cross zone type boundaries */
3322 		if (zsp->z_type != zsp_end->z_type) {
3323 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
3324 					LBA_OUT_OF_RANGE,
3325 					READ_INVDATA_ASCQ);
3326 			return check_condition_result;
3327 		}
3328 		return 0;
3329 	}
3330 
3331 	/* Writing into a gap zone is not allowed */
3332 	if (zbc_zone_is_gap(zsp)) {
3333 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE,
3334 				ATTEMPT_ACCESS_GAP);
3335 		return check_condition_result;
3336 	}
3337 
3338 	/* No restrictions for writes within conventional zones */
3339 	if (zbc_zone_is_conv(zsp)) {
3340 		if (!zbc_zone_is_conv(zsp_end)) {
3341 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
3342 					LBA_OUT_OF_RANGE,
3343 					WRITE_BOUNDARY_ASCQ);
3344 			return check_condition_result;
3345 		}
3346 		return 0;
3347 	}
3348 
3349 	if (zsp->z_type == ZBC_ZTYPE_SWR) {
3350 		/* Writes cannot cross sequential zone boundaries */
3351 		if (zsp_end != zsp) {
3352 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
3353 					LBA_OUT_OF_RANGE,
3354 					WRITE_BOUNDARY_ASCQ);
3355 			return check_condition_result;
3356 		}
3357 		/* Cannot write full zones */
3358 		if (zsp->z_cond == ZC5_FULL) {
3359 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
3360 					INVALID_FIELD_IN_CDB, 0);
3361 			return check_condition_result;
3362 		}
3363 		/* Writes must be aligned to the zone WP */
3364 		if (lba != zsp->z_wp) {
3365 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
3366 					LBA_OUT_OF_RANGE,
3367 					UNALIGNED_WRITE_ASCQ);
3368 			return check_condition_result;
3369 		}
3370 	}
3371 
3372 	/* Handle implicit open of closed and empty zones */
3373 	if (zsp->z_cond == ZC1_EMPTY || zsp->z_cond == ZC4_CLOSED) {
3374 		if (devip->max_open &&
3375 		    devip->nr_exp_open >= devip->max_open) {
3376 			mk_sense_buffer(scp, DATA_PROTECT,
3377 					INSUFF_RES_ASC,
3378 					INSUFF_ZONE_ASCQ);
3379 			return check_condition_result;
3380 		}
3381 		zbc_open_zone(devip, zsp, false);
3382 	}
3383 
3384 	return 0;
3385 }
3386 
3387 static inline int check_device_access_params
3388 			(struct scsi_cmnd *scp, unsigned long long lba,
3389 			 unsigned int num, bool write)
3390 {
3391 	struct scsi_device *sdp = scp->device;
3392 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
3393 
3394 	if (lba + num > sdebug_capacity) {
3395 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
3396 		return check_condition_result;
3397 	}
3398 	/* transfer length excessive (tie in to block limits VPD page) */
3399 	if (num > sdebug_store_sectors) {
3400 		/* needs work to find which cdb byte 'num' comes from */
3401 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3402 		return check_condition_result;
3403 	}
3404 	if (write && unlikely(sdebug_wp)) {
3405 		mk_sense_buffer(scp, DATA_PROTECT, WRITE_PROTECTED, 0x2);
3406 		return check_condition_result;
3407 	}
3408 	if (sdebug_dev_is_zoned(devip))
3409 		return check_zbc_access_params(scp, lba, num, write);
3410 
3411 	return 0;
3412 }
3413 
3414 /*
3415  * Note: if BUG_ON() fires it usually indicates a problem with the parser
3416  * tables. Perhaps a missing F_FAKE_RW or FF_MEDIA_IO flag. Response functions
3417  * that access any of the "stores" in struct sdeb_store_info should call this
3418  * function with bug_if_fake_rw set to true.
3419  */
3420 static inline struct sdeb_store_info *devip2sip(struct sdebug_dev_info *devip,
3421 						bool bug_if_fake_rw)
3422 {
3423 	if (sdebug_fake_rw) {
3424 		BUG_ON(bug_if_fake_rw);	/* See note above */
3425 		return NULL;
3426 	}
3427 	return xa_load(per_store_ap, devip->sdbg_host->si_idx);
3428 }
3429 
3430 static inline void
3431 sdeb_read_lock(rwlock_t *lock)
3432 {
3433 	if (sdebug_no_rwlock)
3434 		__acquire(lock);
3435 	else
3436 		read_lock(lock);
3437 }
3438 
3439 static inline void
3440 sdeb_read_unlock(rwlock_t *lock)
3441 {
3442 	if (sdebug_no_rwlock)
3443 		__release(lock);
3444 	else
3445 		read_unlock(lock);
3446 }
3447 
3448 static inline void
3449 sdeb_write_lock(rwlock_t *lock)
3450 {
3451 	if (sdebug_no_rwlock)
3452 		__acquire(lock);
3453 	else
3454 		write_lock(lock);
3455 }
3456 
3457 static inline void
3458 sdeb_write_unlock(rwlock_t *lock)
3459 {
3460 	if (sdebug_no_rwlock)
3461 		__release(lock);
3462 	else
3463 		write_unlock(lock);
3464 }
3465 
3466 static inline void
3467 sdeb_data_read_lock(struct sdeb_store_info *sip)
3468 {
3469 	BUG_ON(!sip);
3470 
3471 	sdeb_read_lock(&sip->macc_data_lck);
3472 }
3473 
3474 static inline void
3475 sdeb_data_read_unlock(struct sdeb_store_info *sip)
3476 {
3477 	BUG_ON(!sip);
3478 
3479 	sdeb_read_unlock(&sip->macc_data_lck);
3480 }
3481 
3482 static inline void
3483 sdeb_data_write_lock(struct sdeb_store_info *sip)
3484 {
3485 	BUG_ON(!sip);
3486 
3487 	sdeb_write_lock(&sip->macc_data_lck);
3488 }
3489 
3490 static inline void
3491 sdeb_data_write_unlock(struct sdeb_store_info *sip)
3492 {
3493 	BUG_ON(!sip);
3494 
3495 	sdeb_write_unlock(&sip->macc_data_lck);
3496 }
3497 
3498 static inline void
3499 sdeb_data_sector_read_lock(struct sdeb_store_info *sip)
3500 {
3501 	BUG_ON(!sip);
3502 
3503 	sdeb_read_lock(&sip->macc_sector_lck);
3504 }
3505 
3506 static inline void
3507 sdeb_data_sector_read_unlock(struct sdeb_store_info *sip)
3508 {
3509 	BUG_ON(!sip);
3510 
3511 	sdeb_read_unlock(&sip->macc_sector_lck);
3512 }
3513 
3514 static inline void
3515 sdeb_data_sector_write_lock(struct sdeb_store_info *sip)
3516 {
3517 	BUG_ON(!sip);
3518 
3519 	sdeb_write_lock(&sip->macc_sector_lck);
3520 }
3521 
3522 static inline void
3523 sdeb_data_sector_write_unlock(struct sdeb_store_info *sip)
3524 {
3525 	BUG_ON(!sip);
3526 
3527 	sdeb_write_unlock(&sip->macc_sector_lck);
3528 }
3529 
3530 /*
3531  * Atomic locking:
3532  * We simplify the atomic model to allow only 1x atomic write and many non-
3533  * atomic reads or writes for all LBAs.
3534  *
3535  * A RW lock has a similar behaviour:
3536  * only 1x writer and many readers.
3537  *
3538  * So use a RW lock for per-device read and write locking:
3539  * an atomic access grabs the lock as a writer and a non-atomic access
3540  * grabs the lock as a reader.
3541  */
3542 
3543 static inline void
3544 sdeb_data_lock(struct sdeb_store_info *sip, bool atomic)
3545 {
3546 	if (atomic)
3547 		sdeb_data_write_lock(sip);
3548 	else
3549 		sdeb_data_read_lock(sip);
3550 }
3551 
3552 static inline void
3553 sdeb_data_unlock(struct sdeb_store_info *sip, bool atomic)
3554 {
3555 	if (atomic)
3556 		sdeb_data_write_unlock(sip);
3557 	else
3558 		sdeb_data_read_unlock(sip);
3559 }
3560 
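/*
 * Editor's sketch (hypothetical usage, not driver code): an atomic write
 * takes the per-store data lock as the single writer, so it excludes all
 * concurrent readers and writers for its whole extent.
 */
static inline void sdeb_example_atomic_section(struct sdeb_store_info *sip)
{
	sdeb_data_lock(sip, true);	/* writer side: exclusive access */
	/* ... transfer every LBA of the atomic write here ... */
	sdeb_data_unlock(sip, true);
}
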
3561 /* Allow many reads but only 1x write per sector */
3562 static inline void
3563 sdeb_data_sector_lock(struct sdeb_store_info *sip, bool do_write)
3564 {
3565 	if (do_write)
3566 		sdeb_data_sector_write_lock(sip);
3567 	else
3568 		sdeb_data_sector_read_lock(sip);
3569 }
3570 
3571 static inline void
3572 sdeb_data_sector_unlock(struct sdeb_store_info *sip, bool do_write)
3573 {
3574 	if (do_write)
3575 		sdeb_data_sector_write_unlock(sip);
3576 	else
3577 		sdeb_data_sector_read_unlock(sip);
3578 }
3579 
3580 static inline void
3581 sdeb_meta_read_lock(struct sdeb_store_info *sip)
3582 {
3583 	if (sdebug_no_rwlock) {
3584 		if (sip)
3585 			__acquire(&sip->macc_meta_lck);
3586 		else
3587 			__acquire(&sdeb_fake_rw_lck);
3588 	} else {
3589 		if (sip)
3590 			read_lock(&sip->macc_meta_lck);
3591 		else
3592 			read_lock(&sdeb_fake_rw_lck);
3593 	}
3594 }
3595 
3596 static inline void
3597 sdeb_meta_read_unlock(struct sdeb_store_info *sip)
3598 {
3599 	if (sdebug_no_rwlock) {
3600 		if (sip)
3601 			__release(&sip->macc_meta_lck);
3602 		else
3603 			__release(&sdeb_fake_rw_lck);
3604 	} else {
3605 		if (sip)
3606 			read_unlock(&sip->macc_meta_lck);
3607 		else
3608 			read_unlock(&sdeb_fake_rw_lck);
3609 	}
3610 }
3611 
3612 static inline void
3613 sdeb_meta_write_lock(struct sdeb_store_info *sip)
3614 {
3615 	if (sdebug_no_rwlock) {
3616 		if (sip)
3617 			__acquire(&sip->macc_meta_lck);
3618 		else
3619 			__acquire(&sdeb_fake_rw_lck);
3620 	} else {
3621 		if (sip)
3622 			write_lock(&sip->macc_meta_lck);
3623 		else
3624 			write_lock(&sdeb_fake_rw_lck);
3625 	}
3626 }
3627 
3628 static inline void
3629 sdeb_meta_write_unlock(struct sdeb_store_info *sip)
3630 {
3631 	if (sdebug_no_rwlock) {
3632 		if (sip)
3633 			__release(&sip->macc_meta_lck);
3634 		else
3635 			__release(&sdeb_fake_rw_lck);
3636 	} else {
3637 		if (sip)
3638 			write_unlock(&sip->macc_meta_lck);
3639 		else
3640 			write_unlock(&sdeb_fake_rw_lck);
3641 	}
3642 }
3643 
3644 /* Returns number of bytes copied or -1 if error. */
3645 static int do_device_access(struct sdeb_store_info *sip, struct scsi_cmnd *scp,
3646 			    u32 sg_skip, u64 lba, u32 num, u8 group_number,
3647 			    bool do_write, bool atomic)
3648 {
3649 	int ret;
3650 	u64 block;
3651 	enum dma_data_direction dir;
3652 	struct scsi_data_buffer *sdb = &scp->sdb;
3653 	u8 *fsp;
3654 	int i, total = 0;
3655 
3656 	/*
3657 	 * Even though reads are inherently atomic (in this driver), we expect
3658 	 * the atomic flag only for writes.
3659 	 */
3660 	if (!do_write && atomic)
3661 		return -1;
3662 
3663 	if (do_write) {
3664 		dir = DMA_TO_DEVICE;
3665 		write_since_sync = true;
3666 	} else {
3667 		dir = DMA_FROM_DEVICE;
3668 	}
3669 
3670 	if (!sdb->length || !sip)
3671 		return 0;
3672 	if (scp->sc_data_direction != dir)
3673 		return -1;
3674 
3675 	if (do_write && group_number < ARRAY_SIZE(writes_by_group_number))
3676 		atomic_long_inc(&writes_by_group_number[group_number]);
3677 
3678 	fsp = sip->storep;
3679 
3680 	block = do_div(lba, sdebug_store_sectors);
3681 
3682 	/* Only allow 1x atomic write or multiple non-atomic writes at any given time */
3683 	sdeb_data_lock(sip, atomic);
3684 	for (i = 0; i < num; i++) {
3685 		/* We shouldn't need to lock for atomic writes, but do it anyway */
3686 		sdeb_data_sector_lock(sip, do_write);
3687 		ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
3688 		   fsp + (block * sdebug_sector_size),
3689 		   sdebug_sector_size, sg_skip, do_write);
3690 		sdeb_data_sector_unlock(sip, do_write);
3691 		total += ret;
3692 		if (ret != sdebug_sector_size)
3693 			break;
3694 		sg_skip += sdebug_sector_size;
3695 		if (++block >= sdebug_store_sectors)
3696 			block = 0;
3697 	}
3698 	sdeb_data_unlock(sip, atomic);
3699 
3700 	return total;
3701 }
3702 
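/*
 * Editor's sketch (hypothetical helper): do_device_access() treats the
 * backing store as a ring. do_div(lba, sdebug_store_sectors) leaves the
 * quotient in lba and returns the remainder, i.e. the starting sector
 * within the store, and the copy loop wraps back to sector 0 when it
 * runs off the end. Equivalent next-sector arithmetic:
 */
static inline u64 sdeb_example_next_sector(u64 sector)
{
	return (sector + 1 >= sdebug_store_sectors) ? 0 : sector + 1;
}
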
3703 /* Returns number of bytes copied or -1 if error. */
3704 static int do_dout_fetch(struct scsi_cmnd *scp, u32 num, u8 *doutp)
3705 {
3706 	struct scsi_data_buffer *sdb = &scp->sdb;
3707 
3708 	if (!sdb->length)
3709 		return 0;
3710 	if (scp->sc_data_direction != DMA_TO_DEVICE)
3711 		return -1;
3712 	return sg_copy_buffer(sdb->table.sgl, sdb->table.nents, doutp,
3713 			      num * sdebug_sector_size, 0, true);
3714 }
3715 
3716 /* If the first num blocks of arr compare equal to sip->storep+lba, then
3717  * copy the second half of arr (the new data) into sip->storep+lba and
3718  * return true. If the comparison fails then return false. */
3719 static bool comp_write_worker(struct sdeb_store_info *sip, u64 lba, u32 num,
3720 			      const u8 *arr, bool compare_only)
3721 {
3722 	bool res;
3723 	u64 block, rest = 0;
3724 	u32 store_blks = sdebug_store_sectors;
3725 	u32 lb_size = sdebug_sector_size;
3726 	u8 *fsp = sip->storep;
3727 
3728 	block = do_div(lba, store_blks);
3729 	if (block + num > store_blks)
3730 		rest = block + num - store_blks;
3731 
3732 	res = !memcmp(fsp + (block * lb_size), arr, (num - rest) * lb_size);
3733 	if (!res)
3734 		return res;
3735 	if (rest)
3736 		res = !memcmp(fsp, arr + ((num - rest) * lb_size),
3737 			      rest * lb_size);
3738 	if (!res)
3739 		return res;
3740 	if (compare_only)
3741 		return true;
3742 	arr += num * lb_size;
3743 	memcpy(fsp + (block * lb_size), arr, (num - rest) * lb_size);
3744 	if (rest)
3745 		memcpy(fsp, arr + ((num - rest) * lb_size), rest * lb_size);
3746 	return res;
3747 }
3748 
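/*
 * Editor's note (data layout, for reference): callers of
 * comp_write_worker() pass arr holding 2 * num blocks: the first num
 * blocks are the verify data compared against the store, the second num
 * blocks are written back on a successful compare (see resp_comp_write()
 * further below, which fetches dnum = 2 * num blocks from the data-out
 * buffer).
 */
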
3749 static __be16 dif_compute_csum(const void *buf, int len)
3750 {
3751 	__be16 csum;
3752 
3753 	if (sdebug_guard)
3754 		csum = (__force __be16)ip_compute_csum(buf, len);
3755 	else
3756 		csum = cpu_to_be16(crc_t10dif(buf, len));
3757 
3758 	return csum;
3759 }
3760 
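/*
 * Editor's note: sdebug_guard selects the guard tag flavour used above;
 * a non-zero value computes the internet checksum via ip_compute_csum(),
 * otherwise the T10-DIF CRC16 from crc_t10dif() is used.
 */
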
3761 static int dif_verify(struct t10_pi_tuple *sdt, const void *data,
3762 		      sector_t sector, u32 ei_lba)
3763 {
3764 	__be16 csum = dif_compute_csum(data, sdebug_sector_size);
3765 
3766 	if (sdt->guard_tag != csum) {
3767 		pr_err("GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
3768 			(unsigned long)sector,
3769 			be16_to_cpu(sdt->guard_tag),
3770 			be16_to_cpu(csum));
3771 		return 0x01;
3772 	}
3773 	if (sdebug_dif == T10_PI_TYPE1_PROTECTION &&
3774 	    be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
3775 		pr_err("REF check failed on sector %lu\n",
3776 			(unsigned long)sector);
3777 		return 0x03;
3778 	}
3779 	if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3780 	    be32_to_cpu(sdt->ref_tag) != ei_lba) {
3781 		pr_err("REF check failed on sector %lu\n",
3782 			(unsigned long)sector);
3783 		return 0x03;
3784 	}
3785 	return 0;
3786 }
3787 
3788 static void dif_copy_prot(struct scsi_cmnd *scp, sector_t sector,
3789 			  unsigned int sectors, bool read)
3790 {
3791 	size_t resid;
3792 	void *paddr;
3793 	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3794 						scp->device->hostdata, true);
3795 	struct t10_pi_tuple *dif_storep = sip->dif_storep;
3796 	const void *dif_store_end = dif_storep + sdebug_store_sectors;
3797 	struct sg_mapping_iter miter;
3798 
3799 	/* Bytes of protection data to copy into sgl */
3800 	resid = sectors * sizeof(*dif_storep);
3801 
3802 	sg_miter_start(&miter, scsi_prot_sglist(scp),
3803 		       scsi_prot_sg_count(scp), SG_MITER_ATOMIC |
3804 		       (read ? SG_MITER_TO_SG : SG_MITER_FROM_SG));
3805 
3806 	while (sg_miter_next(&miter) && resid > 0) {
3807 		size_t len = min_t(size_t, miter.length, resid);
3808 		void *start = dif_store(sip, sector);
3809 		size_t rest = 0;
3810 
3811 		if (dif_store_end < start + len)
3812 			rest = start + len - dif_store_end;
3813 
3814 		paddr = miter.addr;
3815 
3816 		if (read)
3817 			memcpy(paddr, start, len - rest);
3818 		else
3819 			memcpy(start, paddr, len - rest);
3820 
3821 		if (rest) {
3822 			if (read)
3823 				memcpy(paddr + len - rest, dif_storep, rest);
3824 			else
3825 				memcpy(dif_storep, paddr + len - rest, rest);
3826 		}
3827 
3828 		sector += len / sizeof(*dif_storep);
3829 		resid -= len;
3830 	}
3831 	sg_miter_stop(&miter);
3832 }
3833 
3834 static int prot_verify_read(struct scsi_cmnd *scp, sector_t start_sec,
3835 			    unsigned int sectors, u32 ei_lba)
3836 {
3837 	int ret = 0;
3838 	unsigned int i;
3839 	sector_t sector;
3840 	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3841 						scp->device->hostdata, true);
3842 	struct t10_pi_tuple *sdt;
3843 
3844 	for (i = 0; i < sectors; i++, ei_lba++) {
3845 		sector = start_sec + i;
3846 		sdt = dif_store(sip, sector);
3847 
3848 		if (sdt->app_tag == cpu_to_be16(0xffff))
3849 			continue;
3850 
3851 		/*
3852 		 * Because scsi_debug acts as both initiator and
3853 		 * target we proceed to verify the PI even if
3854 		 * RDPROTECT=3. This is done so the "initiator" knows
3855 		 * which type of error to return. Otherwise we would
3856 		 * have to iterate over the PI twice.
3857 		 */
3858 		if (scp->cmnd[1] >> 5) { /* RDPROTECT */
3859 			ret = dif_verify(sdt, lba2fake_store(sip, sector),
3860 					 sector, ei_lba);
3861 			if (ret) {
3862 				dif_errors++;
3863 				break;
3864 			}
3865 		}
3866 	}
3867 
3868 	dif_copy_prot(scp, start_sec, sectors, true);
3869 	dix_reads++;
3870 
3871 	return ret;
3872 }
3873 
3874 static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3875 {
3876 	bool check_prot;
3877 	u32 num;
3878 	u32 ei_lba;
3879 	int ret;
3880 	u64 lba;
3881 	struct sdeb_store_info *sip = devip2sip(devip, true);
3882 	u8 *cmd = scp->cmnd;
3883 	bool meta_data_locked = false;
3884 
3885 	switch (cmd[0]) {
3886 	case READ_16:
3887 		ei_lba = 0;
3888 		lba = get_unaligned_be64(cmd + 2);
3889 		num = get_unaligned_be32(cmd + 10);
3890 		check_prot = true;
3891 		break;
3892 	case READ_10:
3893 		ei_lba = 0;
3894 		lba = get_unaligned_be32(cmd + 2);
3895 		num = get_unaligned_be16(cmd + 7);
3896 		check_prot = true;
3897 		break;
3898 	case READ_6:
3899 		ei_lba = 0;
3900 		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
3901 		      (u32)(cmd[1] & 0x1f) << 16;
3902 		num = (0 == cmd[4]) ? 256 : cmd[4];
3903 		check_prot = true;
3904 		break;
3905 	case READ_12:
3906 		ei_lba = 0;
3907 		lba = get_unaligned_be32(cmd + 2);
3908 		num = get_unaligned_be32(cmd + 6);
3909 		check_prot = true;
3910 		break;
3911 	case XDWRITEREAD_10:
3912 		ei_lba = 0;
3913 		lba = get_unaligned_be32(cmd + 2);
3914 		num = get_unaligned_be16(cmd + 7);
3915 		check_prot = false;
3916 		break;
3917 	default:	/* assume READ(32) */
3918 		lba = get_unaligned_be64(cmd + 12);
3919 		ei_lba = get_unaligned_be32(cmd + 20);
3920 		num = get_unaligned_be32(cmd + 28);
3921 		check_prot = false;
3922 		break;
3923 	}
3924 	if (unlikely(have_dif_prot && check_prot)) {
3925 		if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3926 		    (cmd[1] & 0xe0)) {
3927 			mk_sense_invalid_opcode(scp);
3928 			return check_condition_result;
3929 		}
3930 		if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3931 		     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3932 		    (cmd[1] & 0xe0) == 0)
3933 			sdev_printk(KERN_ERR, scp->device, "Unprotected RD "
3934 				    "to DIF device\n");
3935 	}
3936 	if (unlikely((sdebug_opts & SDEBUG_OPT_SHORT_TRANSFER) &&
3937 		     atomic_read(&sdeb_inject_pending))) {
3938 		num /= 2;
3939 		atomic_set(&sdeb_inject_pending, 0);
3940 	}
3941 
3942 	/*
3943 	 * When checking device access params, for reads we only check data
3944 	 * versus what is set at init time, so no need to lock.
3945 	 */
3946 	ret = check_device_access_params(scp, lba, num, false);
3947 	if (ret)
3948 		return ret;
3949 	if (unlikely((SDEBUG_OPT_MEDIUM_ERR & sdebug_opts) &&
3950 		     (lba <= (sdebug_medium_error_start + sdebug_medium_error_count - 1)) &&
3951 		     ((lba + num) > sdebug_medium_error_start))) {
3952 		/* claim unrecoverable read error */
3953 		mk_sense_buffer(scp, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
3954 		/* set info field and valid bit for fixed descriptor */
3955 		if (0x70 == (scp->sense_buffer[0] & 0x7f)) {
3956 			scp->sense_buffer[0] |= 0x80;	/* Valid bit */
3957 			ret = (lba < OPT_MEDIUM_ERR_ADDR)
3958 			      ? OPT_MEDIUM_ERR_ADDR : (int)lba;
3959 			put_unaligned_be32(ret, scp->sense_buffer + 3);
3960 		}
3961 		scsi_set_resid(scp, scsi_bufflen(scp));
3962 		return check_condition_result;
3963 	}
3964 
3965 	if (sdebug_dev_is_zoned(devip) ||
3966 	    (sdebug_dix && scsi_prot_sg_count(scp)))  {
3967 		sdeb_meta_read_lock(sip);
3968 		meta_data_locked = true;
3969 	}
3970 
3971 	/* DIX + T10 DIF */
3972 	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3973 		switch (prot_verify_read(scp, lba, num, ei_lba)) {
3974 		case 1: /* Guard tag error */
3975 			if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */
3976 				sdeb_meta_read_unlock(sip);
3977 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3978 				return check_condition_result;
3979 			} else if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) {
3980 				sdeb_meta_read_unlock(sip);
3981 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3982 				return illegal_condition_result;
3983 			}
3984 			break;
3985 		case 3: /* Reference tag error */
3986 			if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */
3987 				sdeb_meta_read_unlock(sip);
3988 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3);
3989 				return check_condition_result;
3990 			} else if (scp->prot_flags & SCSI_PROT_REF_CHECK) {
3991 				sdeb_meta_read_unlock(sip);
3992 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3);
3993 				return illegal_condition_result;
3994 			}
3995 			break;
3996 		}
3997 	}
3998 
3999 	ret = do_device_access(sip, scp, 0, lba, num, 0, false, false);
4000 	if (meta_data_locked)
4001 		sdeb_meta_read_unlock(sip);
4002 	if (unlikely(ret == -1))
4003 		return DID_ERROR << 16;
4004 
4005 	scsi_set_resid(scp, scsi_bufflen(scp) - ret);
4006 
4007 	if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
4008 		     atomic_read(&sdeb_inject_pending))) {
4009 		if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
4010 			mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
4011 			atomic_set(&sdeb_inject_pending, 0);
4012 			return check_condition_result;
4013 		} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
4014 			/* Logical block guard check failed */
4015 			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
4016 			atomic_set(&sdeb_inject_pending, 0);
4017 			return illegal_condition_result;
4018 		} else if (SDEBUG_OPT_DIX_ERR & sdebug_opts) {
4019 			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
4020 			atomic_set(&sdeb_inject_pending, 0);
4021 			return illegal_condition_result;
4022 		}
4023 	}
4024 	return 0;
4025 }
4026 
4027 static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
4028 			     unsigned int sectors, u32 ei_lba)
4029 {
4030 	int ret;
4031 	struct t10_pi_tuple *sdt;
4032 	void *daddr;
4033 	sector_t sector = start_sec;
4034 	int ppage_offset;
4035 	int dpage_offset;
4036 	struct sg_mapping_iter diter;
4037 	struct sg_mapping_iter piter;
4038 
4039 	BUG_ON(scsi_sg_count(SCpnt) == 0);
4040 	BUG_ON(scsi_prot_sg_count(SCpnt) == 0);
4041 
4042 	sg_miter_start(&piter, scsi_prot_sglist(SCpnt),
4043 			scsi_prot_sg_count(SCpnt),
4044 			SG_MITER_ATOMIC | SG_MITER_FROM_SG);
4045 	sg_miter_start(&diter, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
4046 			SG_MITER_ATOMIC | SG_MITER_FROM_SG);
4047 
4048 	/* For each protection page */
4049 	while (sg_miter_next(&piter)) {
4050 		dpage_offset = 0;
4051 		if (WARN_ON(!sg_miter_next(&diter))) {
4052 			ret = 0x01;
4053 			goto out;
4054 		}
4055 
4056 		for (ppage_offset = 0; ppage_offset < piter.length;
4057 		     ppage_offset += sizeof(struct t10_pi_tuple)) {
4058 			/* If we're at the end of the current
4059 			 * data page advance to the next one
4060 			 */
4061 			if (dpage_offset >= diter.length) {
4062 				if (WARN_ON(!sg_miter_next(&diter))) {
4063 					ret = 0x01;
4064 					goto out;
4065 				}
4066 				dpage_offset = 0;
4067 			}
4068 
4069 			sdt = piter.addr + ppage_offset;
4070 			daddr = diter.addr + dpage_offset;
4071 
4072 			if (SCpnt->cmnd[1] >> 5 != 3) { /* WRPROTECT */
4073 				ret = dif_verify(sdt, daddr, sector, ei_lba);
4074 				if (ret)
4075 					goto out;
4076 			}
4077 
4078 			sector++;
4079 			ei_lba++;
4080 			dpage_offset += sdebug_sector_size;
4081 		}
4082 		diter.consumed = dpage_offset;
4083 		sg_miter_stop(&diter);
4084 	}
4085 	sg_miter_stop(&piter);
4086 
4087 	dif_copy_prot(SCpnt, start_sec, sectors, false);
4088 	dix_writes++;
4089 
4090 	return 0;
4091 
4092 out:
4093 	dif_errors++;
4094 	sg_miter_stop(&diter);
4095 	sg_miter_stop(&piter);
4096 	return ret;
4097 }
4098 
4099 static unsigned long lba_to_map_index(sector_t lba)
4100 {
4101 	if (sdebug_unmap_alignment)
4102 		lba += sdebug_unmap_granularity - sdebug_unmap_alignment;
4103 	sector_div(lba, sdebug_unmap_granularity);
4104 	return lba;
4105 }
4106 
4107 static sector_t map_index_to_lba(unsigned long index)
4108 {
4109 	sector_t lba = index * sdebug_unmap_granularity;
4110 
4111 	if (sdebug_unmap_alignment)
4112 		lba -= sdebug_unmap_granularity - sdebug_unmap_alignment;
4113 	return lba;
4114 }
4115 
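/*
 * Editor's sketch (worked example): with sdebug_unmap_granularity == 4
 * and sdebug_unmap_alignment == 1, lba_to_map_index() biases the LBA by
 * granularity - alignment = 3 before dividing, so LBA 0 -> index 0,
 * LBAs 1..4 -> index 1, LBAs 5..8 -> index 2. map_index_to_lba()
 * inverts this: index 1 -> LBA 1, index 2 -> LBA 5.
 */
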
4116 static unsigned int map_state(struct sdeb_store_info *sip, sector_t lba,
4117 			      unsigned int *num)
4118 {
4119 	sector_t end;
4120 	unsigned int mapped;
4121 	unsigned long index;
4122 	unsigned long next;
4123 
4124 	index = lba_to_map_index(lba);
4125 	mapped = test_bit(index, sip->map_storep);
4126 
4127 	if (mapped)
4128 		next = find_next_zero_bit(sip->map_storep, map_size, index);
4129 	else
4130 		next = find_next_bit(sip->map_storep, map_size, index);
4131 
4132 	end = min_t(sector_t, sdebug_store_sectors,  map_index_to_lba(next));
4133 	*num = end - lba;
4134 	return mapped;
4135 }
4136 
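/*
 * Editor's note (illustrative): map_state() above reports whether lba is
 * mapped and, via *num, how many following blocks share that state. It
 * scans the bitmap for the next bit with the opposite value and clamps
 * the run to the end of the store. E.g. if index 5 is mapped and index 7
 * is the next zero bit, the run extends up to map_index_to_lba(7).
 */
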
4137 static void map_region(struct sdeb_store_info *sip, sector_t lba,
4138 		       unsigned int len)
4139 {
4140 	sector_t end = lba + len;
4141 
4142 	while (lba < end) {
4143 		unsigned long index = lba_to_map_index(lba);
4144 
4145 		if (index < map_size)
4146 			set_bit(index, sip->map_storep);
4147 
4148 		lba = map_index_to_lba(index + 1);
4149 	}
4150 }
4151 
4152 static void unmap_region(struct sdeb_store_info *sip, sector_t lba,
4153 			 unsigned int len)
4154 {
4155 	sector_t end = lba + len;
4156 	u8 *fsp = sip->storep;
4157 
4158 	while (lba < end) {
4159 		unsigned long index = lba_to_map_index(lba);
4160 
4161 		if (lba == map_index_to_lba(index) &&
4162 		    lba + sdebug_unmap_granularity <= end &&
4163 		    index < map_size) {
4164 			clear_bit(index, sip->map_storep);
4165 			if (sdebug_lbprz) {  /* for LBPRZ=2 return 0xff_s */
4166 				memset(fsp + lba * sdebug_sector_size,
4167 				       (sdebug_lbprz & 1) ? 0 : 0xff,
4168 				       sdebug_sector_size *
4169 				       sdebug_unmap_granularity);
4170 			}
4171 			if (sip->dif_storep) {
4172 				memset(sip->dif_storep + lba, 0xff,
4173 				       sizeof(*sip->dif_storep) *
4174 				       sdebug_unmap_granularity);
4175 			}
4176 		}
4177 		lba = map_index_to_lba(index + 1);
4178 	}
4179 }
4180 
4181 static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4182 {
4183 	bool check_prot;
4184 	u32 num;
4185 	u8 group = 0;
4186 	u32 ei_lba;
4187 	int ret;
4188 	u64 lba;
4189 	struct sdeb_store_info *sip = devip2sip(devip, true);
4190 	u8 *cmd = scp->cmnd;
4191 	bool meta_data_locked = false;
4192 
4193 	switch (cmd[0]) {
4194 	case WRITE_16:
4195 		ei_lba = 0;
4196 		lba = get_unaligned_be64(cmd + 2);
4197 		num = get_unaligned_be32(cmd + 10);
4198 		group = cmd[14] & 0x3f;
4199 		check_prot = true;
4200 		break;
4201 	case WRITE_10:
4202 		ei_lba = 0;
4203 		lba = get_unaligned_be32(cmd + 2);
4204 		group = cmd[6] & 0x3f;
4205 		num = get_unaligned_be16(cmd + 7);
4206 		check_prot = true;
4207 		break;
4208 	case WRITE_6:
4209 		ei_lba = 0;
4210 		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
4211 		      (u32)(cmd[1] & 0x1f) << 16;
4212 		num = (0 == cmd[4]) ? 256 : cmd[4];
4213 		check_prot = true;
4214 		break;
4215 	case WRITE_12:
4216 		ei_lba = 0;
4217 		lba = get_unaligned_be32(cmd + 2);
4218 		num = get_unaligned_be32(cmd + 6);
4219 		group = cmd[10] & 0x3f;	/* WRITE(12) group number is in byte 10 */
4220 		check_prot = true;
4221 		break;
4222 	case 0x53:	/* XDWRITEREAD(10) */
4223 		ei_lba = 0;
4224 		lba = get_unaligned_be32(cmd + 2);
4225 		group = cmd[6] & 0x1f;
4226 		num = get_unaligned_be16(cmd + 7);
4227 		check_prot = false;
4228 		break;
4229 	default:	/* assume WRITE(32) */
4230 		group = cmd[6] & 0x3f;
4231 		lba = get_unaligned_be64(cmd + 12);
4232 		ei_lba = get_unaligned_be32(cmd + 20);
4233 		num = get_unaligned_be32(cmd + 28);
4234 		check_prot = false;
4235 		break;
4236 	}
4237 	if (unlikely(have_dif_prot && check_prot)) {
4238 		if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
4239 		    (cmd[1] & 0xe0)) {
4240 			mk_sense_invalid_opcode(scp);
4241 			return check_condition_result;
4242 		}
4243 		if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
4244 		     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
4245 		    (cmd[1] & 0xe0) == 0)
4246 			sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
4247 				    "to DIF device\n");
4248 	}
4249 
4250 	if (sdebug_dev_is_zoned(devip) ||
4251 	    (sdebug_dix && scsi_prot_sg_count(scp)) ||
4252 	    scsi_debug_lbp())  {
4253 		sdeb_meta_write_lock(sip);
4254 		meta_data_locked = true;
4255 	}
4256 
4257 	ret = check_device_access_params(scp, lba, num, true);
4258 	if (ret) {
4259 		if (meta_data_locked)
4260 			sdeb_meta_write_unlock(sip);
4261 		return ret;
4262 	}
4263 
4264 	/* DIX + T10 DIF */
4265 	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
4266 		switch (prot_verify_write(scp, lba, num, ei_lba)) {
4267 		case 1: /* Guard tag error */
4268 			if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) {
4269 				sdeb_meta_write_unlock(sip);
4270 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
4271 				return illegal_condition_result;
4272 			} else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */
4273 				sdeb_meta_write_unlock(sip);
4274 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
4275 				return check_condition_result;
4276 			}
4277 			break;
4278 		case 3: /* Reference tag error */
4279 			if (scp->prot_flags & SCSI_PROT_REF_CHECK) {
4280 				sdeb_meta_write_unlock(sip);
4281 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3);
4282 				return illegal_condition_result;
4283 			} else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */
4284 				sdeb_meta_write_unlock(sip);
4285 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3);
4286 				return check_condition_result;
4287 			}
4288 			break;
4289 		}
4290 	}
4291 
4292 	ret = do_device_access(sip, scp, 0, lba, num, group, true, false);
4293 	if (unlikely(scsi_debug_lbp()))
4294 		map_region(sip, lba, num);
4295 
4296 	/* If ZBC zone then bump its write pointer */
4297 	if (sdebug_dev_is_zoned(devip))
4298 		zbc_inc_wp(devip, lba, num);
4299 	if (meta_data_locked)
4300 		sdeb_meta_write_unlock(sip);
4301 
4302 	if (unlikely(-1 == ret))
4303 		return DID_ERROR << 16;
4304 	else if (unlikely(sdebug_verbose &&
4305 			  (ret < (num * sdebug_sector_size))))
4306 		sdev_printk(KERN_INFO, scp->device,
4307 			    "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
4308 			    my_name, num * sdebug_sector_size, ret);
4309 
4310 	if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
4311 		     atomic_read(&sdeb_inject_pending))) {
4312 		if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
4313 			mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
4314 			atomic_set(&sdeb_inject_pending, 0);
4315 			return check_condition_result;
4316 		} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
4317 			/* Logical block guard check failed */
4318 			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
4319 			atomic_set(&sdeb_inject_pending, 0);
4320 			return illegal_condition_result;
4321 		} else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
4322 			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
4323 			atomic_set(&sdeb_inject_pending, 0);
4324 			return illegal_condition_result;
4325 		}
4326 	}
4327 	return 0;
4328 }
4329 
4330 /*
4331  * T10 has only specified WRITE SCATTERED(16) and WRITE SCATTERED(32).
4332  * No READ GATHERED yet (requires bidi or long cdb holding gather list).
4333  */
4334 static int resp_write_scat(struct scsi_cmnd *scp,
4335 			   struct sdebug_dev_info *devip)
4336 {
4337 	u8 *cmd = scp->cmnd;
4338 	u8 *lrdp = NULL;
4339 	u8 *up;
4340 	struct sdeb_store_info *sip = devip2sip(devip, true);
4341 	u8 wrprotect;
4342 	u16 lbdof, num_lrd, k;
4343 	u32 num, num_by, bt_len, lbdof_blen, sg_off, cum_lb;
4344 	u32 lb_size = sdebug_sector_size;
4345 	u32 ei_lba;
4346 	u64 lba;
4347 	u8 group;
4348 	int ret, res;
4349 	bool is_16;
4350 	static const u32 lrd_size = 32; /* + parameter list header size */
4351 
4352 	if (cmd[0] == VARIABLE_LENGTH_CMD) {
4353 		is_16 = false;
4354 		group = cmd[6] & 0x3f;
4355 		wrprotect = (cmd[10] >> 5) & 0x7;
4356 		lbdof = get_unaligned_be16(cmd + 12);
4357 		num_lrd = get_unaligned_be16(cmd + 16);
4358 		bt_len = get_unaligned_be32(cmd + 28);
4359 	} else {        /* that leaves WRITE SCATTERED(16) */
4360 		is_16 = true;
4361 		wrprotect = (cmd[2] >> 5) & 0x7;
4362 		lbdof = get_unaligned_be16(cmd + 4);
4363 		num_lrd = get_unaligned_be16(cmd + 8);
4364 		bt_len = get_unaligned_be32(cmd + 10);
4365 		group = cmd[14] & 0x3f;
4366 		if (unlikely(have_dif_prot)) {
4367 			if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
4368 			    wrprotect) {
4369 				mk_sense_invalid_opcode(scp);
4370 				return illegal_condition_result;
4371 			}
4372 			if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
4373 			     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
4374 			     wrprotect == 0)
4375 				sdev_printk(KERN_ERR, scp->device,
4376 					    "Unprotected WR to DIF device\n");
4377 		}
4378 	}
4379 	if ((num_lrd == 0) || (bt_len == 0))
4380 		return 0;       /* T10 says these do-nothings are not errors */
4381 	if (lbdof == 0) {
4382 		if (sdebug_verbose)
4383 			sdev_printk(KERN_INFO, scp->device,
4384 				"%s: %s: LB Data Offset field bad\n",
4385 				my_name, __func__);
4386 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4387 		return illegal_condition_result;
4388 	}
4389 	lbdof_blen = lbdof * lb_size;
4390 	if ((lrd_size + (num_lrd * lrd_size)) > lbdof_blen) {
4391 		if (sdebug_verbose)
4392 			sdev_printk(KERN_INFO, scp->device,
4393 				"%s: %s: LBA range descriptors don't fit\n",
4394 				my_name, __func__);
4395 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4396 		return illegal_condition_result;
4397 	}
4398 	lrdp = kzalloc(lbdof_blen, GFP_ATOMIC | __GFP_NOWARN);
4399 	if (lrdp == NULL)
4400 		return SCSI_MLQUEUE_HOST_BUSY;
4401 	if (sdebug_verbose)
4402 		sdev_printk(KERN_INFO, scp->device,
4403 			"%s: %s: Fetch header+scatter_list, lbdof_blen=%u\n",
4404 			my_name, __func__, lbdof_blen);
4405 	res = fetch_to_dev_buffer(scp, lrdp, lbdof_blen);
4406 	if (res == -1) {
4407 		ret = DID_ERROR << 16;
4408 		goto err_out;
4409 	}
4410 
4411 	/* Just keep it simple and always lock for now */
4412 	sdeb_meta_write_lock(sip);
4413 	sg_off = lbdof_blen;
4414 	/* Spec says the Buffer Transfer Length field counts LBs in the data-out buffer */
4415 	cum_lb = 0;
4416 	for (k = 0, up = lrdp + lrd_size; k < num_lrd; ++k, up += lrd_size) {
4417 		lba = get_unaligned_be64(up + 0);
4418 		num = get_unaligned_be32(up + 8);
4419 		if (sdebug_verbose)
4420 			sdev_printk(KERN_INFO, scp->device,
4421 				"%s: %s: k=%d  LBA=0x%llx num=%u  sg_off=%u\n",
4422 				my_name, __func__, k, lba, num, sg_off);
4423 		if (num == 0)
4424 			continue;
4425 		ret = check_device_access_params(scp, lba, num, true);
4426 		if (ret)
4427 			goto err_out_unlock;
4428 		num_by = num * lb_size;
4429 		ei_lba = is_16 ? 0 : get_unaligned_be32(up + 12);
4430 
4431 		if ((cum_lb + num) > bt_len) {
4432 			if (sdebug_verbose)
4433 				sdev_printk(KERN_INFO, scp->device,
4434 				    "%s: %s: sum of blocks > data provided\n",
4435 				    my_name, __func__);
4436 			mk_sense_buffer(scp, ILLEGAL_REQUEST, WRITE_ERROR_ASC,
4437 					0);
4438 			ret = illegal_condition_result;
4439 			goto err_out_unlock;
4440 		}
4441 
4442 		/* DIX + T10 DIF */
4443 		if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
4444 			int prot_ret = prot_verify_write(scp, lba, num,
4445 							 ei_lba);
4446 
4447 			if (prot_ret) {
4448 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10,
4449 						prot_ret);
4450 				ret = illegal_condition_result;
4451 				goto err_out_unlock;
4452 			}
4453 		}
4454 
4455 		/*
4456 		 * Write each range atomically, to keep behaviour as close as
4457 		 * possible to what it was before atomic writes were added.
4458 		 */
4459 		ret = do_device_access(sip, scp, sg_off, lba, num, group, true, true);
4460 		/* If ZBC zone then bump its write pointer */
4461 		if (sdebug_dev_is_zoned(devip))
4462 			zbc_inc_wp(devip, lba, num);
4463 		if (unlikely(scsi_debug_lbp()))
4464 			map_region(sip, lba, num);
4465 		if (unlikely(-1 == ret)) {
4466 			ret = DID_ERROR << 16;
4467 			goto err_out_unlock;
4468 		} else if (unlikely(sdebug_verbose && (ret < num_by)))
4469 			sdev_printk(KERN_INFO, scp->device,
4470 			    "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
4471 			    my_name, num_by, ret);
4472 
4473 		if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
4474 			     atomic_read(&sdeb_inject_pending))) {
4475 			if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
4476 				mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
4477 				atomic_set(&sdeb_inject_pending, 0);
4478 				ret = check_condition_result;
4479 				goto err_out_unlock;
4480 			} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
4481 				/* Logical block guard check failed */
4482 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
4483 				atomic_set(&sdeb_inject_pending, 0);
4484 				ret = illegal_condition_result;
4485 				goto err_out_unlock;
4486 			} else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
4487 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
4488 				atomic_set(&sdeb_inject_pending, 0);
4489 				ret = illegal_condition_result;
4490 				goto err_out_unlock;
4491 			}
4492 		}
4493 		sg_off += num_by;
4494 		cum_lb += num;
4495 	}
4496 	ret = 0;
4497 err_out_unlock:
4498 	sdeb_meta_write_unlock(sip);
4499 err_out:
4500 	kfree(lrdp);
4501 	return ret;
4502 }
4503 
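/*
 * Editor's sketch (hypothetical layout helper, mirroring the parsing in
 * resp_write_scat() above): each 32-byte LBA range descriptor starts
 * with an 8-byte LBA and a 4-byte block count; the 32-byte CDB variant
 * additionally carries an expected initial reference tag at bytes
 * 12..15 (the get_unaligned_be32(up + 12) read above).
 */
struct sdeb_example_lrd {
	__be64 lba;		/* bytes 0..7 */
	__be32 num_blocks;	/* bytes 8..11 */
	__be32 ei_lba;		/* bytes 12..15, 32-byte CDB variant only */
	u8 reserved[16];	/* bytes 16..31 */
};
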
4504 static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
4505 			   u32 ei_lba, bool unmap, bool ndob)
4506 {
4507 	struct scsi_device *sdp = scp->device;
4508 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
4509 	unsigned long long i;
4510 	u64 block, lbaa;
4511 	u32 lb_size = sdebug_sector_size;
4512 	int ret;
4513 	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
4514 						scp->device->hostdata, true);
4515 	u8 *fs1p;
4516 	u8 *fsp;
4517 	bool meta_data_locked = false;
4518 
4519 	if (sdebug_dev_is_zoned(devip) || scsi_debug_lbp()) {
4520 		sdeb_meta_write_lock(sip);
4521 		meta_data_locked = true;
4522 	}
4523 
4524 	ret = check_device_access_params(scp, lba, num, true);
4525 	if (ret)
4526 		goto out;
4527 
4528 	if (unmap && scsi_debug_lbp()) {
4529 		unmap_region(sip, lba, num);
4530 		goto out;
4531 	}
4532 	lbaa = lba;
4533 	block = do_div(lbaa, sdebug_store_sectors);
4534 	/* if ndob then zero 1 logical block, else fetch 1 logical block */
4535 	fsp = sip->storep;
4536 	fs1p = fsp + (block * lb_size);
4537 	sdeb_data_write_lock(sip);
4538 	if (ndob) {
4539 		memset(fs1p, 0, lb_size);
4540 		ret = 0;
4541 	} else
4542 		ret = fetch_to_dev_buffer(scp, fs1p, lb_size);
4543 
4544 	if (-1 == ret) {
4545 		ret = DID_ERROR << 16;
4546 		goto out;
4547 	} else if (sdebug_verbose && !ndob && (ret < lb_size))
4548 		sdev_printk(KERN_INFO, scp->device,
4549 			    "%s: %s: lb size=%u, IO sent=%d bytes\n",
4550 			    my_name, "write same", lb_size, ret);
4551 
4552 	/* Copy first sector to remaining blocks */
4553 	for (i = 1 ; i < num ; i++) {
4554 		lbaa = lba + i;
4555 		block = do_div(lbaa, sdebug_store_sectors);
4556 		memmove(fsp + (block * lb_size), fs1p, lb_size);
4557 	}
4558 	if (scsi_debug_lbp())
4559 		map_region(sip, lba, num);
4560 	/* If ZBC zone then bump its write pointer */
4561 	if (sdebug_dev_is_zoned(devip))
4562 		zbc_inc_wp(devip, lba, num);
4563 	sdeb_data_write_unlock(sip);
4564 	ret = 0;
4565 out:
4566 	if (meta_data_locked)
4567 		sdeb_meta_write_unlock(sip);
4568 	return ret;
4569 }
4570 
4571 static int resp_write_same_10(struct scsi_cmnd *scp,
4572 			      struct sdebug_dev_info *devip)
4573 {
4574 	u8 *cmd = scp->cmnd;
4575 	u32 lba;
4576 	u16 num;
4577 	u32 ei_lba = 0;
4578 	bool unmap = false;
4579 
4580 	if (cmd[1] & 0x8) {
4581 		if (sdebug_lbpws10 == 0) {
4582 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
4583 			return check_condition_result;
4584 		} else
4585 			unmap = true;
4586 	}
4587 	lba = get_unaligned_be32(cmd + 2);
4588 	num = get_unaligned_be16(cmd + 7);
4589 	if (num > sdebug_write_same_length) {
4590 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
4591 		return check_condition_result;
4592 	}
4593 	return resp_write_same(scp, lba, num, ei_lba, unmap, false);
4594 }
4595 
4596 static int resp_write_same_16(struct scsi_cmnd *scp,
4597 			      struct sdebug_dev_info *devip)
4598 {
4599 	u8 *cmd = scp->cmnd;
4600 	u64 lba;
4601 	u32 num;
4602 	u32 ei_lba = 0;
4603 	bool unmap = false;
4604 	bool ndob = false;
4605 
4606 	if (cmd[1] & 0x8) {	/* UNMAP */
4607 		if (sdebug_lbpws == 0) {
4608 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
4609 			return check_condition_result;
4610 		} else
4611 			unmap = true;
4612 	}
4613 	if (cmd[1] & 0x1)  /* NDOB (no data-out buffer, assumes zeroes) */
4614 		ndob = true;
4615 	lba = get_unaligned_be64(cmd + 2);
4616 	num = get_unaligned_be32(cmd + 10);
4617 	if (num > sdebug_write_same_length) {
4618 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
4619 		return check_condition_result;
4620 	}
4621 	return resp_write_same(scp, lba, num, ei_lba, unmap, ndob);
4622 }
4623 
4624 /* Note the mode field is in the same position as the (lower) service action
4625  * field. For the REPORT SUPPORTED OPERATION CODES command, SPC-4 suggests
4626  * each mode of this command should be reported separately; left for the future. */
4627 static int resp_write_buffer(struct scsi_cmnd *scp,
4628 			     struct sdebug_dev_info *devip)
4629 {
4630 	u8 *cmd = scp->cmnd;
4631 	struct scsi_device *sdp = scp->device;
4632 	struct sdebug_dev_info *dp;
4633 	u8 mode;
4634 
4635 	mode = cmd[1] & 0x1f;
4636 	switch (mode) {
4637 	case 0x4:	/* download microcode (MC) and activate (ACT) */
4638 		/* set UAs on this device only */
4639 		set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
4640 		set_bit(SDEBUG_UA_MICROCODE_CHANGED, devip->uas_bm);
4641 		break;
4642 	case 0x5:	/* download MC, save and ACT */
4643 		set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET, devip->uas_bm);
4644 		break;
4645 	case 0x6:	/* download MC with offsets and ACT */
4646 		/* set UAs on most devices (LUs) in this target */
4647 		list_for_each_entry(dp,
4648 				    &devip->sdbg_host->dev_info_list,
4649 				    dev_list)
4650 			if (dp->target == sdp->id) {
4651 				set_bit(SDEBUG_UA_BUS_RESET, dp->uas_bm);
4652 				if (devip != dp)
4653 					set_bit(SDEBUG_UA_MICROCODE_CHANGED,
4654 						dp->uas_bm);
4655 			}
4656 		break;
4657 	case 0x7:	/* download MC with offsets, save, and ACT */
4658 		/* set UA on all devices (LUs) in this target */
4659 		list_for_each_entry(dp,
4660 				    &devip->sdbg_host->dev_info_list,
4661 				    dev_list)
4662 			if (dp->target == sdp->id)
4663 				set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET,
4664 					dp->uas_bm);
4665 		break;
4666 	default:
4667 		/* do nothing for this command for other mode values */
4668 		break;
4669 	}
4670 	return 0;
4671 }
4672 
4673 static int resp_comp_write(struct scsi_cmnd *scp,
4674 			   struct sdebug_dev_info *devip)
4675 {
4676 	u8 *cmd = scp->cmnd;
4677 	u8 *arr;
4678 	struct sdeb_store_info *sip = devip2sip(devip, true);
4679 	u64 lba;
4680 	u32 dnum;
4681 	u32 lb_size = sdebug_sector_size;
4682 	u8 num;
4683 	int ret;
4684 	int retval = 0;
4685 
4686 	lba = get_unaligned_be64(cmd + 2);
4687 	num = cmd[13];		/* 1 to a maximum of 255 logical blocks */
4688 	if (0 == num)
4689 		return 0;	/* degenerate case, not an error */
4690 	if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
4691 	    (cmd[1] & 0xe0)) {
4692 		mk_sense_invalid_opcode(scp);
4693 		return check_condition_result;
4694 	}
4695 	if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
4696 	     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
4697 	    (cmd[1] & 0xe0) == 0)
4698 		sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
4699 			    "to DIF device\n");
4700 	ret = check_device_access_params(scp, lba, num, false);
4701 	if (ret)
4702 		return ret;
4703 	dnum = 2 * num;
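	/* Per SBC, the COMPARE AND WRITE data-out buffer carries two
	 * concatenated runs of num blocks: first the verify data that is
	 * compared against the store, then the data written on a successful
	 * compare; hence the doubled transfer length.
	 */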
4704 	arr = kcalloc(lb_size, dnum, GFP_ATOMIC);
4705 	if (NULL == arr) {
4706 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4707 				INSUFF_RES_ASCQ);
4708 		return check_condition_result;
4709 	}
4710 
4711 	ret = do_dout_fetch(scp, dnum, arr);
4712 	if (ret == -1) {
4713 		retval = DID_ERROR << 16;
4714 		goto cleanup_free;
4715 	} else if (sdebug_verbose && (ret < (dnum * lb_size)))
4716 		sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb "
4717 			    "indicated=%u, IO sent=%d bytes\n", my_name,
4718 			    dnum * lb_size, ret);
4719 
4720 	sdeb_data_write_lock(sip);
4721 	sdeb_meta_write_lock(sip);
4722 	if (!comp_write_worker(sip, lba, num, arr, false)) {
4723 		mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
4724 		retval = check_condition_result;
4725 		goto cleanup_unlock;
4726 	}
4727 
4728 	/* Cover sip->map_storep (which map_region() sets) with data lock */
4729 	if (scsi_debug_lbp())
4730 		map_region(sip, lba, num);
4731 cleanup_unlock:
4732 	sdeb_meta_write_unlock(sip);
4733 	sdeb_data_write_unlock(sip);
4734 cleanup_free:
4735 	kfree(arr);
4736 	return retval;
4737 }
4738 
4739 struct unmap_block_desc {
4740 	__be64	lba;
4741 	__be32	blocks;
4742 	__be32	__reserved;
4743 };
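
/* The UNMAP parameter list fetched below is an 8 byte header -- UNMAP data
 * length at offset 0, block descriptor data length at offset 2 -- followed
 * by the 16 byte descriptors above starting at offset 8, which is what the
 * two BUG_ON() sanity checks in resp_unmap() verify.
 */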
4744 
4745 static int resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4746 {
4747 	unsigned char *buf;
4748 	struct unmap_block_desc *desc;
4749 	struct sdeb_store_info *sip = devip2sip(devip, true);
4750 	unsigned int i, payload_len, descriptors;
4751 	int ret;
4752 
4753 	if (!scsi_debug_lbp())
4754 		return 0;	/* fib and say it's done */
4755 	payload_len = get_unaligned_be16(scp->cmnd + 7);
4756 	BUG_ON(scsi_bufflen(scp) != payload_len);
4757 
4758 	descriptors = (payload_len - 8) / 16;
4759 	if (descriptors > sdebug_unmap_max_desc) {
4760 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
4761 		return check_condition_result;
4762 	}
4763 
4764 	buf = kzalloc(scsi_bufflen(scp), GFP_ATOMIC);
4765 	if (!buf) {
4766 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4767 				INSUFF_RES_ASCQ);
4768 		return check_condition_result;
4769 	}
4770 
4771 	scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
4772 
4773 	BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
4774 	BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);
4775 
4776 	desc = (void *)&buf[8];
4777 
4778 	sdeb_meta_write_lock(sip);
4779 
4780 	for (i = 0 ; i < descriptors ; i++) {
4781 		unsigned long long lba = get_unaligned_be64(&desc[i].lba);
4782 		unsigned int num = get_unaligned_be32(&desc[i].blocks);
4783 
4784 		ret = check_device_access_params(scp, lba, num, true);
4785 		if (ret)
4786 			goto out;
4787 
4788 		unmap_region(sip, lba, num);
4789 	}
4790 
4791 	ret = 0;
4792 
4793 out:
4794 	sdeb_meta_write_unlock(sip);
4795 	kfree(buf);
4796 
4797 	return ret;
4798 }
4799 
4800 #define SDEBUG_GET_LBA_STATUS_LEN 32
4801 
4802 static int resp_get_lba_status(struct scsi_cmnd *scp,
4803 			       struct sdebug_dev_info *devip)
4804 {
4805 	u8 *cmd = scp->cmnd;
4806 	u64 lba;
4807 	u32 alloc_len, mapped, num;
4808 	int ret;
4809 	u8 arr[SDEBUG_GET_LBA_STATUS_LEN];
4810 
4811 	lba = get_unaligned_be64(cmd + 2);
4812 	alloc_len = get_unaligned_be32(cmd + 10);
4813 
4814 	if (alloc_len < 24)
4815 		return 0;
4816 
4817 	ret = check_device_access_params(scp, lba, 1, false);
4818 	if (ret)
4819 		return ret;
4820 
4821 	if (scsi_debug_lbp()) {
4822 		struct sdeb_store_info *sip = devip2sip(devip, true);
4823 
4824 		mapped = map_state(sip, lba, &num);
4825 	} else {
4826 		mapped = 1;
4827 		/* following just in case virtual_gb changed */
4828 		sdebug_capacity = get_sdebug_capacity();
4829 		if (sdebug_capacity - lba <= 0xffffffff)
4830 			num = sdebug_capacity - lba;
4831 		else
4832 			num = 0xffffffff;
4833 	}
4834 
4835 	memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
4836 	put_unaligned_be32(20, arr);		/* Parameter Data Length */
4837 	put_unaligned_be64(lba, arr + 8);	/* LBA */
4838 	put_unaligned_be32(num, arr + 16);	/* Number of blocks */
4839 	arr[20] = !mapped;		/* prov_stat=0: mapped; 1: dealloc */
4840 
4841 	return fill_from_dev_buffer(scp, arr, SDEBUG_GET_LBA_STATUS_LEN);
4842 }
4843 
4844 static int resp_get_stream_status(struct scsi_cmnd *scp,
4845 				  struct sdebug_dev_info *devip)
4846 {
4847 	u16 starting_stream_id, stream_id;
4848 	const u8 *cmd = scp->cmnd;
4849 	u32 alloc_len, offset;
4850 	u8 arr[256] = {};
4851 	struct scsi_stream_status_header *h = (void *)arr;
4852 
4853 	starting_stream_id = get_unaligned_be16(cmd + 4);
4854 	alloc_len = get_unaligned_be32(cmd + 10);
4855 
4856 	if (alloc_len < 8) {
4857 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
4858 		return check_condition_result;
4859 	}
4860 
4861 	if (starting_stream_id >= MAXIMUM_NUMBER_OF_STREAMS) {
4862 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1);
4863 		return check_condition_result;
4864 	}
4865 
4866 	/*
4867 	 * The GET STREAM STATUS command only reports status information
4868 	 * about open streams. Treat the non-permanent streams as open.
4869 	 */
4870 	put_unaligned_be16(MAXIMUM_NUMBER_OF_STREAMS,
4871 			   &h->number_of_open_streams);
4872 
4873 	for (offset = 8, stream_id = starting_stream_id;
4874 	     offset + 8 <= min_t(u32, alloc_len, sizeof(arr)) &&
4875 		     stream_id < MAXIMUM_NUMBER_OF_STREAMS;
4876 	     offset += 8, stream_id++) {
4877 		struct scsi_stream_status *stream_status = (void *)arr + offset;
4878 
4879 		stream_status->perm = stream_id < PERMANENT_STREAM_COUNT;
4880 		put_unaligned_be16(stream_id,
4881 				   &stream_status->stream_identifier);
4882 		stream_status->rel_lifetime = stream_id + 1;
4883 	}
4884 	put_unaligned_be32(offset - 8, &h->len); /* PARAMETER DATA LENGTH */
4885 
4886 	return fill_from_dev_buffer(scp, arr, min(offset, alloc_len));
4887 }
4888 
4889 static int resp_sync_cache(struct scsi_cmnd *scp,
4890 			   struct sdebug_dev_info *devip)
4891 {
4892 	int res = 0;
4893 	u64 lba;
4894 	u32 num_blocks;
4895 	u8 *cmd = scp->cmnd;
4896 
4897 	if (cmd[0] == SYNCHRONIZE_CACHE) {	/* 10 byte cdb */
4898 		lba = get_unaligned_be32(cmd + 2);
4899 		num_blocks = get_unaligned_be16(cmd + 7);
4900 	} else {				/* SYNCHRONIZE_CACHE(16) */
4901 		lba = get_unaligned_be64(cmd + 2);
4902 		num_blocks = get_unaligned_be32(cmd + 10);
4903 	}
4904 	if (lba + num_blocks > sdebug_capacity) {
4905 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4906 		return check_condition_result;
4907 	}
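	/* cmd[1] bit 1 is the IMMED bit: when it is set, or when nothing has
	 * been written since the last sync, complete immediately; otherwise
	 * clear write_since_sync and let the command take the delayed path.
	 */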
4908 	if (!write_since_sync || (cmd[1] & 0x2))
4909 		res = SDEG_RES_IMMED_MASK;
4910 	else		/* delay if write_since_sync and IMMED clear */
4911 		write_since_sync = false;
4912 	return res;
4913 }
4914 
4915 /*
4916  * Assuming the LBA+num_blocks is not out-of-range, this function returns
4917  * CONDITION MET if the specified blocks will fit (or have fitted) in the
4918  * cache, and GOOD status otherwise. A disk with a big cache is modelled,
4919  * so CONDITION MET is always yielded. It actually tries to bring the
4920  * range in main memory into the cache associated with the CPU(s).
4921  */
4922 static int resp_pre_fetch(struct scsi_cmnd *scp,
4923 			  struct sdebug_dev_info *devip)
4924 {
4925 	int res = 0;
4926 	u64 lba;
4927 	u64 block, rest = 0;
4928 	u32 nblks;
4929 	u8 *cmd = scp->cmnd;
4930 	struct sdeb_store_info *sip = devip2sip(devip, true);
4931 	u8 *fsp = sip->storep;
4932 
4933 	if (cmd[0] == PRE_FETCH) {	/* 10 byte cdb */
4934 		lba = get_unaligned_be32(cmd + 2);
4935 		nblks = get_unaligned_be16(cmd + 7);
4936 	} else {			/* PRE-FETCH(16) */
4937 		lba = get_unaligned_be64(cmd + 2);
4938 		nblks = get_unaligned_be32(cmd + 10);
4939 	}
4940 	if (lba + nblks > sdebug_capacity) {
4941 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4942 		return check_condition_result;
4943 	}
4944 	if (!fsp)
4945 		goto fini;
4946 	/* PRE-FETCH spec says nothing about LBP or PI so skip them */
4947 	block = do_div(lba, sdebug_store_sectors);
4948 	if (block + nblks > sdebug_store_sectors)
4949 		rest = block + nblks - sdebug_store_sectors;
4950 
4951 	/* Try to bring the PRE-FETCH range into CPU's cache */
4952 	sdeb_data_read_lock(sip);
4953 	prefetch_range(fsp + (sdebug_sector_size * block),
4954 		       (nblks - rest) * sdebug_sector_size);
4955 	if (rest)
4956 		prefetch_range(fsp, rest * sdebug_sector_size);
4957 
4958 	sdeb_data_read_unlock(sip);
4959 fini:
4960 	if (cmd[1] & 0x2)
4961 		res = SDEG_RES_IMMED_MASK;
4962 	return res | condition_met_result;
4963 }
4964 
4965 #define RL_BUCKET_ELEMS 8
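
/* The REPORT LUNS response below is assembled in buckets of RL_BUCKET_ELEMS
 * (8) entries of 8 bytes each; the first bucket donates its leading 8 bytes
 * to the response header, which is why that iteration starts at j = 1.
 */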
4966 
4967 /* Even though each pseudo target has a REPORT LUNS "well known logical unit"
4968  * (W-LUN), the normal Linux scanning logic does not associate it with a
4969  * device (e.g. /dev/sg7). The following magic will make that association:
4970  *   "cd /sys/class/scsi_host/host<n> ; echo '- - 49409' > scan"
4971  * where <n> is a host number. If there are multiple targets in a host then
4972  * the above will associate a W-LUN with each target. To get a W-LUN for
4973  * target 2 only, use "echo '- 2 49409' > scan".
4974  */
4975 static int resp_report_luns(struct scsi_cmnd *scp,
4976 			    struct sdebug_dev_info *devip)
4977 {
4978 	unsigned char *cmd = scp->cmnd;
4979 	unsigned int alloc_len;
4980 	unsigned char select_report;
4981 	u64 lun;
4982 	struct scsi_lun *lun_p;
4983 	u8 arr[RL_BUCKET_ELEMS * sizeof(struct scsi_lun)];
4984 	unsigned int lun_cnt;	/* normal LUN count (max: 256) */
4985 	unsigned int wlun_cnt;	/* report luns W-LUN count */
4986 	unsigned int tlun_cnt;	/* total LUN count */
4987 	unsigned int rlen;	/* response length (in bytes) */
4988 	int k, j, n, res;
4989 	unsigned int off_rsp = 0;
4990 	const int sz_lun = sizeof(struct scsi_lun);
4991 
4992 	clear_luns_changed_on_target(devip);
4993 
4994 	select_report = cmd[2];
4995 	alloc_len = get_unaligned_be32(cmd + 6);
4996 
4997 	if (alloc_len < 4) {
4998 		pr_err("alloc len too small %d\n", alloc_len);
4999 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
5000 		return check_condition_result;
5001 	}
5002 
5003 	switch (select_report) {
5004 	case 0:		/* all LUNs apart from W-LUNs */
5005 		lun_cnt = sdebug_max_luns;
5006 		wlun_cnt = 0;
5007 		break;
5008 	case 1:		/* only W-LUNs */
5009 		lun_cnt = 0;
5010 		wlun_cnt = 1;
5011 		break;
5012 	case 2:		/* all LUNs */
5013 		lun_cnt = sdebug_max_luns;
5014 		wlun_cnt = 1;
5015 		break;
5016 	case 0x10:	/* only administrative LUs */
5017 	case 0x11:	/* see SPC-5 */
5018 	case 0x12:	/* only subsidiary LUs owned by referenced LU */
5019 	default:
5020 		pr_debug("select report invalid %d\n", select_report);
5021 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
5022 		return check_condition_result;
5023 	}
5024 
5025 	if (sdebug_no_lun_0 && (lun_cnt > 0))
5026 		--lun_cnt;
5027 
5028 	tlun_cnt = lun_cnt + wlun_cnt;
5029 	rlen = tlun_cnt * sz_lun;	/* excluding 8 byte header */
5030 	scsi_set_resid(scp, scsi_bufflen(scp));
5031 	pr_debug("select_report %d luns = %d wluns = %d no_lun0 %d\n",
5032 		 select_report, lun_cnt, wlun_cnt, sdebug_no_lun_0);
5033 
5034 	/* loops rely on the response header being the same size as a LUN entry (both 8 bytes) */
5035 	lun = sdebug_no_lun_0 ? 1 : 0;
5036 	for (k = 0, j = 0, res = 0; true; ++k, j = 0) {
5037 		memset(arr, 0, sizeof(arr));
5038 		lun_p = (struct scsi_lun *)&arr[0];
5039 		if (k == 0) {
5040 			put_unaligned_be32(rlen, &arr[0]);
5041 			++lun_p;
5042 			j = 1;
5043 		}
5044 		for ( ; j < RL_BUCKET_ELEMS; ++j, ++lun_p) {
5045 			if ((k * RL_BUCKET_ELEMS) + j > lun_cnt)
5046 				break;
5047 			int_to_scsilun(lun++, lun_p);
5048 			if (lun > 1 && sdebug_lun_am == SAM_LUN_AM_FLAT)
5049 				lun_p->scsi_lun[0] |= 0x40;
5050 		}
5051 		if (j < RL_BUCKET_ELEMS)
5052 			break;
5053 		n = j * sz_lun;
5054 		res = p_fill_from_dev_buffer(scp, arr, n, off_rsp);
5055 		if (res)
5056 			return res;
5057 		off_rsp += n;
5058 	}
5059 	if (wlun_cnt) {
5060 		int_to_scsilun(SCSI_W_LUN_REPORT_LUNS, lun_p);
5061 		++j;
5062 	}
5063 	if (j > 0)
5064 		res = p_fill_from_dev_buffer(scp, arr, j * sz_lun, off_rsp);
5065 	return res;
5066 }
5067 
5068 static int resp_verify(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
5069 {
5070 	bool is_bytchk3 = false;
5071 	u8 bytchk;
5072 	int ret, j;
5073 	u32 vnum, a_num, off;
5074 	const u32 lb_size = sdebug_sector_size;
5075 	u64 lba;
5076 	u8 *arr;
5077 	u8 *cmd = scp->cmnd;
5078 	struct sdeb_store_info *sip = devip2sip(devip, true);
5079 
5080 	bytchk = (cmd[1] >> 1) & 0x3;
5081 	if (bytchk == 0) {
5082 		return 0;	/* always claim internal verify okay */
5083 	} else if (bytchk == 2) {
5084 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
5085 		return check_condition_result;
5086 	} else if (bytchk == 3) {
5087 		is_bytchk3 = true;	/* 1 block sent, compared repeatedly */
5088 	}
5089 	switch (cmd[0]) {
5090 	case VERIFY_16:
5091 		lba = get_unaligned_be64(cmd + 2);
5092 		vnum = get_unaligned_be32(cmd + 10);
5093 		break;
5094 	case VERIFY:		/* is VERIFY(10) */
5095 		lba = get_unaligned_be32(cmd + 2);
5096 		vnum = get_unaligned_be16(cmd + 7);
5097 		break;
5098 	default:
5099 		mk_sense_invalid_opcode(scp);
5100 		return check_condition_result;
5101 	}
5102 	if (vnum == 0)
5103 		return 0;	/* not an error */
5104 	a_num = is_bytchk3 ? 1 : vnum;
5105 	/* Treat following check like one for read (i.e. no write) access */
5106 	ret = check_device_access_params(scp, lba, a_num, false);
5107 	if (ret)
5108 		return ret;
5109 
5110 	arr = kcalloc(lb_size, vnum, GFP_ATOMIC | __GFP_NOWARN);
5111 	if (!arr) {
5112 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
5113 				INSUFF_RES_ASCQ);
5114 		return check_condition_result;
5115 	}
5116 	/* Not changing store, so only need read access */
5117 	sdeb_data_read_lock(sip);
5118 
5119 	ret = do_dout_fetch(scp, a_num, arr);
5120 	if (ret == -1) {
5121 		ret = DID_ERROR << 16;
5122 		goto cleanup;
5123 	} else if (sdebug_verbose && (ret < (a_num * lb_size))) {
5124 		sdev_printk(KERN_INFO, scp->device,
5125 			    "%s: %s: cdb indicated=%u, IO sent=%d bytes\n",
5126 			    my_name, __func__, a_num * lb_size, ret);
5127 	}
5128 	if (is_bytchk3) {
5129 		for (j = 1, off = lb_size; j < vnum; ++j, off += lb_size)
5130 			memcpy(arr + off, arr, lb_size);
5131 	}
5132 	ret = 0;
5133 	if (!comp_write_worker(sip, lba, vnum, arr, true)) {
5134 		mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
5135 		ret = check_condition_result;
5136 		goto cleanup;
5137 	}
5138 cleanup:
5139 	sdeb_data_read_unlock(sip);
5140 	kfree(arr);
5141 	return ret;
5142 }
5143 
5144 #define RZONES_DESC_HD 64
5145 
5146 /* Report zones depending on start LBA and reporting options */
5147 static int resp_report_zones(struct scsi_cmnd *scp,
5148 			     struct sdebug_dev_info *devip)
5149 {
5150 	unsigned int rep_max_zones, nrz = 0;
5151 	int ret = 0;
5152 	u32 alloc_len, rep_opts, rep_len;
5153 	bool partial;
5154 	u64 lba, zs_lba;
5155 	u8 *arr = NULL, *desc;
5156 	u8 *cmd = scp->cmnd;
5157 	struct sdeb_zone_state *zsp = NULL;
5158 	struct sdeb_store_info *sip = devip2sip(devip, false);
5159 
5160 	if (!sdebug_dev_is_zoned(devip)) {
5161 		mk_sense_invalid_opcode(scp);
5162 		return check_condition_result;
5163 	}
5164 	zs_lba = get_unaligned_be64(cmd + 2);
5165 	alloc_len = get_unaligned_be32(cmd + 10);
5166 	if (alloc_len == 0)
5167 		return 0;	/* not an error */
5168 	rep_opts = cmd[14] & 0x3f;
5169 	partial = cmd[14] & 0x80;
5170 
5171 	if (zs_lba >= sdebug_capacity) {
5172 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
5173 		return check_condition_result;
5174 	}
5175 
5176 	rep_max_zones = (alloc_len - 64) >> ilog2(RZONES_DESC_HD);
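	/* Each zone descriptor is RZONES_DESC_HD (64) bytes and the report
	 * header occupies the first 64 bytes, so the shift by ilog2(64) is
	 * just a divide-by-64 of the space remaining after the header.
	 */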
5177 
5178 	arr = kzalloc(alloc_len, GFP_ATOMIC | __GFP_NOWARN);
5179 	if (!arr) {
5180 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
5181 				INSUFF_RES_ASCQ);
5182 		return check_condition_result;
5183 	}
5184 
5185 	sdeb_meta_read_lock(sip);
5186 
5187 	desc = arr + 64;
5188 	for (lba = zs_lba; lba < sdebug_capacity;
5189 	     lba = zsp->z_start + zsp->z_size) {
5190 		if (WARN_ONCE(zbc_zone(devip, lba) == zsp, "lba = %llu\n", lba))
5191 			break;
5192 		zsp = zbc_zone(devip, lba);
5193 		switch (rep_opts) {
5194 		case 0x00:
5195 			/* All zones */
5196 			break;
5197 		case 0x01:
5198 			/* Empty zones */
5199 			if (zsp->z_cond != ZC1_EMPTY)
5200 				continue;
5201 			break;
5202 		case 0x02:
5203 			/* Implicit open zones */
5204 			if (zsp->z_cond != ZC2_IMPLICIT_OPEN)
5205 				continue;
5206 			break;
5207 		case 0x03:
5208 			/* Explicit open zones */
5209 			if (zsp->z_cond != ZC3_EXPLICIT_OPEN)
5210 				continue;
5211 			break;
5212 		case 0x04:
5213 			/* Closed zones */
5214 			if (zsp->z_cond != ZC4_CLOSED)
5215 				continue;
5216 			break;
5217 		case 0x05:
5218 			/* Full zones */
5219 			if (zsp->z_cond != ZC5_FULL)
5220 				continue;
5221 			break;
5222 		case 0x06:
5223 		case 0x07:
5224 		case 0x10:
5225 			/*
5226 			 * Read-only, offline, reset WP recommended are
5227 			 * not emulated: no zones to report.
5228 			 */
5229 			continue;
5230 		case 0x11:
5231 			/* non-seq-resource set */
5232 			if (!zsp->z_non_seq_resource)
5233 				continue;
5234 			break;
5235 		case 0x3e:
5236 			/* All zones except gap zones. */
5237 			if (zbc_zone_is_gap(zsp))
5238 				continue;
5239 			break;
5240 		case 0x3f:
5241 			/* Not write pointer (conventional) zones */
5242 			if (zbc_zone_is_seq(zsp))
5243 				continue;
5244 			break;
5245 		default:
5246 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
5247 					INVALID_FIELD_IN_CDB, 0);
5248 			ret = check_condition_result;
5249 			goto fini;
5250 		}
5251 
5252 		if (nrz < rep_max_zones) {
5253 			/* Fill zone descriptor */
5254 			desc[0] = zsp->z_type;
5255 			desc[1] = zsp->z_cond << 4;
5256 			if (zsp->z_non_seq_resource)
5257 				desc[1] |= 1 << 1;
5258 			put_unaligned_be64((u64)zsp->z_size, desc + 8);
5259 			put_unaligned_be64((u64)zsp->z_start, desc + 16);
5260 			put_unaligned_be64((u64)zsp->z_wp, desc + 24);
5261 			desc += 64;
5262 		}
5263 
5264 		if (partial && nrz >= rep_max_zones)
5265 			break;
5266 
5267 		nrz++;
5268 	}
5269 
5270 	/* Report header */
5271 	/* Zone list length. */
5272 	put_unaligned_be32(nrz * RZONES_DESC_HD, arr + 0);
5273 	/* Maximum LBA */
5274 	put_unaligned_be64(sdebug_capacity - 1, arr + 8);
5275 	/* Zone starting LBA granularity. */
5276 	if (devip->zcap < devip->zsize)
5277 		put_unaligned_be64(devip->zsize, arr + 16);
5278 
5279 	rep_len = (unsigned long)desc - (unsigned long)arr;
5280 	ret = fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, rep_len));
5281 
5282 fini:
5283 	sdeb_meta_read_unlock(sip);
5284 	kfree(arr);
5285 	return ret;
5286 }
5287 
5288 static int resp_atomic_write(struct scsi_cmnd *scp,
5289 			     struct sdebug_dev_info *devip)
5290 {
5291 	struct sdeb_store_info *sip;
5292 	u8 *cmd = scp->cmnd;
5293 	u16 boundary, len;
5294 	u64 lba, lba_tmp;
5295 	int ret;
5296 
5297 	if (!scsi_debug_atomic_write()) {
5298 		mk_sense_invalid_opcode(scp);
5299 		return check_condition_result;
5300 	}
5301 
5302 	sip = devip2sip(devip, true);
5303 
5304 	lba = get_unaligned_be64(cmd + 2);
5305 	boundary = get_unaligned_be16(cmd + 10);
5306 	len = get_unaligned_be16(cmd + 12);
5307 
5308 	lba_tmp = lba;
5309 	if (sdebug_atomic_wr_align &&
5310 	    do_div(lba_tmp, sdebug_atomic_wr_align)) {
5311 		/* Does not meet alignment requirement */
5312 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
5313 		return check_condition_result;
5314 	}
5315 
5316 	if (sdebug_atomic_wr_gran && len % sdebug_atomic_wr_gran) {
5317 		/* Does not meet alignment requirement */
5318 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
5319 		return check_condition_result;
5320 	}
5321 
5322 	if (boundary > 0) {
5323 		if (boundary > sdebug_atomic_wr_max_bndry) {
5324 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 12, -1);
5325 			return check_condition_result;
5326 		}
5327 
5328 		if (len > sdebug_atomic_wr_max_length_bndry) {
5329 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 12, -1);
5330 			return check_condition_result;
5331 		}
5332 	} else {
5333 		if (len > sdebug_atomic_wr_max_length) {
5334 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 12, -1);
5335 			return check_condition_result;
5336 		}
5337 	}
5338 
5339 	ret = do_device_access(sip, scp, 0, lba, len, 0, true, true);
5340 	if (unlikely(ret == -1))
5341 		return DID_ERROR << 16;
5342 	if (unlikely(ret != len * sdebug_sector_size))
5343 		return DID_ERROR << 16;
5344 	return 0;
5345 }
5346 
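/* A quick map of the zone conditions manipulated by the handlers below,
 * loosely following the ZBC state machine: ZC1_EMPTY zones become
 * ZC2_IMPLICIT_OPEN or ZC3_EXPLICIT_OPEN, open zones may be ZC4_CLOSED,
 * FINISH forces a zone to ZC5_FULL, and RESET WRITE POINTER returns it
 * to ZC1_EMPTY.
 */
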
5347 /* Logic transplanted from tcmu-runner, file_zbc.c */
5348 static void zbc_open_all(struct sdebug_dev_info *devip)
5349 {
5350 	struct sdeb_zone_state *zsp = &devip->zstate[0];
5351 	unsigned int i;
5352 
5353 	for (i = 0; i < devip->nr_zones; i++, zsp++) {
5354 		if (zsp->z_cond == ZC4_CLOSED)
5355 			zbc_open_zone(devip, &devip->zstate[i], true);
5356 	}
5357 }
5358 
5359 static int resp_open_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
5360 {
5361 	int res = 0;
5362 	u64 z_id;
5363 	enum sdebug_z_cond zc;
5364 	u8 *cmd = scp->cmnd;
5365 	struct sdeb_zone_state *zsp;
5366 	bool all = cmd[14] & 0x01;
5367 	struct sdeb_store_info *sip = devip2sip(devip, false);
5368 
5369 	if (!sdebug_dev_is_zoned(devip)) {
5370 		mk_sense_invalid_opcode(scp);
5371 		return check_condition_result;
5372 	}
5373 	sdeb_meta_write_lock(sip);
5374 
5375 	if (all) {
5376 		/* Check if all closed zones can be open */
5377 		if (devip->max_open &&
5378 		    devip->nr_exp_open + devip->nr_closed > devip->max_open) {
5379 			mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
5380 					INSUFF_ZONE_ASCQ);
5381 			res = check_condition_result;
5382 			goto fini;
5383 		}
5384 		/* Open all closed zones */
5385 		zbc_open_all(devip);
5386 		goto fini;
5387 	}
5388 
5389 	/* Open the specified zone */
5390 	z_id = get_unaligned_be64(cmd + 2);
5391 	if (z_id >= sdebug_capacity) {
5392 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
5393 		res = check_condition_result;
5394 		goto fini;
5395 	}
5396 
5397 	zsp = zbc_zone(devip, z_id);
5398 	if (z_id != zsp->z_start) {
5399 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
5400 		res = check_condition_result;
5401 		goto fini;
5402 	}
5403 	if (zbc_zone_is_conv(zsp)) {
5404 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
5405 		res = check_condition_result;
5406 		goto fini;
5407 	}
5408 
5409 	zc = zsp->z_cond;
5410 	if (zc == ZC3_EXPLICIT_OPEN || zc == ZC5_FULL)
5411 		goto fini;
5412 
5413 	if (devip->max_open && devip->nr_exp_open >= devip->max_open) {
5414 		mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
5415 				INSUFF_ZONE_ASCQ);
5416 		res = check_condition_result;
5417 		goto fini;
5418 	}
5419 
5420 	zbc_open_zone(devip, zsp, true);
5421 fini:
5422 	sdeb_meta_write_unlock(sip);
5423 	return res;
5424 }
5425 
5426 static void zbc_close_all(struct sdebug_dev_info *devip)
5427 {
5428 	unsigned int i;
5429 
5430 	for (i = 0; i < devip->nr_zones; i++)
5431 		zbc_close_zone(devip, &devip->zstate[i]);
5432 }
5433 
5434 static int resp_close_zone(struct scsi_cmnd *scp,
5435 			   struct sdebug_dev_info *devip)
5436 {
5437 	int res = 0;
5438 	u64 z_id;
5439 	u8 *cmd = scp->cmnd;
5440 	struct sdeb_zone_state *zsp;
5441 	bool all = cmd[14] & 0x01;
5442 	struct sdeb_store_info *sip = devip2sip(devip, false);
5443 
5444 	if (!sdebug_dev_is_zoned(devip)) {
5445 		mk_sense_invalid_opcode(scp);
5446 		return check_condition_result;
5447 	}
5448 
5449 	sdeb_meta_write_lock(sip);
5450 
5451 	if (all) {
5452 		zbc_close_all(devip);
5453 		goto fini;
5454 	}
5455 
5456 	/* Close specified zone */
5457 	z_id = get_unaligned_be64(cmd + 2);
5458 	if (z_id >= sdebug_capacity) {
5459 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
5460 		res = check_condition_result;
5461 		goto fini;
5462 	}
5463 
5464 	zsp = zbc_zone(devip, z_id);
5465 	if (z_id != zsp->z_start) {
5466 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
5467 		res = check_condition_result;
5468 		goto fini;
5469 	}
5470 	if (zbc_zone_is_conv(zsp)) {
5471 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
5472 		res = check_condition_result;
5473 		goto fini;
5474 	}
5475 
5476 	zbc_close_zone(devip, zsp);
5477 fini:
5478 	sdeb_meta_write_unlock(sip);
5479 	return res;
5480 }
5481 
5482 static void zbc_finish_zone(struct sdebug_dev_info *devip,
5483 			    struct sdeb_zone_state *zsp, bool empty)
5484 {
5485 	enum sdebug_z_cond zc = zsp->z_cond;
5486 
5487 	if (zc == ZC4_CLOSED || zc == ZC2_IMPLICIT_OPEN ||
5488 	    zc == ZC3_EXPLICIT_OPEN || (empty && zc == ZC1_EMPTY)) {
5489 		if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
5490 			zbc_close_zone(devip, zsp);
5491 		if (zsp->z_cond == ZC4_CLOSED)
5492 			devip->nr_closed--;
5493 		zsp->z_wp = zsp->z_start + zsp->z_size;
5494 		zsp->z_cond = ZC5_FULL;
5495 	}
5496 }
5497 
5498 static void zbc_finish_all(struct sdebug_dev_info *devip)
5499 {
5500 	unsigned int i;
5501 
5502 	for (i = 0; i < devip->nr_zones; i++)
5503 		zbc_finish_zone(devip, &devip->zstate[i], false);
5504 }
5505 
5506 static int resp_finish_zone(struct scsi_cmnd *scp,
5507 			    struct sdebug_dev_info *devip)
5508 {
5509 	struct sdeb_zone_state *zsp;
5510 	int res = 0;
5511 	u64 z_id;
5512 	u8 *cmd = scp->cmnd;
5513 	bool all = cmd[14] & 0x01;
5514 	struct sdeb_store_info *sip = devip2sip(devip, false);
5515 
5516 	if (!sdebug_dev_is_zoned(devip)) {
5517 		mk_sense_invalid_opcode(scp);
5518 		return check_condition_result;
5519 	}
5520 
5521 	sdeb_meta_write_lock(sip);
5522 
5523 	if (all) {
5524 		zbc_finish_all(devip);
5525 		goto fini;
5526 	}
5527 
5528 	/* Finish the specified zone */
5529 	z_id = get_unaligned_be64(cmd + 2);
5530 	if (z_id >= sdebug_capacity) {
5531 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
5532 		res = check_condition_result;
5533 		goto fini;
5534 	}
5535 
5536 	zsp = zbc_zone(devip, z_id);
5537 	if (z_id != zsp->z_start) {
5538 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
5539 		res = check_condition_result;
5540 		goto fini;
5541 	}
5542 	if (zbc_zone_is_conv(zsp)) {
5543 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
5544 		res = check_condition_result;
5545 		goto fini;
5546 	}
5547 
5548 	zbc_finish_zone(devip, zsp, true);
5549 fini:
5550 	sdeb_meta_write_unlock(sip);
5551 	return res;
5552 }
5553 
5554 static void zbc_rwp_zone(struct sdebug_dev_info *devip,
5555 			 struct sdeb_zone_state *zsp)
5556 {
5557 	enum sdebug_z_cond zc;
5558 	struct sdeb_store_info *sip = devip2sip(devip, false);
5559 
5560 	if (!zbc_zone_is_seq(zsp))
5561 		return;
5562 
5563 	zc = zsp->z_cond;
5564 	if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
5565 		zbc_close_zone(devip, zsp);
5566 
5567 	if (zsp->z_cond == ZC4_CLOSED)
5568 		devip->nr_closed--;
5569 
5570 	if (zsp->z_wp > zsp->z_start)
5571 		memset(sip->storep + zsp->z_start * sdebug_sector_size, 0,
5572 		       (zsp->z_wp - zsp->z_start) * sdebug_sector_size);
5573 
5574 	zsp->z_non_seq_resource = false;
5575 	zsp->z_wp = zsp->z_start;
5576 	zsp->z_cond = ZC1_EMPTY;
5577 }
5578 
5579 static void zbc_rwp_all(struct sdebug_dev_info *devip)
5580 {
5581 	unsigned int i;
5582 
5583 	for (i = 0; i < devip->nr_zones; i++)
5584 		zbc_rwp_zone(devip, &devip->zstate[i]);
5585 }
5586 
5587 static int resp_rwp_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
5588 {
5589 	struct sdeb_zone_state *zsp;
5590 	int res = 0;
5591 	u64 z_id;
5592 	u8 *cmd = scp->cmnd;
5593 	bool all = cmd[14] & 0x01;
5594 	struct sdeb_store_info *sip = devip2sip(devip, false);
5595 
5596 	if (!sdebug_dev_is_zoned(devip)) {
5597 		mk_sense_invalid_opcode(scp);
5598 		return check_condition_result;
5599 	}
5600 
5601 	sdeb_meta_write_lock(sip);
5602 
5603 	if (all) {
5604 		zbc_rwp_all(devip);
5605 		goto fini;
5606 	}
5607 
5608 	z_id = get_unaligned_be64(cmd + 2);
5609 	if (z_id >= sdebug_capacity) {
5610 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
5611 		res = check_condition_result;
5612 		goto fini;
5613 	}
5614 
5615 	zsp = zbc_zone(devip, z_id);
5616 	if (z_id != zsp->z_start) {
5617 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
5618 		res = check_condition_result;
5619 		goto fini;
5620 	}
5621 	if (zbc_zone_is_conv(zsp)) {
5622 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
5623 		res = check_condition_result;
5624 		goto fini;
5625 	}
5626 
5627 	zbc_rwp_zone(devip, zsp);
5628 fini:
5629 	sdeb_meta_write_unlock(sip);
5630 	return res;
5631 }
5632 
5633 static u32 get_tag(struct scsi_cmnd *cmnd)
5634 {
5635 	return blk_mq_unique_tag(scsi_cmd_to_rq(cmnd));
5636 }
5637 
5638 /* Queued (deferred) command completions converge here. */
5639 static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
5640 {
5641 	struct sdebug_queued_cmd *sqcp = container_of(sd_dp, struct sdebug_queued_cmd, sd_dp);
5642 	unsigned long flags;
5643 	struct scsi_cmnd *scp = sqcp->scmd;
5644 	struct sdebug_scsi_cmd *sdsc;
5645 	bool aborted;
5646 
5647 	if (sdebug_statistics) {
5648 		atomic_inc(&sdebug_completions);
5649 		if (raw_smp_processor_id() != sd_dp->issuing_cpu)
5650 			atomic_inc(&sdebug_miss_cpus);
5651 	}
5652 
5653 	if (!scp) {
5654 		pr_err("scmd=NULL\n");
5655 		goto out;
5656 	}
5657 
5658 	sdsc = scsi_cmd_priv(scp);
5659 	spin_lock_irqsave(&sdsc->lock, flags);
5660 	aborted = sd_dp->aborted;
5661 	if (unlikely(aborted))
5662 		sd_dp->aborted = false;
5663 	ASSIGN_QUEUED_CMD(scp, NULL);
5664 
5665 	spin_unlock_irqrestore(&sdsc->lock, flags);
5666 
5667 	if (aborted) {
5668 		pr_info("bypassing scsi_done() due to aborted cmd, kicking-off EH\n");
5669 		blk_abort_request(scsi_cmd_to_rq(scp));
5670 		goto out;
5671 	}
5672 
5673 	scsi_done(scp); /* callback to mid level */
5674 out:
5675 	sdebug_free_queued_cmd(sqcp);
5676 }
5677 
5678 /* When high resolution timer goes off this function is called. */
5679 static enum hrtimer_restart sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
5680 {
5681 	struct sdebug_defer *sd_dp = container_of(timer, struct sdebug_defer,
5682 						  hrt);
5683 	sdebug_q_cmd_complete(sd_dp);
5684 	return HRTIMER_NORESTART;
5685 }
5686 
5687 /* When work queue schedules work, it calls this function. */
5688 static void sdebug_q_cmd_wq_complete(struct work_struct *work)
5689 {
5690 	struct sdebug_defer *sd_dp = container_of(work, struct sdebug_defer,
5691 						  ew.work);
5692 	sdebug_q_cmd_complete(sd_dp);
5693 }
5694 
5695 static bool got_shared_uuid;
5696 static uuid_t shared_uuid;
5697 
5698 static int sdebug_device_create_zones(struct sdebug_dev_info *devip)
5699 {
5700 	struct sdeb_zone_state *zsp;
5701 	sector_t capacity = get_sdebug_capacity();
5702 	sector_t conv_capacity;
5703 	sector_t zstart = 0;
5704 	unsigned int i;
5705 
5706 	/*
5707 	 * Set the zone size: if sdeb_zbc_zone_size_mb is not set, figure out
5708 	 * a zone size allowing for at least 4 zones on the device. Otherwise,
5709 	 * use the specified zone size checking that at least 2 zones can be
5710 	 * created for the device.
5711 	 */
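	/*
	 * Worked example with illustrative values (not from the source):
	 * sdeb_zbc_zone_size_mb = 4 and a 512 byte sector size give
	 * zsize = (4 * SZ_1M) >> 9 = 8192 sectors; a 64 MiB capacity of
	 * 131072 sectors then yields nr_zones = 131072 >> 13 = 16.
	 */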
5712 	if (!sdeb_zbc_zone_size_mb) {
5713 		devip->zsize = (DEF_ZBC_ZONE_SIZE_MB * SZ_1M)
5714 			>> ilog2(sdebug_sector_size);
5715 		while (capacity < devip->zsize << 2 && devip->zsize >= 2)
5716 			devip->zsize >>= 1;
5717 		if (devip->zsize < 2) {
5718 			pr_err("Device capacity too small\n");
5719 			return -EINVAL;
5720 		}
5721 	} else {
5722 		if (!is_power_of_2(sdeb_zbc_zone_size_mb)) {
5723 			pr_err("Zone size is not a power of 2\n");
5724 			return -EINVAL;
5725 		}
5726 		devip->zsize = (sdeb_zbc_zone_size_mb * SZ_1M)
5727 			>> ilog2(sdebug_sector_size);
5728 		if (devip->zsize >= capacity) {
5729 			pr_err("Zone size too large for device capacity\n");
5730 			return -EINVAL;
5731 		}
5732 	}
5733 
5734 	devip->zsize_shift = ilog2(devip->zsize);
5735 	devip->nr_zones = (capacity + devip->zsize - 1) >> devip->zsize_shift;
5736 
5737 	if (sdeb_zbc_zone_cap_mb == 0) {
5738 		devip->zcap = devip->zsize;
5739 	} else {
5740 		devip->zcap = (sdeb_zbc_zone_cap_mb * SZ_1M) >>
5741 			      ilog2(sdebug_sector_size);
5742 		if (devip->zcap > devip->zsize) {
5743 			pr_err("Zone capacity too large\n");
5744 			return -EINVAL;
5745 		}
5746 	}
5747 
5748 	conv_capacity = (sector_t)sdeb_zbc_nr_conv << devip->zsize_shift;
5749 	if (conv_capacity >= capacity) {
5750 		pr_err("Number of conventional zones too large\n");
5751 		return -EINVAL;
5752 	}
5753 	devip->nr_conv_zones = sdeb_zbc_nr_conv;
5754 	devip->nr_seq_zones = ALIGN(capacity - conv_capacity, devip->zsize) >>
5755 			      devip->zsize_shift;
5756 	devip->nr_zones = devip->nr_conv_zones + devip->nr_seq_zones;
5757 
5758 	/* Add gap zones if zone capacity is smaller than the zone size */
5759 	if (devip->zcap < devip->zsize)
5760 		devip->nr_zones += devip->nr_seq_zones;
5761 
5762 	if (devip->zoned) {
5763 		/* zbc_max_open_zones can be 0, meaning "not reported" */
5764 		if (sdeb_zbc_max_open >= devip->nr_zones - 1)
5765 			devip->max_open = (devip->nr_zones - 1) / 2;
5766 		else
5767 			devip->max_open = sdeb_zbc_max_open;
5768 	}
5769 
5770 	devip->zstate = kcalloc(devip->nr_zones,
5771 				sizeof(struct sdeb_zone_state), GFP_KERNEL);
5772 	if (!devip->zstate)
5773 		return -ENOMEM;
5774 
5775 	for (i = 0; i < devip->nr_zones; i++) {
5776 		zsp = &devip->zstate[i];
5777 
5778 		zsp->z_start = zstart;
5779 
5780 		if (i < devip->nr_conv_zones) {
5781 			zsp->z_type = ZBC_ZTYPE_CNV;
5782 			zsp->z_cond = ZBC_NOT_WRITE_POINTER;
5783 			zsp->z_wp = (sector_t)-1;
5784 			zsp->z_size =
5785 				min_t(u64, devip->zsize, capacity - zstart);
5786 		} else if ((zstart & (devip->zsize - 1)) == 0) {
5787 			if (devip->zoned)
5788 				zsp->z_type = ZBC_ZTYPE_SWR;
5789 			else
5790 				zsp->z_type = ZBC_ZTYPE_SWP;
5791 			zsp->z_cond = ZC1_EMPTY;
5792 			zsp->z_wp = zsp->z_start;
5793 			zsp->z_size =
5794 				min_t(u64, devip->zcap, capacity - zstart);
5795 		} else {
5796 			zsp->z_type = ZBC_ZTYPE_GAP;
5797 			zsp->z_cond = ZBC_NOT_WRITE_POINTER;
5798 			zsp->z_wp = (sector_t)-1;
5799 			zsp->z_size = min_t(u64, devip->zsize - devip->zcap,
5800 					    capacity - zstart);
5801 		}
5802 
5803 		WARN_ON_ONCE((int)zsp->z_size <= 0);
5804 		zstart += zsp->z_size;
5805 	}
5806 
5807 	return 0;
5808 }
5809 
5810 static struct sdebug_dev_info *sdebug_device_create(
5811 			struct sdebug_host_info *sdbg_host, gfp_t flags)
5812 {
5813 	struct sdebug_dev_info *devip;
5814 
5815 	devip = kzalloc(sizeof(*devip), flags);
5816 	if (devip) {
5817 		if (sdebug_uuid_ctl == 1)
5818 			uuid_gen(&devip->lu_name);
5819 		else if (sdebug_uuid_ctl == 2) {
5820 			if (got_shared_uuid)
5821 				devip->lu_name = shared_uuid;
5822 			else {
5823 				uuid_gen(&shared_uuid);
5824 				got_shared_uuid = true;
5825 				devip->lu_name = shared_uuid;
5826 			}
5827 		}
5828 		devip->sdbg_host = sdbg_host;
5829 		if (sdeb_zbc_in_use) {
5830 			devip->zoned = sdeb_zbc_model == BLK_ZONED_HM;
5831 			if (sdebug_device_create_zones(devip)) {
5832 				kfree(devip);
5833 				return NULL;
5834 			}
5835 		} else {
5836 			devip->zoned = false;
5837 		}
5838 		devip->create_ts = ktime_get_boottime();
5839 		atomic_set(&devip->stopped, (sdeb_tur_ms_to_ready > 0 ? 2 : 0));
5840 		spin_lock_init(&devip->list_lock);
5841 		INIT_LIST_HEAD(&devip->inject_err_list);
5842 		list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
5843 	}
5844 	return devip;
5845 }
5846 
5847 static struct sdebug_dev_info *find_build_dev_info(struct scsi_device *sdev)
5848 {
5849 	struct sdebug_host_info *sdbg_host;
5850 	struct sdebug_dev_info *open_devip = NULL;
5851 	struct sdebug_dev_info *devip;
5852 
5853 	sdbg_host = shost_to_sdebug_host(sdev->host);
5854 
5855 	list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
5856 		if ((devip->used) && (devip->channel == sdev->channel) &&
5857 		    (devip->target == sdev->id) &&
5858 		    (devip->lun == sdev->lun))
5859 			return devip;
5860 		else {
5861 			if ((!devip->used) && (!open_devip))
5862 				open_devip = devip;
5863 		}
5864 	}
5865 	if (!open_devip) { /* try and make a new one */
5866 		open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
5867 		if (!open_devip) {
5868 			pr_err("out of memory at line %d\n", __LINE__);
5869 			return NULL;
5870 		}
5871 	}
5872 
5873 	open_devip->channel = sdev->channel;
5874 	open_devip->target = sdev->id;
5875 	open_devip->lun = sdev->lun;
5876 	open_devip->sdbg_host = sdbg_host;
5877 	set_bit(SDEBUG_UA_POOCCUR, open_devip->uas_bm);
5878 	open_devip->used = true;
5879 	return open_devip;
5880 }
5881 
5882 static int scsi_debug_slave_alloc(struct scsi_device *sdp)
5883 {
5884 	if (sdebug_verbose)
5885 		pr_info("slave_alloc <%u %u %u %llu>\n",
5886 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5887 
5888 	return 0;
5889 }
5890 
5891 static int scsi_debug_slave_configure(struct scsi_device *sdp)
5892 {
5893 	struct sdebug_dev_info *devip =
5894 			(struct sdebug_dev_info *)sdp->hostdata;
5895 	struct dentry *dentry;
5896 
5897 	if (sdebug_verbose)
5898 		pr_info("slave_configure <%u %u %u %llu>\n",
5899 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5900 	if (sdp->host->max_cmd_len != SDEBUG_MAX_CMD_LEN)
5901 		sdp->host->max_cmd_len = SDEBUG_MAX_CMD_LEN;
5902 	if (devip == NULL) {
5903 		devip = find_build_dev_info(sdp);
5904 		if (devip == NULL)
5905 			return 1;  /* no resources, will be marked offline */
5906 	}
5907 	sdp->hostdata = devip;
5908 	if (sdebug_no_uld)
5909 		sdp->no_uld_attach = 1;
5910 	config_cdb_len(sdp);
5911 
5912 	if (sdebug_allow_restart)
5913 		sdp->allow_restart = 1;
5914 
5915 	devip->debugfs_entry = debugfs_create_dir(dev_name(&sdp->sdev_dev),
5916 				sdebug_debugfs_root);
5917 	if (IS_ERR_OR_NULL(devip->debugfs_entry))
5918 		pr_info("%s: failed to create debugfs directory for device %s\n",
5919 			__func__, dev_name(&sdp->sdev_gendev));
5920 
5921 	dentry = debugfs_create_file("error", 0600, devip->debugfs_entry, sdp,
5922 				&sdebug_error_fops);
5923 	if (IS_ERR_OR_NULL(dentry))
5924 		pr_info("%s: failed to create error file for device %s\n",
5925 			__func__, dev_name(&sdp->sdev_gendev));
5926 
5927 	return 0;
5928 }
5929 
5930 static void scsi_debug_slave_destroy(struct scsi_device *sdp)
5931 {
5932 	struct sdebug_dev_info *devip =
5933 		(struct sdebug_dev_info *)sdp->hostdata;
5934 	struct sdebug_err_inject *err;
5935 
5936 	if (sdebug_verbose)
5937 		pr_info("slave_destroy <%u %u %u %llu>\n",
5938 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5939 
5940 	if (!devip)
5941 		return;
5942 
5943 	spin_lock(&devip->list_lock);
5944 	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
5945 		list_del_rcu(&err->list);
5946 		call_rcu(&err->rcu, sdebug_err_free);
5947 	}
5948 	spin_unlock(&devip->list_lock);
5949 
5950 	debugfs_remove(devip->debugfs_entry);
5951 
5952 	/* make this slot available for re-use */
5953 	devip->used = false;
5954 	sdp->hostdata = NULL;
5955 }
5956 
5957 /* Returns true if we require the queued memory to be freed by the caller. */
5958 static bool stop_qc_helper(struct sdebug_defer *sd_dp,
5959 			   enum sdeb_defer_type defer_t)
5960 {
5961 	if (defer_t == SDEB_DEFER_HRT) {
5962 		int res = hrtimer_try_to_cancel(&sd_dp->hrt);
5963 
5964 		switch (res) {
5965 		case 0: /* Not active, it must have already run */
5966 		case -1: /* It's executing the callback */
5967 			return false;
5968 		case 1: /* Was active, we've now cancelled */
5969 		default:
5970 			return true;
5971 		}
5972 	} else if (defer_t == SDEB_DEFER_WQ) {
5973 		/* Cancel if pending */
5974 		if (cancel_work_sync(&sd_dp->ew.work))
5975 			return true;
5976 		/* Was not pending, so it must have run */
5977 		return false;
5978 	} else if (defer_t == SDEB_DEFER_POLL) {
5979 		return true;
5980 	}
5981 
5982 	return false;
5983 }
5984 
5985 
5986 static bool scsi_debug_stop_cmnd(struct scsi_cmnd *cmnd)
5987 {
5988 	enum sdeb_defer_type l_defer_t;
5989 	struct sdebug_defer *sd_dp;
5990 	struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmnd);
5991 	struct sdebug_queued_cmd *sqcp = TO_QUEUED_CMD(cmnd);
5992 
5993 	lockdep_assert_held(&sdsc->lock);
5994 
5995 	if (!sqcp)
5996 		return false;
5997 	sd_dp = &sqcp->sd_dp;
5998 	l_defer_t = READ_ONCE(sd_dp->defer_t);
5999 	ASSIGN_QUEUED_CMD(cmnd, NULL);
6000 
6001 	if (stop_qc_helper(sd_dp, l_defer_t))
6002 		sdebug_free_queued_cmd(sqcp);
6003 
6004 	return true;
6005 }
6006 
6007 /*
6008  * Called from scsi_debug_abort() only, which is for timed-out cmd.
6009  */
6010 static bool scsi_debug_abort_cmnd(struct scsi_cmnd *cmnd)
6011 {
6012 	struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmnd);
6013 	unsigned long flags;
6014 	bool res;
6015 
6016 	spin_lock_irqsave(&sdsc->lock, flags);
6017 	res = scsi_debug_stop_cmnd(cmnd);
6018 	spin_unlock_irqrestore(&sdsc->lock, flags);
6019 
6020 	return res;
6021 }
6022 
6023 /*
6024  * All we can do is set the cmnd as internally aborted and wait for it to
6025  * finish. We cannot call scsi_done() as normal completion path may do that.
6026  */
6027 static bool sdebug_stop_cmnd(struct request *rq, void *data)
6028 {
6029 	scsi_debug_abort_cmnd(blk_mq_rq_to_pdu(rq));
6030 
6031 	return true;
6032 }
6033 
6034 /* Deletes (stops) timers or work queues of all queued commands */
6035 static void stop_all_queued(void)
6036 {
6037 	struct sdebug_host_info *sdhp;
6038 
6039 	mutex_lock(&sdebug_host_list_mutex);
6040 	list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
6041 		struct Scsi_Host *shost = sdhp->shost;
6042 
6043 		blk_mq_tagset_busy_iter(&shost->tag_set, sdebug_stop_cmnd, NULL);
6044 	}
6045 	mutex_unlock(&sdebug_host_list_mutex);
6046 }
6047 
6048 static int sdebug_fail_abort(struct scsi_cmnd *cmnd)
6049 {
6050 	struct scsi_device *sdp = cmnd->device;
6051 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
6052 	struct sdebug_err_inject *err;
6053 	unsigned char *cmd = cmnd->cmnd;
6054 	int ret = 0;
6055 
6056 	if (devip == NULL)
6057 		return 0;
6058 
6059 	rcu_read_lock();
6060 	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
6061 		if (err->type == ERR_ABORT_CMD_FAILED &&
6062 		    (err->cmd == cmd[0] || err->cmd == 0xff)) {
6063 			ret = !!err->cnt;
6064 			if (err->cnt < 0)
6065 				err->cnt++;
6066 
6067 			rcu_read_unlock();
6068 			return ret;
6069 		}
6070 	}
6071 	rcu_read_unlock();
6072 
6073 	return 0;
6074 }
6075 
6076 static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
6077 {
6078 	bool ok = scsi_debug_abort_cmnd(SCpnt);
6079 	u8 *cmd = SCpnt->cmnd;
6080 	u8 opcode = cmd[0];
6081 
6082 	++num_aborts;
6083 
6084 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
6085 		sdev_printk(KERN_INFO, SCpnt->device,
6086 			    "%s: command%s found\n", __func__,
6087 			    ok ? "" : " not");
6088 
6089 	if (sdebug_fail_abort(SCpnt)) {
6090 		scmd_printk(KERN_INFO, SCpnt, "fail abort command 0x%x\n",
6091 			    opcode);
6092 		return FAILED;
6093 	}
6094 
6095 	return SUCCESS;
6096 }
6097 
6098 static bool scsi_debug_stop_all_queued_iter(struct request *rq, void *data)
6099 {
6100 	struct scsi_device *sdp = data;
6101 	struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
6102 
6103 	if (scmd->device == sdp)
6104 		scsi_debug_abort_cmnd(scmd);
6105 
6106 	return true;
6107 }
6108 
6109 /* Deletes (stops) timers or work queues of all queued commands per sdev */
6110 static void scsi_debug_stop_all_queued(struct scsi_device *sdp)
6111 {
6112 	struct Scsi_Host *shost = sdp->host;
6113 
6114 	blk_mq_tagset_busy_iter(&shost->tag_set,
6115 				scsi_debug_stop_all_queued_iter, sdp);
6116 }
6117 
6118 static int sdebug_fail_lun_reset(struct scsi_cmnd *cmnd)
6119 {
6120 	struct scsi_device *sdp = cmnd->device;
6121 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
6122 	struct sdebug_err_inject *err;
6123 	unsigned char *cmd = cmnd->cmnd;
6124 	int ret = 0;
6125 
6126 	if (devip == NULL)
6127 		return 0;
6128 
6129 	rcu_read_lock();
6130 	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
6131 		if (err->type == ERR_LUN_RESET_FAILED &&
6132 		    (err->cmd == cmd[0] || err->cmd == 0xff)) {
6133 			ret = !!err->cnt;
6134 			if (err->cnt < 0)
6135 				err->cnt++;
6136 
6137 			rcu_read_unlock();
6138 			return ret;
6139 		}
6140 	}
6141 	rcu_read_unlock();
6142 
6143 	return 0;
6144 }
6145 
6146 static int scsi_debug_device_reset(struct scsi_cmnd *SCpnt)
6147 {
6148 	struct scsi_device *sdp = SCpnt->device;
6149 	struct sdebug_dev_info *devip = sdp->hostdata;
6150 	u8 *cmd = SCpnt->cmnd;
6151 	u8 opcode = cmd[0];
6152 
6153 	++num_dev_resets;
6154 
6155 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
6156 		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
6157 
6158 	scsi_debug_stop_all_queued(sdp);
6159 	if (devip)
6160 		set_bit(SDEBUG_UA_POR, devip->uas_bm);
6161 
6162 	if (sdebug_fail_lun_reset(SCpnt)) {
6163 		scmd_printk(KERN_INFO, SCpnt, "fail lun reset 0x%x\n", opcode);
6164 		return FAILED;
6165 	}
6166 
6167 	return SUCCESS;
6168 }
6169 
6170 static int sdebug_fail_target_reset(struct scsi_cmnd *cmnd)
6171 {
6172 	struct scsi_target *starget = scsi_target(cmnd->device);
6173 	struct sdebug_target_info *targetip =
6174 		(struct sdebug_target_info *)starget->hostdata;
6175 
6176 	if (targetip)
6177 		return targetip->reset_fail;
6178 
6179 	return 0;
6180 }
6181 
6182 static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt)
6183 {
6184 	struct scsi_device *sdp = SCpnt->device;
6185 	struct sdebug_host_info *sdbg_host = shost_to_sdebug_host(sdp->host);
6186 	struct sdebug_dev_info *devip;
6187 	u8 *cmd = SCpnt->cmnd;
6188 	u8 opcode = cmd[0];
6189 	int k = 0;
6190 
6191 	++num_target_resets;
6192 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
6193 		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
6194 
6195 	list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
6196 		if (devip->target == sdp->id) {
6197 			set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
6198 			++k;
6199 		}
6200 	}
6201 
6202 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
6203 		sdev_printk(KERN_INFO, sdp,
6204 			    "%s: %d device(s) found in target\n", __func__, k);
6205 
6206 	if (sdebug_fail_target_reset(SCpnt)) {
6207 		scmd_printk(KERN_INFO, SCpnt, "fail target reset 0x%x\n",
6208 			    opcode);
6209 		return FAILED;
6210 	}
6211 
6212 	return SUCCESS;
6213 }
6214 
6215 static int scsi_debug_bus_reset(struct scsi_cmnd *SCpnt)
6216 {
6217 	struct scsi_device *sdp = SCpnt->device;
6218 	struct sdebug_host_info *sdbg_host = shost_to_sdebug_host(sdp->host);
6219 	struct sdebug_dev_info *devip;
6220 	int k = 0;
6221 
6222 	++num_bus_resets;
6223 
6224 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
6225 		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
6226 
6227 	list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
6228 		set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
6229 		++k;
6230 	}
6231 
6232 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
6233 		sdev_printk(KERN_INFO, sdp,
6234 			    "%s: %d device(s) found in host\n", __func__, k);
6235 	return SUCCESS;
6236 }
6237 
6238 static int scsi_debug_host_reset(struct scsi_cmnd *SCpnt)
6239 {
6240 	struct sdebug_host_info *sdbg_host;
6241 	struct sdebug_dev_info *devip;
6242 	int k = 0;
6243 
6244 	++num_host_resets;
6245 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
6246 		sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__);
6247 	mutex_lock(&sdebug_host_list_mutex);
6248 	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
6249 		list_for_each_entry(devip, &sdbg_host->dev_info_list,
6250 				    dev_list) {
6251 			set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
6252 			++k;
6253 		}
6254 	}
6255 	mutex_unlock(&sdebug_host_list_mutex);
6256 	stop_all_queued();
6257 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
6258 		sdev_printk(KERN_INFO, SCpnt->device,
6259 			    "%s: %d device(s) found\n", __func__, k);
6260 	return SUCCESS;
6261 }
6262 
6263 static void sdebug_build_parts(unsigned char *ramp, unsigned long store_size)
6264 {
6265 	struct msdos_partition *pp;
6266 	int starts[SDEBUG_MAX_PARTS + 2], max_part_secs;
6267 	int sectors_per_part, num_sectors, k;
6268 	int heads_by_sects, start_sec, end_sec;
6269 
6270 	/* assume partition table already zeroed */
6271 	if ((sdebug_num_parts < 1) || (store_size < 1048576))
6272 		return;
6273 	if (sdebug_num_parts > SDEBUG_MAX_PARTS) {
6274 		sdebug_num_parts = SDEBUG_MAX_PARTS;
6275 		pr_warn("reducing partitions to %d\n", SDEBUG_MAX_PARTS);
6276 	}
6277 	num_sectors = (int)get_sdebug_capacity();
6278 	sectors_per_part = (num_sectors - sdebug_sectors_per)
6279 			   / sdebug_num_parts;
6280 	heads_by_sects = sdebug_heads * sdebug_sectors_per;
6281 	starts[0] = sdebug_sectors_per;
6282 	max_part_secs = sectors_per_part;
6283 	for (k = 1; k < sdebug_num_parts; ++k) {
6284 		starts[k] = ((k * sectors_per_part) / heads_by_sects)
6285 			    * heads_by_sects;
6286 		if (starts[k] - starts[k - 1] < max_part_secs)
6287 			max_part_secs = starts[k] - starts[k - 1];
6288 	}
6289 	starts[sdebug_num_parts] = num_sectors;
6290 	starts[sdebug_num_parts + 1] = 0;
6291 
6292 	ramp[510] = 0x55;	/* magic partition markings */
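	/* Build a minimal MBR: the 0x55 0xAA boot signature occupies the
	 * last two bytes of the first sector and the four 16 byte primary
	 * partition entries start at offset 0x1be.
	 */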
6293 	ramp[511] = 0xAA;
6294 	pp = (struct msdos_partition *)(ramp + 0x1be);
6295 	for (k = 0; starts[k + 1]; ++k, ++pp) {
6296 		start_sec = starts[k];
6297 		end_sec = starts[k] + max_part_secs - 1;
6298 		pp->boot_ind = 0;
6299 
6300 		pp->cyl = start_sec / heads_by_sects;
6301 		pp->head = (start_sec - (pp->cyl * heads_by_sects))
6302 			   / sdebug_sectors_per;
6303 		pp->sector = (start_sec % sdebug_sectors_per) + 1;
6304 
6305 		pp->end_cyl = end_sec / heads_by_sects;
6306 		pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
6307 			       / sdebug_sectors_per;
6308 		pp->end_sector = (end_sec % sdebug_sectors_per) + 1;
6309 
6310 		pp->start_sect = cpu_to_le32(start_sec);
6311 		pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1);
6312 		pp->sys_ind = 0x83;	/* plain Linux partition */
6313 	}
6314 }
6315 
6316 static void block_unblock_all_queues(bool block)
6317 {
6318 	struct sdebug_host_info *sdhp;
6319 
6320 	lockdep_assert_held(&sdebug_host_list_mutex);
6321 
6322 	list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
6323 		struct Scsi_Host *shost = sdhp->shost;
6324 
6325 		if (block)
6326 			scsi_block_requests(shost);
6327 		else
6328 			scsi_unblock_requests(shost);
6329 	}
6330 }
6331 
6332 /* Adjust (by rounding down) the sdebug_cmnd_count so abs(every_nth)-1
6333  * commands will be processed normally before triggers occur.
6334  */
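/* For example (illustrative numbers): with every_nth = 100 and a current
 * sdebug_cmnd_count of 327, the count is rounded down to 300.
 */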
6335 static void tweak_cmnd_count(void)
6336 {
6337 	int count, modulo;
6338 
6339 	modulo = abs(sdebug_every_nth);
6340 	if (modulo < 2)
6341 		return;
6342 
6343 	mutex_lock(&sdebug_host_list_mutex);
6344 	block_unblock_all_queues(true);
6345 	count = atomic_read(&sdebug_cmnd_count);
6346 	atomic_set(&sdebug_cmnd_count, (count / modulo) * modulo);
6347 	block_unblock_all_queues(false);
6348 	mutex_unlock(&sdebug_host_list_mutex);
6349 }
6350 
6351 static void clear_queue_stats(void)
6352 {
6353 	atomic_set(&sdebug_cmnd_count, 0);
6354 	atomic_set(&sdebug_completions, 0);
6355 	atomic_set(&sdebug_miss_cpus, 0);
6356 	atomic_set(&sdebug_a_tsf, 0);
6357 }
6358 
6359 static bool inject_on_this_cmd(void)
6360 {
6361 	if (sdebug_every_nth == 0)
6362 		return false;
6363 	return (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) == 0;
6364 }
6365 
6366 #define INCLUSIVE_TIMING_MAX_NS 1000000		/* 1 millisecond */
6367 
6368 
6369 void sdebug_free_queued_cmd(struct sdebug_queued_cmd *sqcp)
6370 {
6371 	if (sqcp)
6372 		kmem_cache_free(queued_cmd_cache, sqcp);
6373 }
6374 
6375 static struct sdebug_queued_cmd *sdebug_alloc_queued_cmd(struct scsi_cmnd *scmd)
6376 {
6377 	struct sdebug_queued_cmd *sqcp;
6378 	struct sdebug_defer *sd_dp;
6379 
6380 	sqcp = kmem_cache_zalloc(queued_cmd_cache, GFP_ATOMIC);
6381 	if (!sqcp)
6382 		return NULL;
6383 
6384 	sd_dp = &sqcp->sd_dp;
6385 
6386 	hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
6387 	sd_dp->hrt.function = sdebug_q_cmd_hrt_complete;
6388 	INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);
6389 
6390 	sqcp->scmd = scmd;
6391 
6392 	return sqcp;
6393 }
6394 
6395 /* Complete the processing of the thread that queued a SCSI command to this
6396  * driver. It either completes the command by calling scsi_done() or
6397  * schedules a hr timer or work queue then returns 0. Returns
6398  * SCSI_MLQUEUE_HOST_BUSY if temporarily out of resources.
6399  */
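/* Delay plumbing summary: delta_jiff == 0 responds in the caller's thread;
 * delta_jiff > 0 or ndelay > 0 arms a pinned hrtimer (or marks the command
 * for poll completion on REQ_POLLED requests); a negative delta_jiff falls
 * through to the work-queue path.
 */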
6400 static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
6401 			 int scsi_result,
6402 			 int (*pfp)(struct scsi_cmnd *,
6403 				    struct sdebug_dev_info *),
6404 			 int delta_jiff, int ndelay)
6405 {
6406 	struct request *rq = scsi_cmd_to_rq(cmnd);
6407 	bool polled = rq->cmd_flags & REQ_POLLED;
6408 	struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmnd);
6409 	unsigned long flags;
6410 	u64 ns_from_boot = 0;
6411 	struct sdebug_queued_cmd *sqcp;
6412 	struct scsi_device *sdp;
6413 	struct sdebug_defer *sd_dp;
6414 
6415 	if (unlikely(devip == NULL)) {
6416 		if (scsi_result == 0)
6417 			scsi_result = DID_NO_CONNECT << 16;
6418 		goto respond_in_thread;
6419 	}
6420 	sdp = cmnd->device;
6421 
6422 	if (delta_jiff == 0)
6423 		goto respond_in_thread;
6424 
6425 
6426 	if (unlikely(sdebug_every_nth && (SDEBUG_OPT_RARE_TSF & sdebug_opts) &&
6427 		     (scsi_result == 0))) {
6428 		int num_in_q = scsi_device_busy(sdp);
6429 		int qdepth = cmnd->device->queue_depth;
6430 
6431 		if ((num_in_q == qdepth) &&
6432 		    (atomic_inc_return(&sdebug_a_tsf) >=
6433 		     abs(sdebug_every_nth))) {
6434 			atomic_set(&sdebug_a_tsf, 0);
6435 			scsi_result = device_qfull_result;
6436 
6437 			if (unlikely(SDEBUG_OPT_Q_NOISE & sdebug_opts))
6438 				sdev_printk(KERN_INFO, sdp, "%s: num_in_q=%d +1, <inject> status: TASK SET FULL\n",
6439 					    __func__, num_in_q);
6440 		}
6441 	}
6442 
6443 	sqcp = sdebug_alloc_queued_cmd(cmnd);
6444 	if (!sqcp) {
6445 		pr_err("%s no alloc\n", __func__);
6446 		return SCSI_MLQUEUE_HOST_BUSY;
6447 	}
6448 	sd_dp = &sqcp->sd_dp;
6449 
6450 	if (polled)
6451 		ns_from_boot = ktime_get_boottime_ns();
6452 
6453 	/* one of the resp_*() response functions is called here */
6454 	cmnd->result = pfp ? pfp(cmnd, devip) : 0;
6455 	if (cmnd->result & SDEG_RES_IMMED_MASK) {
6456 		cmnd->result &= ~SDEG_RES_IMMED_MASK;
6457 		delta_jiff = ndelay = 0;
6458 	}
6459 	if (cmnd->result == 0 && scsi_result != 0)
6460 		cmnd->result = scsi_result;
6461 	if (cmnd->result == 0 && unlikely(sdebug_opts & SDEBUG_OPT_TRANSPORT_ERR)) {
6462 		if (atomic_read(&sdeb_inject_pending)) {
6463 			mk_sense_buffer(cmnd, ABORTED_COMMAND, TRANSPORT_PROBLEM, ACK_NAK_TO);
6464 			atomic_set(&sdeb_inject_pending, 0);
6465 			cmnd->result = check_condition_result;
6466 		}
6467 	}
6468 
6469 	if (unlikely(sdebug_verbose && cmnd->result))
6470 		sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n",
6471 			    __func__, cmnd->result);
6472 
6473 	if (delta_jiff > 0 || ndelay > 0) {
6474 		ktime_t kt;
6475 
6476 		if (delta_jiff > 0) {
6477 			u64 ns = jiffies_to_nsecs(delta_jiff);
6478 
6479 			if (sdebug_random && ns < U32_MAX) {
6480 				ns = get_random_u32_below((u32)ns);
6481 			} else if (sdebug_random) {
6482 				ns >>= 12;	/* scale to 4 usec precision */
6483 				if (ns < U32_MAX)	/* over 4 hours max */
6484 					ns = get_random_u32_below((u32)ns);
6485 				ns <<= 12;
6486 			}
6487 			kt = ns_to_ktime(ns);
6488 		} else {	/* ndelay has a 4.2 second max */
6489 			kt = sdebug_random ? get_random_u32_below((u32)ndelay) :
6490 					     (u32)ndelay;
6491 			if (ndelay < INCLUSIVE_TIMING_MAX_NS) {
6492 				u64 d = ktime_get_boottime_ns() - ns_from_boot;
6493 
6494 				if (kt <= d) {	/* elapsed duration >= kt */
6495 					/* call scsi_done() from this thread */
6496 					sdebug_free_queued_cmd(sqcp);
6497 					scsi_done(cmnd);
6498 					return 0;
6499 				}
6500 				/* otherwise reduce kt by elapsed time */
6501 				kt -= d;
6502 			}
6503 		}
6504 		if (sdebug_statistics)
6505 			sd_dp->issuing_cpu = raw_smp_processor_id();
6506 		if (polled) {
6507 			spin_lock_irqsave(&sdsc->lock, flags);
6508 			sd_dp->cmpl_ts = ktime_add(ns_to_ktime(ns_from_boot), kt);
6509 			ASSIGN_QUEUED_CMD(cmnd, sqcp);
6510 			WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_POLL);
6511 			spin_unlock_irqrestore(&sdsc->lock, flags);
6512 		} else {
6513 			/* schedule the invocation of scsi_done() for a later time */
6514 			spin_lock_irqsave(&sdsc->lock, flags);
6515 			ASSIGN_QUEUED_CMD(cmnd, sqcp);
6516 			WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_HRT);
6517 			hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED);
6518 			/*
6519 			 * The completion handler will try to grab sdsc->lock,
6520 			 * so there is no chance that it will call scsi_done()
6521 			 * until we release the lock here (hence it is ok to
6522 			 * keep referencing sdsc).
6523 			 */
6524 			spin_unlock_irqrestore(&sdsc->lock, flags);
6525 		}
6526 	} else {	/* jdelay < 0, use work queue */
6527 		if (unlikely((sdebug_opts & SDEBUG_OPT_CMD_ABORT) &&
6528 			     atomic_read(&sdeb_inject_pending))) {
6529 			sd_dp->aborted = true;
6530 			atomic_set(&sdeb_inject_pending, 0);
6531 			sdev_printk(KERN_INFO, sdp, "abort request tag=%#x\n",
6532 				    blk_mq_unique_tag_to_tag(get_tag(cmnd)));
6533 		}
6534 
6535 		if (sdebug_statistics)
6536 			sd_dp->issuing_cpu = raw_smp_processor_id();
6537 		if (polled) {
6538 			spin_lock_irqsave(&sdsc->lock, flags);
6539 			ASSIGN_QUEUED_CMD(cmnd, sqcp);
6540 			sd_dp->cmpl_ts = ns_to_ktime(ns_from_boot);
6541 			WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_POLL);
6542 			spin_unlock_irqrestore(&sdsc->lock, flags);
6543 		} else {
6544 			spin_lock_irqsave(&sdsc->lock, flags);
6545 			ASSIGN_QUEUED_CMD(cmnd, sqcp);
6546 			WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_WQ);
6547 			schedule_work(&sd_dp->ew.work);
6548 			spin_unlock_irqrestore(&sdsc->lock, flags);
6549 		}
6550 	}
6551 
6552 	return 0;
6553 
6554 respond_in_thread:	/* call back to mid-layer using invocation thread */
6555 	cmnd->result = pfp != NULL ? pfp(cmnd, devip) : 0;
6556 	cmnd->result &= ~SDEG_RES_IMMED_MASK;
6557 	if (cmnd->result == 0 && scsi_result != 0)
6558 		cmnd->result = scsi_result;
6559 	scsi_done(cmnd);
6560 	return 0;
6561 }
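/* Summary of the completion paths chosen by schedule_resp() above:
 *   - devip == NULL or delta_jiff == 0: scsi_done() is called directly in
 *     the submitting thread;
 *   - delta_jiff > 0 or ndelay > 0: completion is deferred to a pinned
 *     hrtimer, or merely timestamped (SDEB_DEFER_POLL) for REQ_POLLED
 *     requests;
 *   - delta_jiff < 0: completion is deferred to a work queue.
 */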
6562 
6563 /* Note: The following macros create attribute files in the
6564    /sys/module/scsi_debug/parameters directory. Unfortunately this
6565    driver is not notified when one of them is changed, so it cannot
6566    trigger auxiliary actions as it can when the corresponding attribute
6567    in the /sys/bus/pseudo/drivers/scsi_debug directory is changed.
6568  */
6569 module_param_named(add_host, sdebug_add_host, int, S_IRUGO | S_IWUSR);
6570 module_param_named(ato, sdebug_ato, int, S_IRUGO);
6571 module_param_named(cdb_len, sdebug_cdb_len, int, 0644);
6572 module_param_named(clustering, sdebug_clustering, bool, S_IRUGO | S_IWUSR);
6573 module_param_named(delay, sdebug_jdelay, int, S_IRUGO | S_IWUSR);
6574 module_param_named(dev_size_mb, sdebug_dev_size_mb, int, S_IRUGO);
6575 module_param_named(dif, sdebug_dif, int, S_IRUGO);
6576 module_param_named(dix, sdebug_dix, int, S_IRUGO);
6577 module_param_named(dsense, sdebug_dsense, int, S_IRUGO | S_IWUSR);
6578 module_param_named(every_nth, sdebug_every_nth, int, S_IRUGO | S_IWUSR);
6579 module_param_named(fake_rw, sdebug_fake_rw, int, S_IRUGO | S_IWUSR);
6580 module_param_named(guard, sdebug_guard, uint, S_IRUGO);
6581 module_param_named(host_lock, sdebug_host_lock, bool, S_IRUGO | S_IWUSR);
6582 module_param_named(host_max_queue, sdebug_host_max_queue, int, S_IRUGO);
6583 module_param_string(inq_product, sdebug_inq_product_id,
6584 		    sizeof(sdebug_inq_product_id), S_IRUGO | S_IWUSR);
6585 module_param_string(inq_rev, sdebug_inq_product_rev,
6586 		    sizeof(sdebug_inq_product_rev), S_IRUGO | S_IWUSR);
6587 module_param_string(inq_vendor, sdebug_inq_vendor_id,
6588 		    sizeof(sdebug_inq_vendor_id), S_IRUGO | S_IWUSR);
6589 module_param_named(lbprz, sdebug_lbprz, int, S_IRUGO);
6590 module_param_named(lbpu, sdebug_lbpu, int, S_IRUGO);
6591 module_param_named(lbpws, sdebug_lbpws, int, S_IRUGO);
6592 module_param_named(lbpws10, sdebug_lbpws10, int, S_IRUGO);
6593 module_param_named(atomic_wr, sdebug_atomic_wr, int, S_IRUGO);
6594 module_param_named(lowest_aligned, sdebug_lowest_aligned, int, S_IRUGO);
6595 module_param_named(lun_format, sdebug_lun_am_i, int, S_IRUGO | S_IWUSR);
6596 module_param_named(max_luns, sdebug_max_luns, int, S_IRUGO | S_IWUSR);
6597 module_param_named(max_queue, sdebug_max_queue, int, S_IRUGO | S_IWUSR);
6598 module_param_named(medium_error_count, sdebug_medium_error_count, int,
6599 		   S_IRUGO | S_IWUSR);
6600 module_param_named(medium_error_start, sdebug_medium_error_start, int,
6601 		   S_IRUGO | S_IWUSR);
6602 module_param_named(ndelay, sdebug_ndelay, int, S_IRUGO | S_IWUSR);
6603 module_param_named(no_lun_0, sdebug_no_lun_0, int, S_IRUGO | S_IWUSR);
6604 module_param_named(no_rwlock, sdebug_no_rwlock, bool, S_IRUGO | S_IWUSR);
6605 module_param_named(no_uld, sdebug_no_uld, int, S_IRUGO);
6606 module_param_named(num_parts, sdebug_num_parts, int, S_IRUGO);
6607 module_param_named(num_tgts, sdebug_num_tgts, int, S_IRUGO | S_IWUSR);
6608 module_param_named(opt_blks, sdebug_opt_blks, int, S_IRUGO);
6609 module_param_named(opt_xferlen_exp, sdebug_opt_xferlen_exp, int, S_IRUGO);
6610 module_param_named(opts, sdebug_opts, int, S_IRUGO | S_IWUSR);
6611 module_param_named(per_host_store, sdebug_per_host_store, bool,
6612 		   S_IRUGO | S_IWUSR);
6613 module_param_named(physblk_exp, sdebug_physblk_exp, int, S_IRUGO);
6614 module_param_named(ptype, sdebug_ptype, int, S_IRUGO | S_IWUSR);
6615 module_param_named(random, sdebug_random, bool, S_IRUGO | S_IWUSR);
6616 module_param_named(removable, sdebug_removable, bool, S_IRUGO | S_IWUSR);
6617 module_param_named(scsi_level, sdebug_scsi_level, int, S_IRUGO);
6618 module_param_named(sector_size, sdebug_sector_size, int, S_IRUGO);
6619 module_param_named(statistics, sdebug_statistics, bool, S_IRUGO | S_IWUSR);
6620 module_param_named(strict, sdebug_strict, bool, S_IRUGO | S_IWUSR);
6621 module_param_named(submit_queues, submit_queues, int, S_IRUGO);
6622 module_param_named(poll_queues, poll_queues, int, S_IRUGO);
6623 module_param_named(tur_ms_to_ready, sdeb_tur_ms_to_ready, int, S_IRUGO);
6624 module_param_named(unmap_alignment, sdebug_unmap_alignment, int, S_IRUGO);
6625 module_param_named(unmap_granularity, sdebug_unmap_granularity, int, S_IRUGO);
6626 module_param_named(unmap_max_blocks, sdebug_unmap_max_blocks, int, S_IRUGO);
6627 module_param_named(unmap_max_desc, sdebug_unmap_max_desc, int, S_IRUGO);
6628 module_param_named(atomic_wr_max_length, sdebug_atomic_wr_max_length, int, S_IRUGO);
6629 module_param_named(atomic_wr_align, sdebug_atomic_wr_align, int, S_IRUGO);
6630 module_param_named(atomic_wr_gran, sdebug_atomic_wr_gran, int, S_IRUGO);
6631 module_param_named(atomic_wr_max_length_bndry, sdebug_atomic_wr_max_length_bndry, int, S_IRUGO);
6632 module_param_named(atomic_wr_max_bndry, sdebug_atomic_wr_max_bndry, int, S_IRUGO);
6633 module_param_named(uuid_ctl, sdebug_uuid_ctl, int, S_IRUGO);
6634 module_param_named(virtual_gb, sdebug_virtual_gb, int, S_IRUGO | S_IWUSR);
6635 module_param_named(vpd_use_hostno, sdebug_vpd_use_hostno, int,
6636 		   S_IRUGO | S_IWUSR);
6637 module_param_named(wp, sdebug_wp, bool, S_IRUGO | S_IWUSR);
6638 module_param_named(write_same_length, sdebug_write_same_length, int,
6639 		   S_IRUGO | S_IWUSR);
6640 module_param_named(zbc, sdeb_zbc_model_s, charp, S_IRUGO);
6641 module_param_named(zone_cap_mb, sdeb_zbc_zone_cap_mb, int, S_IRUGO);
6642 module_param_named(zone_max_open, sdeb_zbc_max_open, int, S_IRUGO);
6643 module_param_named(zone_nr_conv, sdeb_zbc_nr_conv, int, S_IRUGO);
6644 module_param_named(zone_size_mb, sdeb_zbc_zone_size_mb, int, S_IRUGO);
6645 module_param_named(allow_restart, sdebug_allow_restart, bool, S_IRUGO | S_IWUSR);
6646 
6647 MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
6648 MODULE_DESCRIPTION("SCSI debug adapter driver");
6649 MODULE_LICENSE("GPL");
6650 MODULE_VERSION(SDEBUG_VERSION);
6651 
6652 MODULE_PARM_DESC(add_host, "add n hosts, in sysfs if negative remove host(s) (def=1)");
6653 MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
6654 MODULE_PARM_DESC(cdb_len, "suggest CDB lengths to drivers (def=10)");
6655 MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
6656 MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny");
6657 MODULE_PARM_DESC(dev_size_mb, "size in MiB of ram shared by devs(def=8)");
6658 MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
6659 MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
6660 MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
6661 MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
6662 MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
6663 MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
6664 MODULE_PARM_DESC(host_lock, "host_lock is ignored (def=0)");
6665 MODULE_PARM_DESC(host_max_queue,
6666 		 "host max # of queued cmds (0 to max(def) [max_queue fixed equal for !0])");
6667 MODULE_PARM_DESC(inq_product, "SCSI INQUIRY product string (def=\"scsi_debug\")");
6668 MODULE_PARM_DESC(inq_rev, "SCSI INQUIRY revision string (def=\""
6669 		 SDEBUG_VERSION "\")");
6670 MODULE_PARM_DESC(inq_vendor, "SCSI INQUIRY vendor string (def=\"Linux\")");
6671 MODULE_PARM_DESC(lbprz,
6672 		 "on read unmapped LBs return 0 when 1 (def), return 0xff when 2");
6673 MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
6674 MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
6675 MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
6676 MODULE_PARM_DESC(atomic_wr, "enable ATOMIC WRITE support, support WRITE ATOMIC(16) (def=0)");
6677 MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
6678 MODULE_PARM_DESC(lun_format, "LUN format: 0->peripheral (def); 1 --> flat address method");
6679 MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
6680 MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to max(def))");
6681 MODULE_PARM_DESC(medium_error_count, "count of sectors from medium_error_start that return a MEDIUM error");
6682 MODULE_PARM_DESC(medium_error_start, "starting sector number to return MEDIUM error");
6683 MODULE_PARM_DESC(ndelay, "response delay in nanoseconds (def=0 -> ignore)");
6684 MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
6685 MODULE_PARM_DESC(no_rwlock, "don't protect user data reads+writes (def=0)");
6686 MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0)");
6687 MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
6688 MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
6689 MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=1024)");
6690 MODULE_PARM_DESC(opt_xferlen_exp, "optimal transfer length granularity exponent (def=physblk_exp)");
6691 MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
6692 MODULE_PARM_DESC(per_host_store, "If set, next positive add_host will get new store (def=0)");
6693 MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
6694 MODULE_PARM_DESC(poll_queues, "support for iouring iopoll queues (1 to max(submit_queues - 1))");
6695 MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
6696 MODULE_PARM_DESC(random, "If set, uniformly randomize command duration between 0 and delay_in_ns");
6697 MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
6698 MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=7[SPC-5])");
6699 MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
6700 MODULE_PARM_DESC(statistics, "collect statistics on commands, queues (def=0)");
6701 MODULE_PARM_DESC(strict, "stricter checks: reserved field in cdb (def=0)");
6702 MODULE_PARM_DESC(submit_queues, "support for block multi-queue (def=1)");
6703 MODULE_PARM_DESC(tur_ms_to_ready, "TEST UNIT READY millisecs before initial good status (def=0)");
6704 MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
6705 MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
6706 MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)");
6707 MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
6708 MODULE_PARM_DESC(atomic_wr_max_length, "max # of blocks can be atomically written in one cmd (def=8192)");
6709 MODULE_PARM_DESC(atomic_wr_align, "minimum alignment of atomic write in blocks (def=2)");
6710 MODULE_PARM_DESC(atomic_wr_gran, "minimum granularity of atomic write in blocks (def=2)");
6711 MODULE_PARM_DESC(atomic_wr_max_length_bndry, "max # of blocks can be atomically written in one cmd with boundary set (def=8192)");
6712 MODULE_PARM_DESC(atomic_wr_max_bndry, "max # boundaries per atomic write (def=128)");
6713 MODULE_PARM_DESC(uuid_ctl,
6714 		 "1->use uuid for lu name, 0->don't, 2->all use same (def=0)");
6715 MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)");
6716 MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
6717 MODULE_PARM_DESC(wp, "Write Protect (def=0)");
6718 MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
6719 MODULE_PARM_DESC(zbc, "'none' [0]; 'aware' [1]; 'managed' [2] (def=0). Can have 'host-' prefix");
6720 MODULE_PARM_DESC(zone_cap_mb, "Zone capacity in MiB (def=zone size)");
6721 MODULE_PARM_DESC(zone_max_open, "Maximum number of open zones; [0] for no limit (def=auto)");
6722 MODULE_PARM_DESC(zone_nr_conv, "Number of conventional zones (def=1)");
6723 MODULE_PARM_DESC(zone_size_mb, "Zone size in MiB (def=auto)");
6724 MODULE_PARM_DESC(allow_restart, "Set scsi_device's allow_restart flag(def=0)");
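/* Example invocation (illustrative values only):
 *   modprobe scsi_debug dev_size_mb=256 num_tgts=2 max_luns=4 ndelay=10000
 * creates one host (add_host defaults to 1) with 2 targets of 4 LUNs each,
 * backed by a shared 256 MiB ram store, completing commands after roughly
 * 10 microseconds.
 */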
6725 
6726 #define SDEBUG_INFO_LEN 256
6727 static char sdebug_info[SDEBUG_INFO_LEN];
6728 
6729 static const char *scsi_debug_info(struct Scsi_Host *shp)
6730 {
6731 	int k;
6732 
6733 	k = scnprintf(sdebug_info, SDEBUG_INFO_LEN, "%s: version %s [%s]\n",
6734 		      my_name, SDEBUG_VERSION, sdebug_version_date);
6735 	if (k >= (SDEBUG_INFO_LEN - 1))
6736 		return sdebug_info;
6737 	scnprintf(sdebug_info + k, SDEBUG_INFO_LEN - k,
6738 		  "  dev_size_mb=%d, opts=0x%x, submit_queues=%d, %s=%d",
6739 		  sdebug_dev_size_mb, sdebug_opts, submit_queues,
6740 		  "statistics", (int)sdebug_statistics);
6741 	return sdebug_info;
6742 }
6743 
6744 /* 'echo <val> > /proc/scsi/scsi_debug/<host_id>' writes to opts */
6745 static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer,
6746 				 int length)
6747 {
6748 	char arr[16];
6749 	int opts;
6750 	int minLen = length > 15 ? 15 : length;
6751 
6752 	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
6753 		return -EACCES;
6754 	memcpy(arr, buffer, minLen);
6755 	arr[minLen] = '\0';
6756 	if (1 != sscanf(arr, "%d", &opts))
6757 		return -EINVAL;
6758 	sdebug_opts = opts;
6759 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
6760 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
6761 	if (sdebug_every_nth != 0)
6762 		tweak_cmnd_count();
6763 	return length;
6764 }
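/* For example, "echo 4 > /proc/scsi/scsi_debug/<host_id>" sets opts to 4
 * (timeout injection); every_nth must also be non-zero for the timeouts
 * to actually trigger.
 */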
6765 
6766 struct sdebug_submit_queue_data {
6767 	int *first;
6768 	int *last;
6769 	int queue_num;
6770 };
6771 
6772 static bool sdebug_submit_queue_iter(struct request *rq, void *opaque)
6773 {
6774 	struct sdebug_submit_queue_data *data = opaque;
6775 	u32 unique_tag = blk_mq_unique_tag(rq);
6776 	u16 hwq = blk_mq_unique_tag_to_hwq(unique_tag);
6777 	u16 tag = blk_mq_unique_tag_to_tag(unique_tag);
6778 	int queue_num = data->queue_num;
6779 
6780 	if (hwq != queue_num)
6781 		return true;
6782 
6783 	/* Rely on iter'ing in ascending tag order */
6784 	if (*data->first == -1)
6785 		*data->first = *data->last = tag;
6786 	else
6787 		*data->last = tag;
6788 
6789 	return true;
6790 }
6791 
6792 /* Output seen with 'cat /proc/scsi/scsi_debug/<host_id>'. It will be the
6793  * same for each scsi_debug host (if more than one). Some of the counters
6794  * shown are not atomic, so they may be inaccurate on a busy system. */
6795 static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
6796 {
6797 	struct sdebug_host_info *sdhp;
6798 	int j;
6799 
6800 	seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n",
6801 		   SDEBUG_VERSION, sdebug_version_date);
6802 	seq_printf(m, "num_tgts=%d, %ssize=%d MB, opts=0x%x, every_nth=%d\n",
6803 		   sdebug_num_tgts, "shared (ram) ", sdebug_dev_size_mb,
6804 		   sdebug_opts, sdebug_every_nth);
6805 	seq_printf(m, "delay=%d, ndelay=%d, max_luns=%d, sector_size=%d %s\n",
6806 		   sdebug_jdelay, sdebug_ndelay, sdebug_max_luns,
6807 		   sdebug_sector_size, "bytes");
6808 	seq_printf(m, "cylinders=%d, heads=%d, sectors=%d, command aborts=%d\n",
6809 		   sdebug_cylinders_per, sdebug_heads, sdebug_sectors_per,
6810 		   num_aborts);
6811 	seq_printf(m, "RESETs: device=%d, target=%d, bus=%d, host=%d\n",
6812 		   num_dev_resets, num_target_resets, num_bus_resets,
6813 		   num_host_resets);
6814 	seq_printf(m, "dix_reads=%d, dix_writes=%d, dif_errors=%d\n",
6815 		   dix_reads, dix_writes, dif_errors);
6816 	seq_printf(m, "usec_in_jiffy=%lu, statistics=%d\n", TICK_NSEC / 1000,
6817 		   sdebug_statistics);
6818 	seq_printf(m, "cmnd_count=%d, completions=%d, %s=%d, a_tsf=%d, mq_polls=%d\n",
6819 		   atomic_read(&sdebug_cmnd_count),
6820 		   atomic_read(&sdebug_completions),
6821 		   "miss_cpus", atomic_read(&sdebug_miss_cpus),
6822 		   atomic_read(&sdebug_a_tsf),
6823 		   atomic_read(&sdeb_mq_poll_count));
6824 
6825 	seq_printf(m, "submit_queues=%d\n", submit_queues);
6826 	for (j = 0; j < submit_queues; ++j) {
6827 		int f = -1, l = -1;
6828 		struct sdebug_submit_queue_data data = {
6829 			.queue_num = j,
6830 			.first = &f,
6831 			.last = &l,
6832 		};
6833 		seq_printf(m, "  queue %d:\n", j);
6834 		blk_mq_tagset_busy_iter(&host->tag_set, sdebug_submit_queue_iter,
6835 					&data);
6836 		if (f >= 0) {
6837 			seq_printf(m, "    in_use_bm BUSY: %s: %d,%d\n",
6838 				   "first,last bits", f, l);
6839 		}
6840 	}
6841 
6842 	seq_printf(m, "this host_no=%d\n", host->host_no);
6843 	if (!xa_empty(per_store_ap)) {
6844 		bool niu;
6845 		int idx;
6846 		unsigned long l_idx;
6847 		struct sdeb_store_info *sip;
6848 
6849 		seq_puts(m, "\nhost list:\n");
6850 		j = 0;
6851 		list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
6852 			idx = sdhp->si_idx;
6853 			seq_printf(m, "  %d: host_no=%d, si_idx=%d\n", j,
6854 				   sdhp->shost->host_no, idx);
6855 			++j;
6856 		}
6857 		seq_printf(m, "\nper_store array [most_recent_idx=%d]:\n",
6858 			   sdeb_most_recent_idx);
6859 		j = 0;
6860 		xa_for_each(per_store_ap, l_idx, sip) {
6861 			niu = xa_get_mark(per_store_ap, l_idx,
6862 					  SDEB_XA_NOT_IN_USE);
6863 			idx = (int)l_idx;
6864 			seq_printf(m, "  %d: idx=%d%s\n", j, idx,
6865 				   (niu ? "  not_in_use" : ""));
6866 			++j;
6867 		}
6868 	}
6869 	return 0;
6870 }
6871 
6872 static ssize_t delay_show(struct device_driver *ddp, char *buf)
6873 {
6874 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_jdelay);
6875 }
6876 /* Returns -EBUSY if jdelay is being changed and commands are queued. The unit
6877  * of delay is jiffies.
6878  */
6879 static ssize_t delay_store(struct device_driver *ddp, const char *buf,
6880 			   size_t count)
6881 {
6882 	int jdelay, res;
6883 
6884 	if (count > 0 && sscanf(buf, "%d", &jdelay) == 1) {
6885 		res = count;
6886 		if (sdebug_jdelay != jdelay) {
6887 			struct sdebug_host_info *sdhp;
6888 
6889 			mutex_lock(&sdebug_host_list_mutex);
6890 			block_unblock_all_queues(true);
6891 
6892 			list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
6893 				struct Scsi_Host *shost = sdhp->shost;
6894 
6895 				if (scsi_host_busy(shost)) {
6896 					res = -EBUSY;   /* queued commands */
6897 					break;
6898 				}
6899 			}
6900 			if (res > 0) {
6901 				sdebug_jdelay = jdelay;
6902 				sdebug_ndelay = 0;
6903 			}
6904 			block_unblock_all_queues(false);
6905 			mutex_unlock(&sdebug_host_list_mutex);
6906 		}
6907 		return res;
6908 	}
6909 	return -EINVAL;
6910 }
6911 static DRIVER_ATTR_RW(delay);
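/* Example: "echo 2 > /sys/bus/pseudo/drivers/scsi_debug/delay" defers each
 * command completion by 2 jiffies; -EBUSY is returned if any sdebug host
 * still has commands queued.
 */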
6912 
6913 static ssize_t ndelay_show(struct device_driver *ddp, char *buf)
6914 {
6915 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ndelay);
6916 }
6917 /* Returns -EBUSY if ndelay is being changed and commands are queued */
6918 /* If > 0 and accepted then sdebug_jdelay is set to JDELAY_OVERRIDDEN */
6919 static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
6920 			    size_t count)
6921 {
6922 	int ndelay, res;
6923 
6924 	if ((count > 0) && (1 == sscanf(buf, "%d", &ndelay)) &&
6925 	    (ndelay >= 0) && (ndelay < (1000 * 1000 * 1000))) {
6926 		res = count;
6927 		if (sdebug_ndelay != ndelay) {
6928 			struct sdebug_host_info *sdhp;
6929 
6930 			mutex_lock(&sdebug_host_list_mutex);
6931 			block_unblock_all_queues(true);
6932 
6933 			list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
6934 				struct Scsi_Host *shost = sdhp->shost;
6935 
6936 				if (scsi_host_busy(shost)) {
6937 					res = -EBUSY;   /* queued commands */
6938 					break;
6939 				}
6940 			}
6941 
6942 			if (res > 0) {
6943 				sdebug_ndelay = ndelay;
6944 				sdebug_jdelay = ndelay  ? JDELAY_OVERRIDDEN
6945 							: DEF_JDELAY;
6946 			}
6947 			block_unblock_all_queues(false);
6948 			mutex_unlock(&sdebug_host_list_mutex);
6949 		}
6950 		return res;
6951 	}
6952 	return -EINVAL;
6953 }
6954 static DRIVER_ATTR_RW(ndelay);
6955 
6956 static ssize_t opts_show(struct device_driver *ddp, char *buf)
6957 {
6958 	return scnprintf(buf, PAGE_SIZE, "0x%x\n", sdebug_opts);
6959 }
6960 
6961 static ssize_t opts_store(struct device_driver *ddp, const char *buf,
6962 			  size_t count)
6963 {
6964 	int opts;
6965 	char work[20];
6966 
6967 	if (sscanf(buf, "%10s", work) == 1) {
6968 		if (strncasecmp(work, "0x", 2) == 0) {
6969 			if (kstrtoint(work + 2, 16, &opts) == 0)
6970 				goto opts_done;
6971 		} else {
6972 			if (kstrtoint(work, 10, &opts) == 0)
6973 				goto opts_done;
6974 		}
6975 	}
6976 	return -EINVAL;
6977 opts_done:
6978 	sdebug_opts = opts;
6979 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
6980 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
6981 	tweak_cmnd_count();
6982 	return count;
6983 }
6984 static DRIVER_ATTR_RW(opts);
6985 
6986 static ssize_t ptype_show(struct device_driver *ddp, char *buf)
6987 {
6988 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ptype);
6989 }
6990 static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
6991 			   size_t count)
6992 {
6993 	int n;
6994 
6995 	/* Cannot change from or to TYPE_ZBC with sysfs */
6996 	if (sdebug_ptype == TYPE_ZBC)
6997 		return -EINVAL;
6998 
6999 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
7000 		if (n == TYPE_ZBC)
7001 			return -EINVAL;
7002 		sdebug_ptype = n;
7003 		return count;
7004 	}
7005 	return -EINVAL;
7006 }
7007 static DRIVER_ATTR_RW(ptype);
7008 
7009 static ssize_t dsense_show(struct device_driver *ddp, char *buf)
7010 {
7011 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dsense);
7012 }
7013 static ssize_t dsense_store(struct device_driver *ddp, const char *buf,
7014 			    size_t count)
7015 {
7016 	int n;
7017 
7018 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
7019 		sdebug_dsense = n;
7020 		return count;
7021 	}
7022 	return -EINVAL;
7023 }
7024 static DRIVER_ATTR_RW(dsense);
7025 
7026 static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
7027 {
7028 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_fake_rw);
7029 }
7030 static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
7031 			     size_t count)
7032 {
7033 	int n, idx;
7034 
7035 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
7036 		bool want_store = (n == 0);
7037 		struct sdebug_host_info *sdhp;
7038 
7039 		n = (n > 0);
7040 		sdebug_fake_rw = (sdebug_fake_rw > 0);
7041 		if (sdebug_fake_rw == n)
7042 			return count;	/* not transitioning so do nothing */
7043 
7044 		if (want_store) {	/* 1 --> 0 transition, set up store */
7045 			if (sdeb_first_idx < 0) {
7046 				idx = sdebug_add_store();
7047 				if (idx < 0)
7048 					return idx;
7049 			} else {
7050 				idx = sdeb_first_idx;
7051 				xa_clear_mark(per_store_ap, idx,
7052 					      SDEB_XA_NOT_IN_USE);
7053 			}
7054 			/* make all hosts use same store */
7055 			list_for_each_entry(sdhp, &sdebug_host_list,
7056 					    host_list) {
7057 				if (sdhp->si_idx != idx) {
7058 					xa_set_mark(per_store_ap, sdhp->si_idx,
7059 						    SDEB_XA_NOT_IN_USE);
7060 					sdhp->si_idx = idx;
7061 				}
7062 			}
7063 			sdeb_most_recent_idx = idx;
7064 		} else {	/* 0 --> 1 transition is trigger for shrink */
7065 			sdebug_erase_all_stores(true /* apart from first */);
7066 		}
7067 		sdebug_fake_rw = n;
7068 		return count;
7069 	}
7070 	return -EINVAL;
7071 }
7072 static DRIVER_ATTR_RW(fake_rw);
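/* Writing 0 to fake_rw when it was 1 sets up (or re-uses) a ram store and
 * points all hosts at it; writing 1 when it was 0 erases every store apart
 * from the first. Writes that do not change the value are no-ops.
 */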
7073 
7074 static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf)
7075 {
7076 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_lun_0);
7077 }
7078 static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf,
7079 			      size_t count)
7080 {
7081 	int n;
7082 
7083 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
7084 		sdebug_no_lun_0 = n;
7085 		return count;
7086 	}
7087 	return -EINVAL;
7088 }
7089 static DRIVER_ATTR_RW(no_lun_0);
7090 
7091 static ssize_t num_tgts_show(struct device_driver *ddp, char *buf)
7092 {
7093 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_tgts);
7094 }
7095 static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf,
7096 			      size_t count)
7097 {
7098 	int n;
7099 
7100 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
7101 		sdebug_num_tgts = n;
7102 		sdebug_max_tgts_luns();
7103 		return count;
7104 	}
7105 	return -EINVAL;
7106 }
7107 static DRIVER_ATTR_RW(num_tgts);
7108 
7109 static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf)
7110 {
7111 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dev_size_mb);
7112 }
7113 static DRIVER_ATTR_RO(dev_size_mb);
7114 
7115 static ssize_t per_host_store_show(struct device_driver *ddp, char *buf)
7116 {
7117 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_per_host_store);
7118 }
7119 
7120 static ssize_t per_host_store_store(struct device_driver *ddp, const char *buf,
7121 				    size_t count)
7122 {
7123 	bool v;
7124 
7125 	if (kstrtobool(buf, &v))
7126 		return -EINVAL;
7127 
7128 	sdebug_per_host_store = v;
7129 	return count;
7130 }
7131 static DRIVER_ATTR_RW(per_host_store);
7132 
7133 static ssize_t num_parts_show(struct device_driver *ddp, char *buf)
7134 {
7135 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_parts);
7136 }
7137 static DRIVER_ATTR_RO(num_parts);
7138 
7139 static ssize_t every_nth_show(struct device_driver *ddp, char *buf)
7140 {
7141 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_every_nth);
7142 }
7143 static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
7144 			       size_t count)
7145 {
7146 	int nth;
7147 	char work[20];
7148 
7149 	if (sscanf(buf, "%10s", work) == 1) {
7150 		if (strncasecmp(work, "0x", 2) == 0) {
7151 			if (kstrtoint(work + 2, 16, &nth) == 0)
7152 				goto every_nth_done;
7153 		} else {
7154 			if (kstrtoint(work, 10, &nth) == 0)
7155 				goto every_nth_done;
7156 		}
7157 	}
7158 	return -EINVAL;
7159 
7160 every_nth_done:
7161 	sdebug_every_nth = nth;
7162 	if (nth && !sdebug_statistics) {
7163 		pr_info("every_nth needs statistics=1, set it\n");
7164 		sdebug_statistics = true;
7165 	}
7166 	tweak_cmnd_count();
7167 	return count;
7168 }
7169 static DRIVER_ATTR_RW(every_nth);
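/* Example: "echo 100 > every_nth" combined with opts=2 injects a MEDIUM
 * error on every 100th command; statistics is forced on because the
 * trigger relies on the command counter.
 */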
7170 
7171 static ssize_t lun_format_show(struct device_driver *ddp, char *buf)
7172 {
7173 	return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_lun_am);
7174 }
7175 static ssize_t lun_format_store(struct device_driver *ddp, const char *buf,
7176 				size_t count)
7177 {
7178 	int n;
7179 	bool changed;
7180 
7181 	if (kstrtoint(buf, 0, &n))
7182 		return -EINVAL;
7183 	if (n >= 0) {
7184 		if (n > (int)SAM_LUN_AM_FLAT) {
7185 			pr_warn("only LUN address methods 0 and 1 are supported\n");
7186 			return -EINVAL;
7187 		}
7188 		changed = ((int)sdebug_lun_am != n);
7189 		sdebug_lun_am = n;
7190 		if (changed && sdebug_scsi_level >= 5) {	/* >= SPC-3 */
7191 			struct sdebug_host_info *sdhp;
7192 			struct sdebug_dev_info *dp;
7193 
7194 			mutex_lock(&sdebug_host_list_mutex);
7195 			list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
7196 				list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
7197 					set_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
7198 				}
7199 			}
7200 			mutex_unlock(&sdebug_host_list_mutex);
7201 		}
7202 		return count;
7203 	}
7204 	return -EINVAL;
7205 }
7206 static DRIVER_ATTR_RW(lun_format);
7207 
7208 static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
7209 {
7210 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_luns);
7211 }
7212 static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
7213 			      size_t count)
7214 {
7215 	int n;
7216 	bool changed;
7217 
7218 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
7219 		if (n > 256) {
7220 			pr_warn("max_luns can be no more than 256\n");
7221 			return -EINVAL;
7222 		}
7223 		changed = (sdebug_max_luns != n);
7224 		sdebug_max_luns = n;
7225 		sdebug_max_tgts_luns();
7226 		if (changed && (sdebug_scsi_level >= 5)) {	/* >= SPC-3 */
7227 			struct sdebug_host_info *sdhp;
7228 			struct sdebug_dev_info *dp;
7229 
7230 			mutex_lock(&sdebug_host_list_mutex);
7231 			list_for_each_entry(sdhp, &sdebug_host_list,
7232 					    host_list) {
7233 				list_for_each_entry(dp, &sdhp->dev_info_list,
7234 						    dev_list) {
7235 					set_bit(SDEBUG_UA_LUNS_CHANGED,
7236 						dp->uas_bm);
7237 				}
7238 			}
7239 			mutex_unlock(&sdebug_host_list_mutex);
7240 		}
7241 		return count;
7242 	}
7243 	return -EINVAL;
7244 }
7245 static DRIVER_ATTR_RW(max_luns);
7246 
7247 static ssize_t max_queue_show(struct device_driver *ddp, char *buf)
7248 {
7249 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_queue);
7250 }
7251 /* N.B. max_queue may only be changed while no scsi_debug hosts exist and
7252  * host_max_queue is 0; otherwise -EBUSY (or -EINVAL) is returned. */
7253 static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
7254 			       size_t count)
7255 {
7256 	int n;
7257 
7258 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
7259 	    (n <= SDEBUG_CANQUEUE) &&
7260 	    (sdebug_host_max_queue == 0)) {
7261 		mutex_lock(&sdebug_host_list_mutex);
7262 
7263 		/* We may only change sdebug_max_queue when we have no shosts */
7264 		if (list_empty(&sdebug_host_list))
7265 			sdebug_max_queue = n;
7266 		else
7267 			count = -EBUSY;
7268 		mutex_unlock(&sdebug_host_list_mutex);
7269 		return count;
7270 	}
7271 	return -EINVAL;
7272 }
7273 static DRIVER_ATTR_RW(max_queue);
7274 
7275 static ssize_t host_max_queue_show(struct device_driver *ddp, char *buf)
7276 {
7277 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_host_max_queue);
7278 }
7279 
7280 static ssize_t no_rwlock_show(struct device_driver *ddp, char *buf)
7281 {
7282 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_rwlock);
7283 }
7284 
7285 static ssize_t no_rwlock_store(struct device_driver *ddp, const char *buf, size_t count)
7286 {
7287 	bool v;
7288 
7289 	if (kstrtobool(buf, &v))
7290 		return -EINVAL;
7291 
7292 	sdebug_no_rwlock = v;
7293 	return count;
7294 }
7295 static DRIVER_ATTR_RW(no_rwlock);
7296 
7297 /*
7298  * Since this is used for .can_queue, and we get the hc_idx tag from the bitmap
7299  * in range [0, sdebug_host_max_queue), we can't change it.
7300  */
7301 static DRIVER_ATTR_RO(host_max_queue);
7302 
7303 static ssize_t no_uld_show(struct device_driver *ddp, char *buf)
7304 {
7305 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_uld);
7306 }
7307 static DRIVER_ATTR_RO(no_uld);
7308 
7309 static ssize_t scsi_level_show(struct device_driver *ddp, char *buf)
7310 {
7311 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_scsi_level);
7312 }
7313 static DRIVER_ATTR_RO(scsi_level);
7314 
7315 static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf)
7316 {
7317 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_virtual_gb);
7318 }
7319 static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
7320 				size_t count)
7321 {
7322 	int n;
7323 	bool changed;
7324 
7325 	/* Ignore capacity change for ZBC drives for now */
7326 	if (sdeb_zbc_in_use)
7327 		return -ENOTSUPP;
7328 
7329 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
7330 		changed = (sdebug_virtual_gb != n);
7331 		sdebug_virtual_gb = n;
7332 		sdebug_capacity = get_sdebug_capacity();
7333 		if (changed) {
7334 			struct sdebug_host_info *sdhp;
7335 			struct sdebug_dev_info *dp;
7336 
7337 			mutex_lock(&sdebug_host_list_mutex);
7338 			list_for_each_entry(sdhp, &sdebug_host_list,
7339 					    host_list) {
7340 				list_for_each_entry(dp, &sdhp->dev_info_list,
7341 						    dev_list) {
7342 					set_bit(SDEBUG_UA_CAPACITY_CHANGED,
7343 						dp->uas_bm);
7344 				}
7345 			}
7346 			mutex_unlock(&sdebug_host_list_mutex);
7347 		}
7348 		return count;
7349 	}
7350 	return -EINVAL;
7351 }
7352 static DRIVER_ATTR_RW(virtual_gb);
7353 
7354 static ssize_t add_host_show(struct device_driver *ddp, char *buf)
7355 {
7356 	/* absolute number of hosts currently active is what is shown */
7357 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_hosts);
7358 }
7359 
7360 static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
7361 			      size_t count)
7362 {
7363 	bool found;
7364 	unsigned long idx;
7365 	struct sdeb_store_info *sip;
7366 	bool want_phs = (sdebug_fake_rw == 0) && sdebug_per_host_store;
7367 	int delta_hosts;
7368 
7369 	if (sscanf(buf, "%d", &delta_hosts) != 1)
7370 		return -EINVAL;
7371 	if (delta_hosts > 0) {
7372 		do {
7373 			found = false;
7374 			if (want_phs) {
7375 				xa_for_each_marked(per_store_ap, idx, sip,
7376 						   SDEB_XA_NOT_IN_USE) {
7377 					sdeb_most_recent_idx = (int)idx;
7378 					found = true;
7379 					break;
7380 				}
7381 				if (found)	/* re-use case */
7382 					sdebug_add_host_helper((int)idx);
7383 				else
7384 					sdebug_do_add_host(true);
7385 			} else {
7386 				sdebug_do_add_host(false);
7387 			}
7388 		} while (--delta_hosts);
7389 	} else if (delta_hosts < 0) {
7390 		do {
7391 			sdebug_do_remove_host(false);
7392 		} while (++delta_hosts);
7393 	}
7394 	return count;
7395 }
7396 static DRIVER_ATTR_RW(add_host);
7397 
7398 static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf)
7399 {
7400 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_vpd_use_hostno);
7401 }
7402 static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf,
7403 				    size_t count)
7404 {
7405 	int n;
7406 
7407 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
7408 		sdebug_vpd_use_hostno = n;
7409 		return count;
7410 	}
7411 	return -EINVAL;
7412 }
7413 static DRIVER_ATTR_RW(vpd_use_hostno);
7414 
7415 static ssize_t statistics_show(struct device_driver *ddp, char *buf)
7416 {
7417 	return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_statistics);
7418 }
7419 static ssize_t statistics_store(struct device_driver *ddp, const char *buf,
7420 				size_t count)
7421 {
7422 	int n;
7423 
7424 	if ((count > 0) && (sscanf(buf, "%d", &n) == 1) && (n >= 0)) {
7425 		if (n > 0)
7426 			sdebug_statistics = true;
7427 		else {
7428 			clear_queue_stats();
7429 			sdebug_statistics = false;
7430 		}
7431 		return count;
7432 	}
7433 	return -EINVAL;
7434 }
7435 static DRIVER_ATTR_RW(statistics);
7436 
7437 static ssize_t sector_size_show(struct device_driver *ddp, char *buf)
7438 {
7439 	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_sector_size);
7440 }
7441 static DRIVER_ATTR_RO(sector_size);
7442 
7443 static ssize_t submit_queues_show(struct device_driver *ddp, char *buf)
7444 {
7445 	return scnprintf(buf, PAGE_SIZE, "%d\n", submit_queues);
7446 }
7447 static DRIVER_ATTR_RO(submit_queues);
7448 
7449 static ssize_t dix_show(struct device_driver *ddp, char *buf)
7450 {
7451 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dix);
7452 }
7453 static DRIVER_ATTR_RO(dix);
7454 
7455 static ssize_t dif_show(struct device_driver *ddp, char *buf)
7456 {
7457 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dif);
7458 }
7459 static DRIVER_ATTR_RO(dif);
7460 
7461 static ssize_t guard_show(struct device_driver *ddp, char *buf)
7462 {
7463 	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_guard);
7464 }
7465 static DRIVER_ATTR_RO(guard);
7466 
7467 static ssize_t ato_show(struct device_driver *ddp, char *buf)
7468 {
7469 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ato);
7470 }
7471 static DRIVER_ATTR_RO(ato);
7472 
7473 static ssize_t map_show(struct device_driver *ddp, char *buf)
7474 {
7475 	ssize_t count = 0;
7476 
7477 	if (!scsi_debug_lbp())
7478 		return scnprintf(buf, PAGE_SIZE, "0-%u\n",
7479 				 sdebug_store_sectors);
7480 
7481 	if (sdebug_fake_rw == 0 && !xa_empty(per_store_ap)) {
7482 		struct sdeb_store_info *sip = xa_load(per_store_ap, 0);
7483 
7484 		if (sip)
7485 			count = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
7486 					  (int)map_size, sip->map_storep);
7487 	}
7488 	buf[count++] = '\n';
7489 	buf[count] = '\0';
7490 
7491 	return count;
7492 }
7493 static DRIVER_ATTR_RO(map);
7494 
7495 static ssize_t random_show(struct device_driver *ddp, char *buf)
7496 {
7497 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_random);
7498 }
7499 
7500 static ssize_t random_store(struct device_driver *ddp, const char *buf,
7501 			    size_t count)
7502 {
7503 	bool v;
7504 
7505 	if (kstrtobool(buf, &v))
7506 		return -EINVAL;
7507 
7508 	sdebug_random = v;
7509 	return count;
7510 }
7511 static DRIVER_ATTR_RW(random);
7512 
7513 static ssize_t removable_show(struct device_driver *ddp, char *buf)
7514 {
7515 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_removable ? 1 : 0);
7516 }
7517 static ssize_t removable_store(struct device_driver *ddp, const char *buf,
7518 			       size_t count)
7519 {
7520 	int n;
7521 
7522 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
7523 		sdebug_removable = (n > 0);
7524 		return count;
7525 	}
7526 	return -EINVAL;
7527 }
7528 static DRIVER_ATTR_RW(removable);
7529 
7530 static ssize_t host_lock_show(struct device_driver *ddp, char *buf)
7531 {
7532 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_host_lock);
7533 }
7534 /* N.B. sdebug_host_lock does nothing, kept for backward compatibility */
7535 static ssize_t host_lock_store(struct device_driver *ddp, const char *buf,
7536 			       size_t count)
7537 {
7538 	int n;
7539 
7540 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
7541 		sdebug_host_lock = (n > 0);
7542 		return count;
7543 	}
7544 	return -EINVAL;
7545 }
7546 static DRIVER_ATTR_RW(host_lock);
7547 
7548 static ssize_t strict_show(struct device_driver *ddp, char *buf)
7549 {
7550 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_strict);
7551 }
7552 static ssize_t strict_store(struct device_driver *ddp, const char *buf,
7553 			    size_t count)
7554 {
7555 	int n;
7556 
7557 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
7558 		sdebug_strict = (n > 0);
7559 		return count;
7560 	}
7561 	return -EINVAL;
7562 }
7563 static DRIVER_ATTR_RW(strict);
7564 
7565 static ssize_t uuid_ctl_show(struct device_driver *ddp, char *buf)
7566 {
7567 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_uuid_ctl);
7568 }
7569 static DRIVER_ATTR_RO(uuid_ctl);
7570 
7571 static ssize_t cdb_len_show(struct device_driver *ddp, char *buf)
7572 {
7573 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_cdb_len);
7574 }
7575 static ssize_t cdb_len_store(struct device_driver *ddp, const char *buf,
7576 			     size_t count)
7577 {
7578 	int ret, n;
7579 
7580 	ret = kstrtoint(buf, 0, &n);
7581 	if (ret)
7582 		return ret;
7583 	sdebug_cdb_len = n;
7584 	all_config_cdb_len();
7585 	return count;
7586 }
7587 static DRIVER_ATTR_RW(cdb_len);
7588 
7589 static const char * const zbc_model_strs_a[] = {
7590 	[BLK_ZONED_NONE] = "none",
7591 	[BLK_ZONED_HA]   = "host-aware",
7592 	[BLK_ZONED_HM]   = "host-managed",
7593 };
7594 
7595 static const char * const zbc_model_strs_b[] = {
7596 	[BLK_ZONED_NONE] = "no",
7597 	[BLK_ZONED_HA]   = "aware",
7598 	[BLK_ZONED_HM]   = "managed",
7599 };
7600 
7601 static const char * const zbc_model_strs_c[] = {
7602 	[BLK_ZONED_NONE] = "0",
7603 	[BLK_ZONED_HA]   = "1",
7604 	[BLK_ZONED_HM]   = "2",
7605 };
7606 
7607 static int sdeb_zbc_model_str(const char *cp)
7608 {
7609 	int res = sysfs_match_string(zbc_model_strs_a, cp);
7610 
7611 	if (res < 0) {
7612 		res = sysfs_match_string(zbc_model_strs_b, cp);
7613 		if (res < 0) {
7614 			res = sysfs_match_string(zbc_model_strs_c, cp);
7615 			if (res < 0)
7616 				return -EINVAL;
7617 		}
7618 	}
7619 	return res;
7620 }
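/* Any of the three spellings per model is accepted, e.g. "host-managed",
 * "managed" and "2" all map to BLK_ZONED_HM.
 */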
7621 
7622 static ssize_t zbc_show(struct device_driver *ddp, char *buf)
7623 {
7624 	return scnprintf(buf, PAGE_SIZE, "%s\n",
7625 			 zbc_model_strs_a[sdeb_zbc_model]);
7626 }
7627 static DRIVER_ATTR_RO(zbc);
7628 
7629 static ssize_t tur_ms_to_ready_show(struct device_driver *ddp, char *buf)
7630 {
7631 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdeb_tur_ms_to_ready);
7632 }
7633 static DRIVER_ATTR_RO(tur_ms_to_ready);
7634 
7635 static ssize_t group_number_stats_show(struct device_driver *ddp, char *buf)
7636 {
7637 	char *p = buf, *end = buf + PAGE_SIZE;
7638 	int i;
7639 
7640 	for (i = 0; i < ARRAY_SIZE(writes_by_group_number); i++)
7641 		p += scnprintf(p, end - p, "%d %ld\n", i,
7642 			       atomic_long_read(&writes_by_group_number[i]));
7643 
7644 	return p - buf;
7645 }
7646 
7647 static ssize_t group_number_stats_store(struct device_driver *ddp,
7648 					const char *buf, size_t count)
7649 {
7650 	int i;
7651 
7652 	for (i = 0; i < ARRAY_SIZE(writes_by_group_number); i++)
7653 		atomic_long_set(&writes_by_group_number[i], 0);
7654 
7655 	return count;
7656 }
7657 static DRIVER_ATTR_RW(group_number_stats);
7658 
7659 /* Note: The following array creates attribute files in the
7660    /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
7661    files (over those found in the /sys/module/scsi_debug/parameters
7662    directory) is that auxiliary actions can be triggered when an attribute
7663    is changed. For example see: add_host_store() above.
7664  */
7665 
7666 static struct attribute *sdebug_drv_attrs[] = {
7667 	&driver_attr_delay.attr,
7668 	&driver_attr_opts.attr,
7669 	&driver_attr_ptype.attr,
7670 	&driver_attr_dsense.attr,
7671 	&driver_attr_fake_rw.attr,
7672 	&driver_attr_host_max_queue.attr,
7673 	&driver_attr_no_lun_0.attr,
7674 	&driver_attr_num_tgts.attr,
7675 	&driver_attr_dev_size_mb.attr,
7676 	&driver_attr_num_parts.attr,
7677 	&driver_attr_every_nth.attr,
7678 	&driver_attr_lun_format.attr,
7679 	&driver_attr_max_luns.attr,
7680 	&driver_attr_max_queue.attr,
7681 	&driver_attr_no_rwlock.attr,
7682 	&driver_attr_no_uld.attr,
7683 	&driver_attr_scsi_level.attr,
7684 	&driver_attr_virtual_gb.attr,
7685 	&driver_attr_add_host.attr,
7686 	&driver_attr_per_host_store.attr,
7687 	&driver_attr_vpd_use_hostno.attr,
7688 	&driver_attr_sector_size.attr,
7689 	&driver_attr_statistics.attr,
7690 	&driver_attr_submit_queues.attr,
7691 	&driver_attr_dix.attr,
7692 	&driver_attr_dif.attr,
7693 	&driver_attr_guard.attr,
7694 	&driver_attr_ato.attr,
7695 	&driver_attr_map.attr,
7696 	&driver_attr_random.attr,
7697 	&driver_attr_removable.attr,
7698 	&driver_attr_host_lock.attr,
7699 	&driver_attr_ndelay.attr,
7700 	&driver_attr_strict.attr,
7701 	&driver_attr_uuid_ctl.attr,
7702 	&driver_attr_cdb_len.attr,
7703 	&driver_attr_tur_ms_to_ready.attr,
7704 	&driver_attr_zbc.attr,
7705 	&driver_attr_group_number_stats.attr,
7706 	NULL,
7707 };
7708 ATTRIBUTE_GROUPS(sdebug_drv);
7709 
7710 static struct device *pseudo_primary;
7711 
7712 static int __init scsi_debug_init(void)
7713 {
7714 	bool want_store = (sdebug_fake_rw == 0);
7715 	unsigned long sz;
7716 	int k, ret, hosts_to_add;
7717 	int idx = -1;
7718 
7719 	if (sdebug_ndelay >= 1000 * 1000 * 1000) {
7720 		pr_warn("ndelay must be less than 1 second, ignored\n");
7721 		sdebug_ndelay = 0;
7722 	} else if (sdebug_ndelay > 0)
7723 		sdebug_jdelay = JDELAY_OVERRIDDEN;
7724 
7725 	switch (sdebug_sector_size) {
7726 	case  512:
7727 	case 1024:
7728 	case 2048:
7729 	case 4096:
7730 		break;
7731 	default:
7732 		pr_err("invalid sector_size %d\n", sdebug_sector_size);
7733 		return -EINVAL;
7734 	}
7735 
7736 	switch (sdebug_dif) {
7737 	case T10_PI_TYPE0_PROTECTION:
7738 		break;
7739 	case T10_PI_TYPE1_PROTECTION:
7740 	case T10_PI_TYPE2_PROTECTION:
7741 	case T10_PI_TYPE3_PROTECTION:
7742 		have_dif_prot = true;
7743 		break;
7744 
7745 	default:
7746 		pr_err("dif must be 0, 1, 2 or 3\n");
7747 		return -EINVAL;
7748 	}
7749 
7750 	if (sdebug_num_tgts < 0) {
7751 		pr_err("num_tgts must be >= 0\n");
7752 		return -EINVAL;
7753 	}
7754 
7755 	if (sdebug_guard > 1) {
7756 		pr_err("guard must be 0 or 1\n");
7757 		return -EINVAL;
7758 	}
7759 
7760 	if (sdebug_ato > 1) {
7761 		pr_err("ato must be 0 or 1\n");
7762 		return -EINVAL;
7763 	}
7764 
7765 	if (sdebug_physblk_exp > 15) {
7766 		pr_err("invalid physblk_exp %u\n", sdebug_physblk_exp);
7767 		return -EINVAL;
7768 	}
7769 
7770 	sdebug_lun_am = sdebug_lun_am_i;
7771 	if (sdebug_lun_am > SAM_LUN_AM_FLAT) {
7772 		pr_warn("Invalid LUN format %u, using default\n", (int)sdebug_lun_am);
7773 		sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
7774 	}
7775 
7776 	if (sdebug_max_luns > 256) {
7777 		if (sdebug_max_luns > 16384) {
7778 			pr_warn("max_luns can be no more than 16384, use default\n");
7779 			sdebug_max_luns = DEF_MAX_LUNS;
7780 		}
7781 		sdebug_lun_am = SAM_LUN_AM_FLAT;
7782 	}
7783 
7784 	if (sdebug_lowest_aligned > 0x3fff) {
7785 		pr_err("lowest_aligned too big: %u\n", sdebug_lowest_aligned);
7786 		return -EINVAL;
7787 	}
7788 
7789 	if (submit_queues < 1) {
7790 		pr_err("submit_queues must be 1 or more\n");
7791 		return -EINVAL;
7792 	}
7793 
7794 	if ((sdebug_max_queue > SDEBUG_CANQUEUE) || (sdebug_max_queue < 1)) {
7795 		pr_err("max_queue must be in range [1, %d]\n", SDEBUG_CANQUEUE);
7796 		return -EINVAL;
7797 	}
7798 
7799 	if ((sdebug_host_max_queue > SDEBUG_CANQUEUE) ||
7800 	    (sdebug_host_max_queue < 0)) {
7801 		pr_err("host_max_queue must be in range [0 %d]\n",
7802 		       SDEBUG_CANQUEUE);
7803 		return -EINVAL;
7804 	}
7805 
7806 	if (sdebug_host_max_queue &&
7807 	    (sdebug_max_queue != sdebug_host_max_queue)) {
7808 		sdebug_max_queue = sdebug_host_max_queue;
7809 		pr_warn("fixing max submit queue depth to host max queue depth, %d\n",
7810 			sdebug_max_queue);
7811 	}
7812 
7813 	/*
7814 	 * check for host managed zoned block device specified with
7815 	 * ptype=0x14 or zbc=XXX.
7816 	 */
7817 	if (sdebug_ptype == TYPE_ZBC) {
7818 		sdeb_zbc_model = BLK_ZONED_HM;
7819 	} else if (sdeb_zbc_model_s && *sdeb_zbc_model_s) {
7820 		k = sdeb_zbc_model_str(sdeb_zbc_model_s);
7821 		if (k < 0)
7822 			return k;
7823 		sdeb_zbc_model = k;
7824 		switch (sdeb_zbc_model) {
7825 		case BLK_ZONED_NONE:
7826 		case BLK_ZONED_HA:
7827 			sdebug_ptype = TYPE_DISK;
7828 			break;
7829 		case BLK_ZONED_HM:
7830 			sdebug_ptype = TYPE_ZBC;
7831 			break;
7832 		default:
7833 			pr_err("Invalid ZBC model\n");
7834 			return -EINVAL;
7835 		}
7836 	}
7837 	if (sdeb_zbc_model != BLK_ZONED_NONE) {
7838 		sdeb_zbc_in_use = true;
7839 		if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
7840 			sdebug_dev_size_mb = DEF_ZBC_DEV_SIZE_MB;
7841 	}
7842 
7843 	if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
7844 		sdebug_dev_size_mb = DEF_DEV_SIZE_MB;
7845 	if (sdebug_dev_size_mb < 1)
7846 		sdebug_dev_size_mb = 1;  /* force minimum 1 MB ramdisk */
7847 	sz = (unsigned long)sdebug_dev_size_mb * 1048576;
7848 	sdebug_store_sectors = sz / sdebug_sector_size;
7849 	sdebug_capacity = get_sdebug_capacity();
7850 
7851 	/* play around with geometry, don't waste too much on track 0 */
7852 	sdebug_heads = 8;
7853 	sdebug_sectors_per = 32;
7854 	if (sdebug_dev_size_mb >= 256)
7855 		sdebug_heads = 64;
7856 	else if (sdebug_dev_size_mb >= 16)
7857 		sdebug_heads = 32;
7858 	sdebug_cylinders_per = (unsigned long)sdebug_capacity /
7859 			       (sdebug_sectors_per * sdebug_heads);
7860 	if (sdebug_cylinders_per >= 1024) {
7861 		/* other LLDs do this; implies >= 1GB ram disk ... */
7862 		sdebug_heads = 255;
7863 		sdebug_sectors_per = 63;
7864 		sdebug_cylinders_per = (unsigned long)sdebug_capacity /
7865 			       (sdebug_sectors_per * sdebug_heads);
7866 	}
7867 	if (scsi_debug_lbp()) {
7868 		sdebug_unmap_max_blocks =
7869 			clamp(sdebug_unmap_max_blocks, 0U, 0xffffffffU);
7870 
7871 		sdebug_unmap_max_desc =
7872 			clamp(sdebug_unmap_max_desc, 0U, 256U);
7873 
7874 		sdebug_unmap_granularity =
7875 			clamp(sdebug_unmap_granularity, 1U, 0xffffffffU);
7876 
7877 		if (sdebug_unmap_alignment &&
7878 		    sdebug_unmap_granularity <=
7879 		    sdebug_unmap_alignment) {
7880 			pr_err("ERR: unmap_granularity <= unmap_alignment\n");
7881 			return -EINVAL;
7882 		}
7883 	}
7884 
7885 	xa_init_flags(per_store_ap, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
7886 	if (want_store) {
7887 		idx = sdebug_add_store();
7888 		if (idx < 0)
7889 			return idx;
7890 	}
7891 
7892 	pseudo_primary = root_device_register("pseudo_0");
7893 	if (IS_ERR(pseudo_primary)) {
7894 		pr_warn("root_device_register() error\n");
7895 		ret = PTR_ERR(pseudo_primary);
7896 		goto free_vm;
7897 	}
7898 	ret = bus_register(&pseudo_lld_bus);
7899 	if (ret < 0) {
7900 		pr_warn("bus_register error: %d\n", ret);
7901 		goto dev_unreg;
7902 	}
7903 	ret = driver_register(&sdebug_driverfs_driver);
7904 	if (ret < 0) {
7905 		pr_warn("driver_register error: %d\n", ret);
7906 		goto bus_unreg;
7907 	}
7908 
7909 	hosts_to_add = sdebug_add_host;
7910 	sdebug_add_host = 0;
7911 
7912 	queued_cmd_cache = KMEM_CACHE(sdebug_queued_cmd, SLAB_HWCACHE_ALIGN);
7913 	if (!queued_cmd_cache) {
7914 		ret = -ENOMEM;
7915 		goto driver_unreg;
7916 	}
7917 
7918 	sdebug_debugfs_root = debugfs_create_dir("scsi_debug", NULL);
7919 	if (IS_ERR_OR_NULL(sdebug_debugfs_root))
7920 		pr_info("%s: failed to create initial debugfs directory\n", __func__);
7921 
7922 	for (k = 0; k < hosts_to_add; k++) {
7923 		if (want_store && k == 0) {
7924 			ret = sdebug_add_host_helper(idx);
7925 			if (ret < 0) {
7926 				pr_err("add_host_helper k=%d, error=%d\n",
7927 				       k, -ret);
7928 				break;
7929 			}
7930 		} else {
7931 			ret = sdebug_do_add_host(want_store &&
7932 						 sdebug_per_host_store);
7933 			if (ret < 0) {
7934 				pr_err("add_host k=%d error=%d\n", k, -ret);
7935 				break;
7936 			}
7937 		}
7938 	}
7939 	if (sdebug_verbose)
7940 		pr_info("built %d host(s)\n", sdebug_num_hosts);
7941 
7942 	return 0;
7943 
7944 driver_unreg:
7945 	driver_unregister(&sdebug_driverfs_driver);
7946 bus_unreg:
7947 	bus_unregister(&pseudo_lld_bus);
7948 dev_unreg:
7949 	root_device_unregister(pseudo_primary);
7950 free_vm:
7951 	sdebug_erase_store(idx, NULL);
7952 	return ret;
7953 }
7954 
7955 static void __exit scsi_debug_exit(void)
7956 {
7957 	int k = sdebug_num_hosts;
7958 
7959 	for (; k; k--)
7960 		sdebug_do_remove_host(true);
7961 	kmem_cache_destroy(queued_cmd_cache);
7962 	driver_unregister(&sdebug_driverfs_driver);
7963 	bus_unregister(&pseudo_lld_bus);
7964 	root_device_unregister(pseudo_primary);
7965 
7966 	sdebug_erase_all_stores(false);
7967 	xa_destroy(per_store_ap);
7968 	debugfs_remove(sdebug_debugfs_root);
7969 }
7970 
7971 device_initcall(scsi_debug_init);
7972 module_exit(scsi_debug_exit);
7973 
7974 static void sdebug_release_adapter(struct device *dev)
7975 {
7976 	struct sdebug_host_info *sdbg_host;
7977 
7978 	sdbg_host = dev_to_sdebug_host(dev);
7979 	kfree(sdbg_host);
7980 }
7981 
7982 /* idx must be valid; if sip is NULL then it will be looked up using idx */
7983 static void sdebug_erase_store(int idx, struct sdeb_store_info *sip)
7984 {
7985 	if (idx < 0)
7986 		return;
7987 	if (!sip) {
7988 		if (xa_empty(per_store_ap))
7989 			return;
7990 		sip = xa_load(per_store_ap, idx);
7991 		if (!sip)
7992 			return;
7993 	}
7994 	vfree(sip->map_storep);
7995 	vfree(sip->dif_storep);
7996 	vfree(sip->storep);
7997 	xa_erase(per_store_ap, idx);
7998 	kfree(sip);
7999 }
8000 
8001 /* apart_from_first==false is assumed only in the shutdown case. */
8002 static void sdebug_erase_all_stores(bool apart_from_first)
8003 {
8004 	unsigned long idx;
8005 	struct sdeb_store_info *sip = NULL;
8006 
8007 	xa_for_each(per_store_ap, idx, sip) {
8008 		if (apart_from_first)
8009 			apart_from_first = false;
8010 		else
8011 			sdebug_erase_store(idx, sip);
8012 	}
8013 	if (apart_from_first)
8014 		sdeb_most_recent_idx = sdeb_first_idx;
8015 }
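/*
 * Note (editorial): when apart_from_first is true, the lowest-indexed
 * store (the first one xa_for_each() visits) is spared; the trailing
 * check can only fire when the xarray was already empty, because the
 * flag is cleared on the first iteration.
 */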
8016 
8017 /*
8018  * Returns the new element's index (idx) in the store xarray if >= 0,
8019  * else a negated errno. The number of stores is limited to 65536.
8020  */
8021 static int sdebug_add_store(void)
8022 {
8023 	int res;
8024 	u32 n_idx;
8025 	unsigned long iflags;
8026 	unsigned long sz = (unsigned long)sdebug_dev_size_mb * 1048576;
8027 	struct sdeb_store_info *sip = NULL;
8028 	struct xa_limit xal = { .max = 1 << 16, .min = 0 };
8029 
8030 	sip = kzalloc(sizeof(*sip), GFP_KERNEL);
8031 	if (!sip)
8032 		return -ENOMEM;
8033 
8034 	xa_lock_irqsave(per_store_ap, iflags);
8035 	res = __xa_alloc(per_store_ap, &n_idx, sip, xal, GFP_ATOMIC);
8036 	if (unlikely(res < 0)) {
8037 		xa_unlock_irqrestore(per_store_ap, iflags);
8038 		kfree(sip);
8039 		pr_warn("%s: xa_alloc() errno=%d\n", __func__, -res);
8040 		return res;
8041 	}
8042 	sdeb_most_recent_idx = n_idx;
8043 	if (sdeb_first_idx < 0)
8044 		sdeb_first_idx = n_idx;
8045 	xa_unlock_irqrestore(per_store_ap, iflags);
8046 
8047 	res = -ENOMEM;
8048 	sip->storep = vzalloc(sz);
8049 	if (!sip->storep) {
8050 		pr_err("user data oom\n");
8051 		goto err;
8052 	}
8053 	if (sdebug_num_parts > 0)
8054 		sdebug_build_parts(sip->storep, sz);
8055 
8056 	/* DIF/DIX: what T10 calls Protection Information (PI) */
8057 	if (sdebug_dix) {
8058 		int dif_size;
8059 
8060 		dif_size = sdebug_store_sectors * sizeof(struct t10_pi_tuple);
8061 		sip->dif_storep = vmalloc(dif_size);
8062 
8063 		pr_info("dif_storep %u bytes @ %pK\n", dif_size,
8064 			sip->dif_storep);
8065 
8066 		if (!sip->dif_storep) {
8067 			pr_err("DIX oom\n");
8068 			goto err;
8069 		}
8070 		memset(sip->dif_storep, 0xff, dif_size);
8071 	}
8072 	/* Logical Block Provisioning */
8073 	if (scsi_debug_lbp()) {
8074 		map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
8075 		sip->map_storep = vmalloc(array_size(sizeof(long),
8076 						     BITS_TO_LONGS(map_size)));
8077 
8078 		pr_info("%lu provisioning blocks\n", map_size);
8079 
8080 		if (!sip->map_storep) {
8081 			pr_err("LBP map oom\n");
8082 			goto err;
8083 		}
8084 
8085 		bitmap_zero(sip->map_storep, map_size);
8086 
8087 		/* Map first 1KB for partition table */
8088 		if (sdebug_num_parts)
8089 			map_region(sip, 0, 2);
8090 	}
8091 
8092 	rwlock_init(&sip->macc_data_lck);
8093 	rwlock_init(&sip->macc_meta_lck);
8094 	rwlock_init(&sip->macc_sector_lck);
8095 	return (int)n_idx;
8096 err:
8097 	sdebug_erase_store((int)n_idx, sip);
8098 	pr_warn("%s: failed, errno=%d\n", __func__, -res);
8099 	return res;
8100 }
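/*
 * Sizing example (editorial): dev_size_mb=8 with 512-byte sectors gives
 * sz = 8 * 1048576 = 8388608 bytes and 16384 store sectors; with dix
 * enabled a further 16384 * sizeof(struct t10_pi_tuple) = 131072 bytes
 * are vmalloc()ed for protection information.
 */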
8101 
8102 static int sdebug_add_host_helper(int per_host_idx)
8103 {
8104 	int k, devs_per_host, idx;
8105 	int error = -ENOMEM;
8106 	struct sdebug_host_info *sdbg_host;
8107 	struct sdebug_dev_info *sdbg_devinfo, *tmp;
8108 
8109 	sdbg_host = kzalloc(sizeof(*sdbg_host), GFP_KERNEL);
8110 	if (!sdbg_host)
8111 		return -ENOMEM;
8112 	idx = (per_host_idx < 0) ? sdeb_first_idx : per_host_idx;
8113 	if (xa_get_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE))
8114 		xa_clear_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
8115 	sdbg_host->si_idx = idx;
8116 
8117 	INIT_LIST_HEAD(&sdbg_host->dev_info_list);
8118 
8119 	devs_per_host = sdebug_num_tgts * sdebug_max_luns;
8120 	for (k = 0; k < devs_per_host; k++) {
8121 		sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
8122 		if (!sdbg_devinfo)
8123 			goto clean;
8124 	}
8125 
8126 	mutex_lock(&sdebug_host_list_mutex);
8127 	list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
8128 	mutex_unlock(&sdebug_host_list_mutex);
8129 
8130 	sdbg_host->dev.bus = &pseudo_lld_bus;
8131 	sdbg_host->dev.parent = pseudo_primary;
8132 	sdbg_host->dev.release = &sdebug_release_adapter;
8133 	dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_num_hosts);
8134 
8135 	error = device_register(&sdbg_host->dev);
8136 	if (error) {
8137 		mutex_lock(&sdebug_host_list_mutex);
8138 		list_del(&sdbg_host->host_list);
8139 		mutex_unlock(&sdebug_host_list_mutex);
8140 		goto clean;
8141 	}
8142 
8143 	++sdebug_num_hosts;
8144 	return 0;
8145 
8146 clean:
8147 	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
8148 				 dev_list) {
8149 		list_del(&sdbg_devinfo->dev_list);
8150 		kfree(sdbg_devinfo->zstate);
8151 		kfree(sdbg_devinfo);
8152 	}
8153 	if (sdbg_host->dev.release)
8154 		put_device(&sdbg_host->dev);
8155 	else
8156 		kfree(sdbg_host);
8157 	pr_warn("%s: failed, errno=%d\n", __func__, -error);
8158 	return error;
8159 }
8160 
8161 static int sdebug_do_add_host(bool mk_new_store)
8162 {
8163 	int ph_idx = sdeb_most_recent_idx;
8164 
8165 	if (mk_new_store) {
8166 		ph_idx = sdebug_add_store();
8167 		if (ph_idx < 0)
8168 			return ph_idx;
8169 	}
8170 	return sdebug_add_host_helper(ph_idx);
8171 }
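/*
 * Note (editorial): with mk_new_store false the new host attaches to the
 * most recently created store (sdeb_most_recent_idx); scsi_debug_init()
 * passes true only when a store is wanted and per_host_store is set, so
 * that each host then gets its own backing store.
 */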
8172 
8173 static void sdebug_do_remove_host(bool the_end)
8174 {
8175 	int idx = -1;
8176 	struct sdebug_host_info *sdbg_host = NULL;
8177 	struct sdebug_host_info *sdbg_host2;
8178 
8179 	mutex_lock(&sdebug_host_list_mutex);
8180 	if (!list_empty(&sdebug_host_list)) {
8181 		sdbg_host = list_entry(sdebug_host_list.prev,
8182 				       struct sdebug_host_info, host_list);
8183 		idx = sdbg_host->si_idx;
8184 	}
8185 	if (!the_end && idx >= 0) {
8186 		bool unique = true;
8187 
8188 		list_for_each_entry(sdbg_host2, &sdebug_host_list, host_list) {
8189 			if (sdbg_host2 == sdbg_host)
8190 				continue;
8191 			if (idx == sdbg_host2->si_idx) {
8192 				unique = false;
8193 				break;
8194 			}
8195 		}
8196 		if (unique) {
8197 			xa_set_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
8198 			if (idx == sdeb_most_recent_idx)
8199 				--sdeb_most_recent_idx;
8200 		}
8201 	}
8202 	if (sdbg_host)
8203 		list_del(&sdbg_host->host_list);
8204 	mutex_unlock(&sdebug_host_list_mutex);
8205 
8206 	if (!sdbg_host)
8207 		return;
8208 
8209 	device_unregister(&sdbg_host->dev);
8210 	--sdebug_num_hosts;
8211 }
8212 
8213 static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
8214 {
8215 	struct sdebug_dev_info *devip = sdev->hostdata;
8216 
8217 	if (!devip)
8218 		return	-ENODEV;
8219 
8220 	mutex_lock(&sdebug_host_list_mutex);
8221 	block_unblock_all_queues(true);
8222 
8223 	if (qdepth > SDEBUG_CANQUEUE) {
8224 		qdepth = SDEBUG_CANQUEUE;
8225 		pr_warn("%s: requested qdepth [%d] exceeds canqueue [%d], trim\n", __func__,
8226 			qdepth, SDEBUG_CANQUEUE);
8227 	}
8228 	if (qdepth < 1)
8229 		qdepth = 1;
8230 	if (qdepth != sdev->queue_depth)
8231 		scsi_change_queue_depth(sdev, qdepth);
8232 
8233 	block_unblock_all_queues(false);
8234 	mutex_unlock(&sdebug_host_list_mutex);
8235 
8236 	if (SDEBUG_OPT_Q_NOISE & sdebug_opts)
8237 		sdev_printk(KERN_INFO, sdev, "%s: qdepth=%d\n", __func__, qdepth);
8238 
8239 	return sdev->queue_depth;
8240 }
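/*
 * Usage sketch (editorial): the midlayer reaches this hook through the
 * scsi_device "queue_depth" sysfs attribute, so e.g.
 *
 *   echo 4 > /sys/bus/scsi/devices/<h:c:t:l>/queue_depth
 *
 * arrives here with qdepth=4, which is then clamped to
 * [1, SDEBUG_CANQUEUE].
 */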
8241 
8242 static bool fake_timeout(struct scsi_cmnd *scp)
8243 {
8244 	if (0 == (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth))) {
8245 		if (sdebug_every_nth < -1)
8246 			sdebug_every_nth = -1;
8247 		if (SDEBUG_OPT_TIMEOUT & sdebug_opts)
8248 			return true; /* ignore command causing timeout */
8249 		else if (SDEBUG_OPT_MAC_TIMEOUT & sdebug_opts &&
8250 			 scsi_medium_access_command(scp))
8251 			return true; /* time out reads and writes */
8252 	}
8253 	return false;
8254 }
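/*
 * Example (editorial): with every_nth=100 and the SDEBUG_OPT_TIMEOUT opt
 * set, every 100th command is ignored (never completed), exercising the
 * midlayer's timeout and abort handling; SDEBUG_OPT_MAC_TIMEOUT limits
 * this to medium access commands such as reads and writes.
 */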
8255 
8256 /* Response to TUR or media access command when device stopped */
8257 static int resp_not_ready(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
8258 {
8259 	int stopped_state;
8260 	u64 diff_ns = 0;
8261 	ktime_t now_ts = ktime_get_boottime();
8262 	struct scsi_device *sdp = scp->device;
8263 
8264 	stopped_state = atomic_read(&devip->stopped);
8265 	if (stopped_state == 2) {
8266 		if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
8267 			diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));
8268 			if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
8269 				/* tur_ms_to_ready timer extinguished */
8270 				atomic_set(&devip->stopped, 0);
8271 				return 0;
8272 			}
8273 		}
8274 		mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x1);
8275 		if (sdebug_verbose)
8276 			sdev_printk(KERN_INFO, sdp,
8277 				    "%s: Not ready: in process of becoming ready\n", my_name);
8278 		if (scp->cmnd[0] == TEST_UNIT_READY) {
8279 			u64 tur_nanosecs_to_ready = (u64)sdeb_tur_ms_to_ready * 1000000;
8280 
8281 			if (diff_ns <= tur_nanosecs_to_ready)
8282 				diff_ns = tur_nanosecs_to_ready - diff_ns;
8283 			else
8284 				diff_ns = tur_nanosecs_to_ready;
8285 			/* As per 20-061r2 approved for spc6 by T10 on 20200716 */
8286 			do_div(diff_ns, 1000000);	/* diff_ns becomes milliseconds */
8287 			scsi_set_sense_information(scp->sense_buffer, SCSI_SENSE_BUFFERSIZE,
8288 						   diff_ns);
8289 			return check_condition_result;
8290 		}
8291 	}
8292 	mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x2);
8293 	if (sdebug_verbose)
8294 		sdev_printk(KERN_INFO, sdp, "%s: Not ready: initializing command required\n",
8295 			    my_name);
8296 	return check_condition_result;
8297 }
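/*
 * Example (editorial): with tur_ms_to_ready=2500, a TEST UNIT READY
 * issued 1 second after device creation gets NOT READY (ASC 0x4,
 * ASCQ 0x1) and a sense information field of roughly 1500, the
 * milliseconds remaining until ready (per T10 20-061r2).
 */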
8298 
8299 static void sdebug_map_queues(struct Scsi_Host *shost)
8300 {
8301 	int i, qoff;
8302 
8303 	if (shost->nr_hw_queues == 1)
8304 		return;
8305 
8306 	for (i = 0, qoff = 0; i < HCTX_MAX_TYPES; i++) {
8307 		struct blk_mq_queue_map *map = &shost->tag_set.map[i];
8308 
8309 		map->nr_queues  = 0;
8310 
8311 		if (i == HCTX_TYPE_DEFAULT)
8312 			map->nr_queues = submit_queues - poll_queues;
8313 		else if (i == HCTX_TYPE_POLL)
8314 			map->nr_queues = poll_queues;
8315 
8316 		if (!map->nr_queues) {
8317 			BUG_ON(i == HCTX_TYPE_DEFAULT);
8318 			continue;
8319 		}
8320 
8321 		map->queue_offset = qoff;
8322 		blk_mq_map_queues(map);
8323 
8324 		qoff += map->nr_queues;
8325 	}
8326 }
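/*
 * Mapping example (editorial): with submit_queues=4 and poll_queues=1,
 * HCTX_TYPE_DEFAULT covers hw queues 0..2 and HCTX_TYPE_POLL gets queue
 * 3; HCTX_TYPE_READ is left with nr_queues == 0 and is skipped.
 */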
8327 
8328 struct sdebug_blk_mq_poll_data {
8329 	unsigned int queue_num;
8330 	int *num_entries;
8331 };
8332 
8333 /*
8334  * Aborted commands are not handled here, but it does not seem possible
8335  * for schedule_resp() to produce an aborted polled command.
8336  */
8337 static bool sdebug_blk_mq_poll_iter(struct request *rq, void *opaque)
8338 {
8339 	struct sdebug_blk_mq_poll_data *data = opaque;
8340 	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
8341 	struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmd);
8342 	struct sdebug_defer *sd_dp;
8343 	u32 unique_tag = blk_mq_unique_tag(rq);
8344 	u16 hwq = blk_mq_unique_tag_to_hwq(unique_tag);
8345 	struct sdebug_queued_cmd *sqcp;
8346 	unsigned long flags;
8347 	int queue_num = data->queue_num;
8348 	ktime_t time;
8349 
8350 	/* We're only interested in one queue for this iteration */
8351 	if (hwq != queue_num)
8352 		return true;
8353 
8354 	/* Subsequent checks would fail if this failed, but check anyway */
8355 	if (!test_bit(SCMD_STATE_INFLIGHT, &cmd->state))
8356 		return true;
8357 
8358 	time = ktime_get_boottime();
8359 
8360 	spin_lock_irqsave(&sdsc->lock, flags);
8361 	sqcp = TO_QUEUED_CMD(cmd);
8362 	if (!sqcp) {
8363 		spin_unlock_irqrestore(&sdsc->lock, flags);
8364 		return true;
8365 	}
8366 
8367 	sd_dp = &sqcp->sd_dp;
8368 	if (READ_ONCE(sd_dp->defer_t) != SDEB_DEFER_POLL) {
8369 		spin_unlock_irqrestore(&sdsc->lock, flags);
8370 		return true;
8371 	}
8372 
8373 	if (time < sd_dp->cmpl_ts) {
8374 		spin_unlock_irqrestore(&sdsc->lock, flags);
8375 		return true;
8376 	}
8377 
8378 	ASSIGN_QUEUED_CMD(cmd, NULL);
8379 	spin_unlock_irqrestore(&sdsc->lock, flags);
8380 
8381 	if (sdebug_statistics) {
8382 		atomic_inc(&sdebug_completions);
8383 		if (raw_smp_processor_id() != sd_dp->issuing_cpu)
8384 			atomic_inc(&sdebug_miss_cpus);
8385 	}
8386 
8387 	sdebug_free_queued_cmd(sqcp);
8388 
8389 	scsi_done(cmd); /* callback to mid level */
8390 	(*data->num_entries)++;
8391 	return true;
8392 }
8393 
8394 static int sdebug_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
8395 {
8396 	int num_entries = 0;
8397 	struct sdebug_blk_mq_poll_data data = {
8398 		.queue_num = queue_num,
8399 		.num_entries = &num_entries,
8400 	};
8401 
8402 	blk_mq_tagset_busy_iter(&shost->tag_set, sdebug_blk_mq_poll_iter,
8403 				&data);
8404 
8405 	if (num_entries > 0)
8406 		atomic_add(num_entries, &sdeb_mq_poll_count);
8407 	return num_entries;
8408 }
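/*
 * Note (editorial): this is the host template's mq_poll hook; the block
 * layer invokes it on the poll hw queues set up in sdebug_map_queues()
 * when completing polled I/O, and sdeb_mq_poll_count accumulates the
 * number of commands completed that way.
 */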
8409 
8410 static int sdebug_timeout_cmd(struct scsi_cmnd *cmnd)
8411 {
8412 	struct scsi_device *sdp = cmnd->device;
8413 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
8414 	struct sdebug_err_inject *err;
8415 	unsigned char *cmd = cmnd->cmnd;
8416 	int ret = 0;
8417 
8418 	if (devip == NULL)
8419 		return 0;
8420 
8421 	rcu_read_lock();
8422 	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
8423 		if (err->type == ERR_TMOUT_CMD &&
8424 		    (err->cmd == cmd[0] || err->cmd == 0xff)) {
8425 			ret = !!err->cnt;
8426 			if (err->cnt < 0)
8427 				err->cnt++;
8428 
8429 			rcu_read_unlock();
8430 			return ret;
8431 		}
8432 	}
8433 	rcu_read_unlock();
8434 
8435 	return 0;
8436 }
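/*
 * Note (editorial): err->cnt encodes how often to inject: a negative
 * value counts up toward zero, injecting |cnt| times; zero disables the
 * injection; a positive value is never decremented here, so it injects
 * on every match. err->cmd == 0xff matches any opcode.
 */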
8437 
8438 static int sdebug_fail_queue_cmd(struct scsi_cmnd *cmnd)
8439 {
8440 	struct scsi_device *sdp = cmnd->device;
8441 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
8442 	struct sdebug_err_inject *err;
8443 	unsigned char *cmd = cmnd->cmnd;
8444 	int ret = 0;
8445 
8446 	if (devip == NULL)
8447 		return 0;
8448 
8449 	rcu_read_lock();
8450 	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
8451 		if (err->type == ERR_FAIL_QUEUE_CMD &&
8452 		    (err->cmd == cmd[0] || err->cmd == 0xff)) {
8453 			ret = err->cnt ? err->queuecmd_ret : 0;
8454 			if (err->cnt < 0)
8455 				err->cnt++;
8456 
8457 			rcu_read_unlock();
8458 			return ret;
8459 		}
8460 	}
8461 	rcu_read_unlock();
8462 
8463 	return 0;
8464 }
8465 
8466 static int sdebug_fail_cmd(struct scsi_cmnd *cmnd, int *retval,
8467 			   struct sdebug_err_inject *info)
8468 {
8469 	struct scsi_device *sdp = cmnd->device;
8470 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
8471 	struct sdebug_err_inject *err;
8472 	unsigned char *cmd = cmnd->cmnd;
8473 	int ret = 0;
8474 	int result;
8475 
8476 	if (devip == NULL)
8477 		return 0;
8478 
8479 	rcu_read_lock();
8480 	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
8481 		if (err->type == ERR_FAIL_CMD &&
8482 		    (err->cmd == cmd[0] || err->cmd == 0xff)) {
8483 			if (!err->cnt) {
8484 				rcu_read_unlock();
8485 				return 0;
8486 			}
8487 
8488 			ret = !!err->cnt;
8489 			rcu_read_unlock();
8490 			goto out_handle;
8491 		}
8492 	}
8493 	rcu_read_unlock();
8494 
8495 	return 0;
8496 
8497 out_handle:
8498 	if (err->cnt < 0)
8499 		err->cnt++;
8500 	mk_sense_buffer(cmnd, err->sense_key, err->asc, err->asq);
8501 	result = err->status_byte | err->host_byte << 16 | err->driver_byte << 24;
8502 	*info = *err;
8503 	*retval = schedule_resp(cmnd, devip, result, NULL, 0, 0);
8504 
8505 	return ret;
8506 }
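/*
 * Note (editorial): the result composed above places the status byte in
 * bits 0..7, the host byte in bits 16..23 and the (deprecated) driver
 * byte in bits 24..31, matching the midlayer's cmd->result layout.
 */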
8507 
8508 static int scsi_debug_queuecommand(struct Scsi_Host *shost,
8509 				   struct scsi_cmnd *scp)
8510 {
8511 	u8 sdeb_i;
8512 	struct scsi_device *sdp = scp->device;
8513 	const struct opcode_info_t *oip;
8514 	const struct opcode_info_t *r_oip;
8515 	struct sdebug_dev_info *devip;
8516 	u8 *cmd = scp->cmnd;
8517 	int (*r_pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
8518 	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *) = NULL;
8519 	int k, na;
8520 	int errsts = 0;
8521 	u64 lun_index = sdp->lun & 0x3FFF;
8522 	u32 flags;
8523 	u16 sa;
8524 	u8 opcode = cmd[0];
8525 	bool has_wlun_rl;
8526 	bool inject_now;
8527 	int ret = 0;
8528 	struct sdebug_err_inject err;
8529 
8530 	scsi_set_resid(scp, 0);
8531 	if (sdebug_statistics) {
8532 		atomic_inc(&sdebug_cmnd_count);
8533 		inject_now = inject_on_this_cmd();
8534 	} else {
8535 		inject_now = false;
8536 	}
8537 	if (unlikely(sdebug_verbose &&
8538 		     !(SDEBUG_OPT_NO_CDB_NOISE & sdebug_opts))) {
8539 		char b[120];
8540 		int n, len, sb;
8541 
8542 		len = scp->cmd_len;
8543 		sb = (int)sizeof(b);
8544 		if (len > 32)
8545 			strcpy(b, "too long, over 32 bytes");
8546 		else {
8547 			for (k = 0, n = 0; k < len && n < sb; ++k)
8548 				n += scnprintf(b + n, sb - n, "%02x ",
8549 					       (u32)cmd[k]);
8550 		}
8551 		sdev_printk(KERN_INFO, sdp, "%s: tag=%#x, cmd %s\n", my_name,
8552 			    blk_mq_unique_tag(scsi_cmd_to_rq(scp)), b);
8553 	}
8554 	if (unlikely(inject_now && (sdebug_opts & SDEBUG_OPT_HOST_BUSY)))
8555 		return SCSI_MLQUEUE_HOST_BUSY;
8556 	has_wlun_rl = (sdp->lun == SCSI_W_LUN_REPORT_LUNS);
8557 	if (unlikely(lun_index >= sdebug_max_luns && !has_wlun_rl))
8558 		goto err_out;
8559 
8560 	sdeb_i = opcode_ind_arr[opcode];	/* fully mapped */
8561 	oip = &opcode_info_arr[sdeb_i];		/* safe if table consistent */
8562 	devip = (struct sdebug_dev_info *)sdp->hostdata;
8563 	if (unlikely(!devip)) {
8564 		devip = find_build_dev_info(sdp);
8565 		if (NULL == devip)
8566 			goto err_out;
8567 	}
8568 
8569 	if (sdebug_timeout_cmd(scp)) {
8570 		scmd_printk(KERN_INFO, scp, "timeout command 0x%x\n", opcode);
8571 		return 0;
8572 	}
8573 
8574 	ret = sdebug_fail_queue_cmd(scp);
8575 	if (ret) {
8576 		scmd_printk(KERN_INFO, scp, "fail queue command 0x%x with 0x%x\n",
8577 				opcode, ret);
8578 		return ret;
8579 	}
8580 
8581 	if (sdebug_fail_cmd(scp, &ret, &err)) {
8582 		scmd_printk(KERN_INFO, scp,
8583 			"fail command 0x%x with hostbyte=0x%x, "
8584 			"driverbyte=0x%x, statusbyte=0x%x, "
8585 			"sense_key=0x%x, asc=0x%x, asq=0x%x\n",
8586 			opcode, err.host_byte, err.driver_byte,
8587 			err.status_byte, err.sense_key, err.asc, err.asq);
8588 		return ret;
8589 	}
8590 
8591 	if (unlikely(inject_now && !atomic_read(&sdeb_inject_pending)))
8592 		atomic_set(&sdeb_inject_pending, 1);
8593 
8594 	na = oip->num_attached;
8595 	r_pfp = oip->pfp;
8596 	if (na) {	/* multiple commands with this opcode */
8597 		r_oip = oip;
8598 		if (FF_SA & r_oip->flags) {
8599 			if (F_SA_LOW & oip->flags)
8600 				sa = 0x1f & cmd[1];
8601 			else
8602 				sa = get_unaligned_be16(cmd + 8);
8603 			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
8604 				if (opcode == oip->opcode && sa == oip->sa)
8605 					break;
8606 			}
8607 		} else {   /* since no service action only check opcode */
8608 			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
8609 				if (opcode == oip->opcode)
8610 					break;
8611 			}
8612 		}
8613 		if (k > na) {
8614 			if (F_SA_LOW & r_oip->flags)
8615 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 4);
8616 			else if (F_SA_HIGH & r_oip->flags)
8617 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, 7);
8618 			else
8619 				mk_sense_invalid_opcode(scp);
8620 			goto check_cond;
8621 		}
8622 	}	/* else (when na==0) we assume the oip is a match */
8623 	flags = oip->flags;
8624 	if (unlikely(F_INV_OP & flags)) {
8625 		mk_sense_invalid_opcode(scp);
8626 		goto check_cond;
8627 	}
8628 	if (unlikely(has_wlun_rl && !(F_RL_WLUN_OK & flags))) {
8629 		if (sdebug_verbose)
8630 			sdev_printk(KERN_INFO, sdp, "%s: Opcode 0x%x not%s\n",
8631 				    my_name, opcode, " supported for wlun");
8632 		mk_sense_invalid_opcode(scp);
8633 		goto check_cond;
8634 	}
8635 	if (unlikely(sdebug_strict)) {	/* check cdb against mask */
8636 		u8 rem;
8637 		int j;
8638 
8639 		for (k = 1; k < oip->len_mask[0] && k < 16; ++k) {
8640 			rem = ~oip->len_mask[k] & cmd[k];
8641 			if (rem) {
8642 				for (j = 7; j >= 0; --j, rem <<= 1) {
8643 					if (0x80 & rem)
8644 						break;
8645 				}
8646 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, k, j);
8647 				goto check_cond;
8648 			}
8649 		}
8650 	}
8651 	if (unlikely(!(F_SKIP_UA & flags) &&
8652 		     find_first_bit(devip->uas_bm,
8653 				    SDEBUG_NUM_UAS) != SDEBUG_NUM_UAS)) {
8654 		errsts = make_ua(scp, devip);
8655 		if (errsts)
8656 			goto check_cond;
8657 	}
8658 	if (unlikely(((F_M_ACCESS & flags) || scp->cmnd[0] == TEST_UNIT_READY) &&
8659 		     atomic_read(&devip->stopped))) {
8660 		errsts = resp_not_ready(scp, devip);
8661 		if (errsts)
8662 			goto fini;
8663 	}
8664 	if (sdebug_fake_rw && (F_FAKE_RW & flags))
8665 		goto fini;
8666 	if (unlikely(sdebug_every_nth)) {
8667 		if (fake_timeout(scp))
8668 			return 0;	/* ignore command: make trouble */
8669 	}
8670 	if (likely(oip->pfp))
8671 		pfp = oip->pfp;	/* calls a resp_* function */
8672 	else
8673 		pfp = r_pfp;    /* if leaf function ptr NULL, try the root's */
8674 
8675 fini:
8676 	if (F_DELAY_OVERR & flags)	/* cmds like INQUIRY respond asap */
8677 		return schedule_resp(scp, devip, errsts, pfp, 0, 0);
8678 	else if ((flags & F_LONG_DELAY) && (sdebug_jdelay > 0 ||
8679 					    sdebug_ndelay > 10000)) {
8680 		/*
8681 		 * Skip long delays if ndelay <= 10 microseconds. Otherwise
8682 		 * for Start Stop Unit (SSU) want at least 1 second delay and
8683 		 * if sdebug_jdelay>1 want a long delay of that many seconds.
8684 		 * For Synchronize Cache want 1/20 of SSU's delay.
8685 		 */
8686 		int jdelay = (sdebug_jdelay < 2) ? 1 : sdebug_jdelay;
8687 		int denom = (flags & F_SYNC_DELAY) ? 20 : 1;
8688 
8689 		jdelay = mult_frac(USER_HZ * jdelay, HZ, denom * USER_HZ);
8690 		return schedule_resp(scp, devip, errsts, pfp, jdelay, 0);
8691 	} else
8692 		return schedule_resp(scp, devip, errsts, pfp, sdebug_jdelay,
8693 				     sdebug_ndelay);
8694 check_cond:
8695 	return schedule_resp(scp, devip, check_condition_result, NULL, 0, 0);
8696 err_out:
8697 	return schedule_resp(scp, NULL, DID_NO_CONNECT << 16, NULL, 0, 0);
8698 }
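/*
 * Delay example (editorial): with HZ=250 and jdelay=3, a START STOP UNIT
 * command (F_LONG_DELAY) is completed after 3 * 250 = 750 jiffies, i.e.
 * 3 seconds, and a SYNCHRONIZE CACHE (F_SYNC_DELAY) after 1/20 of that;
 * commands flagged F_DELAY_OVERR, such as INQUIRY, respond without the
 * configured delay.
 */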
8699 
8700 static int sdebug_init_cmd_priv(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
8701 {
8702 	struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmd);
8703 
8704 	spin_lock_init(&sdsc->lock);
8705 
8706 	return 0;
8707 }
8708 
8709 static struct scsi_host_template sdebug_driver_template = {
8710 	.show_info =		scsi_debug_show_info,
8711 	.write_info =		scsi_debug_write_info,
8712 	.proc_name =		sdebug_proc_name,
8713 	.name =			"SCSI DEBUG",
8714 	.info =			scsi_debug_info,
8715 	.slave_alloc =		scsi_debug_slave_alloc,
8716 	.slave_configure =	scsi_debug_slave_configure,
8717 	.slave_destroy =	scsi_debug_slave_destroy,
8718 	.ioctl =		scsi_debug_ioctl,
8719 	.queuecommand =		scsi_debug_queuecommand,
8720 	.change_queue_depth =	sdebug_change_qdepth,
8721 	.map_queues =		sdebug_map_queues,
8722 	.mq_poll =		sdebug_blk_mq_poll,
8723 	.eh_abort_handler =	scsi_debug_abort,
8724 	.eh_device_reset_handler = scsi_debug_device_reset,
8725 	.eh_target_reset_handler = scsi_debug_target_reset,
8726 	.eh_bus_reset_handler = scsi_debug_bus_reset,
8727 	.eh_host_reset_handler = scsi_debug_host_reset,
8728 	.can_queue =		SDEBUG_CANQUEUE,
8729 	.this_id =		7,
8730 	.sg_tablesize =		SG_MAX_SEGMENTS,
8731 	.cmd_per_lun =		DEF_CMD_PER_LUN,
8732 	.max_sectors =		-1U,
8733 	.max_segment_size =	-1U,
8734 	.module =		THIS_MODULE,
8735 	.track_queue_depth =	1,
8736 	.cmd_size = sizeof(struct sdebug_scsi_cmd),
8737 	.init_cmd_priv = sdebug_init_cmd_priv,
8738 	.target_alloc =		sdebug_target_alloc,
8739 	.target_destroy =	sdebug_target_destroy,
8740 };
8741 
8742 static int sdebug_driver_probe(struct device *dev)
8743 {
8744 	int error = 0;
8745 	struct sdebug_host_info *sdbg_host;
8746 	struct Scsi_Host *hpnt;
8747 	int hprot;
8748 
8749 	sdbg_host = dev_to_sdebug_host(dev);
8750 
8751 	sdebug_driver_template.can_queue = sdebug_max_queue;
8752 	sdebug_driver_template.cmd_per_lun = sdebug_max_queue;
8753 	if (!sdebug_clustering)
8754 		sdebug_driver_template.dma_boundary = PAGE_SIZE - 1;
8755 
8756 	hpnt = scsi_host_alloc(&sdebug_driver_template, 0);
8757 	if (NULL == hpnt) {
8758 		pr_err("scsi_host_alloc failed\n");
8759 		error = -ENODEV;
8760 		return error;
8761 	}
8762 	if (submit_queues > nr_cpu_ids) {
8763 		pr_warn("%s: trim submit_queues (was %d) to nr_cpu_ids=%u\n",
8764 			my_name, submit_queues, nr_cpu_ids);
8765 		submit_queues = nr_cpu_ids;
8766 	}
8767 	/*
8768 	 * Decide whether to tell scsi subsystem that we want mq. The
8769 	 * following should give the same answer for each host.
8770 	 */
8771 	hpnt->nr_hw_queues = submit_queues;
8772 	if (sdebug_host_max_queue)
8773 		hpnt->host_tagset = 1;
8774 
8775 	/* poll queues are possible for nr_hw_queues > 1 */
8776 	if (hpnt->nr_hw_queues == 1 || (poll_queues < 1)) {
8777 		pr_warn("%s: trim poll_queues to 0. poll_q/nr_hw = (%d/%d)\n",
8778 			 my_name, poll_queues, hpnt->nr_hw_queues);
8779 		poll_queues = 0;
8780 	}
8781 
8782 	/*
8783 	 * Poll queues don't need interrupts, but we need at least one I/O queue
8784 	 * left over for non-polled I/O.
8785 	 * If that condition is not met, trim poll_queues to 1 (for simplicity).
8786 	 */
8787 	if (poll_queues >= submit_queues) {
8788 		if (submit_queues < 3)
8789 			pr_warn("%s: trim poll_queues to 1\n", my_name);
8790 		else
8791 			pr_warn("%s: trim poll_queues to 1. Perhaps try poll_queues=%d\n",
8792 				my_name, submit_queues - 1);
8793 		poll_queues = 1;
8794 	}
8795 	if (poll_queues)
8796 		hpnt->nr_maps = 3;
8797 
8798 	sdbg_host->shost = hpnt;
8799 	if ((hpnt->this_id >= 0) && (sdebug_num_tgts > hpnt->this_id))
8800 		hpnt->max_id = sdebug_num_tgts + 1;
8801 	else
8802 		hpnt->max_id = sdebug_num_tgts;
8803 	/* = sdebug_max_luns; */
8804 	hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
8805 
8806 	hprot = 0;
8807 
8808 	switch (sdebug_dif) {
8809 
8810 	case T10_PI_TYPE1_PROTECTION:
8811 		hprot = SHOST_DIF_TYPE1_PROTECTION;
8812 		if (sdebug_dix)
8813 			hprot |= SHOST_DIX_TYPE1_PROTECTION;
8814 		break;
8815 
8816 	case T10_PI_TYPE2_PROTECTION:
8817 		hprot = SHOST_DIF_TYPE2_PROTECTION;
8818 		if (sdebug_dix)
8819 			hprot |= SHOST_DIX_TYPE2_PROTECTION;
8820 		break;
8821 
8822 	case T10_PI_TYPE3_PROTECTION:
8823 		hprot = SHOST_DIF_TYPE3_PROTECTION;
8824 		if (sdebug_dix)
8825 			hprot |= SHOST_DIX_TYPE3_PROTECTION;
8826 		break;
8827 
8828 	default:
8829 		if (sdebug_dix)
8830 			hprot |= SHOST_DIX_TYPE0_PROTECTION;
8831 		break;
8832 	}
8833 
8834 	scsi_host_set_prot(hpnt, hprot);
8835 
8836 	if (have_dif_prot || sdebug_dix)
8837 		pr_info("host protection%s%s%s%s%s%s%s\n",
8838 			(hprot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
8839 			(hprot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
8840 			(hprot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
8841 			(hprot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
8842 			(hprot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
8843 			(hprot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
8844 			(hprot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");
8845 
8846 	if (sdebug_guard == 1)
8847 		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
8848 	else
8849 		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);
8850 
8851 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & sdebug_opts);
8852 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & sdebug_opts);
8853 	if (sdebug_every_nth)	/* need stats counters for every_nth */
8854 		sdebug_statistics = true;
8855 	error = scsi_add_host(hpnt, &sdbg_host->dev);
8856 	if (error) {
8857 		pr_err("scsi_add_host failed\n");
8858 		error = -ENODEV;
8859 		scsi_host_put(hpnt);
8860 	} else {
8861 		scsi_scan_host(hpnt);
8862 	}
8863 
8864 	return error;
8865 }
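/*
 * Example (editorial): loading with dif=1 dix=1 selects
 * SHOST_DIF_TYPE1_PROTECTION | SHOST_DIX_TYPE1_PROTECTION above and logs
 * "host protection DIF1 DIX1"; guard=1 swaps the default T10 CRC guard
 * for an IP checksum.
 */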
8866 
8867 static void sdebug_driver_remove(struct device *dev)
8868 {
8869 	struct sdebug_host_info *sdbg_host;
8870 	struct sdebug_dev_info *sdbg_devinfo, *tmp;
8871 
8872 	sdbg_host = dev_to_sdebug_host(dev);
8873 
8874 	scsi_remove_host(sdbg_host->shost);
8875 
8876 	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
8877 				 dev_list) {
8878 		list_del(&sdbg_devinfo->dev_list);
8879 		kfree(sdbg_devinfo->zstate);
8880 		kfree(sdbg_devinfo);
8881 	}
8882 
8883 	scsi_host_put(sdbg_host->shost);
8884 }
8885 
8886 static const struct bus_type pseudo_lld_bus = {
8887 	.name = "pseudo",
8888 	.probe = sdebug_driver_probe,
8889 	.remove = sdebug_driver_remove,
8890 	.drv_groups = sdebug_drv_groups,
8891 };
8892