xref: /linux/drivers/scsi/scsi_debug.c (revision a1ff5a7d78a036d6c2178ee5acd6ba4946243800)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
 *  Copyright (C) 1992  Eric Youngdale
 *  Simulate a host adapter with 2 disks attached.  Do a lot of checking
 *  to make sure that we are not getting blocks mixed up, and PANIC if
 *  anything out of the ordinary is seen.
 * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 *
 * Copyright (C) 2001 - 2021 Douglas Gilbert
 *
 *  For documentation see http://sg.danny.cz/sg/scsi_debug.html
 */


#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__

#include <linux/module.h>
#include <linux/align.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/vmalloc.h>
#include <linux/moduleparam.h>
#include <linux/scatterlist.h>
#include <linux/blkdev.h>
#include <linux/crc-t10dif.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/atomic.h>
#include <linux/hrtimer.h>
#include <linux/uuid.h>
#include <linux/t10-pi.h>
#include <linux/msdos_partition.h>
#include <linux/random.h>
#include <linux/xarray.h>
#include <linux/prefetch.h>
#include <linux/debugfs.h>
#include <linux/async.h>
#include <linux/cleanup.h>

#include <net/checksum.h>

#include <asm/unaligned.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsicam.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_dbg.h>

#include "sd.h"
#include "scsi_logging.h"

/* make sure inq_product_rev string corresponds to this version */
#define SDEBUG_VERSION "0191"	/* format to fit INQUIRY revision field */
static const char *sdebug_version_date = "20210520";

#define MY_NAME "scsi_debug"

/* Additional Sense Code (ASC) */
#define NO_ADDITIONAL_SENSE 0x0
#define OVERLAP_ATOMIC_COMMAND_ASC 0x0
#define OVERLAP_ATOMIC_COMMAND_ASCQ 0x23
#define LOGICAL_UNIT_NOT_READY 0x4
#define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
#define UNRECOVERED_READ_ERR 0x11
#define PARAMETER_LIST_LENGTH_ERR 0x1a
#define INVALID_OPCODE 0x20
#define LBA_OUT_OF_RANGE 0x21
#define INVALID_FIELD_IN_CDB 0x24
#define INVALID_FIELD_IN_PARAM_LIST 0x26
#define WRITE_PROTECTED 0x27
#define UA_RESET_ASC 0x29
#define UA_CHANGED_ASC 0x2a
#define TARGET_CHANGED_ASC 0x3f
#define LUNS_CHANGED_ASCQ 0x0e
#define INSUFF_RES_ASC 0x55
#define INSUFF_RES_ASCQ 0x3
#define POWER_ON_RESET_ASCQ 0x0
#define POWER_ON_OCCURRED_ASCQ 0x1
#define BUS_RESET_ASCQ 0x2	/* scsi bus reset occurred */
#define MODE_CHANGED_ASCQ 0x1	/* mode parameters changed */
#define CAPACITY_CHANGED_ASCQ 0x9
#define SAVING_PARAMS_UNSUP 0x39
#define TRANSPORT_PROBLEM 0x4b
#define THRESHOLD_EXCEEDED 0x5d
#define LOW_POWER_COND_ON 0x5e
#define MISCOMPARE_VERIFY_ASC 0x1d
#define MICROCODE_CHANGED_ASCQ 0x1	/* with TARGET_CHANGED_ASC */
#define MICROCODE_CHANGED_WO_RESET_ASCQ 0x16
#define WRITE_ERROR_ASC 0xc
#define UNALIGNED_WRITE_ASCQ 0x4
#define WRITE_BOUNDARY_ASCQ 0x5
#define READ_INVDATA_ASCQ 0x6
#define READ_BOUNDARY_ASCQ 0x7
#define ATTEMPT_ACCESS_GAP 0x9
#define INSUFF_ZONE_ASCQ 0xe
/* see drivers/scsi/sense_codes.h */

/* Additional Sense Code Qualifier (ASCQ) */
#define ACK_NAK_TO 0x3

/* Default values for driver parameters */
#define DEF_NUM_HOST   1
#define DEF_NUM_TGTS   1
#define DEF_MAX_LUNS   1
/* With these defaults, this driver will make 1 host with 1 target
 * (id 0) containing 1 logical unit (lun 0). That is 1 device.
 */
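/*
 * For example (an illustrative invocation, using module parameters that
 * correspond to the defaults above):
 *
 *   modprobe scsi_debug num_tgts=2 max_luns=4
 *
 * would instead present 1 host with 2 targets, each containing 4 logical
 * units.
 */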
#define DEF_ATO 1
#define DEF_CDB_LEN 10
#define DEF_JDELAY   1		/* if > 0 unit is a jiffy */
#define DEF_DEV_SIZE_PRE_INIT   0
#define DEF_DEV_SIZE_MB   8
#define DEF_ZBC_DEV_SIZE_MB   128
#define DEF_DIF 0
#define DEF_DIX 0
#define DEF_PER_HOST_STORE false
#define DEF_D_SENSE   0
#define DEF_EVERY_NTH   0
#define DEF_FAKE_RW	0
#define DEF_GUARD 0
#define DEF_HOST_LOCK 0
#define DEF_LBPU 0
#define DEF_LBPWS 0
#define DEF_LBPWS10 0
#define DEF_LBPRZ 1
#define DEF_LOWEST_ALIGNED 0
#define DEF_NDELAY   0		/* if > 0 unit is a nanosecond */
#define DEF_NO_LUN_0   0
#define DEF_NUM_PARTS   0
#define DEF_OPTS   0
#define DEF_OPT_BLKS 1024
#define DEF_PHYSBLK_EXP 0
#define DEF_OPT_XFERLEN_EXP 0
#define DEF_PTYPE   TYPE_DISK
#define DEF_RANDOM false
#define DEF_REMOVABLE false
#define DEF_SCSI_LEVEL   7    /* INQUIRY, byte2 [6->SPC-4; 7->SPC-5] */
#define DEF_SECTOR_SIZE 512
#define DEF_UNMAP_ALIGNMENT 0
#define DEF_UNMAP_GRANULARITY 1
#define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
#define DEF_UNMAP_MAX_DESC 256
#define DEF_VIRTUAL_GB   0
#define DEF_VPD_USE_HOSTNO 1
#define DEF_WRITESAME_LENGTH 0xFFFF
#define DEF_ATOMIC_WR 0
#define DEF_ATOMIC_WR_MAX_LENGTH 8192
#define DEF_ATOMIC_WR_ALIGN 2
#define DEF_ATOMIC_WR_GRAN 2
#define DEF_ATOMIC_WR_MAX_LENGTH_BNDRY (DEF_ATOMIC_WR_MAX_LENGTH)
#define DEF_ATOMIC_WR_MAX_BNDRY 128
#define DEF_STRICT 0
#define DEF_STATISTICS false
#define DEF_SUBMIT_QUEUES 1
#define DEF_TUR_MS_TO_READY 0
#define DEF_UUID_CTL 0
#define JDELAY_OVERRIDDEN -9999

/* Default parameters for ZBC drives */
#define DEF_ZBC_ZONE_SIZE_MB	128
#define DEF_ZBC_MAX_OPEN_ZONES	8
#define DEF_ZBC_NR_CONV_ZONES	1

#define SDEBUG_LUN_0_VAL 0

/* bit mask values for sdebug_opts */
#define SDEBUG_OPT_NOISE		1
#define SDEBUG_OPT_MEDIUM_ERR		2
#define SDEBUG_OPT_TIMEOUT		4
#define SDEBUG_OPT_RECOVERED_ERR	8
#define SDEBUG_OPT_TRANSPORT_ERR	16
#define SDEBUG_OPT_DIF_ERR		32
#define SDEBUG_OPT_DIX_ERR		64
#define SDEBUG_OPT_MAC_TIMEOUT		128
#define SDEBUG_OPT_SHORT_TRANSFER	0x100
#define SDEBUG_OPT_Q_NOISE		0x200
#define SDEBUG_OPT_ALL_TSF		0x400	/* ignore */
#define SDEBUG_OPT_RARE_TSF		0x800
#define SDEBUG_OPT_N_WCE		0x1000
#define SDEBUG_OPT_RESET_NOISE		0x2000
#define SDEBUG_OPT_NO_CDB_NOISE		0x4000
#define SDEBUG_OPT_HOST_BUSY		0x8000
#define SDEBUG_OPT_CMD_ABORT		0x10000
#define SDEBUG_OPT_ALL_NOISE (SDEBUG_OPT_NOISE | SDEBUG_OPT_Q_NOISE | \
			      SDEBUG_OPT_RESET_NOISE)
#define SDEBUG_OPT_ALL_INJECTING (SDEBUG_OPT_RECOVERED_ERR | \
				  SDEBUG_OPT_TRANSPORT_ERR | \
				  SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR | \
				  SDEBUG_OPT_SHORT_TRANSFER | \
				  SDEBUG_OPT_HOST_BUSY | \
				  SDEBUG_OPT_CMD_ABORT)
#define SDEBUG_OPT_RECOV_DIF_DIX (SDEBUG_OPT_RECOVERED_ERR | \
				  SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR)

/* As indicated in SAM-5 and SPC-4 Unit Attentions (UAs) are returned in
 * priority order. In the subset implemented here lower numbers have higher
 * priority. The UA numbers should be a sequence starting from 0 with
 * SDEBUG_NUM_UAS being 1 higher than the highest numbered UA. */
#define SDEBUG_UA_POR 0		/* Power on, reset, or bus device reset */
#define SDEBUG_UA_POOCCUR 1	/* Power on occurred */
#define SDEBUG_UA_BUS_RESET 2
#define SDEBUG_UA_MODE_CHANGED 3
#define SDEBUG_UA_CAPACITY_CHANGED 4
#define SDEBUG_UA_LUNS_CHANGED 5
#define SDEBUG_UA_MICROCODE_CHANGED 6	/* simulate firmware change */
#define SDEBUG_UA_MICROCODE_CHANGED_WO_RESET 7
#define SDEBUG_NUM_UAS 8
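
/*
 * A UA is posted by setting its bit in a device's uas_bm bitmap; because
 * make_ua() below scans that bitmap with find_first_bit(), lower numbered
 * (higher priority) UAs are reported first. A minimal sketch, where devip
 * is a struct sdebug_dev_info pointer:
 *
 *   set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
 */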

/* when the SDEBUG_OPT_MEDIUM_ERR bit is set in sdebug_opts, a medium error
 * is simulated at this sector on read commands: */
#define OPT_MEDIUM_ERR_ADDR   0x1234 /* that's sector 4660 in decimal */
#define OPT_MEDIUM_ERR_NUM    10     /* number of consecutive medium errs */
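
/*
 * For example, loading the driver with the SDEBUG_OPT_MEDIUM_ERR bit set:
 *
 *   modprobe scsi_debug opts=2
 *
 * makes reads of sector 0x1234 (and, by default, the 9 sectors after it)
 * fail with a medium error sense.
 */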

/* SDEBUG_CANQUEUE is the maximum number of commands that can be queued
 * (for response) per submit queue at one time. It can be reduced by the
 * max_queue option. Command responses are not queued when jdelay=0 and
 * ndelay=0. The per-device DEF_CMD_PER_LUN can be changed via sysfs:
 * /sys/class/scsi_device/<h:c:t:l>/device/queue_depth
 * but cannot exceed SDEBUG_CANQUEUE.
 */
#define SDEBUG_CANQUEUE_WORDS  3	/* a WORD is the bits in a long */
#define SDEBUG_CANQUEUE  (SDEBUG_CANQUEUE_WORDS * BITS_PER_LONG)
#define DEF_CMD_PER_LUN  SDEBUG_CANQUEUE
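
/*
 * For example, the queue depth of a single device can be lowered at run
 * time via the sysfs path above (h:c:t:l shown as 0:0:0:0 purely for
 * illustration):
 *
 *   echo 4 > /sys/class/scsi_device/0:0:0:0/device/queue_depth
 */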

/* UA - Unit Attention; SA - Service Action; SSU - Start Stop Unit */
#define F_D_IN			1	/* Data-in command (e.g. READ) */
#define F_D_OUT			2	/* Data-out command (e.g. WRITE) */
#define F_D_OUT_MAYBE		4	/* WRITE SAME, NDOB bit */
#define F_D_UNKN		8
#define F_RL_WLUN_OK		0x10	/* allowed with REPORT LUNS W-LUN */
#define F_SKIP_UA		0x20	/* bypass UAs (e.g. INQUIRY command) */
#define F_DELAY_OVERR		0x40	/* for commands like INQUIRY */
#define F_SA_LOW		0x80	/* SA is in cdb byte 1, bits 4 to 0 */
#define F_SA_HIGH		0x100	/* SA is in cdb bytes 8 and 9 */
#define F_INV_OP		0x200	/* invalid opcode (not supported) */
#define F_FAKE_RW		0x400	/* bypass resp_*() when fake_rw set */
#define F_M_ACCESS		0x800	/* media access, reacts to SSU state */
#define F_SSU_DELAY		0x1000	/* SSU command delay (long-ish) */
#define F_SYNC_DELAY		0x2000	/* SYNCHRONIZE CACHE delay */

/* Useful combinations of the above flags */
#define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
#define FF_MEDIA_IO (F_M_ACCESS | F_FAKE_RW)
#define FF_SA (F_SA_HIGH | F_SA_LOW)
#define F_LONG_DELAY		(F_SSU_DELAY | F_SYNC_DELAY)

#define SDEBUG_MAX_PARTS 4

#define SDEBUG_MAX_CMD_LEN 32

#define SDEB_XA_NOT_IN_USE XA_MARK_1

static struct kmem_cache *queued_cmd_cache;

#define TO_QUEUED_CMD(scmd)  ((void *)(scmd)->host_scribble)
#define ASSIGN_QUEUED_CMD(scmnd, qc) { (scmnd)->host_scribble = (void *) qc; }
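
/*
 * Typical use of the two macros above (sketch): a queued command is
 * stashed in the scsi_cmnd's host_scribble pointer with
 * ASSIGN_QUEUED_CMD(scmnd, sqcp) and later recovered with
 * struct sdebug_queued_cmd *sqcp = TO_QUEUED_CMD(scmnd).
 */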

/* Zone types (zbcr05 table 25) */
enum sdebug_z_type {
	ZBC_ZTYPE_CNV	= 0x1,
	ZBC_ZTYPE_SWR	= 0x2,
	ZBC_ZTYPE_SWP	= 0x3,
	/* ZBC_ZTYPE_SOBR = 0x4, */
	ZBC_ZTYPE_GAP	= 0x5,
};

/* enumeration names taken from table 26, zbcr05 */
enum sdebug_z_cond {
	ZBC_NOT_WRITE_POINTER	= 0x0,
	ZC1_EMPTY		= 0x1,
	ZC2_IMPLICIT_OPEN	= 0x2,
	ZC3_EXPLICIT_OPEN	= 0x3,
	ZC4_CLOSED		= 0x4,
	ZC6_READ_ONLY		= 0xd,
	ZC5_FULL		= 0xe,
	ZC7_OFFLINE		= 0xf,
};

struct sdeb_zone_state {	/* ZBC: per zone state */
	enum sdebug_z_type z_type;
	enum sdebug_z_cond z_cond;
	bool z_non_seq_resource;
	unsigned int z_size;
	sector_t z_start;
	sector_t z_wp;
};

enum sdebug_err_type {
	ERR_TMOUT_CMD		= 0,	/* make specific scsi command timeout */
	ERR_FAIL_QUEUE_CMD	= 1,	/* make specific scsi command's */
					/* queuecmd return failed */
	ERR_FAIL_CMD		= 2,	/* make specific scsi command's */
					/* queuecmd return succeed but */
					/* with errors set in scsi_cmnd */
	ERR_ABORT_CMD_FAILED	= 3,	/* control return FAILED from */
					/* scsi_debug_abort() */
	ERR_LUN_RESET_FAILED	= 4,	/* control return FAILED from */
					/* scsi_debug_device_reset() */
};

struct sdebug_err_inject {
	int type;
	struct list_head list;
	int cnt;
	unsigned char cmd;
	struct rcu_head rcu;

	union {
		/*
		 * For ERR_FAIL_QUEUE_CMD
		 */
		int queuecmd_ret;

		/*
		 * For ERR_FAIL_CMD
		 */
		struct {
			unsigned char host_byte;
			unsigned char driver_byte;
			unsigned char status_byte;
			unsigned char sense_key;
			unsigned char asc;
			unsigned char asq;
		};
	};
};

struct sdebug_dev_info {
	struct list_head dev_list;
	unsigned int channel;
	unsigned int target;
	u64 lun;
	uuid_t lu_name;
	struct sdebug_host_info *sdbg_host;
	unsigned long uas_bm[1];
	atomic_t stopped;	/* 1: by SSU, 2: device start */
	bool used;

	/* For ZBC devices */
	bool zoned;
	unsigned int zcap;
	unsigned int zsize;
	unsigned int zsize_shift;
	unsigned int nr_zones;
	unsigned int nr_conv_zones;
	unsigned int nr_seq_zones;
	unsigned int nr_imp_open;
	unsigned int nr_exp_open;
	unsigned int nr_closed;
	unsigned int max_open;
	ktime_t create_ts;	/* time since bootup that this device was created */
	struct sdeb_zone_state *zstate;

	struct dentry *debugfs_entry;
	struct spinlock list_lock;
	struct list_head inject_err_list;
};

struct sdebug_target_info {
	bool reset_fail;
	struct dentry *debugfs_entry;
};

struct sdebug_host_info {
	struct list_head host_list;
	int si_idx;	/* sdeb_store_info (per host) xarray index */
	struct Scsi_Host *shost;
	struct device dev;
	struct list_head dev_info_list;
};

/* There is an xarray of pointers to this struct's objects, one per host */
struct sdeb_store_info {
	rwlock_t macc_data_lck;	/* for media data access on this store */
	rwlock_t macc_meta_lck;	/* for atomic media meta access on this store */
	rwlock_t macc_sector_lck;	/* per-sector media data access on this store */
	u8 *storep;		/* user data storage (ram) */
	struct t10_pi_tuple *dif_storep; /* protection info */
	void *map_storep;	/* provisioning map */
};

#define dev_to_sdebug_host(d)	\
	container_of(d, struct sdebug_host_info, dev)

#define shost_to_sdebug_host(shost)	\
	dev_to_sdebug_host(shost->dma_dev)

enum sdeb_defer_type {SDEB_DEFER_NONE = 0, SDEB_DEFER_HRT = 1,
		      SDEB_DEFER_WQ = 2, SDEB_DEFER_POLL = 3};

struct sdebug_defer {
	struct hrtimer hrt;
	struct execute_work ew;
	ktime_t cmpl_ts;/* time since boot to complete this cmd */
	int issuing_cpu;
	bool aborted;	/* true when blk_abort_request() already called */
	enum sdeb_defer_type defer_t;
};

struct sdebug_device_access_info {
	bool atomic_write;
	u64 lba;
	u32 num;
	struct scsi_cmnd *self;
};

struct sdebug_queued_cmd {
	/* corresponding bit set in in_use_bm[] in owning struct sdebug_queue
	 * instance indicates this slot is in use.
	 */
	struct sdebug_defer sd_dp;
	struct scsi_cmnd *scmd;
	struct sdebug_device_access_info *i;
};

struct sdebug_scsi_cmd {
	spinlock_t   lock;
};

static atomic_t sdebug_cmnd_count;   /* number of incoming commands */
static atomic_t sdebug_completions;  /* count of deferred completions */
static atomic_t sdebug_miss_cpus;    /* submission + completion cpus differ */
static atomic_t sdebug_a_tsf;	     /* 'almost task set full' counter */
static atomic_t sdeb_inject_pending;
static atomic_t sdeb_mq_poll_count;  /* bumped when mq_poll returns > 0 */

struct opcode_info_t {
	u8 num_attached;	/* 0 if this is it (i.e. a leaf); use 0xff */
				/* for terminating element */
	u8 opcode;		/* if num_attached > 0, preferred */
	u16 sa;			/* service action */
	u32 flags;		/* OR-ed set of SDEB_F_* */
	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
	const struct opcode_info_t *arrp;  /* num_attached elements or NULL */
	u8 len_mask[16];	/* len_mask[0]-->cdb_len, then mask for cdb */
				/* 1 to min(cdb_len, 15); ignore cdb[15...] */
};
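
/*
 * Example of how len_mask is interpreted, using the INQUIRY entry of
 * opcode_info_arr below: len_mask[0] = 6 gives the cdb length and
 * len_mask[1] = 0xe3 allows only bits 7..5, 1 and 0 to be set in cdb[1].
 * In strict mode a cdb byte with a bit set outside its mask draws an
 * INVALID FIELD IN CDB sense.
 */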

/* SCSI opcodes (first byte of cdb) of interest mapped onto these indexes */
enum sdeb_opcode_index {
	SDEB_I_INVALID_OPCODE =	0,
	SDEB_I_INQUIRY = 1,
	SDEB_I_REPORT_LUNS = 2,
	SDEB_I_REQUEST_SENSE = 3,
	SDEB_I_TEST_UNIT_READY = 4,
	SDEB_I_MODE_SENSE = 5,		/* 6, 10 */
	SDEB_I_MODE_SELECT = 6,		/* 6, 10 */
	SDEB_I_LOG_SENSE = 7,
	SDEB_I_READ_CAPACITY = 8,	/* 10; 16 is in SA_IN(16) */
	SDEB_I_READ = 9,		/* 6, 10, 12, 16 */
	SDEB_I_WRITE = 10,		/* 6, 10, 12, 16 */
	SDEB_I_START_STOP = 11,
	SDEB_I_SERV_ACT_IN_16 = 12,	/* add ...SERV_ACT_IN_12 if needed */
	SDEB_I_SERV_ACT_OUT_16 = 13,	/* add ...SERV_ACT_OUT_12 if needed */
	SDEB_I_MAINT_IN = 14,
	SDEB_I_MAINT_OUT = 15,
	SDEB_I_VERIFY = 16,		/* VERIFY(10), VERIFY(16) */
	SDEB_I_VARIABLE_LEN = 17,	/* READ(32), WRITE(32), WR_SCAT(32) */
	SDEB_I_RESERVE = 18,		/* 6, 10 */
	SDEB_I_RELEASE = 19,		/* 6, 10 */
	SDEB_I_ALLOW_REMOVAL = 20,	/* PREVENT ALLOW MEDIUM REMOVAL */
	SDEB_I_REZERO_UNIT = 21,	/* REWIND in SSC */
	SDEB_I_ATA_PT = 22,		/* 12, 16 */
	SDEB_I_SEND_DIAG = 23,
	SDEB_I_UNMAP = 24,
	SDEB_I_WRITE_BUFFER = 25,
	SDEB_I_WRITE_SAME = 26,		/* 10, 16 */
	SDEB_I_SYNC_CACHE = 27,		/* 10, 16 */
	SDEB_I_COMP_WRITE = 28,
	SDEB_I_PRE_FETCH = 29,		/* 10, 16 */
	SDEB_I_ZONE_OUT = 30,		/* 0x94+SA; includes no data xfer */
	SDEB_I_ZONE_IN = 31,		/* 0x95+SA; all have data-in */
	SDEB_I_ATOMIC_WRITE_16 = 32,
	SDEB_I_LAST_ELEM_P1 = 33,	/* keep this last (previous + 1) */
};


static const unsigned char opcode_ind_arr[256] = {
/* 0x0; 0x0->0x1f: 6 byte cdbs */
	SDEB_I_TEST_UNIT_READY, SDEB_I_REZERO_UNIT, 0, SDEB_I_REQUEST_SENSE,
	    0, 0, 0, 0,
	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, 0,
	0, 0, SDEB_I_INQUIRY, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
	    SDEB_I_RELEASE,
	0, 0, SDEB_I_MODE_SENSE, SDEB_I_START_STOP, 0, SDEB_I_SEND_DIAG,
	    SDEB_I_ALLOW_REMOVAL, 0,
/* 0x20; 0x20->0x3f: 10 byte cdbs */
	0, 0, 0, 0, 0, SDEB_I_READ_CAPACITY, 0, 0,
	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, SDEB_I_VERIFY,
	0, 0, 0, 0, SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, 0,
	0, 0, 0, SDEB_I_WRITE_BUFFER, 0, 0, 0, 0,
/* 0x40; 0x40->0x5f: 10 byte cdbs */
	0, SDEB_I_WRITE_SAME, SDEB_I_UNMAP, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, SDEB_I_LOG_SENSE, 0, 0,
	0, 0, 0, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
	    SDEB_I_RELEASE,
	0, 0, SDEB_I_MODE_SENSE, 0, 0, 0, 0, 0,
/* 0x60; 0x60->0x7d are reserved, 0x7e is "extended cdb" */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, SDEB_I_VARIABLE_LEN,
/* 0x80; 0x80->0x9f: 16 byte cdbs */
	0, 0, 0, 0, 0, SDEB_I_ATA_PT, 0, 0,
	SDEB_I_READ, SDEB_I_COMP_WRITE, SDEB_I_WRITE, 0,
	0, 0, 0, SDEB_I_VERIFY,
	SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, SDEB_I_WRITE_SAME,
	SDEB_I_ZONE_OUT, SDEB_I_ZONE_IN, 0, 0,
	0, 0, 0, 0,
	SDEB_I_ATOMIC_WRITE_16, 0, SDEB_I_SERV_ACT_IN_16, SDEB_I_SERV_ACT_OUT_16,
/* 0xa0; 0xa0->0xbf: 12 byte cdbs */
	SDEB_I_REPORT_LUNS, SDEB_I_ATA_PT, 0, SDEB_I_MAINT_IN,
	     SDEB_I_MAINT_OUT, 0, 0, 0,
	SDEB_I_READ, 0 /* SDEB_I_SERV_ACT_OUT_12 */, SDEB_I_WRITE,
	     0 /* SDEB_I_SERV_ACT_IN_12 */, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
/* 0xc0; 0xc0->0xff: vendor specific */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
};
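
/*
 * Example lookup: a READ(10) cdb (opcode 0x28) indexes opcode_ind_arr[0x28],
 * yielding SDEB_I_READ (9); opcode_info_arr[9] is the READ(16) leader whose
 * read_iarr overflow array is then scanned for the 0x28 entry.
 */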

/*
 * The following "response" functions return the SCSI mid-level's 4 byte
 * tuple-in-an-int. To handle commands with an IMMED bit, for a faster
 * command completion, they can OR SDEG_RES_IMMED_MASK into their
 * return value.
 */
#define SDEG_RES_IMMED_MASK 0x40000000
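
/*
 * Illustrative sketch (not part of the driver): a response function for a
 * command whose IMMED bit is set can OR the mask above into an otherwise
 * good result so that the completion path skips most of the configured
 * delay. The helper below is hypothetical.
 */
static inline int sdeb_result_with_immed(int result, bool immed)
{
	return immed ? (result | SDEG_RES_IMMED_MASK) : result;
}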

static int resp_inquiry(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_report_luns(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_requests(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_mode_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_mode_select(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_log_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_readcap(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_read_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_scat(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_start_stop(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_readcap16(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_get_lba_status(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_get_stream_status(struct scsi_cmnd *scp,
				  struct sdebug_dev_info *devip);
static int resp_report_tgtpgs(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_unmap(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_rsup_opcodes(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_rsup_tmfs(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_verify(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_same_10(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_same_16(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_sync_cache(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_pre_fetch(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_report_zones(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_atomic_write(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_open_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_close_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_finish_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_rwp_zone(struct scsi_cmnd *, struct sdebug_dev_info *);

static int sdebug_do_add_host(bool mk_new_store);
static int sdebug_add_host_helper(int per_host_idx);
static void sdebug_do_remove_host(bool the_end);
static int sdebug_add_store(void);
static void sdebug_erase_store(int idx, struct sdeb_store_info *sip);
static void sdebug_erase_all_stores(bool apart_from_first);

static void sdebug_free_queued_cmd(struct sdebug_queued_cmd *sqcp);

/*
 * The following are overflow arrays for cdbs that "hit" the same index in
 * the opcode_info_arr array. The most time sensitive (or commonly used) cdb
 * should be placed in opcode_info_arr[], the others should be placed here.
 */
static const struct opcode_info_t msense_iarr[] = {
	{0, 0x1a, 0, F_D_IN, NULL, NULL,
	    {6,  0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t mselect_iarr[] = {
	{0, 0x15, 0, F_D_OUT, NULL, NULL,
	    {6,  0xf1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t read_iarr[] = {
	{0, 0x28, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(10) */
	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },
	{0, 0x8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL, /* READ(6) */
	    {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0xa8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(12) */
	    {12,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf,
	     0xc7, 0, 0, 0, 0} },
};

static const struct opcode_info_t write_iarr[] = {
	{0, 0x2a, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(10) */
	    NULL, {10,  0xfb, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7,
		   0, 0, 0, 0, 0, 0} },
	{0, 0xa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,   /* WRITE(6) */
	    NULL, {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0,
		   0, 0, 0} },
	{0, 0xaa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(12) */
	    NULL, {12,  0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		   0xbf, 0xc7, 0, 0, 0, 0} },
};

static const struct opcode_info_t verify_iarr[] = {
	{0, 0x2f, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,/* VERIFY(10) */
	    NULL, {10,  0xf7, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xff, 0xff, 0xc7,
		   0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t sa_in_16_iarr[] = {
	{0, 0x9e, 0x12, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL,
	    {16,  0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0, 0xc7} },	/* GET LBA STATUS(16) */
	{0, 0x9e, 0x16, F_SA_LOW | F_D_IN, resp_get_stream_status, NULL,
	    {16, 0x16, 0, 0, 0xff, 0xff, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff,
	     0, 0} },	/* GET STREAM STATUS */
};

static const struct opcode_info_t vl_iarr[] = {	/* VARIABLE LENGTH */
	{0, 0x7f, 0xb, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_dt0,
	    NULL, {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0xb, 0xfa,
		   0, 0xff, 0xff, 0xff, 0xff} },	/* WRITE(32) */
	{0, 0x7f, 0x11, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
	    NULL, {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x11, 0xf8,
		   0, 0xff, 0xff, 0x0, 0x0} },	/* WRITE SCATTERED(32) */
};

static const struct opcode_info_t maint_in_iarr[] = {	/* MAINT IN */
	{0, 0xa3, 0xc, F_SA_LOW | F_D_IN, resp_rsup_opcodes, NULL,
	    {12,  0xc, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
	     0xc7, 0, 0, 0, 0} }, /* REPORT SUPPORTED OPERATION CODES */
	{0, 0xa3, 0xd, F_SA_LOW | F_D_IN, resp_rsup_tmfs, NULL,
	    {12,  0xd, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
	     0, 0} },	/* REPORT SUPPORTED TASK MANAGEMENT FUNCTIONS */
};

static const struct opcode_info_t write_same_iarr[] = {
	{0, 0x93, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_write_same_16, NULL,
	    {16,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0x3f, 0xc7} },		/* WRITE SAME(16) */
};

static const struct opcode_info_t reserve_iarr[] = {
	{0, 0x16, 0, F_D_OUT, NULL, NULL,		/* RESERVE(6) */
	    {6,  0x1f, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t release_iarr[] = {
	{0, 0x17, 0, F_D_OUT, NULL, NULL,		/* RELEASE(6) */
	    {6,  0x1f, 0xff, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t sync_cache_iarr[] = {
	{0, 0x91, 0, F_SYNC_DELAY | F_M_ACCESS, resp_sync_cache, NULL,
	    {16,  0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* SYNC_CACHE (16) */
};

static const struct opcode_info_t pre_fetch_iarr[] = {
	{0, 0x90, 0, F_SYNC_DELAY | FF_MEDIA_IO, resp_pre_fetch, NULL,
	    {16,  0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* PRE-FETCH (16) */
};

static const struct opcode_info_t zone_out_iarr[] = {	/* ZONE OUT(16) */
	{0, 0x94, 0x1, F_SA_LOW | F_M_ACCESS, resp_close_zone, NULL,
	    {16, 0x1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },	/* CLOSE ZONE */
	{0, 0x94, 0x2, F_SA_LOW | F_M_ACCESS, resp_finish_zone, NULL,
	    {16, 0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },	/* FINISH ZONE */
	{0, 0x94, 0x4, F_SA_LOW | F_M_ACCESS, resp_rwp_zone, NULL,
	    {16, 0x4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },  /* RESET WRITE POINTER */
};

static const struct opcode_info_t zone_in_iarr[] = {	/* ZONE IN(16) */
	{0, 0x95, 0x6, F_SA_LOW | F_D_IN | F_M_ACCESS, NULL, NULL,
	    {16, 0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* REPORT ZONES */
};


/* This array is accessed via SDEB_I_* values. Make sure all are mapped,
 * plus the terminating elements for logic that scans this table such as
 * REPORT SUPPORTED OPERATION CODES. */
static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEM_P1 + 1] = {
/* 0 */
	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL,	/* unknown opcodes */
	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x12, 0, FF_RESPOND | F_D_IN, resp_inquiry, NULL, /* INQUIRY */
	    {6,  0xe3, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0xa0, 0, FF_RESPOND | F_D_IN, resp_report_luns, NULL,
	    {12,  0xe3, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
	     0, 0} },					/* REPORT LUNS */
	{0, 0x3, 0, FF_RESPOND | F_D_IN, resp_requests, NULL,
	    {6,  0xe1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x0, 0, F_M_ACCESS | F_RL_WLUN_OK, NULL, NULL,/* TEST UNIT READY */
	    {6,  0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
/* 5 */
	{ARRAY_SIZE(msense_iarr), 0x5a, 0, F_D_IN,	/* MODE SENSE(10) */
	    resp_mode_sense, msense_iarr, {10,  0xf8, 0xff, 0xff, 0, 0, 0,
		0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(mselect_iarr), 0x55, 0, F_D_OUT,	/* MODE SELECT(10) */
	    resp_mode_select, mselect_iarr, {10,  0xf1, 0, 0, 0, 0, 0, 0xff,
		0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
	{0, 0x4d, 0, F_D_IN, resp_log_sense, NULL,	/* LOG SENSE */
	    {10,  0xe3, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0,
	     0, 0, 0} },
	{0, 0x25, 0, F_D_IN, resp_readcap, NULL,    /* READ CAPACITY(10) */
	    {10,  0xe1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0x1, 0xc7, 0, 0, 0, 0,
	     0, 0} },
	{ARRAY_SIZE(read_iarr), 0x88, 0, F_D_IN | FF_MEDIA_IO, /* READ(16) */
	    resp_read_dt0, read_iarr, {16,  0xfe, 0xff, 0xff, 0xff, 0xff,
	    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
/* 10 */
	{ARRAY_SIZE(write_iarr), 0x8a, 0, F_D_OUT | FF_MEDIA_IO,
	    resp_write_dt0, write_iarr,			/* WRITE(16) */
		{16,  0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
	{0, 0x1b, 0, F_SSU_DELAY, resp_start_stop, NULL,/* START STOP UNIT */
	    {6,  0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(sa_in_16_iarr), 0x9e, 0x10, F_SA_LOW | F_D_IN,
	    resp_readcap16, sa_in_16_iarr, /* SA_IN(16), READ CAPACITY(16) */
		{16,  0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		 0xff, 0xff, 0xff, 0xff, 0x1, 0xc7} },
	{0, 0x9f, 0x12, F_SA_LOW | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
	    NULL, {16,  0x12, 0xf9, 0x0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff,
	    0xff, 0xff, 0xff, 0xff, 0xc7} },  /* SA_OUT(16), WRITE SCAT(16) */
	{ARRAY_SIZE(maint_in_iarr), 0xa3, 0xa, F_SA_LOW | F_D_IN,
	    resp_report_tgtpgs,	/* MAINT IN, REPORT TARGET PORT GROUPS */
		maint_in_iarr, {12,  0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff,
				0xff, 0, 0xc7, 0, 0, 0, 0} },
/* 15 */
	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */
	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(verify_iarr), 0x8f, 0,
	    F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,	/* VERIFY(16) */
	    verify_iarr, {16,  0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
			  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },
	{ARRAY_SIZE(vl_iarr), 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_MEDIA_IO,
	    resp_read_dt0, vl_iarr,	/* VARIABLE LENGTH, READ(32) */
	    {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x9, 0xfe, 0, 0xff, 0xff,
	     0xff, 0xff} },
	{ARRAY_SIZE(reserve_iarr), 0x56, 0, F_D_OUT,
	    NULL, reserve_iarr,	/* RESERVE(10) <no response function> */
	    {10,  0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
	     0} },
	{ARRAY_SIZE(release_iarr), 0x57, 0, F_D_OUT,
	    NULL, release_iarr, /* RELEASE(10) <no response function> */
	    {10,  0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
	     0} },
/* 20 */
	{0, 0x1e, 0, 0, NULL, NULL, /* ALLOW REMOVAL */
	    {6,  0, 0, 0, 0x3, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x1, 0, 0, resp_start_stop, NULL, /* REWIND ?? */
	    {6,  0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */
	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x1d, 0, F_D_OUT, NULL, NULL,	/* SEND DIAGNOSTIC */
	    {6,  0xf7, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x42, 0, F_D_OUT | FF_MEDIA_IO, resp_unmap, NULL, /* UNMAP */
	    {10,  0x1, 0, 0, 0, 0, 0x3f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
/* 25 */
	{0, 0x3b, 0, F_D_OUT_MAYBE, resp_write_buffer, NULL,
	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },			/* WRITE_BUFFER */
	{ARRAY_SIZE(write_same_iarr), 0x41, 0, F_D_OUT_MAYBE | FF_MEDIA_IO,
	    resp_write_same_10, write_same_iarr,	/* WRITE SAME(10) */
		{10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0,
		 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(sync_cache_iarr), 0x35, 0, F_SYNC_DELAY | F_M_ACCESS,
	    resp_sync_cache, sync_cache_iarr,
	    {10,  0x7, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },			/* SYNC_CACHE (10) */
	{0, 0x89, 0, F_D_OUT | FF_MEDIA_IO, resp_comp_write, NULL,
	    {16,  0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0,
	     0, 0xff, 0x3f, 0xc7} },		/* COMPARE AND WRITE */
	{ARRAY_SIZE(pre_fetch_iarr), 0x34, 0, F_SYNC_DELAY | FF_MEDIA_IO,
	    resp_pre_fetch, pre_fetch_iarr,
	    {10,  0x2, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },			/* PRE-FETCH (10) */

/* 30 */
	{ARRAY_SIZE(zone_out_iarr), 0x94, 0x3, F_SA_LOW | F_M_ACCESS,
	    resp_open_zone, zone_out_iarr, /* ZONE_OUT(16), OPEN ZONE) */
		{16,  0x3 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		 0xff, 0xff, 0x0, 0x0, 0xff, 0xff, 0x1, 0xc7} },
	{ARRAY_SIZE(zone_in_iarr), 0x95, 0x0, F_SA_LOW | F_M_ACCESS,
	    resp_report_zones, zone_in_iarr, /* ZONE_IN(16), REPORT ZONES) */
		{16,  0x0 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xc7} },
/* 31 */
	{0, 0x0, 0x0, F_D_OUT | FF_MEDIA_IO,
	    resp_atomic_write, NULL, /* ATOMIC WRITE 16 */
		{16,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff} },
/* sentinel */
	{0xff, 0, 0, 0, NULL, NULL,		/* terminating element */
	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static int sdebug_num_hosts;
static int sdebug_add_host = DEF_NUM_HOST;  /* in sysfs this is relative */
static int sdebug_ato = DEF_ATO;
static int sdebug_cdb_len = DEF_CDB_LEN;
static int sdebug_jdelay = DEF_JDELAY;	/* if > 0 then unit is jiffies */
static int sdebug_dev_size_mb = DEF_DEV_SIZE_PRE_INIT;
static int sdebug_dif = DEF_DIF;
static int sdebug_dix = DEF_DIX;
static int sdebug_dsense = DEF_D_SENSE;
static int sdebug_every_nth = DEF_EVERY_NTH;
static int sdebug_fake_rw = DEF_FAKE_RW;
static unsigned int sdebug_guard = DEF_GUARD;
static int sdebug_host_max_queue;	/* per host */
static int sdebug_lowest_aligned = DEF_LOWEST_ALIGNED;
static int sdebug_max_luns = DEF_MAX_LUNS;
static int sdebug_max_queue = SDEBUG_CANQUEUE;	/* per submit queue */
static unsigned int sdebug_medium_error_start = OPT_MEDIUM_ERR_ADDR;
static int sdebug_medium_error_count = OPT_MEDIUM_ERR_NUM;
static int sdebug_ndelay = DEF_NDELAY;	/* if > 0 then unit is nanoseconds */
static int sdebug_no_lun_0 = DEF_NO_LUN_0;
static int sdebug_no_uld;
static int sdebug_num_parts = DEF_NUM_PARTS;
static int sdebug_num_tgts = DEF_NUM_TGTS; /* targets per host */
static int sdebug_opt_blks = DEF_OPT_BLKS;
static int sdebug_opts = DEF_OPTS;
static int sdebug_physblk_exp = DEF_PHYSBLK_EXP;
static int sdebug_opt_xferlen_exp = DEF_OPT_XFERLEN_EXP;
static int sdebug_ptype = DEF_PTYPE; /* SCSI peripheral device type */
static int sdebug_scsi_level = DEF_SCSI_LEVEL;
static int sdebug_sector_size = DEF_SECTOR_SIZE;
static int sdeb_tur_ms_to_ready = DEF_TUR_MS_TO_READY;
static int sdebug_virtual_gb = DEF_VIRTUAL_GB;
static int sdebug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
static unsigned int sdebug_lbpu = DEF_LBPU;
static unsigned int sdebug_lbpws = DEF_LBPWS;
static unsigned int sdebug_lbpws10 = DEF_LBPWS10;
static unsigned int sdebug_lbprz = DEF_LBPRZ;
static unsigned int sdebug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
static unsigned int sdebug_unmap_granularity = DEF_UNMAP_GRANULARITY;
static unsigned int sdebug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
static unsigned int sdebug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
static unsigned int sdebug_write_same_length = DEF_WRITESAME_LENGTH;
static unsigned int sdebug_atomic_wr = DEF_ATOMIC_WR;
static unsigned int sdebug_atomic_wr_max_length = DEF_ATOMIC_WR_MAX_LENGTH;
static unsigned int sdebug_atomic_wr_align = DEF_ATOMIC_WR_ALIGN;
static unsigned int sdebug_atomic_wr_gran = DEF_ATOMIC_WR_GRAN;
static unsigned int sdebug_atomic_wr_max_length_bndry =
			DEF_ATOMIC_WR_MAX_LENGTH_BNDRY;
static unsigned int sdebug_atomic_wr_max_bndry = DEF_ATOMIC_WR_MAX_BNDRY;
static int sdebug_uuid_ctl = DEF_UUID_CTL;
static bool sdebug_random = DEF_RANDOM;
static bool sdebug_per_host_store = DEF_PER_HOST_STORE;
static bool sdebug_removable = DEF_REMOVABLE;
static bool sdebug_clustering;
static bool sdebug_host_lock = DEF_HOST_LOCK;
static bool sdebug_strict = DEF_STRICT;
static bool sdebug_any_injecting_opt;
static bool sdebug_no_rwlock;
static bool sdebug_verbose;
static bool have_dif_prot;
static bool write_since_sync;
static bool sdebug_statistics = DEF_STATISTICS;
static bool sdebug_wp;
static bool sdebug_allow_restart;
static enum {
	BLK_ZONED_NONE	= 0,
	BLK_ZONED_HA	= 1,
	BLK_ZONED_HM	= 2,
} sdeb_zbc_model = BLK_ZONED_NONE;
static char *sdeb_zbc_model_s;

enum sam_lun_addr_method {SAM_LUN_AM_PERIPHERAL = 0x0,
			  SAM_LUN_AM_FLAT = 0x1,
			  SAM_LUN_AM_LOGICAL_UNIT = 0x2,
			  SAM_LUN_AM_EXTENDED = 0x3};
static enum sam_lun_addr_method sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
static int sdebug_lun_am_i = (int)SAM_LUN_AM_PERIPHERAL;

static unsigned int sdebug_store_sectors;
static sector_t sdebug_capacity;	/* in sectors */

/* old BIOS stuff, kernel may get rid of them but some mode sense pages
   may still need them */
static int sdebug_heads;		/* heads per disk */
static int sdebug_cylinders_per;	/* cylinders per surface */
static int sdebug_sectors_per;		/* sectors per cylinder */

static LIST_HEAD(sdebug_host_list);
static DEFINE_MUTEX(sdebug_host_list_mutex);

static struct xarray per_store_arr;
static struct xarray *per_store_ap = &per_store_arr;
static int sdeb_first_idx = -1;		/* invalid index ==> none created */
static int sdeb_most_recent_idx = -1;
static DEFINE_RWLOCK(sdeb_fake_rw_lck);	/* need a RW lock when fake_rw=1 */

static unsigned long map_size;
static int num_aborts;
static int num_dev_resets;
static int num_target_resets;
static int num_bus_resets;
static int num_host_resets;
static int dix_writes;
static int dix_reads;
static int dif_errors;

/* ZBC global data */
static bool sdeb_zbc_in_use;	/* true for host-aware and host-managed disks */
static int sdeb_zbc_zone_cap_mb;
static int sdeb_zbc_zone_size_mb;
static int sdeb_zbc_max_open = DEF_ZBC_MAX_OPEN_ZONES;
static int sdeb_zbc_nr_conv = DEF_ZBC_NR_CONV_ZONES;

static int submit_queues = DEF_SUBMIT_QUEUES;  /* > 1 for multi-queue (mq) */
static int poll_queues; /* io_uring iopoll interface */

static atomic_long_t writes_by_group_number[64];

static char sdebug_proc_name[] = MY_NAME;
static const char *my_name = MY_NAME;

static const struct bus_type pseudo_lld_bus;

static struct device_driver sdebug_driverfs_driver = {
	.name 		= sdebug_proc_name,
	.bus		= &pseudo_lld_bus,
};

static const int check_condition_result =
	SAM_STAT_CHECK_CONDITION;

static const int illegal_condition_result =
	(DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;

static const int device_qfull_result =
	(DID_ABORT << 16) | SAM_STAT_TASK_SET_FULL;

static const int condition_met_result = SAM_STAT_CONDITION_MET;

static struct dentry *sdebug_debugfs_root;
static ASYNC_DOMAIN_EXCLUSIVE(sdebug_async_domain);

static void sdebug_err_free(struct rcu_head *head)
{
	struct sdebug_err_inject *inject =
		container_of(head, typeof(*inject), rcu);

	kfree(inject);
}

static void sdebug_err_add(struct scsi_device *sdev, struct sdebug_err_inject *new)
{
	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdev->hostdata;
	struct sdebug_err_inject *err;

	spin_lock(&devip->list_lock);
	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
		if (err->type == new->type && err->cmd == new->cmd) {
			list_del_rcu(&err->list);
			call_rcu(&err->rcu, sdebug_err_free);
		}
	}

	list_add_tail_rcu(&new->list, &devip->inject_err_list);
	spin_unlock(&devip->list_lock);
}

static int sdebug_err_remove(struct scsi_device *sdev, const char *buf, size_t count)
{
	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdev->hostdata;
	struct sdebug_err_inject *err;
	int type;
	unsigned char cmd;

	if (sscanf(buf, "- %d %hhx", &type, &cmd) != 2) {
		kfree(buf);
		return -EINVAL;
	}

	spin_lock(&devip->list_lock);
	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
		if (err->type == type && err->cmd == cmd) {
			list_del_rcu(&err->list);
			call_rcu(&err->rcu, sdebug_err_free);
			spin_unlock(&devip->list_lock);
			kfree(buf);
			return count;
		}
	}
	spin_unlock(&devip->list_lock);

	kfree(buf);
	return -EINVAL;
}

static int sdebug_error_show(struct seq_file *m, void *p)
{
	struct scsi_device *sdev = (struct scsi_device *)m->private;
	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdev->hostdata;
	struct sdebug_err_inject *err;

	seq_puts(m, "Type\tCount\tCommand\n");

	rcu_read_lock();
	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
		switch (err->type) {
		case ERR_TMOUT_CMD:
		case ERR_ABORT_CMD_FAILED:
		case ERR_LUN_RESET_FAILED:
			seq_printf(m, "%d\t%d\t0x%x\n", err->type, err->cnt,
				err->cmd);
		break;

		case ERR_FAIL_QUEUE_CMD:
			seq_printf(m, "%d\t%d\t0x%x\t0x%x\n", err->type,
				err->cnt, err->cmd, err->queuecmd_ret);
		break;

		case ERR_FAIL_CMD:
			seq_printf(m, "%d\t%d\t0x%x\t0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
				err->type, err->cnt, err->cmd,
				err->host_byte, err->driver_byte,
				err->status_byte, err->sense_key,
				err->asc, err->asq);
		break;
		}
	}
	rcu_read_unlock();

	return 0;
}

static int sdebug_error_open(struct inode *inode, struct file *file)
{
	return single_open(file, sdebug_error_show, inode->i_private);
}

static ssize_t sdebug_error_write(struct file *file, const char __user *ubuf,
		size_t count, loff_t *ppos)
{
	char *buf;
	unsigned int inject_type;
	struct sdebug_err_inject *inject;
	struct scsi_device *sdev = (struct scsi_device *)file->f_inode->i_private;

	buf = kzalloc(count + 1, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, ubuf, count)) {
		kfree(buf);
		return -EFAULT;
	}

	if (buf[0] == '-')
		return sdebug_err_remove(sdev, buf, count);

	if (sscanf(buf, "%d", &inject_type) != 1) {
		kfree(buf);
		return -EINVAL;
	}

	inject = kzalloc(sizeof(struct sdebug_err_inject), GFP_KERNEL);
	if (!inject) {
		kfree(buf);
		return -ENOMEM;
	}

	switch (inject_type) {
	case ERR_TMOUT_CMD:
	case ERR_ABORT_CMD_FAILED:
	case ERR_LUN_RESET_FAILED:
		if (sscanf(buf, "%d %d %hhx", &inject->type, &inject->cnt,
			   &inject->cmd) != 3)
			goto out_error;
	break;

	case ERR_FAIL_QUEUE_CMD:
		if (sscanf(buf, "%d %d %hhx %x", &inject->type, &inject->cnt,
			   &inject->cmd, &inject->queuecmd_ret) != 4)
			goto out_error;
	break;

	case ERR_FAIL_CMD:
		if (sscanf(buf, "%d %d %hhx %hhx %hhx %hhx %hhx %hhx %hhx",
			   &inject->type, &inject->cnt, &inject->cmd,
			   &inject->host_byte, &inject->driver_byte,
			   &inject->status_byte, &inject->sense_key,
			   &inject->asc, &inject->asq) != 9)
			goto out_error;
	break;

	default:
		goto out_error;
	break;
	}

	kfree(buf);
	sdebug_err_add(sdev, inject);

	return count;

out_error:
	kfree(buf);
	kfree(inject);
	return -EINVAL;
}

static const struct file_operations sdebug_error_fops = {
	.open	= sdebug_error_open,
	.read	= seq_read,
	.write	= sdebug_error_write,
	.release = single_release,
};

static int sdebug_target_reset_fail_show(struct seq_file *m, void *p)
{
	struct scsi_target *starget = (struct scsi_target *)m->private;
	struct sdebug_target_info *targetip =
		(struct sdebug_target_info *)starget->hostdata;

	if (targetip)
		seq_printf(m, "%c\n", targetip->reset_fail ? 'Y' : 'N');

	return 0;
}

static int sdebug_target_reset_fail_open(struct inode *inode, struct file *file)
{
	return single_open(file, sdebug_target_reset_fail_show, inode->i_private);
}

static ssize_t sdebug_target_reset_fail_write(struct file *file,
		const char __user *ubuf, size_t count, loff_t *ppos)
{
	int ret;
	struct scsi_target *starget =
		(struct scsi_target *)file->f_inode->i_private;
	struct sdebug_target_info *targetip =
		(struct sdebug_target_info *)starget->hostdata;

	if (targetip) {
		ret = kstrtobool_from_user(ubuf, count, &targetip->reset_fail);
		return ret < 0 ? ret : count;
	}
	return -ENODEV;
}

static const struct file_operations sdebug_target_reset_fail_fops = {
	.open	= sdebug_target_reset_fail_open,
	.read	= seq_read,
	.write	= sdebug_target_reset_fail_write,
	.release = single_release,
};

static int sdebug_target_alloc(struct scsi_target *starget)
{
	struct sdebug_target_info *targetip;

	targetip = kzalloc(sizeof(struct sdebug_target_info), GFP_KERNEL);
	if (!targetip)
		return -ENOMEM;

	async_synchronize_full_domain(&sdebug_async_domain);

	targetip->debugfs_entry = debugfs_create_dir(dev_name(&starget->dev),
				sdebug_debugfs_root);

	debugfs_create_file("fail_reset", 0600, targetip->debugfs_entry, starget,
				&sdebug_target_reset_fail_fops);

	starget->hostdata = targetip;

	return 0;
}

static void sdebug_tartget_cleanup_async(void *data, async_cookie_t cookie)
{
	struct sdebug_target_info *targetip = data;

	debugfs_remove(targetip->debugfs_entry);
	kfree(targetip);
}

static void sdebug_target_destroy(struct scsi_target *starget)
{
	struct sdebug_target_info *targetip;

	targetip = (struct sdebug_target_info *)starget->hostdata;
	if (targetip) {
		starget->hostdata = NULL;
		async_schedule_domain(sdebug_tartget_cleanup_async, targetip,
				&sdebug_async_domain);
	}
}

/* Only do the extra work involved in logical block provisioning if one or
 * more of the lbpu, lbpws or lbpws10 parameters are given and we are doing
 * real reads and writes (i.e. not skipping them for speed).
 */
static inline bool scsi_debug_lbp(void)
{
	return 0 == sdebug_fake_rw &&
		(sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10);
}

static inline bool scsi_debug_atomic_write(void)
{
	return sdebug_fake_rw == 0 && sdebug_atomic_wr;
}

static void *lba2fake_store(struct sdeb_store_info *sip,
			    unsigned long long lba)
{
	struct sdeb_store_info *lsip = sip;

	lba = do_div(lba, sdebug_store_sectors);
	if (!sip || !sip->storep) {
		WARN_ON_ONCE(true);
		lsip = xa_load(per_store_ap, 0);  /* should never be NULL */
	}
	return lsip->storep + lba * sdebug_sector_size;
}
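
/*
 * Worked example: with sdebug_store_sectors = 16384 and lba = 20000,
 * do_div() above leaves the remainder 3616 in lba, so accesses past the
 * end of the (possibly virtual_gb expanded) store wrap back onto its start.
 */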

static struct t10_pi_tuple *dif_store(struct sdeb_store_info *sip,
				      sector_t sector)
{
	sector = sector_div(sector, sdebug_store_sectors);

	return sip->dif_storep + sector;
}

static void sdebug_max_tgts_luns(void)
{
	struct sdebug_host_info *sdbg_host;
	struct Scsi_Host *hpnt;

	mutex_lock(&sdebug_host_list_mutex);
	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
		hpnt = sdbg_host->shost;
		if ((hpnt->this_id >= 0) &&
		    (sdebug_num_tgts > hpnt->this_id))
			hpnt->max_id = sdebug_num_tgts + 1;
		else
			hpnt->max_id = sdebug_num_tgts;
		/* sdebug_max_luns; */
		hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
	}
	mutex_unlock(&sdebug_host_list_mutex);
}

enum sdeb_cmd_data {SDEB_IN_DATA = 0, SDEB_IN_CDB = 1};

/* Set in_bit to -1 to indicate no bit position of invalid field */
static void mk_sense_invalid_fld(struct scsi_cmnd *scp,
				 enum sdeb_cmd_data c_d,
				 int in_byte, int in_bit)
{
	unsigned char *sbuff;
	u8 sks[4];
	int sl, asc;

	sbuff = scp->sense_buffer;
	if (!sbuff) {
		sdev_printk(KERN_ERR, scp->device,
			    "%s: sense_buffer is NULL\n", __func__);
		return;
	}
	asc = c_d ? INVALID_FIELD_IN_CDB : INVALID_FIELD_IN_PARAM_LIST;
	memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
	scsi_build_sense(scp, sdebug_dsense, ILLEGAL_REQUEST, asc, 0);
	memset(sks, 0, sizeof(sks));
	sks[0] = 0x80;
	if (c_d)
		sks[0] |= 0x40;
	if (in_bit >= 0) {
		sks[0] |= 0x8;
		sks[0] |= 0x7 & in_bit;
	}
	put_unaligned_be16(in_byte, sks + 1);
	if (sdebug_dsense) {
		sl = sbuff[7] + 8;
		sbuff[7] = sl;
		sbuff[sl] = 0x2;
		sbuff[sl + 1] = 0x6;
		memcpy(sbuff + sl + 4, sks, 3);
	} else
		memcpy(sbuff + 15, sks, 3);
	if (sdebug_verbose)
		sdev_printk(KERN_INFO, scp->device, "%s:  [sense_key,asc,ascq"
			    "]: [0x5,0x%x,0x0] %c byte=%d, bit=%d\n",
			    my_name, asc, c_d ? 'C' : 'D', in_byte, in_bit);
}
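
/*
 * Example of the SKS encoding built above: for c_d = SDEB_IN_CDB,
 * in_byte = 1 and in_bit = 4, sks[0] = 0x80 | 0x40 | 0x08 | 0x04 = 0xcc
 * (SKSV, C/D and BPV set, bit pointer 4) and sks[1..2] carry the
 * big-endian field pointer 0x0001.
 */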
1316 
mk_sense_buffer(struct scsi_cmnd * scp,int key,int asc,int asq)1317 static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq)
1318 {
1319 	if (!scp->sense_buffer) {
1320 		sdev_printk(KERN_ERR, scp->device,
1321 			    "%s: sense_buffer is NULL\n", __func__);
1322 		return;
1323 	}
1324 	memset(scp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
1325 
1326 	scsi_build_sense(scp, sdebug_dsense, key, asc, asq);
1327 
1328 	if (sdebug_verbose)
1329 		sdev_printk(KERN_INFO, scp->device,
1330 			    "%s:  [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
1331 			    my_name, key, asc, asq);
1332 }
1333 
mk_sense_invalid_opcode(struct scsi_cmnd * scp)1334 static void mk_sense_invalid_opcode(struct scsi_cmnd *scp)
1335 {
1336 	mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
1337 }
1338 
scsi_debug_ioctl(struct scsi_device * dev,unsigned int cmd,void __user * arg)1339 static int scsi_debug_ioctl(struct scsi_device *dev, unsigned int cmd,
1340 			    void __user *arg)
1341 {
1342 	if (sdebug_verbose) {
1343 		if (0x1261 == cmd)
1344 			sdev_printk(KERN_INFO, dev,
1345 				    "%s: BLKFLSBUF [0x1261]\n", __func__);
1346 		else if (0x5331 == cmd)
1347 			sdev_printk(KERN_INFO, dev,
1348 				    "%s: CDROM_GET_CAPABILITY [0x5331]\n",
1349 				    __func__);
1350 		else
1351 			sdev_printk(KERN_INFO, dev, "%s: cmd=0x%x\n",
1352 				    __func__, cmd);
1353 	}
1354 	return -EINVAL;
1355 	/* return -ENOTTY; // correct return but upsets fdisk */
1356 }
1357 
config_cdb_len(struct scsi_device * sdev)1358 static void config_cdb_len(struct scsi_device *sdev)
1359 {
1360 	switch (sdebug_cdb_len) {
1361 	case 6:	/* suggest 6 byte READ, WRITE and MODE SENSE/SELECT */
1362 		sdev->use_10_for_rw = false;
1363 		sdev->use_16_for_rw = false;
1364 		sdev->use_10_for_ms = false;
1365 		break;
1366 	case 10: /* suggest 10 byte RWs and 6 byte MODE SENSE/SELECT */
1367 		sdev->use_10_for_rw = true;
1368 		sdev->use_16_for_rw = false;
1369 		sdev->use_10_for_ms = false;
1370 		break;
1371 	case 12: /* suggest 10 byte RWs and 10 byte MODE SENSE/SELECT */
1372 		sdev->use_10_for_rw = true;
1373 		sdev->use_16_for_rw = false;
1374 		sdev->use_10_for_ms = true;
1375 		break;
1376 	case 16:
1377 		sdev->use_10_for_rw = false;
1378 		sdev->use_16_for_rw = true;
1379 		sdev->use_10_for_ms = true;
1380 		break;
1381 	case 32: /* No knobs to suggest this so same as 16 for now */
1382 		sdev->use_10_for_rw = false;
1383 		sdev->use_16_for_rw = true;
1384 		sdev->use_10_for_ms = true;
1385 		break;
1386 	default:
1387 		pr_warn("unexpected cdb_len=%d, force to 10\n",
1388 			sdebug_cdb_len);
1389 		sdev->use_10_for_rw = true;
1390 		sdev->use_16_for_rw = false;
1391 		sdev->use_10_for_ms = false;
1392 		sdebug_cdb_len = 10;
1393 		break;
1394 	}
1395 }
1396 
all_config_cdb_len(void)1397 static void all_config_cdb_len(void)
1398 {
1399 	struct sdebug_host_info *sdbg_host;
1400 	struct Scsi_Host *shost;
1401 	struct scsi_device *sdev;
1402 
1403 	mutex_lock(&sdebug_host_list_mutex);
1404 	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
1405 		shost = sdbg_host->shost;
1406 		shost_for_each_device(sdev, shost) {
1407 			config_cdb_len(sdev);
1408 		}
1409 	}
1410 	mutex_unlock(&sdebug_host_list_mutex);
1411 }
1412 
clear_luns_changed_on_target(struct sdebug_dev_info * devip)1413 static void clear_luns_changed_on_target(struct sdebug_dev_info *devip)
1414 {
1415 	struct sdebug_host_info *sdhp = devip->sdbg_host;
1416 	struct sdebug_dev_info *dp;
1417 
1418 	list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
1419 		if ((devip->sdbg_host == dp->sdbg_host) &&
1420 		    (devip->target == dp->target)) {
1421 			clear_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
1422 		}
1423 	}
1424 }
1425 
make_ua(struct scsi_cmnd * scp,struct sdebug_dev_info * devip)1426 static int make_ua(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1427 {
1428 	int k;
1429 
1430 	k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS);
1431 	if (k != SDEBUG_NUM_UAS) {
1432 		const char *cp = NULL;
1433 
1434 		switch (k) {
1435 		case SDEBUG_UA_POR:
1436 			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
1437 					POWER_ON_RESET_ASCQ);
1438 			if (sdebug_verbose)
1439 				cp = "power on reset";
1440 			break;
1441 		case SDEBUG_UA_POOCCUR:
1442 			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
1443 					POWER_ON_OCCURRED_ASCQ);
1444 			if (sdebug_verbose)
1445 				cp = "power on occurred";
1446 			break;
1447 		case SDEBUG_UA_BUS_RESET:
1448 			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
1449 					BUS_RESET_ASCQ);
1450 			if (sdebug_verbose)
1451 				cp = "bus reset";
1452 			break;
1453 		case SDEBUG_UA_MODE_CHANGED:
1454 			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
1455 					MODE_CHANGED_ASCQ);
1456 			if (sdebug_verbose)
1457 				cp = "mode parameters changed";
1458 			break;
1459 		case SDEBUG_UA_CAPACITY_CHANGED:
1460 			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
1461 					CAPACITY_CHANGED_ASCQ);
1462 			if (sdebug_verbose)
1463 				cp = "capacity data changed";
1464 			break;
1465 		case SDEBUG_UA_MICROCODE_CHANGED:
1466 			mk_sense_buffer(scp, UNIT_ATTENTION,
1467 					TARGET_CHANGED_ASC,
1468 					MICROCODE_CHANGED_ASCQ);
1469 			if (sdebug_verbose)
1470 				cp = "microcode has been changed";
1471 			break;
1472 		case SDEBUG_UA_MICROCODE_CHANGED_WO_RESET:
1473 			mk_sense_buffer(scp, UNIT_ATTENTION,
1474 					TARGET_CHANGED_ASC,
1475 					MICROCODE_CHANGED_WO_RESET_ASCQ);
1476 			if (sdebug_verbose)
1477 				cp = "microcode has been changed without reset";
1478 			break;
1479 		case SDEBUG_UA_LUNS_CHANGED:
1480 			/*
1481 			 * SPC-3 behavior is to report a UNIT ATTENTION with
1482 			 * ASC/ASCQ REPORTED LUNS DATA HAS CHANGED on every LUN
1483 			 * on the target, until a REPORT LUNS command is
1484 			 * received.  SPC-4 behavior is to report it only once.
1485 			 * NOTE:  sdebug_scsi_level does not use the same
1486 			 * values as struct scsi_device->scsi_level.
1487 			 */
1488 			if (sdebug_scsi_level >= 6)	/* SPC-4 and above */
1489 				clear_luns_changed_on_target(devip);
1490 			mk_sense_buffer(scp, UNIT_ATTENTION,
1491 					TARGET_CHANGED_ASC,
1492 					LUNS_CHANGED_ASCQ);
1493 			if (sdebug_verbose)
1494 				cp = "reported luns data has changed";
1495 			break;
1496 		default:
1497 			pr_warn("unexpected unit attention code=%d\n", k);
1498 			if (sdebug_verbose)
1499 				cp = "unknown";
1500 			break;
1501 		}
1502 		clear_bit(k, devip->uas_bm);
1503 		if (sdebug_verbose)
1504 			sdev_printk(KERN_INFO, scp->device,
1505 				   "%s reports: Unit attention: %s\n",
1506 				   my_name, cp);
1507 		return check_condition_result;
1508 	}
1509 	return 0;
1510 }
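/*
 * Calling-pattern sketch (hypothetical caller, not part of this driver):
 * response handlers are expected to surface any pending unit attention
 * before acting on the command:
 *
 *	static int example_resp(struct scsi_cmnd *scp,
 *				struct sdebug_dev_info *devip)
 *	{
 *		int errsts = make_ua(scp, devip);
 *
 *		if (errsts)
 *			return errsts;
 *		return 0;
 *	}
 *
 * A non-zero return means the sense buffer already holds the queued
 * unit attention and check_condition_result should propagate upward.
 */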
1511 
1512 /* Build SCSI "data-in" buffer. Returns 0 if ok else (DID_ERROR << 16). */
1513 static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
1514 				int arr_len)
1515 {
1516 	int act_len;
1517 	struct scsi_data_buffer *sdb = &scp->sdb;
1518 
1519 	if (!sdb->length)
1520 		return 0;
1521 	if (scp->sc_data_direction != DMA_FROM_DEVICE)
1522 		return DID_ERROR << 16;
1523 
1524 	act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
1525 				      arr, arr_len);
1526 	scsi_set_resid(scp, scsi_bufflen(scp) - act_len);
1527 
1528 	return 0;
1529 }
1530 
1531 /* Partial build of SCSI "data-in" buffer. Returns 0 if ok else
1532  * (DID_ERROR << 16). Writes at offset off_dst into the data-in buffer;
1533  * when called multiple times, the offsets need not be in ascending
1534  * order. Assumes resid was set to scsi_bufflen() prior to the first call.
1535  */
1536 static int p_fill_from_dev_buffer(struct scsi_cmnd *scp, const void *arr,
1537 				  int arr_len, unsigned int off_dst)
1538 {
1539 	unsigned int act_len, n;
1540 	struct scsi_data_buffer *sdb = &scp->sdb;
1541 	off_t skip = off_dst;
1542 
1543 	if (sdb->length <= off_dst)
1544 		return 0;
1545 	if (scp->sc_data_direction != DMA_FROM_DEVICE)
1546 		return DID_ERROR << 16;
1547 
1548 	act_len = sg_pcopy_from_buffer(sdb->table.sgl, sdb->table.nents,
1549 				       arr, arr_len, skip);
1550 	pr_debug("%s: off_dst=%u, scsi_bufflen=%u, act_len=%u, resid=%d\n",
1551 		 __func__, off_dst, scsi_bufflen(scp), act_len,
1552 		 scsi_get_resid(scp));
1553 	n = scsi_bufflen(scp) - (off_dst + act_len);
1554 	scsi_set_resid(scp, min_t(u32, scsi_get_resid(scp), n));
1555 	return 0;
1556 }
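/*
 * Worked example (illustrative numbers): with scsi_bufflen(scp) == 512
 * and resid preset to 512, a first call with off_dst == 400 and
 * arr_len == 112 computes n == 0, dropping resid to 0; a later call at
 * off_dst == 0 with arr_len == 100 computes n == 412, and min(resid, n)
 * leaves resid at 0. Taking the minimum is what makes out-of-order
 * offsets safe.
 */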
1557 
1558 /* Fetches from SCSI "data-out" buffer. Returns number of bytes fetched into
1559  * 'arr' or -1 if error.
1560  */
1561 static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
1562 			       int arr_len)
1563 {
1564 	if (!scsi_bufflen(scp))
1565 		return 0;
1566 	if (scp->sc_data_direction != DMA_TO_DEVICE)
1567 		return -1;
1568 
1569 	return scsi_sg_copy_to_buffer(scp, arr, arr_len);
1570 }
1571 
1572 
1573 static char sdebug_inq_vendor_id[9] = "Linux   ";
1574 static char sdebug_inq_product_id[17] = "scsi_debug      ";
1575 static char sdebug_inq_product_rev[5] = SDEBUG_VERSION;
1576 /* Use some locally assigned NAAs for SAS addresses. */
1577 static const u64 naa3_comp_a = 0x3222222000000000ULL;
1578 static const u64 naa3_comp_b = 0x3333333000000000ULL;
1579 static const u64 naa3_comp_c = 0x3111111000000000ULL;
1580 
1581 /* Device identification VPD page. Returns number of bytes placed in arr */
1582 static int inquiry_vpd_83(unsigned char *arr, int port_group_id,
1583 			  int target_dev_id, int dev_id_num,
1584 			  const char *dev_id_str, int dev_id_str_len,
1585 			  const uuid_t *lu_name)
1586 {
1587 	int num, port_a;
1588 	char b[32];
1589 
1590 	port_a = target_dev_id + 1;
1591 	/* T10 vendor identifier field format (faked) */
1592 	arr[0] = 0x2;	/* ASCII */
1593 	arr[1] = 0x1;
1594 	arr[2] = 0x0;
1595 	memcpy(&arr[4], sdebug_inq_vendor_id, 8);
1596 	memcpy(&arr[12], sdebug_inq_product_id, 16);
1597 	memcpy(&arr[28], dev_id_str, dev_id_str_len);
1598 	num = 8 + 16 + dev_id_str_len;
1599 	arr[3] = num;
1600 	num += 4;
1601 	if (dev_id_num >= 0) {
1602 		if (sdebug_uuid_ctl) {
1603 			/* Locally assigned UUID */
1604 			arr[num++] = 0x1;  /* binary (not necessarily sas) */
1605 			arr[num++] = 0xa;  /* PIV=0, lu, naa */
1606 			arr[num++] = 0x0;
1607 			arr[num++] = 0x12;
1608 			arr[num++] = 0x10; /* uuid type=1, locally assigned */
1609 			arr[num++] = 0x0;
1610 			memcpy(arr + num, lu_name, 16);
1611 			num += 16;
1612 		} else {
1613 			/* NAA-3, Logical unit identifier (binary) */
1614 			arr[num++] = 0x1;  /* binary (not necessarily sas) */
1615 			arr[num++] = 0x3;  /* PIV=0, lu, naa */
1616 			arr[num++] = 0x0;
1617 			arr[num++] = 0x8;
1618 			put_unaligned_be64(naa3_comp_b + dev_id_num, arr + num);
1619 			num += 8;
1620 		}
1621 		/* Target relative port number */
1622 		arr[num++] = 0x61;	/* proto=sas, binary */
1623 		arr[num++] = 0x94;	/* PIV=1, target port, rel port */
1624 		arr[num++] = 0x0;	/* reserved */
1625 		arr[num++] = 0x4;	/* length */
1626 		arr[num++] = 0x0;	/* reserved */
1627 		arr[num++] = 0x0;	/* reserved */
1628 		arr[num++] = 0x0;
1629 		arr[num++] = 0x1;	/* relative port A */
1630 	}
1631 	/* NAA-3, Target port identifier */
1632 	arr[num++] = 0x61;	/* proto=sas, binary */
1633 	arr[num++] = 0x93;	/* piv=1, target port, naa */
1634 	arr[num++] = 0x0;
1635 	arr[num++] = 0x8;
1636 	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
1637 	num += 8;
1638 	/* NAA-3, Target port group identifier */
1639 	arr[num++] = 0x61;	/* proto=sas, binary */
1640 	arr[num++] = 0x95;	/* piv=1, target port group id */
1641 	arr[num++] = 0x0;
1642 	arr[num++] = 0x4;
1643 	arr[num++] = 0;
1644 	arr[num++] = 0;
1645 	put_unaligned_be16(port_group_id, arr + num);
1646 	num += 2;
1647 	/* NAA-3, Target device identifier */
1648 	arr[num++] = 0x61;	/* proto=sas, binary */
1649 	arr[num++] = 0xa3;	/* piv=1, target device, naa */
1650 	arr[num++] = 0x0;
1651 	arr[num++] = 0x8;
1652 	put_unaligned_be64(naa3_comp_a + target_dev_id, arr + num);
1653 	num += 8;
1654 	/* SCSI name string: Target device identifier */
1655 	arr[num++] = 0x63;	/* proto=sas, UTF-8 */
1656 	arr[num++] = 0xa8;	/* piv=1, target device, SCSI name string */
1657 	arr[num++] = 0x0;
1658 	arr[num++] = 24;
1659 	memcpy(arr + num, "naa.32222220", 12);
1660 	num += 12;
1661 	snprintf(b, sizeof(b), "%08X", target_dev_id);
1662 	memcpy(arr + num, b, 8);
1663 	num += 8;
1664 	memset(arr + num, 0, 4);
1665 	num += 4;
1666 	return num;
1667 }
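/*
 * Layout sketch: every identifier built above is an SPC designation
 * descriptor, i.e. a 4-byte header followed by the designator bytes.
 * Field names below are descriptive only (not from a kernel header):
 *
 *	struct designation_desc_hdr {
 *		u8 proto_code_set;	first byte, e.g. 0x61 = SAS, binary
 *		u8 piv_assoc_type;	e.g. 0x93 = PIV=1, tgt port, NAA
 *		u8 reserved;
 *		u8 length;		designator length in bytes
 *	};
 */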
1668 
1669 static unsigned char vpd84_data[] = {
1670 /* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
1671     0x22,0x22,0x22,0x0,0xbb,0x1,
1672     0x22,0x22,0x22,0x0,0xbb,0x2,
1673 };
1674 
1675 /*  Software interface identification VPD page */
1676 static int inquiry_vpd_84(unsigned char *arr)
1677 {
1678 	memcpy(arr, vpd84_data, sizeof(vpd84_data));
1679 	return sizeof(vpd84_data);
1680 }
1681 
1682 /* Management network addresses VPD page */
1683 static int inquiry_vpd_85(unsigned char *arr)
1684 {
1685 	int num = 0;
1686 	const char *na1 = "https://www.kernel.org/config";
1687 	const char *na2 = "http://www.kernel.org/log";
1688 	int plen, olen;
1689 
1690 	arr[num++] = 0x1;	/* lu, storage config */
1691 	arr[num++] = 0x0;	/* reserved */
1692 	arr[num++] = 0x0;
1693 	olen = strlen(na1);
1694 	plen = olen + 1;
1695 	if (plen % 4)
1696 		plen = ((plen / 4) + 1) * 4;
1697 	arr[num++] = plen;	/* length, null terminated, padded */
1698 	memcpy(arr + num, na1, olen);
1699 	memset(arr + num + olen, 0, plen - olen);
1700 	num += plen;
1701 
1702 	arr[num++] = 0x4;	/* lu, logging */
1703 	arr[num++] = 0x0;	/* reserved */
1704 	arr[num++] = 0x0;
1705 	olen = strlen(na2);
1706 	plen = olen + 1;
1707 	if (plen % 4)
1708 		plen = ((plen / 4) + 1) * 4;
1709 	arr[num++] = plen;	/* length, null terminated, padded */
1710 	memcpy(arr + num, na2, olen);
1711 	memset(arr + num + olen, 0, plen - olen);
1712 	num += plen;
1713 
1714 	return num;
1715 }
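/*
 * The round-up above pads the NUL-terminated URL out to the next
 * multiple of four bytes, e.g. the 29-character config URL gives
 * olen + 1 == 30, rounded to plen == 32. The kernel's ALIGN(plen, 4)
 * macro computes the same value.
 */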
1716 
1717 /* SCSI ports VPD page */
1718 static int inquiry_vpd_88(unsigned char *arr, int target_dev_id)
1719 {
1720 	int num = 0;
1721 	int port_a, port_b;
1722 
1723 	port_a = target_dev_id + 1;
1724 	port_b = port_a + 1;
1725 	arr[num++] = 0x0;	/* reserved */
1726 	arr[num++] = 0x0;	/* reserved */
1727 	arr[num++] = 0x0;
1728 	arr[num++] = 0x1;	/* relative port 1 (primary) */
1729 	memset(arr + num, 0, 6);
1730 	num += 6;
1731 	arr[num++] = 0x0;
1732 	arr[num++] = 12;	/* length tp descriptor */
1733 	/* naa-5 target port identifier (A) */
1734 	arr[num++] = 0x61;	/* proto=sas, binary */
1735 	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
1736 	arr[num++] = 0x0;	/* reserved */
1737 	arr[num++] = 0x8;	/* length */
1738 	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
1739 	num += 8;
1740 	arr[num++] = 0x0;	/* reserved */
1741 	arr[num++] = 0x0;	/* reserved */
1742 	arr[num++] = 0x0;
1743 	arr[num++] = 0x2;	/* relative port 2 (secondary) */
1744 	memset(arr + num, 0, 6);
1745 	num += 6;
1746 	arr[num++] = 0x0;
1747 	arr[num++] = 12;	/* length tp descriptor */
1748 	/* naa-5 target port identifier (B) */
1749 	arr[num++] = 0x61;	/* proto=sas, binary */
1750 	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
1751 	arr[num++] = 0x0;	/* reserved */
1752 	arr[num++] = 0x8;	/* length */
1753 	put_unaligned_be64(naa3_comp_a + port_b, arr + num);
1754 	num += 8;
1755 
1756 	return num;
1757 }
1758 
1759 
1760 static unsigned char vpd89_data[] = {
1761 /* from 4th byte */ 0,0,0,0,
1762 'l','i','n','u','x',' ',' ',' ',
1763 'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
1764 '1','2','3','4',
1765 0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
1766 0xec,0,0,0,
1767 0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
1768 0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
1769 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
1770 0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
1771 0x53,0x41,
1772 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
1773 0x20,0x20,
1774 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
1775 0x10,0x80,
1776 0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
1777 0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
1778 0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
1779 0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
1780 0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
1781 0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
1782 0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
1783 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1784 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1785 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1786 0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
1787 0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
1788 0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
1789 0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
1790 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1791 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1792 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1793 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1794 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1795 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1796 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1797 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1798 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1799 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1800 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1801 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
1802 };
1803 
1804 /* ATA Information VPD page */
1805 static int inquiry_vpd_89(unsigned char *arr)
1806 {
1807 	memcpy(arr, vpd89_data, sizeof(vpd89_data));
1808 	return sizeof(vpd89_data);
1809 }
1810 
1811 
1812 static unsigned char vpdb0_data[] = {
1813 	/* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
1814 	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1815 	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1816 	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1817 };
1818 
1819 /* Block limits VPD page (SBC-3) */
1820 static int inquiry_vpd_b0(unsigned char *arr)
1821 {
1822 	unsigned int gran;
1823 
1824 	memcpy(arr, vpdb0_data, sizeof(vpdb0_data));
1825 
1826 	/* Optimal transfer length granularity */
1827 	if (sdebug_opt_xferlen_exp != 0 &&
1828 	    sdebug_physblk_exp < sdebug_opt_xferlen_exp)
1829 		gran = 1 << sdebug_opt_xferlen_exp;
1830 	else
1831 		gran = 1 << sdebug_physblk_exp;
1832 	put_unaligned_be16(gran, arr + 2);
1833 
1834 	/* Maximum Transfer Length */
1835 	if (sdebug_store_sectors > 0x400)
1836 		put_unaligned_be32(sdebug_store_sectors, arr + 4);
1837 
1838 	/* Optimal Transfer Length */
1839 	put_unaligned_be32(sdebug_opt_blks, &arr[8]);
1840 
1841 	if (sdebug_lbpu) {
1842 		/* Maximum Unmap LBA Count */
1843 		put_unaligned_be32(sdebug_unmap_max_blocks, &arr[16]);
1844 
1845 		/* Maximum Unmap Block Descriptor Count */
1846 		put_unaligned_be32(sdebug_unmap_max_desc, &arr[20]);
1847 	}
1848 
1849 	/* Unmap Granularity Alignment */
1850 	if (sdebug_unmap_alignment) {
1851 		put_unaligned_be32(sdebug_unmap_alignment, &arr[28]);
1852 		arr[28] |= 0x80; /* UGAVALID */
1853 	}
1854 
1855 	/* Optimal Unmap Granularity */
1856 	put_unaligned_be32(sdebug_unmap_granularity, &arr[24]);
1857 
1858 	/* Maximum WRITE SAME Length */
1859 	put_unaligned_be64(sdebug_write_same_length, &arr[32]);
1860 
1861 	if (sdebug_atomic_wr) {
1862 		put_unaligned_be32(sdebug_atomic_wr_max_length, &arr[40]);
1863 		put_unaligned_be32(sdebug_atomic_wr_align, &arr[44]);
1864 		put_unaligned_be32(sdebug_atomic_wr_gran, &arr[48]);
1865 		put_unaligned_be32(sdebug_atomic_wr_max_length_bndry, &arr[52]);
1866 		put_unaligned_be32(sdebug_atomic_wr_max_bndry, &arr[56]);
1867 	}
1868 
1869 	return 0x3c; /* Mandatory page length for Logical Block Provisioning */
1870 }
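/*
 * Granularity example (assumed module parameters): physblk_exp=3 with
 * opt_xferlen_exp=0 reports 1 << 3 == 8 blocks; raising opt_xferlen_exp
 * to 6 (which exceeds physblk_exp) reports 1 << 6 == 64 blocks instead.
 */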
1871 
1872 /* Block device characteristics VPD page (SBC-3) */
1873 static int inquiry_vpd_b1(struct sdebug_dev_info *devip, unsigned char *arr)
1874 {
1875 	memset(arr, 0, 0x3c);
1876 	arr[0] = 0;
1877 	arr[1] = 1;	/* non rotating medium (e.g. solid state) */
1878 	arr[2] = 0;
1879 	arr[3] = 5;	/* less than 1.8" */
1880 
1881 	return 0x3c;
1882 }
1883 
1884 /* Logical block provisioning VPD page (SBC-4) */
1885 static int inquiry_vpd_b2(unsigned char *arr)
1886 {
1887 	memset(arr, 0, 0x4);
1888 	arr[0] = 0;			/* threshold exponent */
1889 	if (sdebug_lbpu)
1890 		arr[1] = 1 << 7;
1891 	if (sdebug_lbpws)
1892 		arr[1] |= 1 << 6;
1893 	if (sdebug_lbpws10)
1894 		arr[1] |= 1 << 5;
1895 	if (sdebug_lbprz && scsi_debug_lbp())
1896 		arr[1] |= (sdebug_lbprz & 0x7) << 2;  /* sbc4r07 and later */
1897 	/* anc_sup=0; dp=0 (no provisioning group descriptor) */
1898 	/* minimum_percentage=0; provisioning_type=0 (unknown) */
1899 	/* threshold_percentage=0 */
1900 	return 0x4;
1901 }
1902 
1903 /* Zoned block device characteristics VPD page (ZBC mandatory) */
1904 static int inquiry_vpd_b6(struct sdebug_dev_info *devip, unsigned char *arr)
1905 {
1906 	memset(arr, 0, 0x3c);
1907 	arr[0] = 0x1; /* set URSWRZ (unrestricted read in seq. wr req zone) */
1908 	/*
1909 	 * Set Optimal number of open sequential write preferred zones and
1910 	 * Optimal number of non-sequentially written sequential write
1911 	 * preferred zones fields to 'not reported' (0xffffffff). Leave other
1912 	 * fields set to zero, apart from Max. number of open swrz_s field.
1913 	 */
1914 	put_unaligned_be32(0xffffffff, &arr[4]);
1915 	put_unaligned_be32(0xffffffff, &arr[8]);
1916 	if (sdeb_zbc_model == BLK_ZONED_HM && devip->max_open)
1917 		put_unaligned_be32(devip->max_open, &arr[12]);
1918 	else
1919 		put_unaligned_be32(0xffffffff, &arr[12]);
1920 	if (devip->zcap < devip->zsize) {
1921 		arr[19] = ZBC_CONSTANT_ZONE_START_OFFSET;
1922 		put_unaligned_be64(devip->zsize, &arr[20]);
1923 	} else {
1924 		arr[19] = 0;
1925 	}
1926 	return 0x3c;
1927 }
1928 
1929 #define SDEBUG_BLE_LEN_AFTER_B4 28	/* thus vpage 32 bytes long */
1930 
1931 enum { MAXIMUM_NUMBER_OF_STREAMS = 6, PERMANENT_STREAM_COUNT = 5 };
1932 
1933 /* Block limits extension VPD page (SBC-4) */
1934 static int inquiry_vpd_b7(unsigned char *arrb4)
1935 {
1936 	memset(arrb4, 0, SDEBUG_BLE_LEN_AFTER_B4);
1937 	arrb4[1] = 1; /* Reduced stream control support (RSCS) */
1938 	put_unaligned_be16(MAXIMUM_NUMBER_OF_STREAMS, &arrb4[2]);
1939 	return SDEBUG_BLE_LEN_AFTER_B4;
1940 }
1941 
1942 #define SDEBUG_LONG_INQ_SZ 96
1943 #define SDEBUG_MAX_INQ_ARR_SZ 584
1944 
1945 static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1946 {
1947 	unsigned char pq_pdt;
1948 	unsigned char *arr;
1949 	unsigned char *cmd = scp->cmnd;
1950 	u32 alloc_len, n;
1951 	int ret;
1952 	bool have_wlun, is_disk, is_zbc, is_disk_zbc;
1953 
1954 	alloc_len = get_unaligned_be16(cmd + 3);
1955 	arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
1956 	if (!arr)
1957 		return DID_REQUEUE << 16;
1958 	is_disk = (sdebug_ptype == TYPE_DISK);
1959 	is_zbc = devip->zoned;
1960 	is_disk_zbc = (is_disk || is_zbc);
1961 	have_wlun = scsi_is_wlun(scp->device->lun);
1962 	if (have_wlun)
1963 		pq_pdt = TYPE_WLUN;	/* present, wlun */
1964 	else if (sdebug_no_lun_0 && (devip->lun == SDEBUG_LUN_0_VAL))
1965 		pq_pdt = 0x7f;	/* not present, PQ=3, PDT=0x1f */
1966 	else
1967 		pq_pdt = (sdebug_ptype & 0x1f);
1968 	arr[0] = pq_pdt;
1969 	if (0x2 & cmd[1]) {  /* CMDDT bit set */
1970 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
1971 		kfree(arr);
1972 		return check_condition_result;
1973 	} else if (0x1 & cmd[1]) {  /* EVPD bit set */
1974 		int lu_id_num, port_group_id, target_dev_id;
1975 		u32 len;
1976 		char lu_id_str[6];
1977 		int host_no = devip->sdbg_host->shost->host_no;
1978 
1979 		arr[1] = cmd[2];
1980 		port_group_id = (((host_no + 1) & 0x7f) << 8) +
1981 		    (devip->channel & 0x7f);
1982 		if (sdebug_vpd_use_hostno == 0)
1983 			host_no = 0;
1984 		lu_id_num = have_wlun ? -1 : (((host_no + 1) * 2000) +
1985 			    (devip->target * 1000) + devip->lun);
1986 		target_dev_id = ((host_no + 1) * 2000) +
1987 				 (devip->target * 1000) - 3;
1988 		len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
1989 		if (0 == cmd[2]) { /* supported vital product data pages */
1990 			n = 4;
1991 			arr[n++] = 0x0;   /* this page */
1992 			arr[n++] = 0x80;  /* unit serial number */
1993 			arr[n++] = 0x83;  /* device identification */
1994 			arr[n++] = 0x84;  /* software interface ident. */
1995 			arr[n++] = 0x85;  /* management network addresses */
1996 			arr[n++] = 0x86;  /* extended inquiry */
1997 			arr[n++] = 0x87;  /* mode page policy */
1998 			arr[n++] = 0x88;  /* SCSI ports */
1999 			if (is_disk_zbc) {	  /* SBC or ZBC */
2000 				arr[n++] = 0x89;  /* ATA information */
2001 				arr[n++] = 0xb0;  /* Block limits */
2002 				arr[n++] = 0xb1;  /* Block characteristics */
2003 				if (is_disk)
2004 					arr[n++] = 0xb2;  /* LB Provisioning */
2005 				if (is_zbc)
2006 					arr[n++] = 0xb6;  /* ZB dev. char. */
2007 				arr[n++] = 0xb7;  /* Block limits extension */
2008 			}
2009 			arr[3] = n - 4;	  /* number of supported VPD pages */
2010 		} else if (0x80 == cmd[2]) { /* unit serial number */
2011 			arr[3] = len;
2012 			memcpy(&arr[4], lu_id_str, len);
2013 		} else if (0x83 == cmd[2]) { /* device identification */
2014 			arr[3] = inquiry_vpd_83(&arr[4], port_group_id,
2015 						target_dev_id, lu_id_num,
2016 						lu_id_str, len,
2017 						&devip->lu_name);
2018 		} else if (0x84 == cmd[2]) { /* Software interface ident. */
2019 			arr[3] = inquiry_vpd_84(&arr[4]);
2020 		} else if (0x85 == cmd[2]) { /* Management network addresses */
2021 			arr[3] = inquiry_vpd_85(&arr[4]);
2022 		} else if (0x86 == cmd[2]) { /* extended inquiry */
2023 			arr[3] = 0x3c;	/* number of following entries */
2024 			if (sdebug_dif == T10_PI_TYPE3_PROTECTION)
2025 				arr[4] = 0x4;	/* SPT: GRD_CHK:1 */
2026 			else if (have_dif_prot)
2027 				arr[4] = 0x5;   /* SPT: GRD_CHK:1, REF_CHK:1 */
2028 			else
2029 				arr[4] = 0x0;   /* no protection stuff */
2030 			/*
2031 			 * GROUP_SUP=1; HEADSUP=1 (HEAD OF QUEUE); ORDSUP=1
2032 			 * (ORDERED queuing); SIMPSUP=1 (SIMPLE queuing).
2033 			 */
2034 			arr[5] = 0x17;
2035 		} else if (0x87 == cmd[2]) { /* mode page policy */
2036 			arr[3] = 0x8;	/* number of following entries */
2037 			arr[4] = 0x2;	/* disconnect-reconnect mp */
2038 			arr[6] = 0x80;	/* mlus, shared */
2039 			arr[8] = 0x18;	 /* protocol specific lu */
2040 			arr[10] = 0x82;	 /* mlus, per initiator port */
2041 		} else if (0x88 == cmd[2]) { /* SCSI Ports */
2042 			arr[3] = inquiry_vpd_88(&arr[4], target_dev_id);
2043 		} else if (is_disk_zbc && 0x89 == cmd[2]) { /* ATA info */
2044 			n = inquiry_vpd_89(&arr[4]);
2045 			put_unaligned_be16(n, arr + 2);
2046 		} else if (is_disk_zbc && 0xb0 == cmd[2]) { /* Block limits */
2047 			arr[3] = inquiry_vpd_b0(&arr[4]);
2048 		} else if (is_disk_zbc && 0xb1 == cmd[2]) { /* Block char. */
2049 			arr[3] = inquiry_vpd_b1(devip, &arr[4]);
2050 		} else if (is_disk && 0xb2 == cmd[2]) { /* LB Prov. */
2051 			arr[3] = inquiry_vpd_b2(&arr[4]);
2052 		} else if (is_zbc && cmd[2] == 0xb6) { /* ZB dev. charact. */
2053 			arr[3] = inquiry_vpd_b6(devip, &arr[4]);
2054 		} else if (cmd[2] == 0xb7) { /* block limits extension page */
2055 			arr[3] = inquiry_vpd_b7(&arr[4]);
2056 		} else {
2057 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
2058 			kfree(arr);
2059 			return check_condition_result;
2060 		}
2061 		len = min_t(u32, get_unaligned_be16(arr + 2) + 4, alloc_len);
2062 		ret = fill_from_dev_buffer(scp, arr,
2063 			    min_t(u32, len, SDEBUG_MAX_INQ_ARR_SZ));
2064 		kfree(arr);
2065 		return ret;
2066 	}
2067 	/* drops through here for a standard inquiry */
2068 	arr[1] = sdebug_removable ? 0x80 : 0;	/* Removable disk */
2069 	arr[2] = sdebug_scsi_level;
2070 	arr[3] = 2;    /* response_data_format==2 */
2071 	arr[4] = SDEBUG_LONG_INQ_SZ - 5;
2072 	arr[5] = (int)have_dif_prot;	/* PROTECT bit */
2073 	if (sdebug_vpd_use_hostno == 0)
2074 		arr[5] |= 0x10; /* claim: implicit TPGS */
2075 	arr[6] = 0x10; /* claim: MultiP */
2076 	/* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
2077 	arr[7] = 0xa; /* claim: LINKED + CMDQUE */
2078 	memcpy(&arr[8], sdebug_inq_vendor_id, 8);
2079 	memcpy(&arr[16], sdebug_inq_product_id, 16);
2080 	memcpy(&arr[32], sdebug_inq_product_rev, 4);
2081 	/* Use Vendor Specific area to place driver date in ASCII */
2082 	memcpy(&arr[36], sdebug_version_date, 8);
2083 	/* version descriptors (2 bytes each) follow */
2084 	put_unaligned_be16(0xc0, arr + 58);   /* SAM-6 no version claimed */
2085 	put_unaligned_be16(0x5c0, arr + 60);  /* SPC-5 no version claimed */
2086 	n = 62;
2087 	if (is_disk) {		/* SBC-4 no version claimed */
2088 		put_unaligned_be16(0x600, arr + n);
2089 		n += 2;
2090 	} else if (sdebug_ptype == TYPE_TAPE) {	/* SSC-4 rev 3 */
2091 		put_unaligned_be16(0x525, arr + n);
2092 		n += 2;
2093 	} else if (is_zbc) {	/* ZBC BSR INCITS 536 revision 05 */
2094 		put_unaligned_be16(0x624, arr + n);
2095 		n += 2;
2096 	}
2097 	put_unaligned_be16(0x2100, arr + n);	/* SPL-4 no version claimed */
2098 	ret = fill_from_dev_buffer(scp, arr,
2099 			    min_t(u32, alloc_len, SDEBUG_LONG_INQ_SZ));
2100 	kfree(arr);
2101 	return ret;
2102 }
2103 
2104 /* See resp_iec_m_pg() for how this data is manipulated */
2105 static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
2106 				   0, 0, 0x0, 0x0};
2107 
2108 static int resp_requests(struct scsi_cmnd *scp,
2109 			 struct sdebug_dev_info *devip)
2110 {
2111 	unsigned char *cmd = scp->cmnd;
2112 	unsigned char arr[SCSI_SENSE_BUFFERSIZE];	/* assume >= 18 bytes */
2113 	bool dsense = !!(cmd[1] & 1);
2114 	u32 alloc_len = cmd[4];
2115 	u32 len = 18;
2116 	int stopped_state = atomic_read(&devip->stopped);
2117 
2118 	memset(arr, 0, sizeof(arr));
2119 	if (stopped_state > 0) {	/* some "pollable" data [spc6r02: 5.12.2] */
2120 		if (dsense) {
2121 			arr[0] = 0x72;
2122 			arr[1] = NOT_READY;
2123 			arr[2] = LOGICAL_UNIT_NOT_READY;
2124 			arr[3] = (stopped_state == 2) ? 0x1 : 0x2;
2125 			len = 8;
2126 		} else {
2127 			arr[0] = 0x70;
2128 			arr[2] = NOT_READY;		/* NOT_READY in sense_key */
2129 			arr[7] = 0xa;			/* 18 byte sense buffer */
2130 			arr[12] = LOGICAL_UNIT_NOT_READY;
2131 			arr[13] = (stopped_state == 2) ? 0x1 : 0x2;
2132 		}
2133 	} else if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
2134 		/* Information exceptions control mode page: TEST=1, MRIE=6 */
2135 		if (dsense) {
2136 			arr[0] = 0x72;
2137 			arr[1] = 0x0;		/* NO_SENSE in sense_key */
2138 			arr[2] = THRESHOLD_EXCEEDED;
2139 			arr[3] = 0xff;		/* Failure prediction(false) */
2140 			len = 8;
2141 		} else {
2142 			arr[0] = 0x70;
2143 			arr[2] = 0x0;		/* NO_SENSE in sense_key */
2144 			arr[7] = 0xa;   	/* 18 byte sense buffer */
2145 			arr[12] = THRESHOLD_EXCEEDED;
2146 			arr[13] = 0xff;		/* Failure prediction(false) */
2147 		}
2148 	} else {	/* nothing to report */
2149 		if (dsense) {
2150 			len = 8;
2151 			memset(arr, 0, len);
2152 			arr[0] = 0x72;
2153 		} else {
2154 			memset(arr, 0, len);
2155 			arr[0] = 0x70;
2156 			arr[7] = 0xa;
2157 		}
2158 	}
2159 	return fill_from_dev_buffer(scp, arr, min_t(u32, len, alloc_len));
2160 }
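/*
 * Format example (bytes follow from the stopped case above): with DESC=1
 * the 8-byte descriptor header 72 02 04 02 00 00 00 00 carries
 * NOT READY / LOGICAL UNIT NOT READY; with DESC=0 the fixed 18-byte
 * format starting 70 00 02 places the same ASC/ASCQ at bytes 12 and 13.
 */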
2161 
2162 static int resp_start_stop(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
2163 {
2164 	unsigned char *cmd = scp->cmnd;
2165 	int power_cond, want_stop, stopped_state;
2166 	bool changing;
2167 
2168 	power_cond = (cmd[4] & 0xf0) >> 4;
2169 	if (power_cond) {
2170 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 7);
2171 		return check_condition_result;
2172 	}
2173 	want_stop = !(cmd[4] & 1);
2174 	stopped_state = atomic_read(&devip->stopped);
2175 	if (stopped_state == 2) {
2176 		ktime_t now_ts = ktime_get_boottime();
2177 
2178 		if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
2179 			u64 diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));
2180 
2181 			if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
2182 				/* tur_ms_to_ready timer extinguished */
2183 				atomic_set(&devip->stopped, 0);
2184 				stopped_state = 0;
2185 			}
2186 		}
2187 		if (stopped_state == 2) {
2188 			if (want_stop) {
2189 				stopped_state = 1;	/* dummy up success */
2190 			} else {	/* Disallow tur_ms_to_ready delay to be overridden */
2191 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 0 /* START bit */);
2192 				return check_condition_result;
2193 			}
2194 		}
2195 	}
2196 	changing = (stopped_state != want_stop);
2197 	if (changing)
2198 		atomic_xchg(&devip->stopped, want_stop);
2199 	if (!changing || (cmd[1] & 0x1))  /* state unchanged or IMMED bit set in cdb */
2200 		return SDEG_RES_IMMED_MASK;
2201 	else
2202 		return 0;
2203 }
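/*
 * Timing example (assumed parameter): with tur_ms_to_ready=2500 the
 * device stays in stopped_state 2 until 2500 * 1000000 ns (2.5 s) have
 * elapsed since create_ts. During that window a STOP is dummied up as
 * success while a START is rejected, so the ready delay cannot be
 * short-circuited from user space.
 */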
2204 
2205 static sector_t get_sdebug_capacity(void)
2206 {
2207 	static const unsigned int gibibyte = 1073741824;
2208 
2209 	if (sdebug_virtual_gb > 0)
2210 		return (sector_t)sdebug_virtual_gb *
2211 			(gibibyte / sdebug_sector_size);
2212 	else
2213 		return sdebug_store_sectors;
2214 }
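/*
 * Example (assumed parameters): virtual_gb=4 with sector_size=512
 * reports 4 * (1073741824 / 512) == 8388608 logical blocks, independent
 * of how much backing store was actually allocated.
 */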
2215 
2216 #define SDEBUG_READCAP_ARR_SZ 8
2217 static int resp_readcap(struct scsi_cmnd *scp,
2218 			struct sdebug_dev_info *devip)
2219 {
2220 	unsigned char arr[SDEBUG_READCAP_ARR_SZ];
2221 	unsigned int capac;
2222 
2223 	/* following just in case virtual_gb changed */
2224 	sdebug_capacity = get_sdebug_capacity();
2225 	memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
2226 	if (sdebug_capacity < 0xffffffff) {
2227 		capac = (unsigned int)sdebug_capacity - 1;
2228 		put_unaligned_be32(capac, arr + 0);
2229 	} else
2230 		put_unaligned_be32(0xffffffff, arr + 0);
2231 	put_unaligned_be16(sdebug_sector_size, arr + 6);
2232 	return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
2233 }
2234 
2235 #define SDEBUG_READCAP16_ARR_SZ 32
2236 static int resp_readcap16(struct scsi_cmnd *scp,
2237 			  struct sdebug_dev_info *devip)
2238 {
2239 	unsigned char *cmd = scp->cmnd;
2240 	unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
2241 	u32 alloc_len;
2242 
2243 	alloc_len = get_unaligned_be32(cmd + 10);
2244 	/* following just in case virtual_gb changed */
2245 	sdebug_capacity = get_sdebug_capacity();
2246 	memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
2247 	put_unaligned_be64((u64)(sdebug_capacity - 1), arr + 0);
2248 	put_unaligned_be32(sdebug_sector_size, arr + 8);
2249 	arr[13] = sdebug_physblk_exp & 0xf;
2250 	arr[14] = (sdebug_lowest_aligned >> 8) & 0x3f;
2251 
2252 	if (scsi_debug_lbp()) {
2253 		arr[14] |= 0x80; /* LBPME */
2254 		/* from sbc4r07, this LBPRZ field is 1 bit, but the LBPRZ in
2255 		 * the LB Provisioning VPD page is 3 bits. Note that lbprz=2
2256 		 * in the wider field maps to 0 in this field.
2257 		 */
2258 		if (sdebug_lbprz & 1)	/* precisely what the draft requires */
2259 			arr[14] |= 0x40;
2260 	}
2261 
2262 	/*
2263 	 * Since the scsi_debug READ CAPACITY implementation always reports the
2264 	 * total disk capacity, set RC BASIS = 1 for host-managed ZBC devices.
2265 	 */
2266 	if (devip->zoned)
2267 		arr[12] |= 1 << 4;
2268 
2269 	arr[15] = sdebug_lowest_aligned & 0xff;
2270 
2271 	if (have_dif_prot) {
2272 		arr[12] = (sdebug_dif - 1) << 1; /* P_TYPE */
2273 		arr[12] |= 1; /* PROT_EN */
2274 	}
2275 
2276 	return fill_from_dev_buffer(scp, arr,
2277 			    min_t(u32, alloc_len, SDEBUG_READCAP16_ARR_SZ));
2278 }
2279 
2280 #define SDEBUG_MAX_TGTPGS_ARR_SZ 1412
2281 
2282 static int resp_report_tgtpgs(struct scsi_cmnd *scp,
2283 			      struct sdebug_dev_info *devip)
2284 {
2285 	unsigned char *cmd = scp->cmnd;
2286 	unsigned char *arr;
2287 	int host_no = devip->sdbg_host->shost->host_no;
2288 	int port_group_a, port_group_b, port_a, port_b;
2289 	u32 alen, n, rlen;
2290 	int ret;
2291 
2292 	alen = get_unaligned_be32(cmd + 6);
2293 	arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
2294 	if (!arr)
2295 		return DID_REQUEUE << 16;
2296 	/*
2297 	 * EVPD page 0x88 states we have two ports, one
2298 	 * real and a fake port with no device connected.
2299 	 * So we create two port groups with one port each
2300 	 * and set the group with port B to unavailable.
2301 	 */
2302 	port_a = 0x1; /* relative port A */
2303 	port_b = 0x2; /* relative port B */
2304 	port_group_a = (((host_no + 1) & 0x7f) << 8) +
2305 			(devip->channel & 0x7f);
2306 	port_group_b = (((host_no + 1) & 0x7f) << 8) +
2307 			(devip->channel & 0x7f) + 0x80;
2308 
2309 	/*
2310 	 * The asymmetric access state is cycled according to the host_no.
2311 	 */
2312 	n = 4;
2313 	if (sdebug_vpd_use_hostno == 0) {
2314 		arr[n++] = host_no % 3; /* Asymm access state */
2315 		arr[n++] = 0x0F; /* claim: all states are supported */
2316 	} else {
2317 		arr[n++] = 0x0; /* Active/Optimized path */
2318 		arr[n++] = 0x01; /* only support active/optimized paths */
2319 	}
2320 	put_unaligned_be16(port_group_a, arr + n);
2321 	n += 2;
2322 	arr[n++] = 0;    /* Reserved */
2323 	arr[n++] = 0;    /* Status code */
2324 	arr[n++] = 0;    /* Vendor unique */
2325 	arr[n++] = 0x1;  /* One port per group */
2326 	arr[n++] = 0;    /* Reserved */
2327 	arr[n++] = 0;    /* Reserved */
2328 	put_unaligned_be16(port_a, arr + n);
2329 	n += 2;
2330 	arr[n++] = 3;    /* Port unavailable */
2331 	arr[n++] = 0x08; /* claim: only unavailable paths are supported */
2332 	put_unaligned_be16(port_group_b, arr + n);
2333 	n += 2;
2334 	arr[n++] = 0;    /* Reserved */
2335 	arr[n++] = 0;    /* Status code */
2336 	arr[n++] = 0;    /* Vendor unique */
2337 	arr[n++] = 0x1;  /* One port per group */
2338 	arr[n++] = 0;    /* Reserved */
2339 	arr[n++] = 0;    /* Reserved */
2340 	put_unaligned_be16(port_b, arr + n);
2341 	n += 2;
2342 
2343 	rlen = n - 4;
2344 	put_unaligned_be32(rlen, arr + 0);
2345 
2346 	/*
2347 	 * Return the smallest of:
2348 	 * - the allocation length
2349 	 * - the constructed response length
2350 	 * - the maximum array size
2351 	 */
2352 	rlen = min(alen, n);
2353 	ret = fill_from_dev_buffer(scp, arr,
2354 			   min_t(u32, rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
2355 	kfree(arr);
2356 	return ret;
2357 }
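/*
 * Group numbering example: for host_no == 0 and channel == 0 the code
 * above yields port_group_a == 0x0100 and port_group_b == 0x0180, i.e.
 * group B is always group A plus 0x80, keeping the fake unavailable
 * port in a distinct target port group.
 */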
2358 
2359 static int resp_rsup_opcodes(struct scsi_cmnd *scp,
2360 			     struct sdebug_dev_info *devip)
2361 {
2362 	bool rctd;
2363 	u8 reporting_opts, req_opcode, sdeb_i, supp;
2364 	u16 req_sa, u;
2365 	u32 alloc_len, a_len;
2366 	int k, offset, len, errsts, count, bump, na;
2367 	const struct opcode_info_t *oip;
2368 	const struct opcode_info_t *r_oip;
2369 	u8 *arr;
2370 	u8 *cmd = scp->cmnd;
2371 
2372 	rctd = !!(cmd[2] & 0x80);
2373 	reporting_opts = cmd[2] & 0x7;
2374 	req_opcode = cmd[3];
2375 	req_sa = get_unaligned_be16(cmd + 4);
2376 	alloc_len = get_unaligned_be32(cmd + 6);
2377 	if (alloc_len < 4 || alloc_len > 0xffff) {
2378 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
2379 		return check_condition_result;
2380 	}
2381 	if (alloc_len > 8192)
2382 		a_len = 8192;
2383 	else
2384 		a_len = alloc_len;
2385 	arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_ATOMIC);
2386 	if (NULL == arr) {
2387 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
2388 				INSUFF_RES_ASCQ);
2389 		return check_condition_result;
2390 	}
2391 	switch (reporting_opts) {
2392 	case 0:	/* all commands */
2393 		/* count number of commands */
2394 		for (count = 0, oip = opcode_info_arr;
2395 		     oip->num_attached != 0xff; ++oip) {
2396 			if (F_INV_OP & oip->flags)
2397 				continue;
2398 			count += (oip->num_attached + 1);
2399 		}
2400 		bump = rctd ? 20 : 8;
2401 		put_unaligned_be32(count * bump, arr);
2402 		for (offset = 4, oip = opcode_info_arr;
2403 		     oip->num_attached != 0xff && offset < a_len; ++oip) {
2404 			if (F_INV_OP & oip->flags)
2405 				continue;
2406 			na = oip->num_attached;
2407 			arr[offset] = oip->opcode;
2408 			put_unaligned_be16(oip->sa, arr + offset + 2);
2409 			if (rctd)
2410 				arr[offset + 5] |= 0x2;
2411 			if (FF_SA & oip->flags)
2412 				arr[offset + 5] |= 0x1;
2413 			put_unaligned_be16(oip->len_mask[0], arr + offset + 6);
2414 			if (rctd)
2415 				put_unaligned_be16(0xa, arr + offset + 8);
2416 			r_oip = oip;
2417 			for (k = 0, oip = oip->arrp; k < na; ++k, ++oip) {
2418 				if (F_INV_OP & oip->flags)
2419 					continue;
2420 				offset += bump;
2421 				arr[offset] = oip->opcode;
2422 				put_unaligned_be16(oip->sa, arr + offset + 2);
2423 				if (rctd)
2424 					arr[offset + 5] |= 0x2;
2425 				if (FF_SA & oip->flags)
2426 					arr[offset + 5] |= 0x1;
2427 				put_unaligned_be16(oip->len_mask[0],
2428 						   arr + offset + 6);
2429 				if (rctd)
2430 					put_unaligned_be16(0xa,
2431 							   arr + offset + 8);
2432 			}
2433 			oip = r_oip;
2434 			offset += bump;
2435 		}
2436 		break;
2437 	case 1:	/* one command: opcode only */
2438 	case 2:	/* one command: opcode plus service action */
2439 	case 3:	/* one command: if sa==0 then opcode only else opcode+sa */
2440 		sdeb_i = opcode_ind_arr[req_opcode];
2441 		oip = &opcode_info_arr[sdeb_i];
2442 		if (F_INV_OP & oip->flags) {
2443 			supp = 1;
2444 			offset = 4;
2445 		} else {
2446 			if (1 == reporting_opts) {
2447 				if (FF_SA & oip->flags) {
2448 					mk_sense_invalid_fld(scp, SDEB_IN_CDB,
2449 							     2, 2);
2450 					kfree(arr);
2451 					return check_condition_result;
2452 				}
2453 				req_sa = 0;
2454 			} else if (2 == reporting_opts &&
2455 				   0 == (FF_SA & oip->flags)) {
2456 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1);	/* point at requested sa */
2457 				kfree(arr);
2458 				return check_condition_result;
2459 			}
2460 			if (0 == (FF_SA & oip->flags) &&
2461 			    req_opcode == oip->opcode)
2462 				supp = 3;
2463 			else if (0 == (FF_SA & oip->flags)) {
2464 				na = oip->num_attached;
2465 				for (k = 0, oip = oip->arrp; k < na;
2466 				     ++k, ++oip) {
2467 					if (req_opcode == oip->opcode)
2468 						break;
2469 				}
2470 				supp = (k >= na) ? 1 : 3;
2471 			} else if (req_sa != oip->sa) {
2472 				na = oip->num_attached;
2473 				for (k = 0, oip = oip->arrp; k < na;
2474 				     ++k, ++oip) {
2475 					if (req_sa == oip->sa)
2476 						break;
2477 				}
2478 				supp = (k >= na) ? 1 : 3;
2479 			} else
2480 				supp = 3;
2481 			if (3 == supp) {
2482 				u = oip->len_mask[0];
2483 				put_unaligned_be16(u, arr + 2);
2484 				arr[4] = oip->opcode;
2485 				for (k = 1; k < u; ++k)
2486 					arr[4 + k] = (k < 16) ?
2487 						 oip->len_mask[k] : 0xff;
2488 				offset = 4 + u;
2489 			} else
2490 				offset = 4;
2491 		}
2492 		arr[1] = (rctd ? 0x80 : 0) | supp;
2493 		if (rctd) {
2494 			put_unaligned_be16(0xa, arr + offset);
2495 			offset += 12;
2496 		}
2497 		break;
2498 	default:
2499 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
2500 		kfree(arr);
2501 		return check_condition_result;
2502 	}
2503 	offset = (offset < a_len) ? offset : a_len;
2504 	len = (offset < alloc_len) ? offset : alloc_len;
2505 	errsts = fill_from_dev_buffer(scp, arr, len);
2506 	kfree(arr);
2507 	return errsts;
2508 }
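/*
 * Descriptor size example: without RCTD each all_commands entry is 8
 * bytes, so bump == 8; with RCTD a 12-byte command timeouts descriptor
 * follows each entry, hence bump == 20 and the header advertises
 * count * 20 bytes of descriptors.
 */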
2509 
2510 static int resp_rsup_tmfs(struct scsi_cmnd *scp,
2511 			  struct sdebug_dev_info *devip)
2512 {
2513 	bool repd;
2514 	u32 alloc_len, len;
2515 	u8 arr[16];
2516 	u8 *cmd = scp->cmnd;
2517 
2518 	memset(arr, 0, sizeof(arr));
2519 	repd = !!(cmd[2] & 0x80);
2520 	alloc_len = get_unaligned_be32(cmd + 6);
2521 	if (alloc_len < 4) {
2522 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
2523 		return check_condition_result;
2524 	}
2525 	arr[0] = 0xc8;		/* ATS | ATSS | LURS */
2526 	arr[1] = 0x1;		/* ITNRS */
2527 	if (repd) {
2528 		arr[3] = 0xc;
2529 		len = 16;
2530 	} else
2531 		len = 4;
2532 
2533 	len = (len < alloc_len) ? len : alloc_len;
2534 	return fill_from_dev_buffer(scp, arr, len);
2535 }
2536 
2537 /* <<Following mode page info copied from ST318451LW>> */
2538 
2539 static int resp_err_recov_pg(unsigned char *p, int pcontrol, int target)
2540 {	/* Read-Write Error Recovery page for mode_sense */
2541 	unsigned char err_recov_pg[] = {0x1, 0xa, 0xc0, 11, 240, 0, 0, 0,
2542 					5, 0, 0xff, 0xff};
2543 
2544 	memcpy(p, err_recov_pg, sizeof(err_recov_pg));
2545 	if (1 == pcontrol)
2546 		memset(p + 2, 0, sizeof(err_recov_pg) - 2);
2547 	return sizeof(err_recov_pg);
2548 }
2549 
2550 static int resp_disconnect_pg(unsigned char *p, int pcontrol, int target)
2551 { 	/* Disconnect-Reconnect page for mode_sense */
2552 	unsigned char disconnect_pg[] = {0x2, 0xe, 128, 128, 0, 10, 0, 0,
2553 					 0, 0, 0, 0, 0, 0, 0, 0};
2554 
2555 	memcpy(p, disconnect_pg, sizeof(disconnect_pg));
2556 	if (1 == pcontrol)
2557 		memset(p + 2, 0, sizeof(disconnect_pg) - 2);
2558 	return sizeof(disconnect_pg);
2559 }
2560 
2561 static int resp_format_pg(unsigned char *p, int pcontrol, int target)
2562 {       /* Format device page for mode_sense */
2563 	unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
2564 				     0, 0, 0, 0, 0, 0, 0, 0,
2565 				     0, 0, 0, 0, 0x40, 0, 0, 0};
2566 
2567 	memcpy(p, format_pg, sizeof(format_pg));
2568 	put_unaligned_be16(sdebug_sectors_per, p + 10);
2569 	put_unaligned_be16(sdebug_sector_size, p + 12);
2570 	if (sdebug_removable)
2571 		p[20] |= 0x20; /* should agree with INQUIRY */
2572 	if (1 == pcontrol)
2573 		memset(p + 2, 0, sizeof(format_pg) - 2);
2574 	return sizeof(format_pg);
2575 }
2576 
2577 static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
2578 				     0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,
2579 				     0, 0, 0, 0};
2580 
2581 static int resp_caching_pg(unsigned char *p, int pcontrol, int target)
2582 { 	/* Caching page for mode_sense */
2583 	unsigned char ch_caching_pg[] = {/* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0,
2584 		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
2585 	unsigned char d_caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
2586 		0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,     0, 0, 0, 0};
2587 
2588 	if (SDEBUG_OPT_N_WCE & sdebug_opts)
2589 		caching_pg[2] &= ~0x4;	/* set WCE=0 (default WCE=1) */
2590 	memcpy(p, caching_pg, sizeof(caching_pg));
2591 	if (1 == pcontrol)
2592 		memcpy(p + 2, ch_caching_pg, sizeof(ch_caching_pg));
2593 	else if (2 == pcontrol)
2594 		memcpy(p, d_caching_pg, sizeof(d_caching_pg));
2595 	return sizeof(caching_pg);
2596 }
2597 
2598 static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
2599 				    0, 0, 0x2, 0x4b};
2600 
2601 static int resp_ctrl_m_pg(unsigned char *p, int pcontrol, int target)
2602 { 	/* Control mode page for mode_sense */
2603 	unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
2604 					0, 0, 0, 0};
2605 	unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
2606 				     0, 0, 0x2, 0x4b};
2607 
2608 	if (sdebug_dsense)
2609 		ctrl_m_pg[2] |= 0x4;
2610 	else
2611 		ctrl_m_pg[2] &= ~0x4;
2612 
2613 	if (sdebug_ato)
2614 		ctrl_m_pg[5] |= 0x80; /* ATO=1 */
2615 
2616 	memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
2617 	if (1 == pcontrol)
2618 		memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
2619 	else if (2 == pcontrol)
2620 		memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
2621 	return sizeof(ctrl_m_pg);
2622 }
2623 
2624 /* IO Advice Hints Grouping mode page */
2625 static int resp_grouping_m_pg(unsigned char *p, int pcontrol, int target)
2626 {
2627 	/* IO Advice Hints Grouping mode page */
2628 	struct grouping_m_pg {
2629 		u8 page_code;	/* OR 0x40 when subpage_code > 0 */
2630 		u8 subpage_code;
2631 		__be16 page_length;
2632 		u8 reserved[12];
2633 		struct scsi_io_group_descriptor descr[MAXIMUM_NUMBER_OF_STREAMS];
2634 	};
2635 	static const struct grouping_m_pg gr_m_pg = {
2636 		.page_code = 0xa | 0x40,
2637 		.subpage_code = 5,
2638 		.page_length = cpu_to_be16(sizeof(gr_m_pg) - 4),
2639 		.descr = {
2640 			{ .st_enble = 1 },
2641 			{ .st_enble = 1 },
2642 			{ .st_enble = 1 },
2643 			{ .st_enble = 1 },
2644 			{ .st_enble = 1 },
2645 			{ .st_enble = 0 },
2646 		}
2647 	};
2648 
2649 	BUILD_BUG_ON(sizeof(struct grouping_m_pg) !=
2650 		     16 + MAXIMUM_NUMBER_OF_STREAMS * 16);
2651 	memcpy(p, &gr_m_pg, sizeof(gr_m_pg));
2652 	if (1 == pcontrol) {
2653 		/* There are no changeable values so clear from byte 4 on. */
2654 		memset(p + 4, 0, sizeof(gr_m_pg) - 4);
2655 	}
2656 	return sizeof(gr_m_pg);
2657 }
2658 
2659 static int resp_iec_m_pg(unsigned char *p, int pcontrol, int target)
2660 {	/* Informational Exceptions control mode page for mode_sense */
2661 	unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
2662 				       0, 0, 0x0, 0x0};
2663 	unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
2664 				      0, 0, 0x0, 0x0};
2665 
2666 	memcpy(p, iec_m_pg, sizeof(iec_m_pg));
2667 	if (1 == pcontrol)
2668 		memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
2669 	else if (2 == pcontrol)
2670 		memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
2671 	return sizeof(iec_m_pg);
2672 }
2673 
2674 static int resp_sas_sf_m_pg(unsigned char *p, int pcontrol, int target)
2675 {	/* SAS SSP mode page - short format for mode_sense */
2676 	unsigned char sas_sf_m_pg[] = {0x19, 0x6,
2677 		0x6, 0x0, 0x7, 0xd0, 0x0, 0x0};
2678 
2679 	memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg));
2680 	if (1 == pcontrol)
2681 		memset(p + 2, 0, sizeof(sas_sf_m_pg) - 2);
2682 	return sizeof(sas_sf_m_pg);
2683 }
2684 
2685 
2686 static int resp_sas_pcd_m_spg(unsigned char *p, int pcontrol, int target,
2687 			      int target_dev_id)
2688 {	/* SAS phy control and discover mode page for mode_sense */
2689 	unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
2690 		    0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
2691 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2692 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2693 		    0x2, 0, 0, 0, 0, 0, 0, 0,
2694 		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
2695 		    0, 0, 0, 0, 0, 0, 0, 0,
2696 		    0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
2697 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2698 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2699 		    0x3, 0, 0, 0, 0, 0, 0, 0,
2700 		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
2701 		    0, 0, 0, 0, 0, 0, 0, 0,
2702 		};
2703 	int port_a, port_b;
2704 
2705 	put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 16);
2706 	put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 24);
2707 	put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 64);
2708 	put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 72);
2709 	port_a = target_dev_id + 1;
2710 	port_b = port_a + 1;
2711 	memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
2712 	put_unaligned_be32(port_a, p + 20);
2713 	put_unaligned_be32(port_b, p + 48 + 20);
2714 	if (1 == pcontrol)
2715 		memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
2716 	return sizeof(sas_pcd_m_pg);
2717 }
2718 
2719 static int resp_sas_sha_m_spg(unsigned char *p, int pcontrol)
2720 {	/* SAS SSP shared protocol specific port mode subpage */
2721 	unsigned char sas_sha_m_pg[] = {0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
2722 		    0, 0, 0, 0, 0, 0, 0, 0,
2723 		};
2724 
2725 	memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg));
2726 	if (1 == pcontrol)
2727 		memset(p + 4, 0, sizeof(sas_sha_m_pg) - 4);
2728 	return sizeof(sas_sha_m_pg);
2729 }
2730 
2731 /* PAGE_SIZE is more than necessary but provides room for future expansion. */
2732 #define SDEBUG_MAX_MSENSE_SZ PAGE_SIZE
2733 
2734 static int resp_mode_sense(struct scsi_cmnd *scp,
2735 			   struct sdebug_dev_info *devip)
2736 {
2737 	int pcontrol, pcode, subpcode, bd_len;
2738 	unsigned char dev_spec;
2739 	u32 alloc_len, offset, len;
2740 	int target_dev_id;
2741 	int target = scp->device->id;
2742 	unsigned char *ap;
2743 	unsigned char *arr __free(kfree);
2744 	unsigned char *cmd = scp->cmnd;
2745 	bool dbd, llbaa, msense_6, is_disk, is_zbc;
2746 
2747 	arr = kzalloc(SDEBUG_MAX_MSENSE_SZ, GFP_ATOMIC);
2748 	if (!arr)
2749 		return -ENOMEM;
2750 	dbd = !!(cmd[1] & 0x8);		/* disable block descriptors */
2751 	pcontrol = (cmd[2] & 0xc0) >> 6;
2752 	pcode = cmd[2] & 0x3f;
2753 	subpcode = cmd[3];
2754 	msense_6 = (MODE_SENSE == cmd[0]);
2755 	llbaa = msense_6 ? false : !!(cmd[1] & 0x10);
2756 	is_disk = (sdebug_ptype == TYPE_DISK);
2757 	is_zbc = devip->zoned;
2758 	if ((is_disk || is_zbc) && !dbd)
2759 		bd_len = llbaa ? 16 : 8;
2760 	else
2761 		bd_len = 0;
2762 	alloc_len = msense_6 ? cmd[4] : get_unaligned_be16(cmd + 7);
2763 	memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
2764 	if (0x3 == pcontrol) {  /* Saving values not supported */
2765 		mk_sense_buffer(scp, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, 0);
2766 		return check_condition_result;
2767 	}
2768 	target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
2769 			(devip->target * 1000) - 3;
2770 	/* for disks+zbc set DPOFUA bit and clear write protect (WP) bit */
2771 	if (is_disk || is_zbc) {
2772 		dev_spec = 0x10;	/* =0x90 if WP=1 implies read-only */
2773 		if (sdebug_wp)
2774 			dev_spec |= 0x80;
2775 	} else
2776 		dev_spec = 0x0;
2777 	if (msense_6) {
2778 		arr[2] = dev_spec;
2779 		arr[3] = bd_len;
2780 		offset = 4;
2781 	} else {
2782 		arr[3] = dev_spec;
2783 		if (16 == bd_len)
2784 			arr[4] = 0x1;	/* set LONGLBA bit */
2785 		arr[7] = bd_len;	/* assume 255 or less */
2786 		offset = 8;
2787 	}
2788 	ap = arr + offset;
2789 	if ((bd_len > 0) && (!sdebug_capacity))
2790 		sdebug_capacity = get_sdebug_capacity();
2791 
2792 	if (8 == bd_len) {
2793 		if (sdebug_capacity > 0xfffffffe)
2794 			put_unaligned_be32(0xffffffff, ap + 0);
2795 		else
2796 			put_unaligned_be32(sdebug_capacity, ap + 0);
2797 		put_unaligned_be16(sdebug_sector_size, ap + 6);
2798 		offset += bd_len;
2799 		ap = arr + offset;
2800 	} else if (16 == bd_len) {
2801 		put_unaligned_be64((u64)sdebug_capacity, ap + 0);
2802 		put_unaligned_be32(sdebug_sector_size, ap + 12);
2803 		offset += bd_len;
2804 		ap = arr + offset;
2805 	}
2806 
2807 	/*
2808 	 * N.B. If len>0 before resp_*_pg() call, then form of that call should be:
2809 	 *        len += resp_*_pg(ap + len, pcontrol, target);
2810 	 */
2811 	switch (pcode) {
2812 	case 0x1:	/* Read-Write error recovery page, direct access */
2813 		if (subpcode > 0x0 && subpcode < 0xff)
2814 			goto bad_subpcode;
2815 		len = resp_err_recov_pg(ap, pcontrol, target);
2816 		offset += len;
2817 		break;
2818 	case 0x2:	/* Disconnect-Reconnect page, all devices */
2819 		if (subpcode > 0x0 && subpcode < 0xff)
2820 			goto bad_subpcode;
2821 		len = resp_disconnect_pg(ap, pcontrol, target);
2822 		offset += len;
2823 		break;
2824 	case 0x3:       /* Format device page, direct access */
2825 		if (subpcode > 0x0 && subpcode < 0xff)
2826 			goto bad_subpcode;
2827 		if (is_disk) {
2828 			len = resp_format_pg(ap, pcontrol, target);
2829 			offset += len;
2830 		} else {
2831 			goto bad_pcode;
2832 		}
2833 		break;
2834 	case 0x8:	/* Caching page, direct access */
2835 		if (subpcode > 0x0 && subpcode < 0xff)
2836 			goto bad_subpcode;
2837 		if (is_disk || is_zbc) {
2838 			len = resp_caching_pg(ap, pcontrol, target);
2839 			offset += len;
2840 		} else {
2841 			goto bad_pcode;
2842 		}
2843 		break;
2844 	case 0xa:	/* Control Mode page, all devices */
2845 		switch (subpcode) {
2846 		case 0:
2847 			len = resp_ctrl_m_pg(ap, pcontrol, target);
2848 			break;
2849 		case 0x05:
2850 			len = resp_grouping_m_pg(ap, pcontrol, target);
2851 			break;
2852 		case 0xff:
2853 			len = resp_ctrl_m_pg(ap, pcontrol, target);
2854 			len += resp_grouping_m_pg(ap + len, pcontrol, target);
2855 			break;
2856 		default:
2857 			goto bad_subpcode;
2858 		}
2859 		offset += len;
2860 		break;
2861 	case 0x19:	/* if spc==1 then sas phy, control+discover */
2862 		if (subpcode > 0x2 && subpcode < 0xff)
2863 			goto bad_subpcode;
2864 		len = 0;
2865 		if ((0x0 == subpcode) || (0xff == subpcode))
2866 			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2867 		if ((0x1 == subpcode) || (0xff == subpcode))
2868 			len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
2869 						  target_dev_id);
2870 		if ((0x2 == subpcode) || (0xff == subpcode))
2871 			len += resp_sas_sha_m_spg(ap + len, pcontrol);
2872 		offset += len;
2873 		break;
2874 	case 0x1c:	/* Informational Exceptions Mode page, all devices */
2875 		if (subpcode > 0x0 && subpcode < 0xff)
2876 			goto bad_subpcode;
2877 		len = resp_iec_m_pg(ap, pcontrol, target);
2878 		offset += len;
2879 		break;
2880 	case 0x3f:	/* Read all Mode pages */
2881 		if (subpcode > 0x0 && subpcode < 0xff)
2882 			goto bad_subpcode;
2883 		len = resp_err_recov_pg(ap, pcontrol, target);
2884 		len += resp_disconnect_pg(ap + len, pcontrol, target);
2885 		if (is_disk) {
2886 			len += resp_format_pg(ap + len, pcontrol, target);
2887 			len += resp_caching_pg(ap + len, pcontrol, target);
2888 		} else if (is_zbc) {
2889 			len += resp_caching_pg(ap + len, pcontrol, target);
2890 		}
2891 		len += resp_ctrl_m_pg(ap + len, pcontrol, target);
2892 		if (0xff == subpcode)
2893 			len += resp_grouping_m_pg(ap + len, pcontrol, target);
2894 		len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2895 		if (0xff == subpcode) {
2896 			len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
2897 						  target_dev_id);
2898 			len += resp_sas_sha_m_spg(ap + len, pcontrol);
2899 		}
2900 		len += resp_iec_m_pg(ap + len, pcontrol, target);
2901 		offset += len;
2902 		break;
2903 	default:
2904 		goto bad_pcode;
2905 	}
2906 	if (msense_6)
2907 		arr[0] = offset - 1;
2908 	else
2909 		put_unaligned_be16((offset - 2), arr + 0);
2910 	return fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, offset));
2911 
2912 bad_pcode:
2913 	mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2914 	return check_condition_result;
2915 
2916 bad_subpcode:
2917 	mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2918 	return check_condition_result;
2919 }
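/*
 * Length accounting example: a MODE SENSE(6) returning the 12-byte
 * control mode page with an 8-byte block descriptor ends with
 * offset == 4 + 8 + 12 == 24, so arr[0] (MODE DATA LENGTH, which
 * excludes itself) is written as 23. The 10-byte variant starts from an
 * 8-byte header (offset == 28) and stores 26, since its 2-byte length
 * field excludes two bytes.
 */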
2920 
2921 #define SDEBUG_MAX_MSELECT_SZ 512
2922 
2923 static int resp_mode_select(struct scsi_cmnd *scp,
2924 			    struct sdebug_dev_info *devip)
2925 {
2926 	int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
2927 	int param_len, res, mpage;
2928 	unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
2929 	unsigned char *cmd = scp->cmnd;
2930 	int mselect6 = (MODE_SELECT == cmd[0]);
2931 
2932 	memset(arr, 0, sizeof(arr));
2933 	pf = cmd[1] & 0x10;
2934 	sp = cmd[1] & 0x1;
2935 	param_len = mselect6 ? cmd[4] : get_unaligned_be16(cmd + 7);
2936 	if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
2937 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1);
2938 		return check_condition_result;
2939 	}
2940 	res = fetch_to_dev_buffer(scp, arr, param_len);
2941 	if (-1 == res)
2942 		return DID_ERROR << 16;
2943 	else if (sdebug_verbose && (res < param_len))
2944 		sdev_printk(KERN_INFO, scp->device,
2945 			    "%s: cdb indicated=%d, IO sent=%d bytes\n",
2946 			    __func__, param_len, res);
2947 	md_len = mselect6 ? (arr[0] + 1) : (get_unaligned_be16(arr + 0) + 2);
2948 	bd_len = mselect6 ? arr[3] : get_unaligned_be16(arr + 6);
2949 	off = bd_len + (mselect6 ? 4 : 8);
2950 	if (md_len > 2 || off >= res) {
2951 		mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
2952 		return check_condition_result;
2953 	}
2954 	mpage = arr[off] & 0x3f;
2955 	ps = !!(arr[off] & 0x80);
2956 	if (ps) {
2957 		mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 7);
2958 		return check_condition_result;
2959 	}
2960 	spf = !!(arr[off] & 0x40);
2961 	pg_len = spf ? (get_unaligned_be16(arr + off + 2) + 4) :
2962 		       (arr[off + 1] + 2);
2963 	if ((pg_len + off) > param_len) {
2964 		mk_sense_buffer(scp, ILLEGAL_REQUEST,
2965 				PARAMETER_LIST_LENGTH_ERR, 0);
2966 		return check_condition_result;
2967 	}
2968 	switch (mpage) {
2969 	case 0x8:      /* Caching Mode page */
2970 		if (caching_pg[1] == arr[off + 1]) {
2971 			memcpy(caching_pg + 2, arr + off + 2,
2972 			       sizeof(caching_pg) - 2);
2973 			goto set_mode_changed_ua;
2974 		}
2975 		break;
2976 	case 0xa:      /* Control Mode page */
2977 		if (ctrl_m_pg[1] == arr[off + 1]) {
2978 			memcpy(ctrl_m_pg + 2, arr + off + 2,
2979 			       sizeof(ctrl_m_pg) - 2);
2980 			if (ctrl_m_pg[4] & 0x8)
2981 				sdebug_wp = true;
2982 			else
2983 				sdebug_wp = false;
2984 			sdebug_dsense = !!(ctrl_m_pg[2] & 0x4);
2985 			goto set_mode_changed_ua;
2986 		}
2987 		break;
2988 	case 0x1c:      /* Informational Exceptions Mode page */
2989 		if (iec_m_pg[1] == arr[off + 1]) {
2990 			memcpy(iec_m_pg + 2, arr + off + 2,
2991 			       sizeof(iec_m_pg) - 2);
2992 			goto set_mode_changed_ua;
2993 		}
2994 		break;
2995 	default:
2996 		break;
2997 	}
2998 	mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 5);
2999 	return check_condition_result;
3000 set_mode_changed_ua:
3001 	set_bit(SDEBUG_UA_MODE_CHANGED, devip->uas_bm);
3002 	return 0;
3003 }
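
/*
 * Illustrative MODE SELECT(10) parameter list accepted by the function
 * above (a sketch, not driver code; the buffer name is hypothetical).
 * With no block descriptors, bd_len = 0 and the page starts at off = 8:
 *
 *	u8 pl[8 + 20] = { 0 };	// mode data length bytes stay 0
 *	pl[8] = 0x8;		// Caching mode page, PS bit clear
 *	pl[9] = 0x12;		// page length, must match caching_pg[1] (18)
 *	pl[10] = 0x4;		// set WCE
 *
 * On success the bytes after the page length are copied into caching_pg
 * and a MODE PARAMETERS CHANGED unit attention is queued for the device.
 */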
3004 
3005 static int resp_temp_l_pg(unsigned char *arr)
3006 {
3007 	unsigned char temp_l_pg[] = {0x0, 0x0, 0x3, 0x2, 0x0, 38,
3008 				     0x0, 0x1, 0x3, 0x2, 0x0, 65,
3009 		};
3010 
3011 	memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
3012 	return sizeof(temp_l_pg);
3013 }
3014 
3015 static int resp_ie_l_pg(unsigned char *arr)
3016 {
3017 	unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
3018 		};
3019 
3020 	memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
3021 	if (iec_m_pg[2] & 0x4) {	/* TEST bit set */
3022 		arr[4] = THRESHOLD_EXCEEDED;
3023 		arr[5] = 0xff;
3024 	}
3025 	return sizeof(ie_l_pg);
3026 }
3027 
3028 static int resp_env_rep_l_spg(unsigned char *arr)
3029 {
3030 	unsigned char env_rep_l_spg[] = {0x0, 0x0, 0x23, 0x8,
3031 					 0x0, 40, 72, 0xff, 45, 18, 0, 0,
3032 					 0x1, 0x0, 0x23, 0x8,
3033 					 0x0, 55, 72, 35, 55, 45, 0, 0,
3034 		};
3035 
3036 	memcpy(arr, env_rep_l_spg, sizeof(env_rep_l_spg));
3037 	return sizeof(env_rep_l_spg);
3038 }
3039 
3040 #define SDEBUG_MAX_LSENSE_SZ 512
3041 
3042 static int resp_log_sense(struct scsi_cmnd *scp,
3043 			  struct sdebug_dev_info *devip)
3044 {
3045 	int ppc, sp, pcode, subpcode;
3046 	u32 alloc_len, len, n;
3047 	unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
3048 	unsigned char *cmd = scp->cmnd;
3049 
3050 	memset(arr, 0, sizeof(arr));
3051 	ppc = cmd[1] & 0x2;
3052 	sp = cmd[1] & 0x1;
3053 	if (ppc || sp) {
3054 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, ppc ? 1 : 0);
3055 		return check_condition_result;
3056 	}
3057 	pcode = cmd[2] & 0x3f;
3058 	subpcode = cmd[3] & 0xff;
3059 	alloc_len = get_unaligned_be16(cmd + 7);
3060 	arr[0] = pcode;
3061 	if (0 == subpcode) {
3062 		switch (pcode) {
3063 		case 0x0:	/* Supported log pages log page */
3064 			n = 4;
3065 			arr[n++] = 0x0;		/* this page */
3066 			arr[n++] = 0xd;		/* Temperature */
3067 			arr[n++] = 0x2f;	/* Informational exceptions */
3068 			arr[3] = n - 4;
3069 			break;
3070 		case 0xd:	/* Temperature log page */
3071 			arr[3] = resp_temp_l_pg(arr + 4);
3072 			break;
3073 		case 0x2f:	/* Informational exceptions log page */
3074 			arr[3] = resp_ie_l_pg(arr + 4);
3075 			break;
3076 		default:
3077 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
3078 			return check_condition_result;
3079 		}
3080 	} else if (0xff == subpcode) {
3081 		arr[0] |= 0x40;
3082 		arr[1] = subpcode;
3083 		switch (pcode) {
3084 		case 0x0:	/* Supported log pages and subpages log page */
3085 			n = 4;
3086 			arr[n++] = 0x0;
3087 			arr[n++] = 0x0;		/* 0,0 page */
3088 			arr[n++] = 0x0;
3089 			arr[n++] = 0xff;	/* this page */
3090 			arr[n++] = 0xd;
3091 			arr[n++] = 0x0;		/* Temperature */
3092 			arr[n++] = 0xd;
3093 			arr[n++] = 0x1;		/* Environment reporting */
3094 			arr[n++] = 0xd;
3095 			arr[n++] = 0xff;	/* all 0xd subpages */
3096 			arr[n++] = 0x2f;
3097 			arr[n++] = 0x0;	/* Informational exceptions */
3098 			arr[n++] = 0x2f;
3099 			arr[n++] = 0xff;	/* all 0x2f subpages */
3100 			arr[3] = n - 4;
3101 			break;
3102 		case 0xd:	/* Temperature subpages */
3103 			n = 4;
3104 			arr[n++] = 0xd;
3105 			arr[n++] = 0x0;		/* Temperature */
3106 			arr[n++] = 0xd;
3107 			arr[n++] = 0x1;		/* Environment reporting */
3108 			arr[n++] = 0xd;
3109 			arr[n++] = 0xff;	/* these subpages */
3110 			arr[3] = n - 4;
3111 			break;
3112 		case 0x2f:	/* Informational exceptions subpages */
3113 			n = 4;
3114 			arr[n++] = 0x2f;
3115 			arr[n++] = 0x0;		/* Informational exceptions */
3116 			arr[n++] = 0x2f;
3117 			arr[n++] = 0xff;	/* these subpages */
3118 			arr[3] = n - 4;
3119 			break;
3120 		default:
3121 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
3122 			return check_condition_result;
3123 		}
3124 	} else if (subpcode > 0) {
3125 		arr[0] |= 0x40;
3126 		arr[1] = subpcode;
3127 		if (pcode == 0xd && subpcode == 1)
3128 			arr[3] = resp_env_rep_l_spg(arr + 4);
3129 		else {
3130 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
3131 			return check_condition_result;
3132 		}
3133 	} else {
3134 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
3135 		return check_condition_result;
3136 	}
3137 	len = min_t(u32, get_unaligned_be16(arr + 2) + 4, alloc_len);
3138 	return fill_from_dev_buffer(scp, arr,
3139 		    min_t(u32, len, SDEBUG_MAX_INQ_ARR_SZ));
3140 }
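
/*
 * Illustrative CDB (a sketch): LOG SENSE with cmd[2] = 0x0d and
 * cmd[3] = 0x01 selects the Temperature page's Environment reporting
 * subpage, so the response is built by resp_env_rep_l_spg() with the
 * SPF bit (0x40) set in byte 0; cmd[3] = 0x00 returns the plain
 * Temperature page from resp_temp_l_pg() instead.
 */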
3141 
3142 static inline bool sdebug_dev_is_zoned(struct sdebug_dev_info *devip)
3143 {
3144 	return devip->nr_zones != 0;
3145 }
3146 
3147 static struct sdeb_zone_state *zbc_zone(struct sdebug_dev_info *devip,
3148 					unsigned long long lba)
3149 {
3150 	u32 zno = lba >> devip->zsize_shift;
3151 	struct sdeb_zone_state *zsp;
3152 
3153 	if (devip->zcap == devip->zsize || zno < devip->nr_conv_zones)
3154 		return &devip->zstate[zno];
3155 
3156 	/*
3157 	 * If the zone capacity is less than the zone size, adjust for gap
3158 	 * zones.
3159 	 */
3160 	zno = 2 * zno - devip->nr_conv_zones;
3161 	WARN_ONCE(zno >= devip->nr_zones, "%u >= %u\n", zno, devip->nr_zones);
3162 	zsp = &devip->zstate[zno];
3163 	if (lba >= zsp->z_start + zsp->z_size)
3164 		zsp++;
3165 	WARN_ON_ONCE(lba >= zsp->z_start + zsp->z_size);
3166 	return zsp;
3167 }
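
/*
 * Worked example for the gap-zone arithmetic above: with zsize = 0x8000
 * (zsize_shift = 15), zcap = 0x6000 and nr_conv_zones = 1, zstate[]
 * holds the conventional zone followed by (sequential, gap) pairs.
 * LBA 0xd000 gives zno = 1 and an array index of 2 * 1 - 1 = 1, the
 * sequential zone covering 0x8000..0xdfff; LBA 0xe000 also gives
 * zno = 1 but lies past z_start + z_size, so zsp++ selects the gap
 * zone at index 2.
 */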
3168 
3169 static inline bool zbc_zone_is_conv(struct sdeb_zone_state *zsp)
3170 {
3171 	return zsp->z_type == ZBC_ZTYPE_CNV;
3172 }
3173 
3174 static inline bool zbc_zone_is_gap(struct sdeb_zone_state *zsp)
3175 {
3176 	return zsp->z_type == ZBC_ZTYPE_GAP;
3177 }
3178 
3179 static inline bool zbc_zone_is_seq(struct sdeb_zone_state *zsp)
3180 {
3181 	return !zbc_zone_is_conv(zsp) && !zbc_zone_is_gap(zsp);
3182 }
3183 
3184 static void zbc_close_zone(struct sdebug_dev_info *devip,
3185 			   struct sdeb_zone_state *zsp)
3186 {
3187 	enum sdebug_z_cond zc;
3188 
3189 	if (!zbc_zone_is_seq(zsp))
3190 		return;
3191 
3192 	zc = zsp->z_cond;
3193 	if (!(zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN))
3194 		return;
3195 
3196 	if (zc == ZC2_IMPLICIT_OPEN)
3197 		devip->nr_imp_open--;
3198 	else
3199 		devip->nr_exp_open--;
3200 
3201 	if (zsp->z_wp == zsp->z_start) {
3202 		zsp->z_cond = ZC1_EMPTY;
3203 	} else {
3204 		zsp->z_cond = ZC4_CLOSED;
3205 		devip->nr_closed++;
3206 	}
3207 }
3208 
3209 static void zbc_close_imp_open_zone(struct sdebug_dev_info *devip)
3210 {
3211 	struct sdeb_zone_state *zsp = &devip->zstate[0];
3212 	unsigned int i;
3213 
3214 	for (i = 0; i < devip->nr_zones; i++, zsp++) {
3215 		if (zsp->z_cond == ZC2_IMPLICIT_OPEN) {
3216 			zbc_close_zone(devip, zsp);
3217 			return;
3218 		}
3219 	}
3220 }
3221 
3222 static void zbc_open_zone(struct sdebug_dev_info *devip,
3223 			  struct sdeb_zone_state *zsp, bool explicit)
3224 {
3225 	enum sdebug_z_cond zc;
3226 
3227 	if (!zbc_zone_is_seq(zsp))
3228 		return;
3229 
3230 	zc = zsp->z_cond;
3231 	if ((explicit && zc == ZC3_EXPLICIT_OPEN) ||
3232 	    (!explicit && zc == ZC2_IMPLICIT_OPEN))
3233 		return;
3234 
3235 	/* Close an implicit open zone if necessary */
3236 	if (explicit && zsp->z_cond == ZC2_IMPLICIT_OPEN)
3237 		zbc_close_zone(devip, zsp);
3238 	else if (devip->max_open &&
3239 		 devip->nr_imp_open + devip->nr_exp_open >= devip->max_open)
3240 		zbc_close_imp_open_zone(devip);
3241 
3242 	if (zsp->z_cond == ZC4_CLOSED)
3243 		devip->nr_closed--;
3244 	if (explicit) {
3245 		zsp->z_cond = ZC3_EXPLICIT_OPEN;
3246 		devip->nr_exp_open++;
3247 	} else {
3248 		zsp->z_cond = ZC2_IMPLICIT_OPEN;
3249 		devip->nr_imp_open++;
3250 	}
3251 }
3252 
3253 static inline void zbc_set_zone_full(struct sdebug_dev_info *devip,
3254 				     struct sdeb_zone_state *zsp)
3255 {
3256 	switch (zsp->z_cond) {
3257 	case ZC2_IMPLICIT_OPEN:
3258 		devip->nr_imp_open--;
3259 		break;
3260 	case ZC3_EXPLICIT_OPEN:
3261 		devip->nr_exp_open--;
3262 		break;
3263 	default:
3264 		WARN_ONCE(true, "Invalid zone %llu condition %x\n",
3265 			  zsp->z_start, zsp->z_cond);
3266 		break;
3267 	}
3268 	zsp->z_cond = ZC5_FULL;
3269 }
3270 
3271 static void zbc_inc_wp(struct sdebug_dev_info *devip,
3272 		       unsigned long long lba, unsigned int num)
3273 {
3274 	struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
3275 	unsigned long long n, end, zend = zsp->z_start + zsp->z_size;
3276 
3277 	if (!zbc_zone_is_seq(zsp))
3278 		return;
3279 
3280 	if (zsp->z_type == ZBC_ZTYPE_SWR) {
3281 		zsp->z_wp += num;
3282 		if (zsp->z_wp >= zend)
3283 			zbc_set_zone_full(devip, zsp);
3284 		return;
3285 	}
3286 
3287 	while (num) {
3288 		if (lba != zsp->z_wp)
3289 			zsp->z_non_seq_resource = true;
3290 
3291 		end = lba + num;
3292 		if (end >= zend) {
3293 			n = zend - lba;
3294 			zsp->z_wp = zend;
3295 		} else if (end > zsp->z_wp) {
3296 			n = num;
3297 			zsp->z_wp = end;
3298 		} else {
3299 			n = num;
3300 		}
3301 		if (zsp->z_wp >= zend)
3302 			zbc_set_zone_full(devip, zsp);
3303 
3304 		num -= n;
3305 		lba += n;
3306 		if (num) {
3307 			zsp++;
3308 			zend = zsp->z_start + zsp->z_size;
3309 		}
3310 	}
3311 }
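
/*
 * Worked example for the sequential-write-preferred loop above: take a
 * zone with z_start = 0, z_size = 0x100 and z_wp = 8.  A 4 block write
 * at LBA 0 is not at the write pointer, so z_non_seq_resource is set,
 * and since end = 4 does not pass the wp, z_wp stays 8.  A 16 block
 * write at LBA 8 advances z_wp to 24.  For SWR zones the wp simply
 * advances by num and the zone goes full when the wp reaches the end.
 */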
3312 
3313 static int check_zbc_access_params(struct scsi_cmnd *scp,
3314 			unsigned long long lba, unsigned int num, bool write)
3315 {
3316 	struct scsi_device *sdp = scp->device;
3317 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
3318 	struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
3319 	struct sdeb_zone_state *zsp_end = zbc_zone(devip, lba + num - 1);
3320 
3321 	if (!write) {
3322 		/* For host-managed, reads cannot cross zone type boundaries */
3323 		if (zsp->z_type != zsp_end->z_type) {
3324 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
3325 					LBA_OUT_OF_RANGE,
3326 					READ_INVDATA_ASCQ);
3327 			return check_condition_result;
3328 		}
3329 		return 0;
3330 	}
3331 
3332 	/* Writing into a gap zone is not allowed */
3333 	if (zbc_zone_is_gap(zsp)) {
3334 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE,
3335 				ATTEMPT_ACCESS_GAP);
3336 		return check_condition_result;
3337 	}
3338 
3339 	/* No restrictions for writes within conventional zones */
3340 	if (zbc_zone_is_conv(zsp)) {
3341 		if (!zbc_zone_is_conv(zsp_end)) {
3342 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
3343 					LBA_OUT_OF_RANGE,
3344 					WRITE_BOUNDARY_ASCQ);
3345 			return check_condition_result;
3346 		}
3347 		return 0;
3348 	}
3349 
3350 	if (zsp->z_type == ZBC_ZTYPE_SWR) {
3351 		/* Writes cannot cross sequential zone boundaries */
3352 		if (zsp_end != zsp) {
3353 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
3354 					LBA_OUT_OF_RANGE,
3355 					WRITE_BOUNDARY_ASCQ);
3356 			return check_condition_result;
3357 		}
3358 		/* Cannot write full zones */
3359 		if (zsp->z_cond == ZC5_FULL) {
3360 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
3361 					INVALID_FIELD_IN_CDB, 0);
3362 			return check_condition_result;
3363 		}
3364 		/* Writes must be aligned to the zone WP */
3365 		if (lba != zsp->z_wp) {
3366 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
3367 					LBA_OUT_OF_RANGE,
3368 					UNALIGNED_WRITE_ASCQ);
3369 			return check_condition_result;
3370 		}
3371 	}
3372 
3373 	/* Handle implicit open of closed and empty zones */
3374 	if (zsp->z_cond == ZC1_EMPTY || zsp->z_cond == ZC4_CLOSED) {
3375 		if (devip->max_open &&
3376 		    devip->nr_exp_open >= devip->max_open) {
3377 			mk_sense_buffer(scp, DATA_PROTECT,
3378 					INSUFF_RES_ASC,
3379 					INSUFF_ZONE_ASCQ);
3380 			return check_condition_result;
3381 		}
3382 		zbc_open_zone(devip, zsp, false);
3383 	}
3384 
3385 	return 0;
3386 }
3387 
3388 static inline int check_device_access_params
3389 			(struct scsi_cmnd *scp, unsigned long long lba,
3390 			 unsigned int num, bool write)
3391 {
3392 	struct scsi_device *sdp = scp->device;
3393 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
3394 
3395 	if (lba + num > sdebug_capacity) {
3396 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
3397 		return check_condition_result;
3398 	}
3399 	/* transfer length excessive (tie in to block limits VPD page) */
3400 	if (num > sdebug_store_sectors) {
3401 		/* needs work to find which cdb byte 'num' comes from */
3402 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3403 		return check_condition_result;
3404 	}
3405 	if (write && unlikely(sdebug_wp)) {
3406 		mk_sense_buffer(scp, DATA_PROTECT, WRITE_PROTECTED, 0x2);
3407 		return check_condition_result;
3408 	}
3409 	if (sdebug_dev_is_zoned(devip))
3410 		return check_zbc_access_params(scp, lba, num, write);
3411 
3412 	return 0;
3413 }
3414 
3415 /*
3416  * Note: if BUG_ON() fires it usually indicates a problem with the parser
3417  * tables. Perhaps a missing F_FAKE_RW or FF_MEDIA_IO flag. Response functions
3418  * that access any of the "stores" in struct sdeb_store_info should call this
3419  * function with bug_if_fake_rw set to true.
3420  */
3421 static inline struct sdeb_store_info *devip2sip(struct sdebug_dev_info *devip,
3422 						bool bug_if_fake_rw)
3423 {
3424 	if (sdebug_fake_rw) {
3425 		BUG_ON(bug_if_fake_rw);	/* See note above */
3426 		return NULL;
3427 	}
3428 	return xa_load(per_store_ap, devip->sdbg_host->si_idx);
3429 }
3430 
3431 static inline void
3432 sdeb_read_lock(rwlock_t *lock)
3433 {
3434 	if (sdebug_no_rwlock)
3435 		__acquire(lock);
3436 	else
3437 		read_lock(lock);
3438 }
3439 
3440 static inline void
3441 sdeb_read_unlock(rwlock_t *lock)
3442 {
3443 	if (sdebug_no_rwlock)
3444 		__release(lock);
3445 	else
3446 		read_unlock(lock);
3447 }
3448 
3449 static inline void
3450 sdeb_write_lock(rwlock_t *lock)
3451 {
3452 	if (sdebug_no_rwlock)
3453 		__acquire(lock);
3454 	else
3455 		write_lock(lock);
3456 }
3457 
3458 static inline void
3459 sdeb_write_unlock(rwlock_t *lock)
3460 {
3461 	if (sdebug_no_rwlock)
3462 		__release(lock);
3463 	else
3464 		write_unlock(lock);
3465 }
3466 
3467 static inline void
3468 sdeb_data_read_lock(struct sdeb_store_info *sip)
3469 {
3470 	BUG_ON(!sip);
3471 
3472 	sdeb_read_lock(&sip->macc_data_lck);
3473 }
3474 
3475 static inline void
3476 sdeb_data_read_unlock(struct sdeb_store_info *sip)
3477 {
3478 	BUG_ON(!sip);
3479 
3480 	sdeb_read_unlock(&sip->macc_data_lck);
3481 }
3482 
3483 static inline void
3484 sdeb_data_write_lock(struct sdeb_store_info *sip)
3485 {
3486 	BUG_ON(!sip);
3487 
3488 	sdeb_write_lock(&sip->macc_data_lck);
3489 }
3490 
3491 static inline void
3492 sdeb_data_write_unlock(struct sdeb_store_info *sip)
3493 {
3494 	BUG_ON(!sip);
3495 
3496 	sdeb_write_unlock(&sip->macc_data_lck);
3497 }
3498 
3499 static inline void
3500 sdeb_data_sector_read_lock(struct sdeb_store_info *sip)
3501 {
3502 	BUG_ON(!sip);
3503 
3504 	sdeb_read_lock(&sip->macc_sector_lck);
3505 }
3506 
3507 static inline void
3508 sdeb_data_sector_read_unlock(struct sdeb_store_info *sip)
3509 {
3510 	BUG_ON(!sip);
3511 
3512 	sdeb_read_unlock(&sip->macc_sector_lck);
3513 }
3514 
3515 static inline void
3516 sdeb_data_sector_write_lock(struct sdeb_store_info *sip)
3517 {
3518 	BUG_ON(!sip);
3519 
3520 	sdeb_write_lock(&sip->macc_sector_lck);
3521 }
3522 
3523 static inline void
3524 sdeb_data_sector_write_unlock(struct sdeb_store_info *sip)
3525 {
3526 	BUG_ON(!sip);
3527 
3528 	sdeb_write_unlock(&sip->macc_sector_lck);
3529 }
3530 
3531 /*
3532  * Atomic locking:
3533  * We simplify the atomic model to allow only 1x atomic write and many non-
3534  * atomic reads or writes for all LBAs.
3535  *
3536  * A RW lock has a similar behaviour:
3537  * Only 1x writer and many readers.
3538  *
3539  * So use a RW lock for per-device read and write locking:
3540  * An atomic access grabs the lock as a writer and non-atomic grabs the lock
3541  * as a reader.
3542  */
3543 
3544 static inline void
3545 sdeb_data_lock(struct sdeb_store_info *sip, bool atomic)
3546 {
3547 	if (atomic)
3548 		sdeb_data_write_lock(sip);
3549 	else
3550 		sdeb_data_read_lock(sip);
3551 }
3552 
3553 static inline void
3554 sdeb_data_unlock(struct sdeb_store_info *sip, bool atomic)
3555 {
3556 	if (atomic)
3557 		sdeb_data_write_unlock(sip);
3558 	else
3559 		sdeb_data_read_unlock(sip);
3560 }
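
/*
 * Usage sketch (illustrative): callers such as do_device_access() below
 * bracket their copy loop with the pair above, so one atomic write
 * excludes everything else while non-atomic accesses run concurrently:
 *
 *	sdeb_data_lock(sip, atomic);
 *	// copy the sectors
 *	sdeb_data_unlock(sip, atomic);
 */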
3561 
3562 /* Allow many reads but only 1x write per sector */
3563 static inline void
3564 sdeb_data_sector_lock(struct sdeb_store_info *sip, bool do_write)
3565 {
3566 	if (do_write)
3567 		sdeb_data_sector_write_lock(sip);
3568 	else
3569 		sdeb_data_sector_read_lock(sip);
3570 }
3571 
3572 static inline void
3573 sdeb_data_sector_unlock(struct sdeb_store_info *sip, bool do_write)
3574 {
3575 	if (do_write)
3576 		sdeb_data_sector_write_unlock(sip);
3577 	else
3578 		sdeb_data_sector_read_unlock(sip);
3579 }
3580 
3581 static inline void
3582 sdeb_meta_read_lock(struct sdeb_store_info *sip)
3583 {
3584 	if (sdebug_no_rwlock) {
3585 		if (sip)
3586 			__acquire(&sip->macc_meta_lck);
3587 		else
3588 			__acquire(&sdeb_fake_rw_lck);
3589 	} else {
3590 		if (sip)
3591 			read_lock(&sip->macc_meta_lck);
3592 		else
3593 			read_lock(&sdeb_fake_rw_lck);
3594 	}
3595 }
3596 
3597 static inline void
3598 sdeb_meta_read_unlock(struct sdeb_store_info *sip)
3599 {
3600 	if (sdebug_no_rwlock) {
3601 		if (sip)
3602 			__release(&sip->macc_meta_lck);
3603 		else
3604 			__release(&sdeb_fake_rw_lck);
3605 	} else {
3606 		if (sip)
3607 			read_unlock(&sip->macc_meta_lck);
3608 		else
3609 			read_unlock(&sdeb_fake_rw_lck);
3610 	}
3611 }
3612 
3613 static inline void
3614 sdeb_meta_write_lock(struct sdeb_store_info *sip)
3615 {
3616 	if (sdebug_no_rwlock) {
3617 		if (sip)
3618 			__acquire(&sip->macc_meta_lck);
3619 		else
3620 			__acquire(&sdeb_fake_rw_lck);
3621 	} else {
3622 		if (sip)
3623 			write_lock(&sip->macc_meta_lck);
3624 		else
3625 			write_lock(&sdeb_fake_rw_lck);
3626 	}
3627 }
3628 
3629 static inline void
3630 sdeb_meta_write_unlock(struct sdeb_store_info *sip)
3631 {
3632 	if (sdebug_no_rwlock) {
3633 		if (sip)
3634 			__release(&sip->macc_meta_lck);
3635 		else
3636 			__release(&sdeb_fake_rw_lck);
3637 	} else {
3638 		if (sip)
3639 			write_unlock(&sip->macc_meta_lck);
3640 		else
3641 			write_unlock(&sdeb_fake_rw_lck);
3642 	}
3643 }
3644 
3645 /* Returns number of bytes copied or -1 if error. */
3646 static int do_device_access(struct sdeb_store_info *sip, struct scsi_cmnd *scp,
3647 			    u32 sg_skip, u64 lba, u32 num, u8 group_number,
3648 			    bool do_write, bool atomic)
3649 {
3650 	int ret;
3651 	u64 block;
3652 	enum dma_data_direction dir;
3653 	struct scsi_data_buffer *sdb = &scp->sdb;
3654 	u8 *fsp;
3655 	int i;
3656 
3657 	/*
3658 	 * Even though reads are inherently atomic (in this driver), we expect
3659 	 * the atomic flag only for writes.
3660 	 */
3661 	if (!do_write && atomic)
3662 		return -1;
3663 
3664 	if (do_write) {
3665 		dir = DMA_TO_DEVICE;
3666 		write_since_sync = true;
3667 	} else {
3668 		dir = DMA_FROM_DEVICE;
3669 	}
3670 
3671 	if (!sdb->length || !sip)
3672 		return 0;
3673 	if (scp->sc_data_direction != dir)
3674 		return -1;
3675 
3676 	if (do_write && group_number < ARRAY_SIZE(writes_by_group_number))
3677 		atomic_long_inc(&writes_by_group_number[group_number]);
3678 
3679 	fsp = sip->storep;
3680 
3681 	block = do_div(lba, sdebug_store_sectors);
3682 
3683 	/* Only allow 1x atomic write or multiple non-atomic writes at any given time */
3684 	sdeb_data_lock(sip, atomic);
3685 	for (i = 0; i < num; i++) {
3686 		/* We shouldn't need to lock for atomic writes, but do it anyway */
3687 		sdeb_data_sector_lock(sip, do_write);
3688 		ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
3689 		   fsp + (block * sdebug_sector_size),
3690 		   sdebug_sector_size, sg_skip, do_write);
3691 		sdeb_data_sector_unlock(sip, do_write);
3692 		if (ret != sdebug_sector_size) {
3693 			ret += (i * sdebug_sector_size);
3694 			break;
3695 		}
3696 		sg_skip += sdebug_sector_size;
3697 		if (++block >= sdebug_store_sectors)
3698 			block = 0;
3699 	}
3700 	if (i == num)	/* don't clobber the partial count from a short copy */
		ret = num * sdebug_sector_size;
3701 	sdeb_data_unlock(sip, atomic);
3702 
3703 	return ret;
3704 }
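
/*
 * Note on the do_div() call above (standard kernel helper):
 * block = do_div(lba, sdebug_store_sectors) leaves the quotient in lba
 * and returns the remainder.  E.g. with sdebug_store_sectors = 0x2000
 * and lba = 0x2005, block becomes 5; the copy loop then wraps block
 * back to 0 at the store end, letting a small store back a larger
 * reported capacity.
 */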
3705 
3706 /* Returns number of bytes copied or -1 if error. */
3707 static int do_dout_fetch(struct scsi_cmnd *scp, u32 num, u8 *doutp)
3708 {
3709 	struct scsi_data_buffer *sdb = &scp->sdb;
3710 
3711 	if (!sdb->length)
3712 		return 0;
3713 	if (scp->sc_data_direction != DMA_TO_DEVICE)
3714 		return -1;
3715 	return sg_copy_buffer(sdb->table.sgl, sdb->table.nents, doutp,
3716 			      num * sdebug_sector_size, 0, true);
3717 }
3718 
3719 /* If the store at lba compares equal to the first num blocks of arr,
3720  * then copy the second num blocks of arr into the store at lba and
3721  * return true. If the comparison fails then return false. */
3722 static bool comp_write_worker(struct sdeb_store_info *sip, u64 lba, u32 num,
3723 			      const u8 *arr, bool compare_only)
3724 {
3725 	bool res;
3726 	u64 block, rest = 0;
3727 	u32 store_blks = sdebug_store_sectors;
3728 	u32 lb_size = sdebug_sector_size;
3729 	u8 *fsp = sip->storep;
3730 
3731 	block = do_div(lba, store_blks);
3732 	if (block + num > store_blks)
3733 		rest = block + num - store_blks;
3734 
3735 	res = !memcmp(fsp + (block * lb_size), arr, (num - rest) * lb_size);
3736 	if (!res)
3737 		return res;
3738 	if (rest)
3739 		res = memcmp(fsp, arr + ((num - rest) * lb_size),
3740 			     rest * lb_size);
3741 	if (!res)
3742 		return res;
3743 	if (compare_only)
3744 		return true;
3745 	arr += num * lb_size;
3746 	memcpy(fsp + (block * lb_size), arr, (num - rest) * lb_size);
3747 	if (rest)
3748 		memcpy(fsp, arr + ((num - rest) * lb_size), rest * lb_size);
3749 	return res;
3750 }
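
/*
 * Illustrative buffer layout for the compare-and-write above with
 * num = 2 and 512 byte blocks: arr holds 4 blocks in all; bytes
 * 0..1023 are the verify data matched against the store and bytes
 * 1024..2047 are written back only when the match succeeds (see
 * resp_comp_write() below, which fetches dnum = 2 * num blocks).
 */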
3751 
3752 static __be16 dif_compute_csum(const void *buf, int len)
3753 {
3754 	__be16 csum;
3755 
3756 	if (sdebug_guard)
3757 		csum = (__force __be16)ip_compute_csum(buf, len);
3758 	else
3759 		csum = cpu_to_be16(crc_t10dif(buf, len));
3760 
3761 	return csum;
3762 }
3763 
3764 static int dif_verify(struct t10_pi_tuple *sdt, const void *data,
3765 		      sector_t sector, u32 ei_lba)
3766 {
3767 	__be16 csum = dif_compute_csum(data, sdebug_sector_size);
3768 
3769 	if (sdt->guard_tag != csum) {
3770 		pr_err("GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
3771 			(unsigned long)sector,
3772 			be16_to_cpu(sdt->guard_tag),
3773 			be16_to_cpu(csum));
3774 		return 0x01;
3775 	}
3776 	if (sdebug_dif == T10_PI_TYPE1_PROTECTION &&
3777 	    be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
3778 		pr_err("REF check failed on sector %lu\n",
3779 			(unsigned long)sector);
3780 		return 0x03;
3781 	}
3782 	if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3783 	    be32_to_cpu(sdt->ref_tag) != ei_lba) {
3784 		pr_err("REF check failed on sector %lu\n",
3785 			(unsigned long)sector);
3786 		return 0x03;
3787 	}
3788 	return 0;
3789 }
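
/*
 * Illustrative PI tuple that passes the checks above for type 1
 * protection on sector 0x1234: guard_tag = dif_compute_csum() of the
 * sector data, ref_tag = 0x1234 (low 32 bits of the LBA) and any
 * app_tag; an app_tag of 0xffff makes prot_verify_read() skip the
 * sector entirely.
 */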
3790 
3791 static void dif_copy_prot(struct scsi_cmnd *scp, sector_t sector,
3792 			  unsigned int sectors, bool read)
3793 {
3794 	size_t resid;
3795 	void *paddr;
3796 	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3797 						scp->device->hostdata, true);
3798 	struct t10_pi_tuple *dif_storep = sip->dif_storep;
3799 	const void *dif_store_end = dif_storep + sdebug_store_sectors;
3800 	struct sg_mapping_iter miter;
3801 
3802 	/* Bytes of protection data to copy into sgl */
3803 	resid = sectors * sizeof(*dif_storep);
3804 
3805 	sg_miter_start(&miter, scsi_prot_sglist(scp),
3806 		       scsi_prot_sg_count(scp), SG_MITER_ATOMIC |
3807 		       (read ? SG_MITER_TO_SG : SG_MITER_FROM_SG));
3808 
3809 	while (sg_miter_next(&miter) && resid > 0) {
3810 		size_t len = min_t(size_t, miter.length, resid);
3811 		void *start = dif_store(sip, sector);
3812 		size_t rest = 0;
3813 
3814 		if (dif_store_end < start + len)
3815 			rest = start + len - dif_store_end;
3816 
3817 		paddr = miter.addr;
3818 
3819 		if (read)
3820 			memcpy(paddr, start, len - rest);
3821 		else
3822 			memcpy(start, paddr, len - rest);
3823 
3824 		if (rest) {
3825 			if (read)
3826 				memcpy(paddr + len - rest, dif_storep, rest);
3827 			else
3828 				memcpy(dif_storep, paddr + len - rest, rest);
3829 		}
3830 
3831 		sector += len / sizeof(*dif_storep);
3832 		resid -= len;
3833 	}
3834 	sg_miter_stop(&miter);
3835 }
3836 
3837 static int prot_verify_read(struct scsi_cmnd *scp, sector_t start_sec,
3838 			    unsigned int sectors, u32 ei_lba)
3839 {
3840 	int ret = 0;
3841 	unsigned int i;
3842 	sector_t sector;
3843 	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3844 						scp->device->hostdata, true);
3845 	struct t10_pi_tuple *sdt;
3846 
3847 	for (i = 0; i < sectors; i++, ei_lba++) {
3848 		sector = start_sec + i;
3849 		sdt = dif_store(sip, sector);
3850 
3851 		if (sdt->app_tag == cpu_to_be16(0xffff))
3852 			continue;
3853 
3854 		/*
3855 		 * Because scsi_debug acts as both initiator and
3856 		 * target we proceed to verify the PI even if
3857 		 * RDPROTECT=3. This is done so the "initiator" knows
3858 		 * which type of error to return. Otherwise we would
3859 		 * have to iterate over the PI twice.
3860 		 */
3861 		if (scp->cmnd[1] >> 5) { /* RDPROTECT */
3862 			ret = dif_verify(sdt, lba2fake_store(sip, sector),
3863 					 sector, ei_lba);
3864 			if (ret) {
3865 				dif_errors++;
3866 				break;
3867 			}
3868 		}
3869 	}
3870 
3871 	dif_copy_prot(scp, start_sec, sectors, true);
3872 	dix_reads++;
3873 
3874 	return ret;
3875 }
3876 
3877 static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3878 {
3879 	bool check_prot;
3880 	u32 num;
3881 	u32 ei_lba;
3882 	int ret;
3883 	u64 lba;
3884 	struct sdeb_store_info *sip = devip2sip(devip, true);
3885 	u8 *cmd = scp->cmnd;
3886 	bool meta_data_locked = false;
3887 
3888 	switch (cmd[0]) {
3889 	case READ_16:
3890 		ei_lba = 0;
3891 		lba = get_unaligned_be64(cmd + 2);
3892 		num = get_unaligned_be32(cmd + 10);
3893 		check_prot = true;
3894 		break;
3895 	case READ_10:
3896 		ei_lba = 0;
3897 		lba = get_unaligned_be32(cmd + 2);
3898 		num = get_unaligned_be16(cmd + 7);
3899 		check_prot = true;
3900 		break;
3901 	case READ_6:
3902 		ei_lba = 0;
3903 		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
3904 		      (u32)(cmd[1] & 0x1f) << 16;
3905 		num = (0 == cmd[4]) ? 256 : cmd[4];
3906 		check_prot = true;
3907 		break;
3908 	case READ_12:
3909 		ei_lba = 0;
3910 		lba = get_unaligned_be32(cmd + 2);
3911 		num = get_unaligned_be32(cmd + 6);
3912 		check_prot = true;
3913 		break;
3914 	case XDWRITEREAD_10:
3915 		ei_lba = 0;
3916 		lba = get_unaligned_be32(cmd + 2);
3917 		num = get_unaligned_be16(cmd + 7);
3918 		check_prot = false;
3919 		break;
3920 	default:	/* assume READ(32) */
3921 		lba = get_unaligned_be64(cmd + 12);
3922 		ei_lba = get_unaligned_be32(cmd + 20);
3923 		num = get_unaligned_be32(cmd + 28);
3924 		check_prot = false;
3925 		break;
3926 	}
3927 	if (unlikely(have_dif_prot && check_prot)) {
3928 		if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3929 		    (cmd[1] & 0xe0)) {
3930 			mk_sense_invalid_opcode(scp);
3931 			return check_condition_result;
3932 		}
3933 		if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3934 		     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3935 		    (cmd[1] & 0xe0) == 0)
3936 			sdev_printk(KERN_ERR, scp->device, "Unprotected RD "
3937 				    "to DIF device\n");
3938 	}
3939 	if (unlikely((sdebug_opts & SDEBUG_OPT_SHORT_TRANSFER) &&
3940 		     atomic_read(&sdeb_inject_pending))) {
3941 		num /= 2;
3942 		atomic_set(&sdeb_inject_pending, 0);
3943 	}
3944 
3945 	/*
3946 	 * When checking device access params, for reads we only check data
3947 	 * versus what is set at init time, so no need to lock.
3948 	 */
3949 	ret = check_device_access_params(scp, lba, num, false);
3950 	if (ret)
3951 		return ret;
3952 	if (unlikely((SDEBUG_OPT_MEDIUM_ERR & sdebug_opts) &&
3953 		     (lba <= (sdebug_medium_error_start + sdebug_medium_error_count - 1)) &&
3954 		     ((lba + num) > sdebug_medium_error_start))) {
3955 		/* claim unrecoverable read error */
3956 		mk_sense_buffer(scp, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
3957 		/* set info field and valid bit for fixed descriptor */
3958 		if (0x70 == (scp->sense_buffer[0] & 0x7f)) {
3959 			scp->sense_buffer[0] |= 0x80;	/* Valid bit */
3960 			ret = (lba < OPT_MEDIUM_ERR_ADDR)
3961 			      ? OPT_MEDIUM_ERR_ADDR : (int)lba;
3962 			put_unaligned_be32(ret, scp->sense_buffer + 3);
3963 		}
3964 		scsi_set_resid(scp, scsi_bufflen(scp));
3965 		return check_condition_result;
3966 	}
3967 
3968 	if (sdebug_dev_is_zoned(devip) ||
3969 	    (sdebug_dix && scsi_prot_sg_count(scp)))  {
3970 		sdeb_meta_read_lock(sip);
3971 		meta_data_locked = true;
3972 	}
3973 
3974 	/* DIX + T10 DIF */
3975 	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3976 		switch (prot_verify_read(scp, lba, num, ei_lba)) {
3977 		case 1: /* Guard tag error */
3978 			if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */
3979 				sdeb_meta_read_unlock(sip);
3980 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3981 				return check_condition_result;
3982 			} else if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) {
3983 				sdeb_meta_read_unlock(sip);
3984 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3985 				return illegal_condition_result;
3986 			}
3987 			break;
3988 		case 3: /* Reference tag error */
3989 			if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */
3990 				sdeb_meta_read_unlock(sip);
3991 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3);
3992 				return check_condition_result;
3993 			} else if (scp->prot_flags & SCSI_PROT_REF_CHECK) {
3994 				sdeb_meta_read_unlock(sip);
3995 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3);
3996 				return illegal_condition_result;
3997 			}
3998 			break;
3999 		}
4000 	}
4001 
4002 	ret = do_device_access(sip, scp, 0, lba, num, 0, false, false);
4003 	if (meta_data_locked)
4004 		sdeb_meta_read_unlock(sip);
4005 	if (unlikely(ret == -1))
4006 		return DID_ERROR << 16;
4007 
4008 	scsi_set_resid(scp, scsi_bufflen(scp) - ret);
4009 
4010 	if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
4011 		     atomic_read(&sdeb_inject_pending))) {
4012 		if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
4013 			mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
4014 			atomic_set(&sdeb_inject_pending, 0);
4015 			return check_condition_result;
4016 		} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
4017 			/* Logical block guard check failed */
4018 			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
4019 			atomic_set(&sdeb_inject_pending, 0);
4020 			return illegal_condition_result;
4021 		} else if (SDEBUG_OPT_DIX_ERR & sdebug_opts) {
4022 			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
4023 			atomic_set(&sdeb_inject_pending, 0);
4024 			return illegal_condition_result;
4025 		}
4026 	}
4027 	return 0;
4028 }
4029 
4030 static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
4031 			     unsigned int sectors, u32 ei_lba)
4032 {
4033 	int ret;
4034 	struct t10_pi_tuple *sdt;
4035 	void *daddr;
4036 	sector_t sector = start_sec;
4037 	int ppage_offset;
4038 	int dpage_offset;
4039 	struct sg_mapping_iter diter;
4040 	struct sg_mapping_iter piter;
4041 
4042 	BUG_ON(scsi_sg_count(SCpnt) == 0);
4043 	BUG_ON(scsi_prot_sg_count(SCpnt) == 0);
4044 
4045 	sg_miter_start(&piter, scsi_prot_sglist(SCpnt),
4046 			scsi_prot_sg_count(SCpnt),
4047 			SG_MITER_ATOMIC | SG_MITER_FROM_SG);
4048 	sg_miter_start(&diter, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
4049 			SG_MITER_ATOMIC | SG_MITER_FROM_SG);
4050 
4051 	/* For each protection page */
4052 	while (sg_miter_next(&piter)) {
4053 		dpage_offset = 0;
4054 		if (WARN_ON(!sg_miter_next(&diter))) {
4055 			ret = 0x01;
4056 			goto out;
4057 		}
4058 
4059 		for (ppage_offset = 0; ppage_offset < piter.length;
4060 		     ppage_offset += sizeof(struct t10_pi_tuple)) {
4061 			/* If we're at the end of the current
4062 			 * data page advance to the next one
4063 			 */
4064 			if (dpage_offset >= diter.length) {
4065 				if (WARN_ON(!sg_miter_next(&diter))) {
4066 					ret = 0x01;
4067 					goto out;
4068 				}
4069 				dpage_offset = 0;
4070 			}
4071 
4072 			sdt = piter.addr + ppage_offset;
4073 			daddr = diter.addr + dpage_offset;
4074 
4075 			if (SCpnt->cmnd[1] >> 5 != 3) { /* WRPROTECT */
4076 				ret = dif_verify(sdt, daddr, sector, ei_lba);
4077 				if (ret)
4078 					goto out;
4079 			}
4080 
4081 			sector++;
4082 			ei_lba++;
4083 			dpage_offset += sdebug_sector_size;
4084 		}
4085 		diter.consumed = dpage_offset;
4086 		sg_miter_stop(&diter);
4087 	}
4088 	sg_miter_stop(&piter);
4089 
4090 	dif_copy_prot(SCpnt, start_sec, sectors, false);
4091 	dix_writes++;
4092 
4093 	return 0;
4094 
4095 out:
4096 	dif_errors++;
4097 	sg_miter_stop(&diter);
4098 	sg_miter_stop(&piter);
4099 	return ret;
4100 }
4101 
4102 static unsigned long lba_to_map_index(sector_t lba)
4103 {
4104 	if (sdebug_unmap_alignment)
4105 		lba += sdebug_unmap_granularity - sdebug_unmap_alignment;
4106 	sector_div(lba, sdebug_unmap_granularity);
4107 	return lba;
4108 }
4109 
4110 static sector_t map_index_to_lba(unsigned long index)
4111 {
4112 	sector_t lba = index * sdebug_unmap_granularity;
4113 
4114 	if (sdebug_unmap_alignment)
4115 		lba -= sdebug_unmap_granularity - sdebug_unmap_alignment;
4116 	return lba;
4117 }
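
/*
 * Worked example for the two helpers above: with
 * sdebug_unmap_granularity = 8 and sdebug_unmap_alignment = 4,
 * lba_to_map_index(4) = (4 + 8 - 4) / 8 = 1 and
 * map_index_to_lba(1) = 8 - (8 - 4) = 4, so provisioning block 1
 * spans LBAs 4..11 and index 0 covers the short leading run 0..3.
 */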
4118 
4119 static unsigned int map_state(struct sdeb_store_info *sip, sector_t lba,
4120 			      unsigned int *num)
4121 {
4122 	sector_t end;
4123 	unsigned int mapped;
4124 	unsigned long index;
4125 	unsigned long next;
4126 
4127 	index = lba_to_map_index(lba);
4128 	mapped = test_bit(index, sip->map_storep);
4129 
4130 	if (mapped)
4131 		next = find_next_zero_bit(sip->map_storep, map_size, index);
4132 	else
4133 		next = find_next_bit(sip->map_storep, map_size, index);
4134 
4135 	end = min_t(sector_t, sdebug_store_sectors,  map_index_to_lba(next));
4136 	*num = end - lba;
4137 	return mapped;
4138 }
4139 
4140 static void map_region(struct sdeb_store_info *sip, sector_t lba,
4141 		       unsigned int len)
4142 {
4143 	sector_t end = lba + len;
4144 
4145 	while (lba < end) {
4146 		unsigned long index = lba_to_map_index(lba);
4147 
4148 		if (index < map_size)
4149 			set_bit(index, sip->map_storep);
4150 
4151 		lba = map_index_to_lba(index + 1);
4152 	}
4153 }
4154 
4155 static void unmap_region(struct sdeb_store_info *sip, sector_t lba,
4156 			 unsigned int len)
4157 {
4158 	sector_t end = lba + len;
4159 	u8 *fsp = sip->storep;
4160 
4161 	while (lba < end) {
4162 		unsigned long index = lba_to_map_index(lba);
4163 
4164 		if (lba == map_index_to_lba(index) &&
4165 		    lba + sdebug_unmap_granularity <= end &&
4166 		    index < map_size) {
4167 			clear_bit(index, sip->map_storep);
4168 			if (sdebug_lbprz) {  /* for LBPRZ=2 return 0xff_s */
4169 				memset(fsp + lba * sdebug_sector_size,
4170 				       (sdebug_lbprz & 1) ? 0 : 0xff,
4171 				       sdebug_sector_size *
4172 				       sdebug_unmap_granularity);
4173 			}
4174 			if (sip->dif_storep) {
4175 				memset(sip->dif_storep + lba, 0xff,
4176 				       sizeof(*sip->dif_storep) *
4177 				       sdebug_unmap_granularity);
4178 			}
4179 		}
4180 		lba = map_index_to_lba(index + 1);
4181 	}
4182 }
4183 
4184 static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4185 {
4186 	bool check_prot;
4187 	u32 num;
4188 	u8 group = 0;
4189 	u32 ei_lba;
4190 	int ret;
4191 	u64 lba;
4192 	struct sdeb_store_info *sip = devip2sip(devip, true);
4193 	u8 *cmd = scp->cmnd;
4194 	bool meta_data_locked = false;
4195 
4196 	switch (cmd[0]) {
4197 	case WRITE_16:
4198 		ei_lba = 0;
4199 		lba = get_unaligned_be64(cmd + 2);
4200 		num = get_unaligned_be32(cmd + 10);
4201 		group = cmd[14] & 0x3f;
4202 		check_prot = true;
4203 		break;
4204 	case WRITE_10:
4205 		ei_lba = 0;
4206 		lba = get_unaligned_be32(cmd + 2);
4207 		group = cmd[6] & 0x3f;
4208 		num = get_unaligned_be16(cmd + 7);
4209 		check_prot = true;
4210 		break;
4211 	case WRITE_6:
4212 		ei_lba = 0;
4213 		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
4214 		      (u32)(cmd[1] & 0x1f) << 16;
4215 		num = (0 == cmd[4]) ? 256 : cmd[4];
4216 		check_prot = true;
4217 		break;
4218 	case WRITE_12:
4219 		ei_lba = 0;
4220 		lba = get_unaligned_be32(cmd + 2);
4221 		num = get_unaligned_be32(cmd + 6);
4222 		group = cmd[6] & 0x3f;
4223 		check_prot = true;
4224 		break;
4225 	case 0x53:	/* XDWRITEREAD(10) */
4226 		ei_lba = 0;
4227 		lba = get_unaligned_be32(cmd + 2);
4228 		group = cmd[6] & 0x1f;
4229 		num = get_unaligned_be16(cmd + 7);
4230 		check_prot = false;
4231 		break;
4232 	default:	/* assume WRITE(32) */
4233 		group = cmd[6] & 0x3f;
4234 		lba = get_unaligned_be64(cmd + 12);
4235 		ei_lba = get_unaligned_be32(cmd + 20);
4236 		num = get_unaligned_be32(cmd + 28);
4237 		check_prot = false;
4238 		break;
4239 	}
4240 	if (unlikely(have_dif_prot && check_prot)) {
4241 		if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
4242 		    (cmd[1] & 0xe0)) {
4243 			mk_sense_invalid_opcode(scp);
4244 			return check_condition_result;
4245 		}
4246 		if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
4247 		     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
4248 		    (cmd[1] & 0xe0) == 0)
4249 			sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
4250 				    "to DIF device\n");
4251 	}
4252 
4253 	if (sdebug_dev_is_zoned(devip) ||
4254 	    (sdebug_dix && scsi_prot_sg_count(scp)) ||
4255 	    scsi_debug_lbp())  {
4256 		sdeb_meta_write_lock(sip);
4257 		meta_data_locked = true;
4258 	}
4259 
4260 	ret = check_device_access_params(scp, lba, num, true);
4261 	if (ret) {
4262 		if (meta_data_locked)
4263 			sdeb_meta_write_unlock(sip);
4264 		return ret;
4265 	}
4266 
4267 	/* DIX + T10 DIF */
4268 	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
4269 		switch (prot_verify_write(scp, lba, num, ei_lba)) {
4270 		case 1: /* Guard tag error */
4271 			if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) {
4272 				sdeb_meta_write_unlock(sip);
4273 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
4274 				return illegal_condition_result;
4275 			} else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */
4276 				sdeb_meta_write_unlock(sip);
4277 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
4278 				return check_condition_result;
4279 			}
4280 			break;
4281 		case 3: /* Reference tag error */
4282 			if (scp->prot_flags & SCSI_PROT_REF_CHECK) {
4283 				sdeb_meta_write_unlock(sip);
4284 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3);
4285 				return illegal_condition_result;
4286 			} else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */
4287 				sdeb_meta_write_unlock(sip);
4288 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3);
4289 				return check_condition_result;
4290 			}
4291 			break;
4292 		}
4293 	}
4294 
4295 	ret = do_device_access(sip, scp, 0, lba, num, group, true, false);
4296 	if (unlikely(scsi_debug_lbp()))
4297 		map_region(sip, lba, num);
4298 
4299 	/* If ZBC zone then bump its write pointer */
4300 	if (sdebug_dev_is_zoned(devip))
4301 		zbc_inc_wp(devip, lba, num);
4302 	if (meta_data_locked)
4303 		sdeb_meta_write_unlock(sip);
4304 
4305 	if (unlikely(-1 == ret))
4306 		return DID_ERROR << 16;
4307 	else if (unlikely(sdebug_verbose &&
4308 			  (ret < (num * sdebug_sector_size))))
4309 		sdev_printk(KERN_INFO, scp->device,
4310 			    "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
4311 			    my_name, num * sdebug_sector_size, ret);
4312 
4313 	if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
4314 		     atomic_read(&sdeb_inject_pending))) {
4315 		if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
4316 			mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
4317 			atomic_set(&sdeb_inject_pending, 0);
4318 			return check_condition_result;
4319 		} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
4320 			/* Logical block guard check failed */
4321 			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
4322 			atomic_set(&sdeb_inject_pending, 0);
4323 			return illegal_condition_result;
4324 		} else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
4325 			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
4326 			atomic_set(&sdeb_inject_pending, 0);
4327 			return illegal_condition_result;
4328 		}
4329 	}
4330 	return 0;
4331 }
4332 
4333 /*
4334  * T10 has only specified WRITE SCATTERED(16) and WRITE SCATTERED(32).
4335  * No READ GATHERED yet (requires bidi or long cdb holding gather list).
4336  */
4337 static int resp_write_scat(struct scsi_cmnd *scp,
4338 			   struct sdebug_dev_info *devip)
4339 {
4340 	u8 *cmd = scp->cmnd;
4341 	u8 *lrdp = NULL;
4342 	u8 *up;
4343 	struct sdeb_store_info *sip = devip2sip(devip, true);
4344 	u8 wrprotect;
4345 	u16 lbdof, num_lrd, k;
4346 	u32 num, num_by, bt_len, lbdof_blen, sg_off, cum_lb;
4347 	u32 lb_size = sdebug_sector_size;
4348 	u32 ei_lba;
4349 	u64 lba;
4350 	u8 group;
4351 	int ret, res;
4352 	bool is_16;
4353 	static const u32 lrd_size = 32; /* + parameter list header size */
4354 
4355 	if (cmd[0] == VARIABLE_LENGTH_CMD) {
4356 		is_16 = false;
4357 		group = cmd[6] & 0x3f;
4358 		wrprotect = (cmd[10] >> 5) & 0x7;
4359 		lbdof = get_unaligned_be16(cmd + 12);
4360 		num_lrd = get_unaligned_be16(cmd + 16);
4361 		bt_len = get_unaligned_be32(cmd + 28);
4362 	} else {        /* that leaves WRITE SCATTERED(16) */
4363 		is_16 = true;
4364 		wrprotect = (cmd[2] >> 5) & 0x7;
4365 		lbdof = get_unaligned_be16(cmd + 4);
4366 		num_lrd = get_unaligned_be16(cmd + 8);
4367 		bt_len = get_unaligned_be32(cmd + 10);
4368 		group = cmd[14] & 0x3f;
4369 		if (unlikely(have_dif_prot)) {
4370 			if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
4371 			    wrprotect) {
4372 				mk_sense_invalid_opcode(scp);
4373 				return illegal_condition_result;
4374 			}
4375 			if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
4376 			     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
4377 			     wrprotect == 0)
4378 				sdev_printk(KERN_ERR, scp->device,
4379 					    "Unprotected WR to DIF device\n");
4380 		}
4381 	}
4382 	if ((num_lrd == 0) || (bt_len == 0))
4383 		return 0;       /* T10 says these do-nothings are not errors */
4384 	if (lbdof == 0) {
4385 		if (sdebug_verbose)
4386 			sdev_printk(KERN_INFO, scp->device,
4387 				"%s: %s: LB Data Offset field bad\n",
4388 				my_name, __func__);
4389 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4390 		return illegal_condition_result;
4391 	}
4392 	lbdof_blen = lbdof * lb_size;
4393 	if ((lrd_size + (num_lrd * lrd_size)) > lbdof_blen) {
4394 		if (sdebug_verbose)
4395 			sdev_printk(KERN_INFO, scp->device,
4396 				"%s: %s: LBA range descriptors don't fit\n",
4397 				my_name, __func__);
4398 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4399 		return illegal_condition_result;
4400 	}
4401 	lrdp = kzalloc(lbdof_blen, GFP_ATOMIC | __GFP_NOWARN);
4402 	if (lrdp == NULL)
4403 		return SCSI_MLQUEUE_HOST_BUSY;
4404 	if (sdebug_verbose)
4405 		sdev_printk(KERN_INFO, scp->device,
4406 			"%s: %s: Fetch header+scatter_list, lbdof_blen=%u\n",
4407 			my_name, __func__, lbdof_blen);
4408 	res = fetch_to_dev_buffer(scp, lrdp, lbdof_blen);
4409 	if (res == -1) {
4410 		ret = DID_ERROR << 16;
4411 		goto err_out;
4412 	}
4413 
4414 	/* Just keep it simple and always lock for now */
4415 	sdeb_meta_write_lock(sip);
4416 	sg_off = lbdof_blen;
4417 	/* Spec says Buffer xfer Length field in number of LBs in dout */
4418 	cum_lb = 0;
4419 	for (k = 0, up = lrdp + lrd_size; k < num_lrd; ++k, up += lrd_size) {
4420 		lba = get_unaligned_be64(up + 0);
4421 		num = get_unaligned_be32(up + 8);
4422 		if (sdebug_verbose)
4423 			sdev_printk(KERN_INFO, scp->device,
4424 				"%s: %s: k=%d  LBA=0x%llx num=%u  sg_off=%u\n",
4425 				my_name, __func__, k, lba, num, sg_off);
4426 		if (num == 0)
4427 			continue;
4428 		ret = check_device_access_params(scp, lba, num, true);
4429 		if (ret)
4430 			goto err_out_unlock;
4431 		num_by = num * lb_size;
4432 		ei_lba = is_16 ? 0 : get_unaligned_be32(up + 12);
4433 
4434 		if ((cum_lb + num) > bt_len) {
4435 			if (sdebug_verbose)
4436 				sdev_printk(KERN_INFO, scp->device,
4437 				    "%s: %s: sum of blocks > data provided\n",
4438 				    my_name, __func__);
4439 			mk_sense_buffer(scp, ILLEGAL_REQUEST, WRITE_ERROR_ASC,
4440 					0);
4441 			ret = illegal_condition_result;
4442 			goto err_out_unlock;
4443 		}
4444 
4445 		/* DIX + T10 DIF */
4446 		if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
4447 			int prot_ret = prot_verify_write(scp, lba, num,
4448 							 ei_lba);
4449 
4450 			if (prot_ret) {
4451 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10,
4452 						prot_ret);
4453 				ret = illegal_condition_result;
4454 				goto err_out_unlock;
4455 			}
4456 		}
4457 
4458 		/*
4459 		 * Write each range atomically, to keep behaviour as close
4460 		 * as possible to what it was before atomic writes were added.
4461 		 */
4462 		ret = do_device_access(sip, scp, sg_off, lba, num, group, true, true);
4463 		/* If ZBC zone then bump its write pointer */
4464 		if (sdebug_dev_is_zoned(devip))
4465 			zbc_inc_wp(devip, lba, num);
4466 		if (unlikely(scsi_debug_lbp()))
4467 			map_region(sip, lba, num);
4468 		if (unlikely(-1 == ret)) {
4469 			ret = DID_ERROR << 16;
4470 			goto err_out_unlock;
4471 		} else if (unlikely(sdebug_verbose && (ret < num_by)))
4472 			sdev_printk(KERN_INFO, scp->device,
4473 			    "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
4474 			    my_name, num_by, ret);
4475 
4476 		if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
4477 			     atomic_read(&sdeb_inject_pending))) {
4478 			if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
4479 				mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
4480 				atomic_set(&sdeb_inject_pending, 0);
4481 				ret = check_condition_result;
4482 				goto err_out_unlock;
4483 			} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
4484 				/* Logical block guard check failed */
4485 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
4486 				atomic_set(&sdeb_inject_pending, 0);
4487 				ret = illegal_condition_result;
4488 				goto err_out_unlock;
4489 			} else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
4490 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
4491 				atomic_set(&sdeb_inject_pending, 0);
4492 				ret = illegal_condition_result;
4493 				goto err_out_unlock;
4494 			}
4495 		}
4496 		sg_off += num_by;
4497 		cum_lb += num;
4498 	}
4499 	ret = 0;
4500 err_out_unlock:
4501 	sdeb_meta_write_unlock(sip);
4502 err_out:
4503 	kfree(lrdp);
4504 	return ret;
4505 }
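
/*
 * Illustrative data-out layout for WRITE SCATTERED(16) with lbdof = 1,
 * num_lrd = 2 and 512 byte blocks: the first lbdof * lb_size = 512
 * bytes carry the 32 byte parameter list header followed by two
 * 32 byte LBA range descriptors (LBA at offset 0, NUM LOGICAL BLOCKS
 * at offset 8 of each); the blocks to write begin at sg_off = 512 and
 * are consumed range by range.
 */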
4506 
4507 static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
4508 			   u32 ei_lba, bool unmap, bool ndob)
4509 {
4510 	struct scsi_device *sdp = scp->device;
4511 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
4512 	unsigned long long i;
4513 	u64 block, lbaa;
4514 	u32 lb_size = sdebug_sector_size;
4515 	int ret;
4516 	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
4517 						scp->device->hostdata, true);
4518 	u8 *fs1p;
4519 	u8 *fsp;
4520 	bool meta_data_locked = false;
4521 
4522 	if (sdebug_dev_is_zoned(devip) || scsi_debug_lbp()) {
4523 		sdeb_meta_write_lock(sip);
4524 		meta_data_locked = true;
4525 	}
4526 
4527 	ret = check_device_access_params(scp, lba, num, true);
4528 	if (ret)
4529 		goto out;
4530 
4531 	if (unmap && scsi_debug_lbp()) {
4532 		unmap_region(sip, lba, num);
4533 		goto out;
4534 	}
4535 	lbaa = lba;
4536 	block = do_div(lbaa, sdebug_store_sectors);
4537 	/* if ndob then zero 1 logical block, else fetch 1 logical block */
4538 	fsp = sip->storep;
4539 	fs1p = fsp + (block * lb_size);
4540 	sdeb_data_write_lock(sip);
4541 	if (ndob) {
4542 		memset(fs1p, 0, lb_size);
4543 		ret = 0;
4544 	} else
4545 		ret = fetch_to_dev_buffer(scp, fs1p, lb_size);
4546 
4547 	if (-1 == ret) {
4548 		ret = DID_ERROR << 16;
4549 		goto out;
4550 	} else if (sdebug_verbose && !ndob && (ret < lb_size))
4551 		sdev_printk(KERN_INFO, scp->device,
4552 			    "%s: %s: lb size=%u, IO sent=%d bytes\n",
4553 			    my_name, "write same", lb_size, ret);
4554 
4555 	/* Copy first sector to remaining blocks */
4556 	for (i = 1 ; i < num ; i++) {
4557 		lbaa = lba + i;
4558 		block = do_div(lbaa, sdebug_store_sectors);
4559 		memmove(fsp + (block * lb_size), fs1p, lb_size);
4560 	}
4561 	if (scsi_debug_lbp())
4562 		map_region(sip, lba, num);
4563 	/* If ZBC zone then bump its write pointer */
4564 	if (sdebug_dev_is_zoned(devip))
4565 		zbc_inc_wp(devip, lba, num);
4566 	sdeb_data_write_unlock(sip);
4567 	ret = 0;
4568 out:
4569 	if (meta_data_locked)
4570 		sdeb_meta_write_unlock(sip);
4571 	return ret;
4572 }
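
/*
 * Illustrative call: WRITE SAME(16) with NDOB set, lba = 0x100 and
 * num = 8 zeroes one logical block in the store without fetching any
 * data-out, then memmove()s that block over the remaining seven.
 */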
4573 
4574 static int resp_write_same_10(struct scsi_cmnd *scp,
4575 			      struct sdebug_dev_info *devip)
4576 {
4577 	u8 *cmd = scp->cmnd;
4578 	u32 lba;
4579 	u16 num;
4580 	u32 ei_lba = 0;
4581 	bool unmap = false;
4582 
4583 	if (cmd[1] & 0x8) {
4584 		if (sdebug_lbpws10 == 0) {
4585 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
4586 			return check_condition_result;
4587 		} else
4588 			unmap = true;
4589 	}
4590 	lba = get_unaligned_be32(cmd + 2);
4591 	num = get_unaligned_be16(cmd + 7);
4592 	if (num > sdebug_write_same_length) {
4593 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
4594 		return check_condition_result;
4595 	}
4596 	return resp_write_same(scp, lba, num, ei_lba, unmap, false);
4597 }
4598 
4599 static int resp_write_same_16(struct scsi_cmnd *scp,
4600 			      struct sdebug_dev_info *devip)
4601 {
4602 	u8 *cmd = scp->cmnd;
4603 	u64 lba;
4604 	u32 num;
4605 	u32 ei_lba = 0;
4606 	bool unmap = false;
4607 	bool ndob = false;
4608 
4609 	if (cmd[1] & 0x8) {	/* UNMAP */
4610 		if (sdebug_lbpws == 0) {
4611 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
4612 			return check_condition_result;
4613 		} else
4614 			unmap = true;
4615 	}
4616 	if (cmd[1] & 0x1)  /* NDOB (no data-out buffer, assumes zeroes) */
4617 		ndob = true;
4618 	lba = get_unaligned_be64(cmd + 2);
4619 	num = get_unaligned_be32(cmd + 10);
4620 	if (num > sdebug_write_same_length) {
4621 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
4622 		return check_condition_result;
4623 	}
4624 	return resp_write_same(scp, lba, num, ei_lba, unmap, ndob);
4625 }
4626 
4627 /* Note the mode field is in the same position as the (lower) service action
4628  * field. For the Report supported operation codes command, SPC-4 suggests
4629  * each mode of this command should be reported separately; left for future work. */
4630 static int resp_write_buffer(struct scsi_cmnd *scp,
4631 			     struct sdebug_dev_info *devip)
4632 {
4633 	u8 *cmd = scp->cmnd;
4634 	struct scsi_device *sdp = scp->device;
4635 	struct sdebug_dev_info *dp;
4636 	u8 mode;
4637 
4638 	mode = cmd[1] & 0x1f;
4639 	switch (mode) {
4640 	case 0x4:	/* download microcode (MC) and activate (ACT) */
4641 		/* set UAs on this device only */
4642 		set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
4643 		set_bit(SDEBUG_UA_MICROCODE_CHANGED, devip->uas_bm);
4644 		break;
4645 	case 0x5:	/* download MC, save and ACT */
4646 		set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET, devip->uas_bm);
4647 		break;
4648 	case 0x6:	/* download MC with offsets and ACT */
4649 		/* set UAs on most devices (LUs) in this target */
4650 		list_for_each_entry(dp,
4651 				    &devip->sdbg_host->dev_info_list,
4652 				    dev_list)
4653 			if (dp->target == sdp->id) {
4654 				set_bit(SDEBUG_UA_BUS_RESET, dp->uas_bm);
4655 				if (devip != dp)
4656 					set_bit(SDEBUG_UA_MICROCODE_CHANGED,
4657 						dp->uas_bm);
4658 			}
4659 		break;
4660 	case 0x7:	/* download MC with offsets, save, and ACT */
4661 		/* set UA on all devices (LUs) in this target */
4662 		list_for_each_entry(dp,
4663 				    &devip->sdbg_host->dev_info_list,
4664 				    dev_list)
4665 			if (dp->target == sdp->id)
4666 				set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET,
4667 					dp->uas_bm);
4668 		break;
4669 	default:
4670 		/* do nothing for this command for other mode values */
4671 		break;
4672 	}
4673 	return 0;
4674 }
4675 
4676 static int resp_comp_write(struct scsi_cmnd *scp,
4677 			   struct sdebug_dev_info *devip)
4678 {
4679 	u8 *cmd = scp->cmnd;
4680 	u8 *arr;
4681 	struct sdeb_store_info *sip = devip2sip(devip, true);
4682 	u64 lba;
4683 	u32 dnum;
4684 	u32 lb_size = sdebug_sector_size;
4685 	u8 num;
4686 	int ret;
4687 	int retval = 0;
4688 
4689 	lba = get_unaligned_be64(cmd + 2);
4690 	num = cmd[13];		/* 1 to a maximum of 255 logical blocks */
4691 	if (0 == num)
4692 		return 0;	/* degenerate case, not an error */
4693 	if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
4694 	    (cmd[1] & 0xe0)) {
4695 		mk_sense_invalid_opcode(scp);
4696 		return check_condition_result;
4697 	}
4698 	if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
4699 	     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
4700 	    (cmd[1] & 0xe0) == 0)
4701 		sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
4702 			    "to DIF device\n");
4703 	ret = check_device_access_params(scp, lba, num, false);
4704 	if (ret)
4705 		return ret;
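	/* The data-out buffer carries the verify (compare) data followed by
	 * the write data, hence twice NUMBER OF LOGICAL BLOCKS worth of bytes
	 * (per the SBC definition of COMPARE AND WRITE). */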
4706 	dnum = 2 * num;
4707 	arr = kcalloc(lb_size, dnum, GFP_ATOMIC);
4708 	if (NULL == arr) {
4709 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4710 				INSUFF_RES_ASCQ);
4711 		return check_condition_result;
4712 	}
4713 
4714 	ret = do_dout_fetch(scp, dnum, arr);
4715 	if (ret == -1) {
4716 		retval = DID_ERROR << 16;
4717 		goto cleanup_free;
4718 	} else if (sdebug_verbose && (ret < (dnum * lb_size)))
4719 		sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb "
4720 			    "indicated=%u, IO sent=%d bytes\n", my_name,
4721 			    dnum * lb_size, ret);
4722 
4723 	sdeb_data_write_lock(sip);
4724 	sdeb_meta_write_lock(sip);
4725 	if (!comp_write_worker(sip, lba, num, arr, false)) {
4726 		mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
4727 		retval = check_condition_result;
4728 		goto cleanup_unlock;
4729 	}
4730 
4731 	/* Cover sip->map_storep (which map_region() sets) with the data lock */
4732 	if (scsi_debug_lbp())
4733 		map_region(sip, lba, num);
4734 cleanup_unlock:
4735 	sdeb_meta_write_unlock(sip);
4736 	sdeb_data_write_unlock(sip);
4737 cleanup_free:
4738 	kfree(arr);
4739 	return retval;
4740 }
4741 
4742 struct unmap_block_desc {
4743 	__be64	lba;
4744 	__be32	blocks;
4745 	__be32	__reserved;
4746 };
4747 
4748 static int resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4749 {
4750 	unsigned char *buf;
4751 	struct unmap_block_desc *desc;
4752 	struct sdeb_store_info *sip = devip2sip(devip, true);
4753 	unsigned int i, payload_len, descriptors;
4754 	int ret;
4755 
4756 	if (!scsi_debug_lbp())
4757 		return 0;	/* fib and say it's done */
4758 	payload_len = get_unaligned_be16(scp->cmnd + 7);
4759 	BUG_ON(scsi_bufflen(scp) != payload_len);
4760 
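	/* an 8 byte parameter list header precedes the 16 byte block descriptors */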
4761 	descriptors = (payload_len - 8) / 16;
4762 	if (descriptors > sdebug_unmap_max_desc) {
4763 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
4764 		return check_condition_result;
4765 	}
4766 
4767 	buf = kzalloc(scsi_bufflen(scp), GFP_ATOMIC);
4768 	if (!buf) {
4769 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4770 				INSUFF_RES_ASCQ);
4771 		return check_condition_result;
4772 	}
4773 
4774 	scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
4775 
4776 	BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
4777 	BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);
4778 
4779 	desc = (void *)&buf[8];
4780 
4781 	sdeb_meta_write_lock(sip);
4782 
4783 	for (i = 0 ; i < descriptors ; i++) {
4784 		unsigned long long lba = get_unaligned_be64(&desc[i].lba);
4785 		unsigned int num = get_unaligned_be32(&desc[i].blocks);
4786 
4787 		ret = check_device_access_params(scp, lba, num, true);
4788 		if (ret)
4789 			goto out;
4790 
4791 		unmap_region(sip, lba, num);
4792 	}
4793 
4794 	ret = 0;
4795 
4796 out:
4797 	sdeb_meta_write_unlock(sip);
4798 	kfree(buf);
4799 
4800 	return ret;
4801 }
4802 
4803 #define SDEBUG_GET_LBA_STATUS_LEN 32
4804 
4805 static int resp_get_lba_status(struct scsi_cmnd *scp,
4806 			       struct sdebug_dev_info *devip)
4807 {
4808 	u8 *cmd = scp->cmnd;
4809 	u64 lba;
4810 	u32 alloc_len, mapped, num;
4811 	int ret;
4812 	u8 arr[SDEBUG_GET_LBA_STATUS_LEN];
4813 
4814 	lba = get_unaligned_be64(cmd + 2);
4815 	alloc_len = get_unaligned_be32(cmd + 10);
4816 
4817 	if (alloc_len < 24)
4818 		return 0;
4819 
4820 	ret = check_device_access_params(scp, lba, 1, false);
4821 	if (ret)
4822 		return ret;
4823 
4824 	if (scsi_debug_lbp()) {
4825 		struct sdeb_store_info *sip = devip2sip(devip, true);
4826 
4827 		mapped = map_state(sip, lba, &num);
4828 	} else {
4829 		mapped = 1;
4830 		/* following just in case virtual_gb changed */
4831 		sdebug_capacity = get_sdebug_capacity();
4832 		if (sdebug_capacity - lba <= 0xffffffff)
4833 			num = sdebug_capacity - lba;
4834 		else
4835 			num = 0xffffffff;
4836 	}
4837 
4838 	memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
4839 	put_unaligned_be32(20, arr);		/* Parameter Data Length */
4840 	put_unaligned_be64(lba, arr + 8);	/* LBA */
4841 	put_unaligned_be32(num, arr + 16);	/* Number of blocks */
4842 	arr[20] = !mapped;		/* prov_stat=0: mapped; 1: dealloc */
4843 
4844 	return fill_from_dev_buffer(scp, arr, SDEBUG_GET_LBA_STATUS_LEN);
4845 }
4846 
4847 static int resp_get_stream_status(struct scsi_cmnd *scp,
4848 				  struct sdebug_dev_info *devip)
4849 {
4850 	u16 starting_stream_id, stream_id;
4851 	const u8 *cmd = scp->cmnd;
4852 	u32 alloc_len, offset;
4853 	u8 arr[256] = {};
4854 	struct scsi_stream_status_header *h = (void *)arr;
4855 
4856 	starting_stream_id = get_unaligned_be16(cmd + 4);
4857 	alloc_len = get_unaligned_be32(cmd + 10);
4858 
4859 	if (alloc_len < 8) {
4860 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
4861 		return check_condition_result;
4862 	}
4863 
4864 	if (starting_stream_id >= MAXIMUM_NUMBER_OF_STREAMS) {
4865 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1);
4866 		return check_condition_result;
4867 	}
4868 
4869 	/*
4870 	 * The GET STREAM STATUS command only reports status information
4871 	 * about open streams. Treat the non-permanent streams as open.
4872 	 */
4873 	put_unaligned_be16(MAXIMUM_NUMBER_OF_STREAMS,
4874 			   &h->number_of_open_streams);
4875 
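	/* one 8 byte stream status descriptor per stream, placed after the 8 byte header */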
4876 	for (offset = 8, stream_id = starting_stream_id;
4877 	     offset + 8 <= min_t(u32, alloc_len, sizeof(arr)) &&
4878 		     stream_id < MAXIMUM_NUMBER_OF_STREAMS;
4879 	     offset += 8, stream_id++) {
4880 		struct scsi_stream_status *stream_status = (void *)arr + offset;
4881 
4882 		stream_status->perm = stream_id < PERMANENT_STREAM_COUNT;
4883 		put_unaligned_be16(stream_id,
4884 				   &stream_status->stream_identifier);
4885 		stream_status->rel_lifetime = stream_id + 1;
4886 	}
4887 	put_unaligned_be32(offset - 8, &h->len); /* PARAMETER DATA LENGTH */
4888 
4889 	return fill_from_dev_buffer(scp, arr, min(offset, alloc_len));
4890 }
4891 
4892 static int resp_sync_cache(struct scsi_cmnd *scp,
4893 			   struct sdebug_dev_info *devip)
4894 {
4895 	int res = 0;
4896 	u64 lba;
4897 	u32 num_blocks;
4898 	u8 *cmd = scp->cmnd;
4899 
4900 	if (cmd[0] == SYNCHRONIZE_CACHE) {	/* 10 byte cdb */
4901 		lba = get_unaligned_be32(cmd + 2);
4902 		num_blocks = get_unaligned_be16(cmd + 7);
4903 	} else {				/* SYNCHRONIZE_CACHE(16) */
4904 		lba = get_unaligned_be64(cmd + 2);
4905 		num_blocks = get_unaligned_be32(cmd + 10);
4906 	}
4907 	if (lba + num_blocks > sdebug_capacity) {
4908 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4909 		return check_condition_result;
4910 	}
4911 	if (!write_since_sync || (cmd[1] & 0x2))
4912 		res = SDEG_RES_IMMED_MASK;
4913 	else		/* delay if write_since_sync and IMMED clear */
4914 		write_since_sync = false;
4915 	return res;
4916 }
4917 
4918 /*
4919  * Assuming LBA+num_blocks is not out-of-range, this function returns
4920  * CONDITION MET if the specified blocks will fit (or have fitted) in the
4921  * cache, and GOOD status otherwise. Model a disk with a big cache and
4922  * always yield CONDITION MET. It actually tries to bring the range of the
4923  * store in main memory into the cache associated with the CPU(s).
4924  */
4925 static int resp_pre_fetch(struct scsi_cmnd *scp,
4926 			  struct sdebug_dev_info *devip)
4927 {
4928 	int res = 0;
4929 	u64 lba;
4930 	u64 block, rest = 0;
4931 	u32 nblks;
4932 	u8 *cmd = scp->cmnd;
4933 	struct sdeb_store_info *sip = devip2sip(devip, true);
4934 	u8 *fsp = sip->storep;
4935 
4936 	if (cmd[0] == PRE_FETCH) {	/* 10 byte cdb */
4937 		lba = get_unaligned_be32(cmd + 2);
4938 		nblks = get_unaligned_be16(cmd + 7);
4939 	} else {			/* PRE-FETCH(16) */
4940 		lba = get_unaligned_be64(cmd + 2);
4941 		nblks = get_unaligned_be32(cmd + 10);
4942 	}
4943 	if (lba + nblks > sdebug_capacity) {
4944 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4945 		return check_condition_result;
4946 	}
4947 	if (!fsp)
4948 		goto fini;
4949 	/* PRE-FETCH spec says nothing about LBP or PI so skip them */
4950 	block = do_div(lba, sdebug_store_sectors);
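	/* if the range wraps past the end of the store, 'rest' is the wrapped tail */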
4951 	if (block + nblks > sdebug_store_sectors)
4952 		rest = block + nblks - sdebug_store_sectors;
4953 
4954 	/* Try to bring the PRE-FETCH range into CPU's cache */
4955 	sdeb_data_read_lock(sip);
4956 	prefetch_range(fsp + (sdebug_sector_size * block),
4957 		       (nblks - rest) * sdebug_sector_size);
4958 	if (rest)
4959 		prefetch_range(fsp, rest * sdebug_sector_size);
4960 
4961 	sdeb_data_read_unlock(sip);
4962 fini:
4963 	if (cmd[1] & 0x2)
4964 		res = SDEG_RES_IMMED_MASK;
4965 	return res | condition_met_result;
4966 }
4967 
4968 #define RL_BUCKET_ELEMS 8
4969 
4970 /* Even though each pseudo target has a REPORT LUNS "well known logical unit"
4971  * (W-LUN), the normal Linux scanning logic does not associate it with a
4972  * device (e.g. /dev/sg7). The following magic will make that association:
4973  *   "cd /sys/class/scsi_host/host<n> ; echo '- - 49409' > scan"
4974  * where <n> is a host number. If there are multiple targets in a host then
4975  * the above will associate a W-LUN to each target. To only get a W-LUN
4976  * for target 2, use "echo '- 2 49409' > scan".
4977  */
4978 static int resp_report_luns(struct scsi_cmnd *scp,
4979 			    struct sdebug_dev_info *devip)
4980 {
4981 	unsigned char *cmd = scp->cmnd;
4982 	unsigned int alloc_len;
4983 	unsigned char select_report;
4984 	u64 lun;
4985 	struct scsi_lun *lun_p;
4986 	u8 arr[RL_BUCKET_ELEMS * sizeof(struct scsi_lun)];
4987 	unsigned int lun_cnt;	/* normal LUN count (max: 256) */
4988 	unsigned int wlun_cnt;	/* report luns W-LUN count */
4989 	unsigned int tlun_cnt;	/* total LUN count */
4990 	unsigned int rlen;	/* response length (in bytes) */
4991 	int k, j, n, res;
4992 	unsigned int off_rsp = 0;
4993 	const int sz_lun = sizeof(struct scsi_lun);
4994 
4995 	clear_luns_changed_on_target(devip);
4996 
4997 	select_report = cmd[2];
4998 	alloc_len = get_unaligned_be32(cmd + 6);
4999 
5000 	if (alloc_len < 4) {
5001 		pr_err("alloc len too small %d\n", alloc_len);
5002 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
5003 		return check_condition_result;
5004 	}
5005 
5006 	switch (select_report) {
5007 	case 0:		/* all LUNs apart from W-LUNs */
5008 		lun_cnt = sdebug_max_luns;
5009 		wlun_cnt = 0;
5010 		break;
5011 	case 1:		/* only W-LUNs */
5012 		lun_cnt = 0;
5013 		wlun_cnt = 1;
5014 		break;
5015 	case 2:		/* all LUNs */
5016 		lun_cnt = sdebug_max_luns;
5017 		wlun_cnt = 1;
5018 		break;
5019 	case 0x10:	/* only administrative LUs */
5020 	case 0x11:	/* see SPC-5 */
5021 	case 0x12:	/* only subsidiary LUs owned by referenced LU */
5022 	default:
5023 		pr_debug("select report invalid %d\n", select_report);
5024 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
5025 		return check_condition_result;
5026 	}
5027 
5028 	if (sdebug_no_lun_0 && (lun_cnt > 0))
5029 		--lun_cnt;
5030 
5031 	tlun_cnt = lun_cnt + wlun_cnt;
5032 	rlen = tlun_cnt * sz_lun;	/* excluding 8 byte header */
5033 	scsi_set_resid(scp, scsi_bufflen(scp));
5034 	pr_debug("select_report %d luns = %d wluns = %d no_lun0 %d\n",
5035 		 select_report, lun_cnt, wlun_cnt, sdebug_no_lun_0);
5036 
5037 	/* loops rely on the response header being the same size as a LUN entry (both 8 bytes) */
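	/* fill the response RL_BUCKET_ELEMS LUN entries at a time; bucket 0 leads with the header */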
5038 	lun = sdebug_no_lun_0 ? 1 : 0;
5039 	for (k = 0, j = 0, res = 0; true; ++k, j = 0) {
5040 		memset(arr, 0, sizeof(arr));
5041 		lun_p = (struct scsi_lun *)&arr[0];
5042 		if (k == 0) {
5043 			put_unaligned_be32(rlen, &arr[0]);
5044 			++lun_p;
5045 			j = 1;
5046 		}
5047 		for ( ; j < RL_BUCKET_ELEMS; ++j, ++lun_p) {
5048 			if ((k * RL_BUCKET_ELEMS) + j > lun_cnt)
5049 				break;
5050 			int_to_scsilun(lun++, lun_p);
5051 			if (lun > 1 && sdebug_lun_am == SAM_LUN_AM_FLAT)
5052 				lun_p->scsi_lun[0] |= 0x40;
5053 		}
5054 		if (j < RL_BUCKET_ELEMS)
5055 			break;
5056 		n = j * sz_lun;
5057 		res = p_fill_from_dev_buffer(scp, arr, n, off_rsp);
5058 		if (res)
5059 			return res;
5060 		off_rsp += n;
5061 	}
5062 	if (wlun_cnt) {
5063 		int_to_scsilun(SCSI_W_LUN_REPORT_LUNS, lun_p);
5064 		++j;
5065 	}
5066 	if (j > 0)
5067 		res = p_fill_from_dev_buffer(scp, arr, j * sz_lun, off_rsp);
5068 	return res;
5069 }
5070 
5071 static int resp_verify(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
5072 {
5073 	bool is_bytchk3 = false;
5074 	u8 bytchk;
5075 	int ret, j;
5076 	u32 vnum, a_num, off;
5077 	const u32 lb_size = sdebug_sector_size;
5078 	u64 lba;
5079 	u8 *arr;
5080 	u8 *cmd = scp->cmnd;
5081 	struct sdeb_store_info *sip = devip2sip(devip, true);
5082 
5083 	bytchk = (cmd[1] >> 1) & 0x3;
5084 	if (bytchk == 0) {
5085 		return 0;	/* always claim internal verify okay */
5086 	} else if (bytchk == 2) {
5087 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
5088 		return check_condition_result;
5089 	} else if (bytchk == 3) {
5090 		is_bytchk3 = true;	/* 1 block sent, compared repeatedly */
5091 	}
5092 	switch (cmd[0]) {
5093 	case VERIFY_16:
5094 		lba = get_unaligned_be64(cmd + 2);
5095 		vnum = get_unaligned_be32(cmd + 10);
5096 		break;
5097 	case VERIFY:		/* is VERIFY(10) */
5098 		lba = get_unaligned_be32(cmd + 2);
5099 		vnum = get_unaligned_be16(cmd + 7);
5100 		break;
5101 	default:
5102 		mk_sense_invalid_opcode(scp);
5103 		return check_condition_result;
5104 	}
5105 	if (vnum == 0)
5106 		return 0;	/* not an error */
5107 	a_num = is_bytchk3 ? 1 : vnum;
5108 	/* Treat following check like one for read (i.e. no write) access */
5109 	ret = check_device_access_params(scp, lba, a_num, false);
5110 	if (ret)
5111 		return ret;
5112 
5113 	arr = kcalloc(lb_size, vnum, GFP_ATOMIC | __GFP_NOWARN);
5114 	if (!arr) {
5115 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
5116 				INSUFF_RES_ASCQ);
5117 		return check_condition_result;
5118 	}
5119 	/* Not changing store, so only need read access */
5120 	sdeb_data_read_lock(sip);
5121 
5122 	ret = do_dout_fetch(scp, a_num, arr);
5123 	if (ret == -1) {
5124 		ret = DID_ERROR << 16;
5125 		goto cleanup;
5126 	} else if (sdebug_verbose && (ret < (a_num * lb_size))) {
5127 		sdev_printk(KERN_INFO, scp->device,
5128 			    "%s: %s: cdb indicated=%u, IO sent=%d bytes\n",
5129 			    my_name, __func__, a_num * lb_size, ret);
5130 	}
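	/* BYTCHK=3: replicate the single data-out block across the whole verify range */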
5131 	if (is_bytchk3) {
5132 		for (j = 1, off = lb_size; j < vnum; ++j, off += lb_size)
5133 			memcpy(arr + off, arr, lb_size);
5134 	}
5135 	ret = 0;
5136 	if (!comp_write_worker(sip, lba, vnum, arr, true)) {
5137 		mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
5138 		ret = check_condition_result;
5139 		goto cleanup;
5140 	}
5141 cleanup:
5142 	sdeb_data_read_unlock(sip);
5143 	kfree(arr);
5144 	return ret;
5145 }
5146 
5147 #define RZONES_DESC_HD 64
5148 
5149 /* Report zones depending on start LBA and reporting options */
5150 static int resp_report_zones(struct scsi_cmnd *scp,
5151 			     struct sdebug_dev_info *devip)
5152 {
5153 	unsigned int rep_max_zones, nrz = 0;
5154 	int ret = 0;
5155 	u32 alloc_len, rep_opts, rep_len;
5156 	bool partial;
5157 	u64 lba, zs_lba;
5158 	u8 *arr = NULL, *desc;
5159 	u8 *cmd = scp->cmnd;
5160 	struct sdeb_zone_state *zsp = NULL;
5161 	struct sdeb_store_info *sip = devip2sip(devip, false);
5162 
5163 	if (!sdebug_dev_is_zoned(devip)) {
5164 		mk_sense_invalid_opcode(scp);
5165 		return check_condition_result;
5166 	}
5167 	zs_lba = get_unaligned_be64(cmd + 2);
5168 	alloc_len = get_unaligned_be32(cmd + 10);
5169 	if (alloc_len == 0)
5170 		return 0;	/* not an error */
5171 	rep_opts = cmd[14] & 0x3f;
5172 	partial = cmd[14] & 0x80;
5173 
5174 	if (zs_lba >= sdebug_capacity) {
5175 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
5176 		return check_condition_result;
5177 	}
5178 
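	/* a 64 byte report header precedes the 64 byte zone descriptors */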
5179 	rep_max_zones = (alloc_len - 64) >> ilog2(RZONES_DESC_HD);
5180 
5181 	arr = kzalloc(alloc_len, GFP_ATOMIC | __GFP_NOWARN);
5182 	if (!arr) {
5183 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
5184 				INSUFF_RES_ASCQ);
5185 		return check_condition_result;
5186 	}
5187 
5188 	sdeb_meta_read_lock(sip);
5189 
5190 	desc = arr + 64;
5191 	for (lba = zs_lba; lba < sdebug_capacity;
5192 	     lba = zsp->z_start + zsp->z_size) {
5193 		if (WARN_ONCE(zbc_zone(devip, lba) == zsp, "lba = %llu\n", lba))
5194 			break;
5195 		zsp = zbc_zone(devip, lba);
5196 		switch (rep_opts) {
5197 		case 0x00:
5198 			/* All zones */
5199 			break;
5200 		case 0x01:
5201 			/* Empty zones */
5202 			if (zsp->z_cond != ZC1_EMPTY)
5203 				continue;
5204 			break;
5205 		case 0x02:
5206 			/* Implicit open zones */
5207 			if (zsp->z_cond != ZC2_IMPLICIT_OPEN)
5208 				continue;
5209 			break;
5210 		case 0x03:
5211 			/* Explicit open zones */
5212 			if (zsp->z_cond != ZC3_EXPLICIT_OPEN)
5213 				continue;
5214 			break;
5215 		case 0x04:
5216 			/* Closed zones */
5217 			if (zsp->z_cond != ZC4_CLOSED)
5218 				continue;
5219 			break;
5220 		case 0x05:
5221 			/* Full zones */
5222 			if (zsp->z_cond != ZC5_FULL)
5223 				continue;
5224 			break;
5225 		case 0x06:
5226 		case 0x07:
5227 		case 0x10:
5228 			/*
5229 			 * Read-only, offline, reset WP recommended are
5230 			 * not emulated: no zones to report.
5231 			 */
5232 			continue;
5233 		case 0x11:
5234 			/* non-seq-resource set */
5235 			if (!zsp->z_non_seq_resource)
5236 				continue;
5237 			break;
5238 		case 0x3e:
5239 			/* All zones except gap zones. */
5240 			if (zbc_zone_is_gap(zsp))
5241 				continue;
5242 			break;
5243 		case 0x3f:
5244 			/* Not write pointer (conventional) zones */
5245 			if (zbc_zone_is_seq(zsp))
5246 				continue;
5247 			break;
5248 		default:
5249 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
5250 					INVALID_FIELD_IN_CDB, 0);
5251 			ret = check_condition_result;
5252 			goto fini;
5253 		}
5254 
5255 		if (nrz < rep_max_zones) {
5256 			/* Fill zone descriptor */
5257 			desc[0] = zsp->z_type;
5258 			desc[1] = zsp->z_cond << 4;
5259 			if (zsp->z_non_seq_resource)
5260 				desc[1] |= 1 << 1;
5261 			put_unaligned_be64((u64)zsp->z_size, desc + 8);
5262 			put_unaligned_be64((u64)zsp->z_start, desc + 16);
5263 			put_unaligned_be64((u64)zsp->z_wp, desc + 24);
5264 			desc += 64;
5265 		}
5266 
5267 		if (partial && nrz >= rep_max_zones)
5268 			break;
5269 
5270 		nrz++;
5271 	}
5272 
5273 	/* Report header */
5274 	/* Zone list length. */
5275 	put_unaligned_be32(nrz * RZONES_DESC_HD, arr + 0);
5276 	/* Maximum LBA */
5277 	put_unaligned_be64(sdebug_capacity - 1, arr + 8);
5278 	/* Zone starting LBA granularity. */
5279 	if (devip->zcap < devip->zsize)
5280 		put_unaligned_be64(devip->zsize, arr + 16);
5281 
5282 	rep_len = (unsigned long)desc - (unsigned long)arr;
5283 	ret = fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, rep_len));
5284 
5285 fini:
5286 	sdeb_meta_read_unlock(sip);
5287 	kfree(arr);
5288 	return ret;
5289 }
5290 
5291 static int resp_atomic_write(struct scsi_cmnd *scp,
5292 			     struct sdebug_dev_info *devip)
5293 {
5294 	struct sdeb_store_info *sip;
5295 	u8 *cmd = scp->cmnd;
5296 	u16 boundary, len;
5297 	u64 lba, lba_tmp;
5298 	int ret;
5299 
5300 	if (!scsi_debug_atomic_write()) {
5301 		mk_sense_invalid_opcode(scp);
5302 		return check_condition_result;
5303 	}
5304 
5305 	sip = devip2sip(devip, true);
5306 
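	/* WRITE ATOMIC(16): LBA, ATOMIC BOUNDARY and TRANSFER LENGTH are taken from the CDB */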
5307 	lba = get_unaligned_be64(cmd + 2);
5308 	boundary = get_unaligned_be16(cmd + 10);
5309 	len = get_unaligned_be16(cmd + 12);
5310 
5311 	lba_tmp = lba;
5312 	if (sdebug_atomic_wr_align &&
5313 	    do_div(lba_tmp, sdebug_atomic_wr_align)) {
5314 		/* Does not meet alignment requirement */
5315 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
5316 		return check_condition_result;
5317 	}
5318 
5319 	if (sdebug_atomic_wr_gran && len % sdebug_atomic_wr_gran) {
5320 		/* Does not meet alignment requirement */
5321 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
5322 		return check_condition_result;
5323 	}
5324 
5325 	if (boundary > 0) {
5326 		if (boundary > sdebug_atomic_wr_max_bndry) {
5327 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 12, -1);
5328 			return check_condition_result;
5329 		}
5330 
5331 		if (len > sdebug_atomic_wr_max_length_bndry) {
5332 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 12, -1);
5333 			return check_condition_result;
5334 		}
5335 	} else {
5336 		if (len > sdebug_atomic_wr_max_length) {
5337 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 12, -1);
5338 			return check_condition_result;
5339 		}
5340 	}
5341 
5342 	ret = do_device_access(sip, scp, 0, lba, len, 0, true, true);
5343 	if (unlikely(ret == -1))
5344 		return DID_ERROR << 16;
5345 	if (unlikely(ret != len * sdebug_sector_size))
5346 		return DID_ERROR << 16;
5347 	return 0;
5348 }
5349 
5350 /* Logic transplanted from tcmu-runner, file_zbc.c */
5351 static void zbc_open_all(struct sdebug_dev_info *devip)
5352 {
5353 	struct sdeb_zone_state *zsp = &devip->zstate[0];
5354 	unsigned int i;
5355 
5356 	for (i = 0; i < devip->nr_zones; i++, zsp++) {
5357 		if (zsp->z_cond == ZC4_CLOSED)
5358 			zbc_open_zone(devip, &devip->zstate[i], true);
5359 	}
5360 }
5361 
5362 static int resp_open_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
5363 {
5364 	int res = 0;
5365 	u64 z_id;
5366 	enum sdebug_z_cond zc;
5367 	u8 *cmd = scp->cmnd;
5368 	struct sdeb_zone_state *zsp;
5369 	bool all = cmd[14] & 0x01;
5370 	struct sdeb_store_info *sip = devip2sip(devip, false);
5371 
5372 	if (!sdebug_dev_is_zoned(devip)) {
5373 		mk_sense_invalid_opcode(scp);
5374 		return check_condition_result;
5375 	}
5376 	sdeb_meta_write_lock(sip);
5377 
5378 	if (all) {
5379 		/* Check if all closed zones can be opened */
5380 		if (devip->max_open &&
5381 		    devip->nr_exp_open + devip->nr_closed > devip->max_open) {
5382 			mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
5383 					INSUFF_ZONE_ASCQ);
5384 			res = check_condition_result;
5385 			goto fini;
5386 		}
5387 		/* Open all closed zones */
5388 		zbc_open_all(devip);
5389 		goto fini;
5390 	}
5391 
5392 	/* Open the specified zone */
5393 	z_id = get_unaligned_be64(cmd + 2);
5394 	if (z_id >= sdebug_capacity) {
5395 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
5396 		res = check_condition_result;
5397 		goto fini;
5398 	}
5399 
5400 	zsp = zbc_zone(devip, z_id);
5401 	if (z_id != zsp->z_start) {
5402 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
5403 		res = check_condition_result;
5404 		goto fini;
5405 	}
5406 	if (zbc_zone_is_conv(zsp)) {
5407 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
5408 		res = check_condition_result;
5409 		goto fini;
5410 	}
5411 
5412 	zc = zsp->z_cond;
5413 	if (zc == ZC3_EXPLICIT_OPEN || zc == ZC5_FULL)
5414 		goto fini;
5415 
5416 	if (devip->max_open && devip->nr_exp_open >= devip->max_open) {
5417 		mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
5418 				INSUFF_ZONE_ASCQ);
5419 		res = check_condition_result;
5420 		goto fini;
5421 	}
5422 
5423 	zbc_open_zone(devip, zsp, true);
5424 fini:
5425 	sdeb_meta_write_unlock(sip);
5426 	return res;
5427 }
5428 
5429 static void zbc_close_all(struct sdebug_dev_info *devip)
5430 {
5431 	unsigned int i;
5432 
5433 	for (i = 0; i < devip->nr_zones; i++)
5434 		zbc_close_zone(devip, &devip->zstate[i]);
5435 }
5436 
5437 static int resp_close_zone(struct scsi_cmnd *scp,
5438 			   struct sdebug_dev_info *devip)
5439 {
5440 	int res = 0;
5441 	u64 z_id;
5442 	u8 *cmd = scp->cmnd;
5443 	struct sdeb_zone_state *zsp;
5444 	bool all = cmd[14] & 0x01;
5445 	struct sdeb_store_info *sip = devip2sip(devip, false);
5446 
5447 	if (!sdebug_dev_is_zoned(devip)) {
5448 		mk_sense_invalid_opcode(scp);
5449 		return check_condition_result;
5450 	}
5451 
5452 	sdeb_meta_write_lock(sip);
5453 
5454 	if (all) {
5455 		zbc_close_all(devip);
5456 		goto fini;
5457 	}
5458 
5459 	/* Close specified zone */
5460 	z_id = get_unaligned_be64(cmd + 2);
5461 	if (z_id >= sdebug_capacity) {
5462 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
5463 		res = check_condition_result;
5464 		goto fini;
5465 	}
5466 
5467 	zsp = zbc_zone(devip, z_id);
5468 	if (z_id != zsp->z_start) {
5469 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
5470 		res = check_condition_result;
5471 		goto fini;
5472 	}
5473 	if (zbc_zone_is_conv(zsp)) {
5474 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
5475 		res = check_condition_result;
5476 		goto fini;
5477 	}
5478 
5479 	zbc_close_zone(devip, zsp);
5480 fini:
5481 	sdeb_meta_write_unlock(sip);
5482 	return res;
5483 }
5484 
5485 static void zbc_finish_zone(struct sdebug_dev_info *devip,
5486 			    struct sdeb_zone_state *zsp, bool empty)
5487 {
5488 	enum sdebug_z_cond zc = zsp->z_cond;
5489 
5490 	if (zc == ZC4_CLOSED || zc == ZC2_IMPLICIT_OPEN ||
5491 	    zc == ZC3_EXPLICIT_OPEN || (empty && zc == ZC1_EMPTY)) {
5492 		if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
5493 			zbc_close_zone(devip, zsp);
5494 		if (zsp->z_cond == ZC4_CLOSED)
5495 			devip->nr_closed--;
5496 		zsp->z_wp = zsp->z_start + zsp->z_size;
5497 		zsp->z_cond = ZC5_FULL;
5498 	}
5499 }
5500 
5501 static void zbc_finish_all(struct sdebug_dev_info *devip)
5502 {
5503 	unsigned int i;
5504 
5505 	for (i = 0; i < devip->nr_zones; i++)
5506 		zbc_finish_zone(devip, &devip->zstate[i], false);
5507 }
5508 
5509 static int resp_finish_zone(struct scsi_cmnd *scp,
5510 			    struct sdebug_dev_info *devip)
5511 {
5512 	struct sdeb_zone_state *zsp;
5513 	int res = 0;
5514 	u64 z_id;
5515 	u8 *cmd = scp->cmnd;
5516 	bool all = cmd[14] & 0x01;
5517 	struct sdeb_store_info *sip = devip2sip(devip, false);
5518 
5519 	if (!sdebug_dev_is_zoned(devip)) {
5520 		mk_sense_invalid_opcode(scp);
5521 		return check_condition_result;
5522 	}
5523 
5524 	sdeb_meta_write_lock(sip);
5525 
5526 	if (all) {
5527 		zbc_finish_all(devip);
5528 		goto fini;
5529 	}
5530 
5531 	/* Finish the specified zone */
5532 	z_id = get_unaligned_be64(cmd + 2);
5533 	if (z_id >= sdebug_capacity) {
5534 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
5535 		res = check_condition_result;
5536 		goto fini;
5537 	}
5538 
5539 	zsp = zbc_zone(devip, z_id);
5540 	if (z_id != zsp->z_start) {
5541 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
5542 		res = check_condition_result;
5543 		goto fini;
5544 	}
5545 	if (zbc_zone_is_conv(zsp)) {
5546 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
5547 		res = check_condition_result;
5548 		goto fini;
5549 	}
5550 
5551 	zbc_finish_zone(devip, zsp, true);
5552 fini:
5553 	sdeb_meta_write_unlock(sip);
5554 	return res;
5555 }
5556 
5557 static void zbc_rwp_zone(struct sdebug_dev_info *devip,
5558 			 struct sdeb_zone_state *zsp)
5559 {
5560 	enum sdebug_z_cond zc;
5561 	struct sdeb_store_info *sip = devip2sip(devip, false);
5562 
5563 	if (!zbc_zone_is_seq(zsp))
5564 		return;
5565 
5566 	zc = zsp->z_cond;
5567 	if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
5568 		zbc_close_zone(devip, zsp);
5569 
5570 	if (zsp->z_cond == ZC4_CLOSED)
5571 		devip->nr_closed--;
5572 
5573 	if (zsp->z_wp > zsp->z_start)
5574 		memset(sip->storep + zsp->z_start * sdebug_sector_size, 0,
5575 		       (zsp->z_wp - zsp->z_start) * sdebug_sector_size);
5576 
5577 	zsp->z_non_seq_resource = false;
5578 	zsp->z_wp = zsp->z_start;
5579 	zsp->z_cond = ZC1_EMPTY;
5580 }
5581 
5582 static void zbc_rwp_all(struct sdebug_dev_info *devip)
5583 {
5584 	unsigned int i;
5585 
5586 	for (i = 0; i < devip->nr_zones; i++)
5587 		zbc_rwp_zone(devip, &devip->zstate[i]);
5588 }
5589 
5590 static int resp_rwp_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
5591 {
5592 	struct sdeb_zone_state *zsp;
5593 	int res = 0;
5594 	u64 z_id;
5595 	u8 *cmd = scp->cmnd;
5596 	bool all = cmd[14] & 0x01;
5597 	struct sdeb_store_info *sip = devip2sip(devip, false);
5598 
5599 	if (!sdebug_dev_is_zoned(devip)) {
5600 		mk_sense_invalid_opcode(scp);
5601 		return check_condition_result;
5602 	}
5603 
5604 	sdeb_meta_write_lock(sip);
5605 
5606 	if (all) {
5607 		zbc_rwp_all(devip);
5608 		goto fini;
5609 	}
5610 
5611 	z_id = get_unaligned_be64(cmd + 2);
5612 	if (z_id >= sdebug_capacity) {
5613 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
5614 		res = check_condition_result;
5615 		goto fini;
5616 	}
5617 
5618 	zsp = zbc_zone(devip, z_id);
5619 	if (z_id != zsp->z_start) {
5620 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
5621 		res = check_condition_result;
5622 		goto fini;
5623 	}
5624 	if (zbc_zone_is_conv(zsp)) {
5625 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
5626 		res = check_condition_result;
5627 		goto fini;
5628 	}
5629 
5630 	zbc_rwp_zone(devip, zsp);
5631 fini:
5632 	sdeb_meta_write_unlock(sip);
5633 	return res;
5634 }
5635 
5636 static u32 get_tag(struct scsi_cmnd *cmnd)
5637 {
5638 	return blk_mq_unique_tag(scsi_cmd_to_rq(cmnd));
5639 }
5640 
5641 /* Queued (deferred) command completions converge here. */
5642 static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
5643 {
5644 	struct sdebug_queued_cmd *sqcp = container_of(sd_dp, struct sdebug_queued_cmd, sd_dp);
5645 	unsigned long flags;
5646 	struct scsi_cmnd *scp = sqcp->scmd;
5647 	struct sdebug_scsi_cmd *sdsc;
5648 	bool aborted;
5649 
5650 	if (sdebug_statistics) {
5651 		atomic_inc(&sdebug_completions);
5652 		if (raw_smp_processor_id() != sd_dp->issuing_cpu)
5653 			atomic_inc(&sdebug_miss_cpus);
5654 	}
5655 
5656 	if (!scp) {
5657 		pr_err("scmd=NULL\n");
5658 		goto out;
5659 	}
5660 
5661 	sdsc = scsi_cmd_priv(scp);
5662 	spin_lock_irqsave(&sdsc->lock, flags);
5663 	aborted = sd_dp->aborted;
5664 	if (unlikely(aborted))
5665 		sd_dp->aborted = false;
5666 	ASSIGN_QUEUED_CMD(scp, NULL);
5667 
5668 	spin_unlock_irqrestore(&sdsc->lock, flags);
5669 
5670 	if (aborted) {
5671 		pr_info("bypassing scsi_done() due to aborted cmd, kicking-off EH\n");
5672 		blk_abort_request(scsi_cmd_to_rq(scp));
5673 		goto out;
5674 	}
5675 
5676 	scsi_done(scp); /* callback to mid level */
5677 out:
5678 	sdebug_free_queued_cmd(sqcp);
5679 }
5680 
5681 /* When high resolution timer goes off this function is called. */
5682 static enum hrtimer_restart sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
5683 {
5684 	struct sdebug_defer *sd_dp = container_of(timer, struct sdebug_defer,
5685 						  hrt);
5686 	sdebug_q_cmd_complete(sd_dp);
5687 	return HRTIMER_NORESTART;
5688 }
5689 
5690 /* When work queue schedules work, it calls this function. */
5691 static void sdebug_q_cmd_wq_complete(struct work_struct *work)
5692 {
5693 	struct sdebug_defer *sd_dp = container_of(work, struct sdebug_defer,
5694 						  ew.work);
5695 	sdebug_q_cmd_complete(sd_dp);
5696 }
5697 
5698 static bool got_shared_uuid;
5699 static uuid_t shared_uuid;
5700 
5701 static int sdebug_device_create_zones(struct sdebug_dev_info *devip)
5702 {
5703 	struct sdeb_zone_state *zsp;
5704 	sector_t capacity = get_sdebug_capacity();
5705 	sector_t conv_capacity;
5706 	sector_t zstart = 0;
5707 	unsigned int i;
5708 
5709 	/*
5710 	 * Set the zone size: if sdeb_zbc_zone_size_mb is not set, figure out
5711 	 * a zone size allowing for at least 4 zones on the device. Otherwise,
5712 	 * use the specified zone size checking that at least 2 zones can be
5713 	 * created for the device.
5714 	 */
5715 	if (!sdeb_zbc_zone_size_mb) {
5716 		devip->zsize = (DEF_ZBC_ZONE_SIZE_MB * SZ_1M)
5717 			>> ilog2(sdebug_sector_size);
5718 		while (capacity < devip->zsize << 2 && devip->zsize >= 2)
5719 			devip->zsize >>= 1;
5720 		if (devip->zsize < 2) {
5721 			pr_err("Device capacity too small\n");
5722 			return -EINVAL;
5723 		}
5724 	} else {
5725 		if (!is_power_of_2(sdeb_zbc_zone_size_mb)) {
5726 			pr_err("Zone size is not a power of 2\n");
5727 			return -EINVAL;
5728 		}
5729 		devip->zsize = (sdeb_zbc_zone_size_mb * SZ_1M)
5730 			>> ilog2(sdebug_sector_size);
5731 		if (devip->zsize >= capacity) {
5732 			pr_err("Zone size too large for device capacity\n");
5733 			return -EINVAL;
5734 		}
5735 	}
5736 
5737 	devip->zsize_shift = ilog2(devip->zsize);
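	/* provisional count; recomputed below once the conventional/sequential zone split is known */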
5738 	devip->nr_zones = (capacity + devip->zsize - 1) >> devip->zsize_shift;
5739 
5740 	if (sdeb_zbc_zone_cap_mb == 0) {
5741 		devip->zcap = devip->zsize;
5742 	} else {
5743 		devip->zcap = (sdeb_zbc_zone_cap_mb * SZ_1M) >>
5744 			      ilog2(sdebug_sector_size);
5745 		if (devip->zcap > devip->zsize) {
5746 			pr_err("Zone capacity too large\n");
5747 			return -EINVAL;
5748 		}
5749 	}
5750 
5751 	conv_capacity = (sector_t)sdeb_zbc_nr_conv << devip->zsize_shift;
5752 	if (conv_capacity >= capacity) {
5753 		pr_err("Number of conventional zones too large\n");
5754 		return -EINVAL;
5755 	}
5756 	devip->nr_conv_zones = sdeb_zbc_nr_conv;
5757 	devip->nr_seq_zones = ALIGN(capacity - conv_capacity, devip->zsize) >>
5758 			      devip->zsize_shift;
5759 	devip->nr_zones = devip->nr_conv_zones + devip->nr_seq_zones;
5760 
5761 	/* Add gap zones if zone capacity is smaller than the zone size */
5762 	if (devip->zcap < devip->zsize)
5763 		devip->nr_zones += devip->nr_seq_zones;
5764 
5765 	if (devip->zoned) {
5766 		/* zbc_max_open_zones can be 0, meaning "not reported" */
5767 		if (sdeb_zbc_max_open >= devip->nr_zones - 1)
5768 			devip->max_open = (devip->nr_zones - 1) / 2;
5769 		else
5770 			devip->max_open = sdeb_zbc_max_open;
5771 	}
5772 
5773 	devip->zstate = kcalloc(devip->nr_zones,
5774 				sizeof(struct sdeb_zone_state), GFP_KERNEL);
5775 	if (!devip->zstate)
5776 		return -ENOMEM;
5777 
5778 	for (i = 0; i < devip->nr_zones; i++) {
5779 		zsp = &devip->zstate[i];
5780 
5781 		zsp->z_start = zstart;
5782 
5783 		if (i < devip->nr_conv_zones) {
5784 			zsp->z_type = ZBC_ZTYPE_CNV;
5785 			zsp->z_cond = ZBC_NOT_WRITE_POINTER;
5786 			zsp->z_wp = (sector_t)-1;
5787 			zsp->z_size =
5788 				min_t(u64, devip->zsize, capacity - zstart);
5789 		} else if ((zstart & (devip->zsize - 1)) == 0) {
5790 			if (devip->zoned)
5791 				zsp->z_type = ZBC_ZTYPE_SWR;
5792 			else
5793 				zsp->z_type = ZBC_ZTYPE_SWP;
5794 			zsp->z_cond = ZC1_EMPTY;
5795 			zsp->z_wp = zsp->z_start;
5796 			zsp->z_size =
5797 				min_t(u64, devip->zcap, capacity - zstart);
5798 		} else {
5799 			zsp->z_type = ZBC_ZTYPE_GAP;
5800 			zsp->z_cond = ZBC_NOT_WRITE_POINTER;
5801 			zsp->z_wp = (sector_t)-1;
5802 			zsp->z_size = min_t(u64, devip->zsize - devip->zcap,
5803 					    capacity - zstart);
5804 		}
5805 
5806 		WARN_ON_ONCE((int)zsp->z_size <= 0);
5807 		zstart += zsp->z_size;
5808 	}
5809 
5810 	return 0;
5811 }
5812 
5813 static struct sdebug_dev_info *sdebug_device_create(
5814 			struct sdebug_host_info *sdbg_host, gfp_t flags)
5815 {
5816 	struct sdebug_dev_info *devip;
5817 
5818 	devip = kzalloc(sizeof(*devip), flags);
5819 	if (devip) {
5820 		if (sdebug_uuid_ctl == 1)
5821 			uuid_gen(&devip->lu_name);
5822 		else if (sdebug_uuid_ctl == 2) {
5823 			if (got_shared_uuid)
5824 				devip->lu_name = shared_uuid;
5825 			else {
5826 				uuid_gen(&shared_uuid);
5827 				got_shared_uuid = true;
5828 				devip->lu_name = shared_uuid;
5829 			}
5830 		}
5831 		devip->sdbg_host = sdbg_host;
5832 		if (sdeb_zbc_in_use) {
5833 			devip->zoned = sdeb_zbc_model == BLK_ZONED_HM;
5834 			if (sdebug_device_create_zones(devip)) {
5835 				kfree(devip);
5836 				return NULL;
5837 			}
5838 		} else {
5839 			devip->zoned = false;
5840 		}
5841 		devip->create_ts = ktime_get_boottime();
5842 		atomic_set(&devip->stopped, (sdeb_tur_ms_to_ready > 0 ? 2 : 0));
5843 		spin_lock_init(&devip->list_lock);
5844 		INIT_LIST_HEAD(&devip->inject_err_list);
5845 		list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
5846 	}
5847 	return devip;
5848 }
5849 
5850 static struct sdebug_dev_info *find_build_dev_info(struct scsi_device *sdev)
5851 {
5852 	struct sdebug_host_info *sdbg_host;
5853 	struct sdebug_dev_info *open_devip = NULL;
5854 	struct sdebug_dev_info *devip;
5855 
5856 	sdbg_host = shost_to_sdebug_host(sdev->host);
5857 
5858 	list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
5859 		if ((devip->used) && (devip->channel == sdev->channel) &&
5860 		    (devip->target == sdev->id) &&
5861 		    (devip->lun == sdev->lun))
5862 			return devip;
5863 		else {
5864 			if ((!devip->used) && (!open_devip))
5865 				open_devip = devip;
5866 		}
5867 	}
5868 	if (!open_devip) { /* try and make a new one */
5869 		open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
5870 		if (!open_devip) {
5871 			pr_err("out of memory at line %d\n", __LINE__);
5872 			return NULL;
5873 		}
5874 	}
5875 
5876 	open_devip->channel = sdev->channel;
5877 	open_devip->target = sdev->id;
5878 	open_devip->lun = sdev->lun;
5879 	open_devip->sdbg_host = sdbg_host;
5880 	set_bit(SDEBUG_UA_POOCCUR, open_devip->uas_bm);
5881 	open_devip->used = true;
5882 	return open_devip;
5883 }
5884 
5885 static int scsi_debug_slave_alloc(struct scsi_device *sdp)
5886 {
5887 	if (sdebug_verbose)
5888 		pr_info("slave_alloc <%u %u %u %llu>\n",
5889 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5890 
5891 	return 0;
5892 }
5893 
5894 static int scsi_debug_slave_configure(struct scsi_device *sdp)
5895 {
5896 	struct sdebug_dev_info *devip =
5897 			(struct sdebug_dev_info *)sdp->hostdata;
5898 	struct dentry *dentry;
5899 
5900 	if (sdebug_verbose)
5901 		pr_info("slave_configure <%u %u %u %llu>\n",
5902 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5903 	if (sdp->host->max_cmd_len != SDEBUG_MAX_CMD_LEN)
5904 		sdp->host->max_cmd_len = SDEBUG_MAX_CMD_LEN;
5905 	if (devip == NULL) {
5906 		devip = find_build_dev_info(sdp);
5907 		if (devip == NULL)
5908 			return 1;  /* no resources, will be marked offline */
5909 	}
5910 	sdp->hostdata = devip;
5911 	if (sdebug_no_uld)
5912 		sdp->no_uld_attach = 1;
5913 	config_cdb_len(sdp);
5914 
5915 	if (sdebug_allow_restart)
5916 		sdp->allow_restart = 1;
5917 
5918 	devip->debugfs_entry = debugfs_create_dir(dev_name(&sdp->sdev_dev),
5919 				sdebug_debugfs_root);
5920 	if (IS_ERR_OR_NULL(devip->debugfs_entry))
5921 		pr_info("%s: failed to create debugfs directory for device %s\n",
5922 			__func__, dev_name(&sdp->sdev_gendev));
5923 
5924 	dentry = debugfs_create_file("error", 0600, devip->debugfs_entry, sdp,
5925 				&sdebug_error_fops);
5926 	if (IS_ERR_OR_NULL(dentry))
5927 		pr_info("%s: failed to create error file for device %s\n",
5928 			__func__, dev_name(&sdp->sdev_gendev));
5929 
5930 	return 0;
5931 }
5932 
5933 static void scsi_debug_slave_destroy(struct scsi_device *sdp)
5934 {
5935 	struct sdebug_dev_info *devip =
5936 		(struct sdebug_dev_info *)sdp->hostdata;
5937 	struct sdebug_err_inject *err;
5938 
5939 	if (sdebug_verbose)
5940 		pr_info("slave_destroy <%u %u %u %llu>\n",
5941 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5942 
5943 	if (!devip)
5944 		return;
5945 
5946 	spin_lock(&devip->list_lock);
5947 	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
5948 		list_del_rcu(&err->list);
5949 		call_rcu(&err->rcu, sdebug_err_free);
5950 	}
5951 	spin_unlock(&devip->list_lock);
5952 
5953 	debugfs_remove(devip->debugfs_entry);
5954 
5955 	/* make this slot available for re-use */
5956 	devip->used = false;
5957 	sdp->hostdata = NULL;
5958 }
5959 
5960 /* Returns true if we require the queued memory to be freed by the caller. */
5961 static bool stop_qc_helper(struct sdebug_defer *sd_dp,
5962 			   enum sdeb_defer_type defer_t)
5963 {
5964 	if (defer_t == SDEB_DEFER_HRT) {
5965 		int res = hrtimer_try_to_cancel(&sd_dp->hrt);
5966 
5967 		switch (res) {
5968 		case 0: /* Not active, it must have already run */
5969 		case -1: /* It's executing the callback */
5970 			return false;
5971 		case 1: /* Was active, we've now cancelled */
5972 		default:
5973 			return true;
5974 		}
5975 	} else if (defer_t == SDEB_DEFER_WQ) {
5976 		/* Cancel if pending */
5977 		if (cancel_work_sync(&sd_dp->ew.work))
5978 			return true;
5979 		/* Was not pending, so it must have run */
5980 		return false;
5981 	} else if (defer_t == SDEB_DEFER_POLL) {
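		/* deferred to the poll path: nothing to cancel, so the caller must free the queued command */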
5982 		return true;
5983 	}
5984 
5985 	return false;
5986 }
5987 
5988 
5989 static bool scsi_debug_stop_cmnd(struct scsi_cmnd *cmnd)
5990 {
5991 	enum sdeb_defer_type l_defer_t;
5992 	struct sdebug_defer *sd_dp;
5993 	struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmnd);
5994 	struct sdebug_queued_cmd *sqcp = TO_QUEUED_CMD(cmnd);
5995 
5996 	lockdep_assert_held(&sdsc->lock);
5997 
5998 	if (!sqcp)
5999 		return false;
6000 	sd_dp = &sqcp->sd_dp;
6001 	l_defer_t = READ_ONCE(sd_dp->defer_t);
6002 	ASSIGN_QUEUED_CMD(cmnd, NULL);
6003 
6004 	if (stop_qc_helper(sd_dp, l_defer_t))
6005 		sdebug_free_queued_cmd(sqcp);
6006 
6007 	return true;
6008 }
6009 
6010 /*
6011  * Called from scsi_debug_abort() only, which is for timed-out cmd.
6012  */
6013 static bool scsi_debug_abort_cmnd(struct scsi_cmnd *cmnd)
6014 {
6015 	struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmnd);
6016 	unsigned long flags;
6017 	bool res;
6018 
6019 	spin_lock_irqsave(&sdsc->lock, flags);
6020 	res = scsi_debug_stop_cmnd(cmnd);
6021 	spin_unlock_irqrestore(&sdsc->lock, flags);
6022 
6023 	return res;
6024 }
6025 
6026 /*
6027  * All we can do is set the cmnd as internally aborted and wait for it to
6028  * finish. We cannot call scsi_done() as the normal completion path may do that.
6029  */
6030 static bool sdebug_stop_cmnd(struct request *rq, void *data)
6031 {
6032 	scsi_debug_abort_cmnd(blk_mq_rq_to_pdu(rq));
6033 
6034 	return true;
6035 }
6036 
6037 /* Deletes (stops) timers or work queues of all queued commands */
6038 static void stop_all_queued(void)
6039 {
6040 	struct sdebug_host_info *sdhp;
6041 
6042 	mutex_lock(&sdebug_host_list_mutex);
6043 	list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
6044 		struct Scsi_Host *shost = sdhp->shost;
6045 
6046 		blk_mq_tagset_busy_iter(&shost->tag_set, sdebug_stop_cmnd, NULL);
6047 	}
6048 	mutex_unlock(&sdebug_host_list_mutex);
6049 }
6050 
6051 static int sdebug_fail_abort(struct scsi_cmnd *cmnd)
6052 {
6053 	struct scsi_device *sdp = cmnd->device;
6054 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
6055 	struct sdebug_err_inject *err;
6056 	unsigned char *cmd = cmnd->cmnd;
6057 	int ret = 0;
6058 
6059 	if (devip == NULL)
6060 		return 0;
6061 
6062 	rcu_read_lock();
6063 	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
6064 		if (err->type == ERR_ABORT_CMD_FAILED &&
6065 		    (err->cmd == cmd[0] || err->cmd == 0xff)) {
6066 			ret = !!err->cnt;
6067 			if (err->cnt < 0)
6068 				err->cnt++;
6069 
6070 			rcu_read_unlock();
6071 			return ret;
6072 		}
6073 	}
6074 	rcu_read_unlock();
6075 
6076 	return 0;
6077 }
6078 
6079 static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
6080 {
6081 	bool ok = scsi_debug_abort_cmnd(SCpnt);
6082 	u8 *cmd = SCpnt->cmnd;
6083 	u8 opcode = cmd[0];
6084 
6085 	++num_aborts;
6086 
6087 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
6088 		sdev_printk(KERN_INFO, SCpnt->device,
6089 			    "%s: command%s found\n", __func__,
6090 			    ok ? "" : " not");
6091 
6092 	if (sdebug_fail_abort(SCpnt)) {
6093 		scmd_printk(KERN_INFO, SCpnt, "fail abort command 0x%x\n",
6094 			    opcode);
6095 		return FAILED;
6096 	}
6097 
6098 	return SUCCESS;
6099 }
6100 
6101 static bool scsi_debug_stop_all_queued_iter(struct request *rq, void *data)
6102 {
6103 	struct scsi_device *sdp = data;
6104 	struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
6105 
6106 	if (scmd->device == sdp)
6107 		scsi_debug_abort_cmnd(scmd);
6108 
6109 	return true;
6110 }
6111 
6112 /* Deletes (stops) timers or work queues of all queued commands per sdev */
6113 static void scsi_debug_stop_all_queued(struct scsi_device *sdp)
6114 {
6115 	struct Scsi_Host *shost = sdp->host;
6116 
6117 	blk_mq_tagset_busy_iter(&shost->tag_set,
6118 				scsi_debug_stop_all_queued_iter, sdp);
6119 }
6120 
6121 static int sdebug_fail_lun_reset(struct scsi_cmnd *cmnd)
6122 {
6123 	struct scsi_device *sdp = cmnd->device;
6124 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
6125 	struct sdebug_err_inject *err;
6126 	unsigned char *cmd = cmnd->cmnd;
6127 	int ret = 0;
6128 
6129 	if (devip == NULL)
6130 		return 0;
6131 
6132 	rcu_read_lock();
6133 	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
6134 		if (err->type == ERR_LUN_RESET_FAILED &&
6135 		    (err->cmd == cmd[0] || err->cmd == 0xff)) {
6136 			ret = !!err->cnt;
6137 			if (err->cnt < 0)
6138 				err->cnt++;
6139 
6140 			rcu_read_unlock();
6141 			return ret;
6142 		}
6143 	}
6144 	rcu_read_unlock();
6145 
6146 	return 0;
6147 }
6148 
6149 static int scsi_debug_device_reset(struct scsi_cmnd *SCpnt)
6150 {
6151 	struct scsi_device *sdp = SCpnt->device;
6152 	struct sdebug_dev_info *devip = sdp->hostdata;
6153 	u8 *cmd = SCpnt->cmnd;
6154 	u8 opcode = cmd[0];
6155 
6156 	++num_dev_resets;
6157 
6158 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
6159 		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
6160 
6161 	scsi_debug_stop_all_queued(sdp);
6162 	if (devip)
6163 		set_bit(SDEBUG_UA_POR, devip->uas_bm);
6164 
6165 	if (sdebug_fail_lun_reset(SCpnt)) {
6166 		scmd_printk(KERN_INFO, SCpnt, "fail lun reset 0x%x\n", opcode);
6167 		return FAILED;
6168 	}
6169 
6170 	return SUCCESS;
6171 }
6172 
6173 static int sdebug_fail_target_reset(struct scsi_cmnd *cmnd)
6174 {
6175 	struct scsi_target *starget = scsi_target(cmnd->device);
6176 	struct sdebug_target_info *targetip =
6177 		(struct sdebug_target_info *)starget->hostdata;
6178 
6179 	if (targetip)
6180 		return targetip->reset_fail;
6181 
6182 	return 0;
6183 }
6184 
6185 static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt)
6186 {
6187 	struct scsi_device *sdp = SCpnt->device;
6188 	struct sdebug_host_info *sdbg_host = shost_to_sdebug_host(sdp->host);
6189 	struct sdebug_dev_info *devip;
6190 	u8 *cmd = SCpnt->cmnd;
6191 	u8 opcode = cmd[0];
6192 	int k = 0;
6193 
6194 	++num_target_resets;
6195 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
6196 		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
6197 
6198 	list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
6199 		if (devip->target == sdp->id) {
6200 			set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
6201 			++k;
6202 		}
6203 	}
6204 
6205 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
6206 		sdev_printk(KERN_INFO, sdp,
6207 			    "%s: %d device(s) found in target\n", __func__, k);
6208 
6209 	if (sdebug_fail_target_reset(SCpnt)) {
6210 		scmd_printk(KERN_INFO, SCpnt, "fail target reset 0x%x\n",
6211 			    opcode);
6212 		return FAILED;
6213 	}
6214 
6215 	return SUCCESS;
6216 }
6217 
6218 static int scsi_debug_bus_reset(struct scsi_cmnd *SCpnt)
6219 {
6220 	struct scsi_device *sdp = SCpnt->device;
6221 	struct sdebug_host_info *sdbg_host = shost_to_sdebug_host(sdp->host);
6222 	struct sdebug_dev_info *devip;
6223 	int k = 0;
6224 
6225 	++num_bus_resets;
6226 
6227 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
6228 		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
6229 
6230 	list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
6231 		set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
6232 		++k;
6233 	}
6234 
6235 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
6236 		sdev_printk(KERN_INFO, sdp,
6237 			    "%s: %d device(s) found in host\n", __func__, k);
6238 	return SUCCESS;
6239 }
6240 
6241 static int scsi_debug_host_reset(struct scsi_cmnd *SCpnt)
6242 {
6243 	struct sdebug_host_info *sdbg_host;
6244 	struct sdebug_dev_info *devip;
6245 	int k = 0;
6246 
6247 	++num_host_resets;
6248 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
6249 		sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__);
6250 	mutex_lock(&sdebug_host_list_mutex);
6251 	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
6252 		list_for_each_entry(devip, &sdbg_host->dev_info_list,
6253 				    dev_list) {
6254 			set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
6255 			++k;
6256 		}
6257 	}
6258 	mutex_unlock(&sdebug_host_list_mutex);
6259 	stop_all_queued();
6260 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
6261 		sdev_printk(KERN_INFO, SCpnt->device,
6262 			    "%s: %d device(s) found\n", __func__, k);
6263 	return SUCCESS;
6264 }
6265 
6266 static void sdebug_build_parts(unsigned char *ramp, unsigned long store_size)
6267 {
6268 	struct msdos_partition *pp;
6269 	int starts[SDEBUG_MAX_PARTS + 2], max_part_secs;
6270 	int sectors_per_part, num_sectors, k;
6271 	int heads_by_sects, start_sec, end_sec;
6272 
6273 	/* assume partition table already zeroed */
6274 	if ((sdebug_num_parts < 1) || (store_size < 1048576))
6275 		return;
6276 	if (sdebug_num_parts > SDEBUG_MAX_PARTS) {
6277 		sdebug_num_parts = SDEBUG_MAX_PARTS;
6278 		pr_warn("reducing partitions to %d\n", SDEBUG_MAX_PARTS);
6279 	}
6280 	num_sectors = (int)get_sdebug_capacity();
6281 	sectors_per_part = (num_sectors - sdebug_sectors_per)
6282 			   / sdebug_num_parts;
6283 	heads_by_sects = sdebug_heads * sdebug_sectors_per;
6284 	starts[0] = sdebug_sectors_per;
6285 	max_part_secs = sectors_per_part;
6286 	for (k = 1; k < sdebug_num_parts; ++k) {
6287 		starts[k] = ((k * sectors_per_part) / heads_by_sects)
6288 			    * heads_by_sects;
6289 		if (starts[k] - starts[k - 1] < max_part_secs)
6290 			max_part_secs = starts[k] - starts[k - 1];
6291 	}
6292 	starts[sdebug_num_parts] = num_sectors;
6293 	starts[sdebug_num_parts + 1] = 0;
6294 
6295 	ramp[510] = 0x55;	/* magic partition markings */
6296 	ramp[511] = 0xAA;
6297 	pp = (struct msdos_partition *)(ramp + 0x1be);
6298 	for (k = 0; starts[k + 1]; ++k, ++pp) {
6299 		start_sec = starts[k];
6300 		end_sec = starts[k] + max_part_secs - 1;
6301 		pp->boot_ind = 0;
6302 
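		/* convert the absolute start/end sectors to legacy CHS values */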
6303 		pp->cyl = start_sec / heads_by_sects;
6304 		pp->head = (start_sec - (pp->cyl * heads_by_sects))
6305 			   / sdebug_sectors_per;
6306 		pp->sector = (start_sec % sdebug_sectors_per) + 1;
6307 
6308 		pp->end_cyl = end_sec / heads_by_sects;
6309 		pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
6310 			       / sdebug_sectors_per;
6311 		pp->end_sector = (end_sec % sdebug_sectors_per) + 1;
6312 
6313 		pp->start_sect = cpu_to_le32(start_sec);
6314 		pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1);
6315 		pp->sys_ind = 0x83;	/* plain Linux partition */
6316 	}
6317 }
6318 
6319 static void block_unblock_all_queues(bool block)
6320 {
6321 	struct sdebug_host_info *sdhp;
6322 
6323 	lockdep_assert_held(&sdebug_host_list_mutex);
6324 
6325 	list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
6326 		struct Scsi_Host *shost = sdhp->shost;
6327 
6328 		if (block)
6329 			scsi_block_requests(shost);
6330 		else
6331 			scsi_unblock_requests(shost);
6332 	}
6333 }
6334 
6335 /* Adjust (by rounding down) the sdebug_cmnd_count so abs(every_nth)-1
6336  * commands will be processed normally before triggers occur.
6337  */
6338 static void tweak_cmnd_count(void)
6339 {
6340 	int count, modulo;
6341 
6342 	modulo = abs(sdebug_every_nth);
6343 	if (modulo < 2)
6344 		return;
6345 
6346 	mutex_lock(&sdebug_host_list_mutex);
6347 	block_unblock_all_queues(true);
6348 	count = atomic_read(&sdebug_cmnd_count);
6349 	atomic_set(&sdebug_cmnd_count, (count / modulo) * modulo);
6350 	block_unblock_all_queues(false);
6351 	mutex_unlock(&sdebug_host_list_mutex);
6352 }
6353 
6354 static void clear_queue_stats(void)
6355 {
6356 	atomic_set(&sdebug_cmnd_count, 0);
6357 	atomic_set(&sdebug_completions, 0);
6358 	atomic_set(&sdebug_miss_cpus, 0);
6359 	atomic_set(&sdebug_a_tsf, 0);
6360 }
6361 
6362 static bool inject_on_this_cmd(void)
6363 {
6364 	if (sdebug_every_nth == 0)
6365 		return false;
6366 	return (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) == 0;
6367 }
6368 
6369 #define INCLUSIVE_TIMING_MAX_NS 1000000		/* 1 millisecond */
6370 
6371 
6372 void sdebug_free_queued_cmd(struct sdebug_queued_cmd *sqcp)
6373 {
6374 	if (sqcp)
6375 		kmem_cache_free(queued_cmd_cache, sqcp);
6376 }
6377 
6378 static struct sdebug_queued_cmd *sdebug_alloc_queued_cmd(struct scsi_cmnd *scmd)
6379 {
6380 	struct sdebug_queued_cmd *sqcp;
6381 	struct sdebug_defer *sd_dp;
6382 
6383 	sqcp = kmem_cache_zalloc(queued_cmd_cache, GFP_ATOMIC);
6384 	if (!sqcp)
6385 		return NULL;
6386 
6387 	sd_dp = &sqcp->sd_dp;
6388 
6389 	hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
6390 	sd_dp->hrt.function = sdebug_q_cmd_hrt_complete;
6391 	INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);
6392 
6393 	sqcp->scmd = scmd;
6394 
6395 	return sqcp;
6396 }
6397 
6398 /* Complete the processing of the thread that queued a SCSI command to this
6399  * driver. It either completes the command by calling scsi_done() or
6400  * schedules a hrtimer or work queue and then returns 0. Returns
6401  * SCSI_MLQUEUE_HOST_BUSY if temporarily out of resources.
6402  */
6403 static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
6404 			 int scsi_result,
6405 			 int (*pfp)(struct scsi_cmnd *,
6406 				    struct sdebug_dev_info *),
6407 			 int delta_jiff, int ndelay)
6408 {
6409 	struct request *rq = scsi_cmd_to_rq(cmnd);
6410 	bool polled = rq->cmd_flags & REQ_POLLED;
6411 	struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmnd);
6412 	unsigned long flags;
6413 	u64 ns_from_boot = 0;
6414 	struct sdebug_queued_cmd *sqcp;
6415 	struct scsi_device *sdp;
6416 	struct sdebug_defer *sd_dp;
6417 
6418 	if (unlikely(devip == NULL)) {
6419 		if (scsi_result == 0)
6420 			scsi_result = DID_NO_CONNECT << 16;
6421 		goto respond_in_thread;
6422 	}
6423 	sdp = cmnd->device;
6424 
6425 	if (delta_jiff == 0)
6426 		goto respond_in_thread;
6427 
6428 
6429 	if (unlikely(sdebug_every_nth && (SDEBUG_OPT_RARE_TSF & sdebug_opts) &&
6430 		     (scsi_result == 0))) {
6431 		int num_in_q = scsi_device_busy(sdp);
6432 		int qdepth = cmnd->device->queue_depth;
6433 
6434 		if ((num_in_q == qdepth) &&
6435 		    (atomic_inc_return(&sdebug_a_tsf) >=
6436 		     abs(sdebug_every_nth))) {
6437 			atomic_set(&sdebug_a_tsf, 0);
6438 			scsi_result = device_qfull_result;
6439 
6440 			if (unlikely(SDEBUG_OPT_Q_NOISE & sdebug_opts))
6441 				sdev_printk(KERN_INFO, sdp, "%s: num_in_q=%d +1, <inject> status: TASK SET FULL\n",
6442 					    __func__, num_in_q);
6443 		}
6444 	}
6445 
6446 	sqcp = sdebug_alloc_queued_cmd(cmnd);
6447 	if (!sqcp) {
6448 		pr_err("%s no alloc\n", __func__);
6449 		return SCSI_MLQUEUE_HOST_BUSY;
6450 	}
6451 	sd_dp = &sqcp->sd_dp;
6452 
6453 	if (polled)
6454 		ns_from_boot = ktime_get_boottime_ns();
6455 
6456 	/* one of the resp_*() response functions is called here */
6457 	cmnd->result = pfp ? pfp(cmnd, devip) : 0;
6458 	if (cmnd->result & SDEG_RES_IMMED_MASK) {
6459 		cmnd->result &= ~SDEG_RES_IMMED_MASK;
6460 		delta_jiff = ndelay = 0;
6461 	}
6462 	if (cmnd->result == 0 && scsi_result != 0)
6463 		cmnd->result = scsi_result;
6464 	if (cmnd->result == 0 && unlikely(sdebug_opts & SDEBUG_OPT_TRANSPORT_ERR)) {
6465 		if (atomic_read(&sdeb_inject_pending)) {
6466 			mk_sense_buffer(cmnd, ABORTED_COMMAND, TRANSPORT_PROBLEM, ACK_NAK_TO);
6467 			atomic_set(&sdeb_inject_pending, 0);
6468 			cmnd->result = check_condition_result;
6469 		}
6470 	}
6471 
6472 	if (unlikely(sdebug_verbose && cmnd->result))
6473 		sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n",
6474 			    __func__, cmnd->result);
6475 
6476 	if (delta_jiff > 0 || ndelay > 0) {
6477 		ktime_t kt;
6478 
6479 		if (delta_jiff > 0) {
6480 			u64 ns = jiffies_to_nsecs(delta_jiff);
6481 
6482 			if (sdebug_random && ns < U32_MAX) {
6483 				ns = get_random_u32_below((u32)ns);
6484 			} else if (sdebug_random) {
6485 				ns >>= 12;	/* scale to 4 usec precision */
6486 				if (ns < U32_MAX)	/* over 4 hours max */
6487 					ns = get_random_u32_below((u32)ns);
6488 				ns <<= 12;
6489 			}
6490 			kt = ns_to_ktime(ns);
6491 		} else {	/* ndelay has a 4.2 second max */
6492 			kt = sdebug_random ? get_random_u32_below((u32)ndelay) :
6493 					     (u32)ndelay;
6494 			if (ndelay < INCLUSIVE_TIMING_MAX_NS) {
6495 				u64 d = ktime_get_boottime_ns() - ns_from_boot;
6496 
6497 				if (kt <= d) {	/* elapsed duration >= kt */
6498 					/* call scsi_done() from this thread */
6499 					sdebug_free_queued_cmd(sqcp);
6500 					scsi_done(cmnd);
6501 					return 0;
6502 				}
6503 				/* otherwise reduce kt by elapsed time */
6504 				kt -= d;
6505 			}
6506 		}
6507 		if (sdebug_statistics)
6508 			sd_dp->issuing_cpu = raw_smp_processor_id();
6509 		if (polled) {
6510 			spin_lock_irqsave(&sdsc->lock, flags);
6511 			sd_dp->cmpl_ts = ktime_add(ns_to_ktime(ns_from_boot), kt);
6512 			ASSIGN_QUEUED_CMD(cmnd, sqcp);
6513 			WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_POLL);
6514 			spin_unlock_irqrestore(&sdsc->lock, flags);
6515 		} else {
6516 			/* schedule the invocation of scsi_done() for a later time */
6517 			spin_lock_irqsave(&sdsc->lock, flags);
6518 			ASSIGN_QUEUED_CMD(cmnd, sqcp);
6519 			WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_HRT);
6520 			hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED);
6521 			/*
6522 			 * The completion handler will try to grab sqcp->lock,
6523 			 * so there is no chance that the completion handler
6524 			 * will call scsi_done() until we release the lock
6525 			 * here (so ok to keep referencing sdsc).
6526 			 */
6527 			spin_unlock_irqrestore(&sdsc->lock, flags);
6528 		}
6529 	} else {	/* jdelay < 0, use work queue */
6530 		if (unlikely((sdebug_opts & SDEBUG_OPT_CMD_ABORT) &&
6531 			     atomic_read(&sdeb_inject_pending))) {
6532 			sd_dp->aborted = true;
6533 			atomic_set(&sdeb_inject_pending, 0);
6534 			sdev_printk(KERN_INFO, sdp, "abort request tag=%#x\n",
6535 				    blk_mq_unique_tag_to_tag(get_tag(cmnd)));
6536 		}
6537 
6538 		if (sdebug_statistics)
6539 			sd_dp->issuing_cpu = raw_smp_processor_id();
6540 		if (polled) {
6541 			spin_lock_irqsave(&sdsc->lock, flags);
6542 			ASSIGN_QUEUED_CMD(cmnd, sqcp);
6543 			sd_dp->cmpl_ts = ns_to_ktime(ns_from_boot);
6544 			WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_POLL);
6545 			spin_unlock_irqrestore(&sdsc->lock, flags);
6546 		} else {
6547 			spin_lock_irqsave(&sdsc->lock, flags);
6548 			ASSIGN_QUEUED_CMD(cmnd, sqcp);
6549 			WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_WQ);
6550 			schedule_work(&sd_dp->ew.work);
6551 			spin_unlock_irqrestore(&sdsc->lock, flags);
6552 		}
6553 	}
6554 
6555 	return 0;
6556 
6557 respond_in_thread:	/* call back to mid-layer using invocation thread */
6558 	cmnd->result = pfp != NULL ? pfp(cmnd, devip) : 0;
6559 	cmnd->result &= ~SDEG_RES_IMMED_MASK;
6560 	if (cmnd->result == 0 && scsi_result != 0)
6561 		cmnd->result = scsi_result;
6562 	scsi_done(cmnd);
6563 	return 0;
6564 }
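
/*
 * A brief map of the completion paths chosen above: a NULL devip or
 * delta_jiff == 0 responds in the invoking thread via scsi_done();
 * delta_jiff > 0 or ndelay > 0 arms a pinned hrtimer, except that a short
 * ndelay which has already elapsed completes inline; delta_jiff < 0 defers
 * to the system workqueue; and in either deferred branch a REQ_POLLED
 * request is instead marked SDEB_DEFER_POLL for later polling.
 */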
6565 
6566 /* Note: The following macros create attribute files in the
6567    /sys/module/scsi_debug/parameters directory. Unfortunately this
6568    driver is unaware of a change and cannot trigger auxiliary actions
6569    as it can when the corresponding attribute in the
6570    /sys/bus/pseudo/drivers/scsi_debug directory is changed.
6571  */
6572 module_param_named(add_host, sdebug_add_host, int, S_IRUGO | S_IWUSR);
6573 module_param_named(ato, sdebug_ato, int, S_IRUGO);
6574 module_param_named(cdb_len, sdebug_cdb_len, int, 0644);
6575 module_param_named(clustering, sdebug_clustering, bool, S_IRUGO | S_IWUSR);
6576 module_param_named(delay, sdebug_jdelay, int, S_IRUGO | S_IWUSR);
6577 module_param_named(dev_size_mb, sdebug_dev_size_mb, int, S_IRUGO);
6578 module_param_named(dif, sdebug_dif, int, S_IRUGO);
6579 module_param_named(dix, sdebug_dix, int, S_IRUGO);
6580 module_param_named(dsense, sdebug_dsense, int, S_IRUGO | S_IWUSR);
6581 module_param_named(every_nth, sdebug_every_nth, int, S_IRUGO | S_IWUSR);
6582 module_param_named(fake_rw, sdebug_fake_rw, int, S_IRUGO | S_IWUSR);
6583 module_param_named(guard, sdebug_guard, uint, S_IRUGO);
6584 module_param_named(host_lock, sdebug_host_lock, bool, S_IRUGO | S_IWUSR);
6585 module_param_named(host_max_queue, sdebug_host_max_queue, int, S_IRUGO);
6586 module_param_string(inq_product, sdebug_inq_product_id,
6587 		    sizeof(sdebug_inq_product_id), S_IRUGO | S_IWUSR);
6588 module_param_string(inq_rev, sdebug_inq_product_rev,
6589 		    sizeof(sdebug_inq_product_rev), S_IRUGO | S_IWUSR);
6590 module_param_string(inq_vendor, sdebug_inq_vendor_id,
6591 		    sizeof(sdebug_inq_vendor_id), S_IRUGO | S_IWUSR);
6592 module_param_named(lbprz, sdebug_lbprz, int, S_IRUGO);
6593 module_param_named(lbpu, sdebug_lbpu, int, S_IRUGO);
6594 module_param_named(lbpws, sdebug_lbpws, int, S_IRUGO);
6595 module_param_named(lbpws10, sdebug_lbpws10, int, S_IRUGO);
6596 module_param_named(atomic_wr, sdebug_atomic_wr, int, S_IRUGO);
6597 module_param_named(lowest_aligned, sdebug_lowest_aligned, int, S_IRUGO);
6598 module_param_named(lun_format, sdebug_lun_am_i, int, S_IRUGO | S_IWUSR);
6599 module_param_named(max_luns, sdebug_max_luns, int, S_IRUGO | S_IWUSR);
6600 module_param_named(max_queue, sdebug_max_queue, int, S_IRUGO | S_IWUSR);
6601 module_param_named(medium_error_count, sdebug_medium_error_count, int,
6602 		   S_IRUGO | S_IWUSR);
6603 module_param_named(medium_error_start, sdebug_medium_error_start, int,
6604 		   S_IRUGO | S_IWUSR);
6605 module_param_named(ndelay, sdebug_ndelay, int, S_IRUGO | S_IWUSR);
6606 module_param_named(no_lun_0, sdebug_no_lun_0, int, S_IRUGO | S_IWUSR);
6607 module_param_named(no_rwlock, sdebug_no_rwlock, bool, S_IRUGO | S_IWUSR);
6608 module_param_named(no_uld, sdebug_no_uld, int, S_IRUGO);
6609 module_param_named(num_parts, sdebug_num_parts, int, S_IRUGO);
6610 module_param_named(num_tgts, sdebug_num_tgts, int, S_IRUGO | S_IWUSR);
6611 module_param_named(opt_blks, sdebug_opt_blks, int, S_IRUGO);
6612 module_param_named(opt_xferlen_exp, sdebug_opt_xferlen_exp, int, S_IRUGO);
6613 module_param_named(opts, sdebug_opts, int, S_IRUGO | S_IWUSR);
6614 module_param_named(per_host_store, sdebug_per_host_store, bool,
6615 		   S_IRUGO | S_IWUSR);
6616 module_param_named(physblk_exp, sdebug_physblk_exp, int, S_IRUGO);
6617 module_param_named(ptype, sdebug_ptype, int, S_IRUGO | S_IWUSR);
6618 module_param_named(random, sdebug_random, bool, S_IRUGO | S_IWUSR);
6619 module_param_named(removable, sdebug_removable, bool, S_IRUGO | S_IWUSR);
6620 module_param_named(scsi_level, sdebug_scsi_level, int, S_IRUGO);
6621 module_param_named(sector_size, sdebug_sector_size, int, S_IRUGO);
6622 module_param_named(statistics, sdebug_statistics, bool, S_IRUGO | S_IWUSR);
6623 module_param_named(strict, sdebug_strict, bool, S_IRUGO | S_IWUSR);
6624 module_param_named(submit_queues, submit_queues, int, S_IRUGO);
6625 module_param_named(poll_queues, poll_queues, int, S_IRUGO);
6626 module_param_named(tur_ms_to_ready, sdeb_tur_ms_to_ready, int, S_IRUGO);
6627 module_param_named(unmap_alignment, sdebug_unmap_alignment, int, S_IRUGO);
6628 module_param_named(unmap_granularity, sdebug_unmap_granularity, int, S_IRUGO);
6629 module_param_named(unmap_max_blocks, sdebug_unmap_max_blocks, int, S_IRUGO);
6630 module_param_named(unmap_max_desc, sdebug_unmap_max_desc, int, S_IRUGO);
6631 module_param_named(atomic_wr_max_length, sdebug_atomic_wr_max_length, int, S_IRUGO);
6632 module_param_named(atomic_wr_align, sdebug_atomic_wr_align, int, S_IRUGO);
6633 module_param_named(atomic_wr_gran, sdebug_atomic_wr_gran, int, S_IRUGO);
6634 module_param_named(atomic_wr_max_length_bndry, sdebug_atomic_wr_max_length_bndry, int, S_IRUGO);
6635 module_param_named(atomic_wr_max_bndry, sdebug_atomic_wr_max_bndry, int, S_IRUGO);
6636 module_param_named(uuid_ctl, sdebug_uuid_ctl, int, S_IRUGO);
6637 module_param_named(virtual_gb, sdebug_virtual_gb, int, S_IRUGO | S_IWUSR);
6638 module_param_named(vpd_use_hostno, sdebug_vpd_use_hostno, int,
6639 		   S_IRUGO | S_IWUSR);
6640 module_param_named(wp, sdebug_wp, bool, S_IRUGO | S_IWUSR);
6641 module_param_named(write_same_length, sdebug_write_same_length, int,
6642 		   S_IRUGO | S_IWUSR);
6643 module_param_named(zbc, sdeb_zbc_model_s, charp, S_IRUGO);
6644 module_param_named(zone_cap_mb, sdeb_zbc_zone_cap_mb, int, S_IRUGO);
6645 module_param_named(zone_max_open, sdeb_zbc_max_open, int, S_IRUGO);
6646 module_param_named(zone_nr_conv, sdeb_zbc_nr_conv, int, S_IRUGO);
6647 module_param_named(zone_size_mb, sdeb_zbc_zone_size_mb, int, S_IRUGO);
6648 module_param_named(allow_restart, sdebug_allow_restart, bool, S_IRUGO | S_IWUSR);
6649 
6650 MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
6651 MODULE_DESCRIPTION("SCSI debug adapter driver");
6652 MODULE_LICENSE("GPL");
6653 MODULE_VERSION(SDEBUG_VERSION);
6654 
6655 MODULE_PARM_DESC(add_host, "add n hosts; in sysfs a negative value removes host(s) (def=1)");
6656 MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
6657 MODULE_PARM_DESC(cdb_len, "suggest CDB lengths to drivers (def=10)");
6658 MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
6659 MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny");
6660 MODULE_PARM_DESC(dev_size_mb, "size in MiB of ram shared by devs(def=8)");
6661 MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
6662 MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
6663 MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
6664 MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
6665 MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
6666 MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
6667 MODULE_PARM_DESC(host_lock, "host_lock is ignored (def=0)");
6668 MODULE_PARM_DESC(host_max_queue,
6669 		 "host max # of queued cmds (0 to max(def); if non-zero, max_queue is fixed to the same value)");
6670 MODULE_PARM_DESC(inq_product, "SCSI INQUIRY product string (def=\"scsi_debug\")");
6671 MODULE_PARM_DESC(inq_rev, "SCSI INQUIRY revision string (def=\""
6672 		 SDEBUG_VERSION "\")");
6673 MODULE_PARM_DESC(inq_vendor, "SCSI INQUIRY vendor string (def=\"Linux\")");
6674 MODULE_PARM_DESC(lbprz,
6675 		 "on read unmapped LBs return 0 when 1 (def), return 0xff when 2");
6676 MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
6677 MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
6678 MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
6679 MODULE_PARM_DESC(atomic_wr, "enable ATOMIC WRITE support, support WRITE ATOMIC(16) (def=0)");
6680 MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
6681 MODULE_PARM_DESC(lun_format, "LUN format: 0->peripheral (def); 1->flat address method");
6682 MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
6683 MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to max(def))");
6684 MODULE_PARM_DESC(medium_error_count, "count of sectors to return follow on MEDIUM error");
6685 MODULE_PARM_DESC(medium_error_start, "starting sector number to return MEDIUM error");
6686 MODULE_PARM_DESC(ndelay, "response delay in nanoseconds (def=0 -> ignore)");
6687 MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
6688 MODULE_PARM_DESC(no_rwlock, "don't protect user data reads+writes (def=0)");
6689 MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0)");
6690 MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
6691 MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
6692 MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=1024)");
6693 MODULE_PARM_DESC(opt_xferlen_exp, "optimal transfer length granularity exponent (def=physblk_exp)");
6694 MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
6695 MODULE_PARM_DESC(per_host_store, "If set, next positive add_host will get new store (def=0)");
6696 MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
6697 MODULE_PARM_DESC(poll_queues, "support for iouring iopoll queues (1 to max(submit_queues - 1))");
6698 MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
6699 MODULE_PARM_DESC(random, "If set, uniformly randomize command duration between 0 and delay_in_ns");
6700 MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
6701 MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=7[SPC-5])");
6702 MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
6703 MODULE_PARM_DESC(statistics, "collect statistics on commands, queues (def=0)");
6704 MODULE_PARM_DESC(strict, "stricter checks: reserved field in cdb (def=0)");
6705 MODULE_PARM_DESC(submit_queues, "support for block multi-queue (def=1)");
6706 MODULE_PARM_DESC(tur_ms_to_ready, "TEST UNIT READY millisecs before initial good status (def=0)");
6707 MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
6708 MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
6709 MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)");
6710 MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
6711 MODULE_PARM_DESC(atomic_wr_max_length, "max # of blocks can be atomically written in one cmd (def=8192)");
6712 MODULE_PARM_DESC(atomic_wr_align, "minimum alignment of atomic write in blocks (def=2)");
6713 MODULE_PARM_DESC(atomic_wr_gran, "minimum granularity of atomic write in blocks (def=2)");
6714 MODULE_PARM_DESC(atomic_wr_max_length_bndry, "max # of blocks can be atomically written in one cmd with boundary set (def=8192)");
6715 MODULE_PARM_DESC(atomic_wr_max_bndry, "max # boundaries per atomic write (def=128)");
6716 MODULE_PARM_DESC(uuid_ctl,
6717 		 "1->use uuid for lu name, 0->don't, 2->all use same (def=0)");
6718 MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)");
6719 MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
6720 MODULE_PARM_DESC(wp, "Write Protect (def=0)");
6721 MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
6722 MODULE_PARM_DESC(zbc, "'none' [0]; 'aware' [1]; 'managed' [2] (def=0). Can have 'host-' prefix");
6723 MODULE_PARM_DESC(zone_cap_mb, "Zone capacity in MiB (def=zone size)");
6724 MODULE_PARM_DESC(zone_max_open, "Maximum number of open zones; [0] for no limit (def=auto)");
6725 MODULE_PARM_DESC(zone_nr_conv, "Number of conventional zones (def=1)");
6726 MODULE_PARM_DESC(zone_size_mb, "Zone size in MiB (def=auto)");
6727 MODULE_PARM_DESC(allow_restart, "Set scsi_device's allow_restart flag(def=0)");
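
/*
 * Load-time usage sketch (hypothetical values; parameter names as declared
 * above):
 *
 *   modprobe scsi_debug dev_size_mb=256 num_tgts=2 max_luns=4 delay=1
 *
 * creates one host with two targets of four LUNs each, backed by a shared
 * 256 MiB ram store, with a one jiffy response delay.
 */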
6728 
6729 #define SDEBUG_INFO_LEN 256
6730 static char sdebug_info[SDEBUG_INFO_LEN];
6731 
6732 static const char *scsi_debug_info(struct Scsi_Host *shp)
6733 {
6734 	int k;
6735 
6736 	k = scnprintf(sdebug_info, SDEBUG_INFO_LEN, "%s: version %s [%s]\n",
6737 		      my_name, SDEBUG_VERSION, sdebug_version_date);
6738 	if (k >= (SDEBUG_INFO_LEN - 1))
6739 		return sdebug_info;
6740 	scnprintf(sdebug_info + k, SDEBUG_INFO_LEN - k,
6741 		  "  dev_size_mb=%d, opts=0x%x, submit_queues=%d, %s=%d",
6742 		  sdebug_dev_size_mb, sdebug_opts, submit_queues,
6743 		  "statistics", (int)sdebug_statistics);
6744 	return sdebug_info;
6745 }
6746 
6747 /* 'echo <val> > /proc/scsi/scsi_debug/<host_id>' writes to opts */
6748 static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer,
6749 				 int length)
6750 {
6751 	char arr[16];
6752 	int opts;
6753 	int minLen = length > 15 ? 15 : length;
6754 
6755 	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
6756 		return -EACCES;
6757 	memcpy(arr, buffer, minLen);
6758 	arr[minLen] = '\0';
6759 	if (1 != sscanf(arr, "%d", &opts))
6760 		return -EINVAL;
6761 	sdebug_opts = opts;
6762 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
6763 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
6764 	if (sdebug_every_nth != 0)
6765 		tweak_cmnd_count();
6766 	return length;
6767 }
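
/*
 * Usage sketch (hypothetical host_id): only a decimal value is accepted
 * here, and both CAP_SYS_ADMIN and CAP_SYS_RAWIO are required, e.g.:
 *
 *   echo 4 > /proc/scsi/scsi_debug/<host_id>
 *
 * sets opts to 4 (timeout injection) and re-tweaks the command count when
 * every_nth is active.
 */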
6768 
6769 struct sdebug_submit_queue_data {
6770 	int *first;
6771 	int *last;
6772 	int queue_num;
6773 };
6774 
6775 static bool sdebug_submit_queue_iter(struct request *rq, void *opaque)
6776 {
6777 	struct sdebug_submit_queue_data *data = opaque;
6778 	u32 unique_tag = blk_mq_unique_tag(rq);
6779 	u16 hwq = blk_mq_unique_tag_to_hwq(unique_tag);
6780 	u16 tag = blk_mq_unique_tag_to_tag(unique_tag);
6781 	int queue_num = data->queue_num;
6782 
6783 	if (hwq != queue_num)
6784 		return true;
6785 
6786 	/* Rely on iter'ing in ascending tag order */
6787 	if (*data->first == -1)
6788 		*data->first = *data->last = tag;
6789 	else
6790 		*data->last = tag;
6791 
6792 	return true;
6793 }
6794 
6795 /* Output seen with 'cat /proc/scsi/scsi_debug/<host_id>'. It will be the
6796  * same for each scsi_debug host (if more than one). Some of the counters
6797  * output are not atomic, so they might be inaccurate on a busy system. */
6798 static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
6799 {
6800 	struct sdebug_host_info *sdhp;
6801 	int j;
6802 
6803 	seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n",
6804 		   SDEBUG_VERSION, sdebug_version_date);
6805 	seq_printf(m, "num_tgts=%d, %ssize=%d MB, opts=0x%x, every_nth=%d\n",
6806 		   sdebug_num_tgts, "shared (ram) ", sdebug_dev_size_mb,
6807 		   sdebug_opts, sdebug_every_nth);
6808 	seq_printf(m, "delay=%d, ndelay=%d, max_luns=%d, sector_size=%d %s\n",
6809 		   sdebug_jdelay, sdebug_ndelay, sdebug_max_luns,
6810 		   sdebug_sector_size, "bytes");
6811 	seq_printf(m, "cylinders=%d, heads=%d, sectors=%d, command aborts=%d\n",
6812 		   sdebug_cylinders_per, sdebug_heads, sdebug_sectors_per,
6813 		   num_aborts);
6814 	seq_printf(m, "RESETs: device=%d, target=%d, bus=%d, host=%d\n",
6815 		   num_dev_resets, num_target_resets, num_bus_resets,
6816 		   num_host_resets);
6817 	seq_printf(m, "dix_reads=%d, dix_writes=%d, dif_errors=%d\n",
6818 		   dix_reads, dix_writes, dif_errors);
6819 	seq_printf(m, "usec_in_jiffy=%lu, statistics=%d\n", TICK_NSEC / 1000,
6820 		   sdebug_statistics);
6821 	seq_printf(m, "cmnd_count=%d, completions=%d, %s=%d, a_tsf=%d, mq_polls=%d\n",
6822 		   atomic_read(&sdebug_cmnd_count),
6823 		   atomic_read(&sdebug_completions),
6824 		   "miss_cpus", atomic_read(&sdebug_miss_cpus),
6825 		   atomic_read(&sdebug_a_tsf),
6826 		   atomic_read(&sdeb_mq_poll_count));
6827 
6828 	seq_printf(m, "submit_queues=%d\n", submit_queues);
6829 	for (j = 0; j < submit_queues; ++j) {
6830 		int f = -1, l = -1;
6831 		struct sdebug_submit_queue_data data = {
6832 			.queue_num = j,
6833 			.first = &f,
6834 			.last = &l,
6835 		};
6836 		seq_printf(m, "  queue %d:\n", j);
6837 		blk_mq_tagset_busy_iter(&host->tag_set, sdebug_submit_queue_iter,
6838 					&data);
6839 		if (f >= 0) {
6840 			seq_printf(m, "    in_use_bm BUSY: %s: %d,%d\n",
6841 				   "first,last bits", f, l);
6842 		}
6843 	}
6844 
6845 	seq_printf(m, "this host_no=%d\n", host->host_no);
6846 	if (!xa_empty(per_store_ap)) {
6847 		bool niu;
6848 		int idx;
6849 		unsigned long l_idx;
6850 		struct sdeb_store_info *sip;
6851 
6852 		seq_puts(m, "\nhost list:\n");
6853 		j = 0;
6854 		list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
6855 			idx = sdhp->si_idx;
6856 			seq_printf(m, "  %d: host_no=%d, si_idx=%d\n", j,
6857 				   sdhp->shost->host_no, idx);
6858 			++j;
6859 		}
6860 		seq_printf(m, "\nper_store array [most_recent_idx=%d]:\n",
6861 			   sdeb_most_recent_idx);
6862 		j = 0;
6863 		xa_for_each(per_store_ap, l_idx, sip) {
6864 			niu = xa_get_mark(per_store_ap, l_idx,
6865 					  SDEB_XA_NOT_IN_USE);
6866 			idx = (int)l_idx;
6867 			seq_printf(m, "  %d: idx=%d%s\n", j, idx,
6868 				   (niu ? "  not_in_use" : ""));
6869 			++j;
6870 		}
6871 	}
6872 	return 0;
6873 }
6874 
6875 static ssize_t delay_show(struct device_driver *ddp, char *buf)
6876 {
6877 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_jdelay);
6878 }
6879 /* Returns -EBUSY if jdelay is being changed and commands are queued. The unit
6880  * of delay is jiffies.
6881  */
6882 static ssize_t delay_store(struct device_driver *ddp, const char *buf,
6883 			   size_t count)
6884 {
6885 	int jdelay, res;
6886 
6887 	if (count > 0 && sscanf(buf, "%d", &jdelay) == 1) {
6888 		res = count;
6889 		if (sdebug_jdelay != jdelay) {
6890 			struct sdebug_host_info *sdhp;
6891 
6892 			mutex_lock(&sdebug_host_list_mutex);
6893 			block_unblock_all_queues(true);
6894 
6895 			list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
6896 				struct Scsi_Host *shost = sdhp->shost;
6897 
6898 				if (scsi_host_busy(shost)) {
6899 					res = -EBUSY;   /* queued commands */
6900 					break;
6901 				}
6902 			}
6903 			if (res > 0) {
6904 				sdebug_jdelay = jdelay;
6905 				sdebug_ndelay = 0;
6906 			}
6907 			block_unblock_all_queues(false);
6908 			mutex_unlock(&sdebug_host_list_mutex);
6909 		}
6910 		return res;
6911 	}
6912 	return -EINVAL;
6913 }
6914 static DRIVER_ATTR_RW(delay);
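
/*
 * Usage sketch:
 *
 *   echo 2 > /sys/bus/pseudo/drivers/scsi_debug/delay
 *
 * sets a two jiffy response delay and clears ndelay; the write fails with
 * -EBUSY while any scsi_debug host has queued commands.
 */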
6915 
6916 static ssize_t ndelay_show(struct device_driver *ddp, char *buf)
6917 {
6918 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ndelay);
6919 }
6920 /* Returns -EBUSY if ndelay is being changed and commands are queued */
6921 /* If > 0 and accepted then sdebug_jdelay is set to JDELAY_OVERRIDDEN */
6922 static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
6923 			    size_t count)
6924 {
6925 	int ndelay, res;
6926 
6927 	if ((count > 0) && (1 == sscanf(buf, "%d", &ndelay)) &&
6928 	    (ndelay >= 0) && (ndelay < (1000 * 1000 * 1000))) {
6929 		res = count;
6930 		if (sdebug_ndelay != ndelay) {
6931 			struct sdebug_host_info *sdhp;
6932 
6933 			mutex_lock(&sdebug_host_list_mutex);
6934 			block_unblock_all_queues(true);
6935 
6936 			list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
6937 				struct Scsi_Host *shost = sdhp->shost;
6938 
6939 				if (scsi_host_busy(shost)) {
6940 					res = -EBUSY;   /* queued commands */
6941 					break;
6942 				}
6943 			}
6944 
6945 			if (res > 0) {
6946 				sdebug_ndelay = ndelay;
6947 				sdebug_jdelay = ndelay  ? JDELAY_OVERRIDDEN
6948 							: DEF_JDELAY;
6949 			}
6950 			block_unblock_all_queues(false);
6951 			mutex_unlock(&sdebug_host_list_mutex);
6952 		}
6953 		return res;
6954 	}
6955 	return -EINVAL;
6956 }
6957 static DRIVER_ATTR_RW(ndelay);
6958 
6959 static ssize_t opts_show(struct device_driver *ddp, char *buf)
6960 {
6961 	return scnprintf(buf, PAGE_SIZE, "0x%x\n", sdebug_opts);
6962 }
6963 
6964 static ssize_t opts_store(struct device_driver *ddp, const char *buf,
6965 			  size_t count)
6966 {
6967 	int opts;
6968 	char work[20];
6969 
6970 	if (sscanf(buf, "%10s", work) == 1) {
6971 		if (strncasecmp(work, "0x", 2) == 0) {
6972 			if (kstrtoint(work + 2, 16, &opts) == 0)
6973 				goto opts_done;
6974 		} else {
6975 			if (kstrtoint(work, 10, &opts) == 0)
6976 				goto opts_done;
6977 		}
6978 	}
6979 	return -EINVAL;
6980 opts_done:
6981 	sdebug_opts = opts;
6982 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
6983 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
6984 	tweak_cmnd_count();
6985 	return count;
6986 }
6987 static DRIVER_ATTR_RW(opts);
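
/*
 * Usage sketch: opts_store() accepts decimal or "0x"-prefixed hex, so
 *
 *   echo 0x6 > /sys/bus/pseudo/drivers/scsi_debug/opts
 *
 * enables medium error (2) plus timeout (4) injection and refreshes
 * sdebug_verbose and the command count.
 */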
6988 
6989 static ssize_t ptype_show(struct device_driver *ddp, char *buf)
6990 {
6991 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ptype);
6992 }
6993 static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
6994 			   size_t count)
6995 {
6996 	int n;
6997 
6998 	/* Cannot change from or to TYPE_ZBC with sysfs */
6999 	if (sdebug_ptype == TYPE_ZBC)
7000 		return -EINVAL;
7001 
7002 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
7003 		if (n == TYPE_ZBC)
7004 			return -EINVAL;
7005 		sdebug_ptype = n;
7006 		return count;
7007 	}
7008 	return -EINVAL;
7009 }
7010 static DRIVER_ATTR_RW(ptype);
7011 
7012 static ssize_t dsense_show(struct device_driver *ddp, char *buf)
7013 {
7014 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dsense);
7015 }
7016 static ssize_t dsense_store(struct device_driver *ddp, const char *buf,
7017 			    size_t count)
7018 {
7019 	int n;
7020 
7021 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
7022 		sdebug_dsense = n;
7023 		return count;
7024 	}
7025 	return -EINVAL;
7026 }
7027 static DRIVER_ATTR_RW(dsense);
7028 
7029 static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
7030 {
7031 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_fake_rw);
7032 }
7033 static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
7034 			     size_t count)
7035 {
7036 	int n, idx;
7037 
7038 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
7039 		bool want_store = (n == 0);
7040 		struct sdebug_host_info *sdhp;
7041 
7042 		n = (n > 0);
7043 		sdebug_fake_rw = (sdebug_fake_rw > 0);
7044 		if (sdebug_fake_rw == n)
7045 			return count;	/* not transitioning so do nothing */
7046 
7047 		if (want_store) {	/* 1 --> 0 transition, set up store */
7048 			if (sdeb_first_idx < 0) {
7049 				idx = sdebug_add_store();
7050 				if (idx < 0)
7051 					return idx;
7052 			} else {
7053 				idx = sdeb_first_idx;
7054 				xa_clear_mark(per_store_ap, idx,
7055 					      SDEB_XA_NOT_IN_USE);
7056 			}
7057 			/* make all hosts use same store */
7058 			list_for_each_entry(sdhp, &sdebug_host_list,
7059 					    host_list) {
7060 				if (sdhp->si_idx != idx) {
7061 					xa_set_mark(per_store_ap, sdhp->si_idx,
7062 						    SDEB_XA_NOT_IN_USE);
7063 					sdhp->si_idx = idx;
7064 				}
7065 			}
7066 			sdeb_most_recent_idx = idx;
7067 		} else {	/* 0 --> 1 transition is trigger for shrink */
7068 			sdebug_erase_all_stores(true /* apart from first */);
7069 		}
7070 		sdebug_fake_rw = n;
7071 		return count;
7072 	}
7073 	return -EINVAL;
7074 }
7075 static DRIVER_ATTR_RW(fake_rw);
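
/*
 * Transition sketch for the store above: writing 0 when fake_rw was 1
 * creates (or re-uses) a ram store and points every host at it; writing 1
 * when fake_rw was 0 erases all stores apart from the first; writes that
 * do not change the value are no-ops.
 */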
7076 
7077 static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf)
7078 {
7079 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_lun_0);
7080 }
7081 static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf,
7082 			      size_t count)
7083 {
7084 	int n;
7085 
7086 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
7087 		sdebug_no_lun_0 = n;
7088 		return count;
7089 	}
7090 	return -EINVAL;
7091 }
7092 static DRIVER_ATTR_RW(no_lun_0);
7093 
7094 static ssize_t num_tgts_show(struct device_driver *ddp, char *buf)
7095 {
7096 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_tgts);
7097 }
7098 static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf,
7099 			      size_t count)
7100 {
7101 	int n;
7102 
7103 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
7104 		sdebug_num_tgts = n;
7105 		sdebug_max_tgts_luns();
7106 		return count;
7107 	}
7108 	return -EINVAL;
7109 }
7110 static DRIVER_ATTR_RW(num_tgts);
7111 
7112 static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf)
7113 {
7114 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dev_size_mb);
7115 }
7116 static DRIVER_ATTR_RO(dev_size_mb);
7117 
7118 static ssize_t per_host_store_show(struct device_driver *ddp, char *buf)
7119 {
7120 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_per_host_store);
7121 }
7122 
7123 static ssize_t per_host_store_store(struct device_driver *ddp, const char *buf,
7124 				    size_t count)
7125 {
7126 	bool v;
7127 
7128 	if (kstrtobool(buf, &v))
7129 		return -EINVAL;
7130 
7131 	sdebug_per_host_store = v;
7132 	return count;
7133 }
7134 static DRIVER_ATTR_RW(per_host_store);
7135 
7136 static ssize_t num_parts_show(struct device_driver *ddp, char *buf)
7137 {
7138 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_parts);
7139 }
7140 static DRIVER_ATTR_RO(num_parts);
7141 
7142 static ssize_t every_nth_show(struct device_driver *ddp, char *buf)
7143 {
7144 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_every_nth);
7145 }
7146 static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
7147 			       size_t count)
7148 {
7149 	int nth;
7150 	char work[20];
7151 
7152 	if (sscanf(buf, "%10s", work) == 1) {
7153 		if (strncasecmp(work, "0x", 2) == 0) {
7154 			if (kstrtoint(work + 2, 16, &nth) == 0)
7155 				goto every_nth_done;
7156 		} else {
7157 			if (kstrtoint(work, 10, &nth) == 0)
7158 				goto every_nth_done;
7159 		}
7160 	}
7161 	return -EINVAL;
7162 
7163 every_nth_done:
7164 	sdebug_every_nth = nth;
7165 	if (nth && !sdebug_statistics) {
7166 		pr_info("every_nth needs statistics=1, set it\n");
7167 		sdebug_statistics = true;
7168 	}
7169 	tweak_cmnd_count();
7170 	return count;
7171 }
7172 static DRIVER_ATTR_RW(every_nth);
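
/*
 * Usage sketch, combining every_nth with opts:
 *
 *   echo 4 > /sys/bus/pseudo/drivers/scsi_debug/opts
 *   echo 100 > /sys/bus/pseudo/drivers/scsi_debug/every_nth
 *
 * injects a timeout on every 100th command; the store above forces
 * statistics on because the trigger relies on the command count.
 */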
7173 
7174 static ssize_t lun_format_show(struct device_driver *ddp, char *buf)
7175 {
7176 	return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_lun_am);
7177 }
7178 static ssize_t lun_format_store(struct device_driver *ddp, const char *buf,
7179 				size_t count)
7180 {
7181 	int n;
7182 	bool changed;
7183 
7184 	if (kstrtoint(buf, 0, &n))
7185 		return -EINVAL;
7186 	if (n >= 0) {
7187 		if (n > (int)SAM_LUN_AM_FLAT) {
7188 			pr_warn("only LUN address methods 0 and 1 are supported\n");
7189 			return -EINVAL;
7190 		}
7191 		changed = ((int)sdebug_lun_am != n);
7192 		sdebug_lun_am = n;
7193 		if (changed && sdebug_scsi_level >= 5) {	/* >= SPC-3 */
7194 			struct sdebug_host_info *sdhp;
7195 			struct sdebug_dev_info *dp;
7196 
7197 			mutex_lock(&sdebug_host_list_mutex);
7198 			list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
7199 				list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
7200 					set_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
7201 				}
7202 			}
7203 			mutex_unlock(&sdebug_host_list_mutex);
7204 		}
7205 		return count;
7206 	}
7207 	return -EINVAL;
7208 }
7209 static DRIVER_ATTR_RW(lun_format);
7210 
7211 static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
7212 {
7213 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_luns);
7214 }
7215 static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
7216 			      size_t count)
7217 {
7218 	int n;
7219 	bool changed;
7220 
7221 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
7222 		if (n > 256) {
7223 			pr_warn("max_luns can be no more than 256\n");
7224 			return -EINVAL;
7225 		}
7226 		changed = (sdebug_max_luns != n);
7227 		sdebug_max_luns = n;
7228 		sdebug_max_tgts_luns();
7229 		if (changed && (sdebug_scsi_level >= 5)) {	/* >= SPC-3 */
7230 			struct sdebug_host_info *sdhp;
7231 			struct sdebug_dev_info *dp;
7232 
7233 			mutex_lock(&sdebug_host_list_mutex);
7234 			list_for_each_entry(sdhp, &sdebug_host_list,
7235 					    host_list) {
7236 				list_for_each_entry(dp, &sdhp->dev_info_list,
7237 						    dev_list) {
7238 					set_bit(SDEBUG_UA_LUNS_CHANGED,
7239 						dp->uas_bm);
7240 				}
7241 			}
7242 			mutex_unlock(&sdebug_host_list_mutex);
7243 		}
7244 		return count;
7245 	}
7246 	return -EINVAL;
7247 }
7248 static DRIVER_ATTR_RW(max_luns);
7249 
7250 static ssize_t max_queue_show(struct device_driver *ddp, char *buf)
7251 {
7252 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_queue);
7253 }
7254 /* N.B. max_queue can only be changed while no scsi_debug hosts exist;
7255  * otherwise the write below returns -EBUSY. */
7256 static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
7257 			       size_t count)
7258 {
7259 	int n;
7260 
7261 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
7262 	    (n <= SDEBUG_CANQUEUE) &&
7263 	    (sdebug_host_max_queue == 0)) {
7264 		mutex_lock(&sdebug_host_list_mutex);
7265 
7266 		/* We may only change sdebug_max_queue when we have no shosts */
7267 		if (list_empty(&sdebug_host_list))
7268 			sdebug_max_queue = n;
7269 		else
7270 			count = -EBUSY;
7271 		mutex_unlock(&sdebug_host_list_mutex);
7272 		return count;
7273 	}
7274 	return -EINVAL;
7275 }
7276 static DRIVER_ATTR_RW(max_queue);
7277 
7278 static ssize_t host_max_queue_show(struct device_driver *ddp, char *buf)
7279 {
7280 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_host_max_queue);
7281 }
7282 
7283 static ssize_t no_rwlock_show(struct device_driver *ddp, char *buf)
7284 {
7285 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_rwlock);
7286 }
7287 
7288 static ssize_t no_rwlock_store(struct device_driver *ddp, const char *buf, size_t count)
7289 {
7290 	bool v;
7291 
7292 	if (kstrtobool(buf, &v))
7293 		return -EINVAL;
7294 
7295 	sdebug_no_rwlock = v;
7296 	return count;
7297 }
7298 static DRIVER_ATTR_RW(no_rwlock);
7299 
7300 /*
7301  * Since this is used for .can_queue, and we get the hc_idx tag from the bitmap
7302  * in range [0, sdebug_host_max_queue), we can't change it.
7303  */
7304 static DRIVER_ATTR_RO(host_max_queue);
7305 
7306 static ssize_t no_uld_show(struct device_driver *ddp, char *buf)
7307 {
7308 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_uld);
7309 }
7310 static DRIVER_ATTR_RO(no_uld);
7311 
7312 static ssize_t scsi_level_show(struct device_driver *ddp, char *buf)
7313 {
7314 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_scsi_level);
7315 }
7316 static DRIVER_ATTR_RO(scsi_level);
7317 
7318 static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf)
7319 {
7320 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_virtual_gb);
7321 }
7322 static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
7323 				size_t count)
7324 {
7325 	int n;
7326 	bool changed;
7327 
7328 	/* Ignore capacity change for ZBC drives for now */
7329 	if (sdeb_zbc_in_use)
7330 		return -ENOTSUPP;
7331 
7332 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
7333 		changed = (sdebug_virtual_gb != n);
7334 		sdebug_virtual_gb = n;
7335 		sdebug_capacity = get_sdebug_capacity();
7336 		if (changed) {
7337 			struct sdebug_host_info *sdhp;
7338 			struct sdebug_dev_info *dp;
7339 
7340 			mutex_lock(&sdebug_host_list_mutex);
7341 			list_for_each_entry(sdhp, &sdebug_host_list,
7342 					    host_list) {
7343 				list_for_each_entry(dp, &sdhp->dev_info_list,
7344 						    dev_list) {
7345 					set_bit(SDEBUG_UA_CAPACITY_CHANGED,
7346 						dp->uas_bm);
7347 				}
7348 			}
7349 			mutex_unlock(&sdebug_host_list_mutex);
7350 		}
7351 		return count;
7352 	}
7353 	return -EINVAL;
7354 }
7355 static DRIVER_ATTR_RW(virtual_gb);
7356 
7357 static ssize_t add_host_show(struct device_driver *ddp, char *buf)
7358 {
7359 	/* absolute number of hosts currently active is what is shown */
7360 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_hosts);
7361 }
7362 
7363 static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
7364 			      size_t count)
7365 {
7366 	bool found;
7367 	unsigned long idx;
7368 	struct sdeb_store_info *sip;
7369 	bool want_phs = (sdebug_fake_rw == 0) && sdebug_per_host_store;
7370 	int delta_hosts;
7371 
7372 	if (sscanf(buf, "%d", &delta_hosts) != 1)
7373 		return -EINVAL;
7374 	if (delta_hosts > 0) {
7375 		do {
7376 			found = false;
7377 			if (want_phs) {
7378 				xa_for_each_marked(per_store_ap, idx, sip,
7379 						   SDEB_XA_NOT_IN_USE) {
7380 					sdeb_most_recent_idx = (int)idx;
7381 					found = true;
7382 					break;
7383 				}
7384 				if (found)	/* re-use case */
7385 					sdebug_add_host_helper((int)idx);
7386 				else
7387 					sdebug_do_add_host(true);
7388 			} else {
7389 				sdebug_do_add_host(false);
7390 			}
7391 		} while (--delta_hosts);
7392 	} else if (delta_hosts < 0) {
7393 		do {
7394 			sdebug_do_remove_host(false);
7395 		} while (++delta_hosts);
7396 	}
7397 	return count;
7398 }
7399 static DRIVER_ATTR_RW(add_host);
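
/*
 * Usage sketch:
 *
 *   echo 2 > /sys/bus/pseudo/drivers/scsi_debug/add_host    (add two hosts)
 *   echo -1 > /sys/bus/pseudo/drivers/scsi_debug/add_host   (remove one)
 *
 * With per_host_store=1 and fake_rw=0, each added host re-uses a store
 * marked SDEB_XA_NOT_IN_USE when one is available, else gets a new one.
 */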
7400 
7401 static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf)
7402 {
7403 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_vpd_use_hostno);
7404 }
7405 static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf,
7406 				    size_t count)
7407 {
7408 	int n;
7409 
7410 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
7411 		sdebug_vpd_use_hostno = n;
7412 		return count;
7413 	}
7414 	return -EINVAL;
7415 }
7416 static DRIVER_ATTR_RW(vpd_use_hostno);
7417 
7418 static ssize_t statistics_show(struct device_driver *ddp, char *buf)
7419 {
7420 	return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_statistics);
7421 }
7422 static ssize_t statistics_store(struct device_driver *ddp, const char *buf,
7423 				size_t count)
7424 {
7425 	int n;
7426 
7427 	if ((count > 0) && (sscanf(buf, "%d", &n) == 1) && (n >= 0)) {
7428 		if (n > 0)
7429 			sdebug_statistics = true;
7430 		else {
7431 			clear_queue_stats();
7432 			sdebug_statistics = false;
7433 		}
7434 		return count;
7435 	}
7436 	return -EINVAL;
7437 }
7438 static DRIVER_ATTR_RW(statistics);
7439 
7440 static ssize_t sector_size_show(struct device_driver *ddp, char *buf)
7441 {
7442 	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_sector_size);
7443 }
7444 static DRIVER_ATTR_RO(sector_size);
7445 
7446 static ssize_t submit_queues_show(struct device_driver *ddp, char *buf)
7447 {
7448 	return scnprintf(buf, PAGE_SIZE, "%d\n", submit_queues);
7449 }
7450 static DRIVER_ATTR_RO(submit_queues);
7451 
7452 static ssize_t dix_show(struct device_driver *ddp, char *buf)
7453 {
7454 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dix);
7455 }
7456 static DRIVER_ATTR_RO(dix);
7457 
7458 static ssize_t dif_show(struct device_driver *ddp, char *buf)
7459 {
7460 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dif);
7461 }
7462 static DRIVER_ATTR_RO(dif);
7463 
7464 static ssize_t guard_show(struct device_driver *ddp, char *buf)
7465 {
7466 	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_guard);
7467 }
7468 static DRIVER_ATTR_RO(guard);
7469 
7470 static ssize_t ato_show(struct device_driver *ddp, char *buf)
7471 {
7472 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ato);
7473 }
7474 static DRIVER_ATTR_RO(ato);
7475 
7476 static ssize_t map_show(struct device_driver *ddp, char *buf)
7477 {
7478 	ssize_t count = 0;
7479 
7480 	if (!scsi_debug_lbp())
7481 		return scnprintf(buf, PAGE_SIZE, "0-%u\n",
7482 				 sdebug_store_sectors);
7483 
7484 	if (sdebug_fake_rw == 0 && !xa_empty(per_store_ap)) {
7485 		struct sdeb_store_info *sip = xa_load(per_store_ap, 0);
7486 
7487 		if (sip)
7488 			count = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
7489 					  (int)map_size, sip->map_storep);
7490 	}
7491 	buf[count++] = '\n';
7492 	buf[count] = '\0';
7493 
7494 	return count;
7495 }
7496 static DRIVER_ATTR_RO(map);
7497 
7498 static ssize_t random_show(struct device_driver *ddp, char *buf)
7499 {
7500 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_random);
7501 }
7502 
7503 static ssize_t random_store(struct device_driver *ddp, const char *buf,
7504 			    size_t count)
7505 {
7506 	bool v;
7507 
7508 	if (kstrtobool(buf, &v))
7509 		return -EINVAL;
7510 
7511 	sdebug_random = v;
7512 	return count;
7513 }
7514 static DRIVER_ATTR_RW(random);
7515 
7516 static ssize_t removable_show(struct device_driver *ddp, char *buf)
7517 {
7518 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_removable ? 1 : 0);
7519 }
7520 static ssize_t removable_store(struct device_driver *ddp, const char *buf,
7521 			       size_t count)
7522 {
7523 	int n;
7524 
7525 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
7526 		sdebug_removable = (n > 0);
7527 		return count;
7528 	}
7529 	return -EINVAL;
7530 }
7531 static DRIVER_ATTR_RW(removable);
7532 
7533 static ssize_t host_lock_show(struct device_driver *ddp, char *buf)
7534 {
7535 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_host_lock);
7536 }
7537 /* N.B. sdebug_host_lock does nothing, kept for backward compatibility */
7538 static ssize_t host_lock_store(struct device_driver *ddp, const char *buf,
7539 			       size_t count)
7540 {
7541 	int n;
7542 
7543 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
7544 		sdebug_host_lock = (n > 0);
7545 		return count;
7546 	}
7547 	return -EINVAL;
7548 }
7549 static DRIVER_ATTR_RW(host_lock);
7550 
7551 static ssize_t strict_show(struct device_driver *ddp, char *buf)
7552 {
7553 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_strict);
7554 }
7555 static ssize_t strict_store(struct device_driver *ddp, const char *buf,
7556 			    size_t count)
7557 {
7558 	int n;
7559 
7560 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
7561 		sdebug_strict = (n > 0);
7562 		return count;
7563 	}
7564 	return -EINVAL;
7565 }
7566 static DRIVER_ATTR_RW(strict);
7567 
7568 static ssize_t uuid_ctl_show(struct device_driver *ddp, char *buf)
7569 {
7570 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_uuid_ctl);
7571 }
7572 static DRIVER_ATTR_RO(uuid_ctl);
7573 
7574 static ssize_t cdb_len_show(struct device_driver *ddp, char *buf)
7575 {
7576 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_cdb_len);
7577 }
7578 static ssize_t cdb_len_store(struct device_driver *ddp, const char *buf,
7579 			     size_t count)
7580 {
7581 	int ret, n;
7582 
7583 	ret = kstrtoint(buf, 0, &n);
7584 	if (ret)
7585 		return ret;
7586 	sdebug_cdb_len = n;
7587 	all_config_cdb_len();
7588 	return count;
7589 }
7590 static DRIVER_ATTR_RW(cdb_len);
7591 
7592 static const char * const zbc_model_strs_a[] = {
7593 	[BLK_ZONED_NONE] = "none",
7594 	[BLK_ZONED_HA]   = "host-aware",
7595 	[BLK_ZONED_HM]   = "host-managed",
7596 };
7597 
7598 static const char * const zbc_model_strs_b[] = {
7599 	[BLK_ZONED_NONE] = "no",
7600 	[BLK_ZONED_HA]   = "aware",
7601 	[BLK_ZONED_HM]   = "managed",
7602 };
7603 
7604 static const char * const zbc_model_strs_c[] = {
7605 	[BLK_ZONED_NONE] = "0",
7606 	[BLK_ZONED_HA]   = "1",
7607 	[BLK_ZONED_HM]   = "2",
7608 };
7609 
7610 static int sdeb_zbc_model_str(const char *cp)
7611 {
7612 	int res = sysfs_match_string(zbc_model_strs_a, cp);
7613 
7614 	if (res < 0) {
7615 		res = sysfs_match_string(zbc_model_strs_b, cp);
7616 		if (res < 0) {
7617 			res = sysfs_match_string(zbc_model_strs_c, cp);
7618 			if (res < 0)
7619 				return -EINVAL;
7620 		}
7621 	}
7622 	return res;
7623 }
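
/*
 * Mapping sketch: all three spellings of a model resolve to the same
 * index, e.g. sdeb_zbc_model_str("host-managed"),
 * sdeb_zbc_model_str("managed") and sdeb_zbc_model_str("2") each return
 * BLK_ZONED_HM; unmatched strings return -EINVAL.
 */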
7624 
7625 static ssize_t zbc_show(struct device_driver *ddp, char *buf)
7626 {
7627 	return scnprintf(buf, PAGE_SIZE, "%s\n",
7628 			 zbc_model_strs_a[sdeb_zbc_model]);
7629 }
7630 static DRIVER_ATTR_RO(zbc);
7631 
7632 static ssize_t tur_ms_to_ready_show(struct device_driver *ddp, char *buf)
7633 {
7634 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdeb_tur_ms_to_ready);
7635 }
7636 static DRIVER_ATTR_RO(tur_ms_to_ready);
7637 
7638 static ssize_t group_number_stats_show(struct device_driver *ddp, char *buf)
7639 {
7640 	char *p = buf, *end = buf + PAGE_SIZE;
7641 	int i;
7642 
7643 	for (i = 0; i < ARRAY_SIZE(writes_by_group_number); i++)
7644 		p += scnprintf(p, end - p, "%d %ld\n", i,
7645 			       atomic_long_read(&writes_by_group_number[i]));
7646 
7647 	return p - buf;
7648 }
7649 
7650 static ssize_t group_number_stats_store(struct device_driver *ddp,
7651 					const char *buf, size_t count)
7652 {
7653 	int i;
7654 
7655 	for (i = 0; i < ARRAY_SIZE(writes_by_group_number); i++)
7656 		atomic_long_set(&writes_by_group_number[i], 0);
7657 
7658 	return count;
7659 }
7660 static DRIVER_ATTR_RW(group_number_stats);
7661 
7662 /* Note: The following array creates attribute files in the
7663    /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
7664    files (over those found in the /sys/module/scsi_debug/parameters
7665    directory) is that auxiliary actions can be triggered when an attribute
7666    is changed. For example see: add_host_store() above.
7667  */
7668 
7669 static struct attribute *sdebug_drv_attrs[] = {
7670 	&driver_attr_delay.attr,
7671 	&driver_attr_opts.attr,
7672 	&driver_attr_ptype.attr,
7673 	&driver_attr_dsense.attr,
7674 	&driver_attr_fake_rw.attr,
7675 	&driver_attr_host_max_queue.attr,
7676 	&driver_attr_no_lun_0.attr,
7677 	&driver_attr_num_tgts.attr,
7678 	&driver_attr_dev_size_mb.attr,
7679 	&driver_attr_num_parts.attr,
7680 	&driver_attr_every_nth.attr,
7681 	&driver_attr_lun_format.attr,
7682 	&driver_attr_max_luns.attr,
7683 	&driver_attr_max_queue.attr,
7684 	&driver_attr_no_rwlock.attr,
7685 	&driver_attr_no_uld.attr,
7686 	&driver_attr_scsi_level.attr,
7687 	&driver_attr_virtual_gb.attr,
7688 	&driver_attr_add_host.attr,
7689 	&driver_attr_per_host_store.attr,
7690 	&driver_attr_vpd_use_hostno.attr,
7691 	&driver_attr_sector_size.attr,
7692 	&driver_attr_statistics.attr,
7693 	&driver_attr_submit_queues.attr,
7694 	&driver_attr_dix.attr,
7695 	&driver_attr_dif.attr,
7696 	&driver_attr_guard.attr,
7697 	&driver_attr_ato.attr,
7698 	&driver_attr_map.attr,
7699 	&driver_attr_random.attr,
7700 	&driver_attr_removable.attr,
7701 	&driver_attr_host_lock.attr,
7702 	&driver_attr_ndelay.attr,
7703 	&driver_attr_strict.attr,
7704 	&driver_attr_uuid_ctl.attr,
7705 	&driver_attr_cdb_len.attr,
7706 	&driver_attr_tur_ms_to_ready.attr,
7707 	&driver_attr_zbc.attr,
7708 	&driver_attr_group_number_stats.attr,
7709 	NULL,
7710 };
7711 ATTRIBUTE_GROUPS(sdebug_drv);
7712 
7713 static struct device *pseudo_primary;
7714 
7715 static int __init scsi_debug_init(void)
7716 {
7717 	bool want_store = (sdebug_fake_rw == 0);
7718 	unsigned long sz;
7719 	int k, ret, hosts_to_add;
7720 	int idx = -1;
7721 
7722 	if (sdebug_ndelay >= 1000 * 1000 * 1000) {
7723 		pr_warn("ndelay must be less than 1 second, ignored\n");
7724 		sdebug_ndelay = 0;
7725 	} else if (sdebug_ndelay > 0)
7726 		sdebug_jdelay = JDELAY_OVERRIDDEN;
7727 
7728 	switch (sdebug_sector_size) {
7729 	case  512:
7730 	case 1024:
7731 	case 2048:
7732 	case 4096:
7733 		break;
7734 	default:
7735 		pr_err("invalid sector_size %d\n", sdebug_sector_size);
7736 		return -EINVAL;
7737 	}
7738 
7739 	switch (sdebug_dif) {
7740 	case T10_PI_TYPE0_PROTECTION:
7741 		break;
7742 	case T10_PI_TYPE1_PROTECTION:
7743 	case T10_PI_TYPE2_PROTECTION:
7744 	case T10_PI_TYPE3_PROTECTION:
7745 		have_dif_prot = true;
7746 		break;
7747 
7748 	default:
7749 		pr_err("dif must be 0, 1, 2 or 3\n");
7750 		return -EINVAL;
7751 	}
7752 
7753 	if (sdebug_num_tgts < 0) {
7754 		pr_err("num_tgts must be >= 0\n");
7755 		return -EINVAL;
7756 	}
7757 
7758 	if (sdebug_guard > 1) {
7759 		pr_err("guard must be 0 or 1\n");
7760 		return -EINVAL;
7761 	}
7762 
7763 	if (sdebug_ato > 1) {
7764 		pr_err("ato must be 0 or 1\n");
7765 		return -EINVAL;
7766 	}
7767 
7768 	if (sdebug_physblk_exp > 15) {
7769 		pr_err("invalid physblk_exp %u\n", sdebug_physblk_exp);
7770 		return -EINVAL;
7771 	}
7772 
7773 	sdebug_lun_am = sdebug_lun_am_i;
7774 	if (sdebug_lun_am > SAM_LUN_AM_FLAT) {
7775 		pr_warn("Invalid LUN format %u, using default\n", (int)sdebug_lun_am);
7776 		sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
7777 	}
7778 
7779 	if (sdebug_max_luns > 256) {
7780 		if (sdebug_max_luns > 16384) {
7781 			pr_warn("max_luns can be no more than 16384, use default\n");
7782 			sdebug_max_luns = DEF_MAX_LUNS;
7783 		}
7784 		sdebug_lun_am = SAM_LUN_AM_FLAT;
7785 	}
7786 
7787 	if (sdebug_lowest_aligned > 0x3fff) {
7788 		pr_err("lowest_aligned too big: %u\n", sdebug_lowest_aligned);
7789 		return -EINVAL;
7790 	}
7791 
7792 	if (submit_queues < 1) {
7793 		pr_err("submit_queues must be 1 or more\n");
7794 		return -EINVAL;
7795 	}
7796 
7797 	if ((sdebug_max_queue > SDEBUG_CANQUEUE) || (sdebug_max_queue < 1)) {
7798 		pr_err("max_queue must be in range [1, %d]\n", SDEBUG_CANQUEUE);
7799 		return -EINVAL;
7800 	}
7801 
7802 	if ((sdebug_host_max_queue > SDEBUG_CANQUEUE) ||
7803 	    (sdebug_host_max_queue < 0)) {
7804 		pr_err("host_max_queue must be in range [0 %d]\n",
7805 		       SDEBUG_CANQUEUE);
7806 		return -EINVAL;
7807 	}
7808 
7809 	if (sdebug_host_max_queue &&
7810 	    (sdebug_max_queue != sdebug_host_max_queue)) {
7811 		sdebug_max_queue = sdebug_host_max_queue;
7812 		pr_warn("fixing max submit queue depth to host max queue depth, %d\n",
7813 			sdebug_max_queue);
7814 	}
7815 
7816 	/*
7817 	 * check for host managed zoned block device specified with
7818 	 * ptype=0x14 or zbc=XXX.
7819 	 */
7820 	if (sdebug_ptype == TYPE_ZBC) {
7821 		sdeb_zbc_model = BLK_ZONED_HM;
7822 	} else if (sdeb_zbc_model_s && *sdeb_zbc_model_s) {
7823 		k = sdeb_zbc_model_str(sdeb_zbc_model_s);
7824 		if (k < 0)
7825 			return k;
7826 		sdeb_zbc_model = k;
7827 		switch (sdeb_zbc_model) {
7828 		case BLK_ZONED_NONE:
7829 		case BLK_ZONED_HA:
7830 			sdebug_ptype = TYPE_DISK;
7831 			break;
7832 		case BLK_ZONED_HM:
7833 			sdebug_ptype = TYPE_ZBC;
7834 			break;
7835 		default:
7836 			pr_err("Invalid ZBC model\n");
7837 			return -EINVAL;
7838 		}
7839 	}
7840 	if (sdeb_zbc_model != BLK_ZONED_NONE) {
7841 		sdeb_zbc_in_use = true;
7842 		if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
7843 			sdebug_dev_size_mb = DEF_ZBC_DEV_SIZE_MB;
7844 	}
7845 
7846 	if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
7847 		sdebug_dev_size_mb = DEF_DEV_SIZE_MB;
7848 	if (sdebug_dev_size_mb < 1)
7849 		sdebug_dev_size_mb = 1;  /* force minimum 1 MB ramdisk */
7850 	sz = (unsigned long)sdebug_dev_size_mb * 1048576;
7851 	sdebug_store_sectors = sz / sdebug_sector_size;
7852 	sdebug_capacity = get_sdebug_capacity();
7853 
7854 	/* play around with geometry, don't waste too much on track 0 */
7855 	sdebug_heads = 8;
7856 	sdebug_sectors_per = 32;
7857 	if (sdebug_dev_size_mb >= 256)
7858 		sdebug_heads = 64;
7859 	else if (sdebug_dev_size_mb >= 16)
7860 		sdebug_heads = 32;
7861 	sdebug_cylinders_per = (unsigned long)sdebug_capacity /
7862 			       (sdebug_sectors_per * sdebug_heads);
7863 	if (sdebug_cylinders_per >= 1024) {
7864 		/* other LLDs do this; implies >= 1GB ram disk ... */
7865 		sdebug_heads = 255;
7866 		sdebug_sectors_per = 63;
7867 		sdebug_cylinders_per = (unsigned long)sdebug_capacity /
7868 			       (sdebug_sectors_per * sdebug_heads);
7869 	}
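	/*
	 * Worked example of the geometry above (assuming virtual_gb=0):
	 * dev_size_mb=256 with sector_size=512 gives sdebug_capacity=524288
	 * sectors; heads=64 and sectors_per=32 yield 524288 / (32 * 64) = 256
	 * cylinders, below the 1024 limit, so the 255-head/63-sector fallback
	 * is not taken.
	 */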
7870 	if (scsi_debug_lbp()) {
7871 		sdebug_unmap_max_blocks =
7872 			clamp(sdebug_unmap_max_blocks, 0U, 0xffffffffU);
7873 
7874 		sdebug_unmap_max_desc =
7875 			clamp(sdebug_unmap_max_desc, 0U, 256U);
7876 
7877 		sdebug_unmap_granularity =
7878 			clamp(sdebug_unmap_granularity, 1U, 0xffffffffU);
7879 
7880 		if (sdebug_unmap_alignment &&
7881 		    sdebug_unmap_granularity <=
7882 		    sdebug_unmap_alignment) {
7883 			pr_err("ERR: unmap_granularity <= unmap_alignment\n");
7884 			return -EINVAL;
7885 		}
7886 	}
7887 
7888 	xa_init_flags(per_store_ap, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
7889 	if (want_store) {
7890 		idx = sdebug_add_store();
7891 		if (idx < 0)
7892 			return idx;
7893 	}
7894 
7895 	pseudo_primary = root_device_register("pseudo_0");
7896 	if (IS_ERR(pseudo_primary)) {
7897 		pr_warn("root_device_register() error\n");
7898 		ret = PTR_ERR(pseudo_primary);
7899 		goto free_vm;
7900 	}
7901 	ret = bus_register(&pseudo_lld_bus);
7902 	if (ret < 0) {
7903 		pr_warn("bus_register error: %d\n", ret);
7904 		goto dev_unreg;
7905 	}
7906 	ret = driver_register(&sdebug_driverfs_driver);
7907 	if (ret < 0) {
7908 		pr_warn("driver_register error: %d\n", ret);
7909 		goto bus_unreg;
7910 	}
7911 
7912 	hosts_to_add = sdebug_add_host;
7913 	sdebug_add_host = 0;
7914 
7915 	queued_cmd_cache = KMEM_CACHE(sdebug_queued_cmd, SLAB_HWCACHE_ALIGN);
7916 	if (!queued_cmd_cache) {
7917 		ret = -ENOMEM;
7918 		goto driver_unreg;
7919 	}
7920 
7921 	sdebug_debugfs_root = debugfs_create_dir("scsi_debug", NULL);
7922 	if (IS_ERR_OR_NULL(sdebug_debugfs_root))
7923 		pr_info("%s: failed to create initial debugfs directory\n", __func__);
7924 
7925 	for (k = 0; k < hosts_to_add; k++) {
7926 		if (want_store && k == 0) {
7927 			ret = sdebug_add_host_helper(idx);
7928 			if (ret < 0) {
7929 				pr_err("add_host_helper k=%d, error=%d\n",
7930 				       k, -ret);
7931 				break;
7932 			}
7933 		} else {
7934 			ret = sdebug_do_add_host(want_store &&
7935 						 sdebug_per_host_store);
7936 			if (ret < 0) {
7937 				pr_err("add_host k=%d error=%d\n", k, -ret);
7938 				break;
7939 			}
7940 		}
7941 	}
7942 	if (sdebug_verbose)
7943 		pr_info("built %d host(s)\n", sdebug_num_hosts);
7944 
7945 	return 0;
7946 
7947 driver_unreg:
7948 	driver_unregister(&sdebug_driverfs_driver);
7949 bus_unreg:
7950 	bus_unregister(&pseudo_lld_bus);
7951 dev_unreg:
7952 	root_device_unregister(pseudo_primary);
7953 free_vm:
7954 	sdebug_erase_store(idx, NULL);
7955 	return ret;
7956 }
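/*
 * Example invocation (parameter values are illustrative only):
 *
 *   modprobe scsi_debug dev_size_mb=256 num_tgts=2 max_luns=4 add_host=2
 *
 * builds two pseudo hosts, each exposing num_tgts * max_luns devices,
 * sharing a single 256 MiB ram store; per_host_store=1 would give each
 * host its own store instead.
 */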
7957 
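/*
 * Module unload: undo scsi_debug_init() in reverse order -- remove every
 * emulated host, free the queued-command cache, unregister the driver,
 * bus and root pseudo device, then release all backing stores, the
 * store xarray and the debugfs root.
 */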
7958 static void __exit scsi_debug_exit(void)
7959 {
7960 	int k = sdebug_num_hosts;
7961 
7962 	for (; k; k--)
7963 		sdebug_do_remove_host(true);
7964 	kmem_cache_destroy(queued_cmd_cache);
7965 	driver_unregister(&sdebug_driverfs_driver);
7966 	bus_unregister(&pseudo_lld_bus);
7967 	root_device_unregister(pseudo_primary);
7968 
7969 	sdebug_erase_all_stores(false);
7970 	xa_destroy(per_store_ap);
7971 	debugfs_remove(sdebug_debugfs_root);
7972 }
7973 
7974 device_initcall(scsi_debug_init);
7975 module_exit(scsi_debug_exit);
7976 
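/*
 * Driver-core release callback: frees the adapter's host info once the
 * last reference to its device has been dropped.
 */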
7977 static void sdebug_release_adapter(struct device *dev)
7978 {
7979 	struct sdebug_host_info *sdbg_host;
7980 
7981 	sdbg_host = dev_to_sdebug_host(dev);
7982 	kfree(sdbg_host);
7983 }
7984 
7985 /* idx must be valid, if sip is NULL then it will be obtained using idx */
7986 static void sdebug_erase_store(int idx, struct sdeb_store_info *sip)
7987 {
7988 	if (idx < 0)
7989 		return;
7990 	if (!sip) {
7991 		if (xa_empty(per_store_ap))
7992 			return;
7993 		sip = xa_load(per_store_ap, idx);
7994 		if (!sip)
7995 			return;
7996 	}
7997 	vfree(sip->map_storep);
7998 	vfree(sip->dif_storep);
7999 	vfree(sip->storep);
8000 	xa_erase(per_store_ap, idx);
8001 	kfree(sip);
8002 }
8003 
8004 /* Assume apart_from_first==false only in shutdown case. */
8005 static void sdebug_erase_all_stores(bool apart_from_first)
8006 {
8007 	unsigned long idx;
8008 	struct sdeb_store_info *sip = NULL;
8009 
8010 	xa_for_each(per_store_ap, idx, sip) {
8011 		if (apart_from_first)
8012 			apart_from_first = false;
8013 		else
8014 			sdebug_erase_store(idx, sip);
8015 	}
8016 	if (apart_from_first)
8017 		sdeb_most_recent_idx = sdeb_first_idx;
8018 }
8019 
8020 /*
8021  * Returns store xarray new element index (idx) if >=0 else negated errno.
8022  * Limit the number of stores to 65536.
8023  */
8024 static int sdebug_add_store(void)
8025 {
8026 	int res;
8027 	u32 n_idx;
8028 	unsigned long iflags;
8029 	unsigned long sz = (unsigned long)sdebug_dev_size_mb * 1048576;
8030 	struct sdeb_store_info *sip = NULL;
8031 	struct xa_limit xal = { .max = 1 << 16, .min = 0 };
8032 
8033 	sip = kzalloc(sizeof(*sip), GFP_KERNEL);
8034 	if (!sip)
8035 		return -ENOMEM;
8036 
8037 	xa_lock_irqsave(per_store_ap, iflags);
8038 	res = __xa_alloc(per_store_ap, &n_idx, sip, xal, GFP_ATOMIC);
8039 	if (unlikely(res < 0)) {
8040 		xa_unlock_irqrestore(per_store_ap, iflags);
8041 		kfree(sip);
8042 		pr_warn("%s: xa_alloc() errno=%d\n", __func__, -res);
8043 		return res;
8044 	}
8045 	sdeb_most_recent_idx = n_idx;
8046 	if (sdeb_first_idx < 0)
8047 		sdeb_first_idx = n_idx;
8048 	xa_unlock_irqrestore(per_store_ap, iflags);
8049 
8050 	res = -ENOMEM;
8051 	sip->storep = vzalloc(sz);
8052 	if (!sip->storep) {
8053 		pr_err("user data oom\n");
8054 		goto err;
8055 	}
8056 	if (sdebug_num_parts > 0)
8057 		sdebug_build_parts(sip->storep, sz);
8058 
8059 	/* DIF/DIX: what T10 calls Protection Information (PI) */
8060 	if (sdebug_dix) {
8061 		int dif_size;
8062 
8063 		dif_size = sdebug_store_sectors * sizeof(struct t10_pi_tuple);
8064 		sip->dif_storep = vmalloc(dif_size);
8065 
8066 		pr_info("dif_storep %u bytes @ %pK\n", dif_size,
8067 			sip->dif_storep);
8068 
8069 		if (!sip->dif_storep) {
8070 			pr_err("DIX oom\n");
8071 			goto err;
8072 		}
8073 		memset(sip->dif_storep, 0xff, dif_size);
8074 	}
8075 	/* Logical Block Provisioning */
8076 	if (scsi_debug_lbp()) {
8077 		map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
8078 		sip->map_storep = vmalloc(array_size(sizeof(long),
8079 						     BITS_TO_LONGS(map_size)));
8080 
8081 		pr_info("%lu provisioning blocks\n", map_size);
8082 
8083 		if (!sip->map_storep) {
8084 			pr_err("LBP map oom\n");
8085 			goto err;
8086 		}
8087 
8088 		bitmap_zero(sip->map_storep, map_size);
8089 
8090 		/* Map first 1KB for partition table */
8091 		if (sdebug_num_parts)
8092 			map_region(sip, 0, 2);
8093 	}
8094 
8095 	rwlock_init(&sip->macc_data_lck);
8096 	rwlock_init(&sip->macc_meta_lck);
8097 	rwlock_init(&sip->macc_sector_lck);
8098 	return (int)n_idx;
8099 err:
8100 	sdebug_erase_store((int)n_idx, sip);
8101 	pr_warn("%s: failed, errno=%d\n", __func__, -res);
8102 	return res;
8103 }
8104 
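/*
 * Build one emulated host: bind it to the backing store selected by
 * per_host_idx (the first store when negative), create its
 * num_tgts * max_luns device entries, and register the adapter on the
 * pseudo bus.  On failure the partially built device list is freed.
 */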
8105 static int sdebug_add_host_helper(int per_host_idx)
8106 {
8107 	int k, devs_per_host, idx;
8108 	int error = -ENOMEM;
8109 	struct sdebug_host_info *sdbg_host;
8110 	struct sdebug_dev_info *sdbg_devinfo, *tmp;
8111 
8112 	sdbg_host = kzalloc(sizeof(*sdbg_host), GFP_KERNEL);
8113 	if (!sdbg_host)
8114 		return -ENOMEM;
8115 	idx = (per_host_idx < 0) ? sdeb_first_idx : per_host_idx;
8116 	if (xa_get_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE))
8117 		xa_clear_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
8118 	sdbg_host->si_idx = idx;
8119 
8120 	INIT_LIST_HEAD(&sdbg_host->dev_info_list);
8121 
8122 	devs_per_host = sdebug_num_tgts * sdebug_max_luns;
8123 	for (k = 0; k < devs_per_host; k++) {
8124 		sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
8125 		if (!sdbg_devinfo)
8126 			goto clean;
8127 	}
8128 
8129 	mutex_lock(&sdebug_host_list_mutex);
8130 	list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
8131 	mutex_unlock(&sdebug_host_list_mutex);
8132 
8133 	sdbg_host->dev.bus = &pseudo_lld_bus;
8134 	sdbg_host->dev.parent = pseudo_primary;
8135 	sdbg_host->dev.release = &sdebug_release_adapter;
8136 	dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_num_hosts);
8137 
8138 	error = device_register(&sdbg_host->dev);
8139 	if (error) {
8140 		mutex_lock(&sdebug_host_list_mutex);
8141 		list_del(&sdbg_host->host_list);
8142 		mutex_unlock(&sdebug_host_list_mutex);
8143 		goto clean;
8144 	}
8145 
8146 	++sdebug_num_hosts;
8147 	return 0;
8148 
8149 clean:
8150 	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
8151 				 dev_list) {
8152 		list_del(&sdbg_devinfo->dev_list);
8153 		kfree(sdbg_devinfo->zstate);
8154 		kfree(sdbg_devinfo);
8155 	}
8156 	if (sdbg_host->dev.release)
8157 		put_device(&sdbg_host->dev);
8158 	else
8159 		kfree(sdbg_host);
8160 	pr_warn("%s: failed, errno=%d\n", __func__, -error);
8161 	return error;
8162 }
8163 
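/* Add one host, optionally backed by a newly allocated store. */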
8164 static int sdebug_do_add_host(bool mk_new_store)
8165 {
8166 	int ph_idx = sdeb_most_recent_idx;
8167 
8168 	if (mk_new_store) {
8169 		ph_idx = sdebug_add_store();
8170 		if (ph_idx < 0)
8171 			return ph_idx;
8172 	}
8173 	return sdebug_add_host_helper(ph_idx);
8174 }
8175 
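/*
 * Remove the most recently added host.  Outside the final shutdown pass
 * (the_end), a backing store no other host references is marked
 * SDEB_XA_NOT_IN_USE so that a later host add may reuse it.
 */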
8176 static void sdebug_do_remove_host(bool the_end)
8177 {
8178 	int idx = -1;
8179 	struct sdebug_host_info *sdbg_host = NULL;
8180 	struct sdebug_host_info *sdbg_host2;
8181 
8182 	mutex_lock(&sdebug_host_list_mutex);
8183 	if (!list_empty(&sdebug_host_list)) {
8184 		sdbg_host = list_entry(sdebug_host_list.prev,
8185 				       struct sdebug_host_info, host_list);
8186 		idx = sdbg_host->si_idx;
8187 	}
8188 	if (!the_end && idx >= 0) {
8189 		bool unique = true;
8190 
8191 		list_for_each_entry(sdbg_host2, &sdebug_host_list, host_list) {
8192 			if (sdbg_host2 == sdbg_host)
8193 				continue;
8194 			if (idx == sdbg_host2->si_idx) {
8195 				unique = false;
8196 				break;
8197 			}
8198 		}
8199 		if (unique) {
8200 			xa_set_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
8201 			if (idx == sdeb_most_recent_idx)
8202 				--sdeb_most_recent_idx;
8203 		}
8204 	}
8205 	if (sdbg_host)
8206 		list_del(&sdbg_host->host_list);
8207 	mutex_unlock(&sdebug_host_list_mutex);
8208 
8209 	if (!sdbg_host)
8210 		return;
8211 
8212 	device_unregister(&sdbg_host->dev);
8213 	--sdebug_num_hosts;
8214 }
8215 
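/*
 * ->change_queue_depth callback: with all queues blocked, clamp the
 * requested depth to [1, SDEBUG_CANQUEUE] and apply it; returns the
 * depth now in effect.
 */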
8216 static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
8217 {
8218 	struct sdebug_dev_info *devip = sdev->hostdata;
8219 
8220 	if (!devip)
8221 		return	-ENODEV;
8222 
8223 	mutex_lock(&sdebug_host_list_mutex);
8224 	block_unblock_all_queues(true);
8225 
8226 	if (qdepth > SDEBUG_CANQUEUE) {
8227 		qdepth = SDEBUG_CANQUEUE;
8228 		pr_warn("%s: requested qdepth [%d] exceeds canqueue [%d], trim\n", __func__,
8229 			qdepth, SDEBUG_CANQUEUE);
8230 	}
8231 	if (qdepth < 1)
8232 		qdepth = 1;
8233 	if (qdepth != sdev->queue_depth)
8234 		scsi_change_queue_depth(sdev, qdepth);
8235 
8236 	block_unblock_all_queues(false);
8237 	mutex_unlock(&sdebug_host_list_mutex);
8238 
8239 	if (SDEBUG_OPT_Q_NOISE & sdebug_opts)
8240 		sdev_printk(KERN_INFO, sdev, "%s: qdepth=%d\n", __func__, qdepth);
8241 
8242 	return sdev->queue_depth;
8243 }
8244 
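/*
 * When every_nth is active, decide whether to swallow this command to
 * emulate a timeout; SDEBUG_OPT_MAC_TIMEOUT restricts this to medium
 * access commands.
 */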
8245 static bool fake_timeout(struct scsi_cmnd *scp)
8246 {
8247 	if (0 == (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth))) {
8248 		if (sdebug_every_nth < -1)
8249 			sdebug_every_nth = -1;
8250 		if (SDEBUG_OPT_TIMEOUT & sdebug_opts)
8251 			return true; /* ignore command causing timeout */
8252 		else if (SDEBUG_OPT_MAC_TIMEOUT & sdebug_opts &&
8253 			 scsi_medium_access_command(scp))
8254 			return true; /* time out reads and writes */
8255 	}
8256 	return false;
8257 }
8258 
8259 /* Response to TUR or media access command when device stopped */
8260 static int resp_not_ready(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
8261 {
8262 	int stopped_state;
8263 	u64 diff_ns = 0;
8264 	ktime_t now_ts = ktime_get_boottime();
8265 	struct scsi_device *sdp = scp->device;
8266 
8267 	stopped_state = atomic_read(&devip->stopped);
8268 	if (stopped_state == 2) {
8269 		if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
8270 			diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));
8271 			if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
8272 				/* tur_ms_to_ready timer extinguished */
8273 				atomic_set(&devip->stopped, 0);
8274 				return 0;
8275 			}
8276 		}
8277 		mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x1);
8278 		if (sdebug_verbose)
8279 			sdev_printk(KERN_INFO, sdp,
8280 				    "%s: Not ready: in process of becoming ready\n", my_name);
8281 		if (scp->cmnd[0] == TEST_UNIT_READY) {
8282 			u64 tur_nanosecs_to_ready = (u64)sdeb_tur_ms_to_ready * 1000000;
8283 
8284 			if (diff_ns <= tur_nanosecs_to_ready)
8285 				diff_ns = tur_nanosecs_to_ready - diff_ns;
8286 			else
8287 				diff_ns = tur_nanosecs_to_ready;
8288 			/* As per 20-061r2 approved for spc6 by T10 on 20200716 */
8289 			do_div(diff_ns, 1000000);	/* diff_ns becomes milliseconds */
8290 			scsi_set_sense_information(scp->sense_buffer, SCSI_SENSE_BUFFERSIZE,
8291 						   diff_ns);
8292 			return check_condition_result;
8293 		}
8294 	}
8295 	mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x2);
8296 	if (sdebug_verbose)
8297 		sdev_printk(KERN_INFO, sdp, "%s: Not ready: initializing command required\n",
8298 			    my_name);
8299 	return check_condition_result;
8300 }
8301 
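/*
 * ->map_queues callback: when more than one hardware queue exists,
 * split them between default submission queues and poll queues.
 */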
8302 static void sdebug_map_queues(struct Scsi_Host *shost)
8303 {
8304 	int i, qoff;
8305 
8306 	if (shost->nr_hw_queues == 1)
8307 		return;
8308 
8309 	for (i = 0, qoff = 0; i < HCTX_MAX_TYPES; i++) {
8310 		struct blk_mq_queue_map *map = &shost->tag_set.map[i];
8311 
8312 		map->nr_queues  = 0;
8313 
8314 		if (i == HCTX_TYPE_DEFAULT)
8315 			map->nr_queues = submit_queues - poll_queues;
8316 		else if (i == HCTX_TYPE_POLL)
8317 			map->nr_queues = poll_queues;
8318 
8319 		if (!map->nr_queues) {
8320 			BUG_ON(i == HCTX_TYPE_DEFAULT);
8321 			continue;
8322 		}
8323 
8324 		map->queue_offset = qoff;
8325 		blk_mq_map_queues(map);
8326 
8327 		qoff += map->nr_queues;
8328 	}
8329 }
8330 
8331 struct sdebug_blk_mq_poll_data {
8332 	unsigned int queue_num;
8333 	int *num_entries;
8334 };
8335 
8336 /*
8337  * We don't handle aborted commands here, but it does not seem possible to have
8338  * aborted polled commands from schedule_resp()
8339  */
8340 static bool sdebug_blk_mq_poll_iter(struct request *rq, void *opaque)
8341 {
8342 	struct sdebug_blk_mq_poll_data *data = opaque;
8343 	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
8344 	struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmd);
8345 	struct sdebug_defer *sd_dp;
8346 	u32 unique_tag = blk_mq_unique_tag(rq);
8347 	u16 hwq = blk_mq_unique_tag_to_hwq(unique_tag);
8348 	struct sdebug_queued_cmd *sqcp;
8349 	unsigned long flags;
8350 	int queue_num = data->queue_num;
8351 	ktime_t time;
8352 
8353 	/* We're only interested in one queue for this iteration */
8354 	if (hwq != queue_num)
8355 		return true;
8356 
8357 	/* Subsequent checks would fail if this failed, but check anyway */
8358 	if (!test_bit(SCMD_STATE_INFLIGHT, &cmd->state))
8359 		return true;
8360 
8361 	time = ktime_get_boottime();
8362 
8363 	spin_lock_irqsave(&sdsc->lock, flags);
8364 	sqcp = TO_QUEUED_CMD(cmd);
8365 	if (!sqcp) {
8366 		spin_unlock_irqrestore(&sdsc->lock, flags);
8367 		return true;
8368 	}
8369 
8370 	sd_dp = &sqcp->sd_dp;
8371 	if (READ_ONCE(sd_dp->defer_t) != SDEB_DEFER_POLL) {
8372 		spin_unlock_irqrestore(&sdsc->lock, flags);
8373 		return true;
8374 	}
8375 
8376 	if (time < sd_dp->cmpl_ts) {
8377 		spin_unlock_irqrestore(&sdsc->lock, flags);
8378 		return true;
8379 	}
8380 
8381 	ASSIGN_QUEUED_CMD(cmd, NULL);
8382 	spin_unlock_irqrestore(&sdsc->lock, flags);
8383 
8384 	if (sdebug_statistics) {
8385 		atomic_inc(&sdebug_completions);
8386 		if (raw_smp_processor_id() != sd_dp->issuing_cpu)
8387 			atomic_inc(&sdebug_miss_cpus);
8388 	}
8389 
8390 	sdebug_free_queued_cmd(sqcp);
8391 
8392 	scsi_done(cmd); /* callback to mid level */
8393 	(*data->num_entries)++;
8394 	return true;
8395 }
8396 
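/*
 * ->mq_poll callback: iterate this host's busy requests and complete
 * any deferred-for-polling command whose completion time has passed;
 * returns the number completed.
 */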
8397 static int sdebug_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
8398 {
8399 	int num_entries = 0;
8400 	struct sdebug_blk_mq_poll_data data = {
8401 		.queue_num = queue_num,
8402 		.num_entries = &num_entries,
8403 	};
8404 
8405 	blk_mq_tagset_busy_iter(&shost->tag_set, sdebug_blk_mq_poll_iter,
8406 				&data);
8407 
8408 	if (num_entries > 0)
8409 		atomic_add(num_entries, &sdeb_mq_poll_count);
8410 	return num_entries;
8411 }
8412 
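/*
 * ERR_TMOUT_CMD injection: non-zero return means drop this command to
 * simulate a timeout.  A negative err->cnt steps toward zero, bounding
 * how many commands are affected.
 */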
8413 static int sdebug_timeout_cmd(struct scsi_cmnd *cmnd)
8414 {
8415 	struct scsi_device *sdp = cmnd->device;
8416 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
8417 	struct sdebug_err_inject *err;
8418 	unsigned char *cmd = cmnd->cmnd;
8419 	int ret = 0;
8420 
8421 	if (devip == NULL)
8422 		return 0;
8423 
8424 	rcu_read_lock();
8425 	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
8426 		if (err->type == ERR_TMOUT_CMD &&
8427 		    (err->cmd == cmd[0] || err->cmd == 0xff)) {
8428 			ret = !!err->cnt;
8429 			if (err->cnt < 0)
8430 				err->cnt++;
8431 
8432 			rcu_read_unlock();
8433 			return ret;
8434 		}
8435 	}
8436 	rcu_read_unlock();
8437 
8438 	return 0;
8439 }
8440 
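/*
 * ERR_FAIL_QUEUE_CMD injection: make queuecommand itself return the
 * configured queuecmd_ret value (e.g. a busy indication) instead of
 * queueing the command.
 */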
8441 static int sdebug_fail_queue_cmd(struct scsi_cmnd *cmnd)
8442 {
8443 	struct scsi_device *sdp = cmnd->device;
8444 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
8445 	struct sdebug_err_inject *err;
8446 	unsigned char *cmd = cmnd->cmnd;
8447 	int ret = 0;
8448 
8449 	if (devip == NULL)
8450 		return 0;
8451 
8452 	rcu_read_lock();
8453 	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
8454 		if (err->type == ERR_FAIL_QUEUE_CMD &&
8455 		    (err->cmd == cmd[0] || err->cmd == 0xff)) {
8456 			ret = err->cnt ? err->queuecmd_ret : 0;
8457 			if (err->cnt < 0)
8458 				err->cnt++;
8459 
8460 			rcu_read_unlock();
8461 			return ret;
8462 		}
8463 	}
8464 	rcu_read_unlock();
8465 
8466 	return 0;
8467 }
8468 
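/*
 * ERR_FAIL_CMD injection: complete the command via schedule_resp() with
 * the configured sense data and status/host/driver bytes rather than
 * processing it normally.
 */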
8469 static int sdebug_fail_cmd(struct scsi_cmnd *cmnd, int *retval,
8470 			   struct sdebug_err_inject *info)
8471 {
8472 	struct scsi_device *sdp = cmnd->device;
8473 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
8474 	struct sdebug_err_inject *err;
8475 	unsigned char *cmd = cmnd->cmnd;
8476 	int ret = 0;
8477 	int result;
8478 
8479 	if (devip == NULL)
8480 		return 0;
8481 
8482 	rcu_read_lock();
8483 	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
8484 		if (err->type == ERR_FAIL_CMD &&
8485 		    (err->cmd == cmd[0] || err->cmd == 0xff)) {
8486 			if (!err->cnt) {
8487 				rcu_read_unlock();
8488 				return 0;
8489 			}
8490 
8491 			ret = !!err->cnt;
8492 			rcu_read_unlock();
8493 			goto out_handle;
8494 		}
8495 	}
8496 	rcu_read_unlock();
8497 
8498 	return 0;
8499 
8500 out_handle:
8501 	if (err->cnt < 0)
8502 		err->cnt++;
8503 	mk_sense_buffer(cmnd, err->sense_key, err->asc, err->asq);
8504 	result = err->status_byte | err->host_byte << 16 | err->driver_byte << 24;
8505 	*info = *err;
8506 	*retval = schedule_resp(cmnd, devip, result, NULL, 0, 0);
8507 
8508 	return ret;
8509 }
8510 
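/*
 * ->queuecommand entry point: look the CDB up in the opcode table
 * (including service actions), enforce strict CDB-mask checking, report
 * pending unit attentions or not-ready states, apply error injection,
 * then pass the chosen resp_* handler to schedule_resp() with the
 * configured delay.
 */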
8511 static int scsi_debug_queuecommand(struct Scsi_Host *shost,
8512 				   struct scsi_cmnd *scp)
8513 {
8514 	u8 sdeb_i;
8515 	struct scsi_device *sdp = scp->device;
8516 	const struct opcode_info_t *oip;
8517 	const struct opcode_info_t *r_oip;
8518 	struct sdebug_dev_info *devip;
8519 	u8 *cmd = scp->cmnd;
8520 	int (*r_pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
8521 	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *) = NULL;
8522 	int k, na;
8523 	int errsts = 0;
8524 	u64 lun_index = sdp->lun & 0x3FFF;
8525 	u32 flags;
8526 	u16 sa;
8527 	u8 opcode = cmd[0];
8528 	bool has_wlun_rl;
8529 	bool inject_now;
8530 	int ret = 0;
8531 	struct sdebug_err_inject err;
8532 
8533 	scsi_set_resid(scp, 0);
8534 	if (sdebug_statistics) {
8535 		atomic_inc(&sdebug_cmnd_count);
8536 		inject_now = inject_on_this_cmd();
8537 	} else {
8538 		inject_now = false;
8539 	}
8540 	if (unlikely(sdebug_verbose &&
8541 		     !(SDEBUG_OPT_NO_CDB_NOISE & sdebug_opts))) {
8542 		char b[120];
8543 		int n, len, sb;
8544 
8545 		len = scp->cmd_len;
8546 		sb = (int)sizeof(b);
8547 		if (len > 32)
8548 			strcpy(b, "too long, over 32 bytes");
8549 		else {
8550 			for (k = 0, n = 0; k < len && n < sb; ++k)
8551 				n += scnprintf(b + n, sb - n, "%02x ",
8552 					       (u32)cmd[k]);
8553 		}
8554 		sdev_printk(KERN_INFO, sdp, "%s: tag=%#x, cmd %s\n", my_name,
8555 			    blk_mq_unique_tag(scsi_cmd_to_rq(scp)), b);
8556 	}
8557 	if (unlikely(inject_now && (sdebug_opts & SDEBUG_OPT_HOST_BUSY)))
8558 		return SCSI_MLQUEUE_HOST_BUSY;
8559 	has_wlun_rl = (sdp->lun == SCSI_W_LUN_REPORT_LUNS);
8560 	if (unlikely(lun_index >= sdebug_max_luns && !has_wlun_rl))
8561 		goto err_out;
8562 
8563 	sdeb_i = opcode_ind_arr[opcode];	/* fully mapped */
8564 	oip = &opcode_info_arr[sdeb_i];		/* safe if table consistent */
8565 	devip = (struct sdebug_dev_info *)sdp->hostdata;
8566 	if (unlikely(!devip)) {
8567 		devip = find_build_dev_info(sdp);
8568 		if (NULL == devip)
8569 			goto err_out;
8570 	}
8571 
8572 	if (sdebug_timeout_cmd(scp)) {
8573 		scmd_printk(KERN_INFO, scp, "timeout command 0x%x\n", opcode);
8574 		return 0;
8575 	}
8576 
8577 	ret = sdebug_fail_queue_cmd(scp);
8578 	if (ret) {
8579 		scmd_printk(KERN_INFO, scp, "fail queue command 0x%x with 0x%x\n",
8580 				opcode, ret);
8581 		return ret;
8582 	}
8583 
8584 	if (sdebug_fail_cmd(scp, &ret, &err)) {
8585 		scmd_printk(KERN_INFO, scp,
8586 			"fail command 0x%x with hostbyte=0x%x, "
8587 			"driverbyte=0x%x, statusbyte=0x%x, "
8588 			"sense_key=0x%x, asc=0x%x, asq=0x%x\n",
8589 			opcode, err.host_byte, err.driver_byte,
8590 			err.status_byte, err.sense_key, err.asc, err.asq);
8591 		return ret;
8592 	}
8593 
8594 	if (unlikely(inject_now && !atomic_read(&sdeb_inject_pending)))
8595 		atomic_set(&sdeb_inject_pending, 1);
8596 
8597 	na = oip->num_attached;
8598 	r_pfp = oip->pfp;
8599 	if (na) {	/* multiple commands with this opcode */
8600 		r_oip = oip;
8601 		if (FF_SA & r_oip->flags) {
8602 			if (F_SA_LOW & oip->flags)
8603 				sa = 0x1f & cmd[1];
8604 			else
8605 				sa = get_unaligned_be16(cmd + 8);
8606 			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
8607 				if (opcode == oip->opcode && sa == oip->sa)
8608 					break;
8609 			}
8610 		} else {   /* since no service action only check opcode */
8611 			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
8612 				if (opcode == oip->opcode)
8613 					break;
8614 			}
8615 		}
8616 		if (k > na) {
8617 			if (F_SA_LOW & r_oip->flags)
8618 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 4);
8619 			else if (F_SA_HIGH & r_oip->flags)
8620 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, 7);
8621 			else
8622 				mk_sense_invalid_opcode(scp);
8623 			goto check_cond;
8624 		}
8625 	}	/* else (when na==0) we assume the oip is a match */
8626 	flags = oip->flags;
8627 	if (unlikely(F_INV_OP & flags)) {
8628 		mk_sense_invalid_opcode(scp);
8629 		goto check_cond;
8630 	}
8631 	if (unlikely(has_wlun_rl && !(F_RL_WLUN_OK & flags))) {
8632 		if (sdebug_verbose)
8633 			sdev_printk(KERN_INFO, sdp, "%s: Opcode 0x%x not%s\n",
8634 				    my_name, opcode, " supported for wlun");
8635 		mk_sense_invalid_opcode(scp);
8636 		goto check_cond;
8637 	}
8638 	if (unlikely(sdebug_strict)) {	/* check cdb against mask */
8639 		u8 rem;
8640 		int j;
8641 
8642 		for (k = 1; k < oip->len_mask[0] && k < 16; ++k) {
8643 			rem = ~oip->len_mask[k] & cmd[k];
8644 			if (rem) {
8645 				for (j = 7; j >= 0; --j, rem <<= 1) {
8646 					if (0x80 & rem)
8647 						break;
8648 				}
8649 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, k, j);
8650 				goto check_cond;
8651 			}
8652 		}
8653 	}
8654 	if (unlikely(!(F_SKIP_UA & flags) &&
8655 		     find_first_bit(devip->uas_bm,
8656 				    SDEBUG_NUM_UAS) != SDEBUG_NUM_UAS)) {
8657 		errsts = make_ua(scp, devip);
8658 		if (errsts)
8659 			goto check_cond;
8660 	}
8661 	if (unlikely(((F_M_ACCESS & flags) || scp->cmnd[0] == TEST_UNIT_READY) &&
8662 		     atomic_read(&devip->stopped))) {
8663 		errsts = resp_not_ready(scp, devip);
8664 		if (errsts)
8665 			goto fini;
8666 	}
8667 	if (sdebug_fake_rw && (F_FAKE_RW & flags))
8668 		goto fini;
8669 	if (unlikely(sdebug_every_nth)) {
8670 		if (fake_timeout(scp))
8671 			return 0;	/* ignore command: make trouble */
8672 	}
8673 	if (likely(oip->pfp))
8674 		pfp = oip->pfp;	/* calls a resp_* function */
8675 	else
8676 		pfp = r_pfp;    /* if leaf function ptr NULL, try the root's */
8677 
8678 fini:
8679 	if (F_DELAY_OVERR & flags)	/* cmds like INQUIRY respond asap */
8680 		return schedule_resp(scp, devip, errsts, pfp, 0, 0);
8681 	else if ((flags & F_LONG_DELAY) && (sdebug_jdelay > 0 ||
8682 					    sdebug_ndelay > 10000)) {
8683 		/*
8684 		 * Skip long delays if ndelay <= 10 microseconds. Otherwise
8685 		 * for Start Stop Unit (SSU) want at least 1 second delay and
8686 		 * if sdebug_jdelay>1 want a long delay of that many seconds.
8687 		 * For Synchronize Cache want 1/20 of SSU's delay.
8688 		 */
8689 		int jdelay = (sdebug_jdelay < 2) ? 1 : sdebug_jdelay;
8690 		int denom = (flags & F_SYNC_DELAY) ? 20 : 1;
8691 
8692 		jdelay = mult_frac(USER_HZ * jdelay, HZ, denom * USER_HZ);
8693 		return schedule_resp(scp, devip, errsts, pfp, jdelay, 0);
8694 	} else
8695 		return schedule_resp(scp, devip, errsts, pfp, sdebug_jdelay,
8696 				     sdebug_ndelay);
8697 check_cond:
8698 	return schedule_resp(scp, devip, check_condition_result, NULL, 0, 0);
8699 err_out:
8700 	return schedule_resp(scp, NULL, DID_NO_CONNECT << 16, NULL, 0, 0);
8701 }
8702 
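/* Per-command private data init: just the command's spinlock. */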
8703 static int sdebug_init_cmd_priv(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
8704 {
8705 	struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmd);
8706 
8707 	spin_lock_init(&sdsc->lock);
8708 
8709 	return 0;
8710 }
8711 
8712 static struct scsi_host_template sdebug_driver_template = {
8713 	.show_info =		scsi_debug_show_info,
8714 	.write_info =		scsi_debug_write_info,
8715 	.proc_name =		sdebug_proc_name,
8716 	.name =			"SCSI DEBUG",
8717 	.info =			scsi_debug_info,
8718 	.slave_alloc =		scsi_debug_slave_alloc,
8719 	.slave_configure =	scsi_debug_slave_configure,
8720 	.slave_destroy =	scsi_debug_slave_destroy,
8721 	.ioctl =		scsi_debug_ioctl,
8722 	.queuecommand =		scsi_debug_queuecommand,
8723 	.change_queue_depth =	sdebug_change_qdepth,
8724 	.map_queues =		sdebug_map_queues,
8725 	.mq_poll =		sdebug_blk_mq_poll,
8726 	.eh_abort_handler =	scsi_debug_abort,
8727 	.eh_device_reset_handler = scsi_debug_device_reset,
8728 	.eh_target_reset_handler = scsi_debug_target_reset,
8729 	.eh_bus_reset_handler = scsi_debug_bus_reset,
8730 	.eh_host_reset_handler = scsi_debug_host_reset,
8731 	.can_queue =		SDEBUG_CANQUEUE,
8732 	.this_id =		7,
8733 	.sg_tablesize =		SG_MAX_SEGMENTS,
8734 	.cmd_per_lun =		DEF_CMD_PER_LUN,
8735 	.max_sectors =		-1U,
8736 	.max_segment_size =	-1U,
8737 	.module =		THIS_MODULE,
8738 	.track_queue_depth =	1,
8739 	.cmd_size = sizeof(struct sdebug_scsi_cmd),
8740 	.init_cmd_priv = sdebug_init_cmd_priv,
8741 	.target_alloc =		sdebug_target_alloc,
8742 	.target_destroy =	sdebug_target_destroy,
8743 };
8744 
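/*
 * Bus probe: allocate a Scsi_Host for this pseudo adapter, size its
 * submit/poll queues, configure DIF/DIX protection and the guard type,
 * then add and scan the host.
 */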
8745 static int sdebug_driver_probe(struct device *dev)
8746 {
8747 	int error = 0;
8748 	struct sdebug_host_info *sdbg_host;
8749 	struct Scsi_Host *hpnt;
8750 	int hprot;
8751 
8752 	sdbg_host = dev_to_sdebug_host(dev);
8753 
8754 	sdebug_driver_template.can_queue = sdebug_max_queue;
8755 	sdebug_driver_template.cmd_per_lun = sdebug_max_queue;
8756 	if (!sdebug_clustering)
8757 		sdebug_driver_template.dma_boundary = PAGE_SIZE - 1;
8758 
8759 	hpnt = scsi_host_alloc(&sdebug_driver_template, 0);
8760 	if (NULL == hpnt) {
8761 		pr_err("scsi_host_alloc failed\n");
8762 		error = -ENODEV;
8763 		return error;
8764 	}
8765 	if (submit_queues > nr_cpu_ids) {
8766 		pr_warn("%s: trim submit_queues (was %d) to nr_cpu_ids=%u\n",
8767 			my_name, submit_queues, nr_cpu_ids);
8768 		submit_queues = nr_cpu_ids;
8769 	}
8770 	/*
8771 	 * Decide whether to tell scsi subsystem that we want mq. The
8772 	 * following should give the same answer for each host.
8773 	 */
8774 	hpnt->nr_hw_queues = submit_queues;
8775 	if (sdebug_host_max_queue)
8776 		hpnt->host_tagset = 1;
8777 
8778 	/* poll queues are possible for nr_hw_queues > 1 */
8779 	if (hpnt->nr_hw_queues == 1 || (poll_queues < 1)) {
8780 		pr_warn("%s: trim poll_queues to 0. poll_q/nr_hw = (%d/%d)\n",
8781 			 my_name, poll_queues, hpnt->nr_hw_queues);
8782 		poll_queues = 0;
8783 	}
8784 
8785 	/*
8786 	 * Poll queues don't need interrupts, but we need at least one I/O queue
8787 	 * left over for non-polled I/O.
8788 	 * If condition not met, trim poll_queues to 1 (just for simplicity).
8789 	 */
8790 	if (poll_queues >= submit_queues) {
8791 		if (submit_queues < 3)
8792 			pr_warn("%s: trim poll_queues to 1\n", my_name);
8793 		else
8794 			pr_warn("%s: trim poll_queues to 1. Perhaps try poll_queues=%d\n",
8795 				my_name, submit_queues - 1);
8796 		poll_queues = 1;
8797 	}
8798 	if (poll_queues)
8799 		hpnt->nr_maps = 3;
8800 
8801 	sdbg_host->shost = hpnt;
8802 	if ((hpnt->this_id >= 0) && (sdebug_num_tgts > hpnt->this_id))
8803 		hpnt->max_id = sdebug_num_tgts + 1;
8804 	else
8805 		hpnt->max_id = sdebug_num_tgts;
8806 	/* = sdebug_max_luns; */
8807 	hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
8808 
8809 	hprot = 0;
8810 
8811 	switch (sdebug_dif) {
8812 
8813 	case T10_PI_TYPE1_PROTECTION:
8814 		hprot = SHOST_DIF_TYPE1_PROTECTION;
8815 		if (sdebug_dix)
8816 			hprot |= SHOST_DIX_TYPE1_PROTECTION;
8817 		break;
8818 
8819 	case T10_PI_TYPE2_PROTECTION:
8820 		hprot = SHOST_DIF_TYPE2_PROTECTION;
8821 		if (sdebug_dix)
8822 			hprot |= SHOST_DIX_TYPE2_PROTECTION;
8823 		break;
8824 
8825 	case T10_PI_TYPE3_PROTECTION:
8826 		hprot = SHOST_DIF_TYPE3_PROTECTION;
8827 		if (sdebug_dix)
8828 			hprot |= SHOST_DIX_TYPE3_PROTECTION;
8829 		break;
8830 
8831 	default:
8832 		if (sdebug_dix)
8833 			hprot |= SHOST_DIX_TYPE0_PROTECTION;
8834 		break;
8835 	}
8836 
8837 	scsi_host_set_prot(hpnt, hprot);
8838 
8839 	if (have_dif_prot || sdebug_dix)
8840 		pr_info("host protection%s%s%s%s%s%s%s\n",
8841 			(hprot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
8842 			(hprot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
8843 			(hprot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
8844 			(hprot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
8845 			(hprot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
8846 			(hprot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
8847 			(hprot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");
8848 
8849 	if (sdebug_guard == 1)
8850 		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
8851 	else
8852 		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);
8853 
8854 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & sdebug_opts);
8855 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & sdebug_opts);
8856 	if (sdebug_every_nth)	/* need stats counters for every_nth */
8857 		sdebug_statistics = true;
8858 	error = scsi_add_host(hpnt, &sdbg_host->dev);
8859 	if (error) {
8860 		pr_err("scsi_add_host failed\n");
8861 		error = -ENODEV;
8862 		scsi_host_put(hpnt);
8863 	} else {
8864 		scsi_scan_host(hpnt);
8865 	}
8866 
8867 	return error;
8868 }
8869 
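/*
 * Bus remove: detach the Scsi_Host, free all per-device state, then
 * drop the final host reference.
 */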
8870 static void sdebug_driver_remove(struct device *dev)
8871 {
8872 	struct sdebug_host_info *sdbg_host;
8873 	struct sdebug_dev_info *sdbg_devinfo, *tmp;
8874 
8875 	sdbg_host = dev_to_sdebug_host(dev);
8876 
8877 	scsi_remove_host(sdbg_host->shost);
8878 
8879 	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
8880 				 dev_list) {
8881 		list_del(&sdbg_devinfo->dev_list);
8882 		kfree(sdbg_devinfo->zstate);
8883 		kfree(sdbg_devinfo);
8884 	}
8885 
8886 	scsi_host_put(sdbg_host->shost);
8887 }
8888 
8889 static const struct bus_type pseudo_lld_bus = {
8890 	.name = "pseudo",
8891 	.probe = sdebug_driver_probe,
8892 	.remove = sdebug_driver_remove,
8893 	.drv_groups = sdebug_drv_groups,
8894 };
8895