1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
4  *  Copyright (C) 1992  Eric Youngdale
5  *  Simulate a host adapter with 2 disks attached.  Do a lot of checking
6  *  to make sure that we are not getting blocks mixed up, and PANIC if
7  *  anything out of the ordinary is seen.
8  * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
9  *
10  * Copyright (C) 2001 - 2021 Douglas Gilbert
11  *
12  *  For documentation see http://sg.danny.cz/sg/scsi_debug.html
13  */
14 
15 
16 #define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
17 
18 #include <linux/module.h>
19 #include <linux/align.h>
20 #include <linux/kernel.h>
21 #include <linux/errno.h>
22 #include <linux/jiffies.h>
23 #include <linux/slab.h>
24 #include <linux/types.h>
25 #include <linux/string.h>
26 #include <linux/fs.h>
27 #include <linux/init.h>
28 #include <linux/proc_fs.h>
29 #include <linux/vmalloc.h>
30 #include <linux/moduleparam.h>
31 #include <linux/scatterlist.h>
32 #include <linux/blkdev.h>
33 #include <linux/crc-t10dif.h>
34 #include <linux/spinlock.h>
35 #include <linux/interrupt.h>
36 #include <linux/atomic.h>
37 #include <linux/hrtimer.h>
38 #include <linux/uuid.h>
39 #include <linux/t10-pi.h>
40 #include <linux/msdos_partition.h>
41 #include <linux/random.h>
42 #include <linux/xarray.h>
43 #include <linux/prefetch.h>
44 #include <linux/debugfs.h>
45 #include <linux/async.h>
46 #include <linux/cleanup.h>
47 
48 #include <net/checksum.h>
49 
50 #include <linux/unaligned.h>
51 
52 #include <scsi/scsi.h>
53 #include <scsi/scsi_cmnd.h>
54 #include <scsi/scsi_device.h>
55 #include <scsi/scsi_host.h>
56 #include <scsi/scsicam.h>
57 #include <scsi/scsi_eh.h>
58 #include <scsi/scsi_tcq.h>
59 #include <scsi/scsi_dbg.h>
60 
61 #include "sd.h"
62 #include "scsi_logging.h"
63 
64 /* make sure inq_product_rev string corresponds to this version */
65 #define SDEBUG_VERSION "0191"	/* format to fit INQUIRY revision field */
66 static const char *sdebug_version_date = "20210520";
67 
68 #define MY_NAME "scsi_debug"
69 
70 /* Additional Sense Code (ASC) */
71 #define NO_ADDITIONAL_SENSE 0x0
72 #define OVERLAP_ATOMIC_COMMAND_ASC 0x0
73 #define OVERLAP_ATOMIC_COMMAND_ASCQ 0x23
74 #define FILEMARK_DETECTED_ASCQ 0x1
75 #define EOP_EOM_DETECTED_ASCQ 0x2
76 #define BEGINNING_OF_P_M_DETECTED_ASCQ 0x4
77 #define EOD_DETECTED_ASCQ 0x5
78 #define LOGICAL_UNIT_NOT_READY 0x4
79 #define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
80 #define UNRECOVERED_READ_ERR 0x11
81 #define PARAMETER_LIST_LENGTH_ERR 0x1a
82 #define INVALID_OPCODE 0x20
83 #define LBA_OUT_OF_RANGE 0x21
84 #define INVALID_FIELD_IN_CDB 0x24
85 #define INVALID_FIELD_IN_PARAM_LIST 0x26
86 #define WRITE_PROTECTED 0x27
87 #define UA_READY_ASC 0x28
88 #define UA_RESET_ASC 0x29
89 #define UA_CHANGED_ASC 0x2a
90 #define TOO_MANY_IN_PARTITION_ASC 0x3b
91 #define TARGET_CHANGED_ASC 0x3f
92 #define LUNS_CHANGED_ASCQ 0x0e
93 #define INSUFF_RES_ASC 0x55
94 #define INSUFF_RES_ASCQ 0x3
95 #define POWER_ON_RESET_ASCQ 0x0
96 #define POWER_ON_OCCURRED_ASCQ 0x1
97 #define BUS_RESET_ASCQ 0x2	/* scsi bus reset occurred */
98 #define MODE_CHANGED_ASCQ 0x1	/* mode parameters changed */
99 #define CAPACITY_CHANGED_ASCQ 0x9
100 #define SAVING_PARAMS_UNSUP 0x39
101 #define TRANSPORT_PROBLEM 0x4b
102 #define THRESHOLD_EXCEEDED 0x5d
103 #define LOW_POWER_COND_ON 0x5e
104 #define MISCOMPARE_VERIFY_ASC 0x1d
105 #define MICROCODE_CHANGED_ASCQ 0x1	/* with TARGET_CHANGED_ASC */
106 #define MICROCODE_CHANGED_WO_RESET_ASCQ 0x16
107 #define WRITE_ERROR_ASC 0xc
108 #define UNALIGNED_WRITE_ASCQ 0x4
109 #define WRITE_BOUNDARY_ASCQ 0x5
110 #define READ_INVDATA_ASCQ 0x6
111 #define READ_BOUNDARY_ASCQ 0x7
112 #define ATTEMPT_ACCESS_GAP 0x9
113 #define INSUFF_ZONE_ASCQ 0xe
114 /* see drivers/scsi/sense_codes.h */
115 
116 /* Additional Sense Code Qualifier (ASCQ) */
117 #define ACK_NAK_TO 0x3
118 
119 /* Default values for driver parameters */
120 #define DEF_NUM_HOST   1
121 #define DEF_NUM_TGTS   1
122 #define DEF_MAX_LUNS   1
123 /* With these defaults, this driver will make 1 host with 1 target
124  * (id 0) containing 1 logical unit (lun 0). That is 1 device.
125  */
126 #define DEF_ATO 1
127 #define DEF_CDB_LEN 10
128 #define DEF_JDELAY   1		/* if > 0 unit is a jiffy */
129 #define DEF_DEV_SIZE_PRE_INIT   0
130 #define DEF_DEV_SIZE_MB   8
131 #define DEF_ZBC_DEV_SIZE_MB   128
132 #define DEF_DIF 0
133 #define DEF_DIX 0
134 #define DEF_PER_HOST_STORE false
135 #define DEF_D_SENSE   0
136 #define DEF_EVERY_NTH   0
137 #define DEF_FAKE_RW	0
138 #define DEF_GUARD 0
139 #define DEF_HOST_LOCK 0
140 #define DEF_LBPU 0
141 #define DEF_LBPWS 0
142 #define DEF_LBPWS10 0
143 #define DEF_LBPRZ 1
144 #define DEF_LOWEST_ALIGNED 0
145 #define DEF_NDELAY   0		/* if > 0 unit is a nanosecond */
146 #define DEF_NO_LUN_0   0
147 #define DEF_NUM_PARTS   0
148 #define DEF_OPTS   0
149 #define DEF_OPT_BLKS 1024
150 #define DEF_PHYSBLK_EXP 0
151 #define DEF_OPT_XFERLEN_EXP 0
152 #define DEF_PTYPE   TYPE_DISK
153 #define DEF_RANDOM false
154 #define DEF_REMOVABLE false
155 #define DEF_SCSI_LEVEL   7    /* INQUIRY, byte2 [6->SPC-4; 7->SPC-5] */
156 #define DEF_SECTOR_SIZE 512
157 #define DEF_UNMAP_ALIGNMENT 0
158 #define DEF_UNMAP_GRANULARITY 1
159 #define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
160 #define DEF_UNMAP_MAX_DESC 256
161 #define DEF_VIRTUAL_GB   0
162 #define DEF_VPD_USE_HOSTNO 1
163 #define DEF_WRITESAME_LENGTH 0xFFFF
164 #define DEF_ATOMIC_WR 0
165 #define DEF_ATOMIC_WR_MAX_LENGTH 128
166 #define DEF_ATOMIC_WR_ALIGN 2
167 #define DEF_ATOMIC_WR_GRAN 2
168 #define DEF_ATOMIC_WR_MAX_LENGTH_BNDRY (DEF_ATOMIC_WR_MAX_LENGTH)
169 #define DEF_ATOMIC_WR_MAX_BNDRY 128
170 #define DEF_STRICT 0
171 #define DEF_STATISTICS false
172 #define DEF_SUBMIT_QUEUES 1
173 #define DEF_TUR_MS_TO_READY 0
174 #define DEF_UUID_CTL 0
175 #define JDELAY_OVERRIDDEN -9999
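
/*
 * Most of the DEF_* values above seed a module parameter of the same
 * lower-case name. An illustrative invocation (sizes chosen arbitrarily):
 *
 *	modprobe scsi_debug num_tgts=2 max_luns=4 dev_size_mb=64
 *
 * creates one pseudo host with 2 targets of 4 LUNs each, all backed by a
 * single 64 MiB ram store by default.
 */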
176 
177 /* Default parameters for ZBC drives */
178 #define DEF_ZBC_ZONE_SIZE_MB	128
179 #define DEF_ZBC_MAX_OPEN_ZONES	8
180 #define DEF_ZBC_NR_CONV_ZONES	1
181 
182 /* Default parameters for tape drives */
183 #define TAPE_DEF_DENSITY  0x0
184 #define TAPE_BAD_DENSITY  0x65
185 #define TAPE_DEF_BLKSIZE  0
186 #define TAPE_MIN_BLKSIZE  512
187 #define TAPE_MAX_BLKSIZE  1048576
188 #define TAPE_EW 20
189 #define TAPE_MAX_PARTITIONS 2
190 #define TAPE_UNITS 10000
191 #define TAPE_PARTITION_1_UNITS 1000
192 
193 /* The tape block data definitions */
194 #define TAPE_BLOCK_FM_FLAG   ((u32)0x1 << 30)
195 #define TAPE_BLOCK_EOD_FLAG  ((u32)0x2 << 30)
196 #define TAPE_BLOCK_MARK_MASK ((u32)0x3 << 30)
197 #define TAPE_BLOCK_SIZE_MASK (~TAPE_BLOCK_MARK_MASK)
198 #define TAPE_BLOCK_MARK(a) (a & TAPE_BLOCK_MARK_MASK)
199 #define TAPE_BLOCK_SIZE(a) (a & TAPE_BLOCK_SIZE_MASK)
200 #define IS_TAPE_BLOCK_FM(a)   ((a & TAPE_BLOCK_FM_FLAG) != 0)
201 #define IS_TAPE_BLOCK_EOD(a)  ((a & TAPE_BLOCK_EOD_FLAG) != 0)
202 
203 struct tape_block {
204 	u32 fl_size;
205 	unsigned char data[4];
206 };
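
/*
 * The top two bits of fl_size carry the mark flags and the low 30 bits the
 * block size. A worked example (illustrative values):
 *
 *	u32 fl = TAPE_BLOCK_FM_FLAG | 512;
 *
 *	TAPE_BLOCK_SIZE(fl)	--> 512
 *	IS_TAPE_BLOCK_FM(fl)	--> true
 *	IS_TAPE_BLOCK_EOD(fl)	--> false
 */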
207 
208 /* Flags for sense data */
209 #define SENSE_FLAG_FILEMARK  0x80
210 #define SENSE_FLAG_EOM 0x40
211 #define SENSE_FLAG_ILI 0x20
212 
213 #define SDEBUG_LUN_0_VAL 0
214 
215 /* bit mask values for sdebug_opts */
216 #define SDEBUG_OPT_NOISE		1
217 #define SDEBUG_OPT_MEDIUM_ERR		2
218 #define SDEBUG_OPT_TIMEOUT		4
219 #define SDEBUG_OPT_RECOVERED_ERR	8
220 #define SDEBUG_OPT_TRANSPORT_ERR	16
221 #define SDEBUG_OPT_DIF_ERR		32
222 #define SDEBUG_OPT_DIX_ERR		64
223 #define SDEBUG_OPT_MAC_TIMEOUT		128
224 #define SDEBUG_OPT_SHORT_TRANSFER	0x100
225 #define SDEBUG_OPT_Q_NOISE		0x200
226 #define SDEBUG_OPT_ALL_TSF		0x400	/* ignore */
227 #define SDEBUG_OPT_RARE_TSF		0x800
228 #define SDEBUG_OPT_N_WCE		0x1000
229 #define SDEBUG_OPT_RESET_NOISE		0x2000
230 #define SDEBUG_OPT_NO_CDB_NOISE		0x4000
231 #define SDEBUG_OPT_HOST_BUSY		0x8000
232 #define SDEBUG_OPT_CMD_ABORT		0x10000
233 #define SDEBUG_OPT_UNALIGNED_WRITE	0x20000
234 #define SDEBUG_OPT_ALL_NOISE (SDEBUG_OPT_NOISE | SDEBUG_OPT_Q_NOISE | \
235 			      SDEBUG_OPT_RESET_NOISE)
236 #define SDEBUG_OPT_ALL_INJECTING (SDEBUG_OPT_RECOVERED_ERR | \
237 				  SDEBUG_OPT_TRANSPORT_ERR | \
238 				  SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR | \
239 				  SDEBUG_OPT_SHORT_TRANSFER | \
240 				  SDEBUG_OPT_HOST_BUSY | \
241 				  SDEBUG_OPT_CMD_ABORT | \
242 				  SDEBUG_OPT_UNALIGNED_WRITE)
243 #define SDEBUG_OPT_RECOV_DIF_DIX (SDEBUG_OPT_RECOVERED_ERR | \
244 				  SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR)
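
/*
 * sdebug_opts (the "opts" parameter) is an OR of the mask values above.
 * For example, an illustrative setting (not a recommendation):
 *
 *	opts = SDEBUG_OPT_NOISE | SDEBUG_OPT_MEDIUM_ERR	(i.e. opts=3)
 *
 * traces incoming commands and also simulates medium errors on reads of
 * the sector named by OPT_MEDIUM_ERR_ADDR below.
 */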
245 
246 /* As indicated in SAM-5 and SPC-4, Unit Attentions (UAs) are returned in
247  * priority order. In the subset implemented here lower numbers have higher
248  * priority. The UA numbers should be a sequence starting from 0 with
249  * SDEBUG_NUM_UAS being 1 higher than the highest numbered UA. */
250 #define SDEBUG_UA_POR 0		/* Power on, reset, or bus device reset */
251 #define SDEBUG_UA_POOCCUR 1	/* Power on occurred */
252 #define SDEBUG_UA_BUS_RESET 2
253 #define SDEBUG_UA_MODE_CHANGED 3
254 #define SDEBUG_UA_CAPACITY_CHANGED 4
255 #define SDEBUG_UA_LUNS_CHANGED 5
256 #define SDEBUG_UA_MICROCODE_CHANGED 6	/* simulate firmware change */
257 #define SDEBUG_UA_MICROCODE_CHANGED_WO_RESET 7
258 #define SDEBUG_UA_NOT_READY_TO_READY 8
259 #define SDEBUG_NUM_UAS 9
260 
261 /* when SDEBUG_OPT_MEDIUM_ERR is set in sdebug_opts, a medium error is
262  * simulated at this sector on read commands: */
263 #define OPT_MEDIUM_ERR_ADDR   0x1234 /* that's sector 4660 in decimal */
264 #define OPT_MEDIUM_ERR_NUM    10     /* number of consecutive medium errs */
265 
266 /* SDEBUG_CANQUEUE is the maximum number of commands that can be queued
267  * (for response) per submit queue at one time. Can be reduced by max_queue
268  * option. Command responses are not queued when jdelay=0 and ndelay=0. The
269  * per-device DEF_CMD_PER_LUN can be changed via sysfs:
270  * /sys/class/scsi_device/<h:c:t:l>/device/queue_depth
271  * but cannot exceed SDEBUG_CANQUEUE.
272  */
273 #define SDEBUG_CANQUEUE_WORDS  3	/* a "word" here is BITS_PER_LONG bits */
274 #define SDEBUG_CANQUEUE  (SDEBUG_CANQUEUE_WORDS * BITS_PER_LONG)
275 #define DEF_CMD_PER_LUN  SDEBUG_CANQUEUE
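
/*
 * So on a 64-bit build SDEBUG_CANQUEUE is 3 * 64 = 192 commands per submit
 * queue (3 * 32 = 96 where BITS_PER_LONG is 32).
 */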
276 
277 /* UA - Unit Attention; SA - Service Action; SSU - Start Stop Unit */
278 #define F_D_IN			1	/* Data-in command (e.g. READ) */
279 #define F_D_OUT			2	/* Data-out command (e.g. WRITE) */
280 #define F_D_OUT_MAYBE		4	/* WRITE SAME, NDOB bit */
281 #define F_D_UNKN		8
282 #define F_RL_WLUN_OK		0x10	/* allowed with REPORT LUNS W-LUN */
283 #define F_SKIP_UA		0x20	/* bypass UAs (e.g. INQUIRY command) */
284 #define F_DELAY_OVERR		0x40	/* for commands like INQUIRY */
285 #define F_SA_LOW		0x80	/* SA is in cdb byte 1, bits 4 to 0 */
286 #define F_SA_HIGH		0x100	/* SA is in cdb bytes 8 and 9 */
287 #define F_INV_OP		0x200	/* invalid opcode (not supported) */
288 #define F_FAKE_RW		0x400	/* bypass resp_*() when fake_rw set */
289 #define F_M_ACCESS		0x800	/* media access, reacts to SSU state */
290 #define F_SSU_DELAY		0x1000	/* SSU command delay (long-ish) */
291 #define F_SYNC_DELAY		0x2000	/* SYNCHRONIZE CACHE delay */
292 
293 /* Useful combinations of the above flags */
294 #define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
295 #define FF_MEDIA_IO (F_M_ACCESS | F_FAKE_RW)
296 #define FF_SA (F_SA_HIGH | F_SA_LOW)
297 #define F_LONG_DELAY		(F_SSU_DELAY | F_SYNC_DELAY)
298 
299 /* Device selection bit mask */
300 #define DS_ALL     0xffffffff
301 #define DS_SBC     (1 << TYPE_DISK)
302 #define DS_SSC     (1 << TYPE_TAPE)
303 #define DS_ZBC     (1 << TYPE_ZBC)
304 
305 #define DS_NO_SSC  (DS_ALL & ~DS_SSC)
306 
307 #define SDEBUG_MAX_PARTS 4
308 
309 #define SDEBUG_MAX_CMD_LEN 32
310 
311 #define SDEB_XA_NOT_IN_USE XA_MARK_1
312 
313 /* Zone types (zbcr05 table 25) */
314 enum sdebug_z_type {
315 	ZBC_ZTYPE_CNV	= 0x1,
316 	ZBC_ZTYPE_SWR	= 0x2,
317 	ZBC_ZTYPE_SWP	= 0x3,
318 	/* ZBC_ZTYPE_SOBR = 0x4, */
319 	ZBC_ZTYPE_GAP	= 0x5,
320 };
321 
322 /* enumeration names taken from table 26, zbcr05 */
323 enum sdebug_z_cond {
324 	ZBC_NOT_WRITE_POINTER	= 0x0,
325 	ZC1_EMPTY		= 0x1,
326 	ZC2_IMPLICIT_OPEN	= 0x2,
327 	ZC3_EXPLICIT_OPEN	= 0x3,
328 	ZC4_CLOSED		= 0x4,
329 	ZC6_READ_ONLY		= 0xd,
330 	ZC5_FULL		= 0xe,
331 	ZC7_OFFLINE		= 0xf,
332 };
333 
334 struct sdeb_zone_state {	/* ZBC: per zone state */
335 	enum sdebug_z_type z_type;
336 	enum sdebug_z_cond z_cond;
337 	bool z_non_seq_resource;
338 	unsigned int z_size;
339 	sector_t z_start;
340 	sector_t z_wp;
341 };
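
/*
 * An illustrative initial state for one sequential-write-required zone
 * (field values are an example only, not taken from the code below):
 *
 *	struct sdeb_zone_state zs = {
 *		.z_type  = ZBC_ZTYPE_SWR,
 *		.z_cond  = ZC1_EMPTY,
 *		.z_size  = 0x40000,		(zone size in sectors)
 *		.z_start = 0,
 *		.z_wp    = 0,			(write pointer == z_start)
 *	};
 */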
342 
343 enum sdebug_err_type {
344 	ERR_TMOUT_CMD		= 0,	/* make specific scsi command timeout */
345 	ERR_FAIL_QUEUE_CMD	= 1,	/* make specific scsi command's */
346 					/* queuecmd return failed */
347 	ERR_FAIL_CMD		= 2,	/* make specific scsi command's */
348 					/* queuecmd return succeed but */
349 					/* with errors set in scsi_cmnd */
350 	ERR_ABORT_CMD_FAILED	= 3,	/* control return FAILED from */
351 					/* scsi_debug_abort() */
352 	ERR_LUN_RESET_FAILED	= 4,	/* control return FAILED from */
353 					/* scsi_debug_device_reset() */
354 };
355 
356 struct sdebug_err_inject {
357 	int type;
358 	struct list_head list;
359 	int cnt;
360 	unsigned char cmd;
361 	struct rcu_head rcu;
362 
363 	union {
364 		/*
365 		 * For ERR_FAIL_QUEUE_CMD
366 		 */
367 		int queuecmd_ret;
368 
369 		/*
370 		 * For ERR_FAIL_CMD
371 		 */
372 		struct {
373 			unsigned char host_byte;
374 			unsigned char driver_byte;
375 			unsigned char status_byte;
376 			unsigned char sense_key;
377 			unsigned char asc;
378 			unsigned char asq;
379 		};
380 	};
381 };
382 
383 struct sdebug_dev_info {
384 	struct list_head dev_list;
385 	unsigned int channel;
386 	unsigned int target;
387 	u64 lun;
388 	uuid_t lu_name;
389 	struct sdebug_host_info *sdbg_host;
390 	unsigned long uas_bm[1];
391 	atomic_t stopped;	/* 1: by SSU, 2: device start */
392 	bool used;
393 
394 	/* For ZBC devices */
395 	bool zoned;
396 	unsigned int zcap;
397 	unsigned int zsize;
398 	unsigned int zsize_shift;
399 	unsigned int nr_zones;
400 	unsigned int nr_conv_zones;
401 	unsigned int nr_seq_zones;
402 	unsigned int nr_imp_open;
403 	unsigned int nr_exp_open;
404 	unsigned int nr_closed;
405 	unsigned int max_open;
406 	ktime_t create_ts;	/* time since bootup that this device was created */
407 	struct sdeb_zone_state *zstate;
408 
409 	/* For tapes */
410 	unsigned int tape_blksize;
411 	unsigned int tape_density;
412 	unsigned char tape_partition;
413 	unsigned char tape_nbr_partitions;
414 	unsigned char tape_pending_nbr_partitions;
415 	unsigned int tape_pending_part_0_size;
416 	unsigned int tape_pending_part_1_size;
417 	unsigned char tape_dce;
418 	unsigned int tape_location[TAPE_MAX_PARTITIONS];
419 	unsigned int tape_eop[TAPE_MAX_PARTITIONS];
420 	struct tape_block *tape_blocks[TAPE_MAX_PARTITIONS];
421 
422 	struct dentry *debugfs_entry;
423 	struct spinlock list_lock;
424 	struct list_head inject_err_list;
425 };
426 
427 struct sdebug_target_info {
428 	bool reset_fail;
429 	struct dentry *debugfs_entry;
430 };
431 
432 struct sdebug_host_info {
433 	struct list_head host_list;
434 	int si_idx;	/* sdeb_store_info (per host) xarray index */
435 	struct Scsi_Host *shost;
436 	struct device dev;
437 	struct list_head dev_info_list;
438 };
439 
440 /* There is an xarray of pointers to this struct's objects, one per host */
441 struct sdeb_store_info {
442 	rwlock_t macc_data_lck;	/* for media data access on this store */
443 	rwlock_t macc_meta_lck;	/* for atomic media meta access on this store */
444 	rwlock_t macc_sector_lck;	/* per-sector media data access on this store */
445 	u8 *storep;		/* user data storage (ram) */
446 	struct t10_pi_tuple *dif_storep; /* protection info */
447 	void *map_storep;	/* provisioning map */
448 };
449 
450 #define dev_to_sdebug_host(d)	\
451 	container_of(d, struct sdebug_host_info, dev)
452 
453 #define shost_to_sdebug_host(shost)	\
454 	dev_to_sdebug_host(shost->dma_dev)
455 
456 enum sdeb_defer_type {SDEB_DEFER_NONE = 0, SDEB_DEFER_HRT = 1,
457 		      SDEB_DEFER_WQ = 2, SDEB_DEFER_POLL = 3};
458 
459 struct sdebug_defer {
460 	struct hrtimer hrt;
461 	struct execute_work ew;
462 	ktime_t cmpl_ts;	/* time since boot to complete this cmd */
463 	int issuing_cpu;
464 	bool aborted;	/* true when blk_abort_request() already called */
465 	enum sdeb_defer_type defer_t;
466 };
467 
468 struct sdebug_scsi_cmd {
469 	spinlock_t   lock;
470 	struct sdebug_defer sd_dp;
471 };
472 
473 static atomic_t sdebug_cmnd_count;   /* number of incoming commands */
474 static atomic_t sdebug_completions;  /* count of deferred completions */
475 static atomic_t sdebug_miss_cpus;    /* submission + completion cpus differ */
476 static atomic_t sdebug_a_tsf;	     /* 'almost task set full' counter */
477 static atomic_t sdeb_inject_pending;
478 static atomic_t sdeb_mq_poll_count;  /* bumped when mq_poll returns > 0 */
479 
480 struct opcode_info_t {
481 	u8 num_attached;	/* 0 if this is it (i.e. a leaf); use 0xff */
482 				/* for terminating element */
483 	u8 opcode;		/* if num_attached > 0, preferred */
484 	u16 sa;			/* service action */
485 	u32 devsel;		/* device type mask for this definition */
486 	u32 flags;		/* OR-ed set of SDEB_F_* */
487 	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
488 	const struct opcode_info_t *arrp;  /* num_attached elements or NULL */
489 	u8 len_mask[16];	/* len_mask[0]-->cdb_len, then mask for cdb */
490 				/* 1 to min(cdb_len, 15); ignore cdb[15...] */
491 };
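
/*
 * len_mask[0] holds the expected cdb length and len_mask[1..] the bits
 * that may legally be set in each cdb byte. A sketch of the check applied
 * in strict mode (simplified from the real parser further below):
 *
 *	for (k = 1; k < oip->len_mask[0] && k < 16; ++k)
 *		if (cdb[k] & ~oip->len_mask[k])
 *			--> ILLEGAL REQUEST, INVALID FIELD IN CDB
 */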
492 
493 /* SCSI opcodes (first byte of cdb) of interest mapped onto these indexes */
494 enum sdeb_opcode_index {
495 	SDEB_I_INVALID_OPCODE =	0,
496 	SDEB_I_INQUIRY = 1,
497 	SDEB_I_REPORT_LUNS = 2,
498 	SDEB_I_REQUEST_SENSE = 3,
499 	SDEB_I_TEST_UNIT_READY = 4,
500 	SDEB_I_MODE_SENSE = 5,		/* 6, 10 */
501 	SDEB_I_MODE_SELECT = 6,		/* 6, 10 */
502 	SDEB_I_LOG_SENSE = 7,
503 	SDEB_I_READ_CAPACITY = 8,	/* 10; 16 is in SA_IN(16) */
504 	SDEB_I_READ = 9,		/* 6, 10, 12, 16 */
505 	SDEB_I_WRITE = 10,		/* 6, 10, 12, 16 */
506 	SDEB_I_START_STOP = 11,
507 	SDEB_I_SERV_ACT_IN_16 = 12,	/* add ...SERV_ACT_IN_12 if needed */
508 	SDEB_I_SERV_ACT_OUT_16 = 13,	/* add ...SERV_ACT_OUT_12 if needed */
509 	SDEB_I_MAINT_IN = 14,
510 	SDEB_I_MAINT_OUT = 15,
511 	SDEB_I_VERIFY = 16,		/* VERIFY(10), VERIFY(16) */
512 	SDEB_I_VARIABLE_LEN = 17,	/* READ(32), WRITE(32), WR_SCAT(32) */
513 	SDEB_I_RESERVE = 18,		/* 6, 10 */
514 	SDEB_I_RELEASE = 19,		/* 6, 10 */
515 	SDEB_I_ALLOW_REMOVAL = 20,	/* PREVENT ALLOW MEDIUM REMOVAL */
516 	SDEB_I_REZERO_UNIT = 21,	/* REWIND in SSC */
517 	SDEB_I_ATA_PT = 22,		/* 12, 16 */
518 	SDEB_I_SEND_DIAG = 23,
519 	SDEB_I_UNMAP = 24,
520 	SDEB_I_WRITE_BUFFER = 25,
521 	SDEB_I_WRITE_SAME = 26,		/* 10, 16 */
522 	SDEB_I_SYNC_CACHE = 27,		/* 10, 16 */
523 	SDEB_I_COMP_WRITE = 28,
524 	SDEB_I_PRE_FETCH = 29,		/* 10, 16 */
525 	SDEB_I_ZONE_OUT = 30,		/* 0x94+SA; includes no data xfer */
526 	SDEB_I_ZONE_IN = 31,		/* 0x95+SA; all have data-in */
527 	SDEB_I_ATOMIC_WRITE_16 = 32,
528 	SDEB_I_READ_BLOCK_LIMITS = 33,
529 	SDEB_I_LOCATE = 34,
530 	SDEB_I_WRITE_FILEMARKS = 35,
531 	SDEB_I_SPACE = 36,
532 	SDEB_I_FORMAT_MEDIUM = 37,
533 	SDEB_I_ERASE = 38,
534 	SDEB_I_LAST_ELEM_P1 = 39,	/* keep this last (previous + 1) */
535 };
536 
537 
538 static const unsigned char opcode_ind_arr[256] = {
539 /* 0x0; 0x0->0x1f: 6 byte cdbs */
540 	SDEB_I_TEST_UNIT_READY, SDEB_I_REZERO_UNIT, 0, SDEB_I_REQUEST_SENSE,
541 	    SDEB_I_FORMAT_MEDIUM, SDEB_I_READ_BLOCK_LIMITS, 0, 0,
542 	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, 0,
543 	SDEB_I_WRITE_FILEMARKS, SDEB_I_SPACE, SDEB_I_INQUIRY, 0, 0,
544 	    SDEB_I_MODE_SELECT, SDEB_I_RESERVE, SDEB_I_RELEASE,
545 	0, SDEB_I_ERASE, SDEB_I_MODE_SENSE, SDEB_I_START_STOP, 0, SDEB_I_SEND_DIAG,
546 	    SDEB_I_ALLOW_REMOVAL, 0,
547 /* 0x20; 0x20->0x3f: 10 byte cdbs */
548 	0, 0, 0, 0, 0, SDEB_I_READ_CAPACITY, 0, 0,
549 	SDEB_I_READ, 0, SDEB_I_WRITE, SDEB_I_LOCATE, 0, 0, 0, SDEB_I_VERIFY,
550 	0, 0, 0, 0, SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, 0,
551 	0, 0, 0, SDEB_I_WRITE_BUFFER, 0, 0, 0, 0,
552 /* 0x40; 0x40->0x5f: 10 byte cdbs */
553 	0, SDEB_I_WRITE_SAME, SDEB_I_UNMAP, 0, 0, 0, 0, 0,
554 	0, 0, 0, 0, 0, SDEB_I_LOG_SENSE, 0, 0,
555 	0, 0, 0, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
556 	    SDEB_I_RELEASE,
557 	0, 0, SDEB_I_MODE_SENSE, 0, 0, 0, 0, 0,
558 /* 0x60; 0x60->0x7d are reserved, 0x7e is "extended cdb" */
559 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
560 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
561 	0, SDEB_I_VARIABLE_LEN,
562 /* 0x80; 0x80->0x9f: 16 byte cdbs */
563 	0, 0, 0, 0, 0, SDEB_I_ATA_PT, 0, 0,
564 	SDEB_I_READ, SDEB_I_COMP_WRITE, SDEB_I_WRITE, 0,
565 	0, 0, 0, SDEB_I_VERIFY,
566 	SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, SDEB_I_WRITE_SAME,
567 	SDEB_I_ZONE_OUT, SDEB_I_ZONE_IN, 0, 0,
568 	0, 0, 0, 0,
569 	SDEB_I_ATOMIC_WRITE_16, 0, SDEB_I_SERV_ACT_IN_16, SDEB_I_SERV_ACT_OUT_16,
570 /* 0xa0; 0xa0->0xbf: 12 byte cdbs */
571 	SDEB_I_REPORT_LUNS, SDEB_I_ATA_PT, 0, SDEB_I_MAINT_IN,
572 	     SDEB_I_MAINT_OUT, 0, 0, 0,
573 	SDEB_I_READ, 0 /* SDEB_I_SERV_ACT_OUT_12 */, SDEB_I_WRITE,
574 	     0 /* SDEB_I_SERV_ACT_IN_12 */, 0, 0, 0, 0,
575 	0, 0, 0, 0, 0, 0, 0, 0,
576 	0, 0, 0, 0, 0, 0, 0, 0,
577 /* 0xc0; 0xc0->0xff: vendor specific */
578 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
579 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
580 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
581 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
582 };
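
/*
 * Command dispatch is a two-step lookup; a sketch using READ(10) as the
 * example opcode:
 *
 *	u8 opcode = scp->cmnd[0];			(0x28 here)
 *	int idx = opcode_ind_arr[opcode];		(--> SDEB_I_READ)
 *	const struct opcode_info_t *oip = &opcode_info_arr[idx];
 *
 * oip names READ(16); its arrp then points at read_iarr, which holds the
 * other READ cdb sizes, 0x28 among them.
 */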
583 
584 /*
585  * The following "response" functions return the SCSI mid-level's 4 byte
586  * tuple-in-an-int. To handle commands with an IMMED bit, for a faster
587  * command completion, they can mask their return value with
588  * SDEG_RES_IMMED_MASK.
589  */
590 #define SDEG_RES_IMMED_MASK 0x40000000
591 
592 static int resp_inquiry(struct scsi_cmnd *, struct sdebug_dev_info *);
593 static int resp_report_luns(struct scsi_cmnd *, struct sdebug_dev_info *);
594 static int resp_requests(struct scsi_cmnd *, struct sdebug_dev_info *);
595 static int resp_mode_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
596 static int resp_mode_select(struct scsi_cmnd *, struct sdebug_dev_info *);
597 static int resp_log_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
598 static int resp_readcap(struct scsi_cmnd *, struct sdebug_dev_info *);
599 static int resp_read_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
600 static int resp_read_tape(struct scsi_cmnd *, struct sdebug_dev_info *);
601 static int resp_write_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
602 static int resp_write_tape(struct scsi_cmnd *, struct sdebug_dev_info *);
603 static int resp_write_scat(struct scsi_cmnd *, struct sdebug_dev_info *);
604 static int resp_start_stop(struct scsi_cmnd *, struct sdebug_dev_info *);
605 static int resp_readcap16(struct scsi_cmnd *, struct sdebug_dev_info *);
606 static int resp_get_lba_status(struct scsi_cmnd *, struct sdebug_dev_info *);
607 static int resp_get_stream_status(struct scsi_cmnd *scp,
608 				  struct sdebug_dev_info *devip);
609 static int resp_report_tgtpgs(struct scsi_cmnd *, struct sdebug_dev_info *);
610 static int resp_unmap(struct scsi_cmnd *, struct sdebug_dev_info *);
611 static int resp_rsup_opcodes(struct scsi_cmnd *, struct sdebug_dev_info *);
612 static int resp_rsup_tmfs(struct scsi_cmnd *, struct sdebug_dev_info *);
613 static int resp_verify(struct scsi_cmnd *, struct sdebug_dev_info *);
614 static int resp_write_same_10(struct scsi_cmnd *, struct sdebug_dev_info *);
615 static int resp_write_same_16(struct scsi_cmnd *, struct sdebug_dev_info *);
616 static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *);
617 static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *);
618 static int resp_sync_cache(struct scsi_cmnd *, struct sdebug_dev_info *);
619 static int resp_pre_fetch(struct scsi_cmnd *, struct sdebug_dev_info *);
620 static int resp_report_zones(struct scsi_cmnd *, struct sdebug_dev_info *);
621 static int resp_atomic_write(struct scsi_cmnd *, struct sdebug_dev_info *);
622 static int resp_open_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
623 static int resp_close_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
624 static int resp_finish_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
625 static int resp_rwp_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
626 static int resp_read_blklimits(struct scsi_cmnd *, struct sdebug_dev_info *);
627 static int resp_locate(struct scsi_cmnd *, struct sdebug_dev_info *);
628 static int resp_write_filemarks(struct scsi_cmnd *, struct sdebug_dev_info *);
629 static int resp_space(struct scsi_cmnd *, struct sdebug_dev_info *);
630 static int resp_read_position(struct scsi_cmnd *, struct sdebug_dev_info *);
631 static int resp_rewind(struct scsi_cmnd *, struct sdebug_dev_info *);
632 static int resp_format_medium(struct scsi_cmnd *, struct sdebug_dev_info *);
633 static int resp_erase(struct scsi_cmnd *, struct sdebug_dev_info *);
634 
635 static int sdebug_do_add_host(bool mk_new_store);
636 static int sdebug_add_host_helper(int per_host_idx);
637 static void sdebug_do_remove_host(bool the_end);
638 static int sdebug_add_store(void);
639 static void sdebug_erase_store(int idx, struct sdeb_store_info *sip);
640 static void sdebug_erase_all_stores(bool apart_from_first);
641 
642 /*
643  * The following are overflow arrays for cdbs that "hit" the same index in
644  * the opcode_info_arr array. The most time sensitive (or commonly used) cdb
645  * should be placed in opcode_info_arr[], the others should be placed here.
646  */
647 static const struct opcode_info_t msense_iarr[] = {
648 	{0, 0x1a, 0, DS_ALL, F_D_IN, NULL, NULL,
649 	    {6,  0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
650 };
651 
652 static const struct opcode_info_t mselect_iarr[] = {
653 	{0, 0x15, 0, DS_ALL, F_D_OUT, NULL, NULL,
654 	    {6,  0xf1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
655 };
656 
657 static const struct opcode_info_t read_iarr[] = {
658 	{0, 0x28, 0, DS_NO_SSC, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(10) */
659 	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
660 	     0, 0, 0, 0} },
661 	{0, 0x8, 0, DS_NO_SSC, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL, /* READ(6) disk */
662 	    {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
663 	{0, 0x8, 0, DS_SSC, F_D_IN | FF_MEDIA_IO, resp_read_tape, NULL, /* READ(6) tape */
664 	    {6,  0x03, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
665 	{0, 0xa8, 0, DS_NO_SSC, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(12) */
666 	    {12,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf,
667 	     0xc7, 0, 0, 0, 0} },
668 };
669 
670 static const struct opcode_info_t write_iarr[] = {
671 	{0, 0x2a, 0, DS_NO_SSC, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(10) */
672 	    NULL, {10,  0xfb, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7,
673 		   0, 0, 0, 0, 0, 0} },
674 	{0, 0xa, 0, DS_NO_SSC, F_D_OUT | FF_MEDIA_IO, resp_write_dt0, /* WRITE(6) disk */
675 	    NULL, {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0,
676 		   0, 0, 0} },
677 	{0, 0xa, 0, DS_SSC, F_D_OUT | FF_MEDIA_IO, resp_write_tape, /* WRITE(6) tape */
678 	    NULL, {6,  0x01, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0,
679 		   0, 0, 0} },
680 	{0, 0xaa, 0, DS_NO_SSC, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(12) */
681 	    NULL, {12,  0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
682 		   0xbf, 0xc7, 0, 0, 0, 0} },
683 };
684 
685 static const struct opcode_info_t verify_iarr[] = {
686 	{0, 0x2f, 0, DS_NO_SSC, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,/* VERIFY(10) */
687 	    NULL, {10,  0xf7, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xff, 0xff, 0xc7,
688 		   0, 0, 0, 0, 0, 0} },
689 };
690 
691 static const struct opcode_info_t sa_in_16_iarr[] = {
692 	{0, 0x9e, 0x12, DS_NO_SSC, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL,
693 	    {16,  0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
694 	     0xff, 0xff, 0xff, 0, 0xc7} },	/* GET LBA STATUS(16) */
695 	{0, 0x9e, 0x16, DS_NO_SSC, F_SA_LOW | F_D_IN, resp_get_stream_status, NULL,
696 	    {16, 0x16, 0, 0, 0xff, 0xff, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff,
697 	     0, 0} },	/* GET STREAM STATUS */
698 };
699 
700 static const struct opcode_info_t vl_iarr[] = {	/* VARIABLE LENGTH */
701 	{0, 0x7f, 0xb, DS_NO_SSC, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_dt0,
702 	    NULL, {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0xb, 0xfa,
703 		   0, 0xff, 0xff, 0xff, 0xff} },	/* WRITE(32) */
704 	{0, 0x7f, 0x11, DS_NO_SSC, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
705 	    NULL, {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x11, 0xf8,
706 		   0, 0xff, 0xff, 0x0, 0x0} },	/* WRITE SCATTERED(32) */
707 };
708 
709 static const struct opcode_info_t maint_in_iarr[] = {	/* MAINT IN */
710 	{0, 0xa3, 0xc, DS_ALL, F_SA_LOW | F_D_IN, resp_rsup_opcodes, NULL,
711 	    {12,  0xc, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
712 	     0xc7, 0, 0, 0, 0} }, /* REPORT SUPPORTED OPERATION CODES */
713 	{0, 0xa3, 0xd, DS_ALL, F_SA_LOW | F_D_IN, resp_rsup_tmfs, NULL,
714 	    {12,  0xd, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
715 	     0, 0} },	/* REPORTED SUPPORTED TASK MANAGEMENT FUNCTIONS */
716 };
717 
718 static const struct opcode_info_t write_same_iarr[] = {
719 	{0, 0x93, 0, DS_NO_SSC, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_write_same_16, NULL,
720 	    {16,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
721 	     0xff, 0xff, 0xff, 0x3f, 0xc7} },		/* WRITE SAME(16) */
722 };
723 
724 static const struct opcode_info_t reserve_iarr[] = {
725 	{0, 0x16, 0, DS_ALL, F_D_OUT, NULL, NULL,	/* RESERVE(6) */
726 	    {6,  0x1f, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
727 };
728 
729 static const struct opcode_info_t release_iarr[] = {
730 	{0, 0x17, 0, DS_ALL, F_D_OUT, NULL, NULL,	/* RELEASE(6) */
731 	    {6,  0x1f, 0xff, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
732 };
733 
734 static const struct opcode_info_t sync_cache_iarr[] = {
735 	{0, 0x91, 0, DS_NO_SSC, F_SYNC_DELAY | F_M_ACCESS, resp_sync_cache, NULL,
736 	    {16,  0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
737 	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* SYNC_CACHE (16) */
738 };
739 
740 static const struct opcode_info_t pre_fetch_iarr[] = {
741 	{0, 0x90, 0, DS_NO_SSC, F_SYNC_DELAY | FF_MEDIA_IO, resp_pre_fetch, NULL,
742 	    {16,  0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
743 	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* PRE-FETCH (16) */
744 	{0, 0x34, 0, DS_SSC, F_SYNC_DELAY | FF_MEDIA_IO, resp_read_position, NULL,
745 	    {10,  0x1f, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xc7, 0, 0,
746 	     0, 0, 0, 0} },				/* READ POSITION (10) */
747 };
748 
749 static const struct opcode_info_t zone_out_iarr[] = {	/* ZONE OUT(16) */
750 	{0, 0x94, 0x1, DS_NO_SSC, F_SA_LOW | F_M_ACCESS, resp_close_zone, NULL,
751 	    {16, 0x1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
752 	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },	/* CLOSE ZONE */
753 	{0, 0x94, 0x2, DS_NO_SSC, F_SA_LOW | F_M_ACCESS, resp_finish_zone, NULL,
754 	    {16, 0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
755 	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },	/* FINISH ZONE */
756 	{0, 0x94, 0x4, DS_NO_SSC, F_SA_LOW | F_M_ACCESS, resp_rwp_zone, NULL,
757 	    {16, 0x4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
758 	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },  /* RESET WRITE POINTER */
759 };
760 
761 static const struct opcode_info_t zone_in_iarr[] = {	/* ZONE IN(16) */
762 	{0, 0x95, 0x6, DS_NO_SSC, F_SA_LOW | F_D_IN | F_M_ACCESS, NULL, NULL,
763 	    {16, 0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
764 	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* REPORT ZONES */
765 };
766 
767 
768 /* This array is accessed via SDEB_I_* values. Make sure all are mapped,
769  * plus the terminating elements for logic that scans this table such as
770  * REPORT SUPPORTED OPERATION CODES. */
771 static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEM_P1 + 1] = {
772 /* 0 */
773 	{0, 0, 0, DS_ALL, F_INV_OP | FF_RESPOND, NULL, NULL,	/* unknown opcodes */
774 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
775 	{0, 0x12, 0, DS_ALL, FF_RESPOND | F_D_IN, resp_inquiry, NULL, /* INQUIRY */
776 	    {6,  0xe3, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
777 	{0, 0xa0, 0, DS_ALL, FF_RESPOND | F_D_IN, resp_report_luns, NULL,
778 	    {12,  0xe3, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
779 	     0, 0} },					/* REPORT LUNS */
780 	{0, 0x3, 0, DS_ALL, FF_RESPOND | F_D_IN, resp_requests, NULL,
781 	    {6,  0xe1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
782 	{0, 0x0, 0, DS_ALL, F_M_ACCESS | F_RL_WLUN_OK, NULL, NULL,/* TEST UNIT READY */
783 	    {6,  0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
784 /* 5 */
785 	{ARRAY_SIZE(msense_iarr), 0x5a, 0, DS_ALL, F_D_IN,	/* MODE SENSE(10) */
786 	    resp_mode_sense, msense_iarr, {10,  0xf8, 0xff, 0xff, 0, 0, 0,
787 		0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
788 	{ARRAY_SIZE(mselect_iarr), 0x55, 0, DS_ALL, F_D_OUT,	/* MODE SELECT(10) */
789 	    resp_mode_select, mselect_iarr, {10,  0xf1, 0, 0, 0, 0, 0, 0xff,
790 		0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
791 	{0, 0x4d, 0, DS_NO_SSC, F_D_IN, resp_log_sense, NULL,	/* LOG SENSE */
792 	    {10,  0xe3, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0,
793 	     0, 0, 0} },
794 	{0, 0x25, 0, DS_NO_SSC, F_D_IN, resp_readcap, NULL,    /* READ CAPACITY(10) */
795 	    {10,  0xe1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0x1, 0xc7, 0, 0, 0, 0,
796 	     0, 0} },
797 	{ARRAY_SIZE(read_iarr), 0x88, 0, DS_NO_SSC, F_D_IN | FF_MEDIA_IO, /* READ(16) */
798 	    resp_read_dt0, read_iarr, {16,  0xfe, 0xff, 0xff, 0xff, 0xff,
799 	    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
800 /* 10 */
801 	{ARRAY_SIZE(write_iarr), 0x8a, 0, DS_NO_SSC, F_D_OUT | FF_MEDIA_IO,
802 	    resp_write_dt0, write_iarr,			/* WRITE(16) */
803 		{16,  0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
804 		 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
805 	{0, 0x1b, 0, DS_ALL, F_SSU_DELAY, resp_start_stop, NULL,/* START STOP UNIT */
806 	    {6,  0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
807 	{ARRAY_SIZE(sa_in_16_iarr), 0x9e, 0x10, DS_NO_SSC, F_SA_LOW | F_D_IN,
808 	    resp_readcap16, sa_in_16_iarr, /* SA_IN(16), READ CAPACITY(16) */
809 		{16,  0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
810 		 0xff, 0xff, 0xff, 0xff, 0x1, 0xc7} },
811 	{0, 0x9f, 0x12, DS_NO_SSC, F_SA_LOW | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
812 	    NULL, {16,  0x12, 0xf9, 0x0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff,
813 	    0xff, 0xff, 0xff, 0xff, 0xc7} },  /* SA_OUT(16), WRITE SCAT(16) */
814 	{ARRAY_SIZE(maint_in_iarr), 0xa3, 0xa, DS_ALL, F_SA_LOW | F_D_IN,
815 	    resp_report_tgtpgs,	/* MAINT IN, REPORT TARGET PORT GROUPS */
816 		maint_in_iarr, {12,  0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff,
817 				0xff, 0, 0xc7, 0, 0, 0, 0} },
818 /* 15 */
819 	{0, 0, 0, DS_ALL, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */
820 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
821 	{ARRAY_SIZE(verify_iarr), 0x8f, 0, DS_NO_SSC,
822 	    F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,	/* VERIFY(16) */
823 	    verify_iarr, {16,  0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
824 			  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },
825 	{ARRAY_SIZE(vl_iarr), 0x7f, 0x9, DS_NO_SSC, F_SA_HIGH | F_D_IN | FF_MEDIA_IO,
826 	    resp_read_dt0, vl_iarr,	/* VARIABLE LENGTH, READ(32) */
827 	    {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x9, 0xfe, 0, 0xff, 0xff,
828 	     0xff, 0xff} },
829 	{ARRAY_SIZE(reserve_iarr), 0x56, 0, DS_ALL, F_D_OUT,
830 	    NULL, reserve_iarr,	/* RESERVE(10) <no response function> */
831 	    {10,  0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
832 	     0} },
833 	{ARRAY_SIZE(release_iarr), 0x57, 0, DS_ALL, F_D_OUT,
834 	    NULL, release_iarr, /* RELEASE(10) <no response function> */
835 	    {10,  0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
836 	     0} },
837 /* 20 */
838 	{0, 0x1e, 0, DS_ALL, 0, NULL, NULL, /* ALLOW REMOVAL */
839 	    {6,  0, 0, 0, 0x3, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
840 	{0, 0x1, 0, DS_SSC, 0, resp_rewind, NULL,
841 	    {6,  0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
842 	{0, 0, 0, DS_NO_SSC, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */
843 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
844 	{0, 0x1d, 0, DS_ALL, F_D_OUT, NULL, NULL,      /* SEND DIAGNOSTIC */
845 	    {6,  0xf7, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
846 	{0, 0x42, 0, DS_NO_SSC, F_D_OUT | FF_MEDIA_IO, resp_unmap, NULL, /* UNMAP */
847 	    {10,  0x1, 0, 0, 0, 0, 0x3f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
848 /* 25 */
849 	{0, 0x3b, 0, DS_NO_SSC, F_D_OUT_MAYBE, resp_write_buffer, NULL,
850 	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0,
851 	     0, 0, 0, 0} },			/* WRITE_BUFFER */
852 	{ARRAY_SIZE(write_same_iarr), 0x41, 0, DS_NO_SSC, F_D_OUT_MAYBE | FF_MEDIA_IO,
853 	    resp_write_same_10, write_same_iarr,	/* WRITE SAME(10) */
854 		{10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0,
855 		 0, 0, 0, 0, 0} },
856 	{ARRAY_SIZE(sync_cache_iarr), 0x35, 0, DS_NO_SSC, F_SYNC_DELAY | F_M_ACCESS,
857 	    resp_sync_cache, sync_cache_iarr,
858 	    {10,  0x7, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
859 	     0, 0, 0, 0} },			/* SYNC_CACHE (10) */
860 	{0, 0x89, 0, DS_NO_SSC, F_D_OUT | FF_MEDIA_IO, resp_comp_write, NULL,
861 	    {16,  0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0,
862 	     0, 0xff, 0x3f, 0xc7} },		/* COMPARE AND WRITE */
863 	{ARRAY_SIZE(pre_fetch_iarr), 0x34, 0, DS_NO_SSC, F_SYNC_DELAY | FF_MEDIA_IO,
864 	    resp_pre_fetch, pre_fetch_iarr,
865 	    {10,  0x2, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
866 	     0, 0, 0, 0} },			/* PRE-FETCH (10) */
867 						/* READ POSITION (10) */
868 
869 /* 30 */
870 	{ARRAY_SIZE(zone_out_iarr), 0x94, 0x3, DS_NO_SSC, F_SA_LOW | F_M_ACCESS,
871 	    resp_open_zone, zone_out_iarr, /* ZONE_OUT(16), OPEN ZONE) */
872 		{16,  0x3 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
873 		 0xff, 0xff, 0x0, 0x0, 0xff, 0xff, 0x1, 0xc7} },
874 	{ARRAY_SIZE(zone_in_iarr), 0x95, 0x0, DS_NO_SSC, F_SA_LOW | F_M_ACCESS,
875 	    resp_report_zones, zone_in_iarr, /* ZONE_IN(16), REPORT ZONES) */
876 		{16,  0x0 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
877 		 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xc7} },
878 /* 32 */
879 	{0, 0x9c, 0x0, DS_NO_SSC, F_D_OUT | FF_MEDIA_IO,
880 	    resp_atomic_write, NULL, /* ATOMIC WRITE 16 */
881 		{16,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
882 		 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff} },
883 	{0, 0x05, 0, DS_SSC, F_D_IN, resp_read_blklimits, NULL,    /* READ BLOCK LIMITS (6) */
884 	    {6,  0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
885 	{0, 0x2b, 0, DS_SSC, F_D_UNKN, resp_locate, NULL,	   /* LOCATE (10) */
886 	    {10,  0x07, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xff, 0xc7, 0, 0,
887 	     0, 0, 0, 0} },
888 	{0, 0x10, 0, DS_SSC, F_D_IN, resp_write_filemarks, NULL,   /* WRITE FILEMARKS (6) */
889 	    {6,  0x01, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
890 	{0, 0x11, 0, DS_SSC, F_D_IN, resp_space, NULL,    /* SPACE (6) */
891 	    {6,  0x07, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
892 	{0, 0x4, 0, DS_SSC, 0, resp_format_medium, NULL,  /* FORMAT MEDIUM (6) */
893 	    {6,  0x3, 0x7, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
894 	{0, 0x19, 0, DS_SSC, F_D_IN, resp_erase, NULL,    /* ERASE (6) */
895 	    {6,  0x03, 0x33, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
896 /* 39 */
897 /* sentinel */
898 	{0xff, 0, 0, 0, 0, NULL, NULL,		/* terminating element */
899 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
900 };
901 
902 static int sdebug_num_hosts;
903 static int sdebug_add_host = DEF_NUM_HOST;  /* in sysfs this is relative */
904 static int sdebug_ato = DEF_ATO;
905 static int sdebug_cdb_len = DEF_CDB_LEN;
906 static int sdebug_jdelay = DEF_JDELAY;	/* if > 0 then unit is jiffies */
907 static int sdebug_dev_size_mb = DEF_DEV_SIZE_PRE_INIT;
908 static int sdebug_dif = DEF_DIF;
909 static int sdebug_dix = DEF_DIX;
910 static int sdebug_dsense = DEF_D_SENSE;
911 static int sdebug_every_nth = DEF_EVERY_NTH;
912 static int sdebug_fake_rw = DEF_FAKE_RW;
913 static unsigned int sdebug_guard = DEF_GUARD;
914 static int sdebug_host_max_queue;	/* per host */
915 static int sdebug_lowest_aligned = DEF_LOWEST_ALIGNED;
916 static int sdebug_max_luns = DEF_MAX_LUNS;
917 static int sdebug_max_queue = SDEBUG_CANQUEUE;	/* per submit queue */
918 static unsigned int sdebug_medium_error_start = OPT_MEDIUM_ERR_ADDR;
919 static int sdebug_medium_error_count = OPT_MEDIUM_ERR_NUM;
920 static int sdebug_ndelay = DEF_NDELAY;	/* if > 0 then unit is nanoseconds */
921 static int sdebug_no_lun_0 = DEF_NO_LUN_0;
922 static int sdebug_no_uld;
923 static int sdebug_num_parts = DEF_NUM_PARTS;
924 static int sdebug_num_tgts = DEF_NUM_TGTS; /* targets per host */
925 static int sdebug_opt_blks = DEF_OPT_BLKS;
926 static int sdebug_opts = DEF_OPTS;
927 static int sdebug_physblk_exp = DEF_PHYSBLK_EXP;
928 static int sdebug_opt_xferlen_exp = DEF_OPT_XFERLEN_EXP;
929 static int sdebug_ptype = DEF_PTYPE; /* SCSI peripheral device type */
930 static int sdebug_scsi_level = DEF_SCSI_LEVEL;
931 static int sdebug_sector_size = DEF_SECTOR_SIZE;
932 static int sdeb_tur_ms_to_ready = DEF_TUR_MS_TO_READY;
933 static int sdebug_virtual_gb = DEF_VIRTUAL_GB;
934 static int sdebug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
935 static unsigned int sdebug_lbpu = DEF_LBPU;
936 static unsigned int sdebug_lbpws = DEF_LBPWS;
937 static unsigned int sdebug_lbpws10 = DEF_LBPWS10;
938 static unsigned int sdebug_lbprz = DEF_LBPRZ;
939 static unsigned int sdebug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
940 static unsigned int sdebug_unmap_granularity = DEF_UNMAP_GRANULARITY;
941 static unsigned int sdebug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
942 static unsigned int sdebug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
943 static unsigned int sdebug_write_same_length = DEF_WRITESAME_LENGTH;
944 static unsigned int sdebug_atomic_wr = DEF_ATOMIC_WR;
945 static unsigned int sdebug_atomic_wr_max_length = DEF_ATOMIC_WR_MAX_LENGTH;
946 static unsigned int sdebug_atomic_wr_align = DEF_ATOMIC_WR_ALIGN;
947 static unsigned int sdebug_atomic_wr_gran = DEF_ATOMIC_WR_GRAN;
948 static unsigned int sdebug_atomic_wr_max_length_bndry =
949 			DEF_ATOMIC_WR_MAX_LENGTH_BNDRY;
950 static unsigned int sdebug_atomic_wr_max_bndry = DEF_ATOMIC_WR_MAX_BNDRY;
951 static int sdebug_uuid_ctl = DEF_UUID_CTL;
952 static bool sdebug_random = DEF_RANDOM;
953 static bool sdebug_per_host_store = DEF_PER_HOST_STORE;
954 static bool sdebug_removable = DEF_REMOVABLE;
955 static bool sdebug_clustering;
956 static bool sdebug_host_lock = DEF_HOST_LOCK;
957 static bool sdebug_strict = DEF_STRICT;
958 static bool sdebug_any_injecting_opt;
959 static bool sdebug_no_rwlock;
960 static bool sdebug_verbose;
961 static bool have_dif_prot;
962 static bool write_since_sync;
963 static bool sdebug_statistics = DEF_STATISTICS;
964 static bool sdebug_wp;
965 static bool sdebug_allow_restart;
966 static enum {
967 	BLK_ZONED_NONE	= 0,
968 	BLK_ZONED_HA	= 1,
969 	BLK_ZONED_HM	= 2,
970 } sdeb_zbc_model = BLK_ZONED_NONE;
971 static char *sdeb_zbc_model_s;
972 
973 enum sam_lun_addr_method {SAM_LUN_AM_PERIPHERAL = 0x0,
974 			  SAM_LUN_AM_FLAT = 0x1,
975 			  SAM_LUN_AM_LOGICAL_UNIT = 0x2,
976 			  SAM_LUN_AM_EXTENDED = 0x3};
977 static enum sam_lun_addr_method sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
978 static int sdebug_lun_am_i = (int)SAM_LUN_AM_PERIPHERAL;
979 
980 static unsigned int sdebug_store_sectors;
981 static sector_t sdebug_capacity;	/* in sectors */
982 
983 /* legacy BIOS geometry; the kernel may eventually drop these, but some
984    mode sense pages may still need them */
985 static int sdebug_heads;		/* heads per disk */
986 static int sdebug_cylinders_per;	/* cylinders per surface */
987 static int sdebug_sectors_per;		/* sectors per cylinder */
988 
989 static LIST_HEAD(sdebug_host_list);
990 static DEFINE_MUTEX(sdebug_host_list_mutex);
991 
992 static struct xarray per_store_arr;
993 static struct xarray *per_store_ap = &per_store_arr;
994 static int sdeb_first_idx = -1;		/* invalid index ==> none created */
995 static int sdeb_most_recent_idx = -1;
996 static DEFINE_RWLOCK(sdeb_fake_rw_lck);	/* need a RW lock when fake_rw=1 */
997 
998 static unsigned long map_size;
999 static int num_aborts;
1000 static int num_dev_resets;
1001 static int num_target_resets;
1002 static int num_bus_resets;
1003 static int num_host_resets;
1004 static int dix_writes;
1005 static int dix_reads;
1006 static int dif_errors;
1007 
1008 /* ZBC global data */
1009 static bool sdeb_zbc_in_use;	/* true for host-aware and host-managed disks */
1010 static int sdeb_zbc_zone_cap_mb;
1011 static int sdeb_zbc_zone_size_mb;
1012 static int sdeb_zbc_max_open = DEF_ZBC_MAX_OPEN_ZONES;
1013 static int sdeb_zbc_nr_conv = DEF_ZBC_NR_CONV_ZONES;
1014 
1015 static int submit_queues = DEF_SUBMIT_QUEUES;  /* > 1 for multi-queue (mq) */
1016 static int poll_queues; /* io_uring iopoll interface */
1017 
1018 static atomic_long_t writes_by_group_number[64];
1019 
1020 static char sdebug_proc_name[] = MY_NAME;
1021 static const char *my_name = MY_NAME;
1022 
1023 static const struct bus_type pseudo_lld_bus;
1024 
1025 static struct device_driver sdebug_driverfs_driver = {
1026 	.name 		= sdebug_proc_name,
1027 	.bus		= &pseudo_lld_bus,
1028 };
1029 
1030 static const int check_condition_result =
1031 	SAM_STAT_CHECK_CONDITION;
1032 
1033 static const int illegal_condition_result =
1034 	(DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;
1035 
1036 static const int device_qfull_result =
1037 	(DID_ABORT << 16) | SAM_STAT_TASK_SET_FULL;
1038 
1039 static const int condition_met_result = SAM_STAT_CONDITION_MET;
1040 
1041 static struct dentry *sdebug_debugfs_root;
1042 static ASYNC_DOMAIN_EXCLUSIVE(sdebug_async_domain);
1043 
1044 static u32 sdebug_get_devsel(struct scsi_device *sdp)
1045 {
1046 	unsigned char devtype = sdp->type;
1047 	u32 devsel;
1048 
1049 	if (devtype < 32)
1050 		devsel = (1 << devtype);
1051 	else
1052 		devsel = DS_ALL;
1053 
1054 	return devsel;
1055 }
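
/*
 * For example a disk (sdp->type == TYPE_DISK == 0) yields devsel == 1,
 * i.e. DS_SBC, which is later matched against opcode_info_t.devsel to
 * decide whether an opcode applies to this device type.
 */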
1056 
1057 static void sdebug_err_free(struct rcu_head *head)
1058 {
1059 	struct sdebug_err_inject *inject =
1060 		container_of(head, typeof(*inject), rcu);
1061 
1062 	kfree(inject);
1063 }
1064 
1065 static void sdebug_err_add(struct scsi_device *sdev, struct sdebug_err_inject *new)
1066 {
1067 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdev->hostdata;
1068 	struct sdebug_err_inject *err;
1069 
1070 	spin_lock(&devip->list_lock);
1071 	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
1072 		if (err->type == new->type && err->cmd == new->cmd) {
1073 			list_del_rcu(&err->list);
1074 			call_rcu(&err->rcu, sdebug_err_free);
1075 		}
1076 	}
1077 
1078 	list_add_tail_rcu(&new->list, &devip->inject_err_list);
1079 	spin_unlock(&devip->list_lock);
1080 }
1081 
1082 static int sdebug_err_remove(struct scsi_device *sdev, const char *buf, size_t count)
1083 {
1084 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdev->hostdata;
1085 	struct sdebug_err_inject *err;
1086 	int type;
1087 	unsigned char cmd;
1088 
1089 	if (sscanf(buf, "- %d %hhx", &type, &cmd) != 2) {
1090 		kfree(buf);
1091 		return -EINVAL;
1092 	}
1093 
1094 	spin_lock(&devip->list_lock);
1095 	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
1096 		if (err->type == type && err->cmd == cmd) {
1097 			list_del_rcu(&err->list);
1098 			call_rcu(&err->rcu, sdebug_err_free);
1099 			spin_unlock(&devip->list_lock);
1100 			kfree(buf);
1101 			return count;
1102 		}
1103 	}
1104 	spin_unlock(&devip->list_lock);
1105 
1106 	kfree(buf);
1107 	return -EINVAL;
1108 }
1109 
1110 static int sdebug_error_show(struct seq_file *m, void *p)
1111 {
1112 	struct scsi_device *sdev = (struct scsi_device *)m->private;
1113 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdev->hostdata;
1114 	struct sdebug_err_inject *err;
1115 
1116 	seq_puts(m, "Type\tCount\tCommand\n");
1117 
1118 	rcu_read_lock();
1119 	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
1120 		switch (err->type) {
1121 		case ERR_TMOUT_CMD:
1122 		case ERR_ABORT_CMD_FAILED:
1123 		case ERR_LUN_RESET_FAILED:
1124 			seq_printf(m, "%d\t%d\t0x%x\n", err->type, err->cnt,
1125 				err->cmd);
1126 		break;
1127 
1128 		case ERR_FAIL_QUEUE_CMD:
1129 			seq_printf(m, "%d\t%d\t0x%x\t0x%x\n", err->type,
1130 				err->cnt, err->cmd, err->queuecmd_ret);
1131 		break;
1132 
1133 		case ERR_FAIL_CMD:
1134 			seq_printf(m, "%d\t%d\t0x%x\t0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
1135 				err->type, err->cnt, err->cmd,
1136 				err->host_byte, err->driver_byte,
1137 				err->status_byte, err->sense_key,
1138 				err->asc, err->asq);
1139 		break;
1140 		}
1141 	}
1142 	rcu_read_unlock();
1143 
1144 	return 0;
1145 }
1146 
1147 static int sdebug_error_open(struct inode *inode, struct file *file)
1148 {
1149 	return single_open(file, sdebug_error_show, inode->i_private);
1150 }
1151 
1152 static ssize_t sdebug_error_write(struct file *file, const char __user *ubuf,
1153 		size_t count, loff_t *ppos)
1154 {
1155 	char *buf;
1156 	unsigned int inject_type;
1157 	struct sdebug_err_inject *inject;
1158 	struct scsi_device *sdev = (struct scsi_device *)file->f_inode->i_private;
1159 
1160 	buf = memdup_user_nul(ubuf, count);
1161 	if (IS_ERR(buf))
1162 		return PTR_ERR(buf);
1163 
1164 	if (buf[0] == '-')
1165 		return sdebug_err_remove(sdev, buf, count);
1166 
1167 	if (sscanf(buf, "%u", &inject_type) != 1) {
1168 		kfree(buf);
1169 		return -EINVAL;
1170 	}
1171 
1172 	inject = kzalloc(sizeof(struct sdebug_err_inject), GFP_KERNEL);
1173 	if (!inject) {
1174 		kfree(buf);
1175 		return -ENOMEM;
1176 	}
1177 
1178 	switch (inject_type) {
1179 	case ERR_TMOUT_CMD:
1180 	case ERR_ABORT_CMD_FAILED:
1181 	case ERR_LUN_RESET_FAILED:
1182 		if (sscanf(buf, "%d %d %hhx", &inject->type, &inject->cnt,
1183 			   &inject->cmd) != 3)
1184 			goto out_error;
1185 	break;
1186 
1187 	case ERR_FAIL_QUEUE_CMD:
1188 		if (sscanf(buf, "%d %d %hhx %x", &inject->type, &inject->cnt,
1189 			   &inject->cmd, &inject->queuecmd_ret) != 4)
1190 			goto out_error;
1191 	break;
1192 
1193 	case ERR_FAIL_CMD:
1194 		if (sscanf(buf, "%d %d %hhx %hhx %hhx %hhx %hhx %hhx %hhx",
1195 			   &inject->type, &inject->cnt, &inject->cmd,
1196 			   &inject->host_byte, &inject->driver_byte,
1197 			   &inject->status_byte, &inject->sense_key,
1198 			   &inject->asc, &inject->asq) != 9)
1199 			goto out_error;
1200 	break;
1201 
1202 	default:
1203 		goto out_error;
1205 	}
1206 
1207 	kfree(buf);
1208 	sdebug_err_add(sdev, inject);
1209 
1210 	return count;
1211 
1212 out_error:
1213 	kfree(buf);
1214 	kfree(inject);
1215 	return -EINVAL;
1216 }
1217 
1218 static const struct file_operations sdebug_error_fops = {
1219 	.open	= sdebug_error_open,
1220 	.read	= seq_read,
1221 	.write	= sdebug_error_write,
1222 	.release = single_release,
1223 };
1224 
1225 static int sdebug_target_reset_fail_show(struct seq_file *m, void *p)
1226 {
1227 	struct scsi_target *starget = (struct scsi_target *)m->private;
1228 	struct sdebug_target_info *targetip =
1229 		(struct sdebug_target_info *)starget->hostdata;
1230 
1231 	if (targetip)
1232 		seq_printf(m, "%c\n", targetip->reset_fail ? 'Y' : 'N');
1233 
1234 	return 0;
1235 }
1236 
1237 static int sdebug_target_reset_fail_open(struct inode *inode, struct file *file)
1238 {
1239 	return single_open(file, sdebug_target_reset_fail_show, inode->i_private);
1240 }
1241 
1242 static ssize_t sdebug_target_reset_fail_write(struct file *file,
1243 		const char __user *ubuf, size_t count, loff_t *ppos)
1244 {
1245 	int ret;
1246 	struct scsi_target *starget =
1247 		(struct scsi_target *)file->f_inode->i_private;
1248 	struct sdebug_target_info *targetip =
1249 		(struct sdebug_target_info *)starget->hostdata;
1250 
1251 	if (targetip) {
1252 		ret = kstrtobool_from_user(ubuf, count, &targetip->reset_fail);
1253 		return ret < 0 ? ret : count;
1254 	}
1255 	return -ENODEV;
1256 }
1257 
1258 static const struct file_operations sdebug_target_reset_fail_fops = {
1259 	.open	= sdebug_target_reset_fail_open,
1260 	.read	= seq_read,
1261 	.write	= sdebug_target_reset_fail_write,
1262 	.release = single_release,
1263 };
1264 
1265 static int sdebug_target_alloc(struct scsi_target *starget)
1266 {
1267 	struct sdebug_target_info *targetip;
1268 
1269 	targetip = kzalloc(sizeof(struct sdebug_target_info), GFP_KERNEL);
1270 	if (!targetip)
1271 		return -ENOMEM;
1272 
1273 	async_synchronize_full_domain(&sdebug_async_domain);
1274 
1275 	targetip->debugfs_entry = debugfs_create_dir(dev_name(&starget->dev),
1276 				sdebug_debugfs_root);
1277 
1278 	debugfs_create_file("fail_reset", 0600, targetip->debugfs_entry, starget,
1279 				&sdebug_target_reset_fail_fops);
1280 
1281 	starget->hostdata = targetip;
1282 
1283 	return 0;
1284 }
1285 
1286 static void sdebug_target_cleanup_async(void *data, async_cookie_t cookie)
1287 {
1288 	struct sdebug_target_info *targetip = data;
1289 
1290 	debugfs_remove(targetip->debugfs_entry);
1291 	kfree(targetip);
1292 }
1293 
1294 static void sdebug_target_destroy(struct scsi_target *starget)
1295 {
1296 	struct sdebug_target_info *targetip;
1297 
1298 	targetip = (struct sdebug_target_info *)starget->hostdata;
1299 	if (targetip) {
1300 		starget->hostdata = NULL;
1301 		async_schedule_domain(sdebug_target_cleanup_async, targetip,
1302 				&sdebug_async_domain);
1303 	}
1304 }
1305 
1306 /* Only do the extra work involved in logical block provisioning if one or
1307  * more of the lbpu, lbpws or lbpws10 parameters are given and we are doing
1308  * real reads and writes (i.e. not skipping them for speed).
1309  */
1310 static inline bool scsi_debug_lbp(void)
1311 {
1312 	return 0 == sdebug_fake_rw &&
1313 		(sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10);
1314 }
1315 
1316 static inline bool scsi_debug_atomic_write(void)
1317 {
1318 	return sdebug_fake_rw == 0 && sdebug_atomic_wr;
1319 }
1320 
1321 static void *lba2fake_store(struct sdeb_store_info *sip,
1322 			    unsigned long long lba)
1323 {
1324 	struct sdeb_store_info *lsip = sip;
1325 
1326 	lba = do_div(lba, sdebug_store_sectors);
1327 	if (!sip || !sip->storep) {
1328 		WARN_ON_ONCE(true);
1329 		lsip = xa_load(per_store_ap, 0);  /* should never be NULL */
1330 	}
1331 	return lsip->storep + lba * sdebug_sector_size;
1332 }
1333 
1334 static struct t10_pi_tuple *dif_store(struct sdeb_store_info *sip,
1335 				      sector_t sector)
1336 {
1337 	sector = sector_div(sector, sdebug_store_sectors);
1338 
1339 	return sip->dif_storep + sector;
1340 }
1341 
1342 static void sdebug_max_tgts_luns(void)
1343 {
1344 	struct sdebug_host_info *sdbg_host;
1345 	struct Scsi_Host *hpnt;
1346 
1347 	mutex_lock(&sdebug_host_list_mutex);
1348 	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
1349 		hpnt = sdbg_host->shost;
1350 		if ((hpnt->this_id >= 0) &&
1351 		    (sdebug_num_tgts > hpnt->this_id))
1352 			hpnt->max_id = sdebug_num_tgts + 1;
1353 		else
1354 			hpnt->max_id = sdebug_num_tgts;
1355 		/* sdebug_max_luns; */
1356 		hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
1357 	}
1358 	mutex_unlock(&sdebug_host_list_mutex);
1359 }
1360 
1361 enum sdeb_cmd_data {SDEB_IN_DATA = 0, SDEB_IN_CDB = 1};
1362 
1363 /* Set in_bit to -1 to indicate no bit position of invalid field */
1364 static void mk_sense_invalid_fld(struct scsi_cmnd *scp,
1365 				 enum sdeb_cmd_data c_d,
1366 				 int in_byte, int in_bit)
1367 {
1368 	unsigned char *sbuff;
1369 	u8 sks[4];
1370 	int sl, asc;
1371 
1372 	sbuff = scp->sense_buffer;
1373 	if (!sbuff) {
1374 		sdev_printk(KERN_ERR, scp->device,
1375 			    "%s: sense_buffer is NULL\n", __func__);
1376 		return;
1377 	}
1378 	asc = c_d ? INVALID_FIELD_IN_CDB : INVALID_FIELD_IN_PARAM_LIST;
1379 	memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
1380 	scsi_build_sense(scp, sdebug_dsense, ILLEGAL_REQUEST, asc, 0);
1381 	memset(sks, 0, sizeof(sks));
1382 	sks[0] = 0x80;
1383 	if (c_d)
1384 		sks[0] |= 0x40;
1385 	if (in_bit >= 0) {
1386 		sks[0] |= 0x8;
1387 		sks[0] |= 0x7 & in_bit;
1388 	}
1389 	put_unaligned_be16(in_byte, sks + 1);
1390 	if (sdebug_dsense) {
1391 		sl = sbuff[7] + 8;
1392 		sbuff[7] = sl;
1393 		sbuff[sl] = 0x2;
1394 		sbuff[sl + 1] = 0x6;
1395 		memcpy(sbuff + sl + 4, sks, 3);
1396 	} else
1397 		memcpy(sbuff + 15, sks, 3);
1398 	if (sdebug_verbose)
1399 		sdev_printk(KERN_INFO, scp->device, "%s:  [sense_key,asc,ascq"
1400 			    "]: [0x5,0x%x,0x0] %c byte=%d, bit=%d\n",
1401 			    my_name, asc, c_d ? 'C' : 'D', in_byte, in_bit);
1402 }
1403 
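/*
 * Worked example of the sense-key-specific (SKS) bytes built above for
 * fixed format sense (sdebug_dsense=0): an invalid field at CDB byte 4,
 * bit 7 gives sks[0] = 0x80 | 0x40 | 0x08 | 0x07 = 0xcf (SKSV=1,
 * C/D=1 for CDB, BPV=1, bit pointer 7) and sks[1..2] hold the
 * big-endian field pointer 0x0004; the three bytes are copied to
 * sense_buffer[15..17].
 */
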
1404 static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq)
1405 {
1406 	if (!scp->sense_buffer) {
1407 		sdev_printk(KERN_ERR, scp->device,
1408 			    "%s: sense_buffer is NULL\n", __func__);
1409 		return;
1410 	}
1411 	memset(scp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
1412 
1413 	scsi_build_sense(scp, sdebug_dsense, key, asc, asq);
1414 
1415 	if (sdebug_verbose)
1416 		sdev_printk(KERN_INFO, scp->device,
1417 			    "%s:  [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
1418 			    my_name, key, asc, asq);
1419 }
1420 
1421 /* Sense data that has information fields for tapes */
1422 static void mk_sense_info_tape(struct scsi_cmnd *scp, int key, int asc, int asq,
1423 			unsigned int information, unsigned char tape_flags)
1424 {
1425 	if (!scp->sense_buffer) {
1426 		sdev_printk(KERN_ERR, scp->device,
1427 			    "%s: sense_buffer is NULL\n", __func__);
1428 		return;
1429 	}
1430 	memset(scp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
1431 
1432 	scsi_build_sense(scp, /* sdebug_dsense */ 0, key, asc, asq);
1433 	/* only fixed format so far */
1434 
1435 	scp->sense_buffer[0] |= 0x80; /* valid */
1436 	scp->sense_buffer[2] |= tape_flags;
1437 	put_unaligned_be32(information, &scp->sense_buffer[3]);
1438 
1439 	if (sdebug_verbose)
1440 		sdev_printk(KERN_INFO, scp->device,
1441 			    "%s:  [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
1442 			    my_name, key, asc, asq);
1443 }
1444 
1445 static void mk_sense_invalid_opcode(struct scsi_cmnd *scp)
1446 {
1447 	mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
1448 }
1449 
1450 static int scsi_debug_ioctl(struct scsi_device *dev, unsigned int cmd,
1451 			    void __user *arg)
1452 {
1453 	if (sdebug_verbose) {
1454 		if (0x1261 == cmd)
1455 			sdev_printk(KERN_INFO, dev,
1456 				    "%s: BLKFLSBUF [0x1261]\n", __func__);
1457 		else if (0x5331 == cmd)
1458 			sdev_printk(KERN_INFO, dev,
1459 				    "%s: CDROM_GET_CAPABILITY [0x5331]\n",
1460 				    __func__);
1461 		else
1462 			sdev_printk(KERN_INFO, dev, "%s: cmd=0x%x\n",
1463 				    __func__, cmd);
1464 	}
1465 	return -EINVAL;
1466 	/* return -ENOTTY; // correct return but upsets fdisk */
1467 }
1468 
1469 static void config_cdb_len(struct scsi_device *sdev)
1470 {
1471 	switch (sdebug_cdb_len) {
1472 	case 6:	/* suggest 6 byte READ, WRITE and MODE SENSE/SELECT */
1473 		sdev->use_10_for_rw = false;
1474 		sdev->use_16_for_rw = false;
1475 		sdev->use_10_for_ms = false;
1476 		break;
1477 	case 10: /* suggest 10 byte RWs and 6 byte MODE SENSE/SELECT */
1478 		sdev->use_10_for_rw = true;
1479 		sdev->use_16_for_rw = false;
1480 		sdev->use_10_for_ms = false;
1481 		break;
1482 	case 12: /* suggest 10 byte RWs and 10 byte MODE SENSE/SELECT */
1483 		sdev->use_10_for_rw = true;
1484 		sdev->use_16_for_rw = false;
1485 		sdev->use_10_for_ms = true;
1486 		break;
1487 	case 16:
1488 		sdev->use_10_for_rw = false;
1489 		sdev->use_16_for_rw = true;
1490 		sdev->use_10_for_ms = true;
1491 		break;
1492 	case 32: /* No knobs to suggest this so same as 16 for now */
1493 		sdev->use_10_for_rw = false;
1494 		sdev->use_16_for_rw = true;
1495 		sdev->use_10_for_ms = true;
1496 		break;
1497 	default:
1498 		pr_warn("unexpected cdb_len=%d, force to 10\n",
1499 			sdebug_cdb_len);
1500 		sdev->use_10_for_rw = true;
1501 		sdev->use_16_for_rw = false;
1502 		sdev->use_10_for_ms = false;
1503 		sdebug_cdb_len = 10;
1504 		break;
1505 	}
1506 }
1507 
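/*
 * Illustrative use of the cdb_len knob this function consumes
 * (assuming the module parameter of the same name): loading with
 * "modprobe scsi_debug cdb_len=16" makes every attached sdev prefer
 * READ(16)/WRITE(16) and 10-byte MODE SENSE/SELECT, i.e. the case 16
 * branch above.
 */
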
1508 static void all_config_cdb_len(void)
1509 {
1510 	struct sdebug_host_info *sdbg_host;
1511 	struct Scsi_Host *shost;
1512 	struct scsi_device *sdev;
1513 
1514 	mutex_lock(&sdebug_host_list_mutex);
1515 	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
1516 		shost = sdbg_host->shost;
1517 		shost_for_each_device(sdev, shost) {
1518 			config_cdb_len(sdev);
1519 		}
1520 	}
1521 	mutex_unlock(&sdebug_host_list_mutex);
1522 }
1523 
1524 static void clear_luns_changed_on_target(struct sdebug_dev_info *devip)
1525 {
1526 	struct sdebug_host_info *sdhp = devip->sdbg_host;
1527 	struct sdebug_dev_info *dp;
1528 
1529 	list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
1530 		if ((devip->sdbg_host == dp->sdbg_host) &&
1531 		    (devip->target == dp->target)) {
1532 			clear_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
1533 		}
1534 	}
1535 }
1536 
1537 static int make_ua(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1538 {
1539 	int k;
1540 
1541 	k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS);
1542 	if (k != SDEBUG_NUM_UAS) {
1543 		const char *cp = NULL;
1544 
1545 		switch (k) {
1546 		case SDEBUG_UA_POR:
1547 			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
1548 					POWER_ON_RESET_ASCQ);
1549 			if (sdebug_verbose)
1550 				cp = "power on reset";
1551 			break;
1552 		case SDEBUG_UA_POOCCUR:
1553 			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
1554 					POWER_ON_OCCURRED_ASCQ);
1555 			if (sdebug_verbose)
1556 				cp = "power on occurred";
1557 			break;
1558 		case SDEBUG_UA_BUS_RESET:
1559 			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
1560 					BUS_RESET_ASCQ);
1561 			if (sdebug_verbose)
1562 				cp = "bus reset";
1563 			break;
1564 		case SDEBUG_UA_MODE_CHANGED:
1565 			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
1566 					MODE_CHANGED_ASCQ);
1567 			if (sdebug_verbose)
1568 				cp = "mode parameters changed";
1569 			break;
1570 		case SDEBUG_UA_CAPACITY_CHANGED:
1571 			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
1572 					CAPACITY_CHANGED_ASCQ);
1573 			if (sdebug_verbose)
1574 				cp = "capacity data changed";
1575 			break;
1576 		case SDEBUG_UA_MICROCODE_CHANGED:
1577 			mk_sense_buffer(scp, UNIT_ATTENTION,
1578 					TARGET_CHANGED_ASC,
1579 					MICROCODE_CHANGED_ASCQ);
1580 			if (sdebug_verbose)
1581 				cp = "microcode has been changed";
1582 			break;
1583 		case SDEBUG_UA_MICROCODE_CHANGED_WO_RESET:
1584 			mk_sense_buffer(scp, UNIT_ATTENTION,
1585 					TARGET_CHANGED_ASC,
1586 					MICROCODE_CHANGED_WO_RESET_ASCQ);
1587 			if (sdebug_verbose)
1588 				cp = "microcode has been changed without reset";
1589 			break;
1590 		case SDEBUG_UA_LUNS_CHANGED:
1591 			/*
1592 			 * SPC-3 behavior is to report a UNIT ATTENTION with
1593 			 * ASC/ASCQ REPORTED LUNS DATA HAS CHANGED on every LUN
1594 			 * on the target, until a REPORT LUNS command is
1595 			 * received.  SPC-4 behavior is to report it only once.
1596 			 * NOTE:  sdebug_scsi_level does not use the same
1597 			 * values as struct scsi_device->scsi_level.
1598 			 */
1599 			if (sdebug_scsi_level >= 6)	/* SPC-4 and above */
1600 				clear_luns_changed_on_target(devip);
1601 			mk_sense_buffer(scp, UNIT_ATTENTION,
1602 					TARGET_CHANGED_ASC,
1603 					LUNS_CHANGED_ASCQ);
1604 			if (sdebug_verbose)
1605 				cp = "reported luns data has changed";
1606 			break;
1607 		case SDEBUG_UA_NOT_READY_TO_READY:
1608 			mk_sense_buffer(scp, UNIT_ATTENTION, UA_READY_ASC,
1609 					0);
1610 			if (sdebug_verbose)
1611 				cp = "not ready to ready transition/media change";
1612 			break;
1613 		default:
1614 			pr_warn("unexpected unit attention code=%d\n", k);
1615 			if (sdebug_verbose)
1616 				cp = "unknown";
1617 			break;
1618 		}
1619 		clear_bit(k, devip->uas_bm);
1620 		if (sdebug_verbose)
1621 			sdev_printk(KERN_INFO, scp->device,
1622 				   "%s reports: Unit attention: %s\n",
1623 				   my_name, cp);
1624 		return check_condition_result;
1625 	}
1626 	return 0;
1627 }
1628 
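/*
 * Example flow: a bus reset sets SDEBUG_UA_BUS_RESET in uas_bm, so the
 * next command to the logical unit gets CHECK CONDITION with
 * UNIT ATTENTION, ASC/ASCQ 0x29/0x02 ("bus reset occurred"); the bit
 * is then cleared, giving one report per unit attention as SAM
 * requires.
 */
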
1629 /* Build SCSI "data-in" buffer. Returns 0 if ok else (DID_ERROR << 16). */
1630 static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
1631 				int arr_len)
1632 {
1633 	int act_len;
1634 	struct scsi_data_buffer *sdb = &scp->sdb;
1635 
1636 	if (!sdb->length)
1637 		return 0;
1638 	if (scp->sc_data_direction != DMA_FROM_DEVICE)
1639 		return DID_ERROR << 16;
1640 
1641 	act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
1642 				      arr, arr_len);
1643 	scsi_set_resid(scp, scsi_bufflen(scp) - act_len);
1644 
1645 	return 0;
1646 }
1647 
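/*
 * Residual example: if the initiator allocated 96 bytes
 * (scsi_bufflen() == 96) but only arr_len == 36 bytes of response data
 * exist, sg_copy_from_buffer() returns 36 and the resid is set to
 * 96 - 36 = 60, telling the midlayer how much of the buffer was left
 * unfilled.
 */
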
1648 /* Partial build of SCSI "data-in" buffer. Returns 0 if ok else
1649  * (DID_ERROR << 16). Can write to offset in data-in buffer. If multiple
1650  * calls, not required to write in ascending offset order. Assumes resid
1651  * set to scsi_bufflen() prior to any calls.
1652  */
1653 static int p_fill_from_dev_buffer(struct scsi_cmnd *scp, const void *arr,
1654 				  int arr_len, unsigned int off_dst)
1655 {
1656 	unsigned int act_len, n;
1657 	struct scsi_data_buffer *sdb = &scp->sdb;
1658 	off_t skip = off_dst;
1659 
1660 	if (sdb->length <= off_dst)
1661 		return 0;
1662 	if (scp->sc_data_direction != DMA_FROM_DEVICE)
1663 		return DID_ERROR << 16;
1664 
1665 	act_len = sg_pcopy_from_buffer(sdb->table.sgl, sdb->table.nents,
1666 				       arr, arr_len, skip);
1667 	pr_debug("%s: off_dst=%u, scsi_bufflen=%u, act_len=%u, resid=%d\n",
1668 		 __func__, off_dst, scsi_bufflen(scp), act_len,
1669 		 scsi_get_resid(scp));
1670 	n = scsi_bufflen(scp) - (off_dst + act_len);
1671 	scsi_set_resid(scp, min_t(u32, scsi_get_resid(scp), n));
1672 	return 0;
1673 }
1674 
1675 /* Fetches from SCSI "data-out" buffer. Returns number of bytes fetched into
1676  * 'arr' or -1 if error.
1677  */
1678 static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
1679 			       int arr_len)
1680 {
1681 	if (!scsi_bufflen(scp))
1682 		return 0;
1683 	if (scp->sc_data_direction != DMA_TO_DEVICE)
1684 		return -1;
1685 
1686 	return scsi_sg_copy_to_buffer(scp, arr, arr_len);
1687 }
1688 
1689 
1690 static char sdebug_inq_vendor_id[9] = "Linux   ";
1691 static char sdebug_inq_product_id[17] = "scsi_debug      ";
1692 static char sdebug_inq_product_rev[5] = SDEBUG_VERSION;
1693 /* Use some locally assigned NAAs for SAS addresses. */
1694 static const u64 naa3_comp_a = 0x3222222000000000ULL;
1695 static const u64 naa3_comp_b = 0x3333333000000000ULL;
1696 static const u64 naa3_comp_c = 0x3111111000000000ULL;
1697 
1698 /* Device identification VPD page. Returns number of bytes placed in arr */
1699 static int inquiry_vpd_83(unsigned char *arr, int port_group_id,
1700 			  int target_dev_id, int dev_id_num,
1701 			  const char *dev_id_str, int dev_id_str_len,
1702 			  const uuid_t *lu_name)
1703 {
1704 	int num, port_a;
1705 	char b[32];
1706 
1707 	port_a = target_dev_id + 1;
1708 	/* T10 vendor identifier field format (faked) */
1709 	arr[0] = 0x2;	/* ASCII */
1710 	arr[1] = 0x1;
1711 	arr[2] = 0x0;
1712 	memcpy(&arr[4], sdebug_inq_vendor_id, 8);
1713 	memcpy(&arr[12], sdebug_inq_product_id, 16);
1714 	memcpy(&arr[28], dev_id_str, dev_id_str_len);
1715 	num = 8 + 16 + dev_id_str_len;
1716 	arr[3] = num;
1717 	num += 4;
1718 	if (dev_id_num >= 0) {
1719 		if (sdebug_uuid_ctl) {
1720 			/* Locally assigned UUID */
1721 			arr[num++] = 0x1;  /* binary (not necessarily sas) */
1722 			arr[num++] = 0xa;  /* PIV=0, lu, naa */
1723 			arr[num++] = 0x0;
1724 			arr[num++] = 0x12;
1725 			arr[num++] = 0x10; /* uuid type=1, locally assigned */
1726 			arr[num++] = 0x0;
1727 			memcpy(arr + num, lu_name, 16);
1728 			num += 16;
1729 		} else {
1730 			/* NAA-3, Logical unit identifier (binary) */
1731 			arr[num++] = 0x1;  /* binary (not necessarily sas) */
1732 			arr[num++] = 0x3;  /* PIV=0, lu, naa */
1733 			arr[num++] = 0x0;
1734 			arr[num++] = 0x8;
1735 			put_unaligned_be64(naa3_comp_b + dev_id_num, arr + num);
1736 			num += 8;
1737 		}
1738 		/* Target relative port number */
1739 		arr[num++] = 0x61;	/* proto=sas, binary */
1740 		arr[num++] = 0x94;	/* PIV=1, target port, rel port */
1741 		arr[num++] = 0x0;	/* reserved */
1742 		arr[num++] = 0x4;	/* length */
1743 		arr[num++] = 0x0;	/* reserved */
1744 		arr[num++] = 0x0;	/* reserved */
1745 		arr[num++] = 0x0;
1746 		arr[num++] = 0x1;	/* relative port A */
1747 	}
1748 	/* NAA-3, Target port identifier */
1749 	arr[num++] = 0x61;	/* proto=sas, binary */
1750 	arr[num++] = 0x93;	/* piv=1, target port, naa */
1751 	arr[num++] = 0x0;
1752 	arr[num++] = 0x8;
1753 	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
1754 	num += 8;
1755 	/* NAA-3, Target port group identifier */
1756 	arr[num++] = 0x61;	/* proto=sas, binary */
1757 	arr[num++] = 0x95;	/* piv=1, target port group id */
1758 	arr[num++] = 0x0;
1759 	arr[num++] = 0x4;
1760 	arr[num++] = 0;
1761 	arr[num++] = 0;
1762 	put_unaligned_be16(port_group_id, arr + num);
1763 	num += 2;
1764 	/* NAA-3, Target device identifier */
1765 	arr[num++] = 0x61;	/* proto=sas, binary */
1766 	arr[num++] = 0xa3;	/* piv=1, target device, naa */
1767 	arr[num++] = 0x0;
1768 	arr[num++] = 0x8;
1769 	put_unaligned_be64(naa3_comp_a + target_dev_id, arr + num);
1770 	num += 8;
1771 	/* SCSI name string: Target device identifier */
1772 	arr[num++] = 0x63;	/* proto=sas, UTF-8 */
1773 	arr[num++] = 0xa8;	/* piv=1, target device, SCSI name string */
1774 	arr[num++] = 0x0;
1775 	arr[num++] = 24;
1776 	memcpy(arr + num, "naa.32222220", 12);
1777 	num += 12;
1778 	snprintf(b, sizeof(b), "%08X", target_dev_id);
1779 	memcpy(arr + num, b, 8);
1780 	num += 8;
1781 	memset(arr + num, 0, 4);
1782 	num += 4;
1783 	return num;
1784 }
1785 
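/*
 * Userspace check (sg3_utils invocation assumed): "sg_vpd --page=di
 * /dev/sdX" decodes this page and should list the T10 vendor, NAA-3
 * logical unit (or UUID when uuid_ctl is set), target port, port
 * group, target device and SCSI name string designators in the order
 * emitted above.
 */
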
1786 static unsigned char vpd84_data[] = {
1787 /* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
1788     0x22,0x22,0x22,0x0,0xbb,0x1,
1789     0x22,0x22,0x22,0x0,0xbb,0x2,
1790 };
1791 
1792 /*  Software interface identification VPD page */
1793 static int inquiry_vpd_84(unsigned char *arr)
1794 {
1795 	memcpy(arr, vpd84_data, sizeof(vpd84_data));
1796 	return sizeof(vpd84_data);
1797 }
1798 
1799 /* Management network addresses VPD page */
1800 static int inquiry_vpd_85(unsigned char *arr)
1801 {
1802 	int num = 0;
1803 	const char *na1 = "https://www.kernel.org/config";
1804 	const char *na2 = "http://www.kernel.org/log";
1805 	int plen, olen;
1806 
1807 	arr[num++] = 0x1;	/* lu, storage config */
1808 	arr[num++] = 0x0;	/* reserved */
1809 	arr[num++] = 0x0;
1810 	olen = strlen(na1);
1811 	plen = olen + 1;
1812 	if (plen % 4)
1813 		plen = ((plen / 4) + 1) * 4;
1814 	arr[num++] = plen;	/* length, null terminated, padded */
1815 	memcpy(arr + num, na1, olen);
1816 	memset(arr + num + olen, 0, plen - olen);
1817 	num += plen;
1818 
1819 	arr[num++] = 0x4;	/* lu, logging */
1820 	arr[num++] = 0x0;	/* reserved */
1821 	arr[num++] = 0x0;
1822 	olen = strlen(na2);
1823 	plen = olen + 1;
1824 	if (plen % 4)
1825 		plen = ((plen / 4) + 1) * 4;
1826 	arr[num++] = plen;	/* length, null terminated, padded */
1827 	memcpy(arr + num, na2, olen);
1828 	memset(arr + num + olen, 0, plen - olen);
1829 	num += plen;
1830 
1831 	return num;
1832 }
1833 
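/*
 * Padding arithmetic for the first descriptor above: olen =
 * strlen("https://www.kernel.org/config") = 29, so plen = 30 is
 * rounded up to the next multiple of four, 32, and the memset()
 * supplies the 3 NUL bytes that terminate and pad the address.
 */
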
1834 /* SCSI ports VPD page */
1835 static int inquiry_vpd_88(unsigned char *arr, int target_dev_id)
1836 {
1837 	int num = 0;
1838 	int port_a, port_b;
1839 
1840 	port_a = target_dev_id + 1;
1841 	port_b = port_a + 1;
1842 	arr[num++] = 0x0;	/* reserved */
1843 	arr[num++] = 0x0;	/* reserved */
1844 	arr[num++] = 0x0;
1845 	arr[num++] = 0x1;	/* relative port 1 (primary) */
1846 	memset(arr + num, 0, 6);
1847 	num += 6;
1848 	arr[num++] = 0x0;
1849 	arr[num++] = 12;	/* length tp descriptor */
1850 	/* naa-5 target port identifier (A) */
1851 	arr[num++] = 0x61;	/* proto=sas, binary */
1852 	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
1853 	arr[num++] = 0x0;	/* reserved */
1854 	arr[num++] = 0x8;	/* length */
1855 	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
1856 	num += 8;
1857 	arr[num++] = 0x0;	/* reserved */
1858 	arr[num++] = 0x0;	/* reserved */
1859 	arr[num++] = 0x0;
1860 	arr[num++] = 0x2;	/* relative port 2 (secondary) */
1861 	memset(arr + num, 0, 6);
1862 	num += 6;
1863 	arr[num++] = 0x0;
1864 	arr[num++] = 12;	/* length tp descriptor */
1865 	/* naa-5 target port identifier (B) */
1866 	arr[num++] = 0x61;	/* proto=sas, binary */
1867 	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
1868 	arr[num++] = 0x0;	/* reserved */
1869 	arr[num++] = 0x8;	/* length */
1870 	put_unaligned_be64(naa3_comp_a + port_b, arr + num);
1871 	num += 8;
1872 
1873 	return num;
1874 }
1875 
1876 
1877 static unsigned char vpd89_data[] = {
1878 /* from 4th byte */ 0,0,0,0,
1879 'l','i','n','u','x',' ',' ',' ',
1880 'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
1881 '1','2','3','4',
1882 0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
1883 0xec,0,0,0,
1884 0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
1885 0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
1886 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
1887 0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
1888 0x53,0x41,
1889 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
1890 0x20,0x20,
1891 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
1892 0x10,0x80,
1893 0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
1894 0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
1895 0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
1896 0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
1897 0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
1898 0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
1899 0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
1900 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1901 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1902 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1903 0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
1904 0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
1905 0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
1906 0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
1907 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1908 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1909 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1910 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1911 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1912 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1913 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1914 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1915 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1916 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1917 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1918 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
1919 };
1920 
1921 /* ATA Information VPD page */
1922 static int inquiry_vpd_89(unsigned char *arr)
1923 {
1924 	memcpy(arr, vpd89_data, sizeof(vpd89_data));
1925 	return sizeof(vpd89_data);
1926 }
1927 
1928 
1929 static unsigned char vpdb0_data[] = {
1930 	/* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
1931 	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1932 	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1933 	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1934 };
1935 
1936 /* Block limits VPD page (SBC-3) */
1937 static int inquiry_vpd_b0(unsigned char *arr)
1938 {
1939 	unsigned int gran;
1940 
1941 	memcpy(arr, vpdb0_data, sizeof(vpdb0_data));
1942 
1943 	/* Optimal transfer length granularity */
1944 	if (sdebug_opt_xferlen_exp != 0 &&
1945 	    sdebug_physblk_exp < sdebug_opt_xferlen_exp)
1946 		gran = 1 << sdebug_opt_xferlen_exp;
1947 	else
1948 		gran = 1 << sdebug_physblk_exp;
1949 	put_unaligned_be16(gran, arr + 2);
1950 
1951 	/* Maximum Transfer Length */
1952 	if (sdebug_store_sectors > 0x400)
1953 		put_unaligned_be32(sdebug_store_sectors, arr + 4);
1954 
1955 	/* Optimal Transfer Length */
1956 	put_unaligned_be32(sdebug_opt_blks, &arr[8]);
1957 
1958 	if (sdebug_lbpu) {
1959 		/* Maximum Unmap LBA Count */
1960 		put_unaligned_be32(sdebug_unmap_max_blocks, &arr[16]);
1961 
1962 		/* Maximum Unmap Block Descriptor Count */
1963 		put_unaligned_be32(sdebug_unmap_max_desc, &arr[20]);
1964 	}
1965 
1966 	/* Unmap Granularity Alignment */
1967 	if (sdebug_unmap_alignment) {
1968 		put_unaligned_be32(sdebug_unmap_alignment, &arr[28]);
1969 		arr[28] |= 0x80; /* UGAVALID */
1970 	}
1971 
1972 	/* Optimal Unmap Granularity */
1973 	put_unaligned_be32(sdebug_unmap_granularity, &arr[24]);
1974 
1975 	/* Maximum WRITE SAME Length */
1976 	put_unaligned_be64(sdebug_write_same_length, &arr[32]);
1977 
1978 	if (sdebug_atomic_wr) {
1979 		put_unaligned_be32(sdebug_atomic_wr_max_length, &arr[40]);
1980 		put_unaligned_be32(sdebug_atomic_wr_align, &arr[44]);
1981 		put_unaligned_be32(sdebug_atomic_wr_gran, &arr[48]);
1982 		put_unaligned_be32(sdebug_atomic_wr_max_length_bndry, &arr[52]);
1983 		put_unaligned_be32(sdebug_atomic_wr_max_bndry, &arr[56]);
1984 	}
1985 
1986 	return 0x3c; /* Mandatory page length for Logical Block Provisioning */
1987 }
1988 
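/*
 * Userspace check (sg3_utils invocation and parameter names assumed
 * from the sdebug_* variables): "sg_vpd --page=bl /dev/sdX" decodes
 * the Block Limits page; the unmap fields above are only filled in
 * when the module was loaded with lbpu=1, and the atomic write fields
 * only when atomic writes were enabled at load time.
 */
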
1989 /* Block device characteristics VPD page (SBC-3) */
1990 static int inquiry_vpd_b1(struct sdebug_dev_info *devip, unsigned char *arr)
1991 {
1992 	memset(arr, 0, 0x3c);
1993 	arr[0] = 0;
1994 	arr[1] = 1;	/* non rotating medium (e.g. solid state) */
1995 	arr[2] = 0;
1996 	arr[3] = 5;	/* less than 1.8" */
1997 
1998 	return 0x3c;
1999 }
2000 
2001 /* Logical block provisioning VPD page (SBC-4) */
2002 static int inquiry_vpd_b2(unsigned char *arr)
2003 {
2004 	memset(arr, 0, 0x4);
2005 	arr[0] = 0;			/* threshold exponent */
2006 	if (sdebug_lbpu)
2007 		arr[1] = 1 << 7;
2008 	if (sdebug_lbpws)
2009 		arr[1] |= 1 << 6;
2010 	if (sdebug_lbpws10)
2011 		arr[1] |= 1 << 5;
2012 	if (sdebug_lbprz && scsi_debug_lbp())
2013 		arr[1] |= (sdebug_lbprz & 0x7) << 2;  /* sbc4r07 and later */
2014 	/* anc_sup=0; dp=0 (no provisioning group descriptor) */
2015 	/* minimum_percentage=0; provisioning_type=0 (unknown) */
2016 	/* threshold_percentage=0 */
2017 	return 0x4;
2018 }
2019 
2020 /* Zoned block device characteristics VPD page (ZBC mandatory) */
2021 static int inquiry_vpd_b6(struct sdebug_dev_info *devip, unsigned char *arr)
2022 {
2023 	memset(arr, 0, 0x3c);
2024 	arr[0] = 0x1; /* set URSWRZ (unrestricted read in seq. wr req zone) */
2025 	/*
2026 	 * Set Optimal number of open sequential write preferred zones and
2027 	 * Optimal number of non-sequentially written sequential write
2028 	 * preferred zones fields to 'not reported' (0xffffffff). Leave other
2029 	 * fields set to zero, apart from Max. number of open swrz_s field.
2030 	 */
2031 	put_unaligned_be32(0xffffffff, &arr[4]);
2032 	put_unaligned_be32(0xffffffff, &arr[8]);
2033 	if (sdeb_zbc_model == BLK_ZONED_HM && devip->max_open)
2034 		put_unaligned_be32(devip->max_open, &arr[12]);
2035 	else
2036 		put_unaligned_be32(0xffffffff, &arr[12]);
2037 	if (devip->zcap < devip->zsize) {
2038 		arr[19] = ZBC_CONSTANT_ZONE_START_OFFSET;
2039 		put_unaligned_be64(devip->zsize, &arr[20]);
2040 	} else {
2041 		arr[19] = 0;
2042 	}
2043 	return 0x3c;
2044 }
2045 
2046 #define SDEBUG_BLE_LEN_AFTER_B4 28	/* thus vpage 32 bytes long */
2047 
2048 enum { MAXIMUM_NUMBER_OF_STREAMS = 6, PERMANENT_STREAM_COUNT = 5 };
2049 
2050 /* Block limits extension VPD page (SBC-4) */
2051 static int inquiry_vpd_b7(unsigned char *arrb4)
2052 {
2053 	memset(arrb4, 0, SDEBUG_BLE_LEN_AFTER_B4);
2054 	arrb4[1] = 1; /* Reduced stream control support (RSCS) */
2055 	put_unaligned_be16(MAXIMUM_NUMBER_OF_STREAMS, &arrb4[2]);
2056 	return SDEBUG_BLE_LEN_AFTER_B4;
2057 }
2058 
2059 #define SDEBUG_LONG_INQ_SZ 96
2060 #define SDEBUG_MAX_INQ_ARR_SZ 584
2061 
2062 static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
2063 {
2064 	unsigned char pq_pdt;
2065 	unsigned char *arr;
2066 	unsigned char *cmd = scp->cmnd;
2067 	u32 alloc_len, n;
2068 	int ret;
2069 	bool have_wlun, is_disk, is_zbc, is_disk_zbc, is_tape;
2070 
2071 	alloc_len = get_unaligned_be16(cmd + 3);
2072 	arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
2073 	if (!arr)
2074 		return DID_REQUEUE << 16;
2075 	if (scp->device->type >= 32) {
2076 		is_disk = (sdebug_ptype == TYPE_DISK);
2077 		is_tape = (sdebug_ptype == TYPE_TAPE);
2078 	} else {
2079 		is_disk = (scp->device->type == TYPE_DISK);
2080 		is_tape = (scp->device->type == TYPE_TAPE);
2081 	}
2082 	is_zbc = devip->zoned;
2083 	is_disk_zbc = (is_disk || is_zbc);
2084 	have_wlun = scsi_is_wlun(scp->device->lun);
2085 	if (have_wlun)
2086 		pq_pdt = TYPE_WLUN;	/* present, wlun */
2087 	else if (sdebug_no_lun_0 && (devip->lun == SDEBUG_LUN_0_VAL))
2088 		pq_pdt = 0x7f;	/* not present, PQ=3, PDT=0x1f */
2089 	else
2090 		pq_pdt = ((scp->device->type >= 32 ?
2091 				sdebug_ptype : scp->device->type) & 0x1f);
2092 	arr[0] = pq_pdt;
2093 	if (0x2 & cmd[1]) {  /* CMDDT bit set */
2094 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
2095 		kfree(arr);
2096 		return check_condition_result;
2097 	} else if (0x1 & cmd[1]) {  /* EVPD bit set */
2098 		int lu_id_num, port_group_id, target_dev_id;
2099 		u32 len;
2100 		char lu_id_str[6];
2101 		int host_no = devip->sdbg_host->shost->host_no;
2102 
2103 		arr[1] = cmd[2];
2104 		port_group_id = (((host_no + 1) & 0x7f) << 8) +
2105 		    (devip->channel & 0x7f);
2106 		if (sdebug_vpd_use_hostno == 0)
2107 			host_no = 0;
2108 		lu_id_num = have_wlun ? -1 : (((host_no + 1) * 2000) +
2109 			    (devip->target * 1000) + devip->lun);
2110 		target_dev_id = ((host_no + 1) * 2000) +
2111 				 (devip->target * 1000) - 3;
2112 		len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
2113 		if (0 == cmd[2]) { /* supported vital product data pages */
2114 			n = 4;
2115 			arr[n++] = 0x0;   /* this page */
2116 			arr[n++] = 0x80;  /* unit serial number */
2117 			arr[n++] = 0x83;  /* device identification */
2118 			arr[n++] = 0x84;  /* software interface ident. */
2119 			arr[n++] = 0x85;  /* management network addresses */
2120 			arr[n++] = 0x86;  /* extended inquiry */
2121 			arr[n++] = 0x87;  /* mode page policy */
2122 			arr[n++] = 0x88;  /* SCSI ports */
2123 			if (is_disk_zbc) {	  /* SBC or ZBC */
2124 				arr[n++] = 0x89;  /* ATA information */
2125 				arr[n++] = 0xb0;  /* Block limits */
2126 				arr[n++] = 0xb1;  /* Block characteristics */
2127 				if (is_disk)
2128 					arr[n++] = 0xb2;  /* LB Provisioning */
2129 				if (is_zbc)
2130 					arr[n++] = 0xb6;  /* ZB dev. char. */
2131 				arr[n++] = 0xb7;  /* Block limits extension */
2132 			}
2133 			arr[3] = n - 4;	  /* number of supported VPD pages */
2134 		} else if (0x80 == cmd[2]) { /* unit serial number */
2135 			arr[3] = len;
2136 			memcpy(&arr[4], lu_id_str, len);
2137 		} else if (0x83 == cmd[2]) { /* device identification */
2138 			arr[3] = inquiry_vpd_83(&arr[4], port_group_id,
2139 						target_dev_id, lu_id_num,
2140 						lu_id_str, len,
2141 						&devip->lu_name);
2142 		} else if (0x84 == cmd[2]) { /* Software interface ident. */
2143 			arr[3] = inquiry_vpd_84(&arr[4]);
2144 		} else if (0x85 == cmd[2]) { /* Management network addresses */
2145 			arr[3] = inquiry_vpd_85(&arr[4]);
2146 		} else if (0x86 == cmd[2]) { /* extended inquiry */
2147 			arr[3] = 0x3c;	/* number of following entries */
2148 			if (sdebug_dif == T10_PI_TYPE3_PROTECTION)
2149 				arr[4] = 0x4;	/* SPT: GRD_CHK:1 */
2150 			else if (have_dif_prot)
2151 				arr[4] = 0x5;   /* SPT: GRD_CHK:1, REF_CHK:1 */
2152 			else
2153 				arr[4] = 0x0;   /* no protection stuff */
2154 			/*
2155 			 * GROUP_SUP=1; HEADSUP=1 (HEAD OF QUEUE); ORDSUP=1
2156 			 * (ORDERED queuing); SIMPSUP=1 (SIMPLE queuing).
2157 			 */
2158 			arr[5] = 0x17;
2159 		} else if (0x87 == cmd[2]) { /* mode page policy */
2160 			arr[3] = 0x8;	/* number of following entries */
2161 			arr[4] = 0x2;	/* disconnect-reconnect mp */
2162 			arr[6] = 0x80;	/* mlus, shared */
2163 			arr[8] = 0x18;	 /* protocol specific lu */
2164 			arr[10] = 0x82;	 /* mlus, per initiator port */
2165 		} else if (0x88 == cmd[2]) { /* SCSI Ports */
2166 			arr[3] = inquiry_vpd_88(&arr[4], target_dev_id);
2167 		} else if (is_disk_zbc && 0x89 == cmd[2]) { /* ATA info */
2168 			n = inquiry_vpd_89(&arr[4]);
2169 			put_unaligned_be16(n, arr + 2);
2170 		} else if (is_disk_zbc && 0xb0 == cmd[2]) { /* Block limits */
2171 			arr[3] = inquiry_vpd_b0(&arr[4]);
2172 		} else if (is_disk_zbc && 0xb1 == cmd[2]) { /* Block char. */
2173 			arr[3] = inquiry_vpd_b1(devip, &arr[4]);
2174 		} else if (is_disk && 0xb2 == cmd[2]) { /* LB Prov. */
2175 			arr[3] = inquiry_vpd_b2(&arr[4]);
2176 		} else if (is_zbc && cmd[2] == 0xb6) { /* ZB dev. charact. */
2177 			arr[3] = inquiry_vpd_b6(devip, &arr[4]);
2178 		} else if (cmd[2] == 0xb7) { /* block limits extension page */
2179 			arr[3] = inquiry_vpd_b7(&arr[4]);
2180 		} else {
2181 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
2182 			kfree(arr);
2183 			return check_condition_result;
2184 		}
2185 		len = min_t(u32, get_unaligned_be16(arr + 2) + 4, alloc_len);
2186 		ret = fill_from_dev_buffer(scp, arr,
2187 			    min_t(u32, len, SDEBUG_MAX_INQ_ARR_SZ));
2188 		kfree(arr);
2189 		return ret;
2190 	}
2191 	/* drops through here for a standard inquiry */
2192 	arr[1] = sdebug_removable ? 0x80 : 0;	/* Removable disk */
2193 	arr[2] = sdebug_scsi_level;
2194 	arr[3] = 2;    /* response_data_format==2 */
2195 	arr[4] = SDEBUG_LONG_INQ_SZ - 5;
2196 	arr[5] = (int)have_dif_prot;	/* PROTECT bit */
2197 	if (sdebug_vpd_use_hostno == 0)
2198 		arr[5] |= 0x10; /* claim: implicit TPGS */
2199 	arr[6] = 0x10; /* claim: MultiP */
2200 	/* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
2201 	arr[7] = 0xa; /* claim: LINKED + CMDQUE */
2202 	memcpy(&arr[8], sdebug_inq_vendor_id, 8);
2203 	memcpy(&arr[16], sdebug_inq_product_id, 16);
2204 	memcpy(&arr[32], sdebug_inq_product_rev, 4);
2205 	/* Use Vendor Specific area to place driver date in ASCII hex */
2206 	memcpy(&arr[36], sdebug_version_date, 8);
2207 	/* version descriptors (2 bytes each) follow */
2208 	put_unaligned_be16(0xc0, arr + 58);   /* SAM-6 no version claimed */
2209 	put_unaligned_be16(0x5c0, arr + 60);  /* SPC-5 no version claimed */
2210 	n = 62;
2211 	if (is_disk) {		/* SBC-4 no version claimed */
2212 		put_unaligned_be16(0x600, arr + n);
2213 		n += 2;
2214 	} else if (is_tape) {	/* SSC-4 rev 3 */
2215 		put_unaligned_be16(0x525, arr + n);
2216 		n += 2;
2217 	} else if (is_zbc) {	/* ZBC BSR INCITS 536 revision 05 */
2218 		put_unaligned_be16(0x624, arr + n);
2219 		n += 2;
2220 	}
2221 	put_unaligned_be16(0x2100, arr + n);	/* SPL-4 no version claimed */
2222 	ret = fill_from_dev_buffer(scp, arr,
2223 			    min_t(u32, alloc_len, SDEBUG_LONG_INQ_SZ));
2224 	kfree(arr);
2225 	return ret;
2226 }
2227 
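/*
 * Quick exercise of this handler (sg3_utils invocations assumed):
 * "sg_inq /dev/sdX" issues a standard INQUIRY and should report vendor
 * "Linux", product "scsi_debug" and the SDEBUG_VERSION revision, while
 * "sg_inq --vpd --page=0x83 /dev/sdX" takes the EVPD branch above.
 */
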
2228 /* See resp_iec_m_pg() for how this data is manipulated */
2229 static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
2230 				   0, 0, 0x0, 0x0};
2231 
2232 static int resp_requests(struct scsi_cmnd *scp,
2233 			 struct sdebug_dev_info *devip)
2234 {
2235 	unsigned char *cmd = scp->cmnd;
2236 	unsigned char arr[SCSI_SENSE_BUFFERSIZE];	/* assume >= 18 bytes */
2237 	bool dsense = !!(cmd[1] & 1);
2238 	u32 alloc_len = cmd[4];
2239 	u32 len = 18;
2240 	int stopped_state = atomic_read(&devip->stopped);
2241 
2242 	memset(arr, 0, sizeof(arr));
2243 	if (stopped_state > 0) {	/* some "pollable" data [spc6r02: 5.12.2] */
2244 		if (dsense) {
2245 			arr[0] = 0x72;
2246 			arr[1] = NOT_READY;
2247 			arr[2] = LOGICAL_UNIT_NOT_READY;
2248 			arr[3] = (stopped_state == 2) ? 0x1 : 0x2;
2249 			len = 8;
2250 		} else {
2251 			arr[0] = 0x70;
2252 			arr[2] = NOT_READY;		/* NO_SENSE in sense_key */
2253 			arr[7] = 0xa;			/* 18 byte sense buffer */
2254 			arr[12] = LOGICAL_UNIT_NOT_READY;
2255 			arr[13] = (stopped_state == 2) ? 0x1 : 0x2;
2256 		}
2257 	} else if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
2258 		/* Information exceptions control mode page: TEST=1, MRIE=6 */
2259 		if (dsense) {
2260 			arr[0] = 0x72;
2261 			arr[1] = 0x0;		/* NO_SENSE in sense_key */
2262 			arr[2] = THRESHOLD_EXCEEDED;
2263 			arr[3] = 0xff;		/* Failure prediction(false) */
2264 			len = 8;
2265 		} else {
2266 			arr[0] = 0x70;
2267 			arr[2] = 0x0;		/* NO_SENSE in sense_key */
2268 			arr[7] = 0xa;   	/* 18 byte sense buffer */
2269 			arr[12] = THRESHOLD_EXCEEDED;
2270 			arr[13] = 0xff;		/* Failure prediction(false) */
2271 		}
2272 	} else {	/* nothing to report */
2273 		if (dsense) {
2274 			len = 8;
2275 			memset(arr, 0, len);
2276 			arr[0] = 0x72;
2277 		} else {
2278 			memset(arr, 0, len);
2279 			arr[0] = 0x70;
2280 			arr[7] = 0xa;
2281 		}
2282 	}
2283 	return fill_from_dev_buffer(scp, arr, min_t(u32, len, alloc_len));
2284 }
2285 
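/*
 * Format note: the DESC bit (cmd[1] & 1) picks the reply shape --
 * descriptor format is a bare 8-byte header (response code 0x72, sense
 * key in byte 1, ASC in byte 2), while fixed format (0x70) carries the
 * sense key in byte 2 and the ASC in byte 12 and needs all 18 bytes,
 * which is why len toggles between 8 and 18 above.
 */
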
2286 static int resp_start_stop(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
2287 {
2288 	unsigned char *cmd = scp->cmnd;
2289 	int power_cond, want_stop, stopped_state;
2290 	bool changing;
2291 
2292 	power_cond = (cmd[4] & 0xf0) >> 4;
2293 	if (power_cond) {
2294 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 7);
2295 		return check_condition_result;
2296 	}
2297 	want_stop = !(cmd[4] & 1);
2298 	stopped_state = atomic_read(&devip->stopped);
2299 	if (stopped_state == 2) {
2300 		ktime_t now_ts = ktime_get_boottime();
2301 
2302 		if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
2303 			u64 diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));
2304 
2305 			if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
2306 				/* tur_ms_to_ready timer extinguished */
2307 				atomic_set(&devip->stopped, 0);
2308 				stopped_state = 0;
2309 			}
2310 		}
2311 		if (stopped_state == 2) {
2312 			if (want_stop) {
2313 				stopped_state = 1;	/* dummy up success */
2314 			} else {	/* Disallow tur_ms_to_ready delay to be overridden */
2315 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 0 /* START bit */);
2316 				return check_condition_result;
2317 			}
2318 		}
2319 	}
2320 	changing = (stopped_state != want_stop);
2321 	if (changing)
2322 		atomic_xchg(&devip->stopped, want_stop);
2323 	if (scp->device->type == TYPE_TAPE && !want_stop) {
2324 		int i;
2325 
2326 		set_bit(SDEBUG_UA_NOT_READY_TO_READY, devip->uas_bm); /* not legal! */
2327 		for (i = 0; i < TAPE_MAX_PARTITIONS; i++)
2328 			devip->tape_location[i] = 0;
2329 		devip->tape_partition = 0;
2330 	}
2331 	if (!changing || (cmd[1] & 0x1))  /* state unchanged or IMMED bit set in cdb */
2332 		return SDEG_RES_IMMED_MASK;
2333 	else
2334 		return 0;
2335 }
2336 
2337 static sector_t get_sdebug_capacity(void)
2338 {
2339 	static const unsigned int gibibyte = 1073741824;
2340 
2341 	if (sdebug_virtual_gb > 0)
2342 		return (sector_t)sdebug_virtual_gb *
2343 			(gibibyte / sdebug_sector_size);
2344 	else
2345 		return sdebug_store_sectors;
2346 }
2347 
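/*
 * Capacity arithmetic example: virtual_gb=4 with the default 512-byte
 * sector size yields 4 * (1073741824 / 512) = 8388608 logical blocks,
 * so the READ CAPACITY(10) response below reports a last LBA of
 * 8388607.
 */
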
2348 #define SDEBUG_READCAP_ARR_SZ 8
2349 static int resp_readcap(struct scsi_cmnd *scp,
2350 			struct sdebug_dev_info *devip)
2351 {
2352 	unsigned char arr[SDEBUG_READCAP_ARR_SZ];
2353 	unsigned int capac;
2354 
2355 	/* following just in case virtual_gb changed */
2356 	sdebug_capacity = get_sdebug_capacity();
2357 	memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
2358 	if (sdebug_capacity < 0xffffffff) {
2359 		capac = (unsigned int)sdebug_capacity - 1;
2360 		put_unaligned_be32(capac, arr + 0);
2361 	} else
2362 		put_unaligned_be32(0xffffffff, arr + 0);
2363 	put_unaligned_be16(sdebug_sector_size, arr + 6);
2364 	return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
2365 }
2366 
2367 #define SDEBUG_READCAP16_ARR_SZ 32
2368 static int resp_readcap16(struct scsi_cmnd *scp,
2369 			  struct sdebug_dev_info *devip)
2370 {
2371 	unsigned char *cmd = scp->cmnd;
2372 	unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
2373 	u32 alloc_len;
2374 
2375 	alloc_len = get_unaligned_be32(cmd + 10);
2376 	/* following just in case virtual_gb changed */
2377 	sdebug_capacity = get_sdebug_capacity();
2378 	memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
2379 	put_unaligned_be64((u64)(sdebug_capacity - 1), arr + 0);
2380 	put_unaligned_be32(sdebug_sector_size, arr + 8);
2381 	arr[13] = sdebug_physblk_exp & 0xf;
2382 	arr[14] = (sdebug_lowest_aligned >> 8) & 0x3f;
2383 
2384 	if (scsi_debug_lbp()) {
2385 		arr[14] |= 0x80; /* LBPME */
2386 		/* from sbc4r07, this LBPRZ field is 1 bit, but the LBPRZ in
2387 		 * the LB Provisioning VPD page is 3 bits. Note that lbprz=2
2388 		 * in the wider field maps to 0 in this field.
2389 		 */
2390 		if (sdebug_lbprz & 1)	/* precisely what the draft requires */
2391 			arr[14] |= 0x40;
2392 	}
2393 
2394 	/*
2395 	 * Since the scsi_debug READ CAPACITY implementation always reports the
2396 	 * total disk capacity, set RC BASIS = 1 for host-managed ZBC devices.
2397 	 */
2398 	if (devip->zoned)
2399 		arr[12] |= 1 << 4;
2400 
2401 	arr[15] = sdebug_lowest_aligned & 0xff;
2402 
2403 	if (have_dif_prot) {
2404 		arr[12] = (sdebug_dif - 1) << 1; /* P_TYPE */
2405 		arr[12] |= 1; /* PROT_EN */
2406 	}
2407 
2408 	return fill_from_dev_buffer(scp, arr,
2409 			    min_t(u32, alloc_len, SDEBUG_READCAP16_ARR_SZ));
2410 }
2411 
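/*
 * Example decode (sg3_utils invocation assumed): "sg_readcap --long
 * /dev/sdX" issues READ CAPACITY(16); with dif=1..3 the PROT_EN bit
 * and P_TYPE field set above show up as protection type dif-1, and
 * the LBPME/LBPRZ bits mirror the provisioning state tested by
 * scsi_debug_lbp().
 */
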
2412 #define SDEBUG_MAX_TGTPGS_ARR_SZ 1412
2413 
2414 static int resp_report_tgtpgs(struct scsi_cmnd *scp,
2415 			      struct sdebug_dev_info *devip)
2416 {
2417 	unsigned char *cmd = scp->cmnd;
2418 	unsigned char *arr;
2419 	int host_no = devip->sdbg_host->shost->host_no;
2420 	int port_group_a, port_group_b, port_a, port_b;
2421 	u32 alen, n, rlen;
2422 	int ret;
2423 
2424 	alen = get_unaligned_be32(cmd + 6);
2425 	arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
2426 	if (!arr)
2427 		return DID_REQUEUE << 16;
2428 	/*
2429 	 * EVPD page 0x88 states we have two ports, one
2430 	 * real and a fake port with no device connected.
2431 	 * So we create two port groups with one port each
2432 	 * and set the group with port B to unavailable.
2433 	 */
2434 	port_a = 0x1; /* relative port A */
2435 	port_b = 0x2; /* relative port B */
2436 	port_group_a = (((host_no + 1) & 0x7f) << 8) +
2437 			(devip->channel & 0x7f);
2438 	port_group_b = (((host_no + 1) & 0x7f) << 8) +
2439 			(devip->channel & 0x7f) + 0x80;
2440 
2441 	/*
2442 	 * The asymmetric access state is cycled according to the host_id.
2443 	 */
2444 	n = 4;
2445 	if (sdebug_vpd_use_hostno == 0) {
2446 		arr[n++] = host_no % 3; /* Asymm access state */
2447 		arr[n++] = 0x0F; /* claim: all states are supported */
2448 	} else {
2449 		arr[n++] = 0x0; /* Active/Optimized path */
2450 		arr[n++] = 0x01; /* only support active/optimized paths */
2451 	}
2452 	put_unaligned_be16(port_group_a, arr + n);
2453 	n += 2;
2454 	arr[n++] = 0;    /* Reserved */
2455 	arr[n++] = 0;    /* Status code */
2456 	arr[n++] = 0;    /* Vendor unique */
2457 	arr[n++] = 0x1;  /* One port per group */
2458 	arr[n++] = 0;    /* Reserved */
2459 	arr[n++] = 0;    /* Reserved */
2460 	put_unaligned_be16(port_a, arr + n);
2461 	n += 2;
2462 	arr[n++] = 3;    /* Port unavailable */
2463 	arr[n++] = 0x08; /* claim: only unavailable paths are supported */
2464 	put_unaligned_be16(port_group_b, arr + n);
2465 	n += 2;
2466 	arr[n++] = 0;    /* Reserved */
2467 	arr[n++] = 0;    /* Status code */
2468 	arr[n++] = 0;    /* Vendor unique */
2469 	arr[n++] = 0x1;  /* One port per group */
2470 	arr[n++] = 0;    /* Reserved */
2471 	arr[n++] = 0;    /* Reserved */
2472 	put_unaligned_be16(port_b, arr + n);
2473 	n += 2;
2474 
2475 	rlen = n - 4;
2476 	put_unaligned_be32(rlen, arr + 0);
2477 
2478 	/*
2479 	 * Return the smallest value of either
2480 	 * - The allocated length
2481 	 * - The constructed command length
2482 	 * - The maximum array size
2483 	 */
2484 	rlen = min(alen, n);
2485 	ret = fill_from_dev_buffer(scp, arr,
2486 			   min_t(u32, rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
2487 	kfree(arr);
2488 	return ret;
2489 }
2490 
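/*
 * Userspace view (sg3_utils invocation assumed): "sg_rtpg /dev/sdX"
 * issues REPORT TARGET PORT GROUPS and should show the two groups
 * built above -- one with a single port in an active (or host_no
 * cycled) state and one whose only port is unavailable.
 */
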
2491 static int resp_rsup_opcodes(struct scsi_cmnd *scp,
2492 			     struct sdebug_dev_info *devip)
2493 {
2494 	bool rctd;
2495 	u8 reporting_opts, req_opcode, sdeb_i, supp;
2496 	u16 req_sa, u;
2497 	u32 alloc_len, a_len;
2498 	int k, offset, len, errsts, bump, na;
2499 	const struct opcode_info_t *oip;
2500 	const struct opcode_info_t *r_oip;
2501 	u8 *arr;
2502 	u8 *cmd = scp->cmnd;
2503 	u32 devsel = sdebug_get_devsel(scp->device);
2504 
2505 	rctd = !!(cmd[2] & 0x80);
2506 	reporting_opts = cmd[2] & 0x7;
2507 	req_opcode = cmd[3];
2508 	req_sa = get_unaligned_be16(cmd + 4);
2509 	alloc_len = get_unaligned_be32(cmd + 6);
2510 	if (alloc_len < 4 || alloc_len > 0xffff) {
2511 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
2512 		return check_condition_result;
2513 	}
2514 	if (alloc_len > 8192)
2515 		a_len = 8192;
2516 	else
2517 		a_len = alloc_len;
2518 	arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_ATOMIC);
2519 	if (NULL == arr) {
2520 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
2521 				INSUFF_RES_ASCQ);
2522 		return check_condition_result;
2523 	}
2524 	switch (reporting_opts) {
2525 	case 0:	/* all commands */
2526 		bump = rctd ? 20 : 8;
2527 		for (offset = 4, oip = opcode_info_arr;
2528 		     oip->num_attached != 0xff && offset < a_len; ++oip) {
2529 			if (F_INV_OP & oip->flags)
2530 				continue;
2531 			if ((devsel & oip->devsel) != 0) {
2532 				arr[offset] = oip->opcode;
2533 				put_unaligned_be16(oip->sa, arr + offset + 2);
2534 				if (rctd)
2535 					arr[offset + 5] |= 0x2;
2536 				if (FF_SA & oip->flags)
2537 					arr[offset + 5] |= 0x1;
2538 				put_unaligned_be16(oip->len_mask[0], arr + offset + 6);
2539 				if (rctd)
2540 					put_unaligned_be16(0xa, arr + offset + 8);
2541 				offset += bump;
2542 			}
2543 			na = oip->num_attached;
2544 			r_oip = oip;
2545 			for (k = 0, oip = oip->arrp; k < na; ++k, ++oip) {
2546 				if (F_INV_OP & oip->flags)
2547 					continue;
2548 				if ((devsel & oip->devsel) == 0)
2549 					continue;
2550 				arr[offset] = oip->opcode;
2551 				put_unaligned_be16(oip->sa, arr + offset + 2);
2552 				if (rctd)
2553 					arr[offset + 5] |= 0x2;
2554 				if (FF_SA & oip->flags)
2555 					arr[offset + 5] |= 0x1;
2556 				put_unaligned_be16(oip->len_mask[0],
2557 						arr + offset + 6);
2558 				if (rctd)
2559 					put_unaligned_be16(0xa,
2560 							   arr + offset + 8);
2561 				offset += bump;
2562 			}
2563 			oip = r_oip;
2564 		}
2565 		put_unaligned_be32(offset - 4, arr);
2566 		break;
2567 	case 1:	/* one command: opcode only */
2568 	case 2:	/* one command: opcode plus service action */
2569 	case 3:	/* one command: if sa==0 then opcode only else opcode+sa */
2570 		sdeb_i = opcode_ind_arr[req_opcode];
2571 		oip = &opcode_info_arr[sdeb_i];
2572 		if (F_INV_OP & oip->flags) {
2573 			supp = 1;
2574 			offset = 4;
2575 		} else {
2576 			if (1 == reporting_opts) {
2577 				if (FF_SA & oip->flags) {
2578 					mk_sense_invalid_fld(scp, SDEB_IN_CDB,
2579 							     2, 2);
2580 					kfree(arr);
2581 					return check_condition_result;
2582 				}
2583 				req_sa = 0;
2584 			} else if (2 == reporting_opts &&
2585 				   0 == (FF_SA & oip->flags)) {
2586 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1);
2587 				kfree(arr);	/* point at requested sa */
2588 				return check_condition_result;
2589 			}
2590 			if (0 == (FF_SA & oip->flags) &&
2591 				(devsel & oip->devsel) != 0 &&
2592 				req_opcode == oip->opcode)
2593 				supp = 3;
2594 			else if (0 == (FF_SA & oip->flags)) {
2595 				na = oip->num_attached;
2596 				for (k = 0, oip = oip->arrp; k < na;
2597 				     ++k, ++oip) {
2598 					if (req_opcode == oip->opcode &&
2599 						(devsel & oip->devsel) != 0)
2600 						break;
2601 				}
2602 				supp = (k >= na) ? 1 : 3;
2603 			} else if (req_sa != oip->sa) {
2604 				na = oip->num_attached;
2605 				for (k = 0, oip = oip->arrp; k < na;
2606 				     ++k, ++oip) {
2607 					if (req_sa == oip->sa &&
2608 						(devsel & oip->devsel) != 0)
2609 						break;
2610 				}
2611 				supp = (k >= na) ? 1 : 3;
2612 			} else
2613 				supp = 3;
2614 			if (3 == supp) {
2615 				u = oip->len_mask[0];
2616 				put_unaligned_be16(u, arr + 2);
2617 				arr[4] = oip->opcode;
2618 				for (k = 1; k < u; ++k)
2619 					arr[4 + k] = (k < 16) ?
2620 						 oip->len_mask[k] : 0xff;
2621 				offset = 4 + u;
2622 			} else
2623 				offset = 4;
2624 		}
2625 		arr[1] = (rctd ? 0x80 : 0) | supp;
2626 		if (rctd) {
2627 			put_unaligned_be16(0xa, arr + offset);
2628 			offset += 12;
2629 		}
2630 		break;
2631 	default:
2632 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
2633 		kfree(arr);
2634 		return check_condition_result;
2635 	}
2636 	offset = (offset < a_len) ? offset : a_len;
2637 	len = (offset < alloc_len) ? offset : alloc_len;
2638 	errsts = fill_from_dev_buffer(scp, arr, len);
2639 	kfree(arr);
2640 	return errsts;
2641 }
2642 
2643 static int resp_rsup_tmfs(struct scsi_cmnd *scp,
2644 			  struct sdebug_dev_info *devip)
2645 {
2646 	bool repd;
2647 	u32 alloc_len, len;
2648 	u8 arr[16];
2649 	u8 *cmd = scp->cmnd;
2650 
2651 	memset(arr, 0, sizeof(arr));
2652 	repd = !!(cmd[2] & 0x80);
2653 	alloc_len = get_unaligned_be32(cmd + 6);
2654 	if (alloc_len < 4) {
2655 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
2656 		return check_condition_result;
2657 	}
2658 	arr[0] = 0xc8;		/* ATS | ATSS | LURS */
2659 	arr[1] = 0x1;		/* ITNRS */
2660 	if (repd) {
2661 		arr[3] = 0xc;
2662 		len = 16;
2663 	} else
2664 		len = 4;
2665 
2666 	len = (len < alloc_len) ? len : alloc_len;
2667 	return fill_from_dev_buffer(scp, arr, len);
2668 }
2669 
2670 /* <<Following mode page info copied from ST318451LW>> */
2671 
2672 static int resp_err_recov_pg(unsigned char *p, int pcontrol, int target)
2673 {	/* Read-Write Error Recovery page for mode_sense */
2674 	static const unsigned char err_recov_pg[] = {
2675 		0x1, 0xa, 0xc0, 11, 240, 0, 0, 0,
2676 		5, 0, 0xff, 0xff
2677 	};
2678 
2679 	memcpy(p, err_recov_pg, sizeof(err_recov_pg));
2680 	if (1 == pcontrol)
2681 		memset(p + 2, 0, sizeof(err_recov_pg) - 2);
2682 	return sizeof(err_recov_pg);
2683 }
2684 
2685 static int resp_disconnect_pg(unsigned char *p, int pcontrol, int target)
2686 { 	/* Disconnect-Reconnect page for mode_sense */
2687 	static const unsigned char disconnect_pg[] = {
2688 		0x2, 0xe, 128, 128, 0, 10, 0, 0,
2689 		0, 0, 0, 0, 0, 0, 0, 0
2690 	};
2691 
2692 	memcpy(p, disconnect_pg, sizeof(disconnect_pg));
2693 	if (1 == pcontrol)
2694 		memset(p + 2, 0, sizeof(disconnect_pg) - 2);
2695 	return sizeof(disconnect_pg);
2696 }
2697 
2698 static int resp_format_pg(unsigned char *p, int pcontrol, int target)
2699 {       /* Format device page for mode_sense */
2700 	static const unsigned char format_pg[] = {
2701 		0x3, 0x16, 0, 0, 0, 0, 0, 0,
2702 		0, 0, 0, 0, 0, 0, 0, 0,
2703 		0, 0, 0, 0, 0x40, 0, 0, 0
2704 	};
2705 
2706 	memcpy(p, format_pg, sizeof(format_pg));
2707 	put_unaligned_be16(sdebug_sectors_per, p + 10);
2708 	put_unaligned_be16(sdebug_sector_size, p + 12);
2709 	if (sdebug_removable)
2710 		p[20] |= 0x20; /* should agree with INQUIRY */
2711 	if (1 == pcontrol)
2712 		memset(p + 2, 0, sizeof(format_pg) - 2);
2713 	return sizeof(format_pg);
2714 }
2715 
2716 static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
2717 				     0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,
2718 				     0, 0, 0, 0};
2719 
2720 static int resp_caching_pg(unsigned char *p, int pcontrol, int target)
2721 { 	/* Caching page for mode_sense */
2722 	static const unsigned char ch_caching_pg[] = {
2723 		/* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0,
2724 		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
2725 	};
2726 	static const unsigned char d_caching_pg[] = {
2727 		0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
2728 		0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0, 0, 0, 0, 0
2729 	};
2730 
2731 	if (SDEBUG_OPT_N_WCE & sdebug_opts)
2732 		caching_pg[2] &= ~0x4;	/* set WCE=0 (default WCE=1) */
2733 	memcpy(p, caching_pg, sizeof(caching_pg));
2734 	if (1 == pcontrol)
2735 		memcpy(p + 2, ch_caching_pg, sizeof(ch_caching_pg));
2736 	else if (2 == pcontrol)
2737 		memcpy(p, d_caching_pg, sizeof(d_caching_pg));
2738 	return sizeof(caching_pg);
2739 }
2740 
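/*
 * pcontrol convention shared by these mode page helpers: 0 = current
 * values, 1 = changeable mask, 2 = defaults, 3 = saved (rejected with
 * SAVING_PARAMS_UNSUP before any helper runs). E.g. MODE SENSE with
 * PC=1 on the caching page returns ch_caching_pg, advertising only the
 * WCE bit as changeable.
 */
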
2741 static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
2742 				    0, 0, 0x2, 0x4b};
2743 
2744 static int resp_ctrl_m_pg(unsigned char *p, int pcontrol, int target)
2745 { 	/* Control mode page for mode_sense */
2746 	unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
2747 					0, 0, 0, 0};
2748 	static const unsigned char d_ctrl_m_pg[] = {
2749 		0xa, 10, 2, 0, 0, 0, 0, 0,
2750 		0, 0, 0x2, 0x4b
2751 	};
2752 
2753 	if (sdebug_dsense)
2754 		ctrl_m_pg[2] |= 0x4;
2755 	else
2756 		ctrl_m_pg[2] &= ~0x4;
2757 
2758 	if (sdebug_ato)
2759 		ctrl_m_pg[5] |= 0x80; /* ATO=1 */
2760 
2761 	memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
2762 	if (1 == pcontrol)
2763 		memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
2764 	else if (2 == pcontrol)
2765 		memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
2766 	return sizeof(ctrl_m_pg);
2767 }
2768 
2769 /* IO Advice Hints Grouping mode page */
2770 static int resp_grouping_m_pg(unsigned char *p, int pcontrol, int target)
2771 {
2772 	/* IO Advice Hints Grouping mode page */
2773 	struct grouping_m_pg {
2774 		u8 page_code;	/* OR 0x40 when subpage_code > 0 */
2775 		u8 subpage_code;
2776 		__be16 page_length;
2777 		u8 reserved[12];
2778 		struct scsi_io_group_descriptor descr[MAXIMUM_NUMBER_OF_STREAMS];
2779 	};
2780 	static const struct grouping_m_pg gr_m_pg = {
2781 		.page_code = 0xa | 0x40,
2782 		.subpage_code = 5,
2783 		.page_length = cpu_to_be16(sizeof(gr_m_pg) - 4),
2784 		.descr = {
2785 			{ .st_enble = 1 },
2786 			{ .st_enble = 1 },
2787 			{ .st_enble = 1 },
2788 			{ .st_enble = 1 },
2789 			{ .st_enble = 1 },
2790 			{ .st_enble = 0 },
2791 		}
2792 	};
2793 
2794 	BUILD_BUG_ON(sizeof(struct grouping_m_pg) !=
2795 		     16 + MAXIMUM_NUMBER_OF_STREAMS * 16);
2796 	memcpy(p, &gr_m_pg, sizeof(gr_m_pg));
2797 	if (1 == pcontrol) {
2798 		/* There are no changeable values so clear from byte 4 on. */
2799 		memset(p + 4, 0, sizeof(gr_m_pg) - 4);
2800 	}
2801 	return sizeof(gr_m_pg);
2802 }
2803 
2804 static int resp_iec_m_pg(unsigned char *p, int pcontrol, int target)
2805 {	/* Informational Exceptions control mode page for mode_sense */
2806 	static const unsigned char ch_iec_m_pg[] = {
2807 		/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
2808 		0, 0, 0x0, 0x0
2809 	};
2810 	static const unsigned char d_iec_m_pg[] = {
2811 		0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
2812 		0, 0, 0x0, 0x0
2813 	};
2814 
2815 	memcpy(p, iec_m_pg, sizeof(iec_m_pg));
2816 	if (1 == pcontrol)
2817 		memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
2818 	else if (2 == pcontrol)
2819 		memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
2820 	return sizeof(iec_m_pg);
2821 }
2822 
2823 static int resp_sas_sf_m_pg(unsigned char *p, int pcontrol, int target)
2824 {	/* SAS SSP mode page - short format for mode_sense */
2825 	static const unsigned char sas_sf_m_pg[] = {
2826 		0x19, 0x6, 0x6, 0x0, 0x7, 0xd0, 0x0, 0x0
2827 	};
2828 
2829 	memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg));
2830 	if (1 == pcontrol)
2831 		memset(p + 2, 0, sizeof(sas_sf_m_pg) - 2);
2832 	return sizeof(sas_sf_m_pg);
2833 }
2834 
2835 
2836 static int resp_sas_pcd_m_spg(unsigned char *p, int pcontrol, int target,
2837 			      int target_dev_id)
2838 {	/* SAS phy control and discover mode page for mode_sense */
2839 	unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
2840 		    0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
2841 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2842 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2843 		    0x2, 0, 0, 0, 0, 0, 0, 0,
2844 		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
2845 		    0, 0, 0, 0, 0, 0, 0, 0,
2846 		    0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
2847 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2848 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2849 		    0x3, 0, 0, 0, 0, 0, 0, 0,
2850 		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
2851 		    0, 0, 0, 0, 0, 0, 0, 0,
2852 		};
2853 	int port_a, port_b;
2854 
2855 	put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 16);
2856 	put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 24);
2857 	put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 64);
2858 	put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 72);
2859 	port_a = target_dev_id + 1;
2860 	port_b = port_a + 1;
2861 	memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
2862 	put_unaligned_be32(port_a, p + 20);
2863 	put_unaligned_be32(port_b, p + 48 + 20);
2864 	if (1 == pcontrol)
2865 		memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
2866 	return sizeof(sas_pcd_m_pg);
2867 }
2868 
2869 static int resp_sas_sha_m_spg(unsigned char *p, int pcontrol)
2870 {	/* SAS SSP shared protocol specific port mode subpage */
2871 	static const unsigned char sas_sha_m_pg[] = {
2872 		0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
2873 		0, 0, 0, 0, 0, 0, 0, 0,
2874 	};
2875 
2876 	memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg));
2877 	if (1 == pcontrol)
2878 		memset(p + 4, 0, sizeof(sas_sha_m_pg) - 4);
2879 	return sizeof(sas_sha_m_pg);
2880 }
2881 
2882 static unsigned char partition_pg[] = {0x11, 12, 1, 0, 0x24, 3, 9, 0,
2883 	0xff, 0xff, 0x00, 0x00};
2884 
2885 static int resp_partition_m_pg(unsigned char *p, int pcontrol, int target)
2886 {	/* Partition page for mode_sense (tape) */
2887 	memcpy(p, partition_pg, sizeof(partition_pg));
2888 	if (pcontrol == 1)
2889 		memset(p + 2, 0, sizeof(partition_pg) - 2);
2890 	return sizeof(partition_pg);
2891 }
2892 
2893 static int process_medium_part_m_pg(struct sdebug_dev_info *devip,
2894 				unsigned char *new, int pg_len)
2895 {
2896 	int new_nbr, p0_size, p1_size;
2897 
2898 	if ((new[4] & 0x80) != 0) { /* FDP */
2899 		partition_pg[4] |= 0x80;
2900 		devip->tape_pending_nbr_partitions = TAPE_MAX_PARTITIONS;
2901 		devip->tape_pending_part_0_size = TAPE_UNITS - TAPE_PARTITION_1_UNITS;
2902 		devip->tape_pending_part_1_size = TAPE_PARTITION_1_UNITS;
2903 	} else {
2904 		new_nbr = new[3] + 1;
2905 		if (new_nbr > TAPE_MAX_PARTITIONS)
2906 			return 3;
2907 		if ((new[4] & 0x40) != 0) { /* SDP */
2908 			p1_size = TAPE_PARTITION_1_UNITS;
2909 			p0_size = TAPE_UNITS - p1_size;
2910 			if (p0_size < 100)
2911 				return 4;
2912 		} else if ((new[4] & 0x20) != 0) {
2913 			if (new_nbr > 1) {
2914 				p0_size = get_unaligned_be16(new + 8);
2915 				p1_size = get_unaligned_be16(new + 10);
2916 				if (p1_size == 0xFFFF)
2917 					p1_size = TAPE_UNITS - p0_size;
2918 				else if (p0_size == 0xFFFF)
2919 					p0_size = TAPE_UNITS - p1_size;
2920 				if (p0_size < 100 || p1_size < 100)
2921 					return 8;
2922 			} else {
2923 				p0_size = TAPE_UNITS;
2924 				p1_size = 0;
2925 			}
2926 		} else
2927 			return 6;
2928 		devip->tape_pending_nbr_partitions = new_nbr;
2929 		devip->tape_pending_part_0_size = p0_size;
2930 		devip->tape_pending_part_1_size = p1_size;
2931 		partition_pg[3] = new_nbr;
2933 	}
2934 
2935 	return 0;
2936 }
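
/*
 * Editorial sketch (not driver code): a hypothetical MODE SELECT Medium
 * Partition page payload, laid out as process_medium_part_m_pg() above
 * expects it -- flag bits in byte 4, big-endian partition sizes at bytes
 * 8 and 10. Compiled out deliberately.
 */
#if 0
	unsigned char mp_pg[12] = {
		0x11, 10,	/* [0..1] page code 0x11, page length */
		0,		/* [2] maximum additional partitions (unused here) */
		1,		/* [3] additional partitions -> new_nbr = 2 */
		0x20,		/* [4] IDP set; FDP (0x80) and SDP (0x40) clear */
		0, 0, 0,	/* [5..7] remaining fields unused here */
		0x03, 0xe8,	/* [8..9] partition 0 size: 1000 units */
		0xff, 0xff,	/* [10..11] partition 1 size: remaining capacity */
	};
#endif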
2937 
2938 static int resp_compression_m_pg(unsigned char *p, int pcontrol, int target,
2939 	unsigned char dce)
2940 {	/* Compression page for mode_sense (tape) */
2941 	static const unsigned char compression_pg[] = {
2942 		0x0f, 14, 0x40, 0, 0, 0, 0, 0,
2943 		0, 0, 0, 0, 0, 0
2944 	};
2945 
2946 	memcpy(p, compression_pg, sizeof(compression_pg));
2947 	if (dce)
2948 		p[2] |= 0x80;
2949 	if (pcontrol == 1)
2950 		memset(p + 2, 0, sizeof(compression_pg) - 2);
2951 	return sizeof(compression_pg);
2952 }
2953 
2954 /* PAGE_SIZE is more than necessary but provides room for future expansion. */
2955 #define SDEBUG_MAX_MSENSE_SZ PAGE_SIZE
2956 
2957 static int resp_mode_sense(struct scsi_cmnd *scp,
2958 			   struct sdebug_dev_info *devip)
2959 {
2960 	int pcontrol, pcode, subpcode, bd_len;
2961 	unsigned char dev_spec;
2962 	u32 alloc_len, offset, len;
2963 	int target_dev_id;
2964 	int target = scp->device->id;
2965 	unsigned char *ap;
2966 	unsigned char *cmd = scp->cmnd;
2967 	bool dbd, llbaa, msense_6, is_disk, is_zbc, is_tape;
2968 
2969 	unsigned char *arr __free(kfree) = kzalloc(SDEBUG_MAX_MSENSE_SZ, GFP_ATOMIC);
2970 
2971 	if (!arr)
2972 		return -ENOMEM;
2973 	dbd = !!(cmd[1] & 0x8);		/* disable block descriptors */
2974 	pcontrol = (cmd[2] & 0xc0) >> 6;
2975 	pcode = cmd[2] & 0x3f;
2976 	subpcode = cmd[3];
2977 	msense_6 = (MODE_SENSE == cmd[0]);
2978 	llbaa = msense_6 ? false : !!(cmd[1] & 0x10);
2979 	is_disk = (scp->device->type == TYPE_DISK);
2980 	is_zbc = devip->zoned;
2981 	is_tape = (scp->device->type == TYPE_TAPE);
2982 	if ((is_disk || is_zbc || is_tape) && !dbd)
2983 		bd_len = llbaa ? 16 : 8;
2984 	else
2985 		bd_len = 0;
2986 	alloc_len = msense_6 ? cmd[4] : get_unaligned_be16(cmd + 7);
2987 	if (0x3 == pcontrol) {  /* Saving values not supported */
2988 		mk_sense_buffer(scp, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, 0);
2989 		return check_condition_result;
2990 	}
2991 	target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
2992 			(devip->target * 1000) - 3;
2993 	/* for disks+zbc set DPOFUA bit and clear write protect (WP) bit */
2994 	if (is_disk || is_zbc) {
2995 		dev_spec = 0x10;	/* =0x90 if WP=1 implies read-only */
2996 		if (sdebug_wp)
2997 			dev_spec |= 0x80;
2998 	} else
2999 		dev_spec = 0x0;
3000 	if (msense_6) {
3001 		arr[2] = dev_spec;
3002 		arr[3] = bd_len;
3003 		offset = 4;
3004 	} else {
3005 		arr[3] = dev_spec;
3006 		if (16 == bd_len)
3007 			arr[4] = 0x1;	/* set LONGLBA bit */
3008 		arr[7] = bd_len;	/* assume 255 or less */
3009 		offset = 8;
3010 	}
3011 	ap = arr + offset;
3012 	if ((bd_len > 0) && (!sdebug_capacity))
3013 		sdebug_capacity = get_sdebug_capacity();
3014 
3015 	if (8 == bd_len) {
3016 		if (sdebug_capacity > 0xfffffffe)
3017 			put_unaligned_be32(0xffffffff, ap + 0);
3018 		else
3019 			put_unaligned_be32(sdebug_capacity, ap + 0);
3020 		if (is_tape) {
3021 			ap[0] = devip->tape_density;
3022 			put_unaligned_be16(devip->tape_blksize, ap + 6);
3023 		} else
3024 			put_unaligned_be16(sdebug_sector_size, ap + 6);
3025 		offset += bd_len;
3026 		ap = arr + offset;
3027 	} else if (16 == bd_len) {
3028 		if (is_tape) {
3029 			mk_sense_invalid_fld(scp, SDEB_IN_DATA, 1, 4);
3030 			return check_condition_result;
3031 		}
3032 		put_unaligned_be64((u64)sdebug_capacity, ap + 0);
3033 		put_unaligned_be32(sdebug_sector_size, ap + 12);
3034 		offset += bd_len;
3035 		ap = arr + offset;
3036 	}
3037 	if (cmd[2] == 0)
3038 		goto only_bd; /* Only block descriptor requested */
3039 
3040 	/*
3041 	 * N.B. If len>0 before resp_*_pg() call, then form of that call should be:
3042 	 *        len += resp_*_pg(ap + len, pcontrol, target);
3043 	 */
3044 	switch (pcode) {
3045 	case 0x1:	/* Read-Write error recovery page, direct access */
3046 		if (subpcode > 0x0 && subpcode < 0xff)
3047 			goto bad_subpcode;
3048 		len = resp_err_recov_pg(ap, pcontrol, target);
3049 		offset += len;
3050 		break;
3051 	case 0x2:	/* Disconnect-Reconnect page, all devices */
3052 		if (subpcode > 0x0 && subpcode < 0xff)
3053 			goto bad_subpcode;
3054 		len = resp_disconnect_pg(ap, pcontrol, target);
3055 		offset += len;
3056 		break;
3057 	case 0x3:       /* Format device page, direct access */
3058 		if (subpcode > 0x0 && subpcode < 0xff)
3059 			goto bad_subpcode;
3060 		if (is_disk) {
3061 			len = resp_format_pg(ap, pcontrol, target);
3062 			offset += len;
3063 		} else {
3064 			goto bad_pcode;
3065 		}
3066 		break;
3067 	case 0x8:	/* Caching page, direct access */
3068 		if (subpcode > 0x0 && subpcode < 0xff)
3069 			goto bad_subpcode;
3070 		if (is_disk || is_zbc) {
3071 			len = resp_caching_pg(ap, pcontrol, target);
3072 			offset += len;
3073 		} else {
3074 			goto bad_pcode;
3075 		}
3076 		break;
3077 	case 0xa:	/* Control Mode page, all devices */
3078 		switch (subpcode) {
3079 		case 0:
3080 			len = resp_ctrl_m_pg(ap, pcontrol, target);
3081 			break;
3082 		case 0x05:
3083 			len = resp_grouping_m_pg(ap, pcontrol, target);
3084 			break;
3085 		case 0xff:
3086 			len = resp_ctrl_m_pg(ap, pcontrol, target);
3087 			len += resp_grouping_m_pg(ap + len, pcontrol, target);
3088 			break;
3089 		default:
3090 			goto bad_subpcode;
3091 		}
3092 		offset += len;
3093 		break;
3094 	case 0xf:	/* Compression Mode Page (tape) */
3095 		if (!is_tape)
3096 			goto bad_pcode;
3097 		len = resp_compression_m_pg(ap, pcontrol, target, devip->tape_dce);
3098 		offset += len;
3099 		break;
3100 	case 0x11:	/* Partition Mode Page (tape) */
3101 		if (!is_tape)
3102 			goto bad_pcode;
3103 		len = resp_partition_m_pg(ap, pcontrol, target);
3104 		offset += len;
3105 		break;
3106 	case 0x19:	/* if spc==1 then sas phy, control+discover */
3107 		if (subpcode > 0x2 && subpcode < 0xff)
3108 			goto bad_subpcode;
3109 		len = 0;
3110 		if ((0x0 == subpcode) || (0xff == subpcode))
3111 			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
3112 		if ((0x1 == subpcode) || (0xff == subpcode))
3113 			len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
3114 						  target_dev_id);
3115 		if ((0x2 == subpcode) || (0xff == subpcode))
3116 			len += resp_sas_sha_m_spg(ap + len, pcontrol);
3117 		offset += len;
3118 		break;
3119 	case 0x1c:	/* Informational Exceptions Mode page, all devices */
3120 		if (subpcode > 0x0 && subpcode < 0xff)
3121 			goto bad_subpcode;
3122 		len = resp_iec_m_pg(ap, pcontrol, target);
3123 		offset += len;
3124 		break;
3125 	case 0x3f:	/* Read all Mode pages */
3126 		if (subpcode > 0x0 && subpcode < 0xff)
3127 			goto bad_subpcode;
3128 		len = resp_err_recov_pg(ap, pcontrol, target);
3129 		len += resp_disconnect_pg(ap + len, pcontrol, target);
3130 		if (is_disk) {
3131 			len += resp_format_pg(ap + len, pcontrol, target);
3132 			len += resp_caching_pg(ap + len, pcontrol, target);
3133 		} else if (is_zbc) {
3134 			len += resp_caching_pg(ap + len, pcontrol, target);
3135 		}
3136 		len += resp_ctrl_m_pg(ap + len, pcontrol, target);
3137 		if (0xff == subpcode)
3138 			len += resp_grouping_m_pg(ap + len, pcontrol, target);
3139 		len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
3140 		if (0xff == subpcode) {
3141 			len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
3142 						  target_dev_id);
3143 			len += resp_sas_sha_m_spg(ap + len, pcontrol);
3144 		}
3145 		len += resp_iec_m_pg(ap + len, pcontrol, target);
3146 		offset += len;
3147 		break;
3148 	default:
3149 		goto bad_pcode;
3150 	}
3151 only_bd:
3152 	if (msense_6)
3153 		arr[0] = offset - 1;
3154 	else
3155 		put_unaligned_be16((offset - 2), arr + 0);
3156 	return fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, offset));
3157 
3158 bad_pcode:
3159 	mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
3160 	return check_condition_result;
3161 
3162 bad_subpcode:
3163 	mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
3164 	return check_condition_result;
3165 }
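
/*
 * Editorial sketch (not driver code): how the CDB fields parsed at the
 * top of resp_mode_sense() decode for a typical MODE SENSE(10) asking for
 * the Caching page with block descriptors disabled. Compiled out
 * deliberately.
 */
#if 0
	unsigned char cdb[10] = { 0x5a, 0x08, 0x08, 0x00, 0, 0, 0, 0x02, 0x00, 0 };

	bool dbd = !!(cdb[1] & 0x8);			/* true: no block descriptors */
	int pcontrol = (cdb[2] & 0xc0) >> 6;		/* 0: current values */
	int pcode = cdb[2] & 0x3f;			/* 0x08: Caching page */
	int subpcode = cdb[3];				/* 0: no subpage */
	u32 alloc_len = get_unaligned_be16(cdb + 7);	/* 512 bytes */
#endif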
3166 
3167 #define SDEBUG_MAX_MSELECT_SZ 512
3168 
3169 static int resp_mode_select(struct scsi_cmnd *scp,
3170 			    struct sdebug_dev_info *devip)
3171 {
3172 	int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
3173 	int param_len, res, mpage;
3174 	unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
3175 	unsigned char *cmd = scp->cmnd;
3176 	int mselect6 = (MODE_SELECT == cmd[0]);
3177 
3178 	memset(arr, 0, sizeof(arr));
3179 	pf = cmd[1] & 0x10;
3180 	sp = cmd[1] & 0x1;
3181 	param_len = mselect6 ? cmd[4] : get_unaligned_be16(cmd + 7);
3182 	if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
3183 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1);
3184 		return check_condition_result;
3185 	}
3186 	res = fetch_to_dev_buffer(scp, arr, param_len);
3187 	if (-1 == res)
3188 		return DID_ERROR << 16;
3189 	else if (sdebug_verbose && (res < param_len))
3190 		sdev_printk(KERN_INFO, scp->device,
3191 			    "%s: cdb indicated=%d, IO sent=%d bytes\n",
3192 			    __func__, param_len, res);
3193 	md_len = mselect6 ? (arr[0] + 1) : (get_unaligned_be16(arr + 0) + 2);
3194 	bd_len = mselect6 ? arr[3] : get_unaligned_be16(arr + 6);
3195 	off = (mselect6 ? 4 : 8);
3196 	if (scp->device->type == TYPE_TAPE) {
3197 		int blksize;
3198 
3199 		if (bd_len != 8) {
3200 			mk_sense_invalid_fld(scp, SDEB_IN_DATA,
3201 					mselect6 ? 3 : 6, -1);
3202 			return check_condition_result;
3203 		}
3204 		if (arr[off] == TAPE_BAD_DENSITY) {
3205 			mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
3206 			return check_condition_result;
3207 		}
3208 		blksize = get_unaligned_be16(arr + off + 6);
3209 		if (blksize != 0 &&
3210 			(blksize < TAPE_MIN_BLKSIZE ||
3211 				blksize > TAPE_MAX_BLKSIZE ||
3212 				(blksize % 4) != 0)) {
3213 			mk_sense_invalid_fld(scp, SDEB_IN_DATA, 1, -1);
3214 			return check_condition_result;
3215 		}
3216 		devip->tape_density = arr[off];
3217 		devip->tape_blksize = blksize;
3218 	}
3219 	off += bd_len;
3220 	if (off >= res)
3221 		return 0; /* No page written, just descriptors */
3222 	if (md_len > 2) {
3223 		mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
3224 		return check_condition_result;
3225 	}
3226 	mpage = arr[off] & 0x3f;
3227 	ps = !!(arr[off] & 0x80);
3228 	if (ps) {
3229 		mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 7);
3230 		return check_condition_result;
3231 	}
3232 	spf = !!(arr[off] & 0x40);
3233 	pg_len = spf ? (get_unaligned_be16(arr + off + 2) + 4) :
3234 		       (arr[off + 1] + 2);
3235 	if ((pg_len + off) > param_len) {
3236 		mk_sense_buffer(scp, ILLEGAL_REQUEST,
3237 				PARAMETER_LIST_LENGTH_ERR, 0);
3238 		return check_condition_result;
3239 	}
3240 	switch (mpage) {
3241 	case 0x8:      /* Caching Mode page */
3242 		if (caching_pg[1] == arr[off + 1]) {
3243 			memcpy(caching_pg + 2, arr + off + 2,
3244 			       sizeof(caching_pg) - 2);
3245 			goto set_mode_changed_ua;
3246 		}
3247 		break;
3248 	case 0xa:      /* Control Mode page */
3249 		if (ctrl_m_pg[1] == arr[off + 1]) {
3250 			memcpy(ctrl_m_pg + 2, arr + off + 2,
3251 			       sizeof(ctrl_m_pg) - 2);
3252 			if (ctrl_m_pg[4] & 0x8)
3253 				sdebug_wp = true;
3254 			else
3255 				sdebug_wp = false;
3256 			sdebug_dsense = !!(ctrl_m_pg[2] & 0x4);
3257 			goto set_mode_changed_ua;
3258 		}
3259 		break;
3260 	case 0xf:       /* Compression mode page */
3261 		if (scp->device->type != TYPE_TAPE)
3262 			goto bad_pcode;
3263 		if ((arr[off + 2] & 0x40) != 0) {
3264 			devip->tape_dce = (arr[off + 2] & 0x80) != 0;
3265 			return 0;
3266 		}
3267 		break;
3268 	case 0x11:	/* Medium Partition Mode Page (tape) */
3269 		if (scp->device->type == TYPE_TAPE) {
3270 			int fld;
3271 
3272 			fld = process_medium_part_m_pg(devip, &arr[off], pg_len);
3273 			if (fld == 0)
3274 				return 0;
3275 			mk_sense_invalid_fld(scp, SDEB_IN_DATA, fld, -1);
3276 			return check_condition_result;
3277 		}
3278 		break;
3279 	case 0x1c:      /* Informational Exceptions Mode page */
3280 		if (iec_m_pg[1] == arr[off + 1]) {
3281 			memcpy(iec_m_pg + 2, arr + off + 2,
3282 			       sizeof(iec_m_pg) - 2);
3283 			goto set_mode_changed_ua;
3284 		}
3285 		break;
3286 	default:
3287 		break;
3288 	}
3289 	mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 5);
3290 	return check_condition_result;
3291 set_mode_changed_ua:
3292 	set_bit(SDEBUG_UA_MODE_CHANGED, devip->uas_bm);
3293 	return 0;
3294 
3295 bad_pcode:
3296 	mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
3297 	return check_condition_result;
3298 }
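
/*
 * Layout note (editorial addition): the MODE SELECT(6) parameter list
 * consumed by resp_mode_select() above is
 *
 *   arr[0]                 mode data length (reserved on MODE SELECT;
 *                          the code rejects md_len > 2)
 *   arr[1..2]              medium type, device-specific parameter
 *   arr[3]                 block descriptor length (bd_len)
 *   arr[4 .. 4+bd_len-1]   block descriptor(s)
 *   arr[4+bd_len ..]       first mode page: PS|SPF|page code, then the
 *                          page length (one byte, or big-endian 16 bits
 *                          when SPF is set)
 */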
3299 
3300 static int resp_temp_l_pg(unsigned char *arr)
3301 {
3302 	static const unsigned char temp_l_pg[] = {
3303 		0x0, 0x0, 0x3, 0x2, 0x0, 38,
3304 		0x0, 0x1, 0x3, 0x2, 0x0, 65,
3305 	};
3306 
3307 	memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
3308 	return sizeof(temp_l_pg);
3309 }
3310 
3311 static int resp_ie_l_pg(unsigned char *arr)
3312 {
3313 	static const unsigned char ie_l_pg[] = {
3314 		0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
3315 	};
3316 
3317 	memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
3318 	if (iec_m_pg[2] & 0x4) {	/* TEST bit set */
3319 		arr[4] = THRESHOLD_EXCEEDED;
3320 		arr[5] = 0xff;
3321 	}
3322 	return sizeof(ie_l_pg);
3323 }
3324 
3325 static int resp_env_rep_l_spg(unsigned char *arr)
3326 {
3327 	static const unsigned char env_rep_l_spg[] = {
3328 		0x0, 0x0, 0x23, 0x8,
3329 		0x0, 40, 72, 0xff, 45, 18, 0, 0,
3330 		0x1, 0x0, 0x23, 0x8,
3331 		0x0, 55, 72, 35, 55, 45, 0, 0,
3332 	};
3333 
3334 	memcpy(arr, env_rep_l_spg, sizeof(env_rep_l_spg));
3335 	return sizeof(env_rep_l_spg);
3336 }
3337 
3338 #define SDEBUG_MAX_LSENSE_SZ 512
3339 
3340 static int resp_log_sense(struct scsi_cmnd *scp,
3341 			  struct sdebug_dev_info *devip)
3342 {
3343 	int ppc, sp, pcode, subpcode;
3344 	u32 alloc_len, len, n;
3345 	unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
3346 	unsigned char *cmd = scp->cmnd;
3347 
3348 	memset(arr, 0, sizeof(arr));
3349 	ppc = cmd[1] & 0x2;
3350 	sp = cmd[1] & 0x1;
3351 	if (ppc || sp) {
3352 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, ppc ? 1 : 0);
3353 		return check_condition_result;
3354 	}
3355 	pcode = cmd[2] & 0x3f;
3356 	subpcode = cmd[3] & 0xff;
3357 	alloc_len = get_unaligned_be16(cmd + 7);
3358 	arr[0] = pcode;
3359 	if (0 == subpcode) {
3360 		switch (pcode) {
3361 		case 0x0:	/* Supported log pages log page */
3362 			n = 4;
3363 			arr[n++] = 0x0;		/* this page */
3364 			arr[n++] = 0xd;		/* Temperature */
3365 			arr[n++] = 0x2f;	/* Informational exceptions */
3366 			arr[3] = n - 4;
3367 			break;
3368 		case 0xd:	/* Temperature log page */
3369 			arr[3] = resp_temp_l_pg(arr + 4);
3370 			break;
3371 		case 0x2f:	/* Informational exceptions log page */
3372 			arr[3] = resp_ie_l_pg(arr + 4);
3373 			break;
3374 		default:
3375 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
3376 			return check_condition_result;
3377 		}
3378 	} else if (0xff == subpcode) {
3379 		arr[0] |= 0x40;
3380 		arr[1] = subpcode;
3381 		switch (pcode) {
3382 		case 0x0:	/* Supported log pages and subpages log page */
3383 			n = 4;
3384 			arr[n++] = 0x0;
3385 			arr[n++] = 0x0;		/* 0,0 page */
3386 			arr[n++] = 0x0;
3387 			arr[n++] = 0xff;	/* this page */
3388 			arr[n++] = 0xd;
3389 			arr[n++] = 0x0;		/* Temperature */
3390 			arr[n++] = 0xd;
3391 			arr[n++] = 0x1;		/* Environment reporting */
3392 			arr[n++] = 0xd;
3393 			arr[n++] = 0xff;	/* all 0xd subpages */
3394 			arr[n++] = 0x2f;
3395 			arr[n++] = 0x0;	/* Informational exceptions */
3396 			arr[n++] = 0x2f;
3397 			arr[n++] = 0xff;	/* all 0x2f subpages */
3398 			arr[3] = n - 4;
3399 			break;
3400 		case 0xd:	/* Temperature subpages */
3401 			n = 4;
3402 			arr[n++] = 0xd;
3403 			arr[n++] = 0x0;		/* Temperature */
3404 			arr[n++] = 0xd;
3405 			arr[n++] = 0x1;		/* Environment reporting */
3406 			arr[n++] = 0xd;
3407 			arr[n++] = 0xff;	/* these subpages */
3408 			arr[3] = n - 4;
3409 			break;
3410 		case 0x2f:	/* Informational exceptions subpages */
3411 			n = 4;
3412 			arr[n++] = 0x2f;
3413 			arr[n++] = 0x0;		/* Informational exceptions */
3414 			arr[n++] = 0x2f;
3415 			arr[n++] = 0xff;	/* these subpages */
3416 			arr[3] = n - 4;
3417 			break;
3418 		default:
3419 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
3420 			return check_condition_result;
3421 		}
3422 	} else if (subpcode > 0) {
3423 		arr[0] |= 0x40;
3424 		arr[1] = subpcode;
3425 		if (pcode == 0xd && subpcode == 1)
3426 			arr[3] = resp_env_rep_l_spg(arr + 4);
3427 		else {
3428 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
3429 			return check_condition_result;
3430 		}
3431 	} else {
3432 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
3433 		return check_condition_result;
3434 	}
3435 	len = min_t(u32, get_unaligned_be16(arr + 2) + 4, alloc_len);
3436 	return fill_from_dev_buffer(scp, arr,
3437 		    min_t(u32, len, SDEBUG_MAX_INQ_ARR_SZ));
3438 }
3439 
3440 enum {SDEBUG_READ_BLOCK_LIMITS_ARR_SZ = 6};
3441 static int resp_read_blklimits(struct scsi_cmnd *scp,
3442 			struct sdebug_dev_info *devip)
3443 {
3444 	unsigned char arr[SDEBUG_READ_BLOCK_LIMITS_ARR_SZ];
3445 
3446 	arr[0] = 4;
3447 	put_unaligned_be24(TAPE_MAX_BLKSIZE, arr + 1);
3448 	put_unaligned_be16(TAPE_MIN_BLKSIZE, arr + 4);
3449 	return fill_from_dev_buffer(scp, arr, SDEBUG_READ_BLOCK_LIMITS_ARR_SZ);
3450 }
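
/*
 * Editorial sketch (not driver code): decoding the 6-byte READ BLOCK
 * LIMITS response built above, with field meanings per SSC. Compiled out
 * deliberately.
 */
#if 0
	unsigned char rbl[SDEBUG_READ_BLOCK_LIMITS_ARR_SZ];
	/* ... after the command completes ... */
	unsigned int granularity = rbl[0] & 0x1f;		 /* 4 */
	unsigned int max_blksize = get_unaligned_be24(rbl + 1);	 /* TAPE_MAX_BLKSIZE */
	unsigned int min_blksize = get_unaligned_be16(rbl + 4);	 /* TAPE_MIN_BLKSIZE */
#endif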
3451 
3452 static int resp_locate(struct scsi_cmnd *scp,
3453 		struct sdebug_dev_info *devip)
3454 {
3455 	unsigned char *cmd = scp->cmnd;
3456 	unsigned int i, pos;
3457 	struct tape_block *blp;
3458 	int partition;
3459 
3460 	if ((cmd[1] & 0x02) != 0) {
3461 		if (cmd[8] >= devip->tape_nbr_partitions) {
3462 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, -1);
3463 			return check_condition_result;
3464 		}
3465 		devip->tape_partition = cmd[8];
3466 	}
3467 	pos = get_unaligned_be32(cmd + 3);
3468 	partition = devip->tape_partition;
3469 
3470 	for (i = 0, blp = devip->tape_blocks[partition];
3471 	     i < pos && i < devip->tape_eop[partition]; i++, blp++)
3472 		if (IS_TAPE_BLOCK_EOD(blp->fl_size))
3473 			break;
3474 	if (i < pos) {
3475 		devip->tape_location[partition] = i;
3476 		mk_sense_buffer(scp, BLANK_CHECK, 0x05, 0);
3477 		return check_condition_result;
3478 	}
3479 	devip->tape_location[partition] = pos;
3480 
3481 	return 0;
3482 }
3483 
3484 static int resp_write_filemarks(struct scsi_cmnd *scp,
3485 		struct sdebug_dev_info *devip)
3486 {
3487 	unsigned char *cmd = scp->cmnd;
3488 	unsigned int i, count, pos;
3489 	u32 data;
3490 	int partition = devip->tape_partition;
3491 
3492 	if ((cmd[1] & 0xfe) != 0) { /* probably write setmarks, not in >= SCSI-3 */
3493 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
3494 		return check_condition_result;
3495 	}
3496 	count = get_unaligned_be24(cmd + 2);
3497 	data = TAPE_BLOCK_FM_FLAG;
3498 	for (i = 0, pos = devip->tape_location[partition]; i < count; i++, pos++) {
3499 		if (pos >= devip->tape_eop[partition] - 1) { /* don't overwrite EOD */
3500 			devip->tape_location[partition] = devip->tape_eop[partition] - 1;
3501 			mk_sense_info_tape(scp, VOLUME_OVERFLOW, NO_ADDITIONAL_SENSE,
3502 					EOP_EOM_DETECTED_ASCQ, count, SENSE_FLAG_EOM);
3503 			return check_condition_result;
3504 		}
3505 		(devip->tape_blocks[partition] + pos)->fl_size = data;
3506 	}
3507 	(devip->tape_blocks[partition] + pos)->fl_size =
3508 		TAPE_BLOCK_EOD_FLAG;
3509 	devip->tape_location[partition] = pos;
3510 
3511 	return 0;
3512 }
3513 
3514 static int resp_space(struct scsi_cmnd *scp,
3515 		struct sdebug_dev_info *devip)
3516 {
3517 	unsigned char *cmd = scp->cmnd, code;
3518 	int i = 0, pos, count;
3519 	struct tape_block *blp;
3520 	int partition = devip->tape_partition;
3521 
3522 	count = get_unaligned_be24(cmd + 2);
3523 	if ((count & 0x800000) != 0) /* extend negative to 32-bit count */
3524 		count |= 0xff000000;
3525 	code = cmd[1] & 0x0f;
3526 
3527 	pos = devip->tape_location[partition];
3528 	if (code == 0) { /* blocks */
3529 		if (count < 0) {
3530 			count = (-count);
3531 			pos -= 1;
3532 			for (i = 0, blp = devip->tape_blocks[partition] + pos; i < count;
3533 			     i++) {
3534 				if (pos < 0)
3535 					goto is_bop;
3536 				else if (IS_TAPE_BLOCK_FM(blp->fl_size))
3537 					goto is_fm;
3538 				if (i > 0) {
3539 					pos--;
3540 					blp--;
3541 				}
3542 			}
3543 		} else if (count > 0) {
3544 			for (i = 0, blp = devip->tape_blocks[partition] + pos; i < count;
3545 			     i++, pos++, blp++) {
3546 				if (IS_TAPE_BLOCK_EOD(blp->fl_size))
3547 					goto is_eod;
3548 				if (IS_TAPE_BLOCK_FM(blp->fl_size)) {
3549 					pos += 1;
3550 					goto is_fm;
3551 				}
3552 				if (pos >= devip->tape_eop[partition])
3553 					goto is_eop;
3554 			}
3555 		}
3556 	} else if (code == 1) { /* filemarks */
3557 		if (count < 0) {
3558 			count = (-count);
3559 			if (pos == 0)
3560 				goto is_bop;
3561 			else {
3562 				for (i = 0, blp = devip->tape_blocks[partition] + pos;
3563 				     i < count && pos >= 0; i++, pos--, blp--) {
3564 					for (pos--, blp-- ; !IS_TAPE_BLOCK_FM(blp->fl_size) &&
3565 						     pos >= 0; pos--, blp--)
3566 						; /* empty */
3567 					if (pos < 0)
3568 						goto is_bop;
3569 				}
3570 			}
3571 			pos += 1;
3572 		} else if (count > 0) {
3573 			for (i = 0, blp = devip->tape_blocks[partition] + pos;
3574 			     i < count; i++, pos++, blp++) {
3575 				for ( ; !IS_TAPE_BLOCK_FM(blp->fl_size) &&
3576 					      !IS_TAPE_BLOCK_EOD(blp->fl_size) &&
3577 					      pos < devip->tape_eop[partition];
3578 				      pos++, blp++)
3579 					; /* empty */
3580 				if (IS_TAPE_BLOCK_EOD(blp->fl_size))
3581 					goto is_eod;
3582 				if (pos >= devip->tape_eop[partition])
3583 					goto is_eop;
3584 			}
3585 		}
3586 	} else if (code == 3) { /* EOD */
3587 		for (blp = devip->tape_blocks[partition] + pos;
3588 		     !IS_TAPE_BLOCK_EOD(blp->fl_size) && pos < devip->tape_eop[partition];
3589 		     pos++, blp++)
3590 			; /* empty */
3591 		if (pos >= devip->tape_eop[partition])
3592 			goto is_eop;
3593 	} else {
3594 		/* sequential filemarks not supported */
3595 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, -1);
3596 		return check_condition_result;
3597 	}
3598 	devip->tape_location[partition] = pos;
3599 	return 0;
3600 
3601 is_fm:
3602 	devip->tape_location[partition] = pos;
3603 	mk_sense_info_tape(scp, NO_SENSE, NO_ADDITIONAL_SENSE,
3604 			FILEMARK_DETECTED_ASCQ, count - i,
3605 			SENSE_FLAG_FILEMARK);
3606 	return check_condition_result;
3607 
3608 is_eod:
3609 	devip->tape_location[partition] = pos;
3610 	mk_sense_info_tape(scp, BLANK_CHECK, NO_ADDITIONAL_SENSE,
3611 			EOD_DETECTED_ASCQ, count - i,
3612 			0);
3613 	return check_condition_result;
3614 
3615 is_bop:
3616 	devip->tape_location[partition] = 0;
3617 	mk_sense_info_tape(scp, NO_SENSE, NO_ADDITIONAL_SENSE,
3618 			BEGINNING_OF_P_M_DETECTED_ASCQ, count - i,
3619 			SENSE_FLAG_EOM);
3620 	devip->tape_location[partition] = 0;
3621 	return check_condition_result;
3622 
3623 is_eop:
3624 	devip->tape_location[partition] = devip->tape_eop[partition] - 1;
3625 	mk_sense_info_tape(scp, MEDIUM_ERROR, NO_ADDITIONAL_SENSE,
3626 			EOP_EOM_DETECTED_ASCQ, (unsigned int)i,
3627 			SENSE_FLAG_EOM);
3628 	return check_condition_result;
3629 }
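
/*
 * Worked example (editorial addition): the SPACE count is a 24-bit
 * two's-complement field, so the sign extension near the top of
 * resp_space() turns 0xffffff into -1. Compiled out deliberately.
 */
#if 0
	int count = 0xffffff;		/* raw big-endian 24-bit value from the CDB */
	if ((count & 0x800000) != 0)	/* sign bit of the 24-bit field */
		count |= 0xff000000;	/* count == -1: space backwards one element */
#endif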
3630 
3631 enum {SDEBUG_READ_POSITION_ARR_SZ = 20};
3632 static int resp_read_position(struct scsi_cmnd *scp,
3633 			struct sdebug_dev_info *devip)
3634 {
3635 	u8 *cmd = scp->cmnd;
3636 	int all_length;
3637 	unsigned char arr[SDEBUG_READ_POSITION_ARR_SZ];
3638 	unsigned int pos;
3639 
3640 	all_length = get_unaligned_be16(cmd + 7);
3641 	if ((cmd[1] & 0xfe) != 0 ||
3642 		all_length != 0) { /* only short form */
3643 		mk_sense_invalid_fld(scp, SDEB_IN_CDB,
3644 				all_length ? 7 : 1, 0);
3645 		return check_condition_result;
3646 	}
3647 	memset(arr, 0, SDEBUG_READ_POSITION_ARR_SZ);
3648 	arr[1] = devip->tape_partition;
3649 	pos = devip->tape_location[devip->tape_partition];
3650 	put_unaligned_be32(pos, arr + 4);
3651 	put_unaligned_be32(pos, arr + 8);
3652 	return fill_from_dev_buffer(scp, arr, SDEBUG_READ_POSITION_ARR_SZ);
3653 }
3654 
3655 static int resp_rewind(struct scsi_cmnd *scp,
3656 		struct sdebug_dev_info *devip)
3657 {
3658 	devip->tape_location[devip->tape_partition] = 0;
3659 
3660 	return 0;
3661 }
3662 
3663 static int partition_tape(struct sdebug_dev_info *devip, int nbr_partitions,
3664 			int part_0_size, int part_1_size)
3665 {
3666 	int i;
3667 
3668 	if (part_0_size + part_1_size > TAPE_UNITS)
3669 		return -1;
3670 	devip->tape_eop[0] = part_0_size;
3671 	devip->tape_blocks[0]->fl_size = TAPE_BLOCK_EOD_FLAG;
3672 	devip->tape_eop[1] = part_1_size;
3673 	devip->tape_blocks[1] = devip->tape_blocks[0] +
3674 			devip->tape_eop[0];
3675 	devip->tape_blocks[1]->fl_size = TAPE_BLOCK_EOD_FLAG;
3676 
3677 	for (i = 0 ; i < TAPE_MAX_PARTITIONS; i++)
3678 		devip->tape_location[i] = 0;
3679 
3680 	devip->tape_nbr_partitions = nbr_partitions;
3681 	devip->tape_partition = 0;
3682 
3683 	partition_pg[3] = nbr_partitions - 1;
3684 	put_unaligned_be16(devip->tape_eop[0], partition_pg + 8);
3685 	put_unaligned_be16(devip->tape_eop[1], partition_pg + 10);
3686 
3687 	return nbr_partitions;
3688 }
3689 
3690 static int resp_format_medium(struct scsi_cmnd *scp,
3691 			struct sdebug_dev_info *devip)
3692 {
3693 	int res = 0;
3694 	unsigned char *cmd = scp->cmnd;
3695 
3696 	if (cmd[2] > 2) {
3697 		mk_sense_invalid_fld(scp, SDEB_IN_DATA, 2, -1);
3698 		return check_condition_result;
3699 	}
3700 	if (cmd[2] != 0) {
3701 		if (devip->tape_pending_nbr_partitions > 0) {
3702 			res = partition_tape(devip,
3703 					devip->tape_pending_nbr_partitions,
3704 					devip->tape_pending_part_0_size,
3705 					devip->tape_pending_part_1_size);
3706 		} else
3707 			res = partition_tape(devip, devip->tape_nbr_partitions,
3708 					devip->tape_eop[0], devip->tape_eop[1]);
3709 	} else
3710 		res = partition_tape(devip, 1, TAPE_UNITS, 0);
3711 	if (res < 0)
3712 		return -EINVAL;
3713 
3714 	devip->tape_pending_nbr_partitions = -1;
3715 
3716 	return 0;
3717 }
3718 
3719 static int resp_erase(struct scsi_cmnd *scp,
3720 		struct sdebug_dev_info *devip)
3721 {
3722 	int partition = devip->tape_partition;
3723 	int pos = devip->tape_location[partition];
3724 	struct tape_block *blp;
3725 
3726 	blp = devip->tape_blocks[partition] + pos;
3727 	blp->fl_size = TAPE_BLOCK_EOD_FLAG;
3728 
3729 	return 0;
3730 }
3731 
3732 static inline bool sdebug_dev_is_zoned(struct sdebug_dev_info *devip)
3733 {
3734 	return devip->nr_zones != 0;
3735 }
3736 
3737 static struct sdeb_zone_state *zbc_zone(struct sdebug_dev_info *devip,
3738 					unsigned long long lba)
3739 {
3740 	u32 zno = lba >> devip->zsize_shift;
3741 	struct sdeb_zone_state *zsp;
3742 
3743 	if (devip->zcap == devip->zsize || zno < devip->nr_conv_zones)
3744 		return &devip->zstate[zno];
3745 
3746 	/*
3747 	 * If the zone capacity is less than the zone size, adjust for gap
3748 	 * zones.
3749 	 */
3750 	zno = 2 * zno - devip->nr_conv_zones;
3751 	WARN_ONCE(zno >= devip->nr_zones, "%u > %u\n", zno, devip->nr_zones);
3752 	zsp = &devip->zstate[zno];
3753 	if (lba >= zsp->z_start + zsp->z_size)
3754 		zsp++;
3755 	WARN_ON_ONCE(lba >= zsp->z_start + zsp->z_size);
3756 	return zsp;
3757 }
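
/*
 * Worked example (editorial addition) for the gap-zone math in zbc_zone():
 * with nr_conv_zones = 2 and zone capacity < zone size, the zone state
 * array alternates sequential/gap pairs after the conventional zones:
 *
 *   index:  0    1    2    3    4    5
 *   type:   CNV  CNV  SWR  GAP  SWR  GAP
 *
 * An LBA in logical zone zno = 3 maps to index 2 * 3 - 2 = 4 (the second
 * SWR zone); if the LBA lies past that zone's capacity, zsp++ selects the
 * adjacent gap zone at index 5.
 */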
3758 
3759 static inline bool zbc_zone_is_conv(struct sdeb_zone_state *zsp)
3760 {
3761 	return zsp->z_type == ZBC_ZTYPE_CNV;
3762 }
3763 
3764 static inline bool zbc_zone_is_gap(struct sdeb_zone_state *zsp)
3765 {
3766 	return zsp->z_type == ZBC_ZTYPE_GAP;
3767 }
3768 
3769 static inline bool zbc_zone_is_seq(struct sdeb_zone_state *zsp)
3770 {
3771 	return !zbc_zone_is_conv(zsp) && !zbc_zone_is_gap(zsp);
3772 }
3773 
3774 static void zbc_close_zone(struct sdebug_dev_info *devip,
3775 			   struct sdeb_zone_state *zsp)
3776 {
3777 	enum sdebug_z_cond zc;
3778 
3779 	if (!zbc_zone_is_seq(zsp))
3780 		return;
3781 
3782 	zc = zsp->z_cond;
3783 	if (!(zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN))
3784 		return;
3785 
3786 	if (zc == ZC2_IMPLICIT_OPEN)
3787 		devip->nr_imp_open--;
3788 	else
3789 		devip->nr_exp_open--;
3790 
3791 	if (zsp->z_wp == zsp->z_start) {
3792 		zsp->z_cond = ZC1_EMPTY;
3793 	} else {
3794 		zsp->z_cond = ZC4_CLOSED;
3795 		devip->nr_closed++;
3796 	}
3797 }
3798 
3799 static void zbc_close_imp_open_zone(struct sdebug_dev_info *devip)
3800 {
3801 	struct sdeb_zone_state *zsp = &devip->zstate[0];
3802 	unsigned int i;
3803 
3804 	for (i = 0; i < devip->nr_zones; i++, zsp++) {
3805 		if (zsp->z_cond == ZC2_IMPLICIT_OPEN) {
3806 			zbc_close_zone(devip, zsp);
3807 			return;
3808 		}
3809 	}
3810 }
3811 
3812 static void zbc_open_zone(struct sdebug_dev_info *devip,
3813 			  struct sdeb_zone_state *zsp, bool explicit)
3814 {
3815 	enum sdebug_z_cond zc;
3816 
3817 	if (!zbc_zone_is_seq(zsp))
3818 		return;
3819 
3820 	zc = zsp->z_cond;
3821 	if ((explicit && zc == ZC3_EXPLICIT_OPEN) ||
3822 	    (!explicit && zc == ZC2_IMPLICIT_OPEN))
3823 		return;
3824 
3825 	/* Close an implicit open zone if necessary */
3826 	if (explicit && zsp->z_cond == ZC2_IMPLICIT_OPEN)
3827 		zbc_close_zone(devip, zsp);
3828 	else if (devip->max_open &&
3829 		 devip->nr_imp_open + devip->nr_exp_open >= devip->max_open)
3830 		zbc_close_imp_open_zone(devip);
3831 
3832 	if (zsp->z_cond == ZC4_CLOSED)
3833 		devip->nr_closed--;
3834 	if (explicit) {
3835 		zsp->z_cond = ZC3_EXPLICIT_OPEN;
3836 		devip->nr_exp_open++;
3837 	} else {
3838 		zsp->z_cond = ZC2_IMPLICIT_OPEN;
3839 		devip->nr_imp_open++;
3840 	}
3841 }
3842 
3843 static inline void zbc_set_zone_full(struct sdebug_dev_info *devip,
3844 				     struct sdeb_zone_state *zsp)
3845 {
3846 	switch (zsp->z_cond) {
3847 	case ZC2_IMPLICIT_OPEN:
3848 		devip->nr_imp_open--;
3849 		break;
3850 	case ZC3_EXPLICIT_OPEN:
3851 		devip->nr_exp_open--;
3852 		break;
3853 	default:
3854 		WARN_ONCE(true, "Invalid zone %llu condition %x\n",
3855 			  zsp->z_start, zsp->z_cond);
3856 		break;
3857 	}
3858 	zsp->z_cond = ZC5_FULL;
3859 }
3860 
3861 static void zbc_inc_wp(struct sdebug_dev_info *devip,
3862 		       unsigned long long lba, unsigned int num)
3863 {
3864 	struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
3865 	unsigned long long n, end, zend = zsp->z_start + zsp->z_size;
3866 
3867 	if (!zbc_zone_is_seq(zsp))
3868 		return;
3869 
3870 	if (zsp->z_type == ZBC_ZTYPE_SWR) {
3871 		zsp->z_wp += num;
3872 		if (zsp->z_wp >= zend)
3873 			zbc_set_zone_full(devip, zsp);
3874 		return;
3875 	}
3876 
3877 	while (num) {
3878 		if (lba != zsp->z_wp)
3879 			zsp->z_non_seq_resource = true;
3880 
3881 		end = lba + num;
3882 		if (end >= zend) {
3883 			n = zend - lba;
3884 			zsp->z_wp = zend;
3885 		} else if (end > zsp->z_wp) {
3886 			n = num;
3887 			zsp->z_wp = end;
3888 		} else {
3889 			n = num;
3890 		}
3891 		if (zsp->z_wp >= zend)
3892 			zbc_set_zone_full(devip, zsp);
3893 
3894 		num -= n;
3895 		lba += n;
3896 		if (num) {
3897 			zsp++;
3898 			zend = zsp->z_start + zsp->z_size;
3899 		}
3900 	}
3901 }
3902 
3903 static int check_zbc_access_params(struct scsi_cmnd *scp,
3904 			unsigned long long lba, unsigned int num, bool write)
3905 {
3906 	struct scsi_device *sdp = scp->device;
3907 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
3908 	struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
3909 	struct sdeb_zone_state *zsp_end = zbc_zone(devip, lba + num - 1);
3910 
3911 	if (!write) {
3912 		/* For host-managed, reads cannot cross zone type boundaries */
3913 		if (zsp->z_type != zsp_end->z_type) {
3914 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
3915 					LBA_OUT_OF_RANGE,
3916 					READ_INVDATA_ASCQ);
3917 			return check_condition_result;
3918 		}
3919 		return 0;
3920 	}
3921 
3922 	/* Writing into a gap zone is not allowed */
3923 	if (zbc_zone_is_gap(zsp)) {
3924 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE,
3925 				ATTEMPT_ACCESS_GAP);
3926 		return check_condition_result;
3927 	}
3928 
3929 	/* No restrictions for writes within conventional zones */
3930 	if (zbc_zone_is_conv(zsp)) {
3931 		if (!zbc_zone_is_conv(zsp_end)) {
3932 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
3933 					LBA_OUT_OF_RANGE,
3934 					WRITE_BOUNDARY_ASCQ);
3935 			return check_condition_result;
3936 		}
3937 		return 0;
3938 	}
3939 
3940 	if (zsp->z_type == ZBC_ZTYPE_SWR) {
3941 		/* Writes cannot cross sequential zone boundaries */
3942 		if (zsp_end != zsp) {
3943 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
3944 					LBA_OUT_OF_RANGE,
3945 					WRITE_BOUNDARY_ASCQ);
3946 			return check_condition_result;
3947 		}
3948 		/* Cannot write full zones */
3949 		if (zsp->z_cond == ZC5_FULL) {
3950 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
3951 					INVALID_FIELD_IN_CDB, 0);
3952 			return check_condition_result;
3953 		}
3954 		/* Writes must be aligned to the zone WP */
3955 		if (lba != zsp->z_wp) {
3956 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
3957 					LBA_OUT_OF_RANGE,
3958 					UNALIGNED_WRITE_ASCQ);
3959 			return check_condition_result;
3960 		}
3961 	}
3962 
3963 	/* Handle implicit open of closed and empty zones */
3964 	if (zsp->z_cond == ZC1_EMPTY || zsp->z_cond == ZC4_CLOSED) {
3965 		if (devip->max_open &&
3966 		    devip->nr_exp_open >= devip->max_open) {
3967 			mk_sense_buffer(scp, DATA_PROTECT,
3968 					INSUFF_RES_ASC,
3969 					INSUFF_ZONE_ASCQ);
3970 			return check_condition_result;
3971 		}
3972 		zbc_open_zone(devip, zsp, false);
3973 	}
3974 
3975 	return 0;
3976 }
3977 
3978 static inline int check_device_access_params
3979 			(struct scsi_cmnd *scp, unsigned long long lba,
3980 			 unsigned int num, bool write)
3981 {
3982 	struct scsi_device *sdp = scp->device;
3983 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
3984 
3985 	if (lba + num > sdebug_capacity) {
3986 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
3987 		return check_condition_result;
3988 	}
3989 	/* transfer length excessive (tie in to block limits VPD page) */
3990 	if (num > sdebug_store_sectors) {
3991 		/* needs work to find which cdb byte 'num' comes from */
3992 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3993 		return check_condition_result;
3994 	}
3995 	if (write && unlikely(sdebug_wp)) {
3996 		mk_sense_buffer(scp, DATA_PROTECT, WRITE_PROTECTED, 0x2);
3997 		return check_condition_result;
3998 	}
3999 	if (sdebug_dev_is_zoned(devip))
4000 		return check_zbc_access_params(scp, lba, num, write);
4001 
4002 	return 0;
4003 }
4004 
4005 /*
4006  * Note: if BUG_ON() fires it usually indicates a problem with the parser
4007  * tables. Perhaps a missing F_FAKE_RW or FF_MEDIA_IO flag. Response functions
4008  * that access any of the "stores" in struct sdeb_store_info should call this
4009  * function with bug_if_fake_rw set to true.
4010  */
4011 static inline struct sdeb_store_info *devip2sip(struct sdebug_dev_info *devip,
4012 						bool bug_if_fake_rw)
4013 {
4014 	if (sdebug_fake_rw) {
4015 		BUG_ON(bug_if_fake_rw);	/* See note above */
4016 		return NULL;
4017 	}
4018 	return xa_load(per_store_ap, devip->sdbg_host->si_idx);
4019 }
4020 
4021 static inline void
4022 sdeb_read_lock(rwlock_t *lock)
4023 {
4024 	if (sdebug_no_rwlock)
4025 		__acquire(lock);
4026 	else
4027 		read_lock(lock);
4028 }
4029 
4030 static inline void
4031 sdeb_read_unlock(rwlock_t *lock)
4032 {
4033 	if (sdebug_no_rwlock)
4034 		__release(lock);
4035 	else
4036 		read_unlock(lock);
4037 }
4038 
4039 static inline void
4040 sdeb_write_lock(rwlock_t *lock)
4041 {
4042 	if (sdebug_no_rwlock)
4043 		__acquire(lock);
4044 	else
4045 		write_lock(lock);
4046 }
4047 
4048 static inline void
4049 sdeb_write_unlock(rwlock_t *lock)
4050 {
4051 	if (sdebug_no_rwlock)
4052 		__release(lock);
4053 	else
4054 		write_unlock(lock);
4055 }
4056 
4057 static inline void
4058 sdeb_data_read_lock(struct sdeb_store_info *sip)
4059 {
4060 	BUG_ON(!sip);
4061 
4062 	sdeb_read_lock(&sip->macc_data_lck);
4063 }
4064 
4065 static inline void
4066 sdeb_data_read_unlock(struct sdeb_store_info *sip)
4067 {
4068 	BUG_ON(!sip);
4069 
4070 	sdeb_read_unlock(&sip->macc_data_lck);
4071 }
4072 
4073 static inline void
4074 sdeb_data_write_lock(struct sdeb_store_info *sip)
4075 {
4076 	BUG_ON(!sip);
4077 
4078 	sdeb_write_lock(&sip->macc_data_lck);
4079 }
4080 
4081 static inline void
4082 sdeb_data_write_unlock(struct sdeb_store_info *sip)
4083 {
4084 	BUG_ON(!sip);
4085 
4086 	sdeb_write_unlock(&sip->macc_data_lck);
4087 }
4088 
4089 static inline void
4090 sdeb_data_sector_read_lock(struct sdeb_store_info *sip)
4091 {
4092 	BUG_ON(!sip);
4093 
4094 	sdeb_read_lock(&sip->macc_sector_lck);
4095 }
4096 
4097 static inline void
4098 sdeb_data_sector_read_unlock(struct sdeb_store_info *sip)
4099 {
4100 	BUG_ON(!sip);
4101 
4102 	sdeb_read_unlock(&sip->macc_sector_lck);
4103 }
4104 
4105 static inline void
4106 sdeb_data_sector_write_lock(struct sdeb_store_info *sip)
4107 {
4108 	BUG_ON(!sip);
4109 
4110 	sdeb_write_lock(&sip->macc_sector_lck);
4111 }
4112 
4113 static inline void
4114 sdeb_data_sector_write_unlock(struct sdeb_store_info *sip)
4115 {
4116 	BUG_ON(!sip);
4117 
4118 	sdeb_write_unlock(&sip->macc_sector_lck);
4119 }
4120 
4121 /*
4122  * Atomic locking:
4123  * We simplify the atomic model to allow only 1x atomic write and many non-
4124  * atomic reads or writes for all LBAs.
4125  *
4126  * A RW lock has a similar behaviour:
4127  * Only 1x writer and many readers.
4128  *
4129  * So use a RW lock for per-device read and write locking:
4130  * An atomic access grabs the lock as a writer and non-atomic grabs the lock
4131  * as a reader.
4132  */
4133 
4134 static inline void
4135 sdeb_data_lock(struct sdeb_store_info *sip, bool atomic)
4136 {
4137 	if (atomic)
4138 		sdeb_data_write_lock(sip);
4139 	else
4140 		sdeb_data_read_lock(sip);
4141 }
4142 
4143 static inline void
4144 sdeb_data_unlock(struct sdeb_store_info *sip, bool atomic)
4145 {
4146 	if (atomic)
4147 		sdeb_data_write_unlock(sip);
4148 	else
4149 		sdeb_data_read_unlock(sip);
4150 }
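
/*
 * Usage sketch (editorial addition) for the model described above: an
 * atomic write takes the per-device lock as the sole writer, while normal
 * I/O shares it as a reader. Compiled out deliberately.
 */
#if 0
	sdeb_data_lock(sip, true);	/* atomic write: excludes all other I/O */
	/* ... update the store ... */
	sdeb_data_unlock(sip, true);

	sdeb_data_lock(sip, false);	/* normal read/write: shared access */
	/* ... per-sector exclusion still via sdeb_data_sector_lock() ... */
	sdeb_data_unlock(sip, false);
#endif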
4151 
4152 /* Allow many reads but only 1x write per sector */
4153 static inline void
4154 sdeb_data_sector_lock(struct sdeb_store_info *sip, bool do_write)
4155 {
4156 	if (do_write)
4157 		sdeb_data_sector_write_lock(sip);
4158 	else
4159 		sdeb_data_sector_read_lock(sip);
4160 }
4161 
4162 static inline void
4163 sdeb_data_sector_unlock(struct sdeb_store_info *sip, bool do_write)
4164 {
4165 	if (do_write)
4166 		sdeb_data_sector_write_unlock(sip);
4167 	else
4168 		sdeb_data_sector_read_unlock(sip);
4169 }
4170 
4171 static inline void
4172 sdeb_meta_read_lock(struct sdeb_store_info *sip)
4173 {
4174 	if (sdebug_no_rwlock) {
4175 		if (sip)
4176 			__acquire(&sip->macc_meta_lck);
4177 		else
4178 			__acquire(&sdeb_fake_rw_lck);
4179 	} else {
4180 		if (sip)
4181 			read_lock(&sip->macc_meta_lck);
4182 		else
4183 			read_lock(&sdeb_fake_rw_lck);
4184 	}
4185 }
4186 
4187 static inline void
4188 sdeb_meta_read_unlock(struct sdeb_store_info *sip)
4189 {
4190 	if (sdebug_no_rwlock) {
4191 		if (sip)
4192 			__release(&sip->macc_meta_lck);
4193 		else
4194 			__release(&sdeb_fake_rw_lck);
4195 	} else {
4196 		if (sip)
4197 			read_unlock(&sip->macc_meta_lck);
4198 		else
4199 			read_unlock(&sdeb_fake_rw_lck);
4200 	}
4201 }
4202 
4203 static inline void
4204 sdeb_meta_write_lock(struct sdeb_store_info *sip)
4205 {
4206 	if (sdebug_no_rwlock) {
4207 		if (sip)
4208 			__acquire(&sip->macc_meta_lck);
4209 		else
4210 			__acquire(&sdeb_fake_rw_lck);
4211 	} else {
4212 		if (sip)
4213 			write_lock(&sip->macc_meta_lck);
4214 		else
4215 			write_lock(&sdeb_fake_rw_lck);
4216 	}
4217 }
4218 
4219 static inline void
4220 sdeb_meta_write_unlock(struct sdeb_store_info *sip)
4221 {
4222 	if (sdebug_no_rwlock) {
4223 		if (sip)
4224 			__release(&sip->macc_meta_lck);
4225 		else
4226 			__release(&sdeb_fake_rw_lck);
4227 	} else {
4228 		if (sip)
4229 			write_unlock(&sip->macc_meta_lck);
4230 		else
4231 			write_unlock(&sdeb_fake_rw_lck);
4232 	}
4233 }
4234 
4235 /* Returns number of bytes copied or -1 if error. */
4236 static int do_device_access(struct sdeb_store_info *sip, struct scsi_cmnd *scp,
4237 			    u32 sg_skip, u64 lba, u32 num, u8 group_number,
4238 			    bool do_write, bool atomic)
4239 {
4240 	int ret;
4241 	u64 block;
4242 	enum dma_data_direction dir;
4243 	struct scsi_data_buffer *sdb = &scp->sdb;
4244 	u8 *fsp;
4245 	int i, total = 0;
4246 
4247 	/*
4248 	 * Even though reads are inherently atomic (in this driver), we expect
4249 	 * the atomic flag only for writes.
4250 	 */
4251 	if (!do_write && atomic)
4252 		return -1;
4253 
4254 	if (do_write) {
4255 		dir = DMA_TO_DEVICE;
4256 		write_since_sync = true;
4257 	} else {
4258 		dir = DMA_FROM_DEVICE;
4259 	}
4260 
4261 	if (!sdb->length || !sip)
4262 		return 0;
4263 	if (scp->sc_data_direction != dir)
4264 		return -1;
4265 
4266 	if (do_write && group_number < ARRAY_SIZE(writes_by_group_number))
4267 		atomic_long_inc(&writes_by_group_number[group_number]);
4268 
4269 	fsp = sip->storep;
4270 
4271 	block = do_div(lba, sdebug_store_sectors);
4272 
4273 	/* Only allow 1x atomic write or multiple non-atomic writes at any given time */
4274 	sdeb_data_lock(sip, atomic);
4275 	for (i = 0; i < num; i++) {
4276 		/* We shouldn't need to lock for atomic writes, but do it anyway */
4277 		sdeb_data_sector_lock(sip, do_write);
4278 		ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
4279 		   fsp + (block * sdebug_sector_size),
4280 		   sdebug_sector_size, sg_skip, do_write);
4281 		sdeb_data_sector_unlock(sip, do_write);
4282 		total += ret;
4283 		if (ret != sdebug_sector_size)
4284 			break;
4285 		sg_skip += sdebug_sector_size;
4286 		if (++block >= sdebug_store_sectors)
4287 			block = 0;
4288 	}
4289 	sdeb_data_unlock(sip, atomic);
4290 
4291 	return total;
4292 }
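
/*
 * Note (editorial addition) on the do_div() call above: do_div(n, base)
 * divides n in place and returns the remainder, so 'block' becomes the
 * starting sector within the store and accesses wrap modulo
 * sdebug_store_sectors. Compiled out deliberately.
 */
#if 0
	u64 lba = 10005;
	u32 block = do_div(lba, 10000);	/* lba == 1 (quotient), block == 5 */
#endif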
4293 
4294 /* Returns number of bytes copied or -1 if error. */
4295 static int do_dout_fetch(struct scsi_cmnd *scp, u32 num, u8 *doutp)
4296 {
4297 	struct scsi_data_buffer *sdb = &scp->sdb;
4298 
4299 	if (!sdb->length)
4300 		return 0;
4301 	if (scp->sc_data_direction != DMA_TO_DEVICE)
4302 		return -1;
4303 	return sg_copy_buffer(sdb->table.sgl, sdb->table.nents, doutp,
4304 			      num * sdebug_sector_size, 0, true);
4305 }
4306 
4307 /* If sip->storep+lba compares equal to arr(num), then copy top half of
4308  * arr into sip->storep+lba and return true. If comparison fails then
4309  * return false. */
4310 static bool comp_write_worker(struct sdeb_store_info *sip, u64 lba, u32 num,
4311 			      const u8 *arr, bool compare_only)
4312 {
4313 	bool res;
4314 	u64 block, rest = 0;
4315 	u32 store_blks = sdebug_store_sectors;
4316 	u32 lb_size = sdebug_sector_size;
4317 	u8 *fsp = sip->storep;
4318 
4319 	block = do_div(lba, store_blks);
4320 	if (block + num > store_blks)
4321 		rest = block + num - store_blks;
4322 
4323 	res = !memcmp(fsp + (block * lb_size), arr, (num - rest) * lb_size);
4324 	if (!res)
4325 		return res;
4326 	if (rest)
4327 		res = !memcmp(fsp, arr + ((num - rest) * lb_size),
4328 			      rest * lb_size);
4329 	if (!res)
4330 		return res;
4331 	if (compare_only)
4332 		return true;
4333 	arr += num * lb_size;
4334 	memcpy(fsp + (block * lb_size), arr, (num - rest) * lb_size);
4335 	if (rest)
4336 		memcpy(fsp, arr + ((num - rest) * lb_size), rest * lb_size);
4337 	return res;
4338 }
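
/*
 * Layout note (editorial addition): comp_write_worker() expects the
 * COMPARE AND WRITE payload for 'num' blocks as one contiguous buffer:
 *
 *   arr[0 .. num*lb_size - 1]               verify data compared to the store
 *   arr[num*lb_size .. 2*num*lb_size - 1]   write data copied in on a match
 *
 * With compare_only set, the second half is never used.
 */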
4339 
4340 static __be16 dif_compute_csum(const void *buf, int len)
4341 {
4342 	__be16 csum;
4343 
4344 	if (sdebug_guard)
4345 		csum = (__force __be16)ip_compute_csum(buf, len);
4346 	else
4347 		csum = cpu_to_be16(crc_t10dif(buf, len));
4348 
4349 	return csum;
4350 }
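
/*
 * Editorial sketch (not driver code): producing the T10 DIF guard tag for
 * one 512-byte sector with the CRC guard type used above (sdebug_guard
 * clear). Compiled out deliberately.
 */
#if 0
	u8 sector[512] = { 0 };		/* sector payload */
	__be16 guard = cpu_to_be16(crc_t10dif(sector, sizeof(sector)));
	/* dif_verify() returns 0x01 unless sdt->guard_tag equals 'guard' */
#endif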
4351 
4352 static int dif_verify(struct t10_pi_tuple *sdt, const void *data,
4353 		      sector_t sector, u32 ei_lba)
4354 {
4355 	__be16 csum = dif_compute_csum(data, sdebug_sector_size);
4356 
4357 	if (sdt->guard_tag != csum) {
4358 		pr_err("GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
4359 			(unsigned long)sector,
4360 			be16_to_cpu(sdt->guard_tag),
4361 			be16_to_cpu(csum));
4362 		return 0x01;
4363 	}
4364 	if (sdebug_dif == T10_PI_TYPE1_PROTECTION &&
4365 	    be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
4366 		pr_err("REF check failed on sector %lu\n",
4367 			(unsigned long)sector);
4368 		return 0x03;
4369 	}
4370 	if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
4371 	    be32_to_cpu(sdt->ref_tag) != ei_lba) {
4372 		pr_err("REF check failed on sector %lu\n",
4373 			(unsigned long)sector);
4374 		return 0x03;
4375 	}
4376 	return 0;
4377 }
4378 
4379 static void dif_copy_prot(struct scsi_cmnd *scp, sector_t sector,
4380 			  unsigned int sectors, bool read)
4381 {
4382 	size_t resid;
4383 	void *paddr;
4384 	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
4385 						scp->device->hostdata, true);
4386 	struct t10_pi_tuple *dif_storep = sip->dif_storep;
4387 	const void *dif_store_end = dif_storep + sdebug_store_sectors;
4388 	struct sg_mapping_iter miter;
4389 
4390 	/* Bytes of protection data to copy into sgl */
4391 	resid = sectors * sizeof(*dif_storep);
4392 
4393 	sg_miter_start(&miter, scsi_prot_sglist(scp),
4394 		       scsi_prot_sg_count(scp), SG_MITER_ATOMIC |
4395 		       (read ? SG_MITER_TO_SG : SG_MITER_FROM_SG));
4396 
4397 	while (sg_miter_next(&miter) && resid > 0) {
4398 		size_t len = min_t(size_t, miter.length, resid);
4399 		void *start = dif_store(sip, sector);
4400 		size_t rest = 0;
4401 
4402 		if (dif_store_end < start + len)
4403 			rest = start + len - dif_store_end;
4404 
4405 		paddr = miter.addr;
4406 
4407 		if (read)
4408 			memcpy(paddr, start, len - rest);
4409 		else
4410 			memcpy(start, paddr, len - rest);
4411 
4412 		if (rest) {
4413 			if (read)
4414 				memcpy(paddr + len - rest, dif_storep, rest);
4415 			else
4416 				memcpy(dif_storep, paddr + len - rest, rest);
4417 		}
4418 
4419 		sector += len / sizeof(*dif_storep);
4420 		resid -= len;
4421 	}
4422 	sg_miter_stop(&miter);
4423 }
4424 
4425 static int prot_verify_read(struct scsi_cmnd *scp, sector_t start_sec,
4426 			    unsigned int sectors, u32 ei_lba)
4427 {
4428 	int ret = 0;
4429 	unsigned int i;
4430 	sector_t sector;
4431 	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
4432 						scp->device->hostdata, true);
4433 	struct t10_pi_tuple *sdt;
4434 
4435 	for (i = 0; i < sectors; i++, ei_lba++) {
4436 		sector = start_sec + i;
4437 		sdt = dif_store(sip, sector);
4438 
4439 		if (sdt->app_tag == cpu_to_be16(0xffff))
4440 			continue;
4441 
4442 		/*
4443 		 * Because scsi_debug acts as both initiator and
4444 		 * target we proceed to verify the PI even if
4445 		 * RDPROTECT=3. This is done so the "initiator" knows
4446 		 * which type of error to return. Otherwise we would
4447 		 * have to iterate over the PI twice.
4448 		 */
4449 		if (scp->cmnd[1] >> 5) { /* RDPROTECT */
4450 			ret = dif_verify(sdt, lba2fake_store(sip, sector),
4451 					 sector, ei_lba);
4452 			if (ret) {
4453 				dif_errors++;
4454 				break;
4455 			}
4456 		}
4457 	}
4458 
4459 	dif_copy_prot(scp, start_sec, sectors, true);
4460 	dix_reads++;
4461 
4462 	return ret;
4463 }
4464 
4465 static int resp_read_tape(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4466 {
4467 	u32 i, num, transfer, size;
4468 	u8 *cmd = scp->cmnd;
4469 	struct scsi_data_buffer *sdb = &scp->sdb;
4470 	int partition = devip->tape_partition;
4471 	u32 pos = devip->tape_location[partition];
4472 	struct tape_block *blp;
4473 	bool fixed, sili;
4474 
4475 	if (cmd[0] != READ_6) { /* Only Read(6) supported */
4476 		mk_sense_invalid_opcode(scp);
4477 		return illegal_condition_result;
4478 	}
4479 	fixed = (cmd[1] & 0x1) != 0;
4480 	sili = (cmd[1] & 0x2) != 0;
4481 	if (fixed && sili) {
4482 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
4483 		return check_condition_result;
4484 	}
4485 
4486 	transfer = get_unaligned_be24(cmd + 2);
4487 	if (fixed) {
4488 		num = transfer;
4489 		size = devip->tape_blksize;
4490 	} else {
4491 		if (transfer < TAPE_MIN_BLKSIZE ||
4492 			transfer > TAPE_MAX_BLKSIZE) {
4493 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
4494 			return check_condition_result;
4495 		}
4496 		num = 1;
4497 		size = transfer;
4498 	}
4499 
4500 	for (i = 0, blp = devip->tape_blocks[partition] + pos;
4501 	     i < num && pos < devip->tape_eop[partition];
4502 	     i++, pos++, blp++) {
4503 		devip->tape_location[partition] = pos + 1;
4504 		if (IS_TAPE_BLOCK_FM(blp->fl_size)) {
4505 			mk_sense_info_tape(scp, NO_SENSE, NO_ADDITIONAL_SENSE,
4506 					FILEMARK_DETECTED_ASCQ, fixed ? num - i : size,
4507 					SENSE_FLAG_FILEMARK);
4508 			scsi_set_resid(scp, (num - i) * size);
4509 			return check_condition_result;
4510 		}
4511 		/* Assume no REW */
4512 		if (IS_TAPE_BLOCK_EOD(blp->fl_size)) {
4513 			mk_sense_info_tape(scp, BLANK_CHECK, NO_ADDITIONAL_SENSE,
4514 					EOD_DETECTED_ASCQ, fixed ? num - i : size,
4515 					0);
4516 			devip->tape_location[partition] = pos;
4517 			scsi_set_resid(scp, (num - i) * size);
4518 			return check_condition_result;
4519 		}
4520 		sg_zero_buffer(sdb->table.sgl, sdb->table.nents,
4521 			size, i * size);
4522 		sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
4523 			&(blp->data), 4, i * size, false);
4524 		if (fixed) {
4525 			if (blp->fl_size != devip->tape_blksize) {
4526 				scsi_set_resid(scp, (num - i) * size);
4527 				mk_sense_info_tape(scp, NO_SENSE, NO_ADDITIONAL_SENSE,
4528 						0, num - i,
4529 						SENSE_FLAG_ILI);
4530 				return check_condition_result;
4531 			}
4532 		} else {
4533 			if (blp->fl_size != size) {
4534 				if (blp->fl_size < size)
4535 					scsi_set_resid(scp, size - blp->fl_size);
4536 				if (!sili) {
4537 					mk_sense_info_tape(scp, NO_SENSE, NO_ADDITIONAL_SENSE,
4538 							0, size - blp->fl_size,
4539 							SENSE_FLAG_ILI);
4540 					return check_condition_result;
4541 				}
4542 			}
4543 		}
4544 	}
4545 	if (pos >= devip->tape_eop[partition]) {
4546 		mk_sense_info_tape(scp, NO_SENSE, NO_ADDITIONAL_SENSE,
4547 				EOP_EOM_DETECTED_ASCQ, fixed ? num - i : size,
4548 				SENSE_FLAG_EOM);
4549 		devip->tape_location[partition] = pos - 1;
4550 		return check_condition_result;
4551 	}
4552 	devip->tape_location[partition] = pos;
4553 
4554 	return 0;
4555 }
4556 
4557 static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4558 {
4559 	bool check_prot;
4560 	u32 num;
4561 	u32 ei_lba;
4562 	int ret;
4563 	u64 lba;
4564 	struct sdeb_store_info *sip = devip2sip(devip, true);
4565 	u8 *cmd = scp->cmnd;
4566 	bool meta_data_locked = false;
4567 
4568 	switch (cmd[0]) {
4569 	case READ_16:
4570 		ei_lba = 0;
4571 		lba = get_unaligned_be64(cmd + 2);
4572 		num = get_unaligned_be32(cmd + 10);
4573 		check_prot = true;
4574 		break;
4575 	case READ_10:
4576 		ei_lba = 0;
4577 		lba = get_unaligned_be32(cmd + 2);
4578 		num = get_unaligned_be16(cmd + 7);
4579 		check_prot = true;
4580 		break;
4581 	case READ_6:
4582 		ei_lba = 0;
4583 		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
4584 		      (u32)(cmd[1] & 0x1f) << 16;
4585 		num = (0 == cmd[4]) ? 256 : cmd[4];
4586 		check_prot = true;
4587 		break;
4588 	case READ_12:
4589 		ei_lba = 0;
4590 		lba = get_unaligned_be32(cmd + 2);
4591 		num = get_unaligned_be32(cmd + 6);
4592 		check_prot = true;
4593 		break;
4594 	case XDWRITEREAD_10:
4595 		ei_lba = 0;
4596 		lba = get_unaligned_be32(cmd + 2);
4597 		num = get_unaligned_be16(cmd + 7);
4598 		check_prot = false;
4599 		break;
4600 	default:	/* assume READ(32) */
4601 		lba = get_unaligned_be64(cmd + 12);
4602 		ei_lba = get_unaligned_be32(cmd + 20);
4603 		num = get_unaligned_be32(cmd + 28);
4604 		check_prot = false;
4605 		break;
4606 	}
4607 	if (unlikely(have_dif_prot && check_prot)) {
4608 		if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
4609 		    (cmd[1] & 0xe0)) {
4610 			mk_sense_invalid_opcode(scp);
4611 			return check_condition_result;
4612 		}
4613 		if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
4614 		     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
4615 		    (cmd[1] & 0xe0) == 0)
4616 			sdev_printk(KERN_ERR, scp->device, "Unprotected RD "
4617 				    "to DIF device\n");
4618 	}
4619 	if (unlikely((sdebug_opts & SDEBUG_OPT_SHORT_TRANSFER) &&
4620 		     atomic_read(&sdeb_inject_pending))) {
4621 		num /= 2;
4622 		atomic_set(&sdeb_inject_pending, 0);
4623 	}
4624 
4625 	/*
4626 	 * When checking device access params, for reads we only check data
4627 	 * versus what is set at init time, so no need to lock.
4628 	 */
4629 	ret = check_device_access_params(scp, lba, num, false);
4630 	if (ret)
4631 		return ret;
4632 	if (unlikely((SDEBUG_OPT_MEDIUM_ERR & sdebug_opts) &&
4633 		     (lba <= (sdebug_medium_error_start + sdebug_medium_error_count - 1)) &&
4634 		     ((lba + num) > sdebug_medium_error_start))) {
4635 		/* claim unrecoverable read error */
4636 		mk_sense_buffer(scp, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
4637 		/* set info field and valid bit for fixed descriptor */
4638 		if (0x70 == (scp->sense_buffer[0] & 0x7f)) {
4639 			scp->sense_buffer[0] |= 0x80;	/* Valid bit */
4640 			ret = (lba < OPT_MEDIUM_ERR_ADDR)
4641 			      ? OPT_MEDIUM_ERR_ADDR : (int)lba;
4642 			put_unaligned_be32(ret, scp->sense_buffer + 3);
4643 		}
4644 		scsi_set_resid(scp, scsi_bufflen(scp));
4645 		return check_condition_result;
4646 	}
4647 
4648 	if (sdebug_dev_is_zoned(devip) ||
4649 	    (sdebug_dix && scsi_prot_sg_count(scp)))  {
4650 		sdeb_meta_read_lock(sip);
4651 		meta_data_locked = true;
4652 	}
4653 
4654 	/* DIX + T10 DIF */
4655 	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
4656 		switch (prot_verify_read(scp, lba, num, ei_lba)) {
4657 		case 1: /* Guard tag error */
4658 			if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */
4659 				sdeb_meta_read_unlock(sip);
4660 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
4661 				return check_condition_result;
4662 			} else if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) {
4663 				sdeb_meta_read_unlock(sip);
4664 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
4665 				return illegal_condition_result;
4666 			}
4667 			break;
4668 		case 3: /* Reference tag error */
4669 			if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */
4670 				sdeb_meta_read_unlock(sip);
4671 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3);
4672 				return check_condition_result;
4673 			} else if (scp->prot_flags & SCSI_PROT_REF_CHECK) {
4674 				sdeb_meta_read_unlock(sip);
4675 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3);
4676 				return illegal_condition_result;
4677 			}
4678 			break;
4679 		}
4680 	}
4681 
4682 	ret = do_device_access(sip, scp, 0, lba, num, 0, false, false);
4683 	if (meta_data_locked)
4684 		sdeb_meta_read_unlock(sip);
4685 	if (unlikely(ret == -1))
4686 		return DID_ERROR << 16;
4687 
4688 	scsi_set_resid(scp, scsi_bufflen(scp) - ret);
4689 
4690 	if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
4691 		     atomic_read(&sdeb_inject_pending))) {
4692 		if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
4693 			mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
4694 			atomic_set(&sdeb_inject_pending, 0);
4695 			return check_condition_result;
4696 		} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
4697 			/* Logical block guard check failed */
4698 			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
4699 			atomic_set(&sdeb_inject_pending, 0);
4700 			return illegal_condition_result;
4701 		} else if (SDEBUG_OPT_DIX_ERR & sdebug_opts) {
4702 			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
4703 			atomic_set(&sdeb_inject_pending, 0);
4704 			return illegal_condition_result;
4705 		}
4706 	}
4707 	return 0;
4708 }
4709 
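/*
 * Protection information refresher for the verifier below: each logical
 * block carries one 8-byte struct t10_pi_tuple:
 *
 *   __be16 guard_tag - CRC16 of the data block contents
 *   __be16 app_tag   - application tag; 0xffff acts as an escape that
 *                      disables checking of the tuple
 *   __be32 ref_tag   - low 32 bits of the expected LBA (Type 1)
 *
 * The loop below walks the data and protection scatterlists in step and,
 * unless WRPROTECT (cmnd[1] >> 5) is 3, runs dif_verify() on every
 * (block, tuple) pair before dif_copy_prot() commits the tuples to the
 * dif_storep shadow store.
 */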
4710 static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
4711 			     unsigned int sectors, u32 ei_lba)
4712 {
4713 	int ret;
4714 	struct t10_pi_tuple *sdt;
4715 	void *daddr;
4716 	sector_t sector = start_sec;
4717 	int ppage_offset;
4718 	int dpage_offset;
4719 	struct sg_mapping_iter diter;
4720 	struct sg_mapping_iter piter;
4721 
4722 	BUG_ON(scsi_sg_count(SCpnt) == 0);
4723 	BUG_ON(scsi_prot_sg_count(SCpnt) == 0);
4724 
4725 	sg_miter_start(&piter, scsi_prot_sglist(SCpnt),
4726 			scsi_prot_sg_count(SCpnt),
4727 			SG_MITER_ATOMIC | SG_MITER_FROM_SG);
4728 	sg_miter_start(&diter, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
4729 			SG_MITER_ATOMIC | SG_MITER_FROM_SG);
4730 
4731 	/* For each protection page */
4732 	while (sg_miter_next(&piter)) {
4733 		dpage_offset = 0;
4734 		if (WARN_ON(!sg_miter_next(&diter))) {
4735 			ret = 0x01;
4736 			goto out;
4737 		}
4738 
4739 		for (ppage_offset = 0; ppage_offset < piter.length;
4740 		     ppage_offset += sizeof(struct t10_pi_tuple)) {
4741 			/* If we're at the end of the current
4742 			 * data page advance to the next one
4743 			 */
4744 			if (dpage_offset >= diter.length) {
4745 				if (WARN_ON(!sg_miter_next(&diter))) {
4746 					ret = 0x01;
4747 					goto out;
4748 				}
4749 				dpage_offset = 0;
4750 			}
4751 
4752 			sdt = piter.addr + ppage_offset;
4753 			daddr = diter.addr + dpage_offset;
4754 
4755 			if (SCpnt->cmnd[1] >> 5 != 3) { /* WRPROTECT */
4756 				ret = dif_verify(sdt, daddr, sector, ei_lba);
4757 				if (ret)
4758 					goto out;
4759 			}
4760 
4761 			sector++;
4762 			ei_lba++;
4763 			dpage_offset += sdebug_sector_size;
4764 		}
4765 		diter.consumed = dpage_offset;
4766 		sg_miter_stop(&diter);
4767 	}
4768 	sg_miter_stop(&piter);
4769 
4770 	dif_copy_prot(SCpnt, start_sec, sectors, false);
4771 	dix_writes++;
4772 
4773 	return 0;
4774 
4775 out:
4776 	dif_errors++;
4777 	sg_miter_stop(&diter);
4778 	sg_miter_stop(&piter);
4779 	return ret;
4780 }
4781 
4782 static unsigned long lba_to_map_index(sector_t lba)
4783 {
4784 	if (sdebug_unmap_alignment)
4785 		lba += sdebug_unmap_granularity - sdebug_unmap_alignment;
4786 	sector_div(lba, sdebug_unmap_granularity);
4787 	return lba;
4788 }
4789 
4790 static sector_t map_index_to_lba(unsigned long index)
4791 {
4792 	sector_t lba = index * sdebug_unmap_granularity;
4793 
4794 	if (sdebug_unmap_alignment)
4795 		lba -= sdebug_unmap_granularity - sdebug_unmap_alignment;
4796 	return lba;
4797 }
4798 
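/*
 * Worked example for the two index helpers above, assuming (purely for
 * illustration) sdebug_unmap_granularity = 4 and sdebug_unmap_alignment = 1:
 *
 *   lba_to_map_index(0) = (0 + 3) / 4 = 0
 *   lba_to_map_index(1) = (1 + 3) / 4 = 1
 *   lba_to_map_index(4) = (4 + 3) / 4 = 1
 *   map_index_to_lba(1) = 1 * 4 - 3   = 1
 *
 * so bit 1 of map_storep covers LBAs 1..4. map_state() below combines the
 * same mapping with find_next_bit()/find_next_zero_bit() to report how many
 * blocks starting at @lba share the provisioning state of the first one.
 */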
4799 static unsigned int map_state(struct sdeb_store_info *sip, sector_t lba,
4800 			      unsigned int *num)
4801 {
4802 	sector_t end;
4803 	unsigned int mapped;
4804 	unsigned long index;
4805 	unsigned long next;
4806 
4807 	index = lba_to_map_index(lba);
4808 	mapped = test_bit(index, sip->map_storep);
4809 
4810 	if (mapped)
4811 		next = find_next_zero_bit(sip->map_storep, map_size, index);
4812 	else
4813 		next = find_next_bit(sip->map_storep, map_size, index);
4814 
4815 	end = min_t(sector_t, sdebug_store_sectors,  map_index_to_lba(next));
4816 	*num = end - lba;
4817 	return mapped;
4818 }
4819 
4820 static void map_region(struct sdeb_store_info *sip, sector_t lba,
4821 		       unsigned int len)
4822 {
4823 	sector_t end = lba + len;
4824 
4825 	while (lba < end) {
4826 		unsigned long index = lba_to_map_index(lba);
4827 
4828 		if (index < map_size)
4829 			set_bit(index, sip->map_storep);
4830 
4831 		lba = map_index_to_lba(index + 1);
4832 	}
4833 }
4834 
4835 static void unmap_region(struct sdeb_store_info *sip, sector_t lba,
4836 			 unsigned int len)
4837 {
4838 	sector_t end = lba + len;
4839 	u8 *fsp = sip->storep;
4840 
4841 	while (lba < end) {
4842 		unsigned long index = lba_to_map_index(lba);
4843 
4844 		if (lba == map_index_to_lba(index) &&
4845 		    lba + sdebug_unmap_granularity <= end &&
4846 		    index < map_size) {
4847 			clear_bit(index, sip->map_storep);
4848 			if (sdebug_lbprz) {  /* for LBPRZ=2 return 0xff_s */
4849 				memset(fsp + lba * sdebug_sector_size,
4850 				       (sdebug_lbprz & 1) ? 0 : 0xff,
4851 				       sdebug_sector_size *
4852 				       sdebug_unmap_granularity);
4853 			}
4854 			if (sip->dif_storep) {
4855 				memset(sip->dif_storep + lba, 0xff,
4856 				       sizeof(*sip->dif_storep) *
4857 				       sdebug_unmap_granularity);
4858 			}
4859 		}
4860 		lba = map_index_to_lba(index + 1);
4861 	}
4862 }
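/*
 * Note that unmap_region() above only clears a map bit when the descriptor
 * covers a whole granule, mirroring thin-provisioned devices that ignore
 * partial unmaps. The memset() implements the advertised LBPRZ behaviour:
 * with sdebug_lbprz odd, deallocated blocks read back as zeroes; otherwise
 * they read back as 0xff bytes (the LBPRZ=2 initialization-pattern case).
 */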
4863 
4864 static int resp_write_tape(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4865 {
4866 	u32 i, num, transfer, size, written = 0;
4867 	u8 *cmd = scp->cmnd;
4868 	struct scsi_data_buffer *sdb = &scp->sdb;
4869 	int partition = devip->tape_partition;
4870 	int pos = devip->tape_location[partition];
4871 	struct tape_block *blp;
4872 	bool fixed, ew;
4873 
4874 	if (cmd[0] != WRITE_6) { /* Only Write(6) supported */
4875 		mk_sense_invalid_opcode(scp);
4876 		return illegal_condition_result;
4877 	}
4878 
4879 	fixed = (cmd[1] & 1) != 0;
4880 	transfer = get_unaligned_be24(cmd + 2);
4881 	if (fixed) {
4882 		num = transfer;
4883 		size = devip->tape_blksize;
4884 	} else {
4885 		if (transfer < TAPE_MIN_BLKSIZE ||
4886 			transfer > TAPE_MAX_BLKSIZE) {
4887 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
4888 			return check_condition_result;
4889 		}
4890 		num = 1;
4891 		size = transfer;
4892 	}
4893 
4894 	scsi_set_resid(scp, num * transfer);
4895 	for (i = 0, blp = devip->tape_blocks[partition] + pos, ew = false;
4896 	     i < num && pos < devip->tape_eop[partition] - 1; i++, pos++, blp++) {
4897 		blp->fl_size = size;
4898 		sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
4899 			&(blp->data), 4, i * size, true);
4900 		written += size;
4901 		scsi_set_resid(scp, num * transfer - written);
4902 		ew |= (pos == devip->tape_eop[partition] - TAPE_EW);
4903 	}
4904 
4905 	devip->tape_location[partition] = pos;
4906 	blp->fl_size = TAPE_BLOCK_EOD_FLAG;
4907 	if (pos >= devip->tape_eop[partition] - 1) {
4908 		mk_sense_info_tape(scp, VOLUME_OVERFLOW,
4909 				NO_ADDITIONAL_SENSE, EOP_EOM_DETECTED_ASCQ,
4910 				fixed ? num - i : transfer,
4911 				SENSE_FLAG_EOM);
4912 		return check_condition_result;
4913 	}
4914 	if (ew) { /* early warning */
4915 		mk_sense_info_tape(scp, NO_SENSE,
4916 				NO_ADDITIONAL_SENSE, EOP_EOM_DETECTED_ASCQ,
4917 				fixed ? num - i : transfer,
4918 				SENSE_FLAG_EOM);
4919 		return check_condition_result;
4920 	}
4921 
4922 	return 0;
4923 }
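/*
 * Tape WRITE(6) example, assuming a 512-byte block size for illustration:
 * with the FIXED bit set, a transfer length of 4 writes four 512-byte
 * blocks; with FIXED clear, the transfer length is the byte count of a
 * single variable-length block, bounded by TAPE_MIN_BLKSIZE and
 * TAPE_MAX_BLKSIZE. Either way the loop above stamps fl_size on each
 * tape_block, an EOD marker is laid down after the last block written,
 * and EOM/early-warning conditions are raised near the end of partition.
 */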
4924 
4925 static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4926 {
4927 	bool check_prot;
4928 	u32 num;
4929 	u8 group = 0;
4930 	u32 ei_lba;
4931 	int ret;
4932 	u64 lba;
4933 	struct sdeb_store_info *sip = devip2sip(devip, true);
4934 	u8 *cmd = scp->cmnd;
4935 	bool meta_data_locked = false;
4936 
4937 	if (unlikely(sdebug_opts & SDEBUG_OPT_UNALIGNED_WRITE &&
4938 		     atomic_read(&sdeb_inject_pending))) {
4939 		atomic_set(&sdeb_inject_pending, 0);
4940 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE,
4941 				UNALIGNED_WRITE_ASCQ);
4942 		return check_condition_result;
4943 	}
4944 
4945 	switch (cmd[0]) {
4946 	case WRITE_16:
4947 		ei_lba = 0;
4948 		lba = get_unaligned_be64(cmd + 2);
4949 		num = get_unaligned_be32(cmd + 10);
4950 		group = cmd[14] & 0x3f;
4951 		check_prot = true;
4952 		break;
4953 	case WRITE_10:
4954 		ei_lba = 0;
4955 		lba = get_unaligned_be32(cmd + 2);
4956 		group = cmd[6] & 0x3f;
4957 		num = get_unaligned_be16(cmd + 7);
4958 		check_prot = true;
4959 		break;
4960 	case WRITE_6:
4961 		ei_lba = 0;
4962 		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
4963 		      (u32)(cmd[1] & 0x1f) << 16;
4964 		num = (0 == cmd[4]) ? 256 : cmd[4];
4965 		check_prot = true;
4966 		break;
4967 	case WRITE_12:
4968 		ei_lba = 0;
4969 		lba = get_unaligned_be32(cmd + 2);
4970 		num = get_unaligned_be32(cmd + 6);
4971 		group = cmd[6] & 0x3f;
4972 		check_prot = true;
4973 		break;
4974 	case 0x53:	/* XDWRITEREAD(10) */
4975 		ei_lba = 0;
4976 		lba = get_unaligned_be32(cmd + 2);
4977 		group = cmd[6] & 0x1f;
4978 		num = get_unaligned_be16(cmd + 7);
4979 		check_prot = false;
4980 		break;
4981 	default:	/* assume WRITE(32) */
4982 		group = cmd[6] & 0x3f;
4983 		lba = get_unaligned_be64(cmd + 12);
4984 		ei_lba = get_unaligned_be32(cmd + 20);
4985 		num = get_unaligned_be32(cmd + 28);
4986 		check_prot = false;
4987 		break;
4988 	}
4989 	if (unlikely(have_dif_prot && check_prot)) {
4990 		if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
4991 		    (cmd[1] & 0xe0)) {
4992 			mk_sense_invalid_opcode(scp);
4993 			return check_condition_result;
4994 		}
4995 		if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
4996 		     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
4997 		    (cmd[1] & 0xe0) == 0)
4998 			sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
4999 				    "to DIF device\n");
5000 	}
5001 
5002 	if (sdebug_dev_is_zoned(devip) ||
5003 	    (sdebug_dix && scsi_prot_sg_count(scp)) ||
5004 	    scsi_debug_lbp())  {
5005 		sdeb_meta_write_lock(sip);
5006 		meta_data_locked = true;
5007 	}
5008 
5009 	ret = check_device_access_params(scp, lba, num, true);
5010 	if (ret) {
5011 		if (meta_data_locked)
5012 			sdeb_meta_write_unlock(sip);
5013 		return ret;
5014 	}
5015 
5016 	/* DIX + T10 DIF */
5017 	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
5018 		switch (prot_verify_write(scp, lba, num, ei_lba)) {
5019 		case 1: /* Guard tag error */
5020 			if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) {
5021 				sdeb_meta_write_unlock(sip);
5022 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
5023 				return illegal_condition_result;
5024 			} else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */
5025 				sdeb_meta_write_unlock(sip);
5026 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
5027 				return check_condition_result;
5028 			}
5029 			break;
5030 		case 3: /* Reference tag error */
5031 			if (scp->prot_flags & SCSI_PROT_REF_CHECK) {
5032 				sdeb_meta_write_unlock(sip);
5033 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3);
5034 				return illegal_condition_result;
5035 			} else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */
5036 				sdeb_meta_write_unlock(sip);
5037 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3);
5038 				return check_condition_result;
5039 			}
5040 			break;
5041 		}
5042 	}
5043 
5044 	ret = do_device_access(sip, scp, 0, lba, num, group, true, false);
5045 	if (unlikely(scsi_debug_lbp()))
5046 		map_region(sip, lba, num);
5047 
5048 	/* If ZBC zone then bump its write pointer */
5049 	if (sdebug_dev_is_zoned(devip))
5050 		zbc_inc_wp(devip, lba, num);
5051 	if (meta_data_locked)
5052 		sdeb_meta_write_unlock(sip);
5053 
5054 	if (unlikely(-1 == ret))
5055 		return DID_ERROR << 16;
5056 	else if (unlikely(sdebug_verbose &&
5057 			  (ret < (num * sdebug_sector_size))))
5058 		sdev_printk(KERN_INFO, scp->device,
5059 			    "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
5060 			    my_name, num * sdebug_sector_size, ret);
5061 
5062 	if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
5063 		     atomic_read(&sdeb_inject_pending))) {
5064 		if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
5065 			mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
5066 			atomic_set(&sdeb_inject_pending, 0);
5067 			return check_condition_result;
5068 		} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
5069 			/* Logical block guard check failed */
5070 			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
5071 			atomic_set(&sdeb_inject_pending, 0);
5072 			return illegal_condition_result;
5073 		} else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
5074 			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
5075 			atomic_set(&sdeb_inject_pending, 0);
5076 			return illegal_condition_result;
5077 		}
5078 	}
5079 	return 0;
5080 }
5081 
5082 /*
5083  * T10 has only specified WRITE SCATTERED(16) and WRITE SCATTERED(32).
5084  * No READ GATHERED yet (requires bidi or long cdb holding gather list).
5085  */
5086 static int resp_write_scat(struct scsi_cmnd *scp,
5087 			   struct sdebug_dev_info *devip)
5088 {
5089 	u8 *cmd = scp->cmnd;
5090 	u8 *lrdp = NULL;
5091 	u8 *up;
5092 	struct sdeb_store_info *sip = devip2sip(devip, true);
5093 	u8 wrprotect;
5094 	u16 lbdof, num_lrd, k;
5095 	u32 num, num_by, bt_len, lbdof_blen, sg_off, cum_lb;
5096 	u32 lb_size = sdebug_sector_size;
5097 	u32 ei_lba;
5098 	u64 lba;
5099 	u8 group;
5100 	int ret, res;
5101 	bool is_16;
5102 	static const u32 lrd_size = 32; /* + parameter list header size */
5103 
5104 	if (cmd[0] == VARIABLE_LENGTH_CMD) {
5105 		is_16 = false;
5106 		group = cmd[6] & 0x3f;
5107 		wrprotect = (cmd[10] >> 5) & 0x7;
5108 		lbdof = get_unaligned_be16(cmd + 12);
5109 		num_lrd = get_unaligned_be16(cmd + 16);
5110 		bt_len = get_unaligned_be32(cmd + 28);
5111 	} else {        /* that leaves WRITE SCATTERED(16) */
5112 		is_16 = true;
5113 		wrprotect = (cmd[2] >> 5) & 0x7;
5114 		lbdof = get_unaligned_be16(cmd + 4);
5115 		num_lrd = get_unaligned_be16(cmd + 8);
5116 		bt_len = get_unaligned_be32(cmd + 10);
5117 		group = cmd[14] & 0x3f;
5118 		if (unlikely(have_dif_prot)) {
5119 			if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
5120 			    wrprotect) {
5121 				mk_sense_invalid_opcode(scp);
5122 				return illegal_condition_result;
5123 			}
5124 			if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
5125 			     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
5126 			     wrprotect == 0)
5127 				sdev_printk(KERN_ERR, scp->device,
5128 					    "Unprotected WR to DIF device\n");
5129 		}
5130 	}
5131 	if ((num_lrd == 0) || (bt_len == 0))
5132 		return 0;       /* T10 says these do-nothings are not errors */
5133 	if (lbdof == 0) {
5134 		if (sdebug_verbose)
5135 			sdev_printk(KERN_INFO, scp->device,
5136 				"%s: %s: LB Data Offset field bad\n",
5137 				my_name, __func__);
5138 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
5139 		return illegal_condition_result;
5140 	}
5141 	lbdof_blen = lbdof * lb_size;
5142 	if ((lrd_size + (num_lrd * lrd_size)) > lbdof_blen) {
5143 		if (sdebug_verbose)
5144 			sdev_printk(KERN_INFO, scp->device,
5145 				"%s: %s: LBA range descriptors don't fit\n",
5146 				my_name, __func__);
5147 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
5148 		return illegal_condition_result;
5149 	}
5150 	lrdp = kzalloc(lbdof_blen, GFP_ATOMIC | __GFP_NOWARN);
5151 	if (lrdp == NULL)
5152 		return SCSI_MLQUEUE_HOST_BUSY;
5153 	if (sdebug_verbose)
5154 		sdev_printk(KERN_INFO, scp->device,
5155 			"%s: %s: Fetch header+scatter_list, lbdof_blen=%u\n",
5156 			my_name, __func__, lbdof_blen);
5157 	res = fetch_to_dev_buffer(scp, lrdp, lbdof_blen);
5158 	if (res == -1) {
5159 		ret = DID_ERROR << 16;
5160 		goto err_out;
5161 	}
5162 
5163 	/* Just keep it simple and always lock for now */
5164 	sdeb_meta_write_lock(sip);
5165 	sg_off = lbdof_blen;
5166 	/* Spec says the Buffer Transfer Length field counts LBs in the data-out */
5167 	cum_lb = 0;
5168 	for (k = 0, up = lrdp + lrd_size; k < num_lrd; ++k, up += lrd_size) {
5169 		lba = get_unaligned_be64(up + 0);
5170 		num = get_unaligned_be32(up + 8);
5171 		if (sdebug_verbose)
5172 			sdev_printk(KERN_INFO, scp->device,
5173 				"%s: %s: k=%d  LBA=0x%llx num=%u  sg_off=%u\n",
5174 				my_name, __func__, k, lba, num, sg_off);
5175 		if (num == 0)
5176 			continue;
5177 		ret = check_device_access_params(scp, lba, num, true);
5178 		if (ret)
5179 			goto err_out_unlock;
5180 		num_by = num * lb_size;
5181 		ei_lba = is_16 ? 0 : get_unaligned_be32(up + 12);
5182 
5183 		if ((cum_lb + num) > bt_len) {
5184 			if (sdebug_verbose)
5185 				sdev_printk(KERN_INFO, scp->device,
5186 				    "%s: %s: sum of blocks > data provided\n",
5187 				    my_name, __func__);
5188 			mk_sense_buffer(scp, ILLEGAL_REQUEST, WRITE_ERROR_ASC,
5189 					0);
5190 			ret = illegal_condition_result;
5191 			goto err_out_unlock;
5192 		}
5193 
5194 		/* DIX + T10 DIF */
5195 		if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
5196 			int prot_ret = prot_verify_write(scp, lba, num,
5197 							 ei_lba);
5198 
5199 			if (prot_ret) {
5200 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10,
5201 						prot_ret);
5202 				ret = illegal_condition_result;
5203 				goto err_out_unlock;
5204 			}
5205 		}
5206 
5207 		/*
5208 		 * Write each range atomically, staying as close as possible
5209 		 * to the behaviour of the pre-atomic-write code path.
5210 		 */
5211 		ret = do_device_access(sip, scp, sg_off, lba, num, group, true, true);
5212 		/* If ZBC zone then bump its write pointer */
5213 		if (sdebug_dev_is_zoned(devip))
5214 			zbc_inc_wp(devip, lba, num);
5215 		if (unlikely(scsi_debug_lbp()))
5216 			map_region(sip, lba, num);
5217 		if (unlikely(-1 == ret)) {
5218 			ret = DID_ERROR << 16;
5219 			goto err_out_unlock;
5220 		} else if (unlikely(sdebug_verbose && (ret < num_by)))
5221 			sdev_printk(KERN_INFO, scp->device,
5222 			    "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
5223 			    my_name, num_by, ret);
5224 
5225 		if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
5226 			     atomic_read(&sdeb_inject_pending))) {
5227 			if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
5228 				mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
5229 				atomic_set(&sdeb_inject_pending, 0);
5230 				ret = check_condition_result;
5231 				goto err_out_unlock;
5232 			} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
5233 				/* Logical block guard check failed */
5234 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
5235 				atomic_set(&sdeb_inject_pending, 0);
5236 				ret = illegal_condition_result;
5237 				goto err_out_unlock;
5238 			} else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
5239 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
5240 				atomic_set(&sdeb_inject_pending, 0);
5241 				ret = illegal_condition_result;
5242 				goto err_out_unlock;
5243 			}
5244 		}
5245 		sg_off += num_by;
5246 		cum_lb += num;
5247 	}
5248 	ret = 0;
5249 err_out_unlock:
5250 	sdeb_meta_write_unlock(sip);
5251 err_out:
5252 	kfree(lrdp);
5253 	return ret;
5254 }
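/*
 * Parameter list sketch for the function above, assuming (for
 * illustration) lbdof = 1 and 512-byte logical blocks: the first 512
 * bytes of the data-out buffer hold the header plus the LBA range
 * descriptors and the write payload starts at sg_off = 512. Each
 * 32-byte descriptor is decoded as
 *
 *   lba    = get_unaligned_be64(up + 0);
 *   num    = get_unaligned_be32(up + 8);
 *   ei_lba = get_unaligned_be32(up + 12);  (WRITE SCATTERED(32) only)
 *
 * with cum_lb accumulating num so the total can be checked against the
 * Buffer Transfer Length from the CDB.
 */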
5255 
5256 static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
5257 			   u32 ei_lba, bool unmap, bool ndob)
5258 {
5259 	struct scsi_device *sdp = scp->device;
5260 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
5261 	unsigned long long i;
5262 	u64 block, lbaa;
5263 	u32 lb_size = sdebug_sector_size;
5264 	int ret;
5265 	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
5266 						scp->device->hostdata, true);
5267 	u8 *fs1p;
5268 	u8 *fsp;
5269 	bool meta_data_locked = false;
5270 
5271 	if (sdebug_dev_is_zoned(devip) || scsi_debug_lbp()) {
5272 		sdeb_meta_write_lock(sip);
5273 		meta_data_locked = true;
5274 	}
5275 
5276 	ret = check_device_access_params(scp, lba, num, true);
5277 	if (ret)
5278 		goto out;
5279 
5280 	if (unmap && scsi_debug_lbp()) {
5281 		unmap_region(sip, lba, num);
5282 		goto out;
5283 	}
5284 	lbaa = lba;
5285 	block = do_div(lbaa, sdebug_store_sectors);
5286 	/* if ndob then zero 1 logical block, else fetch 1 logical block */
5287 	fsp = sip->storep;
5288 	fs1p = fsp + (block * lb_size);
5289 	sdeb_data_write_lock(sip);
5290 	if (ndob) {
5291 		memset(fs1p, 0, lb_size);
5292 		ret = 0;
5293 	} else
5294 		ret = fetch_to_dev_buffer(scp, fs1p, lb_size);
5295 
5296 	if (-1 == ret) {
5297 		ret = DID_ERROR << 16;
5298 		goto out;
5299 	} else if (sdebug_verbose && !ndob && (ret < lb_size))
5300 		sdev_printk(KERN_INFO, scp->device,
5301 			    "%s: %s: lb size=%u, IO sent=%d bytes\n",
5302 			    my_name, "write same", lb_size, ret);
5303 
5304 	/* Copy first sector to remaining blocks */
5305 	for (i = 1 ; i < num ; i++) {
5306 		lbaa = lba + i;
5307 		block = do_div(lbaa, sdebug_store_sectors);
5308 		memmove(fsp + (block * lb_size), fs1p, lb_size);
5309 	}
5310 	if (scsi_debug_lbp())
5311 		map_region(sip, lba, num);
5312 	/* If ZBC zone then bump its write pointer */
5313 	if (sdebug_dev_is_zoned(devip))
5314 		zbc_inc_wp(devip, lba, num);
5315 	sdeb_data_write_unlock(sip);
5316 	ret = 0;
5317 out:
5318 	if (meta_data_locked)
5319 		sdeb_meta_write_unlock(sip);
5320 	return ret;
5321 }
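/*
 * The WRITE SAME emulation above needs at most one data-out fetch: it
 * zero-fills (NDOB) or fetches a single logical block at the starting LBA,
 * then memmove()s that block across the remaining num - 1 positions,
 * wrapping modulo sdebug_store_sectors as do_device_access() would. A
 * WRITE SAME with the UNMAP bit set on an LBP-enabled device instead
 * short-circuits to unmap_region() without touching the store.
 */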
5322 
5323 static int resp_write_same_10(struct scsi_cmnd *scp,
5324 			      struct sdebug_dev_info *devip)
5325 {
5326 	u8 *cmd = scp->cmnd;
5327 	u32 lba;
5328 	u16 num;
5329 	u32 ei_lba = 0;
5330 	bool unmap = false;
5331 
5332 	if (cmd[1] & 0x8) {
5333 		if (sdebug_lbpws10 == 0) {
5334 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
5335 			return check_condition_result;
5336 		} else
5337 			unmap = true;
5338 	}
5339 	lba = get_unaligned_be32(cmd + 2);
5340 	num = get_unaligned_be16(cmd + 7);
5341 	if (num > sdebug_write_same_length) {
5342 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
5343 		return check_condition_result;
5344 	}
5345 	return resp_write_same(scp, lba, num, ei_lba, unmap, false);
5346 }
5347 
5348 static int resp_write_same_16(struct scsi_cmnd *scp,
5349 			      struct sdebug_dev_info *devip)
5350 {
5351 	u8 *cmd = scp->cmnd;
5352 	u64 lba;
5353 	u32 num;
5354 	u32 ei_lba = 0;
5355 	bool unmap = false;
5356 	bool ndob = false;
5357 
5358 	if (cmd[1] & 0x8) {	/* UNMAP */
5359 		if (sdebug_lbpws == 0) {
5360 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
5361 			return check_condition_result;
5362 		} else
5363 			unmap = true;
5364 	}
5365 	if (cmd[1] & 0x1)  /* NDOB (no data-out buffer, assumes zeroes) */
5366 		ndob = true;
5367 	lba = get_unaligned_be64(cmd + 2);
5368 	num = get_unaligned_be32(cmd + 10);
5369 	if (num > sdebug_write_same_length) {
5370 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
5371 		return check_condition_result;
5372 	}
5373 	return resp_write_same(scp, lba, num, ei_lba, unmap, ndob);
5374 }
5375 
5376 /* Note the mode field is in the same position as the (lower) service action
5377  * field. For the REPORT SUPPORTED OPERATION CODES command, SPC-4 suggests
5378  * each mode of this command should be reported separately; left for the future. */
5379 static int resp_write_buffer(struct scsi_cmnd *scp,
5380 			     struct sdebug_dev_info *devip)
5381 {
5382 	u8 *cmd = scp->cmnd;
5383 	struct scsi_device *sdp = scp->device;
5384 	struct sdebug_dev_info *dp;
5385 	u8 mode;
5386 
5387 	mode = cmd[1] & 0x1f;
5388 	switch (mode) {
5389 	case 0x4:	/* download microcode (MC) and activate (ACT) */
5390 		/* set UAs on this device only */
5391 		set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5392 		set_bit(SDEBUG_UA_MICROCODE_CHANGED, devip->uas_bm);
5393 		break;
5394 	case 0x5:	/* download MC, save and ACT */
5395 		set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET, devip->uas_bm);
5396 		break;
5397 	case 0x6:	/* download MC with offsets and ACT */
5398 		/* set UAs on most devices (LUs) in this target */
5399 		list_for_each_entry(dp,
5400 				    &devip->sdbg_host->dev_info_list,
5401 				    dev_list)
5402 			if (dp->target == sdp->id) {
5403 				set_bit(SDEBUG_UA_BUS_RESET, dp->uas_bm);
5404 				if (devip != dp)
5405 					set_bit(SDEBUG_UA_MICROCODE_CHANGED,
5406 						dp->uas_bm);
5407 			}
5408 		break;
5409 	case 0x7:	/* download MC with offsets, save, and ACT */
5410 		/* set UA on all devices (LUs) in this target */
5411 		list_for_each_entry(dp,
5412 				    &devip->sdbg_host->dev_info_list,
5413 				    dev_list)
5414 			if (dp->target == sdp->id)
5415 				set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET,
5416 					dp->uas_bm);
5417 		break;
5418 	default:
5419 		/* do nothing for this command for other mode values */
5420 		break;
5421 	}
5422 	return 0;
5423 }
5424 
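/*
 * COMPARE AND WRITE: the data-out buffer carries twice NUMBER OF LOGICAL
 * BLOCKS worth of data - the verify image followed by the write image.
 * The handler below fetches both halves into a single allocation, lets
 * comp_write_worker() compare-then-write under the data and meta write
 * locks, and reports a MISCOMPARE sense key when the verify half does not
 * match the store.
 */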
5425 static int resp_comp_write(struct scsi_cmnd *scp,
5426 			   struct sdebug_dev_info *devip)
5427 {
5428 	u8 *cmd = scp->cmnd;
5429 	u8 *arr;
5430 	struct sdeb_store_info *sip = devip2sip(devip, true);
5431 	u64 lba;
5432 	u32 dnum;
5433 	u32 lb_size = sdebug_sector_size;
5434 	u8 num;
5435 	int ret;
5436 	int retval = 0;
5437 
5438 	lba = get_unaligned_be64(cmd + 2);
5439 	num = cmd[13];		/* 1 to a maximum of 255 logical blocks */
5440 	if (0 == num)
5441 		return 0;	/* degenerate case, not an error */
5442 	if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
5443 	    (cmd[1] & 0xe0)) {
5444 		mk_sense_invalid_opcode(scp);
5445 		return check_condition_result;
5446 	}
5447 	if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
5448 	     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
5449 	    (cmd[1] & 0xe0) == 0)
5450 		sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
5451 			    "to DIF device\n");
5452 	ret = check_device_access_params(scp, lba, num, false);
5453 	if (ret)
5454 		return ret;
5455 	dnum = 2 * num;
5456 	arr = kcalloc(lb_size, dnum, GFP_ATOMIC);
5457 	if (NULL == arr) {
5458 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
5459 				INSUFF_RES_ASCQ);
5460 		return check_condition_result;
5461 	}
5462 
5463 	ret = do_dout_fetch(scp, dnum, arr);
5464 	if (ret == -1) {
5465 		retval = DID_ERROR << 16;
5466 		goto cleanup_free;
5467 	} else if (sdebug_verbose && (ret < (dnum * lb_size)))
5468 		sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb "
5469 			    "indicated=%u, IO sent=%d bytes\n", my_name,
5470 			    dnum * lb_size, ret);
5471 
5472 	sdeb_data_write_lock(sip);
5473 	sdeb_meta_write_lock(sip);
5474 	if (!comp_write_worker(sip, lba, num, arr, false)) {
5475 		mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
5476 		retval = check_condition_result;
5477 		goto cleanup_unlock;
5478 	}
5479 
5480 	/* Cover sip->map_storep (which map_region() sets) with data lock */
5481 	if (scsi_debug_lbp())
5482 		map_region(sip, lba, num);
5483 cleanup_unlock:
5484 	sdeb_meta_write_unlock(sip);
5485 	sdeb_data_write_unlock(sip);
5486 cleanup_free:
5487 	kfree(arr);
5488 	return retval;
5489 }
5490 
5491 struct unmap_block_desc {
5492 	__be64	lba;
5493 	__be32	blocks;
5494 	__be32	__reserved;
5495 };
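/*
 * UNMAP parameter list layout assumed by resp_unmap() below: an 8-byte
 * header whose bytes 0..1 hold the UNMAP data length (payload - 2) and
 * bytes 2..3 the block descriptor data length (descriptors * 16),
 * followed by the 16-byte descriptors themselves starting at offset 8.
 */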
5496 
5497 static int resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
5498 {
5499 	unsigned char *buf;
5500 	struct unmap_block_desc *desc;
5501 	struct sdeb_store_info *sip = devip2sip(devip, true);
5502 	unsigned int i, payload_len, descriptors;
5503 	int ret;
5504 
5505 	if (!scsi_debug_lbp())
5506 		return 0;	/* fib and say it's done */
5507 	payload_len = get_unaligned_be16(scp->cmnd + 7);
5508 	BUG_ON(scsi_bufflen(scp) != payload_len);
5509 
5510 	descriptors = (payload_len - 8) / 16;
5511 	if (descriptors > sdebug_unmap_max_desc) {
5512 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
5513 		return check_condition_result;
5514 	}
5515 
5516 	buf = kzalloc(scsi_bufflen(scp), GFP_ATOMIC);
5517 	if (!buf) {
5518 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
5519 				INSUFF_RES_ASCQ);
5520 		return check_condition_result;
5521 	}
5522 
5523 	scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
5524 
5525 	BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
5526 	BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);
5527 
5528 	desc = (void *)&buf[8];
5529 
5530 	sdeb_meta_write_lock(sip);
5531 
5532 	for (i = 0 ; i < descriptors ; i++) {
5533 		unsigned long long lba = get_unaligned_be64(&desc[i].lba);
5534 		unsigned int num = get_unaligned_be32(&desc[i].blocks);
5535 
5536 		ret = check_device_access_params(scp, lba, num, true);
5537 		if (ret)
5538 			goto out;
5539 
5540 		unmap_region(sip, lba, num);
5541 	}
5542 
5543 	ret = 0;
5544 
5545 out:
5546 	sdeb_meta_write_unlock(sip);
5547 	kfree(buf);
5548 
5549 	return ret;
5550 }
5551 
5552 #define SDEBUG_GET_LBA_STATUS_LEN 32
5553 
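/*
 * GET LBA STATUS response as built below (32 bytes, one descriptor):
 *
 *   bytes 0..3    Parameter Data Length, fixed at 20
 *   bytes 8..15   starting LBA echoed from the CDB
 *   bytes 16..19  number of blocks sharing the same provisioning state
 *   byte  20      provisioning status: 0 = mapped, 1 = deallocated
 */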
5554 static int resp_get_lba_status(struct scsi_cmnd *scp,
5555 			       struct sdebug_dev_info *devip)
5556 {
5557 	u8 *cmd = scp->cmnd;
5558 	u64 lba;
5559 	u32 alloc_len, mapped, num;
5560 	int ret;
5561 	u8 arr[SDEBUG_GET_LBA_STATUS_LEN];
5562 
5563 	lba = get_unaligned_be64(cmd + 2);
5564 	alloc_len = get_unaligned_be32(cmd + 10);
5565 
5566 	if (alloc_len < 24)
5567 		return 0;
5568 
5569 	ret = check_device_access_params(scp, lba, 1, false);
5570 	if (ret)
5571 		return ret;
5572 
5573 	if (scsi_debug_lbp()) {
5574 		struct sdeb_store_info *sip = devip2sip(devip, true);
5575 
5576 		mapped = map_state(sip, lba, &num);
5577 	} else {
5578 		mapped = 1;
5579 		/* following just in case virtual_gb changed */
5580 		sdebug_capacity = get_sdebug_capacity();
5581 		if (sdebug_capacity - lba <= 0xffffffff)
5582 			num = sdebug_capacity - lba;
5583 		else
5584 			num = 0xffffffff;
5585 	}
5586 
5587 	memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
5588 	put_unaligned_be32(20, arr);		/* Parameter Data Length */
5589 	put_unaligned_be64(lba, arr + 8);	/* LBA */
5590 	put_unaligned_be32(num, arr + 16);	/* Number of blocks */
5591 	arr[20] = !mapped;		/* prov_stat=0: mapped; 1: dealloc */
5592 
5593 	return fill_from_dev_buffer(scp, arr, SDEBUG_GET_LBA_STATUS_LEN);
5594 }
5595 
5596 static int resp_get_stream_status(struct scsi_cmnd *scp,
5597 				  struct sdebug_dev_info *devip)
5598 {
5599 	u16 starting_stream_id, stream_id;
5600 	const u8 *cmd = scp->cmnd;
5601 	u32 alloc_len, offset;
5602 	u8 arr[256] = {};
5603 	struct scsi_stream_status_header *h = (void *)arr;
5604 
5605 	starting_stream_id = get_unaligned_be16(cmd + 4);
5606 	alloc_len = get_unaligned_be32(cmd + 10);
5607 
5608 	if (alloc_len < 8) {
5609 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
5610 		return check_condition_result;
5611 	}
5612 
5613 	if (starting_stream_id >= MAXIMUM_NUMBER_OF_STREAMS) {
5614 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1);
5615 		return check_condition_result;
5616 	}
5617 
5618 	/*
5619 	 * The GET STREAM STATUS command only reports status information
5620 	 * about open streams. Treat the non-permanent stream as open.
5621 	 */
5622 	put_unaligned_be16(MAXIMUM_NUMBER_OF_STREAMS,
5623 			   &h->number_of_open_streams);
5624 
5625 	for (offset = 8, stream_id = starting_stream_id;
5626 	     offset + 8 <= min_t(u32, alloc_len, sizeof(arr)) &&
5627 		     stream_id < MAXIMUM_NUMBER_OF_STREAMS;
5628 	     offset += 8, stream_id++) {
5629 		struct scsi_stream_status *stream_status = (void *)arr + offset;
5630 
5631 		stream_status->perm = stream_id < PERMANENT_STREAM_COUNT;
5632 		put_unaligned_be16(stream_id,
5633 				   &stream_status->stream_identifier);
5634 		stream_status->rel_lifetime = stream_id + 1;
5635 	}
5636 	put_unaligned_be32(offset - 8, &h->len); /* PARAMETER DATA LENGTH */
5637 
5638 	return fill_from_dev_buffer(scp, arr, min(offset, alloc_len));
5639 }
5640 
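/*
 * SYNCHRONIZE CACHE(10/16): there is no real cache to flush, so the only
 * observable effect is timing. If nothing has been written since the last
 * sync, or the IMMED bit (cmd[1] & 0x2) is set, SDEG_RES_IMMED_MASK lets
 * the command complete without the usual emulated delay.
 */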
5641 static int resp_sync_cache(struct scsi_cmnd *scp,
5642 			   struct sdebug_dev_info *devip)
5643 {
5644 	int res = 0;
5645 	u64 lba;
5646 	u32 num_blocks;
5647 	u8 *cmd = scp->cmnd;
5648 
5649 	if (cmd[0] == SYNCHRONIZE_CACHE) {	/* 10 byte cdb */
5650 		lba = get_unaligned_be32(cmd + 2);
5651 		num_blocks = get_unaligned_be16(cmd + 7);
5652 	} else {				/* SYNCHRONIZE_CACHE(16) */
5653 		lba = get_unaligned_be64(cmd + 2);
5654 		num_blocks = get_unaligned_be32(cmd + 10);
5655 	}
5656 	if (lba + num_blocks > sdebug_capacity) {
5657 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
5658 		return check_condition_result;
5659 	}
5660 	if (!write_since_sync || (cmd[1] & 0x2))
5661 		res = SDEG_RES_IMMED_MASK;
5662 	else		/* delay if write_since_sync and IMMED clear */
5663 		write_since_sync = false;
5664 	return res;
5665 }
5666 
5667 /*
5668  * Assuming the LBA+num_blocks is not out-of-range, this function returns
5669  * CONDITION MET if the specified blocks will fit (or already sit) in the cache, and
5670  * a GOOD status otherwise. Model a disk with a big cache and yield
5671  * CONDITION MET. Actually tries to bring range in main memory into the
5672  * cache associated with the CPU(s).
5673  *
5674  * The pcode 0x34 is also used for READ POSITION by tape devices.
5675  */
5676 static int resp_pre_fetch(struct scsi_cmnd *scp,
5677 			  struct sdebug_dev_info *devip)
5678 {
5679 	int res = 0;
5680 	u64 lba;
5681 	u64 block, rest = 0;
5682 	u32 nblks;
5683 	u8 *cmd = scp->cmnd;
5684 	struct sdeb_store_info *sip = devip2sip(devip, true);
5685 	u8 *fsp = sip->storep;
5686 
5687 	if (cmd[0] == PRE_FETCH) {	/* 10 byte cdb */
5688 		lba = get_unaligned_be32(cmd + 2);
5689 		nblks = get_unaligned_be16(cmd + 7);
5690 	} else {			/* PRE-FETCH(16) */
5691 		lba = get_unaligned_be64(cmd + 2);
5692 		nblks = get_unaligned_be32(cmd + 10);
5693 	}
5694 	if (lba + nblks > sdebug_capacity) {
5695 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
5696 		return check_condition_result;
5697 	}
5698 	if (!fsp)
5699 		goto fini;
5700 	/* PRE-FETCH spec says nothing about LBP or PI so skip them */
5701 	block = do_div(lba, sdebug_store_sectors);
5702 	if (block + nblks > sdebug_store_sectors)
5703 		rest = block + nblks - sdebug_store_sectors;
5704 
5705 	/* Try to bring the PRE-FETCH range into CPU's cache */
5706 	sdeb_data_read_lock(sip);
5707 	prefetch_range(fsp + (sdebug_sector_size * block),
5708 		       (nblks - rest) * sdebug_sector_size);
5709 	if (rest)
5710 		prefetch_range(fsp, rest * sdebug_sector_size);
5711 
5712 	sdeb_data_read_unlock(sip);
5713 fini:
5714 	if (cmd[1] & 0x2)
5715 		res = SDEG_RES_IMMED_MASK;
5716 	return res | condition_met_result;
5717 }
5718 
5719 #define RL_BUCKET_ELEMS 8
5720 
5721 /* Even though each pseudo target has a REPORT LUNS "well known logical unit"
5722  * (W-LUN), the normal Linux scanning logic does not associate it with a
5723  * device (e.g. /dev/sg7). The following magic will make that association:
5724  *   "cd /sys/class/scsi_host/host<n> ; echo '- - 49409' > scan"
5725  * where <n> is a host number. If there are multiple targets in a host then
5726  * the above will associate a W-LUN to each target. To only get a W-LUN
5727  * for target 2, then use "echo '- 2 49409' > scan" .
5728  */
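/*
 * The response below is assembled in RL_BUCKET_ELEMS-sized chunks of
 * 8-byte entries, with the first chunk donating its first slot to the
 * 8-byte response header (hence the "sizeof response header same as
 * sizeof lun" remark). For example, with sdebug_max_luns = 10 and no
 * W-LUN requested, the first pass emits the header plus seven LUNs and
 * the second pass the remaining three.
 */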
5729 static int resp_report_luns(struct scsi_cmnd *scp,
5730 			    struct sdebug_dev_info *devip)
5731 {
5732 	unsigned char *cmd = scp->cmnd;
5733 	unsigned int alloc_len;
5734 	unsigned char select_report;
5735 	u64 lun;
5736 	struct scsi_lun *lun_p;
5737 	u8 arr[RL_BUCKET_ELEMS * sizeof(struct scsi_lun)];
5738 	unsigned int lun_cnt;	/* normal LUN count (max: 256) */
5739 	unsigned int wlun_cnt;	/* report luns W-LUN count */
5740 	unsigned int tlun_cnt;	/* total LUN count */
5741 	unsigned int rlen;	/* response length (in bytes) */
5742 	int k, j, n, res;
5743 	unsigned int off_rsp = 0;
5744 	const int sz_lun = sizeof(struct scsi_lun);
5745 
5746 	clear_luns_changed_on_target(devip);
5747 
5748 	select_report = cmd[2];
5749 	alloc_len = get_unaligned_be32(cmd + 6);
5750 
5751 	if (alloc_len < 4) {
5752 		pr_err("alloc len too small %d\n", alloc_len);
5753 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
5754 		return check_condition_result;
5755 	}
5756 
5757 	switch (select_report) {
5758 	case 0:		/* all LUNs apart from W-LUNs */
5759 		lun_cnt = sdebug_max_luns;
5760 		wlun_cnt = 0;
5761 		break;
5762 	case 1:		/* only W-LUNs */
5763 		lun_cnt = 0;
5764 		wlun_cnt = 1;
5765 		break;
5766 	case 2:		/* all LUNs */
5767 		lun_cnt = sdebug_max_luns;
5768 		wlun_cnt = 1;
5769 		break;
5770 	case 0x10:	/* only administrative LUs */
5771 	case 0x11:	/* see SPC-5 */
5772 	case 0x12:	/* only subsidiary LUs owned by referenced LU */
5773 	default:
5774 		pr_debug("select report invalid %d\n", select_report);
5775 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
5776 		return check_condition_result;
5777 	}
5778 
5779 	if (sdebug_no_lun_0 && (lun_cnt > 0))
5780 		--lun_cnt;
5781 
5782 	tlun_cnt = lun_cnt + wlun_cnt;
5783 	rlen = tlun_cnt * sz_lun;	/* excluding 8 byte header */
5784 	scsi_set_resid(scp, scsi_bufflen(scp));
5785 	pr_debug("select_report %d luns = %d wluns = %d no_lun0 %d\n",
5786 		 select_report, lun_cnt, wlun_cnt, sdebug_no_lun_0);
5787 
5788 	/* loops rely on sizeof response header same as sizeof lun (both 8) */
5789 	lun = sdebug_no_lun_0 ? 1 : 0;
5790 	for (k = 0, j = 0, res = 0; true; ++k, j = 0) {
5791 		memset(arr, 0, sizeof(arr));
5792 		lun_p = (struct scsi_lun *)&arr[0];
5793 		if (k == 0) {
5794 			put_unaligned_be32(rlen, &arr[0]);
5795 			++lun_p;
5796 			j = 1;
5797 		}
5798 		for ( ; j < RL_BUCKET_ELEMS; ++j, ++lun_p) {
5799 			if ((k * RL_BUCKET_ELEMS) + j > lun_cnt)
5800 				break;
5801 			int_to_scsilun(lun++, lun_p);
5802 			if (lun > 1 && sdebug_lun_am == SAM_LUN_AM_FLAT)
5803 				lun_p->scsi_lun[0] |= 0x40;
5804 		}
5805 		if (j < RL_BUCKET_ELEMS)
5806 			break;
5807 		n = j * sz_lun;
5808 		res = p_fill_from_dev_buffer(scp, arr, n, off_rsp);
5809 		if (res)
5810 			return res;
5811 		off_rsp += n;
5812 	}
5813 	if (wlun_cnt) {
5814 		int_to_scsilun(SCSI_W_LUN_REPORT_LUNS, lun_p);
5815 		++j;
5816 	}
5817 	if (j > 0)
5818 		res = p_fill_from_dev_buffer(scp, arr, j * sz_lun, off_rsp);
5819 	return res;
5820 }
5821 
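/*
 * VERIFY(10/16): behaviour keys off the BYTCHK field, (cmd[1] >> 1) & 3:
 *   0 - medium verification only; nothing to compare, always succeeds here
 *   1 - the data-out buffer holds VERIFICATION LENGTH blocks to compare
 *   2 - reserved; rejected as an invalid field
 *   3 - one block is sent and compared against every block in the range
 * A mismatch surfaces as a MISCOMPARE sense key via comp_write_worker()
 * running in read-only mode.
 */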
5822 static int resp_verify(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
5823 {
5824 	bool is_bytchk3 = false;
5825 	u8 bytchk;
5826 	int ret, j;
5827 	u32 vnum, a_num, off;
5828 	const u32 lb_size = sdebug_sector_size;
5829 	u64 lba;
5830 	u8 *arr;
5831 	u8 *cmd = scp->cmnd;
5832 	struct sdeb_store_info *sip = devip2sip(devip, true);
5833 
5834 	bytchk = (cmd[1] >> 1) & 0x3;
5835 	if (bytchk == 0) {
5836 		return 0;	/* always claim internal verify okay */
5837 	} else if (bytchk == 2) {
5838 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
5839 		return check_condition_result;
5840 	} else if (bytchk == 3) {
5841 		is_bytchk3 = true;	/* 1 block sent, compared repeatedly */
5842 	}
5843 	switch (cmd[0]) {
5844 	case VERIFY_16:
5845 		lba = get_unaligned_be64(cmd + 2);
5846 		vnum = get_unaligned_be32(cmd + 10);
5847 		break;
5848 	case VERIFY:		/* is VERIFY(10) */
5849 		lba = get_unaligned_be32(cmd + 2);
5850 		vnum = get_unaligned_be16(cmd + 7);
5851 		break;
5852 	default:
5853 		mk_sense_invalid_opcode(scp);
5854 		return check_condition_result;
5855 	}
5856 	if (vnum == 0)
5857 		return 0;	/* not an error */
5858 	a_num = is_bytchk3 ? 1 : vnum;
5859 	/* Treat following check like one for read (i.e. no write) access */
5860 	ret = check_device_access_params(scp, lba, a_num, false);
5861 	if (ret)
5862 		return ret;
5863 
5864 	arr = kcalloc(lb_size, vnum, GFP_ATOMIC | __GFP_NOWARN);
5865 	if (!arr) {
5866 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
5867 				INSUFF_RES_ASCQ);
5868 		return check_condition_result;
5869 	}
5870 	/* Not changing store, so only need read access */
5871 	sdeb_data_read_lock(sip);
5872 
5873 	ret = do_dout_fetch(scp, a_num, arr);
5874 	if (ret == -1) {
5875 		ret = DID_ERROR << 16;
5876 		goto cleanup;
5877 	} else if (sdebug_verbose && (ret < (a_num * lb_size))) {
5878 		sdev_printk(KERN_INFO, scp->device,
5879 			    "%s: %s: cdb indicated=%u, IO sent=%d bytes\n",
5880 			    my_name, __func__, a_num * lb_size, ret);
5881 	}
5882 	if (is_bytchk3) {
5883 		for (j = 1, off = lb_size; j < vnum; ++j, off += lb_size)
5884 			memcpy(arr + off, arr, lb_size);
5885 	}
5886 	ret = 0;
5887 	if (!comp_write_worker(sip, lba, vnum, arr, true)) {
5888 		mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
5889 		ret = check_condition_result;
5890 		goto cleanup;
5891 	}
5892 cleanup:
5893 	sdeb_data_read_unlock(sip);
5894 	kfree(arr);
5895 	return ret;
5896 }
5897 
5898 #define RZONES_DESC_HD 64
5899 
5900 /* Report zones depending on start LBA and reporting options */
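/*
 * Response layout: a 64-byte header holding the zone list length in bytes
 * 0..3, the maximum LBA in bytes 8..15 and, when zones are capped smaller
 * than their size, the zone starting LBA granularity in bytes 16..23;
 * then one 64-byte descriptor per reported zone with the zone type,
 * condition << 4, zone size, zone start LBA and write pointer at offsets
 * 0, 1, 8, 16 and 24 respectively.
 */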
5901 static int resp_report_zones(struct scsi_cmnd *scp,
5902 			     struct sdebug_dev_info *devip)
5903 {
5904 	unsigned int rep_max_zones, nrz = 0;
5905 	int ret = 0;
5906 	u32 alloc_len, rep_opts, rep_len;
5907 	bool partial;
5908 	u64 lba, zs_lba;
5909 	u8 *arr = NULL, *desc;
5910 	u8 *cmd = scp->cmnd;
5911 	struct sdeb_zone_state *zsp = NULL;
5912 	struct sdeb_store_info *sip = devip2sip(devip, false);
5913 
5914 	if (!sdebug_dev_is_zoned(devip)) {
5915 		mk_sense_invalid_opcode(scp);
5916 		return check_condition_result;
5917 	}
5918 	zs_lba = get_unaligned_be64(cmd + 2);
5919 	alloc_len = get_unaligned_be32(cmd + 10);
5920 	if (alloc_len == 0)
5921 		return 0;	/* not an error */
5922 	rep_opts = cmd[14] & 0x3f;
5923 	partial = cmd[14] & 0x80;
5924 
5925 	if (zs_lba >= sdebug_capacity) {
5926 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
5927 		return check_condition_result;
5928 	}
5929 
5930 	rep_max_zones = (alloc_len - 64) >> ilog2(RZONES_DESC_HD);
5931 
5932 	arr = kzalloc(alloc_len, GFP_ATOMIC | __GFP_NOWARN);
5933 	if (!arr) {
5934 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
5935 				INSUFF_RES_ASCQ);
5936 		return check_condition_result;
5937 	}
5938 
5939 	sdeb_meta_read_lock(sip);
5940 
5941 	desc = arr + 64;
5942 	for (lba = zs_lba; lba < sdebug_capacity;
5943 	     lba = zsp->z_start + zsp->z_size) {
5944 		if (WARN_ONCE(zbc_zone(devip, lba) == zsp, "lba = %llu\n", lba))
5945 			break;
5946 		zsp = zbc_zone(devip, lba);
5947 		switch (rep_opts) {
5948 		case 0x00:
5949 			/* All zones */
5950 			break;
5951 		case 0x01:
5952 			/* Empty zones */
5953 			if (zsp->z_cond != ZC1_EMPTY)
5954 				continue;
5955 			break;
5956 		case 0x02:
5957 			/* Implicit open zones */
5958 			if (zsp->z_cond != ZC2_IMPLICIT_OPEN)
5959 				continue;
5960 			break;
5961 		case 0x03:
5962 			/* Explicit open zones */
5963 			if (zsp->z_cond != ZC3_EXPLICIT_OPEN)
5964 				continue;
5965 			break;
5966 		case 0x04:
5967 			/* Closed zones */
5968 			if (zsp->z_cond != ZC4_CLOSED)
5969 				continue;
5970 			break;
5971 		case 0x05:
5972 			/* Full zones */
5973 			if (zsp->z_cond != ZC5_FULL)
5974 				continue;
5975 			break;
5976 		case 0x06:
5977 		case 0x07:
5978 		case 0x10:
5979 			/*
5980 			 * Read-only, offline, reset WP recommended are
5981 			 * not emulated: no zones to report;
5982 			 */
5983 			continue;
5984 		case 0x11:
5985 			/* non-seq-resource set */
5986 			if (!zsp->z_non_seq_resource)
5987 				continue;
5988 			break;
5989 		case 0x3e:
5990 			/* All zones except gap zones. */
5991 			if (zbc_zone_is_gap(zsp))
5992 				continue;
5993 			break;
5994 		case 0x3f:
5995 			/* Not write pointer (conventional) zones */
5996 			if (zbc_zone_is_seq(zsp))
5997 				continue;
5998 			break;
5999 		default:
6000 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
6001 					INVALID_FIELD_IN_CDB, 0);
6002 			ret = check_condition_result;
6003 			goto fini;
6004 		}
6005 
6006 		if (nrz < rep_max_zones) {
6007 			/* Fill zone descriptor */
6008 			desc[0] = zsp->z_type;
6009 			desc[1] = zsp->z_cond << 4;
6010 			if (zsp->z_non_seq_resource)
6011 				desc[1] |= 1 << 1;
6012 			put_unaligned_be64((u64)zsp->z_size, desc + 8);
6013 			put_unaligned_be64((u64)zsp->z_start, desc + 16);
6014 			put_unaligned_be64((u64)zsp->z_wp, desc + 24);
6015 			desc += 64;
6016 		}
6017 
6018 		if (partial && nrz >= rep_max_zones)
6019 			break;
6020 
6021 		nrz++;
6022 	}
6023 
6024 	/* Report header */
6025 	/* Zone list length. */
6026 	put_unaligned_be32(nrz * RZONES_DESC_HD, arr + 0);
6027 	/* Maximum LBA */
6028 	put_unaligned_be64(sdebug_capacity - 1, arr + 8);
6029 	/* Zone starting LBA granularity. */
6030 	if (devip->zcap < devip->zsize)
6031 		put_unaligned_be64(devip->zsize, arr + 16);
6032 
6033 	rep_len = (unsigned long)desc - (unsigned long)arr;
6034 	ret = fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, rep_len));
6035 
6036 fini:
6037 	sdeb_meta_read_unlock(sip);
6038 	kfree(arr);
6039 	return ret;
6040 }
6041 
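/*
 * ATOMIC WRITE(16) validation example, assuming (for illustration)
 * sdebug_atomic_wr_align = 8: an LBA of 24 passes the do_div() alignment
 * test while 25 fails. The transfer length must likewise be a multiple of
 * sdebug_atomic_wr_gran. With a non-zero ATOMIC BOUNDARY both the boundary
 * and the length are checked against the _bndry limits; otherwise only
 * sdebug_atomic_wr_max_length applies.
 */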
6042 static int resp_atomic_write(struct scsi_cmnd *scp,
6043 			     struct sdebug_dev_info *devip)
6044 {
6045 	struct sdeb_store_info *sip;
6046 	u8 *cmd = scp->cmnd;
6047 	u16 boundary, len;
6048 	u64 lba, lba_tmp;
6049 	int ret;
6050 
6051 	if (!scsi_debug_atomic_write()) {
6052 		mk_sense_invalid_opcode(scp);
6053 		return check_condition_result;
6054 	}
6055 
6056 	sip = devip2sip(devip, true);
6057 
6058 	lba = get_unaligned_be64(cmd + 2);
6059 	boundary = get_unaligned_be16(cmd + 10);
6060 	len = get_unaligned_be16(cmd + 12);
6061 
6062 	lba_tmp = lba;
6063 	if (sdebug_atomic_wr_align &&
6064 	    do_div(lba_tmp, sdebug_atomic_wr_align)) {
6065 		/* Does not meet alignment requirement */
6066 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
6067 		return check_condition_result;
6068 	}
6069 
6070 	if (sdebug_atomic_wr_gran && len % sdebug_atomic_wr_gran) {
6071 		/* Does not meet alignment requirement */
6072 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
6073 		return check_condition_result;
6074 	}
6075 
6076 	if (boundary > 0) {
6077 		if (boundary > sdebug_atomic_wr_max_bndry) {
6078 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 12, -1);
6079 			return check_condition_result;
6080 		}
6081 
6082 		if (len > sdebug_atomic_wr_max_length_bndry) {
6083 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 12, -1);
6084 			return check_condition_result;
6085 		}
6086 	} else {
6087 		if (len > sdebug_atomic_wr_max_length) {
6088 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 12, -1);
6089 			return check_condition_result;
6090 		}
6091 	}
6092 
6093 	ret = do_device_access(sip, scp, 0, lba, len, 0, true, true);
6094 	if (unlikely(ret == -1))
6095 		return DID_ERROR << 16;
6096 	if (unlikely(ret != len * sdebug_sector_size))
6097 		return DID_ERROR << 16;
6098 	return 0;
6099 }
6100 
6101 /* Logic transplanted from tcmu-runner, file_zbc.c */
6102 static void zbc_open_all(struct sdebug_dev_info *devip)
6103 {
6104 	struct sdeb_zone_state *zsp = &devip->zstate[0];
6105 	unsigned int i;
6106 
6107 	for (i = 0; i < devip->nr_zones; i++, zsp++) {
6108 		if (zsp->z_cond == ZC4_CLOSED)
6109 			zbc_open_zone(devip, &devip->zstate[i], true);
6110 	}
6111 }
6112 
6113 static int resp_open_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
6114 {
6115 	int res = 0;
6116 	u64 z_id;
6117 	enum sdebug_z_cond zc;
6118 	u8 *cmd = scp->cmnd;
6119 	struct sdeb_zone_state *zsp;
6120 	bool all = cmd[14] & 0x01;
6121 	struct sdeb_store_info *sip = devip2sip(devip, false);
6122 
6123 	if (!sdebug_dev_is_zoned(devip)) {
6124 		mk_sense_invalid_opcode(scp);
6125 		return check_condition_result;
6126 	}
6127 	sdeb_meta_write_lock(sip);
6128 
6129 	if (all) {
6130 		/* Check if all closed zones can be open */
6131 		if (devip->max_open &&
6132 		    devip->nr_exp_open + devip->nr_closed > devip->max_open) {
6133 			mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
6134 					INSUFF_ZONE_ASCQ);
6135 			res = check_condition_result;
6136 			goto fini;
6137 		}
6138 		/* Open all closed zones */
6139 		zbc_open_all(devip);
6140 		goto fini;
6141 	}
6142 
6143 	/* Open the specified zone */
6144 	z_id = get_unaligned_be64(cmd + 2);
6145 	if (z_id >= sdebug_capacity) {
6146 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
6147 		res = check_condition_result;
6148 		goto fini;
6149 	}
6150 
6151 	zsp = zbc_zone(devip, z_id);
6152 	if (z_id != zsp->z_start) {
6153 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
6154 		res = check_condition_result;
6155 		goto fini;
6156 	}
6157 	if (zbc_zone_is_conv(zsp)) {
6158 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
6159 		res = check_condition_result;
6160 		goto fini;
6161 	}
6162 
6163 	zc = zsp->z_cond;
6164 	if (zc == ZC3_EXPLICIT_OPEN || zc == ZC5_FULL)
6165 		goto fini;
6166 
6167 	if (devip->max_open && devip->nr_exp_open >= devip->max_open) {
6168 		mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
6169 				INSUFF_ZONE_ASCQ);
6170 		res = check_condition_result;
6171 		goto fini;
6172 	}
6173 
6174 	zbc_open_zone(devip, zsp, true);
6175 fini:
6176 	sdeb_meta_write_unlock(sip);
6177 	return res;
6178 }
6179 
6180 static void zbc_close_all(struct sdebug_dev_info *devip)
6181 {
6182 	unsigned int i;
6183 
6184 	for (i = 0; i < devip->nr_zones; i++)
6185 		zbc_close_zone(devip, &devip->zstate[i]);
6186 }
6187 
6188 static int resp_close_zone(struct scsi_cmnd *scp,
6189 			   struct sdebug_dev_info *devip)
6190 {
6191 	int res = 0;
6192 	u64 z_id;
6193 	u8 *cmd = scp->cmnd;
6194 	struct sdeb_zone_state *zsp;
6195 	bool all = cmd[14] & 0x01;
6196 	struct sdeb_store_info *sip = devip2sip(devip, false);
6197 
6198 	if (!sdebug_dev_is_zoned(devip)) {
6199 		mk_sense_invalid_opcode(scp);
6200 		return check_condition_result;
6201 	}
6202 
6203 	sdeb_meta_write_lock(sip);
6204 
6205 	if (all) {
6206 		zbc_close_all(devip);
6207 		goto fini;
6208 	}
6209 
6210 	/* Close specified zone */
6211 	z_id = get_unaligned_be64(cmd + 2);
6212 	if (z_id >= sdebug_capacity) {
6213 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
6214 		res = check_condition_result;
6215 		goto fini;
6216 	}
6217 
6218 	zsp = zbc_zone(devip, z_id);
6219 	if (z_id != zsp->z_start) {
6220 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
6221 		res = check_condition_result;
6222 		goto fini;
6223 	}
6224 	if (zbc_zone_is_conv(zsp)) {
6225 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
6226 		res = check_condition_result;
6227 		goto fini;
6228 	}
6229 
6230 	zbc_close_zone(devip, zsp);
6231 fini:
6232 	sdeb_meta_write_unlock(sip);
6233 	return res;
6234 }
6235 
6236 static void zbc_finish_zone(struct sdebug_dev_info *devip,
6237 			    struct sdeb_zone_state *zsp, bool empty)
6238 {
6239 	enum sdebug_z_cond zc = zsp->z_cond;
6240 
6241 	if (zc == ZC4_CLOSED || zc == ZC2_IMPLICIT_OPEN ||
6242 	    zc == ZC3_EXPLICIT_OPEN || (empty && zc == ZC1_EMPTY)) {
6243 		if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
6244 			zbc_close_zone(devip, zsp);
6245 		if (zsp->z_cond == ZC4_CLOSED)
6246 			devip->nr_closed--;
6247 		zsp->z_wp = zsp->z_start + zsp->z_size;
6248 		zsp->z_cond = ZC5_FULL;
6249 	}
6250 }
6251 
6252 static void zbc_finish_all(struct sdebug_dev_info *devip)
6253 {
6254 	unsigned int i;
6255 
6256 	for (i = 0; i < devip->nr_zones; i++)
6257 		zbc_finish_zone(devip, &devip->zstate[i], false);
6258 }
6259 
6260 static int resp_finish_zone(struct scsi_cmnd *scp,
6261 			    struct sdebug_dev_info *devip)
6262 {
6263 	struct sdeb_zone_state *zsp;
6264 	int res = 0;
6265 	u64 z_id;
6266 	u8 *cmd = scp->cmnd;
6267 	bool all = cmd[14] & 0x01;
6268 	struct sdeb_store_info *sip = devip2sip(devip, false);
6269 
6270 	if (!sdebug_dev_is_zoned(devip)) {
6271 		mk_sense_invalid_opcode(scp);
6272 		return check_condition_result;
6273 	}
6274 
6275 	sdeb_meta_write_lock(sip);
6276 
6277 	if (all) {
6278 		zbc_finish_all(devip);
6279 		goto fini;
6280 	}
6281 
6282 	/* Finish the specified zone */
6283 	z_id = get_unaligned_be64(cmd + 2);
6284 	if (z_id >= sdebug_capacity) {
6285 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
6286 		res = check_condition_result;
6287 		goto fini;
6288 	}
6289 
6290 	zsp = zbc_zone(devip, z_id);
6291 	if (z_id != zsp->z_start) {
6292 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
6293 		res = check_condition_result;
6294 		goto fini;
6295 	}
6296 	if (zbc_zone_is_conv(zsp)) {
6297 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
6298 		res = check_condition_result;
6299 		goto fini;
6300 	}
6301 
6302 	zbc_finish_zone(devip, zsp, true);
6303 fini:
6304 	sdeb_meta_write_unlock(sip);
6305 	return res;
6306 }
6307 
6308 static void zbc_rwp_zone(struct sdebug_dev_info *devip,
6309 			 struct sdeb_zone_state *zsp)
6310 {
6311 	enum sdebug_z_cond zc;
6312 	struct sdeb_store_info *sip = devip2sip(devip, false);
6313 
6314 	if (!zbc_zone_is_seq(zsp))
6315 		return;
6316 
6317 	zc = zsp->z_cond;
6318 	if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
6319 		zbc_close_zone(devip, zsp);
6320 
6321 	if (zsp->z_cond == ZC4_CLOSED)
6322 		devip->nr_closed--;
6323 
6324 	if (zsp->z_wp > zsp->z_start)
6325 		memset(sip->storep + zsp->z_start * sdebug_sector_size, 0,
6326 		       (zsp->z_wp - zsp->z_start) * sdebug_sector_size);
6327 
6328 	zsp->z_non_seq_resource = false;
6329 	zsp->z_wp = zsp->z_start;
6330 	zsp->z_cond = ZC1_EMPTY;
6331 }
6332 
6333 static void zbc_rwp_all(struct sdebug_dev_info *devip)
6334 {
6335 	unsigned int i;
6336 
6337 	for (i = 0; i < devip->nr_zones; i++)
6338 		zbc_rwp_zone(devip, &devip->zstate[i]);
6339 }
6340 
6341 static int resp_rwp_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
6342 {
6343 	struct sdeb_zone_state *zsp;
6344 	int res = 0;
6345 	u64 z_id;
6346 	u8 *cmd = scp->cmnd;
6347 	bool all = cmd[14] & 0x01;
6348 	struct sdeb_store_info *sip = devip2sip(devip, false);
6349 
6350 	if (!sdebug_dev_is_zoned(devip)) {
6351 		mk_sense_invalid_opcode(scp);
6352 		return check_condition_result;
6353 	}
6354 
6355 	sdeb_meta_write_lock(sip);
6356 
6357 	if (all) {
6358 		zbc_rwp_all(devip);
6359 		goto fini;
6360 	}
6361 
6362 	z_id = get_unaligned_be64(cmd + 2);
6363 	if (z_id >= sdebug_capacity) {
6364 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
6365 		res = check_condition_result;
6366 		goto fini;
6367 	}
6368 
6369 	zsp = zbc_zone(devip, z_id);
6370 	if (z_id != zsp->z_start) {
6371 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
6372 		res = check_condition_result;
6373 		goto fini;
6374 	}
6375 	if (zbc_zone_is_conv(zsp)) {
6376 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
6377 		res = check_condition_result;
6378 		goto fini;
6379 	}
6380 
6381 	zbc_rwp_zone(devip, zsp);
6382 fini:
6383 	sdeb_meta_write_unlock(sip);
6384 	return res;
6385 }
6386 
6387 static u32 get_tag(struct scsi_cmnd *cmnd)
6388 {
6389 	return blk_mq_unique_tag(scsi_cmd_to_rq(cmnd));
6390 }
6391 
6392 /* Queued (deferred) command completions converge here. */
6393 static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
6394 {
6395 	struct sdebug_scsi_cmd *sdsc = container_of(sd_dp,
6396 					typeof(*sdsc), sd_dp);
6397 	struct scsi_cmnd *scp = (struct scsi_cmnd *)sdsc - 1;
6398 	unsigned long flags;
6399 	bool aborted;
6400 
6401 	if (sdebug_statistics) {
6402 		atomic_inc(&sdebug_completions);
6403 		if (raw_smp_processor_id() != sd_dp->issuing_cpu)
6404 			atomic_inc(&sdebug_miss_cpus);
6405 	}
6406 
6407 	if (!scp) {
6408 		pr_err("scmd=NULL\n");
6409 		return;
6410 	}
6411 
6412 	spin_lock_irqsave(&sdsc->lock, flags);
6413 	aborted = sd_dp->aborted;
6414 	if (unlikely(aborted))
6415 		sd_dp->aborted = false;
6416 
6417 	spin_unlock_irqrestore(&sdsc->lock, flags);
6418 
6419 	if (aborted) {
6420 		pr_info("bypassing scsi_done() due to aborted cmd, kicking-off EH\n");
6421 		blk_abort_request(scsi_cmd_to_rq(scp));
6422 		return;
6423 	}
6424 
6425 	scsi_done(scp); /* callback to mid level */
6426 }
6427 
6428 /* Called when the high-resolution timer fires. */
6429 static enum hrtimer_restart sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
6430 {
6431 	struct sdebug_defer *sd_dp = container_of(timer, struct sdebug_defer,
6432 						  hrt);
6433 	sdebug_q_cmd_complete(sd_dp);
6434 	return HRTIMER_NORESTART;
6435 }
6436 
6437 /* Called when the work queue runs the deferred work. */
6438 static void sdebug_q_cmd_wq_complete(struct work_struct *work)
6439 {
6440 	struct sdebug_defer *sd_dp = container_of(work, struct sdebug_defer,
6441 						  ew.work);
6442 	sdebug_q_cmd_complete(sd_dp);
6443 }
6444 
6445 static bool got_shared_uuid;
6446 static uuid_t shared_uuid;
6447 
6448 static int sdebug_device_create_zones(struct sdebug_dev_info *devip)
6449 {
6450 	struct sdeb_zone_state *zsp;
6451 	sector_t capacity = get_sdebug_capacity();
6452 	sector_t conv_capacity;
6453 	sector_t zstart = 0;
6454 	unsigned int i;
6455 
6456 	/*
6457 	 * Set the zone size: if sdeb_zbc_zone_size_mb is not set, figure out
6458 	 * a zone size allowing for at least 4 zones on the device. Otherwise,
6459 	 * use the specified zone size, checking that at least 2 zones can be
6460 	 * created for the device.
6461 	 */
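	/*
	 * Worked example (illustrative, assuming DEF_ZBC_ZONE_SIZE_MB is 128
	 * and 512-byte sectors): zsize starts at 128 * SZ_1M / 512 = 262144
	 * sectors; for a 64 MiB store (131072 sectors) it is halved down to
	 * 32768 sectors (16 MiB), at which point exactly four zones fit.
	 */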
6462 	if (!sdeb_zbc_zone_size_mb) {
6463 		devip->zsize = (DEF_ZBC_ZONE_SIZE_MB * SZ_1M)
6464 			>> ilog2(sdebug_sector_size);
6465 		while (capacity < devip->zsize << 2 && devip->zsize >= 2)
6466 			devip->zsize >>= 1;
6467 		if (devip->zsize < 2) {
6468 			pr_err("Device capacity too small\n");
6469 			return -EINVAL;
6470 		}
6471 	} else {
6472 		if (!is_power_of_2(sdeb_zbc_zone_size_mb)) {
6473 			pr_err("Zone size is not a power of 2\n");
6474 			return -EINVAL;
6475 		}
6476 		devip->zsize = (sdeb_zbc_zone_size_mb * SZ_1M)
6477 			>> ilog2(sdebug_sector_size);
6478 		if (devip->zsize >= capacity) {
6479 			pr_err("Zone size too large for device capacity\n");
6480 			return -EINVAL;
6481 		}
6482 	}
6483 
6484 	devip->zsize_shift = ilog2(devip->zsize);
6485 	devip->nr_zones = (capacity + devip->zsize - 1) >> devip->zsize_shift;
6486 
6487 	if (sdeb_zbc_zone_cap_mb == 0) {
6488 		devip->zcap = devip->zsize;
6489 	} else {
6490 		devip->zcap = (sdeb_zbc_zone_cap_mb * SZ_1M) >>
6491 			      ilog2(sdebug_sector_size);
6492 		if (devip->zcap > devip->zsize) {
6493 			pr_err("Zone capacity too large\n");
6494 			return -EINVAL;
6495 		}
6496 	}
6497 
6498 	conv_capacity = (sector_t)sdeb_zbc_nr_conv << devip->zsize_shift;
6499 	if (conv_capacity >= capacity) {
6500 		pr_err("Number of conventional zones too large\n");
6501 		return -EINVAL;
6502 	}
6503 	devip->nr_conv_zones = sdeb_zbc_nr_conv;
6504 	devip->nr_seq_zones = ALIGN(capacity - conv_capacity, devip->zsize) >>
6505 			      devip->zsize_shift;
6506 	devip->nr_zones = devip->nr_conv_zones + devip->nr_seq_zones;
6507 
6508 	/* Add gap zones if zone capacity is smaller than the zone size */
6509 	if (devip->zcap < devip->zsize)
6510 		devip->nr_zones += devip->nr_seq_zones;
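	/*
	 * Illustrative layout (hypothetical numbers): with zsize = 8 sectors
	 * and zcap = 6, each sequential zone of 6 sectors is followed in
	 * zstate[] by a 2-sector gap zone, doubling the sequential zone count.
	 */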
6511 
6512 	if (devip->zoned) {
6513 		/* zbc_max_open_zones can be 0, meaning "not reported" */
6514 		if (sdeb_zbc_max_open >= devip->nr_zones - 1)
6515 			devip->max_open = (devip->nr_zones - 1) / 2;
6516 		else
6517 			devip->max_open = sdeb_zbc_max_open;
6518 	}
6519 
6520 	devip->zstate = kcalloc(devip->nr_zones,
6521 				sizeof(struct sdeb_zone_state), GFP_KERNEL);
6522 	if (!devip->zstate)
6523 		return -ENOMEM;
6524 
6525 	for (i = 0; i < devip->nr_zones; i++) {
6526 		zsp = &devip->zstate[i];
6527 
6528 		zsp->z_start = zstart;
6529 
6530 		if (i < devip->nr_conv_zones) {
6531 			zsp->z_type = ZBC_ZTYPE_CNV;
6532 			zsp->z_cond = ZBC_NOT_WRITE_POINTER;
6533 			zsp->z_wp = (sector_t)-1;
6534 			zsp->z_size =
6535 				min_t(u64, devip->zsize, capacity - zstart);
6536 		} else if ((zstart & (devip->zsize - 1)) == 0) {
6537 			if (devip->zoned)
6538 				zsp->z_type = ZBC_ZTYPE_SWR;
6539 			else
6540 				zsp->z_type = ZBC_ZTYPE_SWP;
6541 			zsp->z_cond = ZC1_EMPTY;
6542 			zsp->z_wp = zsp->z_start;
6543 			zsp->z_size =
6544 				min_t(u64, devip->zcap, capacity - zstart);
6545 		} else {
6546 			zsp->z_type = ZBC_ZTYPE_GAP;
6547 			zsp->z_cond = ZBC_NOT_WRITE_POINTER;
6548 			zsp->z_wp = (sector_t)-1;
6549 			zsp->z_size = min_t(u64, devip->zsize - devip->zcap,
6550 					    capacity - zstart);
6551 		}
6552 
6553 		WARN_ON_ONCE((int)zsp->z_size <= 0);
6554 		zstart += zsp->z_size;
6555 	}
6556 
6557 	return 0;
6558 }
6559 
6560 static struct sdebug_dev_info *sdebug_device_create(
6561 			struct sdebug_host_info *sdbg_host, gfp_t flags)
6562 {
6563 	struct sdebug_dev_info *devip;
6564 
6565 	devip = kzalloc(sizeof(*devip), flags);
6566 	if (devip) {
6567 		if (sdebug_uuid_ctl == 1)
6568 			uuid_gen(&devip->lu_name);
6569 		else if (sdebug_uuid_ctl == 2) {
6570 			if (got_shared_uuid)
6571 				devip->lu_name = shared_uuid;
6572 			else {
6573 				uuid_gen(&shared_uuid);
6574 				got_shared_uuid = true;
6575 				devip->lu_name = shared_uuid;
6576 			}
6577 		}
6578 		devip->sdbg_host = sdbg_host;
6579 		if (sdeb_zbc_in_use) {
6580 			devip->zoned = sdeb_zbc_model == BLK_ZONED_HM;
6581 			if (sdebug_device_create_zones(devip)) {
6582 				kfree(devip);
6583 				return NULL;
6584 			}
6585 		} else {
6586 			devip->zoned = false;
6587 		}
6588 		if (sdebug_ptype == TYPE_TAPE) {
6589 			devip->tape_density = TAPE_DEF_DENSITY;
6590 			devip->tape_blksize = TAPE_DEF_BLKSIZE;
6591 		}
6592 		devip->create_ts = ktime_get_boottime();
6593 		atomic_set(&devip->stopped, (sdeb_tur_ms_to_ready > 0 ? 2 : 0));
6594 		spin_lock_init(&devip->list_lock);
6595 		INIT_LIST_HEAD(&devip->inject_err_list);
6596 		list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
6597 	}
6598 	return devip;
6599 }
6600 
6601 static struct sdebug_dev_info *find_build_dev_info(struct scsi_device *sdev)
6602 {
6603 	struct sdebug_host_info *sdbg_host;
6604 	struct sdebug_dev_info *open_devip = NULL;
6605 	struct sdebug_dev_info *devip;
6606 
6607 	sdbg_host = shost_to_sdebug_host(sdev->host);
6608 
6609 	list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
6610 		if ((devip->used) && (devip->channel == sdev->channel) &&
6611 		    (devip->target == sdev->id) &&
6612 		    (devip->lun == sdev->lun))
6613 			return devip;
6614 		else {
6615 			if ((!devip->used) && (!open_devip))
6616 				open_devip = devip;
6617 		}
6618 	}
6619 	if (!open_devip) { /* try and make a new one */
6620 		open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
6621 		if (!open_devip) {
6622 			pr_err("out of memory at line %d\n", __LINE__);
6623 			return NULL;
6624 		}
6625 	}
6626 
6627 	open_devip->channel = sdev->channel;
6628 	open_devip->target = sdev->id;
6629 	open_devip->lun = sdev->lun;
6630 	open_devip->sdbg_host = sdbg_host;
6631 	set_bit(SDEBUG_UA_POOCCUR, open_devip->uas_bm);
6632 	open_devip->used = true;
6633 	return open_devip;
6634 }
6635 
6636 static int scsi_debug_sdev_init(struct scsi_device *sdp)
6637 {
6638 	if (sdebug_verbose)
6639 		pr_info("sdev_init <%u %u %u %llu>\n",
6640 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
6641 
6642 	return 0;
6643 }
6644 
6645 static int scsi_debug_sdev_configure(struct scsi_device *sdp,
6646 				     struct queue_limits *lim)
6647 {
6648 	struct sdebug_dev_info *devip =
6649 			(struct sdebug_dev_info *)sdp->hostdata;
6650 	struct dentry *dentry;
6651 
6652 	if (sdebug_verbose)
6653 		pr_info("sdev_configure <%u %u %u %llu>\n",
6654 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
6655 	if (sdp->host->max_cmd_len != SDEBUG_MAX_CMD_LEN)
6656 		sdp->host->max_cmd_len = SDEBUG_MAX_CMD_LEN;
6657 	if (devip == NULL) {
6658 		devip = find_build_dev_info(sdp);
6659 		if (devip == NULL)
6660 			return 1;  /* no resources, will be marked offline */
6661 	}
6662 	if (sdebug_ptype == TYPE_TAPE) {
6663 		if (!devip->tape_blocks[0]) {
6664 			devip->tape_blocks[0] =
6665 				kcalloc(TAPE_UNITS, sizeof(struct tape_block),
6666 					GFP_KERNEL);
6667 			if (!devip->tape_blocks[0])
6668 				return 1;
6669 		}
6670 		devip->tape_pending_nbr_partitions = -1;
6671 		if (partition_tape(devip, 1, TAPE_UNITS, 0) < 0) {
6672 			kfree(devip->tape_blocks[0]);
6673 			devip->tape_blocks[0] = NULL;
6674 			return 1;
6675 		}
6676 	}
6677 	sdp->hostdata = devip;
6678 	if (sdebug_no_uld)
6679 		sdp->no_uld_attach = 1;
6680 	config_cdb_len(sdp);
6681 
6682 	if (sdebug_allow_restart)
6683 		sdp->allow_restart = 1;
6684 
6685 	devip->debugfs_entry = debugfs_create_dir(dev_name(&sdp->sdev_dev),
6686 				sdebug_debugfs_root);
6687 	if (IS_ERR_OR_NULL(devip->debugfs_entry))
6688 		pr_info("%s: failed to create debugfs directory for device %s\n",
6689 			__func__, dev_name(&sdp->sdev_gendev));
6690 
6691 	dentry = debugfs_create_file("error", 0600, devip->debugfs_entry, sdp,
6692 				&sdebug_error_fops);
6693 	if (IS_ERR_OR_NULL(dentry))
6694 		pr_info("%s: failed to create error file for device %s\n",
6695 			__func__, dev_name(&sdp->sdev_gendev));
6696 
6697 	return 0;
6698 }
6699 
6700 static void scsi_debug_sdev_destroy(struct scsi_device *sdp)
6701 {
6702 	struct sdebug_dev_info *devip =
6703 		(struct sdebug_dev_info *)sdp->hostdata;
6704 	struct sdebug_err_inject *err;
6705 
6706 	if (sdebug_verbose)
6707 		pr_info("sdev_destroy <%u %u %u %llu>\n",
6708 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
6709 
6710 	if (!devip)
6711 		return;
6712 
6713 	spin_lock(&devip->list_lock);
6714 	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
6715 		list_del_rcu(&err->list);
6716 		call_rcu(&err->rcu, sdebug_err_free);
6717 	}
6718 	spin_unlock(&devip->list_lock);
6719 
6720 	debugfs_remove(devip->debugfs_entry);
6721 
6722 	if (sdp->type == TYPE_TAPE) {
6723 		kfree(devip->tape_blocks[0]);
6724 		devip->tape_blocks[0] = NULL;
6725 	}
6726 
6727 	/* make this slot available for re-use */
6728 	devip->used = false;
6729 	sdp->hostdata = NULL;
6730 }
6731 
6732 /* Returns true if cancelled or not running callback. */
6733 static bool scsi_debug_stop_cmnd(struct scsi_cmnd *cmnd)
6734 {
6735 	struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmnd);
6736 	struct sdebug_defer *sd_dp = &sdsc->sd_dp;
6737 	enum sdeb_defer_type defer_t = READ_ONCE(sd_dp->defer_t);
6738 
6739 	lockdep_assert_held(&sdsc->lock);
6740 
6741 	if (defer_t == SDEB_DEFER_HRT) {
6742 		int res = hrtimer_try_to_cancel(&sd_dp->hrt);
6743 
6744 		switch (res) {
6745 		case -1: /* the callback is currently executing */
6746 			return false;
6747 		case 0: /* Not active, it must have already run */
6748 		case 1: /* Was active, we've now cancelled */
6749 		default:
6750 			return true;
6751 		}
6752 	} else if (defer_t == SDEB_DEFER_WQ) {
6753 		/* Cancel if pending */
6754 		if (cancel_work(&sd_dp->ew.work))
6755 			return true;
6756 		/* callback may be running, so return false */
6757 		return false;
6758 	} else if (defer_t == SDEB_DEFER_POLL) {
6759 		return true;
6760 	}
6761 
6762 	return false;
6763 }
6764 
6765 struct sdebug_abort_cmd {
6766 	u32 unique_tag;
6767 };
6768 
6769 enum sdebug_internal_cmd_type {
6770 	SCSI_DEBUG_ABORT_CMD,
6771 };
6772 
6773 struct sdebug_internal_cmd {
6774 	enum sdebug_internal_cmd_type type;
6775 
6776 	union {
6777 		struct sdebug_abort_cmd abort_cmd;
6778 	};
6779 };
6780 
6781 union sdebug_priv {
6782 	struct sdebug_scsi_cmd cmd;
6783 	struct sdebug_internal_cmd internal_cmd;
6784 };
6785 
6786 /*
6787  * Abort SCSI command @cmnd. Only called from scsi_debug_abort(). Although
6788  * it would be possible to call scsi_debug_stop_cmnd() directly, an internal
6789  * command is allocated and submitted to trigger the reserved command
6790  * infrastructure.
6791  */
6792 static bool scsi_debug_abort_cmnd(struct scsi_cmnd *cmnd)
6793 {
6794 	struct Scsi_Host *shost = cmnd->device->host;
6795 	struct request *rq = scsi_cmd_to_rq(cmnd);
6796 	u32 unique_tag = blk_mq_unique_tag(rq);
6797 	struct sdebug_internal_cmd *internal_cmd;
6798 	struct scsi_cmnd *abort_cmd;
6799 	struct request *abort_rq;
6800 	blk_status_t res;
6801 
6802 	abort_cmd = scsi_get_internal_cmd(shost->pseudo_sdev, DMA_NONE,
6803 					  BLK_MQ_REQ_RESERVED);
6804 	if (!abort_cmd)
6805 		return false;
6806 	internal_cmd = scsi_cmd_priv(abort_cmd);
6807 	*internal_cmd = (struct sdebug_internal_cmd) {
6808 		.type = SCSI_DEBUG_ABORT_CMD,
6809 		.abort_cmd = {
6810 			.unique_tag = unique_tag,
6811 		},
6812 	};
6813 	abort_rq = scsi_cmd_to_rq(abort_cmd);
6814 	abort_rq->timeout = secs_to_jiffies(3);
6815 	res = blk_execute_rq(abort_rq, true);
6816 	scsi_put_internal_cmd(abort_cmd);
6817 	return res == BLK_STS_OK;
6818 }
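/*
 * Note: the reserved tag (BLK_MQ_REQ_RESERVED) lets the abort make forward
 * progress even when every regular tag is in use, which is presumably part
 * of what the reserved command infrastructure above is meant to exercise.
 */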
6819 
6820 /*
6821  * All we can do is set the cmnd as internally aborted and wait for it to
6822  * finish. We cannot call scsi_done() since the normal completion path may do that.
6823  */
6824 static bool sdebug_stop_cmnd(struct request *rq, void *data)
6825 {
6826 	scsi_debug_abort_cmnd(blk_mq_rq_to_pdu(rq));
6827 
6828 	return true;
6829 }
6830 
6831 /* Deletes (stops) timers or work queues of all queued commands */
6832 static void stop_all_queued(void)
6833 {
6834 	struct sdebug_host_info *sdhp;
6835 
6836 	mutex_lock(&sdebug_host_list_mutex);
6837 	list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
6838 		struct Scsi_Host *shost = sdhp->shost;
6839 
6840 		blk_mq_tagset_busy_iter(&shost->tag_set, sdebug_stop_cmnd, NULL);
6841 	}
6842 	mutex_unlock(&sdebug_host_list_mutex);
6843 }
6844 
6845 static int sdebug_fail_abort(struct scsi_cmnd *cmnd)
6846 {
6847 	struct scsi_device *sdp = cmnd->device;
6848 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
6849 	struct sdebug_err_inject *err;
6850 	unsigned char *cmd = cmnd->cmnd;
6851 	int ret = 0;
6852 
6853 	if (devip == NULL)
6854 		return 0;
6855 
6856 	rcu_read_lock();
6857 	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
6858 		if (err->type == ERR_ABORT_CMD_FAILED &&
6859 		    (err->cmd == cmd[0] || err->cmd == 0xff)) {
6860 			ret = !!err->cnt;
6861 			if (err->cnt < 0)
6862 				err->cnt++;
6863 
6864 			rcu_read_unlock();
6865 			return ret;
6866 		}
6867 	}
6868 	rcu_read_unlock();
6869 
6870 	return 0;
6871 }
6872 
6873 static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
6874 {
6875 	bool aborted = scsi_debug_abort_cmnd(SCpnt);
6876 	u8 *cmd = SCpnt->cmnd;
6877 	u8 opcode = cmd[0];
6878 
6879 	++num_aborts;
6880 
6881 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
6882 		sdev_printk(KERN_INFO, SCpnt->device,
6883 			    "%s: command%s found\n", __func__,
6884 			    aborted ? "" : " not");
6885 
6886 
6887 	if (sdebug_fail_abort(SCpnt)) {
6888 		scmd_printk(KERN_INFO, SCpnt, "fail abort command 0x%x\n",
6889 			    opcode);
6890 		return FAILED;
6891 	}
6892 
6893 	if (!aborted)
6894 		return FAILED;
6895 
6896 	return SUCCESS;
6897 }
6898 
6899 static bool scsi_debug_stop_all_queued_iter(struct request *rq, void *data)
6900 {
6901 	struct scsi_device *sdp = data;
6902 	struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
6903 
6904 	if (scmd->device == sdp)
6905 		scsi_debug_abort_cmnd(scmd);
6906 
6907 	return true;
6908 }
6909 
6910 /* Deletes (stops) timers or work queues of all queued commands per sdev */
6911 static void scsi_debug_stop_all_queued(struct scsi_device *sdp)
6912 {
6913 	struct Scsi_Host *shost = sdp->host;
6914 
6915 	blk_mq_tagset_busy_iter(&shost->tag_set,
6916 				scsi_debug_stop_all_queued_iter, sdp);
6917 }
6918 
6919 static int sdebug_fail_lun_reset(struct scsi_cmnd *cmnd)
6920 {
6921 	struct scsi_device *sdp = cmnd->device;
6922 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
6923 	struct sdebug_err_inject *err;
6924 	unsigned char *cmd = cmnd->cmnd;
6925 	int ret = 0;
6926 
6927 	if (devip == NULL)
6928 		return 0;
6929 
6930 	rcu_read_lock();
6931 	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
6932 		if (err->type == ERR_LUN_RESET_FAILED &&
6933 		    (err->cmd == cmd[0] || err->cmd == 0xff)) {
6934 			ret = !!err->cnt;
6935 			if (err->cnt < 0)
6936 				err->cnt++;
6937 
6938 			rcu_read_unlock();
6939 			return ret;
6940 		}
6941 	}
6942 	rcu_read_unlock();
6943 
6944 	return 0;
6945 }
6946 
6947 static void scsi_tape_reset_clear(struct sdebug_dev_info *devip)
6948 {
6949 	int i;
6950 
6951 	devip->tape_blksize = TAPE_DEF_BLKSIZE;
6952 	devip->tape_density = TAPE_DEF_DENSITY;
6953 	devip->tape_partition = 0;
6954 	devip->tape_dce = 0;
6955 	for (i = 0; i < TAPE_MAX_PARTITIONS; i++)
6956 		devip->tape_location[i] = 0;
6957 	devip->tape_pending_nbr_partitions = -1;
6958 	/* Don't reset partitioning? */
6959 }
6960 
6961 static int scsi_debug_device_reset(struct scsi_cmnd *SCpnt)
6962 {
6963 	struct scsi_device *sdp = SCpnt->device;
6964 	struct sdebug_dev_info *devip = sdp->hostdata;
6965 	u8 *cmd = SCpnt->cmnd;
6966 	u8 opcode = cmd[0];
6967 
6968 	++num_dev_resets;
6969 
6970 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
6971 		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
6972 
6973 	scsi_debug_stop_all_queued(sdp);
6974 	if (devip) {
6975 		set_bit(SDEBUG_UA_POR, devip->uas_bm);
6976 		if (SCpnt->device->type == TYPE_TAPE)
6977 			scsi_tape_reset_clear(devip);
6978 	}
6979 
6980 	if (sdebug_fail_lun_reset(SCpnt)) {
6981 		scmd_printk(KERN_INFO, SCpnt, "fail lun reset 0x%x\n", opcode);
6982 		return FAILED;
6983 	}
6984 
6985 	return SUCCESS;
6986 }
6987 
6988 static int sdebug_fail_target_reset(struct scsi_cmnd *cmnd)
6989 {
6990 	struct scsi_target *starget = scsi_target(cmnd->device);
6991 	struct sdebug_target_info *targetip =
6992 		(struct sdebug_target_info *)starget->hostdata;
6993 
6994 	if (targetip)
6995 		return targetip->reset_fail;
6996 
6997 	return 0;
6998 }
6999 
7000 static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt)
7001 {
7002 	struct scsi_device *sdp = SCpnt->device;
7003 	struct sdebug_host_info *sdbg_host = shost_to_sdebug_host(sdp->host);
7004 	struct sdebug_dev_info *devip;
7005 	u8 *cmd = SCpnt->cmnd;
7006 	u8 opcode = cmd[0];
7007 	int k = 0;
7008 
7009 	++num_target_resets;
7010 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
7011 		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
7012 
7013 	list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
7014 		if (devip->target == sdp->id) {
7015 			set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
7016 			if (SCpnt->device->type == TYPE_TAPE)
7017 				scsi_tape_reset_clear(devip);
7018 			++k;
7019 		}
7020 	}
7021 
7022 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
7023 		sdev_printk(KERN_INFO, sdp,
7024 			    "%s: %d device(s) found in target\n", __func__, k);
7025 
7026 	if (sdebug_fail_target_reset(SCpnt)) {
7027 		scmd_printk(KERN_INFO, SCpnt, "fail target reset 0x%x\n",
7028 			    opcode);
7029 		return FAILED;
7030 	}
7031 
7032 	return SUCCESS;
7033 }
7034 
7035 static int scsi_debug_bus_reset(struct scsi_cmnd *SCpnt)
7036 {
7037 	struct scsi_device *sdp = SCpnt->device;
7038 	struct sdebug_host_info *sdbg_host = shost_to_sdebug_host(sdp->host);
7039 	struct sdebug_dev_info *devip;
7040 	int k = 0;
7041 
7042 	++num_bus_resets;
7043 
7044 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
7045 		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
7046 
7047 	list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
7048 		set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
7049 		if (SCpnt->device->type == TYPE_TAPE)
7050 			scsi_tape_reset_clear(devip);
7051 		++k;
7052 	}
7053 
7054 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
7055 		sdev_printk(KERN_INFO, sdp,
7056 			    "%s: %d device(s) found in host\n", __func__, k);
7057 	return SUCCESS;
7058 }
7059 
7060 static int scsi_debug_host_reset(struct scsi_cmnd *SCpnt)
7061 {
7062 	struct sdebug_host_info *sdbg_host;
7063 	struct sdebug_dev_info *devip;
7064 	int k = 0;
7065 
7066 	++num_host_resets;
7067 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
7068 		sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__);
7069 	mutex_lock(&sdebug_host_list_mutex);
7070 	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
7071 		list_for_each_entry(devip, &sdbg_host->dev_info_list,
7072 				    dev_list) {
7073 			set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
7074 			if (SCpnt->device->type == TYPE_TAPE)
7075 				scsi_tape_reset_clear(devip);
7076 			++k;
7077 		}
7078 	}
7079 	mutex_unlock(&sdebug_host_list_mutex);
7080 	stop_all_queued();
7081 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
7082 		sdev_printk(KERN_INFO, SCpnt->device,
7083 			    "%s: %d device(s) found\n", __func__, k);
7084 	return SUCCESS;
7085 }
7086 
7087 static void sdebug_build_parts(unsigned char *ramp, unsigned long store_size)
7088 {
7089 	struct msdos_partition *pp;
7090 	int starts[SDEBUG_MAX_PARTS + 2], max_part_secs;
7091 	int sectors_per_part, num_sectors, k;
7092 	int heads_by_sects, start_sec, end_sec;
7093 
7094 	/* assume partition table already zeroed */
7095 	if ((sdebug_num_parts < 1) || (store_size < 1048576))
7096 		return;
7097 	if (sdebug_num_parts > SDEBUG_MAX_PARTS) {
7098 		sdebug_num_parts = SDEBUG_MAX_PARTS;
7099 		pr_warn("reducing partitions to %d\n", SDEBUG_MAX_PARTS);
7100 	}
7101 	num_sectors = (int)get_sdebug_capacity();
7102 	sectors_per_part = (num_sectors - sdebug_sectors_per)
7103 			   / sdebug_num_parts;
7104 	heads_by_sects = sdebug_heads * sdebug_sectors_per;
7105 	starts[0] = sdebug_sectors_per;
7106 	max_part_secs = sectors_per_part;
7107 	for (k = 1; k < sdebug_num_parts; ++k) {
7108 		starts[k] = ((k * sectors_per_part) / heads_by_sects)
7109 			    * heads_by_sects;
7110 		if (starts[k] - starts[k - 1] < max_part_secs)
7111 			max_part_secs = starts[k] - starts[k - 1];
7112 	}
7113 	starts[sdebug_num_parts] = num_sectors;
7114 	starts[sdebug_num_parts + 1] = 0;
7115 
7116 	ramp[510] = 0x55;	/* magic partition markings */
7117 	ramp[511] = 0xAA;
7118 	pp = (struct msdos_partition *)(ramp + 0x1be);
7119 	for (k = 0; starts[k + 1]; ++k, ++pp) {
7120 		start_sec = starts[k];
7121 		end_sec = starts[k] + max_part_secs - 1;
7122 		pp->boot_ind = 0;
7123 
7124 		pp->cyl = start_sec / heads_by_sects;
7125 		pp->head = (start_sec - (pp->cyl * heads_by_sects))
7126 			   / sdebug_sectors_per;
7127 		pp->sector = (start_sec % sdebug_sectors_per) + 1;
7128 
7129 		pp->end_cyl = end_sec / heads_by_sects;
7130 		pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
7131 			       / sdebug_sectors_per;
7132 		pp->end_sector = (end_sec % sdebug_sectors_per) + 1;
7133 
7134 		pp->start_sect = cpu_to_le32(start_sec);
7135 		pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1);
7136 		pp->sys_ind = 0x83;	/* plain Linux partition */
7137 	}
7138 }
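/*
 * Worked CHS example (hypothetical geometry): with sdebug_heads = 64 and
 * sdebug_sectors_per = 32, heads_by_sects = 2048; a partition starting at
 * sector 4096 encodes as cyl = 2, head = 0, sector = 1.
 */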
7139 
7140 static void block_unblock_all_queues(bool block)
7141 {
7142 	struct sdebug_host_info *sdhp;
7143 
7144 	lockdep_assert_held(&sdebug_host_list_mutex);
7145 
7146 	list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
7147 		struct Scsi_Host *shost = sdhp->shost;
7148 
7149 		if (block)
7150 			scsi_block_requests(shost);
7151 		else
7152 			scsi_unblock_requests(shost);
7153 	}
7154 }
7155 
7156 /* Adjust (by rounding down) the sdebug_cmnd_count so abs(every_nth)-1
7157  * commands will be processed normally before triggers occur.
7158  */
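/*
 * E.g. (illustrative): with every_nth = 100 and sdebug_cmnd_count at 250,
 * the count is rounded down to 200, so the next trigger fires at 300 after
 * 99 normally processed commands.
 */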
7159 static void tweak_cmnd_count(void)
7160 {
7161 	int count, modulo;
7162 
7163 	modulo = abs(sdebug_every_nth);
7164 	if (modulo < 2)
7165 		return;
7166 
7167 	mutex_lock(&sdebug_host_list_mutex);
7168 	block_unblock_all_queues(true);
7169 	count = atomic_read(&sdebug_cmnd_count);
7170 	atomic_set(&sdebug_cmnd_count, (count / modulo) * modulo);
7171 	block_unblock_all_queues(false);
7172 	mutex_unlock(&sdebug_host_list_mutex);
7173 }
7174 
7175 static void clear_queue_stats(void)
7176 {
7177 	atomic_set(&sdebug_cmnd_count, 0);
7178 	atomic_set(&sdebug_completions, 0);
7179 	atomic_set(&sdebug_miss_cpus, 0);
7180 	atomic_set(&sdebug_a_tsf, 0);
7181 }
7182 
7183 static bool inject_on_this_cmd(void)
7184 {
7185 	if (sdebug_every_nth == 0)
7186 		return false;
7187 	return (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) == 0;
7188 }
7189 
7190 #define INCLUSIVE_TIMING_MAX_NS 1000000		/* 1 millisecond */
7191 
7192 /* Complete the processing of the thread that queued a SCSI command to this
7193  * driver. It either completes the command by calling scsi_done() or
7194  * schedules an hrtimer or work queue and then returns 0. Returns
7195  * SCSI_MLQUEUE_HOST_BUSY if temporarily out of resources.
7196  */
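/*
 * Deferral choice (summary of the logic below): delta_jiff > 0 or ndelay > 0
 * arms an hrtimer; delta_jiff < 0 defers to a work queue; delta_jiff == 0
 * responds in the submitting thread; REQ_POLLED requests are instead marked
 * SDEB_DEFER_POLL for completion from the poll path.
 */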
7197 static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
7198 			 int scsi_result,
7199 			 int (*pfp)(struct scsi_cmnd *,
7200 				    struct sdebug_dev_info *),
7201 			 int delta_jiff, int ndelay)
7202 {
7203 	struct request *rq = scsi_cmd_to_rq(cmnd);
7204 	bool polled = rq->cmd_flags & REQ_POLLED;
7205 	struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmnd);
7206 	unsigned long flags;
7207 	u64 ns_from_boot = 0;
7208 	struct scsi_device *sdp;
7209 	struct sdebug_defer *sd_dp;
7210 
7211 	if (unlikely(devip == NULL)) {
7212 		if (scsi_result == 0)
7213 			scsi_result = DID_NO_CONNECT << 16;
7214 		goto respond_in_thread;
7215 	}
7216 	sdp = cmnd->device;
7217 
7218 	if (delta_jiff == 0)
7219 		goto respond_in_thread;
7220 
7221 
7222 	if (unlikely(sdebug_every_nth && (SDEBUG_OPT_RARE_TSF & sdebug_opts) &&
7223 		     (scsi_result == 0))) {
7224 		int num_in_q = scsi_device_busy(sdp);
7225 		int qdepth = cmnd->device->queue_depth;
7226 
7227 		if ((num_in_q == qdepth) &&
7228 		    (atomic_inc_return(&sdebug_a_tsf) >=
7229 		     abs(sdebug_every_nth))) {
7230 			atomic_set(&sdebug_a_tsf, 0);
7231 			scsi_result = device_qfull_result;
7232 
7233 			if (unlikely(SDEBUG_OPT_Q_NOISE & sdebug_opts))
7234 				sdev_printk(KERN_INFO, sdp, "%s: num_in_q=%d +1, <inject> status: TASK SET FULL\n",
7235 					    __func__, num_in_q);
7236 		}
7237 	}
7238 
7239 	sd_dp = &sdsc->sd_dp;
7240 
7241 	if (polled || (ndelay > 0 && ndelay < INCLUSIVE_TIMING_MAX_NS))
7242 		ns_from_boot = ktime_get_boottime_ns();
7243 
7244 	/* one of the resp_*() response functions is called here */
7245 	cmnd->result = pfp ? pfp(cmnd, devip) : 0;
7246 	if (cmnd->result & SDEG_RES_IMMED_MASK) {
7247 		cmnd->result &= ~SDEG_RES_IMMED_MASK;
7248 		delta_jiff = ndelay = 0;
7249 	}
7250 	if (cmnd->result == 0 && scsi_result != 0)
7251 		cmnd->result = scsi_result;
7252 	if (cmnd->result == 0 && unlikely(sdebug_opts & SDEBUG_OPT_TRANSPORT_ERR)) {
7253 		if (atomic_read(&sdeb_inject_pending)) {
7254 			mk_sense_buffer(cmnd, ABORTED_COMMAND, TRANSPORT_PROBLEM, ACK_NAK_TO);
7255 			atomic_set(&sdeb_inject_pending, 0);
7256 			cmnd->result = check_condition_result;
7257 		}
7258 	}
7259 
7260 	if (unlikely(sdebug_verbose && cmnd->result))
7261 		sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n",
7262 			    __func__, cmnd->result);
7263 
7264 	if (delta_jiff > 0 || ndelay > 0) {
7265 		ktime_t kt;
7266 
7267 		if (delta_jiff > 0) {
7268 			u64 ns = jiffies_to_nsecs(delta_jiff);
7269 
7270 			if (sdebug_random && ns < U32_MAX) {
7271 				ns = get_random_u32_below((u32)ns);
7272 			} else if (sdebug_random) {
7273 				ns >>= 12;	/* scale to 4 usec precision */
7274 				if (ns < U32_MAX)	/* over 4 hours max */
7275 					ns = get_random_u32_below((u32)ns);
7276 				ns <<= 12;
7277 			}
7278 			kt = ns_to_ktime(ns);
7279 		} else {	/* ndelay has a 4.2 second max */
7280 			kt = sdebug_random ? get_random_u32_below((u32)ndelay) :
7281 					     (u32)ndelay;
7282 			if (ndelay < INCLUSIVE_TIMING_MAX_NS) {
7283 				u64 d = ktime_get_boottime_ns() - ns_from_boot;
7284 
7285 				if (kt <= d) {	/* elapsed duration >= kt */
7286 					/* call scsi_done() from this thread */
7287 					scsi_done(cmnd);
7288 					return 0;
7289 				}
7290 				/* otherwise reduce kt by elapsed time */
7291 				kt -= d;
7292 			}
7293 		}
7294 		if (sdebug_statistics)
7295 			sd_dp->issuing_cpu = raw_smp_processor_id();
7296 		if (polled) {
7297 			spin_lock_irqsave(&sdsc->lock, flags);
7298 			sd_dp->cmpl_ts = ktime_add(ns_to_ktime(ns_from_boot), kt);
7299 			WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_POLL);
7300 			spin_unlock_irqrestore(&sdsc->lock, flags);
7301 		} else {
7302 			/* schedule the invocation of scsi_done() for a later time */
7303 			spin_lock_irqsave(&sdsc->lock, flags);
7304 			WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_HRT);
7305 			hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED);
7306 			/*
7307 			 * The completion handler will try to grab sqcp->lock,
7308 			 * so there is no chance that the completion handler
7309 			 * will call scsi_done() until we release the lock
7310 			 * here (so ok to keep referencing sdsc).
7311 			 */
7312 			spin_unlock_irqrestore(&sdsc->lock, flags);
7313 		}
7314 	} else {	/* jdelay < 0, use work queue */
7315 		if (unlikely((sdebug_opts & SDEBUG_OPT_CMD_ABORT) &&
7316 			     atomic_read(&sdeb_inject_pending))) {
7317 			sd_dp->aborted = true;
7318 			atomic_set(&sdeb_inject_pending, 0);
7319 			sdev_printk(KERN_INFO, sdp, "abort request tag=%#x\n",
7320 				    blk_mq_unique_tag_to_tag(get_tag(cmnd)));
7321 		}
7322 
7323 		if (sdebug_statistics)
7324 			sd_dp->issuing_cpu = raw_smp_processor_id();
7325 		if (polled) {
7326 			spin_lock_irqsave(&sdsc->lock, flags);
7327 			sd_dp->cmpl_ts = ns_to_ktime(ns_from_boot);
7328 			WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_POLL);
7329 			spin_unlock_irqrestore(&sdsc->lock, flags);
7330 		} else {
7331 			spin_lock_irqsave(&sdsc->lock, flags);
7332 			WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_WQ);
7333 			schedule_work(&sd_dp->ew.work);
7334 			spin_unlock_irqrestore(&sdsc->lock, flags);
7335 		}
7336 	}
7337 
7338 	return 0;
7339 
7340 respond_in_thread:	/* call back to mid-layer using invocation thread */
7341 	cmnd->result = pfp != NULL ? pfp(cmnd, devip) : 0;
7342 	cmnd->result &= ~SDEG_RES_IMMED_MASK;
7343 	if (cmnd->result == 0 && scsi_result != 0)
7344 		cmnd->result = scsi_result;
7345 	scsi_done(cmnd);
7346 	return 0;
7347 }
7348 
7349 /* Note: The following macros create attribute files in the
7350    /sys/module/scsi_debug/parameters directory. Unfortunately this
7351    driver is unaware of a change and cannot trigger auxiliary actions
7352    as it can when the corresponding attribute in the
7353    /sys/bus/pseudo/drivers/scsi_debug directory is changed.
7354  */
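/*
 * Example (illustrative): writing the corresponding driver attribute does
 * trigger those auxiliary actions, e.g.:
 *   echo 0x1 > /sys/bus/pseudo/drivers/scsi_debug/opts
 */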
7355 module_param_named(add_host, sdebug_add_host, int, S_IRUGO | S_IWUSR);
7356 module_param_named(ato, sdebug_ato, int, S_IRUGO);
7357 module_param_named(cdb_len, sdebug_cdb_len, int, 0644);
7358 module_param_named(clustering, sdebug_clustering, bool, S_IRUGO | S_IWUSR);
7359 module_param_named(delay, sdebug_jdelay, int, S_IRUGO | S_IWUSR);
7360 module_param_named(dev_size_mb, sdebug_dev_size_mb, int, S_IRUGO);
7361 module_param_named(dif, sdebug_dif, int, S_IRUGO);
7362 module_param_named(dix, sdebug_dix, int, S_IRUGO);
7363 module_param_named(dsense, sdebug_dsense, int, S_IRUGO | S_IWUSR);
7364 module_param_named(every_nth, sdebug_every_nth, int, S_IRUGO | S_IWUSR);
7365 module_param_named(fake_rw, sdebug_fake_rw, int, S_IRUGO | S_IWUSR);
7366 module_param_named(guard, sdebug_guard, uint, S_IRUGO);
7367 module_param_named(host_lock, sdebug_host_lock, bool, S_IRUGO | S_IWUSR);
7368 module_param_named(host_max_queue, sdebug_host_max_queue, int, S_IRUGO);
7369 module_param_string(inq_product, sdebug_inq_product_id,
7370 		    sizeof(sdebug_inq_product_id), S_IRUGO | S_IWUSR);
7371 module_param_string(inq_rev, sdebug_inq_product_rev,
7372 		    sizeof(sdebug_inq_product_rev), S_IRUGO | S_IWUSR);
7373 module_param_string(inq_vendor, sdebug_inq_vendor_id,
7374 		    sizeof(sdebug_inq_vendor_id), S_IRUGO | S_IWUSR);
7375 module_param_named(lbprz, sdebug_lbprz, int, S_IRUGO);
7376 module_param_named(lbpu, sdebug_lbpu, int, S_IRUGO);
7377 module_param_named(lbpws, sdebug_lbpws, int, S_IRUGO);
7378 module_param_named(lbpws10, sdebug_lbpws10, int, S_IRUGO);
7379 module_param_named(atomic_wr, sdebug_atomic_wr, int, S_IRUGO);
7380 module_param_named(lowest_aligned, sdebug_lowest_aligned, int, S_IRUGO);
7381 module_param_named(lun_format, sdebug_lun_am_i, int, S_IRUGO | S_IWUSR);
7382 module_param_named(max_luns, sdebug_max_luns, int, S_IRUGO | S_IWUSR);
7383 module_param_named(max_queue, sdebug_max_queue, int, S_IRUGO | S_IWUSR);
7384 module_param_named(medium_error_count, sdebug_medium_error_count, int,
7385 		   S_IRUGO | S_IWUSR);
7386 module_param_named(medium_error_start, sdebug_medium_error_start, int,
7387 		   S_IRUGO | S_IWUSR);
7388 module_param_named(ndelay, sdebug_ndelay, int, S_IRUGO | S_IWUSR);
7389 module_param_named(no_lun_0, sdebug_no_lun_0, int, S_IRUGO | S_IWUSR);
7390 module_param_named(no_rwlock, sdebug_no_rwlock, bool, S_IRUGO | S_IWUSR);
7391 module_param_named(no_uld, sdebug_no_uld, int, S_IRUGO);
7392 module_param_named(num_parts, sdebug_num_parts, int, S_IRUGO);
7393 module_param_named(num_tgts, sdebug_num_tgts, int, S_IRUGO | S_IWUSR);
7394 module_param_named(opt_blks, sdebug_opt_blks, int, S_IRUGO);
7395 module_param_named(opt_xferlen_exp, sdebug_opt_xferlen_exp, int, S_IRUGO);
7396 module_param_named(opts, sdebug_opts, int, S_IRUGO | S_IWUSR);
7397 module_param_named(per_host_store, sdebug_per_host_store, bool,
7398 		   S_IRUGO | S_IWUSR);
7399 module_param_named(physblk_exp, sdebug_physblk_exp, int, S_IRUGO);
7400 module_param_named(ptype, sdebug_ptype, int, S_IRUGO | S_IWUSR);
7401 module_param_named(random, sdebug_random, bool, S_IRUGO | S_IWUSR);
7402 module_param_named(removable, sdebug_removable, bool, S_IRUGO | S_IWUSR);
7403 module_param_named(scsi_level, sdebug_scsi_level, int, S_IRUGO);
7404 module_param_named(sector_size, sdebug_sector_size, int, S_IRUGO);
7405 module_param_named(statistics, sdebug_statistics, bool, S_IRUGO | S_IWUSR);
7406 module_param_named(strict, sdebug_strict, bool, S_IRUGO | S_IWUSR);
7407 module_param_named(submit_queues, submit_queues, int, S_IRUGO);
7408 module_param_named(poll_queues, poll_queues, int, S_IRUGO);
7409 module_param_named(tur_ms_to_ready, sdeb_tur_ms_to_ready, int, S_IRUGO);
7410 module_param_named(unmap_alignment, sdebug_unmap_alignment, int, S_IRUGO);
7411 module_param_named(unmap_granularity, sdebug_unmap_granularity, int, S_IRUGO);
7412 module_param_named(unmap_max_blocks, sdebug_unmap_max_blocks, int, S_IRUGO);
7413 module_param_named(unmap_max_desc, sdebug_unmap_max_desc, int, S_IRUGO);
7414 module_param_named(atomic_wr_max_length, sdebug_atomic_wr_max_length, int, S_IRUGO);
7415 module_param_named(atomic_wr_align, sdebug_atomic_wr_align, int, S_IRUGO);
7416 module_param_named(atomic_wr_gran, sdebug_atomic_wr_gran, int, S_IRUGO);
7417 module_param_named(atomic_wr_max_length_bndry, sdebug_atomic_wr_max_length_bndry, int, S_IRUGO);
7418 module_param_named(atomic_wr_max_bndry, sdebug_atomic_wr_max_bndry, int, S_IRUGO);
7419 module_param_named(uuid_ctl, sdebug_uuid_ctl, int, S_IRUGO);
7420 module_param_named(virtual_gb, sdebug_virtual_gb, int, S_IRUGO | S_IWUSR);
7421 module_param_named(vpd_use_hostno, sdebug_vpd_use_hostno, int,
7422 		   S_IRUGO | S_IWUSR);
7423 module_param_named(wp, sdebug_wp, bool, S_IRUGO | S_IWUSR);
7424 module_param_named(write_same_length, sdebug_write_same_length, int,
7425 		   S_IRUGO | S_IWUSR);
7426 module_param_named(zbc, sdeb_zbc_model_s, charp, S_IRUGO);
7427 module_param_named(zone_cap_mb, sdeb_zbc_zone_cap_mb, int, S_IRUGO);
7428 module_param_named(zone_max_open, sdeb_zbc_max_open, int, S_IRUGO);
7429 module_param_named(zone_nr_conv, sdeb_zbc_nr_conv, int, S_IRUGO);
7430 module_param_named(zone_size_mb, sdeb_zbc_zone_size_mb, int, S_IRUGO);
7431 module_param_named(allow_restart, sdebug_allow_restart, bool, S_IRUGO | S_IWUSR);
7432 
7433 MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
7434 MODULE_DESCRIPTION("SCSI debug adapter driver");
7435 MODULE_LICENSE("GPL");
7436 MODULE_VERSION(SDEBUG_VERSION);
7437 
7438 MODULE_PARM_DESC(add_host, "add n hosts, in sysfs if negative remove host(s) (def=1)");
7439 MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
7440 MODULE_PARM_DESC(cdb_len, "suggest CDB lengths to drivers (def=10)");
7441 MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
7442 MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny");
7443 MODULE_PARM_DESC(dev_size_mb, "size in MiB of ram shared by devs(def=8)");
7444 MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
7445 MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
7446 MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
7447 MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
7448 MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
7449 MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
7450 MODULE_PARM_DESC(host_lock, "host_lock is ignored (def=0)");
7451 MODULE_PARM_DESC(host_max_queue,
7452 		 "host max # of queued cmds (0 to max(def) [max_queue fixed equal for !0])");
7453 MODULE_PARM_DESC(inq_product, "SCSI INQUIRY product string (def=\"scsi_debug\")");
7454 MODULE_PARM_DESC(inq_rev, "SCSI INQUIRY revision string (def=\""
7455 		 SDEBUG_VERSION "\")");
7456 MODULE_PARM_DESC(inq_vendor, "SCSI INQUIRY vendor string (def=\"Linux\")");
7457 MODULE_PARM_DESC(lbprz,
7458 		 "on read unmapped LBs return 0 when 1 (def), return 0xff when 2");
7459 MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
7460 MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
7461 MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
7462 MODULE_PARM_DESC(atomic_wr, "enable ATOMIC WRITE support via WRITE ATOMIC(16) (def=0)");
7463 MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
7464 MODULE_PARM_DESC(lun_format, "LUN format: 0 -> peripheral (def); 1 -> flat address method");
7465 MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
7466 MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to max(def))");
7467 MODULE_PARM_DESC(medium_error_count, "count of sectors to return follow on MEDIUM error");
7468 MODULE_PARM_DESC(medium_error_start, "starting sector number to return MEDIUM error");
7469 MODULE_PARM_DESC(ndelay, "response delay in nanoseconds (def=0 -> ignore)");
7470 MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
7471 MODULE_PARM_DESC(no_rwlock, "don't protect user data reads+writes (def=0)");
7472 MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0)");
7473 MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
7474 MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
7475 MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=1024)");
7476 MODULE_PARM_DESC(opt_xferlen_exp, "optimal transfer length granularity exponent (def=physblk_exp)");
7477 MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
7478 MODULE_PARM_DESC(per_host_store, "If set, next positive add_host will get new store (def=0)");
7479 MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
7480 MODULE_PARM_DESC(poll_queues, "support for iouring iopoll queues (1 to max(submit_queues - 1))");
7481 MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
7482 MODULE_PARM_DESC(random, "If set, uniformly randomize command duration between 0 and delay_in_ns");
7483 MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
7484 MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=7[SPC-5])");
7485 MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
7486 MODULE_PARM_DESC(statistics, "collect statistics on commands, queues (def=0)");
7487 MODULE_PARM_DESC(strict, "stricter checks: reserved field in cdb (def=0)");
7488 MODULE_PARM_DESC(submit_queues, "support for block multi-queue (def=1)");
7489 MODULE_PARM_DESC(tur_ms_to_ready, "TEST UNIT READY millisecs before initial good status (def=0)");
7490 MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
7491 MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
7492 MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)");
7493 MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
7494 MODULE_PARM_DESC(atomic_wr_max_length, "max # of blocks can be atomically written in one cmd (def=8192)");
7495 MODULE_PARM_DESC(atomic_wr_align, "minimum alignment of atomic write in blocks (def=2)");
7496 MODULE_PARM_DESC(atomic_wr_gran, "minimum granularity of atomic write in blocks (def=2)");
7497 MODULE_PARM_DESC(atomic_wr_max_length_bndry, "max # of blocks can be atomically written in one cmd with boundary set (def=8192)");
7498 MODULE_PARM_DESC(atomic_wr_max_bndry, "max # boundaries per atomic write (def=128)");
7499 MODULE_PARM_DESC(uuid_ctl,
7500 		 "1->use uuid for lu name, 0->don't, 2->all use same (def=0)");
7501 MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)");
7502 MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
7503 MODULE_PARM_DESC(wp, "Write Protect (def=0)");
7504 MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
7505 MODULE_PARM_DESC(zbc, "'none' [0]; 'aware' [1]; 'managed' [2] (def=0). Can have 'host-' prefix");
7506 MODULE_PARM_DESC(zone_cap_mb, "Zone capacity in MiB (def=zone size)");
7507 MODULE_PARM_DESC(zone_max_open, "Maximum number of open zones; [0] for no limit (def=auto)");
7508 MODULE_PARM_DESC(zone_nr_conv, "Number of conventional zones (def=1)");
7509 MODULE_PARM_DESC(zone_size_mb, "Zone size in MiB (def=auto)");
7510 MODULE_PARM_DESC(allow_restart, "Set scsi_device's allow_restart flag(def=0)");
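/*
 * Example invocation (illustrative values only): create one host with a
 * small host-managed ZBC disk:
 *   modprobe scsi_debug dev_size_mb=256 zbc=managed zone_size_mb=4 \
 *            zone_nr_conv=1 max_queue=64
 */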
7511 
7512 #define SDEBUG_INFO_LEN 256
7513 static char sdebug_info[SDEBUG_INFO_LEN];
7514 
7515 static const char *scsi_debug_info(struct Scsi_Host *shp)
7516 {
7517 	int k;
7518 
7519 	k = scnprintf(sdebug_info, SDEBUG_INFO_LEN, "%s: version %s [%s]\n",
7520 		      my_name, SDEBUG_VERSION, sdebug_version_date);
7521 	if (k >= (SDEBUG_INFO_LEN - 1))
7522 		return sdebug_info;
7523 	scnprintf(sdebug_info + k, SDEBUG_INFO_LEN - k,
7524 		  "  dev_size_mb=%d, opts=0x%x, submit_queues=%d, %s=%d",
7525 		  sdebug_dev_size_mb, sdebug_opts, submit_queues,
7526 		  "statistics", (int)sdebug_statistics);
7527 	return sdebug_info;
7528 }
7529 
7530 /* 'echo <val> > /proc/scsi/scsi_debug/<host_id>' writes to opts */
7531 static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer,
7532 				 int length)
7533 {
7534 	char arr[16];
7535 	int opts;
7536 	int minLen = length > 15 ? 15 : length;
7537 
7538 	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
7539 		return -EACCES;
7540 	memcpy(arr, buffer, minLen);
7541 	arr[minLen] = '\0';
7542 	if (1 != sscanf(arr, "%d", &opts))
7543 		return -EINVAL;
7544 	sdebug_opts = opts;
7545 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
7546 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
7547 	if (sdebug_every_nth != 0)
7548 		tweak_cmnd_count();
7549 	return length;
7550 }
7551 
7552 struct sdebug_submit_queue_data {
7553 	int *first;
7554 	int *last;
7555 	int queue_num;
7556 };
7557 
7558 static bool sdebug_submit_queue_iter(struct request *rq, void *opaque)
7559 {
7560 	struct sdebug_submit_queue_data *data = opaque;
7561 	u32 unique_tag = blk_mq_unique_tag(rq);
7562 	u16 hwq = blk_mq_unique_tag_to_hwq(unique_tag);
7563 	u16 tag = blk_mq_unique_tag_to_tag(unique_tag);
7564 	int queue_num = data->queue_num;
7565 
7566 	if (hwq != queue_num)
7567 		return true;
7568 
7569 	/* Rely on iterating in ascending tag order */
7570 	if (*data->first == -1)
7571 		*data->first = *data->last = tag;
7572 	else
7573 		*data->last = tag;
7574 
7575 	return true;
7576 }
7577 
7578 /* Output seen with 'cat /proc/scsi/scsi_debug/<host_id>'. It will be the
7579  * same for each scsi_debug host (if more than one). Some of the counters
7580  * shown are not atomic, so they may be inaccurate on a busy system. */
7581 static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
7582 {
7583 	struct sdebug_host_info *sdhp;
7584 	int j;
7585 
7586 	seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n",
7587 		   SDEBUG_VERSION, sdebug_version_date);
7588 	seq_printf(m, "num_tgts=%d, %ssize=%d MB, opts=0x%x, every_nth=%d\n",
7589 		   sdebug_num_tgts, "shared (ram) ", sdebug_dev_size_mb,
7590 		   sdebug_opts, sdebug_every_nth);
7591 	seq_printf(m, "delay=%d, ndelay=%d, max_luns=%d, sector_size=%d %s\n",
7592 		   sdebug_jdelay, sdebug_ndelay, sdebug_max_luns,
7593 		   sdebug_sector_size, "bytes");
7594 	seq_printf(m, "cylinders=%d, heads=%d, sectors=%d, command aborts=%d\n",
7595 		   sdebug_cylinders_per, sdebug_heads, sdebug_sectors_per,
7596 		   num_aborts);
7597 	seq_printf(m, "RESETs: device=%d, target=%d, bus=%d, host=%d\n",
7598 		   num_dev_resets, num_target_resets, num_bus_resets,
7599 		   num_host_resets);
7600 	seq_printf(m, "dix_reads=%d, dix_writes=%d, dif_errors=%d\n",
7601 		   dix_reads, dix_writes, dif_errors);
7602 	seq_printf(m, "usec_in_jiffy=%lu, statistics=%d\n", TICK_NSEC / 1000,
7603 		   sdebug_statistics);
7604 	seq_printf(m, "cmnd_count=%d, completions=%d, %s=%d, a_tsf=%d, mq_polls=%d\n",
7605 		   atomic_read(&sdebug_cmnd_count),
7606 		   atomic_read(&sdebug_completions),
7607 		   "miss_cpus", atomic_read(&sdebug_miss_cpus),
7608 		   atomic_read(&sdebug_a_tsf),
7609 		   atomic_read(&sdeb_mq_poll_count));
7610 
7611 	seq_printf(m, "submit_queues=%d\n", submit_queues);
7612 	for (j = 0; j < submit_queues; ++j) {
7613 		int f = -1, l = -1;
7614 		struct sdebug_submit_queue_data data = {
7615 			.queue_num = j,
7616 			.first = &f,
7617 			.last = &l,
7618 		};
7619 		seq_printf(m, "  queue %d:\n", j);
7620 		blk_mq_tagset_busy_iter(&host->tag_set, sdebug_submit_queue_iter,
7621 					&data);
7622 		if (f >= 0) {
7623 			seq_printf(m, "    BUSY: %s: %d,%d\n",
7624 				   "first,last bits", f, l);
7625 		}
7626 	}
7627 
7628 	seq_printf(m, "this host_no=%d\n", host->host_no);
7629 	if (!xa_empty(per_store_ap)) {
7630 		bool niu;
7631 		int idx;
7632 		unsigned long l_idx;
7633 		struct sdeb_store_info *sip;
7634 
7635 		seq_puts(m, "\nhost list:\n");
7636 		j = 0;
7637 		list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
7638 			idx = sdhp->si_idx;
7639 			seq_printf(m, "  %d: host_no=%d, si_idx=%d\n", j,
7640 				   sdhp->shost->host_no, idx);
7641 			++j;
7642 		}
7643 		seq_printf(m, "\nper_store array [most_recent_idx=%d]:\n",
7644 			   sdeb_most_recent_idx);
7645 		j = 0;
7646 		xa_for_each(per_store_ap, l_idx, sip) {
7647 			niu = xa_get_mark(per_store_ap, l_idx,
7648 					  SDEB_XA_NOT_IN_USE);
7649 			idx = (int)l_idx;
7650 			seq_printf(m, "  %d: idx=%d%s\n", j, idx,
7651 				   (niu ? "  not_in_use" : ""));
7652 			++j;
7653 		}
7654 	}
7655 	return 0;
7656 }
7657 
7658 static ssize_t delay_show(struct device_driver *ddp, char *buf)
7659 {
7660 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_jdelay);
7661 }
7662 /* Returns -EBUSY if jdelay is being changed and commands are queued. The unit
7663  * of delay is jiffies.
7664  */
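/* Example (illustrative): echo 2 > /sys/bus/pseudo/drivers/scsi_debug/delay */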
7665 static ssize_t delay_store(struct device_driver *ddp, const char *buf,
7666 			   size_t count)
7667 {
7668 	int jdelay, res;
7669 
7670 	if (count > 0 && sscanf(buf, "%d", &jdelay) == 1) {
7671 		res = count;
7672 		if (sdebug_jdelay != jdelay) {
7673 			struct sdebug_host_info *sdhp;
7674 
7675 			mutex_lock(&sdebug_host_list_mutex);
7676 			block_unblock_all_queues(true);
7677 
7678 			list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
7679 				struct Scsi_Host *shost = sdhp->shost;
7680 
7681 				if (scsi_host_busy(shost)) {
7682 					res = -EBUSY;   /* queued commands */
7683 					break;
7684 				}
7685 			}
7686 			if (res > 0) {
7687 				sdebug_jdelay = jdelay;
7688 				sdebug_ndelay = 0;
7689 			}
7690 			block_unblock_all_queues(false);
7691 			mutex_unlock(&sdebug_host_list_mutex);
7692 		}
7693 		return res;
7694 	}
7695 	return -EINVAL;
7696 }
7697 static DRIVER_ATTR_RW(delay);
7698 
7699 static ssize_t ndelay_show(struct device_driver *ddp, char *buf)
7700 {
7701 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ndelay);
7702 }
7703 /* Returns -EBUSY if ndelay is being changed and commands are queued */
7704 /* If > 0 and accepted then sdebug_jdelay is set to JDELAY_OVERRIDDEN */
7705 static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
7706 			    size_t count)
7707 {
7708 	int ndelay, res;
7709 
7710 	if ((count > 0) && (1 == sscanf(buf, "%d", &ndelay)) &&
7711 	    (ndelay >= 0) && (ndelay < (1000 * 1000 * 1000))) {
7712 		res = count;
7713 		if (sdebug_ndelay != ndelay) {
7714 			struct sdebug_host_info *sdhp;
7715 
7716 			mutex_lock(&sdebug_host_list_mutex);
7717 			block_unblock_all_queues(true);
7718 
7719 			list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
7720 				struct Scsi_Host *shost = sdhp->shost;
7721 
7722 				if (scsi_host_busy(shost)) {
7723 					res = -EBUSY;   /* queued commands */
7724 					break;
7725 				}
7726 			}
7727 
7728 			if (res > 0) {
7729 				sdebug_ndelay = ndelay;
7730 				sdebug_jdelay = ndelay  ? JDELAY_OVERRIDDEN
7731 							: DEF_JDELAY;
7732 			}
7733 			block_unblock_all_queues(false);
7734 			mutex_unlock(&sdebug_host_list_mutex);
7735 		}
7736 		return res;
7737 	}
7738 	return -EINVAL;
7739 }
7740 static DRIVER_ATTR_RW(ndelay);
7741 
7742 static ssize_t opts_show(struct device_driver *ddp, char *buf)
7743 {
7744 	return scnprintf(buf, PAGE_SIZE, "0x%x\n", sdebug_opts);
7745 }
7746 
7747 static ssize_t opts_store(struct device_driver *ddp, const char *buf,
7748 			  size_t count)
7749 {
7750 	int opts;
7751 	char work[20];
7752 
7753 	if (sscanf(buf, "%10s", work) == 1) {
7754 		if (strncasecmp(work, "0x", 2) == 0) {
7755 			if (kstrtoint(work + 2, 16, &opts) == 0)
7756 				goto opts_done;
7757 		} else {
7758 			if (kstrtoint(work, 10, &opts) == 0)
7759 				goto opts_done;
7760 		}
7761 	}
7762 	return -EINVAL;
7763 opts_done:
7764 	sdebug_opts = opts;
7765 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
7766 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
7767 	tweak_cmnd_count();
7768 	return count;
7769 }
7770 static DRIVER_ATTR_RW(opts);
7771 
7772 static ssize_t ptype_show(struct device_driver *ddp, char *buf)
7773 {
7774 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ptype);
7775 }
7776 static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
7777 			   size_t count)
7778 {
7779 	int n;
7780 
7781 	/* Cannot change from or to TYPE_ZBC with sysfs */
7782 	if (sdebug_ptype == TYPE_ZBC)
7783 		return -EINVAL;
7784 
7785 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
7786 		if (n == TYPE_ZBC)
7787 			return -EINVAL;
7788 		sdebug_ptype = n;
7789 		return count;
7790 	}
7791 	return -EINVAL;
7792 }
7793 static DRIVER_ATTR_RW(ptype);
7794 
7795 static ssize_t dsense_show(struct device_driver *ddp, char *buf)
7796 {
7797 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dsense);
7798 }
7799 static ssize_t dsense_store(struct device_driver *ddp, const char *buf,
7800 			    size_t count)
7801 {
7802 	int n;
7803 
7804 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
7805 		sdebug_dsense = n;
7806 		return count;
7807 	}
7808 	return -EINVAL;
7809 }
7810 static DRIVER_ATTR_RW(dsense);
7811 
7812 static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
7813 {
7814 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_fake_rw);
7815 }
7816 static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
7817 			     size_t count)
7818 {
7819 	int n, idx;
7820 
7821 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
7822 		bool want_store = (n == 0);
7823 		struct sdebug_host_info *sdhp;
7824 
7825 		n = (n > 0);
7826 		sdebug_fake_rw = (sdebug_fake_rw > 0);
7827 		if (sdebug_fake_rw == n)
7828 			return count;	/* not transitioning so do nothing */
7829 
7830 		if (want_store) {	/* 1 --> 0 transition, set up store */
7831 			if (sdeb_first_idx < 0) {
7832 				idx = sdebug_add_store();
7833 				if (idx < 0)
7834 					return idx;
7835 			} else {
7836 				idx = sdeb_first_idx;
7837 				xa_clear_mark(per_store_ap, idx,
7838 					      SDEB_XA_NOT_IN_USE);
7839 			}
7840 			/* make all hosts use same store */
7841 			list_for_each_entry(sdhp, &sdebug_host_list,
7842 					    host_list) {
7843 				if (sdhp->si_idx != idx) {
7844 					xa_set_mark(per_store_ap, sdhp->si_idx,
7845 						    SDEB_XA_NOT_IN_USE);
7846 					sdhp->si_idx = idx;
7847 				}
7848 			}
7849 			sdeb_most_recent_idx = idx;
7850 		} else {	/* 0 --> 1 transition is trigger for shrink */
7851 			sdebug_erase_all_stores(true /* apart from first */);
7852 		}
7853 		sdebug_fake_rw = n;
7854 		return count;
7855 	}
7856 	return -EINVAL;
7857 }
7858 static DRIVER_ATTR_RW(fake_rw);
7859 
7860 static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf)
7861 {
7862 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_lun_0);
7863 }
7864 static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf,
7865 			      size_t count)
7866 {
7867 	int n;
7868 
7869 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
7870 		sdebug_no_lun_0 = n;
7871 		return count;
7872 	}
7873 	return -EINVAL;
7874 }
7875 static DRIVER_ATTR_RW(no_lun_0);
7876 
7877 static ssize_t num_tgts_show(struct device_driver *ddp, char *buf)
7878 {
7879 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_tgts);
7880 }
7881 static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf,
7882 			      size_t count)
7883 {
7884 	int n;
7885 
7886 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
7887 		sdebug_num_tgts = n;
7888 		sdebug_max_tgts_luns();
7889 		return count;
7890 	}
7891 	return -EINVAL;
7892 }
7893 static DRIVER_ATTR_RW(num_tgts);
7894 
7895 static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf)
7896 {
7897 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dev_size_mb);
7898 }
7899 static DRIVER_ATTR_RO(dev_size_mb);
7900 
7901 static ssize_t per_host_store_show(struct device_driver *ddp, char *buf)
7902 {
7903 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_per_host_store);
7904 }
7905 
7906 static ssize_t per_host_store_store(struct device_driver *ddp, const char *buf,
7907 				    size_t count)
7908 {
7909 	bool v;
7910 
7911 	if (kstrtobool(buf, &v))
7912 		return -EINVAL;
7913 
7914 	sdebug_per_host_store = v;
7915 	return count;
7916 }
7917 static DRIVER_ATTR_RW(per_host_store);
7918 
7919 static ssize_t num_parts_show(struct device_driver *ddp, char *buf)
7920 {
7921 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_parts);
7922 }
7923 static DRIVER_ATTR_RO(num_parts);
7924 
7925 static ssize_t every_nth_show(struct device_driver *ddp, char *buf)
7926 {
7927 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_every_nth);
7928 }
7929 static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
7930 			       size_t count)
7931 {
7932 	int nth;
7933 	char work[20];
7934 
7935 	if (sscanf(buf, "%10s", work) == 1) {
7936 		if (strncasecmp(work, "0x", 2) == 0) {
7937 			if (kstrtoint(work + 2, 16, &nth) == 0)
7938 				goto every_nth_done;
7939 		} else {
7940 			if (kstrtoint(work, 10, &nth) == 0)
7941 				goto every_nth_done;
7942 		}
7943 	}
7944 	return -EINVAL;
7945 
7946 every_nth_done:
7947 	sdebug_every_nth = nth;
7948 	if (nth && !sdebug_statistics) {
7949 		pr_info("every_nth needs statistics=1, set it\n");
7950 		sdebug_statistics = true;
7951 	}
7952 	tweak_cmnd_count();
7953 	return count;
7954 }
7955 static DRIVER_ATTR_RW(every_nth);
7956 
7957 static ssize_t lun_format_show(struct device_driver *ddp, char *buf)
7958 {
7959 	return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_lun_am);
7960 }
7961 static ssize_t lun_format_store(struct device_driver *ddp, const char *buf,
7962 				size_t count)
7963 {
7964 	int n;
7965 	bool changed;
7966 
7967 	if (kstrtoint(buf, 0, &n))
7968 		return -EINVAL;
7969 	if (n >= 0) {
7970 		if (n > (int)SAM_LUN_AM_FLAT) {
7971 			pr_warn("only LUN address methods 0 and 1 are supported\n");
7972 			return -EINVAL;
7973 		}
7974 		changed = ((int)sdebug_lun_am != n);
7975 		sdebug_lun_am = n;
7976 		if (changed && sdebug_scsi_level >= 5) {	/* >= SPC-3 */
7977 			struct sdebug_host_info *sdhp;
7978 			struct sdebug_dev_info *dp;
7979 
7980 			mutex_lock(&sdebug_host_list_mutex);
7981 			list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
7982 				list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
7983 					set_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
7984 				}
7985 			}
7986 			mutex_unlock(&sdebug_host_list_mutex);
7987 		}
7988 		return count;
7989 	}
7990 	return -EINVAL;
7991 }
7992 static DRIVER_ATTR_RW(lun_format);
7993 
7994 static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
7995 {
7996 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_luns);
7997 }
7998 static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
7999 			      size_t count)
8000 {
8001 	int n;
8002 	bool changed;
8003 
8004 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
8005 		if (n > 256) {
8006 			pr_warn("max_luns can be no more than 256\n");
8007 			return -EINVAL;
8008 		}
8009 		changed = (sdebug_max_luns != n);
8010 		sdebug_max_luns = n;
8011 		sdebug_max_tgts_luns();
8012 		if (changed && (sdebug_scsi_level >= 5)) {	/* >= SPC-3 */
8013 			struct sdebug_host_info *sdhp;
8014 			struct sdebug_dev_info *dp;
8015 
8016 			mutex_lock(&sdebug_host_list_mutex);
8017 			list_for_each_entry(sdhp, &sdebug_host_list,
8018 					    host_list) {
8019 				list_for_each_entry(dp, &sdhp->dev_info_list,
8020 						    dev_list) {
8021 					set_bit(SDEBUG_UA_LUNS_CHANGED,
8022 						dp->uas_bm);
8023 				}
8024 			}
8025 			mutex_unlock(&sdebug_host_list_mutex);
8026 		}
8027 		return count;
8028 	}
8029 	return -EINVAL;
8030 }
8031 static DRIVER_ATTR_RW(max_luns);
8032 
8033 static ssize_t max_queue_show(struct device_driver *ddp, char *buf)
8034 {
8035 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_queue);
8036 }
8037 /* N.B. max_queue can be changed while there are queued commands. In-flight
8038  * commands beyond the new max_queue will be completed. */
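/* Example (only succeeds while no scsi_debug hosts exist, else -EBUSY):
 *   echo 16 > /sys/bus/pseudo/drivers/scsi_debug/max_queue
 */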
8039 static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
8040 			       size_t count)
8041 {
8042 	int n;
8043 
8044 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
8045 	    (n <= SDEBUG_CANQUEUE) &&
8046 	    (sdebug_host_max_queue == 0)) {
8047 		mutex_lock(&sdebug_host_list_mutex);
8048 
8049 		/* We may only change sdebug_max_queue when we have no shosts */
8050 		if (list_empty(&sdebug_host_list))
8051 			sdebug_max_queue = n;
8052 		else
8053 			count = -EBUSY;
8054 		mutex_unlock(&sdebug_host_list_mutex);
8055 		return count;
8056 	}
8057 	return -EINVAL;
8058 }
8059 static DRIVER_ATTR_RW(max_queue);
8060 
8061 static ssize_t host_max_queue_show(struct device_driver *ddp, char *buf)
8062 {
8063 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_host_max_queue);
8064 }
8065 
8066 static ssize_t no_rwlock_show(struct device_driver *ddp, char *buf)
8067 {
8068 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_rwlock);
8069 }
8070 
8071 static ssize_t no_rwlock_store(struct device_driver *ddp, const char *buf, size_t count)
8072 {
8073 	bool v;
8074 
8075 	if (kstrtobool(buf, &v))
8076 		return -EINVAL;
8077 
8078 	sdebug_no_rwlock = v;
8079 	return count;
8080 }
8081 static DRIVER_ATTR_RW(no_rwlock);
8082 
8083 /*
8084  * Since this is used for .can_queue, and we get the hc_idx tag from the bitmap
8085  * in range [0, sdebug_host_max_queue), we can't change it.
8086  */
8087 static DRIVER_ATTR_RO(host_max_queue);
8088 
8089 static ssize_t no_uld_show(struct device_driver *ddp, char *buf)
8090 {
8091 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_uld);
8092 }
8093 static DRIVER_ATTR_RO(no_uld);
8094 
8095 static ssize_t scsi_level_show(struct device_driver *ddp, char *buf)
8096 {
8097 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_scsi_level);
8098 }
8099 static DRIVER_ATTR_RO(scsi_level);
8100 
8101 static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf)
8102 {
8103 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_virtual_gb);
8104 }
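/* Changing virtual_gb resizes the reported capacity and raises a
 * CAPACITY CHANGED unit attention on every attached device. */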
8105 static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
8106 				size_t count)
8107 {
8108 	int n;
8109 	bool changed;
8110 
8111 	/* Ignore capacity change for ZBC drives for now */
8112 	if (sdeb_zbc_in_use)
8113 		return -ENOTSUPP;
8114 
8115 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
8116 		changed = (sdebug_virtual_gb != n);
8117 		sdebug_virtual_gb = n;
8118 		sdebug_capacity = get_sdebug_capacity();
8119 		if (changed) {
8120 			struct sdebug_host_info *sdhp;
8121 			struct sdebug_dev_info *dp;
8122 
8123 			mutex_lock(&sdebug_host_list_mutex);
8124 			list_for_each_entry(sdhp, &sdebug_host_list,
8125 					    host_list) {
8126 				list_for_each_entry(dp, &sdhp->dev_info_list,
8127 						    dev_list) {
8128 					set_bit(SDEBUG_UA_CAPACITY_CHANGED,
8129 						dp->uas_bm);
8130 				}
8131 			}
8132 			mutex_unlock(&sdebug_host_list_mutex);
8133 		}
8134 		return count;
8135 	}
8136 	return -EINVAL;
8137 }
8138 static DRIVER_ATTR_RW(virtual_gb);
8139 
8140 static ssize_t add_host_show(struct device_driver *ddp, char *buf)
8141 {
8142 	/* show the absolute number of hosts currently active */
8143 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_hosts);
8144 }
8145 
8146 static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
8147 			      size_t count)
8148 {
8149 	bool found;
8150 	unsigned long idx;
8151 	struct sdeb_store_info *sip;
8152 	bool want_phs = (sdebug_fake_rw == 0) && sdebug_per_host_store;
8153 	int delta_hosts;
8154 
8155 	if (sscanf(buf, "%d", &delta_hosts) != 1)
8156 		return -EINVAL;
8157 	if (delta_hosts > 0) {
8158 		do {
8159 			found = false;
8160 			if (want_phs) {
8161 				xa_for_each_marked(per_store_ap, idx, sip,
8162 						   SDEB_XA_NOT_IN_USE) {
8163 					sdeb_most_recent_idx = (int)idx;
8164 					found = true;
8165 					break;
8166 				}
8167 				if (found)	/* re-use case */
8168 					sdebug_add_host_helper((int)idx);
8169 				else
8170 					sdebug_do_add_host(true);
8171 			} else {
8172 				sdebug_do_add_host(false);
8173 			}
8174 		} while (--delta_hosts);
8175 	} else if (delta_hosts < 0) {
8176 		do {
8177 			sdebug_do_remove_host(false);
8178 		} while (++delta_hosts);
8179 	}
8180 	return count;
8181 }
8182 static DRIVER_ATTR_RW(add_host);
8183 
8184 static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf)
8185 {
8186 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_vpd_use_hostno);
8187 }
8188 static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf,
8189 				    size_t count)
8190 {
8191 	int n;
8192 
8193 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
8194 		sdebug_vpd_use_hostno = n;
8195 		return count;
8196 	}
8197 	return -EINVAL;
8198 }
8199 static DRIVER_ATTR_RW(vpd_use_hostno);
8200 
8201 static ssize_t statistics_show(struct device_driver *ddp, char *buf)
8202 {
8203 	return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_statistics);
8204 }
8205 static ssize_t statistics_store(struct device_driver *ddp, const char *buf,
8206 				size_t count)
8207 {
8208 	int n;
8209 
8210 	if ((count > 0) && (sscanf(buf, "%d", &n) == 1) && (n >= 0)) {
8211 		if (n > 0)
8212 			sdebug_statistics = true;
8213 		else {
8214 			clear_queue_stats();
8215 			sdebug_statistics = false;
8216 		}
8217 		return count;
8218 	}
8219 	return -EINVAL;
8220 }
8221 static DRIVER_ATTR_RW(statistics);
8222 
8223 static ssize_t sector_size_show(struct device_driver *ddp, char *buf)
8224 {
8225 	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_sector_size);
8226 }
8227 static DRIVER_ATTR_RO(sector_size);
8228 
8229 static ssize_t submit_queues_show(struct device_driver *ddp, char *buf)
8230 {
8231 	return scnprintf(buf, PAGE_SIZE, "%d\n", submit_queues);
8232 }
8233 static DRIVER_ATTR_RO(submit_queues);
8234 
8235 static ssize_t dix_show(struct device_driver *ddp, char *buf)
8236 {
8237 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dix);
8238 }
8239 static DRIVER_ATTR_RO(dix);
8240 
8241 static ssize_t dif_show(struct device_driver *ddp, char *buf)
8242 {
8243 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dif);
8244 }
8245 static DRIVER_ATTR_RO(dif);
8246 
8247 static ssize_t guard_show(struct device_driver *ddp, char *buf)
8248 {
8249 	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_guard);
8250 }
8251 static DRIVER_ATTR_RO(guard);
8252 
8253 static ssize_t ato_show(struct device_driver *ddp, char *buf)
8254 {
8255 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ato);
8256 }
8257 static DRIVER_ATTR_RO(ato);
8258 
8259 static ssize_t map_show(struct device_driver *ddp, char *buf)
8260 {
8261 	ssize_t count = 0;
8262 
8263 	if (!scsi_debug_lbp())
8264 		return scnprintf(buf, PAGE_SIZE, "0-%u\n",
8265 				 sdebug_store_sectors);
8266 
8267 	if (sdebug_fake_rw == 0 && !xa_empty(per_store_ap)) {
8268 		struct sdeb_store_info *sip = xa_load(per_store_ap, 0);
8269 
8270 		if (sip)
8271 			count = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
8272 					  (int)map_size, sip->map_storep);
8273 	}
8274 	buf[count++] = '\n';
8275 	buf[count] = '\0';
8276 
8277 	return count;
8278 }
8279 static DRIVER_ATTR_RO(map);
8280 
8281 static ssize_t random_show(struct device_driver *ddp, char *buf)
8282 {
8283 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_random);
8284 }
8285 
8286 static ssize_t random_store(struct device_driver *ddp, const char *buf,
8287 			    size_t count)
8288 {
8289 	bool v;
8290 
8291 	if (kstrtobool(buf, &v))
8292 		return -EINVAL;
8293 
8294 	sdebug_random = v;
8295 	return count;
8296 }
8297 static DRIVER_ATTR_RW(random);
8298 
8299 static ssize_t removable_show(struct device_driver *ddp, char *buf)
8300 {
8301 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_removable ? 1 : 0);
8302 }
8303 static ssize_t removable_store(struct device_driver *ddp, const char *buf,
8304 			       size_t count)
8305 {
8306 	int n;
8307 
8308 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
8309 		sdebug_removable = (n > 0);
8310 		return count;
8311 	}
8312 	return -EINVAL;
8313 }
8314 static DRIVER_ATTR_RW(removable);
8315 
8316 static ssize_t host_lock_show(struct device_driver *ddp, char *buf)
8317 {
8318 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_host_lock);
8319 }
8320 /* N.B. sdebug_host_lock does nothing, kept for backward compatibility */
8321 static ssize_t host_lock_store(struct device_driver *ddp, const char *buf,
8322 			       size_t count)
8323 {
8324 	int n;
8325 
8326 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
8327 		sdebug_host_lock = (n > 0);
8328 		return count;
8329 	}
8330 	return -EINVAL;
8331 }
8332 static DRIVER_ATTR_RW(host_lock);
8333 
8334 static ssize_t strict_show(struct device_driver *ddp, char *buf)
8335 {
8336 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_strict);
8337 }
8338 static ssize_t strict_store(struct device_driver *ddp, const char *buf,
8339 			    size_t count)
8340 {
8341 	int n;
8342 
8343 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
8344 		sdebug_strict = (n > 0);
8345 		return count;
8346 	}
8347 	return -EINVAL;
8348 }
8349 static DRIVER_ATTR_RW(strict);
8350 
8351 static ssize_t uuid_ctl_show(struct device_driver *ddp, char *buf)
8352 {
8353 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_uuid_ctl);
8354 }
8355 static DRIVER_ATTR_RO(uuid_ctl);
8356 
8357 static ssize_t cdb_len_show(struct device_driver *ddp, char *buf)
8358 {
8359 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_cdb_len);
8360 }
8361 static ssize_t cdb_len_store(struct device_driver *ddp, const char *buf,
8362 			     size_t count)
8363 {
8364 	int ret, n;
8365 
8366 	ret = kstrtoint(buf, 0, &n);
8367 	if (ret)
8368 		return ret;
8369 	sdebug_cdb_len = n;
8370 	all_config_cdb_len();
8371 	return count;
8372 }
8373 static DRIVER_ATTR_RW(cdb_len);
8374 
8375 static const char * const zbc_model_strs_a[] = {
8376 	[BLK_ZONED_NONE] = "none",
8377 	[BLK_ZONED_HA]   = "host-aware",
8378 	[BLK_ZONED_HM]   = "host-managed",
8379 };
8380 
8381 static const char * const zbc_model_strs_b[] = {
8382 	[BLK_ZONED_NONE] = "no",
8383 	[BLK_ZONED_HA]   = "aware",
8384 	[BLK_ZONED_HM]   = "managed",
8385 };
8386 
8387 static const char * const zbc_model_strs_c[] = {
8388 	[BLK_ZONED_NONE] = "0",
8389 	[BLK_ZONED_HA]   = "1",
8390 	[BLK_ZONED_HM]   = "2",
8391 };
8392 
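/*
 * Map a user supplied ZBC model string to a BLK_ZONED_* value. Any of
 * the three spellings above is accepted, e.g. "host-managed", "managed"
 * and "2" all select BLK_ZONED_HM.
 */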
8393 static int sdeb_zbc_model_str(const char *cp)
8394 {
8395 	int res = sysfs_match_string(zbc_model_strs_a, cp);
8396 
8397 	if (res < 0) {
8398 		res = sysfs_match_string(zbc_model_strs_b, cp);
8399 		if (res < 0) {
8400 			res = sysfs_match_string(zbc_model_strs_c, cp);
8401 			if (res < 0)
8402 				return -EINVAL;
8403 		}
8404 	}
8405 	return res;
8406 }
8407 
8408 static ssize_t zbc_show(struct device_driver *ddp, char *buf)
8409 {
8410 	return scnprintf(buf, PAGE_SIZE, "%s\n",
8411 			 zbc_model_strs_a[sdeb_zbc_model]);
8412 }
8413 static DRIVER_ATTR_RO(zbc);
8414 
8415 static ssize_t tur_ms_to_ready_show(struct device_driver *ddp, char *buf)
8416 {
8417 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdeb_tur_ms_to_ready);
8418 }
8419 static DRIVER_ATTR_RO(tur_ms_to_ready);
8420 
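/*
 * Reading group_number_stats lists, per group number, how many writes
 * were received carrying that GROUP NUMBER field; writing anything to
 * it resets all of the counters to zero.
 */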
8421 static ssize_t group_number_stats_show(struct device_driver *ddp, char *buf)
8422 {
8423 	char *p = buf, *end = buf + PAGE_SIZE;
8424 	int i;
8425 
8426 	for (i = 0; i < ARRAY_SIZE(writes_by_group_number); i++)
8427 		p += scnprintf(p, end - p, "%d %ld\n", i,
8428 			       atomic_long_read(&writes_by_group_number[i]));
8429 
8430 	return p - buf;
8431 }
8432 
8433 static ssize_t group_number_stats_store(struct device_driver *ddp,
8434 					const char *buf, size_t count)
8435 {
8436 	int i;
8437 
8438 	for (i = 0; i < ARRAY_SIZE(writes_by_group_number); i++)
8439 		atomic_long_set(&writes_by_group_number[i], 0);
8440 
8441 	return count;
8442 }
8443 static DRIVER_ATTR_RW(group_number_stats);
8444 
8445 /* Note: The following array creates attribute files in the
8446    /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
8447    files (over those found in the /sys/module/scsi_debug/parameters
8448    directory) is that auxiliary actions can be triggered when an attribute
8449    is changed. For example see: add_host_store() above.
8450  */
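/* Example shell usage of the add_host attribute (a signed host delta):
 *   echo 1 > /sys/bus/pseudo/drivers/scsi_debug/add_host	add a host
 *   echo -1 > /sys/bus/pseudo/drivers/scsi_debug/add_host	remove one
 *   cat /sys/bus/pseudo/drivers/scsi_debug/add_host		hosts now active
 */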
8451 
8452 static struct attribute *sdebug_drv_attrs[] = {
8453 	&driver_attr_delay.attr,
8454 	&driver_attr_opts.attr,
8455 	&driver_attr_ptype.attr,
8456 	&driver_attr_dsense.attr,
8457 	&driver_attr_fake_rw.attr,
8458 	&driver_attr_host_max_queue.attr,
8459 	&driver_attr_no_lun_0.attr,
8460 	&driver_attr_num_tgts.attr,
8461 	&driver_attr_dev_size_mb.attr,
8462 	&driver_attr_num_parts.attr,
8463 	&driver_attr_every_nth.attr,
8464 	&driver_attr_lun_format.attr,
8465 	&driver_attr_max_luns.attr,
8466 	&driver_attr_max_queue.attr,
8467 	&driver_attr_no_rwlock.attr,
8468 	&driver_attr_no_uld.attr,
8469 	&driver_attr_scsi_level.attr,
8470 	&driver_attr_virtual_gb.attr,
8471 	&driver_attr_add_host.attr,
8472 	&driver_attr_per_host_store.attr,
8473 	&driver_attr_vpd_use_hostno.attr,
8474 	&driver_attr_sector_size.attr,
8475 	&driver_attr_statistics.attr,
8476 	&driver_attr_submit_queues.attr,
8477 	&driver_attr_dix.attr,
8478 	&driver_attr_dif.attr,
8479 	&driver_attr_guard.attr,
8480 	&driver_attr_ato.attr,
8481 	&driver_attr_map.attr,
8482 	&driver_attr_random.attr,
8483 	&driver_attr_removable.attr,
8484 	&driver_attr_host_lock.attr,
8485 	&driver_attr_ndelay.attr,
8486 	&driver_attr_strict.attr,
8487 	&driver_attr_uuid_ctl.attr,
8488 	&driver_attr_cdb_len.attr,
8489 	&driver_attr_tur_ms_to_ready.attr,
8490 	&driver_attr_zbc.attr,
8491 	&driver_attr_group_number_stats.attr,
8492 	NULL,
8493 };
8494 ATTRIBUTE_GROUPS(sdebug_drv);
8495 
8496 static struct device *pseudo_primary;
8497 
8498 static int __init scsi_debug_init(void)
8499 {
8500 	bool want_store = (sdebug_fake_rw == 0);
8501 	unsigned long sz;
8502 	int k, ret, hosts_to_add;
8503 	int idx = -1;
8504 
8505 	if (sdebug_ndelay >= 1000 * 1000 * 1000) {
8506 		pr_warn("ndelay must be less than 1 second, ignored\n");
8507 		sdebug_ndelay = 0;
8508 	} else if (sdebug_ndelay > 0)
8509 		sdebug_jdelay = JDELAY_OVERRIDDEN;
8510 
8511 	switch (sdebug_sector_size) {
8512 	case  512:
8513 	case 1024:
8514 	case 2048:
8515 	case 4096:
8516 		break;
8517 	default:
8518 		pr_err("invalid sector_size %d\n", sdebug_sector_size);
8519 		return -EINVAL;
8520 	}
8521 
8522 	switch (sdebug_dif) {
8523 	case T10_PI_TYPE0_PROTECTION:
8524 		break;
8525 	case T10_PI_TYPE1_PROTECTION:
8526 	case T10_PI_TYPE2_PROTECTION:
8527 	case T10_PI_TYPE3_PROTECTION:
8528 		have_dif_prot = true;
8529 		break;
8530 
8531 	default:
8532 		pr_err("dif must be 0, 1, 2 or 3\n");
8533 		return -EINVAL;
8534 	}
8535 
8536 	if (sdebug_num_tgts < 0) {
8537 		pr_err("num_tgts must be >= 0\n");
8538 		return -EINVAL;
8539 	}
8540 
8541 	if (sdebug_guard > 1) {
8542 		pr_err("guard must be 0 or 1\n");
8543 		return -EINVAL;
8544 	}
8545 
8546 	if (sdebug_ato > 1) {
8547 		pr_err("ato must be 0 or 1\n");
8548 		return -EINVAL;
8549 	}
8550 
8551 	if (sdebug_physblk_exp > 15) {
8552 		pr_err("invalid physblk_exp %u\n", sdebug_physblk_exp);
8553 		return -EINVAL;
8554 	}
8555 
8556 	sdebug_lun_am = sdebug_lun_am_i;
8557 	if (sdebug_lun_am > SAM_LUN_AM_FLAT) {
8558 		pr_warn("Invalid LUN format %u, using default\n", (int)sdebug_lun_am);
8559 		sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
8560 	}
8561 
8562 	if (sdebug_max_luns > 256) {
8563 		if (sdebug_max_luns > 16384) {
8564 			pr_warn("max_luns can be no more than 16384, use default\n");
8565 			sdebug_max_luns = DEF_MAX_LUNS;
8566 		}
8567 		sdebug_lun_am = SAM_LUN_AM_FLAT;
8568 	}
8569 
8570 	if (sdebug_lowest_aligned > 0x3fff) {
8571 		pr_err("lowest_aligned too big: %u\n", sdebug_lowest_aligned);
8572 		return -EINVAL;
8573 	}
8574 
8575 	if (submit_queues < 1) {
8576 		pr_err("submit_queues must be 1 or more\n");
8577 		return -EINVAL;
8578 	}
8579 
8580 	if ((sdebug_max_queue > SDEBUG_CANQUEUE) || (sdebug_max_queue < 1)) {
8581 		pr_err("max_queue must be in range [1, %d]\n", SDEBUG_CANQUEUE);
8582 		return -EINVAL;
8583 	}
8584 
8585 	if ((sdebug_host_max_queue > SDEBUG_CANQUEUE) ||
8586 	    (sdebug_host_max_queue < 0)) {
8587 		pr_err("host_max_queue must be in range [0 %d]\n",
8588 		       SDEBUG_CANQUEUE);
8589 		return -EINVAL;
8590 	}
8591 
8592 	if (sdebug_host_max_queue &&
8593 	    (sdebug_max_queue != sdebug_host_max_queue)) {
8594 		sdebug_max_queue = sdebug_host_max_queue;
8595 		pr_warn("fixing max submit queue depth to host max queue depth, %d\n",
8596 			sdebug_max_queue);
8597 	}
8598 
8599 	/*
8600 	 * check for host managed zoned block device specified with
8601 	 * ptype=0x14 or zbc=XXX.
8602 	 */
8603 	if (sdebug_ptype == TYPE_ZBC) {
8604 		sdeb_zbc_model = BLK_ZONED_HM;
8605 	} else if (sdeb_zbc_model_s && *sdeb_zbc_model_s) {
8606 		k = sdeb_zbc_model_str(sdeb_zbc_model_s);
8607 		if (k < 0)
8608 			return k;
8609 		sdeb_zbc_model = k;
8610 		switch (sdeb_zbc_model) {
8611 		case BLK_ZONED_NONE:
8612 		case BLK_ZONED_HA:
8613 			sdebug_ptype = TYPE_DISK;
8614 			break;
8615 		case BLK_ZONED_HM:
8616 			sdebug_ptype = TYPE_ZBC;
8617 			break;
8618 		default:
8619 			pr_err("Invalid ZBC model\n");
8620 			return -EINVAL;
8621 		}
8622 	}
8623 	if (sdeb_zbc_model != BLK_ZONED_NONE) {
8624 		sdeb_zbc_in_use = true;
8625 		if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
8626 			sdebug_dev_size_mb = DEF_ZBC_DEV_SIZE_MB;
8627 	}
8628 
8629 	if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
8630 		sdebug_dev_size_mb = DEF_DEV_SIZE_MB;
8631 	if (sdebug_dev_size_mb < 1)
8632 		sdebug_dev_size_mb = 1;  /* force minimum 1 MB ramdisk */
8633 	sz = (unsigned long)sdebug_dev_size_mb * 1048576;
8634 	sdebug_store_sectors = sz / sdebug_sector_size;
8635 	sdebug_capacity = get_sdebug_capacity();
8636 
8637 	/* play around with geometry, don't waste too much on track 0 */
8638 	sdebug_heads = 8;
8639 	sdebug_sectors_per = 32;
8640 	if (sdebug_dev_size_mb >= 256)
8641 		sdebug_heads = 64;
8642 	else if (sdebug_dev_size_mb >= 16)
8643 		sdebug_heads = 32;
8644 	sdebug_cylinders_per = (unsigned long)sdebug_capacity /
8645 			       (sdebug_sectors_per * sdebug_heads);
8646 	if (sdebug_cylinders_per >= 1024) {
8647 		/* other LLDs do this; implies >= 1GB ram disk ... */
8648 		sdebug_heads = 255;
8649 		sdebug_sectors_per = 63;
8650 		sdebug_cylinders_per = (unsigned long)sdebug_capacity /
8651 			       (sdebug_sectors_per * sdebug_heads);
8652 	}
8653 	if (scsi_debug_lbp()) {
8654 		sdebug_unmap_max_blocks =
8655 			clamp(sdebug_unmap_max_blocks, 0U, 0xffffffffU);
8656 
8657 		sdebug_unmap_max_desc =
8658 			clamp(sdebug_unmap_max_desc, 0U, 256U);
8659 
8660 		sdebug_unmap_granularity =
8661 			clamp(sdebug_unmap_granularity, 1U, 0xffffffffU);
8662 
8663 		if (sdebug_unmap_alignment &&
8664 		    sdebug_unmap_granularity <=
8665 		    sdebug_unmap_alignment) {
8666 			pr_err("ERR: unmap_granularity <= unmap_alignment\n");
8667 			return -EINVAL;
8668 		}
8669 	}
8670 
8671 	xa_init_flags(per_store_ap, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
8672 	if (want_store) {
8673 		idx = sdebug_add_store();
8674 		if (idx < 0)
8675 			return idx;
8676 	}
8677 
8678 	pseudo_primary = root_device_register("pseudo_0");
8679 	if (IS_ERR(pseudo_primary)) {
8680 		pr_warn("root_device_register() error\n");
8681 		ret = PTR_ERR(pseudo_primary);
8682 		goto free_vm;
8683 	}
8684 	ret = bus_register(&pseudo_lld_bus);
8685 	if (ret < 0) {
8686 		pr_warn("bus_register error: %d\n", ret);
8687 		goto dev_unreg;
8688 	}
8689 	ret = driver_register(&sdebug_driverfs_driver);
8690 	if (ret < 0) {
8691 		pr_warn("driver_register error: %d\n", ret);
8692 		goto bus_unreg;
8693 	}
8694 
8695 	hosts_to_add = sdebug_add_host;
8696 	sdebug_add_host = 0;
8697 
8698 	sdebug_debugfs_root = debugfs_create_dir("scsi_debug", NULL);
8699 	if (IS_ERR_OR_NULL(sdebug_debugfs_root))
8700 		pr_info("%s: failed to create initial debugfs directory\n", __func__);
8701 
8702 	for (k = 0; k < hosts_to_add; k++) {
8703 		if (want_store && k == 0) {
8704 			ret = sdebug_add_host_helper(idx);
8705 			if (ret < 0) {
8706 				pr_err("add_host_helper k=%d, error=%d\n",
8707 				       k, -ret);
8708 				break;
8709 			}
8710 		} else {
8711 			ret = sdebug_do_add_host(want_store &&
8712 						 sdebug_per_host_store);
8713 			if (ret < 0) {
8714 				pr_err("add_host k=%d error=%d\n", k, -ret);
8715 				break;
8716 			}
8717 		}
8718 	}
8719 	if (sdebug_verbose)
8720 		pr_info("built %d host(s)\n", sdebug_num_hosts);
8721 
8722 	return 0;
8723 
8724 bus_unreg:
8725 	bus_unregister(&pseudo_lld_bus);
8726 dev_unreg:
8727 	root_device_unregister(pseudo_primary);
8728 free_vm:
8729 	sdebug_erase_store(idx, NULL);
8730 	return ret;
8731 }
8732 
8733 static void __exit scsi_debug_exit(void)
8734 {
8735 	int k = sdebug_num_hosts;
8736 
8737 	for (; k; k--)
8738 		sdebug_do_remove_host(true);
8739 	driver_unregister(&sdebug_driverfs_driver);
8740 	bus_unregister(&pseudo_lld_bus);
8741 	root_device_unregister(pseudo_primary);
8742 
8743 	sdebug_erase_all_stores(false);
8744 	xa_destroy(per_store_ap);
8745 	debugfs_remove(sdebug_debugfs_root);
8746 }
8747 
8748 device_initcall(scsi_debug_init);
8749 module_exit(scsi_debug_exit);
8750 
8751 static void sdebug_release_adapter(struct device *dev)
8752 {
8753 	struct sdebug_host_info *sdbg_host;
8754 
8755 	sdbg_host = dev_to_sdebug_host(dev);
8756 	kfree(sdbg_host);
8757 }
8758 
8759 /* idx must be valid; if sip is NULL then it will be obtained using idx */
8760 static void sdebug_erase_store(int idx, struct sdeb_store_info *sip)
8761 {
8762 	if (idx < 0)
8763 		return;
8764 	if (!sip) {
8765 		if (xa_empty(per_store_ap))
8766 			return;
8767 		sip = xa_load(per_store_ap, idx);
8768 		if (!sip)
8769 			return;
8770 	}
8771 	vfree(sip->map_storep);
8772 	vfree(sip->dif_storep);
8773 	vfree(sip->storep);
8774 	xa_erase(per_store_ap, idx);
8775 	kfree(sip);
8776 }
8777 
8778 /* Assume apart_from_first==false only in shutdown case. */
8779 static void sdebug_erase_all_stores(bool apart_from_first)
8780 {
8781 	unsigned long idx;
8782 	struct sdeb_store_info *sip = NULL;
8783 
8784 	xa_for_each(per_store_ap, idx, sip) {
8785 		if (apart_from_first)
8786 			apart_from_first = false;
8787 		else
8788 			sdebug_erase_store(idx, sip);
8789 	}
8790 	if (apart_from_first)
8791 		sdeb_most_recent_idx = sdeb_first_idx;
8792 }
8793 
8794 /*
8795  * Returns store xarray new element index (idx) if >=0 else negated errno.
8796  * Limit the number of stores to 65536.
8797  */
8798 static int sdebug_add_store(void)
8799 {
8800 	int res;
8801 	u32 n_idx;
8802 	unsigned long iflags;
8803 	unsigned long sz = (unsigned long)sdebug_dev_size_mb * 1048576;
8804 	struct sdeb_store_info *sip = NULL;
8805 	struct xa_limit xal = { .max = 1 << 16, .min = 0 };
8806 
8807 	sip = kzalloc(sizeof(*sip), GFP_KERNEL);
8808 	if (!sip)
8809 		return -ENOMEM;
8810 
8811 	xa_lock_irqsave(per_store_ap, iflags);
8812 	res = __xa_alloc(per_store_ap, &n_idx, sip, xal, GFP_ATOMIC);
8813 	if (unlikely(res < 0)) {
8814 		xa_unlock_irqrestore(per_store_ap, iflags);
8815 		kfree(sip);
8816 		pr_warn("%s: xa_alloc() errno=%d\n", __func__, -res);
8817 		return res;
8818 	}
8819 	sdeb_most_recent_idx = n_idx;
8820 	if (sdeb_first_idx < 0)
8821 		sdeb_first_idx = n_idx;
8822 	xa_unlock_irqrestore(per_store_ap, iflags);
8823 
8824 	res = -ENOMEM;
8825 	sip->storep = vzalloc(sz);
8826 	if (!sip->storep) {
8827 		pr_err("user data oom\n");
8828 		goto err;
8829 	}
8830 	if (sdebug_num_parts > 0)
8831 		sdebug_build_parts(sip->storep, sz);
8832 
8833 	/* DIF/DIX: what T10 calls Protection Information (PI) */
8834 	if (sdebug_dix) {
8835 		int dif_size;
8836 
8837 		dif_size = sdebug_store_sectors * sizeof(struct t10_pi_tuple);
8838 		sip->dif_storep = vmalloc(dif_size);
8839 
8840 		pr_info("dif_storep %u bytes @ %p\n", dif_size,
8841 			sip->dif_storep);
8842 
8843 		if (!sip->dif_storep) {
8844 			pr_err("DIX oom\n");
8845 			goto err;
8846 		}
8847 		memset(sip->dif_storep, 0xff, dif_size);
8848 	}
8849 	/* Logical Block Provisioning */
8850 	if (scsi_debug_lbp()) {
8851 		map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
8852 		sip->map_storep = vcalloc(BITS_TO_LONGS(map_size),
8853 					  sizeof(long));
8854 
8855 		pr_info("%lu provisioning blocks\n", map_size);
8856 
8857 		if (!sip->map_storep) {
8858 			pr_err("LBP map oom\n");
8859 			goto err;
8860 		}
8861 
8862 		/* Map first 1KB for partition table */
8863 		if (sdebug_num_parts)
8864 			map_region(sip, 0, 2);
8865 	}
8866 
8867 	rwlock_init(&sip->macc_data_lck);
8868 	rwlock_init(&sip->macc_meta_lck);
8869 	rwlock_init(&sip->macc_sector_lck);
8870 	return (int)n_idx;
8871 err:
8872 	sdebug_erase_store((int)n_idx, sip);
8873 	pr_warn("%s: failed, errno=%d\n", __func__, -res);
8874 	return res;
8875 }
8876 
8877 static int sdebug_add_host_helper(int per_host_idx)
8878 {
8879 	int k, devs_per_host, idx;
8880 	int error = -ENOMEM;
8881 	struct sdebug_host_info *sdbg_host;
8882 	struct sdebug_dev_info *sdbg_devinfo, *tmp;
8883 
8884 	sdbg_host = kzalloc(sizeof(*sdbg_host), GFP_KERNEL);
8885 	if (!sdbg_host)
8886 		return -ENOMEM;
8887 	idx = (per_host_idx < 0) ? sdeb_first_idx : per_host_idx;
8888 	if (xa_get_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE))
8889 		xa_clear_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
8890 	sdbg_host->si_idx = idx;
8891 
8892 	INIT_LIST_HEAD(&sdbg_host->dev_info_list);
8893 
8894 	devs_per_host = sdebug_num_tgts * sdebug_max_luns;
8895 	for (k = 0; k < devs_per_host; k++) {
8896 		sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
8897 		if (!sdbg_devinfo)
8898 			goto clean;
8899 	}
8900 
8901 	mutex_lock(&sdebug_host_list_mutex);
8902 	list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
8903 	mutex_unlock(&sdebug_host_list_mutex);
8904 
8905 	sdbg_host->dev.bus = &pseudo_lld_bus;
8906 	sdbg_host->dev.parent = pseudo_primary;
8907 	sdbg_host->dev.release = &sdebug_release_adapter;
8908 	dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_num_hosts);
8909 
8910 	error = device_register(&sdbg_host->dev);
8911 	if (error) {
8912 		mutex_lock(&sdebug_host_list_mutex);
8913 		list_del(&sdbg_host->host_list);
8914 		mutex_unlock(&sdebug_host_list_mutex);
8915 		goto clean;
8916 	}
8917 
8918 	++sdebug_num_hosts;
8919 	return 0;
8920 
8921 clean:
8922 	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
8923 				 dev_list) {
8924 		list_del(&sdbg_devinfo->dev_list);
8925 		kfree(sdbg_devinfo->zstate);
8926 		kfree(sdbg_devinfo);
8927 	}
8928 	if (sdbg_host->dev.release)
8929 		put_device(&sdbg_host->dev);
8930 	else
8931 		kfree(sdbg_host);
8932 	pr_warn("%s: failed, errno=%d\n", __func__, -error);
8933 	return error;
8934 }
8935 
8936 static int sdebug_do_add_host(bool mk_new_store)
8937 {
8938 	int ph_idx = sdeb_most_recent_idx;
8939 
8940 	if (mk_new_store) {
8941 		ph_idx = sdebug_add_store();
8942 		if (ph_idx < 0)
8943 			return ph_idx;
8944 	}
8945 	return sdebug_add_host_helper(ph_idx);
8946 }
8947 
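/*
 * Remove the most recently added host. Unless shutting down (the_end),
 * also mark that host's store as not-in-use when no other host still
 * references it, so that a later add_host may re-use the store.
 */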
8948 static void sdebug_do_remove_host(bool the_end)
8949 {
8950 	int idx = -1;
8951 	struct sdebug_host_info *sdbg_host = NULL;
8952 	struct sdebug_host_info *sdbg_host2;
8953 
8954 	mutex_lock(&sdebug_host_list_mutex);
8955 	if (!list_empty(&sdebug_host_list)) {
8956 		sdbg_host = list_entry(sdebug_host_list.prev,
8957 				       struct sdebug_host_info, host_list);
8958 		idx = sdbg_host->si_idx;
8959 	}
8960 	if (!the_end && idx >= 0) {
8961 		bool unique = true;
8962 
8963 		list_for_each_entry(sdbg_host2, &sdebug_host_list, host_list) {
8964 			if (sdbg_host2 == sdbg_host)
8965 				continue;
8966 			if (idx == sdbg_host2->si_idx) {
8967 				unique = false;
8968 				break;
8969 			}
8970 		}
8971 		if (unique) {
8972 			xa_set_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
8973 			if (idx == sdeb_most_recent_idx)
8974 				--sdeb_most_recent_idx;
8975 		}
8976 	}
8977 	if (sdbg_host)
8978 		list_del(&sdbg_host->host_list);
8979 	mutex_unlock(&sdebug_host_list_mutex);
8980 
8981 	if (!sdbg_host)
8982 		return;
8983 
8984 	device_unregister(&sdbg_host->dev);
8985 	--sdebug_num_hosts;
8986 }
8987 
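/* Clamp the requested queue depth to [1, SDEBUG_CANQUEUE], then apply it. */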
8988 static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
8989 {
8990 	struct sdebug_dev_info *devip = sdev->hostdata;
8991 
8992 	if (!devip)
8993 		return	-ENODEV;
8994 
8995 	mutex_lock(&sdebug_host_list_mutex);
8996 	block_unblock_all_queues(true);
8997 
8998 	if (qdepth > SDEBUG_CANQUEUE) {
8999 		qdepth = SDEBUG_CANQUEUE;
9000 		pr_warn("%s: requested qdepth [%d] exceeds canqueue [%d], trim\n", __func__,
9001 			qdepth, SDEBUG_CANQUEUE);
9002 	}
9003 	if (qdepth < 1)
9004 		qdepth = 1;
9005 	if (qdepth != sdev->queue_depth)
9006 		scsi_change_queue_depth(sdev, qdepth);
9007 
9008 	block_unblock_all_queues(false);
9009 	mutex_unlock(&sdebug_host_list_mutex);
9010 
9011 	if (SDEBUG_OPT_Q_NOISE & sdebug_opts)
9012 		sdev_printk(KERN_INFO, sdev, "%s: qdepth=%d\n", __func__, qdepth);
9013 
9014 	return sdev->queue_depth;
9015 }
9016 
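/*
 * Returns true when the current command should be ignored so that it
 * appears to time out; driven by the every_nth counter together with
 * the SDEBUG_OPT_TIMEOUT or SDEBUG_OPT_MAC_TIMEOUT opts bits.
 */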
9017 static bool fake_timeout(struct scsi_cmnd *scp)
9018 {
9019 	if (0 == (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth))) {
9020 		if (sdebug_every_nth < -1)
9021 			sdebug_every_nth = -1;
9022 		if (SDEBUG_OPT_TIMEOUT & sdebug_opts)
9023 			return true; /* ignore command causing timeout */
9024 		else if (SDEBUG_OPT_MAC_TIMEOUT & sdebug_opts &&
9025 			 scsi_medium_access_command(scp))
9026 			return true; /* time out reads and writes */
9027 	}
9028 	return false;
9029 }
9030 
9031 /* Response to TUR or media access command when device stopped */
9032 static int resp_not_ready(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
9033 {
9034 	int stopped_state;
9035 	u64 diff_ns = 0;
9036 	ktime_t now_ts = ktime_get_boottime();
9037 	struct scsi_device *sdp = scp->device;
9038 
9039 	stopped_state = atomic_read(&devip->stopped);
9040 	if (stopped_state == 2) {
9041 		if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
9042 			diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));
9043 			if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
9044 				/* tur_ms_to_ready timer extinguished */
9045 				atomic_set(&devip->stopped, 0);
9046 				return 0;
9047 			}
9048 		}
9049 		mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x1);
9050 		if (sdebug_verbose)
9051 			sdev_printk(KERN_INFO, sdp,
9052 				    "%s: Not ready: in process of becoming ready\n", my_name);
9053 		if (scp->cmnd[0] == TEST_UNIT_READY) {
9054 			u64 tur_nanosecs_to_ready = (u64)sdeb_tur_ms_to_ready * 1000000;
9055 
9056 			if (diff_ns <= tur_nanosecs_to_ready)
9057 				diff_ns = tur_nanosecs_to_ready - diff_ns;
9058 			else
9059 				diff_ns = tur_nanosecs_to_ready;
9060 			/* As per 20-061r2 approved for spc6 by T10 on 20200716 */
9061 			do_div(diff_ns, 1000000);	/* diff_ns becomes milliseconds */
9062 			scsi_set_sense_information(scp->sense_buffer, SCSI_SENSE_BUFFERSIZE,
9063 						   diff_ns);
9064 			return check_condition_result;
9065 		}
9066 	}
9067 	mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x2);
9068 	if (sdebug_verbose)
9069 		sdev_printk(KERN_INFO, sdp, "%s: Not ready: initializing command required\n",
9070 			    my_name);
9071 	return check_condition_result;
9072 }
9073 
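/*
 * Split the hardware queues between the default and poll hctx types:
 * (submit_queues - poll_queues) regular queues followed by poll_queues
 * polled queues.
 */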
9074 static void sdebug_map_queues(struct Scsi_Host *shost)
9075 {
9076 	int i, qoff;
9077 
9078 	if (shost->nr_hw_queues == 1)
9079 		return;
9080 
9081 	for (i = 0, qoff = 0; i < HCTX_MAX_TYPES; i++) {
9082 		struct blk_mq_queue_map *map = &shost->tag_set.map[i];
9083 
9084 		map->nr_queues  = 0;
9085 
9086 		if (i == HCTX_TYPE_DEFAULT)
9087 			map->nr_queues = submit_queues - poll_queues;
9088 		else if (i == HCTX_TYPE_POLL)
9089 			map->nr_queues = poll_queues;
9090 
9091 		if (!map->nr_queues) {
9092 			BUG_ON(i == HCTX_TYPE_DEFAULT);
9093 			continue;
9094 		}
9095 
9096 		map->queue_offset = qoff;
9097 		blk_mq_map_queues(map);
9098 
9099 		qoff += map->nr_queues;
9100 	}
9101 }
9102 
9103 struct sdebug_blk_mq_poll_data {
9104 	unsigned int queue_num;
9105 	int *num_entries;
9106 };
9107 
9108 /*
9109  * We don't handle aborted commands here, but it does not seem possible to have
9110  * aborted polled commands from schedule_resp()
9111  */
9112 static bool sdebug_blk_mq_poll_iter(struct request *rq, void *opaque)
9113 {
9114 	struct sdebug_blk_mq_poll_data *data = opaque;
9115 	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
9116 	struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmd);
9117 	struct sdebug_defer *sd_dp;
9118 	u32 unique_tag = blk_mq_unique_tag(rq);
9119 	u16 hwq = blk_mq_unique_tag_to_hwq(unique_tag);
9120 	unsigned long flags;
9121 	int queue_num = data->queue_num;
9122 	ktime_t time;
9123 
9124 	/* We're only interested in one queue for this iteration */
9125 	if (hwq != queue_num)
9126 		return true;
9127 
9128 	/* Subsequent checks would fail if this failed, but check anyway */
9129 	if (!test_bit(SCMD_STATE_INFLIGHT, &cmd->state))
9130 		return true;
9131 
9132 	time = ktime_get_boottime();
9133 
9134 	spin_lock_irqsave(&sdsc->lock, flags);
9135 	sd_dp = &sdsc->sd_dp;
9136 	if (READ_ONCE(sd_dp->defer_t) != SDEB_DEFER_POLL) {
9137 		spin_unlock_irqrestore(&sdsc->lock, flags);
9138 		return true;
9139 	}
9140 
9141 	if (time < sd_dp->cmpl_ts) {
9142 		spin_unlock_irqrestore(&sdsc->lock, flags);
9143 		return true;
9144 	}
9145 	spin_unlock_irqrestore(&sdsc->lock, flags);
9146 
9147 	if (sdebug_statistics) {
9148 		atomic_inc(&sdebug_completions);
9149 		if (raw_smp_processor_id() != sd_dp->issuing_cpu)
9150 			atomic_inc(&sdebug_miss_cpus);
9151 	}
9152 
9153 	scsi_done(cmd); /* callback to mid level */
9154 	(*data->num_entries)++;
9155 	return true;
9156 }
9157 
9158 static int sdebug_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
9159 {
9160 	int num_entries = 0;
9161 	struct sdebug_blk_mq_poll_data data = {
9162 		.queue_num = queue_num,
9163 		.num_entries = &num_entries,
9164 	};
9165 
9166 	blk_mq_tagset_busy_iter(&shost->tag_set, sdebug_blk_mq_poll_iter,
9167 				&data);
9168 
9169 	if (num_entries > 0)
9170 		atomic_add(num_entries, &sdeb_mq_poll_count);
9171 	return num_entries;
9172 }
9173 
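/*
 * Per-device error injection helpers follow. An injection entry matches
 * on the command opcode (0xff acts as a wildcard); a negative cnt counts
 * up towards zero, limiting how many more times the injection fires.
 * sdebug_timeout_cmd() returns non-zero when the command should be
 * dropped to simulate a timeout.
 */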
9174 static int sdebug_timeout_cmd(struct scsi_cmnd *cmnd)
9175 {
9176 	struct scsi_device *sdp = cmnd->device;
9177 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
9178 	struct sdebug_err_inject *err;
9179 	unsigned char *cmd = cmnd->cmnd;
9180 	int ret = 0;
9181 
9182 	if (devip == NULL)
9183 		return 0;
9184 
9185 	rcu_read_lock();
9186 	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
9187 		if (err->type == ERR_TMOUT_CMD &&
9188 		    (err->cmd == cmd[0] || err->cmd == 0xff)) {
9189 			ret = !!err->cnt;
9190 			if (err->cnt < 0)
9191 				err->cnt++;
9192 
9193 			rcu_read_unlock();
9194 			return ret;
9195 		}
9196 	}
9197 	rcu_read_unlock();
9198 
9199 	return 0;
9200 }
9201 
9202 static int sdebug_fail_queue_cmd(struct scsi_cmnd *cmnd)
9203 {
9204 	struct scsi_device *sdp = cmnd->device;
9205 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
9206 	struct sdebug_err_inject *err;
9207 	unsigned char *cmd = cmnd->cmnd;
9208 	int ret = 0;
9209 
9210 	if (devip == NULL)
9211 		return 0;
9212 
9213 	rcu_read_lock();
9214 	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
9215 		if (err->type == ERR_FAIL_QUEUE_CMD &&
9216 		    (err->cmd == cmd[0] || err->cmd == 0xff)) {
9217 			ret = err->cnt ? err->queuecmd_ret : 0;
9218 			if (err->cnt < 0)
9219 				err->cnt++;
9220 
9221 			rcu_read_unlock();
9222 			return ret;
9223 		}
9224 	}
9225 	rcu_read_unlock();
9226 
9227 	return 0;
9228 }
9229 
9230 static int sdebug_fail_cmd(struct scsi_cmnd *cmnd, int *retval,
9231 			   struct sdebug_err_inject *info)
9232 {
9233 	struct scsi_device *sdp = cmnd->device;
9234 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
9235 	struct sdebug_err_inject *err;
9236 	unsigned char *cmd = cmnd->cmnd;
9237 	int ret = 0;
9238 	int result;
9239 
9240 	if (devip == NULL)
9241 		return 0;
9242 
9243 	rcu_read_lock();
9244 	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
9245 		if (err->type == ERR_FAIL_CMD &&
9246 		    (err->cmd == cmd[0] || err->cmd == 0xff)) {
9247 			if (!err->cnt) {
9248 				rcu_read_unlock();
9249 				return 0;
9250 			}
9251 
9252 			ret = !!err->cnt;
9253 			rcu_read_unlock();
9254 			goto out_handle;
9255 		}
9256 	}
9257 	rcu_read_unlock();
9258 
9259 	return 0;
9260 
9261 out_handle:
9262 	if (err->cnt < 0)
9263 		err->cnt++;
9264 	mk_sense_buffer(cmnd, err->sense_key, err->asc, err->asq);
9265 	result = err->status_byte | err->host_byte << 16 | err->driver_byte << 24;
9266 	*info = *err;
9267 	*retval = schedule_resp(cmnd, devip, result, NULL, 0, 0);
9268 
9269 	return ret;
9270 }
9271 
9272 /* Process @scp, a request to abort a SCSI command by tag. */
9273 static void scsi_debug_abort_cmd(struct Scsi_Host *shost, struct scsi_cmnd *scp)
9274 {
9275 	struct sdebug_internal_cmd *internal_cmd = scsi_cmd_priv(scp);
9276 	struct sdebug_abort_cmd *abort_cmd = &internal_cmd->abort_cmd;
9277 	const u32 unique_tag = abort_cmd->unique_tag;
9278 	struct scsi_cmnd *to_be_aborted_scmd =
9279 		scsi_host_find_tag(shost, unique_tag);
9280 	struct sdebug_scsi_cmd *to_be_aborted_sdsc =
9281 		scsi_cmd_priv(to_be_aborted_scmd);
9282 	bool res = false;
9283 
9284 	if (!to_be_aborted_scmd) {
9285 		pr_err("%s: command with tag %#x not found\n", __func__,
9286 		       unique_tag);
9287 		return;
9288 	}
9289 
9290 	scoped_guard(spinlock_irqsave, &to_be_aborted_sdsc->lock)
9291 		res = scsi_debug_stop_cmnd(to_be_aborted_scmd);
9292 
9293 	if (res)
9294 		pr_info("%s: aborted command with tag %#x\n",
9295 			__func__, unique_tag);
9296 	else
9297 		pr_err("%s: failed to abort command with tag %#x\n",
9298 		       __func__, unique_tag);
9299 
9300 	set_host_byte(scp, res ? DID_OK : DID_ERROR);
9301 }
9302 
9303 static int scsi_debug_process_reserved_command(struct Scsi_Host *shost,
9304 					       struct scsi_cmnd *scp)
9305 {
9306 	struct sdebug_internal_cmd *internal_cmd = scsi_cmd_priv(scp);
9307 
9308 	switch (internal_cmd->type) {
9309 	case SCSI_DEBUG_ABORT_CMD:
9310 		scsi_debug_abort_cmd(shost, scp);
9311 		break;
9312 	default:
9313 		WARN_ON_ONCE(true);
9314 		set_host_byte(scp, DID_ERROR);
9315 		break;
9316 	}
9317 
9318 	scsi_done(scp);
9319 	return 0;
9320 }
9321 
9322 static int scsi_debug_queuecommand(struct Scsi_Host *shost,
9323 				   struct scsi_cmnd *scp)
9324 {
9325 	u8 sdeb_i;
9326 	struct scsi_device *sdp = scp->device;
9327 	const struct opcode_info_t *oip;
9328 	const struct opcode_info_t *r_oip;
9329 	struct sdebug_dev_info *devip;
9330 	u8 *cmd = scp->cmnd;
9331 	int (*r_pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
9332 	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *) = NULL;
9333 	int k, na;
9334 	int errsts = 0;
9335 	u64 lun_index = sdp->lun & 0x3FFF;
9336 	u32 flags;
9337 	u16 sa;
9338 	u8 opcode = cmd[0];
9339 	u32 devsel = sdebug_get_devsel(scp->device);
9340 	bool has_wlun_rl;
9341 	bool inject_now;
9342 	int ret = 0;
9343 	struct sdebug_err_inject err;
9344 
9345 	scsi_set_resid(scp, 0);
9346 	if (sdebug_statistics) {
9347 		atomic_inc(&sdebug_cmnd_count);
9348 		inject_now = inject_on_this_cmd();
9349 	} else {
9350 		inject_now = false;
9351 	}
9352 	if (unlikely(sdebug_verbose &&
9353 		     !(SDEBUG_OPT_NO_CDB_NOISE & sdebug_opts))) {
9354 		char b[120];
9355 		int n, len, sb;
9356 
9357 		len = scp->cmd_len;
9358 		sb = (int)sizeof(b);
9359 		if (len > 32)
9360 			strcpy(b, "too long, over 32 bytes");
9361 		else {
9362 			for (k = 0, n = 0; k < len && n < sb; ++k)
9363 				n += scnprintf(b + n, sb - n, "%02x ",
9364 					       (u32)cmd[k]);
9365 		}
9366 		sdev_printk(KERN_INFO, sdp, "%s: tag=%#x, cmd %s\n", my_name,
9367 			    blk_mq_unique_tag(scsi_cmd_to_rq(scp)), b);
9368 	}
9369 	if (unlikely(inject_now && (sdebug_opts & SDEBUG_OPT_HOST_BUSY)))
9370 		return SCSI_MLQUEUE_HOST_BUSY;
9371 	has_wlun_rl = (sdp->lun == SCSI_W_LUN_REPORT_LUNS);
9372 	if (unlikely(lun_index >= sdebug_max_luns && !has_wlun_rl))
9373 		goto err_out;
9374 
9375 	sdeb_i = opcode_ind_arr[opcode];	/* fully mapped */
9376 	oip = &opcode_info_arr[sdeb_i];		/* safe if table consistent */
9377 	devip = (struct sdebug_dev_info *)sdp->hostdata;
9378 	if (unlikely(!devip)) {
9379 		devip = find_build_dev_info(sdp);
9380 		if (NULL == devip)
9381 			goto err_out;
9382 	}
9383 
9384 	if (sdebug_timeout_cmd(scp)) {
9385 		scmd_printk(KERN_INFO, scp, "timeout command 0x%x\n", opcode);
9386 		return 0;
9387 	}
9388 
9389 	ret = sdebug_fail_queue_cmd(scp);
9390 	if (ret) {
9391 		scmd_printk(KERN_INFO, scp, "fail queue command 0x%x with 0x%x\n",
9392 				opcode, ret);
9393 		return ret;
9394 	}
9395 
9396 	if (sdebug_fail_cmd(scp, &ret, &err)) {
9397 		scmd_printk(KERN_INFO, scp,
9398 			"fail command 0x%x with hostbyte=0x%x, "
9399 			"driverbyte=0x%x, statusbyte=0x%x, "
9400 			"sense_key=0x%x, asc=0x%x, asq=0x%x\n",
9401 			opcode, err.host_byte, err.driver_byte,
9402 			err.status_byte, err.sense_key, err.asc, err.asq);
9403 		return ret;
9404 	}
9405 
9406 	if (unlikely(inject_now && !atomic_read(&sdeb_inject_pending)))
9407 		atomic_set(&sdeb_inject_pending, 1);
9408 
9409 	na = oip->num_attached;
9410 	r_pfp = oip->pfp;
9411 	if (na) {	/* multiple commands with this opcode */
9412 		r_oip = oip;
9413 		if (FF_SA & r_oip->flags) {
9414 			if (F_SA_LOW & oip->flags)
9415 				sa = 0x1f & cmd[1];
9416 			else
9417 				sa = get_unaligned_be16(cmd + 8);
9418 			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
9419 				if (opcode == oip->opcode && sa == oip->sa &&
9420 					(devsel & oip->devsel) != 0)
9421 					break;
9422 			}
9423 		} else {   /* since no service action only check opcode */
9424 			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
9425 				if (opcode == oip->opcode &&
9426 					(devsel & oip->devsel) != 0)
9427 					break;
9428 			}
9429 		}
9430 		if (k > na) {
9431 			if (F_SA_LOW & r_oip->flags)
9432 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 4);
9433 			else if (F_SA_HIGH & r_oip->flags)
9434 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, 7);
9435 			else
9436 				mk_sense_invalid_opcode(scp);
9437 			goto check_cond;
9438 		}
9439 	}	/* else (when na==0) we assume the oip is a match */
9440 	flags = oip->flags;
9441 	if (unlikely(F_INV_OP & flags)) {
9442 		mk_sense_invalid_opcode(scp);
9443 		goto check_cond;
9444 	}
9445 	if (unlikely(has_wlun_rl && !(F_RL_WLUN_OK & flags))) {
9446 		if (sdebug_verbose)
9447 			sdev_printk(KERN_INFO, sdp, "%s: Opcode 0x%x not supported for wlun\n",
9448 				    my_name, opcode);
9449 		mk_sense_invalid_opcode(scp);
9450 		goto check_cond;
9451 	}
9452 	if (unlikely(sdebug_strict)) {	/* check cdb against mask */
9453 		u8 rem;
9454 		int j;
9455 
9456 		for (k = 1; k < oip->len_mask[0] && k < 16; ++k) {
9457 			rem = ~oip->len_mask[k] & cmd[k];
9458 			if (rem) {
9459 				for (j = 7; j >= 0; --j, rem <<= 1) {
9460 					if (0x80 & rem)
9461 						break;
9462 				}
9463 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, k, j);
9464 				goto check_cond;
9465 			}
9466 		}
9467 	}
9468 	if (unlikely(!(F_SKIP_UA & flags) &&
9469 		     find_first_bit(devip->uas_bm,
9470 				    SDEBUG_NUM_UAS) != SDEBUG_NUM_UAS)) {
9471 		errsts = make_ua(scp, devip);
9472 		if (errsts)
9473 			goto check_cond;
9474 	}
9475 	if (unlikely(((F_M_ACCESS & flags) || scp->cmnd[0] == TEST_UNIT_READY) &&
9476 		     atomic_read(&devip->stopped))) {
9477 		errsts = resp_not_ready(scp, devip);
9478 		if (errsts)
9479 			goto fini;
9480 	}
9481 	if (sdebug_fake_rw && (F_FAKE_RW & flags))
9482 		goto fini;
9483 	if (unlikely(sdebug_every_nth)) {
9484 		if (fake_timeout(scp))
9485 			return 0;	/* ignore command: make trouble */
9486 	}
9487 	if (likely(oip->pfp))
9488 		pfp = oip->pfp;	/* calls a resp_* function */
9489 	else
9490 		pfp = r_pfp;    /* if leaf function ptr NULL, try the root's */
9491 
9492 fini:
9493 	if (F_DELAY_OVERR & flags)	/* cmds like INQUIRY respond asap */
9494 		return schedule_resp(scp, devip, errsts, pfp, 0, 0);
9495 	else if ((flags & F_LONG_DELAY) && (sdebug_jdelay > 0 ||
9496 					    sdebug_ndelay > 10000)) {
9497 		/*
9498 		 * Skip long delays if ndelay <= 10 microseconds. Otherwise
9499 		 * for Start Stop Unit (SSU) want at least 1 second delay and
9500 		 * if sdebug_jdelay>1 want a long delay of that many seconds.
9501 		 * For Synchronize Cache want 1/20 of SSU's delay.
9502 		 */
9503 		int jdelay = (sdebug_jdelay < 2) ? 1 : sdebug_jdelay;
9504 		int denom = (flags & F_SYNC_DELAY) ? 20 : 1;
9505 
9506 		jdelay = mult_frac(USER_HZ * jdelay, HZ, denom * USER_HZ);
9507 		return schedule_resp(scp, devip, errsts, pfp, jdelay, 0);
9508 	} else
9509 		return schedule_resp(scp, devip, errsts, pfp, sdebug_jdelay,
9510 				     sdebug_ndelay);
9511 check_cond:
9512 	return schedule_resp(scp, devip, check_condition_result, NULL, 0, 0);
9513 err_out:
9514 	return schedule_resp(scp, NULL, DID_NO_CONNECT << 16, NULL, 0, 0);
9515 }
9516 
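/*
 * Per-command init. Reserved (internal) commands carry no deferred state;
 * for normal commands set up the completion hrtimer and work item used by
 * the delay/ndelay response machinery.
 */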
9517 static int sdebug_init_cmd_priv(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
9518 {
9519 	struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmd);
9520 	struct sdebug_defer *sd_dp = &sdsc->sd_dp;
9521 
9522 	if (blk_mq_is_reserved_rq(scsi_cmd_to_rq(cmd)))
9523 		return 0;
9524 
9525 	spin_lock_init(&sdsc->lock);
9526 	hrtimer_setup(&sd_dp->hrt, sdebug_q_cmd_hrt_complete, CLOCK_MONOTONIC,
9527 		      HRTIMER_MODE_REL_PINNED);
9528 	INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);
9529 
9530 	return 0;
9531 }
9532 
9533 static const struct scsi_host_template sdebug_driver_template = {
9534 	.show_info =		scsi_debug_show_info,
9535 	.write_info =		scsi_debug_write_info,
9536 	.proc_name =		sdebug_proc_name,
9537 	.name =			"SCSI DEBUG",
9538 	.info =			scsi_debug_info,
9539 	.sdev_init =		scsi_debug_sdev_init,
9540 	.sdev_configure =	scsi_debug_sdev_configure,
9541 	.sdev_destroy =		scsi_debug_sdev_destroy,
9542 	.ioctl =		scsi_debug_ioctl,
9543 	.queuecommand =		scsi_debug_queuecommand,
9544 	.queue_reserved_command = scsi_debug_process_reserved_command,
9545 	.change_queue_depth =	sdebug_change_qdepth,
9546 	.map_queues =		sdebug_map_queues,
9547 	.mq_poll =		sdebug_blk_mq_poll,
9548 	.eh_abort_handler =	scsi_debug_abort,
9549 	.eh_device_reset_handler = scsi_debug_device_reset,
9550 	.eh_target_reset_handler = scsi_debug_target_reset,
9551 	.eh_bus_reset_handler = scsi_debug_bus_reset,
9552 	.eh_host_reset_handler = scsi_debug_host_reset,
9553 	.can_queue =		SDEBUG_CANQUEUE,
9554 	.nr_reserved_cmds =	1,
9555 	.this_id =		7,
9556 	.sg_tablesize =		SG_MAX_SEGMENTS,
9557 	.cmd_per_lun =		DEF_CMD_PER_LUN,
9558 	.max_sectors =		-1U,
9559 	.max_segment_size =	-1U,
9560 	.module =		THIS_MODULE,
9561 	.skip_settle_delay =	1,
9562 	.track_queue_depth =	1,
9563 	.cmd_size = sizeof(union sdebug_priv),
9564 	.init_cmd_priv = sdebug_init_cmd_priv,
9565 	.target_alloc =		sdebug_target_alloc,
9566 	.target_destroy =	sdebug_target_destroy,
9567 };
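/*
 * Usage sketch (illustrative; add_host, num_tgts, max_luns and
 * per_host_store are this driver's module parameters): each simulated host
 * is built from the template above, e.g.
 *
 *	modprobe scsi_debug add_host=2 num_tgts=2 max_luns=4
 *
 * creates two pseudo hosts, each reporting 2 targets with 4 LUNs, all backed
 * by the same shared ram store unless per_host_store=1 is also given.
 */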
9568 
9569 static int sdebug_driver_probe(struct device *dev)
9570 {
9571 	int error = 0;
9572 	struct sdebug_host_info *sdbg_host;
9573 	struct Scsi_Host *hpnt;
9574 	int hprot;
9575 
9576 	sdbg_host = dev_to_sdebug_host(dev);
9577 
9578 	hpnt = scsi_host_alloc(&sdebug_driver_template, 0);
9579 	if (!hpnt) {
9580 		pr_err("scsi_host_alloc failed\n");
9581 		error = -ENODEV;
9582 		return error;
9583 	}
9584 	hpnt->can_queue = sdebug_max_queue;
9585 	hpnt->cmd_per_lun = sdebug_max_queue;
9586 	if (!sdebug_clustering)
9587 		hpnt->dma_boundary = PAGE_SIZE - 1;
9588 
9589 	if (submit_queues > nr_cpu_ids) {
9590 		pr_warn("%s: trim submit_queues (was %d) to nr_cpu_ids=%u\n",
9591 			my_name, submit_queues, nr_cpu_ids);
9592 		submit_queues = nr_cpu_ids;
9593 	}
9594 	/*
9595 	 * Decide whether to tell scsi subsystem that we want mq. The
9596 	 * following should give the same answer for each host.
9597 	 */
9598 	hpnt->nr_hw_queues = submit_queues;
9599 	if (sdebug_host_max_queue)
9600 		hpnt->host_tagset = 1;
9601 
9602 	/* poll queues are possible for nr_hw_queues > 1 */
9603 	if (hpnt->nr_hw_queues == 1 || (poll_queues < 1)) {
9604 		pr_warn("%s: trim poll_queues to 0. poll_q/nr_hw = (%d/%d)\n",
9605 			 my_name, poll_queues, hpnt->nr_hw_queues);
9606 		poll_queues = 0;
9607 	}
9608 
9609 	/*
9610 	 * Poll queues don't need interrupts, but we need at least one I/O queue
9611 	 * left over for non-polled I/O.
9612 	 * If that condition is not met, trim poll_queues to 1 (for simplicity).
9613 	 */
9614 	if (poll_queues >= submit_queues) {
9615 		if (submit_queues < 3)
9616 			pr_warn("%s: trim poll_queues to 1\n", my_name);
9617 		else
9618 			pr_warn("%s: trim poll_queues to 1. Perhaps try poll_queues=%d\n",
9619 				my_name, submit_queues - 1);
9620 		poll_queues = 1;
9621 	}
9622 	if (poll_queues)
9623 		hpnt->nr_maps = 3;
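	/*
	 * With poll queues enabled, three blk-mq queue maps are used
	 * (HCTX_TYPE_DEFAULT, HCTX_TYPE_READ and HCTX_TYPE_POLL); see
	 * sdebug_map_queues() above in the host template for how the
	 * regular and polled queues are divided up.
	 */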
9624 
9625 	sdbg_host->shost = hpnt;
9626 	if ((hpnt->this_id >= 0) && (sdebug_num_tgts > hpnt->this_id))
9627 		hpnt->max_id = sdebug_num_tgts + 1;
9628 	else
9629 		hpnt->max_id = sdebug_num_tgts;
9630 	/* = sdebug_max_luns; */
9631 	hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
9632 
9633 	hprot = 0;
9634 
9635 	switch (sdebug_dif) {
9636 
9637 	case T10_PI_TYPE1_PROTECTION:
9638 		hprot = SHOST_DIF_TYPE1_PROTECTION;
9639 		if (sdebug_dix)
9640 			hprot |= SHOST_DIX_TYPE1_PROTECTION;
9641 		break;
9642 
9643 	case T10_PI_TYPE2_PROTECTION:
9644 		hprot = SHOST_DIF_TYPE2_PROTECTION;
9645 		if (sdebug_dix)
9646 			hprot |= SHOST_DIX_TYPE2_PROTECTION;
9647 		break;
9648 
9649 	case T10_PI_TYPE3_PROTECTION:
9650 		hprot = SHOST_DIF_TYPE3_PROTECTION;
9651 		if (sdebug_dix)
9652 			hprot |= SHOST_DIX_TYPE3_PROTECTION;
9653 		break;
9654 
9655 	default:
9656 		if (sdebug_dix)
9657 			hprot |= SHOST_DIX_TYPE0_PROTECTION;
9658 		break;
9659 	}
9660 
9661 	scsi_host_set_prot(hpnt, hprot);
9662 
9663 	if (have_dif_prot || sdebug_dix)
9664 		pr_info("host protection%s%s%s%s%s%s%s\n",
9665 			(hprot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
9666 			(hprot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
9667 			(hprot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
9668 			(hprot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
9669 			(hprot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
9670 			(hprot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
9671 			(hprot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");
9672 
9673 	if (sdebug_guard == 1)
9674 		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
9675 	else
9676 		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);
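	/*
	 * Illustration: loading with "dif=1 dix=1 guard=1" takes the TYPE1
	 * branch of the switch above, so the banner reads
	 * "host protection DIF1 DIX1" and DIX guard tags are verified with
	 * the IP checksum rather than the T10-defined CRC.
	 */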
9677 
9678 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & sdebug_opts);
9679 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & sdebug_opts);
9680 	if (sdebug_every_nth)	/* need stats counters for every_nth */
9681 		sdebug_statistics = true;
9682 	error = scsi_add_host(hpnt, &sdbg_host->dev);
9683 	if (error) {
9684 		pr_err("scsi_add_host failed\n");
9685 		error = -ENODEV;
9686 		scsi_host_put(hpnt);
9687 	} else {
9688 		scsi_scan_host(hpnt);
9689 	}
9690 
9691 	return error;
9692 }
9693 
9694 static void sdebug_driver_remove(struct device *dev)
9695 {
9696 	struct sdebug_host_info *sdbg_host;
9697 	struct sdebug_dev_info *sdbg_devinfo, *tmp;
9698 
9699 	sdbg_host = dev_to_sdebug_host(dev);
9700 
9701 	scsi_remove_host(sdbg_host->shost);
9702 
9703 	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
9704 				 dev_list) {
9705 		list_del(&sdbg_devinfo->dev_list);
9706 		kfree(sdbg_devinfo->zstate);
9707 		kfree(sdbg_devinfo);
9708 	}
9709 
9710 	scsi_host_put(sdbg_host->shost);
9711 }
9712 
9713 static const struct bus_type pseudo_lld_bus = {
9714 	.name = "pseudo",
9715 	.probe = sdebug_driver_probe,
9716 	.remove = sdebug_driver_remove,
9717 	.drv_groups = sdebug_drv_groups,
9718 };
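/*
 * Note (illustrative): each simulated host is a device registered on this
 * "pseudo" bus, so adding one (e.g. via the add_host module parameter)
 * triggers sdebug_driver_probe() above, and unregistering it triggers
 * sdebug_driver_remove().
 */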
9719