xref: /linux/drivers/scsi/scsi_debug.c (revision ae3d56d81507c33024ba7c1eae2ef433aa9bc0d5)
1 /*
2  * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
3  *  Copyright (C) 1992  Eric Youngdale
4  *  Simulate a host adapter with 2 disks attached.  Do a lot of checking
5  *  to make sure that we are not getting blocks mixed up, and PANIC if
6  *  anything out of the ordinary is seen.
7  * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
8  *
9  * Copyright (C) 2001 - 2018 Douglas Gilbert
10  *
11  * This program is free software; you can redistribute it and/or modify
12  * it under the terms of the GNU General Public License as published by
13  * the Free Software Foundation; either version 2, or (at your option)
14  * any later version.
15  *
16  *  For documentation see http://sg.danny.cz/sg/sdebug26.html
17  *
18  */
19 
20 
21 #define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
22 
23 #include <linux/module.h>
24 
25 #include <linux/kernel.h>
26 #include <linux/errno.h>
27 #include <linux/jiffies.h>
28 #include <linux/slab.h>
29 #include <linux/types.h>
30 #include <linux/string.h>
31 #include <linux/genhd.h>
32 #include <linux/fs.h>
33 #include <linux/init.h>
34 #include <linux/proc_fs.h>
35 #include <linux/vmalloc.h>
36 #include <linux/moduleparam.h>
37 #include <linux/scatterlist.h>
38 #include <linux/blkdev.h>
39 #include <linux/crc-t10dif.h>
40 #include <linux/spinlock.h>
41 #include <linux/interrupt.h>
42 #include <linux/atomic.h>
43 #include <linux/hrtimer.h>
44 #include <linux/uuid.h>
45 #include <linux/t10-pi.h>
46 
47 #include <net/checksum.h>
48 
49 #include <asm/unaligned.h>
50 
51 #include <scsi/scsi.h>
52 #include <scsi/scsi_cmnd.h>
53 #include <scsi/scsi_device.h>
54 #include <scsi/scsi_host.h>
55 #include <scsi/scsicam.h>
56 #include <scsi/scsi_eh.h>
57 #include <scsi/scsi_tcq.h>
58 #include <scsi/scsi_dbg.h>
59 
60 #include "sd.h"
61 #include "scsi_logging.h"
62 
63 /* make sure inq_product_rev string corresponds to this version */
64 #define SDEBUG_VERSION "0188"	/* format to fit INQUIRY revision field */
65 static const char *sdebug_version_date = "20180128";
66 
67 #define MY_NAME "scsi_debug"
68 
69 /* Additional Sense Code (ASC) */
70 #define NO_ADDITIONAL_SENSE 0x0
71 #define LOGICAL_UNIT_NOT_READY 0x4
72 #define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
73 #define UNRECOVERED_READ_ERR 0x11
74 #define PARAMETER_LIST_LENGTH_ERR 0x1a
75 #define INVALID_OPCODE 0x20
76 #define LBA_OUT_OF_RANGE 0x21
77 #define INVALID_FIELD_IN_CDB 0x24
78 #define INVALID_FIELD_IN_PARAM_LIST 0x26
79 #define UA_RESET_ASC 0x29
80 #define UA_CHANGED_ASC 0x2a
81 #define TARGET_CHANGED_ASC 0x3f
82 #define LUNS_CHANGED_ASCQ 0x0e
83 #define INSUFF_RES_ASC 0x55
84 #define INSUFF_RES_ASCQ 0x3
85 #define POWER_ON_RESET_ASCQ 0x0
86 #define BUS_RESET_ASCQ 0x2	/* scsi bus reset occurred */
87 #define MODE_CHANGED_ASCQ 0x1	/* mode parameters changed */
88 #define CAPACITY_CHANGED_ASCQ 0x9
89 #define SAVING_PARAMS_UNSUP 0x39
90 #define TRANSPORT_PROBLEM 0x4b
91 #define THRESHOLD_EXCEEDED 0x5d
92 #define LOW_POWER_COND_ON 0x5e
93 #define MISCOMPARE_VERIFY_ASC 0x1d
94 #define MICROCODE_CHANGED_ASCQ 0x1	/* with TARGET_CHANGED_ASC */
95 #define MICROCODE_CHANGED_WO_RESET_ASCQ 0x16
96 #define WRITE_ERROR_ASC 0xc
97 
98 /* Additional Sense Code Qualifier (ASCQ) */
99 #define ACK_NAK_TO 0x3
100 
101 /* Default values for driver parameters */
102 #define DEF_NUM_HOST   1
103 #define DEF_NUM_TGTS   1
104 #define DEF_MAX_LUNS   1
105 /* With these defaults, this driver will make 1 host with 1 target
106  * (id 0) containing 1 logical unit (lun 0). That is 1 device.
107  */
108 #define DEF_ATO 1
109 #define DEF_CDB_LEN 10
110 #define DEF_JDELAY   1		/* if > 0 unit is a jiffy */
111 #define DEF_DEV_SIZE_MB   8
112 #define DEF_DIF 0
113 #define DEF_DIX 0
114 #define DEF_D_SENSE   0
115 #define DEF_EVERY_NTH   0
116 #define DEF_FAKE_RW	0
117 #define DEF_GUARD 0
118 #define DEF_HOST_LOCK 0
119 #define DEF_LBPU 0
120 #define DEF_LBPWS 0
121 #define DEF_LBPWS10 0
122 #define DEF_LBPRZ 1
123 #define DEF_LOWEST_ALIGNED 0
124 #define DEF_NDELAY   0		/* if > 0 unit is a nanosecond */
125 #define DEF_NO_LUN_0   0
126 #define DEF_NUM_PARTS   0
127 #define DEF_OPTS   0
128 #define DEF_OPT_BLKS 1024
129 #define DEF_PHYSBLK_EXP 0
130 #define DEF_OPT_XFERLEN_EXP 0
131 #define DEF_PTYPE   TYPE_DISK
132 #define DEF_REMOVABLE false
133 #define DEF_SCSI_LEVEL   7    /* INQUIRY, byte2 [6->SPC-4; 7->SPC-5] */
134 #define DEF_SECTOR_SIZE 512
135 #define DEF_UNMAP_ALIGNMENT 0
136 #define DEF_UNMAP_GRANULARITY 1
137 #define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
138 #define DEF_UNMAP_MAX_DESC 256
139 #define DEF_VIRTUAL_GB   0
140 #define DEF_VPD_USE_HOSTNO 1
141 #define DEF_WRITESAME_LENGTH 0xFFFF
142 #define DEF_STRICT 0
143 #define DEF_STATISTICS false
144 #define DEF_SUBMIT_QUEUES 1
145 #define DEF_UUID_CTL 0
146 #define JDELAY_OVERRIDDEN -9999
147 
148 #define SDEBUG_LUN_0_VAL 0
149 
150 /* bit mask values for sdebug_opts */
151 #define SDEBUG_OPT_NOISE		1
152 #define SDEBUG_OPT_MEDIUM_ERR		2
153 #define SDEBUG_OPT_TIMEOUT		4
154 #define SDEBUG_OPT_RECOVERED_ERR	8
155 #define SDEBUG_OPT_TRANSPORT_ERR	16
156 #define SDEBUG_OPT_DIF_ERR		32
157 #define SDEBUG_OPT_DIX_ERR		64
158 #define SDEBUG_OPT_MAC_TIMEOUT		128
159 #define SDEBUG_OPT_SHORT_TRANSFER	0x100
160 #define SDEBUG_OPT_Q_NOISE		0x200
161 #define SDEBUG_OPT_ALL_TSF		0x400
162 #define SDEBUG_OPT_RARE_TSF		0x800
163 #define SDEBUG_OPT_N_WCE		0x1000
164 #define SDEBUG_OPT_RESET_NOISE		0x2000
165 #define SDEBUG_OPT_NO_CDB_NOISE		0x4000
166 #define SDEBUG_OPT_HOST_BUSY		0x8000
167 #define SDEBUG_OPT_CMD_ABORT		0x10000
168 #define SDEBUG_OPT_ALL_NOISE (SDEBUG_OPT_NOISE | SDEBUG_OPT_Q_NOISE | \
169 			      SDEBUG_OPT_RESET_NOISE)
170 #define SDEBUG_OPT_ALL_INJECTING (SDEBUG_OPT_RECOVERED_ERR | \
171 				  SDEBUG_OPT_TRANSPORT_ERR | \
172 				  SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR | \
173 				  SDEBUG_OPT_SHORT_TRANSFER | \
174 				  SDEBUG_OPT_HOST_BUSY | \
175 				  SDEBUG_OPT_CMD_ABORT)
176 /* When "every_nth" > 0 then modulo "every_nth" commands:
177  *   - a missing response is simulated if SDEBUG_OPT_TIMEOUT is set
178  *   - a RECOVERED_ERROR is simulated on successful read and write
179  *     commands if SDEBUG_OPT_RECOVERED_ERR is set.
180  *   - a TRANSPORT_ERROR is simulated on successful read and write
181  *     commands if SDEBUG_OPT_TRANSPORT_ERR is set.
182  *   - similarly for DIF_ERR, DIX_ERR, SHORT_TRANSFER, HOST_BUSY and
183  *     CMD_ABORT
184  *
185  * When "every_nth" < 0 then after "- every_nth" commands the selected
186  * error will be injected. The error will be injected on every subsequent
187  * command until some other action occurs; for example, the user writing
188  * a new value (other than -1 or 1) to every_nth:
189  *      echo 0 > /sys/bus/pseudo/drivers/scsi_debug/every_nth
190  */
191 
192 /* As indicated in SAM-5 and SPC-4 Unit Attentions (UAs) are returned in
193  * priority order. In the subset implemented here lower numbers have higher
194  * priority. The UA numbers should be a sequence starting from 0 with
195  * SDEBUG_NUM_UAS being 1 higher than the highest numbered UA. */
196 #define SDEBUG_UA_POR 0		/* Power on, reset, or bus device reset */
197 #define SDEBUG_UA_BUS_RESET 1
198 #define SDEBUG_UA_MODE_CHANGED 2
199 #define SDEBUG_UA_CAPACITY_CHANGED 3
200 #define SDEBUG_UA_LUNS_CHANGED 4
201 #define SDEBUG_UA_MICROCODE_CHANGED 5	/* simulate firmware change */
202 #define SDEBUG_UA_MICROCODE_CHANGED_WO_RESET 6
203 #define SDEBUG_NUM_UAS 7
204 
205 /* when 1==SDEBUG_OPT_MEDIUM_ERR, a medium error is simulated at this
206  * sector on read commands: */
207 #define OPT_MEDIUM_ERR_ADDR   0x1234 /* that's sector 4660 in decimal */
208 #define OPT_MEDIUM_ERR_NUM    10     /* number of consecutive medium errs */
209 
210 /* If REPORT LUNS has luns >= 256 it can choose "flat space" (value 1)
211  * or "peripheral device" addressing (value 0) */
212 #define SAM2_LUN_ADDRESS_METHOD 0
213 
214 /* SDEBUG_CANQUEUE is the maximum number of commands that can be queued
215  * (for response) per submit queue at one time. Can be reduced by max_queue
216  * option. Command responses are not queued when jdelay=0 and ndelay=0. The
217  * per-device DEF_CMD_PER_LUN can be changed via sysfs:
218  * /sys/class/scsi_device/<h:c:t:l>/device/queue_depth
219  * but cannot exceed SDEBUG_CANQUEUE .
220  */
221 #define SDEBUG_CANQUEUE_WORDS  3	/* a WORD is bits in a long */
222 #define SDEBUG_CANQUEUE  (SDEBUG_CANQUEUE_WORDS * BITS_PER_LONG)
223 #define DEF_CMD_PER_LUN  255
224 
225 #define F_D_IN			1
226 #define F_D_OUT			2
227 #define F_D_OUT_MAYBE		4	/* WRITE SAME, NDOB bit */
228 #define F_D_UNKN		8
229 #define F_RL_WLUN_OK		0x10
230 #define F_SKIP_UA		0x20
231 #define F_DELAY_OVERR		0x40
232 #define F_SA_LOW		0x80	/* cdb byte 1, bits 4 to 0 */
233 #define F_SA_HIGH		0x100	/* as used by variable length cdbs */
234 #define F_INV_OP		0x200
235 #define F_FAKE_RW		0x400
236 #define F_M_ACCESS		0x800	/* media access */
237 #define F_SSU_DELAY		0x1000
238 #define F_SYNC_DELAY		0x2000
239 
240 #define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
241 #define FF_MEDIA_IO (F_M_ACCESS | F_FAKE_RW)
242 #define FF_SA (F_SA_HIGH | F_SA_LOW)
243 #define F_LONG_DELAY		(F_SSU_DELAY | F_SYNC_DELAY)
244 
245 #define SDEBUG_MAX_PARTS 4
246 
247 #define SDEBUG_MAX_CMD_LEN 32
248 
249 
/* Per logical unit (LU) state for one simulated SCSI device. */
struct sdebug_dev_info {
	struct list_head dev_list;	/* entry on owning host's dev_info_list (presumably) */
	unsigned int channel;
	unsigned int target;
	u64 lun;
	uuid_t lu_name;			/* LU name as a UUID; see uuid_ctl parameter */
	struct sdebug_host_info *sdbg_host;	/* back pointer to owning simulated host */
	unsigned long uas_bm[1];	/* pending unit attentions; bit numbers are SDEBUG_UA_* */
	atomic_t num_in_q;		/* NOTE(review): looks like count of commands queued on this LU — confirm against users */
	atomic_t stopped;		/* non-zero presumably means unit stopped (START STOP UNIT) — verify */
	bool used;
};
262 
/* State for one simulated host adapter; one instance per pseudo HBA. */
struct sdebug_host_info {
	struct list_head host_list;	/* entry on global sdebug_host_list */
	struct Scsi_Host *shost;	/* mid-level host this instance backs */
	struct device dev;		/* embedded device; recovered via to_sdebug_host() */
	struct list_head dev_info_list;	/* child sdebug_dev_info instances (presumably via dev_list) */
};
269 
270 #define to_sdebug_host(d)	\
271 	container_of(d, struct sdebug_host_info, dev)
272 
273 enum sdeb_defer_type {SDEB_DEFER_NONE = 0, SDEB_DEFER_HRT = 1,
274 		      SDEB_DEFER_WQ = 2};
275 
/*
 * Bookkeeping for a deferred (delayed) command response. Completion is
 * driven either by an hrtimer or by a workqueue item; defer_t records
 * which mechanism (if any) is armed — see enum sdeb_defer_type.
 */
struct sdebug_defer {
	struct hrtimer hrt;	/* timer path (SDEB_DEFER_HRT, presumably) */
	struct execute_work ew;	/* workqueue path (SDEB_DEFER_WQ, presumably) */
	int sqa_idx;	/* index of sdebug_queue array */
	int qc_idx;	/* index of sdebug_queued_cmd array within sqa_idx */
	int issuing_cpu;	/* cpu command was submitted on; compared for sdebug_miss_cpus stat — TODO confirm */
	bool init_hrt;	/* NOTE(review): appears to mean hrt has been initialized — verify */
	bool init_wq;	/* NOTE(review): appears to mean ew has been initialized — verify */
	bool aborted;	/* true when blk_abort_request() already called */
	enum sdeb_defer_type defer_t;
};
287 
/*
 * One in-flight command awaiting deferred completion. The inj_* bits
 * record which error injections (named after the matching SDEBUG_OPT_*
 * option) were selected for this particular command.
 */
struct sdebug_queued_cmd {
	/* corresponding bit set in in_use_bm[] in owning struct sdebug_queue
	 * instance indicates this slot is in use.
	 */
	struct sdebug_defer *sd_dp;	/* deferred completion state */
	struct scsi_cmnd *a_cmnd;	/* mid-level command occupying this slot */
	unsigned int inj_recovered:1;	/* cf. SDEBUG_OPT_RECOVERED_ERR */
	unsigned int inj_transport:1;	/* cf. SDEBUG_OPT_TRANSPORT_ERR */
	unsigned int inj_dif:1;		/* cf. SDEBUG_OPT_DIF_ERR */
	unsigned int inj_dix:1;		/* cf. SDEBUG_OPT_DIX_ERR */
	unsigned int inj_short:1;	/* cf. SDEBUG_OPT_SHORT_TRANSFER */
	unsigned int inj_host_busy:1;	/* cf. SDEBUG_OPT_HOST_BUSY */
	unsigned int inj_cmd_abort:1;	/* cf. SDEBUG_OPT_CMD_ABORT */
};
302 
/*
 * One submit queue of command slots (at most SDEBUG_CANQUEUE at a time,
 * see the comment above SDEBUG_CANQUEUE_WORDS). Slot N of qc_arr[] is
 * occupied when bit N of in_use_bm[] is set.
 */
struct sdebug_queue {
	struct sdebug_queued_cmd qc_arr[SDEBUG_CANQUEUE];	/* command slots */
	unsigned long in_use_bm[SDEBUG_CANQUEUE_WORDS];	/* slot-occupancy bitmap */
	spinlock_t qc_lock;	/* presumably serializes qc_arr/in_use_bm access — verify at use sites */
	atomic_t blocked;	/* to temporarily stop more being queued */
};
309 
310 static atomic_t sdebug_cmnd_count;   /* number of incoming commands */
311 static atomic_t sdebug_completions;  /* count of deferred completions */
312 static atomic_t sdebug_miss_cpus;    /* submission + completion cpus differ */
313 static atomic_t sdebug_a_tsf;	     /* 'almost task set full' counter */
314 
/*
 * Describes one supported cdb variant: opcode, optional service action,
 * behaviour flags and the response function that services it. Primary
 * entries live in opcode_info_arr[]; alternate cdb lengths for the same
 * logical command are chained via arrp (the "_iarr" overflow arrays).
 */
struct opcode_info_t {
	u8 num_attached;	/* 0 if this is it (i.e. a leaf); use 0xff */
				/* for terminating element */
	u8 opcode;		/* if num_attached > 0, preferred */
	u16 sa;			/* service action */
	u32 flags;		/* OR-ed set of F_* / FF_* flags defined above */
	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);	/* response builder; may be NULL */
	const struct opcode_info_t *arrp;  /* num_attached elements or NULL */
	u8 len_mask[16];	/* len_mask[0]-->cdb_len, then mask for cdb */
				/* 1 to min(cdb_len, 15); ignore cdb[15...] */
};
326 
327 /* SCSI opcodes (first byte of cdb) of interest mapped onto these indexes */
/* Index values into opcode_info_arr[]; opcode_ind_arr[] below maps a cdb
 * opcode byte onto one of these. */
enum sdeb_opcode_index {
	SDEB_I_INVALID_OPCODE =	0,	/* must stay 0: default for unmapped opcodes */
	SDEB_I_INQUIRY = 1,
	SDEB_I_REPORT_LUNS = 2,
	SDEB_I_REQUEST_SENSE = 3,
	SDEB_I_TEST_UNIT_READY = 4,
	SDEB_I_MODE_SENSE = 5,		/* 6, 10 */
	SDEB_I_MODE_SELECT = 6,		/* 6, 10 */
	SDEB_I_LOG_SENSE = 7,
	SDEB_I_READ_CAPACITY = 8,	/* 10; 16 is in SA_IN(16) */
	SDEB_I_READ = 9,		/* 6, 10, 12, 16 */
	SDEB_I_WRITE = 10,		/* 6, 10, 12, 16 */
	SDEB_I_START_STOP = 11,
	SDEB_I_SERV_ACT_IN_16 = 12,	/* add ...SERV_ACT_IN_12 if needed */
	SDEB_I_SERV_ACT_OUT_16 = 13,	/* add ...SERV_ACT_OUT_12 if needed */
	SDEB_I_MAINT_IN = 14,
	SDEB_I_MAINT_OUT = 15,
	SDEB_I_VERIFY = 16,		/* 10 only */
	SDEB_I_VARIABLE_LEN = 17,	/* READ(32), WRITE(32), WR_SCAT(32) */
	SDEB_I_RESERVE = 18,		/* 6, 10 */
	SDEB_I_RELEASE = 19,		/* 6, 10 */
	SDEB_I_ALLOW_REMOVAL = 20,	/* PREVENT ALLOW MEDIUM REMOVAL */
	SDEB_I_REZERO_UNIT = 21,	/* REWIND in SSC */
	SDEB_I_ATA_PT = 22,		/* 12, 16 */
	SDEB_I_SEND_DIAG = 23,
	SDEB_I_UNMAP = 24,
	SDEB_I_XDWRITEREAD = 25,	/* 10 only */
	SDEB_I_WRITE_BUFFER = 26,
	SDEB_I_WRITE_SAME = 27,		/* 10, 16 */
	SDEB_I_SYNC_CACHE = 28,		/* 10, 16 */
	SDEB_I_COMP_WRITE = 29,
	SDEB_I_LAST_ELEMENT = 30,	/* keep this last (previous + 1) */
};
361 
362 
/* Maps cdb byte 0 (the SCSI opcode) onto a SDEB_I_* index. A zero entry
 * means SDEB_I_INVALID_OPCODE: the opcode is not supported. */
static const unsigned char opcode_ind_arr[256] = {
/* 0x0; 0x0->0x1f: 6 byte cdbs */
	SDEB_I_TEST_UNIT_READY, SDEB_I_REZERO_UNIT, 0, SDEB_I_REQUEST_SENSE,
	    0, 0, 0, 0,
	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, 0,
	0, 0, SDEB_I_INQUIRY, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
	    SDEB_I_RELEASE,
	0, 0, SDEB_I_MODE_SENSE, SDEB_I_START_STOP, 0, SDEB_I_SEND_DIAG,
	    SDEB_I_ALLOW_REMOVAL, 0,
/* 0x20; 0x20->0x3f: 10 byte cdbs */
	0, 0, 0, 0, 0, SDEB_I_READ_CAPACITY, 0, 0,
	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, SDEB_I_VERIFY,
	0, 0, 0, 0, 0, SDEB_I_SYNC_CACHE, 0, 0,
	0, 0, 0, SDEB_I_WRITE_BUFFER, 0, 0, 0, 0,
/* 0x40; 0x40->0x5f: 10 byte cdbs */
	0, SDEB_I_WRITE_SAME, SDEB_I_UNMAP, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, SDEB_I_LOG_SENSE, 0, 0,
	0, 0, 0, SDEB_I_XDWRITEREAD, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
	    SDEB_I_RELEASE,
	0, 0, SDEB_I_MODE_SENSE, 0, 0, 0, 0, 0,
/* 0x60; 0x60->0x7d are reserved, 0x7e is "extended cdb" */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, SDEB_I_VARIABLE_LEN,
/* 0x80; 0x80->0x9f: 16 byte cdbs */
	0, 0, 0, 0, 0, SDEB_I_ATA_PT, 0, 0,
	SDEB_I_READ, SDEB_I_COMP_WRITE, SDEB_I_WRITE, 0, 0, 0, 0, 0,
	0, SDEB_I_SYNC_CACHE, 0, SDEB_I_WRITE_SAME, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, SDEB_I_SERV_ACT_IN_16, SDEB_I_SERV_ACT_OUT_16,
/* 0xa0; 0xa0->0xbf: 12 byte cdbs */
	SDEB_I_REPORT_LUNS, SDEB_I_ATA_PT, 0, SDEB_I_MAINT_IN,
	     SDEB_I_MAINT_OUT, 0, 0, 0,
	SDEB_I_READ, 0 /* SDEB_I_SERV_ACT_OUT_12 */, SDEB_I_WRITE,
	     0 /* SDEB_I_SERV_ACT_IN_12 */, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
/* 0xc0; 0xc0->0xff: vendor specific */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
};
405 
406 /*
407  * The following "response" functions return the SCSI mid-level's 4 byte
408  * tuple-in-an-int. To handle commands with an IMMED bit, for a faster
409  * command completion, they can mask their return value with
410  * SDEG_RES_IMMED_MASK .
411  */
412 #define SDEG_RES_IMMED_MASK 0x40000000
413 
414 static int resp_inquiry(struct scsi_cmnd *, struct sdebug_dev_info *);
415 static int resp_report_luns(struct scsi_cmnd *, struct sdebug_dev_info *);
416 static int resp_requests(struct scsi_cmnd *, struct sdebug_dev_info *);
417 static int resp_mode_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
418 static int resp_mode_select(struct scsi_cmnd *, struct sdebug_dev_info *);
419 static int resp_log_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
420 static int resp_readcap(struct scsi_cmnd *, struct sdebug_dev_info *);
421 static int resp_read_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
422 static int resp_write_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
423 static int resp_write_scat(struct scsi_cmnd *, struct sdebug_dev_info *);
424 static int resp_start_stop(struct scsi_cmnd *, struct sdebug_dev_info *);
425 static int resp_readcap16(struct scsi_cmnd *, struct sdebug_dev_info *);
426 static int resp_get_lba_status(struct scsi_cmnd *, struct sdebug_dev_info *);
427 static int resp_report_tgtpgs(struct scsi_cmnd *, struct sdebug_dev_info *);
428 static int resp_unmap(struct scsi_cmnd *, struct sdebug_dev_info *);
429 static int resp_rsup_opcodes(struct scsi_cmnd *, struct sdebug_dev_info *);
430 static int resp_rsup_tmfs(struct scsi_cmnd *, struct sdebug_dev_info *);
431 static int resp_write_same_10(struct scsi_cmnd *, struct sdebug_dev_info *);
432 static int resp_write_same_16(struct scsi_cmnd *, struct sdebug_dev_info *);
433 static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *);
434 static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *);
435 static int resp_sync_cache(struct scsi_cmnd *, struct sdebug_dev_info *);
436 
437 /*
438  * The following are overflow arrays for cdbs that "hit" the same index in
439  * the opcode_info_arr array. The most time sensitive (or commonly used) cdb
440  * should be placed in opcode_info_arr[], the others should be placed here.
441  */
static const struct opcode_info_t msense_iarr[] = {
	{0, 0x1a, 0, F_D_IN, NULL, NULL,		/* MODE SENSE(6) */
	    {6,  0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t mselect_iarr[] = {
	{0, 0x15, 0, F_D_OUT, NULL, NULL,		/* MODE SELECT(6) */
	    {6,  0xf1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t read_iarr[] = {
	{0, 0x28, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(10) */
	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },
	{0, 0x8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL, /* READ(6) */
	    {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0xa8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(12) */
	    {12,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf,
	     0xc7, 0, 0, 0, 0} },
};

static const struct opcode_info_t write_iarr[] = {
	{0, 0x2a, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(10) */
	    NULL, {10,  0xfb, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7,
		   0, 0, 0, 0, 0, 0} },
	{0, 0xa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,   /* WRITE(6) */
	    NULL, {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0,
		   0, 0, 0} },
	{0, 0xaa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(12) */
	    NULL, {12,  0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		   0xbf, 0xc7, 0, 0, 0, 0} },
};

static const struct opcode_info_t sa_in_16_iarr[] = {
	{0, 0x9e, 0x12, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL,
	    {16,  0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0, 0xc7} },	/* GET LBA STATUS(16) */
};

static const struct opcode_info_t vl_iarr[] = {	/* VARIABLE LENGTH */
	{0, 0x7f, 0xb, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_dt0,
	    NULL, {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0xb, 0xfa,
		   0, 0xff, 0xff, 0xff, 0xff} },	/* WRITE(32) */
	{0, 0x7f, 0x11, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
	    NULL, {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x11, 0xf8,
		   0, 0xff, 0xff, 0x0, 0x0} },	/* WRITE SCATTERED(32) */
};

static const struct opcode_info_t maint_in_iarr[] = {	/* MAINT IN */
	{0, 0xa3, 0xc, F_SA_LOW | F_D_IN, resp_rsup_opcodes, NULL,
	    {12,  0xc, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
	     0xc7, 0, 0, 0, 0} }, /* REPORT SUPPORTED OPERATION CODES */
	{0, 0xa3, 0xd, F_SA_LOW | F_D_IN, resp_rsup_tmfs, NULL,
	    {12,  0xd, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
	     0, 0} },	/* REPORTED SUPPORTED TASK MANAGEMENT FUNCTIONS */
};

static const struct opcode_info_t write_same_iarr[] = {
	{0, 0x93, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_write_same_16, NULL,
	    {16,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0x3f, 0xc7} },		/* WRITE SAME(16) */
};

static const struct opcode_info_t reserve_iarr[] = {
	{0, 0x16, 0, F_D_OUT, NULL, NULL,		/* RESERVE(6) */
	    {6,  0x1f, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t release_iarr[] = {
	{0, 0x17, 0, F_D_OUT, NULL, NULL,		/* RELEASE(6) */
	    {6,  0x1f, 0xff, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t sync_cache_iarr[] = {
	{0, 0x91, 0, F_SYNC_DELAY | F_M_ACCESS, resp_sync_cache, NULL,
	    {16,  0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* SYNC_CACHE (16) */
};
520 
521 
522 /* This array is accessed via SDEB_I_* values. Make sure all are mapped,
523  * plus the terminating elements for logic that scans this table such as
524  * REPORT SUPPORTED OPERATION CODES. */
static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEMENT + 1] = {
/* 0 */
	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL,	/* unknown opcodes */
	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x12, 0, FF_RESPOND | F_D_IN, resp_inquiry, NULL, /* INQUIRY */
	    {6,  0xe3, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0xa0, 0, FF_RESPOND | F_D_IN, resp_report_luns, NULL,
	    {12,  0xe3, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
	     0, 0} },					/* REPORT LUNS */
	{0, 0x3, 0, FF_RESPOND | F_D_IN, resp_requests, NULL,
	    {6,  0xe1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x0, 0, F_M_ACCESS | F_RL_WLUN_OK, NULL, NULL,/* TEST UNIT READY */
	    {6,  0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
/* 5 */
	{ARRAY_SIZE(msense_iarr), 0x5a, 0, F_D_IN,	/* MODE SENSE(10) */
	    resp_mode_sense, msense_iarr, {10,  0xf8, 0xff, 0xff, 0, 0, 0,
		0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(mselect_iarr), 0x55, 0, F_D_OUT,	/* MODE SELECT(10) */
	    resp_mode_select, mselect_iarr, {10,  0xf1, 0, 0, 0, 0, 0, 0xff,
		0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
	{0, 0x4d, 0, F_D_IN, resp_log_sense, NULL,	/* LOG SENSE */
	    {10,  0xe3, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0,
	     0, 0, 0} },
	{0, 0x25, 0, F_D_IN, resp_readcap, NULL,    /* READ CAPACITY(10) */
	    {10,  0xe1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0x1, 0xc7, 0, 0, 0, 0,
	     0, 0} },
	{ARRAY_SIZE(read_iarr), 0x88, 0, F_D_IN | FF_MEDIA_IO, /* READ(16) */
	    resp_read_dt0, read_iarr, {16,  0xfe, 0xff, 0xff, 0xff, 0xff,
	    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
/* 10 */
	{ARRAY_SIZE(write_iarr), 0x8a, 0, F_D_OUT | FF_MEDIA_IO,
	    resp_write_dt0, write_iarr,			/* WRITE(16) */
		{16,  0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
	{0, 0x1b, 0, F_SSU_DELAY, resp_start_stop, NULL,/* START STOP UNIT */
	    {6,  0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(sa_in_16_iarr), 0x9e, 0x10, F_SA_LOW | F_D_IN,
	    resp_readcap16, sa_in_16_iarr, /* SA_IN(16), READ CAPACITY(16) */
		{16,  0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		 0xff, 0xff, 0xff, 0xff, 0x1, 0xc7} },
	{0, 0x9f, 0x12, F_SA_LOW | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
	    NULL, {16,  0x12, 0xf9, 0x0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff,
	    0xff, 0xff, 0xff, 0xff, 0xc7} },  /* SA_OUT(16), WRITE SCAT(16) */
	{ARRAY_SIZE(maint_in_iarr), 0xa3, 0xa, F_SA_LOW | F_D_IN,
	    resp_report_tgtpgs,	/* MAINT IN, REPORT TARGET PORT GROUPS */
		maint_in_iarr, {12,  0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff,
				0xff, 0, 0xc7, 0, 0, 0, 0} },
/* 15 */
	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */
	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x2f, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, NULL, NULL, /* VERIFY(10) */
	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7,
	     0, 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(vl_iarr), 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_MEDIA_IO,
	    resp_read_dt0, vl_iarr,	/* VARIABLE LENGTH, READ(32) */
	    {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x9, 0xfe, 0, 0xff, 0xff,
	     0xff, 0xff} },
	{ARRAY_SIZE(reserve_iarr), 0x56, 0, F_D_OUT,
	    NULL, reserve_iarr,	/* RESERVE(10) <no response function> */
	    {10,  0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
	     0} },
	{ARRAY_SIZE(release_iarr), 0x57, 0, F_D_OUT,
	    NULL, release_iarr, /* RELEASE(10) <no response function> */
	    {10,  0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
	     0} },
/* 20 */
	{0, 0x1e, 0, 0, NULL, NULL, /* ALLOW REMOVAL */
	    {6,  0, 0, 0, 0x3, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x1, 0, 0, resp_start_stop, NULL, /* REWIND ?? */
	    {6,  0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */
	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	/* NOTE(review): every other entry has the u16 service action third
	 * and the u32 flags fourth; here F_D_OUT occupies the sa slot and
	 * flags is 0. Looks like the two initializers are swapped — verify
	 * against upstream before changing. */
	{0, 0x1d, F_D_OUT, 0, NULL, NULL,	/* SEND DIAGNOSTIC */
	    {6,  0xf7, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x42, 0, F_D_OUT | FF_MEDIA_IO, resp_unmap, NULL, /* UNMAP */
	    {10,  0x1, 0, 0, 0, 0, 0x3f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
/* 25 */
	{0, 0x3b, 0, F_D_OUT_MAYBE, resp_write_buffer, NULL,
	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },			/* WRITE_BUFFER */
	{ARRAY_SIZE(write_same_iarr), 0x41, 0, F_D_OUT_MAYBE | FF_MEDIA_IO,
	    resp_write_same_10, write_same_iarr,	/* WRITE SAME(10) */
		{10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0,
		 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(sync_cache_iarr), 0x35, 0, F_SYNC_DELAY | F_M_ACCESS,
	    resp_sync_cache, sync_cache_iarr,
	    {10,  0x7, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },			/* SYNC_CACHE (10) */
	{0, 0x89, 0, F_D_OUT | FF_MEDIA_IO, resp_comp_write, NULL,
	    {16,  0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0,
	     0, 0xff, 0x3f, 0xc7} },		/* COMPARE AND WRITE */

/* 30 */
	{0xff, 0, 0, 0, NULL, NULL,		/* terminating element */
	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};
621 
622 static int sdebug_add_host = DEF_NUM_HOST;
623 static int sdebug_ato = DEF_ATO;
624 static int sdebug_cdb_len = DEF_CDB_LEN;
625 static int sdebug_jdelay = DEF_JDELAY;	/* if > 0 then unit is jiffies */
626 static int sdebug_dev_size_mb = DEF_DEV_SIZE_MB;
627 static int sdebug_dif = DEF_DIF;
628 static int sdebug_dix = DEF_DIX;
629 static int sdebug_dsense = DEF_D_SENSE;
630 static int sdebug_every_nth = DEF_EVERY_NTH;
631 static int sdebug_fake_rw = DEF_FAKE_RW;
632 static unsigned int sdebug_guard = DEF_GUARD;
633 static int sdebug_lowest_aligned = DEF_LOWEST_ALIGNED;
634 static int sdebug_max_luns = DEF_MAX_LUNS;
635 static int sdebug_max_queue = SDEBUG_CANQUEUE;	/* per submit queue */
636 static unsigned int sdebug_medium_error_start = OPT_MEDIUM_ERR_ADDR;
637 static int sdebug_medium_error_count = OPT_MEDIUM_ERR_NUM;
638 static atomic_t retired_max_queue;	/* if > 0 then was prior max_queue */
639 static int sdebug_ndelay = DEF_NDELAY;	/* if > 0 then unit is nanoseconds */
640 static int sdebug_no_lun_0 = DEF_NO_LUN_0;
641 static int sdebug_no_uld;
642 static int sdebug_num_parts = DEF_NUM_PARTS;
643 static int sdebug_num_tgts = DEF_NUM_TGTS; /* targets per host */
644 static int sdebug_opt_blks = DEF_OPT_BLKS;
645 static int sdebug_opts = DEF_OPTS;
646 static int sdebug_physblk_exp = DEF_PHYSBLK_EXP;
647 static int sdebug_opt_xferlen_exp = DEF_OPT_XFERLEN_EXP;
648 static int sdebug_ptype = DEF_PTYPE; /* SCSI peripheral device type */
649 static int sdebug_scsi_level = DEF_SCSI_LEVEL;
650 static int sdebug_sector_size = DEF_SECTOR_SIZE;
651 static int sdebug_virtual_gb = DEF_VIRTUAL_GB;
652 static int sdebug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
653 static unsigned int sdebug_lbpu = DEF_LBPU;
654 static unsigned int sdebug_lbpws = DEF_LBPWS;
655 static unsigned int sdebug_lbpws10 = DEF_LBPWS10;
656 static unsigned int sdebug_lbprz = DEF_LBPRZ;
657 static unsigned int sdebug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
658 static unsigned int sdebug_unmap_granularity = DEF_UNMAP_GRANULARITY;
659 static unsigned int sdebug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
660 static unsigned int sdebug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
661 static unsigned int sdebug_write_same_length = DEF_WRITESAME_LENGTH;
662 static int sdebug_uuid_ctl = DEF_UUID_CTL;
663 static bool sdebug_removable = DEF_REMOVABLE;
664 static bool sdebug_clustering;
665 static bool sdebug_host_lock = DEF_HOST_LOCK;
666 static bool sdebug_strict = DEF_STRICT;
667 static bool sdebug_any_injecting_opt;
668 static bool sdebug_verbose;
669 static bool have_dif_prot;
670 static bool write_since_sync;
671 static bool sdebug_statistics = DEF_STATISTICS;
672 
673 static unsigned int sdebug_store_sectors;
674 static sector_t sdebug_capacity;	/* in sectors */
675 
676 /* old BIOS stuff, kernel may get rid of them but some mode sense pages
677    may still need them */
678 static int sdebug_heads;		/* heads per disk */
679 static int sdebug_cylinders_per;	/* cylinders per surface */
680 static int sdebug_sectors_per;		/* sectors per cylinder */
681 
682 static LIST_HEAD(sdebug_host_list);
683 static DEFINE_SPINLOCK(sdebug_host_list_lock);
684 
685 static unsigned char *fake_storep;	/* ramdisk storage */
686 static struct t10_pi_tuple *dif_storep;	/* protection info */
687 static void *map_storep;		/* provisioning map */
688 
689 static unsigned long map_size;
690 static int num_aborts;
691 static int num_dev_resets;
692 static int num_target_resets;
693 static int num_bus_resets;
694 static int num_host_resets;
695 static int dix_writes;
696 static int dix_reads;
697 static int dif_errors;
698 
699 static int submit_queues = DEF_SUBMIT_QUEUES;  /* > 1 for multi-queue (mq) */
700 static struct sdebug_queue *sdebug_q_arr;  /* ptr to array of submit queues */
701 
702 static DEFINE_RWLOCK(atomic_rw);
703 
704 static char sdebug_proc_name[] = MY_NAME;
705 static const char *my_name = MY_NAME;
706 
707 static struct bus_type pseudo_lld_bus;
708 
709 static struct device_driver sdebug_driverfs_driver = {
710 	.name 		= sdebug_proc_name,
711 	.bus		= &pseudo_lld_bus,
712 };
713 
714 static const int check_condition_result =
715 		(DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
716 
717 static const int illegal_condition_result =
718 	(DRIVER_SENSE << 24) | (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;
719 
720 static const int device_qfull_result =
721 	(DID_OK << 16) | (COMMAND_COMPLETE << 8) | SAM_STAT_TASK_SET_FULL;
722 
723 
724 /* Only do the extra work involved in logical block provisioning if one or
725  * more of the lbpu, lbpws or lbpws10 parameters are given and we are doing
726  * real reads and writes (i.e. not skipping them for speed).
727  */
728 static inline bool scsi_debug_lbp(void)
729 {
730 	return 0 == sdebug_fake_rw &&
731 		(sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10);
732 }
733 
734 static void *fake_store(unsigned long long lba)
735 {
736 	lba = do_div(lba, sdebug_store_sectors);
737 
738 	return fake_storep + lba * sdebug_sector_size;
739 }
740 
741 static struct t10_pi_tuple *dif_store(sector_t sector)
742 {
743 	sector = sector_div(sector, sdebug_store_sectors);
744 
745 	return dif_storep + sector;
746 }
747 
748 static void sdebug_max_tgts_luns(void)
749 {
750 	struct sdebug_host_info *sdbg_host;
751 	struct Scsi_Host *hpnt;
752 
753 	spin_lock(&sdebug_host_list_lock);
754 	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
755 		hpnt = sdbg_host->shost;
756 		if ((hpnt->this_id >= 0) &&
757 		    (sdebug_num_tgts > hpnt->this_id))
758 			hpnt->max_id = sdebug_num_tgts + 1;
759 		else
760 			hpnt->max_id = sdebug_num_tgts;
761 		/* sdebug_max_luns; */
762 		hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
763 	}
764 	spin_unlock(&sdebug_host_list_lock);
765 }
766 
/* Whether the invalid field is in the data-out buffer or in the CDB. */
enum sdeb_cmd_data {SDEB_IN_DATA = 0, SDEB_IN_CDB = 1};

/* Set in_bit to -1 to indicate no bit position of invalid field */
/* Builds an ILLEGAL REQUEST sense with a Sense Key Specific (SKS) field
 * pointing at the offending byte (and optionally bit) of the CDB or
 * parameter data, in either fixed or descriptor format per sdebug_dsense.
 */
static void mk_sense_invalid_fld(struct scsi_cmnd *scp,
				 enum sdeb_cmd_data c_d,
				 int in_byte, int in_bit)
{
	unsigned char *sbuff;
	u8 sks[4];	/* sense key specific bytes (SPC-4 field pointer) */
	int sl, asc;

	sbuff = scp->sense_buffer;
	if (!sbuff) {
		sdev_printk(KERN_ERR, scp->device,
			    "%s: sense_buffer is NULL\n", __func__);
		return;
	}
	asc = c_d ? INVALID_FIELD_IN_CDB : INVALID_FIELD_IN_PARAM_LIST;
	memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
	scsi_build_sense_buffer(sdebug_dsense, sbuff, ILLEGAL_REQUEST, asc, 0);
	memset(sks, 0, sizeof(sks));
	sks[0] = 0x80;	/* SKSV: field pointer valid */
	if (c_d)
		sks[0] |= 0x40;	/* C/D: error is in the CDB */
	if (in_bit >= 0) {
		sks[0] |= 0x8;	/* BPV: bit pointer valid */
		sks[0] |= 0x7 & in_bit;
	}
	put_unaligned_be16(in_byte, sks + 1);
	if (sdebug_dsense) {
		/* append a 0x2 (SKS) descriptor to the descriptor sense */
		sl = sbuff[7] + 8;
		sbuff[7] = sl;
		sbuff[sl] = 0x2;
		sbuff[sl + 1] = 0x6;
		memcpy(sbuff + sl + 4, sks, 3);
	} else
		memcpy(sbuff + 15, sks, 3);	/* fixed: SKS at bytes 15..17 */
	if (sdebug_verbose)
		sdev_printk(KERN_INFO, scp->device, "%s:  [sense_key,asc,ascq"
			    "]: [0x5,0x%x,0x0] %c byte=%d, bit=%d\n",
			    my_name, asc, c_d ? 'C' : 'D', in_byte, in_bit);
}
809 
810 static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq)
811 {
812 	unsigned char *sbuff;
813 
814 	sbuff = scp->sense_buffer;
815 	if (!sbuff) {
816 		sdev_printk(KERN_ERR, scp->device,
817 			    "%s: sense_buffer is NULL\n", __func__);
818 		return;
819 	}
820 	memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
821 
822 	scsi_build_sense_buffer(sdebug_dsense, sbuff, key, asc, asq);
823 
824 	if (sdebug_verbose)
825 		sdev_printk(KERN_INFO, scp->device,
826 			    "%s:  [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
827 			    my_name, key, asc, asq);
828 }
829 
/* Convenience wrapper: ILLEGAL REQUEST / INVALID COMMAND OPERATION CODE. */
static void mk_sense_invalid_opcode(struct scsi_cmnd *scp)
{
	mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
}
834 
835 static int scsi_debug_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
836 {
837 	if (sdebug_verbose) {
838 		if (0x1261 == cmd)
839 			sdev_printk(KERN_INFO, dev,
840 				    "%s: BLKFLSBUF [0x1261]\n", __func__);
841 		else if (0x5331 == cmd)
842 			sdev_printk(KERN_INFO, dev,
843 				    "%s: CDROM_GET_CAPABILITY [0x5331]\n",
844 				    __func__);
845 		else
846 			sdev_printk(KERN_INFO, dev, "%s: cmd=0x%x\n",
847 				    __func__, cmd);
848 	}
849 	return -EINVAL;
850 	/* return -ENOTTY; // correct return but upsets fdisk */
851 }
852 
853 static void config_cdb_len(struct scsi_device *sdev)
854 {
855 	switch (sdebug_cdb_len) {
856 	case 6:	/* suggest 6 byte READ, WRITE and MODE SENSE/SELECT */
857 		sdev->use_10_for_rw = false;
858 		sdev->use_16_for_rw = false;
859 		sdev->use_10_for_ms = false;
860 		break;
861 	case 10: /* suggest 10 byte RWs and 6 byte MODE SENSE/SELECT */
862 		sdev->use_10_for_rw = true;
863 		sdev->use_16_for_rw = false;
864 		sdev->use_10_for_ms = false;
865 		break;
866 	case 12: /* suggest 10 byte RWs and 10 byte MODE SENSE/SELECT */
867 		sdev->use_10_for_rw = true;
868 		sdev->use_16_for_rw = false;
869 		sdev->use_10_for_ms = true;
870 		break;
871 	case 16:
872 		sdev->use_10_for_rw = false;
873 		sdev->use_16_for_rw = true;
874 		sdev->use_10_for_ms = true;
875 		break;
876 	case 32: /* No knobs to suggest this so same as 16 for now */
877 		sdev->use_10_for_rw = false;
878 		sdev->use_16_for_rw = true;
879 		sdev->use_10_for_ms = true;
880 		break;
881 	default:
882 		pr_warn("unexpected cdb_len=%d, force to 10\n",
883 			sdebug_cdb_len);
884 		sdev->use_10_for_rw = true;
885 		sdev->use_16_for_rw = false;
886 		sdev->use_10_for_ms = false;
887 		sdebug_cdb_len = 10;
888 		break;
889 	}
890 }
891 
892 static void all_config_cdb_len(void)
893 {
894 	struct sdebug_host_info *sdbg_host;
895 	struct Scsi_Host *shost;
896 	struct scsi_device *sdev;
897 
898 	spin_lock(&sdebug_host_list_lock);
899 	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
900 		shost = sdbg_host->shost;
901 		shost_for_each_device(sdev, shost) {
902 			config_cdb_len(sdev);
903 		}
904 	}
905 	spin_unlock(&sdebug_host_list_lock);
906 }
907 
908 static void clear_luns_changed_on_target(struct sdebug_dev_info *devip)
909 {
910 	struct sdebug_host_info *sdhp;
911 	struct sdebug_dev_info *dp;
912 
913 	spin_lock(&sdebug_host_list_lock);
914 	list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
915 		list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
916 			if ((devip->sdbg_host == dp->sdbg_host) &&
917 			    (devip->target == dp->target))
918 				clear_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
919 		}
920 	}
921 	spin_unlock(&sdebug_host_list_lock);
922 }
923 
/* Checks devip for a pending unit attention (UA). If one is found, builds
 * the matching sense data in scp, clears that UA bit and returns
 * check_condition_result; otherwise returns 0. Only the lowest-numbered
 * pending UA is reported per call.
 */
static int make_ua(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	int k;

	k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS);
	if (k != SDEBUG_NUM_UAS) {
		const char *cp = NULL;

		switch (k) {
		case SDEBUG_UA_POR:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
					POWER_ON_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "power on reset";
			break;
		case SDEBUG_UA_BUS_RESET:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
					BUS_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "bus reset";
			break;
		case SDEBUG_UA_MODE_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
					MODE_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "mode parameters changed";
			break;
		case SDEBUG_UA_CAPACITY_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
					CAPACITY_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "capacity data changed";
			break;
		case SDEBUG_UA_MICROCODE_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					MICROCODE_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "microcode has been changed";
			break;
		case SDEBUG_UA_MICROCODE_CHANGED_WO_RESET:
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					MICROCODE_CHANGED_WO_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "microcode has been changed without reset";
			break;
		case SDEBUG_UA_LUNS_CHANGED:
			/*
			 * SPC-3 behavior is to report a UNIT ATTENTION with
			 * ASC/ASCQ REPORTED LUNS DATA HAS CHANGED on every LUN
			 * on the target, until a REPORT LUNS command is
			 * received.  SPC-4 behavior is to report it only once.
			 * NOTE:  sdebug_scsi_level does not use the same
			 * values as struct scsi_device->scsi_level.
			 */
			if (sdebug_scsi_level >= 6)	/* SPC-4 and above */
				clear_luns_changed_on_target(devip);
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					LUNS_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "reported luns data has changed";
			break;
		default:
			pr_warn("unexpected unit attention code=%d\n", k);
			if (sdebug_verbose)
				cp = "unknown";
			break;
		}
		/* the UA is consumed by this report */
		clear_bit(k, devip->uas_bm);
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, scp->device,
				   "%s reports: Unit attention: %s\n",
				   my_name, cp);
		return check_condition_result;
	}
	return 0;
}
1003 
1004 /* Build SCSI "data-in" buffer. Returns 0 if ok else (DID_ERROR << 16). */
1005 static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
1006 				int arr_len)
1007 {
1008 	int act_len;
1009 	struct scsi_data_buffer *sdb = &scp->sdb;
1010 
1011 	if (!sdb->length)
1012 		return 0;
1013 	if (scp->sc_data_direction != DMA_FROM_DEVICE)
1014 		return DID_ERROR << 16;
1015 
1016 	act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
1017 				      arr, arr_len);
1018 	sdb->resid = scsi_bufflen(scp) - act_len;
1019 
1020 	return 0;
1021 }
1022 
1023 /* Partial build of SCSI "data-in" buffer. Returns 0 if ok else
1024  * (DID_ERROR << 16). Can write to offset in data-in buffer. If multiple
1025  * calls, not required to write in ascending offset order. Assumes resid
1026  * set to scsi_bufflen() prior to any calls.
1027  */
1028 static int p_fill_from_dev_buffer(struct scsi_cmnd *scp, const void *arr,
1029 				  int arr_len, unsigned int off_dst)
1030 {
1031 	int act_len, n;
1032 	struct scsi_data_buffer *sdb = &scp->sdb;
1033 	off_t skip = off_dst;
1034 
1035 	if (sdb->length <= off_dst)
1036 		return 0;
1037 	if (scp->sc_data_direction != DMA_FROM_DEVICE)
1038 		return DID_ERROR << 16;
1039 
1040 	act_len = sg_pcopy_from_buffer(sdb->table.sgl, sdb->table.nents,
1041 				       arr, arr_len, skip);
1042 	pr_debug("%s: off_dst=%u, scsi_bufflen=%u, act_len=%u, resid=%d\n",
1043 		 __func__, off_dst, scsi_bufflen(scp), act_len, sdb->resid);
1044 	n = (int)scsi_bufflen(scp) - ((int)off_dst + act_len);
1045 	sdb->resid = min(sdb->resid, n);
1046 	return 0;
1047 }
1048 
1049 /* Fetches from SCSI "data-out" buffer. Returns number of bytes fetched into
1050  * 'arr' or -1 if error.
1051  */
1052 static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
1053 			       int arr_len)
1054 {
1055 	if (!scsi_bufflen(scp))
1056 		return 0;
1057 	if (scp->sc_data_direction != DMA_TO_DEVICE)
1058 		return -1;
1059 
1060 	return scsi_sg_copy_to_buffer(scp, arr, arr_len);
1061 }
1062 
1063 
/* INQUIRY strings: 8, 16 and 4 byte fields, space padded per SPC. */
static char sdebug_inq_vendor_id[9] = "Linux   ";
static char sdebug_inq_product_id[17] = "scsi_debug      ";
static char sdebug_inq_product_rev[5] = SDEBUG_VERSION;
/* Use some locally assigned NAAs for SAS addresses. */
static const u64 naa3_comp_a = 0x3222222000000000ULL;
static const u64 naa3_comp_b = 0x3333333000000000ULL;
static const u64 naa3_comp_c = 0x3111111000000000ULL;
1071 
/* Device identification VPD page. Returns number of bytes placed in arr.
 * Emits, in order: a T10 vendor-id descriptor, (when dev_id_num >= 0) a
 * logical unit identifier (UUID or NAA-3) plus a relative target port
 * descriptor, then NAA-3 target port / port group / target device
 * descriptors and a SCSI name string for the target device.
 */
static int inquiry_vpd_83(unsigned char *arr, int port_group_id,
			  int target_dev_id, int dev_id_num,
			  const char *dev_id_str, int dev_id_str_len,
			  const uuid_t *lu_name)
{
	int num, port_a;
	char b[32];

	port_a = target_dev_id + 1;
	/* T10 vendor identifier field format (faked) */
	arr[0] = 0x2;	/* ASCII */
	arr[1] = 0x1;
	arr[2] = 0x0;
	memcpy(&arr[4], sdebug_inq_vendor_id, 8);
	memcpy(&arr[12], sdebug_inq_product_id, 16);
	memcpy(&arr[28], dev_id_str, dev_id_str_len);
	num = 8 + 16 + dev_id_str_len;
	arr[3] = num;
	num += 4;
	if (dev_id_num >= 0) {
		if (sdebug_uuid_ctl) {
			/* Locally assigned UUID */
			arr[num++] = 0x1;  /* binary (not necessarily sas) */
			arr[num++] = 0xa;  /* PIV=0, lu, naa */
			arr[num++] = 0x0;
			arr[num++] = 0x12;
			arr[num++] = 0x10; /* uuid type=1, locally assigned */
			arr[num++] = 0x0;
			memcpy(arr + num, lu_name, 16);
			num += 16;
		} else {
			/* NAA-3, Logical unit identifier (binary) */
			arr[num++] = 0x1;  /* binary (not necessarily sas) */
			arr[num++] = 0x3;  /* PIV=0, lu, naa */
			arr[num++] = 0x0;
			arr[num++] = 0x8;
			put_unaligned_be64(naa3_comp_b + dev_id_num, arr + num);
			num += 8;
		}
		/* Target relative port number */
		arr[num++] = 0x61;	/* proto=sas, binary */
		arr[num++] = 0x94;	/* PIV=1, target port, rel port */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x4;	/* length */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x0;
		arr[num++] = 0x1;	/* relative port A */
	}
	/* NAA-3, Target port identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x93;	/* piv=1, target port, naa */
	arr[num++] = 0x0;
	arr[num++] = 0x8;
	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
	num += 8;
	/* NAA-3, Target port group identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x95;	/* piv=1, target port group id */
	arr[num++] = 0x0;
	arr[num++] = 0x4;
	arr[num++] = 0;
	arr[num++] = 0;
	put_unaligned_be16(port_group_id, arr + num);
	num += 2;
	/* NAA-3, Target device identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0xa3;	/* piv=1, target device, naa */
	arr[num++] = 0x0;
	arr[num++] = 0x8;
	put_unaligned_be64(naa3_comp_a + target_dev_id, arr + num);
	num += 8;
	/* SCSI name string: Target device identifier */
	arr[num++] = 0x63;	/* proto=sas, UTF-8 */
	arr[num++] = 0xa8;	/* piv=1, target device, SCSI name string */
	arr[num++] = 0x0;
	arr[num++] = 24;
	memcpy(arr + num, "naa.32222220", 12);
	num += 12;
	snprintf(b, sizeof(b), "%08X", target_dev_id);
	memcpy(arr + num, b, 8);
	num += 8;
	memset(arr + num, 0, 4);	/* pad name string to 24 bytes */
	num += 4;
	return num;
}
1159 
/* Canned payload for VPD page 0x84 (three fake software identifiers). */
static unsigned char vpd84_data[] = {
/* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
    0x22,0x22,0x22,0x0,0xbb,0x1,
    0x22,0x22,0x22,0x0,0xbb,0x2,
};

/*  Software interface identification VPD page */
/* Copies the canned page into arr; returns its length. */
static int inquiry_vpd_84(unsigned char *arr)
{
	memcpy(arr, vpd84_data, sizeof(vpd84_data));
	return sizeof(vpd84_data);
}
1172 
/* Management network addresses VPD page: two fake network service
 * descriptors (storage configuration and logging URLs). Each address is
 * NULL terminated and zero padded to a multiple of 4 bytes. Returns the
 * number of bytes written to arr.
 */
static int inquiry_vpd_85(unsigned char *arr)
{
	const char *na1 = "https://www.kernel.org/config";
	const char *na2 = "http://www.kernel.org/log";
	int num = 0;
	int olen, plen;

	arr[num++] = 0x1;	/* lu, storage config */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;
	olen = strlen(na1);
	plen = (olen + 1 + 3) & ~0x3;	/* +1 for NULL, round up to 4 */
	arr[num++] = plen;
	memcpy(arr + num, na1, olen);
	memset(arr + num + olen, 0, plen - olen);
	num += plen;

	arr[num++] = 0x4;	/* lu, logging */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;
	olen = strlen(na2);
	plen = (olen + 1 + 3) & ~0x3;	/* +1 for NULL, round up to 4 */
	arr[num++] = plen;
	memcpy(arr + num, na2, olen);
	memset(arr + num + olen, 0, plen - olen);
	num += plen;

	return num;
}
1207 
1208 /* SCSI ports VPD page */
1209 static int inquiry_vpd_88(unsigned char *arr, int target_dev_id)
1210 {
1211 	int num = 0;
1212 	int port_a, port_b;
1213 
1214 	port_a = target_dev_id + 1;
1215 	port_b = port_a + 1;
1216 	arr[num++] = 0x0;	/* reserved */
1217 	arr[num++] = 0x0;	/* reserved */
1218 	arr[num++] = 0x0;
1219 	arr[num++] = 0x1;	/* relative port 1 (primary) */
1220 	memset(arr + num, 0, 6);
1221 	num += 6;
1222 	arr[num++] = 0x0;
1223 	arr[num++] = 12;	/* length tp descriptor */
1224 	/* naa-5 target port identifier (A) */
1225 	arr[num++] = 0x61;	/* proto=sas, binary */
1226 	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
1227 	arr[num++] = 0x0;	/* reserved */
1228 	arr[num++] = 0x8;	/* length */
1229 	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
1230 	num += 8;
1231 	arr[num++] = 0x0;	/* reserved */
1232 	arr[num++] = 0x0;	/* reserved */
1233 	arr[num++] = 0x0;
1234 	arr[num++] = 0x2;	/* relative port 2 (secondary) */
1235 	memset(arr + num, 0, 6);
1236 	num += 6;
1237 	arr[num++] = 0x0;
1238 	arr[num++] = 12;	/* length tp descriptor */
1239 	/* naa-5 target port identifier (B) */
1240 	arr[num++] = 0x61;	/* proto=sas, binary */
1241 	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
1242 	arr[num++] = 0x0;	/* reserved */
1243 	arr[num++] = 0x8;	/* length */
1244 	put_unaligned_be64(naa3_comp_a + port_b, arr + num);
1245 	num += 8;
1246 
1247 	return num;
1248 }
1249 
1250 
/* Canned ATA IDENTIFY-style payload for VPD page 0x89 (ATA Information).
 * Layout/content kept as-is; do not edit individual bytes.
 */
static unsigned char vpd89_data[] = {
/* from 4th byte */ 0,0,0,0,
'l','i','n','u','x',' ',' ',' ',
'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
'1','2','3','4',
0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
0xec,0,0,0,
0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
0x53,0x41,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
0x20,0x20,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
0x10,0x80,
0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
};

/* ATA Information VPD page */
/* Copies the canned page into arr; returns its length. */
static int inquiry_vpd_89(unsigned char *arr)
{
	memcpy(arr, vpd89_data, sizeof(vpd89_data));
	return sizeof(vpd89_data);
}
1301 
1302 
1303 static unsigned char vpdb0_data[] = {
1304 	/* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
1305 	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1306 	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1307 	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1308 };
1309 
1310 /* Block limits VPD page (SBC-3) */
1311 static int inquiry_vpd_b0(unsigned char *arr)
1312 {
1313 	unsigned int gran;
1314 
1315 	memcpy(arr, vpdb0_data, sizeof(vpdb0_data));
1316 
1317 	/* Optimal transfer length granularity */
1318 	if (sdebug_opt_xferlen_exp != 0 &&
1319 	    sdebug_physblk_exp < sdebug_opt_xferlen_exp)
1320 		gran = 1 << sdebug_opt_xferlen_exp;
1321 	else
1322 		gran = 1 << sdebug_physblk_exp;
1323 	put_unaligned_be16(gran, arr + 2);
1324 
1325 	/* Maximum Transfer Length */
1326 	if (sdebug_store_sectors > 0x400)
1327 		put_unaligned_be32(sdebug_store_sectors, arr + 4);
1328 
1329 	/* Optimal Transfer Length */
1330 	put_unaligned_be32(sdebug_opt_blks, &arr[8]);
1331 
1332 	if (sdebug_lbpu) {
1333 		/* Maximum Unmap LBA Count */
1334 		put_unaligned_be32(sdebug_unmap_max_blocks, &arr[16]);
1335 
1336 		/* Maximum Unmap Block Descriptor Count */
1337 		put_unaligned_be32(sdebug_unmap_max_desc, &arr[20]);
1338 	}
1339 
1340 	/* Unmap Granularity Alignment */
1341 	if (sdebug_unmap_alignment) {
1342 		put_unaligned_be32(sdebug_unmap_alignment, &arr[28]);
1343 		arr[28] |= 0x80; /* UGAVALID */
1344 	}
1345 
1346 	/* Optimal Unmap Granularity */
1347 	put_unaligned_be32(sdebug_unmap_granularity, &arr[24]);
1348 
1349 	/* Maximum WRITE SAME Length */
1350 	put_unaligned_be64(sdebug_write_same_length, &arr[32]);
1351 
1352 	return 0x3c; /* Mandatory page length for Logical Block Provisioning */
1353 
1354 	return sizeof(vpdb0_data);
1355 }
1356 
/* Block device characteristics VPD page (SBC-3): advertises a
 * non-rotating (solid state) medium with a sub-1.8" form factor.
 * Returns the page length.
 */
static int inquiry_vpd_b1(unsigned char *arr)
{
	memset(arr, 0, 0x3c);
	/* MEDIUM ROTATION RATE == 1: non rotating medium (e.g. SSD) */
	arr[1] = 1;
	/* NOMINAL FORM FACTOR == 5: less than 1.8" */
	arr[3] = 5;

	return 0x3c;
}
1368 
1369 /* Logical block provisioning VPD page (SBC-4) */
1370 static int inquiry_vpd_b2(unsigned char *arr)
1371 {
1372 	memset(arr, 0, 0x4);
1373 	arr[0] = 0;			/* threshold exponent */
1374 	if (sdebug_lbpu)
1375 		arr[1] = 1 << 7;
1376 	if (sdebug_lbpws)
1377 		arr[1] |= 1 << 6;
1378 	if (sdebug_lbpws10)
1379 		arr[1] |= 1 << 5;
1380 	if (sdebug_lbprz && scsi_debug_lbp())
1381 		arr[1] |= (sdebug_lbprz & 0x7) << 2;  /* sbc4r07 and later */
1382 	/* anc_sup=0; dp=0 (no provisioning group descriptor) */
1383 	/* minimum_percentage=0; provisioning_type=0 (unknown) */
1384 	/* threshold_percentage=0 */
1385 	return 0x4;
1386 }
1387 
#define SDEBUG_LONG_INQ_SZ 96
#define SDEBUG_MAX_INQ_ARR_SZ 584

/* Respond to the INQUIRY command: either one of the VPD pages (EVPD bit
 * set) or the standard 96 byte inquiry response. CMDDT is obsolete and
 * rejected. Returns 0, check_condition_result, or DID_REQUEUE << 16 on
 * allocation failure.
 */
static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	unsigned char pq_pdt;
	unsigned char *arr;
	unsigned char *cmd = scp->cmnd;
	int alloc_len, n, ret;
	bool have_wlun, is_disk;

	alloc_len = get_unaligned_be16(cmd + 3);
	arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
	if (! arr)
		return DID_REQUEUE << 16;
	is_disk = (sdebug_ptype == TYPE_DISK);
	have_wlun = scsi_is_wlun(scp->device->lun);
	if (have_wlun)
		pq_pdt = TYPE_WLUN;	/* present, wlun */
	else if (sdebug_no_lun_0 && (devip->lun == SDEBUG_LUN_0_VAL))
		pq_pdt = 0x7f;	/* not present, PQ=3, PDT=0x1f */
	else
		pq_pdt = (sdebug_ptype & 0x1f);
	arr[0] = pq_pdt;
	if (0x2 & cmd[1]) {  /* CMDDT bit set */
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
		kfree(arr);
		return check_condition_result;
	} else if (0x1 & cmd[1]) {  /* EVPD bit set */
		int lu_id_num, port_group_id, target_dev_id, len;
		char lu_id_str[6];
		int host_no = devip->sdbg_host->shost->host_no;

		port_group_id = (((host_no + 1) & 0x7f) << 8) +
		    (devip->channel & 0x7f);
		if (sdebug_vpd_use_hostno == 0)
			host_no = 0;
		lu_id_num = have_wlun ? -1 : (((host_no + 1) * 2000) +
			    (devip->target * 1000) + devip->lun);
		target_dev_id = ((host_no + 1) * 2000) +
				 (devip->target * 1000) - 3;
		len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
		/* cmd[2] selects the VPD page; arr[1] echoes it back */
		if (0 == cmd[2]) { /* supported vital product data pages */
			arr[1] = cmd[2];	/*sanity */
			n = 4;
			arr[n++] = 0x0;   /* this page */
			arr[n++] = 0x80;  /* unit serial number */
			arr[n++] = 0x83;  /* device identification */
			arr[n++] = 0x84;  /* software interface ident. */
			arr[n++] = 0x85;  /* management network addresses */
			arr[n++] = 0x86;  /* extended inquiry */
			arr[n++] = 0x87;  /* mode page policy */
			arr[n++] = 0x88;  /* SCSI ports */
			if (is_disk) {	  /* SBC only */
				arr[n++] = 0x89;  /* ATA information */
				arr[n++] = 0xb0;  /* Block limits */
				arr[n++] = 0xb1;  /* Block characteristics */
				arr[n++] = 0xb2;  /* Logical Block Prov */
			}
			arr[3] = n - 4;	  /* number of supported VPD pages */
		} else if (0x80 == cmd[2]) { /* unit serial number */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = len;
			memcpy(&arr[4], lu_id_str, len);
		} else if (0x83 == cmd[2]) { /* device identification */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = inquiry_vpd_83(&arr[4], port_group_id,
						target_dev_id, lu_id_num,
						lu_id_str, len,
						&devip->lu_name);
		} else if (0x84 == cmd[2]) { /* Software interface ident. */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = inquiry_vpd_84(&arr[4]);
		} else if (0x85 == cmd[2]) { /* Management network addresses */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = inquiry_vpd_85(&arr[4]);
		} else if (0x86 == cmd[2]) { /* extended inquiry */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = 0x3c;	/* number of following entries */
			if (sdebug_dif == T10_PI_TYPE3_PROTECTION)
				arr[4] = 0x4;	/* SPT: GRD_CHK:1 */
			else if (have_dif_prot)
				arr[4] = 0x5;   /* SPT: GRD_CHK:1, REF_CHK:1 */
			else
				arr[4] = 0x0;   /* no protection stuff */
			arr[5] = 0x7;   /* head of q, ordered + simple q's */
		} else if (0x87 == cmd[2]) { /* mode page policy */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = 0x8;	/* number of following entries */
			arr[4] = 0x2;	/* disconnect-reconnect mp */
			arr[6] = 0x80;	/* mlus, shared */
			arr[8] = 0x18;	 /* protocol specific lu */
			arr[10] = 0x82;	 /* mlus, per initiator port */
		} else if (0x88 == cmd[2]) { /* SCSI Ports */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = inquiry_vpd_88(&arr[4], target_dev_id);
		} else if (is_disk && 0x89 == cmd[2]) { /* ATA information */
			arr[1] = cmd[2];        /*sanity */
			n = inquiry_vpd_89(&arr[4]);
			put_unaligned_be16(n, arr + 2);
		} else if (is_disk && 0xb0 == cmd[2]) { /* Block limits */
			arr[1] = cmd[2];        /*sanity */
			arr[3] = inquiry_vpd_b0(&arr[4]);
		} else if (is_disk && 0xb1 == cmd[2]) { /* Block char. */
			arr[1] = cmd[2];        /*sanity */
			arr[3] = inquiry_vpd_b1(&arr[4]);
		} else if (is_disk && 0xb2 == cmd[2]) { /* LB Prov. */
			arr[1] = cmd[2];        /*sanity */
			arr[3] = inquiry_vpd_b2(&arr[4]);
		} else {
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
			kfree(arr);
			return check_condition_result;
		}
		/* clamp response to page length, allocation length, buffer */
		len = min(get_unaligned_be16(arr + 2) + 4, alloc_len);
		ret = fill_from_dev_buffer(scp, arr,
			    min(len, SDEBUG_MAX_INQ_ARR_SZ));
		kfree(arr);
		return ret;
	}
	/* drops through here for a standard inquiry */
	arr[1] = sdebug_removable ? 0x80 : 0;	/* Removable disk */
	arr[2] = sdebug_scsi_level;
	arr[3] = 2;    /* response_data_format==2 */
	arr[4] = SDEBUG_LONG_INQ_SZ - 5;
	arr[5] = (int)have_dif_prot;	/* PROTECT bit */
	if (sdebug_vpd_use_hostno == 0)
		arr[5] |= 0x10; /* claim: implicit TPGS */
	arr[6] = 0x10; /* claim: MultiP */
	/* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
	arr[7] = 0xa; /* claim: LINKED + CMDQUE */
	memcpy(&arr[8], sdebug_inq_vendor_id, 8);
	memcpy(&arr[16], sdebug_inq_product_id, 16);
	memcpy(&arr[32], sdebug_inq_product_rev, 4);
	/* Use Vendor Specific area to place driver date in ASCII hex */
	memcpy(&arr[36], sdebug_version_date, 8);
	/* version descriptors (2 bytes each) follow */
	put_unaligned_be16(0xc0, arr + 58);   /* SAM-6 no version claimed */
	put_unaligned_be16(0x5c0, arr + 60);  /* SPC-5 no version claimed */
	n = 62;
	if (is_disk) {		/* SBC-4 no version claimed */
		put_unaligned_be16(0x600, arr + n);
		n += 2;
	} else if (sdebug_ptype == TYPE_TAPE) {	/* SSC-4 rev 3 */
		put_unaligned_be16(0x525, arr + n);
		n += 2;
	}
	put_unaligned_be16(0x2100, arr + n);	/* SPL-4 no version claimed */
	ret = fill_from_dev_buffer(scp, arr,
			    min(alloc_len, SDEBUG_LONG_INQ_SZ));
	kfree(arr);
	return ret;
}
1541 
/* Informational exceptions control mode page (0x1c); byte 2 bit 2 is
 * TEST, low nibble of byte 3 is MRIE — consulted by resp_requests(). */
static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
				   0, 0, 0x0, 0x0};
1544 
1545 static int resp_requests(struct scsi_cmnd *scp,
1546 			 struct sdebug_dev_info *devip)
1547 {
1548 	unsigned char *sbuff;
1549 	unsigned char *cmd = scp->cmnd;
1550 	unsigned char arr[SCSI_SENSE_BUFFERSIZE];
1551 	bool dsense;
1552 	int len = 18;
1553 
1554 	memset(arr, 0, sizeof(arr));
1555 	dsense = !!(cmd[1] & 1);
1556 	sbuff = scp->sense_buffer;
1557 	if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
1558 		if (dsense) {
1559 			arr[0] = 0x72;
1560 			arr[1] = 0x0;		/* NO_SENSE in sense_key */
1561 			arr[2] = THRESHOLD_EXCEEDED;
1562 			arr[3] = 0xff;		/* TEST set and MRIE==6 */
1563 			len = 8;
1564 		} else {
1565 			arr[0] = 0x70;
1566 			arr[2] = 0x0;		/* NO_SENSE in sense_key */
1567 			arr[7] = 0xa;   	/* 18 byte sense buffer */
1568 			arr[12] = THRESHOLD_EXCEEDED;
1569 			arr[13] = 0xff;		/* TEST set and MRIE==6 */
1570 		}
1571 	} else {
1572 		memcpy(arr, sbuff, SCSI_SENSE_BUFFERSIZE);
1573 		if (arr[0] >= 0x70 && dsense == sdebug_dsense)
1574 			;	/* have sense and formats match */
1575 		else if (arr[0] <= 0x70) {
1576 			if (dsense) {
1577 				memset(arr, 0, 8);
1578 				arr[0] = 0x72;
1579 				len = 8;
1580 			} else {
1581 				memset(arr, 0, 18);
1582 				arr[0] = 0x70;
1583 				arr[7] = 0xa;
1584 			}
1585 		} else if (dsense) {
1586 			memset(arr, 0, 8);
1587 			arr[0] = 0x72;
1588 			arr[1] = sbuff[2];     /* sense key */
1589 			arr[2] = sbuff[12];    /* asc */
1590 			arr[3] = sbuff[13];    /* ascq */
1591 			len = 8;
1592 		} else {
1593 			memset(arr, 0, 18);
1594 			arr[0] = 0x70;
1595 			arr[2] = sbuff[1];
1596 			arr[7] = 0xa;
1597 			arr[12] = sbuff[1];
1598 			arr[13] = sbuff[3];
1599 		}
1600 
1601 	}
1602 	mk_sense_buffer(scp, 0, NO_ADDITIONAL_SENSE, 0);
1603 	return fill_from_dev_buffer(scp, arr, len);
1604 }
1605 
/* Respond to the START STOP UNIT command. Only POWER CONDITION 0 is
 * supported. Toggles devip->stopped from bit 0 of cmd[4] (START). When
 * the state actually changes and IMMED is clear, returns 0 (so the caller
 * can apply the non-immediate delay); otherwise returns
 * SDEG_RES_IMMED_MASK to request immediate completion.
 */
static int resp_start_stop(struct scsi_cmnd *scp,
			   struct sdebug_dev_info *devip)
{
	unsigned char *cmd = scp->cmnd;
	int power_cond, stop;
	bool changing;

	power_cond = (cmd[4] & 0xf0) >> 4;
	if (power_cond) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 7);
		return check_condition_result;
	}
	stop = !(cmd[4] & 1);
	changing = atomic_read(&devip->stopped) == !stop;
	atomic_xchg(&devip->stopped, stop);
	if (!changing || cmd[1] & 0x1)  /* state unchanged or IMMED set */
		return SDEG_RES_IMMED_MASK;
	else
		return 0;
}
1626 
1627 static sector_t get_sdebug_capacity(void)
1628 {
1629 	static const unsigned int gibibyte = 1073741824;
1630 
1631 	if (sdebug_virtual_gb > 0)
1632 		return (sector_t)sdebug_virtual_gb *
1633 			(gibibyte / sdebug_sector_size);
1634 	else
1635 		return sdebug_store_sectors;
1636 }
1637 
1638 #define SDEBUG_READCAP_ARR_SZ 8
1639 static int resp_readcap(struct scsi_cmnd *scp,
1640 			struct sdebug_dev_info *devip)
1641 {
1642 	unsigned char arr[SDEBUG_READCAP_ARR_SZ];
1643 	unsigned int capac;
1644 
1645 	/* following just in case virtual_gb changed */
1646 	sdebug_capacity = get_sdebug_capacity();
1647 	memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
1648 	if (sdebug_capacity < 0xffffffff) {
1649 		capac = (unsigned int)sdebug_capacity - 1;
1650 		put_unaligned_be32(capac, arr + 0);
1651 	} else
1652 		put_unaligned_be32(0xffffffff, arr + 0);
1653 	put_unaligned_be16(sdebug_sector_size, arr + 6);
1654 	return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
1655 }
1656 
#define SDEBUG_READCAP16_ARR_SZ 32
/* Respond to READ CAPACITY(16): 8 byte last LBA, 4 byte block length,
 * plus the protection, physical-block, provisioning (LBPME/LBPRZ) and
 * lowest-aligned-LBA fields.
 */
static int resp_readcap16(struct scsi_cmnd *scp,
			  struct sdebug_dev_info *devip)
{
	unsigned char *cmd = scp->cmnd;
	unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
	int alloc_len;

	alloc_len = get_unaligned_be32(cmd + 10);
	/* following just in case virtual_gb changed */
	sdebug_capacity = get_sdebug_capacity();
	memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
	put_unaligned_be64((u64)(sdebug_capacity - 1), arr + 0);
	put_unaligned_be32(sdebug_sector_size, arr + 8);
	arr[13] = sdebug_physblk_exp & 0xf;
	arr[14] = (sdebug_lowest_aligned >> 8) & 0x3f;

	if (scsi_debug_lbp()) {
		arr[14] |= 0x80; /* LBPME */
		/* from sbc4r07, this LBPRZ field is 1 bit, but the LBPRZ in
		 * the LB Provisioning VPD page is 3 bits. Note that lbprz=2
		 * in the wider field maps to 0 in this field.
		 */
		if (sdebug_lbprz & 1)	/* precisely what the draft requires */
			arr[14] |= 0x40;
	}

	arr[15] = sdebug_lowest_aligned & 0xff;

	if (have_dif_prot) {
		arr[12] = (sdebug_dif - 1) << 1; /* P_TYPE */
		arr[12] |= 1; /* PROT_EN */
	}

	return fill_from_dev_buffer(scp, arr,
				    min(alloc_len, SDEBUG_READCAP16_ARR_SZ));
}
1694 
1695 #define SDEBUG_MAX_TGTPGS_ARR_SZ 1412
1696 
1697 static int resp_report_tgtpgs(struct scsi_cmnd *scp,
1698 			      struct sdebug_dev_info *devip)
1699 {
1700 	unsigned char *cmd = scp->cmnd;
1701 	unsigned char *arr;
1702 	int host_no = devip->sdbg_host->shost->host_no;
1703 	int n, ret, alen, rlen;
1704 	int port_group_a, port_group_b, port_a, port_b;
1705 
1706 	alen = get_unaligned_be32(cmd + 6);
1707 	arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
1708 	if (! arr)
1709 		return DID_REQUEUE << 16;
1710 	/*
1711 	 * EVPD page 0x88 states we have two ports, one
1712 	 * real and a fake port with no device connected.
1713 	 * So we create two port groups with one port each
1714 	 * and set the group with port B to unavailable.
1715 	 */
1716 	port_a = 0x1; /* relative port A */
1717 	port_b = 0x2; /* relative port B */
1718 	port_group_a = (((host_no + 1) & 0x7f) << 8) +
1719 			(devip->channel & 0x7f);
1720 	port_group_b = (((host_no + 1) & 0x7f) << 8) +
1721 			(devip->channel & 0x7f) + 0x80;
1722 
1723 	/*
1724 	 * The asymmetric access state is cycled according to the host_id.
1725 	 */
1726 	n = 4;
1727 	if (sdebug_vpd_use_hostno == 0) {
1728 		arr[n++] = host_no % 3; /* Asymm access state */
1729 		arr[n++] = 0x0F; /* claim: all states are supported */
1730 	} else {
1731 		arr[n++] = 0x0; /* Active/Optimized path */
1732 		arr[n++] = 0x01; /* only support active/optimized paths */
1733 	}
1734 	put_unaligned_be16(port_group_a, arr + n);
1735 	n += 2;
1736 	arr[n++] = 0;    /* Reserved */
1737 	arr[n++] = 0;    /* Status code */
1738 	arr[n++] = 0;    /* Vendor unique */
1739 	arr[n++] = 0x1;  /* One port per group */
1740 	arr[n++] = 0;    /* Reserved */
1741 	arr[n++] = 0;    /* Reserved */
1742 	put_unaligned_be16(port_a, arr + n);
1743 	n += 2;
1744 	arr[n++] = 3;    /* Port unavailable */
1745 	arr[n++] = 0x08; /* claim: only unavailalbe paths are supported */
1746 	put_unaligned_be16(port_group_b, arr + n);
1747 	n += 2;
1748 	arr[n++] = 0;    /* Reserved */
1749 	arr[n++] = 0;    /* Status code */
1750 	arr[n++] = 0;    /* Vendor unique */
1751 	arr[n++] = 0x1;  /* One port per group */
1752 	arr[n++] = 0;    /* Reserved */
1753 	arr[n++] = 0;    /* Reserved */
1754 	put_unaligned_be16(port_b, arr + n);
1755 	n += 2;
1756 
1757 	rlen = n - 4;
1758 	put_unaligned_be32(rlen, arr + 0);
1759 
1760 	/*
1761 	 * Return the smallest value of either
1762 	 * - The allocated length
1763 	 * - The constructed command length
1764 	 * - The maximum array size
1765 	 */
1766 	rlen = min(alen,n);
1767 	ret = fill_from_dev_buffer(scp, arr,
1768 				   min(rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
1769 	kfree(arr);
1770 	return ret;
1771 }
1772 
/* Respond to REPORT SUPPORTED OPERATION CODES (MAINTENANCE IN, sa 0xc).
 * Builds either the "all commands" descriptor list (reporting_opts 0) or
 * a single command's usage data (reporting_opts 1-3) from opcode_info_arr.
 * RCTD set appends a command timeout descriptor to each entry.
 */
static int resp_rsup_opcodes(struct scsi_cmnd *scp,
			     struct sdebug_dev_info *devip)
{
	bool rctd;
	u8 reporting_opts, req_opcode, sdeb_i, supp;
	u16 req_sa, u;
	u32 alloc_len, a_len;
	int k, offset, len, errsts, count, bump, na;
	const struct opcode_info_t *oip;
	const struct opcode_info_t *r_oip;
	u8 *arr;
	u8 *cmd = scp->cmnd;

	rctd = !!(cmd[2] & 0x80);	/* Return Commands Timeout Descriptor */
	reporting_opts = cmd[2] & 0x7;
	req_opcode = cmd[3];
	req_sa = get_unaligned_be16(cmd + 4);
	alloc_len = get_unaligned_be32(cmd + 6);
	if (alloc_len < 4 || alloc_len > 0xffff) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
		return check_condition_result;
	}
	/* cap the work buffer at 8 KiB regardless of the allocation length */
	if (alloc_len > 8192)
		a_len = 8192;
	else
		a_len = alloc_len;
	arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_ATOMIC);
	if (NULL == arr) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
				INSUFF_RES_ASCQ);
		return check_condition_result;
	}
	switch (reporting_opts) {
	case 0:	/* all commands */
		/* count number of commands */
		for (count = 0, oip = opcode_info_arr;
		     oip->num_attached != 0xff; ++oip) {
			if (F_INV_OP & oip->flags)
				continue;
			count += (oip->num_attached + 1);
		}
		/* each descriptor is 8 bytes, 20 with a timeout descriptor */
		bump = rctd ? 20 : 8;
		put_unaligned_be32(count * bump, arr);
		for (offset = 4, oip = opcode_info_arr;
		     oip->num_attached != 0xff && offset < a_len; ++oip) {
			if (F_INV_OP & oip->flags)
				continue;
			na = oip->num_attached;
			arr[offset] = oip->opcode;
			put_unaligned_be16(oip->sa, arr + offset + 2);
			if (rctd)
				arr[offset + 5] |= 0x2;	/* CTDP */
			if (FF_SA & oip->flags)
				arr[offset + 5] |= 0x1;	/* SERVACTV */
			put_unaligned_be16(oip->len_mask[0], arr + offset + 6);
			if (rctd)
				put_unaligned_be16(0xa, arr + offset + 8);
			r_oip = oip;
			/* then each command attached to this opcode entry */
			for (k = 0, oip = oip->arrp; k < na; ++k, ++oip) {
				if (F_INV_OP & oip->flags)
					continue;
				offset += bump;
				arr[offset] = oip->opcode;
				put_unaligned_be16(oip->sa, arr + offset + 2);
				if (rctd)
					arr[offset + 5] |= 0x2;
				if (FF_SA & oip->flags)
					arr[offset + 5] |= 0x1;
				put_unaligned_be16(oip->len_mask[0],
						   arr + offset + 6);
				if (rctd)
					put_unaligned_be16(0xa,
							   arr + offset + 8);
			}
			oip = r_oip;
			offset += bump;
		}
		break;
	case 1:	/* one command: opcode only */
	case 2:	/* one command: opcode plus service action */
	case 3:	/* one command: if sa==0 then opcode only else opcode+sa */
		sdeb_i = opcode_ind_arr[req_opcode];
		oip = &opcode_info_arr[sdeb_i];
		if (F_INV_OP & oip->flags) {
			supp = 1;	/* command not supported */
			offset = 4;
		} else {
			if (1 == reporting_opts) {
				if (FF_SA & oip->flags) {
					/* opcode needs a service action */
					mk_sense_invalid_fld(scp, SDEB_IN_CDB,
							     2, 2);
					kfree(arr);
					return check_condition_result;
				}
				req_sa = 0;
			} else if (2 == reporting_opts &&
				   0 == (FF_SA & oip->flags)) {
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1);
				kfree(arr);	/* point at requested sa */
				return check_condition_result;
			}
			if (0 == (FF_SA & oip->flags) &&
			    req_opcode == oip->opcode)
				supp = 3;	/* supported as standard */
			else if (0 == (FF_SA & oip->flags)) {
				/* search the attached commands by opcode */
				na = oip->num_attached;
				for (k = 0, oip = oip->arrp; k < na;
				     ++k, ++oip) {
					if (req_opcode == oip->opcode)
						break;
				}
				supp = (k >= na) ? 1 : 3;
			} else if (req_sa != oip->sa) {
				/* search the attached commands by sa */
				na = oip->num_attached;
				for (k = 0, oip = oip->arrp; k < na;
				     ++k, ++oip) {
					if (req_sa == oip->sa)
						break;
				}
				supp = (k >= na) ? 1 : 3;
			} else
				supp = 3;
			if (3 == supp) {
				/* append the cdb usage bitmap */
				u = oip->len_mask[0];
				put_unaligned_be16(u, arr + 2);
				arr[4] = oip->opcode;
				for (k = 1; k < u; ++k)
					arr[4 + k] = (k < 16) ?
						 oip->len_mask[k] : 0xff;
				offset = 4 + u;
			} else
				offset = 4;
		}
		arr[1] = (rctd ? 0x80 : 0) | supp;
		if (rctd) {
			put_unaligned_be16(0xa, arr + offset);
			offset += 12;
		}
		break;
	default:
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
		kfree(arr);
		return check_condition_result;
	}
	offset = (offset < a_len) ? offset : a_len;
	len = (offset < alloc_len) ? offset : alloc_len;
	errsts = fill_from_dev_buffer(scp, arr, len);
	kfree(arr);
	return errsts;
}
1923 
1924 static int resp_rsup_tmfs(struct scsi_cmnd *scp,
1925 			  struct sdebug_dev_info *devip)
1926 {
1927 	bool repd;
1928 	u32 alloc_len, len;
1929 	u8 arr[16];
1930 	u8 *cmd = scp->cmnd;
1931 
1932 	memset(arr, 0, sizeof(arr));
1933 	repd = !!(cmd[2] & 0x80);
1934 	alloc_len = get_unaligned_be32(cmd + 6);
1935 	if (alloc_len < 4) {
1936 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
1937 		return check_condition_result;
1938 	}
1939 	arr[0] = 0xc8;		/* ATS | ATSS | LURS */
1940 	arr[1] = 0x1;		/* ITNRS */
1941 	if (repd) {
1942 		arr[3] = 0xc;
1943 		len = 16;
1944 	} else
1945 		len = 4;
1946 
1947 	len = (len < alloc_len) ? len : alloc_len;
1948 	return fill_from_dev_buffer(scp, arr, len);
1949 }
1950 
1951 /* <<Following mode page info copied from ST318451LW>> */
1952 
/* Build the Read-Write Error Recovery mode page (0x01) at p.  With
 * pcontrol == 1 (changeable values) every field after the header reads
 * back as zero, i.e. nothing is changeable. */
static int resp_err_recov_pg(unsigned char *p, int pcontrol, int target)
{
	static const unsigned char err_recov_pg[] = {
		0x1, 0xa, 0xc0, 11, 240, 0, 0, 0, 5, 0, 0xff, 0xff};

	memcpy(p, err_recov_pg, sizeof(err_recov_pg));
	if (pcontrol == 1)
		memset(p + 2, 0, sizeof(err_recov_pg) - 2);
	return sizeof(err_recov_pg);
}
1963 
/* Build the Disconnect-Reconnect mode page (0x02) at p.  With
 * pcontrol == 1 all fields past the header read back as zero. */
static int resp_disconnect_pg(unsigned char *p, int pcontrol, int target)
{
	static const unsigned char disconnect_pg[] = {
		0x2, 0xe, 128, 128, 0, 10, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0};

	memcpy(p, disconnect_pg, sizeof(disconnect_pg));
	if (pcontrol == 1)
		memset(p + 2, 0, sizeof(disconnect_pg) - 2);
	return sizeof(disconnect_pg);
}
1974 
1975 static int resp_format_pg(unsigned char *p, int pcontrol, int target)
1976 {       /* Format device page for mode_sense */
1977 	unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
1978 				     0, 0, 0, 0, 0, 0, 0, 0,
1979 				     0, 0, 0, 0, 0x40, 0, 0, 0};
1980 
1981 	memcpy(p, format_pg, sizeof(format_pg));
1982 	put_unaligned_be16(sdebug_sectors_per, p + 10);
1983 	put_unaligned_be16(sdebug_sector_size, p + 12);
1984 	if (sdebug_removable)
1985 		p[20] |= 0x20; /* should agree with INQUIRY */
1986 	if (1 == pcontrol)
1987 		memset(p + 2, 0, sizeof(format_pg) - 2);
1988 	return sizeof(format_pg);
1989 }
1990 
/* Caching mode page (0x8) "current values"; mutable module-wide state:
 * resp_caching_pg() copies it out and MODE SELECT may overwrite it. */
static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
				     0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,
				     0, 0, 0, 0};
1994 
/* Build the Caching mode page (0x8) for MODE SENSE.  pcontrol selects
 * current (0), changeable (1) or default (2) values. */
static int resp_caching_pg(unsigned char *p, int pcontrol, int target)
{ 	/* Caching page for mode_sense */
	/* changeable-values mask: only the WCE bit (byte 2, bit 2) */
	unsigned char ch_caching_pg[] = {/* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
	/* default values, mirrors the initial contents of caching_pg */
	unsigned char d_caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
		0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,     0, 0, 0, 0};

	/* note: updates the module-wide caching_pg backing store */
	if (SDEBUG_OPT_N_WCE & sdebug_opts)
		caching_pg[2] &= ~0x4;	/* set WCE=0 (default WCE=1) */
	memcpy(p, caching_pg, sizeof(caching_pg));
	if (1 == pcontrol)
		memcpy(p + 2, ch_caching_pg, sizeof(ch_caching_pg));
	else if (2 == pcontrol)
		memcpy(p, d_caching_pg, sizeof(d_caching_pg));
	return sizeof(caching_pg);
}
2011 
/* Control mode page (0xa) "current values"; mutable module-wide state:
 * resp_ctrl_m_pg() refreshes it and MODE SELECT may overwrite it. */
static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
				    0, 0, 0x2, 0x4b};
2014 
/* Build the Control mode page (0xa) for MODE SENSE.  pcontrol selects
 * current (0), changeable (1) or default (2) values. */
static int resp_ctrl_m_pg(unsigned char *p, int pcontrol, int target)
{ 	/* Control mode page for mode_sense */
	/* changeable-values mask: D_SENSE and GLTSD bits in byte 2 */
	unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
					0, 0, 0, 0};
	unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
				     0, 0, 0x2, 0x4b};

	/* refresh the shared ctrl_m_pg from the current module parameters */
	if (sdebug_dsense)
		ctrl_m_pg[2] |= 0x4;	/* D_SENSE: descriptor sense format */
	else
		ctrl_m_pg[2] &= ~0x4;

	if (sdebug_ato)
		ctrl_m_pg[5] |= 0x80; /* ATO=1 */

	memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
	if (1 == pcontrol)
		memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
	else if (2 == pcontrol)
		memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
	return sizeof(ctrl_m_pg);
}
2037 
2038 
/* Build the Informational Exceptions Control mode page (0x1c) for MODE
 * SENSE.  Current values come from the shared, MODE SELECT-able
 * iec_m_pg array. */
static int resp_iec_m_pg(unsigned char *p, int pcontrol, int target)
{	/* Informational Exceptions control mode page for mode_sense */
	/* changeable-values mask: TEST bit (byte 2) and MRIE field (byte 3) */
	unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
				       0, 0, 0x0, 0x0};
	unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
				      0, 0, 0x0, 0x0};

	memcpy(p, iec_m_pg, sizeof(iec_m_pg));
	if (1 == pcontrol)
		memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
	else if (2 == pcontrol)
		memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
	return sizeof(iec_m_pg);
}
2053 
/* Build the SAS SSP short-format protocol specific port mode page (0x19)
 * at p; pcontrol == 1 reports all fields as non-changeable. */
static int resp_sas_sf_m_pg(unsigned char *p, int pcontrol, int target)
{
	static const unsigned char sas_sf_m_pg[] = {
		0x19, 0x6, 0x6, 0x0, 0x7, 0xd0, 0x0, 0x0};

	memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg));
	if (pcontrol == 1)
		memset(p + 2, 0, sizeof(sas_sf_m_pg) - 2);
	return sizeof(sas_sf_m_pg);
}
2064 
2065 
/* Build the SAS Phy Control And Discover mode subpage (0x19/0x1): two phy
 * descriptors; SAS addresses and phy identifiers are patched into the
 * template before it is copied out to p. */
static int resp_sas_pcd_m_spg(unsigned char *p, int pcontrol, int target,
			      int target_dev_id)
{	/* SAS phy control and discover mode page for mode_sense */
	unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
		    0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
		    0x2, 0, 0, 0, 0, 0, 0, 0,
		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
		    0, 0, 0, 0, 0, 0, 0, 0,
		    0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
		    0x3, 0, 0, 0, 0, 0, 0, 0,
		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
		    0, 0, 0, 0, 0, 0, 0, 0,
		};
	int port_a, port_b;

	/* fill in the SAS address fields of both phy descriptors */
	put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 16);
	put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 24);
	put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 64);
	put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 72);
	/* per-phy identifiers derived from the target device id */
	port_a = target_dev_id + 1;
	port_b = port_a + 1;
	memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
	put_unaligned_be32(port_a, p + 20);
	put_unaligned_be32(port_b, p + 48 + 20);
	if (1 == pcontrol)
		memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
	return sizeof(sas_pcd_m_pg);
}
2098 
/* Build the SAS SSP shared protocol specific port mode subpage (0x19/0x2)
 * at p; pcontrol == 1 reports all fields as non-changeable. */
static int resp_sas_sha_m_spg(unsigned char *p, int pcontrol)
{
	static const unsigned char sas_sha_m_pg[] = {
		0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
		0, 0, 0, 0, 0, 0, 0, 0,
	};

	memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg));
	if (pcontrol == 1)
		memset(p + 4, 0, sizeof(sas_sha_m_pg) - 4);
	return sizeof(sas_sha_m_pg);
}
2110 
#define SDEBUG_MAX_MSENSE_SZ 256

/* Respond to MODE SENSE(6) and MODE SENSE(10).  Builds the mode parameter
 * header, an optional block descriptor (8 or 16 bytes) and the requested
 * mode page(s), then returns as much as the allocation length permits. */
static int resp_mode_sense(struct scsi_cmnd *scp,
			   struct sdebug_dev_info *devip)
{
	int pcontrol, pcode, subpcode, bd_len;
	unsigned char dev_spec;
	int alloc_len, offset, len, target_dev_id;
	int target = scp->device->id;
	unsigned char *ap;
	unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
	unsigned char *cmd = scp->cmnd;
	bool dbd, llbaa, msense_6, is_disk, bad_pcode;

	dbd = !!(cmd[1] & 0x8);		/* disable block descriptors */
	pcontrol = (cmd[2] & 0xc0) >> 6;	/* 0=current 1=changeable 2=default 3=saved */
	pcode = cmd[2] & 0x3f;		/* requested page code */
	subpcode = cmd[3];
	msense_6 = (MODE_SENSE == cmd[0]);
	llbaa = msense_6 ? false : !!(cmd[1] & 0x10);	/* long LBA descriptor */
	is_disk = (sdebug_ptype == TYPE_DISK);
	if (is_disk && !dbd)
		bd_len = llbaa ? 16 : 8;
	else
		bd_len = 0;
	alloc_len = msense_6 ? cmd[4] : get_unaligned_be16(cmd + 7);
	memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
	if (0x3 == pcontrol) {  /* Saving values not supported */
		mk_sense_buffer(scp, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, 0);
		return check_condition_result;
	}
	target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
			(devip->target * 1000) - 3;
	/* for disks set DPOFUA bit and clear write protect (WP) bit */
	if (is_disk)
		dev_spec = 0x10;	/* =0x90 if WP=1 implies read-only */
	else
		dev_spec = 0x0;
	if (msense_6) {
		arr[2] = dev_spec;
		arr[3] = bd_len;
		offset = 4;
	} else {
		arr[3] = dev_spec;
		if (16 == bd_len)
			arr[4] = 0x1;	/* set LONGLBA bit */
		arr[7] = bd_len;	/* assume 255 or less */
		offset = 8;
	}
	ap = arr + offset;
	if ((bd_len > 0) && (!sdebug_capacity))
		sdebug_capacity = get_sdebug_capacity();

	if (8 == bd_len) {
		/* short block descriptor: 32-bit block count, block length */
		if (sdebug_capacity > 0xfffffffe)
			put_unaligned_be32(0xffffffff, ap + 0);
		else
			put_unaligned_be32(sdebug_capacity, ap + 0);
		put_unaligned_be16(sdebug_sector_size, ap + 6);
		offset += bd_len;
		ap = arr + offset;
	} else if (16 == bd_len) {
		/* long LBA block descriptor: 64-bit block count */
		put_unaligned_be64((u64)sdebug_capacity, ap + 0);
		put_unaligned_be32(sdebug_sector_size, ap + 12);
		offset += bd_len;
		ap = arr + offset;
	}

	if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) {
		/* TODO: Control Extension page */
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
		return check_condition_result;
	}
	bad_pcode = false;

	switch (pcode) {
	case 0x1:	/* Read-Write error recovery page, direct access */
		len = resp_err_recov_pg(ap, pcontrol, target);
		offset += len;
		break;
	case 0x2:	/* Disconnect-Reconnect page, all devices */
		len = resp_disconnect_pg(ap, pcontrol, target);
		offset += len;
		break;
	case 0x3:       /* Format device page, direct access */
		if (is_disk) {
			len = resp_format_pg(ap, pcontrol, target);
			offset += len;
		} else
			bad_pcode = true;
		break;
	case 0x8:	/* Caching page, direct access */
		if (is_disk) {
			len = resp_caching_pg(ap, pcontrol, target);
			offset += len;
		} else
			bad_pcode = true;
		break;
	case 0xa:	/* Control Mode page, all devices */
		len = resp_ctrl_m_pg(ap, pcontrol, target);
		offset += len;
		break;
	case 0x19:	/* if spc==1 then sas phy, control+discover */
		if ((subpcode > 0x2) && (subpcode < 0xff)) {
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
			return check_condition_result;
		}
		len = 0;
		if ((0x0 == subpcode) || (0xff == subpcode))
			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
		if ((0x1 == subpcode) || (0xff == subpcode))
			len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
						  target_dev_id);
		if ((0x2 == subpcode) || (0xff == subpcode))
			len += resp_sas_sha_m_spg(ap + len, pcontrol);
		offset += len;
		break;
	case 0x1c:	/* Informational Exceptions Mode page, all devices */
		len = resp_iec_m_pg(ap, pcontrol, target);
		offset += len;
		break;
	case 0x3f:	/* Read all Mode pages */
		if ((0 == subpcode) || (0xff == subpcode)) {
			len = resp_err_recov_pg(ap, pcontrol, target);
			len += resp_disconnect_pg(ap + len, pcontrol, target);
			if (is_disk) {
				len += resp_format_pg(ap + len, pcontrol,
						      target);
				len += resp_caching_pg(ap + len, pcontrol,
						       target);
			}
			len += resp_ctrl_m_pg(ap + len, pcontrol, target);
			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
			if (0xff == subpcode) {
				len += resp_sas_pcd_m_spg(ap + len, pcontrol,
						  target, target_dev_id);
				len += resp_sas_sha_m_spg(ap + len, pcontrol);
			}
			len += resp_iec_m_pg(ap + len, pcontrol, target);
			offset += len;
		} else {
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
			return check_condition_result;
		}
		break;
	default:
		bad_pcode = true;
		break;
	}
	if (bad_pcode) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
		return check_condition_result;
	}
	/* fill in the mode data length now that all pages are in place */
	if (msense_6)
		arr[0] = offset - 1;
	else
		put_unaligned_be16((offset - 2), arr + 0);
	return fill_from_dev_buffer(scp, arr, min(alloc_len, offset));
}
2270 
#define SDEBUG_MAX_MSELECT_SZ 512

/* Respond to MODE SELECT(6)/(10).  Accepts a single mode page per
 * command; only the Caching (0x8), Control (0xa) and Informational
 * Exceptions (0x1c) pages may be changed.  Accepted values are stored in
 * the module-wide page arrays and a MODE PARAMETERS CHANGED unit
 * attention is flagged on the device. */
static int resp_mode_select(struct scsi_cmnd *scp,
			    struct sdebug_dev_info *devip)
{
	int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
	int param_len, res, mpage;
	unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
	unsigned char *cmd = scp->cmnd;
	int mselect6 = (MODE_SELECT == cmd[0]);

	memset(arr, 0, sizeof(arr));
	pf = cmd[1] & 0x10;	/* page format: must be set */
	sp = cmd[1] & 0x1;	/* save pages: not supported */
	param_len = mselect6 ? cmd[4] : get_unaligned_be16(cmd + 7);
	if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1);
		return check_condition_result;
	}
	res = fetch_to_dev_buffer(scp, arr, param_len);
	if (-1 == res)
		return DID_ERROR << 16;
	else if (sdebug_verbose && (res < param_len))
		sdev_printk(KERN_INFO, scp->device,
			    "%s: cdb indicated=%d, IO sent=%d bytes\n",
			    __func__, param_len, res);
	md_len = mselect6 ? (arr[0] + 1) : (get_unaligned_be16(arr + 0) + 2);
	bd_len = mselect6 ? arr[3] : get_unaligned_be16(arr + 6);
	/* the mode data length field is reserved in MODE SELECT data-out */
	if (md_len > 2) {
		mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
		return check_condition_result;
	}
	off = bd_len + (mselect6 ? 4 : 8);	/* start of the mode page */
	mpage = arr[off] & 0x3f;
	ps = !!(arr[off] & 0x80);	/* PS bit must be clear */
	if (ps) {
		mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 7);
		return check_condition_result;
	}
	spf = !!(arr[off] & 0x40);	/* sub-page format */
	pg_len = spf ? (get_unaligned_be16(arr + off + 2) + 4) :
		       (arr[off + 1] + 2);
	if ((pg_len + off) > param_len) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST,
				PARAMETER_LIST_LENGTH_ERR, 0);
		return check_condition_result;
	}
	switch (mpage) {
	case 0x8:      /* Caching Mode page */
		if (caching_pg[1] == arr[off + 1]) {
			memcpy(caching_pg + 2, arr + off + 2,
			       sizeof(caching_pg) - 2);
			goto set_mode_changed_ua;
		}
		break;
	case 0xa:      /* Control Mode page */
		if (ctrl_m_pg[1] == arr[off + 1]) {
			memcpy(ctrl_m_pg + 2, arr + off + 2,
			       sizeof(ctrl_m_pg) - 2);
			/* keep the sense format in step with D_SENSE */
			sdebug_dsense = !!(ctrl_m_pg[2] & 0x4);
			goto set_mode_changed_ua;
		}
		break;
	case 0x1c:      /* Informational Exceptions Mode page */
		if (iec_m_pg[1] == arr[off + 1]) {
			memcpy(iec_m_pg + 2, arr + off + 2,
			       sizeof(iec_m_pg) - 2);
			goto set_mode_changed_ua;
		}
		break;
	default:
		break;
	}
	mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 5);
	return check_condition_result;
set_mode_changed_ua:
	set_bit(SDEBUG_UA_MODE_CHANGED, devip->uas_bm);
	return 0;
}
2350 
/* Build the Temperature log page (0xd) payload: two 6-byte parameters,
 * the current temperature (38 C) and the reference temperature (65 C). */
static int resp_temp_l_pg(unsigned char *arr)
{
	static const unsigned char cur_temp[] = {0x0, 0x0, 0x3, 0x2, 0x0, 38};
	static const unsigned char ref_temp[] = {0x0, 0x1, 0x3, 0x2, 0x0, 65};

	memcpy(arr, cur_temp, sizeof(cur_temp));
	memcpy(arr + sizeof(cur_temp), ref_temp, sizeof(ref_temp));
	return sizeof(cur_temp) + sizeof(ref_temp);
}
2360 
2361 static int resp_ie_l_pg(unsigned char *arr)
2362 {
2363 	unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
2364 		};
2365 
2366 	memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
2367 	if (iec_m_pg[2] & 0x4) {	/* TEST bit set */
2368 		arr[4] = THRESHOLD_EXCEEDED;
2369 		arr[5] = 0xff;
2370 	}
2371 	return sizeof(ie_l_pg);
2372 }
2373 
2374 #define SDEBUG_MAX_LSENSE_SZ 512
2375 
2376 static int resp_log_sense(struct scsi_cmnd *scp,
2377 			  struct sdebug_dev_info *devip)
2378 {
2379 	int ppc, sp, pcode, subpcode, alloc_len, len, n;
2380 	unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
2381 	unsigned char *cmd = scp->cmnd;
2382 
2383 	memset(arr, 0, sizeof(arr));
2384 	ppc = cmd[1] & 0x2;
2385 	sp = cmd[1] & 0x1;
2386 	if (ppc || sp) {
2387 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, ppc ? 1 : 0);
2388 		return check_condition_result;
2389 	}
2390 	pcode = cmd[2] & 0x3f;
2391 	subpcode = cmd[3] & 0xff;
2392 	alloc_len = get_unaligned_be16(cmd + 7);
2393 	arr[0] = pcode;
2394 	if (0 == subpcode) {
2395 		switch (pcode) {
2396 		case 0x0:	/* Supported log pages log page */
2397 			n = 4;
2398 			arr[n++] = 0x0;		/* this page */
2399 			arr[n++] = 0xd;		/* Temperature */
2400 			arr[n++] = 0x2f;	/* Informational exceptions */
2401 			arr[3] = n - 4;
2402 			break;
2403 		case 0xd:	/* Temperature log page */
2404 			arr[3] = resp_temp_l_pg(arr + 4);
2405 			break;
2406 		case 0x2f:	/* Informational exceptions log page */
2407 			arr[3] = resp_ie_l_pg(arr + 4);
2408 			break;
2409 		default:
2410 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2411 			return check_condition_result;
2412 		}
2413 	} else if (0xff == subpcode) {
2414 		arr[0] |= 0x40;
2415 		arr[1] = subpcode;
2416 		switch (pcode) {
2417 		case 0x0:	/* Supported log pages and subpages log page */
2418 			n = 4;
2419 			arr[n++] = 0x0;
2420 			arr[n++] = 0x0;		/* 0,0 page */
2421 			arr[n++] = 0x0;
2422 			arr[n++] = 0xff;	/* this page */
2423 			arr[n++] = 0xd;
2424 			arr[n++] = 0x0;		/* Temperature */
2425 			arr[n++] = 0x2f;
2426 			arr[n++] = 0x0;	/* Informational exceptions */
2427 			arr[3] = n - 4;
2428 			break;
2429 		case 0xd:	/* Temperature subpages */
2430 			n = 4;
2431 			arr[n++] = 0xd;
2432 			arr[n++] = 0x0;		/* Temperature */
2433 			arr[3] = n - 4;
2434 			break;
2435 		case 0x2f:	/* Informational exceptions subpages */
2436 			n = 4;
2437 			arr[n++] = 0x2f;
2438 			arr[n++] = 0x0;		/* Informational exceptions */
2439 			arr[3] = n - 4;
2440 			break;
2441 		default:
2442 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2443 			return check_condition_result;
2444 		}
2445 	} else {
2446 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2447 		return check_condition_result;
2448 	}
2449 	len = min(get_unaligned_be16(arr + 2) + 4, alloc_len);
2450 	return fill_from_dev_buffer(scp, arr,
2451 		    min(len, SDEBUG_MAX_INQ_ARR_SZ));
2452 }
2453 
2454 static int check_device_access_params(struct scsi_cmnd *scp,
2455 				      unsigned long long lba, unsigned int num)
2456 {
2457 	if (lba + num > sdebug_capacity) {
2458 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
2459 		return check_condition_result;
2460 	}
2461 	/* transfer length excessive (tie in to block limits VPD page) */
2462 	if (num > sdebug_store_sectors) {
2463 		/* needs work to find which cdb byte 'num' comes from */
2464 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
2465 		return check_condition_result;
2466 	}
2467 	return 0;
2468 }
2469 
/* Copy user data between the command's scatter-gather list and the ram
 * store (fake_storep) for a read or write of num sectors at lba, starting
 * sg_skip bytes into the sgl.  The store may be smaller than the reported
 * capacity, so the lba is taken modulo its size and accesses wrap around.
 * Returns number of bytes copied or -1 if error. */
static int do_device_access(struct scsi_cmnd *scmd, u32 sg_skip, u64 lba,
			    u32 num, bool do_write)
{
	int ret;
	u64 block, rest = 0;
	struct scsi_data_buffer *sdb = &scmd->sdb;
	enum dma_data_direction dir;

	if (do_write) {
		dir = DMA_TO_DEVICE;
		write_since_sync = true;	/* noted by SYNCHRONIZE CACHE */
	} else {
		dir = DMA_FROM_DEVICE;
	}

	if (!sdb->length)
		return 0;
	if (scmd->sc_data_direction != dir)
		return -1;

	/* map lba onto the ram store; rest = sectors that wrap its end */
	block = do_div(lba, sdebug_store_sectors);
	if (block + num > sdebug_store_sectors)
		rest = block + num - sdebug_store_sectors;

	/* first the part that fits before the end of the store */
	ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
		   fake_storep + (block * sdebug_sector_size),
		   (num - rest) * sdebug_sector_size, sg_skip, do_write);
	if (ret != (num - rest) * sdebug_sector_size)
		return ret;

	if (rest) {
		/* then the wrapped part from the start of the store */
		ret += sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
			    fake_storep, rest * sdebug_sector_size,
			    sg_skip + ((num - rest) * sdebug_sector_size),
			    do_write);
	}

	return ret;
}
2510 
2511 /* If fake_store(lba,num) compares equal to arr(num), then copy top half of
2512  * arr into fake_store(lba,num) and return true. If comparison fails then
2513  * return false. */
2514 static bool comp_write_worker(u64 lba, u32 num, const u8 *arr)
2515 {
2516 	bool res;
2517 	u64 block, rest = 0;
2518 	u32 store_blks = sdebug_store_sectors;
2519 	u32 lb_size = sdebug_sector_size;
2520 
2521 	block = do_div(lba, store_blks);
2522 	if (block + num > store_blks)
2523 		rest = block + num - store_blks;
2524 
2525 	res = !memcmp(fake_storep + (block * lb_size), arr,
2526 		      (num - rest) * lb_size);
2527 	if (!res)
2528 		return res;
2529 	if (rest)
2530 		res = memcmp(fake_storep, arr + ((num - rest) * lb_size),
2531 			     rest * lb_size);
2532 	if (!res)
2533 		return res;
2534 	arr += num * lb_size;
2535 	memcpy(fake_storep + (block * lb_size), arr, (num - rest) * lb_size);
2536 	if (rest)
2537 		memcpy(fake_storep, arr + ((num - rest) * lb_size),
2538 		       rest * lb_size);
2539 	return res;
2540 }
2541 
2542 static __be16 dif_compute_csum(const void *buf, int len)
2543 {
2544 	__be16 csum;
2545 
2546 	if (sdebug_guard)
2547 		csum = (__force __be16)ip_compute_csum(buf, len);
2548 	else
2549 		csum = cpu_to_be16(crc_t10dif(buf, len));
2550 
2551 	return csum;
2552 }
2553 
/* Check one sector's protection information tuple against its data.
 * Returns 0 on success, 0x01 on a guard (checksum) mismatch, or 0x03 on
 * a reference tag mismatch (type 1 checks the low 32 bits of the sector
 * number, type 2 the expected initial reference tag ei_lba). */
static int dif_verify(struct t10_pi_tuple *sdt, const void *data,
		      sector_t sector, u32 ei_lba)
{
	__be16 csum = dif_compute_csum(data, sdebug_sector_size);

	if (sdt->guard_tag != csum) {
		pr_err("GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
			(unsigned long)sector,
			be16_to_cpu(sdt->guard_tag),
			be16_to_cpu(csum));
		return 0x01;
	}
	if (sdebug_dif == T10_PI_TYPE1_PROTECTION &&
	    be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
		pr_err("REF check failed on sector %lu\n",
			(unsigned long)sector);
		return 0x03;
	}
	if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
	    be32_to_cpu(sdt->ref_tag) != ei_lba) {
		pr_err("REF check failed on sector %lu\n",
			(unsigned long)sector);
		return 0x03;
	}
	return 0;
}
2580 
/* Copy protection information tuples between the dif_storep backing store
 * and the command's protection scatter-gather list: store -> sgl when
 * read is true, sgl -> store otherwise.  Handles wrap-around at the end
 * of dif_storep, mirroring the wrap of the data store. */
static void dif_copy_prot(struct scsi_cmnd *SCpnt, sector_t sector,
			  unsigned int sectors, bool read)
{
	size_t resid;
	void *paddr;
	const void *dif_store_end = dif_storep + sdebug_store_sectors;
	struct sg_mapping_iter miter;

	/* Bytes of protection data to copy into sgl */
	resid = sectors * sizeof(*dif_storep);

	sg_miter_start(&miter, scsi_prot_sglist(SCpnt),
			scsi_prot_sg_count(SCpnt), SG_MITER_ATOMIC |
			(read ? SG_MITER_TO_SG : SG_MITER_FROM_SG));

	while (sg_miter_next(&miter) && resid > 0) {
		size_t len = min(miter.length, resid);
		void *start = dif_store(sector);
		size_t rest = 0;

		/* bytes of this chunk that wrap past the end of the store */
		if (dif_store_end < start + len)
			rest = start + len - dif_store_end;

		paddr = miter.addr;

		if (read)
			memcpy(paddr, start, len - rest);
		else
			memcpy(start, paddr, len - rest);

		if (rest) {
			/* wrapped part continues at the start of the store */
			if (read)
				memcpy(paddr + len - rest, dif_storep, rest);
			else
				memcpy(dif_storep, paddr + len - rest, rest);
		}

		sector += len / sizeof(*dif_storep);
		resid -= len;
	}
	sg_miter_stop(&miter);
}
2623 
2624 static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec,
2625 			    unsigned int sectors, u32 ei_lba)
2626 {
2627 	unsigned int i;
2628 	struct t10_pi_tuple *sdt;
2629 	sector_t sector;
2630 
2631 	for (i = 0; i < sectors; i++, ei_lba++) {
2632 		int ret;
2633 
2634 		sector = start_sec + i;
2635 		sdt = dif_store(sector);
2636 
2637 		if (sdt->app_tag == cpu_to_be16(0xffff))
2638 			continue;
2639 
2640 		ret = dif_verify(sdt, fake_store(sector), sector, ei_lba);
2641 		if (ret) {
2642 			dif_errors++;
2643 			return ret;
2644 		}
2645 	}
2646 
2647 	dif_copy_prot(SCpnt, start_sec, sectors, true);
2648 	dix_reads++;
2649 
2650 	return 0;
2651 }
2652 
/*
 * Service READ(6/10/12/16/32) and the read half of XDWRITEREAD(10).
 * Decodes lba/num from the cdb, applies protection and bounds checks,
 * optionally injects errors, then copies data from the fake store into
 * the command's sgl. Returns 0 or a SCSI result/check-condition value.
 */
static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	u8 *cmd = scp->cmnd;
	struct sdebug_queued_cmd *sqcp;
	u64 lba;
	u32 num;
	u32 ei_lba;	/* expected initial LBA for DIF type 2 ref tag */
	unsigned long iflags;
	int ret;
	bool check_prot;

	/* cdb field layout differs per opcode */
	switch (cmd[0]) {
	case READ_16:
		ei_lba = 0;
		lba = get_unaligned_be64(cmd + 2);
		num = get_unaligned_be32(cmd + 10);
		check_prot = true;
		break;
	case READ_10:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be16(cmd + 7);
		check_prot = true;
		break;
	case READ_6:
		ei_lba = 0;
		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
		      (u32)(cmd[1] & 0x1f) << 16;
		/* READ(6): transfer length 0 means 256 blocks */
		num = (0 == cmd[4]) ? 256 : cmd[4];
		check_prot = true;
		break;
	case READ_12:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be32(cmd + 6);
		check_prot = true;
		break;
	case XDWRITEREAD_10:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be16(cmd + 7);
		check_prot = false;
		break;
	default:	/* assume READ(32) */
		lba = get_unaligned_be64(cmd + 12);
		ei_lba = get_unaligned_be32(cmd + 20);
		num = get_unaligned_be32(cmd + 28);
		check_prot = false;
		break;
	}
	/* RDPROTECT (cmd[1] bits 5-7) sanity versus the configured DIF type */
	if (unlikely(have_dif_prot && check_prot)) {
		if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
		    (cmd[1] & 0xe0)) {
			mk_sense_invalid_opcode(scp);
			return check_condition_result;
		}
		if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
		     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
		    (cmd[1] & 0xe0) == 0)
			sdev_printk(KERN_ERR, scp->device, "Unprotected RD "
				    "to DIF device\n");
	}
	/* error injection: inj_short halves the transfer length */
	if (unlikely(sdebug_any_injecting_opt)) {
		sqcp = (struct sdebug_queued_cmd *)scp->host_scribble;

		if (sqcp) {
			if (sqcp->inj_short)
				num /= 2;
		}
	} else
		sqcp = NULL;

	/* inline check_device_access_params() */
	if (unlikely(lba + num > sdebug_capacity)) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		return check_condition_result;
	}
	/* transfer length excessive (tie in to block limits VPD page) */
	if (unlikely(num > sdebug_store_sectors)) {
		/* needs work to find which cdb byte 'num' comes from */
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		return check_condition_result;
	}

	/* simulated medium error if the range overlaps the error window */
	if (unlikely((SDEBUG_OPT_MEDIUM_ERR & sdebug_opts) &&
		     (lba <= (sdebug_medium_error_start + sdebug_medium_error_count - 1)) &&
		     ((lba + num) > sdebug_medium_error_start))) {
		/* claim unrecoverable read error */
		mk_sense_buffer(scp, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
		/* set info field and valid bit for fixed descriptor */
		if (0x70 == (scp->sense_buffer[0] & 0x7f)) {
			scp->sense_buffer[0] |= 0x80;	/* Valid bit */
			ret = (lba < OPT_MEDIUM_ERR_ADDR)
			      ? OPT_MEDIUM_ERR_ADDR : (int)lba;
			put_unaligned_be32(ret, scp->sense_buffer + 3);
		}
		scsi_set_resid(scp, scsi_bufflen(scp));
		return check_condition_result;
	}

	read_lock_irqsave(&atomic_rw, iflags);

	/* DIX + T10 DIF */
	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
		int prot_ret = prot_verify_read(scp, lba, num, ei_lba);

		if (prot_ret) {
			read_unlock_irqrestore(&atomic_rw, iflags);
			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, prot_ret);
			return illegal_condition_result;
		}
	}

	ret = do_device_access(scp, 0, lba, num, false);
	read_unlock_irqrestore(&atomic_rw, iflags);
	if (unlikely(ret == -1))
		return DID_ERROR << 16;

	/* report any shortfall between requested and transferred bytes */
	scp->sdb.resid = scsi_bufflen(scp) - ret;

	/* post-transfer error injection (sense set after a good transfer) */
	if (unlikely(sqcp)) {
		if (sqcp->inj_recovered) {
			mk_sense_buffer(scp, RECOVERED_ERROR,
					THRESHOLD_EXCEEDED, 0);
			return check_condition_result;
		} else if (sqcp->inj_transport) {
			mk_sense_buffer(scp, ABORTED_COMMAND,
					TRANSPORT_PROBLEM, ACK_NAK_TO);
			return check_condition_result;
		} else if (sqcp->inj_dif) {
			/* Logical block guard check failed */
			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
			return illegal_condition_result;
		} else if (sqcp->inj_dix) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
			return illegal_condition_result;
		}
	}
	return 0;
}
2793 
/*
 * Dump a sector's contents to the kernel log, 16 bytes per line.
 * Printable ASCII bytes are shown as characters, everything else in hex.
 *
 * Fix: the inner loop previously always scanned a full 16 bytes, reading
 * past the end of @buf when @len is not a multiple of 16; bound it by @len.
 */
static void dump_sector(unsigned char *buf, int len)
{
	int i, j, n;

	pr_err(">>> Sector Dump <<<\n");
	for (i = 0; i < len; i += 16) {
		char b[128];

		for (j = 0, n = 0; j < 16 && i + j < len; j++) {
			unsigned char c = buf[i + j];

			if (c >= 0x20 && c < 0x7e)
				n += scnprintf(b + n, sizeof(b) - n,
					       " %c ", buf[i + j]);
			else
				n += scnprintf(b + n, sizeof(b) - n,
					       "%02x ", buf[i + j]);
		}
		pr_err("%04d: %s\n", i, b);
	}
}
2815 
/*
 * Verify incoming write data against the PI tuples supplied in the
 * command's protection sgl, walking the data and protection lists in
 * lockstep with two sg_mapping_iters. On success the protection data is
 * copied into dif_storep. Returns 0 or a dif_verify()/0x01 error code.
 */
static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
			     unsigned int sectors, u32 ei_lba)
{
	int ret;
	struct t10_pi_tuple *sdt;
	void *daddr;
	sector_t sector = start_sec;
	int ppage_offset;	/* offset within current protection page */
	int dpage_offset;	/* offset within current data page */
	struct sg_mapping_iter diter;
	struct sg_mapping_iter piter;

	BUG_ON(scsi_sg_count(SCpnt) == 0);
	BUG_ON(scsi_prot_sg_count(SCpnt) == 0);

	sg_miter_start(&piter, scsi_prot_sglist(SCpnt),
			scsi_prot_sg_count(SCpnt),
			SG_MITER_ATOMIC | SG_MITER_FROM_SG);
	sg_miter_start(&diter, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
			SG_MITER_ATOMIC | SG_MITER_FROM_SG);

	/* For each protection page */
	while (sg_miter_next(&piter)) {
		dpage_offset = 0;
		/* data list running out before the protection list is an
		 * internal inconsistency; report as a guard failure */
		if (WARN_ON(!sg_miter_next(&diter))) {
			ret = 0x01;
			goto out;
		}

		for (ppage_offset = 0; ppage_offset < piter.length;
		     ppage_offset += sizeof(struct t10_pi_tuple)) {
			/* If we're at the end of the current
			 * data page advance to the next one
			 */
			if (dpage_offset >= diter.length) {
				if (WARN_ON(!sg_miter_next(&diter))) {
					ret = 0x01;
					goto out;
				}
				dpage_offset = 0;
			}

			sdt = piter.addr + ppage_offset;
			daddr = diter.addr + dpage_offset;

			ret = dif_verify(sdt, daddr, sector, ei_lba);
			if (ret) {
				dump_sector(daddr, sdebug_sector_size);
				goto out;
			}

			sector++;
			ei_lba++;
			dpage_offset += sdebug_sector_size;
		}
		/* record partial consumption so the data iterator resumes
		 * at the right offset on the next protection page */
		diter.consumed = dpage_offset;
		sg_miter_stop(&diter);
	}
	sg_miter_stop(&piter);

	dif_copy_prot(SCpnt, start_sec, sectors, false);
	dix_writes++;

	return 0;

out:
	dif_errors++;
	sg_miter_stop(&diter);
	sg_miter_stop(&piter);
	return ret;
}
2887 
2888 static unsigned long lba_to_map_index(sector_t lba)
2889 {
2890 	if (sdebug_unmap_alignment)
2891 		lba += sdebug_unmap_granularity - sdebug_unmap_alignment;
2892 	sector_div(lba, sdebug_unmap_granularity);
2893 	return lba;
2894 }
2895 
2896 static sector_t map_index_to_lba(unsigned long index)
2897 {
2898 	sector_t lba = index * sdebug_unmap_granularity;
2899 
2900 	if (sdebug_unmap_alignment)
2901 		lba -= sdebug_unmap_granularity - sdebug_unmap_alignment;
2902 	return lba;
2903 }
2904 
2905 static unsigned int map_state(sector_t lba, unsigned int *num)
2906 {
2907 	sector_t end;
2908 	unsigned int mapped;
2909 	unsigned long index;
2910 	unsigned long next;
2911 
2912 	index = lba_to_map_index(lba);
2913 	mapped = test_bit(index, map_storep);
2914 
2915 	if (mapped)
2916 		next = find_next_zero_bit(map_storep, map_size, index);
2917 	else
2918 		next = find_next_bit(map_storep, map_size, index);
2919 
2920 	end = min_t(sector_t, sdebug_store_sectors,  map_index_to_lba(next));
2921 	*num = end - lba;
2922 	return mapped;
2923 }
2924 
2925 static void map_region(sector_t lba, unsigned int len)
2926 {
2927 	sector_t end = lba + len;
2928 
2929 	while (lba < end) {
2930 		unsigned long index = lba_to_map_index(lba);
2931 
2932 		if (index < map_size)
2933 			set_bit(index, map_storep);
2934 
2935 		lba = map_index_to_lba(index + 1);
2936 	}
2937 }
2938 
/*
 * Clear mapped state for fully-covered provisioning granules within
 * [lba, lba + len). Partially-covered granules at either edge are left
 * mapped. Depending on sdebug_lbprz, deallocated data reads back as
 * zeroes (LBPRZ=1) or 0xff bytes (LBPRZ=2); PI for the granule is
 * invalidated with 0xff fill.
 */
static void unmap_region(sector_t lba, unsigned int len)
{
	sector_t end = lba + len;

	while (lba < end) {
		unsigned long index = lba_to_map_index(lba);

		/* only unmap granules that lie entirely inside the range */
		if (lba == map_index_to_lba(index) &&
		    lba + sdebug_unmap_granularity <= end &&
		    index < map_size) {
			clear_bit(index, map_storep);
			if (sdebug_lbprz) {  /* for LBPRZ=2 return 0xff_s */
				memset(fake_storep +
				       lba * sdebug_sector_size,
				       (sdebug_lbprz & 1) ? 0 : 0xff,
				       sdebug_sector_size *
				       sdebug_unmap_granularity);
			}
			if (dif_storep) {
				memset(dif_storep + lba, 0xff,
				       sizeof(*dif_storep) *
				       sdebug_unmap_granularity);
			}
		}
		lba = map_index_to_lba(index + 1);
	}
}
2966 
/*
 * Service WRITE(6/10/12/16/32) and XDWRITEREAD(10). Decodes the cdb,
 * applies protection and bounds checks, writes into the fake store
 * under the atomic_rw write lock, updates the provisioning map, and
 * optionally injects post-write errors. Returns 0 or a SCSI result.
 */
static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	u8 *cmd = scp->cmnd;
	u64 lba;
	u32 num;
	u32 ei_lba;	/* expected initial LBA for DIF type 2 ref tag */
	unsigned long iflags;
	int ret;
	bool check_prot;

	/* cdb field layout differs per opcode */
	switch (cmd[0]) {
	case WRITE_16:
		ei_lba = 0;
		lba = get_unaligned_be64(cmd + 2);
		num = get_unaligned_be32(cmd + 10);
		check_prot = true;
		break;
	case WRITE_10:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be16(cmd + 7);
		check_prot = true;
		break;
	case WRITE_6:
		ei_lba = 0;
		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
		      (u32)(cmd[1] & 0x1f) << 16;
		/* WRITE(6): transfer length 0 means 256 blocks */
		num = (0 == cmd[4]) ? 256 : cmd[4];
		check_prot = true;
		break;
	case WRITE_12:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be32(cmd + 6);
		check_prot = true;
		break;
	case 0x53:	/* XDWRITEREAD(10) */
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be16(cmd + 7);
		check_prot = false;
		break;
	default:	/* assume WRITE(32) */
		lba = get_unaligned_be64(cmd + 12);
		ei_lba = get_unaligned_be32(cmd + 20);
		num = get_unaligned_be32(cmd + 28);
		check_prot = false;
		break;
	}
	/* WRPROTECT (cmd[1] bits 5-7) sanity versus the configured DIF type */
	if (unlikely(have_dif_prot && check_prot)) {
		if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
		    (cmd[1] & 0xe0)) {
			mk_sense_invalid_opcode(scp);
			return check_condition_result;
		}
		if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
		     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
		    (cmd[1] & 0xe0) == 0)
			sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
				    "to DIF device\n");
	}

	/* inline check_device_access_params() */
	if (unlikely(lba + num > sdebug_capacity)) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		return check_condition_result;
	}
	/* transfer length excessive (tie in to block limits VPD page) */
	if (unlikely(num > sdebug_store_sectors)) {
		/* needs work to find which cdb byte 'num' comes from */
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		return check_condition_result;
	}

	write_lock_irqsave(&atomic_rw, iflags);

	/* DIX + T10 DIF */
	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
		int prot_ret = prot_verify_write(scp, lba, num, ei_lba);

		if (prot_ret) {
			write_unlock_irqrestore(&atomic_rw, iflags);
			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, prot_ret);
			return illegal_condition_result;
		}
	}

	ret = do_device_access(scp, 0, lba, num, true);
	/* thin provisioning: mark written granules as mapped */
	if (unlikely(scsi_debug_lbp()))
		map_region(lba, num);
	write_unlock_irqrestore(&atomic_rw, iflags);
	if (unlikely(-1 == ret))
		return DID_ERROR << 16;
	else if (unlikely(sdebug_verbose &&
			  (ret < (num * sdebug_sector_size))))
		sdev_printk(KERN_INFO, scp->device,
			    "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
			    my_name, num * sdebug_sector_size, ret);

	/* post-write error injection (sense set after a good transfer) */
	if (unlikely(sdebug_any_injecting_opt)) {
		struct sdebug_queued_cmd *sqcp =
				(struct sdebug_queued_cmd *)scp->host_scribble;

		if (sqcp) {
			if (sqcp->inj_recovered) {
				mk_sense_buffer(scp, RECOVERED_ERROR,
						THRESHOLD_EXCEEDED, 0);
				return check_condition_result;
			} else if (sqcp->inj_dif) {
				/* Logical block guard check failed */
				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
				return illegal_condition_result;
			} else if (sqcp->inj_dix) {
				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
				return illegal_condition_result;
			}
		}
	}
	return 0;
}
3087 
3088 /*
3089  * T10 has only specified WRITE SCATTERED(16) and WRITE SCATTERED(32).
3090  * No READ GATHERED yet (requires bidi or long cdb holding gather list).
3091  */
/*
 * Service WRITE SCATTERED(16) and WRITE SCATTERED(32). The data-out
 * buffer begins with a header plus an array of LBA range descriptors
 * (lbdof blocks worth), followed by the data for each range in order.
 * The descriptor list is fetched into a temporary buffer, then each
 * range is checked and written under the atomic_rw write lock.
 */
static int resp_write_scat(struct scsi_cmnd *scp,
			   struct sdebug_dev_info *devip)
{
	u8 *cmd = scp->cmnd;
	u8 *lrdp = NULL;	/* holds header + LBA range descriptors */
	u8 *up;
	u8 wrprotect;
	u16 lbdof, num_lrd, k;
	u32 num, num_by, bt_len, lbdof_blen, sg_off, cum_lb;
	u32 lb_size = sdebug_sector_size;
	u32 ei_lba;
	u64 lba;
	unsigned long iflags;
	int ret, res;
	bool is_16;
	static const u32 lrd_size = 32; /* + parameter list header size */

	if (cmd[0] == VARIABLE_LENGTH_CMD) {
		is_16 = false;
		wrprotect = (cmd[10] >> 5) & 0x7;
		lbdof = get_unaligned_be16(cmd + 12);
		num_lrd = get_unaligned_be16(cmd + 16);
		bt_len = get_unaligned_be32(cmd + 28);
	} else {        /* that leaves WRITE SCATTERED(16) */
		is_16 = true;
		wrprotect = (cmd[2] >> 5) & 0x7;
		lbdof = get_unaligned_be16(cmd + 4);
		num_lrd = get_unaligned_be16(cmd + 8);
		bt_len = get_unaligned_be32(cmd + 10);
		/* WRPROTECT sanity versus the configured DIF type */
		if (unlikely(have_dif_prot)) {
			if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
			    wrprotect) {
				mk_sense_invalid_opcode(scp);
				return illegal_condition_result;
			}
			if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
			     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
			     wrprotect == 0)
				sdev_printk(KERN_ERR, scp->device,
					    "Unprotected WR to DIF device\n");
		}
	}
	if ((num_lrd == 0) || (bt_len == 0))
		return 0;       /* T10 says these do-nothings are not errors */
	if (lbdof == 0) {
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, scp->device,
				"%s: %s: LB Data Offset field bad\n",
				my_name, __func__);
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		return illegal_condition_result;
	}
	lbdof_blen = lbdof * lb_size;
	/* the descriptor area must fit in the blocks reserved for it */
	if ((lrd_size + (num_lrd * lrd_size)) > lbdof_blen) {
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, scp->device,
				"%s: %s: LBA range descriptors don't fit\n",
				my_name, __func__);
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		return illegal_condition_result;
	}
	lrdp = kzalloc(lbdof_blen, GFP_ATOMIC);
	if (lrdp == NULL)
		return SCSI_MLQUEUE_HOST_BUSY;
	if (sdebug_verbose)
		sdev_printk(KERN_INFO, scp->device,
			"%s: %s: Fetch header+scatter_list, lbdof_blen=%u\n",
			my_name, __func__, lbdof_blen);
	res = fetch_to_dev_buffer(scp, lrdp, lbdof_blen);
	if (res == -1) {
		ret = DID_ERROR << 16;
		goto err_out;
	}

	write_lock_irqsave(&atomic_rw, iflags);
	sg_off = lbdof_blen;	/* data for the first range starts here */
	/* Spec says Buffer xfer Length field in number of LBs in dout */
	cum_lb = 0;
	/* process each LBA range descriptor in turn */
	for (k = 0, up = lrdp + lrd_size; k < num_lrd; ++k, up += lrd_size) {
		lba = get_unaligned_be64(up + 0);
		num = get_unaligned_be32(up + 8);
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, scp->device,
				"%s: %s: k=%d  LBA=0x%llx num=%u  sg_off=%u\n",
				my_name, __func__, k, lba, num, sg_off);
		if (num == 0)
			continue;
		ret = check_device_access_params(scp, lba, num);
		if (ret)
			goto err_out_unlock;
		num_by = num * lb_size;
		/* WRITE SCATTERED(32) carries ei_lba in the descriptor */
		ei_lba = is_16 ? 0 : get_unaligned_be32(up + 12);

		/* total blocks may not exceed the cdb's buffer length */
		if ((cum_lb + num) > bt_len) {
			if (sdebug_verbose)
				sdev_printk(KERN_INFO, scp->device,
				    "%s: %s: sum of blocks > data provided\n",
				    my_name, __func__);
			mk_sense_buffer(scp, ILLEGAL_REQUEST, WRITE_ERROR_ASC,
					0);
			ret = illegal_condition_result;
			goto err_out_unlock;
		}

		/* DIX + T10 DIF */
		if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
			int prot_ret = prot_verify_write(scp, lba, num,
							 ei_lba);

			if (prot_ret) {
				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10,
						prot_ret);
				ret = illegal_condition_result;
				goto err_out_unlock;
			}
		}

		ret = do_device_access(scp, sg_off, lba, num, true);
		/* thin provisioning: mark written granules as mapped */
		if (unlikely(scsi_debug_lbp()))
			map_region(lba, num);
		if (unlikely(-1 == ret)) {
			ret = DID_ERROR << 16;
			goto err_out_unlock;
		} else if (unlikely(sdebug_verbose && (ret < num_by)))
			sdev_printk(KERN_INFO, scp->device,
			    "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
			    my_name, num_by, ret);

		/* post-write error injection, per range */
		if (unlikely(sdebug_any_injecting_opt)) {
			struct sdebug_queued_cmd *sqcp =
				(struct sdebug_queued_cmd *)scp->host_scribble;

			if (sqcp) {
				if (sqcp->inj_recovered) {
					mk_sense_buffer(scp, RECOVERED_ERROR,
							THRESHOLD_EXCEEDED, 0);
					ret = illegal_condition_result;
					goto err_out_unlock;
				} else if (sqcp->inj_dif) {
					/* Logical block guard check failed */
					mk_sense_buffer(scp, ABORTED_COMMAND,
							0x10, 1);
					ret = illegal_condition_result;
					goto err_out_unlock;
				} else if (sqcp->inj_dix) {
					mk_sense_buffer(scp, ILLEGAL_REQUEST,
							0x10, 1);
					ret = illegal_condition_result;
					goto err_out_unlock;
				}
			}
		}
		sg_off += num_by;
		cum_lb += num;
	}
	ret = 0;
err_out_unlock:
	write_unlock_irqrestore(&atomic_rw, iflags);
err_out:
	kfree(lrdp);
	return ret;
}
3254 
/*
 * Common worker for WRITE SAME(10/16). If @unmap is set (and logical
 * block provisioning is enabled) the range is deallocated; otherwise
 * one logical block is fetched from the data-out buffer (or zeroed when
 * @ndob is set) and replicated across @num blocks. Runs under the
 * atomic_rw write lock.
 */
static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
			   u32 ei_lba, bool unmap, bool ndob)
{
	unsigned long iflags;
	unsigned long long i;
	int ret;
	u64 lba_off;	/* byte offset of lba within fake_storep */

	ret = check_device_access_params(scp, lba, num);
	if (ret)
		return ret;

	write_lock_irqsave(&atomic_rw, iflags);

	if (unmap && scsi_debug_lbp()) {
		unmap_region(lba, num);
		goto out;
	}

	lba_off = lba * sdebug_sector_size;
	/* if ndob then zero 1 logical block, else fetch 1 logical block */
	if (ndob) {
		memset(fake_storep + lba_off, 0, sdebug_sector_size);
		ret = 0;
	} else
		ret = fetch_to_dev_buffer(scp, fake_storep + lba_off,
					  sdebug_sector_size);

	if (-1 == ret) {
		write_unlock_irqrestore(&atomic_rw, iflags);
		return DID_ERROR << 16;
	} else if (sdebug_verbose && !ndob && (ret < sdebug_sector_size))
		sdev_printk(KERN_INFO, scp->device,
			    "%s: %s: lb size=%u, IO sent=%d bytes\n",
			    my_name, "write same",
			    sdebug_sector_size, ret);

	/* Copy first sector to remaining blocks */
	for (i = 1 ; i < num ; i++)
		memcpy(fake_storep + ((lba + i) * sdebug_sector_size),
		       fake_storep + lba_off,
		       sdebug_sector_size);

	/* thin provisioning: mark written granules as mapped */
	if (scsi_debug_lbp())
		map_region(lba, num);
out:
	write_unlock_irqrestore(&atomic_rw, iflags);

	return 0;
}
3305 
3306 static int resp_write_same_10(struct scsi_cmnd *scp,
3307 			      struct sdebug_dev_info *devip)
3308 {
3309 	u8 *cmd = scp->cmnd;
3310 	u32 lba;
3311 	u16 num;
3312 	u32 ei_lba = 0;
3313 	bool unmap = false;
3314 
3315 	if (cmd[1] & 0x8) {
3316 		if (sdebug_lbpws10 == 0) {
3317 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3318 			return check_condition_result;
3319 		} else
3320 			unmap = true;
3321 	}
3322 	lba = get_unaligned_be32(cmd + 2);
3323 	num = get_unaligned_be16(cmd + 7);
3324 	if (num > sdebug_write_same_length) {
3325 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3326 		return check_condition_result;
3327 	}
3328 	return resp_write_same(scp, lba, num, ei_lba, unmap, false);
3329 }
3330 
3331 static int resp_write_same_16(struct scsi_cmnd *scp,
3332 			      struct sdebug_dev_info *devip)
3333 {
3334 	u8 *cmd = scp->cmnd;
3335 	u64 lba;
3336 	u32 num;
3337 	u32 ei_lba = 0;
3338 	bool unmap = false;
3339 	bool ndob = false;
3340 
3341 	if (cmd[1] & 0x8) {	/* UNMAP */
3342 		if (sdebug_lbpws == 0) {
3343 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3344 			return check_condition_result;
3345 		} else
3346 			unmap = true;
3347 	}
3348 	if (cmd[1] & 0x1)  /* NDOB (no data-out buffer, assumes zeroes) */
3349 		ndob = true;
3350 	lba = get_unaligned_be64(cmd + 2);
3351 	num = get_unaligned_be32(cmd + 10);
3352 	if (num > sdebug_write_same_length) {
3353 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
3354 		return check_condition_result;
3355 	}
3356 	return resp_write_same(scp, lba, num, ei_lba, unmap, ndob);
3357 }
3358 
3359 /* Note the mode field is in the same position as the (lower) service action
3360  * field. For the Report supported operation codes command, SPC-4 suggests
3361  * each mode of this command should be reported separately; for future. */
/*
 * Service WRITE BUFFER. No microcode is actually downloaded; the
 * relevant "microcode changed" (and, for non-saving modes, bus reset)
 * unit attentions are set on this LU or on the sibling LUs of the same
 * target, matching what a real download-and-activate would trigger.
 */
static int resp_write_buffer(struct scsi_cmnd *scp,
			     struct sdebug_dev_info *devip)
{
	u8 *cmd = scp->cmnd;
	struct scsi_device *sdp = scp->device;
	struct sdebug_dev_info *dp;
	u8 mode;

	mode = cmd[1] & 0x1f;
	switch (mode) {
	case 0x4:	/* download microcode (MC) and activate (ACT) */
		/* set UAs on this device only */
		set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
		set_bit(SDEBUG_UA_MICROCODE_CHANGED, devip->uas_bm);
		break;
	case 0x5:	/* download MC, save and ACT */
		set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET, devip->uas_bm);
		break;
	case 0x6:	/* download MC with offsets and ACT */
		/* set UAs on most devices (LUs) in this target */
		list_for_each_entry(dp,
				    &devip->sdbg_host->dev_info_list,
				    dev_list)
			if (dp->target == sdp->id) {
				set_bit(SDEBUG_UA_BUS_RESET, dp->uas_bm);
				if (devip != dp)
					set_bit(SDEBUG_UA_MICROCODE_CHANGED,
						dp->uas_bm);
			}
		break;
	case 0x7:	/* download MC with offsets, save, and ACT */
		/* set UA on all devices (LUs) in this target */
		list_for_each_entry(dp,
				    &devip->sdbg_host->dev_info_list,
				    dev_list)
			if (dp->target == sdp->id)
				set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET,
					dp->uas_bm);
		break;
	default:
		/* do nothing for this command for other mode values */
		break;
	}
	return 0;
}
3407 
/*
 * Service COMPARE AND WRITE. The data-out buffer holds @num compare
 * blocks followed by @num write blocks; both halves are pulled into a
 * temporary buffer (by temporarily pointing fake_storep at it), then
 * comp_write_worker() compares and conditionally writes. MISCOMPARE
 * sense is returned when the verify half does not match the store.
 */
static int resp_comp_write(struct scsi_cmnd *scp,
			   struct sdebug_dev_info *devip)
{
	u8 *cmd = scp->cmnd;
	u8 *arr;
	u8 *fake_storep_hold;
	u64 lba;
	u32 dnum;	/* total blocks in data-out: compare + write halves */
	u32 lb_size = sdebug_sector_size;
	u8 num;
	unsigned long iflags;
	int ret;
	int retval = 0;

	lba = get_unaligned_be64(cmd + 2);
	num = cmd[13];		/* 1 to a maximum of 255 logical blocks */
	if (0 == num)
		return 0;	/* degenerate case, not an error */
	/* WRPROTECT sanity versus the configured DIF type */
	if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
	    (cmd[1] & 0xe0)) {
		mk_sense_invalid_opcode(scp);
		return check_condition_result;
	}
	if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
	     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
	    (cmd[1] & 0xe0) == 0)
		sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
			    "to DIF device\n");

	/* inline check_device_access_params() */
	if (lba + num > sdebug_capacity) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		return check_condition_result;
	}
	/* transfer length excessive (tie in to block limits VPD page) */
	if (num > sdebug_store_sectors) {
		/* needs work to find which cdb byte 'num' comes from */
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		return check_condition_result;
	}
	dnum = 2 * num;
	arr = kcalloc(lb_size, dnum, GFP_ATOMIC);
	if (NULL == arr) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
				INSUFF_RES_ASCQ);
		return check_condition_result;
	}

	write_lock_irqsave(&atomic_rw, iflags);

	/* trick do_device_access() to fetch both compare and write buffers
	 * from data-in into arr. Safe (atomic) since write_lock held. */
	fake_storep_hold = fake_storep;
	fake_storep = arr;
	ret = do_device_access(scp, 0, 0, dnum, true);
	fake_storep = fake_storep_hold;
	if (ret == -1) {
		retval = DID_ERROR << 16;
		goto cleanup;
	} else if (sdebug_verbose && (ret < (dnum * lb_size)))
		sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb "
			    "indicated=%u, IO sent=%d bytes\n", my_name,
			    dnum * lb_size, ret);
	if (!comp_write_worker(lba, num, arr)) {
		mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
		retval = check_condition_result;
		goto cleanup;
	}
	/* thin provisioning: mark written granules as mapped */
	if (scsi_debug_lbp())
		map_region(lba, num);
cleanup:
	write_unlock_irqrestore(&atomic_rw, iflags);
	kfree(arr);
	return retval;
}
3483 
/* One UNMAP block descriptor as laid out in the parameter data (SBC) */
struct unmap_block_desc {
	__be64	lba;		/* first LBA to unmap */
	__be32	blocks;		/* number of blocks */
	__be32	__reserved;
};
3489 
/*
 * Service UNMAP. The data-out buffer carries a header plus an array of
 * unmap_block_desc entries; each described range is deallocated under
 * the atomic_rw write lock.
 *
 * NOTE(review): the BUG_ON()s below assert on lengths taken from the
 * host-supplied cdb/parameter data — a malformed request would panic
 * rather than return CHECK CONDITION; consider softer handling.
 */
static int resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	unsigned char *buf;
	struct unmap_block_desc *desc;
	unsigned int i, payload_len, descriptors;
	int ret;
	unsigned long iflags;


	if (!scsi_debug_lbp())
		return 0;	/* fib and say its done */
	payload_len = get_unaligned_be16(scp->cmnd + 7);
	BUG_ON(scsi_bufflen(scp) != payload_len);

	descriptors = (payload_len - 8) / 16;
	if (descriptors > sdebug_unmap_max_desc) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
		return check_condition_result;
	}

	buf = kzalloc(scsi_bufflen(scp), GFP_ATOMIC);
	if (!buf) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
				INSUFF_RES_ASCQ);
		return check_condition_result;
	}

	scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));

	/* header lengths must agree with the cdb's parameter list length */
	BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
	BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);

	desc = (void *)&buf[8];

	write_lock_irqsave(&atomic_rw, iflags);

	for (i = 0 ; i < descriptors ; i++) {
		unsigned long long lba = get_unaligned_be64(&desc[i].lba);
		unsigned int num = get_unaligned_be32(&desc[i].blocks);

		ret = check_device_access_params(scp, lba, num);
		if (ret)
			goto out;

		unmap_region(lba, num);
	}

	ret = 0;

out:
	write_unlock_irqrestore(&atomic_rw, iflags);
	kfree(buf);

	return ret;
}
3545 
3546 #define SDEBUG_GET_LBA_STATUS_LEN 32
3547 
/*
 * Service GET LBA STATUS. Returns a single LBA status descriptor
 * covering the run of blocks (starting at the requested LBA) that share
 * the same mapped/deallocated state. Without logical block provisioning
 * everything is reported as mapped.
 */
static int resp_get_lba_status(struct scsi_cmnd *scp,
			       struct sdebug_dev_info *devip)
{
	u8 *cmd = scp->cmnd;
	u64 lba;
	u32 alloc_len, mapped, num;
	u8 arr[SDEBUG_GET_LBA_STATUS_LEN];
	int ret;

	lba = get_unaligned_be64(cmd + 2);
	alloc_len = get_unaligned_be32(cmd + 10);

	/* too small to hold even one descriptor: nothing to return */
	if (alloc_len < 24)
		return 0;

	ret = check_device_access_params(scp, lba, 1);
	if (ret)
		return ret;

	if (scsi_debug_lbp())
		mapped = map_state(lba, &num);
	else {
		mapped = 1;
		/* following just in case virtual_gb changed */
		sdebug_capacity = get_sdebug_capacity();
		if (sdebug_capacity - lba <= 0xffffffff)
			num = sdebug_capacity - lba;
		else
			num = 0xffffffff;
	}

	memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
	put_unaligned_be32(20, arr);		/* Parameter Data Length */
	put_unaligned_be64(lba, arr + 8);	/* LBA */
	put_unaligned_be32(num, arr + 16);	/* Number of blocks */
	arr[20] = !mapped;		/* prov_stat=0: mapped; 1: dealloc */

	return fill_from_dev_buffer(scp, arr, SDEBUG_GET_LBA_STATUS_LEN);
}
3587 
3588 static int resp_sync_cache(struct scsi_cmnd *scp,
3589 			   struct sdebug_dev_info *devip)
3590 {
3591 	int res = 0;
3592 	u64 lba;
3593 	u32 num_blocks;
3594 	u8 *cmd = scp->cmnd;
3595 
3596 	if (cmd[0] == SYNCHRONIZE_CACHE) {	/* 10 byte cdb */
3597 		lba = get_unaligned_be32(cmd + 2);
3598 		num_blocks = get_unaligned_be16(cmd + 7);
3599 	} else {				/* SYNCHRONIZE_CACHE(16) */
3600 		lba = get_unaligned_be64(cmd + 2);
3601 		num_blocks = get_unaligned_be32(cmd + 10);
3602 	}
3603 	if (lba + num_blocks > sdebug_capacity) {
3604 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
3605 		return check_condition_result;
3606 	}
3607 	if (!write_since_sync || cmd[1] & 0x2)
3608 		res = SDEG_RES_IMMED_MASK;
3609 	else		/* delay if write_since_sync and IMMED clear */
3610 		write_since_sync = false;
3611 	return res;
3612 }
3613 
3614 #define RL_BUCKET_ELEMS 8
3615 
3616 /* Even though each pseudo target has a REPORT LUNS "well known logical unit"
3617  * (W-LUN), the normal Linux scanning logic does not associate it with a
3618  * device (e.g. /dev/sg7). The following magic will make that association:
3619  *   "cd /sys/class/scsi_host/host<n> ; echo '- - 49409' > scan"
3620  * where <n> is a host number. If there are multiple targets in a host then
3621  * the above will associate a W-LUN to each target. To only get a W-LUN
3622  * for target 2, then use "echo '- 2 49409' > scan" .
3623  */
/*
 * REPORT LUNS response is built in small RL_BUCKET_ELEMS-entry buckets so
 * the stack buffer 'arr' stays small; each filled bucket is copied out with
 * p_fill_from_dev_buffer() at offset 'off_rsp'.
 */
static int resp_report_luns(struct scsi_cmnd *scp,
			    struct sdebug_dev_info *devip)
{
	unsigned char *cmd = scp->cmnd;
	unsigned int alloc_len;
	unsigned char select_report;
	u64 lun;
	struct scsi_lun *lun_p;
	u8 arr[RL_BUCKET_ELEMS * sizeof(struct scsi_lun)];
	unsigned int lun_cnt;	/* normal LUN count (max: 256) */
	unsigned int wlun_cnt;	/* report luns W-LUN count */
	unsigned int tlun_cnt;	/* total LUN count */
	unsigned int rlen;	/* response length (in bytes) */
	int k, j, n, res;
	unsigned int off_rsp = 0;
	const int sz_lun = sizeof(struct scsi_lun);

	clear_luns_changed_on_target(devip);

	select_report = cmd[2];
	alloc_len = get_unaligned_be32(cmd + 6);

	/* SPC requires at least 4 bytes for the response header */
	if (alloc_len < 4) {
		pr_err("alloc len too small %d\n", alloc_len);
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
		return check_condition_result;
	}

	switch (select_report) {
	case 0:		/* all LUNs apart from W-LUNs */
		lun_cnt = sdebug_max_luns;
		wlun_cnt = 0;
		break;
	case 1:		/* only W-LUNs */
		lun_cnt = 0;
		wlun_cnt = 1;
		break;
	case 2:		/* all LUNs */
		lun_cnt = sdebug_max_luns;
		wlun_cnt = 1;
		break;
	case 0x10:	/* only administrative LUs */
	case 0x11:	/* see SPC-5 */
	case 0x12:	/* only subsiduary LUs owned by referenced LU */
	default:
		pr_debug("select report invalid %d\n", select_report);
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
		return check_condition_result;
	}

	if (sdebug_no_lun_0 && (lun_cnt > 0))
		--lun_cnt;

	tlun_cnt = lun_cnt + wlun_cnt;
	rlen = tlun_cnt * sz_lun;	/* excluding 8 byte header */
	scsi_set_resid(scp, scsi_bufflen(scp));
	pr_debug("select_report %d luns = %d wluns = %d no_lun0 %d\n",
		 select_report, lun_cnt, wlun_cnt, sdebug_no_lun_0);

	/* loops rely on sizeof response header same as sizeof lun (both 8) */
	lun = sdebug_no_lun_0 ? 1 : 0;
	for (k = 0, j = 0, res = 0; true; ++k, j = 0) {
		memset(arr, 0, sizeof(arr));
		lun_p = (struct scsi_lun *)&arr[0];
		if (k == 0) {
			/* first bucket: slot 0 holds the 8 byte response
			 * header, so LUN entries start at slot j == 1 */
			put_unaligned_be32(rlen, &arr[0]);
			++lun_p;
			j = 1;
		}
		for ( ; j < RL_BUCKET_ELEMS; ++j, ++lun_p) {
			/* slot j of bucket k holds LUN index k*8 + j - 1
			 * (the header consumed one slot); stop once all
			 * lun_cnt normal LUNs have been emitted */
			if ((k * RL_BUCKET_ELEMS) + j > lun_cnt)
				break;
			int_to_scsilun(lun++, lun_p);
		}
		if (j < RL_BUCKET_ELEMS)
			break;	/* partial bucket; flushed below */
		n = j * sz_lun;
		res = p_fill_from_dev_buffer(scp, arr, n, off_rsp);
		if (res)
			return res;
		off_rsp += n;
	}
	/* lun_p points at the first unused slot of the partial bucket */
	if (wlun_cnt) {
		int_to_scsilun(SCSI_W_LUN_REPORT_LUNS, lun_p);
		++j;
	}
	if (j > 0)
		res = p_fill_from_dev_buffer(scp, arr, j * sz_lun, off_rsp);
	return res;
}
3714 
3715 static struct sdebug_queue *get_queue(struct scsi_cmnd *cmnd)
3716 {
3717 	u32 tag = blk_mq_unique_tag(cmnd->request);
3718 	u16 hwq = blk_mq_unique_tag_to_hwq(tag);
3719 
3720 	pr_debug("tag=%#x, hwq=%d\n", tag, hwq);
3721 	if (WARN_ON_ONCE(hwq >= submit_queues))
3722 		hwq = 0;
3723 	return sdebug_q_arr + hwq;
3724 }
3725 
3726 /* Queued (deferred) command completions converge here. */
static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
{
	bool aborted = sd_dp->aborted;
	int qc_idx;
	int retiring = 0;
	unsigned long iflags;
	struct sdebug_queue *sqp;
	struct sdebug_queued_cmd *sqcp;
	struct scsi_cmnd *scp;
	struct sdebug_dev_info *devip;

	/* mark the deferral consumed; the abort flag is one-shot */
	sd_dp->defer_t = SDEB_DEFER_NONE;
	if (unlikely(aborted))
		sd_dp->aborted = false;
	qc_idx = sd_dp->qc_idx;
	sqp = sdebug_q_arr + sd_dp->sqa_idx;
	if (sdebug_statistics) {
		atomic_inc(&sdebug_completions);
		/* completion may run on a different cpu than submission */
		if (raw_smp_processor_id() != sd_dp->issuing_cpu)
			atomic_inc(&sdebug_miss_cpus);
	}
	if (unlikely((qc_idx < 0) || (qc_idx >= SDEBUG_CANQUEUE))) {
		pr_err("wild qc_idx=%d\n", qc_idx);
		return;
	}
	spin_lock_irqsave(&sqp->qc_lock, iflags);
	sqcp = &sqp->qc_arr[qc_idx];
	scp = sqcp->a_cmnd;
	if (unlikely(scp == NULL)) {
		/* slot already cleared, e.g. by an abort racing with us */
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
		pr_err("scp is NULL, sqa_idx=%d, qc_idx=%d\n",
		       sd_dp->sqa_idx, qc_idx);
		return;
	}
	devip = (struct sdebug_dev_info *)scp->device->hostdata;
	if (likely(devip))
		atomic_dec(&devip->num_in_q);
	else
		pr_err("devip=NULL\n");
	/* non-zero retired_max_queue => user shrank max_queue earlier */
	if (unlikely(atomic_read(&retired_max_queue) > 0))
		retiring = 1;

	sqcp->a_cmnd = NULL;
	if (unlikely(!test_and_clear_bit(qc_idx, sqp->in_use_bm))) {
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
		pr_err("Unexpected completion\n");
		return;
	}

	if (unlikely(retiring)) {	/* user has reduced max_queue */
		int k, retval;

		retval = atomic_read(&retired_max_queue);
		if (qc_idx >= retval) {
			spin_unlock_irqrestore(&sqp->qc_lock, iflags);
			pr_err("index %d too large\n", retval);
			return;
		}
		/* once no in-use slot remains above the new limit, the
		 * retirement window can be closed (set back to 0) */
		k = find_last_bit(sqp->in_use_bm, retval);
		if ((k < sdebug_max_queue) || (k == retval))
			atomic_set(&retired_max_queue, 0);
		else
			atomic_set(&retired_max_queue, k + 1);
	}
	spin_unlock_irqrestore(&sqp->qc_lock, iflags);
	if (unlikely(aborted)) {
		if (sdebug_verbose)
			pr_info("bypassing scsi_done() due to aborted cmd\n");
		return;
	}
	scp->scsi_done(scp); /* callback to mid level */
}
3799 
3800 /* When high resolution timer goes off this function is called. */
3801 static enum hrtimer_restart sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
3802 {
3803 	struct sdebug_defer *sd_dp = container_of(timer, struct sdebug_defer,
3804 						  hrt);
3805 	sdebug_q_cmd_complete(sd_dp);
3806 	return HRTIMER_NORESTART;
3807 }
3808 
3809 /* When work queue schedules work, it calls this function. */
3810 static void sdebug_q_cmd_wq_complete(struct work_struct *work)
3811 {
3812 	struct sdebug_defer *sd_dp = container_of(work, struct sdebug_defer,
3813 						  ew.work);
3814 	sdebug_q_cmd_complete(sd_dp);
3815 }
3816 
/* LU name shared by all devices when uuid_ctl == 2 (see
 * sdebug_device_create()); generated lazily on first use */
static bool got_shared_uuid;
static uuid_t shared_uuid;
3819 
3820 static struct sdebug_dev_info *sdebug_device_create(
3821 			struct sdebug_host_info *sdbg_host, gfp_t flags)
3822 {
3823 	struct sdebug_dev_info *devip;
3824 
3825 	devip = kzalloc(sizeof(*devip), flags);
3826 	if (devip) {
3827 		if (sdebug_uuid_ctl == 1)
3828 			uuid_gen(&devip->lu_name);
3829 		else if (sdebug_uuid_ctl == 2) {
3830 			if (got_shared_uuid)
3831 				devip->lu_name = shared_uuid;
3832 			else {
3833 				uuid_gen(&shared_uuid);
3834 				got_shared_uuid = true;
3835 				devip->lu_name = shared_uuid;
3836 			}
3837 		}
3838 		devip->sdbg_host = sdbg_host;
3839 		list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
3840 	}
3841 	return devip;
3842 }
3843 
3844 static struct sdebug_dev_info *find_build_dev_info(struct scsi_device *sdev)
3845 {
3846 	struct sdebug_host_info *sdbg_host;
3847 	struct sdebug_dev_info *open_devip = NULL;
3848 	struct sdebug_dev_info *devip;
3849 
3850 	sdbg_host = *(struct sdebug_host_info **)shost_priv(sdev->host);
3851 	if (!sdbg_host) {
3852 		pr_err("Host info NULL\n");
3853 		return NULL;
3854 	}
3855 	list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
3856 		if ((devip->used) && (devip->channel == sdev->channel) &&
3857 		    (devip->target == sdev->id) &&
3858 		    (devip->lun == sdev->lun))
3859 			return devip;
3860 		else {
3861 			if ((!devip->used) && (!open_devip))
3862 				open_devip = devip;
3863 		}
3864 	}
3865 	if (!open_devip) { /* try and make a new one */
3866 		open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
3867 		if (!open_devip) {
3868 			pr_err("out of memory at line %d\n", __LINE__);
3869 			return NULL;
3870 		}
3871 	}
3872 
3873 	open_devip->channel = sdev->channel;
3874 	open_devip->target = sdev->id;
3875 	open_devip->lun = sdev->lun;
3876 	open_devip->sdbg_host = sdbg_host;
3877 	atomic_set(&open_devip->num_in_q, 0);
3878 	set_bit(SDEBUG_UA_POR, open_devip->uas_bm);
3879 	open_devip->used = true;
3880 	return open_devip;
3881 }
3882 
3883 static int scsi_debug_slave_alloc(struct scsi_device *sdp)
3884 {
3885 	if (sdebug_verbose)
3886 		pr_info("slave_alloc <%u %u %u %llu>\n",
3887 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
3888 	return 0;
3889 }
3890 
3891 static int scsi_debug_slave_configure(struct scsi_device *sdp)
3892 {
3893 	struct sdebug_dev_info *devip =
3894 			(struct sdebug_dev_info *)sdp->hostdata;
3895 
3896 	if (sdebug_verbose)
3897 		pr_info("slave_configure <%u %u %u %llu>\n",
3898 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
3899 	if (sdp->host->max_cmd_len != SDEBUG_MAX_CMD_LEN)
3900 		sdp->host->max_cmd_len = SDEBUG_MAX_CMD_LEN;
3901 	if (devip == NULL) {
3902 		devip = find_build_dev_info(sdp);
3903 		if (devip == NULL)
3904 			return 1;  /* no resources, will be marked offline */
3905 	}
3906 	sdp->hostdata = devip;
3907 	if (sdebug_no_uld)
3908 		sdp->no_uld_attach = 1;
3909 	config_cdb_len(sdp);
3910 	return 0;
3911 }
3912 
3913 static void scsi_debug_slave_destroy(struct scsi_device *sdp)
3914 {
3915 	struct sdebug_dev_info *devip =
3916 		(struct sdebug_dev_info *)sdp->hostdata;
3917 
3918 	if (sdebug_verbose)
3919 		pr_info("slave_destroy <%u %u %u %llu>\n",
3920 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
3921 	if (devip) {
3922 		/* make this slot available for re-use */
3923 		devip->used = false;
3924 		sdp->hostdata = NULL;
3925 	}
3926 }
3927 
3928 static void stop_qc_helper(struct sdebug_defer *sd_dp,
3929 			   enum sdeb_defer_type defer_t)
3930 {
3931 	if (!sd_dp)
3932 		return;
3933 	if (defer_t == SDEB_DEFER_HRT)
3934 		hrtimer_cancel(&sd_dp->hrt);
3935 	else if (defer_t == SDEB_DEFER_WQ)
3936 		cancel_work_sync(&sd_dp->ew.work);
3937 }
3938 
3939 /* If @cmnd found deletes its timer or work queue and returns true; else
3940    returns false */
static bool stop_queued_cmnd(struct scsi_cmnd *cmnd)
{
	unsigned long iflags;
	int j, k, qmax, r_qmax;
	enum sdeb_defer_type l_defer_t;
	struct sdebug_queue *sqp;
	struct sdebug_queued_cmd *sqcp;
	struct sdebug_dev_info *devip;
	struct sdebug_defer *sd_dp;

	/* search every submission queue for the command */
	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
		spin_lock_irqsave(&sqp->qc_lock, iflags);
		/* scan beyond max_queue while slots are being retired */
		qmax = sdebug_max_queue;
		r_qmax = atomic_read(&retired_max_queue);
		if (r_qmax > qmax)
			qmax = r_qmax;
		for (k = 0; k < qmax; ++k) {
			if (test_bit(k, sqp->in_use_bm)) {
				sqcp = &sqp->qc_arr[k];
				if (cmnd != sqcp->a_cmnd)
					continue;
				/* found */
				devip = (struct sdebug_dev_info *)
						cmnd->device->hostdata;
				if (devip)
					atomic_dec(&devip->num_in_q);
				sqcp->a_cmnd = NULL;
				/* snapshot defer type under the lock, then
				 * drop the lock: stop_qc_helper() may wait
				 * (hrtimer_cancel/cancel_work_sync) */
				sd_dp = sqcp->sd_dp;
				if (sd_dp) {
					l_defer_t = sd_dp->defer_t;
					sd_dp->defer_t = SDEB_DEFER_NONE;
				} else
					l_defer_t = SDEB_DEFER_NONE;
				spin_unlock_irqrestore(&sqp->qc_lock, iflags);
				stop_qc_helper(sd_dp, l_defer_t);
				clear_bit(k, sqp->in_use_bm);
				return true;
			}
		}
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
	}
	return false;
}
3984 
3985 /* Deletes (stops) timers or work queues of all queued commands */
static void stop_all_queued(void)
{
	unsigned long iflags;
	int j, k;
	enum sdeb_defer_type l_defer_t;
	struct sdebug_queue *sqp;
	struct sdebug_queued_cmd *sqcp;
	struct sdebug_dev_info *devip;
	struct sdebug_defer *sd_dp;

	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
		spin_lock_irqsave(&sqp->qc_lock, iflags);
		for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
			if (test_bit(k, sqp->in_use_bm)) {
				sqcp = &sqp->qc_arr[k];
				if (sqcp->a_cmnd == NULL)
					continue;
				devip = (struct sdebug_dev_info *)
					sqcp->a_cmnd->device->hostdata;
				if (devip)
					atomic_dec(&devip->num_in_q);
				sqcp->a_cmnd = NULL;
				/* snapshot defer type under the lock; drop
				 * the lock around stop_qc_helper() since it
				 * may wait on the timer/work to cancel */
				sd_dp = sqcp->sd_dp;
				if (sd_dp) {
					l_defer_t = sd_dp->defer_t;
					sd_dp->defer_t = SDEB_DEFER_NONE;
				} else
					l_defer_t = SDEB_DEFER_NONE;
				spin_unlock_irqrestore(&sqp->qc_lock, iflags);
				stop_qc_helper(sd_dp, l_defer_t);
				clear_bit(k, sqp->in_use_bm);
				/* re-take the lock to continue the scan */
				spin_lock_irqsave(&sqp->qc_lock, iflags);
			}
		}
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
	}
}
4023 
4024 /* Free queued command memory on heap */
4025 static void free_all_queued(void)
4026 {
4027 	int j, k;
4028 	struct sdebug_queue *sqp;
4029 	struct sdebug_queued_cmd *sqcp;
4030 
4031 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
4032 		for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
4033 			sqcp = &sqp->qc_arr[k];
4034 			kfree(sqcp->sd_dp);
4035 			sqcp->sd_dp = NULL;
4036 		}
4037 	}
4038 }
4039 
4040 static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
4041 {
4042 	bool ok;
4043 
4044 	++num_aborts;
4045 	if (SCpnt) {
4046 		ok = stop_queued_cmnd(SCpnt);
4047 		if (SCpnt->device && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
4048 			sdev_printk(KERN_INFO, SCpnt->device,
4049 				    "%s: command%s found\n", __func__,
4050 				    ok ? "" : " not");
4051 	}
4052 	return SUCCESS;
4053 }
4054 
4055 static int scsi_debug_device_reset(struct scsi_cmnd *SCpnt)
4056 {
4057 	++num_dev_resets;
4058 	if (SCpnt && SCpnt->device) {
4059 		struct scsi_device *sdp = SCpnt->device;
4060 		struct sdebug_dev_info *devip =
4061 				(struct sdebug_dev_info *)sdp->hostdata;
4062 
4063 		if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
4064 			sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
4065 		if (devip)
4066 			set_bit(SDEBUG_UA_POR, devip->uas_bm);
4067 	}
4068 	return SUCCESS;
4069 }
4070 
4071 static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt)
4072 {
4073 	struct sdebug_host_info *sdbg_host;
4074 	struct sdebug_dev_info *devip;
4075 	struct scsi_device *sdp;
4076 	struct Scsi_Host *hp;
4077 	int k = 0;
4078 
4079 	++num_target_resets;
4080 	if (!SCpnt)
4081 		goto lie;
4082 	sdp = SCpnt->device;
4083 	if (!sdp)
4084 		goto lie;
4085 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
4086 		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
4087 	hp = sdp->host;
4088 	if (!hp)
4089 		goto lie;
4090 	sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
4091 	if (sdbg_host) {
4092 		list_for_each_entry(devip,
4093 				    &sdbg_host->dev_info_list,
4094 				    dev_list)
4095 			if (devip->target == sdp->id) {
4096 				set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
4097 				++k;
4098 			}
4099 	}
4100 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
4101 		sdev_printk(KERN_INFO, sdp,
4102 			    "%s: %d device(s) found in target\n", __func__, k);
4103 lie:
4104 	return SUCCESS;
4105 }
4106 
4107 static int scsi_debug_bus_reset(struct scsi_cmnd *SCpnt)
4108 {
4109 	struct sdebug_host_info *sdbg_host;
4110 	struct sdebug_dev_info *devip;
4111 	struct scsi_device *sdp;
4112 	struct Scsi_Host *hp;
4113 	int k = 0;
4114 
4115 	++num_bus_resets;
4116 	if (!(SCpnt && SCpnt->device))
4117 		goto lie;
4118 	sdp = SCpnt->device;
4119 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
4120 		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
4121 	hp = sdp->host;
4122 	if (hp) {
4123 		sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
4124 		if (sdbg_host) {
4125 			list_for_each_entry(devip,
4126 					    &sdbg_host->dev_info_list,
4127 					    dev_list) {
4128 				set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
4129 				++k;
4130 			}
4131 		}
4132 	}
4133 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
4134 		sdev_printk(KERN_INFO, sdp,
4135 			    "%s: %d device(s) found in host\n", __func__, k);
4136 lie:
4137 	return SUCCESS;
4138 }
4139 
4140 static int scsi_debug_host_reset(struct scsi_cmnd *SCpnt)
4141 {
4142 	struct sdebug_host_info *sdbg_host;
4143 	struct sdebug_dev_info *devip;
4144 	int k = 0;
4145 
4146 	++num_host_resets;
4147 	if ((SCpnt->device) && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
4148 		sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__);
4149 	spin_lock(&sdebug_host_list_lock);
4150 	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
4151 		list_for_each_entry(devip, &sdbg_host->dev_info_list,
4152 				    dev_list) {
4153 			set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
4154 			++k;
4155 		}
4156 	}
4157 	spin_unlock(&sdebug_host_list_lock);
4158 	stop_all_queued();
4159 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
4160 		sdev_printk(KERN_INFO, SCpnt->device,
4161 			    "%s: %d device(s) found\n", __func__, k);
4162 	return SUCCESS;
4163 }
4164 
/* Write a legacy MBR partition table into the start of the ram store. */
static void __init sdebug_build_parts(unsigned char *ramp,
				      unsigned long store_size)
{
	struct partition *pp;
	int starts[SDEBUG_MAX_PARTS + 2];
	int sectors_per_part, num_sectors, k;
	int heads_by_sects, start_sec, end_sec;

	/* assume partition table already zeroed */
	if ((sdebug_num_parts < 1) || (store_size < 1048576))
		return;
	if (sdebug_num_parts > SDEBUG_MAX_PARTS) {
		sdebug_num_parts = SDEBUG_MAX_PARTS;
		pr_warn("reducing partitions to %d\n", SDEBUG_MAX_PARTS);
	}
	num_sectors = (int)sdebug_store_sectors;
	/* first track is reserved for the partition table itself */
	sectors_per_part = (num_sectors - sdebug_sectors_per)
			   / sdebug_num_parts;
	heads_by_sects = sdebug_heads * sdebug_sectors_per;	/* one cylinder */
	starts[0] = sdebug_sectors_per;
	/* round each start down to a cylinder boundary */
	for (k = 1; k < sdebug_num_parts; ++k)
		starts[k] = ((k * sectors_per_part) / heads_by_sects)
			    * heads_by_sects;
	starts[sdebug_num_parts] = num_sectors;
	starts[sdebug_num_parts + 1] = 0;	/* sentinel terminates loop below */

	ramp[510] = 0x55;	/* magic partition markings */
	ramp[511] = 0xAA;
	pp = (struct partition *)(ramp + 0x1be);	/* first MBR entry */
	for (k = 0; starts[k + 1]; ++k, ++pp) {
		start_sec = starts[k];
		end_sec = starts[k + 1] - 1;
		pp->boot_ind = 0;

		/* convert linear sector numbers to CHS for the MBR entry */
		pp->cyl = start_sec / heads_by_sects;
		pp->head = (start_sec - (pp->cyl * heads_by_sects))
			   / sdebug_sectors_per;
		pp->sector = (start_sec % sdebug_sectors_per) + 1;

		pp->end_cyl = end_sec / heads_by_sects;
		pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
			       / sdebug_sectors_per;
		pp->end_sector = (end_sec % sdebug_sectors_per) + 1;

		pp->start_sect = cpu_to_le32(start_sec);
		pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1);
		pp->sys_ind = 0x83;	/* plain Linux partition */
	}
}
4214 
4215 static void block_unblock_all_queues(bool block)
4216 {
4217 	int j;
4218 	struct sdebug_queue *sqp;
4219 
4220 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp)
4221 		atomic_set(&sqp->blocked, (int)block);
4222 }
4223 
4224 /* Adjust (by rounding down) the sdebug_cmnd_count so abs(every_nth)-1
4225  * commands will be processed normally before triggers occur.
4226  */
4227 static void tweak_cmnd_count(void)
4228 {
4229 	int count, modulo;
4230 
4231 	modulo = abs(sdebug_every_nth);
4232 	if (modulo < 2)
4233 		return;
4234 	block_unblock_all_queues(true);
4235 	count = atomic_read(&sdebug_cmnd_count);
4236 	atomic_set(&sdebug_cmnd_count, (count / modulo) * modulo);
4237 	block_unblock_all_queues(false);
4238 }
4239 
4240 static void clear_queue_stats(void)
4241 {
4242 	atomic_set(&sdebug_cmnd_count, 0);
4243 	atomic_set(&sdebug_completions, 0);
4244 	atomic_set(&sdebug_miss_cpus, 0);
4245 	atomic_set(&sdebug_a_tsf, 0);
4246 }
4247 
4248 static void setup_inject(struct sdebug_queue *sqp,
4249 			 struct sdebug_queued_cmd *sqcp)
4250 {
4251 	if ((atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) > 0) {
4252 		if (sdebug_every_nth > 0)
4253 			sqcp->inj_recovered = sqcp->inj_transport
4254 				= sqcp->inj_dif
4255 				= sqcp->inj_dix = sqcp->inj_short
4256 				= sqcp->inj_host_busy = sqcp->inj_cmd_abort = 0;
4257 		return;
4258 	}
4259 	sqcp->inj_recovered = !!(SDEBUG_OPT_RECOVERED_ERR & sdebug_opts);
4260 	sqcp->inj_transport = !!(SDEBUG_OPT_TRANSPORT_ERR & sdebug_opts);
4261 	sqcp->inj_dif = !!(SDEBUG_OPT_DIF_ERR & sdebug_opts);
4262 	sqcp->inj_dix = !!(SDEBUG_OPT_DIX_ERR & sdebug_opts);
4263 	sqcp->inj_short = !!(SDEBUG_OPT_SHORT_TRANSFER & sdebug_opts);
4264 	sqcp->inj_host_busy = !!(SDEBUG_OPT_HOST_BUSY & sdebug_opts);
4265 	sqcp->inj_cmd_abort = !!(SDEBUG_OPT_CMD_ABORT & sdebug_opts);
4266 }
4267 
4268 /* Complete the processing of the thread that queued a SCSI command to this
4269  * driver. It either completes the command by calling cmnd_done() or
4270  * schedules a hr timer or work queue then returns 0. Returns
4271  * SCSI_MLQUEUE_HOST_BUSY if temporarily out of resources.
4272  */
static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
			 int scsi_result,
			 int (*pfp)(struct scsi_cmnd *,
				    struct sdebug_dev_info *),
			 int delta_jiff, int ndelay)
{
	unsigned long iflags;
	int k, num_in_q, qdepth, inject;
	struct sdebug_queue *sqp;
	struct sdebug_queued_cmd *sqcp;
	struct scsi_device *sdp;
	struct sdebug_defer *sd_dp;

	if (unlikely(devip == NULL)) {
		if (scsi_result == 0)
			scsi_result = DID_NO_CONNECT << 16;
		goto respond_in_thread;
	}
	sdp = cmnd->device;

	/* delta_jiff == 0 means "no delay": complete synchronously */
	if (delta_jiff == 0)
		goto respond_in_thread;

	/* schedule the response at a later time if resources permit */
	sqp = get_queue(cmnd);
	spin_lock_irqsave(&sqp->qc_lock, iflags);
	if (unlikely(atomic_read(&sqp->blocked))) {
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
		return SCSI_MLQUEUE_HOST_BUSY;
	}
	num_in_q = atomic_read(&devip->num_in_q);
	qdepth = cmnd->device->queue_depth;
	inject = 0;
	if (unlikely((qdepth > 0) && (num_in_q >= qdepth))) {
		/* device queue full: respond TASK SET FULL unless an error
		 * result was already set */
		if (scsi_result) {
			spin_unlock_irqrestore(&sqp->qc_lock, iflags);
			goto respond_in_thread;
		} else
			scsi_result = device_qfull_result;
	} else if (unlikely(sdebug_every_nth &&
			    (SDEBUG_OPT_RARE_TSF & sdebug_opts) &&
			    (scsi_result == 0))) {
		/* optional injection: rare TASK SET FULL at qdepth-1 */
		if ((num_in_q == (qdepth - 1)) &&
		    (atomic_inc_return(&sdebug_a_tsf) >=
		     abs(sdebug_every_nth))) {
			atomic_set(&sdebug_a_tsf, 0);
			inject = 1;
			scsi_result = device_qfull_result;
		}
	}

	k = find_first_zero_bit(sqp->in_use_bm, sdebug_max_queue);
	if (unlikely(k >= sdebug_max_queue)) {
		/* no free slot in this submission queue */
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
		if (scsi_result)
			goto respond_in_thread;
		else if (SDEBUG_OPT_ALL_TSF & sdebug_opts)
			scsi_result = device_qfull_result;
		if (SDEBUG_OPT_Q_NOISE & sdebug_opts)
			sdev_printk(KERN_INFO, sdp,
				    "%s: max_queue=%d exceeded, %s\n",
				    __func__, sdebug_max_queue,
				    (scsi_result ?  "status: TASK SET FULL" :
						    "report: host busy"));
		if (scsi_result)
			goto respond_in_thread;
		else
			return SCSI_MLQUEUE_HOST_BUSY;
	}
	/* claim slot k for this command */
	__set_bit(k, sqp->in_use_bm);
	atomic_inc(&devip->num_in_q);
	sqcp = &sqp->qc_arr[k];
	sqcp->a_cmnd = cmnd;
	cmnd->host_scribble = (unsigned char *)sqcp;
	sd_dp = sqcp->sd_dp;
	spin_unlock_irqrestore(&sqp->qc_lock, iflags);
	if (unlikely(sdebug_every_nth && sdebug_any_injecting_opt))
		setup_inject(sqp, sqcp);
	/* sdebug_defer is allocated lazily, once per slot, and kept */
	if (sd_dp == NULL) {
		sd_dp = kzalloc(sizeof(*sd_dp), GFP_ATOMIC);
		if (sd_dp == NULL)
			return SCSI_MLQUEUE_HOST_BUSY;
	}

	/* run the command's response function now; only its *completion*
	 * to the mid-layer is deferred */
	cmnd->result = pfp != NULL ? pfp(cmnd, devip) : 0;
	if (cmnd->result & SDEG_RES_IMMED_MASK) {
		/*
		 * This is the F_DELAY_OVERR case. No delay.
		 */
		cmnd->result &= ~SDEG_RES_IMMED_MASK;
		delta_jiff = ndelay = 0;
	}
	if (cmnd->result == 0 && scsi_result != 0)
		cmnd->result = scsi_result;

	if (unlikely(sdebug_verbose && cmnd->result))
		sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n",
			    __func__, cmnd->result);

	if (delta_jiff > 0 || ndelay > 0) {
		/* positive delay: defer completion via hrtimer */
		ktime_t kt;

		if (delta_jiff > 0) {
			kt = ns_to_ktime((u64)delta_jiff * (NSEC_PER_SEC / HZ));
		} else
			kt = ndelay;
		if (!sd_dp->init_hrt) {
			/* one-time hrtimer setup for this slot */
			sd_dp->init_hrt = true;
			sqcp->sd_dp = sd_dp;
			hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC,
				     HRTIMER_MODE_REL_PINNED);
			sd_dp->hrt.function = sdebug_q_cmd_hrt_complete;
			sd_dp->sqa_idx = sqp - sdebug_q_arr;
			sd_dp->qc_idx = k;
		}
		if (sdebug_statistics)
			sd_dp->issuing_cpu = raw_smp_processor_id();
		sd_dp->defer_t = SDEB_DEFER_HRT;
		hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED);
	} else {	/* jdelay < 0, use work queue */
		if (!sd_dp->init_wq) {
			/* one-time work item setup for this slot */
			sd_dp->init_wq = true;
			sqcp->sd_dp = sd_dp;
			sd_dp->sqa_idx = sqp - sdebug_q_arr;
			sd_dp->qc_idx = k;
			INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);
		}
		if (sdebug_statistics)
			sd_dp->issuing_cpu = raw_smp_processor_id();
		sd_dp->defer_t = SDEB_DEFER_WQ;
		if (unlikely(sqcp->inj_cmd_abort))
			sd_dp->aborted = true;
		schedule_work(&sd_dp->ew.work);
		if (unlikely(sqcp->inj_cmd_abort)) {
			/* injected abort: let the block layer time it out */
			sdev_printk(KERN_INFO, sdp, "abort request tag %d\n",
				    cmnd->request->tag);
			blk_abort_request(cmnd->request);
		}
	}
	if (unlikely((SDEBUG_OPT_Q_NOISE & sdebug_opts) &&
		     (scsi_result == device_qfull_result)))
		sdev_printk(KERN_INFO, sdp,
			    "%s: num_in_q=%d +1, %s%s\n", __func__,
			    num_in_q, (inject ? "<inject> " : ""),
			    "status: TASK SET FULL");
	return 0;

respond_in_thread:	/* call back to mid-layer using invocation thread */
	cmnd->result = pfp != NULL ? pfp(cmnd, devip) : 0;
	cmnd->result &= ~SDEG_RES_IMMED_MASK;
	if (cmnd->result == 0 && scsi_result != 0)
		cmnd->result = scsi_result;
	cmnd->scsi_done(cmnd);
	return 0;
}
4428 
4429 /* Note: The following macros create attribute files in the
4430    /sys/module/scsi_debug/parameters directory. Unfortunately this
4431    driver is unaware of a change and cannot trigger auxiliary actions
4432    as it can when the corresponding attribute in the
4433    /sys/bus/pseudo/drivers/scsi_debug directory is changed.
4434  */
/* parameters writable at runtime carry S_IWUSR; the rest are load-time only */
module_param_named(add_host, sdebug_add_host, int, S_IRUGO | S_IWUSR);
module_param_named(ato, sdebug_ato, int, S_IRUGO);
module_param_named(cdb_len, sdebug_cdb_len, int, 0644);
module_param_named(clustering, sdebug_clustering, bool, S_IRUGO | S_IWUSR);
module_param_named(delay, sdebug_jdelay, int, S_IRUGO | S_IWUSR);
module_param_named(dev_size_mb, sdebug_dev_size_mb, int, S_IRUGO);
module_param_named(dif, sdebug_dif, int, S_IRUGO);
module_param_named(dix, sdebug_dix, int, S_IRUGO);
module_param_named(dsense, sdebug_dsense, int, S_IRUGO | S_IWUSR);
module_param_named(every_nth, sdebug_every_nth, int, S_IRUGO | S_IWUSR);
module_param_named(fake_rw, sdebug_fake_rw, int, S_IRUGO | S_IWUSR);
module_param_named(guard, sdebug_guard, uint, S_IRUGO);
module_param_named(host_lock, sdebug_host_lock, bool, S_IRUGO | S_IWUSR);
module_param_string(inq_vendor, sdebug_inq_vendor_id,
		    sizeof(sdebug_inq_vendor_id), S_IRUGO|S_IWUSR);
module_param_string(inq_product, sdebug_inq_product_id,
		    sizeof(sdebug_inq_product_id), S_IRUGO|S_IWUSR);
module_param_string(inq_rev, sdebug_inq_product_rev,
		    sizeof(sdebug_inq_product_rev), S_IRUGO|S_IWUSR);
module_param_named(lbpu, sdebug_lbpu, int, S_IRUGO);
module_param_named(lbpws, sdebug_lbpws, int, S_IRUGO);
module_param_named(lbpws10, sdebug_lbpws10, int, S_IRUGO);
module_param_named(lbprz, sdebug_lbprz, int, S_IRUGO);
module_param_named(lowest_aligned, sdebug_lowest_aligned, int, S_IRUGO);
module_param_named(max_luns, sdebug_max_luns, int, S_IRUGO | S_IWUSR);
module_param_named(max_queue, sdebug_max_queue, int, S_IRUGO | S_IWUSR);
module_param_named(medium_error_start, sdebug_medium_error_start, int, S_IRUGO | S_IWUSR);
module_param_named(medium_error_count, sdebug_medium_error_count, int, S_IRUGO | S_IWUSR);
module_param_named(ndelay, sdebug_ndelay, int, S_IRUGO | S_IWUSR);
module_param_named(no_lun_0, sdebug_no_lun_0, int, S_IRUGO | S_IWUSR);
module_param_named(no_uld, sdebug_no_uld, int, S_IRUGO);
module_param_named(num_parts, sdebug_num_parts, int, S_IRUGO);
module_param_named(num_tgts, sdebug_num_tgts, int, S_IRUGO | S_IWUSR);
module_param_named(opt_blks, sdebug_opt_blks, int, S_IRUGO);
module_param_named(opts, sdebug_opts, int, S_IRUGO | S_IWUSR);
module_param_named(physblk_exp, sdebug_physblk_exp, int, S_IRUGO);
module_param_named(opt_xferlen_exp, sdebug_opt_xferlen_exp, int, S_IRUGO);
module_param_named(ptype, sdebug_ptype, int, S_IRUGO | S_IWUSR);
module_param_named(removable, sdebug_removable, bool, S_IRUGO | S_IWUSR);
module_param_named(scsi_level, sdebug_scsi_level, int, S_IRUGO);
module_param_named(sector_size, sdebug_sector_size, int, S_IRUGO);
module_param_named(statistics, sdebug_statistics, bool, S_IRUGO | S_IWUSR);
module_param_named(strict, sdebug_strict, bool, S_IRUGO | S_IWUSR);
module_param_named(submit_queues, submit_queues, int, S_IRUGO);
module_param_named(unmap_alignment, sdebug_unmap_alignment, int, S_IRUGO);
module_param_named(unmap_granularity, sdebug_unmap_granularity, int, S_IRUGO);
module_param_named(unmap_max_blocks, sdebug_unmap_max_blocks, int, S_IRUGO);
module_param_named(unmap_max_desc, sdebug_unmap_max_desc, int, S_IRUGO);
module_param_named(virtual_gb, sdebug_virtual_gb, int, S_IRUGO | S_IWUSR);
module_param_named(uuid_ctl, sdebug_uuid_ctl, int, S_IRUGO);
module_param_named(vpd_use_hostno, sdebug_vpd_use_hostno, int,
		   S_IRUGO | S_IWUSR);
module_param_named(write_same_length, sdebug_write_same_length, int,
		   S_IRUGO | S_IWUSR);

MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
MODULE_DESCRIPTION("SCSI debug adapter driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(SDEBUG_VERSION);

/* one-line descriptions shown by modinfo(8) */
MODULE_PARM_DESC(add_host, "0..127 hosts allowed(def=1)");
MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
MODULE_PARM_DESC(cdb_len, "suggest CDB lengths to drivers (def=10)");
MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny");
MODULE_PARM_DESC(dev_size_mb, "size in MiB of ram shared by devs(def=8)");
MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
MODULE_PARM_DESC(host_lock, "host_lock is ignored (def=0)");
MODULE_PARM_DESC(inq_vendor, "SCSI INQUIRY vendor string (def=\"Linux\")");
MODULE_PARM_DESC(inq_product, "SCSI INQUIRY product string (def=\"scsi_debug\")");
MODULE_PARM_DESC(inq_rev, "SCSI INQUIRY revision string (def=\""
		 SDEBUG_VERSION "\")");
MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
MODULE_PARM_DESC(lbprz,
	"on read unmapped LBs return 0 when 1 (def), return 0xff when 2");
MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to max(def))");
MODULE_PARM_DESC(medium_error_start, "starting sector number to return MEDIUM error");
MODULE_PARM_DESC(medium_error_count, "count of sectors to return follow on MEDIUM error");
MODULE_PARM_DESC(ndelay, "response delay in nanoseconds (def=0 -> ignore)");
MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0))");
MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=1024)");
MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
MODULE_PARM_DESC(opt_xferlen_exp, "optimal transfer length granularity exponent (def=physblk_exp)");
MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=7[SPC-5])");
MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
MODULE_PARM_DESC(statistics, "collect statistics on commands, queues (def=0)");
MODULE_PARM_DESC(strict, "stricter checks: reserved field in cdb (def=0)");
4537 MODULE_PARM_DESC(submit_queues, "support for block multi-queue (def=1)");
4538 MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
4539 MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
4540 MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)");
4541 MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
4542 MODULE_PARM_DESC(uuid_ctl,
4543 		 "1->use uuid for lu name, 0->don't, 2->all use same (def=0)");
4544 MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)");
4545 MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
4546 MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
4547 
4548 #define SDEBUG_INFO_LEN 256
4549 static char sdebug_info[SDEBUG_INFO_LEN];
4550 
4551 static const char *scsi_debug_info(struct Scsi_Host *shp)
4552 {
4553 	int k;
4554 
4555 	k = scnprintf(sdebug_info, SDEBUG_INFO_LEN, "%s: version %s [%s]\n",
4556 		      my_name, SDEBUG_VERSION, sdebug_version_date);
4557 	if (k >= (SDEBUG_INFO_LEN - 1))
4558 		return sdebug_info;
4559 	scnprintf(sdebug_info + k, SDEBUG_INFO_LEN - k,
4560 		  "  dev_size_mb=%d, opts=0x%x, submit_queues=%d, %s=%d",
4561 		  sdebug_dev_size_mb, sdebug_opts, submit_queues,
4562 		  "statistics", (int)sdebug_statistics);
4563 	return sdebug_info;
4564 }
4565 
4566 /* 'echo <val> > /proc/scsi/scsi_debug/<host_id>' writes to opts */
4567 static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer,
4568 				 int length)
4569 {
4570 	char arr[16];
4571 	int opts;
4572 	int minLen = length > 15 ? 15 : length;
4573 
4574 	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
4575 		return -EACCES;
4576 	memcpy(arr, buffer, minLen);
4577 	arr[minLen] = '\0';
4578 	if (1 != sscanf(arr, "%d", &opts))
4579 		return -EINVAL;
4580 	sdebug_opts = opts;
4581 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
4582 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
4583 	if (sdebug_every_nth != 0)
4584 		tweak_cmnd_count();
4585 	return length;
4586 }
4587 
/* Output seen with 'cat /proc/scsi/scsi_debug/<host_id>'. It will be the
 * same for each scsi_debug host (if more than one). Some of the counters
 * output are not atomics so might be inaccurate in a busy system. */
static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
{
	int f, j, l;
	struct sdebug_queue *sqp;

	/* Version banner followed by the main tunables currently in effect. */
	seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n",
		   SDEBUG_VERSION, sdebug_version_date);
	seq_printf(m, "num_tgts=%d, %ssize=%d MB, opts=0x%x, every_nth=%d\n",
		   sdebug_num_tgts, "shared (ram) ", sdebug_dev_size_mb,
		   sdebug_opts, sdebug_every_nth);
	seq_printf(m, "delay=%d, ndelay=%d, max_luns=%d, sector_size=%d %s\n",
		   sdebug_jdelay, sdebug_ndelay, sdebug_max_luns,
		   sdebug_sector_size, "bytes");
	seq_printf(m, "cylinders=%d, heads=%d, sectors=%d, command aborts=%d\n",
		   sdebug_cylinders_per, sdebug_heads, sdebug_sectors_per,
		   num_aborts);
	seq_printf(m, "RESETs: device=%d, target=%d, bus=%d, host=%d\n",
		   num_dev_resets, num_target_resets, num_bus_resets,
		   num_host_resets);
	seq_printf(m, "dix_reads=%d, dix_writes=%d, dif_errors=%d\n",
		   dix_reads, dix_writes, dif_errors);
	seq_printf(m, "usec_in_jiffy=%lu, statistics=%d\n", TICK_NSEC / 1000,
		   sdebug_statistics);
	seq_printf(m, "cmnd_count=%d, completions=%d, %s=%d, a_tsf=%d\n",
		   atomic_read(&sdebug_cmnd_count),
		   atomic_read(&sdebug_completions),
		   "miss_cpus", atomic_read(&sdebug_miss_cpus),
		   atomic_read(&sdebug_a_tsf));

	/* Per submit-queue occupancy: report first/last busy bits if any. */
	seq_printf(m, "submit_queues=%d\n", submit_queues);
	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
		seq_printf(m, "  queue %d:\n", j);
		f = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
		if (f != sdebug_max_queue) {
			l = find_last_bit(sqp->in_use_bm, sdebug_max_queue);
			seq_printf(m, "    in_use_bm BUSY: %s: %d,%d\n",
				   "first,last bits", f, l);
		}
	}
	return 0;
}
4631 }
4632 
/* Show current command response delay (in jiffies; see delay_store()). */
static ssize_t delay_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_jdelay);
}
4637 /* Returns -EBUSY if jdelay is being changed and commands are queued. The unit
4638  * of delay is jiffies.
4639  */
4640 static ssize_t delay_store(struct device_driver *ddp, const char *buf,
4641 			   size_t count)
4642 {
4643 	int jdelay, res;
4644 
4645 	if (count > 0 && sscanf(buf, "%d", &jdelay) == 1) {
4646 		res = count;
4647 		if (sdebug_jdelay != jdelay) {
4648 			int j, k;
4649 			struct sdebug_queue *sqp;
4650 
4651 			block_unblock_all_queues(true);
4652 			for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
4653 			     ++j, ++sqp) {
4654 				k = find_first_bit(sqp->in_use_bm,
4655 						   sdebug_max_queue);
4656 				if (k != sdebug_max_queue) {
4657 					res = -EBUSY;   /* queued commands */
4658 					break;
4659 				}
4660 			}
4661 			if (res > 0) {
4662 				sdebug_jdelay = jdelay;
4663 				sdebug_ndelay = 0;
4664 			}
4665 			block_unblock_all_queues(false);
4666 		}
4667 		return res;
4668 	}
4669 	return -EINVAL;
4670 }
4671 static DRIVER_ATTR_RW(delay);
4672 
/* Show current command response delay in nanoseconds (0 -> not in use). */
static ssize_t ndelay_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ndelay);
}
4677 /* Returns -EBUSY if ndelay is being changed and commands are queued */
4678 /* If > 0 and accepted then sdebug_jdelay is set to JDELAY_OVERRIDDEN */
4679 static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
4680 			    size_t count)
4681 {
4682 	int ndelay, res;
4683 
4684 	if ((count > 0) && (1 == sscanf(buf, "%d", &ndelay)) &&
4685 	    (ndelay >= 0) && (ndelay < (1000 * 1000 * 1000))) {
4686 		res = count;
4687 		if (sdebug_ndelay != ndelay) {
4688 			int j, k;
4689 			struct sdebug_queue *sqp;
4690 
4691 			block_unblock_all_queues(true);
4692 			for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
4693 			     ++j, ++sqp) {
4694 				k = find_first_bit(sqp->in_use_bm,
4695 						   sdebug_max_queue);
4696 				if (k != sdebug_max_queue) {
4697 					res = -EBUSY;   /* queued commands */
4698 					break;
4699 				}
4700 			}
4701 			if (res > 0) {
4702 				sdebug_ndelay = ndelay;
4703 				sdebug_jdelay = ndelay  ? JDELAY_OVERRIDDEN
4704 							: DEF_JDELAY;
4705 			}
4706 			block_unblock_all_queues(false);
4707 		}
4708 		return res;
4709 	}
4710 	return -EINVAL;
4711 }
4712 static DRIVER_ATTR_RW(ndelay);
4713 
/* Show the current option flags in hex (see MODULE_PARM_DESC(opts, ...)). */
static ssize_t opts_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "0x%x\n", sdebug_opts);
}
4718 
4719 static ssize_t opts_store(struct device_driver *ddp, const char *buf,
4720 			  size_t count)
4721 {
4722 	int opts;
4723 	char work[20];
4724 
4725 	if (sscanf(buf, "%10s", work) == 1) {
4726 		if (strncasecmp(work, "0x", 2) == 0) {
4727 			if (kstrtoint(work + 2, 16, &opts) == 0)
4728 				goto opts_done;
4729 		} else {
4730 			if (kstrtoint(work, 10, &opts) == 0)
4731 				goto opts_done;
4732 		}
4733 	}
4734 	return -EINVAL;
4735 opts_done:
4736 	sdebug_opts = opts;
4737 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
4738 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
4739 	tweak_cmnd_count();
4740 	return count;
4741 }
4742 static DRIVER_ATTR_RW(opts);
4743 
/* SCSI peripheral device type (e.g. 0 -> disk) reported by INQUIRY. */
static ssize_t ptype_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ptype);
}
static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
			   size_t count)
{
	int n;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		sdebug_ptype = n;
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(ptype);

/* Sense data format: non-zero -> descriptor format, 0 -> fixed format. */
static ssize_t dsense_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dsense);
}
static ssize_t dsense_store(struct device_driver *ddp, const char *buf,
			    size_t count)
{
	int n;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		sdebug_dsense = n;
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(dsense);
4777 
static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_fake_rw);
}
/* Toggle fake (no data transfer) reads/writes. When turning fake_rw off
 * and no ramdisk exists yet (module was loaded with fake_rw=1 so
 * scsi_debug_init() skipped the allocation), lazily allocate the backing
 * store so real copies become possible. */
static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
			     size_t count)
{
	int n;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		n = (n > 0);	/* normalize to 0 or 1 */
		sdebug_fake_rw = (sdebug_fake_rw > 0);
		if (sdebug_fake_rw != n) {
			if ((0 == n) && (NULL == fake_storep)) {
				unsigned long sz =
					(unsigned long)sdebug_dev_size_mb *
					1048576;

				fake_storep = vzalloc(sz);
				if (NULL == fake_storep) {
					pr_err("out of memory, 9\n");
					return -ENOMEM;
				}
			}
			sdebug_fake_rw = n;
		}
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(fake_rw);
4809 
/* When non-zero, LUN 0 is not presented (a well-known LU is instead). */
static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_lun_0);
}
static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf,
			      size_t count)
{
	int n;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		sdebug_no_lun_0 = n;
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(no_lun_0);

/* Number of simulated targets per host; changing it re-derives the
 * target/LUN layout via sdebug_max_tgts_luns(). */
static ssize_t num_tgts_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_tgts);
}
static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf,
			      size_t count)
{
	int n;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		sdebug_num_tgts = n;
		sdebug_max_tgts_luns();
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(num_tgts);
4844 
/* Read-only: ramdisk size in MiB, fixed at module load. */
static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dev_size_mb);
}
static DRIVER_ATTR_RO(dev_size_mb);

/* Read-only: number of partitions built at module load. */
static ssize_t num_parts_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_parts);
}
static DRIVER_ATTR_RO(num_parts);
4856 
/* Inject an error (per opts) every nth command; 0 disables injection. */
static ssize_t every_nth_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_every_nth);
}
static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
			       size_t count)
{
	int nth;

	if ((count > 0) && (1 == sscanf(buf, "%d", &nth))) {
		sdebug_every_nth = nth;
		/* Injection counting relies on the statistics machinery. */
		if (nth && !sdebug_statistics) {
			pr_info("every_nth needs statistics=1, set it\n");
			sdebug_statistics = true;
		}
		tweak_cmnd_count();
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(every_nth);
4878 
/* Number of LUNs per target (capped at 256). */
static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_luns);
}
static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
			      size_t count)
{
	int n;
	bool changed;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		if (n > 256) {
			pr_warn("max_luns can be no more than 256\n");
			return -EINVAL;
		}
		changed = (sdebug_max_luns != n);
		sdebug_max_luns = n;
		sdebug_max_tgts_luns();
		/* SPC-3 and later: raise a REPORTED LUNS DATA HAS CHANGED
		 * unit attention on every simulated device. */
		if (changed && (sdebug_scsi_level >= 5)) {	/* >= SPC-3 */
			struct sdebug_host_info *sdhp;
			struct sdebug_dev_info *dp;

			spin_lock(&sdebug_host_list_lock);
			list_for_each_entry(sdhp, &sdebug_host_list,
					    host_list) {
				list_for_each_entry(dp, &sdhp->dev_info_list,
						    dev_list) {
					set_bit(SDEBUG_UA_LUNS_CHANGED,
						dp->uas_bm);
				}
			}
			spin_unlock(&sdebug_host_list_lock);
		}
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(max_luns);
4917 
static ssize_t max_queue_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_queue);
}
/* N.B. max_queue can be changed while there are queued commands. In flight
 * commands beyond the new max_queue will be completed. */
static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
			       size_t count)
{
	int j, n, k, a;
	struct sdebug_queue *sqp;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
	    (n <= SDEBUG_CANQUEUE)) {
		block_unblock_all_queues(true);
		/* k = highest in-use queue slot over all submit queues */
		k = 0;
		for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
		     ++j, ++sqp) {
			a = find_last_bit(sqp->in_use_bm, SDEBUG_CANQUEUE);
			if (a > k)
				k = a;
		}
		sdebug_max_queue = n;
		/* If commands are queued above the new limit, remember the
		 * old high-water mark so they can drain ("retire"). */
		if (k == SDEBUG_CANQUEUE)
			atomic_set(&retired_max_queue, 0);
		else if (k >= n)
			atomic_set(&retired_max_queue, k + 1);
		else
			atomic_set(&retired_max_queue, 0);
		block_unblock_all_queues(false);
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(max_queue);
4953 
/* Read-only: non-zero stops upper-level drivers (e.g. sd) attaching. */
static ssize_t no_uld_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_uld);
}
static DRIVER_ATTR_RO(no_uld);

/* Read-only: simulated SCSI (ANSI) level, fixed at module load. */
static ssize_t scsi_level_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_scsi_level);
}
static DRIVER_ATTR_RO(scsi_level);
4965 
/* Virtual capacity in GiB; 0 means use dev_size_mb as-is. */
static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_virtual_gb);
}
static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
				size_t count)
{
	int n;
	bool changed;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		changed = (sdebug_virtual_gb != n);
		sdebug_virtual_gb = n;
		sdebug_capacity = get_sdebug_capacity();
		/* Capacity changed: raise CAPACITY DATA HAS CHANGED unit
		 * attention on every simulated device. */
		if (changed) {
			struct sdebug_host_info *sdhp;
			struct sdebug_dev_info *dp;

			spin_lock(&sdebug_host_list_lock);
			list_for_each_entry(sdhp, &sdebug_host_list,
					    host_list) {
				list_for_each_entry(dp, &sdhp->dev_info_list,
						    dev_list) {
					set_bit(SDEBUG_UA_CAPACITY_CHANGED,
						dp->uas_bm);
				}
			}
			spin_unlock(&sdebug_host_list_lock);
		}
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(virtual_gb);
5000 
/* Current number of simulated hosts (adapters). */
static ssize_t add_host_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_add_host);
}

static int sdebug_add_adapter(void);
static void sdebug_remove_adapter(void);

/* Writing a positive number adds that many hosts; a negative number
 * removes that many (most recently added first). */
static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
			      size_t count)
{
	int delta_hosts;

	if (sscanf(buf, "%d", &delta_hosts) != 1)
		return -EINVAL;
	if (delta_hosts > 0) {
		do {
			sdebug_add_adapter();
		} while (--delta_hosts);
	} else if (delta_hosts < 0) {
		do {
			sdebug_remove_adapter();
		} while (++delta_hosts);
	}
	return count;
}
static DRIVER_ATTR_RW(add_host);
5028 
/* When non-zero, VPD device identifiers include the host number so each
 * host's devices look distinct; 0 makes ids host-independent. */
static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_vpd_use_hostno);
}
static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf,
				    size_t count)
{
	int n;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		sdebug_vpd_use_hostno = n;
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(vpd_use_hostno);
5045 
/* Enable/disable per-queue command statistics collection. */
static ssize_t statistics_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_statistics);
}
static ssize_t statistics_store(struct device_driver *ddp, const char *buf,
				size_t count)
{
	int n;

	if ((count > 0) && (sscanf(buf, "%d", &n) == 1) && (n >= 0)) {
		if (n > 0)
			sdebug_statistics = true;
		else {
			/* Reset counters when turning statistics off. */
			clear_queue_stats();
			sdebug_statistics = false;
		}
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(statistics);
5067 
/* The following attributes are read-only mirrors of module parameters
 * fixed at load time. */
static ssize_t sector_size_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_sector_size);
}
static DRIVER_ATTR_RO(sector_size);

static ssize_t submit_queues_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", submit_queues);
}
static DRIVER_ATTR_RO(submit_queues);

static ssize_t dix_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dix);
}
static DRIVER_ATTR_RO(dix);

static ssize_t dif_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dif);
}
static DRIVER_ATTR_RO(dif);

static ssize_t guard_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_guard);
}
static DRIVER_ATTR_RO(guard);

static ssize_t ato_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ato);
}
static DRIVER_ATTR_RO(ato);
5103 
/* Show the logical block provisioning map as a bit-list ("%pbl"), or the
 * full LBA range when LBP is disabled. */
static ssize_t map_show(struct device_driver *ddp, char *buf)
{
	ssize_t count;

	/* NOTE(review): "%u" is used for sdebug_store_sectors, whose
	 * declaration is not visible here — confirm it is not a long type,
	 * otherwise the format specifier is mismatched. */
	if (!scsi_debug_lbp())
		return scnprintf(buf, PAGE_SIZE, "0-%u\n",
				 sdebug_store_sectors);

	/* Reserve one byte (PAGE_SIZE - 1) so the '\n' + NUL fit below. */
	count = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
			  (int)map_size, map_storep);
	buf[count++] = '\n';
	buf[count] = '\0';

	return count;
}
static DRIVER_ATTR_RO(map);
5120 
/* Claim (or not) that the simulated media is removable. */
static ssize_t removable_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_removable ? 1 : 0);
}
static ssize_t removable_store(struct device_driver *ddp, const char *buf,
			       size_t count)
{
	int n;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		sdebug_removable = (n > 0);
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(removable);
5137 
static ssize_t host_lock_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_host_lock);
}
/* N.B. sdebug_host_lock does nothing, kept for backward compatibility */
static ssize_t host_lock_store(struct device_driver *ddp, const char *buf,
			       size_t count)
{
	int n;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		sdebug_host_lock = (n > 0);
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(host_lock);
5155 
/* When set, perform stricter CDB checks (e.g. reserved fields must be 0). */
static ssize_t strict_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_strict);
}
static ssize_t strict_store(struct device_driver *ddp, const char *buf,
			    size_t count)
{
	int n;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		sdebug_strict = (n > 0);
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(strict);
5172 
/* Read-only: whether LU names are UUID based, fixed at module load. */
static ssize_t uuid_ctl_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_uuid_ctl);
}
static DRIVER_ATTR_RO(uuid_ctl);
5178 
/* Suggested CDB length; changing it reconfigures all attached devices. */
static ssize_t cdb_len_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_cdb_len);
}
static ssize_t cdb_len_store(struct device_driver *ddp, const char *buf,
			     size_t count)
{
	int ret, n;

	ret = kstrtoint(buf, 0, &n);
	if (ret)
		return ret;
	sdebug_cdb_len = n;
	all_config_cdb_len();
	return count;
}
static DRIVER_ATTR_RW(cdb_len);
5196 
5197 
/* Note: The following array creates attribute files in the
   /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
   files (over those found in the /sys/module/scsi_debug/parameters
   directory) is that auxiliary actions can be triggered when an attribute
   is changed. For example see: add_host_store() above.
 */

static struct attribute *sdebug_drv_attrs[] = {
	&driver_attr_delay.attr,
	&driver_attr_opts.attr,
	&driver_attr_ptype.attr,
	&driver_attr_dsense.attr,
	&driver_attr_fake_rw.attr,
	&driver_attr_no_lun_0.attr,
	&driver_attr_num_tgts.attr,
	&driver_attr_dev_size_mb.attr,
	&driver_attr_num_parts.attr,
	&driver_attr_every_nth.attr,
	&driver_attr_max_luns.attr,
	&driver_attr_max_queue.attr,
	&driver_attr_no_uld.attr,
	&driver_attr_scsi_level.attr,
	&driver_attr_virtual_gb.attr,
	&driver_attr_add_host.attr,
	&driver_attr_vpd_use_hostno.attr,
	&driver_attr_sector_size.attr,
	&driver_attr_statistics.attr,
	&driver_attr_submit_queues.attr,
	&driver_attr_dix.attr,
	&driver_attr_dif.attr,
	&driver_attr_guard.attr,
	&driver_attr_ato.attr,
	&driver_attr_map.attr,
	&driver_attr_removable.attr,
	&driver_attr_host_lock.attr,
	&driver_attr_ndelay.attr,
	&driver_attr_strict.attr,
	&driver_attr_uuid_ctl.attr,
	&driver_attr_cdb_len.attr,
	NULL,
};
ATTRIBUTE_GROUPS(sdebug_drv);
5240 
5241 static struct device *pseudo_primary;
5242 
/* Module init: validate parameters, allocate the simulated store(s) and
 * register the pseudo device/bus/driver plus the initial host adapters.
 * Returns 0 on success or a negative errno; all allocations are unwound
 * on the error paths at the bottom. */
static int __init scsi_debug_init(void)
{
	unsigned long sz;
	int host_to_add;
	int k;
	int ret;

	atomic_set(&retired_max_queue, 0);

	/* ndelay (nanosecond delay) must be under 1 second; when set it
	 * overrides the jiffy-based delay. */
	if (sdebug_ndelay >= 1000 * 1000 * 1000) {
		pr_warn("ndelay must be less than 1 second, ignored\n");
		sdebug_ndelay = 0;
	} else if (sdebug_ndelay > 0)
		sdebug_jdelay = JDELAY_OVERRIDDEN;

	/* Only the common power-of-two logical block sizes are supported. */
	switch (sdebug_sector_size) {
	case  512:
	case 1024:
	case 2048:
	case 4096:
		break;
	default:
		pr_err("invalid sector_size %d\n", sdebug_sector_size);
		return -EINVAL;
	}

	switch (sdebug_dif) {
	case T10_PI_TYPE0_PROTECTION:
		break;
	case T10_PI_TYPE1_PROTECTION:
	case T10_PI_TYPE2_PROTECTION:
	case T10_PI_TYPE3_PROTECTION:
		have_dif_prot = true;
		break;

	default:
		pr_err("dif must be 0, 1, 2 or 3\n");
		return -EINVAL;
	}

	if (sdebug_guard > 1) {
		pr_err("guard must be 0 or 1\n");
		return -EINVAL;
	}

	if (sdebug_ato > 1) {
		pr_err("ato must be 0 or 1\n");
		return -EINVAL;
	}

	if (sdebug_physblk_exp > 15) {
		pr_err("invalid physblk_exp %u\n", sdebug_physblk_exp);
		return -EINVAL;
	}
	if (sdebug_max_luns > 256) {
		pr_warn("max_luns can be no more than 256, use default\n");
		sdebug_max_luns = DEF_MAX_LUNS;
	}

	if (sdebug_lowest_aligned > 0x3fff) {
		pr_err("lowest_aligned too big: %u\n", sdebug_lowest_aligned);
		return -EINVAL;
	}

	if (submit_queues < 1) {
		pr_err("submit_queues must be 1 or more\n");
		return -EINVAL;
	}
	/* One sdebug_queue (with its lock) per block-mq submit queue. */
	sdebug_q_arr = kcalloc(submit_queues, sizeof(struct sdebug_queue),
			       GFP_KERNEL);
	if (sdebug_q_arr == NULL)
		return -ENOMEM;
	for (k = 0; k < submit_queues; ++k)
		spin_lock_init(&sdebug_q_arr[k].qc_lock);

	if (sdebug_dev_size_mb < 1)
		sdebug_dev_size_mb = 1;  /* force minimum 1 MB ramdisk */
	sz = (unsigned long)sdebug_dev_size_mb * 1048576;
	sdebug_store_sectors = sz / sdebug_sector_size;
	sdebug_capacity = get_sdebug_capacity();

	/* play around with geometry, don't waste too much on track 0 */
	sdebug_heads = 8;
	sdebug_sectors_per = 32;
	if (sdebug_dev_size_mb >= 256)
		sdebug_heads = 64;
	else if (sdebug_dev_size_mb >= 16)
		sdebug_heads = 32;
	sdebug_cylinders_per = (unsigned long)sdebug_capacity /
			       (sdebug_sectors_per * sdebug_heads);
	if (sdebug_cylinders_per >= 1024) {
		/* other LLDs do this; implies >= 1GB ram disk ... */
		sdebug_heads = 255;
		sdebug_sectors_per = 63;
		sdebug_cylinders_per = (unsigned long)sdebug_capacity /
			       (sdebug_sectors_per * sdebug_heads);
	}

	/* Backing ramdisk, skipped entirely when fake_rw is on (it can be
	 * allocated later via fake_rw_store() if fake_rw is turned off). */
	if (sdebug_fake_rw == 0) {
		fake_storep = vzalloc(sz);
		if (NULL == fake_storep) {
			pr_err("out of memory, 1\n");
			ret = -ENOMEM;
			goto free_q_arr;
		}
		if (sdebug_num_parts > 0)
			sdebug_build_parts(fake_storep, sz);
	}

	/* Data integrity (DIX): one protection tuple per logical block,
	 * initialized to 0xff (escape/unwritten pattern). */
	if (sdebug_dix) {
		int dif_size;

		dif_size = sdebug_store_sectors * sizeof(struct t10_pi_tuple);
		dif_storep = vmalloc(dif_size);

		/* NOTE(review): informational message logged at pr_err
		 * level — consider pr_info. */
		pr_err("dif_storep %u bytes @ %p\n", dif_size, dif_storep);

		if (dif_storep == NULL) {
			pr_err("out of mem. (DIX)\n");
			ret = -ENOMEM;
			goto free_vm;
		}

		memset(dif_storep, 0xff, dif_size);
	}

	/* Logical Block Provisioning */
	if (scsi_debug_lbp()) {
		sdebug_unmap_max_blocks =
			clamp(sdebug_unmap_max_blocks, 0U, 0xffffffffU);

		sdebug_unmap_max_desc =
			clamp(sdebug_unmap_max_desc, 0U, 256U);

		sdebug_unmap_granularity =
			clamp(sdebug_unmap_granularity, 1U, 0xffffffffU);

		if (sdebug_unmap_alignment &&
		    sdebug_unmap_granularity <=
		    sdebug_unmap_alignment) {
			pr_err("ERR: unmap_granularity <= unmap_alignment\n");
			ret = -EINVAL;
			goto free_vm;
		}

		/* One bit per provisioning block, all initially unmapped. */
		map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
		map_storep = vmalloc(array_size(sizeof(long),
						BITS_TO_LONGS(map_size)));

		pr_info("%lu provisioning blocks\n", map_size);

		if (map_storep == NULL) {
			pr_err("out of mem. (MAP)\n");
			ret = -ENOMEM;
			goto free_vm;
		}

		bitmap_zero(map_storep, map_size);

		/* Map first 1KB for partition table */
		if (sdebug_num_parts)
			map_region(0, 2);
	}

	/* Register the pseudo root device, bus and driver that the
	 * simulated adapters hang off. */
	pseudo_primary = root_device_register("pseudo_0");
	if (IS_ERR(pseudo_primary)) {
		pr_warn("root_device_register() error\n");
		ret = PTR_ERR(pseudo_primary);
		goto free_vm;
	}
	ret = bus_register(&pseudo_lld_bus);
	if (ret < 0) {
		pr_warn("bus_register error: %d\n", ret);
		goto dev_unreg;
	}
	ret = driver_register(&sdebug_driverfs_driver);
	if (ret < 0) {
		pr_warn("driver_register error: %d\n", ret);
		goto bus_unreg;
	}

	/* sdebug_add_adapter() increments sdebug_add_host per success. */
	host_to_add = sdebug_add_host;
	sdebug_add_host = 0;

	for (k = 0; k < host_to_add; k++) {
		if (sdebug_add_adapter()) {
			pr_err("sdebug_add_adapter failed k=%d\n", k);
			break;
		}
	}

	if (sdebug_verbose)
		pr_info("built %d host(s)\n", sdebug_add_host);

	return 0;

bus_unreg:
	bus_unregister(&pseudo_lld_bus);
dev_unreg:
	root_device_unregister(pseudo_primary);
free_vm:
	vfree(map_storep);
	vfree(dif_storep);
	vfree(fake_storep);
free_q_arr:
	kfree(sdebug_q_arr);
	return ret;
}
5450 }
5451 
/* Module exit: tear down in reverse order of scsi_debug_init() —
 * adapters first, then driver/bus/root device, then the stores. */
static void __exit scsi_debug_exit(void)
{
	int k = sdebug_add_host;

	stop_all_queued();
	for (; k; k--)
		sdebug_remove_adapter();
	free_all_queued();
	driver_unregister(&sdebug_driverfs_driver);
	bus_unregister(&pseudo_lld_bus);
	root_device_unregister(pseudo_primary);

	vfree(map_storep);
	vfree(dif_storep);
	vfree(fake_storep);
	kfree(sdebug_q_arr);
}
5468 }
5469 
5470 device_initcall(scsi_debug_init);
5471 module_exit(scsi_debug_exit);
5472 
/* Driver-core release callback: frees the host when its embedded struct
 * device's last reference is dropped. */
static void sdebug_release_adapter(struct device *dev)
{
	kfree(to_sdebug_host(dev));
}
5480 
5481 static int sdebug_add_adapter(void)
5482 {
5483 	int k, devs_per_host;
5484 	int error = 0;
5485 	struct sdebug_host_info *sdbg_host;
5486 	struct sdebug_dev_info *sdbg_devinfo, *tmp;
5487 
5488 	sdbg_host = kzalloc(sizeof(*sdbg_host), GFP_KERNEL);
5489 	if (sdbg_host == NULL) {
5490 		pr_err("out of memory at line %d\n", __LINE__);
5491 		return -ENOMEM;
5492 	}
5493 
5494 	INIT_LIST_HEAD(&sdbg_host->dev_info_list);
5495 
5496 	devs_per_host = sdebug_num_tgts * sdebug_max_luns;
5497 	for (k = 0; k < devs_per_host; k++) {
5498 		sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
5499 		if (!sdbg_devinfo) {
5500 			pr_err("out of memory at line %d\n", __LINE__);
5501 			error = -ENOMEM;
5502 			goto clean;
5503 		}
5504 	}
5505 
5506 	spin_lock(&sdebug_host_list_lock);
5507 	list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
5508 	spin_unlock(&sdebug_host_list_lock);
5509 
5510 	sdbg_host->dev.bus = &pseudo_lld_bus;
5511 	sdbg_host->dev.parent = pseudo_primary;
5512 	sdbg_host->dev.release = &sdebug_release_adapter;
5513 	dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_add_host);
5514 
5515 	error = device_register(&sdbg_host->dev);
5516 
5517 	if (error)
5518 		goto clean;
5519 
5520 	++sdebug_add_host;
5521 	return error;
5522 
5523 clean:
5524 	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
5525 				 dev_list) {
5526 		list_del(&sdbg_devinfo->dev_list);
5527 		kfree(sdbg_devinfo);
5528 	}
5529 
5530 	kfree(sdbg_host);
5531 	return error;
5532 }
5533 
5534 static void sdebug_remove_adapter(void)
5535 {
5536 	struct sdebug_host_info *sdbg_host = NULL;
5537 
5538 	spin_lock(&sdebug_host_list_lock);
5539 	if (!list_empty(&sdebug_host_list)) {
5540 		sdbg_host = list_entry(sdebug_host_list.prev,
5541 				       struct sdebug_host_info, host_list);
5542 		list_del(&sdbg_host->host_list);
5543 	}
5544 	spin_unlock(&sdebug_host_list_lock);
5545 
5546 	if (!sdbg_host)
5547 		return;
5548 
5549 	device_unregister(&sdbg_host->dev);
5550 	--sdebug_add_host;
5551 }
5552 
5553 static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
5554 {
5555 	int num_in_q = 0;
5556 	struct sdebug_dev_info *devip;
5557 
5558 	block_unblock_all_queues(true);
5559 	devip = (struct sdebug_dev_info *)sdev->hostdata;
5560 	if (NULL == devip) {
5561 		block_unblock_all_queues(false);
5562 		return	-ENODEV;
5563 	}
5564 	num_in_q = atomic_read(&devip->num_in_q);
5565 
5566 	if (qdepth < 1)
5567 		qdepth = 1;
5568 	/* allow to exceed max host qc_arr elements for testing */
5569 	if (qdepth > SDEBUG_CANQUEUE + 10)
5570 		qdepth = SDEBUG_CANQUEUE + 10;
5571 	scsi_change_queue_depth(sdev, qdepth);
5572 
5573 	if (SDEBUG_OPT_Q_NOISE & sdebug_opts) {
5574 		sdev_printk(KERN_INFO, sdev, "%s: qdepth=%d, num_in_q=%d\n",
5575 			    __func__, qdepth, num_in_q);
5576 	}
5577 	block_unblock_all_queues(false);
5578 	return sdev->queue_depth;
5579 }
5580 
5581 static bool fake_timeout(struct scsi_cmnd *scp)
5582 {
5583 	if (0 == (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth))) {
5584 		if (sdebug_every_nth < -1)
5585 			sdebug_every_nth = -1;
5586 		if (SDEBUG_OPT_TIMEOUT & sdebug_opts)
5587 			return true; /* ignore command causing timeout */
5588 		else if (SDEBUG_OPT_MAC_TIMEOUT & sdebug_opts &&
5589 			 scsi_medium_access_command(scp))
5590 			return true; /* time out reads and writes */
5591 	}
5592 	return false;
5593 }
5594 
5595 static bool fake_host_busy(struct scsi_cmnd *scp)
5596 {
5597 	return (sdebug_opts & SDEBUG_OPT_HOST_BUSY) &&
5598 		(atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) == 0;
5599 }
5600 
/*
 * Main queuecommand entry point for the emulated host.  Looks up the
 * CDB opcode (and, where needed, the service action) in
 * opcode_info_arr, applies the configured fault-injection and strict-
 * checking options, and hands the command plus the selected resp_*
 * handler to schedule_resp(), which completes it after the configured
 * delay.
 */
static int scsi_debug_queuecommand(struct Scsi_Host *shost,
				   struct scsi_cmnd *scp)
{
	u8 sdeb_i;
	struct scsi_device *sdp = scp->device;
	const struct opcode_info_t *oip;
	const struct opcode_info_t *r_oip;
	struct sdebug_dev_info *devip;
	u8 *cmd = scp->cmnd;
	int (*r_pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *) = NULL;
	int k, na;
	int errsts = 0;
	u32 flags;
	u16 sa;
	u8 opcode = cmd[0];
	bool has_wlun_rl;

	scsi_set_resid(scp, 0);
	if (sdebug_statistics)
		atomic_inc(&sdebug_cmnd_count);
	/* optionally log the CDB bytes (only up to 32 of them) */
	if (unlikely(sdebug_verbose &&
		     !(SDEBUG_OPT_NO_CDB_NOISE & sdebug_opts))) {
		char b[120];
		int n, len, sb;

		len = scp->cmd_len;
		sb = (int)sizeof(b);
		if (len > 32)
			strcpy(b, "too long, over 32 bytes");
		else {
			for (k = 0, n = 0; k < len && n < sb; ++k)
				n += scnprintf(b + n, sb - n, "%02x ",
					       (u32)cmd[k]);
		}
		sdev_printk(KERN_INFO, sdp, "%s: tag=%#x, cmd %s\n", my_name,
			    blk_mq_unique_tag(scp->request), b);
	}
	if (fake_host_busy(scp))
		return SCSI_MLQUEUE_HOST_BUSY;
	has_wlun_rl = (sdp->lun == SCSI_W_LUN_REPORT_LUNS);
	/* reject LUNs beyond the configured maximum (REPORT LUNS wlun ok) */
	if (unlikely((sdp->lun >= sdebug_max_luns) && !has_wlun_rl))
		goto err_out;

	sdeb_i = opcode_ind_arr[opcode];	/* fully mapped */
	oip = &opcode_info_arr[sdeb_i];		/* safe if table consistent */
	devip = (struct sdebug_dev_info *)sdp->hostdata;
	if (unlikely(!devip)) {
		devip = find_build_dev_info(sdp);
		if (NULL == devip)
			goto err_out;
	}
	na = oip->num_attached;
	r_pfp = oip->pfp;
	if (na) {	/* multiple commands with this opcode */
		r_oip = oip;
		if (FF_SA & r_oip->flags) {
			/* disambiguate by service action: low 5 bits of
			 * byte 1, or bytes 8-9 big-endian otherwise */
			if (F_SA_LOW & oip->flags)
				sa = 0x1f & cmd[1];
			else
				sa = get_unaligned_be16(cmd + 8);
			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
				if (opcode == oip->opcode && sa == oip->sa)
					break;
			}
		} else {   /* since no service action only check opcode */
			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
				if (opcode == oip->opcode)
					break;
			}
		}
		/* no variant matched: report bad field or bad opcode */
		if (k > na) {
			if (F_SA_LOW & r_oip->flags)
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 4);
			else if (F_SA_HIGH & r_oip->flags)
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, 7);
			else
				mk_sense_invalid_opcode(scp);
			goto check_cond;
		}
	}	/* else (when na==0) we assume the oip is a match */
	flags = oip->flags;
	if (unlikely(F_INV_OP & flags)) {
		mk_sense_invalid_opcode(scp);
		goto check_cond;
	}
	/* only a whitelisted subset of opcodes is valid for the wlun */
	if (unlikely(has_wlun_rl && !(F_RL_WLUN_OK & flags))) {
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, sdp, "%s: Opcode 0x%x not%s\n",
				    my_name, opcode, " supported for wlun");
		mk_sense_invalid_opcode(scp);
		goto check_cond;
	}
	if (unlikely(sdebug_strict)) {	/* check cdb against mask */
		u8 rem;
		int j;

		for (k = 1; k < oip->len_mask[0] && k < 16; ++k) {
			rem = ~oip->len_mask[k] & cmd[k];
			if (rem) {
				/* find highest offending bit for the
				 * sense-key-specific bit pointer */
				for (j = 7; j >= 0; --j, rem <<= 1) {
					if (0x80 & rem)
						break;
				}
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, k, j);
				goto check_cond;
			}
		}
	}
	/* deliver any pending unit attention unless this opcode skips UAs */
	if (unlikely(!(F_SKIP_UA & flags) &&
		     find_first_bit(devip->uas_bm,
				    SDEBUG_NUM_UAS) != SDEBUG_NUM_UAS)) {
		errsts = make_ua(scp, devip);
		if (errsts)
			goto check_cond;
	}
	/* medium-access commands fail with NOT READY on a stopped unit */
	if (unlikely((F_M_ACCESS & flags) && atomic_read(&devip->stopped))) {
		mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x2);
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, sdp, "%s reports: Not ready: "
				    "%s\n", my_name, "initializing command "
				    "required");
		errsts = check_condition_result;
		goto fini;
	}
	/* fake_rw: complete with good status, skipping the resp_* handler */
	if (sdebug_fake_rw && (F_FAKE_RW & flags))
		goto fini;
	if (unlikely(sdebug_every_nth)) {
		if (fake_timeout(scp))
			return 0;	/* ignore command: make trouble */
	}
	if (likely(oip->pfp))
		pfp = oip->pfp;	/* calls a resp_* function */
	else
		pfp = r_pfp;    /* if leaf function ptr NULL, try the root's */

fini:
	if (F_DELAY_OVERR & flags)
		return schedule_resp(scp, devip, errsts, pfp, 0, 0);
	else if ((flags & F_LONG_DELAY) && (sdebug_jdelay > 0 ||
					    sdebug_ndelay > 10000)) {
		/*
		 * Skip long delays if ndelay <= 10 microseconds. Otherwise
		 * for Start Stop Unit (SSU) want at least 1 second delay and
		 * if sdebug_jdelay>1 want a long delay of that many seconds.
		 * For Synchronize Cache want 1/20 of SSU's delay.
		 */
		int jdelay = (sdebug_jdelay < 2) ? 1 : sdebug_jdelay;
		int denom = (flags & F_SYNC_DELAY) ? 20 : 1;

		jdelay = mult_frac(USER_HZ * jdelay, HZ, denom * USER_HZ);
		return schedule_resp(scp, devip, errsts, pfp, jdelay, 0);
	} else
		return schedule_resp(scp, devip, errsts, pfp, sdebug_jdelay,
				     sdebug_ndelay);
check_cond:
	return schedule_resp(scp, devip, check_condition_result, NULL, 0, 0);
err_out:
	return schedule_resp(scp, NULL, DID_NO_CONNECT << 16, NULL, 0, 0);
}
5761 
/*
 * Host template for the emulated adapter; can_queue and dma_boundary
 * are adjusted per module parameters in sdebug_driver_probe().
 */
static struct scsi_host_template sdebug_driver_template = {
	.show_info =		scsi_debug_show_info,
	.write_info =		scsi_debug_write_info,
	.proc_name =		sdebug_proc_name,
	.name =			"SCSI DEBUG",
	.info =			scsi_debug_info,
	.slave_alloc =		scsi_debug_slave_alloc,
	.slave_configure =	scsi_debug_slave_configure,
	.slave_destroy =	scsi_debug_slave_destroy,
	.ioctl =		scsi_debug_ioctl,
	.queuecommand =		scsi_debug_queuecommand,
	.change_queue_depth =	sdebug_change_qdepth,
	.eh_abort_handler =	scsi_debug_abort,
	.eh_device_reset_handler = scsi_debug_device_reset,
	.eh_target_reset_handler = scsi_debug_target_reset,
	.eh_bus_reset_handler = scsi_debug_bus_reset,
	.eh_host_reset_handler = scsi_debug_host_reset,
	.can_queue =		SDEBUG_CANQUEUE,
	.this_id =		7,
	.sg_tablesize =		SG_MAX_SEGMENTS,
	.cmd_per_lun =		DEF_CMD_PER_LUN,
	.max_sectors =		-1U,
	.max_segment_size =	-1U,
	.module =		THIS_MODULE,
	.track_queue_depth =	1,
};
5788 
/*
 * Bus probe callback for a pseudo adapter device: allocate a
 * Scsi_Host, configure queue count, target/LUN limits and DIF/DIX
 * protection capabilities from the module parameters, then add and
 * scan the host.  Returns 0 on success or -ENODEV on failure.
 */
static int sdebug_driver_probe(struct device *dev)
{
	int error = 0;
	struct sdebug_host_info *sdbg_host;
	struct Scsi_Host *hpnt;
	int hprot;

	sdbg_host = to_sdebug_host(dev);

	sdebug_driver_template.can_queue = sdebug_max_queue;
	if (!sdebug_clustering)
		sdebug_driver_template.dma_boundary = PAGE_SIZE - 1;

	/* hostdata holds only a pointer back to sdbg_host (see the
	 * assignment below), hence sizeof(sdbg_host) - a pointer's size -
	 * and not sizeof(*sdbg_host) */
	hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
	if (NULL == hpnt) {
		pr_err("scsi_host_alloc failed\n");
		error = -ENODEV;
		return error;
	}
	if (submit_queues > nr_cpu_ids) {
		pr_warn("%s: trim submit_queues (was %d) to nr_cpu_ids=%u\n",
			my_name, submit_queues, nr_cpu_ids);
		submit_queues = nr_cpu_ids;
	}
	/* Decide whether to tell scsi subsystem that we want mq */
	/* Following should give the same answer for each host */
	hpnt->nr_hw_queues = submit_queues;

	sdbg_host->shost = hpnt;
	*((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host;
	if ((hpnt->this_id >= 0) && (sdebug_num_tgts > hpnt->this_id))
		hpnt->max_id = sdebug_num_tgts + 1;
	else
		hpnt->max_id = sdebug_num_tgts;
	/* = sdebug_max_luns; */
	hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;

	hprot = 0;

	/* map the sdebug_dif module parameter onto SHOST_DIF/DIX flags */
	switch (sdebug_dif) {

	case T10_PI_TYPE1_PROTECTION:
		hprot = SHOST_DIF_TYPE1_PROTECTION;
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE1_PROTECTION;
		break;

	case T10_PI_TYPE2_PROTECTION:
		hprot = SHOST_DIF_TYPE2_PROTECTION;
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE2_PROTECTION;
		break;

	case T10_PI_TYPE3_PROTECTION:
		hprot = SHOST_DIF_TYPE3_PROTECTION;
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE3_PROTECTION;
		break;

	default:
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE0_PROTECTION;
		break;
	}

	scsi_host_set_prot(hpnt, hprot);

	if (have_dif_prot || sdebug_dix)
		pr_info("host protection%s%s%s%s%s%s%s\n",
			(hprot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
			(hprot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
			(hprot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
			(hprot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
			(hprot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
			(hprot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
			(hprot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");

	if (sdebug_guard == 1)
		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
	else
		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);

	/* derive globals consumed by the command path from the options */
	sdebug_verbose = !!(SDEBUG_OPT_NOISE & sdebug_opts);
	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & sdebug_opts);
	if (sdebug_every_nth)	/* need stats counters for every_nth */
		sdebug_statistics = true;
	error = scsi_add_host(hpnt, &sdbg_host->dev);
	if (error) {
		pr_err("scsi_add_host failed\n");
		error = -ENODEV;
		scsi_host_put(hpnt);
	} else
		scsi_scan_host(hpnt);

	return error;
}
5885 
5886 static int sdebug_driver_remove(struct device *dev)
5887 {
5888 	struct sdebug_host_info *sdbg_host;
5889 	struct sdebug_dev_info *sdbg_devinfo, *tmp;
5890 
5891 	sdbg_host = to_sdebug_host(dev);
5892 
5893 	if (!sdbg_host) {
5894 		pr_err("Unable to locate host info\n");
5895 		return -ENODEV;
5896 	}
5897 
5898 	scsi_remove_host(sdbg_host->shost);
5899 
5900 	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
5901 				 dev_list) {
5902 		list_del(&sdbg_devinfo->dev_list);
5903 		kfree(sdbg_devinfo);
5904 	}
5905 
5906 	scsi_host_put(sdbg_host->shost);
5907 	return 0;
5908 }
5909 
static int pseudo_lld_bus_match(struct device *dev,
				struct device_driver *dev_driver)
{
	/* every device on the pseudo bus matches its single driver */
	return 1;
}
5915 
/*
 * Pseudo bus that ties the fake adapter devices to the driver core;
 * probe/remove route to sdebug_driver_probe()/sdebug_driver_remove().
 */
static struct bus_type pseudo_lld_bus = {
	.name = "pseudo",
	.match = pseudo_lld_bus_match,
	.probe = sdebug_driver_probe,
	.remove = sdebug_driver_remove,
	.drv_groups = sdebug_drv_groups,
};
5923