// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
 * Copyright (C) 1992 Eric Youngdale
 * Simulate a host adapter with 2 disks attached. Do a lot of checking
 * to make sure that we are not getting blocks mixed up, and PANIC if
 * anything out of the ordinary is seen.
 * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 *
 * Copyright (C) 2001 - 2021 Douglas Gilbert
 *
 * For documentation see http://sg.danny.cz/sg/scsi_debug.html
 */


#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__

#include <linux/module.h>
#include <linux/align.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/vmalloc.h>
#include <linux/moduleparam.h>
#include <linux/scatterlist.h>
#include <linux/blkdev.h>
#include <linux/crc-t10dif.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/atomic.h>
#include <linux/hrtimer.h>
#include <linux/uuid.h>
#include <linux/t10-pi.h>
#include <linux/msdos_partition.h>
#include <linux/random.h>
#include <linux/xarray.h>
#include <linux/prefetch.h>
#include <linux/debugfs.h>
#include <linux/async.h>
#include <linux/cleanup.h>

#include <net/checksum.h>

#include <linux/unaligned.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsicam.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_dbg.h>

#include "sd.h"
#include "scsi_logging.h"

/* make sure inq_product_rev string corresponds to this version */
#define SDEBUG_VERSION "0191"	/* format to fit INQUIRY revision field */
static const char *sdebug_version_date = "20210520";

#define MY_NAME "scsi_debug"

/* Additional Sense Code (ASC) */
#define NO_ADDITIONAL_SENSE 0x0
#define OVERLAP_ATOMIC_COMMAND_ASC 0x0
#define OVERLAP_ATOMIC_COMMAND_ASCQ 0x23
#define FILEMARK_DETECTED_ASCQ 0x1
#define EOP_EOM_DETECTED_ASCQ 0x2
#define BEGINNING_OF_P_M_DETECTED_ASCQ 0x4
#define EOD_DETECTED_ASCQ 0x5
#define LOGICAL_UNIT_NOT_READY 0x4
#define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
#define UNRECOVERED_READ_ERR 0x11
#define PARAMETER_LIST_LENGTH_ERR 0x1a
#define INVALID_OPCODE 0x20
#define LBA_OUT_OF_RANGE 0x21
#define INVALID_FIELD_IN_CDB 0x24
#define INVALID_FIELD_IN_PARAM_LIST 0x26
#define WRITE_PROTECTED 0x27
#define UA_READY_ASC 0x28
#define UA_RESET_ASC 0x29
#define UA_CHANGED_ASC 0x2a
#define TOO_MANY_IN_PARTITION_ASC 0x3b
#define TARGET_CHANGED_ASC 0x3f
#define LUNS_CHANGED_ASCQ 0x0e
#define INSUFF_RES_ASC 0x55
#define INSUFF_RES_ASCQ 0x3
#define POWER_ON_RESET_ASCQ 0x0
#define POWER_ON_OCCURRED_ASCQ 0x1
#define BUS_RESET_ASCQ 0x2	/* scsi bus reset occurred */
#define MODE_CHANGED_ASCQ 0x1	/* mode parameters changed */
#define CAPACITY_CHANGED_ASCQ 0x9
#define SAVING_PARAMS_UNSUP 0x39
#define TRANSPORT_PROBLEM 0x4b
#define THRESHOLD_EXCEEDED 0x5d
#define LOW_POWER_COND_ON 0x5e
#define MISCOMPARE_VERIFY_ASC 0x1d
#define MICROCODE_CHANGED_ASCQ 0x1	/* with TARGET_CHANGED_ASC */
#define MICROCODE_CHANGED_WO_RESET_ASCQ 0x16
#define WRITE_ERROR_ASC 0xc
#define UNALIGNED_WRITE_ASCQ 0x4
#define WRITE_BOUNDARY_ASCQ 0x5
#define READ_INVDATA_ASCQ 0x6
#define READ_BOUNDARY_ASCQ 0x7
#define ATTEMPT_ACCESS_GAP 0x9
#define INSUFF_ZONE_ASCQ 0xe
/* see drivers/scsi/sense_codes.h */

/* Additional Sense Code Qualifier (ASCQ) */
#define ACK_NAK_TO 0x3

/* Default values for driver parameters */
#define DEF_NUM_HOST 1
#define DEF_NUM_TGTS 1
#define DEF_MAX_LUNS 1
/* With these defaults, this driver will make 1 host with 1 target
 * (id 0) containing 1 logical unit (lun 0). That is 1 device.
 */
#define DEF_ATO 1
#define DEF_CDB_LEN 10
#define DEF_JDELAY 1		/* if > 0 unit is a jiffy */
#define DEF_DEV_SIZE_PRE_INIT 0
#define DEF_DEV_SIZE_MB 8
#define DEF_ZBC_DEV_SIZE_MB 128
#define DEF_DIF 0
#define DEF_DIX 0
#define DEF_PER_HOST_STORE false
#define DEF_D_SENSE 0
#define DEF_EVERY_NTH 0
#define DEF_FAKE_RW 0
#define DEF_GUARD 0
#define DEF_HOST_LOCK 0
#define DEF_LBPU 0
#define DEF_LBPWS 0
#define DEF_LBPWS10 0
#define DEF_LBPRZ 1
#define DEF_LOWEST_ALIGNED 0
#define DEF_NDELAY 0		/* if > 0 unit is a nanosecond */
#define DEF_NO_LUN_0 0
#define DEF_NUM_PARTS 0
#define DEF_OPTS 0
#define DEF_OPT_BLKS 1024
#define DEF_PHYSBLK_EXP 0
#define DEF_OPT_XFERLEN_EXP 0
#define DEF_PTYPE TYPE_DISK
#define DEF_RANDOM false
#define DEF_REMOVABLE false
#define DEF_SCSI_LEVEL 7	/* INQUIRY, byte2 [6->SPC-4; 7->SPC-5] */
#define DEF_SECTOR_SIZE 512
#define DEF_UNMAP_ALIGNMENT 0
#define DEF_UNMAP_GRANULARITY 1
#define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
#define DEF_UNMAP_MAX_DESC 256
#define DEF_VIRTUAL_GB 0
#define DEF_VPD_USE_HOSTNO 1
#define DEF_WRITESAME_LENGTH 0xFFFF
#define DEF_ATOMIC_WR 0
#define DEF_ATOMIC_WR_MAX_LENGTH 8192
#define DEF_ATOMIC_WR_ALIGN 2
#define DEF_ATOMIC_WR_GRAN 2
#define DEF_ATOMIC_WR_MAX_LENGTH_BNDRY (DEF_ATOMIC_WR_MAX_LENGTH)
#define DEF_ATOMIC_WR_MAX_BNDRY 128
#define DEF_STRICT 0
#define DEF_STATISTICS false
#define DEF_SUBMIT_QUEUES 1
#define DEF_TUR_MS_TO_READY 0
#define DEF_UUID_CTL 0
#define JDELAY_OVERRIDDEN -9999

/* Default parameters for ZBC drives */
#define DEF_ZBC_ZONE_SIZE_MB 128
#define DEF_ZBC_MAX_OPEN_ZONES 8
#define DEF_ZBC_NR_CONV_ZONES 1

/* Default parameters for tape drives */
#define TAPE_DEF_DENSITY 0x0
#define TAPE_BAD_DENSITY 0x65
#define TAPE_DEF_BLKSIZE 0
#define TAPE_MIN_BLKSIZE 512
#define TAPE_MAX_BLKSIZE 1048576
#define TAPE_EW 20
#define TAPE_MAX_PARTITIONS 2
#define TAPE_UNITS 10000
#define TAPE_PARTITION_1_UNITS 1000

/* The tape block data definitions */
#define TAPE_BLOCK_FM_FLAG ((u32)0x1 << 30)
#define TAPE_BLOCK_EOD_FLAG ((u32)0x2 << 30)
#define TAPE_BLOCK_MARK_MASK ((u32)0x3 << 30)
#define TAPE_BLOCK_SIZE_MASK (~TAPE_BLOCK_MARK_MASK)
#define TAPE_BLOCK_MARK(a) (a & TAPE_BLOCK_MARK_MASK)
#define TAPE_BLOCK_SIZE(a) (a & TAPE_BLOCK_SIZE_MASK)
#define IS_TAPE_BLOCK_FM(a) ((a & TAPE_BLOCK_FM_FLAG) != 0)
#define IS_TAPE_BLOCK_EOD(a) ((a & TAPE_BLOCK_EOD_FLAG) != 0)
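
/* Example (follows directly from the masks above): the top two bits of
 * fl_size carry the mark flags and the low 30 bits the block size, so for
 * fl_size = TAPE_BLOCK_FM_FLAG | 0x200, TAPE_BLOCK_SIZE() yields 0x200 and
 * IS_TAPE_BLOCK_FM() is true.
 */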

struct tape_block {
	u32 fl_size;
	unsigned char data[4];
};

/* Flags for sense data */
#define SENSE_FLAG_FILEMARK 0x80
#define SENSE_FLAG_EOM 0x40
#define SENSE_FLAG_ILI 0x20

#define SDEBUG_LUN_0_VAL 0

/* bit mask values for sdebug_opts */
#define SDEBUG_OPT_NOISE 1
#define SDEBUG_OPT_MEDIUM_ERR 2
#define SDEBUG_OPT_TIMEOUT 4
#define SDEBUG_OPT_RECOVERED_ERR 8
#define SDEBUG_OPT_TRANSPORT_ERR 16
#define SDEBUG_OPT_DIF_ERR 32
#define SDEBUG_OPT_DIX_ERR 64
#define SDEBUG_OPT_MAC_TIMEOUT 128
#define SDEBUG_OPT_SHORT_TRANSFER 0x100
#define SDEBUG_OPT_Q_NOISE 0x200
#define SDEBUG_OPT_ALL_TSF 0x400	/* ignore */
#define SDEBUG_OPT_RARE_TSF 0x800
#define SDEBUG_OPT_N_WCE 0x1000
#define SDEBUG_OPT_RESET_NOISE 0x2000
#define SDEBUG_OPT_NO_CDB_NOISE 0x4000
#define SDEBUG_OPT_HOST_BUSY 0x8000
#define SDEBUG_OPT_CMD_ABORT 0x10000
#define SDEBUG_OPT_ALL_NOISE (SDEBUG_OPT_NOISE | SDEBUG_OPT_Q_NOISE | \
			      SDEBUG_OPT_RESET_NOISE)
#define SDEBUG_OPT_ALL_INJECTING (SDEBUG_OPT_RECOVERED_ERR | \
				  SDEBUG_OPT_TRANSPORT_ERR | \
				  SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR | \
				  SDEBUG_OPT_SHORT_TRANSFER | \
				  SDEBUG_OPT_HOST_BUSY | \
				  SDEBUG_OPT_CMD_ABORT)
#define SDEBUG_OPT_RECOV_DIF_DIX (SDEBUG_OPT_RECOVERED_ERR | \
				  SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR)

/* As indicated in SAM-5 and SPC-4 Unit Attentions (UAs) are returned in
 * priority order. In the subset implemented here lower numbers have higher
 * priority. The UA numbers should be a sequence starting from 0 with
 * SDEBUG_NUM_UAS being 1 higher than the highest numbered UA. */
#define SDEBUG_UA_POR 0		/* Power on, reset, or bus device reset */
#define SDEBUG_UA_POOCCUR 1	/* Power on occurred */
#define SDEBUG_UA_BUS_RESET 2
#define SDEBUG_UA_MODE_CHANGED 3
#define SDEBUG_UA_CAPACITY_CHANGED 4
#define SDEBUG_UA_LUNS_CHANGED 5
#define SDEBUG_UA_MICROCODE_CHANGED 6	/* simulate firmware change */
#define SDEBUG_UA_MICROCODE_CHANGED_WO_RESET 7
#define SDEBUG_UA_NOT_READY_TO_READY 8
#define SDEBUG_NUM_UAS 9

/* when 1==SDEBUG_OPT_MEDIUM_ERR, a medium error is simulated at this
 * sector on read commands: */
#define OPT_MEDIUM_ERR_ADDR 0x1234	/* that's sector 4660 in decimal */
#define OPT_MEDIUM_ERR_NUM 10		/* number of consecutive medium errs */

/* SDEBUG_CANQUEUE is the maximum number of commands that can be queued
 * (for response) per submit queue at one time. Can be reduced by max_queue
 * option. Command responses are not queued when jdelay=0 and ndelay=0. The
 * per-device DEF_CMD_PER_LUN can be changed via sysfs:
 * /sys/class/scsi_device/<h:c:t:l>/device/queue_depth
 * but cannot exceed SDEBUG_CANQUEUE .
 */
#define SDEBUG_CANQUEUE_WORDS 3	/* a WORD is bits in a long */
#define SDEBUG_CANQUEUE (SDEBUG_CANQUEUE_WORDS * BITS_PER_LONG)
#define DEF_CMD_PER_LUN SDEBUG_CANQUEUE
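
/* Example: with BITS_PER_LONG == 64, SDEBUG_CANQUEUE is 3 * 64 = 192 queued
 * commands per submit queue; on a 32 bit build it is 3 * 32 = 96.
 */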

/* UA - Unit Attention; SA - Service Action; SSU - Start Stop Unit */
#define F_D_IN 1		/* Data-in command (e.g. READ) */
#define F_D_OUT 2		/* Data-out command (e.g. WRITE) */
#define F_D_OUT_MAYBE 4		/* WRITE SAME, NDOB bit */
#define F_D_UNKN 8
#define F_RL_WLUN_OK 0x10	/* allowed with REPORT LUNS W-LUN */
#define F_SKIP_UA 0x20		/* bypass UAs (e.g. INQUIRY command) */
#define F_DELAY_OVERR 0x40	/* for commands like INQUIRY */
#define F_SA_LOW 0x80		/* SA is in cdb byte 1, bits 4 to 0 */
#define F_SA_HIGH 0x100		/* SA is in cdb bytes 8 and 9 */
#define F_INV_OP 0x200		/* invalid opcode (not supported) */
#define F_FAKE_RW 0x400		/* bypass resp_*() when fake_rw set */
#define F_M_ACCESS 0x800	/* media access, reacts to SSU state */
#define F_SSU_DELAY 0x1000	/* SSU command delay (long-ish) */
#define F_SYNC_DELAY 0x2000	/* SYNCHRONIZE CACHE delay */

/* Useful combinations of the above flags */
#define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
#define FF_MEDIA_IO (F_M_ACCESS | F_FAKE_RW)
#define FF_SA (F_SA_HIGH | F_SA_LOW)
#define F_LONG_DELAY (F_SSU_DELAY | F_SYNC_DELAY)

#define SDEBUG_MAX_PARTS 4

#define SDEBUG_MAX_CMD_LEN 32

#define SDEB_XA_NOT_IN_USE XA_MARK_1

/* Zone types (zbcr05 table 25) */
enum sdebug_z_type {
	ZBC_ZTYPE_CNV	= 0x1,
	ZBC_ZTYPE_SWR	= 0x2,
	ZBC_ZTYPE_SWP	= 0x3,
	/* ZBC_ZTYPE_SOBR = 0x4, */
	ZBC_ZTYPE_GAP	= 0x5,
};

/* enumeration names taken from table 26, zbcr05 */
enum sdebug_z_cond {
	ZBC_NOT_WRITE_POINTER	= 0x0,
	ZC1_EMPTY		= 0x1,
	ZC2_IMPLICIT_OPEN	= 0x2,
	ZC3_EXPLICIT_OPEN	= 0x3,
	ZC4_CLOSED		= 0x4,
	ZC6_READ_ONLY		= 0xd,
	ZC5_FULL		= 0xe,
	ZC7_OFFLINE		= 0xf,
};

struct sdeb_zone_state {	/* ZBC: per zone state */
	enum sdebug_z_type z_type;
	enum sdebug_z_cond z_cond;
	bool z_non_seq_resource;
	unsigned int z_size;
	sector_t z_start;
	sector_t z_wp;
};

enum sdebug_err_type {
	ERR_TMOUT_CMD		= 0,	/* make specific scsi command timeout */
	ERR_FAIL_QUEUE_CMD	= 1,	/* make specific scsi command's */
					/* queuecmd return failed */
	ERR_FAIL_CMD		= 2,	/* make specific scsi command's */
					/* queuecmd return succeed but */
					/* with errors set in scsi_cmnd */
	ERR_ABORT_CMD_FAILED	= 3,	/* control return FAILED from */
					/* scsi_debug_abort() */
	ERR_LUN_RESET_FAILED	= 4,	/* control return FAILED from */
					/* scsi_debug_device_reset() */
};

struct sdebug_err_inject {
	int type;
	struct list_head list;
	int cnt;
	unsigned char cmd;
	struct rcu_head rcu;

	union {
		/*
		 * For ERR_FAIL_QUEUE_CMD
		 */
		int queuecmd_ret;

		/*
		 * For ERR_FAIL_CMD
		 */
		struct {
			unsigned char host_byte;
			unsigned char driver_byte;
			unsigned char status_byte;
			unsigned char sense_key;
			unsigned char asc;
			unsigned char asq;
		};
	};
};

struct sdebug_dev_info {
	struct list_head dev_list;
	unsigned int channel;
	unsigned int target;
	u64 lun;
	uuid_t lu_name;
	struct sdebug_host_info *sdbg_host;
	unsigned long uas_bm[1];
	atomic_t stopped;	/* 1: by SSU, 2: device start */
	bool used;

	/* For ZBC devices */
	bool zoned;
	unsigned int zcap;
	unsigned int zsize;
	unsigned int zsize_shift;
	unsigned int nr_zones;
	unsigned int nr_conv_zones;
	unsigned int nr_seq_zones;
	unsigned int nr_imp_open;
	unsigned int nr_exp_open;
	unsigned int nr_closed;
	unsigned int max_open;
	ktime_t create_ts;	/* time since bootup that this device was created */
	struct sdeb_zone_state *zstate;

	/* For tapes */
	unsigned int tape_blksize;
	unsigned int tape_density;
	unsigned char tape_partition;
	unsigned char tape_nbr_partitions;
	unsigned char tape_pending_nbr_partitions;
	unsigned int tape_pending_part_0_size;
	unsigned int tape_pending_part_1_size;
	unsigned char tape_dce;
	unsigned int tape_location[TAPE_MAX_PARTITIONS];
	unsigned int tape_eop[TAPE_MAX_PARTITIONS];
	struct tape_block *tape_blocks[TAPE_MAX_PARTITIONS];

	struct dentry *debugfs_entry;
	struct spinlock list_lock;
	struct list_head inject_err_list;
};

struct sdebug_target_info {
	bool reset_fail;
	struct dentry *debugfs_entry;
};

struct sdebug_host_info {
	struct list_head host_list;
	int si_idx;	/* sdeb_store_info (per host) xarray index */
	struct Scsi_Host *shost;
	struct device dev;
	struct list_head dev_info_list;
};

/* There is an xarray of pointers to this struct's objects, one per host */
struct sdeb_store_info {
	rwlock_t macc_data_lck;	/* for media data access on this store */
	rwlock_t macc_meta_lck;	/* for atomic media meta access on this store */
	rwlock_t macc_sector_lck;	/* per-sector media data access on this store */
	u8 *storep;		/* user data storage (ram) */
	struct t10_pi_tuple *dif_storep;	/* protection info */
	void *map_storep;	/* provisioning map */
};

#define dev_to_sdebug_host(d)	\
	container_of(d, struct sdebug_host_info, dev)

#define shost_to_sdebug_host(shost)	\
	dev_to_sdebug_host(shost->dma_dev)

enum sdeb_defer_type {SDEB_DEFER_NONE = 0, SDEB_DEFER_HRT = 1,
		      SDEB_DEFER_WQ = 2, SDEB_DEFER_POLL = 3};

struct sdebug_defer {
	struct hrtimer hrt;
	struct execute_work ew;
	ktime_t cmpl_ts;	/* time since boot to complete this cmd */
	int issuing_cpu;
	bool aborted;	/* true when blk_abort_request() already called */
	enum sdeb_defer_type defer_t;
};

struct sdebug_scsi_cmd {
	spinlock_t lock;
	struct sdebug_defer sd_dp;
};

static atomic_t sdebug_cmnd_count;	/* number of incoming commands */
static atomic_t sdebug_completions;	/* count of deferred completions */
static atomic_t sdebug_miss_cpus;	/* submission + completion cpus differ */
static atomic_t sdebug_a_tsf;		/* 'almost task set full' counter */
static atomic_t sdeb_inject_pending;
static atomic_t sdeb_mq_poll_count;	/* bumped when mq_poll returns > 0 */

struct opcode_info_t {
	u8 num_attached;	/* 0 if this is it (i.e. a leaf); use 0xff */
				/* for terminating element */
	u8 opcode;		/* if num_attached > 0, preferred */
	u16 sa;			/* service action */
	u32 flags;		/* OR-ed set of SDEB_F_* */
	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
	const struct opcode_info_t *arrp;	/* num_attached elements or NULL */
	u8 len_mask[16];	/* len_mask[0]-->cdb_len, then mask for cdb */
				/* 1 to min(cdb_len, 15); ignore cdb[15...] */
};

/* SCSI opcodes (first byte of cdb) of interest mapped onto these indexes */
enum sdeb_opcode_index {
	SDEB_I_INVALID_OPCODE = 0,
	SDEB_I_INQUIRY = 1,
	SDEB_I_REPORT_LUNS = 2,
	SDEB_I_REQUEST_SENSE = 3,
	SDEB_I_TEST_UNIT_READY = 4,
	SDEB_I_MODE_SENSE = 5,		/* 6, 10 */
	SDEB_I_MODE_SELECT = 6,		/* 6, 10 */
	SDEB_I_LOG_SENSE = 7,
	SDEB_I_READ_CAPACITY = 8,	/* 10; 16 is in SA_IN(16) */
	SDEB_I_READ = 9,		/* 6, 10, 12, 16 */
	SDEB_I_WRITE = 10,		/* 6, 10, 12, 16 */
	SDEB_I_START_STOP = 11,
	SDEB_I_SERV_ACT_IN_16 = 12,	/* add ...SERV_ACT_IN_12 if needed */
	SDEB_I_SERV_ACT_OUT_16 = 13,	/* add ...SERV_ACT_OUT_12 if needed */
	SDEB_I_MAINT_IN = 14,
	SDEB_I_MAINT_OUT = 15,
	SDEB_I_VERIFY = 16,		/* VERIFY(10), VERIFY(16) */
	SDEB_I_VARIABLE_LEN = 17,	/* READ(32), WRITE(32), WR_SCAT(32) */
	SDEB_I_RESERVE = 18,		/* 6, 10 */
	SDEB_I_RELEASE = 19,		/* 6, 10 */
	SDEB_I_ALLOW_REMOVAL = 20,	/* PREVENT ALLOW MEDIUM REMOVAL */
	SDEB_I_REZERO_UNIT = 21,	/* REWIND in SSC */
	SDEB_I_ATA_PT = 22,		/* 12, 16 */
	SDEB_I_SEND_DIAG = 23,
	SDEB_I_UNMAP = 24,
	SDEB_I_WRITE_BUFFER = 25,
	SDEB_I_WRITE_SAME = 26,		/* 10, 16 */
	SDEB_I_SYNC_CACHE = 27,		/* 10, 16 */
	SDEB_I_COMP_WRITE = 28,
	SDEB_I_PRE_FETCH = 29,		/* 10, 16 */
	SDEB_I_ZONE_OUT = 30,		/* 0x94+SA; includes no data xfer */
	SDEB_I_ZONE_IN = 31,		/* 0x95+SA; all have data-in */
	SDEB_I_ATOMIC_WRITE_16 = 32,
	SDEB_I_READ_BLOCK_LIMITS = 33,
	SDEB_I_LOCATE = 34,
	SDEB_I_WRITE_FILEMARKS = 35,
	SDEB_I_SPACE = 36,
	SDEB_I_FORMAT_MEDIUM = 37,
	SDEB_I_LAST_ELEM_P1 = 38,	/* keep this last (previous + 1) */
};

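/* Maps cdb[0] (the SCSI opcode byte) to an SDEB_I_* index into
 * opcode_info_arr[] further below; 0 selects SDEB_I_INVALID_OPCODE.
 * For example, entry 0x28 below is SDEB_I_READ, so a READ(10) cdb is
 * dispatched to the READ handler.
 */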
static const unsigned char opcode_ind_arr[256] = {
/* 0x0; 0x0->0x1f: 6 byte cdbs */
	SDEB_I_TEST_UNIT_READY, SDEB_I_REZERO_UNIT, 0, SDEB_I_REQUEST_SENSE,
	    SDEB_I_FORMAT_MEDIUM, SDEB_I_READ_BLOCK_LIMITS, 0, 0,
	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, 0,
	    SDEB_I_WRITE_FILEMARKS, SDEB_I_SPACE, SDEB_I_INQUIRY, 0, 0,
	SDEB_I_MODE_SELECT, SDEB_I_RESERVE, SDEB_I_RELEASE,
	    0, 0, SDEB_I_MODE_SENSE, SDEB_I_START_STOP, 0, SDEB_I_SEND_DIAG,
	    SDEB_I_ALLOW_REMOVAL, 0,
/* 0x20; 0x20->0x3f: 10 byte cdbs */
	0, 0, 0, 0, 0, SDEB_I_READ_CAPACITY, 0, 0,
	SDEB_I_READ, 0, SDEB_I_WRITE, SDEB_I_LOCATE, 0, 0, 0, SDEB_I_VERIFY,
	0, 0, 0, 0, SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, 0,
	0, 0, 0, SDEB_I_WRITE_BUFFER, 0, 0, 0, 0,
/* 0x40; 0x40->0x5f: 10 byte cdbs */
	0, SDEB_I_WRITE_SAME, SDEB_I_UNMAP, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, SDEB_I_LOG_SENSE, 0, 0,
	0, 0, 0, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
	    SDEB_I_RELEASE,
	0, 0, SDEB_I_MODE_SENSE, 0, 0, 0, 0, 0,
/* 0x60; 0x60->0x7d are reserved, 0x7e is "extended cdb" */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, SDEB_I_VARIABLE_LEN,
/* 0x80; 0x80->0x9f: 16 byte cdbs */
	0, 0, 0, 0, 0, SDEB_I_ATA_PT, 0, 0,
	SDEB_I_READ, SDEB_I_COMP_WRITE, SDEB_I_WRITE, 0,
	    0, 0, 0, SDEB_I_VERIFY,
	SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, SDEB_I_WRITE_SAME,
	SDEB_I_ZONE_OUT, SDEB_I_ZONE_IN, 0, 0,
	0, 0, 0, 0,
	SDEB_I_ATOMIC_WRITE_16, 0, SDEB_I_SERV_ACT_IN_16, SDEB_I_SERV_ACT_OUT_16,
/* 0xa0; 0xa0->0xbf: 12 byte cdbs */
	SDEB_I_REPORT_LUNS, SDEB_I_ATA_PT, 0, SDEB_I_MAINT_IN,
	    SDEB_I_MAINT_OUT, 0, 0, 0,
	SDEB_I_READ, 0 /* SDEB_I_SERV_ACT_OUT_12 */, SDEB_I_WRITE,
	    0 /* SDEB_I_SERV_ACT_IN_12 */, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
/* 0xc0; 0xc0->0xff: vendor specific */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
};

/*
 * The following "response" functions return the SCSI mid-level's 4 byte
 * tuple-in-an-int. To handle commands with an IMMED bit, for a faster
 * command completion, they can mask their return value with
 * SDEG_RES_IMMED_MASK .
 */
#define SDEG_RES_IMMED_MASK 0x40000000

static int resp_inquiry(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_report_luns(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_requests(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_mode_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_mode_select(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_log_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_readcap(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_read_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_scat(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_start_stop(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_readcap16(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_get_lba_status(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_get_stream_status(struct scsi_cmnd *scp,
				  struct sdebug_dev_info *devip);
static int resp_report_tgtpgs(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_unmap(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_rsup_opcodes(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_rsup_tmfs(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_verify(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_same_10(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_same_16(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_sync_cache(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_pre_fetch(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_report_zones(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_atomic_write(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_open_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_close_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_finish_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_rwp_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_read_blklimits(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_locate(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_filemarks(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_space(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_rewind(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_format_medium(struct scsi_cmnd *, struct sdebug_dev_info *);

static int sdebug_do_add_host(bool mk_new_store);
static int sdebug_add_host_helper(int per_host_idx);
static void sdebug_do_remove_host(bool the_end);
static int sdebug_add_store(void);
static void sdebug_erase_store(int idx, struct sdeb_store_info *sip);
static void sdebug_erase_all_stores(bool apart_from_first);

/*
 * The following are overflow arrays for cdbs that "hit" the same index in
 * the opcode_info_arr array. The most time sensitive (or commonly used) cdb
 * should be placed in opcode_info_arr[], the others should be placed here.
 */
static const struct opcode_info_t msense_iarr[] = {
	{0, 0x1a, 0, F_D_IN, NULL, NULL,
	    {6, 0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t mselect_iarr[] = {
	{0, 0x15, 0, F_D_OUT, NULL, NULL,
	    {6, 0xf1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t read_iarr[] = {
	{0, 0x28, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,	/* READ(10) */
	    {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },
	{0, 0x8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,	/* READ(6) */
	    {6, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0xa8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,	/* READ(12) */
	    {12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf,
	     0xc7, 0, 0, 0, 0} },
};

static const struct opcode_info_t write_iarr[] = {
	{0, 0x2a, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,	/* WRITE(10) */
	    NULL, {10, 0xfb, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7,
		   0, 0, 0, 0, 0, 0} },
	{0, 0xa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,	/* WRITE(6) */
	    NULL, {6, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0,
		   0, 0, 0} },
	{0, 0xaa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,	/* WRITE(12) */
	    NULL, {12, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		   0xbf, 0xc7, 0, 0, 0, 0} },
};

static const struct opcode_info_t verify_iarr[] = {
	{0, 0x2f, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,	/* VERIFY(10) */
	    NULL, {10, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xff, 0xff, 0xc7,
		   0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t sa_in_16_iarr[] = {
	{0, 0x9e, 0x12, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL,
	    {16, 0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0, 0xc7} },	/* GET LBA STATUS(16) */
	{0, 0x9e, 0x16, F_SA_LOW | F_D_IN, resp_get_stream_status, NULL,
	    {16, 0x16, 0, 0, 0xff, 0xff, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff,
	     0, 0} },	/* GET STREAM STATUS */
};

static const struct opcode_info_t vl_iarr[] = {	/* VARIABLE LENGTH */
	{0, 0x7f, 0xb, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_dt0,
	    NULL, {32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0xb, 0xfa,
		   0, 0xff, 0xff, 0xff, 0xff} },	/* WRITE(32) */
	{0, 0x7f, 0x11, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
	    NULL, {32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x11, 0xf8,
		   0, 0xff, 0xff, 0x0, 0x0} },	/* WRITE SCATTERED(32) */
};

static const struct opcode_info_t maint_in_iarr[] = {	/* MAINT IN */
	{0, 0xa3, 0xc, F_SA_LOW | F_D_IN, resp_rsup_opcodes, NULL,
	    {12, 0xc, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
	     0xc7, 0, 0, 0, 0} },	/* REPORT SUPPORTED OPERATION CODES */
	{0, 0xa3, 0xd, F_SA_LOW | F_D_IN, resp_rsup_tmfs, NULL,
	    {12, 0xd, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
	     0, 0} },	/* REPORTED SUPPORTED TASK MANAGEMENT FUNCTIONS */
};

static const struct opcode_info_t write_same_iarr[] = {
	{0, 0x93, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_write_same_16, NULL,
	    {16, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* WRITE SAME(16) */
};

static const struct opcode_info_t reserve_iarr[] = {
	{0, 0x16, 0, F_D_OUT, NULL, NULL,	/* RESERVE(6) */
	    {6, 0x1f, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t release_iarr[] = {
	{0, 0x17, 0, F_D_OUT, NULL, NULL,	/* RELEASE(6) */
	    {6, 0x1f, 0xff, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t sync_cache_iarr[] = {
	{0, 0x91, 0, F_SYNC_DELAY | F_M_ACCESS, resp_sync_cache, NULL,
	    {16, 0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* SYNC_CACHE (16) */
};

static const struct opcode_info_t pre_fetch_iarr[] = {
	{0, 0x90, 0, F_SYNC_DELAY | FF_MEDIA_IO, resp_pre_fetch, NULL,
	    {16, 0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* PRE-FETCH (16) */
};

static const struct opcode_info_t zone_out_iarr[] = {	/* ZONE OUT(16) */
	{0, 0x94, 0x1, F_SA_LOW | F_M_ACCESS, resp_close_zone, NULL,
	    {16, 0x1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },	/* CLOSE ZONE */
	{0, 0x94, 0x2, F_SA_LOW | F_M_ACCESS, resp_finish_zone, NULL,
	    {16, 0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },	/* FINISH ZONE */
	{0, 0x94, 0x4, F_SA_LOW | F_M_ACCESS, resp_rwp_zone, NULL,
	    {16, 0x4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },	/* RESET WRITE POINTER */
};

static const struct opcode_info_t zone_in_iarr[] = {	/* ZONE IN(16) */
	{0, 0x95, 0x6, F_SA_LOW | F_D_IN | F_M_ACCESS, NULL, NULL,
	    {16, 0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* REPORT ZONES */
};


/* This array is accessed via SDEB_I_* values. Make sure all are mapped,
 * plus the terminating elements for logic that scans this table such as
 * REPORT SUPPORTED OPERATION CODES. */
static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEM_P1 + 1] = {
/* 0 */
	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL,	/* unknown opcodes */
	    {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x12, 0, FF_RESPOND | F_D_IN, resp_inquiry, NULL,	/* INQUIRY */
	    {6, 0xe3, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0xa0, 0, FF_RESPOND | F_D_IN, resp_report_luns, NULL,
	    {12, 0xe3, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
	     0, 0} },	/* REPORT LUNS */
	{0, 0x3, 0, FF_RESPOND | F_D_IN, resp_requests, NULL,
	    {6, 0xe1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x0, 0, F_M_ACCESS | F_RL_WLUN_OK, NULL, NULL,	/* TEST UNIT READY */
	    {6, 0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
/* 5 */
	{ARRAY_SIZE(msense_iarr), 0x5a, 0, F_D_IN,	/* MODE SENSE(10) */
	    resp_mode_sense, msense_iarr, {10, 0xf8, 0xff, 0xff, 0, 0, 0,
		0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(mselect_iarr), 0x55, 0, F_D_OUT,	/* MODE SELECT(10) */
	    resp_mode_select, mselect_iarr, {10, 0xf1, 0, 0, 0, 0, 0, 0xff,
		0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
	{0, 0x4d, 0, F_D_IN, resp_log_sense, NULL,	/* LOG SENSE */
	    {10, 0xe3, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0,
	     0, 0, 0} },
	{0, 0x25, 0, F_D_IN, resp_readcap, NULL,	/* READ CAPACITY(10) */
	    {10, 0xe1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0x1, 0xc7, 0, 0, 0, 0,
	     0, 0} },
	{ARRAY_SIZE(read_iarr), 0x88, 0, F_D_IN | FF_MEDIA_IO,	/* READ(16) */
	    resp_read_dt0, read_iarr, {16, 0xfe, 0xff, 0xff, 0xff, 0xff,
	    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
/* 10 */
	{ARRAY_SIZE(write_iarr), 0x8a, 0, F_D_OUT | FF_MEDIA_IO,
	    resp_write_dt0, write_iarr,			/* WRITE(16) */
		{16, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
	{0, 0x1b, 0, F_SSU_DELAY, resp_start_stop, NULL,	/* START STOP UNIT */
	    {6, 0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(sa_in_16_iarr), 0x9e, 0x10, F_SA_LOW | F_D_IN,
	    resp_readcap16, sa_in_16_iarr,	/* SA_IN(16), READ CAPACITY(16) */
		{16, 0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		 0xff, 0xff, 0xff, 0xff, 0x1, 0xc7} },
	{0, 0x9f, 0x12, F_SA_LOW | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
	    NULL, {16, 0x12, 0xf9, 0x0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff,
		   0xff, 0xff, 0xff, 0xff, 0xc7} },	/* SA_OUT(16), WRITE SCAT(16) */
	{ARRAY_SIZE(maint_in_iarr), 0xa3, 0xa, F_SA_LOW | F_D_IN,
	    resp_report_tgtpgs,	/* MAINT IN, REPORT TARGET PORT GROUPS */
		maint_in_iarr, {12, 0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff,
		0xff, 0, 0xc7, 0, 0, 0, 0} },
/* 15 */
	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL,	/* MAINT OUT */
	    {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(verify_iarr), 0x8f, 0,
	    F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,	/* VERIFY(16) */
	    verify_iarr, {16, 0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },
	{ARRAY_SIZE(vl_iarr), 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_MEDIA_IO,
	    resp_read_dt0, vl_iarr,	/* VARIABLE LENGTH, READ(32) */
	    {32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x9, 0xfe, 0, 0xff, 0xff,
	     0xff, 0xff} },
	{ARRAY_SIZE(reserve_iarr), 0x56, 0, F_D_OUT,
	    NULL, reserve_iarr,	/* RESERVE(10) <no response function> */
	    {10, 0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
	     0} },
	{ARRAY_SIZE(release_iarr), 0x57, 0, F_D_OUT,
	    NULL, release_iarr,	/* RELEASE(10) <no response function> */
	    {10, 0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
	     0} },
/* 20 */
	{0, 0x1e, 0, 0, NULL, NULL,	/* ALLOW REMOVAL */
	    {6, 0, 0, 0, 0x3, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x1, 0, 0, resp_rewind, NULL,
	    {6, 0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL,	/* ATA_PT */
	    {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x1d, 0, F_D_OUT, NULL, NULL,	/* SEND DIAGNOSTIC */
	    {6, 0xf7, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x42, 0, F_D_OUT | FF_MEDIA_IO, resp_unmap, NULL,	/* UNMAP */
	    {10, 0x1, 0, 0, 0, 0, 0x3f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
/* 25 */
	{0, 0x3b, 0, F_D_OUT_MAYBE, resp_write_buffer, NULL,
	    {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },	/* WRITE_BUFFER */
	{ARRAY_SIZE(write_same_iarr), 0x41, 0, F_D_OUT_MAYBE | FF_MEDIA_IO,
	    resp_write_same_10, write_same_iarr,	/* WRITE SAME(10) */
		{10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0,
		 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(sync_cache_iarr), 0x35, 0, F_SYNC_DELAY | F_M_ACCESS,
	    resp_sync_cache, sync_cache_iarr,
	    {10, 0x7, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },	/* SYNC_CACHE (10) */
	{0, 0x89, 0, F_D_OUT | FF_MEDIA_IO, resp_comp_write, NULL,
	    {16, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0,
	     0, 0xff, 0x3f, 0xc7} },	/* COMPARE AND WRITE */
	{ARRAY_SIZE(pre_fetch_iarr), 0x34, 0, F_SYNC_DELAY | FF_MEDIA_IO,
	    resp_pre_fetch, pre_fetch_iarr,
	    {10, 0x2, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },	/* PRE-FETCH (10) */
				/* READ POSITION (10) */

/* 30 */
	{ARRAY_SIZE(zone_out_iarr), 0x94, 0x3, F_SA_LOW | F_M_ACCESS,
	    resp_open_zone, zone_out_iarr,	/* ZONE_OUT(16), OPEN ZONE) */
		{16, 0x3 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		 0xff, 0xff, 0x0, 0x0, 0xff, 0xff, 0x1, 0xc7} },
	{ARRAY_SIZE(zone_in_iarr), 0x95, 0x0, F_SA_LOW | F_M_ACCESS,
	    resp_report_zones, zone_in_iarr,	/* ZONE_IN(16), REPORT ZONES) */
		{16, 0x0 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xc7} },
/* 32 */
	{0, 0x0, 0x0, F_D_OUT | FF_MEDIA_IO,
	    resp_atomic_write, NULL,	/* ATOMIC WRITE 16 */
		{16, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff} },
	{0, 0x05, 0, F_D_IN, resp_read_blklimits, NULL,	/* READ BLOCK LIMITS (6) */
	    {6, 0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x2b, 0, F_D_UNKN, resp_locate, NULL,	/* LOCATE (10) */
	    {10, 0x07, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },
	{0, 0x10, 0, F_D_IN, resp_write_filemarks, NULL,	/* WRITE FILEMARKS (6) */
	    {6, 0x01, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x11, 0, F_D_IN, resp_space, NULL,	/* SPACE (6) */
	    {6, 0x07, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x4, 0, 0, resp_format_medium, NULL,	/* FORMAT MEDIUM (6) */
	    {6, 0x3, 0x7, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
/* 38 */
/* sentinel */
	{0xff, 0, 0, 0, NULL, NULL,	/* terminating element */
	    {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static int sdebug_num_hosts;
static int sdebug_add_host = DEF_NUM_HOST;	/* in sysfs this is relative */
static int sdebug_ato = DEF_ATO;
static int sdebug_cdb_len = DEF_CDB_LEN;
static int sdebug_jdelay = DEF_JDELAY;	/* if > 0 then unit is jiffies */
static int sdebug_dev_size_mb = DEF_DEV_SIZE_PRE_INIT;
static int sdebug_dif = DEF_DIF;
static int sdebug_dix = DEF_DIX;
static int sdebug_dsense = DEF_D_SENSE;
static int sdebug_every_nth = DEF_EVERY_NTH;
static int sdebug_fake_rw = DEF_FAKE_RW;
static unsigned int sdebug_guard = DEF_GUARD;
static int sdebug_host_max_queue;	/* per host */
static int sdebug_lowest_aligned = DEF_LOWEST_ALIGNED;
static int sdebug_max_luns = DEF_MAX_LUNS;
static int sdebug_max_queue = SDEBUG_CANQUEUE;	/* per submit queue */
static unsigned int sdebug_medium_error_start = OPT_MEDIUM_ERR_ADDR;
static int sdebug_medium_error_count = OPT_MEDIUM_ERR_NUM;
static int sdebug_ndelay = DEF_NDELAY;	/* if > 0 then unit is nanoseconds */
static int sdebug_no_lun_0 = DEF_NO_LUN_0;
static int sdebug_no_uld;
static int sdebug_num_parts = DEF_NUM_PARTS;
static int sdebug_num_tgts = DEF_NUM_TGTS;	/* targets per host */
static int sdebug_opt_blks = DEF_OPT_BLKS;
static int sdebug_opts = DEF_OPTS;
static int sdebug_physblk_exp = DEF_PHYSBLK_EXP;
static int sdebug_opt_xferlen_exp = DEF_OPT_XFERLEN_EXP;
static int sdebug_ptype = DEF_PTYPE;	/* SCSI peripheral device type */
static int sdebug_scsi_level = DEF_SCSI_LEVEL;
static int sdebug_sector_size = DEF_SECTOR_SIZE;
static int sdeb_tur_ms_to_ready = DEF_TUR_MS_TO_READY;
static int sdebug_virtual_gb = DEF_VIRTUAL_GB;
static int sdebug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
static unsigned int sdebug_lbpu = DEF_LBPU;
static unsigned int sdebug_lbpws = DEF_LBPWS;
static unsigned int sdebug_lbpws10 = DEF_LBPWS10;
static unsigned int sdebug_lbprz = DEF_LBPRZ;
static unsigned int sdebug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
static unsigned int sdebug_unmap_granularity = DEF_UNMAP_GRANULARITY;
static unsigned int sdebug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
static unsigned int sdebug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
static unsigned int sdebug_write_same_length = DEF_WRITESAME_LENGTH;
static unsigned int sdebug_atomic_wr = DEF_ATOMIC_WR;
static unsigned int sdebug_atomic_wr_max_length = DEF_ATOMIC_WR_MAX_LENGTH;
static unsigned int sdebug_atomic_wr_align = DEF_ATOMIC_WR_ALIGN;
static unsigned int sdebug_atomic_wr_gran = DEF_ATOMIC_WR_GRAN;
static unsigned int sdebug_atomic_wr_max_length_bndry =
			DEF_ATOMIC_WR_MAX_LENGTH_BNDRY;
static unsigned int sdebug_atomic_wr_max_bndry = DEF_ATOMIC_WR_MAX_BNDRY;
static int sdebug_uuid_ctl = DEF_UUID_CTL;
static bool sdebug_random = DEF_RANDOM;
static bool sdebug_per_host_store = DEF_PER_HOST_STORE;
static bool sdebug_removable = DEF_REMOVABLE;
static bool sdebug_clustering;
static bool sdebug_host_lock = DEF_HOST_LOCK;
static bool sdebug_strict = DEF_STRICT;
static bool sdebug_any_injecting_opt;
static bool sdebug_no_rwlock;
static bool sdebug_verbose;
static bool have_dif_prot;
static bool write_since_sync;
static bool sdebug_statistics = DEF_STATISTICS;
static bool sdebug_wp;
static bool sdebug_allow_restart;
static enum {
	BLK_ZONED_NONE	= 0,
	BLK_ZONED_HA	= 1,
	BLK_ZONED_HM	= 2,
} sdeb_zbc_model = BLK_ZONED_NONE;
static char *sdeb_zbc_model_s;

enum sam_lun_addr_method {SAM_LUN_AM_PERIPHERAL = 0x0,
			  SAM_LUN_AM_FLAT = 0x1,
			  SAM_LUN_AM_LOGICAL_UNIT = 0x2,
			  SAM_LUN_AM_EXTENDED = 0x3};
static enum sam_lun_addr_method sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
static int sdebug_lun_am_i = (int)SAM_LUN_AM_PERIPHERAL;

static unsigned int sdebug_store_sectors;
static sector_t sdebug_capacity;	/* in sectors */

/* old BIOS stuff, kernel may get rid of them but some mode sense pages
   may still need them */
static int sdebug_heads;		/* heads per disk */
static int sdebug_cylinders_per;	/* cylinders per surface */
static int sdebug_sectors_per;		/* sectors per cylinder */

static LIST_HEAD(sdebug_host_list);
static DEFINE_MUTEX(sdebug_host_list_mutex);

static struct xarray per_store_arr;
static struct xarray *per_store_ap = &per_store_arr;
static int sdeb_first_idx = -1;		/* invalid index ==> none created */
static int sdeb_most_recent_idx = -1;
static DEFINE_RWLOCK(sdeb_fake_rw_lck);	/* need a RW lock when fake_rw=1 */

static unsigned long map_size;
static int num_aborts;
static int num_dev_resets;
static int num_target_resets;
static int num_bus_resets;
static int num_host_resets;
static int dix_writes;
static int dix_reads;
static int dif_errors;

/* ZBC global data */
static bool sdeb_zbc_in_use;	/* true for host-aware and host-managed disks */
static int sdeb_zbc_zone_cap_mb;
static int sdeb_zbc_zone_size_mb;
static int sdeb_zbc_max_open = DEF_ZBC_MAX_OPEN_ZONES;
static int sdeb_zbc_nr_conv = DEF_ZBC_NR_CONV_ZONES;

static int submit_queues = DEF_SUBMIT_QUEUES;	/* > 1 for multi-queue (mq) */
static int poll_queues;	/* iouring iopoll interface.*/

static atomic_long_t writes_by_group_number[64];

static char sdebug_proc_name[] = MY_NAME;
static const char *my_name = MY_NAME;

static const struct bus_type pseudo_lld_bus;

static struct device_driver sdebug_driverfs_driver = {
	.name		= sdebug_proc_name,
	.bus		= &pseudo_lld_bus,
};

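/* These result values pack a host byte (bits 16..23) together with a SAM
 * status (bits 0..7); that packed int is what the SCSI mid level reads
 * back from scsi_cmnd::result.
 */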
static const int check_condition_result =
	SAM_STAT_CHECK_CONDITION;

static const int illegal_condition_result =
	(DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;

static const int device_qfull_result =
	(DID_ABORT << 16) | SAM_STAT_TASK_SET_FULL;

static const int condition_met_result = SAM_STAT_CONDITION_MET;

static struct dentry *sdebug_debugfs_root;
static ASYNC_DOMAIN_EXCLUSIVE(sdebug_async_domain);

static void sdebug_err_free(struct rcu_head *head)
{
	struct sdebug_err_inject *inject =
		container_of(head, typeof(*inject), rcu);

	kfree(inject);
}

static void sdebug_err_add(struct scsi_device *sdev, struct sdebug_err_inject *new)
{
	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdev->hostdata;
	struct sdebug_err_inject *err;

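	/* Writers are serialized by devip->list_lock while readers walk the
	 * list under RCU; an existing entry with the same type and command
	 * byte is unlinked and freed after a grace period before the new
	 * entry is appended, so at most one entry per (type, cmd) remains.
	 */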
	spin_lock(&devip->list_lock);
	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
		if (err->type == new->type && err->cmd == new->cmd) {
			list_del_rcu(&err->list);
			call_rcu(&err->rcu, sdebug_err_free);
		}
	}

	list_add_tail_rcu(&new->list, &devip->inject_err_list);
	spin_unlock(&devip->list_lock);
}

static int sdebug_err_remove(struct scsi_device *sdev, const char *buf, size_t count)
{
	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdev->hostdata;
	struct sdebug_err_inject *err;
	int type;
	unsigned char cmd;

	if (sscanf(buf, "- %d %hhx", &type, &cmd) != 2) {
		kfree(buf);
		return -EINVAL;
	}

	spin_lock(&devip->list_lock);
	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
		if (err->type == type && err->cmd == cmd) {
			list_del_rcu(&err->list);
			call_rcu(&err->rcu, sdebug_err_free);
			spin_unlock(&devip->list_lock);
			kfree(buf);
			return count;
		}
	}
	spin_unlock(&devip->list_lock);

	kfree(buf);
	return -EINVAL;
}

static int sdebug_error_show(struct seq_file *m, void *p)
{
	struct scsi_device *sdev = (struct scsi_device *)m->private;
	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdev->hostdata;
	struct sdebug_err_inject *err;

	seq_puts(m, "Type\tCount\tCommand\n");

	rcu_read_lock();
	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
		switch (err->type) {
		case ERR_TMOUT_CMD:
		case ERR_ABORT_CMD_FAILED:
		case ERR_LUN_RESET_FAILED:
			seq_printf(m, "%d\t%d\t0x%x\n", err->type, err->cnt,
				   err->cmd);
			break;

		case ERR_FAIL_QUEUE_CMD:
			seq_printf(m, "%d\t%d\t0x%x\t0x%x\n", err->type,
				   err->cnt, err->cmd, err->queuecmd_ret);
			break;

		case ERR_FAIL_CMD:
			seq_printf(m, "%d\t%d\t0x%x\t0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
				   err->type, err->cnt, err->cmd,
				   err->host_byte, err->driver_byte,
				   err->status_byte, err->sense_key,
				   err->asc, err->asq);
			break;
		}
	}
	rcu_read_unlock();

	return 0;
}

static int sdebug_error_open(struct inode *inode, struct file *file)
{
	return single_open(file, sdebug_error_show, inode->i_private);
}

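/* Accepted write formats, derived from the sscanf() calls below ("cmd" and
 * the ERR_FAIL_CMD byte fields are hex, the rest decimal):
 *   "<type> <cnt> <cmd>"                 ERR_TMOUT_CMD, ERR_ABORT_CMD_FAILED
 *                                        and ERR_LUN_RESET_FAILED
 *   "<type> <cnt> <cmd> <queuecmd_ret>"  ERR_FAIL_QUEUE_CMD
 *   "<type> <cnt> <cmd> <host_byte> <driver_byte> <status_byte>
 *    <sense_key> <asc> <asq>"            ERR_FAIL_CMD
 *   "- <type> <cmd>"                     remove a previously added entry
 */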
static ssize_t sdebug_error_write(struct file *file, const char __user *ubuf,
		size_t count, loff_t *ppos)
{
	char *buf;
	unsigned int inject_type;
	struct sdebug_err_inject *inject;
	struct scsi_device *sdev = (struct scsi_device *)file->f_inode->i_private;

	buf = kzalloc(count + 1, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, ubuf, count)) {
		kfree(buf);
		return -EFAULT;
	}

	if (buf[0] == '-')
		return sdebug_err_remove(sdev, buf, count);

	if (sscanf(buf, "%d", &inject_type) != 1) {
		kfree(buf);
		return -EINVAL;
	}

	inject = kzalloc(sizeof(struct sdebug_err_inject), GFP_KERNEL);
	if (!inject) {
		kfree(buf);
		return -ENOMEM;
	}

	switch (inject_type) {
	case ERR_TMOUT_CMD:
	case ERR_ABORT_CMD_FAILED:
	case ERR_LUN_RESET_FAILED:
		if (sscanf(buf, "%d %d %hhx", &inject->type, &inject->cnt,
			   &inject->cmd) != 3)
			goto out_error;
		break;

	case ERR_FAIL_QUEUE_CMD:
		if (sscanf(buf, "%d %d %hhx %x", &inject->type, &inject->cnt,
			   &inject->cmd, &inject->queuecmd_ret) != 4)
			goto out_error;
		break;

	case ERR_FAIL_CMD:
		if (sscanf(buf, "%d %d %hhx %hhx %hhx %hhx %hhx %hhx %hhx",
			   &inject->type, &inject->cnt, &inject->cmd,
			   &inject->host_byte, &inject->driver_byte,
			   &inject->status_byte, &inject->sense_key,
			   &inject->asc, &inject->asq) != 9)
			goto out_error;
		break;

	default:
		goto out_error;
	}

	kfree(buf);
	sdebug_err_add(sdev, inject);

	return count;

out_error:
	kfree(buf);
	kfree(inject);
	return -EINVAL;
}

static const struct file_operations sdebug_error_fops = {
	.open	= sdebug_error_open,
	.read	= seq_read,
	.write	= sdebug_error_write,
	.release = single_release,
};

static int sdebug_target_reset_fail_show(struct seq_file *m, void *p)
{
	struct scsi_target *starget = (struct scsi_target *)m->private;
	struct sdebug_target_info *targetip =
		(struct sdebug_target_info *)starget->hostdata;

	if (targetip)
		seq_printf(m, "%c\n", targetip->reset_fail ? 'Y' : 'N');

	return 0;
}

static int sdebug_target_reset_fail_open(struct inode *inode, struct file *file)
{
	return single_open(file, sdebug_target_reset_fail_show, inode->i_private);
}

static ssize_t sdebug_target_reset_fail_write(struct file *file,
		const char __user *ubuf, size_t count, loff_t *ppos)
{
	int ret;
	struct scsi_target *starget =
		(struct scsi_target *)file->f_inode->i_private;
	struct sdebug_target_info *targetip =
		(struct sdebug_target_info *)starget->hostdata;

	if (targetip) {
		ret = kstrtobool_from_user(ubuf, count, &targetip->reset_fail);
		return ret < 0 ? ret : count;
	}
	return -ENODEV;
}

static const struct file_operations sdebug_target_reset_fail_fops = {
	.open	= sdebug_target_reset_fail_open,
	.read	= seq_read,
	.write	= sdebug_target_reset_fail_write,
	.release = single_release,
};

static int sdebug_target_alloc(struct scsi_target *starget)
{
	struct sdebug_target_info *targetip;

	targetip = kzalloc(sizeof(struct sdebug_target_info), GFP_KERNEL);
	if (!targetip)
		return -ENOMEM;

	async_synchronize_full_domain(&sdebug_async_domain);

	targetip->debugfs_entry = debugfs_create_dir(dev_name(&starget->dev),
				sdebug_debugfs_root);

	debugfs_create_file("fail_reset", 0600, targetip->debugfs_entry, starget,
				&sdebug_target_reset_fail_fops);

	starget->hostdata = targetip;

	return 0;
}

static void sdebug_target_cleanup_async(void *data, async_cookie_t cookie)
{
	struct sdebug_target_info *targetip = data;

	debugfs_remove(targetip->debugfs_entry);
	kfree(targetip);
}

static void sdebug_target_destroy(struct scsi_target *starget)
{
	struct sdebug_target_info *targetip;

	targetip = (struct sdebug_target_info *)starget->hostdata;
	if (targetip) {
		starget->hostdata = NULL;
		async_schedule_domain(sdebug_target_cleanup_async, targetip,
				      &sdebug_async_domain);
	}
}

/* Only do the extra work involved in logical block provisioning if one or
 * more of the lbpu, lbpws or lbpws10 parameters are given and we are doing
 * real reads and writes (i.e. not skipping them for speed).
 */
static inline bool scsi_debug_lbp(void)
{
	return 0 == sdebug_fake_rw &&
		(sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10);
}

static inline bool scsi_debug_atomic_write(void)
{
	return sdebug_fake_rw == 0 && sdebug_atomic_wr;
}

static void *lba2fake_store(struct sdeb_store_info *sip,
			    unsigned long long lba)
{
	struct sdeb_store_info *lsip = sip;

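	/* do_div() divides its first argument in place and returns the
	 * remainder, so lba wraps modulo the store size (a small store can
	 * back a larger virtual_gb capacity).
	 */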
	lba = do_div(lba, sdebug_store_sectors);
	if (!sip || !sip->storep) {
		WARN_ON_ONCE(true);
		lsip = xa_load(per_store_ap, 0);	/* should never be NULL */
	}
	return lsip->storep + lba * sdebug_sector_size;
}

static struct t10_pi_tuple *dif_store(struct sdeb_store_info *sip,
				      sector_t sector)
{
	sector = sector_div(sector, sdebug_store_sectors);

	return sip->dif_storep + sector;
}

static void sdebug_max_tgts_luns(void)
{
	struct sdebug_host_info *sdbg_host;
	struct Scsi_Host *hpnt;

	mutex_lock(&sdebug_host_list_mutex);
	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
		hpnt = sdbg_host->shost;
		if ((hpnt->this_id >= 0) &&
		    (sdebug_num_tgts > hpnt->this_id))
			hpnt->max_id = sdebug_num_tgts + 1;
		else
			hpnt->max_id = sdebug_num_tgts;
		/* sdebug_max_luns; */
		hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
	}
	mutex_unlock(&sdebug_host_list_mutex);
}

enum sdeb_cmd_data {SDEB_IN_DATA = 0, SDEB_IN_CDB = 1};

/* Set in_bit to -1 to indicate no bit position of invalid field */
static void mk_sense_invalid_fld(struct scsi_cmnd *scp,
				 enum sdeb_cmd_data c_d,
				 int in_byte, int in_bit)
{
	unsigned char *sbuff;
	u8 sks[4];
	int sl, asc;

	sbuff = scp->sense_buffer;
	if (!sbuff) {
		sdev_printk(KERN_ERR, scp->device,
			    "%s: sense_buffer is NULL\n", __func__);
		return;
	}
	asc = c_d ? INVALID_FIELD_IN_CDB : INVALID_FIELD_IN_PARAM_LIST;
	memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
	scsi_build_sense(scp, sdebug_dsense, ILLEGAL_REQUEST, asc, 0);
	memset(sks, 0, sizeof(sks));
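	/* Build the SENSE KEY SPECIFIC field (see SPC): bit 7 of sks[0] is
	 * SKSV (valid), bit 6 is C/D (set -> error in the CDB, clear -> in
	 * the parameter data), bit 3 is BPV with the bit pointer in bits
	 * 2..0; sks[1..2] hold the big-endian byte (field) pointer.
	 */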
1348 sks[0] = 0x80;
1349 if (c_d)
1350 sks[0] |= 0x40;
1351 if (in_bit >= 0) {
1352 sks[0] |= 0x8;
1353 sks[0] |= 0x7 & in_bit;
1354 }
1355 put_unaligned_be16(in_byte, sks + 1);
1356 if (sdebug_dsense) {
1357 sl = sbuff[7] + 8;
1358 sbuff[7] = sl;
1359 sbuff[sl] = 0x2;
1360 sbuff[sl + 1] = 0x6;
1361 memcpy(sbuff + sl + 4, sks, 3);
1362 } else
1363 memcpy(sbuff + 15, sks, 3);
1364 if (sdebug_verbose)
1365 sdev_printk(KERN_INFO, scp->device, "%s: [sense_key,asc,ascq"
1366 "]: [0x5,0x%x,0x0] %c byte=%d, bit=%d\n",
1367 my_name, asc, c_d ? 'C' : 'D', in_byte, in_bit);
1368 }
1369
mk_sense_buffer(struct scsi_cmnd * scp,int key,int asc,int asq)1370 static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq)
1371 {
1372 if (!scp->sense_buffer) {
1373 sdev_printk(KERN_ERR, scp->device,
1374 "%s: sense_buffer is NULL\n", __func__);
1375 return;
1376 }
1377 memset(scp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
1378
1379 scsi_build_sense(scp, sdebug_dsense, key, asc, asq);
1380
1381 if (sdebug_verbose)
1382 sdev_printk(KERN_INFO, scp->device,
1383 "%s: [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
1384 my_name, key, asc, asq);
1385 }
1386
1387 /* Sense data that has information fields for tapes */
mk_sense_info_tape(struct scsi_cmnd * scp,int key,int asc,int asq,unsigned int information,unsigned char tape_flags)1388 static void mk_sense_info_tape(struct scsi_cmnd *scp, int key, int asc, int asq,
1389 unsigned int information, unsigned char tape_flags)
1390 {
1391 if (!scp->sense_buffer) {
1392 sdev_printk(KERN_ERR, scp->device,
1393 "%s: sense_buffer is NULL\n", __func__);
1394 return;
1395 }
1396 memset(scp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
1397
1398 scsi_build_sense(scp, /* sdebug_dsense */ 0, key, asc, asq);
1399 /* only fixed format so far */
1400
1401 scp->sense_buffer[0] |= 0x80; /* valid */
1402 scp->sense_buffer[2] |= tape_flags;
1403 put_unaligned_be32(information, &scp->sense_buffer[3]);
1404
1405 if (sdebug_verbose)
1406 sdev_printk(KERN_INFO, scp->device,
1407 "%s: [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
1408 my_name, key, asc, asq);
1409 }
1410
mk_sense_invalid_opcode(struct scsi_cmnd * scp)1411 static void mk_sense_invalid_opcode(struct scsi_cmnd *scp)
1412 {
1413 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
1414 }
1415
scsi_debug_ioctl(struct scsi_device * dev,unsigned int cmd,void __user * arg)1416 static int scsi_debug_ioctl(struct scsi_device *dev, unsigned int cmd,
1417 void __user *arg)
1418 {
1419 if (sdebug_verbose) {
1420 if (0x1261 == cmd)
1421 sdev_printk(KERN_INFO, dev,
1422 "%s: BLKFLSBUF [0x1261]\n", __func__);
1423 else if (0x5331 == cmd)
1424 sdev_printk(KERN_INFO, dev,
1425 "%s: CDROM_GET_CAPABILITY [0x5331]\n",
1426 __func__);
1427 else
1428 sdev_printk(KERN_INFO, dev, "%s: cmd=0x%x\n",
1429 __func__, cmd);
1430 }
1431 return -EINVAL;
1432 /* return -ENOTTY; // correct return but upsets fdisk */
1433 }
1434
config_cdb_len(struct scsi_device * sdev)1435 static void config_cdb_len(struct scsi_device *sdev)
1436 {
1437 switch (sdebug_cdb_len) {
1438 case 6: /* suggest 6 byte READ, WRITE and MODE SENSE/SELECT */
1439 sdev->use_10_for_rw = false;
1440 sdev->use_16_for_rw = false;
1441 sdev->use_10_for_ms = false;
1442 break;
1443 case 10: /* suggest 10 byte RWs and 6 byte MODE SENSE/SELECT */
1444 sdev->use_10_for_rw = true;
1445 sdev->use_16_for_rw = false;
1446 sdev->use_10_for_ms = false;
1447 break;
1448 case 12: /* suggest 10 byte RWs and 10 byte MODE SENSE/SELECT */
1449 sdev->use_10_for_rw = true;
1450 sdev->use_16_for_rw = false;
1451 sdev->use_10_for_ms = true;
1452 break;
1453 case 16:
1454 sdev->use_10_for_rw = false;
1455 sdev->use_16_for_rw = true;
1456 sdev->use_10_for_ms = true;
1457 break;
1458 case 32: /* No knobs to suggest this so same as 16 for now */
1459 sdev->use_10_for_rw = false;
1460 sdev->use_16_for_rw = true;
1461 sdev->use_10_for_ms = true;
1462 break;
1463 default:
1464 pr_warn("unexpected cdb_len=%d, force to 10\n",
1465 sdebug_cdb_len);
1466 sdev->use_10_for_rw = true;
1467 sdev->use_16_for_rw = false;
1468 sdev->use_10_for_ms = false;
1469 sdebug_cdb_len = 10;
1470 break;
1471 }
1472 }
1473
1474 static void all_config_cdb_len(void)
1475 {
1476 struct sdebug_host_info *sdbg_host;
1477 struct Scsi_Host *shost;
1478 struct scsi_device *sdev;
1479
1480 mutex_lock(&sdebug_host_list_mutex);
1481 list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
1482 shost = sdbg_host->shost;
1483 shost_for_each_device(sdev, shost) {
1484 config_cdb_len(sdev);
1485 }
1486 }
1487 mutex_unlock(&sdebug_host_list_mutex);
1488 }
1489
1490 static void clear_luns_changed_on_target(struct sdebug_dev_info *devip)
1491 {
1492 struct sdebug_host_info *sdhp = devip->sdbg_host;
1493 struct sdebug_dev_info *dp;
1494
1495 list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
1496 if ((devip->sdbg_host == dp->sdbg_host) &&
1497 (devip->target == dp->target)) {
1498 clear_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
1499 }
1500 }
1501 }
1502
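/*
 * Note: make_ua() reports at most one pending unit attention per call.
 * The lowest set bit in devip->uas_bm is translated into sense data and
 * cleared, so successive commands drain queued UAs one at a time; it
 * returns check_condition_result when a UA was reported, else 0.
 */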
1503 static int make_ua(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1504 {
1505 int k;
1506
1507 k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS);
1508 if (k != SDEBUG_NUM_UAS) {
1509 const char *cp = NULL;
1510
1511 switch (k) {
1512 case SDEBUG_UA_POR:
1513 mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
1514 POWER_ON_RESET_ASCQ);
1515 if (sdebug_verbose)
1516 cp = "power on reset";
1517 break;
1518 case SDEBUG_UA_POOCCUR:
1519 mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
1520 POWER_ON_OCCURRED_ASCQ);
1521 if (sdebug_verbose)
1522 cp = "power on occurred";
1523 break;
1524 case SDEBUG_UA_BUS_RESET:
1525 mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
1526 BUS_RESET_ASCQ);
1527 if (sdebug_verbose)
1528 cp = "bus reset";
1529 break;
1530 case SDEBUG_UA_MODE_CHANGED:
1531 mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
1532 MODE_CHANGED_ASCQ);
1533 if (sdebug_verbose)
1534 cp = "mode parameters changed";
1535 break;
1536 case SDEBUG_UA_CAPACITY_CHANGED:
1537 mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
1538 CAPACITY_CHANGED_ASCQ);
1539 if (sdebug_verbose)
1540 cp = "capacity data changed";
1541 break;
1542 case SDEBUG_UA_MICROCODE_CHANGED:
1543 mk_sense_buffer(scp, UNIT_ATTENTION,
1544 TARGET_CHANGED_ASC,
1545 MICROCODE_CHANGED_ASCQ);
1546 if (sdebug_verbose)
1547 cp = "microcode has been changed";
1548 break;
1549 case SDEBUG_UA_MICROCODE_CHANGED_WO_RESET:
1550 mk_sense_buffer(scp, UNIT_ATTENTION,
1551 TARGET_CHANGED_ASC,
1552 MICROCODE_CHANGED_WO_RESET_ASCQ);
1553 if (sdebug_verbose)
1554 cp = "microcode has been changed without reset";
1555 break;
1556 case SDEBUG_UA_LUNS_CHANGED:
1557 /*
1558 * SPC-3 behavior is to report a UNIT ATTENTION with
1559 * ASC/ASCQ REPORTED LUNS DATA HAS CHANGED on every LUN
1560 * on the target, until a REPORT LUNS command is
1561 * received. SPC-4 behavior is to report it only once.
1562 * NOTE: sdebug_scsi_level does not use the same
1563 * values as struct scsi_device->scsi_level.
1564 */
1565 if (sdebug_scsi_level >= 6) /* SPC-4 and above */
1566 clear_luns_changed_on_target(devip);
1567 mk_sense_buffer(scp, UNIT_ATTENTION,
1568 TARGET_CHANGED_ASC,
1569 LUNS_CHANGED_ASCQ);
1570 if (sdebug_verbose)
1571 cp = "reported luns data has changed";
1572 break;
1573 case SDEBUG_UA_NOT_READY_TO_READY:
1574 mk_sense_buffer(scp, UNIT_ATTENTION, UA_READY_ASC,
1575 0);
1576 if (sdebug_verbose)
1577 cp = "not ready to ready transition/media change";
1578 break;
1579 default:
1580 pr_warn("unexpected unit attention code=%d\n", k);
1581 if (sdebug_verbose)
1582 cp = "unknown";
1583 break;
1584 }
1585 clear_bit(k, devip->uas_bm);
1586 if (sdebug_verbose)
1587 sdev_printk(KERN_INFO, scp->device,
1588 "%s reports: Unit attention: %s\n",
1589 my_name, cp);
1590 return check_condition_result;
1591 }
1592 return 0;
1593 }
1594
1595 /* Build SCSI "data-in" buffer. Returns 0 if ok else (DID_ERROR << 16). */
1596 static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
1597 int arr_len)
1598 {
1599 int act_len;
1600 struct scsi_data_buffer *sdb = &scp->sdb;
1601
1602 if (!sdb->length)
1603 return 0;
1604 if (scp->sc_data_direction != DMA_FROM_DEVICE)
1605 return DID_ERROR << 16;
1606
1607 act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
1608 arr, arr_len);
1609 scsi_set_resid(scp, scsi_bufflen(scp) - act_len);
1610
1611 return 0;
1612 }
1613
1614 /* Partial build of SCSI "data-in" buffer. Returns 0 if ok else
1615 * (DID_ERROR << 16). Can write to offset in data-in buffer. If multiple
1616 * calls, not required to write in ascending offset order. Assumes resid
1617 * set to scsi_bufflen() prior to any calls.
1618 */
1619 static int p_fill_from_dev_buffer(struct scsi_cmnd *scp, const void *arr,
1620 int arr_len, unsigned int off_dst)
1621 {
1622 unsigned int act_len, n;
1623 struct scsi_data_buffer *sdb = &scp->sdb;
1624 off_t skip = off_dst;
1625
1626 if (sdb->length <= off_dst)
1627 return 0;
1628 if (scp->sc_data_direction != DMA_FROM_DEVICE)
1629 return DID_ERROR << 16;
1630
1631 act_len = sg_pcopy_from_buffer(sdb->table.sgl, sdb->table.nents,
1632 arr, arr_len, skip);
1633 pr_debug("%s: off_dst=%u, scsi_bufflen=%u, act_len=%u, resid=%d\n",
1634 __func__, off_dst, scsi_bufflen(scp), act_len,
1635 scsi_get_resid(scp));
1636 n = scsi_bufflen(scp) - (off_dst + act_len);
1637 scsi_set_resid(scp, min_t(u32, scsi_get_resid(scp), n));
1638 return 0;
1639 }
1640
1641 /* Fetches from SCSI "data-out" buffer. Returns number of bytes fetched into
1642 * 'arr' or -1 if error.
1643 */
1644 static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
1645 int arr_len)
1646 {
1647 if (!scsi_bufflen(scp))
1648 return 0;
1649 if (scp->sc_data_direction != DMA_TO_DEVICE)
1650 return -1;
1651
1652 return scsi_sg_copy_to_buffer(scp, arr, arr_len);
1653 }
1654
1655
1656 static char sdebug_inq_vendor_id[9] = "Linux ";
1657 static char sdebug_inq_product_id[17] = "scsi_debug ";
1658 static char sdebug_inq_product_rev[5] = SDEBUG_VERSION;
1659 /* Use some locally assigned NAAs for SAS addresses. */
1660 static const u64 naa3_comp_a = 0x3222222000000000ULL;
1661 static const u64 naa3_comp_b = 0x3333333000000000ULL;
1662 static const u64 naa3_comp_c = 0x3111111000000000ULL;
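/*
 * Note: the leading 0x3 nibble makes these NAA-3 ("locally assigned")
 * identifiers, so no IEEE OUI is implied. The low-order bits are filled
 * in per device or port, for example:
 *	put_unaligned_be64(naa3_comp_b + dev_id_num, arr + num);
 */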
1663
1664 /* Device identification VPD page. Returns number of bytes placed in arr */
1665 static int inquiry_vpd_83(unsigned char *arr, int port_group_id,
1666 int target_dev_id, int dev_id_num,
1667 const char *dev_id_str, int dev_id_str_len,
1668 const uuid_t *lu_name)
1669 {
1670 int num, port_a;
1671 char b[32];
1672
1673 port_a = target_dev_id + 1;
1674 /* T10 vendor identifier field format (faked) */
1675 arr[0] = 0x2; /* ASCII */
1676 arr[1] = 0x1;
1677 arr[2] = 0x0;
1678 memcpy(&arr[4], sdebug_inq_vendor_id, 8);
1679 memcpy(&arr[12], sdebug_inq_product_id, 16);
1680 memcpy(&arr[28], dev_id_str, dev_id_str_len);
1681 num = 8 + 16 + dev_id_str_len;
1682 arr[3] = num;
1683 num += 4;
1684 if (dev_id_num >= 0) {
1685 if (sdebug_uuid_ctl) {
1686 /* Locally assigned UUID */
1687 arr[num++] = 0x1; /* binary (not necessarily sas) */
1688 arr[num++] = 0xa; /* PIV=0, lu, uuid */
1689 arr[num++] = 0x0;
1690 arr[num++] = 0x12;
1691 arr[num++] = 0x10; /* uuid type=1, locally assigned */
1692 arr[num++] = 0x0;
1693 memcpy(arr + num, lu_name, 16);
1694 num += 16;
1695 } else {
1696 /* NAA-3, Logical unit identifier (binary) */
1697 arr[num++] = 0x1; /* binary (not necessarily sas) */
1698 arr[num++] = 0x3; /* PIV=0, lu, naa */
1699 arr[num++] = 0x0;
1700 arr[num++] = 0x8;
1701 put_unaligned_be64(naa3_comp_b + dev_id_num, arr + num);
1702 num += 8;
1703 }
1704 /* Target relative port number */
1705 arr[num++] = 0x61; /* proto=sas, binary */
1706 arr[num++] = 0x94; /* PIV=1, target port, rel port */
1707 arr[num++] = 0x0; /* reserved */
1708 arr[num++] = 0x4; /* length */
1709 arr[num++] = 0x0; /* reserved */
1710 arr[num++] = 0x0; /* reserved */
1711 arr[num++] = 0x0;
1712 arr[num++] = 0x1; /* relative port A */
1713 }
1714 /* NAA-3, Target port identifier */
1715 arr[num++] = 0x61; /* proto=sas, binary */
1716 arr[num++] = 0x93; /* piv=1, target port, naa */
1717 arr[num++] = 0x0;
1718 arr[num++] = 0x8;
1719 put_unaligned_be64(naa3_comp_a + port_a, arr + num);
1720 num += 8;
1721 /* NAA-3, Target port group identifier */
1722 arr[num++] = 0x61; /* proto=sas, binary */
1723 arr[num++] = 0x95; /* piv=1, target port group id */
1724 arr[num++] = 0x0;
1725 arr[num++] = 0x4;
1726 arr[num++] = 0;
1727 arr[num++] = 0;
1728 put_unaligned_be16(port_group_id, arr + num);
1729 num += 2;
1730 /* NAA-3, Target device identifier */
1731 arr[num++] = 0x61; /* proto=sas, binary */
1732 arr[num++] = 0xa3; /* piv=1, target device, naa */
1733 arr[num++] = 0x0;
1734 arr[num++] = 0x8;
1735 put_unaligned_be64(naa3_comp_a + target_dev_id, arr + num);
1736 num += 8;
1737 /* SCSI name string: Target device identifier */
1738 arr[num++] = 0x63; /* proto=sas, UTF-8 */
1739 arr[num++] = 0xa8; /* piv=1, target device, SCSI name string */
1740 arr[num++] = 0x0;
1741 arr[num++] = 24;
1742 memcpy(arr + num, "naa.32222220", 12);
1743 num += 12;
1744 snprintf(b, sizeof(b), "%08X", target_dev_id);
1745 memcpy(arr + num, b, 8);
1746 num += 8;
1747 memset(arr + num, 0, 4);
1748 num += 4;
1749 return num;
1750 }
1751
1752 static unsigned char vpd84_data[] = {
1753 /* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
1754 0x22,0x22,0x22,0x0,0xbb,0x1,
1755 0x22,0x22,0x22,0x0,0xbb,0x2,
1756 };
1757
1758 /* Software interface identification VPD page */
1759 static int inquiry_vpd_84(unsigned char *arr)
1760 {
1761 memcpy(arr, vpd84_data, sizeof(vpd84_data));
1762 return sizeof(vpd84_data);
1763 }
1764
1765 /* Management network addresses VPD page */
1766 static int inquiry_vpd_85(unsigned char *arr)
1767 {
1768 int num = 0;
1769 const char *na1 = "https://www.kernel.org/config";
1770 const char *na2 = "http://www.kernel.org/log";
1771 int plen, olen;
1772
1773 arr[num++] = 0x1; /* lu, storage config */
1774 arr[num++] = 0x0; /* reserved */
1775 arr[num++] = 0x0;
1776 olen = strlen(na1);
1777 plen = olen + 1;
1778 if (plen % 4)
1779 plen = ((plen / 4) + 1) * 4;
1780 arr[num++] = plen; /* length, null terminated, padded */
1781 memcpy(arr + num, na1, olen);
1782 memset(arr + num + olen, 0, plen - olen);
1783 num += plen;
1784
1785 arr[num++] = 0x4; /* lu, logging */
1786 arr[num++] = 0x0; /* reserved */
1787 arr[num++] = 0x0;
1788 olen = strlen(na2);
1789 plen = olen + 1;
1790 if (plen % 4)
1791 plen = ((plen / 4) + 1) * 4;
1792 arr[num++] = plen; /* length, null terminated, padded */
1793 memcpy(arr + num, na2, olen);
1794 memset(arr + num + olen, 0, plen - olen);
1795 num += plen;
1796
1797 return num;
1798 }
1799
1800 /* SCSI ports VPD page */
1801 static int inquiry_vpd_88(unsigned char *arr, int target_dev_id)
1802 {
1803 int num = 0;
1804 int port_a, port_b;
1805
1806 port_a = target_dev_id + 1;
1807 port_b = port_a + 1;
1808 arr[num++] = 0x0; /* reserved */
1809 arr[num++] = 0x0; /* reserved */
1810 arr[num++] = 0x0;
1811 arr[num++] = 0x1; /* relative port 1 (primary) */
1812 memset(arr + num, 0, 6);
1813 num += 6;
1814 arr[num++] = 0x0;
1815 arr[num++] = 12; /* length tp descriptor */
1816 /* naa-3 target port identifier (A) */
1817 arr[num++] = 0x61; /* proto=sas, binary */
1818 arr[num++] = 0x93; /* PIV=1, target port, NAA */
1819 arr[num++] = 0x0; /* reserved */
1820 arr[num++] = 0x8; /* length */
1821 put_unaligned_be64(naa3_comp_a + port_a, arr + num);
1822 num += 8;
1823 arr[num++] = 0x0; /* reserved */
1824 arr[num++] = 0x0; /* reserved */
1825 arr[num++] = 0x0;
1826 arr[num++] = 0x2; /* relative port 2 (secondary) */
1827 memset(arr + num, 0, 6);
1828 num += 6;
1829 arr[num++] = 0x0;
1830 arr[num++] = 12; /* length tp descriptor */
1831 /* naa-3 target port identifier (B) */
1832 arr[num++] = 0x61; /* proto=sas, binary */
1833 arr[num++] = 0x93; /* PIV=1, target port, NAA */
1834 arr[num++] = 0x0; /* reserved */
1835 arr[num++] = 0x8; /* length */
1836 put_unaligned_be64(naa3_comp_a + port_b, arr + num);
1837 num += 8;
1838
1839 return num;
1840 }
1841
1842
1843 static unsigned char vpd89_data[] = {
1844 /* from 4th byte */ 0,0,0,0,
1845 'l','i','n','u','x',' ',' ',' ',
1846 'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
1847 '1','2','3','4',
1848 0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
1849 0xec,0,0,0,
1850 0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
1851 0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
1852 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
1853 0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
1854 0x53,0x41,
1855 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
1856 0x20,0x20,
1857 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
1858 0x10,0x80,
1859 0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
1860 0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
1861 0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
1862 0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
1863 0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
1864 0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
1865 0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
1866 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1867 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1868 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1869 0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
1870 0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
1871 0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
1872 0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
1873 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1874 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1875 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1876 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1877 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1878 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1879 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1880 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1881 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1882 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1883 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1884 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
1885 };
1886
1887 /* ATA Information VPD page */
1888 static int inquiry_vpd_89(unsigned char *arr)
1889 {
1890 memcpy(arr, vpd89_data, sizeof(vpd89_data));
1891 return sizeof(vpd89_data);
1892 }
1893
1894
1895 static unsigned char vpdb0_data[] = {
1896 /* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
1897 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1898 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1899 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1900 };
1901
1902 /* Block limits VPD page (SBC-3) */
1903 static int inquiry_vpd_b0(unsigned char *arr)
1904 {
1905 unsigned int gran;
1906
1907 memcpy(arr, vpdb0_data, sizeof(vpdb0_data));
1908
1909 /* Optimal transfer length granularity */
1910 if (sdebug_opt_xferlen_exp != 0 &&
1911 sdebug_physblk_exp < sdebug_opt_xferlen_exp)
1912 gran = 1 << sdebug_opt_xferlen_exp;
1913 else
1914 gran = 1 << sdebug_physblk_exp;
1915 put_unaligned_be16(gran, arr + 2);
1916
1917 /* Maximum Transfer Length */
1918 if (sdebug_store_sectors > 0x400)
1919 put_unaligned_be32(sdebug_store_sectors, arr + 4);
1920
1921 /* Optimal Transfer Length */
1922 put_unaligned_be32(sdebug_opt_blks, &arr[8]);
1923
1924 if (sdebug_lbpu) {
1925 /* Maximum Unmap LBA Count */
1926 put_unaligned_be32(sdebug_unmap_max_blocks, &arr[16]);
1927
1928 /* Maximum Unmap Block Descriptor Count */
1929 put_unaligned_be32(sdebug_unmap_max_desc, &arr[20]);
1930 }
1931
1932 /* Unmap Granularity Alignment */
1933 if (sdebug_unmap_alignment) {
1934 put_unaligned_be32(sdebug_unmap_alignment, &arr[28]);
1935 arr[28] |= 0x80; /* UGAVALID */
1936 }
1937
1938 /* Optimal Unmap Granularity */
1939 put_unaligned_be32(sdebug_unmap_granularity, &arr[24]);
1940
1941 /* Maximum WRITE SAME Length */
1942 put_unaligned_be64(sdebug_write_same_length, &arr[32]);
1943
1944 if (sdebug_atomic_wr) {
1945 put_unaligned_be32(sdebug_atomic_wr_max_length, &arr[40]);
1946 put_unaligned_be32(sdebug_atomic_wr_align, &arr[44]);
1947 put_unaligned_be32(sdebug_atomic_wr_gran, &arr[48]);
1948 put_unaligned_be32(sdebug_atomic_wr_max_length_bndry, &arr[52]);
1949 put_unaligned_be32(sdebug_atomic_wr_max_bndry, &arr[56]);
1950 }
1951
1952 return 0x3c; /* Mandatory page length for Logical Block Provisioning */
1953 }
1954
1955 /* Block device characteristics VPD page (SBC-3) */
1956 static int inquiry_vpd_b1(struct sdebug_dev_info *devip, unsigned char *arr)
1957 {
1958 memset(arr, 0, 0x3c);
1959 arr[0] = 0;
1960 arr[1] = 1; /* non rotating medium (e.g. solid state) */
1961 arr[2] = 0;
1962 arr[3] = 5; /* less than 1.8" */
1963
1964 return 0x3c;
1965 }
1966
1967 /* Logical block provisioning VPD page (SBC-4) */
1968 static int inquiry_vpd_b2(unsigned char *arr)
1969 {
1970 memset(arr, 0, 0x4);
1971 arr[0] = 0; /* threshold exponent */
1972 if (sdebug_lbpu)
1973 arr[1] = 1 << 7;
1974 if (sdebug_lbpws)
1975 arr[1] |= 1 << 6;
1976 if (sdebug_lbpws10)
1977 arr[1] |= 1 << 5;
1978 if (sdebug_lbprz && scsi_debug_lbp())
1979 arr[1] |= (sdebug_lbprz & 0x7) << 2; /* sbc4r07 and later */
1980 /* anc_sup=0; dp=0 (no provisioning group descriptor) */
1981 /* minimum_percentage=0; provisioning_type=0 (unknown) */
1982 /* threshold_percentage=0 */
1983 return 0x4;
1984 }
1985
1986 /* Zoned block device characteristics VPD page (ZBC mandatory) */
1987 static int inquiry_vpd_b6(struct sdebug_dev_info *devip, unsigned char *arr)
1988 {
1989 memset(arr, 0, 0x3c);
1990 arr[0] = 0x1; /* set URSWRZ (unrestricted read in seq. wr req zone) */
1991 /*
1992 * Set Optimal number of open sequential write preferred zones and
1993 * Optimal number of non-sequentially written sequential write
1994 * preferred zones fields to 'not reported' (0xffffffff). Leave other
1995 * fields set to zero, apart from Max. number of open swrz_s field.
1996 */
1997 put_unaligned_be32(0xffffffff, &arr[4]);
1998 put_unaligned_be32(0xffffffff, &arr[8]);
1999 if (sdeb_zbc_model == BLK_ZONED_HM && devip->max_open)
2000 put_unaligned_be32(devip->max_open, &arr[12]);
2001 else
2002 put_unaligned_be32(0xffffffff, &arr[12]);
2003 if (devip->zcap < devip->zsize) {
2004 arr[19] = ZBC_CONSTANT_ZONE_START_OFFSET;
2005 put_unaligned_be64(devip->zsize, &arr[20]);
2006 } else {
2007 arr[19] = 0;
2008 }
2009 return 0x3c;
2010 }
2011
2012 #define SDEBUG_BLE_LEN_AFTER_B4 28 /* thus vpage 32 bytes long */
2013
2014 enum { MAXIMUM_NUMBER_OF_STREAMS = 6, PERMANENT_STREAM_COUNT = 5 };
2015
2016 /* Block limits extension VPD page (SBC-4) */
2017 static int inquiry_vpd_b7(unsigned char *arrb4)
2018 {
2019 memset(arrb4, 0, SDEBUG_BLE_LEN_AFTER_B4);
2020 arrb4[1] = 1; /* Reduced stream control support (RSCS) */
2021 put_unaligned_be16(MAXIMUM_NUMBER_OF_STREAMS, &arrb4[2]);
2022 return SDEBUG_BLE_LEN_AFTER_B4;
2023 }
2024
2025 #define SDEBUG_LONG_INQ_SZ 96
2026 #define SDEBUG_MAX_INQ_ARR_SZ 584
2027
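/*
 * Note: INQUIRY handler. When EVPD (cmd[1] bit 0) is set, cmd[2] names
 * the VPD page and the inquiry_vpd_*() helpers above build it;
 * otherwise a standard SDEBUG_LONG_INQ_SZ response is built. Either way
 * the response is truncated to the ALLOCATION LENGTH in bytes 3..4.
 */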
2028 static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
2029 {
2030 unsigned char pq_pdt;
2031 unsigned char *arr;
2032 unsigned char *cmd = scp->cmnd;
2033 u32 alloc_len, n;
2034 int ret;
2035 bool have_wlun, is_disk, is_zbc, is_disk_zbc;
2036
2037 alloc_len = get_unaligned_be16(cmd + 3);
2038 arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
2039 if (!arr)
2040 return DID_REQUEUE << 16;
2041 is_disk = (sdebug_ptype == TYPE_DISK);
2042 is_zbc = devip->zoned;
2043 is_disk_zbc = (is_disk || is_zbc);
2044 have_wlun = scsi_is_wlun(scp->device->lun);
2045 if (have_wlun)
2046 pq_pdt = TYPE_WLUN; /* present, wlun */
2047 else if (sdebug_no_lun_0 && (devip->lun == SDEBUG_LUN_0_VAL))
2048 pq_pdt = 0x7f; /* not present, PQ=3, PDT=0x1f */
2049 else
2050 pq_pdt = (sdebug_ptype & 0x1f);
2051 arr[0] = pq_pdt;
2052 if (0x2 & cmd[1]) { /* CMDDT bit set */
2053 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
2054 kfree(arr);
2055 return check_condition_result;
2056 } else if (0x1 & cmd[1]) { /* EVPD bit set */
2057 int lu_id_num, port_group_id, target_dev_id;
2058 u32 len;
2059 char lu_id_str[6];
2060 int host_no = devip->sdbg_host->shost->host_no;
2061
2062 arr[1] = cmd[2];
2063 port_group_id = (((host_no + 1) & 0x7f) << 8) +
2064 (devip->channel & 0x7f);
2065 if (sdebug_vpd_use_hostno == 0)
2066 host_no = 0;
2067 lu_id_num = have_wlun ? -1 : (((host_no + 1) * 2000) +
2068 (devip->target * 1000) + devip->lun);
2069 target_dev_id = ((host_no + 1) * 2000) +
2070 (devip->target * 1000) - 3;
2071 len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
2072 if (0 == cmd[2]) { /* supported vital product data pages */
2073 n = 4;
2074 arr[n++] = 0x0; /* this page */
2075 arr[n++] = 0x80; /* unit serial number */
2076 arr[n++] = 0x83; /* device identification */
2077 arr[n++] = 0x84; /* software interface ident. */
2078 arr[n++] = 0x85; /* management network addresses */
2079 arr[n++] = 0x86; /* extended inquiry */
2080 arr[n++] = 0x87; /* mode page policy */
2081 arr[n++] = 0x88; /* SCSI ports */
2082 if (is_disk_zbc) { /* SBC or ZBC */
2083 arr[n++] = 0x89; /* ATA information */
2084 arr[n++] = 0xb0; /* Block limits */
2085 arr[n++] = 0xb1; /* Block characteristics */
2086 if (is_disk)
2087 arr[n++] = 0xb2; /* LB Provisioning */
2088 if (is_zbc)
2089 arr[n++] = 0xb6; /* ZB dev. char. */
2090 arr[n++] = 0xb7; /* Block limits extension */
2091 }
2092 arr[3] = n - 4; /* number of supported VPD pages */
2093 } else if (0x80 == cmd[2]) { /* unit serial number */
2094 arr[3] = len;
2095 memcpy(&arr[4], lu_id_str, len);
2096 } else if (0x83 == cmd[2]) { /* device identification */
2097 arr[3] = inquiry_vpd_83(&arr[4], port_group_id,
2098 target_dev_id, lu_id_num,
2099 lu_id_str, len,
2100 &devip->lu_name);
2101 } else if (0x84 == cmd[2]) { /* Software interface ident. */
2102 arr[3] = inquiry_vpd_84(&arr[4]);
2103 } else if (0x85 == cmd[2]) { /* Management network addresses */
2104 arr[3] = inquiry_vpd_85(&arr[4]);
2105 } else if (0x86 == cmd[2]) { /* extended inquiry */
2106 arr[3] = 0x3c; /* number of following entries */
2107 if (sdebug_dif == T10_PI_TYPE3_PROTECTION)
2108 arr[4] = 0x4; /* SPT: GRD_CHK:1 */
2109 else if (have_dif_prot)
2110 arr[4] = 0x5; /* SPT: GRD_CHK:1, REF_CHK:1 */
2111 else
2112 arr[4] = 0x0; /* no protection stuff */
2113 /*
2114 * GROUP_SUP=1; HEADSUP=1 (HEAD OF QUEUE); ORDSUP=1
2115 * (ORDERED queuing); SIMPSUP=1 (SIMPLE queuing).
2116 */
2117 arr[5] = 0x17;
2118 } else if (0x87 == cmd[2]) { /* mode page policy */
2119 arr[3] = 0x8; /* number of following entries */
2120 arr[4] = 0x2; /* disconnect-reconnect mp */
2121 arr[6] = 0x80; /* mlus, shared */
2122 arr[8] = 0x18; /* protocol specific lu */
2123 arr[10] = 0x82; /* mlus, per initiator port */
2124 } else if (0x88 == cmd[2]) { /* SCSI Ports */
2125 arr[3] = inquiry_vpd_88(&arr[4], target_dev_id);
2126 } else if (is_disk_zbc && 0x89 == cmd[2]) { /* ATA info */
2127 n = inquiry_vpd_89(&arr[4]);
2128 put_unaligned_be16(n, arr + 2);
2129 } else if (is_disk_zbc && 0xb0 == cmd[2]) { /* Block limits */
2130 arr[3] = inquiry_vpd_b0(&arr[4]);
2131 } else if (is_disk_zbc && 0xb1 == cmd[2]) { /* Block char. */
2132 arr[3] = inquiry_vpd_b1(devip, &arr[4]);
2133 } else if (is_disk && 0xb2 == cmd[2]) { /* LB Prov. */
2134 arr[3] = inquiry_vpd_b2(&arr[4]);
2135 } else if (is_zbc && cmd[2] == 0xb6) { /* ZB dev. charact. */
2136 arr[3] = inquiry_vpd_b6(devip, &arr[4]);
2137 } else if (cmd[2] == 0xb7) { /* block limits extension page */
2138 arr[3] = inquiry_vpd_b7(&arr[4]);
2139 } else {
2140 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
2141 kfree(arr);
2142 return check_condition_result;
2143 }
2144 len = min_t(u32, get_unaligned_be16(arr + 2) + 4, alloc_len);
2145 ret = fill_from_dev_buffer(scp, arr,
2146 min_t(u32, len, SDEBUG_MAX_INQ_ARR_SZ));
2147 kfree(arr);
2148 return ret;
2149 }
2150 /* drops through here for a standard inquiry */
2151 arr[1] = sdebug_removable ? 0x80 : 0; /* Removable disk */
2152 arr[2] = sdebug_scsi_level;
2153 arr[3] = 2; /* response_data_format==2 */
2154 arr[4] = SDEBUG_LONG_INQ_SZ - 5;
2155 arr[5] = (int)have_dif_prot; /* PROTECT bit */
2156 if (sdebug_vpd_use_hostno == 0)
2157 arr[5] |= 0x10; /* claim: implicit TPGS */
2158 arr[6] = 0x10; /* claim: MultiP */
2159 /* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
2160 arr[7] = 0xa; /* claim: LINKED + CMDQUE */
2161 memcpy(&arr[8], sdebug_inq_vendor_id, 8);
2162 memcpy(&arr[16], sdebug_inq_product_id, 16);
2163 memcpy(&arr[32], sdebug_inq_product_rev, 4);
2164 /* Use Vendor Specific area to place driver date in ASCII */
2165 memcpy(&arr[36], sdebug_version_date, 8);
2166 /* version descriptors (2 bytes each) follow */
2167 put_unaligned_be16(0xc0, arr + 58); /* SAM-6 no version claimed */
2168 put_unaligned_be16(0x5c0, arr + 60); /* SPC-5 no version claimed */
2169 n = 62;
2170 if (is_disk) { /* SBC-4 no version claimed */
2171 put_unaligned_be16(0x600, arr + n);
2172 n += 2;
2173 } else if (sdebug_ptype == TYPE_TAPE) { /* SSC-4 rev 3 */
2174 put_unaligned_be16(0x525, arr + n);
2175 n += 2;
2176 } else if (is_zbc) { /* ZBC BSR INCITS 536 revision 05 */
2177 put_unaligned_be16(0x624, arr + n);
2178 n += 2;
2179 }
2180 put_unaligned_be16(0x2100, arr + n); /* SPL-4 no version claimed */
2181 ret = fill_from_dev_buffer(scp, arr,
2182 min_t(u32, alloc_len, SDEBUG_LONG_INQ_SZ));
2183 kfree(arr);
2184 return ret;
2185 }
2186
2187 /* See resp_iec_m_pg() for how this data is manipulated */
2188 static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
2189 0, 0, 0x0, 0x0};
2190
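/*
 * Note: REQUEST SENSE handler. It reports start/stop "in progress"
 * sense while the unit is stopped, a THRESHOLD EXCEEDED informational
 * exception when the IEC mode page has TEST=1 and MRIE=6, and all-zero
 * sense otherwise. cmd[1] bit 0 (DESC) selects descriptor (0x72)
 * versus fixed (0x70) format.
 */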
2191 static int resp_requests(struct scsi_cmnd *scp,
2192 struct sdebug_dev_info *devip)
2193 {
2194 unsigned char *cmd = scp->cmnd;
2195 unsigned char arr[SCSI_SENSE_BUFFERSIZE]; /* assume >= 18 bytes */
2196 bool dsense = !!(cmd[1] & 1);
2197 u32 alloc_len = cmd[4];
2198 u32 len = 18;
2199 int stopped_state = atomic_read(&devip->stopped);
2200
2201 memset(arr, 0, sizeof(arr));
2202 if (stopped_state > 0) { /* some "pollable" data [spc6r02: 5.12.2] */
2203 if (dsense) {
2204 arr[0] = 0x72;
2205 arr[1] = NOT_READY;
2206 arr[2] = LOGICAL_UNIT_NOT_READY;
2207 arr[3] = (stopped_state == 2) ? 0x1 : 0x2;
2208 len = 8;
2209 } else {
2210 arr[0] = 0x70;
2211 arr[2] = NOT_READY; /* NOT_READY in sense_key */
2212 arr[7] = 0xa; /* 18 byte sense buffer */
2213 arr[12] = LOGICAL_UNIT_NOT_READY;
2214 arr[13] = (stopped_state == 2) ? 0x1 : 0x2;
2215 }
2216 } else if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
2217 /* Information exceptions control mode page: TEST=1, MRIE=6 */
2218 if (dsense) {
2219 arr[0] = 0x72;
2220 arr[1] = 0x0; /* NO_SENSE in sense_key */
2221 arr[2] = THRESHOLD_EXCEEDED;
2222 arr[3] = 0xff; /* Failure prediction(false) */
2223 len = 8;
2224 } else {
2225 arr[0] = 0x70;
2226 arr[2] = 0x0; /* NO_SENSE in sense_key */
2227 arr[7] = 0xa; /* 18 byte sense buffer */
2228 arr[12] = THRESHOLD_EXCEEDED;
2229 arr[13] = 0xff; /* Failure prediction(false) */
2230 }
2231 } else { /* nothing to report */
2232 if (dsense) {
2233 len = 8;
2234 memset(arr, 0, len);
2235 arr[0] = 0x72;
2236 } else {
2237 memset(arr, 0, len);
2238 arr[0] = 0x70;
2239 arr[7] = 0xa;
2240 }
2241 }
2242 return fill_from_dev_buffer(scp, arr, min_t(u32, len, alloc_len));
2243 }
2244
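/*
 * Note: START STOP UNIT handler. stopped_state == 2 means "becoming
 * ready": until the tur_ms_to_ready interval has elapsed since device
 * creation, a START is rejected and only a STOP is (dummied up as)
 * accepted.
 */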
2245 static int resp_start_stop(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
2246 {
2247 unsigned char *cmd = scp->cmnd;
2248 int power_cond, want_stop, stopped_state;
2249 bool changing;
2250
2251 power_cond = (cmd[4] & 0xf0) >> 4;
2252 if (power_cond) {
2253 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 7);
2254 return check_condition_result;
2255 }
2256 want_stop = !(cmd[4] & 1);
2257 stopped_state = atomic_read(&devip->stopped);
2258 if (stopped_state == 2) {
2259 ktime_t now_ts = ktime_get_boottime();
2260
2261 if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
2262 u64 diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));
2263
2264 if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
2265 /* tur_ms_to_ready timer extinguished */
2266 atomic_set(&devip->stopped, 0);
2267 stopped_state = 0;
2268 }
2269 }
2270 if (stopped_state == 2) {
2271 if (want_stop) {
2272 stopped_state = 1; /* dummy up success */
2273 } else { /* Disallow tur_ms_to_ready delay to be overridden */
2274 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 0 /* START bit */);
2275 return check_condition_result;
2276 }
2277 }
2278 }
2279 changing = (stopped_state != want_stop);
2280 if (changing)
2281 atomic_xchg(&devip->stopped, want_stop);
2282 if (sdebug_ptype == TYPE_TAPE && !want_stop) {
2283 int i;
2284
2285 set_bit(SDEBUG_UA_NOT_READY_TO_READY, devip->uas_bm); /* not legal! */
2286 for (i = 0; i < TAPE_MAX_PARTITIONS; i++)
2287 devip->tape_location[i] = 0;
2288 devip->tape_partition = 0;
2289 }
2290 if (!changing || (cmd[1] & 0x1)) /* state unchanged or IMMED bit set in cdb */
2291 return SDEG_RES_IMMED_MASK;
2292 else
2293 return 0;
2294 }
2295
2296 static sector_t get_sdebug_capacity(void)
2297 {
2298 static const unsigned int gibibyte = 1073741824;
2299
2300 if (sdebug_virtual_gb > 0)
2301 return (sector_t)sdebug_virtual_gb *
2302 (gibibyte / sdebug_sector_size);
2303 else
2304 return sdebug_store_sectors;
2305 }
2306
2307 #define SDEBUG_READCAP_ARR_SZ 8
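/* Note: READ CAPACITY(10) returns a 32-bit last LBA, clamped to
 * 0xffffffff when the capacity does not fit, plus the block size. */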
2308 static int resp_readcap(struct scsi_cmnd *scp,
2309 struct sdebug_dev_info *devip)
2310 {
2311 unsigned char arr[SDEBUG_READCAP_ARR_SZ];
2312 unsigned int capac;
2313
2314 /* following just in case virtual_gb changed */
2315 sdebug_capacity = get_sdebug_capacity();
2316 memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
2317 if (sdebug_capacity < 0xffffffff) {
2318 capac = (unsigned int)sdebug_capacity - 1;
2319 put_unaligned_be32(capac, arr + 0);
2320 } else
2321 put_unaligned_be32(0xffffffff, arr + 0);
2322 put_unaligned_be16(sdebug_sector_size, arr + 6);
2323 return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
2324 }
2325
2326 #define SDEBUG_READCAP16_ARR_SZ 32
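/*
 * Note: READ CAPACITY(16) additionally carries what the 10-byte variant
 * cannot: a 64-bit last LBA, protection bits (P_TYPE/PROT_EN), the
 * physical block exponent, lowest aligned LBA, and LBPME/LBPRZ.
 */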
2327 static int resp_readcap16(struct scsi_cmnd *scp,
2328 struct sdebug_dev_info *devip)
2329 {
2330 unsigned char *cmd = scp->cmnd;
2331 unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
2332 u32 alloc_len;
2333
2334 alloc_len = get_unaligned_be32(cmd + 10);
2335 /* following just in case virtual_gb changed */
2336 sdebug_capacity = get_sdebug_capacity();
2337 memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
2338 put_unaligned_be64((u64)(sdebug_capacity - 1), arr + 0);
2339 put_unaligned_be32(sdebug_sector_size, arr + 8);
2340 arr[13] = sdebug_physblk_exp & 0xf;
2341 arr[14] = (sdebug_lowest_aligned >> 8) & 0x3f;
2342
2343 if (scsi_debug_lbp()) {
2344 arr[14] |= 0x80; /* LBPME */
2345 /* from sbc4r07, this LBPRZ field is 1 bit, but the LBPRZ in
2346 * the LB Provisioning VPD page is 3 bits. Note that lbprz=2
2347 * in the wider field maps to 0 in this field.
2348 */
2349 if (sdebug_lbprz & 1) /* precisely what the draft requires */
2350 arr[14] |= 0x40;
2351 }
2352
2353 /*
2354 * Since the scsi_debug READ CAPACITY implementation always reports the
2355 * total disk capacity, set RC BASIS = 1 for host-managed ZBC devices.
2356 */
2357 if (devip->zoned)
2358 arr[12] |= 1 << 4;
2359
2360 arr[15] = sdebug_lowest_aligned & 0xff;
2361
2362 if (have_dif_prot) {
2363 arr[12] = (sdebug_dif - 1) << 1; /* P_TYPE */
2364 arr[12] |= 1; /* PROT_EN */
2365 }
2366
2367 return fill_from_dev_buffer(scp, arr,
2368 min_t(u32, alloc_len, SDEBUG_READCAP16_ARR_SZ));
2369 }
2370
2371 #define SDEBUG_MAX_TGTPGS_ARR_SZ 1412
2372
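/*
 * Note: REPORT TARGET PORT GROUPS mirrors the two-port story told by
 * VPD page 0x88: group A is active (its access state cycles with
 * host_no when vpd_use_hostno=0), group B is permanently unavailable.
 */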
2373 static int resp_report_tgtpgs(struct scsi_cmnd *scp,
2374 struct sdebug_dev_info *devip)
2375 {
2376 unsigned char *cmd = scp->cmnd;
2377 unsigned char *arr;
2378 int host_no = devip->sdbg_host->shost->host_no;
2379 int port_group_a, port_group_b, port_a, port_b;
2380 u32 alen, n, rlen;
2381 int ret;
2382
2383 alen = get_unaligned_be32(cmd + 6);
2384 arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
2385 if (!arr)
2386 return DID_REQUEUE << 16;
2387 /*
2388 * EVPD page 0x88 states we have two ports, one
2389 * real and a fake port with no device connected.
2390 * So we create two port groups with one port each
2391 * and set the group with port B to unavailable.
2392 */
2393 port_a = 0x1; /* relative port A */
2394 port_b = 0x2; /* relative port B */
2395 port_group_a = (((host_no + 1) & 0x7f) << 8) +
2396 (devip->channel & 0x7f);
2397 port_group_b = (((host_no + 1) & 0x7f) << 8) +
2398 (devip->channel & 0x7f) + 0x80;
2399
2400 /*
2401 * The asymmetric access state is cycled according to the host_id.
2402 */
2403 n = 4;
2404 if (sdebug_vpd_use_hostno == 0) {
2405 arr[n++] = host_no % 3; /* Asymm access state */
2406 arr[n++] = 0x0F; /* claim: all states are supported */
2407 } else {
2408 arr[n++] = 0x0; /* Active/Optimized path */
2409 arr[n++] = 0x01; /* only support active/optimized paths */
2410 }
2411 put_unaligned_be16(port_group_a, arr + n);
2412 n += 2;
2413 arr[n++] = 0; /* Reserved */
2414 arr[n++] = 0; /* Status code */
2415 arr[n++] = 0; /* Vendor unique */
2416 arr[n++] = 0x1; /* One port per group */
2417 arr[n++] = 0; /* Reserved */
2418 arr[n++] = 0; /* Reserved */
2419 put_unaligned_be16(port_a, arr + n);
2420 n += 2;
2421 arr[n++] = 3; /* Port unavailable */
2422 arr[n++] = 0x08; /* claim: only unavailable paths are supported */
2423 put_unaligned_be16(port_group_b, arr + n);
2424 n += 2;
2425 arr[n++] = 0; /* Reserved */
2426 arr[n++] = 0; /* Status code */
2427 arr[n++] = 0; /* Vendor unique */
2428 arr[n++] = 0x1; /* One port per group */
2429 arr[n++] = 0; /* Reserved */
2430 arr[n++] = 0; /* Reserved */
2431 put_unaligned_be16(port_b, arr + n);
2432 n += 2;
2433
2434 rlen = n - 4;
2435 put_unaligned_be32(rlen, arr + 0);
2436
2437 /*
2438 * Return the smallest value of either
2439 * - The allocated length
2440 * - The constructed command length
2441 * - The maximum array size
2442 */
2443 rlen = min(alen, n);
2444 ret = fill_from_dev_buffer(scp, arr,
2445 min_t(u32, rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
2446 kfree(arr);
2447 return ret;
2448 }
2449
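/*
 * Note: REPORT SUPPORTED OPERATION CODES handler. reporting_opts
 * (cmd[2] & 0x7) selects "all commands" (0) or a single opcode lookup
 * (1..3); RCTD (cmd[2] bit 7) appends a 12-byte timeout descriptor to
 * each 8-byte command descriptor, hence bump = rctd ? 20 : 8 below.
 */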
2450 static int resp_rsup_opcodes(struct scsi_cmnd *scp,
2451 struct sdebug_dev_info *devip)
2452 {
2453 bool rctd;
2454 u8 reporting_opts, req_opcode, sdeb_i, supp;
2455 u16 req_sa, u;
2456 u32 alloc_len, a_len;
2457 int k, offset, len, errsts, count, bump, na;
2458 const struct opcode_info_t *oip;
2459 const struct opcode_info_t *r_oip;
2460 u8 *arr;
2461 u8 *cmd = scp->cmnd;
2462
2463 rctd = !!(cmd[2] & 0x80);
2464 reporting_opts = cmd[2] & 0x7;
2465 req_opcode = cmd[3];
2466 req_sa = get_unaligned_be16(cmd + 4);
2467 alloc_len = get_unaligned_be32(cmd + 6);
2468 if (alloc_len < 4 || alloc_len > 0xffff) {
2469 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
2470 return check_condition_result;
2471 }
2472 if (alloc_len > 8192)
2473 a_len = 8192;
2474 else
2475 a_len = alloc_len;
2476 arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_ATOMIC);
2477 if (NULL == arr) {
2478 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
2479 INSUFF_RES_ASCQ);
2480 return check_condition_result;
2481 }
2482 switch (reporting_opts) {
2483 case 0: /* all commands */
2484 /* count number of commands */
2485 for (count = 0, oip = opcode_info_arr;
2486 oip->num_attached != 0xff; ++oip) {
2487 if (F_INV_OP & oip->flags)
2488 continue;
2489 count += (oip->num_attached + 1);
2490 }
2491 bump = rctd ? 20 : 8;
2492 put_unaligned_be32(count * bump, arr);
2493 for (offset = 4, oip = opcode_info_arr;
2494 oip->num_attached != 0xff && offset < a_len; ++oip) {
2495 if (F_INV_OP & oip->flags)
2496 continue;
2497 na = oip->num_attached;
2498 arr[offset] = oip->opcode;
2499 put_unaligned_be16(oip->sa, arr + offset + 2);
2500 if (rctd)
2501 arr[offset + 5] |= 0x2;
2502 if (FF_SA & oip->flags)
2503 arr[offset + 5] |= 0x1;
2504 put_unaligned_be16(oip->len_mask[0], arr + offset + 6);
2505 if (rctd)
2506 put_unaligned_be16(0xa, arr + offset + 8);
2507 r_oip = oip;
2508 for (k = 0, oip = oip->arrp; k < na; ++k, ++oip) {
2509 if (F_INV_OP & oip->flags)
2510 continue;
2511 offset += bump;
2512 arr[offset] = oip->opcode;
2513 put_unaligned_be16(oip->sa, arr + offset + 2);
2514 if (rctd)
2515 arr[offset + 5] |= 0x2;
2516 if (FF_SA & oip->flags)
2517 arr[offset + 5] |= 0x1;
2518 put_unaligned_be16(oip->len_mask[0],
2519 arr + offset + 6);
2520 if (rctd)
2521 put_unaligned_be16(0xa,
2522 arr + offset + 8);
2523 }
2524 oip = r_oip;
2525 offset += bump;
2526 }
2527 break;
2528 case 1: /* one command: opcode only */
2529 case 2: /* one command: opcode plus service action */
2530 case 3: /* one command: if sa==0 then opcode only else opcode+sa */
2531 sdeb_i = opcode_ind_arr[req_opcode];
2532 oip = &opcode_info_arr[sdeb_i];
2533 if (F_INV_OP & oip->flags) {
2534 supp = 1;
2535 offset = 4;
2536 } else {
2537 if (1 == reporting_opts) {
2538 if (FF_SA & oip->flags) {
2539 mk_sense_invalid_fld(scp, SDEB_IN_CDB,
2540 2, 2);
2541 kfree(arr);
2542 return check_condition_result;
2543 }
2544 req_sa = 0;
2545 } else if (2 == reporting_opts &&
2546 0 == (FF_SA & oip->flags)) {
2547 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1); /* point at requested sa */
2548 kfree(arr);
2549 return check_condition_result;
2550 }
2551 if (0 == (FF_SA & oip->flags) &&
2552 req_opcode == oip->opcode)
2553 supp = 3;
2554 else if (0 == (FF_SA & oip->flags)) {
2555 na = oip->num_attached;
2556 for (k = 0, oip = oip->arrp; k < na;
2557 ++k, ++oip) {
2558 if (req_opcode == oip->opcode)
2559 break;
2560 }
2561 supp = (k >= na) ? 1 : 3;
2562 } else if (req_sa != oip->sa) {
2563 na = oip->num_attached;
2564 for (k = 0, oip = oip->arrp; k < na;
2565 ++k, ++oip) {
2566 if (req_sa == oip->sa)
2567 break;
2568 }
2569 supp = (k >= na) ? 1 : 3;
2570 } else
2571 supp = 3;
2572 if (3 == supp) {
2573 u = oip->len_mask[0];
2574 put_unaligned_be16(u, arr + 2);
2575 arr[4] = oip->opcode;
2576 for (k = 1; k < u; ++k)
2577 arr[4 + k] = (k < 16) ?
2578 oip->len_mask[k] : 0xff;
2579 offset = 4 + u;
2580 } else
2581 offset = 4;
2582 }
2583 arr[1] = (rctd ? 0x80 : 0) | supp;
2584 if (rctd) {
2585 put_unaligned_be16(0xa, arr + offset);
2586 offset += 12;
2587 }
2588 break;
2589 default:
2590 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
2591 kfree(arr);
2592 return check_condition_result;
2593 }
2594 offset = (offset < a_len) ? offset : a_len;
2595 len = (offset < alloc_len) ? offset : alloc_len;
2596 errsts = fill_from_dev_buffer(scp, arr, len);
2597 kfree(arr);
2598 return errsts;
2599 }
2600
2601 static int resp_rsup_tmfs(struct scsi_cmnd *scp,
2602 struct sdebug_dev_info *devip)
2603 {
2604 bool repd;
2605 u32 alloc_len, len;
2606 u8 arr[16];
2607 u8 *cmd = scp->cmnd;
2608
2609 memset(arr, 0, sizeof(arr));
2610 repd = !!(cmd[2] & 0x80);
2611 alloc_len = get_unaligned_be32(cmd + 6);
2612 if (alloc_len < 4) {
2613 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
2614 return check_condition_result;
2615 }
2616 arr[0] = 0xc8; /* ATS | ATSS | LURS */
2617 arr[1] = 0x1; /* ITNRS */
2618 if (repd) {
2619 arr[3] = 0xc;
2620 len = 16;
2621 } else
2622 len = 4;
2623
2624 len = (len < alloc_len) ? len : alloc_len;
2625 return fill_from_dev_buffer(scp, arr, len);
2626 }
2627
2628 /* <<Following mode page info copied from ST318451LW>> */
2629
2630 static int resp_err_recov_pg(unsigned char *p, int pcontrol, int target)
2631 { /* Read-Write Error Recovery page for mode_sense */
2632 unsigned char err_recov_pg[] = {0x1, 0xa, 0xc0, 11, 240, 0, 0, 0,
2633 5, 0, 0xff, 0xff};
2634
2635 memcpy(p, err_recov_pg, sizeof(err_recov_pg));
2636 if (1 == pcontrol)
2637 memset(p + 2, 0, sizeof(err_recov_pg) - 2);
2638 return sizeof(err_recov_pg);
2639 }
2640
2641 static int resp_disconnect_pg(unsigned char *p, int pcontrol, int target)
2642 { /* Disconnect-Reconnect page for mode_sense */
2643 unsigned char disconnect_pg[] = {0x2, 0xe, 128, 128, 0, 10, 0, 0,
2644 0, 0, 0, 0, 0, 0, 0, 0};
2645
2646 memcpy(p, disconnect_pg, sizeof(disconnect_pg));
2647 if (1 == pcontrol)
2648 memset(p + 2, 0, sizeof(disconnect_pg) - 2);
2649 return sizeof(disconnect_pg);
2650 }
2651
2652 static int resp_format_pg(unsigned char *p, int pcontrol, int target)
2653 { /* Format device page for mode_sense */
2654 unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
2655 0, 0, 0, 0, 0, 0, 0, 0,
2656 0, 0, 0, 0, 0x40, 0, 0, 0};
2657
2658 memcpy(p, format_pg, sizeof(format_pg));
2659 put_unaligned_be16(sdebug_sectors_per, p + 10);
2660 put_unaligned_be16(sdebug_sector_size, p + 12);
2661 if (sdebug_removable)
2662 p[20] |= 0x20; /* should agree with INQUIRY */
2663 if (1 == pcontrol)
2664 memset(p + 2, 0, sizeof(format_pg) - 2);
2665 return sizeof(format_pg);
2666 }
2667
2668 static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
2669 0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,
2670 0, 0, 0, 0};
2671
2672 static int resp_caching_pg(unsigned char *p, int pcontrol, int target)
2673 { /* Caching page for mode_sense */
2674 unsigned char ch_caching_pg[] = {/* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0,
2675 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
2676 unsigned char d_caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
2677 0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0, 0, 0, 0, 0};
2678
2679 if (SDEBUG_OPT_N_WCE & sdebug_opts)
2680 caching_pg[2] &= ~0x4; /* set WCE=0 (default WCE=1) */
2681 memcpy(p, caching_pg, sizeof(caching_pg));
2682 if (1 == pcontrol)
2683 memcpy(p + 2, ch_caching_pg, sizeof(ch_caching_pg));
2684 else if (2 == pcontrol)
2685 memcpy(p, d_caching_pg, sizeof(d_caching_pg));
2686 return sizeof(caching_pg);
2687 }
2688
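/*
 * Control mode page backing store. Note: byte 2 bit 2 (D_SENSE) and
 * byte 5 bit 7 (ATO) are refreshed from the dsense and ato module
 * parameters on each MODE SENSE, see resp_ctrl_m_pg() below.
 */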
2689 static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
2690 0, 0, 0x2, 0x4b};
2691
2692 static int resp_ctrl_m_pg(unsigned char *p, int pcontrol, int target)
2693 { /* Control mode page for mode_sense */
2694 unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
2695 0, 0, 0, 0};
2696 unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
2697 0, 0, 0x2, 0x4b};
2698
2699 if (sdebug_dsense)
2700 ctrl_m_pg[2] |= 0x4;
2701 else
2702 ctrl_m_pg[2] &= ~0x4;
2703
2704 if (sdebug_ato)
2705 ctrl_m_pg[5] |= 0x80; /* ATO=1 */
2706
2707 memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
2708 if (1 == pcontrol)
2709 memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
2710 else if (2 == pcontrol)
2711 memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
2712 return sizeof(ctrl_m_pg);
2713 }
2714
2715 /* IO Advice Hints Grouping mode page */
2716 static int resp_grouping_m_pg(unsigned char *p, int pcontrol, int target)
2717 {
2719 struct grouping_m_pg {
2720 u8 page_code; /* OR 0x40 when subpage_code > 0 */
2721 u8 subpage_code;
2722 __be16 page_length;
2723 u8 reserved[12];
2724 struct scsi_io_group_descriptor descr[MAXIMUM_NUMBER_OF_STREAMS];
2725 };
2726 static const struct grouping_m_pg gr_m_pg = {
2727 .page_code = 0xa | 0x40,
2728 .subpage_code = 5,
2729 .page_length = cpu_to_be16(sizeof(gr_m_pg) - 4),
2730 .descr = {
2731 { .st_enble = 1 },
2732 { .st_enble = 1 },
2733 { .st_enble = 1 },
2734 { .st_enble = 1 },
2735 { .st_enble = 1 },
2736 { .st_enble = 0 },
2737 }
2738 };
2739
2740 BUILD_BUG_ON(sizeof(struct grouping_m_pg) !=
2741 16 + MAXIMUM_NUMBER_OF_STREAMS * 16);
2742 memcpy(p, &gr_m_pg, sizeof(gr_m_pg));
2743 if (1 == pcontrol) {
2744 /* There are no changeable values so clear from byte 4 on. */
2745 memset(p + 4, 0, sizeof(gr_m_pg) - 4);
2746 }
2747 return sizeof(gr_m_pg);
2748 }
2749
2750 static int resp_iec_m_pg(unsigned char *p, int pcontrol, int target)
2751 { /* Informational Exceptions control mode page for mode_sense */
2752 unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
2753 0, 0, 0x0, 0x0};
2754 unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
2755 0, 0, 0x0, 0x0};
2756
2757 memcpy(p, iec_m_pg, sizeof(iec_m_pg));
2758 if (1 == pcontrol)
2759 memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
2760 else if (2 == pcontrol)
2761 memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
2762 return sizeof(iec_m_pg);
2763 }
2764
2765 static int resp_sas_sf_m_pg(unsigned char *p, int pcontrol, int target)
2766 { /* SAS SSP mode page - short format for mode_sense */
2767 unsigned char sas_sf_m_pg[] = {0x19, 0x6,
2768 0x6, 0x0, 0x7, 0xd0, 0x0, 0x0};
2769
2770 memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg));
2771 if (1 == pcontrol)
2772 memset(p + 2, 0, sizeof(sas_sf_m_pg) - 2);
2773 return sizeof(sas_sf_m_pg);
2774 }
2775
2776
2777 static int resp_sas_pcd_m_spg(unsigned char *p, int pcontrol, int target,
2778 int target_dev_id)
2779 { /* SAS phy control and discover mode page for mode_sense */
2780 unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
2781 0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
2782 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */
2783 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */
2784 0x2, 0, 0, 0, 0, 0, 0, 0,
2785 0x88, 0x99, 0, 0, 0, 0, 0, 0,
2786 0, 0, 0, 0, 0, 0, 0, 0,
2787 0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
2788 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */
2789 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */
2790 0x3, 0, 0, 0, 0, 0, 0, 0,
2791 0x88, 0x99, 0, 0, 0, 0, 0, 0,
2792 0, 0, 0, 0, 0, 0, 0, 0,
2793 };
2794 int port_a, port_b;
2795
2796 put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 16);
2797 put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 24);
2798 put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 64);
2799 put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 72);
2800 port_a = target_dev_id + 1;
2801 port_b = port_a + 1;
2802 memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
2803 put_unaligned_be32(port_a, p + 20);
2804 put_unaligned_be32(port_b, p + 48 + 20);
2805 if (1 == pcontrol)
2806 memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
2807 return sizeof(sas_pcd_m_pg);
2808 }
2809
2810 static int resp_sas_sha_m_spg(unsigned char *p, int pcontrol)
2811 { /* SAS SSP shared protocol specific port mode subpage */
2812 unsigned char sas_sha_m_pg[] = {0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
2813 0, 0, 0, 0, 0, 0, 0, 0,
2814 };
2815
2816 memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg));
2817 if (1 == pcontrol)
2818 memset(p + 4, 0, sizeof(sas_sha_m_pg) - 4);
2819 return sizeof(sas_sha_m_pg);
2820 }
2821
2822 static unsigned char partition_pg[] = {0x11, 12, 1, 0, 0x24, 3, 9, 0,
2823 0xff, 0xff, 0x00, 0x00};
2824
2825 static int resp_partition_m_pg(unsigned char *p, int pcontrol, int target)
2826 { /* Partition page for mode_sense (tape) */
2827 memcpy(p, partition_pg, sizeof(partition_pg));
2828 if (pcontrol == 1)
2829 memset(p + 2, 0, sizeof(partition_pg) - 2);
2830 return sizeof(partition_pg);
2831 }
2832
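/*
 * Note: applies a MODE SELECT of the medium partition page (tape).
 * Byte 4 of the page carries the FDP/SDP/IDP method bits. Returns 0 on
 * success; a non-zero value appears to be the in-page offset of the
 * offending byte (e.g. 3 when too many partitions are requested), for
 * the caller's field-pointer sense data.
 */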
2833 static int process_medium_part_m_pg(struct sdebug_dev_info *devip,
2834 unsigned char *new, int pg_len)
2835 {
2836 int new_nbr, p0_size, p1_size;
2837
2838 if ((new[4] & 0x80) != 0) { /* FDP */
2839 partition_pg[4] |= 0x80;
2840 devip->tape_pending_nbr_partitions = TAPE_MAX_PARTITIONS;
2841 devip->tape_pending_part_0_size = TAPE_UNITS - TAPE_PARTITION_1_UNITS;
2842 devip->tape_pending_part_1_size = TAPE_PARTITION_1_UNITS;
2843 } else {
2844 new_nbr = new[3] + 1;
2845 if (new_nbr > TAPE_MAX_PARTITIONS)
2846 return 3;
2847 if ((new[4] & 0x40) != 0) { /* SDP */
2848 p1_size = TAPE_PARTITION_1_UNITS;
2849 p0_size = TAPE_UNITS - p1_size;
2850 if (p0_size < 100)
2851 return 4;
2852 } else if ((new[4] & 0x20) != 0) {
2853 if (new_nbr > 1) {
2854 p0_size = get_unaligned_be16(new + 8);
2855 p1_size = get_unaligned_be16(new + 10);
2856 if (p1_size == 0xFFFF)
2857 p1_size = TAPE_UNITS - p0_size;
2858 else if (p0_size == 0xFFFF)
2859 p0_size = TAPE_UNITS - p1_size;
2860 if (p0_size < 100 || p1_size < 100)
2861 return 8;
2862 } else {
2863 p0_size = TAPE_UNITS;
2864 p1_size = 0;
2865 }
2866 } else
2867 return 6;
2868 devip->tape_pending_nbr_partitions = new_nbr;
2869 devip->tape_pending_part_0_size = p0_size;
2870 devip->tape_pending_part_1_size = p1_size;
2871 partition_pg[3] = new_nbr;
2873 }
2874
2875 return 0;
2876 }
2877
2878 static int resp_compression_m_pg(unsigned char *p, int pcontrol, int target,
2879 unsigned char dce)
2880 { /* Compression page for mode_sense (tape) */
2881 unsigned char compression_pg[] = {0x0f, 14, 0x40, 0, 0, 0, 0, 0,
2882 0, 0, 0, 0, 0, 0};
2883
2884 memcpy(p, compression_pg, sizeof(compression_pg));
2885 if (dce)
2886 p[2] |= 0x80;
2887 if (pcontrol == 1)
2888 memset(p + 2, 0, sizeof(compression_pg) - 2);
2889 return sizeof(compression_pg);
2890 }
2891
2892 /* PAGE_SIZE is more than necessary but provides room for future expansion. */
2893 #define SDEBUG_MAX_MSENSE_SZ PAGE_SIZE
2894
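/*
 * Note: MODE SENSE(6/10) handler. Response layout is: mode parameter
 * header, optional 8- or 16-byte (LLBAA) block descriptor, then the
 * requested page(s). pcontrol 1 returns changeable masks, 2 defaults,
 * and 3 (saved values) fails with SAVING PARAMETERS NOT SUPPORTED.
 */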
2895 static int resp_mode_sense(struct scsi_cmnd *scp,
2896 struct sdebug_dev_info *devip)
2897 {
2898 int pcontrol, pcode, subpcode, bd_len;
2899 unsigned char dev_spec;
2900 u32 alloc_len, offset, len;
2901 int target_dev_id;
2902 int target = scp->device->id;
2903 unsigned char *ap;
2904 unsigned char *arr __free(kfree);
2905 unsigned char *cmd = scp->cmnd;
2906 bool dbd, llbaa, msense_6, is_disk, is_zbc, is_tape;
2907
2908 arr = kzalloc(SDEBUG_MAX_MSENSE_SZ, GFP_ATOMIC);
2909 if (!arr)
2910 return -ENOMEM;
2911 dbd = !!(cmd[1] & 0x8); /* disable block descriptors */
2912 pcontrol = (cmd[2] & 0xc0) >> 6;
2913 pcode = cmd[2] & 0x3f;
2914 subpcode = cmd[3];
2915 msense_6 = (MODE_SENSE == cmd[0]);
2916 llbaa = msense_6 ? false : !!(cmd[1] & 0x10);
2917 is_disk = (sdebug_ptype == TYPE_DISK);
2918 is_zbc = devip->zoned;
2919 is_tape = (sdebug_ptype == TYPE_TAPE);
2920 if ((is_disk || is_zbc || is_tape) && !dbd)
2921 bd_len = llbaa ? 16 : 8;
2922 else
2923 bd_len = 0;
2924 alloc_len = msense_6 ? cmd[4] : get_unaligned_be16(cmd + 7);
2925 if (0x3 == pcontrol) { /* Saving values not supported */
2926 mk_sense_buffer(scp, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, 0);
2927 return check_condition_result;
2928 }
2929 target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
2930 (devip->target * 1000) - 3;
2931 /* for disks+zbc set DPOFUA bit and clear write protect (WP) bit */
2932 if (is_disk || is_zbc) {
2933 dev_spec = 0x10; /* =0x90 if WP=1 implies read-only */
2934 if (sdebug_wp)
2935 dev_spec |= 0x80;
2936 } else
2937 dev_spec = 0x0;
2938 if (msense_6) {
2939 arr[2] = dev_spec;
2940 arr[3] = bd_len;
2941 offset = 4;
2942 } else {
2943 arr[3] = dev_spec;
2944 if (16 == bd_len)
2945 arr[4] = 0x1; /* set LONGLBA bit */
2946 arr[7] = bd_len; /* assume 255 or less */
2947 offset = 8;
2948 }
2949 ap = arr + offset;
2950 if ((bd_len > 0) && (!sdebug_capacity))
2951 sdebug_capacity = get_sdebug_capacity();
2952
2953 if (8 == bd_len) {
2954 if (sdebug_capacity > 0xfffffffe)
2955 put_unaligned_be32(0xffffffff, ap + 0);
2956 else
2957 put_unaligned_be32(sdebug_capacity, ap + 0);
2958 if (is_tape) {
2959 ap[0] = devip->tape_density;
2960 put_unaligned_be16(devip->tape_blksize, ap + 6);
2961 } else
2962 put_unaligned_be16(sdebug_sector_size, ap + 6);
2963 offset += bd_len;
2964 ap = arr + offset;
2965 } else if (16 == bd_len) {
2966 if (is_tape) {
2967 mk_sense_invalid_fld(scp, SDEB_IN_DATA, 1, 4);
2968 return check_condition_result;
2969 }
2970 put_unaligned_be64((u64)sdebug_capacity, ap + 0);
2971 put_unaligned_be32(sdebug_sector_size, ap + 12);
2972 offset += bd_len;
2973 ap = arr + offset;
2974 }
2975 if (cmd[2] == 0)
2976 goto only_bd; /* Only block descriptor requested */
2977
2978 /*
2979 * N.B. If len>0 before resp_*_pg() call, then form of that call should be:
2980 * len += resp_*_pg(ap + len, pcontrol, target);
2981 */
2982 switch (pcode) {
2983 case 0x1: /* Read-Write error recovery page, direct access */
2984 if (subpcode > 0x0 && subpcode < 0xff)
2985 goto bad_subpcode;
2986 len = resp_err_recov_pg(ap, pcontrol, target);
2987 offset += len;
2988 break;
2989 case 0x2: /* Disconnect-Reconnect page, all devices */
2990 if (subpcode > 0x0 && subpcode < 0xff)
2991 goto bad_subpcode;
2992 len = resp_disconnect_pg(ap, pcontrol, target);
2993 offset += len;
2994 break;
2995 case 0x3: /* Format device page, direct access */
2996 if (subpcode > 0x0 && subpcode < 0xff)
2997 goto bad_subpcode;
2998 if (is_disk) {
2999 len = resp_format_pg(ap, pcontrol, target);
3000 offset += len;
3001 } else {
3002 goto bad_pcode;
3003 }
3004 break;
3005 case 0x8: /* Caching page, direct access */
3006 if (subpcode > 0x0 && subpcode < 0xff)
3007 goto bad_subpcode;
3008 if (is_disk || is_zbc) {
3009 len = resp_caching_pg(ap, pcontrol, target);
3010 offset += len;
3011 } else {
3012 goto bad_pcode;
3013 }
3014 break;
3015 case 0xa: /* Control Mode page, all devices */
3016 switch (subpcode) {
3017 case 0:
3018 len = resp_ctrl_m_pg(ap, pcontrol, target);
3019 break;
3020 case 0x05:
3021 len = resp_grouping_m_pg(ap, pcontrol, target);
3022 break;
3023 case 0xff:
3024 len = resp_ctrl_m_pg(ap, pcontrol, target);
3025 len += resp_grouping_m_pg(ap + len, pcontrol, target);
3026 break;
3027 default:
3028 goto bad_subpcode;
3029 }
3030 offset += len;
3031 break;
3032 case 0xf: /* Compression Mode Page (tape) */
3033 if (!is_tape)
3034 goto bad_pcode;
3035 len = resp_compression_m_pg(ap, pcontrol, target, devip->tape_dce);
3036 offset += len;
3037 break;
3038 case 0x11: /* Partition Mode Page (tape) */
3039 if (!is_tape)
3040 goto bad_pcode;
3041 len = resp_partition_m_pg(ap, pcontrol, target);
3042 offset += len;
3043 break;
3044 case 0x19: /* if spc==1 then sas phy, control+discover */
3045 if (subpcode > 0x2 && subpcode < 0xff)
3046 goto bad_subpcode;
3047 len = 0;
3048 if ((0x0 == subpcode) || (0xff == subpcode))
3049 len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
3050 if ((0x1 == subpcode) || (0xff == subpcode))
3051 len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
3052 target_dev_id);
3053 if ((0x2 == subpcode) || (0xff == subpcode))
3054 len += resp_sas_sha_m_spg(ap + len, pcontrol);
3055 offset += len;
3056 break;
3057 case 0x1c: /* Informational Exceptions Mode page, all devices */
3058 if (subpcode > 0x0 && subpcode < 0xff)
3059 goto bad_subpcode;
3060 len = resp_iec_m_pg(ap, pcontrol, target);
3061 offset += len;
3062 break;
3063 case 0x3f: /* Read all Mode pages */
3064 if (subpcode > 0x0 && subpcode < 0xff)
3065 goto bad_subpcode;
3066 len = resp_err_recov_pg(ap, pcontrol, target);
3067 len += resp_disconnect_pg(ap + len, pcontrol, target);
3068 if (is_disk) {
3069 len += resp_format_pg(ap + len, pcontrol, target);
3070 len += resp_caching_pg(ap + len, pcontrol, target);
3071 } else if (is_zbc) {
3072 len += resp_caching_pg(ap + len, pcontrol, target);
3073 }
3074 len += resp_ctrl_m_pg(ap + len, pcontrol, target);
3075 if (0xff == subpcode)
3076 len += resp_grouping_m_pg(ap + len, pcontrol, target);
3077 len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
3078 if (0xff == subpcode) {
3079 len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
3080 target_dev_id);
3081 len += resp_sas_sha_m_spg(ap + len, pcontrol);
3082 }
3083 len += resp_iec_m_pg(ap + len, pcontrol, target);
3084 offset += len;
3085 break;
3086 default:
3087 goto bad_pcode;
3088 }
3089 only_bd:
3090 if (msense_6)
3091 arr[0] = offset - 1;
3092 else
3093 put_unaligned_be16((offset - 2), arr + 0);
3094 return fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, offset));
3095
3096 bad_pcode:
3097 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
3098 return check_condition_result;
3099
3100 bad_subpcode:
3101 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
3102 return check_condition_result;
3103 }
3104
3105 #define SDEBUG_MAX_MSELECT_SZ 512
3106
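/*
 * Handle MODE SELECT(6) and MODE SELECT(10). The PF (page format) bit must
 * be set and SP (save pages) is not supported. For tape devices the block
 * descriptor may change the density and block size. Only a few mode pages
 * may be changed: Caching, Control, Compression (tape), Medium Partition
 * (tape) and Informational Exceptions.
 */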
3107 static int resp_mode_select(struct scsi_cmnd *scp,
3108 struct sdebug_dev_info *devip)
3109 {
3110 int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
3111 int param_len, res, mpage;
3112 unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
3113 unsigned char *cmd = scp->cmnd;
3114 int mselect6 = (MODE_SELECT == cmd[0]);
3115
3116 memset(arr, 0, sizeof(arr));
3117 pf = cmd[1] & 0x10;
3118 sp = cmd[1] & 0x1;
3119 param_len = mselect6 ? cmd[4] : get_unaligned_be16(cmd + 7);
3120 if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
3121 mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1);
3122 return check_condition_result;
3123 }
3124 res = fetch_to_dev_buffer(scp, arr, param_len);
3125 if (-1 == res)
3126 return DID_ERROR << 16;
3127 else if (sdebug_verbose && (res < param_len))
3128 sdev_printk(KERN_INFO, scp->device,
3129 "%s: cdb indicated=%d, IO sent=%d bytes\n",
3130 __func__, param_len, res);
3131 md_len = mselect6 ? (arr[0] + 1) : (get_unaligned_be16(arr + 0) + 2);
3132 bd_len = mselect6 ? arr[3] : get_unaligned_be16(arr + 6);
3133 off = (mselect6 ? 4 : 8);
3134 if (sdebug_ptype == TYPE_TAPE) {
3135 int blksize;
3136
3137 if (bd_len != 8) {
3138 mk_sense_invalid_fld(scp, SDEB_IN_DATA,
3139 mselect6 ? 3 : 6, -1);
3140 return check_condition_result;
3141 }
3142 if (arr[off] == TAPE_BAD_DENSITY) {
3143 mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
3144 return check_condition_result;
3145 }
3146 blksize = get_unaligned_be16(arr + off + 6);
3147 if (blksize != 0 &&
3148 (blksize < TAPE_MIN_BLKSIZE ||
3149 blksize > TAPE_MAX_BLKSIZE ||
3150 (blksize % 4) != 0)) {
3151 mk_sense_invalid_fld(scp, SDEB_IN_DATA, 1, -1);
3152 return check_condition_result;
3153 }
3154 devip->tape_density = arr[off];
3155 devip->tape_blksize = blksize;
3156 }
3157 off += bd_len;
3158 if (off >= res)
3159 return 0; /* No page written, just descriptors */
3160 if (md_len > 2) {
3161 mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
3162 return check_condition_result;
3163 }
3164 mpage = arr[off] & 0x3f;
3165 ps = !!(arr[off] & 0x80);
3166 if (ps) {
3167 mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 7);
3168 return check_condition_result;
3169 }
3170 spf = !!(arr[off] & 0x40);
3171 pg_len = spf ? (get_unaligned_be16(arr + off + 2) + 4) :
3172 (arr[off + 1] + 2);
3173 if ((pg_len + off) > param_len) {
3174 mk_sense_buffer(scp, ILLEGAL_REQUEST,
3175 PARAMETER_LIST_LENGTH_ERR, 0);
3176 return check_condition_result;
3177 }
3178 switch (mpage) {
3179 case 0x8: /* Caching Mode page */
3180 if (caching_pg[1] == arr[off + 1]) {
3181 memcpy(caching_pg + 2, arr + off + 2,
3182 sizeof(caching_pg) - 2);
3183 goto set_mode_changed_ua;
3184 }
3185 break;
3186 case 0xa: /* Control Mode page */
3187 if (ctrl_m_pg[1] == arr[off + 1]) {
3188 memcpy(ctrl_m_pg + 2, arr + off + 2,
3189 sizeof(ctrl_m_pg) - 2);
3190 if (ctrl_m_pg[4] & 0x8)
3191 sdebug_wp = true;
3192 else
3193 sdebug_wp = false;
3194 sdebug_dsense = !!(ctrl_m_pg[2] & 0x4);
3195 goto set_mode_changed_ua;
3196 }
3197 break;
3198 case 0xf: /* Compression mode page */
3199 if (sdebug_ptype != TYPE_TAPE)
3200 goto bad_pcode;
3201 if ((arr[off + 2] & 0x40) != 0) {
3202 devip->tape_dce = (arr[off + 2] & 0x80) != 0;
3203 return 0;
3204 }
3205 break;
3206 case 0x11: /* Medium Partition Mode Page (tape) */
3207 if (sdebug_ptype == TYPE_TAPE) {
3208 int fld;
3209
3210 fld = process_medium_part_m_pg(devip, &arr[off], pg_len);
3211 if (fld == 0)
3212 return 0;
3213 mk_sense_invalid_fld(scp, SDEB_IN_DATA, fld, -1);
3214 return check_condition_result;
3215 }
3216 break;
3217 case 0x1c: /* Informational Exceptions Mode page */
3218 if (iec_m_pg[1] == arr[off + 1]) {
3219 memcpy(iec_m_pg + 2, arr + off + 2,
3220 sizeof(iec_m_pg) - 2);
3221 goto set_mode_changed_ua;
3222 }
3223 break;
3224 default:
3225 break;
3226 }
3227 mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 5);
3228 return check_condition_result;
3229 set_mode_changed_ua:
3230 set_bit(SDEBUG_UA_MODE_CHANGED, devip->uas_bm);
3231 return 0;
3232
3233 bad_pcode:
3234 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
3235 return check_condition_result;
3236 }
3237
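/* Temperature log page (0xd) payload: parameter 0 reports the current
 * temperature (38 C) and parameter 1 the reference temperature (65 C). */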
3238 static int resp_temp_l_pg(unsigned char *arr)
3239 {
3240 unsigned char temp_l_pg[] = {0x0, 0x0, 0x3, 0x2, 0x0, 38,
3241 0x0, 0x1, 0x3, 0x2, 0x0, 65,
3242 };
3243
3244 memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
3245 return sizeof(temp_l_pg);
3246 }
3247
3248 static int resp_ie_l_pg(unsigned char *arr)
3249 {
3250 unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
3251 };
3252
3253 memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
3254 if (iec_m_pg[2] & 0x4) { /* TEST bit set */
3255 arr[4] = THRESHOLD_EXCEEDED;
3256 arr[5] = 0xff;
3257 }
3258 return sizeof(ie_l_pg);
3259 }
3260
3261 static int resp_env_rep_l_spg(unsigned char *arr)
3262 {
3263 unsigned char env_rep_l_spg[] = {0x0, 0x0, 0x23, 0x8,
3264 0x0, 40, 72, 0xff, 45, 18, 0, 0,
3265 0x1, 0x0, 0x23, 0x8,
3266 0x0, 55, 72, 35, 55, 45, 0, 0,
3267 };
3268
3269 memcpy(arr, env_rep_l_spg, sizeof(env_rep_l_spg));
3270 return sizeof(env_rep_l_spg);
3271 }
3272
3273 #define SDEBUG_MAX_LSENSE_SZ 512
3274
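/*
 * Handle LOG SENSE. The PPC and SP bits must be zero. Supported pages:
 * supported-pages (0x0), Temperature (0xd) and Informational Exceptions
 * (0x2f), plus the Environment Reporting subpage (0xd,0x1) and the 0xff
 * "all subpages" listings.
 */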
3275 static int resp_log_sense(struct scsi_cmnd *scp,
3276 struct sdebug_dev_info *devip)
3277 {
3278 int ppc, sp, pcode, subpcode;
3279 u32 alloc_len, len, n;
3280 unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
3281 unsigned char *cmd = scp->cmnd;
3282
3283 memset(arr, 0, sizeof(arr));
3284 ppc = cmd[1] & 0x2;
3285 sp = cmd[1] & 0x1;
3286 if (ppc || sp) {
3287 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, ppc ? 1 : 0);
3288 return check_condition_result;
3289 }
3290 pcode = cmd[2] & 0x3f;
3291 subpcode = cmd[3] & 0xff;
3292 alloc_len = get_unaligned_be16(cmd + 7);
3293 arr[0] = pcode;
3294 if (0 == subpcode) {
3295 switch (pcode) {
3296 case 0x0: /* Supported log pages log page */
3297 n = 4;
3298 arr[n++] = 0x0; /* this page */
3299 arr[n++] = 0xd; /* Temperature */
3300 arr[n++] = 0x2f; /* Informational exceptions */
3301 arr[3] = n - 4;
3302 break;
3303 case 0xd: /* Temperature log page */
3304 arr[3] = resp_temp_l_pg(arr + 4);
3305 break;
3306 case 0x2f: /* Informational exceptions log page */
3307 arr[3] = resp_ie_l_pg(arr + 4);
3308 break;
3309 default:
3310 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
3311 return check_condition_result;
3312 }
3313 } else if (0xff == subpcode) {
3314 arr[0] |= 0x40;
3315 arr[1] = subpcode;
3316 switch (pcode) {
3317 case 0x0: /* Supported log pages and subpages log page */
3318 n = 4;
3319 arr[n++] = 0x0;
3320 arr[n++] = 0x0; /* 0,0 page */
3321 arr[n++] = 0x0;
3322 arr[n++] = 0xff; /* this page */
3323 arr[n++] = 0xd;
3324 arr[n++] = 0x0; /* Temperature */
3325 arr[n++] = 0xd;
3326 arr[n++] = 0x1; /* Environment reporting */
3327 arr[n++] = 0xd;
3328 arr[n++] = 0xff; /* all 0xd subpages */
3329 arr[n++] = 0x2f;
3330 arr[n++] = 0x0; /* Informational exceptions */
3331 arr[n++] = 0x2f;
3332 arr[n++] = 0xff; /* all 0x2f subpages */
3333 arr[3] = n - 4;
3334 break;
3335 case 0xd: /* Temperature subpages */
3336 n = 4;
3337 arr[n++] = 0xd;
3338 arr[n++] = 0x0; /* Temperature */
3339 arr[n++] = 0xd;
3340 arr[n++] = 0x1; /* Environment reporting */
3341 arr[n++] = 0xd;
3342 arr[n++] = 0xff; /* these subpages */
3343 arr[3] = n - 4;
3344 break;
3345 case 0x2f: /* Informational exceptions subpages */
3346 n = 4;
3347 arr[n++] = 0x2f;
3348 arr[n++] = 0x0; /* Informational exceptions */
3349 arr[n++] = 0x2f;
3350 arr[n++] = 0xff; /* these subpages */
3351 arr[3] = n - 4;
3352 break;
3353 default:
3354 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
3355 return check_condition_result;
3356 }
3357 } else if (subpcode > 0) {
3358 arr[0] |= 0x40;
3359 arr[1] = subpcode;
3360 if (pcode == 0xd && subpcode == 1) {
3361 arr[3] = resp_env_rep_l_spg(arr + 4);
3362 } else {
3363 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
3364 return check_condition_result;
3365 }
3366 } else {
3367 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
3368 return check_condition_result;
3369 }
3370 len = min_t(u32, get_unaligned_be16(arr + 2) + 4, alloc_len);
3371 return fill_from_dev_buffer(scp, arr,
3372 min_t(u32, len, SDEBUG_MAX_INQ_ARR_SZ));
3373 }
3374
3375 enum {SDEBUG_READ_BLOCK_LIMITS_ARR_SZ = 6};
3376 static int resp_read_blklimits(struct scsi_cmnd *scp,
3377 struct sdebug_dev_info *devip)
3378 {
3379 unsigned char arr[SDEBUG_READ_BLOCK_LIMITS_ARR_SZ];
3380
3381 arr[0] = 4;
3382 put_unaligned_be24(TAPE_MAX_BLKSIZE, arr + 1);
3383 put_unaligned_be16(TAPE_MIN_BLKSIZE, arr + 4);
3384 return fill_from_dev_buffer(scp, arr, SDEBUG_READ_BLOCK_LIMITS_ARR_SZ);
3385 }
3386
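/* LOCATE(10): optionally switch partition (CP bit set), then position to
 * the requested block, stopping with BLANK CHECK sense if EOD comes first. */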
3387 static int resp_locate(struct scsi_cmnd *scp,
3388 struct sdebug_dev_info *devip)
3389 {
3390 unsigned char *cmd = scp->cmnd;
3391 unsigned int i, pos;
3392 struct tape_block *blp;
3393 int partition;
3394
3395 if ((cmd[1] & 0x02) != 0) {
3396 if (cmd[8] >= devip->tape_nbr_partitions) {
3397 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, -1);
3398 return check_condition_result;
3399 }
3400 devip->tape_partition = cmd[8];
3401 }
3402 pos = get_unaligned_be32(cmd + 3);
3403 partition = devip->tape_partition;
3404
3405 for (i = 0, blp = devip->tape_blocks[partition];
3406 i < pos && i < devip->tape_eop[partition]; i++, blp++)
3407 if (IS_TAPE_BLOCK_EOD(blp->fl_size))
3408 break;
3409 if (i < pos) {
3410 devip->tape_location[partition] = i;
3411 mk_sense_buffer(scp, BLANK_CHECK, 0x05, 0);
3412 return check_condition_result;
3413 }
3414 devip->tape_location[partition] = pos;
3415
3416 return 0;
3417 }
3418
3419 static int resp_write_filemarks(struct scsi_cmnd *scp,
3420 struct sdebug_dev_info *devip)
3421 {
3422 unsigned char *cmd = scp->cmnd;
3423 unsigned int i, count, pos;
3424 u32 data;
3425 int partition = devip->tape_partition;
3426
3427 if ((cmd[1] & 0xfe) != 0) { /* probably write setmarks, which are not in SCSI-3 and later */
3428 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
3429 return check_condition_result;
3430 }
3431 count = get_unaligned_be24(cmd + 2);
3432 data = TAPE_BLOCK_FM_FLAG;
3433 for (i = 0, pos = devip->tape_location[partition]; i < count; i++, pos++) {
3434 if (pos >= devip->tape_eop[partition] - 1) { /* don't overwrite EOD */
3435 devip->tape_location[partition] = devip->tape_eop[partition] - 1;
3436 mk_sense_info_tape(scp, VOLUME_OVERFLOW, NO_ADDITIONAL_SENSE,
3437 EOP_EOM_DETECTED_ASCQ, count, SENSE_FLAG_EOM);
3438 return check_condition_result;
3439 }
3440 (devip->tape_blocks[partition] + pos)->fl_size = data;
3441 }
3442 (devip->tape_blocks[partition] + pos)->fl_size =
3443 TAPE_BLOCK_EOD_FLAG;
3444 devip->tape_location[partition] = pos;
3445
3446 return 0;
3447 }
3448
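/*
 * SPACE(6): move the position by a signed 24-bit count of blocks (code 0)
 * or filemarks (code 1), or to end-of-data (code 3). Hitting BOP, a
 * filemark, EOD or EOP first raises the matching sense data with the
 * residual count.
 */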
3449 static int resp_space(struct scsi_cmnd *scp,
3450 struct sdebug_dev_info *devip)
3451 {
3452 unsigned char *cmd = scp->cmnd, code;
3453 int i = 0, pos, count;
3454 struct tape_block *blp;
3455 int partition = devip->tape_partition;
3456
3457 count = get_unaligned_be24(cmd + 2);
3458 if ((count & 0x800000) != 0) /* extend negative to 32-bit count */
3459 count |= 0xff000000;
3460 code = cmd[1] & 0x0f;
3461
3462 pos = devip->tape_location[partition];
3463 if (code == 0) { /* blocks */
3464 if (count < 0) {
3465 count = (-count);
3466 pos -= 1;
3467 for (i = 0, blp = devip->tape_blocks[partition] + pos; i < count;
3468 i++) {
3469 if (pos < 0)
3470 goto is_bop;
3471 else if (IS_TAPE_BLOCK_FM(blp->fl_size))
3472 goto is_fm;
3473 if (i > 0) {
3474 pos--;
3475 blp--;
3476 }
3477 }
3478 } else if (count > 0) {
3479 for (i = 0, blp = devip->tape_blocks[partition] + pos; i < count;
3480 i++, pos++, blp++) {
3481 if (IS_TAPE_BLOCK_EOD(blp->fl_size))
3482 goto is_eod;
3483 if (IS_TAPE_BLOCK_FM(blp->fl_size)) {
3484 pos += 1;
3485 goto is_fm;
3486 }
3487 if (pos >= devip->tape_eop[partition])
3488 goto is_eop;
3489 }
3490 }
3491 } else if (code == 1) { /* filemarks */
3492 if (count < 0) {
3493 count = (-count);
3494 if (pos == 0)
3495 goto is_bop;
3496 else {
3497 for (i = 0, blp = devip->tape_blocks[partition] + pos;
3498 i < count && pos >= 0; i++, pos--, blp--) {
3499 for (pos--, blp-- ; !IS_TAPE_BLOCK_FM(blp->fl_size) &&
3500 pos >= 0; pos--, blp--)
3501 ; /* empty */
3502 if (pos < 0)
3503 goto is_bop;
3504 }
3505 }
3506 pos += 1;
3507 } else if (count > 0) {
3508 for (i = 0, blp = devip->tape_blocks[partition] + pos;
3509 i < count; i++, pos++, blp++) {
3510 for ( ; !IS_TAPE_BLOCK_FM(blp->fl_size) &&
3511 !IS_TAPE_BLOCK_EOD(blp->fl_size) &&
3512 pos < devip->tape_eop[partition];
3513 pos++, blp++)
3514 ; /* empty */
3515 if (IS_TAPE_BLOCK_EOD(blp->fl_size))
3516 goto is_eod;
3517 if (pos >= devip->tape_eop[partition])
3518 goto is_eop;
3519 }
3520 }
3521 } else if (code == 3) { /* EOD */
3522 for (blp = devip->tape_blocks[partition] + pos;
3523 !IS_TAPE_BLOCK_EOD(blp->fl_size) && pos < devip->tape_eop[partition];
3524 pos++, blp++)
3525 ; /* empty */
3526 if (pos >= devip->tape_eop[partition])
3527 goto is_eop;
3528 } else {
3529 /* sequential filemarks not supported */
3530 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, -1);
3531 return check_condition_result;
3532 }
3533 devip->tape_location[partition] = pos;
3534 return 0;
3535
3536 is_fm:
3537 devip->tape_location[partition] = pos;
3538 mk_sense_info_tape(scp, NO_SENSE, NO_ADDITIONAL_SENSE,
3539 FILEMARK_DETECTED_ASCQ, count - i,
3540 SENSE_FLAG_FILEMARK);
3541 return check_condition_result;
3542
3543 is_eod:
3544 devip->tape_location[partition] = pos;
3545 mk_sense_info_tape(scp, BLANK_CHECK, NO_ADDITIONAL_SENSE,
3546 EOD_DETECTED_ASCQ, count - i,
3547 0);
3548 return check_condition_result;
3549
3550 is_bop:
3551 devip->tape_location[partition] = 0;
3552 mk_sense_info_tape(scp, NO_SENSE, NO_ADDITIONAL_SENSE,
3553 BEGINNING_OF_P_M_DETECTED_ASCQ, count - i,
3554 SENSE_FLAG_EOM);
3556 return check_condition_result;
3557
3558 is_eop:
3559 devip->tape_location[partition] = devip->tape_eop[partition] - 1;
3560 mk_sense_info_tape(scp, MEDIUM_ERROR, NO_ADDITIONAL_SENSE,
3561 EOP_EOM_DETECTED_ASCQ, (unsigned int)i,
3562 SENSE_FLAG_EOM);
3563 return check_condition_result;
3564 }
3565
3566 static int resp_rewind(struct scsi_cmnd *scp,
3567 struct sdebug_dev_info *devip)
3568 {
3569 devip->tape_location[devip->tape_partition] = 0;
3570
3571 return 0;
3572 }
3573
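/* (Re)partition the simulated tape into nbr_partitions (1 or 2), rewind all
 * partitions and refresh the Medium Partition mode page. Returns the
 * partition count, or -1 if the requested sizes exceed TAPE_UNITS. */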
3574 static int partition_tape(struct sdebug_dev_info *devip, int nbr_partitions,
3575 int part_0_size, int part_1_size)
3576 {
3577 int i;
3578
3579 if (part_0_size + part_1_size > TAPE_UNITS)
3580 return -1;
3581 devip->tape_eop[0] = part_0_size;
3582 devip->tape_blocks[0]->fl_size = TAPE_BLOCK_EOD_FLAG;
3583 devip->tape_eop[1] = part_1_size;
3584 devip->tape_blocks[1] = devip->tape_blocks[0] +
3585 devip->tape_eop[0];
3586 devip->tape_blocks[1]->fl_size = TAPE_BLOCK_EOD_FLAG;
3587
3588 for (i = 0 ; i < TAPE_MAX_PARTITIONS; i++)
3589 devip->tape_location[i] = 0;
3590
3591 devip->tape_nbr_partitions = nbr_partitions;
3592 devip->tape_partition = 0;
3593
3594 partition_pg[3] = nbr_partitions - 1;
3595 put_unaligned_be16(devip->tape_eop[0], partition_pg + 8);
3596 put_unaligned_be16(devip->tape_eop[1], partition_pg + 10);
3597
3598 return nbr_partitions;
3599 }
3600
3601 static int resp_format_medium(struct scsi_cmnd *scp,
3602 struct sdebug_dev_info *devip)
3603 {
3604 int res = 0;
3605 unsigned char *cmd = scp->cmnd;
3606
3607 if (sdebug_ptype != TYPE_TAPE) {
3608 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 0, -1);
3609 return check_condition_result;
3610 }
3611 if (cmd[2] > 2) {
3612 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1); /* cmd[2] is a CDB field */
3613 return check_condition_result;
3614 }
3615 if (cmd[2] != 0) {
3616 if (devip->tape_pending_nbr_partitions > 0) {
3617 res = partition_tape(devip,
3618 devip->tape_pending_nbr_partitions,
3619 devip->tape_pending_part_0_size,
3620 devip->tape_pending_part_1_size);
3621 } else
3622 res = partition_tape(devip, devip->tape_nbr_partitions,
3623 devip->tape_eop[0], devip->tape_eop[1]);
3624 } else
3625 res = partition_tape(devip, 1, TAPE_UNITS, 0);
3626 if (res < 0)
3627 return -EINVAL;
3628
3629 devip->tape_pending_nbr_partitions = -1;
3630
3631 return 0;
3632 }
3633
3634 static inline bool sdebug_dev_is_zoned(struct sdebug_dev_info *devip)
3635 {
3636 return devip->nr_zones != 0;
3637 }
3638
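/* Map an LBA to its zone state. When the zone capacity is smaller than the
 * zone size, each sequential zone is followed by a gap zone, so the zone
 * index must be adjusted accordingly. */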
3639 static struct sdeb_zone_state *zbc_zone(struct sdebug_dev_info *devip,
3640 unsigned long long lba)
3641 {
3642 u32 zno = lba >> devip->zsize_shift;
3643 struct sdeb_zone_state *zsp;
3644
3645 if (devip->zcap == devip->zsize || zno < devip->nr_conv_zones)
3646 return &devip->zstate[zno];
3647
3648 /*
3649 * If the zone capacity is less than the zone size, adjust for gap
3650 * zones.
3651 */
3652 zno = 2 * zno - devip->nr_conv_zones;
3653 WARN_ONCE(zno >= devip->nr_zones, "%u >= %u\n", zno, devip->nr_zones);
3654 zsp = &devip->zstate[zno];
3655 if (lba >= zsp->z_start + zsp->z_size)
3656 zsp++;
3657 WARN_ON_ONCE(lba >= zsp->z_start + zsp->z_size);
3658 return zsp;
3659 }
3660
3661 static inline bool zbc_zone_is_conv(struct sdeb_zone_state *zsp)
3662 {
3663 return zsp->z_type == ZBC_ZTYPE_CNV;
3664 }
3665
3666 static inline bool zbc_zone_is_gap(struct sdeb_zone_state *zsp)
3667 {
3668 return zsp->z_type == ZBC_ZTYPE_GAP;
3669 }
3670
3671 static inline bool zbc_zone_is_seq(struct sdeb_zone_state *zsp)
3672 {
3673 return !zbc_zone_is_conv(zsp) && !zbc_zone_is_gap(zsp);
3674 }
3675
3676 static void zbc_close_zone(struct sdebug_dev_info *devip,
3677 struct sdeb_zone_state *zsp)
3678 {
3679 enum sdebug_z_cond zc;
3680
3681 if (!zbc_zone_is_seq(zsp))
3682 return;
3683
3684 zc = zsp->z_cond;
3685 if (!(zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN))
3686 return;
3687
3688 if (zc == ZC2_IMPLICIT_OPEN)
3689 devip->nr_imp_open--;
3690 else
3691 devip->nr_exp_open--;
3692
3693 if (zsp->z_wp == zsp->z_start) {
3694 zsp->z_cond = ZC1_EMPTY;
3695 } else {
3696 zsp->z_cond = ZC4_CLOSED;
3697 devip->nr_closed++;
3698 }
3699 }
3700
3701 static void zbc_close_imp_open_zone(struct sdebug_dev_info *devip)
3702 {
3703 struct sdeb_zone_state *zsp = &devip->zstate[0];
3704 unsigned int i;
3705
3706 for (i = 0; i < devip->nr_zones; i++, zsp++) {
3707 if (zsp->z_cond == ZC2_IMPLICIT_OPEN) {
3708 zbc_close_zone(devip, zsp);
3709 return;
3710 }
3711 }
3712 }
3713
3714 static void zbc_open_zone(struct sdebug_dev_info *devip,
3715 struct sdeb_zone_state *zsp, bool explicit)
3716 {
3717 enum sdebug_z_cond zc;
3718
3719 if (!zbc_zone_is_seq(zsp))
3720 return;
3721
3722 zc = zsp->z_cond;
3723 if ((explicit && zc == ZC3_EXPLICIT_OPEN) ||
3724 (!explicit && zc == ZC2_IMPLICIT_OPEN))
3725 return;
3726
3727 /* Close an implicit open zone if necessary */
3728 if (explicit && zsp->z_cond == ZC2_IMPLICIT_OPEN)
3729 zbc_close_zone(devip, zsp);
3730 else if (devip->max_open &&
3731 devip->nr_imp_open + devip->nr_exp_open >= devip->max_open)
3732 zbc_close_imp_open_zone(devip);
3733
3734 if (zsp->z_cond == ZC4_CLOSED)
3735 devip->nr_closed--;
3736 if (explicit) {
3737 zsp->z_cond = ZC3_EXPLICIT_OPEN;
3738 devip->nr_exp_open++;
3739 } else {
3740 zsp->z_cond = ZC2_IMPLICIT_OPEN;
3741 devip->nr_imp_open++;
3742 }
3743 }
3744
3745 static inline void zbc_set_zone_full(struct sdebug_dev_info *devip,
3746 struct sdeb_zone_state *zsp)
3747 {
3748 switch (zsp->z_cond) {
3749 case ZC2_IMPLICIT_OPEN:
3750 devip->nr_imp_open--;
3751 break;
3752 case ZC3_EXPLICIT_OPEN:
3753 devip->nr_exp_open--;
3754 break;
3755 default:
3756 WARN_ONCE(true, "Invalid zone %llu condition %x\n",
3757 zsp->z_start, zsp->z_cond);
3758 break;
3759 }
3760 zsp->z_cond = ZC5_FULL;
3761 }
3762
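/* Advance the write pointer of the zone(s) a write touches. For
 * sequential-write-required zones a simple increment suffices; for
 * sequential-write-preferred zones the write may span several zones and
 * may land behind or beyond the current write pointer. */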
3763 static void zbc_inc_wp(struct sdebug_dev_info *devip,
3764 unsigned long long lba, unsigned int num)
3765 {
3766 struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
3767 unsigned long long n, end, zend = zsp->z_start + zsp->z_size;
3768
3769 if (!zbc_zone_is_seq(zsp))
3770 return;
3771
3772 if (zsp->z_type == ZBC_ZTYPE_SWR) {
3773 zsp->z_wp += num;
3774 if (zsp->z_wp >= zend)
3775 zbc_set_zone_full(devip, zsp);
3776 return;
3777 }
3778
3779 while (num) {
3780 if (lba != zsp->z_wp)
3781 zsp->z_non_seq_resource = true;
3782
3783 end = lba + num;
3784 if (end >= zend) {
3785 n = zend - lba;
3786 zsp->z_wp = zend;
3787 } else if (end > zsp->z_wp) {
3788 n = num;
3789 zsp->z_wp = end;
3790 } else {
3791 n = num;
3792 }
3793 if (zsp->z_wp >= zend)
3794 zbc_set_zone_full(devip, zsp);
3795
3796 num -= n;
3797 lba += n;
3798 if (num) {
3799 zsp++;
3800 zend = zsp->z_start + zsp->z_size;
3801 }
3802 }
3803 }
3804
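/*
 * ZBC access checks: reads must not span zones of different types; writes
 * must avoid gap zones, stay within one sequential-write-required zone,
 * start at the write pointer, and may implicitly open an empty or closed
 * zone subject to the max_open limit.
 */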
3805 static int check_zbc_access_params(struct scsi_cmnd *scp,
3806 unsigned long long lba, unsigned int num, bool write)
3807 {
3808 struct scsi_device *sdp = scp->device;
3809 struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
3810 struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
3811 struct sdeb_zone_state *zsp_end = zbc_zone(devip, lba + num - 1);
3812
3813 if (!write) {
3814 /* For host-managed, reads cannot cross zone type boundaries */
3815 if (zsp->z_type != zsp_end->z_type) {
3816 mk_sense_buffer(scp, ILLEGAL_REQUEST,
3817 LBA_OUT_OF_RANGE,
3818 READ_INVDATA_ASCQ);
3819 return check_condition_result;
3820 }
3821 return 0;
3822 }
3823
3824 /* Writing into a gap zone is not allowed */
3825 if (zbc_zone_is_gap(zsp)) {
3826 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE,
3827 ATTEMPT_ACCESS_GAP);
3828 return check_condition_result;
3829 }
3830
3831 /* No restrictions for writes within conventional zones */
3832 if (zbc_zone_is_conv(zsp)) {
3833 if (!zbc_zone_is_conv(zsp_end)) {
3834 mk_sense_buffer(scp, ILLEGAL_REQUEST,
3835 LBA_OUT_OF_RANGE,
3836 WRITE_BOUNDARY_ASCQ);
3837 return check_condition_result;
3838 }
3839 return 0;
3840 }
3841
3842 if (zsp->z_type == ZBC_ZTYPE_SWR) {
3843 /* Writes cannot cross sequential zone boundaries */
3844 if (zsp_end != zsp) {
3845 mk_sense_buffer(scp, ILLEGAL_REQUEST,
3846 LBA_OUT_OF_RANGE,
3847 WRITE_BOUNDARY_ASCQ);
3848 return check_condition_result;
3849 }
3850 /* Cannot write full zones */
3851 if (zsp->z_cond == ZC5_FULL) {
3852 mk_sense_buffer(scp, ILLEGAL_REQUEST,
3853 INVALID_FIELD_IN_CDB, 0);
3854 return check_condition_result;
3855 }
3856 /* Writes must be aligned to the zone WP */
3857 if (lba != zsp->z_wp) {
3858 mk_sense_buffer(scp, ILLEGAL_REQUEST,
3859 LBA_OUT_OF_RANGE,
3860 UNALIGNED_WRITE_ASCQ);
3861 return check_condition_result;
3862 }
3863 }
3864
3865 /* Handle implicit open of closed and empty zones */
3866 if (zsp->z_cond == ZC1_EMPTY || zsp->z_cond == ZC4_CLOSED) {
3867 if (devip->max_open &&
3868 devip->nr_exp_open >= devip->max_open) {
3869 mk_sense_buffer(scp, DATA_PROTECT,
3870 INSUFF_RES_ASC,
3871 INSUFF_ZONE_ASCQ);
3872 return check_condition_result;
3873 }
3874 zbc_open_zone(devip, zsp, false);
3875 }
3876
3877 return 0;
3878 }
3879
3880 static inline int check_device_access_params
3881 (struct scsi_cmnd *scp, unsigned long long lba,
3882 unsigned int num, bool write)
3883 {
3884 struct scsi_device *sdp = scp->device;
3885 struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
3886
3887 if (lba + num > sdebug_capacity) {
3888 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
3889 return check_condition_result;
3890 }
3891 /* transfer length excessive (tie in to block limits VPD page) */
3892 if (num > sdebug_store_sectors) {
3893 /* needs work to find which cdb byte 'num' comes from */
3894 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3895 return check_condition_result;
3896 }
3897 if (write && unlikely(sdebug_wp)) {
3898 mk_sense_buffer(scp, DATA_PROTECT, WRITE_PROTECTED, 0x2);
3899 return check_condition_result;
3900 }
3901 if (sdebug_dev_is_zoned(devip))
3902 return check_zbc_access_params(scp, lba, num, write);
3903
3904 return 0;
3905 }
3906
3907 /*
3908 * Note: if BUG_ON() fires it usually indicates a problem with the parser
3909 * tables. Perhaps a missing F_FAKE_RW or FF_MEDIA_IO flag. Response functions
3910 * that access any of the "stores" in struct sdeb_store_info should call this
3911 * function with bug_if_fake_rw set to true.
3912 */
3913 static inline struct sdeb_store_info *devip2sip(struct sdebug_dev_info *devip,
3914 bool bug_if_fake_rw)
3915 {
3916 if (sdebug_fake_rw) {
3917 BUG_ON(bug_if_fake_rw); /* See note above */
3918 return NULL;
3919 }
3920 return xa_load(per_store_ap, devip->sdbg_host->si_idx);
3921 }
3922
3923 static inline void
3924 sdeb_read_lock(rwlock_t *lock)
3925 {
3926 if (sdebug_no_rwlock)
3927 __acquire(lock);
3928 else
3929 read_lock(lock);
3930 }
3931
3932 static inline void
3933 sdeb_read_unlock(rwlock_t *lock)
3934 {
3935 if (sdebug_no_rwlock)
3936 __release(lock);
3937 else
3938 read_unlock(lock);
3939 }
3940
3941 static inline void
3942 sdeb_write_lock(rwlock_t *lock)
3943 {
3944 if (sdebug_no_rwlock)
3945 __acquire(lock);
3946 else
3947 write_lock(lock);
3948 }
3949
3950 static inline void
3951 sdeb_write_unlock(rwlock_t *lock)
3952 {
3953 if (sdebug_no_rwlock)
3954 __release(lock);
3955 else
3956 write_unlock(lock);
3957 }
3958
3959 static inline void
3960 sdeb_data_read_lock(struct sdeb_store_info *sip)
3961 {
3962 BUG_ON(!sip);
3963
3964 sdeb_read_lock(&sip->macc_data_lck);
3965 }
3966
3967 static inline void
3968 sdeb_data_read_unlock(struct sdeb_store_info *sip)
3969 {
3970 BUG_ON(!sip);
3971
3972 sdeb_read_unlock(&sip->macc_data_lck);
3973 }
3974
3975 static inline void
3976 sdeb_data_write_lock(struct sdeb_store_info *sip)
3977 {
3978 BUG_ON(!sip);
3979
3980 sdeb_write_lock(&sip->macc_data_lck);
3981 }
3982
3983 static inline void
3984 sdeb_data_write_unlock(struct sdeb_store_info *sip)
3985 {
3986 BUG_ON(!sip);
3987
3988 sdeb_write_unlock(&sip->macc_data_lck);
3989 }
3990
3991 static inline void
3992 sdeb_data_sector_read_lock(struct sdeb_store_info *sip)
3993 {
3994 BUG_ON(!sip);
3995
3996 sdeb_read_lock(&sip->macc_sector_lck);
3997 }
3998
3999 static inline void
4000 sdeb_data_sector_read_unlock(struct sdeb_store_info *sip)
4001 {
4002 BUG_ON(!sip);
4003
4004 sdeb_read_unlock(&sip->macc_sector_lck);
4005 }
4006
4007 static inline void
4008 sdeb_data_sector_write_lock(struct sdeb_store_info *sip)
4009 {
4010 BUG_ON(!sip);
4011
4012 sdeb_write_lock(&sip->macc_sector_lck);
4013 }
4014
4015 static inline void
4016 sdeb_data_sector_write_unlock(struct sdeb_store_info *sip)
4017 {
4018 BUG_ON(!sip);
4019
4020 sdeb_write_unlock(&sip->macc_sector_lck);
4021 }
4022
4023 /*
4024 * Atomic locking:
4025 * We simplify the atomic model to allow only 1x atomic write and many non-
4026 * atomic reads or writes for all LBAs.
4027
4028 * A RW lock has a similar bahaviour:
4029 * Only 1x writer and many readers.
4030
4031 * So use a RW lock for per-device read and write locking:
4032 * An atomic access grabs the lock as a writer and non-atomic grabs the lock
4033 * as a reader.
4034 */
4035
4036 static inline void
4037 sdeb_data_lock(struct sdeb_store_info *sip, bool atomic)
4038 {
4039 if (atomic)
4040 sdeb_data_write_lock(sip);
4041 else
4042 sdeb_data_read_lock(sip);
4043 }
4044
4045 static inline void
4046 sdeb_data_unlock(struct sdeb_store_info *sip, bool atomic)
4047 {
4048 if (atomic)
4049 sdeb_data_write_unlock(sip);
4050 else
4051 sdeb_data_read_unlock(sip);
4052 }
4053
4054 /* Allow many reads but only one write per sector */
4055 static inline void
4056 sdeb_data_sector_lock(struct sdeb_store_info *sip, bool do_write)
4057 {
4058 if (do_write)
4059 sdeb_data_sector_write_lock(sip);
4060 else
4061 sdeb_data_sector_read_lock(sip);
4062 }
4063
4064 static inline void
4065 sdeb_data_sector_unlock(struct sdeb_store_info *sip, bool do_write)
4066 {
4067 if (do_write)
4068 sdeb_data_sector_write_unlock(sip);
4069 else
4070 sdeb_data_sector_read_unlock(sip);
4071 }
4072
4073 static inline void
4074 sdeb_meta_read_lock(struct sdeb_store_info *sip)
4075 {
4076 if (sdebug_no_rwlock) {
4077 if (sip)
4078 __acquire(&sip->macc_meta_lck);
4079 else
4080 __acquire(&sdeb_fake_rw_lck);
4081 } else {
4082 if (sip)
4083 read_lock(&sip->macc_meta_lck);
4084 else
4085 read_lock(&sdeb_fake_rw_lck);
4086 }
4087 }
4088
4089 static inline void
4090 sdeb_meta_read_unlock(struct sdeb_store_info *sip)
4091 {
4092 if (sdebug_no_rwlock) {
4093 if (sip)
4094 __release(&sip->macc_meta_lck);
4095 else
4096 __release(&sdeb_fake_rw_lck);
4097 } else {
4098 if (sip)
4099 read_unlock(&sip->macc_meta_lck);
4100 else
4101 read_unlock(&sdeb_fake_rw_lck);
4102 }
4103 }
4104
4105 static inline void
4106 sdeb_meta_write_lock(struct sdeb_store_info *sip)
4107 {
4108 if (sdebug_no_rwlock) {
4109 if (sip)
4110 __acquire(&sip->macc_meta_lck);
4111 else
4112 __acquire(&sdeb_fake_rw_lck);
4113 } else {
4114 if (sip)
4115 write_lock(&sip->macc_meta_lck);
4116 else
4117 write_lock(&sdeb_fake_rw_lck);
4118 }
4119 }
4120
4121 static inline void
4122 sdeb_meta_write_unlock(struct sdeb_store_info *sip)
4123 {
4124 if (sdebug_no_rwlock) {
4125 if (sip)
4126 __release(&sip->macc_meta_lck);
4127 else
4128 __release(&sdeb_fake_rw_lck);
4129 } else {
4130 if (sip)
4131 write_unlock(&sip->macc_meta_lck);
4132 else
4133 write_unlock(&sdeb_fake_rw_lck);
4134 }
4135 }
4136
4137 /* Returns number of bytes copied or -1 if error. */
4138 static int do_device_access(struct sdeb_store_info *sip, struct scsi_cmnd *scp,
4139 u32 sg_skip, u64 lba, u32 num, u8 group_number,
4140 bool do_write, bool atomic)
4141 {
4142 int ret;
4143 u64 block;
4144 enum dma_data_direction dir;
4145 struct scsi_data_buffer *sdb = &scp->sdb;
4146 u8 *fsp;
4147 int i, total = 0;
4148
4149 /*
4150 * Even though reads are inherently atomic (in this driver), we expect
4151 * the atomic flag only for writes.
4152 */
4153 if (!do_write && atomic)
4154 return -1;
4155
4156 if (do_write) {
4157 dir = DMA_TO_DEVICE;
4158 write_since_sync = true;
4159 } else {
4160 dir = DMA_FROM_DEVICE;
4161 }
4162
4163 if (!sdb->length || !sip)
4164 return 0;
4165 if (scp->sc_data_direction != dir)
4166 return -1;
4167
4168 if (do_write && group_number < ARRAY_SIZE(writes_by_group_number))
4169 atomic_long_inc(&writes_by_group_number[group_number]);
4170
4171 fsp = sip->storep;
4172
4173 block = do_div(lba, sdebug_store_sectors);
4174
4175 /* Allow either one atomic write or many non-atomic writes at any given time */
4176 sdeb_data_lock(sip, atomic);
4177 for (i = 0; i < num; i++) {
4178 /* We shouldn't need to lock for atomic writes, but do it anyway */
4179 sdeb_data_sector_lock(sip, do_write);
4180 ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
4181 fsp + (block * sdebug_sector_size),
4182 sdebug_sector_size, sg_skip, do_write);
4183 sdeb_data_sector_unlock(sip, do_write);
4184 total += ret;
4185 if (ret != sdebug_sector_size)
4186 break;
4187 sg_skip += sdebug_sector_size;
4188 if (++block >= sdebug_store_sectors)
4189 block = 0;
4190 }
4191 sdeb_data_unlock(sip, atomic);
4192
4193 return total;
4194 }
4195
4196 /* Returns number of bytes copied or -1 if error. */
4197 static int do_dout_fetch(struct scsi_cmnd *scp, u32 num, u8 *doutp)
4198 {
4199 struct scsi_data_buffer *sdb = &scp->sdb;
4200
4201 if (!sdb->length)
4202 return 0;
4203 if (scp->sc_data_direction != DMA_TO_DEVICE)
4204 return -1;
4205 return sg_copy_buffer(sdb->table.sgl, sdb->table.nents, doutp,
4206 num * sdebug_sector_size, 0, true);
4207 }
4208
4209 /* If sip->storep+lba compares equal to the first num blocks of arr, then
4210 * copy the second num blocks of arr (the new data) into sip->storep+lba
4211 * and return true. If the comparison fails then return false. */
4212 static bool comp_write_worker(struct sdeb_store_info *sip, u64 lba, u32 num,
4213 const u8 *arr, bool compare_only)
4214 {
4215 bool res;
4216 u64 block, rest = 0;
4217 u32 store_blks = sdebug_store_sectors;
4218 u32 lb_size = sdebug_sector_size;
4219 u8 *fsp = sip->storep;
4220
4221 block = do_div(lba, store_blks);
4222 if (block + num > store_blks)
4223 rest = block + num - store_blks;
4224
4225 res = !memcmp(fsp + (block * lb_size), arr, (num - rest) * lb_size);
4226 if (!res)
4227 return res;
4228 if (rest) /* wrap-around tail; negate memcmp so res true means "matches" */
4229 res = !memcmp(fsp, arr + ((num - rest) * lb_size),
4230 rest * lb_size);
4231 if (!res)
4232 return res;
4233 if (compare_only)
4234 return true;
4235 arr += num * lb_size;
4236 memcpy(fsp + (block * lb_size), arr, (num - rest) * lb_size);
4237 if (rest)
4238 memcpy(fsp, arr + ((num - rest) * lb_size), rest * lb_size);
4239 return res;
4240 }
4241
4242 static __be16 dif_compute_csum(const void *buf, int len)
4243 {
4244 __be16 csum;
4245
4246 if (sdebug_guard)
4247 csum = (__force __be16)ip_compute_csum(buf, len);
4248 else
4249 csum = cpu_to_be16(crc_t10dif(buf, len));
4250
4251 return csum;
4252 }
4253
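/* Verify one protection information tuple against a data sector. Returns 0
 * on success, 0x01 for a guard tag mismatch and 0x03 for a reference tag
 * mismatch; callers use these values as the ASCQ with ASC 0x10. */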
4254 static int dif_verify(struct t10_pi_tuple *sdt, const void *data,
4255 sector_t sector, u32 ei_lba)
4256 {
4257 __be16 csum = dif_compute_csum(data, sdebug_sector_size);
4258
4259 if (sdt->guard_tag != csum) {
4260 pr_err("GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
4261 (unsigned long)sector,
4262 be16_to_cpu(sdt->guard_tag),
4263 be16_to_cpu(csum));
4264 return 0x01;
4265 }
4266 if (sdebug_dif == T10_PI_TYPE1_PROTECTION &&
4267 be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
4268 pr_err("REF check failed on sector %lu\n",
4269 (unsigned long)sector);
4270 return 0x03;
4271 }
4272 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
4273 be32_to_cpu(sdt->ref_tag) != ei_lba) {
4274 pr_err("REF check failed on sector %lu\n",
4275 (unsigned long)sector);
4276 return 0x03;
4277 }
4278 return 0;
4279 }
4280
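/* Copy protection information between the command's protection
 * scatter-gather list and the PI store, wrapping at the end of the store. */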
4281 static void dif_copy_prot(struct scsi_cmnd *scp, sector_t sector,
4282 unsigned int sectors, bool read)
4283 {
4284 size_t resid;
4285 void *paddr;
4286 struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
4287 scp->device->hostdata, true);
4288 struct t10_pi_tuple *dif_storep = sip->dif_storep;
4289 const void *dif_store_end = dif_storep + sdebug_store_sectors;
4290 struct sg_mapping_iter miter;
4291
4292 /* Bytes of protection data to copy into sgl */
4293 resid = sectors * sizeof(*dif_storep);
4294
4295 sg_miter_start(&miter, scsi_prot_sglist(scp),
4296 scsi_prot_sg_count(scp), SG_MITER_ATOMIC |
4297 (read ? SG_MITER_TO_SG : SG_MITER_FROM_SG));
4298
4299 while (sg_miter_next(&miter) && resid > 0) {
4300 size_t len = min_t(size_t, miter.length, resid);
4301 void *start = dif_store(sip, sector);
4302 size_t rest = 0;
4303
4304 if (dif_store_end < start + len)
4305 rest = start + len - dif_store_end;
4306
4307 paddr = miter.addr;
4308
4309 if (read)
4310 memcpy(paddr, start, len - rest);
4311 else
4312 memcpy(start, paddr, len - rest);
4313
4314 if (rest) {
4315 if (read)
4316 memcpy(paddr + len - rest, dif_storep, rest);
4317 else
4318 memcpy(dif_storep, paddr + len - rest, rest);
4319 }
4320
4321 sector += len / sizeof(*dif_storep);
4322 resid -= len;
4323 }
4324 sg_miter_stop(&miter);
4325 }
4326
4327 static int prot_verify_read(struct scsi_cmnd *scp, sector_t start_sec,
4328 unsigned int sectors, u32 ei_lba)
4329 {
4330 int ret = 0;
4331 unsigned int i;
4332 sector_t sector;
4333 struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
4334 scp->device->hostdata, true);
4335 struct t10_pi_tuple *sdt;
4336
4337 for (i = 0; i < sectors; i++, ei_lba++) {
4338 sector = start_sec + i;
4339 sdt = dif_store(sip, sector);
4340
4341 if (sdt->app_tag == cpu_to_be16(0xffff))
4342 continue;
4343
4344 /*
4345 * Because scsi_debug acts as both initiator and
4346 * target we proceed to verify the PI even if
4347 * RDPROTECT=3. This is done so the "initiator" knows
4348 * which type of error to return. Otherwise we would
4349 * have to iterate over the PI twice.
4350 */
4351 if (scp->cmnd[1] >> 5) { /* RDPROTECT */
4352 ret = dif_verify(sdt, lba2fake_store(sip, sector),
4353 sector, ei_lba);
4354 if (ret) {
4355 dif_errors++;
4356 break;
4357 }
4358 }
4359 }
4360
4361 dif_copy_prot(scp, start_sec, sectors, true);
4362 dix_reads++;
4363
4364 return ret;
4365 }
4366
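/*
 * READ(6) for tape: fixed mode reads 'transfer' blocks of the current block
 * size, variable mode one block of the requested size. Filemarks, EOD, EOM
 * and incorrect-length blocks raise sense data with a residual count.
 */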
4367 static int resp_read_tape(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4368 {
4369 u32 i, num, transfer, size;
4370 u8 *cmd = scp->cmnd;
4371 struct scsi_data_buffer *sdb = &scp->sdb;
4372 int partition = devip->tape_partition;
4373 u32 pos = devip->tape_location[partition];
4374 struct tape_block *blp;
4375 bool fixed, sili;
4376
4377 if (cmd[0] != READ_6) { /* Only Read(6) supported */
4378 mk_sense_invalid_opcode(scp);
4379 return illegal_condition_result;
4380 }
4381 fixed = (cmd[1] & 0x1) != 0;
4382 sili = (cmd[1] & 0x2) != 0;
4383 if (fixed && sili) {
4384 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
4385 return check_condition_result;
4386 }
4387
4388 transfer = get_unaligned_be24(cmd + 2);
4389 if (fixed) {
4390 num = transfer;
4391 size = devip->tape_blksize;
4392 } else {
4393 if (transfer < TAPE_MIN_BLKSIZE ||
4394 transfer > TAPE_MAX_BLKSIZE) {
4395 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
4396 return check_condition_result;
4397 }
4398 num = 1;
4399 size = transfer;
4400 }
4401
4402 for (i = 0, blp = devip->tape_blocks[partition] + pos;
4403 i < num && pos < devip->tape_eop[partition];
4404 i++, pos++, blp++) {
4405 devip->tape_location[partition] = pos + 1;
4406 if (IS_TAPE_BLOCK_FM(blp->fl_size)) {
4407 mk_sense_info_tape(scp, NO_SENSE, NO_ADDITIONAL_SENSE,
4408 FILEMARK_DETECTED_ASCQ, fixed ? num - i : size,
4409 SENSE_FLAG_FILEMARK);
4410 scsi_set_resid(scp, (num - i) * size);
4411 return check_condition_result;
4412 }
4413 /* Assume no REW */
4414 if (IS_TAPE_BLOCK_EOD(blp->fl_size)) {
4415 mk_sense_info_tape(scp, BLANK_CHECK, NO_ADDITIONAL_SENSE,
4416 EOD_DETECTED_ASCQ, fixed ? num - i : size,
4417 0);
4418 devip->tape_location[partition] = pos;
4419 scsi_set_resid(scp, (num - i) * size);
4420 return check_condition_result;
4421 }
4422 sg_zero_buffer(sdb->table.sgl, sdb->table.nents,
4423 size, i * size);
4424 sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
4425 &(blp->data), 4, i * size, false);
4426 if (fixed) {
4427 if (blp->fl_size != devip->tape_blksize) {
4428 scsi_set_resid(scp, (num - i) * size);
4429 mk_sense_info_tape(scp, NO_SENSE, NO_ADDITIONAL_SENSE,
4430 0, num - i,
4431 SENSE_FLAG_ILI);
4432 return check_condition_result;
4433 }
4434 } else {
4435 if (blp->fl_size != size) {
4436 if (blp->fl_size < size)
4437 scsi_set_resid(scp, size - blp->fl_size);
4438 if (!sili) {
4439 mk_sense_info_tape(scp, NO_SENSE, NO_ADDITIONAL_SENSE,
4440 0, size - blp->fl_size,
4441 SENSE_FLAG_ILI);
4442 return check_condition_result;
4443 }
4444 }
4445 }
4446 }
4447 if (pos >= devip->tape_eop[partition]) {
4448 mk_sense_info_tape(scp, NO_SENSE, NO_ADDITIONAL_SENSE,
4449 EOP_EOM_DETECTED_ASCQ, fixed ? num - i : size,
4450 SENSE_FLAG_EOM);
4451 devip->tape_location[partition] = pos - 1;
4452 return check_condition_result;
4453 }
4454 devip->tape_location[partition] = pos;
4455
4456 return 0;
4457 }
4458
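/*
 * READ dispatcher for disk and ZBC devices (tape is handed off to
 * resp_read_tape above): decode the LBA and length for each READ variant,
 * apply protection and zone checks, optionally inject medium errors, then
 * copy from the fake store.
 */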
4459 static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4460 {
4461 bool check_prot;
4462 u32 num;
4463 u32 ei_lba;
4464 int ret;
4465 u64 lba;
4466 struct sdeb_store_info *sip = devip2sip(devip, true);
4467 u8 *cmd = scp->cmnd;
4468 bool meta_data_locked = false;
4469
4470 if (sdebug_ptype == TYPE_TAPE)
4471 return resp_read_tape(scp, devip);
4472
4473 switch (cmd[0]) {
4474 case READ_16:
4475 ei_lba = 0;
4476 lba = get_unaligned_be64(cmd + 2);
4477 num = get_unaligned_be32(cmd + 10);
4478 check_prot = true;
4479 break;
4480 case READ_10:
4481 ei_lba = 0;
4482 lba = get_unaligned_be32(cmd + 2);
4483 num = get_unaligned_be16(cmd + 7);
4484 check_prot = true;
4485 break;
4486 case READ_6:
4487 ei_lba = 0;
4488 lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
4489 (u32)(cmd[1] & 0x1f) << 16;
4490 num = (0 == cmd[4]) ? 256 : cmd[4];
4491 check_prot = true;
4492 break;
4493 case READ_12:
4494 ei_lba = 0;
4495 lba = get_unaligned_be32(cmd + 2);
4496 num = get_unaligned_be32(cmd + 6);
4497 check_prot = true;
4498 break;
4499 case XDWRITEREAD_10:
4500 ei_lba = 0;
4501 lba = get_unaligned_be32(cmd + 2);
4502 num = get_unaligned_be16(cmd + 7);
4503 check_prot = false;
4504 break;
4505 default: /* assume READ(32) */
4506 lba = get_unaligned_be64(cmd + 12);
4507 ei_lba = get_unaligned_be32(cmd + 20);
4508 num = get_unaligned_be32(cmd + 28);
4509 check_prot = false;
4510 break;
4511 }
4512 if (unlikely(have_dif_prot && check_prot)) {
4513 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
4514 (cmd[1] & 0xe0)) {
4515 mk_sense_invalid_opcode(scp);
4516 return check_condition_result;
4517 }
4518 if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
4519 sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
4520 (cmd[1] & 0xe0) == 0)
4521 sdev_printk(KERN_ERR, scp->device, "Unprotected RD "
4522 "to DIF device\n");
4523 }
4524 if (unlikely((sdebug_opts & SDEBUG_OPT_SHORT_TRANSFER) &&
4525 atomic_read(&sdeb_inject_pending))) {
4526 num /= 2;
4527 atomic_set(&sdeb_inject_pending, 0);
4528 }
4529
4530 /*
4531 * When checking device access params, for reads we only check data
4532 * versus what is set at init time, so no need to lock.
4533 */
4534 ret = check_device_access_params(scp, lba, num, false);
4535 if (ret)
4536 return ret;
4537 if (unlikely((SDEBUG_OPT_MEDIUM_ERR & sdebug_opts) &&
4538 (lba <= (sdebug_medium_error_start + sdebug_medium_error_count - 1)) &&
4539 ((lba + num) > sdebug_medium_error_start))) {
4540 /* claim unrecoverable read error */
4541 mk_sense_buffer(scp, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
4542 /* set info field and valid bit for fixed descriptor */
4543 if (0x70 == (scp->sense_buffer[0] & 0x7f)) {
4544 scp->sense_buffer[0] |= 0x80; /* Valid bit */
4545 ret = (lba < OPT_MEDIUM_ERR_ADDR)
4546 ? OPT_MEDIUM_ERR_ADDR : (int)lba;
4547 put_unaligned_be32(ret, scp->sense_buffer + 3);
4548 }
4549 scsi_set_resid(scp, scsi_bufflen(scp));
4550 return check_condition_result;
4551 }
4552
4553 if (sdebug_dev_is_zoned(devip) ||
4554 (sdebug_dix && scsi_prot_sg_count(scp))) {
4555 sdeb_meta_read_lock(sip);
4556 meta_data_locked = true;
4557 }
4558
4559 /* DIX + T10 DIF */
4560 if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
4561 switch (prot_verify_read(scp, lba, num, ei_lba)) {
4562 case 1: /* Guard tag error */
4563 if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */
4564 sdeb_meta_read_unlock(sip);
4565 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
4566 return check_condition_result;
4567 } else if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) {
4568 sdeb_meta_read_unlock(sip);
4569 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
4570 return illegal_condition_result;
4571 }
4572 break;
4573 case 3: /* Reference tag error */
4574 if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */
4575 sdeb_meta_read_unlock(sip);
4576 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3);
4577 return check_condition_result;
4578 } else if (scp->prot_flags & SCSI_PROT_REF_CHECK) {
4579 sdeb_meta_read_unlock(sip);
4580 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3);
4581 return illegal_condition_result;
4582 }
4583 break;
4584 }
4585 }
4586
4587 ret = do_device_access(sip, scp, 0, lba, num, 0, false, false);
4588 if (meta_data_locked)
4589 sdeb_meta_read_unlock(sip);
4590 if (unlikely(ret == -1))
4591 return DID_ERROR << 16;
4592
4593 scsi_set_resid(scp, scsi_bufflen(scp) - ret);
4594
4595 if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
4596 atomic_read(&sdeb_inject_pending))) {
4597 if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
4598 mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
4599 atomic_set(&sdeb_inject_pending, 0);
4600 return check_condition_result;
4601 } else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
4602 /* Logical block guard check failed */
4603 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
4604 atomic_set(&sdeb_inject_pending, 0);
4605 return illegal_condition_result;
4606 } else if (SDEBUG_OPT_DIX_ERR & sdebug_opts) {
4607 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
4608 atomic_set(&sdeb_inject_pending, 0);
4609 return illegal_condition_result;
4610 }
4611 }
4612 return 0;
4613 }
4614
4615 static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
4616 unsigned int sectors, u32 ei_lba)
4617 {
4618 int ret;
4619 struct t10_pi_tuple *sdt;
4620 void *daddr;
4621 sector_t sector = start_sec;
4622 int ppage_offset;
4623 int dpage_offset;
4624 struct sg_mapping_iter diter;
4625 struct sg_mapping_iter piter;
4626
4627 BUG_ON(scsi_sg_count(SCpnt) == 0);
4628 BUG_ON(scsi_prot_sg_count(SCpnt) == 0);
4629
4630 sg_miter_start(&piter, scsi_prot_sglist(SCpnt),
4631 scsi_prot_sg_count(SCpnt),
4632 SG_MITER_ATOMIC | SG_MITER_FROM_SG);
4633 sg_miter_start(&diter, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
4634 SG_MITER_ATOMIC | SG_MITER_FROM_SG);
4635
4636 /* For each protection page */
4637 while (sg_miter_next(&piter)) {
4638 dpage_offset = 0;
4639 if (WARN_ON(!sg_miter_next(&diter))) {
4640 ret = 0x01;
4641 goto out;
4642 }
4643
4644 for (ppage_offset = 0; ppage_offset < piter.length;
4645 ppage_offset += sizeof(struct t10_pi_tuple)) {
4646 /* If we're at the end of the current
4647 * data page advance to the next one
4648 */
4649 if (dpage_offset >= diter.length) {
4650 if (WARN_ON(!sg_miter_next(&diter))) {
4651 ret = 0x01;
4652 goto out;
4653 }
4654 dpage_offset = 0;
4655 }
4656
4657 sdt = piter.addr + ppage_offset;
4658 daddr = diter.addr + dpage_offset;
4659
4660 if (SCpnt->cmnd[1] >> 5 != 3) { /* WRPROTECT */
4661 ret = dif_verify(sdt, daddr, sector, ei_lba);
4662 if (ret)
4663 goto out;
4664 }
4665
4666 sector++;
4667 ei_lba++;
4668 dpage_offset += sdebug_sector_size;
4669 }
4670 diter.consumed = dpage_offset;
4671 sg_miter_stop(&diter);
4672 }
4673 sg_miter_stop(&piter);
4674
4675 dif_copy_prot(SCpnt, start_sec, sectors, false);
4676 dix_writes++;
4677
4678 return 0;
4679
4680 out:
4681 dif_errors++;
4682 sg_miter_stop(&diter);
4683 sg_miter_stop(&piter);
4684 return ret;
4685 }
4686
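/* Logical block provisioning helpers: translate LBAs to bits in the
 * provisioning bitmap, honouring the configured unmap granularity and
 * alignment. */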
4687 static unsigned long lba_to_map_index(sector_t lba)
4688 {
4689 if (sdebug_unmap_alignment)
4690 lba += sdebug_unmap_granularity - sdebug_unmap_alignment;
4691 sector_div(lba, sdebug_unmap_granularity);
4692 return lba;
4693 }
4694
4695 static sector_t map_index_to_lba(unsigned long index)
4696 {
4697 sector_t lba = index * sdebug_unmap_granularity;
4698
4699 if (sdebug_unmap_alignment)
4700 lba -= sdebug_unmap_granularity - sdebug_unmap_alignment;
4701 return lba;
4702 }
4703
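/* Return whether the block at lba is mapped and, via *num, the number of
 * following blocks that share the same state (capped at the store end). */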
4704 static unsigned int map_state(struct sdeb_store_info *sip, sector_t lba,
4705 unsigned int *num)
4706 {
4707 sector_t end;
4708 unsigned int mapped;
4709 unsigned long index;
4710 unsigned long next;
4711
4712 index = lba_to_map_index(lba);
4713 mapped = test_bit(index, sip->map_storep);
4714
4715 if (mapped)
4716 next = find_next_zero_bit(sip->map_storep, map_size, index);
4717 else
4718 next = find_next_bit(sip->map_storep, map_size, index);
4719
4720 end = min_t(sector_t, sdebug_store_sectors, map_index_to_lba(next));
4721 *num = end - lba;
4722 return mapped;
4723 }
4724
4725 static void map_region(struct sdeb_store_info *sip, sector_t lba,
4726 unsigned int len)
4727 {
4728 sector_t end = lba + len;
4729
4730 while (lba < end) {
4731 unsigned long index = lba_to_map_index(lba);
4732
4733 if (index < map_size)
4734 set_bit(index, sip->map_storep);
4735
4736 lba = map_index_to_lba(index + 1);
4737 }
4738 }
4739
4740 static void unmap_region(struct sdeb_store_info *sip, sector_t lba,
4741 unsigned int len)
4742 {
4743 sector_t end = lba + len;
4744 u8 *fsp = sip->storep;
4745
4746 while (lba < end) {
4747 unsigned long index = lba_to_map_index(lba);
4748
4749 if (lba == map_index_to_lba(index) &&
4750 lba + sdebug_unmap_granularity <= end &&
4751 index < map_size) {
4752 clear_bit(index, sip->map_storep);
4753 if (sdebug_lbprz) { /* for LBPRZ=2 return 0xff_s */
4754 memset(fsp + lba * sdebug_sector_size,
4755 (sdebug_lbprz & 1) ? 0 : 0xff,
4756 sdebug_sector_size *
4757 sdebug_unmap_granularity);
4758 }
4759 if (sip->dif_storep) {
4760 memset(sip->dif_storep + lba, 0xff,
4761 sizeof(*sip->dif_storep) *
4762 sdebug_unmap_granularity);
4763 }
4764 }
4765 lba = map_index_to_lba(index + 1);
4766 }
4767 }
4768
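/*
 * WRITE(6) for tape: append blocks at the current position, terminate the
 * written data with an EOD marker, and report early-warning or
 * end-of-partition conditions via sense data.
 */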
4769 static int resp_write_tape(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4770 {
4771 u32 i, num, transfer, size, written = 0;
4772 u8 *cmd = scp->cmnd;
4773 struct scsi_data_buffer *sdb = &scp->sdb;
4774 int partition = devip->tape_partition;
4775 int pos = devip->tape_location[partition];
4776 struct tape_block *blp;
4777 bool fixed, ew;
4778
4779 if (cmd[0] != WRITE_6) { /* Only Write(6) supported */
4780 mk_sense_invalid_opcode(scp);
4781 return illegal_condition_result;
4782 }
4783
4784 fixed = (cmd[1] & 1) != 0;
4785 transfer = get_unaligned_be24(cmd + 2);
4786 if (fixed) {
4787 num = transfer;
4788 size = devip->tape_blksize;
4789 } else {
4790 if (transfer < TAPE_MIN_BLKSIZE ||
4791 transfer > TAPE_MAX_BLKSIZE) {
4792 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
4793 return check_condition_result;
4794 }
4795 num = 1;
4796 size = transfer;
4797 }
4798
4799 scsi_set_resid(scp, num * size); /* residual is in bytes */
4800 for (i = 0, blp = devip->tape_blocks[partition] + pos, ew = false;
4801 i < num && pos < devip->tape_eop[partition] - 1; i++, pos++, blp++) {
4802 blp->fl_size = size;
4803 sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
4804 &(blp->data), 4, i * size, true);
4805 written += size;
4806 scsi_set_resid(scp, num * size - written);
4807 ew |= (pos == devip->tape_eop[partition] - TAPE_EW);
4808 }
4809
4810 devip->tape_location[partition] = pos;
4811 blp->fl_size = TAPE_BLOCK_EOD_FLAG;
4812 if (pos >= devip->tape_eop[partition] - 1) {
4813 mk_sense_info_tape(scp, VOLUME_OVERFLOW,
4814 NO_ADDITIONAL_SENSE, EOP_EOM_DETECTED_ASCQ,
4815 fixed ? num - i : transfer,
4816 SENSE_FLAG_EOM);
4817 return check_condition_result;
4818 }
4819 if (ew) { /* early warning */
4820 mk_sense_info_tape(scp, NO_SENSE,
4821 NO_ADDITIONAL_SENSE, EOP_EOM_DETECTED_ASCQ,
4822 fixed ? num - i : transfer,
4823 SENSE_FLAG_EOM);
4824 return check_condition_result;
4825 }
4826
4827 return 0;
4828 }
4829
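/*
 * WRITE dispatcher for disk and ZBC devices (tape is handed off to
 * resp_write_tape above): decode each WRITE variant, apply protection and
 * zone checks, copy into the fake store, update the provisioning map and
 * any zone write pointer.
 */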
4830 static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4831 {
4832 bool check_prot;
4833 u32 num;
4834 u8 group = 0;
4835 u32 ei_lba;
4836 int ret;
4837 u64 lba;
4838 struct sdeb_store_info *sip = devip2sip(devip, true);
4839 u8 *cmd = scp->cmnd;
4840 bool meta_data_locked = false;
4841
4842 if (sdebug_ptype == TYPE_TAPE)
4843 return resp_write_tape(scp, devip);
4844
4845 switch (cmd[0]) {
4846 case WRITE_16:
4847 ei_lba = 0;
4848 lba = get_unaligned_be64(cmd + 2);
4849 num = get_unaligned_be32(cmd + 10);
4850 group = cmd[14] & 0x3f;
4851 check_prot = true;
4852 break;
4853 case WRITE_10:
4854 ei_lba = 0;
4855 lba = get_unaligned_be32(cmd + 2);
4856 group = cmd[6] & 0x3f;
4857 num = get_unaligned_be16(cmd + 7);
4858 check_prot = true;
4859 break;
4860 case WRITE_6:
4861 ei_lba = 0;
4862 lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
4863 (u32)(cmd[1] & 0x1f) << 16;
4864 num = (0 == cmd[4]) ? 256 : cmd[4];
4865 check_prot = true;
4866 break;
4867 case WRITE_12:
4868 ei_lba = 0;
4869 lba = get_unaligned_be32(cmd + 2);
4870 num = get_unaligned_be32(cmd + 6);
4871 group = cmd[6] & 0x3f;
4872 check_prot = true;
4873 break;
4874 case 0x53: /* XDWRITEREAD(10) */
4875 ei_lba = 0;
4876 lba = get_unaligned_be32(cmd + 2);
4877 group = cmd[6] & 0x1f;
4878 num = get_unaligned_be16(cmd + 7);
4879 check_prot = false;
4880 break;
4881 default: /* assume WRITE(32) */
4882 group = cmd[6] & 0x3f;
4883 lba = get_unaligned_be64(cmd + 12);
4884 ei_lba = get_unaligned_be32(cmd + 20);
4885 num = get_unaligned_be32(cmd + 28);
4886 check_prot = false;
4887 break;
4888 }
4889 if (unlikely(have_dif_prot && check_prot)) {
4890 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
4891 (cmd[1] & 0xe0)) {
4892 mk_sense_invalid_opcode(scp);
4893 return check_condition_result;
4894 }
4895 if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
4896 sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
4897 (cmd[1] & 0xe0) == 0)
4898 sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
4899 "to DIF device\n");
4900 }
4901
4902 if (sdebug_dev_is_zoned(devip) ||
4903 (sdebug_dix && scsi_prot_sg_count(scp)) ||
4904 scsi_debug_lbp()) {
4905 sdeb_meta_write_lock(sip);
4906 meta_data_locked = true;
4907 }
4908
4909 ret = check_device_access_params(scp, lba, num, true);
4910 if (ret) {
4911 if (meta_data_locked)
4912 sdeb_meta_write_unlock(sip);
4913 return ret;
4914 }
4915
4916 /* DIX + T10 DIF */
4917 if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
4918 switch (prot_verify_write(scp, lba, num, ei_lba)) {
4919 case 1: /* Guard tag error */
4920 if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) {
4921 sdeb_meta_write_unlock(sip);
4922 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
4923 return illegal_condition_result;
4924 } else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */
4925 sdeb_meta_write_unlock(sip);
4926 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
4927 return check_condition_result;
4928 }
4929 break;
4930 case 3: /* Reference tag error */
4931 if (scp->prot_flags & SCSI_PROT_REF_CHECK) {
4932 sdeb_meta_write_unlock(sip);
4933 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3);
4934 return illegal_condition_result;
4935 } else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */
4936 sdeb_meta_write_unlock(sip);
4937 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3);
4938 return check_condition_result;
4939 }
4940 break;
4941 }
4942 }
4943
4944 ret = do_device_access(sip, scp, 0, lba, num, group, true, false);
4945 if (unlikely(scsi_debug_lbp()))
4946 map_region(sip, lba, num);
4947
4948 /* If ZBC zone then bump its write pointer */
4949 if (sdebug_dev_is_zoned(devip))
4950 zbc_inc_wp(devip, lba, num);
4951 if (meta_data_locked)
4952 sdeb_meta_write_unlock(sip);
4953
4954 if (unlikely(-1 == ret))
4955 return DID_ERROR << 16;
4956 else if (unlikely(sdebug_verbose &&
4957 (ret < (num * sdebug_sector_size))))
4958 sdev_printk(KERN_INFO, scp->device,
4959 "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
4960 my_name, num * sdebug_sector_size, ret);
4961
4962 if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
4963 atomic_read(&sdeb_inject_pending))) {
4964 if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
4965 mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
4966 atomic_set(&sdeb_inject_pending, 0);
4967 return check_condition_result;
4968 } else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
4969 /* Logical block guard check failed */
4970 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
4971 atomic_set(&sdeb_inject_pending, 0);
4972 return illegal_condition_result;
4973 } else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
4974 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
4975 atomic_set(&sdeb_inject_pending, 0);
4976 return illegal_condition_result;
4977 }
4978 }
4979 return 0;
4980 }
4981
4982 /*
4983 * T10 has only specified WRITE SCATTERED(16) and WRITE SCATTERED(32).
4984 * No READ GATHERED yet (requires bidi or long cdb holding gather list).
4985 */
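/*
 * A sketch of the data-out layout assumed by the parsing code below:
 * a parameter list header (32 bytes, the same size as a descriptor) is
 * followed by num_lrd LBA range descriptors of 32 bytes each, with the
 * LBA at byte 0 and the NUMBER OF LOGICAL BLOCKS at byte 8 (WRITE
 * SCATTERED(32) also carries an expected initial logical block
 * reference tag at byte 12). The blocks to be written start at the
 * LB data offset (lbdof), which is expressed in logical blocks.
 */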
4986 static int resp_write_scat(struct scsi_cmnd *scp,
4987 struct sdebug_dev_info *devip)
4988 {
4989 u8 *cmd = scp->cmnd;
4990 u8 *lrdp = NULL;
4991 u8 *up;
4992 struct sdeb_store_info *sip = devip2sip(devip, true);
4993 u8 wrprotect;
4994 u16 lbdof, num_lrd, k;
4995 u32 num, num_by, bt_len, lbdof_blen, sg_off, cum_lb;
4996 u32 lb_size = sdebug_sector_size;
4997 u32 ei_lba;
4998 u64 lba;
4999 u8 group;
5000 int ret, res;
5001 bool is_16;
5002 static const u32 lrd_size = 32; /* LBA range descriptor size; the parameter list header is also 32 bytes */
5003
5004 if (cmd[0] == VARIABLE_LENGTH_CMD) {
5005 is_16 = false;
5006 group = cmd[6] & 0x3f;
5007 wrprotect = (cmd[10] >> 5) & 0x7;
5008 lbdof = get_unaligned_be16(cmd + 12);
5009 num_lrd = get_unaligned_be16(cmd + 16);
5010 bt_len = get_unaligned_be32(cmd + 28);
5011 } else { /* that leaves WRITE SCATTERED(16) */
5012 is_16 = true;
5013 wrprotect = (cmd[2] >> 5) & 0x7;
5014 lbdof = get_unaligned_be16(cmd + 4);
5015 num_lrd = get_unaligned_be16(cmd + 8);
5016 bt_len = get_unaligned_be32(cmd + 10);
5017 group = cmd[14] & 0x3f;
5018 if (unlikely(have_dif_prot)) {
5019 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
5020 wrprotect) {
5021 mk_sense_invalid_opcode(scp);
5022 return illegal_condition_result;
5023 }
5024 if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
5025 sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
5026 wrprotect == 0)
5027 sdev_printk(KERN_ERR, scp->device,
5028 "Unprotected WR to DIF device\n");
5029 }
5030 }
5031 if ((num_lrd == 0) || (bt_len == 0))
5032 return 0; /* T10 says these do-nothings are not errors */
5033 if (lbdof == 0) {
5034 if (sdebug_verbose)
5035 sdev_printk(KERN_INFO, scp->device,
5036 "%s: %s: LB Data Offset field bad\n",
5037 my_name, __func__);
5038 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
5039 return illegal_condition_result;
5040 }
5041 lbdof_blen = lbdof * lb_size;
5042 if ((lrd_size + (num_lrd * lrd_size)) > lbdof_blen) {
5043 if (sdebug_verbose)
5044 sdev_printk(KERN_INFO, scp->device,
5045 "%s: %s: LBA range descriptors don't fit\n",
5046 my_name, __func__);
5047 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
5048 return illegal_condition_result;
5049 }
5050 lrdp = kzalloc(lbdof_blen, GFP_ATOMIC | __GFP_NOWARN);
5051 if (lrdp == NULL)
5052 return SCSI_MLQUEUE_HOST_BUSY;
5053 if (sdebug_verbose)
5054 sdev_printk(KERN_INFO, scp->device,
5055 "%s: %s: Fetch header+scatter_list, lbdof_blen=%u\n",
5056 my_name, __func__, lbdof_blen);
5057 res = fetch_to_dev_buffer(scp, lrdp, lbdof_blen);
5058 if (res == -1) {
5059 ret = DID_ERROR << 16;
5060 goto err_out;
5061 }
5062
5063 /* Just keep it simple and always lock for now */
5064 sdeb_meta_write_lock(sip);
5065 sg_off = lbdof_blen;
5066 /* Spec says Buffer xfer Length field in number of LBs in dout */
5067 cum_lb = 0;
5068 for (k = 0, up = lrdp + lrd_size; k < num_lrd; ++k, up += lrd_size) {
5069 lba = get_unaligned_be64(up + 0);
5070 num = get_unaligned_be32(up + 8);
5071 if (sdebug_verbose)
5072 sdev_printk(KERN_INFO, scp->device,
5073 "%s: %s: k=%d LBA=0x%llx num=%u sg_off=%u\n",
5074 my_name, __func__, k, lba, num, sg_off);
5075 if (num == 0)
5076 continue;
5077 ret = check_device_access_params(scp, lba, num, true);
5078 if (ret)
5079 goto err_out_unlock;
5080 num_by = num * lb_size;
5081 ei_lba = is_16 ? 0 : get_unaligned_be32(up + 12);
5082
5083 if ((cum_lb + num) > bt_len) {
5084 if (sdebug_verbose)
5085 sdev_printk(KERN_INFO, scp->device,
5086 "%s: %s: sum of blocks > data provided\n",
5087 my_name, __func__);
5088 mk_sense_buffer(scp, ILLEGAL_REQUEST, WRITE_ERROR_ASC,
5089 0);
5090 ret = illegal_condition_result;
5091 goto err_out_unlock;
5092 }
5093
5094 /* DIX + T10 DIF */
5095 if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
5096 int prot_ret = prot_verify_write(scp, lba, num,
5097 ei_lba);
5098
5099 if (prot_ret) {
5100 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10,
5101 prot_ret);
5102 ret = illegal_condition_result;
5103 goto err_out_unlock;
5104 }
5105 }
5106
5107 /*
5108 * Write each range atomically, to stay as close as possible
5109 * to the pre-atomic-writes behaviour.
5110 */
5111 ret = do_device_access(sip, scp, sg_off, lba, num, group, true, true);
5112 /* If ZBC zone then bump its write pointer */
5113 if (sdebug_dev_is_zoned(devip))
5114 zbc_inc_wp(devip, lba, num);
5115 if (unlikely(scsi_debug_lbp()))
5116 map_region(sip, lba, num);
5117 if (unlikely(-1 == ret)) {
5118 ret = DID_ERROR << 16;
5119 goto err_out_unlock;
5120 } else if (unlikely(sdebug_verbose && (ret < num_by)))
5121 sdev_printk(KERN_INFO, scp->device,
5122 "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
5123 my_name, num_by, ret);
5124
5125 if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
5126 atomic_read(&sdeb_inject_pending))) {
5127 if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
5128 mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
5129 atomic_set(&sdeb_inject_pending, 0);
5130 ret = check_condition_result;
5131 goto err_out_unlock;
5132 } else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
5133 /* Logical block guard check failed */
5134 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
5135 atomic_set(&sdeb_inject_pending, 0);
5136 ret = illegal_condition_result;
5137 goto err_out_unlock;
5138 } else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
5139 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
5140 atomic_set(&sdeb_inject_pending, 0);
5141 ret = illegal_condition_result;
5142 goto err_out_unlock;
5143 }
5144 }
5145 sg_off += num_by;
5146 cum_lb += num;
5147 }
5148 ret = 0;
5149 err_out_unlock:
5150 sdeb_meta_write_unlock(sip);
5151 err_out:
5152 kfree(lrdp);
5153 return ret;
5154 }
5155
5156 static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
5157 u32 ei_lba, bool unmap, bool ndob)
5158 {
5159 struct scsi_device *sdp = scp->device;
5160 struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
5161 unsigned long long i;
5162 u64 block, lbaa;
5163 u32 lb_size = sdebug_sector_size;
5164 int ret;
5165 struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
5166 scp->device->hostdata, true);
5167 u8 *fs1p;
5168 u8 *fsp;
5169 bool meta_data_locked = false;
5170
5171 if (sdebug_dev_is_zoned(devip) || scsi_debug_lbp()) {
5172 sdeb_meta_write_lock(sip);
5173 meta_data_locked = true;
5174 }
5175
5176 ret = check_device_access_params(scp, lba, num, true);
5177 if (ret)
5178 goto out;
5179
5180 if (unmap && scsi_debug_lbp()) {
5181 unmap_region(sip, lba, num);
5182 goto out;
5183 }
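/* do_div() divides lbaa in place and returns the remainder, so "block"
 * below is lba modulo the store size: the backing store may be smaller
 * than the reported capacity, in which case LBAs wrap around it.
 */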
5184 lbaa = lba;
5185 block = do_div(lbaa, sdebug_store_sectors);
5186 /* if ndob then zero 1 logical block, else fetch 1 logical block */
5187 fsp = sip->storep;
5188 fs1p = fsp + (block * lb_size);
5189 sdeb_data_write_lock(sip);
5190 if (ndob) {
5191 memset(fs1p, 0, lb_size);
5192 ret = 0;
5193 } else
5194 ret = fetch_to_dev_buffer(scp, fs1p, lb_size);
5195
5196 if (-1 == ret) {
5197 ret = DID_ERROR << 16;
5198 goto out;
5199 } else if (sdebug_verbose && !ndob && (ret < lb_size))
5200 sdev_printk(KERN_INFO, scp->device,
5201 "%s: %s: lb size=%u, IO sent=%d bytes\n",
5202 my_name, "write same", lb_size, ret);
5203
5204 /* Copy first sector to remaining blocks */
5205 for (i = 1 ; i < num ; i++) {
5206 lbaa = lba + i;
5207 block = do_div(lbaa, sdebug_store_sectors);
5208 memmove(fsp + (block * lb_size), fs1p, lb_size);
5209 }
5210 if (scsi_debug_lbp())
5211 map_region(sip, lba, num);
5212 /* If ZBC zone then bump its write pointer */
5213 if (sdebug_dev_is_zoned(devip))
5214 zbc_inc_wp(devip, lba, num);
5215 sdeb_data_write_unlock(sip);
5216 ret = 0;
5217 out:
5218 if (meta_data_locked)
5219 sdeb_meta_write_unlock(sip);
5220 return ret;
5221 }
5222
5223 static int resp_write_same_10(struct scsi_cmnd *scp,
5224 struct sdebug_dev_info *devip)
5225 {
5226 u8 *cmd = scp->cmnd;
5227 u32 lba;
5228 u16 num;
5229 u32 ei_lba = 0;
5230 bool unmap = false;
5231
5232 if (cmd[1] & 0x8) {
5233 if (sdebug_lbpws10 == 0) {
5234 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
5235 return check_condition_result;
5236 } else
5237 unmap = true;
5238 }
5239 lba = get_unaligned_be32(cmd + 2);
5240 num = get_unaligned_be16(cmd + 7);
5241 if (num > sdebug_write_same_length) {
5242 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
5243 return check_condition_result;
5244 }
5245 return resp_write_same(scp, lba, num, ei_lba, unmap, false);
5246 }
5247
5248 static int resp_write_same_16(struct scsi_cmnd *scp,
5249 struct sdebug_dev_info *devip)
5250 {
5251 u8 *cmd = scp->cmnd;
5252 u64 lba;
5253 u32 num;
5254 u32 ei_lba = 0;
5255 bool unmap = false;
5256 bool ndob = false;
5257
5258 if (cmd[1] & 0x8) { /* UNMAP */
5259 if (sdebug_lbpws == 0) {
5260 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
5261 return check_condition_result;
5262 } else
5263 unmap = true;
5264 }
5265 if (cmd[1] & 0x1) /* NDOB (no data-out buffer, assumes zeroes) */
5266 ndob = true;
5267 lba = get_unaligned_be64(cmd + 2);
5268 num = get_unaligned_be32(cmd + 10);
5269 if (num > sdebug_write_same_length) {
5270 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
5271 return check_condition_result;
5272 }
5273 return resp_write_same(scp, lba, num, ei_lba, unmap, ndob);
5274 }
5275
5276 /* Note the mode field is in the same position as the (lower) service action
5277 * field. For the REPORT SUPPORTED OPERATION CODES command, SPC-4 suggests
5278 * each mode of this command should be reported separately; that is left for the future. */
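/* The mode occupies the low 5 bits of CDB byte 1; only the microcode
 * download modes 0x4 to 0x7 handled below have any effect here, and
 * any data-out payload is accepted but otherwise ignored.
 */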
5279 static int resp_write_buffer(struct scsi_cmnd *scp,
5280 struct sdebug_dev_info *devip)
5281 {
5282 u8 *cmd = scp->cmnd;
5283 struct scsi_device *sdp = scp->device;
5284 struct sdebug_dev_info *dp;
5285 u8 mode;
5286
5287 mode = cmd[1] & 0x1f;
5288 switch (mode) {
5289 case 0x4: /* download microcode (MC) and activate (ACT) */
5290 /* set UAs on this device only */
5291 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5292 set_bit(SDEBUG_UA_MICROCODE_CHANGED, devip->uas_bm);
5293 break;
5294 case 0x5: /* download MC, save and ACT */
5295 set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET, devip->uas_bm);
5296 break;
5297 case 0x6: /* download MC with offsets and ACT */
5298 /* set UAs on most devices (LUs) in this target */
5299 list_for_each_entry(dp,
5300 &devip->sdbg_host->dev_info_list,
5301 dev_list)
5302 if (dp->target == sdp->id) {
5303 set_bit(SDEBUG_UA_BUS_RESET, dp->uas_bm);
5304 if (devip != dp)
5305 set_bit(SDEBUG_UA_MICROCODE_CHANGED,
5306 dp->uas_bm);
5307 }
5308 break;
5309 case 0x7: /* download MC with offsets, save, and ACT */
5310 /* set UA on all devices (LUs) in this target */
5311 list_for_each_entry(dp,
5312 &devip->sdbg_host->dev_info_list,
5313 dev_list)
5314 if (dp->target == sdp->id)
5315 set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET,
5316 dp->uas_bm);
5317 break;
5318 default:
5319 /* do nothing for this command for other mode values */
5320 break;
5321 }
5322 return 0;
5323 }
5324
5325 static int resp_comp_write(struct scsi_cmnd *scp,
5326 struct sdebug_dev_info *devip)
5327 {
5328 u8 *cmd = scp->cmnd;
5329 u8 *arr;
5330 struct sdeb_store_info *sip = devip2sip(devip, true);
5331 u64 lba;
5332 u32 dnum;
5333 u32 lb_size = sdebug_sector_size;
5334 u8 num;
5335 int ret;
5336 int retval = 0;
5337
5338 lba = get_unaligned_be64(cmd + 2);
5339 num = cmd[13]; /* 1 to a maximum of 255 logical blocks */
5340 if (0 == num)
5341 return 0; /* degenerate case, not an error */
5342 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
5343 (cmd[1] & 0xe0)) {
5344 mk_sense_invalid_opcode(scp);
5345 return check_condition_result;
5346 }
5347 if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
5348 sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
5349 (cmd[1] & 0xe0) == 0)
5350 sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
5351 "to DIF device\n");
5352 ret = check_device_access_params(scp, lba, num, false);
5353 if (ret)
5354 return ret;
5355 dnum = 2 * num;
5356 arr = kcalloc(lb_size, dnum, GFP_ATOMIC);
5357 if (NULL == arr) {
5358 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
5359 INSUFF_RES_ASCQ);
5360 return check_condition_result;
5361 }
5362
5363 ret = do_dout_fetch(scp, dnum, arr);
5364 if (ret == -1) {
5365 retval = DID_ERROR << 16;
5366 goto cleanup_free;
5367 } else if (sdebug_verbose && (ret < (dnum * lb_size)))
5368 sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb "
5369 "indicated=%u, IO sent=%d bytes\n", my_name,
5370 dnum * lb_size, ret);
5371
5372 sdeb_data_write_lock(sip);
5373 sdeb_meta_write_lock(sip);
5374 if (!comp_write_worker(sip, lba, num, arr, false)) {
5375 mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
5376 retval = check_condition_result;
5377 goto cleanup_unlock;
5378 }
5379
5380 /* Cover sip->map_storep (which map_region() sets) with the data lock */
5381 if (scsi_debug_lbp())
5382 map_region(sip, lba, num);
5383 cleanup_unlock:
5384 sdeb_meta_write_unlock(sip);
5385 sdeb_data_write_unlock(sip);
5386 cleanup_free:
5387 kfree(arr);
5388 return retval;
5389 }
5390
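/* UNMAP block descriptor per SBC: an 8 byte LBA, a 4 byte block count
 * and 4 reserved bytes; the parameter list is an 8 byte header followed
 * by one or more of these 16 byte descriptors.
 */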
5391 struct unmap_block_desc {
5392 __be64 lba;
5393 __be32 blocks;
5394 __be32 __reserved;
5395 };
5396
5397 static int resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
5398 {
5399 unsigned char *buf;
5400 struct unmap_block_desc *desc;
5401 struct sdeb_store_info *sip = devip2sip(devip, true);
5402 unsigned int i, payload_len, descriptors;
5403 int ret;
5404
5405 if (!scsi_debug_lbp())
5406 return 0; /* fib and say it's done */
5407 payload_len = get_unaligned_be16(scp->cmnd + 7);
5408 BUG_ON(scsi_bufflen(scp) != payload_len);
5409
5410 descriptors = (payload_len - 8) / 16;
5411 if (descriptors > sdebug_unmap_max_desc) {
5412 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
5413 return check_condition_result;
5414 }
5415
5416 buf = kzalloc(scsi_bufflen(scp), GFP_ATOMIC);
5417 if (!buf) {
5418 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
5419 INSUFF_RES_ASCQ);
5420 return check_condition_result;
5421 }
5422
5423 scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
5424
5425 BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
5426 BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);
5427
5428 desc = (void *)&buf[8];
5429
5430 sdeb_meta_write_lock(sip);
5431
5432 for (i = 0 ; i < descriptors ; i++) {
5433 unsigned long long lba = get_unaligned_be64(&desc[i].lba);
5434 unsigned int num = get_unaligned_be32(&desc[i].blocks);
5435
5436 ret = check_device_access_params(scp, lba, num, true);
5437 if (ret)
5438 goto out;
5439
5440 unmap_region(sip, lba, num);
5441 }
5442
5443 ret = 0;
5444
5445 out:
5446 sdeb_meta_write_unlock(sip);
5447 kfree(buf);
5448
5449 return ret;
5450 }
5451
5452 #define SDEBUG_GET_LBA_STATUS_LEN 32
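/* Single-descriptor GET LBA STATUS response as built below: an 8 byte
 * header (PARAMETER DATA LENGTH = 20) followed by one 16 byte LBA
 * status descriptor holding the LBA, the number of blocks and the
 * provisioning status (0: mapped; 1: deallocated).
 */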
5453
5454 static int resp_get_lba_status(struct scsi_cmnd *scp,
5455 struct sdebug_dev_info *devip)
5456 {
5457 u8 *cmd = scp->cmnd;
5458 u64 lba;
5459 u32 alloc_len, mapped, num;
5460 int ret;
5461 u8 arr[SDEBUG_GET_LBA_STATUS_LEN];
5462
5463 lba = get_unaligned_be64(cmd + 2);
5464 alloc_len = get_unaligned_be32(cmd + 10);
5465
5466 if (alloc_len < 24)
5467 return 0;
5468
5469 ret = check_device_access_params(scp, lba, 1, false);
5470 if (ret)
5471 return ret;
5472
5473 if (scsi_debug_lbp()) {
5474 struct sdeb_store_info *sip = devip2sip(devip, true);
5475
5476 mapped = map_state(sip, lba, &num);
5477 } else {
5478 mapped = 1;
5479 /* following just in case virtual_gb changed */
5480 sdebug_capacity = get_sdebug_capacity();
5481 if (sdebug_capacity - lba <= 0xffffffff)
5482 num = sdebug_capacity - lba;
5483 else
5484 num = 0xffffffff;
5485 }
5486
5487 memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
5488 put_unaligned_be32(20, arr); /* Parameter Data Length */
5489 put_unaligned_be64(lba, arr + 8); /* LBA */
5490 put_unaligned_be32(num, arr + 16); /* Number of blocks */
5491 arr[20] = !mapped; /* prov_stat=0: mapped; 1: dealloc */
5492
5493 return fill_from_dev_buffer(scp, arr, SDEBUG_GET_LBA_STATUS_LEN);
5494 }
5495
5496 static int resp_get_stream_status(struct scsi_cmnd *scp,
5497 struct sdebug_dev_info *devip)
5498 {
5499 u16 starting_stream_id, stream_id;
5500 const u8 *cmd = scp->cmnd;
5501 u32 alloc_len, offset;
5502 u8 arr[256] = {};
5503 struct scsi_stream_status_header *h = (void *)arr;
5504
5505 starting_stream_id = get_unaligned_be16(cmd + 4);
5506 alloc_len = get_unaligned_be32(cmd + 10);
5507
5508 if (alloc_len < 8) {
5509 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
5510 return check_condition_result;
5511 }
5512
5513 if (starting_stream_id >= MAXIMUM_NUMBER_OF_STREAMS) {
5514 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1);
5515 return check_condition_result;
5516 }
5517
5518 /*
5519 * The GET STREAM STATUS command only reports status information
5520 * about open streams. Treat the non-permanent streams as open.
5521 */
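/* Response built below: an 8 byte header (PARAMETER DATA LENGTH and
 * NUMBER OF OPEN STREAMS) followed by one 8 byte stream status
 * descriptor per stream, carrying the PERM bit, the STREAM IDENTIFIER
 * and a relative lifetime value.
 */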
5522 put_unaligned_be16(MAXIMUM_NUMBER_OF_STREAMS,
5523 &h->number_of_open_streams);
5524
5525 for (offset = 8, stream_id = starting_stream_id;
5526 offset + 8 <= min_t(u32, alloc_len, sizeof(arr)) &&
5527 stream_id < MAXIMUM_NUMBER_OF_STREAMS;
5528 offset += 8, stream_id++) {
5529 struct scsi_stream_status *stream_status = (void *)arr + offset;
5530
5531 stream_status->perm = stream_id < PERMANENT_STREAM_COUNT;
5532 put_unaligned_be16(stream_id,
5533 &stream_status->stream_identifier);
5534 stream_status->rel_lifetime = stream_id + 1;
5535 }
5536 put_unaligned_be32(offset - 8, &h->len); /* PARAMETER DATA LENGTH */
5537
5538 return fill_from_dev_buffer(scp, arr, min(offset, alloc_len));
5539 }
5540
5541 static int resp_sync_cache(struct scsi_cmnd *scp,
5542 struct sdebug_dev_info *devip)
5543 {
5544 int res = 0;
5545 u64 lba;
5546 u32 num_blocks;
5547 u8 *cmd = scp->cmnd;
5548
5549 if (cmd[0] == SYNCHRONIZE_CACHE) { /* 10 byte cdb */
5550 lba = get_unaligned_be32(cmd + 2);
5551 num_blocks = get_unaligned_be16(cmd + 7);
5552 } else { /* SYNCHRONIZE_CACHE(16) */
5553 lba = get_unaligned_be64(cmd + 2);
5554 num_blocks = get_unaligned_be32(cmd + 10);
5555 }
5556 if (lba + num_blocks > sdebug_capacity) {
5557 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
5558 return check_condition_result;
5559 }
5560 if (!write_since_sync || (cmd[1] & 0x2))
5561 res = SDEG_RES_IMMED_MASK;
5562 else /* delay if write_since_sync and IMMED clear */
5563 write_since_sync = false;
5564 return res;
5565 }
5566
5567 /*
5568 * Assuming LBA+num_blocks is not out-of-range, this function returns
5569 * CONDITION MET if the specified blocks will fit (or already sit) in the
5570 * cache, and GOOD status otherwise. Model a disk with a big cache and
5571 * always yield CONDITION MET. It actually tries to bring the addressed
5572 * range of the in-memory store into the cache associated with the CPU(s).
5573 *
5574 * The opcode 0x34 is also used for READ POSITION by tape devices.
5575 */
5576 enum {SDEBUG_READ_POSITION_ARR_SZ = 20};
5577 static int resp_pre_fetch(struct scsi_cmnd *scp,
5578 struct sdebug_dev_info *devip)
5579 {
5580 int res = 0;
5581 u64 lba;
5582 u64 block, rest = 0;
5583 u32 nblks;
5584 u8 *cmd = scp->cmnd;
5585 struct sdeb_store_info *sip = devip2sip(devip, true);
5586 u8 *fsp = sip->storep;
5587
5588 if (sdebug_ptype == TYPE_TAPE) {
5589 if (cmd[0] == PRE_FETCH) { /* READ POSITION (10) */
5590 int all_length;
5591 unsigned char arr[SDEBUG_READ_POSITION_ARR_SZ];
5592 unsigned int pos;
5593
5594 all_length = get_unaligned_be16(cmd + 7);
5595 if ((cmd[1] & 0xfe) != 0 ||
5596 all_length != 0) { /* only short form */
5597 mk_sense_invalid_fld(scp, SDEB_IN_CDB,
5598 all_length ? 7 : 1, 0);
5599 return check_condition_result;
5600 }
5601 memset(arr, 0, SDEBUG_READ_POSITION_ARR_SZ);
5602 arr[1] = devip->tape_partition;
5603 pos = devip->tape_location[devip->tape_partition];
5604 put_unaligned_be32(pos, arr + 4);
5605 put_unaligned_be32(pos, arr + 8);
5606 return fill_from_dev_buffer(scp, arr,
5607 SDEBUG_READ_POSITION_ARR_SZ);
5608 }
5609 mk_sense_invalid_opcode(scp);
5610 return check_condition_result;
5611 }
5612
5613 if (cmd[0] == PRE_FETCH) { /* 10 byte cdb */
5614 lba = get_unaligned_be32(cmd + 2);
5615 nblks = get_unaligned_be16(cmd + 7);
5616 } else { /* PRE-FETCH(16) */
5617 lba = get_unaligned_be64(cmd + 2);
5618 nblks = get_unaligned_be32(cmd + 10);
5619 }
5620 if (lba + nblks > sdebug_capacity) {
5621 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
5622 return check_condition_result;
5623 }
5624 if (!fsp)
5625 goto fini;
5626 /* PRE-FETCH spec says nothing about LBP or PI so skip them */
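/* As in resp_write_same(), the store may be smaller than the reported
 * capacity, so LBAs wrap: a range that runs past the end of the store
 * is split into a tail at the end plus "rest" sectors from the start.
 */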
5627 block = do_div(lba, sdebug_store_sectors);
5628 if (block + nblks > sdebug_store_sectors)
5629 rest = block + nblks - sdebug_store_sectors;
5630
5631 /* Try to bring the PRE-FETCH range into CPU's cache */
5632 sdeb_data_read_lock(sip);
5633 prefetch_range(fsp + (sdebug_sector_size * block),
5634 (nblks - rest) * sdebug_sector_size);
5635 if (rest)
5636 prefetch_range(fsp, rest * sdebug_sector_size);
5637
5638 sdeb_data_read_unlock(sip);
5639 fini:
5640 if (cmd[1] & 0x2)
5641 res = SDEG_RES_IMMED_MASK;
5642 return res | condition_met_result;
5643 }
5644
5645 #define RL_BUCKET_ELEMS 8
5646
5647 /* Even though each pseudo target has a REPORT LUNS "well known logical unit"
5648 * (W-LUN), the normal Linux scanning logic does not associate it with a
5649 * device (e.g. /dev/sg7). The following magic will make that association:
5650 * "cd /sys/class/scsi_host/host<n> ; echo '- - 49409' > scan"
5651 * where <n> is a host number. If there are multiple targets in a host then
5652 * the above will associate a W-LUN to each target. To only get a W-LUN
5653 * for target 2, then use "echo '- 2 49409' > scan" .
5654 */
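/* The response is an 8 byte header (LUN LIST LENGTH in bytes plus 4
 * reserved bytes) followed by 8 byte LUN entries; it is emitted below
 * in buckets of RL_BUCKET_ELEMS entries so only a small stack buffer
 * is needed however many LUNs are reported.
 */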
5655 static int resp_report_luns(struct scsi_cmnd *scp,
5656 struct sdebug_dev_info *devip)
5657 {
5658 unsigned char *cmd = scp->cmnd;
5659 unsigned int alloc_len;
5660 unsigned char select_report;
5661 u64 lun;
5662 struct scsi_lun *lun_p;
5663 u8 arr[RL_BUCKET_ELEMS * sizeof(struct scsi_lun)];
5664 unsigned int lun_cnt; /* normal LUN count (max: 256) */
5665 unsigned int wlun_cnt; /* report luns W-LUN count */
5666 unsigned int tlun_cnt; /* total LUN count */
5667 unsigned int rlen; /* response length (in bytes) */
5668 int k, j, n, res;
5669 unsigned int off_rsp = 0;
5670 const int sz_lun = sizeof(struct scsi_lun);
5671
5672 clear_luns_changed_on_target(devip);
5673
5674 select_report = cmd[2];
5675 alloc_len = get_unaligned_be32(cmd + 6);
5676
5677 if (alloc_len < 4) {
5678 pr_err("alloc len too small %d\n", alloc_len);
5679 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
5680 return check_condition_result;
5681 }
5682
5683 switch (select_report) {
5684 case 0: /* all LUNs apart from W-LUNs */
5685 lun_cnt = sdebug_max_luns;
5686 wlun_cnt = 0;
5687 break;
5688 case 1: /* only W-LUNs */
5689 lun_cnt = 0;
5690 wlun_cnt = 1;
5691 break;
5692 case 2: /* all LUNs */
5693 lun_cnt = sdebug_max_luns;
5694 wlun_cnt = 1;
5695 break;
5696 case 0x10: /* only administrative LUs */
5697 case 0x11: /* see SPC-5 */
5698 case 0x12: /* only subsidiary LUs owned by referenced LU */
5699 default:
5700 pr_debug("select report invalid %d\n", select_report);
5701 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
5702 return check_condition_result;
5703 }
5704
5705 if (sdebug_no_lun_0 && (lun_cnt > 0))
5706 --lun_cnt;
5707
5708 tlun_cnt = lun_cnt + wlun_cnt;
5709 rlen = tlun_cnt * sz_lun; /* excluding 8 byte header */
5710 scsi_set_resid(scp, scsi_bufflen(scp));
5711 pr_debug("select_report %d luns = %d wluns = %d no_lun0 %d\n",
5712 select_report, lun_cnt, wlun_cnt, sdebug_no_lun_0);
5713
5714 /* loops rely on the response header being the same size as a LUN entry (both 8 bytes) */
5715 lun = sdebug_no_lun_0 ? 1 : 0;
5716 for (k = 0, j = 0, res = 0; true; ++k, j = 0) {
5717 memset(arr, 0, sizeof(arr));
5718 lun_p = (struct scsi_lun *)&arr[0];
5719 if (k == 0) {
5720 put_unaligned_be32(rlen, &arr[0]);
5721 ++lun_p;
5722 j = 1;
5723 }
5724 for ( ; j < RL_BUCKET_ELEMS; ++j, ++lun_p) {
5725 if ((k * RL_BUCKET_ELEMS) + j > lun_cnt)
5726 break;
5727 int_to_scsilun(lun++, lun_p);
5728 if (lun > 1 && sdebug_lun_am == SAM_LUN_AM_FLAT)
5729 lun_p->scsi_lun[0] |= 0x40;
5730 }
5731 if (j < RL_BUCKET_ELEMS)
5732 break;
5733 n = j * sz_lun;
5734 res = p_fill_from_dev_buffer(scp, arr, n, off_rsp);
5735 if (res)
5736 return res;
5737 off_rsp += n;
5738 }
5739 if (wlun_cnt) {
5740 int_to_scsilun(SCSI_W_LUN_REPORT_LUNS, lun_p);
5741 ++j;
5742 }
5743 if (j > 0)
5744 res = p_fill_from_dev_buffer(scp, arr, j * sz_lun, off_rsp);
5745 return res;
5746 }
5747
5748 static int resp_verify(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
5749 {
5750 bool is_bytchk3 = false;
5751 u8 bytchk;
5752 int ret, j;
5753 u32 vnum, a_num, off;
5754 const u32 lb_size = sdebug_sector_size;
5755 u64 lba;
5756 u8 *arr;
5757 u8 *cmd = scp->cmnd;
5758 struct sdeb_store_info *sip = devip2sip(devip, true);
5759
5760 bytchk = (cmd[1] >> 1) & 0x3;
5761 if (bytchk == 0) {
5762 return 0; /* always claim internal verify okay */
5763 } else if (bytchk == 2) {
5764 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
5765 return check_condition_result;
5766 } else if (bytchk == 3) {
5767 is_bytchk3 = true; /* 1 block sent, compared repeatedly */
5768 }
5769 switch (cmd[0]) {
5770 case VERIFY_16:
5771 lba = get_unaligned_be64(cmd + 2);
5772 vnum = get_unaligned_be32(cmd + 10);
5773 break;
5774 case VERIFY: /* is VERIFY(10) */
5775 lba = get_unaligned_be32(cmd + 2);
5776 vnum = get_unaligned_be16(cmd + 7);
5777 break;
5778 default:
5779 mk_sense_invalid_opcode(scp);
5780 return check_condition_result;
5781 }
5782 if (vnum == 0)
5783 return 0; /* not an error */
5784 a_num = is_bytchk3 ? 1 : vnum;
5785 /* Treat following check like one for read (i.e. no write) access */
5786 ret = check_device_access_params(scp, lba, a_num, false);
5787 if (ret)
5788 return ret;
5789
5790 arr = kcalloc(lb_size, vnum, GFP_ATOMIC | __GFP_NOWARN);
5791 if (!arr) {
5792 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
5793 INSUFF_RES_ASCQ);
5794 return check_condition_result;
5795 }
5796 /* Not changing store, so only need read access */
5797 sdeb_data_read_lock(sip);
5798
5799 ret = do_dout_fetch(scp, a_num, arr);
5800 if (ret == -1) {
5801 ret = DID_ERROR << 16;
5802 goto cleanup;
5803 } else if (sdebug_verbose && (ret < (a_num * lb_size))) {
5804 sdev_printk(KERN_INFO, scp->device,
5805 "%s: %s: cdb indicated=%u, IO sent=%d bytes\n",
5806 my_name, __func__, a_num * lb_size, ret);
5807 }
5808 if (is_bytchk3) {
5809 for (j = 1, off = lb_size; j < vnum; ++j, off += lb_size)
5810 memcpy(arr + off, arr, lb_size);
5811 }
5812 ret = 0;
5813 if (!comp_write_worker(sip, lba, vnum, arr, true)) {
5814 mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
5815 ret = check_condition_result;
5816 goto cleanup;
5817 }
5818 cleanup:
5819 sdeb_data_read_unlock(sip);
5820 kfree(arr);
5821 return ret;
5822 }
5823
5824 #define RZONES_DESC_HD 64
5825
5826 /* Report zones depending on start LBA and reporting options */
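/* The response carries a 64 byte header (zone list length, maximum LBA
 * and, when gap zones are present, the zone starting LBA granularity)
 * followed by 64 byte zone descriptors: type, condition, length, start
 * LBA and write pointer.
 */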
5827 static int resp_report_zones(struct scsi_cmnd *scp,
5828 struct sdebug_dev_info *devip)
5829 {
5830 unsigned int rep_max_zones, nrz = 0;
5831 int ret = 0;
5832 u32 alloc_len, rep_opts, rep_len;
5833 bool partial;
5834 u64 lba, zs_lba;
5835 u8 *arr = NULL, *desc;
5836 u8 *cmd = scp->cmnd;
5837 struct sdeb_zone_state *zsp = NULL;
5838 struct sdeb_store_info *sip = devip2sip(devip, false);
5839
5840 if (!sdebug_dev_is_zoned(devip)) {
5841 mk_sense_invalid_opcode(scp);
5842 return check_condition_result;
5843 }
5844 zs_lba = get_unaligned_be64(cmd + 2);
5845 alloc_len = get_unaligned_be32(cmd + 10);
5846 if (alloc_len == 0)
5847 return 0; /* not an error */
5848 rep_opts = cmd[14] & 0x3f;
5849 partial = cmd[14] & 0x80;
5850
5851 if (zs_lba >= sdebug_capacity) {
5852 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
5853 return check_condition_result;
5854 }
5855
5856 rep_max_zones = (alloc_len - 64) >> ilog2(RZONES_DESC_HD);
5857
5858 arr = kzalloc(alloc_len, GFP_ATOMIC | __GFP_NOWARN);
5859 if (!arr) {
5860 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
5861 INSUFF_RES_ASCQ);
5862 return check_condition_result;
5863 }
5864
5865 sdeb_meta_read_lock(sip);
5866
5867 desc = arr + 64;
5868 for (lba = zs_lba; lba < sdebug_capacity;
5869 lba = zsp->z_start + zsp->z_size) {
5870 if (WARN_ONCE(zbc_zone(devip, lba) == zsp, "lba = %llu\n", lba))
5871 break;
5872 zsp = zbc_zone(devip, lba);
5873 switch (rep_opts) {
5874 case 0x00:
5875 /* All zones */
5876 break;
5877 case 0x01:
5878 /* Empty zones */
5879 if (zsp->z_cond != ZC1_EMPTY)
5880 continue;
5881 break;
5882 case 0x02:
5883 /* Implicit open zones */
5884 if (zsp->z_cond != ZC2_IMPLICIT_OPEN)
5885 continue;
5886 break;
5887 case 0x03:
5888 /* Explicit open zones */
5889 if (zsp->z_cond != ZC3_EXPLICIT_OPEN)
5890 continue;
5891 break;
5892 case 0x04:
5893 /* Closed zones */
5894 if (zsp->z_cond != ZC4_CLOSED)
5895 continue;
5896 break;
5897 case 0x05:
5898 /* Full zones */
5899 if (zsp->z_cond != ZC5_FULL)
5900 continue;
5901 break;
5902 case 0x06:
5903 case 0x07:
5904 case 0x10:
5905 /*
5906 * Read-only, offline and reset-WP-recommended zones are
5907 * not emulated: no zones to report.
5908 */
5909 continue;
5910 case 0x11:
5911 /* non-seq-resource set */
5912 if (!zsp->z_non_seq_resource)
5913 continue;
5914 break;
5915 case 0x3e:
5916 /* All zones except gap zones. */
5917 if (zbc_zone_is_gap(zsp))
5918 continue;
5919 break;
5920 case 0x3f:
5921 /* Not write pointer (conventional) zones */
5922 if (zbc_zone_is_seq(zsp))
5923 continue;
5924 break;
5925 default:
5926 mk_sense_buffer(scp, ILLEGAL_REQUEST,
5927 INVALID_FIELD_IN_CDB, 0);
5928 ret = check_condition_result;
5929 goto fini;
5930 }
5931
5932 if (nrz < rep_max_zones) {
5933 /* Fill zone descriptor */
5934 desc[0] = zsp->z_type;
5935 desc[1] = zsp->z_cond << 4;
5936 if (zsp->z_non_seq_resource)
5937 desc[1] |= 1 << 1;
5938 put_unaligned_be64((u64)zsp->z_size, desc + 8);
5939 put_unaligned_be64((u64)zsp->z_start, desc + 16);
5940 put_unaligned_be64((u64)zsp->z_wp, desc + 24);
5941 desc += 64;
5942 }
5943
5944 if (partial && nrz >= rep_max_zones)
5945 break;
5946
5947 nrz++;
5948 }
5949
5950 /* Report header */
5951 /* Zone list length. */
5952 put_unaligned_be32(nrz * RZONES_DESC_HD, arr + 0);
5953 /* Maximum LBA */
5954 put_unaligned_be64(sdebug_capacity - 1, arr + 8);
5955 /* Zone starting LBA granularity. */
5956 if (devip->zcap < devip->zsize)
5957 put_unaligned_be64(devip->zsize, arr + 16);
5958
5959 rep_len = (unsigned long)desc - (unsigned long)arr;
5960 ret = fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, rep_len));
5961
5962 fini:
5963 sdeb_meta_read_unlock(sip);
5964 kfree(arr);
5965 return ret;
5966 }
5967
5968 static int resp_atomic_write(struct scsi_cmnd *scp,
5969 struct sdebug_dev_info *devip)
5970 {
5971 struct sdeb_store_info *sip;
5972 u8 *cmd = scp->cmnd;
5973 u16 boundary, len;
5974 u64 lba, lba_tmp;
5975 int ret;
5976
5977 if (!scsi_debug_atomic_write()) {
5978 mk_sense_invalid_opcode(scp);
5979 return check_condition_result;
5980 }
5981
5982 sip = devip2sip(devip, true);
5983
5984 lba = get_unaligned_be64(cmd + 2);
5985 boundary = get_unaligned_be16(cmd + 10);
5986 len = get_unaligned_be16(cmd + 12);
5987
5988 lba_tmp = lba;
5989 if (sdebug_atomic_wr_align &&
5990 do_div(lba_tmp, sdebug_atomic_wr_align)) {
5991 /* Does not meet alignment requirement */
5992 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
5993 return check_condition_result;
5994 }
5995
5996 if (sdebug_atomic_wr_gran && len % sdebug_atomic_wr_gran) {
5997 /* Does not meet alignment requirement */
5998 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
5999 return check_condition_result;
6000 }
6001
6002 if (boundary > 0) {
6003 if (boundary > sdebug_atomic_wr_max_bndry) {
6004 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 12, -1);
6005 return check_condition_result;
6006 }
6007
6008 if (len > sdebug_atomic_wr_max_length_bndry) {
6009 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 12, -1);
6010 return check_condition_result;
6011 }
6012 } else {
6013 if (len > sdebug_atomic_wr_max_length) {
6014 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 12, -1);
6015 return check_condition_result;
6016 }
6017 }
6018
6019 ret = do_device_access(sip, scp, 0, lba, len, 0, true, true);
6020 if (unlikely(ret == -1))
6021 return DID_ERROR << 16;
6022 if (unlikely(ret != len * sdebug_sector_size))
6023 return DID_ERROR << 16;
6024 return 0;
6025 }
6026
6027 /* Logic transplanted from tcmu-runner, file_zbc.c */
6028 static void zbc_open_all(struct sdebug_dev_info *devip)
6029 {
6030 struct sdeb_zone_state *zsp = &devip->zstate[0];
6031 unsigned int i;
6032
6033 for (i = 0; i < devip->nr_zones; i++, zsp++) {
6034 if (zsp->z_cond == ZC4_CLOSED)
6035 zbc_open_zone(devip, &devip->zstate[i], true);
6036 }
6037 }
6038
6039 static int resp_open_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
6040 {
6041 int res = 0;
6042 u64 z_id;
6043 enum sdebug_z_cond zc;
6044 u8 *cmd = scp->cmnd;
6045 struct sdeb_zone_state *zsp;
6046 bool all = cmd[14] & 0x01;
6047 struct sdeb_store_info *sip = devip2sip(devip, false);
6048
6049 if (!sdebug_dev_is_zoned(devip)) {
6050 mk_sense_invalid_opcode(scp);
6051 return check_condition_result;
6052 }
6053 sdeb_meta_write_lock(sip);
6054
6055 if (all) {
6056 /* Check if all closed zones can be open */
6057 if (devip->max_open &&
6058 devip->nr_exp_open + devip->nr_closed > devip->max_open) {
6059 mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
6060 INSUFF_ZONE_ASCQ);
6061 res = check_condition_result;
6062 goto fini;
6063 }
6064 /* Open all closed zones */
6065 zbc_open_all(devip);
6066 goto fini;
6067 }
6068
6069 /* Open the specified zone */
6070 z_id = get_unaligned_be64(cmd + 2);
6071 if (z_id >= sdebug_capacity) {
6072 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
6073 res = check_condition_result;
6074 goto fini;
6075 }
6076
6077 zsp = zbc_zone(devip, z_id);
6078 if (z_id != zsp->z_start) {
6079 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
6080 res = check_condition_result;
6081 goto fini;
6082 }
6083 if (zbc_zone_is_conv(zsp)) {
6084 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
6085 res = check_condition_result;
6086 goto fini;
6087 }
6088
6089 zc = zsp->z_cond;
6090 if (zc == ZC3_EXPLICIT_OPEN || zc == ZC5_FULL)
6091 goto fini;
6092
6093 if (devip->max_open && devip->nr_exp_open >= devip->max_open) {
6094 mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
6095 INSUFF_ZONE_ASCQ);
6096 res = check_condition_result;
6097 goto fini;
6098 }
6099
6100 zbc_open_zone(devip, zsp, true);
6101 fini:
6102 sdeb_meta_write_unlock(sip);
6103 return res;
6104 }
6105
6106 static void zbc_close_all(struct sdebug_dev_info *devip)
6107 {
6108 unsigned int i;
6109
6110 for (i = 0; i < devip->nr_zones; i++)
6111 zbc_close_zone(devip, &devip->zstate[i]);
6112 }
6113
6114 static int resp_close_zone(struct scsi_cmnd *scp,
6115 struct sdebug_dev_info *devip)
6116 {
6117 int res = 0;
6118 u64 z_id;
6119 u8 *cmd = scp->cmnd;
6120 struct sdeb_zone_state *zsp;
6121 bool all = cmd[14] & 0x01;
6122 struct sdeb_store_info *sip = devip2sip(devip, false);
6123
6124 if (!sdebug_dev_is_zoned(devip)) {
6125 mk_sense_invalid_opcode(scp);
6126 return check_condition_result;
6127 }
6128
6129 sdeb_meta_write_lock(sip);
6130
6131 if (all) {
6132 zbc_close_all(devip);
6133 goto fini;
6134 }
6135
6136 /* Close specified zone */
6137 z_id = get_unaligned_be64(cmd + 2);
6138 if (z_id >= sdebug_capacity) {
6139 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
6140 res = check_condition_result;
6141 goto fini;
6142 }
6143
6144 zsp = zbc_zone(devip, z_id);
6145 if (z_id != zsp->z_start) {
6146 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
6147 res = check_condition_result;
6148 goto fini;
6149 }
6150 if (zbc_zone_is_conv(zsp)) {
6151 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
6152 res = check_condition_result;
6153 goto fini;
6154 }
6155
6156 zbc_close_zone(devip, zsp);
6157 fini:
6158 sdeb_meta_write_unlock(sip);
6159 return res;
6160 }
6161
6162 static void zbc_finish_zone(struct sdebug_dev_info *devip,
6163 struct sdeb_zone_state *zsp, bool empty)
6164 {
6165 enum sdebug_z_cond zc = zsp->z_cond;
6166
6167 if (zc == ZC4_CLOSED || zc == ZC2_IMPLICIT_OPEN ||
6168 zc == ZC3_EXPLICIT_OPEN || (empty && zc == ZC1_EMPTY)) {
6169 if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
6170 zbc_close_zone(devip, zsp);
6171 if (zsp->z_cond == ZC4_CLOSED)
6172 devip->nr_closed--;
6173 zsp->z_wp = zsp->z_start + zsp->z_size;
6174 zsp->z_cond = ZC5_FULL;
6175 }
6176 }
6177
6178 static void zbc_finish_all(struct sdebug_dev_info *devip)
6179 {
6180 unsigned int i;
6181
6182 for (i = 0; i < devip->nr_zones; i++)
6183 zbc_finish_zone(devip, &devip->zstate[i], false);
6184 }
6185
6186 static int resp_finish_zone(struct scsi_cmnd *scp,
6187 struct sdebug_dev_info *devip)
6188 {
6189 struct sdeb_zone_state *zsp;
6190 int res = 0;
6191 u64 z_id;
6192 u8 *cmd = scp->cmnd;
6193 bool all = cmd[14] & 0x01;
6194 struct sdeb_store_info *sip = devip2sip(devip, false);
6195
6196 if (!sdebug_dev_is_zoned(devip)) {
6197 mk_sense_invalid_opcode(scp);
6198 return check_condition_result;
6199 }
6200
6201 sdeb_meta_write_lock(sip);
6202
6203 if (all) {
6204 zbc_finish_all(devip);
6205 goto fini;
6206 }
6207
6208 /* Finish the specified zone */
6209 z_id = get_unaligned_be64(cmd + 2);
6210 if (z_id >= sdebug_capacity) {
6211 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
6212 res = check_condition_result;
6213 goto fini;
6214 }
6215
6216 zsp = zbc_zone(devip, z_id);
6217 if (z_id != zsp->z_start) {
6218 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
6219 res = check_condition_result;
6220 goto fini;
6221 }
6222 if (zbc_zone_is_conv(zsp)) {
6223 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
6224 res = check_condition_result;
6225 goto fini;
6226 }
6227
6228 zbc_finish_zone(devip, zsp, true);
6229 fini:
6230 sdeb_meta_write_unlock(sip);
6231 return res;
6232 }
6233
6234 static void zbc_rwp_zone(struct sdebug_dev_info *devip,
6235 struct sdeb_zone_state *zsp)
6236 {
6237 enum sdebug_z_cond zc;
6238 struct sdeb_store_info *sip = devip2sip(devip, false);
6239
6240 if (!zbc_zone_is_seq(zsp))
6241 return;
6242
6243 zc = zsp->z_cond;
6244 if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
6245 zbc_close_zone(devip, zsp);
6246
6247 if (zsp->z_cond == ZC4_CLOSED)
6248 devip->nr_closed--;
6249
6250 if (zsp->z_wp > zsp->z_start)
6251 memset(sip->storep + zsp->z_start * sdebug_sector_size, 0,
6252 (zsp->z_wp - zsp->z_start) * sdebug_sector_size);
6253
6254 zsp->z_non_seq_resource = false;
6255 zsp->z_wp = zsp->z_start;
6256 zsp->z_cond = ZC1_EMPTY;
6257 }
6258
6259 static void zbc_rwp_all(struct sdebug_dev_info *devip)
6260 {
6261 unsigned int i;
6262
6263 for (i = 0; i < devip->nr_zones; i++)
6264 zbc_rwp_zone(devip, &devip->zstate[i]);
6265 }
6266
6267 static int resp_rwp_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
6268 {
6269 struct sdeb_zone_state *zsp;
6270 int res = 0;
6271 u64 z_id;
6272 u8 *cmd = scp->cmnd;
6273 bool all = cmd[14] & 0x01;
6274 struct sdeb_store_info *sip = devip2sip(devip, false);
6275
6276 if (!sdebug_dev_is_zoned(devip)) {
6277 mk_sense_invalid_opcode(scp);
6278 return check_condition_result;
6279 }
6280
6281 sdeb_meta_write_lock(sip);
6282
6283 if (all) {
6284 zbc_rwp_all(devip);
6285 goto fini;
6286 }
6287
6288 z_id = get_unaligned_be64(cmd + 2);
6289 if (z_id >= sdebug_capacity) {
6290 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
6291 res = check_condition_result;
6292 goto fini;
6293 }
6294
6295 zsp = zbc_zone(devip, z_id);
6296 if (z_id != zsp->z_start) {
6297 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
6298 res = check_condition_result;
6299 goto fini;
6300 }
6301 if (zbc_zone_is_conv(zsp)) {
6302 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
6303 res = check_condition_result;
6304 goto fini;
6305 }
6306
6307 zbc_rwp_zone(devip, zsp);
6308 fini:
6309 sdeb_meta_write_unlock(sip);
6310 return res;
6311 }
6312
6313 static u32 get_tag(struct scsi_cmnd *cmnd)
6314 {
6315 return blk_mq_unique_tag(scsi_cmd_to_rq(cmnd));
6316 }
6317
6318 /* Queued (deferred) command completions converge here. */
6319 static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
6320 {
6321 struct sdebug_scsi_cmd *sdsc = container_of(sd_dp,
6322 typeof(*sdsc), sd_dp);
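/* The per-command private data sits immediately after the
 * struct scsi_cmnd, so stepping back one scsi_cmnd from sdsc
 * recovers the owning command.
 */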
6323 struct scsi_cmnd *scp = (struct scsi_cmnd *)sdsc - 1;
6324 unsigned long flags;
6325 bool aborted;
6326
6327 if (sdebug_statistics) {
6328 atomic_inc(&sdebug_completions);
6329 if (raw_smp_processor_id() != sd_dp->issuing_cpu)
6330 atomic_inc(&sdebug_miss_cpus);
6331 }
6332
6333 if (!scp) {
6334 pr_err("scmd=NULL\n");
6335 return;
6336 }
6337
6338 spin_lock_irqsave(&sdsc->lock, flags);
6339 aborted = sd_dp->aborted;
6340 if (unlikely(aborted))
6341 sd_dp->aborted = false;
6342
6343 spin_unlock_irqrestore(&sdsc->lock, flags);
6344
6345 if (aborted) {
6346 pr_info("bypassing scsi_done() due to aborted cmd, kicking-off EH\n");
6347 blk_abort_request(scsi_cmd_to_rq(scp));
6348 return;
6349 }
6350
6351 scsi_done(scp); /* callback to mid level */
6352 }
6353
6354 /* When high resolution timer goes off this function is called. */
6355 static enum hrtimer_restart sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
6356 {
6357 struct sdebug_defer *sd_dp = container_of(timer, struct sdebug_defer,
6358 hrt);
6359 sdebug_q_cmd_complete(sd_dp);
6360 return HRTIMER_NORESTART;
6361 }
6362
6363 /* When work queue schedules work, it calls this function. */
6364 static void sdebug_q_cmd_wq_complete(struct work_struct *work)
6365 {
6366 struct sdebug_defer *sd_dp = container_of(work, struct sdebug_defer,
6367 ew.work);
6368 sdebug_q_cmd_complete(sd_dp);
6369 }
6370
6371 static bool got_shared_uuid;
6372 static uuid_t shared_uuid;
6373
6374 static int sdebug_device_create_zones(struct sdebug_dev_info *devip)
6375 {
6376 struct sdeb_zone_state *zsp;
6377 sector_t capacity = get_sdebug_capacity();
6378 sector_t conv_capacity;
6379 sector_t zstart = 0;
6380 unsigned int i;
6381
6382 /*
6383 * Set the zone size: if sdeb_zbc_zone_size_mb is not set, figure out
6384 * a zone size allowing for at least 4 zones on the device. Otherwise,
6385 * use the specified zone size checking that at least 2 zones can be
6386 * created for the device.
6387 */
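/*
 * Worked example (assuming DEF_ZBC_ZONE_SIZE_MB is 128 and 512 byte
 * sectors): a 128 MiB capacity gives 262144 sectors and an initial
 * zsize of 262144 sectors; the loop halves zsize down to 65536 sectors
 * (32 MiB), the first value for which at least 4 zones fit.
 */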
6388 if (!sdeb_zbc_zone_size_mb) {
6389 devip->zsize = (DEF_ZBC_ZONE_SIZE_MB * SZ_1M)
6390 >> ilog2(sdebug_sector_size);
6391 while (capacity < devip->zsize << 2 && devip->zsize >= 2)
6392 devip->zsize >>= 1;
6393 if (devip->zsize < 2) {
6394 pr_err("Device capacity too small\n");
6395 return -EINVAL;
6396 }
6397 } else {
6398 if (!is_power_of_2(sdeb_zbc_zone_size_mb)) {
6399 pr_err("Zone size is not a power of 2\n");
6400 return -EINVAL;
6401 }
6402 devip->zsize = (sdeb_zbc_zone_size_mb * SZ_1M)
6403 >> ilog2(sdebug_sector_size);
6404 if (devip->zsize >= capacity) {
6405 pr_err("Zone size too large for device capacity\n");
6406 return -EINVAL;
6407 }
6408 }
6409
6410 devip->zsize_shift = ilog2(devip->zsize);
6411 devip->nr_zones = (capacity + devip->zsize - 1) >> devip->zsize_shift;
6412
6413 if (sdeb_zbc_zone_cap_mb == 0) {
6414 devip->zcap = devip->zsize;
6415 } else {
6416 devip->zcap = (sdeb_zbc_zone_cap_mb * SZ_1M) >>
6417 ilog2(sdebug_sector_size);
6418 if (devip->zcap > devip->zsize) {
6419 pr_err("Zone capacity too large\n");
6420 return -EINVAL;
6421 }
6422 }
6423
6424 conv_capacity = (sector_t)sdeb_zbc_nr_conv << devip->zsize_shift;
6425 if (conv_capacity >= capacity) {
6426 pr_err("Number of conventional zones too large\n");
6427 return -EINVAL;
6428 }
6429 devip->nr_conv_zones = sdeb_zbc_nr_conv;
6430 devip->nr_seq_zones = ALIGN(capacity - conv_capacity, devip->zsize) >>
6431 devip->zsize_shift;
6432 devip->nr_zones = devip->nr_conv_zones + devip->nr_seq_zones;
6433
6434 /* Add gap zones if zone capacity is smaller than the zone size */
6435 if (devip->zcap < devip->zsize)
6436 devip->nr_zones += devip->nr_seq_zones;
6437
6438 if (devip->zoned) {
6439 /* zbc_max_open_zones can be 0, meaning "not reported" */
6440 if (sdeb_zbc_max_open >= devip->nr_zones - 1)
6441 devip->max_open = (devip->nr_zones - 1) / 2;
6442 else
6443 devip->max_open = sdeb_zbc_max_open;
6444 }
6445
6446 devip->zstate = kcalloc(devip->nr_zones,
6447 sizeof(struct sdeb_zone_state), GFP_KERNEL);
6448 if (!devip->zstate)
6449 return -ENOMEM;
6450
6451 for (i = 0; i < devip->nr_zones; i++) {
6452 zsp = &devip->zstate[i];
6453
6454 zsp->z_start = zstart;
6455
6456 if (i < devip->nr_conv_zones) {
6457 zsp->z_type = ZBC_ZTYPE_CNV;
6458 zsp->z_cond = ZBC_NOT_WRITE_POINTER;
6459 zsp->z_wp = (sector_t)-1;
6460 zsp->z_size =
6461 min_t(u64, devip->zsize, capacity - zstart);
6462 } else if ((zstart & (devip->zsize - 1)) == 0) {
6463 if (devip->zoned)
6464 zsp->z_type = ZBC_ZTYPE_SWR;
6465 else
6466 zsp->z_type = ZBC_ZTYPE_SWP;
6467 zsp->z_cond = ZC1_EMPTY;
6468 zsp->z_wp = zsp->z_start;
6469 zsp->z_size =
6470 min_t(u64, devip->zcap, capacity - zstart);
6471 } else {
6472 zsp->z_type = ZBC_ZTYPE_GAP;
6473 zsp->z_cond = ZBC_NOT_WRITE_POINTER;
6474 zsp->z_wp = (sector_t)-1;
6475 zsp->z_size = min_t(u64, devip->zsize - devip->zcap,
6476 capacity - zstart);
6477 }
6478
6479 WARN_ON_ONCE((int)zsp->z_size <= 0);
6480 zstart += zsp->z_size;
6481 }
6482
6483 return 0;
6484 }
6485
6486 static struct sdebug_dev_info *sdebug_device_create(
6487 struct sdebug_host_info *sdbg_host, gfp_t flags)
6488 {
6489 struct sdebug_dev_info *devip;
6490
6491 devip = kzalloc(sizeof(*devip), flags);
6492 if (devip) {
6493 if (sdebug_uuid_ctl == 1)
6494 uuid_gen(&devip->lu_name);
6495 else if (sdebug_uuid_ctl == 2) {
6496 if (got_shared_uuid)
6497 devip->lu_name = shared_uuid;
6498 else {
6499 uuid_gen(&shared_uuid);
6500 got_shared_uuid = true;
6501 devip->lu_name = shared_uuid;
6502 }
6503 }
6504 devip->sdbg_host = sdbg_host;
6505 if (sdeb_zbc_in_use) {
6506 devip->zoned = sdeb_zbc_model == BLK_ZONED_HM;
6507 if (sdebug_device_create_zones(devip)) {
6508 kfree(devip);
6509 return NULL;
6510 }
6511 } else {
6512 devip->zoned = false;
6513 }
6514 if (sdebug_ptype == TYPE_TAPE) {
6515 devip->tape_density = TAPE_DEF_DENSITY;
6516 devip->tape_blksize = TAPE_DEF_BLKSIZE;
6517 }
6518 devip->create_ts = ktime_get_boottime();
6519 atomic_set(&devip->stopped, (sdeb_tur_ms_to_ready > 0 ? 2 : 0));
6520 spin_lock_init(&devip->list_lock);
6521 INIT_LIST_HEAD(&devip->inject_err_list);
6522 list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
6523 }
6524 return devip;
6525 }
6526
6527 static struct sdebug_dev_info *find_build_dev_info(struct scsi_device *sdev)
6528 {
6529 struct sdebug_host_info *sdbg_host;
6530 struct sdebug_dev_info *open_devip = NULL;
6531 struct sdebug_dev_info *devip;
6532
6533 sdbg_host = shost_to_sdebug_host(sdev->host);
6534
6535 list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
6536 if ((devip->used) && (devip->channel == sdev->channel) &&
6537 (devip->target == sdev->id) &&
6538 (devip->lun == sdev->lun))
6539 return devip;
6540 else {
6541 if ((!devip->used) && (!open_devip))
6542 open_devip = devip;
6543 }
6544 }
6545 if (!open_devip) { /* try and make a new one */
6546 open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
6547 if (!open_devip) {
6548 pr_err("out of memory at line %d\n", __LINE__);
6549 return NULL;
6550 }
6551 }
6552
6553 open_devip->channel = sdev->channel;
6554 open_devip->target = sdev->id;
6555 open_devip->lun = sdev->lun;
6556 open_devip->sdbg_host = sdbg_host;
6557 set_bit(SDEBUG_UA_POOCCUR, open_devip->uas_bm);
6558 open_devip->used = true;
6559 return open_devip;
6560 }
6561
6562 static int scsi_debug_sdev_init(struct scsi_device *sdp)
6563 {
6564 if (sdebug_verbose)
6565 pr_info("sdev_init <%u %u %u %llu>\n",
6566 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
6567
6568 return 0;
6569 }
6570
6571 static int scsi_debug_sdev_configure(struct scsi_device *sdp,
6572 struct queue_limits *lim)
6573 {
6574 struct sdebug_dev_info *devip =
6575 (struct sdebug_dev_info *)sdp->hostdata;
6576 struct dentry *dentry;
6577
6578 if (sdebug_verbose)
6579 pr_info("sdev_configure <%u %u %u %llu>\n",
6580 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
6581 if (sdp->host->max_cmd_len != SDEBUG_MAX_CMD_LEN)
6582 sdp->host->max_cmd_len = SDEBUG_MAX_CMD_LEN;
6583 if (devip == NULL) {
6584 devip = find_build_dev_info(sdp);
6585 if (devip == NULL)
6586 return 1; /* no resources, will be marked offline */
6587 }
6588 if (sdebug_ptype == TYPE_TAPE) {
6589 if (!devip->tape_blocks[0]) {
6590 devip->tape_blocks[0] =
6591 kcalloc(TAPE_UNITS, sizeof(struct tape_block),
6592 GFP_KERNEL);
6593 if (!devip->tape_blocks[0])
6594 return 1;
6595 }
6596 devip->tape_pending_nbr_partitions = -1;
6597 if (partition_tape(devip, 1, TAPE_UNITS, 0) < 0) {
6598 kfree(devip->tape_blocks[0]);
6599 devip->tape_blocks[0] = NULL;
6600 return 1;
6601 }
6602 }
6603 sdp->hostdata = devip;
6604 if (sdebug_no_uld)
6605 sdp->no_uld_attach = 1;
6606 config_cdb_len(sdp);
6607
6608 if (sdebug_allow_restart)
6609 sdp->allow_restart = 1;
6610
6611 devip->debugfs_entry = debugfs_create_dir(dev_name(&sdp->sdev_dev),
6612 sdebug_debugfs_root);
6613 if (IS_ERR_OR_NULL(devip->debugfs_entry))
6614 pr_info("%s: failed to create debugfs directory for device %s\n",
6615 __func__, dev_name(&sdp->sdev_gendev));
6616
6617 dentry = debugfs_create_file("error", 0600, devip->debugfs_entry, sdp,
6618 &sdebug_error_fops);
6619 if (IS_ERR_OR_NULL(dentry))
6620 pr_info("%s: failed to create error file for device %s\n",
6621 __func__, dev_name(&sdp->sdev_gendev));
6622
6623 return 0;
6624 }
6625
6626 static void scsi_debug_sdev_destroy(struct scsi_device *sdp)
6627 {
6628 struct sdebug_dev_info *devip =
6629 (struct sdebug_dev_info *)sdp->hostdata;
6630 struct sdebug_err_inject *err;
6631
6632 if (sdebug_verbose)
6633 pr_info("sdev_destroy <%u %u %u %llu>\n",
6634 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
6635
6636 if (!devip)
6637 return;
6638
6639 spin_lock(&devip->list_lock);
6640 list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
6641 list_del_rcu(&err->list);
6642 call_rcu(&err->rcu, sdebug_err_free);
6643 }
6644 spin_unlock(&devip->list_lock);
6645
6646 debugfs_remove(devip->debugfs_entry);
6647
6648 if (sdebug_ptype == TYPE_TAPE) {
6649 kfree(devip->tape_blocks[0]);
6650 devip->tape_blocks[0] = NULL;
6651 }
6652
6653 /* make this slot available for re-use */
6654 devip->used = false;
6655 sdp->hostdata = NULL;
6656 }
6657
6658 /* Returns true if cancelled or not running callback. */
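/* A deferred command is pending either as a high-resolution timer, a
 * workqueue item or a poll-completed entry (SDEB_DEFER_HRT, _WQ and
 * _POLL below); stopping it means cancelling whichever one is armed.
 */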
6659 static bool scsi_debug_stop_cmnd(struct scsi_cmnd *cmnd)
6660 {
6661 struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmnd);
6662 struct sdebug_defer *sd_dp = &sdsc->sd_dp;
6663 enum sdeb_defer_type defer_t = READ_ONCE(sd_dp->defer_t);
6664
6665 lockdep_assert_held(&sdsc->lock);
6666
6667 if (defer_t == SDEB_DEFER_HRT) {
6668 int res = hrtimer_try_to_cancel(&sd_dp->hrt);
6669
6670 switch (res) {
6671 case -1: /* -1 It's executing the CB */
6672 return false;
6673 case 0: /* Not active, it must have already run */
6674 case 1: /* Was active, we've now cancelled */
6675 default:
6676 return true;
6677 }
6678 } else if (defer_t == SDEB_DEFER_WQ) {
6679 /* Cancel if pending */
6680 if (cancel_work(&sd_dp->ew.work))
6681 return true;
6682 /* callback may be running, so return false */
6683 return false;
6684 } else if (defer_t == SDEB_DEFER_POLL) {
6685 return true;
6686 }
6687
6688 return false;
6689 }
6690
6691 /*
6692 * Called from scsi_debug_abort() only, which is for timed-out cmd.
6693 */
6694 static bool scsi_debug_abort_cmnd(struct scsi_cmnd *cmnd)
6695 {
6696 struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmnd);
6697 unsigned long flags;
6698 bool res;
6699
6700 spin_lock_irqsave(&sdsc->lock, flags);
6701 res = scsi_debug_stop_cmnd(cmnd);
6702 spin_unlock_irqrestore(&sdsc->lock, flags);
6703
6704 return res;
6705 }
6706
6707 /*
6708 * All we can do is set the cmnd as internally aborted and wait for it to
6709 * finish. We cannot call scsi_done() as normal completion path may do that.
6710 */
6711 static bool sdebug_stop_cmnd(struct request *rq, void *data)
6712 {
6713 scsi_debug_abort_cmnd(blk_mq_rq_to_pdu(rq));
6714
6715 return true;
6716 }
6717
6718 /* Deletes (stops) timers or work queues of all queued commands */
6719 static void stop_all_queued(void)
6720 {
6721 struct sdebug_host_info *sdhp;
6722
6723 mutex_lock(&sdebug_host_list_mutex);
6724 list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
6725 struct Scsi_Host *shost = sdhp->shost;
6726
6727 blk_mq_tagset_busy_iter(&shost->tag_set, sdebug_stop_cmnd, NULL);
6728 }
6729 mutex_unlock(&sdebug_host_list_mutex);
6730 }
6731
6732 static int sdebug_fail_abort(struct scsi_cmnd *cmnd)
6733 {
6734 struct scsi_device *sdp = cmnd->device;
6735 struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
6736 struct sdebug_err_inject *err;
6737 unsigned char *cmd = cmnd->cmnd;
6738 int ret = 0;
6739
6740 if (devip == NULL)
6741 return 0;
6742
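	/*
	 * Injection budget (a reading of the loop below): a negative err->cnt
	 * fails the next |cnt| matching commands and increments toward zero,
	 * after which !!err->cnt evaluates to 0 and aborts succeed again;
	 * a positive cnt is never decremented, so it fails indefinitely.
	 */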
6743 rcu_read_lock();
6744 list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
6745 if (err->type == ERR_ABORT_CMD_FAILED &&
6746 (err->cmd == cmd[0] || err->cmd == 0xff)) {
6747 ret = !!err->cnt;
6748 if (err->cnt < 0)
6749 err->cnt++;
6750
6751 rcu_read_unlock();
6752 return ret;
6753 }
6754 }
6755 rcu_read_unlock();
6756
6757 return 0;
6758 }
6759
6760 static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
6761 {
6762 bool aborted = scsi_debug_abort_cmnd(SCpnt);
6763 u8 *cmd = SCpnt->cmnd;
6764 u8 opcode = cmd[0];
6765
6766 ++num_aborts;
6767
6768 if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
6769 sdev_printk(KERN_INFO, SCpnt->device,
6770 "%s: command%s found\n", __func__,
6771 aborted ? "" : " not");
6772
6774 if (sdebug_fail_abort(SCpnt)) {
6775 scmd_printk(KERN_INFO, SCpnt, "fail abort command 0x%x\n",
6776 opcode);
6777 return FAILED;
6778 }
6779
6780 	if (!aborted)
6781 return FAILED;
6782
6783 return SUCCESS;
6784 }
6785
6786 static bool scsi_debug_stop_all_queued_iter(struct request *rq, void *data)
6787 {
6788 struct scsi_device *sdp = data;
6789 struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
6790
6791 if (scmd->device == sdp)
6792 scsi_debug_abort_cmnd(scmd);
6793
6794 return true;
6795 }
6796
6797 /* Deletes (stops) timers or work queues of all queued commands per sdev */
6798 static void scsi_debug_stop_all_queued(struct scsi_device *sdp)
6799 {
6800 struct Scsi_Host *shost = sdp->host;
6801
6802 blk_mq_tagset_busy_iter(&shost->tag_set,
6803 scsi_debug_stop_all_queued_iter, sdp);
6804 }
6805
6806 static int sdebug_fail_lun_reset(struct scsi_cmnd *cmnd)
6807 {
6808 struct scsi_device *sdp = cmnd->device;
6809 struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
6810 struct sdebug_err_inject *err;
6811 unsigned char *cmd = cmnd->cmnd;
6812 int ret = 0;
6813
6814 if (devip == NULL)
6815 return 0;
6816
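	/* Same err->cnt injection-budget semantics as sdebug_fail_abort() */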
6817 rcu_read_lock();
6818 list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
6819 if (err->type == ERR_LUN_RESET_FAILED &&
6820 (err->cmd == cmd[0] || err->cmd == 0xff)) {
6821 ret = !!err->cnt;
6822 if (err->cnt < 0)
6823 err->cnt++;
6824
6825 rcu_read_unlock();
6826 return ret;
6827 }
6828 }
6829 rcu_read_unlock();
6830
6831 return 0;
6832 }
6833
6834 static void scsi_tape_reset_clear(struct sdebug_dev_info *devip)
6835 {
6836 if (sdebug_ptype == TYPE_TAPE) {
6837 int i;
6838
6839 devip->tape_blksize = TAPE_DEF_BLKSIZE;
6840 devip->tape_density = TAPE_DEF_DENSITY;
6841 devip->tape_partition = 0;
6842 devip->tape_dce = 0;
6843 for (i = 0; i < TAPE_MAX_PARTITIONS; i++)
6844 devip->tape_location[i] = 0;
6845 devip->tape_pending_nbr_partitions = -1;
6846 /* Don't reset partitioning? */
6847 }
6848 }
6849
6850 static int scsi_debug_device_reset(struct scsi_cmnd *SCpnt)
6851 {
6852 struct scsi_device *sdp = SCpnt->device;
6853 struct sdebug_dev_info *devip = sdp->hostdata;
6854 u8 *cmd = SCpnt->cmnd;
6855 u8 opcode = cmd[0];
6856
6857 ++num_dev_resets;
6858
6859 if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
6860 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
6861
6862 scsi_debug_stop_all_queued(sdp);
6863 if (devip) {
6864 set_bit(SDEBUG_UA_POR, devip->uas_bm);
6865 scsi_tape_reset_clear(devip);
6866 }
6867
6868 if (sdebug_fail_lun_reset(SCpnt)) {
6869 scmd_printk(KERN_INFO, SCpnt, "fail lun reset 0x%x\n", opcode);
6870 return FAILED;
6871 }
6872
6873 return SUCCESS;
6874 }
6875
6876 static int sdebug_fail_target_reset(struct scsi_cmnd *cmnd)
6877 {
6878 struct scsi_target *starget = scsi_target(cmnd->device);
6879 struct sdebug_target_info *targetip =
6880 (struct sdebug_target_info *)starget->hostdata;
6881
6882 if (targetip)
6883 return targetip->reset_fail;
6884
6885 return 0;
6886 }
6887
6888 static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt)
6889 {
6890 struct scsi_device *sdp = SCpnt->device;
6891 struct sdebug_host_info *sdbg_host = shost_to_sdebug_host(sdp->host);
6892 struct sdebug_dev_info *devip;
6893 u8 *cmd = SCpnt->cmnd;
6894 u8 opcode = cmd[0];
6895 int k = 0;
6896
6897 ++num_target_resets;
6898 if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
6899 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
6900
6901 list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
6902 if (devip->target == sdp->id) {
6903 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
6904 scsi_tape_reset_clear(devip);
6905 ++k;
6906 }
6907 }
6908
6909 if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
6910 sdev_printk(KERN_INFO, sdp,
6911 "%s: %d device(s) found in target\n", __func__, k);
6912
6913 if (sdebug_fail_target_reset(SCpnt)) {
6914 scmd_printk(KERN_INFO, SCpnt, "fail target reset 0x%x\n",
6915 opcode);
6916 return FAILED;
6917 }
6918
6919 return SUCCESS;
6920 }
6921
6922 static int scsi_debug_bus_reset(struct scsi_cmnd *SCpnt)
6923 {
6924 struct scsi_device *sdp = SCpnt->device;
6925 struct sdebug_host_info *sdbg_host = shost_to_sdebug_host(sdp->host);
6926 struct sdebug_dev_info *devip;
6927 int k = 0;
6928
6929 ++num_bus_resets;
6930
6931 if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
6932 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
6933
6934 list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
6935 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
6936 scsi_tape_reset_clear(devip);
6937 ++k;
6938 }
6939
6940 if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
6941 sdev_printk(KERN_INFO, sdp,
6942 "%s: %d device(s) found in host\n", __func__, k);
6943 return SUCCESS;
6944 }
6945
6946 static int scsi_debug_host_reset(struct scsi_cmnd *SCpnt)
6947 {
6948 struct sdebug_host_info *sdbg_host;
6949 struct sdebug_dev_info *devip;
6950 int k = 0;
6951
6952 ++num_host_resets;
6953 if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
6954 sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__);
6955 mutex_lock(&sdebug_host_list_mutex);
6956 list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
6957 list_for_each_entry(devip, &sdbg_host->dev_info_list,
6958 dev_list) {
6959 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
6960 scsi_tape_reset_clear(devip);
6961 ++k;
6962 }
6963 }
6964 mutex_unlock(&sdebug_host_list_mutex);
6965 stop_all_queued();
6966 if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
6967 sdev_printk(KERN_INFO, SCpnt->device,
6968 "%s: %d device(s) found\n", __func__, k);
6969 return SUCCESS;
6970 }
6971
6972 static void sdebug_build_parts(unsigned char *ramp, unsigned long store_size)
6973 {
6974 struct msdos_partition *pp;
6975 int starts[SDEBUG_MAX_PARTS + 2], max_part_secs;
6976 int sectors_per_part, num_sectors, k;
6977 int heads_by_sects, start_sec, end_sec;
6978
6979 /* assume partition table already zeroed */
6980 if ((sdebug_num_parts < 1) || (store_size < 1048576))
6981 return;
6982 if (sdebug_num_parts > SDEBUG_MAX_PARTS) {
6983 sdebug_num_parts = SDEBUG_MAX_PARTS;
6984 pr_warn("reducing partitions to %d\n", SDEBUG_MAX_PARTS);
6985 }
6986 num_sectors = (int)get_sdebug_capacity();
6987 sectors_per_part = (num_sectors - sdebug_sectors_per)
6988 / sdebug_num_parts;
6989 heads_by_sects = sdebug_heads * sdebug_sectors_per;
6990 starts[0] = sdebug_sectors_per;
6991 max_part_secs = sectors_per_part;
6992 for (k = 1; k < sdebug_num_parts; ++k) {
6993 starts[k] = ((k * sectors_per_part) / heads_by_sects)
6994 * heads_by_sects;
6995 if (starts[k] - starts[k - 1] < max_part_secs)
6996 max_part_secs = starts[k] - starts[k - 1];
6997 }
6998 starts[sdebug_num_parts] = num_sectors;
6999 starts[sdebug_num_parts + 1] = 0;
7000
7001 ramp[510] = 0x55; /* magic partition markings */
7002 ramp[511] = 0xAA;
7003 pp = (struct msdos_partition *)(ramp + 0x1be);
7004 for (k = 0; starts[k + 1]; ++k, ++pp) {
7005 start_sec = starts[k];
7006 end_sec = starts[k] + max_part_secs - 1;
7007 pp->boot_ind = 0;
7008
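		/*
		 * Legacy CHS encoding of the absolute sector numbers:
		 * cyl = sec / (heads * sectors per track), head is the
		 * remainder divided by sectors per track, and the sector
		 * field is 1-based within its track.
		 */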
7009 pp->cyl = start_sec / heads_by_sects;
7010 pp->head = (start_sec - (pp->cyl * heads_by_sects))
7011 / sdebug_sectors_per;
7012 pp->sector = (start_sec % sdebug_sectors_per) + 1;
7013
7014 pp->end_cyl = end_sec / heads_by_sects;
7015 pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
7016 / sdebug_sectors_per;
7017 pp->end_sector = (end_sec % sdebug_sectors_per) + 1;
7018
7019 pp->start_sect = cpu_to_le32(start_sec);
7020 pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1);
7021 pp->sys_ind = 0x83; /* plain Linux partition */
7022 }
7023 }
7024
7025 static void block_unblock_all_queues(bool block)
7026 {
7027 struct sdebug_host_info *sdhp;
7028
7029 lockdep_assert_held(&sdebug_host_list_mutex);
7030
7031 list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
7032 struct Scsi_Host *shost = sdhp->shost;
7033
7034 if (block)
7035 scsi_block_requests(shost);
7036 else
7037 scsi_unblock_requests(shost);
7038 }
7039 }
7040
7041 /* Adjust (by rounding down) the sdebug_cmnd_count so abs(every_nth)-1
7042 * commands will be processed normally before triggers occur.
7043 */
7044 static void tweak_cmnd_count(void)
7045 {
7046 int count, modulo;
7047
7048 modulo = abs(sdebug_every_nth);
7049 if (modulo < 2)
7050 return;
7051
7052 mutex_lock(&sdebug_host_list_mutex);
7053 block_unblock_all_queues(true);
7054 count = atomic_read(&sdebug_cmnd_count);
7055 atomic_set(&sdebug_cmnd_count, (count / modulo) * modulo);
7056 block_unblock_all_queues(false);
7057 mutex_unlock(&sdebug_host_list_mutex);
7058 }
7059
7060 static void clear_queue_stats(void)
7061 {
7062 atomic_set(&sdebug_cmnd_count, 0);
7063 atomic_set(&sdebug_completions, 0);
7064 atomic_set(&sdebug_miss_cpus, 0);
7065 atomic_set(&sdebug_a_tsf, 0);
7066 }
7067
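/*
 * True when the global command count is an exact multiple of |every_nth|,
 * which is when the configured error injection should fire.
 */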
7068 static bool inject_on_this_cmd(void)
7069 {
7070 if (sdebug_every_nth == 0)
7071 return false;
7072 return (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) == 0;
7073 }
7074
7075 #define INCLUSIVE_TIMING_MAX_NS 1000000 /* 1 millisecond */
7076
7077 /* Complete the processing of the thread that queued a SCSI command to this
7078 * driver. It either completes the command by calling cmnd_done() or
7079 * schedules a hr timer or work queue then returns 0. Returns
7080 * SCSI_MLQUEUE_HOST_BUSY if temporarily out of resources.
7081 */
7082 static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
7083 int scsi_result,
7084 int (*pfp)(struct scsi_cmnd *,
7085 struct sdebug_dev_info *),
7086 int delta_jiff, int ndelay)
7087 {
7088 struct request *rq = scsi_cmd_to_rq(cmnd);
7089 bool polled = rq->cmd_flags & REQ_POLLED;
7090 struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmnd);
7091 unsigned long flags;
7092 u64 ns_from_boot = 0;
7093 struct scsi_device *sdp;
7094 struct sdebug_defer *sd_dp;
7095
7096 if (unlikely(devip == NULL)) {
7097 if (scsi_result == 0)
7098 scsi_result = DID_NO_CONNECT << 16;
7099 goto respond_in_thread;
7100 }
7101 sdp = cmnd->device;
7102
7103 if (delta_jiff == 0)
7104 goto respond_in_thread;
7105
7107 if (unlikely(sdebug_every_nth && (SDEBUG_OPT_RARE_TSF & sdebug_opts) &&
7108 (scsi_result == 0))) {
7109 int num_in_q = scsi_device_busy(sdp);
7110 int qdepth = cmnd->device->queue_depth;
7111
7112 if ((num_in_q == qdepth) &&
7113 (atomic_inc_return(&sdebug_a_tsf) >=
7114 abs(sdebug_every_nth))) {
7115 atomic_set(&sdebug_a_tsf, 0);
7116 scsi_result = device_qfull_result;
7117
7118 if (unlikely(SDEBUG_OPT_Q_NOISE & sdebug_opts))
7119 sdev_printk(KERN_INFO, sdp, "%s: num_in_q=%d +1, <inject> status: TASK SET FULL\n",
7120 __func__, num_in_q);
7121 }
7122 }
7123
7124 sd_dp = &sdsc->sd_dp;
7125
7126 if (polled || (ndelay > 0 && ndelay < INCLUSIVE_TIMING_MAX_NS))
7127 ns_from_boot = ktime_get_boottime_ns();
7128
7129 /* one of the resp_*() response functions is called here */
7130 cmnd->result = pfp ? pfp(cmnd, devip) : 0;
7131 if (cmnd->result & SDEG_RES_IMMED_MASK) {
7132 cmnd->result &= ~SDEG_RES_IMMED_MASK;
7133 delta_jiff = ndelay = 0;
7134 }
7135 if (cmnd->result == 0 && scsi_result != 0)
7136 cmnd->result = scsi_result;
7137 if (cmnd->result == 0 && unlikely(sdebug_opts & SDEBUG_OPT_TRANSPORT_ERR)) {
7138 if (atomic_read(&sdeb_inject_pending)) {
7139 mk_sense_buffer(cmnd, ABORTED_COMMAND, TRANSPORT_PROBLEM, ACK_NAK_TO);
7140 atomic_set(&sdeb_inject_pending, 0);
7141 cmnd->result = check_condition_result;
7142 }
7143 }
7144
7145 if (unlikely(sdebug_verbose && cmnd->result))
7146 sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n",
7147 __func__, cmnd->result);
7148
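	/*
	 * Dispatch the deferred completion: positive delays arm a pinned
	 * hrtimer (or just record a completion timestamp for polled
	 * requests); delta_jiff < 0 falls through to the work queue path.
	 */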
7149 if (delta_jiff > 0 || ndelay > 0) {
7150 ktime_t kt;
7151
7152 if (delta_jiff > 0) {
7153 u64 ns = jiffies_to_nsecs(delta_jiff);
7154
7155 if (sdebug_random && ns < U32_MAX) {
7156 ns = get_random_u32_below((u32)ns);
7157 } else if (sdebug_random) {
7158 ns >>= 12; /* scale to 4 usec precision */
7159 if (ns < U32_MAX) /* over 4 hours max */
7160 ns = get_random_u32_below((u32)ns);
7161 ns <<= 12;
7162 }
7163 kt = ns_to_ktime(ns);
7164 } else { /* ndelay has a 4.2 second max */
7165 kt = sdebug_random ? get_random_u32_below((u32)ndelay) :
7166 (u32)ndelay;
7167 if (ndelay < INCLUSIVE_TIMING_MAX_NS) {
7168 u64 d = ktime_get_boottime_ns() - ns_from_boot;
7169
7170 if (kt <= d) { /* elapsed duration >= kt */
7171 /* call scsi_done() from this thread */
7172 scsi_done(cmnd);
7173 return 0;
7174 }
7175 /* otherwise reduce kt by elapsed time */
7176 kt -= d;
7177 }
7178 }
7179 if (sdebug_statistics)
7180 sd_dp->issuing_cpu = raw_smp_processor_id();
7181 if (polled) {
7182 spin_lock_irqsave(&sdsc->lock, flags);
7183 sd_dp->cmpl_ts = ktime_add(ns_to_ktime(ns_from_boot), kt);
7184 WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_POLL);
7185 spin_unlock_irqrestore(&sdsc->lock, flags);
7186 } else {
7187 /* schedule the invocation of scsi_done() for a later time */
7188 spin_lock_irqsave(&sdsc->lock, flags);
7189 WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_HRT);
7190 hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED);
7191 /*
7192 * The completion handler will try to grab sqcp->lock,
7193 * so there is no chance that the completion handler
7194 * will call scsi_done() until we release the lock
7195 * here (so ok to keep referencing sdsc).
7196 */
7197 spin_unlock_irqrestore(&sdsc->lock, flags);
7198 }
7199 } else { /* jdelay < 0, use work queue */
7200 if (unlikely((sdebug_opts & SDEBUG_OPT_CMD_ABORT) &&
7201 atomic_read(&sdeb_inject_pending))) {
7202 sd_dp->aborted = true;
7203 atomic_set(&sdeb_inject_pending, 0);
7204 sdev_printk(KERN_INFO, sdp, "abort request tag=%#x\n",
7205 blk_mq_unique_tag_to_tag(get_tag(cmnd)));
7206 }
7207
7208 if (sdebug_statistics)
7209 sd_dp->issuing_cpu = raw_smp_processor_id();
7210 if (polled) {
7211 spin_lock_irqsave(&sdsc->lock, flags);
7212 sd_dp->cmpl_ts = ns_to_ktime(ns_from_boot);
7213 WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_POLL);
7214 spin_unlock_irqrestore(&sdsc->lock, flags);
7215 } else {
7216 spin_lock_irqsave(&sdsc->lock, flags);
7217 WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_WQ);
7218 schedule_work(&sd_dp->ew.work);
7219 spin_unlock_irqrestore(&sdsc->lock, flags);
7220 }
7221 }
7222
7223 return 0;
7224
7225 respond_in_thread: /* call back to mid-layer using invocation thread */
7226 cmnd->result = pfp != NULL ? pfp(cmnd, devip) : 0;
7227 cmnd->result &= ~SDEG_RES_IMMED_MASK;
7228 if (cmnd->result == 0 && scsi_result != 0)
7229 cmnd->result = scsi_result;
7230 scsi_done(cmnd);
7231 return 0;
7232 }
7233
7234 /* Note: The following macros create attribute files in the
7235 /sys/module/scsi_debug/parameters directory. Unfortunately this
7236 driver is unaware of a change and cannot trigger auxiliary actions
7237 as it can when the corresponding attribute in the
7238 /sys/bus/pseudo/drivers/scsi_debug directory is changed.
7239 */
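/*
 * Illustrative usage (not from this file): parameters are normally given at
 * load time, e.g. "modprobe scsi_debug dev_size_mb=256 num_tgts=2 max_luns=4",
 * and the writable ones can later be changed under
 * /sys/module/scsi_debug/parameters/.
 */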
7240 module_param_named(add_host, sdebug_add_host, int, S_IRUGO | S_IWUSR);
7241 module_param_named(ato, sdebug_ato, int, S_IRUGO);
7242 module_param_named(cdb_len, sdebug_cdb_len, int, 0644);
7243 module_param_named(clustering, sdebug_clustering, bool, S_IRUGO | S_IWUSR);
7244 module_param_named(delay, sdebug_jdelay, int, S_IRUGO | S_IWUSR);
7245 module_param_named(dev_size_mb, sdebug_dev_size_mb, int, S_IRUGO);
7246 module_param_named(dif, sdebug_dif, int, S_IRUGO);
7247 module_param_named(dix, sdebug_dix, int, S_IRUGO);
7248 module_param_named(dsense, sdebug_dsense, int, S_IRUGO | S_IWUSR);
7249 module_param_named(every_nth, sdebug_every_nth, int, S_IRUGO | S_IWUSR);
7250 module_param_named(fake_rw, sdebug_fake_rw, int, S_IRUGO | S_IWUSR);
7251 module_param_named(guard, sdebug_guard, uint, S_IRUGO);
7252 module_param_named(host_lock, sdebug_host_lock, bool, S_IRUGO | S_IWUSR);
7253 module_param_named(host_max_queue, sdebug_host_max_queue, int, S_IRUGO);
7254 module_param_string(inq_product, sdebug_inq_product_id,
7255 sizeof(sdebug_inq_product_id), S_IRUGO | S_IWUSR);
7256 module_param_string(inq_rev, sdebug_inq_product_rev,
7257 sizeof(sdebug_inq_product_rev), S_IRUGO | S_IWUSR);
7258 module_param_string(inq_vendor, sdebug_inq_vendor_id,
7259 sizeof(sdebug_inq_vendor_id), S_IRUGO | S_IWUSR);
7260 module_param_named(lbprz, sdebug_lbprz, int, S_IRUGO);
7261 module_param_named(lbpu, sdebug_lbpu, int, S_IRUGO);
7262 module_param_named(lbpws, sdebug_lbpws, int, S_IRUGO);
7263 module_param_named(lbpws10, sdebug_lbpws10, int, S_IRUGO);
7264 module_param_named(atomic_wr, sdebug_atomic_wr, int, S_IRUGO);
7265 module_param_named(lowest_aligned, sdebug_lowest_aligned, int, S_IRUGO);
7266 module_param_named(lun_format, sdebug_lun_am_i, int, S_IRUGO | S_IWUSR);
7267 module_param_named(max_luns, sdebug_max_luns, int, S_IRUGO | S_IWUSR);
7268 module_param_named(max_queue, sdebug_max_queue, int, S_IRUGO | S_IWUSR);
7269 module_param_named(medium_error_count, sdebug_medium_error_count, int,
7270 S_IRUGO | S_IWUSR);
7271 module_param_named(medium_error_start, sdebug_medium_error_start, int,
7272 S_IRUGO | S_IWUSR);
7273 module_param_named(ndelay, sdebug_ndelay, int, S_IRUGO | S_IWUSR);
7274 module_param_named(no_lun_0, sdebug_no_lun_0, int, S_IRUGO | S_IWUSR);
7275 module_param_named(no_rwlock, sdebug_no_rwlock, bool, S_IRUGO | S_IWUSR);
7276 module_param_named(no_uld, sdebug_no_uld, int, S_IRUGO);
7277 module_param_named(num_parts, sdebug_num_parts, int, S_IRUGO);
7278 module_param_named(num_tgts, sdebug_num_tgts, int, S_IRUGO | S_IWUSR);
7279 module_param_named(opt_blks, sdebug_opt_blks, int, S_IRUGO);
7280 module_param_named(opt_xferlen_exp, sdebug_opt_xferlen_exp, int, S_IRUGO);
7281 module_param_named(opts, sdebug_opts, int, S_IRUGO | S_IWUSR);
7282 module_param_named(per_host_store, sdebug_per_host_store, bool,
7283 S_IRUGO | S_IWUSR);
7284 module_param_named(physblk_exp, sdebug_physblk_exp, int, S_IRUGO);
7285 module_param_named(ptype, sdebug_ptype, int, S_IRUGO | S_IWUSR);
7286 module_param_named(random, sdebug_random, bool, S_IRUGO | S_IWUSR);
7287 module_param_named(removable, sdebug_removable, bool, S_IRUGO | S_IWUSR);
7288 module_param_named(scsi_level, sdebug_scsi_level, int, S_IRUGO);
7289 module_param_named(sector_size, sdebug_sector_size, int, S_IRUGO);
7290 module_param_named(statistics, sdebug_statistics, bool, S_IRUGO | S_IWUSR);
7291 module_param_named(strict, sdebug_strict, bool, S_IRUGO | S_IWUSR);
7292 module_param_named(submit_queues, submit_queues, int, S_IRUGO);
7293 module_param_named(poll_queues, poll_queues, int, S_IRUGO);
7294 module_param_named(tur_ms_to_ready, sdeb_tur_ms_to_ready, int, S_IRUGO);
7295 module_param_named(unmap_alignment, sdebug_unmap_alignment, int, S_IRUGO);
7296 module_param_named(unmap_granularity, sdebug_unmap_granularity, int, S_IRUGO);
7297 module_param_named(unmap_max_blocks, sdebug_unmap_max_blocks, int, S_IRUGO);
7298 module_param_named(unmap_max_desc, sdebug_unmap_max_desc, int, S_IRUGO);
7299 module_param_named(atomic_wr_max_length, sdebug_atomic_wr_max_length, int, S_IRUGO);
7300 module_param_named(atomic_wr_align, sdebug_atomic_wr_align, int, S_IRUGO);
7301 module_param_named(atomic_wr_gran, sdebug_atomic_wr_gran, int, S_IRUGO);
7302 module_param_named(atomic_wr_max_length_bndry, sdebug_atomic_wr_max_length_bndry, int, S_IRUGO);
7303 module_param_named(atomic_wr_max_bndry, sdebug_atomic_wr_max_bndry, int, S_IRUGO);
7304 module_param_named(uuid_ctl, sdebug_uuid_ctl, int, S_IRUGO);
7305 module_param_named(virtual_gb, sdebug_virtual_gb, int, S_IRUGO | S_IWUSR);
7306 module_param_named(vpd_use_hostno, sdebug_vpd_use_hostno, int,
7307 S_IRUGO | S_IWUSR);
7308 module_param_named(wp, sdebug_wp, bool, S_IRUGO | S_IWUSR);
7309 module_param_named(write_same_length, sdebug_write_same_length, int,
7310 S_IRUGO | S_IWUSR);
7311 module_param_named(zbc, sdeb_zbc_model_s, charp, S_IRUGO);
7312 module_param_named(zone_cap_mb, sdeb_zbc_zone_cap_mb, int, S_IRUGO);
7313 module_param_named(zone_max_open, sdeb_zbc_max_open, int, S_IRUGO);
7314 module_param_named(zone_nr_conv, sdeb_zbc_nr_conv, int, S_IRUGO);
7315 module_param_named(zone_size_mb, sdeb_zbc_zone_size_mb, int, S_IRUGO);
7316 module_param_named(allow_restart, sdebug_allow_restart, bool, S_IRUGO | S_IWUSR);
7317
7318 MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
7319 MODULE_DESCRIPTION("SCSI debug adapter driver");
7320 MODULE_LICENSE("GPL");
7321 MODULE_VERSION(SDEBUG_VERSION);
7322
7323 MODULE_PARM_DESC(add_host, "add n hosts, in sysfs if negative remove host(s) (def=1)");
7324 MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
7325 MODULE_PARM_DESC(cdb_len, "suggest CDB lengths to drivers (def=10)");
7326 MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
7327 MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny");
7328 MODULE_PARM_DESC(dev_size_mb, "size in MiB of ram shared by devs(def=8)");
7329 MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
7330 MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
7331 MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
7332 MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
7333 MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
7334 MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
7335 MODULE_PARM_DESC(host_lock, "host_lock is ignored (def=0)");
7336 MODULE_PARM_DESC(host_max_queue,
7337 "host max # of queued cmds (0 to max(def) [max_queue fixed equal for !0])");
7338 MODULE_PARM_DESC(inq_product, "SCSI INQUIRY product string (def=\"scsi_debug\")");
7339 MODULE_PARM_DESC(inq_rev, "SCSI INQUIRY revision string (def=\""
7340 SDEBUG_VERSION "\")");
7341 MODULE_PARM_DESC(inq_vendor, "SCSI INQUIRY vendor string (def=\"Linux\")");
7342 MODULE_PARM_DESC(lbprz,
7343 "on read unmapped LBs return 0 when 1 (def), return 0xff when 2");
7344 MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
7345 MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
7346 MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
7347 MODULE_PARM_DESC(atomic_wr, "enable ATOMIC WRITE support, support WRITE ATOMIC(16) (def=0)");
7348 MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
7349 MODULE_PARM_DESC(lun_format, "LUN format: 0->peripheral (def); 1->flat address method");
7350 MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
7351 MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to max(def))");
7352 MODULE_PARM_DESC(medium_error_count, "count of consecutive sectors on which to return MEDIUM error");
7353 MODULE_PARM_DESC(medium_error_start, "starting sector number to return MEDIUM error");
7354 MODULE_PARM_DESC(ndelay, "response delay in nanoseconds (def=0 -> ignore)");
7355 MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
7356 MODULE_PARM_DESC(no_rwlock, "don't protect user data reads+writes (def=0)");
7357 MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0)");
7358 MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
7359 MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
7360 MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=1024)");
7361 MODULE_PARM_DESC(opt_xferlen_exp, "optimal transfer length granularity exponent (def=physblk_exp)");
7362 MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
7363 MODULE_PARM_DESC(per_host_store, "If set, next positive add_host will get new store (def=0)");
7364 MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
7365 MODULE_PARM_DESC(poll_queues, "support for io_uring iopoll queues (1 to max(submit_queues - 1))");
7366 MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
7367 MODULE_PARM_DESC(random, "If set, uniformly randomize command duration between 0 and delay_in_ns");
7368 MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
7369 MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=7[SPC-5])");
7370 MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
7371 MODULE_PARM_DESC(statistics, "collect statistics on commands, queues (def=0)");
7372 MODULE_PARM_DESC(strict, "stricter checks: reserved field in cdb (def=0)");
7373 MODULE_PARM_DESC(submit_queues, "support for block multi-queue (def=1)");
7374 MODULE_PARM_DESC(tur_ms_to_ready, "TEST UNIT READY millisecs before initial good status (def=0)");
7375 MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
7376 MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
7377 MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)");
7378 MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
7379 MODULE_PARM_DESC(atomic_wr_max_length, "max # of blocks can be atomically written in one cmd (def=8192)");
7380 MODULE_PARM_DESC(atomic_wr_align, "minimum alignment of atomic write in blocks (def=2)");
7381 MODULE_PARM_DESC(atomic_wr_gran, "minimum granularity of atomic write in blocks (def=2)");
7382 MODULE_PARM_DESC(atomic_wr_max_length_bndry, "max # of blocks can be atomically written in one cmd with boundary set (def=8192)");
7383 MODULE_PARM_DESC(atomic_wr_max_bndry, "max # boundaries per atomic write (def=128)");
7384 MODULE_PARM_DESC(uuid_ctl,
7385 "1->use uuid for lu name, 0->don't, 2->all use same (def=0)");
7386 MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)");
7387 MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
7388 MODULE_PARM_DESC(wp, "Write Protect (def=0)");
7389 MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
7390 MODULE_PARM_DESC(zbc, "'none' [0]; 'aware' [1]; 'managed' [2] (def=0). Can have 'host-' prefix");
7391 MODULE_PARM_DESC(zone_cap_mb, "Zone capacity in MiB (def=zone size)");
7392 MODULE_PARM_DESC(zone_max_open, "Maximum number of open zones; [0] for no limit (def=auto)");
7393 MODULE_PARM_DESC(zone_nr_conv, "Number of conventional zones (def=1)");
7394 MODULE_PARM_DESC(zone_size_mb, "Zone size in MiB (def=auto)");
7395 MODULE_PARM_DESC(allow_restart, "Set scsi_device's allow_restart flag(def=0)");
7396
7397 #define SDEBUG_INFO_LEN 256
7398 static char sdebug_info[SDEBUG_INFO_LEN];
7399
7400 static const char *scsi_debug_info(struct Scsi_Host *shp)
7401 {
7402 int k;
7403
7404 k = scnprintf(sdebug_info, SDEBUG_INFO_LEN, "%s: version %s [%s]\n",
7405 my_name, SDEBUG_VERSION, sdebug_version_date);
7406 if (k >= (SDEBUG_INFO_LEN - 1))
7407 return sdebug_info;
7408 scnprintf(sdebug_info + k, SDEBUG_INFO_LEN - k,
7409 " dev_size_mb=%d, opts=0x%x, submit_queues=%d, %s=%d",
7410 sdebug_dev_size_mb, sdebug_opts, submit_queues,
7411 "statistics", (int)sdebug_statistics);
7412 return sdebug_info;
7413 }
7414
7415 /* 'echo <val> > /proc/scsi/scsi_debug/<host_id>' writes to opts */
7416 static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer,
7417 int length)
7418 {
7419 char arr[16];
7420 int opts;
7421 int minLen = length > 15 ? 15 : length;
7422
7423 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
7424 return -EACCES;
7425 memcpy(arr, buffer, minLen);
7426 arr[minLen] = '\0';
7427 if (1 != sscanf(arr, "%d", &opts))
7428 return -EINVAL;
7429 sdebug_opts = opts;
7430 sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
7431 sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
7432 if (sdebug_every_nth != 0)
7433 tweak_cmnd_count();
7434 return length;
7435 }
7436
7437 struct sdebug_submit_queue_data {
7438 int *first;
7439 int *last;
7440 int queue_num;
7441 };
7442
7443 static bool sdebug_submit_queue_iter(struct request *rq, void *opaque)
7444 {
7445 struct sdebug_submit_queue_data *data = opaque;
7446 u32 unique_tag = blk_mq_unique_tag(rq);
7447 u16 hwq = blk_mq_unique_tag_to_hwq(unique_tag);
7448 u16 tag = blk_mq_unique_tag_to_tag(unique_tag);
7449 int queue_num = data->queue_num;
7450
7451 if (hwq != queue_num)
7452 return true;
7453
7454 /* Rely on iter'ing in ascending tag order */
7455 if (*data->first == -1)
7456 *data->first = *data->last = tag;
7457 else
7458 *data->last = tag;
7459
7460 return true;
7461 }
7462
7463 /* Output seen with 'cat /proc/scsi/scsi_debug/<host_id>'. It will be the
7464 * same for each scsi_debug host (if more than one). Some of the counters
7465 * output are not atomic so might be inaccurate on a busy system. */
7466 static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
7467 {
7468 struct sdebug_host_info *sdhp;
7469 int j;
7470
7471 seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n",
7472 SDEBUG_VERSION, sdebug_version_date);
7473 seq_printf(m, "num_tgts=%d, %ssize=%d MB, opts=0x%x, every_nth=%d\n",
7474 sdebug_num_tgts, "shared (ram) ", sdebug_dev_size_mb,
7475 sdebug_opts, sdebug_every_nth);
7476 seq_printf(m, "delay=%d, ndelay=%d, max_luns=%d, sector_size=%d %s\n",
7477 sdebug_jdelay, sdebug_ndelay, sdebug_max_luns,
7478 sdebug_sector_size, "bytes");
7479 seq_printf(m, "cylinders=%d, heads=%d, sectors=%d, command aborts=%d\n",
7480 sdebug_cylinders_per, sdebug_heads, sdebug_sectors_per,
7481 num_aborts);
7482 seq_printf(m, "RESETs: device=%d, target=%d, bus=%d, host=%d\n",
7483 num_dev_resets, num_target_resets, num_bus_resets,
7484 num_host_resets);
7485 seq_printf(m, "dix_reads=%d, dix_writes=%d, dif_errors=%d\n",
7486 dix_reads, dix_writes, dif_errors);
7487 seq_printf(m, "usec_in_jiffy=%lu, statistics=%d\n", TICK_NSEC / 1000,
7488 sdebug_statistics);
7489 seq_printf(m, "cmnd_count=%d, completions=%d, %s=%d, a_tsf=%d, mq_polls=%d\n",
7490 atomic_read(&sdebug_cmnd_count),
7491 atomic_read(&sdebug_completions),
7492 "miss_cpus", atomic_read(&sdebug_miss_cpus),
7493 atomic_read(&sdebug_a_tsf),
7494 atomic_read(&sdeb_mq_poll_count));
7495
7496 seq_printf(m, "submit_queues=%d\n", submit_queues);
7497 for (j = 0; j < submit_queues; ++j) {
7498 int f = -1, l = -1;
7499 struct sdebug_submit_queue_data data = {
7500 .queue_num = j,
7501 .first = &f,
7502 .last = &l,
7503 };
7504 seq_printf(m, " queue %d:\n", j);
7505 blk_mq_tagset_busy_iter(&host->tag_set, sdebug_submit_queue_iter,
7506 &data);
7507 if (f >= 0) {
7508 seq_printf(m, " BUSY: %s: %d,%d\n",
7509 "first,last bits", f, l);
7510 }
7511 }
7512
7513 seq_printf(m, "this host_no=%d\n", host->host_no);
7514 if (!xa_empty(per_store_ap)) {
7515 bool niu;
7516 int idx;
7517 unsigned long l_idx;
7518 struct sdeb_store_info *sip;
7519
7520 seq_puts(m, "\nhost list:\n");
7521 j = 0;
7522 list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
7523 idx = sdhp->si_idx;
7524 seq_printf(m, " %d: host_no=%d, si_idx=%d\n", j,
7525 sdhp->shost->host_no, idx);
7526 ++j;
7527 }
7528 seq_printf(m, "\nper_store array [most_recent_idx=%d]:\n",
7529 sdeb_most_recent_idx);
7530 j = 0;
7531 xa_for_each(per_store_ap, l_idx, sip) {
7532 niu = xa_get_mark(per_store_ap, l_idx,
7533 SDEB_XA_NOT_IN_USE);
7534 idx = (int)l_idx;
7535 seq_printf(m, " %d: idx=%d%s\n", j, idx,
7536 (niu ? " not_in_use" : ""));
7537 ++j;
7538 }
7539 }
7540 return 0;
7541 }
7542
7543 static ssize_t delay_show(struct device_driver *ddp, char *buf)
7544 {
7545 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_jdelay);
7546 }
7547 /* Returns -EBUSY if jdelay is being changed and commands are queued. The unit
7548 * of delay is jiffies.
7549 */
7550 static ssize_t delay_store(struct device_driver *ddp, const char *buf,
7551 size_t count)
7552 {
7553 int jdelay, res;
7554
7555 if (count > 0 && sscanf(buf, "%d", &jdelay) == 1) {
7556 res = count;
7557 if (sdebug_jdelay != jdelay) {
7558 struct sdebug_host_info *sdhp;
7559
7560 mutex_lock(&sdebug_host_list_mutex);
7561 block_unblock_all_queues(true);
7562
7563 list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
7564 struct Scsi_Host *shost = sdhp->shost;
7565
7566 if (scsi_host_busy(shost)) {
7567 res = -EBUSY; /* queued commands */
7568 break;
7569 }
7570 }
7571 if (res > 0) {
7572 sdebug_jdelay = jdelay;
7573 sdebug_ndelay = 0;
7574 }
7575 block_unblock_all_queues(false);
7576 mutex_unlock(&sdebug_host_list_mutex);
7577 }
7578 return res;
7579 }
7580 return -EINVAL;
7581 }
7582 static DRIVER_ATTR_RW(delay);
7583
7584 static ssize_t ndelay_show(struct device_driver *ddp, char *buf)
7585 {
7586 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ndelay);
7587 }
7588 /* Returns -EBUSY if ndelay is being changed and commands are queued */
7589 /* If > 0 and accepted then sdebug_jdelay is set to JDELAY_OVERRIDDEN */
7590 static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
7591 size_t count)
7592 {
7593 int ndelay, res;
7594
7595 if ((count > 0) && (1 == sscanf(buf, "%d", &ndelay)) &&
7596 (ndelay >= 0) && (ndelay < (1000 * 1000 * 1000))) {
7597 res = count;
7598 if (sdebug_ndelay != ndelay) {
7599 struct sdebug_host_info *sdhp;
7600
7601 mutex_lock(&sdebug_host_list_mutex);
7602 block_unblock_all_queues(true);
7603
7604 list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
7605 struct Scsi_Host *shost = sdhp->shost;
7606
7607 if (scsi_host_busy(shost)) {
7608 res = -EBUSY; /* queued commands */
7609 break;
7610 }
7611 }
7612
7613 if (res > 0) {
7614 sdebug_ndelay = ndelay;
7615 sdebug_jdelay = ndelay ? JDELAY_OVERRIDDEN
7616 : DEF_JDELAY;
7617 }
7618 block_unblock_all_queues(false);
7619 mutex_unlock(&sdebug_host_list_mutex);
7620 }
7621 return res;
7622 }
7623 return -EINVAL;
7624 }
7625 static DRIVER_ATTR_RW(ndelay);
7626
7627 static ssize_t opts_show(struct device_driver *ddp, char *buf)
7628 {
7629 return scnprintf(buf, PAGE_SIZE, "0x%x\n", sdebug_opts);
7630 }
7631
7632 static ssize_t opts_store(struct device_driver *ddp, const char *buf,
7633 size_t count)
7634 {
7635 int opts;
7636 char work[20];
7637
7638 if (sscanf(buf, "%10s", work) == 1) {
7639 if (strncasecmp(work, "0x", 2) == 0) {
7640 if (kstrtoint(work + 2, 16, &opts) == 0)
7641 goto opts_done;
7642 } else {
7643 if (kstrtoint(work, 10, &opts) == 0)
7644 goto opts_done;
7645 }
7646 }
7647 return -EINVAL;
7648 opts_done:
7649 sdebug_opts = opts;
7650 sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
7651 sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
7652 tweak_cmnd_count();
7653 return count;
7654 }
7655 static DRIVER_ATTR_RW(opts);
7656
7657 static ssize_t ptype_show(struct device_driver *ddp, char *buf)
7658 {
7659 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ptype);
7660 }
7661 static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
7662 size_t count)
7663 {
7664 int n;
7665
7666 /* Cannot change from or to TYPE_ZBC with sysfs */
7667 if (sdebug_ptype == TYPE_ZBC)
7668 return -EINVAL;
7669
7670 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
7671 if (n == TYPE_ZBC)
7672 return -EINVAL;
7673 sdebug_ptype = n;
7674 return count;
7675 }
7676 return -EINVAL;
7677 }
7678 static DRIVER_ATTR_RW(ptype);
7679
7680 static ssize_t dsense_show(struct device_driver *ddp, char *buf)
7681 {
7682 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dsense);
7683 }
7684 static ssize_t dsense_store(struct device_driver *ddp, const char *buf,
7685 size_t count)
7686 {
7687 int n;
7688
7689 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
7690 sdebug_dsense = n;
7691 return count;
7692 }
7693 return -EINVAL;
7694 }
7695 static DRIVER_ATTR_RW(dsense);
7696
7697 static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
7698 {
7699 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_fake_rw);
7700 }
7701 static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
7702 size_t count)
7703 {
7704 int n, idx;
7705
7706 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
7707 bool want_store = (n == 0);
7708 struct sdebug_host_info *sdhp;
7709
7710 n = (n > 0);
7711 sdebug_fake_rw = (sdebug_fake_rw > 0);
7712 if (sdebug_fake_rw == n)
7713 return count; /* not transitioning so do nothing */
7714
7715 if (want_store) { /* 1 --> 0 transition, set up store */
7716 if (sdeb_first_idx < 0) {
7717 idx = sdebug_add_store();
7718 if (idx < 0)
7719 return idx;
7720 } else {
7721 idx = sdeb_first_idx;
7722 xa_clear_mark(per_store_ap, idx,
7723 SDEB_XA_NOT_IN_USE);
7724 }
7725 /* make all hosts use same store */
7726 list_for_each_entry(sdhp, &sdebug_host_list,
7727 host_list) {
7728 if (sdhp->si_idx != idx) {
7729 xa_set_mark(per_store_ap, sdhp->si_idx,
7730 SDEB_XA_NOT_IN_USE);
7731 sdhp->si_idx = idx;
7732 }
7733 }
7734 sdeb_most_recent_idx = idx;
7735 } else { /* 0 --> 1 transition is trigger for shrink */
7736 sdebug_erase_all_stores(true /* apart from first */);
7737 }
7738 sdebug_fake_rw = n;
7739 return count;
7740 }
7741 return -EINVAL;
7742 }
7743 static DRIVER_ATTR_RW(fake_rw);
7744
7745 static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf)
7746 {
7747 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_lun_0);
7748 }
7749 static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf,
7750 size_t count)
7751 {
7752 int n;
7753
7754 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
7755 sdebug_no_lun_0 = n;
7756 return count;
7757 }
7758 return -EINVAL;
7759 }
7760 static DRIVER_ATTR_RW(no_lun_0);
7761
7762 static ssize_t num_tgts_show(struct device_driver *ddp, char *buf)
7763 {
7764 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_tgts);
7765 }
7766 static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf,
7767 size_t count)
7768 {
7769 int n;
7770
7771 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
7772 sdebug_num_tgts = n;
7773 sdebug_max_tgts_luns();
7774 return count;
7775 }
7776 return -EINVAL;
7777 }
7778 static DRIVER_ATTR_RW(num_tgts);
7779
7780 static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf)
7781 {
7782 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dev_size_mb);
7783 }
7784 static DRIVER_ATTR_RO(dev_size_mb);
7785
7786 static ssize_t per_host_store_show(struct device_driver *ddp, char *buf)
7787 {
7788 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_per_host_store);
7789 }
7790
7791 static ssize_t per_host_store_store(struct device_driver *ddp, const char *buf,
7792 size_t count)
7793 {
7794 bool v;
7795
7796 if (kstrtobool(buf, &v))
7797 return -EINVAL;
7798
7799 sdebug_per_host_store = v;
7800 return count;
7801 }
7802 static DRIVER_ATTR_RW(per_host_store);
7803
7804 static ssize_t num_parts_show(struct device_driver *ddp, char *buf)
7805 {
7806 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_parts);
7807 }
7808 static DRIVER_ATTR_RO(num_parts);
7809
7810 static ssize_t every_nth_show(struct device_driver *ddp, char *buf)
7811 {
7812 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_every_nth);
7813 }
7814 static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
7815 size_t count)
7816 {
7817 int nth;
7818 char work[20];
7819
7820 if (sscanf(buf, "%10s", work) == 1) {
7821 if (strncasecmp(work, "0x", 2) == 0) {
7822 if (kstrtoint(work + 2, 16, &nth) == 0)
7823 goto every_nth_done;
7824 } else {
7825 if (kstrtoint(work, 10, &nth) == 0)
7826 goto every_nth_done;
7827 }
7828 }
7829 return -EINVAL;
7830
7831 every_nth_done:
7832 sdebug_every_nth = nth;
7833 if (nth && !sdebug_statistics) {
7834 pr_info("every_nth needs statistics=1, set it\n");
7835 sdebug_statistics = true;
7836 }
7837 tweak_cmnd_count();
7838 return count;
7839 }
7840 static DRIVER_ATTR_RW(every_nth);
7841
7842 static ssize_t lun_format_show(struct device_driver *ddp, char *buf)
7843 {
7844 return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_lun_am);
7845 }
7846 static ssize_t lun_format_store(struct device_driver *ddp, const char *buf,
7847 size_t count)
7848 {
7849 int n;
7850 bool changed;
7851
7852 if (kstrtoint(buf, 0, &n))
7853 return -EINVAL;
7854 if (n >= 0) {
7855 if (n > (int)SAM_LUN_AM_FLAT) {
7856 pr_warn("only LUN address methods 0 and 1 are supported\n");
7857 return -EINVAL;
7858 }
7859 changed = ((int)sdebug_lun_am != n);
7860 sdebug_lun_am = n;
7861 if (changed && sdebug_scsi_level >= 5) { /* >= SPC-3 */
7862 struct sdebug_host_info *sdhp;
7863 struct sdebug_dev_info *dp;
7864
7865 mutex_lock(&sdebug_host_list_mutex);
7866 list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
7867 list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
7868 set_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
7869 }
7870 }
7871 mutex_unlock(&sdebug_host_list_mutex);
7872 }
7873 return count;
7874 }
7875 return -EINVAL;
7876 }
7877 static DRIVER_ATTR_RW(lun_format);
7878
7879 static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
7880 {
7881 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_luns);
7882 }
7883 static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
7884 size_t count)
7885 {
7886 int n;
7887 bool changed;
7888
7889 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
7890 if (n > 256) {
7891 pr_warn("max_luns can be no more than 256\n");
7892 return -EINVAL;
7893 }
7894 changed = (sdebug_max_luns != n);
7895 sdebug_max_luns = n;
7896 sdebug_max_tgts_luns();
7897 if (changed && (sdebug_scsi_level >= 5)) { /* >= SPC-3 */
7898 struct sdebug_host_info *sdhp;
7899 struct sdebug_dev_info *dp;
7900
7901 mutex_lock(&sdebug_host_list_mutex);
7902 list_for_each_entry(sdhp, &sdebug_host_list,
7903 host_list) {
7904 list_for_each_entry(dp, &sdhp->dev_info_list,
7905 dev_list) {
7906 set_bit(SDEBUG_UA_LUNS_CHANGED,
7907 dp->uas_bm);
7908 }
7909 }
7910 mutex_unlock(&sdebug_host_list_mutex);
7911 }
7912 return count;
7913 }
7914 return -EINVAL;
7915 }
7916 static DRIVER_ATTR_RW(max_luns);
7917
7918 static ssize_t max_queue_show(struct device_driver *ddp, char *buf)
7919 {
7920 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_queue);
7921 }
7922 /* N.B. max_queue may only be changed while no sdebug hosts are present;
7923  * attempts to change it while hosts exist fail with -EBUSY. */
7924 static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
7925 size_t count)
7926 {
7927 int n;
7928
7929 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
7930 (n <= SDEBUG_CANQUEUE) &&
7931 (sdebug_host_max_queue == 0)) {
7932 mutex_lock(&sdebug_host_list_mutex);
7933
7934 /* We may only change sdebug_max_queue when we have no shosts */
7935 if (list_empty(&sdebug_host_list))
7936 sdebug_max_queue = n;
7937 else
7938 count = -EBUSY;
7939 mutex_unlock(&sdebug_host_list_mutex);
7940 return count;
7941 }
7942 return -EINVAL;
7943 }
7944 static DRIVER_ATTR_RW(max_queue);
7945
7946 static ssize_t host_max_queue_show(struct device_driver *ddp, char *buf)
7947 {
7948 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_host_max_queue);
7949 }
7950
7951 static ssize_t no_rwlock_show(struct device_driver *ddp, char *buf)
7952 {
7953 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_rwlock);
7954 }
7955
7956 static ssize_t no_rwlock_store(struct device_driver *ddp, const char *buf, size_t count)
7957 {
7958 bool v;
7959
7960 if (kstrtobool(buf, &v))
7961 return -EINVAL;
7962
7963 sdebug_no_rwlock = v;
7964 return count;
7965 }
7966 static DRIVER_ATTR_RW(no_rwlock);
7967
7968 /*
7969 * Since this is used for .can_queue, and we get the hc_idx tag from the bitmap
7970 * in range [0, sdebug_host_max_queue), we can't change it.
7971 */
7972 static DRIVER_ATTR_RO(host_max_queue);
7973
7974 static ssize_t no_uld_show(struct device_driver *ddp, char *buf)
7975 {
7976 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_uld);
7977 }
7978 static DRIVER_ATTR_RO(no_uld);
7979
7980 static ssize_t scsi_level_show(struct device_driver *ddp, char *buf)
7981 {
7982 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_scsi_level);
7983 }
7984 static DRIVER_ATTR_RO(scsi_level);
7985
7986 static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf)
7987 {
7988 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_virtual_gb);
7989 }
7990 static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
7991 size_t count)
7992 {
7993 int n;
7994 bool changed;
7995
7996 /* Ignore capacity change for ZBC drives for now */
7997 if (sdeb_zbc_in_use)
7998 return -ENOTSUPP;
7999
8000 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
8001 changed = (sdebug_virtual_gb != n);
8002 sdebug_virtual_gb = n;
8003 sdebug_capacity = get_sdebug_capacity();
8004 if (changed) {
8005 struct sdebug_host_info *sdhp;
8006 struct sdebug_dev_info *dp;
8007
8008 mutex_lock(&sdebug_host_list_mutex);
8009 list_for_each_entry(sdhp, &sdebug_host_list,
8010 host_list) {
8011 list_for_each_entry(dp, &sdhp->dev_info_list,
8012 dev_list) {
8013 set_bit(SDEBUG_UA_CAPACITY_CHANGED,
8014 dp->uas_bm);
8015 }
8016 }
8017 mutex_unlock(&sdebug_host_list_mutex);
8018 }
8019 return count;
8020 }
8021 return -EINVAL;
8022 }
8023 static DRIVER_ATTR_RW(virtual_gb);
8024
8025 static ssize_t add_host_show(struct device_driver *ddp, char *buf)
8026 {
8027 /* absolute number of hosts currently active is what is shown */
8028 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_hosts);
8029 }
8030
8031 static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
8032 size_t count)
8033 {
8034 bool found;
8035 unsigned long idx;
8036 struct sdeb_store_info *sip;
8037 bool want_phs = (sdebug_fake_rw == 0) && sdebug_per_host_store;
8038 int delta_hosts;
8039
8040 if (sscanf(buf, "%d", &delta_hosts) != 1)
8041 return -EINVAL;
8042 if (delta_hosts > 0) {
8043 do {
8044 found = false;
8045 if (want_phs) {
8046 xa_for_each_marked(per_store_ap, idx, sip,
8047 SDEB_XA_NOT_IN_USE) {
8048 sdeb_most_recent_idx = (int)idx;
8049 found = true;
8050 break;
8051 }
8052 if (found) /* re-use case */
8053 sdebug_add_host_helper((int)idx);
8054 else
8055 sdebug_do_add_host(true);
8056 } else {
8057 sdebug_do_add_host(false);
8058 }
8059 } while (--delta_hosts);
8060 } else if (delta_hosts < 0) {
8061 do {
8062 sdebug_do_remove_host(false);
8063 } while (++delta_hosts);
8064 }
8065 return count;
8066 }
8067 static DRIVER_ATTR_RW(add_host);
8068
8069 static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf)
8070 {
8071 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_vpd_use_hostno);
8072 }
8073 static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf,
8074 size_t count)
8075 {
8076 int n;
8077
8078 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
8079 sdebug_vpd_use_hostno = n;
8080 return count;
8081 }
8082 return -EINVAL;
8083 }
8084 static DRIVER_ATTR_RW(vpd_use_hostno);
8085
8086 static ssize_t statistics_show(struct device_driver *ddp, char *buf)
8087 {
8088 return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_statistics);
8089 }
8090 static ssize_t statistics_store(struct device_driver *ddp, const char *buf,
8091 size_t count)
8092 {
8093 int n;
8094
8095 if ((count > 0) && (sscanf(buf, "%d", &n) == 1) && (n >= 0)) {
8096 if (n > 0)
8097 sdebug_statistics = true;
8098 else {
8099 clear_queue_stats();
8100 sdebug_statistics = false;
8101 }
8102 return count;
8103 }
8104 return -EINVAL;
8105 }
8106 static DRIVER_ATTR_RW(statistics);
8107
8108 static ssize_t sector_size_show(struct device_driver *ddp, char *buf)
8109 {
8110 return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_sector_size);
8111 }
8112 static DRIVER_ATTR_RO(sector_size);
8113
8114 static ssize_t submit_queues_show(struct device_driver *ddp, char *buf)
8115 {
8116 return scnprintf(buf, PAGE_SIZE, "%d\n", submit_queues);
8117 }
8118 static DRIVER_ATTR_RO(submit_queues);
8119
8120 static ssize_t dix_show(struct device_driver *ddp, char *buf)
8121 {
8122 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dix);
8123 }
8124 static DRIVER_ATTR_RO(dix);
8125
8126 static ssize_t dif_show(struct device_driver *ddp, char *buf)
8127 {
8128 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dif);
8129 }
8130 static DRIVER_ATTR_RO(dif);
8131
8132 static ssize_t guard_show(struct device_driver *ddp, char *buf)
8133 {
8134 return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_guard);
8135 }
8136 static DRIVER_ATTR_RO(guard);
8137
8138 static ssize_t ato_show(struct device_driver *ddp, char *buf)
8139 {
8140 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ato);
8141 }
8142 static DRIVER_ATTR_RO(ato);
8143
8144 static ssize_t map_show(struct device_driver *ddp, char *buf)
8145 {
8146 ssize_t count = 0;
8147
8148 if (!scsi_debug_lbp())
8149 return scnprintf(buf, PAGE_SIZE, "0-%u\n",
8150 sdebug_store_sectors);
8151
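	/*
	 * "%*pbl" prints the provisioning bitmap as a ranged bit list
	 * (e.g. "0-3,32"), one set bit per mapped provisioning block.
	 */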
8152 if (sdebug_fake_rw == 0 && !xa_empty(per_store_ap)) {
8153 struct sdeb_store_info *sip = xa_load(per_store_ap, 0);
8154
8155 if (sip)
8156 count = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
8157 (int)map_size, sip->map_storep);
8158 }
8159 buf[count++] = '\n';
8160 buf[count] = '\0';
8161
8162 return count;
8163 }
8164 static DRIVER_ATTR_RO(map);
8165
8166 static ssize_t random_show(struct device_driver *ddp, char *buf)
8167 {
8168 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_random);
8169 }
8170
8171 static ssize_t random_store(struct device_driver *ddp, const char *buf,
8172 size_t count)
8173 {
8174 bool v;
8175
8176 if (kstrtobool(buf, &v))
8177 return -EINVAL;
8178
8179 sdebug_random = v;
8180 return count;
8181 }
8182 static DRIVER_ATTR_RW(random);
8183
8184 static ssize_t removable_show(struct device_driver *ddp, char *buf)
8185 {
8186 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_removable ? 1 : 0);
8187 }
8188 static ssize_t removable_store(struct device_driver *ddp, const char *buf,
8189 size_t count)
8190 {
8191 int n;
8192
8193 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
8194 sdebug_removable = (n > 0);
8195 return count;
8196 }
8197 return -EINVAL;
8198 }
8199 static DRIVER_ATTR_RW(removable);
8200
8201 static ssize_t host_lock_show(struct device_driver *ddp, char *buf)
8202 {
8203 return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_host_lock);
8204 }
8205 /* N.B. sdebug_host_lock does nothing, kept for backward compatibility */
8206 static ssize_t host_lock_store(struct device_driver *ddp, const char *buf,
8207 size_t count)
8208 {
8209 int n;
8210
8211 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
8212 sdebug_host_lock = (n > 0);
8213 return count;
8214 }
8215 return -EINVAL;
8216 }
8217 static DRIVER_ATTR_RW(host_lock);
8218
8219 static ssize_t strict_show(struct device_driver *ddp, char *buf)
8220 {
8221 return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_strict);
8222 }
8223 static ssize_t strict_store(struct device_driver *ddp, const char *buf,
8224 size_t count)
8225 {
8226 int n;
8227
8228 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
8229 sdebug_strict = (n > 0);
8230 return count;
8231 }
8232 return -EINVAL;
8233 }
8234 static DRIVER_ATTR_RW(strict);
8235
8236 static ssize_t uuid_ctl_show(struct device_driver *ddp, char *buf)
8237 {
8238 return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_uuid_ctl);
8239 }
8240 static DRIVER_ATTR_RO(uuid_ctl);
8241
8242 static ssize_t cdb_len_show(struct device_driver *ddp, char *buf)
8243 {
8244 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_cdb_len);
8245 }
8246 static ssize_t cdb_len_store(struct device_driver *ddp, const char *buf,
8247 size_t count)
8248 {
8249 int ret, n;
8250
8251 ret = kstrtoint(buf, 0, &n);
8252 if (ret)
8253 return ret;
8254 sdebug_cdb_len = n;
8255 all_config_cdb_len();
8256 return count;
8257 }
8258 static DRIVER_ATTR_RW(cdb_len);
8259
8260 static const char * const zbc_model_strs_a[] = {
8261 [BLK_ZONED_NONE] = "none",
8262 [BLK_ZONED_HA] = "host-aware",
8263 [BLK_ZONED_HM] = "host-managed",
8264 };
8265
8266 static const char * const zbc_model_strs_b[] = {
8267 [BLK_ZONED_NONE] = "no",
8268 [BLK_ZONED_HA] = "aware",
8269 [BLK_ZONED_HM] = "managed",
8270 };
8271
8272 static const char * const zbc_model_strs_c[] = {
8273 [BLK_ZONED_NONE] = "0",
8274 [BLK_ZONED_HA] = "1",
8275 [BLK_ZONED_HM] = "2",
8276 };
8277
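/*
 * Map a zbc= parameter string to a BLK_ZONED_* model, accepting any of the
 * three spellings in the tables above ("none"/"no"/"0", etc.).
 */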
static int sdeb_zbc_model_str(const char *cp)
{
	int res = sysfs_match_string(zbc_model_strs_a, cp);

	if (res < 0) {
		res = sysfs_match_string(zbc_model_strs_b, cp);
		if (res < 0) {
			res = sysfs_match_string(zbc_model_strs_c, cp);
			if (res < 0)
				return -EINVAL;
		}
	}
	return res;
}

static ssize_t zbc_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%s\n",
			 zbc_model_strs_a[sdeb_zbc_model]);
}
static DRIVER_ATTR_RO(zbc);

static ssize_t tur_ms_to_ready_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdeb_tur_ms_to_ready);
}
static DRIVER_ATTR_RO(tur_ms_to_ready);

static ssize_t group_number_stats_show(struct device_driver *ddp, char *buf)
{
	char *p = buf, *end = buf + PAGE_SIZE;
	int i;

	for (i = 0; i < ARRAY_SIZE(writes_by_group_number); i++)
		p += scnprintf(p, end - p, "%d %ld\n", i,
			       atomic_long_read(&writes_by_group_number[i]));

	return p - buf;
}

static ssize_t group_number_stats_store(struct device_driver *ddp,
					const char *buf, size_t count)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(writes_by_group_number); i++)
		atomic_long_set(&writes_by_group_number[i], 0);

	return count;
}
static DRIVER_ATTR_RW(group_number_stats);

/*
 * Note: the following array creates attribute files in the
 * /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
 * files (over those found in the /sys/module/scsi_debug/parameters
 * directory) is that auxiliary actions can be triggered when an attribute
 * is changed. For example, see add_host_store() above.
 */
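/*
 * For example, assuming the module is loaded, writing to the add_host
 * attribute instantiates an additional simulated host:
 *     echo 1 > /sys/bus/pseudo/drivers/scsi_debug/add_host
 */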

static struct attribute *sdebug_drv_attrs[] = {
	&driver_attr_delay.attr,
	&driver_attr_opts.attr,
	&driver_attr_ptype.attr,
	&driver_attr_dsense.attr,
	&driver_attr_fake_rw.attr,
	&driver_attr_host_max_queue.attr,
	&driver_attr_no_lun_0.attr,
	&driver_attr_num_tgts.attr,
	&driver_attr_dev_size_mb.attr,
	&driver_attr_num_parts.attr,
	&driver_attr_every_nth.attr,
	&driver_attr_lun_format.attr,
	&driver_attr_max_luns.attr,
	&driver_attr_max_queue.attr,
	&driver_attr_no_rwlock.attr,
	&driver_attr_no_uld.attr,
	&driver_attr_scsi_level.attr,
	&driver_attr_virtual_gb.attr,
	&driver_attr_add_host.attr,
	&driver_attr_per_host_store.attr,
	&driver_attr_vpd_use_hostno.attr,
	&driver_attr_sector_size.attr,
	&driver_attr_statistics.attr,
	&driver_attr_submit_queues.attr,
	&driver_attr_dix.attr,
	&driver_attr_dif.attr,
	&driver_attr_guard.attr,
	&driver_attr_ato.attr,
	&driver_attr_map.attr,
	&driver_attr_random.attr,
	&driver_attr_removable.attr,
	&driver_attr_host_lock.attr,
	&driver_attr_ndelay.attr,
	&driver_attr_strict.attr,
	&driver_attr_uuid_ctl.attr,
	&driver_attr_cdb_len.attr,
	&driver_attr_tur_ms_to_ready.attr,
	&driver_attr_zbc.attr,
	&driver_attr_group_number_stats.attr,
	NULL,
};
ATTRIBUTE_GROUPS(sdebug_drv);

static struct device *pseudo_primary;

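/*
 * Module entry point: validate and normalize the module parameters, size
 * the simulated medium, create the default backing store (unless
 * fake_rw=1), register the pseudo bus and driver, then add the initial
 * host(s).
 */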
static int __init scsi_debug_init(void)
{
	bool want_store = (sdebug_fake_rw == 0);
	unsigned long sz;
	int k, ret, hosts_to_add;
	int idx = -1;

	if (sdebug_ndelay >= 1000 * 1000 * 1000) {
		pr_warn("ndelay must be less than 1 second, ignored\n");
		sdebug_ndelay = 0;
	} else if (sdebug_ndelay > 0)
		sdebug_jdelay = JDELAY_OVERRIDDEN;

	switch (sdebug_sector_size) {
	case 512:
	case 1024:
	case 2048:
	case 4096:
		break;
	default:
		pr_err("invalid sector_size %d\n", sdebug_sector_size);
		return -EINVAL;
	}

	switch (sdebug_dif) {
	case T10_PI_TYPE0_PROTECTION:
		break;
	case T10_PI_TYPE1_PROTECTION:
	case T10_PI_TYPE2_PROTECTION:
	case T10_PI_TYPE3_PROTECTION:
		have_dif_prot = true;
		break;

	default:
		pr_err("dif must be 0, 1, 2 or 3\n");
		return -EINVAL;
	}

	if (sdebug_num_tgts < 0) {
		pr_err("num_tgts must be >= 0\n");
		return -EINVAL;
	}

	if (sdebug_guard > 1) {
		pr_err("guard must be 0 or 1\n");
		return -EINVAL;
	}

	if (sdebug_ato > 1) {
		pr_err("ato must be 0 or 1\n");
		return -EINVAL;
	}

	if (sdebug_physblk_exp > 15) {
		pr_err("invalid physblk_exp %u\n", sdebug_physblk_exp);
		return -EINVAL;
	}

	sdebug_lun_am = sdebug_lun_am_i;
	if (sdebug_lun_am > SAM_LUN_AM_FLAT) {
		pr_warn("Invalid LUN format %u, using default\n", (int)sdebug_lun_am);
		sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
	}

	if (sdebug_max_luns > 256) {
		if (sdebug_max_luns > 16384) {
			pr_warn("max_luns can be no more than 16384, using default\n");
			sdebug_max_luns = DEF_MAX_LUNS;
		}
		sdebug_lun_am = SAM_LUN_AM_FLAT;
	}

	if (sdebug_lowest_aligned > 0x3fff) {
		pr_err("lowest_aligned too big: %u\n", sdebug_lowest_aligned);
		return -EINVAL;
	}

	if (submit_queues < 1) {
		pr_err("submit_queues must be 1 or more\n");
		return -EINVAL;
	}

	if ((sdebug_max_queue > SDEBUG_CANQUEUE) || (sdebug_max_queue < 1)) {
		pr_err("max_queue must be in range [1, %d]\n", SDEBUG_CANQUEUE);
		return -EINVAL;
	}

	if ((sdebug_host_max_queue > SDEBUG_CANQUEUE) ||
	    (sdebug_host_max_queue < 0)) {
		pr_err("host_max_queue must be in range [0, %d]\n",
		       SDEBUG_CANQUEUE);
		return -EINVAL;
	}

	if (sdebug_host_max_queue &&
	    (sdebug_max_queue != sdebug_host_max_queue)) {
		sdebug_max_queue = sdebug_host_max_queue;
		pr_warn("fixing max submit queue depth to host max queue depth, %d\n",
			sdebug_max_queue);
	}

	/*
	 * Check for a host-managed zoned block device specified with
	 * ptype=0x14 or zbc=XXX.
	 */
	if (sdebug_ptype == TYPE_ZBC) {
		sdeb_zbc_model = BLK_ZONED_HM;
	} else if (sdeb_zbc_model_s && *sdeb_zbc_model_s) {
		k = sdeb_zbc_model_str(sdeb_zbc_model_s);
		if (k < 0)
			return k;
		sdeb_zbc_model = k;
		switch (sdeb_zbc_model) {
		case BLK_ZONED_NONE:
		case BLK_ZONED_HA:
			sdebug_ptype = TYPE_DISK;
			break;
		case BLK_ZONED_HM:
			sdebug_ptype = TYPE_ZBC;
			break;
		default:
			pr_err("Invalid ZBC model\n");
			return -EINVAL;
		}
	}
	if (sdeb_zbc_model != BLK_ZONED_NONE) {
		sdeb_zbc_in_use = true;
		if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
			sdebug_dev_size_mb = DEF_ZBC_DEV_SIZE_MB;
	}

	if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
		sdebug_dev_size_mb = DEF_DEV_SIZE_MB;
	if (sdebug_dev_size_mb < 1)
		sdebug_dev_size_mb = 1;	/* force minimum 1 MB ramdisk */
	sz = (unsigned long)sdebug_dev_size_mb * 1048576;
	sdebug_store_sectors = sz / sdebug_sector_size;
	sdebug_capacity = get_sdebug_capacity();

	/* play around with geometry, don't waste too much on track 0 */
	sdebug_heads = 8;
	sdebug_sectors_per = 32;
	if (sdebug_dev_size_mb >= 256)
		sdebug_heads = 64;
	else if (sdebug_dev_size_mb >= 16)
		sdebug_heads = 32;
	sdebug_cylinders_per = (unsigned long)sdebug_capacity /
			       (sdebug_sectors_per * sdebug_heads);
	if (sdebug_cylinders_per >= 1024) {
		/* other LLDs do this; implies >= 1GB ram disk ... */
		sdebug_heads = 255;
		sdebug_sectors_per = 63;
		sdebug_cylinders_per = (unsigned long)sdebug_capacity /
				       (sdebug_sectors_per * sdebug_heads);
	}
	if (scsi_debug_lbp()) {
		sdebug_unmap_max_blocks =
			clamp(sdebug_unmap_max_blocks, 0U, 0xffffffffU);

		sdebug_unmap_max_desc =
			clamp(sdebug_unmap_max_desc, 0U, 256U);

		sdebug_unmap_granularity =
			clamp(sdebug_unmap_granularity, 1U, 0xffffffffU);

		if (sdebug_unmap_alignment &&
		    sdebug_unmap_granularity <=
		    sdebug_unmap_alignment) {
			pr_err("ERR: unmap_granularity <= unmap_alignment\n");
			return -EINVAL;
		}
	}

	xa_init_flags(per_store_ap, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
	if (want_store) {
		idx = sdebug_add_store();
		if (idx < 0)
			return idx;
	}

	pseudo_primary = root_device_register("pseudo_0");
	if (IS_ERR(pseudo_primary)) {
		pr_warn("root_device_register() error\n");
		ret = PTR_ERR(pseudo_primary);
		goto free_vm;
	}
	ret = bus_register(&pseudo_lld_bus);
	if (ret < 0) {
		pr_warn("bus_register error: %d\n", ret);
		goto dev_unreg;
	}
	ret = driver_register(&sdebug_driverfs_driver);
	if (ret < 0) {
		pr_warn("driver_register error: %d\n", ret);
		goto bus_unreg;
	}

	hosts_to_add = sdebug_add_host;
	sdebug_add_host = 0;

	sdebug_debugfs_root = debugfs_create_dir("scsi_debug", NULL);
	if (IS_ERR_OR_NULL(sdebug_debugfs_root))
		pr_info("%s: failed to create initial debugfs directory\n", __func__);

	for (k = 0; k < hosts_to_add; k++) {
		if (want_store && k == 0) {
			ret = sdebug_add_host_helper(idx);
			if (ret < 0) {
				pr_err("add_host_helper k=%d, error=%d\n",
				       k, -ret);
				break;
			}
		} else {
			ret = sdebug_do_add_host(want_store &&
						 sdebug_per_host_store);
			if (ret < 0) {
				pr_err("add_host k=%d error=%d\n", k, -ret);
				break;
			}
		}
	}
	if (sdebug_verbose)
		pr_info("built %d host(s)\n", sdebug_num_hosts);

	return 0;

bus_unreg:
	bus_unregister(&pseudo_lld_bus);
dev_unreg:
	root_device_unregister(pseudo_primary);
free_vm:
	sdebug_erase_store(idx, NULL);
	return ret;
}

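/*
 * Module exit: tear down all simulated hosts, unregister the driver, bus
 * and root device, then release every backing store.
 */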
static void __exit scsi_debug_exit(void)
{
	int k = sdebug_num_hosts;

	for (; k; k--)
		sdebug_do_remove_host(true);
	driver_unregister(&sdebug_driverfs_driver);
	bus_unregister(&pseudo_lld_bus);
	root_device_unregister(pseudo_primary);

	sdebug_erase_all_stores(false);
	xa_destroy(per_store_ap);
	debugfs_remove(sdebug_debugfs_root);
}

device_initcall(scsi_debug_init);
module_exit(scsi_debug_exit);

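/* Release callback for the pseudo device embedded in sdebug_host_info */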
static void sdebug_release_adapter(struct device *dev)
{
	struct sdebug_host_info *sdbg_host;

	sdbg_host = dev_to_sdebug_host(dev);
	kfree(sdbg_host);
}

/* idx must be valid; if sip is NULL then it will be looked up using idx */
static void sdebug_erase_store(int idx, struct sdeb_store_info *sip)
{
	if (idx < 0)
		return;
	if (!sip) {
		if (xa_empty(per_store_ap))
			return;
		sip = xa_load(per_store_ap, idx);
		if (!sip)
			return;
	}
	vfree(sip->map_storep);
	vfree(sip->dif_storep);
	vfree(sip->storep);
	xa_erase(per_store_ap, idx);
	kfree(sip);
}

/* Assume apart_from_first==false only in shutdown case. */
static void sdebug_erase_all_stores(bool apart_from_first)
{
	unsigned long idx;
	struct sdeb_store_info *sip = NULL;

	xa_for_each(per_store_ap, idx, sip) {
		if (apart_from_first)
			apart_from_first = false;
		else
			sdebug_erase_store(idx, sip);
	}
	if (apart_from_first)
		sdeb_most_recent_idx = sdeb_first_idx;
}

/*
 * Returns store xarray new element index (idx) if >=0 else negated errno.
 * Limit the number of stores to 65536.
 */
static int sdebug_add_store(void)
{
	int res;
	u32 n_idx;
	unsigned long iflags;
	unsigned long sz = (unsigned long)sdebug_dev_size_mb * 1048576;
	struct sdeb_store_info *sip = NULL;
	struct xa_limit xal = { .max = 1 << 16, .min = 0 };

	sip = kzalloc(sizeof(*sip), GFP_KERNEL);
	if (!sip)
		return -ENOMEM;

	xa_lock_irqsave(per_store_ap, iflags);
	res = __xa_alloc(per_store_ap, &n_idx, sip, xal, GFP_ATOMIC);
	if (unlikely(res < 0)) {
		xa_unlock_irqrestore(per_store_ap, iflags);
		kfree(sip);
		pr_warn("%s: xa_alloc() errno=%d\n", __func__, -res);
		return res;
	}
	sdeb_most_recent_idx = n_idx;
	if (sdeb_first_idx < 0)
		sdeb_first_idx = n_idx;
	xa_unlock_irqrestore(per_store_ap, iflags);

	res = -ENOMEM;
	sip->storep = vzalloc(sz);
	if (!sip->storep) {
		pr_err("user data oom\n");
		goto err;
	}
	if (sdebug_num_parts > 0)
		sdebug_build_parts(sip->storep, sz);

	/* DIF/DIX: what T10 calls Protection Information (PI) */
	if (sdebug_dix) {
		int dif_size;

		dif_size = sdebug_store_sectors * sizeof(struct t10_pi_tuple);
		sip->dif_storep = vmalloc(dif_size);

		pr_info("dif_storep %u bytes @ %pK\n", dif_size,
			sip->dif_storep);

		if (!sip->dif_storep) {
			pr_err("DIX oom\n");
			goto err;
		}
		memset(sip->dif_storep, 0xff, dif_size);
	}
	/* Logical Block Provisioning */
	if (scsi_debug_lbp()) {
		map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
		sip->map_storep = vmalloc(array_size(sizeof(long),
						     BITS_TO_LONGS(map_size)));

		pr_info("%lu provisioning blocks\n", map_size);

		if (!sip->map_storep) {
			pr_err("LBP map oom\n");
			goto err;
		}

		bitmap_zero(sip->map_storep, map_size);

		/* Map first 1KB for partition table */
		if (sdebug_num_parts)
			map_region(sip, 0, 2);
	}

	rwlock_init(&sip->macc_data_lck);
	rwlock_init(&sip->macc_meta_lck);
	rwlock_init(&sip->macc_sector_lck);
	return (int)n_idx;
err:
	sdebug_erase_store((int)n_idx, sip);
	pr_warn("%s: failed, errno=%d\n", __func__, -res);
	return res;
}

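/*
 * Create a simulated host bound to the store at per_host_idx (or the first
 * store if per_host_idx is negative), populate its device list and register
 * it on the pseudo bus, which triggers sdebug_driver_probe().
 */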
static int sdebug_add_host_helper(int per_host_idx)
{
	int k, devs_per_host, idx;
	int error = -ENOMEM;
	struct sdebug_host_info *sdbg_host;
	struct sdebug_dev_info *sdbg_devinfo, *tmp;

	sdbg_host = kzalloc(sizeof(*sdbg_host), GFP_KERNEL);
	if (!sdbg_host)
		return -ENOMEM;
	idx = (per_host_idx < 0) ? sdeb_first_idx : per_host_idx;
	if (xa_get_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE))
		xa_clear_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
	sdbg_host->si_idx = idx;

	INIT_LIST_HEAD(&sdbg_host->dev_info_list);

	devs_per_host = sdebug_num_tgts * sdebug_max_luns;
	for (k = 0; k < devs_per_host; k++) {
		sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
		if (!sdbg_devinfo)
			goto clean;
	}

	mutex_lock(&sdebug_host_list_mutex);
	list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
	mutex_unlock(&sdebug_host_list_mutex);

	sdbg_host->dev.bus = &pseudo_lld_bus;
	sdbg_host->dev.parent = pseudo_primary;
	sdbg_host->dev.release = &sdebug_release_adapter;
	dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_num_hosts);

	error = device_register(&sdbg_host->dev);
	if (error) {
		mutex_lock(&sdebug_host_list_mutex);
		list_del(&sdbg_host->host_list);
		mutex_unlock(&sdebug_host_list_mutex);
		goto clean;
	}

	++sdebug_num_hosts;
	return 0;

clean:
	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
				 dev_list) {
		list_del(&sdbg_devinfo->dev_list);
		kfree(sdbg_devinfo->zstate);
		kfree(sdbg_devinfo);
	}
	if (sdbg_host->dev.release)
		put_device(&sdbg_host->dev);
	else
		kfree(sdbg_host);
	pr_warn("%s: failed, errno=%d\n", __func__, -error);
	return error;
}

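/* Add a host, optionally creating a new backing store for it first */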
static int sdebug_do_add_host(bool mk_new_store)
{
	int ph_idx = sdeb_most_recent_idx;

	if (mk_new_store) {
		ph_idx = sdebug_add_store();
		if (ph_idx < 0)
			return ph_idx;
	}
	return sdebug_add_host_helper(ph_idx);
}

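/*
 * Remove the most recently added host. Unless this is the final (module
 * exit) removal, mark the host's store as unused when no other host still
 * references it.
 */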
static void sdebug_do_remove_host(bool the_end)
{
	int idx = -1;
	struct sdebug_host_info *sdbg_host = NULL;
	struct sdebug_host_info *sdbg_host2;

	mutex_lock(&sdebug_host_list_mutex);
	if (!list_empty(&sdebug_host_list)) {
		sdbg_host = list_entry(sdebug_host_list.prev,
				       struct sdebug_host_info, host_list);
		idx = sdbg_host->si_idx;
	}
	if (!the_end && idx >= 0) {
		bool unique = true;

		list_for_each_entry(sdbg_host2, &sdebug_host_list, host_list) {
			if (sdbg_host2 == sdbg_host)
				continue;
			if (idx == sdbg_host2->si_idx) {
				unique = false;
				break;
			}
		}
		if (unique) {
			xa_set_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
			if (idx == sdeb_most_recent_idx)
				--sdeb_most_recent_idx;
		}
	}
	if (sdbg_host)
		list_del(&sdbg_host->host_list);
	mutex_unlock(&sdebug_host_list_mutex);

	if (!sdbg_host)
		return;

	device_unregister(&sdbg_host->dev);
	--sdebug_num_hosts;
}

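/* change_queue_depth callback: clamp qdepth to the range [1, SDEBUG_CANQUEUE] */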
static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
{
	struct sdebug_dev_info *devip = sdev->hostdata;

	if (!devip)
		return -ENODEV;

	mutex_lock(&sdebug_host_list_mutex);
	block_unblock_all_queues(true);

	if (qdepth > SDEBUG_CANQUEUE) {
		qdepth = SDEBUG_CANQUEUE;
		pr_warn("%s: requested qdepth [%d] exceeds canqueue [%d], trim\n", __func__,
			qdepth, SDEBUG_CANQUEUE);
	}
	if (qdepth < 1)
		qdepth = 1;
	if (qdepth != sdev->queue_depth)
		scsi_change_queue_depth(sdev, qdepth);

	block_unblock_all_queues(false);
	mutex_unlock(&sdebug_host_list_mutex);

	if (SDEBUG_OPT_Q_NOISE & sdebug_opts)
		sdev_printk(KERN_INFO, sdev, "%s: qdepth=%d\n", __func__, qdepth);

	return sdev->queue_depth;
}

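/*
 * When every_nth is set, decide whether this command should be dropped to
 * simulate a timeout (SDEBUG_OPT_TIMEOUT, or SDEBUG_OPT_MAC_TIMEOUT for
 * medium access commands only).
 */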
static bool fake_timeout(struct scsi_cmnd *scp)
{
	if (0 == (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth))) {
		if (sdebug_every_nth < -1)
			sdebug_every_nth = -1;
		if (SDEBUG_OPT_TIMEOUT & sdebug_opts)
			return true; /* ignore command causing timeout */
		else if (SDEBUG_OPT_MAC_TIMEOUT & sdebug_opts &&
			 scsi_medium_access_command(scp))
			return true; /* time out reads and writes */
	}
	return false;
}

/* Response to TUR or media access command when device stopped */
static int resp_not_ready(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	int stopped_state;
	u64 diff_ns = 0;
	ktime_t now_ts = ktime_get_boottime();
	struct scsi_device *sdp = scp->device;

	stopped_state = atomic_read(&devip->stopped);
	if (stopped_state == 2) {
		if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
			diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));
			if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
				/* tur_ms_to_ready timer extinguished */
				atomic_set(&devip->stopped, 0);
				return 0;
			}
		}
		mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x1);
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, sdp,
				    "%s: Not ready: in process of becoming ready\n", my_name);
		if (scp->cmnd[0] == TEST_UNIT_READY) {
			u64 tur_nanosecs_to_ready = (u64)sdeb_tur_ms_to_ready * 1000000;

			if (diff_ns <= tur_nanosecs_to_ready)
				diff_ns = tur_nanosecs_to_ready - diff_ns;
			else
				diff_ns = tur_nanosecs_to_ready;
			/* As per 20-061r2 approved for spc6 by T10 on 20200716 */
			do_div(diff_ns, 1000000); /* diff_ns becomes milliseconds */
			scsi_set_sense_information(scp->sense_buffer, SCSI_SENSE_BUFFERSIZE,
						   diff_ns);
			return check_condition_result;
		}
	}
	mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x2);
	if (sdebug_verbose)
		sdev_printk(KERN_INFO, sdp, "%s: Not ready: initializing command required\n",
			    my_name);
	return check_condition_result;
}

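/* map_queues callback: split hardware queues between default and poll types */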
static void sdebug_map_queues(struct Scsi_Host *shost)
{
	int i, qoff;

	if (shost->nr_hw_queues == 1)
		return;

	for (i = 0, qoff = 0; i < HCTX_MAX_TYPES; i++) {
		struct blk_mq_queue_map *map = &shost->tag_set.map[i];

		map->nr_queues = 0;

		if (i == HCTX_TYPE_DEFAULT)
			map->nr_queues = submit_queues - poll_queues;
		else if (i == HCTX_TYPE_POLL)
			map->nr_queues = poll_queues;

		if (!map->nr_queues) {
			BUG_ON(i == HCTX_TYPE_DEFAULT);
			continue;
		}

		map->queue_offset = qoff;
		blk_mq_map_queues(map);

		qoff += map->nr_queues;
	}
}

struct sdebug_blk_mq_poll_data {
	unsigned int queue_num;
	int *num_entries;
};

/*
 * We don't handle aborted commands here, but it does not seem possible to
 * have aborted polled commands from schedule_resp().
 */
static bool sdebug_blk_mq_poll_iter(struct request *rq, void *opaque)
{
	struct sdebug_blk_mq_poll_data *data = opaque;
	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
	struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmd);
	struct sdebug_defer *sd_dp;
	u32 unique_tag = blk_mq_unique_tag(rq);
	u16 hwq = blk_mq_unique_tag_to_hwq(unique_tag);
	unsigned long flags;
	int queue_num = data->queue_num;
	ktime_t time;

	/* We're only interested in one queue for this iteration */
	if (hwq != queue_num)
		return true;

	/* Subsequent checks would fail if this failed, but check anyway */
	if (!test_bit(SCMD_STATE_INFLIGHT, &cmd->state))
		return true;

	time = ktime_get_boottime();

	spin_lock_irqsave(&sdsc->lock, flags);
	sd_dp = &sdsc->sd_dp;
	if (READ_ONCE(sd_dp->defer_t) != SDEB_DEFER_POLL) {
		spin_unlock_irqrestore(&sdsc->lock, flags);
		return true;
	}

	if (time < sd_dp->cmpl_ts) {
		spin_unlock_irqrestore(&sdsc->lock, flags);
		return true;
	}
	spin_unlock_irqrestore(&sdsc->lock, flags);

	if (sdebug_statistics) {
		atomic_inc(&sdebug_completions);
		if (raw_smp_processor_id() != sd_dp->issuing_cpu)
			atomic_inc(&sdebug_miss_cpus);
	}

	scsi_done(cmd); /* callback to mid level */
	(*data->num_entries)++;
	return true;
}

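/* mq_poll callback: complete any due deferred commands on this hw queue */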
static int sdebug_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
{
	int num_entries = 0;
	struct sdebug_blk_mq_poll_data data = {
		.queue_num = queue_num,
		.num_entries = &num_entries,
	};

	blk_mq_tagset_busy_iter(&shost->tag_set, sdebug_blk_mq_poll_iter,
				&data);

	if (num_entries > 0)
		atomic_add(num_entries, &sdeb_mq_poll_count);
	return num_entries;
}

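/*
 * Error injection: returns non-zero if an ERR_TMOUT_CMD rule matches this
 * command's opcode and its count is still active, in which case the caller
 * silently drops the command to simulate a timeout.
 */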
static int sdebug_timeout_cmd(struct scsi_cmnd *cmnd)
{
	struct scsi_device *sdp = cmnd->device;
	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
	struct sdebug_err_inject *err;
	unsigned char *cmd = cmnd->cmnd;
	int ret = 0;

	if (devip == NULL)
		return 0;

	rcu_read_lock();
	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
		if (err->type == ERR_TMOUT_CMD &&
		    (err->cmd == cmd[0] || err->cmd == 0xff)) {
			ret = !!err->cnt;
			if (err->cnt < 0)
				err->cnt++;

			rcu_read_unlock();
			return ret;
		}
	}
	rcu_read_unlock();

	return 0;
}

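/*
 * Error injection: if an ERR_FAIL_QUEUE_CMD rule matches this command's
 * opcode, return the configured queuecommand return value instead of
 * queueing the command.
 */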
static int sdebug_fail_queue_cmd(struct scsi_cmnd *cmnd)
{
	struct scsi_device *sdp = cmnd->device;
	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
	struct sdebug_err_inject *err;
	unsigned char *cmd = cmnd->cmnd;
	int ret = 0;

	if (devip == NULL)
		return 0;

	rcu_read_lock();
	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
		if (err->type == ERR_FAIL_QUEUE_CMD &&
		    (err->cmd == cmd[0] || err->cmd == 0xff)) {
			ret = err->cnt ? err->queuecmd_ret : 0;
			if (err->cnt < 0)
				err->cnt++;

			rcu_read_unlock();
			return ret;
		}
	}
	rcu_read_unlock();

	return 0;
}

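/*
 * Error injection: if an ERR_FAIL_CMD rule matches this command's opcode,
 * build the configured sense data and result and schedule the response.
 * Returns non-zero when the command has been handled here.
 */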
static int sdebug_fail_cmd(struct scsi_cmnd *cmnd, int *retval,
			   struct sdebug_err_inject *info)
{
	struct scsi_device *sdp = cmnd->device;
	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
	struct sdebug_err_inject *err;
	unsigned char *cmd = cmnd->cmnd;
	int ret = 0;
	int result;

	if (devip == NULL)
		return 0;

	rcu_read_lock();
	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
		if (err->type == ERR_FAIL_CMD &&
		    (err->cmd == cmd[0] || err->cmd == 0xff)) {
			if (!err->cnt) {
				rcu_read_unlock();
				return 0;
			}

			ret = !!err->cnt;
			rcu_read_unlock();
			goto out_handle;
		}
	}
	rcu_read_unlock();

	return 0;

out_handle:
	if (err->cnt < 0)
		err->cnt++;
	mk_sense_buffer(cmnd, err->sense_key, err->asc, err->asq);
	result = err->status_byte | err->host_byte << 16 | err->driver_byte << 24;
	*info = *err;
	*retval = schedule_resp(cmnd, devip, result, NULL, 0, 0);

	return ret;
}

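/*
 * queuecommand callback: decode the CDB via the opcode table, apply unit
 * attention, not-ready and error-injection processing, then schedule the
 * matching resp_*() handler with the configured delay.
 */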
static int scsi_debug_queuecommand(struct Scsi_Host *shost,
				   struct scsi_cmnd *scp)
{
	u8 sdeb_i;
	struct scsi_device *sdp = scp->device;
	const struct opcode_info_t *oip;
	const struct opcode_info_t *r_oip;
	struct sdebug_dev_info *devip;
	u8 *cmd = scp->cmnd;
	int (*r_pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *) = NULL;
	int k, na;
	int errsts = 0;
	u64 lun_index = sdp->lun & 0x3FFF;
	u32 flags;
	u16 sa;
	u8 opcode = cmd[0];
	bool has_wlun_rl;
	bool inject_now;
	int ret = 0;
	struct sdebug_err_inject err;

	scsi_set_resid(scp, 0);
	if (sdebug_statistics) {
		atomic_inc(&sdebug_cmnd_count);
		inject_now = inject_on_this_cmd();
	} else {
		inject_now = false;
	}
	if (unlikely(sdebug_verbose &&
		     !(SDEBUG_OPT_NO_CDB_NOISE & sdebug_opts))) {
		char b[120];
		int n, len, sb;

		len = scp->cmd_len;
		sb = (int)sizeof(b);
		if (len > 32)
			strcpy(b, "too long, over 32 bytes");
		else {
			for (k = 0, n = 0; k < len && n < sb; ++k)
				n += scnprintf(b + n, sb - n, "%02x ",
					       (u32)cmd[k]);
		}
		sdev_printk(KERN_INFO, sdp, "%s: tag=%#x, cmd %s\n", my_name,
			    blk_mq_unique_tag(scsi_cmd_to_rq(scp)), b);
	}
	if (unlikely(inject_now && (sdebug_opts & SDEBUG_OPT_HOST_BUSY)))
		return SCSI_MLQUEUE_HOST_BUSY;
	has_wlun_rl = (sdp->lun == SCSI_W_LUN_REPORT_LUNS);
	if (unlikely(lun_index >= sdebug_max_luns && !has_wlun_rl))
		goto err_out;

	sdeb_i = opcode_ind_arr[opcode];	/* fully mapped */
	oip = &opcode_info_arr[sdeb_i];		/* safe if table consistent */
	devip = (struct sdebug_dev_info *)sdp->hostdata;
	if (unlikely(!devip)) {
		devip = find_build_dev_info(sdp);
		if (NULL == devip)
			goto err_out;
	}

	if (sdebug_timeout_cmd(scp)) {
		scmd_printk(KERN_INFO, scp, "timeout command 0x%x\n", opcode);
		return 0;
	}

	ret = sdebug_fail_queue_cmd(scp);
	if (ret) {
		scmd_printk(KERN_INFO, scp, "fail queue command 0x%x with 0x%x\n",
			    opcode, ret);
		return ret;
	}

	if (sdebug_fail_cmd(scp, &ret, &err)) {
		scmd_printk(KERN_INFO, scp,
			    "fail command 0x%x with hostbyte=0x%x, "
			    "driverbyte=0x%x, statusbyte=0x%x, "
			    "sense_key=0x%x, asc=0x%x, asq=0x%x\n",
			    opcode, err.host_byte, err.driver_byte,
			    err.status_byte, err.sense_key, err.asc, err.asq);
		return ret;
	}

	if (unlikely(inject_now && !atomic_read(&sdeb_inject_pending)))
		atomic_set(&sdeb_inject_pending, 1);

	na = oip->num_attached;
	r_pfp = oip->pfp;
	if (na) {	/* multiple commands with this opcode */
		r_oip = oip;
		if (FF_SA & r_oip->flags) {
			if (F_SA_LOW & oip->flags)
				sa = 0x1f & cmd[1];
			else
				sa = get_unaligned_be16(cmd + 8);
			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
				if (opcode == oip->opcode && sa == oip->sa)
					break;
			}
		} else {	/* since no service action only check opcode */
			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
				if (opcode == oip->opcode)
					break;
			}
		}
		if (k > na) {
			if (F_SA_LOW & r_oip->flags)
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 4);
			else if (F_SA_HIGH & r_oip->flags)
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, 7);
			else
				mk_sense_invalid_opcode(scp);
			goto check_cond;
		}
	}	/* else (when na==0) we assume the oip is a match */
	flags = oip->flags;
	if (unlikely(F_INV_OP & flags)) {
		mk_sense_invalid_opcode(scp);
		goto check_cond;
	}
	if (unlikely(has_wlun_rl && !(F_RL_WLUN_OK & flags))) {
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, sdp, "%s: Opcode 0x%x not%s\n",
				    my_name, opcode, " supported for wlun");
		mk_sense_invalid_opcode(scp);
		goto check_cond;
	}
	if (unlikely(sdebug_strict)) {	/* check cdb against mask */
		u8 rem;
		int j;

		for (k = 1; k < oip->len_mask[0] && k < 16; ++k) {
			rem = ~oip->len_mask[k] & cmd[k];
			if (rem) {
				for (j = 7; j >= 0; --j, rem <<= 1) {
					if (0x80 & rem)
						break;
				}
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, k, j);
				goto check_cond;
			}
		}
	}
	if (unlikely(!(F_SKIP_UA & flags) &&
		     find_first_bit(devip->uas_bm,
				    SDEBUG_NUM_UAS) != SDEBUG_NUM_UAS)) {
		errsts = make_ua(scp, devip);
		if (errsts)
			goto check_cond;
	}
	if (unlikely(((F_M_ACCESS & flags) || scp->cmnd[0] == TEST_UNIT_READY) &&
		     atomic_read(&devip->stopped))) {
		errsts = resp_not_ready(scp, devip);
		if (errsts)
			goto fini;
	}
	if (sdebug_fake_rw && (F_FAKE_RW & flags))
		goto fini;
	if (unlikely(sdebug_every_nth)) {
		if (fake_timeout(scp))
			return 0;	/* ignore command: make trouble */
	}
	if (likely(oip->pfp))
		pfp = oip->pfp;	/* calls a resp_* function */
	else
		pfp = r_pfp;	/* if leaf function ptr NULL, try the root's */

fini:
	if (F_DELAY_OVERR & flags)	/* cmds like INQUIRY respond asap */
		return schedule_resp(scp, devip, errsts, pfp, 0, 0);
	else if ((flags & F_LONG_DELAY) && (sdebug_jdelay > 0 ||
					    sdebug_ndelay > 10000)) {
		/*
		 * Skip long delays if ndelay <= 10 microseconds. Otherwise,
		 * for Start Stop Unit (SSU) we want a delay of at least
		 * 1 second, and of sdebug_jdelay seconds when sdebug_jdelay
		 * is greater than 1. For Synchronize Cache we want 1/20 of
		 * the SSU delay.
		 */
		int jdelay = (sdebug_jdelay < 2) ? 1 : sdebug_jdelay;
		int denom = (flags & F_SYNC_DELAY) ? 20 : 1;

		jdelay = mult_frac(USER_HZ * jdelay, HZ, denom * USER_HZ);
		return schedule_resp(scp, devip, errsts, pfp, jdelay, 0);
	} else
		return schedule_resp(scp, devip, errsts, pfp, sdebug_jdelay,
				     sdebug_ndelay);
check_cond:
	return schedule_resp(scp, devip, check_condition_result, NULL, 0, 0);
err_out:
	return schedule_resp(scp, NULL, DID_NO_CONNECT << 16, NULL, 0, 0);
}

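/* init_cmd_priv callback: set up the per-command lock, timer and work item */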
static int sdebug_init_cmd_priv(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
{
	struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmd);
	struct sdebug_defer *sd_dp = &sdsc->sd_dp;

	spin_lock_init(&sdsc->lock);
	hrtimer_setup(&sd_dp->hrt, sdebug_q_cmd_hrt_complete, CLOCK_MONOTONIC,
		      HRTIMER_MODE_REL_PINNED);
	INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);

	return 0;
}

static const struct scsi_host_template sdebug_driver_template = {
	.show_info = scsi_debug_show_info,
	.write_info = scsi_debug_write_info,
	.proc_name = sdebug_proc_name,
	.name = "SCSI DEBUG",
	.info = scsi_debug_info,
	.sdev_init = scsi_debug_sdev_init,
	.sdev_configure = scsi_debug_sdev_configure,
	.sdev_destroy = scsi_debug_sdev_destroy,
	.ioctl = scsi_debug_ioctl,
	.queuecommand = scsi_debug_queuecommand,
	.change_queue_depth = sdebug_change_qdepth,
	.map_queues = sdebug_map_queues,
	.mq_poll = sdebug_blk_mq_poll,
	.eh_abort_handler = scsi_debug_abort,
	.eh_device_reset_handler = scsi_debug_device_reset,
	.eh_target_reset_handler = scsi_debug_target_reset,
	.eh_bus_reset_handler = scsi_debug_bus_reset,
	.eh_host_reset_handler = scsi_debug_host_reset,
	.can_queue = SDEBUG_CANQUEUE,
	.this_id = 7,
	.sg_tablesize = SG_MAX_SEGMENTS,
	.cmd_per_lun = DEF_CMD_PER_LUN,
	.max_sectors = -1U,
	.max_segment_size = -1U,
	.module = THIS_MODULE,
	.skip_settle_delay = 1,
	.track_queue_depth = 1,
	.cmd_size = sizeof(struct sdebug_scsi_cmd),
	.init_cmd_priv = sdebug_init_cmd_priv,
	.target_alloc = sdebug_target_alloc,
	.target_destroy = sdebug_target_destroy,
};

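/*
 * Bus probe callback: allocate a Scsi_Host for the pseudo adapter, size its
 * queues, configure DIF/DIX protection and guard type, then add and scan
 * the host.
 */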
static int sdebug_driver_probe(struct device *dev)
{
	int error = 0;
	struct sdebug_host_info *sdbg_host;
	struct Scsi_Host *hpnt;
	int hprot;

	sdbg_host = dev_to_sdebug_host(dev);

	hpnt = scsi_host_alloc(&sdebug_driver_template, 0);
	if (NULL == hpnt) {
		pr_err("scsi_host_alloc failed\n");
		error = -ENODEV;
		return error;
	}
	hpnt->can_queue = sdebug_max_queue;
	hpnt->cmd_per_lun = sdebug_max_queue;
	if (!sdebug_clustering)
		hpnt->dma_boundary = PAGE_SIZE - 1;

	if (submit_queues > nr_cpu_ids) {
		pr_warn("%s: trim submit_queues (was %d) to nr_cpu_ids=%u\n",
			my_name, submit_queues, nr_cpu_ids);
		submit_queues = nr_cpu_ids;
	}
	/*
	 * Decide whether to tell scsi subsystem that we want mq. The
	 * following should give the same answer for each host.
	 */
	hpnt->nr_hw_queues = submit_queues;
	if (sdebug_host_max_queue)
		hpnt->host_tagset = 1;

	/* poll queues are possible for nr_hw_queues > 1 */
	if (hpnt->nr_hw_queues == 1 || (poll_queues < 1)) {
		pr_warn("%s: trim poll_queues to 0. poll_q/nr_hw = (%d/%d)\n",
			my_name, poll_queues, hpnt->nr_hw_queues);
		poll_queues = 0;
	}

	/*
	 * Poll queues don't need interrupts, but we need at least one I/O
	 * queue left over for non-polled I/O. If that condition is not met,
	 * trim poll_queues to 1 (just for simplicity).
	 */
	if (poll_queues >= submit_queues) {
		if (submit_queues < 3)
			pr_warn("%s: trim poll_queues to 1\n", my_name);
		else
			pr_warn("%s: trim poll_queues to 1. Perhaps try poll_queues=%d\n",
				my_name, submit_queues - 1);
		poll_queues = 1;
	}
	if (poll_queues)
		hpnt->nr_maps = 3;

	sdbg_host->shost = hpnt;
	if ((hpnt->this_id >= 0) && (sdebug_num_tgts > hpnt->this_id))
		hpnt->max_id = sdebug_num_tgts + 1;
	else
		hpnt->max_id = sdebug_num_tgts;
	/* = sdebug_max_luns; */
	hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;

	hprot = 0;

	switch (sdebug_dif) {

	case T10_PI_TYPE1_PROTECTION:
		hprot = SHOST_DIF_TYPE1_PROTECTION;
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE1_PROTECTION;
		break;

	case T10_PI_TYPE2_PROTECTION:
		hprot = SHOST_DIF_TYPE2_PROTECTION;
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE2_PROTECTION;
		break;

	case T10_PI_TYPE3_PROTECTION:
		hprot = SHOST_DIF_TYPE3_PROTECTION;
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE3_PROTECTION;
		break;

	default:
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE0_PROTECTION;
		break;
	}

	scsi_host_set_prot(hpnt, hprot);

	if (have_dif_prot || sdebug_dix)
		pr_info("host protection%s%s%s%s%s%s%s\n",
			(hprot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
			(hprot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
			(hprot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
			(hprot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
			(hprot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
			(hprot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
			(hprot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");

	if (sdebug_guard == 1)
		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
	else
		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);

	sdebug_verbose = !!(SDEBUG_OPT_NOISE & sdebug_opts);
	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & sdebug_opts);
	if (sdebug_every_nth)	/* need stats counters for every_nth */
		sdebug_statistics = true;
	error = scsi_add_host(hpnt, &sdbg_host->dev);
	if (error) {
		pr_err("scsi_add_host failed\n");
		error = -ENODEV;
		scsi_host_put(hpnt);
	} else {
		scsi_scan_host(hpnt);
	}

	return error;
}

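/*
 * Bus remove callback: remove the Scsi_Host and free the per-device state
 * attached to this pseudo adapter.
 */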
static void sdebug_driver_remove(struct device *dev)
{
	struct sdebug_host_info *sdbg_host;
	struct sdebug_dev_info *sdbg_devinfo, *tmp;

	sdbg_host = dev_to_sdebug_host(dev);

	scsi_remove_host(sdbg_host->shost);

	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
				 dev_list) {
		list_del(&sdbg_devinfo->dev_list);
		kfree(sdbg_devinfo->zstate);
		kfree(sdbg_devinfo);
	}

	scsi_host_put(sdbg_host->shost);
}

static const struct bus_type pseudo_lld_bus = {
	.name = "pseudo",
	.probe = sdebug_driver_probe,
	.remove = sdebug_driver_remove,
	.drv_groups = sdebug_drv_groups,
};
