xref: /linux/drivers/scsi/scsi_debug.c (revision d91517839e5d95adc0cf4b28caa7af62a71de526)
1 /*
2  * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
3  *  Copyright (C) 1992  Eric Youngdale
4  *  Simulate a host adapter with 2 disks attached.  Do a lot of checking
5  *  to make sure that we are not getting blocks mixed up, and PANIC if
6  *  anything out of the ordinary is seen.
7  * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
8  *
9  *  This version is more generic, simulating a variable number of disk
10  *  (or disk like devices) sharing a common amount of RAM. To be more
11  *  realistic, the simulated devices have the transport attributes of
12  *  SAS disks.
13  *
14  *
15  *  For documentation see http://sg.danny.cz/sg/sdebug26.html
16  *
17  *   D. Gilbert (dpg) work for Magneto-Optical device test [20010421]
18  *   dpg: work for devfs large number of disks [20010809]
19  *        forked for lk 2.5 series [20011216, 20020101]
20  *        use vmalloc() more inquiry+mode_sense [20020302]
21  *        add timers for delayed responses [20020721]
22  *   Patrick Mansfield <patmans@us.ibm.com> max_luns+scsi_level [20021031]
23  *   Mike Anderson <andmike@us.ibm.com> sysfs work [20021118]
24  *   dpg: change style of boot options to "scsi_debug.num_tgts=2" and
25  *        module options to "modprobe scsi_debug num_tgts=2" [20021221]
26  */
27 
28 #include <linux/module.h>
29 
30 #include <linux/kernel.h>
31 #include <linux/errno.h>
32 #include <linux/timer.h>
33 #include <linux/slab.h>
34 #include <linux/types.h>
35 #include <linux/string.h>
36 #include <linux/genhd.h>
37 #include <linux/fs.h>
38 #include <linux/init.h>
39 #include <linux/proc_fs.h>
40 #include <linux/vmalloc.h>
41 #include <linux/moduleparam.h>
42 #include <linux/scatterlist.h>
43 #include <linux/blkdev.h>
44 #include <linux/crc-t10dif.h>
45 
46 #include <net/checksum.h>
47 
48 #include <asm/unaligned.h>
49 
50 #include <scsi/scsi.h>
51 #include <scsi/scsi_cmnd.h>
52 #include <scsi/scsi_device.h>
53 #include <scsi/scsi_host.h>
54 #include <scsi/scsicam.h>
55 #include <scsi/scsi_eh.h>
56 #include <scsi/scsi_dbg.h>
57 
58 #include "sd.h"
59 #include "scsi_logging.h"
60 
61 #define SCSI_DEBUG_VERSION "1.82"
62 static const char * scsi_debug_version_date = "20100324";
63 
64 /* Additional Sense Code (ASC) */
65 #define NO_ADDITIONAL_SENSE 0x0
66 #define LOGICAL_UNIT_NOT_READY 0x4
67 #define UNRECOVERED_READ_ERR 0x11
68 #define PARAMETER_LIST_LENGTH_ERR 0x1a
69 #define INVALID_OPCODE 0x20
70 #define ADDR_OUT_OF_RANGE 0x21
71 #define INVALID_COMMAND_OPCODE 0x20
72 #define INVALID_FIELD_IN_CDB 0x24
73 #define INVALID_FIELD_IN_PARAM_LIST 0x26
74 #define POWERON_RESET 0x29
75 #define SAVING_PARAMS_UNSUP 0x39
76 #define TRANSPORT_PROBLEM 0x4b
77 #define THRESHOLD_EXCEEDED 0x5d
78 #define LOW_POWER_COND_ON 0x5e
79 
80 /* Additional Sense Code Qualifier (ASCQ) */
81 #define ACK_NAK_TO 0x3
82 
83 #define SDEBUG_TAGGED_QUEUING 0 /* 0 | MSG_SIMPLE_TAG | MSG_ORDERED_TAG */
84 
85 /* Default values for driver parameters */
86 #define DEF_NUM_HOST   1
87 #define DEF_NUM_TGTS   1
88 #define DEF_MAX_LUNS   1
89 /* With these defaults, this driver will make 1 host with 1 target
90  * (id 0) containing 1 logical unit (lun 0). That is 1 device.
91  */
92 #define DEF_ATO 1
93 #define DEF_DELAY   1
94 #define DEF_DEV_SIZE_MB   8
95 #define DEF_DIF 0
96 #define DEF_DIX 0
97 #define DEF_D_SENSE   0
98 #define DEF_EVERY_NTH   0
99 #define DEF_FAKE_RW	0
100 #define DEF_GUARD 0
101 #define DEF_LBPU 0
102 #define DEF_LBPWS 0
103 #define DEF_LBPWS10 0
104 #define DEF_LBPRZ 1
105 #define DEF_LOWEST_ALIGNED 0
106 #define DEF_NO_LUN_0   0
107 #define DEF_NUM_PARTS   0
108 #define DEF_OPTS   0
109 #define DEF_OPT_BLKS 64
110 #define DEF_PHYSBLK_EXP 0
111 #define DEF_PTYPE   0
112 #define DEF_REMOVABLE false
113 #define DEF_SCSI_LEVEL   5    /* INQUIRY, byte2 [5->SPC-3] */
114 #define DEF_SECTOR_SIZE 512
115 #define DEF_UNMAP_ALIGNMENT 0
116 #define DEF_UNMAP_GRANULARITY 1
117 #define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
118 #define DEF_UNMAP_MAX_DESC 256
119 #define DEF_VIRTUAL_GB   0
120 #define DEF_VPD_USE_HOSTNO 1
121 #define DEF_WRITESAME_LENGTH 0xFFFF
122 
123 /* bit mask values for scsi_debug_opts */
124 #define SCSI_DEBUG_OPT_NOISE   1
125 #define SCSI_DEBUG_OPT_MEDIUM_ERR   2
126 #define SCSI_DEBUG_OPT_TIMEOUT   4
127 #define SCSI_DEBUG_OPT_RECOVERED_ERR   8
128 #define SCSI_DEBUG_OPT_TRANSPORT_ERR   16
129 #define SCSI_DEBUG_OPT_DIF_ERR   32
130 #define SCSI_DEBUG_OPT_DIX_ERR   64
131 #define SCSI_DEBUG_OPT_MAC_TIMEOUT  128
132 /* When "every_nth" > 0 then modulo "every_nth" commands:
133  *   - a no response is simulated if SCSI_DEBUG_OPT_TIMEOUT is set
134  *   - a RECOVERED_ERROR is simulated on successful read and write
135  *     commands if SCSI_DEBUG_OPT_RECOVERED_ERR is set.
136  *   - a TRANSPORT_ERROR is simulated on successful read and write
137  *     commands if SCSI_DEBUG_OPT_TRANSPORT_ERR is set.
138  *
139  * When "every_nth" < 0 then after "- every_nth" commands:
140  *   - a no response is simulated if SCSI_DEBUG_OPT_TIMEOUT is set
141  *   - a RECOVERED_ERROR is simulated on successful read and write
142  *     commands if SCSI_DEBUG_OPT_RECOVERED_ERR is set.
143  *   - a TRANSPORT_ERROR is simulated on successful read and write
144  *     commands if SCSI_DEBUG_OPT_TRANSPORT_ERR is set.
145  * This will continue until some other action occurs (e.g. the user
146  * writing a new value (other than -1 or 1) to every_nth via sysfs).
147  */
148 
149 /* when 1==SCSI_DEBUG_OPT_MEDIUM_ERR, a medium error is simulated at this
150  * sector on read commands: */
151 #define OPT_MEDIUM_ERR_ADDR   0x1234 /* that's sector 4660 in decimal */
152 #define OPT_MEDIUM_ERR_NUM    10     /* number of consecutive medium errs */
153 
154 /* If REPORT LUNS has luns >= 256 it can choose "flat space" (value 1)
155  * or "peripheral device" addressing (value 0) */
156 #define SAM2_LUN_ADDRESS_METHOD 0
157 #define SAM2_WLUN_REPORT_LUNS 0xc101
158 
159 /* Can queue up to this number of commands. Typically commands that
160  * that have a non-zero delay are queued. */
161 #define SCSI_DEBUG_CANQUEUE  255
162 
163 static int scsi_debug_add_host = DEF_NUM_HOST;
164 static int scsi_debug_ato = DEF_ATO;
165 static int scsi_debug_delay = DEF_DELAY;
166 static int scsi_debug_dev_size_mb = DEF_DEV_SIZE_MB;
167 static int scsi_debug_dif = DEF_DIF;
168 static int scsi_debug_dix = DEF_DIX;
169 static int scsi_debug_dsense = DEF_D_SENSE;
170 static int scsi_debug_every_nth = DEF_EVERY_NTH;
171 static int scsi_debug_fake_rw = DEF_FAKE_RW;
172 static unsigned int scsi_debug_guard = DEF_GUARD;
173 static int scsi_debug_lowest_aligned = DEF_LOWEST_ALIGNED;
174 static int scsi_debug_max_luns = DEF_MAX_LUNS;
175 static int scsi_debug_max_queue = SCSI_DEBUG_CANQUEUE;
176 static int scsi_debug_no_lun_0 = DEF_NO_LUN_0;
177 static int scsi_debug_no_uld = 0;
178 static int scsi_debug_num_parts = DEF_NUM_PARTS;
179 static int scsi_debug_num_tgts = DEF_NUM_TGTS; /* targets per host */
180 static int scsi_debug_opt_blks = DEF_OPT_BLKS;
181 static int scsi_debug_opts = DEF_OPTS;
182 static int scsi_debug_physblk_exp = DEF_PHYSBLK_EXP;
183 static int scsi_debug_ptype = DEF_PTYPE; /* SCSI peripheral type (0==disk) */
184 static int scsi_debug_scsi_level = DEF_SCSI_LEVEL;
185 static int scsi_debug_sector_size = DEF_SECTOR_SIZE;
186 static int scsi_debug_virtual_gb = DEF_VIRTUAL_GB;
187 static int scsi_debug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
188 static unsigned int scsi_debug_lbpu = DEF_LBPU;
189 static unsigned int scsi_debug_lbpws = DEF_LBPWS;
190 static unsigned int scsi_debug_lbpws10 = DEF_LBPWS10;
191 static unsigned int scsi_debug_lbprz = DEF_LBPRZ;
192 static unsigned int scsi_debug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
193 static unsigned int scsi_debug_unmap_granularity = DEF_UNMAP_GRANULARITY;
194 static unsigned int scsi_debug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
195 static unsigned int scsi_debug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
196 static unsigned int scsi_debug_write_same_length = DEF_WRITESAME_LENGTH;
197 static bool scsi_debug_removable = DEF_REMOVABLE;
198 
199 static int scsi_debug_cmnd_count = 0;
200 
201 #define DEV_READONLY(TGT)      (0)
202 
203 static unsigned int sdebug_store_sectors;
204 static sector_t sdebug_capacity;	/* in sectors */
205 
206 /* old BIOS stuff, kernel may get rid of them but some mode sense pages
207    may still need them */
208 static int sdebug_heads;		/* heads per disk */
209 static int sdebug_cylinders_per;	/* cylinders per surface */
210 static int sdebug_sectors_per;		/* sectors per cylinder */
211 
212 #define SDEBUG_MAX_PARTS 4
213 
214 #define SDEBUG_SENSE_LEN 32
215 
216 #define SCSI_DEBUG_MAX_CMD_LEN 32
217 
218 static unsigned int scsi_debug_lbp(void)
219 {
220 	return scsi_debug_lbpu | scsi_debug_lbpws | scsi_debug_lbpws10;
221 }
222 
223 struct sdebug_dev_info {
224 	struct list_head dev_list;
225 	unsigned char sense_buff[SDEBUG_SENSE_LEN];	/* weak nexus */
226 	unsigned int channel;
227 	unsigned int target;
228 	unsigned int lun;
229 	struct sdebug_host_info *sdbg_host;
230 	unsigned int wlun;
231 	char reset;
232 	char stopped;
233 	char used;
234 };
235 
236 struct sdebug_host_info {
237 	struct list_head host_list;
238 	struct Scsi_Host *shost;
239 	struct device dev;
240 	struct list_head dev_info_list;
241 };
242 
243 #define to_sdebug_host(d)	\
244 	container_of(d, struct sdebug_host_info, dev)
245 
246 static LIST_HEAD(sdebug_host_list);
247 static DEFINE_SPINLOCK(sdebug_host_list_lock);
248 
249 typedef void (* done_funct_t) (struct scsi_cmnd *);
250 
251 struct sdebug_queued_cmd {
252 	int in_use;
253 	struct timer_list cmnd_timer;
254 	done_funct_t done_funct;
255 	struct scsi_cmnd * a_cmnd;
256 	int scsi_result;
257 };
258 static struct sdebug_queued_cmd queued_arr[SCSI_DEBUG_CANQUEUE];
259 
260 static unsigned char * fake_storep;	/* ramdisk storage */
261 static struct sd_dif_tuple *dif_storep;	/* protection info */
262 static void *map_storep;		/* provisioning map */
263 
264 static unsigned long map_size;
265 static int num_aborts = 0;
266 static int num_dev_resets = 0;
267 static int num_bus_resets = 0;
268 static int num_host_resets = 0;
269 static int dix_writes;
270 static int dix_reads;
271 static int dif_errors;
272 
273 static DEFINE_SPINLOCK(queued_arr_lock);
274 static DEFINE_RWLOCK(atomic_rw);
275 
276 static char sdebug_proc_name[] = "scsi_debug";
277 
278 static struct bus_type pseudo_lld_bus;
279 
280 static struct device_driver sdebug_driverfs_driver = {
281 	.name 		= sdebug_proc_name,
282 	.bus		= &pseudo_lld_bus,
283 };
284 
285 static const int check_condition_result =
286 		(DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
287 
288 static const int illegal_condition_result =
289 	(DRIVER_SENSE << 24) | (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;
290 
291 static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
292 				    0, 0, 0x2, 0x4b};
293 static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
294 			           0, 0, 0x0, 0x0};
295 
296 static void *fake_store(unsigned long long lba)
297 {
298 	lba = do_div(lba, sdebug_store_sectors);
299 
300 	return fake_storep + lba * scsi_debug_sector_size;
301 }
302 
303 static struct sd_dif_tuple *dif_store(sector_t sector)
304 {
305 	sector = do_div(sector, sdebug_store_sectors);
306 
307 	return dif_storep + sector;
308 }
309 
310 static int sdebug_add_adapter(void);
311 static void sdebug_remove_adapter(void);
312 
313 static void sdebug_max_tgts_luns(void)
314 {
315 	struct sdebug_host_info *sdbg_host;
316 	struct Scsi_Host *hpnt;
317 
318 	spin_lock(&sdebug_host_list_lock);
319 	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
320 		hpnt = sdbg_host->shost;
321 		if ((hpnt->this_id >= 0) &&
322 		    (scsi_debug_num_tgts > hpnt->this_id))
323 			hpnt->max_id = scsi_debug_num_tgts + 1;
324 		else
325 			hpnt->max_id = scsi_debug_num_tgts;
326 		/* scsi_debug_max_luns; */
327 		hpnt->max_lun = SAM2_WLUN_REPORT_LUNS;
328 	}
329 	spin_unlock(&sdebug_host_list_lock);
330 }
331 
332 static void mk_sense_buffer(struct sdebug_dev_info *devip, int key,
333 			    int asc, int asq)
334 {
335 	unsigned char *sbuff;
336 
337 	sbuff = devip->sense_buff;
338 	memset(sbuff, 0, SDEBUG_SENSE_LEN);
339 
340 	scsi_build_sense_buffer(scsi_debug_dsense, sbuff, key, asc, asq);
341 
342 	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
343 		printk(KERN_INFO "scsi_debug:    [sense_key,asc,ascq]: "
344 		      "[0x%x,0x%x,0x%x]\n", key, asc, asq);
345 }
346 
347 static void get_data_transfer_info(unsigned char *cmd,
348 				   unsigned long long *lba, unsigned int *num,
349 				   u32 *ei_lba)
350 {
351 	*ei_lba = 0;
352 
353 	switch (*cmd) {
354 	case VARIABLE_LENGTH_CMD:
355 		*lba = (u64)cmd[19] | (u64)cmd[18] << 8 |
356 			(u64)cmd[17] << 16 | (u64)cmd[16] << 24 |
357 			(u64)cmd[15] << 32 | (u64)cmd[14] << 40 |
358 			(u64)cmd[13] << 48 | (u64)cmd[12] << 56;
359 
360 		*ei_lba = (u32)cmd[23] | (u32)cmd[22] << 8 |
361 			(u32)cmd[21] << 16 | (u32)cmd[20] << 24;
362 
363 		*num = (u32)cmd[31] | (u32)cmd[30] << 8 | (u32)cmd[29] << 16 |
364 			(u32)cmd[28] << 24;
365 		break;
366 
367 	case WRITE_SAME_16:
368 	case WRITE_16:
369 	case READ_16:
370 		*lba = (u64)cmd[9] | (u64)cmd[8] << 8 |
371 			(u64)cmd[7] << 16 | (u64)cmd[6] << 24 |
372 			(u64)cmd[5] << 32 | (u64)cmd[4] << 40 |
373 			(u64)cmd[3] << 48 | (u64)cmd[2] << 56;
374 
375 		*num = (u32)cmd[13] | (u32)cmd[12] << 8 | (u32)cmd[11] << 16 |
376 			(u32)cmd[10] << 24;
377 		break;
378 	case WRITE_12:
379 	case READ_12:
380 		*lba = (u32)cmd[5] | (u32)cmd[4] << 8 | (u32)cmd[3] << 16 |
381 			(u32)cmd[2] << 24;
382 
383 		*num = (u32)cmd[9] | (u32)cmd[8] << 8 | (u32)cmd[7] << 16 |
384 			(u32)cmd[6] << 24;
385 		break;
386 	case WRITE_SAME:
387 	case WRITE_10:
388 	case READ_10:
389 	case XDWRITEREAD_10:
390 		*lba = (u32)cmd[5] | (u32)cmd[4] << 8 |	(u32)cmd[3] << 16 |
391 			(u32)cmd[2] << 24;
392 
393 		*num = (u32)cmd[8] | (u32)cmd[7] << 8;
394 		break;
395 	case WRITE_6:
396 	case READ_6:
397 		*lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
398 			(u32)(cmd[1] & 0x1f) << 16;
399 		*num = (0 == cmd[4]) ? 256 : cmd[4];
400 		break;
401 	default:
402 		break;
403 	}
404 }
405 
406 static int scsi_debug_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
407 {
408 	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) {
409 		printk(KERN_INFO "scsi_debug: ioctl: cmd=0x%x\n", cmd);
410 	}
411 	return -EINVAL;
412 	/* return -ENOTTY; // correct return but upsets fdisk */
413 }
414 
415 static int check_readiness(struct scsi_cmnd * SCpnt, int reset_only,
416 			   struct sdebug_dev_info * devip)
417 {
418 	if (devip->reset) {
419 		if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
420 			printk(KERN_INFO "scsi_debug: Reporting Unit "
421 			       "attention: power on reset\n");
422 		devip->reset = 0;
423 		mk_sense_buffer(devip, UNIT_ATTENTION, POWERON_RESET, 0);
424 		return check_condition_result;
425 	}
426 	if ((0 == reset_only) && devip->stopped) {
427 		if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
428 			printk(KERN_INFO "scsi_debug: Reporting Not "
429 			       "ready: initializing command required\n");
430 		mk_sense_buffer(devip, NOT_READY, LOGICAL_UNIT_NOT_READY,
431 				0x2);
432 		return check_condition_result;
433 	}
434 	return 0;
435 }
436 
437 /* Returns 0 if ok else (DID_ERROR << 16). Sets scp->resid . */
438 static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
439 				int arr_len)
440 {
441 	int act_len;
442 	struct scsi_data_buffer *sdb = scsi_in(scp);
443 
444 	if (!sdb->length)
445 		return 0;
446 	if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_FROM_DEVICE))
447 		return (DID_ERROR << 16);
448 
449 	act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
450 				      arr, arr_len);
451 	sdb->resid = scsi_bufflen(scp) - act_len;
452 
453 	return 0;
454 }
455 
456 /* Returns number of bytes fetched into 'arr' or -1 if error. */
457 static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
458 			       int arr_len)
459 {
460 	if (!scsi_bufflen(scp))
461 		return 0;
462 	if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_TO_DEVICE))
463 		return -1;
464 
465 	return scsi_sg_copy_to_buffer(scp, arr, arr_len);
466 }
467 
468 
469 static const char * inq_vendor_id = "Linux   ";
470 static const char * inq_product_id = "scsi_debug      ";
471 static const char * inq_product_rev = "0004";
472 
473 static int inquiry_evpd_83(unsigned char * arr, int port_group_id,
474 			   int target_dev_id, int dev_id_num,
475 			   const char * dev_id_str,
476 			   int dev_id_str_len)
477 {
478 	int num, port_a;
479 	char b[32];
480 
481 	port_a = target_dev_id + 1;
482 	/* T10 vendor identifier field format (faked) */
483 	arr[0] = 0x2;	/* ASCII */
484 	arr[1] = 0x1;
485 	arr[2] = 0x0;
486 	memcpy(&arr[4], inq_vendor_id, 8);
487 	memcpy(&arr[12], inq_product_id, 16);
488 	memcpy(&arr[28], dev_id_str, dev_id_str_len);
489 	num = 8 + 16 + dev_id_str_len;
490 	arr[3] = num;
491 	num += 4;
492 	if (dev_id_num >= 0) {
493 		/* NAA-5, Logical unit identifier (binary) */
494 		arr[num++] = 0x1;	/* binary (not necessarily sas) */
495 		arr[num++] = 0x3;	/* PIV=0, lu, naa */
496 		arr[num++] = 0x0;
497 		arr[num++] = 0x8;
498 		arr[num++] = 0x53;  /* naa-5 ieee company id=0x333333 (fake) */
499 		arr[num++] = 0x33;
500 		arr[num++] = 0x33;
501 		arr[num++] = 0x30;
502 		arr[num++] = (dev_id_num >> 24);
503 		arr[num++] = (dev_id_num >> 16) & 0xff;
504 		arr[num++] = (dev_id_num >> 8) & 0xff;
505 		arr[num++] = dev_id_num & 0xff;
506 		/* Target relative port number */
507 		arr[num++] = 0x61;	/* proto=sas, binary */
508 		arr[num++] = 0x94;	/* PIV=1, target port, rel port */
509 		arr[num++] = 0x0;	/* reserved */
510 		arr[num++] = 0x4;	/* length */
511 		arr[num++] = 0x0;	/* reserved */
512 		arr[num++] = 0x0;	/* reserved */
513 		arr[num++] = 0x0;
514 		arr[num++] = 0x1;	/* relative port A */
515 	}
516 	/* NAA-5, Target port identifier */
517 	arr[num++] = 0x61;	/* proto=sas, binary */
518 	arr[num++] = 0x93;	/* piv=1, target port, naa */
519 	arr[num++] = 0x0;
520 	arr[num++] = 0x8;
521 	arr[num++] = 0x52;	/* naa-5, company id=0x222222 (fake) */
522 	arr[num++] = 0x22;
523 	arr[num++] = 0x22;
524 	arr[num++] = 0x20;
525 	arr[num++] = (port_a >> 24);
526 	arr[num++] = (port_a >> 16) & 0xff;
527 	arr[num++] = (port_a >> 8) & 0xff;
528 	arr[num++] = port_a & 0xff;
529 	/* NAA-5, Target port group identifier */
530 	arr[num++] = 0x61;	/* proto=sas, binary */
531 	arr[num++] = 0x95;	/* piv=1, target port group id */
532 	arr[num++] = 0x0;
533 	arr[num++] = 0x4;
534 	arr[num++] = 0;
535 	arr[num++] = 0;
536 	arr[num++] = (port_group_id >> 8) & 0xff;
537 	arr[num++] = port_group_id & 0xff;
538 	/* NAA-5, Target device identifier */
539 	arr[num++] = 0x61;	/* proto=sas, binary */
540 	arr[num++] = 0xa3;	/* piv=1, target device, naa */
541 	arr[num++] = 0x0;
542 	arr[num++] = 0x8;
543 	arr[num++] = 0x52;	/* naa-5, company id=0x222222 (fake) */
544 	arr[num++] = 0x22;
545 	arr[num++] = 0x22;
546 	arr[num++] = 0x20;
547 	arr[num++] = (target_dev_id >> 24);
548 	arr[num++] = (target_dev_id >> 16) & 0xff;
549 	arr[num++] = (target_dev_id >> 8) & 0xff;
550 	arr[num++] = target_dev_id & 0xff;
551 	/* SCSI name string: Target device identifier */
552 	arr[num++] = 0x63;	/* proto=sas, UTF-8 */
553 	arr[num++] = 0xa8;	/* piv=1, target device, SCSI name string */
554 	arr[num++] = 0x0;
555 	arr[num++] = 24;
556 	memcpy(arr + num, "naa.52222220", 12);
557 	num += 12;
558 	snprintf(b, sizeof(b), "%08X", target_dev_id);
559 	memcpy(arr + num, b, 8);
560 	num += 8;
561 	memset(arr + num, 0, 4);
562 	num += 4;
563 	return num;
564 }
565 
566 
567 static unsigned char vpd84_data[] = {
568 /* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
569     0x22,0x22,0x22,0x0,0xbb,0x1,
570     0x22,0x22,0x22,0x0,0xbb,0x2,
571 };
572 
573 static int inquiry_evpd_84(unsigned char * arr)
574 {
575 	memcpy(arr, vpd84_data, sizeof(vpd84_data));
576 	return sizeof(vpd84_data);
577 }
578 
579 static int inquiry_evpd_85(unsigned char * arr)
580 {
581 	int num = 0;
582 	const char * na1 = "https://www.kernel.org/config";
583 	const char * na2 = "http://www.kernel.org/log";
584 	int plen, olen;
585 
586 	arr[num++] = 0x1;	/* lu, storage config */
587 	arr[num++] = 0x0;	/* reserved */
588 	arr[num++] = 0x0;
589 	olen = strlen(na1);
590 	plen = olen + 1;
591 	if (plen % 4)
592 		plen = ((plen / 4) + 1) * 4;
593 	arr[num++] = plen;	/* length, null termianted, padded */
594 	memcpy(arr + num, na1, olen);
595 	memset(arr + num + olen, 0, plen - olen);
596 	num += plen;
597 
598 	arr[num++] = 0x4;	/* lu, logging */
599 	arr[num++] = 0x0;	/* reserved */
600 	arr[num++] = 0x0;
601 	olen = strlen(na2);
602 	plen = olen + 1;
603 	if (plen % 4)
604 		plen = ((plen / 4) + 1) * 4;
605 	arr[num++] = plen;	/* length, null terminated, padded */
606 	memcpy(arr + num, na2, olen);
607 	memset(arr + num + olen, 0, plen - olen);
608 	num += plen;
609 
610 	return num;
611 }
612 
613 /* SCSI ports VPD page */
614 static int inquiry_evpd_88(unsigned char * arr, int target_dev_id)
615 {
616 	int num = 0;
617 	int port_a, port_b;
618 
619 	port_a = target_dev_id + 1;
620 	port_b = port_a + 1;
621 	arr[num++] = 0x0;	/* reserved */
622 	arr[num++] = 0x0;	/* reserved */
623 	arr[num++] = 0x0;
624 	arr[num++] = 0x1;	/* relative port 1 (primary) */
625 	memset(arr + num, 0, 6);
626 	num += 6;
627 	arr[num++] = 0x0;
628 	arr[num++] = 12;	/* length tp descriptor */
629 	/* naa-5 target port identifier (A) */
630 	arr[num++] = 0x61;	/* proto=sas, binary */
631 	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
632 	arr[num++] = 0x0;	/* reserved */
633 	arr[num++] = 0x8;	/* length */
634 	arr[num++] = 0x52;	/* NAA-5, company_id=0x222222 (fake) */
635 	arr[num++] = 0x22;
636 	arr[num++] = 0x22;
637 	arr[num++] = 0x20;
638 	arr[num++] = (port_a >> 24);
639 	arr[num++] = (port_a >> 16) & 0xff;
640 	arr[num++] = (port_a >> 8) & 0xff;
641 	arr[num++] = port_a & 0xff;
642 
643 	arr[num++] = 0x0;	/* reserved */
644 	arr[num++] = 0x0;	/* reserved */
645 	arr[num++] = 0x0;
646 	arr[num++] = 0x2;	/* relative port 2 (secondary) */
647 	memset(arr + num, 0, 6);
648 	num += 6;
649 	arr[num++] = 0x0;
650 	arr[num++] = 12;	/* length tp descriptor */
651 	/* naa-5 target port identifier (B) */
652 	arr[num++] = 0x61;	/* proto=sas, binary */
653 	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
654 	arr[num++] = 0x0;	/* reserved */
655 	arr[num++] = 0x8;	/* length */
656 	arr[num++] = 0x52;	/* NAA-5, company_id=0x222222 (fake) */
657 	arr[num++] = 0x22;
658 	arr[num++] = 0x22;
659 	arr[num++] = 0x20;
660 	arr[num++] = (port_b >> 24);
661 	arr[num++] = (port_b >> 16) & 0xff;
662 	arr[num++] = (port_b >> 8) & 0xff;
663 	arr[num++] = port_b & 0xff;
664 
665 	return num;
666 }
667 
668 
669 static unsigned char vpd89_data[] = {
670 /* from 4th byte */ 0,0,0,0,
671 'l','i','n','u','x',' ',' ',' ',
672 'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
673 '1','2','3','4',
674 0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
675 0xec,0,0,0,
676 0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
677 0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
678 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
679 0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
680 0x53,0x41,
681 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
682 0x20,0x20,
683 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
684 0x10,0x80,
685 0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
686 0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
687 0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
688 0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
689 0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
690 0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
691 0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
692 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
693 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
694 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
695 0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
696 0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
697 0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
698 0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
699 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
700 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
701 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
702 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
703 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
704 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
705 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
706 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
707 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
708 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
709 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
710 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
711 };
712 
713 static int inquiry_evpd_89(unsigned char * arr)
714 {
715 	memcpy(arr, vpd89_data, sizeof(vpd89_data));
716 	return sizeof(vpd89_data);
717 }
718 
719 
720 /* Block limits VPD page (SBC-3) */
721 static unsigned char vpdb0_data[] = {
722 	/* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
723 	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
724 	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
725 	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
726 };
727 
728 static int inquiry_evpd_b0(unsigned char * arr)
729 {
730 	unsigned int gran;
731 
732 	memcpy(arr, vpdb0_data, sizeof(vpdb0_data));
733 
734 	/* Optimal transfer length granularity */
735 	gran = 1 << scsi_debug_physblk_exp;
736 	arr[2] = (gran >> 8) & 0xff;
737 	arr[3] = gran & 0xff;
738 
739 	/* Maximum Transfer Length */
740 	if (sdebug_store_sectors > 0x400) {
741 		arr[4] = (sdebug_store_sectors >> 24) & 0xff;
742 		arr[5] = (sdebug_store_sectors >> 16) & 0xff;
743 		arr[6] = (sdebug_store_sectors >> 8) & 0xff;
744 		arr[7] = sdebug_store_sectors & 0xff;
745 	}
746 
747 	/* Optimal Transfer Length */
748 	put_unaligned_be32(scsi_debug_opt_blks, &arr[8]);
749 
750 	if (scsi_debug_lbpu) {
751 		/* Maximum Unmap LBA Count */
752 		put_unaligned_be32(scsi_debug_unmap_max_blocks, &arr[16]);
753 
754 		/* Maximum Unmap Block Descriptor Count */
755 		put_unaligned_be32(scsi_debug_unmap_max_desc, &arr[20]);
756 	}
757 
758 	/* Unmap Granularity Alignment */
759 	if (scsi_debug_unmap_alignment) {
760 		put_unaligned_be32(scsi_debug_unmap_alignment, &arr[28]);
761 		arr[28] |= 0x80; /* UGAVALID */
762 	}
763 
764 	/* Optimal Unmap Granularity */
765 	put_unaligned_be32(scsi_debug_unmap_granularity, &arr[24]);
766 
767 	/* Maximum WRITE SAME Length */
768 	put_unaligned_be64(scsi_debug_write_same_length, &arr[32]);
769 
770 	return 0x3c; /* Mandatory page length for Logical Block Provisioning */
771 
772 	return sizeof(vpdb0_data);
773 }
774 
775 /* Block device characteristics VPD page (SBC-3) */
776 static int inquiry_evpd_b1(unsigned char *arr)
777 {
778 	memset(arr, 0, 0x3c);
779 	arr[0] = 0;
780 	arr[1] = 1;	/* non rotating medium (e.g. solid state) */
781 	arr[2] = 0;
782 	arr[3] = 5;	/* less than 1.8" */
783 
784 	return 0x3c;
785 }
786 
787 /* Logical block provisioning VPD page (SBC-3) */
788 static int inquiry_evpd_b2(unsigned char *arr)
789 {
790 	memset(arr, 0, 0x4);
791 	arr[0] = 0;			/* threshold exponent */
792 
793 	if (scsi_debug_lbpu)
794 		arr[1] = 1 << 7;
795 
796 	if (scsi_debug_lbpws)
797 		arr[1] |= 1 << 6;
798 
799 	if (scsi_debug_lbpws10)
800 		arr[1] |= 1 << 5;
801 
802 	if (scsi_debug_lbprz)
803 		arr[1] |= 1 << 2;
804 
805 	return 0x4;
806 }
807 
808 #define SDEBUG_LONG_INQ_SZ 96
809 #define SDEBUG_MAX_INQ_ARR_SZ 584
810 
811 static int resp_inquiry(struct scsi_cmnd * scp, int target,
812 			struct sdebug_dev_info * devip)
813 {
814 	unsigned char pq_pdt;
815 	unsigned char * arr;
816 	unsigned char *cmd = (unsigned char *)scp->cmnd;
817 	int alloc_len, n, ret;
818 
819 	alloc_len = (cmd[3] << 8) + cmd[4];
820 	arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
821 	if (! arr)
822 		return DID_REQUEUE << 16;
823 	if (devip->wlun)
824 		pq_pdt = 0x1e;	/* present, wlun */
825 	else if (scsi_debug_no_lun_0 && (0 == devip->lun))
826 		pq_pdt = 0x7f;	/* not present, no device type */
827 	else
828 		pq_pdt = (scsi_debug_ptype & 0x1f);
829 	arr[0] = pq_pdt;
830 	if (0x2 & cmd[1]) {  /* CMDDT bit set */
831 		mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
832 			       	0);
833 		kfree(arr);
834 		return check_condition_result;
835 	} else if (0x1 & cmd[1]) {  /* EVPD bit set */
836 		int lu_id_num, port_group_id, target_dev_id, len;
837 		char lu_id_str[6];
838 		int host_no = devip->sdbg_host->shost->host_no;
839 
840 		port_group_id = (((host_no + 1) & 0x7f) << 8) +
841 		    (devip->channel & 0x7f);
842 		if (0 == scsi_debug_vpd_use_hostno)
843 			host_no = 0;
844 		lu_id_num = devip->wlun ? -1 : (((host_no + 1) * 2000) +
845 			    (devip->target * 1000) + devip->lun);
846 		target_dev_id = ((host_no + 1) * 2000) +
847 				 (devip->target * 1000) - 3;
848 		len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
849 		if (0 == cmd[2]) { /* supported vital product data pages */
850 			arr[1] = cmd[2];	/*sanity */
851 			n = 4;
852 			arr[n++] = 0x0;   /* this page */
853 			arr[n++] = 0x80;  /* unit serial number */
854 			arr[n++] = 0x83;  /* device identification */
855 			arr[n++] = 0x84;  /* software interface ident. */
856 			arr[n++] = 0x85;  /* management network addresses */
857 			arr[n++] = 0x86;  /* extended inquiry */
858 			arr[n++] = 0x87;  /* mode page policy */
859 			arr[n++] = 0x88;  /* SCSI ports */
860 			arr[n++] = 0x89;  /* ATA information */
861 			arr[n++] = 0xb0;  /* Block limits (SBC) */
862 			arr[n++] = 0xb1;  /* Block characteristics (SBC) */
863 			if (scsi_debug_lbp()) /* Logical Block Prov. (SBC) */
864 				arr[n++] = 0xb2;
865 			arr[3] = n - 4;	  /* number of supported VPD pages */
866 		} else if (0x80 == cmd[2]) { /* unit serial number */
867 			arr[1] = cmd[2];	/*sanity */
868 			arr[3] = len;
869 			memcpy(&arr[4], lu_id_str, len);
870 		} else if (0x83 == cmd[2]) { /* device identification */
871 			arr[1] = cmd[2];	/*sanity */
872 			arr[3] = inquiry_evpd_83(&arr[4], port_group_id,
873 						 target_dev_id, lu_id_num,
874 						 lu_id_str, len);
875 		} else if (0x84 == cmd[2]) { /* Software interface ident. */
876 			arr[1] = cmd[2];	/*sanity */
877 			arr[3] = inquiry_evpd_84(&arr[4]);
878 		} else if (0x85 == cmd[2]) { /* Management network addresses */
879 			arr[1] = cmd[2];	/*sanity */
880 			arr[3] = inquiry_evpd_85(&arr[4]);
881 		} else if (0x86 == cmd[2]) { /* extended inquiry */
882 			arr[1] = cmd[2];	/*sanity */
883 			arr[3] = 0x3c;	/* number of following entries */
884 			if (scsi_debug_dif == SD_DIF_TYPE3_PROTECTION)
885 				arr[4] = 0x4;	/* SPT: GRD_CHK:1 */
886 			else if (scsi_debug_dif)
887 				arr[4] = 0x5;   /* SPT: GRD_CHK:1, REF_CHK:1 */
888 			else
889 				arr[4] = 0x0;   /* no protection stuff */
890 			arr[5] = 0x7;   /* head of q, ordered + simple q's */
891 		} else if (0x87 == cmd[2]) { /* mode page policy */
892 			arr[1] = cmd[2];	/*sanity */
893 			arr[3] = 0x8;	/* number of following entries */
894 			arr[4] = 0x2;	/* disconnect-reconnect mp */
895 			arr[6] = 0x80;	/* mlus, shared */
896 			arr[8] = 0x18;	 /* protocol specific lu */
897 			arr[10] = 0x82;	 /* mlus, per initiator port */
898 		} else if (0x88 == cmd[2]) { /* SCSI Ports */
899 			arr[1] = cmd[2];	/*sanity */
900 			arr[3] = inquiry_evpd_88(&arr[4], target_dev_id);
901 		} else if (0x89 == cmd[2]) { /* ATA information */
902 			arr[1] = cmd[2];        /*sanity */
903 			n = inquiry_evpd_89(&arr[4]);
904 			arr[2] = (n >> 8);
905 			arr[3] = (n & 0xff);
906 		} else if (0xb0 == cmd[2]) { /* Block limits (SBC) */
907 			arr[1] = cmd[2];        /*sanity */
908 			arr[3] = inquiry_evpd_b0(&arr[4]);
909 		} else if (0xb1 == cmd[2]) { /* Block characteristics (SBC) */
910 			arr[1] = cmd[2];        /*sanity */
911 			arr[3] = inquiry_evpd_b1(&arr[4]);
912 		} else if (0xb2 == cmd[2]) { /* Logical Block Prov. (SBC) */
913 			arr[1] = cmd[2];        /*sanity */
914 			arr[3] = inquiry_evpd_b2(&arr[4]);
915 		} else {
916 			/* Illegal request, invalid field in cdb */
917 			mk_sense_buffer(devip, ILLEGAL_REQUEST,
918 					INVALID_FIELD_IN_CDB, 0);
919 			kfree(arr);
920 			return check_condition_result;
921 		}
922 		len = min(((arr[2] << 8) + arr[3]) + 4, alloc_len);
923 		ret = fill_from_dev_buffer(scp, arr,
924 			    min(len, SDEBUG_MAX_INQ_ARR_SZ));
925 		kfree(arr);
926 		return ret;
927 	}
928 	/* drops through here for a standard inquiry */
929 	arr[1] = scsi_debug_removable ? 0x80 : 0;	/* Removable disk */
930 	arr[2] = scsi_debug_scsi_level;
931 	arr[3] = 2;    /* response_data_format==2 */
932 	arr[4] = SDEBUG_LONG_INQ_SZ - 5;
933 	arr[5] = scsi_debug_dif ? 1 : 0; /* PROTECT bit */
934 	if (0 == scsi_debug_vpd_use_hostno)
935 		arr[5] = 0x10; /* claim: implicit TGPS */
936 	arr[6] = 0x10; /* claim: MultiP */
937 	/* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
938 	arr[7] = 0xa; /* claim: LINKED + CMDQUE */
939 	memcpy(&arr[8], inq_vendor_id, 8);
940 	memcpy(&arr[16], inq_product_id, 16);
941 	memcpy(&arr[32], inq_product_rev, 4);
942 	/* version descriptors (2 bytes each) follow */
943 	arr[58] = 0x0; arr[59] = 0x77; /* SAM-3 ANSI */
944 	arr[60] = 0x3; arr[61] = 0x14;  /* SPC-3 ANSI */
945 	n = 62;
946 	if (scsi_debug_ptype == 0) {
947 		arr[n++] = 0x3; arr[n++] = 0x3d; /* SBC-2 ANSI */
948 	} else if (scsi_debug_ptype == 1) {
949 		arr[n++] = 0x3; arr[n++] = 0x60; /* SSC-2 no version */
950 	}
951 	arr[n++] = 0xc; arr[n++] = 0xf;  /* SAS-1.1 rev 10 */
952 	ret = fill_from_dev_buffer(scp, arr,
953 			    min(alloc_len, SDEBUG_LONG_INQ_SZ));
954 	kfree(arr);
955 	return ret;
956 }
957 
958 static int resp_requests(struct scsi_cmnd * scp,
959 			 struct sdebug_dev_info * devip)
960 {
961 	unsigned char * sbuff;
962 	unsigned char *cmd = (unsigned char *)scp->cmnd;
963 	unsigned char arr[SDEBUG_SENSE_LEN];
964 	int want_dsense;
965 	int len = 18;
966 
967 	memset(arr, 0, sizeof(arr));
968 	if (devip->reset == 1)
969 		mk_sense_buffer(devip, 0, NO_ADDITIONAL_SENSE, 0);
970 	want_dsense = !!(cmd[1] & 1) || scsi_debug_dsense;
971 	sbuff = devip->sense_buff;
972 	if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
973 		if (want_dsense) {
974 			arr[0] = 0x72;
975 			arr[1] = 0x0;		/* NO_SENSE in sense_key */
976 			arr[2] = THRESHOLD_EXCEEDED;
977 			arr[3] = 0xff;		/* TEST set and MRIE==6 */
978 		} else {
979 			arr[0] = 0x70;
980 			arr[2] = 0x0;		/* NO_SENSE in sense_key */
981 			arr[7] = 0xa;   	/* 18 byte sense buffer */
982 			arr[12] = THRESHOLD_EXCEEDED;
983 			arr[13] = 0xff;		/* TEST set and MRIE==6 */
984 		}
985 	} else {
986 		memcpy(arr, sbuff, SDEBUG_SENSE_LEN);
987 		if ((cmd[1] & 1) && (! scsi_debug_dsense)) {
988 			/* DESC bit set and sense_buff in fixed format */
989 			memset(arr, 0, sizeof(arr));
990 			arr[0] = 0x72;
991 			arr[1] = sbuff[2];     /* sense key */
992 			arr[2] = sbuff[12];    /* asc */
993 			arr[3] = sbuff[13];    /* ascq */
994 			len = 8;
995 		}
996 	}
997 	mk_sense_buffer(devip, 0, NO_ADDITIONAL_SENSE, 0);
998 	return fill_from_dev_buffer(scp, arr, len);
999 }
1000 
1001 static int resp_start_stop(struct scsi_cmnd * scp,
1002 			   struct sdebug_dev_info * devip)
1003 {
1004 	unsigned char *cmd = (unsigned char *)scp->cmnd;
1005 	int power_cond, errsts, start;
1006 
1007 	if ((errsts = check_readiness(scp, 1, devip)))
1008 		return errsts;
1009 	power_cond = (cmd[4] & 0xf0) >> 4;
1010 	if (power_cond) {
1011 		mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
1012 			       	0);
1013 		return check_condition_result;
1014 	}
1015 	start = cmd[4] & 1;
1016 	if (start == devip->stopped)
1017 		devip->stopped = !start;
1018 	return 0;
1019 }
1020 
1021 static sector_t get_sdebug_capacity(void)
1022 {
1023 	if (scsi_debug_virtual_gb > 0)
1024 		return (sector_t)scsi_debug_virtual_gb *
1025 			(1073741824 / scsi_debug_sector_size);
1026 	else
1027 		return sdebug_store_sectors;
1028 }
1029 
1030 #define SDEBUG_READCAP_ARR_SZ 8
1031 static int resp_readcap(struct scsi_cmnd * scp,
1032 			struct sdebug_dev_info * devip)
1033 {
1034 	unsigned char arr[SDEBUG_READCAP_ARR_SZ];
1035 	unsigned int capac;
1036 	int errsts;
1037 
1038 	if ((errsts = check_readiness(scp, 1, devip)))
1039 		return errsts;
1040 	/* following just in case virtual_gb changed */
1041 	sdebug_capacity = get_sdebug_capacity();
1042 	memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
1043 	if (sdebug_capacity < 0xffffffff) {
1044 		capac = (unsigned int)sdebug_capacity - 1;
1045 		arr[0] = (capac >> 24);
1046 		arr[1] = (capac >> 16) & 0xff;
1047 		arr[2] = (capac >> 8) & 0xff;
1048 		arr[3] = capac & 0xff;
1049 	} else {
1050 		arr[0] = 0xff;
1051 		arr[1] = 0xff;
1052 		arr[2] = 0xff;
1053 		arr[3] = 0xff;
1054 	}
1055 	arr[6] = (scsi_debug_sector_size >> 8) & 0xff;
1056 	arr[7] = scsi_debug_sector_size & 0xff;
1057 	return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
1058 }
1059 
1060 #define SDEBUG_READCAP16_ARR_SZ 32
1061 static int resp_readcap16(struct scsi_cmnd * scp,
1062 			  struct sdebug_dev_info * devip)
1063 {
1064 	unsigned char *cmd = (unsigned char *)scp->cmnd;
1065 	unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
1066 	unsigned long long capac;
1067 	int errsts, k, alloc_len;
1068 
1069 	if ((errsts = check_readiness(scp, 1, devip)))
1070 		return errsts;
1071 	alloc_len = ((cmd[10] << 24) + (cmd[11] << 16) + (cmd[12] << 8)
1072 		     + cmd[13]);
1073 	/* following just in case virtual_gb changed */
1074 	sdebug_capacity = get_sdebug_capacity();
1075 	memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
1076 	capac = sdebug_capacity - 1;
1077 	for (k = 0; k < 8; ++k, capac >>= 8)
1078 		arr[7 - k] = capac & 0xff;
1079 	arr[8] = (scsi_debug_sector_size >> 24) & 0xff;
1080 	arr[9] = (scsi_debug_sector_size >> 16) & 0xff;
1081 	arr[10] = (scsi_debug_sector_size >> 8) & 0xff;
1082 	arr[11] = scsi_debug_sector_size & 0xff;
1083 	arr[13] = scsi_debug_physblk_exp & 0xf;
1084 	arr[14] = (scsi_debug_lowest_aligned >> 8) & 0x3f;
1085 
1086 	if (scsi_debug_lbp()) {
1087 		arr[14] |= 0x80; /* LBPME */
1088 		if (scsi_debug_lbprz)
1089 			arr[14] |= 0x40; /* LBPRZ */
1090 	}
1091 
1092 	arr[15] = scsi_debug_lowest_aligned & 0xff;
1093 
1094 	if (scsi_debug_dif) {
1095 		arr[12] = (scsi_debug_dif - 1) << 1; /* P_TYPE */
1096 		arr[12] |= 1; /* PROT_EN */
1097 	}
1098 
1099 	return fill_from_dev_buffer(scp, arr,
1100 				    min(alloc_len, SDEBUG_READCAP16_ARR_SZ));
1101 }
1102 
1103 #define SDEBUG_MAX_TGTPGS_ARR_SZ 1412
1104 
1105 static int resp_report_tgtpgs(struct scsi_cmnd * scp,
1106 			      struct sdebug_dev_info * devip)
1107 {
1108 	unsigned char *cmd = (unsigned char *)scp->cmnd;
1109 	unsigned char * arr;
1110 	int host_no = devip->sdbg_host->shost->host_no;
1111 	int n, ret, alen, rlen;
1112 	int port_group_a, port_group_b, port_a, port_b;
1113 
1114 	alen = ((cmd[6] << 24) + (cmd[7] << 16) + (cmd[8] << 8)
1115 		+ cmd[9]);
1116 
1117 	arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
1118 	if (! arr)
1119 		return DID_REQUEUE << 16;
1120 	/*
1121 	 * EVPD page 0x88 states we have two ports, one
1122 	 * real and a fake port with no device connected.
1123 	 * So we create two port groups with one port each
1124 	 * and set the group with port B to unavailable.
1125 	 */
1126 	port_a = 0x1; /* relative port A */
1127 	port_b = 0x2; /* relative port B */
1128 	port_group_a = (((host_no + 1) & 0x7f) << 8) +
1129 	    (devip->channel & 0x7f);
1130 	port_group_b = (((host_no + 1) & 0x7f) << 8) +
1131 	    (devip->channel & 0x7f) + 0x80;
1132 
1133 	/*
1134 	 * The asymmetric access state is cycled according to the host_id.
1135 	 */
1136 	n = 4;
1137 	if (0 == scsi_debug_vpd_use_hostno) {
1138 	    arr[n++] = host_no % 3; /* Asymm access state */
1139 	    arr[n++] = 0x0F; /* claim: all states are supported */
1140 	} else {
1141 	    arr[n++] = 0x0; /* Active/Optimized path */
1142 	    arr[n++] = 0x01; /* claim: only support active/optimized paths */
1143 	}
1144 	arr[n++] = (port_group_a >> 8) & 0xff;
1145 	arr[n++] = port_group_a & 0xff;
1146 	arr[n++] = 0;    /* Reserved */
1147 	arr[n++] = 0;    /* Status code */
1148 	arr[n++] = 0;    /* Vendor unique */
1149 	arr[n++] = 0x1;  /* One port per group */
1150 	arr[n++] = 0;    /* Reserved */
1151 	arr[n++] = 0;    /* Reserved */
1152 	arr[n++] = (port_a >> 8) & 0xff;
1153 	arr[n++] = port_a & 0xff;
1154 	arr[n++] = 3;    /* Port unavailable */
1155 	arr[n++] = 0x08; /* claim: only unavailalbe paths are supported */
1156 	arr[n++] = (port_group_b >> 8) & 0xff;
1157 	arr[n++] = port_group_b & 0xff;
1158 	arr[n++] = 0;    /* Reserved */
1159 	arr[n++] = 0;    /* Status code */
1160 	arr[n++] = 0;    /* Vendor unique */
1161 	arr[n++] = 0x1;  /* One port per group */
1162 	arr[n++] = 0;    /* Reserved */
1163 	arr[n++] = 0;    /* Reserved */
1164 	arr[n++] = (port_b >> 8) & 0xff;
1165 	arr[n++] = port_b & 0xff;
1166 
1167 	rlen = n - 4;
1168 	arr[0] = (rlen >> 24) & 0xff;
1169 	arr[1] = (rlen >> 16) & 0xff;
1170 	arr[2] = (rlen >> 8) & 0xff;
1171 	arr[3] = rlen & 0xff;
1172 
1173 	/*
1174 	 * Return the smallest value of either
1175 	 * - The allocated length
1176 	 * - The constructed command length
1177 	 * - The maximum array size
1178 	 */
1179 	rlen = min(alen,n);
1180 	ret = fill_from_dev_buffer(scp, arr,
1181 				   min(rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
1182 	kfree(arr);
1183 	return ret;
1184 }
1185 
1186 /* <<Following mode page info copied from ST318451LW>> */
1187 
1188 static int resp_err_recov_pg(unsigned char * p, int pcontrol, int target)
1189 {	/* Read-Write Error Recovery page for mode_sense */
1190 	unsigned char err_recov_pg[] = {0x1, 0xa, 0xc0, 11, 240, 0, 0, 0,
1191 					5, 0, 0xff, 0xff};
1192 
1193 	memcpy(p, err_recov_pg, sizeof(err_recov_pg));
1194 	if (1 == pcontrol)
1195 		memset(p + 2, 0, sizeof(err_recov_pg) - 2);
1196 	return sizeof(err_recov_pg);
1197 }
1198 
1199 static int resp_disconnect_pg(unsigned char * p, int pcontrol, int target)
1200 { 	/* Disconnect-Reconnect page for mode_sense */
1201 	unsigned char disconnect_pg[] = {0x2, 0xe, 128, 128, 0, 10, 0, 0,
1202 					 0, 0, 0, 0, 0, 0, 0, 0};
1203 
1204 	memcpy(p, disconnect_pg, sizeof(disconnect_pg));
1205 	if (1 == pcontrol)
1206 		memset(p + 2, 0, sizeof(disconnect_pg) - 2);
1207 	return sizeof(disconnect_pg);
1208 }
1209 
1210 static int resp_format_pg(unsigned char * p, int pcontrol, int target)
1211 {       /* Format device page for mode_sense */
1212 	unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
1213 				     0, 0, 0, 0, 0, 0, 0, 0,
1214 				     0, 0, 0, 0, 0x40, 0, 0, 0};
1215 
1216 	memcpy(p, format_pg, sizeof(format_pg));
1217 	p[10] = (sdebug_sectors_per >> 8) & 0xff;
1218 	p[11] = sdebug_sectors_per & 0xff;
1219 	p[12] = (scsi_debug_sector_size >> 8) & 0xff;
1220 	p[13] = scsi_debug_sector_size & 0xff;
1221 	if (scsi_debug_removable)
1222 		p[20] |= 0x20; /* should agree with INQUIRY */
1223 	if (1 == pcontrol)
1224 		memset(p + 2, 0, sizeof(format_pg) - 2);
1225 	return sizeof(format_pg);
1226 }
1227 
1228 static int resp_caching_pg(unsigned char * p, int pcontrol, int target)
1229 { 	/* Caching page for mode_sense */
1230 	unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
1231 		0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,     0, 0, 0, 0};
1232 
1233 	memcpy(p, caching_pg, sizeof(caching_pg));
1234 	if (1 == pcontrol)
1235 		memset(p + 2, 0, sizeof(caching_pg) - 2);
1236 	return sizeof(caching_pg);
1237 }
1238 
1239 static int resp_ctrl_m_pg(unsigned char * p, int pcontrol, int target)
1240 { 	/* Control mode page for mode_sense */
1241 	unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
1242 				        0, 0, 0, 0};
1243 	unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
1244 				     0, 0, 0x2, 0x4b};
1245 
1246 	if (scsi_debug_dsense)
1247 		ctrl_m_pg[2] |= 0x4;
1248 	else
1249 		ctrl_m_pg[2] &= ~0x4;
1250 
1251 	if (scsi_debug_ato)
1252 		ctrl_m_pg[5] |= 0x80; /* ATO=1 */
1253 
1254 	memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
1255 	if (1 == pcontrol)
1256 		memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
1257 	else if (2 == pcontrol)
1258 		memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
1259 	return sizeof(ctrl_m_pg);
1260 }
1261 
1262 
1263 static int resp_iec_m_pg(unsigned char * p, int pcontrol, int target)
1264 {	/* Informational Exceptions control mode page for mode_sense */
1265 	unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
1266 				       0, 0, 0x0, 0x0};
1267 	unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
1268 				      0, 0, 0x0, 0x0};
1269 
1270 	memcpy(p, iec_m_pg, sizeof(iec_m_pg));
1271 	if (1 == pcontrol)
1272 		memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
1273 	else if (2 == pcontrol)
1274 		memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
1275 	return sizeof(iec_m_pg);
1276 }
1277 
1278 static int resp_sas_sf_m_pg(unsigned char * p, int pcontrol, int target)
1279 {	/* SAS SSP mode page - short format for mode_sense */
1280 	unsigned char sas_sf_m_pg[] = {0x19, 0x6,
1281 		0x6, 0x0, 0x7, 0xd0, 0x0, 0x0};
1282 
1283 	memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg));
1284 	if (1 == pcontrol)
1285 		memset(p + 2, 0, sizeof(sas_sf_m_pg) - 2);
1286 	return sizeof(sas_sf_m_pg);
1287 }
1288 
1289 
1290 static int resp_sas_pcd_m_spg(unsigned char * p, int pcontrol, int target,
1291 			      int target_dev_id)
1292 {	/* SAS phy control and discover mode page for mode_sense */
1293 	unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
1294 		    0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
1295 		    0x52, 0x22, 0x22, 0x20, 0x0, 0x0, 0x0, 0x0,
1296 		    0x51, 0x11, 0x11, 0x10, 0x0, 0x0, 0x0, 0x1,
1297 		    0x2, 0, 0, 0, 0, 0, 0, 0,
1298 		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
1299 		    0, 0, 0, 0, 0, 0, 0, 0,
1300 		    0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
1301 		    0x52, 0x22, 0x22, 0x20, 0x0, 0x0, 0x0, 0x0,
1302 		    0x51, 0x11, 0x11, 0x10, 0x0, 0x0, 0x0, 0x1,
1303 		    0x3, 0, 0, 0, 0, 0, 0, 0,
1304 		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
1305 		    0, 0, 0, 0, 0, 0, 0, 0,
1306 		};
1307 	int port_a, port_b;
1308 
1309 	port_a = target_dev_id + 1;
1310 	port_b = port_a + 1;
1311 	memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
1312 	p[20] = (port_a >> 24);
1313 	p[21] = (port_a >> 16) & 0xff;
1314 	p[22] = (port_a >> 8) & 0xff;
1315 	p[23] = port_a & 0xff;
1316 	p[48 + 20] = (port_b >> 24);
1317 	p[48 + 21] = (port_b >> 16) & 0xff;
1318 	p[48 + 22] = (port_b >> 8) & 0xff;
1319 	p[48 + 23] = port_b & 0xff;
1320 	if (1 == pcontrol)
1321 		memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
1322 	return sizeof(sas_pcd_m_pg);
1323 }
1324 
1325 static int resp_sas_sha_m_spg(unsigned char * p, int pcontrol)
1326 {	/* SAS SSP shared protocol specific port mode subpage */
1327 	unsigned char sas_sha_m_pg[] = {0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
1328 		    0, 0, 0, 0, 0, 0, 0, 0,
1329 		};
1330 
1331 	memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg));
1332 	if (1 == pcontrol)
1333 		memset(p + 4, 0, sizeof(sas_sha_m_pg) - 4);
1334 	return sizeof(sas_sha_m_pg);
1335 }
1336 
1337 #define SDEBUG_MAX_MSENSE_SZ 256
1338 
1339 static int resp_mode_sense(struct scsi_cmnd * scp, int target,
1340 			   struct sdebug_dev_info * devip)
1341 {
1342 	unsigned char dbd, llbaa;
1343 	int pcontrol, pcode, subpcode, bd_len;
1344 	unsigned char dev_spec;
1345 	int k, alloc_len, msense_6, offset, len, errsts, target_dev_id;
1346 	unsigned char * ap;
1347 	unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
1348 	unsigned char *cmd = (unsigned char *)scp->cmnd;
1349 
1350 	if ((errsts = check_readiness(scp, 1, devip)))
1351 		return errsts;
1352 	dbd = !!(cmd[1] & 0x8);
1353 	pcontrol = (cmd[2] & 0xc0) >> 6;
1354 	pcode = cmd[2] & 0x3f;
1355 	subpcode = cmd[3];
1356 	msense_6 = (MODE_SENSE == cmd[0]);
1357 	llbaa = msense_6 ? 0 : !!(cmd[1] & 0x10);
1358 	if ((0 == scsi_debug_ptype) && (0 == dbd))
1359 		bd_len = llbaa ? 16 : 8;
1360 	else
1361 		bd_len = 0;
1362 	alloc_len = msense_6 ? cmd[4] : ((cmd[7] << 8) | cmd[8]);
1363 	memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
1364 	if (0x3 == pcontrol) {  /* Saving values not supported */
1365 		mk_sense_buffer(devip, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP,
1366 			       	0);
1367 		return check_condition_result;
1368 	}
1369 	target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
1370 			(devip->target * 1000) - 3;
1371 	/* set DPOFUA bit for disks */
1372 	if (0 == scsi_debug_ptype)
1373 		dev_spec = (DEV_READONLY(target) ? 0x80 : 0x0) | 0x10;
1374 	else
1375 		dev_spec = 0x0;
1376 	if (msense_6) {
1377 		arr[2] = dev_spec;
1378 		arr[3] = bd_len;
1379 		offset = 4;
1380 	} else {
1381 		arr[3] = dev_spec;
1382 		if (16 == bd_len)
1383 			arr[4] = 0x1;	/* set LONGLBA bit */
1384 		arr[7] = bd_len;	/* assume 255 or less */
1385 		offset = 8;
1386 	}
1387 	ap = arr + offset;
1388 	if ((bd_len > 0) && (!sdebug_capacity))
1389 		sdebug_capacity = get_sdebug_capacity();
1390 
1391 	if (8 == bd_len) {
1392 		if (sdebug_capacity > 0xfffffffe) {
1393 			ap[0] = 0xff;
1394 			ap[1] = 0xff;
1395 			ap[2] = 0xff;
1396 			ap[3] = 0xff;
1397 		} else {
1398 			ap[0] = (sdebug_capacity >> 24) & 0xff;
1399 			ap[1] = (sdebug_capacity >> 16) & 0xff;
1400 			ap[2] = (sdebug_capacity >> 8) & 0xff;
1401 			ap[3] = sdebug_capacity & 0xff;
1402 		}
1403 		ap[6] = (scsi_debug_sector_size >> 8) & 0xff;
1404 		ap[7] = scsi_debug_sector_size & 0xff;
1405 		offset += bd_len;
1406 		ap = arr + offset;
1407 	} else if (16 == bd_len) {
1408 		unsigned long long capac = sdebug_capacity;
1409 
1410         	for (k = 0; k < 8; ++k, capac >>= 8)
1411                 	ap[7 - k] = capac & 0xff;
1412 		ap[12] = (scsi_debug_sector_size >> 24) & 0xff;
1413 		ap[13] = (scsi_debug_sector_size >> 16) & 0xff;
1414 		ap[14] = (scsi_debug_sector_size >> 8) & 0xff;
1415 		ap[15] = scsi_debug_sector_size & 0xff;
1416 		offset += bd_len;
1417 		ap = arr + offset;
1418 	}
1419 
1420 	if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) {
1421 		/* TODO: Control Extension page */
1422 		mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
1423 			       	0);
1424 		return check_condition_result;
1425 	}
1426 	switch (pcode) {
1427 	case 0x1:	/* Read-Write error recovery page, direct access */
1428 		len = resp_err_recov_pg(ap, pcontrol, target);
1429 		offset += len;
1430 		break;
1431 	case 0x2:	/* Disconnect-Reconnect page, all devices */
1432 		len = resp_disconnect_pg(ap, pcontrol, target);
1433 		offset += len;
1434 		break;
1435         case 0x3:       /* Format device page, direct access */
1436                 len = resp_format_pg(ap, pcontrol, target);
1437                 offset += len;
1438                 break;
1439 	case 0x8:	/* Caching page, direct access */
1440 		len = resp_caching_pg(ap, pcontrol, target);
1441 		offset += len;
1442 		break;
1443 	case 0xa:	/* Control Mode page, all devices */
1444 		len = resp_ctrl_m_pg(ap, pcontrol, target);
1445 		offset += len;
1446 		break;
1447 	case 0x19:	/* if spc==1 then sas phy, control+discover */
1448 		if ((subpcode > 0x2) && (subpcode < 0xff)) {
1449 		        mk_sense_buffer(devip, ILLEGAL_REQUEST,
1450 					INVALID_FIELD_IN_CDB, 0);
1451 			return check_condition_result;
1452 	        }
1453 		len = 0;
1454 		if ((0x0 == subpcode) || (0xff == subpcode))
1455 			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
1456 		if ((0x1 == subpcode) || (0xff == subpcode))
1457 			len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
1458 						  target_dev_id);
1459 		if ((0x2 == subpcode) || (0xff == subpcode))
1460 			len += resp_sas_sha_m_spg(ap + len, pcontrol);
1461 		offset += len;
1462 		break;
1463 	case 0x1c:	/* Informational Exceptions Mode page, all devices */
1464 		len = resp_iec_m_pg(ap, pcontrol, target);
1465 		offset += len;
1466 		break;
1467 	case 0x3f:	/* Read all Mode pages */
1468 		if ((0 == subpcode) || (0xff == subpcode)) {
1469 			len = resp_err_recov_pg(ap, pcontrol, target);
1470 			len += resp_disconnect_pg(ap + len, pcontrol, target);
1471 			len += resp_format_pg(ap + len, pcontrol, target);
1472 			len += resp_caching_pg(ap + len, pcontrol, target);
1473 			len += resp_ctrl_m_pg(ap + len, pcontrol, target);
1474 			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
1475 			if (0xff == subpcode) {
1476 				len += resp_sas_pcd_m_spg(ap + len, pcontrol,
1477 						  target, target_dev_id);
1478 				len += resp_sas_sha_m_spg(ap + len, pcontrol);
1479 			}
1480 			len += resp_iec_m_pg(ap + len, pcontrol, target);
1481 		} else {
1482 			mk_sense_buffer(devip, ILLEGAL_REQUEST,
1483 					INVALID_FIELD_IN_CDB, 0);
1484 			return check_condition_result;
1485                 }
1486 		offset += len;
1487 		break;
1488 	default:
1489 		mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
1490 			       	0);
1491 		return check_condition_result;
1492 	}
1493 	if (msense_6)
1494 		arr[0] = offset - 1;
1495 	else {
1496 		arr[0] = ((offset - 2) >> 8) & 0xff;
1497 		arr[1] = (offset - 2) & 0xff;
1498 	}
1499 	return fill_from_dev_buffer(scp, arr, min(alloc_len, offset));
1500 }
1501 
1502 #define SDEBUG_MAX_MSELECT_SZ 512
1503 
1504 static int resp_mode_select(struct scsi_cmnd * scp, int mselect6,
1505 			    struct sdebug_dev_info * devip)
1506 {
1507 	int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
1508 	int param_len, res, errsts, mpage;
1509 	unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
1510 	unsigned char *cmd = (unsigned char *)scp->cmnd;
1511 
1512 	if ((errsts = check_readiness(scp, 1, devip)))
1513 		return errsts;
1514 	memset(arr, 0, sizeof(arr));
1515 	pf = cmd[1] & 0x10;
1516 	sp = cmd[1] & 0x1;
1517 	param_len = mselect6 ? cmd[4] : ((cmd[7] << 8) + cmd[8]);
1518 	if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
1519 		mk_sense_buffer(devip, ILLEGAL_REQUEST,
1520 				INVALID_FIELD_IN_CDB, 0);
1521 		return check_condition_result;
1522 	}
1523         res = fetch_to_dev_buffer(scp, arr, param_len);
1524         if (-1 == res)
1525                 return (DID_ERROR << 16);
1526         else if ((res < param_len) &&
1527                  (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
1528                 printk(KERN_INFO "scsi_debug: mode_select: cdb indicated=%d, "
1529                        " IO sent=%d bytes\n", param_len, res);
1530 	md_len = mselect6 ? (arr[0] + 1) : ((arr[0] << 8) + arr[1] + 2);
1531 	bd_len = mselect6 ? arr[3] : ((arr[6] << 8) + arr[7]);
1532 	if (md_len > 2) {
1533 		mk_sense_buffer(devip, ILLEGAL_REQUEST,
1534 				INVALID_FIELD_IN_PARAM_LIST, 0);
1535 		return check_condition_result;
1536 	}
1537 	off = bd_len + (mselect6 ? 4 : 8);
1538 	mpage = arr[off] & 0x3f;
1539 	ps = !!(arr[off] & 0x80);
1540 	if (ps) {
1541 		mk_sense_buffer(devip, ILLEGAL_REQUEST,
1542 				INVALID_FIELD_IN_PARAM_LIST, 0);
1543 		return check_condition_result;
1544 	}
1545 	spf = !!(arr[off] & 0x40);
1546 	pg_len = spf ? ((arr[off + 2] << 8) + arr[off + 3] + 4) :
1547 		       (arr[off + 1] + 2);
1548 	if ((pg_len + off) > param_len) {
1549 		mk_sense_buffer(devip, ILLEGAL_REQUEST,
1550 				PARAMETER_LIST_LENGTH_ERR, 0);
1551 		return check_condition_result;
1552 	}
1553 	switch (mpage) {
1554 	case 0xa:      /* Control Mode page */
1555 		if (ctrl_m_pg[1] == arr[off + 1]) {
1556 			memcpy(ctrl_m_pg + 2, arr + off + 2,
1557 			       sizeof(ctrl_m_pg) - 2);
1558 			scsi_debug_dsense = !!(ctrl_m_pg[2] & 0x4);
1559 			return 0;
1560 		}
1561 		break;
1562 	case 0x1c:      /* Informational Exceptions Mode page */
1563 		if (iec_m_pg[1] == arr[off + 1]) {
1564 			memcpy(iec_m_pg + 2, arr + off + 2,
1565 			       sizeof(iec_m_pg) - 2);
1566 			return 0;
1567 		}
1568 		break;
1569 	default:
1570 		break;
1571 	}
1572 	mk_sense_buffer(devip, ILLEGAL_REQUEST,
1573 			INVALID_FIELD_IN_PARAM_LIST, 0);
1574 	return check_condition_result;
1575 }
1576 
1577 static int resp_temp_l_pg(unsigned char * arr)
1578 {
1579 	unsigned char temp_l_pg[] = {0x0, 0x0, 0x3, 0x2, 0x0, 38,
1580 				     0x0, 0x1, 0x3, 0x2, 0x0, 65,
1581 		};
1582 
1583         memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
1584         return sizeof(temp_l_pg);
1585 }
1586 
1587 static int resp_ie_l_pg(unsigned char * arr)
1588 {
1589 	unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
1590 		};
1591 
1592         memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
1593 	if (iec_m_pg[2] & 0x4) {	/* TEST bit set */
1594 		arr[4] = THRESHOLD_EXCEEDED;
1595 		arr[5] = 0xff;
1596 	}
1597         return sizeof(ie_l_pg);
1598 }
1599 
1600 #define SDEBUG_MAX_LSENSE_SZ 512
1601 
1602 static int resp_log_sense(struct scsi_cmnd * scp,
1603                           struct sdebug_dev_info * devip)
1604 {
1605 	int ppc, sp, pcontrol, pcode, subpcode, alloc_len, errsts, len, n;
1606 	unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
1607 	unsigned char *cmd = (unsigned char *)scp->cmnd;
1608 
1609 	if ((errsts = check_readiness(scp, 1, devip)))
1610 		return errsts;
1611 	memset(arr, 0, sizeof(arr));
1612 	ppc = cmd[1] & 0x2;
1613 	sp = cmd[1] & 0x1;
1614 	if (ppc || sp) {
1615 		mk_sense_buffer(devip, ILLEGAL_REQUEST,
1616 				INVALID_FIELD_IN_CDB, 0);
1617 		return check_condition_result;
1618 	}
1619 	pcontrol = (cmd[2] & 0xc0) >> 6;
1620 	pcode = cmd[2] & 0x3f;
1621 	subpcode = cmd[3] & 0xff;
1622 	alloc_len = (cmd[7] << 8) + cmd[8];
1623 	arr[0] = pcode;
1624 	if (0 == subpcode) {
1625 		switch (pcode) {
1626 		case 0x0:	/* Supported log pages log page */
1627 			n = 4;
1628 			arr[n++] = 0x0;		/* this page */
1629 			arr[n++] = 0xd;		/* Temperature */
1630 			arr[n++] = 0x2f;	/* Informational exceptions */
1631 			arr[3] = n - 4;
1632 			break;
1633 		case 0xd:	/* Temperature log page */
1634 			arr[3] = resp_temp_l_pg(arr + 4);
1635 			break;
1636 		case 0x2f:	/* Informational exceptions log page */
1637 			arr[3] = resp_ie_l_pg(arr + 4);
1638 			break;
1639 		default:
1640 			mk_sense_buffer(devip, ILLEGAL_REQUEST,
1641 					INVALID_FIELD_IN_CDB, 0);
1642 			return check_condition_result;
1643 		}
1644 	} else if (0xff == subpcode) {
1645 		arr[0] |= 0x40;
1646 		arr[1] = subpcode;
1647 		switch (pcode) {
1648 		case 0x0:	/* Supported log pages and subpages log page */
1649 			n = 4;
1650 			arr[n++] = 0x0;
1651 			arr[n++] = 0x0;		/* 0,0 page */
1652 			arr[n++] = 0x0;
1653 			arr[n++] = 0xff;	/* this page */
1654 			arr[n++] = 0xd;
1655 			arr[n++] = 0x0;		/* Temperature */
1656 			arr[n++] = 0x2f;
1657 			arr[n++] = 0x0;	/* Informational exceptions */
1658 			arr[3] = n - 4;
1659 			break;
1660 		case 0xd:	/* Temperature subpages */
1661 			n = 4;
1662 			arr[n++] = 0xd;
1663 			arr[n++] = 0x0;		/* Temperature */
1664 			arr[3] = n - 4;
1665 			break;
1666 		case 0x2f:	/* Informational exceptions subpages */
1667 			n = 4;
1668 			arr[n++] = 0x2f;
1669 			arr[n++] = 0x0;		/* Informational exceptions */
1670 			arr[3] = n - 4;
1671 			break;
1672 		default:
1673 			mk_sense_buffer(devip, ILLEGAL_REQUEST,
1674 					INVALID_FIELD_IN_CDB, 0);
1675 			return check_condition_result;
1676 		}
1677 	} else {
1678 		mk_sense_buffer(devip, ILLEGAL_REQUEST,
1679 				INVALID_FIELD_IN_CDB, 0);
1680 		return check_condition_result;
1681 	}
1682 	len = min(((arr[2] << 8) + arr[3]) + 4, alloc_len);
1683 	return fill_from_dev_buffer(scp, arr,
1684 		    min(len, SDEBUG_MAX_INQ_ARR_SZ));
1685 }
1686 
1687 static int check_device_access_params(struct sdebug_dev_info *devi,
1688 				      unsigned long long lba, unsigned int num)
1689 {
1690 	if (lba + num > sdebug_capacity) {
1691 		mk_sense_buffer(devi, ILLEGAL_REQUEST, ADDR_OUT_OF_RANGE, 0);
1692 		return check_condition_result;
1693 	}
1694 	/* transfer length excessive (tie in to block limits VPD page) */
1695 	if (num > sdebug_store_sectors) {
1696 		mk_sense_buffer(devi, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
1697 		return check_condition_result;
1698 	}
1699 	return 0;
1700 }
1701 
1702 /* Returns number of bytes copied or -1 if error. */
1703 static int do_device_access(struct scsi_cmnd *scmd,
1704 			    struct sdebug_dev_info *devi,
1705 			    unsigned long long lba, unsigned int num, int write)
1706 {
1707 	int ret;
1708 	unsigned long long block, rest = 0;
1709 	struct scsi_data_buffer *sdb;
1710 	enum dma_data_direction dir;
1711 	size_t (*func)(struct scatterlist *, unsigned int, void *, size_t,
1712 		       off_t);
1713 
1714 	if (write) {
1715 		sdb = scsi_out(scmd);
1716 		dir = DMA_TO_DEVICE;
1717 		func = sg_pcopy_to_buffer;
1718 	} else {
1719 		sdb = scsi_in(scmd);
1720 		dir = DMA_FROM_DEVICE;
1721 		func = sg_pcopy_from_buffer;
1722 	}
1723 
1724 	if (!sdb->length)
1725 		return 0;
1726 	if (!(scsi_bidi_cmnd(scmd) || scmd->sc_data_direction == dir))
1727 		return -1;
1728 
1729 	block = do_div(lba, sdebug_store_sectors);
1730 	if (block + num > sdebug_store_sectors)
1731 		rest = block + num - sdebug_store_sectors;
1732 
1733 	ret = func(sdb->table.sgl, sdb->table.nents,
1734 		   fake_storep + (block * scsi_debug_sector_size),
1735 		   (num - rest) * scsi_debug_sector_size, 0);
1736 	if (ret != (num - rest) * scsi_debug_sector_size)
1737 		return ret;
1738 
1739 	if (rest) {
1740 		ret += func(sdb->table.sgl, sdb->table.nents,
1741 			    fake_storep, rest * scsi_debug_sector_size,
1742 			    (num - rest) * scsi_debug_sector_size);
1743 	}
1744 
1745 	return ret;
1746 }
1747 
1748 static __be16 dif_compute_csum(const void *buf, int len)
1749 {
1750 	__be16 csum;
1751 
1752 	if (scsi_debug_guard)
1753 		csum = (__force __be16)ip_compute_csum(buf, len);
1754 	else
1755 		csum = cpu_to_be16(crc_t10dif(buf, len));
1756 
1757 	return csum;
1758 }
1759 
1760 static int dif_verify(struct sd_dif_tuple *sdt, const void *data,
1761 		      sector_t sector, u32 ei_lba)
1762 {
1763 	__be16 csum = dif_compute_csum(data, scsi_debug_sector_size);
1764 
1765 	if (sdt->guard_tag != csum) {
1766 		pr_err("%s: GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
1767 			__func__,
1768 			(unsigned long)sector,
1769 			be16_to_cpu(sdt->guard_tag),
1770 			be16_to_cpu(csum));
1771 		return 0x01;
1772 	}
1773 	if (scsi_debug_dif == SD_DIF_TYPE1_PROTECTION &&
1774 	    be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
1775 		pr_err("%s: REF check failed on sector %lu\n",
1776 			__func__, (unsigned long)sector);
1777 		return 0x03;
1778 	}
1779 	if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
1780 	    be32_to_cpu(sdt->ref_tag) != ei_lba) {
1781 		pr_err("%s: REF check failed on sector %lu\n",
1782 			__func__, (unsigned long)sector);
1783 			dif_errors++;
1784 		return 0x03;
1785 	}
1786 	return 0;
1787 }
1788 
1789 static void dif_copy_prot(struct scsi_cmnd *SCpnt, sector_t sector,
1790 			  unsigned int sectors, bool read)
1791 {
1792 	unsigned int i, resid;
1793 	struct scatterlist *psgl;
1794 	void *paddr;
1795 	const void *dif_store_end = dif_storep + sdebug_store_sectors;
1796 
1797 	/* Bytes of protection data to copy into sgl */
1798 	resid = sectors * sizeof(*dif_storep);
1799 
1800 	scsi_for_each_prot_sg(SCpnt, psgl, scsi_prot_sg_count(SCpnt), i) {
1801 		int len = min(psgl->length, resid);
1802 		void *start = dif_store(sector);
1803 		int rest = 0;
1804 
1805 		if (dif_store_end < start + len)
1806 			rest = start + len - dif_store_end;
1807 
1808 		paddr = kmap_atomic(sg_page(psgl)) + psgl->offset;
1809 
1810 		if (read)
1811 			memcpy(paddr, start, len - rest);
1812 		else
1813 			memcpy(start, paddr, len - rest);
1814 
1815 		if (rest) {
1816 			if (read)
1817 				memcpy(paddr + len - rest, dif_storep, rest);
1818 			else
1819 				memcpy(dif_storep, paddr + len - rest, rest);
1820 		}
1821 
1822 		sector += len / sizeof(*dif_storep);
1823 		resid -= len;
1824 		kunmap_atomic(paddr);
1825 	}
1826 }
1827 
1828 static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec,
1829 			    unsigned int sectors, u32 ei_lba)
1830 {
1831 	unsigned int i;
1832 	struct sd_dif_tuple *sdt;
1833 	sector_t sector;
1834 
1835 	for (i = 0; i < sectors; i++) {
1836 		int ret;
1837 
1838 		sector = start_sec + i;
1839 		sdt = dif_store(sector);
1840 
1841 		if (sdt->app_tag == cpu_to_be16(0xffff))
1842 			continue;
1843 
1844 		ret = dif_verify(sdt, fake_store(sector), sector, ei_lba);
1845 		if (ret) {
1846 			dif_errors++;
1847 			return ret;
1848 		}
1849 
1850 		ei_lba++;
1851 	}
1852 
1853 	dif_copy_prot(SCpnt, start_sec, sectors, true);
1854 	dix_reads++;
1855 
1856 	return 0;
1857 }
1858 
1859 static int resp_read(struct scsi_cmnd *SCpnt, unsigned long long lba,
1860 		     unsigned int num, struct sdebug_dev_info *devip,
1861 		     u32 ei_lba)
1862 {
1863 	unsigned long iflags;
1864 	int ret;
1865 
1866 	ret = check_device_access_params(devip, lba, num);
1867 	if (ret)
1868 		return ret;
1869 
1870 	if ((SCSI_DEBUG_OPT_MEDIUM_ERR & scsi_debug_opts) &&
1871 	    (lba <= (OPT_MEDIUM_ERR_ADDR + OPT_MEDIUM_ERR_NUM - 1)) &&
1872 	    ((lba + num) > OPT_MEDIUM_ERR_ADDR)) {
1873 		/* claim unrecoverable read error */
1874 		mk_sense_buffer(devip, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
1875 		/* set info field and valid bit for fixed descriptor */
1876 		if (0x70 == (devip->sense_buff[0] & 0x7f)) {
1877 			devip->sense_buff[0] |= 0x80;	/* Valid bit */
1878 			ret = (lba < OPT_MEDIUM_ERR_ADDR)
1879 			      ? OPT_MEDIUM_ERR_ADDR : (int)lba;
1880 			devip->sense_buff[3] = (ret >> 24) & 0xff;
1881 			devip->sense_buff[4] = (ret >> 16) & 0xff;
1882 			devip->sense_buff[5] = (ret >> 8) & 0xff;
1883 			devip->sense_buff[6] = ret & 0xff;
1884 		}
1885 	        scsi_set_resid(SCpnt, scsi_bufflen(SCpnt));
1886 		return check_condition_result;
1887 	}
1888 
1889 	/* DIX + T10 DIF */
1890 	if (scsi_debug_dix && scsi_prot_sg_count(SCpnt)) {
1891 		int prot_ret = prot_verify_read(SCpnt, lba, num, ei_lba);
1892 
1893 		if (prot_ret) {
1894 			mk_sense_buffer(devip, ABORTED_COMMAND, 0x10, prot_ret);
1895 			return illegal_condition_result;
1896 		}
1897 	}
1898 
1899 	read_lock_irqsave(&atomic_rw, iflags);
1900 	ret = do_device_access(SCpnt, devip, lba, num, 0);
1901 	read_unlock_irqrestore(&atomic_rw, iflags);
1902 	if (ret == -1)
1903 		return DID_ERROR << 16;
1904 
1905 	scsi_in(SCpnt)->resid = scsi_bufflen(SCpnt) - ret;
1906 
1907 	return 0;
1908 }
1909 
1910 void dump_sector(unsigned char *buf, int len)
1911 {
1912 	int i, j;
1913 
1914 	printk(KERN_ERR ">>> Sector Dump <<<\n");
1915 
1916 	for (i = 0 ; i < len ; i += 16) {
1917 		printk(KERN_ERR "%04d: ", i);
1918 
1919 		for (j = 0 ; j < 16 ; j++) {
1920 			unsigned char c = buf[i+j];
1921 			if (c >= 0x20 && c < 0x7e)
1922 				printk(" %c ", buf[i+j]);
1923 			else
1924 				printk("%02x ", buf[i+j]);
1925 		}
1926 
1927 		printk("\n");
1928 	}
1929 }
1930 
1931 static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
1932 			     unsigned int sectors, u32 ei_lba)
1933 {
1934 	int i, j, ret;
1935 	struct sd_dif_tuple *sdt;
1936 	struct scatterlist *dsgl;
1937 	struct scatterlist *psgl = scsi_prot_sglist(SCpnt);
1938 	void *daddr, *paddr;
1939 	sector_t sector = start_sec;
1940 	int ppage_offset;
1941 
1942 	BUG_ON(scsi_sg_count(SCpnt) == 0);
1943 	BUG_ON(scsi_prot_sg_count(SCpnt) == 0);
1944 
1945 	ppage_offset = 0;
1946 
1947 	/* For each data page */
1948 	scsi_for_each_sg(SCpnt, dsgl, scsi_sg_count(SCpnt), i) {
1949 		daddr = kmap_atomic(sg_page(dsgl)) + dsgl->offset;
1950 		paddr = kmap_atomic(sg_page(psgl)) + psgl->offset;
1951 
1952 		/* For each sector-sized chunk in data page */
1953 		for (j = 0; j < dsgl->length; j += scsi_debug_sector_size) {
1954 
1955 			/* If we're at the end of the current
1956 			 * protection page advance to the next one
1957 			 */
1958 			if (ppage_offset >= psgl->length) {
1959 				kunmap_atomic(paddr);
1960 				psgl = sg_next(psgl);
1961 				BUG_ON(psgl == NULL);
1962 				paddr = kmap_atomic(sg_page(psgl))
1963 					+ psgl->offset;
1964 				ppage_offset = 0;
1965 			}
1966 
1967 			sdt = paddr + ppage_offset;
1968 
1969 			ret = dif_verify(sdt, daddr + j, sector, ei_lba);
1970 			if (ret) {
1971 				dump_sector(daddr + j, scsi_debug_sector_size);
1972 				goto out;
1973 			}
1974 
1975 			sector++;
1976 			ei_lba++;
1977 			ppage_offset += sizeof(struct sd_dif_tuple);
1978 		}
1979 
1980 		kunmap_atomic(paddr);
1981 		kunmap_atomic(daddr);
1982 	}
1983 
1984 	dif_copy_prot(SCpnt, start_sec, sectors, false);
1985 	dix_writes++;
1986 
1987 	return 0;
1988 
1989 out:
1990 	dif_errors++;
1991 	kunmap_atomic(paddr);
1992 	kunmap_atomic(daddr);
1993 	return ret;
1994 }
1995 
1996 static unsigned long lba_to_map_index(sector_t lba)
1997 {
1998 	if (scsi_debug_unmap_alignment) {
1999 		lba += scsi_debug_unmap_granularity -
2000 			scsi_debug_unmap_alignment;
2001 	}
2002 	do_div(lba, scsi_debug_unmap_granularity);
2003 
2004 	return lba;
2005 }
2006 
2007 static sector_t map_index_to_lba(unsigned long index)
2008 {
2009 	sector_t lba = index * scsi_debug_unmap_granularity;
2010 
2011 	if (scsi_debug_unmap_alignment) {
2012 		lba -= scsi_debug_unmap_granularity -
2013 			scsi_debug_unmap_alignment;
2014 	}
2015 
2016 	return lba;
2017 }
2018 
2019 static unsigned int map_state(sector_t lba, unsigned int *num)
2020 {
2021 	sector_t end;
2022 	unsigned int mapped;
2023 	unsigned long index;
2024 	unsigned long next;
2025 
2026 	index = lba_to_map_index(lba);
2027 	mapped = test_bit(index, map_storep);
2028 
2029 	if (mapped)
2030 		next = find_next_zero_bit(map_storep, map_size, index);
2031 	else
2032 		next = find_next_bit(map_storep, map_size, index);
2033 
2034 	end = min_t(sector_t, sdebug_store_sectors,  map_index_to_lba(next));
2035 	*num = end - lba;
2036 
2037 	return mapped;
2038 }
2039 
2040 static void map_region(sector_t lba, unsigned int len)
2041 {
2042 	sector_t end = lba + len;
2043 
2044 	while (lba < end) {
2045 		unsigned long index = lba_to_map_index(lba);
2046 
2047 		if (index < map_size)
2048 			set_bit(index, map_storep);
2049 
2050 		lba = map_index_to_lba(index + 1);
2051 	}
2052 }
2053 
2054 static void unmap_region(sector_t lba, unsigned int len)
2055 {
2056 	sector_t end = lba + len;
2057 
2058 	while (lba < end) {
2059 		unsigned long index = lba_to_map_index(lba);
2060 
2061 		if (lba == map_index_to_lba(index) &&
2062 		    lba + scsi_debug_unmap_granularity <= end &&
2063 		    index < map_size) {
2064 			clear_bit(index, map_storep);
2065 			if (scsi_debug_lbprz) {
2066 				memset(fake_storep +
2067 				       lba * scsi_debug_sector_size, 0,
2068 				       scsi_debug_sector_size *
2069 				       scsi_debug_unmap_granularity);
2070 			}
2071 			if (dif_storep) {
2072 				memset(dif_storep + lba, 0xff,
2073 				       sizeof(*dif_storep) *
2074 				       scsi_debug_unmap_granularity);
2075 			}
2076 		}
2077 		lba = map_index_to_lba(index + 1);
2078 	}
2079 }
2080 
2081 static int resp_write(struct scsi_cmnd *SCpnt, unsigned long long lba,
2082 		      unsigned int num, struct sdebug_dev_info *devip,
2083 		      u32 ei_lba)
2084 {
2085 	unsigned long iflags;
2086 	int ret;
2087 
2088 	ret = check_device_access_params(devip, lba, num);
2089 	if (ret)
2090 		return ret;
2091 
2092 	/* DIX + T10 DIF */
2093 	if (scsi_debug_dix && scsi_prot_sg_count(SCpnt)) {
2094 		int prot_ret = prot_verify_write(SCpnt, lba, num, ei_lba);
2095 
2096 		if (prot_ret) {
2097 			mk_sense_buffer(devip, ILLEGAL_REQUEST, 0x10, prot_ret);
2098 			return illegal_condition_result;
2099 		}
2100 	}
2101 
2102 	write_lock_irqsave(&atomic_rw, iflags);
2103 	ret = do_device_access(SCpnt, devip, lba, num, 1);
2104 	if (scsi_debug_lbp())
2105 		map_region(lba, num);
2106 	write_unlock_irqrestore(&atomic_rw, iflags);
2107 	if (-1 == ret)
2108 		return (DID_ERROR << 16);
2109 	else if ((ret < (num * scsi_debug_sector_size)) &&
2110 		 (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
2111 		printk(KERN_INFO "scsi_debug: write: cdb indicated=%u, "
2112 		       " IO sent=%d bytes\n", num * scsi_debug_sector_size, ret);
2113 
2114 	return 0;
2115 }
2116 
2117 static int resp_write_same(struct scsi_cmnd *scmd, unsigned long long lba,
2118 		      unsigned int num, struct sdebug_dev_info *devip,
2119 			   u32 ei_lba, unsigned int unmap)
2120 {
2121 	unsigned long iflags;
2122 	unsigned long long i;
2123 	int ret;
2124 
2125 	ret = check_device_access_params(devip, lba, num);
2126 	if (ret)
2127 		return ret;
2128 
2129 	if (num > scsi_debug_write_same_length) {
2130 		mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
2131 				0);
2132 		return check_condition_result;
2133 	}
2134 
2135 	write_lock_irqsave(&atomic_rw, iflags);
2136 
2137 	if (unmap && scsi_debug_lbp()) {
2138 		unmap_region(lba, num);
2139 		goto out;
2140 	}
2141 
2142 	/* Else fetch one logical block */
2143 	ret = fetch_to_dev_buffer(scmd,
2144 				  fake_storep + (lba * scsi_debug_sector_size),
2145 				  scsi_debug_sector_size);
2146 
2147 	if (-1 == ret) {
2148 		write_unlock_irqrestore(&atomic_rw, iflags);
2149 		return (DID_ERROR << 16);
2150 	} else if ((ret < (num * scsi_debug_sector_size)) &&
2151 		 (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
2152 		printk(KERN_INFO "scsi_debug: write same: cdb indicated=%u, "
2153 		       " IO sent=%d bytes\n", num * scsi_debug_sector_size, ret);
2154 
2155 	/* Copy first sector to remaining blocks */
2156 	for (i = 1 ; i < num ; i++)
2157 		memcpy(fake_storep + ((lba + i) * scsi_debug_sector_size),
2158 		       fake_storep + (lba * scsi_debug_sector_size),
2159 		       scsi_debug_sector_size);
2160 
2161 	if (scsi_debug_lbp())
2162 		map_region(lba, num);
2163 out:
2164 	write_unlock_irqrestore(&atomic_rw, iflags);
2165 
2166 	return 0;
2167 }
2168 
2169 struct unmap_block_desc {
2170 	__be64	lba;
2171 	__be32	blocks;
2172 	__be32	__reserved;
2173 };
2174 
2175 static int resp_unmap(struct scsi_cmnd * scmd, struct sdebug_dev_info * devip)
2176 {
2177 	unsigned char *buf;
2178 	struct unmap_block_desc *desc;
2179 	unsigned int i, payload_len, descriptors;
2180 	int ret;
2181 
2182 	ret = check_readiness(scmd, 1, devip);
2183 	if (ret)
2184 		return ret;
2185 
2186 	payload_len = get_unaligned_be16(&scmd->cmnd[7]);
2187 	BUG_ON(scsi_bufflen(scmd) != payload_len);
2188 
2189 	descriptors = (payload_len - 8) / 16;
2190 
2191 	buf = kmalloc(scsi_bufflen(scmd), GFP_ATOMIC);
2192 	if (!buf)
2193 		return check_condition_result;
2194 
2195 	scsi_sg_copy_to_buffer(scmd, buf, scsi_bufflen(scmd));
2196 
2197 	BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
2198 	BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);
2199 
2200 	desc = (void *)&buf[8];
2201 
2202 	for (i = 0 ; i < descriptors ; i++) {
2203 		unsigned long long lba = get_unaligned_be64(&desc[i].lba);
2204 		unsigned int num = get_unaligned_be32(&desc[i].blocks);
2205 
2206 		ret = check_device_access_params(devip, lba, num);
2207 		if (ret)
2208 			goto out;
2209 
2210 		unmap_region(lba, num);
2211 	}
2212 
2213 	ret = 0;
2214 
2215 out:
2216 	kfree(buf);
2217 
2218 	return ret;
2219 }
2220 
2221 #define SDEBUG_GET_LBA_STATUS_LEN 32
2222 
2223 static int resp_get_lba_status(struct scsi_cmnd * scmd,
2224 			       struct sdebug_dev_info * devip)
2225 {
2226 	unsigned long long lba;
2227 	unsigned int alloc_len, mapped, num;
2228 	unsigned char arr[SDEBUG_GET_LBA_STATUS_LEN];
2229 	int ret;
2230 
2231 	ret = check_readiness(scmd, 1, devip);
2232 	if (ret)
2233 		return ret;
2234 
2235 	lba = get_unaligned_be64(&scmd->cmnd[2]);
2236 	alloc_len = get_unaligned_be32(&scmd->cmnd[10]);
2237 
2238 	if (alloc_len < 24)
2239 		return 0;
2240 
2241 	ret = check_device_access_params(devip, lba, 1);
2242 	if (ret)
2243 		return ret;
2244 
2245 	mapped = map_state(lba, &num);
2246 
2247 	memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
2248 	put_unaligned_be32(20, &arr[0]);	/* Parameter Data Length */
2249 	put_unaligned_be64(lba, &arr[8]);	/* LBA */
2250 	put_unaligned_be32(num, &arr[16]);	/* Number of blocks */
2251 	arr[20] = !mapped;			/* mapped = 0, unmapped = 1 */
2252 
2253 	return fill_from_dev_buffer(scmd, arr, SDEBUG_GET_LBA_STATUS_LEN);
2254 }
2255 
2256 #define SDEBUG_RLUN_ARR_SZ 256
2257 
2258 static int resp_report_luns(struct scsi_cmnd * scp,
2259 			    struct sdebug_dev_info * devip)
2260 {
2261 	unsigned int alloc_len;
2262 	int lun_cnt, i, upper, num, n, wlun, lun;
2263 	unsigned char *cmd = (unsigned char *)scp->cmnd;
2264 	int select_report = (int)cmd[2];
2265 	struct scsi_lun *one_lun;
2266 	unsigned char arr[SDEBUG_RLUN_ARR_SZ];
2267 	unsigned char * max_addr;
2268 
2269 	alloc_len = cmd[9] + (cmd[8] << 8) + (cmd[7] << 16) + (cmd[6] << 24);
2270 	if ((alloc_len < 4) || (select_report > 2)) {
2271 		mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
2272 			       	0);
2273 		return check_condition_result;
2274 	}
2275 	/* can produce response with up to 16k luns (lun 0 to lun 16383) */
2276 	memset(arr, 0, SDEBUG_RLUN_ARR_SZ);
2277 	lun_cnt = scsi_debug_max_luns;
2278 	if (1 == select_report)
2279 		lun_cnt = 0;
2280 	else if (scsi_debug_no_lun_0 && (lun_cnt > 0))
2281 		--lun_cnt;
2282 	wlun = (select_report > 0) ? 1 : 0;
2283 	num = lun_cnt + wlun;
2284 	arr[2] = ((sizeof(struct scsi_lun) * num) >> 8) & 0xff;
2285 	arr[3] = (sizeof(struct scsi_lun) * num) & 0xff;
2286 	n = min((int)((SDEBUG_RLUN_ARR_SZ - 8) /
2287 			    sizeof(struct scsi_lun)), num);
2288 	if (n < num) {
2289 		wlun = 0;
2290 		lun_cnt = n;
2291 	}
2292 	one_lun = (struct scsi_lun *) &arr[8];
2293 	max_addr = arr + SDEBUG_RLUN_ARR_SZ;
2294 	for (i = 0, lun = (scsi_debug_no_lun_0 ? 1 : 0);
2295              ((i < lun_cnt) && ((unsigned char *)(one_lun + i) < max_addr));
2296 	     i++, lun++) {
2297 		upper = (lun >> 8) & 0x3f;
2298 		if (upper)
2299 			one_lun[i].scsi_lun[0] =
2300 			    (upper | (SAM2_LUN_ADDRESS_METHOD << 6));
2301 		one_lun[i].scsi_lun[1] = lun & 0xff;
2302 	}
2303 	if (wlun) {
2304 		one_lun[i].scsi_lun[0] = (SAM2_WLUN_REPORT_LUNS >> 8) & 0xff;
2305 		one_lun[i].scsi_lun[1] = SAM2_WLUN_REPORT_LUNS & 0xff;
2306 		i++;
2307 	}
2308 	alloc_len = (unsigned char *)(one_lun + i) - arr;
2309 	return fill_from_dev_buffer(scp, arr,
2310 				    min((int)alloc_len, SDEBUG_RLUN_ARR_SZ));
2311 }
2312 
2313 static int resp_xdwriteread(struct scsi_cmnd *scp, unsigned long long lba,
2314 			    unsigned int num, struct sdebug_dev_info *devip)
2315 {
2316 	int i, j, ret = -1;
2317 	unsigned char *kaddr, *buf;
2318 	unsigned int offset;
2319 	struct scatterlist *sg;
2320 	struct scsi_data_buffer *sdb = scsi_in(scp);
2321 
2322 	/* better not to use temporary buffer. */
2323 	buf = kmalloc(scsi_bufflen(scp), GFP_ATOMIC);
2324 	if (!buf)
2325 		return ret;
2326 
2327 	scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
2328 
2329 	offset = 0;
2330 	for_each_sg(sdb->table.sgl, sg, sdb->table.nents, i) {
2331 		kaddr = (unsigned char *)kmap_atomic(sg_page(sg));
2332 		if (!kaddr)
2333 			goto out;
2334 
2335 		for (j = 0; j < sg->length; j++)
2336 			*(kaddr + sg->offset + j) ^= *(buf + offset + j);
2337 
2338 		offset += sg->length;
2339 		kunmap_atomic(kaddr);
2340 	}
2341 	ret = 0;
2342 out:
2343 	kfree(buf);
2344 
2345 	return ret;
2346 }
2347 
2348 /* When timer goes off this function is called. */
2349 static void timer_intr_handler(unsigned long indx)
2350 {
2351 	struct sdebug_queued_cmd * sqcp;
2352 	unsigned long iflags;
2353 
2354 	if (indx >= scsi_debug_max_queue) {
2355 		printk(KERN_ERR "scsi_debug:timer_intr_handler: indx too "
2356 		       "large\n");
2357 		return;
2358 	}
2359 	spin_lock_irqsave(&queued_arr_lock, iflags);
2360 	sqcp = &queued_arr[(int)indx];
2361 	if (! sqcp->in_use) {
2362 		printk(KERN_ERR "scsi_debug:timer_intr_handler: Unexpected "
2363 		       "interrupt\n");
2364 		spin_unlock_irqrestore(&queued_arr_lock, iflags);
2365 		return;
2366 	}
2367 	sqcp->in_use = 0;
2368 	if (sqcp->done_funct) {
2369 		sqcp->a_cmnd->result = sqcp->scsi_result;
2370 		sqcp->done_funct(sqcp->a_cmnd); /* callback to mid level */
2371 	}
2372 	sqcp->done_funct = NULL;
2373 	spin_unlock_irqrestore(&queued_arr_lock, iflags);
2374 }
2375 
2376 
2377 static struct sdebug_dev_info *
2378 sdebug_device_create(struct sdebug_host_info *sdbg_host, gfp_t flags)
2379 {
2380 	struct sdebug_dev_info *devip;
2381 
2382 	devip = kzalloc(sizeof(*devip), flags);
2383 	if (devip) {
2384 		devip->sdbg_host = sdbg_host;
2385 		list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
2386 	}
2387 	return devip;
2388 }
2389 
2390 static struct sdebug_dev_info * devInfoReg(struct scsi_device * sdev)
2391 {
2392 	struct sdebug_host_info * sdbg_host;
2393 	struct sdebug_dev_info * open_devip = NULL;
2394 	struct sdebug_dev_info * devip =
2395 			(struct sdebug_dev_info *)sdev->hostdata;
2396 
2397 	if (devip)
2398 		return devip;
2399 	sdbg_host = *(struct sdebug_host_info **)shost_priv(sdev->host);
2400 	if (!sdbg_host) {
2401                 printk(KERN_ERR "Host info NULL\n");
2402 		return NULL;
2403         }
2404 	list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
2405 		if ((devip->used) && (devip->channel == sdev->channel) &&
2406                     (devip->target == sdev->id) &&
2407                     (devip->lun == sdev->lun))
2408                         return devip;
2409 		else {
2410 			if ((!devip->used) && (!open_devip))
2411 				open_devip = devip;
2412 		}
2413 	}
2414 	if (!open_devip) { /* try and make a new one */
2415 		open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
2416 		if (!open_devip) {
2417 			printk(KERN_ERR "%s: out of memory at line %d\n",
2418 				__func__, __LINE__);
2419 			return NULL;
2420 		}
2421 	}
2422 
2423 	open_devip->channel = sdev->channel;
2424 	open_devip->target = sdev->id;
2425 	open_devip->lun = sdev->lun;
2426 	open_devip->sdbg_host = sdbg_host;
2427 	open_devip->reset = 1;
2428 	open_devip->used = 1;
2429 	memset(open_devip->sense_buff, 0, SDEBUG_SENSE_LEN);
2430 	if (scsi_debug_dsense)
2431 		open_devip->sense_buff[0] = 0x72;
2432 	else {
2433 		open_devip->sense_buff[0] = 0x70;
2434 		open_devip->sense_buff[7] = 0xa;
2435 	}
2436 	if (sdev->lun == SAM2_WLUN_REPORT_LUNS)
2437 		open_devip->wlun = SAM2_WLUN_REPORT_LUNS & 0xff;
2438 
2439 	return open_devip;
2440 }
2441 
2442 static int scsi_debug_slave_alloc(struct scsi_device *sdp)
2443 {
2444 	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2445 		printk(KERN_INFO "scsi_debug: slave_alloc <%u %u %u %u>\n",
2446 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
2447 	queue_flag_set_unlocked(QUEUE_FLAG_BIDI, sdp->request_queue);
2448 	return 0;
2449 }
2450 
2451 static int scsi_debug_slave_configure(struct scsi_device *sdp)
2452 {
2453 	struct sdebug_dev_info *devip;
2454 
2455 	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2456 		printk(KERN_INFO "scsi_debug: slave_configure <%u %u %u %u>\n",
2457 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
2458 	if (sdp->host->max_cmd_len != SCSI_DEBUG_MAX_CMD_LEN)
2459 		sdp->host->max_cmd_len = SCSI_DEBUG_MAX_CMD_LEN;
2460 	devip = devInfoReg(sdp);
2461 	if (NULL == devip)
2462 		return 1;	/* no resources, will be marked offline */
2463 	sdp->hostdata = devip;
2464 	if (sdp->host->cmd_per_lun)
2465 		scsi_adjust_queue_depth(sdp, SDEBUG_TAGGED_QUEUING,
2466 					sdp->host->cmd_per_lun);
2467 	blk_queue_max_segment_size(sdp->request_queue, 256 * 1024);
2468 	if (scsi_debug_no_uld)
2469 		sdp->no_uld_attach = 1;
2470 	return 0;
2471 }
2472 
2473 static void scsi_debug_slave_destroy(struct scsi_device *sdp)
2474 {
2475 	struct sdebug_dev_info *devip =
2476 		(struct sdebug_dev_info *)sdp->hostdata;
2477 
2478 	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2479 		printk(KERN_INFO "scsi_debug: slave_destroy <%u %u %u %u>\n",
2480 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
2481 	if (devip) {
2482 		/* make this slot available for re-use */
2483 		devip->used = 0;
2484 		sdp->hostdata = NULL;
2485 	}
2486 }
2487 
2488 /* Returns 1 if found 'cmnd' and deleted its timer. else returns 0 */
2489 static int stop_queued_cmnd(struct scsi_cmnd *cmnd)
2490 {
2491 	unsigned long iflags;
2492 	int k;
2493 	struct sdebug_queued_cmd *sqcp;
2494 
2495 	spin_lock_irqsave(&queued_arr_lock, iflags);
2496 	for (k = 0; k < scsi_debug_max_queue; ++k) {
2497 		sqcp = &queued_arr[k];
2498 		if (sqcp->in_use && (cmnd == sqcp->a_cmnd)) {
2499 			del_timer_sync(&sqcp->cmnd_timer);
2500 			sqcp->in_use = 0;
2501 			sqcp->a_cmnd = NULL;
2502 			break;
2503 		}
2504 	}
2505 	spin_unlock_irqrestore(&queued_arr_lock, iflags);
2506 	return (k < scsi_debug_max_queue) ? 1 : 0;
2507 }
2508 
2509 /* Deletes (stops) timers of all queued commands */
2510 static void stop_all_queued(void)
2511 {
2512 	unsigned long iflags;
2513 	int k;
2514 	struct sdebug_queued_cmd *sqcp;
2515 
2516 	spin_lock_irqsave(&queued_arr_lock, iflags);
2517 	for (k = 0; k < scsi_debug_max_queue; ++k) {
2518 		sqcp = &queued_arr[k];
2519 		if (sqcp->in_use && sqcp->a_cmnd) {
2520 			del_timer_sync(&sqcp->cmnd_timer);
2521 			sqcp->in_use = 0;
2522 			sqcp->a_cmnd = NULL;
2523 		}
2524 	}
2525 	spin_unlock_irqrestore(&queued_arr_lock, iflags);
2526 }
2527 
2528 static int scsi_debug_abort(struct scsi_cmnd * SCpnt)
2529 {
2530 	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2531 		printk(KERN_INFO "scsi_debug: abort\n");
2532 	++num_aborts;
2533 	stop_queued_cmnd(SCpnt);
2534 	return SUCCESS;
2535 }
2536 
2537 static int scsi_debug_biosparam(struct scsi_device *sdev,
2538 		struct block_device * bdev, sector_t capacity, int *info)
2539 {
2540 	int res;
2541 	unsigned char *buf;
2542 
2543 	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2544 		printk(KERN_INFO "scsi_debug: biosparam\n");
2545 	buf = scsi_bios_ptable(bdev);
2546 	if (buf) {
2547 		res = scsi_partsize(buf, capacity,
2548 				    &info[2], &info[0], &info[1]);
2549 		kfree(buf);
2550 		if (! res)
2551 			return res;
2552 	}
2553 	info[0] = sdebug_heads;
2554 	info[1] = sdebug_sectors_per;
2555 	info[2] = sdebug_cylinders_per;
2556 	return 0;
2557 }
2558 
2559 static int scsi_debug_device_reset(struct scsi_cmnd * SCpnt)
2560 {
2561 	struct sdebug_dev_info * devip;
2562 
2563 	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2564 		printk(KERN_INFO "scsi_debug: device_reset\n");
2565 	++num_dev_resets;
2566 	if (SCpnt) {
2567 		devip = devInfoReg(SCpnt->device);
2568 		if (devip)
2569 			devip->reset = 1;
2570 	}
2571 	return SUCCESS;
2572 }
2573 
2574 static int scsi_debug_bus_reset(struct scsi_cmnd * SCpnt)
2575 {
2576 	struct sdebug_host_info *sdbg_host;
2577         struct sdebug_dev_info * dev_info;
2578         struct scsi_device * sdp;
2579         struct Scsi_Host * hp;
2580 
2581 	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2582 		printk(KERN_INFO "scsi_debug: bus_reset\n");
2583 	++num_bus_resets;
2584 	if (SCpnt && ((sdp = SCpnt->device)) && ((hp = sdp->host))) {
2585 		sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
2586 		if (sdbg_host) {
2587 			list_for_each_entry(dev_info,
2588                                             &sdbg_host->dev_info_list,
2589                                             dev_list)
2590 				dev_info->reset = 1;
2591 		}
2592 	}
2593 	return SUCCESS;
2594 }
2595 
2596 static int scsi_debug_host_reset(struct scsi_cmnd * SCpnt)
2597 {
2598 	struct sdebug_host_info * sdbg_host;
2599         struct sdebug_dev_info * dev_info;
2600 
2601 	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2602 		printk(KERN_INFO "scsi_debug: host_reset\n");
2603 	++num_host_resets;
2604         spin_lock(&sdebug_host_list_lock);
2605         list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
2606                 list_for_each_entry(dev_info, &sdbg_host->dev_info_list,
2607                                     dev_list)
2608                         dev_info->reset = 1;
2609         }
2610         spin_unlock(&sdebug_host_list_lock);
2611 	stop_all_queued();
2612 	return SUCCESS;
2613 }
2614 
2615 /* Initializes timers in queued array */
2616 static void __init init_all_queued(void)
2617 {
2618 	unsigned long iflags;
2619 	int k;
2620 	struct sdebug_queued_cmd * sqcp;
2621 
2622 	spin_lock_irqsave(&queued_arr_lock, iflags);
2623 	for (k = 0; k < scsi_debug_max_queue; ++k) {
2624 		sqcp = &queued_arr[k];
2625 		init_timer(&sqcp->cmnd_timer);
2626 		sqcp->in_use = 0;
2627 		sqcp->a_cmnd = NULL;
2628 	}
2629 	spin_unlock_irqrestore(&queued_arr_lock, iflags);
2630 }
2631 
2632 static void __init sdebug_build_parts(unsigned char *ramp,
2633 				      unsigned long store_size)
2634 {
2635 	struct partition * pp;
2636 	int starts[SDEBUG_MAX_PARTS + 2];
2637 	int sectors_per_part, num_sectors, k;
2638 	int heads_by_sects, start_sec, end_sec;
2639 
2640 	/* assume partition table already zeroed */
2641 	if ((scsi_debug_num_parts < 1) || (store_size < 1048576))
2642 		return;
2643 	if (scsi_debug_num_parts > SDEBUG_MAX_PARTS) {
2644 		scsi_debug_num_parts = SDEBUG_MAX_PARTS;
2645 		printk(KERN_WARNING "scsi_debug:build_parts: reducing "
2646 				    "partitions to %d\n", SDEBUG_MAX_PARTS);
2647 	}
2648 	num_sectors = (int)sdebug_store_sectors;
2649 	sectors_per_part = (num_sectors - sdebug_sectors_per)
2650 			   / scsi_debug_num_parts;
2651 	heads_by_sects = sdebug_heads * sdebug_sectors_per;
2652         starts[0] = sdebug_sectors_per;
2653 	for (k = 1; k < scsi_debug_num_parts; ++k)
2654 		starts[k] = ((k * sectors_per_part) / heads_by_sects)
2655 			    * heads_by_sects;
2656 	starts[scsi_debug_num_parts] = num_sectors;
2657 	starts[scsi_debug_num_parts + 1] = 0;
2658 
2659 	ramp[510] = 0x55;	/* magic partition markings */
2660 	ramp[511] = 0xAA;
2661 	pp = (struct partition *)(ramp + 0x1be);
2662 	for (k = 0; starts[k + 1]; ++k, ++pp) {
2663 		start_sec = starts[k];
2664 		end_sec = starts[k + 1] - 1;
2665 		pp->boot_ind = 0;
2666 
2667 		pp->cyl = start_sec / heads_by_sects;
2668 		pp->head = (start_sec - (pp->cyl * heads_by_sects))
2669 			   / sdebug_sectors_per;
2670 		pp->sector = (start_sec % sdebug_sectors_per) + 1;
2671 
2672 		pp->end_cyl = end_sec / heads_by_sects;
2673 		pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
2674 			       / sdebug_sectors_per;
2675 		pp->end_sector = (end_sec % sdebug_sectors_per) + 1;
2676 
2677 		pp->start_sect = cpu_to_le32(start_sec);
2678 		pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1);
2679 		pp->sys_ind = 0x83;	/* plain Linux partition */
2680 	}
2681 }
2682 
2683 static int schedule_resp(struct scsi_cmnd * cmnd,
2684 			 struct sdebug_dev_info * devip,
2685 			 done_funct_t done, int scsi_result, int delta_jiff)
2686 {
2687 	if ((SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) && cmnd) {
2688 		if (scsi_result) {
2689 			struct scsi_device * sdp = cmnd->device;
2690 
2691 			printk(KERN_INFO "scsi_debug:    <%u %u %u %u> "
2692 			       "non-zero result=0x%x\n", sdp->host->host_no,
2693 			       sdp->channel, sdp->id, sdp->lun, scsi_result);
2694 		}
2695 	}
2696 	if (cmnd && devip) {
2697 		/* simulate autosense by this driver */
2698 		if (SAM_STAT_CHECK_CONDITION == (scsi_result & 0xff))
2699 			memcpy(cmnd->sense_buffer, devip->sense_buff,
2700 			       (SCSI_SENSE_BUFFERSIZE > SDEBUG_SENSE_LEN) ?
2701 			       SDEBUG_SENSE_LEN : SCSI_SENSE_BUFFERSIZE);
2702 	}
2703 	if (delta_jiff <= 0) {
2704 		if (cmnd)
2705 			cmnd->result = scsi_result;
2706 		if (done)
2707 			done(cmnd);
2708 		return 0;
2709 	} else {
2710 		unsigned long iflags;
2711 		int k;
2712 		struct sdebug_queued_cmd * sqcp = NULL;
2713 
2714 		spin_lock_irqsave(&queued_arr_lock, iflags);
2715 		for (k = 0; k < scsi_debug_max_queue; ++k) {
2716 			sqcp = &queued_arr[k];
2717 			if (! sqcp->in_use)
2718 				break;
2719 		}
2720 		if (k >= scsi_debug_max_queue) {
2721 			spin_unlock_irqrestore(&queued_arr_lock, iflags);
2722 			printk(KERN_WARNING "scsi_debug: can_queue exceeded\n");
2723 			return 1;	/* report busy to mid level */
2724 		}
2725 		sqcp->in_use = 1;
2726 		sqcp->a_cmnd = cmnd;
2727 		sqcp->scsi_result = scsi_result;
2728 		sqcp->done_funct = done;
2729 		sqcp->cmnd_timer.function = timer_intr_handler;
2730 		sqcp->cmnd_timer.data = k;
2731 		sqcp->cmnd_timer.expires = jiffies + delta_jiff;
2732 		add_timer(&sqcp->cmnd_timer);
2733 		spin_unlock_irqrestore(&queued_arr_lock, iflags);
2734 		if (cmnd)
2735 			cmnd->result = 0;
2736 		return 0;
2737 	}
2738 }
2739 /* Note: The following macros create attribute files in the
2740    /sys/module/scsi_debug/parameters directory. Unfortunately this
2741    driver is unaware of a change and cannot trigger auxiliary actions
2742    as it can when the corresponding attribute in the
2743    /sys/bus/pseudo/drivers/scsi_debug directory is changed.
2744  */
2745 module_param_named(add_host, scsi_debug_add_host, int, S_IRUGO | S_IWUSR);
2746 module_param_named(ato, scsi_debug_ato, int, S_IRUGO);
2747 module_param_named(delay, scsi_debug_delay, int, S_IRUGO | S_IWUSR);
2748 module_param_named(dev_size_mb, scsi_debug_dev_size_mb, int, S_IRUGO);
2749 module_param_named(dif, scsi_debug_dif, int, S_IRUGO);
2750 module_param_named(dix, scsi_debug_dix, int, S_IRUGO);
2751 module_param_named(dsense, scsi_debug_dsense, int, S_IRUGO | S_IWUSR);
2752 module_param_named(every_nth, scsi_debug_every_nth, int, S_IRUGO | S_IWUSR);
2753 module_param_named(fake_rw, scsi_debug_fake_rw, int, S_IRUGO | S_IWUSR);
2754 module_param_named(guard, scsi_debug_guard, uint, S_IRUGO);
2755 module_param_named(lbpu, scsi_debug_lbpu, int, S_IRUGO);
2756 module_param_named(lbpws, scsi_debug_lbpws, int, S_IRUGO);
2757 module_param_named(lbpws10, scsi_debug_lbpws10, int, S_IRUGO);
2758 module_param_named(lbprz, scsi_debug_lbprz, int, S_IRUGO);
2759 module_param_named(lowest_aligned, scsi_debug_lowest_aligned, int, S_IRUGO);
2760 module_param_named(max_luns, scsi_debug_max_luns, int, S_IRUGO | S_IWUSR);
2761 module_param_named(max_queue, scsi_debug_max_queue, int, S_IRUGO | S_IWUSR);
2762 module_param_named(no_lun_0, scsi_debug_no_lun_0, int, S_IRUGO | S_IWUSR);
2763 module_param_named(no_uld, scsi_debug_no_uld, int, S_IRUGO);
2764 module_param_named(num_parts, scsi_debug_num_parts, int, S_IRUGO);
2765 module_param_named(num_tgts, scsi_debug_num_tgts, int, S_IRUGO | S_IWUSR);
2766 module_param_named(opt_blks, scsi_debug_opt_blks, int, S_IRUGO);
2767 module_param_named(opts, scsi_debug_opts, int, S_IRUGO | S_IWUSR);
2768 module_param_named(physblk_exp, scsi_debug_physblk_exp, int, S_IRUGO);
2769 module_param_named(ptype, scsi_debug_ptype, int, S_IRUGO | S_IWUSR);
2770 module_param_named(removable, scsi_debug_removable, bool, S_IRUGO | S_IWUSR);
2771 module_param_named(scsi_level, scsi_debug_scsi_level, int, S_IRUGO);
2772 module_param_named(sector_size, scsi_debug_sector_size, int, S_IRUGO);
2773 module_param_named(unmap_alignment, scsi_debug_unmap_alignment, int, S_IRUGO);
2774 module_param_named(unmap_granularity, scsi_debug_unmap_granularity, int, S_IRUGO);
2775 module_param_named(unmap_max_blocks, scsi_debug_unmap_max_blocks, int, S_IRUGO);
2776 module_param_named(unmap_max_desc, scsi_debug_unmap_max_desc, int, S_IRUGO);
2777 module_param_named(virtual_gb, scsi_debug_virtual_gb, int, S_IRUGO | S_IWUSR);
2778 module_param_named(vpd_use_hostno, scsi_debug_vpd_use_hostno, int,
2779 		   S_IRUGO | S_IWUSR);
2780 module_param_named(write_same_length, scsi_debug_write_same_length, int,
2781 		   S_IRUGO | S_IWUSR);
2782 
2783 MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
2784 MODULE_DESCRIPTION("SCSI debug adapter driver");
2785 MODULE_LICENSE("GPL");
2786 MODULE_VERSION(SCSI_DEBUG_VERSION);
2787 
2788 MODULE_PARM_DESC(add_host, "0..127 hosts allowed(def=1)");
2789 MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
2790 MODULE_PARM_DESC(delay, "# of jiffies to delay response(def=1)");
2791 MODULE_PARM_DESC(dev_size_mb, "size in MB of ram shared by devs(def=8)");
2792 MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
2793 MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
2794 MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
2795 MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
2796 MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
2797 MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
2798 MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
2799 MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
2800 MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
2801 MODULE_PARM_DESC(lbprz, "unmapped blocks return 0 on read (def=1)");
2802 MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
2803 MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
2804 MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to 255(def))");
2805 MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
2806 MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0))");
2807 MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
2808 MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
2809 MODULE_PARM_DESC(opt_blks, "optimal transfer length in block (def=64)");
2810 MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
2811 MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
2812 MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
2813 MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
2814 MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=5[SPC-3])");
2815 MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
2816 MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
2817 MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
2818 MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)");
2819 MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
2820 MODULE_PARM_DESC(virtual_gb, "virtual gigabyte size (def=0 -> use dev_size_mb)");
2821 MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
2822 MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
2823 
2824 static char sdebug_info[256];
2825 
2826 static const char * scsi_debug_info(struct Scsi_Host * shp)
2827 {
2828 	sprintf(sdebug_info, "scsi_debug, version %s [%s], "
2829 		"dev_size_mb=%d, opts=0x%x", SCSI_DEBUG_VERSION,
2830 		scsi_debug_version_date, scsi_debug_dev_size_mb,
2831 		scsi_debug_opts);
2832 	return sdebug_info;
2833 }
2834 
2835 /* scsi_debug_proc_info
2836  * Used if the driver currently has no own support for /proc/scsi
2837  */
2838 static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer, int length)
2839 {
2840 	char arr[16];
2841 	int opts;
2842 	int minLen = length > 15 ? 15 : length;
2843 
2844 	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
2845 		return -EACCES;
2846 	memcpy(arr, buffer, minLen);
2847 	arr[minLen] = '\0';
2848 	if (1 != sscanf(arr, "%d", &opts))
2849 		return -EINVAL;
2850 	scsi_debug_opts = opts;
2851 	if (scsi_debug_every_nth != 0)
2852 		scsi_debug_cmnd_count = 0;
2853 	return length;
2854 }
2855 
2856 static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
2857 {
2858 	seq_printf(m, "scsi_debug adapter driver, version "
2859 	    "%s [%s]\n"
2860 	    "num_tgts=%d, shared (ram) size=%d MB, opts=0x%x, "
2861 	    "every_nth=%d(curr:%d)\n"
2862 	    "delay=%d, max_luns=%d, scsi_level=%d\n"
2863 	    "sector_size=%d bytes, cylinders=%d, heads=%d, sectors=%d\n"
2864 	    "number of aborts=%d, device_reset=%d, bus_resets=%d, "
2865 	    "host_resets=%d\ndix_reads=%d dix_writes=%d dif_errors=%d\n",
2866 	    SCSI_DEBUG_VERSION, scsi_debug_version_date, scsi_debug_num_tgts,
2867 	    scsi_debug_dev_size_mb, scsi_debug_opts, scsi_debug_every_nth,
2868 	    scsi_debug_cmnd_count, scsi_debug_delay,
2869 	    scsi_debug_max_luns, scsi_debug_scsi_level,
2870 	    scsi_debug_sector_size, sdebug_cylinders_per, sdebug_heads,
2871 	    sdebug_sectors_per, num_aborts, num_dev_resets, num_bus_resets,
2872 	    num_host_resets, dix_reads, dix_writes, dif_errors);
2873 	return 0;
2874 }
2875 
2876 static ssize_t delay_show(struct device_driver *ddp, char *buf)
2877 {
2878         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_delay);
2879 }
2880 
2881 static ssize_t delay_store(struct device_driver *ddp, const char *buf,
2882 			   size_t count)
2883 {
2884         int delay;
2885 	char work[20];
2886 
2887         if (1 == sscanf(buf, "%10s", work)) {
2888 		if ((1 == sscanf(work, "%d", &delay)) && (delay >= 0)) {
2889 			scsi_debug_delay = delay;
2890 			return count;
2891 		}
2892 	}
2893 	return -EINVAL;
2894 }
2895 static DRIVER_ATTR_RW(delay);
2896 
2897 static ssize_t opts_show(struct device_driver *ddp, char *buf)
2898 {
2899         return scnprintf(buf, PAGE_SIZE, "0x%x\n", scsi_debug_opts);
2900 }
2901 
2902 static ssize_t opts_store(struct device_driver *ddp, const char *buf,
2903 			  size_t count)
2904 {
2905         int opts;
2906 	char work[20];
2907 
2908         if (1 == sscanf(buf, "%10s", work)) {
2909 		if (0 == strnicmp(work,"0x", 2)) {
2910 			if (1 == sscanf(&work[2], "%x", &opts))
2911 				goto opts_done;
2912 		} else {
2913 			if (1 == sscanf(work, "%d", &opts))
2914 				goto opts_done;
2915 		}
2916 	}
2917 	return -EINVAL;
2918 opts_done:
2919 	scsi_debug_opts = opts;
2920 	scsi_debug_cmnd_count = 0;
2921 	return count;
2922 }
2923 static DRIVER_ATTR_RW(opts);
2924 
2925 static ssize_t ptype_show(struct device_driver *ddp, char *buf)
2926 {
2927         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_ptype);
2928 }
2929 static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
2930 			   size_t count)
2931 {
2932         int n;
2933 
2934 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
2935 		scsi_debug_ptype = n;
2936 		return count;
2937 	}
2938 	return -EINVAL;
2939 }
2940 static DRIVER_ATTR_RW(ptype);
2941 
2942 static ssize_t dsense_show(struct device_driver *ddp, char *buf)
2943 {
2944         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dsense);
2945 }
2946 static ssize_t dsense_store(struct device_driver *ddp, const char *buf,
2947 			    size_t count)
2948 {
2949         int n;
2950 
2951 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
2952 		scsi_debug_dsense = n;
2953 		return count;
2954 	}
2955 	return -EINVAL;
2956 }
2957 static DRIVER_ATTR_RW(dsense);
2958 
2959 static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
2960 {
2961         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_fake_rw);
2962 }
2963 static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
2964 			     size_t count)
2965 {
2966         int n;
2967 
2968 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
2969 		scsi_debug_fake_rw = n;
2970 		return count;
2971 	}
2972 	return -EINVAL;
2973 }
2974 static DRIVER_ATTR_RW(fake_rw);
2975 
2976 static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf)
2977 {
2978         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_no_lun_0);
2979 }
2980 static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf,
2981 			      size_t count)
2982 {
2983         int n;
2984 
2985 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
2986 		scsi_debug_no_lun_0 = n;
2987 		return count;
2988 	}
2989 	return -EINVAL;
2990 }
2991 static DRIVER_ATTR_RW(no_lun_0);
2992 
2993 static ssize_t num_tgts_show(struct device_driver *ddp, char *buf)
2994 {
2995         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_num_tgts);
2996 }
2997 static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf,
2998 			      size_t count)
2999 {
3000         int n;
3001 
3002 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
3003 		scsi_debug_num_tgts = n;
3004 		sdebug_max_tgts_luns();
3005 		return count;
3006 	}
3007 	return -EINVAL;
3008 }
3009 static DRIVER_ATTR_RW(num_tgts);
3010 
3011 static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf)
3012 {
3013         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dev_size_mb);
3014 }
3015 static DRIVER_ATTR_RO(dev_size_mb);
3016 
3017 static ssize_t num_parts_show(struct device_driver *ddp, char *buf)
3018 {
3019         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_num_parts);
3020 }
3021 static DRIVER_ATTR_RO(num_parts);
3022 
3023 static ssize_t every_nth_show(struct device_driver *ddp, char *buf)
3024 {
3025         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_every_nth);
3026 }
3027 static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
3028 			       size_t count)
3029 {
3030         int nth;
3031 
3032 	if ((count > 0) && (1 == sscanf(buf, "%d", &nth))) {
3033 		scsi_debug_every_nth = nth;
3034 		scsi_debug_cmnd_count = 0;
3035 		return count;
3036 	}
3037 	return -EINVAL;
3038 }
3039 static DRIVER_ATTR_RW(every_nth);
3040 
3041 static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
3042 {
3043         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_max_luns);
3044 }
3045 static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
3046 			      size_t count)
3047 {
3048         int n;
3049 
3050 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
3051 		scsi_debug_max_luns = n;
3052 		sdebug_max_tgts_luns();
3053 		return count;
3054 	}
3055 	return -EINVAL;
3056 }
3057 static DRIVER_ATTR_RW(max_luns);
3058 
3059 static ssize_t max_queue_show(struct device_driver *ddp, char *buf)
3060 {
3061         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_max_queue);
3062 }
3063 static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
3064 			       size_t count)
3065 {
3066         int n;
3067 
3068 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
3069 	    (n <= SCSI_DEBUG_CANQUEUE)) {
3070 		scsi_debug_max_queue = n;
3071 		return count;
3072 	}
3073 	return -EINVAL;
3074 }
3075 static DRIVER_ATTR_RW(max_queue);
3076 
3077 static ssize_t no_uld_show(struct device_driver *ddp, char *buf)
3078 {
3079         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_no_uld);
3080 }
3081 static DRIVER_ATTR_RO(no_uld);
3082 
3083 static ssize_t scsi_level_show(struct device_driver *ddp, char *buf)
3084 {
3085         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_scsi_level);
3086 }
3087 static DRIVER_ATTR_RO(scsi_level);
3088 
3089 static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf)
3090 {
3091         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_virtual_gb);
3092 }
3093 static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
3094 				size_t count)
3095 {
3096         int n;
3097 
3098 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
3099 		scsi_debug_virtual_gb = n;
3100 
3101 		sdebug_capacity = get_sdebug_capacity();
3102 
3103 		return count;
3104 	}
3105 	return -EINVAL;
3106 }
3107 static DRIVER_ATTR_RW(virtual_gb);
3108 
3109 static ssize_t add_host_show(struct device_driver *ddp, char *buf)
3110 {
3111         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_add_host);
3112 }
3113 
3114 static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
3115 			      size_t count)
3116 {
3117 	int delta_hosts;
3118 
3119 	if (sscanf(buf, "%d", &delta_hosts) != 1)
3120 		return -EINVAL;
3121 	if (delta_hosts > 0) {
3122 		do {
3123 			sdebug_add_adapter();
3124 		} while (--delta_hosts);
3125 	} else if (delta_hosts < 0) {
3126 		do {
3127 			sdebug_remove_adapter();
3128 		} while (++delta_hosts);
3129 	}
3130 	return count;
3131 }
3132 static DRIVER_ATTR_RW(add_host);
3133 
3134 static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf)
3135 {
3136 	return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_vpd_use_hostno);
3137 }
3138 static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf,
3139 				    size_t count)
3140 {
3141 	int n;
3142 
3143 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
3144 		scsi_debug_vpd_use_hostno = n;
3145 		return count;
3146 	}
3147 	return -EINVAL;
3148 }
3149 static DRIVER_ATTR_RW(vpd_use_hostno);
3150 
3151 static ssize_t sector_size_show(struct device_driver *ddp, char *buf)
3152 {
3153 	return scnprintf(buf, PAGE_SIZE, "%u\n", scsi_debug_sector_size);
3154 }
3155 static DRIVER_ATTR_RO(sector_size);
3156 
3157 static ssize_t dix_show(struct device_driver *ddp, char *buf)
3158 {
3159 	return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dix);
3160 }
3161 static DRIVER_ATTR_RO(dix);
3162 
3163 static ssize_t dif_show(struct device_driver *ddp, char *buf)
3164 {
3165 	return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dif);
3166 }
3167 static DRIVER_ATTR_RO(dif);
3168 
3169 static ssize_t guard_show(struct device_driver *ddp, char *buf)
3170 {
3171 	return scnprintf(buf, PAGE_SIZE, "%u\n", scsi_debug_guard);
3172 }
3173 static DRIVER_ATTR_RO(guard);
3174 
3175 static ssize_t ato_show(struct device_driver *ddp, char *buf)
3176 {
3177 	return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_ato);
3178 }
3179 static DRIVER_ATTR_RO(ato);
3180 
3181 static ssize_t map_show(struct device_driver *ddp, char *buf)
3182 {
3183 	ssize_t count;
3184 
3185 	if (!scsi_debug_lbp())
3186 		return scnprintf(buf, PAGE_SIZE, "0-%u\n",
3187 				 sdebug_store_sectors);
3188 
3189 	count = bitmap_scnlistprintf(buf, PAGE_SIZE, map_storep, map_size);
3190 
3191 	buf[count++] = '\n';
3192 	buf[count++] = 0;
3193 
3194 	return count;
3195 }
3196 static DRIVER_ATTR_RO(map);
3197 
3198 static ssize_t removable_show(struct device_driver *ddp, char *buf)
3199 {
3200 	return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_removable ? 1 : 0);
3201 }
3202 static ssize_t removable_store(struct device_driver *ddp, const char *buf,
3203 			       size_t count)
3204 {
3205 	int n;
3206 
3207 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
3208 		scsi_debug_removable = (n > 0);
3209 		return count;
3210 	}
3211 	return -EINVAL;
3212 }
3213 static DRIVER_ATTR_RW(removable);
3214 
3215 /* Note: The following array creates attribute files in the
3216    /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
3217    files (over those found in the /sys/module/scsi_debug/parameters
3218    directory) is that auxiliary actions can be triggered when an attribute
3219    is changed. For example see: sdebug_add_host_store() above.
3220  */
3221 
3222 static struct attribute *sdebug_drv_attrs[] = {
3223 	&driver_attr_delay.attr,
3224 	&driver_attr_opts.attr,
3225 	&driver_attr_ptype.attr,
3226 	&driver_attr_dsense.attr,
3227 	&driver_attr_fake_rw.attr,
3228 	&driver_attr_no_lun_0.attr,
3229 	&driver_attr_num_tgts.attr,
3230 	&driver_attr_dev_size_mb.attr,
3231 	&driver_attr_num_parts.attr,
3232 	&driver_attr_every_nth.attr,
3233 	&driver_attr_max_luns.attr,
3234 	&driver_attr_max_queue.attr,
3235 	&driver_attr_no_uld.attr,
3236 	&driver_attr_scsi_level.attr,
3237 	&driver_attr_virtual_gb.attr,
3238 	&driver_attr_add_host.attr,
3239 	&driver_attr_vpd_use_hostno.attr,
3240 	&driver_attr_sector_size.attr,
3241 	&driver_attr_dix.attr,
3242 	&driver_attr_dif.attr,
3243 	&driver_attr_guard.attr,
3244 	&driver_attr_ato.attr,
3245 	&driver_attr_map.attr,
3246 	&driver_attr_removable.attr,
3247 	NULL,
3248 };
3249 ATTRIBUTE_GROUPS(sdebug_drv);
3250 
3251 struct device *pseudo_primary;
3252 
3253 static int __init scsi_debug_init(void)
3254 {
3255 	unsigned long sz;
3256 	int host_to_add;
3257 	int k;
3258 	int ret;
3259 
3260 	switch (scsi_debug_sector_size) {
3261 	case  512:
3262 	case 1024:
3263 	case 2048:
3264 	case 4096:
3265 		break;
3266 	default:
3267 		printk(KERN_ERR "scsi_debug_init: invalid sector_size %d\n",
3268 		       scsi_debug_sector_size);
3269 		return -EINVAL;
3270 	}
3271 
3272 	switch (scsi_debug_dif) {
3273 
3274 	case SD_DIF_TYPE0_PROTECTION:
3275 	case SD_DIF_TYPE1_PROTECTION:
3276 	case SD_DIF_TYPE2_PROTECTION:
3277 	case SD_DIF_TYPE3_PROTECTION:
3278 		break;
3279 
3280 	default:
3281 		printk(KERN_ERR "scsi_debug_init: dif must be 0, 1, 2 or 3\n");
3282 		return -EINVAL;
3283 	}
3284 
3285 	if (scsi_debug_guard > 1) {
3286 		printk(KERN_ERR "scsi_debug_init: guard must be 0 or 1\n");
3287 		return -EINVAL;
3288 	}
3289 
3290 	if (scsi_debug_ato > 1) {
3291 		printk(KERN_ERR "scsi_debug_init: ato must be 0 or 1\n");
3292 		return -EINVAL;
3293 	}
3294 
3295 	if (scsi_debug_physblk_exp > 15) {
3296 		printk(KERN_ERR "scsi_debug_init: invalid physblk_exp %u\n",
3297 		       scsi_debug_physblk_exp);
3298 		return -EINVAL;
3299 	}
3300 
3301 	if (scsi_debug_lowest_aligned > 0x3fff) {
3302 		printk(KERN_ERR "scsi_debug_init: lowest_aligned too big: %u\n",
3303 		       scsi_debug_lowest_aligned);
3304 		return -EINVAL;
3305 	}
3306 
3307 	if (scsi_debug_dev_size_mb < 1)
3308 		scsi_debug_dev_size_mb = 1;  /* force minimum 1 MB ramdisk */
3309 	sz = (unsigned long)scsi_debug_dev_size_mb * 1048576;
3310 	sdebug_store_sectors = sz / scsi_debug_sector_size;
3311 	sdebug_capacity = get_sdebug_capacity();
3312 
3313 	/* play around with geometry, don't waste too much on track 0 */
3314 	sdebug_heads = 8;
3315 	sdebug_sectors_per = 32;
3316 	if (scsi_debug_dev_size_mb >= 16)
3317 		sdebug_heads = 32;
3318 	else if (scsi_debug_dev_size_mb >= 256)
3319 		sdebug_heads = 64;
3320 	sdebug_cylinders_per = (unsigned long)sdebug_capacity /
3321 			       (sdebug_sectors_per * sdebug_heads);
3322 	if (sdebug_cylinders_per >= 1024) {
3323 		/* other LLDs do this; implies >= 1GB ram disk ... */
3324 		sdebug_heads = 255;
3325 		sdebug_sectors_per = 63;
3326 		sdebug_cylinders_per = (unsigned long)sdebug_capacity /
3327 			       (sdebug_sectors_per * sdebug_heads);
3328 	}
3329 
3330 	fake_storep = vmalloc(sz);
3331 	if (NULL == fake_storep) {
3332 		printk(KERN_ERR "scsi_debug_init: out of memory, 1\n");
3333 		return -ENOMEM;
3334 	}
3335 	memset(fake_storep, 0, sz);
3336 	if (scsi_debug_num_parts > 0)
3337 		sdebug_build_parts(fake_storep, sz);
3338 
3339 	if (scsi_debug_dix) {
3340 		int dif_size;
3341 
3342 		dif_size = sdebug_store_sectors * sizeof(struct sd_dif_tuple);
3343 		dif_storep = vmalloc(dif_size);
3344 
3345 		printk(KERN_ERR "scsi_debug_init: dif_storep %u bytes @ %p\n",
3346 		       dif_size, dif_storep);
3347 
3348 		if (dif_storep == NULL) {
3349 			printk(KERN_ERR "scsi_debug_init: out of mem. (DIX)\n");
3350 			ret = -ENOMEM;
3351 			goto free_vm;
3352 		}
3353 
3354 		memset(dif_storep, 0xff, dif_size);
3355 	}
3356 
3357 	/* Logical Block Provisioning */
3358 	if (scsi_debug_lbp()) {
3359 		scsi_debug_unmap_max_blocks =
3360 			clamp(scsi_debug_unmap_max_blocks, 0U, 0xffffffffU);
3361 
3362 		scsi_debug_unmap_max_desc =
3363 			clamp(scsi_debug_unmap_max_desc, 0U, 256U);
3364 
3365 		scsi_debug_unmap_granularity =
3366 			clamp(scsi_debug_unmap_granularity, 1U, 0xffffffffU);
3367 
3368 		if (scsi_debug_unmap_alignment &&
3369 		    scsi_debug_unmap_granularity <=
3370 		    scsi_debug_unmap_alignment) {
3371 			printk(KERN_ERR
3372 			       "%s: ERR: unmap_granularity <= unmap_alignment\n",
3373 			       __func__);
3374 			return -EINVAL;
3375 		}
3376 
3377 		map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
3378 		map_storep = vmalloc(BITS_TO_LONGS(map_size) * sizeof(long));
3379 
3380 		printk(KERN_INFO "scsi_debug_init: %lu provisioning blocks\n",
3381 		       map_size);
3382 
3383 		if (map_storep == NULL) {
3384 			printk(KERN_ERR "scsi_debug_init: out of mem. (MAP)\n");
3385 			ret = -ENOMEM;
3386 			goto free_vm;
3387 		}
3388 
3389 		bitmap_zero(map_storep, map_size);
3390 
3391 		/* Map first 1KB for partition table */
3392 		if (scsi_debug_num_parts)
3393 			map_region(0, 2);
3394 	}
3395 
3396 	pseudo_primary = root_device_register("pseudo_0");
3397 	if (IS_ERR(pseudo_primary)) {
3398 		printk(KERN_WARNING "scsi_debug: root_device_register() error\n");
3399 		ret = PTR_ERR(pseudo_primary);
3400 		goto free_vm;
3401 	}
3402 	ret = bus_register(&pseudo_lld_bus);
3403 	if (ret < 0) {
3404 		printk(KERN_WARNING "scsi_debug: bus_register error: %d\n",
3405 			ret);
3406 		goto dev_unreg;
3407 	}
3408 	ret = driver_register(&sdebug_driverfs_driver);
3409 	if (ret < 0) {
3410 		printk(KERN_WARNING "scsi_debug: driver_register error: %d\n",
3411 			ret);
3412 		goto bus_unreg;
3413 	}
3414 
3415 	init_all_queued();
3416 
3417 	host_to_add = scsi_debug_add_host;
3418         scsi_debug_add_host = 0;
3419 
3420         for (k = 0; k < host_to_add; k++) {
3421                 if (sdebug_add_adapter()) {
3422                         printk(KERN_ERR "scsi_debug_init: "
3423                                "sdebug_add_adapter failed k=%d\n", k);
3424                         break;
3425                 }
3426         }
3427 
3428 	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) {
3429 		printk(KERN_INFO "scsi_debug_init: built %d host(s)\n",
3430 		       scsi_debug_add_host);
3431 	}
3432 	return 0;
3433 
3434 bus_unreg:
3435 	bus_unregister(&pseudo_lld_bus);
3436 dev_unreg:
3437 	root_device_unregister(pseudo_primary);
3438 free_vm:
3439 	if (map_storep)
3440 		vfree(map_storep);
3441 	if (dif_storep)
3442 		vfree(dif_storep);
3443 	vfree(fake_storep);
3444 
3445 	return ret;
3446 }
3447 
3448 static void __exit scsi_debug_exit(void)
3449 {
3450 	int k = scsi_debug_add_host;
3451 
3452 	stop_all_queued();
3453 	for (; k; k--)
3454 		sdebug_remove_adapter();
3455 	driver_unregister(&sdebug_driverfs_driver);
3456 	bus_unregister(&pseudo_lld_bus);
3457 	root_device_unregister(pseudo_primary);
3458 
3459 	if (dif_storep)
3460 		vfree(dif_storep);
3461 
3462 	vfree(fake_storep);
3463 }
3464 
3465 device_initcall(scsi_debug_init);
3466 module_exit(scsi_debug_exit);
3467 
3468 static void sdebug_release_adapter(struct device * dev)
3469 {
3470         struct sdebug_host_info *sdbg_host;
3471 
3472 	sdbg_host = to_sdebug_host(dev);
3473         kfree(sdbg_host);
3474 }
3475 
3476 static int sdebug_add_adapter(void)
3477 {
3478 	int k, devs_per_host;
3479         int error = 0;
3480         struct sdebug_host_info *sdbg_host;
3481 	struct sdebug_dev_info *sdbg_devinfo, *tmp;
3482 
3483         sdbg_host = kzalloc(sizeof(*sdbg_host),GFP_KERNEL);
3484         if (NULL == sdbg_host) {
3485                 printk(KERN_ERR "%s: out of memory at line %d\n",
3486                        __func__, __LINE__);
3487                 return -ENOMEM;
3488         }
3489 
3490         INIT_LIST_HEAD(&sdbg_host->dev_info_list);
3491 
3492 	devs_per_host = scsi_debug_num_tgts * scsi_debug_max_luns;
3493         for (k = 0; k < devs_per_host; k++) {
3494 		sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
3495 		if (!sdbg_devinfo) {
3496                         printk(KERN_ERR "%s: out of memory at line %d\n",
3497                                __func__, __LINE__);
3498                         error = -ENOMEM;
3499 			goto clean;
3500                 }
3501         }
3502 
3503         spin_lock(&sdebug_host_list_lock);
3504         list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
3505         spin_unlock(&sdebug_host_list_lock);
3506 
3507         sdbg_host->dev.bus = &pseudo_lld_bus;
3508         sdbg_host->dev.parent = pseudo_primary;
3509         sdbg_host->dev.release = &sdebug_release_adapter;
3510         dev_set_name(&sdbg_host->dev, "adapter%d", scsi_debug_add_host);
3511 
3512         error = device_register(&sdbg_host->dev);
3513 
3514         if (error)
3515 		goto clean;
3516 
3517 	++scsi_debug_add_host;
3518         return error;
3519 
3520 clean:
3521 	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
3522 				 dev_list) {
3523 		list_del(&sdbg_devinfo->dev_list);
3524 		kfree(sdbg_devinfo);
3525 	}
3526 
3527 	kfree(sdbg_host);
3528         return error;
3529 }
3530 
3531 static void sdebug_remove_adapter(void)
3532 {
3533         struct sdebug_host_info * sdbg_host = NULL;
3534 
3535         spin_lock(&sdebug_host_list_lock);
3536         if (!list_empty(&sdebug_host_list)) {
3537                 sdbg_host = list_entry(sdebug_host_list.prev,
3538                                        struct sdebug_host_info, host_list);
3539 		list_del(&sdbg_host->host_list);
3540 	}
3541         spin_unlock(&sdebug_host_list_lock);
3542 
3543 	if (!sdbg_host)
3544 		return;
3545 
3546         device_unregister(&sdbg_host->dev);
3547         --scsi_debug_add_host;
3548 }
3549 
3550 static
3551 int scsi_debug_queuecommand_lck(struct scsi_cmnd *SCpnt, done_funct_t done)
3552 {
3553 	unsigned char *cmd = (unsigned char *) SCpnt->cmnd;
3554 	int len, k;
3555 	unsigned int num;
3556 	unsigned long long lba;
3557 	u32 ei_lba;
3558 	int errsts = 0;
3559 	int target = SCpnt->device->id;
3560 	struct sdebug_dev_info *devip = NULL;
3561 	int inj_recovered = 0;
3562 	int inj_transport = 0;
3563 	int inj_dif = 0;
3564 	int inj_dix = 0;
3565 	int delay_override = 0;
3566 	int unmap = 0;
3567 
3568 	scsi_set_resid(SCpnt, 0);
3569 	if ((SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) && cmd) {
3570 		printk(KERN_INFO "scsi_debug: cmd ");
3571 		for (k = 0, len = SCpnt->cmd_len; k < len; ++k)
3572 			printk("%02x ", (int)cmd[k]);
3573 		printk("\n");
3574 	}
3575 
3576 	if (target == SCpnt->device->host->hostt->this_id) {
3577 		printk(KERN_INFO "scsi_debug: initiator's id used as "
3578 		       "target!\n");
3579 		return schedule_resp(SCpnt, NULL, done,
3580 				     DID_NO_CONNECT << 16, 0);
3581 	}
3582 
3583 	if ((SCpnt->device->lun >= scsi_debug_max_luns) &&
3584 	    (SCpnt->device->lun != SAM2_WLUN_REPORT_LUNS))
3585 		return schedule_resp(SCpnt, NULL, done,
3586 				     DID_NO_CONNECT << 16, 0);
3587 	devip = devInfoReg(SCpnt->device);
3588 	if (NULL == devip)
3589 		return schedule_resp(SCpnt, NULL, done,
3590 				     DID_NO_CONNECT << 16, 0);
3591 
3592 	if ((scsi_debug_every_nth != 0) &&
3593 	    (++scsi_debug_cmnd_count >= abs(scsi_debug_every_nth))) {
3594 		scsi_debug_cmnd_count = 0;
3595 		if (scsi_debug_every_nth < -1)
3596 			scsi_debug_every_nth = -1;
3597 		if (SCSI_DEBUG_OPT_TIMEOUT & scsi_debug_opts)
3598 			return 0; /* ignore command causing timeout */
3599 		else if (SCSI_DEBUG_OPT_MAC_TIMEOUT & scsi_debug_opts &&
3600 			 scsi_medium_access_command(SCpnt))
3601 			return 0; /* time out reads and writes */
3602 		else if (SCSI_DEBUG_OPT_RECOVERED_ERR & scsi_debug_opts)
3603 			inj_recovered = 1; /* to reads and writes below */
3604 		else if (SCSI_DEBUG_OPT_TRANSPORT_ERR & scsi_debug_opts)
3605 			inj_transport = 1; /* to reads and writes below */
3606 		else if (SCSI_DEBUG_OPT_DIF_ERR & scsi_debug_opts)
3607 			inj_dif = 1; /* to reads and writes below */
3608 		else if (SCSI_DEBUG_OPT_DIX_ERR & scsi_debug_opts)
3609 			inj_dix = 1; /* to reads and writes below */
3610 	}
3611 
3612 	if (devip->wlun) {
3613 		switch (*cmd) {
3614 		case INQUIRY:
3615 		case REQUEST_SENSE:
3616 		case TEST_UNIT_READY:
3617 		case REPORT_LUNS:
3618 			break;  /* only allowable wlun commands */
3619 		default:
3620 			if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
3621 				printk(KERN_INFO "scsi_debug: Opcode: 0x%x "
3622 				       "not supported for wlun\n", *cmd);
3623 			mk_sense_buffer(devip, ILLEGAL_REQUEST,
3624 					INVALID_OPCODE, 0);
3625 			errsts = check_condition_result;
3626 			return schedule_resp(SCpnt, devip, done, errsts,
3627 					     0);
3628 		}
3629 	}
3630 
3631 	switch (*cmd) {
3632 	case INQUIRY:     /* mandatory, ignore unit attention */
3633 		delay_override = 1;
3634 		errsts = resp_inquiry(SCpnt, target, devip);
3635 		break;
3636 	case REQUEST_SENSE:	/* mandatory, ignore unit attention */
3637 		delay_override = 1;
3638 		errsts = resp_requests(SCpnt, devip);
3639 		break;
3640 	case REZERO_UNIT:	/* actually this is REWIND for SSC */
3641 	case START_STOP:
3642 		errsts = resp_start_stop(SCpnt, devip);
3643 		break;
3644 	case ALLOW_MEDIUM_REMOVAL:
3645 		errsts = check_readiness(SCpnt, 1, devip);
3646 		if (errsts)
3647 			break;
3648 		if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
3649 			printk(KERN_INFO "scsi_debug: Medium removal %s\n",
3650 			       cmd[4] ? "inhibited" : "enabled");
3651 		break;
3652 	case SEND_DIAGNOSTIC:     /* mandatory */
3653 		errsts = check_readiness(SCpnt, 1, devip);
3654 		break;
3655 	case TEST_UNIT_READY:     /* mandatory */
3656 		delay_override = 1;
3657 		errsts = check_readiness(SCpnt, 0, devip);
3658 		break;
3659 	case RESERVE:
3660 		errsts = check_readiness(SCpnt, 1, devip);
3661 		break;
3662 	case RESERVE_10:
3663 		errsts = check_readiness(SCpnt, 1, devip);
3664 		break;
3665 	case RELEASE:
3666 		errsts = check_readiness(SCpnt, 1, devip);
3667 		break;
3668 	case RELEASE_10:
3669 		errsts = check_readiness(SCpnt, 1, devip);
3670 		break;
3671 	case READ_CAPACITY:
3672 		errsts = resp_readcap(SCpnt, devip);
3673 		break;
3674 	case SERVICE_ACTION_IN:
3675 		if (cmd[1] == SAI_READ_CAPACITY_16)
3676 			errsts = resp_readcap16(SCpnt, devip);
3677 		else if (cmd[1] == SAI_GET_LBA_STATUS) {
3678 
3679 			if (scsi_debug_lbp() == 0) {
3680 				mk_sense_buffer(devip, ILLEGAL_REQUEST,
3681 						INVALID_COMMAND_OPCODE, 0);
3682 				errsts = check_condition_result;
3683 			} else
3684 				errsts = resp_get_lba_status(SCpnt, devip);
3685 		} else {
3686 			mk_sense_buffer(devip, ILLEGAL_REQUEST,
3687 					INVALID_OPCODE, 0);
3688 			errsts = check_condition_result;
3689 		}
3690 		break;
3691 	case MAINTENANCE_IN:
3692 		if (MI_REPORT_TARGET_PGS != cmd[1]) {
3693 			mk_sense_buffer(devip, ILLEGAL_REQUEST,
3694 					INVALID_OPCODE, 0);
3695 			errsts = check_condition_result;
3696 			break;
3697 		}
3698 		errsts = resp_report_tgtpgs(SCpnt, devip);
3699 		break;
3700 	case READ_16:
3701 	case READ_12:
3702 	case READ_10:
3703 		/* READ{10,12,16} and DIF Type 2 are natural enemies */
3704 		if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
3705 		    cmd[1] & 0xe0) {
3706 			mk_sense_buffer(devip, ILLEGAL_REQUEST,
3707 					INVALID_COMMAND_OPCODE, 0);
3708 			errsts = check_condition_result;
3709 			break;
3710 		}
3711 
3712 		if ((scsi_debug_dif == SD_DIF_TYPE1_PROTECTION ||
3713 		     scsi_debug_dif == SD_DIF_TYPE3_PROTECTION) &&
3714 		    (cmd[1] & 0xe0) == 0)
3715 			printk(KERN_ERR "Unprotected RD/WR to DIF device\n");
3716 
3717 		/* fall through */
3718 	case READ_6:
3719 read:
3720 		errsts = check_readiness(SCpnt, 0, devip);
3721 		if (errsts)
3722 			break;
3723 		if (scsi_debug_fake_rw)
3724 			break;
3725 		get_data_transfer_info(cmd, &lba, &num, &ei_lba);
3726 		errsts = resp_read(SCpnt, lba, num, devip, ei_lba);
3727 		if (inj_recovered && (0 == errsts)) {
3728 			mk_sense_buffer(devip, RECOVERED_ERROR,
3729 					THRESHOLD_EXCEEDED, 0);
3730 			errsts = check_condition_result;
3731 		} else if (inj_transport && (0 == errsts)) {
3732 			mk_sense_buffer(devip, ABORTED_COMMAND,
3733 					TRANSPORT_PROBLEM, ACK_NAK_TO);
3734 			errsts = check_condition_result;
3735 		} else if (inj_dif && (0 == errsts)) {
3736 			mk_sense_buffer(devip, ABORTED_COMMAND, 0x10, 1);
3737 			errsts = illegal_condition_result;
3738 		} else if (inj_dix && (0 == errsts)) {
3739 			mk_sense_buffer(devip, ILLEGAL_REQUEST, 0x10, 1);
3740 			errsts = illegal_condition_result;
3741 		}
3742 		break;
3743 	case REPORT_LUNS:	/* mandatory, ignore unit attention */
3744 		delay_override = 1;
3745 		errsts = resp_report_luns(SCpnt, devip);
3746 		break;
3747 	case VERIFY:		/* 10 byte SBC-2 command */
3748 		errsts = check_readiness(SCpnt, 0, devip);
3749 		break;
3750 	case WRITE_16:
3751 	case WRITE_12:
3752 	case WRITE_10:
3753 		/* WRITE{10,12,16} and DIF Type 2 are natural enemies */
3754 		if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
3755 		    cmd[1] & 0xe0) {
3756 			mk_sense_buffer(devip, ILLEGAL_REQUEST,
3757 					INVALID_COMMAND_OPCODE, 0);
3758 			errsts = check_condition_result;
3759 			break;
3760 		}
3761 
3762 		if ((scsi_debug_dif == SD_DIF_TYPE1_PROTECTION ||
3763 		     scsi_debug_dif == SD_DIF_TYPE3_PROTECTION) &&
3764 		    (cmd[1] & 0xe0) == 0)
3765 			printk(KERN_ERR "Unprotected RD/WR to DIF device\n");
3766 
3767 		/* fall through */
3768 	case WRITE_6:
3769 write:
3770 		errsts = check_readiness(SCpnt, 0, devip);
3771 		if (errsts)
3772 			break;
3773 		if (scsi_debug_fake_rw)
3774 			break;
3775 		get_data_transfer_info(cmd, &lba, &num, &ei_lba);
3776 		errsts = resp_write(SCpnt, lba, num, devip, ei_lba);
3777 		if (inj_recovered && (0 == errsts)) {
3778 			mk_sense_buffer(devip, RECOVERED_ERROR,
3779 					THRESHOLD_EXCEEDED, 0);
3780 			errsts = check_condition_result;
3781 		} else if (inj_dif && (0 == errsts)) {
3782 			mk_sense_buffer(devip, ABORTED_COMMAND, 0x10, 1);
3783 			errsts = illegal_condition_result;
3784 		} else if (inj_dix && (0 == errsts)) {
3785 			mk_sense_buffer(devip, ILLEGAL_REQUEST, 0x10, 1);
3786 			errsts = illegal_condition_result;
3787 		}
3788 		break;
3789 	case WRITE_SAME_16:
3790 	case WRITE_SAME:
3791 		if (cmd[1] & 0x8) {
3792 			if ((*cmd == WRITE_SAME_16 && scsi_debug_lbpws == 0) ||
3793 			    (*cmd == WRITE_SAME && scsi_debug_lbpws10 == 0)) {
3794 				mk_sense_buffer(devip, ILLEGAL_REQUEST,
3795 						INVALID_FIELD_IN_CDB, 0);
3796 				errsts = check_condition_result;
3797 			} else
3798 				unmap = 1;
3799 		}
3800 		if (errsts)
3801 			break;
3802 		errsts = check_readiness(SCpnt, 0, devip);
3803 		if (errsts)
3804 			break;
3805 		get_data_transfer_info(cmd, &lba, &num, &ei_lba);
3806 		errsts = resp_write_same(SCpnt, lba, num, devip, ei_lba, unmap);
3807 		break;
3808 	case UNMAP:
3809 		errsts = check_readiness(SCpnt, 0, devip);
3810 		if (errsts)
3811 			break;
3812 
3813 		if (scsi_debug_unmap_max_desc == 0 || scsi_debug_lbpu == 0) {
3814 			mk_sense_buffer(devip, ILLEGAL_REQUEST,
3815 					INVALID_COMMAND_OPCODE, 0);
3816 			errsts = check_condition_result;
3817 		} else
3818 			errsts = resp_unmap(SCpnt, devip);
3819 		break;
3820 	case MODE_SENSE:
3821 	case MODE_SENSE_10:
3822 		errsts = resp_mode_sense(SCpnt, target, devip);
3823 		break;
3824 	case MODE_SELECT:
3825 		errsts = resp_mode_select(SCpnt, 1, devip);
3826 		break;
3827 	case MODE_SELECT_10:
3828 		errsts = resp_mode_select(SCpnt, 0, devip);
3829 		break;
3830 	case LOG_SENSE:
3831 		errsts = resp_log_sense(SCpnt, devip);
3832 		break;
3833 	case SYNCHRONIZE_CACHE:
3834 		delay_override = 1;
3835 		errsts = check_readiness(SCpnt, 0, devip);
3836 		break;
3837 	case WRITE_BUFFER:
3838 		errsts = check_readiness(SCpnt, 1, devip);
3839 		break;
3840 	case XDWRITEREAD_10:
3841 		if (!scsi_bidi_cmnd(SCpnt)) {
3842 			mk_sense_buffer(devip, ILLEGAL_REQUEST,
3843 					INVALID_FIELD_IN_CDB, 0);
3844 			errsts = check_condition_result;
3845 			break;
3846 		}
3847 
3848 		errsts = check_readiness(SCpnt, 0, devip);
3849 		if (errsts)
3850 			break;
3851 		if (scsi_debug_fake_rw)
3852 			break;
3853 		get_data_transfer_info(cmd, &lba, &num, &ei_lba);
3854 		errsts = resp_read(SCpnt, lba, num, devip, ei_lba);
3855 		if (errsts)
3856 			break;
3857 		errsts = resp_write(SCpnt, lba, num, devip, ei_lba);
3858 		if (errsts)
3859 			break;
3860 		errsts = resp_xdwriteread(SCpnt, lba, num, devip);
3861 		break;
3862 	case VARIABLE_LENGTH_CMD:
3863 		if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION) {
3864 
3865 			if ((cmd[10] & 0xe0) == 0)
3866 				printk(KERN_ERR
3867 				       "Unprotected RD/WR to DIF device\n");
3868 
3869 			if (cmd[9] == READ_32) {
3870 				BUG_ON(SCpnt->cmd_len < 32);
3871 				goto read;
3872 			}
3873 
3874 			if (cmd[9] == WRITE_32) {
3875 				BUG_ON(SCpnt->cmd_len < 32);
3876 				goto write;
3877 			}
3878 		}
3879 
3880 		mk_sense_buffer(devip, ILLEGAL_REQUEST,
3881 				INVALID_FIELD_IN_CDB, 0);
3882 		errsts = check_condition_result;
3883 		break;
3884 
3885 	default:
3886 		if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
3887 			printk(KERN_INFO "scsi_debug: Opcode: 0x%x not "
3888 			       "supported\n", *cmd);
3889 		errsts = check_readiness(SCpnt, 1, devip);
3890 		if (errsts)
3891 			break;	/* Unit attention takes precedence */
3892 		mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
3893 		errsts = check_condition_result;
3894 		break;
3895 	}
3896 	return schedule_resp(SCpnt, devip, done, errsts,
3897 			     (delay_override ? 0 : scsi_debug_delay));
3898 }
3899 
3900 static DEF_SCSI_QCMD(scsi_debug_queuecommand)
3901 
3902 static struct scsi_host_template sdebug_driver_template = {
3903 	.show_info =		scsi_debug_show_info,
3904 	.write_info =		scsi_debug_write_info,
3905 	.proc_name =		sdebug_proc_name,
3906 	.name =			"SCSI DEBUG",
3907 	.info =			scsi_debug_info,
3908 	.slave_alloc =		scsi_debug_slave_alloc,
3909 	.slave_configure =	scsi_debug_slave_configure,
3910 	.slave_destroy =	scsi_debug_slave_destroy,
3911 	.ioctl =		scsi_debug_ioctl,
3912 	.queuecommand =		scsi_debug_queuecommand,
3913 	.eh_abort_handler =	scsi_debug_abort,
3914 	.eh_bus_reset_handler = scsi_debug_bus_reset,
3915 	.eh_device_reset_handler = scsi_debug_device_reset,
3916 	.eh_host_reset_handler = scsi_debug_host_reset,
3917 	.bios_param =		scsi_debug_biosparam,
3918 	.can_queue =		SCSI_DEBUG_CANQUEUE,
3919 	.this_id =		7,
3920 	.sg_tablesize =		256,
3921 	.cmd_per_lun =		16,
3922 	.max_sectors =		0xffff,
3923 	.use_clustering = 	DISABLE_CLUSTERING,
3924 	.module =		THIS_MODULE,
3925 };
3926 
3927 static int sdebug_driver_probe(struct device * dev)
3928 {
3929         int error = 0;
3930         struct sdebug_host_info *sdbg_host;
3931         struct Scsi_Host *hpnt;
3932 	int host_prot;
3933 
3934 	sdbg_host = to_sdebug_host(dev);
3935 
3936 	sdebug_driver_template.can_queue = scsi_debug_max_queue;
3937 	hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
3938 	if (NULL == hpnt) {
3939 		printk(KERN_ERR "%s: scsi_register failed\n", __func__);
3940 		error = -ENODEV;
3941 		return error;
3942 	}
3943 
3944         sdbg_host->shost = hpnt;
3945 	*((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host;
3946 	if ((hpnt->this_id >= 0) && (scsi_debug_num_tgts > hpnt->this_id))
3947 		hpnt->max_id = scsi_debug_num_tgts + 1;
3948 	else
3949 		hpnt->max_id = scsi_debug_num_tgts;
3950 	hpnt->max_lun = SAM2_WLUN_REPORT_LUNS;	/* = scsi_debug_max_luns; */
3951 
3952 	host_prot = 0;
3953 
3954 	switch (scsi_debug_dif) {
3955 
3956 	case SD_DIF_TYPE1_PROTECTION:
3957 		host_prot = SHOST_DIF_TYPE1_PROTECTION;
3958 		if (scsi_debug_dix)
3959 			host_prot |= SHOST_DIX_TYPE1_PROTECTION;
3960 		break;
3961 
3962 	case SD_DIF_TYPE2_PROTECTION:
3963 		host_prot = SHOST_DIF_TYPE2_PROTECTION;
3964 		if (scsi_debug_dix)
3965 			host_prot |= SHOST_DIX_TYPE2_PROTECTION;
3966 		break;
3967 
3968 	case SD_DIF_TYPE3_PROTECTION:
3969 		host_prot = SHOST_DIF_TYPE3_PROTECTION;
3970 		if (scsi_debug_dix)
3971 			host_prot |= SHOST_DIX_TYPE3_PROTECTION;
3972 		break;
3973 
3974 	default:
3975 		if (scsi_debug_dix)
3976 			host_prot |= SHOST_DIX_TYPE0_PROTECTION;
3977 		break;
3978 	}
3979 
3980 	scsi_host_set_prot(hpnt, host_prot);
3981 
3982 	printk(KERN_INFO "scsi_debug: host protection%s%s%s%s%s%s%s\n",
3983 	       (host_prot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
3984 	       (host_prot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
3985 	       (host_prot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
3986 	       (host_prot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
3987 	       (host_prot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
3988 	       (host_prot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
3989 	       (host_prot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");
3990 
3991 	if (scsi_debug_guard == 1)
3992 		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
3993 	else
3994 		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);
3995 
3996         error = scsi_add_host(hpnt, &sdbg_host->dev);
3997         if (error) {
3998                 printk(KERN_ERR "%s: scsi_add_host failed\n", __func__);
3999                 error = -ENODEV;
4000 		scsi_host_put(hpnt);
4001         } else
4002 		scsi_scan_host(hpnt);
4003 
4004 
4005         return error;
4006 }
4007 
4008 static int sdebug_driver_remove(struct device * dev)
4009 {
4010         struct sdebug_host_info *sdbg_host;
4011 	struct sdebug_dev_info *sdbg_devinfo, *tmp;
4012 
4013 	sdbg_host = to_sdebug_host(dev);
4014 
4015 	if (!sdbg_host) {
4016 		printk(KERN_ERR "%s: Unable to locate host info\n",
4017 		       __func__);
4018 		return -ENODEV;
4019 	}
4020 
4021         scsi_remove_host(sdbg_host->shost);
4022 
4023 	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
4024 				 dev_list) {
4025                 list_del(&sdbg_devinfo->dev_list);
4026                 kfree(sdbg_devinfo);
4027         }
4028 
4029         scsi_host_put(sdbg_host->shost);
4030         return 0;
4031 }
4032 
4033 static int pseudo_lld_bus_match(struct device *dev,
4034 				struct device_driver *dev_driver)
4035 {
4036 	return 1;
4037 }
4038 
4039 static struct bus_type pseudo_lld_bus = {
4040 	.name = "pseudo",
4041 	.match = pseudo_lld_bus_match,
4042 	.probe = sdebug_driver_probe,
4043 	.remove = sdebug_driver_remove,
4044 	.drv_groups = sdebug_drv_groups,
4045 };
4046