xref: /linux/drivers/scsi/sd.c (revision baaa68a9796ef2cadfe5caaf4c730412eda0f31c)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *      sd.c Copyright (C) 1992 Drew Eckhardt
4  *           Copyright (C) 1993, 1994, 1995, 1999 Eric Youngdale
5  *
6  *      Linux scsi disk driver
7  *              Initial versions: Drew Eckhardt
8  *              Subsequent revisions: Eric Youngdale
9  *	Modification history:
10  *       - Drew Eckhardt <drew@colorado.edu> original
11  *       - Eric Youngdale <eric@andante.org> add scatter-gather, multiple
12  *         outstanding requests, and other enhancements.
13  *         Support loadable low-level scsi drivers.
14  *       - Jirka Hanika <geo@ff.cuni.cz> support more scsi disks using
15  *         eight major numbers.
16  *       - Richard Gooch <rgooch@atnf.csiro.au> support devfs.
17  *	 - Torben Mathiasen <tmm@image.dk> Resource allocation fixes in
18  *	   sd_init and cleanups.
19  *	 - Alex Davis <letmein@erols.com> Fix problem where partition info
20  *	   not being read in sd_open. Fix problem where removable media
21  *	   could be ejected after sd_open.
22  *	 - Douglas Gilbert <dgilbert@interlog.com> cleanup for lk 2.5.x
23  *	 - Badari Pulavarty <pbadari@us.ibm.com>, Matthew Wilcox
24  *	   <willy@debian.org>, Kurt Garloff <garloff@suse.de>:
25  *	   Support 32k/1M disks.
26  *
27  *	Logging policy (needs CONFIG_SCSI_LOGGING defined):
28  *	 - setting up transfer: SCSI_LOG_HLQUEUE levels 1 and 2
29  *	 - end of transfer (bh + scsi_lib): SCSI_LOG_HLCOMPLETE level 1
30  *	 - entering sd_ioctl: SCSI_LOG_IOCTL level 1
31  *	 - entering other commands: SCSI_LOG_HLQUEUE level 3
32  *	Note: when the logging level is set by the user, it must be greater
33  *	than the level indicated above to trigger output.
34  */
35 
36 #include <linux/module.h>
37 #include <linux/fs.h>
38 #include <linux/kernel.h>
39 #include <linux/mm.h>
40 #include <linux/bio.h>
41 #include <linux/hdreg.h>
42 #include <linux/errno.h>
43 #include <linux/idr.h>
44 #include <linux/interrupt.h>
45 #include <linux/init.h>
46 #include <linux/blkdev.h>
47 #include <linux/blkpg.h>
48 #include <linux/blk-pm.h>
49 #include <linux/delay.h>
50 #include <linux/major.h>
51 #include <linux/mutex.h>
52 #include <linux/string_helpers.h>
53 #include <linux/slab.h>
54 #include <linux/sed-opal.h>
55 #include <linux/pm_runtime.h>
56 #include <linux/pr.h>
57 #include <linux/t10-pi.h>
58 #include <linux/uaccess.h>
59 #include <asm/unaligned.h>
60 
61 #include <scsi/scsi.h>
62 #include <scsi/scsi_cmnd.h>
63 #include <scsi/scsi_dbg.h>
64 #include <scsi/scsi_device.h>
65 #include <scsi/scsi_driver.h>
66 #include <scsi/scsi_eh.h>
67 #include <scsi/scsi_host.h>
68 #include <scsi/scsi_ioctl.h>
69 #include <scsi/scsicam.h>
70 
71 #include "sd.h"
72 #include "scsi_priv.h"
73 #include "scsi_logging.h"
74 
75 MODULE_AUTHOR("Eric Youngdale");
76 MODULE_DESCRIPTION("SCSI disk (sd) driver");
77 MODULE_LICENSE("GPL");
78 
79 MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK0_MAJOR);
80 MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK1_MAJOR);
81 MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK2_MAJOR);
82 MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK3_MAJOR);
83 MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK4_MAJOR);
84 MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK5_MAJOR);
85 MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK6_MAJOR);
86 MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK7_MAJOR);
87 MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK8_MAJOR);
88 MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK9_MAJOR);
89 MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK10_MAJOR);
90 MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK11_MAJOR);
91 MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK12_MAJOR);
92 MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK13_MAJOR);
93 MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK14_MAJOR);
94 MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK15_MAJOR);
95 MODULE_ALIAS_SCSI_DEVICE(TYPE_DISK);
96 MODULE_ALIAS_SCSI_DEVICE(TYPE_MOD);
97 MODULE_ALIAS_SCSI_DEVICE(TYPE_RBC);
98 MODULE_ALIAS_SCSI_DEVICE(TYPE_ZBC);
99 
100 #define SD_MINORS	16
101 
102 static void sd_config_discard(struct scsi_disk *, unsigned int);
103 static void sd_config_write_same(struct scsi_disk *);
104 static int  sd_revalidate_disk(struct gendisk *);
105 static void sd_unlock_native_capacity(struct gendisk *disk);
106 static int  sd_probe(struct device *);
107 static int  sd_remove(struct device *);
108 static void sd_shutdown(struct device *);
109 static int sd_suspend_system(struct device *);
110 static int sd_suspend_runtime(struct device *);
111 static int sd_resume_system(struct device *);
112 static int sd_resume_runtime(struct device *);
113 static void sd_rescan(struct device *);
114 static blk_status_t sd_init_command(struct scsi_cmnd *SCpnt);
115 static void sd_uninit_command(struct scsi_cmnd *SCpnt);
116 static int sd_done(struct scsi_cmnd *);
117 static void sd_eh_reset(struct scsi_cmnd *);
118 static int sd_eh_action(struct scsi_cmnd *, int);
119 static void sd_read_capacity(struct scsi_disk *sdkp, unsigned char *buffer);
120 static void scsi_disk_release(struct device *cdev);
121 
122 static DEFINE_IDA(sd_index_ida);
123 
124 static struct kmem_cache *sd_cdb_cache;
125 static mempool_t *sd_cdb_pool;
126 static mempool_t *sd_page_pool;
127 static struct lock_class_key sd_bio_compl_lkclass;
128 
129 static const char *sd_cache_types[] = {
130 	"write through", "none", "write back",
131 	"write back, no read (daft)"
132 };
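
/*
 * Editorial note (not in the original source): the array index doubles as
 * the cache type encoding used below, i.e. index = RCD + 2 * WCE:
 *
 *	index	WCE	RCD	cache_type string
 *	  0	 0	 0	"write through"
 *	  1	 0	 1	"none"
 *	  2	 1	 0	"write back"
 *	  3	 1	 1	"write back, no read (daft)"
 *
 * cache_type_show() computes the index from sdkp->RCD and sdkp->WCE, and
 * cache_type_store() splits a matched index back into the two bits.
 */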
133 
134 static void sd_set_flush_flag(struct scsi_disk *sdkp)
135 {
136 	bool wc = false, fua = false;
137 
138 	if (sdkp->WCE) {
139 		wc = true;
140 		if (sdkp->DPOFUA)
141 			fua = true;
142 	}
143 
144 	blk_queue_write_cache(sdkp->disk->queue, wc, fua);
145 }
146 
147 static ssize_t
148 cache_type_store(struct device *dev, struct device_attribute *attr,
149 		 const char *buf, size_t count)
150 {
151 	int ct, rcd, wce, sp;
152 	struct scsi_disk *sdkp = to_scsi_disk(dev);
153 	struct scsi_device *sdp = sdkp->device;
154 	char buffer[64];
155 	char *buffer_data;
156 	struct scsi_mode_data data;
157 	struct scsi_sense_hdr sshdr;
158 	static const char temp[] = "temporary ";
159 	int len;
160 
161 	if (sdp->type != TYPE_DISK && sdp->type != TYPE_ZBC)
162 		/* no cache control on RBC devices; theoretically they
163 		 * can do it, but there are probably so many exceptions
164 		 * it's not worth the risk */
165 		return -EINVAL;
166 
167 	if (strncmp(buf, temp, sizeof(temp) - 1) == 0) {
168 		buf += sizeof(temp) - 1;
169 		sdkp->cache_override = 1;
170 	} else {
171 		sdkp->cache_override = 0;
172 	}
173 
174 	ct = sysfs_match_string(sd_cache_types, buf);
175 	if (ct < 0)
176 		return -EINVAL;
177 
178 	rcd = ct & 0x01 ? 1 : 0;
179 	wce = (ct & 0x02) && !sdkp->write_prot ? 1 : 0;
180 
181 	if (sdkp->cache_override) {
182 		sdkp->WCE = wce;
183 		sdkp->RCD = rcd;
184 		sd_set_flush_flag(sdkp);
185 		return count;
186 	}
187 
188 	if (scsi_mode_sense(sdp, 0x08, 8, buffer, sizeof(buffer), SD_TIMEOUT,
189 			    sdkp->max_retries, &data, NULL))
190 		return -EINVAL;
191 	len = min_t(size_t, sizeof(buffer), data.length - data.header_length -
192 		  data.block_descriptor_length);
193 	buffer_data = buffer + data.header_length +
194 		data.block_descriptor_length;
195 	buffer_data[2] &= ~0x05;
196 	buffer_data[2] |= wce << 2 | rcd;
197 	sp = buffer_data[0] & 0x80 ? 1 : 0;
198 	buffer_data[0] &= ~0x80;
199 
200 	/*
201 	 * Ensure WP, DPOFUA, and RESERVED fields are cleared in
202 	 * received mode parameter buffer before doing MODE SELECT.
203 	 */
204 	data.device_specific = 0;
205 
206 	if (scsi_mode_select(sdp, 1, sp, buffer_data, len, SD_TIMEOUT,
207 			     sdkp->max_retries, &data, &sshdr)) {
208 		if (scsi_sense_valid(&sshdr))
209 			sd_print_sense_hdr(sdkp, &sshdr);
210 		return -EINVAL;
211 	}
212 	sd_revalidate_disk(sdkp->disk);
213 	return count;
214 }
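
/*
 * Illustrative sketch (an assumption based on the SBC Caching mode page,
 * page code 0x08, not spelled out in the original source) of the bytes
 * manipulated above:
 *
 *	buffer_data[0]	bit 7 = PS (parameters saveable); saved as 'sp'
 *			and cleared, since PS is reserved in MODE SELECT
 *			parameter data.
 *	buffer_data[2]	bit 2 = WCE (write cache enable)
 *			bit 0 = RCD (read cache disable)
 *
 * So "buffer_data[2] &= ~0x05" clears WCE and RCD, and
 * "buffer_data[2] |= wce << 2 | rcd" installs the requested values.
 */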
215 
216 static ssize_t
217 manage_start_stop_show(struct device *dev, struct device_attribute *attr,
218 		       char *buf)
219 {
220 	struct scsi_disk *sdkp = to_scsi_disk(dev);
221 	struct scsi_device *sdp = sdkp->device;
222 
223 	return sprintf(buf, "%u\n", sdp->manage_start_stop);
224 }
225 
226 static ssize_t
227 manage_start_stop_store(struct device *dev, struct device_attribute *attr,
228 			const char *buf, size_t count)
229 {
230 	struct scsi_disk *sdkp = to_scsi_disk(dev);
231 	struct scsi_device *sdp = sdkp->device;
232 	bool v;
233 
234 	if (!capable(CAP_SYS_ADMIN))
235 		return -EACCES;
236 
237 	if (kstrtobool(buf, &v))
238 		return -EINVAL;
239 
240 	sdp->manage_start_stop = v;
241 
242 	return count;
243 }
244 static DEVICE_ATTR_RW(manage_start_stop);
245 
246 static ssize_t
247 allow_restart_show(struct device *dev, struct device_attribute *attr, char *buf)
248 {
249 	struct scsi_disk *sdkp = to_scsi_disk(dev);
250 
251 	return sprintf(buf, "%u\n", sdkp->device->allow_restart);
252 }
253 
254 static ssize_t
255 allow_restart_store(struct device *dev, struct device_attribute *attr,
256 		    const char *buf, size_t count)
257 {
258 	bool v;
259 	struct scsi_disk *sdkp = to_scsi_disk(dev);
260 	struct scsi_device *sdp = sdkp->device;
261 
262 	if (!capable(CAP_SYS_ADMIN))
263 		return -EACCES;
264 
265 	if (sdp->type != TYPE_DISK && sdp->type != TYPE_ZBC)
266 		return -EINVAL;
267 
268 	if (kstrtobool(buf, &v))
269 		return -EINVAL;
270 
271 	sdp->allow_restart = v;
272 
273 	return count;
274 }
275 static DEVICE_ATTR_RW(allow_restart);
276 
277 static ssize_t
278 cache_type_show(struct device *dev, struct device_attribute *attr, char *buf)
279 {
280 	struct scsi_disk *sdkp = to_scsi_disk(dev);
281 	int ct = sdkp->RCD + 2*sdkp->WCE;
282 
283 	return sprintf(buf, "%s\n", sd_cache_types[ct]);
284 }
285 static DEVICE_ATTR_RW(cache_type);
286 
287 static ssize_t
288 FUA_show(struct device *dev, struct device_attribute *attr, char *buf)
289 {
290 	struct scsi_disk *sdkp = to_scsi_disk(dev);
291 
292 	return sprintf(buf, "%u\n", sdkp->DPOFUA);
293 }
294 static DEVICE_ATTR_RO(FUA);
295 
296 static ssize_t
297 protection_type_show(struct device *dev, struct device_attribute *attr,
298 		     char *buf)
299 {
300 	struct scsi_disk *sdkp = to_scsi_disk(dev);
301 
302 	return sprintf(buf, "%u\n", sdkp->protection_type);
303 }
304 
305 static ssize_t
306 protection_type_store(struct device *dev, struct device_attribute *attr,
307 		      const char *buf, size_t count)
308 {
309 	struct scsi_disk *sdkp = to_scsi_disk(dev);
310 	unsigned int val;
311 	int err;
312 
313 	if (!capable(CAP_SYS_ADMIN))
314 		return -EACCES;
315 
316 	err = kstrtouint(buf, 10, &val);
317 
318 	if (err)
319 		return err;
320 
321 	if (val <= T10_PI_TYPE3_PROTECTION)
322 		sdkp->protection_type = val;
323 
324 	return count;
325 }
326 static DEVICE_ATTR_RW(protection_type);
327 
328 static ssize_t
329 protection_mode_show(struct device *dev, struct device_attribute *attr,
330 		     char *buf)
331 {
332 	struct scsi_disk *sdkp = to_scsi_disk(dev);
333 	struct scsi_device *sdp = sdkp->device;
334 	unsigned int dif, dix;
335 
336 	dif = scsi_host_dif_capable(sdp->host, sdkp->protection_type);
337 	dix = scsi_host_dix_capable(sdp->host, sdkp->protection_type);
338 
339 	if (!dix && scsi_host_dix_capable(sdp->host, T10_PI_TYPE0_PROTECTION)) {
340 		dif = 0;
341 		dix = 1;
342 	}
343 
344 	if (!dif && !dix)
345 		return sprintf(buf, "none\n");
346 
347 	return sprintf(buf, "%s%u\n", dix ? "dix" : "dif", dif);
348 }
349 static DEVICE_ATTR_RO(protection_mode);
350 
351 static ssize_t
352 app_tag_own_show(struct device *dev, struct device_attribute *attr, char *buf)
353 {
354 	struct scsi_disk *sdkp = to_scsi_disk(dev);
355 
356 	return sprintf(buf, "%u\n", sdkp->ATO);
357 }
358 static DEVICE_ATTR_RO(app_tag_own);
359 
360 static ssize_t
361 thin_provisioning_show(struct device *dev, struct device_attribute *attr,
362 		       char *buf)
363 {
364 	struct scsi_disk *sdkp = to_scsi_disk(dev);
365 
366 	return sprintf(buf, "%u\n", sdkp->lbpme);
367 }
368 static DEVICE_ATTR_RO(thin_provisioning);
369 
370 /* sysfs_match_string() requires dense arrays */
371 static const char *lbp_mode[] = {
372 	[SD_LBP_FULL]		= "full",
373 	[SD_LBP_UNMAP]		= "unmap",
374 	[SD_LBP_WS16]		= "writesame_16",
375 	[SD_LBP_WS10]		= "writesame_10",
376 	[SD_LBP_ZERO]		= "writesame_zero",
377 	[SD_LBP_DISABLE]	= "disabled",
378 };
379 
380 static ssize_t
381 provisioning_mode_show(struct device *dev, struct device_attribute *attr,
382 		       char *buf)
383 {
384 	struct scsi_disk *sdkp = to_scsi_disk(dev);
385 
386 	return sprintf(buf, "%s\n", lbp_mode[sdkp->provisioning_mode]);
387 }
388 
389 static ssize_t
390 provisioning_mode_store(struct device *dev, struct device_attribute *attr,
391 			const char *buf, size_t count)
392 {
393 	struct scsi_disk *sdkp = to_scsi_disk(dev);
394 	struct scsi_device *sdp = sdkp->device;
395 	int mode;
396 
397 	if (!capable(CAP_SYS_ADMIN))
398 		return -EACCES;
399 
400 	if (sd_is_zoned(sdkp)) {
401 		sd_config_discard(sdkp, SD_LBP_DISABLE);
402 		return count;
403 	}
404 
405 	if (sdp->type != TYPE_DISK)
406 		return -EINVAL;
407 
408 	mode = sysfs_match_string(lbp_mode, buf);
409 	if (mode < 0)
410 		return -EINVAL;
411 
412 	sd_config_discard(sdkp, mode);
413 
414 	return count;
415 }
416 static DEVICE_ATTR_RW(provisioning_mode);
417 
418 /* sysfs_match_string() requires dense arrays */
419 static const char *zeroing_mode[] = {
420 	[SD_ZERO_WRITE]		= "write",
421 	[SD_ZERO_WS]		= "writesame",
422 	[SD_ZERO_WS16_UNMAP]	= "writesame_16_unmap",
423 	[SD_ZERO_WS10_UNMAP]	= "writesame_10_unmap",
424 };
425 
426 static ssize_t
427 zeroing_mode_show(struct device *dev, struct device_attribute *attr,
428 		  char *buf)
429 {
430 	struct scsi_disk *sdkp = to_scsi_disk(dev);
431 
432 	return sprintf(buf, "%s\n", zeroing_mode[sdkp->zeroing_mode]);
433 }
434 
435 static ssize_t
436 zeroing_mode_store(struct device *dev, struct device_attribute *attr,
437 		   const char *buf, size_t count)
438 {
439 	struct scsi_disk *sdkp = to_scsi_disk(dev);
440 	int mode;
441 
442 	if (!capable(CAP_SYS_ADMIN))
443 		return -EACCES;
444 
445 	mode = sysfs_match_string(zeroing_mode, buf);
446 	if (mode < 0)
447 		return -EINVAL;
448 
449 	sdkp->zeroing_mode = mode;
450 
451 	return count;
452 }
453 static DEVICE_ATTR_RW(zeroing_mode);
454 
455 static ssize_t
456 max_medium_access_timeouts_show(struct device *dev,
457 				struct device_attribute *attr, char *buf)
458 {
459 	struct scsi_disk *sdkp = to_scsi_disk(dev);
460 
461 	return sprintf(buf, "%u\n", sdkp->max_medium_access_timeouts);
462 }
463 
464 static ssize_t
465 max_medium_access_timeouts_store(struct device *dev,
466 				 struct device_attribute *attr, const char *buf,
467 				 size_t count)
468 {
469 	struct scsi_disk *sdkp = to_scsi_disk(dev);
470 	int err;
471 
472 	if (!capable(CAP_SYS_ADMIN))
473 		return -EACCES;
474 
475 	err = kstrtouint(buf, 10, &sdkp->max_medium_access_timeouts);
476 
477 	return err ? err : count;
478 }
479 static DEVICE_ATTR_RW(max_medium_access_timeouts);
480 
481 static ssize_t
482 max_write_same_blocks_show(struct device *dev, struct device_attribute *attr,
483 			   char *buf)
484 {
485 	struct scsi_disk *sdkp = to_scsi_disk(dev);
486 
487 	return sprintf(buf, "%u\n", sdkp->max_ws_blocks);
488 }
489 
490 static ssize_t
491 max_write_same_blocks_store(struct device *dev, struct device_attribute *attr,
492 			    const char *buf, size_t count)
493 {
494 	struct scsi_disk *sdkp = to_scsi_disk(dev);
495 	struct scsi_device *sdp = sdkp->device;
496 	unsigned long max;
497 	int err;
498 
499 	if (!capable(CAP_SYS_ADMIN))
500 		return -EACCES;
501 
502 	if (sdp->type != TYPE_DISK && sdp->type != TYPE_ZBC)
503 		return -EINVAL;
504 
505 	err = kstrtoul(buf, 10, &max);
506 
507 	if (err)
508 		return err;
509 
510 	if (max == 0)
511 		sdp->no_write_same = 1;
512 	else if (max <= SD_MAX_WS16_BLOCKS) {
513 		sdp->no_write_same = 0;
514 		sdkp->max_ws_blocks = max;
515 	}
516 
517 	sd_config_write_same(sdkp);
518 
519 	return count;
520 }
521 static DEVICE_ATTR_RW(max_write_same_blocks);
522 
523 static ssize_t
524 zoned_cap_show(struct device *dev, struct device_attribute *attr, char *buf)
525 {
526 	struct scsi_disk *sdkp = to_scsi_disk(dev);
527 
528 	if (sdkp->device->type == TYPE_ZBC)
529 		return sprintf(buf, "host-managed\n");
530 	if (sdkp->zoned == 1)
531 		return sprintf(buf, "host-aware\n");
532 	if (sdkp->zoned == 2)
533 		return sprintf(buf, "drive-managed\n");
534 	return sprintf(buf, "none\n");
535 }
536 static DEVICE_ATTR_RO(zoned_cap);
537 
538 static ssize_t
539 max_retries_store(struct device *dev, struct device_attribute *attr,
540 		  const char *buf, size_t count)
541 {
542 	struct scsi_disk *sdkp = to_scsi_disk(dev);
543 	struct scsi_device *sdev = sdkp->device;
544 	int retries, err;
545 
546 	err = kstrtoint(buf, 10, &retries);
547 	if (err)
548 		return err;
549 
550 	if (retries == SCSI_CMD_RETRIES_NO_LIMIT || retries <= SD_MAX_RETRIES) {
551 		sdkp->max_retries = retries;
552 		return count;
553 	}
554 
555 	sdev_printk(KERN_ERR, sdev, "max_retries must be between -1 and %d\n",
556 		    SD_MAX_RETRIES);
557 	return -EINVAL;
558 }
559 
560 static ssize_t
561 max_retries_show(struct device *dev, struct device_attribute *attr,
562 		 char *buf)
563 {
564 	struct scsi_disk *sdkp = to_scsi_disk(dev);
565 
566 	return sprintf(buf, "%d\n", sdkp->max_retries);
567 }
568 
569 static DEVICE_ATTR_RW(max_retries);
570 
571 static struct attribute *sd_disk_attrs[] = {
572 	&dev_attr_cache_type.attr,
573 	&dev_attr_FUA.attr,
574 	&dev_attr_allow_restart.attr,
575 	&dev_attr_manage_start_stop.attr,
576 	&dev_attr_protection_type.attr,
577 	&dev_attr_protection_mode.attr,
578 	&dev_attr_app_tag_own.attr,
579 	&dev_attr_thin_provisioning.attr,
580 	&dev_attr_provisioning_mode.attr,
581 	&dev_attr_zeroing_mode.attr,
582 	&dev_attr_max_write_same_blocks.attr,
583 	&dev_attr_max_medium_access_timeouts.attr,
584 	&dev_attr_zoned_cap.attr,
585 	&dev_attr_max_retries.attr,
586 	NULL,
587 };
588 ATTRIBUTE_GROUPS(sd_disk);
589 
590 static struct class sd_disk_class = {
591 	.name		= "scsi_disk",
592 	.owner		= THIS_MODULE,
593 	.dev_release	= scsi_disk_release,
594 	.dev_groups	= sd_disk_groups,
595 };
596 
597 static const struct dev_pm_ops sd_pm_ops = {
598 	.suspend		= sd_suspend_system,
599 	.resume			= sd_resume_system,
600 	.poweroff		= sd_suspend_system,
601 	.restore		= sd_resume_system,
602 	.runtime_suspend	= sd_suspend_runtime,
603 	.runtime_resume		= sd_resume_runtime,
604 };
605 
606 static struct scsi_driver sd_template = {
607 	.gendrv = {
608 		.name		= "sd",
609 		.owner		= THIS_MODULE,
610 		.probe		= sd_probe,
611 		.probe_type	= PROBE_PREFER_ASYNCHRONOUS,
612 		.remove		= sd_remove,
613 		.shutdown	= sd_shutdown,
614 		.pm		= &sd_pm_ops,
615 	},
616 	.rescan			= sd_rescan,
617 	.init_command		= sd_init_command,
618 	.uninit_command		= sd_uninit_command,
619 	.done			= sd_done,
620 	.eh_action		= sd_eh_action,
621 	.eh_reset		= sd_eh_reset,
622 };
623 
624 /*
625  * Don't request a new module, as that could deadlock in a multipath
626  * environment.
627  */
628 static void sd_default_probe(dev_t devt)
629 {
630 }
631 
632 /*
633  * Device number to disk mapping:
634  *
635  *       major         disc2     disc  p1
636  *   |............|.............|....|....| <- dev_t
637  *    31        20 19          8 7  4 3  0
638  *
639  * Inside a major, we have 16k disks, but they are mapped non-
640  * contiguously: the first 16 disks are on major0, the next 16
641  * on major1, ... Disk 256 is on major0 again, disk 272 on
642  * major1, ...
643  * As we stay compatible with our numbering scheme, we can reuse
644  * the well-known SCSI majors 8, 65--71, 136--143.
645  */
646 static int sd_major(int major_idx)
647 {
648 	switch (major_idx) {
649 	case 0:
650 		return SCSI_DISK0_MAJOR;
651 	case 1 ... 7:
652 		return SCSI_DISK1_MAJOR + major_idx - 1;
653 	case 8 ... 15:
654 		return SCSI_DISK8_MAJOR + major_idx - 8;
655 	default:
656 		BUG();
657 		return 0;	/* shut up gcc */
658 	}
659 }
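
/*
 * Worked example (editorial; the mapping itself lives in sd_probe(), which
 * is not part of this excerpt): assuming, as in sd_probe(), that
 * major = sd_major((index & 0xf0) >> 4) and
 * first_minor = ((index & 0xf) << 4) | (index & 0xfff00), then:
 *
 *	index   0 -> major  8 (SCSI_DISK0_MAJOR), first minor   0
 *	index  17 -> major 65 (SCSI_DISK1_MAJOR), first minor  16
 *	index 256 -> major  8 (SCSI_DISK0_MAJOR), first minor 256
 *
 * i.e. the low nibble selects the disc slot, the next nibble selects the
 * major, and higher bits land in the "disc2" field of the diagram above.
 */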
660 
661 #ifdef CONFIG_BLK_SED_OPAL
662 static int sd_sec_submit(void *data, u16 spsp, u8 secp, void *buffer,
663 		size_t len, bool send)
664 {
665 	struct scsi_disk *sdkp = data;
666 	struct scsi_device *sdev = sdkp->device;
667 	u8 cdb[12] = { 0, };
668 	int ret;
669 
670 	cdb[0] = send ? SECURITY_PROTOCOL_OUT : SECURITY_PROTOCOL_IN;
671 	cdb[1] = secp;
672 	put_unaligned_be16(spsp, &cdb[2]);
673 	put_unaligned_be32(len, &cdb[6]);
674 
675 	ret = scsi_execute(sdev, cdb, send ? DMA_TO_DEVICE : DMA_FROM_DEVICE,
676 		buffer, len, NULL, NULL, SD_TIMEOUT, sdkp->max_retries, 0,
677 		RQF_PM, NULL);
678 	return ret <= 0 ? ret : -EIO;
679 }
680 #endif /* CONFIG_BLK_SED_OPAL */
681 
682 /*
683  * Look up the DIX operation based on whether the command is read or
684  * write and whether dix and dif are enabled.
685  */
686 static unsigned int sd_prot_op(bool write, bool dix, bool dif)
687 {
688 	/* Lookup table: bit 2 (write), bit 1 (dix), bit 0 (dif) */
689 	static const unsigned int ops[] = {	/* wrt dix dif */
690 		SCSI_PROT_NORMAL,		/*  0	0   0  */
691 		SCSI_PROT_READ_STRIP,		/*  0	0   1  */
692 		SCSI_PROT_READ_INSERT,		/*  0	1   0  */
693 		SCSI_PROT_READ_PASS,		/*  0	1   1  */
694 		SCSI_PROT_NORMAL,		/*  1	0   0  */
695 		SCSI_PROT_WRITE_INSERT,		/*  1	0   1  */
696 		SCSI_PROT_WRITE_STRIP,		/*  1	1   0  */
697 		SCSI_PROT_WRITE_PASS,		/*  1	1   1  */
698 	};
699 
700 	return ops[write << 2 | dix << 1 | dif];
701 }
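
/*
 * Worked example (editorial): a WRITE with host-side DIX enabled but no
 * target DIF (write=1, dix=1, dif=0) indexes
 * ops[1 << 2 | 1 << 1 | 0] == ops[6] == SCSI_PROT_WRITE_STRIP: the
 * protection information is checked and stripped on the host side before
 * the data goes out on the wire.
 */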
702 
703 /*
704  * Returns a mask of the protection flags that are valid for a given DIX
705  * operation.
706  */
707 static unsigned int sd_prot_flag_mask(unsigned int prot_op)
708 {
709 	static const unsigned int flag_mask[] = {
710 		[SCSI_PROT_NORMAL]		= 0,
711 
712 		[SCSI_PROT_READ_STRIP]		= SCSI_PROT_TRANSFER_PI |
713 						  SCSI_PROT_GUARD_CHECK |
714 						  SCSI_PROT_REF_CHECK |
715 						  SCSI_PROT_REF_INCREMENT,
716 
717 		[SCSI_PROT_READ_INSERT]		= SCSI_PROT_REF_INCREMENT |
718 						  SCSI_PROT_IP_CHECKSUM,
719 
720 		[SCSI_PROT_READ_PASS]		= SCSI_PROT_TRANSFER_PI |
721 						  SCSI_PROT_GUARD_CHECK |
722 						  SCSI_PROT_REF_CHECK |
723 						  SCSI_PROT_REF_INCREMENT |
724 						  SCSI_PROT_IP_CHECKSUM,
725 
726 		[SCSI_PROT_WRITE_INSERT]	= SCSI_PROT_TRANSFER_PI |
727 						  SCSI_PROT_REF_INCREMENT,
728 
729 		[SCSI_PROT_WRITE_STRIP]		= SCSI_PROT_GUARD_CHECK |
730 						  SCSI_PROT_REF_CHECK |
731 						  SCSI_PROT_REF_INCREMENT |
732 						  SCSI_PROT_IP_CHECKSUM,
733 
734 		[SCSI_PROT_WRITE_PASS]		= SCSI_PROT_TRANSFER_PI |
735 						  SCSI_PROT_GUARD_CHECK |
736 						  SCSI_PROT_REF_CHECK |
737 						  SCSI_PROT_REF_INCREMENT |
738 						  SCSI_PROT_IP_CHECKSUM,
739 	};
740 
741 	return flag_mask[prot_op];
742 }
743 
744 static unsigned char sd_setup_protect_cmnd(struct scsi_cmnd *scmd,
745 					   unsigned int dix, unsigned int dif)
746 {
747 	struct request *rq = scsi_cmd_to_rq(scmd);
748 	struct bio *bio = rq->bio;
749 	unsigned int prot_op = sd_prot_op(rq_data_dir(rq), dix, dif);
750 	unsigned int protect = 0;
751 
752 	if (dix) {				/* DIX Type 0, 1, 2, 3 */
753 		if (bio_integrity_flagged(bio, BIP_IP_CHECKSUM))
754 			scmd->prot_flags |= SCSI_PROT_IP_CHECKSUM;
755 
756 		if (bio_integrity_flagged(bio, BIP_CTRL_NOCHECK) == false)
757 			scmd->prot_flags |= SCSI_PROT_GUARD_CHECK;
758 	}
759 
760 	if (dif != T10_PI_TYPE3_PROTECTION) {	/* DIX/DIF Type 0, 1, 2 */
761 		scmd->prot_flags |= SCSI_PROT_REF_INCREMENT;
762 
763 		if (bio_integrity_flagged(bio, BIP_CTRL_NOCHECK) == false)
764 			scmd->prot_flags |= SCSI_PROT_REF_CHECK;
765 	}
766 
767 	if (dif) {				/* DIX/DIF Type 1, 2, 3 */
768 		scmd->prot_flags |= SCSI_PROT_TRANSFER_PI;
769 
770 		if (bio_integrity_flagged(bio, BIP_DISK_NOCHECK))
771 			protect = 3 << 5;	/* Disable target PI checking */
772 		else
773 			protect = 1 << 5;	/* Enable target PI checking */
774 	}
775 
776 	scsi_set_prot_op(scmd, prot_op);
777 	scsi_set_prot_type(scmd, dif);
778 	scmd->prot_flags &= sd_prot_flag_mask(prot_op);
779 
780 	return protect;
781 }
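
/*
 * Editorial note: the returned 'protect' value is the RDPROTECT/WRPROTECT
 * field, pre-shifted into bits 7..5 of the CDB byte it is OR-ed into by
 * the sd_setup_rw*_cmnd() helpers below:
 *
 *	protect == 1 << 5  ->  *PROTECT = 001b, target checks PI
 *	protect == 3 << 5  ->  *PROTECT = 011b, target ignores PI
 */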
782 
783 static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode)
784 {
785 	struct request_queue *q = sdkp->disk->queue;
786 	unsigned int logical_block_size = sdkp->device->sector_size;
787 	unsigned int max_blocks = 0;
788 
789 	q->limits.discard_alignment =
790 		sdkp->unmap_alignment * logical_block_size;
791 	q->limits.discard_granularity =
792 		max(sdkp->physical_block_size,
793 		    sdkp->unmap_granularity * logical_block_size);
794 	sdkp->provisioning_mode = mode;
795 
796 	switch (mode) {
797 
798 	case SD_LBP_FULL:
799 	case SD_LBP_DISABLE:
800 		blk_queue_max_discard_sectors(q, 0);
801 		blk_queue_flag_clear(QUEUE_FLAG_DISCARD, q);
802 		return;
803 
804 	case SD_LBP_UNMAP:
805 		max_blocks = min_not_zero(sdkp->max_unmap_blocks,
806 					  (u32)SD_MAX_WS16_BLOCKS);
807 		break;
808 
809 	case SD_LBP_WS16:
810 		if (sdkp->device->unmap_limit_for_ws)
811 			max_blocks = sdkp->max_unmap_blocks;
812 		else
813 			max_blocks = sdkp->max_ws_blocks;
814 
815 		max_blocks = min_not_zero(max_blocks, (u32)SD_MAX_WS16_BLOCKS);
816 		break;
817 
818 	case SD_LBP_WS10:
819 		if (sdkp->device->unmap_limit_for_ws)
820 			max_blocks = sdkp->max_unmap_blocks;
821 		else
822 			max_blocks = sdkp->max_ws_blocks;
823 
824 		max_blocks = min_not_zero(max_blocks, (u32)SD_MAX_WS10_BLOCKS);
825 		break;
826 
827 	case SD_LBP_ZERO:
828 		max_blocks = min_not_zero(sdkp->max_ws_blocks,
829 					  (u32)SD_MAX_WS10_BLOCKS);
830 		break;
831 	}
832 
833 	blk_queue_max_discard_sectors(q, max_blocks * (logical_block_size >> 9));
834 	blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
835 }
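
/*
 * Worked example (editorial): the block layer counts in 512-byte sectors,
 * so the limit is scaled by logical_block_size >> 9. For a device with
 * 4096-byte logical blocks in SD_LBP_UNMAP mode and max_unmap_blocks ==
 * 0x10000, the queue limit becomes 0x10000 * (4096 >> 9) == 524288
 * sectors, i.e. 256 MiB per discard request.
 */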
836 
837 static blk_status_t sd_setup_unmap_cmnd(struct scsi_cmnd *cmd)
838 {
839 	struct scsi_device *sdp = cmd->device;
840 	struct request *rq = scsi_cmd_to_rq(cmd);
841 	struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
842 	u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));
843 	u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
844 	unsigned int data_len = 24;
845 	char *buf;
846 
847 	rq->special_vec.bv_page = mempool_alloc(sd_page_pool, GFP_ATOMIC);
848 	if (!rq->special_vec.bv_page)
849 		return BLK_STS_RESOURCE;
850 	clear_highpage(rq->special_vec.bv_page);
851 	rq->special_vec.bv_offset = 0;
852 	rq->special_vec.bv_len = data_len;
853 	rq->rq_flags |= RQF_SPECIAL_PAYLOAD;
854 
855 	cmd->cmd_len = 10;
856 	cmd->cmnd[0] = UNMAP;
857 	cmd->cmnd[8] = 24;
858 
859 	buf = bvec_virt(&rq->special_vec);
860 	put_unaligned_be16(6 + 16, &buf[0]);
861 	put_unaligned_be16(16, &buf[2]);
862 	put_unaligned_be64(lba, &buf[8]);
863 	put_unaligned_be32(nr_blocks, &buf[16]);
864 
865 	cmd->allowed = sdkp->max_retries;
866 	cmd->transfersize = data_len;
867 	rq->timeout = SD_TIMEOUT;
868 
869 	return scsi_alloc_sgtables(cmd);
870 }
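
/*
 * Layout sketch (editorial) of the 24-byte UNMAP parameter list built
 * above, per SBC: an 8-byte header followed by one 16-byte block
 * descriptor:
 *
 *	buf[0..1]   UNMAP data length             = 6 + 16 = 22
 *	buf[2..3]   block descriptor data length  = 16
 *	buf[4..7]   reserved
 *	buf[8..15]  first LBA to unmap
 *	buf[16..19] number of logical blocks
 *	buf[20..23] reserved
 *
 * cmnd[8] == 24 is the parameter list length in the UNMAP CDB itself.
 */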
871 
872 static blk_status_t sd_setup_write_same16_cmnd(struct scsi_cmnd *cmd,
873 		bool unmap)
874 {
875 	struct scsi_device *sdp = cmd->device;
876 	struct request *rq = scsi_cmd_to_rq(cmd);
877 	struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
878 	u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));
879 	u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
880 	u32 data_len = sdp->sector_size;
881 
882 	rq->special_vec.bv_page = mempool_alloc(sd_page_pool, GFP_ATOMIC);
883 	if (!rq->special_vec.bv_page)
884 		return BLK_STS_RESOURCE;
885 	clear_highpage(rq->special_vec.bv_page);
886 	rq->special_vec.bv_offset = 0;
887 	rq->special_vec.bv_len = data_len;
888 	rq->rq_flags |= RQF_SPECIAL_PAYLOAD;
889 
890 	cmd->cmd_len = 16;
891 	cmd->cmnd[0] = WRITE_SAME_16;
892 	if (unmap)
893 		cmd->cmnd[1] = 0x8; /* UNMAP */
894 	put_unaligned_be64(lba, &cmd->cmnd[2]);
895 	put_unaligned_be32(nr_blocks, &cmd->cmnd[10]);
896 
897 	cmd->allowed = sdkp->max_retries;
898 	cmd->transfersize = data_len;
899 	rq->timeout = unmap ? SD_TIMEOUT : SD_WRITE_SAME_TIMEOUT;
900 
901 	return scsi_alloc_sgtables(cmd);
902 }
903 
904 static blk_status_t sd_setup_write_same10_cmnd(struct scsi_cmnd *cmd,
905 		bool unmap)
906 {
907 	struct scsi_device *sdp = cmd->device;
908 	struct request *rq = scsi_cmd_to_rq(cmd);
909 	struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
910 	u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));
911 	u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
912 	u32 data_len = sdp->sector_size;
913 
914 	rq->special_vec.bv_page = mempool_alloc(sd_page_pool, GFP_ATOMIC);
915 	if (!rq->special_vec.bv_page)
916 		return BLK_STS_RESOURCE;
917 	clear_highpage(rq->special_vec.bv_page);
918 	rq->special_vec.bv_offset = 0;
919 	rq->special_vec.bv_len = data_len;
920 	rq->rq_flags |= RQF_SPECIAL_PAYLOAD;
921 
922 	cmd->cmd_len = 10;
923 	cmd->cmnd[0] = WRITE_SAME;
924 	if (unmap)
925 		cmd->cmnd[1] = 0x8; /* UNMAP */
926 	put_unaligned_be32(lba, &cmd->cmnd[2]);
927 	put_unaligned_be16(nr_blocks, &cmd->cmnd[7]);
928 
929 	cmd->allowed = sdkp->max_retries;
930 	cmd->transfersize = data_len;
931 	rq->timeout = unmap ? SD_TIMEOUT : SD_WRITE_SAME_TIMEOUT;
932 
933 	return scsi_alloc_sgtables(cmd);
934 }
935 
936 static blk_status_t sd_setup_write_zeroes_cmnd(struct scsi_cmnd *cmd)
937 {
938 	struct request *rq = scsi_cmd_to_rq(cmd);
939 	struct scsi_device *sdp = cmd->device;
940 	struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
941 	u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));
942 	u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
943 
944 	if (!(rq->cmd_flags & REQ_NOUNMAP)) {
945 		switch (sdkp->zeroing_mode) {
946 		case SD_ZERO_WS16_UNMAP:
947 			return sd_setup_write_same16_cmnd(cmd, true);
948 		case SD_ZERO_WS10_UNMAP:
949 			return sd_setup_write_same10_cmnd(cmd, true);
950 		}
951 	}
952 
953 	if (sdp->no_write_same) {
954 		rq->rq_flags |= RQF_QUIET;
955 		return BLK_STS_TARGET;
956 	}
957 
958 	if (sdkp->ws16 || lba > 0xffffffff || nr_blocks > 0xffff)
959 		return sd_setup_write_same16_cmnd(cmd, false);
960 
961 	return sd_setup_write_same10_cmnd(cmd, false);
962 }
963 
964 static void sd_config_write_same(struct scsi_disk *sdkp)
965 {
966 	struct request_queue *q = sdkp->disk->queue;
967 	unsigned int logical_block_size = sdkp->device->sector_size;
968 
969 	if (sdkp->device->no_write_same) {
970 		sdkp->max_ws_blocks = 0;
971 		goto out;
972 	}
973 
974 	/* Some devices cannot handle block counts above 0xffff despite
975 	 * supporting WRITE SAME(16). Consequently we default to 64k
976 	 * blocks per I/O unless the device explicitly advertises a
977 	 * bigger limit.
978 	 */
979 	if (sdkp->max_ws_blocks > SD_MAX_WS10_BLOCKS)
980 		sdkp->max_ws_blocks = min_not_zero(sdkp->max_ws_blocks,
981 						   (u32)SD_MAX_WS16_BLOCKS);
982 	else if (sdkp->ws16 || sdkp->ws10 || sdkp->device->no_report_opcodes)
983 		sdkp->max_ws_blocks = min_not_zero(sdkp->max_ws_blocks,
984 						   (u32)SD_MAX_WS10_BLOCKS);
985 	else {
986 		sdkp->device->no_write_same = 1;
987 		sdkp->max_ws_blocks = 0;
988 	}
989 
990 	if (sdkp->lbprz && sdkp->lbpws)
991 		sdkp->zeroing_mode = SD_ZERO_WS16_UNMAP;
992 	else if (sdkp->lbprz && sdkp->lbpws10)
993 		sdkp->zeroing_mode = SD_ZERO_WS10_UNMAP;
994 	else if (sdkp->max_ws_blocks)
995 		sdkp->zeroing_mode = SD_ZERO_WS;
996 	else
997 		sdkp->zeroing_mode = SD_ZERO_WRITE;
998 
999 	if (sdkp->max_ws_blocks &&
1000 	    sdkp->physical_block_size > logical_block_size) {
1001 		/*
1002 		 * Reporting a maximum number of blocks that is not aligned
1003 		 * on the device physical size would cause a large write same
1004 		 * request to be split into physically unaligned chunks by
1005 		 * __blkdev_issue_write_zeroes() and __blkdev_issue_write_same()
1006 		 * even if the caller of these functions took care to align the
1007 		 * large request. So make sure the maximum reported is aligned
1008 		 * to the device physical block size. This is only an optional
1009 		 * optimization for regular disks, but this is mandatory to
1010 		 * avoid failure of large write same requests directed at
1011 		 * sequential write required zones of host-managed ZBC disks.
1012 		 */
1013 		sdkp->max_ws_blocks =
1014 			round_down(sdkp->max_ws_blocks,
1015 				   bytes_to_logical(sdkp->device,
1016 						    sdkp->physical_block_size));
1017 	}
1018 
1019 out:
1020 	blk_queue_max_write_same_sectors(q, sdkp->max_ws_blocks *
1021 					 (logical_block_size >> 9));
1022 	blk_queue_max_write_zeroes_sectors(q, sdkp->max_ws_blocks *
1023 					 (logical_block_size >> 9));
1024 }
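
/*
 * Worked example (editorial) for the alignment above: with 512-byte
 * logical and 4096-byte physical blocks, bytes_to_logical() yields 8, so
 * a reported limit of 65535 blocks is rounded down to 65528, keeping
 * every maximally-sized WRITE SAME aligned to the 4 KiB physical blocks.
 */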
1025 
1026 /**
1027  * sd_setup_write_same_cmnd - write the same data to multiple blocks
1028  * @cmd: command to prepare
1029  *
1030  * Will set up either WRITE SAME(10) or WRITE SAME(16) depending on
1031  * the preference indicated by the target device.
1032  **/
1033 static blk_status_t sd_setup_write_same_cmnd(struct scsi_cmnd *cmd)
1034 {
1035 	struct request *rq = scsi_cmd_to_rq(cmd);
1036 	struct scsi_device *sdp = cmd->device;
1037 	struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
1038 	struct bio *bio = rq->bio;
1039 	u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));
1040 	u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
1041 	blk_status_t ret;
1042 
1043 	if (sdkp->device->no_write_same)
1044 		return BLK_STS_TARGET;
1045 
1046 	BUG_ON(bio_offset(bio) || bio_iovec(bio).bv_len != sdp->sector_size);
1047 
1048 	rq->timeout = SD_WRITE_SAME_TIMEOUT;
1049 
1050 	if (sdkp->ws16 || lba > 0xffffffff || nr_blocks > 0xffff) {
1051 		cmd->cmd_len = 16;
1052 		cmd->cmnd[0] = WRITE_SAME_16;
1053 		put_unaligned_be64(lba, &cmd->cmnd[2]);
1054 		put_unaligned_be32(nr_blocks, &cmd->cmnd[10]);
1055 	} else {
1056 		cmd->cmd_len = 10;
1057 		cmd->cmnd[0] = WRITE_SAME;
1058 		put_unaligned_be32(lba, &cmd->cmnd[2]);
1059 		put_unaligned_be16(nr_blocks, &cmd->cmnd[7]);
1060 	}
1061 
1062 	cmd->transfersize = sdp->sector_size;
1063 	cmd->allowed = sdkp->max_retries;
1064 
1065 	/*
1066 	 * For WRITE SAME the data transferred via the DATA OUT buffer is
1067 	 * different from the amount of data actually written to the target.
1068 	 *
1069 	 * We set up __data_len to the amount of data transferred via the
1070 	 * DATA OUT buffer so that blk_rq_map_sg sets up the proper S/G list
1071 	 * to transfer a single sector of data first, but then reset it to
1072 	 * the amount of data to be written right after so that the I/O path
1073 	 * knows how much to actually write.
1074 	 */
1075 	rq->__data_len = sdp->sector_size;
1076 	ret = scsi_alloc_sgtables(cmd);
1077 	rq->__data_len = blk_rq_bytes(rq);
1078 
1079 	return ret;
1080 }
1081 
1082 static blk_status_t sd_setup_flush_cmnd(struct scsi_cmnd *cmd)
1083 {
1084 	struct request *rq = scsi_cmd_to_rq(cmd);
1085 	struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
1086 
1087 	/* flush requests don't perform I/O, zero the S/G table */
1088 	memset(&cmd->sdb, 0, sizeof(cmd->sdb));
1089 
1090 	cmd->cmnd[0] = SYNCHRONIZE_CACHE;
1091 	cmd->cmd_len = 10;
1092 	cmd->transfersize = 0;
1093 	cmd->allowed = sdkp->max_retries;
1094 
1095 	rq->timeout = rq->q->rq_timeout * SD_FLUSH_TIMEOUT_MULTIPLIER;
1096 	return BLK_STS_OK;
1097 }
1098 
1099 static blk_status_t sd_setup_rw32_cmnd(struct scsi_cmnd *cmd, bool write,
1100 				       sector_t lba, unsigned int nr_blocks,
1101 				       unsigned char flags)
1102 {
1103 	cmd->cmnd = mempool_alloc(sd_cdb_pool, GFP_ATOMIC);
1104 	if (unlikely(cmd->cmnd == NULL))
1105 		return BLK_STS_RESOURCE;
1106 
1107 	cmd->cmd_len = SD_EXT_CDB_SIZE;
1108 	memset(cmd->cmnd, 0, cmd->cmd_len);
1109 
1110 	cmd->cmnd[0]  = VARIABLE_LENGTH_CMD;
1111 	cmd->cmnd[7]  = 0x18; /* Additional CDB len */
1112 	cmd->cmnd[9]  = write ? WRITE_32 : READ_32;
1113 	cmd->cmnd[10] = flags;
1114 	put_unaligned_be64(lba, &cmd->cmnd[12]);
1115 	put_unaligned_be32(lba, &cmd->cmnd[20]); /* Expected Indirect LBA */
1116 	put_unaligned_be32(nr_blocks, &cmd->cmnd[28]);
1117 
1118 	return BLK_STS_OK;
1119 }
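
/*
 * Layout sketch (editorial, based on the SBC READ(32)/WRITE(32) format)
 * of the 32-byte variable-length CDB built above:
 *
 *	cmnd[0]      VARIABLE_LENGTH_CMD (0x7f)
 *	cmnd[7]      additional CDB length (0x18 = 24 bytes follow)
 *	cmnd[8..9]   service action (READ_32/WRITE_32, low byte set above)
 *	cmnd[10]     protection and FUA flags
 *	cmnd[12..19] LBA
 *	cmnd[20..23] expected initial reference tag (Type 2 protection)
 *	cmnd[28..31] transfer length in logical blocks
 */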
1120 
1121 static blk_status_t sd_setup_rw16_cmnd(struct scsi_cmnd *cmd, bool write,
1122 				       sector_t lba, unsigned int nr_blocks,
1123 				       unsigned char flags)
1124 {
1125 	cmd->cmd_len  = 16;
1126 	cmd->cmnd[0]  = write ? WRITE_16 : READ_16;
1127 	cmd->cmnd[1]  = flags;
1128 	cmd->cmnd[14] = 0;
1129 	cmd->cmnd[15] = 0;
1130 	put_unaligned_be64(lba, &cmd->cmnd[2]);
1131 	put_unaligned_be32(nr_blocks, &cmd->cmnd[10]);
1132 
1133 	return BLK_STS_OK;
1134 }
1135 
1136 static blk_status_t sd_setup_rw10_cmnd(struct scsi_cmnd *cmd, bool write,
1137 				       sector_t lba, unsigned int nr_blocks,
1138 				       unsigned char flags)
1139 {
1140 	cmd->cmd_len = 10;
1141 	cmd->cmnd[0] = write ? WRITE_10 : READ_10;
1142 	cmd->cmnd[1] = flags;
1143 	cmd->cmnd[6] = 0;
1144 	cmd->cmnd[9] = 0;
1145 	put_unaligned_be32(lba, &cmd->cmnd[2]);
1146 	put_unaligned_be16(nr_blocks, &cmd->cmnd[7]);
1147 
1148 	return BLK_STS_OK;
1149 }
1150 
1151 static blk_status_t sd_setup_rw6_cmnd(struct scsi_cmnd *cmd, bool write,
1152 				      sector_t lba, unsigned int nr_blocks,
1153 				      unsigned char flags)
1154 {
1155 	/* Prevent a transfer length of 0 blocks from being translated into 256. */
1156 	if (WARN_ON_ONCE(nr_blocks == 0))
1157 		return BLK_STS_IOERR;
1158 
1159 	if (unlikely(flags & 0x8)) {
1160 		/*
1161 		 * This happens only if this drive failed a 10-byte rw
1162 		 * command with ILLEGAL_REQUEST during operation and
1163 		 * thus turned off use_10_for_rw.
1164 		 */
1165 		scmd_printk(KERN_ERR, cmd, "FUA write on READ/WRITE(6) drive\n");
1166 		return BLK_STS_IOERR;
1167 	}
1168 
1169 	cmd->cmd_len = 6;
1170 	cmd->cmnd[0] = write ? WRITE_6 : READ_6;
1171 	cmd->cmnd[1] = (lba >> 16) & 0x1f;
1172 	cmd->cmnd[2] = (lba >> 8) & 0xff;
1173 	cmd->cmnd[3] = lba & 0xff;
1174 	cmd->cmnd[4] = nr_blocks;
1175 	cmd->cmnd[5] = 0;
1176 
1177 	return BLK_STS_OK;
1178 }
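
/*
 * Worked example (editorial): READ(6)/WRITE(6) only carry a 21-bit LBA
 * split across bytes 1..3 and an 8-bit count. For lba == 0x123456 the
 * code above stores 0x12 & 0x1f == 0x12 in cmnd[1], 0x34 in cmnd[2] and
 * 0x56 in cmnd[3]; anything larger falls through to the 10/16-byte
 * variants selected in sd_setup_read_write_cmnd().
 */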
1179 
1180 static blk_status_t sd_setup_read_write_cmnd(struct scsi_cmnd *cmd)
1181 {
1182 	struct request *rq = scsi_cmd_to_rq(cmd);
1183 	struct scsi_device *sdp = cmd->device;
1184 	struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
1185 	sector_t lba = sectors_to_logical(sdp, blk_rq_pos(rq));
1186 	sector_t threshold;
1187 	unsigned int nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
1188 	unsigned int mask = logical_to_sectors(sdp, 1) - 1;
1189 	bool write = rq_data_dir(rq) == WRITE;
1190 	unsigned char protect, fua;
1191 	blk_status_t ret;
1192 	unsigned int dif;
1193 	bool dix;
1194 
1195 	ret = scsi_alloc_sgtables(cmd);
1196 	if (ret != BLK_STS_OK)
1197 		return ret;
1198 
1199 	ret = BLK_STS_IOERR;
1200 	if (!scsi_device_online(sdp) || sdp->changed) {
1201 		scmd_printk(KERN_ERR, cmd, "device offline or changed\n");
1202 		goto fail;
1203 	}
1204 
1205 	if (blk_rq_pos(rq) + blk_rq_sectors(rq) > get_capacity(rq->q->disk)) {
1206 		scmd_printk(KERN_ERR, cmd, "access beyond end of device\n");
1207 		goto fail;
1208 	}
1209 
1210 	if ((blk_rq_pos(rq) & mask) || (blk_rq_sectors(rq) & mask)) {
1211 		scmd_printk(KERN_ERR, cmd, "request not aligned to the logical block size\n");
1212 		goto fail;
1213 	}
1214 
1215 	/*
1216 	 * Some SD card readers can't handle accesses which touch the
1217 	 * last one or two logical blocks. Split accesses as needed.
1218 	 */
1219 	threshold = sdkp->capacity - SD_LAST_BUGGY_SECTORS;
1220 
1221 	if (unlikely(sdp->last_sector_bug && lba + nr_blocks > threshold)) {
1222 		if (lba < threshold) {
1223 			/* Access up to the threshold but not beyond */
1224 			nr_blocks = threshold - lba;
1225 		} else {
1226 			/* Access only a single logical block */
1227 			nr_blocks = 1;
1228 		}
1229 	}
1230 
1231 	if (req_op(rq) == REQ_OP_ZONE_APPEND) {
1232 		ret = sd_zbc_prepare_zone_append(cmd, &lba, nr_blocks);
1233 		if (ret)
1234 			goto fail;
1235 	}
1236 
1237 	fua = rq->cmd_flags & REQ_FUA ? 0x8 : 0;
1238 	dix = scsi_prot_sg_count(cmd);
1239 	dif = scsi_host_dif_capable(cmd->device->host, sdkp->protection_type);
1240 
1241 	if (dif || dix)
1242 		protect = sd_setup_protect_cmnd(cmd, dix, dif);
1243 	else
1244 		protect = 0;
1245 
1246 	if (protect && sdkp->protection_type == T10_PI_TYPE2_PROTECTION) {
1247 		ret = sd_setup_rw32_cmnd(cmd, write, lba, nr_blocks,
1248 					 protect | fua);
1249 	} else if (sdp->use_16_for_rw || (nr_blocks > 0xffff)) {
1250 		ret = sd_setup_rw16_cmnd(cmd, write, lba, nr_blocks,
1251 					 protect | fua);
1252 	} else if ((nr_blocks > 0xff) || (lba > 0x1fffff) ||
1253 		   sdp->use_10_for_rw || protect) {
1254 		ret = sd_setup_rw10_cmnd(cmd, write, lba, nr_blocks,
1255 					 protect | fua);
1256 	} else {
1257 		ret = sd_setup_rw6_cmnd(cmd, write, lba, nr_blocks,
1258 					protect | fua);
1259 	}
1260 
1261 	if (unlikely(ret != BLK_STS_OK))
1262 		goto fail;
1263 
1264 	/*
1265 	 * We shouldn't disconnect in the middle of a sector, so with a dumb
1266 	 * host adapter, it's safe to assume that we can at least transfer
1267 	 * this many bytes between each connect / disconnect.
1268 	 */
1269 	cmd->transfersize = sdp->sector_size;
1270 	cmd->underflow = nr_blocks << 9;
1271 	cmd->allowed = sdkp->max_retries;
1272 	cmd->sdb.length = nr_blocks * sdp->sector_size;
1273 
1274 	SCSI_LOG_HLQUEUE(1,
1275 			 scmd_printk(KERN_INFO, cmd,
1276 				     "%s: block=%llu, count=%d\n", __func__,
1277 				     (unsigned long long)blk_rq_pos(rq),
1278 				     blk_rq_sectors(rq)));
1279 	SCSI_LOG_HLQUEUE(2,
1280 			 scmd_printk(KERN_INFO, cmd,
1281 				     "%s %d/%u 512 byte blocks.\n",
1282 				     write ? "writing" : "reading", nr_blocks,
1283 				     blk_rq_sectors(rq)));
1284 
1285 	/*
1286 	 * This indicates that the command is ready from our end to be queued.
1287 	 */
1288 	return BLK_STS_OK;
1289 fail:
1290 	scsi_free_sgtables(cmd);
1291 	return ret;
1292 }
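
/*
 * Editorial summary of the CDB selection above:
 *
 *	RW(32)	Type 2 protection with PI enabled (needs the expected
 *		reference tag field)
 *	RW(16)	sdp->use_16_for_rw, or more than 0xffff blocks
 *	RW(10)	more than 0xff blocks, LBA above 0x1fffff,
 *		sdp->use_10_for_rw, or any protection
 *	RW(6)	everything else (small transfers near the start of the
 *		disk on legacy devices)
 */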
1293 
1294 static blk_status_t sd_init_command(struct scsi_cmnd *cmd)
1295 {
1296 	struct request *rq = scsi_cmd_to_rq(cmd);
1297 
1298 	switch (req_op(rq)) {
1299 	case REQ_OP_DISCARD:
1300 		switch (scsi_disk(rq->q->disk)->provisioning_mode) {
1301 		case SD_LBP_UNMAP:
1302 			return sd_setup_unmap_cmnd(cmd);
1303 		case SD_LBP_WS16:
1304 			return sd_setup_write_same16_cmnd(cmd, true);
1305 		case SD_LBP_WS10:
1306 			return sd_setup_write_same10_cmnd(cmd, true);
1307 		case SD_LBP_ZERO:
1308 			return sd_setup_write_same10_cmnd(cmd, false);
1309 		default:
1310 			return BLK_STS_TARGET;
1311 		}
1312 	case REQ_OP_WRITE_ZEROES:
1313 		return sd_setup_write_zeroes_cmnd(cmd);
1314 	case REQ_OP_WRITE_SAME:
1315 		return sd_setup_write_same_cmnd(cmd);
1316 	case REQ_OP_FLUSH:
1317 		return sd_setup_flush_cmnd(cmd);
1318 	case REQ_OP_READ:
1319 	case REQ_OP_WRITE:
1320 	case REQ_OP_ZONE_APPEND:
1321 		return sd_setup_read_write_cmnd(cmd);
1322 	case REQ_OP_ZONE_RESET:
1323 		return sd_zbc_setup_zone_mgmt_cmnd(cmd, ZO_RESET_WRITE_POINTER,
1324 						   false);
1325 	case REQ_OP_ZONE_RESET_ALL:
1326 		return sd_zbc_setup_zone_mgmt_cmnd(cmd, ZO_RESET_WRITE_POINTER,
1327 						   true);
1328 	case REQ_OP_ZONE_OPEN:
1329 		return sd_zbc_setup_zone_mgmt_cmnd(cmd, ZO_OPEN_ZONE, false);
1330 	case REQ_OP_ZONE_CLOSE:
1331 		return sd_zbc_setup_zone_mgmt_cmnd(cmd, ZO_CLOSE_ZONE, false);
1332 	case REQ_OP_ZONE_FINISH:
1333 		return sd_zbc_setup_zone_mgmt_cmnd(cmd, ZO_FINISH_ZONE, false);
1334 	default:
1335 		WARN_ON_ONCE(1);
1336 		return BLK_STS_NOTSUPP;
1337 	}
1338 }
1339 
1340 static void sd_uninit_command(struct scsi_cmnd *SCpnt)
1341 {
1342 	struct request *rq = scsi_cmd_to_rq(SCpnt);
1343 	u8 *cmnd;
1344 
1345 	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
1346 		mempool_free(rq->special_vec.bv_page, sd_page_pool);
1347 
1348 	if (SCpnt->cmnd != scsi_req(rq)->cmd) {
1349 		cmnd = SCpnt->cmnd;
1350 		SCpnt->cmnd = NULL;
1351 		SCpnt->cmd_len = 0;
1352 		mempool_free(cmnd, sd_cdb_pool);
1353 	}
1354 }
1355 
1356 static bool sd_need_revalidate(struct block_device *bdev,
1357 		struct scsi_disk *sdkp)
1358 {
1359 	if (sdkp->device->removable || sdkp->write_prot) {
1360 		if (bdev_check_media_change(bdev))
1361 			return true;
1362 	}
1363 
1364 	/*
1365 	 * Force a full rescan after ioctl(BLKRRPART).  While the disk state has
1366 	 * nothing to do with partitions, BLKRRPART is used to force a full
1367 	 * revalidate after things like a format for historical reasons.
1368 	 */
1369 	return test_bit(GD_NEED_PART_SCAN, &bdev->bd_disk->state);
1370 }
1371 
1372 /**
1373  *	sd_open - open a scsi disk device
1374  *	@bdev: Block device of the scsi disk to open
1375  *	@mode: FMODE_* mask
1376  *
1377  *	Returns 0 if successful. Returns a negated errno value in case
1378  *	of error.
1379  *
1380  *	Note: This can be called from a user context (e.g. fsck(1))
1381  *	or from within the kernel (e.g. as a result of a mount(1)).
1382  *	In the latter case @bdev and @mode carry an abridged amount
1383  *	of information.
1384  *
1385  *	Locking: called with bdev->bd_disk->open_mutex held.
1386  **/
1387 static int sd_open(struct block_device *bdev, fmode_t mode)
1388 {
1389 	struct scsi_disk *sdkp = scsi_disk(bdev->bd_disk);
1390 	struct scsi_device *sdev = sdkp->device;
1391 	int retval;
1392 
1393 	if (scsi_device_get(sdev))
1394 		return -ENXIO;
1395 
1396 	SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_open\n"));
1397 
1398 	/*
1399 	 * If the device is in error recovery, wait until it is done.
1400 	 * If the device is offline, then disallow any access to it.
1401 	 */
1402 	retval = -ENXIO;
1403 	if (!scsi_block_when_processing_errors(sdev))
1404 		goto error_out;
1405 
1406 	if (sd_need_revalidate(bdev, sdkp))
1407 		sd_revalidate_disk(bdev->bd_disk);
1408 
1409 	/*
1410 	 * If the drive is empty, just let the open fail.
1411 	 */
1412 	retval = -ENOMEDIUM;
1413 	if (sdev->removable && !sdkp->media_present && !(mode & FMODE_NDELAY))
1414 		goto error_out;
1415 
1416 	/*
1417 	 * If the device has the write protect tab set, have the open fail
1418 	 * if the user expects to be able to write to the thing.
1419 	 */
1420 	retval = -EROFS;
1421 	if (sdkp->write_prot && (mode & FMODE_WRITE))
1422 		goto error_out;
1423 
1424 	/*
1425 	 * It is possible that the disk changing stuff resulted in
1426 	 * the device being taken offline.  If this is the case,
1427 	 * report this to the user, and don't pretend that the
1428 	 * open actually succeeded.
1429 	 */
1430 	retval = -ENXIO;
1431 	if (!scsi_device_online(sdev))
1432 		goto error_out;
1433 
1434 	if ((atomic_inc_return(&sdkp->openers) == 1) && sdev->removable) {
1435 		if (scsi_block_when_processing_errors(sdev))
1436 			scsi_set_medium_removal(sdev, SCSI_REMOVAL_PREVENT);
1437 	}
1438 
1439 	return 0;
1440 
1441 error_out:
1442 	scsi_device_put(sdev);
1443 	return retval;
1444 }
1445 
1446 /**
1447  *	sd_release - invoked when the (last) close(2) is called on this
1448  *	scsi disk.
1449  *	@disk: disk to release
1450  *	@mode: FMODE_* mask
1451  *
1452  *	Returns 0.
1453  *
1454  *	Note: may block (uninterruptible) if error recovery is underway
1455  *	on this disk.
1456  *
1457  *	Locking: called with bdev->bd_disk->open_mutex held.
1458  **/
1459 static void sd_release(struct gendisk *disk, fmode_t mode)
1460 {
1461 	struct scsi_disk *sdkp = scsi_disk(disk);
1462 	struct scsi_device *sdev = sdkp->device;
1463 
1464 	SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_release\n"));
1465 
1466 	if (atomic_dec_return(&sdkp->openers) == 0 && sdev->removable) {
1467 		if (scsi_block_when_processing_errors(sdev))
1468 			scsi_set_medium_removal(sdev, SCSI_REMOVAL_ALLOW);
1469 	}
1470 
1471 	scsi_device_put(sdev);
1472 }
1473 
1474 static int sd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
1475 {
1476 	struct scsi_disk *sdkp = scsi_disk(bdev->bd_disk);
1477 	struct scsi_device *sdp = sdkp->device;
1478 	struct Scsi_Host *host = sdp->host;
1479 	sector_t capacity = logical_to_sectors(sdp, sdkp->capacity);
1480 	int diskinfo[4];
1481 
1482 	/* default to most commonly used values */
1483 	diskinfo[0] = 0x40;	/* 1 << 6 */
1484 	diskinfo[1] = 0x20;	/* 1 << 5 */
1485 	diskinfo[2] = capacity >> 11;
1486 
1487 	/* override with calculated, extended default, or driver values */
1488 	if (host->hostt->bios_param)
1489 		host->hostt->bios_param(sdp, bdev, capacity, diskinfo);
1490 	else
1491 		scsicam_bios_param(bdev, capacity, diskinfo);
1492 
1493 	geo->heads = diskinfo[0];
1494 	geo->sectors = diskinfo[1];
1495 	geo->cylinders = diskinfo[2];
1496 	return 0;
1497 }
1498 
1499 /**
1500  *	sd_ioctl - process an ioctl
1501  *	@bdev: target block device
1502  *	@mode: FMODE_* mask
1503  *	@cmd: ioctl command number
1504  *	@arg: the third argument given to the ioctl(2) system call.
1505  *	Often contains a pointer.
1506  *
1507  *	Returns 0 if successful (some ioctls return positive numbers on
1508  *	success as well). Returns a negated errno value in case of error.
1509  *
1510  *	Note: most ioctls are forwarded onto the block subsystem or further
1511  *	down in the scsi subsystem.
1512  **/
1513 static int sd_ioctl(struct block_device *bdev, fmode_t mode,
1514 		    unsigned int cmd, unsigned long arg)
1515 {
1516 	struct gendisk *disk = bdev->bd_disk;
1517 	struct scsi_disk *sdkp = scsi_disk(disk);
1518 	struct scsi_device *sdp = sdkp->device;
1519 	void __user *p = (void __user *)arg;
1520 	int error;
1521 
1522 	SCSI_LOG_IOCTL(1, sd_printk(KERN_INFO, sdkp, "sd_ioctl: disk=%s, "
1523 				    "cmd=0x%x\n", disk->disk_name, cmd));
1524 
1525 	if (bdev_is_partition(bdev) && !capable(CAP_SYS_RAWIO))
1526 		return -ENOIOCTLCMD;
1527 
1528 	/*
1529 	 * If we are in the middle of error recovery, don't let anyone
1530 	 * else try and use this device.  Also, if error recovery fails, it
1531 	 * may try and take the device offline, in which case all further
1532 	 * access to the device is prohibited.
1533 	 */
1534 	error = scsi_ioctl_block_when_processing_errors(sdp, cmd,
1535 			(mode & FMODE_NDELAY) != 0);
1536 	if (error)
1537 		return error;
1538 
1539 	if (is_sed_ioctl(cmd))
1540 		return sed_ioctl(sdkp->opal_dev, cmd, p);
1541 	return scsi_ioctl(sdp, mode, cmd, p);
1542 }
1543 
1544 static void set_media_not_present(struct scsi_disk *sdkp)
1545 {
1546 	if (sdkp->media_present)
1547 		sdkp->device->changed = 1;
1548 
1549 	if (sdkp->device->removable) {
1550 		sdkp->media_present = 0;
1551 		sdkp->capacity = 0;
1552 	}
1553 }
1554 
1555 static int media_not_present(struct scsi_disk *sdkp,
1556 			     struct scsi_sense_hdr *sshdr)
1557 {
1558 	if (!scsi_sense_valid(sshdr))
1559 		return 0;
1560 
1561 	/* not invoked for commands that could return deferred errors */
1562 	switch (sshdr->sense_key) {
1563 	case UNIT_ATTENTION:
1564 	case NOT_READY:
1565 		/* medium not present */
1566 		if (sshdr->asc == 0x3A) {
1567 			set_media_not_present(sdkp);
1568 			return 1;
1569 		}
1570 	}
1571 	return 0;
1572 }
1573 
1574 /**
1575  *	sd_check_events - check media events
1576  *	@disk: kernel device descriptor
1577  *	@clearing: disk events currently being cleared
1578  *
1579  *	Returns mask of DISK_EVENT_*.
1580  *
1581  *	Note: this function is invoked from the block subsystem.
1582  **/
1583 static unsigned int sd_check_events(struct gendisk *disk, unsigned int clearing)
1584 {
1585 	struct scsi_disk *sdkp = disk->private_data;
1586 	struct scsi_device *sdp;
1587 	int retval;
1588 	bool disk_changed;
1589 
1590 	if (!sdkp)
1591 		return 0;
1592 
1593 	sdp = sdkp->device;
1594 	SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_check_events\n"));
1595 
1596 	/*
1597 	 * If the device is offline, don't send any commands - just pretend as
1598 	 * if the command failed.  If the device ever comes back online, we
1599 	 * can deal with it then.  It is only because of unrecoverable errors
1600 	 * that we would ever take a device offline in the first place.
1601 	 */
1602 	if (!scsi_device_online(sdp)) {
1603 		set_media_not_present(sdkp);
1604 		goto out;
1605 	}
1606 
1607 	/*
1608 	 * Using TEST_UNIT_READY enables differentiation between a drive with
1609 	 * no cartridge loaded - NOT READY, a drive with a changed cartridge -
1610 	 * UNIT ATTENTION, and one with the same cartridge - GOOD STATUS.
1611 	 *
1612 	 * Drives that auto spin down, e.g. the Iomega Jaz 1G, will be started
1613 	 * by sd_spinup_disk(), which runs from sd_revalidate_disk() on every
1614 	 * revalidation.
1615 	 */
1616 	if (scsi_block_when_processing_errors(sdp)) {
1617 		struct scsi_sense_hdr sshdr = { 0, };
1618 
1619 		retval = scsi_test_unit_ready(sdp, SD_TIMEOUT, sdkp->max_retries,
1620 					      &sshdr);
1621 
1622 		/* failed to execute TUR, assume media not present */
1623 		if (retval < 0 || host_byte(retval)) {
1624 			set_media_not_present(sdkp);
1625 			goto out;
1626 		}
1627 
1628 		if (media_not_present(sdkp, &sshdr))
1629 			goto out;
1630 	}
1631 
1632 	/*
1633 	 * For removable scsi disks we have to recognise the presence
1634 	 * of a disk in the drive.
1635 	 */
1636 	if (!sdkp->media_present)
1637 		sdp->changed = 1;
1638 	sdkp->media_present = 1;
1639 out:
1640 	/*
1641 	 * sdp->changed is set under the following conditions:
1642 	 *
1643 	 *	Medium present state has changed in either direction.
1644 	 *	Device has indicated UNIT_ATTENTION.
1645 	 */
1646 	disk_changed = sdp->changed;
1647 	sdp->changed = 0;
1648 	return disk_changed ? DISK_EVENT_MEDIA_CHANGE : 0;
1649 }
1650 
1651 static int sd_sync_cache(struct scsi_disk *sdkp, struct scsi_sense_hdr *sshdr)
1652 {
1653 	int retries, res;
1654 	struct scsi_device *sdp = sdkp->device;
1655 	const int timeout = sdp->request_queue->rq_timeout
1656 		* SD_FLUSH_TIMEOUT_MULTIPLIER;
1657 	struct scsi_sense_hdr my_sshdr;
1658 
1659 	if (!scsi_device_online(sdp))
1660 		return -ENODEV;
1661 
1662 	/* caller might not be interested in sense, but we need it */
1663 	if (!sshdr)
1664 		sshdr = &my_sshdr;
1665 
1666 	for (retries = 3; retries > 0; --retries) {
1667 		unsigned char cmd[10] = { 0 };
1668 
1669 		cmd[0] = SYNCHRONIZE_CACHE;
1670 		/*
1671 		 * Leave the rest of the command zero to indicate
1672 		 * flush everything.
1673 		 */
1674 		res = scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL, sshdr,
1675 				timeout, sdkp->max_retries, 0, RQF_PM, NULL);
1676 		if (res == 0)
1677 			break;
1678 	}
1679 
1680 	if (res) {
1681 		sd_print_result(sdkp, "Synchronize Cache(10) failed", res);
1682 
1683 		if (res < 0)
1684 			return res;
1685 
1686 		if (scsi_status_is_check_condition(res) &&
1687 		    scsi_sense_valid(sshdr)) {
1688 			sd_print_sense_hdr(sdkp, sshdr);
1689 
1690 			/* we need to evaluate the error return  */
1691 			if (sshdr->asc == 0x3a ||	/* medium not present */
1692 			    sshdr->asc == 0x20 ||	/* invalid command */
1693 			    (sshdr->asc == 0x74 && sshdr->ascq == 0x71))	/* drive is password locked */
1694 				/* this is no error here */
1695 				return 0;
1696 		}
1697 
1698 		switch (host_byte(res)) {
1699 		/* ignore errors due to racing a disconnection */
1700 		case DID_BAD_TARGET:
1701 		case DID_NO_CONNECT:
1702 			return 0;
1703 		/* signal the upper layer it might try again */
1704 		case DID_BUS_BUSY:
1705 		case DID_IMM_RETRY:
1706 		case DID_REQUEUE:
1707 		case DID_SOFT_ERROR:
1708 			return -EBUSY;
1709 		default:
1710 			return -EIO;
1711 		}
1712 	}
1713 	return 0;
1714 }
1715 
1716 static void sd_rescan(struct device *dev)
1717 {
1718 	struct scsi_disk *sdkp = dev_get_drvdata(dev);
1719 
1720 	sd_revalidate_disk(sdkp->disk);
1721 }
1722 
1723 static int sd_get_unique_id(struct gendisk *disk, u8 id[16],
1724 		enum blk_unique_id type)
1725 {
1726 	struct scsi_device *sdev = scsi_disk(disk)->device;
1727 	const struct scsi_vpd *vpd;
1728 	const unsigned char *d;
1729 	int ret = -ENXIO, len;
1730 
1731 	rcu_read_lock();
1732 	vpd = rcu_dereference(sdev->vpd_pg83);
1733 	if (!vpd)
1734 		goto out_unlock;
1735 
1736 	ret = -EINVAL;
1737 	for (d = vpd->data + 4; d < vpd->data + vpd->len; d += d[3] + 4) {
1738 		/* we only care about designators with LU association */
1739 		if (((d[1] >> 4) & 0x3) != 0x00)
1740 			continue;
1741 		if ((d[1] & 0xf) != type)
1742 			continue;
1743 
1744 		/*
1745 		 * Only exit early if a 16-byte descriptor was found.  Otherwise
1746 		 * keep looking as one with more entropy might still show up.
1747 		 */
1748 		len = d[3];
1749 		if (len != 8 && len != 12 && len != 16)
1750 			continue;
1751 		ret = len;
1752 		memcpy(id, d + 4, len);
1753 		if (len == 16)
1754 			break;
1755 	}
1756 out_unlock:
1757 	rcu_read_unlock();
1758 	return ret;
1759 }
1760 
1761 static char sd_pr_type(enum pr_type type)
1762 {
1763 	switch (type) {
1764 	case PR_WRITE_EXCLUSIVE:
1765 		return 0x01;
1766 	case PR_EXCLUSIVE_ACCESS:
1767 		return 0x03;
1768 	case PR_WRITE_EXCLUSIVE_REG_ONLY:
1769 		return 0x05;
1770 	case PR_EXCLUSIVE_ACCESS_REG_ONLY:
1771 		return 0x06;
1772 	case PR_WRITE_EXCLUSIVE_ALL_REGS:
1773 		return 0x07;
1774 	case PR_EXCLUSIVE_ACCESS_ALL_REGS:
1775 		return 0x08;
1776 	default:
1777 		return 0;
1778 	}
1779 };
1780 
1781 static int sd_pr_command(struct block_device *bdev, u8 sa,
1782 		u64 key, u64 sa_key, u8 type, u8 flags)
1783 {
1784 	struct scsi_disk *sdkp = scsi_disk(bdev->bd_disk);
1785 	struct scsi_device *sdev = sdkp->device;
1786 	struct scsi_sense_hdr sshdr;
1787 	int result;
1788 	u8 cmd[16] = { 0, };
1789 	u8 data[24] = { 0, };
1790 
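	/*
	 * PERSISTENT RESERVE OUT (SPC-3): the CDB carries the service
	 * action in byte 1, scope/type in byte 2 (scope 0 = LU_SCOPE)
	 * and the parameter list length in bytes 5-8.  The fixed
	 * 24-byte parameter list holds the reservation key in bytes
	 * 0-7, the service action reservation key in bytes 8-15 and
	 * the flags (e.g. APTPL) in byte 20.
	 */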
1791 	cmd[0] = PERSISTENT_RESERVE_OUT;
1792 	cmd[1] = sa;
1793 	cmd[2] = type;
1794 	put_unaligned_be32(sizeof(data), &cmd[5]);
1795 
1796 	put_unaligned_be64(key, &data[0]);
1797 	put_unaligned_be64(sa_key, &data[8]);
1798 	data[20] = flags;
1799 
1800 	result = scsi_execute_req(sdev, cmd, DMA_TO_DEVICE, &data, sizeof(data),
1801 			&sshdr, SD_TIMEOUT, sdkp->max_retries, NULL);
1802 
1803 	if (scsi_status_is_check_condition(result) &&
1804 	    scsi_sense_valid(&sshdr)) {
1805 		sdev_printk(KERN_INFO, sdev, "PR command failed: %d\n", result);
1806 		scsi_print_sense_hdr(sdev, NULL, &sshdr);
1807 	}
1808 
1809 	return result;
1810 }
1811 
1812 static int sd_pr_register(struct block_device *bdev, u64 old_key, u64 new_key,
1813 		u32 flags)
1814 {
1815 	if (flags & ~PR_FL_IGNORE_KEY)
1816 		return -EOPNOTSUPP;
1817 	return sd_pr_command(bdev, (flags & PR_FL_IGNORE_KEY) ? 0x06 : 0x00,
1818 			old_key, new_key, 0,
1819 			(1 << 0) /* APTPL */);
1820 }
1821 
1822 static int sd_pr_reserve(struct block_device *bdev, u64 key, enum pr_type type,
1823 		u32 flags)
1824 {
1825 	if (flags)
1826 		return -EOPNOTSUPP;
1827 	return sd_pr_command(bdev, 0x01, key, 0, sd_pr_type(type), 0);
1828 }
1829 
1830 static int sd_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
1831 {
1832 	return sd_pr_command(bdev, 0x02, key, 0, sd_pr_type(type), 0);
1833 }
1834 
1835 static int sd_pr_preempt(struct block_device *bdev, u64 old_key, u64 new_key,
1836 		enum pr_type type, bool abort)
1837 {
1838 	return sd_pr_command(bdev, abort ? 0x05 : 0x04, old_key, new_key,
1839 			     sd_pr_type(type), 0);
1840 }
1841 
1842 static int sd_pr_clear(struct block_device *bdev, u64 key)
1843 {
1844 	return sd_pr_command(bdev, 0x03, key, 0, 0, 0);
1845 }
1846 
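/*
 * PERSISTENT RESERVE OUT service action codes used above (SPC-3):
 * 00h REGISTER, 01h RESERVE, 02h RELEASE, 03h CLEAR, 04h PREEMPT,
 * 05h PREEMPT AND ABORT, 06h REGISTER AND IGNORE EXISTING KEY.
 */
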
1847 static const struct pr_ops sd_pr_ops = {
1848 	.pr_register	= sd_pr_register,
1849 	.pr_reserve	= sd_pr_reserve,
1850 	.pr_release	= sd_pr_release,
1851 	.pr_preempt	= sd_pr_preempt,
1852 	.pr_clear	= sd_pr_clear,
1853 };
1854 
1855 static void scsi_disk_free_disk(struct gendisk *disk)
1856 {
1857 	struct scsi_disk *sdkp = scsi_disk(disk);
1858 
1859 	put_device(&sdkp->disk_dev);
1860 }
1861 
1862 static const struct block_device_operations sd_fops = {
1863 	.owner			= THIS_MODULE,
1864 	.open			= sd_open,
1865 	.release		= sd_release,
1866 	.ioctl			= sd_ioctl,
1867 	.getgeo			= sd_getgeo,
1868 	.compat_ioctl		= blkdev_compat_ptr_ioctl,
1869 	.check_events		= sd_check_events,
1870 	.unlock_native_capacity	= sd_unlock_native_capacity,
1871 	.report_zones		= sd_zbc_report_zones,
1872 	.get_unique_id		= sd_get_unique_id,
1873 	.free_disk		= scsi_disk_free_disk,
1874 	.pr_ops			= &sd_pr_ops,
1875 };
1876 
1877 /**
1878  *	sd_eh_reset - reset error handling callback
1879  *	@scmd:		sd-issued command that has failed
1880  *
1881  *	This function is called by the SCSI midlayer before starting
1882  *	SCSI EH. When counting medium access failures we have to be
1883 	 *	careful to register it only once per device and SCSI EH run;
1884  *	there might be several timed out commands which will cause the
1885  *	'max_medium_access_timeouts' counter to trigger after the first
1886  *	SCSI EH run already and set the device to offline.
1887  *	So this function resets the internal counter before starting SCSI EH.
1888  **/
1889 static void sd_eh_reset(struct scsi_cmnd *scmd)
1890 {
1891 	struct scsi_disk *sdkp = scsi_disk(scsi_cmd_to_rq(scmd)->q->disk);
1892 
1893 	/* New SCSI EH run, reset gate variable */
1894 	sdkp->ignore_medium_access_errors = false;
1895 }
1896 
1897 /**
1898  *	sd_eh_action - error handling callback
1899  *	@scmd:		sd-issued command that has failed
1900  *	@eh_disp:	The recovery disposition suggested by the midlayer
1901  *
1902  *	This function is called by the SCSI midlayer upon completion of an
1903  *	error test command (currently TEST UNIT READY). The result of sending
1904  *	the eh command is passed in eh_disp.  We're looking for devices that
1905 	 *	fail medium access commands but are OK with non-access commands like
1906 	 *	TEST UNIT READY (and so would wrongly be seen as having made a
1907 	 *	successful recovery).
1908  **/
1909 static int sd_eh_action(struct scsi_cmnd *scmd, int eh_disp)
1910 {
1911 	struct scsi_disk *sdkp = scsi_disk(scsi_cmd_to_rq(scmd)->q->disk);
1912 	struct scsi_device *sdev = scmd->device;
1913 
1914 	if (!scsi_device_online(sdev) ||
1915 	    !scsi_medium_access_command(scmd) ||
1916 	    host_byte(scmd->result) != DID_TIME_OUT ||
1917 	    eh_disp != SUCCESS)
1918 		return eh_disp;
1919 
1920 	/*
1921 	 * The device has timed out executing a medium access command.
1922 	 * However, the TEST UNIT READY command sent during error
1923 	 * handling completed successfully. Either the device is in the
1924 	 * process of recovering or it has suffered an internal failure
1925 	 * that prevents access to the storage medium.
1926 	 */
1927 	if (!sdkp->ignore_medium_access_errors) {
1928 		sdkp->medium_access_timed_out++;
1929 		sdkp->ignore_medium_access_errors = true;
1930 	}
1931 
1932 	/*
1933 	 * If the device keeps failing read/write commands but TEST UNIT
1934 	 * READY always completes successfully we assume that medium
1935 	 * access is no longer possible and take the device offline.
1936 	 */
1937 	if (sdkp->medium_access_timed_out >= sdkp->max_medium_access_timeouts) {
1938 		scmd_printk(KERN_ERR, scmd,
1939 			    "Medium access timeout failure. Offlining disk!\n");
1940 		mutex_lock(&sdev->state_mutex);
1941 		scsi_device_set_state(sdev, SDEV_OFFLINE);
1942 		mutex_unlock(&sdev->state_mutex);
1943 
1944 		return SUCCESS;
1945 	}
1946 
1947 	return eh_disp;
1948 }
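
/*
 * Example: with the default limit of two medium access timeouts
 * (SD_MAX_MEDIUM_TIMEOUTS), two EH runs that each end with a timed-out
 * medium access command but a successful TEST UNIT READY will offline
 * the device.  Any successfully completed command resets the counter
 * in sd_done(), and sd_eh_reset() re-arms the per-run gate.
 */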
1949 
1950 static unsigned int sd_completed_bytes(struct scsi_cmnd *scmd)
1951 {
1952 	struct request *req = scsi_cmd_to_rq(scmd);
1953 	struct scsi_device *sdev = scmd->device;
1954 	unsigned int transferred, good_bytes;
1955 	u64 start_lba, end_lba, bad_lba;
1956 
1957 	/*
1958 	 * Some commands have a payload smaller than the device logical
1959 	 * block size (e.g. INQUIRY on a 4K disk).
1960 	 */
1961 	if (scsi_bufflen(scmd) <= sdev->sector_size)
1962 		return 0;
1963 
1964 	/* Check if we have a 'bad_lba' information */
1965 	if (!scsi_get_sense_info_fld(scmd->sense_buffer,
1966 				     SCSI_SENSE_BUFFERSIZE,
1967 				     &bad_lba))
1968 		return 0;
1969 
1970 	/*
1971 	 * If the bad lba was reported incorrectly, we have no idea where
1972 	 * the error is.
1973 	 */
1974 	start_lba = sectors_to_logical(sdev, blk_rq_pos(req));
1975 	end_lba = start_lba + bytes_to_logical(sdev, scsi_bufflen(scmd));
1976 	if (bad_lba < start_lba || bad_lba >= end_lba)
1977 		return 0;
1978 
1979 	/*
1980 	 * resid is optional but mostly filled in.  When it's unused,
1981 	 * its value is zero, so we assume the whole buffer transferred.
1982 	 */
1983 	transferred = scsi_bufflen(scmd) - scsi_get_resid(scmd);
1984 
1985 	/* This computation should always be done in terms of the
1986 	 * resolution of the device's medium.
1987 	 */
1988 	good_bytes = logical_to_bytes(sdev, bad_lba - start_lba);
1989 
1990 	return min(good_bytes, transferred);
1991 }
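
/*
 * Worked example (for illustration only): a READ of 8 blocks starting
 * at LBA 1000 on a 4096-byte logical block device that reports bad_lba
 * 1003 yields good_bytes = (1003 - 1000) * 4096 = 12288 bytes, further
 * capped by the number of bytes the device actually transferred.
 */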
1992 
1993 /**
1994  *	sd_done - bottom half handler: called when the lower level
1995  *	driver has completed (successfully or otherwise) a scsi command.
1996  *	@SCpnt: mid-level's per command structure.
1997  *
1998  *	Note: potentially run from within an ISR. Must not block.
1999  **/
2000 static int sd_done(struct scsi_cmnd *SCpnt)
2001 {
2002 	int result = SCpnt->result;
2003 	unsigned int good_bytes = result ? 0 : scsi_bufflen(SCpnt);
2004 	unsigned int sector_size = SCpnt->device->sector_size;
2005 	unsigned int resid;
2006 	struct scsi_sense_hdr sshdr;
2007 	struct request *req = scsi_cmd_to_rq(SCpnt);
2008 	struct scsi_disk *sdkp = scsi_disk(req->q->disk);
2009 	int sense_valid = 0;
2010 	int sense_deferred = 0;
2011 
2012 	switch (req_op(req)) {
2013 	case REQ_OP_DISCARD:
2014 	case REQ_OP_WRITE_ZEROES:
2015 	case REQ_OP_WRITE_SAME:
2016 	case REQ_OP_ZONE_RESET:
2017 	case REQ_OP_ZONE_RESET_ALL:
2018 	case REQ_OP_ZONE_OPEN:
2019 	case REQ_OP_ZONE_CLOSE:
2020 	case REQ_OP_ZONE_FINISH:
2021 		if (!result) {
2022 			good_bytes = blk_rq_bytes(req);
2023 			scsi_set_resid(SCpnt, 0);
2024 		} else {
2025 			good_bytes = 0;
2026 			scsi_set_resid(SCpnt, blk_rq_bytes(req));
2027 		}
2028 		break;
2029 	default:
2030 		/*
2031 		 * In case of bogus fw or device, we could end up having
2032 		 * an unaligned partial completion. Check this here and force
2033 		 * alignment.
2034 		 */
2035 		resid = scsi_get_resid(SCpnt);
2036 		if (resid & (sector_size - 1)) {
2037 			sd_printk(KERN_INFO, sdkp,
2038 				"Unaligned partial completion (resid=%u, sector_sz=%u)\n",
2039 				resid, sector_size);
2040 			scsi_print_command(SCpnt);
2041 			resid = min(scsi_bufflen(SCpnt),
2042 				    round_up(resid, sector_size));
2043 			scsi_set_resid(SCpnt, resid);
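			/* e.g. a bogus resid of 700 on a 512-byte device
			 * is rounded up to 1024 here, keeping the
			 * completion accounting sector aligned.
			 */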
2044 		}
2045 	}
2046 
2047 	if (result) {
2048 		sense_valid = scsi_command_normalize_sense(SCpnt, &sshdr);
2049 		if (sense_valid)
2050 			sense_deferred = scsi_sense_is_deferred(&sshdr);
2051 	}
2052 	sdkp->medium_access_timed_out = 0;
2053 
2054 	if (!scsi_status_is_check_condition(result) &&
2055 	    (!sense_valid || sense_deferred))
2056 		goto out;
2057 
2058 	switch (sshdr.sense_key) {
2059 	case HARDWARE_ERROR:
2060 	case MEDIUM_ERROR:
2061 		good_bytes = sd_completed_bytes(SCpnt);
2062 		break;
2063 	case RECOVERED_ERROR:
2064 		good_bytes = scsi_bufflen(SCpnt);
2065 		break;
2066 	case NO_SENSE:
2067 		/* This indicates a false check condition, so clear the
2068 		 * result and sense data.  An unknown amount of data was
2069 		 * transferred, though, so no bytes are counted as good.
2070 		 */
2071 		SCpnt->result = 0;
2072 		memset(SCpnt->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
2073 		break;
2074 	case ABORTED_COMMAND:
2075 		if (sshdr.asc == 0x10)  /* DIF: Target detected corruption */
2076 			good_bytes = sd_completed_bytes(SCpnt);
2077 		break;
2078 	case ILLEGAL_REQUEST:
2079 		switch (sshdr.asc) {
2080 		case 0x10:	/* DIX: Host detected corruption */
2081 			good_bytes = sd_completed_bytes(SCpnt);
2082 			break;
2083 		case 0x20:	/* INVALID COMMAND OPCODE */
2084 		case 0x24:	/* INVALID FIELD IN CDB */
2085 			switch (SCpnt->cmnd[0]) {
2086 			case UNMAP:
2087 				sd_config_discard(sdkp, SD_LBP_DISABLE);
2088 				break;
2089 			case WRITE_SAME_16:
2090 			case WRITE_SAME:
2091 				if (SCpnt->cmnd[1] & 8) { /* UNMAP */
2092 					sd_config_discard(sdkp, SD_LBP_DISABLE);
2093 				} else {
2094 					sdkp->device->no_write_same = 1;
2095 					sd_config_write_same(sdkp);
2096 					req->rq_flags |= RQF_QUIET;
2097 				}
2098 				break;
2099 			}
2100 		}
2101 		break;
2102 	default:
2103 		break;
2104 	}
2105 
2106  out:
2107 	if (sd_is_zoned(sdkp))
2108 		good_bytes = sd_zbc_complete(SCpnt, good_bytes, &sshdr);
2109 
2110 	SCSI_LOG_HLCOMPLETE(1, scmd_printk(KERN_INFO, SCpnt,
2111 					   "sd_done: completed %d of %d bytes\n",
2112 					   good_bytes, scsi_bufflen(SCpnt)));
2113 
2114 	return good_bytes;
2115 }
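
/*
 * Note: the byte count returned here is handed back to the SCSI
 * midlayer, which completes that many bytes of the request and applies
 * its usual retry and error handling to the remainder.
 */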
2116 
2117 /*
2118  * spinup disk - called only in sd_revalidate_disk()
2119  */
2120 static void
2121 sd_spinup_disk(struct scsi_disk *sdkp)
2122 {
2123 	unsigned char cmd[10];
2124 	unsigned long spintime_expire = 0;
2125 	int retries, spintime;
2126 	unsigned int the_result;
2127 	struct scsi_sense_hdr sshdr;
2128 	int sense_valid = 0;
2129 
2130 	spintime = 0;
2131 
2132 	/* Spin up drives, as required.  This is done at boot time as
2133 	 * well as when the driver is loaded as a module. */
2134 	do {
2135 		retries = 0;
2136 
2137 		do {
2138 			bool media_was_present = sdkp->media_present;
2139 
2140 			cmd[0] = TEST_UNIT_READY;
2141 			memset((void *) &cmd[1], 0, 9);
2142 
2143 			the_result = scsi_execute_req(sdkp->device, cmd,
2144 						      DMA_NONE, NULL, 0,
2145 						      &sshdr, SD_TIMEOUT,
2146 						      sdkp->max_retries, NULL);
2147 
2148 			/*
2149 			 * If the drive has indicated to us that it
2150 			 * doesn't have any media in it, don't bother
2151 			 * with any more polling.
2152 			 */
2153 			if (media_not_present(sdkp, &sshdr)) {
2154 				if (media_was_present)
2155 					sd_printk(KERN_NOTICE, sdkp, "Media removed, stopped polling\n");
2156 				return;
2157 			}
2158 
2159 			if (the_result)
2160 				sense_valid = scsi_sense_valid(&sshdr);
2161 			retries++;
2162 		} while (retries < 3 &&
2163 			 (!scsi_status_is_good(the_result) ||
2164 			  (scsi_status_is_check_condition(the_result) &&
2165 			  sense_valid && sshdr.sense_key == UNIT_ATTENTION)));
2166 
2167 		if (!scsi_status_is_check_condition(the_result)) {
2168 			/* no sense, TUR either succeeded or failed
2169 			 * with a status error */
2170 			if (!spintime && !scsi_status_is_good(the_result)) {
2171 				sd_print_result(sdkp, "Test Unit Ready failed",
2172 						the_result);
2173 			}
2174 			break;
2175 		}
2176 
2177 		/*
2178 		 * The device does not want the automatic start to be issued.
2179 		 */
2180 		if (sdkp->device->no_start_on_add)
2181 			break;
2182 
2183 		if (sense_valid && sshdr.sense_key == NOT_READY) {
2184 			if (sshdr.asc == 4 && sshdr.ascq == 3)
2185 				break;	/* manual intervention required */
2186 			if (sshdr.asc == 4 && sshdr.ascq == 0xb)
2187 				break;	/* standby */
2188 			if (sshdr.asc == 4 && sshdr.ascq == 0xc)
2189 				break;	/* unavailable */
2190 			if (sshdr.asc == 4 && sshdr.ascq == 0x1b)
2191 				break;	/* sanitize in progress */
2192 			/*
2193 			 * Issue command to spin up drive when not ready
2194 			 */
2195 			if (!spintime) {
2196 				sd_printk(KERN_NOTICE, sdkp, "Spinning up disk...");
2197 				cmd[0] = START_STOP;
2198 				cmd[1] = 1;	/* Return immediately */
2199 				memset((void *) &cmd[2], 0, 8);
2200 				cmd[4] = 1;	/* Start spin cycle */
2201 				if (sdkp->device->start_stop_pwr_cond)
2202 					cmd[4] |= 1 << 4;
2203 				scsi_execute_req(sdkp->device, cmd, DMA_NONE,
2204 						 NULL, 0, &sshdr,
2205 						 SD_TIMEOUT, sdkp->max_retries,
2206 						 NULL);
2207 				spintime_expire = jiffies + 100 * HZ;
2208 				spintime = 1;
2209 			}
2210 			/* Wait 1 second for next try */
2211 			msleep(1000);
2212 			printk(KERN_CONT ".");
2213 
2214 		/*
2215 		 * Wait for USB flash devices with slow firmware.
2216 		 * Yes, this sense key/ASC combination shouldn't
2217 		 * occur here.  It's characteristic of these devices.
2218 		 */
2219 		} else if (sense_valid &&
2220 				sshdr.sense_key == UNIT_ATTENTION &&
2221 				sshdr.asc == 0x28) {
2222 			if (!spintime) {
2223 				spintime_expire = jiffies + 5 * HZ;
2224 				spintime = 1;
2225 			}
2226 			/* Wait 1 second for next try */
2227 			msleep(1000);
2228 		} else {
2229 			/* we don't understand the sense code, so it's
2230 			 * probably pointless to loop */
2231 			if (!spintime) {
2232 				sd_printk(KERN_NOTICE, sdkp, "Unit Not Ready\n");
2233 				sd_print_sense_hdr(sdkp, &sshdr);
2234 			}
2235 			break;
2236 		}
2237 
2238 	} while (spintime && time_before_eq(jiffies, spintime_expire));
2239 
2240 	if (spintime) {
2241 		if (scsi_status_is_good(the_result))
2242 			printk(KERN_CONT "ready\n");
2243 		else
2244 			printk(KERN_CONT "not responding...\n");
2245 	}
2246 }
2247 
2248 /*
2249  * Determine whether disk supports Data Integrity Field.
2250  */
2251 static int sd_read_protection_type(struct scsi_disk *sdkp, unsigned char *buffer)
2252 {
2253 	struct scsi_device *sdp = sdkp->device;
2254 	u8 type;
2255 	int ret = 0;
2256 
2257 	if (scsi_device_protection(sdp) == 0 || (buffer[12] & 1) == 0) {
2258 		sdkp->protection_type = 0;
2259 		return ret;
2260 	}
2261 
2262 	type = ((buffer[12] >> 1) & 7) + 1; /* P_TYPE 0 = Type 1 */
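	/* e.g. PROT_EN set with P_TYPE 001b in byte 12 of the READ
	 * CAPACITY(16) response indicates Type 2 protection.
	 */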
2263 
2264 	if (type > T10_PI_TYPE3_PROTECTION)
2265 		ret = -ENODEV;
2266 	else if (scsi_host_dif_capable(sdp->host, type))
2267 		ret = 1;
2268 
2269 	if (sdkp->first_scan || type != sdkp->protection_type)
2270 		switch (ret) {
2271 		case -ENODEV:
2272 			sd_printk(KERN_ERR, sdkp, "formatted with unsupported" \
2273 				  " protection type %u. Disabling disk!\n",
2274 				  type);
2275 			break;
2276 		case 1:
2277 			sd_printk(KERN_NOTICE, sdkp,
2278 				  "Enabling DIF Type %u protection\n", type);
2279 			break;
2280 		case 0:
2281 			sd_printk(KERN_NOTICE, sdkp,
2282 				  "Disabling DIF Type %u protection\n", type);
2283 			break;
2284 		}
2285 
2286 	sdkp->protection_type = type;
2287 
2288 	return ret;
2289 }
2290 
2291 static void read_capacity_error(struct scsi_disk *sdkp, struct scsi_device *sdp,
2292 			struct scsi_sense_hdr *sshdr, int sense_valid,
2293 			int the_result)
2294 {
2295 	if (sense_valid)
2296 		sd_print_sense_hdr(sdkp, sshdr);
2297 	else
2298 		sd_printk(KERN_NOTICE, sdkp, "Sense not available.\n");
2299 
2300 	/*
2301 	 * Set dirty bit for removable devices if not ready -
2302 	 * sometimes drives will not report this properly.
2303 	 */
2304 	if (sdp->removable &&
2305 	    sense_valid && sshdr->sense_key == NOT_READY)
2306 		set_media_not_present(sdkp);
2307 
2308 	/*
2309 	 * We used to set media_present to 0 here to indicate no media
2310 	 * in the drive, but some drives fail read capacity even with
2311 	 * media present, so we can't do that.
2312 	 */
2313 	sdkp->capacity = 0; /* unknown mapped to zero - as usual */
2314 }
2315 
2316 #define RC16_LEN 32
2317 #if RC16_LEN > SD_BUF_SIZE
2318 #error RC16_LEN must not be more than SD_BUF_SIZE
2319 #endif
2320 
2321 #define READ_CAPACITY_RETRIES_ON_RESET	10
2322 
2323 static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp,
2324 						unsigned char *buffer)
2325 {
2326 	unsigned char cmd[16];
2327 	struct scsi_sense_hdr sshdr;
2328 	int sense_valid = 0;
2329 	int the_result;
2330 	int retries = 3, reset_retries = READ_CAPACITY_RETRIES_ON_RESET;
2331 	unsigned int alignment;
2332 	unsigned long long lba;
2333 	unsigned sector_size;
2334 
2335 	if (sdp->no_read_capacity_16)
2336 		return -EINVAL;
2337 
2338 	do {
2339 		memset(cmd, 0, 16);
2340 		cmd[0] = SERVICE_ACTION_IN_16;
2341 		cmd[1] = SAI_READ_CAPACITY_16;
2342 		cmd[13] = RC16_LEN;
2343 		memset(buffer, 0, RC16_LEN);
2344 
2345 		the_result = scsi_execute_req(sdp, cmd, DMA_FROM_DEVICE,
2346 					buffer, RC16_LEN, &sshdr,
2347 					SD_TIMEOUT, sdkp->max_retries, NULL);
2348 
2349 		if (media_not_present(sdkp, &sshdr))
2350 			return -ENODEV;
2351 
2352 		if (the_result > 0) {
2353 			sense_valid = scsi_sense_valid(&sshdr);
2354 			if (sense_valid &&
2355 			    sshdr.sense_key == ILLEGAL_REQUEST &&
2356 			    (sshdr.asc == 0x20 || sshdr.asc == 0x24) &&
2357 			    sshdr.ascq == 0x00)
2358 				/* Invalid Command Operation Code or
2359 				 * Invalid Field in CDB, just retry
2360 				 * silently with RC10 */
2361 				return -EINVAL;
2362 			if (sense_valid &&
2363 			    sshdr.sense_key == UNIT_ATTENTION &&
2364 			    sshdr.asc == 0x29 && sshdr.ascq == 0x00)
2365 				/* Device reset might occur several times,
2366 				 * give it one more chance */
2367 				if (--reset_retries > 0)
2368 					continue;
2369 		}
2370 		retries--;
2371 
2372 	} while (the_result && retries);
2373 
2374 	if (the_result) {
2375 		sd_print_result(sdkp, "Read Capacity(16) failed", the_result);
2376 		read_capacity_error(sdkp, sdp, &sshdr, sense_valid, the_result);
2377 		return -EINVAL;
2378 	}
2379 
2380 	sector_size = get_unaligned_be32(&buffer[8]);
2381 	lba = get_unaligned_be64(&buffer[0]);
2382 
2383 	if (sd_read_protection_type(sdkp, buffer) < 0) {
2384 		sdkp->capacity = 0;
2385 		return -ENODEV;
2386 	}
2387 
2388 	/* Logical blocks per physical block exponent */
2389 	sdkp->physical_block_size = (1 << (buffer[13] & 0xf)) * sector_size;
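	/* e.g. an exponent of 3 on a 512-byte logical block device
	 * yields (1 << 3) * 512 = 4096-byte physical blocks.
	 */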
2390 
2391 	/* RC basis */
2392 	sdkp->rc_basis = (buffer[12] >> 4) & 0x3;
2393 
2394 	/* Lowest aligned logical block */
2395 	alignment = ((buffer[14] & 0x3f) << 8 | buffer[15]) * sector_size;
2396 	blk_queue_alignment_offset(sdp->request_queue, alignment);
2397 	if (alignment && sdkp->first_scan)
2398 		sd_printk(KERN_NOTICE, sdkp,
2399 			  "physical block alignment offset: %u\n", alignment);
2400 
2401 	if (buffer[14] & 0x80) { /* LBPME */
2402 		sdkp->lbpme = 1;
2403 
2404 		if (buffer[14] & 0x40) /* LBPRZ */
2405 			sdkp->lbprz = 1;
2406 
2407 		sd_config_discard(sdkp, SD_LBP_WS16);
2408 	}
2409 
2410 	sdkp->capacity = lba + 1;
2411 	return sector_size;
2412 }
2413 
2414 static int read_capacity_10(struct scsi_disk *sdkp, struct scsi_device *sdp,
2415 						unsigned char *buffer)
2416 {
2417 	unsigned char cmd[16];
2418 	struct scsi_sense_hdr sshdr;
2419 	int sense_valid = 0;
2420 	int the_result;
2421 	int retries = 3, reset_retries = READ_CAPACITY_RETRIES_ON_RESET;
2422 	sector_t lba;
2423 	unsigned sector_size;
2424 
2425 	do {
2426 		cmd[0] = READ_CAPACITY;
2427 		memset(&cmd[1], 0, 9);
2428 		memset(buffer, 0, 8);
2429 
2430 		the_result = scsi_execute_req(sdp, cmd, DMA_FROM_DEVICE,
2431 					buffer, 8, &sshdr,
2432 					SD_TIMEOUT, sdkp->max_retries, NULL);
2433 
2434 		if (media_not_present(sdkp, &sshdr))
2435 			return -ENODEV;
2436 
2437 		if (the_result > 0) {
2438 			sense_valid = scsi_sense_valid(&sshdr);
2439 			if (sense_valid &&
2440 			    sshdr.sense_key == UNIT_ATTENTION &&
2441 			    sshdr.asc == 0x29 && sshdr.ascq == 0x00)
2442 				/* Device reset might occur several times,
2443 				 * give it one more chance */
2444 				if (--reset_retries > 0)
2445 					continue;
2446 		}
2447 		retries--;
2448 
2449 	} while (the_result && retries);
2450 
2451 	if (the_result) {
2452 		sd_print_result(sdkp, "Read Capacity(10) failed", the_result);
2453 		read_capacity_error(sdkp, sdp, &sshdr, sense_valid, the_result);
2454 		return -EINVAL;
2455 	}
2456 
2457 	sector_size = get_unaligned_be32(&buffer[4]);
2458 	lba = get_unaligned_be32(&buffer[0]);
2459 
2460 	if (sdp->no_read_capacity_16 && (lba == 0xffffffff)) {
2461 		/* Some buggy (usb cardreader) devices return an lba of
2462 		   0xffffffff when they want to report a size of 0 (with
2463 		   which they really mean no media is present) */
2464 		sdkp->capacity = 0;
2465 		sdkp->physical_block_size = sector_size;
2466 		return sector_size;
2467 	}
2468 
2469 	sdkp->capacity = lba + 1;
2470 	sdkp->physical_block_size = sector_size;
2471 	return sector_size;
2472 }
2473 
2474 static int sd_try_rc16_first(struct scsi_device *sdp)
2475 {
2476 	if (sdp->host->max_cmd_len < 16)
2477 		return 0;
2478 	if (sdp->try_rc_10_first)
2479 		return 0;
2480 	if (sdp->scsi_level > SCSI_SPC_2)
2481 		return 1;
2482 	if (scsi_device_protection(sdp))
2483 		return 1;
2484 	return 0;
2485 }
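
/*
 * In short: READ CAPACITY(16) is tried first only when the host can
 * issue 16-byte CDBs, the device is not flagged to prefer READ
 * CAPACITY(10), and it either claims SPC-3 or later conformance or
 * advertises protection information (which RC10 cannot report).
 */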
2486 
2487 /*
2488  * read disk capacity
2489  */
2490 static void
2491 sd_read_capacity(struct scsi_disk *sdkp, unsigned char *buffer)
2492 {
2493 	int sector_size;
2494 	struct scsi_device *sdp = sdkp->device;
2495 
2496 	if (sd_try_rc16_first(sdp)) {
2497 		sector_size = read_capacity_16(sdkp, sdp, buffer);
2498 		if (sector_size == -EOVERFLOW)
2499 			goto got_data;
2500 		if (sector_size == -ENODEV)
2501 			return;
2502 		if (sector_size < 0)
2503 			sector_size = read_capacity_10(sdkp, sdp, buffer);
2504 		if (sector_size < 0)
2505 			return;
2506 	} else {
2507 		sector_size = read_capacity_10(sdkp, sdp, buffer);
2508 		if (sector_size == -EOVERFLOW)
2509 			goto got_data;
2510 		if (sector_size < 0)
2511 			return;
2512 		if ((sizeof(sdkp->capacity) > 4) &&
2513 		    (sdkp->capacity > 0xffffffffULL)) {
2514 			int old_sector_size = sector_size;
2515 			sd_printk(KERN_NOTICE, sdkp, "Very big device. "
2516 					"Trying to use READ CAPACITY(16).\n");
2517 			sector_size = read_capacity_16(sdkp, sdp, buffer);
2518 			if (sector_size < 0) {
2519 				sd_printk(KERN_NOTICE, sdkp,
2520 					"Using 0xffffffff as device size\n");
2521 				sdkp->capacity = 1 + (sector_t) 0xffffffff;
2522 				sector_size = old_sector_size;
2523 				goto got_data;
2524 			}
2525 			/* Remember that READ CAPACITY(16) succeeded */
2526 			sdp->try_rc_10_first = 0;
2527 		}
2528 	}
2529 
2530 	/* Some devices are known to return the total number of blocks,
2531 	 * not the highest block number.  Some devices have versions
2532 	 * which do this and others which do not.  Some devices we might
2533 	 * suspect of doing this but we don't know for certain.
2534 	 *
2535 	 * If we know the reported capacity is wrong, decrement it.  If
2536 	 * we can only guess, then assume the number of blocks is even
2537 	 * (usually true but not always) and err on the side of lowering
2538 	 * the capacity.
2539 	 */
2540 	if (sdp->fix_capacity ||
2541 	    (sdp->guess_capacity && (sdkp->capacity & 0x01))) {
2542 		sd_printk(KERN_INFO, sdkp, "Adjusting the sector count "
2543 				"from its reported value: %llu\n",
2544 				(unsigned long long) sdkp->capacity);
2545 		--sdkp->capacity;
2546 	}
2547 
2548 got_data:
2549 	if (sector_size == 0) {
2550 		sector_size = 512;
2551 		sd_printk(KERN_NOTICE, sdkp, "Sector size 0 reported, "
2552 			  "assuming 512.\n");
2553 	}
2554 
2555 	if (sector_size != 512 &&
2556 	    sector_size != 1024 &&
2557 	    sector_size != 2048 &&
2558 	    sector_size != 4096) {
2559 		sd_printk(KERN_NOTICE, sdkp, "Unsupported sector size %d.\n",
2560 			  sector_size);
2561 		/*
2562 		 * The user might want to re-format the drive with
2563 		 * a supported sector size.  Once this happens, it
2564 		 * would be relatively trivial to set the device up.
2565 		 * For this reason, we leave the device in the table.
2566 		 */
2567 		sdkp->capacity = 0;
2568 		/*
2569 		 * Set a bogus sector size so the normal read/write
2570 		 * logic in the block layer will eventually refuse any
2571 		 * request on this device without tripping over
2572 		 * power-of-two sector size assumptions.
2573 		 */
2574 		sector_size = 512;
2575 	}
2576 	blk_queue_logical_block_size(sdp->request_queue, sector_size);
2577 	blk_queue_physical_block_size(sdp->request_queue,
2578 				      sdkp->physical_block_size);
2579 	sdkp->device->sector_size = sector_size;
2580 
2581 	if (sdkp->capacity > 0xffffffff)
2582 		sdp->use_16_for_rw = 1;
2583 
2584 }
2585 
2586 /*
2587  * Print disk capacity
2588  */
2589 static void
2590 sd_print_capacity(struct scsi_disk *sdkp,
2591 		  sector_t old_capacity)
2592 {
2593 	int sector_size = sdkp->device->sector_size;
2594 	char cap_str_2[10], cap_str_10[10];
2595 
2596 	if (!sdkp->first_scan && old_capacity == sdkp->capacity)
2597 		return;
2598 
2599 	string_get_size(sdkp->capacity, sector_size,
2600 			STRING_UNITS_2, cap_str_2, sizeof(cap_str_2));
2601 	string_get_size(sdkp->capacity, sector_size,
2602 			STRING_UNITS_10, cap_str_10, sizeof(cap_str_10));
2603 
2604 	sd_printk(KERN_NOTICE, sdkp,
2605 		  "%llu %d-byte logical blocks: (%s/%s)\n",
2606 		  (unsigned long long)sdkp->capacity,
2607 		  sector_size, cap_str_10, cap_str_2);
2608 
2609 	if (sdkp->physical_block_size != sector_size)
2610 		sd_printk(KERN_NOTICE, sdkp,
2611 			  "%u-byte physical blocks\n",
2612 			  sdkp->physical_block_size);
2613 }
2614 
2615 /* called with buffer of length 512 */
2616 static inline int
2617 sd_do_mode_sense(struct scsi_disk *sdkp, int dbd, int modepage,
2618 		 unsigned char *buffer, int len, struct scsi_mode_data *data,
2619 		 struct scsi_sense_hdr *sshdr)
2620 {
2621 	/*
2622 	 * If we must use MODE SENSE(10), make sure that the buffer length
2623 	 * is at least 8 bytes so that the mode sense header fits.
2624 	 */
2625 	if (sdkp->device->use_10_for_ms && len < 8)
2626 		len = 8;
2627 
2628 	return scsi_mode_sense(sdkp->device, dbd, modepage, buffer, len,
2629 			       SD_TIMEOUT, sdkp->max_retries, data,
2630 			       sshdr);
2631 }
2632 
2633 /*
2634  * read write protect setting, if possible - called only in sd_revalidate_disk()
2635  * called with buffer of length SD_BUF_SIZE
2636  */
2637 static void
2638 sd_read_write_protect_flag(struct scsi_disk *sdkp, unsigned char *buffer)
2639 {
2640 	int res;
2641 	struct scsi_device *sdp = sdkp->device;
2642 	struct scsi_mode_data data;
2643 	int old_wp = sdkp->write_prot;
2644 
2645 	set_disk_ro(sdkp->disk, 0);
2646 	if (sdp->skip_ms_page_3f) {
2647 		sd_first_printk(KERN_NOTICE, sdkp, "Assuming Write Enabled\n");
2648 		return;
2649 	}
2650 
2651 	if (sdp->use_192_bytes_for_3f) {
2652 		res = sd_do_mode_sense(sdkp, 0, 0x3F, buffer, 192, &data, NULL);
2653 	} else {
2654 		/*
2655 		 * First attempt: ask for all pages (0x3F), but only 4 bytes.
2656 		 * We have to start carefully: some devices hang if we ask
2657 		 * for more than is available.
2658 		 */
2659 		res = sd_do_mode_sense(sdkp, 0, 0x3F, buffer, 4, &data, NULL);
2660 
2661 		/*
2662 		 * Second attempt: ask for page 0.  When only page 0 is
2663 		 * implemented, a request for page 3F may return Sense Key
2664 		 * 5: Illegal Request, Sense Code 24: Invalid field in
2665 		 * CDB.
2666 		 */
2667 		if (res < 0)
2668 			res = sd_do_mode_sense(sdkp, 0, 0, buffer, 4, &data, NULL);
2669 
2670 		/*
2671 		 * Third attempt: ask 255 bytes, as we did earlier.
2672 		 */
2673 		if (res < 0)
2674 			res = sd_do_mode_sense(sdkp, 0, 0x3F, buffer, 255,
2675 					       &data, NULL);
2676 	}
2677 
2678 	if (res < 0) {
2679 		sd_first_printk(KERN_WARNING, sdkp,
2680 			  "Test WP failed, assume Write Enabled\n");
2681 	} else {
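		/*
		 * Bit 7 of the device-specific parameter in the mode
		 * parameter header is the WP (write protect) bit.
		 */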
2682 		sdkp->write_prot = ((data.device_specific & 0x80) != 0);
2683 		set_disk_ro(sdkp->disk, sdkp->write_prot);
2684 		if (sdkp->first_scan || old_wp != sdkp->write_prot) {
2685 			sd_printk(KERN_NOTICE, sdkp, "Write Protect is %s\n",
2686 				  sdkp->write_prot ? "on" : "off");
2687 			sd_printk(KERN_DEBUG, sdkp, "Mode Sense: %4ph\n", buffer);
2688 		}
2689 	}
2690 }
2691 
2692 /*
2693  * sd_read_cache_type - called only from sd_revalidate_disk()
2694  * called with buffer of length SD_BUF_SIZE
2695  */
2696 static void
2697 sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)
2698 {
2699 	int len = 0, res;
2700 	struct scsi_device *sdp = sdkp->device;
2701 
2702 	int dbd;
2703 	int modepage;
2704 	int first_len;
2705 	struct scsi_mode_data data;
2706 	struct scsi_sense_hdr sshdr;
2707 	int old_wce = sdkp->WCE;
2708 	int old_rcd = sdkp->RCD;
2709 	int old_dpofua = sdkp->DPOFUA;
2710 
2711 
2712 	if (sdkp->cache_override)
2713 		return;
2714 
2715 	first_len = 4;
2716 	if (sdp->skip_ms_page_8) {
2717 		if (sdp->type == TYPE_RBC)
2718 			goto defaults;
2719 		else {
2720 			if (sdp->skip_ms_page_3f)
2721 				goto defaults;
2722 			modepage = 0x3F;
2723 			if (sdp->use_192_bytes_for_3f)
2724 				first_len = 192;
2725 			dbd = 0;
2726 		}
2727 	} else if (sdp->type == TYPE_RBC) {
2728 		modepage = 6;
2729 		dbd = 8;
2730 	} else {
2731 		modepage = 8;
2732 		dbd = 0;
2733 	}
2734 
2735 	/* cautiously ask */
2736 	res = sd_do_mode_sense(sdkp, dbd, modepage, buffer, first_len,
2737 			&data, &sshdr);
2738 
2739 	if (res < 0)
2740 		goto bad_sense;
2741 
2742 	if (!data.header_length) {
2743 		modepage = 6;
2744 		first_len = 0;
2745 		sd_first_printk(KERN_ERR, sdkp,
2746 				"Missing header in MODE_SENSE response\n");
2747 	}
2748 
2749 	/* that went OK, now ask for the proper length */
2750 	len = data.length;
2751 
2752 	/*
2753 	 * We're only interested in the first three bytes, actually.
2754 	 * But the data cache page is defined for the first 20.
2755 	 */
2756 	if (len < 3)
2757 		goto bad_sense;
2758 	else if (len > SD_BUF_SIZE) {
2759 		sd_first_printk(KERN_NOTICE, sdkp, "Truncating mode parameter "
2760 			  "data from %d to %d bytes\n", len, SD_BUF_SIZE);
2761 		len = SD_BUF_SIZE;
2762 	}
2763 	if (modepage == 0x3F && sdp->use_192_bytes_for_3f)
2764 		len = 192;
2765 
2766 	/* Get the data */
2767 	if (len > first_len)
2768 		res = sd_do_mode_sense(sdkp, dbd, modepage, buffer, len,
2769 				&data, &sshdr);
2770 
2771 	if (!res) {
2772 		int offset = data.header_length + data.block_descriptor_length;
2773 
2774 		while (offset < len) {
2775 			u8 page_code = buffer[offset] & 0x3F;
2776 			u8 spf       = buffer[offset] & 0x40;
2777 
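			/*
			 * SPF (bit 6) selects the sub-page format, which
			 * carries a two-byte page length at bytes 2-3;
			 * page_0 format pages keep a one-byte length at
			 * byte 1, hence the two stride computations in
			 * the walk below.
			 */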
2778 			if (page_code == 8 || page_code == 6) {
2779 				/* We're interested only in the first 3 bytes.
2780 				 */
2781 				if (len - offset <= 2) {
2782 					sd_first_printk(KERN_ERR, sdkp,
2783 						"Incomplete mode parameter "
2784 							"data\n");
2785 					goto defaults;
2786 				} else {
2787 					modepage = page_code;
2788 					goto Page_found;
2789 				}
2790 			} else {
2791 				/* Go to the next page */
2792 				if (spf && len - offset > 3)
2793 					offset += 4 + (buffer[offset+2] << 8) +
2794 						buffer[offset+3];
2795 				else if (!spf && len - offset > 1)
2796 					offset += 2 + buffer[offset+1];
2797 				else {
2798 					sd_first_printk(KERN_ERR, sdkp,
2799 							"Incomplete mode "
2800 							"parameter data\n");
2801 					goto defaults;
2802 				}
2803 			}
2804 		}
2805 
2806 		sd_first_printk(KERN_WARNING, sdkp,
2807 				"No Caching mode page found\n");
2808 		goto defaults;
2809 
2810 	Page_found:
2811 		if (modepage == 8) {
2812 			sdkp->WCE = ((buffer[offset + 2] & 0x04) != 0);
2813 			sdkp->RCD = ((buffer[offset + 2] & 0x01) != 0);
2814 		} else {
2815 			sdkp->WCE = ((buffer[offset + 2] & 0x01) == 0);
2816 			sdkp->RCD = 0;
2817 		}
2818 
2819 		sdkp->DPOFUA = (data.device_specific & 0x10) != 0;
2820 		if (sdp->broken_fua) {
2821 			sd_first_printk(KERN_NOTICE, sdkp, "Disabling FUA\n");
2822 			sdkp->DPOFUA = 0;
2823 		} else if (sdkp->DPOFUA && !sdkp->device->use_10_for_rw &&
2824 			   !sdkp->device->use_16_for_rw) {
2825 			sd_first_printk(KERN_NOTICE, sdkp,
2826 				  "Uses READ/WRITE(6), disabling FUA\n");
2827 			sdkp->DPOFUA = 0;
2828 		}
2829 
2830 		/* No cache flush allowed for write protected devices */
2831 		if (sdkp->WCE && sdkp->write_prot)
2832 			sdkp->WCE = 0;
2833 
2834 		if (sdkp->first_scan || old_wce != sdkp->WCE ||
2835 		    old_rcd != sdkp->RCD || old_dpofua != sdkp->DPOFUA)
2836 			sd_printk(KERN_NOTICE, sdkp,
2837 				  "Write cache: %s, read cache: %s, %s\n",
2838 				  sdkp->WCE ? "enabled" : "disabled",
2839 				  sdkp->RCD ? "disabled" : "enabled",
2840 				  sdkp->DPOFUA ? "supports DPO and FUA"
2841 				  : "doesn't support DPO or FUA");
2842 
2843 		return;
2844 	}
2845 
2846 bad_sense:
2847 	if (scsi_sense_valid(&sshdr) &&
2848 	    sshdr.sense_key == ILLEGAL_REQUEST &&
2849 	    sshdr.asc == 0x24 && sshdr.ascq == 0x0)
2850 		/* Invalid field in CDB */
2851 		sd_first_printk(KERN_NOTICE, sdkp, "Cache data unavailable\n");
2852 	else
2853 		sd_first_printk(KERN_ERR, sdkp,
2854 				"Asking for cache data failed\n");
2855 
2856 defaults:
2857 	if (sdp->wce_default_on) {
2858 		sd_first_printk(KERN_NOTICE, sdkp,
2859 				"Assuming drive cache: write back\n");
2860 		sdkp->WCE = 1;
2861 	} else {
2862 		sd_first_printk(KERN_WARNING, sdkp,
2863 				"Assuming drive cache: write through\n");
2864 		sdkp->WCE = 0;
2865 	}
2866 	sdkp->RCD = 0;
2867 	sdkp->DPOFUA = 0;
2868 }
2869 
2870 /*
2871  * The ATO bit indicates whether the DIF application tag is available
2872  * for use by the operating system.
2873  */
2874 static void sd_read_app_tag_own(struct scsi_disk *sdkp, unsigned char *buffer)
2875 {
2876 	int res, offset;
2877 	struct scsi_device *sdp = sdkp->device;
2878 	struct scsi_mode_data data;
2879 	struct scsi_sense_hdr sshdr;
2880 
2881 	if (sdp->type != TYPE_DISK && sdp->type != TYPE_ZBC)
2882 		return;
2883 
2884 	if (sdkp->protection_type == 0)
2885 		return;
2886 
2887 	res = scsi_mode_sense(sdp, 1, 0x0a, buffer, 36, SD_TIMEOUT,
2888 			      sdkp->max_retries, &data, &sshdr);
2889 
2890 	if (res < 0 || !data.header_length ||
2891 	    data.length < 6) {
2892 		sd_first_printk(KERN_WARNING, sdkp,
2893 			  "getting Control mode page failed, assume no ATO\n");
2894 
2895 		if (scsi_sense_valid(&sshdr))
2896 			sd_print_sense_hdr(sdkp, &sshdr);
2897 
2898 		return;
2899 	}
2900 
2901 	offset = data.header_length + data.block_descriptor_length;
2902 
2903 	if ((buffer[offset] & 0x3f) != 0x0a) {
2904 		sd_first_printk(KERN_ERR, sdkp, "ATO Got wrong page\n");
2905 		return;
2906 	}
2907 
2908 	if ((buffer[offset + 5] & 0x80) == 0)
2909 		return;
2910 
2911 	sdkp->ATO = 1;
2912 
2913 	return;
2914 }
2915 
2916 /**
2917  * sd_read_block_limits - Query disk device for preferred I/O sizes.
2918  * @sdkp: disk to query
2919  */
2920 static void sd_read_block_limits(struct scsi_disk *sdkp)
2921 {
2922 	unsigned int sector_sz = sdkp->device->sector_size;
2923 	const int vpd_len = 64;
2924 	unsigned char *buffer = kmalloc(vpd_len, GFP_KERNEL);
2925 
2926 	if (!buffer ||
2927 	    /* Block Limits VPD */
2928 	    scsi_get_vpd_page(sdkp->device, 0xb0, buffer, vpd_len))
2929 		goto out;
2930 
2931 	blk_queue_io_min(sdkp->disk->queue,
2932 			 get_unaligned_be16(&buffer[6]) * sector_sz);
2933 
2934 	sdkp->max_xfer_blocks = get_unaligned_be32(&buffer[8]);
2935 	sdkp->opt_xfer_blocks = get_unaligned_be32(&buffer[12]);
2936 
2937 	if (buffer[3] == 0x3c) {
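		/*
		 * A page length of 0x3c (64 bytes in total) indicates the
		 * full-size Block Limits page, which also carries the
		 * logical block provisioning limits parsed below.
		 */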
2938 		unsigned int lba_count, desc_count;
2939 
2940 		sdkp->max_ws_blocks = (u32)get_unaligned_be64(&buffer[36]);
2941 
2942 		if (!sdkp->lbpme)
2943 			goto out;
2944 
2945 		lba_count = get_unaligned_be32(&buffer[20]);
2946 		desc_count = get_unaligned_be32(&buffer[24]);
2947 
2948 		if (lba_count && desc_count)
2949 			sdkp->max_unmap_blocks = lba_count;
2950 
2951 		sdkp->unmap_granularity = get_unaligned_be32(&buffer[28]);
2952 
2953 		if (buffer[32] & 0x80)
2954 			sdkp->unmap_alignment =
2955 				get_unaligned_be32(&buffer[32]) & ~(1 << 31);
2956 
2957 		if (!sdkp->lbpvpd) { /* LBP VPD page not provided */
2958 
2959 			if (sdkp->max_unmap_blocks)
2960 				sd_config_discard(sdkp, SD_LBP_UNMAP);
2961 			else
2962 				sd_config_discard(sdkp, SD_LBP_WS16);
2963 
2964 		} else {	/* LBP VPD page tells us what to use */
2965 			if (sdkp->lbpu && sdkp->max_unmap_blocks)
2966 				sd_config_discard(sdkp, SD_LBP_UNMAP);
2967 			else if (sdkp->lbpws)
2968 				sd_config_discard(sdkp, SD_LBP_WS16);
2969 			else if (sdkp->lbpws10)
2970 				sd_config_discard(sdkp, SD_LBP_WS10);
2971 			else
2972 				sd_config_discard(sdkp, SD_LBP_DISABLE);
2973 		}
2974 	}
2975 
2976  out:
2977 	kfree(buffer);
2978 }
2979 
2980 /**
2981  * sd_read_block_characteristics - Query block dev. characteristics
2982  * @sdkp: disk to query
2983  */
2984 static void sd_read_block_characteristics(struct scsi_disk *sdkp)
2985 {
2986 	struct request_queue *q = sdkp->disk->queue;
2987 	unsigned char *buffer;
2988 	u16 rot;
2989 	const int vpd_len = 64;
2990 
2991 	buffer = kmalloc(vpd_len, GFP_KERNEL);
2992 
2993 	if (!buffer ||
2994 	    /* Block Device Characteristics VPD */
2995 	    scsi_get_vpd_page(sdkp->device, 0xb1, buffer, vpd_len))
2996 		goto out;
2997 
2998 	rot = get_unaligned_be16(&buffer[4]);
2999 
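	/*
	 * MEDIUM ROTATION RATE: a value of 1 means a non-rotating medium
	 * (e.g. an SSD), 0 means the rate is not reported, and values
	 * such as 7200 give the nominal spindle speed in rpm.
	 */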
3000 	if (rot == 1) {
3001 		blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
3002 		blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);
3003 	}
3004 
3005 	if (sdkp->device->type == TYPE_ZBC) {
3006 		/* Host-managed */
3007 		blk_queue_set_zoned(sdkp->disk, BLK_ZONED_HM);
3008 	} else {
3009 		sdkp->zoned = (buffer[8] >> 4) & 3;
3010 		if (sdkp->zoned == 1) {
3011 			/* Host-aware */
3012 			blk_queue_set_zoned(sdkp->disk, BLK_ZONED_HA);
3013 		} else {
3014 			/* Regular disk or drive managed disk */
3015 			blk_queue_set_zoned(sdkp->disk, BLK_ZONED_NONE);
3016 		}
3017 	}
3018 
3019 	if (!sdkp->first_scan)
3020 		goto out;
3021 
3022 	if (blk_queue_is_zoned(q)) {
3023 		sd_printk(KERN_NOTICE, sdkp, "Host-%s zoned block device\n",
3024 		      q->limits.zoned == BLK_ZONED_HM ? "managed" : "aware");
3025 	} else {
3026 		if (sdkp->zoned == 1)
3027 			sd_printk(KERN_NOTICE, sdkp,
3028 				  "Host-aware SMR disk used as regular disk\n");
3029 		else if (sdkp->zoned == 2)
3030 			sd_printk(KERN_NOTICE, sdkp,
3031 				  "Drive-managed SMR disk\n");
3032 	}
3033 
3034  out:
3035 	kfree(buffer);
3036 }
3037 
3038 /**
3039  * sd_read_block_provisioning - Query provisioning VPD page
3040  * @sdkp: disk to query
3041  */
3042 static void sd_read_block_provisioning(struct scsi_disk *sdkp)
3043 {
3044 	unsigned char *buffer;
3045 	const int vpd_len = 8;
3046 
3047 	if (sdkp->lbpme == 0)
3048 		return;
3049 
3050 	buffer = kmalloc(vpd_len, GFP_KERNEL);
3051 
3052 	if (!buffer || scsi_get_vpd_page(sdkp->device, 0xb2, buffer, vpd_len))
3053 		goto out;
3054 
3055 	sdkp->lbpvpd	= 1;
3056 	sdkp->lbpu	= (buffer[5] >> 7) & 1;	/* UNMAP */
3057 	sdkp->lbpws	= (buffer[5] >> 6) & 1;	/* WRITE SAME(16) with UNMAP */
3058 	sdkp->lbpws10	= (buffer[5] >> 5) & 1;	/* WRITE SAME(10) with UNMAP */
3059 
3060  out:
3061 	kfree(buffer);
3062 }
3063 
3064 static void sd_read_write_same(struct scsi_disk *sdkp, unsigned char *buffer)
3065 {
3066 	struct scsi_device *sdev = sdkp->device;
3067 
3068 	if (sdev->host->no_write_same) {
3069 		sdev->no_write_same = 1;
3070 
3071 		return;
3072 	}
3073 
3074 	if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE, INQUIRY) < 0) {
3075 		/* too large values might cause issues with arcmsr */
3076 		int vpd_buf_len = 64;
3077 
3078 		sdev->no_report_opcodes = 1;
3079 
3080 		/* Disable WRITE SAME if REPORT SUPPORTED OPERATION
3081 		 * CODES is unsupported and the device has an ATA
3082 		 * Information VPD page (SAT).
3083 		 */
3084 		if (!scsi_get_vpd_page(sdev, 0x89, buffer, vpd_buf_len))
3085 			sdev->no_write_same = 1;
3086 	}
3087 
3088 	if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE, WRITE_SAME_16) == 1)
3089 		sdkp->ws16 = 1;
3090 
3091 	if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE, WRITE_SAME) == 1)
3092 		sdkp->ws10 = 1;
3093 }
3094 
3095 static void sd_read_security(struct scsi_disk *sdkp, unsigned char *buffer)
3096 {
3097 	struct scsi_device *sdev = sdkp->device;
3098 
3099 	if (!sdev->security_supported)
3100 		return;
3101 
3102 	if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE,
3103 			SECURITY_PROTOCOL_IN) == 1 &&
3104 	    scsi_report_opcode(sdev, buffer, SD_BUF_SIZE,
3105 			SECURITY_PROTOCOL_OUT) == 1)
3106 		sdkp->security = 1;
3107 }
3108 
3109 static inline sector_t sd64_to_sectors(struct scsi_disk *sdkp, u8 *buf)
3110 {
3111 	return logical_to_sectors(sdkp->device, get_unaligned_be64(buf));
3112 }
3113 
3114 /**
3115  * sd_read_cpr - Query concurrent positioning ranges
3116  * @sdkp:	disk to query
3117  */
3118 static void sd_read_cpr(struct scsi_disk *sdkp)
3119 {
3120 	struct blk_independent_access_ranges *iars = NULL;
3121 	unsigned char *buffer = NULL;
3122 	unsigned int nr_cpr = 0;
3123 	int i, vpd_len, buf_len = SD_BUF_SIZE;
3124 	u8 *desc;
3125 
3126 	/*
3127 	 * We need to have the capacity set first for the block layer to be
3128 	 * able to check the ranges.
3129 	 */
3130 	if (sdkp->first_scan)
3131 		return;
3132 
3133 	if (!sdkp->capacity)
3134 		goto out;
3135 
3136 	/*
3137 	 * Concurrent Positioning Ranges VPD: there can be at most 256 ranges,
3138 	 * leading to a maximum page size of 64 + 256*32 bytes.
3139 	 */
3140 	buf_len = 64 + 256*32;
3141 	buffer = kmalloc(buf_len, GFP_KERNEL);
3142 	if (!buffer || scsi_get_vpd_page(sdkp->device, 0xb9, buffer, buf_len))
3143 		goto out;
3144 
3145 	/* We must have at least a 64B header and one 32B range descriptor */
3146 	vpd_len = get_unaligned_be16(&buffer[2]) + 4;
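	/* The PAGE LENGTH field at bytes 2-3 excludes the 4-byte header. */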
3147 	if (vpd_len > buf_len || vpd_len < 64 + 32 || (vpd_len & 31)) {
3148 		sd_printk(KERN_ERR, sdkp,
3149 			  "Invalid Concurrent Positioning Ranges VPD page\n");
3150 		goto out;
3151 	}
3152 
3153 	nr_cpr = (vpd_len - 64) / 32;
3154 	if (nr_cpr == 1) {
3155 		nr_cpr = 0;
3156 		goto out;
3157 	}
3158 
3159 	iars = disk_alloc_independent_access_ranges(sdkp->disk, nr_cpr);
3160 	if (!iars) {
3161 		nr_cpr = 0;
3162 		goto out;
3163 	}
3164 
3165 	desc = &buffer[64];
3166 	for (i = 0; i < nr_cpr; i++, desc += 32) {
3167 		if (desc[0] != i) {
3168 			sd_printk(KERN_ERR, sdkp,
3169 				"Invalid Concurrent Positioning Range number\n");
3170 			nr_cpr = 0;
3171 			break;
3172 		}
3173 
3174 		iars->ia_range[i].sector = sd64_to_sectors(sdkp, desc + 8);
3175 		iars->ia_range[i].nr_sectors = sd64_to_sectors(sdkp, desc + 16);
3176 	}
3177 
3178 out:
3179 	disk_set_independent_access_ranges(sdkp->disk, iars);
3180 	if (nr_cpr && sdkp->nr_actuators != nr_cpr) {
3181 		sd_printk(KERN_NOTICE, sdkp,
3182 			  "%u concurrent positioning ranges\n", nr_cpr);
3183 		sdkp->nr_actuators = nr_cpr;
3184 	}
3185 
3186 	kfree(buffer);
3187 }
3188 
3189 /*
3190  * Determine the device's preferred I/O size for reads and writes
3191  * unless the reported value is unreasonably small, large, not a
3192  * multiple of the physical block size, or simply garbage.
3193  */
3194 static bool sd_validate_opt_xfer_size(struct scsi_disk *sdkp,
3195 				      unsigned int dev_max)
3196 {
3197 	struct scsi_device *sdp = sdkp->device;
3198 	unsigned int opt_xfer_bytes =
3199 		logical_to_bytes(sdp, sdkp->opt_xfer_blocks);
3200 
3201 	if (sdkp->opt_xfer_blocks == 0)
3202 		return false;
3203 
3204 	if (sdkp->opt_xfer_blocks > dev_max) {
3205 		sd_first_printk(KERN_WARNING, sdkp,
3206 				"Optimal transfer size %u logical blocks " \
3207 				"> dev_max (%u logical blocks)\n",
3208 				sdkp->opt_xfer_blocks, dev_max);
3209 		return false;
3210 	}
3211 
3212 	if (sdkp->opt_xfer_blocks > SD_DEF_XFER_BLOCKS) {
3213 		sd_first_printk(KERN_WARNING, sdkp,
3214 				"Optimal transfer size %u logical blocks " \
3215 				"> sd driver limit (%u logical blocks)\n",
3216 				sdkp->opt_xfer_blocks, SD_DEF_XFER_BLOCKS);
3217 		return false;
3218 	}
3219 
3220 	if (opt_xfer_bytes < PAGE_SIZE) {
3221 		sd_first_printk(KERN_WARNING, sdkp,
3222 				"Optimal transfer size %u bytes < " \
3223 				"PAGE_SIZE (%u bytes)\n",
3224 				opt_xfer_bytes, (unsigned int)PAGE_SIZE);
3225 		return false;
3226 	}
3227 
3228 	if (opt_xfer_bytes & (sdkp->physical_block_size - 1)) {
3229 		sd_first_printk(KERN_WARNING, sdkp,
3230 				"Optimal transfer size %u bytes not a " \
3231 				"multiple of physical block size (%u bytes)\n",
3232 				opt_xfer_bytes, sdkp->physical_block_size);
3233 		return false;
3234 	}
3235 
3236 	sd_first_printk(KERN_INFO, sdkp, "Optimal transfer size %u bytes\n",
3237 			opt_xfer_bytes);
3238 	return true;
3239 }
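
/*
 * Example of a rejected value (for illustration only): a disk with
 * 512-byte logical and 4096-byte physical blocks that reports an
 * OPTIMAL TRANSFER LENGTH of 0xffff blocks would yield 33553920 bytes,
 * which is not a multiple of 4096 and is therefore ignored in favor of
 * the defaults.
 */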
3240 
3241 /**
3242  *	sd_revalidate_disk - called the first time a new disk is seen,
3243  *	performs disk spin up, read_capacity, etc.
3244  *	@disk: struct gendisk we care about
3245  **/
3246 static int sd_revalidate_disk(struct gendisk *disk)
3247 {
3248 	struct scsi_disk *sdkp = scsi_disk(disk);
3249 	struct scsi_device *sdp = sdkp->device;
3250 	struct request_queue *q = sdkp->disk->queue;
3251 	sector_t old_capacity = sdkp->capacity;
3252 	unsigned char *buffer;
3253 	unsigned int dev_max, rw_max;
3254 
3255 	SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp,
3256 				      "sd_revalidate_disk\n"));
3257 
3258 	/*
3259 	 * If the device is offline, don't try to read capacity or any
3260 	 * of the other niceties.
3261 	 */
3262 	if (!scsi_device_online(sdp))
3263 		goto out;
3264 
3265 	buffer = kmalloc(SD_BUF_SIZE, GFP_KERNEL);
3266 	if (!buffer) {
3267 		sd_printk(KERN_WARNING, sdkp, "sd_revalidate_disk: Memory "
3268 			  "allocation failure.\n");
3269 		goto out;
3270 	}
3271 
3272 	sd_spinup_disk(sdkp);
3273 
3274 	/*
3275 	 * Without media there is no reason to ask; moreover, some devices
3276 	 * react badly if we do.
3277 	 */
3278 	if (sdkp->media_present) {
3279 		sd_read_capacity(sdkp, buffer);
3280 
3281 		/*
3282 		 * set the default to rotational.  All non-rotational devices
3283 		 * support the block characteristics VPD page, which will
3284 		 * cause this to be updated correctly and any device which
3285 		 * doesn't support it should be treated as rotational.
3286 		 */
3287 		blk_queue_flag_clear(QUEUE_FLAG_NONROT, q);
3288 		blk_queue_flag_set(QUEUE_FLAG_ADD_RANDOM, q);
3289 
3290 		if (scsi_device_supports_vpd(sdp)) {
3291 			sd_read_block_provisioning(sdkp);
3292 			sd_read_block_limits(sdkp);
3293 			sd_read_block_characteristics(sdkp);
3294 			sd_zbc_read_zones(sdkp, buffer);
3295 		}
3296 
3297 		sd_print_capacity(sdkp, old_capacity);
3298 
3299 		sd_read_write_protect_flag(sdkp, buffer);
3300 		sd_read_cache_type(sdkp, buffer);
3301 		sd_read_app_tag_own(sdkp, buffer);
3302 		sd_read_write_same(sdkp, buffer);
3303 		sd_read_security(sdkp, buffer);
3304 		sd_read_cpr(sdkp);
3305 	}
3306 
3307 	/*
3308 	 * We now have all cache related info, determine how we deal
3309 	 * with flush requests.
3310 	 */
3311 	sd_set_flush_flag(sdkp);
3312 
3313 	/* Initial block count limit based on CDB TRANSFER LENGTH field size. */
3314 	dev_max = sdp->use_16_for_rw ? SD_MAX_XFER_BLOCKS : SD_DEF_XFER_BLOCKS;
3315 
3316 	/* Some devices report a maximum block count for READ/WRITE requests. */
3317 	dev_max = min_not_zero(dev_max, sdkp->max_xfer_blocks);
3318 	q->limits.max_dev_sectors = logical_to_sectors(sdp, dev_max);
3319 
3320 	if (sd_validate_opt_xfer_size(sdkp, dev_max)) {
3321 		q->limits.io_opt = logical_to_bytes(sdp, sdkp->opt_xfer_blocks);
3322 		rw_max = logical_to_sectors(sdp, sdkp->opt_xfer_blocks);
3323 	} else {
3324 		q->limits.io_opt = 0;
3325 		rw_max = min_not_zero(logical_to_sectors(sdp, dev_max),
3326 				      (sector_t)BLK_DEF_MAX_SECTORS);
3327 	}
3328 
3329 	/* Do not exceed controller limit */
3330 	rw_max = min(rw_max, queue_max_hw_sectors(q));
3331 
3332 	/*
3333 	 * Only update max_sectors if previously unset or if the current value
3334 	 * exceeds the capabilities of the hardware.
3335 	 */
3336 	if (sdkp->first_scan ||
3337 	    q->limits.max_sectors > q->limits.max_dev_sectors ||
3338 	    q->limits.max_sectors > q->limits.max_hw_sectors)
3339 		q->limits.max_sectors = rw_max;
3340 
3341 	sdkp->first_scan = 0;
3342 
3343 	set_capacity_and_notify(disk, logical_to_sectors(sdp, sdkp->capacity));
3344 	sd_config_write_same(sdkp);
3345 	kfree(buffer);
3346 
3347 	/*
3348 	 * For a zoned drive, revalidating the zones can be done only once
3349 	 * the gendisk capacity is set. So if this fails, set back the gendisk
3350 	 * capacity to 0.
3351 	 */
3352 	if (sd_zbc_revalidate_zones(sdkp))
3353 		set_capacity_and_notify(disk, 0);
3354 
3355  out:
3356 	return 0;
3357 }
3358 
3359 /**
3360  *	sd_unlock_native_capacity - unlock native capacity
3361  *	@disk: struct gendisk to set capacity for
3362  *
3363  *	Block layer calls this function if it detects that partitions
3364  *	on @disk reach beyond the end of the device.  If the SCSI host
3365 	 *	implements the ->unlock_native_capacity() method, it's invoked to
3366  *	give it a chance to adjust the device capacity.
3367  *
3368  *	CONTEXT:
3369  *	Defined by block layer.  Might sleep.
3370  */
3371 static void sd_unlock_native_capacity(struct gendisk *disk)
3372 {
3373 	struct scsi_device *sdev = scsi_disk(disk)->device;
3374 
3375 	if (sdev->host->hostt->unlock_native_capacity)
3376 		sdev->host->hostt->unlock_native_capacity(sdev);
3377 }
3378 
3379 /**
3380  *	sd_format_disk_name - format disk name
3381  *	@prefix: name prefix - ie. "sd" for SCSI disks
3382  *	@index: index of the disk to format name for
3383  *	@buf: output buffer
3384  *	@buflen: length of the output buffer
3385  *
3386 	 *	SCSI disk names start at sda.  The 26th device is sdz and the
3387 	 *	27th is sdaa.  The last two-lettered name is sdzz, which is
3388 	 *	followed by sdaaa.
3389  *
3390 	 *	This is basically base-26 counting with one extra 'nil' entry
3391 	 *	at the beginning from the second digit on, and can be
3392 	 *	computed using a method similar to base-26 conversion, with
3393 	 *	the index shifted by -1 after each digit is computed.
3394  *
3395  *	CONTEXT:
3396  *	Don't care.
3397  *
3398  *	RETURNS:
3399  *	0 on success, -errno on failure.
3400  */
3401 static int sd_format_disk_name(char *prefix, int index, char *buf, int buflen)
3402 {
3403 	const int base = 'z' - 'a' + 1;
3404 	char *begin = buf + strlen(prefix);
3405 	char *end = buf + buflen;
3406 	char *p;
3407 	int unit;
3408 
3409 	p = end - 1;
3410 	*p = '\0';
3411 	unit = base;
3412 	do {
3413 		if (p == begin)
3414 			return -EINVAL;
3415 		*--p = 'a' + (index % unit);
3416 		index = (index / unit) - 1;
3417 	} while (index >= 0);
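	/* e.g. index 701: 701 % 26 = 25 -> 'z', then (701 / 26) - 1 = 25
	 * -> 'z' again, giving "zz" and hence the name "sdzz".
	 */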
3418 
3419 	memmove(begin, p, end - p);
3420 	memcpy(buf, prefix, strlen(prefix));
3421 
3422 	return 0;
3423 }
3424 
3425 /**
3426  *	sd_probe - called during driver initialization and whenever a
3427  *	new scsi device is attached to the system. It is called once
3428  *	for each scsi device (not just disks) present.
3429  *	@dev: pointer to device object
3430  *
3431  *	Returns 0 if successful (or not interested in this scsi device
3432  *	(e.g. scanner)); 1 when there is an error.
3433  *
3434  *	Note: this function is invoked from the scsi mid-level.
3435  *	This function sets up the mapping between a given
3436  *	<host,channel,id,lun> (found in sdp) and new device name
3437  *	(e.g. /dev/sda). More precisely it is the block device major
3438  *	and minor number that is chosen here.
3439  *
3440 	 *	Assume sd_probe is not re-entrant (for the time being).
3441 	 *	Also think about sd_probe() and sd_remove() running concurrently.
3442  **/
3443 static int sd_probe(struct device *dev)
3444 {
3445 	struct scsi_device *sdp = to_scsi_device(dev);
3446 	struct scsi_disk *sdkp;
3447 	struct gendisk *gd;
3448 	int index;
3449 	int error;
3450 
3451 	scsi_autopm_get_device(sdp);
3452 	error = -ENODEV;
3453 	if (sdp->type != TYPE_DISK &&
3454 	    sdp->type != TYPE_ZBC &&
3455 	    sdp->type != TYPE_MOD &&
3456 	    sdp->type != TYPE_RBC)
3457 		goto out;
3458 
3459 	if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED) && sdp->type == TYPE_ZBC) {
3460 		sdev_printk(KERN_WARNING, sdp,
3461 			    "Unsupported ZBC host-managed device.\n");
3462 		goto out;
3463 	}
3464 
3465 	SCSI_LOG_HLQUEUE(3, sdev_printk(KERN_INFO, sdp,
3466 					"sd_probe\n"));
3467 
3468 	error = -ENOMEM;
3469 	sdkp = kzalloc(sizeof(*sdkp), GFP_KERNEL);
3470 	if (!sdkp)
3471 		goto out;
3472 
3473 	gd = __alloc_disk_node(sdp->request_queue, NUMA_NO_NODE,
3474 			       &sd_bio_compl_lkclass);
3475 	if (!gd)
3476 		goto out_free;
3477 
3478 	index = ida_alloc(&sd_index_ida, GFP_KERNEL);
3479 	if (index < 0) {
3480 		sdev_printk(KERN_WARNING, sdp, "sd_probe: memory exhausted.\n");
3481 		goto out_put;
3482 	}
3483 
3484 	error = sd_format_disk_name("sd", index, gd->disk_name, DISK_NAME_LEN);
3485 	if (error) {
3486 		sdev_printk(KERN_WARNING, sdp, "SCSI disk (sd) name length exceeded.\n");
3487 		goto out_free_index;
3488 	}
3489 
3490 	sdkp->device = sdp;
3491 	sdkp->disk = gd;
3492 	sdkp->index = index;
3493 	sdkp->max_retries = SD_MAX_RETRIES;
3494 	atomic_set(&sdkp->openers, 0);
3495 	atomic_set(&sdkp->device->ioerr_cnt, 0);
3496 
3497 	if (!sdp->request_queue->rq_timeout) {
3498 		if (sdp->type != TYPE_MOD)
3499 			blk_queue_rq_timeout(sdp->request_queue, SD_TIMEOUT);
3500 		else
3501 			blk_queue_rq_timeout(sdp->request_queue,
3502 					     SD_MOD_TIMEOUT);
3503 	}
3504 
3505 	device_initialize(&sdkp->disk_dev);
3506 	sdkp->disk_dev.parent = get_device(dev);
3507 	sdkp->disk_dev.class = &sd_disk_class;
3508 	dev_set_name(&sdkp->disk_dev, "%s", dev_name(dev));
3509 
3510 	error = device_add(&sdkp->disk_dev);
3511 	if (error) {
3512 		put_device(&sdkp->disk_dev);
3513 		goto out;
3514 	}
3515 
3516 	dev_set_drvdata(dev, sdkp);
3517 
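	/*
	 * Disk index bits 7:4 select one of the 16 sd majors; bits 3:0
	 * (shifted up to leave room for SD_MINORS minors per disk) and
	 * bits 19:8 form the first minor.
	 */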
3518 	gd->major = sd_major((index & 0xf0) >> 4);
3519 	gd->first_minor = ((index & 0xf) << 4) | (index & 0xfff00);
3520 	gd->minors = SD_MINORS;
3521 
3522 	gd->fops = &sd_fops;
3523 	gd->private_data = sdkp;
3524 
3525 	/* defaults, until the device tells us otherwise */
3526 	sdp->sector_size = 512;
3527 	sdkp->capacity = 0;
3528 	sdkp->media_present = 1;
3529 	sdkp->write_prot = 0;
3530 	sdkp->cache_override = 0;
3531 	sdkp->WCE = 0;
3532 	sdkp->RCD = 0;
3533 	sdkp->ATO = 0;
3534 	sdkp->first_scan = 1;
3535 	sdkp->max_medium_access_timeouts = SD_MAX_MEDIUM_TIMEOUTS;
3536 
3537 	sd_revalidate_disk(gd);
3538 
3539 	if (sdp->removable) {
3540 		gd->flags |= GENHD_FL_REMOVABLE;
3541 		gd->events |= DISK_EVENT_MEDIA_CHANGE;
3542 		gd->event_flags = DISK_EVENT_FLAG_POLL | DISK_EVENT_FLAG_UEVENT;
3543 	}
3544 
3545 	blk_pm_runtime_init(sdp->request_queue, dev);
3546 	if (sdp->rpm_autosuspend) {
3547 		pm_runtime_set_autosuspend_delay(dev,
3548 			sdp->host->hostt->rpm_autosuspend_delay);
3549 	}
3550 
3551 	error = device_add_disk(dev, gd, NULL);
3552 	if (error) {
3553 		put_device(&sdkp->disk_dev);
3554 		goto out;
3555 	}
3556 
3557 	if (sdkp->capacity)
3558 		sd_dif_config_host(sdkp);
3559 
3560 	sd_revalidate_disk(gd);
3561 
3562 	if (sdkp->security) {
3563 		sdkp->opal_dev = init_opal_dev(sdkp, &sd_sec_submit);
3564 		if (sdkp->opal_dev)
3565 			sd_printk(KERN_NOTICE, sdkp, "supports TCG Opal\n");
3566 	}
3567 
3568 	sd_printk(KERN_NOTICE, sdkp, "Attached SCSI %sdisk\n",
3569 		  sdp->removable ? "removable " : "");
3570 	scsi_autopm_put_device(sdp);
3571 
3572 	return 0;
3573 
3574  out_free_index:
3575 	ida_free(&sd_index_ida, index);
3576  out_put:
3577 	put_disk(gd);
3578  out_free:
3579 	sd_zbc_release_disk(sdkp);
3580 	kfree(sdkp);
3581  out:
3582 	scsi_autopm_put_device(sdp);
3583 	return error;
3584 }
3585 
3586 /**
3587  *	sd_remove - called whenever a scsi disk (previously recognized by
3588  *	sd_probe) is detached from the system. It is also called (once
3589  *	for each attached disk) during sd module unload.
3590  *	@dev: pointer to device object
3591  *
3592  *	Note: this function is invoked from the scsi mid-level.
3593  *	This function potentially frees up a device name (e.g. /dev/sdc)
3594  *	that could be re-used by a subsequent sd_probe().
3595  *	The module-unload path does not apply when sd is built in, since a built-in driver is never "exit-ed".
3596  **/
3597 static int sd_remove(struct device *dev)
3598 {
3599 	struct scsi_disk *sdkp = dev_get_drvdata(dev);
3600 
3601 	scsi_autopm_get_device(sdkp->device);
3602 
3603 	device_del(&sdkp->disk_dev);
3604 	del_gendisk(sdkp->disk);
3605 	sd_shutdown(dev);
3606 
3607 	put_disk(sdkp->disk);
3608 	return 0;
3609 }
3610 
3611 static void scsi_disk_release(struct device *dev)
3612 {
3613 	struct scsi_disk *sdkp = to_scsi_disk(dev);
3614 
3615 	ida_free(&sd_index_ida, sdkp->index);
3616 	sd_zbc_release_disk(sdkp);
3617 	put_device(&sdkp->device->sdev_gendev);
3618 	free_opal_dev(sdkp->opal_dev);
3619 
3620 	kfree(sdkp);
3621 }
3622 
3623 static int sd_start_stop_device(struct scsi_disk *sdkp, int start)
3624 {
3625 	unsigned char cmd[6] = { START_STOP };	/* START_VALID */
3626 	struct scsi_sense_hdr sshdr;
3627 	struct scsi_device *sdp = sdkp->device;
3628 	int res;
3629 
3630 	if (start)
3631 		cmd[4] |= 1;	/* START */
3632 
3633 	if (sdp->start_stop_pwr_cond)
3634 		cmd[4] |= start ? 1 << 4 : 3 << 4;	/* Active or Standby */
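	/*
	 * Resulting START STOP UNIT CDB, byte 4: bit 0 is START and
	 * bits 4-7 are the POWER CONDITION field (0x1 = ACTIVE,
	 * 0x3 = STANDBY per SBC).  E.g. a stop with
	 * start_stop_pwr_cond set sends 0x30, a start sends 0x11.
	 */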
3635 
3636 	if (!scsi_device_online(sdp))
3637 		return -ENODEV;
3638 
3639 	res = scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL, &sshdr,
3640 			SD_TIMEOUT, sdkp->max_retries, 0, RQF_PM, NULL);
3641 	if (res) {
3642 		sd_print_result(sdkp, "Start/Stop Unit failed", res);
3643 		if (res > 0 && scsi_sense_valid(&sshdr)) {
3644 			sd_print_sense_hdr(sdkp, &sshdr);
3645 			/* 0x3a is medium not present */
3646 			if (sshdr.asc == 0x3a)
3647 				res = 0;
3648 		}
3649 	}
3650 
3651 	/* SCSI error codes must not go to the generic layer */
3652 	if (res)
3653 		return -EIO;
3654 
3655 	return 0;
3656 }
3657 
3658 /*
3659  * Send a SYNCHRONIZE CACHE command down to the device through the
3660  * normal SCSI command structure and wait for it to complete.  If the
3661  * device manages start/stop, also stop the disk (except on restart).
3662  */
3663 static void sd_shutdown(struct device *dev)
3664 {
3665 	struct scsi_disk *sdkp = dev_get_drvdata(dev);
3666 
3667 	if (!sdkp)
3668 		return;         /* can happen if sd_probe() never completed */
3669 
3670 	if (pm_runtime_suspended(dev))
3671 		return;
3672 
3673 	if (sdkp->WCE && sdkp->media_present) {
3674 		sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n");
3675 		sd_sync_cache(sdkp, NULL);
3676 	}
3677 
3678 	if (system_state != SYSTEM_RESTART && sdkp->device->manage_start_stop) {
3679 		sd_printk(KERN_NOTICE, sdkp, "Stopping disk\n");
3680 		sd_start_stop_device(sdkp, 0);
3681 	}
3682 }
3683 
3684 static int sd_suspend_common(struct device *dev, bool ignore_stop_errors)
3685 {
3686 	struct scsi_disk *sdkp = dev_get_drvdata(dev);
3687 	struct scsi_sense_hdr sshdr;
3688 	int ret = 0;
3689 
3690 	if (!sdkp)	/* E.g.: runtime suspend following sd_remove() */
3691 		return 0;
3692 
3693 	if (sdkp->WCE && sdkp->media_present) {
3694 		sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n");
3695 		ret = sd_sync_cache(sdkp, &sshdr);
3696 
3697 		if (ret) {
3698 			/* ignore OFFLINE device */
3699 			if (ret == -ENODEV)
3700 				return 0;
3701 
3702 			if (!scsi_sense_valid(&sshdr) ||
3703 			    sshdr.sense_key != ILLEGAL_REQUEST)
3704 				return ret;
3705 
3706 			/*
3707 			 * sshdr.sense_key == ILLEGAL_REQUEST means this drive
3708 			 * doesn't support sync. There's not much to do and
3709 			 * suspend shouldn't fail.
3710 			 */
3711 			ret = 0;
3712 		}
3713 	}
3714 
3715 	if (sdkp->device->manage_start_stop) {
3716 		sd_printk(KERN_NOTICE, sdkp, "Stopping disk\n");
3717 		/* an error is not worth aborting a system sleep */
3718 		ret = sd_start_stop_device(sdkp, 0);
3719 		if (ignore_stop_errors)
3720 			ret = 0;
3721 	}
3722 
3723 	return ret;
3724 }
3725 
3726 static int sd_suspend_system(struct device *dev)
3727 {
3728 	if (pm_runtime_suspended(dev))
3729 		return 0;
3730 
3731 	return sd_suspend_common(dev, true);
3732 }
3733 
3734 static int sd_suspend_runtime(struct device *dev)
3735 {
3736 	return sd_suspend_common(dev, false);
3737 }
3738 
3739 static int sd_resume(struct device *dev)
3740 {
3741 	struct scsi_disk *sdkp = dev_get_drvdata(dev);
3742 	int ret;
3743 
3744 	if (!sdkp)	/* E.g.: runtime resume at the start of sd_probe() */
3745 		return 0;
3746 
3747 	if (!sdkp->device->manage_start_stop)
3748 		return 0;
3749 
3750 	sd_printk(KERN_NOTICE, sdkp, "Starting disk\n");
3751 	ret = sd_start_stop_device(sdkp, 1);
3752 	if (!ret)
3753 		opal_unlock_from_suspend(sdkp->opal_dev);
3754 	return ret;
3755 }
3756 
3757 static int sd_resume_system(struct device *dev)
3758 {
3759 	if (pm_runtime_suspended(dev))
3760 		return 0;
3761 
3762 	return sd_resume(dev);
3763 }
3764 
3765 static int sd_resume_runtime(struct device *dev)
3766 {
3767 	struct scsi_disk *sdkp = dev_get_drvdata(dev);
3768 	struct scsi_device *sdp;
3769 
3770 	if (!sdkp)	/* E.g.: runtime resume at the start of sd_probe() */
3771 		return 0;
3772 
3773 	sdp = sdkp->device;
3774 
3775 	if (sdp->ignore_media_change) {
3776 		/* clear the device's sense data */
3777 		static const u8 cmd[10] = { REQUEST_SENSE };
3778 
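		/*
		 * The allocation length (byte 4) is zero, so no sense
		 * data is actually transferred; issuing the command is
		 * enough for the device to deliver, and thereby
		 * discard, the sense / unit-attention state pending
		 * after the media change.
		 */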
3779 		if (scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL,
3780 				 NULL, sdp->request_queue->rq_timeout, 1, 0,
3781 				 RQF_PM, NULL))
3782 			sd_printk(KERN_NOTICE, sdkp,
3783 				  "Failed to clear sense data\n");
3784 	}
3785 
3786 	return sd_resume(dev);
3787 }
3788 
3789 /**
3790  *	init_sd - entry point for this driver (both when built in and when
3791  *	loaded as a module).
3792  *
3793  *	Note: this function registers this driver with the scsi mid-level.
3794  **/
3795 static int __init init_sd(void)
3796 {
3797 	int majors = 0, i, err;
3798 
3799 	SCSI_LOG_HLQUEUE(3, printk("init_sd: sd driver entry point\n"));
3800 
3801 	for (i = 0; i < SD_MAJORS; i++) {
3802 		if (__register_blkdev(sd_major(i), "sd", sd_default_probe))
3803 			continue;
3804 		majors++;
3805 	}
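	/*
	 * For reference, sd_major() spreads the SD_MAJORS majors over
	 * the historical SCSI disk block majors: 8, then 65-71, then
	 * 128-135.
	 */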
3806 
3807 	if (!majors)
3808 		return -ENODEV;
3809 
3810 	err = class_register(&sd_disk_class);
3811 	if (err)
3812 		goto err_out;
3813 
3814 	sd_cdb_cache = kmem_cache_create("sd_ext_cdb", SD_EXT_CDB_SIZE,
3815 					 0, 0, NULL);
3816 	if (!sd_cdb_cache) {
3817 		printk(KERN_ERR "sd: can't init extended cdb cache\n");
3818 		err = -ENOMEM;
3819 		goto err_out_class;
3820 	}
3821 
3822 	sd_cdb_pool = mempool_create_slab_pool(SD_MEMPOOL_SIZE, sd_cdb_cache);
3823 	if (!sd_cdb_pool) {
3824 		printk(KERN_ERR "sd: can't init extended cdb pool\n");
3825 		err = -ENOMEM;
3826 		goto err_out_cache;
3827 	}
3828 
3829 	sd_page_pool = mempool_create_page_pool(SD_MEMPOOL_SIZE, 0);
3830 	if (!sd_page_pool) {
3831 		printk(KERN_ERR "sd: can't init discard page pool\n");
3832 		err = -ENOMEM;
3833 		goto err_out_ppool;
3834 	}
3835 
3836 	err = scsi_register_driver(&sd_template.gendrv);
3837 	if (err)
3838 		goto err_out_driver;
3839 
3840 	return 0;
3841 
3842 err_out_driver:
3843 	mempool_destroy(sd_page_pool);
3844 
3845 err_out_ppool:
3846 	mempool_destroy(sd_cdb_pool);
3847 
3848 err_out_cache:
3849 	kmem_cache_destroy(sd_cdb_cache);
3850 
3851 err_out_class:
3852 	class_unregister(&sd_disk_class);
3853 err_out:
3854 	for (i = 0; i < SD_MAJORS; i++)
3855 		unregister_blkdev(sd_major(i), "sd");
3856 	return err;
3857 }
3858 
3859 /**
3860  *	exit_sd - exit point for this driver (when it is a module).
3861  *
3862  *	Note: this function unregisters this driver from the scsi mid-level.
3863  **/
3864 static void __exit exit_sd(void)
3865 {
3866 	int i;
3867 
3868 	SCSI_LOG_HLQUEUE(3, printk("exit_sd: exiting sd driver\n"));
3869 
3870 	scsi_unregister_driver(&sd_template.gendrv);
3871 	mempool_destroy(sd_cdb_pool);
3872 	mempool_destroy(sd_page_pool);
3873 	kmem_cache_destroy(sd_cdb_cache);
3874 
3875 	class_unregister(&sd_disk_class);
3876 
3877 	for (i = 0; i < SD_MAJORS; i++)
3878 		unregister_blkdev(sd_major(i), "sd");
3879 }
3880 
3881 module_init(init_sd);
3882 module_exit(exit_sd);
3883 
3884 void sd_print_sense_hdr(struct scsi_disk *sdkp, struct scsi_sense_hdr *sshdr)
3885 {
3886 	scsi_print_sense_hdr(sdkp->device,
3887 			     sdkp->disk ? sdkp->disk->disk_name : NULL, sshdr);
3888 }
3889 
3890 void sd_print_result(const struct scsi_disk *sdkp, const char *msg, int result)
3891 {
3892 	const char *hb_string = scsi_hostbyte_string(result);
3893 
3894 	if (hb_string)
3895 		sd_printk(KERN_INFO, sdkp,
3896 			  "%s: Result: hostbyte=%s driverbyte=%s\n", msg,
3897 			  hb_string,
3898 			  "DRIVER_OK");
3899 	else
3900 		sd_printk(KERN_INFO, sdkp,
3901 			  "%s: Result: hostbyte=0x%02x driverbyte=%s\n",
3902 			  msg, host_byte(result), "DRIVER_OK");
3903 }
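
/*
 * Illustrative sd_print_result() output (made-up example, not captured
 * from a real system):
 *   sd 0:0:0:0: [sda] Start/Stop Unit failed: Result: hostbyte=DID_NO_CONNECT driverbyte=DRIVER_OK
 */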
3904