xref: /linux/drivers/scsi/sd.c (revision 7a5f1cd22d47f8ca4b760b6334378ae42c1bd24b)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *      sd.c Copyright (C) 1992 Drew Eckhardt
4  *           Copyright (C) 1993, 1994, 1995, 1999 Eric Youngdale
5  *
6  *      Linux scsi disk driver
7  *              Initial versions: Drew Eckhardt
8  *              Subsequent revisions: Eric Youngdale
9  *	Modification history:
10  *       - Drew Eckhardt <drew@colorado.edu> original
11  *       - Eric Youngdale <eric@andante.org> add scatter-gather, multiple
12  *         outstanding requests, and other enhancements.
13  *         Support loadable low-level scsi drivers.
14  *       - Jirka Hanika <geo@ff.cuni.cz> support more scsi disks using
15  *         eight major numbers.
16  *       - Richard Gooch <rgooch@atnf.csiro.au> support devfs.
17  *	 - Torben Mathiasen <tmm@image.dk> Resource allocation fixes in
18  *	   sd_init and cleanups.
19  *	 - Alex Davis <letmein@erols.com> Fix problem where partition info
20  *	   not being read in sd_open. Fix problem where removable media
21  *	   could be ejected after sd_open.
22  *	 - Douglas Gilbert <dgilbert@interlog.com> cleanup for lk 2.5.x
23  *	 - Badari Pulavarty <pbadari@us.ibm.com>, Matthew Wilcox
24  *	   <willy@debian.org>, Kurt Garloff <garloff@suse.de>:
25  *	   Support 32k/1M disks.
26  *
27  *	Logging policy (needs CONFIG_SCSI_LOGGING defined):
28  *	 - setting up transfer: SCSI_LOG_HLQUEUE levels 1 and 2
29  *	 - end of transfer (bh + scsi_lib): SCSI_LOG_HLCOMPLETE level 1
30  *	 - entering sd_ioctl: SCSI_LOG_IOCTL level 1
31  *	 - entering other commands: SCSI_LOG_HLQUEUE level 3
32  *	Note: when the logging level is set by the user, it must be greater
33  *	than the level indicated above to trigger output.
34  */
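/*
 *	Example (illustrative): the per-facility levels referred to above
 *	occupy 3-bit fields of the scsi_mod "scsi_logging_level" parameter
 *	(see include/scsi/scsi_logging.h for the exact shifts).  Assuming
 *	HLQUEUE sits at bits 21-23, writing 3 into that field, e.g.
 *
 *		echo 0x600000 > /sys/module/scsi_mod/parameters/scsi_logging_level
 *
 *	enables the "setting up transfer" messages logged at HLQUEUE levels
 *	1 and 2; verify the shift values against the running kernel before
 *	relying on the exact number.
 */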
35 
36 #include <linux/bio-integrity.h>
37 #include <linux/module.h>
38 #include <linux/fs.h>
39 #include <linux/kernel.h>
40 #include <linux/mm.h>
41 #include <linux/hdreg.h>
42 #include <linux/errno.h>
43 #include <linux/idr.h>
44 #include <linux/interrupt.h>
45 #include <linux/init.h>
46 #include <linux/blkdev.h>
47 #include <linux/blkpg.h>
48 #include <linux/blk-pm.h>
49 #include <linux/delay.h>
50 #include <linux/rw_hint.h>
51 #include <linux/major.h>
52 #include <linux/mutex.h>
53 #include <linux/string_helpers.h>
54 #include <linux/slab.h>
55 #include <linux/sed-opal.h>
56 #include <linux/pm_runtime.h>
57 #include <linux/pr.h>
58 #include <linux/t10-pi.h>
59 #include <linux/uaccess.h>
60 #include <linux/unaligned.h>
61 
62 #include <scsi/scsi.h>
63 #include <scsi/scsi_cmnd.h>
64 #include <scsi/scsi_dbg.h>
65 #include <scsi/scsi_device.h>
66 #include <scsi/scsi_devinfo.h>
67 #include <scsi/scsi_driver.h>
68 #include <scsi/scsi_eh.h>
69 #include <scsi/scsi_host.h>
70 #include <scsi/scsi_ioctl.h>
71 #include <scsi/scsicam.h>
72 #include <scsi/scsi_common.h>
73 
74 #include "sd.h"
75 #include "scsi_priv.h"
76 #include "scsi_logging.h"
77 
78 MODULE_AUTHOR("Eric Youngdale");
79 MODULE_DESCRIPTION("SCSI disk (sd) driver");
80 MODULE_LICENSE("GPL");
81 
82 MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK0_MAJOR);
83 MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK1_MAJOR);
84 MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK2_MAJOR);
85 MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK3_MAJOR);
86 MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK4_MAJOR);
87 MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK5_MAJOR);
88 MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK6_MAJOR);
89 MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK7_MAJOR);
90 MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK8_MAJOR);
91 MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK9_MAJOR);
92 MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK10_MAJOR);
93 MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK11_MAJOR);
94 MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK12_MAJOR);
95 MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK13_MAJOR);
96 MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK14_MAJOR);
97 MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK15_MAJOR);
98 MODULE_ALIAS_SCSI_DEVICE(TYPE_DISK);
99 MODULE_ALIAS_SCSI_DEVICE(TYPE_MOD);
100 MODULE_ALIAS_SCSI_DEVICE(TYPE_RBC);
101 MODULE_ALIAS_SCSI_DEVICE(TYPE_ZBC);
102 
103 #define SD_MINORS	16
104 
105 static void sd_config_write_same(struct scsi_disk *sdkp,
106 		struct queue_limits *lim);
107 static void  sd_revalidate_disk(struct gendisk *);
108 
109 static DEFINE_IDA(sd_index_ida);
110 static DEFINE_MUTEX(sd_mutex_lock);
111 
112 static mempool_t *sd_page_pool;
113 static mempool_t *sd_large_page_pool;
114 static atomic_t sd_large_page_pool_users = ATOMIC_INIT(0);
115 static struct lock_class_key sd_bio_compl_lkclass;
116 
117 static const char *sd_cache_types[] = {
118 	"write through", "none", "write back",
119 	"write back, no read (daft)"
120 };
121 
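/*
 * sd_large_page_pool backs the zeroed payload pages handed out by
 * sd_set_special_bvec() for devices whose logical block size is larger
 * than PAGE_SIZE.  It is shared by all such disks: sd_large_pool_create()
 * creates it on first use and takes a reference, sd_large_pool_destroy()
 * drops the reference and frees the pool once the last user is gone, with
 * sd_mutex_lock serialising creation and teardown.
 */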
122 static int sd_large_pool_create(void)
123 {
124 	mutex_lock(&sd_mutex_lock);
125 	if (!sd_large_page_pool) {
126 		sd_large_page_pool = mempool_create_page_pool(
127 			SD_MEMPOOL_SIZE, get_order(BLK_MAX_BLOCK_SIZE));
128 		if (!sd_large_page_pool) {
129 			printk(KERN_ERR "sd: can't create large page mempool\n");
130 			mutex_unlock(&sd_mutex_lock);
131 			return -ENOMEM;
132 		}
133 	}
134 	atomic_inc(&sd_large_page_pool_users);
135 	mutex_unlock(&sd_mutex_lock);
136 	return 0;
137 }
138 
139 static void sd_large_pool_destroy(void)
140 {
141 	mutex_lock(&sd_mutex_lock);
142 	if (atomic_dec_and_test(&sd_large_page_pool_users)) {
143 		mempool_destroy(sd_large_page_pool);
144 		sd_large_page_pool = NULL;
145 	}
146 	mutex_unlock(&sd_mutex_lock);
147 }
148 
149 static void sd_disable_discard(struct scsi_disk *sdkp)
150 {
151 	sdkp->provisioning_mode = SD_LBP_DISABLE;
152 	blk_queue_disable_discard(sdkp->disk->queue);
153 }
154 
155 static void sd_config_discard(struct scsi_disk *sdkp, struct queue_limits *lim,
156 		unsigned int mode)
157 {
158 	unsigned int logical_block_size = sdkp->device->sector_size;
159 	unsigned int max_blocks = 0;
160 
161 	lim->discard_alignment = sdkp->unmap_alignment * logical_block_size;
162 	lim->discard_granularity = max(sdkp->physical_block_size,
163 			sdkp->unmap_granularity * logical_block_size);
164 	sdkp->provisioning_mode = mode;
165 
166 	switch (mode) {
167 
168 	case SD_LBP_FULL:
169 	case SD_LBP_DISABLE:
170 		break;
171 
172 	case SD_LBP_UNMAP:
173 		max_blocks = min_not_zero(sdkp->max_unmap_blocks,
174 					  (u32)SD_MAX_WS16_BLOCKS);
175 		break;
176 
177 	case SD_LBP_WS16:
178 		if (sdkp->device->unmap_limit_for_ws)
179 			max_blocks = sdkp->max_unmap_blocks;
180 		else
181 			max_blocks = sdkp->max_ws_blocks;
182 
183 		max_blocks = min_not_zero(max_blocks, (u32)SD_MAX_WS16_BLOCKS);
184 		break;
185 
186 	case SD_LBP_WS10:
187 		if (sdkp->device->unmap_limit_for_ws)
188 			max_blocks = sdkp->max_unmap_blocks;
189 		else
190 			max_blocks = sdkp->max_ws_blocks;
191 
192 		max_blocks = min_not_zero(max_blocks, (u32)SD_MAX_WS10_BLOCKS);
193 		break;
194 
195 	case SD_LBP_ZERO:
196 		max_blocks = min_not_zero(sdkp->max_ws_blocks,
197 					  (u32)SD_MAX_WS10_BLOCKS);
198 		break;
199 	}
200 
201 	lim->max_hw_discard_sectors = max_blocks *
202 		(logical_block_size >> SECTOR_SHIFT);
203 }
204 
205 static void sd_set_flush_flag(struct scsi_disk *sdkp,
206 		struct queue_limits *lim)
207 {
208 	if (sdkp->WCE) {
209 		lim->features |= BLK_FEAT_WRITE_CACHE;
210 		if (sdkp->DPOFUA)
211 			lim->features |= BLK_FEAT_FUA;
212 		else
213 			lim->features &= ~BLK_FEAT_FUA;
214 	} else {
215 		lim->features &= ~(BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA);
216 	}
217 }
218 
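/*
 * cache_type is writable through sysfs, e.g. (the 0:0:0:0 path component
 * is only an example, it depends on the device's SCSI address):
 *
 *	echo "write back" > /sys/class/scsi_disk/0:0:0:0/cache_type
 *
 * Prefixing the value with "temporary " sets cache_override and only
 * updates the driver's view of the cache state; the MODE SELECT that
 * would persist the change in the drive's caching mode page is skipped.
 */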
219 static ssize_t
220 cache_type_store(struct device *dev, struct device_attribute *attr,
221 		 const char *buf, size_t count)
222 {
223 	int ct, rcd, wce, sp;
224 	struct scsi_disk *sdkp = to_scsi_disk(dev);
225 	struct scsi_device *sdp = sdkp->device;
226 	char buffer[64];
227 	char *buffer_data;
228 	struct scsi_mode_data data;
229 	struct scsi_sense_hdr sshdr;
230 	static const char temp[] = "temporary ";
231 	int len, ret;
232 
233 	if (sdp->type != TYPE_DISK && sdp->type != TYPE_ZBC)
234 		/* no cache control on RBC devices; theoretically they
235 		 * can do it, but there are probably so many exceptions
236 		 * that it's not worth the risk */
237 		return -EINVAL;
238 
239 	if (strncmp(buf, temp, sizeof(temp) - 1) == 0) {
240 		buf += sizeof(temp) - 1;
241 		sdkp->cache_override = 1;
242 	} else {
243 		sdkp->cache_override = 0;
244 	}
245 
246 	ct = sysfs_match_string(sd_cache_types, buf);
247 	if (ct < 0)
248 		return -EINVAL;
249 
250 	rcd = ct & 0x01 ? 1 : 0;
251 	wce = (ct & 0x02) && !sdkp->write_prot ? 1 : 0;
252 
253 	if (sdkp->cache_override) {
254 		struct queue_limits lim;
255 
256 		sdkp->WCE = wce;
257 		sdkp->RCD = rcd;
258 
259 		lim = queue_limits_start_update(sdkp->disk->queue);
260 		sd_set_flush_flag(sdkp, &lim);
261 		ret = queue_limits_commit_update_frozen(sdkp->disk->queue,
262 				&lim);
263 		if (ret)
264 			return ret;
265 		return count;
266 	}
267 
268 	if (scsi_mode_sense(sdp, 0x08, 8, 0, buffer, sizeof(buffer), SD_TIMEOUT,
269 			    sdkp->max_retries, &data, NULL))
270 		return -EINVAL;
271 	len = min_t(size_t, sizeof(buffer), data.length - data.header_length -
272 		  data.block_descriptor_length);
273 	buffer_data = buffer + data.header_length +
274 		data.block_descriptor_length;
275 	buffer_data[2] &= ~0x05;
276 	buffer_data[2] |= wce << 2 | rcd;
277 	sp = buffer_data[0] & 0x80 ? 1 : 0;
278 	buffer_data[0] &= ~0x80;
279 
280 	/*
281 	 * Ensure WP, DPOFUA, and RESERVED fields are cleared in
282 	 * received mode parameter buffer before doing MODE SELECT.
283 	 */
284 	data.device_specific = 0;
285 
286 	ret = scsi_mode_select(sdp, 1, sp, buffer_data, len, SD_TIMEOUT,
287 			       sdkp->max_retries, &data, &sshdr);
288 	if (ret) {
289 		if (ret > 0 && scsi_sense_valid(&sshdr))
290 			sd_print_sense_hdr(sdkp, &sshdr);
291 		return -EINVAL;
292 	}
293 	sd_revalidate_disk(sdkp->disk);
294 	return count;
295 }
296 
297 static ssize_t
298 manage_start_stop_show(struct device *dev,
299 		       struct device_attribute *attr, char *buf)
300 {
301 	struct scsi_disk *sdkp = to_scsi_disk(dev);
302 	struct scsi_device *sdp = sdkp->device;
303 
304 	return sysfs_emit(buf, "%u\n",
305 			  sdp->manage_system_start_stop &&
306 			  sdp->manage_runtime_start_stop &&
307 			  sdp->manage_shutdown);
308 }
309 static DEVICE_ATTR_RO(manage_start_stop);
310 
311 static ssize_t
312 manage_system_start_stop_show(struct device *dev,
313 			      struct device_attribute *attr, char *buf)
314 {
315 	struct scsi_disk *sdkp = to_scsi_disk(dev);
316 	struct scsi_device *sdp = sdkp->device;
317 
318 	return sysfs_emit(buf, "%u\n", sdp->manage_system_start_stop);
319 }
320 
321 static ssize_t
322 manage_system_start_stop_store(struct device *dev,
323 			       struct device_attribute *attr,
324 			       const char *buf, size_t count)
325 {
326 	struct scsi_disk *sdkp = to_scsi_disk(dev);
327 	struct scsi_device *sdp = sdkp->device;
328 	bool v;
329 
330 	if (!capable(CAP_SYS_ADMIN))
331 		return -EACCES;
332 
333 	if (kstrtobool(buf, &v))
334 		return -EINVAL;
335 
336 	sdp->manage_system_start_stop = v;
337 
338 	return count;
339 }
340 static DEVICE_ATTR_RW(manage_system_start_stop);
341 
342 static ssize_t
343 manage_runtime_start_stop_show(struct device *dev,
344 			       struct device_attribute *attr, char *buf)
345 {
346 	struct scsi_disk *sdkp = to_scsi_disk(dev);
347 	struct scsi_device *sdp = sdkp->device;
348 
349 	return sysfs_emit(buf, "%u\n", sdp->manage_runtime_start_stop);
350 }
351 
352 static ssize_t
353 manage_runtime_start_stop_store(struct device *dev,
354 				struct device_attribute *attr,
355 				const char *buf, size_t count)
356 {
357 	struct scsi_disk *sdkp = to_scsi_disk(dev);
358 	struct scsi_device *sdp = sdkp->device;
359 	bool v;
360 
361 	if (!capable(CAP_SYS_ADMIN))
362 		return -EACCES;
363 
364 	if (kstrtobool(buf, &v))
365 		return -EINVAL;
366 
367 	sdp->manage_runtime_start_stop = v;
368 
369 	return count;
370 }
371 static DEVICE_ATTR_RW(manage_runtime_start_stop);
372 
373 static ssize_t manage_shutdown_show(struct device *dev,
374 				    struct device_attribute *attr, char *buf)
375 {
376 	struct scsi_disk *sdkp = to_scsi_disk(dev);
377 	struct scsi_device *sdp = sdkp->device;
378 
379 	return sysfs_emit(buf, "%u\n", sdp->manage_shutdown);
380 }
381 
382 static ssize_t manage_shutdown_store(struct device *dev,
383 				     struct device_attribute *attr,
384 				     const char *buf, size_t count)
385 {
386 	struct scsi_disk *sdkp = to_scsi_disk(dev);
387 	struct scsi_device *sdp = sdkp->device;
388 	bool v;
389 
390 	if (!capable(CAP_SYS_ADMIN))
391 		return -EACCES;
392 
393 	if (kstrtobool(buf, &v))
394 		return -EINVAL;
395 
396 	sdp->manage_shutdown = v;
397 
398 	return count;
399 }
400 static DEVICE_ATTR_RW(manage_shutdown);
401 
402 static ssize_t manage_restart_show(struct device *dev,
403 				   struct device_attribute *attr, char *buf)
404 {
405 	struct scsi_disk *sdkp = to_scsi_disk(dev);
406 	struct scsi_device *sdp = sdkp->device;
407 
408 	return sysfs_emit(buf, "%u\n", sdp->manage_restart);
409 }
410 
411 static ssize_t manage_restart_store(struct device *dev,
412 				    struct device_attribute *attr,
413 				    const char *buf, size_t count)
414 {
415 	struct scsi_disk *sdkp = to_scsi_disk(dev);
416 	struct scsi_device *sdp = sdkp->device;
417 	bool v;
418 
419 	if (!capable(CAP_SYS_ADMIN))
420 		return -EACCES;
421 
422 	if (kstrtobool(buf, &v))
423 		return -EINVAL;
424 
425 	sdp->manage_restart = v;
426 
427 	return count;
428 }
429 static DEVICE_ATTR_RW(manage_restart);
430 
431 static ssize_t
432 allow_restart_show(struct device *dev, struct device_attribute *attr, char *buf)
433 {
434 	struct scsi_disk *sdkp = to_scsi_disk(dev);
435 
436 	return sprintf(buf, "%u\n", sdkp->device->allow_restart);
437 }
438 
439 static ssize_t
440 allow_restart_store(struct device *dev, struct device_attribute *attr,
441 		    const char *buf, size_t count)
442 {
443 	bool v;
444 	struct scsi_disk *sdkp = to_scsi_disk(dev);
445 	struct scsi_device *sdp = sdkp->device;
446 
447 	if (!capable(CAP_SYS_ADMIN))
448 		return -EACCES;
449 
450 	if (sdp->type != TYPE_DISK && sdp->type != TYPE_ZBC)
451 		return -EINVAL;
452 
453 	if (kstrtobool(buf, &v))
454 		return -EINVAL;
455 
456 	sdp->allow_restart = v;
457 
458 	return count;
459 }
460 static DEVICE_ATTR_RW(allow_restart);
461 
462 static ssize_t
463 cache_type_show(struct device *dev, struct device_attribute *attr, char *buf)
464 {
465 	struct scsi_disk *sdkp = to_scsi_disk(dev);
466 	int ct = sdkp->RCD + 2*sdkp->WCE;
467 
468 	return sprintf(buf, "%s\n", sd_cache_types[ct]);
469 }
470 static DEVICE_ATTR_RW(cache_type);
471 
472 static ssize_t
473 FUA_show(struct device *dev, struct device_attribute *attr, char *buf)
474 {
475 	struct scsi_disk *sdkp = to_scsi_disk(dev);
476 
477 	return sprintf(buf, "%u\n", sdkp->DPOFUA);
478 }
479 static DEVICE_ATTR_RO(FUA);
480 
481 static ssize_t
482 protection_type_show(struct device *dev, struct device_attribute *attr,
483 		     char *buf)
484 {
485 	struct scsi_disk *sdkp = to_scsi_disk(dev);
486 
487 	return sprintf(buf, "%u\n", sdkp->protection_type);
488 }
489 
490 static ssize_t
491 protection_type_store(struct device *dev, struct device_attribute *attr,
492 		      const char *buf, size_t count)
493 {
494 	struct scsi_disk *sdkp = to_scsi_disk(dev);
495 	unsigned int val;
496 	int err;
497 
498 	if (!capable(CAP_SYS_ADMIN))
499 		return -EACCES;
500 
501 	err = kstrtouint(buf, 10, &val);
502 
503 	if (err)
504 		return err;
505 
506 	if (val <= T10_PI_TYPE3_PROTECTION)
507 		sdkp->protection_type = val;
508 
509 	return count;
510 }
511 static DEVICE_ATTR_RW(protection_type);
512 
513 static ssize_t
514 protection_mode_show(struct device *dev, struct device_attribute *attr,
515 		     char *buf)
516 {
517 	struct scsi_disk *sdkp = to_scsi_disk(dev);
518 	struct scsi_device *sdp = sdkp->device;
519 	unsigned int dif, dix;
520 
521 	dif = scsi_host_dif_capable(sdp->host, sdkp->protection_type);
522 	dix = scsi_host_dix_capable(sdp->host, sdkp->protection_type);
523 
524 	if (!dix && scsi_host_dix_capable(sdp->host, T10_PI_TYPE0_PROTECTION)) {
525 		dif = 0;
526 		dix = 1;
527 	}
528 
529 	if (!dif && !dix)
530 		return sprintf(buf, "none\n");
531 
532 	return sprintf(buf, "%s%u\n", dix ? "dix" : "dif", dif);
533 }
534 static DEVICE_ATTR_RO(protection_mode);
535 
536 static ssize_t
537 app_tag_own_show(struct device *dev, struct device_attribute *attr, char *buf)
538 {
539 	struct scsi_disk *sdkp = to_scsi_disk(dev);
540 
541 	return sprintf(buf, "%u\n", sdkp->ATO);
542 }
543 static DEVICE_ATTR_RO(app_tag_own);
544 
545 static ssize_t
546 thin_provisioning_show(struct device *dev, struct device_attribute *attr,
547 		       char *buf)
548 {
549 	struct scsi_disk *sdkp = to_scsi_disk(dev);
550 
551 	return sprintf(buf, "%u\n", sdkp->lbpme);
552 }
553 static DEVICE_ATTR_RO(thin_provisioning);
554 
555 /* sysfs_match_string() requires dense arrays */
556 static const char *lbp_mode[] = {
557 	[SD_LBP_FULL]		= "full",
558 	[SD_LBP_UNMAP]		= "unmap",
559 	[SD_LBP_WS16]		= "writesame_16",
560 	[SD_LBP_WS10]		= "writesame_10",
561 	[SD_LBP_ZERO]		= "writesame_zero",
562 	[SD_LBP_DISABLE]	= "disabled",
563 };
564 
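/*
 * provisioning_mode is exposed read-write via sysfs, for example
 * (illustrative device path):
 *
 *	cat /sys/class/scsi_disk/0:0:0:0/provisioning_mode
 *	echo unmap > /sys/class/scsi_disk/0:0:0:0/provisioning_mode
 *
 * Writing a mode only reconfigures the discard queue limits through
 * sd_config_discard() and thus how later REQ_OP_DISCARD requests are
 * translated; nothing is changed on the device itself.
 */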
565 static ssize_t
566 provisioning_mode_show(struct device *dev, struct device_attribute *attr,
567 		       char *buf)
568 {
569 	struct scsi_disk *sdkp = to_scsi_disk(dev);
570 
571 	return sprintf(buf, "%s\n", lbp_mode[sdkp->provisioning_mode]);
572 }
573 
574 static ssize_t
575 provisioning_mode_store(struct device *dev, struct device_attribute *attr,
576 			const char *buf, size_t count)
577 {
578 	struct scsi_disk *sdkp = to_scsi_disk(dev);
579 	struct scsi_device *sdp = sdkp->device;
580 	struct queue_limits lim;
581 	int mode, err;
582 
583 	if (!capable(CAP_SYS_ADMIN))
584 		return -EACCES;
585 
586 	if (sdp->type != TYPE_DISK)
587 		return -EINVAL;
588 
589 	mode = sysfs_match_string(lbp_mode, buf);
590 	if (mode < 0)
591 		return -EINVAL;
592 
593 	lim = queue_limits_start_update(sdkp->disk->queue);
594 	sd_config_discard(sdkp, &lim, mode);
595 	err = queue_limits_commit_update_frozen(sdkp->disk->queue, &lim);
596 	if (err)
597 		return err;
598 	return count;
599 }
600 static DEVICE_ATTR_RW(provisioning_mode);
601 
602 /* sysfs_match_string() requires dense arrays */
603 static const char *zeroing_mode[] = {
604 	[SD_ZERO_WRITE]		= "write",
605 	[SD_ZERO_WS]		= "writesame",
606 	[SD_ZERO_WS16_UNMAP]	= "writesame_16_unmap",
607 	[SD_ZERO_WS10_UNMAP]	= "writesame_10_unmap",
608 };
609 
610 static ssize_t
611 zeroing_mode_show(struct device *dev, struct device_attribute *attr,
612 		  char *buf)
613 {
614 	struct scsi_disk *sdkp = to_scsi_disk(dev);
615 
616 	return sprintf(buf, "%s\n", zeroing_mode[sdkp->zeroing_mode]);
617 }
618 
619 static ssize_t
620 zeroing_mode_store(struct device *dev, struct device_attribute *attr,
621 		   const char *buf, size_t count)
622 {
623 	struct scsi_disk *sdkp = to_scsi_disk(dev);
624 	int mode;
625 
626 	if (!capable(CAP_SYS_ADMIN))
627 		return -EACCES;
628 
629 	mode = sysfs_match_string(zeroing_mode, buf);
630 	if (mode < 0)
631 		return -EINVAL;
632 
633 	sdkp->zeroing_mode = mode;
634 
635 	return count;
636 }
637 static DEVICE_ATTR_RW(zeroing_mode);
638 
639 static ssize_t
640 max_medium_access_timeouts_show(struct device *dev,
641 				struct device_attribute *attr, char *buf)
642 {
643 	struct scsi_disk *sdkp = to_scsi_disk(dev);
644 
645 	return sprintf(buf, "%u\n", sdkp->max_medium_access_timeouts);
646 }
647 
648 static ssize_t
649 max_medium_access_timeouts_store(struct device *dev,
650 				 struct device_attribute *attr, const char *buf,
651 				 size_t count)
652 {
653 	struct scsi_disk *sdkp = to_scsi_disk(dev);
654 	int err;
655 
656 	if (!capable(CAP_SYS_ADMIN))
657 		return -EACCES;
658 
659 	err = kstrtouint(buf, 10, &sdkp->max_medium_access_timeouts);
660 
661 	return err ? err : count;
662 }
663 static DEVICE_ATTR_RW(max_medium_access_timeouts);
664 
665 static ssize_t
666 max_write_same_blocks_show(struct device *dev, struct device_attribute *attr,
667 			   char *buf)
668 {
669 	struct scsi_disk *sdkp = to_scsi_disk(dev);
670 
671 	return sprintf(buf, "%u\n", sdkp->max_ws_blocks);
672 }
673 
674 static ssize_t
675 max_write_same_blocks_store(struct device *dev, struct device_attribute *attr,
676 			    const char *buf, size_t count)
677 {
678 	struct scsi_disk *sdkp = to_scsi_disk(dev);
679 	struct scsi_device *sdp = sdkp->device;
680 	struct queue_limits lim;
681 	unsigned long max;
682 	int err;
683 
684 	if (!capable(CAP_SYS_ADMIN))
685 		return -EACCES;
686 
687 	if (sdp->type != TYPE_DISK && sdp->type != TYPE_ZBC)
688 		return -EINVAL;
689 
690 	err = kstrtoul(buf, 10, &max);
691 
692 	if (err)
693 		return err;
694 
695 	if (max == 0)
696 		sdp->no_write_same = 1;
697 	else if (max <= SD_MAX_WS16_BLOCKS) {
698 		sdp->no_write_same = 0;
699 		sdkp->max_ws_blocks = max;
700 	}
701 
702 	lim = queue_limits_start_update(sdkp->disk->queue);
703 	sd_config_write_same(sdkp, &lim);
704 	err = queue_limits_commit_update_frozen(sdkp->disk->queue, &lim);
705 	if (err)
706 		return err;
707 	return count;
708 }
709 static DEVICE_ATTR_RW(max_write_same_blocks);
710 
711 static ssize_t
712 zoned_cap_show(struct device *dev, struct device_attribute *attr, char *buf)
713 {
714 	struct scsi_disk *sdkp = to_scsi_disk(dev);
715 
716 	if (sdkp->device->type == TYPE_ZBC)
717 		return sprintf(buf, "host-managed\n");
718 	if (sdkp->zoned == 1)
719 		return sprintf(buf, "host-aware\n");
720 	if (sdkp->zoned == 2)
721 		return sprintf(buf, "drive-managed\n");
722 	return sprintf(buf, "none\n");
723 }
724 static DEVICE_ATTR_RO(zoned_cap);
725 
726 static ssize_t
727 max_retries_store(struct device *dev, struct device_attribute *attr,
728 		  const char *buf, size_t count)
729 {
730 	struct scsi_disk *sdkp = to_scsi_disk(dev);
731 	struct scsi_device *sdev = sdkp->device;
732 	int retries, err;
733 
734 	err = kstrtoint(buf, 10, &retries);
735 	if (err)
736 		return err;
737 
738 	if (retries == SCSI_CMD_RETRIES_NO_LIMIT || retries <= SD_MAX_RETRIES) {
739 		sdkp->max_retries = retries;
740 		return count;
741 	}
742 
743 	sdev_printk(KERN_ERR, sdev, "max_retries must be between -1 and %d\n",
744 		    SD_MAX_RETRIES);
745 	return -EINVAL;
746 }
747 
748 static ssize_t
749 max_retries_show(struct device *dev, struct device_attribute *attr,
750 		 char *buf)
751 {
752 	struct scsi_disk *sdkp = to_scsi_disk(dev);
753 
754 	return sprintf(buf, "%d\n", sdkp->max_retries);
755 }
756 
757 static DEVICE_ATTR_RW(max_retries);
758 
759 static struct attribute *sd_disk_attrs[] = {
760 	&dev_attr_cache_type.attr,
761 	&dev_attr_FUA.attr,
762 	&dev_attr_allow_restart.attr,
763 	&dev_attr_manage_start_stop.attr,
764 	&dev_attr_manage_system_start_stop.attr,
765 	&dev_attr_manage_runtime_start_stop.attr,
766 	&dev_attr_manage_shutdown.attr,
767 	&dev_attr_manage_restart.attr,
768 	&dev_attr_protection_type.attr,
769 	&dev_attr_protection_mode.attr,
770 	&dev_attr_app_tag_own.attr,
771 	&dev_attr_thin_provisioning.attr,
772 	&dev_attr_provisioning_mode.attr,
773 	&dev_attr_zeroing_mode.attr,
774 	&dev_attr_max_write_same_blocks.attr,
775 	&dev_attr_max_medium_access_timeouts.attr,
776 	&dev_attr_zoned_cap.attr,
777 	&dev_attr_max_retries.attr,
778 	NULL,
779 };
780 ATTRIBUTE_GROUPS(sd_disk);
781 
782 static void scsi_disk_release(struct device *dev)
783 {
784 	struct scsi_disk *sdkp = to_scsi_disk(dev);
785 
786 	ida_free(&sd_index_ida, sdkp->index);
787 	put_device(&sdkp->device->sdev_gendev);
788 	free_opal_dev(sdkp->opal_dev);
789 
790 	kfree(sdkp);
791 }
792 
793 static struct class sd_disk_class = {
794 	.name		= "scsi_disk",
795 	.dev_release	= scsi_disk_release,
796 	.dev_groups	= sd_disk_groups,
797 };
798 
799 /*
800  * Don't request a new module, as that could deadlock in multipath
801  * environment.
802  */
803 static void sd_default_probe(dev_t devt)
804 {
805 }
806 
807 /*
808  * Device no to disk mapping:
809  *
810  *       major         disc2     disc  p1
811  *   |............|.............|....|....| <- dev_t
812  *    31        20 19          8 7  4 3  0
813  *
814  * Inside a major, we have 16k disks, but they are mapped non-
815  * contiguously: the first 16 disks belong to major0, the next 16
816  * to major1, ... Disk 256 is for major0 again, disk 272 for
817  * major1, ...
818  * As we stay compatible with our numbering scheme, we can reuse
819  * the well-known SCSI majors 8, 65--71, 128--135.
820  */
821 static int sd_major(int major_idx)
822 {
823 	switch (major_idx) {
824 	case 0:
825 		return SCSI_DISK0_MAJOR;
826 	case 1 ... 7:
827 		return SCSI_DISK1_MAJOR + major_idx - 1;
828 	case 8 ... 15:
829 		return SCSI_DISK8_MAJOR + major_idx - 8;
830 	default:
831 		BUG();
832 		return 0;	/* shut up gcc */
833 	}
834 }
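/*
 * Worked example of the mapping above (illustrative): disk indices 0-15
 * use major 8 (SCSI_DISK0_MAJOR), indices 16-31 use major 65
 * (SCSI_DISK1_MAJOR), ..., indices 240-255 use major 135
 * (SCSI_DISK15_MAJOR); index 256 then wraps back to major 8, index 272
 * to major 65, and so on.
 */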
835 
836 #ifdef CONFIG_BLK_SED_OPAL
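/*
 * sd_sec_submit() is the callback handed to the SED Opal core (via
 * init_opal_dev() elsewhere in this driver) for exchanging security
 * protocol payloads: it wraps the buffer in a SECURITY PROTOCOL OUT
 * (send) or SECURITY PROTOCOL IN (receive) CDB and issues it as a PM
 * request so it can also run while the device is being resumed.
 */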
837 static int sd_sec_submit(void *data, u16 spsp, u8 secp, void *buffer,
838 		size_t len, bool send)
839 {
840 	struct scsi_disk *sdkp = data;
841 	struct scsi_device *sdev = sdkp->device;
842 	u8 cdb[12] = { 0, };
843 	const struct scsi_exec_args exec_args = {
844 		.req_flags = BLK_MQ_REQ_PM,
845 	};
846 	int ret;
847 
848 	cdb[0] = send ? SECURITY_PROTOCOL_OUT : SECURITY_PROTOCOL_IN;
849 	cdb[1] = secp;
850 	put_unaligned_be16(spsp, &cdb[2]);
851 	put_unaligned_be32(len, &cdb[6]);
852 
853 	ret = scsi_execute_cmd(sdev, cdb, send ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN,
854 			       buffer, len, SD_TIMEOUT, sdkp->max_retries,
855 			       &exec_args);
856 	return ret <= 0 ? ret : -EIO;
857 }
858 #endif /* CONFIG_BLK_SED_OPAL */
859 
860 /*
861  * Look up the DIX operation based on whether the command is read or
862  * write and whether dix and dif are enabled.
863  */
864 static unsigned int sd_prot_op(bool write, bool dix, bool dif)
865 {
866 	/* Lookup table: bit 2 (write), bit 1 (dix), bit 0 (dif) */
867 	static const unsigned int ops[] = {	/* wrt dix dif */
868 		SCSI_PROT_NORMAL,		/*  0	0   0  */
869 		SCSI_PROT_READ_STRIP,		/*  0	0   1  */
870 		SCSI_PROT_READ_INSERT,		/*  0	1   0  */
871 		SCSI_PROT_READ_PASS,		/*  0	1   1  */
872 		SCSI_PROT_NORMAL,		/*  1	0   0  */
873 		SCSI_PROT_WRITE_INSERT,		/*  1	0   1  */
874 		SCSI_PROT_WRITE_STRIP,		/*  1	1   0  */
875 		SCSI_PROT_WRITE_PASS,		/*  1	1   1  */
876 	};
877 
878 	return ops[write << 2 | dix << 1 | dif];
879 }
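/*
 * For example, sd_prot_op(true, true, false) - a write where the block
 * layer supplies integrity metadata (dix) but the target is not formatted
 * with protection information (no dif) - yields SCSI_PROT_WRITE_STRIP.
 */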
880 
881 /*
882  * Returns a mask of the protection flags that are valid for a given DIX
883  * operation.
884  */
885 static unsigned int sd_prot_flag_mask(unsigned int prot_op)
886 {
887 	static const unsigned int flag_mask[] = {
888 		[SCSI_PROT_NORMAL]		= 0,
889 
890 		[SCSI_PROT_READ_STRIP]		= SCSI_PROT_TRANSFER_PI |
891 						  SCSI_PROT_GUARD_CHECK |
892 						  SCSI_PROT_REF_CHECK |
893 						  SCSI_PROT_REF_INCREMENT,
894 
895 		[SCSI_PROT_READ_INSERT]		= SCSI_PROT_REF_INCREMENT |
896 						  SCSI_PROT_IP_CHECKSUM,
897 
898 		[SCSI_PROT_READ_PASS]		= SCSI_PROT_TRANSFER_PI |
899 						  SCSI_PROT_GUARD_CHECK |
900 						  SCSI_PROT_REF_CHECK |
901 						  SCSI_PROT_REF_INCREMENT |
902 						  SCSI_PROT_IP_CHECKSUM,
903 
904 		[SCSI_PROT_WRITE_INSERT]	= SCSI_PROT_TRANSFER_PI |
905 						  SCSI_PROT_REF_INCREMENT,
906 
907 		[SCSI_PROT_WRITE_STRIP]		= SCSI_PROT_GUARD_CHECK |
908 						  SCSI_PROT_REF_CHECK |
909 						  SCSI_PROT_REF_INCREMENT |
910 						  SCSI_PROT_IP_CHECKSUM,
911 
912 		[SCSI_PROT_WRITE_PASS]		= SCSI_PROT_TRANSFER_PI |
913 						  SCSI_PROT_GUARD_CHECK |
914 						  SCSI_PROT_REF_CHECK |
915 						  SCSI_PROT_REF_INCREMENT |
916 						  SCSI_PROT_IP_CHECKSUM,
917 	};
918 
919 	return flag_mask[prot_op];
920 }
921 
922 static unsigned char sd_setup_protect_cmnd(struct scsi_cmnd *scmd,
923 					   unsigned int dix, unsigned int dif)
924 {
925 	struct request *rq = scsi_cmd_to_rq(scmd);
926 	struct bio *bio = rq->bio;
927 	unsigned int prot_op = sd_prot_op(rq_data_dir(rq), dix, dif);
928 	unsigned int protect = 0;
929 
930 	if (dix) {				/* DIX Type 0, 1, 2, 3 */
931 		if (bio_integrity_flagged(bio, BIP_IP_CHECKSUM))
932 			scmd->prot_flags |= SCSI_PROT_IP_CHECKSUM;
933 
934 		if (bio_integrity_flagged(bio, BIP_CHECK_GUARD))
935 			scmd->prot_flags |= SCSI_PROT_GUARD_CHECK;
936 	}
937 
938 	if (dif != T10_PI_TYPE3_PROTECTION) {	/* DIX/DIF Type 0, 1, 2 */
939 		scmd->prot_flags |= SCSI_PROT_REF_INCREMENT;
940 
941 		if (bio_integrity_flagged(bio, BIP_CHECK_REFTAG))
942 			scmd->prot_flags |= SCSI_PROT_REF_CHECK;
943 	}
944 
945 	if (dif) {				/* DIX/DIF Type 1, 2, 3 */
946 		scmd->prot_flags |= SCSI_PROT_TRANSFER_PI;
947 
948 		if (bio_integrity_flagged(bio, BIP_DISK_NOCHECK))
949 			protect = 3 << 5;	/* Disable target PI checking */
950 		else
951 			protect = 1 << 5;	/* Enable target PI checking */
952 	}
953 
954 	scsi_set_prot_op(scmd, prot_op);
955 	scsi_set_prot_type(scmd, dif);
956 	scmd->prot_flags &= sd_prot_flag_mask(prot_op);
957 
958 	return protect;
959 }
960 
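/*
 * Allocate a zeroed payload page (from sd_large_page_pool when the logical
 * block size exceeds PAGE_SIZE, from sd_page_pool otherwise) and attach it
 * to the request as an RQF_SPECIAL_PAYLOAD bvec.  Used by commands such as
 * UNMAP and WRITE SAME that need a data-out buffer the caller's bio does
 * not provide; sd_uninit_command() returns the page to the matching pool.
 */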
961 static void *sd_set_special_bvec(struct scsi_cmnd *cmd, unsigned int data_len)
962 {
963 	struct page *page;
964 	struct request *rq = scsi_cmd_to_rq(cmd);
965 	struct scsi_device *sdp = cmd->device;
966 	unsigned sector_size = sdp->sector_size;
967 	unsigned int nr_pages = DIV_ROUND_UP(sector_size, PAGE_SIZE);
968 	int n;
969 
970 	if (sector_size > PAGE_SIZE)
971 		page = mempool_alloc(sd_large_page_pool, GFP_ATOMIC);
972 	else
973 		page = mempool_alloc(sd_page_pool, GFP_ATOMIC);
974 	if (!page)
975 		return NULL;
976 
977 	for (n = 0; n < nr_pages; n++)
978 		clear_highpage(page + n);
979 	bvec_set_page(&rq->special_vec, page, data_len, 0);
980 	rq->rq_flags |= RQF_SPECIAL_PAYLOAD;
981 	return bvec_virt(&rq->special_vec);
982 }
983 
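/*
 * sd_setup_unmap_cmnd() builds a 24-byte UNMAP parameter list containing a
 * single block descriptor, laid out as in SBC:
 *
 *	bytes  0-1 : UNMAP data length			(6 + 16 = 22)
 *	bytes  2-3 : block descriptor data length	(16)
 *	bytes  4-7 : reserved
 *	bytes  8-15: descriptor starting LBA
 *	bytes 16-19: descriptor number of logical blocks
 *
 * which is what the put_unaligned_be*() calls below fill in.
 */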
984 static blk_status_t sd_setup_unmap_cmnd(struct scsi_cmnd *cmd)
985 {
986 	struct scsi_device *sdp = cmd->device;
987 	struct request *rq = scsi_cmd_to_rq(cmd);
988 	struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
989 	u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));
990 	u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
991 	unsigned int data_len = 24;
992 	char *buf;
993 
994 	buf = sd_set_special_bvec(cmd, data_len);
995 	if (!buf)
996 		return BLK_STS_RESOURCE;
997 
998 	cmd->cmd_len = 10;
999 	cmd->cmnd[0] = UNMAP;
1000 	cmd->cmnd[8] = 24;
1001 
1002 	put_unaligned_be16(6 + 16, &buf[0]);
1003 	put_unaligned_be16(16, &buf[2]);
1004 	put_unaligned_be64(lba, &buf[8]);
1005 	put_unaligned_be32(nr_blocks, &buf[16]);
1006 
1007 	cmd->allowed = sdkp->max_retries;
1008 	cmd->transfersize = data_len;
1009 	rq->timeout = SD_TIMEOUT;
1010 
1011 	return scsi_alloc_sgtables(cmd);
1012 }
1013 
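/*
 * Translate the atomic write limits the device reports (in logical blocks,
 * via the Block Limits VPD page) into the byte-based queue_limits fields.
 * Atomic writes stay disabled when nothing is reported, when the device
 * uses Type 2 protection, or when the granularity/alignment checks below
 * cannot be satisfied with power-of-two units.
 */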
1014 static void sd_config_atomic(struct scsi_disk *sdkp, struct queue_limits *lim)
1015 {
1016 	unsigned int logical_block_size = sdkp->device->sector_size,
1017 		physical_block_size_sectors, max_atomic, unit_min, unit_max;
1018 
1019 	if ((!sdkp->max_atomic && !sdkp->max_atomic_with_boundary) ||
1020 	    sdkp->protection_type == T10_PI_TYPE2_PROTECTION)
1021 		return;
1022 
1023 	physical_block_size_sectors = sdkp->physical_block_size /
1024 					sdkp->device->sector_size;
1025 
1026 	unit_min = rounddown_pow_of_two(sdkp->atomic_granularity ?
1027 					sdkp->atomic_granularity :
1028 					physical_block_size_sectors);
1029 
1030 	/*
1031 	 * Only use atomic boundary when we have the odd scenario of
1032 	 * sdkp->max_atomic == 0, which the spec does permit.
1033 	 */
1034 	if (sdkp->max_atomic) {
1035 		max_atomic = sdkp->max_atomic;
1036 		unit_max = rounddown_pow_of_two(sdkp->max_atomic);
1037 		sdkp->use_atomic_write_boundary = 0;
1038 	} else {
1039 		max_atomic = sdkp->max_atomic_with_boundary;
1040 		unit_max = rounddown_pow_of_two(sdkp->max_atomic_boundary);
1041 		sdkp->use_atomic_write_boundary = 1;
1042 	}
1043 
1044 	/*
1045 	 * Ensure compliance with granularity and alignment. For now, keep it
1046 	 * simple and just don't support atomic writes for values mismatched
1047 	 * with max_{boundary}atomic, physical block size, and
1048 	 * atomic_granularity itself.
1049 	 *
1050 	 * We're really being distrustful by checking unit_max also...
1051 	 */
1052 	if (sdkp->atomic_granularity > 1) {
1053 		if (unit_min > 1 && unit_min % sdkp->atomic_granularity)
1054 			return;
1055 		if (unit_max > 1 && unit_max % sdkp->atomic_granularity)
1056 			return;
1057 	}
1058 
1059 	if (sdkp->atomic_alignment > 1) {
1060 		if (unit_min > 1 && unit_min % sdkp->atomic_alignment)
1061 			return;
1062 		if (unit_max > 1 && unit_max % sdkp->atomic_alignment)
1063 			return;
1064 	}
1065 
1066 	lim->atomic_write_hw_max = max_atomic * logical_block_size;
1067 	lim->atomic_write_hw_boundary = 0;
1068 	lim->atomic_write_hw_unit_min = unit_min * logical_block_size;
1069 	lim->atomic_write_hw_unit_max = unit_max * logical_block_size;
1070 	lim->features |= BLK_FEAT_ATOMIC_WRITES;
1071 }
1072 
1073 static blk_status_t sd_setup_write_same16_cmnd(struct scsi_cmnd *cmd,
1074 		bool unmap)
1075 {
1076 	struct scsi_device *sdp = cmd->device;
1077 	struct request *rq = scsi_cmd_to_rq(cmd);
1078 	struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
1079 	u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));
1080 	u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
1081 	u32 data_len = sdp->sector_size;
1082 
1083 	if (!sd_set_special_bvec(cmd, data_len))
1084 		return BLK_STS_RESOURCE;
1085 
1086 	cmd->cmd_len = 16;
1087 	cmd->cmnd[0] = WRITE_SAME_16;
1088 	if (unmap)
1089 		cmd->cmnd[1] = 0x8; /* UNMAP */
1090 	put_unaligned_be64(lba, &cmd->cmnd[2]);
1091 	put_unaligned_be32(nr_blocks, &cmd->cmnd[10]);
1092 
1093 	cmd->allowed = sdkp->max_retries;
1094 	cmd->transfersize = data_len;
1095 	rq->timeout = unmap ? SD_TIMEOUT : SD_WRITE_SAME_TIMEOUT;
1096 
1097 	return scsi_alloc_sgtables(cmd);
1098 }
1099 
1100 static blk_status_t sd_setup_write_same10_cmnd(struct scsi_cmnd *cmd,
1101 		bool unmap)
1102 {
1103 	struct scsi_device *sdp = cmd->device;
1104 	struct request *rq = scsi_cmd_to_rq(cmd);
1105 	struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
1106 	u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));
1107 	u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
1108 	u32 data_len = sdp->sector_size;
1109 
1110 	if (!sd_set_special_bvec(cmd, data_len))
1111 		return BLK_STS_RESOURCE;
1112 
1113 	cmd->cmd_len = 10;
1114 	cmd->cmnd[0] = WRITE_SAME;
1115 	if (unmap)
1116 		cmd->cmnd[1] = 0x8; /* UNMAP */
1117 	put_unaligned_be32(lba, &cmd->cmnd[2]);
1118 	put_unaligned_be16(nr_blocks, &cmd->cmnd[7]);
1119 
1120 	cmd->allowed = sdkp->max_retries;
1121 	cmd->transfersize = data_len;
1122 	rq->timeout = unmap ? SD_TIMEOUT : SD_WRITE_SAME_TIMEOUT;
1123 
1124 	return scsi_alloc_sgtables(cmd);
1125 }
1126 
1127 static blk_status_t sd_setup_write_zeroes_cmnd(struct scsi_cmnd *cmd)
1128 {
1129 	struct request *rq = scsi_cmd_to_rq(cmd);
1130 	struct scsi_device *sdp = cmd->device;
1131 	struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
1132 	u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));
1133 	u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
1134 
1135 	if (!(rq->cmd_flags & REQ_NOUNMAP)) {
1136 		switch (sdkp->zeroing_mode) {
1137 		case SD_ZERO_WS16_UNMAP:
1138 			return sd_setup_write_same16_cmnd(cmd, true);
1139 		case SD_ZERO_WS10_UNMAP:
1140 			return sd_setup_write_same10_cmnd(cmd, true);
1141 		}
1142 	}
1143 
1144 	if (sdp->no_write_same) {
1145 		rq->rq_flags |= RQF_QUIET;
1146 		return BLK_STS_TARGET;
1147 	}
1148 
1149 	if (sdkp->ws16 || lba > 0xffffffff || nr_blocks > 0xffff)
1150 		return sd_setup_write_same16_cmnd(cmd, false);
1151 
1152 	return sd_setup_write_same10_cmnd(cmd, false);
1153 }
1154 
1155 static void sd_disable_write_same(struct scsi_disk *sdkp)
1156 {
1157 	sdkp->device->no_write_same = 1;
1158 	sdkp->max_ws_blocks = 0;
1159 	blk_queue_disable_write_zeroes(sdkp->disk->queue);
1160 }
1161 
1162 static void sd_config_write_same(struct scsi_disk *sdkp,
1163 		struct queue_limits *lim)
1164 {
1165 	unsigned int logical_block_size = sdkp->device->sector_size;
1166 
1167 	if (sdkp->device->no_write_same) {
1168 		sdkp->max_ws_blocks = 0;
1169 		goto out;
1170 	}
1171 
1172 	/* Some devices cannot handle block counts above 0xffff despite
1173 	 * supporting WRITE SAME(16). Consequently we default to 64k
1174 	 * blocks per I/O unless the device explicitly advertises a
1175 	 * bigger limit.
1176 	 */
1177 	if (sdkp->max_ws_blocks > SD_MAX_WS10_BLOCKS)
1178 		sdkp->max_ws_blocks = min_not_zero(sdkp->max_ws_blocks,
1179 						   (u32)SD_MAX_WS16_BLOCKS);
1180 	else if (sdkp->ws16 || sdkp->ws10 || sdkp->device->no_report_opcodes)
1181 		sdkp->max_ws_blocks = min_not_zero(sdkp->max_ws_blocks,
1182 						   (u32)SD_MAX_WS10_BLOCKS);
1183 	else {
1184 		sdkp->device->no_write_same = 1;
1185 		sdkp->max_ws_blocks = 0;
1186 	}
1187 
1188 	if (sdkp->lbprz && sdkp->lbpws)
1189 		sdkp->zeroing_mode = SD_ZERO_WS16_UNMAP;
1190 	else if (sdkp->lbprz && sdkp->lbpws10)
1191 		sdkp->zeroing_mode = SD_ZERO_WS10_UNMAP;
1192 	else if (sdkp->max_ws_blocks)
1193 		sdkp->zeroing_mode = SD_ZERO_WS;
1194 	else
1195 		sdkp->zeroing_mode = SD_ZERO_WRITE;
1196 
1197 	if (sdkp->max_ws_blocks &&
1198 	    sdkp->physical_block_size > logical_block_size) {
1199 		/*
1200 		 * Reporting a maximum number of blocks that is not aligned
1201 		 * on the device physical size would cause a large write same
1202 		 * request to be split into physically unaligned chunks by
1203 		 * __blkdev_issue_write_zeroes() even if the caller of this
1204 		 * functions took care to align the large request. So make sure
1205 		 * function took care to align the large request. So make sure
1206 		 * size. This is only an optional optimization for regular
1207 		 * disks, but this is mandatory to avoid failure of large write
1208 		 * same requests directed at sequential write required zones of
1209 		 * host-managed ZBC disks.
1210 		 */
1211 		sdkp->max_ws_blocks =
1212 			round_down(sdkp->max_ws_blocks,
1213 				   bytes_to_logical(sdkp->device,
1214 						    sdkp->physical_block_size));
1215 	}
1216 
1217 out:
1218 	lim->max_write_zeroes_sectors =
1219 		sdkp->max_ws_blocks * (logical_block_size >> SECTOR_SHIFT);
1220 
1221 	if (sdkp->zeroing_mode == SD_ZERO_WS16_UNMAP ||
1222 	    sdkp->zeroing_mode == SD_ZERO_WS10_UNMAP)
1223 		lim->max_hw_wzeroes_unmap_sectors =
1224 				lim->max_write_zeroes_sectors;
1225 }
1226 
1227 static blk_status_t sd_setup_flush_cmnd(struct scsi_cmnd *cmd)
1228 {
1229 	struct request *rq = scsi_cmd_to_rq(cmd);
1230 	struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
1231 
1232 	/* flush requests don't perform I/O, zero the S/G table */
1233 	memset(&cmd->sdb, 0, sizeof(cmd->sdb));
1234 
1235 	if (cmd->device->use_16_for_sync) {
1236 		cmd->cmnd[0] = SYNCHRONIZE_CACHE_16;
1237 		cmd->cmd_len = 16;
1238 	} else {
1239 		cmd->cmnd[0] = SYNCHRONIZE_CACHE;
1240 		cmd->cmd_len = 10;
1241 	}
1242 	cmd->transfersize = 0;
1243 	cmd->allowed = sdkp->max_retries;
1244 
1245 	rq->timeout = rq->q->rq_timeout * SD_FLUSH_TIMEOUT_MULTIPLIER;
1246 	return BLK_STS_OK;
1247 }
1248 
1249 /**
1250  * sd_group_number() - Compute the GROUP NUMBER field
1251  * @cmd: SCSI command for which to compute the value of the six-bit GROUP NUMBER
1252  *	field.
1253  *
1254  * From SBC-5 r05 (https://www.t10.org/cgi-bin/ac.pl?t=f&f=sbc5r05.pdf):
1255  * 0: no relative lifetime.
1256  * 1: shortest relative lifetime.
1257  * 2: second shortest relative lifetime.
1258  * 3 - 0x3d: intermediate relative lifetimes.
1259  * 0x3e: second longest relative lifetime.
1260  * 0x3f: longest relative lifetime.
1261  */
1262 static u8 sd_group_number(struct scsi_cmnd *cmd)
1263 {
1264 	const struct request *rq = scsi_cmd_to_rq(cmd);
1265 	struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
1266 
1267 	if (!sdkp->rscs)
1268 		return 0;
1269 
1270 	return min3((u32)rq->bio->bi_write_hint,
1271 		    (u32)sdkp->permanent_stream_count, 0x3fu);
1272 }
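/*
 * Example: with rscs set and, say, permanent_stream_count == 4, a bio with
 * bi_write_hint == 2 lands in group 2 while any larger hint is clamped to
 * 4; devices without RSCS always get group 0.
 */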
1273 
1274 static blk_status_t sd_setup_rw32_cmnd(struct scsi_cmnd *cmd, bool write,
1275 				       sector_t lba, unsigned int nr_blocks,
1276 				       unsigned char flags, unsigned int dld)
1277 {
1278 	cmd->cmd_len = SD_EXT_CDB_SIZE;
1279 	cmd->cmnd[0]  = VARIABLE_LENGTH_CMD;
1280 	cmd->cmnd[6]  = sd_group_number(cmd);
1281 	cmd->cmnd[7]  = 0x18; /* Additional CDB len */
1282 	cmd->cmnd[9]  = write ? WRITE_32 : READ_32;
1283 	cmd->cmnd[10] = flags;
1284 	cmd->cmnd[11] = dld & 0x07;
1285 	put_unaligned_be64(lba, &cmd->cmnd[12]);
1286 	put_unaligned_be32(lba, &cmd->cmnd[20]); /* Expected Indirect LBA */
1287 	put_unaligned_be32(nr_blocks, &cmd->cmnd[28]);
1288 
1289 	return BLK_STS_OK;
1290 }
1291 
1292 static blk_status_t sd_setup_rw16_cmnd(struct scsi_cmnd *cmd, bool write,
1293 				       sector_t lba, unsigned int nr_blocks,
1294 				       unsigned char flags, unsigned int dld)
1295 {
1296 	cmd->cmd_len  = 16;
1297 	cmd->cmnd[0]  = write ? WRITE_16 : READ_16;
1298 	cmd->cmnd[1]  = flags | ((dld >> 2) & 0x01);
1299 	cmd->cmnd[14] = ((dld & 0x03) << 6) | sd_group_number(cmd);
1300 	cmd->cmnd[15] = 0;
1301 	put_unaligned_be64(lba, &cmd->cmnd[2]);
1302 	put_unaligned_be32(nr_blocks, &cmd->cmnd[10]);
1303 
1304 	return BLK_STS_OK;
1305 }
1306 
1307 static blk_status_t sd_setup_rw10_cmnd(struct scsi_cmnd *cmd, bool write,
1308 				       sector_t lba, unsigned int nr_blocks,
1309 				       unsigned char flags)
1310 {
1311 	cmd->cmd_len = 10;
1312 	cmd->cmnd[0] = write ? WRITE_10 : READ_10;
1313 	cmd->cmnd[1] = flags;
1314 	cmd->cmnd[6] = sd_group_number(cmd);
1315 	cmd->cmnd[9] = 0;
1316 	put_unaligned_be32(lba, &cmd->cmnd[2]);
1317 	put_unaligned_be16(nr_blocks, &cmd->cmnd[7]);
1318 
1319 	return BLK_STS_OK;
1320 }
1321 
1322 static blk_status_t sd_setup_rw6_cmnd(struct scsi_cmnd *cmd, bool write,
1323 				      sector_t lba, unsigned int nr_blocks,
1324 				      unsigned char flags)
1325 {
1326 	/* Avoid that 0 blocks gets translated into 256 blocks. */
1327 	if (WARN_ON_ONCE(nr_blocks == 0))
1328 		return BLK_STS_IOERR;
1329 
1330 	if (unlikely(flags & 0x8)) {
1331 		/*
1332 		 * This happens only if this drive failed 10byte rw
1333 		 * command with ILLEGAL_REQUEST during operation and
1334 		 * thus turned off use_10_for_rw.
1335 		 */
1336 		scmd_printk(KERN_ERR, cmd, "FUA write on READ/WRITE(6) drive\n");
1337 		return BLK_STS_IOERR;
1338 	}
1339 
1340 	cmd->cmd_len = 6;
1341 	cmd->cmnd[0] = write ? WRITE_6 : READ_6;
1342 	cmd->cmnd[1] = (lba >> 16) & 0x1f;
1343 	cmd->cmnd[2] = (lba >> 8) & 0xff;
1344 	cmd->cmnd[3] = lba & 0xff;
1345 	cmd->cmnd[4] = nr_blocks;
1346 	cmd->cmnd[5] = 0;
1347 
1348 	return BLK_STS_OK;
1349 }
1350 
1351 /*
1352  * Check if a command has a duration limit set. If it does, and the target
1353  * device supports CDL and the feature is enabled, return the limit
1354  * descriptor index to use. Return 0 (no limit) otherwise.
1355  */
1356 static int sd_cdl_dld(struct scsi_disk *sdkp, struct scsi_cmnd *scmd)
1357 {
1358 	struct scsi_device *sdp = sdkp->device;
1359 	int hint;
1360 
1361 	if (!sdp->cdl_supported || !sdp->cdl_enable)
1362 		return 0;
1363 
1364 	/*
1365 	 * Use "no limit" if the request ioprio does not specify a duration
1366 	 * limit hint.
1367 	 */
1368 	hint = IOPRIO_PRIO_HINT(req_get_ioprio(scsi_cmd_to_rq(scmd)));
1369 	if (hint < IOPRIO_HINT_DEV_DURATION_LIMIT_1 ||
1370 	    hint > IOPRIO_HINT_DEV_DURATION_LIMIT_7)
1371 		return 0;
1372 
1373 	return (hint - IOPRIO_HINT_DEV_DURATION_LIMIT_1) + 1;
1374 }
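/*
 * Example: a request whose I/O priority carries the
 * IOPRIO_HINT_DEV_DURATION_LIMIT_3 hint maps to dld == 3, selecting the
 * third duration limit descriptor configured on the device; requests
 * without such a hint (or on devices without CDL enabled) get 0, "no limit".
 */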
1375 
1376 static blk_status_t sd_setup_atomic_cmnd(struct scsi_cmnd *cmd,
1377 					sector_t lba, unsigned int nr_blocks,
1378 					bool boundary, unsigned char flags)
1379 {
1380 	cmd->cmd_len  = 16;
1381 	cmd->cmnd[0]  = WRITE_ATOMIC_16;
1382 	cmd->cmnd[1]  = flags;
1383 	put_unaligned_be64(lba, &cmd->cmnd[2]);
1384 	put_unaligned_be16(nr_blocks, &cmd->cmnd[12]);
1385 	if (boundary)
1386 		put_unaligned_be16(nr_blocks, &cmd->cmnd[10]);
1387 	else
1388 		put_unaligned_be16(0, &cmd->cmnd[10]);
1390 	cmd->cmnd[14] = 0;
1391 	cmd->cmnd[15] = 0;
1392 
1393 	return BLK_STS_OK;
1394 }
1395 
1396 static blk_status_t sd_setup_read_write_cmnd(struct scsi_cmnd *cmd)
1397 {
1398 	struct request *rq = scsi_cmd_to_rq(cmd);
1399 	struct scsi_device *sdp = cmd->device;
1400 	struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
1401 	sector_t lba = sectors_to_logical(sdp, blk_rq_pos(rq));
1402 	sector_t threshold;
1403 	unsigned int nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
1404 	unsigned int mask = logical_to_sectors(sdp, 1) - 1;
1405 	bool write = rq_data_dir(rq) == WRITE;
1406 	unsigned char protect, fua;
1407 	unsigned int dld;
1408 	blk_status_t ret;
1409 	unsigned int dif;
1410 	bool dix;
1411 
1412 	ret = scsi_alloc_sgtables(cmd);
1413 	if (ret != BLK_STS_OK)
1414 		return ret;
1415 
1416 	ret = BLK_STS_IOERR;
1417 	if (!scsi_device_online(sdp) || sdp->changed) {
1418 		scmd_printk(KERN_ERR, cmd, "device offline or changed\n");
1419 		goto fail;
1420 	}
1421 
1422 	if (blk_rq_pos(rq) + blk_rq_sectors(rq) > get_capacity(rq->q->disk)) {
1423 		scmd_printk(KERN_ERR, cmd, "access beyond end of device\n");
1424 		goto fail;
1425 	}
1426 
1427 	if ((blk_rq_pos(rq) & mask) || (blk_rq_sectors(rq) & mask)) {
1428 		scmd_printk(KERN_ERR, cmd, "request not aligned to the logical block size\n");
1429 		goto fail;
1430 	}
1431 
1432 	/*
1433 	 * Some SD card readers can't handle accesses which touch the
1434 	 * last one or two logical blocks. Split accesses as needed.
1435 	 */
1436 	threshold = sdkp->capacity - SD_LAST_BUGGY_SECTORS;
1437 
1438 	if (unlikely(sdp->last_sector_bug && lba + nr_blocks > threshold)) {
1439 		if (lba < threshold) {
1440 			/* Access up to the threshold but not beyond */
1441 			nr_blocks = threshold - lba;
1442 		} else {
1443 			/* Access only a single logical block */
1444 			nr_blocks = 1;
1445 		}
1446 	}
1447 
1448 	fua = rq->cmd_flags & REQ_FUA ? 0x8 : 0;
1449 	dix = scsi_prot_sg_count(cmd);
1450 	dif = scsi_host_dif_capable(cmd->device->host, sdkp->protection_type);
1451 	dld = sd_cdl_dld(sdkp, cmd);
1452 
1453 	if (dif || dix)
1454 		protect = sd_setup_protect_cmnd(cmd, dix, dif);
1455 	else
1456 		protect = 0;
1457 
1458 	if (protect && sdkp->protection_type == T10_PI_TYPE2_PROTECTION) {
1459 		ret = sd_setup_rw32_cmnd(cmd, write, lba, nr_blocks,
1460 					 protect | fua, dld);
1461 	} else if (rq->cmd_flags & REQ_ATOMIC) {
1462 		ret = sd_setup_atomic_cmnd(cmd, lba, nr_blocks,
1463 				sdkp->use_atomic_write_boundary,
1464 				protect | fua);
1465 	} else if (sdp->use_16_for_rw || (nr_blocks > 0xffff)) {
1466 		ret = sd_setup_rw16_cmnd(cmd, write, lba, nr_blocks,
1467 					 protect | fua, dld);
1468 	} else if ((nr_blocks > 0xff) || (lba > 0x1fffff) ||
1469 		   sdp->use_10_for_rw || protect || rq->bio->bi_write_hint) {
1470 		ret = sd_setup_rw10_cmnd(cmd, write, lba, nr_blocks,
1471 					 protect | fua);
1472 	} else {
1473 		ret = sd_setup_rw6_cmnd(cmd, write, lba, nr_blocks,
1474 					protect | fua);
1475 	}
1476 
1477 	if (unlikely(ret != BLK_STS_OK))
1478 		goto fail;
1479 
1480 	/*
1481 	 * We shouldn't disconnect in the middle of a sector, so with a dumb
1482 	 * host adapter, it's safe to assume that we can at least transfer
1483 	 * this many bytes between each connect / disconnect.
1484 	 */
1485 	cmd->transfersize = sdp->sector_size;
1486 	cmd->underflow = nr_blocks << 9;
1487 	cmd->allowed = sdkp->max_retries;
1488 	cmd->sdb.length = nr_blocks * sdp->sector_size;
1489 
1490 	SCSI_LOG_HLQUEUE(1,
1491 			 scmd_printk(KERN_INFO, cmd,
1492 				     "%s: block=%llu, count=%d\n", __func__,
1493 				     (unsigned long long)blk_rq_pos(rq),
1494 				     blk_rq_sectors(rq)));
1495 	SCSI_LOG_HLQUEUE(2,
1496 			 scmd_printk(KERN_INFO, cmd,
1497 				     "%s %d/%u 512 byte blocks.\n",
1498 				     write ? "writing" : "reading", nr_blocks,
1499 				     blk_rq_sectors(rq)));
1500 
1501 	/*
1502 	 * This indicates that the command is ready from our end to be queued.
1503 	 */
1504 	return BLK_STS_OK;
1505 fail:
1506 	scsi_free_sgtables(cmd);
1507 	return ret;
1508 }
1509 
1510 static blk_status_t sd_init_command(struct scsi_cmnd *cmd)
1511 {
1512 	struct request *rq = scsi_cmd_to_rq(cmd);
1513 
1514 	switch (req_op(rq)) {
1515 	case REQ_OP_DISCARD:
1516 		switch (scsi_disk(rq->q->disk)->provisioning_mode) {
1517 		case SD_LBP_UNMAP:
1518 			return sd_setup_unmap_cmnd(cmd);
1519 		case SD_LBP_WS16:
1520 			return sd_setup_write_same16_cmnd(cmd, true);
1521 		case SD_LBP_WS10:
1522 			return sd_setup_write_same10_cmnd(cmd, true);
1523 		case SD_LBP_ZERO:
1524 			return sd_setup_write_same10_cmnd(cmd, false);
1525 		default:
1526 			return BLK_STS_TARGET;
1527 		}
1528 	case REQ_OP_WRITE_ZEROES:
1529 		return sd_setup_write_zeroes_cmnd(cmd);
1530 	case REQ_OP_FLUSH:
1531 		return sd_setup_flush_cmnd(cmd);
1532 	case REQ_OP_READ:
1533 	case REQ_OP_WRITE:
1534 		return sd_setup_read_write_cmnd(cmd);
1535 	case REQ_OP_ZONE_RESET:
1536 		return sd_zbc_setup_zone_mgmt_cmnd(cmd, ZO_RESET_WRITE_POINTER,
1537 						   false);
1538 	case REQ_OP_ZONE_RESET_ALL:
1539 		return sd_zbc_setup_zone_mgmt_cmnd(cmd, ZO_RESET_WRITE_POINTER,
1540 						   true);
1541 	case REQ_OP_ZONE_OPEN:
1542 		return sd_zbc_setup_zone_mgmt_cmnd(cmd, ZO_OPEN_ZONE, false);
1543 	case REQ_OP_ZONE_CLOSE:
1544 		return sd_zbc_setup_zone_mgmt_cmnd(cmd, ZO_CLOSE_ZONE, false);
1545 	case REQ_OP_ZONE_FINISH:
1546 		return sd_zbc_setup_zone_mgmt_cmnd(cmd, ZO_FINISH_ZONE, false);
1547 	default:
1548 		WARN_ON_ONCE(1);
1549 		return BLK_STS_NOTSUPP;
1550 	}
1551 }
1552 
1553 static void sd_uninit_command(struct scsi_cmnd *SCpnt)
1554 {
1555 	struct request *rq = scsi_cmd_to_rq(SCpnt);
1556 	struct scsi_device *sdp = SCpnt->device;
1557 	unsigned sector_size = sdp->sector_size;
1558 
1559 	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) {
1560 		if (sector_size > PAGE_SIZE)
1561 			mempool_free(rq->special_vec.bv_page, sd_large_page_pool);
1562 		else
1563 			mempool_free(rq->special_vec.bv_page, sd_page_pool);
1564 	}
1565 }
1566 
1567 static bool sd_need_revalidate(struct gendisk *disk, struct scsi_disk *sdkp)
1568 {
1569 	if (sdkp->device->removable || sdkp->write_prot) {
1570 		if (disk_check_media_change(disk))
1571 			return true;
1572 	}
1573 
1574 	/*
1575 	 * Force a full rescan after ioctl(BLKRRPART).  While the disk state has
1576 	 * nothing to do with partitions, BLKRRPART is used to force a full
1577 	 * revalidate after things like a format for historical reasons.
1578 	 */
1579 	return test_bit(GD_NEED_PART_SCAN, &disk->state);
1580 }
1581 
1582 /**
1583  *	sd_open - open a scsi disk device
1584  *	@disk: disk to open
1585  *	@mode: open mode
1586  *
1587  *	Returns 0 if successful. Returns a negated errno value in case
1588  *	of error.
1589  *
1590  *	Note: This can be called from a user context (e.g. fsck(1))
1591  *	or from within the kernel (e.g. as a result of a mount(1)).
1594  *
1595  *	Locking: called with disk->open_mutex held.
1596  **/
1597 static int sd_open(struct gendisk *disk, blk_mode_t mode)
1598 {
1599 	struct scsi_disk *sdkp = scsi_disk(disk);
1600 	struct scsi_device *sdev = sdkp->device;
1601 	int retval;
1602 
1603 	if (scsi_device_get(sdev))
1604 		return -ENXIO;
1605 
1606 	SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_open\n"));
1607 
1608 	/*
1609 	 * If the device is in error recovery, wait until it is done.
1610 	 * If the device is offline, then disallow any access to it.
1611 	 */
1612 	retval = -ENXIO;
1613 	if (!scsi_block_when_processing_errors(sdev))
1614 		goto error_out;
1615 
1616 	if (sd_need_revalidate(disk, sdkp))
1617 		sd_revalidate_disk(disk);
1618 
1619 	/*
1620 	 * If the drive is empty, just let the open fail.
1621 	 */
1622 	retval = -ENOMEDIUM;
1623 	if (sdev->removable && !sdkp->media_present &&
1624 	    !(mode & BLK_OPEN_NDELAY))
1625 		goto error_out;
1626 
1627 	/*
1628 	 * If the device has the write protect tab set, have the open fail
1629 	 * if the user expects to be able to write to the thing.
1630 	 */
1631 	retval = -EROFS;
1632 	if (sdkp->write_prot && (mode & BLK_OPEN_WRITE))
1633 		goto error_out;
1634 
1635 	/*
1636 	 * It is possible that the disk changing stuff resulted in
1637 	 * the device being taken offline.  If this is the case,
1638 	 * report this to the user, and don't pretend that the
1639 	 * open actually succeeded.
1640 	 */
1641 	retval = -ENXIO;
1642 	if (!scsi_device_online(sdev))
1643 		goto error_out;
1644 
1645 	if ((atomic_inc_return(&sdkp->openers) == 1) && sdev->removable) {
1646 		if (scsi_block_when_processing_errors(sdev))
1647 			scsi_set_medium_removal(sdev, SCSI_REMOVAL_PREVENT);
1648 	}
1649 
1650 	return 0;
1651 
1652 error_out:
1653 	scsi_device_put(sdev);
1654 	return retval;
1655 }
1656 
1657 /**
1658  *	sd_release - invoked when the (last) close(2) is called on this
1659  *	scsi disk.
1660  *	@disk: disk to release
1661  *
1662  *	Returns 0.
1663  *
1664  *	Note: may block (uninterruptible) if error recovery is underway
1665  *	on this disk.
1666  *
1667  *	Locking: called with disk->open_mutex held.
1668  **/
1669 static void sd_release(struct gendisk *disk)
1670 {
1671 	struct scsi_disk *sdkp = scsi_disk(disk);
1672 	struct scsi_device *sdev = sdkp->device;
1673 
1674 	SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_release\n"));
1675 
1676 	if (atomic_dec_return(&sdkp->openers) == 0 && sdev->removable) {
1677 		if (scsi_block_when_processing_errors(sdev))
1678 			scsi_set_medium_removal(sdev, SCSI_REMOVAL_ALLOW);
1679 	}
1680 
1681 	scsi_device_put(sdev);
1682 }
1683 
1684 static int sd_getgeo(struct gendisk *disk, struct hd_geometry *geo)
1685 {
1686 	struct scsi_disk *sdkp = scsi_disk(disk);
1687 	struct scsi_device *sdp = sdkp->device;
1688 	struct Scsi_Host *host = sdp->host;
1689 	sector_t capacity = logical_to_sectors(sdp, sdkp->capacity);
1690 	int diskinfo[4];
1691 
1692 	/* default to most commonly used values */
1693 	diskinfo[0] = 0x40;	/* 1 << 6 */
1694 	diskinfo[1] = 0x20;	/* 1 << 5 */
1695 	diskinfo[2] = capacity >> 11;
1696 
1697 	/* override with calculated, extended default, or driver values */
1698 	if (host->hostt->bios_param)
1699 		host->hostt->bios_param(sdp, disk, capacity, diskinfo);
1700 	else
1701 		scsicam_bios_param(disk, capacity, diskinfo);
1702 
1703 	geo->heads = diskinfo[0];
1704 	geo->sectors = diskinfo[1];
1705 	geo->cylinders = diskinfo[2];
1706 	return 0;
1707 }
1708 
1709 /**
1710  *	sd_ioctl - process an ioctl
1711  *	@bdev: target block device
1712  *	@mode: open mode
1713  *	@cmd: ioctl command number
1714  *	@arg: this is third argument given to ioctl(2) system call.
1715  *	Often contains a pointer.
1716  *
1717  *	Returns 0 if successful (some ioctls return positive numbers on
1718  *	success as well). Returns a negated errno value in case of error.
1719  *
1720  *	Note: most ioctls are forwarded to the block subsystem or further
1721  *	down in the SCSI subsystem.
1722  **/
1723 static int sd_ioctl(struct block_device *bdev, blk_mode_t mode,
1724 		    unsigned int cmd, unsigned long arg)
1725 {
1726 	struct gendisk *disk = bdev->bd_disk;
1727 	struct scsi_disk *sdkp = scsi_disk(disk);
1728 	struct scsi_device *sdp = sdkp->device;
1729 	void __user *p = (void __user *)arg;
1730 	int error;
1731 
1732 	SCSI_LOG_IOCTL(1, sd_printk(KERN_INFO, sdkp,
1733 				    "sd_ioctl: disk=%s, cmd=0x%x\n",
1734 				    disk->disk_name, cmd));
1735 
1736 	if (bdev_is_partition(bdev) && !capable(CAP_SYS_RAWIO))
1737 		return -ENOIOCTLCMD;
1738 
1739 	/*
1740 	 * If we are in the middle of error recovery, don't let anyone
1741 	 * else try and use this device.  Also, if error recovery fails, it
1742 	 * may try and take the device offline, in which case all further
1743 	 * access to the device is prohibited.
1744 	 */
1745 	error = scsi_ioctl_block_when_processing_errors(sdp, cmd,
1746 			(mode & BLK_OPEN_NDELAY));
1747 	if (error)
1748 		return error;
1749 
1750 	if (is_sed_ioctl(cmd))
1751 		return sed_ioctl(sdkp->opal_dev, cmd, p);
1752 	return scsi_ioctl(sdp, mode & BLK_OPEN_WRITE, cmd, p);
1753 }
1754 
1755 static void set_media_not_present(struct scsi_disk *sdkp)
1756 {
1757 	if (sdkp->media_present)
1758 		sdkp->device->changed = 1;
1759 
1760 	if (sdkp->device->removable) {
1761 		sdkp->media_present = 0;
1762 		sdkp->capacity = 0;
1763 	}
1764 }
1765 
1766 static int media_not_present(struct scsi_disk *sdkp,
1767 			     struct scsi_sense_hdr *sshdr)
1768 {
1769 	if (!scsi_sense_valid(sshdr))
1770 		return 0;
1771 
1772 	/* not invoked for commands that could return deferred errors */
1773 	switch (sshdr->sense_key) {
1774 	case UNIT_ATTENTION:
1775 	case NOT_READY:
1776 		/* medium not present */
1777 		if (sshdr->asc == 0x3A) {
1778 			set_media_not_present(sdkp);
1779 			return 1;
1780 		}
1781 	}
1782 	return 0;
1783 }
1784 
1785 /**
1786  *	sd_check_events - check media events
1787  *	@disk: kernel device descriptor
1788  *	@clearing: disk events currently being cleared
1789  *
1790  *	Returns mask of DISK_EVENT_*.
1791  *
1792  *	Note: this function is invoked from the block subsystem.
1793  **/
1794 static unsigned int sd_check_events(struct gendisk *disk, unsigned int clearing)
1795 {
1796 	struct scsi_disk *sdkp = disk->private_data;
1797 	struct scsi_device *sdp;
1798 	int retval;
1799 	bool disk_changed;
1800 
1801 	if (!sdkp)
1802 		return 0;
1803 
1804 	sdp = sdkp->device;
1805 	SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_check_events\n"));
1806 
1807 	/*
1808 	 * If the device is offline, don't send any commands - just pretend as
1809 	 * if the command failed.  If the device ever comes back online, we
1810 	 * can deal with it then.  It is only because of unrecoverable errors
1811 	 * that we would ever take a device offline in the first place.
1812 	 */
1813 	if (!scsi_device_online(sdp)) {
1814 		set_media_not_present(sdkp);
1815 		goto out;
1816 	}
1817 
1818 	/*
1819 	 * Using TEST_UNIT_READY enables differentiation between drive with
1820 	 * no cartridge loaded - NOT READY, drive with changed cartridge -
1821 	 * UNIT ATTENTION, or with same cartridge - GOOD STATUS.
1822 	 *
1823 	 * Drives that auto spin down, e.g. iomega jaz 1G, will be started
1824 	 * by sd_spinup_disk() from sd_revalidate_disk(), which happens whenever
1825 	 * sd_revalidate() is called.
1826 	 */
1827 	if (scsi_block_when_processing_errors(sdp)) {
1828 		struct scsi_sense_hdr sshdr = { 0, };
1829 
1830 		retval = scsi_test_unit_ready(sdp, SD_TIMEOUT, sdkp->max_retries,
1831 					      &sshdr);
1832 
1833 		/* failed to execute TUR, assume media not present */
1834 		if (retval < 0 || host_byte(retval)) {
1835 			set_media_not_present(sdkp);
1836 			goto out;
1837 		}
1838 
1839 		if (media_not_present(sdkp, &sshdr))
1840 			goto out;
1841 	}
1842 
1843 	/*
1844 	 * For removable SCSI disks we have to recognise the presence
1845 	 * of a disk in the drive.
1846 	 */
1847 	if (!sdkp->media_present)
1848 		sdp->changed = 1;
1849 	sdkp->media_present = 1;
1850 out:
1851 	/*
1852 	 * sdp->changed is set under the following conditions:
1853 	 *
1854 	 *	Medium present state has changed in either direction.
1855 	 *	Device has indicated UNIT_ATTENTION.
1856 	 */
1857 	disk_changed = sdp->changed;
1858 	sdp->changed = 0;
1859 	return disk_changed ? DISK_EVENT_MEDIA_CHANGE : 0;
1860 }
1861 
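/*
 * sd_sync_cache - flush the drive's volatile write cache
 *
 * Issues SYNCHRONIZE CACHE(10) or (16), depending on sdev->use_16_for_sync,
 * with BLK_MQ_REQ_PM set so the command is still accepted while the device
 * is being suspended or shut down.  Conditions that make flushing pointless
 * (no medium, command not supported, format in progress, device gone) are
 * reported as success so that shutdown and suspend can proceed.
 */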
1862 static int sd_sync_cache(struct scsi_disk *sdkp)
1863 {
1864 	int res;
1865 	struct scsi_device *sdp = sdkp->device;
1866 	const int timeout = sdp->request_queue->rq_timeout
1867 		* SD_FLUSH_TIMEOUT_MULTIPLIER;
1868 	/* Leave the rest of the command zero to indicate flush everything. */
1869 	const unsigned char cmd[16] = { sdp->use_16_for_sync ?
1870 				SYNCHRONIZE_CACHE_16 : SYNCHRONIZE_CACHE };
1871 	struct scsi_sense_hdr sshdr;
1872 	struct scsi_failure failure_defs[] = {
1873 		{
1874 			.allowed = 3,
1875 			.result = SCMD_FAILURE_RESULT_ANY,
1876 		},
1877 		{}
1878 	};
1879 	struct scsi_failures failures = {
1880 		.failure_definitions = failure_defs,
1881 	};
1882 	const struct scsi_exec_args exec_args = {
1883 		.req_flags = BLK_MQ_REQ_PM,
1884 		.sshdr = &sshdr,
1885 		.failures = &failures,
1886 	};
1887 
1888 	if (!scsi_device_online(sdp))
1889 		return -ENODEV;
1890 
1891 	res = scsi_execute_cmd(sdp, cmd, REQ_OP_DRV_IN, NULL, 0, timeout,
1892 			       sdkp->max_retries, &exec_args);
1893 	if (res) {
1894 		sd_print_result(sdkp, "Synchronize Cache(10) failed", res);
1895 
1896 		if (res < 0)
1897 			return res;
1898 
1899 		if (scsi_status_is_check_condition(res) &&
1900 		    scsi_sense_valid(&sshdr)) {
1901 			sd_print_sense_hdr(sdkp, &sshdr);
1902 
1903 			/* we need to evaluate the error return  */
1904 			if (sshdr.asc == 0x3a ||	/* medium not present */
1905 			    sshdr.asc == 0x20 ||	/* invalid command */
1906 			    (sshdr.asc == 0x74 && sshdr.ascq == 0x71))	/* drive is password locked */
1907 				/* this is no error here */
1908 				return 0;
1909 
1910 			/*
1911 			 * If a format is in progress or if the drive does not
1912 			 * support sync, there is not much we can do because
1913 			 * this is called during shutdown or suspend so just
1914 			 * return success so those operations can proceed.
1915 			 */
1916 			if ((sshdr.asc == 0x04 && sshdr.ascq == 0x04) ||
1917 			    sshdr.sense_key == ILLEGAL_REQUEST)
1918 				return 0;
1919 		}
1920 
1921 		switch (host_byte(res)) {
1922 		/* ignore errors due to racing a disconnection */
1923 		case DID_BAD_TARGET:
1924 		case DID_NO_CONNECT:
1925 			return 0;
1926 		/* signal the upper layer it might try again */
1927 		case DID_BUS_BUSY:
1928 		case DID_IMM_RETRY:
1929 		case DID_REQUEUE:
1930 		case DID_SOFT_ERROR:
1931 			return -EBUSY;
1932 		default:
1933 			return -EIO;
1934 		}
1935 	}
1936 	return 0;
1937 }
1938 
1939 static void sd_rescan(struct device *dev)
1940 {
1941 	struct scsi_disk *sdkp = dev_get_drvdata(dev);
1942 
1943 	sd_revalidate_disk(sdkp->disk);
1944 }
1945 
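/*
 * sd_get_unique_id - derive a unique ID from the Device Identification VPD
 *
 * Walks the designation descriptors of VPD page 0x83.  Per SPC, byte 1 of
 * each descriptor carries the association (bits 4-5, 0 = addressed logical
 * unit) and the designator type (low nibble), byte 3 the designator length,
 * and the designator itself starts at byte 4, which is what the loop below
 * relies on.  Only 8, 12 and 16 byte designators are considered, and a
 * 16-byte designator is preferred when present.
 */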
1946 static int sd_get_unique_id(struct gendisk *disk, u8 id[16],
1947 		enum blk_unique_id type)
1948 {
1949 	struct scsi_device *sdev = scsi_disk(disk)->device;
1950 	const struct scsi_vpd *vpd;
1951 	const unsigned char *d;
1952 	int ret = -ENXIO, len;
1953 
1954 	rcu_read_lock();
1955 	vpd = rcu_dereference(sdev->vpd_pg83);
1956 	if (!vpd)
1957 		goto out_unlock;
1958 
1959 	ret = -EINVAL;
1960 	for (d = vpd->data + 4; d < vpd->data + vpd->len; d += d[3] + 4) {
1961 		/* we only care about designators with LU association */
1962 		if (((d[1] >> 4) & 0x3) != 0x00)
1963 			continue;
1964 		if ((d[1] & 0xf) != type)
1965 			continue;
1966 
1967 		/*
1968 		 * Only exit early if a 16-byte descriptor was found.  Otherwise
1969 		 * keep looking as one with more entropy might still show up.
1970 		 */
1971 		len = d[3];
1972 		if (len != 8 && len != 12 && len != 16)
1973 			continue;
1974 		ret = len;
1975 		memcpy(id, d + 4, len);
1976 		if (len == 16)
1977 			break;
1978 	}
1979 out_unlock:
1980 	rcu_read_unlock();
1981 	return ret;
1982 }
1983 
1984 static int sd_scsi_to_pr_err(struct scsi_sense_hdr *sshdr, int result)
1985 {
1986 	switch (host_byte(result)) {
1987 	case DID_TRANSPORT_MARGINAL:
1988 	case DID_TRANSPORT_DISRUPTED:
1989 	case DID_BUS_BUSY:
1990 		return PR_STS_RETRY_PATH_FAILURE;
1991 	case DID_NO_CONNECT:
1992 		return PR_STS_PATH_FAILED;
1993 	case DID_TRANSPORT_FAILFAST:
1994 		return PR_STS_PATH_FAST_FAILED;
1995 	}
1996 
1997 	switch (status_byte(result)) {
1998 	case SAM_STAT_RESERVATION_CONFLICT:
1999 		return PR_STS_RESERVATION_CONFLICT;
2000 	case SAM_STAT_CHECK_CONDITION:
2001 		if (!scsi_sense_valid(sshdr))
2002 			return PR_STS_IOERR;
2003 
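		/*
		 * ASC 0x24 (INVALID FIELD IN CDB) and 0x26 (INVALID FIELD IN
		 * PARAMETER LIST) indicate a malformed PR request rather than
		 * a path or device failure, hence -EINVAL.
		 */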
2004 		if (sshdr->sense_key == ILLEGAL_REQUEST &&
2005 		    (sshdr->asc == 0x26 || sshdr->asc == 0x24))
2006 			return -EINVAL;
2007 
2008 		fallthrough;
2009 	default:
2010 		return PR_STS_IOERR;
2011 	}
2012 }
2013 
2014 static int sd_pr_in_command(struct block_device *bdev, u8 sa,
2015 			    unsigned char *data, int data_len)
2016 {
2017 	struct scsi_disk *sdkp = scsi_disk(bdev->bd_disk);
2018 	struct scsi_device *sdev = sdkp->device;
2019 	struct scsi_sense_hdr sshdr;
2020 	u8 cmd[10] = { PERSISTENT_RESERVE_IN, sa };
2021 	struct scsi_failure failure_defs[] = {
2022 		{
2023 			.sense = UNIT_ATTENTION,
2024 			.asc = SCMD_FAILURE_ASC_ANY,
2025 			.ascq = SCMD_FAILURE_ASCQ_ANY,
2026 			.allowed = 5,
2027 			.result = SAM_STAT_CHECK_CONDITION,
2028 		},
2029 		{}
2030 	};
2031 	struct scsi_failures failures = {
2032 		.failure_definitions = failure_defs,
2033 	};
2034 	const struct scsi_exec_args exec_args = {
2035 		.sshdr = &sshdr,
2036 		.failures = &failures,
2037 	};
2038 	int result;
2039 
2040 	put_unaligned_be16(data_len, &cmd[7]);
2041 
2042 	result = scsi_execute_cmd(sdev, cmd, REQ_OP_DRV_IN, data, data_len,
2043 				  SD_TIMEOUT, sdkp->max_retries, &exec_args);
2044 	if (scsi_status_is_check_condition(result) &&
2045 	    scsi_sense_valid(&sshdr)) {
2046 		sdev_printk(KERN_INFO, sdev, "PR command failed: %d\n", result);
2047 		scsi_print_sense_hdr(sdev, NULL, &sshdr);
2048 	}
2049 
2050 	if (result <= 0)
2051 		return result;
2052 
2053 	return sd_scsi_to_pr_err(&sshdr, result);
2054 }
2055 
2056 static int sd_pr_read_keys(struct block_device *bdev, struct pr_keys *keys_info)
2057 {
2058 	int result, i, data_offset, num_copy_keys;
2059 	u32 num_keys = keys_info->num_keys;
2060 	int data_len;
2061 	u8 *data;
2062 
2063 	/*
2064 	 * Each reservation key takes 8 bytes and there is an 8-byte header
2065 	 * before the reservation key list. The total size must fit into the
2066 	 * 16-bit ALLOCATION LENGTH field.
2067 	 */
2068 	if (check_mul_overflow(num_keys, 8, &data_len) ||
2069 	    check_add_overflow(data_len, 8, &data_len) ||
2070 	    data_len > USHRT_MAX)
2071 		return -EINVAL;
2072 
2073 	data = kzalloc(data_len, GFP_KERNEL);
2074 	if (!data)
2075 		return -ENOMEM;
2076 
2077 	result = sd_pr_in_command(bdev, READ_KEYS, data, data_len);
2078 	if (result)
2079 		goto free_data;
2080 
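	/*
	 * READ KEYS parameter data: PRGENERATION in bytes 0-3, ADDITIONAL
	 * LENGTH (length of the key list in bytes) in bytes 4-7, followed by
	 * the 8-byte reservation keys themselves.
	 */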
2081 	keys_info->generation = get_unaligned_be32(&data[0]);
2082 	keys_info->num_keys = get_unaligned_be32(&data[4]) / 8;
2083 
2084 	data_offset = 8;
2085 	num_copy_keys = min(num_keys, keys_info->num_keys);
2086 
2087 	for (i = 0; i < num_copy_keys; i++) {
2088 		keys_info->keys[i] = get_unaligned_be64(&data[data_offset]);
2089 		data_offset += 8;
2090 	}
2091 
2092 free_data:
2093 	kfree(data);
2094 	return result;
2095 }
2096 
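/*
 * sd_pr_read_reservation - report the held persistent reservation, if any
 *
 * The READ RESERVATION parameter data parsed below carries PRGENERATION in
 * bytes 0-3, ADDITIONAL LENGTH in bytes 4-7, the reservation key in bytes
 * 8-15 and the scope/type byte at offset 21, with the type in the low
 * nibble.
 */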
2097 static int sd_pr_read_reservation(struct block_device *bdev,
2098 				  struct pr_held_reservation *rsv)
2099 {
2100 	struct scsi_disk *sdkp = scsi_disk(bdev->bd_disk);
2101 	struct scsi_device *sdev = sdkp->device;
2102 	u8 data[24] = { };
2103 	int result, len;
2104 
2105 	result = sd_pr_in_command(bdev, READ_RESERVATION, data, sizeof(data));
2106 	if (result)
2107 		return result;
2108 
2109 	len = get_unaligned_be32(&data[4]);
2110 	if (!len)
2111 		return 0;
2112 
2113 	/* Make sure we have at least the key and type */
2114 	if (len < 14) {
2115 		sdev_printk(KERN_INFO, sdev,
2116 			    "READ RESERVATION failed due to short return buffer of %d bytes\n",
2117 			    len);
2118 		return -EINVAL;
2119 	}
2120 
2121 	rsv->generation = get_unaligned_be32(&data[0]);
2122 	rsv->key = get_unaligned_be64(&data[8]);
2123 	rsv->type = scsi_pr_type_to_block(data[21] & 0x0f);
2124 	return 0;
2125 }
2126 
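/*
 * sd_pr_out_command - issue a PERSISTENT RESERVE OUT command
 *
 * The service action goes into CDB byte 1 and the reservation type into CDB
 * byte 2.  The 24-byte parameter list carries the RESERVATION KEY in bytes
 * 0-7, the SERVICE ACTION RESERVATION KEY in bytes 8-15 and the flags byte
 * (APTPL in bit 0) at offset 20.  Unit attentions are retried up to five
 * times via the scsi_failures table.
 */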
2127 static int sd_pr_out_command(struct block_device *bdev, u8 sa, u64 key,
2128 			     u64 sa_key, enum scsi_pr_type type, u8 flags)
2129 {
2130 	struct scsi_disk *sdkp = scsi_disk(bdev->bd_disk);
2131 	struct scsi_device *sdev = sdkp->device;
2132 	struct scsi_sense_hdr sshdr;
2133 	struct scsi_failure failure_defs[] = {
2134 		{
2135 			.sense = UNIT_ATTENTION,
2136 			.asc = SCMD_FAILURE_ASC_ANY,
2137 			.ascq = SCMD_FAILURE_ASCQ_ANY,
2138 			.allowed = 5,
2139 			.result = SAM_STAT_CHECK_CONDITION,
2140 		},
2141 		{}
2142 	};
2143 	struct scsi_failures failures = {
2144 		.failure_definitions = failure_defs,
2145 	};
2146 	const struct scsi_exec_args exec_args = {
2147 		.sshdr = &sshdr,
2148 		.failures = &failures,
2149 	};
2150 	int result;
2151 	u8 cmd[16] = { 0, };
2152 	u8 data[24] = { 0, };
2153 
2154 	cmd[0] = PERSISTENT_RESERVE_OUT;
2155 	cmd[1] = sa;
2156 	cmd[2] = type;
2157 	put_unaligned_be32(sizeof(data), &cmd[5]);
2158 
2159 	put_unaligned_be64(key, &data[0]);
2160 	put_unaligned_be64(sa_key, &data[8]);
2161 	data[20] = flags;
2162 
2163 	result = scsi_execute_cmd(sdev, cmd, REQ_OP_DRV_OUT, &data,
2164 				  sizeof(data), SD_TIMEOUT, sdkp->max_retries,
2165 				  &exec_args);
2166 
2167 	if (scsi_status_is_check_condition(result) &&
2168 	    scsi_sense_valid(&sshdr)) {
2169 		sdev_printk(KERN_INFO, sdev, "PR command failed: %d\n", result);
2170 		scsi_print_sense_hdr(sdev, NULL, &sshdr);
2171 	}
2172 
2173 	if (result <= 0)
2174 		return result;
2175 
2176 	return sd_scsi_to_pr_err(&sshdr, result);
2177 }
2178 
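/*
 * The numeric constants passed to sd_pr_out_command() by the pr_ops helpers
 * below are the PERSISTENT RESERVE OUT service actions defined by SPC:
 * 0x00 REGISTER, 0x01 RESERVE, 0x02 RELEASE, 0x03 CLEAR, 0x04 PREEMPT,
 * 0x05 PREEMPT AND ABORT and 0x06 REGISTER AND IGNORE EXISTING KEY.
 */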
2179 static int sd_pr_register(struct block_device *bdev, u64 old_key, u64 new_key,
2180 		u32 flags)
2181 {
2182 	if (flags & ~PR_FL_IGNORE_KEY)
2183 		return -EOPNOTSUPP;
2184 	return sd_pr_out_command(bdev, (flags & PR_FL_IGNORE_KEY) ? 0x06 : 0x00,
2185 			old_key, new_key, 0,
2186 			(1 << 0) /* APTPL */);
2187 }
2188 
2189 static int sd_pr_reserve(struct block_device *bdev, u64 key, enum pr_type type,
2190 		u32 flags)
2191 {
2192 	if (flags)
2193 		return -EOPNOTSUPP;
2194 	return sd_pr_out_command(bdev, 0x01, key, 0,
2195 				 block_pr_type_to_scsi(type), 0);
2196 }
2197 
2198 static int sd_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
2199 {
2200 	return sd_pr_out_command(bdev, 0x02, key, 0,
2201 				 block_pr_type_to_scsi(type), 0);
2202 }
2203 
2204 static int sd_pr_preempt(struct block_device *bdev, u64 old_key, u64 new_key,
2205 		enum pr_type type, bool abort)
2206 {
2207 	return sd_pr_out_command(bdev, abort ? 0x05 : 0x04, old_key, new_key,
2208 				 block_pr_type_to_scsi(type), 0);
2209 }
2210 
2211 static int sd_pr_clear(struct block_device *bdev, u64 key)
2212 {
2213 	return sd_pr_out_command(bdev, 0x03, key, 0, 0, 0);
2214 }
2215 
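/*
 * These callbacks implement the block layer persistent reservation API and
 * are reached from user space through the IOC_PR_* ioctls on the block
 * device (see include/uapi/linux/pr.h).
 */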
2216 static const struct pr_ops sd_pr_ops = {
2217 	.pr_register	= sd_pr_register,
2218 	.pr_reserve	= sd_pr_reserve,
2219 	.pr_release	= sd_pr_release,
2220 	.pr_preempt	= sd_pr_preempt,
2221 	.pr_clear	= sd_pr_clear,
2222 	.pr_read_keys	= sd_pr_read_keys,
2223 	.pr_read_reservation = sd_pr_read_reservation,
2224 };
2225 
2226 static void scsi_disk_free_disk(struct gendisk *disk)
2227 {
2228 	struct scsi_disk *sdkp = scsi_disk(disk);
2229 
2230 	put_device(&sdkp->disk_dev);
2231 }
2232 
2233 /**
2234  *	sd_eh_reset - reset error handling callback
2235  *	@scmd:		sd-issued command that has failed
2236  *
2237  *	This function is called by the SCSI midlayer before starting
2238  *	SCSI EH. When counting medium access failures we have to be
2239  *	careful to register it only once per device and SCSI EH run;
2240  *	there might be several timed out commands which would cause the
2241  *	'max_medium_access_timeouts' counter to trigger already after the
2242  *	first SCSI EH run and set the device offline.
2243  *	So this function resets the internal counter before starting SCSI EH.
2244  **/
2245 static void sd_eh_reset(struct scsi_cmnd *scmd)
2246 {
2247 	struct scsi_disk *sdkp = scsi_disk(scsi_cmd_to_rq(scmd)->q->disk);
2248 
2249 	/* New SCSI EH run, reset gate variable */
2250 	sdkp->ignore_medium_access_errors = false;
2251 }
2252 
2253 /**
2254  *	sd_eh_action - error handling callback
2255  *	@scmd:		sd-issued command that has failed
2256  *	@eh_disp:	The recovery disposition suggested by the midlayer
2257  *
2258  *	This function is called by the SCSI midlayer upon completion of an
2259  *	error test command (currently TEST UNIT READY). The result of sending
2260  *	the eh command is passed in eh_disp.  We're looking for devices that
2261  *	fail medium access commands but are OK with non-access commands like
2262  *	TEST UNIT READY (and so would wrongly be seen as having made a
2263  *	successful recovery).
2264  **/
2265 static int sd_eh_action(struct scsi_cmnd *scmd, int eh_disp)
2266 {
2267 	struct scsi_disk *sdkp = scsi_disk(scsi_cmd_to_rq(scmd)->q->disk);
2268 	struct scsi_device *sdev = scmd->device;
2269 
2270 	if (!scsi_device_online(sdev) ||
2271 	    !scsi_medium_access_command(scmd) ||
2272 	    host_byte(scmd->result) != DID_TIME_OUT ||
2273 	    eh_disp != SUCCESS)
2274 		return eh_disp;
2275 
2276 	/*
2277 	 * The device has timed out executing a medium access command.
2278 	 * However, the TEST UNIT READY command sent during error
2279 	 * handling completed successfully. Either the device is in the
2280 	 * process of recovering or it has suffered an internal failure
2281 	 * that prevents access to the storage medium.
2282 	 */
2283 	if (!sdkp->ignore_medium_access_errors) {
2284 		sdkp->medium_access_timed_out++;
2285 		sdkp->ignore_medium_access_errors = true;
2286 	}
2287 
2288 	/*
2289 	 * If the device keeps failing read/write commands but TEST UNIT
2290 	 * READY always completes successfully we assume that medium
2291 	 * access is no longer possible and take the device offline.
2292 	 */
2293 	if (sdkp->medium_access_timed_out >= sdkp->max_medium_access_timeouts) {
2294 		scmd_printk(KERN_ERR, scmd,
2295 			    "Medium access timeout failure. Offlining disk!\n");
2296 		mutex_lock(&sdev->state_mutex);
2297 		scsi_device_set_state(sdev, SDEV_OFFLINE);
2298 		mutex_unlock(&sdev->state_mutex);
2299 
2300 		return SUCCESS;
2301 	}
2302 
2303 	return eh_disp;
2304 }
2305 
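/*
 * sd_completed_bytes - determine how much of a failed transfer is still valid
 *
 * Uses the bad LBA reported in the sense INFORMATION field to count the
 * bytes that completed before the failing logical block, capped by the
 * number of bytes that were actually transferred according to the residual
 * count.
 */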
2306 static unsigned int sd_completed_bytes(struct scsi_cmnd *scmd)
2307 {
2308 	struct request *req = scsi_cmd_to_rq(scmd);
2309 	struct scsi_device *sdev = scmd->device;
2310 	unsigned int transferred, good_bytes;
2311 	u64 start_lba, end_lba, bad_lba;
2312 
2313 	/*
2314 	 * Some commands have a payload smaller than the device logical
2315 	 * block size (e.g. INQUIRY on a 4K disk).
2316 	 */
2317 	if (scsi_bufflen(scmd) <= sdev->sector_size)
2318 		return 0;
2319 
2320 	/* Check if we have a 'bad_lba' information */
2321 	if (!scsi_get_sense_info_fld(scmd->sense_buffer,
2322 				     SCSI_SENSE_BUFFERSIZE,
2323 				     &bad_lba))
2324 		return 0;
2325 
2326 	/*
2327 	 * If the bad lba was reported incorrectly, we have no idea where
2328 	 * the error is.
2329 	 */
2330 	start_lba = sectors_to_logical(sdev, blk_rq_pos(req));
2331 	end_lba = start_lba + bytes_to_logical(sdev, scsi_bufflen(scmd));
2332 	if (bad_lba < start_lba || bad_lba >= end_lba)
2333 		return 0;
2334 
2335 	/*
2336 	 * resid is optional but mostly filled in.  When it's unused,
2337 	 * its value is zero, so we assume the whole buffer transferred
2338 	 */
2339 	transferred = scsi_bufflen(scmd) - scsi_get_resid(scmd);
2340 
2341 	/* This computation should always be done in terms of the
2342 	 * resolution of the device's medium.
2343 	 */
2344 	good_bytes = logical_to_bytes(sdev, bad_lba - start_lba);
2345 
2346 	return min(good_bytes, transferred);
2347 }
2348 
2349 /**
2350  *	sd_done - bottom half handler: called when the lower level
2351  *	driver has completed (successfully or otherwise) a scsi command.
2352  *	@SCpnt: mid-level's per command structure.
2353  *
2354  *	Note: potentially run from within an ISR. Must not block.
2355  **/
2356 static int sd_done(struct scsi_cmnd *SCpnt)
2357 {
2358 	int result = SCpnt->result;
2359 	unsigned int good_bytes = result ? 0 : scsi_bufflen(SCpnt);
2360 	unsigned int sector_size = SCpnt->device->sector_size;
2361 	unsigned int resid;
2362 	struct scsi_sense_hdr sshdr;
2363 	struct request *req = scsi_cmd_to_rq(SCpnt);
2364 	struct scsi_disk *sdkp = scsi_disk(req->q->disk);
2365 	int sense_valid = 0;
2366 	int sense_deferred = 0;
2367 
2368 	switch (req_op(req)) {
2369 	case REQ_OP_DISCARD:
2370 	case REQ_OP_WRITE_ZEROES:
2371 	case REQ_OP_ZONE_RESET:
2372 	case REQ_OP_ZONE_RESET_ALL:
2373 	case REQ_OP_ZONE_OPEN:
2374 	case REQ_OP_ZONE_CLOSE:
2375 	case REQ_OP_ZONE_FINISH:
2376 		if (!result) {
2377 			good_bytes = blk_rq_bytes(req);
2378 			scsi_set_resid(SCpnt, 0);
2379 		} else {
2380 			good_bytes = 0;
2381 			scsi_set_resid(SCpnt, blk_rq_bytes(req));
2382 		}
2383 		break;
2384 	default:
2385 		/*
2386 		 * In case of bogus fw or device, we could end up having
2387 		 * an unaligned partial completion. Check this here and force
2388 		 * alignment.
2389 		 */
2390 		resid = scsi_get_resid(SCpnt);
2391 		if (resid & (sector_size - 1)) {
2392 			sd_printk(KERN_INFO, sdkp,
2393 				"Unaligned partial completion (resid=%u, sector_sz=%u)\n",
2394 				resid, sector_size);
2395 			scsi_print_command(SCpnt);
2396 			resid = min(scsi_bufflen(SCpnt),
2397 				    round_up(resid, sector_size));
2398 			scsi_set_resid(SCpnt, resid);
2399 		}
2400 	}
2401 
2402 	if (result) {
2403 		sense_valid = scsi_command_normalize_sense(SCpnt, &sshdr);
2404 		if (sense_valid)
2405 			sense_deferred = scsi_sense_is_deferred(&sshdr);
2406 	}
2407 	sdkp->medium_access_timed_out = 0;
2408 
2409 	if (!scsi_status_is_check_condition(result) &&
2410 	    (!sense_valid || sense_deferred))
2411 		goto out;
2412 
2413 	switch (sshdr.sense_key) {
2414 	case HARDWARE_ERROR:
2415 	case MEDIUM_ERROR:
2416 		good_bytes = sd_completed_bytes(SCpnt);
2417 		break;
2418 	case RECOVERED_ERROR:
2419 		good_bytes = scsi_bufflen(SCpnt);
2420 		break;
2421 	case NO_SENSE:
2422 		/* This indicates a false check condition, so ignore it.  An
2423 		 * unknown amount of data was transferred, so treat it as an
2424 		 * error and report zero good bytes.
2425 		 */
2426 		SCpnt->result = 0;
2427 		memset(SCpnt->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
2428 		break;
2429 	case ABORTED_COMMAND:
2430 		if (sshdr.asc == 0x10)  /* DIF: Target detected corruption */
2431 			good_bytes = sd_completed_bytes(SCpnt);
2432 		break;
2433 	case ILLEGAL_REQUEST:
2434 		switch (sshdr.asc) {
2435 		case 0x10:	/* DIX: Host detected corruption */
2436 			good_bytes = sd_completed_bytes(SCpnt);
2437 			break;
2438 		case 0x20:	/* INVALID COMMAND OPCODE */
2439 		case 0x24:	/* INVALID FIELD IN CDB */
2440 			switch (SCpnt->cmnd[0]) {
2441 			case UNMAP:
2442 				sd_disable_discard(sdkp);
2443 				break;
2444 			case WRITE_SAME_16:
2445 			case WRITE_SAME:
2446 				if (SCpnt->cmnd[1] & 8) { /* UNMAP */
2447 					sd_disable_discard(sdkp);
2448 				} else {
2449 					sd_disable_write_same(sdkp);
2450 					req->rq_flags |= RQF_QUIET;
2451 				}
2452 				break;
2453 			}
2454 		}
2455 		break;
2456 	default:
2457 		break;
2458 	}
2459 
2460  out:
2461 	if (sdkp->device->type == TYPE_ZBC)
2462 		good_bytes = sd_zbc_complete(SCpnt, good_bytes, &sshdr);
2463 
2464 	SCSI_LOG_HLCOMPLETE(1, scmd_printk(KERN_INFO, SCpnt,
2465 					   "sd_done: completed %d of %d bytes\n",
2466 					   good_bytes, scsi_bufflen(SCpnt)));
2467 
2468 	return good_bytes;
2469 }
2470 
2471 /*
2472  * spinup disk - called only in sd_revalidate_disk()
2473  */
2474 static void
2475 sd_spinup_disk(struct scsi_disk *sdkp)
2476 {
2477 	static const u8 cmd[10] = { TEST_UNIT_READY };
2478 	unsigned long spintime_expire = 0;
2479 	int spintime, sense_valid = 0;
2480 	unsigned int the_result;
2481 	struct scsi_sense_hdr sshdr;
2482 	struct scsi_failure failure_defs[] = {
2483 		/* Do not retry Medium Not Present */
2484 		{
2485 			.sense = UNIT_ATTENTION,
2486 			.asc = 0x3A,
2487 			.ascq = SCMD_FAILURE_ASCQ_ANY,
2488 			.result = SAM_STAT_CHECK_CONDITION,
2489 		},
2490 		{
2491 			.sense = NOT_READY,
2492 			.asc = 0x3A,
2493 			.ascq = SCMD_FAILURE_ASCQ_ANY,
2494 			.result = SAM_STAT_CHECK_CONDITION,
2495 		},
2496 		/* Retry up to 3 times when scsi_status_is_good() would return false */
2497 		{
2498 			.result = SCMD_FAILURE_STAT_ANY,
2499 			.allowed = 3,
2500 		},
2501 		{}
2502 	};
2503 	struct scsi_failures failures = {
2504 		.failure_definitions = failure_defs,
2505 	};
2506 	const struct scsi_exec_args exec_args = {
2507 		.sshdr = &sshdr,
2508 		.failures = &failures,
2509 	};
2510 
2511 	spintime = 0;
2512 
2513 	/* Spin up drives, as required.  Only do this at boot time */
2514 	/* Spinup needs to be done for module loads too. */
2515 	do {
2516 		bool media_was_present = sdkp->media_present;
2517 
2518 		scsi_failures_reset_retries(&failures);
2519 
2520 		the_result = scsi_execute_cmd(sdkp->device, cmd, REQ_OP_DRV_IN,
2521 					      NULL, 0, SD_TIMEOUT,
2522 					      sdkp->max_retries, &exec_args);
2523 
2524 
2525 		if (the_result > 0) {
2526 			/*
2527 			 * If the drive has indicated to us that it doesn't
2528 			 * have any media in it, don't bother with any more
2529 			 * polling.
2530 			 */
2531 			if (media_not_present(sdkp, &sshdr)) {
2532 				if (media_was_present)
2533 					sd_printk(KERN_NOTICE, sdkp,
2534 						  "Media removed, stopped polling\n");
2535 				return;
2536 			}
2537 			sense_valid = scsi_sense_valid(&sshdr);
2538 		}
2539 
2540 		if (!scsi_status_is_check_condition(the_result)) {
2541 			/* no sense, TUR either succeeded or failed
2542 			 * with a status error */
2543 			if (!spintime && !scsi_status_is_good(the_result)) {
2544 				sd_print_result(sdkp, "Test Unit Ready failed",
2545 						the_result);
2546 			}
2547 			break;
2548 		}
2549 
2550 		/*
2551 		 * The device does not want the automatic start to be issued.
2552 		 */
2553 		if (sdkp->device->no_start_on_add)
2554 			break;
2555 
2556 		if (sense_valid && sshdr.sense_key == NOT_READY) {
2557 			if (sshdr.asc == 4 && sshdr.ascq == 3)
2558 				break;	/* manual intervention required */
2559 			if (sshdr.asc == 4 && sshdr.ascq == 0xb)
2560 				break;	/* standby */
2561 			if (sshdr.asc == 4 && sshdr.ascq == 0xc)
2562 				break;	/* unavailable */
2563 			if (sshdr.asc == 4 && sshdr.ascq == 0x1b)
2564 				break;	/* sanitize in progress */
2565 			if (sshdr.asc == 4 && sshdr.ascq == 0x24)
2566 				break;	/* depopulation in progress */
2567 			if (sshdr.asc == 4 && sshdr.ascq == 0x25)
2568 				break;	/* depopulation restoration in progress */
2569 			/*
2570 			 * Issue command to spin up drive when not ready
2571 			 */
2572 			if (!spintime) {
2573 				/* Return immediately and start spin cycle */
2574 				const u8 start_cmd[10] = {
2575 					[0] = START_STOP,
2576 					[1] = 1,
2577 					[4] = sdkp->device->start_stop_pwr_cond ?
2578 						0x11 : 1,
2579 				};
2580 
2581 				sd_printk(KERN_NOTICE, sdkp, "Spinning up disk...");
2582 				scsi_execute_cmd(sdkp->device, start_cmd,
2583 						 REQ_OP_DRV_IN, NULL, 0,
2584 						 SD_TIMEOUT, sdkp->max_retries,
2585 						 &exec_args);
2586 				spintime_expire = jiffies + 100 * HZ;
2587 				spintime = 1;
2588 			}
2589 			/* Wait 1 second for next try */
2590 			msleep(1000);
2591 			printk(KERN_CONT ".");
2592 
2593 		/*
2594 		 * Wait for USB flash devices with slow firmware.
2595 		 * Yes, this sense key/ASC combination shouldn't
2596 		 * occur here.  It's characteristic of these devices.
2597 		 */
2598 		} else if (sense_valid &&
2599 				sshdr.sense_key == UNIT_ATTENTION &&
2600 				sshdr.asc == 0x28) {
2601 			if (!spintime) {
2602 				spintime_expire = jiffies + 5 * HZ;
2603 				spintime = 1;
2604 			}
2605 			/* Wait 1 second for next try */
2606 			msleep(1000);
2607 		} else {
2608 			/* we don't understand the sense code, so it's
2609 			 * probably pointless to loop */
2610 			if (!spintime) {
2611 				sd_printk(KERN_NOTICE, sdkp, "Unit Not Ready\n");
2612 				sd_print_sense_hdr(sdkp, &sshdr);
2613 			}
2614 			break;
2615 		}
2616 
2617 	} while (spintime && time_before_eq(jiffies, spintime_expire));
2618 
2619 	if (spintime) {
2620 		if (scsi_status_is_good(the_result))
2621 			printk(KERN_CONT "ready\n");
2622 		else
2623 			printk(KERN_CONT "not responding...\n");
2624 	}
2625 }
2626 
2627 /*
2628  * Determine whether disk supports Data Integrity Field.
2629  */
2630 static int sd_read_protection_type(struct scsi_disk *sdkp, unsigned char *buffer)
2631 {
2632 	struct scsi_device *sdp = sdkp->device;
2633 	u8 type;
2634 
2635 	if (scsi_device_protection(sdp) == 0 || (buffer[12] & 1) == 0) {
2636 		sdkp->protection_type = 0;
2637 		return 0;
2638 	}
2639 
2640 	type = ((buffer[12] >> 1) & 7) + 1; /* P_TYPE 0 = Type 1 */
2641 
2642 	if (type > T10_PI_TYPE3_PROTECTION) {
2643 		sd_printk(KERN_ERR, sdkp,
2644 			  "formatted with unsupported protection type %u. Disabling disk!\n",
2645 			  type);
2646 		sdkp->protection_type = 0;
2647 		return -ENODEV;
2648 	}
2649 
2650 	sdkp->protection_type = type;
2651 
2652 	return 0;
2653 }
2654 
2655 static void sd_config_protection(struct scsi_disk *sdkp,
2656 		struct queue_limits *lim)
2657 {
2658 	struct scsi_device *sdp = sdkp->device;
2659 
2660 	if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY))
2661 		sd_dif_config_host(sdkp, lim);
2662 
2663 	if (!sdkp->protection_type)
2664 		return;
2665 
2666 	if (!scsi_host_dif_capable(sdp->host, sdkp->protection_type)) {
2667 		sd_first_printk(KERN_NOTICE, sdkp,
2668 				"Disabling DIF Type %u protection\n",
2669 				sdkp->protection_type);
2670 		sdkp->protection_type = 0;
2671 	}
2672 
2673 	sd_first_printk(KERN_NOTICE, sdkp, "Enabling DIF Type %u protection\n",
2674 			sdkp->protection_type);
2675 }
2676 
2677 static void read_capacity_error(struct scsi_disk *sdkp, struct scsi_device *sdp,
2678 			struct scsi_sense_hdr *sshdr, int sense_valid,
2679 			int the_result)
2680 {
2681 	if (sense_valid)
2682 		sd_print_sense_hdr(sdkp, sshdr);
2683 	else
2684 		sd_printk(KERN_NOTICE, sdkp, "Sense not available.\n");
2685 
2686 	/*
2687 	 * Set dirty bit for removable devices if not ready -
2688 	 * sometimes drives will not report this properly.
2689 	 */
2690 	if (sdp->removable &&
2691 	    sense_valid && sshdr->sense_key == NOT_READY)
2692 		set_media_not_present(sdkp);
2693 
2694 	/*
2695 	 * We used to set media_present to 0 here to indicate no media
2696 	 * in the drive, but some drives fail read capacity even with
2697 	 * media present, so we can't do that.
2698 	 */
2699 	sdkp->capacity = 0; /* unknown mapped to zero - as usual */
2700 }
2701 
2702 #define RC16_LEN 32
2703 #if RC16_LEN > SD_BUF_SIZE
2704 #error RC16_LEN must not be more than SD_BUF_SIZE
2705 #endif
2706 
2707 #define READ_CAPACITY_RETRIES_ON_RESET	10
2708 
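/*
 * READ CAPACITY(16) parameter data layout used below (per SBC): bytes 0-7
 * RETURNED LOGICAL BLOCK ADDRESS, bytes 8-11 LOGICAL BLOCK LENGTH IN BYTES,
 * byte 12 PROT_EN/P_TYPE and RC BASIS (bits 4-5), byte 13 LOGICAL BLOCKS PER
 * PHYSICAL BLOCK EXPONENT (low nibble), bytes 14-15 the LBPME and LBPRZ
 * flags plus the LOWEST ALIGNED LOGICAL BLOCK ADDRESS.
 */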
2709 static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp,
2710 		struct queue_limits *lim, unsigned char *buffer)
2711 {
2712 	unsigned char cmd[16];
2713 	struct scsi_sense_hdr sshdr;
2714 	const struct scsi_exec_args exec_args = {
2715 		.sshdr = &sshdr,
2716 	};
2717 	int sense_valid = 0;
2718 	int the_result;
2719 	int retries = 3, reset_retries = READ_CAPACITY_RETRIES_ON_RESET;
2720 	unsigned int alignment;
2721 	unsigned long long lba;
2722 	unsigned sector_size;
2723 
2724 	if (sdp->no_read_capacity_16)
2725 		return -EINVAL;
2726 
2727 	do {
2728 		memset(cmd, 0, 16);
2729 		cmd[0] = SERVICE_ACTION_IN_16;
2730 		cmd[1] = SAI_READ_CAPACITY_16;
2731 		cmd[13] = RC16_LEN;
2732 		memset(buffer, 0, RC16_LEN);
2733 
2734 		the_result = scsi_execute_cmd(sdp, cmd, REQ_OP_DRV_IN,
2735 					      buffer, RC16_LEN, SD_TIMEOUT,
2736 					      sdkp->max_retries, &exec_args);
2737 		if (the_result > 0) {
2738 			if (media_not_present(sdkp, &sshdr))
2739 				return -ENODEV;
2740 
2741 			sense_valid = scsi_sense_valid(&sshdr);
2742 			if (sense_valid &&
2743 			    sshdr.sense_key == ILLEGAL_REQUEST &&
2744 			    (sshdr.asc == 0x20 || sshdr.asc == 0x24) &&
2745 			    sshdr.ascq == 0x00)
2746 				/* Invalid Command Operation Code or
2747 				 * Invalid Field in CDB, just retry
2748 				 * silently with RC10 */
2749 				return -EINVAL;
2750 			if (sense_valid &&
2751 			    sshdr.sense_key == UNIT_ATTENTION &&
2752 			    sshdr.asc == 0x29 && sshdr.ascq == 0x00)
2753 				/* Device reset might occur several times,
2754 				 * give it one more chance */
2755 				if (--reset_retries > 0)
2756 					continue;
2757 		}
2758 		retries--;
2759 
2760 	} while (the_result && retries);
2761 
2762 	if (the_result) {
2763 		sd_print_result(sdkp, "Read Capacity(16) failed", the_result);
2764 		read_capacity_error(sdkp, sdp, &sshdr, sense_valid, the_result);
2765 		return -EINVAL;
2766 	}
2767 
2768 	sector_size = get_unaligned_be32(&buffer[8]);
2769 	lba = get_unaligned_be64(&buffer[0]);
2770 
2771 	if (sd_read_protection_type(sdkp, buffer) < 0) {
2772 		sdkp->capacity = 0;
2773 		return -ENODEV;
2774 	}
2775 
2776 	/* Logical blocks per physical block exponent */
2777 	sdkp->physical_block_size = (1 << (buffer[13] & 0xf)) * sector_size;
2778 
2779 	/* RC basis */
2780 	sdkp->rc_basis = (buffer[12] >> 4) & 0x3;
2781 
2782 	/* Lowest aligned logical block */
2783 	alignment = ((buffer[14] & 0x3f) << 8 | buffer[15]) * sector_size;
2784 	lim->alignment_offset = alignment;
2785 	if (alignment && sdkp->first_scan)
2786 		sd_printk(KERN_NOTICE, sdkp,
2787 			  "physical block alignment offset: %u\n", alignment);
2788 
2789 	if (buffer[14] & 0x80) { /* LBPME */
2790 		sdkp->lbpme = 1;
2791 
2792 		if (buffer[14] & 0x40) /* LBPRZ */
2793 			sdkp->lbprz = 1;
2794 	}
2795 
2796 	sdkp->capacity = lba + 1;
2797 	return sector_size;
2798 }
2799 
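/*
 * READ CAPACITY(10) returns the LBA of the last logical block in bytes 0-3
 * and the block length in bytes 4-7, so the capacity is lba + 1.  A returned
 * LBA of 0xffffffff normally means the capacity does not fit in 32 bits and
 * READ CAPACITY(16) has to be used instead.
 */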
2800 static int read_capacity_10(struct scsi_disk *sdkp, struct scsi_device *sdp,
2801 						unsigned char *buffer)
2802 {
2803 	static const u8 cmd[10] = { READ_CAPACITY };
2804 	struct scsi_sense_hdr sshdr;
2805 	struct scsi_failure failure_defs[] = {
2806 		/* Do not retry Medium Not Present */
2807 		{
2808 			.sense = UNIT_ATTENTION,
2809 			.asc = 0x3A,
2810 			.result = SAM_STAT_CHECK_CONDITION,
2811 		},
2812 		{
2813 			.sense = NOT_READY,
2814 			.asc = 0x3A,
2815 			.result = SAM_STAT_CHECK_CONDITION,
2816 		},
2817 		/* Device reset might occur several times so retry a lot */
2818 		{
2819 			.sense = UNIT_ATTENTION,
2820 			.asc = 0x29,
2821 			.allowed = READ_CAPACITY_RETRIES_ON_RESET,
2822 			.result = SAM_STAT_CHECK_CONDITION,
2823 		},
2824 		/* Any other error not listed above retry 3 times */
2825 		{
2826 			.result = SCMD_FAILURE_RESULT_ANY,
2827 			.allowed = 3,
2828 		},
2829 		{}
2830 	};
2831 	struct scsi_failures failures = {
2832 		.failure_definitions = failure_defs,
2833 	};
2834 	const struct scsi_exec_args exec_args = {
2835 		.sshdr = &sshdr,
2836 		.failures = &failures,
2837 	};
2838 	int sense_valid = 0;
2839 	int the_result;
2840 	sector_t lba;
2841 	unsigned sector_size;
2842 
2843 	memset(buffer, 0, 8);
2844 
2845 	the_result = scsi_execute_cmd(sdp, cmd, REQ_OP_DRV_IN, buffer,
2846 				      8, SD_TIMEOUT, sdkp->max_retries,
2847 				      &exec_args);
2848 
2849 	if (the_result > 0) {
2850 		sense_valid = scsi_sense_valid(&sshdr);
2851 
2852 		if (media_not_present(sdkp, &sshdr))
2853 			return -ENODEV;
2854 	}
2855 
2856 	if (the_result) {
2857 		sd_print_result(sdkp, "Read Capacity(10) failed", the_result);
2858 		read_capacity_error(sdkp, sdp, &sshdr, sense_valid, the_result);
2859 		return -EINVAL;
2860 	}
2861 
2862 	sector_size = get_unaligned_be32(&buffer[4]);
2863 	lba = get_unaligned_be32(&buffer[0]);
2864 
2865 	if (sdp->no_read_capacity_16 && (lba == 0xffffffff)) {
2866 		/* Some buggy (usb cardreader) devices return an lba of
2867 		   0xffffffff when they want to report a size of 0 (with
2868 		   which they really mean no media is present) */
2869 		sdkp->capacity = 0;
2870 		sdkp->physical_block_size = sector_size;
2871 		return sector_size;
2872 	}
2873 
2874 	sdkp->capacity = lba + 1;
2875 	sdkp->physical_block_size = sector_size;
2876 	return sector_size;
2877 }
2878 
2879 static int sd_try_rc16_first(struct scsi_device *sdp)
2880 {
2881 	if (sdp->host->max_cmd_len < 16)
2882 		return 0;
2883 	if (sdp->try_rc_10_first)
2884 		return 0;
2885 	if (sdp->scsi_level > SCSI_SPC_2)
2886 		return 1;
2887 	if (scsi_device_protection(sdp))
2888 		return 1;
2889 	return 0;
2890 }
2891 
2892 /*
2893  * read disk capacity
2894  */
2895 static void
2896 sd_read_capacity(struct scsi_disk *sdkp, struct queue_limits *lim,
2897 		unsigned char *buffer)
2898 {
2899 	int sector_size;
2900 	struct scsi_device *sdp = sdkp->device;
2901 
2902 	if (sd_try_rc16_first(sdp)) {
2903 		sector_size = read_capacity_16(sdkp, sdp, lim, buffer);
2904 		if (sector_size == -EOVERFLOW)
2905 			goto got_data;
2906 		if (sector_size == -ENODEV)
2907 			return;
2908 		if (sector_size < 0)
2909 			sector_size = read_capacity_10(sdkp, sdp, buffer);
2910 		if (sector_size < 0)
2911 			return;
2912 	} else {
2913 		sector_size = read_capacity_10(sdkp, sdp, buffer);
2914 		if (sector_size == -EOVERFLOW)
2915 			goto got_data;
2916 		if (sector_size < 0)
2917 			return;
2918 		if ((sizeof(sdkp->capacity) > 4) &&
2919 		    (sdkp->capacity > 0xffffffffULL)) {
2920 			int old_sector_size = sector_size;
2921 			sd_printk(KERN_NOTICE, sdkp,
2922 				  "Very big device. Trying to use READ CAPACITY(16).\n");
2923 			sector_size = read_capacity_16(sdkp, sdp, lim, buffer);
2924 			if (sector_size < 0) {
2925 				sd_printk(KERN_NOTICE, sdkp,
2926 					"Using 0xffffffff as device size\n");
2927 				sdkp->capacity = 1 + (sector_t) 0xffffffff;
2928 				sector_size = old_sector_size;
2929 				goto got_data;
2930 			}
2931 			/* Remember that READ CAPACITY(16) succeeded */
2932 			sdp->try_rc_10_first = 0;
2933 		}
2934 	}
2935 
2936 	/* Some devices are known to return the total number of blocks,
2937 	 * not the highest block number.  Some devices have versions
2938 	 * which do this and others which do not.  Some devices we might
2939 	 * suspect of doing this but we don't know for certain.
2940 	 *
2941 	 * If we know the reported capacity is wrong, decrement it.  If
2942 	 * we can only guess, then assume the number of blocks is even
2943 	 * (usually true but not always) and err on the side of lowering
2944 	 * the capacity.
2945 	 */
2946 	if (sdp->fix_capacity ||
2947 	    (sdp->guess_capacity && (sdkp->capacity & 0x01))) {
2948 		sd_printk(KERN_INFO, sdkp,
2949 			  "Adjusting the sector count from its reported value: %llu\n",
2950 			  (unsigned long long) sdkp->capacity);
2951 		--sdkp->capacity;
2952 	}
2953 
2954 got_data:
2955 	if (sector_size == 0) {
2956 		sector_size = 512;
2957 		sd_printk(KERN_NOTICE, sdkp,
2958 			  "Sector size 0 reported, assuming 512.\n");
2959 	}
2960 
2961 	if (blk_validate_block_size(sector_size)) {
2962 		sd_printk(KERN_NOTICE, sdkp, "Unsupported sector size %d.\n",
2963 			  sector_size);
2964 		/*
2965 		 * The user might want to re-format the drive with
2966 		 * a supported sectorsize.  Once this happens, it
2967 		 * would be relatively trivial to set the thing up.
2968 		 * For this reason, we leave the thing in the table.
2969 		 */
2970 		sdkp->capacity = 0;
2971 		/*
2972 		 * set a bogus sector size so the normal read/write
2973 		 * logic in the block layer will eventually refuse any
2974 		 * request on this device without tripping over power
2975 		 * of two sector size assumptions
2976 		 */
2977 		sector_size = 512;
2978 	}
2979 	lim->logical_block_size = sector_size;
2980 	lim->physical_block_size = sdkp->physical_block_size;
2981 	sdkp->device->sector_size = sector_size;
2982 
2983 	if (sdkp->capacity > 0xffffffff)
2984 		sdp->use_16_for_rw = 1;
2985 
2986 }
2987 
2988 /*
2989  * Print disk capacity
2990  */
2991 static void
2992 sd_print_capacity(struct scsi_disk *sdkp,
2993 		  sector_t old_capacity)
2994 {
2995 	int sector_size = sdkp->device->sector_size;
2996 	char cap_str_2[10], cap_str_10[10];
2997 
2998 	if (!sdkp->first_scan && old_capacity == sdkp->capacity)
2999 		return;
3000 
3001 	string_get_size(sdkp->capacity, sector_size,
3002 			STRING_UNITS_2, cap_str_2, sizeof(cap_str_2));
3003 	string_get_size(sdkp->capacity, sector_size,
3004 			STRING_UNITS_10, cap_str_10, sizeof(cap_str_10));
3005 
3006 	sd_printk(KERN_NOTICE, sdkp,
3007 		  "%llu %d-byte logical blocks: (%s/%s)\n",
3008 		  (unsigned long long)sdkp->capacity,
3009 		  sector_size, cap_str_10, cap_str_2);
3010 
3011 	if (sdkp->physical_block_size != sector_size)
3012 		sd_printk(KERN_NOTICE, sdkp,
3013 			  "%u-byte physical blocks\n",
3014 			  sdkp->physical_block_size);
3015 }
3016 
3017 /* called with buffer of length 512 */
3018 static inline int
3019 sd_do_mode_sense(struct scsi_disk *sdkp, int dbd, int modepage,
3020 		 unsigned char *buffer, int len, struct scsi_mode_data *data,
3021 		 struct scsi_sense_hdr *sshdr)
3022 {
3023 	/*
3024 	 * If we must use MODE SENSE(10), make sure that the buffer length
3025 	 * is at least 8 bytes so that the mode sense header fits.
3026 	 */
3027 	if (sdkp->device->use_10_for_ms && len < 8)
3028 		len = 8;
3029 
3030 	return scsi_mode_sense(sdkp->device, dbd, modepage, 0, buffer, len,
3031 			       SD_TIMEOUT, sdkp->max_retries, data, sshdr);
3032 }
3033 
3034 /*
3035  * read write protect setting, if possible - called only in sd_revalidate_disk()
3036  * called with buffer of length SD_BUF_SIZE
3037  */
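/*
 * The write protect state is bit 7 of the device-specific parameter in the
 * mode parameter header, so only the first few bytes of whatever mode page
 * the device is willing to return are actually needed.
 */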
3038 static void
3039 sd_read_write_protect_flag(struct scsi_disk *sdkp, unsigned char *buffer)
3040 {
3041 	int res;
3042 	struct scsi_device *sdp = sdkp->device;
3043 	struct scsi_mode_data data;
3044 	int old_wp = sdkp->write_prot;
3045 
3046 	set_disk_ro(sdkp->disk, 0);
3047 	if (sdp->skip_ms_page_3f) {
3048 		sd_first_printk(KERN_NOTICE, sdkp, "Assuming Write Enabled\n");
3049 		return;
3050 	}
3051 
3052 	if (sdp->use_192_bytes_for_3f) {
3053 		res = sd_do_mode_sense(sdkp, 0, 0x3F, buffer, 192, &data, NULL);
3054 	} else {
3055 		/*
3056 		 * First attempt: ask for all pages (0x3F), but only 4 bytes.
3057 		 * We have to start carefully: some devices hang if we ask
3058 		 * for more than is available.
3059 		 */
3060 		res = sd_do_mode_sense(sdkp, 0, 0x3F, buffer, 4, &data, NULL);
3061 
3062 		/*
3063 		 * Second attempt: ask for page 0. When only page 0 is
3064 		 * implemented, a request for page 3F may return Sense Key
3065 		 * 5: Illegal Request, Sense Code 24: Invalid field in
3066 		 * CDB.
3067 		 */
3068 		if (res < 0)
3069 			res = sd_do_mode_sense(sdkp, 0, 0, buffer, 4, &data, NULL);
3070 
3071 		/*
3072 		 * Third attempt: ask 255 bytes, as we did earlier.
3073 		 * Third attempt: ask for 255 bytes, as we did earlier.
3074 		if (res < 0)
3075 			res = sd_do_mode_sense(sdkp, 0, 0x3F, buffer, 255,
3076 					       &data, NULL);
3077 	}
3078 
3079 	if (res < 0) {
3080 		sd_first_printk(KERN_WARNING, sdkp,
3081 			  "Test WP failed, assume Write Enabled\n");
3082 	} else {
3083 		sdkp->write_prot = ((data.device_specific & 0x80) != 0);
3084 		set_disk_ro(sdkp->disk, sdkp->write_prot);
3085 		if (sdkp->first_scan || old_wp != sdkp->write_prot) {
3086 			sd_printk(KERN_NOTICE, sdkp, "Write Protect is %s\n",
3087 				  sdkp->write_prot ? "on" : "off");
3088 			sd_printk(KERN_DEBUG, sdkp, "Mode Sense: %4ph\n", buffer);
3089 		}
3090 	}
3091 }
3092 
3093 /*
3094  * sd_read_cache_type - called only from sd_revalidate_disk()
3095  * called with buffer of length SD_BUF_SIZE
3096  */
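/*
 * Cache settings come from the Caching mode page (8), where WCE is bit 2 and
 * RCD bit 0 of the third page byte.  RBC devices use the Device Parameters
 * page (6) instead, where the same byte carries a write-cache-disable flag
 * in bit 0.  DPOFUA is bit 4 of the device-specific parameter in the mode
 * parameter header.
 */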
3097 static void
3098 sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)
3099 {
3100 	int len = 0, res;
3101 	struct scsi_device *sdp = sdkp->device;
3102 
3103 	int dbd;
3104 	int modepage;
3105 	int first_len;
3106 	struct scsi_mode_data data;
3107 	struct scsi_sense_hdr sshdr;
3108 	int old_wce = sdkp->WCE;
3109 	int old_rcd = sdkp->RCD;
3110 	int old_dpofua = sdkp->DPOFUA;
3111 
3112 
3113 	if (sdkp->cache_override)
3114 		return;
3115 
3116 	first_len = 4;
3117 	if (sdp->skip_ms_page_8) {
3118 		if (sdp->type == TYPE_RBC)
3119 			goto defaults;
3120 		else {
3121 			if (sdp->skip_ms_page_3f)
3122 				goto defaults;
3123 			modepage = 0x3F;
3124 			if (sdp->use_192_bytes_for_3f)
3125 				first_len = 192;
3126 			dbd = 0;
3127 		}
3128 	} else if (sdp->type == TYPE_RBC) {
3129 		modepage = 6;
3130 		dbd = 8;
3131 	} else {
3132 		modepage = 8;
3133 		dbd = 0;
3134 	}
3135 
3136 	/* cautiously ask */
3137 	res = sd_do_mode_sense(sdkp, dbd, modepage, buffer, first_len,
3138 			&data, &sshdr);
3139 
3140 	if (res < 0)
3141 		goto bad_sense;
3142 
3143 	if (!data.header_length) {
3144 		modepage = 6;
3145 		first_len = 0;
3146 		sd_first_printk(KERN_ERR, sdkp,
3147 				"Missing header in MODE_SENSE response\n");
3148 	}
3149 
3150 	/* that went OK, now ask for the proper length */
3151 	len = data.length;
3152 
3153 	/*
3154 	 * We're only interested in the first three bytes, actually.
3155 	 * But the data cache page is defined for the first 20.
3156 	 */
3157 	if (len < 3)
3158 		goto bad_sense;
3159 	else if (len > SD_BUF_SIZE) {
3160 		sd_first_printk(KERN_NOTICE, sdkp,
3161 				"Truncating mode parameter data from %d to %d bytes\n",
3162 				len, SD_BUF_SIZE);
3163 		len = SD_BUF_SIZE;
3164 	}
3165 	if (modepage == 0x3F && sdp->use_192_bytes_for_3f)
3166 		len = 192;
3167 
3168 	/* Get the data */
3169 	if (len > first_len)
3170 		res = sd_do_mode_sense(sdkp, dbd, modepage, buffer, len,
3171 				&data, &sshdr);
3172 
3173 	if (!res) {
3174 		int offset = data.header_length + data.block_descriptor_length;
3175 
3176 		while (offset < len) {
3177 			u8 page_code = buffer[offset] & 0x3F;
3178 			u8 spf       = buffer[offset] & 0x40;
3179 
3180 			if (page_code == 8 || page_code == 6) {
3181 				/* We're interested only in the first 3 bytes.
3182 				 */
3183 				if (len - offset <= 2) {
3184 					sd_first_printk(KERN_ERR, sdkp,
3185 						"Incomplete mode parameter data\n");
3186 					goto defaults;
3187 				} else {
3188 					modepage = page_code;
3189 					goto Page_found;
3190 				}
3191 			} else {
3192 				/* Go to the next page */
3193 				if (spf && len - offset > 3)
3194 					offset += 4 + (buffer[offset+2] << 8) +
3195 						buffer[offset+3];
3196 				else if (!spf && len - offset > 1)
3197 					offset += 2 + buffer[offset+1];
3198 				else {
3199 					sd_first_printk(KERN_ERR, sdkp,
3200 							"Incomplete mode parameter data\n");
3201 					goto defaults;
3202 				}
3203 			}
3204 		}
3205 
3206 		sd_first_printk(KERN_WARNING, sdkp,
3207 				"No Caching mode page found\n");
3208 		goto defaults;
3209 
3210 	Page_found:
3211 		if (modepage == 8) {
3212 			sdkp->WCE = ((buffer[offset + 2] & 0x04) != 0);
3213 			sdkp->RCD = ((buffer[offset + 2] & 0x01) != 0);
3214 		} else {
3215 			sdkp->WCE = ((buffer[offset + 2] & 0x01) == 0);
3216 			sdkp->RCD = 0;
3217 		}
3218 
3219 		sdkp->DPOFUA = (data.device_specific & 0x10) != 0;
3220 		if (sdp->broken_fua) {
3221 			sd_first_printk(KERN_NOTICE, sdkp, "Disabling FUA\n");
3222 			sdkp->DPOFUA = 0;
3223 		} else if (sdkp->DPOFUA && !sdkp->device->use_10_for_rw &&
3224 			   !sdkp->device->use_16_for_rw) {
3225 			sd_first_printk(KERN_NOTICE, sdkp,
3226 				  "Uses READ/WRITE(6), disabling FUA\n");
3227 			sdkp->DPOFUA = 0;
3228 		}
3229 
3230 		/* No cache flush allowed for write protected devices */
3231 		if (sdkp->WCE && sdkp->write_prot)
3232 			sdkp->WCE = 0;
3233 
3234 		if (sdkp->first_scan || old_wce != sdkp->WCE ||
3235 		    old_rcd != sdkp->RCD || old_dpofua != sdkp->DPOFUA)
3236 			sd_printk(KERN_NOTICE, sdkp,
3237 				  "Write cache: %s, read cache: %s, %s\n",
3238 				  sdkp->WCE ? "enabled" : "disabled",
3239 				  sdkp->RCD ? "disabled" : "enabled",
3240 				  sdkp->DPOFUA ? "supports DPO and FUA"
3241 				  : "doesn't support DPO or FUA");
3242 
3243 		return;
3244 	}
3245 
3246 bad_sense:
3247 	if (res == -EIO && scsi_sense_valid(&sshdr) &&
3248 	    sshdr.sense_key == ILLEGAL_REQUEST &&
3249 	    sshdr.asc == 0x24 && sshdr.ascq == 0x0)
3250 		/* Invalid field in CDB */
3251 		sd_first_printk(KERN_NOTICE, sdkp, "Cache data unavailable\n");
3252 	else
3253 		sd_first_printk(KERN_ERR, sdkp,
3254 				"Asking for cache data failed\n");
3255 
3256 defaults:
3257 	if (sdp->wce_default_on) {
3258 		sd_first_printk(KERN_NOTICE, sdkp,
3259 				"Assuming drive cache: write back\n");
3260 		sdkp->WCE = 1;
3261 	} else {
3262 		sd_first_printk(KERN_WARNING, sdkp,
3263 				"Assuming drive cache: write through\n");
3264 		sdkp->WCE = 0;
3265 	}
3266 	sdkp->RCD = 0;
3267 	sdkp->DPOFUA = 0;
3268 }
3269 
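/*
 * sd_is_perm_stream - check whether a stream id refers to a permanent stream
 *
 * Sends GET STREAM STATUS (a SERVICE ACTION IN(16) service action) for a
 * single stream identifier and returns the PERM bit from the returned
 * stream status descriptor.
 */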
3270 static bool sd_is_perm_stream(struct scsi_disk *sdkp, unsigned int stream_id)
3271 {
3272 	u8 cdb[16] = { SERVICE_ACTION_IN_16, SAI_GET_STREAM_STATUS };
3273 	struct {
3274 		struct scsi_stream_status_header h;
3275 		struct scsi_stream_status s;
3276 	} buf;
3277 	struct scsi_device *sdev = sdkp->device;
3278 	struct scsi_sense_hdr sshdr;
3279 	const struct scsi_exec_args exec_args = {
3280 		.sshdr = &sshdr,
3281 	};
3282 	int res;
3283 
3284 	put_unaligned_be16(stream_id, &cdb[4]);
3285 	put_unaligned_be32(sizeof(buf), &cdb[10]);
3286 
3287 	res = scsi_execute_cmd(sdev, cdb, REQ_OP_DRV_IN, &buf, sizeof(buf),
3288 			       SD_TIMEOUT, sdkp->max_retries, &exec_args);
3289 	if (res < 0)
3290 		return false;
3291 	if (scsi_status_is_check_condition(res) && scsi_sense_valid(&sshdr))
3292 		sd_print_sense_hdr(sdkp, &sshdr);
3293 	if (res)
3294 		return false;
3295 	if (get_unaligned_be32(&buf.h.len) < sizeof(struct scsi_stream_status))
3296 		return false;
3297 	return buf.s.perm;
3298 }
3299 
3300 static void sd_read_io_hints(struct scsi_disk *sdkp, unsigned char *buffer)
3301 {
3302 	struct scsi_device *sdp = sdkp->device;
3303 	const struct scsi_io_group_descriptor *desc, *start, *end;
3304 	u16 permanent_stream_count_old;
3305 	struct scsi_sense_hdr sshdr;
3306 	struct scsi_mode_data data;
3307 	int res;
3308 
3309 	if (sdp->sdev_bflags & BLIST_SKIP_IO_HINTS)
3310 		return;
3311 
3312 	res = scsi_mode_sense(sdp, /*dbd=*/0x8, /*modepage=*/0x0a,
3313 			      /*subpage=*/0x05, buffer, SD_BUF_SIZE, SD_TIMEOUT,
3314 			      sdkp->max_retries, &data, &sshdr);
3315 	if (res < 0)
3316 		return;
3317 	start = (void *)buffer + data.header_length + 16;
3318 	end = (void *)buffer + ALIGN_DOWN(data.header_length + data.length,
3319 					  sizeof(*end));
3320 	/*
3321 	 * From "SBC-5 Constrained Streams with Data Lifetimes": Device servers
3322 	 * should assign the lowest numbered stream identifiers to permanent
3323 	 * streams.
3324 	 */
3325 	for (desc = start; desc < end; desc++)
3326 		if (!desc->st_enble || !sd_is_perm_stream(sdkp, desc - start))
3327 			break;
3328 	permanent_stream_count_old = sdkp->permanent_stream_count;
3329 	sdkp->permanent_stream_count = desc - start;
3330 	if (sdkp->rscs && sdkp->permanent_stream_count < 2)
3331 		sd_printk(KERN_INFO, sdkp,
3332 			  "Unexpected: RSCS has been set and the permanent stream count is %u\n",
3333 			  sdkp->permanent_stream_count);
3334 	else if (sdkp->permanent_stream_count != permanent_stream_count_old)
3335 		sd_printk(KERN_INFO, sdkp, "permanent stream count = %d\n",
3336 			  sdkp->permanent_stream_count);
3337 }
3338 
3339 /*
3340  * The ATO bit indicates whether the DIF application tag is available
3341  * for use by the operating system.
3342  */
3343 static void sd_read_app_tag_own(struct scsi_disk *sdkp, unsigned char *buffer)
3344 {
3345 	int res, offset;
3346 	struct scsi_device *sdp = sdkp->device;
3347 	struct scsi_mode_data data;
3348 	struct scsi_sense_hdr sshdr;
3349 
3350 	if (sdp->type != TYPE_DISK && sdp->type != TYPE_ZBC)
3351 		return;
3352 
3353 	if (sdkp->protection_type == 0)
3354 		return;
3355 
3356 	res = scsi_mode_sense(sdp, 1, 0x0a, 0, buffer, 36, SD_TIMEOUT,
3357 			      sdkp->max_retries, &data, &sshdr);
3358 
3359 	if (res < 0 || !data.header_length ||
3360 	    data.length < 6) {
3361 		sd_first_printk(KERN_WARNING, sdkp,
3362 			  "getting Control mode page failed, assume no ATO\n");
3363 
3364 		if (res == -EIO && scsi_sense_valid(&sshdr))
3365 			sd_print_sense_hdr(sdkp, &sshdr);
3366 
3367 		return;
3368 	}
3369 
3370 	offset = data.header_length + data.block_descriptor_length;
3371 
3372 	if ((buffer[offset] & 0x3f) != 0x0a) {
3373 		sd_first_printk(KERN_ERR, sdkp, "ATO Got wrong page\n");
3374 		return;
3375 	}
3376 
3377 	if ((buffer[offset + 5] & 0x80) == 0)
3378 		return;
3379 
3380 	sdkp->ATO = 1;
3381 
3382 	return;
3383 }
3384 
3385 static unsigned int sd_discard_mode(struct scsi_disk *sdkp)
3386 {
3387 	if (!sdkp->lbpme)
3388 		return SD_LBP_FULL;
3389 
3390 	if (!sdkp->lbpvpd) {
3391 		/* LBP VPD page not provided */
3392 		if (sdkp->max_unmap_blocks)
3393 			return SD_LBP_UNMAP;
3394 		return SD_LBP_WS16;
3395 	}
3396 
3397 	/* LBP VPD page tells us what to use */
3398 	if (sdkp->lbpu && sdkp->max_unmap_blocks)
3399 		return SD_LBP_UNMAP;
3400 	if (sdkp->lbpws)
3401 		return SD_LBP_WS16;
3402 	if (sdkp->lbpws10)
3403 		return SD_LBP_WS10;
3404 	return SD_LBP_DISABLE;
3405 }
3406 
3407 /*
3408  * Query disk device for preferred I/O sizes.
3409  */
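/*
 * The offsets below follow the Block Limits VPD page (0xb0): optimal
 * transfer length granularity at bytes 6-7, maximum and optimal transfer
 * length at bytes 8-11 and 12-15, maximum unmap LBA and descriptor counts at
 * bytes 20-27, optimal unmap granularity and alignment at bytes 28-35,
 * maximum WRITE SAME length at bytes 36-43 and the atomic write limits from
 * byte 44 onwards.
 */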
3410 static void sd_read_block_limits(struct scsi_disk *sdkp,
3411 		struct queue_limits *lim)
3412 {
3413 	struct scsi_vpd *vpd;
3414 
3415 	rcu_read_lock();
3416 
3417 	vpd = rcu_dereference(sdkp->device->vpd_pgb0);
3418 	if (!vpd || vpd->len < 16)
3419 		goto out;
3420 
3421 	sdkp->min_xfer_blocks = get_unaligned_be16(&vpd->data[6]);
3422 	sdkp->max_xfer_blocks = get_unaligned_be32(&vpd->data[8]);
3423 	sdkp->opt_xfer_blocks = get_unaligned_be32(&vpd->data[12]);
3424 
3425 	if (vpd->len >= 64) {
3426 		unsigned int lba_count, desc_count;
3427 
3428 		sdkp->max_ws_blocks = (u32)get_unaligned_be64(&vpd->data[36]);
3429 
3430 		if (!sdkp->lbpme)
3431 			goto config_atomic;
3432 
3433 		lba_count = get_unaligned_be32(&vpd->data[20]);
3434 		desc_count = get_unaligned_be32(&vpd->data[24]);
3435 
3436 		if (lba_count && desc_count)
3437 			sdkp->max_unmap_blocks = lba_count;
3438 
3439 		sdkp->unmap_granularity = get_unaligned_be32(&vpd->data[28]);
3440 
3441 		if (vpd->data[32] & 0x80)
3442 			sdkp->unmap_alignment =
3443 				get_unaligned_be32(&vpd->data[32]) & ~(1 << 31);
3444 
3445 config_atomic:
3446 		sdkp->max_atomic = get_unaligned_be32(&vpd->data[44]);
3447 		sdkp->atomic_alignment = get_unaligned_be32(&vpd->data[48]);
3448 		sdkp->atomic_granularity = get_unaligned_be32(&vpd->data[52]);
3449 		sdkp->max_atomic_with_boundary = get_unaligned_be32(&vpd->data[56]);
3450 		sdkp->max_atomic_boundary = get_unaligned_be32(&vpd->data[60]);
3451 
3452 		sd_config_atomic(sdkp, lim);
3453 	}
3454 
3455  out:
3456 	rcu_read_unlock();
3457 }
3458 
3459 /* Parse the Block Limits Extension VPD page (0xb7) */
3460 static void sd_read_block_limits_ext(struct scsi_disk *sdkp)
3461 {
3462 	struct scsi_vpd *vpd;
3463 
3464 	rcu_read_lock();
3465 	vpd = rcu_dereference(sdkp->device->vpd_pgb7);
3466 	if (vpd && vpd->len >= 6)
3467 		sdkp->rscs = vpd->data[5] & 1;
3468 	rcu_read_unlock();
3469 }
3470 
3471 /* Query block device characteristics */
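/*
 * VPD page 0xb1 reports the MEDIUM ROTATION RATE in bytes 4-5 (a value of 1
 * means a non-rotational device) and the ZONED field in bits 4-5 of byte 8.
 */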
3472 static void sd_read_block_characteristics(struct scsi_disk *sdkp,
3473 		struct queue_limits *lim)
3474 {
3475 	struct scsi_vpd *vpd;
3476 	u16 rot;
3477 
3478 	rcu_read_lock();
3479 	vpd = rcu_dereference(sdkp->device->vpd_pgb1);
3480 
3481 	if (!vpd || vpd->len <= 8) {
3482 		rcu_read_unlock();
3483 		return;
3484 	}
3485 
3486 	rot = get_unaligned_be16(&vpd->data[4]);
3487 	sdkp->zoned = (vpd->data[8] >> 4) & 3;
3488 	rcu_read_unlock();
3489 
3490 	if (rot == 1)
3491 		lim->features &= ~(BLK_FEAT_ROTATIONAL | BLK_FEAT_ADD_RANDOM);
3492 
3493 	if (!sdkp->first_scan)
3494 		return;
3495 
3496 	if (sdkp->device->type == TYPE_ZBC)
3497 		sd_printk(KERN_NOTICE, sdkp, "Host-managed zoned block device\n");
3498 	else if (sdkp->zoned == 1)
3499 		sd_printk(KERN_NOTICE, sdkp, "Host-aware SMR disk used as regular disk\n");
3500 	else if (sdkp->zoned == 2)
3501 		sd_printk(KERN_NOTICE, sdkp, "Drive-managed SMR disk\n");
3502 }
3503 
3504 /**
3505  * sd_read_block_provisioning - Query provisioning VPD page
3506  * @sdkp: disk to query
3507  */
3508 static void sd_read_block_provisioning(struct scsi_disk *sdkp)
3509 {
3510 	struct scsi_vpd *vpd;
3511 
3512 	if (sdkp->lbpme == 0)
3513 		return;
3514 
3515 	rcu_read_lock();
3516 	vpd = rcu_dereference(sdkp->device->vpd_pgb2);
3517 
3518 	if (!vpd || vpd->len < 8) {
3519 		rcu_read_unlock();
3520 		return;
3521 	}
3522 
3523 	sdkp->lbpvpd	= 1;
3524 	sdkp->lbpu	= (vpd->data[5] >> 7) & 1; /* UNMAP */
3525 	sdkp->lbpws	= (vpd->data[5] >> 6) & 1; /* WRITE SAME(16) w/ UNMAP */
3526 	sdkp->lbpws10	= (vpd->data[5] >> 5) & 1; /* WRITE SAME(10) w/ UNMAP */
3527 	rcu_read_unlock();
3528 }
3529 
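/*
 * sd_read_write_same - probe WRITE SAME support
 *
 * REPORT SUPPORTED OPERATION CODES is first tried with a known-good opcode
 * (INQUIRY); if that command itself fails the device is flagged as not
 * supporting RSOC, and WRITE SAME is additionally disabled for ATA devices.
 * Otherwise WRITE SAME(16) and WRITE SAME(10) support is checked
 * individually.
 */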
3530 static void sd_read_write_same(struct scsi_disk *sdkp, unsigned char *buffer)
3531 {
3532 	struct scsi_device *sdev = sdkp->device;
3533 
3534 	if (sdev->host->no_write_same) {
3535 		sdev->no_write_same = 1;
3536 
3537 		return;
3538 	}
3539 
3540 	if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE, INQUIRY, 0) < 0) {
3541 		sdev->no_report_opcodes = 1;
3542 
3543 		/*
3544 		 * Disable WRITE SAME if REPORT SUPPORTED OPERATION CODES is
3545 		 * unsupported and this is an ATA device.
3546 		 */
3547 		if (sdev->is_ata)
3548 			sdev->no_write_same = 1;
3549 	}
3550 
3551 	if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE, WRITE_SAME_16, 0) == 1)
3552 		sdkp->ws16 = 1;
3553 
3554 	if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE, WRITE_SAME, 0) == 1)
3555 		sdkp->ws10 = 1;
3556 }
3557 
3558 static void sd_read_security(struct scsi_disk *sdkp, unsigned char *buffer)
3559 {
3560 	struct scsi_device *sdev = sdkp->device;
3561 
3562 	if (!sdev->security_supported)
3563 		return;
3564 
3565 	if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE,
3566 			SECURITY_PROTOCOL_IN, 0) == 1 &&
3567 	    scsi_report_opcode(sdev, buffer, SD_BUF_SIZE,
3568 			SECURITY_PROTOCOL_OUT, 0) == 1)
3569 		sdkp->security = 1;
3570 }
3571 
3572 static inline sector_t sd64_to_sectors(struct scsi_disk *sdkp, u8 *buf)
3573 {
3574 	return logical_to_sectors(sdkp->device, get_unaligned_be64(buf));
3575 }
3576 
3577 /**
3578  * sd_read_cpr - Query concurrent positioning ranges
3579  * @sdkp:	disk to query
3580  */
3581 static void sd_read_cpr(struct scsi_disk *sdkp)
3582 {
3583 	struct blk_independent_access_ranges *iars = NULL;
3584 	unsigned char *buffer = NULL;
3585 	unsigned int nr_cpr = 0;
3586 	int i, vpd_len, buf_len = SD_BUF_SIZE;
3587 	u8 *desc;
3588 
3589 	/*
3590 	 * We need to have the capacity set first for the block layer to be
3591 	 * able to check the ranges.
3592 	 */
3593 	if (sdkp->first_scan)
3594 		return;
3595 
3596 	if (!sdkp->capacity)
3597 		goto out;
3598 
3599 	/*
3600 	 * Concurrent Positioning Ranges VPD: there can be at most 256 ranges,
3601 	 * leading to a maximum page size of 64 + 256*32 bytes.
3602 	 */
3603 	buf_len = 64 + 256*32;
3604 	buffer = kmalloc(buf_len, GFP_KERNEL);
3605 	if (!buffer || scsi_get_vpd_page(sdkp->device, 0xb9, buffer, buf_len))
3606 		goto out;
3607 
3608 	/* We must have at least a 64B header and one 32B range descriptor */
3609 	vpd_len = get_unaligned_be16(&buffer[2]) + 4;
3610 	if (vpd_len > buf_len || vpd_len < 64 + 32 || (vpd_len & 31)) {
3611 		sd_printk(KERN_ERR, sdkp,
3612 			  "Invalid Concurrent Positioning Ranges VPD page\n");
3613 		goto out;
3614 	}
3615 
3616 	nr_cpr = (vpd_len - 64) / 32;
3617 	if (nr_cpr == 1) {
3618 		nr_cpr = 0;
3619 		goto out;
3620 	}
3621 
3622 	iars = disk_alloc_independent_access_ranges(sdkp->disk, nr_cpr);
3623 	if (!iars) {
3624 		nr_cpr = 0;
3625 		goto out;
3626 	}
3627 
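	/*
	 * Each 32-byte descriptor following the 64-byte header is parsed as:
	 * byte 0 holds the range number, bytes 8-15 the starting LBA and
	 * bytes 16-23 the number of LBAs, both big-endian.
	 */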
3628 	desc = &buffer[64];
3629 	for (i = 0; i < nr_cpr; i++, desc += 32) {
3630 		if (desc[0] != i) {
3631 			sd_printk(KERN_ERR, sdkp,
3632 				"Invalid Concurrent Positioning Range number\n");
3633 			nr_cpr = 0;
3634 			break;
3635 		}
3636 
3637 		iars->ia_range[i].sector = sd64_to_sectors(sdkp, desc + 8);
3638 		iars->ia_range[i].nr_sectors = sd64_to_sectors(sdkp, desc + 16);
3639 	}
3640 
3641 out:
3642 	disk_set_independent_access_ranges(sdkp->disk, iars);
3643 	if (nr_cpr && sdkp->nr_actuators != nr_cpr) {
3644 		sd_printk(KERN_NOTICE, sdkp,
3645 			  "%u concurrent positioning ranges\n", nr_cpr);
3646 		sdkp->nr_actuators = nr_cpr;
3647 	}
3648 
3649 	kfree(buffer);
3650 }
3651 
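/*
 * Validate the preferred minimum I/O size reported by the device: it is
 * used only if it is non-zero and a multiple of the physical block size.
 */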
3652 static bool sd_validate_min_xfer_size(struct scsi_disk *sdkp)
3653 {
3654 	struct scsi_device *sdp = sdkp->device;
3655 	unsigned int min_xfer_bytes =
3656 		logical_to_bytes(sdp, sdkp->min_xfer_blocks);
3657 
3658 	if (sdkp->min_xfer_blocks == 0)
3659 		return false;
3660 
3661 	if (min_xfer_bytes & (sdkp->physical_block_size - 1)) {
3662 		sd_first_printk(KERN_WARNING, sdkp,
3663 				"Preferred minimum I/O size %u bytes not a multiple of physical block size (%u bytes)\n",
3664 				min_xfer_bytes, sdkp->physical_block_size);
3665 		sdkp->min_xfer_blocks = 0;
3666 		return false;
3667 	}
3668 
3669 	sd_first_printk(KERN_INFO, sdkp, "Preferred minimum I/O size %u bytes\n",
3670 			min_xfer_bytes);
3671 	return true;
3672 }
3673 
3674 /*
3675  * Determine the device's preferred I/O size for reads and writes
3676  * unless the reported value is unreasonably small, large, not a
3677  * multiple of the physical block size, or simply garbage.
3678  */
3679 static bool sd_validate_opt_xfer_size(struct scsi_disk *sdkp,
3680 				      unsigned int dev_max)
3681 {
3682 	struct scsi_device *sdp = sdkp->device;
3683 	unsigned int opt_xfer_bytes =
3684 		logical_to_bytes(sdp, sdkp->opt_xfer_blocks);
3685 	unsigned int min_xfer_bytes =
3686 		logical_to_bytes(sdp, sdkp->min_xfer_blocks);
3687 
3688 	if (sdkp->opt_xfer_blocks == 0)
3689 		return false;
3690 
3691 	if (sdkp->opt_xfer_blocks > dev_max) {
3692 		sd_first_printk(KERN_WARNING, sdkp,
3693 				"Optimal transfer size %u logical blocks > dev_max (%u logical blocks)\n",
3694 				sdkp->opt_xfer_blocks, dev_max);
3695 		return false;
3696 	}
3697 
3698 	if (sdkp->opt_xfer_blocks > SD_DEF_XFER_BLOCKS) {
3699 		sd_first_printk(KERN_WARNING, sdkp,
3700 				"Optimal transfer size %u logical blocks > sd driver limit (%u logical blocks)\n",
3701 				sdkp->opt_xfer_blocks, SD_DEF_XFER_BLOCKS);
3702 		return false;
3703 	}
3704 
3705 	if (opt_xfer_bytes < PAGE_SIZE) {
3706 		sd_first_printk(KERN_WARNING, sdkp,
3707 				"Optimal transfer size %u bytes < PAGE_SIZE (%u bytes)\n",
3708 				opt_xfer_bytes, (unsigned int)PAGE_SIZE);
3709 		return false;
3710 	}
3711 
3712 	if (min_xfer_bytes && opt_xfer_bytes % min_xfer_bytes) {
3713 		sd_first_printk(KERN_WARNING, sdkp,
3714 				"Optimal transfer size %u bytes not a multiple of preferred minimum block size (%u bytes)\n",
3715 				opt_xfer_bytes, min_xfer_bytes);
3716 		return false;
3717 	}
3718 
3719 	if (opt_xfer_bytes & (sdkp->physical_block_size - 1)) {
3720 		sd_first_printk(KERN_WARNING, sdkp,
3721 				"Optimal transfer size %u bytes not a multiple of physical block size (%u bytes)\n",
3722 				opt_xfer_bytes, sdkp->physical_block_size);
3723 		return false;
3724 	}
3725 
3726 	sd_first_printk(KERN_INFO, sdkp, "Optimal transfer size %u bytes\n",
3727 			opt_xfer_bytes);
3728 	return true;
3729 }
3730 
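/*
 * Read a single logical block at LBA 0 and discard the result.  This is only
 * used to nudge devices (typically USB/UAS bridges) that report generic mode
 * page values until the media has actually been accessed.
 */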
3731 static void sd_read_block_zero(struct scsi_disk *sdkp)
3732 {
3733 	struct scsi_device *sdev = sdkp->device;
3734 	unsigned int buf_len = sdev->sector_size;
3735 	u8 *buffer, cmd[16] = { };
3736 
3737 	buffer = kmalloc(buf_len, GFP_KERNEL);
3738 	if (!buffer)
3739 		return;
3740 
3741 	if (sdev->use_16_for_rw) {
3742 		cmd[0] = READ_16;
3743 		put_unaligned_be64(0, &cmd[2]); /* Logical block address 0 */
3744 		put_unaligned_be32(1, &cmd[10]);/* Transfer 1 logical block */
3745 	} else {
3746 		cmd[0] = READ_10;
3747 		put_unaligned_be32(0, &cmd[2]); /* Logical block address 0 */
3748 		put_unaligned_be16(1, &cmd[7]);	/* Transfer 1 logical block */
3749 	}
3750 
3751 	scsi_execute_cmd(sdkp->device, cmd, REQ_OP_DRV_IN, buffer, buf_len,
3752 			 SD_TIMEOUT, sdkp->max_retries, NULL);
3753 	kfree(buffer);
3754 }
3755 
3756 /**
3757  *	sd_revalidate_disk - called the first time a new disk is seen and
3758  *	on each subsequent rescan; performs disk spin up, read capacity, etc.
3759  *	@disk: struct gendisk we care about
3760  **/
3761 static void sd_revalidate_disk(struct gendisk *disk)
3762 {
3763 	struct scsi_disk *sdkp = scsi_disk(disk);
3764 	struct scsi_device *sdp = sdkp->device;
3765 	sector_t old_capacity = sdkp->capacity;
3766 	struct queue_limits *lim = NULL;
3767 	unsigned char *buffer = NULL;
3768 	unsigned int dev_max;
3769 	int err;
3770 
3771 	SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp,
3772 				      "sd_revalidate_disk\n"));
3773 
3774 	/*
3775 	 * If the device is offline, don't try and read capacity or any
3776 	 * of the other niceties.
3777 	 */
3778 	if (!scsi_device_online(sdp))
3779 		return;
3780 
3781 	lim = kmalloc(sizeof(*lim), GFP_KERNEL);
3782 	if (!lim)
3783 		return;
3784 
3785 	buffer = kmalloc(SD_BUF_SIZE, GFP_KERNEL);
3786 	if (!buffer)
3787 		goto out;
3788 
3789 	sd_spinup_disk(sdkp);
3790 
3791 	*lim = queue_limits_start_update(sdkp->disk->queue);
3792 
3793 	/*
3794 	 * Without media there is no reason to ask; moreover, some devices
3795 	 * react badly if we do.
3796 	 */
3797 	if (sdkp->media_present) {
3798 		sd_read_capacity(sdkp, lim, buffer);
3799 		/*
3800 		 * Some USB/UAS devices return generic values for mode pages
3801 		 * until the media has been accessed. Trigger a READ operation
3802 		 * to force the device to populate mode pages.
3803 		 */
3804 		if (sdp->read_before_ms)
3805 			sd_read_block_zero(sdkp);
3806 		/*
3807 		 * Set the default to rotational.  All non-rotational devices
3808 		 * support the block characteristics VPD page, which updates
3809 		 * this correctly; any device that doesn't support it should
3810 		 * be treated as rotational.
3811 		 */
3812 		lim->features |= (BLK_FEAT_ROTATIONAL | BLK_FEAT_ADD_RANDOM);
3813 
3814 		if (scsi_device_supports_vpd(sdp)) {
3815 			sd_read_block_provisioning(sdkp);
3816 			sd_read_block_limits(sdkp, lim);
3817 			sd_read_block_limits_ext(sdkp);
3818 			sd_read_block_characteristics(sdkp, lim);
3819 			sd_zbc_read_zones(sdkp, lim, buffer);
3820 		}
3821 
3822 		sd_config_discard(sdkp, lim, sd_discard_mode(sdkp));
3823 
3824 		sd_print_capacity(sdkp, old_capacity);
3825 
3826 		sd_read_write_protect_flag(sdkp, buffer);
3827 		sd_read_cache_type(sdkp, buffer);
3828 		sd_read_io_hints(sdkp, buffer);
3829 		sd_read_app_tag_own(sdkp, buffer);
3830 		sd_read_write_same(sdkp, buffer);
3831 		sd_read_security(sdkp, buffer);
3832 		sd_config_protection(sdkp, lim);
3833 	}
3834 
3835 	/*
3836 	 * We now have all cache-related info; determine how we deal
3837 	 * with flush requests.
3838 	 */
3839 	sd_set_flush_flag(sdkp, lim);
3840 
3841 	/* Initial block count limit based on CDB TRANSFER LENGTH field size. */
3842 	dev_max = sdp->use_16_for_rw ? SD_MAX_XFER_BLOCKS : SD_DEF_XFER_BLOCKS;
3843 
3844 	/* Some devices report a maximum block count for READ/WRITE requests. */
3845 	dev_max = min_not_zero(dev_max, sdkp->max_xfer_blocks);
3846 	lim->max_dev_sectors = logical_to_sectors(sdp, dev_max);
3847 
3848 	if (sd_validate_min_xfer_size(sdkp))
3849 		lim->io_min = logical_to_bytes(sdp, sdkp->min_xfer_blocks);
3850 	else
3851 		lim->io_min = 0;
3852 
3853 	/*
3854 	 * Default to the SCSI host's optimal sector limit, if set.  There may
3855 	 * be a performance impact when the size of a request exceeds this
3856 	 * host limit.
3857 	 */
3858 	lim->io_opt = sdp->host->opt_sectors << SECTOR_SHIFT;
3859 	if (sd_validate_opt_xfer_size(sdkp, dev_max)) {
3860 		lim->io_opt = min_not_zero(lim->io_opt,
3861 				logical_to_bytes(sdp, sdkp->opt_xfer_blocks));
3862 	}
3863 
3864 	sdkp->first_scan = 0;
3865 
3866 	set_capacity_and_notify(disk, logical_to_sectors(sdp, sdkp->capacity));
3867 	sd_config_write_same(sdkp, lim);
3868 
3869 	err = queue_limits_commit_update_frozen(sdkp->disk->queue, lim);
3870 	if (err)
3871 		goto out;
3872 
3873 	/*
3874 	 * Query concurrent positioning ranges after
3875 	 * queue_limits_commit_update() unlocked q->limits_lock to avoid
3876 	 * deadlock with q->sysfs_dir_lock and q->sysfs_lock.
3877 	 */
3878 	if (sdkp->media_present && scsi_device_supports_vpd(sdp))
3879 		sd_read_cpr(sdkp);
3880 
3881 	/*
3882 	 * For a zoned drive, revalidating the zones can be done only once
3883 	 * the gendisk capacity is set. So if this fails, set back the gendisk
3884 	 * capacity to 0.
3885 	 */
3886 	if (sd_zbc_revalidate_zones(sdkp))
3887 		set_capacity_and_notify(disk, 0);
3888 
3889  out:
3890 	kfree(buffer);
3891 	kfree(lim);
3893 }
3894 
3895 /**
3896  *	sd_unlock_native_capacity - unlock native capacity
3897  *	@disk: struct gendisk to set capacity for
3898  *
3899  *	The block layer calls this function if it detects that partitions
3900  *	on @disk reach beyond the end of the device.  If the SCSI host
3901  *	implements the ->unlock_native_capacity() method, it is invoked to
3902  *	give the driver a chance to adjust the device capacity.
3903  *
3904  *	CONTEXT:
3905  *	Defined by block layer.  Might sleep.
3906  */
3907 static void sd_unlock_native_capacity(struct gendisk *disk)
3908 {
3909 	struct scsi_device *sdev = scsi_disk(disk)->device;
3910 
3911 	if (sdev->host->hostt->unlock_native_capacity)
3912 		sdev->host->hostt->unlock_native_capacity(sdev);
3913 }
3914 
3915 static const struct block_device_operations sd_fops = {
3916 	.owner			= THIS_MODULE,
3917 	.open			= sd_open,
3918 	.release		= sd_release,
3919 	.ioctl			= sd_ioctl,
3920 	.getgeo			= sd_getgeo,
3921 	.compat_ioctl		= blkdev_compat_ptr_ioctl,
3922 	.check_events		= sd_check_events,
3923 	.unlock_native_capacity	= sd_unlock_native_capacity,
3924 	.report_zones		= sd_zbc_report_zones,
3925 	.get_unique_id		= sd_get_unique_id,
3926 	.free_disk		= scsi_disk_free_disk,
3927 	.pr_ops			= &sd_pr_ops,
3928 };
3929 
3930 /**
3931  *	sd_format_disk_name - format disk name
3932  *	@prefix: name prefix - ie. "sd" for SCSI disks
3933  *	@index: index of the disk to format name for
3934  *	@buf: output buffer
3935  *	@buflen: length of the output buffer
3936  *
3937  *	SCSI disk names start at sda.  The 26th device is sdz and the
3938  *	27th is sdaa.  The last name with a two-letter suffix is sdzz,
3939  *	which is followed by sdaaa.
3940  *
3941  *	This is basically base-26 counting with one extra 'nil' entry
3942  *	at the beginning from the second digit on, and can be computed
3943  *	like a base-26 conversion with the index shifted by -1 after
3944  *	each digit is produced.
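 *
 *	For example, index 0 maps to "sda", 25 to "sdz", 26 to "sdaa",
 *	701 to "sdzz" and 702 to "sdaaa".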
3945  *
3946  *	CONTEXT:
3947  *	Don't care.
3948  *
3949  *	RETURNS:
3950  *	0 on success, -errno on failure.
3951  */
3952 static int sd_format_disk_name(char *prefix, int index, char *buf, int buflen)
3953 {
3954 	const int base = 'z' - 'a' + 1;
3955 	char *begin = buf + strlen(prefix);
3956 	char *end = buf + buflen;
3957 	char *p;
3958 	int unit;
3959 
3960 	p = end - 1;
3961 	*p = '\0';
3962 	unit = base;
3963 	do {
3964 		if (p == begin)
3965 			return -EINVAL;
3966 		*--p = 'a' + (index % unit);
3967 		index = (index / unit) - 1;
3968 	} while (index >= 0);
3969 
3970 	memmove(begin, p, end - p);
3971 	memcpy(buf, prefix, strlen(prefix));
3972 
3973 	return 0;
3974 }
3975 
3976 /**
3977  *	sd_probe - called during driver initialization and whenever a
3978  *	new scsi device is attached to the system. It is called once
3979  *	for each scsi device (not just disks) present.
3980  *	@sdp: pointer to device object
3981  *
3982  *	Returns 0 if successful (or if not interested in this scsi device,
3983  *	e.g. a scanner); a negative errno when there is an error.
3984  *
3985  *	Note: this function is invoked from the scsi mid-level.
3986  *	This function sets up the mapping between a given
3987  *	<host,channel,id,lun> (found in sdp) and new device name
3988  *	(e.g. /dev/sda). More precisely it is the block device major
3989  *	and minor number that is chosen here.
3990  *
3991  *	Assume sd_probe is not re-entrant (for the time being).
3992  *	Also consider sd_probe() and sd_remove() running concurrently.
3993  **/
3994 static int sd_probe(struct scsi_device *sdp)
3995 {
3996 	struct device *dev = &sdp->sdev_gendev;
3997 	struct scsi_disk *sdkp;
3998 	struct gendisk *gd;
3999 	int index;
4000 	int error;
4001 
4002 	scsi_autopm_get_device(sdp);
4003 	error = -ENODEV;
4004 	if (sdp->type != TYPE_DISK &&
4005 	    sdp->type != TYPE_ZBC &&
4006 	    sdp->type != TYPE_MOD &&
4007 	    sdp->type != TYPE_RBC)
4008 		goto out;
4009 
4010 	if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED) && sdp->type == TYPE_ZBC) {
4011 		sdev_printk(KERN_WARNING, sdp,
4012 			    "Unsupported ZBC host-managed device.\n");
4013 		goto out;
4014 	}
4015 
4016 	SCSI_LOG_HLQUEUE(3, sdev_printk(KERN_INFO, sdp,
4017 					"sd_probe\n"));
4018 
4019 	error = -ENOMEM;
4020 	sdkp = kzalloc(sizeof(*sdkp), GFP_KERNEL);
4021 	if (!sdkp)
4022 		goto out;
4023 
4024 	gd = blk_mq_alloc_disk_for_queue(sdp->request_queue,
4025 					 &sd_bio_compl_lkclass);
4026 	if (!gd)
4027 		goto out_free;
4028 
4029 	index = ida_alloc(&sd_index_ida, GFP_KERNEL);
4030 	if (index < 0) {
4031 		sdev_printk(KERN_WARNING, sdp, "sd_probe: memory exhausted.\n");
4032 		goto out_put;
4033 	}
4034 
4035 	error = sd_format_disk_name("sd", index, gd->disk_name, DISK_NAME_LEN);
4036 	if (error) {
4037 		sdev_printk(KERN_WARNING, sdp, "SCSI disk (sd) name length exceeded.\n");
4038 		goto out_free_index;
4039 	}
4040 
4041 	sdkp->device = sdp;
4042 	sdkp->disk = gd;
4043 	sdkp->index = index;
4044 	sdkp->max_retries = SD_MAX_RETRIES;
4045 	atomic_set(&sdkp->openers, 0);
4046 	atomic_set(&sdkp->device->ioerr_cnt, 0);
4047 
4048 	if (!sdp->request_queue->rq_timeout) {
4049 		if (sdp->type != TYPE_MOD)
4050 			blk_queue_rq_timeout(sdp->request_queue, SD_TIMEOUT);
4051 		else
4052 			blk_queue_rq_timeout(sdp->request_queue,
4053 					     SD_MOD_TIMEOUT);
4054 	}
4055 
4056 	device_initialize(&sdkp->disk_dev);
4057 	sdkp->disk_dev.parent = get_device(dev);
4058 	sdkp->disk_dev.class = &sd_disk_class;
4059 	dev_set_name(&sdkp->disk_dev, "%s", dev_name(dev));
4060 
4061 	error = device_add(&sdkp->disk_dev);
4062 	if (error) {
4063 		put_device(&sdkp->disk_dev);
4064 		put_disk(gd);
4065 		goto out;
4066 	}
4067 
4068 	dev_set_drvdata(dev, sdkp);
4069 
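	/*
	 * Map the ida index to a device number: bits 7:4 select one of the 16
	 * sd majors, bits 3:0 select a block of SD_MINORS minors within that
	 * major, and any higher bits are folded into the minor number (large
	 * indexes rely on extended dev_t support in the block layer).
	 */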
4070 	gd->major = sd_major((index & 0xf0) >> 4);
4071 	gd->first_minor = ((index & 0xf) << 4) | (index & 0xfff00);
4072 	gd->minors = SD_MINORS;
4073 
4074 	gd->fops = &sd_fops;
4075 	gd->private_data = sdkp;
4076 
4077 	/* defaults, until the device tells us otherwise */
4078 	sdp->sector_size = 512;
4079 	sdkp->capacity = 0;
4080 	sdkp->media_present = 1;
4081 	sdkp->write_prot = 0;
4082 	sdkp->cache_override = 0;
4083 	sdkp->WCE = 0;
4084 	sdkp->RCD = 0;
4085 	sdkp->ATO = 0;
4086 	sdkp->first_scan = 1;
4087 	sdkp->max_medium_access_timeouts = SD_MAX_MEDIUM_TIMEOUTS;
4088 
4089 	sd_revalidate_disk(gd);
4090 	if (sdp->sector_size > PAGE_SIZE) {
4091 		if (sd_large_pool_create()) {
4092 			error = -ENOMEM;
4093 			goto out_free_index;
4094 		}
4095 	}
4096 
4097 	if (sdp->removable) {
4098 		gd->flags |= GENHD_FL_REMOVABLE;
4099 		gd->events |= DISK_EVENT_MEDIA_CHANGE;
4100 		gd->event_flags = DISK_EVENT_FLAG_POLL | DISK_EVENT_FLAG_UEVENT;
4101 	}
4102 
4103 	blk_pm_runtime_init(sdp->request_queue, dev);
4104 	if (sdp->rpm_autosuspend) {
4105 		pm_runtime_set_autosuspend_delay(dev,
4106 			sdp->host->rpm_autosuspend_delay);
4107 	}
4108 
4109 	error = device_add_disk(dev, gd, NULL);
4110 	if (error) {
4111 		device_unregister(&sdkp->disk_dev);
4112 		put_disk(gd);
4113 		if (sdp->sector_size > PAGE_SIZE)
4114 			sd_large_pool_destroy();
4115 		goto out;
4116 	}
4117 
4118 	if (sdkp->security) {
4119 		sdkp->opal_dev = init_opal_dev(sdkp, &sd_sec_submit);
4120 		if (sdkp->opal_dev)
4121 			sd_printk(KERN_NOTICE, sdkp, "supports TCG Opal\n");
4122 	}
4123 
4124 	sd_printk(KERN_NOTICE, sdkp, "Attached SCSI %sdisk\n",
4125 		  sdp->removable ? "removable " : "");
4126 	scsi_autopm_put_device(sdp);
4127 
4128 	return 0;
4129 
4130  out_free_index:
4131 	ida_free(&sd_index_ida, index);
4132  out_put:
4133 	put_disk(gd);
4134  out_free:
4135 	kfree(sdkp);
4136  out:
4137 	scsi_autopm_put_device(sdp);
4138 	return error;
4139 }
4140 
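/*
 * Send START STOP UNIT to spin the device up or down.  Unit attentions
 * caused by power on or resets are retried a limited number of times, and
 * "medium not present" is not treated as a failure.
 */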
4141 static int sd_start_stop_device(struct scsi_disk *sdkp, int start)
4142 {
4143 	unsigned char cmd[6] = { START_STOP };	/* START_VALID */
4144 	struct scsi_sense_hdr sshdr;
4145 	struct scsi_failure failure_defs[] = {
4146 		{
4147 			/* Power on, reset, or bus device reset occurred */
4148 			.sense = UNIT_ATTENTION,
4149 			.asc = 0x29,
4150 			.ascq = 0,
4151 			.result = SAM_STAT_CHECK_CONDITION,
4152 		},
4153 		{
4154 			/* Power on occurred */
4155 			.sense = UNIT_ATTENTION,
4156 			.asc = 0x29,
4157 			.ascq = 1,
4158 			.result = SAM_STAT_CHECK_CONDITION,
4159 		},
4160 		{
4161 			/* SCSI bus reset */
4162 			.sense = UNIT_ATTENTION,
4163 			.asc = 0x29,
4164 			.ascq = 2,
4165 			.result = SAM_STAT_CHECK_CONDITION,
4166 		},
4167 		{}
4168 	};
4169 	struct scsi_failures failures = {
4170 		.total_allowed = 3,
4171 		.failure_definitions = failure_defs,
4172 	};
4173 	const struct scsi_exec_args exec_args = {
4174 		.sshdr = &sshdr,
4175 		.req_flags = BLK_MQ_REQ_PM,
4176 		.failures = &failures,
4177 	};
4178 	struct scsi_device *sdp = sdkp->device;
4179 	int res;
4180 
4181 	if (start)
4182 		cmd[4] |= 1;	/* START */
4183 
4184 	if (sdp->start_stop_pwr_cond)
4185 		cmd[4] |= start ? 1 << 4 : 3 << 4;	/* Active or Standby */
4186 
4187 	if (!scsi_device_online(sdp))
4188 		return -ENODEV;
4189 
4190 	res = scsi_execute_cmd(sdp, cmd, REQ_OP_DRV_IN, NULL, 0, SD_TIMEOUT,
4191 			       sdkp->max_retries, &exec_args);
4192 	if (res) {
4193 		sd_print_result(sdkp, "Start/Stop Unit failed", res);
4194 		if (res > 0 && scsi_sense_valid(&sshdr)) {
4195 			sd_print_sense_hdr(sdkp, &sshdr);
4196 			/* 0x3a is medium not present */
4197 			if (sshdr.asc == 0x3a)
4198 				res = 0;
4199 		}
4200 	}
4201 
4202 	/* SCSI error codes must not go to the generic layer */
4203 	if (res)
4204 		return -EIO;
4205 
4206 	return 0;
4207 }
4208 
4209 /*
4210  * Flush the device's write cache with SYNCHRONIZE CACHE and, if the disk is
4211  * configured to be stopped for this kind of shutdown, spin it down with a
4212  * START STOP UNIT command.  Both are sent synchronously via the normal path.
4213  */
4214 static void sd_shutdown(struct scsi_device *sdp)
4215 {
4216 	struct device *dev = &sdp->sdev_gendev;
4217 	struct scsi_disk *sdkp = dev_get_drvdata(dev);
4218 
4219 	if (!sdkp)
4220 		return;         /* this can happen */
4221 
4222 	if (pm_runtime_suspended(dev))
4223 		return;
4224 
4225 	if (sdkp->WCE && sdkp->media_present) {
4226 		sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n");
4227 		sd_sync_cache(sdkp);
4228 	}
4229 
4230 	if ((system_state != SYSTEM_RESTART &&
4231 	     sdkp->device->manage_system_start_stop) ||
4232 	    (system_state == SYSTEM_POWER_OFF &&
4233 	     sdkp->device->manage_shutdown)) {
4238 		sd_printk(KERN_NOTICE, sdkp, "Stopping disk\n");
4239 		sd_start_stop_device(sdkp, 0);
4240 	}
4241 }
4242 
4243 /**
4244  *	sd_remove - called whenever a scsi disk (previously recognized by
4245  *	sd_probe) is detached from the system. It is called (potentially
4246  *	multiple times) during sd module unload.
4247  *	@sdp: pointer to device object
4248  *
4249  *	Note: this function is invoked from the scsi mid-level.
4250  *	This function potentially frees up a device name (e.g. /dev/sdc)
4251  *	that could be re-used by a subsequent sd_probe().
4252  *	This function is not called when the built-in sd driver is "exit-ed".
4253  **/
4254 static void sd_remove(struct scsi_device *sdp)
4255 {
4256 	struct device *dev = &sdp->sdev_gendev;
4257 	struct scsi_disk *sdkp = dev_get_drvdata(dev);
4258 
4259 	scsi_autopm_get_device(sdkp->device);
4260 
4261 	device_del(&sdkp->disk_dev);
4262 	del_gendisk(sdkp->disk);
4263 	if (!sdkp->suspended)
4264 		sd_shutdown(sdp);
4265 
4266 	put_disk(sdkp->disk);
4267 
4268 	if (sdp->sector_size > PAGE_SIZE)
4269 		sd_large_pool_destroy();
4270 }
4271 
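/* Should this PM transition (system or runtime) start/stop the disk? */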
4272 static inline bool sd_do_start_stop(struct scsi_device *sdev, bool runtime)
4273 {
4274 	return (sdev->manage_system_start_stop && !runtime) ||
4275 		(sdev->manage_runtime_start_stop && runtime);
4276 }
4277 
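/*
 * Common suspend handling: flush the write cache if it is enabled and then,
 * if the device is configured to be stopped for this kind of transition,
 * stop the disk.  A failure to stop the disk does not abort system sleep.
 */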
4278 static int sd_suspend_common(struct device *dev, bool runtime)
4279 {
4280 	struct scsi_disk *sdkp = dev_get_drvdata(dev);
4281 	int ret = 0;
4282 
4283 	if (!sdkp)	/* E.g.: runtime suspend following sd_remove() */
4284 		return 0;
4285 
4286 	if (sdkp->WCE && sdkp->media_present) {
4287 		if (!sdkp->device->silence_suspend)
4288 			sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n");
4289 		ret = sd_sync_cache(sdkp);
4290 		/* ignore OFFLINE device */
4291 		if (ret == -ENODEV)
4292 			return 0;
4293 
4294 		if (ret)
4295 			return ret;
4296 	}
4297 
4298 	if (sd_do_start_stop(sdkp->device, runtime)) {
4299 		if (!sdkp->device->silence_suspend)
4300 			sd_printk(KERN_NOTICE, sdkp, "Stopping disk\n");
4301 		/* an error is not worth aborting a system sleep */
4302 		ret = sd_start_stop_device(sdkp, 0);
4303 		if (!runtime)
4304 			ret = 0;
4305 	}
4306 
4307 	if (!ret)
4308 		sdkp->suspended = true;
4309 
4310 	return ret;
4311 }
4312 
4313 static int sd_suspend_system(struct device *dev)
4314 {
4315 	if (pm_runtime_suspended(dev))
4316 		return 0;
4317 
4318 	return sd_suspend_common(dev, false);
4319 }
4320 
4321 static int sd_suspend_runtime(struct device *dev)
4322 {
4323 	return sd_suspend_common(dev, true);
4324 }
4325 
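/*
 * Resume work shared by the driver resume callback and sd_resume_common():
 * by the time this runs the device is expected to be spun up, so the only
 * remaining step is unlocking an OPAL-encrypted drive, if present.
 */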
4326 static int sd_resume(struct device *dev)
4327 {
4328 	struct scsi_disk *sdkp = dev_get_drvdata(dev);
4329 
4330 	sd_printk(KERN_NOTICE, sdkp, "Starting disk\n");
4331 
4332 	if (opal_unlock_from_suspend(sdkp->opal_dev)) {
4333 		sd_printk(KERN_NOTICE, sdkp, "OPAL unlock failed\n");
4334 		return -EIO;
4335 	}
4336 
4337 	return 0;
4338 }
4339 
4340 static int sd_resume_common(struct device *dev, bool runtime)
4341 {
4342 	struct scsi_disk *sdkp = dev_get_drvdata(dev);
4343 	int ret;
4344 
4345 	if (!sdkp)	/* E.g.: runtime resume at the start of sd_probe() */
4346 		return 0;
4347 
4348 	if (!sd_do_start_stop(sdkp->device, runtime)) {
4349 		sdkp->suspended = false;
4350 		return 0;
4351 	}
4352 
4353 	sd_printk(KERN_NOTICE, sdkp, "Starting disk\n");
4354 	ret = sd_start_stop_device(sdkp, 1);
4355 	if (!ret) {
4356 		sd_resume(dev);
4357 		sdkp->suspended = false;
4358 	}
4359 
4360 	return ret;
4361 }
4362 
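/*
 * System resume: a device that was runtime suspended when the system went to
 * sleep is left suspended, unless it explicitly requests to be started on
 * system resume via force_runtime_start_on_system_start.
 */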
4363 static int sd_resume_system(struct device *dev)
4364 {
4365 	if (pm_runtime_suspended(dev)) {
4366 		struct scsi_disk *sdkp = dev_get_drvdata(dev);
4367 		struct scsi_device *sdp = sdkp ? sdkp->device : NULL;
4368 
4369 		if (sdp && sdp->force_runtime_start_on_system_start)
4370 			pm_request_resume(dev);
4371 
4372 		return 0;
4373 	}
4374 
4375 	return sd_resume_common(dev, false);
4376 }
4377 
4378 static int sd_resume_runtime(struct device *dev)
4379 {
4380 	struct scsi_disk *sdkp = dev_get_drvdata(dev);
4381 	struct scsi_device *sdp;
4382 
4383 	if (!sdkp)	/* E.g.: runtime resume at the start of sd_probe() */
4384 		return 0;
4385 
4386 	sdp = sdkp->device;
4387 
4388 	if (sdp->ignore_media_change) {
4389 		/* clear the device's sense data */
4390 		static const u8 cmd[10] = { REQUEST_SENSE };
4391 		const struct scsi_exec_args exec_args = {
4392 			.req_flags = BLK_MQ_REQ_PM,
4393 		};
4394 
4395 		if (scsi_execute_cmd(sdp, cmd, REQ_OP_DRV_IN, NULL, 0,
4396 				     sdp->request_queue->rq_timeout, 1,
4397 				     &exec_args))
4398 			sd_printk(KERN_NOTICE, sdkp,
4399 				  "Failed to clear sense data\n");
4400 	}
4401 
4402 	return sd_resume_common(dev, true);
4403 }
4404 
4405 static const struct dev_pm_ops sd_pm_ops = {
4406 	.suspend		= sd_suspend_system,
4407 	.resume			= sd_resume_system,
4408 	.poweroff		= sd_suspend_system,
4409 	.restore		= sd_resume_system,
4410 	.runtime_suspend	= sd_suspend_runtime,
4411 	.runtime_resume		= sd_resume_runtime,
4412 };
4413 
4414 static struct scsi_driver sd_template = {
4415 	.probe = sd_probe,
4416 	.remove = sd_remove,
4417 	.shutdown = sd_shutdown,
4418 	.gendrv = {
4419 		.name		= "sd",
4420 		.probe_type	= PROBE_PREFER_ASYNCHRONOUS,
4421 		.pm		= &sd_pm_ops,
4422 	},
4423 	.rescan			= sd_rescan,
4424 	.resume			= sd_resume,
4425 	.init_command		= sd_init_command,
4426 	.uninit_command		= sd_uninit_command,
4427 	.done			= sd_done,
4428 	.eh_action		= sd_eh_action,
4429 	.eh_reset		= sd_eh_reset,
4430 };
4431 
4432 /**
4433  *	init_sd - entry point for this driver (both when built in or when
4434  *	a module).
4435  *
4436  *	Note: this function registers this driver with the scsi mid-level.
4437  **/
4438 static int __init init_sd(void)
4439 {
4440 	int majors = 0, i, err;
4441 
4442 	SCSI_LOG_HLQUEUE(3, printk("init_sd: sd driver entry point\n"));
4443 
4444 	for (i = 0; i < SD_MAJORS; i++) {
4445 		if (__register_blkdev(sd_major(i), "sd", sd_default_probe))
4446 			continue;
4447 		majors++;
4448 	}
4449 
4450 	if (!majors)
4451 		return -ENODEV;
4452 
4453 	err = class_register(&sd_disk_class);
4454 	if (err)
4455 		goto err_out;
4456 
4457 	sd_page_pool = mempool_create_page_pool(SD_MEMPOOL_SIZE, 0);
4458 	if (!sd_page_pool) {
4459 		printk(KERN_ERR "sd: can't init discard page pool\n");
4460 		err = -ENOMEM;
4461 		goto err_out_class;
4462 	}
4463 
4464 	err = scsi_register_driver(&sd_template);
4465 	if (err)
4466 		goto err_out_driver;
4467 
4468 	return 0;
4469 
4470 err_out_driver:
4471 	mempool_destroy(sd_page_pool);
4472 err_out_class:
4473 	class_unregister(&sd_disk_class);
4474 err_out:
4475 	for (i = 0; i < SD_MAJORS; i++)
4476 		unregister_blkdev(sd_major(i), "sd");
4477 	return err;
4478 }
4479 
4480 /**
4481  *	exit_sd - exit point for this driver (when it is a module).
4482  *
4483  *	Note: this function unregisters this driver from the scsi mid-level.
4484  **/
4485 static void __exit exit_sd(void)
4486 {
4487 	int i;
4488 
4489 	SCSI_LOG_HLQUEUE(3, printk("exit_sd: exiting sd driver\n"));
4490 
4491 	scsi_unregister_driver(&sd_template);
4492 	mempool_destroy(sd_page_pool);
4493 	mempool_destroy(sd_large_page_pool);	/* mempool_destroy() ignores NULL */
4495 
4496 	class_unregister(&sd_disk_class);
4497 
4498 	for (i = 0; i < SD_MAJORS; i++)
4499 		unregister_blkdev(sd_major(i), "sd");
4500 }
4501 
4502 module_init(init_sd);
4503 module_exit(exit_sd);
4504 
4505 void sd_print_sense_hdr(struct scsi_disk *sdkp, struct scsi_sense_hdr *sshdr)
4506 {
4507 	scsi_print_sense_hdr(sdkp->device,
4508 			     sdkp->disk ? sdkp->disk->disk_name : NULL, sshdr);
4509 }
4510 
4511 void sd_print_result(const struct scsi_disk *sdkp, const char *msg, int result)
4512 {
4513 	const char *hb_string = scsi_hostbyte_string(result);
4514 
4515 	if (hb_string)
4516 		sd_printk(KERN_INFO, sdkp,
4517 			  "%s: Result: hostbyte=%s driverbyte=%s\n", msg,
4518 			  hb_string,
4519 			  "DRIVER_OK");
4520 	else
4521 		sd_printk(KERN_INFO, sdkp,
4522 			  "%s: Result: hostbyte=0x%02x driverbyte=%s\n",
4523 			  msg, host_byte(result), "DRIVER_OK");
4524 }
4525