// SPDX-License-Identifier: GPL-2.0-only
/*
 * sd.c Copyright (C) 1992 Drew Eckhardt
 *      Copyright (C) 1993, 1994, 1995, 1999 Eric Youngdale
 *
 * Linux scsi disk driver
 *     Initial versions: Drew Eckhardt
 *     Subsequent revisions: Eric Youngdale
 * Modification history:
 *  - Drew Eckhardt <drew@colorado.edu> original
 *  - Eric Youngdale <eric@andante.org> add scatter-gather, multiple
 *    outstanding request, and other enhancements.
 *    Support loadable low-level scsi drivers.
 *  - Jirka Hanika <geo@ff.cuni.cz> support more scsi disks using
 *    eight major numbers.
 *  - Richard Gooch <rgooch@atnf.csiro.au> support devfs.
 *  - Torben Mathiasen <tmm@image.dk> Resource allocation fixes in
 *    sd_init and cleanups.
 *  - Alex Davis <letmein@erols.com> Fix problem where partition info
 *    not being read in sd_open. Fix problem where removable media
 *    could be ejected after sd_open.
 *  - Douglas Gilbert <dgilbert@interlog.com> cleanup for lk 2.5.x
 *  - Badari Pulavarty <pbadari@us.ibm.com>, Matthew Wilcox
 *    <willy@debian.org>, Kurt Garloff <garloff@suse.de>:
 *    Support 32k/1M disks.
 *
 * Logging policy (needs CONFIG_SCSI_LOGGING defined):
 *  - setting up transfer: SCSI_LOG_HLQUEUE levels 1 and 2
 *  - end of transfer (bh + scsi_lib): SCSI_LOG_HLCOMPLETE level 1
 *  - entering sd_ioctl: SCSI_LOG_IOCTL level 1
 *  - entering other commands: SCSI_LOG_HLQUEUE level 3
 * Note: when the logging level is set by the user, it must be greater
 * than the level indicated above to trigger output.
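 * For example, a message logged via SCSI_LOG_HLQUEUE(3, ...) only appears
 * once the hlqueue logging level has been raised above 3 (e.g. through
 * /sys/module/scsi_mod/parameters/scsi_logging_level).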
 */

#include <linux/bio-integrity.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/hdreg.h>
#include <linux/errno.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/blkpg.h>
#include <linux/blk-pm.h>
#include <linux/delay.h>
#include <linux/rw_hint.h>
#include <linux/major.h>
#include <linux/mutex.h>
#include <linux/string_helpers.h>
#include <linux/slab.h>
#include <linux/sed-opal.h>
#include <linux/pm_runtime.h>
#include <linux/pr.h>
#include <linux/t10-pi.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_devinfo.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_ioctl.h>
#include <scsi/scsicam.h>
#include <scsi/scsi_common.h>

#include "sd.h"
#include "scsi_priv.h"
#include "scsi_logging.h"

MODULE_AUTHOR("Eric Youngdale");
MODULE_DESCRIPTION("SCSI disk (sd) driver");
MODULE_LICENSE("GPL");

MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK0_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK1_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK2_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK3_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK4_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK5_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK6_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK7_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK8_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK9_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK10_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK11_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK12_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK13_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK14_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK15_MAJOR);
MODULE_ALIAS_SCSI_DEVICE(TYPE_DISK);
MODULE_ALIAS_SCSI_DEVICE(TYPE_MOD);
MODULE_ALIAS_SCSI_DEVICE(TYPE_RBC);
MODULE_ALIAS_SCSI_DEVICE(TYPE_ZBC);

#define SD_MINORS	16
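/* Each disk uses SD_MINORS minor numbers: the whole-disk node plus 15 partitions. */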

static void sd_config_discard(struct scsi_disk *sdkp, struct queue_limits *lim,
		unsigned int mode);
static void sd_config_write_same(struct scsi_disk *sdkp,
		struct queue_limits *lim);
static int  sd_revalidate_disk(struct gendisk *);
static void sd_unlock_native_capacity(struct gendisk *disk);
static void sd_shutdown(struct device *);
static void scsi_disk_release(struct device *cdev);

static DEFINE_IDA(sd_index_ida);

static mempool_t *sd_page_pool;
static struct lock_class_key sd_bio_compl_lkclass;

static const char *sd_cache_types[] = {
	"write through", "none", "write back",
	"write back, no read (daft)"
};
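
/*
 * The sd_cache_types[] index encodes (WCE << 1) | RCD, matching
 * cache_type_show() below.  User space picks a caching policy by writing
 * one of these strings to the cache_type sysfs attribute of a scsi_disk;
 * prefixing it with "temporary " updates the kernel state without issuing
 * a MODE SELECT to the device (see cache_type_store()).
 */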

static void sd_set_flush_flag(struct scsi_disk *sdkp,
		struct queue_limits *lim)
{
	if (sdkp->WCE) {
		lim->features |= BLK_FEAT_WRITE_CACHE;
		if (sdkp->DPOFUA)
			lim->features |= BLK_FEAT_FUA;
		else
			lim->features &= ~BLK_FEAT_FUA;
	} else {
		lim->features &= ~(BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA);
	}
}

static ssize_t
cache_type_store(struct device *dev, struct device_attribute *attr,
		 const char *buf, size_t count)
{
	int ct, rcd, wce, sp;
	struct scsi_disk *sdkp = to_scsi_disk(dev);
	struct scsi_device *sdp = sdkp->device;
	char buffer[64];
	char *buffer_data;
	struct scsi_mode_data data;
	struct scsi_sense_hdr sshdr;
	static const char temp[] = "temporary ";
	int len, ret;

	if (sdp->type != TYPE_DISK && sdp->type != TYPE_ZBC)
		/* no cache control on RBC devices; theoretically they
		 * can do it, but there's probably so many exceptions
		 * it's not worth the risk */
		return -EINVAL;

	if (strncmp(buf, temp, sizeof(temp) - 1) == 0) {
		buf += sizeof(temp) - 1;
		sdkp->cache_override = 1;
	} else {
		sdkp->cache_override = 0;
	}

	ct = sysfs_match_string(sd_cache_types, buf);
	if (ct < 0)
		return -EINVAL;

	rcd = ct & 0x01 ? 1 : 0;
	wce = (ct & 0x02) && !sdkp->write_prot ? 1 : 0;

	if (sdkp->cache_override) {
		struct queue_limits lim;

		sdkp->WCE = wce;
		sdkp->RCD = rcd;

		lim = queue_limits_start_update(sdkp->disk->queue);
		sd_set_flush_flag(sdkp, &lim);
		blk_mq_freeze_queue(sdkp->disk->queue);
		ret = queue_limits_commit_update(sdkp->disk->queue, &lim);
		blk_mq_unfreeze_queue(sdkp->disk->queue);
		if (ret)
			return ret;
		return count;
	}

	if (scsi_mode_sense(sdp, 0x08, 8, 0, buffer, sizeof(buffer), SD_TIMEOUT,
			    sdkp->max_retries, &data, NULL))
		return -EINVAL;
	len = min_t(size_t, sizeof(buffer), data.length - data.header_length -
		    data.block_descriptor_length);
	buffer_data = buffer + data.header_length +
		data.block_descriptor_length;
	buffer_data[2] &= ~0x05;
	buffer_data[2] |= wce << 2 | rcd;
	sp = buffer_data[0] & 0x80 ? 1 : 0;
	buffer_data[0] &= ~0x80;

	/*
	 * Ensure WP, DPOFUA, and RESERVED fields are cleared in
	 * received mode parameter buffer before doing MODE SELECT.
	 */
	data.device_specific = 0;

	ret = scsi_mode_select(sdp, 1, sp, buffer_data, len, SD_TIMEOUT,
			       sdkp->max_retries, &data, &sshdr);
	if (ret) {
		if (ret > 0 && scsi_sense_valid(&sshdr))
			sd_print_sense_hdr(sdkp, &sshdr);
		return -EINVAL;
	}
	sd_revalidate_disk(sdkp->disk);
	return count;
}

static ssize_t
manage_start_stop_show(struct device *dev,
		       struct device_attribute *attr, char *buf)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);
	struct scsi_device *sdp = sdkp->device;

	return sysfs_emit(buf, "%u\n",
			  sdp->manage_system_start_stop &&
			  sdp->manage_runtime_start_stop &&
			  sdp->manage_shutdown);
}
static DEVICE_ATTR_RO(manage_start_stop);

static ssize_t
manage_system_start_stop_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);
	struct scsi_device *sdp = sdkp->device;

	return sysfs_emit(buf, "%u\n", sdp->manage_system_start_stop);
}

static ssize_t
manage_system_start_stop_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t count)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);
	struct scsi_device *sdp = sdkp->device;
	bool v;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	if (kstrtobool(buf, &v))
		return -EINVAL;

	sdp->manage_system_start_stop = v;

	return count;
}
static DEVICE_ATTR_RW(manage_system_start_stop);

static ssize_t
manage_runtime_start_stop_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);
	struct scsi_device *sdp = sdkp->device;

	return sysfs_emit(buf, "%u\n", sdp->manage_runtime_start_stop);
}

static ssize_t
manage_runtime_start_stop_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);
	struct scsi_device *sdp = sdkp->device;
	bool v;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	if (kstrtobool(buf, &v))
		return -EINVAL;

	sdp->manage_runtime_start_stop = v;

	return count;
}
static DEVICE_ATTR_RW(manage_runtime_start_stop);

static ssize_t manage_shutdown_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);
	struct scsi_device *sdp = sdkp->device;

	return sysfs_emit(buf, "%u\n", sdp->manage_shutdown);
}

static ssize_t manage_shutdown_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);
	struct scsi_device *sdp = sdkp->device;
	bool v;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	if (kstrtobool(buf, &v))
		return -EINVAL;

	sdp->manage_shutdown = v;

	return count;
}
static DEVICE_ATTR_RW(manage_shutdown);

static ssize_t
allow_restart_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);

	return sprintf(buf, "%u\n", sdkp->device->allow_restart);
}

static ssize_t
allow_restart_store(struct device *dev, struct device_attribute *attr,
		    const char *buf, size_t count)
{
	bool v;
	struct scsi_disk *sdkp = to_scsi_disk(dev);
	struct scsi_device *sdp = sdkp->device;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	if (sdp->type != TYPE_DISK && sdp->type != TYPE_ZBC)
		return -EINVAL;

	if (kstrtobool(buf, &v))
		return -EINVAL;

	sdp->allow_restart = v;

	return count;
}
static DEVICE_ATTR_RW(allow_restart);

static ssize_t
cache_type_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);
	int ct = sdkp->RCD + 2 * sdkp->WCE;

	return sprintf(buf, "%s\n", sd_cache_types[ct]);
}
static DEVICE_ATTR_RW(cache_type);

static ssize_t
FUA_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);

	return sprintf(buf, "%u\n", sdkp->DPOFUA);
}
static DEVICE_ATTR_RO(FUA);

static ssize_t
protection_type_show(struct device *dev, struct device_attribute *attr,
		     char *buf)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);

	return sprintf(buf, "%u\n", sdkp->protection_type);
}

static ssize_t
protection_type_store(struct device *dev, struct device_attribute *attr,
		      const char *buf, size_t count)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);
	unsigned int val;
	int err;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	err = kstrtouint(buf, 10, &val);

	if (err)
		return err;

	if (val <= T10_PI_TYPE3_PROTECTION)
		sdkp->protection_type = val;

	return count;
}
static DEVICE_ATTR_RW(protection_type);

static ssize_t
protection_mode_show(struct device *dev, struct device_attribute *attr,
		     char *buf)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);
	struct scsi_device *sdp = sdkp->device;
	unsigned int dif, dix;

	dif = scsi_host_dif_capable(sdp->host, sdkp->protection_type);
	dix = scsi_host_dix_capable(sdp->host, sdkp->protection_type);

	if (!dix && scsi_host_dix_capable(sdp->host, T10_PI_TYPE0_PROTECTION)) {
		dif = 0;
		dix = 1;
	}

	if (!dif && !dix)
		return sprintf(buf, "none\n");

	return sprintf(buf, "%s%u\n", dix ? "dix" : "dif", dif);
}
static DEVICE_ATTR_RO(protection_mode);

static ssize_t
app_tag_own_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);

	return sprintf(buf, "%u\n", sdkp->ATO);
}
static DEVICE_ATTR_RO(app_tag_own);

static ssize_t
thin_provisioning_show(struct device *dev, struct device_attribute *attr,
		       char *buf)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);

	return sprintf(buf, "%u\n", sdkp->lbpme);
}
static DEVICE_ATTR_RO(thin_provisioning);

/* sysfs_match_string() requires dense arrays */
static const char *lbp_mode[] = {
	[SD_LBP_FULL]		= "full",
	[SD_LBP_UNMAP]		= "unmap",
	[SD_LBP_WS16]		= "writesame_16",
	[SD_LBP_WS10]		= "writesame_10",
	[SD_LBP_ZERO]		= "writesame_zero",
	[SD_LBP_DISABLE]	= "disabled",
};

static ssize_t
provisioning_mode_show(struct device *dev, struct device_attribute *attr,
		       char *buf)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);

	return sprintf(buf, "%s\n", lbp_mode[sdkp->provisioning_mode]);
}

static ssize_t
provisioning_mode_store(struct device *dev, struct device_attribute *attr,
			const char *buf, size_t count)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);
	struct scsi_device *sdp = sdkp->device;
	struct queue_limits lim;
	int mode, err;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	if (sdp->type != TYPE_DISK)
		return -EINVAL;

	mode = sysfs_match_string(lbp_mode, buf);
	if (mode < 0)
		return -EINVAL;

	lim = queue_limits_start_update(sdkp->disk->queue);
	sd_config_discard(sdkp, &lim, mode);
	blk_mq_freeze_queue(sdkp->disk->queue);
	err = queue_limits_commit_update(sdkp->disk->queue, &lim);
	blk_mq_unfreeze_queue(sdkp->disk->queue);
	if (err)
		return err;
	return count;
}
static DEVICE_ATTR_RW(provisioning_mode);

/* sysfs_match_string() requires dense arrays */
static const char *zeroing_mode[] = {
	[SD_ZERO_WRITE]		= "write",
	[SD_ZERO_WS]		= "writesame",
	[SD_ZERO_WS16_UNMAP]	= "writesame_16_unmap",
	[SD_ZERO_WS10_UNMAP]	= "writesame_10_unmap",
};

static ssize_t
zeroing_mode_show(struct device *dev, struct device_attribute *attr,
		  char *buf)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);

	return sprintf(buf, "%s\n", zeroing_mode[sdkp->zeroing_mode]);
}

static ssize_t
zeroing_mode_store(struct device *dev, struct device_attribute *attr,
		   const char *buf, size_t count)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);
	int mode;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	mode = sysfs_match_string(zeroing_mode, buf);
	if (mode < 0)
		return -EINVAL;

	sdkp->zeroing_mode = mode;

	return count;
}
static DEVICE_ATTR_RW(zeroing_mode);

static ssize_t
max_medium_access_timeouts_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);

	return sprintf(buf, "%u\n", sdkp->max_medium_access_timeouts);
}

static ssize_t
max_medium_access_timeouts_store(struct device *dev,
				 struct device_attribute *attr, const char *buf,
				 size_t count)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);
	int err;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	err = kstrtouint(buf, 10, &sdkp->max_medium_access_timeouts);

	return err ? err : count;
}
static DEVICE_ATTR_RW(max_medium_access_timeouts);

static ssize_t
max_write_same_blocks_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);

	return sprintf(buf, "%u\n", sdkp->max_ws_blocks);
}

static ssize_t
max_write_same_blocks_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);
	struct scsi_device *sdp = sdkp->device;
	struct queue_limits lim;
	unsigned long max;
	int err;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	if (sdp->type != TYPE_DISK && sdp->type != TYPE_ZBC)
		return -EINVAL;

	err = kstrtoul(buf, 10, &max);

	if (err)
		return err;

	if (max == 0)
		sdp->no_write_same = 1;
	else if (max <= SD_MAX_WS16_BLOCKS) {
		sdp->no_write_same = 0;
		sdkp->max_ws_blocks = max;
	}

	lim = queue_limits_start_update(sdkp->disk->queue);
	sd_config_write_same(sdkp, &lim);
	blk_mq_freeze_queue(sdkp->disk->queue);
	err = queue_limits_commit_update(sdkp->disk->queue, &lim);
	blk_mq_unfreeze_queue(sdkp->disk->queue);
	if (err)
		return err;
	return count;
}
static DEVICE_ATTR_RW(max_write_same_blocks);

static ssize_t
zoned_cap_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);

	if (sdkp->device->type == TYPE_ZBC)
		return sprintf(buf, "host-managed\n");
	if (sdkp->zoned == 1)
		return sprintf(buf, "host-aware\n");
	if (sdkp->zoned == 2)
		return sprintf(buf, "drive-managed\n");
	return sprintf(buf, "none\n");
}
static DEVICE_ATTR_RO(zoned_cap);

static ssize_t
max_retries_store(struct device *dev, struct device_attribute *attr,
		  const char *buf, size_t count)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);
	struct scsi_device *sdev = sdkp->device;
	int retries, err;

	err = kstrtoint(buf, 10, &retries);
	if (err)
		return err;

	if (retries == SCSI_CMD_RETRIES_NO_LIMIT || retries <= SD_MAX_RETRIES) {
		sdkp->max_retries = retries;
		return count;
	}

	sdev_printk(KERN_ERR, sdev, "max_retries must be between -1 and %d\n",
		    SD_MAX_RETRIES);
	return -EINVAL;
}

static ssize_t
max_retries_show(struct device *dev, struct device_attribute *attr,
		 char *buf)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);

	return sprintf(buf, "%d\n", sdkp->max_retries);
}

static DEVICE_ATTR_RW(max_retries);

static struct attribute *sd_disk_attrs[] = {
	&dev_attr_cache_type.attr,
	&dev_attr_FUA.attr,
	&dev_attr_allow_restart.attr,
	&dev_attr_manage_start_stop.attr,
	&dev_attr_manage_system_start_stop.attr,
	&dev_attr_manage_runtime_start_stop.attr,
	&dev_attr_manage_shutdown.attr,
	&dev_attr_protection_type.attr,
	&dev_attr_protection_mode.attr,
	&dev_attr_app_tag_own.attr,
	&dev_attr_thin_provisioning.attr,
	&dev_attr_provisioning_mode.attr,
	&dev_attr_zeroing_mode.attr,
	&dev_attr_max_write_same_blocks.attr,
	&dev_attr_max_medium_access_timeouts.attr,
	&dev_attr_zoned_cap.attr,
	&dev_attr_max_retries.attr,
	NULL,
};
ATTRIBUTE_GROUPS(sd_disk);

static struct class sd_disk_class = {
	.name		= "scsi_disk",
	.dev_release	= scsi_disk_release,
	.dev_groups	= sd_disk_groups,
};

/*
 * Don't request a new module, as that could deadlock in multipath
 * environment.
 */
static void sd_default_probe(dev_t devt)
{
}

/*
 * Device no to disk mapping:
 *
 *       major         disc2     disc  p1
 *   |............|.............|....|....| <- dev_t
 *            31        20       19  8 7  4 3 0
 *
 * Inside a major, we have 16k disks, however mapped non-
 * contiguously. The first 16 disks are for major0, the next
 * ones with major1, ... Disk 256 is for major0 again, disk 272
 * for major1, ...
 * As we stay compatible with our numbering scheme, we can reuse
 * the well-known SCSI majors 8, 65--71, 136--143.
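 * For example, disk index 0 becomes dev (8, 0) [sda], disk index 1
 * becomes (8, 16) [sdb], disk index 16 becomes (65, 0) [sdq], and disk
 * index 256 wraps back to major 8 again.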
 */
static int sd_major(int major_idx)
{
	switch (major_idx) {
	case 0:
		return SCSI_DISK0_MAJOR;
	case 1 ... 7:
		return SCSI_DISK1_MAJOR + major_idx - 1;
	case 8 ... 15:
		return SCSI_DISK8_MAJOR + major_idx - 8;
	default:
		BUG();
		return 0;	/* shut up gcc */
	}
}

#ifdef CONFIG_BLK_SED_OPAL
static int sd_sec_submit(void *data, u16 spsp, u8 secp, void *buffer,
		size_t len, bool send)
{
	struct scsi_disk *sdkp = data;
	struct scsi_device *sdev = sdkp->device;
	u8 cdb[12] = { 0, };
	const struct scsi_exec_args exec_args = {
		.req_flags = BLK_MQ_REQ_PM,
	};
	int ret;

	cdb[0] = send ? SECURITY_PROTOCOL_OUT : SECURITY_PROTOCOL_IN;
	cdb[1] = secp;
	put_unaligned_be16(spsp, &cdb[2]);
	put_unaligned_be32(len, &cdb[6]);

	ret = scsi_execute_cmd(sdev, cdb, send ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN,
			       buffer, len, SD_TIMEOUT, sdkp->max_retries,
			       &exec_args);
	return ret <= 0 ? ret : -EIO;
}
#endif /* CONFIG_BLK_SED_OPAL */

/*
 * Look up the DIX operation based on whether the command is read or
 * write and whether dix and dif are enabled.
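 * For example, a write to a DIF-capable target with integrity metadata
 * attached to the bio (write=1, dix=1, dif=1) maps to SCSI_PROT_WRITE_PASS.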
 */
static unsigned int sd_prot_op(bool write, bool dix, bool dif)
{
	/* Lookup table: bit 2 (write), bit 1 (dix), bit 0 (dif) */
	static const unsigned int ops[] = {	/* wrt dix dif */
		SCSI_PROT_NORMAL,		/*  0	0   0  */
		SCSI_PROT_READ_STRIP,		/*  0	0   1  */
		SCSI_PROT_READ_INSERT,		/*  0	1   0  */
		SCSI_PROT_READ_PASS,		/*  0	1   1  */
		SCSI_PROT_NORMAL,		/*  1	0   0  */
		SCSI_PROT_WRITE_INSERT,		/*  1	0   1  */
		SCSI_PROT_WRITE_STRIP,		/*  1	1   0  */
		SCSI_PROT_WRITE_PASS,		/*  1	1   1  */
	};

	return ops[write << 2 | dix << 1 | dif];
}

/*
 * Returns a mask of the protection flags that are valid for a given DIX
 * operation.
 */
static unsigned int sd_prot_flag_mask(unsigned int prot_op)
{
	static const unsigned int flag_mask[] = {
		[SCSI_PROT_NORMAL]		= 0,

		[SCSI_PROT_READ_STRIP]		= SCSI_PROT_TRANSFER_PI |
						  SCSI_PROT_GUARD_CHECK |
						  SCSI_PROT_REF_CHECK |
						  SCSI_PROT_REF_INCREMENT,

		[SCSI_PROT_READ_INSERT]		= SCSI_PROT_REF_INCREMENT |
						  SCSI_PROT_IP_CHECKSUM,

		[SCSI_PROT_READ_PASS]		= SCSI_PROT_TRANSFER_PI |
						  SCSI_PROT_GUARD_CHECK |
						  SCSI_PROT_REF_CHECK |
						  SCSI_PROT_REF_INCREMENT |
						  SCSI_PROT_IP_CHECKSUM,

		[SCSI_PROT_WRITE_INSERT]	= SCSI_PROT_TRANSFER_PI |
						  SCSI_PROT_REF_INCREMENT,

		[SCSI_PROT_WRITE_STRIP]		= SCSI_PROT_GUARD_CHECK |
						  SCSI_PROT_REF_CHECK |
						  SCSI_PROT_REF_INCREMENT |
						  SCSI_PROT_IP_CHECKSUM,

		[SCSI_PROT_WRITE_PASS]		= SCSI_PROT_TRANSFER_PI |
						  SCSI_PROT_GUARD_CHECK |
						  SCSI_PROT_REF_CHECK |
						  SCSI_PROT_REF_INCREMENT |
						  SCSI_PROT_IP_CHECKSUM,
	};

	return flag_mask[prot_op];
}

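/*
 * Set up the protection flags of @scmd and return the value that the
 * read/write CDB builders OR into the flags byte of the CDB (the
 * RDPROTECT/WRPROTECT field, bits 5-7).
 */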
static unsigned char sd_setup_protect_cmnd(struct scsi_cmnd *scmd,
					   unsigned int dix, unsigned int dif)
{
	struct request *rq = scsi_cmd_to_rq(scmd);
	struct bio *bio = rq->bio;
	unsigned int prot_op = sd_prot_op(rq_data_dir(rq), dix, dif);
	unsigned int protect = 0;

	if (dix) {				/* DIX Type 0, 1, 2, 3 */
		if (bio_integrity_flagged(bio, BIP_IP_CHECKSUM))
			scmd->prot_flags |= SCSI_PROT_IP_CHECKSUM;

		if (bio_integrity_flagged(bio, BIP_CTRL_NOCHECK) == false)
			scmd->prot_flags |= SCSI_PROT_GUARD_CHECK;
	}

	if (dif != T10_PI_TYPE3_PROTECTION) {	/* DIX/DIF Type 0, 1, 2 */
		scmd->prot_flags |= SCSI_PROT_REF_INCREMENT;

		if (bio_integrity_flagged(bio, BIP_CTRL_NOCHECK) == false)
			scmd->prot_flags |= SCSI_PROT_REF_CHECK;
	}

	if (dif) {				/* DIX/DIF Type 1, 2, 3 */
		scmd->prot_flags |= SCSI_PROT_TRANSFER_PI;

		if (bio_integrity_flagged(bio, BIP_DISK_NOCHECK))
			protect = 3 << 5;	/* Disable target PI checking */
		else
			protect = 1 << 5;	/* Enable target PI checking */
	}

	scsi_set_prot_op(scmd, prot_op);
	scsi_set_prot_type(scmd, dif);
	scmd->prot_flags &= sd_prot_flag_mask(prot_op);

	return protect;
}

static void sd_disable_discard(struct scsi_disk *sdkp)
{
	sdkp->provisioning_mode = SD_LBP_DISABLE;
	blk_queue_disable_discard(sdkp->disk->queue);
}

static void sd_config_discard(struct scsi_disk *sdkp, struct queue_limits *lim,
		unsigned int mode)
{
	unsigned int logical_block_size = sdkp->device->sector_size;
	unsigned int max_blocks = 0;

	lim->discard_alignment = sdkp->unmap_alignment * logical_block_size;
	lim->discard_granularity = max(sdkp->physical_block_size,
			sdkp->unmap_granularity * logical_block_size);
	sdkp->provisioning_mode = mode;

	switch (mode) {

	case SD_LBP_FULL:
	case SD_LBP_DISABLE:
		break;

	case SD_LBP_UNMAP:
		max_blocks = min_not_zero(sdkp->max_unmap_blocks,
					  (u32)SD_MAX_WS16_BLOCKS);
		break;

	case SD_LBP_WS16:
		if (sdkp->device->unmap_limit_for_ws)
			max_blocks = sdkp->max_unmap_blocks;
		else
			max_blocks = sdkp->max_ws_blocks;

		max_blocks = min_not_zero(max_blocks, (u32)SD_MAX_WS16_BLOCKS);
		break;

	case SD_LBP_WS10:
		if (sdkp->device->unmap_limit_for_ws)
			max_blocks = sdkp->max_unmap_blocks;
		else
			max_blocks = sdkp->max_ws_blocks;

		max_blocks = min_not_zero(max_blocks, (u32)SD_MAX_WS10_BLOCKS);
		break;

	case SD_LBP_ZERO:
		max_blocks = min_not_zero(sdkp->max_ws_blocks,
					  (u32)SD_MAX_WS10_BLOCKS);
		break;
	}

	lim->max_hw_discard_sectors = max_blocks *
		(logical_block_size >> SECTOR_SHIFT);
}

static void *sd_set_special_bvec(struct request *rq, unsigned int data_len)
{
	struct page *page;

	page = mempool_alloc(sd_page_pool, GFP_ATOMIC);
	if (!page)
		return NULL;
	clear_highpage(page);
	bvec_set_page(&rq->special_vec, page, data_len, 0);
	rq->rq_flags |= RQF_SPECIAL_PAYLOAD;
	return bvec_virt(&rq->special_vec);
}

static blk_status_t sd_setup_unmap_cmnd(struct scsi_cmnd *cmd)
{
	struct scsi_device *sdp = cmd->device;
	struct request *rq = scsi_cmd_to_rq(cmd);
	struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
	u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));
	u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
	unsigned int data_len = 24;
	char *buf;

	buf = sd_set_special_bvec(rq, data_len);
	if (!buf)
		return BLK_STS_RESOURCE;

	cmd->cmd_len = 10;
	cmd->cmnd[0] = UNMAP;
	cmd->cmnd[8] = 24;

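	/*
	 * UNMAP parameter list: an 8-byte header followed by one 16-byte
	 * block descriptor.  buf[0..1] holds the UNMAP data length (22,
	 * i.e. the remaining 6 header bytes plus the descriptor), buf[2..3]
	 * the block descriptor data length (16), buf[8..15] the starting
	 * LBA and buf[16..19] the number of blocks to unmap.
	 */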
	put_unaligned_be16(6 + 16, &buf[0]);
	put_unaligned_be16(16, &buf[2]);
	put_unaligned_be64(lba, &buf[8]);
	put_unaligned_be32(nr_blocks, &buf[16]);

	cmd->allowed = sdkp->max_retries;
	cmd->transfersize = data_len;
	rq->timeout = SD_TIMEOUT;

	return scsi_alloc_sgtables(cmd);
}

static void sd_config_atomic(struct scsi_disk *sdkp, struct queue_limits *lim)
{
	unsigned int logical_block_size = sdkp->device->sector_size,
		physical_block_size_sectors, max_atomic, unit_min, unit_max;

	if ((!sdkp->max_atomic && !sdkp->max_atomic_with_boundary) ||
	    sdkp->protection_type == T10_PI_TYPE2_PROTECTION)
		return;

	physical_block_size_sectors = sdkp->physical_block_size /
					sdkp->device->sector_size;

	unit_min = rounddown_pow_of_two(sdkp->atomic_granularity ?
					sdkp->atomic_granularity :
					physical_block_size_sectors);

	/*
	 * Only use atomic boundary when we have the odd scenario of
	 * sdkp->max_atomic == 0, which the spec does permit.
	 */
	if (sdkp->max_atomic) {
		max_atomic = sdkp->max_atomic;
		unit_max = rounddown_pow_of_two(sdkp->max_atomic);
		sdkp->use_atomic_write_boundary = 0;
	} else {
		max_atomic = sdkp->max_atomic_with_boundary;
		unit_max = rounddown_pow_of_two(sdkp->max_atomic_boundary);
		sdkp->use_atomic_write_boundary = 1;
	}

	/*
	 * Ensure compliance with granularity and alignment. For now, keep it
	 * simple and just don't support atomic writes for values mismatched
	 * with max_{boundary}atomic, physical block size, and
	 * atomic_granularity itself.
	 *
	 * We're really being distrustful by checking unit_max also...
	 */
	if (sdkp->atomic_granularity > 1) {
		if (unit_min > 1 && unit_min % sdkp->atomic_granularity)
			return;
		if (unit_max > 1 && unit_max % sdkp->atomic_granularity)
			return;
	}

	if (sdkp->atomic_alignment > 1) {
		if (unit_min > 1 && unit_min % sdkp->atomic_alignment)
			return;
		if (unit_max > 1 && unit_max % sdkp->atomic_alignment)
			return;
	}

	lim->atomic_write_hw_max = max_atomic * logical_block_size;
	lim->atomic_write_hw_boundary = 0;
	lim->atomic_write_hw_unit_min = unit_min * logical_block_size;
	lim->atomic_write_hw_unit_max = unit_max * logical_block_size;
}

static blk_status_t sd_setup_write_same16_cmnd(struct scsi_cmnd *cmd,
		bool unmap)
{
	struct scsi_device *sdp = cmd->device;
	struct request *rq = scsi_cmd_to_rq(cmd);
	struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
	u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));
	u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
	u32 data_len = sdp->sector_size;

	if (!sd_set_special_bvec(rq, data_len))
		return BLK_STS_RESOURCE;

	cmd->cmd_len = 16;
	cmd->cmnd[0] = WRITE_SAME_16;
	if (unmap)
		cmd->cmnd[1] = 0x8; /* UNMAP */
	put_unaligned_be64(lba, &cmd->cmnd[2]);
	put_unaligned_be32(nr_blocks, &cmd->cmnd[10]);

	cmd->allowed = sdkp->max_retries;
	cmd->transfersize = data_len;
	rq->timeout = unmap ? SD_TIMEOUT : SD_WRITE_SAME_TIMEOUT;

	return scsi_alloc_sgtables(cmd);
}

static blk_status_t sd_setup_write_same10_cmnd(struct scsi_cmnd *cmd,
		bool unmap)
{
	struct scsi_device *sdp = cmd->device;
	struct request *rq = scsi_cmd_to_rq(cmd);
	struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
	u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));
	u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
	u32 data_len = sdp->sector_size;

	if (!sd_set_special_bvec(rq, data_len))
		return BLK_STS_RESOURCE;

	cmd->cmd_len = 10;
	cmd->cmnd[0] = WRITE_SAME;
	if (unmap)
		cmd->cmnd[1] = 0x8; /* UNMAP */
	put_unaligned_be32(lba, &cmd->cmnd[2]);
	put_unaligned_be16(nr_blocks, &cmd->cmnd[7]);

	cmd->allowed = sdkp->max_retries;
	cmd->transfersize = data_len;
	rq->timeout = unmap ? SD_TIMEOUT : SD_WRITE_SAME_TIMEOUT;

	return scsi_alloc_sgtables(cmd);
}

static blk_status_t sd_setup_write_zeroes_cmnd(struct scsi_cmnd *cmd)
{
	struct request *rq = scsi_cmd_to_rq(cmd);
	struct scsi_device *sdp = cmd->device;
	struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
	u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));
	u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));

	if (!(rq->cmd_flags & REQ_NOUNMAP)) {
		switch (sdkp->zeroing_mode) {
		case SD_ZERO_WS16_UNMAP:
			return sd_setup_write_same16_cmnd(cmd, true);
		case SD_ZERO_WS10_UNMAP:
			return sd_setup_write_same10_cmnd(cmd, true);
		}
	}

	if (sdp->no_write_same) {
		rq->rq_flags |= RQF_QUIET;
		return BLK_STS_TARGET;
	}

	if (sdkp->ws16 || lba > 0xffffffff || nr_blocks > 0xffff)
		return sd_setup_write_same16_cmnd(cmd, false);

	return sd_setup_write_same10_cmnd(cmd, false);
}

static void sd_disable_write_same(struct scsi_disk *sdkp)
{
	sdkp->device->no_write_same = 1;
	sdkp->max_ws_blocks = 0;
	blk_queue_disable_write_zeroes(sdkp->disk->queue);
}

static void sd_config_write_same(struct scsi_disk *sdkp,
		struct queue_limits *lim)
{
	unsigned int logical_block_size = sdkp->device->sector_size;

	if (sdkp->device->no_write_same) {
		sdkp->max_ws_blocks = 0;
		goto out;
	}

	/* Some devices cannot handle block counts above 0xffff despite
	 * supporting WRITE SAME(16). Consequently we default to 64k
	 * blocks per I/O unless the device explicitly advertises a
	 * bigger limit.
	 */
	if (sdkp->max_ws_blocks > SD_MAX_WS10_BLOCKS)
		sdkp->max_ws_blocks = min_not_zero(sdkp->max_ws_blocks,
						   (u32)SD_MAX_WS16_BLOCKS);
	else if (sdkp->ws16 || sdkp->ws10 || sdkp->device->no_report_opcodes)
		sdkp->max_ws_blocks = min_not_zero(sdkp->max_ws_blocks,
						   (u32)SD_MAX_WS10_BLOCKS);
	else {
		sdkp->device->no_write_same = 1;
		sdkp->max_ws_blocks = 0;
	}

	if (sdkp->lbprz && sdkp->lbpws)
		sdkp->zeroing_mode = SD_ZERO_WS16_UNMAP;
	else if (sdkp->lbprz && sdkp->lbpws10)
		sdkp->zeroing_mode = SD_ZERO_WS10_UNMAP;
	else if (sdkp->max_ws_blocks)
		sdkp->zeroing_mode = SD_ZERO_WS;
	else
		sdkp->zeroing_mode = SD_ZERO_WRITE;

	if (sdkp->max_ws_blocks &&
	    sdkp->physical_block_size > logical_block_size) {
		/*
		 * Reporting a maximum number of blocks that is not aligned
		 * on the device physical size would cause a large write same
		 * request to be split into physically unaligned chunks by
		 * __blkdev_issue_write_zeroes() even if the caller of this
		 * functions took care to align the large request. So make sure
		 * the maximum reported is aligned to the device physical block
		 * size. This is only an optional optimization for regular
		 * disks, but this is mandatory to avoid failure of large write
		 * same requests directed at sequential write required zones of
		 * host-managed ZBC disks.
		 */
		sdkp->max_ws_blocks =
			round_down(sdkp->max_ws_blocks,
				   bytes_to_logical(sdkp->device,
						    sdkp->physical_block_size));
	}

out:
	lim->max_write_zeroes_sectors =
		sdkp->max_ws_blocks * (logical_block_size >> SECTOR_SHIFT);
}

static blk_status_t sd_setup_flush_cmnd(struct scsi_cmnd *cmd)
{
	struct request *rq = scsi_cmd_to_rq(cmd);
	struct scsi_disk *sdkp = scsi_disk(rq->q->disk);

	/* flush requests don't perform I/O, zero the S/G table */
	memset(&cmd->sdb, 0, sizeof(cmd->sdb));

	if (cmd->device->use_16_for_sync) {
		cmd->cmnd[0] = SYNCHRONIZE_CACHE_16;
		cmd->cmd_len = 16;
	} else {
		cmd->cmnd[0] = SYNCHRONIZE_CACHE;
		cmd->cmd_len = 10;
	}
	cmd->transfersize = 0;
	cmd->allowed = sdkp->max_retries;

	rq->timeout = rq->q->rq_timeout * SD_FLUSH_TIMEOUT_MULTIPLIER;
	return BLK_STS_OK;
}

/**
 * sd_group_number() - Compute the GROUP NUMBER field
 * @cmd: SCSI command for which to compute the value of the six-bit GROUP
 *	NUMBER field.
 *
 * From SBC-5 r05 (https://www.t10.org/cgi-bin/ac.pl?t=f&f=sbc5r05.pdf):
 * 0: no relative lifetime.
 * 1: shortest relative lifetime.
 * 2: second shortest relative lifetime.
 * 3 - 0x3d: intermediate relative lifetimes.
 * 0x3e: second longest relative lifetime.
 * 0x3f: longest relative lifetime.
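 *
 * The value returned below is the smaller of the request's write hint and
 * the device's permanent stream count, capped at the six-bit maximum 0x3f.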
 */
static u8 sd_group_number(struct scsi_cmnd *cmd)
{
	const struct request *rq = scsi_cmd_to_rq(cmd);
	struct scsi_disk *sdkp = scsi_disk(rq->q->disk);

	if (!sdkp->rscs)
		return 0;

	return min3((u32)rq->write_hint, (u32)sdkp->permanent_stream_count,
		    0x3fu);
}

static blk_status_t sd_setup_rw32_cmnd(struct scsi_cmnd *cmd, bool write,
				       sector_t lba, unsigned int nr_blocks,
				       unsigned char flags, unsigned int dld)
{
	cmd->cmd_len = SD_EXT_CDB_SIZE;
	cmd->cmnd[0] = VARIABLE_LENGTH_CMD;
	cmd->cmnd[6] = sd_group_number(cmd);
	cmd->cmnd[7] = 0x18; /* Additional CDB len */
	cmd->cmnd[9] = write ? WRITE_32 : READ_32;
	cmd->cmnd[10] = flags;
	cmd->cmnd[11] = dld & 0x07;
	put_unaligned_be64(lba, &cmd->cmnd[12]);
	put_unaligned_be32(lba, &cmd->cmnd[20]); /* Expected Indirect LBA */
	put_unaligned_be32(nr_blocks, &cmd->cmnd[28]);

	return BLK_STS_OK;
}

static blk_status_t sd_setup_rw16_cmnd(struct scsi_cmnd *cmd, bool write,
				       sector_t lba, unsigned int nr_blocks,
				       unsigned char flags, unsigned int dld)
{
	cmd->cmd_len  = 16;
	cmd->cmnd[0]  = write ? WRITE_16 : READ_16;
	cmd->cmnd[1]  = flags | ((dld >> 2) & 0x01);
	cmd->cmnd[14] = ((dld & 0x03) << 6) | sd_group_number(cmd);
	cmd->cmnd[15] = 0;
	put_unaligned_be64(lba, &cmd->cmnd[2]);
	put_unaligned_be32(nr_blocks, &cmd->cmnd[10]);

	return BLK_STS_OK;
}

static blk_status_t sd_setup_rw10_cmnd(struct scsi_cmnd *cmd, bool write,
				       sector_t lba, unsigned int nr_blocks,
				       unsigned char flags)
{
	cmd->cmd_len = 10;
	cmd->cmnd[0] = write ? WRITE_10 : READ_10;
	cmd->cmnd[1] = flags;
	cmd->cmnd[6] = sd_group_number(cmd);
	cmd->cmnd[9] = 0;
	put_unaligned_be32(lba, &cmd->cmnd[2]);
	put_unaligned_be16(nr_blocks, &cmd->cmnd[7]);

	return BLK_STS_OK;
}

static blk_status_t sd_setup_rw6_cmnd(struct scsi_cmnd *cmd, bool write,
				      sector_t lba, unsigned int nr_blocks,
				      unsigned char flags)
{
	/* Avoid that 0 blocks gets translated into 256 blocks. */
	if (WARN_ON_ONCE(nr_blocks == 0))
		return BLK_STS_IOERR;

	if (unlikely(flags & 0x8)) {
		/*
		 * This happens only if this drive failed 10byte rw
		 * command with ILLEGAL_REQUEST during operation and
		 * thus turned off use_10_for_rw.
		 */
		scmd_printk(KERN_ERR, cmd, "FUA write on READ/WRITE(6) drive\n");
		return BLK_STS_IOERR;
	}

	cmd->cmd_len = 6;
	cmd->cmnd[0] = write ? WRITE_6 : READ_6;
	cmd->cmnd[1] = (lba >> 16) & 0x1f;
	cmd->cmnd[2] = (lba >> 8) & 0xff;
	cmd->cmnd[3] = lba & 0xff;
	cmd->cmnd[4] = nr_blocks;
	cmd->cmnd[5] = 0;

	return BLK_STS_OK;
}

/*
 * Check if a command has a duration limit set. If it does, and the target
 * device supports CDL and the feature is enabled, return the limit
 * descriptor index to use. Return 0 (no limit) otherwise.
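 * For example, an I/O priority hint of IOPRIO_HINT_DEV_DURATION_LIMIT_3
 * selects command duration limit descriptor index 3.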
 */
static int sd_cdl_dld(struct scsi_disk *sdkp, struct scsi_cmnd *scmd)
{
	struct scsi_device *sdp = sdkp->device;
	int hint;

	if (!sdp->cdl_supported || !sdp->cdl_enable)
		return 0;

	/*
	 * Use "no limit" if the request ioprio does not specify a duration
	 * limit hint.
	 */
	hint = IOPRIO_PRIO_HINT(req_get_ioprio(scsi_cmd_to_rq(scmd)));
	if (hint < IOPRIO_HINT_DEV_DURATION_LIMIT_1 ||
	    hint > IOPRIO_HINT_DEV_DURATION_LIMIT_7)
		return 0;

	return (hint - IOPRIO_HINT_DEV_DURATION_LIMIT_1) + 1;
}

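/*
 * WRITE ATOMIC(16): the number of blocks goes into the TRANSFER LENGTH
 * field (CDB bytes 12-13); when a boundary is requested, the ATOMIC
 * BOUNDARY field (bytes 10-11) is set equal to the transfer length so
 * that the request forms a single atomic unit.
 */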
static blk_status_t sd_setup_atomic_cmnd(struct scsi_cmnd *cmd,
					 sector_t lba, unsigned int nr_blocks,
					 bool boundary, unsigned char flags)
{
	cmd->cmd_len = 16;
	cmd->cmnd[0] = WRITE_ATOMIC_16;
	cmd->cmnd[1] = flags;
	put_unaligned_be64(lba, &cmd->cmnd[2]);
	put_unaligned_be16(nr_blocks, &cmd->cmnd[12]);
	if (boundary)
		put_unaligned_be16(nr_blocks, &cmd->cmnd[10]);
	else
		put_unaligned_be16(0, &cmd->cmnd[10]);
	cmd->cmnd[14] = 0;
	cmd->cmnd[15] = 0;

	return BLK_STS_OK;
}

static blk_status_t sd_setup_read_write_cmnd(struct scsi_cmnd *cmd)
{
	struct request *rq = scsi_cmd_to_rq(cmd);
	struct scsi_device *sdp = cmd->device;
	struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
	sector_t lba = sectors_to_logical(sdp, blk_rq_pos(rq));
	sector_t threshold;
	unsigned int nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
	unsigned int mask = logical_to_sectors(sdp, 1) - 1;
	bool write = rq_data_dir(rq) == WRITE;
	unsigned char protect, fua;
	unsigned int dld;
	blk_status_t ret;
	unsigned int dif;
	bool dix;

	ret = scsi_alloc_sgtables(cmd);
	if (ret != BLK_STS_OK)
		return ret;

	ret = BLK_STS_IOERR;
	if (!scsi_device_online(sdp) || sdp->changed) {
		scmd_printk(KERN_ERR, cmd, "device offline or changed\n");
		goto fail;
	}

	if (blk_rq_pos(rq) + blk_rq_sectors(rq) > get_capacity(rq->q->disk)) {
		scmd_printk(KERN_ERR, cmd, "access beyond end of device\n");
		goto fail;
	}

	if ((blk_rq_pos(rq) & mask) || (blk_rq_sectors(rq) & mask)) {
		scmd_printk(KERN_ERR, cmd, "request not aligned to the logical block size\n");
		goto fail;
	}

	/*
	 * Some SD card readers can't handle accesses which touch the
	 * last one or two logical blocks. Split accesses as needed.
	 */
	threshold = sdkp->capacity - SD_LAST_BUGGY_SECTORS;

	if (unlikely(sdp->last_sector_bug && lba + nr_blocks > threshold)) {
		if (lba < threshold) {
			/* Access up to the threshold but not beyond */
			nr_blocks = threshold - lba;
		} else {
			/* Access only a single logical block */
			nr_blocks = 1;
		}
	}

	fua = rq->cmd_flags & REQ_FUA ? 0x8 : 0;
	dix = scsi_prot_sg_count(cmd);
	dif = scsi_host_dif_capable(cmd->device->host, sdkp->protection_type);
	dld = sd_cdl_dld(sdkp, cmd);

	if (dif || dix)
		protect = sd_setup_protect_cmnd(cmd, dix, dif);
	else
		protect = 0;

	if (protect && sdkp->protection_type == T10_PI_TYPE2_PROTECTION) {
		ret = sd_setup_rw32_cmnd(cmd, write, lba, nr_blocks,
					 protect | fua, dld);
	} else if (rq->cmd_flags & REQ_ATOMIC) {
		ret = sd_setup_atomic_cmnd(cmd, lba, nr_blocks,
					   sdkp->use_atomic_write_boundary,
					   protect | fua);
	} else if (sdp->use_16_for_rw || (nr_blocks > 0xffff)) {
		ret = sd_setup_rw16_cmnd(cmd, write, lba, nr_blocks,
					 protect | fua, dld);
	} else if ((nr_blocks > 0xff) || (lba > 0x1fffff) ||
		   sdp->use_10_for_rw || protect || rq->write_hint) {
		ret = sd_setup_rw10_cmnd(cmd, write, lba, nr_blocks,
					 protect | fua);
	} else {
		ret = sd_setup_rw6_cmnd(cmd, write, lba, nr_blocks,
					protect | fua);
	}

	if (unlikely(ret != BLK_STS_OK))
		goto fail;

	/*
	 * We shouldn't disconnect in the middle of a sector, so with a dumb
	 * host adapter, it's safe to assume that we can at least transfer
	 * this many bytes between each connect / disconnect.
	 */
	cmd->transfersize = sdp->sector_size;
	cmd->underflow = nr_blocks << 9;
	cmd->allowed = sdkp->max_retries;
	cmd->sdb.length = nr_blocks * sdp->sector_size;

	SCSI_LOG_HLQUEUE(1,
			 scmd_printk(KERN_INFO, cmd,
				     "%s: block=%llu, count=%d\n", __func__,
				     (unsigned long long)blk_rq_pos(rq),
				     blk_rq_sectors(rq)));
	SCSI_LOG_HLQUEUE(2,
			 scmd_printk(KERN_INFO, cmd,
				     "%s %d/%u 512 byte blocks.\n",
				     write ? "writing" : "reading", nr_blocks,
				     blk_rq_sectors(rq)));

	/*
	 * This indicates that the command is ready from our end to be queued.
	 */
	return BLK_STS_OK;
fail:
	scsi_free_sgtables(cmd);
	return ret;
}

static blk_status_t sd_init_command(struct scsi_cmnd *cmd)
{
	struct request *rq = scsi_cmd_to_rq(cmd);

	switch (req_op(rq)) {
	case REQ_OP_DISCARD:
		switch (scsi_disk(rq->q->disk)->provisioning_mode) {
		case SD_LBP_UNMAP:
			return sd_setup_unmap_cmnd(cmd);
		case SD_LBP_WS16:
			return sd_setup_write_same16_cmnd(cmd, true);
		case SD_LBP_WS10:
			return sd_setup_write_same10_cmnd(cmd, true);
		case SD_LBP_ZERO:
			return sd_setup_write_same10_cmnd(cmd, false);
		default:
			return BLK_STS_TARGET;
		}
	case REQ_OP_WRITE_ZEROES:
		return sd_setup_write_zeroes_cmnd(cmd);
	case REQ_OP_FLUSH:
		return sd_setup_flush_cmnd(cmd);
	case REQ_OP_READ:
	case REQ_OP_WRITE:
		return sd_setup_read_write_cmnd(cmd);
	case REQ_OP_ZONE_RESET:
		return sd_zbc_setup_zone_mgmt_cmnd(cmd, ZO_RESET_WRITE_POINTER,
						   false);
	case REQ_OP_ZONE_RESET_ALL:
		return sd_zbc_setup_zone_mgmt_cmnd(cmd, ZO_RESET_WRITE_POINTER,
						   true);
	case REQ_OP_ZONE_OPEN:
		return sd_zbc_setup_zone_mgmt_cmnd(cmd, ZO_OPEN_ZONE, false);
	case REQ_OP_ZONE_CLOSE:
		return sd_zbc_setup_zone_mgmt_cmnd(cmd, ZO_CLOSE_ZONE, false);
	case REQ_OP_ZONE_FINISH:
		return sd_zbc_setup_zone_mgmt_cmnd(cmd, ZO_FINISH_ZONE, false);
	default:
		WARN_ON_ONCE(1);
		return BLK_STS_NOTSUPP;
	}
}

static void sd_uninit_command(struct scsi_cmnd *SCpnt)
{
	struct request *rq = scsi_cmd_to_rq(SCpnt);

	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
		mempool_free(rq->special_vec.bv_page, sd_page_pool);
}

static bool sd_need_revalidate(struct gendisk *disk, struct scsi_disk *sdkp)
{
	if (sdkp->device->removable || sdkp->write_prot) {
		if (disk_check_media_change(disk))
			return true;
	}

	/*
	 * Force a full rescan after ioctl(BLKRRPART). While the disk state has
	 * nothing to do with partitions, BLKRRPART is used to force a full
	 * revalidate after things like a format for historical reasons.
	 */
	return test_bit(GD_NEED_PART_SCAN, &disk->state);
}

/**
 * sd_open - open a scsi disk device
 * @disk: disk to open
 * @mode: open mode
 *
 * Returns 0 if successful. Returns a negated errno value in case
 * of error.
 *
 * Note: This can be called from a user context (e.g. fsck(1))
 * or from within the kernel (e.g. as a result of a mount(1)).
 *
 * Locking: called with disk->open_mutex held.
 **/
static int sd_open(struct gendisk *disk, blk_mode_t mode)
{
	struct scsi_disk *sdkp = scsi_disk(disk);
	struct scsi_device *sdev = sdkp->device;
	int retval;

	if (scsi_device_get(sdev))
		return -ENXIO;

	SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_open\n"));

	/*
	 * If the device is in error recovery, wait until it is done.
	 * If the device is offline, then disallow any access to it.
	 */
	retval = -ENXIO;
	if (!scsi_block_when_processing_errors(sdev))
		goto error_out;

	if (sd_need_revalidate(disk, sdkp))
		sd_revalidate_disk(disk);

	/*
	 * If the drive is empty, just let the open fail.
	 */
	retval = -ENOMEDIUM;
	if (sdev->removable && !sdkp->media_present &&
	    !(mode & BLK_OPEN_NDELAY))
		goto error_out;

	/*
	 * If the device has the write protect tab set, have the open fail
	 * if the user expects to be able to write to the thing.
	 */
	retval = -EROFS;
	if (sdkp->write_prot && (mode & BLK_OPEN_WRITE))
		goto error_out;

	/*
	 * It is possible that the disk changing stuff resulted in
	 * the device being taken offline. If this is the case,
	 * report this to the user, and don't pretend that the
	 * open actually succeeded.
	 */
	retval = -ENXIO;
	if (!scsi_device_online(sdev))
		goto error_out;

	if ((atomic_inc_return(&sdkp->openers) == 1) && sdev->removable) {
		if (scsi_block_when_processing_errors(sdev))
			scsi_set_medium_removal(sdev, SCSI_REMOVAL_PREVENT);
	}

	return 0;

error_out:
	scsi_device_put(sdev);
	return retval;
}

/**
 * sd_release - invoked when the (last) close(2) is called on this
 * scsi disk.
 * @disk: disk to release
 *
 * Returns 0.
 *
 * Note: may block (uninterruptible) if error recovery is underway
 * on this disk.
 *
 * Locking: called with disk->open_mutex held.
 **/
static void sd_release(struct gendisk *disk)
{
	struct scsi_disk *sdkp = scsi_disk(disk);
	struct scsi_device *sdev = sdkp->device;

	SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_release\n"));

	if (atomic_dec_return(&sdkp->openers) == 0 && sdev->removable) {
		if (scsi_block_when_processing_errors(sdev))
			scsi_set_medium_removal(sdev, SCSI_REMOVAL_ALLOW);
	}

	scsi_device_put(sdev);
}

static int sd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct scsi_disk *sdkp = scsi_disk(bdev->bd_disk);
	struct scsi_device *sdp = sdkp->device;
	struct Scsi_Host *host = sdp->host;
	sector_t capacity = logical_to_sectors(sdp, sdkp->capacity);
	int diskinfo[4];

	/* default to most commonly used values */
	diskinfo[0] = 0x40;	/* 1 << 6 */
	diskinfo[1] = 0x20;	/* 1 << 5 */
	diskinfo[2] = capacity >> 11;

	/* override with calculated, extended default, or driver values */
	if (host->hostt->bios_param)
		host->hostt->bios_param(sdp, bdev, capacity, diskinfo);
	else
		scsicam_bios_param(bdev, capacity, diskinfo);

	geo->heads = diskinfo[0];
	geo->sectors = diskinfo[1];
	geo->cylinders = diskinfo[2];
	return 0;
}

/**
 * sd_ioctl - process an ioctl
 * @bdev: target block device
 * @mode: open mode
 * @cmd: ioctl command number
 * @arg: this is third argument given to ioctl(2) system call.
 *	Often contains a pointer.
 *
 * Returns 0 if successful (some ioctls return positive numbers on
 * success as well). Returns a negated errno value in case of error.
 *
 * Note: most ioctls are forwarded to the block subsystem or further
 * down in the SCSI subsystem.
 **/
static int sd_ioctl(struct block_device *bdev, blk_mode_t mode,
		    unsigned int cmd, unsigned long arg)
{
	struct gendisk *disk = bdev->bd_disk;
	struct scsi_disk *sdkp = scsi_disk(disk);
	struct scsi_device *sdp = sdkp->device;
	void __user *p = (void __user *)arg;
	int error;

	SCSI_LOG_IOCTL(1, sd_printk(KERN_INFO, sdkp, "sd_ioctl: disk=%s, "
				    "cmd=0x%x\n", disk->disk_name, cmd));

	if (bdev_is_partition(bdev) && !capable(CAP_SYS_RAWIO))
		return -ENOIOCTLCMD;

	/*
	 * If we are in the middle of error recovery, don't let anyone
	 * else try and use this device. Also, if error recovery fails, it
	 * may try and take the device offline, in which case all further
	 * access to the device is prohibited.
	 */
	error = scsi_ioctl_block_when_processing_errors(sdp, cmd,
			(mode & BLK_OPEN_NDELAY));
	if (error)
		return error;

	if (is_sed_ioctl(cmd))
		return sed_ioctl(sdkp->opal_dev, cmd, p);
	return scsi_ioctl(sdp, mode & BLK_OPEN_WRITE, cmd, p);
}

static void set_media_not_present(struct scsi_disk *sdkp)
{
	if (sdkp->media_present)
		sdkp->device->changed = 1;

	if (sdkp->device->removable) {
		sdkp->media_present = 0;
		sdkp->capacity = 0;
	}
}

static int media_not_present(struct scsi_disk *sdkp,
			     struct scsi_sense_hdr *sshdr)
{
	if (!scsi_sense_valid(sshdr))
		return 0;

	/* not invoked for commands that could return deferred errors */
	switch (sshdr->sense_key) {
	case UNIT_ATTENTION:
	case NOT_READY:
		/* medium not present */
		if (sshdr->asc == 0x3A) {
			set_media_not_present(sdkp);
			return 1;
		}
	}
	return 0;
}

/**
 * sd_check_events - check media events
 * @disk: kernel device descriptor
 * @clearing: disk events currently being cleared
 *
 * Returns mask of DISK_EVENT_*.
 *
 * Note: this function is invoked from the block subsystem.
 **/
static unsigned int sd_check_events(struct gendisk *disk, unsigned int clearing)
{
	struct scsi_disk *sdkp = disk->private_data;
	struct scsi_device *sdp;
	int retval;
	bool disk_changed;

	if (!sdkp)
		return 0;

	sdp = sdkp->device;
	SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_check_events\n"));

	/*
	 * If the device is offline, don't send any commands - just pretend as
	 * if the command failed. If the device ever comes back online, we
	 * can deal with it then. It is only because of unrecoverable errors
	 * that we would ever take a device offline in the first place.
	 */
	if (!scsi_device_online(sdp)) {
		set_media_not_present(sdkp);
		goto out;
	}

	/*
	 * Using TEST_UNIT_READY enables differentiation between drive with
	 * no cartridge loaded - NOT READY, drive with changed cartridge -
	 * UNIT ATTENTION, or with same cartridge - GOOD STATUS.
	 *
1740 * Drives that auto spin down, e.g. the iomega jaz 1G, will be started
1741 * by sd_spinup_disk(), which is called from sd_revalidate_disk()
1742 * whenever the disk is revalidated.
1743 */
1744 if (scsi_block_when_processing_errors(sdp)) {
1745 struct scsi_sense_hdr sshdr = { 0, };
1746
1747 retval = scsi_test_unit_ready(sdp, SD_TIMEOUT, sdkp->max_retries,
1748 &sshdr);
1749
1750 /* failed to execute TUR, assume media not present */
1751 if (retval < 0 || host_byte(retval)) {
1752 set_media_not_present(sdkp);
1753 goto out;
1754 }
1755
1756 if (media_not_present(sdkp, &sshdr))
1757 goto out;
1758 }
1759
1760 /*
1761 * For removable SCSI disks we have to recognise the presence
1762 * of a disk in the drive.
1763 */
1764 if (!sdkp->media_present)
1765 sdp->changed = 1;
1766 sdkp->media_present = 1;
1767 out:
1768 /*
1769 * sdp->changed is set under the following conditions:
1770 *
1771 * Medium present state has changed in either direction.
1772 * Device has indicated UNIT_ATTENTION.
1773 */
1774 disk_changed = sdp->changed;
1775 sdp->changed = 0;
1776 return disk_changed ? DISK_EVENT_MEDIA_CHANGE : 0;
1777 }
1778
1779 static int sd_sync_cache(struct scsi_disk *sdkp)
1780 {
1781 int res;
1782 struct scsi_device *sdp = sdkp->device;
1783 const int timeout = sdp->request_queue->rq_timeout
1784 * SD_FLUSH_TIMEOUT_MULTIPLIER;
1785 /* Leave the rest of the command zero to indicate flush everything. */
1786 const unsigned char cmd[16] = { sdp->use_16_for_sync ?
1787 SYNCHRONIZE_CACHE_16 : SYNCHRONIZE_CACHE };
1788 struct scsi_sense_hdr sshdr;
1789 struct scsi_failure failure_defs[] = {
1790 {
1791 .allowed = 3,
1792 .result = SCMD_FAILURE_RESULT_ANY,
1793 },
1794 {}
1795 };
1796 struct scsi_failures failures = {
1797 .failure_definitions = failure_defs,
1798 };
1799 const struct scsi_exec_args exec_args = {
1800 .req_flags = BLK_MQ_REQ_PM,
1801 .sshdr = &sshdr,
1802 .failures = &failures,
1803 };
1804
1805 if (!scsi_device_online(sdp))
1806 return -ENODEV;
1807
1808 res = scsi_execute_cmd(sdp, cmd, REQ_OP_DRV_IN, NULL, 0, timeout,
1809 sdkp->max_retries, &exec_args);
1810 if (res) {
1811 sd_print_result(sdkp, "Synchronize Cache(10) failed", res);
1812
1813 if (res < 0)
1814 return res;
1815
1816 if (scsi_status_is_check_condition(res) &&
1817 scsi_sense_valid(&sshdr)) {
1818 sd_print_sense_hdr(sdkp, &sshdr);
1819
1820 /* we need to evaluate the error return */
1821 if (sshdr.asc == 0x3a || /* medium not present */
1822 sshdr.asc == 0x20 || /* invalid command */
1823 (sshdr.asc == 0x74 && sshdr.ascq == 0x71)) /* drive is password locked */
1824 /* this is no error here */
1825 return 0;
1826
1827 /*
1828 * If a format is in progress or if the drive does not
1829 * support sync, there is not much we can do because
1830 * this is called during shutdown or suspend so just
1831 * return success so those operations can proceed.
1832 */
1833 if ((sshdr.asc == 0x04 && sshdr.ascq == 0x04) ||
1834 sshdr.sense_key == ILLEGAL_REQUEST)
1835 return 0;
1836 }
1837
1838 switch (host_byte(res)) {
1839 /* ignore errors due to racing a disconnection */
1840 case DID_BAD_TARGET:
1841 case DID_NO_CONNECT:
1842 return 0;
1843 /* signal the upper layer it might try again */
1844 case DID_BUS_BUSY:
1845 case DID_IMM_RETRY:
1846 case DID_REQUEUE:
1847 case DID_SOFT_ERROR:
1848 return -EBUSY;
1849 default:
1850 return -EIO;
1851 }
1852 }
1853 return 0;
1854 }
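
/*
 * Illustrative CDB layout for the flush issued above (a reading aid
 * based on the code, not a second command path): with use_16_for_sync
 * clear the 10-byte variant is sent, and leaving the LBA and block
 * count fields zero asks the device to flush its entire cache.
 *
 *	byte 0: 0x35 (SYNCHRONIZE CACHE(10)) or 0x91 (SYNCHRONIZE CACHE(16))
 *	rest:   zero -> flush everything
 */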
1855
1856 static void sd_rescan(struct device *dev)
1857 {
1858 struct scsi_disk *sdkp = dev_get_drvdata(dev);
1859
1860 sd_revalidate_disk(sdkp->disk);
1861 }
1862
1863 static int sd_get_unique_id(struct gendisk *disk, u8 id[16],
1864 enum blk_unique_id type)
1865 {
1866 struct scsi_device *sdev = scsi_disk(disk)->device;
1867 const struct scsi_vpd *vpd;
1868 const unsigned char *d;
1869 int ret = -ENXIO, len;
1870
1871 rcu_read_lock();
1872 vpd = rcu_dereference(sdev->vpd_pg83);
1873 if (!vpd)
1874 goto out_unlock;
1875
1876 ret = -EINVAL;
1877 for (d = vpd->data + 4; d < vpd->data + vpd->len; d += d[3] + 4) {
1878 /* we only care about designators with LU association */
1879 if (((d[1] >> 4) & 0x3) != 0x00)
1880 continue;
1881 if ((d[1] & 0xf) != type)
1882 continue;
1883
1884 /*
1885 * Only exit early if a 16-byte descriptor was found. Otherwise
1886 * keep looking as one with more entropy might still show up.
1887 */
1888 len = d[3];
1889 if (len != 8 && len != 12 && len != 16)
1890 continue;
1891 ret = len;
1892 memcpy(id, d + 4, len);
1893 if (len == 16)
1894 break;
1895 }
1896 out_unlock:
1897 rcu_read_unlock();
1898 return ret;
1899 }
1900
1901 static int sd_scsi_to_pr_err(struct scsi_sense_hdr *sshdr, int result)
1902 {
1903 switch (host_byte(result)) {
1904 case DID_TRANSPORT_MARGINAL:
1905 case DID_TRANSPORT_DISRUPTED:
1906 case DID_BUS_BUSY:
1907 return PR_STS_RETRY_PATH_FAILURE;
1908 case DID_NO_CONNECT:
1909 return PR_STS_PATH_FAILED;
1910 case DID_TRANSPORT_FAILFAST:
1911 return PR_STS_PATH_FAST_FAILED;
1912 }
1913
1914 switch (status_byte(result)) {
1915 case SAM_STAT_RESERVATION_CONFLICT:
1916 return PR_STS_RESERVATION_CONFLICT;
1917 case SAM_STAT_CHECK_CONDITION:
1918 if (!scsi_sense_valid(sshdr))
1919 return PR_STS_IOERR;
1920
1921 if (sshdr->sense_key == ILLEGAL_REQUEST &&
1922 (sshdr->asc == 0x26 || sshdr->asc == 0x24))
1923 return -EINVAL;
1924
1925 fallthrough;
1926 default:
1927 return PR_STS_IOERR;
1928 }
1929 }
1930
1931 static int sd_pr_in_command(struct block_device *bdev, u8 sa,
1932 unsigned char *data, int data_len)
1933 {
1934 struct scsi_disk *sdkp = scsi_disk(bdev->bd_disk);
1935 struct scsi_device *sdev = sdkp->device;
1936 struct scsi_sense_hdr sshdr;
1937 u8 cmd[10] = { PERSISTENT_RESERVE_IN, sa };
1938 struct scsi_failure failure_defs[] = {
1939 {
1940 .sense = UNIT_ATTENTION,
1941 .asc = SCMD_FAILURE_ASC_ANY,
1942 .ascq = SCMD_FAILURE_ASCQ_ANY,
1943 .allowed = 5,
1944 .result = SAM_STAT_CHECK_CONDITION,
1945 },
1946 {}
1947 };
1948 struct scsi_failures failures = {
1949 .failure_definitions = failure_defs,
1950 };
1951 const struct scsi_exec_args exec_args = {
1952 .sshdr = &sshdr,
1953 .failures = &failures,
1954 };
1955 int result;
1956
1957 put_unaligned_be16(data_len, &cmd[7]);
1958
1959 result = scsi_execute_cmd(sdev, cmd, REQ_OP_DRV_IN, data, data_len,
1960 SD_TIMEOUT, sdkp->max_retries, &exec_args);
1961 if (scsi_status_is_check_condition(result) &&
1962 scsi_sense_valid(&sshdr)) {
1963 sdev_printk(KERN_INFO, sdev, "PR command failed: %d\n", result);
1964 scsi_print_sense_hdr(sdev, NULL, &sshdr);
1965 }
1966
1967 if (result <= 0)
1968 return result;
1969
1970 return sd_scsi_to_pr_err(&sshdr, result);
1971 }
1972
1973 static int sd_pr_read_keys(struct block_device *bdev, struct pr_keys *keys_info)
1974 {
1975 int result, i, data_offset, num_copy_keys;
1976 u32 num_keys = keys_info->num_keys;
1977 int data_len = num_keys * 8 + 8;
1978 u8 *data;
1979
1980 data = kzalloc(data_len, GFP_KERNEL);
1981 if (!data)
1982 return -ENOMEM;
1983
1984 result = sd_pr_in_command(bdev, READ_KEYS, data, data_len);
1985 if (result)
1986 goto free_data;
1987
1988 keys_info->generation = get_unaligned_be32(&data[0]);
1989 keys_info->num_keys = get_unaligned_be32(&data[4]) / 8;
1990
1991 data_offset = 8;
1992 num_copy_keys = min(num_keys, keys_info->num_keys);
1993
1994 for (i = 0; i < num_copy_keys; i++) {
1995 keys_info->keys[i] = get_unaligned_be64(&data[data_offset]);
1996 data_offset += 8;
1997 }
1998
1999 free_data:
2000 kfree(data);
2001 return result;
2002 }
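
/*
 * PERSISTENT RESERVE IN / READ KEYS parameter data as parsed above
 * (a reading aid):
 *
 *	bytes 0-3: PRGENERATION (big endian)
 *	bytes 4-7: ADDITIONAL LENGTH, i.e. the number of key bytes following
 *	bytes 8..: one 8-byte registered reservation key per registrant
 */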
2003
2004 static int sd_pr_read_reservation(struct block_device *bdev,
2005 struct pr_held_reservation *rsv)
2006 {
2007 struct scsi_disk *sdkp = scsi_disk(bdev->bd_disk);
2008 struct scsi_device *sdev = sdkp->device;
2009 u8 data[24] = { };
2010 int result, len;
2011
2012 result = sd_pr_in_command(bdev, READ_RESERVATION, data, sizeof(data));
2013 if (result)
2014 return result;
2015
2016 len = get_unaligned_be32(&data[4]);
2017 if (!len)
2018 return 0;
2019
2020 /* Make sure we have at least the key and type */
2021 if (len < 14) {
2022 sdev_printk(KERN_INFO, sdev,
2023 "READ RESERVATION failed due to short return buffer of %d bytes\n",
2024 len);
2025 return -EINVAL;
2026 }
2027
2028 rsv->generation = get_unaligned_be32(&data[0]);
2029 rsv->key = get_unaligned_be64(&data[8]);
2030 rsv->type = scsi_pr_type_to_block(data[21] & 0x0f);
2031 return 0;
2032 }
2033
2034 static int sd_pr_out_command(struct block_device *bdev, u8 sa, u64 key,
2035 u64 sa_key, enum scsi_pr_type type, u8 flags)
2036 {
2037 struct scsi_disk *sdkp = scsi_disk(bdev->bd_disk);
2038 struct scsi_device *sdev = sdkp->device;
2039 struct scsi_sense_hdr sshdr;
2040 struct scsi_failure failure_defs[] = {
2041 {
2042 .sense = UNIT_ATTENTION,
2043 .asc = SCMD_FAILURE_ASC_ANY,
2044 .ascq = SCMD_FAILURE_ASCQ_ANY,
2045 .allowed = 5,
2046 .result = SAM_STAT_CHECK_CONDITION,
2047 },
2048 {}
2049 };
2050 struct scsi_failures failures = {
2051 .failure_definitions = failure_defs,
2052 };
2053 const struct scsi_exec_args exec_args = {
2054 .sshdr = &sshdr,
2055 .failures = &failures,
2056 };
2057 int result;
2058 u8 cmd[16] = { 0, };
2059 u8 data[24] = { 0, };
2060
2061 cmd[0] = PERSISTENT_RESERVE_OUT;
2062 cmd[1] = sa;
2063 cmd[2] = type;
2064 put_unaligned_be32(sizeof(data), &cmd[5]);
2065
2066 put_unaligned_be64(key, &data[0]);
2067 put_unaligned_be64(sa_key, &data[8]);
2068 data[20] = flags;
2069
2070 result = scsi_execute_cmd(sdev, cmd, REQ_OP_DRV_OUT, &data,
2071 sizeof(data), SD_TIMEOUT, sdkp->max_retries,
2072 &exec_args);
2073
2074 if (scsi_status_is_check_condition(result) &&
2075 scsi_sense_valid(&sshdr)) {
2076 sdev_printk(KERN_INFO, sdev, "PR command failed: %d\n", result);
2077 scsi_print_sense_hdr(sdev, NULL, &sshdr);
2078 }
2079
2080 if (result <= 0)
2081 return result;
2082
2083 return sd_scsi_to_pr_err(&sshdr, result);
2084 }
2085
2086 static int sd_pr_register(struct block_device *bdev, u64 old_key, u64 new_key,
2087 u32 flags)
2088 {
2089 if (flags & ~PR_FL_IGNORE_KEY)
2090 return -EOPNOTSUPP;
2091 return sd_pr_out_command(bdev, (flags & PR_FL_IGNORE_KEY) ? 0x06 : 0x00,
2092 old_key, new_key, 0,
2093 (1 << 0) /* APTPL */);
2094 }
2095
2096 static int sd_pr_reserve(struct block_device *bdev, u64 key, enum pr_type type,
2097 u32 flags)
2098 {
2099 if (flags)
2100 return -EOPNOTSUPP;
2101 return sd_pr_out_command(bdev, 0x01, key, 0,
2102 block_pr_type_to_scsi(type), 0);
2103 }
2104
2105 static int sd_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
2106 {
2107 return sd_pr_out_command(bdev, 0x02, key, 0,
2108 block_pr_type_to_scsi(type), 0);
2109 }
2110
2111 static int sd_pr_preempt(struct block_device *bdev, u64 old_key, u64 new_key,
2112 enum pr_type type, bool abort)
2113 {
2114 return sd_pr_out_command(bdev, abort ? 0x05 : 0x04, old_key, new_key,
2115 block_pr_type_to_scsi(type), 0);
2116 }
2117
2118 static int sd_pr_clear(struct block_device *bdev, u64 key)
2119 {
2120 return sd_pr_out_command(bdev, 0x03, key, 0, 0, 0);
2121 }
2122
2123 static const struct pr_ops sd_pr_ops = {
2124 .pr_register = sd_pr_register,
2125 .pr_reserve = sd_pr_reserve,
2126 .pr_release = sd_pr_release,
2127 .pr_preempt = sd_pr_preempt,
2128 .pr_clear = sd_pr_clear,
2129 .pr_read_keys = sd_pr_read_keys,
2130 .pr_read_reservation = sd_pr_read_reservation,
2131 };
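
/*
 * Hypothetical userspace sketch (not part of this driver): these
 * pr_ops back the generic persistent reservation ioctls from
 * <linux/pr.h>. The device name and key value below are examples only.
 *
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/pr.h>
 *
 *	struct pr_registration reg = {
 *		.old_key = 0,
 *		.new_key = 0x123,	// example key
 *	};
 *	int fd = open("/dev/sda", O_RDWR);
 *	if (fd >= 0)
 *		ioctl(fd, IOC_PR_REGISTER, &reg);	// -> sd_pr_register()
 */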
2132
2133 static void scsi_disk_free_disk(struct gendisk *disk)
2134 {
2135 struct scsi_disk *sdkp = scsi_disk(disk);
2136
2137 put_device(&sdkp->disk_dev);
2138 }
2139
2140 static const struct block_device_operations sd_fops = {
2141 .owner = THIS_MODULE,
2142 .open = sd_open,
2143 .release = sd_release,
2144 .ioctl = sd_ioctl,
2145 .getgeo = sd_getgeo,
2146 .compat_ioctl = blkdev_compat_ptr_ioctl,
2147 .check_events = sd_check_events,
2148 .unlock_native_capacity = sd_unlock_native_capacity,
2149 .report_zones = sd_zbc_report_zones,
2150 .get_unique_id = sd_get_unique_id,
2151 .free_disk = scsi_disk_free_disk,
2152 .pr_ops = &sd_pr_ops,
2153 };
2154
2155 /**
2156 * sd_eh_reset - reset error handling callback
2157 * @scmd: sd-issued command that has failed
2158 *
2159 * This function is called by the SCSI midlayer before starting
2160 * SCSI EH. When counting medium access failures we have to be
2161 * careful to register it only once per device and SCSI EH run;
2162 * there might be several timed-out commands which would cause the
2163 * 'max_medium_access_timeouts' counter to trigger already after the
2164 * first SCSI EH run and take the device offline.
2165 * So this function resets the internal counter before starting SCSI EH.
2166 **/
2167 static void sd_eh_reset(struct scsi_cmnd *scmd)
2168 {
2169 struct scsi_disk *sdkp = scsi_disk(scsi_cmd_to_rq(scmd)->q->disk);
2170
2171 /* New SCSI EH run, reset gate variable */
2172 sdkp->ignore_medium_access_errors = false;
2173 }
2174
2175 /**
2176 * sd_eh_action - error handling callback
2177 * @scmd: sd-issued command that has failed
2178 * @eh_disp: The recovery disposition suggested by the midlayer
2179 *
2180 * This function is called by the SCSI midlayer upon completion of an
2181 * error test command (currently TEST UNIT READY). The result of sending
2182 * the eh command is passed in eh_disp. We're looking for devices that
2183 * fail medium access commands but are OK with non-access commands like
2184 * TEST UNIT READY (and would therefore wrongly be seen as having
2185 * recovered successfully).
2186 **/
2187 static int sd_eh_action(struct scsi_cmnd *scmd, int eh_disp)
2188 {
2189 struct scsi_disk *sdkp = scsi_disk(scsi_cmd_to_rq(scmd)->q->disk);
2190 struct scsi_device *sdev = scmd->device;
2191
2192 if (!scsi_device_online(sdev) ||
2193 !scsi_medium_access_command(scmd) ||
2194 host_byte(scmd->result) != DID_TIME_OUT ||
2195 eh_disp != SUCCESS)
2196 return eh_disp;
2197
2198 /*
2199 * The device has timed out executing a medium access command.
2200 * However, the TEST UNIT READY command sent during error
2201 * handling completed successfully. Either the device is in the
2202 * process of recovering or it has suffered an internal failure
2203 * that prevents access to the storage medium.
2204 */
2205 if (!sdkp->ignore_medium_access_errors) {
2206 sdkp->medium_access_timed_out++;
2207 sdkp->ignore_medium_access_errors = true;
2208 }
2209
2210 /*
2211 * If the device keeps failing read/write commands but TEST UNIT
2212 * READY always completes successfully we assume that medium
2213 * access is no longer possible and take the device offline.
2214 */
2215 if (sdkp->medium_access_timed_out >= sdkp->max_medium_access_timeouts) {
2216 scmd_printk(KERN_ERR, scmd,
2217 "Medium access timeout failure. Offlining disk!\n");
2218 mutex_lock(&sdev->state_mutex);
2219 scsi_device_set_state(sdev, SDEV_OFFLINE);
2220 mutex_unlock(&sdev->state_mutex);
2221
2222 return SUCCESS;
2223 }
2224
2225 return eh_disp;
2226 }
2227
2228 static unsigned int sd_completed_bytes(struct scsi_cmnd *scmd)
2229 {
2230 struct request *req = scsi_cmd_to_rq(scmd);
2231 struct scsi_device *sdev = scmd->device;
2232 unsigned int transferred, good_bytes;
2233 u64 start_lba, end_lba, bad_lba;
2234
2235 /*
2236 * Some commands have a payload smaller than the device logical
2237 * block size (e.g. INQUIRY on a 4K disk).
2238 */
2239 if (scsi_bufflen(scmd) <= sdev->sector_size)
2240 return 0;
2241
2242 /* Check if we have a 'bad_lba' information */
2243 if (!scsi_get_sense_info_fld(scmd->sense_buffer,
2244 SCSI_SENSE_BUFFERSIZE,
2245 &bad_lba))
2246 return 0;
2247
2248 /*
2249 * If the bad lba was reported incorrectly, we have no idea where
2250 * the error is.
2251 */
2252 start_lba = sectors_to_logical(sdev, blk_rq_pos(req));
2253 end_lba = start_lba + bytes_to_logical(sdev, scsi_bufflen(scmd));
2254 if (bad_lba < start_lba || bad_lba >= end_lba)
2255 return 0;
2256
2257 /*
2258 * resid is optional but mostly filled in. When it's unused,
2259 * its value is zero, so we assume the whole buffer transferred
2260 */
2261 transferred = scsi_bufflen(scmd) - scsi_get_resid(scmd);
2262
2263 /* This computation should always be done in terms of the
2264 * resolution of the device's medium.
2265 */
2266 good_bytes = logical_to_bytes(sdev, bad_lba - start_lba);
2267
2268 return min(good_bytes, transferred);
2269 }
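
/*
 * Worked example (a reading aid): a READ of 8 blocks starting at LBA
 * 1000 on a 512-byte-sector disk fails with sense data reporting
 * bad_lba = 1003. Then start_lba = 1000 and end_lba = 1008, so
 * good_bytes = logical_to_bytes(sdev, 1003 - 1000) = 3 * 512 = 1536,
 * capped by the number of bytes actually transferred.
 */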
2270
2271 /**
2272 * sd_done - bottom half handler: called when the lower level
2273 * driver has completed (successfully or otherwise) a scsi command.
2274 * @SCpnt: mid-level's per command structure.
2275 *
2276 * Note: potentially run from within an ISR. Must not block.
2277 **/
2278 static int sd_done(struct scsi_cmnd *SCpnt)
2279 {
2280 int result = SCpnt->result;
2281 unsigned int good_bytes = result ? 0 : scsi_bufflen(SCpnt);
2282 unsigned int sector_size = SCpnt->device->sector_size;
2283 unsigned int resid;
2284 struct scsi_sense_hdr sshdr;
2285 struct request *req = scsi_cmd_to_rq(SCpnt);
2286 struct scsi_disk *sdkp = scsi_disk(req->q->disk);
2287 int sense_valid = 0;
2288 int sense_deferred = 0;
2289
2290 switch (req_op(req)) {
2291 case REQ_OP_DISCARD:
2292 case REQ_OP_WRITE_ZEROES:
2293 case REQ_OP_ZONE_RESET:
2294 case REQ_OP_ZONE_RESET_ALL:
2295 case REQ_OP_ZONE_OPEN:
2296 case REQ_OP_ZONE_CLOSE:
2297 case REQ_OP_ZONE_FINISH:
2298 if (!result) {
2299 good_bytes = blk_rq_bytes(req);
2300 scsi_set_resid(SCpnt, 0);
2301 } else {
2302 good_bytes = 0;
2303 scsi_set_resid(SCpnt, blk_rq_bytes(req));
2304 }
2305 break;
2306 default:
2307 /*
2308 * In case of bogus fw or device, we could end up having
2309 * an unaligned partial completion. Check this here and force
2310 * alignment.
2311 */
2312 resid = scsi_get_resid(SCpnt);
2313 if (resid & (sector_size - 1)) {
2314 sd_printk(KERN_INFO, sdkp,
2315 "Unaligned partial completion (resid=%u, sector_sz=%u)\n",
2316 resid, sector_size);
2317 scsi_print_command(SCpnt);
2318 resid = min(scsi_bufflen(SCpnt),
2319 round_up(resid, sector_size));
2320 scsi_set_resid(SCpnt, resid);
2321 }
2322 }
2323
2324 if (result) {
2325 sense_valid = scsi_command_normalize_sense(SCpnt, &sshdr);
2326 if (sense_valid)
2327 sense_deferred = scsi_sense_is_deferred(&sshdr);
2328 }
2329 sdkp->medium_access_timed_out = 0;
2330
2331 if (!scsi_status_is_check_condition(result) &&
2332 (!sense_valid || sense_deferred))
2333 goto out;
2334
2335 switch (sshdr.sense_key) {
2336 case HARDWARE_ERROR:
2337 case MEDIUM_ERROR:
2338 good_bytes = sd_completed_bytes(SCpnt);
2339 break;
2340 case RECOVERED_ERROR:
2341 good_bytes = scsi_bufflen(SCpnt);
2342 break;
2343 case NO_SENSE:
2344 /* This indicates a false check condition, so ignore it. An
2345 * unknown amount of data was transferred so treat it as an
2346 * error.
2347 */
2348 SCpnt->result = 0;
2349 memset(SCpnt->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
2350 break;
2351 case ABORTED_COMMAND:
2352 if (sshdr.asc == 0x10) /* DIF: Target detected corruption */
2353 good_bytes = sd_completed_bytes(SCpnt);
2354 break;
2355 case ILLEGAL_REQUEST:
2356 switch (sshdr.asc) {
2357 case 0x10: /* DIX: Host detected corruption */
2358 good_bytes = sd_completed_bytes(SCpnt);
2359 break;
2360 case 0x20: /* INVALID COMMAND OPCODE */
2361 case 0x24: /* INVALID FIELD IN CDB */
2362 switch (SCpnt->cmnd[0]) {
2363 case UNMAP:
2364 sd_disable_discard(sdkp);
2365 break;
2366 case WRITE_SAME_16:
2367 case WRITE_SAME:
2368 if (SCpnt->cmnd[1] & 8) { /* UNMAP */
2369 sd_disable_discard(sdkp);
2370 } else {
2371 sd_disable_write_same(sdkp);
2372 req->rq_flags |= RQF_QUIET;
2373 }
2374 break;
2375 }
2376 }
2377 break;
2378 default:
2379 break;
2380 }
2381
2382 out:
2383 if (sdkp->device->type == TYPE_ZBC)
2384 good_bytes = sd_zbc_complete(SCpnt, good_bytes, &sshdr);
2385
2386 SCSI_LOG_HLCOMPLETE(1, scmd_printk(KERN_INFO, SCpnt,
2387 "sd_done: completed %d of %d bytes\n",
2388 good_bytes, scsi_bufflen(SCpnt)));
2389
2390 return good_bytes;
2391 }
2392
2393 /*
2394 * spinup disk - called only in sd_revalidate_disk()
2395 */
2396 static void
2397 sd_spinup_disk(struct scsi_disk *sdkp)
2398 {
2399 static const u8 cmd[10] = { TEST_UNIT_READY };
2400 unsigned long spintime_expire = 0;
2401 int spintime, sense_valid = 0;
2402 unsigned int the_result;
2403 struct scsi_sense_hdr sshdr;
2404 struct scsi_failure failure_defs[] = {
2405 /* Do not retry Medium Not Present */
2406 {
2407 .sense = UNIT_ATTENTION,
2408 .asc = 0x3A,
2409 .ascq = SCMD_FAILURE_ASCQ_ANY,
2410 .result = SAM_STAT_CHECK_CONDITION,
2411 },
2412 {
2413 .sense = NOT_READY,
2414 .asc = 0x3A,
2415 .ascq = SCMD_FAILURE_ASCQ_ANY,
2416 .result = SAM_STAT_CHECK_CONDITION,
2417 },
2418 /* Retry when scsi_status_is_good would return false 3 times */
2419 {
2420 .result = SCMD_FAILURE_STAT_ANY,
2421 .allowed = 3,
2422 },
2423 {}
2424 };
2425 struct scsi_failures failures = {
2426 .failure_definitions = failure_defs,
2427 };
2428 const struct scsi_exec_args exec_args = {
2429 .sshdr = &sshdr,
2430 .failures = &failures,
2431 };
2432
2433 spintime = 0;
2434
2435 /* Spin up drives, as required. This needs to be done both at boot
2436 * time and on module load. */
2437 do {
2438 bool media_was_present = sdkp->media_present;
2439
2440 scsi_failures_reset_retries(&failures);
2441
2442 the_result = scsi_execute_cmd(sdkp->device, cmd, REQ_OP_DRV_IN,
2443 NULL, 0, SD_TIMEOUT,
2444 sdkp->max_retries, &exec_args);
2445
2446
2447 if (the_result > 0) {
2448 /*
2449 * If the drive has indicated to us that it doesn't
2450 * have any media in it, don't bother with any more
2451 * polling.
2452 */
2453 if (media_not_present(sdkp, &sshdr)) {
2454 if (media_was_present)
2455 sd_printk(KERN_NOTICE, sdkp,
2456 "Media removed, stopped polling\n");
2457 return;
2458 }
2459 sense_valid = scsi_sense_valid(&sshdr);
2460 }
2461
2462 if (!scsi_status_is_check_condition(the_result)) {
2463 /* no sense, TUR either succeeded or failed
2464 * with a status error */
2465 if (!spintime && !scsi_status_is_good(the_result)) {
2466 sd_print_result(sdkp, "Test Unit Ready failed",
2467 the_result);
2468 }
2469 break;
2470 }
2471
2472 /*
2473 * The device does not want the automatic start to be issued.
2474 */
2475 if (sdkp->device->no_start_on_add)
2476 break;
2477
2478 if (sense_valid && sshdr.sense_key == NOT_READY) {
2479 if (sshdr.asc == 4 && sshdr.ascq == 3)
2480 break; /* manual intervention required */
2481 if (sshdr.asc == 4 && sshdr.ascq == 0xb)
2482 break; /* standby */
2483 if (sshdr.asc == 4 && sshdr.ascq == 0xc)
2484 break; /* unavailable */
2485 if (sshdr.asc == 4 && sshdr.ascq == 0x1b)
2486 break; /* sanitize in progress */
2487 if (sshdr.asc == 4 && sshdr.ascq == 0x24)
2488 break; /* depopulation in progress */
2489 if (sshdr.asc == 4 && sshdr.ascq == 0x25)
2490 break; /* depopulation restoration in progress */
2491 /*
2492 * Issue command to spin up drive when not ready
2493 */
2494 if (!spintime) {
2495 /* Return immediately and start spin cycle */
2496 const u8 start_cmd[10] = {
2497 [0] = START_STOP,
2498 [1] = 1,
2499 [4] = sdkp->device->start_stop_pwr_cond ?
2500 0x11 : 1,
2501 };
2502
2503 sd_printk(KERN_NOTICE, sdkp, "Spinning up disk...");
2504 scsi_execute_cmd(sdkp->device, start_cmd,
2505 REQ_OP_DRV_IN, NULL, 0,
2506 SD_TIMEOUT, sdkp->max_retries,
2507 &exec_args);
2508 spintime_expire = jiffies + 100 * HZ;
2509 spintime = 1;
2510 }
2511 /* Wait 1 second for next try */
2512 msleep(1000);
2513 printk(KERN_CONT ".");
2514
2515 /*
2516 * Wait for USB flash devices with slow firmware.
2517 * Yes, this sense key/ASC combination shouldn't
2518 * occur here. It's characteristic of these devices.
2519 */
2520 } else if (sense_valid &&
2521 sshdr.sense_key == UNIT_ATTENTION &&
2522 sshdr.asc == 0x28) {
2523 if (!spintime) {
2524 spintime_expire = jiffies + 5 * HZ;
2525 spintime = 1;
2526 }
2527 /* Wait 1 second for next try */
2528 msleep(1000);
2529 } else {
2530 /* we don't understand the sense code, so it's
2531 * probably pointless to loop */
2532 if (!spintime) {
2533 sd_printk(KERN_NOTICE, sdkp, "Unit Not Ready\n");
2534 sd_print_sense_hdr(sdkp, &sshdr);
2535 }
2536 break;
2537 }
2538
2539 } while (spintime && time_before_eq(jiffies, spintime_expire));
2540
2541 if (spintime) {
2542 if (scsi_status_is_good(the_result))
2543 printk(KERN_CONT "ready\n");
2544 else
2545 printk(KERN_CONT "not responding...\n");
2546 }
2547 }
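
/*
 * Illustrative layout of the START STOP UNIT CDB built above (a
 * reading aid, not a second command path):
 *
 *	byte 0: 0x1b (START STOP)
 *	byte 1: 0x01 -> IMMED, return before the spin-up completes
 *	byte 4: 0x01 -> START, or 0x11 -> POWER CONDITION = ACTIVE for
 *		devices that prefer power-condition based start/stop
 */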
2548
2549 /*
2550 * Determine whether disk supports Data Integrity Field.
2551 */
2552 static int sd_read_protection_type(struct scsi_disk *sdkp, unsigned char *buffer)
2553 {
2554 struct scsi_device *sdp = sdkp->device;
2555 u8 type;
2556
2557 if (scsi_device_protection(sdp) == 0 || (buffer[12] & 1) == 0) {
2558 sdkp->protection_type = 0;
2559 return 0;
2560 }
2561
2562 type = ((buffer[12] >> 1) & 7) + 1; /* P_TYPE 0 = Type 1 */
2563
2564 if (type > T10_PI_TYPE3_PROTECTION) {
2565 sd_printk(KERN_ERR, sdkp, "formatted with unsupported" \
2566 " protection type %u. Disabling disk!\n",
2567 type);
2568 sdkp->protection_type = 0;
2569 return -ENODEV;
2570 }
2571
2572 sdkp->protection_type = type;
2573
2574 return 0;
2575 }
2576
2577 static void sd_config_protection(struct scsi_disk *sdkp,
2578 struct queue_limits *lim)
2579 {
2580 struct scsi_device *sdp = sdkp->device;
2581
2582 if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY))
2583 sd_dif_config_host(sdkp, lim);
2584
2585 if (!sdkp->protection_type)
2586 return;
2587
2588 if (!scsi_host_dif_capable(sdp->host, sdkp->protection_type)) {
2589 sd_first_printk(KERN_NOTICE, sdkp,
2590 "Disabling DIF Type %u protection\n",
2591 sdkp->protection_type);
2592 sdkp->protection_type = 0;
2593 }
2594
2595 sd_first_printk(KERN_NOTICE, sdkp, "Enabling DIF Type %u protection\n",
2596 sdkp->protection_type);
2597 }
2598
2599 static void read_capacity_error(struct scsi_disk *sdkp, struct scsi_device *sdp,
2600 struct scsi_sense_hdr *sshdr, int sense_valid,
2601 int the_result)
2602 {
2603 if (sense_valid)
2604 sd_print_sense_hdr(sdkp, sshdr);
2605 else
2606 sd_printk(KERN_NOTICE, sdkp, "Sense not available.\n");
2607
2608 /*
2609 * Set dirty bit for removable devices if not ready -
2610 * sometimes drives will not report this properly.
2611 */
2612 if (sdp->removable &&
2613 sense_valid && sshdr->sense_key == NOT_READY)
2614 set_media_not_present(sdkp);
2615
2616 /*
2617 * We used to set media_present to 0 here to indicate no media
2618 * in the drive, but some drives fail read capacity even with
2619 * media present, so we can't do that.
2620 */
2621 sdkp->capacity = 0; /* unknown mapped to zero - as usual */
2622 }
2623
2624 #define RC16_LEN 32
2625 #if RC16_LEN > SD_BUF_SIZE
2626 #error RC16_LEN must not be more than SD_BUF_SIZE
2627 #endif
2628
2629 #define READ_CAPACITY_RETRIES_ON_RESET 10
2630
2631 static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp,
2632 struct queue_limits *lim, unsigned char *buffer)
2633 {
2634 unsigned char cmd[16];
2635 struct scsi_sense_hdr sshdr;
2636 const struct scsi_exec_args exec_args = {
2637 .sshdr = &sshdr,
2638 };
2639 int sense_valid = 0;
2640 int the_result;
2641 int retries = 3, reset_retries = READ_CAPACITY_RETRIES_ON_RESET;
2642 unsigned int alignment;
2643 unsigned long long lba;
2644 unsigned sector_size;
2645
2646 if (sdp->no_read_capacity_16)
2647 return -EINVAL;
2648
2649 do {
2650 memset(cmd, 0, 16);
2651 cmd[0] = SERVICE_ACTION_IN_16;
2652 cmd[1] = SAI_READ_CAPACITY_16;
2653 cmd[13] = RC16_LEN;
2654 memset(buffer, 0, RC16_LEN);
2655
2656 the_result = scsi_execute_cmd(sdp, cmd, REQ_OP_DRV_IN,
2657 buffer, RC16_LEN, SD_TIMEOUT,
2658 sdkp->max_retries, &exec_args);
2659 if (the_result > 0) {
2660 if (media_not_present(sdkp, &sshdr))
2661 return -ENODEV;
2662
2663 sense_valid = scsi_sense_valid(&sshdr);
2664 if (sense_valid &&
2665 sshdr.sense_key == ILLEGAL_REQUEST &&
2666 (sshdr.asc == 0x20 || sshdr.asc == 0x24) &&
2667 sshdr.ascq == 0x00)
2668 /* Invalid Command Operation Code or
2669 * Invalid Field in CDB, just retry
2670 * silently with RC10 */
2671 return -EINVAL;
2672 if (sense_valid &&
2673 sshdr.sense_key == UNIT_ATTENTION &&
2674 sshdr.asc == 0x29 && sshdr.ascq == 0x00)
2675 /* Device reset might occur several times,
2676 * give it one more chance */
2677 if (--reset_retries > 0)
2678 continue;
2679 }
2680 retries--;
2681
2682 } while (the_result && retries);
2683
2684 if (the_result) {
2685 sd_print_result(sdkp, "Read Capacity(16) failed", the_result);
2686 read_capacity_error(sdkp, sdp, &sshdr, sense_valid, the_result);
2687 return -EINVAL;
2688 }
2689
2690 sector_size = get_unaligned_be32(&buffer[8]);
2691 lba = get_unaligned_be64(&buffer[0]);
2692
2693 if (sd_read_protection_type(sdkp, buffer) < 0) {
2694 sdkp->capacity = 0;
2695 return -ENODEV;
2696 }
2697
2698 /* Logical blocks per physical block exponent */
2699 sdkp->physical_block_size = (1 << (buffer[13] & 0xf)) * sector_size;
2700
2701 /* RC basis */
2702 sdkp->rc_basis = (buffer[12] >> 4) & 0x3;
2703
2704 /* Lowest aligned logical block */
2705 alignment = ((buffer[14] & 0x3f) << 8 | buffer[15]) * sector_size;
2706 lim->alignment_offset = alignment;
2707 if (alignment && sdkp->first_scan)
2708 sd_printk(KERN_NOTICE, sdkp,
2709 "physical block alignment offset: %u\n", alignment);
2710
2711 if (buffer[14] & 0x80) { /* LBPME */
2712 sdkp->lbpme = 1;
2713
2714 if (buffer[14] & 0x40) /* LBPRZ */
2715 sdkp->lbprz = 1;
2716 }
2717
2718 sdkp->capacity = lba + 1;
2719 return sector_size;
2720 }
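
/*
 * READ CAPACITY(16) parameter data consumed above (a reading aid):
 *
 *	bytes  0-7 : RETURNED LOGICAL BLOCK ADDRESS (highest LBA)
 *	bytes  8-11: LOGICAL BLOCK LENGTH IN BYTES
 *	byte  12   : RC BASIS (bits 5-4), P_TYPE (bits 3-1), PROT_EN (bit 0)
 *	byte  13   : logical blocks per physical block exponent (bits 3-0)
 *	bytes 14-15: LBPME (bit 7), LBPRZ (bit 6) and lowest aligned LBA
 */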
2721
2722 static int read_capacity_10(struct scsi_disk *sdkp, struct scsi_device *sdp,
2723 unsigned char *buffer)
2724 {
2725 static const u8 cmd[10] = { READ_CAPACITY };
2726 struct scsi_sense_hdr sshdr;
2727 struct scsi_failure failure_defs[] = {
2728 /* Do not retry Medium Not Present */
2729 {
2730 .sense = UNIT_ATTENTION,
2731 .asc = 0x3A,
2732 .result = SAM_STAT_CHECK_CONDITION,
2733 },
2734 {
2735 .sense = NOT_READY,
2736 .asc = 0x3A,
2737 .result = SAM_STAT_CHECK_CONDITION,
2738 },
2739 /* Device reset might occur several times so retry a lot */
2740 {
2741 .sense = UNIT_ATTENTION,
2742 .asc = 0x29,
2743 .allowed = READ_CAPACITY_RETRIES_ON_RESET,
2744 .result = SAM_STAT_CHECK_CONDITION,
2745 },
2746 /* For any other error not listed above, retry 3 times */
2747 {
2748 .result = SCMD_FAILURE_RESULT_ANY,
2749 .allowed = 3,
2750 },
2751 {}
2752 };
2753 struct scsi_failures failures = {
2754 .failure_definitions = failure_defs,
2755 };
2756 const struct scsi_exec_args exec_args = {
2757 .sshdr = &sshdr,
2758 .failures = &failures,
2759 };
2760 int sense_valid = 0;
2761 int the_result;
2762 sector_t lba;
2763 unsigned sector_size;
2764
2765 memset(buffer, 0, 8);
2766
2767 the_result = scsi_execute_cmd(sdp, cmd, REQ_OP_DRV_IN, buffer,
2768 8, SD_TIMEOUT, sdkp->max_retries,
2769 &exec_args);
2770
2771 if (the_result > 0) {
2772 sense_valid = scsi_sense_valid(&sshdr);
2773
2774 if (media_not_present(sdkp, &sshdr))
2775 return -ENODEV;
2776 }
2777
2778 if (the_result) {
2779 sd_print_result(sdkp, "Read Capacity(10) failed", the_result);
2780 read_capacity_error(sdkp, sdp, &sshdr, sense_valid, the_result);
2781 return -EINVAL;
2782 }
2783
2784 sector_size = get_unaligned_be32(&buffer[4]);
2785 lba = get_unaligned_be32(&buffer[0]);
2786
2787 if (sdp->no_read_capacity_16 && (lba == 0xffffffff)) {
2788 /* Some buggy (usb cardreader) devices return an lba of
2789 0xffffffff when they want to report a size of 0 (with
2790 which they really mean no media is present) */
2791 sdkp->capacity = 0;
2792 sdkp->physical_block_size = sector_size;
2793 return sector_size;
2794 }
2795
2796 sdkp->capacity = lba + 1;
2797 sdkp->physical_block_size = sector_size;
2798 return sector_size;
2799 }
2800
2801 static int sd_try_rc16_first(struct scsi_device *sdp)
2802 {
2803 if (sdp->host->max_cmd_len < 16)
2804 return 0;
2805 if (sdp->try_rc_10_first)
2806 return 0;
2807 if (sdp->scsi_level > SCSI_SPC_2)
2808 return 1;
2809 if (scsi_device_protection(sdp))
2810 return 1;
2811 return 0;
2812 }
2813
2814 /*
2815 * read disk capacity
2816 */
2817 static void
2818 sd_read_capacity(struct scsi_disk *sdkp, struct queue_limits *lim,
2819 unsigned char *buffer)
2820 {
2821 int sector_size;
2822 struct scsi_device *sdp = sdkp->device;
2823
2824 if (sd_try_rc16_first(sdp)) {
2825 sector_size = read_capacity_16(sdkp, sdp, lim, buffer);
2826 if (sector_size == -EOVERFLOW)
2827 goto got_data;
2828 if (sector_size == -ENODEV)
2829 return;
2830 if (sector_size < 0)
2831 sector_size = read_capacity_10(sdkp, sdp, buffer);
2832 if (sector_size < 0)
2833 return;
2834 } else {
2835 sector_size = read_capacity_10(sdkp, sdp, buffer);
2836 if (sector_size == -EOVERFLOW)
2837 goto got_data;
2838 if (sector_size < 0)
2839 return;
2840 if ((sizeof(sdkp->capacity) > 4) &&
2841 (sdkp->capacity > 0xffffffffULL)) {
2842 int old_sector_size = sector_size;
2843 sd_printk(KERN_NOTICE, sdkp, "Very big device. "
2844 "Trying to use READ CAPACITY(16).\n");
2845 sector_size = read_capacity_16(sdkp, sdp, lim, buffer);
2846 if (sector_size < 0) {
2847 sd_printk(KERN_NOTICE, sdkp,
2848 "Using 0xffffffff as device size\n");
2849 sdkp->capacity = 1 + (sector_t) 0xffffffff;
2850 sector_size = old_sector_size;
2851 goto got_data;
2852 }
2853 /* Remember that READ CAPACITY(16) succeeded */
2854 sdp->try_rc_10_first = 0;
2855 }
2856 }
2857
2858 /* Some devices are known to return the total number of blocks,
2859 * not the highest block number. Some devices have versions
2860 * which do this and others which do not. Some devices we might
2861 * suspect of doing this but we don't know for certain.
2862 *
2863 * If we know the reported capacity is wrong, decrement it. If
2864 * we can only guess, then assume the number of blocks is even
2865 * (usually true but not always) and err on the side of lowering
2866 * the capacity.
2867 */
2868 if (sdp->fix_capacity ||
2869 (sdp->guess_capacity && (sdkp->capacity & 0x01))) {
2870 sd_printk(KERN_INFO, sdkp, "Adjusting the sector count "
2871 "from its reported value: %llu\n",
2872 (unsigned long long) sdkp->capacity);
2873 --sdkp->capacity;
2874 }
2875
2876 got_data:
2877 if (sector_size == 0) {
2878 sector_size = 512;
2879 sd_printk(KERN_NOTICE, sdkp, "Sector size 0 reported, "
2880 "assuming 512.\n");
2881 }
2882
2883 if (sector_size != 512 &&
2884 sector_size != 1024 &&
2885 sector_size != 2048 &&
2886 sector_size != 4096) {
2887 sd_printk(KERN_NOTICE, sdkp, "Unsupported sector size %d.\n",
2888 sector_size);
2889 /*
2890 * The user might want to re-format the drive with
2891 * a supported sectorsize. Once this happens, it
2892 * would be relatively trivial to set the thing up.
2893 * For this reason, we leave the thing in the table.
2894 */
2895 sdkp->capacity = 0;
2896 /*
2897 * set a bogus sector size so the normal read/write
2898 * logic in the block layer will eventually refuse any
2899 * request on this device without tripping over power
2900 * of two sector size assumptions
2901 */
2902 sector_size = 512;
2903 }
2904 lim->logical_block_size = sector_size;
2905 lim->physical_block_size = sdkp->physical_block_size;
2906 sdkp->device->sector_size = sector_size;
2907
2908 if (sdkp->capacity > 0xffffffff)
2909 sdp->use_16_for_rw = 1;
2910
2911 }
2912
2913 /*
2914 * Print disk capacity
2915 */
2916 static void
2917 sd_print_capacity(struct scsi_disk *sdkp,
2918 sector_t old_capacity)
2919 {
2920 int sector_size = sdkp->device->sector_size;
2921 char cap_str_2[10], cap_str_10[10];
2922
2923 if (!sdkp->first_scan && old_capacity == sdkp->capacity)
2924 return;
2925
2926 string_get_size(sdkp->capacity, sector_size,
2927 STRING_UNITS_2, cap_str_2, sizeof(cap_str_2));
2928 string_get_size(sdkp->capacity, sector_size,
2929 STRING_UNITS_10, cap_str_10, sizeof(cap_str_10));
2930
2931 sd_printk(KERN_NOTICE, sdkp,
2932 "%llu %d-byte logical blocks: (%s/%s)\n",
2933 (unsigned long long)sdkp->capacity,
2934 sector_size, cap_str_10, cap_str_2);
2935
2936 if (sdkp->physical_block_size != sector_size)
2937 sd_printk(KERN_NOTICE, sdkp,
2938 "%u-byte physical blocks\n",
2939 sdkp->physical_block_size);
2940 }
2941
2942 /* called with buffer of length 512 */
2943 static inline int
2944 sd_do_mode_sense(struct scsi_disk *sdkp, int dbd, int modepage,
2945 unsigned char *buffer, int len, struct scsi_mode_data *data,
2946 struct scsi_sense_hdr *sshdr)
2947 {
2948 /*
2949 * If we must use MODE SENSE(10), make sure that the buffer length
2950 * is at least 8 bytes so that the mode sense header fits.
2951 */
2952 if (sdkp->device->use_10_for_ms && len < 8)
2953 len = 8;
2954
2955 return scsi_mode_sense(sdkp->device, dbd, modepage, 0, buffer, len,
2956 SD_TIMEOUT, sdkp->max_retries, data, sshdr);
2957 }
2958
2959 /*
2960 * read write protect setting, if possible - called only in sd_revalidate_disk()
2961 * called with buffer of length SD_BUF_SIZE
2962 */
2963 static void
2964 sd_read_write_protect_flag(struct scsi_disk *sdkp, unsigned char *buffer)
2965 {
2966 int res;
2967 struct scsi_device *sdp = sdkp->device;
2968 struct scsi_mode_data data;
2969 int old_wp = sdkp->write_prot;
2970
2971 set_disk_ro(sdkp->disk, 0);
2972 if (sdp->skip_ms_page_3f) {
2973 sd_first_printk(KERN_NOTICE, sdkp, "Assuming Write Enabled\n");
2974 return;
2975 }
2976
2977 if (sdp->use_192_bytes_for_3f) {
2978 res = sd_do_mode_sense(sdkp, 0, 0x3F, buffer, 192, &data, NULL);
2979 } else {
2980 /*
2981 * First attempt: ask for all pages (0x3F), but only 4 bytes.
2982 * We have to start carefully: some devices hang if we ask
2983 * for more than is available.
2984 */
2985 res = sd_do_mode_sense(sdkp, 0, 0x3F, buffer, 4, &data, NULL);
2986
2987 /*
2988 * Second attempt: ask for page 0. When only page 0 is
2989 * implemented, a request for page 3F may return Sense Key
2990 * 5: Illegal Request, Sense Code 24: Invalid field in
2991 * CDB.
2992 */
2993 if (res < 0)
2994 res = sd_do_mode_sense(sdkp, 0, 0, buffer, 4, &data, NULL);
2995
2996 /*
2997 * Third attempt: ask 255 bytes, as we did earlier.
2998 */
2999 if (res < 0)
3000 res = sd_do_mode_sense(sdkp, 0, 0x3F, buffer, 255,
3001 &data, NULL);
3002 }
3003
3004 if (res < 0) {
3005 sd_first_printk(KERN_WARNING, sdkp,
3006 "Test WP failed, assume Write Enabled\n");
3007 } else {
3008 sdkp->write_prot = ((data.device_specific & 0x80) != 0);
3009 set_disk_ro(sdkp->disk, sdkp->write_prot);
3010 if (sdkp->first_scan || old_wp != sdkp->write_prot) {
3011 sd_printk(KERN_NOTICE, sdkp, "Write Protect is %s\n",
3012 sdkp->write_prot ? "on" : "off");
3013 sd_printk(KERN_DEBUG, sdkp, "Mode Sense: %4ph\n", buffer);
3014 }
3015 }
3016 }
3017
3018 /*
3019 * sd_read_cache_type - called only from sd_revalidate_disk()
3020 * called with buffer of length SD_BUF_SIZE
3021 */
3022 static void
3023 sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)
3024 {
3025 int len = 0, res;
3026 struct scsi_device *sdp = sdkp->device;
3027
3028 int dbd;
3029 int modepage;
3030 int first_len;
3031 struct scsi_mode_data data;
3032 struct scsi_sense_hdr sshdr;
3033 int old_wce = sdkp->WCE;
3034 int old_rcd = sdkp->RCD;
3035 int old_dpofua = sdkp->DPOFUA;
3036
3037
3038 if (sdkp->cache_override)
3039 return;
3040
3041 first_len = 4;
3042 if (sdp->skip_ms_page_8) {
3043 if (sdp->type == TYPE_RBC)
3044 goto defaults;
3045 else {
3046 if (sdp->skip_ms_page_3f)
3047 goto defaults;
3048 modepage = 0x3F;
3049 if (sdp->use_192_bytes_for_3f)
3050 first_len = 192;
3051 dbd = 0;
3052 }
3053 } else if (sdp->type == TYPE_RBC) {
3054 modepage = 6;
3055 dbd = 8;
3056 } else {
3057 modepage = 8;
3058 dbd = 0;
3059 }
3060
3061 /* cautiously ask */
3062 res = sd_do_mode_sense(sdkp, dbd, modepage, buffer, first_len,
3063 &data, &sshdr);
3064
3065 if (res < 0)
3066 goto bad_sense;
3067
3068 if (!data.header_length) {
3069 modepage = 6;
3070 first_len = 0;
3071 sd_first_printk(KERN_ERR, sdkp,
3072 "Missing header in MODE_SENSE response\n");
3073 }
3074
3075 /* that went OK, now ask for the proper length */
3076 len = data.length;
3077
3078 /*
3079 * We're only interested in the first three bytes, actually.
3080 * But the data cache page is defined for the first 20.
3081 */
3082 if (len < 3)
3083 goto bad_sense;
3084 else if (len > SD_BUF_SIZE) {
3085 sd_first_printk(KERN_NOTICE, sdkp, "Truncating mode parameter "
3086 "data from %d to %d bytes\n", len, SD_BUF_SIZE);
3087 len = SD_BUF_SIZE;
3088 }
3089 if (modepage == 0x3F && sdp->use_192_bytes_for_3f)
3090 len = 192;
3091
3092 /* Get the data */
3093 if (len > first_len)
3094 res = sd_do_mode_sense(sdkp, dbd, modepage, buffer, len,
3095 &data, &sshdr);
3096
3097 if (!res) {
3098 int offset = data.header_length + data.block_descriptor_length;
3099
3100 while (offset < len) {
3101 u8 page_code = buffer[offset] & 0x3F;
3102 u8 spf = buffer[offset] & 0x40;
3103
3104 if (page_code == 8 || page_code == 6) {
3105 /* We're interested only in the first 3 bytes.
3106 */
3107 if (len - offset <= 2) {
3108 sd_first_printk(KERN_ERR, sdkp,
3109 "Incomplete mode parameter "
3110 "data\n");
3111 goto defaults;
3112 } else {
3113 modepage = page_code;
3114 goto Page_found;
3115 }
3116 } else {
3117 /* Go to the next page */
3118 if (spf && len - offset > 3)
3119 offset += 4 + (buffer[offset+2] << 8) +
3120 buffer[offset+3];
3121 else if (!spf && len - offset > 1)
3122 offset += 2 + buffer[offset+1];
3123 else {
3124 sd_first_printk(KERN_ERR, sdkp,
3125 "Incomplete mode "
3126 "parameter data\n");
3127 goto defaults;
3128 }
3129 }
3130 }
3131
3132 sd_first_printk(KERN_WARNING, sdkp,
3133 "No Caching mode page found\n");
3134 goto defaults;
3135
3136 Page_found:
3137 if (modepage == 8) {
3138 sdkp->WCE = ((buffer[offset + 2] & 0x04) != 0);
3139 sdkp->RCD = ((buffer[offset + 2] & 0x01) != 0);
3140 } else {
3141 sdkp->WCE = ((buffer[offset + 2] & 0x01) == 0);
3142 sdkp->RCD = 0;
3143 }
3144
3145 sdkp->DPOFUA = (data.device_specific & 0x10) != 0;
3146 if (sdp->broken_fua) {
3147 sd_first_printk(KERN_NOTICE, sdkp, "Disabling FUA\n");
3148 sdkp->DPOFUA = 0;
3149 } else if (sdkp->DPOFUA && !sdkp->device->use_10_for_rw &&
3150 !sdkp->device->use_16_for_rw) {
3151 sd_first_printk(KERN_NOTICE, sdkp,
3152 "Uses READ/WRITE(6), disabling FUA\n");
3153 sdkp->DPOFUA = 0;
3154 }
3155
3156 /* No cache flush allowed for write protected devices */
3157 if (sdkp->WCE && sdkp->write_prot)
3158 sdkp->WCE = 0;
3159
3160 if (sdkp->first_scan || old_wce != sdkp->WCE ||
3161 old_rcd != sdkp->RCD || old_dpofua != sdkp->DPOFUA)
3162 sd_printk(KERN_NOTICE, sdkp,
3163 "Write cache: %s, read cache: %s, %s\n",
3164 sdkp->WCE ? "enabled" : "disabled",
3165 sdkp->RCD ? "disabled" : "enabled",
3166 sdkp->DPOFUA ? "supports DPO and FUA"
3167 : "doesn't support DPO or FUA");
3168
3169 return;
3170 }
3171
3172 bad_sense:
3173 if (res == -EIO && scsi_sense_valid(&sshdr) &&
3174 sshdr.sense_key == ILLEGAL_REQUEST &&
3175 sshdr.asc == 0x24 && sshdr.ascq == 0x0)
3176 /* Invalid field in CDB */
3177 sd_first_printk(KERN_NOTICE, sdkp, "Cache data unavailable\n");
3178 else
3179 sd_first_printk(KERN_ERR, sdkp,
3180 "Asking for cache data failed\n");
3181
3182 defaults:
3183 if (sdp->wce_default_on) {
3184 sd_first_printk(KERN_NOTICE, sdkp,
3185 "Assuming drive cache: write back\n");
3186 sdkp->WCE = 1;
3187 } else {
3188 sd_first_printk(KERN_WARNING, sdkp,
3189 "Assuming drive cache: write through\n");
3190 sdkp->WCE = 0;
3191 }
3192 sdkp->RCD = 0;
3193 sdkp->DPOFUA = 0;
3194 }
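
/*
 * Caching mode page bits decoded at Page_found above (a reading aid):
 * in the caching page (0x08), byte 2 bit 2 is WCE and bit 0 is RCD;
 * in the RBC device parameters page (0x06) a *clear* bit 0 of byte 2
 * means writeback caching is enabled.
 */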
3195
3196 static bool sd_is_perm_stream(struct scsi_disk *sdkp, unsigned int stream_id)
3197 {
3198 u8 cdb[16] = { SERVICE_ACTION_IN_16, SAI_GET_STREAM_STATUS };
3199 struct {
3200 struct scsi_stream_status_header h;
3201 struct scsi_stream_status s;
3202 } buf;
3203 struct scsi_device *sdev = sdkp->device;
3204 struct scsi_sense_hdr sshdr;
3205 const struct scsi_exec_args exec_args = {
3206 .sshdr = &sshdr,
3207 };
3208 int res;
3209
3210 put_unaligned_be16(stream_id, &cdb[4]);
3211 put_unaligned_be32(sizeof(buf), &cdb[10]);
3212
3213 res = scsi_execute_cmd(sdev, cdb, REQ_OP_DRV_IN, &buf, sizeof(buf),
3214 SD_TIMEOUT, sdkp->max_retries, &exec_args);
3215 if (res < 0)
3216 return false;
3217 if (scsi_status_is_check_condition(res) && scsi_sense_valid(&sshdr))
3218 sd_print_sense_hdr(sdkp, &sshdr);
3219 if (res)
3220 return false;
3221 if (get_unaligned_be32(&buf.h.len) < sizeof(struct scsi_stream_status))
3222 return false;
3223 return buf.h.stream_status[0].perm;
3224 }
3225
3226 static void sd_read_io_hints(struct scsi_disk *sdkp, unsigned char *buffer)
3227 {
3228 struct scsi_device *sdp = sdkp->device;
3229 const struct scsi_io_group_descriptor *desc, *start, *end;
3230 u16 permanent_stream_count_old;
3231 struct scsi_sense_hdr sshdr;
3232 struct scsi_mode_data data;
3233 int res;
3234
3235 if (sdp->sdev_bflags & BLIST_SKIP_IO_HINTS)
3236 return;
3237
3238 res = scsi_mode_sense(sdp, /*dbd=*/0x8, /*modepage=*/0x0a,
3239 /*subpage=*/0x05, buffer, SD_BUF_SIZE, SD_TIMEOUT,
3240 sdkp->max_retries, &data, &sshdr);
3241 if (res < 0)
3242 return;
3243 start = (void *)buffer + data.header_length + 16;
3244 end = (void *)buffer + ALIGN_DOWN(data.header_length + data.length,
3245 sizeof(*end));
3246 /*
3247 * From "SBC-5 Constrained Streams with Data Lifetimes": Device severs
3248 * should assign the lowest numbered stream identifiers to permanent
3249 * streams.
3250 */
3251 for (desc = start; desc < end; desc++)
3252 if (!desc->st_enble || !sd_is_perm_stream(sdkp, desc - start))
3253 break;
3254 permanent_stream_count_old = sdkp->permanent_stream_count;
3255 sdkp->permanent_stream_count = desc - start;
3256 if (sdkp->rscs && sdkp->permanent_stream_count < 2)
3257 sd_printk(KERN_INFO, sdkp,
3258 "Unexpected: RSCS has been set and the permanent stream count is %u\n",
3259 sdkp->permanent_stream_count);
3260 else if (sdkp->permanent_stream_count != permanent_stream_count_old)
3261 sd_printk(KERN_INFO, sdkp, "permanent stream count = %d\n",
3262 sdkp->permanent_stream_count);
3263 }
3264
3265 /*
3266 * The ATO bit indicates whether the DIF application tag is available
3267 * for use by the operating system.
3268 */
3269 static void sd_read_app_tag_own(struct scsi_disk *sdkp, unsigned char *buffer)
3270 {
3271 int res, offset;
3272 struct scsi_device *sdp = sdkp->device;
3273 struct scsi_mode_data data;
3274 struct scsi_sense_hdr sshdr;
3275
3276 if (sdp->type != TYPE_DISK && sdp->type != TYPE_ZBC)
3277 return;
3278
3279 if (sdkp->protection_type == 0)
3280 return;
3281
3282 res = scsi_mode_sense(sdp, 1, 0x0a, 0, buffer, 36, SD_TIMEOUT,
3283 sdkp->max_retries, &data, &sshdr);
3284
3285 if (res < 0 || !data.header_length ||
3286 data.length < 6) {
3287 sd_first_printk(KERN_WARNING, sdkp,
3288 "getting Control mode page failed, assume no ATO\n");
3289
3290 if (res == -EIO && scsi_sense_valid(&sshdr))
3291 sd_print_sense_hdr(sdkp, &sshdr);
3292
3293 return;
3294 }
3295
3296 offset = data.header_length + data.block_descriptor_length;
3297
3298 if ((buffer[offset] & 0x3f) != 0x0a) {
3299 sd_first_printk(KERN_ERR, sdkp, "ATO Got wrong page\n");
3300 return;
3301 }
3302
3303 if ((buffer[offset + 5] & 0x80) == 0)
3304 return;
3305
3306 sdkp->ATO = 1;
3307
3308 return;
3309 }
3310
3311 static unsigned int sd_discard_mode(struct scsi_disk *sdkp)
3312 {
3313 if (!sdkp->lbpme)
3314 return SD_LBP_FULL;
3315
3316 if (!sdkp->lbpvpd) {
3317 /* LBP VPD page not provided */
3318 if (sdkp->max_unmap_blocks)
3319 return SD_LBP_UNMAP;
3320 return SD_LBP_WS16;
3321 }
3322
3323 /* LBP VPD page tells us what to use */
3324 if (sdkp->lbpu && sdkp->max_unmap_blocks)
3325 return SD_LBP_UNMAP;
3326 if (sdkp->lbpws)
3327 return SD_LBP_WS16;
3328 if (sdkp->lbpws10)
3329 return SD_LBP_WS10;
3330 return SD_LBP_DISABLE;
3331 }
3332
3333 /*
3334 * Query disk device for preferred I/O sizes.
3335 */
3336 static void sd_read_block_limits(struct scsi_disk *sdkp,
3337 struct queue_limits *lim)
3338 {
3339 struct scsi_vpd *vpd;
3340
3341 rcu_read_lock();
3342
3343 vpd = rcu_dereference(sdkp->device->vpd_pgb0);
3344 if (!vpd || vpd->len < 16)
3345 goto out;
3346
3347 sdkp->min_xfer_blocks = get_unaligned_be16(&vpd->data[6]);
3348 sdkp->max_xfer_blocks = get_unaligned_be32(&vpd->data[8]);
3349 sdkp->opt_xfer_blocks = get_unaligned_be32(&vpd->data[12]);
3350
3351 if (vpd->len >= 64) {
3352 unsigned int lba_count, desc_count;
3353
3354 sdkp->max_ws_blocks = (u32)get_unaligned_be64(&vpd->data[36]);
3355
3356 if (!sdkp->lbpme)
3357 goto config_atomic;
3358
3359 lba_count = get_unaligned_be32(&vpd->data[20]);
3360 desc_count = get_unaligned_be32(&vpd->data[24]);
3361
3362 if (lba_count && desc_count)
3363 sdkp->max_unmap_blocks = lba_count;
3364
3365 sdkp->unmap_granularity = get_unaligned_be32(&vpd->data[28]);
3366
3367 if (vpd->data[32] & 0x80)
3368 sdkp->unmap_alignment =
3369 get_unaligned_be32(&vpd->data[32]) & ~(1 << 31);
3370
3371 config_atomic:
3372 sdkp->max_atomic = get_unaligned_be32(&vpd->data[44]);
3373 sdkp->atomic_alignment = get_unaligned_be32(&vpd->data[48]);
3374 sdkp->atomic_granularity = get_unaligned_be32(&vpd->data[52]);
3375 sdkp->max_atomic_with_boundary = get_unaligned_be32(&vpd->data[56]);
3376 sdkp->max_atomic_boundary = get_unaligned_be32(&vpd->data[60]);
3377
3378 sd_config_atomic(sdkp, lim);
3379 }
3380
3381 out:
3382 rcu_read_unlock();
3383 }
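
/*
 * Block Limits VPD page (0xb0) offsets read above (a reading aid):
 *
 *	bytes  6-7 : OPTIMAL TRANSFER LENGTH GRANULARITY (min_xfer_blocks)
 *	bytes  8-11: MAXIMUM TRANSFER LENGTH
 *	bytes 12-15: OPTIMAL TRANSFER LENGTH
 *	bytes 20-23: MAXIMUM UNMAP LBA COUNT
 *	bytes 24-27: MAXIMUM UNMAP BLOCK DESCRIPTOR COUNT
 *	bytes 28-31: OPTIMAL UNMAP GRANULARITY
 *	bytes 32-35: UNMAP GRANULARITY ALIGNMENT (valid if byte 32 bit 7 set)
 *	bytes 36-43: MAXIMUM WRITE SAME LENGTH
 *	bytes 44-63: atomic write limits
 */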
3384
3385 /* Parse the Block Limits Extension VPD page (0xb7) */
3386 static void sd_read_block_limits_ext(struct scsi_disk *sdkp)
3387 {
3388 struct scsi_vpd *vpd;
3389
3390 rcu_read_lock();
3391 vpd = rcu_dereference(sdkp->device->vpd_pgb7);
3392 if (vpd && vpd->len >= 2)
3393 sdkp->rscs = vpd->data[5] & 1;
3394 rcu_read_unlock();
3395 }
3396
3397 /* Query block device characteristics */
3398 static void sd_read_block_characteristics(struct scsi_disk *sdkp,
3399 struct queue_limits *lim)
3400 {
3401 struct scsi_vpd *vpd;
3402 u16 rot;
3403
3404 rcu_read_lock();
3405 vpd = rcu_dereference(sdkp->device->vpd_pgb1);
3406
3407 if (!vpd || vpd->len < 8) {
3408 rcu_read_unlock();
3409 return;
3410 }
3411
3412 rot = get_unaligned_be16(&vpd->data[4]);
3413 sdkp->zoned = (vpd->data[8] >> 4) & 3;
3414 rcu_read_unlock();
3415
3416 if (rot == 1)
3417 lim->features &= ~(BLK_FEAT_ROTATIONAL | BLK_FEAT_ADD_RANDOM);
3418
3419 if (!sdkp->first_scan)
3420 return;
3421
3422 if (sdkp->device->type == TYPE_ZBC)
3423 sd_printk(KERN_NOTICE, sdkp, "Host-managed zoned block device\n");
3424 else if (sdkp->zoned == 1)
3425 sd_printk(KERN_NOTICE, sdkp, "Host-aware SMR disk used as regular disk\n");
3426 else if (sdkp->zoned == 2)
3427 sd_printk(KERN_NOTICE, sdkp, "Drive-managed SMR disk\n");
3428 }
3429
3430 /**
3431 * sd_read_block_provisioning - Query provisioning VPD page
3432 * @sdkp: disk to query
3433 */
3434 static void sd_read_block_provisioning(struct scsi_disk *sdkp)
3435 {
3436 struct scsi_vpd *vpd;
3437
3438 if (sdkp->lbpme == 0)
3439 return;
3440
3441 rcu_read_lock();
3442 vpd = rcu_dereference(sdkp->device->vpd_pgb2);
3443
3444 if (!vpd || vpd->len < 8) {
3445 rcu_read_unlock();
3446 return;
3447 }
3448
3449 sdkp->lbpvpd = 1;
3450 sdkp->lbpu = (vpd->data[5] >> 7) & 1; /* UNMAP */
3451 sdkp->lbpws = (vpd->data[5] >> 6) & 1; /* WRITE SAME(16) w/ UNMAP */
3452 sdkp->lbpws10 = (vpd->data[5] >> 5) & 1; /* WRITE SAME(10) w/ UNMAP */
3453 rcu_read_unlock();
3454 }
3455
3456 static void sd_read_write_same(struct scsi_disk *sdkp, unsigned char *buffer)
3457 {
3458 struct scsi_device *sdev = sdkp->device;
3459
3460 if (sdev->host->no_write_same) {
3461 sdev->no_write_same = 1;
3462
3463 return;
3464 }
3465
3466 if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE, INQUIRY, 0) < 0) {
3467 struct scsi_vpd *vpd;
3468
3469 sdev->no_report_opcodes = 1;
3470
3471 /* Disable WRITE SAME if REPORT SUPPORTED OPERATION
3472 * CODES is unsupported and the device has an ATA
3473 * Information VPD page (SAT).
3474 */
3475 rcu_read_lock();
3476 vpd = rcu_dereference(sdev->vpd_pg89);
3477 if (vpd)
3478 sdev->no_write_same = 1;
3479 rcu_read_unlock();
3480 }
3481
3482 if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE, WRITE_SAME_16, 0) == 1)
3483 sdkp->ws16 = 1;
3484
3485 if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE, WRITE_SAME, 0) == 1)
3486 sdkp->ws10 = 1;
3487 }
3488
3489 static void sd_read_security(struct scsi_disk *sdkp, unsigned char *buffer)
3490 {
3491 struct scsi_device *sdev = sdkp->device;
3492
3493 if (!sdev->security_supported)
3494 return;
3495
3496 if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE,
3497 SECURITY_PROTOCOL_IN, 0) == 1 &&
3498 scsi_report_opcode(sdev, buffer, SD_BUF_SIZE,
3499 SECURITY_PROTOCOL_OUT, 0) == 1)
3500 sdkp->security = 1;
3501 }
3502
3503 static inline sector_t sd64_to_sectors(struct scsi_disk *sdkp, u8 *buf)
3504 {
3505 return logical_to_sectors(sdkp->device, get_unaligned_be64(buf));
3506 }
3507
3508 /**
3509 * sd_read_cpr - Query concurrent positioning ranges
3510 * @sdkp: disk to query
3511 */
3512 static void sd_read_cpr(struct scsi_disk *sdkp)
3513 {
3514 struct blk_independent_access_ranges *iars = NULL;
3515 unsigned char *buffer = NULL;
3516 unsigned int nr_cpr = 0;
3517 int i, vpd_len, buf_len = SD_BUF_SIZE;
3518 u8 *desc;
3519
3520 /*
3521 * We need to have the capacity set first for the block layer to be
3522 * able to check the ranges.
3523 */
3524 if (sdkp->first_scan)
3525 return;
3526
3527 if (!sdkp->capacity)
3528 goto out;
3529
3530 /*
3531 * Concurrent Positioning Ranges VPD: there can be at most 256 ranges,
3532 * leading to a maximum page size of 64 + 256*32 bytes.
3533 */
3534 buf_len = 64 + 256*32;
3535 buffer = kmalloc(buf_len, GFP_KERNEL);
3536 if (!buffer || scsi_get_vpd_page(sdkp->device, 0xb9, buffer, buf_len))
3537 goto out;
3538
3539 /* We must have at least a 64B header and one 32B range descriptor */
3540 vpd_len = get_unaligned_be16(&buffer[2]) + 4;
3541 if (vpd_len > buf_len || vpd_len < 64 + 32 || (vpd_len & 31)) {
3542 sd_printk(KERN_ERR, sdkp,
3543 "Invalid Concurrent Positioning Ranges VPD page\n");
3544 goto out;
3545 }
3546
3547 nr_cpr = (vpd_len - 64) / 32;
3548 if (nr_cpr == 1) {
3549 nr_cpr = 0;
3550 goto out;
3551 }
3552
3553 iars = disk_alloc_independent_access_ranges(sdkp->disk, nr_cpr);
3554 if (!iars) {
3555 nr_cpr = 0;
3556 goto out;
3557 }
3558
3559 desc = &buffer[64];
3560 for (i = 0; i < nr_cpr; i++, desc += 32) {
3561 if (desc[0] != i) {
3562 sd_printk(KERN_ERR, sdkp,
3563 "Invalid Concurrent Positioning Range number\n");
3564 nr_cpr = 0;
3565 break;
3566 }
3567
3568 iars->ia_range[i].sector = sd64_to_sectors(sdkp, desc + 8);
3569 iars->ia_range[i].nr_sectors = sd64_to_sectors(sdkp, desc + 16);
3570 }
3571
3572 out:
3573 disk_set_independent_access_ranges(sdkp->disk, iars);
3574 if (nr_cpr && sdkp->nr_actuators != nr_cpr) {
3575 sd_printk(KERN_NOTICE, sdkp,
3576 "%u concurrent positioning ranges\n", nr_cpr);
3577 sdkp->nr_actuators = nr_cpr;
3578 }
3579
3580 kfree(buffer);
3581 }
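/*
 * Worked example for the validation above: a dual-actuator drive
 * reporting two ranges returns a page with vpd_len = 64 + 2 * 32 = 128
 * bytes, with the descriptors for ranges 0 and 1 at buffer[64] and
 * buffer[96]. A page with a single range is ignored (nr_cpr reset to
 * 0), since one range spanning the whole disk carries no information.
 */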
3582
3583 static bool sd_validate_min_xfer_size(struct scsi_disk *sdkp)
3584 {
3585 struct scsi_device *sdp = sdkp->device;
3586 unsigned int min_xfer_bytes =
3587 logical_to_bytes(sdp, sdkp->min_xfer_blocks);
3588
3589 if (sdkp->min_xfer_blocks == 0)
3590 return false;
3591
3592 if (min_xfer_bytes & (sdkp->physical_block_size - 1)) {
3593 sd_first_printk(KERN_WARNING, sdkp,
3594 "Preferred minimum I/O size %u bytes not a " \
3595 "multiple of physical block size (%u bytes)\n",
3596 min_xfer_bytes, sdkp->physical_block_size);
3597 sdkp->min_xfer_blocks = 0;
3598 return false;
3599 }
3600
3601 sd_first_printk(KERN_INFO, sdkp, "Preferred minimum I/O size %u bytes\n",
3602 min_xfer_bytes);
3603 return true;
3604 }
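/*
 * Example: a 512e drive (512-byte logical, 4096-byte physical blocks)
 * reporting min_xfer_blocks = 8 yields min_xfer_bytes = 4096, which is
 * physical-block aligned and accepted; min_xfer_blocks = 4 (2048
 * bytes) would fail the alignment check and be discarded.
 */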
3605
3606 /*
3607 * Determine the device's preferred I/O size for reads and writes
3608 * unless the reported value is unreasonably small, large, not a
3609 * multiple of the physical block size, or simply garbage.
3610 */
3611 static bool sd_validate_opt_xfer_size(struct scsi_disk *sdkp,
3612 unsigned int dev_max)
3613 {
3614 struct scsi_device *sdp = sdkp->device;
3615 unsigned int opt_xfer_bytes =
3616 logical_to_bytes(sdp, sdkp->opt_xfer_blocks);
3617 unsigned int min_xfer_bytes =
3618 logical_to_bytes(sdp, sdkp->min_xfer_blocks);
3619
3620 if (sdkp->opt_xfer_blocks == 0)
3621 return false;
3622
3623 if (sdkp->opt_xfer_blocks > dev_max) {
3624 sd_first_printk(KERN_WARNING, sdkp,
3625 "Optimal transfer size %u logical blocks " \
3626 "> dev_max (%u logical blocks)\n",
3627 sdkp->opt_xfer_blocks, dev_max);
3628 return false;
3629 }
3630
3631 if (sdkp->opt_xfer_blocks > SD_DEF_XFER_BLOCKS) {
3632 sd_first_printk(KERN_WARNING, sdkp,
3633 "Optimal transfer size %u logical blocks " \
3634 "> sd driver limit (%u logical blocks)\n",
3635 sdkp->opt_xfer_blocks, SD_DEF_XFER_BLOCKS);
3636 return false;
3637 }
3638
3639 if (opt_xfer_bytes < PAGE_SIZE) {
3640 sd_first_printk(KERN_WARNING, sdkp,
3641 "Optimal transfer size %u bytes < " \
3642 "PAGE_SIZE (%u bytes)\n",
3643 opt_xfer_bytes, (unsigned int)PAGE_SIZE);
3644 return false;
3645 }
3646
3647 if (min_xfer_bytes && opt_xfer_bytes % min_xfer_bytes) {
3648 sd_first_printk(KERN_WARNING, sdkp,
3649 "Optimal transfer size %u bytes not a " \
3650 "multiple of preferred minimum block " \
3651 "size (%u bytes)\n",
3652 opt_xfer_bytes, min_xfer_bytes);
3653 return false;
3654 }
3655
3656 if (opt_xfer_bytes & (sdkp->physical_block_size - 1)) {
3657 sd_first_printk(KERN_WARNING, sdkp,
3658 "Optimal transfer size %u bytes not a " \
3659 "multiple of physical block size (%u bytes)\n",
3660 opt_xfer_bytes, sdkp->physical_block_size);
3661 return false;
3662 }
3663
3664 sd_first_printk(KERN_INFO, sdkp, "Optimal transfer size %u bytes\n",
3665 opt_xfer_bytes);
3666 return true;
3667 }
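/*
 * Example: on a system with a 4096-byte PAGE_SIZE, a device with
 * 512-byte logical blocks reporting opt_xfer_blocks = 2 (1024 bytes)
 * fails the PAGE_SIZE check above, while opt_xfer_blocks = 2048
 * (1 MiB) passes all of the checks, assuming it is also a multiple of
 * the physical block size and of any reported minimum I/O size.
 */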
3668
3669 static void sd_read_block_zero(struct scsi_disk *sdkp)
3670 {
3671 struct scsi_device *sdev = sdkp->device;
3672 unsigned int buf_len = sdev->sector_size;
3673 u8 *buffer, cmd[16] = { };
3674
3675 buffer = kmalloc(buf_len, GFP_KERNEL);
3676 if (!buffer)
3677 return;
3678
3679 if (sdev->use_16_for_rw) {
3680 cmd[0] = READ_16;
3681 put_unaligned_be64(0, &cmd[2]); /* Logical block address 0 */
3682 put_unaligned_be32(1, &cmd[10]);/* Transfer 1 logical block */
3683 } else {
3684 cmd[0] = READ_10;
3685 put_unaligned_be32(0, &cmd[2]); /* Logical block address 0 */
3686 put_unaligned_be16(1, &cmd[7]); /* Transfer 1 logical block */
3687 }
3688
3689 scsi_execute_cmd(sdkp->device, cmd, REQ_OP_DRV_IN, buffer, buf_len,
3690 SD_TIMEOUT, sdkp->max_retries, NULL);
3691 kfree(buffer);
3692 }
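/*
 * For reference, the CDBs built above for the one-block read of LBA 0
 * (opcode, LBA field, transfer length field):
 *
 *	READ(16): 0x88, bytes 2-9 = 0, bytes 10-13 = 1
 *	READ(10): 0x28, bytes 2-5 = 0, bytes 7-8  = 1
 *
 * The read data and status are deliberately ignored; the command is
 * issued only for its side effect of making the device populate its
 * mode pages.
 */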
3693
3694 /**
3695 * sd_revalidate_disk - called the first time a new disk is seen,
3696 * performs disk spin up, read_capacity, etc.
3697 * @disk: struct gendisk we care about
3698 **/
3699 static int sd_revalidate_disk(struct gendisk *disk)
3700 {
3701 struct scsi_disk *sdkp = scsi_disk(disk);
3702 struct scsi_device *sdp = sdkp->device;
3703 sector_t old_capacity = sdkp->capacity;
3704 struct queue_limits lim;
3705 unsigned char *buffer;
3706 unsigned int dev_max;
3707 int err;
3708
3709 SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp,
3710 "sd_revalidate_disk\n"));
3711
3712 /*
3713 * If the device is offline, don't try to read the capacity or any
3714 * of the other niceties.
3715 */
3716 if (!scsi_device_online(sdp))
3717 goto out;
3718
3719 buffer = kmalloc(SD_BUF_SIZE, GFP_KERNEL);
3720 if (!buffer) {
3721 sd_printk(KERN_WARNING, sdkp, "sd_revalidate_disk: Memory "
3722 "allocation failure.\n");
3723 goto out;
3724 }
3725
3726 sd_spinup_disk(sdkp);
3727
3728 lim = queue_limits_start_update(sdkp->disk->queue);
3729
3730 /*
3731 * Without media there is no reason to ask; moreover, some devices
3732 * react badly if we do.
3733 */
3734 if (sdkp->media_present) {
3735 sd_read_capacity(sdkp, &lim, buffer);
3736 /*
3737 * Some USB/UAS devices return generic values for mode pages
3738 * until the media has been accessed. Trigger a READ operation
3739 * to force the device to populate mode pages.
3740 */
3741 if (sdp->read_before_ms)
3742 sd_read_block_zero(sdkp);
3743 /*
3744 * Set the default to rotational. All non-rotational devices
3745 * support the block characteristics VPD page, which will
3746 * cause this to be updated correctly; any device which
3747 * doesn't support it should be treated as rotational.
3748 */
3749 lim.features |= (BLK_FEAT_ROTATIONAL | BLK_FEAT_ADD_RANDOM);
3750
3751 if (scsi_device_supports_vpd(sdp)) {
3752 sd_read_block_provisioning(sdkp);
3753 sd_read_block_limits(sdkp, &lim);
3754 sd_read_block_limits_ext(sdkp);
3755 sd_read_block_characteristics(sdkp, &lim);
3756 sd_zbc_read_zones(sdkp, &lim, buffer);
3757 }
3758
3759 sd_config_discard(sdkp, &lim, sd_discard_mode(sdkp));
3760
3761 sd_print_capacity(sdkp, old_capacity);
3762
3763 sd_read_write_protect_flag(sdkp, buffer);
3764 sd_read_cache_type(sdkp, buffer);
3765 sd_read_io_hints(sdkp, buffer);
3766 sd_read_app_tag_own(sdkp, buffer);
3767 sd_read_write_same(sdkp, buffer);
3768 sd_read_security(sdkp, buffer);
3769 sd_config_protection(sdkp, &lim);
3770 }
3771
3772 /*
3773 * We now have all cache related info, determine how we deal
3774 * with flush requests.
3775 */
3776 sd_set_flush_flag(sdkp, &lim);
3777
3778 /* Initial block count limit based on CDB TRANSFER LENGTH field size. */
3779 dev_max = sdp->use_16_for_rw ? SD_MAX_XFER_BLOCKS : SD_DEF_XFER_BLOCKS;
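/*
 * READ/WRITE(16) CDBs carry a 32-bit TRANSFER LENGTH field while
 * READ/WRITE(10) only has a 16-bit one, hence the larger
 * SD_MAX_XFER_BLOCKS limit when use_16_for_rw is set.
 */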
3780
3781 /* Some devices report a maximum block count for READ/WRITE requests. */
3782 dev_max = min_not_zero(dev_max, sdkp->max_xfer_blocks);
3783 lim.max_dev_sectors = logical_to_sectors(sdp, dev_max);
3784
3785 if (sd_validate_min_xfer_size(sdkp))
3786 lim.io_min = logical_to_bytes(sdp, sdkp->min_xfer_blocks);
3787 else
3788 lim.io_min = 0;
3789
3790 /*
3791 * Limit the default io_opt to the SCSI host's optimal sector limit,
3792 * if set. There may be an impact on performance when the size of a
3793 * request exceeds this host limit.
3794 */
3795 lim.io_opt = sdp->host->opt_sectors << SECTOR_SHIFT;
3796 if (sd_validate_opt_xfer_size(sdkp, dev_max)) {
3797 lim.io_opt = min_not_zero(lim.io_opt,
3798 logical_to_bytes(sdp, sdkp->opt_xfer_blocks));
3799 }
3800
3801 sdkp->first_scan = 0;
3802
3803 set_capacity_and_notify(disk, logical_to_sectors(sdp, sdkp->capacity));
3804 sd_config_write_same(sdkp, &lim);
3805 kfree(buffer);
3806
3807 blk_mq_freeze_queue(sdkp->disk->queue);
3808 err = queue_limits_commit_update(sdkp->disk->queue, &lim);
3809 blk_mq_unfreeze_queue(sdkp->disk->queue);
3810 if (err)
3811 return err;
3812
3813 /*
3814 * Query concurrent positioning ranges after
3815 * queue_limits_commit_update() unlocked q->limits_lock to avoid
3816 * deadlock with q->sysfs_dir_lock and q->sysfs_lock.
3817 */
3818 if (sdkp->media_present && scsi_device_supports_vpd(sdp))
3819 sd_read_cpr(sdkp);
3820
3821 /*
3822 * For a zoned drive, revalidating the zones can be done only once
3823 * the gendisk capacity is set. So if this fails, set back the gendisk
3824 * capacity to 0.
3825 */
3826 if (sd_zbc_revalidate_zones(sdkp))
3827 set_capacity_and_notify(disk, 0);
3828
3829 out:
3830 return 0;
3831 }
3832
3833 /**
3834 * sd_unlock_native_capacity - unlock native capacity
3835 * @disk: struct gendisk to set capacity for
3836 *
3837 * Block layer calls this function if it detects that partitions
3838 * on @disk reach beyond the end of the device. If the SCSI host
3839 * implements ->unlock_native_capacity() method, it's invoked to
3840 * give it a chance to adjust the device capacity.
3841 *
3842 * CONTEXT:
3843 * Defined by block layer. Might sleep.
3844 */
3845 static void sd_unlock_native_capacity(struct gendisk *disk)
3846 {
3847 struct scsi_device *sdev = scsi_disk(disk)->device;
3848
3849 if (sdev->host->hostt->unlock_native_capacity)
3850 sdev->host->hostt->unlock_native_capacity(sdev);
3851 }
3852
3853 /**
3854 * sd_format_disk_name - format disk name
3855 * @prefix: name prefix - ie. "sd" for SCSI disks
3856 * @index: index of the disk to format name for
3857 * @buf: output buffer
3858 * @buflen: length of the output buffer
3859 *
3860 * SCSI disk names start at sda. The 26th device is sdz and the
3861 * 27th is sdaa. The last two-lettered suffix is sdzz, which is
3862 * followed by sdaaa.
3863 *
3864 * This is basically base-26 counting with one extra 'nil' entry
3865 * at the beginning of every digit except the least significant
3866 * one, and can be computed like a base-26 conversion with the
3867 * index decremented by one after each digit is produced.
3868 *
3869 * CONTEXT:
3870 * Don't care.
3871 *
3872 * RETURNS:
3873 * 0 on success, -errno on failure.
3874 */
3875 static int sd_format_disk_name(char *prefix, int index, char *buf, int buflen)
3876 {
3877 const int base = 'z' - 'a' + 1;
3878 char *begin = buf + strlen(prefix);
3879 char *end = buf + buflen;
3880 char *p;
3881 int unit;
3882
3883 p = end - 1;
3884 *p = '\0';
3885 unit = base;
3886 do {
3887 if (p == begin)
3888 return -EINVAL;
3889 *--p = 'a' + (index % unit);
3890 index = (index / unit) - 1;
3891 } while (index >= 0);
3892
3893 memmove(begin, p, end - p);
3894 memcpy(buf, prefix, strlen(prefix));
3895
3896 return 0;
3897 }
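/*
 * Example mappings produced by the loop above for the "sd" prefix:
 *
 *	index   0 -> "sda"
 *	index  25 -> "sdz"
 *	index  26 -> "sdaa"
 *	index 701 -> "sdzz"
 *	index 702 -> "sdaaa"
 */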
3898
3899 /**
3900 * sd_probe - called during driver initialization and whenever a
3901 * new scsi device is attached to the system. It is called once
3902 * for each scsi device (not just disks) present.
3903 * @dev: pointer to device object
3904 *
3905 * Returns 0 if successful (or if sd is not interested in this scsi
3906 * device, e.g. a scanner); 1 when there is an error.
3907 *
3908 * Note: this function is invoked from the scsi mid-level.
3909 * This function sets up the mapping between a given
3910 * <host,channel,id,lun> (found in sdp) and new device name
3911 * (e.g. /dev/sda). More precisely it is the block device major
3912 * and minor number that is chosen here.
3913 *
3914 * Assume sd_probe is not re-entrant (for the time being).
3915 * Also think about sd_probe() and sd_remove() running concurrently.
3916 **/
3917 static int sd_probe(struct device *dev)
3918 {
3919 struct scsi_device *sdp = to_scsi_device(dev);
3920 struct scsi_disk *sdkp;
3921 struct gendisk *gd;
3922 int index;
3923 int error;
3924
3925 scsi_autopm_get_device(sdp);
3926 error = -ENODEV;
3927 if (sdp->type != TYPE_DISK &&
3928 sdp->type != TYPE_ZBC &&
3929 sdp->type != TYPE_MOD &&
3930 sdp->type != TYPE_RBC)
3931 goto out;
3932
3933 if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED) && sdp->type == TYPE_ZBC) {
3934 sdev_printk(KERN_WARNING, sdp,
3935 "Unsupported ZBC host-managed device.\n");
3936 goto out;
3937 }
3938
3939 SCSI_LOG_HLQUEUE(3, sdev_printk(KERN_INFO, sdp,
3940 "sd_probe\n"));
3941
3942 error = -ENOMEM;
3943 sdkp = kzalloc(sizeof(*sdkp), GFP_KERNEL);
3944 if (!sdkp)
3945 goto out;
3946
3947 gd = blk_mq_alloc_disk_for_queue(sdp->request_queue,
3948 &sd_bio_compl_lkclass);
3949 if (!gd)
3950 goto out_free;
3951
3952 index = ida_alloc(&sd_index_ida, GFP_KERNEL);
3953 if (index < 0) {
3954 sdev_printk(KERN_WARNING, sdp, "sd_probe: memory exhausted.\n");
3955 goto out_put;
3956 }
3957
3958 error = sd_format_disk_name("sd", index, gd->disk_name, DISK_NAME_LEN);
3959 if (error) {
3960 sdev_printk(KERN_WARNING, sdp, "SCSI disk (sd) name length exceeded.\n");
3961 goto out_free_index;
3962 }
3963
3964 sdkp->device = sdp;
3965 sdkp->disk = gd;
3966 sdkp->index = index;
3967 sdkp->max_retries = SD_MAX_RETRIES;
3968 atomic_set(&sdkp->openers, 0);
3969 atomic_set(&sdkp->device->ioerr_cnt, 0);
3970
3971 if (!sdp->request_queue->rq_timeout) {
3972 if (sdp->type != TYPE_MOD)
3973 blk_queue_rq_timeout(sdp->request_queue, SD_TIMEOUT);
3974 else
3975 blk_queue_rq_timeout(sdp->request_queue,
3976 SD_MOD_TIMEOUT);
3977 }
3978
3979 device_initialize(&sdkp->disk_dev);
3980 sdkp->disk_dev.parent = get_device(dev);
3981 sdkp->disk_dev.class = &sd_disk_class;
3982 dev_set_name(&sdkp->disk_dev, "%s", dev_name(dev));
3983
3984 error = device_add(&sdkp->disk_dev);
3985 if (error) {
3986 put_device(&sdkp->disk_dev);
3987 goto out;
3988 }
3989
3990 dev_set_drvdata(dev, sdkp);
3991
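/*
 * Split the index into a major/minor pair: for example index 17
 * ("sdr") maps to sd_major(1) with first_minor 16, so that disk and
 * its partitions occupy minors 16-31 on the second sd major.
 */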
3992 gd->major = sd_major((index & 0xf0) >> 4);
3993 gd->first_minor = ((index & 0xf) << 4) | (index & 0xfff00);
3994 gd->minors = SD_MINORS;
3995
3996 gd->fops = &sd_fops;
3997 gd->private_data = sdkp;
3998
3999 /* defaults, until the device tells us otherwise */
4000 sdp->sector_size = 512;
4001 sdkp->capacity = 0;
4002 sdkp->media_present = 1;
4003 sdkp->write_prot = 0;
4004 sdkp->cache_override = 0;
4005 sdkp->WCE = 0;
4006 sdkp->RCD = 0;
4007 sdkp->ATO = 0;
4008 sdkp->first_scan = 1;
4009 sdkp->max_medium_access_timeouts = SD_MAX_MEDIUM_TIMEOUTS;
4010
4011 sd_revalidate_disk(gd);
4012
4013 if (sdp->removable) {
4014 gd->flags |= GENHD_FL_REMOVABLE;
4015 gd->events |= DISK_EVENT_MEDIA_CHANGE;
4016 gd->event_flags = DISK_EVENT_FLAG_POLL | DISK_EVENT_FLAG_UEVENT;
4017 }
4018
4019 blk_pm_runtime_init(sdp->request_queue, dev);
4020 if (sdp->rpm_autosuspend) {
4021 pm_runtime_set_autosuspend_delay(dev,
4022 sdp->host->rpm_autosuspend_delay);
4023 }
4024
4025 error = device_add_disk(dev, gd, NULL);
4026 if (error) {
4027 device_unregister(&sdkp->disk_dev);
4028 put_disk(gd);
4029 goto out;
4030 }
4031
4032 if (sdkp->security) {
4033 sdkp->opal_dev = init_opal_dev(sdkp, &sd_sec_submit);
4034 if (sdkp->opal_dev)
4035 sd_printk(KERN_NOTICE, sdkp, "supports TCG Opal\n");
4036 }
4037
4038 sd_printk(KERN_NOTICE, sdkp, "Attached SCSI %sdisk\n",
4039 sdp->removable ? "removable " : "");
4040 scsi_autopm_put_device(sdp);
4041
4042 return 0;
4043
4044 out_free_index:
4045 ida_free(&sd_index_ida, index);
4046 out_put:
4047 put_disk(gd);
4048 out_free:
4049 kfree(sdkp);
4050 out:
4051 scsi_autopm_put_device(sdp);
4052 return error;
4053 }
4054
4055 /**
4056 * sd_remove - called whenever a scsi disk (previously recognized by
4057 * sd_probe) is detached from the system. It is called (potentially
4058 * multiple times) during sd module unload.
4059 * @dev: pointer to device object
4060 *
4061 * Note: this function is invoked from the scsi mid-level.
4062 * This function potentially frees up a device name (e.g. /dev/sdc)
4063 * that could be re-used by a subsequent sd_probe().
4064 * This function is not called when the built-in sd driver is "exit-ed".
4065 **/
4066 static int sd_remove(struct device *dev)
4067 {
4068 struct scsi_disk *sdkp = dev_get_drvdata(dev);
4069
4070 scsi_autopm_get_device(sdkp->device);
4071
4072 device_del(&sdkp->disk_dev);
4073 del_gendisk(sdkp->disk);
4074 if (!sdkp->suspended)
4075 sd_shutdown(dev);
4076
4077 put_disk(sdkp->disk);
4078 return 0;
4079 }
4080
4081 static void scsi_disk_release(struct device *dev)
4082 {
4083 struct scsi_disk *sdkp = to_scsi_disk(dev);
4084
4085 ida_free(&sd_index_ida, sdkp->index);
4086 put_device(&sdkp->device->sdev_gendev);
4087 free_opal_dev(sdkp->opal_dev);
4088
4089 kfree(sdkp);
4090 }
4091
4092 static int sd_start_stop_device(struct scsi_disk *sdkp, int start)
4093 {
4094 unsigned char cmd[6] = { START_STOP }; /* START_VALID */
4095 struct scsi_sense_hdr sshdr;
4096 const struct scsi_exec_args exec_args = {
4097 .sshdr = &sshdr,
4098 .req_flags = BLK_MQ_REQ_PM,
4099 };
4100 struct scsi_device *sdp = sdkp->device;
4101 int res;
4102
4103 if (start)
4104 cmd[4] |= 1; /* START */
4105
4106 if (sdp->start_stop_pwr_cond)
4107 cmd[4] |= start ? 1 << 4 : 3 << 4; /* Active or Standby */
4108
4109 if (!scsi_device_online(sdp))
4110 return -ENODEV;
4111
4112 res = scsi_execute_cmd(sdp, cmd, REQ_OP_DRV_IN, NULL, 0, SD_TIMEOUT,
4113 sdkp->max_retries, &exec_args);
4114 if (res) {
4115 sd_print_result(sdkp, "Start/Stop Unit failed", res);
4116 if (res > 0 && scsi_sense_valid(&sshdr)) {
4117 sd_print_sense_hdr(sdkp, &sshdr);
4118 /* 0x3a is medium not present */
4119 if (sshdr.asc == 0x3a)
4120 res = 0;
4121 }
4122 }
4123
4124 /* SCSI error codes must not go to the generic layer */
4125 if (res)
4126 return -EIO;
4127
4128 return 0;
4129 }
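/*
 * For reference, the START STOP UNIT CDB built above: opcode 0x1b,
 * with cmd[4] = 0x01 to start or 0x00 to stop; when
 * start_stop_pwr_cond is set, the POWER CONDITION field is used as
 * well, giving cmd[4] = 0x11 (Active) to start and 0x30 (Standby) to
 * stop.
 */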
4130
4131 /*
4132 * Send a SYNCHRONIZE CACHE instruction down to the device through
4133 * the normal SCSI command structure. Wait for the command to
4134 * complete.
4135 */
4136 static void sd_shutdown(struct device *dev)
4137 {
4138 struct scsi_disk *sdkp = dev_get_drvdata(dev);
4139
4140 if (!sdkp)
4141 return; /* this can happen */
4142
4143 if (pm_runtime_suspended(dev))
4144 return;
4145
4146 if (sdkp->WCE && sdkp->media_present) {
4147 sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n");
4148 sd_sync_cache(sdkp);
4149 }
4150
4151 if ((system_state != SYSTEM_RESTART &&
4152 sdkp->device->manage_system_start_stop) ||
4153 (system_state == SYSTEM_POWER_OFF &&
4154 sdkp->device->manage_shutdown)) {
4155 sd_printk(KERN_NOTICE, sdkp, "Stopping disk\n");
4156 sd_start_stop_device(sdkp, 0);
4157 }
4158 }
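/*
 * Example of the condition above: on a clean power-off, a disk with
 * manage_shutdown set is sent a stop; on a reboot (system_state ==
 * SYSTEM_RESTART) the spin-down is skipped, presumably because the
 * drive would only have to spin up again moments later.
 */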
4159
4160 static inline bool sd_do_start_stop(struct scsi_device *sdev, bool runtime)
4161 {
4162 return (sdev->manage_system_start_stop && !runtime) ||
4163 (sdev->manage_runtime_start_stop && runtime);
4164 }
4165
4166 static int sd_suspend_common(struct device *dev, bool runtime)
4167 {
4168 struct scsi_disk *sdkp = dev_get_drvdata(dev);
4169 int ret = 0;
4170
4171 if (!sdkp) /* E.g.: runtime suspend following sd_remove() */
4172 return 0;
4173
4174 if (sdkp->WCE && sdkp->media_present) {
4175 if (!sdkp->device->silence_suspend)
4176 sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n");
4177 ret = sd_sync_cache(sdkp);
4178 /* ignore OFFLINE device */
4179 if (ret == -ENODEV)
4180 return 0;
4181
4182 if (ret)
4183 return ret;
4184 }
4185
4186 if (sd_do_start_stop(sdkp->device, runtime)) {
4187 if (!sdkp->device->silence_suspend)
4188 sd_printk(KERN_NOTICE, sdkp, "Stopping disk\n");
4189 /* an error is not worth aborting a system sleep */
4190 ret = sd_start_stop_device(sdkp, 0);
4191 if (!runtime)
4192 ret = 0;
4193 }
4194
4195 if (!ret)
4196 sdkp->suspended = true;
4197
4198 return ret;
4199 }
4200
4201 static int sd_suspend_system(struct device *dev)
4202 {
4203 if (pm_runtime_suspended(dev))
4204 return 0;
4205
4206 return sd_suspend_common(dev, false);
4207 }
4208
4209 static int sd_suspend_runtime(struct device *dev)
4210 {
4211 return sd_suspend_common(dev, true);
4212 }
4213
4214 static int sd_resume(struct device *dev)
4215 {
4216 struct scsi_disk *sdkp = dev_get_drvdata(dev);
4217
4218 sd_printk(KERN_NOTICE, sdkp, "Starting disk\n");
4219
4220 if (opal_unlock_from_suspend(sdkp->opal_dev)) {
4221 sd_printk(KERN_NOTICE, sdkp, "OPAL unlock failed\n");
4222 return -EIO;
4223 }
4224
4225 return 0;
4226 }
4227
4228 static int sd_resume_common(struct device *dev, bool runtime)
4229 {
4230 struct scsi_disk *sdkp = dev_get_drvdata(dev);
4231 int ret;
4232
4233 if (!sdkp) /* E.g.: runtime resume at the start of sd_probe() */
4234 return 0;
4235
4236 if (!sd_do_start_stop(sdkp->device, runtime)) {
4237 sdkp->suspended = false;
4238 return 0;
4239 }
4240
4241 sd_printk(KERN_NOTICE, sdkp, "Starting disk\n");
4242 ret = sd_start_stop_device(sdkp, 1);
4243 if (!ret) {
4244 sd_resume(dev);
4245 sdkp->suspended = false;
4246 }
4247
4248 return ret;
4249 }
4250
4251 static int sd_resume_system(struct device *dev)
4252 {
4253 if (pm_runtime_suspended(dev)) {
4254 struct scsi_disk *sdkp = dev_get_drvdata(dev);
4255 struct scsi_device *sdp = sdkp ? sdkp->device : NULL;
4256
4257 if (sdp && sdp->force_runtime_start_on_system_start)
4258 pm_request_resume(dev);
4259
4260 return 0;
4261 }
4262
4263 return sd_resume_common(dev, false);
4264 }
4265
4266 static int sd_resume_runtime(struct device *dev)
4267 {
4268 struct scsi_disk *sdkp = dev_get_drvdata(dev);
4269 struct scsi_device *sdp;
4270
4271 if (!sdkp) /* E.g.: runtime resume at the start of sd_probe() */
4272 return 0;
4273
4274 sdp = sdkp->device;
4275
4276 if (sdp->ignore_media_change) {
4277 /* clear the device's sense data */
4278 static const u8 cmd[10] = { REQUEST_SENSE };
4279 const struct scsi_exec_args exec_args = {
4280 .req_flags = BLK_MQ_REQ_PM,
4281 };
4282
4283 if (scsi_execute_cmd(sdp, cmd, REQ_OP_DRV_IN, NULL, 0,
4284 sdp->request_queue->rq_timeout, 1,
4285 &exec_args))
4286 sd_printk(KERN_NOTICE, sdkp,
4287 "Failed to clear sense data\n");
4288 }
4289
4290 return sd_resume_common(dev, true);
4291 }
4292
4293 static const struct dev_pm_ops sd_pm_ops = {
4294 .suspend = sd_suspend_system,
4295 .resume = sd_resume_system,
4296 .poweroff = sd_suspend_system,
4297 .restore = sd_resume_system,
4298 .runtime_suspend = sd_suspend_runtime,
4299 .runtime_resume = sd_resume_runtime,
4300 };
4301
4302 static struct scsi_driver sd_template = {
4303 .gendrv = {
4304 .name = "sd",
4305 .probe = sd_probe,
4306 .probe_type = PROBE_PREFER_ASYNCHRONOUS,
4307 .remove = sd_remove,
4308 .shutdown = sd_shutdown,
4309 .pm = &sd_pm_ops,
4310 },
4311 .rescan = sd_rescan,
4312 .resume = sd_resume,
4313 .init_command = sd_init_command,
4314 .uninit_command = sd_uninit_command,
4315 .done = sd_done,
4316 .eh_action = sd_eh_action,
4317 .eh_reset = sd_eh_reset,
4318 };
4319
4320 /**
4321 * init_sd - entry point for this driver (both when built in or when
4322 * a module).
4323 *
4324 * Note: this function registers this driver with the scsi mid-level.
4325 **/
4326 static int __init init_sd(void)
4327 {
4328 int majors = 0, i, err;
4329
4330 SCSI_LOG_HLQUEUE(3, printk("init_sd: sd driver entry point\n"));
4331
4332 for (i = 0; i < SD_MAJORS; i++) {
4333 if (__register_blkdev(sd_major(i), "sd", sd_default_probe))
4334 continue;
4335 majors++;
4336 }
4337
4338 if (!majors)
4339 return -ENODEV;
4340
4341 err = class_register(&sd_disk_class);
4342 if (err)
4343 goto err_out;
4344
4345 sd_page_pool = mempool_create_page_pool(SD_MEMPOOL_SIZE, 0);
4346 if (!sd_page_pool) {
4347 printk(KERN_ERR "sd: can't init discard page pool\n");
4348 err = -ENOMEM;
4349 goto err_out_class;
4350 }
4351
4352 err = scsi_register_driver(&sd_template.gendrv);
4353 if (err)
4354 goto err_out_driver;
4355
4356 return 0;
4357
4358 err_out_driver:
4359 mempool_destroy(sd_page_pool);
4360 err_out_class:
4361 class_unregister(&sd_disk_class);
4362 err_out:
4363 for (i = 0; i < SD_MAJORS; i++)
4364 unregister_blkdev(sd_major(i), "sd");
4365 return err;
4366 }
4367
4368 /**
4369 * exit_sd - exit point for this driver (when it is a module).
4370 *
4371 * Note: this function unregisters this driver from the scsi mid-level.
4372 **/
4373 static void __exit exit_sd(void)
4374 {
4375 int i;
4376
4377 SCSI_LOG_HLQUEUE(3, printk("exit_sd: exiting sd driver\n"));
4378
4379 scsi_unregister_driver(&sd_template.gendrv);
4380 mempool_destroy(sd_page_pool);
4381
4382 class_unregister(&sd_disk_class);
4383
4384 for (i = 0; i < SD_MAJORS; i++)
4385 unregister_blkdev(sd_major(i), "sd");
4386 }
4387
4388 module_init(init_sd);
4389 module_exit(exit_sd);
4390
4391 void sd_print_sense_hdr(struct scsi_disk *sdkp, struct scsi_sense_hdr *sshdr)
4392 {
4393 scsi_print_sense_hdr(sdkp->device,
4394 sdkp->disk ? sdkp->disk->disk_name : NULL, sshdr);
4395 }
4396
4397 void sd_print_result(const struct scsi_disk *sdkp, const char *msg, int result)
4398 {
4399 const char *hb_string = scsi_hostbyte_string(result);
4400
4401 if (hb_string)
4402 sd_printk(KERN_INFO, sdkp,
4403 "%s: Result: hostbyte=%s driverbyte=%s\n", msg,
4404 hb_string,
4405 "DRIVER_OK");
4406 else
4407 sd_printk(KERN_INFO, sdkp,
4408 "%s: Result: hostbyte=0x%02x driverbyte=%s\n",
4409 msg, host_byte(result), "DRIVER_OK");
4410 }
4411