1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * sd.c Copyright (C) 1992 Drew Eckhardt 4 * Copyright (C) 1993, 1994, 1995, 1999 Eric Youngdale 5 * 6 * Linux scsi disk driver 7 * Initial versions: Drew Eckhardt 8 * Subsequent revisions: Eric Youngdale 9 * Modification history: 10 * - Drew Eckhardt <drew@colorado.edu> original 11 * - Eric Youngdale <eric@andante.org> add scatter-gather, multiple 12 * outstanding request, and other enhancements. 13 * Support loadable low-level scsi drivers. 14 * - Jirka Hanika <geo@ff.cuni.cz> support more scsi disks using 15 * eight major numbers. 16 * - Richard Gooch <rgooch@atnf.csiro.au> support devfs. 17 * - Torben Mathiasen <tmm@image.dk> Resource allocation fixes in 18 * sd_init and cleanups. 19 * - Alex Davis <letmein@erols.com> Fix problem where partition info 20 * not being read in sd_open. Fix problem where removable media 21 * could be ejected after sd_open. 22 * - Douglas Gilbert <dgilbert@interlog.com> cleanup for lk 2.5.x 23 * - Badari Pulavarty <pbadari@us.ibm.com>, Matthew Wilcox 24 * <willy@debian.org>, Kurt Garloff <garloff@suse.de>: 25 * Support 32k/1M disks. 26 * 27 * Logging policy (needs CONFIG_SCSI_LOGGING defined): 28 * - setting up transfer: SCSI_LOG_HLQUEUE levels 1 and 2 29 * - end of transfer (bh + scsi_lib): SCSI_LOG_HLCOMPLETE level 1 30 * - entering sd_ioctl: SCSI_LOG_IOCTL level 1 31 * - entering other commands: SCSI_LOG_HLQUEUE level 3 32 * Note: when the logging level is set by the user, it must be greater 33 * than the level indicated above to trigger output. 34 */ 35 36 #include <linux/module.h> 37 #include <linux/fs.h> 38 #include <linux/kernel.h> 39 #include <linux/mm.h> 40 #include <linux/bio.h> 41 #include <linux/hdreg.h> 42 #include <linux/errno.h> 43 #include <linux/idr.h> 44 #include <linux/interrupt.h> 45 #include <linux/init.h> 46 #include <linux/blkdev.h> 47 #include <linux/blkpg.h> 48 #include <linux/blk-pm.h> 49 #include <linux/delay.h> 50 #include <linux/major.h> 51 #include <linux/mutex.h> 52 #include <linux/string_helpers.h> 53 #include <linux/slab.h> 54 #include <linux/sed-opal.h> 55 #include <linux/pm_runtime.h> 56 #include <linux/pr.h> 57 #include <linux/t10-pi.h> 58 #include <linux/uaccess.h> 59 #include <asm/unaligned.h> 60 61 #include <scsi/scsi.h> 62 #include <scsi/scsi_cmnd.h> 63 #include <scsi/scsi_dbg.h> 64 #include <scsi/scsi_device.h> 65 #include <scsi/scsi_driver.h> 66 #include <scsi/scsi_eh.h> 67 #include <scsi/scsi_host.h> 68 #include <scsi/scsi_ioctl.h> 69 #include <scsi/scsicam.h> 70 #include <scsi/scsi_common.h> 71 72 #include "sd.h" 73 #include "scsi_priv.h" 74 #include "scsi_logging.h" 75 76 MODULE_AUTHOR("Eric Youngdale"); 77 MODULE_DESCRIPTION("SCSI disk (sd) driver"); 78 MODULE_LICENSE("GPL"); 79 80 MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK0_MAJOR); 81 MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK1_MAJOR); 82 MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK2_MAJOR); 83 MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK3_MAJOR); 84 MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK4_MAJOR); 85 MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK5_MAJOR); 86 MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK6_MAJOR); 87 MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK7_MAJOR); 88 MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK8_MAJOR); 89 MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK9_MAJOR); 90 MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK10_MAJOR); 91 MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK11_MAJOR); 92 MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK12_MAJOR); 93 MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK13_MAJOR); 94 MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK14_MAJOR); 95 MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK15_MAJOR); 96 MODULE_ALIAS_SCSI_DEVICE(TYPE_DISK); 97 MODULE_ALIAS_SCSI_DEVICE(TYPE_MOD); 98 MODULE_ALIAS_SCSI_DEVICE(TYPE_RBC); 99 MODULE_ALIAS_SCSI_DEVICE(TYPE_ZBC); 100 101 #define SD_MINORS 16 102 103 static void sd_config_discard(struct scsi_disk *, unsigned int); 104 static void sd_config_write_same(struct scsi_disk *); 105 static int sd_revalidate_disk(struct gendisk *); 106 static void sd_unlock_native_capacity(struct gendisk *disk); 107 static void sd_shutdown(struct device *); 108 static void sd_read_capacity(struct scsi_disk *sdkp, unsigned char *buffer); 109 static void scsi_disk_release(struct device *cdev); 110 111 static DEFINE_IDA(sd_index_ida); 112 113 static mempool_t *sd_page_pool; 114 static struct lock_class_key sd_bio_compl_lkclass; 115 116 static const char *sd_cache_types[] = { 117 "write through", "none", "write back", 118 "write back, no read (daft)" 119 }; 120 121 static void sd_set_flush_flag(struct scsi_disk *sdkp) 122 { 123 bool wc = false, fua = false; 124 125 if (sdkp->WCE) { 126 wc = true; 127 if (sdkp->DPOFUA) 128 fua = true; 129 } 130 131 blk_queue_write_cache(sdkp->disk->queue, wc, fua); 132 } 133 134 static ssize_t 135 cache_type_store(struct device *dev, struct device_attribute *attr, 136 const char *buf, size_t count) 137 { 138 int ct, rcd, wce, sp; 139 struct scsi_disk *sdkp = to_scsi_disk(dev); 140 struct scsi_device *sdp = sdkp->device; 141 char buffer[64]; 142 char *buffer_data; 143 struct scsi_mode_data data; 144 struct scsi_sense_hdr sshdr; 145 static const char temp[] = "temporary "; 146 int len; 147 148 if (sdp->type != TYPE_DISK && sdp->type != TYPE_ZBC) 149 /* no cache control on RBC devices; theoretically they 150 * can do it, but there's probably so many exceptions 151 * it's not worth the risk */ 152 return -EINVAL; 153 154 if (strncmp(buf, temp, sizeof(temp) - 1) == 0) { 155 buf += sizeof(temp) - 1; 156 sdkp->cache_override = 1; 157 } else { 158 sdkp->cache_override = 0; 159 } 160 161 ct = sysfs_match_string(sd_cache_types, buf); 162 if (ct < 0) 163 return -EINVAL; 164 165 rcd = ct & 0x01 ? 1 : 0; 166 wce = (ct & 0x02) && !sdkp->write_prot ? 1 : 0; 167 168 if (sdkp->cache_override) { 169 sdkp->WCE = wce; 170 sdkp->RCD = rcd; 171 sd_set_flush_flag(sdkp); 172 return count; 173 } 174 175 if (scsi_mode_sense(sdp, 0x08, 8, 0, buffer, sizeof(buffer), SD_TIMEOUT, 176 sdkp->max_retries, &data, NULL)) 177 return -EINVAL; 178 len = min_t(size_t, sizeof(buffer), data.length - data.header_length - 179 data.block_descriptor_length); 180 buffer_data = buffer + data.header_length + 181 data.block_descriptor_length; 182 buffer_data[2] &= ~0x05; 183 buffer_data[2] |= wce << 2 | rcd; 184 sp = buffer_data[0] & 0x80 ? 1 : 0; 185 buffer_data[0] &= ~0x80; 186 187 /* 188 * Ensure WP, DPOFUA, and RESERVED fields are cleared in 189 * received mode parameter buffer before doing MODE SELECT. 190 */ 191 data.device_specific = 0; 192 193 if (scsi_mode_select(sdp, 1, sp, buffer_data, len, SD_TIMEOUT, 194 sdkp->max_retries, &data, &sshdr)) { 195 if (scsi_sense_valid(&sshdr)) 196 sd_print_sense_hdr(sdkp, &sshdr); 197 return -EINVAL; 198 } 199 sd_revalidate_disk(sdkp->disk); 200 return count; 201 } 202 203 static ssize_t 204 manage_start_stop_show(struct device *dev, struct device_attribute *attr, 205 char *buf) 206 { 207 struct scsi_disk *sdkp = to_scsi_disk(dev); 208 struct scsi_device *sdp = sdkp->device; 209 210 return sprintf(buf, "%u\n", sdp->manage_start_stop); 211 } 212 213 static ssize_t 214 manage_start_stop_store(struct device *dev, struct device_attribute *attr, 215 const char *buf, size_t count) 216 { 217 struct scsi_disk *sdkp = to_scsi_disk(dev); 218 struct scsi_device *sdp = sdkp->device; 219 bool v; 220 221 if (!capable(CAP_SYS_ADMIN)) 222 return -EACCES; 223 224 if (kstrtobool(buf, &v)) 225 return -EINVAL; 226 227 sdp->manage_start_stop = v; 228 229 return count; 230 } 231 static DEVICE_ATTR_RW(manage_start_stop); 232 233 static ssize_t 234 allow_restart_show(struct device *dev, struct device_attribute *attr, char *buf) 235 { 236 struct scsi_disk *sdkp = to_scsi_disk(dev); 237 238 return sprintf(buf, "%u\n", sdkp->device->allow_restart); 239 } 240 241 static ssize_t 242 allow_restart_store(struct device *dev, struct device_attribute *attr, 243 const char *buf, size_t count) 244 { 245 bool v; 246 struct scsi_disk *sdkp = to_scsi_disk(dev); 247 struct scsi_device *sdp = sdkp->device; 248 249 if (!capable(CAP_SYS_ADMIN)) 250 return -EACCES; 251 252 if (sdp->type != TYPE_DISK && sdp->type != TYPE_ZBC) 253 return -EINVAL; 254 255 if (kstrtobool(buf, &v)) 256 return -EINVAL; 257 258 sdp->allow_restart = v; 259 260 return count; 261 } 262 static DEVICE_ATTR_RW(allow_restart); 263 264 static ssize_t 265 cache_type_show(struct device *dev, struct device_attribute *attr, char *buf) 266 { 267 struct scsi_disk *sdkp = to_scsi_disk(dev); 268 int ct = sdkp->RCD + 2*sdkp->WCE; 269 270 return sprintf(buf, "%s\n", sd_cache_types[ct]); 271 } 272 static DEVICE_ATTR_RW(cache_type); 273 274 static ssize_t 275 FUA_show(struct device *dev, struct device_attribute *attr, char *buf) 276 { 277 struct scsi_disk *sdkp = to_scsi_disk(dev); 278 279 return sprintf(buf, "%u\n", sdkp->DPOFUA); 280 } 281 static DEVICE_ATTR_RO(FUA); 282 283 static ssize_t 284 protection_type_show(struct device *dev, struct device_attribute *attr, 285 char *buf) 286 { 287 struct scsi_disk *sdkp = to_scsi_disk(dev); 288 289 return sprintf(buf, "%u\n", sdkp->protection_type); 290 } 291 292 static ssize_t 293 protection_type_store(struct device *dev, struct device_attribute *attr, 294 const char *buf, size_t count) 295 { 296 struct scsi_disk *sdkp = to_scsi_disk(dev); 297 unsigned int val; 298 int err; 299 300 if (!capable(CAP_SYS_ADMIN)) 301 return -EACCES; 302 303 err = kstrtouint(buf, 10, &val); 304 305 if (err) 306 return err; 307 308 if (val <= T10_PI_TYPE3_PROTECTION) 309 sdkp->protection_type = val; 310 311 return count; 312 } 313 static DEVICE_ATTR_RW(protection_type); 314 315 static ssize_t 316 protection_mode_show(struct device *dev, struct device_attribute *attr, 317 char *buf) 318 { 319 struct scsi_disk *sdkp = to_scsi_disk(dev); 320 struct scsi_device *sdp = sdkp->device; 321 unsigned int dif, dix; 322 323 dif = scsi_host_dif_capable(sdp->host, sdkp->protection_type); 324 dix = scsi_host_dix_capable(sdp->host, sdkp->protection_type); 325 326 if (!dix && scsi_host_dix_capable(sdp->host, T10_PI_TYPE0_PROTECTION)) { 327 dif = 0; 328 dix = 1; 329 } 330 331 if (!dif && !dix) 332 return sprintf(buf, "none\n"); 333 334 return sprintf(buf, "%s%u\n", dix ? "dix" : "dif", dif); 335 } 336 static DEVICE_ATTR_RO(protection_mode); 337 338 static ssize_t 339 app_tag_own_show(struct device *dev, struct device_attribute *attr, char *buf) 340 { 341 struct scsi_disk *sdkp = to_scsi_disk(dev); 342 343 return sprintf(buf, "%u\n", sdkp->ATO); 344 } 345 static DEVICE_ATTR_RO(app_tag_own); 346 347 static ssize_t 348 thin_provisioning_show(struct device *dev, struct device_attribute *attr, 349 char *buf) 350 { 351 struct scsi_disk *sdkp = to_scsi_disk(dev); 352 353 return sprintf(buf, "%u\n", sdkp->lbpme); 354 } 355 static DEVICE_ATTR_RO(thin_provisioning); 356 357 /* sysfs_match_string() requires dense arrays */ 358 static const char *lbp_mode[] = { 359 [SD_LBP_FULL] = "full", 360 [SD_LBP_UNMAP] = "unmap", 361 [SD_LBP_WS16] = "writesame_16", 362 [SD_LBP_WS10] = "writesame_10", 363 [SD_LBP_ZERO] = "writesame_zero", 364 [SD_LBP_DISABLE] = "disabled", 365 }; 366 367 static ssize_t 368 provisioning_mode_show(struct device *dev, struct device_attribute *attr, 369 char *buf) 370 { 371 struct scsi_disk *sdkp = to_scsi_disk(dev); 372 373 return sprintf(buf, "%s\n", lbp_mode[sdkp->provisioning_mode]); 374 } 375 376 static ssize_t 377 provisioning_mode_store(struct device *dev, struct device_attribute *attr, 378 const char *buf, size_t count) 379 { 380 struct scsi_disk *sdkp = to_scsi_disk(dev); 381 struct scsi_device *sdp = sdkp->device; 382 int mode; 383 384 if (!capable(CAP_SYS_ADMIN)) 385 return -EACCES; 386 387 if (sd_is_zoned(sdkp)) { 388 sd_config_discard(sdkp, SD_LBP_DISABLE); 389 return count; 390 } 391 392 if (sdp->type != TYPE_DISK) 393 return -EINVAL; 394 395 mode = sysfs_match_string(lbp_mode, buf); 396 if (mode < 0) 397 return -EINVAL; 398 399 sd_config_discard(sdkp, mode); 400 401 return count; 402 } 403 static DEVICE_ATTR_RW(provisioning_mode); 404 405 /* sysfs_match_string() requires dense arrays */ 406 static const char *zeroing_mode[] = { 407 [SD_ZERO_WRITE] = "write", 408 [SD_ZERO_WS] = "writesame", 409 [SD_ZERO_WS16_UNMAP] = "writesame_16_unmap", 410 [SD_ZERO_WS10_UNMAP] = "writesame_10_unmap", 411 }; 412 413 static ssize_t 414 zeroing_mode_show(struct device *dev, struct device_attribute *attr, 415 char *buf) 416 { 417 struct scsi_disk *sdkp = to_scsi_disk(dev); 418 419 return sprintf(buf, "%s\n", zeroing_mode[sdkp->zeroing_mode]); 420 } 421 422 static ssize_t 423 zeroing_mode_store(struct device *dev, struct device_attribute *attr, 424 const char *buf, size_t count) 425 { 426 struct scsi_disk *sdkp = to_scsi_disk(dev); 427 int mode; 428 429 if (!capable(CAP_SYS_ADMIN)) 430 return -EACCES; 431 432 mode = sysfs_match_string(zeroing_mode, buf); 433 if (mode < 0) 434 return -EINVAL; 435 436 sdkp->zeroing_mode = mode; 437 438 return count; 439 } 440 static DEVICE_ATTR_RW(zeroing_mode); 441 442 static ssize_t 443 max_medium_access_timeouts_show(struct device *dev, 444 struct device_attribute *attr, char *buf) 445 { 446 struct scsi_disk *sdkp = to_scsi_disk(dev); 447 448 return sprintf(buf, "%u\n", sdkp->max_medium_access_timeouts); 449 } 450 451 static ssize_t 452 max_medium_access_timeouts_store(struct device *dev, 453 struct device_attribute *attr, const char *buf, 454 size_t count) 455 { 456 struct scsi_disk *sdkp = to_scsi_disk(dev); 457 int err; 458 459 if (!capable(CAP_SYS_ADMIN)) 460 return -EACCES; 461 462 err = kstrtouint(buf, 10, &sdkp->max_medium_access_timeouts); 463 464 return err ? err : count; 465 } 466 static DEVICE_ATTR_RW(max_medium_access_timeouts); 467 468 static ssize_t 469 max_write_same_blocks_show(struct device *dev, struct device_attribute *attr, 470 char *buf) 471 { 472 struct scsi_disk *sdkp = to_scsi_disk(dev); 473 474 return sprintf(buf, "%u\n", sdkp->max_ws_blocks); 475 } 476 477 static ssize_t 478 max_write_same_blocks_store(struct device *dev, struct device_attribute *attr, 479 const char *buf, size_t count) 480 { 481 struct scsi_disk *sdkp = to_scsi_disk(dev); 482 struct scsi_device *sdp = sdkp->device; 483 unsigned long max; 484 int err; 485 486 if (!capable(CAP_SYS_ADMIN)) 487 return -EACCES; 488 489 if (sdp->type != TYPE_DISK && sdp->type != TYPE_ZBC) 490 return -EINVAL; 491 492 err = kstrtoul(buf, 10, &max); 493 494 if (err) 495 return err; 496 497 if (max == 0) 498 sdp->no_write_same = 1; 499 else if (max <= SD_MAX_WS16_BLOCKS) { 500 sdp->no_write_same = 0; 501 sdkp->max_ws_blocks = max; 502 } 503 504 sd_config_write_same(sdkp); 505 506 return count; 507 } 508 static DEVICE_ATTR_RW(max_write_same_blocks); 509 510 static ssize_t 511 zoned_cap_show(struct device *dev, struct device_attribute *attr, char *buf) 512 { 513 struct scsi_disk *sdkp = to_scsi_disk(dev); 514 515 if (sdkp->device->type == TYPE_ZBC) 516 return sprintf(buf, "host-managed\n"); 517 if (sdkp->zoned == 1) 518 return sprintf(buf, "host-aware\n"); 519 if (sdkp->zoned == 2) 520 return sprintf(buf, "drive-managed\n"); 521 return sprintf(buf, "none\n"); 522 } 523 static DEVICE_ATTR_RO(zoned_cap); 524 525 static ssize_t 526 max_retries_store(struct device *dev, struct device_attribute *attr, 527 const char *buf, size_t count) 528 { 529 struct scsi_disk *sdkp = to_scsi_disk(dev); 530 struct scsi_device *sdev = sdkp->device; 531 int retries, err; 532 533 err = kstrtoint(buf, 10, &retries); 534 if (err) 535 return err; 536 537 if (retries == SCSI_CMD_RETRIES_NO_LIMIT || retries <= SD_MAX_RETRIES) { 538 sdkp->max_retries = retries; 539 return count; 540 } 541 542 sdev_printk(KERN_ERR, sdev, "max_retries must be between -1 and %d\n", 543 SD_MAX_RETRIES); 544 return -EINVAL; 545 } 546 547 static ssize_t 548 max_retries_show(struct device *dev, struct device_attribute *attr, 549 char *buf) 550 { 551 struct scsi_disk *sdkp = to_scsi_disk(dev); 552 553 return sprintf(buf, "%d\n", sdkp->max_retries); 554 } 555 556 static DEVICE_ATTR_RW(max_retries); 557 558 static struct attribute *sd_disk_attrs[] = { 559 &dev_attr_cache_type.attr, 560 &dev_attr_FUA.attr, 561 &dev_attr_allow_restart.attr, 562 &dev_attr_manage_start_stop.attr, 563 &dev_attr_protection_type.attr, 564 &dev_attr_protection_mode.attr, 565 &dev_attr_app_tag_own.attr, 566 &dev_attr_thin_provisioning.attr, 567 &dev_attr_provisioning_mode.attr, 568 &dev_attr_zeroing_mode.attr, 569 &dev_attr_max_write_same_blocks.attr, 570 &dev_attr_max_medium_access_timeouts.attr, 571 &dev_attr_zoned_cap.attr, 572 &dev_attr_max_retries.attr, 573 NULL, 574 }; 575 ATTRIBUTE_GROUPS(sd_disk); 576 577 static struct class sd_disk_class = { 578 .name = "scsi_disk", 579 .dev_release = scsi_disk_release, 580 .dev_groups = sd_disk_groups, 581 }; 582 583 /* 584 * Don't request a new module, as that could deadlock in multipath 585 * environment. 586 */ 587 static void sd_default_probe(dev_t devt) 588 { 589 } 590 591 /* 592 * Device no to disk mapping: 593 * 594 * major disc2 disc p1 595 * |............|.............|....|....| <- dev_t 596 * 31 20 19 8 7 4 3 0 597 * 598 * Inside a major, we have 16k disks, however mapped non- 599 * contiguously. The first 16 disks are for major0, the next 600 * ones with major1, ... Disk 256 is for major0 again, disk 272 601 * for major1, ... 602 * As we stay compatible with our numbering scheme, we can reuse 603 * the well-know SCSI majors 8, 65--71, 136--143. 604 */ 605 static int sd_major(int major_idx) 606 { 607 switch (major_idx) { 608 case 0: 609 return SCSI_DISK0_MAJOR; 610 case 1 ... 7: 611 return SCSI_DISK1_MAJOR + major_idx - 1; 612 case 8 ... 15: 613 return SCSI_DISK8_MAJOR + major_idx - 8; 614 default: 615 BUG(); 616 return 0; /* shut up gcc */ 617 } 618 } 619 620 #ifdef CONFIG_BLK_SED_OPAL 621 static int sd_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, 622 size_t len, bool send) 623 { 624 struct scsi_disk *sdkp = data; 625 struct scsi_device *sdev = sdkp->device; 626 u8 cdb[12] = { 0, }; 627 const struct scsi_exec_args exec_args = { 628 .req_flags = BLK_MQ_REQ_PM, 629 }; 630 int ret; 631 632 cdb[0] = send ? SECURITY_PROTOCOL_OUT : SECURITY_PROTOCOL_IN; 633 cdb[1] = secp; 634 put_unaligned_be16(spsp, &cdb[2]); 635 put_unaligned_be32(len, &cdb[6]); 636 637 ret = scsi_execute_cmd(sdev, cdb, send ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 638 buffer, len, SD_TIMEOUT, sdkp->max_retries, 639 &exec_args); 640 return ret <= 0 ? ret : -EIO; 641 } 642 #endif /* CONFIG_BLK_SED_OPAL */ 643 644 /* 645 * Look up the DIX operation based on whether the command is read or 646 * write and whether dix and dif are enabled. 647 */ 648 static unsigned int sd_prot_op(bool write, bool dix, bool dif) 649 { 650 /* Lookup table: bit 2 (write), bit 1 (dix), bit 0 (dif) */ 651 static const unsigned int ops[] = { /* wrt dix dif */ 652 SCSI_PROT_NORMAL, /* 0 0 0 */ 653 SCSI_PROT_READ_STRIP, /* 0 0 1 */ 654 SCSI_PROT_READ_INSERT, /* 0 1 0 */ 655 SCSI_PROT_READ_PASS, /* 0 1 1 */ 656 SCSI_PROT_NORMAL, /* 1 0 0 */ 657 SCSI_PROT_WRITE_INSERT, /* 1 0 1 */ 658 SCSI_PROT_WRITE_STRIP, /* 1 1 0 */ 659 SCSI_PROT_WRITE_PASS, /* 1 1 1 */ 660 }; 661 662 return ops[write << 2 | dix << 1 | dif]; 663 } 664 665 /* 666 * Returns a mask of the protection flags that are valid for a given DIX 667 * operation. 668 */ 669 static unsigned int sd_prot_flag_mask(unsigned int prot_op) 670 { 671 static const unsigned int flag_mask[] = { 672 [SCSI_PROT_NORMAL] = 0, 673 674 [SCSI_PROT_READ_STRIP] = SCSI_PROT_TRANSFER_PI | 675 SCSI_PROT_GUARD_CHECK | 676 SCSI_PROT_REF_CHECK | 677 SCSI_PROT_REF_INCREMENT, 678 679 [SCSI_PROT_READ_INSERT] = SCSI_PROT_REF_INCREMENT | 680 SCSI_PROT_IP_CHECKSUM, 681 682 [SCSI_PROT_READ_PASS] = SCSI_PROT_TRANSFER_PI | 683 SCSI_PROT_GUARD_CHECK | 684 SCSI_PROT_REF_CHECK | 685 SCSI_PROT_REF_INCREMENT | 686 SCSI_PROT_IP_CHECKSUM, 687 688 [SCSI_PROT_WRITE_INSERT] = SCSI_PROT_TRANSFER_PI | 689 SCSI_PROT_REF_INCREMENT, 690 691 [SCSI_PROT_WRITE_STRIP] = SCSI_PROT_GUARD_CHECK | 692 SCSI_PROT_REF_CHECK | 693 SCSI_PROT_REF_INCREMENT | 694 SCSI_PROT_IP_CHECKSUM, 695 696 [SCSI_PROT_WRITE_PASS] = SCSI_PROT_TRANSFER_PI | 697 SCSI_PROT_GUARD_CHECK | 698 SCSI_PROT_REF_CHECK | 699 SCSI_PROT_REF_INCREMENT | 700 SCSI_PROT_IP_CHECKSUM, 701 }; 702 703 return flag_mask[prot_op]; 704 } 705 706 static unsigned char sd_setup_protect_cmnd(struct scsi_cmnd *scmd, 707 unsigned int dix, unsigned int dif) 708 { 709 struct request *rq = scsi_cmd_to_rq(scmd); 710 struct bio *bio = rq->bio; 711 unsigned int prot_op = sd_prot_op(rq_data_dir(rq), dix, dif); 712 unsigned int protect = 0; 713 714 if (dix) { /* DIX Type 0, 1, 2, 3 */ 715 if (bio_integrity_flagged(bio, BIP_IP_CHECKSUM)) 716 scmd->prot_flags |= SCSI_PROT_IP_CHECKSUM; 717 718 if (bio_integrity_flagged(bio, BIP_CTRL_NOCHECK) == false) 719 scmd->prot_flags |= SCSI_PROT_GUARD_CHECK; 720 } 721 722 if (dif != T10_PI_TYPE3_PROTECTION) { /* DIX/DIF Type 0, 1, 2 */ 723 scmd->prot_flags |= SCSI_PROT_REF_INCREMENT; 724 725 if (bio_integrity_flagged(bio, BIP_CTRL_NOCHECK) == false) 726 scmd->prot_flags |= SCSI_PROT_REF_CHECK; 727 } 728 729 if (dif) { /* DIX/DIF Type 1, 2, 3 */ 730 scmd->prot_flags |= SCSI_PROT_TRANSFER_PI; 731 732 if (bio_integrity_flagged(bio, BIP_DISK_NOCHECK)) 733 protect = 3 << 5; /* Disable target PI checking */ 734 else 735 protect = 1 << 5; /* Enable target PI checking */ 736 } 737 738 scsi_set_prot_op(scmd, prot_op); 739 scsi_set_prot_type(scmd, dif); 740 scmd->prot_flags &= sd_prot_flag_mask(prot_op); 741 742 return protect; 743 } 744 745 static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode) 746 { 747 struct request_queue *q = sdkp->disk->queue; 748 unsigned int logical_block_size = sdkp->device->sector_size; 749 unsigned int max_blocks = 0; 750 751 q->limits.discard_alignment = 752 sdkp->unmap_alignment * logical_block_size; 753 q->limits.discard_granularity = 754 max(sdkp->physical_block_size, 755 sdkp->unmap_granularity * logical_block_size); 756 sdkp->provisioning_mode = mode; 757 758 switch (mode) { 759 760 case SD_LBP_FULL: 761 case SD_LBP_DISABLE: 762 blk_queue_max_discard_sectors(q, 0); 763 return; 764 765 case SD_LBP_UNMAP: 766 max_blocks = min_not_zero(sdkp->max_unmap_blocks, 767 (u32)SD_MAX_WS16_BLOCKS); 768 break; 769 770 case SD_LBP_WS16: 771 if (sdkp->device->unmap_limit_for_ws) 772 max_blocks = sdkp->max_unmap_blocks; 773 else 774 max_blocks = sdkp->max_ws_blocks; 775 776 max_blocks = min_not_zero(max_blocks, (u32)SD_MAX_WS16_BLOCKS); 777 break; 778 779 case SD_LBP_WS10: 780 if (sdkp->device->unmap_limit_for_ws) 781 max_blocks = sdkp->max_unmap_blocks; 782 else 783 max_blocks = sdkp->max_ws_blocks; 784 785 max_blocks = min_not_zero(max_blocks, (u32)SD_MAX_WS10_BLOCKS); 786 break; 787 788 case SD_LBP_ZERO: 789 max_blocks = min_not_zero(sdkp->max_ws_blocks, 790 (u32)SD_MAX_WS10_BLOCKS); 791 break; 792 } 793 794 blk_queue_max_discard_sectors(q, max_blocks * (logical_block_size >> 9)); 795 } 796 797 static void *sd_set_special_bvec(struct request *rq, unsigned int data_len) 798 { 799 struct page *page; 800 801 page = mempool_alloc(sd_page_pool, GFP_ATOMIC); 802 if (!page) 803 return NULL; 804 clear_highpage(page); 805 bvec_set_page(&rq->special_vec, page, data_len, 0); 806 rq->rq_flags |= RQF_SPECIAL_PAYLOAD; 807 return bvec_virt(&rq->special_vec); 808 } 809 810 static blk_status_t sd_setup_unmap_cmnd(struct scsi_cmnd *cmd) 811 { 812 struct scsi_device *sdp = cmd->device; 813 struct request *rq = scsi_cmd_to_rq(cmd); 814 struct scsi_disk *sdkp = scsi_disk(rq->q->disk); 815 u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq)); 816 u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq)); 817 unsigned int data_len = 24; 818 char *buf; 819 820 buf = sd_set_special_bvec(rq, data_len); 821 if (!buf) 822 return BLK_STS_RESOURCE; 823 824 cmd->cmd_len = 10; 825 cmd->cmnd[0] = UNMAP; 826 cmd->cmnd[8] = 24; 827 828 put_unaligned_be16(6 + 16, &buf[0]); 829 put_unaligned_be16(16, &buf[2]); 830 put_unaligned_be64(lba, &buf[8]); 831 put_unaligned_be32(nr_blocks, &buf[16]); 832 833 cmd->allowed = sdkp->max_retries; 834 cmd->transfersize = data_len; 835 rq->timeout = SD_TIMEOUT; 836 837 return scsi_alloc_sgtables(cmd); 838 } 839 840 static blk_status_t sd_setup_write_same16_cmnd(struct scsi_cmnd *cmd, 841 bool unmap) 842 { 843 struct scsi_device *sdp = cmd->device; 844 struct request *rq = scsi_cmd_to_rq(cmd); 845 struct scsi_disk *sdkp = scsi_disk(rq->q->disk); 846 u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq)); 847 u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq)); 848 u32 data_len = sdp->sector_size; 849 850 if (!sd_set_special_bvec(rq, data_len)) 851 return BLK_STS_RESOURCE; 852 853 cmd->cmd_len = 16; 854 cmd->cmnd[0] = WRITE_SAME_16; 855 if (unmap) 856 cmd->cmnd[1] = 0x8; /* UNMAP */ 857 put_unaligned_be64(lba, &cmd->cmnd[2]); 858 put_unaligned_be32(nr_blocks, &cmd->cmnd[10]); 859 860 cmd->allowed = sdkp->max_retries; 861 cmd->transfersize = data_len; 862 rq->timeout = unmap ? SD_TIMEOUT : SD_WRITE_SAME_TIMEOUT; 863 864 return scsi_alloc_sgtables(cmd); 865 } 866 867 static blk_status_t sd_setup_write_same10_cmnd(struct scsi_cmnd *cmd, 868 bool unmap) 869 { 870 struct scsi_device *sdp = cmd->device; 871 struct request *rq = scsi_cmd_to_rq(cmd); 872 struct scsi_disk *sdkp = scsi_disk(rq->q->disk); 873 u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq)); 874 u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq)); 875 u32 data_len = sdp->sector_size; 876 877 if (!sd_set_special_bvec(rq, data_len)) 878 return BLK_STS_RESOURCE; 879 880 cmd->cmd_len = 10; 881 cmd->cmnd[0] = WRITE_SAME; 882 if (unmap) 883 cmd->cmnd[1] = 0x8; /* UNMAP */ 884 put_unaligned_be32(lba, &cmd->cmnd[2]); 885 put_unaligned_be16(nr_blocks, &cmd->cmnd[7]); 886 887 cmd->allowed = sdkp->max_retries; 888 cmd->transfersize = data_len; 889 rq->timeout = unmap ? SD_TIMEOUT : SD_WRITE_SAME_TIMEOUT; 890 891 return scsi_alloc_sgtables(cmd); 892 } 893 894 static blk_status_t sd_setup_write_zeroes_cmnd(struct scsi_cmnd *cmd) 895 { 896 struct request *rq = scsi_cmd_to_rq(cmd); 897 struct scsi_device *sdp = cmd->device; 898 struct scsi_disk *sdkp = scsi_disk(rq->q->disk); 899 u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq)); 900 u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq)); 901 902 if (!(rq->cmd_flags & REQ_NOUNMAP)) { 903 switch (sdkp->zeroing_mode) { 904 case SD_ZERO_WS16_UNMAP: 905 return sd_setup_write_same16_cmnd(cmd, true); 906 case SD_ZERO_WS10_UNMAP: 907 return sd_setup_write_same10_cmnd(cmd, true); 908 } 909 } 910 911 if (sdp->no_write_same) { 912 rq->rq_flags |= RQF_QUIET; 913 return BLK_STS_TARGET; 914 } 915 916 if (sdkp->ws16 || lba > 0xffffffff || nr_blocks > 0xffff) 917 return sd_setup_write_same16_cmnd(cmd, false); 918 919 return sd_setup_write_same10_cmnd(cmd, false); 920 } 921 922 static void sd_config_write_same(struct scsi_disk *sdkp) 923 { 924 struct request_queue *q = sdkp->disk->queue; 925 unsigned int logical_block_size = sdkp->device->sector_size; 926 927 if (sdkp->device->no_write_same) { 928 sdkp->max_ws_blocks = 0; 929 goto out; 930 } 931 932 /* Some devices can not handle block counts above 0xffff despite 933 * supporting WRITE SAME(16). Consequently we default to 64k 934 * blocks per I/O unless the device explicitly advertises a 935 * bigger limit. 936 */ 937 if (sdkp->max_ws_blocks > SD_MAX_WS10_BLOCKS) 938 sdkp->max_ws_blocks = min_not_zero(sdkp->max_ws_blocks, 939 (u32)SD_MAX_WS16_BLOCKS); 940 else if (sdkp->ws16 || sdkp->ws10 || sdkp->device->no_report_opcodes) 941 sdkp->max_ws_blocks = min_not_zero(sdkp->max_ws_blocks, 942 (u32)SD_MAX_WS10_BLOCKS); 943 else { 944 sdkp->device->no_write_same = 1; 945 sdkp->max_ws_blocks = 0; 946 } 947 948 if (sdkp->lbprz && sdkp->lbpws) 949 sdkp->zeroing_mode = SD_ZERO_WS16_UNMAP; 950 else if (sdkp->lbprz && sdkp->lbpws10) 951 sdkp->zeroing_mode = SD_ZERO_WS10_UNMAP; 952 else if (sdkp->max_ws_blocks) 953 sdkp->zeroing_mode = SD_ZERO_WS; 954 else 955 sdkp->zeroing_mode = SD_ZERO_WRITE; 956 957 if (sdkp->max_ws_blocks && 958 sdkp->physical_block_size > logical_block_size) { 959 /* 960 * Reporting a maximum number of blocks that is not aligned 961 * on the device physical size would cause a large write same 962 * request to be split into physically unaligned chunks by 963 * __blkdev_issue_write_zeroes() even if the caller of this 964 * functions took care to align the large request. So make sure 965 * the maximum reported is aligned to the device physical block 966 * size. This is only an optional optimization for regular 967 * disks, but this is mandatory to avoid failure of large write 968 * same requests directed at sequential write required zones of 969 * host-managed ZBC disks. 970 */ 971 sdkp->max_ws_blocks = 972 round_down(sdkp->max_ws_blocks, 973 bytes_to_logical(sdkp->device, 974 sdkp->physical_block_size)); 975 } 976 977 out: 978 blk_queue_max_write_zeroes_sectors(q, sdkp->max_ws_blocks * 979 (logical_block_size >> 9)); 980 } 981 982 static blk_status_t sd_setup_flush_cmnd(struct scsi_cmnd *cmd) 983 { 984 struct request *rq = scsi_cmd_to_rq(cmd); 985 struct scsi_disk *sdkp = scsi_disk(rq->q->disk); 986 987 /* flush requests don't perform I/O, zero the S/G table */ 988 memset(&cmd->sdb, 0, sizeof(cmd->sdb)); 989 990 if (cmd->device->use_16_for_sync) { 991 cmd->cmnd[0] = SYNCHRONIZE_CACHE_16; 992 cmd->cmd_len = 16; 993 } else { 994 cmd->cmnd[0] = SYNCHRONIZE_CACHE; 995 cmd->cmd_len = 10; 996 } 997 cmd->transfersize = 0; 998 cmd->allowed = sdkp->max_retries; 999 1000 rq->timeout = rq->q->rq_timeout * SD_FLUSH_TIMEOUT_MULTIPLIER; 1001 return BLK_STS_OK; 1002 } 1003 1004 static blk_status_t sd_setup_rw32_cmnd(struct scsi_cmnd *cmd, bool write, 1005 sector_t lba, unsigned int nr_blocks, 1006 unsigned char flags, unsigned int dld) 1007 { 1008 cmd->cmd_len = SD_EXT_CDB_SIZE; 1009 cmd->cmnd[0] = VARIABLE_LENGTH_CMD; 1010 cmd->cmnd[7] = 0x18; /* Additional CDB len */ 1011 cmd->cmnd[9] = write ? WRITE_32 : READ_32; 1012 cmd->cmnd[10] = flags; 1013 cmd->cmnd[11] = dld & 0x07; 1014 put_unaligned_be64(lba, &cmd->cmnd[12]); 1015 put_unaligned_be32(lba, &cmd->cmnd[20]); /* Expected Indirect LBA */ 1016 put_unaligned_be32(nr_blocks, &cmd->cmnd[28]); 1017 1018 return BLK_STS_OK; 1019 } 1020 1021 static blk_status_t sd_setup_rw16_cmnd(struct scsi_cmnd *cmd, bool write, 1022 sector_t lba, unsigned int nr_blocks, 1023 unsigned char flags, unsigned int dld) 1024 { 1025 cmd->cmd_len = 16; 1026 cmd->cmnd[0] = write ? WRITE_16 : READ_16; 1027 cmd->cmnd[1] = flags | ((dld >> 2) & 0x01); 1028 cmd->cmnd[14] = (dld & 0x03) << 6; 1029 cmd->cmnd[15] = 0; 1030 put_unaligned_be64(lba, &cmd->cmnd[2]); 1031 put_unaligned_be32(nr_blocks, &cmd->cmnd[10]); 1032 1033 return BLK_STS_OK; 1034 } 1035 1036 static blk_status_t sd_setup_rw10_cmnd(struct scsi_cmnd *cmd, bool write, 1037 sector_t lba, unsigned int nr_blocks, 1038 unsigned char flags) 1039 { 1040 cmd->cmd_len = 10; 1041 cmd->cmnd[0] = write ? WRITE_10 : READ_10; 1042 cmd->cmnd[1] = flags; 1043 cmd->cmnd[6] = 0; 1044 cmd->cmnd[9] = 0; 1045 put_unaligned_be32(lba, &cmd->cmnd[2]); 1046 put_unaligned_be16(nr_blocks, &cmd->cmnd[7]); 1047 1048 return BLK_STS_OK; 1049 } 1050 1051 static blk_status_t sd_setup_rw6_cmnd(struct scsi_cmnd *cmd, bool write, 1052 sector_t lba, unsigned int nr_blocks, 1053 unsigned char flags) 1054 { 1055 /* Avoid that 0 blocks gets translated into 256 blocks. */ 1056 if (WARN_ON_ONCE(nr_blocks == 0)) 1057 return BLK_STS_IOERR; 1058 1059 if (unlikely(flags & 0x8)) { 1060 /* 1061 * This happens only if this drive failed 10byte rw 1062 * command with ILLEGAL_REQUEST during operation and 1063 * thus turned off use_10_for_rw. 1064 */ 1065 scmd_printk(KERN_ERR, cmd, "FUA write on READ/WRITE(6) drive\n"); 1066 return BLK_STS_IOERR; 1067 } 1068 1069 cmd->cmd_len = 6; 1070 cmd->cmnd[0] = write ? WRITE_6 : READ_6; 1071 cmd->cmnd[1] = (lba >> 16) & 0x1f; 1072 cmd->cmnd[2] = (lba >> 8) & 0xff; 1073 cmd->cmnd[3] = lba & 0xff; 1074 cmd->cmnd[4] = nr_blocks; 1075 cmd->cmnd[5] = 0; 1076 1077 return BLK_STS_OK; 1078 } 1079 1080 /* 1081 * Check if a command has a duration limit set. If it does, and the target 1082 * device supports CDL and the feature is enabled, return the limit 1083 * descriptor index to use. Return 0 (no limit) otherwise. 1084 */ 1085 static int sd_cdl_dld(struct scsi_disk *sdkp, struct scsi_cmnd *scmd) 1086 { 1087 struct scsi_device *sdp = sdkp->device; 1088 int hint; 1089 1090 if (!sdp->cdl_supported || !sdp->cdl_enable) 1091 return 0; 1092 1093 /* 1094 * Use "no limit" if the request ioprio does not specify a duration 1095 * limit hint. 1096 */ 1097 hint = IOPRIO_PRIO_HINT(req_get_ioprio(scsi_cmd_to_rq(scmd))); 1098 if (hint < IOPRIO_HINT_DEV_DURATION_LIMIT_1 || 1099 hint > IOPRIO_HINT_DEV_DURATION_LIMIT_7) 1100 return 0; 1101 1102 return (hint - IOPRIO_HINT_DEV_DURATION_LIMIT_1) + 1; 1103 } 1104 1105 static blk_status_t sd_setup_read_write_cmnd(struct scsi_cmnd *cmd) 1106 { 1107 struct request *rq = scsi_cmd_to_rq(cmd); 1108 struct scsi_device *sdp = cmd->device; 1109 struct scsi_disk *sdkp = scsi_disk(rq->q->disk); 1110 sector_t lba = sectors_to_logical(sdp, blk_rq_pos(rq)); 1111 sector_t threshold; 1112 unsigned int nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq)); 1113 unsigned int mask = logical_to_sectors(sdp, 1) - 1; 1114 bool write = rq_data_dir(rq) == WRITE; 1115 unsigned char protect, fua; 1116 unsigned int dld; 1117 blk_status_t ret; 1118 unsigned int dif; 1119 bool dix; 1120 1121 ret = scsi_alloc_sgtables(cmd); 1122 if (ret != BLK_STS_OK) 1123 return ret; 1124 1125 ret = BLK_STS_IOERR; 1126 if (!scsi_device_online(sdp) || sdp->changed) { 1127 scmd_printk(KERN_ERR, cmd, "device offline or changed\n"); 1128 goto fail; 1129 } 1130 1131 if (blk_rq_pos(rq) + blk_rq_sectors(rq) > get_capacity(rq->q->disk)) { 1132 scmd_printk(KERN_ERR, cmd, "access beyond end of device\n"); 1133 goto fail; 1134 } 1135 1136 if ((blk_rq_pos(rq) & mask) || (blk_rq_sectors(rq) & mask)) { 1137 scmd_printk(KERN_ERR, cmd, "request not aligned to the logical block size\n"); 1138 goto fail; 1139 } 1140 1141 /* 1142 * Some SD card readers can't handle accesses which touch the 1143 * last one or two logical blocks. Split accesses as needed. 1144 */ 1145 threshold = sdkp->capacity - SD_LAST_BUGGY_SECTORS; 1146 1147 if (unlikely(sdp->last_sector_bug && lba + nr_blocks > threshold)) { 1148 if (lba < threshold) { 1149 /* Access up to the threshold but not beyond */ 1150 nr_blocks = threshold - lba; 1151 } else { 1152 /* Access only a single logical block */ 1153 nr_blocks = 1; 1154 } 1155 } 1156 1157 if (req_op(rq) == REQ_OP_ZONE_APPEND) { 1158 ret = sd_zbc_prepare_zone_append(cmd, &lba, nr_blocks); 1159 if (ret) 1160 goto fail; 1161 } 1162 1163 fua = rq->cmd_flags & REQ_FUA ? 0x8 : 0; 1164 dix = scsi_prot_sg_count(cmd); 1165 dif = scsi_host_dif_capable(cmd->device->host, sdkp->protection_type); 1166 dld = sd_cdl_dld(sdkp, cmd); 1167 1168 if (dif || dix) 1169 protect = sd_setup_protect_cmnd(cmd, dix, dif); 1170 else 1171 protect = 0; 1172 1173 if (protect && sdkp->protection_type == T10_PI_TYPE2_PROTECTION) { 1174 ret = sd_setup_rw32_cmnd(cmd, write, lba, nr_blocks, 1175 protect | fua, dld); 1176 } else if (sdp->use_16_for_rw || (nr_blocks > 0xffff)) { 1177 ret = sd_setup_rw16_cmnd(cmd, write, lba, nr_blocks, 1178 protect | fua, dld); 1179 } else if ((nr_blocks > 0xff) || (lba > 0x1fffff) || 1180 sdp->use_10_for_rw || protect) { 1181 ret = sd_setup_rw10_cmnd(cmd, write, lba, nr_blocks, 1182 protect | fua); 1183 } else { 1184 ret = sd_setup_rw6_cmnd(cmd, write, lba, nr_blocks, 1185 protect | fua); 1186 } 1187 1188 if (unlikely(ret != BLK_STS_OK)) 1189 goto fail; 1190 1191 /* 1192 * We shouldn't disconnect in the middle of a sector, so with a dumb 1193 * host adapter, it's safe to assume that we can at least transfer 1194 * this many bytes between each connect / disconnect. 1195 */ 1196 cmd->transfersize = sdp->sector_size; 1197 cmd->underflow = nr_blocks << 9; 1198 cmd->allowed = sdkp->max_retries; 1199 cmd->sdb.length = nr_blocks * sdp->sector_size; 1200 1201 SCSI_LOG_HLQUEUE(1, 1202 scmd_printk(KERN_INFO, cmd, 1203 "%s: block=%llu, count=%d\n", __func__, 1204 (unsigned long long)blk_rq_pos(rq), 1205 blk_rq_sectors(rq))); 1206 SCSI_LOG_HLQUEUE(2, 1207 scmd_printk(KERN_INFO, cmd, 1208 "%s %d/%u 512 byte blocks.\n", 1209 write ? "writing" : "reading", nr_blocks, 1210 blk_rq_sectors(rq))); 1211 1212 /* 1213 * This indicates that the command is ready from our end to be queued. 1214 */ 1215 return BLK_STS_OK; 1216 fail: 1217 scsi_free_sgtables(cmd); 1218 return ret; 1219 } 1220 1221 static blk_status_t sd_init_command(struct scsi_cmnd *cmd) 1222 { 1223 struct request *rq = scsi_cmd_to_rq(cmd); 1224 1225 switch (req_op(rq)) { 1226 case REQ_OP_DISCARD: 1227 switch (scsi_disk(rq->q->disk)->provisioning_mode) { 1228 case SD_LBP_UNMAP: 1229 return sd_setup_unmap_cmnd(cmd); 1230 case SD_LBP_WS16: 1231 return sd_setup_write_same16_cmnd(cmd, true); 1232 case SD_LBP_WS10: 1233 return sd_setup_write_same10_cmnd(cmd, true); 1234 case SD_LBP_ZERO: 1235 return sd_setup_write_same10_cmnd(cmd, false); 1236 default: 1237 return BLK_STS_TARGET; 1238 } 1239 case REQ_OP_WRITE_ZEROES: 1240 return sd_setup_write_zeroes_cmnd(cmd); 1241 case REQ_OP_FLUSH: 1242 return sd_setup_flush_cmnd(cmd); 1243 case REQ_OP_READ: 1244 case REQ_OP_WRITE: 1245 case REQ_OP_ZONE_APPEND: 1246 return sd_setup_read_write_cmnd(cmd); 1247 case REQ_OP_ZONE_RESET: 1248 return sd_zbc_setup_zone_mgmt_cmnd(cmd, ZO_RESET_WRITE_POINTER, 1249 false); 1250 case REQ_OP_ZONE_RESET_ALL: 1251 return sd_zbc_setup_zone_mgmt_cmnd(cmd, ZO_RESET_WRITE_POINTER, 1252 true); 1253 case REQ_OP_ZONE_OPEN: 1254 return sd_zbc_setup_zone_mgmt_cmnd(cmd, ZO_OPEN_ZONE, false); 1255 case REQ_OP_ZONE_CLOSE: 1256 return sd_zbc_setup_zone_mgmt_cmnd(cmd, ZO_CLOSE_ZONE, false); 1257 case REQ_OP_ZONE_FINISH: 1258 return sd_zbc_setup_zone_mgmt_cmnd(cmd, ZO_FINISH_ZONE, false); 1259 default: 1260 WARN_ON_ONCE(1); 1261 return BLK_STS_NOTSUPP; 1262 } 1263 } 1264 1265 static void sd_uninit_command(struct scsi_cmnd *SCpnt) 1266 { 1267 struct request *rq = scsi_cmd_to_rq(SCpnt); 1268 1269 if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) 1270 mempool_free(rq->special_vec.bv_page, sd_page_pool); 1271 } 1272 1273 static bool sd_need_revalidate(struct gendisk *disk, struct scsi_disk *sdkp) 1274 { 1275 if (sdkp->device->removable || sdkp->write_prot) { 1276 if (disk_check_media_change(disk)) 1277 return true; 1278 } 1279 1280 /* 1281 * Force a full rescan after ioctl(BLKRRPART). While the disk state has 1282 * nothing to do with partitions, BLKRRPART is used to force a full 1283 * revalidate after things like a format for historical reasons. 1284 */ 1285 return test_bit(GD_NEED_PART_SCAN, &disk->state); 1286 } 1287 1288 /** 1289 * sd_open - open a scsi disk device 1290 * @disk: disk to open 1291 * @mode: open mode 1292 * 1293 * Returns 0 if successful. Returns a negated errno value in case 1294 * of error. 1295 * 1296 * Note: This can be called from a user context (e.g. fsck(1) ) 1297 * or from within the kernel (e.g. as a result of a mount(1) ). 1298 * In the latter case @inode and @filp carry an abridged amount 1299 * of information as noted above. 1300 * 1301 * Locking: called with disk->open_mutex held. 1302 **/ 1303 static int sd_open(struct gendisk *disk, blk_mode_t mode) 1304 { 1305 struct scsi_disk *sdkp = scsi_disk(disk); 1306 struct scsi_device *sdev = sdkp->device; 1307 int retval; 1308 1309 if (scsi_device_get(sdev)) 1310 return -ENXIO; 1311 1312 SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_open\n")); 1313 1314 /* 1315 * If the device is in error recovery, wait until it is done. 1316 * If the device is offline, then disallow any access to it. 1317 */ 1318 retval = -ENXIO; 1319 if (!scsi_block_when_processing_errors(sdev)) 1320 goto error_out; 1321 1322 if (sd_need_revalidate(disk, sdkp)) 1323 sd_revalidate_disk(disk); 1324 1325 /* 1326 * If the drive is empty, just let the open fail. 1327 */ 1328 retval = -ENOMEDIUM; 1329 if (sdev->removable && !sdkp->media_present && 1330 !(mode & BLK_OPEN_NDELAY)) 1331 goto error_out; 1332 1333 /* 1334 * If the device has the write protect tab set, have the open fail 1335 * if the user expects to be able to write to the thing. 1336 */ 1337 retval = -EROFS; 1338 if (sdkp->write_prot && (mode & BLK_OPEN_WRITE)) 1339 goto error_out; 1340 1341 /* 1342 * It is possible that the disk changing stuff resulted in 1343 * the device being taken offline. If this is the case, 1344 * report this to the user, and don't pretend that the 1345 * open actually succeeded. 1346 */ 1347 retval = -ENXIO; 1348 if (!scsi_device_online(sdev)) 1349 goto error_out; 1350 1351 if ((atomic_inc_return(&sdkp->openers) == 1) && sdev->removable) { 1352 if (scsi_block_when_processing_errors(sdev)) 1353 scsi_set_medium_removal(sdev, SCSI_REMOVAL_PREVENT); 1354 } 1355 1356 return 0; 1357 1358 error_out: 1359 scsi_device_put(sdev); 1360 return retval; 1361 } 1362 1363 /** 1364 * sd_release - invoked when the (last) close(2) is called on this 1365 * scsi disk. 1366 * @disk: disk to release 1367 * 1368 * Returns 0. 1369 * 1370 * Note: may block (uninterruptible) if error recovery is underway 1371 * on this disk. 1372 * 1373 * Locking: called with disk->open_mutex held. 1374 **/ 1375 static void sd_release(struct gendisk *disk) 1376 { 1377 struct scsi_disk *sdkp = scsi_disk(disk); 1378 struct scsi_device *sdev = sdkp->device; 1379 1380 SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_release\n")); 1381 1382 if (atomic_dec_return(&sdkp->openers) == 0 && sdev->removable) { 1383 if (scsi_block_when_processing_errors(sdev)) 1384 scsi_set_medium_removal(sdev, SCSI_REMOVAL_ALLOW); 1385 } 1386 1387 scsi_device_put(sdev); 1388 } 1389 1390 static int sd_getgeo(struct block_device *bdev, struct hd_geometry *geo) 1391 { 1392 struct scsi_disk *sdkp = scsi_disk(bdev->bd_disk); 1393 struct scsi_device *sdp = sdkp->device; 1394 struct Scsi_Host *host = sdp->host; 1395 sector_t capacity = logical_to_sectors(sdp, sdkp->capacity); 1396 int diskinfo[4]; 1397 1398 /* default to most commonly used values */ 1399 diskinfo[0] = 0x40; /* 1 << 6 */ 1400 diskinfo[1] = 0x20; /* 1 << 5 */ 1401 diskinfo[2] = capacity >> 11; 1402 1403 /* override with calculated, extended default, or driver values */ 1404 if (host->hostt->bios_param) 1405 host->hostt->bios_param(sdp, bdev, capacity, diskinfo); 1406 else 1407 scsicam_bios_param(bdev, capacity, diskinfo); 1408 1409 geo->heads = diskinfo[0]; 1410 geo->sectors = diskinfo[1]; 1411 geo->cylinders = diskinfo[2]; 1412 return 0; 1413 } 1414 1415 /** 1416 * sd_ioctl - process an ioctl 1417 * @bdev: target block device 1418 * @mode: open mode 1419 * @cmd: ioctl command number 1420 * @arg: this is third argument given to ioctl(2) system call. 1421 * Often contains a pointer. 1422 * 1423 * Returns 0 if successful (some ioctls return positive numbers on 1424 * success as well). Returns a negated errno value in case of error. 1425 * 1426 * Note: most ioctls are forward onto the block subsystem or further 1427 * down in the scsi subsystem. 1428 **/ 1429 static int sd_ioctl(struct block_device *bdev, blk_mode_t mode, 1430 unsigned int cmd, unsigned long arg) 1431 { 1432 struct gendisk *disk = bdev->bd_disk; 1433 struct scsi_disk *sdkp = scsi_disk(disk); 1434 struct scsi_device *sdp = sdkp->device; 1435 void __user *p = (void __user *)arg; 1436 int error; 1437 1438 SCSI_LOG_IOCTL(1, sd_printk(KERN_INFO, sdkp, "sd_ioctl: disk=%s, " 1439 "cmd=0x%x\n", disk->disk_name, cmd)); 1440 1441 if (bdev_is_partition(bdev) && !capable(CAP_SYS_RAWIO)) 1442 return -ENOIOCTLCMD; 1443 1444 /* 1445 * If we are in the middle of error recovery, don't let anyone 1446 * else try and use this device. Also, if error recovery fails, it 1447 * may try and take the device offline, in which case all further 1448 * access to the device is prohibited. 1449 */ 1450 error = scsi_ioctl_block_when_processing_errors(sdp, cmd, 1451 (mode & BLK_OPEN_NDELAY)); 1452 if (error) 1453 return error; 1454 1455 if (is_sed_ioctl(cmd)) 1456 return sed_ioctl(sdkp->opal_dev, cmd, p); 1457 return scsi_ioctl(sdp, mode & BLK_OPEN_WRITE, cmd, p); 1458 } 1459 1460 static void set_media_not_present(struct scsi_disk *sdkp) 1461 { 1462 if (sdkp->media_present) 1463 sdkp->device->changed = 1; 1464 1465 if (sdkp->device->removable) { 1466 sdkp->media_present = 0; 1467 sdkp->capacity = 0; 1468 } 1469 } 1470 1471 static int media_not_present(struct scsi_disk *sdkp, 1472 struct scsi_sense_hdr *sshdr) 1473 { 1474 if (!scsi_sense_valid(sshdr)) 1475 return 0; 1476 1477 /* not invoked for commands that could return deferred errors */ 1478 switch (sshdr->sense_key) { 1479 case UNIT_ATTENTION: 1480 case NOT_READY: 1481 /* medium not present */ 1482 if (sshdr->asc == 0x3A) { 1483 set_media_not_present(sdkp); 1484 return 1; 1485 } 1486 } 1487 return 0; 1488 } 1489 1490 /** 1491 * sd_check_events - check media events 1492 * @disk: kernel device descriptor 1493 * @clearing: disk events currently being cleared 1494 * 1495 * Returns mask of DISK_EVENT_*. 1496 * 1497 * Note: this function is invoked from the block subsystem. 1498 **/ 1499 static unsigned int sd_check_events(struct gendisk *disk, unsigned int clearing) 1500 { 1501 struct scsi_disk *sdkp = disk->private_data; 1502 struct scsi_device *sdp; 1503 int retval; 1504 bool disk_changed; 1505 1506 if (!sdkp) 1507 return 0; 1508 1509 sdp = sdkp->device; 1510 SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_check_events\n")); 1511 1512 /* 1513 * If the device is offline, don't send any commands - just pretend as 1514 * if the command failed. If the device ever comes back online, we 1515 * can deal with it then. It is only because of unrecoverable errors 1516 * that we would ever take a device offline in the first place. 1517 */ 1518 if (!scsi_device_online(sdp)) { 1519 set_media_not_present(sdkp); 1520 goto out; 1521 } 1522 1523 /* 1524 * Using TEST_UNIT_READY enables differentiation between drive with 1525 * no cartridge loaded - NOT READY, drive with changed cartridge - 1526 * UNIT ATTENTION, or with same cartridge - GOOD STATUS. 1527 * 1528 * Drives that auto spin down. eg iomega jaz 1G, will be started 1529 * by sd_spinup_disk() from sd_revalidate_disk(), which happens whenever 1530 * sd_revalidate() is called. 1531 */ 1532 if (scsi_block_when_processing_errors(sdp)) { 1533 struct scsi_sense_hdr sshdr = { 0, }; 1534 1535 retval = scsi_test_unit_ready(sdp, SD_TIMEOUT, sdkp->max_retries, 1536 &sshdr); 1537 1538 /* failed to execute TUR, assume media not present */ 1539 if (retval < 0 || host_byte(retval)) { 1540 set_media_not_present(sdkp); 1541 goto out; 1542 } 1543 1544 if (media_not_present(sdkp, &sshdr)) 1545 goto out; 1546 } 1547 1548 /* 1549 * For removable scsi disk we have to recognise the presence 1550 * of a disk in the drive. 1551 */ 1552 if (!sdkp->media_present) 1553 sdp->changed = 1; 1554 sdkp->media_present = 1; 1555 out: 1556 /* 1557 * sdp->changed is set under the following conditions: 1558 * 1559 * Medium present state has changed in either direction. 1560 * Device has indicated UNIT_ATTENTION. 1561 */ 1562 disk_changed = sdp->changed; 1563 sdp->changed = 0; 1564 return disk_changed ? DISK_EVENT_MEDIA_CHANGE : 0; 1565 } 1566 1567 static int sd_sync_cache(struct scsi_disk *sdkp, struct scsi_sense_hdr *sshdr) 1568 { 1569 int retries, res; 1570 struct scsi_device *sdp = sdkp->device; 1571 const int timeout = sdp->request_queue->rq_timeout 1572 * SD_FLUSH_TIMEOUT_MULTIPLIER; 1573 struct scsi_sense_hdr my_sshdr; 1574 const struct scsi_exec_args exec_args = { 1575 .req_flags = BLK_MQ_REQ_PM, 1576 /* caller might not be interested in sense, but we need it */ 1577 .sshdr = sshdr ? : &my_sshdr, 1578 }; 1579 1580 if (!scsi_device_online(sdp)) 1581 return -ENODEV; 1582 1583 sshdr = exec_args.sshdr; 1584 1585 for (retries = 3; retries > 0; --retries) { 1586 unsigned char cmd[16] = { 0 }; 1587 1588 if (sdp->use_16_for_sync) 1589 cmd[0] = SYNCHRONIZE_CACHE_16; 1590 else 1591 cmd[0] = SYNCHRONIZE_CACHE; 1592 /* 1593 * Leave the rest of the command zero to indicate 1594 * flush everything. 1595 */ 1596 res = scsi_execute_cmd(sdp, cmd, REQ_OP_DRV_IN, NULL, 0, 1597 timeout, sdkp->max_retries, &exec_args); 1598 if (res == 0) 1599 break; 1600 } 1601 1602 if (res) { 1603 sd_print_result(sdkp, "Synchronize Cache(10) failed", res); 1604 1605 if (res < 0) 1606 return res; 1607 1608 if (scsi_status_is_check_condition(res) && 1609 scsi_sense_valid(sshdr)) { 1610 sd_print_sense_hdr(sdkp, sshdr); 1611 1612 /* we need to evaluate the error return */ 1613 if (sshdr->asc == 0x3a || /* medium not present */ 1614 sshdr->asc == 0x20 || /* invalid command */ 1615 (sshdr->asc == 0x74 && sshdr->ascq == 0x71)) /* drive is password locked */ 1616 /* this is no error here */ 1617 return 0; 1618 } 1619 1620 switch (host_byte(res)) { 1621 /* ignore errors due to racing a disconnection */ 1622 case DID_BAD_TARGET: 1623 case DID_NO_CONNECT: 1624 return 0; 1625 /* signal the upper layer it might try again */ 1626 case DID_BUS_BUSY: 1627 case DID_IMM_RETRY: 1628 case DID_REQUEUE: 1629 case DID_SOFT_ERROR: 1630 return -EBUSY; 1631 default: 1632 return -EIO; 1633 } 1634 } 1635 return 0; 1636 } 1637 1638 static void sd_rescan(struct device *dev) 1639 { 1640 struct scsi_disk *sdkp = dev_get_drvdata(dev); 1641 1642 sd_revalidate_disk(sdkp->disk); 1643 } 1644 1645 static int sd_get_unique_id(struct gendisk *disk, u8 id[16], 1646 enum blk_unique_id type) 1647 { 1648 struct scsi_device *sdev = scsi_disk(disk)->device; 1649 const struct scsi_vpd *vpd; 1650 const unsigned char *d; 1651 int ret = -ENXIO, len; 1652 1653 rcu_read_lock(); 1654 vpd = rcu_dereference(sdev->vpd_pg83); 1655 if (!vpd) 1656 goto out_unlock; 1657 1658 ret = -EINVAL; 1659 for (d = vpd->data + 4; d < vpd->data + vpd->len; d += d[3] + 4) { 1660 /* we only care about designators with LU association */ 1661 if (((d[1] >> 4) & 0x3) != 0x00) 1662 continue; 1663 if ((d[1] & 0xf) != type) 1664 continue; 1665 1666 /* 1667 * Only exit early if a 16-byte descriptor was found. Otherwise 1668 * keep looking as one with more entropy might still show up. 1669 */ 1670 len = d[3]; 1671 if (len != 8 && len != 12 && len != 16) 1672 continue; 1673 ret = len; 1674 memcpy(id, d + 4, len); 1675 if (len == 16) 1676 break; 1677 } 1678 out_unlock: 1679 rcu_read_unlock(); 1680 return ret; 1681 } 1682 1683 static int sd_scsi_to_pr_err(struct scsi_sense_hdr *sshdr, int result) 1684 { 1685 switch (host_byte(result)) { 1686 case DID_TRANSPORT_MARGINAL: 1687 case DID_TRANSPORT_DISRUPTED: 1688 case DID_BUS_BUSY: 1689 return PR_STS_RETRY_PATH_FAILURE; 1690 case DID_NO_CONNECT: 1691 return PR_STS_PATH_FAILED; 1692 case DID_TRANSPORT_FAILFAST: 1693 return PR_STS_PATH_FAST_FAILED; 1694 } 1695 1696 switch (status_byte(result)) { 1697 case SAM_STAT_RESERVATION_CONFLICT: 1698 return PR_STS_RESERVATION_CONFLICT; 1699 case SAM_STAT_CHECK_CONDITION: 1700 if (!scsi_sense_valid(sshdr)) 1701 return PR_STS_IOERR; 1702 1703 if (sshdr->sense_key == ILLEGAL_REQUEST && 1704 (sshdr->asc == 0x26 || sshdr->asc == 0x24)) 1705 return -EINVAL; 1706 1707 fallthrough; 1708 default: 1709 return PR_STS_IOERR; 1710 } 1711 } 1712 1713 static int sd_pr_in_command(struct block_device *bdev, u8 sa, 1714 unsigned char *data, int data_len) 1715 { 1716 struct scsi_disk *sdkp = scsi_disk(bdev->bd_disk); 1717 struct scsi_device *sdev = sdkp->device; 1718 struct scsi_sense_hdr sshdr; 1719 u8 cmd[10] = { PERSISTENT_RESERVE_IN, sa }; 1720 const struct scsi_exec_args exec_args = { 1721 .sshdr = &sshdr, 1722 }; 1723 int result; 1724 1725 put_unaligned_be16(data_len, &cmd[7]); 1726 1727 result = scsi_execute_cmd(sdev, cmd, REQ_OP_DRV_IN, data, data_len, 1728 SD_TIMEOUT, sdkp->max_retries, &exec_args); 1729 if (scsi_status_is_check_condition(result) && 1730 scsi_sense_valid(&sshdr)) { 1731 sdev_printk(KERN_INFO, sdev, "PR command failed: %d\n", result); 1732 scsi_print_sense_hdr(sdev, NULL, &sshdr); 1733 } 1734 1735 if (result <= 0) 1736 return result; 1737 1738 return sd_scsi_to_pr_err(&sshdr, result); 1739 } 1740 1741 static int sd_pr_read_keys(struct block_device *bdev, struct pr_keys *keys_info) 1742 { 1743 int result, i, data_offset, num_copy_keys; 1744 u32 num_keys = keys_info->num_keys; 1745 int data_len = num_keys * 8 + 8; 1746 u8 *data; 1747 1748 data = kzalloc(data_len, GFP_KERNEL); 1749 if (!data) 1750 return -ENOMEM; 1751 1752 result = sd_pr_in_command(bdev, READ_KEYS, data, data_len); 1753 if (result) 1754 goto free_data; 1755 1756 keys_info->generation = get_unaligned_be32(&data[0]); 1757 keys_info->num_keys = get_unaligned_be32(&data[4]) / 8; 1758 1759 data_offset = 8; 1760 num_copy_keys = min(num_keys, keys_info->num_keys); 1761 1762 for (i = 0; i < num_copy_keys; i++) { 1763 keys_info->keys[i] = get_unaligned_be64(&data[data_offset]); 1764 data_offset += 8; 1765 } 1766 1767 free_data: 1768 kfree(data); 1769 return result; 1770 } 1771 1772 static int sd_pr_read_reservation(struct block_device *bdev, 1773 struct pr_held_reservation *rsv) 1774 { 1775 struct scsi_disk *sdkp = scsi_disk(bdev->bd_disk); 1776 struct scsi_device *sdev = sdkp->device; 1777 u8 data[24] = { }; 1778 int result, len; 1779 1780 result = sd_pr_in_command(bdev, READ_RESERVATION, data, sizeof(data)); 1781 if (result) 1782 return result; 1783 1784 len = get_unaligned_be32(&data[4]); 1785 if (!len) 1786 return 0; 1787 1788 /* Make sure we have at least the key and type */ 1789 if (len < 14) { 1790 sdev_printk(KERN_INFO, sdev, 1791 "READ RESERVATION failed due to short return buffer of %d bytes\n", 1792 len); 1793 return -EINVAL; 1794 } 1795 1796 rsv->generation = get_unaligned_be32(&data[0]); 1797 rsv->key = get_unaligned_be64(&data[8]); 1798 rsv->type = scsi_pr_type_to_block(data[21] & 0x0f); 1799 return 0; 1800 } 1801 1802 static int sd_pr_out_command(struct block_device *bdev, u8 sa, u64 key, 1803 u64 sa_key, enum scsi_pr_type type, u8 flags) 1804 { 1805 struct scsi_disk *sdkp = scsi_disk(bdev->bd_disk); 1806 struct scsi_device *sdev = sdkp->device; 1807 struct scsi_sense_hdr sshdr; 1808 const struct scsi_exec_args exec_args = { 1809 .sshdr = &sshdr, 1810 }; 1811 int result; 1812 u8 cmd[16] = { 0, }; 1813 u8 data[24] = { 0, }; 1814 1815 cmd[0] = PERSISTENT_RESERVE_OUT; 1816 cmd[1] = sa; 1817 cmd[2] = type; 1818 put_unaligned_be32(sizeof(data), &cmd[5]); 1819 1820 put_unaligned_be64(key, &data[0]); 1821 put_unaligned_be64(sa_key, &data[8]); 1822 data[20] = flags; 1823 1824 result = scsi_execute_cmd(sdev, cmd, REQ_OP_DRV_OUT, &data, 1825 sizeof(data), SD_TIMEOUT, sdkp->max_retries, 1826 &exec_args); 1827 1828 if (scsi_status_is_check_condition(result) && 1829 scsi_sense_valid(&sshdr)) { 1830 sdev_printk(KERN_INFO, sdev, "PR command failed: %d\n", result); 1831 scsi_print_sense_hdr(sdev, NULL, &sshdr); 1832 } 1833 1834 if (result <= 0) 1835 return result; 1836 1837 return sd_scsi_to_pr_err(&sshdr, result); 1838 } 1839 1840 static int sd_pr_register(struct block_device *bdev, u64 old_key, u64 new_key, 1841 u32 flags) 1842 { 1843 if (flags & ~PR_FL_IGNORE_KEY) 1844 return -EOPNOTSUPP; 1845 return sd_pr_out_command(bdev, (flags & PR_FL_IGNORE_KEY) ? 0x06 : 0x00, 1846 old_key, new_key, 0, 1847 (1 << 0) /* APTPL */); 1848 } 1849 1850 static int sd_pr_reserve(struct block_device *bdev, u64 key, enum pr_type type, 1851 u32 flags) 1852 { 1853 if (flags) 1854 return -EOPNOTSUPP; 1855 return sd_pr_out_command(bdev, 0x01, key, 0, 1856 block_pr_type_to_scsi(type), 0); 1857 } 1858 1859 static int sd_pr_release(struct block_device *bdev, u64 key, enum pr_type type) 1860 { 1861 return sd_pr_out_command(bdev, 0x02, key, 0, 1862 block_pr_type_to_scsi(type), 0); 1863 } 1864 1865 static int sd_pr_preempt(struct block_device *bdev, u64 old_key, u64 new_key, 1866 enum pr_type type, bool abort) 1867 { 1868 return sd_pr_out_command(bdev, abort ? 0x05 : 0x04, old_key, new_key, 1869 block_pr_type_to_scsi(type), 0); 1870 } 1871 1872 static int sd_pr_clear(struct block_device *bdev, u64 key) 1873 { 1874 return sd_pr_out_command(bdev, 0x03, key, 0, 0, 0); 1875 } 1876 1877 static const struct pr_ops sd_pr_ops = { 1878 .pr_register = sd_pr_register, 1879 .pr_reserve = sd_pr_reserve, 1880 .pr_release = sd_pr_release, 1881 .pr_preempt = sd_pr_preempt, 1882 .pr_clear = sd_pr_clear, 1883 .pr_read_keys = sd_pr_read_keys, 1884 .pr_read_reservation = sd_pr_read_reservation, 1885 }; 1886 1887 static void scsi_disk_free_disk(struct gendisk *disk) 1888 { 1889 struct scsi_disk *sdkp = scsi_disk(disk); 1890 1891 put_device(&sdkp->disk_dev); 1892 } 1893 1894 static const struct block_device_operations sd_fops = { 1895 .owner = THIS_MODULE, 1896 .open = sd_open, 1897 .release = sd_release, 1898 .ioctl = sd_ioctl, 1899 .getgeo = sd_getgeo, 1900 .compat_ioctl = blkdev_compat_ptr_ioctl, 1901 .check_events = sd_check_events, 1902 .unlock_native_capacity = sd_unlock_native_capacity, 1903 .report_zones = sd_zbc_report_zones, 1904 .get_unique_id = sd_get_unique_id, 1905 .free_disk = scsi_disk_free_disk, 1906 .pr_ops = &sd_pr_ops, 1907 }; 1908 1909 /** 1910 * sd_eh_reset - reset error handling callback 1911 * @scmd: sd-issued command that has failed 1912 * 1913 * This function is called by the SCSI midlayer before starting 1914 * SCSI EH. When counting medium access failures we have to be 1915 * careful to register it only only once per device and SCSI EH run; 1916 * there might be several timed out commands which will cause the 1917 * 'max_medium_access_timeouts' counter to trigger after the first 1918 * SCSI EH run already and set the device to offline. 1919 * So this function resets the internal counter before starting SCSI EH. 1920 **/ 1921 static void sd_eh_reset(struct scsi_cmnd *scmd) 1922 { 1923 struct scsi_disk *sdkp = scsi_disk(scsi_cmd_to_rq(scmd)->q->disk); 1924 1925 /* New SCSI EH run, reset gate variable */ 1926 sdkp->ignore_medium_access_errors = false; 1927 } 1928 1929 /** 1930 * sd_eh_action - error handling callback 1931 * @scmd: sd-issued command that has failed 1932 * @eh_disp: The recovery disposition suggested by the midlayer 1933 * 1934 * This function is called by the SCSI midlayer upon completion of an 1935 * error test command (currently TEST UNIT READY). The result of sending 1936 * the eh command is passed in eh_disp. We're looking for devices that 1937 * fail medium access commands but are OK with non access commands like 1938 * test unit ready (so wrongly see the device as having a successful 1939 * recovery) 1940 **/ 1941 static int sd_eh_action(struct scsi_cmnd *scmd, int eh_disp) 1942 { 1943 struct scsi_disk *sdkp = scsi_disk(scsi_cmd_to_rq(scmd)->q->disk); 1944 struct scsi_device *sdev = scmd->device; 1945 1946 if (!scsi_device_online(sdev) || 1947 !scsi_medium_access_command(scmd) || 1948 host_byte(scmd->result) != DID_TIME_OUT || 1949 eh_disp != SUCCESS) 1950 return eh_disp; 1951 1952 /* 1953 * The device has timed out executing a medium access command. 1954 * However, the TEST UNIT READY command sent during error 1955 * handling completed successfully. Either the device is in the 1956 * process of recovering or has it suffered an internal failure 1957 * that prevents access to the storage medium. 1958 */ 1959 if (!sdkp->ignore_medium_access_errors) { 1960 sdkp->medium_access_timed_out++; 1961 sdkp->ignore_medium_access_errors = true; 1962 } 1963 1964 /* 1965 * If the device keeps failing read/write commands but TEST UNIT 1966 * READY always completes successfully we assume that medium 1967 * access is no longer possible and take the device offline. 1968 */ 1969 if (sdkp->medium_access_timed_out >= sdkp->max_medium_access_timeouts) { 1970 scmd_printk(KERN_ERR, scmd, 1971 "Medium access timeout failure. Offlining disk!\n"); 1972 mutex_lock(&sdev->state_mutex); 1973 scsi_device_set_state(sdev, SDEV_OFFLINE); 1974 mutex_unlock(&sdev->state_mutex); 1975 1976 return SUCCESS; 1977 } 1978 1979 return eh_disp; 1980 } 1981 1982 static unsigned int sd_completed_bytes(struct scsi_cmnd *scmd) 1983 { 1984 struct request *req = scsi_cmd_to_rq(scmd); 1985 struct scsi_device *sdev = scmd->device; 1986 unsigned int transferred, good_bytes; 1987 u64 start_lba, end_lba, bad_lba; 1988 1989 /* 1990 * Some commands have a payload smaller than the device logical 1991 * block size (e.g. INQUIRY on a 4K disk). 1992 */ 1993 if (scsi_bufflen(scmd) <= sdev->sector_size) 1994 return 0; 1995 1996 /* Check if we have a 'bad_lba' information */ 1997 if (!scsi_get_sense_info_fld(scmd->sense_buffer, 1998 SCSI_SENSE_BUFFERSIZE, 1999 &bad_lba)) 2000 return 0; 2001 2002 /* 2003 * If the bad lba was reported incorrectly, we have no idea where 2004 * the error is. 2005 */ 2006 start_lba = sectors_to_logical(sdev, blk_rq_pos(req)); 2007 end_lba = start_lba + bytes_to_logical(sdev, scsi_bufflen(scmd)); 2008 if (bad_lba < start_lba || bad_lba >= end_lba) 2009 return 0; 2010 2011 /* 2012 * resid is optional but mostly filled in. When it's unused, 2013 * its value is zero, so we assume the whole buffer transferred 2014 */ 2015 transferred = scsi_bufflen(scmd) - scsi_get_resid(scmd); 2016 2017 /* This computation should always be done in terms of the 2018 * resolution of the device's medium. 2019 */ 2020 good_bytes = logical_to_bytes(sdev, bad_lba - start_lba); 2021 2022 return min(good_bytes, transferred); 2023 } 2024 2025 /** 2026 * sd_done - bottom half handler: called when the lower level 2027 * driver has completed (successfully or otherwise) a scsi command. 2028 * @SCpnt: mid-level's per command structure. 2029 * 2030 * Note: potentially run from within an ISR. Must not block. 2031 **/ 2032 static int sd_done(struct scsi_cmnd *SCpnt) 2033 { 2034 int result = SCpnt->result; 2035 unsigned int good_bytes = result ? 0 : scsi_bufflen(SCpnt); 2036 unsigned int sector_size = SCpnt->device->sector_size; 2037 unsigned int resid; 2038 struct scsi_sense_hdr sshdr; 2039 struct request *req = scsi_cmd_to_rq(SCpnt); 2040 struct scsi_disk *sdkp = scsi_disk(req->q->disk); 2041 int sense_valid = 0; 2042 int sense_deferred = 0; 2043 2044 switch (req_op(req)) { 2045 case REQ_OP_DISCARD: 2046 case REQ_OP_WRITE_ZEROES: 2047 case REQ_OP_ZONE_RESET: 2048 case REQ_OP_ZONE_RESET_ALL: 2049 case REQ_OP_ZONE_OPEN: 2050 case REQ_OP_ZONE_CLOSE: 2051 case REQ_OP_ZONE_FINISH: 2052 if (!result) { 2053 good_bytes = blk_rq_bytes(req); 2054 scsi_set_resid(SCpnt, 0); 2055 } else { 2056 good_bytes = 0; 2057 scsi_set_resid(SCpnt, blk_rq_bytes(req)); 2058 } 2059 break; 2060 default: 2061 /* 2062 * In case of bogus fw or device, we could end up having 2063 * an unaligned partial completion. Check this here and force 2064 * alignment. 2065 */ 2066 resid = scsi_get_resid(SCpnt); 2067 if (resid & (sector_size - 1)) { 2068 sd_printk(KERN_INFO, sdkp, 2069 "Unaligned partial completion (resid=%u, sector_sz=%u)\n", 2070 resid, sector_size); 2071 scsi_print_command(SCpnt); 2072 resid = min(scsi_bufflen(SCpnt), 2073 round_up(resid, sector_size)); 2074 scsi_set_resid(SCpnt, resid); 2075 } 2076 } 2077 2078 if (result) { 2079 sense_valid = scsi_command_normalize_sense(SCpnt, &sshdr); 2080 if (sense_valid) 2081 sense_deferred = scsi_sense_is_deferred(&sshdr); 2082 } 2083 sdkp->medium_access_timed_out = 0; 2084 2085 if (!scsi_status_is_check_condition(result) && 2086 (!sense_valid || sense_deferred)) 2087 goto out; 2088 2089 switch (sshdr.sense_key) { 2090 case HARDWARE_ERROR: 2091 case MEDIUM_ERROR: 2092 good_bytes = sd_completed_bytes(SCpnt); 2093 break; 2094 case RECOVERED_ERROR: 2095 good_bytes = scsi_bufflen(SCpnt); 2096 break; 2097 case NO_SENSE: 2098 /* This indicates a false check condition, so ignore it. An 2099 * unknown amount of data was transferred so treat it as an 2100 * error. 2101 */ 2102 SCpnt->result = 0; 2103 memset(SCpnt->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); 2104 break; 2105 case ABORTED_COMMAND: 2106 if (sshdr.asc == 0x10) /* DIF: Target detected corruption */ 2107 good_bytes = sd_completed_bytes(SCpnt); 2108 break; 2109 case ILLEGAL_REQUEST: 2110 switch (sshdr.asc) { 2111 case 0x10: /* DIX: Host detected corruption */ 2112 good_bytes = sd_completed_bytes(SCpnt); 2113 break; 2114 case 0x20: /* INVALID COMMAND OPCODE */ 2115 case 0x24: /* INVALID FIELD IN CDB */ 2116 switch (SCpnt->cmnd[0]) { 2117 case UNMAP: 2118 sd_config_discard(sdkp, SD_LBP_DISABLE); 2119 break; 2120 case WRITE_SAME_16: 2121 case WRITE_SAME: 2122 if (SCpnt->cmnd[1] & 8) { /* UNMAP */ 2123 sd_config_discard(sdkp, SD_LBP_DISABLE); 2124 } else { 2125 sdkp->device->no_write_same = 1; 2126 sd_config_write_same(sdkp); 2127 req->rq_flags |= RQF_QUIET; 2128 } 2129 break; 2130 } 2131 } 2132 break; 2133 default: 2134 break; 2135 } 2136 2137 out: 2138 if (sd_is_zoned(sdkp)) 2139 good_bytes = sd_zbc_complete(SCpnt, good_bytes, &sshdr); 2140 2141 SCSI_LOG_HLCOMPLETE(1, scmd_printk(KERN_INFO, SCpnt, 2142 "sd_done: completed %d of %d bytes\n", 2143 good_bytes, scsi_bufflen(SCpnt))); 2144 2145 return good_bytes; 2146 } 2147 2148 /* 2149 * spinup disk - called only in sd_revalidate_disk() 2150 */ 2151 static void 2152 sd_spinup_disk(struct scsi_disk *sdkp) 2153 { 2154 unsigned char cmd[10]; 2155 unsigned long spintime_expire = 0; 2156 int retries, spintime; 2157 unsigned int the_result; 2158 struct scsi_sense_hdr sshdr; 2159 const struct scsi_exec_args exec_args = { 2160 .sshdr = &sshdr, 2161 }; 2162 int sense_valid = 0; 2163 2164 spintime = 0; 2165 2166 /* Spin up drives, as required. Only do this at boot time */ 2167 /* Spinup needs to be done for module loads too. */ 2168 do { 2169 retries = 0; 2170 2171 do { 2172 bool media_was_present = sdkp->media_present; 2173 2174 cmd[0] = TEST_UNIT_READY; 2175 memset((void *) &cmd[1], 0, 9); 2176 2177 the_result = scsi_execute_cmd(sdkp->device, cmd, 2178 REQ_OP_DRV_IN, NULL, 0, 2179 SD_TIMEOUT, 2180 sdkp->max_retries, 2181 &exec_args); 2182 2183 /* 2184 * If the drive has indicated to us that it 2185 * doesn't have any media in it, don't bother 2186 * with any more polling. 2187 */ 2188 if (media_not_present(sdkp, &sshdr)) { 2189 if (media_was_present) 2190 sd_printk(KERN_NOTICE, sdkp, "Media removed, stopped polling\n"); 2191 return; 2192 } 2193 2194 if (the_result) 2195 sense_valid = scsi_sense_valid(&sshdr); 2196 retries++; 2197 } while (retries < 3 && 2198 (!scsi_status_is_good(the_result) || 2199 (scsi_status_is_check_condition(the_result) && 2200 sense_valid && sshdr.sense_key == UNIT_ATTENTION))); 2201 2202 if (!scsi_status_is_check_condition(the_result)) { 2203 /* no sense, TUR either succeeded or failed 2204 * with a status error */ 2205 if(!spintime && !scsi_status_is_good(the_result)) { 2206 sd_print_result(sdkp, "Test Unit Ready failed", 2207 the_result); 2208 } 2209 break; 2210 } 2211 2212 /* 2213 * The device does not want the automatic start to be issued. 2214 */ 2215 if (sdkp->device->no_start_on_add) 2216 break; 2217 2218 if (sense_valid && sshdr.sense_key == NOT_READY) { 2219 if (sshdr.asc == 4 && sshdr.ascq == 3) 2220 break; /* manual intervention required */ 2221 if (sshdr.asc == 4 && sshdr.ascq == 0xb) 2222 break; /* standby */ 2223 if (sshdr.asc == 4 && sshdr.ascq == 0xc) 2224 break; /* unavailable */ 2225 if (sshdr.asc == 4 && sshdr.ascq == 0x1b) 2226 break; /* sanitize in progress */ 2227 /* 2228 * Issue command to spin up drive when not ready 2229 */ 2230 if (!spintime) { 2231 sd_printk(KERN_NOTICE, sdkp, "Spinning up disk..."); 2232 cmd[0] = START_STOP; 2233 cmd[1] = 1; /* Return immediately */ 2234 memset((void *) &cmd[2], 0, 8); 2235 cmd[4] = 1; /* Start spin cycle */ 2236 if (sdkp->device->start_stop_pwr_cond) 2237 cmd[4] |= 1 << 4; 2238 scsi_execute_cmd(sdkp->device, cmd, 2239 REQ_OP_DRV_IN, NULL, 0, 2240 SD_TIMEOUT, sdkp->max_retries, 2241 &exec_args); 2242 spintime_expire = jiffies + 100 * HZ; 2243 spintime = 1; 2244 } 2245 /* Wait 1 second for next try */ 2246 msleep(1000); 2247 printk(KERN_CONT "."); 2248 2249 /* 2250 * Wait for USB flash devices with slow firmware. 2251 * Yes, this sense key/ASC combination shouldn't 2252 * occur here. It's characteristic of these devices. 2253 */ 2254 } else if (sense_valid && 2255 sshdr.sense_key == UNIT_ATTENTION && 2256 sshdr.asc == 0x28) { 2257 if (!spintime) { 2258 spintime_expire = jiffies + 5 * HZ; 2259 spintime = 1; 2260 } 2261 /* Wait 1 second for next try */ 2262 msleep(1000); 2263 } else { 2264 /* we don't understand the sense code, so it's 2265 * probably pointless to loop */ 2266 if(!spintime) { 2267 sd_printk(KERN_NOTICE, sdkp, "Unit Not Ready\n"); 2268 sd_print_sense_hdr(sdkp, &sshdr); 2269 } 2270 break; 2271 } 2272 2273 } while (spintime && time_before_eq(jiffies, spintime_expire)); 2274 2275 if (spintime) { 2276 if (scsi_status_is_good(the_result)) 2277 printk(KERN_CONT "ready\n"); 2278 else 2279 printk(KERN_CONT "not responding...\n"); 2280 } 2281 } 2282 2283 /* 2284 * Determine whether disk supports Data Integrity Field. 2285 */ 2286 static int sd_read_protection_type(struct scsi_disk *sdkp, unsigned char *buffer) 2287 { 2288 struct scsi_device *sdp = sdkp->device; 2289 u8 type; 2290 2291 if (scsi_device_protection(sdp) == 0 || (buffer[12] & 1) == 0) { 2292 sdkp->protection_type = 0; 2293 return 0; 2294 } 2295 2296 type = ((buffer[12] >> 1) & 7) + 1; /* P_TYPE 0 = Type 1 */ 2297 2298 if (type > T10_PI_TYPE3_PROTECTION) { 2299 sd_printk(KERN_ERR, sdkp, "formatted with unsupported" \ 2300 " protection type %u. Disabling disk!\n", 2301 type); 2302 sdkp->protection_type = 0; 2303 return -ENODEV; 2304 } 2305 2306 sdkp->protection_type = type; 2307 2308 return 0; 2309 } 2310 2311 static void sd_config_protection(struct scsi_disk *sdkp) 2312 { 2313 struct scsi_device *sdp = sdkp->device; 2314 2315 sd_dif_config_host(sdkp); 2316 2317 if (!sdkp->protection_type) 2318 return; 2319 2320 if (!scsi_host_dif_capable(sdp->host, sdkp->protection_type)) { 2321 sd_first_printk(KERN_NOTICE, sdkp, 2322 "Disabling DIF Type %u protection\n", 2323 sdkp->protection_type); 2324 sdkp->protection_type = 0; 2325 } 2326 2327 sd_first_printk(KERN_NOTICE, sdkp, "Enabling DIF Type %u protection\n", 2328 sdkp->protection_type); 2329 } 2330 2331 static void read_capacity_error(struct scsi_disk *sdkp, struct scsi_device *sdp, 2332 struct scsi_sense_hdr *sshdr, int sense_valid, 2333 int the_result) 2334 { 2335 if (sense_valid) 2336 sd_print_sense_hdr(sdkp, sshdr); 2337 else 2338 sd_printk(KERN_NOTICE, sdkp, "Sense not available.\n"); 2339 2340 /* 2341 * Set dirty bit for removable devices if not ready - 2342 * sometimes drives will not report this properly. 2343 */ 2344 if (sdp->removable && 2345 sense_valid && sshdr->sense_key == NOT_READY) 2346 set_media_not_present(sdkp); 2347 2348 /* 2349 * We used to set media_present to 0 here to indicate no media 2350 * in the drive, but some drives fail read capacity even with 2351 * media present, so we can't do that. 2352 */ 2353 sdkp->capacity = 0; /* unknown mapped to zero - as usual */ 2354 } 2355 2356 #define RC16_LEN 32 2357 #if RC16_LEN > SD_BUF_SIZE 2358 #error RC16_LEN must not be more than SD_BUF_SIZE 2359 #endif 2360 2361 #define READ_CAPACITY_RETRIES_ON_RESET 10 2362 2363 static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp, 2364 unsigned char *buffer) 2365 { 2366 unsigned char cmd[16]; 2367 struct scsi_sense_hdr sshdr; 2368 const struct scsi_exec_args exec_args = { 2369 .sshdr = &sshdr, 2370 }; 2371 int sense_valid = 0; 2372 int the_result; 2373 int retries = 3, reset_retries = READ_CAPACITY_RETRIES_ON_RESET; 2374 unsigned int alignment; 2375 unsigned long long lba; 2376 unsigned sector_size; 2377 2378 if (sdp->no_read_capacity_16) 2379 return -EINVAL; 2380 2381 do { 2382 memset(cmd, 0, 16); 2383 cmd[0] = SERVICE_ACTION_IN_16; 2384 cmd[1] = SAI_READ_CAPACITY_16; 2385 cmd[13] = RC16_LEN; 2386 memset(buffer, 0, RC16_LEN); 2387 2388 the_result = scsi_execute_cmd(sdp, cmd, REQ_OP_DRV_IN, 2389 buffer, RC16_LEN, SD_TIMEOUT, 2390 sdkp->max_retries, &exec_args); 2391 2392 if (media_not_present(sdkp, &sshdr)) 2393 return -ENODEV; 2394 2395 if (the_result > 0) { 2396 sense_valid = scsi_sense_valid(&sshdr); 2397 if (sense_valid && 2398 sshdr.sense_key == ILLEGAL_REQUEST && 2399 (sshdr.asc == 0x20 || sshdr.asc == 0x24) && 2400 sshdr.ascq == 0x00) 2401 /* Invalid Command Operation Code or 2402 * Invalid Field in CDB, just retry 2403 * silently with RC10 */ 2404 return -EINVAL; 2405 if (sense_valid && 2406 sshdr.sense_key == UNIT_ATTENTION && 2407 sshdr.asc == 0x29 && sshdr.ascq == 0x00) 2408 /* Device reset might occur several times, 2409 * give it one more chance */ 2410 if (--reset_retries > 0) 2411 continue; 2412 } 2413 retries--; 2414 2415 } while (the_result && retries); 2416 2417 if (the_result) { 2418 sd_print_result(sdkp, "Read Capacity(16) failed", the_result); 2419 read_capacity_error(sdkp, sdp, &sshdr, sense_valid, the_result); 2420 return -EINVAL; 2421 } 2422 2423 sector_size = get_unaligned_be32(&buffer[8]); 2424 lba = get_unaligned_be64(&buffer[0]); 2425 2426 if (sd_read_protection_type(sdkp, buffer) < 0) { 2427 sdkp->capacity = 0; 2428 return -ENODEV; 2429 } 2430 2431 /* Logical blocks per physical block exponent */ 2432 sdkp->physical_block_size = (1 << (buffer[13] & 0xf)) * sector_size; 2433 2434 /* RC basis */ 2435 sdkp->rc_basis = (buffer[12] >> 4) & 0x3; 2436 2437 /* Lowest aligned logical block */ 2438 alignment = ((buffer[14] & 0x3f) << 8 | buffer[15]) * sector_size; 2439 blk_queue_alignment_offset(sdp->request_queue, alignment); 2440 if (alignment && sdkp->first_scan) 2441 sd_printk(KERN_NOTICE, sdkp, 2442 "physical block alignment offset: %u\n", alignment); 2443 2444 if (buffer[14] & 0x80) { /* LBPME */ 2445 sdkp->lbpme = 1; 2446 2447 if (buffer[14] & 0x40) /* LBPRZ */ 2448 sdkp->lbprz = 1; 2449 2450 sd_config_discard(sdkp, SD_LBP_WS16); 2451 } 2452 2453 sdkp->capacity = lba + 1; 2454 return sector_size; 2455 } 2456 2457 static int read_capacity_10(struct scsi_disk *sdkp, struct scsi_device *sdp, 2458 unsigned char *buffer) 2459 { 2460 unsigned char cmd[16]; 2461 struct scsi_sense_hdr sshdr; 2462 const struct scsi_exec_args exec_args = { 2463 .sshdr = &sshdr, 2464 }; 2465 int sense_valid = 0; 2466 int the_result; 2467 int retries = 3, reset_retries = READ_CAPACITY_RETRIES_ON_RESET; 2468 sector_t lba; 2469 unsigned sector_size; 2470 2471 do { 2472 cmd[0] = READ_CAPACITY; 2473 memset(&cmd[1], 0, 9); 2474 memset(buffer, 0, 8); 2475 2476 the_result = scsi_execute_cmd(sdp, cmd, REQ_OP_DRV_IN, buffer, 2477 8, SD_TIMEOUT, sdkp->max_retries, 2478 &exec_args); 2479 2480 if (media_not_present(sdkp, &sshdr)) 2481 return -ENODEV; 2482 2483 if (the_result > 0) { 2484 sense_valid = scsi_sense_valid(&sshdr); 2485 if (sense_valid && 2486 sshdr.sense_key == UNIT_ATTENTION && 2487 sshdr.asc == 0x29 && sshdr.ascq == 0x00) 2488 /* Device reset might occur several times, 2489 * give it one more chance */ 2490 if (--reset_retries > 0) 2491 continue; 2492 } 2493 retries--; 2494 2495 } while (the_result && retries); 2496 2497 if (the_result) { 2498 sd_print_result(sdkp, "Read Capacity(10) failed", the_result); 2499 read_capacity_error(sdkp, sdp, &sshdr, sense_valid, the_result); 2500 return -EINVAL; 2501 } 2502 2503 sector_size = get_unaligned_be32(&buffer[4]); 2504 lba = get_unaligned_be32(&buffer[0]); 2505 2506 if (sdp->no_read_capacity_16 && (lba == 0xffffffff)) { 2507 /* Some buggy (usb cardreader) devices return an lba of 2508 0xffffffff when the want to report a size of 0 (with 2509 which they really mean no media is present) */ 2510 sdkp->capacity = 0; 2511 sdkp->physical_block_size = sector_size; 2512 return sector_size; 2513 } 2514 2515 sdkp->capacity = lba + 1; 2516 sdkp->physical_block_size = sector_size; 2517 return sector_size; 2518 } 2519 2520 static int sd_try_rc16_first(struct scsi_device *sdp) 2521 { 2522 if (sdp->host->max_cmd_len < 16) 2523 return 0; 2524 if (sdp->try_rc_10_first) 2525 return 0; 2526 if (sdp->scsi_level > SCSI_SPC_2) 2527 return 1; 2528 if (scsi_device_protection(sdp)) 2529 return 1; 2530 return 0; 2531 } 2532 2533 /* 2534 * read disk capacity 2535 */ 2536 static void 2537 sd_read_capacity(struct scsi_disk *sdkp, unsigned char *buffer) 2538 { 2539 int sector_size; 2540 struct scsi_device *sdp = sdkp->device; 2541 2542 if (sd_try_rc16_first(sdp)) { 2543 sector_size = read_capacity_16(sdkp, sdp, buffer); 2544 if (sector_size == -EOVERFLOW) 2545 goto got_data; 2546 if (sector_size == -ENODEV) 2547 return; 2548 if (sector_size < 0) 2549 sector_size = read_capacity_10(sdkp, sdp, buffer); 2550 if (sector_size < 0) 2551 return; 2552 } else { 2553 sector_size = read_capacity_10(sdkp, sdp, buffer); 2554 if (sector_size == -EOVERFLOW) 2555 goto got_data; 2556 if (sector_size < 0) 2557 return; 2558 if ((sizeof(sdkp->capacity) > 4) && 2559 (sdkp->capacity > 0xffffffffULL)) { 2560 int old_sector_size = sector_size; 2561 sd_printk(KERN_NOTICE, sdkp, "Very big device. " 2562 "Trying to use READ CAPACITY(16).\n"); 2563 sector_size = read_capacity_16(sdkp, sdp, buffer); 2564 if (sector_size < 0) { 2565 sd_printk(KERN_NOTICE, sdkp, 2566 "Using 0xffffffff as device size\n"); 2567 sdkp->capacity = 1 + (sector_t) 0xffffffff; 2568 sector_size = old_sector_size; 2569 goto got_data; 2570 } 2571 /* Remember that READ CAPACITY(16) succeeded */ 2572 sdp->try_rc_10_first = 0; 2573 } 2574 } 2575 2576 /* Some devices are known to return the total number of blocks, 2577 * not the highest block number. Some devices have versions 2578 * which do this and others which do not. Some devices we might 2579 * suspect of doing this but we don't know for certain. 2580 * 2581 * If we know the reported capacity is wrong, decrement it. If 2582 * we can only guess, then assume the number of blocks is even 2583 * (usually true but not always) and err on the side of lowering 2584 * the capacity. 2585 */ 2586 if (sdp->fix_capacity || 2587 (sdp->guess_capacity && (sdkp->capacity & 0x01))) { 2588 sd_printk(KERN_INFO, sdkp, "Adjusting the sector count " 2589 "from its reported value: %llu\n", 2590 (unsigned long long) sdkp->capacity); 2591 --sdkp->capacity; 2592 } 2593 2594 got_data: 2595 if (sector_size == 0) { 2596 sector_size = 512; 2597 sd_printk(KERN_NOTICE, sdkp, "Sector size 0 reported, " 2598 "assuming 512.\n"); 2599 } 2600 2601 if (sector_size != 512 && 2602 sector_size != 1024 && 2603 sector_size != 2048 && 2604 sector_size != 4096) { 2605 sd_printk(KERN_NOTICE, sdkp, "Unsupported sector size %d.\n", 2606 sector_size); 2607 /* 2608 * The user might want to re-format the drive with 2609 * a supported sectorsize. Once this happens, it 2610 * would be relatively trivial to set the thing up. 2611 * For this reason, we leave the thing in the table. 2612 */ 2613 sdkp->capacity = 0; 2614 /* 2615 * set a bogus sector size so the normal read/write 2616 * logic in the block layer will eventually refuse any 2617 * request on this device without tripping over power 2618 * of two sector size assumptions 2619 */ 2620 sector_size = 512; 2621 } 2622 blk_queue_logical_block_size(sdp->request_queue, sector_size); 2623 blk_queue_physical_block_size(sdp->request_queue, 2624 sdkp->physical_block_size); 2625 sdkp->device->sector_size = sector_size; 2626 2627 if (sdkp->capacity > 0xffffffff) 2628 sdp->use_16_for_rw = 1; 2629 2630 } 2631 2632 /* 2633 * Print disk capacity 2634 */ 2635 static void 2636 sd_print_capacity(struct scsi_disk *sdkp, 2637 sector_t old_capacity) 2638 { 2639 int sector_size = sdkp->device->sector_size; 2640 char cap_str_2[10], cap_str_10[10]; 2641 2642 if (!sdkp->first_scan && old_capacity == sdkp->capacity) 2643 return; 2644 2645 string_get_size(sdkp->capacity, sector_size, 2646 STRING_UNITS_2, cap_str_2, sizeof(cap_str_2)); 2647 string_get_size(sdkp->capacity, sector_size, 2648 STRING_UNITS_10, cap_str_10, sizeof(cap_str_10)); 2649 2650 sd_printk(KERN_NOTICE, sdkp, 2651 "%llu %d-byte logical blocks: (%s/%s)\n", 2652 (unsigned long long)sdkp->capacity, 2653 sector_size, cap_str_10, cap_str_2); 2654 2655 if (sdkp->physical_block_size != sector_size) 2656 sd_printk(KERN_NOTICE, sdkp, 2657 "%u-byte physical blocks\n", 2658 sdkp->physical_block_size); 2659 } 2660 2661 /* called with buffer of length 512 */ 2662 static inline int 2663 sd_do_mode_sense(struct scsi_disk *sdkp, int dbd, int modepage, 2664 unsigned char *buffer, int len, struct scsi_mode_data *data, 2665 struct scsi_sense_hdr *sshdr) 2666 { 2667 /* 2668 * If we must use MODE SENSE(10), make sure that the buffer length 2669 * is at least 8 bytes so that the mode sense header fits. 2670 */ 2671 if (sdkp->device->use_10_for_ms && len < 8) 2672 len = 8; 2673 2674 return scsi_mode_sense(sdkp->device, dbd, modepage, 0, buffer, len, 2675 SD_TIMEOUT, sdkp->max_retries, data, sshdr); 2676 } 2677 2678 /* 2679 * read write protect setting, if possible - called only in sd_revalidate_disk() 2680 * called with buffer of length SD_BUF_SIZE 2681 */ 2682 static void 2683 sd_read_write_protect_flag(struct scsi_disk *sdkp, unsigned char *buffer) 2684 { 2685 int res; 2686 struct scsi_device *sdp = sdkp->device; 2687 struct scsi_mode_data data; 2688 int old_wp = sdkp->write_prot; 2689 2690 set_disk_ro(sdkp->disk, 0); 2691 if (sdp->skip_ms_page_3f) { 2692 sd_first_printk(KERN_NOTICE, sdkp, "Assuming Write Enabled\n"); 2693 return; 2694 } 2695 2696 if (sdp->use_192_bytes_for_3f) { 2697 res = sd_do_mode_sense(sdkp, 0, 0x3F, buffer, 192, &data, NULL); 2698 } else { 2699 /* 2700 * First attempt: ask for all pages (0x3F), but only 4 bytes. 2701 * We have to start carefully: some devices hang if we ask 2702 * for more than is available. 2703 */ 2704 res = sd_do_mode_sense(sdkp, 0, 0x3F, buffer, 4, &data, NULL); 2705 2706 /* 2707 * Second attempt: ask for page 0 When only page 0 is 2708 * implemented, a request for page 3F may return Sense Key 2709 * 5: Illegal Request, Sense Code 24: Invalid field in 2710 * CDB. 2711 */ 2712 if (res < 0) 2713 res = sd_do_mode_sense(sdkp, 0, 0, buffer, 4, &data, NULL); 2714 2715 /* 2716 * Third attempt: ask 255 bytes, as we did earlier. 2717 */ 2718 if (res < 0) 2719 res = sd_do_mode_sense(sdkp, 0, 0x3F, buffer, 255, 2720 &data, NULL); 2721 } 2722 2723 if (res < 0) { 2724 sd_first_printk(KERN_WARNING, sdkp, 2725 "Test WP failed, assume Write Enabled\n"); 2726 } else { 2727 sdkp->write_prot = ((data.device_specific & 0x80) != 0); 2728 set_disk_ro(sdkp->disk, sdkp->write_prot); 2729 if (sdkp->first_scan || old_wp != sdkp->write_prot) { 2730 sd_printk(KERN_NOTICE, sdkp, "Write Protect is %s\n", 2731 sdkp->write_prot ? "on" : "off"); 2732 sd_printk(KERN_DEBUG, sdkp, "Mode Sense: %4ph\n", buffer); 2733 } 2734 } 2735 } 2736 2737 /* 2738 * sd_read_cache_type - called only from sd_revalidate_disk() 2739 * called with buffer of length SD_BUF_SIZE 2740 */ 2741 static void 2742 sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer) 2743 { 2744 int len = 0, res; 2745 struct scsi_device *sdp = sdkp->device; 2746 2747 int dbd; 2748 int modepage; 2749 int first_len; 2750 struct scsi_mode_data data; 2751 struct scsi_sense_hdr sshdr; 2752 int old_wce = sdkp->WCE; 2753 int old_rcd = sdkp->RCD; 2754 int old_dpofua = sdkp->DPOFUA; 2755 2756 2757 if (sdkp->cache_override) 2758 return; 2759 2760 first_len = 4; 2761 if (sdp->skip_ms_page_8) { 2762 if (sdp->type == TYPE_RBC) 2763 goto defaults; 2764 else { 2765 if (sdp->skip_ms_page_3f) 2766 goto defaults; 2767 modepage = 0x3F; 2768 if (sdp->use_192_bytes_for_3f) 2769 first_len = 192; 2770 dbd = 0; 2771 } 2772 } else if (sdp->type == TYPE_RBC) { 2773 modepage = 6; 2774 dbd = 8; 2775 } else { 2776 modepage = 8; 2777 dbd = 0; 2778 } 2779 2780 /* cautiously ask */ 2781 res = sd_do_mode_sense(sdkp, dbd, modepage, buffer, first_len, 2782 &data, &sshdr); 2783 2784 if (res < 0) 2785 goto bad_sense; 2786 2787 if (!data.header_length) { 2788 modepage = 6; 2789 first_len = 0; 2790 sd_first_printk(KERN_ERR, sdkp, 2791 "Missing header in MODE_SENSE response\n"); 2792 } 2793 2794 /* that went OK, now ask for the proper length */ 2795 len = data.length; 2796 2797 /* 2798 * We're only interested in the first three bytes, actually. 2799 * But the data cache page is defined for the first 20. 2800 */ 2801 if (len < 3) 2802 goto bad_sense; 2803 else if (len > SD_BUF_SIZE) { 2804 sd_first_printk(KERN_NOTICE, sdkp, "Truncating mode parameter " 2805 "data from %d to %d bytes\n", len, SD_BUF_SIZE); 2806 len = SD_BUF_SIZE; 2807 } 2808 if (modepage == 0x3F && sdp->use_192_bytes_for_3f) 2809 len = 192; 2810 2811 /* Get the data */ 2812 if (len > first_len) 2813 res = sd_do_mode_sense(sdkp, dbd, modepage, buffer, len, 2814 &data, &sshdr); 2815 2816 if (!res) { 2817 int offset = data.header_length + data.block_descriptor_length; 2818 2819 while (offset < len) { 2820 u8 page_code = buffer[offset] & 0x3F; 2821 u8 spf = buffer[offset] & 0x40; 2822 2823 if (page_code == 8 || page_code == 6) { 2824 /* We're interested only in the first 3 bytes. 2825 */ 2826 if (len - offset <= 2) { 2827 sd_first_printk(KERN_ERR, sdkp, 2828 "Incomplete mode parameter " 2829 "data\n"); 2830 goto defaults; 2831 } else { 2832 modepage = page_code; 2833 goto Page_found; 2834 } 2835 } else { 2836 /* Go to the next page */ 2837 if (spf && len - offset > 3) 2838 offset += 4 + (buffer[offset+2] << 8) + 2839 buffer[offset+3]; 2840 else if (!spf && len - offset > 1) 2841 offset += 2 + buffer[offset+1]; 2842 else { 2843 sd_first_printk(KERN_ERR, sdkp, 2844 "Incomplete mode " 2845 "parameter data\n"); 2846 goto defaults; 2847 } 2848 } 2849 } 2850 2851 sd_first_printk(KERN_WARNING, sdkp, 2852 "No Caching mode page found\n"); 2853 goto defaults; 2854 2855 Page_found: 2856 if (modepage == 8) { 2857 sdkp->WCE = ((buffer[offset + 2] & 0x04) != 0); 2858 sdkp->RCD = ((buffer[offset + 2] & 0x01) != 0); 2859 } else { 2860 sdkp->WCE = ((buffer[offset + 2] & 0x01) == 0); 2861 sdkp->RCD = 0; 2862 } 2863 2864 sdkp->DPOFUA = (data.device_specific & 0x10) != 0; 2865 if (sdp->broken_fua) { 2866 sd_first_printk(KERN_NOTICE, sdkp, "Disabling FUA\n"); 2867 sdkp->DPOFUA = 0; 2868 } else if (sdkp->DPOFUA && !sdkp->device->use_10_for_rw && 2869 !sdkp->device->use_16_for_rw) { 2870 sd_first_printk(KERN_NOTICE, sdkp, 2871 "Uses READ/WRITE(6), disabling FUA\n"); 2872 sdkp->DPOFUA = 0; 2873 } 2874 2875 /* No cache flush allowed for write protected devices */ 2876 if (sdkp->WCE && sdkp->write_prot) 2877 sdkp->WCE = 0; 2878 2879 if (sdkp->first_scan || old_wce != sdkp->WCE || 2880 old_rcd != sdkp->RCD || old_dpofua != sdkp->DPOFUA) 2881 sd_printk(KERN_NOTICE, sdkp, 2882 "Write cache: %s, read cache: %s, %s\n", 2883 sdkp->WCE ? "enabled" : "disabled", 2884 sdkp->RCD ? "disabled" : "enabled", 2885 sdkp->DPOFUA ? "supports DPO and FUA" 2886 : "doesn't support DPO or FUA"); 2887 2888 return; 2889 } 2890 2891 bad_sense: 2892 if (scsi_sense_valid(&sshdr) && 2893 sshdr.sense_key == ILLEGAL_REQUEST && 2894 sshdr.asc == 0x24 && sshdr.ascq == 0x0) 2895 /* Invalid field in CDB */ 2896 sd_first_printk(KERN_NOTICE, sdkp, "Cache data unavailable\n"); 2897 else 2898 sd_first_printk(KERN_ERR, sdkp, 2899 "Asking for cache data failed\n"); 2900 2901 defaults: 2902 if (sdp->wce_default_on) { 2903 sd_first_printk(KERN_NOTICE, sdkp, 2904 "Assuming drive cache: write back\n"); 2905 sdkp->WCE = 1; 2906 } else { 2907 sd_first_printk(KERN_WARNING, sdkp, 2908 "Assuming drive cache: write through\n"); 2909 sdkp->WCE = 0; 2910 } 2911 sdkp->RCD = 0; 2912 sdkp->DPOFUA = 0; 2913 } 2914 2915 /* 2916 * The ATO bit indicates whether the DIF application tag is available 2917 * for use by the operating system. 2918 */ 2919 static void sd_read_app_tag_own(struct scsi_disk *sdkp, unsigned char *buffer) 2920 { 2921 int res, offset; 2922 struct scsi_device *sdp = sdkp->device; 2923 struct scsi_mode_data data; 2924 struct scsi_sense_hdr sshdr; 2925 2926 if (sdp->type != TYPE_DISK && sdp->type != TYPE_ZBC) 2927 return; 2928 2929 if (sdkp->protection_type == 0) 2930 return; 2931 2932 res = scsi_mode_sense(sdp, 1, 0x0a, 0, buffer, 36, SD_TIMEOUT, 2933 sdkp->max_retries, &data, &sshdr); 2934 2935 if (res < 0 || !data.header_length || 2936 data.length < 6) { 2937 sd_first_printk(KERN_WARNING, sdkp, 2938 "getting Control mode page failed, assume no ATO\n"); 2939 2940 if (scsi_sense_valid(&sshdr)) 2941 sd_print_sense_hdr(sdkp, &sshdr); 2942 2943 return; 2944 } 2945 2946 offset = data.header_length + data.block_descriptor_length; 2947 2948 if ((buffer[offset] & 0x3f) != 0x0a) { 2949 sd_first_printk(KERN_ERR, sdkp, "ATO Got wrong page\n"); 2950 return; 2951 } 2952 2953 if ((buffer[offset + 5] & 0x80) == 0) 2954 return; 2955 2956 sdkp->ATO = 1; 2957 2958 return; 2959 } 2960 2961 /** 2962 * sd_read_block_limits - Query disk device for preferred I/O sizes. 2963 * @sdkp: disk to query 2964 */ 2965 static void sd_read_block_limits(struct scsi_disk *sdkp) 2966 { 2967 struct scsi_vpd *vpd; 2968 2969 rcu_read_lock(); 2970 2971 vpd = rcu_dereference(sdkp->device->vpd_pgb0); 2972 if (!vpd || vpd->len < 16) 2973 goto out; 2974 2975 sdkp->min_xfer_blocks = get_unaligned_be16(&vpd->data[6]); 2976 sdkp->max_xfer_blocks = get_unaligned_be32(&vpd->data[8]); 2977 sdkp->opt_xfer_blocks = get_unaligned_be32(&vpd->data[12]); 2978 2979 if (vpd->len >= 64) { 2980 unsigned int lba_count, desc_count; 2981 2982 sdkp->max_ws_blocks = (u32)get_unaligned_be64(&vpd->data[36]); 2983 2984 if (!sdkp->lbpme) 2985 goto out; 2986 2987 lba_count = get_unaligned_be32(&vpd->data[20]); 2988 desc_count = get_unaligned_be32(&vpd->data[24]); 2989 2990 if (lba_count && desc_count) 2991 sdkp->max_unmap_blocks = lba_count; 2992 2993 sdkp->unmap_granularity = get_unaligned_be32(&vpd->data[28]); 2994 2995 if (vpd->data[32] & 0x80) 2996 sdkp->unmap_alignment = 2997 get_unaligned_be32(&vpd->data[32]) & ~(1 << 31); 2998 2999 if (!sdkp->lbpvpd) { /* LBP VPD page not provided */ 3000 3001 if (sdkp->max_unmap_blocks) 3002 sd_config_discard(sdkp, SD_LBP_UNMAP); 3003 else 3004 sd_config_discard(sdkp, SD_LBP_WS16); 3005 3006 } else { /* LBP VPD page tells us what to use */ 3007 if (sdkp->lbpu && sdkp->max_unmap_blocks) 3008 sd_config_discard(sdkp, SD_LBP_UNMAP); 3009 else if (sdkp->lbpws) 3010 sd_config_discard(sdkp, SD_LBP_WS16); 3011 else if (sdkp->lbpws10) 3012 sd_config_discard(sdkp, SD_LBP_WS10); 3013 else 3014 sd_config_discard(sdkp, SD_LBP_DISABLE); 3015 } 3016 } 3017 3018 out: 3019 rcu_read_unlock(); 3020 } 3021 3022 /** 3023 * sd_read_block_characteristics - Query block dev. characteristics 3024 * @sdkp: disk to query 3025 */ 3026 static void sd_read_block_characteristics(struct scsi_disk *sdkp) 3027 { 3028 struct request_queue *q = sdkp->disk->queue; 3029 struct scsi_vpd *vpd; 3030 u16 rot; 3031 u8 zoned; 3032 3033 rcu_read_lock(); 3034 vpd = rcu_dereference(sdkp->device->vpd_pgb1); 3035 3036 if (!vpd || vpd->len < 8) { 3037 rcu_read_unlock(); 3038 return; 3039 } 3040 3041 rot = get_unaligned_be16(&vpd->data[4]); 3042 zoned = (vpd->data[8] >> 4) & 3; 3043 rcu_read_unlock(); 3044 3045 if (rot == 1) { 3046 blk_queue_flag_set(QUEUE_FLAG_NONROT, q); 3047 blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q); 3048 } 3049 3050 if (sdkp->device->type == TYPE_ZBC) { 3051 /* 3052 * Host-managed: Per ZBC and ZAC specifications, writes in 3053 * sequential write required zones of host-managed devices must 3054 * be aligned to the device physical block size. 3055 */ 3056 disk_set_zoned(sdkp->disk, BLK_ZONED_HM); 3057 blk_queue_zone_write_granularity(q, sdkp->physical_block_size); 3058 } else { 3059 sdkp->zoned = zoned; 3060 if (sdkp->zoned == 1) { 3061 /* Host-aware */ 3062 disk_set_zoned(sdkp->disk, BLK_ZONED_HA); 3063 } else { 3064 /* Regular disk or drive managed disk */ 3065 disk_set_zoned(sdkp->disk, BLK_ZONED_NONE); 3066 } 3067 } 3068 3069 if (!sdkp->first_scan) 3070 return; 3071 3072 if (blk_queue_is_zoned(q)) { 3073 sd_printk(KERN_NOTICE, sdkp, "Host-%s zoned block device\n", 3074 q->limits.zoned == BLK_ZONED_HM ? "managed" : "aware"); 3075 } else { 3076 if (sdkp->zoned == 1) 3077 sd_printk(KERN_NOTICE, sdkp, 3078 "Host-aware SMR disk used as regular disk\n"); 3079 else if (sdkp->zoned == 2) 3080 sd_printk(KERN_NOTICE, sdkp, 3081 "Drive-managed SMR disk\n"); 3082 } 3083 } 3084 3085 /** 3086 * sd_read_block_provisioning - Query provisioning VPD page 3087 * @sdkp: disk to query 3088 */ 3089 static void sd_read_block_provisioning(struct scsi_disk *sdkp) 3090 { 3091 struct scsi_vpd *vpd; 3092 3093 if (sdkp->lbpme == 0) 3094 return; 3095 3096 rcu_read_lock(); 3097 vpd = rcu_dereference(sdkp->device->vpd_pgb2); 3098 3099 if (!vpd || vpd->len < 8) { 3100 rcu_read_unlock(); 3101 return; 3102 } 3103 3104 sdkp->lbpvpd = 1; 3105 sdkp->lbpu = (vpd->data[5] >> 7) & 1; /* UNMAP */ 3106 sdkp->lbpws = (vpd->data[5] >> 6) & 1; /* WRITE SAME(16) w/ UNMAP */ 3107 sdkp->lbpws10 = (vpd->data[5] >> 5) & 1; /* WRITE SAME(10) w/ UNMAP */ 3108 rcu_read_unlock(); 3109 } 3110 3111 static void sd_read_write_same(struct scsi_disk *sdkp, unsigned char *buffer) 3112 { 3113 struct scsi_device *sdev = sdkp->device; 3114 3115 if (sdev->host->no_write_same) { 3116 sdev->no_write_same = 1; 3117 3118 return; 3119 } 3120 3121 if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE, INQUIRY, 0) < 0) { 3122 struct scsi_vpd *vpd; 3123 3124 sdev->no_report_opcodes = 1; 3125 3126 /* Disable WRITE SAME if REPORT SUPPORTED OPERATION 3127 * CODES is unsupported and the device has an ATA 3128 * Information VPD page (SAT). 3129 */ 3130 rcu_read_lock(); 3131 vpd = rcu_dereference(sdev->vpd_pg89); 3132 if (vpd) 3133 sdev->no_write_same = 1; 3134 rcu_read_unlock(); 3135 } 3136 3137 if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE, WRITE_SAME_16, 0) == 1) 3138 sdkp->ws16 = 1; 3139 3140 if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE, WRITE_SAME, 0) == 1) 3141 sdkp->ws10 = 1; 3142 } 3143 3144 static void sd_read_security(struct scsi_disk *sdkp, unsigned char *buffer) 3145 { 3146 struct scsi_device *sdev = sdkp->device; 3147 3148 if (!sdev->security_supported) 3149 return; 3150 3151 if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE, 3152 SECURITY_PROTOCOL_IN, 0) == 1 && 3153 scsi_report_opcode(sdev, buffer, SD_BUF_SIZE, 3154 SECURITY_PROTOCOL_OUT, 0) == 1) 3155 sdkp->security = 1; 3156 } 3157 3158 static inline sector_t sd64_to_sectors(struct scsi_disk *sdkp, u8 *buf) 3159 { 3160 return logical_to_sectors(sdkp->device, get_unaligned_be64(buf)); 3161 } 3162 3163 /** 3164 * sd_read_cpr - Query concurrent positioning ranges 3165 * @sdkp: disk to query 3166 */ 3167 static void sd_read_cpr(struct scsi_disk *sdkp) 3168 { 3169 struct blk_independent_access_ranges *iars = NULL; 3170 unsigned char *buffer = NULL; 3171 unsigned int nr_cpr = 0; 3172 int i, vpd_len, buf_len = SD_BUF_SIZE; 3173 u8 *desc; 3174 3175 /* 3176 * We need to have the capacity set first for the block layer to be 3177 * able to check the ranges. 3178 */ 3179 if (sdkp->first_scan) 3180 return; 3181 3182 if (!sdkp->capacity) 3183 goto out; 3184 3185 /* 3186 * Concurrent Positioning Ranges VPD: there can be at most 256 ranges, 3187 * leading to a maximum page size of 64 + 256*32 bytes. 3188 */ 3189 buf_len = 64 + 256*32; 3190 buffer = kmalloc(buf_len, GFP_KERNEL); 3191 if (!buffer || scsi_get_vpd_page(sdkp->device, 0xb9, buffer, buf_len)) 3192 goto out; 3193 3194 /* We must have at least a 64B header and one 32B range descriptor */ 3195 vpd_len = get_unaligned_be16(&buffer[2]) + 4; 3196 if (vpd_len > buf_len || vpd_len < 64 + 32 || (vpd_len & 31)) { 3197 sd_printk(KERN_ERR, sdkp, 3198 "Invalid Concurrent Positioning Ranges VPD page\n"); 3199 goto out; 3200 } 3201 3202 nr_cpr = (vpd_len - 64) / 32; 3203 if (nr_cpr == 1) { 3204 nr_cpr = 0; 3205 goto out; 3206 } 3207 3208 iars = disk_alloc_independent_access_ranges(sdkp->disk, nr_cpr); 3209 if (!iars) { 3210 nr_cpr = 0; 3211 goto out; 3212 } 3213 3214 desc = &buffer[64]; 3215 for (i = 0; i < nr_cpr; i++, desc += 32) { 3216 if (desc[0] != i) { 3217 sd_printk(KERN_ERR, sdkp, 3218 "Invalid Concurrent Positioning Range number\n"); 3219 nr_cpr = 0; 3220 break; 3221 } 3222 3223 iars->ia_range[i].sector = sd64_to_sectors(sdkp, desc + 8); 3224 iars->ia_range[i].nr_sectors = sd64_to_sectors(sdkp, desc + 16); 3225 } 3226 3227 out: 3228 disk_set_independent_access_ranges(sdkp->disk, iars); 3229 if (nr_cpr && sdkp->nr_actuators != nr_cpr) { 3230 sd_printk(KERN_NOTICE, sdkp, 3231 "%u concurrent positioning ranges\n", nr_cpr); 3232 sdkp->nr_actuators = nr_cpr; 3233 } 3234 3235 kfree(buffer); 3236 } 3237 3238 static bool sd_validate_min_xfer_size(struct scsi_disk *sdkp) 3239 { 3240 struct scsi_device *sdp = sdkp->device; 3241 unsigned int min_xfer_bytes = 3242 logical_to_bytes(sdp, sdkp->min_xfer_blocks); 3243 3244 if (sdkp->min_xfer_blocks == 0) 3245 return false; 3246 3247 if (min_xfer_bytes & (sdkp->physical_block_size - 1)) { 3248 sd_first_printk(KERN_WARNING, sdkp, 3249 "Preferred minimum I/O size %u bytes not a " \ 3250 "multiple of physical block size (%u bytes)\n", 3251 min_xfer_bytes, sdkp->physical_block_size); 3252 sdkp->min_xfer_blocks = 0; 3253 return false; 3254 } 3255 3256 sd_first_printk(KERN_INFO, sdkp, "Preferred minimum I/O size %u bytes\n", 3257 min_xfer_bytes); 3258 return true; 3259 } 3260 3261 /* 3262 * Determine the device's preferred I/O size for reads and writes 3263 * unless the reported value is unreasonably small, large, not a 3264 * multiple of the physical block size, or simply garbage. 3265 */ 3266 static bool sd_validate_opt_xfer_size(struct scsi_disk *sdkp, 3267 unsigned int dev_max) 3268 { 3269 struct scsi_device *sdp = sdkp->device; 3270 unsigned int opt_xfer_bytes = 3271 logical_to_bytes(sdp, sdkp->opt_xfer_blocks); 3272 unsigned int min_xfer_bytes = 3273 logical_to_bytes(sdp, sdkp->min_xfer_blocks); 3274 3275 if (sdkp->opt_xfer_blocks == 0) 3276 return false; 3277 3278 if (sdkp->opt_xfer_blocks > dev_max) { 3279 sd_first_printk(KERN_WARNING, sdkp, 3280 "Optimal transfer size %u logical blocks " \ 3281 "> dev_max (%u logical blocks)\n", 3282 sdkp->opt_xfer_blocks, dev_max); 3283 return false; 3284 } 3285 3286 if (sdkp->opt_xfer_blocks > SD_DEF_XFER_BLOCKS) { 3287 sd_first_printk(KERN_WARNING, sdkp, 3288 "Optimal transfer size %u logical blocks " \ 3289 "> sd driver limit (%u logical blocks)\n", 3290 sdkp->opt_xfer_blocks, SD_DEF_XFER_BLOCKS); 3291 return false; 3292 } 3293 3294 if (opt_xfer_bytes < PAGE_SIZE) { 3295 sd_first_printk(KERN_WARNING, sdkp, 3296 "Optimal transfer size %u bytes < " \ 3297 "PAGE_SIZE (%u bytes)\n", 3298 opt_xfer_bytes, (unsigned int)PAGE_SIZE); 3299 return false; 3300 } 3301 3302 if (min_xfer_bytes && opt_xfer_bytes % min_xfer_bytes) { 3303 sd_first_printk(KERN_WARNING, sdkp, 3304 "Optimal transfer size %u bytes not a " \ 3305 "multiple of preferred minimum block " \ 3306 "size (%u bytes)\n", 3307 opt_xfer_bytes, min_xfer_bytes); 3308 return false; 3309 } 3310 3311 if (opt_xfer_bytes & (sdkp->physical_block_size - 1)) { 3312 sd_first_printk(KERN_WARNING, sdkp, 3313 "Optimal transfer size %u bytes not a " \ 3314 "multiple of physical block size (%u bytes)\n", 3315 opt_xfer_bytes, sdkp->physical_block_size); 3316 return false; 3317 } 3318 3319 sd_first_printk(KERN_INFO, sdkp, "Optimal transfer size %u bytes\n", 3320 opt_xfer_bytes); 3321 return true; 3322 } 3323 3324 /** 3325 * sd_revalidate_disk - called the first time a new disk is seen, 3326 * performs disk spin up, read_capacity, etc. 3327 * @disk: struct gendisk we care about 3328 **/ 3329 static int sd_revalidate_disk(struct gendisk *disk) 3330 { 3331 struct scsi_disk *sdkp = scsi_disk(disk); 3332 struct scsi_device *sdp = sdkp->device; 3333 struct request_queue *q = sdkp->disk->queue; 3334 sector_t old_capacity = sdkp->capacity; 3335 unsigned char *buffer; 3336 unsigned int dev_max, rw_max; 3337 3338 SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, 3339 "sd_revalidate_disk\n")); 3340 3341 /* 3342 * If the device is offline, don't try and read capacity or any 3343 * of the other niceties. 3344 */ 3345 if (!scsi_device_online(sdp)) 3346 goto out; 3347 3348 buffer = kmalloc(SD_BUF_SIZE, GFP_KERNEL); 3349 if (!buffer) { 3350 sd_printk(KERN_WARNING, sdkp, "sd_revalidate_disk: Memory " 3351 "allocation failure.\n"); 3352 goto out; 3353 } 3354 3355 sd_spinup_disk(sdkp); 3356 3357 /* 3358 * Without media there is no reason to ask; moreover, some devices 3359 * react badly if we do. 3360 */ 3361 if (sdkp->media_present) { 3362 sd_read_capacity(sdkp, buffer); 3363 3364 /* 3365 * set the default to rotational. All non-rotational devices 3366 * support the block characteristics VPD page, which will 3367 * cause this to be updated correctly and any device which 3368 * doesn't support it should be treated as rotational. 3369 */ 3370 blk_queue_flag_clear(QUEUE_FLAG_NONROT, q); 3371 blk_queue_flag_set(QUEUE_FLAG_ADD_RANDOM, q); 3372 3373 if (scsi_device_supports_vpd(sdp)) { 3374 sd_read_block_provisioning(sdkp); 3375 sd_read_block_limits(sdkp); 3376 sd_read_block_characteristics(sdkp); 3377 sd_zbc_read_zones(sdkp, buffer); 3378 sd_read_cpr(sdkp); 3379 } 3380 3381 sd_print_capacity(sdkp, old_capacity); 3382 3383 sd_read_write_protect_flag(sdkp, buffer); 3384 sd_read_cache_type(sdkp, buffer); 3385 sd_read_app_tag_own(sdkp, buffer); 3386 sd_read_write_same(sdkp, buffer); 3387 sd_read_security(sdkp, buffer); 3388 sd_config_protection(sdkp); 3389 } 3390 3391 /* 3392 * We now have all cache related info, determine how we deal 3393 * with flush requests. 3394 */ 3395 sd_set_flush_flag(sdkp); 3396 3397 /* Initial block count limit based on CDB TRANSFER LENGTH field size. */ 3398 dev_max = sdp->use_16_for_rw ? SD_MAX_XFER_BLOCKS : SD_DEF_XFER_BLOCKS; 3399 3400 /* Some devices report a maximum block count for READ/WRITE requests. */ 3401 dev_max = min_not_zero(dev_max, sdkp->max_xfer_blocks); 3402 q->limits.max_dev_sectors = logical_to_sectors(sdp, dev_max); 3403 3404 if (sd_validate_min_xfer_size(sdkp)) 3405 blk_queue_io_min(sdkp->disk->queue, 3406 logical_to_bytes(sdp, sdkp->min_xfer_blocks)); 3407 else 3408 blk_queue_io_min(sdkp->disk->queue, 0); 3409 3410 if (sd_validate_opt_xfer_size(sdkp, dev_max)) { 3411 q->limits.io_opt = logical_to_bytes(sdp, sdkp->opt_xfer_blocks); 3412 rw_max = logical_to_sectors(sdp, sdkp->opt_xfer_blocks); 3413 } else { 3414 q->limits.io_opt = 0; 3415 rw_max = min_not_zero(logical_to_sectors(sdp, dev_max), 3416 (sector_t)BLK_DEF_MAX_SECTORS); 3417 } 3418 3419 /* 3420 * Limit default to SCSI host optimal sector limit if set. There may be 3421 * an impact on performance for when the size of a request exceeds this 3422 * host limit. 3423 */ 3424 rw_max = min_not_zero(rw_max, sdp->host->opt_sectors); 3425 3426 /* Do not exceed controller limit */ 3427 rw_max = min(rw_max, queue_max_hw_sectors(q)); 3428 3429 /* 3430 * Only update max_sectors if previously unset or if the current value 3431 * exceeds the capabilities of the hardware. 3432 */ 3433 if (sdkp->first_scan || 3434 q->limits.max_sectors > q->limits.max_dev_sectors || 3435 q->limits.max_sectors > q->limits.max_hw_sectors) 3436 q->limits.max_sectors = rw_max; 3437 3438 sdkp->first_scan = 0; 3439 3440 set_capacity_and_notify(disk, logical_to_sectors(sdp, sdkp->capacity)); 3441 sd_config_write_same(sdkp); 3442 kfree(buffer); 3443 3444 /* 3445 * For a zoned drive, revalidating the zones can be done only once 3446 * the gendisk capacity is set. So if this fails, set back the gendisk 3447 * capacity to 0. 3448 */ 3449 if (sd_zbc_revalidate_zones(sdkp)) 3450 set_capacity_and_notify(disk, 0); 3451 3452 out: 3453 return 0; 3454 } 3455 3456 /** 3457 * sd_unlock_native_capacity - unlock native capacity 3458 * @disk: struct gendisk to set capacity for 3459 * 3460 * Block layer calls this function if it detects that partitions 3461 * on @disk reach beyond the end of the device. If the SCSI host 3462 * implements ->unlock_native_capacity() method, it's invoked to 3463 * give it a chance to adjust the device capacity. 3464 * 3465 * CONTEXT: 3466 * Defined by block layer. Might sleep. 3467 */ 3468 static void sd_unlock_native_capacity(struct gendisk *disk) 3469 { 3470 struct scsi_device *sdev = scsi_disk(disk)->device; 3471 3472 if (sdev->host->hostt->unlock_native_capacity) 3473 sdev->host->hostt->unlock_native_capacity(sdev); 3474 } 3475 3476 /** 3477 * sd_format_disk_name - format disk name 3478 * @prefix: name prefix - ie. "sd" for SCSI disks 3479 * @index: index of the disk to format name for 3480 * @buf: output buffer 3481 * @buflen: length of the output buffer 3482 * 3483 * SCSI disk names starts at sda. The 26th device is sdz and the 3484 * 27th is sdaa. The last one for two lettered suffix is sdzz 3485 * which is followed by sdaaa. 3486 * 3487 * This is basically 26 base counting with one extra 'nil' entry 3488 * at the beginning from the second digit on and can be 3489 * determined using similar method as 26 base conversion with the 3490 * index shifted -1 after each digit is computed. 3491 * 3492 * CONTEXT: 3493 * Don't care. 3494 * 3495 * RETURNS: 3496 * 0 on success, -errno on failure. 3497 */ 3498 static int sd_format_disk_name(char *prefix, int index, char *buf, int buflen) 3499 { 3500 const int base = 'z' - 'a' + 1; 3501 char *begin = buf + strlen(prefix); 3502 char *end = buf + buflen; 3503 char *p; 3504 int unit; 3505 3506 p = end - 1; 3507 *p = '\0'; 3508 unit = base; 3509 do { 3510 if (p == begin) 3511 return -EINVAL; 3512 *--p = 'a' + (index % unit); 3513 index = (index / unit) - 1; 3514 } while (index >= 0); 3515 3516 memmove(begin, p, end - p); 3517 memcpy(buf, prefix, strlen(prefix)); 3518 3519 return 0; 3520 } 3521 3522 /** 3523 * sd_probe - called during driver initialization and whenever a 3524 * new scsi device is attached to the system. It is called once 3525 * for each scsi device (not just disks) present. 3526 * @dev: pointer to device object 3527 * 3528 * Returns 0 if successful (or not interested in this scsi device 3529 * (e.g. scanner)); 1 when there is an error. 3530 * 3531 * Note: this function is invoked from the scsi mid-level. 3532 * This function sets up the mapping between a given 3533 * <host,channel,id,lun> (found in sdp) and new device name 3534 * (e.g. /dev/sda). More precisely it is the block device major 3535 * and minor number that is chosen here. 3536 * 3537 * Assume sd_probe is not re-entrant (for time being) 3538 * Also think about sd_probe() and sd_remove() running coincidentally. 3539 **/ 3540 static int sd_probe(struct device *dev) 3541 { 3542 struct scsi_device *sdp = to_scsi_device(dev); 3543 struct scsi_disk *sdkp; 3544 struct gendisk *gd; 3545 int index; 3546 int error; 3547 3548 scsi_autopm_get_device(sdp); 3549 error = -ENODEV; 3550 if (sdp->type != TYPE_DISK && 3551 sdp->type != TYPE_ZBC && 3552 sdp->type != TYPE_MOD && 3553 sdp->type != TYPE_RBC) 3554 goto out; 3555 3556 if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED) && sdp->type == TYPE_ZBC) { 3557 sdev_printk(KERN_WARNING, sdp, 3558 "Unsupported ZBC host-managed device.\n"); 3559 goto out; 3560 } 3561 3562 SCSI_LOG_HLQUEUE(3, sdev_printk(KERN_INFO, sdp, 3563 "sd_probe\n")); 3564 3565 error = -ENOMEM; 3566 sdkp = kzalloc(sizeof(*sdkp), GFP_KERNEL); 3567 if (!sdkp) 3568 goto out; 3569 3570 gd = blk_mq_alloc_disk_for_queue(sdp->request_queue, 3571 &sd_bio_compl_lkclass); 3572 if (!gd) 3573 goto out_free; 3574 3575 index = ida_alloc(&sd_index_ida, GFP_KERNEL); 3576 if (index < 0) { 3577 sdev_printk(KERN_WARNING, sdp, "sd_probe: memory exhausted.\n"); 3578 goto out_put; 3579 } 3580 3581 error = sd_format_disk_name("sd", index, gd->disk_name, DISK_NAME_LEN); 3582 if (error) { 3583 sdev_printk(KERN_WARNING, sdp, "SCSI disk (sd) name length exceeded.\n"); 3584 goto out_free_index; 3585 } 3586 3587 sdkp->device = sdp; 3588 sdkp->disk = gd; 3589 sdkp->index = index; 3590 sdkp->max_retries = SD_MAX_RETRIES; 3591 atomic_set(&sdkp->openers, 0); 3592 atomic_set(&sdkp->device->ioerr_cnt, 0); 3593 3594 if (!sdp->request_queue->rq_timeout) { 3595 if (sdp->type != TYPE_MOD) 3596 blk_queue_rq_timeout(sdp->request_queue, SD_TIMEOUT); 3597 else 3598 blk_queue_rq_timeout(sdp->request_queue, 3599 SD_MOD_TIMEOUT); 3600 } 3601 3602 device_initialize(&sdkp->disk_dev); 3603 sdkp->disk_dev.parent = get_device(dev); 3604 sdkp->disk_dev.class = &sd_disk_class; 3605 dev_set_name(&sdkp->disk_dev, "%s", dev_name(dev)); 3606 3607 error = device_add(&sdkp->disk_dev); 3608 if (error) { 3609 put_device(&sdkp->disk_dev); 3610 goto out; 3611 } 3612 3613 dev_set_drvdata(dev, sdkp); 3614 3615 gd->major = sd_major((index & 0xf0) >> 4); 3616 gd->first_minor = ((index & 0xf) << 4) | (index & 0xfff00); 3617 gd->minors = SD_MINORS; 3618 3619 gd->fops = &sd_fops; 3620 gd->private_data = sdkp; 3621 3622 /* defaults, until the device tells us otherwise */ 3623 sdp->sector_size = 512; 3624 sdkp->capacity = 0; 3625 sdkp->media_present = 1; 3626 sdkp->write_prot = 0; 3627 sdkp->cache_override = 0; 3628 sdkp->WCE = 0; 3629 sdkp->RCD = 0; 3630 sdkp->ATO = 0; 3631 sdkp->first_scan = 1; 3632 sdkp->max_medium_access_timeouts = SD_MAX_MEDIUM_TIMEOUTS; 3633 3634 sd_revalidate_disk(gd); 3635 3636 if (sdp->removable) { 3637 gd->flags |= GENHD_FL_REMOVABLE; 3638 gd->events |= DISK_EVENT_MEDIA_CHANGE; 3639 gd->event_flags = DISK_EVENT_FLAG_POLL | DISK_EVENT_FLAG_UEVENT; 3640 } 3641 3642 blk_pm_runtime_init(sdp->request_queue, dev); 3643 if (sdp->rpm_autosuspend) { 3644 pm_runtime_set_autosuspend_delay(dev, 3645 sdp->host->hostt->rpm_autosuspend_delay); 3646 } 3647 3648 error = device_add_disk(dev, gd, NULL); 3649 if (error) { 3650 put_device(&sdkp->disk_dev); 3651 put_disk(gd); 3652 goto out; 3653 } 3654 3655 if (sdkp->security) { 3656 sdkp->opal_dev = init_opal_dev(sdkp, &sd_sec_submit); 3657 if (sdkp->opal_dev) 3658 sd_printk(KERN_NOTICE, sdkp, "supports TCG Opal\n"); 3659 } 3660 3661 sd_printk(KERN_NOTICE, sdkp, "Attached SCSI %sdisk\n", 3662 sdp->removable ? "removable " : ""); 3663 scsi_autopm_put_device(sdp); 3664 3665 return 0; 3666 3667 out_free_index: 3668 ida_free(&sd_index_ida, index); 3669 out_put: 3670 put_disk(gd); 3671 out_free: 3672 kfree(sdkp); 3673 out: 3674 scsi_autopm_put_device(sdp); 3675 return error; 3676 } 3677 3678 /** 3679 * sd_remove - called whenever a scsi disk (previously recognized by 3680 * sd_probe) is detached from the system. It is called (potentially 3681 * multiple times) during sd module unload. 3682 * @dev: pointer to device object 3683 * 3684 * Note: this function is invoked from the scsi mid-level. 3685 * This function potentially frees up a device name (e.g. /dev/sdc) 3686 * that could be re-used by a subsequent sd_probe(). 3687 * This function is not called when the built-in sd driver is "exit-ed". 3688 **/ 3689 static int sd_remove(struct device *dev) 3690 { 3691 struct scsi_disk *sdkp = dev_get_drvdata(dev); 3692 3693 scsi_autopm_get_device(sdkp->device); 3694 3695 device_del(&sdkp->disk_dev); 3696 del_gendisk(sdkp->disk); 3697 sd_shutdown(dev); 3698 3699 put_disk(sdkp->disk); 3700 return 0; 3701 } 3702 3703 static void scsi_disk_release(struct device *dev) 3704 { 3705 struct scsi_disk *sdkp = to_scsi_disk(dev); 3706 3707 ida_free(&sd_index_ida, sdkp->index); 3708 sd_zbc_free_zone_info(sdkp); 3709 put_device(&sdkp->device->sdev_gendev); 3710 free_opal_dev(sdkp->opal_dev); 3711 3712 kfree(sdkp); 3713 } 3714 3715 static int sd_start_stop_device(struct scsi_disk *sdkp, int start) 3716 { 3717 unsigned char cmd[6] = { START_STOP }; /* START_VALID */ 3718 struct scsi_sense_hdr sshdr; 3719 const struct scsi_exec_args exec_args = { 3720 .sshdr = &sshdr, 3721 .req_flags = BLK_MQ_REQ_PM, 3722 }; 3723 struct scsi_device *sdp = sdkp->device; 3724 int res; 3725 3726 if (start) 3727 cmd[4] |= 1; /* START */ 3728 3729 if (sdp->start_stop_pwr_cond) 3730 cmd[4] |= start ? 1 << 4 : 3 << 4; /* Active or Standby */ 3731 3732 if (!scsi_device_online(sdp)) 3733 return -ENODEV; 3734 3735 res = scsi_execute_cmd(sdp, cmd, REQ_OP_DRV_IN, NULL, 0, SD_TIMEOUT, 3736 sdkp->max_retries, &exec_args); 3737 if (res) { 3738 sd_print_result(sdkp, "Start/Stop Unit failed", res); 3739 if (res > 0 && scsi_sense_valid(&sshdr)) { 3740 sd_print_sense_hdr(sdkp, &sshdr); 3741 /* 0x3a is medium not present */ 3742 if (sshdr.asc == 0x3a) 3743 res = 0; 3744 } 3745 } 3746 3747 /* SCSI error codes must not go to the generic layer */ 3748 if (res) 3749 return -EIO; 3750 3751 return 0; 3752 } 3753 3754 /* 3755 * Send a SYNCHRONIZE CACHE instruction down to the device through 3756 * the normal SCSI command structure. Wait for the command to 3757 * complete. 3758 */ 3759 static void sd_shutdown(struct device *dev) 3760 { 3761 struct scsi_disk *sdkp = dev_get_drvdata(dev); 3762 3763 if (!sdkp) 3764 return; /* this can happen */ 3765 3766 if (pm_runtime_suspended(dev)) 3767 return; 3768 3769 if (sdkp->WCE && sdkp->media_present) { 3770 sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n"); 3771 sd_sync_cache(sdkp, NULL); 3772 } 3773 3774 if (system_state != SYSTEM_RESTART && sdkp->device->manage_start_stop) { 3775 sd_printk(KERN_NOTICE, sdkp, "Stopping disk\n"); 3776 sd_start_stop_device(sdkp, 0); 3777 } 3778 } 3779 3780 static int sd_suspend_common(struct device *dev, bool ignore_stop_errors) 3781 { 3782 struct scsi_disk *sdkp = dev_get_drvdata(dev); 3783 struct scsi_sense_hdr sshdr; 3784 int ret = 0; 3785 3786 if (!sdkp) /* E.g.: runtime suspend following sd_remove() */ 3787 return 0; 3788 3789 if (sdkp->WCE && sdkp->media_present) { 3790 if (!sdkp->device->silence_suspend) 3791 sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n"); 3792 ret = sd_sync_cache(sdkp, &sshdr); 3793 3794 if (ret) { 3795 /* ignore OFFLINE device */ 3796 if (ret == -ENODEV) 3797 return 0; 3798 3799 if (!scsi_sense_valid(&sshdr) || 3800 sshdr.sense_key != ILLEGAL_REQUEST) 3801 return ret; 3802 3803 /* 3804 * sshdr.sense_key == ILLEGAL_REQUEST means this drive 3805 * doesn't support sync. There's not much to do and 3806 * suspend shouldn't fail. 3807 */ 3808 ret = 0; 3809 } 3810 } 3811 3812 if (sdkp->device->manage_start_stop) { 3813 if (!sdkp->device->silence_suspend) 3814 sd_printk(KERN_NOTICE, sdkp, "Stopping disk\n"); 3815 /* an error is not worth aborting a system sleep */ 3816 ret = sd_start_stop_device(sdkp, 0); 3817 if (ignore_stop_errors) 3818 ret = 0; 3819 } 3820 3821 return ret; 3822 } 3823 3824 static int sd_suspend_system(struct device *dev) 3825 { 3826 if (pm_runtime_suspended(dev)) 3827 return 0; 3828 3829 return sd_suspend_common(dev, true); 3830 } 3831 3832 static int sd_suspend_runtime(struct device *dev) 3833 { 3834 return sd_suspend_common(dev, false); 3835 } 3836 3837 static int sd_resume(struct device *dev) 3838 { 3839 struct scsi_disk *sdkp = dev_get_drvdata(dev); 3840 int ret = 0; 3841 3842 if (!sdkp) /* E.g.: runtime resume at the start of sd_probe() */ 3843 return 0; 3844 3845 if (!sdkp->device->manage_start_stop) 3846 return 0; 3847 3848 if (!sdkp->device->no_start_on_resume) { 3849 sd_printk(KERN_NOTICE, sdkp, "Starting disk\n"); 3850 ret = sd_start_stop_device(sdkp, 1); 3851 } 3852 3853 if (!ret) 3854 opal_unlock_from_suspend(sdkp->opal_dev); 3855 return ret; 3856 } 3857 3858 static int sd_resume_system(struct device *dev) 3859 { 3860 if (pm_runtime_suspended(dev)) 3861 return 0; 3862 3863 return sd_resume(dev); 3864 } 3865 3866 static int sd_resume_runtime(struct device *dev) 3867 { 3868 struct scsi_disk *sdkp = dev_get_drvdata(dev); 3869 struct scsi_device *sdp; 3870 3871 if (!sdkp) /* E.g.: runtime resume at the start of sd_probe() */ 3872 return 0; 3873 3874 sdp = sdkp->device; 3875 3876 if (sdp->ignore_media_change) { 3877 /* clear the device's sense data */ 3878 static const u8 cmd[10] = { REQUEST_SENSE }; 3879 const struct scsi_exec_args exec_args = { 3880 .req_flags = BLK_MQ_REQ_PM, 3881 }; 3882 3883 if (scsi_execute_cmd(sdp, cmd, REQ_OP_DRV_IN, NULL, 0, 3884 sdp->request_queue->rq_timeout, 1, 3885 &exec_args)) 3886 sd_printk(KERN_NOTICE, sdkp, 3887 "Failed to clear sense data\n"); 3888 } 3889 3890 return sd_resume(dev); 3891 } 3892 3893 static const struct dev_pm_ops sd_pm_ops = { 3894 .suspend = sd_suspend_system, 3895 .resume = sd_resume_system, 3896 .poweroff = sd_suspend_system, 3897 .restore = sd_resume_system, 3898 .runtime_suspend = sd_suspend_runtime, 3899 .runtime_resume = sd_resume_runtime, 3900 }; 3901 3902 static struct scsi_driver sd_template = { 3903 .gendrv = { 3904 .name = "sd", 3905 .owner = THIS_MODULE, 3906 .probe = sd_probe, 3907 .probe_type = PROBE_PREFER_ASYNCHRONOUS, 3908 .remove = sd_remove, 3909 .shutdown = sd_shutdown, 3910 .pm = &sd_pm_ops, 3911 }, 3912 .rescan = sd_rescan, 3913 .init_command = sd_init_command, 3914 .uninit_command = sd_uninit_command, 3915 .done = sd_done, 3916 .eh_action = sd_eh_action, 3917 .eh_reset = sd_eh_reset, 3918 }; 3919 3920 /** 3921 * init_sd - entry point for this driver (both when built in or when 3922 * a module). 3923 * 3924 * Note: this function registers this driver with the scsi mid-level. 3925 **/ 3926 static int __init init_sd(void) 3927 { 3928 int majors = 0, i, err; 3929 3930 SCSI_LOG_HLQUEUE(3, printk("init_sd: sd driver entry point\n")); 3931 3932 for (i = 0; i < SD_MAJORS; i++) { 3933 if (__register_blkdev(sd_major(i), "sd", sd_default_probe)) 3934 continue; 3935 majors++; 3936 } 3937 3938 if (!majors) 3939 return -ENODEV; 3940 3941 err = class_register(&sd_disk_class); 3942 if (err) 3943 goto err_out; 3944 3945 sd_page_pool = mempool_create_page_pool(SD_MEMPOOL_SIZE, 0); 3946 if (!sd_page_pool) { 3947 printk(KERN_ERR "sd: can't init discard page pool\n"); 3948 err = -ENOMEM; 3949 goto err_out_class; 3950 } 3951 3952 err = scsi_register_driver(&sd_template.gendrv); 3953 if (err) 3954 goto err_out_driver; 3955 3956 return 0; 3957 3958 err_out_driver: 3959 mempool_destroy(sd_page_pool); 3960 err_out_class: 3961 class_unregister(&sd_disk_class); 3962 err_out: 3963 for (i = 0; i < SD_MAJORS; i++) 3964 unregister_blkdev(sd_major(i), "sd"); 3965 return err; 3966 } 3967 3968 /** 3969 * exit_sd - exit point for this driver (when it is a module). 3970 * 3971 * Note: this function unregisters this driver from the scsi mid-level. 3972 **/ 3973 static void __exit exit_sd(void) 3974 { 3975 int i; 3976 3977 SCSI_LOG_HLQUEUE(3, printk("exit_sd: exiting sd driver\n")); 3978 3979 scsi_unregister_driver(&sd_template.gendrv); 3980 mempool_destroy(sd_page_pool); 3981 3982 class_unregister(&sd_disk_class); 3983 3984 for (i = 0; i < SD_MAJORS; i++) 3985 unregister_blkdev(sd_major(i), "sd"); 3986 } 3987 3988 module_init(init_sd); 3989 module_exit(exit_sd); 3990 3991 void sd_print_sense_hdr(struct scsi_disk *sdkp, struct scsi_sense_hdr *sshdr) 3992 { 3993 scsi_print_sense_hdr(sdkp->device, 3994 sdkp->disk ? sdkp->disk->disk_name : NULL, sshdr); 3995 } 3996 3997 void sd_print_result(const struct scsi_disk *sdkp, const char *msg, int result) 3998 { 3999 const char *hb_string = scsi_hostbyte_string(result); 4000 4001 if (hb_string) 4002 sd_printk(KERN_INFO, sdkp, 4003 "%s: Result: hostbyte=%s driverbyte=%s\n", msg, 4004 hb_string ? hb_string : "invalid", 4005 "DRIVER_OK"); 4006 else 4007 sd_printk(KERN_INFO, sdkp, 4008 "%s: Result: hostbyte=0x%02x driverbyte=%s\n", 4009 msg, host_byte(result), "DRIVER_OK"); 4010 } 4011