// SPDX-License-Identifier: GPL-2.0-only
/*
 *	sd.c Copyright (C) 1992 Drew Eckhardt
 *	     Copyright (C) 1993, 1994, 1995, 1999 Eric Youngdale
 *
 *	Linux scsi disk driver
 *		Initial versions: Drew Eckhardt
 *		Subsequent revisions: Eric Youngdale
 *	Modification history:
 *	- Drew Eckhardt <drew@colorado.edu> original
 *	- Eric Youngdale <eric@andante.org> add scatter-gather, multiple
 *	  outstanding request, and other enhancements.
 *	  Support loadable low-level scsi drivers.
 *	- Jirka Hanika <geo@ff.cuni.cz> support more scsi disks using
 *	  eight major numbers.
 *	- Richard Gooch <rgooch@atnf.csiro.au> support devfs.
 *	- Torben Mathiasen <tmm@image.dk> Resource allocation fixes in
 *	  sd_init and cleanups.
 *	- Alex Davis <letmein@erols.com> Fix problem where partition info
 *	  not being read in sd_open. Fix problem where removable media
 *	  could be ejected after sd_open.
 *	- Douglas Gilbert <dgilbert@interlog.com> cleanup for lk 2.5.x
 *	- Badari Pulavarty <pbadari@us.ibm.com>, Matthew Wilcox
 *	  <willy@debian.org>, Kurt Garloff <garloff@suse.de>:
 *	  Support 32k/1M disks.
 *
 *	Logging policy (needs CONFIG_SCSI_LOGGING defined):
 *	- setting up transfer: SCSI_LOG_HLQUEUE levels 1 and 2
 *	- end of transfer (bh + scsi_lib): SCSI_LOG_HLCOMPLETE level 1
 *	- entering sd_ioctl: SCSI_LOG_IOCTL level 1
 *	- entering other commands: SCSI_LOG_HLQUEUE level 3
 *	Note: when the logging level is set by the user, it must be greater
 *	than the level indicated above to trigger output.
 */

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/bio.h>
#include <linux/genhd.h>
#include <linux/hdreg.h>
#include <linux/errno.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/blkpg.h>
#include <linux/blk-pm.h>
#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/string_helpers.h>
#include <linux/async.h>
#include <linux/slab.h>
#include <linux/sed-opal.h>
#include <linux/pm_runtime.h>
#include <linux/pr.h>
#include <linux/t10-pi.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_ioctl.h>
#include <scsi/scsicam.h>

#include "sd.h"
#include "scsi_priv.h"
#include "scsi_logging.h"

MODULE_AUTHOR("Eric Youngdale");
MODULE_DESCRIPTION("SCSI disk (sd) driver");
MODULE_LICENSE("GPL");

MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK0_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK1_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK2_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK3_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK4_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK5_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK6_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK7_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK8_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK9_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK10_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK11_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK12_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK13_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK14_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK15_MAJOR);
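
/*
 * Device type aliases: the module can be auto-loaded when the SCSI midlayer
 * reports a device of one of the disk-like types below (regular disk,
 * magneto-optical, RBC, and zoned block devices).
 */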
MODULE_ALIAS_SCSI_DEVICE(TYPE_DISK);
MODULE_ALIAS_SCSI_DEVICE(TYPE_MOD);
MODULE_ALIAS_SCSI_DEVICE(TYPE_RBC);
MODULE_ALIAS_SCSI_DEVICE(TYPE_ZBC);

#if !defined(CONFIG_DEBUG_BLOCK_EXT_DEVT)
#define SD_MINORS	16
#else
#define SD_MINORS	0
#endif

static void sd_config_discard(struct scsi_disk *, unsigned int);
static void sd_config_write_same(struct scsi_disk *);
static int  sd_revalidate_disk(struct gendisk *);
static void sd_unlock_native_capacity(struct gendisk *disk);
static int  sd_probe(struct device *);
static int  sd_remove(struct device *);
static void sd_shutdown(struct device *);
static int sd_suspend_system(struct device *);
static int sd_suspend_runtime(struct device *);
static int sd_resume(struct device *);
static void sd_rescan(struct device *);
static blk_status_t sd_init_command(struct scsi_cmnd *SCpnt);
static void sd_uninit_command(struct scsi_cmnd *SCpnt);
static int sd_done(struct scsi_cmnd *);
static void sd_eh_reset(struct scsi_cmnd *);
static int sd_eh_action(struct scsi_cmnd *, int);
static void sd_read_capacity(struct scsi_disk *sdkp, unsigned char *buffer);
static void scsi_disk_release(struct device *cdev);

static DEFINE_IDA(sd_index_ida);

/* This mutex is used to mediate the 0->1 reference get in the
 * face of object destruction (i.e. we can't allow a get on an
 * object after last put) */
static DEFINE_MUTEX(sd_ref_mutex);

static struct kmem_cache *sd_cdb_cache;
static mempool_t *sd_cdb_pool;
static mempool_t *sd_page_pool;

static const char *sd_cache_types[] = {
	"write through", "none", "write back",
	"write back, no read (daft)"
};

static void sd_set_flush_flag(struct scsi_disk *sdkp)
{
	bool wc = false, fua = false;

	if (sdkp->WCE) {
		wc = true;
		if (sdkp->DPOFUA)
			fua = true;
	}

	blk_queue_write_cache(sdkp->disk->queue, wc, fua);
}

static ssize_t
cache_type_store(struct device *dev, struct device_attribute *attr,
		 const char *buf, size_t count)
{
	int ct, rcd, wce, sp;
	struct scsi_disk *sdkp = to_scsi_disk(dev);
	struct scsi_device *sdp = sdkp->device;
	char buffer[64];
	char *buffer_data;
	struct scsi_mode_data data;
	struct scsi_sense_hdr sshdr;
	static const char temp[] = "temporary ";
	int len;

	if (sdp->type != TYPE_DISK && sdp->type != TYPE_ZBC)
		/* no cache control on RBC devices; theoretically they
		 * can do it, but there's probably so many exceptions
		 * it's not worth the risk */
		return -EINVAL;

	if (strncmp(buf, temp, sizeof(temp) - 1) == 0) {
		buf += sizeof(temp) - 1;
		sdkp->cache_override = 1;
	} else {
		sdkp->cache_override = 0;
	}

	ct = sysfs_match_string(sd_cache_types, buf);
	if (ct < 0)
		return -EINVAL;

	rcd = ct & 0x01 ? 1 : 0;
	wce = (ct & 0x02) && !sdkp->write_prot ?
		1 : 0;

	if (sdkp->cache_override) {
		sdkp->WCE = wce;
		sdkp->RCD = rcd;
		sd_set_flush_flag(sdkp);
		return count;
	}

	if (scsi_mode_sense(sdp, 0x08, 8, buffer, sizeof(buffer), SD_TIMEOUT,
			    SD_MAX_RETRIES, &data, NULL))
		return -EINVAL;
	len = min_t(size_t, sizeof(buffer), data.length - data.header_length -
		    data.block_descriptor_length);
	buffer_data = buffer + data.header_length +
		      data.block_descriptor_length;
	buffer_data[2] &= ~0x05;
	buffer_data[2] |= wce << 2 | rcd;
	sp = buffer_data[0] & 0x80 ? 1 : 0;
	buffer_data[0] &= ~0x80;

	/*
	 * Ensure WP, DPOFUA, and RESERVED fields are cleared in
	 * received mode parameter buffer before doing MODE SELECT.
	 */
	data.device_specific = 0;

	if (scsi_mode_select(sdp, 1, sp, 8, buffer_data, len, SD_TIMEOUT,
			     SD_MAX_RETRIES, &data, &sshdr)) {
		if (scsi_sense_valid(&sshdr))
			sd_print_sense_hdr(sdkp, &sshdr);
		return -EINVAL;
	}
	revalidate_disk(sdkp->disk);
	return count;
}

static ssize_t
manage_start_stop_show(struct device *dev, struct device_attribute *attr,
		       char *buf)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);
	struct scsi_device *sdp = sdkp->device;

	return sprintf(buf, "%u\n", sdp->manage_start_stop);
}

static ssize_t
manage_start_stop_store(struct device *dev, struct device_attribute *attr,
			const char *buf, size_t count)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);
	struct scsi_device *sdp = sdkp->device;
	bool v;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	if (kstrtobool(buf, &v))
		return -EINVAL;

	sdp->manage_start_stop = v;

	return count;
}
static DEVICE_ATTR_RW(manage_start_stop);

static ssize_t
allow_restart_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);

	return sprintf(buf, "%u\n", sdkp->device->allow_restart);
}

static ssize_t
allow_restart_store(struct device *dev, struct device_attribute *attr,
		    const char *buf, size_t count)
{
	bool v;
	struct scsi_disk *sdkp = to_scsi_disk(dev);
	struct scsi_device *sdp = sdkp->device;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	if (sdp->type != TYPE_DISK && sdp->type != TYPE_ZBC)
		return -EINVAL;

	if (kstrtobool(buf, &v))
		return -EINVAL;

	sdp->allow_restart = v;

	return count;
}
static DEVICE_ATTR_RW(allow_restart);

static ssize_t
cache_type_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);
	int ct = sdkp->RCD + 2*sdkp->WCE;

	return sprintf(buf, "%s\n", sd_cache_types[ct]);
}
static DEVICE_ATTR_RW(cache_type);

static ssize_t
FUA_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);

	return sprintf(buf, "%u\n", sdkp->DPOFUA);
}
static DEVICE_ATTR_RO(FUA);

static ssize_t
protection_type_show(struct device *dev, struct device_attribute *attr,
		     char *buf)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);

	return sprintf(buf, "%u\n", sdkp->protection_type);
}

static ssize_t
protection_type_store(struct device *dev, struct device_attribute *attr,
		      const char *buf, size_t count)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);
	unsigned int val;
	int err;
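	/*
	 * Note: values above T10_PI_TYPE3_PROTECTION are accepted but not
	 * stored, so the protection type reported by the device stays in
	 * effect in that case.
	 */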

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	err = kstrtouint(buf, 10, &val);

	if (err)
		return err;

	if (val <= T10_PI_TYPE3_PROTECTION)
		sdkp->protection_type = val;

	return count;
}
static DEVICE_ATTR_RW(protection_type);

static ssize_t
protection_mode_show(struct device *dev, struct device_attribute *attr,
		     char *buf)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);
	struct scsi_device *sdp = sdkp->device;
	unsigned int dif, dix;

	dif = scsi_host_dif_capable(sdp->host, sdkp->protection_type);
	dix = scsi_host_dix_capable(sdp->host, sdkp->protection_type);

	if (!dix && scsi_host_dix_capable(sdp->host, T10_PI_TYPE0_PROTECTION)) {
		dif = 0;
		dix = 1;
	}

	if (!dif && !dix)
		return sprintf(buf, "none\n");

	return sprintf(buf, "%s%u\n", dix ? "dix" : "dif", dif);
}
static DEVICE_ATTR_RO(protection_mode);

static ssize_t
app_tag_own_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);

	return sprintf(buf, "%u\n", sdkp->ATO);
}
static DEVICE_ATTR_RO(app_tag_own);

static ssize_t
thin_provisioning_show(struct device *dev, struct device_attribute *attr,
		       char *buf)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);

	return sprintf(buf, "%u\n", sdkp->lbpme);
}
static DEVICE_ATTR_RO(thin_provisioning);

/* sysfs_match_string() requires dense arrays */
static const char *lbp_mode[] = {
	[SD_LBP_FULL]		= "full",
	[SD_LBP_UNMAP]		= "unmap",
	[SD_LBP_WS16]		= "writesame_16",
	[SD_LBP_WS10]		= "writesame_10",
	[SD_LBP_ZERO]		= "writesame_zero",
	[SD_LBP_DISABLE]	= "disabled",
};

static ssize_t
provisioning_mode_show(struct device *dev, struct device_attribute *attr,
		       char *buf)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);

	return sprintf(buf, "%s\n", lbp_mode[sdkp->provisioning_mode]);
}

static ssize_t
provisioning_mode_store(struct device *dev, struct device_attribute *attr,
			const char *buf, size_t count)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);
	struct scsi_device *sdp = sdkp->device;
	int mode;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	if (sd_is_zoned(sdkp)) {
		sd_config_discard(sdkp, SD_LBP_DISABLE);
		return count;
	}

	if (sdp->type != TYPE_DISK)
		return -EINVAL;

	mode = sysfs_match_string(lbp_mode, buf);
	if (mode < 0)
		return -EINVAL;

	sd_config_discard(sdkp, mode);

	return count;
}
static DEVICE_ATTR_RW(provisioning_mode);

/* sysfs_match_string() requires dense arrays */
static const char *zeroing_mode[] = {
	[SD_ZERO_WRITE]		= "write",
	[SD_ZERO_WS]		= "writesame",
	[SD_ZERO_WS16_UNMAP]	= "writesame_16_unmap",
	[SD_ZERO_WS10_UNMAP]	= "writesame_10_unmap",
};

static ssize_t
zeroing_mode_show(struct device *dev, struct device_attribute *attr,
		  char *buf)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);

	return sprintf(buf, "%s\n", zeroing_mode[sdkp->zeroing_mode]);
}

static ssize_t
zeroing_mode_store(struct device *dev, struct device_attribute *attr,
		   const char *buf, size_t count)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);
	int mode;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	mode = sysfs_match_string(zeroing_mode, buf);
	if (mode < 0)
		return -EINVAL;

	sdkp->zeroing_mode = mode;

	return count;
}
static DEVICE_ATTR_RW(zeroing_mode);

static ssize_t
max_medium_access_timeouts_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);

	return sprintf(buf, "%u\n", sdkp->max_medium_access_timeouts);
}

static ssize_t
max_medium_access_timeouts_store(struct device *dev,
				 struct device_attribute *attr, const char *buf,
				 size_t count)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);
	int err;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	err = kstrtouint(buf, 10, &sdkp->max_medium_access_timeouts);

	return err ? err : count;
}
static DEVICE_ATTR_RW(max_medium_access_timeouts);

static ssize_t
max_write_same_blocks_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);

	return sprintf(buf, "%u\n", sdkp->max_ws_blocks);
}

static ssize_t
max_write_same_blocks_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);
	struct scsi_device *sdp = sdkp->device;
	unsigned long max;
	int err;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	if (sdp->type != TYPE_DISK && sdp->type != TYPE_ZBC)
		return -EINVAL;

	err = kstrtoul(buf, 10, &max);

	if (err)
		return err;

	if (max == 0)
		sdp->no_write_same = 1;
	else if (max <= SD_MAX_WS16_BLOCKS) {
		sdp->no_write_same = 0;
		sdkp->max_ws_blocks = max;
	}

	sd_config_write_same(sdkp);

	return count;
}
static DEVICE_ATTR_RW(max_write_same_blocks);

static struct attribute *sd_disk_attrs[] = {
	&dev_attr_cache_type.attr,
	&dev_attr_FUA.attr,
	&dev_attr_allow_restart.attr,
	&dev_attr_manage_start_stop.attr,
	&dev_attr_protection_type.attr,
	&dev_attr_protection_mode.attr,
	&dev_attr_app_tag_own.attr,
	&dev_attr_thin_provisioning.attr,
	&dev_attr_provisioning_mode.attr,
	&dev_attr_zeroing_mode.attr,
	&dev_attr_max_write_same_blocks.attr,
	&dev_attr_max_medium_access_timeouts.attr,
	NULL,
};
ATTRIBUTE_GROUPS(sd_disk);

static struct class sd_disk_class = {
	.name		= "scsi_disk",
	.owner		= THIS_MODULE,
	.dev_release	= scsi_disk_release,
	.dev_groups	= sd_disk_groups,
};

static const struct dev_pm_ops sd_pm_ops = {
	.suspend		= sd_suspend_system,
	.resume			= sd_resume,
	.poweroff		= sd_suspend_system,
	.restore		= sd_resume,
	.runtime_suspend	= sd_suspend_runtime,
	.runtime_resume		= sd_resume,
};

static struct scsi_driver sd_template = {
	.gendrv = {
		.name		= "sd",
		.owner		= THIS_MODULE,
		.probe		= sd_probe,
		.probe_type	= PROBE_PREFER_ASYNCHRONOUS,
		.remove		= sd_remove,
		.shutdown	= sd_shutdown,
		.pm		= &sd_pm_ops,
	},
	.rescan			= sd_rescan,
	.init_command		= sd_init_command,
	.uninit_command		= sd_uninit_command,
	.done			= sd_done,
	.eh_action		= sd_eh_action,
	.eh_reset		= sd_eh_reset,
};

/*
 * Dummy kobj_map->probe function.
 * The default ->probe function will call modprobe, which is
 * pointless as this module is already loaded.
 */
static struct kobject *sd_default_probe(dev_t devt, int *partno, void *data)
{
	return NULL;
}

/*
 * Device number to disk mapping:
 *
 *       major         disc2     disc  p1
 *   |............|.............|....|....| <- dev_t
 *    31        20 19          8 7  4 3  0
 *
 * Inside a major, we have 16k disks, however mapped non-
 * contiguously. The first 16 disks are for major0, the next
 * ones with major1, ... Disk 256 is for major0 again, disk 272
 * for major1, ...
 * As we stay compatible with our numbering scheme, we can reuse
 * the well-known SCSI majors 8, 65--71, 136--143.
 */
static int sd_major(int major_idx)
{
	switch (major_idx) {
	case 0:
		return SCSI_DISK0_MAJOR;
	case 1 ... 7:
		return SCSI_DISK1_MAJOR + major_idx - 1;
	case 8 ... 15:
		return SCSI_DISK8_MAJOR + major_idx - 8;
	default:
		BUG();
		return 0;	/* shut up gcc */
	}
}

static struct scsi_disk *scsi_disk_get(struct gendisk *disk)
{
	struct scsi_disk *sdkp = NULL;

	mutex_lock(&sd_ref_mutex);

	if (disk->private_data) {
		sdkp = scsi_disk(disk);
		if (scsi_device_get(sdkp->device) == 0)
			get_device(&sdkp->dev);
		else
			sdkp = NULL;
	}
	mutex_unlock(&sd_ref_mutex);
	return sdkp;
}

static void scsi_disk_put(struct scsi_disk *sdkp)
{
	struct scsi_device *sdev = sdkp->device;

	mutex_lock(&sd_ref_mutex);
	put_device(&sdkp->dev);
	scsi_device_put(sdev);
	mutex_unlock(&sd_ref_mutex);
}

#ifdef CONFIG_BLK_SED_OPAL
static int sd_sec_submit(void *data, u16 spsp, u8 secp, void *buffer,
		size_t len, bool send)
{
	struct scsi_device *sdev = data;
	u8 cdb[12] = { 0, };
	int ret;

	cdb[0] = send ? SECURITY_PROTOCOL_OUT : SECURITY_PROTOCOL_IN;
	cdb[1] = secp;
	put_unaligned_be16(spsp, &cdb[2]);
	put_unaligned_be32(len, &cdb[6]);

	ret = scsi_execute_req(sdev, cdb,
			send ? DMA_TO_DEVICE : DMA_FROM_DEVICE,
			buffer, len, NULL, SD_TIMEOUT, SD_MAX_RETRIES, NULL);
	return ret <= 0 ? ret : -EIO;
}
#endif /* CONFIG_BLK_SED_OPAL */

/*
 * Look up the DIX operation based on whether the command is read or
 * write and whether dix and dif are enabled.
 */
static unsigned int sd_prot_op(bool write, bool dix, bool dif)
{
	/* Lookup table: bit 2 (write), bit 1 (dix), bit 0 (dif) */
	static const unsigned int ops[] = {	/* wrt dix dif */
		SCSI_PROT_NORMAL,		/*  0   0   0  */
		SCSI_PROT_READ_STRIP,		/*  0   0   1  */
		SCSI_PROT_READ_INSERT,		/*  0   1   0  */
		SCSI_PROT_READ_PASS,		/*  0   1   1  */
		SCSI_PROT_NORMAL,		/*  1   0   0  */
		SCSI_PROT_WRITE_INSERT,		/*  1   0   1  */
		SCSI_PROT_WRITE_STRIP,		/*  1   1   0  */
		SCSI_PROT_WRITE_PASS,		/*  1   1   1  */
	};

	return ops[write << 2 | dix << 1 | dif];
}

/*
 * Returns a mask of the protection flags that are valid for a given DIX
 * operation.
 */
static unsigned int sd_prot_flag_mask(unsigned int prot_op)
{
	static const unsigned int flag_mask[] = {
		[SCSI_PROT_NORMAL]		= 0,

		[SCSI_PROT_READ_STRIP]		= SCSI_PROT_TRANSFER_PI |
						  SCSI_PROT_GUARD_CHECK |
						  SCSI_PROT_REF_CHECK |
						  SCSI_PROT_REF_INCREMENT,

		[SCSI_PROT_READ_INSERT]		= SCSI_PROT_REF_INCREMENT |
						  SCSI_PROT_IP_CHECKSUM,

		[SCSI_PROT_READ_PASS]		= SCSI_PROT_TRANSFER_PI |
						  SCSI_PROT_GUARD_CHECK |
						  SCSI_PROT_REF_CHECK |
						  SCSI_PROT_REF_INCREMENT |
						  SCSI_PROT_IP_CHECKSUM,

		[SCSI_PROT_WRITE_INSERT]	= SCSI_PROT_TRANSFER_PI |
						  SCSI_PROT_REF_INCREMENT,

		[SCSI_PROT_WRITE_STRIP]		= SCSI_PROT_GUARD_CHECK |
						  SCSI_PROT_REF_CHECK |
						  SCSI_PROT_REF_INCREMENT |
						  SCSI_PROT_IP_CHECKSUM,

		[SCSI_PROT_WRITE_PASS]		= SCSI_PROT_TRANSFER_PI |
						  SCSI_PROT_GUARD_CHECK |
						  SCSI_PROT_REF_CHECK |
						  SCSI_PROT_REF_INCREMENT |
						  SCSI_PROT_IP_CHECKSUM,
	};

	return flag_mask[prot_op];
}

static unsigned char sd_setup_protect_cmnd(struct scsi_cmnd *scmd,
					   unsigned int dix, unsigned int dif)
{
	struct bio *bio = scmd->request->bio;
	unsigned int prot_op = sd_prot_op(rq_data_dir(scmd->request), dix, dif);
	unsigned int protect = 0;

	if (dix) {				/* DIX Type 0, 1, 2, 3 */
		if (bio_integrity_flagged(bio, BIP_IP_CHECKSUM))
			scmd->prot_flags |= SCSI_PROT_IP_CHECKSUM;

		if (bio_integrity_flagged(bio, BIP_CTRL_NOCHECK) == false)
			scmd->prot_flags |= SCSI_PROT_GUARD_CHECK;
	}

	if (dif != T10_PI_TYPE3_PROTECTION) {	/* DIX/DIF Type 0, 1, 2 */
		scmd->prot_flags |= SCSI_PROT_REF_INCREMENT;

		if (bio_integrity_flagged(bio, BIP_CTRL_NOCHECK) == false)
			scmd->prot_flags |= SCSI_PROT_REF_CHECK;
	}

	if (dif) {				/* DIX/DIF Type 1, 2, 3 */
		scmd->prot_flags |= SCSI_PROT_TRANSFER_PI;

		if (bio_integrity_flagged(bio, BIP_DISK_NOCHECK))
			protect = 3 << 5;	/* Disable target PI checking */
		else
			protect = 1 << 5;	/* Enable target PI checking */
	}

	scsi_set_prot_op(scmd, prot_op);
	scsi_set_prot_type(scmd, dif);
	scmd->prot_flags &= sd_prot_flag_mask(prot_op);

	return protect;
}

static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode)
{
	struct request_queue *q = sdkp->disk->queue;
	unsigned int logical_block_size = sdkp->device->sector_size;
	unsigned int max_blocks = 0;

	q->limits.discard_alignment =
		sdkp->unmap_alignment * logical_block_size;
	q->limits.discard_granularity =
		max(sdkp->physical_block_size,
		    sdkp->unmap_granularity * logical_block_size);
	sdkp->provisioning_mode = mode;

	switch (mode) {

	case SD_LBP_FULL:
	case SD_LBP_DISABLE:
		blk_queue_max_discard_sectors(q, 0);
		blk_queue_flag_clear(QUEUE_FLAG_DISCARD, q);
		return;

	case SD_LBP_UNMAP:
		max_blocks = min_not_zero(sdkp->max_unmap_blocks,
					  (u32)SD_MAX_WS16_BLOCKS);
		break;

	case SD_LBP_WS16:
		if (sdkp->device->unmap_limit_for_ws)
			max_blocks = sdkp->max_unmap_blocks;
		else
			max_blocks = sdkp->max_ws_blocks;

		max_blocks = min_not_zero(max_blocks, (u32)SD_MAX_WS16_BLOCKS);
		break;

	case SD_LBP_WS10:
		if (sdkp->device->unmap_limit_for_ws)
			max_blocks = sdkp->max_unmap_blocks;
		else
			max_blocks = sdkp->max_ws_blocks;

		max_blocks = min_not_zero(max_blocks, (u32)SD_MAX_WS10_BLOCKS);
		break;

	case SD_LBP_ZERO:
		max_blocks = min_not_zero(sdkp->max_ws_blocks,
					  (u32)SD_MAX_WS10_BLOCKS);
		break;
	}

	blk_queue_max_discard_sectors(q, max_blocks * (logical_block_size >> 9));
	blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
}

static blk_status_t sd_setup_unmap_cmnd(struct scsi_cmnd *cmd)
{
	struct scsi_device *sdp = cmd->device;
	struct request *rq = cmd->request;
	u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));
	u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
	unsigned int data_len = 24;
	char *buf;

	rq->special_vec.bv_page = mempool_alloc(sd_page_pool, GFP_ATOMIC);
	if (!rq->special_vec.bv_page)
		return BLK_STS_RESOURCE;
	clear_highpage(rq->special_vec.bv_page);
	rq->special_vec.bv_offset = 0;
	rq->special_vec.bv_len = data_len;
	rq->rq_flags |= RQF_SPECIAL_PAYLOAD;

	cmd->cmd_len = 10;
	cmd->cmnd[0] = UNMAP;
	cmd->cmnd[8] = 24;

	buf = page_address(rq->special_vec.bv_page);
	put_unaligned_be16(6 + 16, &buf[0]);
	put_unaligned_be16(16, &buf[2]);
	put_unaligned_be64(lba, &buf[8]);
	put_unaligned_be32(nr_blocks, &buf[16]);

	cmd->allowed = SD_MAX_RETRIES;
	cmd->transfersize = data_len;
	rq->timeout = SD_TIMEOUT;

	return scsi_init_io(cmd);
}

static blk_status_t sd_setup_write_same16_cmnd(struct scsi_cmnd *cmd,
		bool unmap)
{
	struct scsi_device *sdp = cmd->device;
	struct request *rq = cmd->request;
	u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));
	u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
	u32 data_len = sdp->sector_size;

	rq->special_vec.bv_page = mempool_alloc(sd_page_pool, GFP_ATOMIC);
	if (!rq->special_vec.bv_page)
		return BLK_STS_RESOURCE;
	clear_highpage(rq->special_vec.bv_page);
	rq->special_vec.bv_offset = 0;
	rq->special_vec.bv_len = data_len;
	rq->rq_flags |= RQF_SPECIAL_PAYLOAD;

	cmd->cmd_len = 16;
	cmd->cmnd[0] = WRITE_SAME_16;
	if (unmap)
		cmd->cmnd[1] = 0x8; /* UNMAP */
	put_unaligned_be64(lba, &cmd->cmnd[2]);
	put_unaligned_be32(nr_blocks, &cmd->cmnd[10]);

	cmd->allowed = SD_MAX_RETRIES;
	cmd->transfersize = data_len;
	rq->timeout = unmap ? SD_TIMEOUT : SD_WRITE_SAME_TIMEOUT;

	return scsi_init_io(cmd);
}

static blk_status_t sd_setup_write_same10_cmnd(struct scsi_cmnd *cmd,
		bool unmap)
{
	struct scsi_device *sdp = cmd->device;
	struct request *rq = cmd->request;
	u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));
	u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
	u32 data_len = sdp->sector_size;

	rq->special_vec.bv_page = mempool_alloc(sd_page_pool, GFP_ATOMIC);
	if (!rq->special_vec.bv_page)
		return BLK_STS_RESOURCE;
	clear_highpage(rq->special_vec.bv_page);
	rq->special_vec.bv_offset = 0;
	rq->special_vec.bv_len = data_len;
	rq->rq_flags |= RQF_SPECIAL_PAYLOAD;

	cmd->cmd_len = 10;
	cmd->cmnd[0] = WRITE_SAME;
	if (unmap)
		cmd->cmnd[1] = 0x8; /* UNMAP */
	put_unaligned_be32(lba, &cmd->cmnd[2]);
	put_unaligned_be16(nr_blocks, &cmd->cmnd[7]);

	cmd->allowed = SD_MAX_RETRIES;
	cmd->transfersize = data_len;
	rq->timeout = unmap ?
		SD_TIMEOUT : SD_WRITE_SAME_TIMEOUT;

	return scsi_init_io(cmd);
}

static blk_status_t sd_setup_write_zeroes_cmnd(struct scsi_cmnd *cmd)
{
	struct request *rq = cmd->request;
	struct scsi_device *sdp = cmd->device;
	struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
	u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));
	u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));

	if (!(rq->cmd_flags & REQ_NOUNMAP)) {
		switch (sdkp->zeroing_mode) {
		case SD_ZERO_WS16_UNMAP:
			return sd_setup_write_same16_cmnd(cmd, true);
		case SD_ZERO_WS10_UNMAP:
			return sd_setup_write_same10_cmnd(cmd, true);
		}
	}

	if (sdp->no_write_same)
		return BLK_STS_TARGET;

	if (sdkp->ws16 || lba > 0xffffffff || nr_blocks > 0xffff)
		return sd_setup_write_same16_cmnd(cmd, false);

	return sd_setup_write_same10_cmnd(cmd, false);
}

static void sd_config_write_same(struct scsi_disk *sdkp)
{
	struct request_queue *q = sdkp->disk->queue;
	unsigned int logical_block_size = sdkp->device->sector_size;

	if (sdkp->device->no_write_same) {
		sdkp->max_ws_blocks = 0;
		goto out;
	}

	/* Some devices can not handle block counts above 0xffff despite
	 * supporting WRITE SAME(16). Consequently we default to 64k
	 * blocks per I/O unless the device explicitly advertises a
	 * bigger limit.
	 */
	if (sdkp->max_ws_blocks > SD_MAX_WS10_BLOCKS)
		sdkp->max_ws_blocks = min_not_zero(sdkp->max_ws_blocks,
						   (u32)SD_MAX_WS16_BLOCKS);
	else if (sdkp->ws16 || sdkp->ws10 || sdkp->device->no_report_opcodes)
		sdkp->max_ws_blocks = min_not_zero(sdkp->max_ws_blocks,
						   (u32)SD_MAX_WS10_BLOCKS);
	else {
		sdkp->device->no_write_same = 1;
		sdkp->max_ws_blocks = 0;
	}

	if (sdkp->lbprz && sdkp->lbpws)
		sdkp->zeroing_mode = SD_ZERO_WS16_UNMAP;
	else if (sdkp->lbprz && sdkp->lbpws10)
		sdkp->zeroing_mode = SD_ZERO_WS10_UNMAP;
	else if (sdkp->max_ws_blocks)
		sdkp->zeroing_mode = SD_ZERO_WS;
	else
		sdkp->zeroing_mode = SD_ZERO_WRITE;

	if (sdkp->max_ws_blocks &&
	    sdkp->physical_block_size > logical_block_size) {
		/*
		 * Reporting a maximum number of blocks that is not aligned
		 * on the device physical size would cause a large write same
		 * request to be split into physically unaligned chunks by
		 * __blkdev_issue_write_zeroes() and __blkdev_issue_write_same()
		 * even if the caller of these functions took care to align the
		 * large request. So make sure the maximum reported is aligned
		 * to the device physical block size. This is only an optional
		 * optimization for regular disks, but this is mandatory to
		 * avoid failure of large write same requests directed at
		 * sequential write required zones of host-managed ZBC disks.
		 */
		sdkp->max_ws_blocks =
			round_down(sdkp->max_ws_blocks,
				   bytes_to_logical(sdkp->device,
						    sdkp->physical_block_size));
	}

out:
	blk_queue_max_write_same_sectors(q, sdkp->max_ws_blocks *
					 (logical_block_size >> 9));
	blk_queue_max_write_zeroes_sectors(q, sdkp->max_ws_blocks *
					   (logical_block_size >> 9));
}

/**
 * sd_setup_write_same_cmnd - write the same data to multiple blocks
 * @cmd: command to prepare
 *
 * Will set up either WRITE SAME(10) or WRITE SAME(16) depending on
 * the preference indicated by the target device.
 **/
static blk_status_t sd_setup_write_same_cmnd(struct scsi_cmnd *cmd)
{
	struct request *rq = cmd->request;
	struct scsi_device *sdp = cmd->device;
	struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
	struct bio *bio = rq->bio;
	u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));
	u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
	blk_status_t ret;

	if (sdkp->device->no_write_same)
		return BLK_STS_TARGET;

	BUG_ON(bio_offset(bio) || bio_iovec(bio).bv_len != sdp->sector_size);

	rq->timeout = SD_WRITE_SAME_TIMEOUT;

	if (sdkp->ws16 || lba > 0xffffffff || nr_blocks > 0xffff) {
		cmd->cmd_len = 16;
		cmd->cmnd[0] = WRITE_SAME_16;
		put_unaligned_be64(lba, &cmd->cmnd[2]);
		put_unaligned_be32(nr_blocks, &cmd->cmnd[10]);
	} else {
		cmd->cmd_len = 10;
		cmd->cmnd[0] = WRITE_SAME;
		put_unaligned_be32(lba, &cmd->cmnd[2]);
		put_unaligned_be16(nr_blocks, &cmd->cmnd[7]);
	}

	cmd->transfersize = sdp->sector_size;
	cmd->allowed = SD_MAX_RETRIES;

	/*
	 * For WRITE SAME the data transferred via the DATA OUT buffer is
	 * different from the amount of data actually written to the target.
	 *
	 * We set up __data_len to the amount of data transferred via the
	 * DATA OUT buffer so that blk_rq_map_sg sets up the proper S/G list
	 * to transfer a single sector of data first, but then reset it to
	 * the amount of data to be written right after so that the I/O path
	 * knows how much to actually write.
	 */
	rq->__data_len = sdp->sector_size;
	ret = scsi_init_io(cmd);
	rq->__data_len = blk_rq_bytes(rq);

	return ret;
}

static blk_status_t sd_setup_flush_cmnd(struct scsi_cmnd *cmd)
{
	struct request *rq = cmd->request;

	/* flush requests don't perform I/O, zero the S/G table */
	memset(&cmd->sdb, 0, sizeof(cmd->sdb));

	cmd->cmnd[0] = SYNCHRONIZE_CACHE;
	cmd->cmd_len = 10;
	cmd->transfersize = 0;
	cmd->allowed = SD_MAX_RETRIES;

	rq->timeout = rq->q->rq_timeout * SD_FLUSH_TIMEOUT_MULTIPLIER;
	return BLK_STS_OK;
}

static blk_status_t sd_setup_rw32_cmnd(struct scsi_cmnd *cmd, bool write,
				       sector_t lba, unsigned int nr_blocks,
				       unsigned char flags)
{
	cmd->cmnd = mempool_alloc(sd_cdb_pool, GFP_ATOMIC);
	if (unlikely(cmd->cmnd == NULL))
		return BLK_STS_RESOURCE;

	cmd->cmd_len = SD_EXT_CDB_SIZE;
	memset(cmd->cmnd, 0, cmd->cmd_len);

	cmd->cmnd[0] = VARIABLE_LENGTH_CMD;
	cmd->cmnd[7] = 0x18; /* Additional CDB len */
	cmd->cmnd[9] = write ? WRITE_32 : READ_32;
	cmd->cmnd[10] = flags;
	put_unaligned_be64(lba, &cmd->cmnd[12]);
	put_unaligned_be32(lba, &cmd->cmnd[20]); /* Expected Indirect LBA */
	put_unaligned_be32(nr_blocks, &cmd->cmnd[28]);

	return BLK_STS_OK;
}

static blk_status_t sd_setup_rw16_cmnd(struct scsi_cmnd *cmd, bool write,
				       sector_t lba, unsigned int nr_blocks,
				       unsigned char flags)
{
	cmd->cmd_len = 16;
	cmd->cmnd[0] = write ?
			WRITE_16 : READ_16;
	cmd->cmnd[1] = flags;
	cmd->cmnd[14] = 0;
	cmd->cmnd[15] = 0;
	put_unaligned_be64(lba, &cmd->cmnd[2]);
	put_unaligned_be32(nr_blocks, &cmd->cmnd[10]);

	return BLK_STS_OK;
}

static blk_status_t sd_setup_rw10_cmnd(struct scsi_cmnd *cmd, bool write,
				       sector_t lba, unsigned int nr_blocks,
				       unsigned char flags)
{
	cmd->cmd_len = 10;
	cmd->cmnd[0] = write ? WRITE_10 : READ_10;
	cmd->cmnd[1] = flags;
	cmd->cmnd[6] = 0;
	cmd->cmnd[9] = 0;
	put_unaligned_be32(lba, &cmd->cmnd[2]);
	put_unaligned_be16(nr_blocks, &cmd->cmnd[7]);

	return BLK_STS_OK;
}

static blk_status_t sd_setup_rw6_cmnd(struct scsi_cmnd *cmd, bool write,
				      sector_t lba, unsigned int nr_blocks,
				      unsigned char flags)
{
	/* Avoid that 0 blocks gets translated into 256 blocks. */
	if (WARN_ON_ONCE(nr_blocks == 0))
		return BLK_STS_IOERR;

	if (unlikely(flags & 0x8)) {
		/*
		 * This happens only if this drive failed 10byte rw
		 * command with ILLEGAL_REQUEST during operation and
		 * thus turned off use_10_for_rw.
		 */
		scmd_printk(KERN_ERR, cmd, "FUA write on READ/WRITE(6) drive\n");
		return BLK_STS_IOERR;
	}

	cmd->cmd_len = 6;
	cmd->cmnd[0] = write ? WRITE_6 : READ_6;
	cmd->cmnd[1] = (lba >> 16) & 0x1f;
	cmd->cmnd[2] = (lba >> 8) & 0xff;
	cmd->cmnd[3] = lba & 0xff;
	cmd->cmnd[4] = nr_blocks;
	cmd->cmnd[5] = 0;

	return BLK_STS_OK;
}

static blk_status_t sd_setup_read_write_cmnd(struct scsi_cmnd *cmd)
{
	struct request *rq = cmd->request;
	struct scsi_device *sdp = cmd->device;
	struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
	sector_t lba = sectors_to_logical(sdp, blk_rq_pos(rq));
	sector_t threshold;
	unsigned int nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
	unsigned int mask = logical_to_sectors(sdp, 1) - 1;
	bool write = rq_data_dir(rq) == WRITE;
	unsigned char protect, fua;
	blk_status_t ret;
	unsigned int dif;
	bool dix;

	ret = scsi_init_io(cmd);
	if (ret != BLK_STS_OK)
		return ret;

	if (!scsi_device_online(sdp) || sdp->changed) {
		scmd_printk(KERN_ERR, cmd, "device offline or changed\n");
		return BLK_STS_IOERR;
	}

	if (blk_rq_pos(rq) + blk_rq_sectors(rq) > get_capacity(rq->rq_disk)) {
		scmd_printk(KERN_ERR, cmd, "access beyond end of device\n");
		return BLK_STS_IOERR;
	}

	if ((blk_rq_pos(rq) & mask) || (blk_rq_sectors(rq) & mask)) {
		scmd_printk(KERN_ERR, cmd, "request not aligned to the logical block size\n");
		return BLK_STS_IOERR;
	}

	/*
	 * Some SD card readers can't handle accesses which touch the
	 * last one or two logical blocks. Split accesses as needed.
	 */
	threshold = sdkp->capacity - SD_LAST_BUGGY_SECTORS;

	if (unlikely(sdp->last_sector_bug && lba + nr_blocks > threshold)) {
		if (lba < threshold) {
			/* Access up to the threshold but not beyond */
			nr_blocks = threshold - lba;
		} else {
			/* Access only a single logical block */
			nr_blocks = 1;
		}
	}

	fua = rq->cmd_flags & REQ_FUA ?
		0x8 : 0;
	dix = scsi_prot_sg_count(cmd);
	dif = scsi_host_dif_capable(cmd->device->host, sdkp->protection_type);

	if (dif || dix)
		protect = sd_setup_protect_cmnd(cmd, dix, dif);
	else
		protect = 0;

	if (protect && sdkp->protection_type == T10_PI_TYPE2_PROTECTION) {
		ret = sd_setup_rw32_cmnd(cmd, write, lba, nr_blocks,
					 protect | fua);
	} else if (sdp->use_16_for_rw || (nr_blocks > 0xffff)) {
		ret = sd_setup_rw16_cmnd(cmd, write, lba, nr_blocks,
					 protect | fua);
	} else if ((nr_blocks > 0xff) || (lba > 0x1fffff) ||
		   sdp->use_10_for_rw || protect) {
		ret = sd_setup_rw10_cmnd(cmd, write, lba, nr_blocks,
					 protect | fua);
	} else {
		ret = sd_setup_rw6_cmnd(cmd, write, lba, nr_blocks,
					protect | fua);
	}

	if (unlikely(ret != BLK_STS_OK))
		return ret;

	/*
	 * We shouldn't disconnect in the middle of a sector, so with a dumb
	 * host adapter, it's safe to assume that we can at least transfer
	 * this many bytes between each connect / disconnect.
	 */
	cmd->transfersize = sdp->sector_size;
	cmd->underflow = nr_blocks << 9;
	cmd->allowed = SD_MAX_RETRIES;
	cmd->sdb.length = nr_blocks * sdp->sector_size;

	SCSI_LOG_HLQUEUE(1,
			 scmd_printk(KERN_INFO, cmd,
				     "%s: block=%llu, count=%d\n", __func__,
				     (unsigned long long)blk_rq_pos(rq),
				     blk_rq_sectors(rq)));
	SCSI_LOG_HLQUEUE(2,
			 scmd_printk(KERN_INFO, cmd,
				     "%s %d/%u 512 byte blocks.\n",
				     write ? "writing" : "reading", nr_blocks,
				     blk_rq_sectors(rq)));

	/*
	 * This indicates that the command is ready from our end to be
	 * queued.
	 */
	return BLK_STS_OK;
}

static blk_status_t sd_init_command(struct scsi_cmnd *cmd)
{
	struct request *rq = cmd->request;

	switch (req_op(rq)) {
	case REQ_OP_DISCARD:
		switch (scsi_disk(rq->rq_disk)->provisioning_mode) {
		case SD_LBP_UNMAP:
			return sd_setup_unmap_cmnd(cmd);
		case SD_LBP_WS16:
			return sd_setup_write_same16_cmnd(cmd, true);
		case SD_LBP_WS10:
			return sd_setup_write_same10_cmnd(cmd, true);
		case SD_LBP_ZERO:
			return sd_setup_write_same10_cmnd(cmd, false);
		default:
			return BLK_STS_TARGET;
		}
	case REQ_OP_WRITE_ZEROES:
		return sd_setup_write_zeroes_cmnd(cmd);
	case REQ_OP_WRITE_SAME:
		return sd_setup_write_same_cmnd(cmd);
	case REQ_OP_FLUSH:
		return sd_setup_flush_cmnd(cmd);
	case REQ_OP_READ:
	case REQ_OP_WRITE:
		return sd_setup_read_write_cmnd(cmd);
	case REQ_OP_ZONE_RESET:
		return sd_zbc_setup_zone_mgmt_cmnd(cmd, ZO_RESET_WRITE_POINTER,
						   false);
	case REQ_OP_ZONE_RESET_ALL:
		return sd_zbc_setup_zone_mgmt_cmnd(cmd, ZO_RESET_WRITE_POINTER,
						   true);
	case REQ_OP_ZONE_OPEN:
		return sd_zbc_setup_zone_mgmt_cmnd(cmd, ZO_OPEN_ZONE, false);
	case REQ_OP_ZONE_CLOSE:
		return sd_zbc_setup_zone_mgmt_cmnd(cmd, ZO_CLOSE_ZONE, false);
	case REQ_OP_ZONE_FINISH:
		return sd_zbc_setup_zone_mgmt_cmnd(cmd, ZO_FINISH_ZONE, false);
	default:
		WARN_ON_ONCE(1);
		return BLK_STS_NOTSUPP;
	}
}

static void sd_uninit_command(struct scsi_cmnd *SCpnt)
{
	struct request *rq = SCpnt->request;
	u8 *cmnd;

	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
		mempool_free(rq->special_vec.bv_page, sd_page_pool);

	if (SCpnt->cmnd != scsi_req(rq)->cmd) {
		cmnd = SCpnt->cmnd;
		SCpnt->cmnd = NULL;
		SCpnt->cmd_len = 0;
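		/* Return the extended CDB allocated by sd_setup_rw32_cmnd() to its pool. */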
		mempool_free(cmnd, sd_cdb_pool);
	}
}

/**
 * sd_open - open a scsi disk device
 * @bdev: Block device of the scsi disk to open
 * @mode: FMODE_* mask
 *
 * Returns 0 if successful. Returns a negated errno value in case
 * of error.
 *
 * Note: This can be called from a user context (e.g. fsck(1) )
 * or from within the kernel (e.g. as a result of a mount(1) ).
 * In the latter case @inode and @filp carry an abridged amount
 * of information as noted above.
 *
 * Locking: called with bdev->bd_mutex held.
 **/
static int sd_open(struct block_device *bdev, fmode_t mode)
{
	struct scsi_disk *sdkp = scsi_disk_get(bdev->bd_disk);
	struct scsi_device *sdev;
	int retval;

	if (!sdkp)
		return -ENXIO;

	SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_open\n"));

	sdev = sdkp->device;

	/*
	 * If the device is in error recovery, wait until it is done.
	 * If the device is offline, then disallow any access to it.
	 */
	retval = -ENXIO;
	if (!scsi_block_when_processing_errors(sdev))
		goto error_out;

	if (sdev->removable || sdkp->write_prot)
		check_disk_change(bdev);

	/*
	 * If the drive is empty, just let the open fail.
	 */
	retval = -ENOMEDIUM;
	if (sdev->removable && !sdkp->media_present && !(mode & FMODE_NDELAY))
		goto error_out;

	/*
	 * If the device has the write protect tab set, have the open fail
	 * if the user expects to be able to write to the thing.
	 */
	retval = -EROFS;
	if (sdkp->write_prot && (mode & FMODE_WRITE))
		goto error_out;

	/*
	 * It is possible that the disk changing stuff resulted in
	 * the device being taken offline. If this is the case,
	 * report this to the user, and don't pretend that the
	 * open actually succeeded.
	 */
	retval = -ENXIO;
	if (!scsi_device_online(sdev))
		goto error_out;

	if ((atomic_inc_return(&sdkp->openers) == 1) && sdev->removable) {
		if (scsi_block_when_processing_errors(sdev))
			scsi_set_medium_removal(sdev, SCSI_REMOVAL_PREVENT);
	}

	return 0;

error_out:
	scsi_disk_put(sdkp);
	return retval;
}

/**
 * sd_release - invoked when the (last) close(2) is called on this
 * scsi disk.
 * @disk: disk to release
 * @mode: FMODE_* mask
 *
 * Returns 0.
 *
 * Note: may block (uninterruptible) if error recovery is underway
 * on this disk.
 *
 * Locking: called with bdev->bd_mutex held.
 **/
static void sd_release(struct gendisk *disk, fmode_t mode)
{
	struct scsi_disk *sdkp = scsi_disk(disk);
	struct scsi_device *sdev = sdkp->device;

	SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_release\n"));

	if (atomic_dec_return(&sdkp->openers) == 0 && sdev->removable) {
		if (scsi_block_when_processing_errors(sdev))
			scsi_set_medium_removal(sdev, SCSI_REMOVAL_ALLOW);
	}

	scsi_disk_put(sdkp);
}

static int sd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct scsi_disk *sdkp = scsi_disk(bdev->bd_disk);
	struct scsi_device *sdp = sdkp->device;
	struct Scsi_Host *host = sdp->host;
	sector_t capacity = logical_to_sectors(sdp, sdkp->capacity);
	int diskinfo[4];

	/* default to most commonly used values */
	diskinfo[0] = 0x40;	/* 1 << 6 */
	diskinfo[1] = 0x20;	/* 1 << 5 */
	diskinfo[2] = capacity >> 11;

	/* override with calculated, extended default, or driver values */
	if (host->hostt->bios_param)
		host->hostt->bios_param(sdp, bdev, capacity, diskinfo);
	else
		scsicam_bios_param(bdev, capacity, diskinfo);

	geo->heads = diskinfo[0];
	geo->sectors = diskinfo[1];
	geo->cylinders = diskinfo[2];
	return 0;
}

/**
 * sd_ioctl - process an ioctl
 * @bdev: target block device
 * @mode: FMODE_* mask
 * @cmd: ioctl command number
 * @arg: this is third argument given to ioctl(2) system call.
 * Often contains a pointer.
 *
 * Returns 0 if successful (some ioctls return positive numbers on
 * success as well). Returns a negated errno value in case of error.
 *
 * Note: most ioctls are forwarded to the block subsystem or further
 * down in the scsi subsystem.
 **/
static int sd_ioctl(struct block_device *bdev, fmode_t mode,
		    unsigned int cmd, unsigned long arg)
{
	struct gendisk *disk = bdev->bd_disk;
	struct scsi_disk *sdkp = scsi_disk(disk);
	struct scsi_device *sdp = sdkp->device;
	void __user *p = (void __user *)arg;
	int error;

	SCSI_LOG_IOCTL(1, sd_printk(KERN_INFO, sdkp, "sd_ioctl: disk=%s, "
				    "cmd=0x%x\n", disk->disk_name, cmd));

	error = scsi_verify_blk_ioctl(bdev, cmd);
	if (error < 0)
		return error;

	/*
	 * If we are in the middle of error recovery, don't let anyone
	 * else try and use this device. Also, if error recovery fails, it
	 * may try and take the device offline, in which case all further
	 * access to the device is prohibited.
	 */
	error = scsi_ioctl_block_when_processing_errors(sdp, cmd,
			(mode & FMODE_NDELAY) != 0);
	if (error)
		goto out;

	if (is_sed_ioctl(cmd))
		return sed_ioctl(sdkp->opal_dev, cmd, p);

	/*
	 * Send SCSI addressing ioctls directly to mid level, send other
	 * ioctls to block level and then onto mid level if they can't be
	 * resolved.
	 */
	switch (cmd) {
	case SCSI_IOCTL_GET_IDLUN:
	case SCSI_IOCTL_GET_BUS_NUMBER:
		error = scsi_ioctl(sdp, cmd, p);
		break;
	default:
		error = scsi_cmd_blk_ioctl(bdev, mode, cmd, p);
		if (error != -ENOTTY)
			break;
		error = scsi_ioctl(sdp, cmd, p);
		break;
	}
out:
	return error;
}

static void set_media_not_present(struct scsi_disk *sdkp)
{
	if (sdkp->media_present)
		sdkp->device->changed = 1;

	if (sdkp->device->removable) {
		sdkp->media_present = 0;
		sdkp->capacity = 0;
	}
}

static int media_not_present(struct scsi_disk *sdkp,
			     struct scsi_sense_hdr *sshdr)
{
	if (!scsi_sense_valid(sshdr))
		return 0;

	/* not invoked for commands that could return deferred errors */
	switch (sshdr->sense_key) {
	case UNIT_ATTENTION:
	case NOT_READY:
		/* medium not present */
		if (sshdr->asc == 0x3A) {
			set_media_not_present(sdkp);
			return 1;
		}
	}
	return 0;
}

/**
 * sd_check_events - check media events
 * @disk: kernel device descriptor
 * @clearing: disk events currently being cleared
 *
 * Returns mask of DISK_EVENT_*.
 *
 * Note: this function is invoked from the block subsystem.
 **/
static unsigned int sd_check_events(struct gendisk *disk, unsigned int clearing)
{
	struct scsi_disk *sdkp = scsi_disk_get(disk);
	struct scsi_device *sdp;
	int retval;

	if (!sdkp)
		return 0;

	sdp = sdkp->device;
	SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_check_events\n"));

	/*
	 * If the device is offline, don't send any commands - just pretend as
	 * if the command failed. If the device ever comes back online, we
	 * can deal with it then. It is only because of unrecoverable errors
	 * that we would ever take a device offline in the first place.
	 */
	if (!scsi_device_online(sdp)) {
		set_media_not_present(sdkp);
		goto out;
	}

	/*
	 * Using TEST_UNIT_READY enables differentiation between drive with
	 * no cartridge loaded - NOT READY, drive with changed cartridge -
	 * UNIT ATTENTION, or with same cartridge - GOOD STATUS.
	 *
	 * Drives that auto spin down, e.g. iomega jaz 1G, will be started
	 * by sd_spinup_disk() from sd_revalidate_disk(), which happens whenever
	 * sd_revalidate() is called.
	 */
	if (scsi_block_when_processing_errors(sdp)) {
		struct scsi_sense_hdr sshdr = { 0, };

		retval = scsi_test_unit_ready(sdp, SD_TIMEOUT, SD_MAX_RETRIES,
					      &sshdr);

		/* failed to execute TUR, assume media not present */
		if (host_byte(retval)) {
			set_media_not_present(sdkp);
			goto out;
		}

		if (media_not_present(sdkp, &sshdr))
			goto out;
	}

	/*
	 * For removable scsi disk we have to recognise the presence
	 * of a disk in the drive.
	 */
	if (!sdkp->media_present)
		sdp->changed = 1;
	sdkp->media_present = 1;
out:
	/*
	 * sdp->changed is set under the following conditions:
	 *
	 * Medium present state has changed in either direction.
	 * Device has indicated UNIT_ATTENTION.
	 */
	retval = sdp->changed ?
		DISK_EVENT_MEDIA_CHANGE : 0;
	sdp->changed = 0;
	scsi_disk_put(sdkp);
	return retval;
}

static int sd_sync_cache(struct scsi_disk *sdkp, struct scsi_sense_hdr *sshdr)
{
	int retries, res;
	struct scsi_device *sdp = sdkp->device;
	const int timeout = sdp->request_queue->rq_timeout
		* SD_FLUSH_TIMEOUT_MULTIPLIER;
	struct scsi_sense_hdr my_sshdr;

	if (!scsi_device_online(sdp))
		return -ENODEV;

	/* caller might not be interested in sense, but we need it */
	if (!sshdr)
		sshdr = &my_sshdr;

	for (retries = 3; retries > 0; --retries) {
		unsigned char cmd[10] = { 0 };

		cmd[0] = SYNCHRONIZE_CACHE;
		/*
		 * Leave the rest of the command zero to indicate
		 * flush everything.
		 */
		res = scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL, sshdr,
				timeout, SD_MAX_RETRIES, 0, RQF_PM, NULL);
		if (res == 0)
			break;
	}

	if (res) {
		sd_print_result(sdkp, "Synchronize Cache(10) failed", res);

		if (driver_byte(res) == DRIVER_SENSE)
			sd_print_sense_hdr(sdkp, sshdr);

		/* we need to evaluate the error return */
		if (scsi_sense_valid(sshdr) &&
		    (sshdr->asc == 0x3a ||	/* medium not present */
		     sshdr->asc == 0x20 ||	/* invalid command */
		     (sshdr->asc == 0x74 && sshdr->ascq == 0x71)))	/* drive is password locked */
			/* this is no error here */
			return 0;

		switch (host_byte(res)) {
		/* ignore errors due to racing a disconnection */
		case DID_BAD_TARGET:
		case DID_NO_CONNECT:
			return 0;
		/* signal the upper layer it might try again */
		case DID_BUS_BUSY:
		case DID_IMM_RETRY:
		case DID_REQUEUE:
		case DID_SOFT_ERROR:
			return -EBUSY;
		default:
			return -EIO;
		}
	}
	return 0;
}

static void sd_rescan(struct device *dev)
{
	struct scsi_disk *sdkp = dev_get_drvdata(dev);

	revalidate_disk(sdkp->disk);
}


#ifdef CONFIG_COMPAT
/*
 * This gets directly called from VFS. When the ioctl
 * is not recognized we go back to the other translation paths.
 */
static int sd_compat_ioctl(struct block_device *bdev, fmode_t mode,
			   unsigned int cmd, unsigned long arg)
{
	struct gendisk *disk = bdev->bd_disk;
	struct scsi_disk *sdkp = scsi_disk(disk);
	struct scsi_device *sdev = sdkp->device;
	void __user *p = compat_ptr(arg);
	int error;

	error = scsi_verify_blk_ioctl(bdev, cmd);
	if (error < 0)
		return error;

	error = scsi_ioctl_block_when_processing_errors(sdev, cmd,
			(mode & FMODE_NDELAY) != 0);
	if (error)
		return error;

	if (is_sed_ioctl(cmd))
		return sed_ioctl(sdkp->opal_dev, cmd, p);

	/*
	 * Let the static ioctl translation table take care of it.
	 */
	if (!sdev->host->hostt->compat_ioctl)
		return -ENOIOCTLCMD;
	return sdev->host->hostt->compat_ioctl(sdev, cmd, p);
}
#endif

static char sd_pr_type(enum pr_type type)
{
	switch (type) {
	case PR_WRITE_EXCLUSIVE:
		return 0x01;
	case PR_EXCLUSIVE_ACCESS:
		return 0x03;
	case PR_WRITE_EXCLUSIVE_REG_ONLY:
		return 0x05;
	case PR_EXCLUSIVE_ACCESS_REG_ONLY:
		return 0x06;
	case PR_WRITE_EXCLUSIVE_ALL_REGS:
		return 0x07;
	case PR_EXCLUSIVE_ACCESS_ALL_REGS:
		return 0x08;
	default:
		return 0;
	}
};

static int sd_pr_command(struct block_device *bdev, u8 sa,
		u64 key, u64 sa_key, u8 type, u8 flags)
{
	struct scsi_device *sdev = scsi_disk(bdev->bd_disk)->device;
	struct scsi_sense_hdr sshdr;
	int result;
	u8 cmd[16] = { 0, };
	u8 data[24] = { 0, };

	cmd[0] = PERSISTENT_RESERVE_OUT;
	cmd[1] = sa;
	cmd[2] = type;
	put_unaligned_be32(sizeof(data), &cmd[5]);

	put_unaligned_be64(key, &data[0]);
	put_unaligned_be64(sa_key, &data[8]);
	data[20] = flags;

	result = scsi_execute_req(sdev, cmd, DMA_TO_DEVICE, &data, sizeof(data),
			&sshdr, SD_TIMEOUT, SD_MAX_RETRIES, NULL);

	if (driver_byte(result) == DRIVER_SENSE &&
	    scsi_sense_valid(&sshdr)) {
		sdev_printk(KERN_INFO, sdev, "PR command failed: %d\n", result);
		scsi_print_sense_hdr(sdev, NULL, &sshdr);
	}

	return result;
}

static int sd_pr_register(struct block_device *bdev, u64 old_key, u64 new_key,
		u32 flags)
{
	if (flags & ~PR_FL_IGNORE_KEY)
		return -EOPNOTSUPP;
	return sd_pr_command(bdev, (flags & PR_FL_IGNORE_KEY) ? 0x06 : 0x00,
			old_key, new_key, 0,
			(1 << 0) /* APTPL */);
}

static int sd_pr_reserve(struct block_device *bdev, u64 key, enum pr_type type,
		u32 flags)
{
	if (flags)
		return -EOPNOTSUPP;
	return sd_pr_command(bdev, 0x01, key, 0, sd_pr_type(type), 0);
}

static int sd_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
{
	return sd_pr_command(bdev, 0x02, key, 0, sd_pr_type(type), 0);
}

static int sd_pr_preempt(struct block_device *bdev, u64 old_key, u64 new_key,
		enum pr_type type, bool abort)
{
	return sd_pr_command(bdev, abort ? 0x05 : 0x04, old_key, new_key,
			     sd_pr_type(type), 0);
}

static int sd_pr_clear(struct block_device *bdev, u64 key)
{
	return sd_pr_command(bdev, 0x03, key, 0, 0, 0);
}

static const struct pr_ops sd_pr_ops = {
	.pr_register	= sd_pr_register,
	.pr_reserve	= sd_pr_reserve,
	.pr_release	= sd_pr_release,
	.pr_preempt	= sd_pr_preempt,
	.pr_clear	= sd_pr_clear,
};

static const struct block_device_operations sd_fops = {
	.owner			= THIS_MODULE,
	.open			= sd_open,
	.release		= sd_release,
	.ioctl			= sd_ioctl,
	.getgeo			= sd_getgeo,
#ifdef CONFIG_COMPAT
	.compat_ioctl		= sd_compat_ioctl,
#endif
	.check_events		= sd_check_events,
	.revalidate_disk	= sd_revalidate_disk,
	.unlock_native_capacity	= sd_unlock_native_capacity,
	.report_zones		= sd_zbc_report_zones,
	.pr_ops			= &sd_pr_ops,
};

/**
 * sd_eh_reset - reset error handling callback
 * @scmd: sd-issued command that has failed
 *
 * This function is called by the SCSI midlayer before starting
 * SCSI EH.
 * When counting medium access failures we have to be careful to register
 * it only once per device and SCSI EH run; there might be several timed
 * out commands which will cause the 'max_medium_access_timeouts' counter
 * to trigger after the first SCSI EH run already and set the device to
 * offline.
 * So this function resets the internal counter before starting SCSI EH.
 **/
static void sd_eh_reset(struct scsi_cmnd *scmd)
{
	struct scsi_disk *sdkp = scsi_disk(scmd->request->rq_disk);

	/* New SCSI EH run, reset gate variable */
	sdkp->ignore_medium_access_errors = false;
}

/**
 * sd_eh_action - error handling callback
 * @scmd: sd-issued command that has failed
 * @eh_disp: The recovery disposition suggested by the midlayer
 *
 * This function is called by the SCSI midlayer upon completion of an
 * error test command (currently TEST UNIT READY). The result of sending
 * the eh command is passed in eh_disp. We're looking for devices that
 * fail medium access commands but are OK with non access commands like
 * test unit ready (so wrongly see the device as having a successful
 * recovery)
 **/
static int sd_eh_action(struct scsi_cmnd *scmd, int eh_disp)
{
	struct scsi_disk *sdkp = scsi_disk(scmd->request->rq_disk);
	struct scsi_device *sdev = scmd->device;

	if (!scsi_device_online(sdev) ||
	    !scsi_medium_access_command(scmd) ||
	    host_byte(scmd->result) != DID_TIME_OUT ||
	    eh_disp != SUCCESS)
		return eh_disp;

	/*
	 * The device has timed out executing a medium access command.
	 * However, the TEST UNIT READY command sent during error
	 * handling completed successfully. Either the device is in the
	 * process of recovering or it has suffered an internal failure
	 * that prevents access to the storage medium.
	 */
	if (!sdkp->ignore_medium_access_errors) {
		sdkp->medium_access_timed_out++;
		sdkp->ignore_medium_access_errors = true;
	}

	/*
	 * If the device keeps failing read/write commands but TEST UNIT
	 * READY always completes successfully we assume that medium
	 * access is no longer possible and take the device offline.
	 */
	if (sdkp->medium_access_timed_out >= sdkp->max_medium_access_timeouts) {
		scmd_printk(KERN_ERR, scmd,
			    "Medium access timeout failure. Offlining disk!\n");
		mutex_lock(&sdev->state_mutex);
		scsi_device_set_state(sdev, SDEV_OFFLINE);
		mutex_unlock(&sdev->state_mutex);

		return SUCCESS;
	}

	return eh_disp;
}

static unsigned int sd_completed_bytes(struct scsi_cmnd *scmd)
{
	struct request *req = scmd->request;
	struct scsi_device *sdev = scmd->device;
	unsigned int transferred, good_bytes;
	u64 start_lba, end_lba, bad_lba;

	/*
	 * Some commands have a payload smaller than the device logical
	 * block size (e.g. INQUIRY on a 4K disk).
	 */
	if (scsi_bufflen(scmd) <= sdev->sector_size)
		return 0;

	/* Check if we have a 'bad_lba' information */
	if (!scsi_get_sense_info_fld(scmd->sense_buffer,
				     SCSI_SENSE_BUFFERSIZE,
				     &bad_lba))
		return 0;

	/*
	 * If the bad lba was reported incorrectly, we have no idea where
	 * the error is.
1935 */ 1936 start_lba = sectors_to_logical(sdev, blk_rq_pos(req)); 1937 end_lba = start_lba + bytes_to_logical(sdev, scsi_bufflen(scmd)); 1938 if (bad_lba < start_lba || bad_lba >= end_lba) 1939 return 0; 1940 1941 /* 1942 * resid is optional but mostly filled in. When it's unused, 1943 * its value is zero, so we assume the whole buffer transferred 1944 */ 1945 transferred = scsi_bufflen(scmd) - scsi_get_resid(scmd); 1946 1947 /* This computation should always be done in terms of the 1948 * resolution of the device's medium. 1949 */ 1950 good_bytes = logical_to_bytes(sdev, bad_lba - start_lba); 1951 1952 return min(good_bytes, transferred); 1953 } 1954 1955 /** 1956 * sd_done - bottom half handler: called when the lower level 1957 * driver has completed (successfully or otherwise) a scsi command. 1958 * @SCpnt: mid-level's per command structure. 1959 * 1960 * Note: potentially run from within an ISR. Must not block. 1961 **/ 1962 static int sd_done(struct scsi_cmnd *SCpnt) 1963 { 1964 int result = SCpnt->result; 1965 unsigned int good_bytes = result ? 0 : scsi_bufflen(SCpnt); 1966 unsigned int sector_size = SCpnt->device->sector_size; 1967 unsigned int resid; 1968 struct scsi_sense_hdr sshdr; 1969 struct scsi_disk *sdkp = scsi_disk(SCpnt->request->rq_disk); 1970 struct request *req = SCpnt->request; 1971 int sense_valid = 0; 1972 int sense_deferred = 0; 1973 1974 switch (req_op(req)) { 1975 case REQ_OP_DISCARD: 1976 case REQ_OP_WRITE_ZEROES: 1977 case REQ_OP_WRITE_SAME: 1978 case REQ_OP_ZONE_RESET: 1979 case REQ_OP_ZONE_RESET_ALL: 1980 case REQ_OP_ZONE_OPEN: 1981 case REQ_OP_ZONE_CLOSE: 1982 case REQ_OP_ZONE_FINISH: 1983 if (!result) { 1984 good_bytes = blk_rq_bytes(req); 1985 scsi_set_resid(SCpnt, 0); 1986 } else { 1987 good_bytes = 0; 1988 scsi_set_resid(SCpnt, blk_rq_bytes(req)); 1989 } 1990 break; 1991 default: 1992 /* 1993 * In case of bogus fw or device, we could end up having 1994 * an unaligned partial completion. Check this here and force 1995 * alignment. 1996 */ 1997 resid = scsi_get_resid(SCpnt); 1998 if (resid & (sector_size - 1)) { 1999 sd_printk(KERN_INFO, sdkp, 2000 "Unaligned partial completion (resid=%u, sector_sz=%u)\n", 2001 resid, sector_size); 2002 scsi_print_command(SCpnt); 2003 resid = min(scsi_bufflen(SCpnt), 2004 round_up(resid, sector_size)); 2005 scsi_set_resid(SCpnt, resid); 2006 } 2007 } 2008 2009 if (result) { 2010 sense_valid = scsi_command_normalize_sense(SCpnt, &sshdr); 2011 if (sense_valid) 2012 sense_deferred = scsi_sense_is_deferred(&sshdr); 2013 } 2014 sdkp->medium_access_timed_out = 0; 2015 2016 if (driver_byte(result) != DRIVER_SENSE && 2017 (!sense_valid || sense_deferred)) 2018 goto out; 2019 2020 switch (sshdr.sense_key) { 2021 case HARDWARE_ERROR: 2022 case MEDIUM_ERROR: 2023 good_bytes = sd_completed_bytes(SCpnt); 2024 break; 2025 case RECOVERED_ERROR: 2026 good_bytes = scsi_bufflen(SCpnt); 2027 break; 2028 case NO_SENSE: 2029 /* This indicates a false check condition, so ignore it. An 2030 * unknown amount of data was transferred so treat it as an 2031 * error. 
2032 */ 2033 SCpnt->result = 0; 2034 memset(SCpnt->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); 2035 break; 2036 case ABORTED_COMMAND: 2037 if (sshdr.asc == 0x10) /* DIF: Target detected corruption */ 2038 good_bytes = sd_completed_bytes(SCpnt); 2039 break; 2040 case ILLEGAL_REQUEST: 2041 switch (sshdr.asc) { 2042 case 0x10: /* DIX: Host detected corruption */ 2043 good_bytes = sd_completed_bytes(SCpnt); 2044 break; 2045 case 0x20: /* INVALID COMMAND OPCODE */ 2046 case 0x24: /* INVALID FIELD IN CDB */ 2047 switch (SCpnt->cmnd[0]) { 2048 case UNMAP: 2049 sd_config_discard(sdkp, SD_LBP_DISABLE); 2050 break; 2051 case WRITE_SAME_16: 2052 case WRITE_SAME: 2053 if (SCpnt->cmnd[1] & 8) { /* UNMAP */ 2054 sd_config_discard(sdkp, SD_LBP_DISABLE); 2055 } else { 2056 sdkp->device->no_write_same = 1; 2057 sd_config_write_same(sdkp); 2058 req->rq_flags |= RQF_QUIET; 2059 } 2060 break; 2061 } 2062 } 2063 break; 2064 default: 2065 break; 2066 } 2067 2068 out: 2069 if (sd_is_zoned(sdkp)) 2070 sd_zbc_complete(SCpnt, good_bytes, &sshdr); 2071 2072 SCSI_LOG_HLCOMPLETE(1, scmd_printk(KERN_INFO, SCpnt, 2073 "sd_done: completed %d of %d bytes\n", 2074 good_bytes, scsi_bufflen(SCpnt))); 2075 2076 return good_bytes; 2077 } 2078 2079 /* 2080 * spinup disk - called only in sd_revalidate_disk() 2081 */ 2082 static void 2083 sd_spinup_disk(struct scsi_disk *sdkp) 2084 { 2085 unsigned char cmd[10]; 2086 unsigned long spintime_expire = 0; 2087 int retries, spintime; 2088 unsigned int the_result; 2089 struct scsi_sense_hdr sshdr; 2090 int sense_valid = 0; 2091 2092 spintime = 0; 2093 2094 /* Spin up drives, as required. Only do this at boot time */ 2095 /* Spinup needs to be done for module loads too. */ 2096 do { 2097 retries = 0; 2098 2099 do { 2100 cmd[0] = TEST_UNIT_READY; 2101 memset((void *) &cmd[1], 0, 9); 2102 2103 the_result = scsi_execute_req(sdkp->device, cmd, 2104 DMA_NONE, NULL, 0, 2105 &sshdr, SD_TIMEOUT, 2106 SD_MAX_RETRIES, NULL); 2107 2108 /* 2109 * If the drive has indicated to us that it 2110 * doesn't have any media in it, don't bother 2111 * with any more polling. 2112 */ 2113 if (media_not_present(sdkp, &sshdr)) 2114 return; 2115 2116 if (the_result) 2117 sense_valid = scsi_sense_valid(&sshdr); 2118 retries++; 2119 } while (retries < 3 && 2120 (!scsi_status_is_good(the_result) || 2121 ((driver_byte(the_result) == DRIVER_SENSE) && 2122 sense_valid && sshdr.sense_key == UNIT_ATTENTION))); 2123 2124 if (driver_byte(the_result) != DRIVER_SENSE) { 2125 /* no sense, TUR either succeeded or failed 2126 * with a status error */ 2127 if(!spintime && !scsi_status_is_good(the_result)) { 2128 sd_print_result(sdkp, "Test Unit Ready failed", 2129 the_result); 2130 } 2131 break; 2132 } 2133 2134 /* 2135 * The device does not want the automatic start to be issued. 
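* (no_start_on_add is normally set via the BLIST_NOSTARTONADD device
* quirk.)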
2136 */ 2137 if (sdkp->device->no_start_on_add) 2138 break; 2139 2140 if (sense_valid && sshdr.sense_key == NOT_READY) { 2141 if (sshdr.asc == 4 && sshdr.ascq == 3) 2142 break; /* manual intervention required */ 2143 if (sshdr.asc == 4 && sshdr.ascq == 0xb) 2144 break; /* standby */ 2145 if (sshdr.asc == 4 && sshdr.ascq == 0xc) 2146 break; /* unavailable */ 2147 if (sshdr.asc == 4 && sshdr.ascq == 0x1b) 2148 break; /* sanitize in progress */ 2149 /* 2150 * Issue command to spin up drive when not ready 2151 */ 2152 if (!spintime) { 2153 sd_printk(KERN_NOTICE, sdkp, "Spinning up disk..."); 2154 cmd[0] = START_STOP; 2155 cmd[1] = 1; /* Return immediately */ 2156 memset((void *) &cmd[2], 0, 8); 2157 cmd[4] = 1; /* Start spin cycle */ 2158 if (sdkp->device->start_stop_pwr_cond) 2159 cmd[4] |= 1 << 4; 2160 scsi_execute_req(sdkp->device, cmd, DMA_NONE, 2161 NULL, 0, &sshdr, 2162 SD_TIMEOUT, SD_MAX_RETRIES, 2163 NULL); 2164 spintime_expire = jiffies + 100 * HZ; 2165 spintime = 1; 2166 } 2167 /* Wait 1 second for next try */ 2168 msleep(1000); 2169 printk(KERN_CONT "."); 2170 2171 /* 2172 * Wait for USB flash devices with slow firmware. 2173 * Yes, this sense key/ASC combination shouldn't 2174 * occur here. It's characteristic of these devices. 2175 */ 2176 } else if (sense_valid && 2177 sshdr.sense_key == UNIT_ATTENTION && 2178 sshdr.asc == 0x28) { 2179 if (!spintime) { 2180 spintime_expire = jiffies + 5 * HZ; 2181 spintime = 1; 2182 } 2183 /* Wait 1 second for next try */ 2184 msleep(1000); 2185 } else { 2186 /* we don't understand the sense code, so it's 2187 * probably pointless to loop */ 2188 if(!spintime) { 2189 sd_printk(KERN_NOTICE, sdkp, "Unit Not Ready\n"); 2190 sd_print_sense_hdr(sdkp, &sshdr); 2191 } 2192 break; 2193 } 2194 2195 } while (spintime && time_before_eq(jiffies, spintime_expire)); 2196 2197 if (spintime) { 2198 if (scsi_status_is_good(the_result)) 2199 printk(KERN_CONT "ready\n"); 2200 else 2201 printk(KERN_CONT "not responding...\n"); 2202 } 2203 } 2204 2205 /* 2206 * Determine whether disk supports Data Integrity Field. 2207 */ 2208 static int sd_read_protection_type(struct scsi_disk *sdkp, unsigned char *buffer) 2209 { 2210 struct scsi_device *sdp = sdkp->device; 2211 u8 type; 2212 int ret = 0; 2213 2214 if (scsi_device_protection(sdp) == 0 || (buffer[12] & 1) == 0) 2215 return ret; 2216 2217 type = ((buffer[12] >> 1) & 7) + 1; /* P_TYPE 0 = Type 1 */ 2218 2219 if (type > T10_PI_TYPE3_PROTECTION) 2220 ret = -ENODEV; 2221 else if (scsi_host_dif_capable(sdp->host, type)) 2222 ret = 1; 2223 2224 if (sdkp->first_scan || type != sdkp->protection_type) 2225 switch (ret) { 2226 case -ENODEV: 2227 sd_printk(KERN_ERR, sdkp, "formatted with unsupported" \ 2228 " protection type %u. Disabling disk!\n", 2229 type); 2230 break; 2231 case 1: 2232 sd_printk(KERN_NOTICE, sdkp, 2233 "Enabling DIF Type %u protection\n", type); 2234 break; 2235 case 0: 2236 sd_printk(KERN_NOTICE, sdkp, 2237 "Disabling DIF Type %u protection\n", type); 2238 break; 2239 } 2240 2241 sdkp->protection_type = type; 2242 2243 return ret; 2244 } 2245 2246 static void read_capacity_error(struct scsi_disk *sdkp, struct scsi_device *sdp, 2247 struct scsi_sense_hdr *sshdr, int sense_valid, 2248 int the_result) 2249 { 2250 if (driver_byte(the_result) == DRIVER_SENSE) 2251 sd_print_sense_hdr(sdkp, sshdr); 2252 else 2253 sd_printk(KERN_NOTICE, sdkp, "Sense not available.\n"); 2254 2255 /* 2256 * Set dirty bit for removable devices if not ready - 2257 * sometimes drives will not report this properly. 
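* ("Dirty bit" here means the media-changed / media-not-present state
* that set_media_not_present() below records for removable devices.)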
2258 */ 2259 if (sdp->removable && 2260 sense_valid && sshdr->sense_key == NOT_READY) 2261 set_media_not_present(sdkp); 2262 2263 /* 2264 * We used to set media_present to 0 here to indicate no media 2265 * in the drive, but some drives fail read capacity even with 2266 * media present, so we can't do that. 2267 */ 2268 sdkp->capacity = 0; /* unknown mapped to zero - as usual */ 2269 } 2270 2271 #define RC16_LEN 32 2272 #if RC16_LEN > SD_BUF_SIZE 2273 #error RC16_LEN must not be more than SD_BUF_SIZE 2274 #endif 2275 2276 #define READ_CAPACITY_RETRIES_ON_RESET 10 2277 2278 static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp, 2279 unsigned char *buffer) 2280 { 2281 unsigned char cmd[16]; 2282 struct scsi_sense_hdr sshdr; 2283 int sense_valid = 0; 2284 int the_result; 2285 int retries = 3, reset_retries = READ_CAPACITY_RETRIES_ON_RESET; 2286 unsigned int alignment; 2287 unsigned long long lba; 2288 unsigned sector_size; 2289 2290 if (sdp->no_read_capacity_16) 2291 return -EINVAL; 2292 2293 do { 2294 memset(cmd, 0, 16); 2295 cmd[0] = SERVICE_ACTION_IN_16; 2296 cmd[1] = SAI_READ_CAPACITY_16; 2297 cmd[13] = RC16_LEN; 2298 memset(buffer, 0, RC16_LEN); 2299 2300 the_result = scsi_execute_req(sdp, cmd, DMA_FROM_DEVICE, 2301 buffer, RC16_LEN, &sshdr, 2302 SD_TIMEOUT, SD_MAX_RETRIES, NULL); 2303 2304 if (media_not_present(sdkp, &sshdr)) 2305 return -ENODEV; 2306 2307 if (the_result) { 2308 sense_valid = scsi_sense_valid(&sshdr); 2309 if (sense_valid && 2310 sshdr.sense_key == ILLEGAL_REQUEST && 2311 (sshdr.asc == 0x20 || sshdr.asc == 0x24) && 2312 sshdr.ascq == 0x00) 2313 /* Invalid Command Operation Code or 2314 * Invalid Field in CDB, just retry 2315 * silently with RC10 */ 2316 return -EINVAL; 2317 if (sense_valid && 2318 sshdr.sense_key == UNIT_ATTENTION && 2319 sshdr.asc == 0x29 && sshdr.ascq == 0x00) 2320 /* Device reset might occur several times, 2321 * give it one more chance */ 2322 if (--reset_retries > 0) 2323 continue; 2324 } 2325 retries--; 2326 2327 } while (the_result && retries); 2328 2329 if (the_result) { 2330 sd_print_result(sdkp, "Read Capacity(16) failed", the_result); 2331 read_capacity_error(sdkp, sdp, &sshdr, sense_valid, the_result); 2332 return -EINVAL; 2333 } 2334 2335 sector_size = get_unaligned_be32(&buffer[8]); 2336 lba = get_unaligned_be64(&buffer[0]); 2337 2338 if (sd_read_protection_type(sdkp, buffer) < 0) { 2339 sdkp->capacity = 0; 2340 return -ENODEV; 2341 } 2342 2343 /* Logical blocks per physical block exponent */ 2344 sdkp->physical_block_size = (1 << (buffer[13] & 0xf)) * sector_size; 2345 2346 /* RC basis */ 2347 sdkp->rc_basis = (buffer[12] >> 4) & 0x3; 2348 2349 /* Lowest aligned logical block */ 2350 alignment = ((buffer[14] & 0x3f) << 8 | buffer[15]) * sector_size; 2351 blk_queue_alignment_offset(sdp->request_queue, alignment); 2352 if (alignment && sdkp->first_scan) 2353 sd_printk(KERN_NOTICE, sdkp, 2354 "physical block alignment offset: %u\n", alignment); 2355 2356 if (buffer[14] & 0x80) { /* LBPME */ 2357 sdkp->lbpme = 1; 2358 2359 if (buffer[14] & 0x40) /* LBPRZ */ 2360 sdkp->lbprz = 1; 2361 2362 sd_config_discard(sdkp, SD_LBP_WS16); 2363 } 2364 2365 sdkp->capacity = lba + 1; 2366 return sector_size; 2367 } 2368 2369 static int read_capacity_10(struct scsi_disk *sdkp, struct scsi_device *sdp, 2370 unsigned char *buffer) 2371 { 2372 unsigned char cmd[16]; 2373 struct scsi_sense_hdr sshdr; 2374 int sense_valid = 0; 2375 int the_result; 2376 int retries = 3, reset_retries = READ_CAPACITY_RETRIES_ON_RESET; 2377 sector_t lba; 2378 
unsigned sector_size;
2379
2380 do {
2381 cmd[0] = READ_CAPACITY;
2382 memset(&cmd[1], 0, 9);
2383 memset(buffer, 0, 8);
2384
2385 the_result = scsi_execute_req(sdp, cmd, DMA_FROM_DEVICE,
2386 buffer, 8, &sshdr,
2387 SD_TIMEOUT, SD_MAX_RETRIES, NULL);
2388
2389 if (media_not_present(sdkp, &sshdr))
2390 return -ENODEV;
2391
2392 if (the_result) {
2393 sense_valid = scsi_sense_valid(&sshdr);
2394 if (sense_valid &&
2395 sshdr.sense_key == UNIT_ATTENTION &&
2396 sshdr.asc == 0x29 && sshdr.ascq == 0x00)
2397 /* Device reset might occur several times,
2398 * give it one more chance */
2399 if (--reset_retries > 0)
2400 continue;
2401 }
2402 retries--;
2403
2404 } while (the_result && retries);
2405
2406 if (the_result) {
2407 sd_print_result(sdkp, "Read Capacity(10) failed", the_result);
2408 read_capacity_error(sdkp, sdp, &sshdr, sense_valid, the_result);
2409 return -EINVAL;
2410 }
2411
2412 sector_size = get_unaligned_be32(&buffer[4]);
2413 lba = get_unaligned_be32(&buffer[0]);
2414
2415 if (sdp->no_read_capacity_16 && (lba == 0xffffffff)) {
2416 /* Some buggy (usb cardreader) devices return an lba of
2417 0xffffffff when they want to report a size of 0 (with
2418 which they really mean no media is present) */
2419 sdkp->capacity = 0;
2420 sdkp->physical_block_size = sector_size;
2421 return sector_size;
2422 }
2423
2424 sdkp->capacity = lba + 1;
2425 sdkp->physical_block_size = sector_size;
2426 return sector_size;
2427 }
2428
2429 static int sd_try_rc16_first(struct scsi_device *sdp)
2430 {
2431 if (sdp->host->max_cmd_len < 16)
2432 return 0;
2433 if (sdp->try_rc_10_first)
2434 return 0;
2435 if (sdp->scsi_level > SCSI_SPC_2)
2436 return 1;
2437 if (scsi_device_protection(sdp))
2438 return 1;
2439 return 0;
2440 }
2441
2442 /*
2443 * read disk capacity
2444 */
2445 static void
2446 sd_read_capacity(struct scsi_disk *sdkp, unsigned char *buffer)
2447 {
2448 int sector_size;
2449 struct scsi_device *sdp = sdkp->device;
2450
2451 if (sd_try_rc16_first(sdp)) {
2452 sector_size = read_capacity_16(sdkp, sdp, buffer);
2453 if (sector_size == -EOVERFLOW)
2454 goto got_data;
2455 if (sector_size == -ENODEV)
2456 return;
2457 if (sector_size < 0)
2458 sector_size = read_capacity_10(sdkp, sdp, buffer);
2459 if (sector_size < 0)
2460 return;
2461 } else {
2462 sector_size = read_capacity_10(sdkp, sdp, buffer);
2463 if (sector_size == -EOVERFLOW)
2464 goto got_data;
2465 if (sector_size < 0)
2466 return;
2467 if ((sizeof(sdkp->capacity) > 4) &&
2468 (sdkp->capacity > 0xffffffffULL)) {
2469 int old_sector_size = sector_size;
2470 sd_printk(KERN_NOTICE, sdkp, "Very big device. "
2471 "Trying to use READ CAPACITY(16).\n");
2472 sector_size = read_capacity_16(sdkp, sdp, buffer);
2473 if (sector_size < 0) {
2474 sd_printk(KERN_NOTICE, sdkp,
2475 "Using 0xffffffff as device size\n");
2476 sdkp->capacity = 1 + (sector_t) 0xffffffff;
2477 sector_size = old_sector_size;
2478 goto got_data;
2479 }
2480 /* Remember that READ CAPACITY(16) succeeded */
2481 sdp->try_rc_10_first = 0;
2482 }
2483 }
2484
2485 /* Some devices are known to return the total number of blocks,
2486 * not the highest block number. Some devices have versions
2487 * which do this and others which do not. Some devices we might
2488 * suspect of doing this but we don't know for certain.
2489 *
2490 * If we know the reported capacity is wrong, decrement it. If
2491 * we can only guess, then assume the number of blocks is even
2492 * (usually true but not always) and err on the side of lowering
2493 * the capacity.
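* For example, a device that returns its total block count from READ
* CAPACITY instead of the last LBA would otherwise appear to be one
* block larger than it really is.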
2494 */ 2495 if (sdp->fix_capacity || 2496 (sdp->guess_capacity && (sdkp->capacity & 0x01))) { 2497 sd_printk(KERN_INFO, sdkp, "Adjusting the sector count " 2498 "from its reported value: %llu\n", 2499 (unsigned long long) sdkp->capacity); 2500 --sdkp->capacity; 2501 } 2502 2503 got_data: 2504 if (sector_size == 0) { 2505 sector_size = 512; 2506 sd_printk(KERN_NOTICE, sdkp, "Sector size 0 reported, " 2507 "assuming 512.\n"); 2508 } 2509 2510 if (sector_size != 512 && 2511 sector_size != 1024 && 2512 sector_size != 2048 && 2513 sector_size != 4096) { 2514 sd_printk(KERN_NOTICE, sdkp, "Unsupported sector size %d.\n", 2515 sector_size); 2516 /* 2517 * The user might want to re-format the drive with 2518 * a supported sectorsize. Once this happens, it 2519 * would be relatively trivial to set the thing up. 2520 * For this reason, we leave the thing in the table. 2521 */ 2522 sdkp->capacity = 0; 2523 /* 2524 * set a bogus sector size so the normal read/write 2525 * logic in the block layer will eventually refuse any 2526 * request on this device without tripping over power 2527 * of two sector size assumptions 2528 */ 2529 sector_size = 512; 2530 } 2531 blk_queue_logical_block_size(sdp->request_queue, sector_size); 2532 blk_queue_physical_block_size(sdp->request_queue, 2533 sdkp->physical_block_size); 2534 sdkp->device->sector_size = sector_size; 2535 2536 if (sdkp->capacity > 0xffffffff) 2537 sdp->use_16_for_rw = 1; 2538 2539 } 2540 2541 /* 2542 * Print disk capacity 2543 */ 2544 static void 2545 sd_print_capacity(struct scsi_disk *sdkp, 2546 sector_t old_capacity) 2547 { 2548 int sector_size = sdkp->device->sector_size; 2549 char cap_str_2[10], cap_str_10[10]; 2550 2551 if (!sdkp->first_scan && old_capacity == sdkp->capacity) 2552 return; 2553 2554 string_get_size(sdkp->capacity, sector_size, 2555 STRING_UNITS_2, cap_str_2, sizeof(cap_str_2)); 2556 string_get_size(sdkp->capacity, sector_size, 2557 STRING_UNITS_10, cap_str_10, sizeof(cap_str_10)); 2558 2559 sd_printk(KERN_NOTICE, sdkp, 2560 "%llu %d-byte logical blocks: (%s/%s)\n", 2561 (unsigned long long)sdkp->capacity, 2562 sector_size, cap_str_10, cap_str_2); 2563 2564 if (sdkp->physical_block_size != sector_size) 2565 sd_printk(KERN_NOTICE, sdkp, 2566 "%u-byte physical blocks\n", 2567 sdkp->physical_block_size); 2568 2569 sd_zbc_print_zones(sdkp); 2570 } 2571 2572 /* called with buffer of length 512 */ 2573 static inline int 2574 sd_do_mode_sense(struct scsi_device *sdp, int dbd, int modepage, 2575 unsigned char *buffer, int len, struct scsi_mode_data *data, 2576 struct scsi_sense_hdr *sshdr) 2577 { 2578 return scsi_mode_sense(sdp, dbd, modepage, buffer, len, 2579 SD_TIMEOUT, SD_MAX_RETRIES, data, 2580 sshdr); 2581 } 2582 2583 /* 2584 * read write protect setting, if possible - called only in sd_revalidate_disk() 2585 * called with buffer of length SD_BUF_SIZE 2586 */ 2587 static void 2588 sd_read_write_protect_flag(struct scsi_disk *sdkp, unsigned char *buffer) 2589 { 2590 int res; 2591 struct scsi_device *sdp = sdkp->device; 2592 struct scsi_mode_data data; 2593 int old_wp = sdkp->write_prot; 2594 2595 set_disk_ro(sdkp->disk, 0); 2596 if (sdp->skip_ms_page_3f) { 2597 sd_first_printk(KERN_NOTICE, sdkp, "Assuming Write Enabled\n"); 2598 return; 2599 } 2600 2601 if (sdp->use_192_bytes_for_3f) { 2602 res = sd_do_mode_sense(sdp, 0, 0x3F, buffer, 192, &data, NULL); 2603 } else { 2604 /* 2605 * First attempt: ask for all pages (0x3F), but only 4 bytes. 
2606 * We have to start carefully: some devices hang if we ask
2607 * for more than is available.
2608 */
2609 res = sd_do_mode_sense(sdp, 0, 0x3F, buffer, 4, &data, NULL);
2610
2611 /*
2612 * Second attempt: ask for page 0. When only page 0 is
2613 * implemented, a request for page 3F may return Sense Key
2614 * 5: Illegal Request, Sense Code 24: Invalid field in
2615 * CDB.
2616 */
2617 if (!scsi_status_is_good(res))
2618 res = sd_do_mode_sense(sdp, 0, 0, buffer, 4, &data, NULL);
2619
2620 /*
2621 * Third attempt: ask for 255 bytes, as we did earlier.
2622 */
2623 if (!scsi_status_is_good(res))
2624 res = sd_do_mode_sense(sdp, 0, 0x3F, buffer, 255,
2625 &data, NULL);
2626 }
2627
2628 if (!scsi_status_is_good(res)) {
2629 sd_first_printk(KERN_WARNING, sdkp,
2630 "Test WP failed, assume Write Enabled\n");
2631 } else {
2632 sdkp->write_prot = ((data.device_specific & 0x80) != 0);
2633 set_disk_ro(sdkp->disk, sdkp->write_prot);
2634 if (sdkp->first_scan || old_wp != sdkp->write_prot) {
2635 sd_printk(KERN_NOTICE, sdkp, "Write Protect is %s\n",
2636 sdkp->write_prot ? "on" : "off");
2637 sd_printk(KERN_DEBUG, sdkp, "Mode Sense: %4ph\n", buffer);
2638 }
2639 }
2640 }
2641
2642 /*
2643 * sd_read_cache_type - called only from sd_revalidate_disk()
2644 * called with buffer of length SD_BUF_SIZE
2645 */
2646 static void
2647 sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)
2648 {
2649 int len = 0, res;
2650 struct scsi_device *sdp = sdkp->device;
2651
2652 int dbd;
2653 int modepage;
2654 int first_len;
2655 struct scsi_mode_data data;
2656 struct scsi_sense_hdr sshdr;
2657 int old_wce = sdkp->WCE;
2658 int old_rcd = sdkp->RCD;
2659 int old_dpofua = sdkp->DPOFUA;
2660
2661
2662 if (sdkp->cache_override)
2663 return;
2664
2665 first_len = 4;
2666 if (sdp->skip_ms_page_8) {
2667 if (sdp->type == TYPE_RBC)
2668 goto defaults;
2669 else {
2670 if (sdp->skip_ms_page_3f)
2671 goto defaults;
2672 modepage = 0x3F;
2673 if (sdp->use_192_bytes_for_3f)
2674 first_len = 192;
2675 dbd = 0;
2676 }
2677 } else if (sdp->type == TYPE_RBC) {
2678 modepage = 6;
2679 dbd = 8;
2680 } else {
2681 modepage = 8;
2682 dbd = 0;
2683 }
2684
2685 /* cautiously ask */
2686 res = sd_do_mode_sense(sdp, dbd, modepage, buffer, first_len,
2687 &data, &sshdr);
2688
2689 if (!scsi_status_is_good(res))
2690 goto bad_sense;
2691
2692 if (!data.header_length) {
2693 modepage = 6;
2694 first_len = 0;
2695 sd_first_printk(KERN_ERR, sdkp,
2696 "Missing header in MODE_SENSE response\n");
2697 }
2698
2699 /* that went OK, now ask for the proper length */
2700 len = data.length;
2701
2702 /*
2703 * We're only interested in the first three bytes, actually.
2704 * But the data cache page is defined for the first 20.
2705 */
2706 if (len < 3)
2707 goto bad_sense;
2708 else if (len > SD_BUF_SIZE) {
2709 sd_first_printk(KERN_NOTICE, sdkp, "Truncating mode parameter "
2710 "data from %d to %d bytes\n", len, SD_BUF_SIZE);
2711 len = SD_BUF_SIZE;
2712 }
2713 if (modepage == 0x3F && sdp->use_192_bytes_for_3f)
2714 len = 192;
2715
2716 /* Get the data */
2717 if (len > first_len)
2718 res = sd_do_mode_sense(sdp, dbd, modepage, buffer, len,
2719 &data, &sshdr);
2720
2721 if (scsi_status_is_good(res)) {
2722 int offset = data.header_length + data.block_descriptor_length;
2723
2724 while (offset < len) {
2725 u8 page_code = buffer[offset] & 0x3F;
2726 u8 spf = buffer[offset] & 0x40;
2727
2728 if (page_code == 8 || page_code == 6) {
2729 /* We're interested only in the first 3 bytes.
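* For the caching page, the third byte holds the WCE and RCD
* bits that are decoded at Page_found below.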
2730 */ 2731 if (len - offset <= 2) { 2732 sd_first_printk(KERN_ERR, sdkp, 2733 "Incomplete mode parameter " 2734 "data\n"); 2735 goto defaults; 2736 } else { 2737 modepage = page_code; 2738 goto Page_found; 2739 } 2740 } else { 2741 /* Go to the next page */ 2742 if (spf && len - offset > 3) 2743 offset += 4 + (buffer[offset+2] << 8) + 2744 buffer[offset+3]; 2745 else if (!spf && len - offset > 1) 2746 offset += 2 + buffer[offset+1]; 2747 else { 2748 sd_first_printk(KERN_ERR, sdkp, 2749 "Incomplete mode " 2750 "parameter data\n"); 2751 goto defaults; 2752 } 2753 } 2754 } 2755 2756 sd_first_printk(KERN_ERR, sdkp, "No Caching mode page found\n"); 2757 goto defaults; 2758 2759 Page_found: 2760 if (modepage == 8) { 2761 sdkp->WCE = ((buffer[offset + 2] & 0x04) != 0); 2762 sdkp->RCD = ((buffer[offset + 2] & 0x01) != 0); 2763 } else { 2764 sdkp->WCE = ((buffer[offset + 2] & 0x01) == 0); 2765 sdkp->RCD = 0; 2766 } 2767 2768 sdkp->DPOFUA = (data.device_specific & 0x10) != 0; 2769 if (sdp->broken_fua) { 2770 sd_first_printk(KERN_NOTICE, sdkp, "Disabling FUA\n"); 2771 sdkp->DPOFUA = 0; 2772 } else if (sdkp->DPOFUA && !sdkp->device->use_10_for_rw && 2773 !sdkp->device->use_16_for_rw) { 2774 sd_first_printk(KERN_NOTICE, sdkp, 2775 "Uses READ/WRITE(6), disabling FUA\n"); 2776 sdkp->DPOFUA = 0; 2777 } 2778 2779 /* No cache flush allowed for write protected devices */ 2780 if (sdkp->WCE && sdkp->write_prot) 2781 sdkp->WCE = 0; 2782 2783 if (sdkp->first_scan || old_wce != sdkp->WCE || 2784 old_rcd != sdkp->RCD || old_dpofua != sdkp->DPOFUA) 2785 sd_printk(KERN_NOTICE, sdkp, 2786 "Write cache: %s, read cache: %s, %s\n", 2787 sdkp->WCE ? "enabled" : "disabled", 2788 sdkp->RCD ? "disabled" : "enabled", 2789 sdkp->DPOFUA ? "supports DPO and FUA" 2790 : "doesn't support DPO or FUA"); 2791 2792 return; 2793 } 2794 2795 bad_sense: 2796 if (scsi_sense_valid(&sshdr) && 2797 sshdr.sense_key == ILLEGAL_REQUEST && 2798 sshdr.asc == 0x24 && sshdr.ascq == 0x0) 2799 /* Invalid field in CDB */ 2800 sd_first_printk(KERN_NOTICE, sdkp, "Cache data unavailable\n"); 2801 else 2802 sd_first_printk(KERN_ERR, sdkp, 2803 "Asking for cache data failed\n"); 2804 2805 defaults: 2806 if (sdp->wce_default_on) { 2807 sd_first_printk(KERN_NOTICE, sdkp, 2808 "Assuming drive cache: write back\n"); 2809 sdkp->WCE = 1; 2810 } else { 2811 sd_first_printk(KERN_ERR, sdkp, 2812 "Assuming drive cache: write through\n"); 2813 sdkp->WCE = 0; 2814 } 2815 sdkp->RCD = 0; 2816 sdkp->DPOFUA = 0; 2817 } 2818 2819 /* 2820 * The ATO bit indicates whether the DIF application tag is available 2821 * for use by the operating system. 
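* The bit is reported in byte 5, bit 7 of the Control mode page (0x0A),
* which is what this routine fetches and checks.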
2822 */ 2823 static void sd_read_app_tag_own(struct scsi_disk *sdkp, unsigned char *buffer) 2824 { 2825 int res, offset; 2826 struct scsi_device *sdp = sdkp->device; 2827 struct scsi_mode_data data; 2828 struct scsi_sense_hdr sshdr; 2829 2830 if (sdp->type != TYPE_DISK && sdp->type != TYPE_ZBC) 2831 return; 2832 2833 if (sdkp->protection_type == 0) 2834 return; 2835 2836 res = scsi_mode_sense(sdp, 1, 0x0a, buffer, 36, SD_TIMEOUT, 2837 SD_MAX_RETRIES, &data, &sshdr); 2838 2839 if (!scsi_status_is_good(res) || !data.header_length || 2840 data.length < 6) { 2841 sd_first_printk(KERN_WARNING, sdkp, 2842 "getting Control mode page failed, assume no ATO\n"); 2843 2844 if (scsi_sense_valid(&sshdr)) 2845 sd_print_sense_hdr(sdkp, &sshdr); 2846 2847 return; 2848 } 2849 2850 offset = data.header_length + data.block_descriptor_length; 2851 2852 if ((buffer[offset] & 0x3f) != 0x0a) { 2853 sd_first_printk(KERN_ERR, sdkp, "ATO Got wrong page\n"); 2854 return; 2855 } 2856 2857 if ((buffer[offset + 5] & 0x80) == 0) 2858 return; 2859 2860 sdkp->ATO = 1; 2861 2862 return; 2863 } 2864 2865 /** 2866 * sd_read_block_limits - Query disk device for preferred I/O sizes. 2867 * @sdkp: disk to query 2868 */ 2869 static void sd_read_block_limits(struct scsi_disk *sdkp) 2870 { 2871 unsigned int sector_sz = sdkp->device->sector_size; 2872 const int vpd_len = 64; 2873 unsigned char *buffer = kmalloc(vpd_len, GFP_KERNEL); 2874 2875 if (!buffer || 2876 /* Block Limits VPD */ 2877 scsi_get_vpd_page(sdkp->device, 0xb0, buffer, vpd_len)) 2878 goto out; 2879 2880 blk_queue_io_min(sdkp->disk->queue, 2881 get_unaligned_be16(&buffer[6]) * sector_sz); 2882 2883 sdkp->max_xfer_blocks = get_unaligned_be32(&buffer[8]); 2884 sdkp->opt_xfer_blocks = get_unaligned_be32(&buffer[12]); 2885 2886 if (buffer[3] == 0x3c) { 2887 unsigned int lba_count, desc_count; 2888 2889 sdkp->max_ws_blocks = (u32)get_unaligned_be64(&buffer[36]); 2890 2891 if (!sdkp->lbpme) 2892 goto out; 2893 2894 lba_count = get_unaligned_be32(&buffer[20]); 2895 desc_count = get_unaligned_be32(&buffer[24]); 2896 2897 if (lba_count && desc_count) 2898 sdkp->max_unmap_blocks = lba_count; 2899 2900 sdkp->unmap_granularity = get_unaligned_be32(&buffer[28]); 2901 2902 if (buffer[32] & 0x80) 2903 sdkp->unmap_alignment = 2904 get_unaligned_be32(&buffer[32]) & ~(1 << 31); 2905 2906 if (!sdkp->lbpvpd) { /* LBP VPD page not provided */ 2907 2908 if (sdkp->max_unmap_blocks) 2909 sd_config_discard(sdkp, SD_LBP_UNMAP); 2910 else 2911 sd_config_discard(sdkp, SD_LBP_WS16); 2912 2913 } else { /* LBP VPD page tells us what to use */ 2914 if (sdkp->lbpu && sdkp->max_unmap_blocks) 2915 sd_config_discard(sdkp, SD_LBP_UNMAP); 2916 else if (sdkp->lbpws) 2917 sd_config_discard(sdkp, SD_LBP_WS16); 2918 else if (sdkp->lbpws10) 2919 sd_config_discard(sdkp, SD_LBP_WS10); 2920 else 2921 sd_config_discard(sdkp, SD_LBP_DISABLE); 2922 } 2923 } 2924 2925 out: 2926 kfree(buffer); 2927 } 2928 2929 /** 2930 * sd_read_block_characteristics - Query block dev. 
characteristics 2931 * @sdkp: disk to query 2932 */ 2933 static void sd_read_block_characteristics(struct scsi_disk *sdkp) 2934 { 2935 struct request_queue *q = sdkp->disk->queue; 2936 unsigned char *buffer; 2937 u16 rot; 2938 const int vpd_len = 64; 2939 2940 buffer = kmalloc(vpd_len, GFP_KERNEL); 2941 2942 if (!buffer || 2943 /* Block Device Characteristics VPD */ 2944 scsi_get_vpd_page(sdkp->device, 0xb1, buffer, vpd_len)) 2945 goto out; 2946 2947 rot = get_unaligned_be16(&buffer[4]); 2948 2949 if (rot == 1) { 2950 blk_queue_flag_set(QUEUE_FLAG_NONROT, q); 2951 blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q); 2952 } 2953 2954 if (sdkp->device->type == TYPE_ZBC) { 2955 /* Host-managed */ 2956 q->limits.zoned = BLK_ZONED_HM; 2957 } else { 2958 sdkp->zoned = (buffer[8] >> 4) & 3; 2959 if (sdkp->zoned == 1) 2960 /* Host-aware */ 2961 q->limits.zoned = BLK_ZONED_HA; 2962 else 2963 /* 2964 * Treat drive-managed devices as 2965 * regular block devices. 2966 */ 2967 q->limits.zoned = BLK_ZONED_NONE; 2968 } 2969 if (blk_queue_is_zoned(q) && sdkp->first_scan) 2970 sd_printk(KERN_NOTICE, sdkp, "Host-%s zoned block device\n", 2971 q->limits.zoned == BLK_ZONED_HM ? "managed" : "aware"); 2972 2973 out: 2974 kfree(buffer); 2975 } 2976 2977 /** 2978 * sd_read_block_provisioning - Query provisioning VPD page 2979 * @sdkp: disk to query 2980 */ 2981 static void sd_read_block_provisioning(struct scsi_disk *sdkp) 2982 { 2983 unsigned char *buffer; 2984 const int vpd_len = 8; 2985 2986 if (sdkp->lbpme == 0) 2987 return; 2988 2989 buffer = kmalloc(vpd_len, GFP_KERNEL); 2990 2991 if (!buffer || scsi_get_vpd_page(sdkp->device, 0xb2, buffer, vpd_len)) 2992 goto out; 2993 2994 sdkp->lbpvpd = 1; 2995 sdkp->lbpu = (buffer[5] >> 7) & 1; /* UNMAP */ 2996 sdkp->lbpws = (buffer[5] >> 6) & 1; /* WRITE SAME(16) with UNMAP */ 2997 sdkp->lbpws10 = (buffer[5] >> 5) & 1; /* WRITE SAME(10) with UNMAP */ 2998 2999 out: 3000 kfree(buffer); 3001 } 3002 3003 static void sd_read_write_same(struct scsi_disk *sdkp, unsigned char *buffer) 3004 { 3005 struct scsi_device *sdev = sdkp->device; 3006 3007 if (sdev->host->no_write_same) { 3008 sdev->no_write_same = 1; 3009 3010 return; 3011 } 3012 3013 if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE, INQUIRY) < 0) { 3014 /* too large values might cause issues with arcmsr */ 3015 int vpd_buf_len = 64; 3016 3017 sdev->no_report_opcodes = 1; 3018 3019 /* Disable WRITE SAME if REPORT SUPPORTED OPERATION 3020 * CODES is unsupported and the device has an ATA 3021 * Information VPD page (SAT). 3022 */ 3023 if (!scsi_get_vpd_page(sdev, 0x89, buffer, vpd_buf_len)) 3024 sdev->no_write_same = 1; 3025 } 3026 3027 if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE, WRITE_SAME_16) == 1) 3028 sdkp->ws16 = 1; 3029 3030 if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE, WRITE_SAME) == 1) 3031 sdkp->ws10 = 1; 3032 } 3033 3034 static void sd_read_security(struct scsi_disk *sdkp, unsigned char *buffer) 3035 { 3036 struct scsi_device *sdev = sdkp->device; 3037 3038 if (!sdev->security_supported) 3039 return; 3040 3041 if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE, 3042 SECURITY_PROTOCOL_IN) == 1 && 3043 scsi_report_opcode(sdev, buffer, SD_BUF_SIZE, 3044 SECURITY_PROTOCOL_OUT) == 1) 3045 sdkp->security = 1; 3046 } 3047 3048 /* 3049 * Determine the device's preferred I/O size for reads and writes 3050 * unless the reported value is unreasonably small, large, not a 3051 * multiple of the physical block size, or simply garbage. 
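* Concretely, opt_xfer_blocks is used only when it is non-zero, no
* larger than dev_max or SD_DEF_XFER_BLOCKS, at least PAGE_SIZE when
* converted to bytes, and a multiple of the physical block size.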
3052 */ 3053 static bool sd_validate_opt_xfer_size(struct scsi_disk *sdkp, 3054 unsigned int dev_max) 3055 { 3056 struct scsi_device *sdp = sdkp->device; 3057 unsigned int opt_xfer_bytes = 3058 logical_to_bytes(sdp, sdkp->opt_xfer_blocks); 3059 3060 if (sdkp->opt_xfer_blocks == 0) 3061 return false; 3062 3063 if (sdkp->opt_xfer_blocks > dev_max) { 3064 sd_first_printk(KERN_WARNING, sdkp, 3065 "Optimal transfer size %u logical blocks " \ 3066 "> dev_max (%u logical blocks)\n", 3067 sdkp->opt_xfer_blocks, dev_max); 3068 return false; 3069 } 3070 3071 if (sdkp->opt_xfer_blocks > SD_DEF_XFER_BLOCKS) { 3072 sd_first_printk(KERN_WARNING, sdkp, 3073 "Optimal transfer size %u logical blocks " \ 3074 "> sd driver limit (%u logical blocks)\n", 3075 sdkp->opt_xfer_blocks, SD_DEF_XFER_BLOCKS); 3076 return false; 3077 } 3078 3079 if (opt_xfer_bytes < PAGE_SIZE) { 3080 sd_first_printk(KERN_WARNING, sdkp, 3081 "Optimal transfer size %u bytes < " \ 3082 "PAGE_SIZE (%u bytes)\n", 3083 opt_xfer_bytes, (unsigned int)PAGE_SIZE); 3084 return false; 3085 } 3086 3087 if (opt_xfer_bytes & (sdkp->physical_block_size - 1)) { 3088 sd_first_printk(KERN_WARNING, sdkp, 3089 "Optimal transfer size %u bytes not a " \ 3090 "multiple of physical block size (%u bytes)\n", 3091 opt_xfer_bytes, sdkp->physical_block_size); 3092 return false; 3093 } 3094 3095 sd_first_printk(KERN_INFO, sdkp, "Optimal transfer size %u bytes\n", 3096 opt_xfer_bytes); 3097 return true; 3098 } 3099 3100 /** 3101 * sd_revalidate_disk - called the first time a new disk is seen, 3102 * performs disk spin up, read_capacity, etc. 3103 * @disk: struct gendisk we care about 3104 **/ 3105 static int sd_revalidate_disk(struct gendisk *disk) 3106 { 3107 struct scsi_disk *sdkp = scsi_disk(disk); 3108 struct scsi_device *sdp = sdkp->device; 3109 struct request_queue *q = sdkp->disk->queue; 3110 sector_t old_capacity = sdkp->capacity; 3111 unsigned char *buffer; 3112 unsigned int dev_max, rw_max; 3113 3114 SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, 3115 "sd_revalidate_disk\n")); 3116 3117 /* 3118 * If the device is offline, don't try and read capacity or any 3119 * of the other niceties. 3120 */ 3121 if (!scsi_device_online(sdp)) 3122 goto out; 3123 3124 buffer = kmalloc(SD_BUF_SIZE, GFP_KERNEL); 3125 if (!buffer) { 3126 sd_printk(KERN_WARNING, sdkp, "sd_revalidate_disk: Memory " 3127 "allocation failure.\n"); 3128 goto out; 3129 } 3130 3131 sd_spinup_disk(sdkp); 3132 3133 /* 3134 * Without media there is no reason to ask; moreover, some devices 3135 * react badly if we do. 3136 */ 3137 if (sdkp->media_present) { 3138 sd_read_capacity(sdkp, buffer); 3139 3140 /* 3141 * set the default to rotational. All non-rotational devices 3142 * support the block characteristics VPD page, which will 3143 * cause this to be updated correctly and any device which 3144 * doesn't support it should be treated as rotational. 
3145 */
3146 blk_queue_flag_clear(QUEUE_FLAG_NONROT, q);
3147 blk_queue_flag_set(QUEUE_FLAG_ADD_RANDOM, q);
3148
3149 if (scsi_device_supports_vpd(sdp)) {
3150 sd_read_block_provisioning(sdkp);
3151 sd_read_block_limits(sdkp);
3152 sd_read_block_characteristics(sdkp);
3153 sd_zbc_read_zones(sdkp, buffer);
3154 }
3155
3156 sd_print_capacity(sdkp, old_capacity);
3157
3158 sd_read_write_protect_flag(sdkp, buffer);
3159 sd_read_cache_type(sdkp, buffer);
3160 sd_read_app_tag_own(sdkp, buffer);
3161 sd_read_write_same(sdkp, buffer);
3162 sd_read_security(sdkp, buffer);
3163 }
3164
3165 /*
3166 * We now have all cache related info, determine how we deal
3167 * with flush requests.
3168 */
3169 sd_set_flush_flag(sdkp);
3170
3171 /* Initial block count limit based on CDB TRANSFER LENGTH field size. */
3172 dev_max = sdp->use_16_for_rw ? SD_MAX_XFER_BLOCKS : SD_DEF_XFER_BLOCKS;
3173
3174 /* Some devices report a maximum block count for READ/WRITE requests. */
3175 dev_max = min_not_zero(dev_max, sdkp->max_xfer_blocks);
3176 q->limits.max_dev_sectors = logical_to_sectors(sdp, dev_max);
3177
3178 if (sd_validate_opt_xfer_size(sdkp, dev_max)) {
3179 q->limits.io_opt = logical_to_bytes(sdp, sdkp->opt_xfer_blocks);
3180 rw_max = logical_to_sectors(sdp, sdkp->opt_xfer_blocks);
3181 } else
3182 rw_max = min_not_zero(logical_to_sectors(sdp, dev_max),
3183 (sector_t)BLK_DEF_MAX_SECTORS);
3184
3185 /* Do not exceed controller limit */
3186 rw_max = min(rw_max, queue_max_hw_sectors(q));
3187
3188 /*
3189 * Only update max_sectors if previously unset or if the current value
3190 * exceeds the capabilities of the hardware.
3191 */
3192 if (sdkp->first_scan ||
3193 q->limits.max_sectors > q->limits.max_dev_sectors ||
3194 q->limits.max_sectors > q->limits.max_hw_sectors)
3195 q->limits.max_sectors = rw_max;
3196
3197 sdkp->first_scan = 0;
3198
3199 set_capacity(disk, logical_to_sectors(sdp, sdkp->capacity));
3200 sd_config_write_same(sdkp);
3201 kfree(buffer);
3202
3203 out:
3204 return 0;
3205 }
3206
3207 /**
3208 * sd_unlock_native_capacity - unlock native capacity
3209 * @disk: struct gendisk to set capacity for
3210 *
3211 * The block layer calls this function if it detects that partitions
3212 * on @disk reach beyond the end of the device. If the SCSI host
3213 * implements the ->unlock_native_capacity() method, it is invoked to
3214 * give it a chance to adjust the device capacity.
3215 *
3216 * CONTEXT:
3217 * Defined by block layer. Might sleep.
3218 */
3219 static void sd_unlock_native_capacity(struct gendisk *disk)
3220 {
3221 struct scsi_device *sdev = scsi_disk(disk)->device;
3222
3223 if (sdev->host->hostt->unlock_native_capacity)
3224 sdev->host->hostt->unlock_native_capacity(sdev);
3225 }
3226
3227 /**
3228 * sd_format_disk_name - format disk name
3229 * @prefix: name prefix - i.e. "sd" for SCSI disks
3230 * @index: index of the disk to format name for
3231 * @buf: output buffer
3232 * @buflen: length of the output buffer
3233 *
3234 * SCSI disk names start at sda. The 26th device is sdz and the
3235 * 27th is sdaa. The last name with a two-letter suffix is sdzz,
3236 * which is followed by sdaaa.
3237 *
3238 * This is basically base-26 counting with one extra 'nil' entry at
3239 * the beginning of each digit's range from the second digit on; the
3240 * name can be computed like a base-26 conversion with the index
3241 * decremented by one after each digit is produced.
3242 *
3243 * CONTEXT:
3244 * Don't care.
3245 *
3246 * RETURNS:
3247 * 0 on success, -errno on failure.
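* For example, index 0 maps to "sda", index 25 to "sdz", index 26 to
* "sdaa", index 701 to "sdzz" and index 702 to "sdaaa".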
3248 */ 3249 static int sd_format_disk_name(char *prefix, int index, char *buf, int buflen) 3250 { 3251 const int base = 'z' - 'a' + 1; 3252 char *begin = buf + strlen(prefix); 3253 char *end = buf + buflen; 3254 char *p; 3255 int unit; 3256 3257 p = end - 1; 3258 *p = '\0'; 3259 unit = base; 3260 do { 3261 if (p == begin) 3262 return -EINVAL; 3263 *--p = 'a' + (index % unit); 3264 index = (index / unit) - 1; 3265 } while (index >= 0); 3266 3267 memmove(begin, p, end - p); 3268 memcpy(buf, prefix, strlen(prefix)); 3269 3270 return 0; 3271 } 3272 3273 /** 3274 * sd_probe - called during driver initialization and whenever a 3275 * new scsi device is attached to the system. It is called once 3276 * for each scsi device (not just disks) present. 3277 * @dev: pointer to device object 3278 * 3279 * Returns 0 if successful (or not interested in this scsi device 3280 * (e.g. scanner)); 1 when there is an error. 3281 * 3282 * Note: this function is invoked from the scsi mid-level. 3283 * This function sets up the mapping between a given 3284 * <host,channel,id,lun> (found in sdp) and new device name 3285 * (e.g. /dev/sda). More precisely it is the block device major 3286 * and minor number that is chosen here. 3287 * 3288 * Assume sd_probe is not re-entrant (for time being) 3289 * Also think about sd_probe() and sd_remove() running coincidentally. 3290 **/ 3291 static int sd_probe(struct device *dev) 3292 { 3293 struct scsi_device *sdp = to_scsi_device(dev); 3294 struct scsi_disk *sdkp; 3295 struct gendisk *gd; 3296 int index; 3297 int error; 3298 3299 scsi_autopm_get_device(sdp); 3300 error = -ENODEV; 3301 if (sdp->type != TYPE_DISK && 3302 sdp->type != TYPE_ZBC && 3303 sdp->type != TYPE_MOD && 3304 sdp->type != TYPE_RBC) 3305 goto out; 3306 3307 #ifndef CONFIG_BLK_DEV_ZONED 3308 if (sdp->type == TYPE_ZBC) 3309 goto out; 3310 #endif 3311 SCSI_LOG_HLQUEUE(3, sdev_printk(KERN_INFO, sdp, 3312 "sd_probe\n")); 3313 3314 error = -ENOMEM; 3315 sdkp = kzalloc(sizeof(*sdkp), GFP_KERNEL); 3316 if (!sdkp) 3317 goto out; 3318 3319 gd = alloc_disk(SD_MINORS); 3320 if (!gd) 3321 goto out_free; 3322 3323 index = ida_alloc(&sd_index_ida, GFP_KERNEL); 3324 if (index < 0) { 3325 sdev_printk(KERN_WARNING, sdp, "sd_probe: memory exhausted.\n"); 3326 goto out_put; 3327 } 3328 3329 error = sd_format_disk_name("sd", index, gd->disk_name, DISK_NAME_LEN); 3330 if (error) { 3331 sdev_printk(KERN_WARNING, sdp, "SCSI disk (sd) name length exceeded.\n"); 3332 goto out_free_index; 3333 } 3334 3335 sdkp->device = sdp; 3336 sdkp->driver = &sd_template; 3337 sdkp->disk = gd; 3338 sdkp->index = index; 3339 atomic_set(&sdkp->openers, 0); 3340 atomic_set(&sdkp->device->ioerr_cnt, 0); 3341 3342 if (!sdp->request_queue->rq_timeout) { 3343 if (sdp->type != TYPE_MOD) 3344 blk_queue_rq_timeout(sdp->request_queue, SD_TIMEOUT); 3345 else 3346 blk_queue_rq_timeout(sdp->request_queue, 3347 SD_MOD_TIMEOUT); 3348 } 3349 3350 device_initialize(&sdkp->dev); 3351 sdkp->dev.parent = dev; 3352 sdkp->dev.class = &sd_disk_class; 3353 dev_set_name(&sdkp->dev, "%s", dev_name(dev)); 3354 3355 error = device_add(&sdkp->dev); 3356 if (error) 3357 goto out_free_index; 3358 3359 get_device(dev); 3360 dev_set_drvdata(dev, sdkp); 3361 3362 gd->major = sd_major((index & 0xf0) >> 4); 3363 gd->first_minor = ((index & 0xf) << 4) | (index & 0xfff00); 3364 3365 gd->fops = &sd_fops; 3366 gd->private_data = &sdkp->driver; 3367 gd->queue = sdkp->device->request_queue; 3368 3369 /* defaults, until the device tells us otherwise */ 3370 sdp->sector_size = 512; 3371 
sdkp->capacity = 0; 3372 sdkp->media_present = 1; 3373 sdkp->write_prot = 0; 3374 sdkp->cache_override = 0; 3375 sdkp->WCE = 0; 3376 sdkp->RCD = 0; 3377 sdkp->ATO = 0; 3378 sdkp->first_scan = 1; 3379 sdkp->max_medium_access_timeouts = SD_MAX_MEDIUM_TIMEOUTS; 3380 3381 sd_revalidate_disk(gd); 3382 3383 gd->flags = GENHD_FL_EXT_DEVT; 3384 if (sdp->removable) { 3385 gd->flags |= GENHD_FL_REMOVABLE; 3386 gd->events |= DISK_EVENT_MEDIA_CHANGE; 3387 gd->event_flags = DISK_EVENT_FLAG_POLL | DISK_EVENT_FLAG_UEVENT; 3388 } 3389 3390 blk_pm_runtime_init(sdp->request_queue, dev); 3391 if (sdp->rpm_autosuspend) { 3392 pm_runtime_set_autosuspend_delay(dev, 3393 sdp->host->hostt->rpm_autosuspend_delay); 3394 } 3395 device_add_disk(dev, gd, NULL); 3396 if (sdkp->capacity) 3397 sd_dif_config_host(sdkp); 3398 3399 sd_revalidate_disk(gd); 3400 3401 if (sdkp->security) { 3402 sdkp->opal_dev = init_opal_dev(sdp, &sd_sec_submit); 3403 if (sdkp->opal_dev) 3404 sd_printk(KERN_NOTICE, sdkp, "supports TCG Opal\n"); 3405 } 3406 3407 sd_printk(KERN_NOTICE, sdkp, "Attached SCSI %sdisk\n", 3408 sdp->removable ? "removable " : ""); 3409 scsi_autopm_put_device(sdp); 3410 3411 return 0; 3412 3413 out_free_index: 3414 ida_free(&sd_index_ida, index); 3415 out_put: 3416 put_disk(gd); 3417 out_free: 3418 kfree(sdkp); 3419 out: 3420 scsi_autopm_put_device(sdp); 3421 return error; 3422 } 3423 3424 /** 3425 * sd_remove - called whenever a scsi disk (previously recognized by 3426 * sd_probe) is detached from the system. It is called (potentially 3427 * multiple times) during sd module unload. 3428 * @dev: pointer to device object 3429 * 3430 * Note: this function is invoked from the scsi mid-level. 3431 * This function potentially frees up a device name (e.g. /dev/sdc) 3432 * that could be re-used by a subsequent sd_probe(). 3433 * This function is not called when the built-in sd driver is "exit-ed". 3434 **/ 3435 static int sd_remove(struct device *dev) 3436 { 3437 struct scsi_disk *sdkp; 3438 dev_t devt; 3439 3440 sdkp = dev_get_drvdata(dev); 3441 devt = disk_devt(sdkp->disk); 3442 scsi_autopm_get_device(sdkp->device); 3443 3444 async_synchronize_full_domain(&scsi_sd_pm_domain); 3445 device_del(&sdkp->dev); 3446 del_gendisk(sdkp->disk); 3447 sd_shutdown(dev); 3448 3449 free_opal_dev(sdkp->opal_dev); 3450 3451 blk_register_region(devt, SD_MINORS, NULL, 3452 sd_default_probe, NULL, NULL); 3453 3454 mutex_lock(&sd_ref_mutex); 3455 dev_set_drvdata(dev, NULL); 3456 put_device(&sdkp->dev); 3457 mutex_unlock(&sd_ref_mutex); 3458 3459 return 0; 3460 } 3461 3462 /** 3463 * scsi_disk_release - Called to free the scsi_disk structure 3464 * @dev: pointer to embedded class device 3465 * 3466 * sd_ref_mutex must be held entering this routine. Because it is 3467 * called on last put, you should always use the scsi_disk_get() 3468 * scsi_disk_put() helpers which manipulate the semaphore directly 3469 * and never do a direct put_device. 3470 **/ 3471 static void scsi_disk_release(struct device *dev) 3472 { 3473 struct scsi_disk *sdkp = to_scsi_disk(dev); 3474 struct gendisk *disk = sdkp->disk; 3475 struct request_queue *q = disk->queue; 3476 3477 ida_free(&sd_index_ida, sdkp->index); 3478 3479 /* 3480 * Wait until all requests that are in progress have completed. 3481 * This is necessary to avoid that e.g. scsi_end_request() crashes 3482 * due to clearing the disk->private_data pointer. 
Wait from inside 3483 * scsi_disk_release() instead of from sd_release() to avoid that 3484 * freezing and unfreezing the request queue affects user space I/O 3485 * in case multiple processes open a /dev/sd... node concurrently. 3486 */ 3487 blk_mq_freeze_queue(q); 3488 blk_mq_unfreeze_queue(q); 3489 3490 disk->private_data = NULL; 3491 put_disk(disk); 3492 put_device(&sdkp->device->sdev_gendev); 3493 3494 kfree(sdkp); 3495 } 3496 3497 static int sd_start_stop_device(struct scsi_disk *sdkp, int start) 3498 { 3499 unsigned char cmd[6] = { START_STOP }; /* START_VALID */ 3500 struct scsi_sense_hdr sshdr; 3501 struct scsi_device *sdp = sdkp->device; 3502 int res; 3503 3504 if (start) 3505 cmd[4] |= 1; /* START */ 3506 3507 if (sdp->start_stop_pwr_cond) 3508 cmd[4] |= start ? 1 << 4 : 3 << 4; /* Active or Standby */ 3509 3510 if (!scsi_device_online(sdp)) 3511 return -ENODEV; 3512 3513 res = scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL, &sshdr, 3514 SD_TIMEOUT, SD_MAX_RETRIES, 0, RQF_PM, NULL); 3515 if (res) { 3516 sd_print_result(sdkp, "Start/Stop Unit failed", res); 3517 if (driver_byte(res) == DRIVER_SENSE) 3518 sd_print_sense_hdr(sdkp, &sshdr); 3519 if (scsi_sense_valid(&sshdr) && 3520 /* 0x3a is medium not present */ 3521 sshdr.asc == 0x3a) 3522 res = 0; 3523 } 3524 3525 /* SCSI error codes must not go to the generic layer */ 3526 if (res) 3527 return -EIO; 3528 3529 return 0; 3530 } 3531 3532 /* 3533 * Send a SYNCHRONIZE CACHE instruction down to the device through 3534 * the normal SCSI command structure. Wait for the command to 3535 * complete. 3536 */ 3537 static void sd_shutdown(struct device *dev) 3538 { 3539 struct scsi_disk *sdkp = dev_get_drvdata(dev); 3540 3541 if (!sdkp) 3542 return; /* this can happen */ 3543 3544 if (pm_runtime_suspended(dev)) 3545 return; 3546 3547 if (sdkp->WCE && sdkp->media_present) { 3548 sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n"); 3549 sd_sync_cache(sdkp, NULL); 3550 } 3551 3552 if (system_state != SYSTEM_RESTART && sdkp->device->manage_start_stop) { 3553 sd_printk(KERN_NOTICE, sdkp, "Stopping disk\n"); 3554 sd_start_stop_device(sdkp, 0); 3555 } 3556 } 3557 3558 static int sd_suspend_common(struct device *dev, bool ignore_stop_errors) 3559 { 3560 struct scsi_disk *sdkp = dev_get_drvdata(dev); 3561 struct scsi_sense_hdr sshdr; 3562 int ret = 0; 3563 3564 if (!sdkp) /* E.g.: runtime suspend following sd_remove() */ 3565 return 0; 3566 3567 if (sdkp->WCE && sdkp->media_present) { 3568 sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n"); 3569 ret = sd_sync_cache(sdkp, &sshdr); 3570 3571 if (ret) { 3572 /* ignore OFFLINE device */ 3573 if (ret == -ENODEV) 3574 return 0; 3575 3576 if (!scsi_sense_valid(&sshdr) || 3577 sshdr.sense_key != ILLEGAL_REQUEST) 3578 return ret; 3579 3580 /* 3581 * sshdr.sense_key == ILLEGAL_REQUEST means this drive 3582 * doesn't support sync. There's not much to do and 3583 * suspend shouldn't fail. 
3584 */ 3585 ret = 0; 3586 } 3587 } 3588 3589 if (sdkp->device->manage_start_stop) { 3590 sd_printk(KERN_NOTICE, sdkp, "Stopping disk\n"); 3591 /* an error is not worth aborting a system sleep */ 3592 ret = sd_start_stop_device(sdkp, 0); 3593 if (ignore_stop_errors) 3594 ret = 0; 3595 } 3596 3597 return ret; 3598 } 3599 3600 static int sd_suspend_system(struct device *dev) 3601 { 3602 return sd_suspend_common(dev, true); 3603 } 3604 3605 static int sd_suspend_runtime(struct device *dev) 3606 { 3607 return sd_suspend_common(dev, false); 3608 } 3609 3610 static int sd_resume(struct device *dev) 3611 { 3612 struct scsi_disk *sdkp = dev_get_drvdata(dev); 3613 int ret; 3614 3615 if (!sdkp) /* E.g.: runtime resume at the start of sd_probe() */ 3616 return 0; 3617 3618 if (!sdkp->device->manage_start_stop) 3619 return 0; 3620 3621 sd_printk(KERN_NOTICE, sdkp, "Starting disk\n"); 3622 ret = sd_start_stop_device(sdkp, 1); 3623 if (!ret) 3624 opal_unlock_from_suspend(sdkp->opal_dev); 3625 return ret; 3626 } 3627 3628 /** 3629 * init_sd - entry point for this driver (both when built in or when 3630 * a module). 3631 * 3632 * Note: this function registers this driver with the scsi mid-level. 3633 **/ 3634 static int __init init_sd(void) 3635 { 3636 int majors = 0, i, err; 3637 3638 SCSI_LOG_HLQUEUE(3, printk("init_sd: sd driver entry point\n")); 3639 3640 for (i = 0; i < SD_MAJORS; i++) { 3641 if (register_blkdev(sd_major(i), "sd") != 0) 3642 continue; 3643 majors++; 3644 blk_register_region(sd_major(i), SD_MINORS, NULL, 3645 sd_default_probe, NULL, NULL); 3646 } 3647 3648 if (!majors) 3649 return -ENODEV; 3650 3651 err = class_register(&sd_disk_class); 3652 if (err) 3653 goto err_out; 3654 3655 sd_cdb_cache = kmem_cache_create("sd_ext_cdb", SD_EXT_CDB_SIZE, 3656 0, 0, NULL); 3657 if (!sd_cdb_cache) { 3658 printk(KERN_ERR "sd: can't init extended cdb cache\n"); 3659 err = -ENOMEM; 3660 goto err_out_class; 3661 } 3662 3663 sd_cdb_pool = mempool_create_slab_pool(SD_MEMPOOL_SIZE, sd_cdb_cache); 3664 if (!sd_cdb_pool) { 3665 printk(KERN_ERR "sd: can't init extended cdb pool\n"); 3666 err = -ENOMEM; 3667 goto err_out_cache; 3668 } 3669 3670 sd_page_pool = mempool_create_page_pool(SD_MEMPOOL_SIZE, 0); 3671 if (!sd_page_pool) { 3672 printk(KERN_ERR "sd: can't init discard page pool\n"); 3673 err = -ENOMEM; 3674 goto err_out_ppool; 3675 } 3676 3677 err = scsi_register_driver(&sd_template.gendrv); 3678 if (err) 3679 goto err_out_driver; 3680 3681 return 0; 3682 3683 err_out_driver: 3684 mempool_destroy(sd_page_pool); 3685 3686 err_out_ppool: 3687 mempool_destroy(sd_cdb_pool); 3688 3689 err_out_cache: 3690 kmem_cache_destroy(sd_cdb_cache); 3691 3692 err_out_class: 3693 class_unregister(&sd_disk_class); 3694 err_out: 3695 for (i = 0; i < SD_MAJORS; i++) 3696 unregister_blkdev(sd_major(i), "sd"); 3697 return err; 3698 } 3699 3700 /** 3701 * exit_sd - exit point for this driver (when it is a module). 3702 * 3703 * Note: this function unregisters this driver from the scsi mid-level. 
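* It also releases the mempools, the extended CDB slab cache, the disk
* class and the reserved block device majors set up by init_sd().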
3704 **/ 3705 static void __exit exit_sd(void) 3706 { 3707 int i; 3708 3709 SCSI_LOG_HLQUEUE(3, printk("exit_sd: exiting sd driver\n")); 3710 3711 scsi_unregister_driver(&sd_template.gendrv); 3712 mempool_destroy(sd_cdb_pool); 3713 mempool_destroy(sd_page_pool); 3714 kmem_cache_destroy(sd_cdb_cache); 3715 3716 class_unregister(&sd_disk_class); 3717 3718 for (i = 0; i < SD_MAJORS; i++) { 3719 blk_unregister_region(sd_major(i), SD_MINORS); 3720 unregister_blkdev(sd_major(i), "sd"); 3721 } 3722 } 3723 3724 module_init(init_sd); 3725 module_exit(exit_sd); 3726 3727 void sd_print_sense_hdr(struct scsi_disk *sdkp, struct scsi_sense_hdr *sshdr) 3728 { 3729 scsi_print_sense_hdr(sdkp->device, 3730 sdkp->disk ? sdkp->disk->disk_name : NULL, sshdr); 3731 } 3732 3733 void sd_print_result(const struct scsi_disk *sdkp, const char *msg, int result) 3734 { 3735 const char *hb_string = scsi_hostbyte_string(result); 3736 const char *db_string = scsi_driverbyte_string(result); 3737 3738 if (hb_string || db_string) 3739 sd_printk(KERN_INFO, sdkp, 3740 "%s: Result: hostbyte=%s driverbyte=%s\n", msg, 3741 hb_string ? hb_string : "invalid", 3742 db_string ? db_string : "invalid"); 3743 else 3744 sd_printk(KERN_INFO, sdkp, 3745 "%s: Result: hostbyte=0x%02x driverbyte=0x%02x\n", 3746 msg, host_byte(result), driver_byte(result)); 3747 } 3748