1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * sd.c Copyright (C) 1992 Drew Eckhardt 4 * Copyright (C) 1993, 1994, 1995, 1999 Eric Youngdale 5 * 6 * Linux scsi disk driver 7 * Initial versions: Drew Eckhardt 8 * Subsequent revisions: Eric Youngdale 9 * Modification history: 10 * - Drew Eckhardt <drew@colorado.edu> original 11 * - Eric Youngdale <eric@andante.org> add scatter-gather, multiple 12 * outstanding request, and other enhancements. 13 * Support loadable low-level scsi drivers. 14 * - Jirka Hanika <geo@ff.cuni.cz> support more scsi disks using 15 * eight major numbers. 16 * - Richard Gooch <rgooch@atnf.csiro.au> support devfs. 17 * - Torben Mathiasen <tmm@image.dk> Resource allocation fixes in 18 * sd_init and cleanups. 19 * - Alex Davis <letmein@erols.com> Fix problem where partition info 20 * not being read in sd_open. Fix problem where removable media 21 * could be ejected after sd_open. 22 * - Douglas Gilbert <dgilbert@interlog.com> cleanup for lk 2.5.x 23 * - Badari Pulavarty <pbadari@us.ibm.com>, Matthew Wilcox 24 * <willy@debian.org>, Kurt Garloff <garloff@suse.de>: 25 * Support 32k/1M disks. 26 * 27 * Logging policy (needs CONFIG_SCSI_LOGGING defined): 28 * - setting up transfer: SCSI_LOG_HLQUEUE levels 1 and 2 29 * - end of transfer (bh + scsi_lib): SCSI_LOG_HLCOMPLETE level 1 30 * - entering sd_ioctl: SCSI_LOG_IOCTL level 1 31 * - entering other commands: SCSI_LOG_HLQUEUE level 3 32 * Note: when the logging level is set by the user, it must be greater 33 * than the level indicated above to trigger output. 34 */ 35 36 #include <linux/bio-integrity.h> 37 #include <linux/module.h> 38 #include <linux/fs.h> 39 #include <linux/kernel.h> 40 #include <linux/mm.h> 41 #include <linux/hdreg.h> 42 #include <linux/errno.h> 43 #include <linux/idr.h> 44 #include <linux/interrupt.h> 45 #include <linux/init.h> 46 #include <linux/blkdev.h> 47 #include <linux/blkpg.h> 48 #include <linux/blk-pm.h> 49 #include <linux/delay.h> 50 #include <linux/rw_hint.h> 51 #include <linux/major.h> 52 #include <linux/mutex.h> 53 #include <linux/string_helpers.h> 54 #include <linux/slab.h> 55 #include <linux/sed-opal.h> 56 #include <linux/pm_runtime.h> 57 #include <linux/pr.h> 58 #include <linux/t10-pi.h> 59 #include <linux/uaccess.h> 60 #include <linux/unaligned.h> 61 62 #include <scsi/scsi.h> 63 #include <scsi/scsi_cmnd.h> 64 #include <scsi/scsi_dbg.h> 65 #include <scsi/scsi_device.h> 66 #include <scsi/scsi_devinfo.h> 67 #include <scsi/scsi_driver.h> 68 #include <scsi/scsi_eh.h> 69 #include <scsi/scsi_host.h> 70 #include <scsi/scsi_ioctl.h> 71 #include <scsi/scsicam.h> 72 #include <scsi/scsi_common.h> 73 74 #include "sd.h" 75 #include "scsi_priv.h" 76 #include "scsi_logging.h" 77 78 MODULE_AUTHOR("Eric Youngdale"); 79 MODULE_DESCRIPTION("SCSI disk (sd) driver"); 80 MODULE_LICENSE("GPL"); 81 82 MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK0_MAJOR); 83 MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK1_MAJOR); 84 MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK2_MAJOR); 85 MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK3_MAJOR); 86 MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK4_MAJOR); 87 MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK5_MAJOR); 88 MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK6_MAJOR); 89 MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK7_MAJOR); 90 MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK8_MAJOR); 91 MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK9_MAJOR); 92 MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK10_MAJOR); 93 MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK11_MAJOR); 94 MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK12_MAJOR); 95 MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK13_MAJOR); 96 
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK14_MAJOR); 97 MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK15_MAJOR); 98 MODULE_ALIAS_SCSI_DEVICE(TYPE_DISK); 99 MODULE_ALIAS_SCSI_DEVICE(TYPE_MOD); 100 MODULE_ALIAS_SCSI_DEVICE(TYPE_RBC); 101 MODULE_ALIAS_SCSI_DEVICE(TYPE_ZBC); 102 103 #define SD_MINORS 16 104 105 static void sd_config_write_same(struct scsi_disk *sdkp, 106 struct queue_limits *lim); 107 static void sd_revalidate_disk(struct gendisk *); 108 109 static DEFINE_IDA(sd_index_ida); 110 111 static mempool_t *sd_page_pool; 112 static struct lock_class_key sd_bio_compl_lkclass; 113 114 static const char *sd_cache_types[] = { 115 "write through", "none", "write back", 116 "write back, no read (daft)" 117 }; 118 119 static void sd_disable_discard(struct scsi_disk *sdkp) 120 { 121 sdkp->provisioning_mode = SD_LBP_DISABLE; 122 blk_queue_disable_discard(sdkp->disk->queue); 123 } 124 125 static void sd_config_discard(struct scsi_disk *sdkp, struct queue_limits *lim, 126 unsigned int mode) 127 { 128 unsigned int logical_block_size = sdkp->device->sector_size; 129 unsigned int max_blocks = 0; 130 131 lim->discard_alignment = sdkp->unmap_alignment * logical_block_size; 132 lim->discard_granularity = max(sdkp->physical_block_size, 133 sdkp->unmap_granularity * logical_block_size); 134 sdkp->provisioning_mode = mode; 135 136 switch (mode) { 137 138 case SD_LBP_FULL: 139 case SD_LBP_DISABLE: 140 break; 141 142 case SD_LBP_UNMAP: 143 max_blocks = min_not_zero(sdkp->max_unmap_blocks, 144 (u32)SD_MAX_WS16_BLOCKS); 145 break; 146 147 case SD_LBP_WS16: 148 if (sdkp->device->unmap_limit_for_ws) 149 max_blocks = sdkp->max_unmap_blocks; 150 else 151 max_blocks = sdkp->max_ws_blocks; 152 153 max_blocks = min_not_zero(max_blocks, (u32)SD_MAX_WS16_BLOCKS); 154 break; 155 156 case SD_LBP_WS10: 157 if (sdkp->device->unmap_limit_for_ws) 158 max_blocks = sdkp->max_unmap_blocks; 159 else 160 max_blocks = sdkp->max_ws_blocks; 161 162 max_blocks = min_not_zero(max_blocks, (u32)SD_MAX_WS10_BLOCKS); 163 break; 164 165 case SD_LBP_ZERO: 166 max_blocks = min_not_zero(sdkp->max_ws_blocks, 167 (u32)SD_MAX_WS10_BLOCKS); 168 break; 169 } 170 171 lim->max_hw_discard_sectors = max_blocks * 172 (logical_block_size >> SECTOR_SHIFT); 173 } 174 175 static void sd_set_flush_flag(struct scsi_disk *sdkp, 176 struct queue_limits *lim) 177 { 178 if (sdkp->WCE) { 179 lim->features |= BLK_FEAT_WRITE_CACHE; 180 if (sdkp->DPOFUA) 181 lim->features |= BLK_FEAT_FUA; 182 else 183 lim->features &= ~BLK_FEAT_FUA; 184 } else { 185 lim->features &= ~(BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA); 186 } 187 } 188 189 static ssize_t 190 cache_type_store(struct device *dev, struct device_attribute *attr, 191 const char *buf, size_t count) 192 { 193 int ct, rcd, wce, sp; 194 struct scsi_disk *sdkp = to_scsi_disk(dev); 195 struct scsi_device *sdp = sdkp->device; 196 char buffer[64]; 197 char *buffer_data; 198 struct scsi_mode_data data; 199 struct scsi_sense_hdr sshdr; 200 static const char temp[] = "temporary "; 201 int len, ret; 202 203 if (sdp->type != TYPE_DISK && sdp->type != TYPE_ZBC) 204 /* no cache control on RBC devices; theoretically they 205 * can do it, but there's probably so many exceptions 206 * it's not worth the risk */ 207 return -EINVAL; 208 209 if (strncmp(buf, temp, sizeof(temp) - 1) == 0) { 210 buf += sizeof(temp) - 1; 211 sdkp->cache_override = 1; 212 } else { 213 sdkp->cache_override = 0; 214 } 215 216 ct = sysfs_match_string(sd_cache_types, buf); 217 if (ct < 0) 218 return -EINVAL; 219 220 rcd = ct & 0x01 ? 
1 : 0; 221 wce = (ct & 0x02) && !sdkp->write_prot ? 1 : 0; 222 223 if (sdkp->cache_override) { 224 struct queue_limits lim; 225 226 sdkp->WCE = wce; 227 sdkp->RCD = rcd; 228 229 lim = queue_limits_start_update(sdkp->disk->queue); 230 sd_set_flush_flag(sdkp, &lim); 231 ret = queue_limits_commit_update_frozen(sdkp->disk->queue, 232 &lim); 233 if (ret) 234 return ret; 235 return count; 236 } 237 238 if (scsi_mode_sense(sdp, 0x08, 8, 0, buffer, sizeof(buffer), SD_TIMEOUT, 239 sdkp->max_retries, &data, NULL)) 240 return -EINVAL; 241 len = min_t(size_t, sizeof(buffer), data.length - data.header_length - 242 data.block_descriptor_length); 243 buffer_data = buffer + data.header_length + 244 data.block_descriptor_length; 245 buffer_data[2] &= ~0x05; 246 buffer_data[2] |= wce << 2 | rcd; 247 sp = buffer_data[0] & 0x80 ? 1 : 0; 248 buffer_data[0] &= ~0x80; 249 250 /* 251 * Ensure WP, DPOFUA, and RESERVED fields are cleared in 252 * received mode parameter buffer before doing MODE SELECT. 253 */ 254 data.device_specific = 0; 255 256 ret = scsi_mode_select(sdp, 1, sp, buffer_data, len, SD_TIMEOUT, 257 sdkp->max_retries, &data, &sshdr); 258 if (ret) { 259 if (ret > 0 && scsi_sense_valid(&sshdr)) 260 sd_print_sense_hdr(sdkp, &sshdr); 261 return -EINVAL; 262 } 263 sd_revalidate_disk(sdkp->disk); 264 return count; 265 } 266 267 static ssize_t 268 manage_start_stop_show(struct device *dev, 269 struct device_attribute *attr, char *buf) 270 { 271 struct scsi_disk *sdkp = to_scsi_disk(dev); 272 struct scsi_device *sdp = sdkp->device; 273 274 return sysfs_emit(buf, "%u\n", 275 sdp->manage_system_start_stop && 276 sdp->manage_runtime_start_stop && 277 sdp->manage_shutdown); 278 } 279 static DEVICE_ATTR_RO(manage_start_stop); 280 281 static ssize_t 282 manage_system_start_stop_show(struct device *dev, 283 struct device_attribute *attr, char *buf) 284 { 285 struct scsi_disk *sdkp = to_scsi_disk(dev); 286 struct scsi_device *sdp = sdkp->device; 287 288 return sysfs_emit(buf, "%u\n", sdp->manage_system_start_stop); 289 } 290 291 static ssize_t 292 manage_system_start_stop_store(struct device *dev, 293 struct device_attribute *attr, 294 const char *buf, size_t count) 295 { 296 struct scsi_disk *sdkp = to_scsi_disk(dev); 297 struct scsi_device *sdp = sdkp->device; 298 bool v; 299 300 if (!capable(CAP_SYS_ADMIN)) 301 return -EACCES; 302 303 if (kstrtobool(buf, &v)) 304 return -EINVAL; 305 306 sdp->manage_system_start_stop = v; 307 308 return count; 309 } 310 static DEVICE_ATTR_RW(manage_system_start_stop); 311 312 static ssize_t 313 manage_runtime_start_stop_show(struct device *dev, 314 struct device_attribute *attr, char *buf) 315 { 316 struct scsi_disk *sdkp = to_scsi_disk(dev); 317 struct scsi_device *sdp = sdkp->device; 318 319 return sysfs_emit(buf, "%u\n", sdp->manage_runtime_start_stop); 320 } 321 322 static ssize_t 323 manage_runtime_start_stop_store(struct device *dev, 324 struct device_attribute *attr, 325 const char *buf, size_t count) 326 { 327 struct scsi_disk *sdkp = to_scsi_disk(dev); 328 struct scsi_device *sdp = sdkp->device; 329 bool v; 330 331 if (!capable(CAP_SYS_ADMIN)) 332 return -EACCES; 333 334 if (kstrtobool(buf, &v)) 335 return -EINVAL; 336 337 sdp->manage_runtime_start_stop = v; 338 339 return count; 340 } 341 static DEVICE_ATTR_RW(manage_runtime_start_stop); 342 343 static ssize_t manage_shutdown_show(struct device *dev, 344 struct device_attribute *attr, char *buf) 345 { 346 struct scsi_disk *sdkp = to_scsi_disk(dev); 347 struct scsi_device *sdp = sdkp->device; 348 349 return 
sysfs_emit(buf, "%u\n", sdp->manage_shutdown); 350 } 351 352 static ssize_t manage_shutdown_store(struct device *dev, 353 struct device_attribute *attr, 354 const char *buf, size_t count) 355 { 356 struct scsi_disk *sdkp = to_scsi_disk(dev); 357 struct scsi_device *sdp = sdkp->device; 358 bool v; 359 360 if (!capable(CAP_SYS_ADMIN)) 361 return -EACCES; 362 363 if (kstrtobool(buf, &v)) 364 return -EINVAL; 365 366 sdp->manage_shutdown = v; 367 368 return count; 369 } 370 static DEVICE_ATTR_RW(manage_shutdown); 371 372 static ssize_t manage_restart_show(struct device *dev, 373 struct device_attribute *attr, char *buf) 374 { 375 struct scsi_disk *sdkp = to_scsi_disk(dev); 376 struct scsi_device *sdp = sdkp->device; 377 378 return sysfs_emit(buf, "%u\n", sdp->manage_restart); 379 } 380 381 static ssize_t manage_restart_store(struct device *dev, 382 struct device_attribute *attr, 383 const char *buf, size_t count) 384 { 385 struct scsi_disk *sdkp = to_scsi_disk(dev); 386 struct scsi_device *sdp = sdkp->device; 387 bool v; 388 389 if (!capable(CAP_SYS_ADMIN)) 390 return -EACCES; 391 392 if (kstrtobool(buf, &v)) 393 return -EINVAL; 394 395 sdp->manage_restart = v; 396 397 return count; 398 } 399 static DEVICE_ATTR_RW(manage_restart); 400 401 static ssize_t 402 allow_restart_show(struct device *dev, struct device_attribute *attr, char *buf) 403 { 404 struct scsi_disk *sdkp = to_scsi_disk(dev); 405 406 return sprintf(buf, "%u\n", sdkp->device->allow_restart); 407 } 408 409 static ssize_t 410 allow_restart_store(struct device *dev, struct device_attribute *attr, 411 const char *buf, size_t count) 412 { 413 bool v; 414 struct scsi_disk *sdkp = to_scsi_disk(dev); 415 struct scsi_device *sdp = sdkp->device; 416 417 if (!capable(CAP_SYS_ADMIN)) 418 return -EACCES; 419 420 if (sdp->type != TYPE_DISK && sdp->type != TYPE_ZBC) 421 return -EINVAL; 422 423 if (kstrtobool(buf, &v)) 424 return -EINVAL; 425 426 sdp->allow_restart = v; 427 428 return count; 429 } 430 static DEVICE_ATTR_RW(allow_restart); 431 432 static ssize_t 433 cache_type_show(struct device *dev, struct device_attribute *attr, char *buf) 434 { 435 struct scsi_disk *sdkp = to_scsi_disk(dev); 436 int ct = sdkp->RCD + 2*sdkp->WCE; 437 438 return sprintf(buf, "%s\n", sd_cache_types[ct]); 439 } 440 static DEVICE_ATTR_RW(cache_type); 441 442 static ssize_t 443 FUA_show(struct device *dev, struct device_attribute *attr, char *buf) 444 { 445 struct scsi_disk *sdkp = to_scsi_disk(dev); 446 447 return sprintf(buf, "%u\n", sdkp->DPOFUA); 448 } 449 static DEVICE_ATTR_RO(FUA); 450 451 static ssize_t 452 protection_type_show(struct device *dev, struct device_attribute *attr, 453 char *buf) 454 { 455 struct scsi_disk *sdkp = to_scsi_disk(dev); 456 457 return sprintf(buf, "%u\n", sdkp->protection_type); 458 } 459 460 static ssize_t 461 protection_type_store(struct device *dev, struct device_attribute *attr, 462 const char *buf, size_t count) 463 { 464 struct scsi_disk *sdkp = to_scsi_disk(dev); 465 unsigned int val; 466 int err; 467 468 if (!capable(CAP_SYS_ADMIN)) 469 return -EACCES; 470 471 err = kstrtouint(buf, 10, &val); 472 473 if (err) 474 return err; 475 476 if (val <= T10_PI_TYPE3_PROTECTION) 477 sdkp->protection_type = val; 478 479 return count; 480 } 481 static DEVICE_ATTR_RW(protection_type); 482 483 static ssize_t 484 protection_mode_show(struct device *dev, struct device_attribute *attr, 485 char *buf) 486 { 487 struct scsi_disk *sdkp = to_scsi_disk(dev); 488 struct scsi_device *sdp = sdkp->device; 489 unsigned int dif, dix; 490 491 dif = 
scsi_host_dif_capable(sdp->host, sdkp->protection_type); 492 dix = scsi_host_dix_capable(sdp->host, sdkp->protection_type); 493 494 if (!dix && scsi_host_dix_capable(sdp->host, T10_PI_TYPE0_PROTECTION)) { 495 dif = 0; 496 dix = 1; 497 } 498 499 if (!dif && !dix) 500 return sprintf(buf, "none\n"); 501 502 return sprintf(buf, "%s%u\n", dix ? "dix" : "dif", dif); 503 } 504 static DEVICE_ATTR_RO(protection_mode); 505 506 static ssize_t 507 app_tag_own_show(struct device *dev, struct device_attribute *attr, char *buf) 508 { 509 struct scsi_disk *sdkp = to_scsi_disk(dev); 510 511 return sprintf(buf, "%u\n", sdkp->ATO); 512 } 513 static DEVICE_ATTR_RO(app_tag_own); 514 515 static ssize_t 516 thin_provisioning_show(struct device *dev, struct device_attribute *attr, 517 char *buf) 518 { 519 struct scsi_disk *sdkp = to_scsi_disk(dev); 520 521 return sprintf(buf, "%u\n", sdkp->lbpme); 522 } 523 static DEVICE_ATTR_RO(thin_provisioning); 524 525 /* sysfs_match_string() requires dense arrays */ 526 static const char *lbp_mode[] = { 527 [SD_LBP_FULL] = "full", 528 [SD_LBP_UNMAP] = "unmap", 529 [SD_LBP_WS16] = "writesame_16", 530 [SD_LBP_WS10] = "writesame_10", 531 [SD_LBP_ZERO] = "writesame_zero", 532 [SD_LBP_DISABLE] = "disabled", 533 }; 534 535 static ssize_t 536 provisioning_mode_show(struct device *dev, struct device_attribute *attr, 537 char *buf) 538 { 539 struct scsi_disk *sdkp = to_scsi_disk(dev); 540 541 return sprintf(buf, "%s\n", lbp_mode[sdkp->provisioning_mode]); 542 } 543 544 static ssize_t 545 provisioning_mode_store(struct device *dev, struct device_attribute *attr, 546 const char *buf, size_t count) 547 { 548 struct scsi_disk *sdkp = to_scsi_disk(dev); 549 struct scsi_device *sdp = sdkp->device; 550 struct queue_limits lim; 551 int mode, err; 552 553 if (!capable(CAP_SYS_ADMIN)) 554 return -EACCES; 555 556 if (sdp->type != TYPE_DISK) 557 return -EINVAL; 558 559 mode = sysfs_match_string(lbp_mode, buf); 560 if (mode < 0) 561 return -EINVAL; 562 563 lim = queue_limits_start_update(sdkp->disk->queue); 564 sd_config_discard(sdkp, &lim, mode); 565 err = queue_limits_commit_update_frozen(sdkp->disk->queue, &lim); 566 if (err) 567 return err; 568 return count; 569 } 570 static DEVICE_ATTR_RW(provisioning_mode); 571 572 /* sysfs_match_string() requires dense arrays */ 573 static const char *zeroing_mode[] = { 574 [SD_ZERO_WRITE] = "write", 575 [SD_ZERO_WS] = "writesame", 576 [SD_ZERO_WS16_UNMAP] = "writesame_16_unmap", 577 [SD_ZERO_WS10_UNMAP] = "writesame_10_unmap", 578 }; 579 580 static ssize_t 581 zeroing_mode_show(struct device *dev, struct device_attribute *attr, 582 char *buf) 583 { 584 struct scsi_disk *sdkp = to_scsi_disk(dev); 585 586 return sprintf(buf, "%s\n", zeroing_mode[sdkp->zeroing_mode]); 587 } 588 589 static ssize_t 590 zeroing_mode_store(struct device *dev, struct device_attribute *attr, 591 const char *buf, size_t count) 592 { 593 struct scsi_disk *sdkp = to_scsi_disk(dev); 594 int mode; 595 596 if (!capable(CAP_SYS_ADMIN)) 597 return -EACCES; 598 599 mode = sysfs_match_string(zeroing_mode, buf); 600 if (mode < 0) 601 return -EINVAL; 602 603 sdkp->zeroing_mode = mode; 604 605 return count; 606 } 607 static DEVICE_ATTR_RW(zeroing_mode); 608 609 static ssize_t 610 max_medium_access_timeouts_show(struct device *dev, 611 struct device_attribute *attr, char *buf) 612 { 613 struct scsi_disk *sdkp = to_scsi_disk(dev); 614 615 return sprintf(buf, "%u\n", sdkp->max_medium_access_timeouts); 616 } 617 618 static ssize_t 619 max_medium_access_timeouts_store(struct device *dev, 620 struct 
device_attribute *attr, const char *buf, 621 size_t count) 622 { 623 struct scsi_disk *sdkp = to_scsi_disk(dev); 624 int err; 625 626 if (!capable(CAP_SYS_ADMIN)) 627 return -EACCES; 628 629 err = kstrtouint(buf, 10, &sdkp->max_medium_access_timeouts); 630 631 return err ? err : count; 632 } 633 static DEVICE_ATTR_RW(max_medium_access_timeouts); 634 635 static ssize_t 636 max_write_same_blocks_show(struct device *dev, struct device_attribute *attr, 637 char *buf) 638 { 639 struct scsi_disk *sdkp = to_scsi_disk(dev); 640 641 return sprintf(buf, "%u\n", sdkp->max_ws_blocks); 642 } 643 644 static ssize_t 645 max_write_same_blocks_store(struct device *dev, struct device_attribute *attr, 646 const char *buf, size_t count) 647 { 648 struct scsi_disk *sdkp = to_scsi_disk(dev); 649 struct scsi_device *sdp = sdkp->device; 650 struct queue_limits lim; 651 unsigned long max; 652 int err; 653 654 if (!capable(CAP_SYS_ADMIN)) 655 return -EACCES; 656 657 if (sdp->type != TYPE_DISK && sdp->type != TYPE_ZBC) 658 return -EINVAL; 659 660 err = kstrtoul(buf, 10, &max); 661 662 if (err) 663 return err; 664 665 if (max == 0) 666 sdp->no_write_same = 1; 667 else if (max <= SD_MAX_WS16_BLOCKS) { 668 sdp->no_write_same = 0; 669 sdkp->max_ws_blocks = max; 670 } 671 672 lim = queue_limits_start_update(sdkp->disk->queue); 673 sd_config_write_same(sdkp, &lim); 674 err = queue_limits_commit_update_frozen(sdkp->disk->queue, &lim); 675 if (err) 676 return err; 677 return count; 678 } 679 static DEVICE_ATTR_RW(max_write_same_blocks); 680 681 static ssize_t 682 zoned_cap_show(struct device *dev, struct device_attribute *attr, char *buf) 683 { 684 struct scsi_disk *sdkp = to_scsi_disk(dev); 685 686 if (sdkp->device->type == TYPE_ZBC) 687 return sprintf(buf, "host-managed\n"); 688 if (sdkp->zoned == 1) 689 return sprintf(buf, "host-aware\n"); 690 if (sdkp->zoned == 2) 691 return sprintf(buf, "drive-managed\n"); 692 return sprintf(buf, "none\n"); 693 } 694 static DEVICE_ATTR_RO(zoned_cap); 695 696 static ssize_t 697 max_retries_store(struct device *dev, struct device_attribute *attr, 698 const char *buf, size_t count) 699 { 700 struct scsi_disk *sdkp = to_scsi_disk(dev); 701 struct scsi_device *sdev = sdkp->device; 702 int retries, err; 703 704 err = kstrtoint(buf, 10, &retries); 705 if (err) 706 return err; 707 708 if (retries == SCSI_CMD_RETRIES_NO_LIMIT || retries <= SD_MAX_RETRIES) { 709 sdkp->max_retries = retries; 710 return count; 711 } 712 713 sdev_printk(KERN_ERR, sdev, "max_retries must be between -1 and %d\n", 714 SD_MAX_RETRIES); 715 return -EINVAL; 716 } 717 718 static ssize_t 719 max_retries_show(struct device *dev, struct device_attribute *attr, 720 char *buf) 721 { 722 struct scsi_disk *sdkp = to_scsi_disk(dev); 723 724 return sprintf(buf, "%d\n", sdkp->max_retries); 725 } 726 727 static DEVICE_ATTR_RW(max_retries); 728 729 static struct attribute *sd_disk_attrs[] = { 730 &dev_attr_cache_type.attr, 731 &dev_attr_FUA.attr, 732 &dev_attr_allow_restart.attr, 733 &dev_attr_manage_start_stop.attr, 734 &dev_attr_manage_system_start_stop.attr, 735 &dev_attr_manage_runtime_start_stop.attr, 736 &dev_attr_manage_shutdown.attr, 737 &dev_attr_manage_restart.attr, 738 &dev_attr_protection_type.attr, 739 &dev_attr_protection_mode.attr, 740 &dev_attr_app_tag_own.attr, 741 &dev_attr_thin_provisioning.attr, 742 &dev_attr_provisioning_mode.attr, 743 &dev_attr_zeroing_mode.attr, 744 &dev_attr_max_write_same_blocks.attr, 745 &dev_attr_max_medium_access_timeouts.attr, 746 &dev_attr_zoned_cap.attr, 747 &dev_attr_max_retries.attr, 
	NULL,
};
ATTRIBUTE_GROUPS(sd_disk);

static void scsi_disk_release(struct device *dev)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);

	ida_free(&sd_index_ida, sdkp->index);
	put_device(&sdkp->device->sdev_gendev);
	free_opal_dev(sdkp->opal_dev);

	kfree(sdkp);
}

static struct class sd_disk_class = {
	.name		= "scsi_disk",
	.dev_release	= scsi_disk_release,
	.dev_groups	= sd_disk_groups,
};

/*
 * Don't request a new module, as that could deadlock in multipath
 * environment.
 */
static void sd_default_probe(dev_t devt)
{
}

/*
 * Device no to disk mapping:
 *
 *       major         disc2     disc  p1
 *   |............|.............|....|....| <- dev_t
 *    31        20 19          8 7  4 3  0
 *
 * Inside a major, we have 16k disks, however mapped non-
 * contiguously. The first 16 disks are for major0, the next
 * ones with major1, ... Disk 256 is for major0 again, disk 272
 * for major1, ...
 * As we stay compatible with our numbering scheme, we can reuse
 * the well-known SCSI majors 8, 65--71, 136--143.
 */
static int sd_major(int major_idx)
{
	switch (major_idx) {
	case 0:
		return SCSI_DISK0_MAJOR;
	case 1 ... 7:
		return SCSI_DISK1_MAJOR + major_idx - 1;
	case 8 ... 15:
		return SCSI_DISK8_MAJOR + major_idx - 8;
	default:
		BUG();
		return 0;	/* shut up gcc */
	}
}

#ifdef CONFIG_BLK_SED_OPAL
static int sd_sec_submit(void *data, u16 spsp, u8 secp, void *buffer,
		size_t len, bool send)
{
	struct scsi_disk *sdkp = data;
	struct scsi_device *sdev = sdkp->device;
	u8 cdb[12] = { 0, };
	const struct scsi_exec_args exec_args = {
		.req_flags = BLK_MQ_REQ_PM,
	};
	int ret;

	cdb[0] = send ? SECURITY_PROTOCOL_OUT : SECURITY_PROTOCOL_IN;
	cdb[1] = secp;
	put_unaligned_be16(spsp, &cdb[2]);
	put_unaligned_be32(len, &cdb[6]);

	ret = scsi_execute_cmd(sdev, cdb, send ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN,
			       buffer, len, SD_TIMEOUT, sdkp->max_retries,
			       &exec_args);
	return ret <= 0 ? ret : -EIO;
}
#endif /* CONFIG_BLK_SED_OPAL */

/*
 * Look up the DIX operation based on whether the command is read or
 * write and whether dix and dif are enabled.
 */
static unsigned int sd_prot_op(bool write, bool dix, bool dif)
{
	/* Lookup table: bit 2 (write), bit 1 (dix), bit 0 (dif) */
	static const unsigned int ops[] = {	/* wrt dix dif */
		SCSI_PROT_NORMAL,		/*  0	0   0  */
		SCSI_PROT_READ_STRIP,		/*  0	0   1  */
		SCSI_PROT_READ_INSERT,		/*  0	1   0  */
		SCSI_PROT_READ_PASS,		/*  0	1   1  */
		SCSI_PROT_NORMAL,		/*  1	0   0  */
		SCSI_PROT_WRITE_INSERT,		/*  1	0   1  */
		SCSI_PROT_WRITE_STRIP,		/*  1	1   0  */
		SCSI_PROT_WRITE_PASS,		/*  1	1   1  */
	};

	return ops[write << 2 | dix << 1 | dif];
}

/*
 * Returns a mask of the protection flags that are valid for a given DIX
 * operation.
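 *
 * As an example of how this mask is consumed (see sd_setup_protect_cmnd()
 * below, which applies "scmd->prot_flags &= sd_prot_flag_mask(prot_op);"):
 * a SCSI_PROT_READ_INSERT command can only carry SCSI_PROT_REF_INCREMENT
 * and SCSI_PROT_IP_CHECKSUM, no matter which flags the bio integrity
 * payload requested.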
854 */ 855 static unsigned int sd_prot_flag_mask(unsigned int prot_op) 856 { 857 static const unsigned int flag_mask[] = { 858 [SCSI_PROT_NORMAL] = 0, 859 860 [SCSI_PROT_READ_STRIP] = SCSI_PROT_TRANSFER_PI | 861 SCSI_PROT_GUARD_CHECK | 862 SCSI_PROT_REF_CHECK | 863 SCSI_PROT_REF_INCREMENT, 864 865 [SCSI_PROT_READ_INSERT] = SCSI_PROT_REF_INCREMENT | 866 SCSI_PROT_IP_CHECKSUM, 867 868 [SCSI_PROT_READ_PASS] = SCSI_PROT_TRANSFER_PI | 869 SCSI_PROT_GUARD_CHECK | 870 SCSI_PROT_REF_CHECK | 871 SCSI_PROT_REF_INCREMENT | 872 SCSI_PROT_IP_CHECKSUM, 873 874 [SCSI_PROT_WRITE_INSERT] = SCSI_PROT_TRANSFER_PI | 875 SCSI_PROT_REF_INCREMENT, 876 877 [SCSI_PROT_WRITE_STRIP] = SCSI_PROT_GUARD_CHECK | 878 SCSI_PROT_REF_CHECK | 879 SCSI_PROT_REF_INCREMENT | 880 SCSI_PROT_IP_CHECKSUM, 881 882 [SCSI_PROT_WRITE_PASS] = SCSI_PROT_TRANSFER_PI | 883 SCSI_PROT_GUARD_CHECK | 884 SCSI_PROT_REF_CHECK | 885 SCSI_PROT_REF_INCREMENT | 886 SCSI_PROT_IP_CHECKSUM, 887 }; 888 889 return flag_mask[prot_op]; 890 } 891 892 static unsigned char sd_setup_protect_cmnd(struct scsi_cmnd *scmd, 893 unsigned int dix, unsigned int dif) 894 { 895 struct request *rq = scsi_cmd_to_rq(scmd); 896 struct bio *bio = rq->bio; 897 unsigned int prot_op = sd_prot_op(rq_data_dir(rq), dix, dif); 898 unsigned int protect = 0; 899 900 if (dix) { /* DIX Type 0, 1, 2, 3 */ 901 if (bio_integrity_flagged(bio, BIP_IP_CHECKSUM)) 902 scmd->prot_flags |= SCSI_PROT_IP_CHECKSUM; 903 904 if (bio_integrity_flagged(bio, BIP_CHECK_GUARD)) 905 scmd->prot_flags |= SCSI_PROT_GUARD_CHECK; 906 } 907 908 if (dif != T10_PI_TYPE3_PROTECTION) { /* DIX/DIF Type 0, 1, 2 */ 909 scmd->prot_flags |= SCSI_PROT_REF_INCREMENT; 910 911 if (bio_integrity_flagged(bio, BIP_CHECK_REFTAG)) 912 scmd->prot_flags |= SCSI_PROT_REF_CHECK; 913 } 914 915 if (dif) { /* DIX/DIF Type 1, 2, 3 */ 916 scmd->prot_flags |= SCSI_PROT_TRANSFER_PI; 917 918 if (bio_integrity_flagged(bio, BIP_DISK_NOCHECK)) 919 protect = 3 << 5; /* Disable target PI checking */ 920 else 921 protect = 1 << 5; /* Enable target PI checking */ 922 } 923 924 scsi_set_prot_op(scmd, prot_op); 925 scsi_set_prot_type(scmd, dif); 926 scmd->prot_flags &= sd_prot_flag_mask(prot_op); 927 928 return protect; 929 } 930 931 static void *sd_set_special_bvec(struct request *rq, unsigned int data_len) 932 { 933 struct page *page; 934 935 page = mempool_alloc(sd_page_pool, GFP_ATOMIC); 936 if (!page) 937 return NULL; 938 clear_highpage(page); 939 bvec_set_page(&rq->special_vec, page, data_len, 0); 940 rq->rq_flags |= RQF_SPECIAL_PAYLOAD; 941 return bvec_virt(&rq->special_vec); 942 } 943 944 static blk_status_t sd_setup_unmap_cmnd(struct scsi_cmnd *cmd) 945 { 946 struct scsi_device *sdp = cmd->device; 947 struct request *rq = scsi_cmd_to_rq(cmd); 948 struct scsi_disk *sdkp = scsi_disk(rq->q->disk); 949 u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq)); 950 u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq)); 951 unsigned int data_len = 24; 952 char *buf; 953 954 buf = sd_set_special_bvec(rq, data_len); 955 if (!buf) 956 return BLK_STS_RESOURCE; 957 958 cmd->cmd_len = 10; 959 cmd->cmnd[0] = UNMAP; 960 cmd->cmnd[8] = 24; 961 962 put_unaligned_be16(6 + 16, &buf[0]); 963 put_unaligned_be16(16, &buf[2]); 964 put_unaligned_be64(lba, &buf[8]); 965 put_unaligned_be32(nr_blocks, &buf[16]); 966 967 cmd->allowed = sdkp->max_retries; 968 cmd->transfersize = data_len; 969 rq->timeout = SD_TIMEOUT; 970 971 return scsi_alloc_sgtables(cmd); 972 } 973 974 static void sd_config_atomic(struct scsi_disk *sdkp, struct queue_limits *lim) 975 { 976 unsigned 
int logical_block_size = sdkp->device->sector_size, 977 physical_block_size_sectors, max_atomic, unit_min, unit_max; 978 979 if ((!sdkp->max_atomic && !sdkp->max_atomic_with_boundary) || 980 sdkp->protection_type == T10_PI_TYPE2_PROTECTION) 981 return; 982 983 physical_block_size_sectors = sdkp->physical_block_size / 984 sdkp->device->sector_size; 985 986 unit_min = rounddown_pow_of_two(sdkp->atomic_granularity ? 987 sdkp->atomic_granularity : 988 physical_block_size_sectors); 989 990 /* 991 * Only use atomic boundary when we have the odd scenario of 992 * sdkp->max_atomic == 0, which the spec does permit. 993 */ 994 if (sdkp->max_atomic) { 995 max_atomic = sdkp->max_atomic; 996 unit_max = rounddown_pow_of_two(sdkp->max_atomic); 997 sdkp->use_atomic_write_boundary = 0; 998 } else { 999 max_atomic = sdkp->max_atomic_with_boundary; 1000 unit_max = rounddown_pow_of_two(sdkp->max_atomic_boundary); 1001 sdkp->use_atomic_write_boundary = 1; 1002 } 1003 1004 /* 1005 * Ensure compliance with granularity and alignment. For now, keep it 1006 * simple and just don't support atomic writes for values mismatched 1007 * with max_{boundary}atomic, physical block size, and 1008 * atomic_granularity itself. 1009 * 1010 * We're really being distrustful by checking unit_max also... 1011 */ 1012 if (sdkp->atomic_granularity > 1) { 1013 if (unit_min > 1 && unit_min % sdkp->atomic_granularity) 1014 return; 1015 if (unit_max > 1 && unit_max % sdkp->atomic_granularity) 1016 return; 1017 } 1018 1019 if (sdkp->atomic_alignment > 1) { 1020 if (unit_min > 1 && unit_min % sdkp->atomic_alignment) 1021 return; 1022 if (unit_max > 1 && unit_max % sdkp->atomic_alignment) 1023 return; 1024 } 1025 1026 lim->atomic_write_hw_max = max_atomic * logical_block_size; 1027 lim->atomic_write_hw_boundary = 0; 1028 lim->atomic_write_hw_unit_min = unit_min * logical_block_size; 1029 lim->atomic_write_hw_unit_max = unit_max * logical_block_size; 1030 lim->features |= BLK_FEAT_ATOMIC_WRITES; 1031 } 1032 1033 static blk_status_t sd_setup_write_same16_cmnd(struct scsi_cmnd *cmd, 1034 bool unmap) 1035 { 1036 struct scsi_device *sdp = cmd->device; 1037 struct request *rq = scsi_cmd_to_rq(cmd); 1038 struct scsi_disk *sdkp = scsi_disk(rq->q->disk); 1039 u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq)); 1040 u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq)); 1041 u32 data_len = sdp->sector_size; 1042 1043 if (!sd_set_special_bvec(rq, data_len)) 1044 return BLK_STS_RESOURCE; 1045 1046 cmd->cmd_len = 16; 1047 cmd->cmnd[0] = WRITE_SAME_16; 1048 if (unmap) 1049 cmd->cmnd[1] = 0x8; /* UNMAP */ 1050 put_unaligned_be64(lba, &cmd->cmnd[2]); 1051 put_unaligned_be32(nr_blocks, &cmd->cmnd[10]); 1052 1053 cmd->allowed = sdkp->max_retries; 1054 cmd->transfersize = data_len; 1055 rq->timeout = unmap ? 
SD_TIMEOUT : SD_WRITE_SAME_TIMEOUT; 1056 1057 return scsi_alloc_sgtables(cmd); 1058 } 1059 1060 static blk_status_t sd_setup_write_same10_cmnd(struct scsi_cmnd *cmd, 1061 bool unmap) 1062 { 1063 struct scsi_device *sdp = cmd->device; 1064 struct request *rq = scsi_cmd_to_rq(cmd); 1065 struct scsi_disk *sdkp = scsi_disk(rq->q->disk); 1066 u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq)); 1067 u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq)); 1068 u32 data_len = sdp->sector_size; 1069 1070 if (!sd_set_special_bvec(rq, data_len)) 1071 return BLK_STS_RESOURCE; 1072 1073 cmd->cmd_len = 10; 1074 cmd->cmnd[0] = WRITE_SAME; 1075 if (unmap) 1076 cmd->cmnd[1] = 0x8; /* UNMAP */ 1077 put_unaligned_be32(lba, &cmd->cmnd[2]); 1078 put_unaligned_be16(nr_blocks, &cmd->cmnd[7]); 1079 1080 cmd->allowed = sdkp->max_retries; 1081 cmd->transfersize = data_len; 1082 rq->timeout = unmap ? SD_TIMEOUT : SD_WRITE_SAME_TIMEOUT; 1083 1084 return scsi_alloc_sgtables(cmd); 1085 } 1086 1087 static blk_status_t sd_setup_write_zeroes_cmnd(struct scsi_cmnd *cmd) 1088 { 1089 struct request *rq = scsi_cmd_to_rq(cmd); 1090 struct scsi_device *sdp = cmd->device; 1091 struct scsi_disk *sdkp = scsi_disk(rq->q->disk); 1092 u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq)); 1093 u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq)); 1094 1095 if (!(rq->cmd_flags & REQ_NOUNMAP)) { 1096 switch (sdkp->zeroing_mode) { 1097 case SD_ZERO_WS16_UNMAP: 1098 return sd_setup_write_same16_cmnd(cmd, true); 1099 case SD_ZERO_WS10_UNMAP: 1100 return sd_setup_write_same10_cmnd(cmd, true); 1101 } 1102 } 1103 1104 if (sdp->no_write_same) { 1105 rq->rq_flags |= RQF_QUIET; 1106 return BLK_STS_TARGET; 1107 } 1108 1109 if (sdkp->ws16 || lba > 0xffffffff || nr_blocks > 0xffff) 1110 return sd_setup_write_same16_cmnd(cmd, false); 1111 1112 return sd_setup_write_same10_cmnd(cmd, false); 1113 } 1114 1115 static void sd_disable_write_same(struct scsi_disk *sdkp) 1116 { 1117 sdkp->device->no_write_same = 1; 1118 sdkp->max_ws_blocks = 0; 1119 blk_queue_disable_write_zeroes(sdkp->disk->queue); 1120 } 1121 1122 static void sd_config_write_same(struct scsi_disk *sdkp, 1123 struct queue_limits *lim) 1124 { 1125 unsigned int logical_block_size = sdkp->device->sector_size; 1126 1127 if (sdkp->device->no_write_same) { 1128 sdkp->max_ws_blocks = 0; 1129 goto out; 1130 } 1131 1132 /* Some devices can not handle block counts above 0xffff despite 1133 * supporting WRITE SAME(16). Consequently we default to 64k 1134 * blocks per I/O unless the device explicitly advertises a 1135 * bigger limit. 
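 *
 * For example, with the selection logic below, a device that supports
 * WRITE SAME(16) but reports a maximum write same length of zero ends
 * up with max_ws_blocks capped at SD_MAX_WS10_BLOCKS (0xffff) by
 * min_not_zero().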
1136 */ 1137 if (sdkp->max_ws_blocks > SD_MAX_WS10_BLOCKS) 1138 sdkp->max_ws_blocks = min_not_zero(sdkp->max_ws_blocks, 1139 (u32)SD_MAX_WS16_BLOCKS); 1140 else if (sdkp->ws16 || sdkp->ws10 || sdkp->device->no_report_opcodes) 1141 sdkp->max_ws_blocks = min_not_zero(sdkp->max_ws_blocks, 1142 (u32)SD_MAX_WS10_BLOCKS); 1143 else { 1144 sdkp->device->no_write_same = 1; 1145 sdkp->max_ws_blocks = 0; 1146 } 1147 1148 if (sdkp->lbprz && sdkp->lbpws) 1149 sdkp->zeroing_mode = SD_ZERO_WS16_UNMAP; 1150 else if (sdkp->lbprz && sdkp->lbpws10) 1151 sdkp->zeroing_mode = SD_ZERO_WS10_UNMAP; 1152 else if (sdkp->max_ws_blocks) 1153 sdkp->zeroing_mode = SD_ZERO_WS; 1154 else 1155 sdkp->zeroing_mode = SD_ZERO_WRITE; 1156 1157 if (sdkp->max_ws_blocks && 1158 sdkp->physical_block_size > logical_block_size) { 1159 /* 1160 * Reporting a maximum number of blocks that is not aligned 1161 * on the device physical size would cause a large write same 1162 * request to be split into physically unaligned chunks by 1163 * __blkdev_issue_write_zeroes() even if the caller of this 1164 * functions took care to align the large request. So make sure 1165 * the maximum reported is aligned to the device physical block 1166 * size. This is only an optional optimization for regular 1167 * disks, but this is mandatory to avoid failure of large write 1168 * same requests directed at sequential write required zones of 1169 * host-managed ZBC disks. 1170 */ 1171 sdkp->max_ws_blocks = 1172 round_down(sdkp->max_ws_blocks, 1173 bytes_to_logical(sdkp->device, 1174 sdkp->physical_block_size)); 1175 } 1176 1177 out: 1178 lim->max_write_zeroes_sectors = 1179 sdkp->max_ws_blocks * (logical_block_size >> SECTOR_SHIFT); 1180 1181 if (sdkp->zeroing_mode == SD_ZERO_WS16_UNMAP || 1182 sdkp->zeroing_mode == SD_ZERO_WS10_UNMAP) 1183 lim->max_hw_wzeroes_unmap_sectors = 1184 lim->max_write_zeroes_sectors; 1185 } 1186 1187 static blk_status_t sd_setup_flush_cmnd(struct scsi_cmnd *cmd) 1188 { 1189 struct request *rq = scsi_cmd_to_rq(cmd); 1190 struct scsi_disk *sdkp = scsi_disk(rq->q->disk); 1191 1192 /* flush requests don't perform I/O, zero the S/G table */ 1193 memset(&cmd->sdb, 0, sizeof(cmd->sdb)); 1194 1195 if (cmd->device->use_16_for_sync) { 1196 cmd->cmnd[0] = SYNCHRONIZE_CACHE_16; 1197 cmd->cmd_len = 16; 1198 } else { 1199 cmd->cmnd[0] = SYNCHRONIZE_CACHE; 1200 cmd->cmd_len = 10; 1201 } 1202 cmd->transfersize = 0; 1203 cmd->allowed = sdkp->max_retries; 1204 1205 rq->timeout = rq->q->rq_timeout * SD_FLUSH_TIMEOUT_MULTIPLIER; 1206 return BLK_STS_OK; 1207 } 1208 1209 /** 1210 * sd_group_number() - Compute the GROUP NUMBER field 1211 * @cmd: SCSI command for which to compute the value of the six-bit GROUP NUMBER 1212 * field. 1213 * 1214 * From SBC-5 r05 (https://www.t10.org/cgi-bin/ac.pl?t=f&f=sbc5r05.pdf): 1215 * 0: no relative lifetime. 1216 * 1: shortest relative lifetime. 1217 * 2: second shortest relative lifetime. 1218 * 3 - 0x3d: intermediate relative lifetimes. 1219 * 0x3e: second longest relative lifetime. 1220 * 0x3f: longest relative lifetime. 
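 *
 * Worked example of the min3() clamp below: with
 * sdkp->permanent_stream_count == 4, a bio with bi_write_hint == 2 maps
 * to GROUP NUMBER 2, bi_write_hint == 10 is clamped to 4, and the value
 * can never exceed the six-bit maximum of 0x3f.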
1221 */ 1222 static u8 sd_group_number(struct scsi_cmnd *cmd) 1223 { 1224 const struct request *rq = scsi_cmd_to_rq(cmd); 1225 struct scsi_disk *sdkp = scsi_disk(rq->q->disk); 1226 1227 if (!sdkp->rscs) 1228 return 0; 1229 1230 return min3((u32)rq->bio->bi_write_hint, 1231 (u32)sdkp->permanent_stream_count, 0x3fu); 1232 } 1233 1234 static blk_status_t sd_setup_rw32_cmnd(struct scsi_cmnd *cmd, bool write, 1235 sector_t lba, unsigned int nr_blocks, 1236 unsigned char flags, unsigned int dld) 1237 { 1238 cmd->cmd_len = SD_EXT_CDB_SIZE; 1239 cmd->cmnd[0] = VARIABLE_LENGTH_CMD; 1240 cmd->cmnd[6] = sd_group_number(cmd); 1241 cmd->cmnd[7] = 0x18; /* Additional CDB len */ 1242 cmd->cmnd[9] = write ? WRITE_32 : READ_32; 1243 cmd->cmnd[10] = flags; 1244 cmd->cmnd[11] = dld & 0x07; 1245 put_unaligned_be64(lba, &cmd->cmnd[12]); 1246 put_unaligned_be32(lba, &cmd->cmnd[20]); /* Expected Indirect LBA */ 1247 put_unaligned_be32(nr_blocks, &cmd->cmnd[28]); 1248 1249 return BLK_STS_OK; 1250 } 1251 1252 static blk_status_t sd_setup_rw16_cmnd(struct scsi_cmnd *cmd, bool write, 1253 sector_t lba, unsigned int nr_blocks, 1254 unsigned char flags, unsigned int dld) 1255 { 1256 cmd->cmd_len = 16; 1257 cmd->cmnd[0] = write ? WRITE_16 : READ_16; 1258 cmd->cmnd[1] = flags | ((dld >> 2) & 0x01); 1259 cmd->cmnd[14] = ((dld & 0x03) << 6) | sd_group_number(cmd); 1260 cmd->cmnd[15] = 0; 1261 put_unaligned_be64(lba, &cmd->cmnd[2]); 1262 put_unaligned_be32(nr_blocks, &cmd->cmnd[10]); 1263 1264 return BLK_STS_OK; 1265 } 1266 1267 static blk_status_t sd_setup_rw10_cmnd(struct scsi_cmnd *cmd, bool write, 1268 sector_t lba, unsigned int nr_blocks, 1269 unsigned char flags) 1270 { 1271 cmd->cmd_len = 10; 1272 cmd->cmnd[0] = write ? WRITE_10 : READ_10; 1273 cmd->cmnd[1] = flags; 1274 cmd->cmnd[6] = sd_group_number(cmd); 1275 cmd->cmnd[9] = 0; 1276 put_unaligned_be32(lba, &cmd->cmnd[2]); 1277 put_unaligned_be16(nr_blocks, &cmd->cmnd[7]); 1278 1279 return BLK_STS_OK; 1280 } 1281 1282 static blk_status_t sd_setup_rw6_cmnd(struct scsi_cmnd *cmd, bool write, 1283 sector_t lba, unsigned int nr_blocks, 1284 unsigned char flags) 1285 { 1286 /* Avoid that 0 blocks gets translated into 256 blocks. */ 1287 if (WARN_ON_ONCE(nr_blocks == 0)) 1288 return BLK_STS_IOERR; 1289 1290 if (unlikely(flags & 0x8)) { 1291 /* 1292 * This happens only if this drive failed 10byte rw 1293 * command with ILLEGAL_REQUEST during operation and 1294 * thus turned off use_10_for_rw. 1295 */ 1296 scmd_printk(KERN_ERR, cmd, "FUA write on READ/WRITE(6) drive\n"); 1297 return BLK_STS_IOERR; 1298 } 1299 1300 cmd->cmd_len = 6; 1301 cmd->cmnd[0] = write ? WRITE_6 : READ_6; 1302 cmd->cmnd[1] = (lba >> 16) & 0x1f; 1303 cmd->cmnd[2] = (lba >> 8) & 0xff; 1304 cmd->cmnd[3] = lba & 0xff; 1305 cmd->cmnd[4] = nr_blocks; 1306 cmd->cmnd[5] = 0; 1307 1308 return BLK_STS_OK; 1309 } 1310 1311 /* 1312 * Check if a command has a duration limit set. If it does, and the target 1313 * device supports CDL and the feature is enabled, return the limit 1314 * descriptor index to use. Return 0 (no limit) otherwise. 1315 */ 1316 static int sd_cdl_dld(struct scsi_disk *sdkp, struct scsi_cmnd *scmd) 1317 { 1318 struct scsi_device *sdp = sdkp->device; 1319 int hint; 1320 1321 if (!sdp->cdl_supported || !sdp->cdl_enable) 1322 return 0; 1323 1324 /* 1325 * Use "no limit" if the request ioprio does not specify a duration 1326 * limit hint. 
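 *
 * For example, a hint of IOPRIO_HINT_DEV_DURATION_LIMIT_3 selects
 * descriptor index 3 (the mapping below is
 * hint - IOPRIO_HINT_DEV_DURATION_LIMIT_1 + 1), while hints outside the
 * DURATION_LIMIT_1..7 range fall back to 0, i.e. no limit.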
 */
	hint = IOPRIO_PRIO_HINT(req_get_ioprio(scsi_cmd_to_rq(scmd)));
	if (hint < IOPRIO_HINT_DEV_DURATION_LIMIT_1 ||
	    hint > IOPRIO_HINT_DEV_DURATION_LIMIT_7)
		return 0;

	return (hint - IOPRIO_HINT_DEV_DURATION_LIMIT_1) + 1;
}

static blk_status_t sd_setup_atomic_cmnd(struct scsi_cmnd *cmd,
					sector_t lba, unsigned int nr_blocks,
					bool boundary, unsigned char flags)
{
	cmd->cmd_len  = 16;
	cmd->cmnd[0]  = WRITE_ATOMIC_16;
	cmd->cmnd[1]  = flags;
	put_unaligned_be64(lba, &cmd->cmnd[2]);
	put_unaligned_be16(nr_blocks, &cmd->cmnd[12]);
	if (boundary)
		put_unaligned_be16(nr_blocks, &cmd->cmnd[10]);
	else
		put_unaligned_be16(0, &cmd->cmnd[10]);
	cmd->cmnd[14] = 0;
	cmd->cmnd[15] = 0;

	return BLK_STS_OK;
}

static blk_status_t sd_setup_read_write_cmnd(struct scsi_cmnd *cmd)
{
	struct request *rq = scsi_cmd_to_rq(cmd);
	struct scsi_device *sdp = cmd->device;
	struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
	sector_t lba = sectors_to_logical(sdp, blk_rq_pos(rq));
	sector_t threshold;
	unsigned int nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
	unsigned int mask = logical_to_sectors(sdp, 1) - 1;
	bool write = rq_data_dir(rq) == WRITE;
	unsigned char protect, fua;
	unsigned int dld;
	blk_status_t ret;
	unsigned int dif;
	bool dix;

	ret = scsi_alloc_sgtables(cmd);
	if (ret != BLK_STS_OK)
		return ret;

	ret = BLK_STS_IOERR;
	if (!scsi_device_online(sdp) || sdp->changed) {
		scmd_printk(KERN_ERR, cmd, "device offline or changed\n");
		goto fail;
	}

	if (blk_rq_pos(rq) + blk_rq_sectors(rq) > get_capacity(rq->q->disk)) {
		scmd_printk(KERN_ERR, cmd, "access beyond end of device\n");
		goto fail;
	}

	if ((blk_rq_pos(rq) & mask) || (blk_rq_sectors(rq) & mask)) {
		scmd_printk(KERN_ERR, cmd, "request not aligned to the logical block size\n");
		goto fail;
	}

	/*
	 * Some SD card readers can't handle accesses which touch the
	 * last one or two logical blocks. Split accesses as needed.
	 */
	threshold = sdkp->capacity - SD_LAST_BUGGY_SECTORS;

	if (unlikely(sdp->last_sector_bug && lba + nr_blocks > threshold)) {
		if (lba < threshold) {
			/* Access up to the threshold but not beyond */
			nr_blocks = threshold - lba;
		} else {
			/* Access only a single logical block */
			nr_blocks = 1;
		}
	}

	fua = rq->cmd_flags & REQ_FUA ?
0x8 : 0; 1409 dix = scsi_prot_sg_count(cmd); 1410 dif = scsi_host_dif_capable(cmd->device->host, sdkp->protection_type); 1411 dld = sd_cdl_dld(sdkp, cmd); 1412 1413 if (dif || dix) 1414 protect = sd_setup_protect_cmnd(cmd, dix, dif); 1415 else 1416 protect = 0; 1417 1418 if (protect && sdkp->protection_type == T10_PI_TYPE2_PROTECTION) { 1419 ret = sd_setup_rw32_cmnd(cmd, write, lba, nr_blocks, 1420 protect | fua, dld); 1421 } else if (rq->cmd_flags & REQ_ATOMIC) { 1422 ret = sd_setup_atomic_cmnd(cmd, lba, nr_blocks, 1423 sdkp->use_atomic_write_boundary, 1424 protect | fua); 1425 } else if (sdp->use_16_for_rw || (nr_blocks > 0xffff)) { 1426 ret = sd_setup_rw16_cmnd(cmd, write, lba, nr_blocks, 1427 protect | fua, dld); 1428 } else if ((nr_blocks > 0xff) || (lba > 0x1fffff) || 1429 sdp->use_10_for_rw || protect || rq->bio->bi_write_hint) { 1430 ret = sd_setup_rw10_cmnd(cmd, write, lba, nr_blocks, 1431 protect | fua); 1432 } else { 1433 ret = sd_setup_rw6_cmnd(cmd, write, lba, nr_blocks, 1434 protect | fua); 1435 } 1436 1437 if (unlikely(ret != BLK_STS_OK)) 1438 goto fail; 1439 1440 /* 1441 * We shouldn't disconnect in the middle of a sector, so with a dumb 1442 * host adapter, it's safe to assume that we can at least transfer 1443 * this many bytes between each connect / disconnect. 1444 */ 1445 cmd->transfersize = sdp->sector_size; 1446 cmd->underflow = nr_blocks << 9; 1447 cmd->allowed = sdkp->max_retries; 1448 cmd->sdb.length = nr_blocks * sdp->sector_size; 1449 1450 SCSI_LOG_HLQUEUE(1, 1451 scmd_printk(KERN_INFO, cmd, 1452 "%s: block=%llu, count=%d\n", __func__, 1453 (unsigned long long)blk_rq_pos(rq), 1454 blk_rq_sectors(rq))); 1455 SCSI_LOG_HLQUEUE(2, 1456 scmd_printk(KERN_INFO, cmd, 1457 "%s %d/%u 512 byte blocks.\n", 1458 write ? "writing" : "reading", nr_blocks, 1459 blk_rq_sectors(rq))); 1460 1461 /* 1462 * This indicates that the command is ready from our end to be queued. 
1463 */ 1464 return BLK_STS_OK; 1465 fail: 1466 scsi_free_sgtables(cmd); 1467 return ret; 1468 } 1469 1470 static blk_status_t sd_init_command(struct scsi_cmnd *cmd) 1471 { 1472 struct request *rq = scsi_cmd_to_rq(cmd); 1473 1474 switch (req_op(rq)) { 1475 case REQ_OP_DISCARD: 1476 switch (scsi_disk(rq->q->disk)->provisioning_mode) { 1477 case SD_LBP_UNMAP: 1478 return sd_setup_unmap_cmnd(cmd); 1479 case SD_LBP_WS16: 1480 return sd_setup_write_same16_cmnd(cmd, true); 1481 case SD_LBP_WS10: 1482 return sd_setup_write_same10_cmnd(cmd, true); 1483 case SD_LBP_ZERO: 1484 return sd_setup_write_same10_cmnd(cmd, false); 1485 default: 1486 return BLK_STS_TARGET; 1487 } 1488 case REQ_OP_WRITE_ZEROES: 1489 return sd_setup_write_zeroes_cmnd(cmd); 1490 case REQ_OP_FLUSH: 1491 return sd_setup_flush_cmnd(cmd); 1492 case REQ_OP_READ: 1493 case REQ_OP_WRITE: 1494 return sd_setup_read_write_cmnd(cmd); 1495 case REQ_OP_ZONE_RESET: 1496 return sd_zbc_setup_zone_mgmt_cmnd(cmd, ZO_RESET_WRITE_POINTER, 1497 false); 1498 case REQ_OP_ZONE_RESET_ALL: 1499 return sd_zbc_setup_zone_mgmt_cmnd(cmd, ZO_RESET_WRITE_POINTER, 1500 true); 1501 case REQ_OP_ZONE_OPEN: 1502 return sd_zbc_setup_zone_mgmt_cmnd(cmd, ZO_OPEN_ZONE, false); 1503 case REQ_OP_ZONE_CLOSE: 1504 return sd_zbc_setup_zone_mgmt_cmnd(cmd, ZO_CLOSE_ZONE, false); 1505 case REQ_OP_ZONE_FINISH: 1506 return sd_zbc_setup_zone_mgmt_cmnd(cmd, ZO_FINISH_ZONE, false); 1507 default: 1508 WARN_ON_ONCE(1); 1509 return BLK_STS_NOTSUPP; 1510 } 1511 } 1512 1513 static void sd_uninit_command(struct scsi_cmnd *SCpnt) 1514 { 1515 struct request *rq = scsi_cmd_to_rq(SCpnt); 1516 1517 if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) 1518 mempool_free(rq->special_vec.bv_page, sd_page_pool); 1519 } 1520 1521 static bool sd_need_revalidate(struct gendisk *disk, struct scsi_disk *sdkp) 1522 { 1523 if (sdkp->device->removable || sdkp->write_prot) { 1524 if (disk_check_media_change(disk)) 1525 return true; 1526 } 1527 1528 /* 1529 * Force a full rescan after ioctl(BLKRRPART). While the disk state has 1530 * nothing to do with partitions, BLKRRPART is used to force a full 1531 * revalidate after things like a format for historical reasons. 1532 */ 1533 return test_bit(GD_NEED_PART_SCAN, &disk->state); 1534 } 1535 1536 /** 1537 * sd_open - open a scsi disk device 1538 * @disk: disk to open 1539 * @mode: open mode 1540 * 1541 * Returns 0 if successful. Returns a negated errno value in case 1542 * of error. 1543 * 1544 * Note: This can be called from a user context (e.g. fsck(1) ) 1545 * or from within the kernel (e.g. as a result of a mount(1) ). 1546 * In the latter case @inode and @filp carry an abridged amount 1547 * of information as noted above. 1548 * 1549 * Locking: called with disk->open_mutex held. 1550 **/ 1551 static int sd_open(struct gendisk *disk, blk_mode_t mode) 1552 { 1553 struct scsi_disk *sdkp = scsi_disk(disk); 1554 struct scsi_device *sdev = sdkp->device; 1555 int retval; 1556 1557 if (scsi_device_get(sdev)) 1558 return -ENXIO; 1559 1560 SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_open\n")); 1561 1562 /* 1563 * If the device is in error recovery, wait until it is done. 1564 * If the device is offline, then disallow any access to it. 1565 */ 1566 retval = -ENXIO; 1567 if (!scsi_block_when_processing_errors(sdev)) 1568 goto error_out; 1569 1570 if (sd_need_revalidate(disk, sdkp)) 1571 sd_revalidate_disk(disk); 1572 1573 /* 1574 * If the drive is empty, just let the open fail. 
 */
	retval = -ENOMEDIUM;
	if (sdev->removable && !sdkp->media_present &&
	    !(mode & BLK_OPEN_NDELAY))
		goto error_out;

	/*
	 * If the device has the write protect tab set, have the open fail
	 * if the user expects to be able to write to the thing.
	 */
	retval = -EROFS;
	if (sdkp->write_prot && (mode & BLK_OPEN_WRITE))
		goto error_out;

	/*
	 * It is possible that the disk changing stuff resulted in
	 * the device being taken offline. If this is the case,
	 * report this to the user, and don't pretend that the
	 * open actually succeeded.
	 */
	retval = -ENXIO;
	if (!scsi_device_online(sdev))
		goto error_out;

	if ((atomic_inc_return(&sdkp->openers) == 1) && sdev->removable) {
		if (scsi_block_when_processing_errors(sdev))
			scsi_set_medium_removal(sdev, SCSI_REMOVAL_PREVENT);
	}

	return 0;

error_out:
	scsi_device_put(sdev);
	return retval;
}

/**
 * sd_release - invoked when the (last) close(2) is called on this
 * scsi disk.
 * @disk: disk to release
 *
 * Returns 0.
 *
 * Note: may block (uninterruptible) if error recovery is underway
 * on this disk.
 *
 * Locking: called with disk->open_mutex held.
 **/
static void sd_release(struct gendisk *disk)
{
	struct scsi_disk *sdkp = scsi_disk(disk);
	struct scsi_device *sdev = sdkp->device;

	SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_release\n"));

	if (atomic_dec_return(&sdkp->openers) == 0 && sdev->removable) {
		if (scsi_block_when_processing_errors(sdev))
			scsi_set_medium_removal(sdev, SCSI_REMOVAL_ALLOW);
	}

	scsi_device_put(sdev);
}

static int sd_getgeo(struct gendisk *disk, struct hd_geometry *geo)
{
	struct scsi_disk *sdkp = scsi_disk(disk);
	struct scsi_device *sdp = sdkp->device;
	struct Scsi_Host *host = sdp->host;
	sector_t capacity = logical_to_sectors(sdp, sdkp->capacity);
	int diskinfo[4];

	/* default to most commonly used values */
	diskinfo[0] = 0x40;	/* 1 << 6 */
	diskinfo[1] = 0x20;	/* 1 << 5 */
	diskinfo[2] = capacity >> 11;

	/* override with calculated, extended default, or driver values */
	if (host->hostt->bios_param)
		host->hostt->bios_param(sdp, disk, capacity, diskinfo);
	else
		scsicam_bios_param(disk, capacity, diskinfo);

	geo->heads = diskinfo[0];
	geo->sectors = diskinfo[1];
	geo->cylinders = diskinfo[2];
	return 0;
}

/**
 * sd_ioctl - process an ioctl
 * @bdev: target block device
 * @mode: open mode
 * @cmd: ioctl command number
 * @arg: this is the third argument given to ioctl(2) system call.
 *	Often contains a pointer.
 *
 * Returns 0 if successful (some ioctls return positive numbers on
 * success as well). Returns a negated errno value in case of error.
 *
 * Note: most ioctls are forwarded onto the block subsystem or further
 * down in the scsi subsystem.
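 *
 * For example, SED Opal requests (is_sed_ioctl()) are routed to
 * sed_ioctl(), anything else that is not handled here is passed on to
 * scsi_ioctl(), and ioctls issued on a partition are refused with
 * -ENOIOCTLCMD unless the caller has CAP_SYS_RAWIO.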
 **/
static int sd_ioctl(struct block_device *bdev, blk_mode_t mode,
		    unsigned int cmd, unsigned long arg)
{
	struct gendisk *disk = bdev->bd_disk;
	struct scsi_disk *sdkp = scsi_disk(disk);
	struct scsi_device *sdp = sdkp->device;
	void __user *p = (void __user *)arg;
	int error;

	SCSI_LOG_IOCTL(1, sd_printk(KERN_INFO, sdkp,
				    "sd_ioctl: disk=%s, cmd=0x%x\n",
				    disk->disk_name, cmd));

	if (bdev_is_partition(bdev) && !capable(CAP_SYS_RAWIO))
		return -ENOIOCTLCMD;

	/*
	 * If we are in the middle of error recovery, don't let anyone
	 * else try and use this device. Also, if error recovery fails, it
	 * may try and take the device offline, in which case all further
	 * access to the device is prohibited.
	 */
	error = scsi_ioctl_block_when_processing_errors(sdp, cmd,
			(mode & BLK_OPEN_NDELAY));
	if (error)
		return error;

	if (is_sed_ioctl(cmd))
		return sed_ioctl(sdkp->opal_dev, cmd, p);
	return scsi_ioctl(sdp, mode & BLK_OPEN_WRITE, cmd, p);
}

static void set_media_not_present(struct scsi_disk *sdkp)
{
	if (sdkp->media_present)
		sdkp->device->changed = 1;

	if (sdkp->device->removable) {
		sdkp->media_present = 0;
		sdkp->capacity = 0;
	}
}

static int media_not_present(struct scsi_disk *sdkp,
			     struct scsi_sense_hdr *sshdr)
{
	if (!scsi_sense_valid(sshdr))
		return 0;

	/* not invoked for commands that could return deferred errors */
	switch (sshdr->sense_key) {
	case UNIT_ATTENTION:
	case NOT_READY:
		/* medium not present */
		if (sshdr->asc == 0x3A) {
			set_media_not_present(sdkp);
			return 1;
		}
	}
	return 0;
}

/**
 * sd_check_events - check media events
 * @disk: kernel device descriptor
 * @clearing: disk events currently being cleared
 *
 * Returns mask of DISK_EVENT_*.
 *
 * Note: this function is invoked from the block subsystem.
 **/
static unsigned int sd_check_events(struct gendisk *disk, unsigned int clearing)
{
	struct scsi_disk *sdkp = disk->private_data;
	struct scsi_device *sdp;
	int retval;
	bool disk_changed;

	if (!sdkp)
		return 0;

	sdp = sdkp->device;
	SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_check_events\n"));

	/*
	 * If the device is offline, don't send any commands - just pretend as
	 * if the command failed. If the device ever comes back online, we
	 * can deal with it then. It is only because of unrecoverable errors
	 * that we would ever take a device offline in the first place.
	 */
	if (!scsi_device_online(sdp)) {
		set_media_not_present(sdkp);
		goto out;
	}

	/*
	 * Using TEST_UNIT_READY enables differentiation between drive with
	 * no cartridge loaded - NOT READY, drive with changed cartridge -
	 * UNIT ATTENTION, or with same cartridge - GOOD STATUS.
	 *
	 * Drives that auto spin down, e.g. iomega jaz 1G, will be started
	 * by sd_spinup_disk() from sd_revalidate_disk(), which happens whenever
	 * sd_revalidate() is called.
1780 */ 1781 if (scsi_block_when_processing_errors(sdp)) { 1782 struct scsi_sense_hdr sshdr = { 0, }; 1783 1784 retval = scsi_test_unit_ready(sdp, SD_TIMEOUT, sdkp->max_retries, 1785 &sshdr); 1786 1787 /* failed to execute TUR, assume media not present */ 1788 if (retval < 0 || host_byte(retval)) { 1789 set_media_not_present(sdkp); 1790 goto out; 1791 } 1792 1793 if (media_not_present(sdkp, &sshdr)) 1794 goto out; 1795 } 1796 1797 /* 1798 * For removable scsi disk we have to recognise the presence 1799 * of a disk in the drive. 1800 */ 1801 if (!sdkp->media_present) 1802 sdp->changed = 1; 1803 sdkp->media_present = 1; 1804 out: 1805 /* 1806 * sdp->changed is set under the following conditions: 1807 * 1808 * Medium present state has changed in either direction. 1809 * Device has indicated UNIT_ATTENTION. 1810 */ 1811 disk_changed = sdp->changed; 1812 sdp->changed = 0; 1813 return disk_changed ? DISK_EVENT_MEDIA_CHANGE : 0; 1814 } 1815 1816 static int sd_sync_cache(struct scsi_disk *sdkp) 1817 { 1818 int res; 1819 struct scsi_device *sdp = sdkp->device; 1820 const int timeout = sdp->request_queue->rq_timeout 1821 * SD_FLUSH_TIMEOUT_MULTIPLIER; 1822 /* Leave the rest of the command zero to indicate flush everything. */ 1823 const unsigned char cmd[16] = { sdp->use_16_for_sync ? 1824 SYNCHRONIZE_CACHE_16 : SYNCHRONIZE_CACHE }; 1825 struct scsi_sense_hdr sshdr; 1826 struct scsi_failure failure_defs[] = { 1827 { 1828 .allowed = 3, 1829 .result = SCMD_FAILURE_RESULT_ANY, 1830 }, 1831 {} 1832 }; 1833 struct scsi_failures failures = { 1834 .failure_definitions = failure_defs, 1835 }; 1836 const struct scsi_exec_args exec_args = { 1837 .req_flags = BLK_MQ_REQ_PM, 1838 .sshdr = &sshdr, 1839 .failures = &failures, 1840 }; 1841 1842 if (!scsi_device_online(sdp)) 1843 return -ENODEV; 1844 1845 res = scsi_execute_cmd(sdp, cmd, REQ_OP_DRV_IN, NULL, 0, timeout, 1846 sdkp->max_retries, &exec_args); 1847 if (res) { 1848 sd_print_result(sdkp, "Synchronize Cache(10) failed", res); 1849 1850 if (res < 0) 1851 return res; 1852 1853 if (scsi_status_is_check_condition(res) && 1854 scsi_sense_valid(&sshdr)) { 1855 sd_print_sense_hdr(sdkp, &sshdr); 1856 1857 /* we need to evaluate the error return */ 1858 if (sshdr.asc == 0x3a || /* medium not present */ 1859 sshdr.asc == 0x20 || /* invalid command */ 1860 (sshdr.asc == 0x74 && sshdr.ascq == 0x71)) /* drive is password locked */ 1861 /* this is no error here */ 1862 return 0; 1863 1864 /* 1865 * If a format is in progress or if the drive does not 1866 * support sync, there is not much we can do because 1867 * this is called during shutdown or suspend so just 1868 * return success so those operations can proceed. 
1869 */ 1870 if ((sshdr.asc == 0x04 && sshdr.ascq == 0x04) || 1871 sshdr.sense_key == ILLEGAL_REQUEST) 1872 return 0; 1873 } 1874 1875 switch (host_byte(res)) { 1876 /* ignore errors due to racing a disconnection */ 1877 case DID_BAD_TARGET: 1878 case DID_NO_CONNECT: 1879 return 0; 1880 /* signal the upper layer it might try again */ 1881 case DID_BUS_BUSY: 1882 case DID_IMM_RETRY: 1883 case DID_REQUEUE: 1884 case DID_SOFT_ERROR: 1885 return -EBUSY; 1886 default: 1887 return -EIO; 1888 } 1889 } 1890 return 0; 1891 } 1892 1893 static void sd_rescan(struct device *dev) 1894 { 1895 struct scsi_disk *sdkp = dev_get_drvdata(dev); 1896 1897 sd_revalidate_disk(sdkp->disk); 1898 } 1899 1900 static int sd_get_unique_id(struct gendisk *disk, u8 id[16], 1901 enum blk_unique_id type) 1902 { 1903 struct scsi_device *sdev = scsi_disk(disk)->device; 1904 const struct scsi_vpd *vpd; 1905 const unsigned char *d; 1906 int ret = -ENXIO, len; 1907 1908 rcu_read_lock(); 1909 vpd = rcu_dereference(sdev->vpd_pg83); 1910 if (!vpd) 1911 goto out_unlock; 1912 1913 ret = -EINVAL; 1914 for (d = vpd->data + 4; d < vpd->data + vpd->len; d += d[3] + 4) { 1915 /* we only care about designators with LU association */ 1916 if (((d[1] >> 4) & 0x3) != 0x00) 1917 continue; 1918 if ((d[1] & 0xf) != type) 1919 continue; 1920 1921 /* 1922 * Only exit early if a 16-byte descriptor was found. Otherwise 1923 * keep looking as one with more entropy might still show up. 1924 */ 1925 len = d[3]; 1926 if (len != 8 && len != 12 && len != 16) 1927 continue; 1928 ret = len; 1929 memcpy(id, d + 4, len); 1930 if (len == 16) 1931 break; 1932 } 1933 out_unlock: 1934 rcu_read_unlock(); 1935 return ret; 1936 } 1937 1938 static int sd_scsi_to_pr_err(struct scsi_sense_hdr *sshdr, int result) 1939 { 1940 switch (host_byte(result)) { 1941 case DID_TRANSPORT_MARGINAL: 1942 case DID_TRANSPORT_DISRUPTED: 1943 case DID_BUS_BUSY: 1944 return PR_STS_RETRY_PATH_FAILURE; 1945 case DID_NO_CONNECT: 1946 return PR_STS_PATH_FAILED; 1947 case DID_TRANSPORT_FAILFAST: 1948 return PR_STS_PATH_FAST_FAILED; 1949 } 1950 1951 switch (status_byte(result)) { 1952 case SAM_STAT_RESERVATION_CONFLICT: 1953 return PR_STS_RESERVATION_CONFLICT; 1954 case SAM_STAT_CHECK_CONDITION: 1955 if (!scsi_sense_valid(sshdr)) 1956 return PR_STS_IOERR; 1957 1958 if (sshdr->sense_key == ILLEGAL_REQUEST && 1959 (sshdr->asc == 0x26 || sshdr->asc == 0x24)) 1960 return -EINVAL; 1961 1962 fallthrough; 1963 default: 1964 return PR_STS_IOERR; 1965 } 1966 } 1967 1968 static int sd_pr_in_command(struct block_device *bdev, u8 sa, 1969 unsigned char *data, int data_len) 1970 { 1971 struct scsi_disk *sdkp = scsi_disk(bdev->bd_disk); 1972 struct scsi_device *sdev = sdkp->device; 1973 struct scsi_sense_hdr sshdr; 1974 u8 cmd[10] = { PERSISTENT_RESERVE_IN, sa }; 1975 struct scsi_failure failure_defs[] = { 1976 { 1977 .sense = UNIT_ATTENTION, 1978 .asc = SCMD_FAILURE_ASC_ANY, 1979 .ascq = SCMD_FAILURE_ASCQ_ANY, 1980 .allowed = 5, 1981 .result = SAM_STAT_CHECK_CONDITION, 1982 }, 1983 {} 1984 }; 1985 struct scsi_failures failures = { 1986 .failure_definitions = failure_defs, 1987 }; 1988 const struct scsi_exec_args exec_args = { 1989 .sshdr = &sshdr, 1990 .failures = &failures, 1991 }; 1992 int result; 1993 1994 put_unaligned_be16(data_len, &cmd[7]); 1995 1996 result = scsi_execute_cmd(sdev, cmd, REQ_OP_DRV_IN, data, data_len, 1997 SD_TIMEOUT, sdkp->max_retries, &exec_args); 1998 if (scsi_status_is_check_condition(result) && 1999 scsi_sense_valid(&sshdr)) { 2000 sdev_printk(KERN_INFO, sdev, "PR command 
failed: %d\n", result); 2001 scsi_print_sense_hdr(sdev, NULL, &sshdr); 2002 } 2003 2004 if (result <= 0) 2005 return result; 2006 2007 return sd_scsi_to_pr_err(&sshdr, result); 2008 } 2009 2010 static int sd_pr_read_keys(struct block_device *bdev, struct pr_keys *keys_info) 2011 { 2012 int result, i, data_offset, num_copy_keys; 2013 u32 num_keys = keys_info->num_keys; 2014 int data_len; 2015 u8 *data; 2016 2017 /* 2018 * Each reservation key takes 8 bytes and there is an 8-byte header 2019 * before the reservation key list. The total size must fit into the 2020 * 16-bit ALLOCATION LENGTH field. 2021 */ 2022 if (check_mul_overflow(num_keys, 8, &data_len) || 2023 check_add_overflow(data_len, 8, &data_len) || 2024 data_len > USHRT_MAX) 2025 return -EINVAL; 2026 2027 data = kzalloc(data_len, GFP_KERNEL); 2028 if (!data) 2029 return -ENOMEM; 2030 2031 result = sd_pr_in_command(bdev, READ_KEYS, data, data_len); 2032 if (result) 2033 goto free_data; 2034 2035 keys_info->generation = get_unaligned_be32(&data[0]); 2036 keys_info->num_keys = get_unaligned_be32(&data[4]) / 8; 2037 2038 data_offset = 8; 2039 num_copy_keys = min(num_keys, keys_info->num_keys); 2040 2041 for (i = 0; i < num_copy_keys; i++) { 2042 keys_info->keys[i] = get_unaligned_be64(&data[data_offset]); 2043 data_offset += 8; 2044 } 2045 2046 free_data: 2047 kfree(data); 2048 return result; 2049 } 2050 2051 static int sd_pr_read_reservation(struct block_device *bdev, 2052 struct pr_held_reservation *rsv) 2053 { 2054 struct scsi_disk *sdkp = scsi_disk(bdev->bd_disk); 2055 struct scsi_device *sdev = sdkp->device; 2056 u8 data[24] = { }; 2057 int result, len; 2058 2059 result = sd_pr_in_command(bdev, READ_RESERVATION, data, sizeof(data)); 2060 if (result) 2061 return result; 2062 2063 len = get_unaligned_be32(&data[4]); 2064 if (!len) 2065 return 0; 2066 2067 /* Make sure we have at least the key and type */ 2068 if (len < 14) { 2069 sdev_printk(KERN_INFO, sdev, 2070 "READ RESERVATION failed due to short return buffer of %d bytes\n", 2071 len); 2072 return -EINVAL; 2073 } 2074 2075 rsv->generation = get_unaligned_be32(&data[0]); 2076 rsv->key = get_unaligned_be64(&data[8]); 2077 rsv->type = scsi_pr_type_to_block(data[21] & 0x0f); 2078 return 0; 2079 } 2080 2081 static int sd_pr_out_command(struct block_device *bdev, u8 sa, u64 key, 2082 u64 sa_key, enum scsi_pr_type type, u8 flags) 2083 { 2084 struct scsi_disk *sdkp = scsi_disk(bdev->bd_disk); 2085 struct scsi_device *sdev = sdkp->device; 2086 struct scsi_sense_hdr sshdr; 2087 struct scsi_failure failure_defs[] = { 2088 { 2089 .sense = UNIT_ATTENTION, 2090 .asc = SCMD_FAILURE_ASC_ANY, 2091 .ascq = SCMD_FAILURE_ASCQ_ANY, 2092 .allowed = 5, 2093 .result = SAM_STAT_CHECK_CONDITION, 2094 }, 2095 {} 2096 }; 2097 struct scsi_failures failures = { 2098 .failure_definitions = failure_defs, 2099 }; 2100 const struct scsi_exec_args exec_args = { 2101 .sshdr = &sshdr, 2102 .failures = &failures, 2103 }; 2104 int result; 2105 u8 cmd[16] = { 0, }; 2106 u8 data[24] = { 0, }; 2107 2108 cmd[0] = PERSISTENT_RESERVE_OUT; 2109 cmd[1] = sa; 2110 cmd[2] = type; 2111 put_unaligned_be32(sizeof(data), &cmd[5]); 2112 2113 put_unaligned_be64(key, &data[0]); 2114 put_unaligned_be64(sa_key, &data[8]); 2115 data[20] = flags; 2116 2117 result = scsi_execute_cmd(sdev, cmd, REQ_OP_DRV_OUT, &data, 2118 sizeof(data), SD_TIMEOUT, sdkp->max_retries, 2119 &exec_args); 2120 2121 if (scsi_status_is_check_condition(result) && 2122 scsi_sense_valid(&sshdr)) { 2123 sdev_printk(KERN_INFO, sdev, "PR command failed: %d\n", 
result);
2124 scsi_print_sense_hdr(sdev, NULL, &sshdr);
2125 }
2126
2127 if (result <= 0)
2128 return result;
2129
2130 return sd_scsi_to_pr_err(&sshdr, result);
2131 }
2132
2133 static int sd_pr_register(struct block_device *bdev, u64 old_key, u64 new_key,
2134 u32 flags)
2135 {
2136 if (flags & ~PR_FL_IGNORE_KEY)
2137 return -EOPNOTSUPP;
2138 return sd_pr_out_command(bdev, (flags & PR_FL_IGNORE_KEY) ? 0x06 : 0x00,
2139 old_key, new_key, 0,
2140 (1 << 0) /* APTPL */);
2141 }
2142
2143 static int sd_pr_reserve(struct block_device *bdev, u64 key, enum pr_type type,
2144 u32 flags)
2145 {
2146 if (flags)
2147 return -EOPNOTSUPP;
2148 return sd_pr_out_command(bdev, 0x01, key, 0,
2149 block_pr_type_to_scsi(type), 0);
2150 }
2151
2152 static int sd_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
2153 {
2154 return sd_pr_out_command(bdev, 0x02, key, 0,
2155 block_pr_type_to_scsi(type), 0);
2156 }
2157
2158 static int sd_pr_preempt(struct block_device *bdev, u64 old_key, u64 new_key,
2159 enum pr_type type, bool abort)
2160 {
2161 return sd_pr_out_command(bdev, abort ? 0x05 : 0x04, old_key, new_key,
2162 block_pr_type_to_scsi(type), 0);
2163 }
2164
2165 static int sd_pr_clear(struct block_device *bdev, u64 key)
2166 {
2167 return sd_pr_out_command(bdev, 0x03, key, 0, 0, 0);
2168 }
2169
2170 static const struct pr_ops sd_pr_ops = {
2171 .pr_register = sd_pr_register,
2172 .pr_reserve = sd_pr_reserve,
2173 .pr_release = sd_pr_release,
2174 .pr_preempt = sd_pr_preempt,
2175 .pr_clear = sd_pr_clear,
2176 .pr_read_keys = sd_pr_read_keys,
2177 .pr_read_reservation = sd_pr_read_reservation,
2178 };
2179
2180 static void scsi_disk_free_disk(struct gendisk *disk)
2181 {
2182 struct scsi_disk *sdkp = scsi_disk(disk);
2183
2184 put_device(&sdkp->disk_dev);
2185 }
2186
2187 /**
2188 * sd_eh_reset - reset error handling callback
2189 * @scmd: sd-issued command that has failed
2190 *
2191 * This function is called by the SCSI midlayer before starting
2192 * SCSI EH. When counting medium access failures we have to be
2193 * careful to register it only once per device and SCSI EH run;
2194 * there might be several timed out commands which will cause the
2195 * 'max_medium_access_timeouts' counter to trigger after the first
2196 * SCSI EH run already and set the device offline.
2197 * So this function resets the internal counter before starting SCSI EH.
2198 **/
2199 static void sd_eh_reset(struct scsi_cmnd *scmd)
2200 {
2201 struct scsi_disk *sdkp = scsi_disk(scsi_cmd_to_rq(scmd)->q->disk);
2202
2203 /* New SCSI EH run, reset gate variable */
2204 sdkp->ignore_medium_access_errors = false;
2205 }
2206
2207 /**
2208 * sd_eh_action - error handling callback
2209 * @scmd: sd-issued command that has failed
2210 * @eh_disp: The recovery disposition suggested by the midlayer
2211 *
2212 * This function is called by the SCSI midlayer upon completion of an
2213 * error test command (currently TEST UNIT READY). The result of sending
2214 * the eh command is passed in eh_disp.
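* (This function only intervenes when eh_disp is SUCCESS, i.e. the TEST UNIT
* READY sent by the error handler completed; any other disposition is passed
* through unchanged.)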
We're looking for devices that
2215 * fail medium access commands but are OK with non access commands like
2216 * test unit ready (and so error handling would wrongly see the device as
2217 * having made a successful recovery).
2218 **/
2219 static int sd_eh_action(struct scsi_cmnd *scmd, int eh_disp)
2220 {
2221 struct scsi_disk *sdkp = scsi_disk(scsi_cmd_to_rq(scmd)->q->disk);
2222 struct scsi_device *sdev = scmd->device;
2223
2224 if (!scsi_device_online(sdev) ||
2225 !scsi_medium_access_command(scmd) ||
2226 host_byte(scmd->result) != DID_TIME_OUT ||
2227 eh_disp != SUCCESS)
2228 return eh_disp;
2229
2230 /*
2231 * The device has timed out executing a medium access command.
2232 * However, the TEST UNIT READY command sent during error
2233 * handling completed successfully. Either the device is in the
2234 * process of recovering or it has suffered an internal failure
2235 * that prevents access to the storage medium.
2236 */
2237 if (!sdkp->ignore_medium_access_errors) {
2238 sdkp->medium_access_timed_out++;
2239 sdkp->ignore_medium_access_errors = true;
2240 }
2241
2242 /*
2243 * If the device keeps failing read/write commands but TEST UNIT
2244 * READY always completes successfully we assume that medium
2245 * access is no longer possible and take the device offline.
2246 */
2247 if (sdkp->medium_access_timed_out >= sdkp->max_medium_access_timeouts) {
2248 scmd_printk(KERN_ERR, scmd,
2249 "Medium access timeout failure. Offlining disk!\n");
2250 mutex_lock(&sdev->state_mutex);
2251 scsi_device_set_state(sdev, SDEV_OFFLINE);
2252 mutex_unlock(&sdev->state_mutex);
2253
2254 return SUCCESS;
2255 }
2256
2257 return eh_disp;
2258 }
2259
2260 static unsigned int sd_completed_bytes(struct scsi_cmnd *scmd)
2261 {
2262 struct request *req = scsi_cmd_to_rq(scmd);
2263 struct scsi_device *sdev = scmd->device;
2264 unsigned int transferred, good_bytes;
2265 u64 start_lba, end_lba, bad_lba;
2266
2267 /*
2268 * Some commands have a payload smaller than the device logical
2269 * block size (e.g. INQUIRY on a 4K disk).
2270 */
2271 if (scsi_bufflen(scmd) <= sdev->sector_size)
2272 return 0;
2273
2274 /* Check if we have 'bad_lba' information */
2275 if (!scsi_get_sense_info_fld(scmd->sense_buffer,
2276 SCSI_SENSE_BUFFERSIZE,
2277 &bad_lba))
2278 return 0;
2279
2280 /*
2281 * If the bad lba was reported incorrectly, we have no idea where
2282 * the error is.
2283 */
2284 start_lba = sectors_to_logical(sdev, blk_rq_pos(req));
2285 end_lba = start_lba + bytes_to_logical(sdev, scsi_bufflen(scmd));
2286 if (bad_lba < start_lba || bad_lba >= end_lba)
2287 return 0;
2288
2289 /*
2290 * resid is optional but mostly filled in. When it's unused,
2291 * its value is zero, so we assume the whole buffer was transferred.
2292 */
2293 transferred = scsi_bufflen(scmd) - scsi_get_resid(scmd);
2294
2295 /* This computation should always be done in terms of the
2296 * resolution of the device's medium.
2297 */
2298 good_bytes = logical_to_bytes(sdev, bad_lba - start_lba);
2299
2300 return min(good_bytes, transferred);
2301 }
2302
2303 /**
2304 * sd_done - bottom half handler: called when the lower level
2305 * driver has completed (successfully or otherwise) a scsi command.
2306 * @SCpnt: mid-level's per command structure.
2307 *
2308 * Note: potentially run from within an ISR. Must not block.
2309 **/
2310 static int sd_done(struct scsi_cmnd *SCpnt)
2311 {
2312 int result = SCpnt->result;
2313 unsigned int good_bytes = result ?
0 : scsi_bufflen(SCpnt); 2314 unsigned int sector_size = SCpnt->device->sector_size; 2315 unsigned int resid; 2316 struct scsi_sense_hdr sshdr; 2317 struct request *req = scsi_cmd_to_rq(SCpnt); 2318 struct scsi_disk *sdkp = scsi_disk(req->q->disk); 2319 int sense_valid = 0; 2320 int sense_deferred = 0; 2321 2322 switch (req_op(req)) { 2323 case REQ_OP_DISCARD: 2324 case REQ_OP_WRITE_ZEROES: 2325 case REQ_OP_ZONE_RESET: 2326 case REQ_OP_ZONE_RESET_ALL: 2327 case REQ_OP_ZONE_OPEN: 2328 case REQ_OP_ZONE_CLOSE: 2329 case REQ_OP_ZONE_FINISH: 2330 if (!result) { 2331 good_bytes = blk_rq_bytes(req); 2332 scsi_set_resid(SCpnt, 0); 2333 } else { 2334 good_bytes = 0; 2335 scsi_set_resid(SCpnt, blk_rq_bytes(req)); 2336 } 2337 break; 2338 default: 2339 /* 2340 * In case of bogus fw or device, we could end up having 2341 * an unaligned partial completion. Check this here and force 2342 * alignment. 2343 */ 2344 resid = scsi_get_resid(SCpnt); 2345 if (resid & (sector_size - 1)) { 2346 sd_printk(KERN_INFO, sdkp, 2347 "Unaligned partial completion (resid=%u, sector_sz=%u)\n", 2348 resid, sector_size); 2349 scsi_print_command(SCpnt); 2350 resid = min(scsi_bufflen(SCpnt), 2351 round_up(resid, sector_size)); 2352 scsi_set_resid(SCpnt, resid); 2353 } 2354 } 2355 2356 if (result) { 2357 sense_valid = scsi_command_normalize_sense(SCpnt, &sshdr); 2358 if (sense_valid) 2359 sense_deferred = scsi_sense_is_deferred(&sshdr); 2360 } 2361 sdkp->medium_access_timed_out = 0; 2362 2363 if (!scsi_status_is_check_condition(result) && 2364 (!sense_valid || sense_deferred)) 2365 goto out; 2366 2367 switch (sshdr.sense_key) { 2368 case HARDWARE_ERROR: 2369 case MEDIUM_ERROR: 2370 good_bytes = sd_completed_bytes(SCpnt); 2371 break; 2372 case RECOVERED_ERROR: 2373 good_bytes = scsi_bufflen(SCpnt); 2374 break; 2375 case NO_SENSE: 2376 /* This indicates a false check condition, so ignore it. An 2377 * unknown amount of data was transferred so treat it as an 2378 * error. 
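* Clearing the result and the sense buffer below keeps the stale check
* condition from being examined again by the completion path.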
2379 */ 2380 SCpnt->result = 0; 2381 memset(SCpnt->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); 2382 break; 2383 case ABORTED_COMMAND: 2384 if (sshdr.asc == 0x10) /* DIF: Target detected corruption */ 2385 good_bytes = sd_completed_bytes(SCpnt); 2386 break; 2387 case ILLEGAL_REQUEST: 2388 switch (sshdr.asc) { 2389 case 0x10: /* DIX: Host detected corruption */ 2390 good_bytes = sd_completed_bytes(SCpnt); 2391 break; 2392 case 0x20: /* INVALID COMMAND OPCODE */ 2393 case 0x24: /* INVALID FIELD IN CDB */ 2394 switch (SCpnt->cmnd[0]) { 2395 case UNMAP: 2396 sd_disable_discard(sdkp); 2397 break; 2398 case WRITE_SAME_16: 2399 case WRITE_SAME: 2400 if (SCpnt->cmnd[1] & 8) { /* UNMAP */ 2401 sd_disable_discard(sdkp); 2402 } else { 2403 sd_disable_write_same(sdkp); 2404 req->rq_flags |= RQF_QUIET; 2405 } 2406 break; 2407 } 2408 } 2409 break; 2410 default: 2411 break; 2412 } 2413 2414 out: 2415 if (sdkp->device->type == TYPE_ZBC) 2416 good_bytes = sd_zbc_complete(SCpnt, good_bytes, &sshdr); 2417 2418 SCSI_LOG_HLCOMPLETE(1, scmd_printk(KERN_INFO, SCpnt, 2419 "sd_done: completed %d of %d bytes\n", 2420 good_bytes, scsi_bufflen(SCpnt))); 2421 2422 return good_bytes; 2423 } 2424 2425 /* 2426 * spinup disk - called only in sd_revalidate_disk() 2427 */ 2428 static void 2429 sd_spinup_disk(struct scsi_disk *sdkp) 2430 { 2431 static const u8 cmd[10] = { TEST_UNIT_READY }; 2432 unsigned long spintime_expire = 0; 2433 int spintime, sense_valid = 0; 2434 unsigned int the_result; 2435 struct scsi_sense_hdr sshdr; 2436 struct scsi_failure failure_defs[] = { 2437 /* Do not retry Medium Not Present */ 2438 { 2439 .sense = UNIT_ATTENTION, 2440 .asc = 0x3A, 2441 .ascq = SCMD_FAILURE_ASCQ_ANY, 2442 .result = SAM_STAT_CHECK_CONDITION, 2443 }, 2444 { 2445 .sense = NOT_READY, 2446 .asc = 0x3A, 2447 .ascq = SCMD_FAILURE_ASCQ_ANY, 2448 .result = SAM_STAT_CHECK_CONDITION, 2449 }, 2450 /* Retry when scsi_status_is_good would return false 3 times */ 2451 { 2452 .result = SCMD_FAILURE_STAT_ANY, 2453 .allowed = 3, 2454 }, 2455 {} 2456 }; 2457 struct scsi_failures failures = { 2458 .failure_definitions = failure_defs, 2459 }; 2460 const struct scsi_exec_args exec_args = { 2461 .sshdr = &sshdr, 2462 .failures = &failures, 2463 }; 2464 2465 spintime = 0; 2466 2467 /* Spin up drives, as required. Only do this at boot time */ 2468 /* Spinup needs to be done for module loads too. */ 2469 do { 2470 bool media_was_present = sdkp->media_present; 2471 2472 scsi_failures_reset_retries(&failures); 2473 2474 the_result = scsi_execute_cmd(sdkp->device, cmd, REQ_OP_DRV_IN, 2475 NULL, 0, SD_TIMEOUT, 2476 sdkp->max_retries, &exec_args); 2477 2478 2479 if (the_result > 0) { 2480 /* 2481 * If the drive has indicated to us that it doesn't 2482 * have any media in it, don't bother with any more 2483 * polling. 2484 */ 2485 if (media_not_present(sdkp, &sshdr)) { 2486 if (media_was_present) 2487 sd_printk(KERN_NOTICE, sdkp, 2488 "Media removed, stopped polling\n"); 2489 return; 2490 } 2491 sense_valid = scsi_sense_valid(&sshdr); 2492 } 2493 2494 if (!scsi_status_is_check_condition(the_result)) { 2495 /* no sense, TUR either succeeded or failed 2496 * with a status error */ 2497 if(!spintime && !scsi_status_is_good(the_result)) { 2498 sd_print_result(sdkp, "Test Unit Ready failed", 2499 the_result); 2500 } 2501 break; 2502 } 2503 2504 /* 2505 * The device does not want the automatic start to be issued. 
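* (no_start_on_add is typically set through the BLIST_NOSTARTONADD device
* quirk.)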
2506 */ 2507 if (sdkp->device->no_start_on_add) 2508 break; 2509 2510 if (sense_valid && sshdr.sense_key == NOT_READY) { 2511 if (sshdr.asc == 4 && sshdr.ascq == 3) 2512 break; /* manual intervention required */ 2513 if (sshdr.asc == 4 && sshdr.ascq == 0xb) 2514 break; /* standby */ 2515 if (sshdr.asc == 4 && sshdr.ascq == 0xc) 2516 break; /* unavailable */ 2517 if (sshdr.asc == 4 && sshdr.ascq == 0x1b) 2518 break; /* sanitize in progress */ 2519 if (sshdr.asc == 4 && sshdr.ascq == 0x24) 2520 break; /* depopulation in progress */ 2521 if (sshdr.asc == 4 && sshdr.ascq == 0x25) 2522 break; /* depopulation restoration in progress */ 2523 /* 2524 * Issue command to spin up drive when not ready 2525 */ 2526 if (!spintime) { 2527 /* Return immediately and start spin cycle */ 2528 const u8 start_cmd[10] = { 2529 [0] = START_STOP, 2530 [1] = 1, 2531 [4] = sdkp->device->start_stop_pwr_cond ? 2532 0x11 : 1, 2533 }; 2534 2535 sd_printk(KERN_NOTICE, sdkp, "Spinning up disk..."); 2536 scsi_execute_cmd(sdkp->device, start_cmd, 2537 REQ_OP_DRV_IN, NULL, 0, 2538 SD_TIMEOUT, sdkp->max_retries, 2539 &exec_args); 2540 spintime_expire = jiffies + 100 * HZ; 2541 spintime = 1; 2542 } 2543 /* Wait 1 second for next try */ 2544 msleep(1000); 2545 printk(KERN_CONT "."); 2546 2547 /* 2548 * Wait for USB flash devices with slow firmware. 2549 * Yes, this sense key/ASC combination shouldn't 2550 * occur here. It's characteristic of these devices. 2551 */ 2552 } else if (sense_valid && 2553 sshdr.sense_key == UNIT_ATTENTION && 2554 sshdr.asc == 0x28) { 2555 if (!spintime) { 2556 spintime_expire = jiffies + 5 * HZ; 2557 spintime = 1; 2558 } 2559 /* Wait 1 second for next try */ 2560 msleep(1000); 2561 } else { 2562 /* we don't understand the sense code, so it's 2563 * probably pointless to loop */ 2564 if(!spintime) { 2565 sd_printk(KERN_NOTICE, sdkp, "Unit Not Ready\n"); 2566 sd_print_sense_hdr(sdkp, &sshdr); 2567 } 2568 break; 2569 } 2570 2571 } while (spintime && time_before_eq(jiffies, spintime_expire)); 2572 2573 if (spintime) { 2574 if (scsi_status_is_good(the_result)) 2575 printk(KERN_CONT "ready\n"); 2576 else 2577 printk(KERN_CONT "not responding...\n"); 2578 } 2579 } 2580 2581 /* 2582 * Determine whether disk supports Data Integrity Field. 2583 */ 2584 static int sd_read_protection_type(struct scsi_disk *sdkp, unsigned char *buffer) 2585 { 2586 struct scsi_device *sdp = sdkp->device; 2587 u8 type; 2588 2589 if (scsi_device_protection(sdp) == 0 || (buffer[12] & 1) == 0) { 2590 sdkp->protection_type = 0; 2591 return 0; 2592 } 2593 2594 type = ((buffer[12] >> 1) & 7) + 1; /* P_TYPE 0 = Type 1 */ 2595 2596 if (type > T10_PI_TYPE3_PROTECTION) { 2597 sd_printk(KERN_ERR, sdkp, 2598 "formatted with unsupported protection type %u. 
Disabling disk!\n", 2599 type); 2600 sdkp->protection_type = 0; 2601 return -ENODEV; 2602 } 2603 2604 sdkp->protection_type = type; 2605 2606 return 0; 2607 } 2608 2609 static void sd_config_protection(struct scsi_disk *sdkp, 2610 struct queue_limits *lim) 2611 { 2612 struct scsi_device *sdp = sdkp->device; 2613 2614 if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY)) 2615 sd_dif_config_host(sdkp, lim); 2616 2617 if (!sdkp->protection_type) 2618 return; 2619 2620 if (!scsi_host_dif_capable(sdp->host, sdkp->protection_type)) { 2621 sd_first_printk(KERN_NOTICE, sdkp, 2622 "Disabling DIF Type %u protection\n", 2623 sdkp->protection_type); 2624 sdkp->protection_type = 0; 2625 } 2626 2627 sd_first_printk(KERN_NOTICE, sdkp, "Enabling DIF Type %u protection\n", 2628 sdkp->protection_type); 2629 } 2630 2631 static void read_capacity_error(struct scsi_disk *sdkp, struct scsi_device *sdp, 2632 struct scsi_sense_hdr *sshdr, int sense_valid, 2633 int the_result) 2634 { 2635 if (sense_valid) 2636 sd_print_sense_hdr(sdkp, sshdr); 2637 else 2638 sd_printk(KERN_NOTICE, sdkp, "Sense not available.\n"); 2639 2640 /* 2641 * Set dirty bit for removable devices if not ready - 2642 * sometimes drives will not report this properly. 2643 */ 2644 if (sdp->removable && 2645 sense_valid && sshdr->sense_key == NOT_READY) 2646 set_media_not_present(sdkp); 2647 2648 /* 2649 * We used to set media_present to 0 here to indicate no media 2650 * in the drive, but some drives fail read capacity even with 2651 * media present, so we can't do that. 2652 */ 2653 sdkp->capacity = 0; /* unknown mapped to zero - as usual */ 2654 } 2655 2656 #define RC16_LEN 32 2657 #if RC16_LEN > SD_BUF_SIZE 2658 #error RC16_LEN must not be more than SD_BUF_SIZE 2659 #endif 2660 2661 #define READ_CAPACITY_RETRIES_ON_RESET 10 2662 2663 static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp, 2664 struct queue_limits *lim, unsigned char *buffer) 2665 { 2666 unsigned char cmd[16]; 2667 struct scsi_sense_hdr sshdr; 2668 const struct scsi_exec_args exec_args = { 2669 .sshdr = &sshdr, 2670 }; 2671 int sense_valid = 0; 2672 int the_result; 2673 int retries = 3, reset_retries = READ_CAPACITY_RETRIES_ON_RESET; 2674 unsigned int alignment; 2675 unsigned long long lba; 2676 unsigned sector_size; 2677 2678 if (sdp->no_read_capacity_16) 2679 return -EINVAL; 2680 2681 do { 2682 memset(cmd, 0, 16); 2683 cmd[0] = SERVICE_ACTION_IN_16; 2684 cmd[1] = SAI_READ_CAPACITY_16; 2685 cmd[13] = RC16_LEN; 2686 memset(buffer, 0, RC16_LEN); 2687 2688 the_result = scsi_execute_cmd(sdp, cmd, REQ_OP_DRV_IN, 2689 buffer, RC16_LEN, SD_TIMEOUT, 2690 sdkp->max_retries, &exec_args); 2691 if (the_result > 0) { 2692 if (media_not_present(sdkp, &sshdr)) 2693 return -ENODEV; 2694 2695 sense_valid = scsi_sense_valid(&sshdr); 2696 if (sense_valid && 2697 sshdr.sense_key == ILLEGAL_REQUEST && 2698 (sshdr.asc == 0x20 || sshdr.asc == 0x24) && 2699 sshdr.ascq == 0x00) 2700 /* Invalid Command Operation Code or 2701 * Invalid Field in CDB, just retry 2702 * silently with RC10 */ 2703 return -EINVAL; 2704 if (sense_valid && 2705 sshdr.sense_key == UNIT_ATTENTION && 2706 sshdr.asc == 0x29 && sshdr.ascq == 0x00) 2707 /* Device reset might occur several times, 2708 * give it one more chance */ 2709 if (--reset_retries > 0) 2710 continue; 2711 } 2712 retries--; 2713 2714 } while (the_result && retries); 2715 2716 if (the_result) { 2717 sd_print_result(sdkp, "Read Capacity(16) failed", the_result); 2718 read_capacity_error(sdkp, sdp, &sshdr, sense_valid, the_result); 2719 return -EINVAL; 
2720 } 2721 2722 sector_size = get_unaligned_be32(&buffer[8]); 2723 lba = get_unaligned_be64(&buffer[0]); 2724 2725 if (sd_read_protection_type(sdkp, buffer) < 0) { 2726 sdkp->capacity = 0; 2727 return -ENODEV; 2728 } 2729 2730 /* Logical blocks per physical block exponent */ 2731 sdkp->physical_block_size = (1 << (buffer[13] & 0xf)) * sector_size; 2732 2733 /* RC basis */ 2734 sdkp->rc_basis = (buffer[12] >> 4) & 0x3; 2735 2736 /* Lowest aligned logical block */ 2737 alignment = ((buffer[14] & 0x3f) << 8 | buffer[15]) * sector_size; 2738 lim->alignment_offset = alignment; 2739 if (alignment && sdkp->first_scan) 2740 sd_printk(KERN_NOTICE, sdkp, 2741 "physical block alignment offset: %u\n", alignment); 2742 2743 if (buffer[14] & 0x80) { /* LBPME */ 2744 sdkp->lbpme = 1; 2745 2746 if (buffer[14] & 0x40) /* LBPRZ */ 2747 sdkp->lbprz = 1; 2748 } 2749 2750 sdkp->capacity = lba + 1; 2751 return sector_size; 2752 } 2753 2754 static int read_capacity_10(struct scsi_disk *sdkp, struct scsi_device *sdp, 2755 unsigned char *buffer) 2756 { 2757 static const u8 cmd[10] = { READ_CAPACITY }; 2758 struct scsi_sense_hdr sshdr; 2759 struct scsi_failure failure_defs[] = { 2760 /* Do not retry Medium Not Present */ 2761 { 2762 .sense = UNIT_ATTENTION, 2763 .asc = 0x3A, 2764 .result = SAM_STAT_CHECK_CONDITION, 2765 }, 2766 { 2767 .sense = NOT_READY, 2768 .asc = 0x3A, 2769 .result = SAM_STAT_CHECK_CONDITION, 2770 }, 2771 /* Device reset might occur several times so retry a lot */ 2772 { 2773 .sense = UNIT_ATTENTION, 2774 .asc = 0x29, 2775 .allowed = READ_CAPACITY_RETRIES_ON_RESET, 2776 .result = SAM_STAT_CHECK_CONDITION, 2777 }, 2778 /* Any other error not listed above retry 3 times */ 2779 { 2780 .result = SCMD_FAILURE_RESULT_ANY, 2781 .allowed = 3, 2782 }, 2783 {} 2784 }; 2785 struct scsi_failures failures = { 2786 .failure_definitions = failure_defs, 2787 }; 2788 const struct scsi_exec_args exec_args = { 2789 .sshdr = &sshdr, 2790 .failures = &failures, 2791 }; 2792 int sense_valid = 0; 2793 int the_result; 2794 sector_t lba; 2795 unsigned sector_size; 2796 2797 memset(buffer, 0, 8); 2798 2799 the_result = scsi_execute_cmd(sdp, cmd, REQ_OP_DRV_IN, buffer, 2800 8, SD_TIMEOUT, sdkp->max_retries, 2801 &exec_args); 2802 2803 if (the_result > 0) { 2804 sense_valid = scsi_sense_valid(&sshdr); 2805 2806 if (media_not_present(sdkp, &sshdr)) 2807 return -ENODEV; 2808 } 2809 2810 if (the_result) { 2811 sd_print_result(sdkp, "Read Capacity(10) failed", the_result); 2812 read_capacity_error(sdkp, sdp, &sshdr, sense_valid, the_result); 2813 return -EINVAL; 2814 } 2815 2816 sector_size = get_unaligned_be32(&buffer[4]); 2817 lba = get_unaligned_be32(&buffer[0]); 2818 2819 if (sdp->no_read_capacity_16 && (lba == 0xffffffff)) { 2820 /* Some buggy (usb cardreader) devices return an lba of 2821 0xffffffff when the want to report a size of 0 (with 2822 which they really mean no media is present) */ 2823 sdkp->capacity = 0; 2824 sdkp->physical_block_size = sector_size; 2825 return sector_size; 2826 } 2827 2828 sdkp->capacity = lba + 1; 2829 sdkp->physical_block_size = sector_size; 2830 return sector_size; 2831 } 2832 2833 static int sd_try_rc16_first(struct scsi_device *sdp) 2834 { 2835 if (sdp->host->max_cmd_len < 16) 2836 return 0; 2837 if (sdp->try_rc_10_first) 2838 return 0; 2839 if (sdp->scsi_level > SCSI_SPC_2) 2840 return 1; 2841 if (scsi_device_protection(sdp)) 2842 return 1; 2843 return 0; 2844 } 2845 2846 /* 2847 * read disk capacity 2848 */ 2849 static void 2850 sd_read_capacity(struct scsi_disk *sdkp, struct 
queue_limits *lim, 2851 unsigned char *buffer) 2852 { 2853 int sector_size; 2854 struct scsi_device *sdp = sdkp->device; 2855 2856 if (sd_try_rc16_first(sdp)) { 2857 sector_size = read_capacity_16(sdkp, sdp, lim, buffer); 2858 if (sector_size == -EOVERFLOW) 2859 goto got_data; 2860 if (sector_size == -ENODEV) 2861 return; 2862 if (sector_size < 0) 2863 sector_size = read_capacity_10(sdkp, sdp, buffer); 2864 if (sector_size < 0) 2865 return; 2866 } else { 2867 sector_size = read_capacity_10(sdkp, sdp, buffer); 2868 if (sector_size == -EOVERFLOW) 2869 goto got_data; 2870 if (sector_size < 0) 2871 return; 2872 if ((sizeof(sdkp->capacity) > 4) && 2873 (sdkp->capacity > 0xffffffffULL)) { 2874 int old_sector_size = sector_size; 2875 sd_printk(KERN_NOTICE, sdkp, 2876 "Very big device. Trying to use READ CAPACITY(16).\n"); 2877 sector_size = read_capacity_16(sdkp, sdp, lim, buffer); 2878 if (sector_size < 0) { 2879 sd_printk(KERN_NOTICE, sdkp, 2880 "Using 0xffffffff as device size\n"); 2881 sdkp->capacity = 1 + (sector_t) 0xffffffff; 2882 sector_size = old_sector_size; 2883 goto got_data; 2884 } 2885 /* Remember that READ CAPACITY(16) succeeded */ 2886 sdp->try_rc_10_first = 0; 2887 } 2888 } 2889 2890 /* Some devices are known to return the total number of blocks, 2891 * not the highest block number. Some devices have versions 2892 * which do this and others which do not. Some devices we might 2893 * suspect of doing this but we don't know for certain. 2894 * 2895 * If we know the reported capacity is wrong, decrement it. If 2896 * we can only guess, then assume the number of blocks is even 2897 * (usually true but not always) and err on the side of lowering 2898 * the capacity. 2899 */ 2900 if (sdp->fix_capacity || 2901 (sdp->guess_capacity && (sdkp->capacity & 0x01))) { 2902 sd_printk(KERN_INFO, sdkp, 2903 "Adjusting the sector count from its reported value: %llu\n", 2904 (unsigned long long) sdkp->capacity); 2905 --sdkp->capacity; 2906 } 2907 2908 got_data: 2909 if (sector_size == 0) { 2910 sector_size = 512; 2911 sd_printk(KERN_NOTICE, sdkp, 2912 "Sector size 0 reported, assuming 512.\n"); 2913 } 2914 2915 if (sector_size != 512 && 2916 sector_size != 1024 && 2917 sector_size != 2048 && 2918 sector_size != 4096) { 2919 sd_printk(KERN_NOTICE, sdkp, "Unsupported sector size %d.\n", 2920 sector_size); 2921 /* 2922 * The user might want to re-format the drive with 2923 * a supported sectorsize. Once this happens, it 2924 * would be relatively trivial to set the thing up. 2925 * For this reason, we leave the thing in the table. 
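* Reporting a zero capacity below keeps the device visible while making sure
* no read/write requests can be issued to it.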
2926 */ 2927 sdkp->capacity = 0; 2928 /* 2929 * set a bogus sector size so the normal read/write 2930 * logic in the block layer will eventually refuse any 2931 * request on this device without tripping over power 2932 * of two sector size assumptions 2933 */ 2934 sector_size = 512; 2935 } 2936 lim->logical_block_size = sector_size; 2937 lim->physical_block_size = sdkp->physical_block_size; 2938 sdkp->device->sector_size = sector_size; 2939 2940 if (sdkp->capacity > 0xffffffff) 2941 sdp->use_16_for_rw = 1; 2942 2943 } 2944 2945 /* 2946 * Print disk capacity 2947 */ 2948 static void 2949 sd_print_capacity(struct scsi_disk *sdkp, 2950 sector_t old_capacity) 2951 { 2952 int sector_size = sdkp->device->sector_size; 2953 char cap_str_2[10], cap_str_10[10]; 2954 2955 if (!sdkp->first_scan && old_capacity == sdkp->capacity) 2956 return; 2957 2958 string_get_size(sdkp->capacity, sector_size, 2959 STRING_UNITS_2, cap_str_2, sizeof(cap_str_2)); 2960 string_get_size(sdkp->capacity, sector_size, 2961 STRING_UNITS_10, cap_str_10, sizeof(cap_str_10)); 2962 2963 sd_printk(KERN_NOTICE, sdkp, 2964 "%llu %d-byte logical blocks: (%s/%s)\n", 2965 (unsigned long long)sdkp->capacity, 2966 sector_size, cap_str_10, cap_str_2); 2967 2968 if (sdkp->physical_block_size != sector_size) 2969 sd_printk(KERN_NOTICE, sdkp, 2970 "%u-byte physical blocks\n", 2971 sdkp->physical_block_size); 2972 } 2973 2974 /* called with buffer of length 512 */ 2975 static inline int 2976 sd_do_mode_sense(struct scsi_disk *sdkp, int dbd, int modepage, 2977 unsigned char *buffer, int len, struct scsi_mode_data *data, 2978 struct scsi_sense_hdr *sshdr) 2979 { 2980 /* 2981 * If we must use MODE SENSE(10), make sure that the buffer length 2982 * is at least 8 bytes so that the mode sense header fits. 2983 */ 2984 if (sdkp->device->use_10_for_ms && len < 8) 2985 len = 8; 2986 2987 return scsi_mode_sense(sdkp->device, dbd, modepage, 0, buffer, len, 2988 SD_TIMEOUT, sdkp->max_retries, data, sshdr); 2989 } 2990 2991 /* 2992 * read write protect setting, if possible - called only in sd_revalidate_disk() 2993 * called with buffer of length SD_BUF_SIZE 2994 */ 2995 static void 2996 sd_read_write_protect_flag(struct scsi_disk *sdkp, unsigned char *buffer) 2997 { 2998 int res; 2999 struct scsi_device *sdp = sdkp->device; 3000 struct scsi_mode_data data; 3001 int old_wp = sdkp->write_prot; 3002 3003 set_disk_ro(sdkp->disk, 0); 3004 if (sdp->skip_ms_page_3f) { 3005 sd_first_printk(KERN_NOTICE, sdkp, "Assuming Write Enabled\n"); 3006 return; 3007 } 3008 3009 if (sdp->use_192_bytes_for_3f) { 3010 res = sd_do_mode_sense(sdkp, 0, 0x3F, buffer, 192, &data, NULL); 3011 } else { 3012 /* 3013 * First attempt: ask for all pages (0x3F), but only 4 bytes. 3014 * We have to start carefully: some devices hang if we ask 3015 * for more than is available. 3016 */ 3017 res = sd_do_mode_sense(sdkp, 0, 0x3F, buffer, 4, &data, NULL); 3018 3019 /* 3020 * Second attempt: ask for page 0 When only page 0 is 3021 * implemented, a request for page 3F may return Sense Key 3022 * 5: Illegal Request, Sense Code 24: Invalid field in 3023 * CDB. 3024 */ 3025 if (res < 0) 3026 res = sd_do_mode_sense(sdkp, 0, 0, buffer, 4, &data, NULL); 3027 3028 /* 3029 * Third attempt: ask 255 bytes, as we did earlier. 
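* (255 is the largest value that fits in the single-byte ALLOCATION LENGTH
* field of MODE SENSE(6).)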
3030 */ 3031 if (res < 0) 3032 res = sd_do_mode_sense(sdkp, 0, 0x3F, buffer, 255, 3033 &data, NULL); 3034 } 3035 3036 if (res < 0) { 3037 sd_first_printk(KERN_WARNING, sdkp, 3038 "Test WP failed, assume Write Enabled\n"); 3039 } else { 3040 sdkp->write_prot = ((data.device_specific & 0x80) != 0); 3041 set_disk_ro(sdkp->disk, sdkp->write_prot); 3042 if (sdkp->first_scan || old_wp != sdkp->write_prot) { 3043 sd_printk(KERN_NOTICE, sdkp, "Write Protect is %s\n", 3044 sdkp->write_prot ? "on" : "off"); 3045 sd_printk(KERN_DEBUG, sdkp, "Mode Sense: %4ph\n", buffer); 3046 } 3047 } 3048 } 3049 3050 /* 3051 * sd_read_cache_type - called only from sd_revalidate_disk() 3052 * called with buffer of length SD_BUF_SIZE 3053 */ 3054 static void 3055 sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer) 3056 { 3057 int len = 0, res; 3058 struct scsi_device *sdp = sdkp->device; 3059 3060 int dbd; 3061 int modepage; 3062 int first_len; 3063 struct scsi_mode_data data; 3064 struct scsi_sense_hdr sshdr; 3065 int old_wce = sdkp->WCE; 3066 int old_rcd = sdkp->RCD; 3067 int old_dpofua = sdkp->DPOFUA; 3068 3069 3070 if (sdkp->cache_override) 3071 return; 3072 3073 first_len = 4; 3074 if (sdp->skip_ms_page_8) { 3075 if (sdp->type == TYPE_RBC) 3076 goto defaults; 3077 else { 3078 if (sdp->skip_ms_page_3f) 3079 goto defaults; 3080 modepage = 0x3F; 3081 if (sdp->use_192_bytes_for_3f) 3082 first_len = 192; 3083 dbd = 0; 3084 } 3085 } else if (sdp->type == TYPE_RBC) { 3086 modepage = 6; 3087 dbd = 8; 3088 } else { 3089 modepage = 8; 3090 dbd = 0; 3091 } 3092 3093 /* cautiously ask */ 3094 res = sd_do_mode_sense(sdkp, dbd, modepage, buffer, first_len, 3095 &data, &sshdr); 3096 3097 if (res < 0) 3098 goto bad_sense; 3099 3100 if (!data.header_length) { 3101 modepage = 6; 3102 first_len = 0; 3103 sd_first_printk(KERN_ERR, sdkp, 3104 "Missing header in MODE_SENSE response\n"); 3105 } 3106 3107 /* that went OK, now ask for the proper length */ 3108 len = data.length; 3109 3110 /* 3111 * We're only interested in the first three bytes, actually. 3112 * But the data cache page is defined for the first 20. 3113 */ 3114 if (len < 3) 3115 goto bad_sense; 3116 else if (len > SD_BUF_SIZE) { 3117 sd_first_printk(KERN_NOTICE, sdkp, 3118 "Truncating mode parameter data from %d to %d bytes\n", 3119 len, SD_BUF_SIZE); 3120 len = SD_BUF_SIZE; 3121 } 3122 if (modepage == 0x3F && sdp->use_192_bytes_for_3f) 3123 len = 192; 3124 3125 /* Get the data */ 3126 if (len > first_len) 3127 res = sd_do_mode_sense(sdkp, dbd, modepage, buffer, len, 3128 &data, &sshdr); 3129 3130 if (!res) { 3131 int offset = data.header_length + data.block_descriptor_length; 3132 3133 while (offset < len) { 3134 u8 page_code = buffer[offset] & 0x3F; 3135 u8 spf = buffer[offset] & 0x40; 3136 3137 if (page_code == 8 || page_code == 6) { 3138 /* We're interested only in the first 3 bytes. 
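* (Byte 2 of the caching mode page carries the WCE and RCD bits that are
* parsed at Page_found below.)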
3139 */ 3140 if (len - offset <= 2) { 3141 sd_first_printk(KERN_ERR, sdkp, 3142 "Incomplete mode parameter data\n"); 3143 goto defaults; 3144 } else { 3145 modepage = page_code; 3146 goto Page_found; 3147 } 3148 } else { 3149 /* Go to the next page */ 3150 if (spf && len - offset > 3) 3151 offset += 4 + (buffer[offset+2] << 8) + 3152 buffer[offset+3]; 3153 else if (!spf && len - offset > 1) 3154 offset += 2 + buffer[offset+1]; 3155 else { 3156 sd_first_printk(KERN_ERR, sdkp, 3157 "Incomplete mode parameter data\n"); 3158 goto defaults; 3159 } 3160 } 3161 } 3162 3163 sd_first_printk(KERN_WARNING, sdkp, 3164 "No Caching mode page found\n"); 3165 goto defaults; 3166 3167 Page_found: 3168 if (modepage == 8) { 3169 sdkp->WCE = ((buffer[offset + 2] & 0x04) != 0); 3170 sdkp->RCD = ((buffer[offset + 2] & 0x01) != 0); 3171 } else { 3172 sdkp->WCE = ((buffer[offset + 2] & 0x01) == 0); 3173 sdkp->RCD = 0; 3174 } 3175 3176 sdkp->DPOFUA = (data.device_specific & 0x10) != 0; 3177 if (sdp->broken_fua) { 3178 sd_first_printk(KERN_NOTICE, sdkp, "Disabling FUA\n"); 3179 sdkp->DPOFUA = 0; 3180 } else if (sdkp->DPOFUA && !sdkp->device->use_10_for_rw && 3181 !sdkp->device->use_16_for_rw) { 3182 sd_first_printk(KERN_NOTICE, sdkp, 3183 "Uses READ/WRITE(6), disabling FUA\n"); 3184 sdkp->DPOFUA = 0; 3185 } 3186 3187 /* No cache flush allowed for write protected devices */ 3188 if (sdkp->WCE && sdkp->write_prot) 3189 sdkp->WCE = 0; 3190 3191 if (sdkp->first_scan || old_wce != sdkp->WCE || 3192 old_rcd != sdkp->RCD || old_dpofua != sdkp->DPOFUA) 3193 sd_printk(KERN_NOTICE, sdkp, 3194 "Write cache: %s, read cache: %s, %s\n", 3195 sdkp->WCE ? "enabled" : "disabled", 3196 sdkp->RCD ? "disabled" : "enabled", 3197 sdkp->DPOFUA ? "supports DPO and FUA" 3198 : "doesn't support DPO or FUA"); 3199 3200 return; 3201 } 3202 3203 bad_sense: 3204 if (res == -EIO && scsi_sense_valid(&sshdr) && 3205 sshdr.sense_key == ILLEGAL_REQUEST && 3206 sshdr.asc == 0x24 && sshdr.ascq == 0x0) 3207 /* Invalid field in CDB */ 3208 sd_first_printk(KERN_NOTICE, sdkp, "Cache data unavailable\n"); 3209 else 3210 sd_first_printk(KERN_ERR, sdkp, 3211 "Asking for cache data failed\n"); 3212 3213 defaults: 3214 if (sdp->wce_default_on) { 3215 sd_first_printk(KERN_NOTICE, sdkp, 3216 "Assuming drive cache: write back\n"); 3217 sdkp->WCE = 1; 3218 } else { 3219 sd_first_printk(KERN_WARNING, sdkp, 3220 "Assuming drive cache: write through\n"); 3221 sdkp->WCE = 0; 3222 } 3223 sdkp->RCD = 0; 3224 sdkp->DPOFUA = 0; 3225 } 3226 3227 static bool sd_is_perm_stream(struct scsi_disk *sdkp, unsigned int stream_id) 3228 { 3229 u8 cdb[16] = { SERVICE_ACTION_IN_16, SAI_GET_STREAM_STATUS }; 3230 struct { 3231 struct scsi_stream_status_header h; 3232 struct scsi_stream_status s; 3233 } buf; 3234 struct scsi_device *sdev = sdkp->device; 3235 struct scsi_sense_hdr sshdr; 3236 const struct scsi_exec_args exec_args = { 3237 .sshdr = &sshdr, 3238 }; 3239 int res; 3240 3241 put_unaligned_be16(stream_id, &cdb[4]); 3242 put_unaligned_be32(sizeof(buf), &cdb[10]); 3243 3244 res = scsi_execute_cmd(sdev, cdb, REQ_OP_DRV_IN, &buf, sizeof(buf), 3245 SD_TIMEOUT, sdkp->max_retries, &exec_args); 3246 if (res < 0) 3247 return false; 3248 if (scsi_status_is_check_condition(res) && scsi_sense_valid(&sshdr)) 3249 sd_print_sense_hdr(sdkp, &sshdr); 3250 if (res) 3251 return false; 3252 if (get_unaligned_be32(&buf.h.len) < sizeof(struct scsi_stream_status)) 3253 return false; 3254 return buf.s.perm; 3255 } 3256 3257 static void sd_read_io_hints(struct scsi_disk *sdkp, unsigned char *buffer) 
3258 {
3259 struct scsi_device *sdp = sdkp->device;
3260 const struct scsi_io_group_descriptor *desc, *start, *end;
3261 u16 permanent_stream_count_old;
3262 struct scsi_sense_hdr sshdr;
3263 struct scsi_mode_data data;
3264 int res;
3265
3266 if (sdp->sdev_bflags & BLIST_SKIP_IO_HINTS)
3267 return;
3268
3269 res = scsi_mode_sense(sdp, /*dbd=*/0x8, /*modepage=*/0x0a,
3270 /*subpage=*/0x05, buffer, SD_BUF_SIZE, SD_TIMEOUT,
3271 sdkp->max_retries, &data, &sshdr);
3272 if (res < 0)
3273 return;
3274 start = (void *)buffer + data.header_length + 16;
3275 end = (void *)buffer + ALIGN_DOWN(data.header_length + data.length,
3276 sizeof(*end));
3277 /*
3278 * From "SBC-5 Constrained Streams with Data Lifetimes": Device servers
3279 * should assign the lowest numbered stream identifiers to permanent
3280 * streams.
3281 */
3282 for (desc = start; desc < end; desc++)
3283 if (!desc->st_enble || !sd_is_perm_stream(sdkp, desc - start))
3284 break;
3285 permanent_stream_count_old = sdkp->permanent_stream_count;
3286 sdkp->permanent_stream_count = desc - start;
3287 if (sdkp->rscs && sdkp->permanent_stream_count < 2)
3288 sd_printk(KERN_INFO, sdkp,
3289 "Unexpected: RSCS has been set and the permanent stream count is %u\n",
3290 sdkp->permanent_stream_count);
3291 else if (sdkp->permanent_stream_count != permanent_stream_count_old)
3292 sd_printk(KERN_INFO, sdkp, "permanent stream count = %d\n",
3293 sdkp->permanent_stream_count);
3294 }
3295
3296 /*
3297 * The ATO bit indicates whether the DIF application tag is available
3298 * for use by the operating system.
3299 */
3300 static void sd_read_app_tag_own(struct scsi_disk *sdkp, unsigned char *buffer)
3301 {
3302 int res, offset;
3303 struct scsi_device *sdp = sdkp->device;
3304 struct scsi_mode_data data;
3305 struct scsi_sense_hdr sshdr;
3306
3307 if (sdp->type != TYPE_DISK && sdp->type != TYPE_ZBC)
3308 return;
3309
3310 if (sdkp->protection_type == 0)
3311 return;
3312
3313 res = scsi_mode_sense(sdp, 1, 0x0a, 0, buffer, 36, SD_TIMEOUT,
3314 sdkp->max_retries, &data, &sshdr);
3315
3316 if (res < 0 || !data.header_length ||
3317 data.length < 6) {
3318 sd_first_printk(KERN_WARNING, sdkp,
3319 "getting Control mode page failed, assume no ATO\n");
3320
3321 if (res == -EIO && scsi_sense_valid(&sshdr))
3322 sd_print_sense_hdr(sdkp, &sshdr);
3323
3324 return;
3325 }
3326
3327 offset = data.header_length + data.block_descriptor_length;
3328
3329 if ((buffer[offset] & 0x3f) != 0x0a) {
3330 sd_first_printk(KERN_ERR, sdkp, "ATO Got wrong page\n");
3331 return;
3332 }
3333
3334 if ((buffer[offset + 5] & 0x80) == 0)
3335 return;
3336
3337 sdkp->ATO = 1;
3338
3339 return;
3340 }
3341
3342 static unsigned int sd_discard_mode(struct scsi_disk *sdkp)
3343 {
3344 if (!sdkp->lbpme)
3345 return SD_LBP_FULL;
3346
3347 if (!sdkp->lbpvpd) {
3348 /* LBP VPD page not provided */
3349 if (sdkp->max_unmap_blocks)
3350 return SD_LBP_UNMAP;
3351 return SD_LBP_WS16;
3352 }
3353
3354 /* LBP VPD page tells us what to use */
3355 if (sdkp->lbpu && sdkp->max_unmap_blocks)
3356 return SD_LBP_UNMAP;
3357 if (sdkp->lbpws)
3358 return SD_LBP_WS16;
3359 if (sdkp->lbpws10)
3360 return SD_LBP_WS10;
3361 return SD_LBP_DISABLE;
3362 }
3363
3364 /*
3365 * Query disk device for preferred I/O sizes.
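* Offsets below are into the Block Limits VPD page (B0h), header included:
* bytes 6-7 optimal transfer length granularity, 8-11 maximum transfer length,
* 12-15 optimal transfer length, 20-23 maximum unmap LBA count, 24-27 maximum
* unmap block descriptor count, 28-31 optimal unmap granularity, 32-35 unmap
* granularity alignment (UGAVALID in the top bit of byte 32), 36-43 maximum
* write same length, and 44-63 the atomic write limits.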
3366 */ 3367 static void sd_read_block_limits(struct scsi_disk *sdkp, 3368 struct queue_limits *lim) 3369 { 3370 struct scsi_vpd *vpd; 3371 3372 rcu_read_lock(); 3373 3374 vpd = rcu_dereference(sdkp->device->vpd_pgb0); 3375 if (!vpd || vpd->len < 16) 3376 goto out; 3377 3378 sdkp->min_xfer_blocks = get_unaligned_be16(&vpd->data[6]); 3379 sdkp->max_xfer_blocks = get_unaligned_be32(&vpd->data[8]); 3380 sdkp->opt_xfer_blocks = get_unaligned_be32(&vpd->data[12]); 3381 3382 if (vpd->len >= 64) { 3383 unsigned int lba_count, desc_count; 3384 3385 sdkp->max_ws_blocks = (u32)get_unaligned_be64(&vpd->data[36]); 3386 3387 if (!sdkp->lbpme) 3388 goto config_atomic; 3389 3390 lba_count = get_unaligned_be32(&vpd->data[20]); 3391 desc_count = get_unaligned_be32(&vpd->data[24]); 3392 3393 if (lba_count && desc_count) 3394 sdkp->max_unmap_blocks = lba_count; 3395 3396 sdkp->unmap_granularity = get_unaligned_be32(&vpd->data[28]); 3397 3398 if (vpd->data[32] & 0x80) 3399 sdkp->unmap_alignment = 3400 get_unaligned_be32(&vpd->data[32]) & ~(1 << 31); 3401 3402 config_atomic: 3403 sdkp->max_atomic = get_unaligned_be32(&vpd->data[44]); 3404 sdkp->atomic_alignment = get_unaligned_be32(&vpd->data[48]); 3405 sdkp->atomic_granularity = get_unaligned_be32(&vpd->data[52]); 3406 sdkp->max_atomic_with_boundary = get_unaligned_be32(&vpd->data[56]); 3407 sdkp->max_atomic_boundary = get_unaligned_be32(&vpd->data[60]); 3408 3409 sd_config_atomic(sdkp, lim); 3410 } 3411 3412 out: 3413 rcu_read_unlock(); 3414 } 3415 3416 /* Parse the Block Limits Extension VPD page (0xb7) */ 3417 static void sd_read_block_limits_ext(struct scsi_disk *sdkp) 3418 { 3419 struct scsi_vpd *vpd; 3420 3421 rcu_read_lock(); 3422 vpd = rcu_dereference(sdkp->device->vpd_pgb7); 3423 if (vpd && vpd->len >= 6) 3424 sdkp->rscs = vpd->data[5] & 1; 3425 rcu_read_unlock(); 3426 } 3427 3428 /* Query block device characteristics */ 3429 static void sd_read_block_characteristics(struct scsi_disk *sdkp, 3430 struct queue_limits *lim) 3431 { 3432 struct scsi_vpd *vpd; 3433 u16 rot; 3434 3435 rcu_read_lock(); 3436 vpd = rcu_dereference(sdkp->device->vpd_pgb1); 3437 3438 if (!vpd || vpd->len <= 8) { 3439 rcu_read_unlock(); 3440 return; 3441 } 3442 3443 rot = get_unaligned_be16(&vpd->data[4]); 3444 sdkp->zoned = (vpd->data[8] >> 4) & 3; 3445 rcu_read_unlock(); 3446 3447 if (rot == 1) 3448 lim->features &= ~(BLK_FEAT_ROTATIONAL | BLK_FEAT_ADD_RANDOM); 3449 3450 if (!sdkp->first_scan) 3451 return; 3452 3453 if (sdkp->device->type == TYPE_ZBC) 3454 sd_printk(KERN_NOTICE, sdkp, "Host-managed zoned block device\n"); 3455 else if (sdkp->zoned == 1) 3456 sd_printk(KERN_NOTICE, sdkp, "Host-aware SMR disk used as regular disk\n"); 3457 else if (sdkp->zoned == 2) 3458 sd_printk(KERN_NOTICE, sdkp, "Drive-managed SMR disk\n"); 3459 } 3460 3461 /** 3462 * sd_read_block_provisioning - Query provisioning VPD page 3463 * @sdkp: disk to query 3464 */ 3465 static void sd_read_block_provisioning(struct scsi_disk *sdkp) 3466 { 3467 struct scsi_vpd *vpd; 3468 3469 if (sdkp->lbpme == 0) 3470 return; 3471 3472 rcu_read_lock(); 3473 vpd = rcu_dereference(sdkp->device->vpd_pgb2); 3474 3475 if (!vpd || vpd->len < 8) { 3476 rcu_read_unlock(); 3477 return; 3478 } 3479 3480 sdkp->lbpvpd = 1; 3481 sdkp->lbpu = (vpd->data[5] >> 7) & 1; /* UNMAP */ 3482 sdkp->lbpws = (vpd->data[5] >> 6) & 1; /* WRITE SAME(16) w/ UNMAP */ 3483 sdkp->lbpws10 = (vpd->data[5] >> 5) & 1; /* WRITE SAME(10) w/ UNMAP */ 3484 rcu_read_unlock(); 3485 } 3486 3487 static void sd_read_write_same(struct scsi_disk *sdkp, unsigned 
char *buffer) 3488 { 3489 struct scsi_device *sdev = sdkp->device; 3490 3491 if (sdev->host->no_write_same) { 3492 sdev->no_write_same = 1; 3493 3494 return; 3495 } 3496 3497 if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE, INQUIRY, 0) < 0) { 3498 sdev->no_report_opcodes = 1; 3499 3500 /* 3501 * Disable WRITE SAME if REPORT SUPPORTED OPERATION CODES is 3502 * unsupported and this is an ATA device. 3503 */ 3504 if (sdev->is_ata) 3505 sdev->no_write_same = 1; 3506 } 3507 3508 if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE, WRITE_SAME_16, 0) == 1) 3509 sdkp->ws16 = 1; 3510 3511 if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE, WRITE_SAME, 0) == 1) 3512 sdkp->ws10 = 1; 3513 } 3514 3515 static void sd_read_security(struct scsi_disk *sdkp, unsigned char *buffer) 3516 { 3517 struct scsi_device *sdev = sdkp->device; 3518 3519 if (!sdev->security_supported) 3520 return; 3521 3522 if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE, 3523 SECURITY_PROTOCOL_IN, 0) == 1 && 3524 scsi_report_opcode(sdev, buffer, SD_BUF_SIZE, 3525 SECURITY_PROTOCOL_OUT, 0) == 1) 3526 sdkp->security = 1; 3527 } 3528 3529 static inline sector_t sd64_to_sectors(struct scsi_disk *sdkp, u8 *buf) 3530 { 3531 return logical_to_sectors(sdkp->device, get_unaligned_be64(buf)); 3532 } 3533 3534 /** 3535 * sd_read_cpr - Query concurrent positioning ranges 3536 * @sdkp: disk to query 3537 */ 3538 static void sd_read_cpr(struct scsi_disk *sdkp) 3539 { 3540 struct blk_independent_access_ranges *iars = NULL; 3541 unsigned char *buffer = NULL; 3542 unsigned int nr_cpr = 0; 3543 int i, vpd_len, buf_len = SD_BUF_SIZE; 3544 u8 *desc; 3545 3546 /* 3547 * We need to have the capacity set first for the block layer to be 3548 * able to check the ranges. 3549 */ 3550 if (sdkp->first_scan) 3551 return; 3552 3553 if (!sdkp->capacity) 3554 goto out; 3555 3556 /* 3557 * Concurrent Positioning Ranges VPD: there can be at most 256 ranges, 3558 * leading to a maximum page size of 64 + 256*32 bytes. 
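* (That is 8256 bytes, which is why a dedicated buffer is allocated below
* instead of reusing the SD_BUF_SIZE scratch buffer.)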
3559 */ 3560 buf_len = 64 + 256*32; 3561 buffer = kmalloc(buf_len, GFP_KERNEL); 3562 if (!buffer || scsi_get_vpd_page(sdkp->device, 0xb9, buffer, buf_len)) 3563 goto out; 3564 3565 /* We must have at least a 64B header and one 32B range descriptor */ 3566 vpd_len = get_unaligned_be16(&buffer[2]) + 4; 3567 if (vpd_len > buf_len || vpd_len < 64 + 32 || (vpd_len & 31)) { 3568 sd_printk(KERN_ERR, sdkp, 3569 "Invalid Concurrent Positioning Ranges VPD page\n"); 3570 goto out; 3571 } 3572 3573 nr_cpr = (vpd_len - 64) / 32; 3574 if (nr_cpr == 1) { 3575 nr_cpr = 0; 3576 goto out; 3577 } 3578 3579 iars = disk_alloc_independent_access_ranges(sdkp->disk, nr_cpr); 3580 if (!iars) { 3581 nr_cpr = 0; 3582 goto out; 3583 } 3584 3585 desc = &buffer[64]; 3586 for (i = 0; i < nr_cpr; i++, desc += 32) { 3587 if (desc[0] != i) { 3588 sd_printk(KERN_ERR, sdkp, 3589 "Invalid Concurrent Positioning Range number\n"); 3590 nr_cpr = 0; 3591 break; 3592 } 3593 3594 iars->ia_range[i].sector = sd64_to_sectors(sdkp, desc + 8); 3595 iars->ia_range[i].nr_sectors = sd64_to_sectors(sdkp, desc + 16); 3596 } 3597 3598 out: 3599 disk_set_independent_access_ranges(sdkp->disk, iars); 3600 if (nr_cpr && sdkp->nr_actuators != nr_cpr) { 3601 sd_printk(KERN_NOTICE, sdkp, 3602 "%u concurrent positioning ranges\n", nr_cpr); 3603 sdkp->nr_actuators = nr_cpr; 3604 } 3605 3606 kfree(buffer); 3607 } 3608 3609 static bool sd_validate_min_xfer_size(struct scsi_disk *sdkp) 3610 { 3611 struct scsi_device *sdp = sdkp->device; 3612 unsigned int min_xfer_bytes = 3613 logical_to_bytes(sdp, sdkp->min_xfer_blocks); 3614 3615 if (sdkp->min_xfer_blocks == 0) 3616 return false; 3617 3618 if (min_xfer_bytes & (sdkp->physical_block_size - 1)) { 3619 sd_first_printk(KERN_WARNING, sdkp, 3620 "Preferred minimum I/O size %u bytes not a multiple of physical block size (%u bytes)\n", 3621 min_xfer_bytes, sdkp->physical_block_size); 3622 sdkp->min_xfer_blocks = 0; 3623 return false; 3624 } 3625 3626 sd_first_printk(KERN_INFO, sdkp, "Preferred minimum I/O size %u bytes\n", 3627 min_xfer_bytes); 3628 return true; 3629 } 3630 3631 /* 3632 * Determine the device's preferred I/O size for reads and writes 3633 * unless the reported value is unreasonably small, large, not a 3634 * multiple of the physical block size, or simply garbage. 
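* The reported value is applied to io_opt in sd_revalidate_disk() only when
* every check below passes; otherwise the host's opt_sectors based default is
* kept.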
3635 */ 3636 static bool sd_validate_opt_xfer_size(struct scsi_disk *sdkp, 3637 unsigned int dev_max) 3638 { 3639 struct scsi_device *sdp = sdkp->device; 3640 unsigned int opt_xfer_bytes = 3641 logical_to_bytes(sdp, sdkp->opt_xfer_blocks); 3642 unsigned int min_xfer_bytes = 3643 logical_to_bytes(sdp, sdkp->min_xfer_blocks); 3644 3645 if (sdkp->opt_xfer_blocks == 0) 3646 return false; 3647 3648 if (sdkp->opt_xfer_blocks > dev_max) { 3649 sd_first_printk(KERN_WARNING, sdkp, 3650 "Optimal transfer size %u logical blocks > dev_max (%u logical blocks)\n", 3651 sdkp->opt_xfer_blocks, dev_max); 3652 return false; 3653 } 3654 3655 if (sdkp->opt_xfer_blocks > SD_DEF_XFER_BLOCKS) { 3656 sd_first_printk(KERN_WARNING, sdkp, 3657 "Optimal transfer size %u logical blocks > sd driver limit (%u logical blocks)\n", 3658 sdkp->opt_xfer_blocks, SD_DEF_XFER_BLOCKS); 3659 return false; 3660 } 3661 3662 if (opt_xfer_bytes < PAGE_SIZE) { 3663 sd_first_printk(KERN_WARNING, sdkp, 3664 "Optimal transfer size %u bytes < PAGE_SIZE (%u bytes)\n", 3665 opt_xfer_bytes, (unsigned int)PAGE_SIZE); 3666 return false; 3667 } 3668 3669 if (min_xfer_bytes && opt_xfer_bytes % min_xfer_bytes) { 3670 sd_first_printk(KERN_WARNING, sdkp, 3671 "Optimal transfer size %u bytes not a multiple of preferred minimum block size (%u bytes)\n", 3672 opt_xfer_bytes, min_xfer_bytes); 3673 return false; 3674 } 3675 3676 if (opt_xfer_bytes & (sdkp->physical_block_size - 1)) { 3677 sd_first_printk(KERN_WARNING, sdkp, 3678 "Optimal transfer size %u bytes not a multiple of physical block size (%u bytes)\n", 3679 opt_xfer_bytes, sdkp->physical_block_size); 3680 return false; 3681 } 3682 3683 sd_first_printk(KERN_INFO, sdkp, "Optimal transfer size %u bytes\n", 3684 opt_xfer_bytes); 3685 return true; 3686 } 3687 3688 static void sd_read_block_zero(struct scsi_disk *sdkp) 3689 { 3690 struct scsi_device *sdev = sdkp->device; 3691 unsigned int buf_len = sdev->sector_size; 3692 u8 *buffer, cmd[16] = { }; 3693 3694 buffer = kmalloc(buf_len, GFP_KERNEL); 3695 if (!buffer) 3696 return; 3697 3698 if (sdev->use_16_for_rw) { 3699 cmd[0] = READ_16; 3700 put_unaligned_be64(0, &cmd[2]); /* Logical block address 0 */ 3701 put_unaligned_be32(1, &cmd[10]);/* Transfer 1 logical block */ 3702 } else { 3703 cmd[0] = READ_10; 3704 put_unaligned_be32(0, &cmd[2]); /* Logical block address 0 */ 3705 put_unaligned_be16(1, &cmd[7]); /* Transfer 1 logical block */ 3706 } 3707 3708 scsi_execute_cmd(sdkp->device, cmd, REQ_OP_DRV_IN, buffer, buf_len, 3709 SD_TIMEOUT, sdkp->max_retries, NULL); 3710 kfree(buffer); 3711 } 3712 3713 /** 3714 * sd_revalidate_disk - called the first time a new disk is seen, 3715 * performs disk spin up, read_capacity, etc. 3716 * @disk: struct gendisk we care about 3717 **/ 3718 static void sd_revalidate_disk(struct gendisk *disk) 3719 { 3720 struct scsi_disk *sdkp = scsi_disk(disk); 3721 struct scsi_device *sdp = sdkp->device; 3722 sector_t old_capacity = sdkp->capacity; 3723 struct queue_limits *lim = NULL; 3724 unsigned char *buffer = NULL; 3725 unsigned int dev_max; 3726 int err; 3727 3728 SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, 3729 "sd_revalidate_disk\n")); 3730 3731 /* 3732 * If the device is offline, don't try and read capacity or any 3733 * of the other niceties. 
3734 */ 3735 if (!scsi_device_online(sdp)) 3736 return; 3737 3738 lim = kmalloc(sizeof(*lim), GFP_KERNEL); 3739 if (!lim) 3740 return; 3741 3742 buffer = kmalloc(SD_BUF_SIZE, GFP_KERNEL); 3743 if (!buffer) 3744 goto out; 3745 3746 sd_spinup_disk(sdkp); 3747 3748 *lim = queue_limits_start_update(sdkp->disk->queue); 3749 3750 /* 3751 * Without media there is no reason to ask; moreover, some devices 3752 * react badly if we do. 3753 */ 3754 if (sdkp->media_present) { 3755 sd_read_capacity(sdkp, lim, buffer); 3756 /* 3757 * Some USB/UAS devices return generic values for mode pages 3758 * until the media has been accessed. Trigger a READ operation 3759 * to force the device to populate mode pages. 3760 */ 3761 if (sdp->read_before_ms) 3762 sd_read_block_zero(sdkp); 3763 /* 3764 * set the default to rotational. All non-rotational devices 3765 * support the block characteristics VPD page, which will 3766 * cause this to be updated correctly and any device which 3767 * doesn't support it should be treated as rotational. 3768 */ 3769 lim->features |= (BLK_FEAT_ROTATIONAL | BLK_FEAT_ADD_RANDOM); 3770 3771 if (scsi_device_supports_vpd(sdp)) { 3772 sd_read_block_provisioning(sdkp); 3773 sd_read_block_limits(sdkp, lim); 3774 sd_read_block_limits_ext(sdkp); 3775 sd_read_block_characteristics(sdkp, lim); 3776 sd_zbc_read_zones(sdkp, lim, buffer); 3777 } 3778 3779 sd_config_discard(sdkp, lim, sd_discard_mode(sdkp)); 3780 3781 sd_print_capacity(sdkp, old_capacity); 3782 3783 sd_read_write_protect_flag(sdkp, buffer); 3784 sd_read_cache_type(sdkp, buffer); 3785 sd_read_io_hints(sdkp, buffer); 3786 sd_read_app_tag_own(sdkp, buffer); 3787 sd_read_write_same(sdkp, buffer); 3788 sd_read_security(sdkp, buffer); 3789 sd_config_protection(sdkp, lim); 3790 } 3791 3792 /* 3793 * We now have all cache related info, determine how we deal 3794 * with flush requests. 3795 */ 3796 sd_set_flush_flag(sdkp, lim); 3797 3798 /* Initial block count limit based on CDB TRANSFER LENGTH field size. */ 3799 dev_max = sdp->use_16_for_rw ? SD_MAX_XFER_BLOCKS : SD_DEF_XFER_BLOCKS; 3800 3801 /* Some devices report a maximum block count for READ/WRITE requests. */ 3802 dev_max = min_not_zero(dev_max, sdkp->max_xfer_blocks); 3803 lim->max_dev_sectors = logical_to_sectors(sdp, dev_max); 3804 3805 if (sd_validate_min_xfer_size(sdkp)) 3806 lim->io_min = logical_to_bytes(sdp, sdkp->min_xfer_blocks); 3807 else 3808 lim->io_min = 0; 3809 3810 /* 3811 * Limit default to SCSI host optimal sector limit if set. There may be 3812 * an impact on performance for when the size of a request exceeds this 3813 * host limit. 3814 */ 3815 lim->io_opt = sdp->host->opt_sectors << SECTOR_SHIFT; 3816 if (sd_validate_opt_xfer_size(sdkp, dev_max)) { 3817 lim->io_opt = min_not_zero(lim->io_opt, 3818 logical_to_bytes(sdp, sdkp->opt_xfer_blocks)); 3819 } 3820 3821 sdkp->first_scan = 0; 3822 3823 set_capacity_and_notify(disk, logical_to_sectors(sdp, sdkp->capacity)); 3824 sd_config_write_same(sdkp, lim); 3825 3826 err = queue_limits_commit_update_frozen(sdkp->disk->queue, lim); 3827 if (err) 3828 goto out; 3829 3830 /* 3831 * Query concurrent positioning ranges after 3832 * queue_limits_commit_update() unlocked q->limits_lock to avoid 3833 * deadlock with q->sysfs_dir_lock and q->sysfs_lock. 3834 */ 3835 if (sdkp->media_present && scsi_device_supports_vpd(sdp)) 3836 sd_read_cpr(sdkp); 3837 3838 /* 3839 * For a zoned drive, revalidating the zones can be done only once 3840 * the gendisk capacity is set. So if this fails, set back the gendisk 3841 * capacity to 0. 
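* (For non-zoned devices sd_zbc_revalidate_zones() is expected to return 0
* without doing anything, so this path only matters for ZBC disks.)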
3842 */
3843 if (sd_zbc_revalidate_zones(sdkp))
3844 set_capacity_and_notify(disk, 0);
3845
3846 out:
3847 kfree(buffer);
3848 kfree(lim);
3849
3850 }
3851
3852 /**
3853 * sd_unlock_native_capacity - unlock native capacity
3854 * @disk: struct gendisk to set capacity for
3855 *
3856 * Block layer calls this function if it detects that partitions
3857 * on @disk reach beyond the end of the device. If the SCSI host
3858 * implements ->unlock_native_capacity() method, it's invoked to
3859 * give it a chance to adjust the device capacity.
3860 *
3861 * CONTEXT:
3862 * Defined by block layer. Might sleep.
3863 */
3864 static void sd_unlock_native_capacity(struct gendisk *disk)
3865 {
3866 struct scsi_device *sdev = scsi_disk(disk)->device;
3867
3868 if (sdev->host->hostt->unlock_native_capacity)
3869 sdev->host->hostt->unlock_native_capacity(sdev);
3870 }
3871
3872 static const struct block_device_operations sd_fops = {
3873 .owner = THIS_MODULE,
3874 .open = sd_open,
3875 .release = sd_release,
3876 .ioctl = sd_ioctl,
3877 .getgeo = sd_getgeo,
3878 .compat_ioctl = blkdev_compat_ptr_ioctl,
3879 .check_events = sd_check_events,
3880 .unlock_native_capacity = sd_unlock_native_capacity,
3881 .report_zones = sd_zbc_report_zones,
3882 .get_unique_id = sd_get_unique_id,
3883 .free_disk = scsi_disk_free_disk,
3884 .pr_ops = &sd_pr_ops,
3885 };
3886
3887 /**
3888 * sd_format_disk_name - format disk name
3889 * @prefix: name prefix - ie. "sd" for SCSI disks
3890 * @index: index of the disk to format name for
3891 * @buf: output buffer
3892 * @buflen: length of the output buffer
3893 *
3894 * SCSI disk names start at sda. The 26th device is sdz and the
3895 * 27th is sdaa. The last one with a two-lettered suffix is sdzz,
3896 * which is followed by sdaaa.
3897 *
3898 * This is basically base-26 counting with one extra 'nil' entry
3899 * at the beginning from the second digit on, and can be
3900 * determined using a method similar to base-26 conversion with the
3901 * index shifted by -1 after each digit is computed.
3902 *
3903 * CONTEXT:
3904 * Don't care.
3905 *
3906 * RETURNS:
3907 * 0 on success, -errno on failure.
3908 */
3909 static int sd_format_disk_name(char *prefix, int index, char *buf, int buflen)
3910 {
3911 const int base = 'z' - 'a' + 1;
3912 char *begin = buf + strlen(prefix);
3913 char *end = buf + buflen;
3914 char *p;
3915 int unit;
3916
3917 p = end - 1;
3918 *p = '\0';
3919 unit = base;
3920 do {
3921 if (p == begin)
3922 return -EINVAL;
3923 *--p = 'a' + (index % unit);
3924 index = (index / unit) - 1;
3925 } while (index >= 0);
3926
3927 memmove(begin, p, end - p);
3928 memcpy(buf, prefix, strlen(prefix));
3929
3930 return 0;
3931 }
3932
3933 /**
3934 * sd_probe - called during driver initialization and whenever a
3935 * new scsi device is attached to the system. It is called once
3936 * for each scsi device (not just disks) present.
3937 * @sdp: pointer to device object
3938 *
3939 * Returns 0 if successful (or not interested in this scsi device
3940 * (e.g. scanner)); a negative errno when there is an error.
3941 *
3942 * Note: this function is invoked from the scsi mid-level.
3943 * This function sets up the mapping between a given
3944 * <host,channel,id,lun> (found in sdp) and a new device name
3945 * (e.g. /dev/sda). More precisely it is the block device major
3946 * and minor number that is chosen here.
3947 *
3948 * Assume sd_probe is not re-entrant (for the time being)
3949 * Also think about sd_probe() and sd_remove() running concurrently.
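* For example, index 0 becomes "sda" on SCSI_DISK0_MAJOR with first_minor 0,
* index 16 becomes "sdq" on SCSI_DISK1_MAJOR with first_minor 0, and index 26
* becomes "sdaa"; indexes of 256 and above reuse the same sixteen majors with
* extended minor numbers.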
/**
 * sd_probe - called during driver initialization and whenever a
 * new scsi device is attached to the system. It is called once
 * for each scsi device (not just disks) present.
 * @sdp: pointer to device object
 *
 * Returns 0 if successful (or not interested in this scsi device
 * (e.g. scanner)); a negative errno when there is an error.
 *
 * Note: this function is invoked from the scsi mid-level.
 * This function sets up the mapping between a given
 * <host,channel,id,lun> (found in sdp) and a new device name
 * (e.g. /dev/sda). More precisely it is the block device major
 * and minor number that is chosen here.
 *
 * Assume sd_probe is not re-entrant (for the time being).
 * Also consider sd_probe() and sd_remove() running concurrently.
 **/
static int sd_probe(struct scsi_device *sdp)
{
	struct device *dev = &sdp->sdev_gendev;
	struct scsi_disk *sdkp;
	struct gendisk *gd;
	int index;
	int error;

	scsi_autopm_get_device(sdp);
	error = -ENODEV;
	if (sdp->type != TYPE_DISK &&
	    sdp->type != TYPE_ZBC &&
	    sdp->type != TYPE_MOD &&
	    sdp->type != TYPE_RBC)
		goto out;

	if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED) && sdp->type == TYPE_ZBC) {
		sdev_printk(KERN_WARNING, sdp,
			    "Unsupported ZBC host-managed device.\n");
		goto out;
	}

	SCSI_LOG_HLQUEUE(3, sdev_printk(KERN_INFO, sdp,
					"sd_probe\n"));

	error = -ENOMEM;
	sdkp = kzalloc(sizeof(*sdkp), GFP_KERNEL);
	if (!sdkp)
		goto out;

	gd = blk_mq_alloc_disk_for_queue(sdp->request_queue,
					 &sd_bio_compl_lkclass);
	if (!gd)
		goto out_free;

	index = ida_alloc(&sd_index_ida, GFP_KERNEL);
	if (index < 0) {
		sdev_printk(KERN_WARNING, sdp, "sd_probe: memory exhausted.\n");
		goto out_put;
	}

	error = sd_format_disk_name("sd", index, gd->disk_name, DISK_NAME_LEN);
	if (error) {
		sdev_printk(KERN_WARNING, sdp, "SCSI disk (sd) name length exceeded.\n");
		goto out_free_index;
	}

	sdkp->device = sdp;
	sdkp->disk = gd;
	sdkp->index = index;
	sdkp->max_retries = SD_MAX_RETRIES;
	atomic_set(&sdkp->openers, 0);
	atomic_set(&sdkp->device->ioerr_cnt, 0);

	if (!sdp->request_queue->rq_timeout) {
		if (sdp->type != TYPE_MOD)
			blk_queue_rq_timeout(sdp->request_queue, SD_TIMEOUT);
		else
			blk_queue_rq_timeout(sdp->request_queue,
					     SD_MOD_TIMEOUT);
	}

	device_initialize(&sdkp->disk_dev);
	sdkp->disk_dev.parent = get_device(dev);
	sdkp->disk_dev.class = &sd_disk_class;
	dev_set_name(&sdkp->disk_dev, "%s", dev_name(dev));

	error = device_add(&sdkp->disk_dev);
	if (error) {
		put_device(&sdkp->disk_dev);
		goto out;
	}

	dev_set_drvdata(dev, sdkp);

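	/*
	 * Device node numbering, as derived from the assignments below: each
	 * of the 16 sd majors carries 16 disks with SD_MINORS (16) minors
	 * each, so index 0 maps to sd_major(0) with first_minor 0, index 1
	 * to sd_major(0) with first_minor 16, index 16 to sd_major(1) with
	 * first_minor 0, and so on. Indexes of 256 and above reuse the same
	 * majors with first_minor >= 256, i.e. the extended minor range.
	 */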
"removable " : ""); 4074 scsi_autopm_put_device(sdp); 4075 4076 return 0; 4077 4078 out_free_index: 4079 ida_free(&sd_index_ida, index); 4080 out_put: 4081 put_disk(gd); 4082 out_free: 4083 kfree(sdkp); 4084 out: 4085 scsi_autopm_put_device(sdp); 4086 return error; 4087 } 4088 4089 static int sd_start_stop_device(struct scsi_disk *sdkp, int start) 4090 { 4091 unsigned char cmd[6] = { START_STOP }; /* START_VALID */ 4092 struct scsi_sense_hdr sshdr; 4093 struct scsi_failure failure_defs[] = { 4094 { 4095 /* Power on, reset, or bus device reset occurred */ 4096 .sense = UNIT_ATTENTION, 4097 .asc = 0x29, 4098 .ascq = 0, 4099 .result = SAM_STAT_CHECK_CONDITION, 4100 }, 4101 { 4102 /* Power on occurred */ 4103 .sense = UNIT_ATTENTION, 4104 .asc = 0x29, 4105 .ascq = 1, 4106 .result = SAM_STAT_CHECK_CONDITION, 4107 }, 4108 { 4109 /* SCSI bus reset */ 4110 .sense = UNIT_ATTENTION, 4111 .asc = 0x29, 4112 .ascq = 2, 4113 .result = SAM_STAT_CHECK_CONDITION, 4114 }, 4115 {} 4116 }; 4117 struct scsi_failures failures = { 4118 .total_allowed = 3, 4119 .failure_definitions = failure_defs, 4120 }; 4121 const struct scsi_exec_args exec_args = { 4122 .sshdr = &sshdr, 4123 .req_flags = BLK_MQ_REQ_PM, 4124 .failures = &failures, 4125 }; 4126 struct scsi_device *sdp = sdkp->device; 4127 int res; 4128 4129 if (start) 4130 cmd[4] |= 1; /* START */ 4131 4132 if (sdp->start_stop_pwr_cond) 4133 cmd[4] |= start ? 1 << 4 : 3 << 4; /* Active or Standby */ 4134 4135 if (!scsi_device_online(sdp)) 4136 return -ENODEV; 4137 4138 res = scsi_execute_cmd(sdp, cmd, REQ_OP_DRV_IN, NULL, 0, SD_TIMEOUT, 4139 sdkp->max_retries, &exec_args); 4140 if (res) { 4141 sd_print_result(sdkp, "Start/Stop Unit failed", res); 4142 if (res > 0 && scsi_sense_valid(&sshdr)) { 4143 sd_print_sense_hdr(sdkp, &sshdr); 4144 /* 0x3a is medium not present */ 4145 if (sshdr.asc == 0x3a) 4146 res = 0; 4147 } 4148 } 4149 4150 /* SCSI error codes must not go to the generic layer */ 4151 if (res) 4152 return -EIO; 4153 4154 return 0; 4155 } 4156 4157 /* 4158 * Send a SYNCHRONIZE CACHE instruction down to the device through 4159 * the normal SCSI command structure. Wait for the command to 4160 * complete. 4161 */ 4162 static void sd_shutdown(struct scsi_device *sdp) 4163 { 4164 struct device *dev = &sdp->sdev_gendev; 4165 struct scsi_disk *sdkp = dev_get_drvdata(dev); 4166 4167 if (!sdkp) 4168 return; /* this can happen */ 4169 4170 if (pm_runtime_suspended(dev)) 4171 return; 4172 4173 if (sdkp->WCE && sdkp->media_present) { 4174 sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n"); 4175 sd_sync_cache(sdkp); 4176 } 4177 4178 if ((system_state != SYSTEM_RESTART && 4179 sdkp->device->manage_system_start_stop) || 4180 (system_state == SYSTEM_POWER_OFF && 4181 sdkp->device->manage_shutdown) || 4182 (system_state == SYSTEM_RUNNING && 4183 sdkp->device->manage_runtime_start_stop) || 4184 (system_state == SYSTEM_RESTART && 4185 sdkp->device->manage_restart)) { 4186 sd_printk(KERN_NOTICE, sdkp, "Stopping disk\n"); 4187 sd_start_stop_device(sdkp, 0); 4188 } 4189 } 4190 4191 /** 4192 * sd_remove - called whenever a scsi disk (previously recognized by 4193 * sd_probe) is detached from the system. It is called (potentially 4194 * multiple times) during sd module unload. 4195 * @sdp: pointer to device object 4196 * 4197 * Note: this function is invoked from the scsi mid-level. 4198 * This function potentially frees up a device name (e.g. /dev/sdc) 4199 * that could be re-used by a subsequent sd_probe(). 
	if (start)
		cmd[4] |= 1;	/* START */

	if (sdp->start_stop_pwr_cond)
		cmd[4] |= start ? 1 << 4 : 3 << 4;	/* Active or Standby */

	if (!scsi_device_online(sdp))
		return -ENODEV;

	res = scsi_execute_cmd(sdp, cmd, REQ_OP_DRV_IN, NULL, 0, SD_TIMEOUT,
			       sdkp->max_retries, &exec_args);
	if (res) {
		sd_print_result(sdkp, "Start/Stop Unit failed", res);
		if (res > 0 && scsi_sense_valid(&sshdr)) {
			sd_print_sense_hdr(sdkp, &sshdr);
			/* 0x3a is medium not present */
			if (sshdr.asc == 0x3a)
				res = 0;
		}
	}

	/* SCSI error codes must not go to the generic layer */
	if (res)
		return -EIO;

	return 0;
}

/*
 * Send a SYNCHRONIZE CACHE instruction down to the device through
 * the normal SCSI command structure. Wait for the command to
 * complete.
 */
static void sd_shutdown(struct scsi_device *sdp)
{
	struct device *dev = &sdp->sdev_gendev;
	struct scsi_disk *sdkp = dev_get_drvdata(dev);

	if (!sdkp)
		return;		/* this can happen */

	if (pm_runtime_suspended(dev))
		return;

	if (sdkp->WCE && sdkp->media_present) {
		sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n");
		sd_sync_cache(sdkp);
	}

	if ((system_state != SYSTEM_RESTART &&
	     sdkp->device->manage_system_start_stop) ||
	    (system_state == SYSTEM_POWER_OFF &&
	     sdkp->device->manage_shutdown)) {
		sd_printk(KERN_NOTICE, sdkp, "Stopping disk\n");
		sd_start_stop_device(sdkp, 0);
	}
}

/**
 * sd_remove - called whenever a scsi disk (previously recognized by
 * sd_probe) is detached from the system. It is called (potentially
 * multiple times) during sd module unload.
 * @sdp: pointer to device object
 *
 * Note: this function is invoked from the scsi mid-level.
 * This function potentially frees up a device name (e.g. /dev/sdc)
 * that could be re-used by a subsequent sd_probe().
 * This function is not called when the built-in sd driver is "exit-ed".
 **/
static void sd_remove(struct scsi_device *sdp)
{
	struct device *dev = &sdp->sdev_gendev;
	struct scsi_disk *sdkp = dev_get_drvdata(dev);

	scsi_autopm_get_device(sdkp->device);

	device_del(&sdkp->disk_dev);
	del_gendisk(sdkp->disk);
	if (!sdkp->suspended)
		sd_shutdown(sdp);

	put_disk(sdkp->disk);
}

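/*
 * Whether sd sends START STOP UNIT on power management transitions is a
 * per-device policy: manage_system_start_stop covers system-wide
 * suspend/resume, manage_runtime_start_stop covers runtime PM, and
 * manage_shutdown covers power-off (see sd_shutdown() above). The helper
 * below selects the flag that applies to the transition being handled.
 */
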
static inline bool sd_do_start_stop(struct scsi_device *sdev, bool runtime)
{
	return (sdev->manage_system_start_stop && !runtime) ||
		(sdev->manage_runtime_start_stop && runtime);
}

static int sd_suspend_common(struct device *dev, bool runtime)
{
	struct scsi_disk *sdkp = dev_get_drvdata(dev);
	int ret = 0;

	if (!sdkp)	/* E.g.: runtime suspend following sd_remove() */
		return 0;

	if (sdkp->WCE && sdkp->media_present) {
		if (!sdkp->device->silence_suspend)
			sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n");
		ret = sd_sync_cache(sdkp);
		/* ignore OFFLINE device */
		if (ret == -ENODEV)
			return 0;

		if (ret)
			return ret;
	}

	if (sd_do_start_stop(sdkp->device, runtime)) {
		if (!sdkp->device->silence_suspend)
			sd_printk(KERN_NOTICE, sdkp, "Stopping disk\n");
		/* an error is not worth aborting a system sleep */
		ret = sd_start_stop_device(sdkp, 0);
		if (!runtime)
			ret = 0;
	}

	if (!ret)
		sdkp->suspended = true;

	return ret;
}

static int sd_suspend_system(struct device *dev)
{
	if (pm_runtime_suspended(dev))
		return 0;

	return sd_suspend_common(dev, false);
}

static int sd_suspend_runtime(struct device *dev)
{
	return sd_suspend_common(dev, true);
}

static int sd_resume(struct device *dev)
{
	struct scsi_disk *sdkp = dev_get_drvdata(dev);

	sd_printk(KERN_NOTICE, sdkp, "Starting disk\n");

	if (opal_unlock_from_suspend(sdkp->opal_dev)) {
		sd_printk(KERN_NOTICE, sdkp, "OPAL unlock failed\n");
		return -EIO;
	}

	return 0;
}

static int sd_resume_common(struct device *dev, bool runtime)
{
	struct scsi_disk *sdkp = dev_get_drvdata(dev);
	int ret;

	if (!sdkp)	/* E.g.: runtime resume at the start of sd_probe() */
		return 0;

	if (!sd_do_start_stop(sdkp->device, runtime)) {
		sdkp->suspended = false;
		return 0;
	}

	sd_printk(KERN_NOTICE, sdkp, "Starting disk\n");
	ret = sd_start_stop_device(sdkp, 1);
	if (!ret) {
		sd_resume(dev);
		sdkp->suspended = false;
	}

	return ret;
}

static int sd_resume_system(struct device *dev)
{
	if (pm_runtime_suspended(dev)) {
		struct scsi_disk *sdkp = dev_get_drvdata(dev);
		struct scsi_device *sdp = sdkp ? sdkp->device : NULL;

		if (sdp && sdp->force_runtime_start_on_system_start)
			pm_request_resume(dev);

		return 0;
	}

	return sd_resume_common(dev, false);
}

static int sd_resume_runtime(struct device *dev)
{
	struct scsi_disk *sdkp = dev_get_drvdata(dev);
	struct scsi_device *sdp;

	if (!sdkp)	/* E.g.: runtime resume at the start of sd_probe() */
		return 0;

	sdp = sdkp->device;

	if (sdp->ignore_media_change) {
		/* clear the device's sense data */
		static const u8 cmd[10] = { REQUEST_SENSE };
		const struct scsi_exec_args exec_args = {
			.req_flags = BLK_MQ_REQ_PM,
		};

		if (scsi_execute_cmd(sdp, cmd, REQ_OP_DRV_IN, NULL, 0,
				     sdp->request_queue->rq_timeout, 1,
				     &exec_args))
			sd_printk(KERN_NOTICE, sdkp,
				  "Failed to clear sense data\n");
	}

	return sd_resume_common(dev, true);
}

static const struct dev_pm_ops sd_pm_ops = {
	.suspend		= sd_suspend_system,
	.resume			= sd_resume_system,
	.poweroff		= sd_suspend_system,
	.restore		= sd_resume_system,
	.runtime_suspend	= sd_suspend_runtime,
	.runtime_resume		= sd_resume_runtime,
};

static struct scsi_driver sd_template = {
	.probe			= sd_probe,
	.remove			= sd_remove,
	.shutdown		= sd_shutdown,
	.gendrv = {
		.name		= "sd",
		.probe_type	= PROBE_PREFER_ASYNCHRONOUS,
		.pm		= &sd_pm_ops,
	},
	.rescan			= sd_rescan,
	.resume			= sd_resume,
	.init_command		= sd_init_command,
	.uninit_command		= sd_uninit_command,
	.done			= sd_done,
	.eh_action		= sd_eh_action,
	.eh_reset		= sd_eh_reset,
};

/**
 * init_sd - entry point for this driver (both when built in and when
 * loaded as a module).
 *
 * Note: this function registers this driver with the scsi mid-level.
 **/
static int __init init_sd(void)
{
	int majors = 0, i, err;

	SCSI_LOG_HLQUEUE(3, printk("init_sd: sd driver entry point\n"));

	for (i = 0; i < SD_MAJORS; i++) {
		if (__register_blkdev(sd_major(i), "sd", sd_default_probe))
			continue;
		majors++;
	}

	if (!majors)
		return -ENODEV;

	err = class_register(&sd_disk_class);
	if (err)
		goto err_out;

	sd_page_pool = mempool_create_page_pool(SD_MEMPOOL_SIZE, 0);
	if (!sd_page_pool) {
		printk(KERN_ERR "sd: can't init discard page pool\n");
		err = -ENOMEM;
		goto err_out_class;
	}

	err = scsi_register_driver(&sd_template.gendrv);
	if (err)
		goto err_out_driver;

	return 0;

err_out_driver:
	mempool_destroy(sd_page_pool);
err_out_class:
	class_unregister(&sd_disk_class);
err_out:
	for (i = 0; i < SD_MAJORS; i++)
		unregister_blkdev(sd_major(i), "sd");
	return err;
}

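/*
 * Teardown in exit_sd() below mirrors init_sd() in reverse: unregister the
 * driver (which unbinds all disks), destroy the discard page pool,
 * unregister the device class, and finally release the block-device majors.
 */
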
/**
 * exit_sd - exit point for this driver (when it is a module).
 *
 * Note: this function unregisters this driver from the scsi mid-level.
 **/
static void __exit exit_sd(void)
{
	int i;

	SCSI_LOG_HLQUEUE(3, printk("exit_sd: exiting sd driver\n"));

	scsi_unregister_driver(&sd_template.gendrv);
	mempool_destroy(sd_page_pool);

	class_unregister(&sd_disk_class);

	for (i = 0; i < SD_MAJORS; i++)
		unregister_blkdev(sd_major(i), "sd");
}

module_init(init_sd);
module_exit(exit_sd);

void sd_print_sense_hdr(struct scsi_disk *sdkp, struct scsi_sense_hdr *sshdr)
{
	scsi_print_sense_hdr(sdkp->device,
			     sdkp->disk ? sdkp->disk->disk_name : NULL, sshdr);
}

void sd_print_result(const struct scsi_disk *sdkp, const char *msg, int result)
{
	const char *hb_string = scsi_hostbyte_string(result);

	if (hb_string)
		sd_printk(KERN_INFO, sdkp,
			  "%s: Result: hostbyte=%s driverbyte=%s\n", msg,
			  hb_string ? hb_string : "invalid",
			  "DRIVER_OK");
	else
		sd_printk(KERN_INFO, sdkp,
			  "%s: Result: hostbyte=0x%02x driverbyte=%s\n",
			  msg, host_byte(result), "DRIVER_OK");
}