1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * scsi.c Copyright (C) 1992 Drew Eckhardt 4 * Copyright (C) 1993, 1994, 1995, 1999 Eric Youngdale 5 * Copyright (C) 2002, 2003 Christoph Hellwig 6 * 7 * generic mid-level SCSI driver 8 * Initial versions: Drew Eckhardt 9 * Subsequent revisions: Eric Youngdale 10 * 11 * <drew@colorado.edu> 12 * 13 * Bug correction thanks go to : 14 * Rik Faith <faith@cs.unc.edu> 15 * Tommy Thorn <tthorn> 16 * Thomas Wuensche <tw@fgb1.fgb.mw.tu-muenchen.de> 17 * 18 * Modified by Eric Youngdale eric@andante.org or ericy@gnu.ai.mit.edu to 19 * add scatter-gather, multiple outstanding request, and other 20 * enhancements. 21 * 22 * Native multichannel, wide scsi, /proc/scsi and hot plugging 23 * support added by Michael Neuffer <mike@i-connect.net> 24 * 25 * Added request_module("scsi_hostadapter") for kerneld: 26 * (Put an "alias scsi_hostadapter your_hostadapter" in /etc/modprobe.conf) 27 * Bjorn Ekwall <bj0rn@blox.se> 28 * (changed to kmod) 29 * 30 * Major improvements to the timeout, abort, and reset processing, 31 * as well as performance modifications for large queue depths by 32 * Leonard N. Zubkoff <lnz@dandelion.com> 33 * 34 * Converted cli() code to spinlocks, Ingo Molnar 35 * 36 * Jiffies wrap fixes (host->resetting), 3 Dec 1998 Andrea Arcangeli 37 * 38 * out_of_space hacks, D. 
Gilbert (dpg) 990608 39 */ 40 41 #include <linux/module.h> 42 #include <linux/moduleparam.h> 43 #include <linux/kernel.h> 44 #include <linux/timer.h> 45 #include <linux/string.h> 46 #include <linux/slab.h> 47 #include <linux/blkdev.h> 48 #include <linux/delay.h> 49 #include <linux/init.h> 50 #include <linux/completion.h> 51 #include <linux/unistd.h> 52 #include <linux/spinlock.h> 53 #include <linux/kmod.h> 54 #include <linux/interrupt.h> 55 #include <linux/notifier.h> 56 #include <linux/cpu.h> 57 #include <linux/mutex.h> 58 #include <linux/unaligned.h> 59 60 #include <scsi/scsi.h> 61 #include <scsi/scsi_cmnd.h> 62 #include <scsi/scsi_dbg.h> 63 #include <scsi/scsi_device.h> 64 #include <scsi/scsi_driver.h> 65 #include <scsi/scsi_eh.h> 66 #include <scsi/scsi_host.h> 67 #include <scsi/scsi_tcq.h> 68 69 #include "scsi_priv.h" 70 #include "scsi_logging.h" 71 72 #define CREATE_TRACE_POINTS 73 #include <trace/events/scsi.h> 74 75 /* 76 * Definitions and constants. 77 */ 78 79 /* 80 * Note - the initial logging level can be set here to log events at boot time. 81 * After the system is up, you may enable logging via the /proc interface. 
 */
unsigned int scsi_logging_level;
#if defined(CONFIG_SCSI_LOGGING)
EXPORT_SYMBOL(scsi_logging_level);
#endif

#ifdef CONFIG_SCSI_LOGGING
/*
 * Log the submission of a command, gated on the ML QUEUE bits of
 * scsi_logging_level.
 */
void scsi_log_send(struct scsi_cmnd *cmd)
{
	unsigned int level;

	/*
	 * If ML QUEUE log level is greater than or equal to:
	 *
	 * 1: nothing (match completion)
	 *
	 * 2: log opcode + command of all commands + cmd address
	 *
	 * 3: same as 2
	 *
	 * 4: same as 3
	 */
	if (unlikely(scsi_logging_level)) {
		level = SCSI_LOG_LEVEL(SCSI_LOG_MLQUEUE_SHIFT,
				       SCSI_LOG_MLQUEUE_BITS);
		if (level > 1) {
			scmd_printk(KERN_INFO, cmd,
				    "Send: scmd 0x%p\n", cmd);
			scsi_print_command(cmd);
		}
	}
}

/*
 * Log the completion of a command, gated on the ML COMPLETE bits of
 * scsi_logging_level.
 */
void scsi_log_completion(struct scsi_cmnd *cmd, int disposition)
{
	unsigned int level;

	/*
	 * If ML COMPLETE log level is greater than or equal to:
	 *
	 * 1: log disposition, result, opcode + command, and conditionally
	 * sense data for failures or non SUCCESS dispositions.
	 *
	 * 2: same as 1 but for all command completions.
	 *
	 * 3: same as 2
	 *
	 * 4: same as 3 plus dump extra junk
	 */
	if (unlikely(scsi_logging_level)) {
		level = SCSI_LOG_LEVEL(SCSI_LOG_MLCOMPLETE_SHIFT,
				       SCSI_LOG_MLCOMPLETE_BITS);
		if (((level > 0) && (cmd->result || disposition != SUCCESS)) ||
		    (level > 1)) {
			scsi_print_result(cmd, "Done", disposition);
			scsi_print_command(cmd);
			/* Dump sense data only for CHECK CONDITION results */
			if (scsi_status_is_check_condition(cmd->result))
				scsi_print_sense(cmd);
			if (level > 3)
				scmd_printk(KERN_INFO, cmd,
					    "scsi host busy %d failed %d\n",
					    scsi_host_busy(cmd->device->host),
					    cmd->device->host->host_failed);
		}
	}
}
#endif

/**
 * scsi_finish_command - cleanup and pass command back to upper layer
 * @cmd: the command
 *
 * Description: Pass command off to upper layer for finishing of I/O
 *              request, waking processes that are waiting on results,
 *              etc.
 */
void scsi_finish_command(struct scsi_cmnd *cmd)
{
	struct scsi_device *sdev = cmd->device;
	struct scsi_target *starget = scsi_target(sdev);
	struct Scsi_Host *shost = sdev->host;
	struct scsi_driver *drv;
	unsigned int good_bytes;

	scsi_device_unbusy(sdev, cmd);

	/*
	 * A command completed, so the device/target/host evidently can
	 * accept new commands again: clear any recorded "blocked" state.
	 */
	if (atomic_read(&shost->host_blocked))
		atomic_set(&shost->host_blocked, 0);
	if (atomic_read(&starget->target_blocked))
		atomic_set(&starget->target_blocked, 0);
	if (atomic_read(&sdev->device_blocked))
		atomic_set(&sdev->device_blocked, 0);

	SCSI_LOG_MLCOMPLETE(4, sdev_printk(KERN_INFO, sdev,
				"Notifying upper driver of completion "
				"(result %x)\n", cmd->result));

	good_bytes = scsi_bufflen(cmd);
	if (!blk_rq_is_passthrough(scsi_cmd_to_rq(cmd))) {
		int old_good_bytes = good_bytes;
		drv = scsi_cmd_to_driver(cmd);
		if (drv->done)
			good_bytes = drv->done(cmd);
		/*
		 * USB may not give sense identifying bad sector and
		 * simply return a residue instead, so subtract off the
		 * residue if drv->done() error processing indicates no
		 * change to the completion length.
		 */
		if (good_bytes == old_good_bytes)
			good_bytes -= scsi_get_resid(cmd);
	}
	scsi_io_completion(cmd, good_bytes);
}


/*
 * Upper bound for a device's queue depth, capped by what the host can
 * queue. 4096 is big enough for saturating fast SCSI LUNs.
 */
int scsi_device_max_queue_depth(struct scsi_device *sdev)
{
	return min_t(int, sdev->host->can_queue, 4096);
}

/**
 * scsi_change_queue_depth - change a device's queue depth
 * @sdev: SCSI Device in question
 * @depth: number of commands allowed to be queued to the driver
 *
 * Sets the device queue depth and returns the new value.
 */
int scsi_change_queue_depth(struct scsi_device *sdev, int depth)
{
	/* No budget map means the device is not set up for queueing */
	if (!sdev->budget_map.map)
		return -EINVAL;

	depth = min_t(int, depth, scsi_device_max_queue_depth(sdev));

	if (depth > 0) {
		sdev->queue_depth = depth;
		/* Publish the new depth before resizing the budget map */
		wmb();
	}

	if (sdev->request_queue)
		blk_set_queue_depth(sdev->request_queue, depth);

	sbitmap_resize(&sdev->budget_map, sdev->queue_depth);

	return sdev->queue_depth;
}
EXPORT_SYMBOL(scsi_change_queue_depth);

/**
 * scsi_track_queue_full - track QUEUE_FULL events to adjust queue depth
 * @sdev: SCSI Device in question
 * @depth: Current number of outstanding SCSI commands on this device,
 *         not counting the one returned as QUEUE_FULL.
 *
 * Description: This function will track successive QUEUE_FULL events on a
 *              specific SCSI device to determine if and when there is a
 *              need to adjust the queue depth on the device.
 *
 * Returns:
 * * 0 - No change needed
 * * >0 - Adjust queue depth to this new depth,
 * * -1 - Drop back to untagged operation using host->cmd_per_lun as the
 *        untagged command depth
 *
 * Lock Status: None held on entry
 *
 * Notes: Low level drivers may call this at any time and we will do
 *        "The Right Thing." We are interrupt context safe.
 */
int scsi_track_queue_full(struct scsi_device *sdev, int depth)
{
	if (!sdev->budget_map.map)
		return 0;

	/*
	 * Don't let QUEUE_FULLs on the same
	 * jiffies count, they could all be from
	 * same event.
	 */
	if ((jiffies >> 4) == (sdev->last_queue_full_time >> 4))
		return 0;

	sdev->last_queue_full_time = jiffies;
	if (sdev->last_queue_full_depth != depth) {
		/* Depth changed since last QUEUE_FULL: restart the count */
		sdev->last_queue_full_count = 1;
		sdev->last_queue_full_depth = depth;
	} else {
		sdev->last_queue_full_count++;
	}

	/* Only react after more than 10 QUEUE_FULLs at the same depth */
	if (sdev->last_queue_full_count <= 10)
		return 0;

	return scsi_change_queue_depth(sdev, depth);
}
EXPORT_SYMBOL(scsi_track_queue_full);

/**
 * scsi_vpd_inquiry - Request a device provide us with a VPD page
 * @sdev: The device to ask
 * @buffer: Where to put the result
 * @page: Which Vital Product Data to return
 * @len: The length of the buffer
 *
 * This is an internal helper function.  You probably want to use
 * scsi_get_vpd_page instead.
 *
 * Returns size of the vpd page on success or a negative error number.
 */
static int scsi_vpd_inquiry(struct scsi_device *sdev, unsigned char *buffer,
			    u8 page, unsigned len)
{
	int result;
	unsigned char cmd[16];

	/* A VPD response always carries a 4-byte header */
	if (len < 4)
		return -EINVAL;

	cmd[0] = INQUIRY;
	cmd[1] = 1;		/* EVPD */
	cmd[2] = page;
	cmd[3] = len >> 8;	/* allocation length, big endian */
	cmd[4] = len & 0xff;
	cmd[5] = 0;		/* Control byte */

	/*
	 * I'm not convinced we need to try quite this hard to get VPD, but
	 * all the existing users tried this hard.
	 */
	result = scsi_execute_cmd(sdev, cmd, REQ_OP_DRV_IN, buffer, len,
				  30 * HZ, 3, NULL);
	if (result)
		return -EIO;

	/*
	 * Sanity check that we got the page back that we asked for and that
	 * the page size is not 0.
	 */
	if (buffer[1] != page)
		return -EIO;

	result = get_unaligned_be16(&buffer[2]);
	if (!result)
		return -EIO;

	/* Reported payload length plus the 4-byte VPD header */
	return result + 4;
}

enum scsi_vpd_parameters {
	SCSI_VPD_HEADER_SIZE = 4,
	SCSI_VPD_LIST_SIZE = 36,
};

/*
 * scsi_get_vpd_size - determine the size of a VPD page
 * @sdev: the device to query
 * @page: VPD page number
 *
 * Returns the size of @page in bytes, or 0 if the page is not supported
 * or its size could not be determined.
 */
static int scsi_get_vpd_size(struct scsi_device *sdev, u8 page)
{
	unsigned char vpd[SCSI_VPD_LIST_SIZE] __aligned(4);
	int result;

	if (sdev->no_vpd_size)
		return SCSI_DEFAULT_VPD_LEN;

	/*
	 * Fetch the supported pages VPD and validate that the requested page
	 * number is present.
	 */
	if (page != 0) {
		result = scsi_vpd_inquiry(sdev, vpd, 0, sizeof(vpd));
		if (result < SCSI_VPD_HEADER_SIZE)
			return 0;

		if (result > sizeof(vpd)) {
			dev_warn_once(&sdev->sdev_gendev,
				      "%s: long VPD page 0 length: %d bytes\n",
				      __func__, result);
			result = sizeof(vpd);
		}

		/* Page 0 payload is the list of supported page codes */
		result -= SCSI_VPD_HEADER_SIZE;
		if (!memchr(&vpd[SCSI_VPD_HEADER_SIZE], page, result))
			return 0;
	}
	/*
	 * Fetch the VPD page header to find out how big the page
	 * is. This is done to prevent problems on legacy devices
	 * which can not handle allocation lengths as large as
	 * potentially requested by the caller.
	 */
	result = scsi_vpd_inquiry(sdev, vpd, page, SCSI_VPD_HEADER_SIZE);
	if (result < 0)
		return 0;

	if (result < SCSI_VPD_HEADER_SIZE) {
		dev_warn_once(&sdev->sdev_gendev,
			      "%s: short VPD page 0x%02x length: %d bytes\n",
			      __func__, page, result);
		return 0;
	}

	return result;
}

/**
 * scsi_get_vpd_page - Get Vital Product Data from a SCSI device
 * @sdev: The device to ask
 * @page: Which Vital Product Data to return
 * @buf: where to store the VPD
 * @buf_len: number of bytes in the VPD buffer area
 *
 * SCSI devices may optionally supply Vital Product Data.  Each 'page'
 * of VPD is defined in the appropriate SCSI document (eg SPC, SBC).
 * If the device supports this VPD page, this routine fills @buf
 * with the data from that page and return 0. If the VPD page is not
 * supported or its content cannot be retrieved, -EINVAL is returned.
 */
int scsi_get_vpd_page(struct scsi_device *sdev, u8 page, unsigned char *buf,
		      int buf_len)
{
	int result, vpd_len;

	if (!scsi_device_supports_vpd(sdev))
		return -EINVAL;

	vpd_len = scsi_get_vpd_size(sdev, page);
	if (vpd_len <= 0)
		return -EINVAL;

	/* Never ask for more than the caller's buffer can hold */
	vpd_len = min(vpd_len, buf_len);

	/*
	 * Fetch the actual page. Since the appropriate size was reported
	 * by the device it is now safe to ask for something bigger.
	 */
	memset(buf, 0, buf_len);
	result = scsi_vpd_inquiry(sdev, buf, page, vpd_len);
	if (result < 0)
		return -EINVAL;
	else if (result > vpd_len)
		dev_warn_once(&sdev->sdev_gendev,
			      "%s: VPD page 0x%02x result %d > %d bytes\n",
			      __func__, page, result, vpd_len);

	return 0;
}
EXPORT_SYMBOL_GPL(scsi_get_vpd_page);

/**
 * scsi_get_vpd_buf - Get Vital Product Data from a SCSI device
 * @sdev: The device to ask
 * @page: Which Vital Product Data to return
 *
 * Returns %NULL upon failure.
 */
static struct scsi_vpd *scsi_get_vpd_buf(struct scsi_device *sdev, u8 page)
{
	struct scsi_vpd *vpd_buf;
	int vpd_len, result;

	vpd_len = scsi_get_vpd_size(sdev, page);
	if (vpd_len <= 0)
		return NULL;

retry_pg:
	/*
	 * Fetch the actual page. Since the appropriate size was reported
	 * by the device it is now safe to ask for something bigger.
	 */
	vpd_buf = kmalloc(sizeof(*vpd_buf) + vpd_len, GFP_KERNEL);
	if (!vpd_buf)
		return NULL;

	result = scsi_vpd_inquiry(sdev, vpd_buf->data, page, vpd_len);
	if (result < 0) {
		kfree(vpd_buf);
		return NULL;
	}
	if (result > vpd_len) {
		dev_warn_once(&sdev->sdev_gendev,
			      "%s: VPD page 0x%02x result %d > %d bytes\n",
			      __func__, page, result, vpd_len);
		/* The device reported more data: retry with a larger buffer */
		vpd_len = result;
		kfree(vpd_buf);
		goto retry_pg;
	}

	vpd_buf->len = result;

	return vpd_buf;
}

/*
 * Fetch VPD page @page and publish it in *@sdev_vpd_buf under RCU,
 * freeing any previously published copy after a grace period.
 */
static void scsi_update_vpd_page(struct scsi_device *sdev, u8 page,
				 struct scsi_vpd __rcu **sdev_vpd_buf)
{
	struct scsi_vpd *vpd_buf;

	vpd_buf = scsi_get_vpd_buf(sdev, page);
	if (!vpd_buf)
		return;

	mutex_lock(&sdev->inquiry_mutex);
	/* vpd_buf now holds the old page (or NULL) */
	vpd_buf = rcu_replace_pointer(*sdev_vpd_buf, vpd_buf,
				      lockdep_is_held(&sdev->inquiry_mutex));
	mutex_unlock(&sdev->inquiry_mutex);

	if (vpd_buf)
		kfree_rcu(vpd_buf, rcu);
}

/**
 * scsi_attach_vpd - Attach Vital Product Data to a SCSI device structure
 * @sdev: The device to ask
 *
 * Attach the 'Device Identification' VPD page (0x83) and the
 * 'Unit Serial Number' VPD page (0x80) to a SCSI device
 * structure. This information can be used to identify the device
 * uniquely.
 */
void scsi_attach_vpd(struct scsi_device *sdev)
{
	int i;
	struct scsi_vpd *vpd_buf;

	if (!scsi_device_supports_vpd(sdev))
		return;

	/* Ask for all the pages supported by this device */
	vpd_buf = scsi_get_vpd_buf(sdev, 0);
	if (!vpd_buf)
		return;

	/* Page 0 payload (after the 4-byte header) lists supported pages */
	for (i = 4; i < vpd_buf->len; i++) {
		switch (vpd_buf->data[i]) {
		case 0x0:
			scsi_update_vpd_page(sdev, 0x0, &sdev->vpd_pg0);
			break;
		case 0x80:
			scsi_update_vpd_page(sdev, 0x80, &sdev->vpd_pg80);
			break;
		case 0x83:
			scsi_update_vpd_page(sdev, 0x83, &sdev->vpd_pg83);
			break;
		case 0x89:
			scsi_update_vpd_page(sdev, 0x89, &sdev->vpd_pg89);
			break;
		case 0xb0:
			scsi_update_vpd_page(sdev, 0xb0, &sdev->vpd_pgb0);
			break;
		case 0xb1:
			scsi_update_vpd_page(sdev, 0xb1, &sdev->vpd_pgb1);
			break;
		case 0xb2:
			scsi_update_vpd_page(sdev, 0xb2, &sdev->vpd_pgb2);
			break;
		case 0xb7:
			scsi_update_vpd_page(sdev, 0xb7, &sdev->vpd_pgb7);
			break;
		default:
			/* Pages we do not cache are simply skipped */
			break;
		}
	}
	kfree(vpd_buf);
}

/**
 * scsi_report_opcode - Find out if a given command is supported
 * @sdev: scsi device to query
 * @buffer: scratch buffer (must be at least 20 bytes long)
 * @len: length of buffer
 * @opcode: opcode for the command to look up
 * @sa: service action for the command to look up
 *
 * Uses the REPORT SUPPORTED OPERATION CODES to check support for the
 * command identified with @opcode and @sa. If the command does not
 * have a service action, @sa must be 0. Returns -EINVAL if RSOC fails,
 * 0 if the command is not supported and 1 if the device claims to
 * support the command.
 */
int scsi_report_opcode(struct scsi_device *sdev, unsigned char *buffer,
		       unsigned int len, unsigned char opcode,
		       unsigned short sa)
{
	unsigned char cmd[16];
	struct scsi_sense_hdr sshdr;
	int result, request_len;
	const struct scsi_exec_args exec_args = {
		.sshdr = &sshdr,
	};

	/* RSOC first appeared in SPC-3; skip devices that can't do it */
	if (sdev->no_report_opcodes || sdev->scsi_level < SCSI_SPC_3)
		return -EINVAL;

	/* RSOC header + size of command we are asking about */
	request_len = 4 + COMMAND_SIZE(opcode);
	if (request_len > len) {
		dev_warn_once(&sdev->sdev_gendev,
			      "%s: len %u bytes, opcode 0x%02x needs %u\n",
			      __func__, len, opcode, request_len);
		return -EINVAL;
	}

	memset(cmd, 0, 16);
	cmd[0] = MAINTENANCE_IN;
	cmd[1] = MI_REPORT_SUPPORTED_OPERATION_CODES;
	if (!sa) {
		cmd[2] = 1;	/* One command format */
		cmd[3] = opcode;
	} else {
		cmd[2] = 3;	/* One command format with service action */
		cmd[3] = opcode;
		put_unaligned_be16(sa, &cmd[4]);
	}
	put_unaligned_be32(request_len, &cmd[6]);
	memset(buffer, 0, len);

	result = scsi_execute_cmd(sdev, cmd, REQ_OP_DRV_IN, buffer,
				  request_len, 30 * HZ, 3, &exec_args);
	if (result < 0)
		return result;
	/* ILLEGAL REQUEST with invalid-opcode/invalid-field sense: no RSOC */
	if (result && scsi_sense_valid(&sshdr) &&
	    sshdr.sense_key == ILLEGAL_REQUEST &&
	    (sshdr.asc == 0x20 || sshdr.asc == 0x24) && sshdr.ascq == 0x00)
		return -EINVAL;

	if ((buffer[1] & 3) == 3)	/* Command supported */
		return 1;

	return 0;
}
EXPORT_SYMBOL(scsi_report_opcode);

#define SCSI_CDL_CHECK_BUF_LEN	64

/*
 * Check whether the command identified by @opcode/@sa is supported and
 * advertises one of the Command Duration Limits mode pages.
 */
static bool scsi_cdl_check_cmd(struct scsi_device *sdev, u8 opcode, u16 sa,
			       unsigned char *buf)
{
	int ret;
	u8 cdlp;

	/* Check operation code */
	ret = scsi_report_opcode(sdev, buf, SCSI_CDL_CHECK_BUF_LEN, opcode, sa);
	if (ret <= 0)
		return false;

	if ((buf[1] & 0x03) != 0x03)
		return false;

	/*
	 * See SPC-6, One_command parameter data
	 * format for
	 * REPORT SUPPORTED OPERATION CODES. We have the following cases
	 * depending on rwcdlp (buf[0] & 0x01) value:
	 *   - rwcdlp == 0: then cdlp indicates support for the A mode page when
	 *		    it is equal to 1 and for the B mode page when it is
	 *		    equal to 2.
	 *   - rwcdlp == 1: then cdlp indicates support for the T2A mode page
	 *		    when it is equal to 1 and for the T2B mode page when
	 *		    it is equal to 2.
	 * Overall, to detect support for command duration limits, we only need
	 * to check that cdlp is 1 or 2.
	 */
	cdlp = (buf[1] & 0x18) >> 3;

	return cdlp == 0x01 || cdlp == 0x02;
}

/**
 * scsi_cdl_check - Check if a SCSI device supports Command Duration Limits
 * @sdev: The device to check
 */
void scsi_cdl_check(struct scsi_device *sdev)
{
	bool cdl_supported;
	unsigned char *buf;

	/*
	 * Support for CDL was defined in SPC-5. Ignore devices reporting a
	 * lower SPC version. This also avoids problems with old drives choking
	 * on MAINTENANCE_IN / MI_REPORT_SUPPORTED_OPERATION_CODES with a
	 * service action specified, as done in scsi_cdl_check_cmd().
	 */
	if (sdev->scsi_level < SCSI_SPC_5) {
		sdev->cdl_supported = 0;
		return;
	}

	buf = kmalloc(SCSI_CDL_CHECK_BUF_LEN, GFP_KERNEL);
	if (!buf) {
		/* No memory for the scratch buffer: report no CDL support */
		sdev->cdl_supported = 0;
		return;
	}

	/* Check support for READ_16, WRITE_16, READ_32 and WRITE_32 commands */
	cdl_supported =
		scsi_cdl_check_cmd(sdev, READ_16, 0, buf) ||
		scsi_cdl_check_cmd(sdev, WRITE_16, 0, buf) ||
		scsi_cdl_check_cmd(sdev, VARIABLE_LENGTH_CMD, READ_32, buf) ||
		scsi_cdl_check_cmd(sdev, VARIABLE_LENGTH_CMD, WRITE_32, buf);
	if (cdl_supported) {
		/*
		 * We have CDL support: force the use of READ16/WRITE16.
		 * READ32 and WRITE32 will be used for devices that support
		 * the T10_PI_TYPE2_PROTECTION protection type.
		 */
		sdev->use_16_for_rw = 1;
		sdev->use_10_for_rw = 0;

		sdev->cdl_supported = 1;

		/*
		 * If the device supports CDL, make sure that the current drive
		 * feature status is consistent with the user controlled
		 * cdl_enable state.
		 */
		scsi_cdl_enable(sdev, sdev->cdl_enable);
	} else {
		sdev->cdl_supported = 0;
	}

	kfree(buf);
}

/**
 * scsi_cdl_enable - Enable or disable a SCSI device supports for Command
 *                   Duration Limits
 * @sdev: The target device
 * @enable: the target state
 */
int scsi_cdl_enable(struct scsi_device *sdev, bool enable)
{
	char buf[64];
	int ret;

	if (!sdev->cdl_supported)
		return -EOPNOTSUPP;

	/*
	 * For ATA devices, CDL needs to be enabled with a SET FEATURES command.
	 */
	if (sdev->is_ata) {
		struct scsi_mode_data data;
		struct scsi_sense_hdr sshdr;
		char *buf_data;
		int len;

		/* Read the ATA feature control mode page (0x0a, subpage 0xf2) */
		ret = scsi_mode_sense(sdev, 0x08, 0x0a, 0xf2, buf, sizeof(buf),
				      5 * HZ, 3, &data, NULL);
		if (ret)
			return -EINVAL;

		/* Enable or disable CDL using the ATA feature page */
		len = min_t(size_t, sizeof(buf),
			    data.length - data.header_length -
			    data.block_descriptor_length);
		buf_data = buf + data.header_length +
			   data.block_descriptor_length;

		/*
		 * If we want to enable CDL and CDL is already enabled on the
		 * device, do nothing. This avoids needlessly resetting the CDL
		 * statistics on the device as that is implied by the CDL enable
		 * action. Similar to this, there is no need to do anything if
		 * we want to disable CDL and CDL is already disabled.
		 */
		if (enable) {
			if ((buf_data[4] & 0x03) == 0x02)
				goto out;
			buf_data[4] &= ~0x03;
			buf_data[4] |= 0x02;
		} else {
			if ((buf_data[4] & 0x03) == 0x00)
				goto out;
			buf_data[4] &= ~0x03;
		}

		/* Write the modified feature page back to the device */
		ret = scsi_mode_select(sdev, 1, 0, buf_data, len, 5 * HZ, 3,
				       &data, &sshdr);
		if (ret) {
			if (ret > 0 && scsi_sense_valid(&sshdr))
				scsi_print_sense_hdr(sdev,
					dev_name(&sdev->sdev_gendev), &sshdr);
			return ret;
		}
	}

out:
	sdev->cdl_enable = enable;

	return 0;
}

/**
 * scsi_device_get - get an additional reference to a scsi_device
 * @sdev: device to get a reference to
 *
 * Description: Gets a reference to the scsi_device and increments the use count
 * of the underlying LLDD module.  You must hold host_lock of the
 * parent Scsi_Host or already have a reference when calling this.
 *
 * This will fail if a device is deleted or cancelled, or when the LLD module
 * is in the process of being unloaded.
 */
int scsi_device_get(struct scsi_device *sdev)
{
	if (sdev->sdev_state == SDEV_DEL || sdev->sdev_state == SDEV_CANCEL)
		goto fail;
	/* Pin the LLDD module first, then the device itself */
	if (!try_module_get(sdev->host->hostt->module))
		goto fail;
	if (!get_device(&sdev->sdev_gendev))
		goto fail_put_module;
	return 0;

fail_put_module:
	module_put(sdev->host->hostt->module);
fail:
	return -ENXIO;
}
EXPORT_SYMBOL(scsi_device_get);

/**
 * scsi_device_put - release a reference to a scsi_device
 * @sdev: device to release a reference on.
 *
 * Description: Release a reference to the scsi_device and decrements the use
 * count of the underlying LLDD module.  The device is freed once the last
 * user vanishes.
 */
void scsi_device_put(struct scsi_device *sdev)
{
	/*
	 * Cache the module pointer before dropping the device reference:
	 * put_device() may release the last reference to sdev.
	 */
	struct module *mod = sdev->host->hostt->module;

	put_device(&sdev->sdev_gendev);
	module_put(mod);
}
EXPORT_SYMBOL(scsi_device_put);

/* helper for shost_for_each_device, see that for documentation */
struct scsi_device *__scsi_iterate_devices(struct Scsi_Host *shost,
					   struct scsi_device *prev)
{
	struct list_head *list = (prev ? &prev->siblings : &shost->__devices);
	struct scsi_device *next = NULL;
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	while (list->next != &shost->__devices) {
		next = list_entry(list->next, struct scsi_device, siblings);
		/*
		 * Skip pseudo devices and also devices we can't get a
		 * reference to.
		 */
		if (!scsi_device_is_pseudo_dev(next) && !scsi_device_get(next))
			break;
		next = NULL;
		list = list->next;
	}
	spin_unlock_irqrestore(shost->host_lock, flags);

	/* Drop the reference taken for @prev on the previous iteration */
	if (prev)
		scsi_device_put(prev);
	return next;
}
EXPORT_SYMBOL(__scsi_iterate_devices);

/**
 * starget_for_each_device - helper to walk all devices of a target
 * @starget: target whose devices we want to iterate over.
 * @data: Opaque passed to each function call.
 * @fn: Function to call on each device
 *
 * This traverses over each device of @starget.  The devices have
 * a reference that must be released by scsi_host_put when breaking
 * out of the loop.
860 */ 861 void starget_for_each_device(struct scsi_target *starget, void *data, 862 void (*fn)(struct scsi_device *, void *)) 863 { 864 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); 865 struct scsi_device *sdev; 866 867 shost_for_each_device(sdev, shost) { 868 if ((sdev->channel == starget->channel) && 869 (sdev->id == starget->id)) 870 fn(sdev, data); 871 } 872 } 873 EXPORT_SYMBOL(starget_for_each_device); 874 875 /** 876 * __starget_for_each_device - helper to walk all devices of a target (UNLOCKED) 877 * @starget: target whose devices we want to iterate over. 878 * @data: parameter for callback @fn() 879 * @fn: callback function that is invoked for each device 880 * 881 * This traverses over each device of @starget. It does _not_ 882 * take a reference on the scsi_device, so the whole loop must be 883 * protected by shost->host_lock. 884 * 885 * Note: The only reason why drivers would want to use this is because 886 * they need to access the device list in irq context. Otherwise you 887 * really want to use starget_for_each_device instead. 888 **/ 889 void __starget_for_each_device(struct scsi_target *starget, void *data, 890 void (*fn)(struct scsi_device *, void *)) 891 { 892 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); 893 struct scsi_device *sdev; 894 895 __shost_for_each_device(sdev, shost) { 896 if ((sdev->channel == starget->channel) && 897 (sdev->id == starget->id)) 898 fn(sdev, data); 899 } 900 } 901 EXPORT_SYMBOL(__starget_for_each_device); 902 903 /** 904 * __scsi_device_lookup_by_target - find a device given the target (UNLOCKED) 905 * @starget: SCSI target pointer 906 * @lun: SCSI Logical Unit Number 907 * 908 * Description: Looks up the scsi_device with the specified @lun for a given 909 * @starget. The returned scsi_device does not have an additional 910 * reference. You must hold the host's host_lock over this call and 911 * any access to the returned scsi_device. A scsi_device in state 912 * SDEV_DEL is skipped. 
913 * 914 * Note: The only reason why drivers should use this is because 915 * they need to access the device list in irq context. Otherwise you 916 * really want to use scsi_device_lookup_by_target instead. 917 **/ 918 struct scsi_device *__scsi_device_lookup_by_target(struct scsi_target *starget, 919 u64 lun) 920 { 921 struct scsi_device *sdev; 922 923 list_for_each_entry(sdev, &starget->devices, same_target_siblings) { 924 if (sdev->sdev_state == SDEV_DEL) 925 continue; 926 if (sdev->lun ==lun) 927 return sdev; 928 } 929 930 return NULL; 931 } 932 EXPORT_SYMBOL(__scsi_device_lookup_by_target); 933 934 /** 935 * scsi_device_lookup_by_target - find a device given the target 936 * @starget: SCSI target pointer 937 * @lun: SCSI Logical Unit Number 938 * 939 * Description: Looks up the scsi_device with the specified @lun for a given 940 * @starget. The returned scsi_device has an additional reference that 941 * needs to be released with scsi_device_put once you're done with it. 942 **/ 943 struct scsi_device *scsi_device_lookup_by_target(struct scsi_target *starget, 944 u64 lun) 945 { 946 struct scsi_device *sdev; 947 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); 948 unsigned long flags; 949 950 spin_lock_irqsave(shost->host_lock, flags); 951 sdev = __scsi_device_lookup_by_target(starget, lun); 952 if (sdev && scsi_device_get(sdev)) 953 sdev = NULL; 954 spin_unlock_irqrestore(shost->host_lock, flags); 955 956 return sdev; 957 } 958 EXPORT_SYMBOL(scsi_device_lookup_by_target); 959 960 /** 961 * __scsi_device_lookup - find a device given the host (UNLOCKED) 962 * @shost: SCSI host pointer 963 * @channel: SCSI channel (zero if only one channel) 964 * @id: SCSI target number (physical unit number) 965 * @lun: SCSI Logical Unit Number 966 * 967 * Description: Looks up the scsi_device with the specified @channel, @id, @lun 968 * for a given host. The returned scsi_device does not have an additional 969 * reference. 
You must hold the host's host_lock over this call and any access 970 * to the returned scsi_device. 971 * 972 * Note: The only reason why drivers would want to use this is because 973 * they need to access the device list in irq context. Otherwise you 974 * really want to use scsi_device_lookup instead. 975 **/ 976 struct scsi_device *__scsi_device_lookup(struct Scsi_Host *shost, 977 uint channel, uint id, u64 lun) 978 { 979 struct scsi_device *sdev; 980 981 list_for_each_entry(sdev, &shost->__devices, siblings) { 982 if (sdev->sdev_state == SDEV_DEL) 983 continue; 984 if (sdev->channel == channel && sdev->id == id && 985 sdev->lun ==lun) 986 return sdev; 987 } 988 989 return NULL; 990 } 991 EXPORT_SYMBOL(__scsi_device_lookup); 992 993 /** 994 * scsi_device_lookup - find a device given the host 995 * @shost: SCSI host pointer 996 * @channel: SCSI channel (zero if only one channel) 997 * @id: SCSI target number (physical unit number) 998 * @lun: SCSI Logical Unit Number 999 * 1000 * Description: Looks up the scsi_device with the specified @channel, @id, @lun 1001 * for a given host. The returned scsi_device has an additional reference that 1002 * needs to be released with scsi_device_put once you're done with it. 
 **/
struct scsi_device *scsi_device_lookup(struct Scsi_Host *shost,
		uint channel, uint id, u64 lun)
{
	struct scsi_device *sdev;
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	sdev = __scsi_device_lookup(shost, channel, id, lun);
	/* Discard the result if we cannot take a reference on it */
	if (sdev && scsi_device_get(sdev))
		sdev = NULL;
	spin_unlock_irqrestore(shost->host_lock, flags);

	return sdev;
}
EXPORT_SYMBOL(scsi_device_lookup);

MODULE_DESCRIPTION("SCSI core");
MODULE_LICENSE("GPL");

module_param(scsi_logging_level, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(scsi_logging_level, "a bit mask of logging levels");

/*
 * Bring up the SCSI core subsystems in order; on failure, unwind the
 * already-initialized ones in reverse via the goto chain below.
 */
static int __init init_scsi(void)
{
	int error;

	error = scsi_init_procfs();
	if (error)
		goto cleanup_queue;
	error = scsi_init_devinfo();
	if (error)
		goto cleanup_procfs;
	error = scsi_init_hosts();
	if (error)
		goto cleanup_devlist;
	error = scsi_init_sysctl();
	if (error)
		goto cleanup_hosts;
	error = scsi_sysfs_register();
	if (error)
		goto cleanup_sysctl;

	scsi_netlink_init();

	printk(KERN_NOTICE "SCSI subsystem initialized\n");
	return 0;

cleanup_sysctl:
	scsi_exit_sysctl();
cleanup_hosts:
	scsi_exit_hosts();
cleanup_devlist:
	scsi_exit_devinfo();
cleanup_procfs:
	scsi_exit_procfs();
cleanup_queue:
	scsi_exit_queue();
	printk(KERN_ERR "SCSI subsystem failed to initialize, error = %d\n",
	       -error);
	return error;
}

/* Tear everything down in the reverse order of init_scsi() */
static void __exit exit_scsi(void)
{
	scsi_netlink_exit();
	scsi_sysfs_unregister();
	scsi_exit_sysctl();
	scsi_exit_hosts();
	scsi_exit_devinfo();
	scsi_exit_procfs();
	scsi_exit_queue();
}

subsys_initcall(init_scsi);
module_exit(exit_scsi);