/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * SCSI disk target driver.
 */
#include <sys/scsi/scsi.h>
#include <sys/dkbad.h>
#include <sys/dklabel.h>
#include <sys/dkio.h>
#include <sys/fdio.h>
#include <sys/cdio.h>
#include <sys/mhd.h>
#include <sys/vtoc.h>
#include <sys/dktp/fdisk.h>
#include <sys/kstat.h>
#include <sys/vtrace.h>
#include <sys/note.h>
#include <sys/thread.h>
#include <sys/proc.h>
#include <sys/efi_partition.h>
#include <sys/var.h>
#include <sys/aio_req.h>

#ifdef __lock_lint
#define	_LP64
#define	__amd64
#endif

#if (defined(__fibre))
/* Note: is there a leadville version of the following? */
#include <sys/fc4/fcal_linkapp.h>
#endif
#include <sys/taskq.h>
#include <sys/uuid.h>
#include <sys/byteorder.h>
#include <sys/sdt.h>

#include "sd_xbuf.h"

#include <sys/scsi/targets/sddef.h>
#include <sys/cmlb.h>


/*
 * Loadable module info.
 */
#if (defined(__fibre))
#define	SD_MODULE_NAME	"SCSI SSA/FCAL Disk Driver %I%"
char _depends_on[]	= "misc/scsi misc/cmlb drv/fcp";
#else
#define	SD_MODULE_NAME	"SCSI Disk Driver %I%"
char _depends_on[]	= "misc/scsi misc/cmlb";
#endif

/*
 * Define the interconnect type, to allow the driver to distinguish
 * between parallel SCSI (sd) and fibre channel (ssd) behaviors.
 *
 * This is really for backward compatibility. In the future, the driver
 * should actually check the "interconnect-type" property as reported by
 * the HBA; however at present this property is not defined by all HBAs,
 * so we will use this #define (1) to permit the driver to run in
 * backward-compatibility mode; and (2) to print a notification message
 * if an FC HBA does not support the "interconnect-type" property. The
 * behavior of the driver will be to assume parallel SCSI behaviors unless
 * the "interconnect-type" property is defined by the HBA **AND** has a
 * value of either INTERCONNECT_FIBRE, INTERCONNECT_SSA, or
 * INTERCONNECT_FABRIC, in which case the driver will assume Fibre
 * Channel behaviors (as per the old ssd). (Note that the
 * INTERCONNECT_1394 and INTERCONNECT_USB types are not supported and
 * will result in the driver assuming parallel SCSI behaviors.)
 *
 * (see common/sys/scsi/impl/services.h)
 *
 * Note: For ssd semantics, don't use INTERCONNECT_FABRIC as the default
 * since some FC HBAs may already support that, and there is some code in
 * the driver that already looks for it.  Using INTERCONNECT_FABRIC as the
 * default would confuse that code, and besides, things should work fine
 * anyway if the FC HBA already reports INTERCONNECT_FABRIC for the
 * "interconnect-type" property.
 */
#if (defined(__fibre))
#define	SD_DEFAULT_INTERCONNECT_TYPE	SD_INTERCONNECT_FIBRE
#else
#define	SD_DEFAULT_INTERCONNECT_TYPE	SD_INTERCONNECT_PARALLEL
#endif
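/*
 * Illustrative sketch only (not part of the original driver): one way the
 * "interconnect-type" check described above could be coded. The helper
 * name sd_guess_interconnect() is hypothetical; the INTERCONNECT_* values
 * come from <sys/scsi/impl/services.h>, and the SD_INTERCONNECT_* values
 * are defined later in this file.
 */
#if 0
static int
sd_guess_interconnect(dev_info_t *devi)
{
	/* -1 is returned when the HBA does not define the property. */
	int itype = ddi_prop_get_int(DDI_DEV_T_ANY, devi, 0,
	    "interconnect-type", -1);

	switch (itype) {
	case INTERCONNECT_FIBRE:
	case INTERCONNECT_SSA:
	case INTERCONNECT_FABRIC:
		return (SD_INTERCONNECT_FIBRE);	/* ssd semantics */
	default:
		/* Undefined, 1394, or USB: assume parallel SCSI. */
		return (SD_DEFAULT_INTERCONNECT_TYPE);
	}
}
#endif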
/*
 * The name of the driver, established from the module name in _init.
 */
static	char *sd_label			= NULL;

/*
 * Driver name is unfortunately prefixed on some driver.conf properties.
 */
#if (defined(__fibre))
#define	sd_max_xfer_size		ssd_max_xfer_size
#define	sd_config_list			ssd_config_list
static	char *sd_max_xfer_size		= "ssd_max_xfer_size";
static	char *sd_config_list		= "ssd-config-list";
#else
static	char *sd_max_xfer_size		= "sd_max_xfer_size";
static	char *sd_config_list		= "sd-config-list";
#endif

/*
 * Driver global variables
 */

#if (defined(__fibre))
/*
 * These #defines are to avoid namespace collisions that occur because this
 * code is currently used to compile two separate driver modules: sd and ssd.
 * All global variables need to be treated this way (even if declared static)
 * in order to allow the debugger to resolve the names properly.
 * It is anticipated that in the near future the ssd module will be obsoleted,
 * at which time this namespace issue should go away.
 */
#define	sd_state			ssd_state
#define	sd_io_time			ssd_io_time
#define	sd_failfast_enable		ssd_failfast_enable
#define	sd_ua_retry_count		ssd_ua_retry_count
#define	sd_report_pfa			ssd_report_pfa
#define	sd_max_throttle			ssd_max_throttle
#define	sd_min_throttle			ssd_min_throttle
#define	sd_rot_delay			ssd_rot_delay

#define	sd_retry_on_reservation_conflict	\
					ssd_retry_on_reservation_conflict
#define	sd_reinstate_resv_delay		ssd_reinstate_resv_delay
#define	sd_resv_conflict_name		ssd_resv_conflict_name

#define	sd_component_mask		ssd_component_mask
#define	sd_level_mask			ssd_level_mask
#define	sd_debug_un			ssd_debug_un
#define	sd_error_level			ssd_error_level

#define	sd_xbuf_active_limit		ssd_xbuf_active_limit
#define	sd_xbuf_reserve_limit		ssd_xbuf_reserve_limit

#define	sd_tr				ssd_tr
#define	sd_reset_throttle_timeout	ssd_reset_throttle_timeout
#define	sd_qfull_throttle_timeout	ssd_qfull_throttle_timeout
#define	sd_qfull_throttle_enable	ssd_qfull_throttle_enable
#define	sd_check_media_time		ssd_check_media_time
#define	sd_wait_cmds_complete		ssd_wait_cmds_complete
#define	sd_label_mutex			ssd_label_mutex
#define	sd_detach_mutex			ssd_detach_mutex
#define	sd_log_buf			ssd_log_buf
#define	sd_log_mutex			ssd_log_mutex

#define	sd_disk_table			ssd_disk_table
#define	sd_disk_table_size		ssd_disk_table_size
#define	sd_sense_mutex			ssd_sense_mutex
#define	sd_cdbtab			ssd_cdbtab

#define	sd_cb_ops			ssd_cb_ops
#define	sd_ops				ssd_ops
#define	sd_additional_codes		ssd_additional_codes
#define	sd_tgops			ssd_tgops

#define	sd_minor_data			ssd_minor_data
#define	sd_minor_data_efi		ssd_minor_data_efi

#define	sd_tq				ssd_tq
#define	sd_wmr_tq			ssd_wmr_tq
#define	sd_taskq_name			ssd_taskq_name
#define	sd_wmr_taskq_name		ssd_wmr_taskq_name
#define	sd_taskq_minalloc		ssd_taskq_minalloc
#define	sd_taskq_maxalloc		ssd_taskq_maxalloc

#define	sd_dump_format_string		ssd_dump_format_string

#define	sd_iostart_chain		ssd_iostart_chain
#define	sd_iodone_chain			ssd_iodone_chain

#define	sd_pm_idletime			ssd_pm_idletime

#define	sd_force_pm_supported		ssd_force_pm_supported

#define	sd_dtype_optical_bind		ssd_dtype_optical_bind

#endif


#ifdef	SDDEBUG
int	sd_force_pm_supported		= 0;
#endif	/* SDDEBUG */

void	*sd_state			= NULL;
int	sd_io_time			= SD_IO_TIME;
int	sd_failfast_enable		= 1;
int	sd_ua_retry_count		= SD_UA_RETRY_COUNT;
int	sd_report_pfa			= 1;
int	sd_max_throttle			= SD_MAX_THROTTLE;
int	sd_min_throttle			= SD_MIN_THROTTLE;
int	sd_rot_delay			= 4; /* Default 4ms Rotation delay */
int	sd_qfull_throttle_enable	= TRUE;

int	sd_retry_on_reservation_conflict	= 1;
int	sd_reinstate_resv_delay		= SD_REINSTATE_RESV_DELAY;
_NOTE(SCHEME_PROTECTS_DATA("safe sharing", sd_reinstate_resv_delay))

static int sd_dtype_optical_bind	= -1;

/* Note: the following is not a bug, it really is "sd_" and not "ssd_" */
static	char *sd_resv_conflict_name	= "sd_retry_on_reservation_conflict";

/*
 * Global data for debug logging. To enable debug printing, sd_component_mask
 * and sd_level_mask should be set to the desired bit patterns as outlined in
 * sddef.h.
 */
uint_t	sd_component_mask		= 0x0;
uint_t	sd_level_mask			= 0x0;
struct	sd_lun *sd_debug_un		= NULL;
uint_t	sd_error_level			= SCSI_ERR_RETRYABLE;
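/*
 * For example (illustrative, not from the original source): the masks
 * above can be set at boot through /etc/system using the usual
 * module:variable syntax (substitute ssd: for the fibre channel module).
 * The all-ones patterns below simply enable everything; see sddef.h for
 * the individual component and level bits.
 *
 *	set sd:sd_component_mask=0xffffffff
 *	set sd:sd_level_mask=0xffffffff
 */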
/* Note: these may go away in the future... */
static uint32_t	sd_xbuf_active_limit	= 512;
static uint32_t sd_xbuf_reserve_limit	= 16;

static struct sd_resv_reclaim_request	sd_tr = { NULL, NULL, NULL, 0, 0, 0 };

/*
 * Timer value used to reset the throttle after it has been reduced
 * (typically in response to TRAN_BUSY or STATUS_QFULL)
 */
static int sd_reset_throttle_timeout	= SD_RESET_THROTTLE_TIMEOUT;
static int sd_qfull_throttle_timeout	= SD_QFULL_THROTTLE_TIMEOUT;

/*
 * Interval value associated with the media change scsi watch.
 */
static int sd_check_media_time		= 3000000;

/*
 * Wait value used for in progress operations during a DDI_SUSPEND
 */
static int sd_wait_cmds_complete	= SD_WAIT_CMDS_COMPLETE;

/*
 * sd_label_mutex protects a static buffer used in the disk label
 * component of the driver
 */
static kmutex_t sd_label_mutex;

/*
 * sd_detach_mutex protects un_layer_count, un_detach_count, and
 * un_opens_in_progress in the sd_lun structure.
 */
static kmutex_t sd_detach_mutex;

_NOTE(MUTEX_PROTECTS_DATA(sd_detach_mutex,
	sd_lun::{un_layer_count un_detach_count un_opens_in_progress}))

/*
 * Global buffer and mutex for debug logging
 */
static char	sd_log_buf[1024];
static kmutex_t	sd_log_mutex;

/*
 * Structs and globals for recording attached lun information.
 * This maintains a chain. Each node in the chain represents a SCSI
 * controller. The structure records the number of luns attached to each
 * target connected with the controller.
 * For parallel SCSI devices only.
 */
struct sd_scsi_hba_tgt_lun {
	struct sd_scsi_hba_tgt_lun	*next;
	dev_info_t			*pdip;
	int				nlun[NTARGETS_WIDE];
};

/*
 * Flags to indicate whether the lun is attached or detached
 */
#define	SD_SCSI_LUN_ATTACH	0
#define	SD_SCSI_LUN_DETACH	1

static kmutex_t	sd_scsi_target_lun_mutex;
static struct sd_scsi_hba_tgt_lun	*sd_scsi_target_lun_head = NULL;

_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_target_lun_mutex,
    sd_scsi_hba_tgt_lun::next sd_scsi_hba_tgt_lun::pdip))

_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_target_lun_mutex,
    sd_scsi_target_lun_head))
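/*
 * Illustrative sketch (not part of the original driver): how a lookup on
 * the chain above might proceed. Each node is keyed by the parent HBA
 * dev_info pointer; the per-target counts live in nlun[]. The helper name
 * is hypothetical; sd_scsi_get_target_lun_count(), declared later in this
 * file, is the real interface.
 */
#if 0
static int
sd_lun_count_sketch(dev_info_t *hba_dip, int target)
{
	struct sd_scsi_hba_tgt_lun *cp;
	int count = -1;	/* -1 means no node for this controller yet */

	mutex_enter(&sd_scsi_target_lun_mutex);
	for (cp = sd_scsi_target_lun_head; cp != NULL; cp = cp->next) {
		if (cp->pdip == hba_dip) {
			count = cp->nlun[target];
			break;
		}
	}
	mutex_exit(&sd_scsi_target_lun_mutex);
	return (count);
}
#endif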
/*
 * "Smart" Probe Caching structs, globals, #defines, etc.
 * For parallel SCSI and non-self-identifying devices only.
 */

/*
 * The following resources and routines are implemented to support
 * "smart" probing, which caches the scsi_probe() results in an array,
 * in order to help avoid long probe times.
 */
struct sd_scsi_probe_cache {
	struct	sd_scsi_probe_cache	*next;
	dev_info_t	*pdip;
	int		cache[NTARGETS_WIDE];
};

static kmutex_t	sd_scsi_probe_cache_mutex;
static struct	sd_scsi_probe_cache *sd_scsi_probe_cache_head = NULL;

/*
 * Really we only need protection on the head of the linked list, but
 * better safe than sorry.
 */
_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_probe_cache_mutex,
    sd_scsi_probe_cache::next sd_scsi_probe_cache::pdip))

_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_probe_cache_mutex,
    sd_scsi_probe_cache_head))
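/*
 * Illustrative sketch (not part of the original driver): the essence of
 * the cached probe. A remembered SCSIPROBE_NORESP for a target lets the
 * driver skip re-probing an address that already failed to respond;
 * anything else falls through to a real scsi_probe(). The helper name is
 * hypothetical; sd_scsi_probe_with_cache(), declared later in this file,
 * is the real interface.
 */
#if 0
static int
sd_probe_sketch(struct sd_scsi_probe_cache *cp, struct scsi_device *devp,
    int tgt, int (*waitfn)())
{
	if (cp->cache[tgt] == SCSIPROBE_NORESP)
		return (SCSIPROBE_NORESP);	/* cached miss; skip probe */

	cp->cache[tgt] = scsi_probe(devp, waitfn);
	return (cp->cache[tgt]);
}
#endif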
/*
 * Vendor specific data name property declarations
 */

#if defined(__fibre) || defined(__i386) || defined(__amd64)

static sd_tunables seagate_properties = {
	SEAGATE_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};


static sd_tunables fujitsu_properties = {
	FUJITSU_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables ibm_properties = {
	IBM_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables purple_properties = {
	PURPLE_THROTTLE_VALUE,
	0,
	0,
	PURPLE_BUSY_RETRIES,
	PURPLE_RESET_RETRY_COUNT,
	PURPLE_RESERVE_RELEASE_TIME,
	0,
	0,
	0
};

static sd_tunables sve_properties = {
	SVE_THROTTLE_VALUE,
	0,
	0,
	SVE_BUSY_RETRIES,
	SVE_RESET_RETRY_COUNT,
	SVE_RESERVE_RELEASE_TIME,
	SVE_MIN_THROTTLE_VALUE,
	SVE_DISKSORT_DISABLED_FLAG,
	0
};

static sd_tunables maserati_properties = {
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	MASERATI_DISKSORT_DISABLED_FLAG,
	MASERATI_LUN_RESET_ENABLED_FLAG
};

static sd_tunables pirus_properties = {
	PIRUS_THROTTLE_VALUE,
	0,
	PIRUS_NRR_COUNT,
	PIRUS_BUSY_RETRIES,
	PIRUS_RESET_RETRY_COUNT,
	0,
	PIRUS_MIN_THROTTLE_VALUE,
	PIRUS_DISKSORT_DISABLED_FLAG,
	PIRUS_LUN_RESET_ENABLED_FLAG
};

#endif

#if (defined(__sparc) && !defined(__fibre)) || \
	(defined(__i386) || defined(__amd64))


static sd_tunables elite_properties = {
	ELITE_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables st31200n_properties = {
	ST31200N_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};

#endif /* Fibre or not */

static sd_tunables lsi_properties_scsi = {
	LSI_THROTTLE_VALUE,
	0,
	LSI_NOTREADY_RETRIES,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables symbios_properties = {
	SYMBIOS_THROTTLE_VALUE,
	0,
	SYMBIOS_NOTREADY_RETRIES,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables lsi_properties = {
	0,
	0,
	LSI_NOTREADY_RETRIES,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables lsi_oem_properties = {
	0,
	0,
	LSI_OEM_NOTREADY_RETRIES,
	0,
	0,
	0,
	0,
	0,
	0
};



#if (defined(SD_PROP_TST))

#define	SD_TST_CTYPE_VAL	CTYPE_CDROM
#define	SD_TST_THROTTLE_VAL	16
#define	SD_TST_NOTREADY_VAL	12
#define	SD_TST_BUSY_VAL		60
#define	SD_TST_RST_RETRY_VAL	36
#define	SD_TST_RSV_REL_TIME	60

static sd_tunables tst_properties = {
	SD_TST_THROTTLE_VAL,
	SD_TST_CTYPE_VAL,
	SD_TST_NOTREADY_VAL,
	SD_TST_BUSY_VAL,
	SD_TST_RST_RETRY_VAL,
	SD_TST_RSV_REL_TIME,
	0,
	0,
	0
};
#endif

/* This is similar to the ANSI toupper implementation */
#define	SD_TOUPPER(C)	(((C) >= 'a' && (C) <= 'z') ? (C) - 'a' + 'A' : (C))

/*
 * Static Driver Configuration Table
 *
 * This is the table of disks which need throttle adjustment (or, perhaps,
 * something else as defined by the flags at a future time). device_id
 * is a string consisting of concatenated vid (vendor), pid (product/model)
 * and revision strings as defined in the scsi_inquiry structure. Offsets of
 * the parts of the string are as defined by the sizes in the scsi_inquiry
 * structure. The device is matched only over as much of the device_id
 * string as is defined. Flags defines which values are to be set in the
 * driver from the properties list.
 *
 * Entries below which begin and end with a "*" are a special case.
 * These do not have a specific vendor, and the string which follows
 * can appear anywhere in the 16 byte PID portion of the inquiry data.
 *
 * Entries below which begin and end with a " " (blank) are a special
 * case. The comparison function will treat multiple consecutive blanks
 * as equivalent to a single blank. For example, this causes a
 * sd_disk_table entry of " NEC CDROM " to match a device's id string
 * of "NEC CDROM".
 *
 * Note: The MD21 controller type has been obsoleted.
 *	 ST318202F is a legacy device.
 *	 MAM3182FC, MAM3364FC, MAM3738FC do not appear to have ever been
 *	 made with an FC connection. The entries here are a legacy.
 */
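/*
 * Illustrative sketch (not part of the original driver): the
 * blank-collapsing comparison described above. Runs of blanks in either
 * string count as one blank, so " NEC CDROM " matches "NEC CDROM".
 * The helper name is hypothetical; sd_blank_cmp(), declared later in
 * this file, is the real interface.
 */
#if 0
static int
sd_blank_cmp_sketch(const char *id, const char *inq)
{
	while (*id != '\0' && *inq != '\0') {
		if (*id == ' ' && *inq == ' ') {
			/* Collapse each run of blanks to a single blank. */
			while (*id == ' ')
				id++;
			while (*inq == ' ')
				inq++;
			continue;
		}
		if (SD_TOUPPER(*id) != SD_TOUPPER(*inq))
			return (0);
		id++;
		inq++;
	}
	/* Match if the table entry was consumed completely. */
	return (*id == '\0');
}
#endif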
static sd_disk_config_t sd_disk_table[] = {
#if defined(__fibre) || defined(__i386) || defined(__amd64)
	{ "SEAGATE ST34371FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST19171FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST39102FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST39103FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST118273F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST318202F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST318203F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST136403F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST318304F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST336704F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST373405F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST336605F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST336752F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST318452F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "FUJITSU MAG3091F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAG3182F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAA3182F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAF3364F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAL3364F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAL3738F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAM3182FC", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAM3364FC", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAM3738FC", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "IBM     DDYFT1835", SD_CONF_BSET_THROTTLE, &ibm_properties },
	{ "IBM     DDYFT3695", SD_CONF_BSET_THROTTLE, &ibm_properties },
	{ "IBM     IC35LF2D2", SD_CONF_BSET_THROTTLE, &ibm_properties },
	{ "IBM     IC35LF2PR", SD_CONF_BSET_THROTTLE, &ibm_properties },
	{ "IBM     1724-100",  SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1726-2xx",  SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1726-4xx",  SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1726-3xx",  SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     3526",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     3542",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     3552",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1722",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1742",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1815",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     FAStT",     SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1814",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1814-200",  SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "LSI     INF",       SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "ENGENIO INF",       SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "SGI     TP",        SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "SGI     IS",        SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "*CSM100_*",         SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "*CSM200_*",         SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "Fujitsu SX300",     SD_CONF_BSET_THROTTLE,  &lsi_oem_properties },
	{ "LSI",               SD_CONF_BSET_NRR_COUNT, &lsi_properties },
	{ "SUN     T3", SD_CONF_BSET_THROTTLE |
			SD_CONF_BSET_BSY_RETRY_COUNT |
			SD_CONF_BSET_RST_RETRIES |
			SD_CONF_BSET_RSV_REL_TIME,
		&purple_properties },
	{ "SUN     SESS01", SD_CONF_BSET_THROTTLE |
			SD_CONF_BSET_BSY_RETRY_COUNT |
			SD_CONF_BSET_RST_RETRIES |
			SD_CONF_BSET_RSV_REL_TIME |
			SD_CONF_BSET_MIN_THROTTLE |
			SD_CONF_BSET_DISKSORT_DISABLED,
		&sve_properties },
	{ "SUN     T4", SD_CONF_BSET_THROTTLE |
			SD_CONF_BSET_BSY_RETRY_COUNT |
			SD_CONF_BSET_RST_RETRIES |
			SD_CONF_BSET_RSV_REL_TIME,
		&purple_properties },
	{ "SUN     SVE01", SD_CONF_BSET_DISKSORT_DISABLED |
			SD_CONF_BSET_LUN_RESET_ENABLED,
		&maserati_properties },
	{ "SUN     SE6920", SD_CONF_BSET_THROTTLE |
			SD_CONF_BSET_NRR_COUNT |
			SD_CONF_BSET_BSY_RETRY_COUNT |
			SD_CONF_BSET_RST_RETRIES |
			SD_CONF_BSET_MIN_THROTTLE |
			SD_CONF_BSET_DISKSORT_DISABLED |
			SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN     SE6940", SD_CONF_BSET_THROTTLE |
			SD_CONF_BSET_NRR_COUNT |
			SD_CONF_BSET_BSY_RETRY_COUNT |
			SD_CONF_BSET_RST_RETRIES |
			SD_CONF_BSET_MIN_THROTTLE |
			SD_CONF_BSET_DISKSORT_DISABLED |
			SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN     StorageTek 6920", SD_CONF_BSET_THROTTLE |
			SD_CONF_BSET_NRR_COUNT |
			SD_CONF_BSET_BSY_RETRY_COUNT |
			SD_CONF_BSET_RST_RETRIES |
			SD_CONF_BSET_MIN_THROTTLE |
			SD_CONF_BSET_DISKSORT_DISABLED |
			SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN     StorageTek 6940", SD_CONF_BSET_THROTTLE |
			SD_CONF_BSET_NRR_COUNT |
			SD_CONF_BSET_BSY_RETRY_COUNT |
			SD_CONF_BSET_RST_RETRIES |
			SD_CONF_BSET_MIN_THROTTLE |
			SD_CONF_BSET_DISKSORT_DISABLED |
			SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN     PSX1000", SD_CONF_BSET_THROTTLE |
			SD_CONF_BSET_NRR_COUNT |
			SD_CONF_BSET_BSY_RETRY_COUNT |
			SD_CONF_BSET_RST_RETRIES |
			SD_CONF_BSET_MIN_THROTTLE |
			SD_CONF_BSET_DISKSORT_DISABLED |
			SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN     SE6330", SD_CONF_BSET_THROTTLE |
			SD_CONF_BSET_NRR_COUNT |
			SD_CONF_BSET_BSY_RETRY_COUNT |
			SD_CONF_BSET_RST_RETRIES |
			SD_CONF_BSET_MIN_THROTTLE |
			SD_CONF_BSET_DISKSORT_DISABLED |
			SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "STK     OPENstorage", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "STK     OpenStorage", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "STK     BladeCtlr",   SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "STK     FLEXLINE",    SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "SYMBIOS", SD_CONF_BSET_NRR_COUNT, &symbios_properties },
#endif /* fibre or NON-sparc platforms */
#if ((defined(__sparc) && !defined(__fibre)) || \
	(defined(__i386) || defined(__amd64)))
	{ "SEAGATE ST42400N", SD_CONF_BSET_THROTTLE, &elite_properties },
	{ "SEAGATE ST31200N", SD_CONF_BSET_THROTTLE, &st31200n_properties },
	{ "SEAGATE ST41600N", SD_CONF_BSET_TUR_CHECK, NULL },
	{ "CONNER  CP30540",  SD_CONF_BSET_NOCACHE,  NULL },
	{ "*SUN0104*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0207*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0327*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0340*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0424*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0669*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN1.0G*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "SYMBIOS INF-01-00 ", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "SYMBIOS", SD_CONF_BSET_THROTTLE | SD_CONF_BSET_NRR_COUNT,
	    &symbios_properties },
	{ "LSI", SD_CONF_BSET_THROTTLE | SD_CONF_BSET_NRR_COUNT,
	    &lsi_properties_scsi },
#if defined(__i386) || defined(__amd64)
	{ " NEC CD-ROM DRIVE:260 ", (SD_CONF_BSET_PLAYMSF_BCD
		| SD_CONF_BSET_READSUB_BCD
		| SD_CONF_BSET_READ_TOC_ADDR_BCD
		| SD_CONF_BSET_NO_READ_HEADER
		| SD_CONF_BSET_READ_CD_XD4), NULL },

	{ " NEC CD-ROM DRIVE:270 ", (SD_CONF_BSET_PLAYMSF_BCD
		| SD_CONF_BSET_READSUB_BCD
		| SD_CONF_BSET_READ_TOC_ADDR_BCD
		| SD_CONF_BSET_NO_READ_HEADER
		| SD_CONF_BSET_READ_CD_XD4), NULL },
#endif /* __i386 || __amd64 */
#endif /* sparc NON-fibre or NON-sparc platforms */

#if (defined(SD_PROP_TST))
	{ "VENDOR  PRODUCT ", (SD_CONF_BSET_THROTTLE
		| SD_CONF_BSET_CTYPE
		| SD_CONF_BSET_NRR_COUNT
		| SD_CONF_BSET_FAB_DEVID
		| SD_CONF_BSET_NOCACHE
		| SD_CONF_BSET_BSY_RETRY_COUNT
		| SD_CONF_BSET_PLAYMSF_BCD
		| SD_CONF_BSET_READSUB_BCD
		| SD_CONF_BSET_READ_TOC_TRK_BCD
		| SD_CONF_BSET_READ_TOC_ADDR_BCD
		| SD_CONF_BSET_NO_READ_HEADER
		| SD_CONF_BSET_READ_CD_XD4
		| SD_CONF_BSET_RST_RETRIES
		| SD_CONF_BSET_RSV_REL_TIME
		| SD_CONF_BSET_TUR_CHECK), &tst_properties },
#endif
};

static const int sd_disk_table_size =
	sizeof (sd_disk_table) / sizeof (sd_disk_config_t);



#define	SD_INTERCONNECT_PARALLEL	0
#define	SD_INTERCONNECT_FABRIC		1
#define	SD_INTERCONNECT_FIBRE		2
#define	SD_INTERCONNECT_SSA		3
#define	SD_INTERCONNECT_SATA		4
#define	SD_IS_PARALLEL_SCSI(un)		\
	((un)->un_interconnect_type == SD_INTERCONNECT_PARALLEL)
#define	SD_IS_SERIAL(un)		\
	((un)->un_interconnect_type == SD_INTERCONNECT_SATA)

/*
 * Definitions used by device id registration routines
 */
#define	VPD_HEAD_OFFSET		3	/* size of head for vpd page */
#define	VPD_PAGE_LENGTH		3	/* offset for page length data */
#define	VPD_MODE_PAGE		1	/* offset into vpd pg for "page code" */

static kmutex_t sd_sense_mutex = {0};

/*
 * Macros for updates of the driver state
 */
#define	New_state(un, s)	\
	(un)->un_last_state = (un)->un_state, (un)->un_state = (s)
#define	Restore_state(un)	\
	{ uchar_t tmp = (un)->un_last_state; New_state((un), tmp); }

static struct sd_cdbinfo sd_cdbtab[] = {
	{ CDB_GROUP0, 0x00,	   0x1FFFFF,   0xFF, },
	{ CDB_GROUP1, SCMD_GROUP1, 0xFFFFFFFF, 0xFFFF, },
	{ CDB_GROUP5, SCMD_GROUP5, 0xFFFFFFFF, 0xFFFFFFFF, },
	{ CDB_GROUP4, SCMD_GROUP4, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFF, },
};
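/*
 * Illustrative sketch (not part of the original driver): how a command
 * setup routine can pick the smallest CDB group from sd_cdbtab that
 * accommodates a request. The columns above are, in order, the CDB size,
 * the SCSI group code, the maximum LBA, and the maximum transfer length;
 * the struct field names used here are hypothetical.
 */
#if 0
static const struct sd_cdbinfo *
sd_pick_cdbinfo_sketch(uint64_t lba, uint32_t blockcount)
{
	int i;

	for (i = 0; i < sizeof (sd_cdbtab) / sizeof (sd_cdbtab[0]); i++) {
		if (lba <= sd_cdbtab[i].sc_maxlba &&
		    blockcount <= sd_cdbtab[i].sc_maxlen)
			return (&sd_cdbtab[i]);
	}
	return (NULL);	/* request too large for any supported CDB */
}
#endif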
/*
 * Specifies the number of seconds that must have elapsed since the last
 * command completed for a device to be declared idle to the PM framework.
 */
static int	sd_pm_idletime = 1;

/*
 * Internal function prototypes
 */

#if (defined(__fibre))
/*
 * These #defines are to avoid namespace collisions that occur because this
 * code is currently used to compile two separate driver modules: sd and ssd.
 * All function names need to be treated this way (even if declared static)
 * in order to allow the debugger to resolve the names properly.
 * It is anticipated that in the near future the ssd module will be obsoleted,
 * at which time this ugliness should go away.
 */
#define	sd_log_trace			ssd_log_trace
#define	sd_log_info			ssd_log_info
#define	sd_log_err			ssd_log_err
#define	sdprobe				ssdprobe
#define	sdinfo				ssdinfo
#define	sd_prop_op			ssd_prop_op
#define	sd_scsi_probe_cache_init	ssd_scsi_probe_cache_init
#define	sd_scsi_probe_cache_fini	ssd_scsi_probe_cache_fini
#define	sd_scsi_clear_probe_cache	ssd_scsi_clear_probe_cache
#define	sd_scsi_probe_with_cache	ssd_scsi_probe_with_cache
#define	sd_scsi_target_lun_init		ssd_scsi_target_lun_init
#define	sd_scsi_target_lun_fini		ssd_scsi_target_lun_fini
#define	sd_scsi_get_target_lun_count	ssd_scsi_get_target_lun_count
#define	sd_scsi_update_lun_on_target	ssd_scsi_update_lun_on_target
#define	sd_spin_up_unit			ssd_spin_up_unit
#define	sd_enable_descr_sense		ssd_enable_descr_sense
#define	sd_reenable_dsense_task		ssd_reenable_dsense_task
#define	sd_set_mmc_caps			ssd_set_mmc_caps
#define	sd_read_unit_properties		ssd_read_unit_properties
#define	sd_process_sdconf_file		ssd_process_sdconf_file
#define	sd_process_sdconf_table		ssd_process_sdconf_table
#define	sd_sdconf_id_match		ssd_sdconf_id_match
#define	sd_blank_cmp			ssd_blank_cmp
#define	sd_chk_vers1_data		ssd_chk_vers1_data
#define	sd_set_vers1_properties		ssd_set_vers1_properties

#define	sd_get_physical_geometry	ssd_get_physical_geometry
#define	sd_get_virtual_geometry		ssd_get_virtual_geometry
#define	sd_update_block_info		ssd_update_block_info
#define	sd_register_devid		ssd_register_devid
#define	sd_get_devid			ssd_get_devid
#define	sd_create_devid			ssd_create_devid
#define	sd_write_deviceid		ssd_write_deviceid
#define	sd_check_vpd_page_support	ssd_check_vpd_page_support
#define	sd_setup_pm			ssd_setup_pm
#define	sd_create_pm_components		ssd_create_pm_components
#define	sd_ddi_suspend			ssd_ddi_suspend
#define	sd_ddi_pm_suspend		ssd_ddi_pm_suspend
#define	sd_ddi_resume			ssd_ddi_resume
#define	sd_ddi_pm_resume		ssd_ddi_pm_resume
#define	sdpower				ssdpower
#define	sdattach			ssdattach
#define	sddetach			ssddetach
#define	sd_unit_attach			ssd_unit_attach
#define	sd_unit_detach			ssd_unit_detach
#define	sd_set_unit_attributes		ssd_set_unit_attributes
#define	sd_create_errstats		ssd_create_errstats
#define	sd_set_errstats			ssd_set_errstats
#define	sd_set_pstats			ssd_set_pstats
#define	sddump				ssddump
#define	sd_scsi_poll			ssd_scsi_poll
#define	sd_send_polled_RQS		ssd_send_polled_RQS
#define	sd_ddi_scsi_poll		ssd_ddi_scsi_poll
#define	sd_init_event_callbacks		ssd_init_event_callbacks
#define	sd_event_callback		ssd_event_callback
#define	sd_cache_control		ssd_cache_control
#define	sd_get_write_cache_enabled	ssd_get_write_cache_enabled
#define	sd_make_device			ssd_make_device
#define	sdopen				ssdopen
#define	sdclose				ssdclose
#define	sd_ready_and_valid		ssd_ready_and_valid
#define	sdmin				ssdmin
#define	sdread				ssdread
#define	sdwrite				ssdwrite
#define	sdaread				ssdaread
#define	sdawrite			ssdawrite
#define	sdstrategy			ssdstrategy
#define	sdioctl				ssdioctl
#define	sd_mapblockaddr_iostart		ssd_mapblockaddr_iostart
#define	sd_mapblocksize_iostart		ssd_mapblocksize_iostart
#define	sd_checksum_iostart		ssd_checksum_iostart
#define	sd_checksum_uscsi_iostart	ssd_checksum_uscsi_iostart
#define	sd_pm_iostart			ssd_pm_iostart
#define	sd_core_iostart			ssd_core_iostart
#define	sd_mapblockaddr_iodone		ssd_mapblockaddr_iodone
#define	sd_mapblocksize_iodone		ssd_mapblocksize_iodone
#define	sd_checksum_iodone		ssd_checksum_iodone
#define	sd_checksum_uscsi_iodone	ssd_checksum_uscsi_iodone
#define	sd_pm_iodone			ssd_pm_iodone
#define	sd_initpkt_for_buf		ssd_initpkt_for_buf
#define	sd_destroypkt_for_buf		ssd_destroypkt_for_buf
#define	sd_setup_rw_pkt			ssd_setup_rw_pkt
#define	sd_setup_next_rw_pkt		ssd_setup_next_rw_pkt
#define	sd_buf_iodone			ssd_buf_iodone
#define	sd_uscsi_strategy		ssd_uscsi_strategy
#define	sd_initpkt_for_uscsi		ssd_initpkt_for_uscsi
#define	sd_destroypkt_for_uscsi		ssd_destroypkt_for_uscsi
#define	sd_uscsi_iodone			ssd_uscsi_iodone
#define	sd_xbuf_strategy		ssd_xbuf_strategy
#define	sd_xbuf_init			ssd_xbuf_init
#define	sd_pm_entry			ssd_pm_entry
#define	sd_pm_exit			ssd_pm_exit

#define	sd_pm_idletimeout_handler	ssd_pm_idletimeout_handler
#define	sd_pm_timeout_handler		ssd_pm_timeout_handler

#define	sd_add_buf_to_waitq		ssd_add_buf_to_waitq
#define	sdintr				ssdintr
#define	sd_start_cmds			ssd_start_cmds
#define	sd_send_scsi_cmd		ssd_send_scsi_cmd
#define	sd_bioclone_alloc		ssd_bioclone_alloc
#define	sd_bioclone_free		ssd_bioclone_free
#define	sd_shadow_buf_alloc		ssd_shadow_buf_alloc
#define	sd_shadow_buf_free		ssd_shadow_buf_free
#define	sd_print_transport_rejected_message	\
					ssd_print_transport_rejected_message
#define	sd_retry_command		ssd_retry_command
#define	sd_set_retry_bp			ssd_set_retry_bp
#define	sd_send_request_sense_command	ssd_send_request_sense_command
#define	sd_start_retry_command		ssd_start_retry_command
#define	sd_start_direct_priority_command	\
					ssd_start_direct_priority_command
#define	sd_return_failed_command	ssd_return_failed_command
#define	sd_return_failed_command_no_restart	\
					ssd_return_failed_command_no_restart
#define	sd_return_command		ssd_return_command
#define	sd_sync_with_callback		ssd_sync_with_callback
#define	sdrunout			ssdrunout
#define	sd_mark_rqs_busy		ssd_mark_rqs_busy
#define	sd_mark_rqs_idle		ssd_mark_rqs_idle
#define	sd_reduce_throttle		ssd_reduce_throttle
#define	sd_restore_throttle		ssd_restore_throttle
#define	sd_print_incomplete_msg		ssd_print_incomplete_msg
#define	sd_init_cdb_limits		ssd_init_cdb_limits
#define	sd_pkt_status_good		ssd_pkt_status_good
#define	sd_pkt_status_check_condition	ssd_pkt_status_check_condition
#define	sd_pkt_status_busy		ssd_pkt_status_busy
#define	sd_pkt_status_reservation_conflict	\
					ssd_pkt_status_reservation_conflict
#define	sd_pkt_status_qfull		ssd_pkt_status_qfull
#define	sd_handle_request_sense		ssd_handle_request_sense
#define	sd_handle_auto_request_sense	ssd_handle_auto_request_sense
#define	sd_print_sense_failed_msg	ssd_print_sense_failed_msg
#define	sd_validate_sense_data		ssd_validate_sense_data
#define	sd_decode_sense			ssd_decode_sense
#define	sd_print_sense_msg		ssd_print_sense_msg
#define	sd_sense_key_no_sense		ssd_sense_key_no_sense
#define	sd_sense_key_recoverable_error	ssd_sense_key_recoverable_error
#define	sd_sense_key_not_ready		ssd_sense_key_not_ready
#define	sd_sense_key_medium_or_hardware_error	\
					ssd_sense_key_medium_or_hardware_error
#define	sd_sense_key_illegal_request	ssd_sense_key_illegal_request
#define	sd_sense_key_unit_attention	ssd_sense_key_unit_attention
#define	sd_sense_key_fail_command	ssd_sense_key_fail_command
#define	sd_sense_key_blank_check	ssd_sense_key_blank_check
#define	sd_sense_key_aborted_command	ssd_sense_key_aborted_command
#define	sd_sense_key_default		ssd_sense_key_default
#define	sd_print_retry_msg		ssd_print_retry_msg
#define	sd_print_cmd_incomplete_msg	ssd_print_cmd_incomplete_msg
#define	sd_pkt_reason_cmd_incomplete	ssd_pkt_reason_cmd_incomplete
#define	sd_pkt_reason_cmd_tran_err	ssd_pkt_reason_cmd_tran_err
#define	sd_pkt_reason_cmd_reset		ssd_pkt_reason_cmd_reset
#define	sd_pkt_reason_cmd_aborted	ssd_pkt_reason_cmd_aborted
#define	sd_pkt_reason_cmd_timeout	ssd_pkt_reason_cmd_timeout
#define	sd_pkt_reason_cmd_unx_bus_free	ssd_pkt_reason_cmd_unx_bus_free
#define	sd_pkt_reason_cmd_tag_reject	ssd_pkt_reason_cmd_tag_reject
#define	sd_pkt_reason_default		ssd_pkt_reason_default
#define	sd_reset_target			ssd_reset_target
#define	sd_start_stop_unit_callback	ssd_start_stop_unit_callback
#define	sd_start_stop_unit_task		ssd_start_stop_unit_task
#define	sd_taskq_create			ssd_taskq_create
#define	sd_taskq_delete			ssd_taskq_delete
#define	sd_media_change_task		ssd_media_change_task
#define	sd_handle_mchange		ssd_handle_mchange
#define	sd_send_scsi_DOORLOCK		ssd_send_scsi_DOORLOCK
#define	sd_send_scsi_READ_CAPACITY	ssd_send_scsi_READ_CAPACITY
#define	sd_send_scsi_READ_CAPACITY_16	ssd_send_scsi_READ_CAPACITY_16
#define	sd_send_scsi_GET_CONFIGURATION	ssd_send_scsi_GET_CONFIGURATION
#define	sd_send_scsi_feature_GET_CONFIGURATION	\
					ssd_send_scsi_feature_GET_CONFIGURATION
#define	sd_send_scsi_START_STOP_UNIT	ssd_send_scsi_START_STOP_UNIT
#define	sd_send_scsi_INQUIRY		ssd_send_scsi_INQUIRY
#define	sd_send_scsi_TEST_UNIT_READY	ssd_send_scsi_TEST_UNIT_READY
#define	sd_send_scsi_PERSISTENT_RESERVE_IN	\
					ssd_send_scsi_PERSISTENT_RESERVE_IN
#define	sd_send_scsi_PERSISTENT_RESERVE_OUT	\
					ssd_send_scsi_PERSISTENT_RESERVE_OUT
#define	sd_send_scsi_SYNCHRONIZE_CACHE	ssd_send_scsi_SYNCHRONIZE_CACHE
#define	sd_send_scsi_SYNCHRONIZE_CACHE_biodone	\
					ssd_send_scsi_SYNCHRONIZE_CACHE_biodone
#define	sd_send_scsi_MODE_SENSE		ssd_send_scsi_MODE_SENSE
#define	sd_send_scsi_MODE_SELECT	ssd_send_scsi_MODE_SELECT
#define	sd_send_scsi_RDWR		ssd_send_scsi_RDWR
#define	sd_send_scsi_LOG_SENSE		ssd_send_scsi_LOG_SENSE
#define	sd_alloc_rqs			ssd_alloc_rqs
#define	sd_free_rqs			ssd_free_rqs
#define	sd_dump_memory			ssd_dump_memory
#define	sd_get_media_info		ssd_get_media_info
#define	sd_dkio_ctrl_info		ssd_dkio_ctrl_info
#define	sd_get_tunables_from_conf	ssd_get_tunables_from_conf
#define	sd_setup_next_xfer		ssd_setup_next_xfer
#define	sd_dkio_get_temp		ssd_dkio_get_temp
#define	sd_check_mhd			ssd_check_mhd
#define	sd_mhd_watch_cb			ssd_mhd_watch_cb
#define	sd_mhd_watch_incomplete		ssd_mhd_watch_incomplete
#define	sd_sname			ssd_sname
#define	sd_mhd_resvd_recover		ssd_mhd_resvd_recover
#define	sd_resv_reclaim_thread		ssd_resv_reclaim_thread
#define	sd_take_ownership		ssd_take_ownership
#define	sd_reserve_release		ssd_reserve_release
#define	sd_rmv_resv_reclaim_req		ssd_rmv_resv_reclaim_req
#define	sd_mhd_reset_notify_cb		ssd_mhd_reset_notify_cb
#define	sd_persistent_reservation_in_read_keys	\
					ssd_persistent_reservation_in_read_keys
#define	sd_persistent_reservation_in_read_resv	\
					ssd_persistent_reservation_in_read_resv
#define	sd_mhdioc_takeown		ssd_mhdioc_takeown
#define	sd_mhdioc_failfast		ssd_mhdioc_failfast
#define	sd_mhdioc_release		ssd_mhdioc_release
#define	sd_mhdioc_register_devid	ssd_mhdioc_register_devid
#define	sd_mhdioc_inkeys		ssd_mhdioc_inkeys
#define	sd_mhdioc_inresv		ssd_mhdioc_inresv
#define	sr_change_blkmode		ssr_change_blkmode
#define	sr_change_speed			ssr_change_speed
#define	sr_atapi_change_speed		ssr_atapi_change_speed
#define	sr_pause_resume			ssr_pause_resume
#define	sr_play_msf			ssr_play_msf
#define	sr_play_trkind			ssr_play_trkind
#define	sr_read_all_subcodes		ssr_read_all_subcodes
#define	sr_read_subchannel		ssr_read_subchannel
#define	sr_read_tocentry		ssr_read_tocentry
#define	sr_read_tochdr			ssr_read_tochdr
#define	sr_read_cdda			ssr_read_cdda
#define	sr_read_cdxa			ssr_read_cdxa
#define	sr_read_mode1			ssr_read_mode1
#define	sr_read_mode2			ssr_read_mode2
#define	sr_read_cd_mode2		ssr_read_cd_mode2
#define	sr_sector_mode			ssr_sector_mode
#define	sr_eject			ssr_eject
#define	sr_ejected			ssr_ejected
#define	sr_check_wp			ssr_check_wp
#define	sd_check_media			ssd_check_media
#define	sd_media_watch_cb		ssd_media_watch_cb
#define	sd_delayed_cv_broadcast		ssd_delayed_cv_broadcast
#define	sr_volume_ctrl			ssr_volume_ctrl
#define	sr_read_sony_session_offset	ssr_read_sony_session_offset
#define	sd_log_page_supported		ssd_log_page_supported
#define	sd_check_for_writable_cd	ssd_check_for_writable_cd
#define	sd_wm_cache_constructor		ssd_wm_cache_constructor
#define	sd_wm_cache_destructor		ssd_wm_cache_destructor
#define	sd_range_lock			ssd_range_lock
#define	sd_get_range			ssd_get_range
#define	sd_free_inlist_wmap		ssd_free_inlist_wmap
#define	sd_range_unlock			ssd_range_unlock
#define	sd_read_modify_write_task	ssd_read_modify_write_task
#define	sddump_do_read_of_rmw		ssddump_do_read_of_rmw

#define	sd_iostart_chain		ssd_iostart_chain
#define	sd_iodone_chain			ssd_iodone_chain
#define	sd_initpkt_map			ssd_initpkt_map
#define	sd_destroypkt_map		ssd_destroypkt_map
#define	sd_chain_type_map		ssd_chain_type_map
#define	sd_chain_index_map		ssd_chain_index_map

#define	sd_failfast_flushctl		ssd_failfast_flushctl
#define	sd_failfast_flushq		ssd_failfast_flushq
#define	sd_failfast_flushq_callback	ssd_failfast_flushq_callback

#define	sd_is_lsi			ssd_is_lsi
#define	sd_tg_rdwr			ssd_tg_rdwr
#define	sd_tg_getinfo			ssd_tg_getinfo

#endif	/* #if (defined(__fibre)) */


int _init(void);
int _fini(void);
int _info(struct modinfo *modinfop);

/*PRINTFLIKE3*/
static void sd_log_trace(uint_t comp, struct sd_lun *un, const char *fmt, ...);
/*PRINTFLIKE3*/
static void sd_log_info(uint_t comp, struct sd_lun *un, const char *fmt, ...);
/*PRINTFLIKE3*/
static void sd_log_err(uint_t comp, struct sd_lun *un, const char *fmt, ...);

static int sdprobe(dev_info_t *devi);
static int sdinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg,
    void **result);
static int sd_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
    int mod_flags, char *name, caddr_t valuep, int *lengthp);

/*
 * Smart probe for parallel scsi
 */
static void sd_scsi_probe_cache_init(void);
static void sd_scsi_probe_cache_fini(void);
static void sd_scsi_clear_probe_cache(void);
static int  sd_scsi_probe_with_cache(struct scsi_device *devp, int (*fn)());

/*
 * Attached luns on target for parallel scsi
 */
static void sd_scsi_target_lun_init(void);
static void sd_scsi_target_lun_fini(void);
static int  sd_scsi_get_target_lun_count(dev_info_t *dip, int target);
static void sd_scsi_update_lun_on_target(dev_info_t *dip, int target,
	int flag);

static int	sd_spin_up_unit(struct sd_lun *un);
#ifdef _LP64
static void	sd_enable_descr_sense(struct sd_lun *un);
static void	sd_reenable_dsense_task(void *arg);
#endif /* _LP64 */

static void	sd_set_mmc_caps(struct sd_lun *un);

static void	sd_read_unit_properties(struct sd_lun *un);
static int	sd_process_sdconf_file(struct sd_lun *un);
static void	sd_get_tunables_from_conf(struct sd_lun *un, int flags,
    int *data_list, sd_tunables *values);
static void	sd_process_sdconf_table(struct sd_lun *un);
static int	sd_sdconf_id_match(struct sd_lun *un, char *id, int idlen);
static int	sd_blank_cmp(struct sd_lun *un, char *id, int idlen);
static int	sd_chk_vers1_data(struct sd_lun *un, int flags, int *prop_list,
    int list_len, char *dataname_ptr);
static void	sd_set_vers1_properties(struct sd_lun *un, int flags,
    sd_tunables *prop_list);

static void	sd_register_devid(struct sd_lun *un, dev_info_t *devi,
    int reservation_flag);
static int	sd_get_devid(struct sd_lun *un);
static ddi_devid_t sd_create_devid(struct sd_lun *un);
static int	sd_write_deviceid(struct sd_lun *un);
static int	sd_get_devid_page(struct sd_lun *un, uchar_t *wwn, int *len);
static int	sd_check_vpd_page_support(struct sd_lun *un);

static void	sd_setup_pm(struct sd_lun *un, dev_info_t *devi);
static void	sd_create_pm_components(dev_info_t *devi, struct sd_lun *un);

static int	sd_ddi_suspend(dev_info_t *devi);
static int	sd_ddi_pm_suspend(struct sd_lun *un);
static int	sd_ddi_resume(dev_info_t *devi);
static int	sd_ddi_pm_resume(struct sd_lun *un);
static int	sdpower(dev_info_t *devi, int component, int level);

static int	sdattach(dev_info_t *devi, ddi_attach_cmd_t cmd);
static int	sddetach(dev_info_t *devi, ddi_detach_cmd_t cmd);
static int	sd_unit_attach(dev_info_t *devi);
static int	sd_unit_detach(dev_info_t *devi);

static void	sd_set_unit_attributes(struct sd_lun *un, dev_info_t *devi);
static void	sd_create_errstats(struct sd_lun *un, int instance);
static void	sd_set_errstats(struct sd_lun *un);
static void	sd_set_pstats(struct sd_lun *un);

static int	sddump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk);
static int	sd_scsi_poll(struct sd_lun *un, struct scsi_pkt *pkt);
static int	sd_send_polled_RQS(struct sd_lun *un);
static int	sd_ddi_scsi_poll(struct scsi_pkt *pkt);

#if (defined(__fibre))
/*
 * Event callbacks (photon)
 */
static void	sd_init_event_callbacks(struct sd_lun *un);
static void	sd_event_callback(dev_info_t *, ddi_eventcookie_t, void *,
    void *);
#endif

/*
 * Defines for sd_cache_control
 */

#define	SD_CACHE_ENABLE		1
#define	SD_CACHE_DISABLE	0
#define	SD_CACHE_NOCHANGE	-1

static int	sd_cache_control(struct sd_lun *un, int rcd_flag, int wce_flag);
static int	sd_get_write_cache_enabled(struct sd_lun *un, int *is_enabled);
static dev_t	sd_make_device(dev_info_t *devi);

static void	sd_update_block_info(struct sd_lun *un, uint32_t lbasize,
	uint64_t capacity);

/*
 * Driver entry point functions.
 */
static int	sdopen(dev_t *dev_p, int flag, int otyp, cred_t *cred_p);
static int	sdclose(dev_t dev, int flag, int otyp, cred_t *cred_p);
static int	sd_ready_and_valid(struct sd_lun *un);

static void	sdmin(struct buf *bp);
static int	sdread(dev_t dev, struct uio *uio, cred_t *cred_p);
static int	sdwrite(dev_t dev, struct uio *uio, cred_t *cred_p);
static int	sdaread(dev_t dev, struct aio_req *aio, cred_t *cred_p);
static int	sdawrite(dev_t dev, struct aio_req *aio, cred_t *cred_p);

static int	sdstrategy(struct buf *bp);
static int	sdioctl(dev_t, int, intptr_t, int, cred_t *, int *);

/*
 * Function prototypes for layering functions in the iostart chain.
 */
static void	sd_mapblockaddr_iostart(int index, struct sd_lun *un,
	struct buf *bp);
static void	sd_mapblocksize_iostart(int index, struct sd_lun *un,
	struct buf *bp);
static void	sd_checksum_iostart(int index, struct sd_lun *un,
	struct buf *bp);
static void	sd_checksum_uscsi_iostart(int index, struct sd_lun *un,
	struct buf *bp);
static void	sd_pm_iostart(int index, struct sd_lun *un, struct buf *bp);
static void	sd_core_iostart(int index, struct sd_lun *un, struct buf *bp);

/*
 * Function prototypes for layering functions in the iodone chain.
 */
static void	sd_buf_iodone(int index, struct sd_lun *un, struct buf *bp);
static void	sd_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp);
static void	sd_mapblockaddr_iodone(int index, struct sd_lun *un,
	struct buf *bp);
static void	sd_mapblocksize_iodone(int index, struct sd_lun *un,
	struct buf *bp);
static void	sd_checksum_iodone(int index, struct sd_lun *un,
	struct buf *bp);
static void	sd_checksum_uscsi_iodone(int index, struct sd_lun *un,
	struct buf *bp);
static void	sd_pm_iodone(int index, struct sd_lun *un, struct buf *bp);

/*
 * Prototypes for functions to support buf(9S) based IO.
 */
static void	sd_xbuf_strategy(struct buf *bp, ddi_xbuf_t xp, void *arg);
static int	sd_initpkt_for_buf(struct buf *, struct scsi_pkt **);
static void	sd_destroypkt_for_buf(struct buf *);
static int	sd_setup_rw_pkt(struct sd_lun *un, struct scsi_pkt **pktpp,
	struct buf *bp, int flags,
	int (*callback)(caddr_t), caddr_t callback_arg,
	diskaddr_t lba, uint32_t blockcount);
#if defined(__i386) || defined(__amd64)
static int	sd_setup_next_rw_pkt(struct sd_lun *un, struct scsi_pkt *pktp,
	struct buf *bp, diskaddr_t lba, uint32_t blockcount);
#endif /* defined(__i386) || defined(__amd64) */

/*
 * Prototypes for functions to support USCSI IO.
 */
static int	sd_uscsi_strategy(struct buf *bp);
static int	sd_initpkt_for_uscsi(struct buf *, struct scsi_pkt **);
static void	sd_destroypkt_for_uscsi(struct buf *);

static void	sd_xbuf_init(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, uchar_t chain_type, void *pktinfop);

static int	sd_pm_entry(struct sd_lun *un);
static void	sd_pm_exit(struct sd_lun *un);

static void	sd_pm_idletimeout_handler(void *arg);

/*
 * sd_core internal functions (used at the sd_core_io layer).
 */
static void	sd_add_buf_to_waitq(struct sd_lun *un, struct buf *bp);
static void	sdintr(struct scsi_pkt *pktp);
static void	sd_start_cmds(struct sd_lun *un, struct buf *immed_bp);

static int	sd_send_scsi_cmd(dev_t dev, struct uscsi_cmd *incmd, int flag,
	enum uio_seg dataspace, int path_flag);

static struct buf *sd_bioclone_alloc(struct buf *bp, size_t datalen,
	daddr_t blkno, int (*func)(struct buf *));
static struct buf *sd_shadow_buf_alloc(struct buf *bp, size_t datalen,
	uint_t bflags, daddr_t blkno, int (*func)(struct buf *));
static void	sd_bioclone_free(struct buf *bp);
static void	sd_shadow_buf_free(struct buf *bp);

static void	sd_print_transport_rejected_message(struct sd_lun *un,
	struct sd_xbuf *xp, int code);
static void	sd_print_incomplete_msg(struct sd_lun *un, struct buf *bp,
	void *arg, int code);
static void	sd_print_sense_failed_msg(struct sd_lun *un, struct buf *bp,
	void *arg, int code);
static void	sd_print_cmd_incomplete_msg(struct sd_lun *un, struct buf *bp,
	void *arg, int code);

static void	sd_retry_command(struct sd_lun *un, struct buf *bp,
	int retry_check_flag,
	void (*user_funcp)(struct sd_lun *un, struct buf *bp, void *argp,
		int c),
	void *user_arg, int failure_code, clock_t retry_delay,
	void (*statp)(kstat_io_t *));

static void	sd_set_retry_bp(struct sd_lun *un, struct buf *bp,
	clock_t retry_delay, void (*statp)(kstat_io_t *));

static void	sd_send_request_sense_command(struct sd_lun *un,
	struct buf *bp, struct scsi_pkt *pktp);
static void	sd_start_retry_command(void *arg);
static void	sd_start_direct_priority_command(void *arg);
static void	sd_return_failed_command(struct sd_lun *un, struct buf *bp,
	int errcode);
static void	sd_return_failed_command_no_restart(struct sd_lun *un,
	struct buf *bp, int errcode);
static void	sd_return_command(struct sd_lun *un, struct buf *bp);
static void	sd_sync_with_callback(struct sd_lun *un);
static int	sdrunout(caddr_t arg);

static void	sd_mark_rqs_busy(struct sd_lun *un, struct buf *bp);
static struct buf *sd_mark_rqs_idle(struct sd_lun *un, struct sd_xbuf *xp);

static void	sd_reduce_throttle(struct sd_lun *un, int throttle_type);
static void	sd_restore_throttle(void *arg);

static void	sd_init_cdb_limits(struct sd_lun *un);

static void	sd_pkt_status_good(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp);

/*
 * Error handling functions
 */
static void	sd_pkt_status_check_condition(struct sd_lun *un,
	struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void	sd_pkt_status_busy(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void	sd_pkt_status_reservation_conflict(struct sd_lun *un,
	struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void	sd_pkt_status_qfull(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void	sd_handle_request_sense(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void	sd_handle_auto_request_sense(struct sd_lun *un,
	struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static int	sd_validate_sense_data(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp);
static void	sd_decode_sense(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void	sd_print_sense_msg(struct sd_lun *un, struct buf *bp,
	void *arg, int code);

static void	sd_sense_key_no_sense(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void	sd_sense_key_recoverable_error(struct sd_lun *un,
	uint8_t *sense_datap,
	struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void	sd_sense_key_not_ready(struct sd_lun *un,
	uint8_t *sense_datap,
	struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void	sd_sense_key_medium_or_hardware_error(struct sd_lun *un,
	uint8_t *sense_datap,
	struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void	sd_sense_key_illegal_request(struct sd_lun *un,
	struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void	sd_sense_key_unit_attention(struct sd_lun *un,
	uint8_t *sense_datap,
	struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void	sd_sense_key_fail_command(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void	sd_sense_key_blank_check(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void	sd_sense_key_aborted_command(struct sd_lun *un,
	struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void	sd_sense_key_default(struct sd_lun *un,
	uint8_t *sense_datap,
	struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void	sd_print_retry_msg(struct sd_lun *un, struct buf *bp,
	void *arg, int flag);

static void	sd_pkt_reason_cmd_incomplete(struct sd_lun *un,
	struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void	sd_pkt_reason_cmd_tran_err(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void	sd_pkt_reason_cmd_reset(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void	sd_pkt_reason_cmd_aborted(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void	sd_pkt_reason_cmd_timeout(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void	sd_pkt_reason_cmd_unx_bus_free(struct sd_lun *un,
	struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void	sd_pkt_reason_cmd_tag_reject(struct sd_lun *un,
	struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void	sd_pkt_reason_default(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void	sd_reset_target(struct sd_lun *un, struct scsi_pkt *pktp);

static void	sd_start_stop_unit_callback(void *arg);
static void	sd_start_stop_unit_task(void *arg);

static void	sd_taskq_create(void);
static void	sd_taskq_delete(void);
static void	sd_media_change_task(void *arg);

static int	sd_handle_mchange(struct sd_lun *un);
static int	sd_send_scsi_DOORLOCK(struct sd_lun *un, int flag,
	int path_flag);
static int	sd_send_scsi_READ_CAPACITY(struct sd_lun *un, uint64_t *capp,
	uint32_t *lbap, int path_flag);
static int	sd_send_scsi_READ_CAPACITY_16(struct sd_lun *un,
	uint64_t *capp, uint32_t *lbap, int path_flag);
static int	sd_send_scsi_START_STOP_UNIT(struct sd_lun *un, int flag,
	int path_flag);
static int	sd_send_scsi_INQUIRY(struct sd_lun *un, uchar_t *bufaddr,
	size_t buflen, uchar_t evpd, uchar_t page_code, size_t *residp);
static int	sd_send_scsi_TEST_UNIT_READY(struct sd_lun *un, int flag);
static int	sd_send_scsi_PERSISTENT_RESERVE_IN(struct sd_lun *un,
	uchar_t usr_cmd, uint16_t data_len, uchar_t *data_bufp);
static int	sd_send_scsi_PERSISTENT_RESERVE_OUT(struct sd_lun *un,
	uchar_t usr_cmd, uchar_t *usr_bufp);
static int	sd_send_scsi_SYNCHRONIZE_CACHE(struct sd_lun *un,
	struct dk_callback *dkc);
static int	sd_send_scsi_SYNCHRONIZE_CACHE_biodone(struct buf *bp);
static int	sd_send_scsi_GET_CONFIGURATION(struct sd_lun *un,
	struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen,
	uchar_t *bufaddr, uint_t buflen, int path_flag);
static int	sd_send_scsi_feature_GET_CONFIGURATION(struct sd_lun *un,
	struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen,
	uchar_t *bufaddr, uint_t buflen, char feature, int path_flag);
static int	sd_send_scsi_MODE_SENSE(struct sd_lun *un, int cdbsize,
	uchar_t *bufaddr, size_t buflen, uchar_t page_code, int path_flag);
static int	sd_send_scsi_MODE_SELECT(struct sd_lun *un, int cdbsize,
	uchar_t *bufaddr, size_t buflen, uchar_t save_page, int path_flag);
static int	sd_send_scsi_RDWR(struct sd_lun *un, uchar_t cmd,
	void *bufaddr, size_t buflen, daddr_t start_block, int path_flag);
#define	sd_send_scsi_READ(un, bufaddr, buflen, start_block, path_flag)	\
	sd_send_scsi_RDWR(un, SCMD_READ, bufaddr, buflen, start_block, \
	path_flag)
#define	sd_send_scsi_WRITE(un, bufaddr, buflen, start_block, path_flag)	\
	sd_send_scsi_RDWR(un, SCMD_WRITE, bufaddr, buflen, start_block,	\
	path_flag)

static int	sd_send_scsi_LOG_SENSE(struct sd_lun *un, uchar_t *bufaddr,
	uint16_t buflen, uchar_t page_code, uchar_t page_control,
	uint16_t param_ptr, int path_flag);

static int	sd_alloc_rqs(struct scsi_device *devp, struct sd_lun *un);
static void	sd_free_rqs(struct sd_lun *un);

static void	sd_dump_memory(struct sd_lun *un, uint_t comp, char *title,
	uchar_t *data, int len, int fmt);
static void	sd_panic_for_res_conflict(struct sd_lun *un);

/*
 * Disk Ioctl Function Prototypes
 */
static int	sd_get_media_info(dev_t dev, caddr_t arg, int flag);
static int	sd_dkio_ctrl_info(dev_t dev, caddr_t arg, int flag);
static int	sd_dkio_get_temp(dev_t dev, caddr_t arg, int flag);

/*
 * Multi-host Ioctl Prototypes
 */
static int	sd_check_mhd(dev_t dev, int interval);
static int	sd_mhd_watch_cb(caddr_t arg,
	struct scsi_watch_result *resultp);
static void	sd_mhd_watch_incomplete(struct sd_lun *un,
	struct scsi_pkt *pkt);
static char	*sd_sname(uchar_t status);
static void	sd_mhd_resvd_recover(void *arg);
static void	sd_resv_reclaim_thread();
static int	sd_take_ownership(dev_t dev, struct mhioctkown *p);
static int	sd_reserve_release(dev_t dev, int cmd);
static void	sd_rmv_resv_reclaim_req(dev_t dev);
static void	sd_mhd_reset_notify_cb(caddr_t arg);
static int	sd_persistent_reservation_in_read_keys(struct sd_lun *un,
	mhioc_inkeys_t *usrp, int flag);
static int	sd_persistent_reservation_in_read_resv(struct sd_lun *un,
	mhioc_inresvs_t *usrp, int flag);
static int	sd_mhdioc_takeown(dev_t dev, caddr_t arg, int flag);
static int	sd_mhdioc_failfast(dev_t dev, caddr_t arg, int flag);
int flag); 1464 static int sd_mhdioc_release(dev_t dev); 1465 static int sd_mhdioc_register_devid(dev_t dev); 1466 static int sd_mhdioc_inkeys(dev_t dev, caddr_t arg, int flag); 1467 static int sd_mhdioc_inresv(dev_t dev, caddr_t arg, int flag); 1468 1469 /* 1470 * SCSI removable prototypes 1471 */ 1472 static int sr_change_blkmode(dev_t dev, int cmd, intptr_t data, int flag); 1473 static int sr_change_speed(dev_t dev, int cmd, intptr_t data, int flag); 1474 static int sr_atapi_change_speed(dev_t dev, int cmd, intptr_t data, int flag); 1475 static int sr_pause_resume(dev_t dev, int mode); 1476 static int sr_play_msf(dev_t dev, caddr_t data, int flag); 1477 static int sr_play_trkind(dev_t dev, caddr_t data, int flag); 1478 static int sr_read_all_subcodes(dev_t dev, caddr_t data, int flag); 1479 static int sr_read_subchannel(dev_t dev, caddr_t data, int flag); 1480 static int sr_read_tocentry(dev_t dev, caddr_t data, int flag); 1481 static int sr_read_tochdr(dev_t dev, caddr_t data, int flag); 1482 static int sr_read_cdda(dev_t dev, caddr_t data, int flag); 1483 static int sr_read_cdxa(dev_t dev, caddr_t data, int flag); 1484 static int sr_read_mode1(dev_t dev, caddr_t data, int flag); 1485 static int sr_read_mode2(dev_t dev, caddr_t data, int flag); 1486 static int sr_read_cd_mode2(dev_t dev, caddr_t data, int flag); 1487 static int sr_sector_mode(dev_t dev, uint32_t blksize); 1488 static int sr_eject(dev_t dev); 1489 static void sr_ejected(register struct sd_lun *un); 1490 static int sr_check_wp(dev_t dev); 1491 static int sd_check_media(dev_t dev, enum dkio_state state); 1492 static int sd_media_watch_cb(caddr_t arg, struct scsi_watch_result *resultp); 1493 static void sd_delayed_cv_broadcast(void *arg); 1494 static int sr_volume_ctrl(dev_t dev, caddr_t data, int flag); 1495 static int sr_read_sony_session_offset(dev_t dev, caddr_t data, int flag); 1496 1497 static int sd_log_page_supported(struct sd_lun *un, int log_page); 1498 1499 /* 1500 * Function Prototype for the non-512 support (DVDRAM, MO etc.) functions. 1501 */ 1502 static void sd_check_for_writable_cd(struct sd_lun *un, int path_flag); 1503 static int sd_wm_cache_constructor(void *wm, void *un, int flags); 1504 static void sd_wm_cache_destructor(void *wm, void *un); 1505 static struct sd_w_map *sd_range_lock(struct sd_lun *un, daddr_t startb, 1506 daddr_t endb, ushort_t typ); 1507 static struct sd_w_map *sd_get_range(struct sd_lun *un, daddr_t startb, 1508 daddr_t endb); 1509 static void sd_free_inlist_wmap(struct sd_lun *un, struct sd_w_map *wmp); 1510 static void sd_range_unlock(struct sd_lun *un, struct sd_w_map *wm); 1511 static void sd_read_modify_write_task(void * arg); 1512 static int 1513 sddump_do_read_of_rmw(struct sd_lun *un, uint64_t blkno, uint64_t nblk, 1514 struct buf **bpp); 1515 1516 1517 /* 1518 * Function prototypes for failfast support. 
1519 */ 1520 static void sd_failfast_flushq(struct sd_lun *un); 1521 static int sd_failfast_flushq_callback(struct buf *bp); 1522 1523 /* 1524 * Function prototypes to check for lsi devices 1525 */ 1526 static void sd_is_lsi(struct sd_lun *un); 1527 1528 /* 1529 * Function prototypes for x86 support 1530 */ 1531 #if defined(__i386) || defined(__amd64) 1532 static int sd_setup_next_xfer(struct sd_lun *un, struct buf *bp, 1533 struct scsi_pkt *pkt, struct sd_xbuf *xp); 1534 #endif 1535 1536 1537 /* Function prototypes for cmlb */ 1538 static int sd_tg_rdwr(dev_info_t *devi, uchar_t cmd, void *bufaddr, 1539 diskaddr_t start_block, size_t reqlength, void *tg_cookie); 1540 1541 static int sd_tg_getinfo(dev_info_t *devi, int cmd, void *arg, void *tg_cookie); 1542 1543 /* 1544 * Constants for failfast support: 1545 * 1546 * SD_FAILFAST_INACTIVE: Instance is currently in a normal state, with NO 1547 * failfast processing being performed. 1548 * 1549 * SD_FAILFAST_ACTIVE: Instance is in the failfast state and is performing 1550 * failfast processing on all bufs with B_FAILFAST set. 1551 */ 1552 1553 #define SD_FAILFAST_INACTIVE 0 1554 #define SD_FAILFAST_ACTIVE 1 1555 1556 /* 1557 * Bitmask to control behavior of buf(9S) flushes when a transition to 1558 * the failfast state occurs. Optional bits include: 1559 * 1560 * SD_FAILFAST_FLUSH_ALL_BUFS: When set, flush ALL bufs including those that 1561 * do NOT have B_FAILFAST set. When clear, only bufs with B_FAILFAST will 1562 * be flushed. 1563 * 1564 * SD_FAILFAST_FLUSH_ALL_QUEUES: When set, flush any/all other queues in the 1565 * driver, in addition to the regular wait queue. This includes the xbuf 1566 * queues. When clear, only the driver's wait queue will be flushed. 1567 */ 1568 #define SD_FAILFAST_FLUSH_ALL_BUFS 0x01 1569 #define SD_FAILFAST_FLUSH_ALL_QUEUES 0x02 1570 1571 /* 1572 * The default behavior is to only flush bufs that have B_FAILFAST set, but 1573 * to flush all queues within the driver. 1574 */ 1575 static int sd_failfast_flushctl = SD_FAILFAST_FLUSH_ALL_QUEUES; 1576 1577 1578 /* 1579 * SD Testing Fault Injection 1580 */ 1581 #ifdef SD_FAULT_INJECTION 1582 static void sd_faultinjection_ioctl(int cmd, intptr_t arg, struct sd_lun *un); 1583 static void sd_faultinjection(struct scsi_pkt *pktp); 1584 static void sd_injection_log(char *buf, struct sd_lun *un); 1585 #endif 1586 1587 /* 1588 * Device driver ops vector 1589 */ 1590 static struct cb_ops sd_cb_ops = { 1591 sdopen, /* open */ 1592 sdclose, /* close */ 1593 sdstrategy, /* strategy */ 1594 nodev, /* print */ 1595 sddump, /* dump */ 1596 sdread, /* read */ 1597 sdwrite, /* write */ 1598 sdioctl, /* ioctl */ 1599 nodev, /* devmap */ 1600 nodev, /* mmap */ 1601 nodev, /* segmap */ 1602 nochpoll, /* poll */ 1603 sd_prop_op, /* cb_prop_op */ 1604 0, /* streamtab */ 1605 D_64BIT | D_MP | D_NEW | D_HOTPLUG, /* Driver compatibility flags */ 1606 CB_REV, /* cb_rev */ 1607 sdaread, /* async I/O read entry point */ 1608 sdawrite /* async I/O write entry point */ 1609 }; 1610 1611 static struct dev_ops sd_ops = { 1612 DEVO_REV, /* devo_rev, */ 1613 0, /* refcnt */ 1614 sdinfo, /* info */ 1615 nulldev, /* identify */ 1616 sdprobe, /* probe */ 1617 sdattach, /* attach */ 1618 sddetach, /* detach */ 1619 nodev, /* reset */ 1620 &sd_cb_ops, /* driver operations */ 1621 NULL, /* bus operations */ 1622 sdpower /* power */ 1623 }; 1624 1625 1626 /* 1627 * This is the loadable module wrapper. 
1628 */
1629 #include <sys/modctl.h>
1630
1631 static struct modldrv modldrv = {
1632 &mod_driverops, /* Type of module. This one is a driver */
1633 SD_MODULE_NAME, /* Module name. */
1634 &sd_ops /* driver ops */
1635 };
1636
1637
1638 static struct modlinkage modlinkage = {
1639 MODREV_1,
1640 &modldrv,
1641 NULL
1642 };
1643
1644 static cmlb_tg_ops_t sd_tgops = {
1645 TG_DK_OPS_VERSION_1,
1646 sd_tg_rdwr,
1647 sd_tg_getinfo
1648 };
1649
1650 static struct scsi_asq_key_strings sd_additional_codes[] = {
1651 0x81, 0, "Logical Unit is Reserved",
1652 0x85, 0, "Audio Address Not Valid",
1653 0xb6, 0, "Media Load Mechanism Failed",
1654 0xB9, 0, "Audio Play Operation Aborted",
1655 0xbf, 0, "Buffer Overflow for Read All Subcodes Command",
1656 0x53, 2, "Medium removal prevented",
1657 0x6f, 0, "Authentication failed during key exchange",
1658 0x6f, 1, "Key not present",
1659 0x6f, 2, "Key not established",
1660 0x6f, 3, "Read without proper authentication",
1661 0x6f, 4, "Mismatched region to this logical unit",
1662 0x6f, 5, "Region reset count error",
1663 0xffff, 0x0, NULL
1664 };
1665
1666
1667 /*
1668 * Struct for passing printing information for sense data messages
1669 */
1670 struct sd_sense_info {
1671 int ssi_severity;
1672 int ssi_pfa_flag;
1673 };
1674
1675 /*
1676 * Table of function pointers for iostart-side routines. Separate "chains"
1677 * of layered function calls are formed by placing the function pointers
1678 * sequentially in the desired order. Functions are called according to an
1679 * incrementing table index ordering. The last function in each chain must
1680 * be sd_core_iostart(). The corresponding iodone-side routines are expected
1681 * in the sd_iodone_chain[] array.
1682 *
1683 * Note: It may seem more natural to organize both the iostart and iodone
1684 * functions together, into an array of structures (or some similar
1685 * organization) with a common index, rather than two separate arrays which
1686 * must be maintained in synchronization. The purpose of this division is
1687 * to achieve improved performance: individual arrays allow for more
1688 * effective cache line utilization on certain platforms.
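 *
 * For example (an illustrative sketch only, using the index values
 * defined below): a buf(9S) entering the removable-media chain with PM
 * enabled starts at sd_mapblockaddr_iostart (Index: 5), and each layer
 * passes the buf to the next one down via the SD_NEXT_IOSTART() macro
 * (defined further below) with an incrementing index:
 *
 *	sd_mapblockaddr_iostart(5, un, bp)
 *	  -> sd_mapblocksize_iostart(6, un, bp)
 *	    -> sd_pm_iostart(7, un, bp)
 *	      -> sd_core_iostart(8, un, bp)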
1689 */ 1690 1691 typedef void (*sd_chain_t)(int index, struct sd_lun *un, struct buf *bp); 1692 1693 1694 static sd_chain_t sd_iostart_chain[] = { 1695 1696 /* Chain for buf IO for disk drive targets (PM enabled) */ 1697 sd_mapblockaddr_iostart, /* Index: 0 */ 1698 sd_pm_iostart, /* Index: 1 */ 1699 sd_core_iostart, /* Index: 2 */ 1700 1701 /* Chain for buf IO for disk drive targets (PM disabled) */ 1702 sd_mapblockaddr_iostart, /* Index: 3 */ 1703 sd_core_iostart, /* Index: 4 */ 1704 1705 /* Chain for buf IO for removable-media targets (PM enabled) */ 1706 sd_mapblockaddr_iostart, /* Index: 5 */ 1707 sd_mapblocksize_iostart, /* Index: 6 */ 1708 sd_pm_iostart, /* Index: 7 */ 1709 sd_core_iostart, /* Index: 8 */ 1710 1711 /* Chain for buf IO for removable-media targets (PM disabled) */ 1712 sd_mapblockaddr_iostart, /* Index: 9 */ 1713 sd_mapblocksize_iostart, /* Index: 10 */ 1714 sd_core_iostart, /* Index: 11 */ 1715 1716 /* Chain for buf IO for disk drives with checksumming (PM enabled) */ 1717 sd_mapblockaddr_iostart, /* Index: 12 */ 1718 sd_checksum_iostart, /* Index: 13 */ 1719 sd_pm_iostart, /* Index: 14 */ 1720 sd_core_iostart, /* Index: 15 */ 1721 1722 /* Chain for buf IO for disk drives with checksumming (PM disabled) */ 1723 sd_mapblockaddr_iostart, /* Index: 16 */ 1724 sd_checksum_iostart, /* Index: 17 */ 1725 sd_core_iostart, /* Index: 18 */ 1726 1727 /* Chain for USCSI commands (all targets) */ 1728 sd_pm_iostart, /* Index: 19 */ 1729 sd_core_iostart, /* Index: 20 */ 1730 1731 /* Chain for checksumming USCSI commands (all targets) */ 1732 sd_checksum_uscsi_iostart, /* Index: 21 */ 1733 sd_pm_iostart, /* Index: 22 */ 1734 sd_core_iostart, /* Index: 23 */ 1735 1736 /* Chain for "direct" USCSI commands (all targets) */ 1737 sd_core_iostart, /* Index: 24 */ 1738 1739 /* Chain for "direct priority" USCSI commands (all targets) */ 1740 sd_core_iostart, /* Index: 25 */ 1741 }; 1742 1743 /* 1744 * Macros to locate the first function of each iostart chain in the 1745 * sd_iostart_chain[] array. These are located by the index in the array. 1746 */ 1747 #define SD_CHAIN_DISK_IOSTART 0 1748 #define SD_CHAIN_DISK_IOSTART_NO_PM 3 1749 #define SD_CHAIN_RMMEDIA_IOSTART 5 1750 #define SD_CHAIN_RMMEDIA_IOSTART_NO_PM 9 1751 #define SD_CHAIN_CHKSUM_IOSTART 12 1752 #define SD_CHAIN_CHKSUM_IOSTART_NO_PM 16 1753 #define SD_CHAIN_USCSI_CMD_IOSTART 19 1754 #define SD_CHAIN_USCSI_CHKSUM_IOSTART 21 1755 #define SD_CHAIN_DIRECT_CMD_IOSTART 24 1756 #define SD_CHAIN_PRIORITY_CMD_IOSTART 25 1757 1758 1759 /* 1760 * Table of function pointers for the iodone-side routines for the driver- 1761 * internal layering mechanism. The calling sequence for iodone routines 1762 * uses a decrementing table index, so the last routine called in a chain 1763 * must be at the lowest array index location for that chain. The last 1764 * routine for each chain must be either sd_buf_iodone() (for buf(9S) IOs) 1765 * or sd_uscsi_iodone() (for uscsi IOs). Other than this, the ordering 1766 * of the functions in an iodone side chain must correspond to the ordering 1767 * of the iostart routines for that chain. Note that there is no iodone 1768 * side routine that corresponds to sd_core_iostart(), so there is no 1769 * entry in the table for this. 
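 *
 * Continuing the example above (a sketch, not a trace of actual calls):
 * when that same removable-media IO completes, the core begins iodone
 * processing at the chain's highest index, and the index decrements at
 * each layer until the IO terminates at sd_buf_iodone():
 *
 *	sd_pm_iodone(8, un, bp)
 *	  -> sd_mapblocksize_iodone(7, un, bp)
 *	    -> sd_mapblockaddr_iodone(6, un, bp)
 *	      -> sd_buf_iodone(5, un, bp)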
1770 */
1771
1772 static sd_chain_t sd_iodone_chain[] = {
1773
1774 /* Chain for buf IO for disk drive targets (PM enabled) */
1775 sd_buf_iodone, /* Index: 0 */
1776 sd_mapblockaddr_iodone, /* Index: 1 */
1777 sd_pm_iodone, /* Index: 2 */
1778
1779 /* Chain for buf IO for disk drive targets (PM disabled) */
1780 sd_buf_iodone, /* Index: 3 */
1781 sd_mapblockaddr_iodone, /* Index: 4 */
1782
1783 /* Chain for buf IO for removable-media targets (PM enabled) */
1784 sd_buf_iodone, /* Index: 5 */
1785 sd_mapblockaddr_iodone, /* Index: 6 */
1786 sd_mapblocksize_iodone, /* Index: 7 */
1787 sd_pm_iodone, /* Index: 8 */
1788
1789 /* Chain for buf IO for removable-media targets (PM disabled) */
1790 sd_buf_iodone, /* Index: 9 */
1791 sd_mapblockaddr_iodone, /* Index: 10 */
1792 sd_mapblocksize_iodone, /* Index: 11 */
1793
1794 /* Chain for buf IO for disk drives with checksumming (PM enabled) */
1795 sd_buf_iodone, /* Index: 12 */
1796 sd_mapblockaddr_iodone, /* Index: 13 */
1797 sd_checksum_iodone, /* Index: 14 */
1798 sd_pm_iodone, /* Index: 15 */
1799
1800 /* Chain for buf IO for disk drives with checksumming (PM disabled) */
1801 sd_buf_iodone, /* Index: 16 */
1802 sd_mapblockaddr_iodone, /* Index: 17 */
1803 sd_checksum_iodone, /* Index: 18 */
1804
1805 /* Chain for USCSI commands (non-checksum targets) */
1806 sd_uscsi_iodone, /* Index: 19 */
1807 sd_pm_iodone, /* Index: 20 */
1808
1809 /* Chain for USCSI commands (checksum targets) */
1810 sd_uscsi_iodone, /* Index: 21 */
1811 sd_checksum_uscsi_iodone, /* Index: 22 */
1812 sd_pm_iodone, /* Index: 23 */
1813
1814 /* Chain for "direct" USCSI commands (all targets) */
1815 sd_uscsi_iodone, /* Index: 24 */
1816
1817 /* Chain for "direct priority" USCSI commands (all targets) */
1818 sd_uscsi_iodone, /* Index: 25 */
1819 };
1820
1821
1822 /*
1823 * Macros to locate the "first" function in the sd_iodone_chain[] array for
1824 * each iodone-side chain. These are located by the array index, but as the
1825 * iodone side functions are called in a decrementing-index order, the
1826 * highest index number in each chain must be specified (as these correspond
1827 * to the first function in the iodone chain that will be called by the core
1828 * at IO completion time).
1829 */
1830
1831 #define SD_CHAIN_DISK_IODONE 2
1832 #define SD_CHAIN_DISK_IODONE_NO_PM 4
1833 #define SD_CHAIN_RMMEDIA_IODONE 8
1834 #define SD_CHAIN_RMMEDIA_IODONE_NO_PM 11
1835 #define SD_CHAIN_CHKSUM_IODONE 15
1836 #define SD_CHAIN_CHKSUM_IODONE_NO_PM 18
1837 #define SD_CHAIN_USCSI_CMD_IODONE 20
1838 #define SD_CHAIN_USCSI_CHKSUM_IODONE 23
1839 #define SD_CHAIN_DIRECT_CMD_IODONE 24
1840 #define SD_CHAIN_PRIORITY_CMD_IODONE 25
1841
1842
1843
1844
1845 /*
1846 * Array to map a layering chain index to the appropriate initpkt routine.
1847 * The redundant entries are present so that the index used for accessing
1848 * the above sd_iostart_chain and sd_iodone_chain tables can be used directly
1849 * with this table as well.
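 *
 * For example (a sketch only; see the transport code further below for
 * the actual call sites), packet setup for a given xbuf can then be
 * dispatched without knowing which chain the IO belongs to:
 *
 *	rval = (*sd_initpkt_map[xp->xb_chain_iostart])(bp, &pktp);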
1850 */
1851 typedef int (*sd_initpkt_t)(struct buf *, struct scsi_pkt **);
1852
1853 static sd_initpkt_t sd_initpkt_map[] = {
1854
1855 /* Chain for buf IO for disk drive targets (PM enabled) */
1856 sd_initpkt_for_buf, /* Index: 0 */
1857 sd_initpkt_for_buf, /* Index: 1 */
1858 sd_initpkt_for_buf, /* Index: 2 */
1859
1860 /* Chain for buf IO for disk drive targets (PM disabled) */
1861 sd_initpkt_for_buf, /* Index: 3 */
1862 sd_initpkt_for_buf, /* Index: 4 */
1863
1864 /* Chain for buf IO for removable-media targets (PM enabled) */
1865 sd_initpkt_for_buf, /* Index: 5 */
1866 sd_initpkt_for_buf, /* Index: 6 */
1867 sd_initpkt_for_buf, /* Index: 7 */
1868 sd_initpkt_for_buf, /* Index: 8 */
1869
1870 /* Chain for buf IO for removable-media targets (PM disabled) */
1871 sd_initpkt_for_buf, /* Index: 9 */
1872 sd_initpkt_for_buf, /* Index: 10 */
1873 sd_initpkt_for_buf, /* Index: 11 */
1874
1875 /* Chain for buf IO for disk drives with checksumming (PM enabled) */
1876 sd_initpkt_for_buf, /* Index: 12 */
1877 sd_initpkt_for_buf, /* Index: 13 */
1878 sd_initpkt_for_buf, /* Index: 14 */
1879 sd_initpkt_for_buf, /* Index: 15 */
1880
1881 /* Chain for buf IO for disk drives with checksumming (PM disabled) */
1882 sd_initpkt_for_buf, /* Index: 16 */
1883 sd_initpkt_for_buf, /* Index: 17 */
1884 sd_initpkt_for_buf, /* Index: 18 */
1885
1886 /* Chain for USCSI commands (non-checksum targets) */
1887 sd_initpkt_for_uscsi, /* Index: 19 */
1888 sd_initpkt_for_uscsi, /* Index: 20 */
1889
1890 /* Chain for USCSI commands (checksum targets) */
1891 sd_initpkt_for_uscsi, /* Index: 21 */
1892 sd_initpkt_for_uscsi, /* Index: 22 */
1893 sd_initpkt_for_uscsi, /* Index: 23 */
1894
1895 /* Chain for "direct" USCSI commands (all targets) */
1896 sd_initpkt_for_uscsi, /* Index: 24 */
1897
1898 /* Chain for "direct priority" USCSI commands (all targets) */
1899 sd_initpkt_for_uscsi, /* Index: 25 */
1900
1901 };
1902
1903
1904 /*
1905 * Array to map a layering chain index to the appropriate destroypkt routine.
1906 * The redundant entries are present so that the index used for accessing
1907 * the above sd_iostart_chain and sd_iodone_chain tables can be used directly
1908 * with this table as well.
1909 */
1910 typedef void (*sd_destroypkt_t)(struct buf *);
1911
1912 static sd_destroypkt_t sd_destroypkt_map[] = {
1913
1914 /* Chain for buf IO for disk drive targets (PM enabled) */
1915 sd_destroypkt_for_buf, /* Index: 0 */
1916 sd_destroypkt_for_buf, /* Index: 1 */
1917 sd_destroypkt_for_buf, /* Index: 2 */
1918
1919 /* Chain for buf IO for disk drive targets (PM disabled) */
1920 sd_destroypkt_for_buf, /* Index: 3 */
1921 sd_destroypkt_for_buf, /* Index: 4 */
1922
1923 /* Chain for buf IO for removable-media targets (PM enabled) */
1924 sd_destroypkt_for_buf, /* Index: 5 */
1925 sd_destroypkt_for_buf, /* Index: 6 */
1926 sd_destroypkt_for_buf, /* Index: 7 */
1927 sd_destroypkt_for_buf, /* Index: 8 */
1928
1929 /* Chain for buf IO for removable-media targets (PM disabled) */
1930 sd_destroypkt_for_buf, /* Index: 9 */
1931 sd_destroypkt_for_buf, /* Index: 10 */
1932 sd_destroypkt_for_buf, /* Index: 11 */
1933
1934 /* Chain for buf IO for disk drives with checksumming (PM enabled) */
1935 sd_destroypkt_for_buf, /* Index: 12 */
1936 sd_destroypkt_for_buf, /* Index: 13 */
1937 sd_destroypkt_for_buf, /* Index: 14 */
1938 sd_destroypkt_for_buf, /* Index: 15 */
1939
1940 /* Chain for buf IO for disk drives with checksumming (PM disabled) */
1941 sd_destroypkt_for_buf, /* Index: 16 */
1942 sd_destroypkt_for_buf, /* Index: 17 */
1943 sd_destroypkt_for_buf, /* Index: 18 */
1944
1945 /* Chain for USCSI commands (non-checksum targets) */
1946 sd_destroypkt_for_uscsi, /* Index: 19 */
1947 sd_destroypkt_for_uscsi, /* Index: 20 */
1948
1949 /* Chain for USCSI commands (checksum targets) */
1950 sd_destroypkt_for_uscsi, /* Index: 21 */
1951 sd_destroypkt_for_uscsi, /* Index: 22 */
1952 sd_destroypkt_for_uscsi, /* Index: 23 */
1953
1954 /* Chain for "direct" USCSI commands (all targets) */
1955 sd_destroypkt_for_uscsi, /* Index: 24 */
1956
1957 /* Chain for "direct priority" USCSI commands (all targets) */
1958 sd_destroypkt_for_uscsi, /* Index: 25 */
1959
1960 };
1961
1962
1963
1964 /*
1965 * Array to map a layering chain index to the appropriate chain "type".
1966 * The chain type indicates a specific property/usage of the chain.
1967 * The redundant entries are present so that the index used for accessing
1968 * the above sd_iostart_chain and sd_iodone_chain tables can be used directly
1969 * with this table as well.
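 *
 * For example, completion code that must treat buf(9S) IO differently
 * from uscsi IO can test the chain type via the SD_IS_BUFIO() macro
 * defined below (an illustrative sketch only):
 *
 *	if (SD_IS_BUFIO(xp)) {
 *		... handle as buf(9S) IO (e.g., update iostat kstats) ...
 *	}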
1970 */
1971
1972 #define SD_CHAIN_NULL 0 /* for the special RQS cmd */
1973 #define SD_CHAIN_BUFIO 1 /* regular buf IO */
1974 #define SD_CHAIN_USCSI 2 /* regular USCSI commands */
1975 #define SD_CHAIN_DIRECT 3 /* uscsi, w/ bypass power mgt */
1976 #define SD_CHAIN_DIRECT_PRIORITY 4 /* uscsi, w/ bypass power mgt */
1977 /* (for error recovery) */
1978
1979 static int sd_chain_type_map[] = {
1980
1981 /* Chain for buf IO for disk drive targets (PM enabled) */
1982 SD_CHAIN_BUFIO, /* Index: 0 */
1983 SD_CHAIN_BUFIO, /* Index: 1 */
1984 SD_CHAIN_BUFIO, /* Index: 2 */
1985
1986 /* Chain for buf IO for disk drive targets (PM disabled) */
1987 SD_CHAIN_BUFIO, /* Index: 3 */
1988 SD_CHAIN_BUFIO, /* Index: 4 */
1989
1990 /* Chain for buf IO for removable-media targets (PM enabled) */
1991 SD_CHAIN_BUFIO, /* Index: 5 */
1992 SD_CHAIN_BUFIO, /* Index: 6 */
1993 SD_CHAIN_BUFIO, /* Index: 7 */
1994 SD_CHAIN_BUFIO, /* Index: 8 */
1995
1996 /* Chain for buf IO for removable-media targets (PM disabled) */
1997 SD_CHAIN_BUFIO, /* Index: 9 */
1998 SD_CHAIN_BUFIO, /* Index: 10 */
1999 SD_CHAIN_BUFIO, /* Index: 11 */
2000
2001 /* Chain for buf IO for disk drives with checksumming (PM enabled) */
2002 SD_CHAIN_BUFIO, /* Index: 12 */
2003 SD_CHAIN_BUFIO, /* Index: 13 */
2004 SD_CHAIN_BUFIO, /* Index: 14 */
2005 SD_CHAIN_BUFIO, /* Index: 15 */
2006
2007 /* Chain for buf IO for disk drives with checksumming (PM disabled) */
2008 SD_CHAIN_BUFIO, /* Index: 16 */
2009 SD_CHAIN_BUFIO, /* Index: 17 */
2010 SD_CHAIN_BUFIO, /* Index: 18 */
2011
2012 /* Chain for USCSI commands (non-checksum targets) */
2013 SD_CHAIN_USCSI, /* Index: 19 */
2014 SD_CHAIN_USCSI, /* Index: 20 */
2015
2016 /* Chain for USCSI commands (checksum targets) */
2017 SD_CHAIN_USCSI, /* Index: 21 */
2018 SD_CHAIN_USCSI, /* Index: 22 */
2019 SD_CHAIN_USCSI, /* Index: 23 */
2020
2021 /* Chain for "direct" USCSI commands (all targets) */
2022 SD_CHAIN_DIRECT, /* Index: 24 */
2023
2024 /* Chain for "direct priority" USCSI commands (all targets) */
2025 SD_CHAIN_DIRECT_PRIORITY, /* Index: 25 */
2026 };
2027
2028
2029 /* Macro to return TRUE if the IO has come from the sd_buf_iostart() chain. */
2030 #define SD_IS_BUFIO(xp) \
2031 (sd_chain_type_map[(xp)->xb_chain_iostart] == SD_CHAIN_BUFIO)
2032
2033 /* Macro to return TRUE if the IO has come from the "direct priority" chain. */
2034 #define SD_IS_DIRECT_PRIORITY(xp) \
2035 (sd_chain_type_map[(xp)->xb_chain_iostart] == SD_CHAIN_DIRECT_PRIORITY)
2036
2037
2038
2039 /*
2040 * Struct, array, and macros to map a specific chain to the appropriate
2041 * layering indexes in the sd_iostart_chain[] and sd_iodone_chain[] arrays.
2042 *
2043 * The sd_chain_index_map[] array is used at attach time to set the various
2044 * un_xxx_chain type members of the sd_lun softstate to the specific layering
2045 * chain to be used with the instance. This allows different instances to use
2046 * different chains for buf IO, uscsi IO, etc. Also, since the xb_chain_iostart
2047 * and xb_chain_iodone index values in the sd_xbuf are initialized to these
2048 * values at sd_xbuf init time, this allows (1) layering chains to be changed
2049 * dynamically and without the use of locking; and (2) a layer to update the
2050 * xb_chain_io[start|done] member in a given xbuf with its current index value,
2051 * to allow for deferred processing of an IO within the same chain from a
2052 * different execution context.
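 *
 * For example (an illustrative sketch only): at attach time an instance
 * of a removable-media device would be set up with
 *
 *	un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA;
 *
 * and when an xbuf is later initialized for that instance, the chain
 * entry points are copied out of the map:
 *
 *	xp->xb_chain_iostart =
 *	    sd_chain_index_map[un->un_buf_chain_type].sci_iostart_index;
 *	xp->xb_chain_iodone =
 *	    sd_chain_index_map[un->un_buf_chain_type].sci_iodone_index;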
2053 */ 2054 2055 struct sd_chain_index { 2056 int sci_iostart_index; 2057 int sci_iodone_index; 2058 }; 2059 2060 static struct sd_chain_index sd_chain_index_map[] = { 2061 { SD_CHAIN_DISK_IOSTART, SD_CHAIN_DISK_IODONE }, 2062 { SD_CHAIN_DISK_IOSTART_NO_PM, SD_CHAIN_DISK_IODONE_NO_PM }, 2063 { SD_CHAIN_RMMEDIA_IOSTART, SD_CHAIN_RMMEDIA_IODONE }, 2064 { SD_CHAIN_RMMEDIA_IOSTART_NO_PM, SD_CHAIN_RMMEDIA_IODONE_NO_PM }, 2065 { SD_CHAIN_CHKSUM_IOSTART, SD_CHAIN_CHKSUM_IODONE }, 2066 { SD_CHAIN_CHKSUM_IOSTART_NO_PM, SD_CHAIN_CHKSUM_IODONE_NO_PM }, 2067 { SD_CHAIN_USCSI_CMD_IOSTART, SD_CHAIN_USCSI_CMD_IODONE }, 2068 { SD_CHAIN_USCSI_CHKSUM_IOSTART, SD_CHAIN_USCSI_CHKSUM_IODONE }, 2069 { SD_CHAIN_DIRECT_CMD_IOSTART, SD_CHAIN_DIRECT_CMD_IODONE }, 2070 { SD_CHAIN_PRIORITY_CMD_IOSTART, SD_CHAIN_PRIORITY_CMD_IODONE }, 2071 }; 2072 2073 2074 /* 2075 * The following are indexes into the sd_chain_index_map[] array. 2076 */ 2077 2078 /* un->un_buf_chain_type must be set to one of these */ 2079 #define SD_CHAIN_INFO_DISK 0 2080 #define SD_CHAIN_INFO_DISK_NO_PM 1 2081 #define SD_CHAIN_INFO_RMMEDIA 2 2082 #define SD_CHAIN_INFO_RMMEDIA_NO_PM 3 2083 #define SD_CHAIN_INFO_CHKSUM 4 2084 #define SD_CHAIN_INFO_CHKSUM_NO_PM 5 2085 2086 /* un->un_uscsi_chain_type must be set to one of these */ 2087 #define SD_CHAIN_INFO_USCSI_CMD 6 2088 /* USCSI with PM disabled is the same as DIRECT */ 2089 #define SD_CHAIN_INFO_USCSI_CMD_NO_PM 8 2090 #define SD_CHAIN_INFO_USCSI_CHKSUM 7 2091 2092 /* un->un_direct_chain_type must be set to one of these */ 2093 #define SD_CHAIN_INFO_DIRECT_CMD 8 2094 2095 /* un->un_priority_chain_type must be set to one of these */ 2096 #define SD_CHAIN_INFO_PRIORITY_CMD 9 2097 2098 /* size for devid inquiries */ 2099 #define MAX_INQUIRY_SIZE 0xF0 2100 2101 /* 2102 * Macros used by functions to pass a given buf(9S) struct along to the 2103 * next function in the layering chain for further processing. 2104 * 2105 * In the following macros, passing more than three arguments to the called 2106 * routines causes the optimizer for the SPARC compiler to stop doing tail 2107 * call elimination which results in significant performance degradation. 2108 */ 2109 #define SD_BEGIN_IOSTART(index, un, bp) \ 2110 ((*(sd_iostart_chain[index]))(index, un, bp)) 2111 2112 #define SD_BEGIN_IODONE(index, un, bp) \ 2113 ((*(sd_iodone_chain[index]))(index, un, bp)) 2114 2115 #define SD_NEXT_IOSTART(index, un, bp) \ 2116 ((*(sd_iostart_chain[(index) + 1]))((index) + 1, un, bp)) 2117 2118 #define SD_NEXT_IODONE(index, un, bp) \ 2119 ((*(sd_iodone_chain[(index) - 1]))((index) - 1, un, bp)) 2120 2121 /* 2122 * Function: _init 2123 * 2124 * Description: This is the driver _init(9E) entry point. 2125 * 2126 * Return Code: Returns the value from mod_install(9F) or 2127 * ddi_soft_state_init(9F) as appropriate. 2128 * 2129 * Context: Called when driver module loaded. 
2130 */
2131
2132 int
2133 _init(void)
2134 {
2135 int err;
2136
2137 /* establish driver name from module name */
2138 sd_label = mod_modname(&modlinkage);
2139
2140 err = ddi_soft_state_init(&sd_state, sizeof (struct sd_lun),
2141 SD_MAXUNIT);
2142
2143 if (err != 0) {
2144 return (err);
2145 }
2146
2147 mutex_init(&sd_detach_mutex, NULL, MUTEX_DRIVER, NULL);
2148 mutex_init(&sd_log_mutex, NULL, MUTEX_DRIVER, NULL);
2149 mutex_init(&sd_label_mutex, NULL, MUTEX_DRIVER, NULL);
2150
2151 mutex_init(&sd_tr.srq_resv_reclaim_mutex, NULL, MUTEX_DRIVER, NULL);
2152 cv_init(&sd_tr.srq_resv_reclaim_cv, NULL, CV_DRIVER, NULL);
2153 cv_init(&sd_tr.srq_inprocess_cv, NULL, CV_DRIVER, NULL);
2154
2155 /*
2156 * it's ok to init here even for fibre device
2157 */
2158 sd_scsi_probe_cache_init();
2159
2160 sd_scsi_target_lun_init();
2161
2162 /*
2163 * Creating taskq before mod_install ensures that all callers (threads)
2164 * that enter the module after a successful mod_install encounter
2165 * a valid taskq.
2166 */
2167 sd_taskq_create();
2168
2169 err = mod_install(&modlinkage);
2170 if (err != 0) {
2171 /* delete taskq if install fails */
2172 sd_taskq_delete();
2173
2174 mutex_destroy(&sd_detach_mutex);
2175 mutex_destroy(&sd_log_mutex);
2176 mutex_destroy(&sd_label_mutex);
2177
2178 mutex_destroy(&sd_tr.srq_resv_reclaim_mutex);
2179 cv_destroy(&sd_tr.srq_resv_reclaim_cv);
2180 cv_destroy(&sd_tr.srq_inprocess_cv);
2181
2182 sd_scsi_probe_cache_fini();
2183
2184 sd_scsi_target_lun_fini();
2185
2186 ddi_soft_state_fini(&sd_state);
2187 return (err);
2188 }
2189
2190 return (err);
2191 }
2192
2193
2194 /*
2195 * Function: _fini
2196 *
2197 * Description: This is the driver _fini(9E) entry point.
2198 *
2199 * Return Code: Returns the value from mod_remove(9F)
2200 *
2201 * Context: Called when driver module is unloaded.
2202 */
2203
2204 int
2205 _fini(void)
2206 {
2207 int err;
2208
2209 if ((err = mod_remove(&modlinkage)) != 0) {
2210 return (err);
2211 }
2212
2213 sd_taskq_delete();
2214
2215 mutex_destroy(&sd_detach_mutex);
2216 mutex_destroy(&sd_log_mutex);
2217 mutex_destroy(&sd_label_mutex);
2218 mutex_destroy(&sd_tr.srq_resv_reclaim_mutex);
2219
2220 sd_scsi_probe_cache_fini();
2221
2222 sd_scsi_target_lun_fini();
2223
2224 cv_destroy(&sd_tr.srq_resv_reclaim_cv);
2225 cv_destroy(&sd_tr.srq_inprocess_cv);
2226
2227 ddi_soft_state_fini(&sd_state);
2228
2229 return (err);
2230 }
2231
2232
2233 /*
2234 * Function: _info
2235 *
2236 * Description: This is the driver _info(9E) entry point.
2237 *
2238 * Arguments: modinfop - pointer to the driver modinfo structure
2239 *
2240 * Return Code: Returns the value from mod_info(9F).
2241 *
2242 * Context: Kernel thread context
2243 */
2244
2245 int
2246 _info(struct modinfo *modinfop)
2247 {
2248 return (mod_info(&modlinkage, modinfop));
2249 }
2250
2251
2252 /*
2253 * The following routines implement the driver message logging facility.
2254 * They provide component- and level-based debug output filtering.
2255 * Output may also be restricted to messages for a single instance by
2256 * specifying a soft state pointer in sd_debug_un. If sd_debug_un is set
2257 * to NULL, then messages for all instances are printed.
2258 *
2259 * These routines have been cloned from each other due to the language
2260 * constraints of macros and variable argument list processing.
2261 */
2262
2263
2264 /*
2265 * Function: sd_log_err
2266 *
2267 * Description: This routine is called by the SD_ERROR macro for debug
2268 * logging of error conditions.
2269 *
2270 * Arguments: comp - driver component being logged
2271 * un - pointer to driver soft state (unit) structure
2272 * fmt - error string and format to be logged
2273 */
2274
2275 static void
2276 sd_log_err(uint_t comp, struct sd_lun *un, const char *fmt, ...)
2277 {
2278 va_list ap;
2279 dev_info_t *dev;
2280
2281 ASSERT(un != NULL);
2282 dev = SD_DEVINFO(un);
2283 ASSERT(dev != NULL);
2284
2285 /*
2286 * Filter messages based on the global component and level masks.
2287 * Also print if un matches the value of sd_debug_un, or if
2288 * sd_debug_un is set to NULL.
2289 */
2290 if ((sd_component_mask & comp) && (sd_level_mask & SD_LOGMASK_ERROR) &&
2291 ((sd_debug_un == NULL) || (sd_debug_un == un))) {
2292 mutex_enter(&sd_log_mutex);
2293 va_start(ap, fmt);
2294 (void) vsprintf(sd_log_buf, fmt, ap);
2295 va_end(ap);
2296 scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf);
2297 mutex_exit(&sd_log_mutex);
2298 }
2299 #ifdef SD_FAULT_INJECTION
2300 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask));
2301 if (un->sd_injection_mask & comp) {
2302 mutex_enter(&sd_log_mutex);
2303 va_start(ap, fmt);
2304 (void) vsprintf(sd_log_buf, fmt, ap);
2305 va_end(ap);
2306 sd_injection_log(sd_log_buf, un);
2307 mutex_exit(&sd_log_mutex);
2308 }
2309 #endif
2310 }
2311
2312
2313 /*
2314 * Function: sd_log_info
2315 *
2316 * Description: This routine is called by the SD_INFO macro for debug
2317 * logging of general purpose informational conditions.
2318 *
2319 * Arguments: component - driver component being logged
2320 * un - pointer to driver soft state (unit) structure
2321 * fmt - info string and format to be logged
2322 */
2323
2324 static void
2325 sd_log_info(uint_t component, struct sd_lun *un, const char *fmt, ...)
2326 {
2327 va_list ap;
2328 dev_info_t *dev;
2329
2330 ASSERT(un != NULL);
2331 dev = SD_DEVINFO(un);
2332 ASSERT(dev != NULL);
2333
2334 /*
2335 * Filter messages based on the global component and level masks.
2336 * Also print if un matches the value of sd_debug_un, or if
2337 * sd_debug_un is set to NULL.
2338 */
2339 if ((sd_component_mask & component) &&
2340 (sd_level_mask & SD_LOGMASK_INFO) &&
2341 ((sd_debug_un == NULL) || (sd_debug_un == un))) {
2342 mutex_enter(&sd_log_mutex);
2343 va_start(ap, fmt);
2344 (void) vsprintf(sd_log_buf, fmt, ap);
2345 va_end(ap);
2346 scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf);
2347 mutex_exit(&sd_log_mutex);
2348 }
2349 #ifdef SD_FAULT_INJECTION
2350 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask));
2351 if (un->sd_injection_mask & component) {
2352 mutex_enter(&sd_log_mutex);
2353 va_start(ap, fmt);
2354 (void) vsprintf(sd_log_buf, fmt, ap);
2355 va_end(ap);
2356 sd_injection_log(sd_log_buf, un);
2357 mutex_exit(&sd_log_mutex);
2358 }
2359 #endif
2360 }
2361
2362
2363 /*
2364 * Function: sd_log_trace
2365 *
2366 * Description: This routine is called by the SD_TRACE macro for debug
2367 * logging of trace conditions (i.e. function entry/exit).
2368 *
2369 * Arguments: component - driver component being logged
2370 * un - pointer to driver soft state (unit) structure
2371 * fmt - trace string and format to be logged
2372 */
2373
2374 static void
2375 sd_log_trace(uint_t component, struct sd_lun *un, const char *fmt, ...)
2376 {
2377 va_list ap;
2378 dev_info_t *dev;
2379
2380 ASSERT(un != NULL);
2381 dev = SD_DEVINFO(un);
2382 ASSERT(dev != NULL);
2383
2384 /*
2385 * Filter messages based on the global component and level masks.
2386 * Also print if un matches the value of sd_debug_un, or if
2387 * sd_debug_un is set to NULL.
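 *
 * For example, debug output might be enabled at boot with /etc/system
 * settings of the general form below (an illustrative sketch only; the
 * mask values shown are hypothetical, and for the ssd module the
 * variables carry the ssd_ prefix instead):
 *
 *	set sd:sd_component_mask = 0xffffffff
 *	set sd:sd_level_mask = 0x7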
2388 */ 2389 if ((sd_component_mask & component) && 2390 (sd_level_mask & SD_LOGMASK_TRACE) && 2391 ((sd_debug_un == NULL) || (sd_debug_un == un))) { 2392 mutex_enter(&sd_log_mutex); 2393 va_start(ap, fmt); 2394 (void) vsprintf(sd_log_buf, fmt, ap); 2395 va_end(ap); 2396 scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf); 2397 mutex_exit(&sd_log_mutex); 2398 } 2399 #ifdef SD_FAULT_INJECTION 2400 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask)); 2401 if (un->sd_injection_mask & component) { 2402 mutex_enter(&sd_log_mutex); 2403 va_start(ap, fmt); 2404 (void) vsprintf(sd_log_buf, fmt, ap); 2405 va_end(ap); 2406 sd_injection_log(sd_log_buf, un); 2407 mutex_exit(&sd_log_mutex); 2408 } 2409 #endif 2410 } 2411 2412 2413 /* 2414 * Function: sdprobe 2415 * 2416 * Description: This is the driver probe(9e) entry point function. 2417 * 2418 * Arguments: devi - opaque device info handle 2419 * 2420 * Return Code: DDI_PROBE_SUCCESS: If the probe was successful. 2421 * DDI_PROBE_FAILURE: If the probe failed. 2422 * DDI_PROBE_PARTIAL: If the instance is not present now, 2423 * but may be present in the future. 2424 */ 2425 2426 static int 2427 sdprobe(dev_info_t *devi) 2428 { 2429 struct scsi_device *devp; 2430 int rval; 2431 int instance; 2432 2433 /* 2434 * if it wasn't for pln, sdprobe could actually be nulldev 2435 * in the "__fibre" case. 2436 */ 2437 if (ddi_dev_is_sid(devi) == DDI_SUCCESS) { 2438 return (DDI_PROBE_DONTCARE); 2439 } 2440 2441 devp = ddi_get_driver_private(devi); 2442 2443 if (devp == NULL) { 2444 /* Ooops... nexus driver is mis-configured... */ 2445 return (DDI_PROBE_FAILURE); 2446 } 2447 2448 instance = ddi_get_instance(devi); 2449 2450 if (ddi_get_soft_state(sd_state, instance) != NULL) { 2451 return (DDI_PROBE_PARTIAL); 2452 } 2453 2454 /* 2455 * Call the SCSA utility probe routine to see if we actually 2456 * have a target at this SCSI nexus. 2457 */ 2458 switch (sd_scsi_probe_with_cache(devp, NULL_FUNC)) { 2459 case SCSIPROBE_EXISTS: 2460 switch (devp->sd_inq->inq_dtype) { 2461 case DTYPE_DIRECT: 2462 rval = DDI_PROBE_SUCCESS; 2463 break; 2464 case DTYPE_RODIRECT: 2465 /* CDs etc. Can be removable media */ 2466 rval = DDI_PROBE_SUCCESS; 2467 break; 2468 case DTYPE_OPTICAL: 2469 /* 2470 * Rewritable optical driver HP115AA 2471 * Can also be removable media 2472 */ 2473 2474 /* 2475 * Do not attempt to bind to DTYPE_OPTICAL if 2476 * pre solaris 9 sparc sd behavior is required 2477 * 2478 * If first time through and sd_dtype_optical_bind 2479 * has not been set in /etc/system check properties 2480 */ 2481 2482 if (sd_dtype_optical_bind < 0) { 2483 sd_dtype_optical_bind = ddi_prop_get_int 2484 (DDI_DEV_T_ANY, devi, 0, 2485 "optical-device-bind", 1); 2486 } 2487 2488 if (sd_dtype_optical_bind == 0) { 2489 rval = DDI_PROBE_FAILURE; 2490 } else { 2491 rval = DDI_PROBE_SUCCESS; 2492 } 2493 break; 2494 2495 case DTYPE_NOTPRESENT: 2496 default: 2497 rval = DDI_PROBE_FAILURE; 2498 break; 2499 } 2500 break; 2501 default: 2502 rval = DDI_PROBE_PARTIAL; 2503 break; 2504 } 2505 2506 /* 2507 * This routine checks for resource allocation prior to freeing, 2508 * so it will take care of the "smart probing" case where a 2509 * scsi_probe() may or may not have been issued and will *not* 2510 * free previously-freed resources. 2511 */ 2512 scsi_unprobe(devp); 2513 return (rval); 2514 } 2515 2516 2517 /* 2518 * Function: sdinfo 2519 * 2520 * Description: This is the driver getinfo(9e) entry point function. 
2521 * Given the device number, return the devinfo pointer from 2522 * the scsi_device structure or the instance number 2523 * associated with the dev_t. 2524 * 2525 * Arguments: dip - pointer to device info structure 2526 * infocmd - command argument (DDI_INFO_DEVT2DEVINFO, 2527 * DDI_INFO_DEVT2INSTANCE) 2528 * arg - driver dev_t 2529 * resultp - user buffer for request response 2530 * 2531 * Return Code: DDI_SUCCESS 2532 * DDI_FAILURE 2533 */ 2534 /* ARGSUSED */ 2535 static int 2536 sdinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result) 2537 { 2538 struct sd_lun *un; 2539 dev_t dev; 2540 int instance; 2541 int error; 2542 2543 switch (infocmd) { 2544 case DDI_INFO_DEVT2DEVINFO: 2545 dev = (dev_t)arg; 2546 instance = SDUNIT(dev); 2547 if ((un = ddi_get_soft_state(sd_state, instance)) == NULL) { 2548 return (DDI_FAILURE); 2549 } 2550 *result = (void *) SD_DEVINFO(un); 2551 error = DDI_SUCCESS; 2552 break; 2553 case DDI_INFO_DEVT2INSTANCE: 2554 dev = (dev_t)arg; 2555 instance = SDUNIT(dev); 2556 *result = (void *)(uintptr_t)instance; 2557 error = DDI_SUCCESS; 2558 break; 2559 default: 2560 error = DDI_FAILURE; 2561 } 2562 return (error); 2563 } 2564 2565 /* 2566 * Function: sd_prop_op 2567 * 2568 * Description: This is the driver prop_op(9e) entry point function. 2569 * Return the number of blocks for the partition in question 2570 * or forward the request to the property facilities. 2571 * 2572 * Arguments: dev - device number 2573 * dip - pointer to device info structure 2574 * prop_op - property operator 2575 * mod_flags - DDI_PROP_DONTPASS, don't pass to parent 2576 * name - pointer to property name 2577 * valuep - pointer or address of the user buffer 2578 * lengthp - property length 2579 * 2580 * Return Code: DDI_PROP_SUCCESS 2581 * DDI_PROP_NOT_FOUND 2582 * DDI_PROP_UNDEFINED 2583 * DDI_PROP_NO_MEMORY 2584 * DDI_PROP_BUF_TOO_SMALL 2585 */ 2586 2587 static int 2588 sd_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, int mod_flags, 2589 char *name, caddr_t valuep, int *lengthp) 2590 { 2591 int instance = ddi_get_instance(dip); 2592 struct sd_lun *un; 2593 uint64_t nblocks64; 2594 uint_t dblk; 2595 2596 /* 2597 * Our dynamic properties are all device specific and size oriented. 2598 * Requests issued under conditions where size is valid are passed 2599 * to ddi_prop_op_nblocks with the size information, otherwise the 2600 * request is passed to ddi_prop_op. Size depends on valid geometry. 
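 *
 * For example (a sketch of the intended flow, not an additional code
 * path): a "Nblocks" or "Size" request against a partition of a labeled
 * disk is answered below via ddi_prop_op_nblocks_blksize() using the
 * cmlb partition info, while the same request against an unlabeled
 * device simply falls through to ddi_prop_op(9F).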
2601 */ 2602 un = ddi_get_soft_state(sd_state, instance); 2603 if ((dev == DDI_DEV_T_ANY) || (un == NULL)) { 2604 return (ddi_prop_op(dev, dip, prop_op, mod_flags, 2605 name, valuep, lengthp)); 2606 } else if (!SD_IS_VALID_LABEL(un)) { 2607 return (ddi_prop_op(dev, dip, prop_op, mod_flags, name, 2608 valuep, lengthp)); 2609 } 2610 2611 /* get nblocks value */ 2612 ASSERT(!mutex_owned(SD_MUTEX(un))); 2613 2614 (void) cmlb_partinfo(un->un_cmlbhandle, SDPART(dev), 2615 (diskaddr_t *)&nblocks64, NULL, NULL, NULL, (void *)SD_PATH_DIRECT); 2616 2617 /* report size in target size blocks */ 2618 dblk = un->un_tgt_blocksize / un->un_sys_blocksize; 2619 return (ddi_prop_op_nblocks_blksize(dev, dip, prop_op, mod_flags, 2620 name, valuep, lengthp, nblocks64 / dblk, un->un_tgt_blocksize)); 2621 } 2622 2623 /* 2624 * The following functions are for smart probing: 2625 * sd_scsi_probe_cache_init() 2626 * sd_scsi_probe_cache_fini() 2627 * sd_scsi_clear_probe_cache() 2628 * sd_scsi_probe_with_cache() 2629 */ 2630 2631 /* 2632 * Function: sd_scsi_probe_cache_init 2633 * 2634 * Description: Initializes the probe response cache mutex and head pointer. 2635 * 2636 * Context: Kernel thread context 2637 */ 2638 2639 static void 2640 sd_scsi_probe_cache_init(void) 2641 { 2642 mutex_init(&sd_scsi_probe_cache_mutex, NULL, MUTEX_DRIVER, NULL); 2643 sd_scsi_probe_cache_head = NULL; 2644 } 2645 2646 2647 /* 2648 * Function: sd_scsi_probe_cache_fini 2649 * 2650 * Description: Frees all resources associated with the probe response cache. 2651 * 2652 * Context: Kernel thread context 2653 */ 2654 2655 static void 2656 sd_scsi_probe_cache_fini(void) 2657 { 2658 struct sd_scsi_probe_cache *cp; 2659 struct sd_scsi_probe_cache *ncp; 2660 2661 /* Clean up our smart probing linked list */ 2662 for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = ncp) { 2663 ncp = cp->next; 2664 kmem_free(cp, sizeof (struct sd_scsi_probe_cache)); 2665 } 2666 sd_scsi_probe_cache_head = NULL; 2667 mutex_destroy(&sd_scsi_probe_cache_mutex); 2668 } 2669 2670 2671 /* 2672 * Function: sd_scsi_clear_probe_cache 2673 * 2674 * Description: This routine clears the probe response cache. This is 2675 * done when open() returns ENXIO so that when deferred 2676 * attach is attempted (possibly after a device has been 2677 * turned on) we will retry the probe. Since we don't know 2678 * which target we failed to open, we just clear the 2679 * entire cache. 2680 * 2681 * Context: Kernel thread context 2682 */ 2683 2684 static void 2685 sd_scsi_clear_probe_cache(void) 2686 { 2687 struct sd_scsi_probe_cache *cp; 2688 int i; 2689 2690 mutex_enter(&sd_scsi_probe_cache_mutex); 2691 for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = cp->next) { 2692 /* 2693 * Reset all entries to SCSIPROBE_EXISTS. This will 2694 * force probing to be performed the next time 2695 * sd_scsi_probe_with_cache is called. 2696 */ 2697 for (i = 0; i < NTARGETS_WIDE; i++) { 2698 cp->cache[i] = SCSIPROBE_EXISTS; 2699 } 2700 } 2701 mutex_exit(&sd_scsi_probe_cache_mutex); 2702 } 2703 2704 2705 /* 2706 * Function: sd_scsi_probe_with_cache 2707 * 2708 * Description: This routine implements support for a scsi device probe 2709 * with cache. The driver maintains a cache of the target 2710 * responses to scsi probes. If we get no response from a 2711 * target during a probe inquiry, we remember that, and we 2712 * avoid additional calls to scsi_probe on non-zero LUNs 2713 * on the same target until the cache is cleared. 
By doing 2714 * so we avoid the 1/4 sec selection timeout for nonzero 2715 * LUNs. lun0 of a target is always probed. 2716 * 2717 * Arguments: devp - Pointer to a scsi_device(9S) structure 2718 * waitfunc - indicates what the allocator routines should 2719 * do when resources are not available. This value 2720 * is passed on to scsi_probe() when that routine 2721 * is called. 2722 * 2723 * Return Code: SCSIPROBE_NORESP if a NORESP in probe response cache; 2724 * otherwise the value returned by scsi_probe(9F). 2725 * 2726 * Context: Kernel thread context 2727 */ 2728 2729 static int 2730 sd_scsi_probe_with_cache(struct scsi_device *devp, int (*waitfn)()) 2731 { 2732 struct sd_scsi_probe_cache *cp; 2733 dev_info_t *pdip = ddi_get_parent(devp->sd_dev); 2734 int lun, tgt; 2735 2736 lun = ddi_prop_get_int(DDI_DEV_T_ANY, devp->sd_dev, DDI_PROP_DONTPASS, 2737 SCSI_ADDR_PROP_LUN, 0); 2738 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devp->sd_dev, DDI_PROP_DONTPASS, 2739 SCSI_ADDR_PROP_TARGET, -1); 2740 2741 /* Make sure caching enabled and target in range */ 2742 if ((tgt < 0) || (tgt >= NTARGETS_WIDE)) { 2743 /* do it the old way (no cache) */ 2744 return (scsi_probe(devp, waitfn)); 2745 } 2746 2747 mutex_enter(&sd_scsi_probe_cache_mutex); 2748 2749 /* Find the cache for this scsi bus instance */ 2750 for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = cp->next) { 2751 if (cp->pdip == pdip) { 2752 break; 2753 } 2754 } 2755 2756 /* If we can't find a cache for this pdip, create one */ 2757 if (cp == NULL) { 2758 int i; 2759 2760 cp = kmem_zalloc(sizeof (struct sd_scsi_probe_cache), 2761 KM_SLEEP); 2762 cp->pdip = pdip; 2763 cp->next = sd_scsi_probe_cache_head; 2764 sd_scsi_probe_cache_head = cp; 2765 for (i = 0; i < NTARGETS_WIDE; i++) { 2766 cp->cache[i] = SCSIPROBE_EXISTS; 2767 } 2768 } 2769 2770 mutex_exit(&sd_scsi_probe_cache_mutex); 2771 2772 /* Recompute the cache for this target if LUN zero */ 2773 if (lun == 0) { 2774 cp->cache[tgt] = SCSIPROBE_EXISTS; 2775 } 2776 2777 /* Don't probe if cache remembers a NORESP from a previous LUN. */ 2778 if (cp->cache[tgt] != SCSIPROBE_EXISTS) { 2779 return (SCSIPROBE_NORESP); 2780 } 2781 2782 /* Do the actual probe; save & return the result */ 2783 return (cp->cache[tgt] = scsi_probe(devp, waitfn)); 2784 } 2785 2786 2787 /* 2788 * Function: sd_scsi_target_lun_init 2789 * 2790 * Description: Initializes the attached lun chain mutex and head pointer. 2791 * 2792 * Context: Kernel thread context 2793 */ 2794 2795 static void 2796 sd_scsi_target_lun_init(void) 2797 { 2798 mutex_init(&sd_scsi_target_lun_mutex, NULL, MUTEX_DRIVER, NULL); 2799 sd_scsi_target_lun_head = NULL; 2800 } 2801 2802 2803 /* 2804 * Function: sd_scsi_target_lun_fini 2805 * 2806 * Description: Frees all resources associated with the attached lun 2807 * chain 2808 * 2809 * Context: Kernel thread context 2810 */ 2811 2812 static void 2813 sd_scsi_target_lun_fini(void) 2814 { 2815 struct sd_scsi_hba_tgt_lun *cp; 2816 struct sd_scsi_hba_tgt_lun *ncp; 2817 2818 for (cp = sd_scsi_target_lun_head; cp != NULL; cp = ncp) { 2819 ncp = cp->next; 2820 kmem_free(cp, sizeof (struct sd_scsi_hba_tgt_lun)); 2821 } 2822 sd_scsi_target_lun_head = NULL; 2823 mutex_destroy(&sd_scsi_target_lun_mutex); 2824 } 2825 2826 2827 /* 2828 * Function: sd_scsi_get_target_lun_count 2829 * 2830 * Description: This routine will check in the attached lun chain to see 2831 * how many luns are attached on the required SCSI controller 2832 * and target. 
Currently, some capabilities like tagged
2833 * queueing are supported per target by the HBA, so all luns in a
2834 * target have the same capabilities. Based on this assumption,
2835 * sd should only set these capabilities once per target. This
2836 * function is called when sd needs to decide how many luns are
2837 * already attached on a target.
2838 *
2839 * Arguments: dip - Pointer to the system's dev_info_t for the SCSI
2840 * controller device.
2841 * target - The target ID on the controller's SCSI bus.
2842 *
2843 * Return Code: The number of luns attached on the required target and
2844 * controller.
2845 * -1 if target ID is not in parallel SCSI scope or the given
2846 * dip is not in the chain.
2847 *
2848 * Context: Kernel thread context
2849 */
2850
2851 static int
2852 sd_scsi_get_target_lun_count(dev_info_t *dip, int target)
2853 {
2854 struct sd_scsi_hba_tgt_lun *cp;
2855
2856 if ((target < 0) || (target >= NTARGETS_WIDE)) {
2857 return (-1);
2858 }
2859
2860 mutex_enter(&sd_scsi_target_lun_mutex);
2861
2862 for (cp = sd_scsi_target_lun_head; cp != NULL; cp = cp->next) {
2863 if (cp->pdip == dip) {
2864 break;
2865 }
2866 }
2867
2868 mutex_exit(&sd_scsi_target_lun_mutex);
2869
2870 if (cp == NULL) {
2871 return (-1);
2872 }
2873
2874 return (cp->nlun[target]);
2875 }
2876
2877
2878 /*
2879 * Function: sd_scsi_update_lun_on_target
2880 *
2881 * Description: This routine is used to update the attached lun chain when a
2882 * lun is attached or detached on a target.
2883 *
2884 * Arguments: dip - Pointer to the system's dev_info_t for the SCSI
2885 * controller device.
2886 * target - The target ID on the controller's SCSI bus.
2887 * flag - Indicate the lun is attached or detached.
2888 *
2889 * Context: Kernel thread context
2890 */
2891
2892 static void
2893 sd_scsi_update_lun_on_target(dev_info_t *dip, int target, int flag)
2894 {
2895 struct sd_scsi_hba_tgt_lun *cp;
2896
2897 mutex_enter(&sd_scsi_target_lun_mutex);
2898
2899 for (cp = sd_scsi_target_lun_head; cp != NULL; cp = cp->next) {
2900 if (cp->pdip == dip) {
2901 break;
2902 }
2903 }
2904
2905 if ((cp == NULL) && (flag == SD_SCSI_LUN_ATTACH)) {
2906 cp = kmem_zalloc(sizeof (struct sd_scsi_hba_tgt_lun),
2907 KM_SLEEP);
2908 cp->pdip = dip;
2909 cp->next = sd_scsi_target_lun_head;
2910 sd_scsi_target_lun_head = cp;
2911 }
2912
2913 mutex_exit(&sd_scsi_target_lun_mutex);
2914
2915 if (cp != NULL) {
2916 if (flag == SD_SCSI_LUN_ATTACH) {
2917 cp->nlun[target]++;
2918 } else {
2919 cp->nlun[target]--;
2920 }
2921 }
2922 }
2923
2924
2925 /*
2926 * Function: sd_spin_up_unit
2927 *
2928 * Description: Issues the following commands to spin up the device:
2929 * START STOP UNIT and INQUIRY.
2930 *
2931 * Arguments: un - driver soft state (unit) structure
2932 *
2933 * Return Code: 0 - success
2934 * EIO - failure
2935 * EACCES - reservation conflict
2936 *
2937 * Context: Kernel thread context
2938 */
2939
2940 static int
2941 sd_spin_up_unit(struct sd_lun *un)
2942 {
2943 size_t resid = 0;
2944 int has_conflict = FALSE;
2945 uchar_t *bufaddr;
2946
2947 ASSERT(un != NULL);
2948
2949 /*
2950 * Send a throwaway START UNIT command.
2951 *
2952 * If we fail on this, we don't care presently what precisely
2953 * is wrong. EMC's arrays will also fail this with a check
2954 * condition (0x2/0x4/0x3) if the device is "inactive," but
2955 * we don't want to fail the attach because it may become
2956 * "active" later.
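 * (Decoded, that check condition is sense key 0x2 (NOT READY) with
 * ASC/ASCQ 0x04/0x03, "logical unit not ready, manual intervention
 * required".)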
2957 */
2958 if (sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_START, SD_PATH_DIRECT)
2959 == EACCES)
2960 has_conflict = TRUE;
2961
2962 /*
2963 * Send another INQUIRY command to the target. This is necessary for
2964 * non-removable media direct access devices because their INQUIRY data
2965 * may not be fully qualified until they are spun up (perhaps via the
2966 * START command above). (Note: This seems to be needed for some
2967 * legacy devices only.) The INQUIRY command should succeed even if a
2968 * Reservation Conflict is present.
2969 */
2970 bufaddr = kmem_zalloc(SUN_INQSIZE, KM_SLEEP);
2971 if (sd_send_scsi_INQUIRY(un, bufaddr, SUN_INQSIZE, 0, 0, &resid) != 0) {
2972 kmem_free(bufaddr, SUN_INQSIZE);
2973 return (EIO);
2974 }
2975
2976 /*
2977 * If we got enough INQUIRY data, copy it over the old INQUIRY data.
2978 * Note that this routine does not return a failure here even if the
2979 * INQUIRY command did not return any data. This is a legacy behavior.
2980 */
2981 if ((SUN_INQSIZE - resid) >= SUN_MIN_INQLEN) {
2982 bcopy(bufaddr, SD_INQUIRY(un), SUN_INQSIZE);
2983 }
2984
2985 kmem_free(bufaddr, SUN_INQSIZE);
2986
2987 /* If we hit a reservation conflict above, tell the caller. */
2988 if (has_conflict == TRUE) {
2989 return (EACCES);
2990 }
2991
2992 return (0);
2993 }
2994
2995 #ifdef _LP64
2996 /*
2997 * Function: sd_enable_descr_sense
2998 *
2999 * Description: This routine attempts to select descriptor sense format
3000 * using the Control mode page. Devices that support 64 bit
3001 * LBAs (for >2TB luns) should also implement descriptor
3002 * sense data so we will call this function whenever we see
3003 * a lun larger than 2TB. If for some reason the device
3004 * supports 64 bit LBAs but doesn't support descriptor sense,
3005 * presumably the mode select will fail. Everything will
3006 * continue to work normally except that we will not get
3007 * complete sense data for commands that fail with an LBA
3008 * larger than 32 bits.
3009 *
3010 * Arguments: un - driver soft state (unit) structure
3011 *
3012 * Context: Kernel thread context only
3013 */
3014
3015 static void
3016 sd_enable_descr_sense(struct sd_lun *un)
3017 {
3018 uchar_t *header;
3019 struct mode_control_scsi3 *ctrl_bufp;
3020 size_t buflen;
3021 size_t bd_len;
3022
3023 /*
3024 * Read MODE SENSE page 0xA, Control Mode Page
3025 */
3026 buflen = MODE_HEADER_LENGTH + MODE_BLK_DESC_LENGTH +
3027 sizeof (struct mode_control_scsi3);
3028 header = kmem_zalloc(buflen, KM_SLEEP);
3029 if (sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, header, buflen,
3030 MODEPAGE_CTRL_MODE, SD_PATH_DIRECT) != 0) {
3031 SD_ERROR(SD_LOG_COMMON, un,
3032 "sd_enable_descr_sense: mode sense ctrl page failed\n");
3033 goto eds_exit;
3034 }
3035
3036 /*
3037 * Determine size of Block Descriptors in order to locate
3038 * the mode page data. ATAPI devices return 0, SCSI devices
3039 * should return MODE_BLK_DESC_LENGTH.
3040 */
3041 bd_len = ((struct mode_header *)header)->bdesc_length;
3042
3043 /* Clear the mode data length field for MODE SELECT */
3044 ((struct mode_header *)header)->length = 0;
3045
3046 ctrl_bufp = (struct mode_control_scsi3 *)
3047 (header + MODE_HEADER_LENGTH + bd_len);
3048
3049 /*
3050 * If the page length is smaller than the expected value,
3051 * the target device doesn't support D_SENSE. Bail out here.
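 * (The expected value is sizeof (struct mode_control_scsi3) - 2
 * because a mode page's length field does not count the two bytes of
 * page header (the page code and page length fields) itself.)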
3052 */ 3053 if (ctrl_bufp->mode_page.length < 3054 sizeof (struct mode_control_scsi3) - 2) { 3055 SD_ERROR(SD_LOG_COMMON, un, 3056 "sd_enable_descr_sense: enable D_SENSE failed\n"); 3057 goto eds_exit; 3058 } 3059 3060 /* 3061 * Clear PS bit for MODE SELECT 3062 */ 3063 ctrl_bufp->mode_page.ps = 0; 3064 3065 /* 3066 * Set D_SENSE to enable descriptor sense format. 3067 */ 3068 ctrl_bufp->d_sense = 1; 3069 3070 /* 3071 * Use MODE SELECT to commit the change to the D_SENSE bit 3072 */ 3073 if (sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, header, 3074 buflen, SD_DONTSAVE_PAGE, SD_PATH_DIRECT) != 0) { 3075 SD_INFO(SD_LOG_COMMON, un, 3076 "sd_enable_descr_sense: mode select ctrl page failed\n"); 3077 goto eds_exit; 3078 } 3079 3080 eds_exit: 3081 kmem_free(header, buflen); 3082 } 3083 3084 /* 3085 * Function: sd_reenable_dsense_task 3086 * 3087 * Description: Re-enable descriptor sense after device or bus reset 3088 * 3089 * Context: Executes in a taskq() thread context 3090 */ 3091 static void 3092 sd_reenable_dsense_task(void *arg) 3093 { 3094 struct sd_lun *un = arg; 3095 3096 ASSERT(un != NULL); 3097 sd_enable_descr_sense(un); 3098 } 3099 #endif /* _LP64 */ 3100 3101 /* 3102 * Function: sd_set_mmc_caps 3103 * 3104 * Description: This routine determines if the device is MMC compliant and if 3105 * the device supports CDDA via a mode sense of the CDVD 3106 * capabilities mode page. Also checks if the device is a 3107 * dvdram writable device. 3108 * 3109 * Arguments: un - driver soft state (unit) structure 3110 * 3111 * Context: Kernel thread context only 3112 */ 3113 3114 static void 3115 sd_set_mmc_caps(struct sd_lun *un) 3116 { 3117 struct mode_header_grp2 *sense_mhp; 3118 uchar_t *sense_page; 3119 caddr_t buf; 3120 int bd_len; 3121 int status; 3122 struct uscsi_cmd com; 3123 int rtn; 3124 uchar_t *out_data_rw, *out_data_hd; 3125 uchar_t *rqbuf_rw, *rqbuf_hd; 3126 3127 ASSERT(un != NULL); 3128 3129 /* 3130 * The flags which will be set in this function are - mmc compliant, 3131 * dvdram writable device, cdda support. Initialize them to FALSE 3132 * and if a capability is detected - it will be set to TRUE. 3133 */ 3134 un->un_f_mmc_cap = FALSE; 3135 un->un_f_dvdram_writable_device = FALSE; 3136 un->un_f_cfg_cdda = FALSE; 3137 3138 buf = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP); 3139 status = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, (uchar_t *)buf, 3140 BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP, SD_PATH_DIRECT); 3141 3142 if (status != 0) { 3143 /* command failed; just return */ 3144 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3145 return; 3146 } 3147 /* 3148 * If the mode sense request for the CDROM CAPABILITIES 3149 * page (0x2A) succeeds the device is assumed to be MMC. 3150 */ 3151 un->un_f_mmc_cap = TRUE; 3152 3153 /* Get to the page data */ 3154 sense_mhp = (struct mode_header_grp2 *)buf; 3155 bd_len = (sense_mhp->bdesc_length_hi << 8) | 3156 sense_mhp->bdesc_length_lo; 3157 if (bd_len > MODE_BLK_DESC_LENGTH) { 3158 /* 3159 * We did not get back the expected block descriptor 3160 * length so we cannot determine if the device supports 3161 * CDDA. However, we still indicate the device is MMC 3162 * according to the successful response to the page 3163 * 0x2A mode sense request. 
3164 */ 3165 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3166 "sd_set_mmc_caps: Mode Sense returned " 3167 "invalid block descriptor length\n"); 3168 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3169 return; 3170 } 3171 3172 /* See if read CDDA is supported */ 3173 sense_page = (uchar_t *)(buf + MODE_HEADER_LENGTH_GRP2 + 3174 bd_len); 3175 un->un_f_cfg_cdda = (sense_page[5] & 0x01) ? TRUE : FALSE; 3176 3177 /* See if writing DVD RAM is supported. */ 3178 un->un_f_dvdram_writable_device = (sense_page[3] & 0x20) ? TRUE : FALSE; 3179 if (un->un_f_dvdram_writable_device == TRUE) { 3180 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3181 return; 3182 } 3183 3184 /* 3185 * If the device presents DVD or CD capabilities in the mode 3186 * page, we can return here since a RRD will not have 3187 * these capabilities. 3188 */ 3189 if ((sense_page[2] & 0x3f) || (sense_page[3] & 0x3f)) { 3190 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3191 return; 3192 } 3193 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3194 3195 /* 3196 * If un->un_f_dvdram_writable_device is still FALSE, 3197 * check for a Removable Rigid Disk (RRD). A RRD 3198 * device is identified by the features RANDOM_WRITABLE and 3199 * HARDWARE_DEFECT_MANAGEMENT. 3200 */ 3201 out_data_rw = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP); 3202 rqbuf_rw = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 3203 3204 rtn = sd_send_scsi_feature_GET_CONFIGURATION(un, &com, rqbuf_rw, 3205 SENSE_LENGTH, out_data_rw, SD_CURRENT_FEATURE_LEN, 3206 RANDOM_WRITABLE, SD_PATH_STANDARD); 3207 if (rtn != 0) { 3208 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN); 3209 kmem_free(rqbuf_rw, SENSE_LENGTH); 3210 return; 3211 } 3212 3213 out_data_hd = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP); 3214 rqbuf_hd = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 3215 3216 rtn = sd_send_scsi_feature_GET_CONFIGURATION(un, &com, rqbuf_hd, 3217 SENSE_LENGTH, out_data_hd, SD_CURRENT_FEATURE_LEN, 3218 HARDWARE_DEFECT_MANAGEMENT, SD_PATH_STANDARD); 3219 if (rtn == 0) { 3220 /* 3221 * We have good information, check for random writable 3222 * and hardware defect features. 3223 */ 3224 if ((out_data_rw[9] & RANDOM_WRITABLE) && 3225 (out_data_hd[9] & HARDWARE_DEFECT_MANAGEMENT)) { 3226 un->un_f_dvdram_writable_device = TRUE; 3227 } 3228 } 3229 3230 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN); 3231 kmem_free(rqbuf_rw, SENSE_LENGTH); 3232 kmem_free(out_data_hd, SD_CURRENT_FEATURE_LEN); 3233 kmem_free(rqbuf_hd, SENSE_LENGTH); 3234 } 3235 3236 /* 3237 * Function: sd_check_for_writable_cd 3238 * 3239 * Description: This routine determines if the media in the device is 3240 * writable or not. It uses the get configuration command (0x46) 3241 * to determine if the media is writable 3242 * 3243 * Arguments: un - driver soft state (unit) structure 3244 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" 3245 * chain and the normal command waitq, or 3246 * SD_PATH_DIRECT_PRIORITY to use the USCSI 3247 * "direct" chain and bypass the normal command 3248 * waitq. 3249 * 3250 * Context: Never called at interrupt context. 
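 *
 * Note: the writable-DVD test below checks whether bytes 6-7 of the
 * GET CONFIGURATION header (the current profile) equal 0x0012, which
 * is the MMC profile number for DVD-RAM.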
3251 */ 3252 3253 static void 3254 sd_check_for_writable_cd(struct sd_lun *un, int path_flag) 3255 { 3256 struct uscsi_cmd com; 3257 uchar_t *out_data; 3258 uchar_t *rqbuf; 3259 int rtn; 3260 uchar_t *out_data_rw, *out_data_hd; 3261 uchar_t *rqbuf_rw, *rqbuf_hd; 3262 struct mode_header_grp2 *sense_mhp; 3263 uchar_t *sense_page; 3264 caddr_t buf; 3265 int bd_len; 3266 int status; 3267 3268 ASSERT(un != NULL); 3269 ASSERT(mutex_owned(SD_MUTEX(un))); 3270 3271 /* 3272 * Initialize the writable media to false, if configuration info. 3273 * tells us otherwise then only we will set it. 3274 */ 3275 un->un_f_mmc_writable_media = FALSE; 3276 mutex_exit(SD_MUTEX(un)); 3277 3278 out_data = kmem_zalloc(SD_PROFILE_HEADER_LEN, KM_SLEEP); 3279 rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 3280 3281 rtn = sd_send_scsi_GET_CONFIGURATION(un, &com, rqbuf, SENSE_LENGTH, 3282 out_data, SD_PROFILE_HEADER_LEN, path_flag); 3283 3284 mutex_enter(SD_MUTEX(un)); 3285 if (rtn == 0) { 3286 /* 3287 * We have good information, check for writable DVD. 3288 */ 3289 if ((out_data[6] == 0) && (out_data[7] == 0x12)) { 3290 un->un_f_mmc_writable_media = TRUE; 3291 kmem_free(out_data, SD_PROFILE_HEADER_LEN); 3292 kmem_free(rqbuf, SENSE_LENGTH); 3293 return; 3294 } 3295 } 3296 3297 kmem_free(out_data, SD_PROFILE_HEADER_LEN); 3298 kmem_free(rqbuf, SENSE_LENGTH); 3299 3300 /* 3301 * Determine if this is a RRD type device. 3302 */ 3303 mutex_exit(SD_MUTEX(un)); 3304 buf = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP); 3305 status = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, (uchar_t *)buf, 3306 BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP, path_flag); 3307 mutex_enter(SD_MUTEX(un)); 3308 if (status != 0) { 3309 /* command failed; just return */ 3310 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3311 return; 3312 } 3313 3314 /* Get to the page data */ 3315 sense_mhp = (struct mode_header_grp2 *)buf; 3316 bd_len = (sense_mhp->bdesc_length_hi << 8) | sense_mhp->bdesc_length_lo; 3317 if (bd_len > MODE_BLK_DESC_LENGTH) { 3318 /* 3319 * We did not get back the expected block descriptor length so 3320 * we cannot check the mode page. 3321 */ 3322 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3323 "sd_check_for_writable_cd: Mode Sense returned " 3324 "invalid block descriptor length\n"); 3325 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3326 return; 3327 } 3328 3329 /* 3330 * If the device presents DVD or CD capabilities in the mode 3331 * page, we can return here since a RRD device will not have 3332 * these capabilities. 3333 */ 3334 sense_page = (uchar_t *)(buf + MODE_HEADER_LENGTH_GRP2 + bd_len); 3335 if ((sense_page[2] & 0x3f) || (sense_page[3] & 0x3f)) { 3336 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3337 return; 3338 } 3339 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3340 3341 /* 3342 * If un->un_f_mmc_writable_media is still FALSE, 3343 * check for RRD type media. A RRD device is identified 3344 * by the features RANDOM_WRITABLE and HARDWARE_DEFECT_MANAGEMENT. 
3345 */ 3346 mutex_exit(SD_MUTEX(un)); 3347 out_data_rw = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP); 3348 rqbuf_rw = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 3349 3350 rtn = sd_send_scsi_feature_GET_CONFIGURATION(un, &com, rqbuf_rw, 3351 SENSE_LENGTH, out_data_rw, SD_CURRENT_FEATURE_LEN, 3352 RANDOM_WRITABLE, path_flag); 3353 if (rtn != 0) { 3354 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN); 3355 kmem_free(rqbuf_rw, SENSE_LENGTH); 3356 mutex_enter(SD_MUTEX(un)); 3357 return; 3358 } 3359 3360 out_data_hd = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP); 3361 rqbuf_hd = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 3362 3363 rtn = sd_send_scsi_feature_GET_CONFIGURATION(un, &com, rqbuf_hd, 3364 SENSE_LENGTH, out_data_hd, SD_CURRENT_FEATURE_LEN, 3365 HARDWARE_DEFECT_MANAGEMENT, path_flag); 3366 mutex_enter(SD_MUTEX(un)); 3367 if (rtn == 0) { 3368 /* 3369 * We have good information, check for random writable 3370 * and hardware defect features as current. 3371 */ 3372 if ((out_data_rw[9] & RANDOM_WRITABLE) && 3373 (out_data_rw[10] & 0x1) && 3374 (out_data_hd[9] & HARDWARE_DEFECT_MANAGEMENT) && 3375 (out_data_hd[10] & 0x1)) { 3376 un->un_f_mmc_writable_media = TRUE; 3377 } 3378 } 3379 3380 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN); 3381 kmem_free(rqbuf_rw, SENSE_LENGTH); 3382 kmem_free(out_data_hd, SD_CURRENT_FEATURE_LEN); 3383 kmem_free(rqbuf_hd, SENSE_LENGTH); 3384 } 3385 3386 /* 3387 * Function: sd_read_unit_properties 3388 * 3389 * Description: The following implements a property lookup mechanism. 3390 * Properties for particular disks (keyed on vendor, model 3391 * and rev numbers) are sought in the sd.conf file via 3392 * sd_process_sdconf_file(), and if not found there, are 3393 * looked for in a list hardcoded in this driver via 3394 * sd_process_sdconf_table() Once located the properties 3395 * are used to update the driver unit structure. 3396 * 3397 * Arguments: un - driver soft state (unit) structure 3398 */ 3399 3400 static void 3401 sd_read_unit_properties(struct sd_lun *un) 3402 { 3403 /* 3404 * sd_process_sdconf_file returns SD_FAILURE if it cannot find 3405 * the "sd-config-list" property (from the sd.conf file) or if 3406 * there was not a match for the inquiry vid/pid. If this event 3407 * occurs the static driver configuration table is searched for 3408 * a match. 3409 */ 3410 ASSERT(un != NULL); 3411 if (sd_process_sdconf_file(un) == SD_FAILURE) { 3412 sd_process_sdconf_table(un); 3413 } 3414 3415 /* check for LSI device */ 3416 sd_is_lsi(un); 3417 3418 3419 } 3420 3421 3422 /* 3423 * Function: sd_process_sdconf_file 3424 * 3425 * Description: Use ddi_getlongprop to obtain the properties from the 3426 * driver's config file (ie, sd.conf) and update the driver 3427 * soft state structure accordingly. 3428 * 3429 * Arguments: un - driver soft state (unit) structure 3430 * 3431 * Return Code: SD_SUCCESS - The properties were successfully set according 3432 * to the driver configuration file. 3433 * SD_FAILURE - The driver config list was not obtained or 3434 * there was no vid/pid match. This indicates that 3435 * the static config table should be used. 
3436 * 3437 * The config file has a property, "sd-config-list", which consists of 3438 * one or more duplets as follows: 3439 * 3440 * sd-config-list= 3441 * <duplet>, 3442 * [<duplet>,] 3443 * [<duplet>]; 3444 * 3445 * The structure of each duplet is as follows: 3446 * 3447 * <duplet>:= <vid+pid>,<data-property-name_list> 3448 * 3449 * The first entry of the duplet is the device ID string (the concatenated 3450 * vid & pid; not to be confused with a device_id). This is defined in 3451 * the same way as in the sd_disk_table. 3452 * 3453 * The second part of the duplet is a string that identifies a 3454 * data-property-name-list. The data-property-name-list is defined as 3455 * follows: 3456 * 3457 * <data-property-name-list>:=<data-property-name> [<data-property-name>] 3458 * 3459 * The syntax of <data-property-name> depends on the <version> field. 3460 * 3461 * If version = SD_CONF_VERSION_1 we have the following syntax: 3462 * 3463 * <data-property-name>:=<version>,<flags>,<prop0>,<prop1>,.....<propN> 3464 * 3465 * where the prop0 value will be used to set prop0 if bit0 set in the 3466 * flags, prop1 if bit1 set, etc. and N = SD_CONF_MAX_ITEMS -1 3467 * 3468 */ 3469 3470 static int 3471 sd_process_sdconf_file(struct sd_lun *un) 3472 { 3473 char *config_list = NULL; 3474 int config_list_len; 3475 int len; 3476 int dupletlen = 0; 3477 char *vidptr; 3478 int vidlen; 3479 char *dnlist_ptr; 3480 char *dataname_ptr; 3481 int dnlist_len; 3482 int dataname_len; 3483 int *data_list; 3484 int data_list_len; 3485 int rval = SD_FAILURE; 3486 int i; 3487 3488 ASSERT(un != NULL); 3489 3490 /* Obtain the configuration list associated with the .conf file */ 3491 if (ddi_getlongprop(DDI_DEV_T_ANY, SD_DEVINFO(un), DDI_PROP_DONTPASS, 3492 sd_config_list, (caddr_t)&config_list, &config_list_len) 3493 != DDI_PROP_SUCCESS) { 3494 return (SD_FAILURE); 3495 } 3496 3497 /* 3498 * Compare vids in each duplet to the inquiry vid - if a match is 3499 * made, get the data value and update the soft state structure 3500 * accordingly. 3501 * 3502 * Note: This algorithm is complex and difficult to maintain. It should 3503 * be replaced with a more robust implementation. 3504 */ 3505 for (len = config_list_len, vidptr = config_list; len > 0; 3506 vidptr += dupletlen, len -= dupletlen) { 3507 /* 3508 * Note: The assumption here is that each vid entry is on 3509 * a unique line from its associated duplet. 3510 */ 3511 vidlen = dupletlen = (int)strlen(vidptr); 3512 if ((vidlen == 0) || 3513 (sd_sdconf_id_match(un, vidptr, vidlen) != SD_SUCCESS)) { 3514 dupletlen++; 3515 continue; 3516 } 3517 3518 /* 3519 * dnlist contains 1 or more blank separated 3520 * data-property-name entries 3521 */ 3522 dnlist_ptr = vidptr + vidlen + 1; 3523 dnlist_len = (int)strlen(dnlist_ptr); 3524 dupletlen += dnlist_len + 2; 3525 3526 /* 3527 * Set a pointer for the first data-property-name 3528 * entry in the list 3529 */ 3530 dataname_ptr = dnlist_ptr; 3531 dataname_len = 0; 3532 3533 /* 3534 * Loop through all data-property-name entries in the 3535 * data-property-name-list setting the properties for each. 3536 */ 3537 while (dataname_len < dnlist_len) { 3538 int version; 3539 3540 /* 3541 * Determine the length of the current 3542 * data-property-name entry by indexing until a 3543 * blank or NULL is encountered. When the space is 3544 * encountered reset it to a NULL for compliance 3545 * with ddi_getlongprop(). 
3546 */ 3547 for (i = 0; ((dataname_ptr[i] != ' ') && 3548 (dataname_ptr[i] != '\0')); i++) { 3549 ; 3550 } 3551 3552 dataname_len += i; 3553 /* If not null terminated, Make it so */ 3554 if (dataname_ptr[i] == ' ') { 3555 dataname_ptr[i] = '\0'; 3556 } 3557 dataname_len++; 3558 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3559 "sd_process_sdconf_file: disk:%s, data:%s\n", 3560 vidptr, dataname_ptr); 3561 3562 /* Get the data list */ 3563 if (ddi_getlongprop(DDI_DEV_T_ANY, SD_DEVINFO(un), 0, 3564 dataname_ptr, (caddr_t)&data_list, &data_list_len) 3565 != DDI_PROP_SUCCESS) { 3566 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3567 "sd_process_sdconf_file: data property (%s)" 3568 " has no value\n", dataname_ptr); 3569 dataname_ptr = dnlist_ptr + dataname_len; 3570 continue; 3571 } 3572 3573 version = data_list[0]; 3574 3575 if (version == SD_CONF_VERSION_1) { 3576 sd_tunables values; 3577 3578 /* Set the properties */ 3579 if (sd_chk_vers1_data(un, data_list[1], 3580 &data_list[2], data_list_len, dataname_ptr) 3581 == SD_SUCCESS) { 3582 sd_get_tunables_from_conf(un, 3583 data_list[1], &data_list[2], 3584 &values); 3585 sd_set_vers1_properties(un, 3586 data_list[1], &values); 3587 rval = SD_SUCCESS; 3588 } else { 3589 rval = SD_FAILURE; 3590 } 3591 } else { 3592 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3593 "data property %s version 0x%x is invalid.", 3594 dataname_ptr, version); 3595 rval = SD_FAILURE; 3596 } 3597 kmem_free(data_list, data_list_len); 3598 dataname_ptr = dnlist_ptr + dataname_len; 3599 } 3600 } 3601 3602 /* free up the memory allocated by ddi_getlongprop */ 3603 if (config_list) { 3604 kmem_free(config_list, config_list_len); 3605 } 3606 3607 return (rval); 3608 } 3609 3610 /* 3611 * Function: sd_get_tunables_from_conf() 3612 * 3613 * 3614 * This function reads the data list from the sd.conf file and pulls 3615 * the values that can have numeric values as arguments and places 3616 * the values in the appropriate sd_tunables member. 3617 * Since the order of the data list members varies across platforms 3618 * This function reads them from the data list in a platform specific 3619 * order and places them into the correct sd_tunable member that is 3620 * consistent across all platforms. 
3621 */ 3622 static void 3623 sd_get_tunables_from_conf(struct sd_lun *un, int flags, int *data_list, 3624 sd_tunables *values) 3625 { 3626 int i; 3627 int mask; 3628 3629 bzero(values, sizeof (sd_tunables)); 3630 3631 for (i = 0; i < SD_CONF_MAX_ITEMS; i++) { 3632 3633 mask = 1 << i; 3634 if (mask > flags) { 3635 break; 3636 } 3637 3638 switch (mask & flags) { 3639 case 0: /* This mask bit not set in flags */ 3640 continue; 3641 case SD_CONF_BSET_THROTTLE: 3642 values->sdt_throttle = data_list[i]; 3643 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3644 "sd_get_tunables_from_conf: throttle = %d\n", 3645 values->sdt_throttle); 3646 break; 3647 case SD_CONF_BSET_CTYPE: 3648 values->sdt_ctype = data_list[i]; 3649 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3650 "sd_get_tunables_from_conf: ctype = %d\n", 3651 values->sdt_ctype); 3652 break; 3653 case SD_CONF_BSET_NRR_COUNT: 3654 values->sdt_not_rdy_retries = data_list[i]; 3655 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3656 "sd_get_tunables_from_conf: not_rdy_retries = %d\n", 3657 values->sdt_not_rdy_retries); 3658 break; 3659 case SD_CONF_BSET_BSY_RETRY_COUNT: 3660 values->sdt_busy_retries = data_list[i]; 3661 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3662 "sd_get_tunables_from_conf: busy_retries = %d\n", 3663 values->sdt_busy_retries); 3664 break; 3665 case SD_CONF_BSET_RST_RETRIES: 3666 values->sdt_reset_retries = data_list[i]; 3667 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3668 "sd_get_tunables_from_conf: reset_retries = %d\n", 3669 values->sdt_reset_retries); 3670 break; 3671 case SD_CONF_BSET_RSV_REL_TIME: 3672 values->sdt_reserv_rel_time = data_list[i]; 3673 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3674 "sd_get_tunables_from_conf: reserv_rel_time = %d\n", 3675 values->sdt_reserv_rel_time); 3676 break; 3677 case SD_CONF_BSET_MIN_THROTTLE: 3678 values->sdt_min_throttle = data_list[i]; 3679 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3680 "sd_get_tunables_from_conf: min_throttle = %d\n", 3681 values->sdt_min_throttle); 3682 break; 3683 case SD_CONF_BSET_DISKSORT_DISABLED: 3684 values->sdt_disk_sort_dis = data_list[i]; 3685 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3686 "sd_get_tunables_from_conf: disk_sort_dis = %d\n", 3687 values->sdt_disk_sort_dis); 3688 break; 3689 case SD_CONF_BSET_LUN_RESET_ENABLED: 3690 values->sdt_lun_reset_enable = data_list[i]; 3691 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3692 "sd_get_tunables_from_conf: lun_reset_enable = %d" 3693 "\n", values->sdt_lun_reset_enable); 3694 break; 3695 } 3696 } 3697 } 3698 3699 /* 3700 * Function: sd_process_sdconf_table 3701 * 3702 * Description: Search the static configuration table for a match on the 3703 * inquiry vid/pid and update the driver soft state structure 3704 * according to the table property values for the device. 3705 * 3706 * The form of a configuration table entry is: 3707 * <vid+pid>,<flags>,<property-data> 3708 * "SEAGATE ST42400N",1,63,0,0 (Fibre) 3709 * "SEAGATE ST42400N",1,63,0,0,0,0 (Sparc) 3710 * "SEAGATE ST42400N",1,63,0,0,0,0,0,0,0,0,0,0 (Intel) 3711 * 3712 * Arguments: un - driver soft state (unit) structure 3713 */ 3714 3715 static void 3716 sd_process_sdconf_table(struct sd_lun *un) 3717 { 3718 char *id = NULL; 3719 int table_index; 3720 int idlen; 3721 3722 ASSERT(un != NULL); 3723 for (table_index = 0; table_index < sd_disk_table_size; 3724 table_index++) { 3725 id = sd_disk_table[table_index].device_id; 3726 idlen = strlen(id); 3727 if (idlen == 0) { 3728 continue; 3729 } 3730 3731 /* 3732 * The static configuration table currently does not 3733 * implement version 10 properties. 
Additionally, 3734 * multiple data-property-name entries are not 3735 * implemented in the static configuration table. 3736 */ 3737 if (sd_sdconf_id_match(un, id, idlen) == SD_SUCCESS) { 3738 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3739 "sd_process_sdconf_table: disk %s\n", id); 3740 sd_set_vers1_properties(un, 3741 sd_disk_table[table_index].flags, 3742 sd_disk_table[table_index].properties); 3743 break; 3744 } 3745 } 3746 } 3747 3748 3749 /* 3750 * Function: sd_sdconf_id_match 3751 * 3752 * Description: This local function implements a case-insensitive vid/pid 3753 * comparison as well as the boundary cases of wild card and 3754 * multiple blanks. 3755 * 3756 * Note: An implicit assumption made here is that the scsi 3757 * inquiry structure will always keep the vid, pid and 3758 * revision strings in consecutive sequence, so they can be 3759 * read as a single string. If this assumption is not the 3760 * case, a separate string, to be used for the check, needs 3761 * to be built with these strings concatenated. 3762 * 3763 * Arguments: un - driver soft state (unit) structure 3764 * id - table or config file vid/pid 3765 * idlen - length of the vid/pid (bytes) 3766 * 3767 * Return Code: SD_SUCCESS - Indicates a match with the inquiry vid/pid 3768 * SD_FAILURE - Indicates no match with the inquiry vid/pid 3769 */ 3770 3771 static int 3772 sd_sdconf_id_match(struct sd_lun *un, char *id, int idlen) 3773 { 3774 struct scsi_inquiry *sd_inq; 3775 int rval = SD_SUCCESS; 3776 3777 ASSERT(un != NULL); 3778 sd_inq = un->un_sd->sd_inq; 3779 ASSERT(id != NULL); 3780 3781 /* 3782 * We use the inq_vid as a pointer to a buffer containing the 3783 * vid and pid and use the entire vid/pid length of the table 3784 * entry for the comparison. This works because the inq_pid 3785 * data member follows inq_vid in the scsi_inquiry structure. 3786 */ 3787 if (strncasecmp(sd_inq->inq_vid, id, idlen) != 0) { 3788 /* 3789 * The user id string is compared to the inquiry vid/pid 3790 * using a case insensitive comparison and ignoring 3791 * multiple spaces. 3792 */ 3793 rval = sd_blank_cmp(un, id, idlen); 3794 if (rval != SD_SUCCESS) { 3795 /* 3796 * User id strings that start and end with a "*" 3797 * are a special case. These do not have a 3798 * specific vendor, and the product string can 3799 * appear anywhere in the 16 byte PID portion of 3800 * the inquiry data. This is a simple strstr() 3801 * type search for the user id in the inquiry data. 3802 */ 3803 if ((id[0] == '*') && (id[idlen - 1] == '*')) { 3804 char *pidptr = &id[1]; 3805 int i; 3806 int j; 3807 int pidstrlen = idlen - 2; 3808 j = sizeof (SD_INQUIRY(un)->inq_pid) - 3809 pidstrlen; 3810 3811 if (j < 0) { 3812 return (SD_FAILURE); 3813 } 3814 for (i = 0; i < j; i++) { 3815 if (bcmp(&SD_INQUIRY(un)->inq_pid[i], 3816 pidptr, pidstrlen) == 0) { 3817 rval = SD_SUCCESS; 3818 break; 3819 } 3820 } 3821 } 3822 } 3823 } 3824 return (rval); 3825 } 3826 3827 3828 /* 3829 * Function: sd_blank_cmp 3830 * 3831 * Description: If the id string starts and ends with a space, treat 3832 * multiple consecutive spaces as equivalent to a single 3833 * space. For example, this causes a sd_disk_table entry 3834 * of " NEC CDROM " to match a device's id string of 3835 * "NEC CDROM". 3836 * 3837 * Note: The success exit condition for this routine is if 3838 * the pointer to the table entry is '\0' and the cnt of 3839 * the inquiry length is zero.
This will happen if the inquiry 3840 * string returned by the device is padded with spaces to be 3841 * exactly 24 bytes in length (8 byte vid + 16 byte pid). The 3842 * SCSI spec states that the inquiry string is to be padded with 3843 * spaces. 3844 * 3845 * Arguments: un - driver soft state (unit) structure 3846 * id - table or config file vid/pid 3847 * idlen - length of the vid/pid (bytes) 3848 * 3849 * Return Code: SD_SUCCESS - Indicates a match with the inquiry vid/pid 3850 * SD_FAILURE - Indicates no match with the inquiry vid/pid 3851 */ 3852 3853 static int 3854 sd_blank_cmp(struct sd_lun *un, char *id, int idlen) 3855 { 3856 char *p1; 3857 char *p2; 3858 int cnt; 3859 cnt = sizeof (SD_INQUIRY(un)->inq_vid) + 3860 sizeof (SD_INQUIRY(un)->inq_pid); 3861 3862 ASSERT(un != NULL); 3863 p2 = un->un_sd->sd_inq->inq_vid; 3864 ASSERT(id != NULL); 3865 p1 = id; 3866 3867 if ((id[0] == ' ') && (id[idlen - 1] == ' ')) { 3868 /* 3869 * Note: string p1 is terminated by a NUL but string p2 3870 * isn't. The end of p2 is determined by cnt. 3871 */ 3872 for (;;) { 3873 /* skip over any extra blanks in both strings */ 3874 while ((*p1 != '\0') && (*p1 == ' ')) { 3875 p1++; 3876 } 3877 while ((cnt != 0) && (*p2 == ' ')) { 3878 p2++; 3879 cnt--; 3880 } 3881 3882 /* compare the two strings */ 3883 if ((cnt == 0) || 3884 (SD_TOUPPER(*p1) != SD_TOUPPER(*p2))) { 3885 break; 3886 } 3887 while ((cnt > 0) && 3888 (SD_TOUPPER(*p1) == SD_TOUPPER(*p2))) { 3889 p1++; 3890 p2++; 3891 cnt--; 3892 } 3893 } 3894 } 3895 3896 /* return SD_SUCCESS if both strings match */ 3897 return (((*p1 == '\0') && (cnt == 0)) ? SD_SUCCESS : SD_FAILURE); 3898 } 3899 3900 3901 /* 3902 * Function: sd_chk_vers1_data 3903 * 3904 * Description: Verify the version 1 device properties provided by the 3905 * user via the configuration file 3906 * 3907 * Arguments: un - driver soft state (unit) structure 3908 * flags - integer mask indicating properties to be set 3909 * prop_list - integer list of property values 3910 * list_len - length of user provided data 3911 * 3912 * Return Code: SD_SUCCESS - Indicates the user provided data is valid 3913 * SD_FAILURE - Indicates the user provided data is invalid 3914 */ 3915 3916 static int 3917 sd_chk_vers1_data(struct sd_lun *un, int flags, int *prop_list, 3918 int list_len, char *dataname_ptr) 3919 { 3920 int i; 3921 int mask = 1; 3922 int index = 0; 3923 3924 ASSERT(un != NULL); 3925 3926 /* Check for a NULL property name and list */ 3927 if (dataname_ptr == NULL) { 3928 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3929 "sd_chk_vers1_data: NULL data property name."); 3930 return (SD_FAILURE); 3931 } 3932 if (prop_list == NULL) { 3933 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3934 "sd_chk_vers1_data: %s NULL data property list.", 3935 dataname_ptr); 3936 return (SD_FAILURE); 3937 } 3938 3939 /* Display a warning if undefined bits are set in the flags */ 3940 if (flags & ~SD_CONF_BIT_MASK) { 3941 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3942 "sd_chk_vers1_data: invalid bits 0x%x in data list %s. " 3943 "Properties not set.", 3944 (flags & ~SD_CONF_BIT_MASK), dataname_ptr); 3945 return (SD_FAILURE); 3946 } 3947 3948 /* 3949 * Verify the length of the list by identifying the highest bit set 3950 * in the flags and validating that the property list has a length 3951 * up to the index of this bit. 
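 *
 * Put concretely, the check below requires (list_len / sizeof (int))
 * to be at least index + 2: one integer for the version word, one
 * for the flag word, and one for each property bit counted into
 * index by the loop.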
3952 */ 3953 for (i = 0; i < SD_CONF_MAX_ITEMS; i++) { 3954 if (flags & mask) { 3955 index++; 3956 } 3957 mask = 1 << i; 3958 } 3959 if ((list_len / sizeof (int)) < (index + 2)) { 3960 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3961 "sd_chk_vers1_data: " 3962 "Data property list %s size is incorrect. " 3963 "Properties not set.", dataname_ptr); 3964 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, "Size expected: " 3965 "version + 1 flagword + %d properties", SD_CONF_MAX_ITEMS); 3966 return (SD_FAILURE); 3967 } 3968 return (SD_SUCCESS); 3969 } 3970 3971 3972 /* 3973 * Function: sd_set_vers1_properties 3974 * 3975 * Description: Set version 1 device properties based on a property list 3976 * retrieved from the driver configuration file or static 3977 * configuration table. Version 1 properties have the format: 3978 * 3979 * <data-property-name>:=<version>,<flags>,<prop0>,<prop1>,.....<propN> 3980 * 3981 * where the prop0 value will be used to set prop0 if bit0 3982 * is set in the flags 3983 * 3984 * Arguments: un - driver soft state (unit) structure 3985 * flags - integer mask indicating properties to be set 3986 * prop_list - integer list of property values 3987 */ 3988 3989 static void 3990 sd_set_vers1_properties(struct sd_lun *un, int flags, sd_tunables *prop_list) 3991 { 3992 ASSERT(un != NULL); 3993 3994 /* 3995 * Set the flag to indicate cache is to be disabled. An attempt 3996 * to disable the cache via sd_cache_control() will be made 3997 * later during attach once the basic initialization is complete. 3998 */ 3999 if (flags & SD_CONF_BSET_NOCACHE) { 4000 un->un_f_opt_disable_cache = TRUE; 4001 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4002 "sd_set_vers1_properties: caching disabled flag set\n"); 4003 } 4004 4005 /* CD-specific configuration parameters */ 4006 if (flags & SD_CONF_BSET_PLAYMSF_BCD) { 4007 un->un_f_cfg_playmsf_bcd = TRUE; 4008 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4009 "sd_set_vers1_properties: playmsf_bcd set\n"); 4010 } 4011 if (flags & SD_CONF_BSET_READSUB_BCD) { 4012 un->un_f_cfg_readsub_bcd = TRUE; 4013 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4014 "sd_set_vers1_properties: readsub_bcd set\n"); 4015 } 4016 if (flags & SD_CONF_BSET_READ_TOC_TRK_BCD) { 4017 un->un_f_cfg_read_toc_trk_bcd = TRUE; 4018 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4019 "sd_set_vers1_properties: read_toc_trk_bcd set\n"); 4020 } 4021 if (flags & SD_CONF_BSET_READ_TOC_ADDR_BCD) { 4022 un->un_f_cfg_read_toc_addr_bcd = TRUE; 4023 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4024 "sd_set_vers1_properties: read_toc_addr_bcd set\n"); 4025 } 4026 if (flags & SD_CONF_BSET_NO_READ_HEADER) { 4027 un->un_f_cfg_no_read_header = TRUE; 4028 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4029 "sd_set_vers1_properties: no_read_header set\n"); 4030 } 4031 if (flags & SD_CONF_BSET_READ_CD_XD4) { 4032 un->un_f_cfg_read_cd_xd4 = TRUE; 4033 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4034 "sd_set_vers1_properties: read_cd_xd4 set\n"); 4035 } 4036 4037 /* Support for devices which do not have valid/unique serial numbers */ 4038 if (flags & SD_CONF_BSET_FAB_DEVID) { 4039 un->un_f_opt_fab_devid = TRUE; 4040 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4041 "sd_set_vers1_properties: fab_devid bit set\n"); 4042 } 4043 4044 /* Support for user throttle configuration */ 4045 if (flags & SD_CONF_BSET_THROTTLE) { 4046 ASSERT(prop_list != NULL); 4047 un->un_saved_throttle = un->un_throttle = 4048 prop_list->sdt_throttle; 4049 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4050 "sd_set_vers1_properties: throttle set to %d\n", 4051 prop_list->sdt_throttle); 4052 } 4053 4054 /* Set the per disk retry count 
according to the conf file or table. */ 4055 if (flags & SD_CONF_BSET_NRR_COUNT) { 4056 ASSERT(prop_list != NULL); 4057 if (prop_list->sdt_not_rdy_retries) { 4058 un->un_notready_retry_count = 4059 prop_list->sdt_not_rdy_retries; 4060 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4061 "sd_set_vers1_properties: not ready retry count" 4062 " set to %d\n", un->un_notready_retry_count); 4063 } 4064 } 4065 4066 /* The controller type is reported for generic disk driver ioctls */ 4067 if (flags & SD_CONF_BSET_CTYPE) { 4068 ASSERT(prop_list != NULL); 4069 switch (prop_list->sdt_ctype) { 4070 case CTYPE_CDROM: 4071 un->un_ctype = prop_list->sdt_ctype; 4072 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4073 "sd_set_vers1_properties: ctype set to " 4074 "CTYPE_CDROM\n"); 4075 break; 4076 case CTYPE_CCS: 4077 un->un_ctype = prop_list->sdt_ctype; 4078 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4079 "sd_set_vers1_properties: ctype set to " 4080 "CTYPE_CCS\n"); 4081 break; 4082 case CTYPE_ROD: /* RW optical */ 4083 un->un_ctype = prop_list->sdt_ctype; 4084 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4085 "sd_set_vers1_properties: ctype set to " 4086 "CTYPE_ROD\n"); 4087 break; 4088 default: 4089 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 4090 "sd_set_vers1_properties: Could not set " 4091 "invalid ctype value (%d)", 4092 prop_list->sdt_ctype); 4093 } 4094 } 4095 4096 /* Purple failover timeout */ 4097 if (flags & SD_CONF_BSET_BSY_RETRY_COUNT) { 4098 ASSERT(prop_list != NULL); 4099 un->un_busy_retry_count = 4100 prop_list->sdt_busy_retries; 4101 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4102 "sd_set_vers1_properties: " 4103 "busy retry count set to %d\n", 4104 un->un_busy_retry_count); 4105 } 4106 4107 /* Purple reset retry count */ 4108 if (flags & SD_CONF_BSET_RST_RETRIES) { 4109 ASSERT(prop_list != NULL); 4110 un->un_reset_retry_count = 4111 prop_list->sdt_reset_retries; 4112 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4113 "sd_set_vers1_properties: " 4114 "reset retry count set to %d\n", 4115 un->un_reset_retry_count); 4116 } 4117 4118 /* Purple reservation release timeout */ 4119 if (flags & SD_CONF_BSET_RSV_REL_TIME) { 4120 ASSERT(prop_list != NULL); 4121 un->un_reserve_release_time = 4122 prop_list->sdt_reserv_rel_time; 4123 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4124 "sd_set_vers1_properties: " 4125 "reservation release timeout set to %d\n", 4126 un->un_reserve_release_time); 4127 } 4128 4129 /* 4130 * Driver flag telling the driver to verify that no commands are pending 4131 * for a device before issuing a Test Unit Ready. This is a workaround 4132 * for a firmware bug in some Seagate eliteI drives. 4133 */ 4134 if (flags & SD_CONF_BSET_TUR_CHECK) { 4135 un->un_f_cfg_tur_check = TRUE; 4136 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4137 "sd_set_vers1_properties: tur queue check set\n"); 4138 } 4139 4140 if (flags & SD_CONF_BSET_MIN_THROTTLE) { 4141 un->un_min_throttle = prop_list->sdt_min_throttle; 4142 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4143 "sd_set_vers1_properties: min throttle set to %d\n", 4144 un->un_min_throttle); 4145 } 4146 4147 if (flags & SD_CONF_BSET_DISKSORT_DISABLED) { 4148 un->un_f_disksort_disabled = 4149 (prop_list->sdt_disk_sort_dis != 0) ? 4150 TRUE : FALSE; 4151 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4152 "sd_set_vers1_properties: disksort disabled " 4153 "flag set to %d\n", 4154 prop_list->sdt_disk_sort_dis); 4155 } 4156 4157 if (flags & SD_CONF_BSET_LUN_RESET_ENABLED) { 4158 un->un_f_lun_reset_enabled = 4159 (prop_list->sdt_lun_reset_enable != 0) ? 
4160 TRUE : FALSE; 4161 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4162 "sd_set_vers1_properties: lun reset enabled " 4163 "flag set to %d\n", 4164 prop_list->sdt_lun_reset_enable); 4165 } 4166 4167 /* 4168 * Validate the throttle values. 4169 * If any of the numbers are invalid, set everything to defaults. 4170 */ 4171 if ((un->un_throttle < SD_LOWEST_VALID_THROTTLE) || 4172 (un->un_min_throttle < SD_LOWEST_VALID_THROTTLE) || 4173 (un->un_min_throttle > un->un_throttle)) { 4174 un->un_saved_throttle = un->un_throttle = sd_max_throttle; 4175 un->un_min_throttle = sd_min_throttle; 4176 } 4177 } 4178 4179 /* 4180 * Function: sd_is_lsi() 4181 * 4182 * Description: Check for lsi devices, step through the static device 4183 * table to match vid/pid. 4184 * 4185 * Args: un - ptr to sd_lun 4186 * 4187 * Notes: When creating new LSI property, need to add the new LSI property 4188 * to this function. 4189 */ 4190 static void 4191 sd_is_lsi(struct sd_lun *un) 4192 { 4193 char *id = NULL; 4194 int table_index; 4195 int idlen; 4196 void *prop; 4197 4198 ASSERT(un != NULL); 4199 for (table_index = 0; table_index < sd_disk_table_size; 4200 table_index++) { 4201 id = sd_disk_table[table_index].device_id; 4202 idlen = strlen(id); 4203 if (idlen == 0) { 4204 continue; 4205 } 4206 4207 if (sd_sdconf_id_match(un, id, idlen) == SD_SUCCESS) { 4208 prop = sd_disk_table[table_index].properties; 4209 if (prop == &lsi_properties || 4210 prop == &lsi_oem_properties || 4211 prop == &lsi_properties_scsi || 4212 prop == &symbios_properties) { 4213 un->un_f_cfg_is_lsi = TRUE; 4214 } 4215 break; 4216 } 4217 } 4218 } 4219 4220 /* 4221 * Function: sd_get_physical_geometry 4222 * 4223 * Description: Retrieve the MODE SENSE page 3 (Format Device Page) and 4224 * MODE SENSE page 4 (Rigid Disk Drive Geometry Page) from the 4225 * target, and use this information to initialize the physical 4226 * geometry cache specified by pgeom_p. 4227 * 4228 * MODE SENSE is an optional command, so failure in this case 4229 * does not necessarily denote an error. We want to use the 4230 * MODE SENSE commands to derive the physical geometry of the 4231 * device, but if either command fails, the logical geometry is 4232 * used as the fallback for disk label geometry in cmlb. 4233 * 4234 * This requires that un->un_blockcount and un->un_tgt_blocksize 4235 * have already been initialized for the current target and 4236 * that the current values be passed as args so that we don't 4237 * end up ever trying to use -1 as a valid value. This could 4238 * happen if either value is reset while we're not holding 4239 * the mutex. 4240 * 4241 * Arguments: un - driver soft state (unit) structure 4242 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 4243 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 4244 * to use the USCSI "direct" chain and bypass the normal 4245 * command waitq. 4246 * 4247 * Context: Kernel thread only (can sleep). 
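 *
 * Sketch of the geometry arithmetic performed below (names mirror
 * the function's locals):
 *
 *	spc  = nhead * nsect;		-- sectors per cylinder
 *	ncyl = (cyl_ub << 16) + (cyl_mb << 8) + cyl_lb;
 *	modesense_capacity = spc * ncyl;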
4248 */ 4249 4250 static int 4251 sd_get_physical_geometry(struct sd_lun *un, cmlb_geom_t *pgeom_p, 4252 diskaddr_t capacity, int lbasize, int path_flag) 4253 { 4254 struct mode_format *page3p; 4255 struct mode_geometry *page4p; 4256 struct mode_header *headerp; 4257 int sector_size; 4258 int nsect; 4259 int nhead; 4260 int ncyl; 4261 int intrlv; 4262 int spc; 4263 diskaddr_t modesense_capacity; 4264 int rpm; 4265 int bd_len; 4266 int mode_header_length; 4267 uchar_t *p3bufp; 4268 uchar_t *p4bufp; 4269 int cdbsize; 4270 int ret = EIO; 4271 4272 ASSERT(un != NULL); 4273 4274 if (lbasize == 0) { 4275 if (ISCD(un)) { 4276 lbasize = 2048; 4277 } else { 4278 lbasize = un->un_sys_blocksize; 4279 } 4280 } 4281 pgeom_p->g_secsize = (unsigned short)lbasize; 4282 4283 /* 4284 * If the unit is a cd/dvd drive MODE SENSE page three 4285 * and MODE SENSE page four are reserved (see SBC spec 4286 * and MMC spec). To prevent soft errors just return 4287 * using the default LBA size. 4288 */ 4289 if (ISCD(un)) 4290 return (ret); 4291 4292 cdbsize = (un->un_f_cfg_is_atapi == TRUE) ? CDB_GROUP2 : CDB_GROUP0; 4293 4294 /* 4295 * Retrieve MODE SENSE page 3 - Format Device Page 4296 */ 4297 p3bufp = kmem_zalloc(SD_MODE_SENSE_PAGE3_LENGTH, KM_SLEEP); 4298 if (sd_send_scsi_MODE_SENSE(un, cdbsize, p3bufp, 4299 SD_MODE_SENSE_PAGE3_LENGTH, SD_MODE_SENSE_PAGE3_CODE, path_flag) 4300 != 0) { 4301 SD_ERROR(SD_LOG_COMMON, un, 4302 "sd_get_physical_geometry: mode sense page 3 failed\n"); 4303 goto page3_exit; 4304 } 4305 4306 /* 4307 * Determine size of Block Descriptors in order to locate the mode 4308 * page data. ATAPI devices return 0, SCSI devices should return 4309 * MODE_BLK_DESC_LENGTH. 4310 */ 4311 headerp = (struct mode_header *)p3bufp; 4312 if (un->un_f_cfg_is_atapi == TRUE) { 4313 struct mode_header_grp2 *mhp = 4314 (struct mode_header_grp2 *)headerp; 4315 mode_header_length = MODE_HEADER_LENGTH_GRP2; 4316 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 4317 } else { 4318 mode_header_length = MODE_HEADER_LENGTH; 4319 bd_len = ((struct mode_header *)headerp)->bdesc_length; 4320 } 4321 4322 if (bd_len > MODE_BLK_DESC_LENGTH) { 4323 SD_ERROR(SD_LOG_COMMON, un, "sd_get_physical_geometry: " 4324 "received unexpected bd_len of %d, page3\n", bd_len); 4325 goto page3_exit; 4326 } 4327 4328 page3p = (struct mode_format *) 4329 ((caddr_t)headerp + mode_header_length + bd_len); 4330 4331 if (page3p->mode_page.code != SD_MODE_SENSE_PAGE3_CODE) { 4332 SD_ERROR(SD_LOG_COMMON, un, "sd_get_physical_geometry: " 4333 "mode sense pg3 code mismatch %d\n", 4334 page3p->mode_page.code); 4335 goto page3_exit; 4336 } 4337 4338 /* 4339 * Use this physical geometry data only if BOTH MODE SENSE commands 4340 * complete successfully; otherwise, revert to the logical geometry. 4341 * So, we need to save everything in temporary variables. 
4342 */ 4343 sector_size = BE_16(page3p->data_bytes_sect); 4344 4345 /* 4346 * 1243403: The NEC D38x7 drives do not support MODE SENSE sector size 4347 */ 4348 if (sector_size == 0) { 4349 sector_size = un->un_sys_blocksize; 4350 } else { 4351 sector_size &= ~(un->un_sys_blocksize - 1); 4352 } 4353 4354 nsect = BE_16(page3p->sect_track); 4355 intrlv = BE_16(page3p->interleave); 4356 4357 SD_INFO(SD_LOG_COMMON, un, 4358 "sd_get_physical_geometry: Format Parameters (page 3)\n"); 4359 SD_INFO(SD_LOG_COMMON, un, 4360 " mode page: %d; nsect: %d; sector size: %d;\n", 4361 page3p->mode_page.code, nsect, sector_size); 4362 SD_INFO(SD_LOG_COMMON, un, 4363 " interleave: %d; track skew: %d; cylinder skew: %d;\n", intrlv, 4364 BE_16(page3p->track_skew), 4365 BE_16(page3p->cylinder_skew)); 4366 4367 4368 /* 4369 * Retrieve MODE SENSE page 4 - Rigid Disk Drive Geometry Page 4370 */ 4371 p4bufp = kmem_zalloc(SD_MODE_SENSE_PAGE4_LENGTH, KM_SLEEP); 4372 if (sd_send_scsi_MODE_SENSE(un, cdbsize, p4bufp, 4373 SD_MODE_SENSE_PAGE4_LENGTH, SD_MODE_SENSE_PAGE4_CODE, path_flag) 4374 != 0) { 4375 SD_ERROR(SD_LOG_COMMON, un, 4376 "sd_get_physical_geometry: mode sense page 4 failed\n"); 4377 goto page4_exit; 4378 } 4379 4380 /* 4381 * Determine size of Block Descriptors in order to locate the mode 4382 * page data. ATAPI devices return 0, SCSI devices should return 4383 * MODE_BLK_DESC_LENGTH. 4384 */ 4385 headerp = (struct mode_header *)p4bufp; 4386 if (un->un_f_cfg_is_atapi == TRUE) { 4387 struct mode_header_grp2 *mhp = 4388 (struct mode_header_grp2 *)headerp; 4389 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 4390 } else { 4391 bd_len = ((struct mode_header *)headerp)->bdesc_length; 4392 } 4393 4394 if (bd_len > MODE_BLK_DESC_LENGTH) { 4395 SD_ERROR(SD_LOG_COMMON, un, "sd_get_physical_geometry: " 4396 "received unexpected bd_len of %d, page4\n", bd_len); 4397 goto page4_exit; 4398 } 4399 4400 page4p = (struct mode_geometry *) 4401 ((caddr_t)headerp + mode_header_length + bd_len); 4402 4403 if (page4p->mode_page.code != SD_MODE_SENSE_PAGE4_CODE) { 4404 SD_ERROR(SD_LOG_COMMON, un, "sd_get_physical_geometry: " 4405 "mode sense pg4 code mismatch %d\n", 4406 page4p->mode_page.code); 4407 goto page4_exit; 4408 } 4409 4410 /* 4411 * Stash the data now, after we know that both commands completed. 4412 */ 4413 4414 4415 nhead = (int)page4p->heads; /* uchar, so no conversion needed */ 4416 spc = nhead * nsect; 4417 ncyl = (page4p->cyl_ub << 16) + (page4p->cyl_mb << 8) + page4p->cyl_lb; 4418 rpm = BE_16(page4p->rpm); 4419 4420 modesense_capacity = spc * ncyl; 4421 4422 SD_INFO(SD_LOG_COMMON, un, 4423 "sd_get_physical_geometry: Geometry Parameters (page 4)\n"); 4424 SD_INFO(SD_LOG_COMMON, un, 4425 " cylinders: %d; heads: %d; rpm: %d;\n", ncyl, nhead, rpm); 4426 SD_INFO(SD_LOG_COMMON, un, 4427 " computed capacity(h*s*c): %d;\n", modesense_capacity); 4428 SD_INFO(SD_LOG_COMMON, un, " pgeom_p: %p; read cap: %d\n", 4429 (void *)pgeom_p, capacity); 4430 4431 /* 4432 * Compensate if the drive's geometry is not rectangular, i.e., 4433 * the product of C * H * S returned by MODE SENSE >= that returned 4434 * by read capacity. This is an idiosyncrasy of the original x86 4435 * disk subsystem. 
4436 */ 4437 if (modesense_capacity >= capacity) { 4438 SD_INFO(SD_LOG_COMMON, un, 4439 "sd_get_physical_geometry: adjusting acyl; " 4440 "old: %d; new: %d\n", pgeom_p->g_acyl, 4441 (modesense_capacity - capacity + spc - 1) / spc); 4442 if (sector_size != 0) { 4443 /* 1243403: NEC D38x7 drives don't support sec size */ 4444 pgeom_p->g_secsize = (unsigned short)sector_size; 4445 } 4446 pgeom_p->g_nsect = (unsigned short)nsect; 4447 pgeom_p->g_nhead = (unsigned short)nhead; 4448 pgeom_p->g_capacity = capacity; 4449 pgeom_p->g_acyl = 4450 (modesense_capacity - pgeom_p->g_capacity + spc - 1) / spc; 4451 pgeom_p->g_ncyl = ncyl - pgeom_p->g_acyl; 4452 } 4453 4454 pgeom_p->g_rpm = (unsigned short)rpm; 4455 pgeom_p->g_intrlv = (unsigned short)intrlv; 4456 ret = 0; 4457 4458 SD_INFO(SD_LOG_COMMON, un, 4459 "sd_get_physical_geometry: mode sense geometry:\n"); 4460 SD_INFO(SD_LOG_COMMON, un, 4461 " nsect: %d; sector size: %d; interlv: %d\n", 4462 nsect, sector_size, intrlv); 4463 SD_INFO(SD_LOG_COMMON, un, 4464 " nhead: %d; ncyl: %d; rpm: %d; capacity(ms): %d\n", 4465 nhead, ncyl, rpm, modesense_capacity); 4466 SD_INFO(SD_LOG_COMMON, un, 4467 "sd_get_physical_geometry: (cached)\n"); 4468 SD_INFO(SD_LOG_COMMON, un, 4469 " ncyl: %ld; acyl: %d; nhead: %d; nsect: %d\n", 4470 pgeom_p->g_ncyl, pgeom_p->g_acyl, 4471 pgeom_p->g_nhead, pgeom_p->g_nsect); 4472 SD_INFO(SD_LOG_COMMON, un, 4473 " lbasize: %d; capacity: %ld; intrlv: %d; rpm: %d\n", 4474 pgeom_p->g_secsize, pgeom_p->g_capacity, 4475 pgeom_p->g_intrlv, pgeom_p->g_rpm); 4476 4477 page4_exit: 4478 kmem_free(p4bufp, SD_MODE_SENSE_PAGE4_LENGTH); 4479 page3_exit: 4480 kmem_free(p3bufp, SD_MODE_SENSE_PAGE3_LENGTH); 4481 4482 return (ret); 4483 } 4484 4485 /* 4486 * Function: sd_get_virtual_geometry 4487 * 4488 * Description: Ask the controller to tell us about the target device. 4489 * 4490 * Arguments: un - pointer to softstate 4491 * capacity - disk capacity in #blocks 4492 * lbasize - disk block size in bytes 4493 * 4494 * Context: Kernel thread only 4495 */ 4496 4497 static int 4498 sd_get_virtual_geometry(struct sd_lun *un, cmlb_geom_t *lgeom_p, 4499 diskaddr_t capacity, int lbasize) 4500 { 4501 uint_t geombuf; 4502 int spc; 4503 4504 ASSERT(un != NULL); 4505 4506 /* Set sector size, and total number of sectors */ 4507 (void) scsi_ifsetcap(SD_ADDRESS(un), "sector-size", lbasize, 1); 4508 (void) scsi_ifsetcap(SD_ADDRESS(un), "total-sectors", capacity, 1); 4509 4510 /* Let the HBA tell us its geometry */ 4511 geombuf = (uint_t)scsi_ifgetcap(SD_ADDRESS(un), "geometry", 1); 4512 4513 /* A value of -1 indicates an undefined "geometry" property */ 4514 if (geombuf == (-1)) { 4515 return (EINVAL); 4516 } 4517 4518 /* Initialize the logical geometry cache. */ 4519 lgeom_p->g_nhead = (geombuf >> 16) & 0xffff; 4520 lgeom_p->g_nsect = geombuf & 0xffff; 4521 lgeom_p->g_secsize = un->un_sys_blocksize; 4522 4523 spc = lgeom_p->g_nhead * lgeom_p->g_nsect; 4524 4525 /* 4526 * Note: The driver originally converted the capacity value from 4527 * target blocks to system blocks. However, the capacity value passed 4528 * to this routine is already in terms of system blocks (this scaling 4529 * is done when the READ CAPACITY command is issued and processed). 4530 * This 'error' may have gone undetected because the usage of g_ncyl 4531 * (which is based upon g_capacity) is very limited within the driver 4532 */ 4533 lgeom_p->g_capacity = capacity; 4534 4535 /* 4536 * Set ncyl to zero if the hba returned a zero nhead or nsect value. 
The 4537 * hba may return zero values if the device has been removed. 4538 */ 4539 if (spc == 0) { 4540 lgeom_p->g_ncyl = 0; 4541 } else { 4542 lgeom_p->g_ncyl = lgeom_p->g_capacity / spc; 4543 } 4544 lgeom_p->g_acyl = 0; 4545 4546 SD_INFO(SD_LOG_COMMON, un, "sd_get_virtual_geometry: (cached)\n"); 4547 return (0); 4548 4549 } 4550 /* 4551 * Function: sd_update_block_info 4552 * 4553 * Description: Calculate a byte count to sector count bitshift value 4554 * from sector size. 4555 * 4556 * Arguments: un: unit struct. 4557 * lbasize: new target sector size 4558 * capacity: new target capacity, ie. block count 4559 * 4560 * Context: Kernel thread context 4561 */ 4562 4563 static void 4564 sd_update_block_info(struct sd_lun *un, uint32_t lbasize, uint64_t capacity) 4565 { 4566 uint_t dblk; 4567 4568 if (lbasize != 0) { 4569 un->un_tgt_blocksize = lbasize; 4570 un->un_f_tgt_blocksize_is_valid = TRUE; 4571 } 4572 4573 if (capacity != 0) { 4574 un->un_blockcount = capacity; 4575 un->un_f_blockcount_is_valid = TRUE; 4576 } 4577 4578 /* 4579 * Update device capacity properties. 4580 * 4581 * 'device-nblocks' number of blocks in target's units 4582 * 'device-blksize' data bearing size of target's block 4583 * 4584 * NOTE: math is complicated by the fact that un_tgt_blocksize may 4585 * not be a power of two for checksumming disks with 520/528 byte 4586 * sectors. 4587 */ 4588 if (un->un_f_tgt_blocksize_is_valid && 4589 un->un_f_blockcount_is_valid && 4590 un->un_sys_blocksize) { 4591 dblk = un->un_tgt_blocksize / un->un_sys_blocksize; 4592 (void) ddi_prop_update_int64(DDI_DEV_T_NONE, SD_DEVINFO(un), 4593 "device-nblocks", un->un_blockcount / dblk); 4594 /* 4595 * To save memory, only define "device-blksize" when its 4596 * value is differnet than the default DEV_BSIZE value. 4597 */ 4598 if ((un->un_sys_blocksize * dblk) != DEV_BSIZE) 4599 (void) ddi_prop_update_int(DDI_DEV_T_NONE, 4600 SD_DEVINFO(un), "device-blksize", 4601 un->un_sys_blocksize * dblk); 4602 } 4603 } 4604 4605 4606 /* 4607 * Function: sd_register_devid 4608 * 4609 * Description: This routine will obtain the device id information from the 4610 * target, obtain the serial number, and register the device 4611 * id with the ddi framework. 4612 * 4613 * Arguments: devi - the system's dev_info_t for the device. 4614 * un - driver soft state (unit) structure 4615 * reservation_flag - indicates if a reservation conflict 4616 * occurred during attach 4617 * 4618 * Context: Kernel Thread 4619 */ 4620 static void 4621 sd_register_devid(struct sd_lun *un, dev_info_t *devi, int reservation_flag) 4622 { 4623 int rval = 0; 4624 uchar_t *inq80 = NULL; 4625 size_t inq80_len = MAX_INQUIRY_SIZE; 4626 size_t inq80_resid = 0; 4627 uchar_t *inq83 = NULL; 4628 size_t inq83_len = MAX_INQUIRY_SIZE; 4629 size_t inq83_resid = 0; 4630 int dlen, len; 4631 char *sn; 4632 4633 ASSERT(un != NULL); 4634 ASSERT(mutex_owned(SD_MUTEX(un))); 4635 ASSERT((SD_DEVINFO(un)) == devi); 4636 4637 /* 4638 * This is the case of antiquated Sun disk drives that have the 4639 * FAB_DEVID property set in the disk_table. These drives 4640 * manage the devid's by storing them in last 2 available sectors 4641 * on the drive and have them fabricated by the ddi layer by calling 4642 * ddi_devid_init and passing the DEVID_FAB flag. 4643 */ 4644 if (un->un_f_opt_fab_devid == TRUE) { 4645 /* 4646 * Depending on EINVAL isn't reliable, since a reserved disk 4647 * may result in invalid geometry, so check to make sure a 4648 * reservation conflict did not occur during attach. 
4649 */ 4650 if ((sd_get_devid(un) == EINVAL) && 4651 (reservation_flag != SD_TARGET_IS_RESERVED)) { 4652 /* 4653 * The devid is invalid AND there is no reservation 4654 * conflict. Fabricate a new devid. 4655 */ 4656 (void) sd_create_devid(un); 4657 } 4658 4659 /* Register the devid if it exists */ 4660 if (un->un_devid != NULL) { 4661 (void) ddi_devid_register(SD_DEVINFO(un), 4662 un->un_devid); 4663 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4664 "sd_register_devid: Devid Fabricated\n"); 4665 } 4666 return; 4667 } 4668 4669 /* 4670 * We check the availibility of the World Wide Name (0x83) and Unit 4671 * Serial Number (0x80) pages in sd_check_vpd_page_support(), and using 4672 * un_vpd_page_mask from them, we decide which way to get the WWN. If 4673 * 0x83 is availible, that is the best choice. Our next choice is 4674 * 0x80. If neither are availible, we munge the devid from the device 4675 * vid/pid/serial # for Sun qualified disks, or use the ddi framework 4676 * to fabricate a devid for non-Sun qualified disks. 4677 */ 4678 if (sd_check_vpd_page_support(un) == 0) { 4679 /* collect page 80 data if available */ 4680 if (un->un_vpd_page_mask & SD_VPD_UNIT_SERIAL_PG) { 4681 4682 mutex_exit(SD_MUTEX(un)); 4683 inq80 = kmem_zalloc(inq80_len, KM_SLEEP); 4684 rval = sd_send_scsi_INQUIRY(un, inq80, inq80_len, 4685 0x01, 0x80, &inq80_resid); 4686 4687 if (rval != 0) { 4688 kmem_free(inq80, inq80_len); 4689 inq80 = NULL; 4690 inq80_len = 0; 4691 } else if (ddi_prop_exists( 4692 DDI_DEV_T_NONE, SD_DEVINFO(un), 4693 DDI_PROP_NOTPROM | DDI_PROP_DONTPASS, 4694 INQUIRY_SERIAL_NO) == 0) { 4695 /* 4696 * If we don't already have a serial number 4697 * property, do quick verify of data returned 4698 * and define property. 4699 */ 4700 dlen = inq80_len - inq80_resid; 4701 len = (size_t)inq80[3]; 4702 if ((dlen >= 4) && ((len + 4) <= dlen)) { 4703 /* 4704 * Ensure sn termination, skip leading 4705 * blanks, and create property 4706 * 'inquiry-serial-no'. 4707 */ 4708 sn = (char *)&inq80[4]; 4709 sn[len] = 0; 4710 while (*sn && (*sn == ' ')) 4711 sn++; 4712 if (*sn) { 4713 (void) ddi_prop_update_string( 4714 DDI_DEV_T_NONE, 4715 SD_DEVINFO(un), 4716 INQUIRY_SERIAL_NO, sn); 4717 } 4718 } 4719 } 4720 mutex_enter(SD_MUTEX(un)); 4721 } 4722 4723 /* collect page 83 data if available */ 4724 if (un->un_vpd_page_mask & SD_VPD_DEVID_WWN_PG) { 4725 mutex_exit(SD_MUTEX(un)); 4726 inq83 = kmem_zalloc(inq83_len, KM_SLEEP); 4727 rval = sd_send_scsi_INQUIRY(un, inq83, inq83_len, 4728 0x01, 0x83, &inq83_resid); 4729 4730 if (rval != 0) { 4731 kmem_free(inq83, inq83_len); 4732 inq83 = NULL; 4733 inq83_len = 0; 4734 } 4735 mutex_enter(SD_MUTEX(un)); 4736 } 4737 } 4738 4739 /* encode best devid possible based on data available */ 4740 if (ddi_devid_scsi_encode(DEVID_SCSI_ENCODE_VERSION_LATEST, 4741 (char *)ddi_driver_name(SD_DEVINFO(un)), 4742 (uchar_t *)SD_INQUIRY(un), sizeof (*SD_INQUIRY(un)), 4743 inq80, inq80_len - inq80_resid, inq83, inq83_len - 4744 inq83_resid, &un->un_devid) == DDI_SUCCESS) { 4745 4746 /* devid successfully encoded, register devid */ 4747 (void) ddi_devid_register(SD_DEVINFO(un), un->un_devid); 4748 4749 } else { 4750 /* 4751 * Unable to encode a devid based on data available. 4752 * This is not a Sun qualified disk. Older Sun disk 4753 * drives that have the SD_FAB_DEVID property 4754 * set in the disk_table and non Sun qualified 4755 * disks are treated in the same manner. 
These 4756 * drives manage the devid's by storing them in 4757 * last 2 available sectors on the drive and 4758 * have them fabricated by the ddi layer by 4759 * calling ddi_devid_init and passing the 4760 * DEVID_FAB flag. 4761 * Create a fabricate devid only if there's no 4762 * fabricate devid existed. 4763 */ 4764 if (sd_get_devid(un) == EINVAL) { 4765 (void) sd_create_devid(un); 4766 } 4767 un->un_f_opt_fab_devid = TRUE; 4768 4769 /* Register the devid if it exists */ 4770 if (un->un_devid != NULL) { 4771 (void) ddi_devid_register(SD_DEVINFO(un), 4772 un->un_devid); 4773 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4774 "sd_register_devid: devid fabricated using " 4775 "ddi framework\n"); 4776 } 4777 } 4778 4779 /* clean up resources */ 4780 if (inq80 != NULL) { 4781 kmem_free(inq80, inq80_len); 4782 } 4783 if (inq83 != NULL) { 4784 kmem_free(inq83, inq83_len); 4785 } 4786 } 4787 4788 4789 4790 /* 4791 * Function: sd_get_devid 4792 * 4793 * Description: This routine will return 0 if a valid device id has been 4794 * obtained from the target and stored in the soft state. If a 4795 * valid device id has not been previously read and stored, a 4796 * read attempt will be made. 4797 * 4798 * Arguments: un - driver soft state (unit) structure 4799 * 4800 * Return Code: 0 if we successfully get the device id 4801 * 4802 * Context: Kernel Thread 4803 */ 4804 4805 static int 4806 sd_get_devid(struct sd_lun *un) 4807 { 4808 struct dk_devid *dkdevid; 4809 ddi_devid_t tmpid; 4810 uint_t *ip; 4811 size_t sz; 4812 diskaddr_t blk; 4813 int status; 4814 int chksum; 4815 int i; 4816 size_t buffer_size; 4817 4818 ASSERT(un != NULL); 4819 ASSERT(mutex_owned(SD_MUTEX(un))); 4820 4821 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_get_devid: entry: un: 0x%p\n", 4822 un); 4823 4824 if (un->un_devid != NULL) { 4825 return (0); 4826 } 4827 4828 mutex_exit(SD_MUTEX(un)); 4829 if (cmlb_get_devid_block(un->un_cmlbhandle, &blk, 4830 (void *)SD_PATH_DIRECT) != 0) { 4831 mutex_enter(SD_MUTEX(un)); 4832 return (EINVAL); 4833 } 4834 4835 /* 4836 * Read and verify device id, stored in the reserved cylinders at the 4837 * end of the disk. Backup label is on the odd sectors of the last 4838 * track of the last cylinder. Device id will be on track of the next 4839 * to last cylinder. 
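 *
 * The on-disk record is validated below with a simple XOR checksum
 * over the block's 32-bit words; in sketch form:
 *
 *	chksum = 0;
 *	for (i = 0; i < words_per_block; i++)
 *		chksum ^= ip[i];
 *	valid = (DKD_GETCHKSUM(dkdevid) == chksum);
 *
 * where words_per_block excludes the stored checksum word itself.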
4840 */ 4841 mutex_enter(SD_MUTEX(un)); 4842 buffer_size = SD_REQBYTES2TGTBYTES(un, sizeof (struct dk_devid)); 4843 mutex_exit(SD_MUTEX(un)); 4844 dkdevid = kmem_alloc(buffer_size, KM_SLEEP); 4845 status = sd_send_scsi_READ(un, dkdevid, buffer_size, blk, 4846 SD_PATH_DIRECT); 4847 if (status != 0) { 4848 goto error; 4849 } 4850 4851 /* Validate the revision */ 4852 if ((dkdevid->dkd_rev_hi != DK_DEVID_REV_MSB) || 4853 (dkdevid->dkd_rev_lo != DK_DEVID_REV_LSB)) { 4854 status = EINVAL; 4855 goto error; 4856 } 4857 4858 /* Calculate the checksum */ 4859 chksum = 0; 4860 ip = (uint_t *)dkdevid; 4861 for (i = 0; i < ((un->un_sys_blocksize - sizeof (int))/sizeof (int)); 4862 i++) { 4863 chksum ^= ip[i]; 4864 } 4865 4866 /* Compare the checksums */ 4867 if (DKD_GETCHKSUM(dkdevid) != chksum) { 4868 status = EINVAL; 4869 goto error; 4870 } 4871 4872 /* Validate the device id */ 4873 if (ddi_devid_valid((ddi_devid_t)&dkdevid->dkd_devid) != DDI_SUCCESS) { 4874 status = EINVAL; 4875 goto error; 4876 } 4877 4878 /* 4879 * Store the device id in the driver soft state 4880 */ 4881 sz = ddi_devid_sizeof((ddi_devid_t)&dkdevid->dkd_devid); 4882 tmpid = kmem_alloc(sz, KM_SLEEP); 4883 4884 mutex_enter(SD_MUTEX(un)); 4885 4886 un->un_devid = tmpid; 4887 bcopy(&dkdevid->dkd_devid, un->un_devid, sz); 4888 4889 kmem_free(dkdevid, buffer_size); 4890 4891 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_get_devid: exit: un:0x%p\n", un); 4892 4893 return (status); 4894 error: 4895 mutex_enter(SD_MUTEX(un)); 4896 kmem_free(dkdevid, buffer_size); 4897 return (status); 4898 } 4899 4900 4901 /* 4902 * Function: sd_create_devid 4903 * 4904 * Description: This routine will fabricate the device id and write it 4905 * to the disk. 4906 * 4907 * Arguments: un - driver soft state (unit) structure 4908 * 4909 * Return Code: value of the fabricated device id 4910 * 4911 * Context: Kernel Thread 4912 */ 4913 4914 static ddi_devid_t 4915 sd_create_devid(struct sd_lun *un) 4916 { 4917 ASSERT(un != NULL); 4918 4919 /* Fabricate the devid */ 4920 if (ddi_devid_init(SD_DEVINFO(un), DEVID_FAB, 0, NULL, &un->un_devid) 4921 == DDI_FAILURE) { 4922 return (NULL); 4923 } 4924 4925 /* Write the devid to disk */ 4926 if (sd_write_deviceid(un) != 0) { 4927 ddi_devid_free(un->un_devid); 4928 un->un_devid = NULL; 4929 } 4930 4931 return (un->un_devid); 4932 } 4933 4934 4935 /* 4936 * Function: sd_write_deviceid 4937 * 4938 * Description: This routine will write the device id to the disk 4939 * reserved sector. 
4940 * 4941 * Arguments: un - driver soft state (unit) structure 4942 * 4943 * Return Code: EINVAL 4944 * value returned by sd_send_scsi_cmd 4945 * 4946 * Context: Kernel Thread 4947 */ 4948 4949 static int 4950 sd_write_deviceid(struct sd_lun *un) 4951 { 4952 struct dk_devid *dkdevid; 4953 diskaddr_t blk; 4954 uint_t *ip, chksum; 4955 int status; 4956 int i; 4957 4958 ASSERT(mutex_owned(SD_MUTEX(un))); 4959 4960 mutex_exit(SD_MUTEX(un)); 4961 if (cmlb_get_devid_block(un->un_cmlbhandle, &blk, 4962 (void *)SD_PATH_DIRECT) != 0) { 4963 mutex_enter(SD_MUTEX(un)); 4964 return (-1); 4965 } 4966 4967 4968 /* Allocate the buffer */ 4969 dkdevid = kmem_zalloc(un->un_sys_blocksize, KM_SLEEP); 4970 4971 /* Fill in the revision */ 4972 dkdevid->dkd_rev_hi = DK_DEVID_REV_MSB; 4973 dkdevid->dkd_rev_lo = DK_DEVID_REV_LSB; 4974 4975 /* Copy in the device id */ 4976 mutex_enter(SD_MUTEX(un)); 4977 bcopy(un->un_devid, &dkdevid->dkd_devid, 4978 ddi_devid_sizeof(un->un_devid)); 4979 mutex_exit(SD_MUTEX(un)); 4980 4981 /* Calculate the checksum */ 4982 chksum = 0; 4983 ip = (uint_t *)dkdevid; 4984 for (i = 0; i < ((un->un_sys_blocksize - sizeof (int))/sizeof (int)); 4985 i++) { 4986 chksum ^= ip[i]; 4987 } 4988 4989 /* Fill-in checksum */ 4990 DKD_FORMCHKSUM(chksum, dkdevid); 4991 4992 /* Write the reserved sector */ 4993 status = sd_send_scsi_WRITE(un, dkdevid, un->un_sys_blocksize, blk, 4994 SD_PATH_DIRECT); 4995 4996 kmem_free(dkdevid, un->un_sys_blocksize); 4997 4998 mutex_enter(SD_MUTEX(un)); 4999 return (status); 5000 } 5001 5002 5003 /* 5004 * Function: sd_check_vpd_page_support 5005 * 5006 * Description: This routine sends an inquiry command with the EVPD bit set and 5007 * a page code of 0x00 to the device. It is used to determine which 5008 * vital product pages are availible to find the devid. We are 5009 * looking for pages 0x83 or 0x80. If we return a negative 1, the 5010 * device does not support that command. 5011 * 5012 * Arguments: un - driver soft state (unit) structure 5013 * 5014 * Return Code: 0 - success 5015 * 1 - check condition 5016 * 5017 * Context: This routine can sleep. 5018 */ 5019 5020 static int 5021 sd_check_vpd_page_support(struct sd_lun *un) 5022 { 5023 uchar_t *page_list = NULL; 5024 uchar_t page_length = 0xff; /* Use max possible length */ 5025 uchar_t evpd = 0x01; /* Set the EVPD bit */ 5026 uchar_t page_code = 0x00; /* Supported VPD Pages */ 5027 int rval = 0; 5028 int counter; 5029 5030 ASSERT(un != NULL); 5031 ASSERT(mutex_owned(SD_MUTEX(un))); 5032 5033 mutex_exit(SD_MUTEX(un)); 5034 5035 /* 5036 * We'll set the page length to the maximum to save figuring it out 5037 * with an additional call. 5038 */ 5039 page_list = kmem_zalloc(page_length, KM_SLEEP); 5040 5041 rval = sd_send_scsi_INQUIRY(un, page_list, page_length, evpd, 5042 page_code, NULL); 5043 5044 mutex_enter(SD_MUTEX(un)); 5045 5046 /* 5047 * Now we must validate that the device accepted the command, as some 5048 * drives do not support it. If the drive does support it, we will 5049 * return 0, and the supported pages will be in un_vpd_page_mask. If 5050 * not, we return -1. 5051 */ 5052 if ((rval == 0) && (page_list[VPD_MODE_PAGE] == 0x00)) { 5053 /* Loop to find one of the 2 pages we need */ 5054 counter = 4; /* Supported pages start at byte 4, with 0x00 */ 5055 5056 /* 5057 * Pages are returned in ascending order, and 0x83 is what we 5058 * are hoping for. 
5059 */ 5060 while ((page_list[counter] <= 0x83) && 5061 (counter <= (page_list[VPD_PAGE_LENGTH] + 5062 VPD_HEAD_OFFSET))) { 5063 /* 5064 * Add 3 because page_list[3] is the number of 5065 * pages minus 3 5066 */ 5067 5068 switch (page_list[counter]) { 5069 case 0x00: 5070 un->un_vpd_page_mask |= SD_VPD_SUPPORTED_PG; 5071 break; 5072 case 0x80: 5073 un->un_vpd_page_mask |= SD_VPD_UNIT_SERIAL_PG; 5074 break; 5075 case 0x81: 5076 un->un_vpd_page_mask |= SD_VPD_OPERATING_PG; 5077 break; 5078 case 0x82: 5079 un->un_vpd_page_mask |= SD_VPD_ASCII_OP_PG; 5080 break; 5081 case 0x83: 5082 un->un_vpd_page_mask |= SD_VPD_DEVID_WWN_PG; 5083 break; 5084 } 5085 counter++; 5086 } 5087 5088 } else { 5089 rval = -1; 5090 5091 SD_INFO(SD_LOG_ATTACH_DETACH, un, 5092 "sd_check_vpd_page_support: This drive does not implement " 5093 "VPD pages.\n"); 5094 } 5095 5096 kmem_free(page_list, page_length); 5097 5098 return (rval); 5099 } 5100 5101 5102 /* 5103 * Function: sd_setup_pm 5104 * 5105 * Description: Initialize Power Management on the device 5106 * 5107 * Context: Kernel Thread 5108 */ 5109 5110 static void 5111 sd_setup_pm(struct sd_lun *un, dev_info_t *devi) 5112 { 5113 uint_t log_page_size; 5114 uchar_t *log_page_data; 5115 int rval; 5116 5117 /* 5118 * Since we are called from attach, holding a mutex for 5119 * un is unnecessary. Because some of the routines called 5120 * from here require SD_MUTEX to not be held, assert this 5121 * right up front. 5122 */ 5123 ASSERT(!mutex_owned(SD_MUTEX(un))); 5124 /* 5125 * Since the sd device does not have the 'reg' property, 5126 * cpr will not call its DDI_SUSPEND/DDI_RESUME entries. 5127 * The following code is to tell cpr that this device 5128 * DOES need to be suspended and resumed. 5129 */ 5130 (void) ddi_prop_update_string(DDI_DEV_T_NONE, devi, 5131 "pm-hardware-state", "needs-suspend-resume"); 5132 5133 /* 5134 * This complies with the new power management framework 5135 * for certain desktop machines. Create the pm_components 5136 * property as a string array property. 5137 */ 5138 if (un->un_f_pm_supported) { 5139 /* 5140 * not all devices have a motor, try it first. 5141 * some devices may return ILLEGAL REQUEST, some 5142 * will hang 5143 * The following START_STOP_UNIT is used to check if target 5144 * device has a motor. 5145 */ 5146 un->un_f_start_stop_supported = TRUE; 5147 if (sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_START, 5148 SD_PATH_DIRECT) != 0) { 5149 un->un_f_start_stop_supported = FALSE; 5150 } 5151 5152 /* 5153 * create pm properties anyways otherwise the parent can't 5154 * go to sleep 5155 */ 5156 (void) sd_create_pm_components(devi, un); 5157 un->un_f_pm_is_enabled = TRUE; 5158 return; 5159 } 5160 5161 if (!un->un_f_log_sense_supported) { 5162 un->un_power_level = SD_SPINDLE_ON; 5163 un->un_f_pm_is_enabled = FALSE; 5164 return; 5165 } 5166 5167 rval = sd_log_page_supported(un, START_STOP_CYCLE_PAGE); 5168 5169 #ifdef SDDEBUG 5170 if (sd_force_pm_supported) { 5171 /* Force a successful result */ 5172 rval = 1; 5173 } 5174 #endif 5175 5176 /* 5177 * If the start-stop cycle counter log page is not supported 5178 * or if the pm-capable property is SD_PM_CAPABLE_FALSE (0) 5179 * then we should not create the pm_components property. 5180 */ 5181 if (rval == -1) { 5182 /* 5183 * Error. 5184 * Reading log sense failed, most likely this is 5185 * an older drive that does not support log sense. 5186 * If this fails auto-pm is not supported. 
5187 */ 5188 un->un_power_level = SD_SPINDLE_ON; 5189 un->un_f_pm_is_enabled = FALSE; 5190 5191 } else if (rval == 0) { 5192 /* 5193 * Page not found. 5194 * The start stop cycle counter is implemented as page 5195 * START_STOP_CYCLE_VU_PAGE (0x31) in older disks. For 5196 * newer disks it is implemented as START_STOP_CYCLE_PAGE (0xE). 5197 */ 5198 if (sd_log_page_supported(un, START_STOP_CYCLE_VU_PAGE) == 1) { 5199 /* 5200 * Page found, use this one. 5201 */ 5202 un->un_start_stop_cycle_page = START_STOP_CYCLE_VU_PAGE; 5203 un->un_f_pm_is_enabled = TRUE; 5204 } else { 5205 /* 5206 * Error or page not found. 5207 * Auto-pm is not supported for this device. 5208 */ 5209 un->un_power_level = SD_SPINDLE_ON; 5210 un->un_f_pm_is_enabled = FALSE; 5211 } 5212 } else { 5213 /* 5214 * Page found, use it. 5215 */ 5216 un->un_start_stop_cycle_page = START_STOP_CYCLE_PAGE; 5217 un->un_f_pm_is_enabled = TRUE; 5218 } 5219 5220 5221 if (un->un_f_pm_is_enabled == TRUE) { 5222 log_page_size = START_STOP_CYCLE_COUNTER_PAGE_SIZE; 5223 log_page_data = kmem_zalloc(log_page_size, KM_SLEEP); 5224 5225 rval = sd_send_scsi_LOG_SENSE(un, log_page_data, 5226 log_page_size, un->un_start_stop_cycle_page, 5227 0x01, 0, SD_PATH_DIRECT); 5228 #ifdef SDDEBUG 5229 if (sd_force_pm_supported) { 5230 /* Force a successful result */ 5231 rval = 0; 5232 } 5233 #endif 5234 5235 /* 5236 * If the log sense for the start/stop cycle counter page 5237 * succeeds, then power management is supported and we can 5238 * enable auto-pm. 5239 */ 5240 if (rval == 0) { 5241 (void) sd_create_pm_components(devi, un); 5242 } else { 5243 un->un_power_level = SD_SPINDLE_ON; 5244 un->un_f_pm_is_enabled = FALSE; 5245 } 5246 5247 kmem_free(log_page_data, log_page_size); 5248 } 5249 } 5250 5251 5252 /* 5253 * Function: sd_create_pm_components 5254 * 5255 * Description: Initialize PM property. 5256 * 5257 * Context: Kernel thread context 5258 */ 5259 5260 static void 5261 sd_create_pm_components(dev_info_t *devi, struct sd_lun *un) 5262 { 5263 char *pm_comp[] = { "NAME=spindle-motor", "0=off", "1=on", NULL }; 5264 5265 ASSERT(!mutex_owned(SD_MUTEX(un))); 5266 5267 if (ddi_prop_update_string_array(DDI_DEV_T_NONE, devi, 5268 "pm-components", pm_comp, 3) == DDI_PROP_SUCCESS) { 5269 /* 5270 * When components are initially created they are idle; 5271 * power up any non-removable devices. 5272 * Note: the return value of pm_raise_power can't be used 5273 * for determining if PM should be enabled for this device. 5274 * Even if you check the return values and remove this 5275 * property created above, the PM framework will not honor the 5276 * change after the first call to pm_raise_power. Hence, 5277 * removal of that property does not help if pm_raise_power 5278 * fails. In the case of removable media, the start/stop 5279 * will fail if the media is not present. 5280 */ 5281 if (un->un_f_attach_spinup && (pm_raise_power(SD_DEVINFO(un), 0, 5282 SD_SPINDLE_ON) == DDI_SUCCESS)) { 5283 mutex_enter(SD_MUTEX(un)); 5284 un->un_power_level = SD_SPINDLE_ON; 5285 mutex_enter(&un->un_pm_mutex); 5286 /* Set to on and not busy. */ 5287 un->un_pm_count = 0; 5288 } else { 5289 mutex_enter(SD_MUTEX(un)); 5290 un->un_power_level = SD_SPINDLE_OFF; 5291 mutex_enter(&un->un_pm_mutex); 5292 /* Set to off.
*/ 5293 un->un_pm_count = -1; 5294 } 5295 mutex_exit(&un->un_pm_mutex); 5296 mutex_exit(SD_MUTEX(un)); 5297 } else { 5298 un->un_power_level = SD_SPINDLE_ON; 5299 un->un_f_pm_is_enabled = FALSE; 5300 } 5301 } 5302 5303 5304 /* 5305 * Function: sd_ddi_suspend 5306 * 5307 * Description: Performs system power-down operations. This includes 5308 * setting the drive state to indicate it is suspended so 5309 * that no new commands will be accepted. Also, wait for 5310 * all commands that are in transport or queued to a timer 5311 * for retry to complete. All timeout threads are cancelled. 5312 * 5313 * Return Code: DDI_FAILURE or DDI_SUCCESS 5314 * 5315 * Context: Kernel thread context 5316 */ 5317 5318 static int 5319 sd_ddi_suspend(dev_info_t *devi) 5320 { 5321 struct sd_lun *un; 5322 clock_t wait_cmds_complete; 5323 5324 un = ddi_get_soft_state(sd_state, ddi_get_instance(devi)); 5325 if (un == NULL) { 5326 return (DDI_FAILURE); 5327 } 5328 5329 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: entry\n"); 5330 5331 mutex_enter(SD_MUTEX(un)); 5332 5333 /* Return success if the device is already suspended. */ 5334 if (un->un_state == SD_STATE_SUSPENDED) { 5335 mutex_exit(SD_MUTEX(un)); 5336 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: " 5337 "device already suspended, exiting\n"); 5338 return (DDI_SUCCESS); 5339 } 5340 5341 /* Return failure if the device is being used by HA */ 5342 if (un->un_resvd_status & 5343 (SD_RESERVE | SD_WANT_RESERVE | SD_LOST_RESERVE)) { 5344 mutex_exit(SD_MUTEX(un)); 5345 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: " 5346 "device in use by HA, exiting\n"); 5347 return (DDI_FAILURE); 5348 } 5349 5350 /* 5351 * Return failure if the device is in a resource wait 5352 * or power changing state. 5353 */ 5354 if ((un->un_state == SD_STATE_RWAIT) || 5355 (un->un_state == SD_STATE_PM_CHANGING)) { 5356 mutex_exit(SD_MUTEX(un)); 5357 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: " 5358 "device in resource wait state, exiting\n"); 5359 return (DDI_FAILURE); 5360 } 5361 5362 5363 un->un_save_state = un->un_last_state; 5364 New_state(un, SD_STATE_SUSPENDED); 5365 5366 /* 5367 * Wait for all commands that are in transport or queued to a timer 5368 * for retry to complete. 5369 * 5370 * While waiting, no new commands will be accepted or sent because of 5371 * the new state we set above. 5372 * 5373 * Wait till current operation has completed. If we are in the resource 5374 * wait state (with an intr outstanding) then we need to wait till the 5375 * intr completes and starts the next cmd. We want to wait for 5376 * SD_WAIT_CMDS_COMPLETE seconds before failing the DDI_SUSPEND. 5377 */ 5378 wait_cmds_complete = ddi_get_lbolt() + 5379 (sd_wait_cmds_complete * drv_usectohz(1000000)); 5380 5381 while (un->un_ncmds_in_transport != 0) { 5382 /* 5383 * Fail if commands do not finish in the specified time. 5384 */ 5385 if (cv_timedwait(&un->un_disk_busy_cv, SD_MUTEX(un), 5386 wait_cmds_complete) == -1) { 5387 /* 5388 * Undo the state changes made above. Everything 5389 * must go back to its original value. 5390 */ 5391 Restore_state(un); 5392 un->un_last_state = un->un_save_state; 5393 /* Wake up any threads that might be waiting.
*/ 5394 cv_broadcast(&un->un_suspend_cv); 5395 mutex_exit(SD_MUTEX(un)); 5396 SD_ERROR(SD_LOG_IO_PM, un, 5397 "sd_ddi_suspend: failed due to outstanding cmds\n"); 5398 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: exiting\n"); 5399 return (DDI_FAILURE); 5400 } 5401 } 5402 5403 /* 5404 * Cancel SCSI watch thread and timeouts, if any are active 5405 */ 5406 5407 if (SD_OK_TO_SUSPEND_SCSI_WATCHER(un)) { 5408 opaque_t temp_token = un->un_swr_token; 5409 mutex_exit(SD_MUTEX(un)); 5410 scsi_watch_suspend(temp_token); 5411 mutex_enter(SD_MUTEX(un)); 5412 } 5413 5414 if (un->un_reset_throttle_timeid != NULL) { 5415 timeout_id_t temp_id = un->un_reset_throttle_timeid; 5416 un->un_reset_throttle_timeid = NULL; 5417 mutex_exit(SD_MUTEX(un)); 5418 (void) untimeout(temp_id); 5419 mutex_enter(SD_MUTEX(un)); 5420 } 5421 5422 if (un->un_dcvb_timeid != NULL) { 5423 timeout_id_t temp_id = un->un_dcvb_timeid; 5424 un->un_dcvb_timeid = NULL; 5425 mutex_exit(SD_MUTEX(un)); 5426 (void) untimeout(temp_id); 5427 mutex_enter(SD_MUTEX(un)); 5428 } 5429 5430 mutex_enter(&un->un_pm_mutex); 5431 if (un->un_pm_timeid != NULL) { 5432 timeout_id_t temp_id = un->un_pm_timeid; 5433 un->un_pm_timeid = NULL; 5434 mutex_exit(&un->un_pm_mutex); 5435 mutex_exit(SD_MUTEX(un)); 5436 (void) untimeout(temp_id); 5437 mutex_enter(SD_MUTEX(un)); 5438 } else { 5439 mutex_exit(&un->un_pm_mutex); 5440 } 5441 5442 if (un->un_retry_timeid != NULL) { 5443 timeout_id_t temp_id = un->un_retry_timeid; 5444 un->un_retry_timeid = NULL; 5445 mutex_exit(SD_MUTEX(un)); 5446 (void) untimeout(temp_id); 5447 mutex_enter(SD_MUTEX(un)); 5448 } 5449 5450 if (un->un_direct_priority_timeid != NULL) { 5451 timeout_id_t temp_id = un->un_direct_priority_timeid; 5452 un->un_direct_priority_timeid = NULL; 5453 mutex_exit(SD_MUTEX(un)); 5454 (void) untimeout(temp_id); 5455 mutex_enter(SD_MUTEX(un)); 5456 } 5457 5458 if (un->un_f_is_fibre == TRUE) { 5459 /* 5460 * Remove callbacks for insert and remove events 5461 */ 5462 if (un->un_insert_event != NULL) { 5463 mutex_exit(SD_MUTEX(un)); 5464 (void) ddi_remove_event_handler(un->un_insert_cb_id); 5465 mutex_enter(SD_MUTEX(un)); 5466 un->un_insert_event = NULL; 5467 } 5468 5469 if (un->un_remove_event != NULL) { 5470 mutex_exit(SD_MUTEX(un)); 5471 (void) ddi_remove_event_handler(un->un_remove_cb_id); 5472 mutex_enter(SD_MUTEX(un)); 5473 un->un_remove_event = NULL; 5474 } 5475 } 5476 5477 mutex_exit(SD_MUTEX(un)); 5478 5479 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: exit\n"); 5480 5481 return (DDI_SUCCESS); 5482 } 5483 5484 5485 /* 5486 * Function: sd_ddi_pm_suspend 5487 * 5488 * Description: Set the drive state to low power. 5489 * Someone else is required to actually change the drive 5490 * power level. 5491 * 5492 * Arguments: un - driver soft state (unit) structure 5493 * 5494 * Return Code: DDI_FAILURE or DDI_SUCCESS 5495 * 5496 * Context: Kernel thread context 5497 */ 5498 5499 static int 5500 sd_ddi_pm_suspend(struct sd_lun *un) 5501 { 5502 ASSERT(un != NULL); 5503 SD_TRACE(SD_LOG_POWER, un, "sd_ddi_pm_suspend: entry\n"); 5504 5505 ASSERT(!mutex_owned(SD_MUTEX(un))); 5506 mutex_enter(SD_MUTEX(un)); 5507 5508 /* 5509 * Exit if power management is not enabled for this device, or if 5510 * the device is being used by HA. 
5511 */ 5512 if ((un->un_f_pm_is_enabled == FALSE) || (un->un_resvd_status & 5513 (SD_RESERVE | SD_WANT_RESERVE | SD_LOST_RESERVE))) { 5514 mutex_exit(SD_MUTEX(un)); 5515 SD_TRACE(SD_LOG_POWER, un, "sd_ddi_pm_suspend: exiting\n"); 5516 return (DDI_SUCCESS); 5517 } 5518 5519 SD_INFO(SD_LOG_POWER, un, "sd_ddi_pm_suspend: un_ncmds_in_driver=%ld\n", 5520 un->un_ncmds_in_driver); 5521 5522 /* 5523 * See if the device is not busy, ie.: 5524 * - we have no commands in the driver for this device 5525 * - not waiting for resources 5526 */ 5527 if ((un->un_ncmds_in_driver == 0) && 5528 (un->un_state != SD_STATE_RWAIT)) { 5529 /* 5530 * The device is not busy, so it is OK to go to low power state. 5531 * Indicate low power, but rely on someone else to actually 5532 * change it. 5533 */ 5534 mutex_enter(&un->un_pm_mutex); 5535 un->un_pm_count = -1; 5536 mutex_exit(&un->un_pm_mutex); 5537 un->un_power_level = SD_SPINDLE_OFF; 5538 } 5539 5540 mutex_exit(SD_MUTEX(un)); 5541 5542 SD_TRACE(SD_LOG_POWER, un, "sd_ddi_pm_suspend: exit\n"); 5543 5544 return (DDI_SUCCESS); 5545 } 5546 5547 5548 /* 5549 * Function: sd_ddi_resume 5550 * 5551 * Description: Performs system power-up operations. 5552 * 5553 * Return Code: DDI_SUCCESS 5554 * DDI_FAILURE 5555 * 5556 * Context: Kernel thread context 5557 */ 5558 5559 static int 5560 sd_ddi_resume(dev_info_t *devi) 5561 { 5562 struct sd_lun *un; 5563 5564 un = ddi_get_soft_state(sd_state, ddi_get_instance(devi)); 5565 if (un == NULL) { 5566 return (DDI_FAILURE); 5567 } 5568 5569 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_resume: entry\n"); 5570 5571 mutex_enter(SD_MUTEX(un)); 5572 Restore_state(un); 5573 5574 /* 5575 * Restore the state which was saved to give the 5576 * right state in un_last_state 5577 */ 5578 un->un_last_state = un->un_save_state; 5579 /* 5580 * Note: throttle comes back at full. 5581 * Also note: this MUST be done before calling pm_raise_power 5582 * otherwise the system can get hung in biowait. The scenario where 5583 * this'll happen is under cpr suspend. Writing of the system 5584 * state goes through sddump, which writes 0 to un_throttle. If 5585 * writing the system state then fails, for example if the partition is 5586 * too small, then cpr attempts a resume. If throttle isn't restored 5587 * from the saved value until after calling pm_raise_power then 5588 * cmds sent in sdpower are not transported and sd_send_scsi_cmd hangs 5589 * in biowait. 5590 */ 5591 un->un_throttle = un->un_saved_throttle; 5592 5593 /* 5594 * The chance of failure is very rare, as the only command done in the 5595 * power entry point is the START command when you transition from 0->1 or 5596 * unknown->1. Put it into the SPINDLE ON state irrespective of the state at 5597 * which suspend was done. Ignore the return value, as the resume should 5598 * not be failed. In the case of removable media the media need not be 5599 * inserted and hence there is a chance that raise power will fail with 5600 * media not present. 5601 */ 5602 if (un->un_f_attach_spinup) { 5603 mutex_exit(SD_MUTEX(un)); 5604 (void) pm_raise_power(SD_DEVINFO(un), 0, SD_SPINDLE_ON); 5605 mutex_enter(SD_MUTEX(un)); 5606 } 5607 5608 /* 5609 * Don't broadcast to the suspend cv and therefore possibly 5610 * start I/O until after power has been restored.
5611 */ 5612 cv_broadcast(&un->un_suspend_cv); 5613 cv_broadcast(&un->un_state_cv); 5614 5615 /* restart thread */ 5616 if (SD_OK_TO_RESUME_SCSI_WATCHER(un)) { 5617 scsi_watch_resume(un->un_swr_token); 5618 } 5619 5620 #if (defined(__fibre)) 5621 if (un->un_f_is_fibre == TRUE) { 5622 /* 5623 * Add callbacks for insert and remove events 5624 */ 5625 if (strcmp(un->un_node_type, DDI_NT_BLOCK_CHAN)) { 5626 sd_init_event_callbacks(un); 5627 } 5628 } 5629 #endif 5630 5631 /* 5632 * Transport any pending commands to the target. 5633 * 5634 * If this is a low-activity device commands in queue will have to wait 5635 * until new commands come in, which may take awhile. Also, we 5636 * specifically don't check un_ncmds_in_transport because we know that 5637 * there really are no commands in progress after the unit was 5638 * suspended and we could have reached the throttle level, been 5639 * suspended, and have no new commands coming in for awhile. Highly 5640 * unlikely, but so is the low-activity disk scenario. 5641 */ 5642 ddi_xbuf_dispatch(un->un_xbuf_attr); 5643 5644 sd_start_cmds(un, NULL); 5645 mutex_exit(SD_MUTEX(un)); 5646 5647 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_resume: exit\n"); 5648 5649 return (DDI_SUCCESS); 5650 } 5651 5652 5653 /* 5654 * Function: sd_ddi_pm_resume 5655 * 5656 * Description: Set the drive state to powered on. 5657 * Someone else is required to actually change the drive 5658 * power level. 5659 * 5660 * Arguments: un - driver soft state (unit) structure 5661 * 5662 * Return Code: DDI_SUCCESS 5663 * 5664 * Context: Kernel thread context 5665 */ 5666 5667 static int 5668 sd_ddi_pm_resume(struct sd_lun *un) 5669 { 5670 ASSERT(un != NULL); 5671 5672 ASSERT(!mutex_owned(SD_MUTEX(un))); 5673 mutex_enter(SD_MUTEX(un)); 5674 un->un_power_level = SD_SPINDLE_ON; 5675 5676 ASSERT(!mutex_owned(&un->un_pm_mutex)); 5677 mutex_enter(&un->un_pm_mutex); 5678 if (SD_DEVICE_IS_IN_LOW_POWER(un)) { 5679 un->un_pm_count++; 5680 ASSERT(un->un_pm_count == 0); 5681 /* 5682 * Note: no longer do the cv_broadcast on un_suspend_cv. The 5683 * un_suspend_cv is for a system resume, not a power management 5684 * device resume. (4297749) 5685 * cv_broadcast(&un->un_suspend_cv); 5686 */ 5687 } 5688 mutex_exit(&un->un_pm_mutex); 5689 mutex_exit(SD_MUTEX(un)); 5690 5691 return (DDI_SUCCESS); 5692 } 5693 5694 5695 /* 5696 * Function: sd_pm_idletimeout_handler 5697 * 5698 * Description: A timer routine that's active only while a device is busy. 5699 * The purpose is to extend slightly the pm framework's busy 5700 * view of the device to prevent busy/idle thrashing for 5701 * back-to-back commands. Do this by comparing the current time 5702 * to the time at which the last command completed and when the 5703 * difference is greater than sd_pm_idletime, call 5704 * pm_idle_component. In addition to indicating idle to the pm 5705 * framework, update the chain type to again use the internal pm 5706 * layers of the driver. 
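 * * In outline, the handler below does the following (a restatement of the code for quick reading, not additional behavior): * * if ((now - un_pm_idle_time) > sd_pm_idletime && * un_ncmds_in_driver == 0 && un_pm_count == 0) { * switch the buf chain back to the PM-aware tables; * (void) pm_idle_component(SD_DEVINFO(un), 0); * } else { * re-arm this timeout for another 300 ms; * }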
5707 * 5708 * Arguments: arg - driver soft state (unit) structure 5709 * 5710 * Context: Executes in a timeout(9F) thread context 5711 */ 5712 5713 static void 5714 sd_pm_idletimeout_handler(void *arg) 5715 { 5716 struct sd_lun *un = arg; 5717 5718 time_t now; 5719 5720 mutex_enter(&sd_detach_mutex); 5721 if (un->un_detach_count != 0) { 5722 /* Abort if the instance is detaching */ 5723 mutex_exit(&sd_detach_mutex); 5724 return; 5725 } 5726 mutex_exit(&sd_detach_mutex); 5727 5728 now = ddi_get_time(); 5729 /* 5730 * Grab both mutexes, in the proper order, since we're accessing 5731 * both PM and softstate variables. 5732 */ 5733 mutex_enter(SD_MUTEX(un)); 5734 mutex_enter(&un->un_pm_mutex); 5735 if (((now - un->un_pm_idle_time) > sd_pm_idletime) && 5736 (un->un_ncmds_in_driver == 0) && (un->un_pm_count == 0)) { 5737 /* 5738 * Update the chain types. 5739 * This takes affect on the next new command received. 5740 */ 5741 if (un->un_f_non_devbsize_supported) { 5742 un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA; 5743 } else { 5744 un->un_buf_chain_type = SD_CHAIN_INFO_DISK; 5745 } 5746 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD; 5747 5748 SD_TRACE(SD_LOG_IO_PM, un, 5749 "sd_pm_idletimeout_handler: idling device\n"); 5750 (void) pm_idle_component(SD_DEVINFO(un), 0); 5751 un->un_pm_idle_timeid = NULL; 5752 } else { 5753 un->un_pm_idle_timeid = 5754 timeout(sd_pm_idletimeout_handler, un, 5755 (drv_usectohz((clock_t)300000))); /* 300 ms. */ 5756 } 5757 mutex_exit(&un->un_pm_mutex); 5758 mutex_exit(SD_MUTEX(un)); 5759 } 5760 5761 5762 /* 5763 * Function: sd_pm_timeout_handler 5764 * 5765 * Description: Callback to tell framework we are idle. 5766 * 5767 * Context: timeout(9f) thread context. 5768 */ 5769 5770 static void 5771 sd_pm_timeout_handler(void *arg) 5772 { 5773 struct sd_lun *un = arg; 5774 5775 (void) pm_idle_component(SD_DEVINFO(un), 0); 5776 mutex_enter(&un->un_pm_mutex); 5777 un->un_pm_timeid = NULL; 5778 mutex_exit(&un->un_pm_mutex); 5779 } 5780 5781 5782 /* 5783 * Function: sdpower 5784 * 5785 * Description: PM entry point. 5786 * 5787 * Return Code: DDI_SUCCESS 5788 * DDI_FAILURE 5789 * 5790 * Context: Kernel thread context 5791 */ 5792 5793 static int 5794 sdpower(dev_info_t *devi, int component, int level) 5795 { 5796 struct sd_lun *un; 5797 int instance; 5798 int rval = DDI_SUCCESS; 5799 uint_t i, log_page_size, maxcycles, ncycles; 5800 uchar_t *log_page_data; 5801 int log_sense_page; 5802 int medium_present; 5803 time_t intvlp; 5804 dev_t dev; 5805 struct pm_trans_data sd_pm_tran_data; 5806 uchar_t save_state; 5807 int sval; 5808 uchar_t state_before_pm; 5809 int got_semaphore_here; 5810 5811 instance = ddi_get_instance(devi); 5812 5813 if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) || 5814 (SD_SPINDLE_OFF > level) || (level > SD_SPINDLE_ON) || 5815 component != 0) { 5816 return (DDI_FAILURE); 5817 } 5818 5819 dev = sd_make_device(SD_DEVINFO(un)); 5820 5821 SD_TRACE(SD_LOG_IO_PM, un, "sdpower: entry, level = %d\n", level); 5822 5823 /* 5824 * Must synchronize power down with close. 5825 * Attempt to decrement/acquire the open/close semaphore, 5826 * but do NOT wait on it. If it's not greater than zero, 5827 * ie. it can't be decremented without waiting, then 5828 * someone else, either open or close, already has it 5829 * and the try returns 0. Use that knowledge here to determine 5830 * if it's OK to change the device power level. 5831 * Also, only increment it on exit if it was decremented, ie. gotten, 5832 * here. 
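 * * For reference, sema_tryp(9F) returns non-zero when the semaphore was acquired without blocking and 0 when it could not be taken, so the pattern used below is: * * got = sema_tryp(&sem); (non-zero means we now hold it) * ... * if (got != 0) * sema_v(&sem); (release only what was acquired)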
5833 */ 5834 got_semaphore_here = sema_tryp(&un->un_semoclose); 5835 5836 mutex_enter(SD_MUTEX(un)); 5837 5838 SD_INFO(SD_LOG_POWER, un, "sdpower: un_ncmds_in_driver = %ld\n", 5839 un->un_ncmds_in_driver); 5840 5841 /* 5842 * If un_ncmds_in_driver is non-zero, commands are already being 5843 * processed in the driver; if the semaphore was not gotten here, 5844 * an open or close is being processed. 5845 * In either case the device is busy, so a request to go to low 5846 * power at the same time cannot be honored; return failure. 5847 */ 5848 if ((level == SD_SPINDLE_OFF) && 5849 ((un->un_ncmds_in_driver != 0) || (got_semaphore_here == 0))) { 5850 mutex_exit(SD_MUTEX(un)); 5851 5852 if (got_semaphore_here != 0) { 5853 sema_v(&un->un_semoclose); 5854 } 5855 SD_TRACE(SD_LOG_IO_PM, un, 5856 "sdpower: exit, device has queued cmds.\n"); 5857 return (DDI_FAILURE); 5858 } 5859 5860 /* 5861 * If the state is OFFLINE, the disk is completely dead; any START 5862 * or STOP command we would send to change its power would fail 5863 * anyway, so simply return here. 5864 * 5865 * Power changes to a device that's OFFLINE or SUSPENDED 5866 * are not allowed. 5867 */ 5868 if ((un->un_state == SD_STATE_OFFLINE) || 5869 (un->un_state == SD_STATE_SUSPENDED)) { 5870 mutex_exit(SD_MUTEX(un)); 5871 5872 if (got_semaphore_here != 0) { 5873 sema_v(&un->un_semoclose); 5874 } 5875 SD_TRACE(SD_LOG_IO_PM, un, 5876 "sdpower: exit, device is off-line.\n"); 5877 return (DDI_FAILURE); 5878 } 5879 5880 /* 5881 * Change the device's state to indicate its power level 5882 * is being changed. Do this to prevent a power off in the 5883 * middle of commands, which is especially bad on devices 5884 * that are really powered off instead of just spun down. 5885 */ 5886 state_before_pm = un->un_state; 5887 un->un_state = SD_STATE_PM_CHANGING; 5888 5889 mutex_exit(SD_MUTEX(un)); 5890 5891 /* 5892 * If the "pm-capable" property is set to TRUE by HBA drivers, 5893 * bypass the following checking; otherwise, check the log 5894 * sense information for this device. 5895 */ 5896 if ((level == SD_SPINDLE_OFF) && un->un_f_log_sense_supported) { 5897 /* 5898 * Get the log sense information to understand whether the 5899 * power cycle counts have gone beyond the threshold. 5900 */ 5901 log_page_size = START_STOP_CYCLE_COUNTER_PAGE_SIZE; 5902 log_page_data = kmem_zalloc(log_page_size, KM_SLEEP); 5903 5904 mutex_enter(SD_MUTEX(un)); 5905 log_sense_page = un->un_start_stop_cycle_page; 5906 mutex_exit(SD_MUTEX(un)); 5907 5908 rval = sd_send_scsi_LOG_SENSE(un, log_page_data, 5909 log_page_size, log_sense_page, 0x01, 0, SD_PATH_DIRECT); 5910 #ifdef SDDEBUG 5911 if (sd_force_pm_supported) { 5912 /* Force a successful result */ 5913 rval = 0; 5914 } 5915 #endif 5916 if (rval != 0) { 5917 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 5918 "Log Sense Failed\n"); 5919 kmem_free(log_page_data, log_page_size); 5920 /* Cannot support power management on those drives */ 5921 5922 if (got_semaphore_here != 0) { 5923 sema_v(&un->un_semoclose); 5924 } 5925 /* 5926 * On exit put the state back to its original value 5927 * and broadcast to anyone waiting for the power 5928 * change completion.
5929 */ 5930 mutex_enter(SD_MUTEX(un)); 5931 un->un_state = state_before_pm; 5932 cv_broadcast(&un->un_suspend_cv); 5933 mutex_exit(SD_MUTEX(un)); 5934 SD_TRACE(SD_LOG_IO_PM, un, 5935 "sdpower: exit, Log Sense Failed.\n"); 5936 return (DDI_FAILURE); 5937 } 5938 5939 /* 5940 * From the page data, convert the essential information to 5941 * pm_trans_data. 5942 */ 5943 maxcycles = 5944 (log_page_data[0x1c] << 24) | (log_page_data[0x1d] << 16) | 5945 (log_page_data[0x1E] << 8) | log_page_data[0x1F]; 5946 5947 sd_pm_tran_data.un.scsi_cycles.lifemax = maxcycles; 5948 5949 ncycles = 5950 (log_page_data[0x24] << 24) | (log_page_data[0x25] << 16) | 5951 (log_page_data[0x26] << 8) | log_page_data[0x27]; 5952 5953 sd_pm_tran_data.un.scsi_cycles.ncycles = ncycles; 5954 5955 for (i = 0; i < DC_SCSI_MFR_LEN; i++) { 5956 sd_pm_tran_data.un.scsi_cycles.svc_date[i] = 5957 log_page_data[8+i]; 5958 } 5959 5960 kmem_free(log_page_data, log_page_size); 5961 5962 /* 5963 * Call the pm_trans_check routine to get the OK from 5964 * the global policy 5965 */ 5966 5967 sd_pm_tran_data.format = DC_SCSI_FORMAT; 5968 sd_pm_tran_data.un.scsi_cycles.flag = 0; 5969 5970 rval = pm_trans_check(&sd_pm_tran_data, &intvlp); 5971 #ifdef SDDEBUG 5972 if (sd_force_pm_supported) { 5973 /* Force a successful result */ 5974 rval = 1; 5975 } 5976 #endif 5977 switch (rval) { 5978 case 0: 5979 /* 5980 * Either it is not OK to power cycle, or there was an error 5981 * in the parameters passed. pm_trans_check returns, in intvlp, 5982 * the advised time to wait before a power cycle; we are 5983 * supposed to pretend we are busy so that the pm framework 5984 * will never call our power entry point. Because of 5985 * that, install a timeout handler and wait for the 5986 * recommended time to elapse so that power management 5987 * can be effective again. 5988 * 5989 * To effect this behavior, call pm_busy_component to 5990 * indicate to the framework this device is busy. 5991 * By not adjusting un_pm_count, the rest of PM in 5992 * the driver will function normally, independent 5993 * of this; but because the framework is told the device 5994 * is busy it won't attempt powering down until it gets 5995 * a matching idle. The timeout handler sends this. 5996 * Note: sd_pm_entry can't be called here to do this 5997 * because sdpower may have been called as a result 5998 * of a call to pm_raise_power from within sd_pm_entry. 5999 * 6000 * If a timeout handler is already active then 6001 * don't install another. 6002 */ 6003 mutex_enter(&un->un_pm_mutex); 6004 if (un->un_pm_timeid == NULL) { 6005 un->un_pm_timeid = 6006 timeout(sd_pm_timeout_handler, 6007 un, intvlp * drv_usectohz(1000000)); 6008 mutex_exit(&un->un_pm_mutex); 6009 (void) pm_busy_component(SD_DEVINFO(un), 0); 6010 } else { 6011 mutex_exit(&un->un_pm_mutex); 6012 } 6013 if (got_semaphore_here != 0) { 6014 sema_v(&un->un_semoclose); 6015 } 6016 /* 6017 * On exit put the state back to its original value 6018 * and broadcast to anyone waiting for the power 6019 * change completion. 6020 */ 6021 mutex_enter(SD_MUTEX(un)); 6022 un->un_state = state_before_pm; 6023 cv_broadcast(&un->un_suspend_cv); 6024 mutex_exit(SD_MUTEX(un)); 6025 6026 SD_TRACE(SD_LOG_IO_PM, un, "sdpower: exit, " 6027 "trans check Failed, not ok to power cycle.\n"); 6028 return (DDI_FAILURE); 6029 6030 case -1: 6031 if (got_semaphore_here != 0) { 6032 sema_v(&un->un_semoclose); 6033 } 6034 /* 6035 * On exit put the state back to its original value 6036 * and broadcast to anyone waiting for the power 6037 * change completion.
6038 */ 6039 mutex_enter(SD_MUTEX(un)); 6040 un->un_state = state_before_pm; 6041 cv_broadcast(&un->un_suspend_cv); 6042 mutex_exit(SD_MUTEX(un)); 6043 SD_TRACE(SD_LOG_IO_PM, un, 6044 "sdpower: exit, trans check command Failed.\n"); 6045 return (DDI_FAILURE); 6046 } 6047 } 6048 6049 if (level == SD_SPINDLE_OFF) { 6050 /* 6051 * Save the last state... if the STOP FAILS we need it 6052 * for restoring 6053 */ 6054 mutex_enter(SD_MUTEX(un)); 6055 save_state = un->un_last_state; 6056 /* 6057 * There must not be any commands being processed 6058 * in the driver when we get here. Power to the 6059 * device is potentially going off. 6060 */ 6061 ASSERT(un->un_ncmds_in_driver == 0); 6062 mutex_exit(SD_MUTEX(un)); 6063 6064 /* 6065 * For now suspend the device completely before spindle is 6066 * turned off 6067 */ 6068 if ((rval = sd_ddi_pm_suspend(un)) == DDI_FAILURE) { 6069 if (got_semaphore_here != 0) { 6070 sema_v(&un->un_semoclose); 6071 } 6072 /* 6073 * On exit put the state back to its original value 6074 * and broadcast to anyone waiting for the power 6075 * change completion. 6076 */ 6077 mutex_enter(SD_MUTEX(un)); 6078 un->un_state = state_before_pm; 6079 cv_broadcast(&un->un_suspend_cv); 6080 mutex_exit(SD_MUTEX(un)); 6081 SD_TRACE(SD_LOG_IO_PM, un, 6082 "sdpower: exit, PM suspend Failed.\n"); 6083 return (DDI_FAILURE); 6084 } 6085 } 6086 6087 /* 6088 * The transition from SPINDLE_OFF to SPINDLE_ON can happen in open, 6089 * close, or strategy. Dump no longer uses this routine; it uses its 6090 * own code so that it can be done in polled mode. 6091 */ 6092 6093 medium_present = TRUE; 6094 6095 /* 6096 * When powering up, issue a TUR in case the device is at unit 6097 * attention. Don't do retries. Bypass the PM layer, otherwise 6098 * a deadlock on un_pm_busy_cv will occur. 6099 */ 6100 if (level == SD_SPINDLE_ON) { 6101 (void) sd_send_scsi_TEST_UNIT_READY(un, 6102 SD_DONT_RETRY_TUR | SD_BYPASS_PM); 6103 } 6104 6105 SD_TRACE(SD_LOG_IO_PM, un, "sdpower: sending \'%s\' unit\n", 6106 ((level == SD_SPINDLE_ON) ? "START" : "STOP")); 6107 6108 sval = sd_send_scsi_START_STOP_UNIT(un, 6109 ((level == SD_SPINDLE_ON) ? SD_TARGET_START : SD_TARGET_STOP), 6110 SD_PATH_DIRECT); 6111 /* Command failed, check for media present. */ 6112 if ((sval == ENXIO) && un->un_f_has_removable_media) { 6113 medium_present = FALSE; 6114 } 6115 6116 /* 6117 * The conditions of interest here are: 6118 * if a spindle off with media present fails, 6119 * then restore the state and return an error. 6120 * else if a spindle on fails, 6121 * then return an error (there's no state to restore). 6122 * In all other cases we setup for the new state 6123 * and return success. 6124 */ 6125 switch (level) { 6126 case SD_SPINDLE_OFF: 6127 if ((medium_present == TRUE) && (sval != 0)) { 6128 /* The stop command from above failed */ 6129 rval = DDI_FAILURE; 6130 /* 6131 * The stop command failed, and we have media 6132 * present. Put the level back by calling 6133 * sd_ddi_pm_resume() and set the state back to 6134 * its previous value. 6135 */ 6136 (void) sd_ddi_pm_resume(un); 6137 mutex_enter(SD_MUTEX(un)); 6138 un->un_last_state = save_state; 6139 mutex_exit(SD_MUTEX(un)); 6140 break; 6141 } 6142 /* 6143 * The stop command from above succeeded. 6144 */ 6145 if (un->un_f_monitor_media_state) { 6146 /* 6147 * Terminate watch thread in case of removable media 6148 * devices going into low power state.
This is as per 6149 * the requirements of pm framework, otherwise commands 6150 * will be generated for the device (through watch 6151 * thread), even when the device is in low power state. 6152 */ 6153 mutex_enter(SD_MUTEX(un)); 6154 un->un_f_watcht_stopped = FALSE; 6155 if (un->un_swr_token != NULL) { 6156 opaque_t temp_token = un->un_swr_token; 6157 un->un_f_watcht_stopped = TRUE; 6158 un->un_swr_token = NULL; 6159 mutex_exit(SD_MUTEX(un)); 6160 (void) scsi_watch_request_terminate(temp_token, 6161 SCSI_WATCH_TERMINATE_WAIT); 6162 } else { 6163 mutex_exit(SD_MUTEX(un)); 6164 } 6165 } 6166 break; 6167 6168 default: /* The level requested is spindle on... */ 6169 /* 6170 * Legacy behavior: return success on a failed spinup 6171 * if there is no media in the drive. 6172 * Do this by looking at medium_present here. 6173 */ 6174 if ((sval != 0) && medium_present) { 6175 /* The start command from above failed */ 6176 rval = DDI_FAILURE; 6177 break; 6178 } 6179 /* 6180 * The start command from above succeeded 6181 * Resume the devices now that we have 6182 * started the disks 6183 */ 6184 (void) sd_ddi_pm_resume(un); 6185 6186 /* 6187 * Resume the watch thread since it was suspended 6188 * when the device went into low power mode. 6189 */ 6190 if (un->un_f_monitor_media_state) { 6191 mutex_enter(SD_MUTEX(un)); 6192 if (un->un_f_watcht_stopped == TRUE) { 6193 opaque_t temp_token; 6194 6195 un->un_f_watcht_stopped = FALSE; 6196 mutex_exit(SD_MUTEX(un)); 6197 temp_token = scsi_watch_request_submit( 6198 SD_SCSI_DEVP(un), 6199 sd_check_media_time, 6200 SENSE_LENGTH, sd_media_watch_cb, 6201 (caddr_t)dev); 6202 mutex_enter(SD_MUTEX(un)); 6203 un->un_swr_token = temp_token; 6204 } 6205 mutex_exit(SD_MUTEX(un)); 6206 } 6207 } 6208 if (got_semaphore_here != 0) { 6209 sema_v(&un->un_semoclose); 6210 } 6211 /* 6212 * On exit put the state back to it's original value 6213 * and broadcast to anyone waiting for the power 6214 * change completion. 6215 */ 6216 mutex_enter(SD_MUTEX(un)); 6217 un->un_state = state_before_pm; 6218 cv_broadcast(&un->un_suspend_cv); 6219 mutex_exit(SD_MUTEX(un)); 6220 6221 SD_TRACE(SD_LOG_IO_PM, un, "sdpower: exit, status = 0x%x\n", rval); 6222 6223 return (rval); 6224 } 6225 6226 6227 6228 /* 6229 * Function: sdattach 6230 * 6231 * Description: Driver's attach(9e) entry point function. 6232 * 6233 * Arguments: devi - opaque device info handle 6234 * cmd - attach type 6235 * 6236 * Return Code: DDI_SUCCESS 6237 * DDI_FAILURE 6238 * 6239 * Context: Kernel thread context 6240 */ 6241 6242 static int 6243 sdattach(dev_info_t *devi, ddi_attach_cmd_t cmd) 6244 { 6245 switch (cmd) { 6246 case DDI_ATTACH: 6247 return (sd_unit_attach(devi)); 6248 case DDI_RESUME: 6249 return (sd_ddi_resume(devi)); 6250 default: 6251 break; 6252 } 6253 return (DDI_FAILURE); 6254 } 6255 6256 6257 /* 6258 * Function: sddetach 6259 * 6260 * Description: Driver's detach(9E) entry point function. 
6261 * 6262 * Arguments: devi - opaque device info handle 6263 * cmd - detach type 6264 * 6265 * Return Code: DDI_SUCCESS 6266 * DDI_FAILURE 6267 * 6268 * Context: Kernel thread context 6269 */ 6270 6271 static int 6272 sddetach(dev_info_t *devi, ddi_detach_cmd_t cmd) 6273 { 6274 switch (cmd) { 6275 case DDI_DETACH: 6276 return (sd_unit_detach(devi)); 6277 case DDI_SUSPEND: 6278 return (sd_ddi_suspend(devi)); 6279 default: 6280 break; 6281 } 6282 return (DDI_FAILURE); 6283 } 6284 6285 6286 /* 6287 * Function: sd_sync_with_callback 6288 * 6289 * Description: Prevents sd_unit_attach or sd_unit_detach from freeing the soft 6290 * state while the callback routine is active. 6291 * 6292 * Arguments: un: softstate structure for the instance 6293 * 6294 * Context: Kernel thread context 6295 */ 6296 6297 static void 6298 sd_sync_with_callback(struct sd_lun *un) 6299 { 6300 ASSERT(un != NULL); 6301 6302 mutex_enter(SD_MUTEX(un)); 6303 6304 ASSERT(un->un_in_callback >= 0); 6305 6306 while (un->un_in_callback > 0) { 6307 mutex_exit(SD_MUTEX(un)); 6308 delay(2); 6309 mutex_enter(SD_MUTEX(un)); 6310 } 6311 6312 mutex_exit(SD_MUTEX(un)); 6313 } 6314 6315 /* 6316 * Function: sd_unit_attach 6317 * 6318 * Description: Performs DDI_ATTACH processing for sdattach(). Allocates 6319 * the soft state structure for the device and performs 6320 * all necessary structure and device initializations. 6321 * 6322 * Arguments: devi: the system's dev_info_t for the device. 6323 * 6324 * Return Code: DDI_SUCCESS if attach is successful. 6325 * DDI_FAILURE if any part of the attach fails. 6326 * 6327 * Context: Called at attach(9e) time for the DDI_ATTACH flag. 6328 * Kernel thread context only. Can sleep. 6329 */ 6330 6331 static int 6332 sd_unit_attach(dev_info_t *devi) 6333 { 6334 struct scsi_device *devp; 6335 struct sd_lun *un; 6336 char *variantp; 6337 int reservation_flag = SD_TARGET_IS_UNRESERVED; 6338 int instance; 6339 int rval; 6340 int wc_enabled; 6341 int tgt; 6342 uint64_t capacity; 6343 uint_t lbasize = 0; 6344 dev_info_t *pdip = ddi_get_parent(devi); 6345 int offbyone = 0; 6346 int geom_label_valid = 0; 6347 6348 /* 6349 * Retrieve the target driver's private data area. This was set 6350 * up by the HBA. 6351 */ 6352 devp = ddi_get_driver_private(devi); 6353 6354 /* 6355 * Retrieve the target ID of the device. 6356 */ 6357 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS, 6358 SCSI_ADDR_PROP_TARGET, -1); 6359 6360 /* 6361 * Since we have no idea what state things were left in by the last 6362 * user of the device, set up some 'default' settings, ie. turn 'em 6363 * off. The scsi_ifsetcap calls force re-negotiations with the drive. 6364 * Do this before the scsi_probe, which sends an inquiry. 6365 * This is a fix for bug (4430280). 6366 * Of special importance is wide-xfer. The drive could have been left 6367 * in wide transfer mode by the last driver to communicate with it, 6368 * this includes us. If that's the case, and if the following is not 6369 * setup properly or we don't re-negotiate with the drive prior to 6370 * transferring data to/from the drive, it causes bus parity errors, 6371 * data overruns, and unexpected interrupts. This first occurred when 6372 * the fix for bug (4378686) was made. 
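 * * As a reminder of the interface used below (scsi_ifsetcap(9F)): the arguments are (ap, cap, value, whom), where whom == 1 applies the change to this particular target, and the call returns 1 when the capability was successfully set. For example: * * (void) scsi_ifsetcap(&devp->sd_address, "wide-xfer", 0, 1); * * forces wide transfers off for this target until they are renegotiated.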
6373 */ 6374 (void) scsi_ifsetcap(&devp->sd_address, "lun-reset", 0, 1); 6375 (void) scsi_ifsetcap(&devp->sd_address, "wide-xfer", 0, 1); 6376 (void) scsi_ifsetcap(&devp->sd_address, "auto-rqsense", 0, 1); 6377 6378 /* 6379 * Currently, scsi_ifsetcap sets tagged-qing capability for all LUNs 6380 * on a target. Setting it per lun instance actually sets the 6381 * capability of this target, which affects those luns already 6382 * attached on the same target. So during attach, we can only disable 6383 * this capability only when no other lun has been attached on this 6384 * target. By doing this, we assume a target has the same tagged-qing 6385 * capability for every lun. The condition can be removed when HBA 6386 * is changed to support per lun based tagged-qing capability. 6387 */ 6388 if (sd_scsi_get_target_lun_count(pdip, tgt) < 1) { 6389 (void) scsi_ifsetcap(&devp->sd_address, "tagged-qing", 0, 1); 6390 } 6391 6392 /* 6393 * Use scsi_probe() to issue an INQUIRY command to the device. 6394 * This call will allocate and fill in the scsi_inquiry structure 6395 * and point the sd_inq member of the scsi_device structure to it. 6396 * If the attach succeeds, then this memory will not be de-allocated 6397 * (via scsi_unprobe()) until the instance is detached. 6398 */ 6399 if (scsi_probe(devp, SLEEP_FUNC) != SCSIPROBE_EXISTS) { 6400 goto probe_failed; 6401 } 6402 6403 /* 6404 * Check the device type as specified in the inquiry data and 6405 * claim it if it is of a type that we support. 6406 */ 6407 switch (devp->sd_inq->inq_dtype) { 6408 case DTYPE_DIRECT: 6409 break; 6410 case DTYPE_RODIRECT: 6411 break; 6412 case DTYPE_OPTICAL: 6413 break; 6414 case DTYPE_NOTPRESENT: 6415 default: 6416 /* Unsupported device type; fail the attach. */ 6417 goto probe_failed; 6418 } 6419 6420 /* 6421 * Allocate the soft state structure for this unit. 6422 * 6423 * We rely upon this memory being set to all zeroes by 6424 * ddi_soft_state_zalloc(). We assume that any member of the 6425 * soft state structure that is not explicitly initialized by 6426 * this routine will have a value of zero. 6427 */ 6428 instance = ddi_get_instance(devp->sd_dev); 6429 if (ddi_soft_state_zalloc(sd_state, instance) != DDI_SUCCESS) { 6430 goto probe_failed; 6431 } 6432 6433 /* 6434 * Retrieve a pointer to the newly-allocated soft state. 6435 * 6436 * This should NEVER fail if the ddi_soft_state_zalloc() call above 6437 * was successful, unless something has gone horribly wrong and the 6438 * ddi's soft state internals are corrupt (in which case it is 6439 * probably better to halt here than just fail the attach....) 6440 */ 6441 if ((un = ddi_get_soft_state(sd_state, instance)) == NULL) { 6442 panic("sd_unit_attach: NULL soft state on instance:0x%x", 6443 instance); 6444 /*NOTREACHED*/ 6445 } 6446 6447 /* 6448 * Link the back ptr of the driver soft state to the scsi_device 6449 * struct for this lun. 6450 * Save a pointer to the softstate in the driver-private area of 6451 * the scsi_device struct. 6452 * Note: We cannot call SD_INFO, SD_TRACE, SD_ERROR, or SD_DIAG until 6453 * we first set un->un_sd below. 6454 */ 6455 un->un_sd = devp; 6456 devp->sd_private = (opaque_t)un; 6457 6458 /* 6459 * The following must be after devp is stored in the soft state struct. 6460 */ 6461 #ifdef SDDEBUG 6462 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 6463 "%s_unit_attach: un:0x%p instance:%d\n", 6464 ddi_driver_name(devi), un, instance); 6465 #endif 6466 6467 /* 6468 * Set up the device type and node type (for the minor nodes). 
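 * * The mapping applied by the switch statement below, gathered here for reference: * * inq_dtype un_node_type un_ctype * DTYPE_RODIRECT DDI_NT_CD_CHAN CTYPE_CDROM * DTYPE_OPTICAL DDI_NT_BLOCK_CHAN CTYPE_ROD * (other) DDI_NT_BLOCK_CHAN CTYPE_CCS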
6469 * By default we assume that the device can at least support the 6470 * Common Command Set. Call it a CD-ROM if it reports itself 6471 * as a RODIRECT device. 6472 */ 6473 switch (devp->sd_inq->inq_dtype) { 6474 case DTYPE_RODIRECT: 6475 un->un_node_type = DDI_NT_CD_CHAN; 6476 un->un_ctype = CTYPE_CDROM; 6477 break; 6478 case DTYPE_OPTICAL: 6479 un->un_node_type = DDI_NT_BLOCK_CHAN; 6480 un->un_ctype = CTYPE_ROD; 6481 break; 6482 default: 6483 un->un_node_type = DDI_NT_BLOCK_CHAN; 6484 un->un_ctype = CTYPE_CCS; 6485 break; 6486 } 6487 6488 /* 6489 * Try to read the interconnect type from the HBA. 6490 * 6491 * Note: This driver is currently compiled as two binaries, a parallel 6492 * scsi version (sd) and a fibre channel version (ssd). All functional 6493 * differences are determined at compile time. In the future a single 6494 * binary will be provided and the interconnect type will be used to 6495 * differentiate between fibre and parallel scsi behaviors. At that time 6496 * it will be necessary for all fibre channel HBAs to support this 6497 * property. 6498 * 6499 * Set un_f_is_fibre to TRUE (default fibre). 6500 */ 6501 un->un_f_is_fibre = TRUE; 6502 switch (scsi_ifgetcap(SD_ADDRESS(un), "interconnect-type", -1)) { 6503 case INTERCONNECT_SSA: 6504 un->un_interconnect_type = SD_INTERCONNECT_SSA; 6505 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6506 "sd_unit_attach: un:0x%p SD_INTERCONNECT_SSA\n", un); 6507 break; 6508 case INTERCONNECT_PARALLEL: 6509 un->un_f_is_fibre = FALSE; 6510 un->un_interconnect_type = SD_INTERCONNECT_PARALLEL; 6511 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6512 "sd_unit_attach: un:0x%p SD_INTERCONNECT_PARALLEL\n", un); 6513 break; 6514 case INTERCONNECT_SATA: 6515 un->un_f_is_fibre = FALSE; 6516 un->un_interconnect_type = SD_INTERCONNECT_SATA; 6517 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6518 "sd_unit_attach: un:0x%p SD_INTERCONNECT_SATA\n", un); 6519 break; 6520 case INTERCONNECT_FIBRE: 6521 un->un_interconnect_type = SD_INTERCONNECT_FIBRE; 6522 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6523 "sd_unit_attach: un:0x%p SD_INTERCONNECT_FIBRE\n", un); 6524 break; 6525 case INTERCONNECT_FABRIC: 6526 un->un_interconnect_type = SD_INTERCONNECT_FABRIC; 6527 un->un_node_type = DDI_NT_BLOCK_FABRIC; 6528 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6529 "sd_unit_attach: un:0x%p SD_INTERCONNECT_FABRIC\n", un); 6530 break; 6531 default: 6532 #ifdef SD_DEFAULT_INTERCONNECT_TYPE 6533 /* 6534 * The HBA does not support the "interconnect-type" property 6535 * (or did not provide a recognized type). 6536 * 6537 * Note: This will be obsoleted when a single fibre channel 6538 * and parallel scsi driver is delivered. In the meantime the 6539 * interconnect type will be set to the platform default. If that 6540 * type is not parallel SCSI, it means that we should be 6541 * assuming "ssd" semantics. However, here this also means that 6542 * the FC HBA is not supporting the "interconnect-type" property 6543 * like we expect it to, so log this occurrence. 6544 */ 6545 un->un_interconnect_type = SD_DEFAULT_INTERCONNECT_TYPE; 6546 if (!SD_IS_PARALLEL_SCSI(un)) { 6547 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6548 "sd_unit_attach: un:0x%p Assuming " 6549 "INTERCONNECT_FIBRE\n", un); 6550 } else { 6551 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6552 "sd_unit_attach: un:0x%p Assuming " 6553 "INTERCONNECT_PARALLEL\n", un); 6554 un->un_f_is_fibre = FALSE; 6555 } 6556 #else 6557 /* 6558 * Note: This source will be implemented when a single fibre 6559 * channel and parallel scsi driver is delivered. The default
The default 6560 * will be to assume that if a device does not support the 6561 * "interconnect-type" property it is a parallel SCSI HBA and 6562 * we will set the interconnect type for parallel scsi. 6563 */ 6564 un->un_interconnect_type = SD_INTERCONNECT_PARALLEL; 6565 un->un_f_is_fibre = FALSE; 6566 #endif 6567 break; 6568 } 6569 6570 if (un->un_f_is_fibre == TRUE) { 6571 if (scsi_ifgetcap(SD_ADDRESS(un), "scsi-version", 1) == 6572 SCSI_VERSION_3) { 6573 switch (un->un_interconnect_type) { 6574 case SD_INTERCONNECT_FIBRE: 6575 case SD_INTERCONNECT_SSA: 6576 un->un_node_type = DDI_NT_BLOCK_WWN; 6577 break; 6578 default: 6579 break; 6580 } 6581 } 6582 } 6583 6584 /* 6585 * Initialize the Request Sense command for the target 6586 */ 6587 if (sd_alloc_rqs(devp, un) != DDI_SUCCESS) { 6588 goto alloc_rqs_failed; 6589 } 6590 6591 /* 6592 * Set un_retry_count with SD_RETRY_COUNT, this is ok for Sparc 6593 * with separate binary for sd and ssd. 6594 * 6595 * x86 has 1 binary, un_retry_count is set base on connection type. 6596 * The hardcoded values will go away when Sparc uses 1 binary 6597 * for sd and ssd. This hardcoded values need to match 6598 * SD_RETRY_COUNT in sddef.h 6599 * The value used is base on interconnect type. 6600 * fibre = 3, parallel = 5 6601 */ 6602 #if defined(__i386) || defined(__amd64) 6603 un->un_retry_count = un->un_f_is_fibre ? 3 : 5; 6604 #else 6605 un->un_retry_count = SD_RETRY_COUNT; 6606 #endif 6607 6608 /* 6609 * Set the per disk retry count to the default number of retries 6610 * for disks and CDROMs. This value can be overridden by the 6611 * disk property list or an entry in sd.conf. 6612 */ 6613 un->un_notready_retry_count = 6614 ISCD(un) ? CD_NOT_READY_RETRY_COUNT(un) 6615 : DISK_NOT_READY_RETRY_COUNT(un); 6616 6617 /* 6618 * Set the busy retry count to the default value of un_retry_count. 6619 * This can be overridden by entries in sd.conf or the device 6620 * config table. 6621 */ 6622 un->un_busy_retry_count = un->un_retry_count; 6623 6624 /* 6625 * Init the reset threshold for retries. This number determines 6626 * how many retries must be performed before a reset can be issued 6627 * (for certain error conditions). This can be overridden by entries 6628 * in sd.conf or the device config table. 6629 */ 6630 un->un_reset_retry_count = (un->un_retry_count / 2); 6631 6632 /* 6633 * Set the victim_retry_count to the default un_retry_count 6634 */ 6635 un->un_victim_retry_count = (2 * un->un_retry_count); 6636 6637 /* 6638 * Set the reservation release timeout to the default value of 6639 * 5 seconds. This can be overridden by entries in ssd.conf or the 6640 * device config table. 6641 */ 6642 un->un_reserve_release_time = 5; 6643 6644 /* 6645 * Set up the default maximum transfer size. Note that this may 6646 * get updated later in the attach, when setting up default wide 6647 * operations for disks. 6648 */ 6649 #if defined(__i386) || defined(__amd64) 6650 un->un_max_xfer_size = (uint_t)SD_DEFAULT_MAX_XFER_SIZE; 6651 #else 6652 un->un_max_xfer_size = (uint_t)maxphys; 6653 #endif 6654 6655 /* 6656 * Get "allow bus device reset" property (defaults to "enabled" if 6657 * the property was not defined). This is to disable bus resets for 6658 * certain kinds of error recovery. Note: In the future when a run-time 6659 * fibre check is available the soft state flag should default to 6660 * enabled. 
6661 */ 6662 if (un->un_f_is_fibre == TRUE) { 6663 un->un_f_allow_bus_device_reset = TRUE; 6664 } else { 6665 if (ddi_getprop(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS, 6666 "allow-bus-device-reset", 1) != 0) { 6667 un->un_f_allow_bus_device_reset = TRUE; 6668 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6669 "sd_unit_attach: un:0x%p Bus device reset " 6670 "enabled\n", un); 6671 } else { 6672 un->un_f_allow_bus_device_reset = FALSE; 6673 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6674 "sd_unit_attach: un:0x%p Bus device reset " 6675 "disabled\n", un); 6676 } 6677 } 6678 6679 /* 6680 * Check if this is an ATAPI device. ATAPI devices use Group 1 6681 * Read/Write commands and Group 2 Mode Sense/Select commands. 6682 * 6683 * Note: The "obsolete" way of doing this is to check for the "atapi" 6684 * property. The new "variant" property with a value of "atapi" has been 6685 * introduced so that future 'variants' of standard SCSI behavior (like 6686 * atapi) could be specified by the underlying HBA drivers by supplying 6687 * a new value for the "variant" property, instead of having to define a 6688 * new property. 6689 */ 6690 if (ddi_prop_get_int(DDI_DEV_T_ANY, devi, 0, "atapi", -1) != -1) { 6691 un->un_f_cfg_is_atapi = TRUE; 6692 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6693 "sd_unit_attach: un:0x%p Atapi device\n", un); 6694 } 6695 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, devi, 0, "variant", 6696 &variantp) == DDI_PROP_SUCCESS) { 6697 if (strcmp(variantp, "atapi") == 0) { 6698 un->un_f_cfg_is_atapi = TRUE; 6699 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6700 "sd_unit_attach: un:0x%p Atapi device\n", un); 6701 } 6702 ddi_prop_free(variantp); 6703 } 6704 6705 un->un_cmd_timeout = SD_IO_TIME; 6706 6707 /* Info on current states, statuses, etc. (Updated frequently) */ 6708 un->un_state = SD_STATE_NORMAL; 6709 un->un_last_state = SD_STATE_NORMAL; 6710 6711 /* Control & status info for command throttling */ 6712 un->un_throttle = sd_max_throttle; 6713 un->un_saved_throttle = sd_max_throttle; 6714 un->un_min_throttle = sd_min_throttle; 6715 6716 if (un->un_f_is_fibre == TRUE) { 6717 un->un_f_use_adaptive_throttle = TRUE; 6718 } else { 6719 un->un_f_use_adaptive_throttle = FALSE; 6720 } 6721 6722 /* Removable media support. */ 6723 cv_init(&un->un_state_cv, NULL, CV_DRIVER, NULL); 6724 un->un_mediastate = DKIO_NONE; 6725 un->un_specified_mediastate = DKIO_NONE; 6726 6727 /* CVs for suspend/resume (PM or DR) */ 6728 cv_init(&un->un_suspend_cv, NULL, CV_DRIVER, NULL); 6729 cv_init(&un->un_disk_busy_cv, NULL, CV_DRIVER, NULL); 6730 6731 /* Power management support. */ 6732 un->un_power_level = SD_SPINDLE_UNINIT; 6733 6734 cv_init(&un->un_wcc_cv, NULL, CV_DRIVER, NULL); 6735 un->un_f_wcc_inprog = 0; 6736 6737 /* 6738 * The open/close semaphore is used to serialize threads executing 6739 * in the driver's open & close entry point routines for a given 6740 * instance. 6741 */ 6742 (void) sema_init(&un->un_semoclose, 1, NULL, SEMA_DRIVER, NULL); 6743 6744 /* 6745 * The conf file entry and softstate variable is a forceful override, 6746 * meaning a non-zero value must be entered to change the default. 6747 */ 6748 un->un_f_disksort_disabled = FALSE; 6749 6750 /* 6751 * Retrieve the properties from the static driver table or the driver 6752 * configuration file (.conf) for this unit and update the soft state 6753 * for the device as needed for the indicated properties. 
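 * * Purely as an illustration of the .conf mechanism (the vendor/model string, data property name, and values below are hypothetical, and the exact tuple layout is the version-1 format defined in sddef.h): * * sd-config-list = "ACME    SuperDisk", "acme-disk-data"; * acme-disk-data = 1, 0x8, 10, ...; * * where, by convention, the vendor ID portion of the match string is padded to eight characters.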
6754 * Note: the property configuration needs to occur here as some of the 6755 * following routines may have dependencies on soft state flags set 6756 * as part of the driver property configuration. 6757 */ 6758 sd_read_unit_properties(un); 6759 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 6760 "sd_unit_attach: un:0x%p property configuration complete.\n", un); 6761 6762 /* 6763 * A device is treated as hotpluggable only if it has the 6764 * "hotpluggable" property. Otherwise, it is regarded as 6765 * non-hotpluggable. 6766 */ 6767 if (ddi_prop_get_int(DDI_DEV_T_ANY, devi, 0, "hotpluggable", 6768 -1) != -1) { 6769 un->un_f_is_hotpluggable = TRUE; 6770 } 6771 6772 /* 6773 * Set the unit's attributes (flags) according to "hotpluggable" 6774 * and the RMB bit in the INQUIRY data. 6775 */ 6776 sd_set_unit_attributes(un, devi); 6777 6778 /* 6779 * By default, we mark the capacity, lbasize, and geometry 6780 * as invalid. Only if we successfully read a valid capacity 6781 * will we update the un_blockcount and un_tgt_blocksize with the 6782 * valid values (the geometry will be validated later). 6783 */ 6784 un->un_f_blockcount_is_valid = FALSE; 6785 un->un_f_tgt_blocksize_is_valid = FALSE; 6786 6787 /* 6788 * Use DEV_BSIZE and DEV_BSHIFT as defaults, until we can determine 6789 * otherwise. 6790 */ 6791 un->un_tgt_blocksize = un->un_sys_blocksize = DEV_BSIZE; 6792 un->un_blockcount = 0; 6793 6794 /* 6795 * Set up the per-instance info needed to determine the correct 6796 * CDBs and other info for issuing commands to the target. 6797 */ 6798 sd_init_cdb_limits(un); 6799 6800 /* 6801 * Set up the IO chains to use, based upon the target type. 6802 */ 6803 if (un->un_f_non_devbsize_supported) { 6804 un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA; 6805 } else { 6806 un->un_buf_chain_type = SD_CHAIN_INFO_DISK; 6807 } 6808 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD; 6809 un->un_direct_chain_type = SD_CHAIN_INFO_DIRECT_CMD; 6810 un->un_priority_chain_type = SD_CHAIN_INFO_PRIORITY_CMD; 6811 6812 un->un_xbuf_attr = ddi_xbuf_attr_create(sizeof (struct sd_xbuf), 6813 sd_xbuf_strategy, un, sd_xbuf_active_limit, sd_xbuf_reserve_limit, 6814 ddi_driver_major(devi), DDI_XBUF_QTHREAD_DRIVER); 6815 ddi_xbuf_attr_register_devinfo(un->un_xbuf_attr, devi); 6816 6817 6818 if (ISCD(un)) { 6819 un->un_additional_codes = sd_additional_codes; 6820 } else { 6821 un->un_additional_codes = NULL; 6822 } 6823 6824 /* 6825 * Create the kstats here so they can be available for attach-time 6826 * routines that send commands to the unit (either polled or via 6827 * sd_send_scsi_cmd). 6828 * 6829 * Note: This is a critical sequence that needs to be maintained: 6830 * 1) Instantiate the kstats here, before any routines using the 6831 * iopath (i.e. sd_send_scsi_cmd). 6832 * 2) Instantiate and initialize the partition stats 6833 * (sd_set_pstats). 6834 * 3) Initialize the error stats (sd_set_errstats), following 6835 * sd_validate_geometry(), sd_register_devid(), 6836 * and sd_cache_control().
6837 */ 6838 6839 un->un_stats = kstat_create(sd_label, instance, 6840 NULL, "disk", KSTAT_TYPE_IO, 1, KSTAT_FLAG_PERSISTENT); 6841 if (un->un_stats != NULL) { 6842 un->un_stats->ks_lock = SD_MUTEX(un); 6843 kstat_install(un->un_stats); 6844 } 6845 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 6846 "sd_unit_attach: un:0x%p un_stats created\n", un); 6847 6848 sd_create_errstats(un, instance); 6849 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 6850 "sd_unit_attach: un:0x%p errstats created\n", un); 6851 6852 /* 6853 * The following if/else code was relocated here from below as part 6854 * of the fix for bug (4430280). However with the default setup added 6855 * on entry to this routine, it's no longer absolutely necessary for 6856 * this to be before the call to sd_spin_up_unit. 6857 */ 6858 if (SD_IS_PARALLEL_SCSI(un) || SD_IS_SERIAL(un)) { 6859 /* 6860 * If SCSI-2 tagged queueing is supported by the target 6861 * and by the host adapter then we will enable it. 6862 */ 6863 un->un_tagflags = 0; 6864 if ((devp->sd_inq->inq_rdf == RDF_SCSI2) && 6865 (devp->sd_inq->inq_cmdque) && 6866 (un->un_f_arq_enabled == TRUE)) { 6867 if (scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 6868 1, 1) == 1) { 6869 un->un_tagflags = FLAG_STAG; 6870 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6871 "sd_unit_attach: un:0x%p tag queueing " 6872 "enabled\n", un); 6873 } else if (scsi_ifgetcap(SD_ADDRESS(un), 6874 "untagged-qing", 0) == 1) { 6875 un->un_f_opt_queueing = TRUE; 6876 un->un_saved_throttle = un->un_throttle = 6877 min(un->un_throttle, 3); 6878 } else { 6879 un->un_f_opt_queueing = FALSE; 6880 un->un_saved_throttle = un->un_throttle = 1; 6881 } 6882 } else if ((scsi_ifgetcap(SD_ADDRESS(un), "untagged-qing", 0) 6883 == 1) && (un->un_f_arq_enabled == TRUE)) { 6884 /* The Host Adapter supports internal queueing. */ 6885 un->un_f_opt_queueing = TRUE; 6886 un->un_saved_throttle = un->un_throttle = 6887 min(un->un_throttle, 3); 6888 } else { 6889 un->un_f_opt_queueing = FALSE; 6890 un->un_saved_throttle = un->un_throttle = 1; 6891 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6892 "sd_unit_attach: un:0x%p no tag queueing\n", un); 6893 } 6894 6895 /* 6896 * Enable large transfers for SATA/SAS drives 6897 */ 6898 if (SD_IS_SERIAL(un)) { 6899 un->un_max_xfer_size = 6900 ddi_getprop(DDI_DEV_T_ANY, devi, 0, 6901 sd_max_xfer_size, SD_MAX_XFER_SIZE); 6902 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6903 "sd_unit_attach: un:0x%p max transfer " 6904 "size=0x%x\n", un, un->un_max_xfer_size); 6905 6906 } 6907 6908 /* Setup or tear down default wide operations for disks */ 6909 6910 /* 6911 * Note: Legacy: it may be possible for both "sd_max_xfer_size" 6912 * and "ssd_max_xfer_size" to exist simultaneously on the same 6913 * system and be set to different values. In the future this 6914 * code may need to be updated when the ssd module is 6915 * obsoleted and removed from the system. 
(4299588) 6916 */ 6917 if (SD_IS_PARALLEL_SCSI(un) && 6918 (devp->sd_inq->inq_rdf == RDF_SCSI2) && 6919 (devp->sd_inq->inq_wbus16 || devp->sd_inq->inq_wbus32)) { 6920 if (scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 6921 1, 1) == 1) { 6922 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6923 "sd_unit_attach: un:0x%p Wide Transfer " 6924 "enabled\n", un); 6925 } 6926 6927 /* 6928 * If tagged queuing has also been enabled, then 6929 * enable large xfers 6930 */ 6931 if (un->un_saved_throttle == sd_max_throttle) { 6932 un->un_max_xfer_size = 6933 ddi_getprop(DDI_DEV_T_ANY, devi, 0, 6934 sd_max_xfer_size, SD_MAX_XFER_SIZE); 6935 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6936 "sd_unit_attach: un:0x%p max transfer " 6937 "size=0x%x\n", un, un->un_max_xfer_size); 6938 } 6939 } else { 6940 if (scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 6941 0, 1) == 1) { 6942 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6943 "sd_unit_attach: un:0x%p " 6944 "Wide Transfer disabled\n", un); 6945 } 6946 } 6947 } else { 6948 un->un_tagflags = FLAG_STAG; 6949 un->un_max_xfer_size = ddi_getprop(DDI_DEV_T_ANY, 6950 devi, 0, sd_max_xfer_size, SD_MAX_XFER_SIZE); 6951 } 6952 6953 /* 6954 * If this target supports LUN reset, try to enable it. 6955 */ 6956 if (un->un_f_lun_reset_enabled) { 6957 if (scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 1, 1) == 1) { 6958 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_unit_attach: " 6959 "un:0x%p lun_reset capability set\n", un); 6960 } else { 6961 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_unit_attach: " 6962 "un:0x%p lun-reset capability not set\n", un); 6963 } 6964 } 6965 6966 /* 6967 * At this point in the attach, we have enough info in the 6968 * soft state to be able to issue commands to the target. 6969 * 6970 * All command paths used below MUST issue their commands as 6971 * SD_PATH_DIRECT. This is important as intermediate layers 6972 * are not all initialized yet (such as PM). 6973 */ 6974 6975 /* 6976 * Send a TEST UNIT READY command to the device. This should clear 6977 * any outstanding UNIT ATTENTION that may be present. 6978 * 6979 * Note: Don't check for success, just track if there is a reservation, 6980 * this is a throw away command to clear any unit attentions. 6981 * 6982 * Note: This MUST be the first command issued to the target during 6983 * attach to ensure power on UNIT ATTENTIONS are cleared. 6984 * Pass in flag SD_DONT_RETRY_TUR to prevent the long delays associated 6985 * with attempts at spinning up a device with no media. 6986 */ 6987 if (sd_send_scsi_TEST_UNIT_READY(un, SD_DONT_RETRY_TUR) == EACCES) { 6988 reservation_flag = SD_TARGET_IS_RESERVED; 6989 } 6990 6991 /* 6992 * If the device is NOT a removable media device, attempt to spin 6993 * it up (using the START_STOP_UNIT command) and read its capacity 6994 * (using the READ CAPACITY command). Note, however, that either 6995 * of these could fail and in some cases we would continue with 6996 * the attach despite the failure (see below). 6997 */ 6998 if (un->un_f_descr_format_supported) { 6999 switch (sd_spin_up_unit(un)) { 7000 case 0: 7001 /* 7002 * Spin-up was successful; now try to read the 7003 * capacity. If successful then save the results 7004 * and mark the capacity & lbasize as valid. 
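 * * (For scale, assuming DK_MAX_BLOCKS is 2^31 - 1 blocks, which matches the "(1T - 512)B" note further below: failing this check means the disk holds at least 2^31 512-byte blocks, i.e. a full terabyte, more than the 32-bit code paths here can address.)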
7005 */ 7006 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7007 "sd_unit_attach: un:0x%p spin-up successful\n", un); 7008 7009 switch (sd_send_scsi_READ_CAPACITY(un, &capacity, 7010 &lbasize, SD_PATH_DIRECT)) { 7011 case 0: { 7012 if (capacity > DK_MAX_BLOCKS) { 7013 #ifdef _LP64 7014 if (capacity + 1 > 7015 SD_GROUP1_MAX_ADDRESS) { 7016 /* 7017 * Enable descriptor format 7018 * sense data so that we can 7019 * get 64 bit sense data 7020 * fields. 7021 */ 7022 sd_enable_descr_sense(un); 7023 } 7024 #else 7025 /* 32-bit kernels can't handle this */ 7026 scsi_log(SD_DEVINFO(un), 7027 sd_label, CE_WARN, 7028 "disk has %llu blocks, which " 7029 "is too large for a 32-bit " 7030 "kernel", capacity); 7031 7032 #if defined(__i386) || defined(__amd64) 7033 /* 7034 * 1TB disk was treated as (1T - 512)B 7035 * in the past, so that it might have 7036 * valid VTOC and solaris partitions, 7037 * we have to allow it to continue to 7038 * work. 7039 */ 7040 if (capacity -1 > DK_MAX_BLOCKS) 7041 #endif 7042 goto spinup_failed; 7043 #endif 7044 } 7045 7046 /* 7047 * Here it's not necessary to check the case: 7048 * the capacity of the device is bigger than 7049 * what the max hba cdb can support. Because 7050 * sd_send_scsi_READ_CAPACITY will retrieve 7051 * the capacity by sending USCSI command, which 7052 * is constrained by the max hba cdb. Actually, 7053 * sd_send_scsi_READ_CAPACITY will return 7054 * EINVAL when using bigger cdb than required 7055 * cdb length. Will handle this case in 7056 * "case EINVAL". 7057 */ 7058 7059 /* 7060 * The following relies on 7061 * sd_send_scsi_READ_CAPACITY never 7062 * returning 0 for capacity and/or lbasize. 7063 */ 7064 sd_update_block_info(un, lbasize, capacity); 7065 7066 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7067 "sd_unit_attach: un:0x%p capacity = %ld " 7068 "blocks; lbasize= %ld.\n", un, 7069 un->un_blockcount, un->un_tgt_blocksize); 7070 7071 break; 7072 } 7073 case EINVAL: 7074 /* 7075 * In the case where the max-cdb-length property 7076 * is smaller than the required CDB length for 7077 * a SCSI device, a target driver can fail to 7078 * attach to that device. 7079 */ 7080 scsi_log(SD_DEVINFO(un), 7081 sd_label, CE_WARN, 7082 "disk capacity is too large " 7083 "for current cdb length"); 7084 goto spinup_failed; 7085 case EACCES: 7086 /* 7087 * Should never get here if the spin-up 7088 * succeeded, but code it in anyway. 7089 * From here, just continue with the attach... 7090 */ 7091 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7092 "sd_unit_attach: un:0x%p " 7093 "sd_send_scsi_READ_CAPACITY " 7094 "returned reservation conflict\n", un); 7095 reservation_flag = SD_TARGET_IS_RESERVED; 7096 break; 7097 default: 7098 /* 7099 * Likewise, should never get here if the 7100 * spin-up succeeded. Just continue with 7101 * the attach... 7102 */ 7103 break; 7104 } 7105 break; 7106 case EACCES: 7107 /* 7108 * Device is reserved by another host. In this case 7109 * we could not spin it up or read the capacity, but 7110 * we continue with the attach anyway. 7111 */ 7112 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7113 "sd_unit_attach: un:0x%p spin-up reservation " 7114 "conflict.\n", un); 7115 reservation_flag = SD_TARGET_IS_RESERVED; 7116 break; 7117 default: 7118 /* Fail the attach if the spin-up failed. 
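*
* Note on the 32-bit block-count limit tested earlier in this
* routine (a sketch, assuming DK_MAX_BLOCKS is 2^31 - 1 and
* 512-byte blocks): a 1 TB disk holds 2^40 / 512 = 2^31 blocks,
* one more than fits in a signed 32-bit block count, which is why
* the legacy x86 behavior preserved above treats such a disk as
* (1T - 512) bytes.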
*/
7119 SD_INFO(SD_LOG_ATTACH_DETACH, un,
7120 "sd_unit_attach: un:0x%p spin-up failed.", un);
7121 goto spinup_failed;
7122 }
7123 }
7124
7125 /*
7126 * Check to see if this is an MMC drive
7127 */
7128 if (ISCD(un)) {
7129 sd_set_mmc_caps(un);
7130 }
7131
7132
7133 /*
7134 * Add a zero-length attribute to tell the world we support
7135 * kernel ioctls (for layered drivers)
7136 */
7137 (void) ddi_prop_create(DDI_DEV_T_NONE, devi, DDI_PROP_CANSLEEP,
7138 DDI_KERNEL_IOCTL, NULL, 0);
7139
7140 /*
7141 * Add a boolean property to tell the world we support
7142 * the B_FAILFAST flag (for layered drivers)
7143 */
7144 (void) ddi_prop_create(DDI_DEV_T_NONE, devi, DDI_PROP_CANSLEEP,
7145 "ddi-failfast-supported", NULL, 0);
7146
7147 /*
7148 * Initialize power management
7149 */
7150 mutex_init(&un->un_pm_mutex, NULL, MUTEX_DRIVER, NULL);
7151 cv_init(&un->un_pm_busy_cv, NULL, CV_DRIVER, NULL);
7152 sd_setup_pm(un, devi);
7153 if (un->un_f_pm_is_enabled == FALSE) {
7154 /*
7155 * For performance, point to a jump table that does
7156 * not include pm.
7157 * The direct and priority chains don't change with PM.
7158 *
7159 * Note: this is currently done based on individual device
7160 * capabilities. When an interface for determining system
7161 * power enabled state becomes available, or when additional
7162 * layers are added to the command chain, these values will
7163 * have to be re-evaluated for correctness.
7164 */
7165 if (un->un_f_non_devbsize_supported) {
7166 un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA_NO_PM;
7167 } else {
7168 un->un_buf_chain_type = SD_CHAIN_INFO_DISK_NO_PM;
7169 }
7170 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD_NO_PM;
7171 }
7172
7173 /*
7174 * This property is set to 0 by HA software to avoid retries
7175 * on a reserved disk. (The preferred property name is
7176 * "retry-on-reservation-conflict") (1189689)
7177 *
7178 * Note: The use of a global here can have unintended consequences. A
7179 * per instance variable is preferable to match the capabilities of
7180 * different underlying HBAs (4402600)
7181 */
7182 sd_retry_on_reservation_conflict = ddi_getprop(DDI_DEV_T_ANY, devi,
7183 DDI_PROP_DONTPASS, "retry-on-reservation-conflict",
7184 sd_retry_on_reservation_conflict);
7185 if (sd_retry_on_reservation_conflict != 0) {
7186 sd_retry_on_reservation_conflict = ddi_getprop(DDI_DEV_T_ANY,
7187 devi, DDI_PROP_DONTPASS, sd_resv_conflict_name,
7188 sd_retry_on_reservation_conflict);
7189 }
7190
7191 /* Set up options for QFULL handling. */
7192 if ((rval = ddi_getprop(DDI_DEV_T_ANY, devi, 0,
7193 "qfull-retries", -1)) != -1) {
7194 (void) scsi_ifsetcap(SD_ADDRESS(un), "qfull-retries",
7195 rval, 1);
7196 }
7197 if ((rval = ddi_getprop(DDI_DEV_T_ANY, devi, 0,
7198 "qfull-retry-interval", -1)) != -1) {
7199 (void) scsi_ifsetcap(SD_ADDRESS(un), "qfull-retry-interval",
7200 rval, 1);
7201 }
7202
7203 /*
7204 * This just prints a message that announces the existence of the
7205 * device. The message is always printed in the system logfile, but
7206 * only appears on the console if the system is booted with the
7207 * -v (verbose) argument.
7208 */
7209 ddi_report_dev(devi);
7210
7211 un->un_mediastate = DKIO_NONE;
7212
7213 cmlb_alloc_handle(&un->un_cmlbhandle);
7214
7215 #if defined(__i386) || defined(__amd64)
7216 /*
7217 * On x86, compensate for off-by-1 legacy error
7218 */
7219 if (!un->un_f_has_removable_media && !un->un_f_is_hotpluggable &&
7220 (lbasize == un->un_sys_blocksize))
7221 offbyone = CMLB_OFF_BY_ONE;
7222 #endif
7223
7224 if (cmlb_attach(devi, &sd_tgops, (int)devp->sd_inq->inq_dtype,
7225 un->un_f_has_removable_media, un->un_f_is_hotpluggable,
7226 un->un_node_type, offbyone, un->un_cmlbhandle,
7227 (void *)SD_PATH_DIRECT) != 0) {
7228 goto cmlb_attach_failed;
7229 }
7230
7231
7232 /*
7233 * Read and validate the device's geometry (i.e., disk label).
7234 * A new unformatted drive will not have a valid geometry, but
7235 * the driver needs to successfully attach to this device so
7236 * the drive can be formatted via ioctls.
7237 */
7238 geom_label_valid = (cmlb_validate(un->un_cmlbhandle, 0,
7239 (void *)SD_PATH_DIRECT) == 0) ? 1 : 0;
7240
7241 mutex_enter(SD_MUTEX(un));
7242
7243 /*
7244 * Read and initialize the devid for the unit.
7245 */
7246 ASSERT(un->un_errstats != NULL);
7247 if (un->un_f_devid_supported) {
7248 sd_register_devid(un, devi, reservation_flag);
7249 }
7250 mutex_exit(SD_MUTEX(un));
7251
7252 #if (defined(__fibre))
7253 /*
7254 * Register callbacks for fibre only. You can't do this solely
7255 * on the basis of the devid_type because this is HBA specific.
7256 * We need to query our HBA capabilities to find out whether to
7257 * register or not.
7258 */
7259 if (un->un_f_is_fibre) {
7260 if (strcmp(un->un_node_type, DDI_NT_BLOCK_CHAN)) {
7261 sd_init_event_callbacks(un);
7262 SD_TRACE(SD_LOG_ATTACH_DETACH, un,
7263 "sd_unit_attach: un:0x%p event callbacks inserted",
7264 un);
7265 }
7266 }
7267 #endif
7268
7269 if (un->un_f_opt_disable_cache == TRUE) {
7270 /*
7271 * Disable both read cache and write cache. This is
7272 * the historic behavior of the keywords in the config file.
7273 */
7274 if (sd_cache_control(un, SD_CACHE_DISABLE, SD_CACHE_DISABLE) !=
7275 0) {
7276 SD_ERROR(SD_LOG_ATTACH_DETACH, un,
7277 "sd_unit_attach: un:0x%p Could not disable "
7278 "caching", un);
7279 goto devid_failed;
7280 }
7281 }
7282
7283 /*
7284 * Check the value of the WCE bit now and
7285 * set un_f_write_cache_enabled accordingly.
7286 */
7287 (void) sd_get_write_cache_enabled(un, &wc_enabled);
7288 mutex_enter(SD_MUTEX(un));
7289 un->un_f_write_cache_enabled = (wc_enabled != 0);
7290 mutex_exit(SD_MUTEX(un));
7291
7292 /*
7293 * Find out what type of reservation this disk supports.
7294 */
7295 switch (sd_send_scsi_PERSISTENT_RESERVE_IN(un, SD_READ_KEYS, 0, NULL)) {
7296 case 0:
7297 /*
7298 * SCSI-3 reservations are supported.
7299 */
7300 un->un_reservation_type = SD_SCSI3_RESERVATION;
7301 SD_INFO(SD_LOG_ATTACH_DETACH, un,
7302 "sd_unit_attach: un:0x%p SCSI-3 reservations\n", un);
7303 break;
7304 case ENOTSUP:
7305 /*
7306 * The PERSISTENT RESERVE IN command would not be recognized by
7307 * a SCSI-2 device, so assume the reservation type is SCSI-2.
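*
* In effect, PERSISTENT RESERVE IN is being used as a feature
* probe here; presumably a SCSI-2-only target rejects the SCSI-3
* opcode and the command layer surfaces that as ENOTSUP, so the
* pattern reduces to (sketch):
*
*	if (sd_send_scsi_PERSISTENT_RESERVE_IN(un, SD_READ_KEYS,
*	    0, NULL) == ENOTSUP)
*		un->un_reservation_type = SD_SCSI2_RESERVATION;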
7308 */ 7309 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7310 "sd_unit_attach: un:0x%p SCSI-2 reservations\n", un); 7311 un->un_reservation_type = SD_SCSI2_RESERVATION; 7312 break; 7313 default: 7314 /* 7315 * default to SCSI-3 reservations 7316 */ 7317 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7318 "sd_unit_attach: un:0x%p default SCSI3 reservations\n", un); 7319 un->un_reservation_type = SD_SCSI3_RESERVATION; 7320 break; 7321 } 7322 7323 /* 7324 * Set the pstat and error stat values here, so data obtained during the 7325 * previous attach-time routines is available. 7326 * 7327 * Note: This is a critical sequence that needs to be maintained: 7328 * 1) Instantiate the kstats before any routines using the iopath 7329 * (i.e. sd_send_scsi_cmd). 7330 * 2) Initialize the error stats (sd_set_errstats) and partition 7331 * stats (sd_set_pstats)here, following 7332 * cmlb_validate_geometry(), sd_register_devid(), and 7333 * sd_cache_control(). 7334 */ 7335 7336 if (un->un_f_pkstats_enabled && geom_label_valid) { 7337 sd_set_pstats(un); 7338 SD_TRACE(SD_LOG_IO_PARTITION, un, 7339 "sd_unit_attach: un:0x%p pstats created and set\n", un); 7340 } 7341 7342 sd_set_errstats(un); 7343 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7344 "sd_unit_attach: un:0x%p errstats set\n", un); 7345 7346 7347 /* 7348 * After successfully attaching an instance, we record the information 7349 * of how many luns have been attached on the relative target and 7350 * controller for parallel SCSI. This information is used when sd tries 7351 * to set the tagged queuing capability in HBA. 7352 */ 7353 if (SD_IS_PARALLEL_SCSI(un) && (tgt >= 0) && (tgt < NTARGETS_WIDE)) { 7354 sd_scsi_update_lun_on_target(pdip, tgt, SD_SCSI_LUN_ATTACH); 7355 } 7356 7357 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7358 "sd_unit_attach: un:0x%p exit success\n", un); 7359 7360 return (DDI_SUCCESS); 7361 7362 /* 7363 * An error occurred during the attach; clean up & return failure. 7364 */ 7365 7366 devid_failed: 7367 7368 setup_pm_failed: 7369 ddi_remove_minor_node(devi, NULL); 7370 7371 cmlb_attach_failed: 7372 /* 7373 * Cleanup from the scsi_ifsetcap() calls (437868) 7374 */ 7375 (void) scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 0, 1); 7376 (void) scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 0, 1); 7377 7378 /* 7379 * Refer to the comments of setting tagged-qing in the beginning of 7380 * sd_unit_attach. We can only disable tagged queuing when there is 7381 * no lun attached on the target. 7382 */ 7383 if (sd_scsi_get_target_lun_count(pdip, tgt) < 1) { 7384 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1); 7385 } 7386 7387 if (un->un_f_is_fibre == FALSE) { 7388 (void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 0, 1); 7389 } 7390 7391 spinup_failed: 7392 7393 mutex_enter(SD_MUTEX(un)); 7394 7395 /* Cancel callback for SD_PATH_DIRECT_PRIORITY cmd. 
restart */ 7396 if (un->un_direct_priority_timeid != NULL) { 7397 timeout_id_t temp_id = un->un_direct_priority_timeid; 7398 un->un_direct_priority_timeid = NULL; 7399 mutex_exit(SD_MUTEX(un)); 7400 (void) untimeout(temp_id); 7401 mutex_enter(SD_MUTEX(un)); 7402 } 7403 7404 /* Cancel any pending start/stop timeouts */ 7405 if (un->un_startstop_timeid != NULL) { 7406 timeout_id_t temp_id = un->un_startstop_timeid; 7407 un->un_startstop_timeid = NULL; 7408 mutex_exit(SD_MUTEX(un)); 7409 (void) untimeout(temp_id); 7410 mutex_enter(SD_MUTEX(un)); 7411 } 7412 7413 /* Cancel any pending reset-throttle timeouts */ 7414 if (un->un_reset_throttle_timeid != NULL) { 7415 timeout_id_t temp_id = un->un_reset_throttle_timeid; 7416 un->un_reset_throttle_timeid = NULL; 7417 mutex_exit(SD_MUTEX(un)); 7418 (void) untimeout(temp_id); 7419 mutex_enter(SD_MUTEX(un)); 7420 } 7421 7422 /* Cancel any pending retry timeouts */ 7423 if (un->un_retry_timeid != NULL) { 7424 timeout_id_t temp_id = un->un_retry_timeid; 7425 un->un_retry_timeid = NULL; 7426 mutex_exit(SD_MUTEX(un)); 7427 (void) untimeout(temp_id); 7428 mutex_enter(SD_MUTEX(un)); 7429 } 7430 7431 /* Cancel any pending delayed cv broadcast timeouts */ 7432 if (un->un_dcvb_timeid != NULL) { 7433 timeout_id_t temp_id = un->un_dcvb_timeid; 7434 un->un_dcvb_timeid = NULL; 7435 mutex_exit(SD_MUTEX(un)); 7436 (void) untimeout(temp_id); 7437 mutex_enter(SD_MUTEX(un)); 7438 } 7439 7440 mutex_exit(SD_MUTEX(un)); 7441 7442 /* There should not be any in-progress I/O so ASSERT this check */ 7443 ASSERT(un->un_ncmds_in_transport == 0); 7444 ASSERT(un->un_ncmds_in_driver == 0); 7445 7446 /* Do not free the softstate if the callback routine is active */ 7447 sd_sync_with_callback(un); 7448 7449 /* 7450 * Partition stats apparently are not used with removables. These would 7451 * not have been created during attach, so no need to clean them up... 7452 */ 7453 if (un->un_stats != NULL) { 7454 kstat_delete(un->un_stats); 7455 un->un_stats = NULL; 7456 } 7457 if (un->un_errstats != NULL) { 7458 kstat_delete(un->un_errstats); 7459 un->un_errstats = NULL; 7460 } 7461 7462 ddi_xbuf_attr_unregister_devinfo(un->un_xbuf_attr, devi); 7463 ddi_xbuf_attr_destroy(un->un_xbuf_attr); 7464 7465 ddi_prop_remove_all(devi); 7466 sema_destroy(&un->un_semoclose); 7467 cv_destroy(&un->un_state_cv); 7468 7469 getrbuf_failed: 7470 7471 sd_free_rqs(un); 7472 7473 alloc_rqs_failed: 7474 7475 devp->sd_private = NULL; 7476 bzero(un, sizeof (struct sd_lun)); /* Clear any stale data! */ 7477 7478 get_softstate_failed: 7479 /* 7480 * Note: the man pages are unclear as to whether or not doing a 7481 * ddi_soft_state_free(sd_state, instance) is the right way to 7482 * clean up after the ddi_soft_state_zalloc() if the subsequent 7483 * ddi_get_soft_state() fails. The implication seems to be 7484 * that the get_soft_state cannot fail if the zalloc succeeds. 7485 */ 7486 ddi_soft_state_free(sd_state, instance); 7487 7488 probe_failed: 7489 scsi_unprobe(devp); 7490 #ifdef SDDEBUG 7491 if ((sd_component_mask & SD_LOG_ATTACH_DETACH) && 7492 (sd_level_mask & SD_LOGMASK_TRACE)) { 7493 cmn_err(CE_CONT, "sd_unit_attach: un:0x%p exit failure\n", 7494 (void *)un); 7495 } 7496 #endif 7497 return (DDI_FAILURE); 7498 } 7499 7500 7501 /* 7502 * Function: sd_unit_detach 7503 * 7504 * Description: Performs DDI_DETACH processing for sddetach(). 
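*		A typical caller is the detach(9e) entry point; as a
*		sketch, sddetach() would dispatch roughly as
*		"case DDI_DETACH: return (sd_unit_detach(devi));".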
7505 * 7506 * Return Code: DDI_SUCCESS 7507 * DDI_FAILURE 7508 * 7509 * Context: Kernel thread context 7510 */ 7511 7512 static int 7513 sd_unit_detach(dev_info_t *devi) 7514 { 7515 struct scsi_device *devp; 7516 struct sd_lun *un; 7517 int i; 7518 int tgt; 7519 dev_t dev; 7520 dev_info_t *pdip = ddi_get_parent(devi); 7521 int instance = ddi_get_instance(devi); 7522 7523 mutex_enter(&sd_detach_mutex); 7524 7525 /* 7526 * Fail the detach for any of the following: 7527 * - Unable to get the sd_lun struct for the instance 7528 * - A layered driver has an outstanding open on the instance 7529 * - Another thread is already detaching this instance 7530 * - Another thread is currently performing an open 7531 */ 7532 devp = ddi_get_driver_private(devi); 7533 if ((devp == NULL) || 7534 ((un = (struct sd_lun *)devp->sd_private) == NULL) || 7535 (un->un_ncmds_in_driver != 0) || (un->un_layer_count != 0) || 7536 (un->un_detach_count != 0) || (un->un_opens_in_progress != 0)) { 7537 mutex_exit(&sd_detach_mutex); 7538 return (DDI_FAILURE); 7539 } 7540 7541 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_unit_detach: entry 0x%p\n", un); 7542 7543 /* 7544 * Mark this instance as currently in a detach, to inhibit any 7545 * opens from a layered driver. 7546 */ 7547 un->un_detach_count++; 7548 mutex_exit(&sd_detach_mutex); 7549 7550 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS, 7551 SCSI_ADDR_PROP_TARGET, -1); 7552 7553 dev = sd_make_device(SD_DEVINFO(un)); 7554 7555 #ifndef lint 7556 _NOTE(COMPETING_THREADS_NOW); 7557 #endif 7558 7559 mutex_enter(SD_MUTEX(un)); 7560 7561 /* 7562 * Fail the detach if there are any outstanding layered 7563 * opens on this device. 7564 */ 7565 for (i = 0; i < NDKMAP; i++) { 7566 if (un->un_ocmap.lyropen[i] != 0) { 7567 goto err_notclosed; 7568 } 7569 } 7570 7571 /* 7572 * Verify there are NO outstanding commands issued to this device. 7573 * ie, un_ncmds_in_transport == 0. 7574 * It's possible to have outstanding commands through the physio 7575 * code path, even though everything's closed. 7576 */ 7577 if ((un->un_ncmds_in_transport != 0) || (un->un_retry_timeid != NULL) || 7578 (un->un_direct_priority_timeid != NULL) || 7579 (un->un_state == SD_STATE_RWAIT)) { 7580 mutex_exit(SD_MUTEX(un)); 7581 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 7582 "sd_dr_detach: Detach failure due to outstanding cmds\n"); 7583 goto err_stillbusy; 7584 } 7585 7586 /* 7587 * If we have the device reserved, release the reservation. 7588 */ 7589 if ((un->un_resvd_status & SD_RESERVE) && 7590 !(un->un_resvd_status & SD_LOST_RESERVE)) { 7591 mutex_exit(SD_MUTEX(un)); 7592 /* 7593 * Note: sd_reserve_release sends a command to the device 7594 * via the sd_ioctlcmd() path, and can sleep. 7595 */ 7596 if (sd_reserve_release(dev, SD_RELEASE) != 0) { 7597 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 7598 "sd_dr_detach: Cannot release reservation \n"); 7599 } 7600 } else { 7601 mutex_exit(SD_MUTEX(un)); 7602 } 7603 7604 /* 7605 * Untimeout any reserve recover, throttle reset, restart unit 7606 * and delayed broadcast timeout threads. Protect the timeout pointer 7607 * from getting nulled by their callback functions. 
7608 */ 7609 mutex_enter(SD_MUTEX(un)); 7610 if (un->un_resvd_timeid != NULL) { 7611 timeout_id_t temp_id = un->un_resvd_timeid; 7612 un->un_resvd_timeid = NULL; 7613 mutex_exit(SD_MUTEX(un)); 7614 (void) untimeout(temp_id); 7615 mutex_enter(SD_MUTEX(un)); 7616 } 7617 7618 if (un->un_reset_throttle_timeid != NULL) { 7619 timeout_id_t temp_id = un->un_reset_throttle_timeid; 7620 un->un_reset_throttle_timeid = NULL; 7621 mutex_exit(SD_MUTEX(un)); 7622 (void) untimeout(temp_id); 7623 mutex_enter(SD_MUTEX(un)); 7624 } 7625 7626 if (un->un_startstop_timeid != NULL) { 7627 timeout_id_t temp_id = un->un_startstop_timeid; 7628 un->un_startstop_timeid = NULL; 7629 mutex_exit(SD_MUTEX(un)); 7630 (void) untimeout(temp_id); 7631 mutex_enter(SD_MUTEX(un)); 7632 } 7633 7634 if (un->un_dcvb_timeid != NULL) { 7635 timeout_id_t temp_id = un->un_dcvb_timeid; 7636 un->un_dcvb_timeid = NULL; 7637 mutex_exit(SD_MUTEX(un)); 7638 (void) untimeout(temp_id); 7639 } else { 7640 mutex_exit(SD_MUTEX(un)); 7641 } 7642 7643 /* Remove any pending reservation reclaim requests for this device */ 7644 sd_rmv_resv_reclaim_req(dev); 7645 7646 mutex_enter(SD_MUTEX(un)); 7647 7648 /* Cancel any pending callbacks for SD_PATH_DIRECT_PRIORITY cmd. */ 7649 if (un->un_direct_priority_timeid != NULL) { 7650 timeout_id_t temp_id = un->un_direct_priority_timeid; 7651 un->un_direct_priority_timeid = NULL; 7652 mutex_exit(SD_MUTEX(un)); 7653 (void) untimeout(temp_id); 7654 mutex_enter(SD_MUTEX(un)); 7655 } 7656 7657 /* Cancel any active multi-host disk watch thread requests */ 7658 if (un->un_mhd_token != NULL) { 7659 mutex_exit(SD_MUTEX(un)); 7660 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_mhd_token)); 7661 if (scsi_watch_request_terminate(un->un_mhd_token, 7662 SCSI_WATCH_TERMINATE_NOWAIT)) { 7663 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 7664 "sd_dr_detach: Cannot cancel mhd watch request\n"); 7665 /* 7666 * Note: We are returning here after having removed 7667 * some driver timeouts above. This is consistent with 7668 * the legacy implementation but perhaps the watch 7669 * terminate call should be made with the wait flag set. 7670 */ 7671 goto err_stillbusy; 7672 } 7673 mutex_enter(SD_MUTEX(un)); 7674 un->un_mhd_token = NULL; 7675 } 7676 7677 if (un->un_swr_token != NULL) { 7678 mutex_exit(SD_MUTEX(un)); 7679 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_swr_token)); 7680 if (scsi_watch_request_terminate(un->un_swr_token, 7681 SCSI_WATCH_TERMINATE_NOWAIT)) { 7682 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 7683 "sd_dr_detach: Cannot cancel swr watch request\n"); 7684 /* 7685 * Note: We are returning here after having removed 7686 * some driver timeouts above. This is consistent with 7687 * the legacy implementation but perhaps the watch 7688 * terminate call should be made with the wait flag set. 7689 */ 7690 goto err_stillbusy; 7691 } 7692 mutex_enter(SD_MUTEX(un)); 7693 un->un_swr_token = NULL; 7694 } 7695 7696 mutex_exit(SD_MUTEX(un)); 7697 7698 /* 7699 * Clear any scsi_reset_notifies. We clear the reset notifies 7700 * if we have not registered one. 7701 * Note: The sd_mhd_reset_notify_cb() fn tries to acquire SD_MUTEX! 7702 */ 7703 (void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_CANCEL, 7704 sd_mhd_reset_notify_cb, (caddr_t)un); 7705 7706 /* 7707 * protect the timeout pointers from getting nulled by 7708 * their callback functions during the cancellation process. 7709 * In such a scenario untimeout can be invoked with a null value. 
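*
* The cancellation idiom used throughout this routine is to
* snapshot the id and clear the field under the protecting mutex,
* then call untimeout() with the mutex dropped (un_foo_timeid is a
* stand-in name; un_pm_mutex takes the place of SD_MUTEX for the
* PM timeouts below):
*
*	timeout_id_t temp_id = un->un_foo_timeid;
*	un->un_foo_timeid = NULL;
*	mutex_exit(SD_MUTEX(un));
*	(void) untimeout(temp_id);
*	mutex_enter(SD_MUTEX(un));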
*/
7711 _NOTE(NO_COMPETING_THREADS_NOW);
7712
7713 mutex_enter(&un->un_pm_mutex);
7714 if (un->un_pm_idle_timeid != NULL) {
7715 timeout_id_t temp_id = un->un_pm_idle_timeid;
7716 un->un_pm_idle_timeid = NULL;
7717 mutex_exit(&un->un_pm_mutex);
7718
7719 /*
7720 * Timeout is active; cancel it.
7721 * Note that it'll never be active on a device
7722 * that does not support PM, therefore we don't
7723 * have to check before calling pm_idle_component.
7724 */
7725 (void) untimeout(temp_id);
7726 (void) pm_idle_component(SD_DEVINFO(un), 0);
7727 mutex_enter(&un->un_pm_mutex);
7728 }
7729
7730 /*
7731 * Check whether there is already a timeout scheduled for power
7732 * management. If yes then don't lower the power here, that's
7733 * the timeout handler's job.
7734 */
7735 if (un->un_pm_timeid != NULL) {
7736 timeout_id_t temp_id = un->un_pm_timeid;
7737 un->un_pm_timeid = NULL;
7738 mutex_exit(&un->un_pm_mutex);
7739 /*
7740 * Timeout is active; cancel it.
7741 * Note that it'll never be active on a device
7742 * that does not support PM, therefore we don't
7743 * have to check before calling pm_idle_component.
7744 */
7745 (void) untimeout(temp_id);
7746 (void) pm_idle_component(SD_DEVINFO(un), 0);
7747
7748 } else {
7749 mutex_exit(&un->un_pm_mutex);
7750 if ((un->un_f_pm_is_enabled == TRUE) &&
7751 (pm_lower_power(SD_DEVINFO(un), 0, SD_SPINDLE_OFF) !=
7752 DDI_SUCCESS)) {
7753 SD_ERROR(SD_LOG_ATTACH_DETACH, un,
7754 "sd_dr_detach: Lower power request failed, ignoring.\n");
7755 /*
7756 * Fix for bug: 4297749, item # 13
7757 * The above test now includes a check to see if PM is
7758 * supported by this device before calling
7759 * pm_lower_power().
7760 * Note, the following is not dead code. The call to
7761 * pm_lower_power above will generate a call back into
7762 * our sdpower routine which might result in a timeout
7763 * handler getting activated. Therefore the following
7764 * code is valid and necessary.
7765 */
7766 mutex_enter(&un->un_pm_mutex);
7767 if (un->un_pm_timeid != NULL) {
7768 timeout_id_t temp_id = un->un_pm_timeid;
7769 un->un_pm_timeid = NULL;
7770 mutex_exit(&un->un_pm_mutex);
7771 (void) untimeout(temp_id);
7772 (void) pm_idle_component(SD_DEVINFO(un), 0);
7773 } else {
7774 mutex_exit(&un->un_pm_mutex);
7775 }
7776 }
7777 }
7778
7779 /*
7780 * Cleanup from the scsi_ifsetcap() calls (437868)
7781 * Relocated here from above to be after the call to
7782 * pm_lower_power, which was getting errors.
7783 */
7784 (void) scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 0, 1);
7785 (void) scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 0, 1);
7786
7787 /*
7788 * Currently, tagged queuing is supported per target by the HBA.
7789 * Setting this per lun instance actually sets the capability of this
7790 * target in the HBA, which affects those luns already attached on the
7791 * same target. So during detach, we can disable this capability
7792 * only when this is the only lun left on this target. By doing
7793 * this, we assume a target has the same tagged queuing capability
7794 * for every lun. The condition can be removed when the HBA is changed
7795 * to support per-lun tagged queuing capability.
7796 */ 7797 if (sd_scsi_get_target_lun_count(pdip, tgt) <= 1) { 7798 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1); 7799 } 7800 7801 if (un->un_f_is_fibre == FALSE) { 7802 (void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 0, 1); 7803 } 7804 7805 /* 7806 * Remove any event callbacks, fibre only 7807 */ 7808 if (un->un_f_is_fibre == TRUE) { 7809 if ((un->un_insert_event != NULL) && 7810 (ddi_remove_event_handler(un->un_insert_cb_id) != 7811 DDI_SUCCESS)) { 7812 /* 7813 * Note: We are returning here after having done 7814 * substantial cleanup above. This is consistent 7815 * with the legacy implementation but this may not 7816 * be the right thing to do. 7817 */ 7818 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 7819 "sd_dr_detach: Cannot cancel insert event\n"); 7820 goto err_remove_event; 7821 } 7822 un->un_insert_event = NULL; 7823 7824 if ((un->un_remove_event != NULL) && 7825 (ddi_remove_event_handler(un->un_remove_cb_id) != 7826 DDI_SUCCESS)) { 7827 /* 7828 * Note: We are returning here after having done 7829 * substantial cleanup above. This is consistent 7830 * with the legacy implementation but this may not 7831 * be the right thing to do. 7832 */ 7833 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 7834 "sd_dr_detach: Cannot cancel remove event\n"); 7835 goto err_remove_event; 7836 } 7837 un->un_remove_event = NULL; 7838 } 7839 7840 /* Do not free the softstate if the callback routine is active */ 7841 sd_sync_with_callback(un); 7842 7843 cmlb_detach(un->un_cmlbhandle, (void *)SD_PATH_DIRECT); 7844 cmlb_free_handle(&un->un_cmlbhandle); 7845 7846 /* 7847 * Hold the detach mutex here, to make sure that no other threads ever 7848 * can access a (partially) freed soft state structure. 7849 */ 7850 mutex_enter(&sd_detach_mutex); 7851 7852 /* 7853 * Clean up the soft state struct. 7854 * Cleanup is done in reverse order of allocs/inits. 7855 * At this point there should be no competing threads anymore. 7856 */ 7857 7858 /* Unregister and free device id. */ 7859 ddi_devid_unregister(devi); 7860 if (un->un_devid) { 7861 ddi_devid_free(un->un_devid); 7862 un->un_devid = NULL; 7863 } 7864 7865 /* 7866 * Destroy wmap cache if it exists. 7867 */ 7868 if (un->un_wm_cache != NULL) { 7869 kmem_cache_destroy(un->un_wm_cache); 7870 un->un_wm_cache = NULL; 7871 } 7872 7873 /* 7874 * kstat cleanup is done in detach for all device types (4363169). 7875 * We do not want to fail detach if the device kstats are not deleted 7876 * since there is a confusion about the devo_refcnt for the device. 7877 * We just delete the kstats and let detach complete successfully. 7878 */ 7879 if (un->un_stats != NULL) { 7880 kstat_delete(un->un_stats); 7881 un->un_stats = NULL; 7882 } 7883 if (un->un_errstats != NULL) { 7884 kstat_delete(un->un_errstats); 7885 un->un_errstats = NULL; 7886 } 7887 7888 /* Remove partition stats */ 7889 if (un->un_f_pkstats_enabled) { 7890 for (i = 0; i < NSDMAP; i++) { 7891 if (un->un_pstats[i] != NULL) { 7892 kstat_delete(un->un_pstats[i]); 7893 un->un_pstats[i] = NULL; 7894 } 7895 } 7896 } 7897 7898 /* Remove xbuf registration */ 7899 ddi_xbuf_attr_unregister_devinfo(un->un_xbuf_attr, devi); 7900 ddi_xbuf_attr_destroy(un->un_xbuf_attr); 7901 7902 /* Remove driver properties */ 7903 ddi_prop_remove_all(devi); 7904 7905 mutex_destroy(&un->un_pm_mutex); 7906 cv_destroy(&un->un_pm_busy_cv); 7907 7908 cv_destroy(&un->un_wcc_cv); 7909 7910 /* Open/close semaphore */ 7911 sema_destroy(&un->un_semoclose); 7912 7913 /* Removable media condvar. 
*/ 7914 cv_destroy(&un->un_state_cv); 7915 7916 /* Suspend/resume condvar. */ 7917 cv_destroy(&un->un_suspend_cv); 7918 cv_destroy(&un->un_disk_busy_cv); 7919 7920 sd_free_rqs(un); 7921 7922 /* Free up soft state */ 7923 devp->sd_private = NULL; 7924 7925 bzero(un, sizeof (struct sd_lun)); 7926 ddi_soft_state_free(sd_state, instance); 7927 7928 mutex_exit(&sd_detach_mutex); 7929 7930 /* This frees up the INQUIRY data associated with the device. */ 7931 scsi_unprobe(devp); 7932 7933 /* 7934 * After successfully detaching an instance, we update the information 7935 * of how many luns have been attached in the relative target and 7936 * controller for parallel SCSI. This information is used when sd tries 7937 * to set the tagged queuing capability in HBA. 7938 * Since un has been released, we can't use SD_IS_PARALLEL_SCSI(un) to 7939 * check if the device is parallel SCSI. However, we don't need to 7940 * check here because we've already checked during attach. No device 7941 * that is not parallel SCSI is in the chain. 7942 */ 7943 if ((tgt >= 0) && (tgt < NTARGETS_WIDE)) { 7944 sd_scsi_update_lun_on_target(pdip, tgt, SD_SCSI_LUN_DETACH); 7945 } 7946 7947 return (DDI_SUCCESS); 7948 7949 err_notclosed: 7950 mutex_exit(SD_MUTEX(un)); 7951 7952 err_stillbusy: 7953 _NOTE(NO_COMPETING_THREADS_NOW); 7954 7955 err_remove_event: 7956 mutex_enter(&sd_detach_mutex); 7957 un->un_detach_count--; 7958 mutex_exit(&sd_detach_mutex); 7959 7960 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_unit_detach: exit failure\n"); 7961 return (DDI_FAILURE); 7962 } 7963 7964 7965 /* 7966 * Function: sd_create_errstats 7967 * 7968 * Description: This routine instantiates the device error stats. 7969 * 7970 * Note: During attach the stats are instantiated first so they are 7971 * available for attach-time routines that utilize the driver 7972 * iopath to send commands to the device. The stats are initialized 7973 * separately so data obtained during some attach-time routines is 7974 * available. 
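*
*		The kstat created here is named after the module and
*		instance: module "sderr" (built as "%serr" from
*		sd_label), name "sd<instance>,err", class
*		"device_error". For example, instance 3 yields
*		"sd3,err", which is the data reported by
*		iostat(1M) -E.
*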
(4362483) 7975 * 7976 * Arguments: un - driver soft state (unit) structure 7977 * instance - driver instance 7978 * 7979 * Context: Kernel thread context 7980 */ 7981 7982 static void 7983 sd_create_errstats(struct sd_lun *un, int instance) 7984 { 7985 struct sd_errstats *stp; 7986 char kstatmodule_err[KSTAT_STRLEN]; 7987 char kstatname[KSTAT_STRLEN]; 7988 int ndata = (sizeof (struct sd_errstats) / sizeof (kstat_named_t)); 7989 7990 ASSERT(un != NULL); 7991 7992 if (un->un_errstats != NULL) { 7993 return; 7994 } 7995 7996 (void) snprintf(kstatmodule_err, sizeof (kstatmodule_err), 7997 "%serr", sd_label); 7998 (void) snprintf(kstatname, sizeof (kstatname), 7999 "%s%d,err", sd_label, instance); 8000 8001 un->un_errstats = kstat_create(kstatmodule_err, instance, kstatname, 8002 "device_error", KSTAT_TYPE_NAMED, ndata, KSTAT_FLAG_PERSISTENT); 8003 8004 if (un->un_errstats == NULL) { 8005 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8006 "sd_create_errstats: Failed kstat_create\n"); 8007 return; 8008 } 8009 8010 stp = (struct sd_errstats *)un->un_errstats->ks_data; 8011 kstat_named_init(&stp->sd_softerrs, "Soft Errors", 8012 KSTAT_DATA_UINT32); 8013 kstat_named_init(&stp->sd_harderrs, "Hard Errors", 8014 KSTAT_DATA_UINT32); 8015 kstat_named_init(&stp->sd_transerrs, "Transport Errors", 8016 KSTAT_DATA_UINT32); 8017 kstat_named_init(&stp->sd_vid, "Vendor", 8018 KSTAT_DATA_CHAR); 8019 kstat_named_init(&stp->sd_pid, "Product", 8020 KSTAT_DATA_CHAR); 8021 kstat_named_init(&stp->sd_revision, "Revision", 8022 KSTAT_DATA_CHAR); 8023 kstat_named_init(&stp->sd_serial, "Serial No", 8024 KSTAT_DATA_CHAR); 8025 kstat_named_init(&stp->sd_capacity, "Size", 8026 KSTAT_DATA_ULONGLONG); 8027 kstat_named_init(&stp->sd_rq_media_err, "Media Error", 8028 KSTAT_DATA_UINT32); 8029 kstat_named_init(&stp->sd_rq_ntrdy_err, "Device Not Ready", 8030 KSTAT_DATA_UINT32); 8031 kstat_named_init(&stp->sd_rq_nodev_err, "No Device", 8032 KSTAT_DATA_UINT32); 8033 kstat_named_init(&stp->sd_rq_recov_err, "Recoverable", 8034 KSTAT_DATA_UINT32); 8035 kstat_named_init(&stp->sd_rq_illrq_err, "Illegal Request", 8036 KSTAT_DATA_UINT32); 8037 kstat_named_init(&stp->sd_rq_pfa_err, "Predictive Failure Analysis", 8038 KSTAT_DATA_UINT32); 8039 8040 un->un_errstats->ks_private = un; 8041 un->un_errstats->ks_update = nulldev; 8042 8043 kstat_install(un->un_errstats); 8044 } 8045 8046 8047 /* 8048 * Function: sd_set_errstats 8049 * 8050 * Description: This routine sets the value of the vendor id, product id, 8051 * revision, serial number, and capacity device error stats. 8052 * 8053 * Note: During attach the stats are instantiated first so they are 8054 * available for attach-time routines that utilize the driver 8055 * iopath to send commands to the device. The stats are initialized 8056 * separately so data obtained during some attach-time routines is 8057 * available. 
(4362483) 8058 * 8059 * Arguments: un - driver soft state (unit) structure 8060 * 8061 * Context: Kernel thread context 8062 */ 8063 8064 static void 8065 sd_set_errstats(struct sd_lun *un) 8066 { 8067 struct sd_errstats *stp; 8068 8069 ASSERT(un != NULL); 8070 ASSERT(un->un_errstats != NULL); 8071 stp = (struct sd_errstats *)un->un_errstats->ks_data; 8072 ASSERT(stp != NULL); 8073 (void) strncpy(stp->sd_vid.value.c, un->un_sd->sd_inq->inq_vid, 8); 8074 (void) strncpy(stp->sd_pid.value.c, un->un_sd->sd_inq->inq_pid, 16); 8075 (void) strncpy(stp->sd_revision.value.c, 8076 un->un_sd->sd_inq->inq_revision, 4); 8077 8078 /* 8079 * All the errstats are persistent across detach/attach, 8080 * so reset all the errstats here in case of the hot 8081 * replacement of disk drives, except for not changed 8082 * Sun qualified drives. 8083 */ 8084 if ((bcmp(&SD_INQUIRY(un)->inq_pid[9], "SUN", 3) != 0) || 8085 (bcmp(&SD_INQUIRY(un)->inq_serial, stp->sd_serial.value.c, 8086 sizeof (SD_INQUIRY(un)->inq_serial)) != 0)) { 8087 stp->sd_softerrs.value.ui32 = 0; 8088 stp->sd_harderrs.value.ui32 = 0; 8089 stp->sd_transerrs.value.ui32 = 0; 8090 stp->sd_rq_media_err.value.ui32 = 0; 8091 stp->sd_rq_ntrdy_err.value.ui32 = 0; 8092 stp->sd_rq_nodev_err.value.ui32 = 0; 8093 stp->sd_rq_recov_err.value.ui32 = 0; 8094 stp->sd_rq_illrq_err.value.ui32 = 0; 8095 stp->sd_rq_pfa_err.value.ui32 = 0; 8096 } 8097 8098 /* 8099 * Set the "Serial No" kstat for Sun qualified drives (indicated by 8100 * "SUN" in bytes 25-27 of the inquiry data (bytes 9-11 of the pid) 8101 * (4376302)) 8102 */ 8103 if (bcmp(&SD_INQUIRY(un)->inq_pid[9], "SUN", 3) == 0) { 8104 bcopy(&SD_INQUIRY(un)->inq_serial, stp->sd_serial.value.c, 8105 sizeof (SD_INQUIRY(un)->inq_serial)); 8106 } 8107 8108 if (un->un_f_blockcount_is_valid != TRUE) { 8109 /* 8110 * Set capacity error stat to 0 for no media. This ensures 8111 * a valid capacity is displayed in response to 'iostat -E' 8112 * when no media is present in the device. 8113 */ 8114 stp->sd_capacity.value.ui64 = 0; 8115 } else { 8116 /* 8117 * Multiply un_blockcount by un->un_sys_blocksize to get 8118 * capacity. 8119 * 8120 * Note: for non-512 blocksize devices "un_blockcount" has been 8121 * "scaled" in sd_send_scsi_READ_CAPACITY by multiplying by 8122 * (un_tgt_blocksize / un->un_sys_blocksize). 8123 */ 8124 stp->sd_capacity.value.ui64 = (uint64_t) 8125 ((uint64_t)un->un_blockcount * un->un_sys_blocksize); 8126 } 8127 } 8128 8129 8130 /* 8131 * Function: sd_set_pstats 8132 * 8133 * Description: This routine instantiates and initializes the partition 8134 * stats for each partition with more than zero blocks. 8135 * (4363169) 8136 * 8137 * Arguments: un - driver soft state (unit) structure 8138 * 8139 * Context: Kernel thread context 8140 */ 8141 8142 static void 8143 sd_set_pstats(struct sd_lun *un) 8144 { 8145 char kstatname[KSTAT_STRLEN]; 8146 int instance; 8147 int i; 8148 diskaddr_t nblks = 0; 8149 char *partname = NULL; 8150 8151 ASSERT(un != NULL); 8152 8153 instance = ddi_get_instance(SD_DEVINFO(un)); 8154 8155 /* Note:x86: is this a VTOC8/VTOC16 difference? 
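*
* Each partition kstat created in the loop below is named
* "<label><instance>,<partition>" per the snprintf(); e.g.
* instance 3 with partition name "a" would yield "sd3,a" in class
* "partition" (illustrative instance number).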
*/
8156 for (i = 0; i < NSDMAP; i++) {
8157
8158 if (cmlb_partinfo(un->un_cmlbhandle, i,
8159 &nblks, NULL, &partname, NULL, (void *)SD_PATH_DIRECT) != 0)
8160 continue;
8161 mutex_enter(SD_MUTEX(un));
8162
8163 if ((un->un_pstats[i] == NULL) &&
8164 (nblks != 0)) {
8165
8166 (void) snprintf(kstatname, sizeof (kstatname),
8167 "%s%d,%s", sd_label, instance,
8168 partname);
8169
8170 un->un_pstats[i] = kstat_create(sd_label,
8171 instance, kstatname, "partition", KSTAT_TYPE_IO,
8172 1, KSTAT_FLAG_PERSISTENT);
8173 if (un->un_pstats[i] != NULL) {
8174 un->un_pstats[i]->ks_lock = SD_MUTEX(un);
8175 kstat_install(un->un_pstats[i]);
8176 }
8177 }
8178 mutex_exit(SD_MUTEX(un));
8179 }
8180 }
8181
8182
8183 #if (defined(__fibre))
8184 /*
8185 * Function: sd_init_event_callbacks
8186 *
8187 * Description: This routine initializes the insertion and removal event
8188 * callbacks. (fibre only)
8189 *
8190 * Arguments: un - driver soft state (unit) structure
8191 *
8192 * Context: Kernel thread context
8193 */
8194
8195 static void
8196 sd_init_event_callbacks(struct sd_lun *un)
8197 {
8198 ASSERT(un != NULL);
8199
8200 if ((un->un_insert_event == NULL) &&
8201 (ddi_get_eventcookie(SD_DEVINFO(un), FCAL_INSERT_EVENT,
8202 &un->un_insert_event) == DDI_SUCCESS)) {
8203 /*
8204 * Add the callback for an insertion event
8205 */
8206 (void) ddi_add_event_handler(SD_DEVINFO(un),
8207 un->un_insert_event, sd_event_callback, (void *)un,
8208 &(un->un_insert_cb_id));
8209 }
8210
8211 if ((un->un_remove_event == NULL) &&
8212 (ddi_get_eventcookie(SD_DEVINFO(un), FCAL_REMOVE_EVENT,
8213 &un->un_remove_event) == DDI_SUCCESS)) {
8214 /*
8215 * Add the callback for a removal event
8216 */
8217 (void) ddi_add_event_handler(SD_DEVINFO(un),
8218 un->un_remove_event, sd_event_callback, (void *)un,
8219 &(un->un_remove_cb_id));
8220 }
8221 }
8222
8223
8224 /*
8225 * Function: sd_event_callback
8226 *
8227 * Description: This routine handles insert/remove events (photon). The
8228 * state is changed to OFFLINE which can be used to suppress
8229 * error msgs. (fibre only)
8230 *
8231 * Arguments: un - driver soft state (unit) structure
8232 *
8233 * Context: Callout thread context
8234 */
8235 /* ARGSUSED */
8236 static void
8237 sd_event_callback(dev_info_t *dip, ddi_eventcookie_t event, void *arg,
8238 void *bus_impldata)
8239 {
8240 struct sd_lun *un = (struct sd_lun *)arg;
8241
8242 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_insert_event));
8243 if (event == un->un_insert_event) {
8244 SD_TRACE(SD_LOG_COMMON, un, "sd_event_callback: insert event");
8245 mutex_enter(SD_MUTEX(un));
8246 if (un->un_state == SD_STATE_OFFLINE) {
8247 if (un->un_last_state != SD_STATE_SUSPENDED) {
8248 un->un_state = un->un_last_state;
8249 } else {
8250 /*
8251 * We have gone through SUSPEND/RESUME while
8252 * we were offline. Restore the last state.
8253 */
8254 un->un_state = un->un_save_state;
8255 }
8256 }
8257 mutex_exit(SD_MUTEX(un));
8258
8259 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_remove_event));
8260 } else if (event == un->un_remove_event) {
8261 SD_TRACE(SD_LOG_COMMON, un, "sd_event_callback: remove event");
8262 mutex_enter(SD_MUTEX(un));
8263 /*
8264 * We need to handle an event callback that occurs during
8265 * the suspend operation, since we don't prevent it.
8266 */
8267 if (un->un_state != SD_STATE_OFFLINE) {
8268 if (un->un_state != SD_STATE_SUSPENDED) {
8269 New_state(un, SD_STATE_OFFLINE);
8270 } else {
8271 un->un_last_state = SD_STATE_OFFLINE;
8272 }
8273 }
8274 mutex_exit(SD_MUTEX(un));
8275 } else {
8276 scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE,
8277 "!Unknown event\n");
8278 }
8279
8280 }
8281 #endif
8282
8283 /*
8284 * Function: sd_cache_control()
8285 *
8286 * Description: This routine is the driver entry point for setting
8287 * read and write caching by modifying the WCE (write cache
8288 * enable) and RCD (read cache disable) bits of mode
8289 * page 8 (MODEPAGE_CACHING).
8290 *
8291 * Arguments: un - driver soft state (unit) structure
8292 * rcd_flag - flag for controlling the read cache
8293 * wce_flag - flag for controlling the write cache
8294 *
8295 * Return Code: EIO
8296 * code returned by sd_send_scsi_MODE_SENSE and
8297 * sd_send_scsi_MODE_SELECT
8298 *
8299 * Context: Kernel Thread
8300 */
8301
8302 static int
8303 sd_cache_control(struct sd_lun *un, int rcd_flag, int wce_flag)
8304 {
8305 struct mode_caching *mode_caching_page;
8306 uchar_t *header;
8307 size_t buflen;
8308 int hdrlen;
8309 int bd_len;
8310 int rval = 0;
8311 struct mode_header_grp2 *mhp;
8312
8313 ASSERT(un != NULL);
8314
8315 /*
8316 * Do a test unit ready, otherwise a mode sense may not work if this
8317 * is the first command sent to the device after boot.
8318 */
8319 (void) sd_send_scsi_TEST_UNIT_READY(un, 0);
8320
8321 if (un->un_f_cfg_is_atapi == TRUE) {
8322 hdrlen = MODE_HEADER_LENGTH_GRP2;
8323 } else {
8324 hdrlen = MODE_HEADER_LENGTH;
8325 }
8326
8327 /*
8328 * Allocate memory for the retrieved mode page and its headers. Set
8329 * a pointer to the page itself. Use mode_cache_scsi3 to ensure
8330 * we get all of the mode sense data; otherwise, the mode select
8331 * will fail. mode_cache_scsi3 is a superset of mode_caching.
8332 */
8333 buflen = hdrlen + MODE_BLK_DESC_LENGTH +
8334 sizeof (struct mode_cache_scsi3);
8335
8336 header = kmem_zalloc(buflen, KM_SLEEP);
8337
8338 /* Get the information from the device. */
8339 if (un->un_f_cfg_is_atapi == TRUE) {
8340 rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, header, buflen,
8341 MODEPAGE_CACHING, SD_PATH_DIRECT);
8342 } else {
8343 rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, header, buflen,
8344 MODEPAGE_CACHING, SD_PATH_DIRECT);
8345 }
8346 if (rval != 0) {
8347 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un,
8348 "sd_cache_control: Mode Sense Failed\n");
8349 kmem_free(header, buflen);
8350 return (rval);
8351 }
8352
8353 /*
8354 * Determine size of Block Descriptors in order to locate
8355 * the mode page data. ATAPI devices return 0, SCSI devices
8356 * should return MODE_BLK_DESC_LENGTH.
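*
* The returned buffer is laid out as follows (a sketch; bd_len is
* zero for ATAPI):
*
*	+-------------+------------------+--------------------+
*	| mode header | block descriptor | caching mode page  |
*	|   hdrlen    |      bd_len      | MODEPAGE_CACHING   |
*	+-------------+------------------+--------------------+
*
* which is why the page pointer below is computed as
* header + hdrlen + bd_len.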
8357 */ 8358 if (un->un_f_cfg_is_atapi == TRUE) { 8359 mhp = (struct mode_header_grp2 *)header; 8360 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 8361 } else { 8362 bd_len = ((struct mode_header *)header)->bdesc_length; 8363 } 8364 8365 if (bd_len > MODE_BLK_DESC_LENGTH) { 8366 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 8367 "sd_cache_control: Mode Sense returned invalid " 8368 "block descriptor length\n"); 8369 kmem_free(header, buflen); 8370 return (EIO); 8371 } 8372 8373 mode_caching_page = (struct mode_caching *)(header + hdrlen + bd_len); 8374 if (mode_caching_page->mode_page.code != MODEPAGE_CACHING) { 8375 SD_ERROR(SD_LOG_COMMON, un, "sd_cache_control: Mode Sense" 8376 " caching page code mismatch %d\n", 8377 mode_caching_page->mode_page.code); 8378 kmem_free(header, buflen); 8379 return (EIO); 8380 } 8381 8382 /* Check the relevant bits on successful mode sense. */ 8383 if ((mode_caching_page->rcd && rcd_flag == SD_CACHE_ENABLE) || 8384 (!mode_caching_page->rcd && rcd_flag == SD_CACHE_DISABLE) || 8385 (mode_caching_page->wce && wce_flag == SD_CACHE_DISABLE) || 8386 (!mode_caching_page->wce && wce_flag == SD_CACHE_ENABLE)) { 8387 8388 size_t sbuflen; 8389 uchar_t save_pg; 8390 8391 /* 8392 * Construct select buffer length based on the 8393 * length of the sense data returned. 8394 */ 8395 sbuflen = hdrlen + MODE_BLK_DESC_LENGTH + 8396 sizeof (struct mode_page) + 8397 (int)mode_caching_page->mode_page.length; 8398 8399 /* 8400 * Set the caching bits as requested. 8401 */ 8402 if (rcd_flag == SD_CACHE_ENABLE) 8403 mode_caching_page->rcd = 0; 8404 else if (rcd_flag == SD_CACHE_DISABLE) 8405 mode_caching_page->rcd = 1; 8406 8407 if (wce_flag == SD_CACHE_ENABLE) 8408 mode_caching_page->wce = 1; 8409 else if (wce_flag == SD_CACHE_DISABLE) 8410 mode_caching_page->wce = 0; 8411 8412 /* 8413 * Save the page if the mode sense says the 8414 * drive supports it. 8415 */ 8416 save_pg = mode_caching_page->mode_page.ps ? 8417 SD_SAVE_PAGE : SD_DONTSAVE_PAGE; 8418 8419 /* Clear reserved bits before mode select. */ 8420 mode_caching_page->mode_page.ps = 0; 8421 8422 /* 8423 * Clear out mode header for mode select. 8424 * The rest of the retrieved page will be reused. 8425 */ 8426 bzero(header, hdrlen); 8427 8428 if (un->un_f_cfg_is_atapi == TRUE) { 8429 mhp = (struct mode_header_grp2 *)header; 8430 mhp->bdesc_length_hi = bd_len >> 8; 8431 mhp->bdesc_length_lo = (uchar_t)bd_len & 0xff; 8432 } else { 8433 ((struct mode_header *)header)->bdesc_length = bd_len; 8434 } 8435 8436 /* Issue mode select to change the cache settings */ 8437 if (un->un_f_cfg_is_atapi == TRUE) { 8438 rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP1, header, 8439 sbuflen, save_pg, SD_PATH_DIRECT); 8440 } else { 8441 rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, header, 8442 sbuflen, save_pg, SD_PATH_DIRECT); 8443 } 8444 } 8445 8446 kmem_free(header, buflen); 8447 return (rval); 8448 } 8449 8450 8451 /* 8452 * Function: sd_get_write_cache_enabled() 8453 * 8454 * Description: This routine is the driver entry point for determining if 8455 * write caching is enabled. It examines the WCE (write cache 8456 * enable) bits of mode page 8 (MODEPAGE_CACHING). 
8457 * 8458 * Arguments: un - driver soft state (unit) structure 8459 * is_enabled - pointer to int where write cache enabled state 8460 * is returned (non-zero -> write cache enabled) 8461 * 8462 * 8463 * Return Code: EIO 8464 * code returned by sd_send_scsi_MODE_SENSE 8465 * 8466 * Context: Kernel Thread 8467 * 8468 * NOTE: If ioctl is added to disable write cache, this sequence should 8469 * be followed so that no locking is required for accesses to 8470 * un->un_f_write_cache_enabled: 8471 * do mode select to clear wce 8472 * do synchronize cache to flush cache 8473 * set un->un_f_write_cache_enabled = FALSE 8474 * 8475 * Conversely, an ioctl to enable the write cache should be done 8476 * in this order: 8477 * set un->un_f_write_cache_enabled = TRUE 8478 * do mode select to set wce 8479 */ 8480 8481 static int 8482 sd_get_write_cache_enabled(struct sd_lun *un, int *is_enabled) 8483 { 8484 struct mode_caching *mode_caching_page; 8485 uchar_t *header; 8486 size_t buflen; 8487 int hdrlen; 8488 int bd_len; 8489 int rval = 0; 8490 8491 ASSERT(un != NULL); 8492 ASSERT(is_enabled != NULL); 8493 8494 /* in case of error, flag as enabled */ 8495 *is_enabled = TRUE; 8496 8497 /* 8498 * Do a test unit ready, otherwise a mode sense may not work if this 8499 * is the first command sent to the device after boot. 8500 */ 8501 (void) sd_send_scsi_TEST_UNIT_READY(un, 0); 8502 8503 if (un->un_f_cfg_is_atapi == TRUE) { 8504 hdrlen = MODE_HEADER_LENGTH_GRP2; 8505 } else { 8506 hdrlen = MODE_HEADER_LENGTH; 8507 } 8508 8509 /* 8510 * Allocate memory for the retrieved mode page and its headers. Set 8511 * a pointer to the page itself. 8512 */ 8513 buflen = hdrlen + MODE_BLK_DESC_LENGTH + sizeof (struct mode_caching); 8514 header = kmem_zalloc(buflen, KM_SLEEP); 8515 8516 /* Get the information from the device. */ 8517 if (un->un_f_cfg_is_atapi == TRUE) { 8518 rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, header, buflen, 8519 MODEPAGE_CACHING, SD_PATH_DIRECT); 8520 } else { 8521 rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, header, buflen, 8522 MODEPAGE_CACHING, SD_PATH_DIRECT); 8523 } 8524 if (rval != 0) { 8525 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 8526 "sd_get_write_cache_enabled: Mode Sense Failed\n"); 8527 kmem_free(header, buflen); 8528 return (rval); 8529 } 8530 8531 /* 8532 * Determine size of Block Descriptors in order to locate 8533 * the mode page data. ATAPI devices return 0, SCSI devices 8534 * should return MODE_BLK_DESC_LENGTH. 
8535 */
8536 if (un->un_f_cfg_is_atapi == TRUE) {
8537 struct mode_header_grp2 *mhp;
8538 mhp = (struct mode_header_grp2 *)header;
8539 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo;
8540 } else {
8541 bd_len = ((struct mode_header *)header)->bdesc_length;
8542 }
8543
8544 if (bd_len > MODE_BLK_DESC_LENGTH) {
8545 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
8546 "sd_get_write_cache_enabled: Mode Sense returned invalid "
8547 "block descriptor length\n");
8548 kmem_free(header, buflen);
8549 return (EIO);
8550 }
8551
8552 mode_caching_page = (struct mode_caching *)(header + hdrlen + bd_len);
8553 if (mode_caching_page->mode_page.code != MODEPAGE_CACHING) {
8554 SD_ERROR(SD_LOG_COMMON, un, "sd_get_write_cache_enabled: Mode Sense"
8555 " caching page code mismatch %d\n",
8556 mode_caching_page->mode_page.code);
8557 kmem_free(header, buflen);
8558 return (EIO);
8559 }
8560 *is_enabled = mode_caching_page->wce;
8561
8562 kmem_free(header, buflen);
8563 return (0);
8564 }
8565
8566
8567 /*
8568 * Function: sd_make_device
8569 *
8570 * Description: Utility routine to return the Solaris device number from
8571 * the data in the device's dev_info structure.
8572 *
8573 * Return Code: The Solaris device number
8574 *
8575 * Context: Any
8576 */
8577
8578 static dev_t
8579 sd_make_device(dev_info_t *devi)
8580 {
8581 return (makedevice(ddi_name_to_major(ddi_get_name(devi)),
8582 ddi_get_instance(devi) << SDUNIT_SHIFT));
8583 }
8584
8585
8586 /*
8587 * Function: sd_pm_entry
8588 *
8589 * Description: Called at the start of a new command to manage power
8590 * and busy status of a device. This includes determining whether
8591 * the current power state of the device is sufficient for
8592 * performing the command or whether it must be changed.
8593 * The PM framework is notified appropriately.
8594 * Only with a return status of DDI_SUCCESS will the
8595 * component be marked busy to the framework.
8596 *
8597 * All callers of sd_pm_entry must check the return status
8598 * and only call sd_pm_exit if it was DDI_SUCCESS. A status
8599 * of DDI_FAILURE indicates the device failed to power up.
8600 * In this case un_pm_count has been adjusted so the result
8601 * on exit is still powered down, i.e. count is less than 0.
8602 * Calling sd_pm_exit with this count value hits an ASSERT.
8603 *
8604 * Return Code: DDI_SUCCESS or DDI_FAILURE
8605 *
8606 * Context: Kernel thread context.
8607 */
8608
8609 static int
8610 sd_pm_entry(struct sd_lun *un)
8611 {
8612 int return_status = DDI_SUCCESS;
8613
8614 ASSERT(!mutex_owned(SD_MUTEX(un)));
8615 ASSERT(!mutex_owned(&un->un_pm_mutex));
8616
8617 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_entry: entry\n");
8618
8619 if (un->un_f_pm_is_enabled == FALSE) {
8620 SD_TRACE(SD_LOG_IO_PM, un,
8621 "sd_pm_entry: exiting, PM not enabled\n");
8622 return (return_status);
8623 }
8624
8625 /*
8626 * Just increment a counter if PM is enabled. On the transition from
8627 * 0 ==> 1, mark the device as busy. The iodone side will decrement
8628 * the count with each IO and mark the device as idle when the count
8629 * hits 0.
8630 *
8631 * If the count is less than 0 the device is powered down. If a powered
8632 * down device is successfully powered up then the count must be
8633 * incremented to reflect the power up. Note that it'll get incremented
8634 * a second time to become busy.
8635 *
8636 * Because the following has the potential to change the device state
8637 * and must release the un_pm_mutex to do so, only one thread can be
8638 * allowed through at a time.
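*
* For reference, the caller-side contract described in the header
* comment above reduces to this pattern (sketch):
*
*	if (sd_pm_entry(un) == DDI_SUCCESS) {
*		... issue the command ...
*		sd_pm_exit(un);
*	}
*
* with no sd_pm_exit() call on the DDI_FAILURE path.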
8639 */ 8640 8641 mutex_enter(&un->un_pm_mutex); 8642 while (un->un_pm_busy == TRUE) { 8643 cv_wait(&un->un_pm_busy_cv, &un->un_pm_mutex); 8644 } 8645 un->un_pm_busy = TRUE; 8646 8647 if (un->un_pm_count < 1) { 8648 8649 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_entry: busy component\n"); 8650 8651 /* 8652 * Indicate we are now busy so the framework won't attempt to 8653 * power down the device. This call will only fail if either 8654 * we passed a bad component number or the device has no 8655 * components. Neither of these should ever happen. 8656 */ 8657 mutex_exit(&un->un_pm_mutex); 8658 return_status = pm_busy_component(SD_DEVINFO(un), 0); 8659 ASSERT(return_status == DDI_SUCCESS); 8660 8661 mutex_enter(&un->un_pm_mutex); 8662 8663 if (un->un_pm_count < 0) { 8664 mutex_exit(&un->un_pm_mutex); 8665 8666 SD_TRACE(SD_LOG_IO_PM, un, 8667 "sd_pm_entry: power up component\n"); 8668 8669 /* 8670 * pm_raise_power will cause sdpower to be called 8671 * which brings the device power level to the 8672 * desired state, ON in this case. If successful, 8673 * un_pm_count and un_power_level will be updated 8674 * appropriately. 8675 */ 8676 return_status = pm_raise_power(SD_DEVINFO(un), 0, 8677 SD_SPINDLE_ON); 8678 8679 mutex_enter(&un->un_pm_mutex); 8680 8681 if (return_status != DDI_SUCCESS) { 8682 /* 8683 * Power up failed. 8684 * Idle the device and adjust the count 8685 * so the result on exit is that we're 8686 * still powered down, ie. count is less than 0. 8687 */ 8688 SD_TRACE(SD_LOG_IO_PM, un, 8689 "sd_pm_entry: power up failed," 8690 " idle the component\n"); 8691 8692 (void) pm_idle_component(SD_DEVINFO(un), 0); 8693 un->un_pm_count--; 8694 } else { 8695 /* 8696 * Device is powered up, verify the 8697 * count is non-negative. 8698 * This is debug only. 8699 */ 8700 ASSERT(un->un_pm_count == 0); 8701 } 8702 } 8703 8704 if (return_status == DDI_SUCCESS) { 8705 /* 8706 * For performance, now that the device has been tagged 8707 * as busy, and it's known to be powered up, update the 8708 * chain types to use jump tables that do not include 8709 * pm. This significantly lowers the overhead and 8710 * therefore improves performance. 8711 */ 8712 8713 mutex_exit(&un->un_pm_mutex); 8714 mutex_enter(SD_MUTEX(un)); 8715 SD_TRACE(SD_LOG_IO_PM, un, 8716 "sd_pm_entry: changing uscsi_chain_type from %d\n", 8717 un->un_uscsi_chain_type); 8718 8719 if (un->un_f_non_devbsize_supported) { 8720 un->un_buf_chain_type = 8721 SD_CHAIN_INFO_RMMEDIA_NO_PM; 8722 } else { 8723 un->un_buf_chain_type = 8724 SD_CHAIN_INFO_DISK_NO_PM; 8725 } 8726 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD_NO_PM; 8727 8728 SD_TRACE(SD_LOG_IO_PM, un, 8729 " changed uscsi_chain_type to %d\n", 8730 un->un_uscsi_chain_type); 8731 mutex_exit(SD_MUTEX(un)); 8732 mutex_enter(&un->un_pm_mutex); 8733 8734 if (un->un_pm_idle_timeid == NULL) { 8735 /* 300 ms. */ 8736 un->un_pm_idle_timeid = 8737 timeout(sd_pm_idletimeout_handler, un, 8738 (drv_usectohz((clock_t)300000))); 8739 /* 8740 * Include an extra call to busy which keeps the 8741 * device busy with-respect-to the PM layer 8742 * until the timer fires, at which time it'll 8743 * get the extra idle call. 8744 */ 8745 (void) pm_busy_component(SD_DEVINFO(un), 0); 8746 } 8747 } 8748 } 8749 un->un_pm_busy = FALSE; 8750 /* Next... 
*/
8751 cv_signal(&un->un_pm_busy_cv);
8752
8753 un->un_pm_count++;
8754
8755 SD_TRACE(SD_LOG_IO_PM, un,
8756 "sd_pm_entry: exiting, un_pm_count = %d\n", un->un_pm_count);
8757
8758 mutex_exit(&un->un_pm_mutex);
8759
8760 return (return_status);
8761 }
8762
8763
8764 /*
8765 * Function: sd_pm_exit
8766 *
8767 * Description: Called at the completion of a command to manage busy
8768 * status for the device. If the device becomes idle the
8769 * PM framework is notified.
8770 *
8771 * Context: Kernel thread context
8772 */
8773
8774 static void
8775 sd_pm_exit(struct sd_lun *un)
8776 {
8777 ASSERT(!mutex_owned(SD_MUTEX(un)));
8778 ASSERT(!mutex_owned(&un->un_pm_mutex));
8779
8780 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_exit: entry\n");
8781
8782 /*
8783 * After attach the following flag is only read, so don't
8784 * take the penalty of acquiring a mutex for it.
8785 */
8786 if (un->un_f_pm_is_enabled == TRUE) {
8787
8788 mutex_enter(&un->un_pm_mutex);
8789 un->un_pm_count--;
8790
8791 SD_TRACE(SD_LOG_IO_PM, un,
8792 "sd_pm_exit: un_pm_count = %d\n", un->un_pm_count);
8793
8794 ASSERT(un->un_pm_count >= 0);
8795 if (un->un_pm_count == 0) {
8796 mutex_exit(&un->un_pm_mutex);
8797
8798 SD_TRACE(SD_LOG_IO_PM, un,
8799 "sd_pm_exit: idle component\n");
8800
8801 (void) pm_idle_component(SD_DEVINFO(un), 0);
8802
8803 } else {
8804 mutex_exit(&un->un_pm_mutex);
8805 }
8806 }
8807
8808 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_exit: exiting\n");
8809 }
8810
8811
8812 /*
8813 * Function: sdopen
8814 *
8815 * Description: Driver's open(9e) entry point function.
8816 *
8817 * Arguments: dev_p - pointer to device number
8818 * flag - how to open file (FEXCL, FNDELAY, FREAD, FWRITE)
8819 * otyp - open type (OTYP_BLK, OTYP_CHR, OTYP_LYR)
8820 * cred_p - user credential pointer
8821 *
8822 * Return Code: EINVAL
8823 * ENXIO
8824 * EIO
8825 * EROFS
8826 * EBUSY
8827 *
8828 * Context: Kernel thread context
8829 */
8830 /* ARGSUSED */
8831 static int
8832 sdopen(dev_t *dev_p, int flag, int otyp, cred_t *cred_p)
8833 {
8834 struct sd_lun *un;
8835 int nodelay;
8836 int part;
8837 uint64_t partmask;
8838 int instance;
8839 dev_t dev;
8840 int rval = EIO;
8841 diskaddr_t nblks = 0;
8842
8843 /* Validate the open type */
8844 if (otyp >= OTYPCNT) {
8845 return (EINVAL);
8846 }
8847
8848 dev = *dev_p;
8849 instance = SDUNIT(dev);
8850 mutex_enter(&sd_detach_mutex);
8851
8852 /*
8853 * Fail the open if there is no softstate for the instance, or
8854 * if another thread somewhere is trying to detach the instance.
8855 */
8856 if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) ||
8857 (un->un_detach_count != 0)) {
8858 mutex_exit(&sd_detach_mutex);
8859 /*
8860 * The probe cache only needs to be cleared when open(9e) fails
8861 * with ENXIO (4238046).
8862 */
8863 /*
8864 * Unconditionally clearing the probe cache is ok with
8865 * separate sd/ssd binaries; on the x86 platform it can be
8866 * an issue when both parallel SCSI and fibre are handled
8867 * by one binary.
8868 */
8869 sd_scsi_clear_probe_cache();
8870 return (ENXIO);
8871 }
8872
8873 /*
8874 * The un_layer_count is to prevent another thread in specfs from
8875 * trying to detach the instance, which can happen when we are
8876 * called from a higher-layer driver instead of thru specfs.
8877 * This will not be needed when DDI provides a layered driver
8878 * interface that allows specfs to know that an instance is in
8879 * use by a layered driver & should not be detached.
8880 *
8881 * Note: the semantics for layered driver opens are exactly one
8882 * close for every open.
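*
* For example (sketch): two OTYP_LYR opens from a layered driver
* take un_layer_count to 2, and only the second matching close
* returns it to 0, which is what finally allows sd_unit_detach()
* to proceed.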
8883 */
8884 if (otyp == OTYP_LYR) {
8885 un->un_layer_count++;
8886 }
8887
8888 /*
8889 * Keep a count of the current # of opens in progress. This is because
8890 * some layered drivers try to call us as a regular open. This can
8891 * cause problems that we cannot prevent; however, by keeping this count
8892 * we can at least keep our open and detach routines from racing against
8893 * each other under such conditions.
8894 */
8895 un->un_opens_in_progress++;
8896 mutex_exit(&sd_detach_mutex);
8897
8898 nodelay = (flag & (FNDELAY | FNONBLOCK));
8899 part = SDPART(dev);
8900 partmask = 1 << part;
8901
8902 /*
8903 * We use a semaphore here in order to serialize
8904 * open and close requests on the device.
8905 */
8906 sema_p(&un->un_semoclose);
8907
8908 mutex_enter(SD_MUTEX(un));
8909
8910 /*
8911 * All device accesses go thru sdstrategy(), where we check
8912 * the suspend status, but there could be a scsi_poll command,
8913 * which bypasses sdstrategy(), so we need to check the pm
8914 * status here as well.
8915 */
8916
8917 if (!nodelay) {
8918 while ((un->un_state == SD_STATE_SUSPENDED) ||
8919 (un->un_state == SD_STATE_PM_CHANGING)) {
8920 cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
8921 }
8922
8923 mutex_exit(SD_MUTEX(un));
8924 if (sd_pm_entry(un) != DDI_SUCCESS) {
8925 rval = EIO;
8926 SD_ERROR(SD_LOG_OPEN_CLOSE, un,
8927 "sdopen: sd_pm_entry failed\n");
8928 goto open_failed_with_pm;
8929 }
8930 mutex_enter(SD_MUTEX(un));
8931 }
8932
8933 /* check for previous exclusive open */
8934 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: un=%p\n", (void *)un);
8935 SD_TRACE(SD_LOG_OPEN_CLOSE, un,
8936 "sdopen: exclopen=%x, flag=%x, regopen=%x\n",
8937 un->un_exclopen, flag, un->un_ocmap.regopen[otyp]);
8938
8939 if (un->un_exclopen & (partmask)) {
8940 goto excl_open_fail;
8941 }
8942
8943 if (flag & FEXCL) {
8944 int i;
8945 if (un->un_ocmap.lyropen[part]) {
8946 goto excl_open_fail;
8947 }
8948 for (i = 0; i < (OTYPCNT - 1); i++) {
8949 if (un->un_ocmap.regopen[i] & (partmask)) {
8950 goto excl_open_fail;
8951 }
8952 }
8953 }
8954
8955 /*
8956 * Check the write permission if this is a removable media device,
8957 * NDELAY has not been set, and write permission is requested.
8958 *
8959 * Note: If NDELAY was set and this is write-protected media the WRITE
8960 * attempt will fail with EIO as part of the I/O processing. This is a
8961 * more permissive implementation that allows the open to succeed and
8962 * WRITE attempts to fail when appropriate.
8963 */
8964 if (un->un_f_chk_wp_open) {
8965 if ((flag & FWRITE) && (!nodelay)) {
8966 mutex_exit(SD_MUTEX(un));
8967 /*
8968 * For a writable DVD drive, defer the write-permission
8969 * check till sdstrategy and do not fail the open even
8970 * if FWRITE is set, as the device can be writable
8971 * depending upon the media, and the media can change
8972 * after the call to open().
8973 */
8974 if (un->un_f_dvdram_writable_device == FALSE) {
8975 if (ISCD(un) || sr_check_wp(dev)) {
8976 rval = EROFS;
8977 mutex_enter(SD_MUTEX(un));
8978 SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: "
8979 "write to cd or write protected media\n");
8980 goto open_fail;
8981 }
8982 }
8983 mutex_enter(SD_MUTEX(un));
8984 }
8985 }
8986
8987 /*
8988 * If opening in NDELAY/NONBLOCK mode, just return.
8989 * Check if disk is ready and has a valid geometry later.
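 *
 * For example (illustrative): open("/dev/rdsk/...", O_RDONLY |
 * O_NONBLOCK) is allowed to succeed here even with no media present;
 * readiness and label validity are then checked when I/O is actually
 * attempted.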
8990 */ 8991 if (!nodelay) { 8992 mutex_exit(SD_MUTEX(un)); 8993 rval = sd_ready_and_valid(un); 8994 mutex_enter(SD_MUTEX(un)); 8995 /* 8996 * Fail if device is not ready or if the number of disk 8997 * blocks is zero or negative for non CD devices. 8998 */ 8999 9000 nblks = 0; 9001 9002 if (rval == SD_READY_VALID && (!ISCD(un))) { 9003 /* if cmlb_partinfo fails, nblks remains 0 */ 9004 mutex_exit(SD_MUTEX(un)); 9005 (void) cmlb_partinfo(un->un_cmlbhandle, part, &nblks, 9006 NULL, NULL, NULL, (void *)SD_PATH_DIRECT); 9007 mutex_enter(SD_MUTEX(un)); 9008 } 9009 9010 if ((rval != SD_READY_VALID) || 9011 (!ISCD(un) && nblks <= 0)) { 9012 rval = un->un_f_has_removable_media ? ENXIO : EIO; 9013 SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: " 9014 "device not ready or invalid disk block value\n"); 9015 goto open_fail; 9016 } 9017 #if defined(__i386) || defined(__amd64) 9018 } else { 9019 uchar_t *cp; 9020 /* 9021 * x86 requires special nodelay handling, so that p0 is 9022 * always defined and accessible. 9023 * Invalidate geometry only if device is not already open. 9024 */ 9025 cp = &un->un_ocmap.chkd[0]; 9026 while (cp < &un->un_ocmap.chkd[OCSIZE]) { 9027 if (*cp != (uchar_t)0) { 9028 break; 9029 } 9030 cp++; 9031 } 9032 if (cp == &un->un_ocmap.chkd[OCSIZE]) { 9033 mutex_exit(SD_MUTEX(un)); 9034 cmlb_invalidate(un->un_cmlbhandle, 9035 (void *)SD_PATH_DIRECT); 9036 mutex_enter(SD_MUTEX(un)); 9037 } 9038 9039 #endif 9040 } 9041 9042 if (otyp == OTYP_LYR) { 9043 un->un_ocmap.lyropen[part]++; 9044 } else { 9045 un->un_ocmap.regopen[otyp] |= partmask; 9046 } 9047 9048 /* Set up open and exclusive open flags */ 9049 if (flag & FEXCL) { 9050 un->un_exclopen |= (partmask); 9051 } 9052 9053 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: " 9054 "open of part %d type %d\n", part, otyp); 9055 9056 mutex_exit(SD_MUTEX(un)); 9057 if (!nodelay) { 9058 sd_pm_exit(un); 9059 } 9060 9061 sema_v(&un->un_semoclose); 9062 9063 mutex_enter(&sd_detach_mutex); 9064 un->un_opens_in_progress--; 9065 mutex_exit(&sd_detach_mutex); 9066 9067 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: exit success\n"); 9068 return (DDI_SUCCESS); 9069 9070 excl_open_fail: 9071 SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: fail exclusive open\n"); 9072 rval = EBUSY; 9073 9074 open_fail: 9075 mutex_exit(SD_MUTEX(un)); 9076 9077 /* 9078 * On a failed open we must exit the pm management. 9079 */ 9080 if (!nodelay) { 9081 sd_pm_exit(un); 9082 } 9083 open_failed_with_pm: 9084 sema_v(&un->un_semoclose); 9085 9086 mutex_enter(&sd_detach_mutex); 9087 un->un_opens_in_progress--; 9088 if (otyp == OTYP_LYR) { 9089 un->un_layer_count--; 9090 } 9091 mutex_exit(&sd_detach_mutex); 9092 9093 return (rval); 9094 } 9095 9096 9097 /* 9098 * Function: sdclose 9099 * 9100 * Description: Driver's close(9e) entry point function. 
9101 *
9102 * Arguments: dev - device number
9103 * flag - file status flag, informational only
9104 * otyp - close type (OTYP_BLK, OTYP_CHR, OTYP_LYR)
9105 * cred_p - user credential pointer
9106 *
9107 * Return Code: ENXIO
9108 *
9109 * Context: Kernel thread context
9110 */
9111 /* ARGSUSED */
9112 static int
9113 sdclose(dev_t dev, int flag, int otyp, cred_t *cred_p)
9114 {
9115 struct sd_lun *un;
9116 uchar_t *cp;
9117 int part;
9118 int nodelay;
9119 int rval = 0;
9120
9121 /* Validate the open type */
9122 if (otyp >= OTYPCNT) {
9123 return (ENXIO);
9124 }
9125
9126 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
9127 return (ENXIO);
9128 }
9129
9130 part = SDPART(dev);
9131 nodelay = flag & (FNDELAY | FNONBLOCK);
9132
9133 SD_TRACE(SD_LOG_OPEN_CLOSE, un,
9134 "sdclose: close of part %d type %d\n", part, otyp);
9135
9136 /*
9137 * We use a semaphore here in order to serialize
9138 * open and close requests on the device.
9139 */
9140 sema_p(&un->un_semoclose);
9141
9142 mutex_enter(SD_MUTEX(un));
9143
9144 /* Don't proceed if power is being changed. */
9145 while (un->un_state == SD_STATE_PM_CHANGING) {
9146 cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
9147 }
9148
9149 if (un->un_exclopen & (1 << part)) {
9150 un->un_exclopen &= ~(1 << part);
9151 }
9152
9153 /* Update the open partition map */
9154 if (otyp == OTYP_LYR) {
9155 un->un_ocmap.lyropen[part] -= 1;
9156 } else {
9157 un->un_ocmap.regopen[otyp] &= ~(1 << part);
9158 }
9159
9160 cp = &un->un_ocmap.chkd[0];
9161 while (cp < &un->un_ocmap.chkd[OCSIZE]) {
9162 if (*cp != (uchar_t)0) {
9163 break;
9164 }
9165 cp++;
9166 }
9167
9168 if (cp == &un->un_ocmap.chkd[OCSIZE]) {
9169 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdclose: last close\n");
9170
9171 /*
9172 * We avoid persistence upon the last close, and set
9173 * the throttle back to the maximum.
9174 */
9175 un->un_throttle = un->un_saved_throttle;
9176
9177 if (un->un_state == SD_STATE_OFFLINE) {
9178 if (un->un_f_is_fibre == FALSE) {
9179 scsi_log(SD_DEVINFO(un), sd_label,
9180 CE_WARN, "offline\n");
9181 }
9182 mutex_exit(SD_MUTEX(un));
9183 cmlb_invalidate(un->un_cmlbhandle,
9184 (void *)SD_PATH_DIRECT);
9185 mutex_enter(SD_MUTEX(un));
9186
9187 } else {
9188 /*
9189 * Flush any outstanding writes in NVRAM cache.
9190 * Note: SYNCHRONIZE CACHE is an optional SCSI-2
9191 * command; it may not work for non-Pluto devices.
9192 * SYNCHRONIZE CACHE is not required for removables,
9193 * except DVD-RAM drives.
9194 *
9195 * Also note: because SYNCHRONIZE CACHE is currently
9196 * the only command issued here that requires the
9197 * drive be powered up, only do the power up before
9198 * sending the Sync Cache command. If additional
9199 * commands are added which require a powered up
9200 * drive, the following sequence may have to change.
9201 *
9202 * And finally, note that parallel SCSI on SPARC
9203 * only issues a Sync Cache to DVD-RAM, a newly
9204 * supported device.
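 *
 * For reference (hedged): SYNCHRONIZE CACHE(10) is SCSI opcode 0x35;
 * with its LBA and block-count fields left at zero it asks the drive
 * to flush its entire cache to the media.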
9205 */
9206 #if defined(__i386) || defined(__amd64)
9207 if (un->un_f_sync_cache_supported ||
9208 un->un_f_dvdram_writable_device == TRUE) {
9209 #else
9210 if (un->un_f_dvdram_writable_device == TRUE) {
9211 #endif
9212 mutex_exit(SD_MUTEX(un));
9213 if (sd_pm_entry(un) == DDI_SUCCESS) {
9214 rval =
9215 sd_send_scsi_SYNCHRONIZE_CACHE(un,
9216 NULL);
9217 /* ignore error if not supported */
9218 if (rval == ENOTSUP) {
9219 rval = 0;
9220 } else if (rval != 0) {
9221 rval = EIO;
9222 }
9223 sd_pm_exit(un);
9224 } else {
9225 rval = EIO;
9226 }
9227 mutex_enter(SD_MUTEX(un));
9228 }
9229
9230 /*
9231 * For devices that support DOOR_LOCK, send an ALLOW
9232 * MEDIA REMOVAL command, but don't get upset if it
9233 * fails. We need to raise the power of the drive before
9234 * we can call sd_send_scsi_DOORLOCK().
9235 */
9236 if (un->un_f_doorlock_supported) {
9237 mutex_exit(SD_MUTEX(un));
9238 if (sd_pm_entry(un) == DDI_SUCCESS) {
9239 rval = sd_send_scsi_DOORLOCK(un,
9240 SD_REMOVAL_ALLOW, SD_PATH_DIRECT);
9241
9242 sd_pm_exit(un);
9243 if (ISCD(un) && (rval != 0) &&
9244 (nodelay != 0)) {
9245 rval = ENXIO;
9246 }
9247 } else {
9248 rval = EIO;
9249 }
9250 mutex_enter(SD_MUTEX(un));
9251 }
9252
9253 /*
9254 * If a device has removable media, invalidate all
9255 * parameters related to media, such as geometry,
9256 * blocksize, and blockcount.
9257 */
9258 if (un->un_f_has_removable_media) {
9259 sr_ejected(un);
9260 }
9261
9262 /*
9263 * Destroy the cache (if it exists) which was
9264 * allocated for the write maps since this is
9265 * the last close for this media.
9266 */
9267 if (un->un_wm_cache) {
9268 /*
9269 * Check if there are pending commands;
9270 * if there are, give a warning and
9271 * do not destroy the cache.
9272 */
9273 if (un->un_ncmds_in_driver > 0) {
9274 scsi_log(SD_DEVINFO(un),
9275 sd_label, CE_WARN,
9276 "Unable to clean up memory "
9277 "because of pending I/O\n");
9278 } else {
9279 kmem_cache_destroy(
9280 un->un_wm_cache);
9281 un->un_wm_cache = NULL;
9282 }
9283 }
9284 }
9285 }
9286
9287 mutex_exit(SD_MUTEX(un));
9288 sema_v(&un->un_semoclose);
9289
9290 if (otyp == OTYP_LYR) {
9291 mutex_enter(&sd_detach_mutex);
9292 /*
9293 * The detach routine may run when the layer count
9294 * drops to zero.
9295 */
9296 un->un_layer_count--;
9297 mutex_exit(&sd_detach_mutex);
9298 }
9299
9300 return (rval);
9301 }
9302
9303
9304 /*
9305 * Function: sd_ready_and_valid
9306 *
9307 * Description: Test if device is ready and has a valid geometry.
9308 *
9309 * Arguments: un - driver soft state (unit) structure
9310 *
9311 *
9312 * Return Code: SD_READY_VALID ready and valid label
9313 * SD_NOT_READY_VALID not ready, no label
9314 * SD_RESERVED_BY_OTHERS reservation conflict
9315 *
9316 * Context: Never called at interrupt context.
9317 */
9318
9319 static int
9320 sd_ready_and_valid(struct sd_lun *un)
9321 {
9322 struct sd_errstats *stp;
9323 uint64_t capacity;
9324 uint_t lbasize;
9325 int rval = SD_READY_VALID;
9326 char name_str[48];
9327 int is_valid;
9328
9329 ASSERT(un != NULL);
9330 ASSERT(!mutex_owned(SD_MUTEX(un)));
9331
9332 mutex_enter(SD_MUTEX(un));
9333 /*
9334 * If a device has removable media, we must check if media is
9335 * ready when checking if this device is ready and valid.
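 *
 * The readiness probe used below is a TEST UNIT READY (SCSI opcode
 * 0x00): a zero return means media is present and the device is
 * ready; any non-zero return is mapped to SD_NOT_READY_VALID.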
9336 */
9337 if (un->un_f_has_removable_media) {
9338 mutex_exit(SD_MUTEX(un));
9339 if (sd_send_scsi_TEST_UNIT_READY(un, 0) != 0) {
9340 rval = SD_NOT_READY_VALID;
9341 mutex_enter(SD_MUTEX(un));
9342 goto done;
9343 }
9344
9345 is_valid = SD_IS_VALID_LABEL(un);
9346 mutex_enter(SD_MUTEX(un));
9347 if (!is_valid ||
9348 (un->un_f_blockcount_is_valid == FALSE) ||
9349 (un->un_f_tgt_blocksize_is_valid == FALSE)) {
9350
9351 /* The capacity has to be read on every open. */
9352 mutex_exit(SD_MUTEX(un));
9353 if (sd_send_scsi_READ_CAPACITY(un, &capacity,
9354 &lbasize, SD_PATH_DIRECT) != 0) {
9355 cmlb_invalidate(un->un_cmlbhandle,
9356 (void *)SD_PATH_DIRECT);
9357 mutex_enter(SD_MUTEX(un));
9358 rval = SD_NOT_READY_VALID;
9359 goto done;
9360 } else {
9361 mutex_enter(SD_MUTEX(un));
9362 sd_update_block_info(un, lbasize, capacity);
9363 }
9364 }
9365
9366 /*
9367 * Check if the media in the device is writable or not.
9368 */
9369 if (!is_valid && ISCD(un)) {
9370 sd_check_for_writable_cd(un, SD_PATH_DIRECT);
9371 }
9372
9373 } else {
9374 /*
9375 * Do a test unit ready to clear any unit attention from non-CD
9376 * devices.
9377 */
9378 mutex_exit(SD_MUTEX(un));
9379 (void) sd_send_scsi_TEST_UNIT_READY(un, 0);
9380 mutex_enter(SD_MUTEX(un));
9381 }
9382
9383
9384 /*
9385 * If this is a non-512-byte-block device, allocate space for
9386 * the wmap cache. This is done here because this routine is
9387 * called every time the media is changed, and the block size
9388 * is a function of the media rather than the device.
9389 */
9390 if (un->un_f_non_devbsize_supported && NOT_DEVBSIZE(un)) {
9391 if (!(un->un_wm_cache)) {
9392 (void) snprintf(name_str, sizeof (name_str),
9393 "%s%d_cache",
9394 ddi_driver_name(SD_DEVINFO(un)),
9395 ddi_get_instance(SD_DEVINFO(un)));
9396 un->un_wm_cache = kmem_cache_create(
9397 name_str, sizeof (struct sd_w_map),
9398 8, sd_wm_cache_constructor,
9399 sd_wm_cache_destructor, NULL,
9400 (void *)un, NULL, 0);
9401 if (!(un->un_wm_cache)) {
9402 rval = ENOMEM;
9403 goto done;
9404 }
9405 }
9406 }
9407
9408 if (un->un_state == SD_STATE_NORMAL) {
9409 /*
9410 * If the target is not yet ready here (defined by a TUR
9411 * failure), invalidate the geometry and print an 'offline'
9412 * message. This is a legacy message, as the state of the
9413 * target is not actually changed to SD_STATE_OFFLINE.
9414 *
9415 * If the TUR fails for EACCES (Reservation Conflict),
9416 * SD_RESERVED_BY_OTHERS will be returned to indicate a
9417 * reservation conflict. If the TUR fails for other
9418 * reasons, SD_NOT_READY_VALID will be returned.
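 *
 * In summary, the mapping implemented below is:
 *
 *     err == 0      -> continue with validation
 *     err == EACCES -> SD_RESERVED_BY_OTHERS
 *     other err     -> SD_NOT_READY_VALID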
9419 */ 9420 int err; 9421 9422 mutex_exit(SD_MUTEX(un)); 9423 err = sd_send_scsi_TEST_UNIT_READY(un, 0); 9424 mutex_enter(SD_MUTEX(un)); 9425 9426 if (err != 0) { 9427 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 9428 "offline or reservation conflict\n"); 9429 mutex_exit(SD_MUTEX(un)); 9430 cmlb_invalidate(un->un_cmlbhandle, 9431 (void *)SD_PATH_DIRECT); 9432 mutex_enter(SD_MUTEX(un)); 9433 if (err == EACCES) { 9434 rval = SD_RESERVED_BY_OTHERS; 9435 } else { 9436 rval = SD_NOT_READY_VALID; 9437 } 9438 goto done; 9439 } 9440 } 9441 9442 if (un->un_f_format_in_progress == FALSE) { 9443 mutex_exit(SD_MUTEX(un)); 9444 if (cmlb_validate(un->un_cmlbhandle, 0, 9445 (void *)SD_PATH_DIRECT) != 0) { 9446 rval = SD_NOT_READY_VALID; 9447 mutex_enter(SD_MUTEX(un)); 9448 goto done; 9449 } 9450 if (un->un_f_pkstats_enabled) { 9451 sd_set_pstats(un); 9452 SD_TRACE(SD_LOG_IO_PARTITION, un, 9453 "sd_ready_and_valid: un:0x%p pstats created and " 9454 "set\n", un); 9455 } 9456 mutex_enter(SD_MUTEX(un)); 9457 } 9458 9459 /* 9460 * If this device supports DOOR_LOCK command, try and send 9461 * this command to PREVENT MEDIA REMOVAL, but don't get upset 9462 * if it fails. For a CD, however, it is an error 9463 */ 9464 if (un->un_f_doorlock_supported) { 9465 mutex_exit(SD_MUTEX(un)); 9466 if ((sd_send_scsi_DOORLOCK(un, SD_REMOVAL_PREVENT, 9467 SD_PATH_DIRECT) != 0) && ISCD(un)) { 9468 rval = SD_NOT_READY_VALID; 9469 mutex_enter(SD_MUTEX(un)); 9470 goto done; 9471 } 9472 mutex_enter(SD_MUTEX(un)); 9473 } 9474 9475 /* The state has changed, inform the media watch routines */ 9476 un->un_mediastate = DKIO_INSERTED; 9477 cv_broadcast(&un->un_state_cv); 9478 rval = SD_READY_VALID; 9479 9480 done: 9481 9482 /* 9483 * Initialize the capacity kstat value, if no media previously 9484 * (capacity kstat is 0) and a media has been inserted 9485 * (un_blockcount > 0). 9486 */ 9487 if (un->un_errstats != NULL) { 9488 stp = (struct sd_errstats *)un->un_errstats->ks_data; 9489 if ((stp->sd_capacity.value.ui64 == 0) && 9490 (un->un_f_blockcount_is_valid == TRUE)) { 9491 stp->sd_capacity.value.ui64 = 9492 (uint64_t)((uint64_t)un->un_blockcount * 9493 un->un_sys_blocksize); 9494 } 9495 } 9496 9497 mutex_exit(SD_MUTEX(un)); 9498 return (rval); 9499 } 9500 9501 9502 /* 9503 * Function: sdmin 9504 * 9505 * Description: Routine to limit the size of a data transfer. Used in 9506 * conjunction with physio(9F). 9507 * 9508 * Arguments: bp - pointer to the indicated buf(9S) struct. 9509 * 9510 * Context: Kernel thread context. 9511 */ 9512 9513 static void 9514 sdmin(struct buf *bp) 9515 { 9516 struct sd_lun *un; 9517 int instance; 9518 9519 instance = SDUNIT(bp->b_edev); 9520 9521 un = ddi_get_soft_state(sd_state, instance); 9522 ASSERT(un != NULL); 9523 9524 if (bp->b_bcount > un->un_max_xfer_size) { 9525 bp->b_bcount = un->un_max_xfer_size; 9526 } 9527 } 9528 9529 9530 /* 9531 * Function: sdread 9532 * 9533 * Description: Driver's read(9e) entry point function. 9534 * 9535 * Arguments: dev - device number 9536 * uio - structure pointer describing where data is to be stored 9537 * in user's space 9538 * cred_p - user credential pointer 9539 * 9540 * Return Code: ENXIO 9541 * EIO 9542 * EINVAL 9543 * value returned by physio 9544 * 9545 * Context: Kernel thread context. 
9546 */
9547 /* ARGSUSED */
9548 static int
9549 sdread(dev_t dev, struct uio *uio, cred_t *cred_p)
9550 {
9551 struct sd_lun *un = NULL;
9552 int secmask;
9553 int err;
9554
9555 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
9556 return (ENXIO);
9557 }
9558
9559 ASSERT(!mutex_owned(SD_MUTEX(un)));
9560
9561 if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) {
9562 mutex_enter(SD_MUTEX(un));
9563 /*
9564 * Because the call to sd_ready_and_valid will issue I/O, we
9565 * must wait here if either the device is suspended or
9566 * if its power level is changing.
9567 */
9568 while ((un->un_state == SD_STATE_SUSPENDED) ||
9569 (un->un_state == SD_STATE_PM_CHANGING)) {
9570 cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
9571 }
9572 un->un_ncmds_in_driver++;
9573 mutex_exit(SD_MUTEX(un));
9574 if ((sd_ready_and_valid(un)) != SD_READY_VALID) {
9575 mutex_enter(SD_MUTEX(un));
9576 un->un_ncmds_in_driver--;
9577 ASSERT(un->un_ncmds_in_driver >= 0);
9578 mutex_exit(SD_MUTEX(un));
9579 return (EIO);
9580 }
9581 mutex_enter(SD_MUTEX(un));
9582 un->un_ncmds_in_driver--;
9583 ASSERT(un->un_ncmds_in_driver >= 0);
9584 mutex_exit(SD_MUTEX(un));
9585 }
9586
9587 /*
9588 * Read requests are restricted to multiples of the system block size.
9589 */
9590 secmask = un->un_sys_blocksize - 1;
9591
9592 if (uio->uio_loffset & ((offset_t)(secmask))) {
9593 SD_ERROR(SD_LOG_READ_WRITE, un,
9594 "sdread: file offset not modulo %d\n",
9595 un->un_sys_blocksize);
9596 err = EINVAL;
9597 } else if (uio->uio_iov->iov_len & (secmask)) {
9598 SD_ERROR(SD_LOG_READ_WRITE, un,
9599 "sdread: transfer length not modulo %d\n",
9600 un->un_sys_blocksize);
9601 err = EINVAL;
9602 } else {
9603 err = physio(sdstrategy, NULL, dev, B_READ, sdmin, uio);
9604 }
9605 return (err);
9606 }
9607
9608
9609 /*
9610 * Function: sdwrite
9611 *
9612 * Description: Driver's write(9e) entry point function.
9613 *
9614 * Arguments: dev - device number
9615 * uio - structure pointer describing where data is stored in
9616 * user's space
9617 * cred_p - user credential pointer
9618 *
9619 * Return Code: ENXIO
9620 * EIO
9621 * EINVAL
9622 * value returned by physio
9623 *
9624 * Context: Kernel thread context.
9625 */
9626 /* ARGSUSED */
9627 static int
9628 sdwrite(dev_t dev, struct uio *uio, cred_t *cred_p)
9629 {
9630 struct sd_lun *un = NULL;
9631 int secmask;
9632 int err;
9633
9634 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
9635 return (ENXIO);
9636 }
9637
9638 ASSERT(!mutex_owned(SD_MUTEX(un)));
9639
9640 if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) {
9641 mutex_enter(SD_MUTEX(un));
9642 /*
9643 * Because the call to sd_ready_and_valid will issue I/O, we
9644 * must wait here if either the device is suspended or
9645 * if its power level is changing.
9646 */
9647 while ((un->un_state == SD_STATE_SUSPENDED) ||
9648 (un->un_state == SD_STATE_PM_CHANGING)) {
9649 cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
9650 }
9651 un->un_ncmds_in_driver++;
9652 mutex_exit(SD_MUTEX(un));
9653 if ((sd_ready_and_valid(un)) != SD_READY_VALID) {
9654 mutex_enter(SD_MUTEX(un));
9655 un->un_ncmds_in_driver--;
9656 ASSERT(un->un_ncmds_in_driver >= 0);
9657 mutex_exit(SD_MUTEX(un));
9658 return (EIO);
9659 }
9660 mutex_enter(SD_MUTEX(un));
9661 un->un_ncmds_in_driver--;
9662 ASSERT(un->un_ncmds_in_driver >= 0);
9663 mutex_exit(SD_MUTEX(un));
9664 }
9665
9666 /*
9667 * Write requests are restricted to multiples of the system block size.
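 *
 * A worked example of the check below (assuming un_sys_blocksize is
 * 512): secmask == 0x1FF, so a file offset of 1024 passes the test
 * (1024 & 0x1FF == 0) while an offset of 1030 fails it
 * (1030 & 0x1FF == 6) and the request is rejected with EINVAL.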
9668 */
9669 secmask = un->un_sys_blocksize - 1;
9670
9671 if (uio->uio_loffset & ((offset_t)(secmask))) {
9672 SD_ERROR(SD_LOG_READ_WRITE, un,
9673 "sdwrite: file offset not modulo %d\n",
9674 un->un_sys_blocksize);
9675 err = EINVAL;
9676 } else if (uio->uio_iov->iov_len & (secmask)) {
9677 SD_ERROR(SD_LOG_READ_WRITE, un,
9678 "sdwrite: transfer length not modulo %d\n",
9679 un->un_sys_blocksize);
9680 err = EINVAL;
9681 } else {
9682 err = physio(sdstrategy, NULL, dev, B_WRITE, sdmin, uio);
9683 }
9684 return (err);
9685 }
9686
9687
9688 /*
9689 * Function: sdaread
9690 *
9691 * Description: Driver's aread(9e) entry point function.
9692 *
9693 * Arguments: dev - device number
9694 * aio - structure pointer describing where data is to be stored
9695 * cred_p - user credential pointer
9696 *
9697 * Return Code: ENXIO
9698 * EIO
9699 * EINVAL
9700 * value returned by aphysio
9701 *
9702 * Context: Kernel thread context.
9703 */
9704 /* ARGSUSED */
9705 static int
9706 sdaread(dev_t dev, struct aio_req *aio, cred_t *cred_p)
9707 {
9708 struct sd_lun *un = NULL;
9709 struct uio *uio = aio->aio_uio;
9710 int secmask;
9711 int err;
9712
9713 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
9714 return (ENXIO);
9715 }
9716
9717 ASSERT(!mutex_owned(SD_MUTEX(un)));
9718
9719 if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) {
9720 mutex_enter(SD_MUTEX(un));
9721 /*
9722 * Because the call to sd_ready_and_valid will issue I/O, we
9723 * must wait here if either the device is suspended or
9724 * if its power level is changing.
9725 */
9726 while ((un->un_state == SD_STATE_SUSPENDED) ||
9727 (un->un_state == SD_STATE_PM_CHANGING)) {
9728 cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
9729 }
9730 un->un_ncmds_in_driver++;
9731 mutex_exit(SD_MUTEX(un));
9732 if ((sd_ready_and_valid(un)) != SD_READY_VALID) {
9733 mutex_enter(SD_MUTEX(un));
9734 un->un_ncmds_in_driver--;
9735 ASSERT(un->un_ncmds_in_driver >= 0);
9736 mutex_exit(SD_MUTEX(un));
9737 return (EIO);
9738 }
9739 mutex_enter(SD_MUTEX(un));
9740 un->un_ncmds_in_driver--;
9741 ASSERT(un->un_ncmds_in_driver >= 0);
9742 mutex_exit(SD_MUTEX(un));
9743 }
9744
9745 /*
9746 * Read requests are restricted to multiples of the system block size.
9747 */
9748 secmask = un->un_sys_blocksize - 1;
9749
9750 if (uio->uio_loffset & ((offset_t)(secmask))) {
9751 SD_ERROR(SD_LOG_READ_WRITE, un,
9752 "sdaread: file offset not modulo %d\n",
9753 un->un_sys_blocksize);
9754 err = EINVAL;
9755 } else if (uio->uio_iov->iov_len & (secmask)) {
9756 SD_ERROR(SD_LOG_READ_WRITE, un,
9757 "sdaread: transfer length not modulo %d\n",
9758 un->un_sys_blocksize);
9759 err = EINVAL;
9760 } else {
9761 err = aphysio(sdstrategy, anocancel, dev, B_READ, sdmin, aio);
9762 }
9763 return (err);
9764 }
9765
9766
9767 /*
9768 * Function: sdawrite
9769 *
9770 * Description: Driver's awrite(9e) entry point function.
9771 *
9772 * Arguments: dev - device number
9773 * aio - structure pointer describing where data is stored
9774 * cred_p - user credential pointer
9775 *
9776 * Return Code: ENXIO
9777 * EIO
9778 * EINVAL
9779 * value returned by aphysio
9780 *
9781 * Context: Kernel thread context.
9782 */ 9783 /* ARGSUSED */ 9784 static int 9785 sdawrite(dev_t dev, struct aio_req *aio, cred_t *cred_p) 9786 { 9787 struct sd_lun *un = NULL; 9788 struct uio *uio = aio->aio_uio; 9789 int secmask; 9790 int err; 9791 9792 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 9793 return (ENXIO); 9794 } 9795 9796 ASSERT(!mutex_owned(SD_MUTEX(un))); 9797 9798 if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) { 9799 mutex_enter(SD_MUTEX(un)); 9800 /* 9801 * Because the call to sd_ready_and_valid will issue I/O we 9802 * must wait here if either the device is suspended or 9803 * if it's power level is changing. 9804 */ 9805 while ((un->un_state == SD_STATE_SUSPENDED) || 9806 (un->un_state == SD_STATE_PM_CHANGING)) { 9807 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 9808 } 9809 un->un_ncmds_in_driver++; 9810 mutex_exit(SD_MUTEX(un)); 9811 if ((sd_ready_and_valid(un)) != SD_READY_VALID) { 9812 mutex_enter(SD_MUTEX(un)); 9813 un->un_ncmds_in_driver--; 9814 ASSERT(un->un_ncmds_in_driver >= 0); 9815 mutex_exit(SD_MUTEX(un)); 9816 return (EIO); 9817 } 9818 mutex_enter(SD_MUTEX(un)); 9819 un->un_ncmds_in_driver--; 9820 ASSERT(un->un_ncmds_in_driver >= 0); 9821 mutex_exit(SD_MUTEX(un)); 9822 } 9823 9824 /* 9825 * Write requests are restricted to multiples of the system block size. 9826 */ 9827 secmask = un->un_sys_blocksize - 1; 9828 9829 if (uio->uio_loffset & ((offset_t)(secmask))) { 9830 SD_ERROR(SD_LOG_READ_WRITE, un, 9831 "sdawrite: file offset not modulo %d\n", 9832 un->un_sys_blocksize); 9833 err = EINVAL; 9834 } else if (uio->uio_iov->iov_len & (secmask)) { 9835 SD_ERROR(SD_LOG_READ_WRITE, un, 9836 "sdawrite: transfer length not modulo %d\n", 9837 un->un_sys_blocksize); 9838 err = EINVAL; 9839 } else { 9840 err = aphysio(sdstrategy, anocancel, dev, B_WRITE, sdmin, aio); 9841 } 9842 return (err); 9843 } 9844 9845 9846 9847 9848 9849 /* 9850 * Driver IO processing follows the following sequence: 9851 * 9852 * sdioctl(9E) sdstrategy(9E) biodone(9F) 9853 * | | ^ 9854 * v v | 9855 * sd_send_scsi_cmd() ddi_xbuf_qstrategy() +-------------------+ 9856 * | | | | 9857 * v | | | 9858 * sd_uscsi_strategy() sd_xbuf_strategy() sd_buf_iodone() sd_uscsi_iodone() 9859 * | | ^ ^ 9860 * v v | | 9861 * SD_BEGIN_IOSTART() SD_BEGIN_IOSTART() | | 9862 * | | | | 9863 * +---+ | +------------+ +-------+ 9864 * | | | | 9865 * | SD_NEXT_IOSTART()| SD_NEXT_IODONE()| | 9866 * | v | | 9867 * | sd_mapblockaddr_iostart() sd_mapblockaddr_iodone() | 9868 * | | ^ | 9869 * | SD_NEXT_IOSTART()| SD_NEXT_IODONE()| | 9870 * | v | | 9871 * | sd_mapblocksize_iostart() sd_mapblocksize_iodone() | 9872 * | | ^ | 9873 * | SD_NEXT_IOSTART()| SD_NEXT_IODONE()| | 9874 * | v | | 9875 * | sd_checksum_iostart() sd_checksum_iodone() | 9876 * | | ^ | 9877 * +-> SD_NEXT_IOSTART()| SD_NEXT_IODONE()+------------->+ 9878 * | v | | 9879 * | sd_pm_iostart() sd_pm_iodone() | 9880 * | | ^ | 9881 * | | | | 9882 * +-> SD_NEXT_IOSTART()| SD_BEGIN_IODONE()--+--------------+ 9883 * | ^ 9884 * v | 9885 * sd_core_iostart() | 9886 * | | 9887 * | +------>(*destroypkt)() 9888 * +-> sd_start_cmds() <-+ | | 9889 * | | | v 9890 * | | | scsi_destroy_pkt(9F) 9891 * | | | 9892 * +->(*initpkt)() +- sdintr() 9893 * | | | | 9894 * | +-> scsi_init_pkt(9F) | +-> sd_handle_xxx() 9895 * | +-> scsi_setup_cdb(9F) | 9896 * | | 9897 * +--> scsi_transport(9F) | 9898 * | | 9899 * +----> SCSA ---->+ 9900 * 9901 * 9902 * This code is based upon the following presumptions: 9903 * 9904 * - iostart and iodone functions operate on buf(9S) structures. 
These
9905 * functions perform the necessary operations on the buf(9S) and pass
9906 * them along to the next function in the chain by using the macros
9907 * SD_NEXT_IOSTART() (for iostart side functions) and SD_NEXT_IODONE()
9908 * (for iodone side functions).
9909 *
9910 * - The iostart side functions may sleep. The iodone side functions
9911 * are called under interrupt context and may NOT sleep. Therefore
9912 * iodone side functions also may not call iostart side functions.
9913 * (NOTE: iostart side functions should NOT sleep for memory, as
9914 * this could result in deadlock.)
9915 *
9916 * - An iostart side function may call its corresponding iodone side
9917 * function directly (if necessary).
9918 *
9919 * - In the event of an error, an iostart side function can return a buf(9S)
9920 * to its caller by calling SD_BEGIN_IODONE() (after setting B_ERROR and
9921 * b_error in the usual way of course).
9922 *
9923 * - The taskq mechanism may be used by the iodone side functions to dispatch
9924 * requests to the iostart side functions. The iostart side functions
9925 * would in this case be called under the context of a taskq thread,
9926 * so it's OK for them to block/sleep/spin.
9927 *
9928 * - iostart side functions may allocate "shadow" buf(9S) structs and
9929 * pass them along to the next function in the chain. The corresponding
9930 * iodone side functions must coalesce the "shadow" bufs and return
9931 * the "original" buf to the next higher layer.
9932 *
9933 * - The b_private field of the buf(9S) struct holds a pointer to
9934 * an sd_xbuf struct, which contains information needed to
9935 * construct the scsi_pkt for the command.
9936 *
9937 * - The SD_MUTEX(un) is NOT held across calls to the next layer. Each
9938 * layer must acquire & release the SD_MUTEX(un) as needed.
9939 */
9940
9941
9942 /*
9943 * Create taskq for all targets in the system. This is created at
9944 * _init(9E) and destroyed at _fini(9E).
9945 *
9946 * Note: here we set the minalloc to a reasonably high number to ensure that
9947 * we will have an adequate supply of task entries available at interrupt time.
9948 * This is used in conjunction with the TASKQ_PREPOPULATE flag in
9949 * sd_taskq_create(). Since we do not want to sleep for allocations at
9950 * interrupt time, set maxalloc equal to minalloc. That way we will just fail
9951 * the command if we ever try to dispatch more than SD_TASKQ_MAXALLOC taskq
9952 * requests at any one instant in time.
9953 */
9954 #define SD_TASKQ_NUMTHREADS 8
9955 #define SD_TASKQ_MINALLOC 256
9956 #define SD_TASKQ_MAXALLOC 256
9957
9958 static taskq_t *sd_tq = NULL;
9959 _NOTE(SCHEME_PROTECTS_DATA("stable data", sd_tq))
9960
9961 static int sd_taskq_minalloc = SD_TASKQ_MINALLOC;
9962 static int sd_taskq_maxalloc = SD_TASKQ_MAXALLOC;
9963
9964 /*
9965 * The following task queue is being created for the write part of
9966 * read-modify-write of non-512 block size devices.
9967 * The number of threads is limited to 1 for now, since the queue
9968 * currently applies only to DVD-RAM and MO drives, for which
9969 * performance is not the main criterion at this stage.
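 *
 * A dispatch sketch (hypothetical handler and argument names, for
 * illustration only): because maxalloc equals minalloc, a TQ_NOSLEEP
 * dispatch fails rather than sleeping once all preallocated entries
 * are in use:
 *
 *     if (taskq_dispatch(sd_wmr_tq, sd_example_rmw_func, (void *)wmp,
 *         TQ_NOSLEEP) == NULL) {
 *             ... all entries are outstanding; the caller must
 *             ... handle the failure instead of blocking
 *     }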
9970 * Note: whether a single taskq could be used instead remains to be explored.
9971 */
9972 #define SD_WMR_TASKQ_NUMTHREADS 1
9973 static taskq_t *sd_wmr_tq = NULL;
9974 _NOTE(SCHEME_PROTECTS_DATA("stable data", sd_wmr_tq))
9975
9976 /*
9977 * Function: sd_taskq_create
9978 *
9979 * Description: Create taskq thread(s) and preallocate task entries
9980 *
9981 * Return Code: None; the created taskqs are stored in sd_tq and sd_wmr_tq.
9982 *
9983 * Context: Can sleep. Requires blockable context.
9984 *
9985 * Notes: - The taskq() facility currently is NOT part of the DDI.
9986 * (definitely NOT recommended for 3rd-party drivers!) :-)
9987 * - taskq_create() will block for memory; it will also panic
9988 * if it cannot create the requested number of threads.
9989 * - Currently taskq_create() creates threads that cannot be
9990 * swapped.
9991 * - We use TASKQ_PREPOPULATE to ensure we have an adequate
9992 * supply of taskq entries at interrupt time (ie, so that we
9993 * do not have to sleep for memory).
9994 */
9995
9996 static void
9997 sd_taskq_create(void)
9998 {
9999 char taskq_name[TASKQ_NAMELEN];
10000
10001 ASSERT(sd_tq == NULL);
10002 ASSERT(sd_wmr_tq == NULL);
10003
10004 (void) snprintf(taskq_name, sizeof (taskq_name),
10005 "%s_drv_taskq", sd_label);
10006 sd_tq = (taskq_create(taskq_name, SD_TASKQ_NUMTHREADS,
10007 (v.v_maxsyspri - 2), sd_taskq_minalloc, sd_taskq_maxalloc,
10008 TASKQ_PREPOPULATE));
10009
10010 (void) snprintf(taskq_name, sizeof (taskq_name),
10011 "%s_rmw_taskq", sd_label);
10012 sd_wmr_tq = (taskq_create(taskq_name, SD_WMR_TASKQ_NUMTHREADS,
10013 (v.v_maxsyspri - 2), sd_taskq_minalloc, sd_taskq_maxalloc,
10014 TASKQ_PREPOPULATE));
10015 }
10016
10017
10018 /*
10019 * Function: sd_taskq_delete
10020 *
10021 * Description: Complementary cleanup routine for sd_taskq_create().
10022 *
10023 * Context: Kernel thread context.
10024 */
10025
10026 static void
10027 sd_taskq_delete(void)
10028 {
10029 ASSERT(sd_tq != NULL);
10030 ASSERT(sd_wmr_tq != NULL);
10031 taskq_destroy(sd_tq);
10032 taskq_destroy(sd_wmr_tq);
10033 sd_tq = NULL;
10034 sd_wmr_tq = NULL;
10035 }
10036
10037
10038 /*
10039 * Function: sdstrategy
10040 *
10041 * Description: Driver's strategy (9E) entry point function.
10042 *
10043 * Arguments: bp - pointer to buf(9S)
10044 *
10045 * Return Code: Always returns zero
10046 *
10047 * Context: Kernel thread context.
10048 */
10049
10050 static int
10051 sdstrategy(struct buf *bp)
10052 {
10053 struct sd_lun *un;
10054
10055 un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp));
10056 if (un == NULL) {
10057 bioerror(bp, EIO);
10058 bp->b_resid = bp->b_bcount;
10059 biodone(bp);
10060 return (0);
10061 }
10062 /* As was done in the past, fail new commands if the state is dumping. */
10063 if (un->un_state == SD_STATE_DUMPING) {
10064 bioerror(bp, ENXIO);
10065 bp->b_resid = bp->b_bcount;
10066 biodone(bp);
10067 return (0);
10068 }
10069
10070 ASSERT(!mutex_owned(SD_MUTEX(un)));
10071
10072 /*
10073 * Commands may sneak in while we release the mutex in
10074 * DDI_SUSPEND, so we should block new commands. However, old
10075 * commands that are still in the driver at this point should
10076 * still be allowed to drain.
10077 */
10078 mutex_enter(SD_MUTEX(un));
10079 /*
10080 * Must wait here if either the device is suspended or
10081 * if its power level is changing.
10082 */
10083 while ((un->un_state == SD_STATE_SUSPENDED) ||
10084 (un->un_state == SD_STATE_PM_CHANGING)) {
10085 cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
10086 }
10087
10088 un->un_ncmds_in_driver++;
10089
10090 /*
10091 * atapi: Since for now we are running the CD in PIO mode, we need to
10092 * call bp_mapin here to avoid bp_mapin being called in interrupt
10093 * context from the HBA's init_pkt routine.
10094 */
10095 if (un->un_f_cfg_is_atapi == TRUE) {
10096 mutex_exit(SD_MUTEX(un));
10097 bp_mapin(bp);
10098 mutex_enter(SD_MUTEX(un));
10099 }
10100 SD_INFO(SD_LOG_IO, un, "sdstrategy: un_ncmds_in_driver = %ld\n",
10101 un->un_ncmds_in_driver);
10102
10103 mutex_exit(SD_MUTEX(un));
10104
10105 /*
10106 * This will (eventually) allocate the sd_xbuf area and
10107 * call sd_xbuf_strategy(). We just want to return the
10108 * result of ddi_xbuf_qstrategy so that we have an
10109 * optimized tail call, which saves us a stack frame.
10110 */
10111 return (ddi_xbuf_qstrategy(bp, un->un_xbuf_attr));
10112 }
10113
10114
10115 /*
10116 * Function: sd_xbuf_strategy
10117 *
10118 * Description: Function for initiating IO operations via the
10119 * ddi_xbuf_qstrategy() mechanism.
10120 *
10121 * Context: Kernel thread context.
10122 */
10123
10124 static void
10125 sd_xbuf_strategy(struct buf *bp, ddi_xbuf_t xp, void *arg)
10126 {
10127 struct sd_lun *un = arg;
10128
10129 ASSERT(bp != NULL);
10130 ASSERT(xp != NULL);
10131 ASSERT(un != NULL);
10132 ASSERT(!mutex_owned(SD_MUTEX(un)));
10133
10134 /*
10135 * Initialize the fields in the xbuf and save a pointer to the
10136 * xbuf in bp->b_private.
10137 */
10138 sd_xbuf_init(un, bp, xp, SD_CHAIN_BUFIO, NULL);
10139
10140 /* Send the buf down the iostart chain */
10141 SD_BEGIN_IOSTART(((struct sd_xbuf *)xp)->xb_chain_iostart, un, bp);
10142 }
10143
10144
10145 /*
10146 * Function: sd_xbuf_init
10147 *
10148 * Description: Prepare the given sd_xbuf struct for use.
10149 *
10150 * Arguments: un - ptr to softstate
10151 * bp - ptr to associated buf(9S)
10152 * xp - ptr to associated sd_xbuf
10153 * chain_type - IO chain type to use:
10154 * SD_CHAIN_NULL
10155 * SD_CHAIN_BUFIO
10156 * SD_CHAIN_USCSI
10157 * SD_CHAIN_DIRECT
10158 * SD_CHAIN_DIRECT_PRIORITY
10159 * pktinfop - ptr to private data struct for scsi_pkt(9S)
10160 * initialization; may be NULL if none.
10161 *
10162 * Context: Kernel thread context
10163 */
10164
10165 static void
10166 sd_xbuf_init(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
10167 uchar_t chain_type, void *pktinfop)
10168 {
10169 int index;
10170
10171 ASSERT(un != NULL);
10172 ASSERT(bp != NULL);
10173 ASSERT(xp != NULL);
10174
10175 SD_INFO(SD_LOG_IO, un, "sd_xbuf_init: buf:0x%p chain type:0x%x\n",
10176 bp, chain_type);
10177
10178 xp->xb_un = un;
10179 xp->xb_pktp = NULL;
10180 xp->xb_pktinfo = pktinfop;
10181 xp->xb_private = bp->b_private;
10182 xp->xb_blkno = (daddr_t)bp->b_blkno;
10183
10184 /*
10185 * Set up the iostart and iodone chain indexes in the xbuf, based
10186 * upon the specified chain type to use.
10187 */
10188 switch (chain_type) {
10189 case SD_CHAIN_NULL:
10190 /*
10191 * Fall thru to just use the values for the buf type, even
10192 * tho for the NULL chain these values will never be used.
10193 */
10194 /* FALLTHRU */
10195 case SD_CHAIN_BUFIO:
10196 index = un->un_buf_chain_type;
10197 break;
10198 case SD_CHAIN_USCSI:
10199 index = un->un_uscsi_chain_type;
10200 break;
10201 case SD_CHAIN_DIRECT:
10202 index = un->un_direct_chain_type;
10203 break;
10204 case SD_CHAIN_DIRECT_PRIORITY:
10205 index = un->un_priority_chain_type;
10206 break;
10207 default:
10208 /* We're really broken if we ever get here... */
10209 panic("sd_xbuf_init: illegal chain type!");
10210 /*NOTREACHED*/
10211 }
10212
10213 xp->xb_chain_iostart = sd_chain_index_map[index].sci_iostart_index;
10214 xp->xb_chain_iodone = sd_chain_index_map[index].sci_iodone_index;
10215
10216 /*
10217 * It might be a bit easier to simply bzero the entire xbuf above,
10218 * but it turns out that since we init a fair number of members anyway,
10219 * we save a fair number of cycles by doing explicit assignment of zero.
10220 */
10221 xp->xb_pkt_flags = 0;
10222 xp->xb_dma_resid = 0;
10223 xp->xb_retry_count = 0;
10224 xp->xb_victim_retry_count = 0;
10225 xp->xb_ua_retry_count = 0;
10226 xp->xb_sense_bp = NULL;
10227 xp->xb_sense_status = 0;
10228 xp->xb_sense_state = 0;
10229 xp->xb_sense_resid = 0;
10230
10231 bp->b_private = xp;
10232 bp->b_flags &= ~(B_DONE | B_ERROR);
10233 bp->b_resid = 0;
10234 bp->av_forw = NULL;
10235 bp->av_back = NULL;
10236 bioerror(bp, 0);
10237
10238 SD_INFO(SD_LOG_IO, un, "sd_xbuf_init: done.\n");
10239 }
10240
10241
10242 /*
10243 * Function: sd_uscsi_strategy
10244 *
10245 * Description: Wrapper for calling into the USCSI chain via physio(9F)
10246 *
10247 * Arguments: bp - buf struct ptr
10248 *
10249 * Return Code: Always returns 0
10250 *
10251 * Context: Kernel thread context
10252 */
10253
10254 static int
10255 sd_uscsi_strategy(struct buf *bp)
10256 {
10257 struct sd_lun *un;
10258 struct sd_uscsi_info *uip;
10259 struct sd_xbuf *xp;
10260 uchar_t chain_type;
10261
10262 ASSERT(bp != NULL);
10263
10264 un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp));
10265 if (un == NULL) {
10266 bioerror(bp, EIO);
10267 bp->b_resid = bp->b_bcount;
10268 biodone(bp);
10269 return (0);
10270 }
10271
10272 ASSERT(!mutex_owned(SD_MUTEX(un)));
10273
10274 SD_TRACE(SD_LOG_IO, un, "sd_uscsi_strategy: entry: buf:0x%p\n", bp);
10275
10276 mutex_enter(SD_MUTEX(un));
10277 /*
10278 * atapi: Since for now we are running the CD in PIO mode, we need to
10279 * call bp_mapin here to avoid bp_mapin being called in interrupt
10280 * context from the HBA's init_pkt routine.
10281 */ 10282 if (un->un_f_cfg_is_atapi == TRUE) { 10283 mutex_exit(SD_MUTEX(un)); 10284 bp_mapin(bp); 10285 mutex_enter(SD_MUTEX(un)); 10286 } 10287 un->un_ncmds_in_driver++; 10288 SD_INFO(SD_LOG_IO, un, "sd_uscsi_strategy: un_ncmds_in_driver = %ld\n", 10289 un->un_ncmds_in_driver); 10290 mutex_exit(SD_MUTEX(un)); 10291 10292 /* 10293 * A pointer to a struct sd_uscsi_info is expected in bp->b_private 10294 */ 10295 ASSERT(bp->b_private != NULL); 10296 uip = (struct sd_uscsi_info *)bp->b_private; 10297 10298 switch (uip->ui_flags) { 10299 case SD_PATH_DIRECT: 10300 chain_type = SD_CHAIN_DIRECT; 10301 break; 10302 case SD_PATH_DIRECT_PRIORITY: 10303 chain_type = SD_CHAIN_DIRECT_PRIORITY; 10304 break; 10305 default: 10306 chain_type = SD_CHAIN_USCSI; 10307 break; 10308 } 10309 10310 xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP); 10311 sd_xbuf_init(un, bp, xp, chain_type, uip->ui_cmdp); 10312 10313 /* Use the index obtained within xbuf_init */ 10314 SD_BEGIN_IOSTART(xp->xb_chain_iostart, un, bp); 10315 10316 SD_TRACE(SD_LOG_IO, un, "sd_uscsi_strategy: exit: buf:0x%p\n", bp); 10317 10318 return (0); 10319 } 10320 10321 /* 10322 * Function: sd_send_scsi_cmd 10323 * 10324 * Description: Runs a USCSI command for user (when called thru sdioctl), 10325 * or for the driver 10326 * 10327 * Arguments: dev - the dev_t for the device 10328 * incmd - ptr to a valid uscsi_cmd struct 10329 * flag - bit flag, indicating open settings, 32/64 bit type 10330 * dataspace - UIO_USERSPACE or UIO_SYSSPACE 10331 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 10332 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 10333 * to use the USCSI "direct" chain and bypass the normal 10334 * command waitq. 10335 * 10336 * Return Code: 0 - successful completion of the given command 10337 * EIO - scsi_uscsi_handle_command() failed 10338 * ENXIO - soft state not found for specified dev 10339 * EINVAL 10340 * EFAULT - copyin/copyout error 10341 * return code of scsi_uscsi_handle_command(): 10342 * EIO 10343 * ENXIO 10344 * EACCES 10345 * 10346 * Context: Waits for command to complete. Can sleep. 
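 *
 *       Notes: A hedged caller sketch (illustrative only, not taken
 *              from this file; it issues a TEST UNIT READY via this
 *              routine):
 *
 *                  struct uscsi_cmd ucmd;
 *                  union scsi_cdb cdb;
 *
 *                  bzero(&ucmd, sizeof (ucmd));
 *                  bzero(&cdb, sizeof (cdb));
 *                  cdb.scc_cmd = SCMD_TEST_UNIT_READY;
 *                  ucmd.uscsi_cdb = (caddr_t)&cdb;
 *                  ucmd.uscsi_cdblen = CDB_GROUP0;
 *                  ucmd.uscsi_flags = USCSI_SILENT;
 *                  (void) sd_send_scsi_cmd(dev, &ucmd, FKIOCTL,
 *                      UIO_SYSSPACE, SD_PATH_DIRECT);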
10347 */
10348
10349 static int
10350 sd_send_scsi_cmd(dev_t dev, struct uscsi_cmd *incmd, int flag,
10351 enum uio_seg dataspace, int path_flag)
10352 {
10353 struct sd_uscsi_info *uip;
10354 struct uscsi_cmd *uscmd;
10355 struct sd_lun *un;
10356 int format = 0;
10357 int rval;
10358
10359 un = ddi_get_soft_state(sd_state, SDUNIT(dev));
10360 if (un == NULL) {
10361 return (ENXIO);
10362 }
10363
10364 ASSERT(!mutex_owned(SD_MUTEX(un)));
10365
10366 #ifdef SDDEBUG
10367 switch (dataspace) {
10368 case UIO_USERSPACE:
10369 SD_TRACE(SD_LOG_IO, un,
10370 "sd_send_scsi_cmd: entry: un:0x%p UIO_USERSPACE\n", un);
10371 break;
10372 case UIO_SYSSPACE:
10373 SD_TRACE(SD_LOG_IO, un,
10374 "sd_send_scsi_cmd: entry: un:0x%p UIO_SYSSPACE\n", un);
10375 break;
10376 default:
10377 SD_TRACE(SD_LOG_IO, un,
10378 "sd_send_scsi_cmd: entry: un:0x%p UNEXPECTED SPACE\n", un);
10379 break;
10380 }
10381 #endif
10382
10383 rval = scsi_uscsi_alloc_and_copyin((intptr_t)incmd, flag,
10384 SD_ADDRESS(un), &uscmd);
10385 if (rval != 0) {
10386 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_cmd: "
10387 "scsi_uscsi_alloc_and_copyin failed, un:0x%p\n", un);
10388 return (rval);
10389 }
10390
10391 if ((uscmd->uscsi_cdb != NULL) &&
10392 (uscmd->uscsi_cdb[0] == SCMD_FORMAT)) {
10393 mutex_enter(SD_MUTEX(un));
10394 un->un_f_format_in_progress = TRUE;
10395 mutex_exit(SD_MUTEX(un));
10396 format = 1;
10397 }
10398
10399 /*
10400 * Allocate an sd_uscsi_info struct and fill it with the info
10401 * needed by sd_initpkt_for_uscsi(). Then put the pointer into
10402 * b_private in the buf for sd_initpkt_for_uscsi(). Note that
10403 * since we allocate the buf here in this function, we do not
10404 * need to preserve the prior contents of b_private.
10405 * The sd_uscsi_info struct is also used by sd_uscsi_strategy().
10406 */
10407 uip = kmem_zalloc(sizeof (struct sd_uscsi_info), KM_SLEEP);
10408 uip->ui_flags = path_flag;
10409 uip->ui_cmdp = uscmd;
10410
10411 /*
10412 * Commands sent with priority are intended for error recovery
10413 * situations, and do not have retries performed.
10414 */
10415 if (path_flag == SD_PATH_DIRECT_PRIORITY) {
10416 uscmd->uscsi_flags |= USCSI_DIAGNOSE;
10417 }
10418 uscmd->uscsi_flags &= ~USCSI_NOINTR;
10419
10420 rval = scsi_uscsi_handle_cmd(dev, dataspace, uscmd,
10421 sd_uscsi_strategy, NULL, uip);
10422
10423 #ifdef SDDEBUG
10424 SD_INFO(SD_LOG_IO, un, "sd_send_scsi_cmd: "
10425 "uscsi_status: 0x%02x uscsi_resid:0x%x\n",
10426 uscmd->uscsi_status, uscmd->uscsi_resid);
10427 if (uscmd->uscsi_bufaddr != NULL) {
10428 SD_INFO(SD_LOG_IO, un, "sd_send_scsi_cmd: "
10429 "uscmd->uscsi_bufaddr: 0x%p uscmd->uscsi_buflen:%d\n",
10430 uscmd->uscsi_bufaddr, uscmd->uscsi_buflen);
10431 if (dataspace == UIO_SYSSPACE) {
10432 SD_DUMP_MEMORY(un, SD_LOG_IO,
10433 "data", (uchar_t *)uscmd->uscsi_bufaddr,
10434 uscmd->uscsi_buflen, SD_LOG_HEX);
10435 }
10436 }
10437 #endif
10438
10439 if (format == 1) {
10440 mutex_enter(SD_MUTEX(un));
10441 un->un_f_format_in_progress = FALSE;
10442 mutex_exit(SD_MUTEX(un));
10443 }
10444
10445 (void) scsi_uscsi_copyout_and_free((intptr_t)incmd, uscmd);
10446 kmem_free(uip, sizeof (struct sd_uscsi_info));
10447
10448 return (rval);
10449 }
10450
10451
10452 /*
10453 * Function: sd_buf_iodone
10454 *
10455 * Description: Frees the sd_xbuf & returns the buf to its originator.
10456 *
10457 * Context: May be called from interrupt context.
10458 */
10459 /* ARGSUSED */
10460 static void
10461 sd_buf_iodone(int index, struct sd_lun *un, struct buf *bp)
10462 {
10463 struct sd_xbuf *xp;
10464
10465 ASSERT(un != NULL);
10466 ASSERT(bp != NULL);
10467 ASSERT(!mutex_owned(SD_MUTEX(un)));
10468
10469 SD_TRACE(SD_LOG_IO_CORE, un, "sd_buf_iodone: entry.\n");
10470
10471 xp = SD_GET_XBUF(bp);
10472 ASSERT(xp != NULL);
10473
10474 mutex_enter(SD_MUTEX(un));
10475
10476 /*
10477 * Record the time at which the command completed.
10478 * This is used to determine whether the device has been
10479 * idle long enough to be considered idle by the PM framework,
10480 * which lowers the PM overhead and therefore improves
10481 * per-I/O performance.
10482 */
10483 un->un_pm_idle_time = ddi_get_time();
10484
10485 un->un_ncmds_in_driver--;
10486 ASSERT(un->un_ncmds_in_driver >= 0);
10487 SD_INFO(SD_LOG_IO, un, "sd_buf_iodone: un_ncmds_in_driver = %ld\n",
10488 un->un_ncmds_in_driver);
10489
10490 mutex_exit(SD_MUTEX(un));
10491
10492 ddi_xbuf_done(bp, un->un_xbuf_attr); /* xbuf is gone after this */
10493 biodone(bp); /* bp is gone after this */
10494
10495 SD_TRACE(SD_LOG_IO_CORE, un, "sd_buf_iodone: exit.\n");
10496 }
10497
10498
10499 /*
10500 * Function: sd_uscsi_iodone
10501 *
10502 * Description: Frees the sd_xbuf & returns the buf to its originator.
10503 *
10504 * Context: May be called from interrupt context.
10505 */
10506 /* ARGSUSED */
10507 static void
10508 sd_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp)
10509 {
10510 struct sd_xbuf *xp;
10511
10512 ASSERT(un != NULL);
10513 ASSERT(bp != NULL);
10514
10515 xp = SD_GET_XBUF(bp);
10516 ASSERT(xp != NULL);
10517 ASSERT(!mutex_owned(SD_MUTEX(un)));
10518
10519 SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: entry.\n");
10520
10521 bp->b_private = xp->xb_private;
10522
10523 mutex_enter(SD_MUTEX(un));
10524
10525 /*
10526 * Record the time at which the command completed.
10527 * This is used to determine whether the device has been
10528 * idle long enough to be considered idle by the PM framework,
10529 * which lowers the PM overhead and therefore improves
10530 * per-I/O performance.
10531 */
10532 un->un_pm_idle_time = ddi_get_time();
10533
10534 un->un_ncmds_in_driver--;
10535 ASSERT(un->un_ncmds_in_driver >= 0);
10536 SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: un_ncmds_in_driver = %ld\n",
10537 un->un_ncmds_in_driver);
10538
10539 mutex_exit(SD_MUTEX(un));
10540
10541 kmem_free(xp, sizeof (struct sd_xbuf));
10542 biodone(bp);
10543
10544 SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: exit.\n");
10545 }
10546
10547
10548 /*
10549 * Function: sd_mapblockaddr_iostart
10550 *
10551 * Description: Verify request lies within the partition limits for
10552 * the indicated minor device. Issue "overrun" buf if
10553 * request would exceed partition range. Converts
10554 * partition-relative block address to absolute.
10555 *
10556 * Context: Can sleep
10557 *
10558 * Issues: This follows what the old code did, in terms of accessing
10559 * some of the partition info in the unit struct without holding
10560 * the mutex. This is a general issue: if the partition info
10561 * can be altered while IO is in progress... as soon as we send
10562 * a buf, its partitioning can be invalid before it gets to the
10563 * device. Probably the right fix is to move partitioning out
10564 * of the driver entirely.
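 *
 * A worked example of the translation performed below (illustrative
 * numbers): for a request to block 100 of a slice whose
 * partition_offset is reported by cmlb as 2048, xb_blkno is rewritten
 * from 100 to 2148 before the buf is passed to the next layer.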
10565 */
10566
10567 static void
10568 sd_mapblockaddr_iostart(int index, struct sd_lun *un, struct buf *bp)
10569 {
10570 diskaddr_t nblocks; /* #blocks in the given partition */
10571 daddr_t blocknum; /* Block number specified by the buf */
10572 size_t requested_nblocks;
10573 size_t available_nblocks;
10574 int partition;
10575 diskaddr_t partition_offset;
10576 struct sd_xbuf *xp;
10577
10578
10579 ASSERT(un != NULL);
10580 ASSERT(bp != NULL);
10581 ASSERT(!mutex_owned(SD_MUTEX(un)));
10582
10583 SD_TRACE(SD_LOG_IO_PARTITION, un,
10584 "sd_mapblockaddr_iostart: entry: buf:0x%p\n", bp);
10585
10586 xp = SD_GET_XBUF(bp);
10587 ASSERT(xp != NULL);
10588
10589 /*
10590 * If the geometry is not indicated as valid, attempt to access
10591 * the unit & verify the geometry/label. This can be the case for
10592 * removable-media devices, or if the device was opened in
10593 * NDELAY/NONBLOCK mode.
10594 */
10595 if (!SD_IS_VALID_LABEL(un) &&
10596 (sd_ready_and_valid(un) != SD_READY_VALID)) {
10597 /*
10598 * For removable devices it is possible to start an I/O
10599 * without media by opening the device in nodelay mode.
10600 * Also for writable CDs there can be many scenarios where
10601 * there is no geometry yet but the volume manager is trying to
10602 * issue a read() just because it can see the TOC on the CD. So
10603 * do not print a message for removables.
10604 */
10605 if (!un->un_f_has_removable_media) {
10606 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
10607 "i/o to invalid geometry\n");
10608 }
10609 bioerror(bp, EIO);
10610 bp->b_resid = bp->b_bcount;
10611 SD_BEGIN_IODONE(index, un, bp);
10612 return;
10613 }
10614
10615 partition = SDPART(bp->b_edev);
10616
10617 nblocks = 0;
10618 (void) cmlb_partinfo(un->un_cmlbhandle, partition,
10619 &nblocks, &partition_offset, NULL, NULL, (void *)SD_PATH_DIRECT);
10620
10621 /*
10622 * blocknum is the starting block number of the request. At this
10623 * point it is still relative to the start of the minor device.
10624 */
10625 blocknum = xp->xb_blkno;
10626
10627 /*
10628 * Legacy: If the starting block number is one past the last block
10629 * in the partition, do not set B_ERROR in the buf.
10630 */
10631 if (blocknum == nblocks) {
10632 goto error_exit;
10633 }
10634
10635 /*
10636 * Confirm that the first block of the request lies within the
10637 * partition limits. Also the requested number of bytes must be
10638 * a multiple of the system block size.
10639 */
10640 if ((blocknum < 0) || (blocknum >= nblocks) ||
10641 ((bp->b_bcount & (un->un_sys_blocksize - 1)) != 0)) {
10642 bp->b_flags |= B_ERROR;
10643 goto error_exit;
10644 }
10645
10646 /*
10647 * If the requested # blocks exceeds the available # blocks, that
10648 * is an overrun of the partition.
10649 */
10650 requested_nblocks = SD_BYTES2SYSBLOCKS(un, bp->b_bcount);
10651 available_nblocks = (size_t)(nblocks - blocknum);
10652 ASSERT(nblocks >= blocknum);
10653
10654 if (requested_nblocks > available_nblocks) {
10655 /*
10656 * Allocate an "overrun" buf to allow the request to proceed
10657 * for the amount of space available in the partition. The
10658 * amount not transferred will be added into the b_resid
10659 * when the operation is complete. The overrun buf
10660 * replaces the original buf here, and the original buf
10661 * is saved inside the overrun buf, for later use.
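 *
 * Worked example (illustrative, 512-byte system blocks): with
 * nblocks == 1000 and blocknum == 996, a b_bcount of 4096 requests
 * requested_nblocks == 8 but available_nblocks == 4, so resid
 * becomes 2048 and the overrun buf carries only count == 2048 bytes.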
10662 */
10663 size_t resid = SD_SYSBLOCKS2BYTES(un,
10664 (offset_t)(requested_nblocks - available_nblocks));
10665 size_t count = bp->b_bcount - resid;
10666 /*
10667 * Note: count is an unsigned entity, thus it'll NEVER
10668 * be less than 0, so ASSERT that the original values
10669 * are correct.
10670 */
10671 ASSERT(bp->b_bcount >= resid);
10672
10673 bp = sd_bioclone_alloc(bp, count, blocknum,
10674 (int (*)(struct buf *)) sd_mapblockaddr_iodone);
10675 xp = SD_GET_XBUF(bp); /* Update for 'new' bp! */
10676 ASSERT(xp != NULL);
10677 }
10678
10679 /* At this point there should be no residual for this buf. */
10680 ASSERT(bp->b_resid == 0);
10681
10682 /* Convert the block number to an absolute address. */
10683 xp->xb_blkno += partition_offset;
10684
10685 SD_NEXT_IOSTART(index, un, bp);
10686
10687 SD_TRACE(SD_LOG_IO_PARTITION, un,
10688 "sd_mapblockaddr_iostart: exit 0: buf:0x%p\n", bp);
10689
10690 return;
10691
10692 error_exit:
10693 bp->b_resid = bp->b_bcount;
10694 SD_BEGIN_IODONE(index, un, bp);
10695 SD_TRACE(SD_LOG_IO_PARTITION, un,
10696 "sd_mapblockaddr_iostart: exit 1: buf:0x%p\n", bp);
10697 }
10698
10699
10700 /*
10701 * Function: sd_mapblockaddr_iodone
10702 *
10703 * Description: Completion-side processing for partition management.
10704 *
10705 * Context: May be called under interrupt context
10706 */
10707
10708 static void
10709 sd_mapblockaddr_iodone(int index, struct sd_lun *un, struct buf *bp)
10710 {
10711 /* int partition; */ /* Not used, see below. */
10712 ASSERT(un != NULL);
10713 ASSERT(bp != NULL);
10714 ASSERT(!mutex_owned(SD_MUTEX(un)));
10715
10716 SD_TRACE(SD_LOG_IO_PARTITION, un,
10717 "sd_mapblockaddr_iodone: entry: buf:0x%p\n", bp);
10718
10719 if (bp->b_iodone == (int (*)(struct buf *)) sd_mapblockaddr_iodone) {
10720 /*
10721 * We have an "overrun" buf to deal with...
10722 */
10723 struct sd_xbuf *xp;
10724 struct buf *obp; /* ptr to the original buf */
10725
10726 xp = SD_GET_XBUF(bp);
10727 ASSERT(xp != NULL);
10728
10729 /* Retrieve the pointer to the original buf */
10730 obp = (struct buf *)xp->xb_private;
10731 ASSERT(obp != NULL);
10732
10733 obp->b_resid = obp->b_bcount - (bp->b_bcount - bp->b_resid);
10734 bioerror(obp, bp->b_error);
10735
10736 sd_bioclone_free(bp);
10737
10738 /*
10739 * Get back the original buf.
10740 * Note that since the restoration of xb_blkno below
10741 * was removed, the sd_xbuf is not needed.
10742 */
10743 bp = obp;
10744 /*
10745 * xp = SD_GET_XBUF(bp);
10746 * ASSERT(xp != NULL);
10747 */
10748 }
10749
10750 /*
10751 * Convert xp->xb_blkno back to a minor-device relative value.
10752 * Note: this has been commented out, as it is not needed in the
10753 * current implementation of the driver (ie, since this function
10754 * is at the top of the layering chains, so the info will be
10755 * discarded) and it is in the "hot" IO path.
10756 *
10757 * partition = getminor(bp->b_edev) & SDPART_MASK;
10758 * xp->xb_blkno -= un->un_offset[partition];
10759 */
10760
10761 SD_NEXT_IODONE(index, un, bp);
10762
10763 SD_TRACE(SD_LOG_IO_PARTITION, un,
10764 "sd_mapblockaddr_iodone: exit: buf:0x%p\n", bp);
10765 }
10766
10767
10768 /*
10769 * Function: sd_mapblocksize_iostart
10770 *
10771 * Description: Convert between system block size (un->un_sys_blocksize)
10772 * and target block size (un->un_tgt_blocksize).
10773 *
10774 * Context: Can sleep to allocate resources.
10775 * 10776 * Assumptions: A higher layer has already performed any partition validation, 10777 * and converted the xp->xb_blkno to an absolute value relative 10778 * to the start of the device. 10779 * 10780 * It is also assumed that the higher layer has implemented 10781 * an "overrun" mechanism for the case where the request would 10782 * read/write beyond the end of a partition. In this case we 10783 * assume (and ASSERT) that bp->b_resid == 0. 10784 * 10785 * Note: The implementation for this routine assumes the target 10786 * block size remains constant between allocation and transport. 10787 */ 10788 10789 static void 10790 sd_mapblocksize_iostart(int index, struct sd_lun *un, struct buf *bp) 10791 { 10792 struct sd_mapblocksize_info *bsp; 10793 struct sd_xbuf *xp; 10794 offset_t first_byte; 10795 daddr_t start_block, end_block; 10796 daddr_t request_bytes; 10797 ushort_t is_aligned = FALSE; 10798 10799 ASSERT(un != NULL); 10800 ASSERT(bp != NULL); 10801 ASSERT(!mutex_owned(SD_MUTEX(un))); 10802 ASSERT(bp->b_resid == 0); 10803 10804 SD_TRACE(SD_LOG_IO_RMMEDIA, un, 10805 "sd_mapblocksize_iostart: entry: buf:0x%p\n", bp); 10806 10807 /* 10808 * For a non-writable CD, a write request is an error 10809 */ 10810 if (ISCD(un) && ((bp->b_flags & B_READ) == 0) && 10811 (un->un_f_mmc_writable_media == FALSE)) { 10812 bioerror(bp, EIO); 10813 bp->b_resid = bp->b_bcount; 10814 SD_BEGIN_IODONE(index, un, bp); 10815 return; 10816 } 10817 10818 /* 10819 * We do not need a shadow buf if the device is using 10820 * un->un_sys_blocksize as its block size or if bcount == 0. 10821 * In this case there is no layer-private data block allocated. 10822 */ 10823 if ((un->un_tgt_blocksize == un->un_sys_blocksize) || 10824 (bp->b_bcount == 0)) { 10825 goto done; 10826 } 10827 10828 #if defined(__i386) || defined(__amd64) 10829 /* We do not support non-block-aligned transfers for ROD devices */ 10830 ASSERT(!ISROD(un)); 10831 #endif 10832 10833 xp = SD_GET_XBUF(bp); 10834 ASSERT(xp != NULL); 10835 10836 SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: " 10837 "tgt_blocksize:0x%x sys_blocksize: 0x%x\n", 10838 un->un_tgt_blocksize, un->un_sys_blocksize); 10839 SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: " 10840 "request start block:0x%x\n", xp->xb_blkno); 10841 SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: " 10842 "request len:0x%x\n", bp->b_bcount); 10843 10844 /* 10845 * Allocate the layer-private data area for the mapblocksize layer. 10846 * Layers are allowed to use the xp_private member of the sd_xbuf 10847 * struct to store the pointer to their layer-private data block, but 10848 * each layer also has the responsibility of restoring the prior 10849 * contents of xb_private before returning the buf/xbuf to the 10850 * higher layer that sent it. 10851 * 10852 * Here we save the prior contents of xp->xb_private into the 10853 * bsp->mbs_oprivate field of our layer-private data area. This value 10854 * is restored by sd_mapblocksize_iodone() just prior to freeing up 10855 * the layer-private area and returning the buf/xbuf to the layer 10856 * that sent it. 10857 * 10858 * Note that here we use kmem_zalloc for the allocation as there are 10859 * parts of the mapblocksize code that expect certain fields to be 10860 * zero unless explicitly set to a required value. 
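 *
 * A worked example of the conversion performed below (illustrative
 * numbers): with un_sys_blocksize == 512 and un_tgt_blocksize ==
 * 2048, a request for system block 10 with b_bcount == 1024 yields
 * first_byte == 5120, start_block == 2, end_block == 3 and
 * request_bytes == 2048; since 5120 is not a multiple of 2048, the
 * request is unaligned and a shadow buf is required.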
10861 */ 10862 bsp = kmem_zalloc(sizeof (struct sd_mapblocksize_info), KM_SLEEP); 10863 bsp->mbs_oprivate = xp->xb_private; 10864 xp->xb_private = bsp; 10865 10866 /* 10867 * This treats the data on the disk (target) as an array of bytes. 10868 * first_byte is the byte offset, from the beginning of the device, 10869 * to the location of the request. This is converted from a 10870 * un->un_sys_blocksize block address to a byte offset, and then back 10871 * to a block address based upon a un->un_tgt_blocksize block size. 10872 * 10873 * xp->xb_blkno should be absolute upon entry into this function, 10874 * but it is based upon partitions that use the "system" 10875 * block size. It must be adjusted to reflect the block size of 10876 * the target. 10877 * 10878 * Note that end_block is actually the block that follows the last 10879 * block of the request, but that's what is needed for the computation. 10880 */ 10881 first_byte = SD_SYSBLOCKS2BYTES(un, (offset_t)xp->xb_blkno); 10882 start_block = xp->xb_blkno = first_byte / un->un_tgt_blocksize; 10883 end_block = (first_byte + bp->b_bcount + un->un_tgt_blocksize - 1) / 10884 un->un_tgt_blocksize; 10885 10886 /* request_bytes is rounded up to a multiple of the target block size */ 10887 request_bytes = (end_block - start_block) * un->un_tgt_blocksize; 10888 10889 /* 10890 * See if the starting address of the request and the request 10891 * length are aligned on a un->un_tgt_blocksize boundary. If aligned 10892 * then we do not need to allocate a shadow buf to handle the request. 10893 */ 10894 if (((first_byte % un->un_tgt_blocksize) == 0) && 10895 ((bp->b_bcount % un->un_tgt_blocksize) == 0)) { 10896 is_aligned = TRUE; 10897 } 10898 10899 if ((bp->b_flags & B_READ) == 0) { 10900 /* 10901 * Lock the range for a write operation. An aligned request is 10902 * considered a simple write; otherwise the request must be a 10903 * read-modify-write. 10904 */ 10905 bsp->mbs_wmp = sd_range_lock(un, start_block, end_block - 1, 10906 (is_aligned == TRUE) ? SD_WTYPE_SIMPLE : SD_WTYPE_RMW); 10907 } 10908 10909 /* 10910 * Alloc a shadow buf if the request is not aligned. Also, this is 10911 * where the READ command is generated for a read-modify-write. (The 10912 * write phase is deferred until after the read completes.) 10913 */ 10914 if (is_aligned == FALSE) { 10915 10916 struct sd_mapblocksize_info *shadow_bsp; 10917 struct sd_xbuf *shadow_xp; 10918 struct buf *shadow_bp; 10919 10920 /* 10921 * Allocate the shadow buf and its associated xbuf. Note that 10922 * after this call the xb_blkno value in both the original 10923 * buf's sd_xbuf _and_ the shadow buf's sd_xbuf will be the 10924 * same: absolute relative to the start of the device, and 10925 * adjusted for the target block size. The b_blkno in the 10926 * shadow buf will also be set to this value. We should never 10927 * change b_blkno in the original bp however. 10928 * 10929 * Note also that the shadow buf will always need to be a 10930 * READ command, regardless of whether the incoming command 10931 * is a READ or a WRITE. 10932 */ 10933 shadow_bp = sd_shadow_buf_alloc(bp, request_bytes, B_READ, 10934 xp->xb_blkno, 10935 (int (*)(struct buf *)) sd_mapblocksize_iodone); 10936 10937 shadow_xp = SD_GET_XBUF(shadow_bp); 10938 10939 /* 10940 * Allocate the layer-private data for the shadow buf. 10941 * (No need to preserve xb_private in the shadow xbuf.)
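 * (The reasoning, as a sketch: the shadow xbuf was bcopy'd from the * original in sd_shadow_buf_alloc() and is freed wholesale by * sd_shadow_buf_free() in sd_mapblocksize_iodone(), so the value * xb_private held at copy time is never read back; the original buf * is tracked via mbs_orig_bp instead.)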
10942 */ 10943 shadow_xp->xb_private = shadow_bsp = 10944 kmem_zalloc(sizeof (struct sd_mapblocksize_info), KM_SLEEP); 10945 10946 /* 10947 * bsp->mbs_copy_offset is used later by sd_mapblocksize_iodone 10948 * to figure out where the start of the user data is (based upon 10949 * the system block size) in the data returned by the READ 10950 * command (which will be based upon the target blocksize). Note 10951 * that this is only really used if the request is unaligned. 10952 */ 10953 bsp->mbs_copy_offset = (ssize_t)(first_byte - 10954 ((offset_t)xp->xb_blkno * un->un_tgt_blocksize)); 10955 ASSERT((bsp->mbs_copy_offset >= 0) && 10956 (bsp->mbs_copy_offset < un->un_tgt_blocksize)); 10957 10958 shadow_bsp->mbs_copy_offset = bsp->mbs_copy_offset; 10959 10960 shadow_bsp->mbs_layer_index = bsp->mbs_layer_index = index; 10961 10962 /* Transfer the wmap (if any) to the shadow buf */ 10963 shadow_bsp->mbs_wmp = bsp->mbs_wmp; 10964 bsp->mbs_wmp = NULL; 10965 10966 /* 10967 * The shadow buf goes on from here in place of the 10968 * original buf. 10969 */ 10970 shadow_bsp->mbs_orig_bp = bp; 10971 bp = shadow_bp; 10972 } 10973 10974 SD_INFO(SD_LOG_IO_RMMEDIA, un, 10975 "sd_mapblocksize_iostart: tgt start block:0x%x\n", xp->xb_blkno); 10976 SD_INFO(SD_LOG_IO_RMMEDIA, un, 10977 "sd_mapblocksize_iostart: tgt request len:0x%x\n", 10978 request_bytes); 10979 SD_INFO(SD_LOG_IO_RMMEDIA, un, 10980 "sd_mapblocksize_iostart: shadow buf:0x%x\n", bp); 10981 10982 done: 10983 SD_NEXT_IOSTART(index, un, bp); 10984 10985 SD_TRACE(SD_LOG_IO_RMMEDIA, un, 10986 "sd_mapblocksize_iostart: exit: buf:0x%p\n", bp); 10987 } 10988 10989 10990 /* 10991 * Function: sd_mapblocksize_iodone 10992 * 10993 * Description: Completion side processing for block-size mapping. 10994 * 10995 * Context: May be called under interrupt context 10996 */ 10997 10998 static void 10999 sd_mapblocksize_iodone(int index, struct sd_lun *un, struct buf *bp) 11000 { 11001 struct sd_mapblocksize_info *bsp; 11002 struct sd_xbuf *xp; 11003 struct sd_xbuf *orig_xp; /* sd_xbuf for the original buf */ 11004 struct buf *orig_bp; /* ptr to the original buf */ 11005 offset_t shadow_end; 11006 offset_t request_end; 11007 offset_t shadow_start; 11008 ssize_t copy_offset; 11009 size_t copy_length; 11010 size_t shortfall; 11011 uint_t is_write; /* TRUE if this bp is a WRITE */ 11012 uint_t has_wmap; /* TRUE if this bp has a wmap */ 11013 11014 ASSERT(un != NULL); 11015 ASSERT(bp != NULL); 11016 11017 SD_TRACE(SD_LOG_IO_RMMEDIA, un, 11018 "sd_mapblocksize_iodone: entry: buf:0x%p\n", bp); 11019 11020 /* 11021 * There is no shadow buf or layer-private data if the target is 11022 * using un->un_sys_blocksize as its block size or if bcount == 0. 11023 */ 11024 if ((un->un_tgt_blocksize == un->un_sys_blocksize) || 11025 (bp->b_bcount == 0)) { 11026 goto exit; 11027 } 11028 11029 xp = SD_GET_XBUF(bp); 11030 ASSERT(xp != NULL); 11031 11032 /* Retrieve the pointer to the layer-private data area from the xbuf. */ 11033 bsp = xp->xb_private; 11034 11035 is_write = ((bp->b_flags & B_READ) == 0) ? TRUE : FALSE; 11036 has_wmap = (bsp->mbs_wmp != NULL) ? TRUE : FALSE; 11037 11038 if (is_write) { 11039 /* 11040 * For a WRITE request we must free up the block range that 11041 * we have locked up. This holds regardless of whether this is 11042 * an aligned write request or a read-modify-write request.
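 * (The range in question was taken out via sd_range_lock() in * sd_mapblocksize_iostart(), before the command was transported.)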
11043 */ 11044 sd_range_unlock(un, bsp->mbs_wmp); 11045 bsp->mbs_wmp = NULL; 11046 } 11047 11048 if ((bp->b_iodone != (int(*)(struct buf *))sd_mapblocksize_iodone)) { 11049 /* 11050 * An aligned read or write command will have no shadow buf; 11051 * there is not much else to do with it. 11052 */ 11053 goto done; 11054 } 11055 11056 orig_bp = bsp->mbs_orig_bp; 11057 ASSERT(orig_bp != NULL); 11058 orig_xp = SD_GET_XBUF(orig_bp); 11059 ASSERT(orig_xp != NULL); 11060 ASSERT(!mutex_owned(SD_MUTEX(un))); 11061 11062 if (!is_write && has_wmap) { 11063 /* 11064 * A READ with a wmap means this is the READ phase of a 11065 * read-modify-write. If an error occurred on the READ then 11066 * we do not proceed with the WRITE phase or copy any data. 11067 * Just release the write maps and return with an error. 11068 */ 11069 if ((bp->b_resid != 0) || (bp->b_error != 0)) { 11070 orig_bp->b_resid = orig_bp->b_bcount; 11071 bioerror(orig_bp, bp->b_error); 11072 sd_range_unlock(un, bsp->mbs_wmp); 11073 goto freebuf_done; 11074 } 11075 } 11076 11077 /* 11078 * Here is where we set up to copy the data from the shadow buf 11079 * into the space associated with the original buf. 11080 * 11081 * To deal with the conversion between block sizes, these 11082 * computations treat the data as an array of bytes, with the 11083 * first byte (byte 0) corresponding to the first byte in the 11084 * first block on the disk. 11085 */ 11086 11087 /* 11088 * shadow_start and shadow_len indicate the location and size of 11089 * the data returned with the shadow IO request. 11090 */ 11091 shadow_start = SD_TGTBLOCKS2BYTES(un, (offset_t)xp->xb_blkno); 11092 shadow_end = shadow_start + bp->b_bcount - bp->b_resid; 11093 11094 /* 11095 * copy_offset gives the offset (in bytes) from the start of the first 11096 * block of the READ request to the beginning of the data. We retrieve 11097 * this value from mbs_copy_offset in the layer-private data, as it was 11098 * saved there by sd_mapblocksize_iostart(). copy_length gives the 11099 * amount of data to be copied (in bytes). 11100 */ 11101 copy_offset = bsp->mbs_copy_offset; 11102 ASSERT((copy_offset >= 0) && (copy_offset < un->un_tgt_blocksize)); 11103 copy_length = orig_bp->b_bcount; 11104 request_end = shadow_start + copy_offset + orig_bp->b_bcount; 11105 11106 /* 11107 * Set up the resid and error fields of orig_bp as appropriate. 11108 */ 11109 if (shadow_end >= request_end) { 11110 /* We got all the requested data; set resid to zero */ 11111 orig_bp->b_resid = 0; 11112 } else { 11113 /* 11114 * We failed to get enough data to fully satisfy the original 11115 * request. Just copy back whatever data we got and set 11116 * up the residual and error code as required. 11117 * 11118 * 'shortfall' is the amount by which the data received with the 11119 * shadow buf has "fallen short" of the requested amount. 11120 */ 11121 shortfall = (size_t)(request_end - shadow_end); 11122 11123 if (shortfall > orig_bp->b_bcount) { 11124 /* 11125 * We did not get enough data to even partially 11126 * fulfill the original request. The residual is 11127 * equal to the amount requested. 11128 */ 11129 orig_bp->b_resid = orig_bp->b_bcount; 11130 } else { 11131 /* 11132 * We did not get all the data that we requested 11133 * from the device, but we will try to return what 11134 * portion we did get.
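 * * Illustrative numbers (hypothetical): if the original request was * for 1024 bytes at copy_offset 512 and the single 2048-byte shadow * READ came back with b_resid == 1024, then shadow_end == * shadow_start + 1024 while request_end == shadow_start + 1536; the * shortfall is 512, so we copy back the first 512 bytes and set * orig_bp->b_resid to 512.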
11135 */ 11136 orig_bp->b_resid = shortfall; 11137 } 11138 ASSERT(copy_length >= orig_bp->b_resid); 11139 copy_length -= orig_bp->b_resid; 11140 } 11141 11142 /* Propagate the error code from the shadow buf to the original buf */ 11143 bioerror(orig_bp, bp->b_error); 11144 11145 if (is_write) { 11146 goto freebuf_done; /* No data copying for a WRITE */ 11147 } 11148 11149 if (has_wmap) { 11150 /* 11151 * This is a READ command from the READ phase of a 11152 * read-modify-write request. We have to copy the data given 11153 * by the user OVER the data returned by the READ command, 11154 * then convert the command from a READ to a WRITE and send 11155 * it back to the target. 11156 */ 11157 bcopy(orig_bp->b_un.b_addr, bp->b_un.b_addr + copy_offset, 11158 copy_length); 11159 11160 bp->b_flags &= ~((int)B_READ); /* Convert to a WRITE */ 11161 11162 /* 11163 * Dispatch the WRITE command to the taskq thread, which 11164 * will in turn send the command to the target. When the 11165 * WRITE command completes, we (sd_mapblocksize_iodone()) 11166 * will get called again as part of the iodone chain 11167 * processing for it. Note that we will still be dealing 11168 * with the shadow buf at that point. 11169 */ 11170 if (taskq_dispatch(sd_wmr_tq, sd_read_modify_write_task, bp, 11171 KM_NOSLEEP) != 0) { 11172 /* 11173 * Dispatch was successful so we are done. Return 11174 * without going any higher up the iodone chain. Do 11175 * not free up any layer-private data until after the 11176 * WRITE completes. 11177 */ 11178 return; 11179 } 11180 11181 /* 11182 * Dispatch of the WRITE command failed; set up the error 11183 * condition and send this IO back up the iodone chain. 11184 */ 11185 bioerror(orig_bp, EIO); 11186 orig_bp->b_resid = orig_bp->b_bcount; 11187 11188 } else { 11189 /* 11190 * This is a regular READ request (ie, not a RMW). Copy the 11191 * data from the shadow buf into the original buf. The 11192 * copy_offset compensates for any "misalignment" between the 11193 * shadow buf (with its un->un_tgt_blocksize blocks) and the 11194 * original buf (with its un->un_sys_blocksize blocks). 11195 */ 11196 bcopy(bp->b_un.b_addr + copy_offset, orig_bp->b_un.b_addr, 11197 copy_length); 11198 } 11199 11200 freebuf_done: 11201 11202 /* 11203 * At this point we still have both the shadow buf AND the original 11204 * buf to deal with, as well as the layer-private data area in each. 11205 * Local variables are as follows: 11206 * 11207 * bp -- points to shadow buf 11208 * xp -- points to xbuf of shadow buf 11209 * bsp -- points to layer-private data area of shadow buf 11210 * orig_bp -- points to original buf 11211 * 11212 * First free the shadow buf and its associated xbuf, then free the 11213 * layer-private data area from the shadow buf. There is no need to 11214 * restore xb_private in the shadow xbuf. 11215 */ 11216 sd_shadow_buf_free(bp); 11217 kmem_free(bsp, sizeof (struct sd_mapblocksize_info)); 11218 11219 /* 11220 * Now update the local variables to point to the original buf, xbuf, 11221 * and layer-private area. 11222 */ 11223 bp = orig_bp; 11224 xp = SD_GET_XBUF(bp); 11225 ASSERT(xp != NULL); 11226 ASSERT(xp == orig_xp); 11227 bsp = xp->xb_private; 11228 ASSERT(bsp != NULL); 11229 11230 done: 11231 /* 11232 * Restore xb_private to whatever it was set to by the next higher 11233 * layer in the chain, then free the layer-private data area. 
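 * This completes the push/pop discipline described in * sd_mapblocksize_iostart(): mbs_oprivate holds whatever xb_private * contained when this layer first saw the buf.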
11234 */ 11235 xp->xb_private = bsp->mbs_oprivate; 11236 kmem_free(bsp, sizeof (struct sd_mapblocksize_info)); 11237 11238 exit: 11239 SD_TRACE(SD_LOG_IO_RMMEDIA, SD_GET_UN(bp), 11240 "sd_mapblocksize_iodone: calling SD_NEXT_IODONE: buf:0x%p\n", bp); 11241 11242 SD_NEXT_IODONE(index, un, bp); 11243 } 11244 11245 11246 /* 11247 * Function: sd_checksum_iostart 11248 * 11249 * Description: A stub function for a layer that's currently not used. 11250 * For now just a placeholder. 11251 * 11252 * Context: Kernel thread context 11253 */ 11254 11255 static void 11256 sd_checksum_iostart(int index, struct sd_lun *un, struct buf *bp) 11257 { 11258 ASSERT(un != NULL); 11259 ASSERT(bp != NULL); 11260 ASSERT(!mutex_owned(SD_MUTEX(un))); 11261 SD_NEXT_IOSTART(index, un, bp); 11262 } 11263 11264 11265 /* 11266 * Function: sd_checksum_iodone 11267 * 11268 * Description: A stub function for a layer that's currently not used. 11269 * For now just a placeholder. 11270 * 11271 * Context: May be called under interrupt context 11272 */ 11273 11274 static void 11275 sd_checksum_iodone(int index, struct sd_lun *un, struct buf *bp) 11276 { 11277 ASSERT(un != NULL); 11278 ASSERT(bp != NULL); 11279 ASSERT(!mutex_owned(SD_MUTEX(un))); 11280 SD_NEXT_IODONE(index, un, bp); 11281 } 11282 11283 11284 /* 11285 * Function: sd_checksum_uscsi_iostart 11286 * 11287 * Description: A stub function for a layer that's currently not used. 11288 * For now just a placeholder. 11289 * 11290 * Context: Kernel thread context 11291 */ 11292 11293 static void 11294 sd_checksum_uscsi_iostart(int index, struct sd_lun *un, struct buf *bp) 11295 { 11296 ASSERT(un != NULL); 11297 ASSERT(bp != NULL); 11298 ASSERT(!mutex_owned(SD_MUTEX(un))); 11299 SD_NEXT_IOSTART(index, un, bp); 11300 } 11301 11302 11303 /* 11304 * Function: sd_checksum_uscsi_iodone 11305 * 11306 * Description: A stub function for a layer that's currently not used. 11307 * For now just a placeholder. 11308 * 11309 * Context: May be called under interrupt context 11310 */ 11311 11312 static void 11313 sd_checksum_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp) 11314 { 11315 ASSERT(un != NULL); 11316 ASSERT(bp != NULL); 11317 ASSERT(!mutex_owned(SD_MUTEX(un))); 11318 SD_NEXT_IODONE(index, un, bp); 11319 } 11320 11321 11322 /* 11323 * Function: sd_pm_iostart 11324 * 11325 * Description: iostart-side routine for power management. 11326 * 11327 * Context: Kernel thread context 11328 */ 11329 11330 static void 11331 sd_pm_iostart(int index, struct sd_lun *un, struct buf *bp) 11332 { 11333 ASSERT(un != NULL); 11334 ASSERT(bp != NULL); 11335 ASSERT(!mutex_owned(SD_MUTEX(un))); 11336 ASSERT(!mutex_owned(&un->un_pm_mutex)); 11337 11338 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: entry\n"); 11339 11340 if (sd_pm_entry(un) != DDI_SUCCESS) { 11341 /* 11342 * Set up to return the failed buf back up the 'iodone' 11343 * side of the calling chain. 11344 */ 11345 bioerror(bp, EIO); 11346 bp->b_resid = bp->b_bcount; 11347 11348 SD_BEGIN_IODONE(index, un, bp); 11349 11350 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: exit\n"); 11351 return; 11352 } 11353 11354 SD_NEXT_IOSTART(index, un, bp); 11355 11356 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: exit\n"); 11357 } 11358 11359 11360 /* 11361 * Function: sd_pm_iodone 11362 * 11363 * Description: iodone-side routine for power management.
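 * (Sketch of the intended pairing; see sd_pm_entry()/sd_pm_exit() * for the full rules: this balances the sd_pm_entry() taken on the * iostart side by calling sd_pm_exit() whenever power management * is enabled.)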
11364 * 11365 * Context: may be called from interrupt context 11366 */ 11367 11368 static void 11369 sd_pm_iodone(int index, struct sd_lun *un, struct buf *bp) 11370 { 11371 ASSERT(un != NULL); 11372 ASSERT(bp != NULL); 11373 ASSERT(!mutex_owned(&un->un_pm_mutex)); 11374 11375 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iodone: entry\n"); 11376 11377 /* 11378 * After attach the following flag is only read, so don't 11379 * take the penalty of acquiring a mutex for it. 11380 */ 11381 if (un->un_f_pm_is_enabled == TRUE) { 11382 sd_pm_exit(un); 11383 } 11384 11385 SD_NEXT_IODONE(index, un, bp); 11386 11387 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iodone: exit\n"); 11388 } 11389 11390 11391 /* 11392 * Function: sd_core_iostart 11393 * 11394 * Description: Primary driver function for enqueuing buf(9S) structs from 11395 * the system and initiating IO to the target device 11396 * 11397 * Context: Kernel thread context. Can sleep. 11398 * 11399 * Assumptions: - The given xp->xb_blkno is absolute 11400 * (ie, relative to the start of the device). 11401 * - The IO is to be done using the native blocksize of 11402 * the device, as specified in un->un_tgt_blocksize. 11403 */ 11404 /* ARGSUSED */ 11405 static void 11406 sd_core_iostart(int index, struct sd_lun *un, struct buf *bp) 11407 { 11408 struct sd_xbuf *xp; 11409 11410 ASSERT(un != NULL); 11411 ASSERT(bp != NULL); 11412 ASSERT(!mutex_owned(SD_MUTEX(un))); 11413 ASSERT(bp->b_resid == 0); 11414 11415 SD_TRACE(SD_LOG_IO_CORE, un, "sd_core_iostart: entry: bp:0x%p\n", bp); 11416 11417 xp = SD_GET_XBUF(bp); 11418 ASSERT(xp != NULL); 11419 11420 mutex_enter(SD_MUTEX(un)); 11421 11422 /* 11423 * If we are currently in the failfast state, fail any new IO 11424 * that has B_FAILFAST set, then return. 11425 */ 11426 if ((bp->b_flags & B_FAILFAST) && 11427 (un->un_failfast_state == SD_FAILFAST_ACTIVE)) { 11428 mutex_exit(SD_MUTEX(un)); 11429 bioerror(bp, EIO); 11430 bp->b_resid = bp->b_bcount; 11431 SD_BEGIN_IODONE(index, un, bp); 11432 return; 11433 } 11434 11435 if (SD_IS_DIRECT_PRIORITY(xp)) { 11436 /* 11437 * Priority command -- transport it immediately. 11438 * 11439 * Note: We may want to assert that USCSI_DIAGNOSE is set, 11440 * because all direct priority commands should be associated 11441 * with error recovery actions which we don't want to retry. 11442 */ 11443 sd_start_cmds(un, bp); 11444 } else { 11445 /* 11446 * Normal command -- add it to the wait queue, then start 11447 * transporting commands from the wait queue. 11448 */ 11449 sd_add_buf_to_waitq(un, bp); 11450 SD_UPDATE_KSTATS(un, kstat_waitq_enter, bp); 11451 sd_start_cmds(un, NULL); 11452 } 11453 11454 mutex_exit(SD_MUTEX(un)); 11455 11456 SD_TRACE(SD_LOG_IO_CORE, un, "sd_core_iostart: exit: bp:0x%p\n", bp); 11457 } 11458 11459 11460 /* 11461 * Function: sd_init_cdb_limits 11462 * 11463 * Description: This is to handle scsi_pkt initialization differences 11464 * between the driver platforms. 11465 * 11466 * Legacy behaviors: 11467 * 11468 * If the block number or the sector count exceeds the 11469 * capabilities of a Group 0 command, shift over to a 11470 * Group 1 command. We don't blindly use Group 1 11471 * commands because a) some drives (CDC Wren IVs) get a 11472 * bit confused, and b) there is probably a fair amount 11473 * of speed difference for a target to receive and decode 11474 * a 10 byte command instead of a 6 byte command. 11475 * 11476 * The xfer time difference of 6 vs 10 byte CDBs is 11477 * still significant so this code is still worthwhile. 
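 * (For reference: a Group 0 READ/WRITE CDB carries a 21-bit LBA * and an 8-bit block count, while Group 1 carries a 32-bit LBA * and a 16-bit count, so the 6-byte form is only usable near the * start of a device and for small transfers.)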
11478 * 10 byte CDBs are very inefficient with the fas HBA driver 11479 * and older disks. Each CDB byte took 1 usec with some 11480 * popular disks. 11481 * 11482 * Context: Must be called at attach time 11483 */ 11484 11485 static void 11486 sd_init_cdb_limits(struct sd_lun *un) 11487 { 11488 int hba_cdb_limit; 11489 11490 /* 11491 * Use CDB_GROUP1 commands for most devices except for 11492 * parallel SCSI fixed drives in which case we get better 11493 * performance using CDB_GROUP0 commands (where applicable). 11494 */ 11495 un->un_mincdb = SD_CDB_GROUP1; 11496 #if !defined(__fibre) 11497 if (!un->un_f_is_fibre && !un->un_f_cfg_is_atapi && !ISROD(un) && 11498 !un->un_f_has_removable_media) { 11499 un->un_mincdb = SD_CDB_GROUP0; 11500 } 11501 #endif 11502 11503 /* 11504 * Try to read the max-cdb-length supported by HBA. 11505 */ 11506 un->un_max_hba_cdb = scsi_ifgetcap(SD_ADDRESS(un), "max-cdb-length", 1); 11507 if (0 >= un->un_max_hba_cdb) { 11508 un->un_max_hba_cdb = CDB_GROUP4; 11509 hba_cdb_limit = SD_CDB_GROUP4; 11510 } else if (0 < un->un_max_hba_cdb && 11511 un->un_max_hba_cdb < CDB_GROUP1) { 11512 hba_cdb_limit = SD_CDB_GROUP0; 11513 } else if (CDB_GROUP1 <= un->un_max_hba_cdb && 11514 un->un_max_hba_cdb < CDB_GROUP5) { 11515 hba_cdb_limit = SD_CDB_GROUP1; 11516 } else if (CDB_GROUP5 <= un->un_max_hba_cdb && 11517 un->un_max_hba_cdb < CDB_GROUP4) { 11518 hba_cdb_limit = SD_CDB_GROUP5; 11519 } else { 11520 hba_cdb_limit = SD_CDB_GROUP4; 11521 } 11522 11523 /* 11524 * Use CDB_GROUP5 commands for removable devices. Use CDB_GROUP4 11525 * commands for fixed disks unless we are building for a 32 bit 11526 * kernel. 11527 */ 11528 #ifdef _LP64 11529 un->un_maxcdb = (un->un_f_has_removable_media) ? SD_CDB_GROUP5 : 11530 min(hba_cdb_limit, SD_CDB_GROUP4); 11531 #else 11532 un->un_maxcdb = (un->un_f_has_removable_media) ? SD_CDB_GROUP5 : 11533 min(hba_cdb_limit, SD_CDB_GROUP1); 11534 #endif 11535 11536 /* 11537 * x86 systems require the PKT_DMA_PARTIAL flag 11538 */ 11539 #if defined(__x86) 11540 un->un_pkt_flags = PKT_DMA_PARTIAL; 11541 #else 11542 un->un_pkt_flags = 0; 11543 #endif 11544 11545 un->un_status_len = (int)((un->un_f_arq_enabled == TRUE) 11546 ? sizeof (struct scsi_arq_status) : 1); 11547 un->un_cmd_timeout = (ushort_t)sd_io_time; 11548 un->un_uscsi_timeout = ((ISCD(un)) ? 2 : 1) * un->un_cmd_timeout; 11549 } 11550 11551 11552 /* 11553 * Function: sd_initpkt_for_buf 11554 * 11555 * Description: Allocate and initialize for transport a scsi_pkt struct, 11556 * based upon the info specified in the given buf struct. 11557 * 11558 * Assumes the xb_blkno in the request is absolute (ie, 11559 * relative to the start of the device (NOT partition!). 11560 * Also assumes that the request is using the native block 11561 * size of the device (as returned by the READ CAPACITY 11562 * command). 11563 * 11564 * Return Code: SD_PKT_ALLOC_SUCCESS 11565 * SD_PKT_ALLOC_FAILURE 11566 * SD_PKT_ALLOC_FAILURE_NO_DMA 11567 * SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL 11568 * 11569 * Context: Kernel thread and may be called from software interrupt context 11570 * as part of a sdrunout callback. 
This function may not block or 11571 * call routines that block 11572 */ 11573 11574 static int 11575 sd_initpkt_for_buf(struct buf *bp, struct scsi_pkt **pktpp) 11576 { 11577 struct sd_xbuf *xp; 11578 struct scsi_pkt *pktp = NULL; 11579 struct sd_lun *un; 11580 size_t blockcount; 11581 daddr_t startblock; 11582 int rval; 11583 int cmd_flags; 11584 11585 ASSERT(bp != NULL); 11586 ASSERT(pktpp != NULL); 11587 xp = SD_GET_XBUF(bp); 11588 ASSERT(xp != NULL); 11589 un = SD_GET_UN(bp); 11590 ASSERT(un != NULL); 11591 ASSERT(mutex_owned(SD_MUTEX(un))); 11592 ASSERT(bp->b_resid == 0); 11593 11594 SD_TRACE(SD_LOG_IO_CORE, un, 11595 "sd_initpkt_for_buf: entry: buf:0x%p\n", bp); 11596 11597 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 11598 if (xp->xb_pkt_flags & SD_XB_DMA_FREED) { 11599 /* 11600 * Already have a scsi_pkt -- just need DMA resources. 11601 * We must recompute the CDB in case the mapping returns 11602 * a nonzero pkt_resid. 11603 * Note: if this is a portion of a PKT_DMA_PARTIAL transfer 11604 * that is being retried, the unmap/remap of the DMA resources 11605 * will result in the entire transfer starting over again 11606 * from the very first block. 11607 */ 11608 ASSERT(xp->xb_pktp != NULL); 11609 pktp = xp->xb_pktp; 11610 } else { 11611 pktp = NULL; 11612 } 11613 #endif /* __i386 || __amd64 */ 11614 11615 startblock = xp->xb_blkno; /* Absolute block num. */ 11616 blockcount = SD_BYTES2TGTBLOCKS(un, bp->b_bcount); 11617 11618 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 11619 11620 cmd_flags = un->un_pkt_flags | (xp->xb_pkt_flags & SD_XB_INITPKT_MASK); 11621 11622 #else 11623 11624 cmd_flags = un->un_pkt_flags | xp->xb_pkt_flags; 11625 11626 #endif 11627 11628 /* 11629 * sd_setup_rw_pkt will determine the appropriate CDB group to use, 11630 * call scsi_init_pkt, and build the CDB. 11631 */ 11632 rval = sd_setup_rw_pkt(un, &pktp, bp, 11633 cmd_flags, sdrunout, (caddr_t)un, 11634 startblock, blockcount); 11635 11636 if (rval == 0) { 11637 /* 11638 * Success. 11639 * 11640 * If partial DMA is being used and required for this transfer, 11641 * set it up here. 11642 */ 11643 if ((un->un_pkt_flags & PKT_DMA_PARTIAL) != 0 && 11644 (pktp->pkt_resid != 0)) { 11645 11646 /* 11647 * Save the CDB length and pkt_resid for the 11648 * next xfer 11649 */ 11650 xp->xb_dma_resid = pktp->pkt_resid; 11651 11652 /* rezero resid */ 11653 pktp->pkt_resid = 0; 11654 11655 } else { 11656 xp->xb_dma_resid = 0; 11657 } 11658 11659 pktp->pkt_flags = un->un_tagflags; 11660 pktp->pkt_time = un->un_cmd_timeout; 11661 pktp->pkt_comp = sdintr; 11662 11663 pktp->pkt_private = bp; 11664 *pktpp = pktp; 11665 11666 SD_TRACE(SD_LOG_IO_CORE, un, 11667 "sd_initpkt_for_buf: exit: buf:0x%p\n", bp); 11668 11669 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 11670 xp->xb_pkt_flags &= ~SD_XB_DMA_FREED; 11671 #endif 11672 11673 return (SD_PKT_ALLOC_SUCCESS); 11674 11675 } 11676 11677 /* 11678 * SD_PKT_ALLOC_FAILURE is the only expected failure code 11679 * from sd_setup_rw_pkt. 11680 */ 11681 ASSERT(rval == SD_PKT_ALLOC_FAILURE); 11682 11683 if (rval == SD_PKT_ALLOC_FAILURE) { 11684 *pktpp = NULL; 11685 /* 11686 * Set the driver state to RWAIT to indicate the driver 11687 * is waiting on resource allocations. The driver will not 11688 * suspend, pm_suspend, or detach while the state is RWAIT. 11689 */ 11690 New_state(un, SD_STATE_RWAIT); 11691 11692 SD_ERROR(SD_LOG_IO_CORE, un, 11693 "sd_initpkt_for_buf: No pktp. 
exit bp:0x%p\n", bp); 11694 11695 if ((bp->b_flags & B_ERROR) != 0) { 11696 return (SD_PKT_ALLOC_FAILURE_NO_DMA); 11697 } 11698 return (SD_PKT_ALLOC_FAILURE); 11699 } else { 11700 /* 11701 * PKT_ALLOC_FAILURE_CDB_TOO_SMALL 11702 * 11703 * This should never happen. Maybe someone messed with the 11704 * kernel's minphys? 11705 */ 11706 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 11707 "Request rejected: too large for CDB: " 11708 "lba:0x%08lx len:0x%08lx\n", startblock, blockcount); 11709 SD_ERROR(SD_LOG_IO_CORE, un, 11710 "sd_initpkt_for_buf: No cp. exit bp:0x%p\n", bp); 11711 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL); 11712 11713 } 11714 } 11715 11716 11717 /* 11718 * Function: sd_destroypkt_for_buf 11719 * 11720 * Description: Free the scsi_pkt(9S) for the given bp (buf IO processing). 11721 * 11722 * Context: Kernel thread or interrupt context 11723 */ 11724 11725 static void 11726 sd_destroypkt_for_buf(struct buf *bp) 11727 { 11728 ASSERT(bp != NULL); 11729 ASSERT(SD_GET_UN(bp) != NULL); 11730 11731 SD_TRACE(SD_LOG_IO_CORE, SD_GET_UN(bp), 11732 "sd_destroypkt_for_buf: entry: buf:0x%p\n", bp); 11733 11734 ASSERT(SD_GET_PKTP(bp) != NULL); 11735 scsi_destroy_pkt(SD_GET_PKTP(bp)); 11736 11737 SD_TRACE(SD_LOG_IO_CORE, SD_GET_UN(bp), 11738 "sd_destroypkt_for_buf: exit: buf:0x%p\n", bp); 11739 } 11740 11741 /* 11742 * Function: sd_setup_rw_pkt 11743 * 11744 * Description: Determines appropriate CDB group for the requested LBA 11745 * and transfer length, calls scsi_init_pkt, and builds 11746 * the CDB. Do not use for partial DMA transfers except 11747 * for the initial transfer since the CDB size must 11748 * remain constant. 11749 * 11750 * Context: Kernel thread and may be called from software interrupt 11751 * context as part of a sdrunout callback. This function may not 11752 * block or call routines that block 11753 */ 11754 11755 11756 int 11757 sd_setup_rw_pkt(struct sd_lun *un, 11758 struct scsi_pkt **pktpp, struct buf *bp, int flags, 11759 int (*callback)(caddr_t), caddr_t callback_arg, 11760 diskaddr_t lba, uint32_t blockcount) 11761 { 11762 struct scsi_pkt *return_pktp; 11763 union scsi_cdb *cdbp; 11764 struct sd_cdbinfo *cp = NULL; 11765 int i; 11766 11767 /* 11768 * See which size CDB to use, based upon the request. 11769 */ 11770 for (i = un->un_mincdb; i <= un->un_maxcdb; i++) { 11771 11772 /* 11773 * Check lba and block count against sd_cdbtab limits. 11774 * In the partial DMA case, we have to use the same size 11775 * CDB for all the transfers. Check lba + blockcount 11776 * against the max LBA so we know that segment of the 11777 * transfer can use the CDB we select. 11778 */ 11779 if ((lba + blockcount - 1 <= sd_cdbtab[i].sc_maxlba) && 11780 (blockcount <= sd_cdbtab[i].sc_maxlen)) { 11781 11782 /* 11783 * The command will fit into the CDB type 11784 * specified by sd_cdbtab[i]. 11785 */ 11786 cp = sd_cdbtab + i; 11787 11788 /* 11789 * Call scsi_init_pkt so we can fill in the 11790 * CDB. 11791 */ 11792 return_pktp = scsi_init_pkt(SD_ADDRESS(un), *pktpp, 11793 bp, cp->sc_grpcode, un->un_status_len, 0, 11794 flags, callback, callback_arg); 11795 11796 if (return_pktp != NULL) { 11797 11798 /* 11799 * Return new value of pkt 11800 */ 11801 *pktpp = return_pktp; 11802 11803 /* 11804 * To be safe, zero the CDB ensuring there is 11805 * no leftover data from a previous command.
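 * Note that cp->sc_grpcode doubles as the CDB length in bytes * (6, 10, 12 or 16 for Groups 0, 1, 5 and 4 respectively), which * is why it serves both as the cdblen argument to scsi_init_pkt() * above and as the bzero() length below.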
11806 */ 11807 bzero(return_pktp->pkt_cdbp, cp->sc_grpcode); 11808 11809 /* 11810 * Handle partial DMA mapping 11811 */ 11812 if (return_pktp->pkt_resid != 0) { 11813 11814 /* 11815 * Not going to xfer as many blocks as 11816 * originally expected 11817 */ 11818 blockcount -= 11819 SD_BYTES2TGTBLOCKS(un, 11820 return_pktp->pkt_resid); 11821 } 11822 11823 cdbp = (union scsi_cdb *)return_pktp->pkt_cdbp; 11824 11825 /* 11826 * Set command byte based on the CDB 11827 * type we matched. 11828 */ 11829 cdbp->scc_cmd = cp->sc_grpmask | 11830 ((bp->b_flags & B_READ) ? 11831 SCMD_READ : SCMD_WRITE); 11832 11833 SD_FILL_SCSI1_LUN(un, return_pktp); 11834 11835 /* 11836 * Fill in LBA and length 11837 */ 11838 ASSERT((cp->sc_grpcode == CDB_GROUP1) || 11839 (cp->sc_grpcode == CDB_GROUP4) || 11840 (cp->sc_grpcode == CDB_GROUP0) || 11841 (cp->sc_grpcode == CDB_GROUP5)); 11842 11843 if (cp->sc_grpcode == CDB_GROUP1) { 11844 FORMG1ADDR(cdbp, lba); 11845 FORMG1COUNT(cdbp, blockcount); 11846 return (0); 11847 } else if (cp->sc_grpcode == CDB_GROUP4) { 11848 FORMG4LONGADDR(cdbp, lba); 11849 FORMG4COUNT(cdbp, blockcount); 11850 return (0); 11851 } else if (cp->sc_grpcode == CDB_GROUP0) { 11852 FORMG0ADDR(cdbp, lba); 11853 FORMG0COUNT(cdbp, blockcount); 11854 return (0); 11855 } else if (cp->sc_grpcode == CDB_GROUP5) { 11856 FORMG5ADDR(cdbp, lba); 11857 FORMG5COUNT(cdbp, blockcount); 11858 return (0); 11859 } 11860 11861 /* 11862 * It should be impossible to not match one 11863 * of the CDB types above, so we should never 11864 * reach this point. Set the CDB command byte 11865 * to test-unit-ready to avoid writing 11866 * to somewhere we don't intend. 11867 */ 11868 cdbp->scc_cmd = SCMD_TEST_UNIT_READY; 11869 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL); 11870 } else { 11871 /* 11872 * Couldn't get scsi_pkt 11873 */ 11874 return (SD_PKT_ALLOC_FAILURE); 11875 } 11876 } 11877 } 11878 11879 /* 11880 * None of the available CDB types were suitable. This really 11881 * should never happen: on a 64 bit system we support 11882 * READ16/WRITE16 which will hold an entire 64 bit disk address 11883 * and on a 32 bit system we will refuse to bind to a device 11884 * larger than 2TB so addresses will never be larger than 32 bits. 11885 */ 11886 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL); 11887 } 11888 11889 #if defined(__i386) || defined(__amd64) 11890 /* 11891 * Function: sd_setup_next_rw_pkt 11892 * 11893 * Description: Setup packet for partial DMA transfers, except for the 11894 * initial transfer. sd_setup_rw_pkt should be used for 11895 * the initial transfer. 11896 * 11897 * Context: Kernel thread and may be called from interrupt context. 11898 */ 11899 11900 int 11901 sd_setup_next_rw_pkt(struct sd_lun *un, 11902 struct scsi_pkt *pktp, struct buf *bp, 11903 diskaddr_t lba, uint32_t blockcount) 11904 { 11905 uchar_t com; 11906 union scsi_cdb *cdbp; 11907 uchar_t cdb_group_id; 11908 11909 ASSERT(pktp != NULL); 11910 ASSERT(pktp->pkt_cdbp != NULL); 11911 11912 cdbp = (union scsi_cdb *)pktp->pkt_cdbp; 11913 com = cdbp->scc_cmd; 11914 cdb_group_id = CDB_GROUPID(com); 11915 11916 ASSERT((cdb_group_id == CDB_GROUPID_0) || 11917 (cdb_group_id == CDB_GROUPID_1) || 11918 (cdb_group_id == CDB_GROUPID_4) || 11919 (cdb_group_id == CDB_GROUPID_5)); 11920 11921 /* 11922 * Move pkt to the next portion of the xfer. 11923 * func is NULL_FUNC so we do not have to release 11924 * the disk mutex here. 11925 */ 11926 if (scsi_init_pkt(SD_ADDRESS(un), pktp, bp, 0, 0, 0, 0, 11927 NULL_FUNC, NULL) == pktp) { 11928 /* Success. 
Handle partial DMA */ 11929 if (pktp->pkt_resid != 0) { 11930 blockcount -= 11931 SD_BYTES2TGTBLOCKS(un, pktp->pkt_resid); 11932 } 11933 11934 cdbp->scc_cmd = com; 11935 SD_FILL_SCSI1_LUN(un, pktp); 11936 if (cdb_group_id == CDB_GROUPID_1) { 11937 FORMG1ADDR(cdbp, lba); 11938 FORMG1COUNT(cdbp, blockcount); 11939 return (0); 11940 } else if (cdb_group_id == CDB_GROUPID_4) { 11941 FORMG4LONGADDR(cdbp, lba); 11942 FORMG4COUNT(cdbp, blockcount); 11943 return (0); 11944 } else if (cdb_group_id == CDB_GROUPID_0) { 11945 FORMG0ADDR(cdbp, lba); 11946 FORMG0COUNT(cdbp, blockcount); 11947 return (0); 11948 } else if (cdb_group_id == CDB_GROUPID_5) { 11949 FORMG5ADDR(cdbp, lba); 11950 FORMG5COUNT(cdbp, blockcount); 11951 return (0); 11952 } 11953 11954 /* Unreachable */ 11955 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL); 11956 } 11957 11958 /* 11959 * Error setting up next portion of cmd transfer. 11960 * Something is definitely very wrong and this 11961 * should not happen. 11962 */ 11963 return (SD_PKT_ALLOC_FAILURE); 11964 } 11965 #endif /* defined(__i386) || defined(__amd64) */ 11966 11967 /* 11968 * Function: sd_initpkt_for_uscsi 11969 * 11970 * Description: Allocate and initialize for transport a scsi_pkt struct, 11971 * based upon the info specified in the given uscsi_cmd struct. 11972 * 11973 * Return Code: SD_PKT_ALLOC_SUCCESS 11974 * SD_PKT_ALLOC_FAILURE 11975 * SD_PKT_ALLOC_FAILURE_NO_DMA 11976 * SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL 11977 * 11978 * Context: Kernel thread and may be called from software interrupt context 11979 * as part of a sdrunout callback. This function may not block or 11980 * call routines that block 11981 */ 11982 11983 static int 11984 sd_initpkt_for_uscsi(struct buf *bp, struct scsi_pkt **pktpp) 11985 { 11986 struct uscsi_cmd *uscmd; 11987 struct sd_xbuf *xp; 11988 struct scsi_pkt *pktp; 11989 struct sd_lun *un; 11990 uint32_t flags = 0; 11991 11992 ASSERT(bp != NULL); 11993 ASSERT(pktpp != NULL); 11994 xp = SD_GET_XBUF(bp); 11995 ASSERT(xp != NULL); 11996 un = SD_GET_UN(bp); 11997 ASSERT(un != NULL); 11998 ASSERT(mutex_owned(SD_MUTEX(un))); 11999 12000 /* The pointer to the uscsi_cmd struct is expected in xb_pktinfo */ 12001 uscmd = (struct uscsi_cmd *)xp->xb_pktinfo; 12002 ASSERT(uscmd != NULL); 12003 12004 SD_TRACE(SD_LOG_IO_CORE, un, 12005 "sd_initpkt_for_uscsi: entry: buf:0x%p\n", bp); 12006 12007 /* 12008 * Allocate the scsi_pkt for the command. 12009 * Note: If PKT_DMA_PARTIAL flag is set, scsi_vhci binds a path 12010 * during scsi_init_pkt time and will continue to use the 12011 * same path as long as the same scsi_pkt is used without 12012 * intervening scsi_dmafree(). Since a uscsi command does 12013 * not call scsi_dmafree() before retrying a failed command, it 12014 * is necessary to make sure the PKT_DMA_PARTIAL flag is NOT 12015 * set, so that scsi_vhci can use another available path for the 12016 * retry. Besides, uscsi commands do not allow DMA breakup, 12017 * so there is no need to set the PKT_DMA_PARTIAL flag. 12018 */ 12019 pktp = scsi_init_pkt(SD_ADDRESS(un), NULL, 12020 ((bp->b_bcount != 0) ? bp : NULL), uscmd->uscsi_cdblen, 12021 sizeof (struct scsi_arq_status), 0, 12022 (un->un_pkt_flags & ~PKT_DMA_PARTIAL), 12023 sdrunout, (caddr_t)un); 12024 12025 if (pktp == NULL) { 12026 *pktpp = NULL; 12027 /* 12028 * Set the driver state to RWAIT to indicate the driver 12029 * is waiting on resource allocations. The driver will not 12030 * suspend, pm_suspend, or detach while the state is RWAIT.
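 * As with sd_initpkt_for_buf(), transport of queued commands * resumes when the sdrunout() callback fires or when a subsequent * I/O completion restarts queue processing.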
12031 */ 12032 New_state(un, SD_STATE_RWAIT); 12033 12034 SD_ERROR(SD_LOG_IO_CORE, un, 12035 "sd_initpkt_for_uscsi: No pktp. exit bp:0x%p\n", bp); 12036 12037 if ((bp->b_flags & B_ERROR) != 0) { 12038 return (SD_PKT_ALLOC_FAILURE_NO_DMA); 12039 } 12040 return (SD_PKT_ALLOC_FAILURE); 12041 } 12042 12043 /* 12044 * We do not do DMA breakup for USCSI commands, so return failure 12045 * here if all the needed DMA resources were not allocated. 12046 */ 12047 if ((un->un_pkt_flags & PKT_DMA_PARTIAL) && 12048 (bp->b_bcount != 0) && (pktp->pkt_resid != 0)) { 12049 scsi_destroy_pkt(pktp); 12050 SD_ERROR(SD_LOG_IO_CORE, un, "sd_initpkt_for_uscsi: " 12051 "No partial DMA for USCSI. exit: buf:0x%p\n", bp); 12052 return (SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL); 12053 } 12054 12055 /* Init the cdb from the given uscsi struct */ 12056 (void) scsi_setup_cdb((union scsi_cdb *)pktp->pkt_cdbp, 12057 uscmd->uscsi_cdb[0], 0, 0, 0); 12058 12059 SD_FILL_SCSI1_LUN(un, pktp); 12060 12061 /* 12062 * Set up the optional USCSI flags. See the uscsi(7I) man page 12063 * for a listing of the supported flags. 12064 */ 12065 12066 if (uscmd->uscsi_flags & USCSI_SILENT) { 12067 flags |= FLAG_SILENT; 12068 } 12069 12070 if (uscmd->uscsi_flags & USCSI_DIAGNOSE) { 12071 flags |= FLAG_DIAGNOSE; 12072 } 12073 12074 if (uscmd->uscsi_flags & USCSI_ISOLATE) { 12075 flags |= FLAG_ISOLATE; 12076 } 12077 12078 if (un->un_f_is_fibre == FALSE) { 12079 if (uscmd->uscsi_flags & USCSI_RENEGOT) { 12080 flags |= FLAG_RENEGOTIATE_WIDE_SYNC; 12081 } 12082 } 12083 12084 /* 12085 * Set the pkt flags here so we save time later. 12086 * Note: These flags are NOT in the uscsi man page!!! 12087 */ 12088 if (uscmd->uscsi_flags & USCSI_HEAD) { 12089 flags |= FLAG_HEAD; 12090 } 12091 12092 if (uscmd->uscsi_flags & USCSI_NOINTR) { 12093 flags |= FLAG_NOINTR; 12094 } 12095 12096 /* 12097 * For tagged queueing, things get a bit complicated. 12098 * Check first for head of queue and last for ordered queue. 12099 * If neither head nor order, use the default driver tag flags. 12100 */ 12101 if ((uscmd->uscsi_flags & USCSI_NOTAG) == 0) { 12102 if (uscmd->uscsi_flags & USCSI_HTAG) { 12103 flags |= FLAG_HTAG; 12104 } else if (uscmd->uscsi_flags & USCSI_OTAG) { 12105 flags |= FLAG_OTAG; 12106 } else { 12107 flags |= un->un_tagflags & FLAG_TAGMASK; 12108 } 12109 } 12110 12111 if (uscmd->uscsi_flags & USCSI_NODISCON) { 12112 flags = (flags & ~FLAG_TAGMASK) | FLAG_NODISCON; 12113 } 12114 12115 pktp->pkt_flags = flags; 12116 12117 /* Copy the caller's CDB into the pkt... */ 12118 bcopy(uscmd->uscsi_cdb, pktp->pkt_cdbp, uscmd->uscsi_cdblen); 12119 12120 if (uscmd->uscsi_timeout == 0) { 12121 pktp->pkt_time = un->un_uscsi_timeout; 12122 } else { 12123 pktp->pkt_time = uscmd->uscsi_timeout; 12124 } 12125 12126 /* need it later to identify USCSI request in sdintr */ 12127 xp->xb_pkt_flags |= SD_XB_USCSICMD; 12128 12129 xp->xb_sense_resid = uscmd->uscsi_rqresid; 12130 12131 pktp->pkt_private = bp; 12132 pktp->pkt_comp = sdintr; 12133 *pktpp = pktp; 12134 12135 SD_TRACE(SD_LOG_IO_CORE, un, 12136 "sd_initpkt_for_uscsi: exit: buf:0x%p\n", bp); 12137 12138 return (SD_PKT_ALLOC_SUCCESS); 12139 } 12140 12141 12142 /* 12143 * Function: sd_destroypkt_for_uscsi 12144 * 12145 * Description: Free the scsi_pkt(9S) struct for the given bp, for uscsi 12146 * IOs. Also saves relevant info into the associated uscsi_cmd 12147 * struct.
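 * Specifically (per the code below): uscsi_status and * uscsi_resid are always updated, and when USCSI_RQENABLE is set * with a valid uscsi_rqbuf, the saved sense status, sense residual * and SENSE_LENGTH bytes of sense data are copied out as well.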
12148 * 12149 * Context: May be called under interrupt context 12150 */ 12151 12152 static void 12153 sd_destroypkt_for_uscsi(struct buf *bp) 12154 { 12155 struct uscsi_cmd *uscmd; 12156 struct sd_xbuf *xp; 12157 struct scsi_pkt *pktp; 12158 struct sd_lun *un; 12159 12160 ASSERT(bp != NULL); 12161 xp = SD_GET_XBUF(bp); 12162 ASSERT(xp != NULL); 12163 un = SD_GET_UN(bp); 12164 ASSERT(un != NULL); 12165 ASSERT(!mutex_owned(SD_MUTEX(un))); 12166 pktp = SD_GET_PKTP(bp); 12167 ASSERT(pktp != NULL); 12168 12169 SD_TRACE(SD_LOG_IO_CORE, un, 12170 "sd_destroypkt_for_uscsi: entry: buf:0x%p\n", bp); 12171 12172 /* The pointer to the uscsi_cmd struct is expected in xb_pktinfo */ 12173 uscmd = (struct uscsi_cmd *)xp->xb_pktinfo; 12174 ASSERT(uscmd != NULL); 12175 12176 /* Save the status and the residual into the uscsi_cmd struct */ 12177 uscmd->uscsi_status = ((*(pktp)->pkt_scbp) & STATUS_MASK); 12178 uscmd->uscsi_resid = bp->b_resid; 12179 12180 /* 12181 * If enabled, copy any saved sense data into the area specified 12182 * by the uscsi command. 12183 */ 12184 if (((uscmd->uscsi_flags & USCSI_RQENABLE) != 0) && 12185 (uscmd->uscsi_rqlen != 0) && (uscmd->uscsi_rqbuf != NULL)) { 12186 /* 12187 * Note: uscmd->uscsi_rqbuf should always point to a buffer 12188 * at least SENSE_LENGTH bytes in size (see sd_send_scsi_cmd()) 12189 */ 12190 uscmd->uscsi_rqstatus = xp->xb_sense_status; 12191 uscmd->uscsi_rqresid = xp->xb_sense_resid; 12192 bcopy(xp->xb_sense_data, uscmd->uscsi_rqbuf, SENSE_LENGTH); 12193 } 12194 12195 /* We are done with the scsi_pkt; free it now */ 12196 ASSERT(SD_GET_PKTP(bp) != NULL); 12197 scsi_destroy_pkt(SD_GET_PKTP(bp)); 12198 12199 SD_TRACE(SD_LOG_IO_CORE, un, 12200 "sd_destroypkt_for_uscsi: exit: buf:0x%p\n", bp); 12201 } 12202 12203 12204 /* 12205 * Function: sd_bioclone_alloc 12206 * 12207 * Description: Allocate a buf(9S) and init it as per the given buf 12208 * and the various arguments. The associated sd_xbuf 12209 * struct is (nearly) duplicated. The struct buf *bp 12210 * argument is saved in new_xp->xb_private. 12211 * 12212 * Arguments: bp - ptr to the buf(9S) to be "shadowed" 12213 * datalen - size of data area for the shadow bp 12214 * blkno - starting LBA 12215 * func - function pointer for b_iodone in the shadow buf. (May 12216 * be NULL if none.) 12217 * 12218 * Return Code: Pointer to the allocated buf(9S) struct 12219 * 12220 * Context: Can sleep. 12221 */ 12222 12223 static struct buf * 12224 sd_bioclone_alloc(struct buf *bp, size_t datalen, 12225 daddr_t blkno, int (*func)(struct buf *)) 12226 { 12227 struct sd_lun *un; 12228 struct sd_xbuf *xp; 12229 struct sd_xbuf *new_xp; 12230 struct buf *new_bp; 12231 12232 ASSERT(bp != NULL); 12233 xp = SD_GET_XBUF(bp); 12234 ASSERT(xp != NULL); 12235 un = SD_GET_UN(bp); 12236 ASSERT(un != NULL); 12237 ASSERT(!mutex_owned(SD_MUTEX(un))); 12238 12239 new_bp = bioclone(bp, 0, datalen, SD_GET_DEV(un), blkno, func, 12240 NULL, KM_SLEEP); 12241 12242 new_bp->b_lblkno = blkno; 12243 12244 /* 12245 * Allocate an xbuf for the shadow bp and copy the contents of the 12246 * original xbuf into it. 12247 */ 12248 new_xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP); 12249 bcopy(xp, new_xp, sizeof (struct sd_xbuf)); 12250 12251 /* 12252 * The given bp is automatically saved in the xb_private member 12253 * of the new xbuf. Callers are allowed to depend on this.
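 * (sd_mapblockaddr_iodone(), for one, relies on this to recover * the original buf when it tears down an "overrun" clone.)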
12254 */ 12255 new_xp->xb_private = bp; 12256 12257 new_bp->b_private = new_xp; 12258 12259 return (new_bp); 12260 } 12261 12262 /* 12263 * Function: sd_shadow_buf_alloc 12264 * 12265 * Description: Allocate a buf(9S) and init it as per the given buf 12266 * and the various arguments. The associated sd_xbuf 12267 * struct is (nearly) duplicated. The struct buf *bp 12268 * argument is saved in new_xp->xb_private. 12269 * 12270 * Arguments: bp - ptr to the buf(9S) to be "shadowed" 12271 * datalen - size of data area for the shadow bp 12272 * bflags - B_READ or B_WRITE (pseudo flag) 12273 * blkno - starting LBA 12274 * func - function pointer for b_iodone in the shadow buf. (May 12275 * be NULL if none.) 12276 * 12277 * Return Code: Pointer to the allocated buf(9S) struct 12278 * 12279 * Context: Can sleep. 12280 */ 12281 12282 static struct buf * 12283 sd_shadow_buf_alloc(struct buf *bp, size_t datalen, uint_t bflags, 12284 daddr_t blkno, int (*func)(struct buf *)) 12285 { 12286 struct sd_lun *un; 12287 struct sd_xbuf *xp; 12288 struct sd_xbuf *new_xp; 12289 struct buf *new_bp; 12290 12291 ASSERT(bp != NULL); 12292 xp = SD_GET_XBUF(bp); 12293 ASSERT(xp != NULL); 12294 un = SD_GET_UN(bp); 12295 ASSERT(un != NULL); 12296 ASSERT(!mutex_owned(SD_MUTEX(un))); 12297 12298 if (bp->b_flags & (B_PAGEIO | B_PHYS)) { 12299 bp_mapin(bp); 12300 } 12301 12302 bflags &= (B_READ | B_WRITE); 12303 #if defined(__i386) || defined(__amd64) 12304 new_bp = getrbuf(KM_SLEEP); 12305 new_bp->b_un.b_addr = kmem_zalloc(datalen, KM_SLEEP); 12306 new_bp->b_bcount = datalen; 12307 new_bp->b_flags = bflags | 12308 (bp->b_flags & ~(B_PAGEIO | B_PHYS | B_REMAPPED | B_SHADOW)); 12309 #else 12310 new_bp = scsi_alloc_consistent_buf(SD_ADDRESS(un), NULL, 12311 datalen, bflags, SLEEP_FUNC, NULL); 12312 #endif 12313 new_bp->av_forw = NULL; 12314 new_bp->av_back = NULL; 12315 new_bp->b_dev = bp->b_dev; 12316 new_bp->b_blkno = blkno; 12317 new_bp->b_iodone = func; 12318 new_bp->b_edev = bp->b_edev; 12319 new_bp->b_resid = 0; 12320 12321 /* We need to preserve the B_FAILFAST flag */ 12322 if (bp->b_flags & B_FAILFAST) { 12323 new_bp->b_flags |= B_FAILFAST; 12324 } 12325 12326 /* 12327 * Allocate an xbuf for the shadow bp and copy the contents of the 12328 * original xbuf into it. 12329 */ 12330 new_xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP); 12331 bcopy(xp, new_xp, sizeof (struct sd_xbuf)); 12332 12333 /* Need later to copy data between the shadow buf & original buf! */ 12334 new_xp->xb_pkt_flags |= PKT_CONSISTENT; 12335 12336 /* 12337 * The given bp is automatically saved in the xb_private member 12338 * of the new xbuf. Callers are allowed to depend on this. 12339 */ 12340 new_xp->xb_private = bp; 12341 12342 new_bp->b_private = new_xp; 12343 12344 return (new_bp); 12345 } 12346 12347 /* 12348 * Function: sd_bioclone_free 12349 * 12350 * Description: Deallocate a buf(9S) that was used for 'shadow' IO operations 12351 * in the larger-than-partition (overrun) case. 12352 * 12353 * Context: May be called under interrupt context 12354 */ 12355 12356 static void 12357 sd_bioclone_free(struct buf *bp) 12358 { 12359 struct sd_xbuf *xp; 12360 12361 ASSERT(bp != NULL); 12362 xp = SD_GET_XBUF(bp); 12363 ASSERT(xp != NULL); 12364 12365 /* 12366 * Call bp_mapout() before freeing the buf, in case a lower 12367 * layer or HBA had done a bp_mapin(). We must do this here 12368 * as we are the "originator" of the shadow buf.
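 * Note the intended pairing: bufs from sd_bioclone_alloc() come * back through sd_bioclone_free(), while bufs from * sd_shadow_buf_alloc() must go through sd_shadow_buf_free(), whose * platform-specific free path matches the allocation * (getrbuf()/kmem_zalloc() on x86, scsi_alloc_consistent_buf() * elsewhere).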
12369 */ 12370 bp_mapout(bp); 12371 12372 /* 12373 * Null out b_iodone before freeing the bp, to ensure that the driver 12374 * never gets confused by a stale value in this field. (Just a little 12375 * extra defensiveness here.) 12376 */ 12377 bp->b_iodone = NULL; 12378 12379 freerbuf(bp); 12380 12381 kmem_free(xp, sizeof (struct sd_xbuf)); 12382 } 12383 12384 /* 12385 * Function: sd_shadow_buf_free 12386 * 12387 * Description: Deallocate a buf(9S) that was used for 'shadow' IO operations. 12388 * 12389 * Context: May be called under interrupt context 12390 */ 12391 12392 static void 12393 sd_shadow_buf_free(struct buf *bp) 12394 { 12395 struct sd_xbuf *xp; 12396 12397 ASSERT(bp != NULL); 12398 xp = SD_GET_XBUF(bp); 12399 ASSERT(xp != NULL); 12400 12401 #if defined(__sparc) 12402 /* 12403 * Call bp_mapout() before freeing the buf, in case a lower 12404 * layer or HBA had done a bp_mapin(). We must do this here 12405 * as we are the "originator" of the shadow buf. 12406 */ 12407 bp_mapout(bp); 12408 #endif 12409 12410 /* 12411 * Null out b_iodone before freeing the bp, to ensure that the driver 12412 * never gets confused by a stale value in this field. (Just a little 12413 * extra defensiveness here.) 12414 */ 12415 bp->b_iodone = NULL; 12416 12417 #if defined(__i386) || defined(__amd64) 12418 kmem_free(bp->b_un.b_addr, bp->b_bcount); 12419 freerbuf(bp); 12420 #else 12421 scsi_free_consistent_buf(bp); 12422 #endif 12423 12424 kmem_free(xp, sizeof (struct sd_xbuf)); 12425 } 12426 12427 12428 /* 12429 * Function: sd_print_transport_rejected_message 12430 * 12431 * Description: This implements the ludicrously complex rules for printing 12432 * a "transport rejected" message. This is to address the 12433 * specific problem of having a flood of this error message 12434 * produced when a failover occurs. 12435 * 12436 * Context: Any. 12437 */ 12438 12439 static void 12440 sd_print_transport_rejected_message(struct sd_lun *un, struct sd_xbuf *xp, 12441 int code) 12442 { 12443 ASSERT(un != NULL); 12444 ASSERT(mutex_owned(SD_MUTEX(un))); 12445 ASSERT(xp != NULL); 12446 12447 /* 12448 * Print the "transport rejected" message under the following 12449 * conditions: 12450 * 12451 * - Whenever the SD_LOGMASK_DIAG bit of sd_level_mask is set 12452 * - The error code from scsi_transport() is NOT a TRAN_FATAL_ERROR. 12453 * - If the error code IS a TRAN_FATAL_ERROR, then the message is 12454 * printed the FIRST time a TRAN_FATAL_ERROR is returned from 12455 * scsi_transport(9F) (which indicates that the target might have 12456 * gone off-line). This uses the un->un_tran_fatal_count 12457 * count, which is incremented whenever a TRAN_FATAL_ERROR is 12458 * received, and reset to zero whenever a TRAN_ACCEPT is returned 12459 * from scsi_transport(). 12460 * 12461 * The FLAG_SILENT in the scsi_pkt must be CLEARED in ALL of 12462 * the preceding cases in order for the message to be printed.
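 * * Condensed, the test below reads: print iff FLAG_SILENT is clear * AND (SD_LOGMASK_DIAG is set OR code != TRAN_FATAL_ERROR OR this * is the first TRAN_FATAL_ERROR of the current streak, i.e. * un_tran_fatal_count == 1).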
12463 */ 12464 if ((xp->xb_pktp->pkt_flags & FLAG_SILENT) == 0) { 12465 if ((sd_level_mask & SD_LOGMASK_DIAG) || 12466 (code != TRAN_FATAL_ERROR) || 12467 (un->un_tran_fatal_count == 1)) { 12468 switch (code) { 12469 case TRAN_BADPKT: 12470 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 12471 "transport rejected bad packet\n"); 12472 break; 12473 case TRAN_FATAL_ERROR: 12474 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 12475 "transport rejected fatal error\n"); 12476 break; 12477 default: 12478 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 12479 "transport rejected (%d)\n", code); 12480 break; 12481 } 12482 } 12483 } 12484 } 12485 12486 12487 /* 12488 * Function: sd_add_buf_to_waitq 12489 * 12490 * Description: Add the given buf(9S) struct to the wait queue for the 12491 * instance. If sorting is enabled, then the buf is added 12492 * to the queue via an elevator sort algorithm (a la 12493 * disksort(9F)). The SD_GET_BLKNO(bp) is used as the sort key. 12494 * If sorting is not enabled, then the buf is just added 12495 * to the end of the wait queue. 12496 * 12497 * Return Code: void 12498 * 12499 * Context: Does not sleep/block, therefore technically can be called 12500 * from any context. However if sorting is enabled then the 12501 * execution time is indeterminate, and may take long if 12502 * the wait queue grows large. 12503 */ 12504 12505 static void 12506 sd_add_buf_to_waitq(struct sd_lun *un, struct buf *bp) 12507 { 12508 struct buf *ap; 12509 12510 ASSERT(bp != NULL); 12511 ASSERT(un != NULL); 12512 ASSERT(mutex_owned(SD_MUTEX(un))); 12513 12514 /* If the queue is empty, add the buf as the only entry & return. */ 12515 if (un->un_waitq_headp == NULL) { 12516 ASSERT(un->un_waitq_tailp == NULL); 12517 un->un_waitq_headp = un->un_waitq_tailp = bp; 12518 bp->av_forw = NULL; 12519 return; 12520 } 12521 12522 ASSERT(un->un_waitq_tailp != NULL); 12523 12524 /* 12525 * If sorting is disabled, just add the buf to the tail end of 12526 * the wait queue and return. 12527 */ 12528 if (un->un_f_disksort_disabled) { 12529 un->un_waitq_tailp->av_forw = bp; 12530 un->un_waitq_tailp = bp; 12531 bp->av_forw = NULL; 12532 return; 12533 } 12534 12535 /* 12536 * Sort thru the list of requests currently on the wait queue 12537 * and add the new buf request at the appropriate position. 12538 * 12539 * The un->un_waitq_headp is an activity chain pointer on which 12540 * we keep two queues, sorted in ascending SD_GET_BLKNO() order. The 12541 * first queue holds those requests which are positioned after 12542 * the current SD_GET_BLKNO() (in the first request); the second holds 12543 * requests which came in after their SD_GET_BLKNO() number was passed. 12544 * Thus we implement a one way scan, retracting after reaching 12545 * the end of the drive to the first request on the second 12546 * queue, at which time it becomes the first queue. 12547 * A one-way scan is natural because of the way UNIX read-ahead 12548 * blocks are allocated. 12549 * 12550 * If we lie after the first request, then we must locate the 12551 * second request list and add ourselves to it. 12552 */ 12553 ap = un->un_waitq_headp; 12554 if (SD_GET_BLKNO(bp) < SD_GET_BLKNO(ap)) { 12555 while (ap->av_forw != NULL) { 12556 /* 12557 * Look for an "inversion" in the (normally 12558 * ascending) block numbers. This indicates 12559 * the start of the second request list. 12560 */ 12561 if (SD_GET_BLKNO(ap->av_forw) < SD_GET_BLKNO(ap)) { 12562 /* 12563 * Search the second request list for the 12564 * first request at a larger block number. 
12565 * We go before that; however if there is 12566 * no such request, we go at the end. 12567 */ 12568 do { 12569 if (SD_GET_BLKNO(bp) < 12570 SD_GET_BLKNO(ap->av_forw)) { 12571 goto insert; 12572 } 12573 ap = ap->av_forw; 12574 } while (ap->av_forw != NULL); 12575 goto insert; /* after last */ 12576 } 12577 ap = ap->av_forw; 12578 } 12579 12580 /* 12581 * No inversions... we will go after the last, and 12582 * be the first request in the second request list. 12583 */ 12584 goto insert; 12585 } 12586 12587 /* 12588 * Request is at/after the current request... 12589 * sort in the first request list. 12590 */ 12591 while (ap->av_forw != NULL) { 12592 /* 12593 * We want to go after the current request (1) if 12594 * there is an inversion after it (i.e. it is the end 12595 * of the first request list), or (2) if the next 12596 * request is a larger block no. than our request. 12597 */ 12598 if ((SD_GET_BLKNO(ap->av_forw) < SD_GET_BLKNO(ap)) || 12599 (SD_GET_BLKNO(bp) < SD_GET_BLKNO(ap->av_forw))) { 12600 goto insert; 12601 } 12602 ap = ap->av_forw; 12603 } 12604 12605 /* 12606 * Neither a second list nor a larger request, therefore 12607 * we go at the end of the first list (which is the same 12608 * as the end of the whole shebang). 12609 */ 12610 insert: 12611 bp->av_forw = ap->av_forw; 12612 ap->av_forw = bp; 12613 12614 /* 12615 * If we inserted onto the tail end of the waitq, make sure the 12616 * tail pointer is updated. 12617 */ 12618 if (ap == un->un_waitq_tailp) { 12619 un->un_waitq_tailp = bp; 12620 } 12621 } 12622 12623 12624 /* 12625 * Function: sd_start_cmds 12626 * 12627 * Description: Remove and transport cmds from the driver queues. 12628 * 12629 * Arguments: un - pointer to the unit (soft state) struct for the target. 12630 * 12631 * immed_bp - ptr to a buf to be transported immediately. Only 12632 * the immed_bp is transported; bufs on the waitq are not 12633 * processed and the un_retry_bp is not checked. If immed_bp is 12634 * NULL, then normal queue processing is performed. 12635 * 12636 * Context: May be called from kernel thread context, interrupt context, 12637 * or runout callback context. This function may not block or 12638 * call routines that block. 12639 */ 12640 12641 static void 12642 sd_start_cmds(struct sd_lun *un, struct buf *immed_bp) 12643 { 12644 struct sd_xbuf *xp; 12645 struct buf *bp; 12646 void (*statp)(kstat_io_t *); 12647 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 12648 void (*saved_statp)(kstat_io_t *); 12649 #endif 12650 int rval; 12651 12652 ASSERT(un != NULL); 12653 ASSERT(mutex_owned(SD_MUTEX(un))); 12654 ASSERT(un->un_ncmds_in_transport >= 0); 12655 ASSERT(un->un_throttle >= 0); 12656 12657 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_start_cmds: entry\n"); 12658 12659 do { 12660 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 12661 saved_statp = NULL; 12662 #endif 12663 12664 /* 12665 * If we are syncing or dumping, fail the command to 12666 * avoid recursively calling back into scsi_transport(). 12667 * The dump I/O itself uses a separate code path so this 12668 * only prevents non-dump I/O from being sent while dumping. 12669 * File system sync takes place before dumping begins. 12670 * During panic, filesystem I/O is allowed provided 12671 * un_in_callback is <= 1. This is to prevent recursion 12672 * such as sd_start_cmds -> scsi_transport -> sdintr -> 12673 * sd_start_cmds and so on. See panic.c for more information 12674 * about the states the system can be in during panic.
12675 */ 12676 if ((un->un_state == SD_STATE_DUMPING) || 12677 (ddi_in_panic() && (un->un_in_callback > 1))) { 12678 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 12679 "sd_start_cmds: panicking\n"); 12680 goto exit; 12681 } 12682 12683 if ((bp = immed_bp) != NULL) { 12684 /* 12685 * We have a bp that must be transported immediately. 12686 * It's OK to transport the immed_bp here without doing 12687 * the throttle limit check because the immed_bp is 12688 * always used in a retry/recovery case. This means 12689 * that we know we are not at the throttle limit by 12690 * virtue of the fact that to get here we must have 12691 * already gotten a command back via sdintr(). This also 12692 * relies on (1) the command on un_retry_bp preventing 12693 * further commands from the waitq from being issued; 12694 * and (2) the code in sd_retry_command checking the 12695 * throttle limit before issuing a delayed or immediate 12696 * retry. This holds even if the throttle limit is 12697 * currently ratcheted down from its maximum value. 12698 */ 12699 statp = kstat_runq_enter; 12700 if (bp == un->un_retry_bp) { 12701 ASSERT((un->un_retry_statp == NULL) || 12702 (un->un_retry_statp == kstat_waitq_enter) || 12703 (un->un_retry_statp == 12704 kstat_runq_back_to_waitq)); 12705 /* 12706 * If the waitq kstat was incremented when 12707 * sd_set_retry_bp() queued this bp for a retry, 12708 * then we must set up statp so that the waitq 12709 * count will get decremented correctly below. 12710 * Also we must clear un->un_retry_statp to 12711 * ensure that we do not act on a stale value 12712 * in this field. 12713 */ 12714 if ((un->un_retry_statp == kstat_waitq_enter) || 12715 (un->un_retry_statp == 12716 kstat_runq_back_to_waitq)) { 12717 statp = kstat_waitq_to_runq; 12718 } 12719 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 12720 saved_statp = un->un_retry_statp; 12721 #endif 12722 un->un_retry_statp = NULL; 12723 12724 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, 12725 "sd_start_cmds: un:0x%p: GOT retry_bp:0x%p " 12726 "un_throttle:%d un_ncmds_in_transport:%d\n", 12727 un, un->un_retry_bp, un->un_throttle, 12728 un->un_ncmds_in_transport); 12729 } else { 12730 SD_TRACE(SD_LOG_IO_CORE, un, "sd_start_cmds: " 12731 "processing priority bp:0x%p\n", bp); 12732 } 12733 12734 } else if ((bp = un->un_waitq_headp) != NULL) { 12735 /* 12736 * A command on the waitq is ready to go, but do not 12737 * send it if: 12738 * 12739 * (1) the throttle limit has been reached, or 12740 * (2) a retry is pending, or 12741 * (3) a START_STOP_UNIT callback pending, or 12742 * (4) a callback for a SD_PATH_DIRECT_PRIORITY 12743 * command is pending. 12744 * 12745 * For all of these conditions, IO processing will 12746 * restart after the condition is cleared. 12747 */ 12748 if (un->un_ncmds_in_transport >= un->un_throttle) { 12749 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 12750 "sd_start_cmds: exiting, " 12751 "throttle limit reached!\n"); 12752 goto exit; 12753 } 12754 if (un->un_retry_bp != NULL) { 12755 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 12756 "sd_start_cmds: exiting, retry pending!\n"); 12757 goto exit; 12758 } 12759 if (un->un_startstop_timeid != NULL) { 12760 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 12761 "sd_start_cmds: exiting, " 12762 "START_STOP pending!\n"); 12763 goto exit; 12764 } 12765 if (un->un_direct_priority_timeid != NULL) { 12766 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 12767 "sd_start_cmds: exiting, " 12768 "SD_PATH_DIRECT_PRIORITY cmd. 
pending!\n"); 12769 goto exit; 12770 } 12771 12772 /* Dequeue the command */ 12773 un->un_waitq_headp = bp->av_forw; 12774 if (un->un_waitq_headp == NULL) { 12775 un->un_waitq_tailp = NULL; 12776 } 12777 bp->av_forw = NULL; 12778 statp = kstat_waitq_to_runq; 12779 SD_TRACE(SD_LOG_IO_CORE, un, 12780 "sd_start_cmds: processing waitq bp:0x%p\n", bp); 12781 12782 } else { 12783 /* No work to do so bail out now */ 12784 SD_TRACE(SD_LOG_IO_CORE, un, 12785 "sd_start_cmds: no more work, exiting!\n"); 12786 goto exit; 12787 } 12788 12789 /* 12790 * Reset the state to normal. This is the mechanism by which 12791 * the state transitions from either SD_STATE_RWAIT or 12792 * SD_STATE_OFFLINE to SD_STATE_NORMAL. 12793 * If state is SD_STATE_PM_CHANGING then this command is 12794 * part of the device power control and the state must 12795 * not be put back to normal. Doing so would 12796 * allow new commands to proceed when they shouldn't; 12797 * the device may be going off. 12798 */ 12799 if ((un->un_state != SD_STATE_SUSPENDED) && 12800 (un->un_state != SD_STATE_PM_CHANGING)) { 12801 New_state(un, SD_STATE_NORMAL); 12802 } 12803 12804 xp = SD_GET_XBUF(bp); 12805 ASSERT(xp != NULL); 12806 12807 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 12808 /* 12809 * Allocate the scsi_pkt if we need one, or attach DMA 12810 * resources if we have a scsi_pkt that needs them. The 12811 * latter should only occur for commands that are being 12812 * retried. 12813 */ 12814 if ((xp->xb_pktp == NULL) || 12815 ((xp->xb_pkt_flags & SD_XB_DMA_FREED) != 0)) { 12816 #else 12817 if (xp->xb_pktp == NULL) { 12818 #endif 12819 /* 12820 * There is no scsi_pkt allocated for this buf. Call 12821 * the initpkt function to allocate & init one. 12822 * 12823 * The scsi_init_pkt runout callback functionality is 12824 * implemented as follows: 12825 * 12826 * 1) The initpkt function always calls 12827 * scsi_init_pkt(9F) with sdrunout specified as the 12828 * callback routine. 12829 * 2) A successful packet allocation is initialized and 12830 * the I/O is transported. 12831 * 3) The I/O associated with an allocation resource 12832 * failure is left on its queue to be retried via 12833 * runout or the next I/O. 12834 * 4) The I/O associated with a DMA error is removed 12835 * from the queue and failed with EIO. Processing of 12836 * the transport queues is also halted to be 12837 * restarted via runout or the next I/O. 12838 * 5) The I/O associated with a CDB size or packet 12839 * size error is removed from the queue and failed 12840 * with EIO. Processing of the transport queues is 12841 * continued. 12842 * 12843 * Note: there is no interface for canceling a runout 12844 * callback. To prevent the driver from detaching or 12845 * suspending while a runout is pending, the driver 12846 * state is set to SD_STATE_RWAIT. 12847 * 12848 * Note: using the scsi_init_pkt callback facility can 12849 * result in an I/O request persisting at the head of 12850 * the list which cannot be satisfied even after 12851 * multiple retries. In the future the driver may 12852 * implement some kind of maximum runout count before 12853 * failing an I/O. 12854 * 12855 * Note: the use of funcp below may seem superfluous, 12856 * but it helps warlock figure out the correct 12857 * initpkt function calls (see [s]sd.wlcmd).
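 * (warlock is the static lock-analysis tool used on this code
 * base; [s]sd.wlcmd are its command files.)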
12858 */ 12859 struct scsi_pkt *pktp; 12860 int (*funcp)(struct buf *bp, struct scsi_pkt **pktp); 12861 12862 ASSERT(bp != un->un_rqs_bp); 12863 12864 funcp = sd_initpkt_map[xp->xb_chain_iostart]; 12865 switch ((*funcp)(bp, &pktp)) { 12866 case SD_PKT_ALLOC_SUCCESS: 12867 xp->xb_pktp = pktp; 12868 SD_TRACE(SD_LOG_IO_CORE, un, 12869 "sd_start_cmds: SD_PKT_ALLOC_SUCCESS 0x%p\n", 12870 pktp); 12871 goto got_pkt; 12872 12873 case SD_PKT_ALLOC_FAILURE: 12874 /* 12875 * Temporary (hopefully) resource depletion. 12876 * Since retries and RQS commands always have a 12877 * scsi_pkt allocated, these cases should never 12878 * get here. So the only cases this needs to 12879 * handle are a bp from the waitq (which we put 12880 * back onto the waitq for sdrunout), or a bp 12881 * sent as an immed_bp (which we just fail). 12882 */ 12883 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 12884 "sd_start_cmds: SD_PKT_ALLOC_FAILURE\n"); 12885 12886 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 12887 12888 if (bp == immed_bp) { 12889 /* 12890 * If SD_XB_DMA_FREED is clear, then 12891 * this is a failure to allocate a 12892 * scsi_pkt, and we must fail the 12893 * command. 12894 */ 12895 if ((xp->xb_pkt_flags & 12896 SD_XB_DMA_FREED) == 0) { 12897 break; 12898 } 12899 12900 /* 12901 * If this immediate command is NOT our 12902 * un_retry_bp, then we must fail it. 12903 */ 12904 if (bp != un->un_retry_bp) { 12905 break; 12906 } 12907 12908 /* 12909 * We get here if this cmd is our 12910 * un_retry_bp that was DMAFREED, but 12911 * scsi_init_pkt() failed to reallocate 12912 * DMA resources when we attempted to 12913 * retry it. This can happen when an 12914 * mpxio failover is in progress, but 12915 * we don't want to just fail the 12916 * command in this case. 12917 * 12918 * Use timeout(9F) to restart it after 12919 * a 100ms delay. We don't want to 12920 * let sdrunout() restart it, because 12921 * sdrunout() is just supposed to start 12922 * commands that are sitting on the 12923 * wait queue. The un_retry_bp stays 12924 * set until the command completes, but 12925 * sdrunout can be called many times 12926 * before that happens. Since sdrunout 12927 * cannot tell if the un_retry_bp is 12928 * already in the transport, it could 12929 * end up calling scsi_transport() for 12930 * the un_retry_bp multiple times. 12931 * 12932 * Also: don't schedule the callback 12933 * if some other callback is already 12934 * pending. 12935 */ 12936 if (un->un_retry_statp == NULL) { 12937 /* 12938 * restore the kstat pointer to 12939 * keep kstat counts coherent 12940 * when we do retry the command. 12941 */ 12942 un->un_retry_statp = 12943 saved_statp; 12944 } 12945 12946 if ((un->un_startstop_timeid == NULL) && 12947 (un->un_retry_timeid == NULL) && 12948 (un->un_direct_priority_timeid == 12949 NULL)) { 12950 12951 un->un_retry_timeid = 12952 timeout( 12953 sd_start_retry_command, 12954 un, SD_RESTART_TIMEOUT); 12955 } 12956 goto exit; 12957 } 12958 12959 #else 12960 if (bp == immed_bp) { 12961 break; /* Just fail the command */ 12962 } 12963 #endif 12964 12965 /* Add the buf back to the head of the waitq */ 12966 bp->av_forw = un->un_waitq_headp; 12967 un->un_waitq_headp = bp; 12968 if (un->un_waitq_tailp == NULL) { 12969 un->un_waitq_tailp = bp; 12970 } 12971 goto exit; 12972 12973 case SD_PKT_ALLOC_FAILURE_NO_DMA: 12974 /* 12975 * HBA DMA resource failure. Fail the command 12976 * and continue processing of the queues.
12977 */ 12978 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 12979 "sd_start_cmds: " 12980 "SD_PKT_ALLOC_FAILURE_NO_DMA\n"); 12981 break; 12982 12983 case SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL: 12984 /* 12985 * Note:x86: Partial DMA mapping not supported 12986 * for USCSI commands, and not all of the needed 12987 * DMA resources were allocated. 12988 */ 12989 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 12990 "sd_start_cmds: " 12991 "SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL\n"); 12992 break; 12993 12994 case SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL: 12995 /* 12996 * Note:x86: Request cannot fit into CDB based 12997 * on lba and len. 12998 */ 12999 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13000 "sd_start_cmds: " 13001 "SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL\n"); 13002 break; 13003 13004 default: 13005 /* Should NEVER get here! */ 13006 panic("scsi_initpkt error"); 13007 /*NOTREACHED*/ 13008 } 13009 13010 /* 13011 * Fatal error in allocating a scsi_pkt for this buf. 13012 * Update kstats & return the buf with an error code. 13013 * We must use sd_return_failed_command_no_restart() to 13014 * avoid a recursive call back into sd_start_cmds(). 13015 * However this also means that we must keep processing 13016 * the waitq here in order to avoid stalling. 13017 */ 13018 if (statp == kstat_waitq_to_runq) { 13019 SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp); 13020 } 13021 sd_return_failed_command_no_restart(un, bp, EIO); 13022 if (bp == immed_bp) { 13023 /* immed_bp is gone by now, so clear this */ 13024 immed_bp = NULL; 13025 } 13026 continue; 13027 } 13028 got_pkt: 13029 if (bp == immed_bp) { 13030 /* goto the head of the class.... */ 13031 xp->xb_pktp->pkt_flags |= FLAG_HEAD; 13032 } 13033 13034 un->un_ncmds_in_transport++; 13035 SD_UPDATE_KSTATS(un, statp, bp); 13036 13037 /* 13038 * Call scsi_transport() to send the command to the target. 13039 * According to SCSA architecture, we must drop the mutex here 13040 * before calling scsi_transport() in order to avoid deadlock. 13041 * Note that the scsi_pkt's completion routine can be executed 13042 * (from interrupt context) even before the call to 13043 * scsi_transport() returns. 13044 */ 13045 SD_TRACE(SD_LOG_IO_CORE, un, 13046 "sd_start_cmds: calling scsi_transport()\n"); 13047 DTRACE_PROBE1(scsi__transport__dispatch, struct buf *, bp); 13048 13049 mutex_exit(SD_MUTEX(un)); 13050 rval = scsi_transport(xp->xb_pktp); 13051 mutex_enter(SD_MUTEX(un)); 13052 13053 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13054 "sd_start_cmds: scsi_transport() returned %d\n", rval); 13055 13056 switch (rval) { 13057 case TRAN_ACCEPT: 13058 /* Clear this with every pkt accepted by the HBA */ 13059 un->un_tran_fatal_count = 0; 13060 break; /* Success; try the next cmd (if any) */ 13061 13062 case TRAN_BUSY: 13063 un->un_ncmds_in_transport--; 13064 ASSERT(un->un_ncmds_in_transport >= 0); 13065 13066 /* 13067 * Don't retry request sense; the sense data 13068 * is lost when another request is sent. 13069 * Free up the rqs buf and retry 13070 * the original failed cmd. Update kstat. 13071 */ 13072 if (bp == un->un_rqs_bp) { 13073 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 13074 bp = sd_mark_rqs_idle(un, xp); 13075 sd_retry_command(un, bp, SD_RETRIES_STANDARD, 13076 NULL, NULL, EIO, SD_BSY_TIMEOUT / 500, 13077 kstat_waitq_enter); 13078 goto exit; 13079 } 13080 13081 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 13082 /* 13083 * Free the DMA resources for the scsi_pkt.
This will 13084 * allow mpxio to select another path the next time 13085 * we call scsi_transport() with this scsi_pkt. 13086 * See sdintr() for the rationalization behind this. 13087 */ 13088 if ((un->un_f_is_fibre == TRUE) && 13089 ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) && 13090 ((xp->xb_pktp->pkt_flags & FLAG_SENSING) == 0)) { 13091 scsi_dmafree(xp->xb_pktp); 13092 xp->xb_pkt_flags |= SD_XB_DMA_FREED; 13093 } 13094 #endif 13095 13096 if (SD_IS_DIRECT_PRIORITY(SD_GET_XBUF(bp))) { 13097 /* 13098 * Commands that are SD_PATH_DIRECT_PRIORITY 13099 * are for error recovery situations. These do 13100 * not use the normal command waitq, so if they 13101 * get a TRAN_BUSY we cannot put them back onto 13102 * the waitq for later retry. One possible 13103 * problem is that there could already be some 13104 * other command on un_retry_bp that is waiting 13105 * for this one to complete, so we would be 13106 * deadlocked if we put this command back onto 13107 * the waitq for later retry (since un_retry_bp 13108 * must complete before the driver gets back to 13109 * commands on the waitq). 13110 * 13111 * To avoid deadlock we must schedule a callback 13112 * that will restart this command after a set 13113 * interval. This should keep retrying for as 13114 * long as the underlying transport keeps 13115 * returning TRAN_BUSY (just like for other 13116 * commands). Use the same timeout interval as 13117 * for the ordinary TRAN_BUSY retry. 13118 */ 13119 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13120 "sd_start_cmds: scsi_transport() returned " 13121 "TRAN_BUSY for DIRECT_PRIORITY cmd!\n"); 13122 13123 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 13124 un->un_direct_priority_timeid = 13125 timeout(sd_start_direct_priority_command, 13126 bp, SD_BSY_TIMEOUT / 500); 13127 13128 goto exit; 13129 } 13130 13131 /* 13132 * For TRAN_BUSY, we want to reduce the throttle value, 13133 * unless we are retrying a command. 13134 */ 13135 if (bp != un->un_retry_bp) { 13136 sd_reduce_throttle(un, SD_THROTTLE_TRAN_BUSY); 13137 } 13138 13139 /* 13140 * Set up the bp to be tried again 10 ms later. 13141 * Note:x86: Is there a timeout value in the sd_lun 13142 * for this condition? 13143 */ 13144 sd_set_retry_bp(un, bp, SD_BSY_TIMEOUT / 500, 13145 kstat_runq_back_to_waitq); 13146 goto exit; 13147 13148 case TRAN_FATAL_ERROR: 13149 un->un_tran_fatal_count++; 13150 /* FALLTHRU */ 13151 13152 case TRAN_BADPKT: 13153 default: 13154 un->un_ncmds_in_transport--; 13155 ASSERT(un->un_ncmds_in_transport >= 0); 13156 13157 /* 13158 * If this is our REQUEST SENSE command with a 13159 * transport error, we must get back the pointers 13160 * to the original buf, and mark the REQUEST 13161 * SENSE command as "available". 13162 */ 13163 if (bp == un->un_rqs_bp) { 13164 bp = sd_mark_rqs_idle(un, xp); 13165 xp = SD_GET_XBUF(bp); 13166 } else { 13167 /* 13168 * Legacy behavior: do not update transport 13169 * error count for request sense commands. 13170 */ 13171 SD_UPDATE_ERRSTATS(un, sd_transerrs); 13172 } 13173 13174 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 13175 sd_print_transport_rejected_message(un, xp, rval); 13176 13177 /* 13178 * We must use sd_return_failed_command_no_restart() to 13179 * avoid a recursive call back into sd_start_cmds(). 13180 * However this also means that we must keep processing 13181 * the waitq here in order to avoid stalling. 
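 * (The recursion being avoided here is
 * sd_return_failed_command() -> sd_return_command() ->
 * sd_start_cmds(); see sd_return_command() below.)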
13182 */ 13183 sd_return_failed_command_no_restart(un, bp, EIO); 13184 13185 /* 13186 * Notify any threads waiting in sd_ddi_suspend() that 13187 * a command completion has occurred. 13188 */ 13189 if (un->un_state == SD_STATE_SUSPENDED) { 13190 cv_broadcast(&un->un_disk_busy_cv); 13191 } 13192 13193 if (bp == immed_bp) { 13194 /* immed_bp is gone by now, so clear this */ 13195 immed_bp = NULL; 13196 } 13197 break; 13198 } 13199 13200 } while (immed_bp == NULL); 13201 13202 exit: 13203 ASSERT(mutex_owned(SD_MUTEX(un))); 13204 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_start_cmds: exit\n"); 13205 } 13206 13207 13208 /* 13209 * Function: sd_return_command 13210 * 13211 * Description: Returns a command to its originator (with or without an 13212 * error). Also starts commands waiting to be transported 13213 * to the target. 13214 * 13215 * Context: May be called from interrupt, kernel, or timeout context 13216 */ 13217 13218 static void 13219 sd_return_command(struct sd_lun *un, struct buf *bp) 13220 { 13221 struct sd_xbuf *xp; 13222 #if defined(__i386) || defined(__amd64) 13223 struct scsi_pkt *pktp; 13224 #endif 13225 13226 ASSERT(bp != NULL); 13227 ASSERT(un != NULL); 13228 ASSERT(mutex_owned(SD_MUTEX(un))); 13229 ASSERT(bp != un->un_rqs_bp); 13230 xp = SD_GET_XBUF(bp); 13231 ASSERT(xp != NULL); 13232 13233 #if defined(__i386) || defined(__amd64) 13234 pktp = SD_GET_PKTP(bp); 13235 #endif 13236 13237 SD_TRACE(SD_LOG_IO_CORE, un, "sd_return_command: entry\n"); 13238 13239 #if defined(__i386) || defined(__amd64) 13240 /* 13241 * Note:x86: check for the "sdrestart failed" case. 13242 */ 13243 if (((xp->xb_pkt_flags & SD_XB_USCSICMD) != SD_XB_USCSICMD) && 13244 (geterror(bp) == 0) && (xp->xb_dma_resid != 0) && 13245 (xp->xb_pktp->pkt_resid == 0)) { 13246 13247 if (sd_setup_next_xfer(un, bp, pktp, xp) != 0) { 13248 /* 13249 * Successfully set up next portion of cmd 13250 * transfer, try sending it 13251 */ 13252 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, 13253 NULL, NULL, 0, (clock_t)0, NULL); 13254 sd_start_cmds(un, NULL); 13255 return; /* Note:x86: need a return here? */ 13256 } 13257 } 13258 #endif 13259 13260 /* 13261 * If this is the failfast bp, clear it from un_failfast_bp. This 13262 * can happen if upon being re-tried the failfast bp either 13263 * succeeded or encountered another error (possibly even a different 13264 * error than the one that precipitated the failfast state, but in 13265 * that case it would have had to exhaust retries as well). Regardless, 13266 * this should not occur whenever the instance is in the active 13267 * failfast state. 13268 */ 13269 if (bp == un->un_failfast_bp) { 13270 ASSERT(un->un_failfast_state == SD_FAILFAST_INACTIVE); 13271 un->un_failfast_bp = NULL; 13272 } 13273 13274 /* 13275 * Clear the failfast state upon successful completion of ANY cmd. 13276 */ 13277 if (bp->b_error == 0) { 13278 un->un_failfast_state = SD_FAILFAST_INACTIVE; 13279 } 13280 13281 /* 13282 * This is used if the command was retried one or more times. Show that 13283 * we are done with it, and allow processing of the waitq to resume. 
13284 */ 13285 if (bp == un->un_retry_bp) { 13286 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13287 "sd_return_command: un:0x%p: " 13288 "RETURNING retry_bp:0x%p\n", un, un->un_retry_bp); 13289 un->un_retry_bp = NULL; 13290 un->un_retry_statp = NULL; 13291 } 13292 13293 SD_UPDATE_RDWR_STATS(un, bp); 13294 SD_UPDATE_PARTITION_STATS(un, bp); 13295 13296 switch (un->un_state) { 13297 case SD_STATE_SUSPENDED: 13298 /* 13299 * Notify any threads waiting in sd_ddi_suspend() that 13300 * a command completion has occurred. 13301 */ 13302 cv_broadcast(&un->un_disk_busy_cv); 13303 break; 13304 default: 13305 sd_start_cmds(un, NULL); 13306 break; 13307 } 13308 13309 /* Return this command up the iodone chain to its originator. */ 13310 mutex_exit(SD_MUTEX(un)); 13311 13312 (*(sd_destroypkt_map[xp->xb_chain_iodone]))(bp); 13313 xp->xb_pktp = NULL; 13314 13315 SD_BEGIN_IODONE(xp->xb_chain_iodone, un, bp); 13316 13317 ASSERT(!mutex_owned(SD_MUTEX(un))); 13318 mutex_enter(SD_MUTEX(un)); 13319 13320 SD_TRACE(SD_LOG_IO_CORE, un, "sd_return_command: exit\n"); 13321 } 13322 13323 13324 /* 13325 * Function: sd_return_failed_command 13326 * 13327 * Description: Command completion when an error occurred. 13328 * 13329 * Context: May be called from interrupt context 13330 */ 13331 13332 static void 13333 sd_return_failed_command(struct sd_lun *un, struct buf *bp, int errcode) 13334 { 13335 ASSERT(bp != NULL); 13336 ASSERT(un != NULL); 13337 ASSERT(mutex_owned(SD_MUTEX(un))); 13338 13339 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13340 "sd_return_failed_command: entry\n"); 13341 13342 /* 13343 * b_resid could already be nonzero due to a partial data 13344 * transfer, so do not change it here. 13345 */ 13346 SD_BIOERROR(bp, errcode); 13347 13348 sd_return_command(un, bp); 13349 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13350 "sd_return_failed_command: exit\n"); 13351 } 13352 13353 13354 /* 13355 * Function: sd_return_failed_command_no_restart 13356 * 13357 * Description: Same as sd_return_failed_command, but ensures that no 13358 * call back into sd_start_cmds will be issued. 13359 * 13360 * Context: May be called from interrupt context 13361 */ 13362 13363 static void 13364 sd_return_failed_command_no_restart(struct sd_lun *un, struct buf *bp, 13365 int errcode) 13366 { 13367 struct sd_xbuf *xp; 13368 13369 ASSERT(bp != NULL); 13370 ASSERT(un != NULL); 13371 ASSERT(mutex_owned(SD_MUTEX(un))); 13372 xp = SD_GET_XBUF(bp); 13373 ASSERT(xp != NULL); 13374 ASSERT(errcode != 0); 13375 13376 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13377 "sd_return_failed_command_no_restart: entry\n"); 13378 13379 /* 13380 * b_resid could already be nonzero due to a partial data 13381 * transfer, so do not change it here. 13382 */ 13383 SD_BIOERROR(bp, errcode); 13384 13385 /* 13386 * If this is the failfast bp, clear it. This can happen if the 13387 * failfast bp encountered a fatal error when we attempted to 13388 * re-try it (such as a scsi_transport(9F) failure). However 13389 * we should NOT be in an active failfast state if the failfast 13390 * bp is not NULL. 13391 */ 13392 if (bp == un->un_failfast_bp) { 13393 ASSERT(un->un_failfast_state == SD_FAILFAST_INACTIVE); 13394 un->un_failfast_bp = NULL; 13395 } 13396 13397 if (bp == un->un_retry_bp) { 13398 /* 13399 * This command was retried one or more times. Show that we are 13400 * done with it, and allow processing of the waitq to resume.
13401 */ 13402 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13403 "sd_return_failed_command_no_restart: " 13404 " un:0x%p: RETURNING retry_bp:0x%p\n", un, un->un_retry_bp); 13405 un->un_retry_bp = NULL; 13406 un->un_retry_statp = NULL; 13407 } 13408 13409 SD_UPDATE_RDWR_STATS(un, bp); 13410 SD_UPDATE_PARTITION_STATS(un, bp); 13411 13412 mutex_exit(SD_MUTEX(un)); 13413 13414 if (xp->xb_pktp != NULL) { 13415 (*(sd_destroypkt_map[xp->xb_chain_iodone]))(bp); 13416 xp->xb_pktp = NULL; 13417 } 13418 13419 SD_BEGIN_IODONE(xp->xb_chain_iodone, un, bp); 13420 13421 mutex_enter(SD_MUTEX(un)); 13422 13423 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13424 "sd_return_failed_command_no_restart: exit\n"); 13425 } 13426 13427 13428 /* 13429 * Function: sd_retry_command 13430 * 13431 * Description: Queue up a command for retry, or (optionally) fail it 13432 * if retry counts are exhausted. 13433 * 13434 * Arguments: un - Pointer to the sd_lun struct for the target. 13435 * 13436 * bp - Pointer to the buf for the command to be retried. 13437 * 13438 * retry_check_flag - Flag to see which (if any) of the retry 13439 * counts should be decremented/checked. If the indicated 13440 * retry count is exhausted, then the command will not be 13441 * retried; it will be failed instead. This should use a 13442 * value equal to one of the following: 13443 * 13444 * SD_RETRIES_NOCHECK 13445 * SD_RETRIES_STANDARD 13446 * SD_RETRIES_VICTIM 13447 * 13448 * Optionally may be bitwise-OR'ed with SD_RETRIES_ISOLATE 13449 * if the check should be made to see if FLAG_ISOLATE is set 13450 * in the pkt. If FLAG_ISOLATE is set, then the command is 13451 * not retried; it is simply failed. 13452 * 13453 * user_funcp - Ptr to function to call before dispatching the 13454 * command. May be NULL if no action needs to be performed. 13455 * (Primarily intended for printing messages.) 13456 * 13457 * user_arg - Optional argument to be passed along to 13458 * the user_funcp call. 13459 * 13460 * failure_code - errno return code to set in the bp if the 13461 * command is going to be failed. 13462 * 13463 * retry_delay - Retry delay interval in (clock_t) units. May 13464 * be zero, which indicates that the command should be retried 13465 * immediately (i.e., without an intervening delay). 13466 * 13467 * statp - Ptr to kstat function to be updated if the command 13468 * is queued for a delayed retry. May be NULL if no kstat 13469 * update is desired. 13470 * 13471 * Context: May be called from interrupt context. 13472 */ 13473 13474 static void 13475 sd_retry_command(struct sd_lun *un, struct buf *bp, int retry_check_flag, 13476 void (*user_funcp)(struct sd_lun *un, struct buf *bp, void *argp, int 13477 code), void *user_arg, int failure_code, clock_t retry_delay, 13478 void (*statp)(kstat_io_t *)) 13479 { 13480 struct sd_xbuf *xp; 13481 struct scsi_pkt *pktp; 13482 13483 ASSERT(un != NULL); 13484 ASSERT(mutex_owned(SD_MUTEX(un))); 13485 ASSERT(bp != NULL); 13486 xp = SD_GET_XBUF(bp); 13487 ASSERT(xp != NULL); 13488 pktp = SD_GET_PKTP(bp); 13489 ASSERT(pktp != NULL); 13490 13491 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, 13492 "sd_retry_command: entry: bp:0x%p xp:0x%p\n", bp, xp); 13493 13494 /* 13495 * If we are syncing or dumping, fail the command to avoid 13496 * recursively calling back into scsi_transport(). 13497 */ 13498 if (ddi_in_panic()) { 13499 goto fail_command_no_log; 13500 } 13501 13502 /* 13503 * We should never be retrying a command with FLAG_DIAGNOSE set, so 13504 * log an error and fail the command.
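 * (FLAG_DIAGNOSE indicates that no error recovery should be
 * attempted for the command, so reaching this retry path with
 * it set would be a driver logic error.)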
13505 */ 13506 if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) { 13507 scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE, 13508 "ERROR, retrying FLAG_DIAGNOSE command.\n"); 13509 sd_dump_memory(un, SD_LOG_IO, "CDB", 13510 (uchar_t *)pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX); 13511 sd_dump_memory(un, SD_LOG_IO, "Sense Data", 13512 (uchar_t *)xp->xb_sense_data, SENSE_LENGTH, SD_LOG_HEX); 13513 goto fail_command; 13514 } 13515 13516 /* 13517 * If we are suspended, then put the command onto the head of the 13518 * wait queue since we don't want to start more commands, and 13519 * clear the un_retry_bp. When we are resumed, the commands on 13520 * the wait queue will be handled. 13521 */ 13522 switch (un->un_state) { 13523 case SD_STATE_SUSPENDED: 13524 case SD_STATE_DUMPING: 13525 bp->av_forw = un->un_waitq_headp; 13526 un->un_waitq_headp = bp; 13527 if (un->un_waitq_tailp == NULL) { 13528 un->un_waitq_tailp = bp; 13529 } 13530 if (bp == un->un_retry_bp) { 13531 un->un_retry_bp = NULL; 13532 un->un_retry_statp = NULL; 13533 } 13534 SD_UPDATE_KSTATS(un, kstat_waitq_enter, bp); 13535 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: " 13536 "exiting; cmd bp:0x%p requeued for SUSPEND/DUMP\n", bp); 13537 return; 13538 default: 13539 break; 13540 } 13541 13542 /* 13543 * If the caller wants us to check FLAG_ISOLATE, then see if that 13544 * is set; if it is then we do not want to retry the command. 13545 * Normally, FLAG_ISOLATE is only used with USCSI cmds. 13546 */ 13547 if ((retry_check_flag & SD_RETRIES_ISOLATE) != 0) { 13548 if ((pktp->pkt_flags & FLAG_ISOLATE) != 0) { 13549 goto fail_command; 13550 } 13551 } 13552 13553 13554 /* 13555 * If SD_RETRIES_FAILFAST is set, it indicates that either a 13556 * command timeout or a selection timeout has occurred. This means 13557 * that we were unable to establish any kind of communication with 13558 * the target, and subsequent retries and/or commands are likely 13559 * to encounter similar results and take a long time to complete. 13560 * 13561 * If this is a failfast error condition, we need to update the 13562 * failfast state, even if this bp does not have B_FAILFAST set. 13563 */ 13564 if (retry_check_flag & SD_RETRIES_FAILFAST) { 13565 if (un->un_failfast_state == SD_FAILFAST_ACTIVE) { 13566 ASSERT(un->un_failfast_bp == NULL); 13567 /* 13568 * If we are already in the active failfast state, and 13569 * another failfast error condition has been detected, 13570 * then fail this command if it has B_FAILFAST set. 13571 * If B_FAILFAST is clear, then maintain the legacy 13572 * behavior of retrying heroically, even though this will 13573 * take a lot more time to fail the command. 13574 */ 13575 if (bp->b_flags & B_FAILFAST) { 13576 goto fail_command; 13577 } 13578 } else { 13579 /* 13580 * We're not in the active failfast state, but we 13581 * have a failfast error condition, so we must begin 13582 * transition to the next state. We do this regardless 13583 * of whether or not this bp has B_FAILFAST set. 13584 */ 13585 if (un->un_failfast_bp == NULL) { 13586 /* 13587 * This is the first bp to meet a failfast 13588 * condition so save it on un_failfast_bp & 13589 * do normal retry processing. Do not enter 13590 * active failfast state yet. This marks 13591 * entry into the "failfast pending" state.
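 *
 * In summary, the failfast transitions implemented here are:
 *   inactive --(first failfast error)--> pending
 *       (un_failfast_bp records the bp)
 *   pending --(same bp fails again)--> active
 *       (queues are flushed via sd_failfast_flushq())
 *   any state --(successful completion of any cmd)--> inactive
 *       (see sd_return_command() above)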
13592 */ 13593 un->un_failfast_bp = bp; 13594 13595 } else if (un->un_failfast_bp == bp) { 13596 /* 13597 * This is the second time *this* bp has 13598 * encountered a failfast error condition, 13599 * so enter active failfast state & flush 13600 * queues as appropriate. 13601 */ 13602 un->un_failfast_state = SD_FAILFAST_ACTIVE; 13603 un->un_failfast_bp = NULL; 13604 sd_failfast_flushq(un); 13605 13606 /* 13607 * Fail this bp now if B_FAILFAST set; 13608 * otherwise continue with retries. (It would 13609 * be pretty ironic if this bp succeeded on a 13610 * subsequent retry after we just flushed all 13611 * the queues). 13612 */ 13613 if (bp->b_flags & B_FAILFAST) { 13614 goto fail_command; 13615 } 13616 13617 #if !defined(lint) && !defined(__lint) 13618 } else { 13619 /* 13620 * If neither of the preceding conditionals 13621 * was true, it means that there is some 13622 * *other* bp that has met an initial failfast 13623 * condition and is currently either being 13624 * retried or is waiting to be retried. In 13625 * that case we should perform normal retry 13626 * processing on *this* bp, since there is a 13627 * chance that the current failfast condition 13628 * is transient and recoverable. If that does 13629 * not turn out to be the case, then retries 13630 * will be cleared when the wait queue is 13631 * flushed anyway. 13632 */ 13633 #endif 13634 } 13635 } 13636 } else { 13637 /* 13638 * SD_RETRIES_FAILFAST is clear, which indicates that we 13639 * likely were able to at least establish some level of 13640 * communication with the target and subsequent commands 13641 * and/or retries are likely to get through to the target. 13642 * In this case we want to be aggressive about clearing 13643 * the failfast state. Note that this does not affect 13644 * the "failfast pending" condition. 13645 */ 13646 un->un_failfast_state = SD_FAILFAST_INACTIVE; 13647 } 13648 13649 13650 /* 13651 * Check the specified retry count to see if we can still do 13652 * any retries with this pkt before we should fail it. 13653 */ 13654 switch (retry_check_flag & SD_RETRIES_MASK) { 13655 case SD_RETRIES_VICTIM: 13656 /* 13657 * Check the victim retry count. If exhausted, then fall 13658 * thru & check against the standard retry count. 13659 */ 13660 if (xp->xb_victim_retry_count < un->un_victim_retry_count) { 13661 /* Increment count & proceed with the retry */ 13662 xp->xb_victim_retry_count++; 13663 break; 13664 } 13665 /* Victim retries exhausted, fall back to std. retries... */ 13666 /* FALLTHRU */ 13667 13668 case SD_RETRIES_STANDARD: 13669 if (xp->xb_retry_count >= un->un_retry_count) { 13670 /* Retries exhausted, fail the command */ 13671 SD_TRACE(SD_LOG_IO_CORE, un, 13672 "sd_retry_command: retries exhausted!\n"); 13673 /* 13674 * Update b_resid for failed SCMD_READ & SCMD_WRITE 13675 * commands with nonzero pkt_resid.
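 * (Hypothetical example, assuming SD_UPDATE_B_RESID() reflects
 * pkt_resid into b_resid: a 64KB SCMD_READ that moved only
 * 48KB completes with pkt_resid of 16KB, which then shows up
 * in b_resid when the buf is failed.)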
13676 */ 13677 if ((pktp->pkt_reason == CMD_CMPLT) && 13678 (SD_GET_PKT_STATUS(pktp) == STATUS_GOOD) && 13679 (pktp->pkt_resid != 0)) { 13680 uchar_t op = SD_GET_PKT_OPCODE(pktp) & 0x1F; 13681 if ((op == SCMD_READ) || (op == SCMD_WRITE)) { 13682 SD_UPDATE_B_RESID(bp, pktp); 13683 } 13684 } 13685 goto fail_command; 13686 } 13687 xp->xb_retry_count++; 13688 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13689 "sd_retry_command: retry count:%d\n", xp->xb_retry_count); 13690 break; 13691 13692 case SD_RETRIES_UA: 13693 if (xp->xb_ua_retry_count >= sd_ua_retry_count) { 13694 /* Retries exhausted, fail the command */ 13695 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 13696 "Unit Attention retries exhausted. " 13697 "Check the target.\n"); 13698 goto fail_command; 13699 } 13700 xp->xb_ua_retry_count++; 13701 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13702 "sd_retry_command: retry count:%d\n", 13703 xp->xb_ua_retry_count); 13704 break; 13705 13706 case SD_RETRIES_BUSY: 13707 if (xp->xb_retry_count >= un->un_busy_retry_count) { 13708 /* Retries exhausted, fail the command */ 13709 SD_TRACE(SD_LOG_IO_CORE, un, 13710 "sd_retry_command: retries exhausted!\n"); 13711 goto fail_command; 13712 } 13713 xp->xb_retry_count++; 13714 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13715 "sd_retry_command: retry count:%d\n", xp->xb_retry_count); 13716 break; 13717 13718 case SD_RETRIES_NOCHECK: 13719 default: 13720 /* No retry count to check. Just proceed with the retry */ 13721 break; 13722 } 13723 13724 xp->xb_pktp->pkt_flags |= FLAG_HEAD; 13725 13726 /* 13727 * If we were given a zero timeout, we must attempt to retry the 13728 * command immediately (ie, without a delay). 13729 */ 13730 if (retry_delay == 0) { 13731 /* 13732 * Check some limiting conditions to see if we can actually 13733 * do the immediate retry. If we cannot, then we must 13734 * fall back to queueing up a delayed retry. 13735 */ 13736 if (un->un_ncmds_in_transport >= un->un_throttle) { 13737 /* 13738 * We are at the throttle limit for the target, 13739 * fall back to delayed retry. 13740 */ 13741 retry_delay = SD_BSY_TIMEOUT; 13742 statp = kstat_waitq_enter; 13743 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13744 "sd_retry_command: immed. retry hit " 13745 "throttle!\n"); 13746 } else { 13747 /* 13748 * We're clear to proceed with the immediate retry. 13749 * First call the user-provided function (if any) 13750 */ 13751 if (user_funcp != NULL) { 13752 (*user_funcp)(un, bp, user_arg, 13753 SD_IMMEDIATE_RETRY_ISSUED); 13754 #ifdef __lock_lint 13755 sd_print_incomplete_msg(un, bp, user_arg, 13756 SD_IMMEDIATE_RETRY_ISSUED); 13757 sd_print_cmd_incomplete_msg(un, bp, user_arg, 13758 SD_IMMEDIATE_RETRY_ISSUED); 13759 sd_print_sense_failed_msg(un, bp, user_arg, 13760 SD_IMMEDIATE_RETRY_ISSUED); 13761 #endif 13762 } 13763 13764 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13765 "sd_retry_command: issuing immediate retry\n"); 13766 13767 /* 13768 * Call sd_start_cmds() to transport the command to 13769 * the target. 13770 */ 13771 sd_start_cmds(un, bp); 13772 13773 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13774 "sd_retry_command exit\n"); 13775 return; 13776 } 13777 } 13778 13779 /* 13780 * Set up to retry the command after a delay. 
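 * (The actual requeueing is done by sd_set_retry_bp(), called
 * below.)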
13781 * First call the user-provided function (if any) 13782 */ 13783 if (user_funcp != NULL) { 13784 (*user_funcp)(un, bp, user_arg, SD_DELAYED_RETRY_ISSUED); 13785 } 13786 13787 sd_set_retry_bp(un, bp, retry_delay, statp); 13788 13789 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: exit\n"); 13790 return; 13791 13792 fail_command: 13793 13794 if (user_funcp != NULL) { 13795 (*user_funcp)(un, bp, user_arg, SD_NO_RETRY_ISSUED); 13796 } 13797 13798 fail_command_no_log: 13799 13800 SD_INFO(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13801 "sd_retry_command: returning failed command\n"); 13802 13803 sd_return_failed_command(un, bp, failure_code); 13804 13805 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: exit\n"); 13806 } 13807 13808 13809 /* 13810 * Function: sd_set_retry_bp 13811 * 13812 * Description: Set up the given bp for retry. 13813 * 13814 * Arguments: un - ptr to associated softstate 13815 * bp - ptr to buf(9S) for the command 13816 * retry_delay - time interval before issuing retry (may be 0) 13817 * statp - optional pointer to kstat function 13818 * 13819 * Context: May be called under interrupt context 13820 */ 13821 13822 static void 13823 sd_set_retry_bp(struct sd_lun *un, struct buf *bp, clock_t retry_delay, 13824 void (*statp)(kstat_io_t *)) 13825 { 13826 ASSERT(un != NULL); 13827 ASSERT(mutex_owned(SD_MUTEX(un))); 13828 ASSERT(bp != NULL); 13829 13830 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, 13831 "sd_set_retry_bp: entry: un:0x%p bp:0x%p\n", un, bp); 13832 13833 /* 13834 * Indicate that the command is being retried. This will not allow any 13835 * other commands on the wait queue to be transported to the target 13836 * until this command has been completed (success or failure). The 13837 * "retry command" is not transported to the target until the given 13838 * time delay expires, unless the user specified a 0 retry_delay. 13839 * 13840 * Note: the timeout(9F) callback routine is what actually calls 13841 * sd_start_cmds() to transport the command, with the exception of a 13842 * zero retry_delay. The only current implementor of a zero retry delay 13843 * is the case where a START_STOP_UNIT is sent to spin-up a device. 13844 */ 13845 if (un->un_retry_bp == NULL) { 13846 ASSERT(un->un_retry_statp == NULL); 13847 un->un_retry_bp = bp; 13848 13849 /* 13850 * If the user has not specified a delay the command should 13851 * be queued and no timeout should be scheduled. 13852 */ 13853 if (retry_delay == 0) { 13854 /* 13855 * Save the kstat pointer that will be used in the 13856 * call to SD_UPDATE_KSTATS() below, so that 13857 * sd_start_cmds() can correctly decrement the waitq 13858 * count when it is time to transport this command. 13859 */ 13860 un->un_retry_statp = statp; 13861 goto done; 13862 } 13863 } 13864 13865 if (un->un_retry_bp == bp) { 13866 /* 13867 * Save the kstat pointer that will be used in the call to 13868 * SD_UPDATE_KSTATS() below, so that sd_start_cmds() can 13869 * correctly decrement the waitq count when it is time to 13870 * transport this command. 13871 */ 13872 un->un_retry_statp = statp; 13873 13874 /* 13875 * Schedule a timeout if: 13876 * 1) The user has specified a delay. 13877 * 2) There is not a START_STOP_UNIT callback pending. 13878 * 13879 * If no delay has been specified, then it is up to the caller 13880 * to ensure that IO processing continues without stalling. 13881 * Effectively, this means that the caller will issue the 13882 * required call to sd_start_cmds(). 
The START_STOP_UNIT 13883 * callback does this after the START STOP UNIT command has 13884 * completed. In either of these cases we should not schedule 13885 * a timeout callback here. Also don't schedule the timeout if 13886 * an SD_PATH_DIRECT_PRIORITY command is waiting to restart. 13887 */ 13888 if ((retry_delay != 0) && (un->un_startstop_timeid == NULL) && 13889 (un->un_direct_priority_timeid == NULL)) { 13890 un->un_retry_timeid = 13891 timeout(sd_start_retry_command, un, retry_delay); 13892 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13893 "sd_set_retry_bp: setting timeout: un: 0x%p" 13894 " bp:0x%p un_retry_timeid:0x%p\n", 13895 un, bp, un->un_retry_timeid); 13896 } 13897 } else { 13898 /* 13899 * We only get in here if there is already another command 13900 * waiting to be retried. In this case, we just put the 13901 * given command onto the wait queue, so it can be transported 13902 * after the current retry command has completed. 13903 * 13904 * Also we have to make sure that if the command at the head 13905 * of the wait queue is the un_failfast_bp, we do not put any 13906 * other commands that are to be retried ahead of it. 13907 */ 13908 if ((un->un_failfast_bp != NULL) && 13909 (un->un_failfast_bp == un->un_waitq_headp)) { 13910 /* 13911 * Enqueue this command AFTER the first command on 13912 * the wait queue (which is also un_failfast_bp). 13913 */ 13914 bp->av_forw = un->un_waitq_headp->av_forw; 13915 un->un_waitq_headp->av_forw = bp; 13916 if (un->un_waitq_headp == un->un_waitq_tailp) { 13917 un->un_waitq_tailp = bp; 13918 } 13919 } else { 13920 /* Enqueue this command at the head of the waitq. */ 13921 bp->av_forw = un->un_waitq_headp; 13922 un->un_waitq_headp = bp; 13923 if (un->un_waitq_tailp == NULL) { 13924 un->un_waitq_tailp = bp; 13925 } 13926 } 13927 13928 if (statp == NULL) { 13929 statp = kstat_waitq_enter; 13930 } 13931 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13932 "sd_set_retry_bp: un:0x%p already delayed retry\n", un); 13933 } 13934 13935 done: 13936 if (statp != NULL) { 13937 SD_UPDATE_KSTATS(un, statp, bp); 13938 } 13939 13940 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13941 "sd_set_retry_bp: exit un:0x%p\n", un); 13942 } 13943 13944 13945 /* 13946 * Function: sd_start_retry_command 13947 * 13948 * Description: Start the command that has been waiting on the target's 13949 * retry queue. Called from timeout(9F) context after the 13950 * retry delay interval has expired. 13951 * 13952 * Arguments: arg - pointer to associated softstate for the device. 13953 * 13954 * Context: timeout(9F) thread context. May not sleep.
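 *
 * This callback is armed from sd_set_retry_bp() and
 * sd_start_cmds(), e.g.:
 *
 *	un->un_retry_timeid =
 *	    timeout(sd_start_retry_command, un, retry_delay);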
13955 */ 13956 13957 static void 13958 sd_start_retry_command(void *arg) 13959 { 13960 struct sd_lun *un = arg; 13961 13962 ASSERT(un != NULL); 13963 ASSERT(!mutex_owned(SD_MUTEX(un))); 13964 13965 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13966 "sd_start_retry_command: entry\n"); 13967 13968 mutex_enter(SD_MUTEX(un)); 13969 13970 un->un_retry_timeid = NULL; 13971 13972 if (un->un_retry_bp != NULL) { 13973 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13974 "sd_start_retry_command: un:0x%p STARTING bp:0x%p\n", 13975 un, un->un_retry_bp); 13976 sd_start_cmds(un, un->un_retry_bp); 13977 } 13978 13979 mutex_exit(SD_MUTEX(un)); 13980 13981 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13982 "sd_start_retry_command: exit\n"); 13983 } 13984 13985 13986 /* 13987 * Function: sd_start_direct_priority_command 13988 * 13989 * Description: Used to re-start an SD_PATH_DIRECT_PRIORITY command that had 13990 * received TRAN_BUSY when we called scsi_transport() to send it 13991 * to the underlying HBA. This function is called from timeout(9F) 13992 * context after the delay interval has expired. 13993 * 13994 * Arguments: arg - pointer to associated buf(9S) to be restarted. 13995 * 13996 * Context: timeout(9F) thread context. May not sleep. 13997 */ 13998 13999 static void 14000 sd_start_direct_priority_command(void *arg) 14001 { 14002 struct buf *priority_bp = arg; 14003 struct sd_lun *un; 14004 14005 ASSERT(priority_bp != NULL); 14006 un = SD_GET_UN(priority_bp); 14007 ASSERT(un != NULL); 14008 ASSERT(!mutex_owned(SD_MUTEX(un))); 14009 14010 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14011 "sd_start_direct_priority_command: entry\n"); 14012 14013 mutex_enter(SD_MUTEX(un)); 14014 un->un_direct_priority_timeid = NULL; 14015 sd_start_cmds(un, priority_bp); 14016 mutex_exit(SD_MUTEX(un)); 14017 14018 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14019 "sd_start_direct_priority_command: exit\n"); 14020 } 14021 14022 14023 /* 14024 * Function: sd_send_request_sense_command 14025 * 14026 * Description: Sends a REQUEST SENSE command to the target 14027 * 14028 * Context: May be called from interrupt context. 14029 */ 14030 14031 static void 14032 sd_send_request_sense_command(struct sd_lun *un, struct buf *bp, 14033 struct scsi_pkt *pktp) 14034 { 14035 ASSERT(bp != NULL); 14036 ASSERT(un != NULL); 14037 ASSERT(mutex_owned(SD_MUTEX(un))); 14038 14039 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_send_request_sense_command: " 14040 "entry: buf:0x%p\n", bp); 14041 14042 /* 14043 * If we are syncing or dumping, then fail the command to avoid a 14044 * recursive callback into scsi_transport(). Also fail the command 14045 * if we are suspended (legacy behavior). 14046 */ 14047 if (ddi_in_panic() || (un->un_state == SD_STATE_SUSPENDED) || 14048 (un->un_state == SD_STATE_DUMPING)) { 14049 sd_return_failed_command(un, bp, EIO); 14050 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14051 "sd_send_request_sense_command: syncing/dumping, exit\n"); 14052 return; 14053 } 14054 14055 /* 14056 * Retry the failed command and don't issue the request sense if: 14057 * 1) the sense buf is busy 14058 * 2) we have 1 or more outstanding commands on the target 14059 * (the sense data will be cleared or invalidated anyway) 14060 * 14061 * Note: There could be an issue with not checking a retry limit here; 14062 * the problem is determining which retry limit to check.
14063 */ 14064 if ((un->un_sense_isbusy != 0) || (un->un_ncmds_in_transport > 0)) { 14065 /* Don't retry if the command is flagged as non-retryable */ 14066 if ((pktp->pkt_flags & FLAG_DIAGNOSE) == 0) { 14067 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, 14068 NULL, NULL, 0, SD_BSY_TIMEOUT, kstat_waitq_enter); 14069 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14070 "sd_send_request_sense_command: " 14071 "at full throttle, retrying exit\n"); 14072 } else { 14073 sd_return_failed_command(un, bp, EIO); 14074 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14075 "sd_send_request_sense_command: " 14076 "at full throttle, non-retryable exit\n"); 14077 } 14078 return; 14079 } 14080 14081 sd_mark_rqs_busy(un, bp); 14082 sd_start_cmds(un, un->un_rqs_bp); 14083 14084 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14085 "sd_send_request_sense_command: exit\n"); 14086 } 14087 14088 14089 /* 14090 * Function: sd_mark_rqs_busy 14091 * 14092 * Description: Indicate that the request sense bp for this instance is 14093 * in use. 14094 * 14095 * Context: May be called under interrupt context 14096 */ 14097 14098 static void 14099 sd_mark_rqs_busy(struct sd_lun *un, struct buf *bp) 14100 { 14101 struct sd_xbuf *sense_xp; 14102 14103 ASSERT(un != NULL); 14104 ASSERT(bp != NULL); 14105 ASSERT(mutex_owned(SD_MUTEX(un))); 14106 ASSERT(un->un_sense_isbusy == 0); 14107 14108 SD_TRACE(SD_LOG_IO_CORE, un, "sd_mark_rqs_busy: entry: " 14109 "buf:0x%p xp:0x%p un:0x%p\n", bp, SD_GET_XBUF(bp), un); 14110 14111 sense_xp = SD_GET_XBUF(un->un_rqs_bp); 14112 ASSERT(sense_xp != NULL); 14113 14114 SD_INFO(SD_LOG_IO, un, 14115 "sd_mark_rqs_busy: entry: sense_xp:0x%p\n", sense_xp); 14116 14117 ASSERT(sense_xp->xb_pktp != NULL); 14118 ASSERT((sense_xp->xb_pktp->pkt_flags & (FLAG_SENSING | FLAG_HEAD)) 14119 == (FLAG_SENSING | FLAG_HEAD)); 14120 14121 un->un_sense_isbusy = 1; 14122 un->un_rqs_bp->b_resid = 0; 14123 sense_xp->xb_pktp->pkt_resid = 0; 14124 sense_xp->xb_pktp->pkt_reason = 0; 14125 14126 /* So we can get back the bp at interrupt time! */ 14127 sense_xp->xb_sense_bp = bp; 14128 14129 bzero(un->un_rqs_bp->b_un.b_addr, SENSE_LENGTH); 14130 14131 /* 14132 * Mark this buf as awaiting sense data. (This is already set in 14133 * the pkt_flags for the RQS packet.) 14134 */ 14135 ((SD_GET_XBUF(bp))->xb_pktp)->pkt_flags |= FLAG_SENSING; 14136 14137 sense_xp->xb_retry_count = 0; 14138 sense_xp->xb_victim_retry_count = 0; 14139 sense_xp->xb_ua_retry_count = 0; 14140 sense_xp->xb_dma_resid = 0; 14141 14142 /* Clean up the fields for auto-request sense */ 14143 sense_xp->xb_sense_status = 0; 14144 sense_xp->xb_sense_state = 0; 14145 sense_xp->xb_sense_resid = 0; 14146 bzero(sense_xp->xb_sense_data, sizeof (sense_xp->xb_sense_data)); 14147 14148 SD_TRACE(SD_LOG_IO_CORE, un, "sd_mark_rqs_busy: exit\n"); 14149 } 14150 14151 14152 /* 14153 * Function: sd_mark_rqs_idle 14154 * 14155 * Description: SD_MUTEX must be held continuously through this routine 14156 * to prevent reuse of the rqs struct before the caller can 14157 * complete its processing.
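 *
 * A typical call, as in the transport-error paths of
 * sd_start_cmds() above:
 *
 *	bp = sd_mark_rqs_idle(un, xp);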
14158 * 14159 * Return Code: Pointer to the RQS buf 14160 * 14161 * Context: May be called under interrupt context 14162 */ 14163 14164 static struct buf * 14165 sd_mark_rqs_idle(struct sd_lun *un, struct sd_xbuf *sense_xp) 14166 { 14167 struct buf *bp; 14168 ASSERT(un != NULL); 14169 ASSERT(sense_xp != NULL); 14170 ASSERT(mutex_owned(SD_MUTEX(un))); 14171 ASSERT(un->un_sense_isbusy != 0); 14172 14173 un->un_sense_isbusy = 0; 14174 bp = sense_xp->xb_sense_bp; 14175 sense_xp->xb_sense_bp = NULL; 14176 14177 /* This pkt is no longer interested in getting sense data */ 14178 ((SD_GET_XBUF(bp))->xb_pktp)->pkt_flags &= ~FLAG_SENSING; 14179 14180 return (bp); 14181 } 14182 14183 14184 14185 /* 14186 * Function: sd_alloc_rqs 14187 * 14188 * Description: Set up the unit to receive auto request sense data 14189 * 14190 * Return Code: DDI_SUCCESS or DDI_FAILURE 14191 * 14192 * Context: Called under attach(9E) context 14193 */ 14194 14195 static int 14196 sd_alloc_rqs(struct scsi_device *devp, struct sd_lun *un) 14197 { 14198 struct sd_xbuf *xp; 14199 14200 ASSERT(un != NULL); 14201 ASSERT(!mutex_owned(SD_MUTEX(un))); 14202 ASSERT(un->un_rqs_bp == NULL); 14203 ASSERT(un->un_rqs_pktp == NULL); 14204 14205 /* 14206 * First allocate the required buf and scsi_pkt structs, then set up 14207 * the CDB in the scsi_pkt for a REQUEST SENSE command. 14208 */ 14209 un->un_rqs_bp = scsi_alloc_consistent_buf(&devp->sd_address, NULL, 14210 SENSE_LENGTH, B_READ, SLEEP_FUNC, NULL); 14211 if (un->un_rqs_bp == NULL) { 14212 return (DDI_FAILURE); 14213 } 14214 14215 un->un_rqs_pktp = scsi_init_pkt(&devp->sd_address, NULL, un->un_rqs_bp, 14216 CDB_GROUP0, 1, 0, PKT_CONSISTENT, SLEEP_FUNC, NULL); 14217 14218 if (un->un_rqs_pktp == NULL) { 14219 sd_free_rqs(un); 14220 return (DDI_FAILURE); 14221 } 14222 14223 /* Set up the CDB in the scsi_pkt for a REQUEST SENSE command. */ 14224 (void) scsi_setup_cdb((union scsi_cdb *)un->un_rqs_pktp->pkt_cdbp, 14225 SCMD_REQUEST_SENSE, 0, SENSE_LENGTH, 0); 14226 14227 SD_FILL_SCSI1_LUN(un, un->un_rqs_pktp); 14228 14229 /* Set up the other needed members in the ARQ scsi_pkt. */ 14230 un->un_rqs_pktp->pkt_comp = sdintr; 14231 un->un_rqs_pktp->pkt_time = sd_io_time; 14232 un->un_rqs_pktp->pkt_flags |= 14233 (FLAG_SENSING | FLAG_HEAD); /* (1222170) */ 14234 14235 /* 14236 * Allocate & init the sd_xbuf struct for the RQS command. Do not 14237 * provide any intpkt, destroypkt routines as we take care of 14238 * scsi_pkt allocation/freeing here and in sd_free_rqs(). 14239 */ 14240 xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP); 14241 sd_xbuf_init(un, un->un_rqs_bp, xp, SD_CHAIN_NULL, NULL); 14242 xp->xb_pktp = un->un_rqs_pktp; 14243 SD_INFO(SD_LOG_ATTACH_DETACH, un, 14244 "sd_alloc_rqs: un 0x%p, rqs xp 0x%p, pkt 0x%p, buf 0x%p\n", 14245 un, xp, un->un_rqs_pktp, un->un_rqs_bp); 14246 14247 /* 14248 * Save the pointer to the request sense private bp so it can 14249 * be retrieved in sdintr. 14250 */ 14251 un->un_rqs_pktp->pkt_private = un->un_rqs_bp; 14252 ASSERT(un->un_rqs_bp->b_private == xp); 14253 14254 /* 14255 * See if the HBA supports auto-request sense for the specified 14256 * target/lun. If it does, then try to enable it (if not already 14257 * enabled). 14258 * 14259 * Note: For some HBAs (ifp & sf), scsi_ifsetcap will always return 14260 * failure, while for other HBAs (pln) scsi_ifsetcap will always 14261 * return success. However, in both of these cases ARQ is always 14262 * enabled and scsi_ifgetcap will always return true. 
The best approach 14263 * is to issue the scsi_ifgetcap() first, then try the scsi_ifsetcap(). 14264 * 14265 * The 3rd case is an HBA (adp) that always returns enabled on 14266 * scsi_ifgetcap even when it is not enabled; the best approach 14267 * is to issue a scsi_ifsetcap and then a scsi_ifgetcap. 14268 * Note: this case is to circumvent the Adaptec bug. (x86 only) 14269 */ 14270 14271 if (un->un_f_is_fibre == TRUE) { 14272 un->un_f_arq_enabled = TRUE; 14273 } else { 14274 #if defined(__i386) || defined(__amd64) 14275 /* 14276 * Circumvent the Adaptec bug; remove this code when 14277 * the bug is fixed 14278 */ 14279 (void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 1, 1); 14280 #endif 14281 switch (scsi_ifgetcap(SD_ADDRESS(un), "auto-rqsense", 1)) { 14282 case 0: 14283 SD_INFO(SD_LOG_ATTACH_DETACH, un, 14284 "sd_alloc_rqs: HBA supports ARQ\n"); 14285 /* 14286 * ARQ is supported by this HBA but currently is not 14287 * enabled. Attempt to enable it and if successful then 14288 * mark this instance as ARQ enabled. 14289 */ 14290 if (scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 1, 1) 14291 == 1) { 14292 /* Successfully enabled ARQ in the HBA */ 14293 SD_INFO(SD_LOG_ATTACH_DETACH, un, 14294 "sd_alloc_rqs: ARQ enabled\n"); 14295 un->un_f_arq_enabled = TRUE; 14296 } else { 14297 /* Could not enable ARQ in the HBA */ 14298 SD_INFO(SD_LOG_ATTACH_DETACH, un, 14299 "sd_alloc_rqs: failed ARQ enable\n"); 14300 un->un_f_arq_enabled = FALSE; 14301 } 14302 break; 14303 case 1: 14304 /* 14305 * ARQ is supported by this HBA and is already enabled. 14306 * Just mark ARQ as enabled for this instance. 14307 */ 14308 SD_INFO(SD_LOG_ATTACH_DETACH, un, 14309 "sd_alloc_rqs: ARQ already enabled\n"); 14310 un->un_f_arq_enabled = TRUE; 14311 break; 14312 default: 14313 /* 14314 * ARQ is not supported by this HBA; disable it for this 14315 * instance. 14316 */ 14317 SD_INFO(SD_LOG_ATTACH_DETACH, un, 14318 "sd_alloc_rqs: HBA does not support ARQ\n"); 14319 un->un_f_arq_enabled = FALSE; 14320 break; 14321 } 14322 } 14323 14324 return (DDI_SUCCESS); 14325 } 14326 14327 14328 /* 14329 * Function: sd_free_rqs 14330 * 14331 * Description: Cleanup for the per-instance RQS command. 14332 * 14333 * Context: Kernel thread context 14334 */ 14335 14336 static void 14337 sd_free_rqs(struct sd_lun *un) 14338 { 14339 ASSERT(un != NULL); 14340 14341 SD_TRACE(SD_LOG_IO_CORE, un, "sd_free_rqs: entry\n"); 14342 14343 /* 14344 * If consistent memory is bound to a scsi_pkt, the pkt 14345 * has to be destroyed *before* freeing the consistent memory. 14346 * Don't change the sequence of these operations. 14347 * scsi_destroy_pkt() might access memory that was already freed 14348 * by scsi_free_consistent_buf(), which isn't allowed. 14349 */ 14350 if (un->un_rqs_pktp != NULL) { 14351 scsi_destroy_pkt(un->un_rqs_pktp); 14352 un->un_rqs_pktp = NULL; 14353 } 14354 14355 if (un->un_rqs_bp != NULL) { 14356 kmem_free(SD_GET_XBUF(un->un_rqs_bp), sizeof (struct sd_xbuf)); 14357 scsi_free_consistent_buf(un->un_rqs_bp); 14358 un->un_rqs_bp = NULL; 14359 } 14360 SD_TRACE(SD_LOG_IO_CORE, un, "sd_free_rqs: exit\n"); 14361 } 14362 14363 14364 14365 /* 14366 * Function: sd_reduce_throttle 14367 * 14368 * Description: Reduces the maximum # of outstanding commands on a 14369 * target to the current number of outstanding commands. 14370 * Queues a timeout(9F) callback to restore the limit 14371 * after a specified interval has elapsed. 14372 * Typically used when we get a TRAN_BUSY return code 14373 * back from scsi_transport().
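 * For example (hypothetical counts): with adaptive throttle
 * enabled, un_throttle at 32, and 8 commands outstanding, a
 * TRAN_BUSY saves 32 in un_busy_throttle and drops un_throttle
 * to 8; sd_restore_throttle() later restores the saved value.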
14374 * 14375 * Arguments: un - ptr to the sd_lun softstate struct 14376 * throttle_type: SD_THROTTLE_TRAN_BUSY or SD_THROTTLE_QFULL 14377 * 14378 * Context: May be called from interrupt context 14379 */ 14380 14381 static void 14382 sd_reduce_throttle(struct sd_lun *un, int throttle_type) 14383 { 14384 ASSERT(un != NULL); 14385 ASSERT(mutex_owned(SD_MUTEX(un))); 14386 ASSERT(un->un_ncmds_in_transport >= 0); 14387 14388 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reduce_throttle: " 14389 "entry: un:0x%p un_throttle:%d un_ncmds_in_transport:%d\n", 14390 un, un->un_throttle, un->un_ncmds_in_transport); 14391 14392 if (un->un_throttle > 1) { 14393 if (un->un_f_use_adaptive_throttle == TRUE) { 14394 switch (throttle_type) { 14395 case SD_THROTTLE_TRAN_BUSY: 14396 if (un->un_busy_throttle == 0) { 14397 un->un_busy_throttle = un->un_throttle; 14398 } 14399 break; 14400 case SD_THROTTLE_QFULL: 14401 un->un_busy_throttle = 0; 14402 break; 14403 default: 14404 ASSERT(FALSE); 14405 } 14406 14407 if (un->un_ncmds_in_transport > 0) { 14408 un->un_throttle = un->un_ncmds_in_transport; 14409 } 14410 14411 } else { 14412 if (un->un_ncmds_in_transport == 0) { 14413 un->un_throttle = 1; 14414 } else { 14415 un->un_throttle = un->un_ncmds_in_transport; 14416 } 14417 } 14418 } 14419 14420 /* Reschedule the timeout if none is currently active */ 14421 if (un->un_reset_throttle_timeid == NULL) { 14422 un->un_reset_throttle_timeid = timeout(sd_restore_throttle, 14423 un, SD_THROTTLE_RESET_INTERVAL); 14424 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14425 "sd_reduce_throttle: timeout scheduled!\n"); 14426 } 14427 14428 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reduce_throttle: " 14429 "exit: un:0x%p un_throttle:%d\n", un, un->un_throttle); 14430 } 14431 14432 14433 14434 /* 14435 * Function: sd_restore_throttle 14436 * 14437 * Description: Callback function for timeout(9F). Resets the current 14438 * value of un->un_throttle to its default. 14439 * 14440 * Arguments: arg - pointer to associated softstate for the device. 14441 * 14442 * Context: May be called from interrupt context 14443 */ 14444 14445 static void 14446 sd_restore_throttle(void *arg) 14447 { 14448 struct sd_lun *un = arg; 14449 14450 ASSERT(un != NULL); 14451 ASSERT(!mutex_owned(SD_MUTEX(un))); 14452 14453 mutex_enter(SD_MUTEX(un)); 14454 14455 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: " 14456 "entry: un:0x%p un_throttle:%d\n", un, un->un_throttle); 14457 14458 un->un_reset_throttle_timeid = NULL; 14459 14460 if (un->un_f_use_adaptive_throttle == TRUE) { 14461 /* 14462 * If un_busy_throttle is nonzero, then it contains the 14463 * value that un_throttle had when we got a TRAN_BUSY back 14464 * from scsi_transport(). We want to revert to this 14465 * value. 14466 * 14467 * In the QFULL case, the throttle limit will incrementally 14468 * increase until it reaches max throttle. 14469 */ 14470 if (un->un_busy_throttle > 0) { 14471 un->un_throttle = un->un_busy_throttle; 14472 un->un_busy_throttle = 0; 14473 } else { 14474 /* 14475 * Increase the throttle by 10% to open the gate 14476 * slowly; schedule another restore if the saved 14477 * throttle has not been reached. 14478 */ 14479 short throttle; 14480 if (sd_qfull_throttle_enable) { 14481 throttle = un->un_throttle + 14482 max((un->un_throttle / 10), 1); 14483 un->un_throttle = 14484 (throttle < un->un_saved_throttle) ?
throttle : un->un_saved_throttle;
14486 if (un->un_throttle < un->un_saved_throttle) {
14487 un->un_reset_throttle_timeid =
14488 timeout(sd_restore_throttle,
14489 un,
14490 SD_QFULL_THROTTLE_RESET_INTERVAL);
14491 }
14492 }
14493 }
14494
14495 /*
14496 * If un_throttle has fallen below the low-water mark, we
14497 * restore the maximum value here (and allow it to ratchet
14498 * down again if necessary).
14499 */
14500 if (un->un_throttle < un->un_min_throttle) {
14501 un->un_throttle = un->un_saved_throttle;
14502 }
14503 } else {
14504 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: "
14505 "restoring limit from 0x%x to 0x%x\n",
14506 un->un_throttle, un->un_saved_throttle);
14507 un->un_throttle = un->un_saved_throttle;
14508 }
14509
14510 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un,
14511 "sd_restore_throttle: calling sd_start_cmds!\n");
14512
14513 sd_start_cmds(un, NULL);
14514
14515 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un,
14516 "sd_restore_throttle: exit: un:0x%p un_throttle:%d\n",
14517 un, un->un_throttle);
14518
14519 mutex_exit(SD_MUTEX(un));
14520
14521 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: exit\n");
14522 }
14523
14524 /*
14525 * Function: sdrunout
14526 *
14527 * Description: Callback routine for scsi_init_pkt when a resource allocation
14528 * fails.
14529 *
14530 * Arguments: arg - a pointer to the sd_lun unit struct for the particular
14531 * soft state instance.
14532 *
14533 * Return Code: The scsi_init_pkt routine allows for the callback function to
14534 * return a 0 indicating the callback should be rescheduled or a 1
14535 * indicating not to reschedule. This routine always returns 1
14536 * because the driver always provides a callback function to
14537 * scsi_init_pkt. This results in a callback always being scheduled
14538 * (via the scsi_init_pkt callback implementation) if a resource
14539 * failure occurs.
14540 *
14541 * Context: This callback function may not block or call routines that block
14542 *
14543 * Note: Using the scsi_init_pkt callback facility can result in an I/O
14544 * request persisting at the head of the list which cannot be
14545 * satisfied even after multiple retries. In the future the driver
14546 * may implement some type of maximum runout count before failing
14547 * an I/O.
14548 */
14549
14550 static int
14551 sdrunout(caddr_t arg)
14552 {
14553 struct sd_lun *un = (struct sd_lun *)arg;
14554
14555 ASSERT(un != NULL);
14556 ASSERT(!mutex_owned(SD_MUTEX(un)));
14557
14558 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdrunout: entry\n");
14559
14560 mutex_enter(SD_MUTEX(un));
14561 sd_start_cmds(un, NULL);
14562 mutex_exit(SD_MUTEX(un));
14563 /*
14564 * This callback routine always returns 1 (i.e. do not reschedule)
14565 * because we always specify sdrunout as the callback handler for
14566 * scsi_init_pkt inside the call to sd_start_cmds.
14567 */
14568 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdrunout: exit\n");
14569 return (1);
14570 }
14571
14572
14573 /*
14574 * Function: sdintr
14575 *
14576 * Description: Completion callback routine for scsi_pkt(9S) structs
14577 * sent to the HBA driver via scsi_transport(9F).
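 *
 *		Dispatch summary (descriptive only, mirroring the code
 *		below): CMD_DEV_GONE packets are failed immediately;
 *		packets carrying auto request sense data (STATE_ARQ_DONE)
 *		and the driver's own REQUEST SENSE packet (FLAG_SENSING)
 *		go to their respective sense handlers; commands completed
 *		with STATUS_GOOD take the fast path back to the caller;
 *		everything else falls through to the pkt_reason/pkt_status
 *		error decode.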
14578 * 14579 * Context: Interrupt context 14580 */ 14581 14582 static void 14583 sdintr(struct scsi_pkt *pktp) 14584 { 14585 struct buf *bp; 14586 struct sd_xbuf *xp; 14587 struct sd_lun *un; 14588 14589 ASSERT(pktp != NULL); 14590 bp = (struct buf *)pktp->pkt_private; 14591 ASSERT(bp != NULL); 14592 xp = SD_GET_XBUF(bp); 14593 ASSERT(xp != NULL); 14594 ASSERT(xp->xb_pktp != NULL); 14595 un = SD_GET_UN(bp); 14596 ASSERT(un != NULL); 14597 ASSERT(!mutex_owned(SD_MUTEX(un))); 14598 14599 #ifdef SD_FAULT_INJECTION 14600 14601 SD_INFO(SD_LOG_IOERR, un, "sdintr: sdintr calling Fault injection\n"); 14602 /* SD FaultInjection */ 14603 sd_faultinjection(pktp); 14604 14605 #endif /* SD_FAULT_INJECTION */ 14606 14607 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdintr: entry: buf:0x%p," 14608 " xp:0x%p, un:0x%p\n", bp, xp, un); 14609 14610 mutex_enter(SD_MUTEX(un)); 14611 14612 /* Reduce the count of the #commands currently in transport */ 14613 un->un_ncmds_in_transport--; 14614 ASSERT(un->un_ncmds_in_transport >= 0); 14615 14616 /* Increment counter to indicate that the callback routine is active */ 14617 un->un_in_callback++; 14618 14619 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 14620 14621 #ifdef SDDEBUG 14622 if (bp == un->un_retry_bp) { 14623 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sdintr: " 14624 "un:0x%p: GOT retry_bp:0x%p un_ncmds_in_transport:%d\n", 14625 un, un->un_retry_bp, un->un_ncmds_in_transport); 14626 } 14627 #endif 14628 14629 /* 14630 * If pkt_reason is CMD_DEV_GONE, fail the command, and update the media 14631 * state if needed. 14632 */ 14633 if (pktp->pkt_reason == CMD_DEV_GONE) { 14634 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 14635 "Device is gone\n"); 14636 if (un->un_mediastate != DKIO_DEV_GONE) { 14637 un->un_mediastate = DKIO_DEV_GONE; 14638 cv_broadcast(&un->un_state_cv); 14639 } 14640 sd_return_failed_command(un, bp, EIO); 14641 goto exit; 14642 } 14643 14644 /* 14645 * First see if the pkt has auto-request sense data with it.... 14646 * Look at the packet state first so we don't take a performance 14647 * hit looking at the arq enabled flag unless absolutely necessary. 14648 */ 14649 if ((pktp->pkt_state & STATE_ARQ_DONE) && 14650 (un->un_f_arq_enabled == TRUE)) { 14651 /* 14652 * The HBA did an auto request sense for this command so check 14653 * for FLAG_DIAGNOSE. If set this indicates a uscsi or internal 14654 * driver command that should not be retried. 14655 */ 14656 if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) { 14657 /* 14658 * Save the relevant sense info into the xp for the 14659 * original cmd. 14660 */ 14661 struct scsi_arq_status *asp; 14662 asp = (struct scsi_arq_status *)(pktp->pkt_scbp); 14663 xp->xb_sense_status = 14664 *((uchar_t *)(&(asp->sts_rqpkt_status))); 14665 xp->xb_sense_state = asp->sts_rqpkt_state; 14666 xp->xb_sense_resid = asp->sts_rqpkt_resid; 14667 bcopy(&asp->sts_sensedata, xp->xb_sense_data, 14668 min(sizeof (struct scsi_extended_sense), 14669 SENSE_LENGTH)); 14670 14671 /* fail the command */ 14672 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14673 "sdintr: arq done and FLAG_DIAGNOSE set\n"); 14674 sd_return_failed_command(un, bp, EIO); 14675 goto exit; 14676 } 14677 14678 #if (defined(__i386) || defined(__amd64)) /* DMAFREE for x86 only */ 14679 /* 14680 * We want to either retry or fail this command, so free 14681 * the DMA resources here. If we retry the command then 14682 * the DMA resources will be reallocated in sd_start_cmds(). 
14683 * Note that when PKT_DMA_PARTIAL is used, this reallocation 14684 * causes the *entire* transfer to start over again from the 14685 * beginning of the request, even for PARTIAL chunks that 14686 * have already transferred successfully. 14687 */ 14688 if ((un->un_f_is_fibre == TRUE) && 14689 ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) && 14690 ((pktp->pkt_flags & FLAG_SENSING) == 0)) { 14691 scsi_dmafree(pktp); 14692 xp->xb_pkt_flags |= SD_XB_DMA_FREED; 14693 } 14694 #endif 14695 14696 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14697 "sdintr: arq done, sd_handle_auto_request_sense\n"); 14698 14699 sd_handle_auto_request_sense(un, bp, xp, pktp); 14700 goto exit; 14701 } 14702 14703 /* Next see if this is the REQUEST SENSE pkt for the instance */ 14704 if (pktp->pkt_flags & FLAG_SENSING) { 14705 /* This pktp is from the unit's REQUEST_SENSE command */ 14706 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14707 "sdintr: sd_handle_request_sense\n"); 14708 sd_handle_request_sense(un, bp, xp, pktp); 14709 goto exit; 14710 } 14711 14712 /* 14713 * Check to see if the command successfully completed as requested; 14714 * this is the most common case (and also the hot performance path). 14715 * 14716 * Requirements for successful completion are: 14717 * pkt_reason is CMD_CMPLT and packet status is status good. 14718 * In addition: 14719 * - A residual of zero indicates successful completion no matter what 14720 * the command is. 14721 * - If the residual is not zero and the command is not a read or 14722 * write, then it's still defined as successful completion. In other 14723 * words, if the command is a read or write the residual must be 14724 * zero for successful completion. 14725 * - If the residual is not zero and the command is a read or 14726 * write, and it's a USCSICMD, then it's still defined as 14727 * successful completion. 14728 */ 14729 if ((pktp->pkt_reason == CMD_CMPLT) && 14730 (SD_GET_PKT_STATUS(pktp) == STATUS_GOOD)) { 14731 14732 /* 14733 * Since this command is returned with a good status, we 14734 * can reset the count for Sonoma failover. 14735 */ 14736 un->un_sonoma_failure_count = 0; 14737 14738 /* 14739 * Return all USCSI commands on good status 14740 */ 14741 if (pktp->pkt_resid == 0) { 14742 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14743 "sdintr: returning command for resid == 0\n"); 14744 } else if (((SD_GET_PKT_OPCODE(pktp) & 0x1F) != SCMD_READ) && 14745 ((SD_GET_PKT_OPCODE(pktp) & 0x1F) != SCMD_WRITE)) { 14746 SD_UPDATE_B_RESID(bp, pktp); 14747 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14748 "sdintr: returning command for resid != 0\n"); 14749 } else if (xp->xb_pkt_flags & SD_XB_USCSICMD) { 14750 SD_UPDATE_B_RESID(bp, pktp); 14751 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14752 "sdintr: returning uscsi command\n"); 14753 } else { 14754 goto not_successful; 14755 } 14756 sd_return_command(un, bp); 14757 14758 /* 14759 * Decrement counter to indicate that the callback routine 14760 * is done. 14761 */ 14762 un->un_in_callback--; 14763 ASSERT(un->un_in_callback >= 0); 14764 mutex_exit(SD_MUTEX(un)); 14765 14766 return; 14767 } 14768 14769 not_successful: 14770 14771 #if (defined(__i386) || defined(__amd64)) /* DMAFREE for x86 only */ 14772 /* 14773 * The following is based upon knowledge of the underlying transport 14774 * and its use of DMA resources. This code should be removed when 14775 * PKT_DMA_PARTIAL support is taken out of the disk driver in favor 14776 * of the new PKT_CMD_BREAKUP protocol. See also sd_initpkt_for_buf() 14777 * and sd_start_cmds(). 
14778 * 14779 * Free any DMA resources associated with this command if there 14780 * is a chance it could be retried or enqueued for later retry. 14781 * If we keep the DMA binding then mpxio cannot reissue the 14782 * command on another path whenever a path failure occurs. 14783 * 14784 * Note that when PKT_DMA_PARTIAL is used, free/reallocation 14785 * causes the *entire* transfer to start over again from the 14786 * beginning of the request, even for PARTIAL chunks that 14787 * have already transferred successfully. 14788 * 14789 * This is only done for non-uscsi commands (and also skipped for the 14790 * driver's internal RQS command). Also just do this for Fibre Channel 14791 * devices as these are the only ones that support mpxio. 14792 */ 14793 if ((un->un_f_is_fibre == TRUE) && 14794 ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) && 14795 ((pktp->pkt_flags & FLAG_SENSING) == 0)) { 14796 scsi_dmafree(pktp); 14797 xp->xb_pkt_flags |= SD_XB_DMA_FREED; 14798 } 14799 #endif 14800 14801 /* 14802 * The command did not successfully complete as requested so check 14803 * for FLAG_DIAGNOSE. If set this indicates a uscsi or internal 14804 * driver command that should not be retried so just return. If 14805 * FLAG_DIAGNOSE is not set the error will be processed below. 14806 */ 14807 if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) { 14808 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14809 "sdintr: FLAG_DIAGNOSE: sd_return_failed_command\n"); 14810 /* 14811 * Issue a request sense if a check condition caused the error 14812 * (we handle the auto request sense case above), otherwise 14813 * just fail the command. 14814 */ 14815 if ((pktp->pkt_reason == CMD_CMPLT) && 14816 (SD_GET_PKT_STATUS(pktp) == STATUS_CHECK)) { 14817 sd_send_request_sense_command(un, bp, pktp); 14818 } else { 14819 sd_return_failed_command(un, bp, EIO); 14820 } 14821 goto exit; 14822 } 14823 14824 /* 14825 * The command did not successfully complete as requested so process 14826 * the error, retry, and/or attempt recovery. 
14827 */ 14828 switch (pktp->pkt_reason) { 14829 case CMD_CMPLT: 14830 switch (SD_GET_PKT_STATUS(pktp)) { 14831 case STATUS_GOOD: 14832 /* 14833 * The command completed successfully with a non-zero 14834 * residual 14835 */ 14836 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14837 "sdintr: STATUS_GOOD \n"); 14838 sd_pkt_status_good(un, bp, xp, pktp); 14839 break; 14840 14841 case STATUS_CHECK: 14842 case STATUS_TERMINATED: 14843 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14844 "sdintr: STATUS_TERMINATED | STATUS_CHECK\n"); 14845 sd_pkt_status_check_condition(un, bp, xp, pktp); 14846 break; 14847 14848 case STATUS_BUSY: 14849 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14850 "sdintr: STATUS_BUSY\n"); 14851 sd_pkt_status_busy(un, bp, xp, pktp); 14852 break; 14853 14854 case STATUS_RESERVATION_CONFLICT: 14855 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14856 "sdintr: STATUS_RESERVATION_CONFLICT\n"); 14857 sd_pkt_status_reservation_conflict(un, bp, xp, pktp); 14858 break; 14859 14860 case STATUS_QFULL: 14861 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14862 "sdintr: STATUS_QFULL\n"); 14863 sd_pkt_status_qfull(un, bp, xp, pktp); 14864 break; 14865 14866 case STATUS_MET: 14867 case STATUS_INTERMEDIATE: 14868 case STATUS_SCSI2: 14869 case STATUS_INTERMEDIATE_MET: 14870 case STATUS_ACA_ACTIVE: 14871 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 14872 "Unexpected SCSI status received: 0x%x\n", 14873 SD_GET_PKT_STATUS(pktp)); 14874 sd_return_failed_command(un, bp, EIO); 14875 break; 14876 14877 default: 14878 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 14879 "Invalid SCSI status received: 0x%x\n", 14880 SD_GET_PKT_STATUS(pktp)); 14881 sd_return_failed_command(un, bp, EIO); 14882 break; 14883 14884 } 14885 break; 14886 14887 case CMD_INCOMPLETE: 14888 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14889 "sdintr: CMD_INCOMPLETE\n"); 14890 sd_pkt_reason_cmd_incomplete(un, bp, xp, pktp); 14891 break; 14892 case CMD_TRAN_ERR: 14893 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14894 "sdintr: CMD_TRAN_ERR\n"); 14895 sd_pkt_reason_cmd_tran_err(un, bp, xp, pktp); 14896 break; 14897 case CMD_RESET: 14898 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14899 "sdintr: CMD_RESET \n"); 14900 sd_pkt_reason_cmd_reset(un, bp, xp, pktp); 14901 break; 14902 case CMD_ABORTED: 14903 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14904 "sdintr: CMD_ABORTED \n"); 14905 sd_pkt_reason_cmd_aborted(un, bp, xp, pktp); 14906 break; 14907 case CMD_TIMEOUT: 14908 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14909 "sdintr: CMD_TIMEOUT\n"); 14910 sd_pkt_reason_cmd_timeout(un, bp, xp, pktp); 14911 break; 14912 case CMD_UNX_BUS_FREE: 14913 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14914 "sdintr: CMD_UNX_BUS_FREE \n"); 14915 sd_pkt_reason_cmd_unx_bus_free(un, bp, xp, pktp); 14916 break; 14917 case CMD_TAG_REJECT: 14918 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14919 "sdintr: CMD_TAG_REJECT\n"); 14920 sd_pkt_reason_cmd_tag_reject(un, bp, xp, pktp); 14921 break; 14922 default: 14923 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14924 "sdintr: default\n"); 14925 sd_pkt_reason_default(un, bp, xp, pktp); 14926 break; 14927 } 14928 14929 exit: 14930 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdintr: exit\n"); 14931 14932 /* Decrement counter to indicate that the callback routine is done. */ 14933 un->un_in_callback--; 14934 ASSERT(un->un_in_callback >= 0); 14935 14936 /* 14937 * At this point, the pkt has been dispatched, ie, it is either 14938 * being re-tried or has been returned to its caller and should 14939 * not be referenced. 
14940 */ 14941 14942 mutex_exit(SD_MUTEX(un)); 14943 } 14944 14945 14946 /* 14947 * Function: sd_print_incomplete_msg 14948 * 14949 * Description: Prints the error message for a CMD_INCOMPLETE error. 14950 * 14951 * Arguments: un - ptr to associated softstate for the device. 14952 * bp - ptr to the buf(9S) for the command. 14953 * arg - message string ptr 14954 * code - SD_DELAYED_RETRY_ISSUED, SD_IMMEDIATE_RETRY_ISSUED, 14955 * or SD_NO_RETRY_ISSUED. 14956 * 14957 * Context: May be called under interrupt context 14958 */ 14959 14960 static void 14961 sd_print_incomplete_msg(struct sd_lun *un, struct buf *bp, void *arg, int code) 14962 { 14963 struct scsi_pkt *pktp; 14964 char *msgp; 14965 char *cmdp = arg; 14966 14967 ASSERT(un != NULL); 14968 ASSERT(mutex_owned(SD_MUTEX(un))); 14969 ASSERT(bp != NULL); 14970 ASSERT(arg != NULL); 14971 pktp = SD_GET_PKTP(bp); 14972 ASSERT(pktp != NULL); 14973 14974 switch (code) { 14975 case SD_DELAYED_RETRY_ISSUED: 14976 case SD_IMMEDIATE_RETRY_ISSUED: 14977 msgp = "retrying"; 14978 break; 14979 case SD_NO_RETRY_ISSUED: 14980 default: 14981 msgp = "giving up"; 14982 break; 14983 } 14984 14985 if ((pktp->pkt_flags & FLAG_SILENT) == 0) { 14986 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 14987 "incomplete %s- %s\n", cmdp, msgp); 14988 } 14989 } 14990 14991 14992 14993 /* 14994 * Function: sd_pkt_status_good 14995 * 14996 * Description: Processing for a STATUS_GOOD code in pkt_status. 14997 * 14998 * Context: May be called under interrupt context 14999 */ 15000 15001 static void 15002 sd_pkt_status_good(struct sd_lun *un, struct buf *bp, 15003 struct sd_xbuf *xp, struct scsi_pkt *pktp) 15004 { 15005 char *cmdp; 15006 15007 ASSERT(un != NULL); 15008 ASSERT(mutex_owned(SD_MUTEX(un))); 15009 ASSERT(bp != NULL); 15010 ASSERT(xp != NULL); 15011 ASSERT(pktp != NULL); 15012 ASSERT(pktp->pkt_reason == CMD_CMPLT); 15013 ASSERT(SD_GET_PKT_STATUS(pktp) == STATUS_GOOD); 15014 ASSERT(pktp->pkt_resid != 0); 15015 15016 SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: entry\n"); 15017 15018 SD_UPDATE_ERRSTATS(un, sd_harderrs); 15019 switch (SD_GET_PKT_OPCODE(pktp) & 0x1F) { 15020 case SCMD_READ: 15021 cmdp = "read"; 15022 break; 15023 case SCMD_WRITE: 15024 cmdp = "write"; 15025 break; 15026 default: 15027 SD_UPDATE_B_RESID(bp, pktp); 15028 sd_return_command(un, bp); 15029 SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: exit\n"); 15030 return; 15031 } 15032 15033 /* 15034 * See if we can retry the read/write, preferrably immediately. 15035 * If retries are exhaused, then sd_retry_command() will update 15036 * the b_resid count. 15037 */ 15038 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_incomplete_msg, 15039 cmdp, EIO, (clock_t)0, NULL); 15040 15041 SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: exit\n"); 15042 } 15043 15044 15045 15046 15047 15048 /* 15049 * Function: sd_handle_request_sense 15050 * 15051 * Description: Processing for non-auto Request Sense command. 
15052 * 15053 * Arguments: un - ptr to associated softstate 15054 * sense_bp - ptr to buf(9S) for the RQS command 15055 * sense_xp - ptr to the sd_xbuf for the RQS command 15056 * sense_pktp - ptr to the scsi_pkt(9S) for the RQS command 15057 * 15058 * Context: May be called under interrupt context 15059 */ 15060 15061 static void 15062 sd_handle_request_sense(struct sd_lun *un, struct buf *sense_bp, 15063 struct sd_xbuf *sense_xp, struct scsi_pkt *sense_pktp) 15064 { 15065 struct buf *cmd_bp; /* buf for the original command */ 15066 struct sd_xbuf *cmd_xp; /* sd_xbuf for the original command */ 15067 struct scsi_pkt *cmd_pktp; /* pkt for the original command */ 15068 15069 ASSERT(un != NULL); 15070 ASSERT(mutex_owned(SD_MUTEX(un))); 15071 ASSERT(sense_bp != NULL); 15072 ASSERT(sense_xp != NULL); 15073 ASSERT(sense_pktp != NULL); 15074 15075 /* 15076 * Note the sense_bp, sense_xp, and sense_pktp here are for the 15077 * RQS command and not the original command. 15078 */ 15079 ASSERT(sense_pktp == un->un_rqs_pktp); 15080 ASSERT(sense_bp == un->un_rqs_bp); 15081 ASSERT((sense_pktp->pkt_flags & (FLAG_SENSING | FLAG_HEAD)) == 15082 (FLAG_SENSING | FLAG_HEAD)); 15083 ASSERT((((SD_GET_XBUF(sense_xp->xb_sense_bp))->xb_pktp->pkt_flags) & 15084 FLAG_SENSING) == FLAG_SENSING); 15085 15086 /* These are the bp, xp, and pktp for the original command */ 15087 cmd_bp = sense_xp->xb_sense_bp; 15088 cmd_xp = SD_GET_XBUF(cmd_bp); 15089 cmd_pktp = SD_GET_PKTP(cmd_bp); 15090 15091 if (sense_pktp->pkt_reason != CMD_CMPLT) { 15092 /* 15093 * The REQUEST SENSE command failed. Release the REQUEST 15094 * SENSE command for re-use, get back the bp for the original 15095 * command, and attempt to re-try the original command if 15096 * FLAG_DIAGNOSE is not set in the original packet. 15097 */ 15098 SD_UPDATE_ERRSTATS(un, sd_harderrs); 15099 if ((cmd_pktp->pkt_flags & FLAG_DIAGNOSE) == 0) { 15100 cmd_bp = sd_mark_rqs_idle(un, sense_xp); 15101 sd_retry_command(un, cmd_bp, SD_RETRIES_STANDARD, 15102 NULL, NULL, EIO, (clock_t)0, NULL); 15103 return; 15104 } 15105 } 15106 15107 /* 15108 * Save the relevant sense info into the xp for the original cmd. 15109 * 15110 * Note: if the request sense failed the state info will be zero 15111 * as set in sd_mark_rqs_busy() 15112 */ 15113 cmd_xp->xb_sense_status = *(sense_pktp->pkt_scbp); 15114 cmd_xp->xb_sense_state = sense_pktp->pkt_state; 15115 cmd_xp->xb_sense_resid = sense_pktp->pkt_resid; 15116 bcopy(sense_bp->b_un.b_addr, cmd_xp->xb_sense_data, SENSE_LENGTH); 15117 15118 /* 15119 * Free up the RQS command.... 15120 * NOTE: 15121 * Must do this BEFORE calling sd_validate_sense_data! 15122 * sd_validate_sense_data may return the original command in 15123 * which case the pkt will be freed and the flags can no 15124 * longer be touched. 15125 * SD_MUTEX is held through this process until the command 15126 * is dispatched based upon the sense data, so there are 15127 * no race conditions. 15128 */ 15129 (void) sd_mark_rqs_idle(un, sense_xp); 15130 15131 /* 15132 * For a retryable command see if we have valid sense data, if so then 15133 * turn it over to sd_decode_sense() to figure out the right course of 15134 * action. Just fail a non-retryable command. 
15135 */ 15136 if ((cmd_pktp->pkt_flags & FLAG_DIAGNOSE) == 0) { 15137 if (sd_validate_sense_data(un, cmd_bp, cmd_xp) == 15138 SD_SENSE_DATA_IS_VALID) { 15139 sd_decode_sense(un, cmd_bp, cmd_xp, cmd_pktp); 15140 } 15141 } else { 15142 SD_DUMP_MEMORY(un, SD_LOG_IO_CORE, "Failed CDB", 15143 (uchar_t *)cmd_pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX); 15144 SD_DUMP_MEMORY(un, SD_LOG_IO_CORE, "Sense Data", 15145 (uchar_t *)cmd_xp->xb_sense_data, SENSE_LENGTH, SD_LOG_HEX); 15146 sd_return_failed_command(un, cmd_bp, EIO); 15147 } 15148 } 15149 15150 15151 15152 15153 /* 15154 * Function: sd_handle_auto_request_sense 15155 * 15156 * Description: Processing for auto-request sense information. 15157 * 15158 * Arguments: un - ptr to associated softstate 15159 * bp - ptr to buf(9S) for the command 15160 * xp - ptr to the sd_xbuf for the command 15161 * pktp - ptr to the scsi_pkt(9S) for the command 15162 * 15163 * Context: May be called under interrupt context 15164 */ 15165 15166 static void 15167 sd_handle_auto_request_sense(struct sd_lun *un, struct buf *bp, 15168 struct sd_xbuf *xp, struct scsi_pkt *pktp) 15169 { 15170 struct scsi_arq_status *asp; 15171 15172 ASSERT(un != NULL); 15173 ASSERT(mutex_owned(SD_MUTEX(un))); 15174 ASSERT(bp != NULL); 15175 ASSERT(xp != NULL); 15176 ASSERT(pktp != NULL); 15177 ASSERT(pktp != un->un_rqs_pktp); 15178 ASSERT(bp != un->un_rqs_bp); 15179 15180 /* 15181 * For auto-request sense, we get a scsi_arq_status back from 15182 * the HBA, with the sense data in the sts_sensedata member. 15183 * The pkt_scbp of the packet points to this scsi_arq_status. 15184 */ 15185 asp = (struct scsi_arq_status *)(pktp->pkt_scbp); 15186 15187 if (asp->sts_rqpkt_reason != CMD_CMPLT) { 15188 /* 15189 * The auto REQUEST SENSE failed; see if we can re-try 15190 * the original command. 15191 */ 15192 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 15193 "auto request sense failed (reason=%s)\n", 15194 scsi_rname(asp->sts_rqpkt_reason)); 15195 15196 sd_reset_target(un, pktp); 15197 15198 sd_retry_command(un, bp, SD_RETRIES_STANDARD, 15199 NULL, NULL, EIO, (clock_t)0, NULL); 15200 return; 15201 } 15202 15203 /* Save the relevant sense info into the xp for the original cmd. */ 15204 xp->xb_sense_status = *((uchar_t *)(&(asp->sts_rqpkt_status))); 15205 xp->xb_sense_state = asp->sts_rqpkt_state; 15206 xp->xb_sense_resid = asp->sts_rqpkt_resid; 15207 bcopy(&asp->sts_sensedata, xp->xb_sense_data, 15208 min(sizeof (struct scsi_extended_sense), SENSE_LENGTH)); 15209 15210 /* 15211 * See if we have valid sense data, if so then turn it over to 15212 * sd_decode_sense() to figure out the right course of action. 15213 */ 15214 if (sd_validate_sense_data(un, bp, xp) == SD_SENSE_DATA_IS_VALID) { 15215 sd_decode_sense(un, bp, xp, pktp); 15216 } 15217 } 15218 15219 15220 /* 15221 * Function: sd_print_sense_failed_msg 15222 * 15223 * Description: Print log message when RQS has failed. 
15224 * 15225 * Arguments: un - ptr to associated softstate 15226 * bp - ptr to buf(9S) for the command 15227 * arg - generic message string ptr 15228 * code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED, 15229 * or SD_NO_RETRY_ISSUED 15230 * 15231 * Context: May be called from interrupt context 15232 */ 15233 15234 static void 15235 sd_print_sense_failed_msg(struct sd_lun *un, struct buf *bp, void *arg, 15236 int code) 15237 { 15238 char *msgp = arg; 15239 15240 ASSERT(un != NULL); 15241 ASSERT(mutex_owned(SD_MUTEX(un))); 15242 ASSERT(bp != NULL); 15243 15244 if ((code == SD_NO_RETRY_ISSUED) && (msgp != NULL)) { 15245 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, msgp); 15246 } 15247 } 15248 15249 15250 /* 15251 * Function: sd_validate_sense_data 15252 * 15253 * Description: Check the given sense data for validity. 15254 * If the sense data is not valid, the command will 15255 * be either failed or retried! 15256 * 15257 * Return Code: SD_SENSE_DATA_IS_INVALID 15258 * SD_SENSE_DATA_IS_VALID 15259 * 15260 * Context: May be called from interrupt context 15261 */ 15262 15263 static int 15264 sd_validate_sense_data(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp) 15265 { 15266 struct scsi_extended_sense *esp; 15267 struct scsi_pkt *pktp; 15268 size_t actual_len; 15269 char *msgp = NULL; 15270 15271 ASSERT(un != NULL); 15272 ASSERT(mutex_owned(SD_MUTEX(un))); 15273 ASSERT(bp != NULL); 15274 ASSERT(bp != un->un_rqs_bp); 15275 ASSERT(xp != NULL); 15276 15277 pktp = SD_GET_PKTP(bp); 15278 ASSERT(pktp != NULL); 15279 15280 /* 15281 * Check the status of the RQS command (auto or manual). 15282 */ 15283 switch (xp->xb_sense_status & STATUS_MASK) { 15284 case STATUS_GOOD: 15285 break; 15286 15287 case STATUS_RESERVATION_CONFLICT: 15288 sd_pkt_status_reservation_conflict(un, bp, xp, pktp); 15289 return (SD_SENSE_DATA_IS_INVALID); 15290 15291 case STATUS_BUSY: 15292 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 15293 "Busy Status on REQUEST SENSE\n"); 15294 sd_retry_command(un, bp, SD_RETRIES_BUSY, NULL, 15295 NULL, EIO, SD_BSY_TIMEOUT / 500, kstat_waitq_enter); 15296 return (SD_SENSE_DATA_IS_INVALID); 15297 15298 case STATUS_QFULL: 15299 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 15300 "QFULL Status on REQUEST SENSE\n"); 15301 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, 15302 NULL, EIO, SD_BSY_TIMEOUT / 500, kstat_waitq_enter); 15303 return (SD_SENSE_DATA_IS_INVALID); 15304 15305 case STATUS_CHECK: 15306 case STATUS_TERMINATED: 15307 msgp = "Check Condition on REQUEST SENSE\n"; 15308 goto sense_failed; 15309 15310 default: 15311 msgp = "Not STATUS_GOOD on REQUEST_SENSE\n"; 15312 goto sense_failed; 15313 } 15314 15315 /* 15316 * See if we got the minimum required amount of sense data. 15317 * Note: We are assuming the returned sense data is SENSE_LENGTH bytes 15318 * or less. 
15319 */ 15320 actual_len = (int)(SENSE_LENGTH - xp->xb_sense_resid); 15321 if (((xp->xb_sense_state & STATE_XFERRED_DATA) == 0) || 15322 (actual_len == 0)) { 15323 msgp = "Request Sense couldn't get sense data\n"; 15324 goto sense_failed; 15325 } 15326 15327 if (actual_len < SUN_MIN_SENSE_LENGTH) { 15328 msgp = "Not enough sense information\n"; 15329 goto sense_failed; 15330 } 15331 15332 /* 15333 * We require the extended sense data 15334 */ 15335 esp = (struct scsi_extended_sense *)xp->xb_sense_data; 15336 if (esp->es_class != CLASS_EXTENDED_SENSE) { 15337 if ((pktp->pkt_flags & FLAG_SILENT) == 0) { 15338 static char tmp[8]; 15339 static char buf[148]; 15340 char *p = (char *)(xp->xb_sense_data); 15341 int i; 15342 15343 mutex_enter(&sd_sense_mutex); 15344 (void) strcpy(buf, "undecodable sense information:"); 15345 for (i = 0; i < actual_len; i++) { 15346 (void) sprintf(tmp, " 0x%x", *(p++)&0xff); 15347 (void) strcpy(&buf[strlen(buf)], tmp); 15348 } 15349 i = strlen(buf); 15350 (void) strcpy(&buf[i], "-(assumed fatal)\n"); 15351 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, buf); 15352 mutex_exit(&sd_sense_mutex); 15353 } 15354 /* Note: Legacy behavior, fail the command with no retry */ 15355 sd_return_failed_command(un, bp, EIO); 15356 return (SD_SENSE_DATA_IS_INVALID); 15357 } 15358 15359 /* 15360 * Check that es_code is valid (es_class concatenated with es_code 15361 * make up the "response code" field. es_class will always be 7, so 15362 * make sure es_code is 0, 1, 2, 3 or 0xf. es_code will indicate the 15363 * format. 15364 */ 15365 if ((esp->es_code != CODE_FMT_FIXED_CURRENT) && 15366 (esp->es_code != CODE_FMT_FIXED_DEFERRED) && 15367 (esp->es_code != CODE_FMT_DESCR_CURRENT) && 15368 (esp->es_code != CODE_FMT_DESCR_DEFERRED) && 15369 (esp->es_code != CODE_FMT_VENDOR_SPECIFIC)) { 15370 goto sense_failed; 15371 } 15372 15373 return (SD_SENSE_DATA_IS_VALID); 15374 15375 sense_failed: 15376 /* 15377 * If the request sense failed (for whatever reason), attempt 15378 * to retry the original command. 15379 */ 15380 #if defined(__i386) || defined(__amd64) 15381 /* 15382 * SD_RETRY_DELAY is conditionally compile (#if fibre) in 15383 * sddef.h for Sparc platform, and x86 uses 1 binary 15384 * for both SCSI/FC. 15385 * The SD_RETRY_DELAY value need to be adjusted here 15386 * when SD_RETRY_DELAY change in sddef.h 15387 */ 15388 sd_retry_command(un, bp, SD_RETRIES_STANDARD, 15389 sd_print_sense_failed_msg, msgp, EIO, 15390 un->un_f_is_fibre?drv_usectohz(100000):(clock_t)0, NULL); 15391 #else 15392 sd_retry_command(un, bp, SD_RETRIES_STANDARD, 15393 sd_print_sense_failed_msg, msgp, EIO, SD_RETRY_DELAY, NULL); 15394 #endif 15395 15396 return (SD_SENSE_DATA_IS_INVALID); 15397 } 15398 15399 15400 15401 /* 15402 * Function: sd_decode_sense 15403 * 15404 * Description: Take recovery action(s) when SCSI Sense Data is received. 15405 * 15406 * Context: Interrupt context. 
15407 */ 15408 15409 static void 15410 sd_decode_sense(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp, 15411 struct scsi_pkt *pktp) 15412 { 15413 uint8_t sense_key; 15414 15415 ASSERT(un != NULL); 15416 ASSERT(mutex_owned(SD_MUTEX(un))); 15417 ASSERT(bp != NULL); 15418 ASSERT(bp != un->un_rqs_bp); 15419 ASSERT(xp != NULL); 15420 ASSERT(pktp != NULL); 15421 15422 sense_key = scsi_sense_key(xp->xb_sense_data); 15423 15424 switch (sense_key) { 15425 case KEY_NO_SENSE: 15426 sd_sense_key_no_sense(un, bp, xp, pktp); 15427 break; 15428 case KEY_RECOVERABLE_ERROR: 15429 sd_sense_key_recoverable_error(un, xp->xb_sense_data, 15430 bp, xp, pktp); 15431 break; 15432 case KEY_NOT_READY: 15433 sd_sense_key_not_ready(un, xp->xb_sense_data, 15434 bp, xp, pktp); 15435 break; 15436 case KEY_MEDIUM_ERROR: 15437 case KEY_HARDWARE_ERROR: 15438 sd_sense_key_medium_or_hardware_error(un, 15439 xp->xb_sense_data, bp, xp, pktp); 15440 break; 15441 case KEY_ILLEGAL_REQUEST: 15442 sd_sense_key_illegal_request(un, bp, xp, pktp); 15443 break; 15444 case KEY_UNIT_ATTENTION: 15445 sd_sense_key_unit_attention(un, xp->xb_sense_data, 15446 bp, xp, pktp); 15447 break; 15448 case KEY_WRITE_PROTECT: 15449 case KEY_VOLUME_OVERFLOW: 15450 case KEY_MISCOMPARE: 15451 sd_sense_key_fail_command(un, bp, xp, pktp); 15452 break; 15453 case KEY_BLANK_CHECK: 15454 sd_sense_key_blank_check(un, bp, xp, pktp); 15455 break; 15456 case KEY_ABORTED_COMMAND: 15457 sd_sense_key_aborted_command(un, bp, xp, pktp); 15458 break; 15459 case KEY_VENDOR_UNIQUE: 15460 case KEY_COPY_ABORTED: 15461 case KEY_EQUAL: 15462 case KEY_RESERVED: 15463 default: 15464 sd_sense_key_default(un, xp->xb_sense_data, 15465 bp, xp, pktp); 15466 break; 15467 } 15468 } 15469 15470 15471 /* 15472 * Function: sd_dump_memory 15473 * 15474 * Description: Debug logging routine to print the contents of a user provided 15475 * buffer. The output of the buffer is broken up into 256 byte 15476 * segments due to a size constraint of the scsi_log. 15477 * implementation. 15478 * 15479 * Arguments: un - ptr to softstate 15480 * comp - component mask 15481 * title - "title" string to preceed data when printed 15482 * data - ptr to data block to be printed 15483 * len - size of data block to be printed 15484 * fmt - SD_LOG_HEX (use 0x%02x format) or SD_LOG_CHAR (use %c) 15485 * 15486 * Context: May be called from interrupt context 15487 */ 15488 15489 #define SD_DUMP_MEMORY_BUF_SIZE 256 15490 15491 static char *sd_dump_format_string[] = { 15492 " 0x%02x", 15493 " %c" 15494 }; 15495 15496 static void 15497 sd_dump_memory(struct sd_lun *un, uint_t comp, char *title, uchar_t *data, 15498 int len, int fmt) 15499 { 15500 int i, j; 15501 int avail_count; 15502 int start_offset; 15503 int end_offset; 15504 size_t entry_len; 15505 char *bufp; 15506 char *local_buf; 15507 char *format_string; 15508 15509 ASSERT((fmt == SD_LOG_HEX) || (fmt == SD_LOG_CHAR)); 15510 15511 /* 15512 * In the debug version of the driver, this function is called from a 15513 * number of places which are NOPs in the release driver. 15514 * The debug driver therefore has additional methods of filtering 15515 * debug output. 15516 */ 15517 #ifdef SDDEBUG 15518 /* 15519 * In the debug version of the driver we can reduce the amount of debug 15520 * messages by setting sd_error_level to something other than 15521 * SCSI_ERR_ALL and clearing bits in sd_level_mask and 15522 * sd_component_mask. 
15523 */ 15524 if (((sd_level_mask & (SD_LOGMASK_DUMP_MEM | SD_LOGMASK_DIAG)) == 0) || 15525 (sd_error_level != SCSI_ERR_ALL)) { 15526 return; 15527 } 15528 if (((sd_component_mask & comp) == 0) || 15529 (sd_error_level != SCSI_ERR_ALL)) { 15530 return; 15531 } 15532 #else 15533 if (sd_error_level != SCSI_ERR_ALL) { 15534 return; 15535 } 15536 #endif 15537 15538 local_buf = kmem_zalloc(SD_DUMP_MEMORY_BUF_SIZE, KM_SLEEP); 15539 bufp = local_buf; 15540 /* 15541 * Available length is the length of local_buf[], minus the 15542 * length of the title string, minus one for the ":", minus 15543 * one for the newline, minus one for the NULL terminator. 15544 * This gives the #bytes available for holding the printed 15545 * values from the given data buffer. 15546 */ 15547 if (fmt == SD_LOG_HEX) { 15548 format_string = sd_dump_format_string[0]; 15549 } else /* SD_LOG_CHAR */ { 15550 format_string = sd_dump_format_string[1]; 15551 } 15552 /* 15553 * Available count is the number of elements from the given 15554 * data buffer that we can fit into the available length. 15555 * This is based upon the size of the format string used. 15556 * Make one entry and find it's size. 15557 */ 15558 (void) sprintf(bufp, format_string, data[0]); 15559 entry_len = strlen(bufp); 15560 avail_count = (SD_DUMP_MEMORY_BUF_SIZE - strlen(title) - 3) / entry_len; 15561 15562 j = 0; 15563 while (j < len) { 15564 bufp = local_buf; 15565 bzero(bufp, SD_DUMP_MEMORY_BUF_SIZE); 15566 start_offset = j; 15567 15568 end_offset = start_offset + avail_count; 15569 15570 (void) sprintf(bufp, "%s:", title); 15571 bufp += strlen(bufp); 15572 for (i = start_offset; ((i < end_offset) && (j < len)); 15573 i++, j++) { 15574 (void) sprintf(bufp, format_string, data[i]); 15575 bufp += entry_len; 15576 } 15577 (void) sprintf(bufp, "\n"); 15578 15579 scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE, "%s", local_buf); 15580 } 15581 kmem_free(local_buf, SD_DUMP_MEMORY_BUF_SIZE); 15582 } 15583 15584 /* 15585 * Function: sd_print_sense_msg 15586 * 15587 * Description: Log a message based upon the given sense data. 
15588 * 15589 * Arguments: un - ptr to associated softstate 15590 * bp - ptr to buf(9S) for the command 15591 * arg - ptr to associate sd_sense_info struct 15592 * code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED, 15593 * or SD_NO_RETRY_ISSUED 15594 * 15595 * Context: May be called from interrupt context 15596 */ 15597 15598 static void 15599 sd_print_sense_msg(struct sd_lun *un, struct buf *bp, void *arg, int code) 15600 { 15601 struct sd_xbuf *xp; 15602 struct scsi_pkt *pktp; 15603 uint8_t *sensep; 15604 daddr_t request_blkno; 15605 diskaddr_t err_blkno; 15606 int severity; 15607 int pfa_flag; 15608 extern struct scsi_key_strings scsi_cmds[]; 15609 15610 ASSERT(un != NULL); 15611 ASSERT(mutex_owned(SD_MUTEX(un))); 15612 ASSERT(bp != NULL); 15613 xp = SD_GET_XBUF(bp); 15614 ASSERT(xp != NULL); 15615 pktp = SD_GET_PKTP(bp); 15616 ASSERT(pktp != NULL); 15617 ASSERT(arg != NULL); 15618 15619 severity = ((struct sd_sense_info *)(arg))->ssi_severity; 15620 pfa_flag = ((struct sd_sense_info *)(arg))->ssi_pfa_flag; 15621 15622 if ((code == SD_DELAYED_RETRY_ISSUED) || 15623 (code == SD_IMMEDIATE_RETRY_ISSUED)) { 15624 severity = SCSI_ERR_RETRYABLE; 15625 } 15626 15627 /* Use absolute block number for the request block number */ 15628 request_blkno = xp->xb_blkno; 15629 15630 /* 15631 * Now try to get the error block number from the sense data 15632 */ 15633 sensep = xp->xb_sense_data; 15634 15635 if (scsi_sense_info_uint64(sensep, SENSE_LENGTH, 15636 (uint64_t *)&err_blkno)) { 15637 /* 15638 * We retrieved the error block number from the information 15639 * portion of the sense data. 15640 * 15641 * For USCSI commands we are better off using the error 15642 * block no. as the requested block no. (This is the best 15643 * we can estimate.) 15644 */ 15645 if ((SD_IS_BUFIO(xp) == FALSE) && 15646 ((pktp->pkt_flags & FLAG_SILENT) == 0)) { 15647 request_blkno = err_blkno; 15648 } 15649 } else { 15650 /* 15651 * Without the es_valid bit set (for fixed format) or an 15652 * information descriptor (for descriptor format) we cannot 15653 * be certain of the error blkno, so just use the 15654 * request_blkno. 15655 */ 15656 err_blkno = (diskaddr_t)request_blkno; 15657 } 15658 15659 /* 15660 * The following will log the buffer contents for the release driver 15661 * if the SD_LOGMASK_DIAG bit of sd_level_mask is set, or the error 15662 * level is set to verbose. 
15663 */ 15664 sd_dump_memory(un, SD_LOG_IO, "Failed CDB", 15665 (uchar_t *)pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX); 15666 sd_dump_memory(un, SD_LOG_IO, "Sense Data", 15667 (uchar_t *)sensep, SENSE_LENGTH, SD_LOG_HEX); 15668 15669 if (pfa_flag == FALSE) { 15670 /* This is normally only set for USCSI */ 15671 if ((pktp->pkt_flags & FLAG_SILENT) != 0) { 15672 return; 15673 } 15674 15675 if ((SD_IS_BUFIO(xp) == TRUE) && 15676 (((sd_level_mask & SD_LOGMASK_DIAG) == 0) && 15677 (severity < sd_error_level))) { 15678 return; 15679 } 15680 } 15681 15682 /* 15683 * Check for Sonoma Failover and keep a count of how many failed I/O's 15684 */ 15685 if ((SD_IS_LSI(un)) && 15686 (scsi_sense_key(sensep) == KEY_ILLEGAL_REQUEST) && 15687 (scsi_sense_asc(sensep) == 0x94) && 15688 (scsi_sense_ascq(sensep) == 0x01)) { 15689 un->un_sonoma_failure_count++; 15690 if (un->un_sonoma_failure_count > 1) { 15691 return; 15692 } 15693 } 15694 15695 scsi_vu_errmsg(SD_SCSI_DEVP(un), pktp, sd_label, severity, 15696 request_blkno, err_blkno, scsi_cmds, 15697 (struct scsi_extended_sense *)sensep, 15698 un->un_additional_codes, NULL); 15699 } 15700 15701 /* 15702 * Function: sd_sense_key_no_sense 15703 * 15704 * Description: Recovery action when sense data was not received. 15705 * 15706 * Context: May be called from interrupt context 15707 */ 15708 15709 static void 15710 sd_sense_key_no_sense(struct sd_lun *un, struct buf *bp, 15711 struct sd_xbuf *xp, struct scsi_pkt *pktp) 15712 { 15713 struct sd_sense_info si; 15714 15715 ASSERT(un != NULL); 15716 ASSERT(mutex_owned(SD_MUTEX(un))); 15717 ASSERT(bp != NULL); 15718 ASSERT(xp != NULL); 15719 ASSERT(pktp != NULL); 15720 15721 si.ssi_severity = SCSI_ERR_FATAL; 15722 si.ssi_pfa_flag = FALSE; 15723 15724 SD_UPDATE_ERRSTATS(un, sd_softerrs); 15725 15726 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 15727 &si, EIO, (clock_t)0, NULL); 15728 } 15729 15730 15731 /* 15732 * Function: sd_sense_key_recoverable_error 15733 * 15734 * Description: Recovery actions for a SCSI "Recovered Error" sense key. 15735 * 15736 * Context: May be called from interrupt context 15737 */ 15738 15739 static void 15740 sd_sense_key_recoverable_error(struct sd_lun *un, 15741 uint8_t *sense_datap, 15742 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 15743 { 15744 struct sd_sense_info si; 15745 uint8_t asc = scsi_sense_asc(sense_datap); 15746 15747 ASSERT(un != NULL); 15748 ASSERT(mutex_owned(SD_MUTEX(un))); 15749 ASSERT(bp != NULL); 15750 ASSERT(xp != NULL); 15751 ASSERT(pktp != NULL); 15752 15753 /* 15754 * 0x5D: FAILURE PREDICTION THRESHOLD EXCEEDED 15755 */ 15756 if ((asc == 0x5D) && (sd_report_pfa != 0)) { 15757 SD_UPDATE_ERRSTATS(un, sd_rq_pfa_err); 15758 si.ssi_severity = SCSI_ERR_INFO; 15759 si.ssi_pfa_flag = TRUE; 15760 } else { 15761 SD_UPDATE_ERRSTATS(un, sd_softerrs); 15762 SD_UPDATE_ERRSTATS(un, sd_rq_recov_err); 15763 si.ssi_severity = SCSI_ERR_RECOVERED; 15764 si.ssi_pfa_flag = FALSE; 15765 } 15766 15767 if (pktp->pkt_resid == 0) { 15768 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 15769 sd_return_command(un, bp); 15770 return; 15771 } 15772 15773 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 15774 &si, EIO, (clock_t)0, NULL); 15775 } 15776 15777 15778 15779 15780 /* 15781 * Function: sd_sense_key_not_ready 15782 * 15783 * Description: Recovery actions for a SCSI "Not Ready" sense key. 
15784 * 15785 * Context: May be called from interrupt context 15786 */ 15787 15788 static void 15789 sd_sense_key_not_ready(struct sd_lun *un, 15790 uint8_t *sense_datap, 15791 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 15792 { 15793 struct sd_sense_info si; 15794 uint8_t asc = scsi_sense_asc(sense_datap); 15795 uint8_t ascq = scsi_sense_ascq(sense_datap); 15796 15797 ASSERT(un != NULL); 15798 ASSERT(mutex_owned(SD_MUTEX(un))); 15799 ASSERT(bp != NULL); 15800 ASSERT(xp != NULL); 15801 ASSERT(pktp != NULL); 15802 15803 si.ssi_severity = SCSI_ERR_FATAL; 15804 si.ssi_pfa_flag = FALSE; 15805 15806 /* 15807 * Update error stats after first NOT READY error. Disks may have 15808 * been powered down and may need to be restarted. For CDROMs, 15809 * report NOT READY errors only if media is present. 15810 */ 15811 if ((ISCD(un) && (asc == 0x3A)) || 15812 (xp->xb_retry_count > 0)) { 15813 SD_UPDATE_ERRSTATS(un, sd_harderrs); 15814 SD_UPDATE_ERRSTATS(un, sd_rq_ntrdy_err); 15815 } 15816 15817 /* 15818 * Just fail if the "not ready" retry limit has been reached. 15819 */ 15820 if (xp->xb_retry_count >= un->un_notready_retry_count) { 15821 /* Special check for error message printing for removables. */ 15822 if (un->un_f_has_removable_media && (asc == 0x04) && 15823 (ascq >= 0x04)) { 15824 si.ssi_severity = SCSI_ERR_ALL; 15825 } 15826 goto fail_command; 15827 } 15828 15829 /* 15830 * Check the ASC and ASCQ in the sense data as needed, to determine 15831 * what to do. 15832 */ 15833 switch (asc) { 15834 case 0x04: /* LOGICAL UNIT NOT READY */ 15835 /* 15836 * disk drives that don't spin up result in a very long delay 15837 * in format without warning messages. We will log a message 15838 * if the error level is set to verbose. 15839 */ 15840 if (sd_error_level < SCSI_ERR_RETRYABLE) { 15841 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 15842 "logical unit not ready, resetting disk\n"); 15843 } 15844 15845 /* 15846 * There are different requirements for CDROMs and disks for 15847 * the number of retries. If a CD-ROM is giving this, it is 15848 * probably reading TOC and is in the process of getting 15849 * ready, so we should keep on trying for a long time to make 15850 * sure that all types of media are taken in account (for 15851 * some media the drive takes a long time to read TOC). For 15852 * disks we do not want to retry this too many times as this 15853 * can cause a long hang in format when the drive refuses to 15854 * spin up (a very common failure). 15855 */ 15856 switch (ascq) { 15857 case 0x00: /* LUN NOT READY, CAUSE NOT REPORTABLE */ 15858 /* 15859 * Disk drives frequently refuse to spin up which 15860 * results in a very long hang in format without 15861 * warning messages. 15862 * 15863 * Note: This code preserves the legacy behavior of 15864 * comparing xb_retry_count against zero for fibre 15865 * channel targets instead of comparing against the 15866 * un_reset_retry_count value. The reason for this 15867 * discrepancy has been so utterly lost beneath the 15868 * Sands of Time that even Indiana Jones could not 15869 * find it. 
15870 */ 15871 if (un->un_f_is_fibre == TRUE) { 15872 if (((sd_level_mask & SD_LOGMASK_DIAG) || 15873 (xp->xb_retry_count > 0)) && 15874 (un->un_startstop_timeid == NULL)) { 15875 scsi_log(SD_DEVINFO(un), sd_label, 15876 CE_WARN, "logical unit not ready, " 15877 "resetting disk\n"); 15878 sd_reset_target(un, pktp); 15879 } 15880 } else { 15881 if (((sd_level_mask & SD_LOGMASK_DIAG) || 15882 (xp->xb_retry_count > 15883 un->un_reset_retry_count)) && 15884 (un->un_startstop_timeid == NULL)) { 15885 scsi_log(SD_DEVINFO(un), sd_label, 15886 CE_WARN, "logical unit not ready, " 15887 "resetting disk\n"); 15888 sd_reset_target(un, pktp); 15889 } 15890 } 15891 break; 15892 15893 case 0x01: /* LUN IS IN PROCESS OF BECOMING READY */ 15894 /* 15895 * If the target is in the process of becoming 15896 * ready, just proceed with the retry. This can 15897 * happen with CD-ROMs that take a long time to 15898 * read TOC after a power cycle or reset. 15899 */ 15900 goto do_retry; 15901 15902 case 0x02: /* LUN NOT READY, INITITIALIZING CMD REQUIRED */ 15903 break; 15904 15905 case 0x03: /* LUN NOT READY, MANUAL INTERVENTION REQUIRED */ 15906 /* 15907 * Retries cannot help here so just fail right away. 15908 */ 15909 goto fail_command; 15910 15911 case 0x88: 15912 /* 15913 * Vendor-unique code for T3/T4: it indicates a 15914 * path problem in a mutipathed config, but as far as 15915 * the target driver is concerned it equates to a fatal 15916 * error, so we should just fail the command right away 15917 * (without printing anything to the console). If this 15918 * is not a T3/T4, fall thru to the default recovery 15919 * action. 15920 * T3/T4 is FC only, don't need to check is_fibre 15921 */ 15922 if (SD_IS_T3(un) || SD_IS_T4(un)) { 15923 sd_return_failed_command(un, bp, EIO); 15924 return; 15925 } 15926 /* FALLTHRU */ 15927 15928 case 0x04: /* LUN NOT READY, FORMAT IN PROGRESS */ 15929 case 0x05: /* LUN NOT READY, REBUILD IN PROGRESS */ 15930 case 0x06: /* LUN NOT READY, RECALCULATION IN PROGRESS */ 15931 case 0x07: /* LUN NOT READY, OPERATION IN PROGRESS */ 15932 case 0x08: /* LUN NOT READY, LONG WRITE IN PROGRESS */ 15933 default: /* Possible future codes in SCSI spec? */ 15934 /* 15935 * For removable-media devices, do not retry if 15936 * ASCQ > 2 as these result mostly from USCSI commands 15937 * on MMC devices issued to check status of an 15938 * operation initiated in immediate mode. Also for 15939 * ASCQ >= 4 do not print console messages as these 15940 * mainly represent a user-initiated operation 15941 * instead of a system failure. 15942 */ 15943 if (un->un_f_has_removable_media) { 15944 si.ssi_severity = SCSI_ERR_ALL; 15945 goto fail_command; 15946 } 15947 break; 15948 } 15949 15950 /* 15951 * As part of our recovery attempt for the NOT READY 15952 * condition, we issue a START STOP UNIT command. However 15953 * we want to wait for a short delay before attempting this 15954 * as there may still be more commands coming back from the 15955 * target with the check condition. To do this we use 15956 * timeout(9F) to call sd_start_stop_unit_callback() after 15957 * the delay interval expires. (sd_start_stop_unit_callback() 15958 * dispatches sd_start_stop_unit_task(), which will issue 15959 * the actual START STOP UNIT command. The delay interval 15960 * is one-half of the delay that we will use to retry the 15961 * command that generated the NOT READY condition. 
15962 * 15963 * Note that we could just dispatch sd_start_stop_unit_task() 15964 * from here and allow it to sleep for the delay interval, 15965 * but then we would be tying up the taskq thread 15966 * uncesessarily for the duration of the delay. 15967 * 15968 * Do not issue the START STOP UNIT if the current command 15969 * is already a START STOP UNIT. 15970 */ 15971 if (pktp->pkt_cdbp[0] == SCMD_START_STOP) { 15972 break; 15973 } 15974 15975 /* 15976 * Do not schedule the timeout if one is already pending. 15977 */ 15978 if (un->un_startstop_timeid != NULL) { 15979 SD_INFO(SD_LOG_ERROR, un, 15980 "sd_sense_key_not_ready: restart already issued to" 15981 " %s%d\n", ddi_driver_name(SD_DEVINFO(un)), 15982 ddi_get_instance(SD_DEVINFO(un))); 15983 break; 15984 } 15985 15986 /* 15987 * Schedule the START STOP UNIT command, then queue the command 15988 * for a retry. 15989 * 15990 * Note: A timeout is not scheduled for this retry because we 15991 * want the retry to be serial with the START_STOP_UNIT. The 15992 * retry will be started when the START_STOP_UNIT is completed 15993 * in sd_start_stop_unit_task. 15994 */ 15995 un->un_startstop_timeid = timeout(sd_start_stop_unit_callback, 15996 un, SD_BSY_TIMEOUT / 2); 15997 xp->xb_retry_count++; 15998 sd_set_retry_bp(un, bp, 0, kstat_waitq_enter); 15999 return; 16000 16001 case 0x05: /* LOGICAL UNIT DOES NOT RESPOND TO SELECTION */ 16002 if (sd_error_level < SCSI_ERR_RETRYABLE) { 16003 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16004 "unit does not respond to selection\n"); 16005 } 16006 break; 16007 16008 case 0x3A: /* MEDIUM NOT PRESENT */ 16009 if (sd_error_level >= SCSI_ERR_FATAL) { 16010 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16011 "Caddy not inserted in drive\n"); 16012 } 16013 16014 sr_ejected(un); 16015 un->un_mediastate = DKIO_EJECTED; 16016 /* The state has changed, inform the media watch routines */ 16017 cv_broadcast(&un->un_state_cv); 16018 /* Just fail if no media is present in the drive. */ 16019 goto fail_command; 16020 16021 default: 16022 if (sd_error_level < SCSI_ERR_RETRYABLE) { 16023 scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE, 16024 "Unit not Ready. Additional sense code 0x%x\n", 16025 asc); 16026 } 16027 break; 16028 } 16029 16030 do_retry: 16031 16032 /* 16033 * Retry the command, as some targets may report NOT READY for 16034 * several seconds after being reset. 16035 */ 16036 xp->xb_retry_count++; 16037 si.ssi_severity = SCSI_ERR_RETRYABLE; 16038 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, sd_print_sense_msg, 16039 &si, EIO, SD_BSY_TIMEOUT, NULL); 16040 16041 return; 16042 16043 fail_command: 16044 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 16045 sd_return_failed_command(un, bp, EIO); 16046 } 16047 16048 16049 16050 /* 16051 * Function: sd_sense_key_medium_or_hardware_error 16052 * 16053 * Description: Recovery actions for a SCSI "Medium Error" or "Hardware Error" 16054 * sense key. 
16055 * 16056 * Context: May be called from interrupt context 16057 */ 16058 16059 static void 16060 sd_sense_key_medium_or_hardware_error(struct sd_lun *un, 16061 uint8_t *sense_datap, 16062 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 16063 { 16064 struct sd_sense_info si; 16065 uint8_t sense_key = scsi_sense_key(sense_datap); 16066 uint8_t asc = scsi_sense_asc(sense_datap); 16067 16068 ASSERT(un != NULL); 16069 ASSERT(mutex_owned(SD_MUTEX(un))); 16070 ASSERT(bp != NULL); 16071 ASSERT(xp != NULL); 16072 ASSERT(pktp != NULL); 16073 16074 si.ssi_severity = SCSI_ERR_FATAL; 16075 si.ssi_pfa_flag = FALSE; 16076 16077 if (sense_key == KEY_MEDIUM_ERROR) { 16078 SD_UPDATE_ERRSTATS(un, sd_rq_media_err); 16079 } 16080 16081 SD_UPDATE_ERRSTATS(un, sd_harderrs); 16082 16083 if ((un->un_reset_retry_count != 0) && 16084 (xp->xb_retry_count == un->un_reset_retry_count)) { 16085 mutex_exit(SD_MUTEX(un)); 16086 /* Do NOT do a RESET_ALL here: too intrusive. (4112858) */ 16087 if (un->un_f_allow_bus_device_reset == TRUE) { 16088 16089 boolean_t try_resetting_target = B_TRUE; 16090 16091 /* 16092 * We need to be able to handle specific ASC when we are 16093 * handling a KEY_HARDWARE_ERROR. In particular 16094 * taking the default action of resetting the target may 16095 * not be the appropriate way to attempt recovery. 16096 * Resetting a target because of a single LUN failure 16097 * victimizes all LUNs on that target. 16098 * 16099 * This is true for the LSI arrays, if an LSI 16100 * array controller returns an ASC of 0x84 (LUN Dead) we 16101 * should trust it. 16102 */ 16103 16104 if (sense_key == KEY_HARDWARE_ERROR) { 16105 switch (asc) { 16106 case 0x84: 16107 if (SD_IS_LSI(un)) { 16108 try_resetting_target = B_FALSE; 16109 } 16110 break; 16111 default: 16112 break; 16113 } 16114 } 16115 16116 if (try_resetting_target == B_TRUE) { 16117 int reset_retval = 0; 16118 if (un->un_f_lun_reset_enabled == TRUE) { 16119 SD_TRACE(SD_LOG_IO_CORE, un, 16120 "sd_sense_key_medium_or_hardware_" 16121 "error: issuing RESET_LUN\n"); 16122 reset_retval = 16123 scsi_reset(SD_ADDRESS(un), 16124 RESET_LUN); 16125 } 16126 if (reset_retval == 0) { 16127 SD_TRACE(SD_LOG_IO_CORE, un, 16128 "sd_sense_key_medium_or_hardware_" 16129 "error: issuing RESET_TARGET\n"); 16130 (void) scsi_reset(SD_ADDRESS(un), 16131 RESET_TARGET); 16132 } 16133 } 16134 } 16135 mutex_enter(SD_MUTEX(un)); 16136 } 16137 16138 /* 16139 * This really ought to be a fatal error, but we will retry anyway 16140 * as some drives report this as a spurious error. 16141 */ 16142 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 16143 &si, EIO, (clock_t)0, NULL); 16144 } 16145 16146 16147 16148 /* 16149 * Function: sd_sense_key_illegal_request 16150 * 16151 * Description: Recovery actions for a SCSI "Illegal Request" sense key. 
16152 * 16153 * Context: May be called from interrupt context 16154 */ 16155 16156 static void 16157 sd_sense_key_illegal_request(struct sd_lun *un, struct buf *bp, 16158 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16159 { 16160 struct sd_sense_info si; 16161 16162 ASSERT(un != NULL); 16163 ASSERT(mutex_owned(SD_MUTEX(un))); 16164 ASSERT(bp != NULL); 16165 ASSERT(xp != NULL); 16166 ASSERT(pktp != NULL); 16167 16168 SD_UPDATE_ERRSTATS(un, sd_softerrs); 16169 SD_UPDATE_ERRSTATS(un, sd_rq_illrq_err); 16170 16171 si.ssi_severity = SCSI_ERR_INFO; 16172 si.ssi_pfa_flag = FALSE; 16173 16174 /* Pointless to retry if the target thinks it's an illegal request */ 16175 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 16176 sd_return_failed_command(un, bp, EIO); 16177 } 16178 16179 16180 16181 16182 /* 16183 * Function: sd_sense_key_unit_attention 16184 * 16185 * Description: Recovery actions for a SCSI "Unit Attention" sense key. 16186 * 16187 * Context: May be called from interrupt context 16188 */ 16189 16190 static void 16191 sd_sense_key_unit_attention(struct sd_lun *un, 16192 uint8_t *sense_datap, 16193 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 16194 { 16195 /* 16196 * For UNIT ATTENTION we allow retries for one minute. Devices 16197 * like Sonoma can return UNIT ATTENTION close to a minute 16198 * under certain conditions. 16199 */ 16200 int retry_check_flag = SD_RETRIES_UA; 16201 boolean_t kstat_updated = B_FALSE; 16202 struct sd_sense_info si; 16203 uint8_t asc = scsi_sense_asc(sense_datap); 16204 16205 ASSERT(un != NULL); 16206 ASSERT(mutex_owned(SD_MUTEX(un))); 16207 ASSERT(bp != NULL); 16208 ASSERT(xp != NULL); 16209 ASSERT(pktp != NULL); 16210 16211 si.ssi_severity = SCSI_ERR_INFO; 16212 si.ssi_pfa_flag = FALSE; 16213 16214 16215 switch (asc) { 16216 case 0x5D: /* FAILURE PREDICTION THRESHOLD EXCEEDED */ 16217 if (sd_report_pfa != 0) { 16218 SD_UPDATE_ERRSTATS(un, sd_rq_pfa_err); 16219 si.ssi_pfa_flag = TRUE; 16220 retry_check_flag = SD_RETRIES_STANDARD; 16221 goto do_retry; 16222 } 16223 16224 break; 16225 16226 case 0x29: /* POWER ON, RESET, OR BUS DEVICE RESET OCCURRED */ 16227 if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) { 16228 un->un_resvd_status |= 16229 (SD_LOST_RESERVE | SD_WANT_RESERVE); 16230 } 16231 #ifdef _LP64 16232 if (un->un_blockcount + 1 > SD_GROUP1_MAX_ADDRESS) { 16233 if (taskq_dispatch(sd_tq, sd_reenable_dsense_task, 16234 un, KM_NOSLEEP) == 0) { 16235 /* 16236 * If we can't dispatch the task we'll just 16237 * live without descriptor sense. We can 16238 * try again on the next "unit attention" 16239 */ 16240 SD_ERROR(SD_LOG_ERROR, un, 16241 "sd_sense_key_unit_attention: " 16242 "Could not dispatch " 16243 "sd_reenable_dsense_task\n"); 16244 } 16245 } 16246 #endif /* _LP64 */ 16247 /* FALLTHRU */ 16248 16249 case 0x28: /* NOT READY TO READY CHANGE, MEDIUM MAY HAVE CHANGED */ 16250 if (!un->un_f_has_removable_media) { 16251 break; 16252 } 16253 16254 /* 16255 * When we get a unit attention from a removable-media device, 16256 * it may be in a state that will take a long time to recover 16257 * (e.g., from a reset). Since we are executing in interrupt 16258 * context here, we cannot wait around for the device to come 16259 * back. So hand this command off to sd_media_change_task() 16260 * for deferred processing under taskq thread context. (Note 16261 * that the command still may be failed if a problem is 16262 * encountered at a later time.) 
16263 */
16264 if (taskq_dispatch(sd_tq, sd_media_change_task, pktp,
16265 KM_NOSLEEP) == 0) {
16266 /*
16267 * Cannot dispatch the request so fail the command.
16268 */
16269 SD_UPDATE_ERRSTATS(un, sd_harderrs);
16270 SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err);
16271 si.ssi_severity = SCSI_ERR_FATAL;
16272 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
16273 sd_return_failed_command(un, bp, EIO);
16274 }
16275
16276 /*
16277 * If the dispatch of sd_media_change_task() failed, we already
16278 * updated the kstats above. If the dispatch succeeded, the kstats
16279 * will be updated later if the deferred processing hits an error.
16280 * Either way, set the kstat_updated flag here.
16281 */
16282 kstat_updated = B_TRUE;
16283
16284 /*
16285 * Either the command has been successfully dispatched to a
16286 * task Q for retrying, or the dispatch failed. In either case,
16287 * do NOT retry again by calling sd_retry_command. That would set
16288 * up two retries of the same command; when one completed and
16289 * freed its resources, the other would access freed memory,
16290 * a bad thing.
16291 */
16292 return;
16293
16294 default:
16295 break;
16296 }
16297
16298 /*
16299 * Update the kstats if we haven't done so already.
16300 */
16301 if (!kstat_updated) {
16302 SD_UPDATE_ERRSTATS(un, sd_harderrs);
16303 SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err);
16304 }
16305
16306 do_retry:
16307 sd_retry_command(un, bp, retry_check_flag, sd_print_sense_msg, &si,
16308 EIO, SD_UA_RETRY_DELAY, NULL);
16309 }
16310
16311
16312
16313 /*
16314 * Function: sd_sense_key_fail_command
16315 *
16316 * Description: Used to fail a command when we don't like the sense key that
16317 * was returned.
16318 *
16319 * Context: May be called from interrupt context
16320 */
16321
16322 static void
16323 sd_sense_key_fail_command(struct sd_lun *un, struct buf *bp,
16324 struct sd_xbuf *xp, struct scsi_pkt *pktp)
16325 {
16326 struct sd_sense_info si;
16327
16328 ASSERT(un != NULL);
16329 ASSERT(mutex_owned(SD_MUTEX(un)));
16330 ASSERT(bp != NULL);
16331 ASSERT(xp != NULL);
16332 ASSERT(pktp != NULL);
16333
16334 si.ssi_severity = SCSI_ERR_FATAL;
16335 si.ssi_pfa_flag = FALSE;
16336
16337 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
16338 sd_return_failed_command(un, bp, EIO);
16339 }
16340
16341
16342
16343 /*
16344 * Function: sd_sense_key_blank_check
16345 *
16346 * Description: Recovery actions for a SCSI "Blank Check" sense key.
16347 * Has no monetary connotation.
16348 *
16349 * Context: May be called from interrupt context
16350 */
16351
16352 static void
16353 sd_sense_key_blank_check(struct sd_lun *un, struct buf *bp,
16354 struct sd_xbuf *xp, struct scsi_pkt *pktp)
16355 {
16356 struct sd_sense_info si;
16357
16358 ASSERT(un != NULL);
16359 ASSERT(mutex_owned(SD_MUTEX(un)));
16360 ASSERT(bp != NULL);
16361 ASSERT(xp != NULL);
16362 ASSERT(pktp != NULL);
16363
16364 /*
16365 * Blank check is not fatal for removable devices, therefore
16366 * it does not require a console message.
16367 */
16368 si.ssi_severity = (un->un_f_has_removable_media) ? SCSI_ERR_ALL :
16369 SCSI_ERR_FATAL;
16370 si.ssi_pfa_flag = FALSE;
16371
16372 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
16373 sd_return_failed_command(un, bp, EIO);
16374 }
16375
16376
16377
16378
16379 /*
16380 * Function: sd_sense_key_aborted_command
16381 *
16382 * Description: Recovery actions for a SCSI "Aborted Command" sense key.
16383 * 16384 * Context: May be called from interrupt context 16385 */ 16386 16387 static void 16388 sd_sense_key_aborted_command(struct sd_lun *un, struct buf *bp, 16389 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16390 { 16391 struct sd_sense_info si; 16392 16393 ASSERT(un != NULL); 16394 ASSERT(mutex_owned(SD_MUTEX(un))); 16395 ASSERT(bp != NULL); 16396 ASSERT(xp != NULL); 16397 ASSERT(pktp != NULL); 16398 16399 si.ssi_severity = SCSI_ERR_FATAL; 16400 si.ssi_pfa_flag = FALSE; 16401 16402 SD_UPDATE_ERRSTATS(un, sd_harderrs); 16403 16404 /* 16405 * This really ought to be a fatal error, but we will retry anyway 16406 * as some drives report this as a spurious error. 16407 */ 16408 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 16409 &si, EIO, (clock_t)0, NULL); 16410 } 16411 16412 16413 16414 /* 16415 * Function: sd_sense_key_default 16416 * 16417 * Description: Default recovery action for several SCSI sense keys (basically 16418 * attempts a retry). 16419 * 16420 * Context: May be called from interrupt context 16421 */ 16422 16423 static void 16424 sd_sense_key_default(struct sd_lun *un, 16425 uint8_t *sense_datap, 16426 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 16427 { 16428 struct sd_sense_info si; 16429 uint8_t sense_key = scsi_sense_key(sense_datap); 16430 16431 ASSERT(un != NULL); 16432 ASSERT(mutex_owned(SD_MUTEX(un))); 16433 ASSERT(bp != NULL); 16434 ASSERT(xp != NULL); 16435 ASSERT(pktp != NULL); 16436 16437 SD_UPDATE_ERRSTATS(un, sd_harderrs); 16438 16439 /* 16440 * Undecoded sense key. Attempt retries and hope that will fix 16441 * the problem. Otherwise, we're dead. 16442 */ 16443 if ((pktp->pkt_flags & FLAG_SILENT) == 0) { 16444 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16445 "Unhandled Sense Key '%s'\n", sense_keys[sense_key]); 16446 } 16447 16448 si.ssi_severity = SCSI_ERR_FATAL; 16449 si.ssi_pfa_flag = FALSE; 16450 16451 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 16452 &si, EIO, (clock_t)0, NULL); 16453 } 16454 16455 16456 16457 /* 16458 * Function: sd_print_retry_msg 16459 * 16460 * Description: Print a message indicating the retry action being taken. 16461 * 16462 * Arguments: un - ptr to associated softstate 16463 * bp - ptr to buf(9S) for the command 16464 * arg - not used. 16465 * flag - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED, 16466 * or SD_NO_RETRY_ISSUED 16467 * 16468 * Context: May be called from interrupt context 16469 */ 16470 /* ARGSUSED */ 16471 static void 16472 sd_print_retry_msg(struct sd_lun *un, struct buf *bp, void *arg, int flag) 16473 { 16474 struct sd_xbuf *xp; 16475 struct scsi_pkt *pktp; 16476 char *reasonp; 16477 char *msgp; 16478 16479 ASSERT(un != NULL); 16480 ASSERT(mutex_owned(SD_MUTEX(un))); 16481 ASSERT(bp != NULL); 16482 pktp = SD_GET_PKTP(bp); 16483 ASSERT(pktp != NULL); 16484 xp = SD_GET_XBUF(bp); 16485 ASSERT(xp != NULL); 16486 16487 ASSERT(!mutex_owned(&un->un_pm_mutex)); 16488 mutex_enter(&un->un_pm_mutex); 16489 if ((un->un_state == SD_STATE_SUSPENDED) || 16490 (SD_DEVICE_IS_IN_LOW_POWER(un)) || 16491 (pktp->pkt_flags & FLAG_SILENT)) { 16492 mutex_exit(&un->un_pm_mutex); 16493 goto update_pkt_reason; 16494 } 16495 mutex_exit(&un->un_pm_mutex); 16496 16497 /* 16498 * Suppress messages if they are all the same pkt_reason; with 16499 * TQ, many (up to 256) are returned with the same pkt_reason. 16500 * If we are in panic, then suppress the retry messages. 
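 *
 * Editor's summary of the suppression logic implemented below: a
 * retry message is dropped if any of these conditions holds:
 *
 *	ddi_in_panic()
 *	un->un_state == SD_STATE_OFFLINE
 *	(pktp->pkt_reason == un->un_last_pkt_reason) &&
 *	    (sd_error_level != SCSI_ERR_ALL)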
16501 */ 16502 switch (flag) { 16503 case SD_NO_RETRY_ISSUED: 16504 msgp = "giving up"; 16505 break; 16506 case SD_IMMEDIATE_RETRY_ISSUED: 16507 case SD_DELAYED_RETRY_ISSUED: 16508 if (ddi_in_panic() || (un->un_state == SD_STATE_OFFLINE) || 16509 ((pktp->pkt_reason == un->un_last_pkt_reason) && 16510 (sd_error_level != SCSI_ERR_ALL))) { 16511 return; 16512 } 16513 msgp = "retrying command"; 16514 break; 16515 default: 16516 goto update_pkt_reason; 16517 } 16518 16519 reasonp = (((pktp->pkt_statistics & STAT_PERR) != 0) ? "parity error" : 16520 scsi_rname(pktp->pkt_reason)); 16521 16522 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16523 "SCSI transport failed: reason '%s': %s\n", reasonp, msgp); 16524 16525 update_pkt_reason: 16526 /* 16527 * Update un->un_last_pkt_reason with the value in pktp->pkt_reason. 16528 * This is to prevent multiple console messages for the same failure 16529 * condition. Note that un->un_last_pkt_reason is NOT restored if & 16530 * when the command is retried successfully because there still may be 16531 * more commands coming back with the same value of pktp->pkt_reason. 16532 */ 16533 if ((pktp->pkt_reason != CMD_CMPLT) || (xp->xb_retry_count == 0)) { 16534 un->un_last_pkt_reason = pktp->pkt_reason; 16535 } 16536 } 16537 16538 16539 /* 16540 * Function: sd_print_cmd_incomplete_msg 16541 * 16542 * Description: Message logging fn. for a SCSA "CMD_INCOMPLETE" pkt_reason. 16543 * 16544 * Arguments: un - ptr to associated softstate 16545 * bp - ptr to buf(9S) for the command 16546 * arg - passed to sd_print_retry_msg() 16547 * code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED, 16548 * or SD_NO_RETRY_ISSUED 16549 * 16550 * Context: May be called from interrupt context 16551 */ 16552 16553 static void 16554 sd_print_cmd_incomplete_msg(struct sd_lun *un, struct buf *bp, void *arg, 16555 int code) 16556 { 16557 dev_info_t *dip; 16558 16559 ASSERT(un != NULL); 16560 ASSERT(mutex_owned(SD_MUTEX(un))); 16561 ASSERT(bp != NULL); 16562 16563 switch (code) { 16564 case SD_NO_RETRY_ISSUED: 16565 /* Command was failed. Someone turned off this target? */ 16566 if (un->un_state != SD_STATE_OFFLINE) { 16567 /* 16568 * Suppress message if we are detaching and 16569 * device has been disconnected 16570 * Note that DEVI_IS_DEVICE_REMOVED is a consolidation 16571 * private interface and not part of the DDI 16572 */ 16573 dip = un->un_sd->sd_dev; 16574 if (!(DEVI_IS_DETACHING(dip) && 16575 DEVI_IS_DEVICE_REMOVED(dip))) { 16576 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16577 "disk not responding to selection\n"); 16578 } 16579 New_state(un, SD_STATE_OFFLINE); 16580 } 16581 break; 16582 16583 case SD_DELAYED_RETRY_ISSUED: 16584 case SD_IMMEDIATE_RETRY_ISSUED: 16585 default: 16586 /* Command was successfully queued for retry */ 16587 sd_print_retry_msg(un, bp, arg, code); 16588 break; 16589 } 16590 } 16591 16592 16593 /* 16594 * Function: sd_pkt_reason_cmd_incomplete 16595 * 16596 * Description: Recovery actions for a SCSA "CMD_INCOMPLETE" pkt_reason. 
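 *		Editor's note: the key decision in the body below is,
 *		schematically,
 *
 *			if ((pktp->pkt_state & STATE_GOT_TARGET) == 0)
 *				flag |= SD_RETRIES_FAILFAST;
 *
 *		i.e. a failed selection marks further retries failfast.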
16597 * 16598 * Context: May be called from interrupt context 16599 */ 16600 16601 static void 16602 sd_pkt_reason_cmd_incomplete(struct sd_lun *un, struct buf *bp, 16603 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16604 { 16605 int flag = SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE; 16606 16607 ASSERT(un != NULL); 16608 ASSERT(mutex_owned(SD_MUTEX(un))); 16609 ASSERT(bp != NULL); 16610 ASSERT(xp != NULL); 16611 ASSERT(pktp != NULL); 16612 16613 /* Do not do a reset if selection did not complete */ 16614 /* Note: Should this not just check the bit? */ 16615 if (pktp->pkt_state != STATE_GOT_BUS) { 16616 SD_UPDATE_ERRSTATS(un, sd_transerrs); 16617 sd_reset_target(un, pktp); 16618 } 16619 16620 /* 16621 * If the target was not successfully selected, then set 16622 * SD_RETRIES_FAILFAST to indicate that we lost communication 16623 * with the target, and further retries and/or commands are 16624 * likely to take a long time. 16625 */ 16626 if ((pktp->pkt_state & STATE_GOT_TARGET) == 0) { 16627 flag |= SD_RETRIES_FAILFAST; 16628 } 16629 16630 SD_UPDATE_RESERVATION_STATUS(un, pktp); 16631 16632 sd_retry_command(un, bp, flag, 16633 sd_print_cmd_incomplete_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 16634 } 16635 16636 16637 16638 /* 16639 * Function: sd_pkt_reason_cmd_tran_err 16640 * 16641 * Description: Recovery actions for a SCSA "CMD_TRAN_ERR" pkt_reason. 16642 * 16643 * Context: May be called from interrupt context 16644 */ 16645 16646 static void 16647 sd_pkt_reason_cmd_tran_err(struct sd_lun *un, struct buf *bp, 16648 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16649 { 16650 ASSERT(un != NULL); 16651 ASSERT(mutex_owned(SD_MUTEX(un))); 16652 ASSERT(bp != NULL); 16653 ASSERT(xp != NULL); 16654 ASSERT(pktp != NULL); 16655 16656 /* 16657 * Do not reset if we got a parity error, or if 16658 * selection did not complete. 16659 */ 16660 SD_UPDATE_ERRSTATS(un, sd_harderrs); 16661 /* Note: Should this not just check the bit for pkt_state? */ 16662 if (((pktp->pkt_statistics & STAT_PERR) == 0) && 16663 (pktp->pkt_state != STATE_GOT_BUS)) { 16664 SD_UPDATE_ERRSTATS(un, sd_transerrs); 16665 sd_reset_target(un, pktp); 16666 } 16667 16668 SD_UPDATE_RESERVATION_STATUS(un, pktp); 16669 16670 sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE), 16671 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 16672 } 16673 16674 16675 16676 /* 16677 * Function: sd_pkt_reason_cmd_reset 16678 * 16679 * Description: Recovery actions for a SCSA "CMD_RESET" pkt_reason. 16680 * 16681 * Context: May be called from interrupt context 16682 */ 16683 16684 static void 16685 sd_pkt_reason_cmd_reset(struct sd_lun *un, struct buf *bp, 16686 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16687 { 16688 ASSERT(un != NULL); 16689 ASSERT(mutex_owned(SD_MUTEX(un))); 16690 ASSERT(bp != NULL); 16691 ASSERT(xp != NULL); 16692 ASSERT(pktp != NULL); 16693 16694 /* The target may still be running the command, so try to reset. */ 16695 SD_UPDATE_ERRSTATS(un, sd_transerrs); 16696 sd_reset_target(un, pktp); 16697 16698 SD_UPDATE_RESERVATION_STATUS(un, pktp); 16699 16700 /* 16701 * If pkt_reason is CMD_RESET chances are that this pkt got 16702 * reset because another target on this bus caused it. The target 16703 * that caused it should get CMD_TIMEOUT with pkt_statistics 16704 * of STAT_TIMEOUT/STAT_DEV_RESET. 
16705 */ 16706 16707 sd_retry_command(un, bp, (SD_RETRIES_VICTIM | SD_RETRIES_ISOLATE), 16708 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 16709 } 16710 16711 16712 16713 16714 /* 16715 * Function: sd_pkt_reason_cmd_aborted 16716 * 16717 * Description: Recovery actions for a SCSA "CMD_ABORTED" pkt_reason. 16718 * 16719 * Context: May be called from interrupt context 16720 */ 16721 16722 static void 16723 sd_pkt_reason_cmd_aborted(struct sd_lun *un, struct buf *bp, 16724 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16725 { 16726 ASSERT(un != NULL); 16727 ASSERT(mutex_owned(SD_MUTEX(un))); 16728 ASSERT(bp != NULL); 16729 ASSERT(xp != NULL); 16730 ASSERT(pktp != NULL); 16731 16732 /* The target may still be running the command, so try to reset. */ 16733 SD_UPDATE_ERRSTATS(un, sd_transerrs); 16734 sd_reset_target(un, pktp); 16735 16736 SD_UPDATE_RESERVATION_STATUS(un, pktp); 16737 16738 /* 16739 * If pkt_reason is CMD_ABORTED chances are that this pkt got 16740 * aborted because another target on this bus caused it. The target 16741 * that caused it should get CMD_TIMEOUT with pkt_statistics 16742 * of STAT_TIMEOUT/STAT_DEV_RESET. 16743 */ 16744 16745 sd_retry_command(un, bp, (SD_RETRIES_VICTIM | SD_RETRIES_ISOLATE), 16746 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 16747 } 16748 16749 16750 16751 /* 16752 * Function: sd_pkt_reason_cmd_timeout 16753 * 16754 * Description: Recovery actions for a SCSA "CMD_TIMEOUT" pkt_reason. 16755 * 16756 * Context: May be called from interrupt context 16757 */ 16758 16759 static void 16760 sd_pkt_reason_cmd_timeout(struct sd_lun *un, struct buf *bp, 16761 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16762 { 16763 ASSERT(un != NULL); 16764 ASSERT(mutex_owned(SD_MUTEX(un))); 16765 ASSERT(bp != NULL); 16766 ASSERT(xp != NULL); 16767 ASSERT(pktp != NULL); 16768 16769 16770 SD_UPDATE_ERRSTATS(un, sd_transerrs); 16771 sd_reset_target(un, pktp); 16772 16773 SD_UPDATE_RESERVATION_STATUS(un, pktp); 16774 16775 /* 16776 * A command timeout indicates that we could not establish 16777 * communication with the target, so set SD_RETRIES_FAILFAST 16778 * as further retries/commands are likely to take a long time. 16779 */ 16780 sd_retry_command(un, bp, 16781 (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE | SD_RETRIES_FAILFAST), 16782 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 16783 } 16784 16785 16786 16787 /* 16788 * Function: sd_pkt_reason_cmd_unx_bus_free 16789 * 16790 * Description: Recovery actions for a SCSA "CMD_UNX_BUS_FREE" pkt_reason. 16791 * 16792 * Context: May be called from interrupt context 16793 */ 16794 16795 static void 16796 sd_pkt_reason_cmd_unx_bus_free(struct sd_lun *un, struct buf *bp, 16797 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16798 { 16799 void (*funcp)(struct sd_lun *un, struct buf *bp, void *arg, int code); 16800 16801 ASSERT(un != NULL); 16802 ASSERT(mutex_owned(SD_MUTEX(un))); 16803 ASSERT(bp != NULL); 16804 ASSERT(xp != NULL); 16805 ASSERT(pktp != NULL); 16806 16807 SD_UPDATE_ERRSTATS(un, sd_harderrs); 16808 SD_UPDATE_RESERVATION_STATUS(un, pktp); 16809 16810 funcp = ((pktp->pkt_statistics & STAT_PERR) == 0) ? 16811 sd_print_retry_msg : NULL; 16812 16813 sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE), 16814 funcp, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 16815 } 16816 16817 16818 /* 16819 * Function: sd_pkt_reason_cmd_tag_reject 16820 * 16821 * Description: Recovery actions for a SCSA "CMD_TAG_REJECT" pkt_reason. 
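 *		Editor's note: recovery disables tagged queuing and clamps
 *		the throttle; the body below amounts to:
 *
 *			un->un_tagflags = 0;
 *			un->un_throttle = (un->un_f_opt_queueing == TRUE) ?
 *			    min(un->un_throttle, 3) : 1;
 *			(void) scsi_ifsetcap(SD_ADDRESS(un),
 *			    "tagged-qing", 0, 1);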
16822 * 16823 * Context: May be called from interrupt context 16824 */ 16825 16826 static void 16827 sd_pkt_reason_cmd_tag_reject(struct sd_lun *un, struct buf *bp, 16828 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16829 { 16830 ASSERT(un != NULL); 16831 ASSERT(mutex_owned(SD_MUTEX(un))); 16832 ASSERT(bp != NULL); 16833 ASSERT(xp != NULL); 16834 ASSERT(pktp != NULL); 16835 16836 SD_UPDATE_ERRSTATS(un, sd_harderrs); 16837 pktp->pkt_flags = 0; 16838 un->un_tagflags = 0; 16839 if (un->un_f_opt_queueing == TRUE) { 16840 un->un_throttle = min(un->un_throttle, 3); 16841 } else { 16842 un->un_throttle = 1; 16843 } 16844 mutex_exit(SD_MUTEX(un)); 16845 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1); 16846 mutex_enter(SD_MUTEX(un)); 16847 16848 SD_UPDATE_RESERVATION_STATUS(un, pktp); 16849 16850 /* Legacy behavior not to check retry counts here. */ 16851 sd_retry_command(un, bp, (SD_RETRIES_NOCHECK | SD_RETRIES_ISOLATE), 16852 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 16853 } 16854 16855 16856 /* 16857 * Function: sd_pkt_reason_default 16858 * 16859 * Description: Default recovery actions for SCSA pkt_reason values that 16860 * do not have more explicit recovery actions. 16861 * 16862 * Context: May be called from interrupt context 16863 */ 16864 16865 static void 16866 sd_pkt_reason_default(struct sd_lun *un, struct buf *bp, 16867 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16868 { 16869 ASSERT(un != NULL); 16870 ASSERT(mutex_owned(SD_MUTEX(un))); 16871 ASSERT(bp != NULL); 16872 ASSERT(xp != NULL); 16873 ASSERT(pktp != NULL); 16874 16875 SD_UPDATE_ERRSTATS(un, sd_transerrs); 16876 sd_reset_target(un, pktp); 16877 16878 SD_UPDATE_RESERVATION_STATUS(un, pktp); 16879 16880 sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE), 16881 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 16882 } 16883 16884 16885 16886 /* 16887 * Function: sd_pkt_status_check_condition 16888 * 16889 * Description: Recovery actions for a "STATUS_CHECK" SCSI command status. 16890 * 16891 * Context: May be called from interrupt context 16892 */ 16893 16894 static void 16895 sd_pkt_status_check_condition(struct sd_lun *un, struct buf *bp, 16896 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16897 { 16898 ASSERT(un != NULL); 16899 ASSERT(mutex_owned(SD_MUTEX(un))); 16900 ASSERT(bp != NULL); 16901 ASSERT(xp != NULL); 16902 ASSERT(pktp != NULL); 16903 16904 SD_TRACE(SD_LOG_IO, un, "sd_pkt_status_check_condition: " 16905 "entry: buf:0x%p xp:0x%p\n", bp, xp); 16906 16907 /* 16908 * If ARQ is NOT enabled, then issue a REQUEST SENSE command (the 16909 * command will be retried after the request sense). Otherwise, retry 16910 * the command. Note: we are issuing the request sense even though the 16911 * retry limit may have been reached for the failed command. 
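 *
 * Editor's summary of the decision implemented below:
 *
 *	if (un->un_f_arq_enabled == FALSE)
 *		sd_send_request_sense_command(un, bp, pktp);
 *	else
 *		sd_retry_command(...);	(sense data already available)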
16912 */
16913 if (un->un_f_arq_enabled == FALSE) {
16914 SD_INFO(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: "
16915 "no ARQ, sending request sense command\n");
16916 sd_send_request_sense_command(un, bp, pktp);
16917 } else {
16918 SD_INFO(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: "
16919 "ARQ, retrying request sense command\n");
16920 #if defined(__i386) || defined(__amd64)
16921 /*
16922 * The SD_RETRY_DELAY value needs to be adjusted here
16923 * whenever SD_RETRY_DELAY changes in sddef.h
16924 */
16925 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL, EIO,
16926 un->un_f_is_fibre ? drv_usectohz(100000) : (clock_t)0,
16927 NULL);
16928 #else
16929 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL,
16930 EIO, SD_RETRY_DELAY, NULL);
16931 #endif
16932 }
16933
16934 SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: exit\n");
16935 }
16936
16937
16938 /*
16939 * Function: sd_pkt_status_busy
16940 *
16941 * Description: Recovery actions for a "STATUS_BUSY" SCSI command status.
16942 *
16943 * Context: May be called from interrupt context
16944 */
16945
16946 static void
16947 sd_pkt_status_busy(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
16948 struct scsi_pkt *pktp)
16949 {
16950 ASSERT(un != NULL);
16951 ASSERT(mutex_owned(SD_MUTEX(un)));
16952 ASSERT(bp != NULL);
16953 ASSERT(xp != NULL);
16954 ASSERT(pktp != NULL);
16955
16956 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16957 "sd_pkt_status_busy: entry\n");
16958
16959 /* If retries are exhausted, just fail the command. */
16960 if (xp->xb_retry_count >= un->un_busy_retry_count) {
16961 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
16962 "device busy too long\n");
16963 sd_return_failed_command(un, bp, EIO);
16964 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16965 "sd_pkt_status_busy: exit\n");
16966 return;
16967 }
16968 xp->xb_retry_count++;
16969
16970 /*
16971 * Try to reset the target. However, we do not want to perform
16972 * more than one reset if the device continues to fail. The reset
16973 * will be performed when the retry count reaches the reset
16974 * threshold. This threshold should be set such that at least
16975 * one retry is issued before the reset is performed.
16976 */
16977 if (xp->xb_retry_count ==
16978 ((un->un_reset_retry_count < 2) ? 2 : un->un_reset_retry_count)) {
16979 int rval = 0;
16980 mutex_exit(SD_MUTEX(un));
16981 if (un->un_f_allow_bus_device_reset == TRUE) {
16982 /*
16983 * First try to reset the LUN; if we cannot then
16984 * try to reset the target.
16985 */
16986 if (un->un_f_lun_reset_enabled == TRUE) {
16987 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16988 "sd_pkt_status_busy: RESET_LUN\n");
16989 rval = scsi_reset(SD_ADDRESS(un), RESET_LUN);
16990 }
16991 if (rval == 0) {
16992 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16993 "sd_pkt_status_busy: RESET_TARGET\n");
16994 rval = scsi_reset(SD_ADDRESS(un), RESET_TARGET);
16995 }
16996 }
16997 if (rval == 0) {
16998 /*
16999 * If the RESET_LUN and/or RESET_TARGET failed,
17000 * try RESET_ALL
17001 */
17002 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
17003 "sd_pkt_status_busy: RESET_ALL\n");
17004 rval = scsi_reset(SD_ADDRESS(un), RESET_ALL);
17005 }
17006 mutex_enter(SD_MUTEX(un));
17007 if (rval == 0) {
17008 /*
17009 * The RESET_LUN, RESET_TARGET, and/or RESET_ALL failed.
17010 * At this point we give up & fail the command.
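 *
 * Editor's note: the escalation attempted above is, schematically,
 *
 *	rval = 0;
 *	if (un->un_f_allow_bus_device_reset == TRUE) {
 *		if (un->un_f_lun_reset_enabled == TRUE)
 *			rval = scsi_reset(SD_ADDRESS(un), RESET_LUN);
 *		if (rval == 0)
 *			rval = scsi_reset(SD_ADDRESS(un), RESET_TARGET);
 *	}
 *	if (rval == 0)
 *		rval = scsi_reset(SD_ADDRESS(un), RESET_ALL);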
17011 */ 17012 sd_return_failed_command(un, bp, EIO); 17013 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17014 "sd_pkt_status_busy: exit (failed cmd)\n"); 17015 return; 17016 } 17017 } 17018 17019 /* 17020 * Retry the command. Be sure to specify SD_RETRIES_NOCHECK as 17021 * we have already checked the retry counts above. 17022 */ 17023 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, NULL, NULL, 17024 EIO, SD_BSY_TIMEOUT, NULL); 17025 17026 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17027 "sd_pkt_status_busy: exit\n"); 17028 } 17029 17030 17031 /* 17032 * Function: sd_pkt_status_reservation_conflict 17033 * 17034 * Description: Recovery actions for a "STATUS_RESERVATION_CONFLICT" SCSI 17035 * command status. 17036 * 17037 * Context: May be called from interrupt context 17038 */ 17039 17040 static void 17041 sd_pkt_status_reservation_conflict(struct sd_lun *un, struct buf *bp, 17042 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17043 { 17044 ASSERT(un != NULL); 17045 ASSERT(mutex_owned(SD_MUTEX(un))); 17046 ASSERT(bp != NULL); 17047 ASSERT(xp != NULL); 17048 ASSERT(pktp != NULL); 17049 17050 /* 17051 * If the command was PERSISTENT_RESERVATION_[IN|OUT] then reservation 17052 * conflict could be due to various reasons like incorrect keys, not 17053 * registered or not reserved etc. So, we return EACCES to the caller. 17054 */ 17055 if (un->un_reservation_type == SD_SCSI3_RESERVATION) { 17056 int cmd = SD_GET_PKT_OPCODE(pktp); 17057 if ((cmd == SCMD_PERSISTENT_RESERVE_IN) || 17058 (cmd == SCMD_PERSISTENT_RESERVE_OUT)) { 17059 sd_return_failed_command(un, bp, EACCES); 17060 return; 17061 } 17062 } 17063 17064 un->un_resvd_status |= SD_RESERVATION_CONFLICT; 17065 17066 if ((un->un_resvd_status & SD_FAILFAST) != 0) { 17067 if (sd_failfast_enable != 0) { 17068 /* By definition, we must panic here.... */ 17069 sd_panic_for_res_conflict(un); 17070 /*NOTREACHED*/ 17071 } 17072 SD_ERROR(SD_LOG_IO, un, 17073 "sd_handle_resv_conflict: Disk Reserved\n"); 17074 sd_return_failed_command(un, bp, EACCES); 17075 return; 17076 } 17077 17078 /* 17079 * 1147670: retry only if sd_retry_on_reservation_conflict 17080 * property is set (default is 1). Retries will not succeed 17081 * on a disk reserved by another initiator. HA systems 17082 * may reset this via sd.conf to avoid these retries. 17083 * 17084 * Note: The legacy return code for this failure is EIO, however EACCES 17085 * seems more appropriate for a reservation conflict. 17086 */ 17087 if (sd_retry_on_reservation_conflict == 0) { 17088 SD_ERROR(SD_LOG_IO, un, 17089 "sd_handle_resv_conflict: Device Reserved\n"); 17090 sd_return_failed_command(un, bp, EIO); 17091 return; 17092 } 17093 17094 /* 17095 * Retry the command if we can. 17096 * 17097 * Note: The legacy return code for this failure is EIO, however EACCES 17098 * seems more appropriate for a reservation conflict. 17099 */ 17100 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL, EIO, 17101 (clock_t)2, NULL); 17102 } 17103 17104 17105 17106 /* 17107 * Function: sd_pkt_status_qfull 17108 * 17109 * Description: Handle a QUEUE FULL condition from the target. This can 17110 * occur if the HBA does not handle the queue full condition. 17111 * (Basically this means third-party HBAs as Sun HBAs will 17112 * handle the queue full condition.) Note that if there are 17113 * some commands already in the transport, then the queue full 17114 * has occurred because the queue for this nexus is actually 17115 * full. 
If there are no commands in the transport, then the 17116 * queue full is resulting from some other initiator or lun 17117 * consuming all the resources at the target. 17118 * 17119 * Context: May be called from interrupt context 17120 */ 17121 17122 static void 17123 sd_pkt_status_qfull(struct sd_lun *un, struct buf *bp, 17124 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17125 { 17126 ASSERT(un != NULL); 17127 ASSERT(mutex_owned(SD_MUTEX(un))); 17128 ASSERT(bp != NULL); 17129 ASSERT(xp != NULL); 17130 ASSERT(pktp != NULL); 17131 17132 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17133 "sd_pkt_status_qfull: entry\n"); 17134 17135 /* 17136 * Just lower the QFULL throttle and retry the command. Note that 17137 * we do not limit the number of retries here. 17138 */ 17139 sd_reduce_throttle(un, SD_THROTTLE_QFULL); 17140 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, NULL, NULL, 0, 17141 SD_RESTART_TIMEOUT, NULL); 17142 17143 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17144 "sd_pkt_status_qfull: exit\n"); 17145 } 17146 17147 17148 /* 17149 * Function: sd_reset_target 17150 * 17151 * Description: Issue a scsi_reset(9F), with either RESET_LUN, 17152 * RESET_TARGET, or RESET_ALL. 17153 * 17154 * Context: May be called under interrupt context. 17155 */ 17156 17157 static void 17158 sd_reset_target(struct sd_lun *un, struct scsi_pkt *pktp) 17159 { 17160 int rval = 0; 17161 17162 ASSERT(un != NULL); 17163 ASSERT(mutex_owned(SD_MUTEX(un))); 17164 ASSERT(pktp != NULL); 17165 17166 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reset_target: entry\n"); 17167 17168 /* 17169 * No need to reset if the transport layer has already done so. 17170 */ 17171 if ((pktp->pkt_statistics & 17172 (STAT_BUS_RESET | STAT_DEV_RESET | STAT_ABORTED)) != 0) { 17173 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17174 "sd_reset_target: no reset\n"); 17175 return; 17176 } 17177 17178 mutex_exit(SD_MUTEX(un)); 17179 17180 if (un->un_f_allow_bus_device_reset == TRUE) { 17181 if (un->un_f_lun_reset_enabled == TRUE) { 17182 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17183 "sd_reset_target: RESET_LUN\n"); 17184 rval = scsi_reset(SD_ADDRESS(un), RESET_LUN); 17185 } 17186 if (rval == 0) { 17187 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17188 "sd_reset_target: RESET_TARGET\n"); 17189 rval = scsi_reset(SD_ADDRESS(un), RESET_TARGET); 17190 } 17191 } 17192 17193 if (rval == 0) { 17194 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17195 "sd_reset_target: RESET_ALL\n"); 17196 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL); 17197 } 17198 17199 mutex_enter(SD_MUTEX(un)); 17200 17201 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reset_target: exit\n"); 17202 } 17203 17204 17205 /* 17206 * Function: sd_media_change_task 17207 * 17208 * Description: Recovery action for CDROM to become available. 
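 *		Editor's note: the routine polls sd_handle_mchange() in a
 *		bounded loop, sleeping 0.5 sec between attempts:
 *
 *			while (retry_count++ < retry_limit) {
 *				if ((err = sd_handle_mchange(un)) == 0)
 *					break;
 *				if (err == EAGAIN)
 *					retry_limit =
 *					    SD_UNIT_ATTENTION_RETRY;
 *				delay(drv_usectohz(500000));
 *			}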
17209 *
17210 * Context: Executes in a taskq() thread context
17211 */
17212
17213 static void
17214 sd_media_change_task(void *arg)
17215 {
17216 struct scsi_pkt *pktp = arg;
17217 struct sd_lun *un;
17218 struct buf *bp;
17219 struct sd_xbuf *xp;
17220 int err = 0;
17221 int retry_count = 0;
17222 int retry_limit = SD_UNIT_ATTENTION_RETRY/10;
17223 struct sd_sense_info si;
17224
17225 ASSERT(pktp != NULL);
17226 bp = (struct buf *)pktp->pkt_private;
17227 ASSERT(bp != NULL);
17228 xp = SD_GET_XBUF(bp);
17229 ASSERT(xp != NULL);
17230 un = SD_GET_UN(bp);
17231 ASSERT(un != NULL);
17232 ASSERT(!mutex_owned(SD_MUTEX(un)));
17233 ASSERT(un->un_f_monitor_media_state);
17234
17235 si.ssi_severity = SCSI_ERR_INFO;
17236 si.ssi_pfa_flag = FALSE;
17237
17238 /*
17239 * When a reset is issued on a CDROM, it takes a long time to
17240 * recover. The first few attempts to read capacity and other
17241 * things related to handling unit attention fail (with an ASC
17242 * of 0x04 and an ASCQ of 0x01). In that case we want to do
17243 * enough retries, while limiting the retries in other cases
17244 * of genuine failure, like no media in the drive.
17245 */
17246 while (retry_count++ < retry_limit) {
17247 if ((err = sd_handle_mchange(un)) == 0) {
17248 break;
17249 }
17250 if (err == EAGAIN) {
17251 retry_limit = SD_UNIT_ATTENTION_RETRY;
17252 }
17253 /* Sleep for 0.5 sec. & try again */
17254 delay(drv_usectohz(500000));
17255 }
17256
17257 /*
17258 * Dispatch (retry or fail) the original command here,
17259 * along with appropriate console messages....
17260 *
17261 * Must grab the mutex before calling sd_retry_command,
17262 * sd_print_sense_msg and sd_return_failed_command.
17263 */
17264 mutex_enter(SD_MUTEX(un));
17265 if (err != SD_CMD_SUCCESS) {
17266 SD_UPDATE_ERRSTATS(un, sd_harderrs);
17267 SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err);
17268 si.ssi_severity = SCSI_ERR_FATAL;
17269 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
17270 sd_return_failed_command(un, bp, EIO);
17271 } else {
17272 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, sd_print_sense_msg,
17273 &si, EIO, (clock_t)0, NULL);
17274 }
17275 mutex_exit(SD_MUTEX(un));
17276 }
17277
17278
17279
17280 /*
17281 * Function: sd_handle_mchange
17282 *
17283 * Description: Perform geometry validation & other recovery when CDROM
17284 * has been removed from drive.
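 *		Editor's outline of the recovery sequence implemented
 *		below:
 *
 *			sd_send_scsi_READ_CAPACITY()	re-read geometry
 *			sd_update_block_info()		refresh block info
 *			cmlb_invalidate()		drop the stale label
 *			cmlb_validate()			re-validate the label
 *			sd_send_scsi_DOORLOCK()		re-lock the door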
17285 * 17286 * Return Code: 0 for success 17287 * errno-type return code of either sd_send_scsi_DOORLOCK() or 17288 * sd_send_scsi_READ_CAPACITY() 17289 * 17290 * Context: Executes in a taskq() thread context 17291 */ 17292 17293 static int 17294 sd_handle_mchange(struct sd_lun *un) 17295 { 17296 uint64_t capacity; 17297 uint32_t lbasize; 17298 int rval; 17299 17300 ASSERT(!mutex_owned(SD_MUTEX(un))); 17301 ASSERT(un->un_f_monitor_media_state); 17302 17303 if ((rval = sd_send_scsi_READ_CAPACITY(un, &capacity, &lbasize, 17304 SD_PATH_DIRECT_PRIORITY)) != 0) { 17305 return (rval); 17306 } 17307 17308 mutex_enter(SD_MUTEX(un)); 17309 sd_update_block_info(un, lbasize, capacity); 17310 17311 if (un->un_errstats != NULL) { 17312 struct sd_errstats *stp = 17313 (struct sd_errstats *)un->un_errstats->ks_data; 17314 stp->sd_capacity.value.ui64 = (uint64_t) 17315 ((uint64_t)un->un_blockcount * 17316 (uint64_t)un->un_tgt_blocksize); 17317 } 17318 17319 17320 /* 17321 * Check if the media in the device is writable or not 17322 */ 17323 if (ISCD(un)) 17324 sd_check_for_writable_cd(un, SD_PATH_DIRECT_PRIORITY); 17325 17326 /* 17327 * Note: Maybe let the strategy/partitioning chain worry about getting 17328 * valid geometry. 17329 */ 17330 mutex_exit(SD_MUTEX(un)); 17331 cmlb_invalidate(un->un_cmlbhandle, (void *)SD_PATH_DIRECT_PRIORITY); 17332 17333 17334 if (cmlb_validate(un->un_cmlbhandle, 0, 17335 (void *)SD_PATH_DIRECT_PRIORITY) != 0) { 17336 return (EIO); 17337 } else { 17338 if (un->un_f_pkstats_enabled) { 17339 sd_set_pstats(un); 17340 SD_TRACE(SD_LOG_IO_PARTITION, un, 17341 "sd_handle_mchange: un:0x%p pstats created and " 17342 "set\n", un); 17343 } 17344 } 17345 17346 17347 /* 17348 * Try to lock the door 17349 */ 17350 return (sd_send_scsi_DOORLOCK(un, SD_REMOVAL_PREVENT, 17351 SD_PATH_DIRECT_PRIORITY)); 17352 } 17353 17354 17355 /* 17356 * Function: sd_send_scsi_DOORLOCK 17357 * 17358 * Description: Issue the scsi DOOR LOCK command 17359 * 17360 * Arguments: un - pointer to driver soft state (unit) structure for 17361 * this target. 17362 * flag - SD_REMOVAL_ALLOW 17363 * SD_REMOVAL_PREVENT 17364 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 17365 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 17366 * to use the USCSI "direct" chain and bypass the normal 17367 * command waitq. SD_PATH_DIRECT_PRIORITY is used when this 17368 * command is issued as part of an error recovery action. 17369 * 17370 * Return Code: 0 - Success 17371 * errno return code from sd_send_scsi_cmd() 17372 * 17373 * Context: Can sleep. 17374 */ 17375 17376 static int 17377 sd_send_scsi_DOORLOCK(struct sd_lun *un, int flag, int path_flag) 17378 { 17379 union scsi_cdb cdb; 17380 struct uscsi_cmd ucmd_buf; 17381 struct scsi_extended_sense sense_buf; 17382 int status; 17383 17384 ASSERT(un != NULL); 17385 ASSERT(!mutex_owned(SD_MUTEX(un))); 17386 17387 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_DOORLOCK: entry: un:0x%p\n", un); 17388 17389 /* already determined doorlock is not supported, fake success */ 17390 if (un->un_f_doorlock_supported == FALSE) { 17391 return (0); 17392 } 17393 17394 /* 17395 * If we are ejecting and see an SD_REMOVAL_PREVENT 17396 * ignore the command so we can complete the eject 17397 * operation. 
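 *
 * Editor's sketch of the Group 0 PREVENT/ALLOW MEDIUM REMOVAL CDB
 * built below; byte 4 carries the prevent/allow flag:
 *
 *	cdb.scc_cmd = SCMD_DOORLOCK;
 *	cdb.cdb_opaque[4] = (uchar_t)flag;	SD_REMOVAL_PREVENT/ALLOW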
17398 */
17399 if (flag == SD_REMOVAL_PREVENT) {
17400 mutex_enter(SD_MUTEX(un));
17401 if (un->un_f_ejecting == TRUE) {
17402 mutex_exit(SD_MUTEX(un));
17403 return (EAGAIN);
17404 }
17405 mutex_exit(SD_MUTEX(un));
17406 }
17407
17408 bzero(&cdb, sizeof (cdb));
17409 bzero(&ucmd_buf, sizeof (ucmd_buf));
17410
17411 cdb.scc_cmd = SCMD_DOORLOCK;
17412 cdb.cdb_opaque[4] = (uchar_t)flag;
17413
17414 ucmd_buf.uscsi_cdb = (char *)&cdb;
17415 ucmd_buf.uscsi_cdblen = CDB_GROUP0;
17416 ucmd_buf.uscsi_bufaddr = NULL;
17417 ucmd_buf.uscsi_buflen = 0;
17418 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
17419 ucmd_buf.uscsi_rqlen = sizeof (sense_buf);
17420 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT;
17421 ucmd_buf.uscsi_timeout = 15;
17422
17423 SD_TRACE(SD_LOG_IO, un,
17424 "sd_send_scsi_DOORLOCK: returning sd_send_scsi_cmd()\n");
17425
17426 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL,
17427 UIO_SYSSPACE, path_flag);
17428
17429 if ((status == EIO) && (ucmd_buf.uscsi_status == STATUS_CHECK) &&
17430 (ucmd_buf.uscsi_rqstatus == STATUS_GOOD) &&
17431 (scsi_sense_key((uint8_t *)&sense_buf) == KEY_ILLEGAL_REQUEST)) {
17432 /* fake success and skip subsequent doorlock commands */
17433 un->un_f_doorlock_supported = FALSE;
17434 return (0);
17435 }
17436
17437 return (status);
17438 }
17439
17440 /*
17441 * Function: sd_send_scsi_READ_CAPACITY
17442 *
17443 * Description: This routine uses the scsi READ CAPACITY command to determine
17444 * the device capacity in number of blocks and the device native
17445 * block size. If this function returns a failure, then the
17446 * values in *capp and *lbap are undefined. If the capacity
17447 * returned is 0xffffffff then the lun is too large for a
17448 * normal READ CAPACITY command and the results of a
17449 * READ CAPACITY 16 will be used instead.
17450 *
17451 * Arguments: un - ptr to soft state struct for the target
17452 * capp - ptr to unsigned 64-bit variable to receive the
17453 * capacity value from the command.
17454 * lbap - ptr to unsigned 32-bit variable to receive the
17455 * block size value from the command
17456 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
17457 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY
17458 * to use the USCSI "direct" chain and bypass the normal
17459 * command waitq. SD_PATH_DIRECT_PRIORITY is used when this
17460 * command is issued as part of an error recovery action.
17461 *
17462 * Return Code: 0 - Success
17463 * EIO - IO error
17464 * EACCES - Reservation conflict detected
17465 * EAGAIN - Device is becoming ready
17466 * errno return code from sd_send_scsi_cmd()
17467 *
17468 * Context: Can sleep. Blocks until command completes.
17469 */
17470
17471 #define SD_CAPACITY_SIZE sizeof (struct scsi_capacity)
17472
17473 static int
17474 sd_send_scsi_READ_CAPACITY(struct sd_lun *un, uint64_t *capp, uint32_t *lbap,
17475 int path_flag)
17476 {
17477 struct scsi_extended_sense sense_buf;
17478 struct uscsi_cmd ucmd_buf;
17479 union scsi_cdb cdb;
17480 uint32_t *capacity_buf;
17481 uint64_t capacity;
17482 uint32_t lbasize;
17483 int status;
17484
17485 ASSERT(un != NULL);
17486 ASSERT(!mutex_owned(SD_MUTEX(un)));
17487 ASSERT(capp != NULL);
17488 ASSERT(lbap != NULL);
17489
17490 SD_TRACE(SD_LOG_IO, un,
17491 "sd_send_scsi_READ_CAPACITY: entry: un:0x%p\n", un);
17492
17493 /*
17494 * First send a READ_CAPACITY command to the target.
17495 * (This command is mandatory under SCSI-2.)
17496 *
17497 * Set up the CDB for the READ_CAPACITY command.
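 * (Editor's note: this is the 10-byte Group 1 CDB, opcode 0x25;
 * since the command is sent with PMI = 0, the bzero() below also
 * leaves the logical block address field zero, as required.)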
The Partial 17498 * Medium Indicator bit is cleared. The address field must be 17499 * zero if the PMI bit is zero. 17500 */ 17501 bzero(&cdb, sizeof (cdb)); 17502 bzero(&ucmd_buf, sizeof (ucmd_buf)); 17503 17504 capacity_buf = kmem_zalloc(SD_CAPACITY_SIZE, KM_SLEEP); 17505 17506 cdb.scc_cmd = SCMD_READ_CAPACITY; 17507 17508 ucmd_buf.uscsi_cdb = (char *)&cdb; 17509 ucmd_buf.uscsi_cdblen = CDB_GROUP1; 17510 ucmd_buf.uscsi_bufaddr = (caddr_t)capacity_buf; 17511 ucmd_buf.uscsi_buflen = SD_CAPACITY_SIZE; 17512 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 17513 ucmd_buf.uscsi_rqlen = sizeof (sense_buf); 17514 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; 17515 ucmd_buf.uscsi_timeout = 60; 17516 17517 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 17518 UIO_SYSSPACE, path_flag); 17519 17520 switch (status) { 17521 case 0: 17522 /* Return failure if we did not get valid capacity data. */ 17523 if (ucmd_buf.uscsi_resid != 0) { 17524 kmem_free(capacity_buf, SD_CAPACITY_SIZE); 17525 return (EIO); 17526 } 17527 17528 /* 17529 * Read capacity and block size from the READ CAPACITY 10 data. 17530 * This data may be adjusted later due to device specific 17531 * issues. 17532 * 17533 * According to the SCSI spec, the READ CAPACITY 10 17534 * command returns the following: 17535 * 17536 * bytes 0-3: Maximum logical block address available. 17537 * (MSB in byte:0 & LSB in byte:3) 17538 * 17539 * bytes 4-7: Block length in bytes 17540 * (MSB in byte:4 & LSB in byte:7) 17541 * 17542 */ 17543 capacity = BE_32(capacity_buf[0]); 17544 lbasize = BE_32(capacity_buf[1]); 17545 17546 /* 17547 * Done with capacity_buf 17548 */ 17549 kmem_free(capacity_buf, SD_CAPACITY_SIZE); 17550 17551 /* 17552 * if the reported capacity is set to all 0xf's, then 17553 * this disk is too large and requires SBC-2 commands. 17554 * Reissue the request using READ CAPACITY 16. 17555 */ 17556 if (capacity == 0xffffffff) { 17557 status = sd_send_scsi_READ_CAPACITY_16(un, &capacity, 17558 &lbasize, path_flag); 17559 if (status != 0) { 17560 return (status); 17561 } 17562 } 17563 break; /* Success! */ 17564 case EIO: 17565 switch (ucmd_buf.uscsi_status) { 17566 case STATUS_RESERVATION_CONFLICT: 17567 status = EACCES; 17568 break; 17569 case STATUS_CHECK: 17570 /* 17571 * Check condition; look for ASC/ASCQ of 0x04/0x01 17572 * (LOGICAL UNIT IS IN PROCESS OF BECOMING READY) 17573 */ 17574 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 17575 (scsi_sense_asc((uint8_t *)&sense_buf) == 0x04) && 17576 (scsi_sense_ascq((uint8_t *)&sense_buf) == 0x01)) { 17577 kmem_free(capacity_buf, SD_CAPACITY_SIZE); 17578 return (EAGAIN); 17579 } 17580 break; 17581 default: 17582 break; 17583 } 17584 /* FALLTHRU */ 17585 default: 17586 kmem_free(capacity_buf, SD_CAPACITY_SIZE); 17587 return (status); 17588 } 17589 17590 /* 17591 * Some ATAPI CD-ROM drives report inaccurate LBA size values 17592 * (2352 and 0 are common) so for these devices always force the value 17593 * to 2048 as required by the ATAPI specs. 17594 */ 17595 if ((un->un_f_cfg_is_atapi == TRUE) && (ISCD(un))) { 17596 lbasize = 2048; 17597 } 17598 17599 /* 17600 * Get the maximum LBA value from the READ CAPACITY data. 17601 * Here we assume that the Partial Medium Indicator (PMI) bit 17602 * was cleared when issuing the command. This means that the LBA 17603 * returned from the device is the LBA of the last logical block 17604 * on the logical unit. The actual logical block count will be 17605 * this value plus one. 
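 *
 * Editor's worked example (hypothetical device): a returned max
 * LBA of 0x3FFFFF with lbasize 2048 and a 512-byte
 * un->un_sys_blocksize gives
 *
 *	capacity = (0x3FFFFF + 1) * (2048 / 512) = 0x1000000
 *
 * 512-byte blocks, i.e. 8 GBytes.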
17606 *
17607 * Currently the capacity is saved in terms of un->un_sys_blocksize,
17608 * so scale the capacity value to reflect this.
17609 */
17610 capacity = (capacity + 1) * (lbasize / un->un_sys_blocksize);
17611
17612 /*
17613 * Copy the values from the READ CAPACITY command into the space
17614 * provided by the caller.
17615 */
17616 *capp = capacity;
17617 *lbap = lbasize;
17618
17619 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_READ_CAPACITY: "
17620 "capacity:0x%llx lbasize:0x%x\n", capacity, lbasize);
17621
17622 /*
17623 * Both the lbasize and capacity from the device must be nonzero,
17624 * otherwise we assume that the values are not valid and return
17625 * failure to the caller. (4203735)
17626 */
17627 if ((capacity == 0) || (lbasize == 0)) {
17628 return (EIO);
17629 }
17630
17631 return (0);
17632 }
17633
17634 /*
17635 * Function: sd_send_scsi_READ_CAPACITY_16
17636 *
17637 * Description: This routine uses the scsi READ CAPACITY 16 command to
17638 * determine the device capacity in number of blocks and the
17639 * device native block size. If this function returns a failure,
17640 * then the values in *capp and *lbap are undefined.
17641 * This routine should always be called by
17642 * sd_send_scsi_READ_CAPACITY which will apply any device
17643 * specific adjustments to capacity and lbasize.
17644 *
17645 * Arguments: un - ptr to soft state struct for the target
17646 * capp - ptr to unsigned 64-bit variable to receive the
17647 * capacity value from the command.
17648 * lbap - ptr to unsigned 32-bit variable to receive the
17649 * block size value from the command
17650 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
17651 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY
17652 * to use the USCSI "direct" chain and bypass the normal
17653 * command waitq. SD_PATH_DIRECT_PRIORITY is used when
17654 * this command is issued as part of an error recovery
17655 * action.
17656 *
17657 * Return Code: 0 - Success
17658 * EIO - IO error
17659 * EACCES - Reservation conflict detected
17660 * EAGAIN - Device is becoming ready
17661 * errno return code from sd_send_scsi_cmd()
17662 *
17663 * Context: Can sleep. Blocks until command completes.
17664 */
17665
17666 #define SD_CAPACITY_16_SIZE sizeof (struct scsi_capacity_16)
17667
17668 static int
17669 sd_send_scsi_READ_CAPACITY_16(struct sd_lun *un, uint64_t *capp,
17670 uint32_t *lbap, int path_flag)
17671 {
17672 struct scsi_extended_sense sense_buf;
17673 struct uscsi_cmd ucmd_buf;
17674 union scsi_cdb cdb;
17675 uint64_t *capacity16_buf;
17676 uint64_t capacity;
17677 uint32_t lbasize;
17678 int status;
17679
17680 ASSERT(un != NULL);
17681 ASSERT(!mutex_owned(SD_MUTEX(un)));
17682 ASSERT(capp != NULL);
17683 ASSERT(lbap != NULL);
17684
17685 SD_TRACE(SD_LOG_IO, un,
17686 "sd_send_scsi_READ_CAPACITY_16: entry: un:0x%p\n", un);
17687
17688 /*
17689 * First send a READ_CAPACITY_16 command to the target.
17690 *
17691 * Set up the CDB for the READ_CAPACITY_16 command. The Partial
17692 * Medium Indicator bit is cleared. The address field must be
17693 * zero if the PMI bit is zero.
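 *
 * Editor's sketch of the Service Action In CDB built below; the
 * opcode byte is 0x9E and byte 1 selects READ CAPACITY (16):
 *
 *	cdb.scc_cmd = SCMD_SVC_ACTION_IN_G4;
 *	cdb.cdb_opaque[1] = SSVC_ACTION_READ_CAPACITY_G4;
 *	FORMG4COUNT(&cdb, ucmd_buf.uscsi_buflen);	allocation length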
17694 */
17695 bzero(&cdb, sizeof (cdb));
17696 bzero(&ucmd_buf, sizeof (ucmd_buf));
17697
17698 capacity16_buf = kmem_zalloc(SD_CAPACITY_16_SIZE, KM_SLEEP);
17699
17700 ucmd_buf.uscsi_cdb = (char *)&cdb;
17701 ucmd_buf.uscsi_cdblen = CDB_GROUP4;
17702 ucmd_buf.uscsi_bufaddr = (caddr_t)capacity16_buf;
17703 ucmd_buf.uscsi_buflen = SD_CAPACITY_16_SIZE;
17704 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
17705 ucmd_buf.uscsi_rqlen = sizeof (sense_buf);
17706 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT;
17707 ucmd_buf.uscsi_timeout = 60;
17708
17709 /*
17710 * Read Capacity (16) is a Service Action In command. One
17711 * command byte (0x9E) is overloaded for multiple operations,
17712 * with the second CDB byte specifying the desired operation.
17713 */
17714 cdb.scc_cmd = SCMD_SVC_ACTION_IN_G4;
17715 cdb.cdb_opaque[1] = SSVC_ACTION_READ_CAPACITY_G4;
17716
17717 /*
17718 * Fill in allocation length field
17719 */
17720 FORMG4COUNT(&cdb, ucmd_buf.uscsi_buflen);
17721
17722 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL,
17723 UIO_SYSSPACE, path_flag);
17724
17725 switch (status) {
17726 case 0:
17727 /* Return failure if we did not get valid capacity data. */
17728 if (ucmd_buf.uscsi_resid > 20) {
17729 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE);
17730 return (EIO);
17731 }
17732
17733 /*
17734 * Read capacity and block size from the READ CAPACITY 16 data.
17735 * This data may be adjusted later due to device specific
17736 * issues.
17737 *
17738 * According to the SCSI spec, the READ CAPACITY 16
17739 * command returns the following:
17740 *
17741 * bytes 0-7: Maximum logical block address available.
17742 * (MSB in byte:0 & LSB in byte:7)
17743 *
17744 * bytes 8-11: Block length in bytes
17745 * (MSB in byte:8 & LSB in byte:11)
17746 *
17747 */
17748 capacity = BE_64(capacity16_buf[0]);
17749 lbasize = BE_32(*(uint32_t *)&capacity16_buf[1]);
17750
17751 /*
17752 * Done with capacity16_buf
17753 */
17754 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE);
17755
17756 /*
17757 * If the reported capacity is set to all 0xf's, then
17758 * this disk is too large. This could only happen with
17759 * a device that supports LBAs larger than 64 bits which
17760 * are not defined by any current T10 standards.
17761 */
17762 if (capacity == 0xffffffffffffffff) {
17763 return (EIO);
17764 }
17765 break; /* Success! */
17766 case EIO:
17767 switch (ucmd_buf.uscsi_status) {
17768 case STATUS_RESERVATION_CONFLICT:
17769 status = EACCES;
17770 break;
17771 case STATUS_CHECK:
17772 /*
17773 * Check condition; look for ASC/ASCQ of 0x04/0x01
17774 * (LOGICAL UNIT IS IN PROCESS OF BECOMING READY)
17775 */
17776 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) &&
17777 (scsi_sense_asc((uint8_t *)&sense_buf) == 0x04) &&
17778 (scsi_sense_ascq((uint8_t *)&sense_buf) == 0x01)) {
17779 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE);
17780 return (EAGAIN);
17781 }
17782 break;
17783 default:
17784 break;
17785 }
17786 /* FALLTHRU */
17787 default:
17788 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE);
17789 return (status);
17790 }
17791
17792 *capp = capacity;
17793 *lbap = lbasize;
17794
17795 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_READ_CAPACITY_16: "
17796 "capacity:0x%llx lbasize:0x%x\n", capacity, lbasize);
17797
17798 return (0);
17799 }
17800
17801
17802 /*
17803 * Function: sd_send_scsi_START_STOP_UNIT
17804 *
17805 * Description: Issue a scsi START STOP UNIT command to the target.
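 *		Editor's illustrative call, e.g. to spin a drive back up
 *		during error recovery:
 *
 *			(void) sd_send_scsi_START_STOP_UNIT(un,
 *			    SD_TARGET_START, SD_PATH_DIRECT_PRIORITY);
 *
 *		as sd_start_stop_unit_task() below actually does.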
17806 * 17807 * Arguments: un - pointer to driver soft state (unit) structure for 17808 * this target. 17809 * flag - SD_TARGET_START 17810 * SD_TARGET_STOP 17811 * SD_TARGET_EJECT 17812 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 17813 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 17814 * to use the USCSI "direct" chain and bypass the normal 17815 * command waitq. SD_PATH_DIRECT_PRIORITY is used when this 17816 * command is issued as part of an error recovery action. 17817 * 17818 * Return Code: 0 - Success 17819 * EIO - IO error 17820 * EACCES - Reservation conflict detected 17821 * ENXIO - Not Ready, medium not present 17822 * errno return code from sd_send_scsi_cmd() 17823 * 17824 * Context: Can sleep. 17825 */ 17826 17827 static int 17828 sd_send_scsi_START_STOP_UNIT(struct sd_lun *un, int flag, int path_flag) 17829 { 17830 struct scsi_extended_sense sense_buf; 17831 union scsi_cdb cdb; 17832 struct uscsi_cmd ucmd_buf; 17833 int status; 17834 17835 ASSERT(un != NULL); 17836 ASSERT(!mutex_owned(SD_MUTEX(un))); 17837 17838 SD_TRACE(SD_LOG_IO, un, 17839 "sd_send_scsi_START_STOP_UNIT: entry: un:0x%p\n", un); 17840 17841 if (un->un_f_check_start_stop && 17842 ((flag == SD_TARGET_START) || (flag == SD_TARGET_STOP)) && 17843 (un->un_f_start_stop_supported != TRUE)) { 17844 return (0); 17845 } 17846 17847 /* 17848 * If we are performing an eject operation and 17849 * we receive any command other than SD_TARGET_EJECT 17850 * we should immediately return. 17851 */ 17852 if (flag != SD_TARGET_EJECT) { 17853 mutex_enter(SD_MUTEX(un)); 17854 if (un->un_f_ejecting == TRUE) { 17855 mutex_exit(SD_MUTEX(un)); 17856 return (EAGAIN); 17857 } 17858 mutex_exit(SD_MUTEX(un)); 17859 } 17860 17861 bzero(&cdb, sizeof (cdb)); 17862 bzero(&ucmd_buf, sizeof (ucmd_buf)); 17863 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 17864 17865 cdb.scc_cmd = SCMD_START_STOP; 17866 cdb.cdb_opaque[4] = (uchar_t)flag; 17867 17868 ucmd_buf.uscsi_cdb = (char *)&cdb; 17869 ucmd_buf.uscsi_cdblen = CDB_GROUP0; 17870 ucmd_buf.uscsi_bufaddr = NULL; 17871 ucmd_buf.uscsi_buflen = 0; 17872 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 17873 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 17874 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT; 17875 ucmd_buf.uscsi_timeout = 200; 17876 17877 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 17878 UIO_SYSSPACE, path_flag); 17879 17880 switch (status) { 17881 case 0: 17882 break; /* Success! */ 17883 case EIO: 17884 switch (ucmd_buf.uscsi_status) { 17885 case STATUS_RESERVATION_CONFLICT: 17886 status = EACCES; 17887 break; 17888 case STATUS_CHECK: 17889 if (ucmd_buf.uscsi_rqstatus == STATUS_GOOD) { 17890 switch (scsi_sense_key( 17891 (uint8_t *)&sense_buf)) { 17892 case KEY_ILLEGAL_REQUEST: 17893 status = ENOTSUP; 17894 break; 17895 case KEY_NOT_READY: 17896 if (scsi_sense_asc( 17897 (uint8_t *)&sense_buf) 17898 == 0x3A) { 17899 status = ENXIO; 17900 } 17901 break; 17902 default: 17903 break; 17904 } 17905 } 17906 break; 17907 default: 17908 break; 17909 } 17910 break; 17911 default: 17912 break; 17913 } 17914 17915 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_START_STOP_UNIT: exit\n"); 17916 17917 return (status); 17918 } 17919 17920 17921 /* 17922 * Function: sd_start_stop_unit_callback 17923 * 17924 * Description: timeout(9F) callback to begin recovery process for a 17925 * device that has spun down. 17926 * 17927 * Arguments: arg - pointer to associated softstate struct. 
17928 * 17929 * Context: Executes in a timeout(9F) thread context 17930 */ 17931 17932 static void 17933 sd_start_stop_unit_callback(void *arg) 17934 { 17935 struct sd_lun *un = arg; 17936 ASSERT(un != NULL); 17937 ASSERT(!mutex_owned(SD_MUTEX(un))); 17938 17939 SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_callback: entry\n"); 17940 17941 (void) taskq_dispatch(sd_tq, sd_start_stop_unit_task, un, KM_NOSLEEP); 17942 } 17943 17944 17945 /* 17946 * Function: sd_start_stop_unit_task 17947 * 17948 * Description: Recovery procedure when a drive is spun down. 17949 * 17950 * Arguments: arg - pointer to associated softstate struct. 17951 * 17952 * Context: Executes in a taskq() thread context 17953 */ 17954 17955 static void 17956 sd_start_stop_unit_task(void *arg) 17957 { 17958 struct sd_lun *un = arg; 17959 17960 ASSERT(un != NULL); 17961 ASSERT(!mutex_owned(SD_MUTEX(un))); 17962 17963 SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_task: entry\n"); 17964 17965 /* 17966 * Some unformatted drives report not ready error, no need to 17967 * restart if format has been initiated. 17968 */ 17969 mutex_enter(SD_MUTEX(un)); 17970 if (un->un_f_format_in_progress == TRUE) { 17971 mutex_exit(SD_MUTEX(un)); 17972 return; 17973 } 17974 mutex_exit(SD_MUTEX(un)); 17975 17976 /* 17977 * When a START STOP command is issued from here, it is part of a 17978 * failure recovery operation and must be issued before any other 17979 * commands, including any pending retries. Thus it must be sent 17980 * using SD_PATH_DIRECT_PRIORITY. It doesn't matter if the spin up 17981 * succeeds or not, we will start I/O after the attempt. 17982 */ 17983 (void) sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_START, 17984 SD_PATH_DIRECT_PRIORITY); 17985 17986 /* 17987 * The above call blocks until the START_STOP_UNIT command completes. 17988 * Now that it has completed, we must re-try the original IO that 17989 * received the NOT READY condition in the first place. There are 17990 * three possible conditions here: 17991 * 17992 * (1) The original IO is on un_retry_bp. 17993 * (2) The original IO is on the regular wait queue, and un_retry_bp 17994 * is NULL. 17995 * (3) The original IO is on the regular wait queue, and un_retry_bp 17996 * points to some other, unrelated bp. 17997 * 17998 * For each case, we must call sd_start_cmds() with un_retry_bp 17999 * as the argument. If un_retry_bp is NULL, this will initiate 18000 * processing of the regular wait queue. If un_retry_bp is not NULL, 18001 * then this will process the bp on un_retry_bp. That may or may not 18002 * be the original IO, but that does not matter: the important thing 18003 * is to keep the IO processing going at this point. 18004 * 18005 * Note: This is a very specific error recovery sequence associated 18006 * with a drive that is not spun up. We attempt a START_STOP_UNIT and 18007 * serialize the I/O with completion of the spin-up. 18008 */ 18009 mutex_enter(SD_MUTEX(un)); 18010 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 18011 "sd_start_stop_unit_task: un:0x%p starting bp:0x%p\n", 18012 un, un->un_retry_bp); 18013 un->un_startstop_timeid = NULL; /* Timeout is no longer pending */ 18014 sd_start_cmds(un, un->un_retry_bp); 18015 mutex_exit(SD_MUTEX(un)); 18016 18017 SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_task: exit\n"); 18018 } 18019 18020 18021 /* 18022 * Function: sd_send_scsi_INQUIRY 18023 * 18024 * Description: Issue the scsi INQUIRY command. 
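 *		Editor's illustrative call (hypothetical page; VPD page
 *		support is device-specific):
 *
 *			uchar_t	inqbuf[255];
 *			size_t	resid;
 *
 *			status = sd_send_scsi_INQUIRY(un, inqbuf,
 *			    sizeof (inqbuf), 0x01, 0x00, &resid);
 *
 *		where evpd = 0x01 requests the VPD page named by
 *		page_code (0x00 is the "supported pages" page).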
18025 *
18026 * Arguments: un
18027 * bufaddr
18028 * buflen
18029 * evpd
18030 * page_code
18031 * residp
18032 *
18033 * Return Code: 0 - Success
18034 * errno return code from sd_send_scsi_cmd()
18035 *
18036 * Context: Can sleep. Does not return until command is completed.
18037 */
18038
18039 static int
18040 sd_send_scsi_INQUIRY(struct sd_lun *un, uchar_t *bufaddr, size_t buflen,
18041 uchar_t evpd, uchar_t page_code, size_t *residp)
18042 {
18043 union scsi_cdb cdb;
18044 struct uscsi_cmd ucmd_buf;
18045 int status;
18046
18047 ASSERT(un != NULL);
18048 ASSERT(!mutex_owned(SD_MUTEX(un)));
18049 ASSERT(bufaddr != NULL);
18050
18051 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_INQUIRY: entry: un:0x%p\n", un);
18052
18053 bzero(&cdb, sizeof (cdb));
18054 bzero(&ucmd_buf, sizeof (ucmd_buf));
18055 bzero(bufaddr, buflen);
18056
18057 cdb.scc_cmd = SCMD_INQUIRY;
18058 cdb.cdb_opaque[1] = evpd;
18059 cdb.cdb_opaque[2] = page_code;
18060 FORMG0COUNT(&cdb, buflen);
18061
18062 ucmd_buf.uscsi_cdb = (char *)&cdb;
18063 ucmd_buf.uscsi_cdblen = CDB_GROUP0;
18064 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr;
18065 ucmd_buf.uscsi_buflen = buflen;
18066 ucmd_buf.uscsi_rqbuf = NULL;
18067 ucmd_buf.uscsi_rqlen = 0;
18068 ucmd_buf.uscsi_flags = USCSI_READ | USCSI_SILENT;
18069 ucmd_buf.uscsi_timeout = 200; /* Excessive legacy value */
18070
18071 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL,
18072 UIO_SYSSPACE, SD_PATH_DIRECT);
18073
18074 if ((status == 0) && (residp != NULL)) {
18075 *residp = ucmd_buf.uscsi_resid;
18076 }
18077
18078 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_INQUIRY: exit\n");
18079
18080 return (status);
18081 }
18082
18083
18084 /*
18085 * Function: sd_send_scsi_TEST_UNIT_READY
18086 *
18087 * Description: Issue the scsi TEST UNIT READY command.
18088 * This routine can be told to set the flag USCSI_DIAGNOSE to
18089 * prevent retrying failed commands. Use this when the intent
18090 * is either to check for device readiness, to clear a Unit
18091 * Attention, or to clear any outstanding sense data.
18092 * However under specific conditions the expected behavior
18093 * is for retries to bring a device ready, so use the flag
18094 * with caution.
18095 *
18096 * Arguments: un
18097 * flag: SD_CHECK_FOR_MEDIA: return ENXIO if no media present
18098 * SD_DONT_RETRY_TUR: include uscsi flag USCSI_DIAGNOSE.
18099 * 0: don't check for media present, do retries on cmd.
18100 *
18101 * Return Code: 0 - Success
18102 * EIO - IO error
18103 * EACCES - Reservation conflict detected
18104 * ENXIO - Not Ready, medium not present
18105 * errno return code from sd_send_scsi_cmd()
18106 *
18107 * Context: Can sleep. Does not return until command is completed.
18108 */
18109
18110 static int
18111 sd_send_scsi_TEST_UNIT_READY(struct sd_lun *un, int flag)
18112 {
18113 struct scsi_extended_sense sense_buf;
18114 union scsi_cdb cdb;
18115 struct uscsi_cmd ucmd_buf;
18116 int status;
18117
18118 ASSERT(un != NULL);
18119 ASSERT(!mutex_owned(SD_MUTEX(un)));
18120
18121 SD_TRACE(SD_LOG_IO, un,
18122 "sd_send_scsi_TEST_UNIT_READY: entry: un:0x%p\n", un);
18123
18124 /*
18125 * Some Seagate elite1 TQ devices get hung with disconnect/reconnect
18126 * timeouts when they receive a TUR and the queue is not empty. Check
18127 * the configuration flag set during attach (indicating the drive has
18128 * this firmware bug) and un_ncmds_in_transport before issuing the
18129 * TUR. If there are
18130 * pending commands, return success; this is a bit arbitrary but is ok
18131 * for non-removables (i.e.
the eliteI disks) and non-clustering 18132 * configurations. 18133 */ 18134 if (un->un_f_cfg_tur_check == TRUE) { 18135 mutex_enter(SD_MUTEX(un)); 18136 if (un->un_ncmds_in_transport != 0) { 18137 mutex_exit(SD_MUTEX(un)); 18138 return (0); 18139 } 18140 mutex_exit(SD_MUTEX(un)); 18141 } 18142 18143 bzero(&cdb, sizeof (cdb)); 18144 bzero(&ucmd_buf, sizeof (ucmd_buf)); 18145 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 18146 18147 cdb.scc_cmd = SCMD_TEST_UNIT_READY; 18148 18149 ucmd_buf.uscsi_cdb = (char *)&cdb; 18150 ucmd_buf.uscsi_cdblen = CDB_GROUP0; 18151 ucmd_buf.uscsi_bufaddr = NULL; 18152 ucmd_buf.uscsi_buflen = 0; 18153 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 18154 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 18155 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT; 18156 18157 /* Use flag USCSI_DIAGNOSE to prevent retries if it fails. */ 18158 if ((flag & SD_DONT_RETRY_TUR) != 0) { 18159 ucmd_buf.uscsi_flags |= USCSI_DIAGNOSE; 18160 } 18161 ucmd_buf.uscsi_timeout = 60; 18162 18163 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 18164 UIO_SYSSPACE, ((flag & SD_BYPASS_PM) ? SD_PATH_DIRECT : 18165 SD_PATH_STANDARD)); 18166 18167 switch (status) { 18168 case 0: 18169 break; /* Success! */ 18170 case EIO: 18171 switch (ucmd_buf.uscsi_status) { 18172 case STATUS_RESERVATION_CONFLICT: 18173 status = EACCES; 18174 break; 18175 case STATUS_CHECK: 18176 if ((flag & SD_CHECK_FOR_MEDIA) == 0) { 18177 break; 18178 } 18179 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 18180 (scsi_sense_key((uint8_t *)&sense_buf) == 18181 KEY_NOT_READY) && 18182 (scsi_sense_asc((uint8_t *)&sense_buf) == 0x3A)) { 18183 status = ENXIO; 18184 } 18185 break; 18186 default: 18187 break; 18188 } 18189 break; 18190 default: 18191 break; 18192 } 18193 18194 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_TEST_UNIT_READY: exit\n"); 18195 18196 return (status); 18197 } 18198 18199 18200 /* 18201 * Function: sd_send_scsi_PERSISTENT_RESERVE_IN 18202 * 18203 * Description: Issue the scsi PERSISTENT RESERVE IN command. 18204 * 18205 * Arguments: un 18206 * 18207 * Return Code: 0 - Success 18208 * EACCES 18209 * ENOTSUP 18210 * errno return code from sd_send_scsi_cmd() 18211 * 18212 * Context: Can sleep. Does not return until command is completed. 
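 *
 * Note: when data_bufp is NULL (and data_len is 0), the routine below
 * allocates and frees a default MHIOC_RESV_KEY_SIZE buffer itself, so a
 * minimal probe for SCSI-3 persistent reservation support can be issued
 * as (illustrative sketch only):
 *
 *	rval = sd_send_scsi_PERSISTENT_RESERVE_IN(un, SD_READ_KEYS, 0, NULL);
 *
 * with ENOTSUP indicating that the target rejected the command.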
 */

static int
sd_send_scsi_PERSISTENT_RESERVE_IN(struct sd_lun *un, uchar_t usr_cmd,
	uint16_t data_len, uchar_t *data_bufp)
{
	struct	scsi_extended_sense	sense_buf;
	union scsi_cdb		cdb;
	struct uscsi_cmd	ucmd_buf;
	int			status;
	int			no_caller_buf = FALSE;

	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT((usr_cmd == SD_READ_KEYS) || (usr_cmd == SD_READ_RESV));

	SD_TRACE(SD_LOG_IO, un,
	    "sd_send_scsi_PERSISTENT_RESERVE_IN: entry: un:0x%p\n", un);

	bzero(&cdb, sizeof (cdb));
	bzero(&ucmd_buf, sizeof (ucmd_buf));
	bzero(&sense_buf, sizeof (struct scsi_extended_sense));
	if (data_bufp == NULL) {
		/* Allocate a default buf if the caller did not give one */
		ASSERT(data_len == 0);
		data_len  = MHIOC_RESV_KEY_SIZE;
		data_bufp = kmem_zalloc(MHIOC_RESV_KEY_SIZE, KM_SLEEP);
		no_caller_buf = TRUE;
	}

	cdb.scc_cmd = SCMD_PERSISTENT_RESERVE_IN;
	cdb.cdb_opaque[1] = usr_cmd;
	FORMG1COUNT(&cdb, data_len);

	ucmd_buf.uscsi_cdb	= (char *)&cdb;
	ucmd_buf.uscsi_cdblen	= CDB_GROUP1;
	ucmd_buf.uscsi_bufaddr	= (caddr_t)data_bufp;
	ucmd_buf.uscsi_buflen	= data_len;
	ucmd_buf.uscsi_rqbuf	= (caddr_t)&sense_buf;
	ucmd_buf.uscsi_rqlen	= sizeof (struct scsi_extended_sense);
	ucmd_buf.uscsi_flags	= USCSI_RQENABLE | USCSI_READ | USCSI_SILENT;
	ucmd_buf.uscsi_timeout	= 60;

	status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL,
	    UIO_SYSSPACE, SD_PATH_STANDARD);

	switch (status) {
	case 0:
		break;	/* Success! */
	case EIO:
		switch (ucmd_buf.uscsi_status) {
		case STATUS_RESERVATION_CONFLICT:
			status = EACCES;
			break;
		case STATUS_CHECK:
			if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) &&
			    (scsi_sense_key((uint8_t *)&sense_buf) ==
			    KEY_ILLEGAL_REQUEST)) {
				status = ENOTSUP;
			}
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_PERSISTENT_RESERVE_IN: exit\n");

	if (no_caller_buf == TRUE) {
		kmem_free(data_bufp, data_len);
	}

	return (status);
}


/*
 * Function: sd_send_scsi_PERSISTENT_RESERVE_OUT
 *
 * Description: This routine is the driver entry point for handling the
 *		multi-host persistent reservation requests (MHIOCGRP_REGISTER,
 *		MHIOCGRP_RESERVE, MHIOCGRP_PREEMPTANDABORT, and
 *		MHIOCGRP_REGISTERANDIGNOREKEY) by sending the SCSI-3 PROUT
 *		commands to the device.
 *
 * Arguments: un - Pointer to soft state struct for the target.
 *		usr_cmd - SCSI-3 reservation facility command (one of
 *			SD_SCSI3_REGISTER, SD_SCSI3_RESERVE, SD_SCSI3_RELEASE,
 *			SD_SCSI3_PREEMPTANDABORT, or
 *			SD_SCSI3_REGISTERANDIGNOREKEY)
 *		usr_bufp - user-provided pointer to a register, reserve
 *			descriptor, or preempt and abort structure
 *			(mhioc_register_t, mhioc_resv_desc_t,
 *			mhioc_preemptandabort_t,
 *			mhioc_registerandignorekey_t)
 *
 * Return Code: 0   - Success
 *		EACCES
 *		ENOTSUP
 *		errno return code from sd_send_scsi_cmd()
 *
 * Context: Can sleep. Does not return until command is completed.
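 *
 * As an illustrative sketch ("reg" is a hypothetical local; the real
 * caller is the MHIOCGRP_REGISTER case in sdioctl()), a registration
 * request reaches this routine roughly as:
 *
 *	mhioc_register_t reg;
 *	(void) ddi_copyin((void *)arg, &reg, sizeof (reg), flag);
 *	rval = sd_send_scsi_PERSISTENT_RESERVE_OUT(un, SD_SCSI3_REGISTER,
 *	    (uchar_t *)&reg);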
18314 */ 18315 18316 static int 18317 sd_send_scsi_PERSISTENT_RESERVE_OUT(struct sd_lun *un, uchar_t usr_cmd, 18318 uchar_t *usr_bufp) 18319 { 18320 struct scsi_extended_sense sense_buf; 18321 union scsi_cdb cdb; 18322 struct uscsi_cmd ucmd_buf; 18323 int status; 18324 uchar_t data_len = sizeof (sd_prout_t); 18325 sd_prout_t *prp; 18326 18327 ASSERT(un != NULL); 18328 ASSERT(!mutex_owned(SD_MUTEX(un))); 18329 ASSERT(data_len == 24); /* required by scsi spec */ 18330 18331 SD_TRACE(SD_LOG_IO, un, 18332 "sd_send_scsi_PERSISTENT_RESERVE_OUT: entry: un:0x%p\n", un); 18333 18334 if (usr_bufp == NULL) { 18335 return (EINVAL); 18336 } 18337 18338 bzero(&cdb, sizeof (cdb)); 18339 bzero(&ucmd_buf, sizeof (ucmd_buf)); 18340 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 18341 prp = kmem_zalloc(data_len, KM_SLEEP); 18342 18343 cdb.scc_cmd = SCMD_PERSISTENT_RESERVE_OUT; 18344 cdb.cdb_opaque[1] = usr_cmd; 18345 FORMG1COUNT(&cdb, data_len); 18346 18347 ucmd_buf.uscsi_cdb = (char *)&cdb; 18348 ucmd_buf.uscsi_cdblen = CDB_GROUP1; 18349 ucmd_buf.uscsi_bufaddr = (caddr_t)prp; 18350 ucmd_buf.uscsi_buflen = data_len; 18351 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 18352 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 18353 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_WRITE | USCSI_SILENT; 18354 ucmd_buf.uscsi_timeout = 60; 18355 18356 switch (usr_cmd) { 18357 case SD_SCSI3_REGISTER: { 18358 mhioc_register_t *ptr = (mhioc_register_t *)usr_bufp; 18359 18360 bcopy(ptr->oldkey.key, prp->res_key, MHIOC_RESV_KEY_SIZE); 18361 bcopy(ptr->newkey.key, prp->service_key, 18362 MHIOC_RESV_KEY_SIZE); 18363 prp->aptpl = ptr->aptpl; 18364 break; 18365 } 18366 case SD_SCSI3_RESERVE: 18367 case SD_SCSI3_RELEASE: { 18368 mhioc_resv_desc_t *ptr = (mhioc_resv_desc_t *)usr_bufp; 18369 18370 bcopy(ptr->key.key, prp->res_key, MHIOC_RESV_KEY_SIZE); 18371 prp->scope_address = BE_32(ptr->scope_specific_addr); 18372 cdb.cdb_opaque[2] = ptr->type; 18373 break; 18374 } 18375 case SD_SCSI3_PREEMPTANDABORT: { 18376 mhioc_preemptandabort_t *ptr = 18377 (mhioc_preemptandabort_t *)usr_bufp; 18378 18379 bcopy(ptr->resvdesc.key.key, prp->res_key, MHIOC_RESV_KEY_SIZE); 18380 bcopy(ptr->victim_key.key, prp->service_key, 18381 MHIOC_RESV_KEY_SIZE); 18382 prp->scope_address = BE_32(ptr->resvdesc.scope_specific_addr); 18383 cdb.cdb_opaque[2] = ptr->resvdesc.type; 18384 ucmd_buf.uscsi_flags |= USCSI_HEAD; 18385 break; 18386 } 18387 case SD_SCSI3_REGISTERANDIGNOREKEY: 18388 { 18389 mhioc_registerandignorekey_t *ptr; 18390 ptr = (mhioc_registerandignorekey_t *)usr_bufp; 18391 bcopy(ptr->newkey.key, 18392 prp->service_key, MHIOC_RESV_KEY_SIZE); 18393 prp->aptpl = ptr->aptpl; 18394 break; 18395 } 18396 default: 18397 ASSERT(FALSE); 18398 break; 18399 } 18400 18401 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 18402 UIO_SYSSPACE, SD_PATH_STANDARD); 18403 18404 switch (status) { 18405 case 0: 18406 break; /* Success! 
*/ 18407 case EIO: 18408 switch (ucmd_buf.uscsi_status) { 18409 case STATUS_RESERVATION_CONFLICT: 18410 status = EACCES; 18411 break; 18412 case STATUS_CHECK: 18413 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 18414 (scsi_sense_key((uint8_t *)&sense_buf) == 18415 KEY_ILLEGAL_REQUEST)) { 18416 status = ENOTSUP; 18417 } 18418 break; 18419 default: 18420 break; 18421 } 18422 break; 18423 default: 18424 break; 18425 } 18426 18427 kmem_free(prp, data_len); 18428 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_PERSISTENT_RESERVE_OUT: exit\n"); 18429 return (status); 18430 } 18431 18432 18433 /* 18434 * Function: sd_send_scsi_SYNCHRONIZE_CACHE 18435 * 18436 * Description: Issues a scsi SYNCHRONIZE CACHE command to the target 18437 * 18438 * Arguments: un - pointer to the target's soft state struct 18439 * 18440 * Return Code: 0 - success 18441 * errno-type error code 18442 * 18443 * Context: kernel thread context only. 18444 */ 18445 18446 static int 18447 sd_send_scsi_SYNCHRONIZE_CACHE(struct sd_lun *un, struct dk_callback *dkc) 18448 { 18449 struct sd_uscsi_info *uip; 18450 struct uscsi_cmd *uscmd; 18451 union scsi_cdb *cdb; 18452 struct buf *bp; 18453 int rval = 0; 18454 18455 SD_TRACE(SD_LOG_IO, un, 18456 "sd_send_scsi_SYNCHRONIZE_CACHE: entry: un:0x%p\n", un); 18457 18458 ASSERT(un != NULL); 18459 ASSERT(!mutex_owned(SD_MUTEX(un))); 18460 18461 cdb = kmem_zalloc(CDB_GROUP1, KM_SLEEP); 18462 cdb->scc_cmd = SCMD_SYNCHRONIZE_CACHE; 18463 18464 /* 18465 * First get some memory for the uscsi_cmd struct and cdb 18466 * and initialize for SYNCHRONIZE_CACHE cmd. 18467 */ 18468 uscmd = kmem_zalloc(sizeof (struct uscsi_cmd), KM_SLEEP); 18469 uscmd->uscsi_cdblen = CDB_GROUP1; 18470 uscmd->uscsi_cdb = (caddr_t)cdb; 18471 uscmd->uscsi_bufaddr = NULL; 18472 uscmd->uscsi_buflen = 0; 18473 uscmd->uscsi_rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 18474 uscmd->uscsi_rqlen = SENSE_LENGTH; 18475 uscmd->uscsi_rqresid = SENSE_LENGTH; 18476 uscmd->uscsi_flags = USCSI_RQENABLE | USCSI_SILENT; 18477 uscmd->uscsi_timeout = sd_io_time; 18478 18479 /* 18480 * Allocate an sd_uscsi_info struct and fill it with the info 18481 * needed by sd_initpkt_for_uscsi(). Then put the pointer into 18482 * b_private in the buf for sd_initpkt_for_uscsi(). Note that 18483 * since we allocate the buf here in this function, we do not 18484 * need to preserve the prior contents of b_private. 18485 * The sd_uscsi_info struct is also used by sd_uscsi_strategy() 18486 */ 18487 uip = kmem_zalloc(sizeof (struct sd_uscsi_info), KM_SLEEP); 18488 uip->ui_flags = SD_PATH_DIRECT; 18489 uip->ui_cmdp = uscmd; 18490 18491 bp = getrbuf(KM_SLEEP); 18492 bp->b_private = uip; 18493 18494 /* 18495 * Setup buffer to carry uscsi request. 18496 */ 18497 bp->b_flags = B_BUSY; 18498 bp->b_bcount = 0; 18499 bp->b_blkno = 0; 18500 18501 if (dkc != NULL) { 18502 bp->b_iodone = sd_send_scsi_SYNCHRONIZE_CACHE_biodone; 18503 uip->ui_dkc = *dkc; 18504 } 18505 18506 bp->b_edev = SD_GET_DEV(un); 18507 bp->b_dev = cmpdev(bp->b_edev); /* maybe unnecessary? */ 18508 18509 (void) sd_uscsi_strategy(bp); 18510 18511 /* 18512 * If synchronous request, wait for completion 18513 * If async just return and let b_iodone callback 18514 * cleanup. 18515 * NOTE: On return, u_ncmds_in_driver will be decremented, 18516 * but it was also incremented in sd_uscsi_strategy(), so 18517 * we should be ok. 
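	 *
	 * Illustrative usage (sketch): a synchronous flush passes a NULL
	 * callback and collects the final status directly,
	 *
	 *	err = sd_send_scsi_SYNCHRONIZE_CACHE(un, NULL);
	 *
	 * while an asynchronous caller fills in a struct dk_callback (as
	 * the DKIOCFLUSHWRITECACHE handler in sdioctl() does) and receives
	 * the status through its dkc_callback routine.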
18518 */ 18519 if (dkc == NULL) { 18520 (void) biowait(bp); 18521 rval = sd_send_scsi_SYNCHRONIZE_CACHE_biodone(bp); 18522 } 18523 18524 return (rval); 18525 } 18526 18527 18528 static int 18529 sd_send_scsi_SYNCHRONIZE_CACHE_biodone(struct buf *bp) 18530 { 18531 struct sd_uscsi_info *uip; 18532 struct uscsi_cmd *uscmd; 18533 uint8_t *sense_buf; 18534 struct sd_lun *un; 18535 int status; 18536 18537 uip = (struct sd_uscsi_info *)(bp->b_private); 18538 ASSERT(uip != NULL); 18539 18540 uscmd = uip->ui_cmdp; 18541 ASSERT(uscmd != NULL); 18542 18543 sense_buf = (uint8_t *)uscmd->uscsi_rqbuf; 18544 ASSERT(sense_buf != NULL); 18545 18546 un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp)); 18547 ASSERT(un != NULL); 18548 18549 status = geterror(bp); 18550 switch (status) { 18551 case 0: 18552 break; /* Success! */ 18553 case EIO: 18554 switch (uscmd->uscsi_status) { 18555 case STATUS_RESERVATION_CONFLICT: 18556 /* Ignore reservation conflict */ 18557 status = 0; 18558 goto done; 18559 18560 case STATUS_CHECK: 18561 if ((uscmd->uscsi_rqstatus == STATUS_GOOD) && 18562 (scsi_sense_key(sense_buf) == 18563 KEY_ILLEGAL_REQUEST)) { 18564 /* Ignore Illegal Request error */ 18565 mutex_enter(SD_MUTEX(un)); 18566 un->un_f_sync_cache_supported = FALSE; 18567 mutex_exit(SD_MUTEX(un)); 18568 status = ENOTSUP; 18569 goto done; 18570 } 18571 break; 18572 default: 18573 break; 18574 } 18575 /* FALLTHRU */ 18576 default: 18577 /* 18578 * Don't log an error message if this device 18579 * has removable media. 18580 */ 18581 if (!un->un_f_has_removable_media) { 18582 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 18583 "SYNCHRONIZE CACHE command failed (%d)\n", status); 18584 } 18585 break; 18586 } 18587 18588 done: 18589 if (uip->ui_dkc.dkc_callback != NULL) { 18590 (*uip->ui_dkc.dkc_callback)(uip->ui_dkc.dkc_cookie, status); 18591 } 18592 18593 ASSERT((bp->b_flags & B_REMAPPED) == 0); 18594 freerbuf(bp); 18595 kmem_free(uip, sizeof (struct sd_uscsi_info)); 18596 kmem_free(uscmd->uscsi_rqbuf, SENSE_LENGTH); 18597 kmem_free(uscmd->uscsi_cdb, (size_t)uscmd->uscsi_cdblen); 18598 kmem_free(uscmd, sizeof (struct uscsi_cmd)); 18599 18600 return (status); 18601 } 18602 18603 18604 /* 18605 * Function: sd_send_scsi_GET_CONFIGURATION 18606 * 18607 * Description: Issues the get configuration command to the device. 18608 * Called from sd_check_for_writable_cd & sd_get_media_info 18609 * caller needs to ensure that buflen = SD_PROFILE_HEADER_LEN 18610 * Arguments: un 18611 * ucmdbuf 18612 * rqbuf 18613 * rqbuflen 18614 * bufaddr 18615 * buflen 18616 * path_flag 18617 * 18618 * Return Code: 0 - Success 18619 * errno return code from sd_send_scsi_cmd() 18620 * 18621 * Context: Can sleep. Does not return until command is completed. 18622 * 18623 */ 18624 18625 static int 18626 sd_send_scsi_GET_CONFIGURATION(struct sd_lun *un, struct uscsi_cmd *ucmdbuf, 18627 uchar_t *rqbuf, uint_t rqbuflen, uchar_t *bufaddr, uint_t buflen, 18628 int path_flag) 18629 { 18630 char cdb[CDB_GROUP1]; 18631 int status; 18632 18633 ASSERT(un != NULL); 18634 ASSERT(!mutex_owned(SD_MUTEX(un))); 18635 ASSERT(bufaddr != NULL); 18636 ASSERT(ucmdbuf != NULL); 18637 ASSERT(rqbuf != NULL); 18638 18639 SD_TRACE(SD_LOG_IO, un, 18640 "sd_send_scsi_GET_CONFIGURATION: entry: un:0x%p\n", un); 18641 18642 bzero(cdb, sizeof (cdb)); 18643 bzero(ucmdbuf, sizeof (struct uscsi_cmd)); 18644 bzero(rqbuf, rqbuflen); 18645 bzero(bufaddr, buflen); 18646 18647 /* 18648 * Set up cdb field for the get configuration command. 
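	 * Per the MMC GET CONFIGURATION definition, this Group 1 CDB is:
	 * byte 0 = opcode, byte 1 = RT field (0x02: return the feature
	 * header plus the single feature descriptor named by the starting
	 * feature number in bytes 2-3), bytes 7-8 = allocation length
	 * (only byte 8 is set here, to SD_PROFILE_HEADER_LEN).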
18649 */ 18650 cdb[0] = SCMD_GET_CONFIGURATION; 18651 cdb[1] = 0x02; /* Requested Type */ 18652 cdb[8] = SD_PROFILE_HEADER_LEN; 18653 ucmdbuf->uscsi_cdb = cdb; 18654 ucmdbuf->uscsi_cdblen = CDB_GROUP1; 18655 ucmdbuf->uscsi_bufaddr = (caddr_t)bufaddr; 18656 ucmdbuf->uscsi_buflen = buflen; 18657 ucmdbuf->uscsi_timeout = sd_io_time; 18658 ucmdbuf->uscsi_rqbuf = (caddr_t)rqbuf; 18659 ucmdbuf->uscsi_rqlen = rqbuflen; 18660 ucmdbuf->uscsi_flags = USCSI_RQENABLE|USCSI_SILENT|USCSI_READ; 18661 18662 status = sd_send_scsi_cmd(SD_GET_DEV(un), ucmdbuf, FKIOCTL, 18663 UIO_SYSSPACE, path_flag); 18664 18665 switch (status) { 18666 case 0: 18667 break; /* Success! */ 18668 case EIO: 18669 switch (ucmdbuf->uscsi_status) { 18670 case STATUS_RESERVATION_CONFLICT: 18671 status = EACCES; 18672 break; 18673 default: 18674 break; 18675 } 18676 break; 18677 default: 18678 break; 18679 } 18680 18681 if (status == 0) { 18682 SD_DUMP_MEMORY(un, SD_LOG_IO, 18683 "sd_send_scsi_GET_CONFIGURATION: data", 18684 (uchar_t *)bufaddr, SD_PROFILE_HEADER_LEN, SD_LOG_HEX); 18685 } 18686 18687 SD_TRACE(SD_LOG_IO, un, 18688 "sd_send_scsi_GET_CONFIGURATION: exit\n"); 18689 18690 return (status); 18691 } 18692 18693 /* 18694 * Function: sd_send_scsi_feature_GET_CONFIGURATION 18695 * 18696 * Description: Issues the get configuration command to the device to 18697 * retrieve a specific feature. Called from 18698 * sd_check_for_writable_cd & sd_set_mmc_caps. 18699 * Arguments: un 18700 * ucmdbuf 18701 * rqbuf 18702 * rqbuflen 18703 * bufaddr 18704 * buflen 18705 * feature 18706 * 18707 * Return Code: 0 - Success 18708 * errno return code from sd_send_scsi_cmd() 18709 * 18710 * Context: Can sleep. Does not return until command is completed. 18711 * 18712 */ 18713 static int 18714 sd_send_scsi_feature_GET_CONFIGURATION(struct sd_lun *un, 18715 struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen, 18716 uchar_t *bufaddr, uint_t buflen, char feature, int path_flag) 18717 { 18718 char cdb[CDB_GROUP1]; 18719 int status; 18720 18721 ASSERT(un != NULL); 18722 ASSERT(!mutex_owned(SD_MUTEX(un))); 18723 ASSERT(bufaddr != NULL); 18724 ASSERT(ucmdbuf != NULL); 18725 ASSERT(rqbuf != NULL); 18726 18727 SD_TRACE(SD_LOG_IO, un, 18728 "sd_send_scsi_feature_GET_CONFIGURATION: entry: un:0x%p\n", un); 18729 18730 bzero(cdb, sizeof (cdb)); 18731 bzero(ucmdbuf, sizeof (struct uscsi_cmd)); 18732 bzero(rqbuf, rqbuflen); 18733 bzero(bufaddr, buflen); 18734 18735 /* 18736 * Set up cdb field for the get configuration command. 18737 */ 18738 cdb[0] = SCMD_GET_CONFIGURATION; 18739 cdb[1] = 0x02; /* Requested Type */ 18740 cdb[3] = feature; 18741 cdb[8] = buflen; 18742 ucmdbuf->uscsi_cdb = cdb; 18743 ucmdbuf->uscsi_cdblen = CDB_GROUP1; 18744 ucmdbuf->uscsi_bufaddr = (caddr_t)bufaddr; 18745 ucmdbuf->uscsi_buflen = buflen; 18746 ucmdbuf->uscsi_timeout = sd_io_time; 18747 ucmdbuf->uscsi_rqbuf = (caddr_t)rqbuf; 18748 ucmdbuf->uscsi_rqlen = rqbuflen; 18749 ucmdbuf->uscsi_flags = USCSI_RQENABLE|USCSI_SILENT|USCSI_READ; 18750 18751 status = sd_send_scsi_cmd(SD_GET_DEV(un), ucmdbuf, FKIOCTL, 18752 UIO_SYSSPACE, path_flag); 18753 18754 switch (status) { 18755 case 0: 18756 break; /* Success! 
*/ 18757 case EIO: 18758 switch (ucmdbuf->uscsi_status) { 18759 case STATUS_RESERVATION_CONFLICT: 18760 status = EACCES; 18761 break; 18762 default: 18763 break; 18764 } 18765 break; 18766 default: 18767 break; 18768 } 18769 18770 if (status == 0) { 18771 SD_DUMP_MEMORY(un, SD_LOG_IO, 18772 "sd_send_scsi_feature_GET_CONFIGURATION: data", 18773 (uchar_t *)bufaddr, SD_PROFILE_HEADER_LEN, SD_LOG_HEX); 18774 } 18775 18776 SD_TRACE(SD_LOG_IO, un, 18777 "sd_send_scsi_feature_GET_CONFIGURATION: exit\n"); 18778 18779 return (status); 18780 } 18781 18782 18783 /* 18784 * Function: sd_send_scsi_MODE_SENSE 18785 * 18786 * Description: Utility function for issuing a scsi MODE SENSE command. 18787 * Note: This routine uses a consistent implementation for Group0, 18788 * Group1, and Group2 commands across all platforms. ATAPI devices 18789 * use Group 1 Read/Write commands and Group 2 Mode Sense/Select 18790 * 18791 * Arguments: un - pointer to the softstate struct for the target. 18792 * cdbsize - size CDB to be used (CDB_GROUP0 (6 byte), or 18793 * CDB_GROUP[1|2] (10 byte). 18794 * bufaddr - buffer for page data retrieved from the target. 18795 * buflen - size of page to be retrieved. 18796 * page_code - page code of data to be retrieved from the target. 18797 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 18798 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 18799 * to use the USCSI "direct" chain and bypass the normal 18800 * command waitq. 18801 * 18802 * Return Code: 0 - Success 18803 * errno return code from sd_send_scsi_cmd() 18804 * 18805 * Context: Can sleep. Does not return until command is completed. 18806 */ 18807 18808 static int 18809 sd_send_scsi_MODE_SENSE(struct sd_lun *un, int cdbsize, uchar_t *bufaddr, 18810 size_t buflen, uchar_t page_code, int path_flag) 18811 { 18812 struct scsi_extended_sense sense_buf; 18813 union scsi_cdb cdb; 18814 struct uscsi_cmd ucmd_buf; 18815 int status; 18816 int headlen; 18817 18818 ASSERT(un != NULL); 18819 ASSERT(!mutex_owned(SD_MUTEX(un))); 18820 ASSERT(bufaddr != NULL); 18821 ASSERT((cdbsize == CDB_GROUP0) || (cdbsize == CDB_GROUP1) || 18822 (cdbsize == CDB_GROUP2)); 18823 18824 SD_TRACE(SD_LOG_IO, un, 18825 "sd_send_scsi_MODE_SENSE: entry: un:0x%p\n", un); 18826 18827 bzero(&cdb, sizeof (cdb)); 18828 bzero(&ucmd_buf, sizeof (ucmd_buf)); 18829 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 18830 bzero(bufaddr, buflen); 18831 18832 if (cdbsize == CDB_GROUP0) { 18833 cdb.scc_cmd = SCMD_MODE_SENSE; 18834 cdb.cdb_opaque[2] = page_code; 18835 FORMG0COUNT(&cdb, buflen); 18836 headlen = MODE_HEADER_LENGTH; 18837 } else { 18838 cdb.scc_cmd = SCMD_MODE_SENSE_G1; 18839 cdb.cdb_opaque[2] = page_code; 18840 FORMG1COUNT(&cdb, buflen); 18841 headlen = MODE_HEADER_LENGTH_GRP2; 18842 } 18843 18844 ASSERT(headlen <= buflen); 18845 SD_FILL_SCSI1_LUN_CDB(un, &cdb); 18846 18847 ucmd_buf.uscsi_cdb = (char *)&cdb; 18848 ucmd_buf.uscsi_cdblen = (uchar_t)cdbsize; 18849 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr; 18850 ucmd_buf.uscsi_buflen = buflen; 18851 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 18852 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 18853 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; 18854 ucmd_buf.uscsi_timeout = 60; 18855 18856 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 18857 UIO_SYSSPACE, path_flag); 18858 18859 switch (status) { 18860 case 0: 18861 /* 18862 * sr_check_wp() uses 0x3f page code and check the header of 18863 * mode page to determine if target device is 
write-protected.
		 * But some USB devices return 0 bytes for the 0x3f page
		 * code. For this case, make sure that at least the mode
		 * page header is returned.
		 */
		if (buflen - ucmd_buf.uscsi_resid < headlen)
			status = EIO;
		break;	/* Success! */
	case EIO:
		switch (ucmd_buf.uscsi_status) {
		case STATUS_RESERVATION_CONFLICT:
			status = EACCES;
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	if (status == 0) {
		SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_MODE_SENSE: data",
		    (uchar_t *)bufaddr, buflen, SD_LOG_HEX);
	}
	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_MODE_SENSE: exit\n");

	return (status);
}


/*
 * Function: sd_send_scsi_MODE_SELECT
 *
 * Description: Utility function for issuing a scsi MODE SELECT command.
 *		Note: This routine uses a consistent implementation for Group0,
 *		Group1, and Group2 commands across all platforms. ATAPI devices
 *		use Group 1 Read/Write commands and Group 2 Mode Sense/Select.
 *
 * Arguments: un - pointer to the softstate struct for the target.
 *		cdbsize - size of CDB to be used (CDB_GROUP0 (6 byte) or
 *			  CDB_GROUP1/CDB_GROUP2 (10 byte)).
 *		bufaddr - buffer containing the mode page data to be sent
 *			  to the target.
 *		buflen - size of the mode page data to be transferred.
 *		save_page - boolean to determine if the SP bit should be set.
 *		path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
 *			the normal command waitq, or SD_PATH_DIRECT_PRIORITY
 *			to use the USCSI "direct" chain and bypass the normal
 *			command waitq.
 *
 * Return Code: 0   - Success
 *		errno return code from sd_send_scsi_cmd()
 *
 * Context: Can sleep. Does not return until command is completed.
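 *
 * An illustrative call (sketch only; "mspage" and "msplen" are
 * hypothetical) that sends a mode page and asks the drive to save it:
 *
 *	rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP1, mspage, msplen,
 *	    SD_SAVE_PAGE, SD_PATH_DIRECT);
 *
 * where mspage holds the mode parameter header followed by the page data.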
 */

static int
sd_send_scsi_MODE_SELECT(struct sd_lun *un, int cdbsize, uchar_t *bufaddr,
	size_t buflen, uchar_t save_page, int path_flag)
{
	struct	scsi_extended_sense	sense_buf;
	union scsi_cdb		cdb;
	struct uscsi_cmd	ucmd_buf;
	int			status;

	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT(bufaddr != NULL);
	ASSERT((cdbsize == CDB_GROUP0) || (cdbsize == CDB_GROUP1) ||
	    (cdbsize == CDB_GROUP2));

	SD_TRACE(SD_LOG_IO, un,
	    "sd_send_scsi_MODE_SELECT: entry: un:0x%p\n", un);

	bzero(&cdb, sizeof (cdb));
	bzero(&ucmd_buf, sizeof (ucmd_buf));
	bzero(&sense_buf, sizeof (struct scsi_extended_sense));

	/* Set the PF bit for many third party drives */
	cdb.cdb_opaque[1] = 0x10;

	/* Set the savepage(SP) bit if given */
	if (save_page == SD_SAVE_PAGE) {
		cdb.cdb_opaque[1] |= 0x01;
	}

	if (cdbsize == CDB_GROUP0) {
		cdb.scc_cmd = SCMD_MODE_SELECT;
		FORMG0COUNT(&cdb, buflen);
	} else {
		cdb.scc_cmd = SCMD_MODE_SELECT_G1;
		FORMG1COUNT(&cdb, buflen);
	}

	SD_FILL_SCSI1_LUN_CDB(un, &cdb);

	ucmd_buf.uscsi_cdb	= (char *)&cdb;
	ucmd_buf.uscsi_cdblen	= (uchar_t)cdbsize;
	ucmd_buf.uscsi_bufaddr	= (caddr_t)bufaddr;
	ucmd_buf.uscsi_buflen	= buflen;
	ucmd_buf.uscsi_rqbuf	= (caddr_t)&sense_buf;
	ucmd_buf.uscsi_rqlen	= sizeof (struct scsi_extended_sense);
	ucmd_buf.uscsi_flags	= USCSI_RQENABLE | USCSI_WRITE | USCSI_SILENT;
	ucmd_buf.uscsi_timeout	= 60;

	status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL,
	    UIO_SYSSPACE, path_flag);

	switch (status) {
	case 0:
		break;	/* Success! */
	case EIO:
		switch (ucmd_buf.uscsi_status) {
		case STATUS_RESERVATION_CONFLICT:
			status = EACCES;
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	if (status == 0) {
		SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_MODE_SELECT: data",
		    (uchar_t *)bufaddr, buflen, SD_LOG_HEX);
	}
	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_MODE_SELECT: exit\n");

	return (status);
}


/*
 * Function: sd_send_scsi_RDWR
 *
 * Description: Issue a scsi READ or WRITE command with the given parameters.
 *
 * Arguments: un:      Pointer to the sd_lun struct for the target.
 *		cmd:	 SCMD_READ or SCMD_WRITE
 *		bufaddr: Address of the caller's buffer holding (WRITE) or
 *			 receiving (READ) the transfer data.
 *		buflen:  Length of the caller's buffer for the transfer.
 *		start_block: Block number for the start of the RDWR operation.
 *			 (Assumes target-native block size.)
 *		path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
 *			the normal command waitq, or SD_PATH_DIRECT_PRIORITY
 *			to use the USCSI "direct" chain and bypass the normal
 *			command waitq.
 *
 * Return Code: 0   - Success
 *		errno return code from sd_send_scsi_cmd()
 *
 * Context: Can sleep. Does not return until command is completed.
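 *
 * Note: the routine below derives the CDB group from start_block: Group 0
 * (6-byte) when the LBA fits the 21-bit Group 0 address field, Group 1
 * (10-byte) for larger 32-bit LBAs (and always for ATAPI devices), and
 * Group 4 (16-byte) beyond 32 bits. For example, a start_block of
 * 0x200000 no longer fits in 21 bits and is therefore issued as a
 * 10-byte CDB.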
19019 */ 19020 19021 static int 19022 sd_send_scsi_RDWR(struct sd_lun *un, uchar_t cmd, void *bufaddr, 19023 size_t buflen, daddr_t start_block, int path_flag) 19024 { 19025 struct scsi_extended_sense sense_buf; 19026 union scsi_cdb cdb; 19027 struct uscsi_cmd ucmd_buf; 19028 uint32_t block_count; 19029 int status; 19030 int cdbsize; 19031 uchar_t flag; 19032 19033 ASSERT(un != NULL); 19034 ASSERT(!mutex_owned(SD_MUTEX(un))); 19035 ASSERT(bufaddr != NULL); 19036 ASSERT((cmd == SCMD_READ) || (cmd == SCMD_WRITE)); 19037 19038 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_RDWR: entry: un:0x%p\n", un); 19039 19040 if (un->un_f_tgt_blocksize_is_valid != TRUE) { 19041 return (EINVAL); 19042 } 19043 19044 mutex_enter(SD_MUTEX(un)); 19045 block_count = SD_BYTES2TGTBLOCKS(un, buflen); 19046 mutex_exit(SD_MUTEX(un)); 19047 19048 flag = (cmd == SCMD_READ) ? USCSI_READ : USCSI_WRITE; 19049 19050 SD_INFO(SD_LOG_IO, un, "sd_send_scsi_RDWR: " 19051 "bufaddr:0x%p buflen:0x%x start_block:0x%p block_count:0x%x\n", 19052 bufaddr, buflen, start_block, block_count); 19053 19054 bzero(&cdb, sizeof (cdb)); 19055 bzero(&ucmd_buf, sizeof (ucmd_buf)); 19056 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 19057 19058 /* Compute CDB size to use */ 19059 if (start_block > 0xffffffff) 19060 cdbsize = CDB_GROUP4; 19061 else if ((start_block & 0xFFE00000) || 19062 (un->un_f_cfg_is_atapi == TRUE)) 19063 cdbsize = CDB_GROUP1; 19064 else 19065 cdbsize = CDB_GROUP0; 19066 19067 switch (cdbsize) { 19068 case CDB_GROUP0: /* 6-byte CDBs */ 19069 cdb.scc_cmd = cmd; 19070 FORMG0ADDR(&cdb, start_block); 19071 FORMG0COUNT(&cdb, block_count); 19072 break; 19073 case CDB_GROUP1: /* 10-byte CDBs */ 19074 cdb.scc_cmd = cmd | SCMD_GROUP1; 19075 FORMG1ADDR(&cdb, start_block); 19076 FORMG1COUNT(&cdb, block_count); 19077 break; 19078 case CDB_GROUP4: /* 16-byte CDBs */ 19079 cdb.scc_cmd = cmd | SCMD_GROUP4; 19080 FORMG4LONGADDR(&cdb, (uint64_t)start_block); 19081 FORMG4COUNT(&cdb, block_count); 19082 break; 19083 case CDB_GROUP5: /* 12-byte CDBs (currently unsupported) */ 19084 default: 19085 /* All others reserved */ 19086 return (EINVAL); 19087 } 19088 19089 /* Set LUN bit(s) in CDB if this is a SCSI-1 device */ 19090 SD_FILL_SCSI1_LUN_CDB(un, &cdb); 19091 19092 ucmd_buf.uscsi_cdb = (char *)&cdb; 19093 ucmd_buf.uscsi_cdblen = (uchar_t)cdbsize; 19094 ucmd_buf.uscsi_bufaddr = bufaddr; 19095 ucmd_buf.uscsi_buflen = buflen; 19096 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 19097 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 19098 ucmd_buf.uscsi_flags = flag | USCSI_RQENABLE | USCSI_SILENT; 19099 ucmd_buf.uscsi_timeout = 60; 19100 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 19101 UIO_SYSSPACE, path_flag); 19102 switch (status) { 19103 case 0: 19104 break; /* Success! */ 19105 case EIO: 19106 switch (ucmd_buf.uscsi_status) { 19107 case STATUS_RESERVATION_CONFLICT: 19108 status = EACCES; 19109 break; 19110 default: 19111 break; 19112 } 19113 break; 19114 default: 19115 break; 19116 } 19117 19118 if (status == 0) { 19119 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_RDWR: data", 19120 (uchar_t *)bufaddr, buflen, SD_LOG_HEX); 19121 } 19122 19123 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_RDWR: exit\n"); 19124 19125 return (status); 19126 } 19127 19128 19129 /* 19130 * Function: sd_send_scsi_LOG_SENSE 19131 * 19132 * Description: Issue a scsi LOG_SENSE command with the given parameters. 19133 * 19134 * Arguments: un: Pointer to the sd_lun struct for the target. 
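 *		bufaddr: buffer to receive the requested log page data.
 *		buflen:  size of the caller's buffer (allocation length).
 *		page_code, page_control: select the log page and the PC
 *			 field of the CDB, per the SPC LOG SENSE definition.
 *		param_ptr: parameter pointer (first parameter code to
 *			 return).
 *		path_flag: uscsi path flag, as for the other sd_send_scsi_*
 *			 routines above.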
19135 * 19136 * Return Code: 0 - Success 19137 * errno return code from sd_send_scsi_cmd() 19138 * 19139 * Context: Can sleep. Does not return until command is completed. 19140 */ 19141 19142 static int 19143 sd_send_scsi_LOG_SENSE(struct sd_lun *un, uchar_t *bufaddr, uint16_t buflen, 19144 uchar_t page_code, uchar_t page_control, uint16_t param_ptr, 19145 int path_flag) 19146 19147 { 19148 struct scsi_extended_sense sense_buf; 19149 union scsi_cdb cdb; 19150 struct uscsi_cmd ucmd_buf; 19151 int status; 19152 19153 ASSERT(un != NULL); 19154 ASSERT(!mutex_owned(SD_MUTEX(un))); 19155 19156 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_LOG_SENSE: entry: un:0x%p\n", un); 19157 19158 bzero(&cdb, sizeof (cdb)); 19159 bzero(&ucmd_buf, sizeof (ucmd_buf)); 19160 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 19161 19162 cdb.scc_cmd = SCMD_LOG_SENSE_G1; 19163 cdb.cdb_opaque[2] = (page_control << 6) | page_code; 19164 cdb.cdb_opaque[5] = (uchar_t)((param_ptr & 0xFF00) >> 8); 19165 cdb.cdb_opaque[6] = (uchar_t)(param_ptr & 0x00FF); 19166 FORMG1COUNT(&cdb, buflen); 19167 19168 ucmd_buf.uscsi_cdb = (char *)&cdb; 19169 ucmd_buf.uscsi_cdblen = CDB_GROUP1; 19170 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr; 19171 ucmd_buf.uscsi_buflen = buflen; 19172 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 19173 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 19174 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; 19175 ucmd_buf.uscsi_timeout = 60; 19176 19177 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 19178 UIO_SYSSPACE, path_flag); 19179 19180 switch (status) { 19181 case 0: 19182 break; 19183 case EIO: 19184 switch (ucmd_buf.uscsi_status) { 19185 case STATUS_RESERVATION_CONFLICT: 19186 status = EACCES; 19187 break; 19188 case STATUS_CHECK: 19189 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 19190 (scsi_sense_key((uint8_t *)&sense_buf) == 19191 KEY_ILLEGAL_REQUEST) && 19192 (scsi_sense_asc((uint8_t *)&sense_buf) == 0x24)) { 19193 /* 19194 * ASC 0x24: INVALID FIELD IN CDB 19195 */ 19196 switch (page_code) { 19197 case START_STOP_CYCLE_PAGE: 19198 /* 19199 * The start stop cycle counter is 19200 * implemented as page 0x31 in earlier 19201 * generation disks. In new generation 19202 * disks the start stop cycle counter is 19203 * implemented as page 0xE. To properly 19204 * handle this case if an attempt for 19205 * log page 0xE is made and fails we 19206 * will try again using page 0x31. 19207 * 19208 * Network storage BU committed to 19209 * maintain the page 0x31 for this 19210 * purpose and will not have any other 19211 * page implemented with page code 0x31 19212 * until all disks transition to the 19213 * standard page. 
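 *					 In effect, the failed request for
 *					 page 0xE is rebuilt below with the
 *					 vendor-unique page code in byte 2
 *					 of the CDB and reissued through
 *					 sd_send_scsi_cmd().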
19214 */ 19215 mutex_enter(SD_MUTEX(un)); 19216 un->un_start_stop_cycle_page = 19217 START_STOP_CYCLE_VU_PAGE; 19218 cdb.cdb_opaque[2] = 19219 (char)(page_control << 6) | 19220 un->un_start_stop_cycle_page; 19221 mutex_exit(SD_MUTEX(un)); 19222 status = sd_send_scsi_cmd( 19223 SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 19224 UIO_SYSSPACE, path_flag); 19225 19226 break; 19227 case TEMPERATURE_PAGE: 19228 status = ENOTTY; 19229 break; 19230 default: 19231 break; 19232 } 19233 } 19234 break; 19235 default: 19236 break; 19237 } 19238 break; 19239 default: 19240 break; 19241 } 19242 19243 if (status == 0) { 19244 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_LOG_SENSE: data", 19245 (uchar_t *)bufaddr, buflen, SD_LOG_HEX); 19246 } 19247 19248 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_LOG_SENSE: exit\n"); 19249 19250 return (status); 19251 } 19252 19253 19254 /* 19255 * Function: sdioctl 19256 * 19257 * Description: Driver's ioctl(9e) entry point function. 19258 * 19259 * Arguments: dev - device number 19260 * cmd - ioctl operation to be performed 19261 * arg - user argument, contains data to be set or reference 19262 * parameter for get 19263 * flag - bit flag, indicating open settings, 32/64 bit type 19264 * cred_p - user credential pointer 19265 * rval_p - calling process return value (OPT) 19266 * 19267 * Return Code: EINVAL 19268 * ENOTTY 19269 * ENXIO 19270 * EIO 19271 * EFAULT 19272 * ENOTSUP 19273 * EPERM 19274 * 19275 * Context: Called from the device switch at normal priority. 19276 */ 19277 19278 static int 19279 sdioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cred_p, int *rval_p) 19280 { 19281 struct sd_lun *un = NULL; 19282 int err = 0; 19283 int i = 0; 19284 cred_t *cr; 19285 int tmprval = EINVAL; 19286 int is_valid; 19287 19288 /* 19289 * All device accesses go thru sdstrategy where we check on suspend 19290 * status 19291 */ 19292 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 19293 return (ENXIO); 19294 } 19295 19296 ASSERT(!mutex_owned(SD_MUTEX(un))); 19297 19298 19299 is_valid = SD_IS_VALID_LABEL(un); 19300 19301 /* 19302 * Moved this wait from sd_uscsi_strategy to here for 19303 * reasons of deadlock prevention. Internal driver commands, 19304 * specifically those to change a devices power level, result 19305 * in a call to sd_uscsi_strategy. 19306 */ 19307 mutex_enter(SD_MUTEX(un)); 19308 while ((un->un_state == SD_STATE_SUSPENDED) || 19309 (un->un_state == SD_STATE_PM_CHANGING)) { 19310 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 19311 } 19312 /* 19313 * Twiddling the counter here protects commands from now 19314 * through to the top of sd_uscsi_strategy. Without the 19315 * counter inc. a power down, for example, could get in 19316 * after the above check for state is made and before 19317 * execution gets to the top of sd_uscsi_strategy. 19318 * That would cause problems. 
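	 *
	 * The matching decrement is performed once at the bottom of
	 * sdioctl(); any path that returns early between here and there
	 * (as the error paths below do) must drop the count itself first.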
19319 */ 19320 un->un_ncmds_in_driver++; 19321 19322 if (!is_valid && 19323 (flag & (FNDELAY | FNONBLOCK))) { 19324 switch (cmd) { 19325 case DKIOCGGEOM: /* SD_PATH_DIRECT */ 19326 case DKIOCGVTOC: 19327 case DKIOCGAPART: 19328 case DKIOCPARTINFO: 19329 case DKIOCSGEOM: 19330 case DKIOCSAPART: 19331 case DKIOCGETEFI: 19332 case DKIOCPARTITION: 19333 case DKIOCSVTOC: 19334 case DKIOCSETEFI: 19335 case DKIOCGMBOOT: 19336 case DKIOCSMBOOT: 19337 case DKIOCG_PHYGEOM: 19338 case DKIOCG_VIRTGEOM: 19339 /* let cmlb handle it */ 19340 goto skip_ready_valid; 19341 19342 case CDROMPAUSE: 19343 case CDROMRESUME: 19344 case CDROMPLAYMSF: 19345 case CDROMPLAYTRKIND: 19346 case CDROMREADTOCHDR: 19347 case CDROMREADTOCENTRY: 19348 case CDROMSTOP: 19349 case CDROMSTART: 19350 case CDROMVOLCTRL: 19351 case CDROMSUBCHNL: 19352 case CDROMREADMODE2: 19353 case CDROMREADMODE1: 19354 case CDROMREADOFFSET: 19355 case CDROMSBLKMODE: 19356 case CDROMGBLKMODE: 19357 case CDROMGDRVSPEED: 19358 case CDROMSDRVSPEED: 19359 case CDROMCDDA: 19360 case CDROMCDXA: 19361 case CDROMSUBCODE: 19362 if (!ISCD(un)) { 19363 un->un_ncmds_in_driver--; 19364 ASSERT(un->un_ncmds_in_driver >= 0); 19365 mutex_exit(SD_MUTEX(un)); 19366 return (ENOTTY); 19367 } 19368 break; 19369 case FDEJECT: 19370 case DKIOCEJECT: 19371 case CDROMEJECT: 19372 if (!un->un_f_eject_media_supported) { 19373 un->un_ncmds_in_driver--; 19374 ASSERT(un->un_ncmds_in_driver >= 0); 19375 mutex_exit(SD_MUTEX(un)); 19376 return (ENOTTY); 19377 } 19378 break; 19379 case DKIOCFLUSHWRITECACHE: 19380 mutex_exit(SD_MUTEX(un)); 19381 err = sd_send_scsi_TEST_UNIT_READY(un, 0); 19382 if (err != 0) { 19383 mutex_enter(SD_MUTEX(un)); 19384 un->un_ncmds_in_driver--; 19385 ASSERT(un->un_ncmds_in_driver >= 0); 19386 mutex_exit(SD_MUTEX(un)); 19387 return (EIO); 19388 } 19389 mutex_enter(SD_MUTEX(un)); 19390 /* FALLTHROUGH */ 19391 case DKIOCREMOVABLE: 19392 case DKIOCHOTPLUGGABLE: 19393 case DKIOCINFO: 19394 case DKIOCGMEDIAINFO: 19395 case MHIOCENFAILFAST: 19396 case MHIOCSTATUS: 19397 case MHIOCTKOWN: 19398 case MHIOCRELEASE: 19399 case MHIOCGRP_INKEYS: 19400 case MHIOCGRP_INRESV: 19401 case MHIOCGRP_REGISTER: 19402 case MHIOCGRP_RESERVE: 19403 case MHIOCGRP_PREEMPTANDABORT: 19404 case MHIOCGRP_REGISTERANDIGNOREKEY: 19405 case CDROMCLOSETRAY: 19406 case USCSICMD: 19407 goto skip_ready_valid; 19408 default: 19409 break; 19410 } 19411 19412 mutex_exit(SD_MUTEX(un)); 19413 err = sd_ready_and_valid(un); 19414 mutex_enter(SD_MUTEX(un)); 19415 19416 if (err != SD_READY_VALID) { 19417 switch (cmd) { 19418 case DKIOCSTATE: 19419 case CDROMGDRVSPEED: 19420 case CDROMSDRVSPEED: 19421 case FDEJECT: /* for eject command */ 19422 case DKIOCEJECT: 19423 case CDROMEJECT: 19424 case DKIOCREMOVABLE: 19425 case DKIOCHOTPLUGGABLE: 19426 break; 19427 default: 19428 if (un->un_f_has_removable_media) { 19429 err = ENXIO; 19430 } else { 19431 /* Do not map SD_RESERVED_BY_OTHERS to EIO */ 19432 if (err == SD_RESERVED_BY_OTHERS) { 19433 err = EACCES; 19434 } else { 19435 err = EIO; 19436 } 19437 } 19438 un->un_ncmds_in_driver--; 19439 ASSERT(un->un_ncmds_in_driver >= 0); 19440 mutex_exit(SD_MUTEX(un)); 19441 return (err); 19442 } 19443 } 19444 } 19445 19446 skip_ready_valid: 19447 mutex_exit(SD_MUTEX(un)); 19448 19449 switch (cmd) { 19450 case DKIOCINFO: 19451 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCINFO\n"); 19452 err = sd_dkio_ctrl_info(dev, (caddr_t)arg, flag); 19453 break; 19454 19455 case DKIOCGMEDIAINFO: 19456 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCGMEDIAINFO\n"); 19457 err = sd_get_media_info(dev, 
(caddr_t)arg, flag); 19458 break; 19459 19460 case DKIOCGGEOM: 19461 case DKIOCGVTOC: 19462 case DKIOCGAPART: 19463 case DKIOCPARTINFO: 19464 case DKIOCSGEOM: 19465 case DKIOCSAPART: 19466 case DKIOCGETEFI: 19467 case DKIOCPARTITION: 19468 case DKIOCSVTOC: 19469 case DKIOCSETEFI: 19470 case DKIOCGMBOOT: 19471 case DKIOCSMBOOT: 19472 case DKIOCG_PHYGEOM: 19473 case DKIOCG_VIRTGEOM: 19474 SD_TRACE(SD_LOG_IOCTL, un, "DKIOC %d\n", cmd); 19475 19476 /* TUR should spin up */ 19477 19478 if (un->un_f_has_removable_media) 19479 err = sd_send_scsi_TEST_UNIT_READY(un, 19480 SD_CHECK_FOR_MEDIA); 19481 else 19482 err = sd_send_scsi_TEST_UNIT_READY(un, 0); 19483 19484 if (err != 0) 19485 break; 19486 19487 err = cmlb_ioctl(un->un_cmlbhandle, dev, 19488 cmd, arg, flag, cred_p, rval_p, (void *)SD_PATH_DIRECT); 19489 19490 if ((err == 0) && 19491 ((cmd == DKIOCSETEFI) || 19492 (un->un_f_pkstats_enabled) && 19493 (cmd == DKIOCSAPART || cmd == DKIOCSVTOC))) { 19494 19495 tmprval = cmlb_validate(un->un_cmlbhandle, CMLB_SILENT, 19496 (void *)SD_PATH_DIRECT); 19497 if ((tmprval == 0) && un->un_f_pkstats_enabled) { 19498 sd_set_pstats(un); 19499 SD_TRACE(SD_LOG_IO_PARTITION, un, 19500 "sd_ioctl: un:0x%p pstats created and " 19501 "set\n", un); 19502 } 19503 } 19504 19505 if ((cmd == DKIOCSVTOC) || 19506 ((cmd == DKIOCSETEFI) && (tmprval == 0))) { 19507 19508 mutex_enter(SD_MUTEX(un)); 19509 if (un->un_f_devid_supported && 19510 (un->un_f_opt_fab_devid == TRUE)) { 19511 if (un->un_devid == NULL) { 19512 sd_register_devid(un, SD_DEVINFO(un), 19513 SD_TARGET_IS_UNRESERVED); 19514 } else { 19515 /* 19516 * The device id for this disk 19517 * has been fabricated. The 19518 * device id must be preserved 19519 * by writing it back out to 19520 * disk. 19521 */ 19522 if (sd_write_deviceid(un) != 0) { 19523 ddi_devid_free(un->un_devid); 19524 un->un_devid = NULL; 19525 } 19526 } 19527 } 19528 mutex_exit(SD_MUTEX(un)); 19529 } 19530 19531 break; 19532 19533 case DKIOCLOCK: 19534 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCLOCK\n"); 19535 err = sd_send_scsi_DOORLOCK(un, SD_REMOVAL_PREVENT, 19536 SD_PATH_STANDARD); 19537 break; 19538 19539 case DKIOCUNLOCK: 19540 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCUNLOCK\n"); 19541 err = sd_send_scsi_DOORLOCK(un, SD_REMOVAL_ALLOW, 19542 SD_PATH_STANDARD); 19543 break; 19544 19545 case DKIOCSTATE: { 19546 enum dkio_state state; 19547 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCSTATE\n"); 19548 19549 if (ddi_copyin((void *)arg, &state, sizeof (int), flag) != 0) { 19550 err = EFAULT; 19551 } else { 19552 err = sd_check_media(dev, state); 19553 if (err == 0) { 19554 if (ddi_copyout(&un->un_mediastate, (void *)arg, 19555 sizeof (int), flag) != 0) 19556 err = EFAULT; 19557 } 19558 } 19559 break; 19560 } 19561 19562 case DKIOCREMOVABLE: 19563 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCREMOVABLE\n"); 19564 i = un->un_f_has_removable_media ? 1 : 0; 19565 if (ddi_copyout(&i, (void *)arg, sizeof (int), flag) != 0) { 19566 err = EFAULT; 19567 } else { 19568 err = 0; 19569 } 19570 break; 19571 19572 case DKIOCHOTPLUGGABLE: 19573 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCHOTPLUGGABLE\n"); 19574 i = un->un_f_is_hotpluggable ? 
1 : 0; 19575 if (ddi_copyout(&i, (void *)arg, sizeof (int), flag) != 0) { 19576 err = EFAULT; 19577 } else { 19578 err = 0; 19579 } 19580 break; 19581 19582 case DKIOCGTEMPERATURE: 19583 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCGTEMPERATURE\n"); 19584 err = sd_dkio_get_temp(dev, (caddr_t)arg, flag); 19585 break; 19586 19587 case MHIOCENFAILFAST: 19588 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCENFAILFAST\n"); 19589 if ((err = drv_priv(cred_p)) == 0) { 19590 err = sd_mhdioc_failfast(dev, (caddr_t)arg, flag); 19591 } 19592 break; 19593 19594 case MHIOCTKOWN: 19595 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCTKOWN\n"); 19596 if ((err = drv_priv(cred_p)) == 0) { 19597 err = sd_mhdioc_takeown(dev, (caddr_t)arg, flag); 19598 } 19599 break; 19600 19601 case MHIOCRELEASE: 19602 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCRELEASE\n"); 19603 if ((err = drv_priv(cred_p)) == 0) { 19604 err = sd_mhdioc_release(dev); 19605 } 19606 break; 19607 19608 case MHIOCSTATUS: 19609 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCSTATUS\n"); 19610 if ((err = drv_priv(cred_p)) == 0) { 19611 switch (sd_send_scsi_TEST_UNIT_READY(un, 0)) { 19612 case 0: 19613 err = 0; 19614 break; 19615 case EACCES: 19616 *rval_p = 1; 19617 err = 0; 19618 break; 19619 default: 19620 err = EIO; 19621 break; 19622 } 19623 } 19624 break; 19625 19626 case MHIOCQRESERVE: 19627 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCQRESERVE\n"); 19628 if ((err = drv_priv(cred_p)) == 0) { 19629 err = sd_reserve_release(dev, SD_RESERVE); 19630 } 19631 break; 19632 19633 case MHIOCREREGISTERDEVID: 19634 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCREREGISTERDEVID\n"); 19635 if (drv_priv(cred_p) == EPERM) { 19636 err = EPERM; 19637 } else if (!un->un_f_devid_supported) { 19638 err = ENOTTY; 19639 } else { 19640 err = sd_mhdioc_register_devid(dev); 19641 } 19642 break; 19643 19644 case MHIOCGRP_INKEYS: 19645 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_INKEYS\n"); 19646 if (((err = drv_priv(cred_p)) != EPERM) && arg != NULL) { 19647 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 19648 err = ENOTSUP; 19649 } else { 19650 err = sd_mhdioc_inkeys(dev, (caddr_t)arg, 19651 flag); 19652 } 19653 } 19654 break; 19655 19656 case MHIOCGRP_INRESV: 19657 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_INRESV\n"); 19658 if (((err = drv_priv(cred_p)) != EPERM) && arg != NULL) { 19659 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 19660 err = ENOTSUP; 19661 } else { 19662 err = sd_mhdioc_inresv(dev, (caddr_t)arg, flag); 19663 } 19664 } 19665 break; 19666 19667 case MHIOCGRP_REGISTER: 19668 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_REGISTER\n"); 19669 if ((err = drv_priv(cred_p)) != EPERM) { 19670 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 19671 err = ENOTSUP; 19672 } else if (arg != NULL) { 19673 mhioc_register_t reg; 19674 if (ddi_copyin((void *)arg, ®, 19675 sizeof (mhioc_register_t), flag) != 0) { 19676 err = EFAULT; 19677 } else { 19678 err = 19679 sd_send_scsi_PERSISTENT_RESERVE_OUT( 19680 un, SD_SCSI3_REGISTER, 19681 (uchar_t *)®); 19682 } 19683 } 19684 } 19685 break; 19686 19687 case MHIOCGRP_RESERVE: 19688 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_RESERVE\n"); 19689 if ((err = drv_priv(cred_p)) != EPERM) { 19690 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 19691 err = ENOTSUP; 19692 } else if (arg != NULL) { 19693 mhioc_resv_desc_t resv_desc; 19694 if (ddi_copyin((void *)arg, &resv_desc, 19695 sizeof (mhioc_resv_desc_t), flag) != 0) { 19696 err = EFAULT; 19697 } else { 19698 err = 19699 sd_send_scsi_PERSISTENT_RESERVE_OUT( 19700 un, SD_SCSI3_RESERVE, 19701 (uchar_t *)&resv_desc); 19702 } 19703 } 19704 } 19705 break; 19706 19707 
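	/*
	 * Illustrative userland sketch (hypothetical "fd" and "mykey"; not
	 * driver code): a host takes a write-exclusive, registrants-only
	 * SCSI-3 reservation with
	 *
	 *	mhioc_resv_desc_t resv;
	 *	bzero(&resv, sizeof (resv));
	 *	bcopy(mykey, resv.key.key, MHIOC_RESV_KEY_SIZE);
	 *	resv.type = 0x5;	(write excl., registrants only, SPC-3)
	 *	resv.scope = 0;		(LU scope)
	 *	(void) ioctl(fd, MHIOCGRP_RESERVE, &resv);
	 *
	 * which lands in the MHIOCGRP_RESERVE case above.
	 */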
case MHIOCGRP_PREEMPTANDABORT: 19708 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_PREEMPTANDABORT\n"); 19709 if ((err = drv_priv(cred_p)) != EPERM) { 19710 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 19711 err = ENOTSUP; 19712 } else if (arg != NULL) { 19713 mhioc_preemptandabort_t preempt_abort; 19714 if (ddi_copyin((void *)arg, &preempt_abort, 19715 sizeof (mhioc_preemptandabort_t), 19716 flag) != 0) { 19717 err = EFAULT; 19718 } else { 19719 err = 19720 sd_send_scsi_PERSISTENT_RESERVE_OUT( 19721 un, SD_SCSI3_PREEMPTANDABORT, 19722 (uchar_t *)&preempt_abort); 19723 } 19724 } 19725 } 19726 break; 19727 19728 case MHIOCGRP_REGISTERANDIGNOREKEY: 19729 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_REGISTERANDIGNOREKEY\n"); 19730 if ((err = drv_priv(cred_p)) != EPERM) { 19731 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 19732 err = ENOTSUP; 19733 } else if (arg != NULL) { 19734 mhioc_registerandignorekey_t r_and_i; 19735 if (ddi_copyin((void *)arg, (void *)&r_and_i, 19736 sizeof (mhioc_registerandignorekey_t), 19737 flag) != 0) { 19738 err = EFAULT; 19739 } else { 19740 err = 19741 sd_send_scsi_PERSISTENT_RESERVE_OUT( 19742 un, SD_SCSI3_REGISTERANDIGNOREKEY, 19743 (uchar_t *)&r_and_i); 19744 } 19745 } 19746 } 19747 break; 19748 19749 case USCSICMD: 19750 SD_TRACE(SD_LOG_IOCTL, un, "USCSICMD\n"); 19751 cr = ddi_get_cred(); 19752 if ((drv_priv(cred_p) != 0) && (drv_priv(cr) != 0)) { 19753 err = EPERM; 19754 } else { 19755 enum uio_seg uioseg; 19756 uioseg = (flag & FKIOCTL) ? UIO_SYSSPACE : 19757 UIO_USERSPACE; 19758 if (un->un_f_format_in_progress == TRUE) { 19759 err = EAGAIN; 19760 break; 19761 } 19762 err = sd_send_scsi_cmd(dev, (struct uscsi_cmd *)arg, 19763 flag, uioseg, SD_PATH_STANDARD); 19764 } 19765 break; 19766 19767 case CDROMPAUSE: 19768 case CDROMRESUME: 19769 SD_TRACE(SD_LOG_IOCTL, un, "PAUSE-RESUME\n"); 19770 if (!ISCD(un)) { 19771 err = ENOTTY; 19772 } else { 19773 err = sr_pause_resume(dev, cmd); 19774 } 19775 break; 19776 19777 case CDROMPLAYMSF: 19778 SD_TRACE(SD_LOG_IOCTL, un, "CDROMPLAYMSF\n"); 19779 if (!ISCD(un)) { 19780 err = ENOTTY; 19781 } else { 19782 err = sr_play_msf(dev, (caddr_t)arg, flag); 19783 } 19784 break; 19785 19786 case CDROMPLAYTRKIND: 19787 SD_TRACE(SD_LOG_IOCTL, un, "CDROMPLAYTRKIND\n"); 19788 #if defined(__i386) || defined(__amd64) 19789 /* 19790 * not supported on ATAPI CD drives, use CDROMPLAYMSF instead 19791 */ 19792 if (!ISCD(un) || (un->un_f_cfg_is_atapi == TRUE)) { 19793 #else 19794 if (!ISCD(un)) { 19795 #endif 19796 err = ENOTTY; 19797 } else { 19798 err = sr_play_trkind(dev, (caddr_t)arg, flag); 19799 } 19800 break; 19801 19802 case CDROMREADTOCHDR: 19803 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADTOCHDR\n"); 19804 if (!ISCD(un)) { 19805 err = ENOTTY; 19806 } else { 19807 err = sr_read_tochdr(dev, (caddr_t)arg, flag); 19808 } 19809 break; 19810 19811 case CDROMREADTOCENTRY: 19812 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADTOCENTRY\n"); 19813 if (!ISCD(un)) { 19814 err = ENOTTY; 19815 } else { 19816 err = sr_read_tocentry(dev, (caddr_t)arg, flag); 19817 } 19818 break; 19819 19820 case CDROMSTOP: 19821 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSTOP\n"); 19822 if (!ISCD(un)) { 19823 err = ENOTTY; 19824 } else { 19825 err = sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_STOP, 19826 SD_PATH_STANDARD); 19827 } 19828 break; 19829 19830 case CDROMSTART: 19831 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSTART\n"); 19832 if (!ISCD(un)) { 19833 err = ENOTTY; 19834 } else { 19835 err = sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_START, 19836 SD_PATH_STANDARD); 19837 } 19838 break; 
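	/*
	 * CDROMSTOP and CDROMSTART above, and CDROMCLOSETRAY below, all map
	 * onto the SCSI START STOP UNIT command; the SD_TARGET_* argument
	 * selects the Start/LoEj bits in byte 4 of the CDB (Start alone to
	 * spin up, LoEj|Start to close the tray), per the SBC/MMC
	 * definition of the command.
	 */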
19839 19840 case CDROMCLOSETRAY: 19841 SD_TRACE(SD_LOG_IOCTL, un, "CDROMCLOSETRAY\n"); 19842 if (!ISCD(un)) { 19843 err = ENOTTY; 19844 } else { 19845 err = sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_CLOSE, 19846 SD_PATH_STANDARD); 19847 } 19848 break; 19849 19850 case FDEJECT: /* for eject command */ 19851 case DKIOCEJECT: 19852 case CDROMEJECT: 19853 SD_TRACE(SD_LOG_IOCTL, un, "EJECT\n"); 19854 if (!un->un_f_eject_media_supported) { 19855 err = ENOTTY; 19856 } else { 19857 err = sr_eject(dev); 19858 } 19859 break; 19860 19861 case CDROMVOLCTRL: 19862 SD_TRACE(SD_LOG_IOCTL, un, "CDROMVOLCTRL\n"); 19863 if (!ISCD(un)) { 19864 err = ENOTTY; 19865 } else { 19866 err = sr_volume_ctrl(dev, (caddr_t)arg, flag); 19867 } 19868 break; 19869 19870 case CDROMSUBCHNL: 19871 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSUBCHNL\n"); 19872 if (!ISCD(un)) { 19873 err = ENOTTY; 19874 } else { 19875 err = sr_read_subchannel(dev, (caddr_t)arg, flag); 19876 } 19877 break; 19878 19879 case CDROMREADMODE2: 19880 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADMODE2\n"); 19881 if (!ISCD(un)) { 19882 err = ENOTTY; 19883 } else if (un->un_f_cfg_is_atapi == TRUE) { 19884 /* 19885 * If the drive supports READ CD, use that instead of 19886 * switching the LBA size via a MODE SELECT 19887 * Block Descriptor 19888 */ 19889 err = sr_read_cd_mode2(dev, (caddr_t)arg, flag); 19890 } else { 19891 err = sr_read_mode2(dev, (caddr_t)arg, flag); 19892 } 19893 break; 19894 19895 case CDROMREADMODE1: 19896 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADMODE1\n"); 19897 if (!ISCD(un)) { 19898 err = ENOTTY; 19899 } else { 19900 err = sr_read_mode1(dev, (caddr_t)arg, flag); 19901 } 19902 break; 19903 19904 case CDROMREADOFFSET: 19905 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADOFFSET\n"); 19906 if (!ISCD(un)) { 19907 err = ENOTTY; 19908 } else { 19909 err = sr_read_sony_session_offset(dev, (caddr_t)arg, 19910 flag); 19911 } 19912 break; 19913 19914 case CDROMSBLKMODE: 19915 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSBLKMODE\n"); 19916 /* 19917 * There is no means of changing block size in case of atapi 19918 * drives, thus return ENOTTY if drive type is atapi 19919 */ 19920 if (!ISCD(un) || (un->un_f_cfg_is_atapi == TRUE)) { 19921 err = ENOTTY; 19922 } else if (un->un_f_mmc_cap == TRUE) { 19923 19924 /* 19925 * MMC Devices do not support changing the 19926 * logical block size 19927 * 19928 * Note: EINVAL is being returned instead of ENOTTY to 19929 * maintain consistancy with the original mmc 19930 * driver update. 19931 */ 19932 err = EINVAL; 19933 } else { 19934 mutex_enter(SD_MUTEX(un)); 19935 if ((!(un->un_exclopen & (1<<SDPART(dev)))) || 19936 (un->un_ncmds_in_transport > 0)) { 19937 mutex_exit(SD_MUTEX(un)); 19938 err = EINVAL; 19939 } else { 19940 mutex_exit(SD_MUTEX(un)); 19941 err = sr_change_blkmode(dev, cmd, arg, flag); 19942 } 19943 } 19944 break; 19945 19946 case CDROMGBLKMODE: 19947 SD_TRACE(SD_LOG_IOCTL, un, "CDROMGBLKMODE\n"); 19948 if (!ISCD(un)) { 19949 err = ENOTTY; 19950 } else if ((un->un_f_cfg_is_atapi != FALSE) && 19951 (un->un_f_blockcount_is_valid != FALSE)) { 19952 /* 19953 * Drive is an ATAPI drive so return target block 19954 * size for ATAPI drives since we cannot change the 19955 * blocksize on ATAPI drives. Used primarily to detect 19956 * if an ATAPI cdrom is present. 19957 */ 19958 if (ddi_copyout(&un->un_tgt_blocksize, (void *)arg, 19959 sizeof (int), flag) != 0) { 19960 err = EFAULT; 19961 } else { 19962 err = 0; 19963 } 19964 19965 } else { 19966 /* 19967 * Drive supports changing block sizes via a Mode 19968 * Select. 
19969 */ 19970 err = sr_change_blkmode(dev, cmd, arg, flag); 19971 } 19972 break; 19973 19974 case CDROMGDRVSPEED: 19975 case CDROMSDRVSPEED: 19976 SD_TRACE(SD_LOG_IOCTL, un, "CDROMXDRVSPEED\n"); 19977 if (!ISCD(un)) { 19978 err = ENOTTY; 19979 } else if (un->un_f_mmc_cap == TRUE) { 19980 /* 19981 * Note: In the future the driver implementation 19982 * for getting and 19983 * setting cd speed should entail: 19984 * 1) If non-mmc try the Toshiba mode page 19985 * (sr_change_speed) 19986 * 2) If mmc but no support for Real Time Streaming try 19987 * the SET CD SPEED (0xBB) command 19988 * (sr_atapi_change_speed) 19989 * 3) If mmc and support for Real Time Streaming 19990 * try the GET PERFORMANCE and SET STREAMING 19991 * commands (not yet implemented, 4380808) 19992 */ 19993 /* 19994 * As per recent MMC spec, CD-ROM speed is variable 19995 * and changes with LBA. Since there is no such 19996 * things as drive speed now, fail this ioctl. 19997 * 19998 * Note: EINVAL is returned for consistancy of original 19999 * implementation which included support for getting 20000 * the drive speed of mmc devices but not setting 20001 * the drive speed. Thus EINVAL would be returned 20002 * if a set request was made for an mmc device. 20003 * We no longer support get or set speed for 20004 * mmc but need to remain consistent with regard 20005 * to the error code returned. 20006 */ 20007 err = EINVAL; 20008 } else if (un->un_f_cfg_is_atapi == TRUE) { 20009 err = sr_atapi_change_speed(dev, cmd, arg, flag); 20010 } else { 20011 err = sr_change_speed(dev, cmd, arg, flag); 20012 } 20013 break; 20014 20015 case CDROMCDDA: 20016 SD_TRACE(SD_LOG_IOCTL, un, "CDROMCDDA\n"); 20017 if (!ISCD(un)) { 20018 err = ENOTTY; 20019 } else { 20020 err = sr_read_cdda(dev, (void *)arg, flag); 20021 } 20022 break; 20023 20024 case CDROMCDXA: 20025 SD_TRACE(SD_LOG_IOCTL, un, "CDROMCDXA\n"); 20026 if (!ISCD(un)) { 20027 err = ENOTTY; 20028 } else { 20029 err = sr_read_cdxa(dev, (caddr_t)arg, flag); 20030 } 20031 break; 20032 20033 case CDROMSUBCODE: 20034 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSUBCODE\n"); 20035 if (!ISCD(un)) { 20036 err = ENOTTY; 20037 } else { 20038 err = sr_read_all_subcodes(dev, (caddr_t)arg, flag); 20039 } 20040 break; 20041 20042 20043 #ifdef SDDEBUG 20044 /* RESET/ABORTS testing ioctls */ 20045 case DKIOCRESET: { 20046 int reset_level; 20047 20048 if (ddi_copyin((void *)arg, &reset_level, sizeof (int), flag)) { 20049 err = EFAULT; 20050 } else { 20051 SD_INFO(SD_LOG_IOCTL, un, "sdioctl: DKIOCRESET: " 20052 "reset_level = 0x%lx\n", reset_level); 20053 if (scsi_reset(SD_ADDRESS(un), reset_level)) { 20054 err = 0; 20055 } else { 20056 err = EIO; 20057 } 20058 } 20059 break; 20060 } 20061 20062 case DKIOCABORT: 20063 SD_INFO(SD_LOG_IOCTL, un, "sdioctl: DKIOCABORT:\n"); 20064 if (scsi_abort(SD_ADDRESS(un), NULL)) { 20065 err = 0; 20066 } else { 20067 err = EIO; 20068 } 20069 break; 20070 #endif 20071 20072 #ifdef SD_FAULT_INJECTION 20073 /* SDIOC FaultInjection testing ioctls */ 20074 case SDIOCSTART: 20075 case SDIOCSTOP: 20076 case SDIOCINSERTPKT: 20077 case SDIOCINSERTXB: 20078 case SDIOCINSERTUN: 20079 case SDIOCINSERTARQ: 20080 case SDIOCPUSH: 20081 case SDIOCRETRIEVE: 20082 case SDIOCRUN: 20083 SD_INFO(SD_LOG_SDTEST, un, "sdioctl:" 20084 "SDIOC detected cmd:0x%X:\n", cmd); 20085 /* call error generator */ 20086 sd_faultinjection_ioctl(cmd, arg, un); 20087 err = 0; 20088 break; 20089 20090 #endif /* SD_FAULT_INJECTION */ 20091 20092 case DKIOCFLUSHWRITECACHE: 20093 { 20094 struct dk_callback *dkc = (struct 
			mutex_enter(SD_MUTEX(un));
			if (!un->un_f_sync_cache_supported ||
			    !un->un_f_write_cache_enabled) {
				err = un->un_f_sync_cache_supported ?
				    0 : ENOTSUP;
				mutex_exit(SD_MUTEX(un));
				if ((flag & FKIOCTL) && dkc != NULL &&
				    dkc->dkc_callback != NULL) {
					(*dkc->dkc_callback)(dkc->dkc_cookie,
					    err);
					/*
					 * Did callback and reported error.
					 * Since we did a callback, ioctl
					 * should return 0.
					 */
					err = 0;
				}
				break;
			}
			mutex_exit(SD_MUTEX(un));

			if ((flag & FKIOCTL) && dkc != NULL &&
			    dkc->dkc_callback != NULL) {
				/* async SYNC CACHE request */
				err = sd_send_scsi_SYNCHRONIZE_CACHE(un, dkc);
			} else {
				/* synchronous SYNC CACHE request */
				err = sd_send_scsi_SYNCHRONIZE_CACHE(un, NULL);
			}
		}
		break;

	case DKIOCGETWCE: {

		int wce;

		if ((err = sd_get_write_cache_enabled(un, &wce)) != 0) {
			break;
		}

		if (ddi_copyout(&wce, (void *)arg, sizeof (wce), flag)) {
			err = EFAULT;
		}
		break;
	}

	case DKIOCSETWCE: {

		int wce, sync_supported;

		if (ddi_copyin((void *)arg, &wce, sizeof (wce), flag)) {
			err = EFAULT;
			break;
		}

		/*
		 * Synchronize multiple threads trying to enable
		 * or disable the cache via the un_f_wcc_cv
		 * condition variable.
		 */
		mutex_enter(SD_MUTEX(un));

		/*
		 * Don't allow the cache to be enabled if the
		 * config file has it disabled.
		 */
		if (un->un_f_opt_disable_cache && wce) {
			mutex_exit(SD_MUTEX(un));
			err = EINVAL;
			break;
		}

		/*
		 * Wait for write cache change in progress
		 * bit to be clear before proceeding.
		 */
		while (un->un_f_wcc_inprog)
			cv_wait(&un->un_wcc_cv, SD_MUTEX(un));

		un->un_f_wcc_inprog = 1;

		if (un->un_f_write_cache_enabled && wce == 0) {
			/*
			 * Disable the write cache.  Don't clear
			 * un_f_write_cache_enabled until after
			 * the mode select and flush are complete.
			 */
			sync_supported = un->un_f_sync_cache_supported;
			mutex_exit(SD_MUTEX(un));
			if ((err = sd_cache_control(un, SD_CACHE_NOCHANGE,
			    SD_CACHE_DISABLE)) == 0 && sync_supported) {
				err = sd_send_scsi_SYNCHRONIZE_CACHE(un, NULL);
			}

			mutex_enter(SD_MUTEX(un));
			if (err == 0) {
				un->un_f_write_cache_enabled = 0;
			}

		} else if (!un->un_f_write_cache_enabled && wce != 0) {
			/*
			 * Set un_f_write_cache_enabled first, so there is
			 * no window where the cache is enabled, but the
			 * bit says it isn't.
			 */
			un->un_f_write_cache_enabled = 1;
			mutex_exit(SD_MUTEX(un));

			err = sd_cache_control(un, SD_CACHE_NOCHANGE,
			    SD_CACHE_ENABLE);

			mutex_enter(SD_MUTEX(un));

			if (err) {
				un->un_f_write_cache_enabled = 0;
			}
		}

		un->un_f_wcc_inprog = 0;
		cv_broadcast(&un->un_wcc_cv);
		mutex_exit(SD_MUTEX(un));
		break;
	}

	default:
		err = ENOTTY;
		break;
	}
	mutex_enter(SD_MUTEX(un));
	un->un_ncmds_in_driver--;
	ASSERT(un->un_ncmds_in_driver >= 0);
	mutex_exit(SD_MUTEX(un));

	SD_TRACE(SD_LOG_IOCTL, un, "sdioctl: exit: %d\n", err);
	return (err);
}
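
/*
 * Usage sketch (illustrative only, not part of the driver): the write
 * cache cases above are reachable from userland through the raw device
 * node. A minimal caller, assuming a valid raw disk path such as
 * "/dev/rdsk/c0t0d0s2", might look like the following; error handling
 * is trimmed for brevity.
 *
 *	#include <sys/dkio.h>
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int
 *	wce_disable(const char *path)
 *	{
 *		int fd, wce;
 *
 *		if ((fd = open(path, O_RDWR)) < 0)
 *			return (-1);
 *		wce = 0;	// 0 = disable, non-zero = enable
 *		if (ioctl(fd, DKIOCSETWCE, &wce) != 0 ||
 *		    ioctl(fd, DKIOCFLUSHWRITECACHE, NULL) != 0) {
 *			(void) close(fd);
 *			return (-1);
 *		}
 *		(void) close(fd);
 *		return (0);
 *	}
 *
 * DKIOCGETWCE returns the current enable state into the int pointed to
 * by arg. From userland DKIOCFLUSHWRITECACHE is synchronous; the
 * dk_callback (async) form is only honored for in-kernel FKIOCTL
 * callers, as the case above shows.
 */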


/*
 *    Function: sd_dkio_ctrl_info
 *
 * Description: This routine is the driver entry point for handling controller
 *		information ioctl requests (DKIOCINFO).
 *
 *   Arguments: dev  - the device number
 *		arg  - pointer to user provided dk_cinfo structure
 *		       specifying the controller type and attributes.
 *		flag - this argument is a pass through to ddi_copyxxx()
 *		       directly from the mode argument of ioctl().
 *
 * Return Code: 0
 *		EFAULT
 *		ENXIO
 */

static int
sd_dkio_ctrl_info(dev_t dev, caddr_t arg, int flag)
{
	struct sd_lun	*un = NULL;
	struct dk_cinfo	*info;
	dev_info_t	*pdip;
	int		lun, tgt;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	info = (struct dk_cinfo *)
	    kmem_zalloc(sizeof (struct dk_cinfo), KM_SLEEP);

	switch (un->un_ctype) {
	case CTYPE_CDROM:
		info->dki_ctype = DKC_CDROM;
		break;
	default:
		info->dki_ctype = DKC_SCSI_CCS;
		break;
	}
	pdip = ddi_get_parent(SD_DEVINFO(un));
	info->dki_cnum = ddi_get_instance(pdip);
	if (strlen(ddi_get_name(pdip)) < DK_DEVLEN) {
		(void) strcpy(info->dki_cname, ddi_get_name(pdip));
	} else {
		(void) strncpy(info->dki_cname, ddi_node_name(pdip),
		    DK_DEVLEN - 1);
	}

	lun = ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un),
	    DDI_PROP_DONTPASS, SCSI_ADDR_PROP_LUN, 0);
	tgt = ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un),
	    DDI_PROP_DONTPASS, SCSI_ADDR_PROP_TARGET, 0);

	/* Unit Information */
	info->dki_unit = ddi_get_instance(SD_DEVINFO(un));
	info->dki_slave = ((tgt << 3) | lun);
	(void) strncpy(info->dki_dname, ddi_driver_name(SD_DEVINFO(un)),
	    DK_DEVLEN - 1);
	info->dki_flags = DKI_FMTVOL;
	info->dki_partition = SDPART(dev);

	/* Max Transfer size of this device in blocks */
	info->dki_maxtransfer = un->un_max_xfer_size / un->un_sys_blocksize;
	info->dki_addr = 0;
	info->dki_space = 0;
	info->dki_prio = 0;
	info->dki_vec = 0;

	if (ddi_copyout(info, arg, sizeof (struct dk_cinfo), flag) != 0) {
		kmem_free(info, sizeof (struct dk_cinfo));
		return (EFAULT);
	} else {
		kmem_free(info, sizeof (struct dk_cinfo));
		return (0);
	}
}
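
/*
 * Usage sketch (illustrative only): a userland consumer of DKIOCINFO
 * receives the dk_cinfo fields filled in above. Assuming an fd open on
 * a raw device node:
 *
 *	#include <sys/dkio.h>
 *	#include <stdio.h>
 *
 *	void
 *	print_ctrl_info(int fd)
 *	{
 *		struct dk_cinfo ci;
 *
 *		if (ioctl(fd, DKIOCINFO, &ci) == 0) {
 *			(void) printf("ctlr %s%d, unit %d, maxxfer %d blks\n",
 *			    ci.dki_cname, ci.dki_cnum, ci.dki_unit,
 *			    ci.dki_maxtransfer);
 *		}
 *	}
 *
 * Note that dki_slave is packed as (target << 3) | lun, mirroring the
 * assignment in sd_dkio_ctrl_info() above.
 */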


/*
 *    Function: sd_get_media_info
 *
 * Description: This routine is the driver entry point for handling ioctl
 *		requests for the media type or command set profile used by the
 *		drive to operate on the media (DKIOCGMEDIAINFO).
 *
 *   Arguments: dev	- the device number
 *		arg	- pointer to user provided dk_minfo structure
 *			  specifying the media type, logical block size and
 *			  drive capacity.
 *		flag	- this argument is a pass through to ddi_copyxxx()
 *			  directly from the mode argument of ioctl().
 *
 * Return Code: 0
 *		EACCES
 *		EFAULT
 *		ENXIO
 *		EIO
 */

static int
sd_get_media_info(dev_t dev, caddr_t arg, int flag)
{
	struct sd_lun		*un = NULL;
	struct uscsi_cmd	com;
	struct scsi_inquiry	*sinq;
	struct dk_minfo		media_info;
	u_longlong_t		media_capacity;
	uint64_t		capacity;
	uint_t			lbasize;
	uchar_t			*out_data;
	uchar_t			*rqbuf;
	int			rval = 0;
	int			rtn;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
	    (un->un_state == SD_STATE_OFFLINE)) {
		return (ENXIO);
	}

	SD_TRACE(SD_LOG_IOCTL_DKIO, un, "sd_get_media_info: entry\n");

	out_data = kmem_zalloc(SD_PROFILE_HEADER_LEN, KM_SLEEP);
	rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);

	/* Issue a TUR to determine if the drive is ready with media present */
	rval = sd_send_scsi_TEST_UNIT_READY(un, SD_CHECK_FOR_MEDIA);
	if (rval == ENXIO) {
		goto done;
	}

	/* Now get configuration data */
	if (ISCD(un)) {
		media_info.dki_media_type = DK_CDROM;

		/* Allow SCMD_GET_CONFIGURATION to MMC devices only */
		if (un->un_f_mmc_cap == TRUE) {
			rtn = sd_send_scsi_GET_CONFIGURATION(un, &com, rqbuf,
			    SENSE_LENGTH, out_data, SD_PROFILE_HEADER_LEN,
			    SD_PATH_STANDARD);

			if (rtn) {
				/*
				 * Failed for other than an illegal request
				 * or command not supported
				 */
				if ((com.uscsi_status == STATUS_CHECK) &&
				    (com.uscsi_rqstatus == STATUS_GOOD)) {
					if ((rqbuf[2] != KEY_ILLEGAL_REQUEST) ||
					    (rqbuf[12] != 0x20)) {
						rval = EIO;
						goto done;
					}
				}
			} else {
				/*
				 * The GET CONFIGURATION command succeeded
				 * so set the media type according to the
				 * returned data
				 */
				media_info.dki_media_type = out_data[6];
				media_info.dki_media_type <<= 8;
				media_info.dki_media_type |= out_data[7];
			}
		}
	} else {
		/*
		 * The profile list is not available, so we attempt to identify
		 * the media type based on the inquiry data
		 */
		sinq = un->un_sd->sd_inq;
		if ((sinq->inq_dtype == DTYPE_DIRECT) ||
		    (sinq->inq_dtype == DTYPE_OPTICAL)) {
			/* This is a direct access device or optical disk */
			media_info.dki_media_type = DK_FIXED_DISK;

			if ((bcmp(sinq->inq_vid, "IOMEGA", 6) == 0) ||
			    (bcmp(sinq->inq_vid, "iomega", 6) == 0)) {
				if ((bcmp(sinq->inq_pid, "ZIP", 3) == 0)) {
					media_info.dki_media_type = DK_ZIP;
				} else if (
				    (bcmp(sinq->inq_pid, "jaz", 3) == 0)) {
					media_info.dki_media_type = DK_JAZ;
				}
			}
		} else {
			/*
			 * Not a CD, direct access or optical disk so return
			 * unknown media
			 */
			media_info.dki_media_type = DK_UNKNOWN;
		}
	}

	/* Now read the capacity so we can provide the lbasize and capacity */
	switch (sd_send_scsi_READ_CAPACITY(un, &capacity, &lbasize,
	    SD_PATH_DIRECT)) {
	case 0:
		break;
	case EACCES:
		rval = EACCES;
		goto done;
	default:
		rval = EIO;
		goto done;
	}

	media_info.dki_lbsize = lbasize;
	media_capacity = capacity;

	/*
	 * sd_send_scsi_READ_CAPACITY() reports capacity in
	 * un->un_sys_blocksize chunks. So we need to convert it into
	 * dki_lbsize chunks.
	 */
	media_capacity *= un->un_sys_blocksize;
	media_capacity /= lbasize;
	media_info.dki_capacity = media_capacity;

	if (ddi_copyout(&media_info, arg, sizeof (struct dk_minfo), flag)) {
		rval = EFAULT;
		/*
		 * Keep the goto so that any code added below in the
		 * future is not inadvertently skipped.
		 */
		goto done;
	}
done:
	kmem_free(out_data, SD_PROFILE_HEADER_LEN);
	kmem_free(rqbuf, SENSE_LENGTH);
	return (rval);
}
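
/*
 * Usage sketch (illustrative only): DKIOCGMEDIAINFO fills in a dk_minfo.
 * Note the unit convention established above: dki_capacity is expressed
 * in dki_lbsize blocks, so total bytes = dki_capacity * dki_lbsize. For
 * example, 2097152 blocks at a 512-byte lbsize is 2^30 bytes (1 GiB).
 *
 *	#include <sys/dkio.h>
 *
 *	static long long
 *	media_bytes(int fd)
 *	{
 *		struct dk_minfo mi;
 *
 *		if (ioctl(fd, DKIOCGMEDIAINFO, &mi) != 0)
 *			return (-1LL);
 *		return ((long long)mi.dki_capacity * mi.dki_lbsize);
 *	}
 */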


/*
 *    Function: sd_check_media
 *
 * Description: This utility routine implements the functionality for the
 *		DKIOCSTATE ioctl. This ioctl blocks the user thread until the
 *		driver state changes from that specified by the user
 *		(inserted or ejected). For example, if the user specifies
 *		DKIO_EJECTED and the current media state is inserted this
 *		routine will immediately return DKIO_INSERTED. However, if the
 *		current media state is not inserted the user thread will be
 *		blocked until the drive state changes. If DKIO_NONE is specified
 *		the user thread will block until a drive state change occurs.
 *
 *   Arguments: dev   - the device number
 *		state - user pointer to a dkio_state, updated with the current
 *			drive state at return.
 *
 * Return Code: ENXIO
 *		EIO
 *		EAGAIN
 *		EINTR
 */

static int
sd_check_media(dev_t dev, enum dkio_state state)
{
	struct sd_lun		*un = NULL;
	enum dkio_state		prev_state;
	opaque_t		token = NULL;
	int			rval = 0;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: entry\n");

	mutex_enter(SD_MUTEX(un));

	SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: "
	    "state=%x, mediastate=%x\n", state, un->un_mediastate);

	prev_state = un->un_mediastate;

	/* is there anything to do? */
	if (state == un->un_mediastate || un->un_mediastate == DKIO_NONE) {
		/*
		 * submit the request to the scsi_watch service;
		 * scsi_media_watch_cb() does the real work
		 */
		mutex_exit(SD_MUTEX(un));

		/*
		 * This change handles the case where a scsi watch request is
		 * added to a device that is powered down. To accomplish this
		 * we power up the device before adding the scsi watch request,
		 * since the scsi watch sends a TUR directly to the device
		 * which the device cannot handle if it is powered down.
		 */
		if (sd_pm_entry(un) != DDI_SUCCESS) {
			mutex_enter(SD_MUTEX(un));
			goto done;
		}

		token = scsi_watch_request_submit(SD_SCSI_DEVP(un),
		    sd_check_media_time, SENSE_LENGTH, sd_media_watch_cb,
		    (caddr_t)dev);

		sd_pm_exit(un);

		mutex_enter(SD_MUTEX(un));
		if (token == NULL) {
			rval = EAGAIN;
			goto done;
		}

		/*
		 * This is a special case IOCTL that doesn't return
		 * until the media state changes. Routine sdpower
		 * knows about and handles this so don't count it
		 * as an active cmd in the driver, which would
		 * keep the device busy to the pm framework.
		 * If the count isn't decremented the device can't
		 * be powered down.
		 */
		un->un_ncmds_in_driver--;
		ASSERT(un->un_ncmds_in_driver >= 0);

		/*
		 * if a prior request had been made, this will be the same
		 * token, as scsi_watch was designed that way.
		 */
		un->un_swr_token = token;
		un->un_specified_mediastate = state;

		/*
		 * now wait for media change
		 * we will not be signalled unless mediastate == state but it is
		 * still better to test for this condition, since there is a
		 * 2 sec cv_broadcast delay when mediastate == DKIO_INSERTED
		 */
		SD_TRACE(SD_LOG_COMMON, un,
		    "sd_check_media: waiting for media state change\n");
		while (un->un_mediastate == state) {
			if (cv_wait_sig(&un->un_state_cv, SD_MUTEX(un)) == 0) {
				SD_TRACE(SD_LOG_COMMON, un,
				    "sd_check_media: waiting for media state "
				    "was interrupted\n");
				un->un_ncmds_in_driver++;
				rval = EINTR;
				goto done;
			}
			SD_TRACE(SD_LOG_COMMON, un,
			    "sd_check_media: received signal, state=%x\n",
			    un->un_mediastate);
		}
		/*
		 * Inc the counter to indicate the device once again
		 * has an active outstanding cmd.
		 */
		un->un_ncmds_in_driver++;
	}

	/* invalidate geometry */
	if (prev_state == DKIO_INSERTED && un->un_mediastate == DKIO_EJECTED) {
		sr_ejected(un);
	}

	if (un->un_mediastate == DKIO_INSERTED && prev_state != DKIO_INSERTED) {
		uint64_t	capacity;
		uint_t		lbasize;

		SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: media inserted\n");
		mutex_exit(SD_MUTEX(un));
		/*
		 * Since the following routines use SD_PATH_DIRECT, we must
		 * call PM directly before the upcoming disk accesses. This
		 * may cause the disk to be powered/spun up.
		 */
		if (sd_pm_entry(un) == DDI_SUCCESS) {
			rval = sd_send_scsi_READ_CAPACITY(un,
			    &capacity, &lbasize, SD_PATH_DIRECT);
			if (rval != 0) {
				sd_pm_exit(un);
				mutex_enter(SD_MUTEX(un));
				goto done;
			}
		} else {
			rval = EIO;
			mutex_enter(SD_MUTEX(un));
			goto done;
		}
		mutex_enter(SD_MUTEX(un));

		sd_update_block_info(un, lbasize, capacity);

		/*
		 * Check if the media in the device is writable or not
		 */
		if (ISCD(un))
			sd_check_for_writable_cd(un, SD_PATH_DIRECT);

		mutex_exit(SD_MUTEX(un));
		cmlb_invalidate(un->un_cmlbhandle, (void *)SD_PATH_DIRECT);
		if ((cmlb_validate(un->un_cmlbhandle, 0,
		    (void *)SD_PATH_DIRECT) == 0) && un->un_f_pkstats_enabled) {
			sd_set_pstats(un);
			SD_TRACE(SD_LOG_IO_PARTITION, un,
			    "sd_check_media: un:0x%p pstats created and "
			    "set\n", un);
		}

		rval = sd_send_scsi_DOORLOCK(un, SD_REMOVAL_PREVENT,
		    SD_PATH_DIRECT);
		sd_pm_exit(un);

		mutex_enter(SD_MUTEX(un));
	}
done:
	un->un_f_watcht_stopped = FALSE;
	if (un->un_swr_token) {
		/*
		 * Use of this local token and the mutex ensures that we avoid
		 * some race conditions associated with terminating the
		 * scsi watch.
		 */
		token = un->un_swr_token;
		un->un_swr_token = (opaque_t)NULL;
		mutex_exit(SD_MUTEX(un));
		(void) scsi_watch_request_terminate(token,
		    SCSI_WATCH_TERMINATE_WAIT);
		mutex_enter(SD_MUTEX(un));
	}

	/*
	 * Update the capacity kstat value, if no media previously
	 * (capacity kstat is 0) and a media has been inserted
	 * (un_f_blockcount_is_valid == TRUE)
	 */
	if (un->un_errstats) {
		struct sd_errstats	*stp = NULL;

		stp = (struct sd_errstats *)un->un_errstats->ks_data;
		if ((stp->sd_capacity.value.ui64 == 0) &&
		    (un->un_f_blockcount_is_valid == TRUE)) {
			stp->sd_capacity.value.ui64 =
			    (uint64_t)((uint64_t)un->un_blockcount *
			    un->un_sys_blocksize);
		}
	}
	mutex_exit(SD_MUTEX(un));
	SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: done\n");
	return (rval);
}
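
/*
 * Usage sketch (illustrative only): the canonical removable-media watch
 * loop built on DKIOCSTATE, as serviced by sd_check_media() above. The
 * ioctl blocks until the media state differs from the state passed in,
 * so callers feed the returned state back in to wait for the next
 * transition. The handle_insert()/handle_eject() helpers below are
 * hypothetical placeholders.
 *
 *	#include <sys/dkio.h>
 *
 *	static void
 *	watch_media(int fd)
 *	{
 *		enum dkio_state state = DKIO_NONE;
 *
 *		for (;;) {
 *			if (ioctl(fd, DKIOCSTATE, &state) != 0)
 *				break;		// EINTR, ENXIO, ...
 *			if (state == DKIO_INSERTED)
 *				handle_insert(fd);	// hypothetical helper
 *			else if (state == DKIO_EJECTED)
 *				handle_eject(fd);	// hypothetical helper
 *		}
 *	}
 */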


/*
 *    Function: sd_delayed_cv_broadcast
 *
 * Description: Delayed cv_broadcast to allow for target to recover from media
 *		insertion.
 *
 *   Arguments: arg - driver soft state (unit) structure
 */

static void
sd_delayed_cv_broadcast(void *arg)
{
	struct sd_lun *un = arg;

	SD_TRACE(SD_LOG_COMMON, un, "sd_delayed_cv_broadcast\n");

	mutex_enter(SD_MUTEX(un));
	un->un_dcvb_timeid = NULL;
	cv_broadcast(&un->un_state_cv);
	mutex_exit(SD_MUTEX(un));
}


/*
 *    Function: sd_media_watch_cb
 *
 * Description: Callback routine used for support of the DKIOCSTATE ioctl. This
 *		routine processes the TUR sense data and updates the driver
 *		state if a transition has occurred. The user thread
 *		(sd_check_media) is then signalled.
 *
 *   Arguments: arg -   the device 'dev_t' is used for context to discriminate
 *			among multiple watches that share this callback function
 *		resultp - scsi watch facility result packet containing scsi
 *			  packet, status byte and sense data
 *
 * Return Code: 0 for success, -1 for failure
 */

static int
sd_media_watch_cb(caddr_t arg, struct scsi_watch_result *resultp)
{
	struct sd_lun		*un;
	struct scsi_status	*statusp = resultp->statusp;
	uint8_t			*sensep = (uint8_t *)resultp->sensep;
	enum dkio_state		state = DKIO_NONE;
	dev_t			dev = (dev_t)arg;
	uchar_t			actual_sense_length;
	uint8_t			skey, asc, ascq;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (-1);
	}
	actual_sense_length = resultp->actual_sense_length;

	mutex_enter(SD_MUTEX(un));
	SD_TRACE(SD_LOG_COMMON, un,
	    "sd_media_watch_cb: status=%x, sensep=%p, len=%x\n",
	    *((char *)statusp), (void *)sensep, actual_sense_length);

	if (resultp->pkt->pkt_reason == CMD_DEV_GONE) {
		un->un_mediastate = DKIO_DEV_GONE;
		cv_broadcast(&un->un_state_cv);
		mutex_exit(SD_MUTEX(un));

		return (0);
	}

	/*
	 * If there was a check condition then sensep points to valid sense
	 * data. If status was not a check condition but a reservation or busy
	 * status then the new state is DKIO_NONE.
	 */
	if (sensep != NULL) {
		skey = scsi_sense_key(sensep);
		asc = scsi_sense_asc(sensep);
		ascq = scsi_sense_ascq(sensep);

		SD_INFO(SD_LOG_COMMON, un,
		    "sd_media_watch_cb: sense KEY=%x, ASC=%x, ASCQ=%x\n",
		    skey, asc, ascq);
		/* This routine only uses up to 13 bytes of sense data. */
		if (actual_sense_length >= 13) {
			if (skey == KEY_UNIT_ATTENTION) {
				if (asc == 0x28) {
					state = DKIO_INSERTED;
				}
			} else if (skey == KEY_NOT_READY) {
				/*
				 * Sense data of 02/04/02 means that the
				 * host should send a start command.
				 * Explicitly leave the media state as is
				 * (inserted), as the media is inserted and
				 * the host has stopped the device for PM
				 * reasons. The next true read/write to this
				 * media will bring the device to the right
				 * state for media access.
				 */
				if (asc == 0x3a) {
					state = DKIO_EJECTED;
				} else {
					/*
					 * If the drive is busy with an
					 * operation or long write, keep the
					 * media in an inserted state.
					 */
					if ((asc == 0x04) &&
					    ((ascq == 0x02) ||
					    (ascq == 0x07) ||
					    (ascq == 0x08))) {
						state = DKIO_INSERTED;
					}
				}
			} else if (skey == KEY_NO_SENSE) {
				if ((asc == 0x00) && (ascq == 0x00)) {
					/*
					 * Sense Data 00/00/00 does not provide
					 * any information about the state of
					 * the media. Ignore it.
					 */
					mutex_exit(SD_MUTEX(un));
					return (0);
				}
			}
		}
	} else if ((*((char *)statusp) == STATUS_GOOD) &&
	    (resultp->pkt->pkt_reason == CMD_CMPLT)) {
		state = DKIO_INSERTED;
	}

	SD_TRACE(SD_LOG_COMMON, un,
	    "sd_media_watch_cb: state=%x, specified=%x\n",
	    state, un->un_specified_mediastate);

	/*
	 * now signal the waiting thread if this is *not* the specified state;
	 * delay the signal if the state is DKIO_INSERTED to allow the target
	 * to recover
	 */
	if (state != un->un_specified_mediastate) {
		un->un_mediastate = state;
		if (state == DKIO_INSERTED) {
			/*
			 * delay the signal to give the drive a chance
			 * to do what it apparently needs to do
			 */
			SD_TRACE(SD_LOG_COMMON, un,
			    "sd_media_watch_cb: delayed cv_broadcast\n");
			if (un->un_dcvb_timeid == NULL) {
				un->un_dcvb_timeid =
				    timeout(sd_delayed_cv_broadcast, un,
				    drv_usectohz((clock_t)MEDIA_ACCESS_DELAY));
			}
		} else {
			SD_TRACE(SD_LOG_COMMON, un,
			    "sd_media_watch_cb: immediate cv_broadcast\n");
			cv_broadcast(&un->un_state_cv);
		}
	}
	mutex_exit(SD_MUTEX(un));
	return (0);
}
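
/*
 * For reference, the sense-data-to-state mapping applied by
 * sd_media_watch_cb() above condenses to the following (ASC/ASCQ values
 * as used in the code; descriptions follow the usual SCSI additional
 * sense code assignments):
 *
 *	Unit Attention, ASC 0x28 (medium may have changed) -> DKIO_INSERTED
 *	Not Ready, ASC 0x3a (medium not present)           -> DKIO_EJECTED
 *	Not Ready, ASC 0x04, ASCQ 0x02/0x07/0x08 (stopped
 *	    for PM, or operation/long write in progress)   -> DKIO_INSERTED
 *	No Sense, ASC/ASCQ 00/00 (no state information)    -> ignored
 *	GOOD status, CMD_CMPLT, no sense data              -> DKIO_INSERTED
 */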


/*
 *    Function: sd_dkio_get_temp
 *
 * Description: This routine is the driver entry point for handling ioctl
 *		requests to get the disk temperature.
 *
 *   Arguments: dev  - the device number
 *		arg  - pointer to user provided dk_temperature structure.
 *		flag - this argument is a pass through to ddi_copyxxx()
 *		       directly from the mode argument of ioctl().
 *
 * Return Code: 0
 *		EFAULT
 *		ENXIO
 *		EAGAIN
 */

static int
sd_dkio_get_temp(dev_t dev, caddr_t arg, int flag)
{
	struct sd_lun		*un = NULL;
	struct dk_temperature	*dktemp = NULL;
	uchar_t			*temperature_page;
	int			rval = 0;
	int			path_flag = SD_PATH_STANDARD;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	dktemp = kmem_zalloc(sizeof (struct dk_temperature), KM_SLEEP);

	/* copyin the disk temp argument to get the user flags */
	if (ddi_copyin((void *)arg, dktemp,
	    sizeof (struct dk_temperature), flag) != 0) {
		rval = EFAULT;
		goto done;
	}

	/* Initialize the temperature to invalid. */
	dktemp->dkt_cur_temp = (short)DKT_INVALID_TEMP;
	dktemp->dkt_ref_temp = (short)DKT_INVALID_TEMP;

	/*
	 * Note: Investigate removing the "bypass pm" semantic.
	 * Can we just bypass PM always?
	 */
	if (dktemp->dkt_flags & DKT_BYPASS_PM) {
		path_flag = SD_PATH_DIRECT;
		ASSERT(!mutex_owned(&un->un_pm_mutex));
		mutex_enter(&un->un_pm_mutex);
		if (SD_DEVICE_IS_IN_LOW_POWER(un)) {
			/*
			 * If DKT_BYPASS_PM is set, and the drive happens to be
			 * in low power mode, we cannot wake it up; we need to
			 * return EAGAIN.
			 */
			mutex_exit(&un->un_pm_mutex);
			rval = EAGAIN;
			goto done;
		} else {
			/*
			 * Indicate to PM the device is busy. This is required
			 * to avoid a race - i.e. the ioctl is issuing a
			 * command and the pm framework brings down the device
			 * to low power mode (possible power cut-off on some
			 * platforms).
			 */
			mutex_exit(&un->un_pm_mutex);
			if (sd_pm_entry(un) != DDI_SUCCESS) {
				rval = EAGAIN;
				goto done;
			}
		}
	}

	temperature_page = kmem_zalloc(TEMPERATURE_PAGE_SIZE, KM_SLEEP);

	if ((rval = sd_send_scsi_LOG_SENSE(un, temperature_page,
	    TEMPERATURE_PAGE_SIZE, TEMPERATURE_PAGE, 1, 0, path_flag)) != 0) {
		goto done2;
	}

	/*
	 * For the current temperature verify that the parameter length is 0x02
	 * and the parameter code is 0x00
	 */
	if ((temperature_page[7] == 0x02) && (temperature_page[4] == 0x00) &&
	    (temperature_page[5] == 0x00)) {
		if (temperature_page[9] == 0xFF) {
			dktemp->dkt_cur_temp = (short)DKT_INVALID_TEMP;
		} else {
			dktemp->dkt_cur_temp = (short)(temperature_page[9]);
		}
	}

	/*
	 * For the reference temperature verify that the parameter
	 * length is 0x02 and the parameter code is 0x01
	 */
	if ((temperature_page[13] == 0x02) && (temperature_page[10] == 0x00) &&
	    (temperature_page[11] == 0x01)) {
		if (temperature_page[15] == 0xFF) {
			dktemp->dkt_ref_temp = (short)DKT_INVALID_TEMP;
		} else {
			dktemp->dkt_ref_temp = (short)(temperature_page[15]);
		}
	}

	/* Do the copyout regardless of the temperature command's status. */
	if (ddi_copyout(dktemp, (void *)arg, sizeof (struct dk_temperature),
	    flag) != 0) {
		rval = EFAULT;
	}

done2:
	if (path_flag == SD_PATH_DIRECT) {
		sd_pm_exit(un);
	}

	kmem_free(temperature_page, TEMPERATURE_PAGE_SIZE);
done:
	if (dktemp != NULL) {
		kmem_free(dktemp, sizeof (struct dk_temperature));
	}

	return (rval);
}
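
/*
 * Usage sketch (illustrative only, assuming the DKIOCGTEMPERATURE
 * command and dk_temperature layout from <sys/dkio.h>): the temperature
 * log page reports degrees Celsius, with DKT_INVALID_TEMP indicating no
 * data. DKT_BYPASS_PM, as handled above, reads the temperature without
 * spinning up a powered-down drive (EAGAIN is returned instead).
 *
 *	#include <sys/dkio.h>
 *	#include <strings.h>
 *
 *	static int
 *	disk_temp(int fd, short *cur)
 *	{
 *		struct dk_temperature dkt;
 *
 *		bzero(&dkt, sizeof (dkt));
 *		dkt.dkt_flags = DKT_BYPASS_PM;
 *		if (ioctl(fd, DKIOCGTEMPERATURE, &dkt) != 0)
 *			return (-1);
 *		if (dkt.dkt_cur_temp == DKT_INVALID_TEMP)
 *			return (-1);
 *		*cur = dkt.dkt_cur_temp;
 *		return (0);
 *	}
 */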


/*
 *    Function: sd_log_page_supported
 *
 * Description: This routine uses sd_send_scsi_LOG_SENSE to find the list of
 *		supported log pages.
 *
 *   Arguments: un       - driver soft state (unit) structure
 *		log_page - the log page code to look for in the list of
 *			   supported pages.
 *
 * Return Code: -1 - on error (log sense is optional and may not be supported).
 *		0  - log page not found.
 *		1  - log page found.
 */

static int
sd_log_page_supported(struct sd_lun *un, int log_page)
{
	uchar_t *log_page_data;
	int	i;
	int	match = 0;
	int	log_size;

	log_page_data = kmem_zalloc(0xFF, KM_SLEEP);

	if (sd_send_scsi_LOG_SENSE(un, log_page_data, 0xFF, 0, 0x01, 0,
	    SD_PATH_DIRECT) != 0) {
		SD_ERROR(SD_LOG_COMMON, un,
		    "sd_log_page_supported: failed log page retrieval\n");
		kmem_free(log_page_data, 0xFF);
		return (-1);
	}
	log_size = log_page_data[3];

	/*
	 * The list of supported log pages starts at the fourth byte. Check
	 * until we run out of log pages or a match is found.
	 */
	for (i = 4; (i < (log_size + 4)) && !match; i++) {
		if (log_page_data[i] == log_page) {
			match++;
		}
	}
	kmem_free(log_page_data, 0xFF);
	return (match);
}


/*
 *    Function: sd_mhdioc_failfast
 *
 * Description: This routine is the driver entry point for handling ioctl
 *		requests to enable/disable the multihost failfast option.
 *		(MHIOCENFAILFAST)
 *
 *   Arguments: dev  - the device number
 *		arg  - user specified probing interval.
 *		flag - this argument is a pass through to ddi_copyxxx()
 *		       directly from the mode argument of ioctl().
 *
 * Return Code: 0
 *		EFAULT
 *		ENXIO
 */

static int
sd_mhdioc_failfast(dev_t dev, caddr_t arg, int flag)
{
	struct sd_lun	*un = NULL;
	int		mh_time;
	int		rval = 0;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	if (ddi_copyin((void *)arg, &mh_time, sizeof (int), flag))
		return (EFAULT);

	if (mh_time) {
		mutex_enter(SD_MUTEX(un));
		un->un_resvd_status |= SD_FAILFAST;
		mutex_exit(SD_MUTEX(un));
		/*
		 * If mh_time is INT_MAX, then this ioctl is being used for
		 * SCSI-3 PGR purposes, and we don't need to spawn watch thread.
		 */
		if (mh_time != INT_MAX) {
			rval = sd_check_mhd(dev, mh_time);
		}
	} else {
		(void) sd_check_mhd(dev, 0);
		mutex_enter(SD_MUTEX(un));
		un->un_resvd_status &= ~SD_FAILFAST;
		mutex_exit(SD_MUTEX(un));
	}
	return (rval);
}


/*
 *    Function: sd_mhdioc_takeown
 *
 * Description: This routine is the driver entry point for handling ioctl
 *		requests to forcefully acquire exclusive access rights to the
 *		multihost disk (MHIOCTKOWN).
 *
 *   Arguments: dev  - the device number
 *		arg  - user provided structure specifying the delay
 *		       parameters in milliseconds
 *		flag - this argument is a pass through to ddi_copyxxx()
 *		       directly from the mode argument of ioctl().
 *
 * Return Code: 0
 *		EFAULT
 *		ENXIO
 */

static int
sd_mhdioc_takeown(dev_t dev, caddr_t arg, int flag)
{
	struct sd_lun		*un = NULL;
	struct mhioctkown	*tkown = NULL;
	int			rval = 0;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	if (arg != NULL) {
		tkown = (struct mhioctkown *)
		    kmem_zalloc(sizeof (struct mhioctkown), KM_SLEEP);
		rval = ddi_copyin(arg, tkown, sizeof (struct mhioctkown), flag);
		if (rval != 0) {
			rval = EFAULT;
			goto error;
		}
	}

	rval = sd_take_ownership(dev, tkown);
	mutex_enter(SD_MUTEX(un));
	if (rval == 0) {
		un->un_resvd_status |= SD_RESERVE;
		if (tkown != NULL && tkown->reinstate_resv_delay != 0) {
			sd_reinstate_resv_delay =
			    tkown->reinstate_resv_delay * 1000;
		} else {
			sd_reinstate_resv_delay = SD_REINSTATE_RESV_DELAY;
		}
		/*
		 * Give the scsi_watch routine interval set by
		 * the MHIOCENFAILFAST ioctl precedence here.
		 */
		if ((un->un_resvd_status & SD_FAILFAST) == 0) {
			mutex_exit(SD_MUTEX(un));
			(void) sd_check_mhd(dev, sd_reinstate_resv_delay/1000);
			SD_TRACE(SD_LOG_IOCTL_MHD, un,
			    "sd_mhdioc_takeown : %d\n",
			    sd_reinstate_resv_delay);
		} else {
			mutex_exit(SD_MUTEX(un));
		}
		(void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_NOTIFY,
		    sd_mhd_reset_notify_cb, (caddr_t)un);
	} else {
		un->un_resvd_status &= ~SD_RESERVE;
		mutex_exit(SD_MUTEX(un));
	}

error:
	if (tkown != NULL) {
		kmem_free(tkown, sizeof (struct mhioctkown));
	}
	return (rval);
}
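
/*
 * Usage sketch (illustrative only): cluster software drives the takeown
 * path above through MHIOCTKOWN. The mhioctkown structure (see
 * <sys/mhd.h>) carries the delay parameters in milliseconds; a
 * reinstate_resv_delay of 0 selects the SD_REINSTATE_RESV_DELAY default,
 * as the code above shows.
 *
 *	#include <sys/mhd.h>
 *	#include <strings.h>
 *
 *	static int
 *	take_disk(int fd)
 *	{
 *		struct mhioctkown tkown;
 *
 *		bzero(&tkown, sizeof (tkown));
 *		tkown.reinstate_resv_delay = 1000;	// reinstate: 1 sec
 *		return (ioctl(fd, MHIOCTKOWN, &tkown));
 *	}
 *
 * Passing a NULL arg is also accepted, in which case the defaults apply.
 */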


/*
 *    Function: sd_mhdioc_release
 *
 * Description: This routine is the driver entry point for handling ioctl
 *		requests to release exclusive access rights to the multihost
 *		disk (MHIOCRELEASE).
 *
 *   Arguments: dev - the device number
 *
 * Return Code: 0
 *		ENXIO
 */

static int
sd_mhdioc_release(dev_t dev)
{
	struct sd_lun		*un = NULL;
	timeout_id_t		resvd_timeid_save;
	int			resvd_status_save;
	int			rval = 0;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	mutex_enter(SD_MUTEX(un));
	resvd_status_save = un->un_resvd_status;
	un->un_resvd_status &=
	    ~(SD_RESERVE | SD_LOST_RESERVE | SD_WANT_RESERVE);
	if (un->un_resvd_timeid) {
		resvd_timeid_save = un->un_resvd_timeid;
		un->un_resvd_timeid = NULL;
		mutex_exit(SD_MUTEX(un));
		(void) untimeout(resvd_timeid_save);
	} else {
		mutex_exit(SD_MUTEX(un));
	}

	/*
	 * destroy any pending timeout thread that may be attempting to
	 * reinstate reservation on this device.
	 */
	sd_rmv_resv_reclaim_req(dev);

	if ((rval = sd_reserve_release(dev, SD_RELEASE)) == 0) {
		mutex_enter(SD_MUTEX(un));
		if ((un->un_mhd_token) &&
		    ((un->un_resvd_status & SD_FAILFAST) == 0)) {
			mutex_exit(SD_MUTEX(un));
			(void) sd_check_mhd(dev, 0);
		} else {
			mutex_exit(SD_MUTEX(un));
		}
		(void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_CANCEL,
		    sd_mhd_reset_notify_cb, (caddr_t)un);
	} else {
		/*
		 * sd_mhd_watch_cb will restart the resvd recover timeout thread
		 */
		mutex_enter(SD_MUTEX(un));
		un->un_resvd_status = resvd_status_save;
		mutex_exit(SD_MUTEX(un));
	}
	return (rval);
}


/*
 *    Function: sd_mhdioc_register_devid
 *
 * Description: This routine is the driver entry point for handling ioctl
 *		requests to register the device id (MHIOCREREGISTERDEVID).
 *
 *		Note: The implementation for this ioctl has been updated to
 *		be consistent with the original PSARC case (1999/357)
 *		(4375899, 4241671, 4220005)
 *
 *   Arguments: dev - the device number
 *
 * Return Code: 0
 *		ENXIO
 */

static int
sd_mhdioc_register_devid(dev_t dev)
{
	struct sd_lun	*un = NULL;
	int		rval = 0;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	ASSERT(!mutex_owned(SD_MUTEX(un)));

	mutex_enter(SD_MUTEX(un));

	/* If a devid already exists, de-register it */
	if (un->un_devid != NULL) {
		ddi_devid_unregister(SD_DEVINFO(un));
		/*
		 * After unregister devid, needs to free devid memory
		 */
		ddi_devid_free(un->un_devid);
		un->un_devid = NULL;
	}

	/* Check for reservation conflict */
	mutex_exit(SD_MUTEX(un));
	rval = sd_send_scsi_TEST_UNIT_READY(un, 0);
	mutex_enter(SD_MUTEX(un));

	switch (rval) {
	case 0:
		sd_register_devid(un, SD_DEVINFO(un), SD_TARGET_IS_UNRESERVED);
		break;
	case EACCES:
		break;
	default:
		rval = EIO;
	}

	mutex_exit(SD_MUTEX(un));
	return (rval);
}


/*
 *    Function: sd_mhdioc_inkeys
 *
 * Description: This routine is the driver entry point for handling ioctl
 *		requests to issue the SCSI-3 Persistent In Read Keys command
 *		to the device (MHIOCGRP_INKEYS).
 *
 *   Arguments: dev	- the device number
 *		arg	- user provided in_keys structure
 *		flag	- this argument is a pass through to ddi_copyxxx()
 *			  directly from the mode argument of ioctl().
 *
 * Return Code: code returned by sd_persistent_reservation_in_read_keys()
 *		ENXIO
 *		EFAULT
 */

static int
sd_mhdioc_inkeys(dev_t dev, caddr_t arg, int flag)
{
	struct sd_lun		*un;
	mhioc_inkeys_t		inkeys;
	int			rval = 0;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

#ifdef _MULTI_DATAMODEL
	switch (ddi_model_convert_from(flag & FMODELS)) {
	case DDI_MODEL_ILP32: {
		struct mhioc_inkeys32	inkeys32;

		if (ddi_copyin(arg, &inkeys32,
		    sizeof (struct mhioc_inkeys32), flag) != 0) {
			return (EFAULT);
		}
		inkeys.li = (mhioc_key_list_t *)(uintptr_t)inkeys32.li;
		if ((rval = sd_persistent_reservation_in_read_keys(un,
		    &inkeys, flag)) != 0) {
			return (rval);
		}
		inkeys32.generation = inkeys.generation;
		if (ddi_copyout(&inkeys32, arg, sizeof (struct mhioc_inkeys32),
		    flag) != 0) {
			return (EFAULT);
		}
		break;
	}
	case DDI_MODEL_NONE:
		if (ddi_copyin(arg, &inkeys, sizeof (mhioc_inkeys_t),
		    flag) != 0) {
			return (EFAULT);
		}
		if ((rval = sd_persistent_reservation_in_read_keys(un,
		    &inkeys, flag)) != 0) {
			return (rval);
		}
		if (ddi_copyout(&inkeys, arg, sizeof (mhioc_inkeys_t),
		    flag) != 0) {
			return (EFAULT);
		}
		break;
	}

#else /* ! _MULTI_DATAMODEL */

	if (ddi_copyin(arg, &inkeys, sizeof (mhioc_inkeys_t), flag) != 0) {
		return (EFAULT);
	}
	rval = sd_persistent_reservation_in_read_keys(un, &inkeys, flag);
	if (rval != 0) {
		return (rval);
	}
	if (ddi_copyout(&inkeys, arg, sizeof (mhioc_inkeys_t), flag) != 0) {
		return (EFAULT);
	}

#endif /* _MULTI_DATAMODEL */

	return (rval);
}
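
/*
 * Usage sketch (illustrative only, assuming the mhioc_* layouts from
 * <sys/mhd.h>): MHIOCGRP_INKEYS is typically issued twice -- once with
 * an empty list to learn how many registered keys exist (listlen), then
 * again with a list large enough to hold them.
 *
 *	#include <sys/mhd.h>
 *	#include <stdlib.h>
 *	#include <strings.h>
 *
 *	static int
 *	read_keys(int fd)
 *	{
 *		mhioc_inkeys_t ik;
 *		mhioc_key_list_t kl;
 *		mhioc_resv_key_t *keys;
 *
 *		bzero(&kl, sizeof (kl));	// listsize 0, list NULL
 *		ik.li = &kl;
 *		if (ioctl(fd, MHIOCGRP_INKEYS, &ik) != 0)
 *			return (-1);
 *		keys = calloc(kl.listlen, sizeof (mhioc_resv_key_t));
 *		kl.listsize = kl.listlen;
 *		kl.list = keys;
 *		if (ioctl(fd, MHIOCGRP_INKEYS, &ik) != 0) {
 *			free(keys);
 *			return (-1);
 *		}
 *		// kl.listlen keys are now valid; ik.generation is updated
 *		free(keys);
 *		return (0);
 *	}
 */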


/*
 *    Function: sd_mhdioc_inresv
 *
 * Description: This routine is the driver entry point for handling ioctl
 *		requests to issue the SCSI-3 Persistent In Read Reservations
 *		command to the device (MHIOCGRP_INRESV).
 *
 *   Arguments: dev	- the device number
 *		arg	- user provided in_resv structure
 *		flag	- this argument is a pass through to ddi_copyxxx()
 *			  directly from the mode argument of ioctl().
 *
 * Return Code: code returned by sd_persistent_reservation_in_read_resv()
 *		ENXIO
 *		EFAULT
 */

static int
sd_mhdioc_inresv(dev_t dev, caddr_t arg, int flag)
{
	struct sd_lun		*un;
	mhioc_inresvs_t		inresvs;
	int			rval = 0;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

#ifdef _MULTI_DATAMODEL

	switch (ddi_model_convert_from(flag & FMODELS)) {
	case DDI_MODEL_ILP32: {
		struct mhioc_inresvs32	inresvs32;

		if (ddi_copyin(arg, &inresvs32,
		    sizeof (struct mhioc_inresvs32), flag) != 0) {
			return (EFAULT);
		}
		inresvs.li = (mhioc_resv_desc_list_t *)(uintptr_t)inresvs32.li;
		if ((rval = sd_persistent_reservation_in_read_resv(un,
		    &inresvs, flag)) != 0) {
			return (rval);
		}
		inresvs32.generation = inresvs.generation;
		if (ddi_copyout(&inresvs32, arg,
		    sizeof (struct mhioc_inresvs32), flag) != 0) {
			return (EFAULT);
		}
		break;
	}
	case DDI_MODEL_NONE:
		if (ddi_copyin(arg, &inresvs,
		    sizeof (mhioc_inresvs_t), flag) != 0) {
			return (EFAULT);
		}
		if ((rval = sd_persistent_reservation_in_read_resv(un,
		    &inresvs, flag)) != 0) {
			return (rval);
		}
		if (ddi_copyout(&inresvs, arg,
		    sizeof (mhioc_inresvs_t), flag) != 0) {
			return (EFAULT);
		}
		break;
	}

#else /* ! _MULTI_DATAMODEL */

	if (ddi_copyin(arg, &inresvs, sizeof (mhioc_inresvs_t), flag) != 0) {
		return (EFAULT);
	}
	rval = sd_persistent_reservation_in_read_resv(un, &inresvs, flag);
	if (rval != 0) {
		return (rval);
	}
	if (ddi_copyout(&inresvs, arg, sizeof (mhioc_inresvs_t), flag)) {
		return (EFAULT);
	}

#endif /* ! _MULTI_DATAMODEL */

	return (rval);
}


/*
 * The following routines support the clustering functionality described below
 * and implement lost reservation reclaim functionality.
 *
 * Clustering
 * ----------
 * The clustering code uses two different, independent forms of SCSI
 * reservation. Traditional SCSI-2 Reserve/Release and the newer SCSI-3
 * Persistent Group Reservations. For any particular disk, it will use either
 * SCSI-2 or SCSI-3 PGR but never both at the same time for the same disk.
 *
 * SCSI-2
 * The cluster software takes ownership of a multi-hosted disk by issuing the
 * MHIOCTKOWN ioctl to the disk driver. It releases ownership by issuing the
 * MHIOCRELEASE ioctl. Closely related is the MHIOCENFAILFAST ioctl -- a
 * cluster, just after taking ownership of the disk with the MHIOCTKOWN ioctl,
 * then issues the MHIOCENFAILFAST ioctl. This ioctl "enables failfast" in the
 * driver. The meaning of failfast is that if the driver (on this host) ever
 * encounters the scsi error return code RESERVATION_CONFLICT from the device,
 * it should immediately panic the host. The motivation for this ioctl is that
 * if this host does encounter reservation conflict, the underlying cause is
 * that some other host of the cluster has decided that this host is no longer
 * in the cluster and has seized control of the disks for itself. Since this
 * host is no longer in the cluster, it ought to panic itself. The
 * MHIOCENFAILFAST ioctl does two things:
 *	(a) it sets a flag that will cause any returned RESERVATION_CONFLICT
 *	    error to panic the host
 *	(b) it sets up a periodic timer to test whether this host still has
 *	    "access" (in that no other host has reserved the device): if the
 *	    periodic timer gets RESERVATION_CONFLICT, the host is panicked. The
 *	    purpose of that periodic timer is to handle scenarios where the
 *	    host is otherwise temporarily quiescent, temporarily doing no real
 *	    i/o.
 * The MHIOCTKOWN ioctl will "break" a reservation that is held by another
 * host, by issuing a SCSI Bus Device Reset. It will then issue a SCSI Reserve
 * for the device itself.
 *
 * SCSI-3 PGR
 * A direct semantic implementation of the SCSI-3 Persistent Reservation
 * facility is supported through the shared multihost disk ioctls
 * (MHIOCGRP_INKEYS, MHIOCGRP_INRESV, MHIOCGRP_REGISTER, MHIOCGRP_RESERVE,
 * MHIOCGRP_PREEMPTANDABORT)
 *
 * Reservation Reclaim:
 * --------------------
 * To support the lost reservation reclaim operations this driver creates a
 * single thread to handle reinstating reservations on all devices that have
 * lost reservations. sd_resv_reclaim_requests are logged for all devices
 * that have LOST RESERVATIONS when the scsi watch facility calls back
 * sd_mhd_watch_cb, and the reservation reclaim thread loops through the
 * requests to regain the lost reservations.
 */
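
/*
 * Usage sketch (illustrative only) of the SCSI-2 ownership sequence
 * described above: take ownership, then enable failfast with a probe
 * interval. mh_time is in milliseconds; 0 disables failfast, and
 * INT_MAX sets the failfast flag without starting the periodic watch
 * (the SCSI-3 PGR usage, per sd_mhdioc_failfast() above).
 *
 *	#include <sys/mhd.h>
 *	#include <limits.h>
 *
 *	static int
 *	own_with_failfast(int fd)
 *	{
 *		int mh_time = 1000;	// probe once per second
 *
 *		if (ioctl(fd, MHIOCTKOWN, NULL) != 0)
 *			return (-1);
 *		if (ioctl(fd, MHIOCENFAILFAST, &mh_time) != 0) {
 *			(void) ioctl(fd, MHIOCRELEASE, NULL);
 *			return (-1);
 *		}
 *		return (0);
 *	}
 *
 * A later MHIOCENFAILFAST call with mh_time = 0 plus an MHIOCRELEASE
 * undoes both steps.
 */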

/*
 *    Function: sd_check_mhd()
 *
 * Description: This function sets up and submits a scsi watch request or
 *		terminates an existing watch request. This routine is used in
 *		support of reservation reclaim.
 *
 *   Arguments: dev    - the device 'dev_t' is used for context to discriminate
 *			 among multiple watches that share the callback function
 *		interval - the number of milliseconds specifying the watch
 *			   interval for issuing TEST UNIT READY commands. If
 *			   set to 0 the watch should be terminated. If the
 *			   interval is set to 0 and if the device is required
 *			   to hold reservation while disabling failfast, the
 *			   watch is restarted with an interval of
 *			   reinstate_resv_delay.
 *
 * Return Code: 0	- Successful submit/terminate of scsi watch request
 *		ENXIO	- Indicates an invalid device was specified
 *		EAGAIN	- Unable to submit the scsi watch request
 */

static int
sd_check_mhd(dev_t dev, int interval)
{
	struct sd_lun	*un;
	opaque_t	token;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	/* is this a watch termination request? */
	if (interval == 0) {
		mutex_enter(SD_MUTEX(un));
		/* if there is an existing watch task then terminate it */
		if (un->un_mhd_token) {
			token = un->un_mhd_token;
			un->un_mhd_token = NULL;
			mutex_exit(SD_MUTEX(un));
			(void) scsi_watch_request_terminate(token,
			    SCSI_WATCH_TERMINATE_WAIT);
			mutex_enter(SD_MUTEX(un));
		} else {
			mutex_exit(SD_MUTEX(un));
			/*
			 * Note: If we return here we don't check for the
			 * failfast case. This is the original legacy
			 * implementation but perhaps we should be checking
			 * the failfast case.
			 */
			return (0);
		}
		/*
		 * If the device is required to hold reservation while
		 * disabling failfast, we need to restart the scsi_watch
		 * routine with an interval of reinstate_resv_delay.
		 */
		if (un->un_resvd_status & SD_RESERVE) {
			interval = sd_reinstate_resv_delay/1000;
		} else {
			/* no failfast so bail */
			mutex_exit(SD_MUTEX(un));
			return (0);
		}
		mutex_exit(SD_MUTEX(un));
	}

	/*
	 * adjust minimum time interval to 1 second,
	 * and convert from msecs to usecs
	 */
	if (interval > 0 && interval < 1000) {
		interval = 1000;
	}
	interval *= 1000;

	/*
	 * submit the request to the scsi_watch service
	 */
	token = scsi_watch_request_submit(SD_SCSI_DEVP(un), interval,
	    SENSE_LENGTH, sd_mhd_watch_cb, (caddr_t)dev);
	if (token == NULL) {
		return (EAGAIN);
	}

	/*
	 * save token for termination later on
	 */
	mutex_enter(SD_MUTEX(un));
	un->un_mhd_token = token;
	mutex_exit(SD_MUTEX(un));
	return (0);
}


/*
 *    Function: sd_mhd_watch_cb()
 *
 * Description: This function is the call back function used by the scsi watch
 *		facility. The scsi watch facility sends the "Test Unit Ready"
 *		and processes the status. If applicable (i.e. a "Unit Attention"
 *		status and automatic "Request Sense" not used) the scsi watch
 *		facility will send a "Request Sense" and retrieve the sense data
 *		to be passed to this callback function. In either case (the
 *		automatic "Request Sense" or the facility submitting one), this
 *		callback is passed the status and sense data.
 *
 *   Arguments: arg -   the device 'dev_t' is used for context to discriminate
 *			among multiple watches that share this callback function
 *		resultp - scsi watch facility result packet containing scsi
 *			  packet, status byte and sense data
 *
 * Return Code: 0 - continue the watch task
 *		non-zero - terminate the watch task
 */

static int
sd_mhd_watch_cb(caddr_t arg, struct scsi_watch_result *resultp)
{
	struct sd_lun			*un;
	struct scsi_status		*statusp;
	uint8_t				*sensep;
	struct scsi_pkt			*pkt;
	uchar_t				actual_sense_length;
	dev_t				dev = (dev_t)arg;

	ASSERT(resultp != NULL);
	statusp			= resultp->statusp;
	sensep			= (uint8_t *)resultp->sensep;
	pkt			= resultp->pkt;
	actual_sense_length	= resultp->actual_sense_length;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	SD_TRACE(SD_LOG_IOCTL_MHD, un,
	    "sd_mhd_watch_cb: reason '%s', status '%s'\n",
	    scsi_rname(pkt->pkt_reason), sd_sname(*((unsigned char *)statusp)));

	/* Begin processing of the status and/or sense data */
	if (pkt->pkt_reason != CMD_CMPLT) {
		/* Handle the incomplete packet */
		sd_mhd_watch_incomplete(un, pkt);
		return (0);
	} else if (*((unsigned char *)statusp) != STATUS_GOOD) {
		if (*((unsigned char *)statusp)
		    == STATUS_RESERVATION_CONFLICT) {
			/*
			 * Handle a reservation conflict by panicking if
			 * configured for failfast or by logging the conflict
			 * and updating the reservation status
			 */
			mutex_enter(SD_MUTEX(un));
			if ((un->un_resvd_status & SD_FAILFAST) &&
			    (sd_failfast_enable)) {
				sd_panic_for_res_conflict(un);
				/*NOTREACHED*/
			}
			SD_INFO(SD_LOG_IOCTL_MHD, un,
			    "sd_mhd_watch_cb: Reservation Conflict\n");
			un->un_resvd_status |= SD_RESERVATION_CONFLICT;
			mutex_exit(SD_MUTEX(un));
		}
	}

	if (sensep != NULL) {
		if (actual_sense_length >= (SENSE_LENGTH - 2)) {
			mutex_enter(SD_MUTEX(un));
			if ((scsi_sense_asc(sensep) ==
			    SD_SCSI_RESET_SENSE_CODE) &&
			    (un->un_resvd_status & SD_RESERVE)) {
				/*
				 * The additional sense code indicates a power
				 * on or bus device reset has occurred; update
				 * the reservation status.
				 */
				un->un_resvd_status |=
				    (SD_LOST_RESERVE | SD_WANT_RESERVE);
				SD_INFO(SD_LOG_IOCTL_MHD, un,
				    "sd_mhd_watch_cb: Lost Reservation\n");
			}
		} else {
			return (0);
		}
	} else {
		mutex_enter(SD_MUTEX(un));
	}

	if ((un->un_resvd_status & SD_RESERVE) &&
	    (un->un_resvd_status & SD_LOST_RESERVE)) {
		if (un->un_resvd_status & SD_WANT_RESERVE) {
			/*
			 * A reset occurred in between the last probe and this
			 * one so if a timeout is pending cancel it.
			 */
			if (un->un_resvd_timeid) {
				timeout_id_t temp_id = un->un_resvd_timeid;
				un->un_resvd_timeid = NULL;
				mutex_exit(SD_MUTEX(un));
				(void) untimeout(temp_id);
				mutex_enter(SD_MUTEX(un));
			}
			un->un_resvd_status &= ~SD_WANT_RESERVE;
		}
		if (un->un_resvd_timeid == 0) {
			/* Schedule a timeout to handle the lost reservation */
			un->un_resvd_timeid = timeout(sd_mhd_resvd_recover,
			    (void *)dev,
			    drv_usectohz(sd_reinstate_resv_delay));
		}
	}
	mutex_exit(SD_MUTEX(un));
	return (0);
}


/*
 *    Function: sd_mhd_watch_incomplete()
 *
 * Description: This function is used to find out why a scsi pkt sent by the
 *		scsi watch facility was not completed. Under some scenarios this
 *		routine will return. Otherwise it will send a bus reset to see
 *		if the drive is still online.
 *
 *   Arguments: un  - driver soft state (unit) structure
 *		pkt - incomplete scsi pkt
 */

static void
sd_mhd_watch_incomplete(struct sd_lun *un, struct scsi_pkt *pkt)
{
	int	be_chatty;
	int	perr;

	ASSERT(pkt != NULL);
	ASSERT(un != NULL);
	be_chatty	= (!(pkt->pkt_flags & FLAG_SILENT));
	perr		= (pkt->pkt_statistics & STAT_PERR);

	mutex_enter(SD_MUTEX(un));
	if (un->un_state == SD_STATE_DUMPING) {
		mutex_exit(SD_MUTEX(un));
		return;
	}

	switch (pkt->pkt_reason) {
	case CMD_UNX_BUS_FREE:
		/*
		 * If we had a parity error that caused the target to drop BSY*,
		 * don't be chatty about it.
		 */
		if (perr && be_chatty) {
			be_chatty = 0;
		}
		break;
	case CMD_TAG_REJECT:
		/*
		 * The SCSI-2 spec states that a tag reject will be sent by the
		 * target if tagged queuing is not supported. A tag reject may
		 * also be sent during certain initialization periods or to
		 * control internal resources. For the latter case the target
		 * may also return Queue Full.
		 *
		 * If this driver receives a tag reject from a target that is
		 * going through an init period or controlling internal
		 * resources, tagged queuing will be disabled. This is a less
		 * than optimal behavior but the driver is unable to determine
		 * the target state and assumes tagged queuing is not
		 * supported.
		 */
		pkt->pkt_flags = 0;
		un->un_tagflags = 0;

		if (un->un_f_opt_queueing == TRUE) {
			un->un_throttle = min(un->un_throttle, 3);
		} else {
			un->un_throttle = 1;
		}
		mutex_exit(SD_MUTEX(un));
		(void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1);
		mutex_enter(SD_MUTEX(un));
		break;
	case CMD_INCOMPLETE:
		/*
		 * The transport stopped with an abnormal state, fallthrough and
		 * reset the target and/or bus unless selection did not complete
		 * (indicated by STATE_GOT_BUS) in which case we don't want to
		 * go through a target/bus reset
		 */
		if (pkt->pkt_state == STATE_GOT_BUS) {
			break;
		}
		/*FALLTHROUGH*/

	case CMD_TIMEOUT:
	default:
		/*
		 * The lun may still be running the command, so a lun reset
		 * should be attempted. If the lun reset fails or cannot be
		 * issued, then try a target reset. Lastly try a bus reset.
		 */
		if ((pkt->pkt_statistics &
		    (STAT_BUS_RESET|STAT_DEV_RESET|STAT_ABORTED)) == 0) {
			int reset_retval = 0;
			mutex_exit(SD_MUTEX(un));
			if (un->un_f_allow_bus_device_reset == TRUE) {
				if (un->un_f_lun_reset_enabled == TRUE) {
					reset_retval =
					    scsi_reset(SD_ADDRESS(un),
					    RESET_LUN);
				}
				if (reset_retval == 0) {
					reset_retval =
					    scsi_reset(SD_ADDRESS(un),
					    RESET_TARGET);
				}
			}
			if (reset_retval == 0) {
				(void) scsi_reset(SD_ADDRESS(un), RESET_ALL);
			}
			mutex_enter(SD_MUTEX(un));
		}
		break;
	}

	/* A device/bus reset has occurred; update the reservation status. */
	if ((pkt->pkt_reason == CMD_RESET) || (pkt->pkt_statistics &
	    (STAT_BUS_RESET | STAT_DEV_RESET))) {
		if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) {
			un->un_resvd_status |=
			    (SD_LOST_RESERVE | SD_WANT_RESERVE);
			SD_INFO(SD_LOG_IOCTL_MHD, un,
			    "sd_mhd_watch_incomplete: Lost Reservation\n");
		}
	}

	/*
	 * The disk has been turned off; update the device state.
	 *
	 * Note: Should we be offlining the disk here?
	 */
	if (pkt->pkt_state == STATE_GOT_BUS) {
		SD_INFO(SD_LOG_IOCTL_MHD, un, "sd_mhd_watch_incomplete: "
		    "Disk not responding to selection\n");
		if (un->un_state != SD_STATE_OFFLINE) {
			New_state(un, SD_STATE_OFFLINE);
		}
	} else if (be_chatty) {
		/*
		 * suppress messages if they are all the same pkt reason;
		 * with TQ, many (up to 256) are returned with the same
		 * pkt_reason
		 */
		if (pkt->pkt_reason != un->un_last_pkt_reason) {
			SD_ERROR(SD_LOG_IOCTL_MHD, un,
			    "sd_mhd_watch_incomplete: "
			    "SCSI transport failed: reason '%s'\n",
			    scsi_rname(pkt->pkt_reason));
		}
	}
	un->un_last_pkt_reason = pkt->pkt_reason;
	mutex_exit(SD_MUTEX(un));
}


/*
 *    Function: sd_sname()
 *
 * Description: This is a simple little routine to return a string containing
 *		a printable description of command status byte for use in
 *		logging.
 *
 *   Arguments: status - the command status byte
 *
 * Return Code: char * - string containing status description.
 */

static char *
sd_sname(uchar_t status)
{
	switch (status & STATUS_MASK) {
	case STATUS_GOOD:
		return ("good status");
	case STATUS_CHECK:
		return ("check condition");
	case STATUS_MET:
		return ("condition met");
	case STATUS_BUSY:
		return ("busy");
	case STATUS_INTERMEDIATE:
		return ("intermediate");
	case STATUS_INTERMEDIATE_MET:
		return ("intermediate - condition met");
	case STATUS_RESERVATION_CONFLICT:
		return ("reservation_conflict");
	case STATUS_TERMINATED:
		return ("command terminated");
	case STATUS_QFULL:
		return ("queue full");
	default:
		return ("<unknown status>");
	}
}
(4378460) */ 21960 21961 sd_treq = (struct sd_thr_request *) 21962 kmem_zalloc(sizeof (struct sd_thr_request), KM_NOSLEEP); 21963 if (sd_treq == NULL) { 21964 return; 21965 } 21966 21967 sd_treq->sd_thr_req_next = NULL; 21968 sd_treq->dev = dev; 21969 mutex_enter(&sd_tr.srq_resv_reclaim_mutex); 21970 if (sd_tr.srq_thr_req_head == NULL) { 21971 sd_tr.srq_thr_req_head = sd_treq; 21972 } else { 21973 sd_cur = sd_prev = sd_tr.srq_thr_req_head; 21974 for (; sd_cur != NULL; sd_cur = sd_cur->sd_thr_req_next) { 21975 if (sd_cur->dev == dev) { 21976 /* 21977 * already in the queue, so don't log 21978 * another request for the device 21979 */ 21980 already_there = 1; 21981 break; 21982 } 21983 sd_prev = sd_cur; 21984 } 21985 if (!already_there) { 21986 SD_INFO(SD_LOG_IOCTL_MHD, un, "sd_mhd_resvd_recover: " 21987 "logging request for %lx\n", dev); 21988 sd_prev->sd_thr_req_next = sd_treq; 21989 } else { 21990 kmem_free(sd_treq, sizeof (struct sd_thr_request)); 21991 } 21992 } 21993 21994 /* 21995 * Create a kernel thread to do the reservation reclaim and hand the 21996 * queued request off to it; we cannot block in this context while 21997 * the reservation reclaim is carried out. 21998 */ 21999 if (sd_tr.srq_resv_reclaim_thread == NULL) 22000 sd_tr.srq_resv_reclaim_thread = thread_create(NULL, 0, 22001 sd_resv_reclaim_thread, NULL, 22002 0, &p0, TS_RUN, v.v_maxsyspri - 2); 22003 22004 /* Tell the reservation reclaim thread that it has work to do */ 22005 cv_signal(&sd_tr.srq_resv_reclaim_cv); 22006 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 22007 } 22008 22009 /* 22010 * Function: sd_resv_reclaim_thread() 22011 * 22012 * Description: This function implements the reservation reclaim operations. 22013 * 22014 * Arguments: none. Requests are dequeued from the global 22015 * sd_tr.srq_thr_req_head list. 22016 */ 22017 22018 static void 22019 sd_resv_reclaim_thread() 22020 { 22021 struct sd_lun *un; 22022 struct sd_thr_request *sd_mhreq; 22023 22024 /* Wait for work */ 22025 mutex_enter(&sd_tr.srq_resv_reclaim_mutex); 22026 if (sd_tr.srq_thr_req_head == NULL) { 22027 cv_wait(&sd_tr.srq_resv_reclaim_cv, 22028 &sd_tr.srq_resv_reclaim_mutex); 22029 } 22030 22031 /* Loop while we have work */ 22032 while ((sd_tr.srq_thr_cur_req = sd_tr.srq_thr_req_head) != NULL) { 22033 un = ddi_get_soft_state(sd_state, 22034 SDUNIT(sd_tr.srq_thr_cur_req->dev)); 22035 if (un == NULL) { 22036 /* 22037 * softstate structure is NULL so just 22038 * dequeue the request and continue 22039 */ 22040 sd_tr.srq_thr_req_head = 22041 sd_tr.srq_thr_cur_req->sd_thr_req_next; 22042 kmem_free(sd_tr.srq_thr_cur_req, 22043 sizeof (struct sd_thr_request)); 22044 continue; 22045 } 22046 22047 /* dequeue the request */ 22048 sd_mhreq = sd_tr.srq_thr_cur_req; 22049 sd_tr.srq_thr_req_head = 22050 sd_tr.srq_thr_cur_req->sd_thr_req_next; 22051 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 22052 22053 /* 22054 * Reclaim reservation only if SD_RESERVE is still set. There 22055 * may have been a call to MHIOCRELEASE before we got here. 22056 */ 22057 mutex_enter(SD_MUTEX(un)); 22058 if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) { 22059 /* 22060 * Note: The SD_LOST_RESERVE flag is cleared before 22061 * reclaiming the reservation.
If this is done after the 22062 * call to sd_reserve_release a reservation loss in the 22063 * window between pkt completion of reserve cmd and 22064 * mutex_enter below may not be recognized 22065 */ 22066 un->un_resvd_status &= ~SD_LOST_RESERVE; 22067 mutex_exit(SD_MUTEX(un)); 22068 22069 if (sd_reserve_release(sd_mhreq->dev, 22070 SD_RESERVE) == 0) { 22071 mutex_enter(SD_MUTEX(un)); 22072 un->un_resvd_status |= SD_RESERVE; 22073 mutex_exit(SD_MUTEX(un)); 22074 SD_INFO(SD_LOG_IOCTL_MHD, un, 22075 "sd_resv_reclaim_thread: " 22076 "Reservation Recovered\n"); 22077 } else { 22078 mutex_enter(SD_MUTEX(un)); 22079 un->un_resvd_status |= SD_LOST_RESERVE; 22080 mutex_exit(SD_MUTEX(un)); 22081 SD_INFO(SD_LOG_IOCTL_MHD, un, 22082 "sd_resv_reclaim_thread: Failed " 22083 "Reservation Recovery\n"); 22084 } 22085 } else { 22086 mutex_exit(SD_MUTEX(un)); 22087 } 22088 mutex_enter(&sd_tr.srq_resv_reclaim_mutex); 22089 ASSERT(sd_mhreq == sd_tr.srq_thr_cur_req); 22090 kmem_free(sd_mhreq, sizeof (struct sd_thr_request)); 22091 sd_mhreq = sd_tr.srq_thr_cur_req = NULL; 22092 /* 22093 * wakeup the destroy thread if anyone is waiting on 22094 * us to complete. 22095 */ 22096 cv_signal(&sd_tr.srq_inprocess_cv); 22097 SD_TRACE(SD_LOG_IOCTL_MHD, un, 22098 "sd_resv_reclaim_thread: cv_signalling current request \n"); 22099 } 22100 22101 /* 22102 * cleanup the sd_tr structure now that this thread will not exist 22103 */ 22104 ASSERT(sd_tr.srq_thr_req_head == NULL); 22105 ASSERT(sd_tr.srq_thr_cur_req == NULL); 22106 sd_tr.srq_resv_reclaim_thread = NULL; 22107 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 22108 thread_exit(); 22109 } 22110 22111 22112 /* 22113 * Function: sd_rmv_resv_reclaim_req() 22114 * 22115 * Description: This function removes any pending reservation reclaim requests 22116 * for the specified device. 22117 * 22118 * Arguments: dev - the device 'dev_t' 22119 */ 22120 22121 static void 22122 sd_rmv_resv_reclaim_req(dev_t dev) 22123 { 22124 struct sd_thr_request *sd_mhreq; 22125 struct sd_thr_request *sd_prev; 22126 22127 /* Remove a reservation reclaim request from the list */ 22128 mutex_enter(&sd_tr.srq_resv_reclaim_mutex); 22129 if (sd_tr.srq_thr_cur_req && sd_tr.srq_thr_cur_req->dev == dev) { 22130 /* 22131 * We are attempting to reinstate reservation for 22132 * this device. We wait for sd_reserve_release() 22133 * to return before we return. 22134 */ 22135 cv_wait(&sd_tr.srq_inprocess_cv, 22136 &sd_tr.srq_resv_reclaim_mutex); 22137 } else { 22138 sd_prev = sd_mhreq = sd_tr.srq_thr_req_head; 22139 if (sd_mhreq && sd_mhreq->dev == dev) { 22140 sd_tr.srq_thr_req_head = sd_mhreq->sd_thr_req_next; 22141 kmem_free(sd_mhreq, sizeof (struct sd_thr_request)); 22142 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 22143 return; 22144 } 22145 for (; sd_mhreq != NULL; sd_mhreq = sd_mhreq->sd_thr_req_next) { 22146 if (sd_mhreq && sd_mhreq->dev == dev) { 22147 break; 22148 } 22149 sd_prev = sd_mhreq; 22150 } 22151 if (sd_mhreq != NULL) { 22152 sd_prev->sd_thr_req_next = sd_mhreq->sd_thr_req_next; 22153 kmem_free(sd_mhreq, sizeof (struct sd_thr_request)); 22154 } 22155 } 22156 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 22157 } 22158 22159 22160 /* 22161 * Function: sd_mhd_reset_notify_cb() 22162 * 22163 * Description: This is a call back function for scsi_reset_notify. This 22164 * function updates the softstate reserved status and logs the 22165 * reset. The driver scsi watch facility callback function 22166 * (sd_mhd_watch_cb) and reservation reclaim thread functionality 22167 * will reclaim the reservation. 
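 *
 * A sketch of the registration that makes this callback fire
 * (assuming the usual scsi_reset_notify(9F) usage; the actual
 * registration is done elsewhere in the driver):
 *
 *	(void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_NOTIFY,
 *	    sd_mhd_reset_notify_cb, (caddr_t)un);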
22168 * 22169 * Arguments: arg - driver soft state (unit) structure 22170 */ 22171 22172 static void 22173 sd_mhd_reset_notify_cb(caddr_t arg) 22174 { 22175 struct sd_lun *un = (struct sd_lun *)arg; 22176 22177 mutex_enter(SD_MUTEX(un)); 22178 if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) { 22179 un->un_resvd_status |= (SD_LOST_RESERVE | SD_WANT_RESERVE); 22180 SD_INFO(SD_LOG_IOCTL_MHD, un, 22181 "sd_mhd_reset_notify_cb: Lost Reservation\n"); 22182 } 22183 mutex_exit(SD_MUTEX(un)); 22184 } 22185 22186 22187 /* 22188 * Function: sd_take_ownership() 22189 * 22190 * Description: This routine implements an algorithm to achieve a stable 22191 * reservation on disks which don't implement priority reserve, 22192 * and makes sure that other hosts lose re-reservation attempts. 22193 * This algorithm consists of a loop that keeps issuing the RESERVE 22194 * for some period of time (min_ownership_delay, default 6 seconds). 22195 * During that loop, it looks to see if there has been a bus device 22196 * reset or bus reset (both of which cause an existing reservation 22197 * to be lost). If the reservation is lost, issue RESERVE until a 22198 * period of min_ownership_delay with no resets has gone by, or 22199 * until max_ownership_delay has expired. This loop ensures that 22200 * the host really did manage to reserve the device, in spite of 22201 * resets. The looping for min_ownership_delay (default six 22202 * seconds) is important to early generation clustering products, 22203 * Solstice HA 1.x and Sun Cluster 2.x. Those products use an 22204 * MHIOCENFAILFAST periodic timer of two seconds. By having 22205 * MHIOCTKOWN issue Reserves in a loop for six seconds, and having 22206 * MHIOCENFAILFAST poll every two seconds, the idea is that by the 22207 * time the MHIOCTKOWN ioctl returns, the other host (if any) will 22208 * have already noticed, via the MHIOCENFAILFAST polling, that it 22209 * no longer "owns" the disk and will have panicked itself. Thus, 22210 * the host issuing the MHIOCTKOWN is assured (with timing 22211 * dependencies) that by the time it actually starts to use the 22212 * disk for real work, the old owner is no longer accessing it. 22213 * 22214 * min_ownership_delay is the minimum amount of time for which the 22215 * disk must be reserved continuously devoid of resets before the 22216 * MHIOCTKOWN ioctl will return success. 22217 * 22218 * max_ownership_delay indicates the amount of time by which the 22219 * take ownership should succeed or time out with an error. 22220 * 22221 * Arguments: dev - the device 'dev_t' 22222 * *p - struct containing timing info. 22223 * 22224 * Return Code: 0 for success or error code 22225 */ 22226 22227 static int 22228 sd_take_ownership(dev_t dev, struct mhioctkown *p) 22229 { 22230 struct sd_lun *un; 22231 int rval; 22232 int err; 22233 int reservation_count = 0; 22234 int min_ownership_delay = 6000000; /* in usec */ 22235 int max_ownership_delay = 30000000; /* in usec */ 22236 clock_t start_time; /* starting time of this algorithm */ 22237 clock_t end_time; /* time limit for giving up */ 22238 clock_t ownership_time; /* time limit for stable ownership */ 22239 clock_t current_time; 22240 clock_t previous_current_time; 22241 22242 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 22243 return (ENXIO); 22244 } 22245 22246 /* 22247 * Attempt a device reservation. A priority reservation is requested.
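 * As implemented by sd_reserve_release() below, the priority
 * reservation first issues a plain RESERVE; if that fails with a
 * reservation conflict, the existing reservation is broken with a
 * LUN, target or bus reset and the RESERVE is then reissued.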
22248 */ 22249 if ((rval = sd_reserve_release(dev, SD_PRIORITY_RESERVE)) 22250 != SD_SUCCESS) { 22251 SD_ERROR(SD_LOG_IOCTL_MHD, un, 22252 "sd_take_ownership: return(1)=%d\n", rval); 22253 return (rval); 22254 } 22255 22256 /* Update the softstate reserved status to indicate the reservation */ 22257 mutex_enter(SD_MUTEX(un)); 22258 un->un_resvd_status |= SD_RESERVE; 22259 un->un_resvd_status &= 22260 ~(SD_LOST_RESERVE | SD_WANT_RESERVE | SD_RESERVATION_CONFLICT); 22261 mutex_exit(SD_MUTEX(un)); 22262 22263 if (p != NULL) { 22264 if (p->min_ownership_delay != 0) { 22265 min_ownership_delay = p->min_ownership_delay * 1000; 22266 } 22267 if (p->max_ownership_delay != 0) { 22268 max_ownership_delay = p->max_ownership_delay * 1000; 22269 } 22270 } 22271 SD_INFO(SD_LOG_IOCTL_MHD, un, 22272 "sd_take_ownership: min, max delays: %d, %d\n", 22273 min_ownership_delay, max_ownership_delay); 22274 22275 start_time = ddi_get_lbolt(); 22276 current_time = start_time; 22277 ownership_time = current_time + drv_usectohz(min_ownership_delay); 22278 end_time = start_time + drv_usectohz(max_ownership_delay); 22279 22280 while (current_time - end_time < 0) { 22281 delay(drv_usectohz(500000)); 22282 22283 if ((err = sd_reserve_release(dev, SD_RESERVE)) != 0) { 22284 if ((sd_reserve_release(dev, SD_RESERVE)) != 0) { 22285 mutex_enter(SD_MUTEX(un)); 22286 rval = (un->un_resvd_status & 22287 SD_RESERVATION_CONFLICT) ? EACCES : EIO; 22288 mutex_exit(SD_MUTEX(un)); 22289 break; 22290 } 22291 } 22292 previous_current_time = current_time; 22293 current_time = ddi_get_lbolt(); 22294 mutex_enter(SD_MUTEX(un)); 22295 if (err || (un->un_resvd_status & SD_LOST_RESERVE)) { 22296 ownership_time = ddi_get_lbolt() + 22297 drv_usectohz(min_ownership_delay); 22298 reservation_count = 0; 22299 } else { 22300 reservation_count++; 22301 } 22302 un->un_resvd_status |= SD_RESERVE; 22303 un->un_resvd_status &= ~(SD_LOST_RESERVE | SD_WANT_RESERVE); 22304 mutex_exit(SD_MUTEX(un)); 22305 22306 SD_INFO(SD_LOG_IOCTL_MHD, un, 22307 "sd_take_ownership: ticks for loop iteration=%ld, " 22308 "reservation=%s\n", (current_time - previous_current_time), 22309 reservation_count ? 
"ok" : "reclaimed"); 22310 22311 if (current_time - ownership_time >= 0 && 22312 reservation_count >= 4) { 22313 rval = 0; /* Achieved a stable ownership */ 22314 break; 22315 } 22316 if (current_time - end_time >= 0) { 22317 rval = EACCES; /* No ownership in max possible time */ 22318 break; 22319 } 22320 } 22321 SD_TRACE(SD_LOG_IOCTL_MHD, un, 22322 "sd_take_ownership: return(2)=%d\n", rval); 22323 return (rval); 22324 } 22325 22326 22327 /* 22328 * Function: sd_reserve_release() 22329 * 22330 * Description: This function builds and sends scsi RESERVE, RELEASE, and 22331 * PRIORITY RESERVE commands based on a user specified command type 22332 * 22333 * Arguments: dev - the device 'dev_t' 22334 * cmd - user specified command type; one of SD_PRIORITY_RESERVE, 22335 * SD_RESERVE, SD_RELEASE 22336 * 22337 * Return Code: 0 or Error Code 22338 */ 22339 22340 static int 22341 sd_reserve_release(dev_t dev, int cmd) 22342 { 22343 struct uscsi_cmd *com = NULL; 22344 struct sd_lun *un = NULL; 22345 char cdb[CDB_GROUP0]; 22346 int rval; 22347 22348 ASSERT((cmd == SD_RELEASE) || (cmd == SD_RESERVE) || 22349 (cmd == SD_PRIORITY_RESERVE)); 22350 22351 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 22352 return (ENXIO); 22353 } 22354 22355 /* instantiate and initialize the command and cdb */ 22356 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 22357 bzero(cdb, CDB_GROUP0); 22358 com->uscsi_flags = USCSI_SILENT; 22359 com->uscsi_timeout = un->un_reserve_release_time; 22360 com->uscsi_cdblen = CDB_GROUP0; 22361 com->uscsi_cdb = cdb; 22362 if (cmd == SD_RELEASE) { 22363 cdb[0] = SCMD_RELEASE; 22364 } else { 22365 cdb[0] = SCMD_RESERVE; 22366 } 22367 22368 /* Send the command. */ 22369 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 22370 SD_PATH_STANDARD); 22371 22372 /* 22373 * "break" a reservation that is held by another host, by issuing a 22374 * reset if priority reserve is desired, and we could not get the 22375 * device. 22376 */ 22377 if ((cmd == SD_PRIORITY_RESERVE) && 22378 (rval != 0) && (com->uscsi_status == STATUS_RESERVATION_CONFLICT)) { 22379 /* 22380 * First try to reset the LUN. If we cannot, then try a target 22381 * reset, followed by a bus reset if the target reset fails. 22382 */ 22383 int reset_retval = 0; 22384 if (un->un_f_lun_reset_enabled == TRUE) { 22385 reset_retval = scsi_reset(SD_ADDRESS(un), RESET_LUN); 22386 } 22387 if (reset_retval == 0) { 22388 /* The LUN reset either failed or was not issued */ 22389 reset_retval = scsi_reset(SD_ADDRESS(un), RESET_TARGET); 22390 } 22391 if ((reset_retval == 0) && 22392 (scsi_reset(SD_ADDRESS(un), RESET_ALL) == 0)) { 22393 rval = EIO; 22394 kmem_free(com, sizeof (*com)); 22395 return (rval); 22396 } 22397 22398 bzero(com, sizeof (struct uscsi_cmd)); 22399 com->uscsi_flags = USCSI_SILENT; 22400 com->uscsi_cdb = cdb; 22401 com->uscsi_cdblen = CDB_GROUP0; 22402 com->uscsi_timeout = 5; 22403 22404 /* 22405 * Reissue the last reserve command, this time without request 22406 * sense. Assume that it is just a regular reserve command. 22407 */ 22408 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 22409 SD_PATH_STANDARD); 22410 } 22411 22412 /* Return an error if still getting a reservation conflict. 
*/ 22413 if ((rval != 0) && (com->uscsi_status == STATUS_RESERVATION_CONFLICT)) { 22414 rval = EACCES; 22415 } 22416 22417 kmem_free(com, sizeof (*com)); 22418 return (rval); 22419 } 22420 22421 22422 #define SD_NDUMP_RETRIES 12 22423 /* 22424 * System Crash Dump routine 22425 */ 22426 22427 static int 22428 sddump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk) 22429 { 22430 int instance; 22431 int partition; 22432 int i; 22433 int err; 22434 struct sd_lun *un; 22435 struct scsi_pkt *wr_pktp; 22436 struct buf *wr_bp; 22437 struct buf wr_buf; 22438 daddr_t tgt_byte_offset; /* rmw - byte offset for target */ 22439 daddr_t tgt_blkno; /* rmw - blkno for target */ 22440 size_t tgt_byte_count; /* rmw - # of bytes to xfer */ 22441 size_t tgt_nblk; /* rmw - # of tgt blks to xfer */ 22442 size_t io_start_offset; 22443 int doing_rmw = FALSE; 22444 int rval; 22445 #if defined(__i386) || defined(__amd64) 22446 ssize_t dma_resid; 22447 daddr_t oblkno; 22448 #endif 22449 diskaddr_t nblks = 0; 22450 diskaddr_t start_block; 22451 22452 instance = SDUNIT(dev); 22453 if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) || 22454 !SD_IS_VALID_LABEL(un) || ISCD(un)) { 22455 return (ENXIO); 22456 } 22457 22458 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*un)) 22459 22460 SD_TRACE(SD_LOG_DUMP, un, "sddump: entry\n"); 22461 22462 partition = SDPART(dev); 22463 SD_INFO(SD_LOG_DUMP, un, "sddump: partition = %d\n", partition); 22464 22465 /* Validate the blocks to dump against the partition size. */ 22466 22467 (void) cmlb_partinfo(un->un_cmlbhandle, partition, 22468 &nblks, &start_block, NULL, NULL, (void *)SD_PATH_DIRECT); 22469 22470 if ((blkno + nblk) > nblks) { 22471 SD_TRACE(SD_LOG_DUMP, un, 22472 "sddump: dump range larger than partition: " 22473 "blkno = 0x%x, nblk = 0x%x, dkl_nblk = 0x%x\n", 22474 blkno, nblk, nblks); 22475 return (EINVAL); 22476 } 22477 22478 mutex_enter(&un->un_pm_mutex); 22479 if (SD_DEVICE_IS_IN_LOW_POWER(un)) { 22480 struct scsi_pkt *start_pktp; 22481 22482 mutex_exit(&un->un_pm_mutex); 22483 22484 /* 22485 * Use the pm framework to power on the HBA first. 22486 */ 22487 (void) pm_raise_power(SD_DEVINFO(un), 0, SD_SPINDLE_ON); 22488 22489 /* 22490 * Dump no longer uses sdpower to power on a device; it's 22491 * in-line here so it can be done in polled mode. 22492 */ 22493 22494 SD_INFO(SD_LOG_DUMP, un, "sddump: starting device\n"); 22495 22496 start_pktp = scsi_init_pkt(SD_ADDRESS(un), NULL, NULL, 22497 CDB_GROUP0, un->un_status_len, 0, 0, NULL_FUNC, NULL); 22498 22499 if (start_pktp == NULL) { 22500 /* We were not given a SCSI packet, fail. */ 22501 return (EIO); 22502 } 22503 bzero(start_pktp->pkt_cdbp, CDB_GROUP0); 22504 start_pktp->pkt_cdbp[0] = SCMD_START_STOP; 22505 start_pktp->pkt_cdbp[4] = SD_TARGET_START; 22506 start_pktp->pkt_flags = FLAG_NOINTR; 22507 22508 mutex_enter(SD_MUTEX(un)); 22509 SD_FILL_SCSI1_LUN(un, start_pktp); 22510 mutex_exit(SD_MUTEX(un)); 22511 /* 22512 * Scsi_poll returns 0 (success) if the command completes and 22513 * the status block is STATUS_GOOD. 22514 */ 22515 if (sd_scsi_poll(un, start_pktp) != 0) { 22516 scsi_destroy_pkt(start_pktp); 22517 return (EIO); 22518 } 22519 scsi_destroy_pkt(start_pktp); 22520 (void) sd_ddi_pm_resume(un); 22521 } else { 22522 mutex_exit(&un->un_pm_mutex); 22523 } 22524 22525 mutex_enter(SD_MUTEX(un)); 22526 un->un_throttle = 0; 22527 22528 /* 22529 * The first time through, reset the specific target device. 22530 * However, when cpr calls sddump we know that sd is in a 22531 * good state so no bus reset is required.
22532 * Clear sense data via Request Sense cmd. 22533 * In sddump we don't care about allow_bus_device_reset anymore 22534 */ 22535 22536 if ((un->un_state != SD_STATE_SUSPENDED) && 22537 (un->un_state != SD_STATE_DUMPING)) { 22538 22539 New_state(un, SD_STATE_DUMPING); 22540 22541 if (un->un_f_is_fibre == FALSE) { 22542 mutex_exit(SD_MUTEX(un)); 22543 /* 22544 * Attempt a bus reset for parallel scsi. 22545 * 22546 * Note: A bus reset is required because on some host 22547 * systems (i.e. E420R) a bus device reset is 22548 * insufficient to reset the state of the target. 22549 * 22550 * Note: Don't issue the reset for fibre-channel, 22551 * because this tends to hang the bus (loop) for 22552 * too long while everyone is logging out and in 22553 * and the deadman timer for dumping will fire 22554 * before the dump is complete. 22555 */ 22556 if (scsi_reset(SD_ADDRESS(un), RESET_ALL) == 0) { 22557 mutex_enter(SD_MUTEX(un)); 22558 Restore_state(un); 22559 mutex_exit(SD_MUTEX(un)); 22560 return (EIO); 22561 } 22562 22563 /* Delay to give the device some recovery time. */ 22564 drv_usecwait(10000); 22565 22566 if (sd_send_polled_RQS(un) == SD_FAILURE) { 22567 SD_INFO(SD_LOG_DUMP, un, 22568 "sddump: sd_send_polled_RQS failed\n"); 22569 } 22570 mutex_enter(SD_MUTEX(un)); 22571 } 22572 } 22573 22574 /* 22575 * Convert the partition-relative block number to a 22576 * disk physical block number. 22577 */ 22578 blkno += start_block; 22579 22580 SD_INFO(SD_LOG_DUMP, un, "sddump: disk blkno = 0x%x\n", blkno); 22581 22582 22583 /* 22584 * Check if the device has a non-512 block size. 22585 */ 22586 wr_bp = NULL; 22587 if (NOT_DEVBSIZE(un)) { 22588 tgt_byte_offset = blkno * un->un_sys_blocksize; 22589 tgt_byte_count = nblk * un->un_sys_blocksize; 22590 if ((tgt_byte_offset % un->un_tgt_blocksize) || 22591 (tgt_byte_count % un->un_tgt_blocksize)) { 22592 doing_rmw = TRUE; 22593 /* 22594 * Calculate the block number and number of block 22595 * in terms of the media block size. 22596 */ 22597 tgt_blkno = tgt_byte_offset / un->un_tgt_blocksize; 22598 tgt_nblk = 22599 ((tgt_byte_offset + tgt_byte_count + 22600 (un->un_tgt_blocksize - 1)) / 22601 un->un_tgt_blocksize) - tgt_blkno; 22602 22603 /* 22604 * Invoke the routine which is going to do read part 22605 * of read-modify-write. 22606 * Note that this routine returns a pointer to 22607 * a valid bp in wr_bp. 22608 */ 22609 err = sddump_do_read_of_rmw(un, tgt_blkno, tgt_nblk, 22610 &wr_bp); 22611 if (err) { 22612 mutex_exit(SD_MUTEX(un)); 22613 return (err); 22614 } 22615 /* 22616 * Offset is being calculated as - 22617 * (original block # * system block size) - 22618 * (new block # * target block size) 22619 */ 22620 io_start_offset = 22621 ((uint64_t)(blkno * un->un_sys_blocksize)) - 22622 ((uint64_t)(tgt_blkno * un->un_tgt_blocksize)); 22623 22624 ASSERT((io_start_offset >= 0) && 22625 (io_start_offset < un->un_tgt_blocksize)); 22626 /* 22627 * Do the modify portion of read modify write. 
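 * Worked example (illustrative): with 512-byte system blocks on
 * 2048-byte media, blkno = 3 and nblk = 1 give tgt_byte_offset =
 * 1536, tgt_blkno = 0, tgt_nblk = 1 and io_start_offset = 1536, so
 * the bcopy below overwrites bytes 1536-2047 of the single media
 * block read in by sddump_do_read_of_rmw().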
22628 */ 22629 bcopy(addr, &wr_bp->b_un.b_addr[io_start_offset], 22630 (size_t)nblk * un->un_sys_blocksize); 22631 } else { 22632 doing_rmw = FALSE; 22633 tgt_blkno = tgt_byte_offset / un->un_tgt_blocksize; 22634 tgt_nblk = tgt_byte_count / un->un_tgt_blocksize; 22635 } 22636 22637 /* Convert blkno and nblk to target blocks */ 22638 blkno = tgt_blkno; 22639 nblk = tgt_nblk; 22640 } else { 22641 wr_bp = &wr_buf; 22642 bzero(wr_bp, sizeof (struct buf)); 22643 wr_bp->b_flags = B_BUSY; 22644 wr_bp->b_un.b_addr = addr; 22645 wr_bp->b_bcount = nblk << DEV_BSHIFT; 22646 wr_bp->b_resid = 0; 22647 } 22648 22649 mutex_exit(SD_MUTEX(un)); 22650 22651 /* 22652 * Obtain a SCSI packet for the write command. 22653 * It should be safe to call the allocator here without 22654 * worrying about being locked for DVMA mapping because 22655 * the address we're passed is already a DVMA mapping 22656 * 22657 * We are also not going to worry about semaphore ownership 22658 * in the dump buffer. Dumping is single threaded at present. 22659 */ 22660 22661 wr_pktp = NULL; 22662 22663 #if defined(__i386) || defined(__amd64) 22664 dma_resid = wr_bp->b_bcount; 22665 oblkno = blkno; 22666 while (dma_resid != 0) { 22667 #endif 22668 22669 for (i = 0; i < SD_NDUMP_RETRIES; i++) { 22670 wr_bp->b_flags &= ~B_ERROR; 22671 22672 #if defined(__i386) || defined(__amd64) 22673 blkno = oblkno + 22674 ((wr_bp->b_bcount - dma_resid) / 22675 un->un_tgt_blocksize); 22676 nblk = dma_resid / un->un_tgt_blocksize; 22677 22678 if (wr_pktp) { 22679 /* Partial DMA transfers after initial transfer */ 22680 rval = sd_setup_next_rw_pkt(un, wr_pktp, wr_bp, 22681 blkno, nblk); 22682 } else { 22683 /* Initial transfer */ 22684 rval = sd_setup_rw_pkt(un, &wr_pktp, wr_bp, 22685 un->un_pkt_flags, NULL_FUNC, NULL, 22686 blkno, nblk); 22687 } 22688 #else 22689 rval = sd_setup_rw_pkt(un, &wr_pktp, wr_bp, 22690 0, NULL_FUNC, NULL, blkno, nblk); 22691 #endif 22692 22693 if (rval == 0) { 22694 /* We were given a SCSI packet, continue. 
*/ 22695 break; 22696 } 22697 22698 if (i == 0) { 22699 if (wr_bp->b_flags & B_ERROR) { 22700 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 22701 "no resources for dumping; " 22702 "error code: 0x%x, retrying", 22703 geterror(wr_bp)); 22704 } else { 22705 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 22706 "no resources for dumping; retrying"); 22707 } 22708 } else if (i != (SD_NDUMP_RETRIES - 1)) { 22709 if (wr_bp->b_flags & B_ERROR) { 22710 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 22711 "no resources for dumping; error code: " 22712 "0x%x, retrying\n", geterror(wr_bp)); 22713 } 22714 } else { 22715 if (wr_bp->b_flags & B_ERROR) { 22716 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 22717 "no resources for dumping; " 22718 "error code: 0x%x, retries failed, " 22719 "giving up.\n", geterror(wr_bp)); 22720 } else { 22721 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 22722 "no resources for dumping; " 22723 "retries failed, giving up.\n"); 22724 } 22725 mutex_enter(SD_MUTEX(un)); 22726 Restore_state(un); 22727 if (NOT_DEVBSIZE(un) && (doing_rmw == TRUE)) { 22728 mutex_exit(SD_MUTEX(un)); 22729 scsi_free_consistent_buf(wr_bp); 22730 } else { 22731 mutex_exit(SD_MUTEX(un)); 22732 } 22733 return (EIO); 22734 } 22735 drv_usecwait(10000); 22736 } 22737 22738 #if defined(__i386) || defined(__amd64) 22739 /* 22740 * save the resid from PARTIAL_DMA 22741 */ 22742 dma_resid = wr_pktp->pkt_resid; 22743 if (dma_resid != 0) 22744 nblk -= SD_BYTES2TGTBLOCKS(un, dma_resid); 22745 wr_pktp->pkt_resid = 0; 22746 #endif 22747 22748 /* SunBug 1222170 */ 22749 wr_pktp->pkt_flags = FLAG_NOINTR; 22750 22751 err = EIO; 22752 for (i = 0; i < SD_NDUMP_RETRIES; i++) { 22753 22754 /* 22755 * Scsi_poll returns 0 (success) if the command completes and 22756 * the status block is STATUS_GOOD. We should only check 22757 * errors if this condition is not true. Even then we should 22758 * send our own request sense packet only if we have a check 22759 * condition and auto request sense has not been performed by 22760 * the hba. 22761 */ 22762 SD_TRACE(SD_LOG_DUMP, un, "sddump: sending write\n"); 22763 22764 if ((sd_scsi_poll(un, wr_pktp) == 0) && 22765 (wr_pktp->pkt_resid == 0)) { 22766 err = SD_SUCCESS; 22767 break; 22768 } 22769 22770 /* 22771 * Check CMD_DEV_GONE 1st, give up if device is gone. 22772 */ 22773 if (wr_pktp->pkt_reason == CMD_DEV_GONE) { 22774 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 22775 "Device is gone\n"); 22776 break; 22777 } 22778 22779 if (SD_GET_PKT_STATUS(wr_pktp) == STATUS_CHECK) { 22780 SD_INFO(SD_LOG_DUMP, un, 22781 "sddump: write failed with CHECK, try # %d\n", i); 22782 if (((wr_pktp->pkt_state & STATE_ARQ_DONE) == 0)) { 22783 (void) sd_send_polled_RQS(un); 22784 } 22785 22786 continue; 22787 } 22788 22789 if (SD_GET_PKT_STATUS(wr_pktp) == STATUS_BUSY) { 22790 int reset_retval = 0; 22791 22792 SD_INFO(SD_LOG_DUMP, un, 22793 "sddump: write failed with BUSY, try # %d\n", i); 22794 22795 if (un->un_f_lun_reset_enabled == TRUE) { 22796 reset_retval = scsi_reset(SD_ADDRESS(un), 22797 RESET_LUN); 22798 } 22799 if (reset_retval == 0) { 22800 (void) scsi_reset(SD_ADDRESS(un), RESET_TARGET); 22801 } 22802 (void) sd_send_polled_RQS(un); 22803 22804 } else { 22805 SD_INFO(SD_LOG_DUMP, un, 22806 "sddump: write failed with 0x%x, try # %d\n", 22807 SD_GET_PKT_STATUS(wr_pktp), i); 22808 mutex_enter(SD_MUTEX(un)); 22809 sd_reset_target(un, wr_pktp); 22810 mutex_exit(SD_MUTEX(un)); 22811 } 22812 22813 /* 22814 * If we are not getting anywhere with lun/target resets, 22815 * let's reset the bus. 
22816 */ 22817 if (i == SD_NDUMP_RETRIES/2) { 22818 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL); 22819 (void) sd_send_polled_RQS(un); 22820 } 22821 22822 } 22823 #if defined(__i386) || defined(__amd64) 22824 } /* dma_resid */ 22825 #endif 22826 22827 scsi_destroy_pkt(wr_pktp); 22828 mutex_enter(SD_MUTEX(un)); 22829 if ((NOT_DEVBSIZE(un)) && (doing_rmw == TRUE)) { 22830 mutex_exit(SD_MUTEX(un)); 22831 scsi_free_consistent_buf(wr_bp); 22832 } else { 22833 mutex_exit(SD_MUTEX(un)); 22834 } 22835 SD_TRACE(SD_LOG_DUMP, un, "sddump: exit: err = %d\n", err); 22836 return (err); 22837 } 22838 22839 /* 22840 * Function: sd_scsi_poll() 22841 * 22842 * Description: This is a wrapper for the scsi_poll call. 22843 * 22844 * Arguments: sd_lun - The unit structure 22845 * scsi_pkt - The scsi packet being sent to the device. 22846 * 22847 * Return Code: 0 - Command completed successfully with good status 22848 * -1 - Command failed. This could indicate a check condition 22849 * or other status value requiring recovery action. 22850 * 22851 */ 22852 22853 static int 22854 sd_scsi_poll(struct sd_lun *un, struct scsi_pkt *pktp) 22855 { 22856 int status; 22857 22858 ASSERT(un != NULL); 22859 ASSERT(!mutex_owned(SD_MUTEX(un))); 22860 ASSERT(pktp != NULL); 22861 22862 status = SD_SUCCESS; 22863 22864 if (scsi_ifgetcap(&pktp->pkt_address, "tagged-qing", 1) == 1) { 22865 pktp->pkt_flags |= un->un_tagflags; 22866 pktp->pkt_flags &= ~FLAG_NODISCON; 22867 } 22868 22869 status = sd_ddi_scsi_poll(pktp); 22870 /* 22871 * Scsi_poll returns 0 (success) if the command completes and the 22872 * status block is STATUS_GOOD. We should only check errors if this 22873 * condition is not true. Even then we should send our own request 22874 * sense packet only if we have a check condition and auto 22875 * request sense has not been performed by the hba. 22876 * Don't get RQS data if pkt_reason is CMD_DEV_GONE. 22877 */ 22878 if ((status != SD_SUCCESS) && 22879 (SD_GET_PKT_STATUS(pktp) == STATUS_CHECK) && 22880 (pktp->pkt_state & STATE_ARQ_DONE) == 0 && 22881 (pktp->pkt_reason != CMD_DEV_GONE)) 22882 (void) sd_send_polled_RQS(un); 22883 22884 return (status); 22885 } 22886 22887 /* 22888 * Function: sd_send_polled_RQS() 22889 * 22890 * Description: This sends the request sense command to a device. 22891 * 22892 * Arguments: sd_lun - The unit structure 22893 * 22894 * Return Code: 0 - Command completed successfully with good status 22895 * -1 - Command failed. 
22896 * 22897 */ 22898 22899 static int 22900 sd_send_polled_RQS(struct sd_lun *un) 22901 { 22902 int ret_val; 22903 struct scsi_pkt *rqs_pktp; 22904 struct buf *rqs_bp; 22905 22906 ASSERT(un != NULL); 22907 ASSERT(!mutex_owned(SD_MUTEX(un))); 22908 22909 ret_val = SD_SUCCESS; 22910 22911 rqs_pktp = un->un_rqs_pktp; 22912 rqs_bp = un->un_rqs_bp; 22913 22914 mutex_enter(SD_MUTEX(un)); 22915 22916 if (un->un_sense_isbusy) { 22917 ret_val = SD_FAILURE; 22918 mutex_exit(SD_MUTEX(un)); 22919 return (ret_val); 22920 } 22921 22922 /* 22923 * If the request sense buffer (and packet) is not in use, 22924 * let's set the un_sense_isbusy and send our packet 22925 */ 22926 un->un_sense_isbusy = 1; 22927 rqs_pktp->pkt_resid = 0; 22928 rqs_pktp->pkt_reason = 0; 22929 rqs_pktp->pkt_flags |= FLAG_NOINTR; 22930 bzero(rqs_bp->b_un.b_addr, SENSE_LENGTH); 22931 22932 mutex_exit(SD_MUTEX(un)); 22933 22934 SD_INFO(SD_LOG_COMMON, un, "sd_send_polled_RQS: req sense buf at" 22935 " 0x%p\n", rqs_bp->b_un.b_addr); 22936 22937 /* 22938 * Can't send this to sd_scsi_poll, we wrap ourselves around the 22939 * axle - it has a call into us! 22940 */ 22941 if ((ret_val = sd_ddi_scsi_poll(rqs_pktp)) != 0) { 22942 SD_INFO(SD_LOG_COMMON, un, 22943 "sd_send_polled_RQS: RQS failed\n"); 22944 } 22945 22946 SD_DUMP_MEMORY(un, SD_LOG_COMMON, "sd_send_polled_RQS:", 22947 (uchar_t *)rqs_bp->b_un.b_addr, SENSE_LENGTH, SD_LOG_HEX); 22948 22949 mutex_enter(SD_MUTEX(un)); 22950 un->un_sense_isbusy = 0; 22951 mutex_exit(SD_MUTEX(un)); 22952 22953 return (ret_val); 22954 } 22955 22956 /* 22957 * Defines needed for localized version of the scsi_poll routine. 22958 */ 22959 #define SD_CSEC 10000 /* usecs */ 22960 #define SD_SEC_TO_CSEC (1000000/SD_CSEC) 22961 22962 22963 /* 22964 * Function: sd_ddi_scsi_poll() 22965 * 22966 * Description: Localized version of the scsi_poll routine. The purpose is to 22967 * send a scsi_pkt to a device as a polled command. This version 22968 * is to ensure more robust handling of transport errors. 22969 * Specifically this routine cures not ready, coming ready 22970 * transition for power up and reset of sonoma's. This can take 22971 * up to 45 seconds for power-on and 20 seconds for reset of a 22972 * sonoma lun. 22973 * 22974 * Arguments: scsi_pkt - The scsi_pkt being sent to a device 22975 * 22976 * Return Code: 0 - Command completed successfully with good status 22977 * -1 - Command failed. 22978 * 22979 */ 22980 22981 static int 22982 sd_ddi_scsi_poll(struct scsi_pkt *pkt) 22983 { 22984 int busy_count; 22985 int timeout; 22986 int rval = SD_FAILURE; 22987 int savef; 22988 uint8_t *sensep; 22989 long savet; 22990 void (*savec)(); 22991 /* 22992 * The following is defined in machdep.c and is used in determining if 22993 * the scsi transport system will do polled I/O instead of interrupt 22994 * I/O when called from xx_dump(). 22995 */ 22996 extern int do_polled_io; 22997 22998 /* 22999 * save old flags in pkt, to restore at end 23000 */ 23001 savef = pkt->pkt_flags; 23002 savec = pkt->pkt_comp; 23003 savet = pkt->pkt_time; 23004 23005 pkt->pkt_flags |= FLAG_NOINTR; 23006 23007 /* 23008 * XXX there is nothing in the SCSA spec that states that we should not 23009 * do a callback for polled cmds; however, removing this will break sd 23010 * and probably other target drivers 23011 */ 23012 pkt->pkt_comp = NULL; 23013 23014 /* 23015 * we don't like a polled command without timeout. 23016 * 60 seconds seems long enough. 
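 * With SD_CSEC at 10000 usec, SD_SEC_TO_CSEC works out to 100, so a
 * 60 second pkt_time gives the loop below a budget of 6000 poll
 * ticks of 10 msec each; the one-second delays for busy/not-ready
 * consume 100 ticks at a time via the busy_count adjustments.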
23017 */ 23018 if (pkt->pkt_time == 0) { 23019 pkt->pkt_time = SCSI_POLL_TIMEOUT; 23020 } 23021 23022 /* 23023 * Send polled cmd. 23024 * 23025 * We do some error recovery for various errors. Tran_busy, 23026 * queue full, and non-dispatched commands are retried every 10 msec. 23027 * as they are typically transient failures. Busy status and Not 23028 * Ready are retried every second as this status takes a while to 23029 * change. Unit attention is retried for pkt_time (60) times 23030 * with no delay. 23031 */ 23032 timeout = pkt->pkt_time * SD_SEC_TO_CSEC; 23033 23034 for (busy_count = 0; busy_count < timeout; busy_count++) { 23035 int rc; 23036 int poll_delay; 23037 23038 /* 23039 * Initialize pkt status variables. 23040 */ 23041 *pkt->pkt_scbp = pkt->pkt_reason = pkt->pkt_state = 0; 23042 23043 if ((rc = scsi_transport(pkt)) != TRAN_ACCEPT) { 23044 if (rc != TRAN_BUSY) { 23045 /* Transport failed - give up. */ 23046 break; 23047 } else { 23048 /* Transport busy - try again. */ 23049 poll_delay = 1 * SD_CSEC; /* 10 msec */ 23050 } 23051 } else { 23052 /* 23053 * Transport accepted - check pkt status. 23054 */ 23055 rc = (*pkt->pkt_scbp) & STATUS_MASK; 23056 if (pkt->pkt_reason == CMD_CMPLT && 23057 rc == STATUS_CHECK && 23058 pkt->pkt_state & STATE_ARQ_DONE) { 23059 struct scsi_arq_status *arqstat = 23060 (struct scsi_arq_status *)(pkt->pkt_scbp); 23061 23062 sensep = (uint8_t *)&arqstat->sts_sensedata; 23063 } else { 23064 sensep = NULL; 23065 } 23066 23067 if ((pkt->pkt_reason == CMD_CMPLT) && 23068 (rc == STATUS_GOOD)) { 23069 /* No error - we're done */ 23070 rval = SD_SUCCESS; 23071 break; 23072 23073 } else if (pkt->pkt_reason == CMD_DEV_GONE) { 23074 /* Lost connection - give up */ 23075 break; 23076 23077 } else if ((pkt->pkt_reason == CMD_INCOMPLETE) && 23078 (pkt->pkt_state == 0)) { 23079 /* Pkt not dispatched - try again. */ 23080 poll_delay = 1 * SD_CSEC; /* 10 msec. */ 23081 23082 } else if ((pkt->pkt_reason == CMD_CMPLT) && 23083 (rc == STATUS_QFULL)) { 23084 /* Queue full - try again. */ 23085 poll_delay = 1 * SD_CSEC; /* 10 msec. */ 23086 23087 } else if ((pkt->pkt_reason == CMD_CMPLT) && 23088 (rc == STATUS_BUSY)) { 23089 /* Busy - try again. */ 23090 poll_delay = 100 * SD_CSEC; /* 1 sec. */ 23091 busy_count += (SD_SEC_TO_CSEC - 1); 23092 23093 } else if ((sensep != NULL) && 23094 (scsi_sense_key(sensep) == 23095 KEY_UNIT_ATTENTION)) { 23096 /* Unit Attention - try again */ 23097 busy_count += (SD_SEC_TO_CSEC - 1); /* 1 */ 23098 continue; 23099 23100 } else if ((sensep != NULL) && 23101 (scsi_sense_key(sensep) == KEY_NOT_READY) && 23102 (scsi_sense_asc(sensep) == 0x04) && 23103 (scsi_sense_ascq(sensep) == 0x01)) { 23104 /* Not ready -> ready - try again. */ 23105 poll_delay = 100 * SD_CSEC; /* 1 sec. */ 23106 busy_count += (SD_SEC_TO_CSEC - 1); 23107 23108 } else { 23109 /* BAD status - give up. 
*/ 23110 break; 23111 } 23112 } 23113 23114 if ((curthread->t_flag & T_INTR_THREAD) == 0 && 23115 !do_polled_io) { 23116 delay(drv_usectohz(poll_delay)); 23117 } else { 23118 /* we busy wait during cpr_dump or interrupt threads */ 23119 drv_usecwait(poll_delay); 23120 } 23121 } 23122 23123 pkt->pkt_flags = savef; 23124 pkt->pkt_comp = savec; 23125 pkt->pkt_time = savet; 23126 return (rval); 23127 } 23128 23129 23130 /* 23131 * Function: sd_persistent_reservation_in_read_keys 23132 * 23133 * Description: This routine is the driver entry point for handling CD-ROM 23134 * multi-host persistent reservation requests (MHIOCGRP_INKEYS) 23135 * by sending the SCSI-3 PRIN commands to the device. 23136 * Processes the read keys command response by copying the 23137 * reservation key information into the user provided buffer. 23138 * Support for the 32/64 bit _MULTI_DATAMODEL is implemented. 23139 * 23140 * Arguments: un - Pointer to soft state struct for the target. 23141 * usrp - user provided pointer to multihost Persistent In Read 23142 * Keys structure (mhioc_inkeys_t) 23143 * flag - this argument is a pass through to ddi_copyxxx() 23144 * directly from the mode argument of ioctl(). 23145 * 23146 * Return Code: 0 - Success 23147 * EACCES 23148 * ENOTSUP 23149 * errno return code from sd_send_scsi_cmd() 23150 * 23151 * Context: Can sleep. Does not return until command is completed. 23152 */ 23153 23154 static int 23155 sd_persistent_reservation_in_read_keys(struct sd_lun *un, 23156 mhioc_inkeys_t *usrp, int flag) 23157 { 23158 #ifdef _MULTI_DATAMODEL 23159 struct mhioc_key_list32 li32; 23160 #endif 23161 sd_prin_readkeys_t *in; 23162 mhioc_inkeys_t *ptr; 23163 mhioc_key_list_t li; 23164 uchar_t *data_bufp; 23165 int data_len; 23166 int rval; 23167 size_t copysz; 23168 23169 if ((ptr = (mhioc_inkeys_t *)usrp) == NULL) { 23170 return (EINVAL); 23171 } 23172 bzero(&li, sizeof (mhioc_key_list_t)); 23173 23174 /* 23175 * Get the listsize from user 23176 */ 23177 #ifdef _MULTI_DATAMODEL 23178 23179 switch (ddi_model_convert_from(flag & FMODELS)) { 23180 case DDI_MODEL_ILP32: 23181 copysz = sizeof (struct mhioc_key_list32); 23182 if (ddi_copyin(ptr->li, &li32, copysz, flag)) { 23183 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23184 "sd_persistent_reservation_in_read_keys: " 23185 "failed ddi_copyin: mhioc_key_list32_t\n"); 23186 rval = EFAULT; 23187 goto done; 23188 } 23189 li.listsize = li32.listsize; 23190 li.list = (mhioc_resv_key_t *)(uintptr_t)li32.list; 23191 break; 23192 23193 case DDI_MODEL_NONE: 23194 copysz = sizeof (mhioc_key_list_t); 23195 if (ddi_copyin(ptr->li, &li, copysz, flag)) { 23196 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23197 "sd_persistent_reservation_in_read_keys: " 23198 "failed ddi_copyin: mhioc_key_list_t\n"); 23199 rval = EFAULT; 23200 goto done; 23201 } 23202 break; 23203 } 23204 23205 #else /* ! 
_MULTI_DATAMODEL */ 23206 copysz = sizeof (mhioc_key_list_t); 23207 if (ddi_copyin(ptr->li, &li, copysz, flag)) { 23208 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23209 "sd_persistent_reservation_in_read_keys: " 23210 "failed ddi_copyin: mhioc_key_list_t\n"); 23211 rval = EFAULT; 23212 goto done; 23213 } 23214 #endif 23215 23216 data_len = li.listsize * MHIOC_RESV_KEY_SIZE; 23217 data_len += (sizeof (sd_prin_readkeys_t) - sizeof (caddr_t)); 23218 data_bufp = kmem_zalloc(data_len, KM_SLEEP); 23219 23220 if ((rval = sd_send_scsi_PERSISTENT_RESERVE_IN(un, SD_READ_KEYS, 23221 data_len, data_bufp)) != 0) { 23222 goto done; 23223 } 23224 in = (sd_prin_readkeys_t *)data_bufp; 23225 ptr->generation = BE_32(in->generation); 23226 li.listlen = BE_32(in->len) / MHIOC_RESV_KEY_SIZE; 23227 23228 /* 23229 * Return the min(listsize, listlen) keys 23230 */ 23231 #ifdef _MULTI_DATAMODEL 23232 23233 switch (ddi_model_convert_from(flag & FMODELS)) { 23234 case DDI_MODEL_ILP32: 23235 li32.listlen = li.listlen; 23236 if (ddi_copyout(&li32, ptr->li, copysz, flag)) { 23237 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23238 "sd_persistent_reservation_in_read_keys: " 23239 "failed ddi_copyout: mhioc_key_list32_t\n"); 23240 rval = EFAULT; 23241 goto done; 23242 } 23243 break; 23244 23245 case DDI_MODEL_NONE: 23246 if (ddi_copyout(&li, ptr->li, copysz, flag)) { 23247 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23248 "sd_persistent_reservation_in_read_keys: " 23249 "failed ddi_copyout: mhioc_key_list_t\n"); 23250 rval = EFAULT; 23251 goto done; 23252 } 23253 break; 23254 } 23255 23256 #else /* ! _MULTI_DATAMODEL */ 23257 23258 if (ddi_copyout(&li, ptr->li, copysz, flag)) { 23259 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23260 "sd_persistent_reservation_in_read_keys: " 23261 "failed ddi_copyout: mhioc_key_list_t\n"); 23262 rval = EFAULT; 23263 goto done; 23264 } 23265 23266 #endif /* _MULTI_DATAMODEL */ 23267 23268 copysz = min(li.listlen * MHIOC_RESV_KEY_SIZE, 23269 li.listsize * MHIOC_RESV_KEY_SIZE); 23270 if (ddi_copyout(&in->keylist, li.list, copysz, flag)) { 23271 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23272 "sd_persistent_reservation_in_read_keys: " 23273 "failed ddi_copyout: keylist\n"); 23274 rval = EFAULT; 23275 } 23276 done: 23277 kmem_free(data_bufp, data_len); 23278 return (rval); 23279 } 23280 23281 23282 /* 23283 * Function: sd_persistent_reservation_in_read_resv 23284 * 23285 * Description: This routine is the driver entry point for handling CD-ROM 23286 * multi-host persistent reservation requests (MHIOCGRP_INRESV) 23287 * by sending the SCSI-3 PRIN commands to the device. 23288 * Process the read persistent reservations command response by 23289 * copying the reservation information into the user provided 23290 * buffer. Support for the 32/64 bit _MULTI_DATAMODEL is implemented. 23291 * 23292 * Arguments: un - Pointer to soft state struct for the target. 23293 * usrp - user provided pointer to multihost Persistent In Read 23294 * Reservations structure (mhioc_inresvs_t) 23295 * flag - this argument is a pass through to ddi_copyxxx() 23296 * directly from the mode argument of ioctl(). 23297 * 23298 * Return Code: 0 - Success 23299 * EACCES 23300 * ENOTSUP 23301 * errno return code from sd_send_scsi_cmd() 23302 * 23303 * Context: Can sleep. Does not return until command is completed.
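 *
 * Sizing note: the PRIN data buffer allocated below is
 * resvlist.listsize * SCSI3_RESV_DESC_LEN bytes of descriptors plus
 * sizeof (sd_prin_readresv_t) - sizeof (caddr_t) bytes for the PRIN
 * header, the subtraction dropping the struct's placeholder member
 * for the variable length data.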
23304 */ 23305 23306 static int 23307 sd_persistent_reservation_in_read_resv(struct sd_lun *un, 23308 mhioc_inresvs_t *usrp, int flag) 23309 { 23310 #ifdef _MULTI_DATAMODEL 23311 struct mhioc_resv_desc_list32 resvlist32; 23312 #endif 23313 sd_prin_readresv_t *in; 23314 mhioc_inresvs_t *ptr; 23315 sd_readresv_desc_t *readresv_ptr; 23316 mhioc_resv_desc_list_t resvlist; 23317 mhioc_resv_desc_t resvdesc; 23318 uchar_t *data_bufp; 23319 int data_len; 23320 int rval; 23321 int i; 23322 size_t copysz; 23323 mhioc_resv_desc_t *bufp; 23324 23325 if ((ptr = usrp) == NULL) { 23326 return (EINVAL); 23327 } 23328 23329 /* 23330 * Get the listsize from user 23331 */ 23332 #ifdef _MULTI_DATAMODEL 23333 switch (ddi_model_convert_from(flag & FMODELS)) { 23334 case DDI_MODEL_ILP32: 23335 copysz = sizeof (struct mhioc_resv_desc_list32); 23336 if (ddi_copyin(ptr->li, &resvlist32, copysz, flag)) { 23337 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23338 "sd_persistent_reservation_in_read_resv: " 23339 "failed ddi_copyin: mhioc_resv_desc_list_t\n"); 23340 rval = EFAULT; 23341 goto done; 23342 } 23343 resvlist.listsize = resvlist32.listsize; 23344 resvlist.list = (mhioc_resv_desc_t *)(uintptr_t)resvlist32.list; 23345 break; 23346 23347 case DDI_MODEL_NONE: 23348 copysz = sizeof (mhioc_resv_desc_list_t); 23349 if (ddi_copyin(ptr->li, &resvlist, copysz, flag)) { 23350 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23351 "sd_persistent_reservation_in_read_resv: " 23352 "failed ddi_copyin: mhioc_resv_desc_list_t\n"); 23353 rval = EFAULT; 23354 goto done; 23355 } 23356 break; 23357 } 23358 #else /* ! _MULTI_DATAMODEL */ 23359 copysz = sizeof (mhioc_resv_desc_list_t); 23360 if (ddi_copyin(ptr->li, &resvlist, copysz, flag)) { 23361 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23362 "sd_persistent_reservation_in_read_resv: " 23363 "failed ddi_copyin: mhioc_resv_desc_list_t\n"); 23364 rval = EFAULT; 23365 goto done; 23366 } 23367 #endif /* ! _MULTI_DATAMODEL */ 23368 23369 data_len = resvlist.listsize * SCSI3_RESV_DESC_LEN; 23370 data_len += (sizeof (sd_prin_readresv_t) - sizeof (caddr_t)); 23371 data_bufp = kmem_zalloc(data_len, KM_SLEEP); 23372 23373 if ((rval = sd_send_scsi_PERSISTENT_RESERVE_IN(un, SD_READ_RESV, 23374 data_len, data_bufp)) != 0) { 23375 goto done; 23376 } 23377 in = (sd_prin_readresv_t *)data_bufp; 23378 ptr->generation = BE_32(in->generation); 23379 resvlist.listlen = BE_32(in->len) / SCSI3_RESV_DESC_LEN; 23380 23381 /* 23382 * Return the min(listsize, listlen) descriptors 23383 */ 23384 #ifdef _MULTI_DATAMODEL 23385 23386 switch (ddi_model_convert_from(flag & FMODELS)) { 23387 case DDI_MODEL_ILP32: 23388 resvlist32.listlen = resvlist.listlen; 23389 if (ddi_copyout(&resvlist32, ptr->li, copysz, flag)) { 23390 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23391 "sd_persistent_reservation_in_read_resv: " 23392 "failed ddi_copyout: mhioc_resv_desc_list_t\n"); 23393 rval = EFAULT; 23394 goto done; 23395 } 23396 break; 23397 23398 case DDI_MODEL_NONE: 23399 if (ddi_copyout(&resvlist, ptr->li, copysz, flag)) { 23400 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23401 "sd_persistent_reservation_in_read_resv: " 23402 "failed ddi_copyout: mhioc_resv_desc_list_t\n"); 23403 rval = EFAULT; 23404 goto done; 23405 } 23406 break; 23407 } 23408 23409 #else /* ! _MULTI_DATAMODEL */ 23410 23411 if (ddi_copyout(&resvlist, ptr->li, copysz, flag)) { 23412 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23413 "sd_persistent_reservation_in_read_resv: " 23414 "failed ddi_copyout: mhioc_resv_desc_list_t\n"); 23415 rval = EFAULT; 23416 goto done; 23417 } 23418 23419 #endif /* !
_MULTI_DATAMODEL */ 23420 23421 readresv_ptr = (sd_readresv_desc_t *)&in->readresv_desc; 23422 bufp = resvlist.list; 23423 copysz = sizeof (mhioc_resv_desc_t); 23424 for (i = 0; i < min(resvlist.listlen, resvlist.listsize); 23425 i++, readresv_ptr++, bufp++) { 23426 23427 bcopy(&readresv_ptr->resvkey, &resvdesc.key, 23428 MHIOC_RESV_KEY_SIZE); 23429 resvdesc.type = readresv_ptr->type; 23430 resvdesc.scope = readresv_ptr->scope; 23431 resvdesc.scope_specific_addr = 23432 BE_32(readresv_ptr->scope_specific_addr); 23433 23434 if (ddi_copyout(&resvdesc, bufp, copysz, flag)) { 23435 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23436 "sd_persistent_reservation_in_read_resv: " 23437 "failed ddi_copyout: resvlist\n"); 23438 rval = EFAULT; 23439 goto done; 23440 } 23441 } 23442 done: 23443 kmem_free(data_bufp, data_len); 23444 return (rval); 23445 } 23446 23447 23448 /* 23449 * Function: sr_change_blkmode() 23450 * 23451 * Description: This routine is the driver entry point for handling CD-ROM 23452 * block mode ioctl requests. Support for returning and changing 23453 * the current block size in use by the device is implemented. The 23454 * LBA size is changed via a MODE SELECT Block Descriptor. 23455 * 23456 * This routine issues a mode sense with an allocation length of 23457 * 12 bytes for the mode page header and a single block descriptor. 23458 * 23459 * Arguments: dev - the device 'dev_t' 23460 * cmd - the request type; one of CDROMGBLKMODE (get) or 23461 * CDROMSBLKMODE (set) 23462 * data - current block size or requested block size 23463 * flag - this argument is a pass through to ddi_copyxxx() directly 23464 * from the mode argument of ioctl(). 23465 * 23466 * Return Code: the code returned by sd_send_scsi_cmd() 23467 * EINVAL if invalid arguments are provided 23468 * EFAULT if ddi_copyxxx() fails 23469 * ENXIO if fail ddi_get_soft_state 23470 * EIO if invalid mode sense block descriptor length 23471 * 23472 */ 23473 23474 static int 23475 sr_change_blkmode(dev_t dev, int cmd, intptr_t data, int flag) 23476 { 23477 struct sd_lun *un = NULL; 23478 struct mode_header *sense_mhp, *select_mhp; 23479 struct block_descriptor *sense_desc, *select_desc; 23480 int current_bsize; 23481 int rval = EINVAL; 23482 uchar_t *sense = NULL; 23483 uchar_t *select = NULL; 23484 23485 ASSERT((cmd == CDROMGBLKMODE) || (cmd == CDROMSBLKMODE)); 23486 23487 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 23488 return (ENXIO); 23489 } 23490 23491 /* 23492 * The block length is changed via the Mode Select block descriptor, the 23493 * "Read/Write Error Recovery" mode page (0x1) contents are not actually 23494 * required as part of this routine. Therefore the mode sense allocation 23495 * length is specified to be the length of a mode page header and a 23496 * block descriptor. 
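 * That is, BUFLEN_CHG_BLK_MODE covers the 12 bytes of a 4-byte
 * group 0 mode header plus one 8-byte block descriptor; the current
 * LBA size is reassembled below from the descriptor's
 * blksize_hi/mid/lo bytes.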
23497 */ 23498 sense = kmem_zalloc(BUFLEN_CHG_BLK_MODE, KM_SLEEP); 23499 23500 if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, sense, 23501 BUFLEN_CHG_BLK_MODE, MODEPAGE_ERR_RECOV, SD_PATH_STANDARD)) != 0) { 23502 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 23503 "sr_change_blkmode: Mode Sense Failed\n"); 23504 kmem_free(sense, BUFLEN_CHG_BLK_MODE); 23505 return (rval); 23506 } 23507 23508 /* Check the block descriptor len to handle only 1 block descriptor */ 23509 sense_mhp = (struct mode_header *)sense; 23510 if ((sense_mhp->bdesc_length == 0) || 23511 (sense_mhp->bdesc_length > MODE_BLK_DESC_LENGTH)) { 23512 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 23513 "sr_change_blkmode: Mode Sense returned invalid block" 23514 " descriptor length\n"); 23515 kmem_free(sense, BUFLEN_CHG_BLK_MODE); 23516 return (EIO); 23517 } 23518 sense_desc = (struct block_descriptor *)(sense + MODE_HEADER_LENGTH); 23519 current_bsize = ((sense_desc->blksize_hi << 16) | 23520 (sense_desc->blksize_mid << 8) | sense_desc->blksize_lo); 23521 23522 /* Process command */ 23523 switch (cmd) { 23524 case CDROMGBLKMODE: 23525 /* Return the block size obtained during the mode sense */ 23526 if (ddi_copyout(&current_bsize, (void *)data, 23527 sizeof (int), flag) != 0) 23528 rval = EFAULT; 23529 break; 23530 case CDROMSBLKMODE: 23531 /* Validate the requested block size */ 23532 switch (data) { 23533 case CDROM_BLK_512: 23534 case CDROM_BLK_1024: 23535 case CDROM_BLK_2048: 23536 case CDROM_BLK_2056: 23537 case CDROM_BLK_2336: 23538 case CDROM_BLK_2340: 23539 case CDROM_BLK_2352: 23540 case CDROM_BLK_2368: 23541 case CDROM_BLK_2448: 23542 case CDROM_BLK_2646: 23543 case CDROM_BLK_2647: 23544 break; 23545 default: 23546 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 23547 "sr_change_blkmode: " 23548 "Block Size '%ld' Not Supported\n", data); 23549 kmem_free(sense, BUFLEN_CHG_BLK_MODE); 23550 return (EINVAL); 23551 } 23552 23553 /* 23554 * The current block size matches the requested block size so 23555 * there is no need to send the mode select to change the size 23556 */ 23557 if (current_bsize == data) { 23558 break; 23559 } 23560 23561 /* Build the select data for the requested block size */ 23562 select = kmem_zalloc(BUFLEN_CHG_BLK_MODE, KM_SLEEP); 23563 select_mhp = (struct mode_header *)select; 23564 select_desc = 23565 (struct block_descriptor *)(select + MODE_HEADER_LENGTH); 23566 /* 23567 * The LBA size is changed via the block descriptor, so the 23568 * descriptor is built according to the user data 23569 */ 23570 select_mhp->bdesc_length = MODE_BLK_DESC_LENGTH; 23571 select_desc->blksize_hi = (char)(((data) & 0x00ff0000) >> 16); 23572 select_desc->blksize_mid = (char)(((data) & 0x0000ff00) >> 8); 23573 select_desc->blksize_lo = (char)((data) & 0x000000ff); 23574 23575 /* Send the mode select for the requested block size */ 23576 if ((rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, 23577 select, BUFLEN_CHG_BLK_MODE, SD_DONTSAVE_PAGE, 23578 SD_PATH_STANDARD)) != 0) { 23579 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 23580 "sr_change_blkmode: Mode Select Failed\n"); 23581 /* 23582 * The mode select failed for the requested block size, 23583 * so reset the data for the original block size and 23584 * send it to the target. The error is indicated by the 23585 * return value for the failed mode select.
23586 */ 23587 select_desc->blksize_hi = sense_desc->blksize_hi; 23588 select_desc->blksize_mid = sense_desc->blksize_mid; 23589 select_desc->blksize_lo = sense_desc->blksize_lo; 23590 (void) sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, 23591 select, BUFLEN_CHG_BLK_MODE, SD_DONTSAVE_PAGE, 23592 SD_PATH_STANDARD); 23593 } else { 23594 ASSERT(!mutex_owned(SD_MUTEX(un))); 23595 mutex_enter(SD_MUTEX(un)); 23596 sd_update_block_info(un, (uint32_t)data, 0); 23597 mutex_exit(SD_MUTEX(un)); 23598 } 23599 break; 23600 default: 23601 /* should not reach here, but check anyway */ 23602 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 23603 "sr_change_blkmode: Command '%x' Not Supported\n", cmd); 23604 rval = EINVAL; 23605 break; 23606 } 23607 23608 if (select) { 23609 kmem_free(select, BUFLEN_CHG_BLK_MODE); 23610 } 23611 if (sense) { 23612 kmem_free(sense, BUFLEN_CHG_BLK_MODE); 23613 } 23614 return (rval); 23615 } 23616 23617 23618 /* 23619 * Note: The following sr_change_speed() and sr_atapi_change_speed() routines 23620 * implement driver support for getting and setting the CD speed. The command 23621 * set used will be based on the device type. If the device has not been 23622 * identified as MMC the Toshiba vendor specific mode page will be used. If 23623 * the device is MMC but does not support the Real Time Streaming feature 23624 * the SET CD SPEED command will be used to set speed and mode page 0x2A will 23625 * be used to read the speed. 23626 */ 23627 23628 /* 23629 * Function: sr_change_speed() 23630 * 23631 * Description: This routine is the driver entry point for handling CD-ROM 23632 * drive speed ioctl requests for devices supporting the Toshiba 23633 * vendor specific drive speed mode page. Support for returning 23634 * and changing the current drive speed in use by the device is 23635 * implemented. 23636 * 23637 * Arguments: dev - the device 'dev_t' 23638 * cmd - the request type; one of CDROMGDRVSPEED (get) or 23639 * CDROMSDRVSPEED (set) 23640 * data - current drive speed or requested drive speed 23641 * flag - this argument is a pass through to ddi_copyxxx() directly 23642 * from the mode argument of ioctl(). 23643 * 23644 * Return Code: the code returned by sd_send_scsi_cmd() 23645 * EINVAL if invalid arguments are provided 23646 * EFAULT if ddi_copyxxx() fails 23647 * ENXIO if fail ddi_get_soft_state 23648 * EIO if invalid mode sense block descriptor length 23649 */ 23650 23651 static int 23652 sr_change_speed(dev_t dev, int cmd, intptr_t data, int flag) 23653 { 23654 struct sd_lun *un = NULL; 23655 struct mode_header *sense_mhp, *select_mhp; 23656 struct mode_speed *sense_page, *select_page; 23657 int current_speed; 23658 int rval = EINVAL; 23659 int bd_len; 23660 uchar_t *sense = NULL; 23661 uchar_t *select = NULL; 23662 23663 ASSERT((cmd == CDROMGDRVSPEED) || (cmd == CDROMSDRVSPEED)); 23664 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 23665 return (ENXIO); 23666 } 23667 23668 /* 23669 * Note: The drive speed is being modified here according to a Toshiba 23670 * vendor specific mode page (0x31). 
23671 */ 23672 sense = kmem_zalloc(BUFLEN_MODE_CDROM_SPEED, KM_SLEEP); 23673 23674 if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, sense, 23675 BUFLEN_MODE_CDROM_SPEED, CDROM_MODE_SPEED, 23676 SD_PATH_STANDARD)) != 0) { 23677 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 23678 "sr_change_speed: Mode Sense Failed\n"); 23679 kmem_free(sense, BUFLEN_MODE_CDROM_SPEED); 23680 return (rval); 23681 } 23682 sense_mhp = (struct mode_header *)sense; 23683 23684 /* Check the block descriptor len to handle only 1 block descriptor */ 23685 bd_len = sense_mhp->bdesc_length; 23686 if (bd_len > MODE_BLK_DESC_LENGTH) { 23687 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 23688 "sr_change_speed: Mode Sense returned invalid block " 23689 "descriptor length\n"); 23690 kmem_free(sense, BUFLEN_MODE_CDROM_SPEED); 23691 return (EIO); 23692 } 23693 23694 sense_page = (struct mode_speed *) 23695 (sense + MODE_HEADER_LENGTH + sense_mhp->bdesc_length); 23696 current_speed = sense_page->speed; 23697 23698 /* Process command */ 23699 switch (cmd) { 23700 case CDROMGDRVSPEED: 23701 /* Return the drive speed obtained during the mode sense */ 23702 if (current_speed == 0x2) { 23703 current_speed = CDROM_TWELVE_SPEED; 23704 } 23705 if (ddi_copyout(&current_speed, (void *)data, 23706 sizeof (int), flag) != 0) { 23707 rval = EFAULT; 23708 } 23709 break; 23710 case CDROMSDRVSPEED: 23711 /* Validate the requested drive speed */ 23712 switch ((uchar_t)data) { 23713 case CDROM_TWELVE_SPEED: 23714 data = 0x2; 23715 /*FALLTHROUGH*/ 23716 case CDROM_NORMAL_SPEED: 23717 case CDROM_DOUBLE_SPEED: 23718 case CDROM_QUAD_SPEED: 23719 case CDROM_MAXIMUM_SPEED: 23720 break; 23721 default: 23722 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 23723 "sr_change_speed: " 23724 "Drive Speed '%d' Not Supported\n", (uchar_t)data); 23725 kmem_free(sense, BUFLEN_MODE_CDROM_SPEED); 23726 return (EINVAL); 23727 } 23728 23729 /* 23730 * The current drive speed matches the requested drive speed so 23731 * there is no need to send the mode select to change the speed 23732 */ 23733 if (current_speed == data) { 23734 break; 23735 } 23736 23737 /* Build the select data for the requested drive speed */ 23738 select = kmem_zalloc(BUFLEN_MODE_CDROM_SPEED, KM_SLEEP); 23739 select_mhp = (struct mode_header *)select; 23740 select_mhp->bdesc_length = 0; 23741 select_page = 23742 (struct mode_speed *)(select + MODE_HEADER_LENGTH); 23745 select_page->mode_page.code = CDROM_MODE_SPEED; 23746 select_page->mode_page.length = 2; 23747 select_page->speed = (uchar_t)data; 23748 23749 /* Send the mode select for the requested drive speed */ 23750 if ((rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, select, 23751 MODEPAGE_CDROM_SPEED_LEN + MODE_HEADER_LENGTH, 23752 SD_DONTSAVE_PAGE, SD_PATH_STANDARD)) != 0) { 23753 /* 23754 * The mode select failed for the requested drive speed, 23755 * so reset the data for the original drive speed and 23756 * send it to the target. The error is indicated by the 23757 * return value for the failed mode select.
23758 */ 23759 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 23760 "sr_drive_speed: Mode Select Failed\n"); 23761 select_page->speed = sense_page->speed; 23762 (void) sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, select, 23763 MODEPAGE_CDROM_SPEED_LEN + MODE_HEADER_LENGTH, 23764 SD_DONTSAVE_PAGE, SD_PATH_STANDARD); 23765 } 23766 break; 23767 default: 23768 /* should not reach here, but check anyway */ 23769 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 23770 "sr_change_speed: Command '%x' Not Supported\n", cmd); 23771 rval = EINVAL; 23772 break; 23773 } 23774 23775 if (select) { 23776 kmem_free(select, BUFLEN_MODE_CDROM_SPEED); 23777 } 23778 if (sense) { 23779 kmem_free(sense, BUFLEN_MODE_CDROM_SPEED); 23780 } 23781 23782 return (rval); 23783 } 23784 23785 23786 /* 23787 * Function: sr_atapi_change_speed() 23788 * 23789 * Description: This routine is the driver entry point for handling CD-ROM 23790 * drive speed ioctl requests for MMC devices that do not support 23791 * the Real Time Streaming feature (0x107). 23792 * 23793 * Note: This routine will use the SET SPEED command which may not 23794 * be supported by all devices. 23795 * 23796 * Arguments: dev- the device 'dev_t' 23797 * cmd- the request type; one of CDROMGDRVSPEED (get) or 23798 * CDROMSDRVSPEED (set) 23799 * data- current drive speed or requested drive speed 23800 * flag- this argument is a pass through to ddi_copyxxx() directly 23801 * from the mode argument of ioctl(). 23802 * 23803 * Return Code: the code returned by sd_send_scsi_cmd() 23804 * EINVAL if invalid arguments are provided 23805 * EFAULT if ddi_copyxxx() fails 23806 * ENXIO if fail ddi_get_soft_state 23807 * EIO if invalid mode sense block descriptor length 23808 */ 23809 23810 static int 23811 sr_atapi_change_speed(dev_t dev, int cmd, intptr_t data, int flag) 23812 { 23813 struct sd_lun *un; 23814 struct uscsi_cmd *com = NULL; 23815 struct mode_header_grp2 *sense_mhp; 23816 uchar_t *sense_page; 23817 uchar_t *sense = NULL; 23818 char cdb[CDB_GROUP5]; 23819 int bd_len; 23820 int current_speed = 0; 23821 int max_speed = 0; 23822 int rval; 23823 23824 ASSERT((cmd == CDROMGDRVSPEED) || (cmd == CDROMSDRVSPEED)); 23825 23826 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 23827 return (ENXIO); 23828 } 23829 23830 sense = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP); 23831 23832 if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, sense, 23833 BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP, 23834 SD_PATH_STANDARD)) != 0) { 23835 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 23836 "sr_atapi_change_speed: Mode Sense Failed\n"); 23837 kmem_free(sense, BUFLEN_MODE_CDROM_CAP); 23838 return (rval); 23839 } 23840 23841 /* Check the block descriptor len to handle only 1 block descriptor */ 23842 sense_mhp = (struct mode_header_grp2 *)sense; 23843 bd_len = (sense_mhp->bdesc_length_hi << 8) | sense_mhp->bdesc_length_lo; 23844 if (bd_len > MODE_BLK_DESC_LENGTH) { 23845 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 23846 "sr_atapi_change_speed: Mode Sense returned invalid " 23847 "block descriptor length\n"); 23848 kmem_free(sense, BUFLEN_MODE_CDROM_CAP); 23849 return (EIO); 23850 } 23851 23852 /* Calculate the current and maximum drive speeds */ 23853 sense_page = (uchar_t *)(sense + MODE_HEADER_LENGTH_GRP2 + bd_len); 23854 current_speed = (sense_page[14] << 8) | sense_page[15]; 23855 max_speed = (sense_page[8] << 8) | sense_page[9]; 23856 23857 /* Process the command */ 23858 switch (cmd) { 23859 case CDROMGDRVSPEED: 23860 current_speed /= SD_SPEED_1X; 23861 if 
(ddi_copyout(¤t_speed, (void *)data, 23862 sizeof (int), flag) != 0) 23863 rval = EFAULT; 23864 break; 23865 case CDROMSDRVSPEED: 23866 /* Convert the speed code to KB/sec */ 23867 switch ((uchar_t)data) { 23868 case CDROM_NORMAL_SPEED: 23869 current_speed = SD_SPEED_1X; 23870 break; 23871 case CDROM_DOUBLE_SPEED: 23872 current_speed = 2 * SD_SPEED_1X; 23873 break; 23874 case CDROM_QUAD_SPEED: 23875 current_speed = 4 * SD_SPEED_1X; 23876 break; 23877 case CDROM_TWELVE_SPEED: 23878 current_speed = 12 * SD_SPEED_1X; 23879 break; 23880 case CDROM_MAXIMUM_SPEED: 23881 current_speed = 0xffff; 23882 break; 23883 default: 23884 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 23885 "sr_atapi_change_speed: invalid drive speed %d\n", 23886 (uchar_t)data); 23887 kmem_free(sense, BUFLEN_MODE_CDROM_CAP); 23888 return (EINVAL); 23889 } 23890 23891 /* Check the request against the drive's max speed. */ 23892 if (current_speed != 0xffff) { 23893 if (current_speed > max_speed) { 23894 kmem_free(sense, BUFLEN_MODE_CDROM_CAP); 23895 return (EINVAL); 23896 } 23897 } 23898 23899 /* 23900 * Build and send the SET SPEED command 23901 * 23902 * Note: The SET SPEED (0xBB) command used in this routine is 23903 * obsolete per the SCSI MMC spec but still supported in the 23904 * MT FUJI vendor spec. Most equipment is adhereing to MT FUJI 23905 * therefore the command is still implemented in this routine. 23906 */ 23907 bzero(cdb, sizeof (cdb)); 23908 cdb[0] = (char)SCMD_SET_CDROM_SPEED; 23909 cdb[2] = (uchar_t)(current_speed >> 8); 23910 cdb[3] = (uchar_t)current_speed; 23911 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 23912 com->uscsi_cdb = (caddr_t)cdb; 23913 com->uscsi_cdblen = CDB_GROUP5; 23914 com->uscsi_bufaddr = NULL; 23915 com->uscsi_buflen = 0; 23916 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT; 23917 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, 0, SD_PATH_STANDARD); 23918 break; 23919 default: 23920 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 23921 "sr_atapi_change_speed: Command '%x' Not Supported\n", cmd); 23922 rval = EINVAL; 23923 } 23924 23925 if (sense) { 23926 kmem_free(sense, BUFLEN_MODE_CDROM_CAP); 23927 } 23928 if (com) { 23929 kmem_free(com, sizeof (*com)); 23930 } 23931 return (rval); 23932 } 23933 23934 23935 /* 23936 * Function: sr_pause_resume() 23937 * 23938 * Description: This routine is the driver entry point for handling CD-ROM 23939 * pause/resume ioctl requests. This only affects the audio play 23940 * operation. 23941 * 23942 * Arguments: dev - the device 'dev_t' 23943 * cmd - the request type; one of CDROMPAUSE or CDROMRESUME, used 23944 * for setting the resume bit of the cdb. 
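 *
 * Illustrative userland usage (a sketch only; the device path is
 * hypothetical): audio play is paused and resumed with:
 *
 *	#include <sys/cdio.h>
 *	#include <fcntl.h>
 *
 *	int fd = open("/dev/rdsk/c0t2d0s2", O_RDONLY | O_NDELAY);
 *	(void) ioctl(fd, CDROMPAUSE);	-- pause the audio play
 *	(void) ioctl(fd, CDROMRESUME);	-- resume the audio play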
23945 * 23946 * Return Code: the code returned by sd_send_scsi_cmd() 23947 * EINVAL if invalid mode specified 23948 * 23949 */ 23950 23951 static int 23952 sr_pause_resume(dev_t dev, int cmd) 23953 { 23954 struct sd_lun *un; 23955 struct uscsi_cmd *com; 23956 char cdb[CDB_GROUP1]; 23957 int rval; 23958 23959 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 23960 return (ENXIO); 23961 } 23962 23963 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 23964 bzero(cdb, CDB_GROUP1); 23965 cdb[0] = SCMD_PAUSE_RESUME; 23966 switch (cmd) { 23967 case CDROMRESUME: 23968 cdb[8] = 1; 23969 break; 23970 case CDROMPAUSE: 23971 cdb[8] = 0; 23972 break; 23973 default: 23974 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_pause_resume:" 23975 " Command '%x' Not Supported\n", cmd); 23976 rval = EINVAL; 23977 goto done; 23978 } 23979 23980 com->uscsi_cdb = cdb; 23981 com->uscsi_cdblen = CDB_GROUP1; 23982 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT; 23983 23984 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 23985 SD_PATH_STANDARD); 23986 23987 done: 23988 kmem_free(com, sizeof (*com)); 23989 return (rval); 23990 } 23991 23992 23993 /* 23994 * Function: sr_play_msf() 23995 * 23996 * Description: This routine is the driver entry point for handling CD-ROM 23997 * ioctl requests to output the audio signals at the specified 23998 * starting address and continue the audio play until the specified 23999 * ending address (CDROMPLAYMSF) The address is in Minute Second 24000 * Frame (MSF) format. 24001 * 24002 * Arguments: dev - the device 'dev_t' 24003 * data - pointer to user provided audio msf structure, 24004 * specifying start/end addresses. 24005 * flag - this argument is a pass through to ddi_copyxxx() 24006 * directly from the mode argument of ioctl(). 
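 *
 * Illustrative userland usage (a sketch only): playing from 0:02:00
 * through 5:00:00 fills in a cdrom_msf and issues the ioctl, which is
 * decoded into the PLAY AUDIO MSF cdb below:
 *
 *	struct cdrom_msf msf;
 *	msf.cdmsf_min0 = 0; msf.cdmsf_sec0 = 2; msf.cdmsf_frame0 = 0;
 *	msf.cdmsf_min1 = 5; msf.cdmsf_sec1 = 0; msf.cdmsf_frame1 = 0;
 *	(void) ioctl(fd, CDROMPLAYMSF, &msf);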
24007 * 24008 * Return Code: the code returned by sd_send_scsi_cmd() 24009 * EFAULT if ddi_copyxxx() fails 24010 * ENXIO if fail ddi_get_soft_state 24011 * EINVAL if data pointer is NULL 24012 */ 24013 24014 static int 24015 sr_play_msf(dev_t dev, caddr_t data, int flag) 24016 { 24017 struct sd_lun *un; 24018 struct uscsi_cmd *com; 24019 struct cdrom_msf msf_struct; 24020 struct cdrom_msf *msf = &msf_struct; 24021 char cdb[CDB_GROUP1]; 24022 int rval; 24023 24024 if (data == NULL) { 24025 return (EINVAL); 24026 } 24027 24028 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24029 return (ENXIO); 24030 } 24031 24032 if (ddi_copyin(data, msf, sizeof (struct cdrom_msf), flag)) { 24033 return (EFAULT); 24034 } 24035 24036 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 24037 bzero(cdb, CDB_GROUP1); 24038 cdb[0] = SCMD_PLAYAUDIO_MSF; 24039 if (un->un_f_cfg_playmsf_bcd == TRUE) { 24040 cdb[3] = BYTE_TO_BCD(msf->cdmsf_min0); 24041 cdb[4] = BYTE_TO_BCD(msf->cdmsf_sec0); 24042 cdb[5] = BYTE_TO_BCD(msf->cdmsf_frame0); 24043 cdb[6] = BYTE_TO_BCD(msf->cdmsf_min1); 24044 cdb[7] = BYTE_TO_BCD(msf->cdmsf_sec1); 24045 cdb[8] = BYTE_TO_BCD(msf->cdmsf_frame1); 24046 } else { 24047 cdb[3] = msf->cdmsf_min0; 24048 cdb[4] = msf->cdmsf_sec0; 24049 cdb[5] = msf->cdmsf_frame0; 24050 cdb[6] = msf->cdmsf_min1; 24051 cdb[7] = msf->cdmsf_sec1; 24052 cdb[8] = msf->cdmsf_frame1; 24053 } 24054 com->uscsi_cdb = cdb; 24055 com->uscsi_cdblen = CDB_GROUP1; 24056 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT; 24057 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 24058 SD_PATH_STANDARD); 24059 kmem_free(com, sizeof (*com)); 24060 return (rval); 24061 } 24062 24063 24064 /* 24065 * Function: sr_play_trkind() 24066 * 24067 * Description: This routine is the driver entry point for handling CD-ROM 24068 * ioctl requests to output the audio signals at the specified 24069 * starting address and continue the audio play until the specified 24070 * ending address (CDROMPLAYTRKIND). The address is in Track Index 24071 * format. 24072 * 24073 * Arguments: dev - the device 'dev_t' 24074 * data - pointer to user provided audio track/index structure, 24075 * specifying start/end addresses. 24076 * flag - this argument is a pass through to ddi_copyxxx() 24077 * directly from the mode argument of ioctl(). 
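 *
 * Illustrative userland usage (a sketch only): playing from track 2
 * index 1 through track 3 index 1 maps directly onto the cdb bytes
 * built below:
 *
 *	struct cdrom_ti ti;
 *	ti.cdti_trk0 = 2; ti.cdti_ind0 = 1;
 *	ti.cdti_trk1 = 3; ti.cdti_ind1 = 1;
 *	(void) ioctl(fd, CDROMPLAYTRKIND, &ti);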
24078 * 24079 * Return Code: the code returned by sd_send_scsi_cmd() 24080 * EFAULT if ddi_copyxxx() fails 24081 * ENXIO if fail ddi_get_soft_state 24082 * EINVAL if data pointer is NULL 24083 */ 24084 24085 static int 24086 sr_play_trkind(dev_t dev, caddr_t data, int flag) 24087 { 24088 struct cdrom_ti ti_struct; 24089 struct cdrom_ti *ti = &ti_struct; 24090 struct uscsi_cmd *com = NULL; 24091 char cdb[CDB_GROUP1]; 24092 int rval; 24093 24094 if (data == NULL) { 24095 return (EINVAL); 24096 } 24097 24098 if (ddi_copyin(data, ti, sizeof (struct cdrom_ti), flag)) { 24099 return (EFAULT); 24100 } 24101 24102 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 24103 bzero(cdb, CDB_GROUP1); 24104 cdb[0] = SCMD_PLAYAUDIO_TI; 24105 cdb[4] = ti->cdti_trk0; 24106 cdb[5] = ti->cdti_ind0; 24107 cdb[7] = ti->cdti_trk1; 24108 cdb[8] = ti->cdti_ind1; 24109 com->uscsi_cdb = cdb; 24110 com->uscsi_cdblen = CDB_GROUP1; 24111 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT; 24112 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 24113 SD_PATH_STANDARD); 24114 kmem_free(com, sizeof (*com)); 24115 return (rval); 24116 } 24117 24118 24119 /* 24120 * Function: sr_read_all_subcodes() 24121 * 24122 * Description: This routine is the driver entry point for handling CD-ROM 24123 * ioctl requests to return raw subcode data while the target is 24124 * playing audio (CDROMSUBCODE). 24125 * 24126 * Arguments: dev - the device 'dev_t' 24127 * data - pointer to user provided cdrom subcode structure, 24128 * specifying the transfer length and address. 24129 * flag - this argument is a pass through to ddi_copyxxx() 24130 * directly from the mode argument of ioctl(). 24131 * 24132 * Return Code: the code returned by sd_send_scsi_cmd() 24133 * EFAULT if ddi_copyxxx() fails 24134 * ENXIO if fail ddi_get_soft_state 24135 * EINVAL if data pointer is NULL 24136 */ 24137 24138 static int 24139 sr_read_all_subcodes(dev_t dev, caddr_t data, int flag) 24140 { 24141 struct sd_lun *un = NULL; 24142 struct uscsi_cmd *com = NULL; 24143 struct cdrom_subcode *subcode = NULL; 24144 int rval; 24145 size_t buflen; 24146 char cdb[CDB_GROUP5]; 24147 24148 #ifdef _MULTI_DATAMODEL 24149 /* To support ILP32 applications in an LP64 world */ 24150 struct cdrom_subcode32 cdrom_subcode32; 24151 struct cdrom_subcode32 *cdsc32 = &cdrom_subcode32; 24152 #endif 24153 if (data == NULL) { 24154 return (EINVAL); 24155 } 24156 24157 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24158 return (ENXIO); 24159 } 24160 24161 subcode = kmem_zalloc(sizeof (struct cdrom_subcode), KM_SLEEP); 24162 24163 #ifdef _MULTI_DATAMODEL 24164 switch (ddi_model_convert_from(flag & FMODELS)) { 24165 case DDI_MODEL_ILP32: 24166 if (ddi_copyin(data, cdsc32, sizeof (*cdsc32), flag)) { 24167 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24168 "sr_read_all_subcodes: ddi_copyin Failed\n"); 24169 kmem_free(subcode, sizeof (struct cdrom_subcode)); 24170 return (EFAULT); 24171 } 24172 /* Convert the ILP32 uscsi data from the application to LP64 */ 24173 cdrom_subcode32tocdrom_subcode(cdsc32, subcode); 24174 break; 24175 case DDI_MODEL_NONE: 24176 if (ddi_copyin(data, subcode, 24177 sizeof (struct cdrom_subcode), flag)) { 24178 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24179 "sr_read_all_subcodes: ddi_copyin Failed\n"); 24180 kmem_free(subcode, sizeof (struct cdrom_subcode)); 24181 return (EFAULT); 24182 } 24183 break; 24184 } 24185 #else /* ! 
_MULTI_DATAMODEL */
24186 if (ddi_copyin(data, subcode, sizeof (struct cdrom_subcode), flag)) {
24187 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
24188 "sr_read_all_subcodes: ddi_copyin Failed\n");
24189 kmem_free(subcode, sizeof (struct cdrom_subcode));
24190 return (EFAULT);
24191 }
24192 #endif /* _MULTI_DATAMODEL */
24193
24194 /*
24195 * Since MMC-2 expects max 3 bytes for length, check if the
24196 * length input is greater than 3 bytes
24197 */
24198 if ((subcode->cdsc_length & 0xFF000000) != 0) {
24199 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
24200 "sr_read_all_subcodes: "
24201 "cdrom transfer length too large: %d (limit %d)\n",
24202 subcode->cdsc_length, 0xFFFFFF);
24203 kmem_free(subcode, sizeof (struct cdrom_subcode));
24204 return (EINVAL);
24205 }
24206
24207 buflen = CDROM_BLK_SUBCODE * subcode->cdsc_length;
24208 com = kmem_zalloc(sizeof (*com), KM_SLEEP);
24209 bzero(cdb, CDB_GROUP5);
24210
24211 if (un->un_f_mmc_cap == TRUE) {
24212 cdb[0] = (char)SCMD_READ_CD;
24213 cdb[2] = (char)0xff;
24214 cdb[3] = (char)0xff;
24215 cdb[4] = (char)0xff;
24216 cdb[5] = (char)0xff;
24217 cdb[6] = (((subcode->cdsc_length) & 0x00ff0000) >> 16);
24218 cdb[7] = (((subcode->cdsc_length) & 0x0000ff00) >> 8);
24219 cdb[8] = ((subcode->cdsc_length) & 0x000000ff);
24220 cdb[10] = 1;
24221 } else {
24222 /*
24223 * Note: A vendor specific command (0xDF) is being used here to
24224 * request a read of all subcodes.
24225 */
24226 cdb[0] = (char)SCMD_READ_ALL_SUBCODES;
24227 cdb[6] = (((subcode->cdsc_length) & 0xff000000) >> 24);
24228 cdb[7] = (((subcode->cdsc_length) & 0x00ff0000) >> 16);
24229 cdb[8] = (((subcode->cdsc_length) & 0x0000ff00) >> 8);
24230 cdb[9] = ((subcode->cdsc_length) & 0x000000ff);
24231 }
24232 com->uscsi_cdb = cdb;
24233 com->uscsi_cdblen = CDB_GROUP5;
24234 com->uscsi_bufaddr = (caddr_t)subcode->cdsc_addr;
24235 com->uscsi_buflen = buflen;
24236 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;
24237 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE,
24238 SD_PATH_STANDARD);
24239 kmem_free(subcode, sizeof (struct cdrom_subcode));
24240 kmem_free(com, sizeof (*com));
24241 return (rval);
24242 }
24243
24244
24245 /*
24246 * Function: sr_read_subchannel()
24247 *
24248 * Description: This routine is the driver entry point for handling CD-ROM
24249 * ioctl requests to return the Q sub-channel data of the CD
24250 * current position block. (CDROMSUBCHNL) The data includes the
24251 * track number, index number, absolute CD-ROM address (LBA or MSF
24252 * format per the user), track relative CD-ROM address (LBA or MSF
24253 * format per the user), control data and audio status.
24254 *
24255 * Arguments: dev - the device 'dev_t'
24256 * data - pointer to user provided cdrom sub-channel structure
24257 * flag - this argument is a pass through to ddi_copyxxx()
24258 * directly from the mode argument of ioctl().
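 *
 * Illustrative userland usage (a sketch only): an application polls
 * the current audio position by choosing an address format and then
 * reading back the decoded Q sub-channel fields:
 *
 *	struct cdrom_subchnl sc;
 *	sc.cdsc_format = CDROM_MSF;
 *	if (ioctl(fd, CDROMSUBCHNL, &sc) == 0)
 *		(void) printf("track %d index %d\n",
 *		    sc.cdsc_trk, sc.cdsc_ind);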
24259 * 24260 * Return Code: the code returned by sd_send_scsi_cmd() 24261 * EFAULT if ddi_copyxxx() fails 24262 * ENXIO if fail ddi_get_soft_state 24263 * EINVAL if data pointer is NULL 24264 */ 24265 24266 static int 24267 sr_read_subchannel(dev_t dev, caddr_t data, int flag) 24268 { 24269 struct sd_lun *un; 24270 struct uscsi_cmd *com; 24271 struct cdrom_subchnl subchanel; 24272 struct cdrom_subchnl *subchnl = &subchanel; 24273 char cdb[CDB_GROUP1]; 24274 caddr_t buffer; 24275 int rval; 24276 24277 if (data == NULL) { 24278 return (EINVAL); 24279 } 24280 24281 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 24282 (un->un_state == SD_STATE_OFFLINE)) { 24283 return (ENXIO); 24284 } 24285 24286 if (ddi_copyin(data, subchnl, sizeof (struct cdrom_subchnl), flag)) { 24287 return (EFAULT); 24288 } 24289 24290 buffer = kmem_zalloc((size_t)16, KM_SLEEP); 24291 bzero(cdb, CDB_GROUP1); 24292 cdb[0] = SCMD_READ_SUBCHANNEL; 24293 /* Set the MSF bit based on the user requested address format */ 24294 cdb[1] = (subchnl->cdsc_format & CDROM_LBA) ? 0 : 0x02; 24295 /* 24296 * Set the Q bit in byte 2 to indicate that Q sub-channel data be 24297 * returned 24298 */ 24299 cdb[2] = 0x40; 24300 /* 24301 * Set byte 3 to specify the return data format. A value of 0x01 24302 * indicates that the CD-ROM current position should be returned. 24303 */ 24304 cdb[3] = 0x01; 24305 cdb[8] = 0x10; 24306 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 24307 com->uscsi_cdb = cdb; 24308 com->uscsi_cdblen = CDB_GROUP1; 24309 com->uscsi_bufaddr = buffer; 24310 com->uscsi_buflen = 16; 24311 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 24312 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 24313 SD_PATH_STANDARD); 24314 if (rval != 0) { 24315 kmem_free(buffer, 16); 24316 kmem_free(com, sizeof (*com)); 24317 return (rval); 24318 } 24319 24320 /* Process the returned Q sub-channel data */ 24321 subchnl->cdsc_audiostatus = buffer[1]; 24322 subchnl->cdsc_adr = (buffer[5] & 0xF0); 24323 subchnl->cdsc_ctrl = (buffer[5] & 0x0F); 24324 subchnl->cdsc_trk = buffer[6]; 24325 subchnl->cdsc_ind = buffer[7]; 24326 if (subchnl->cdsc_format & CDROM_LBA) { 24327 subchnl->cdsc_absaddr.lba = 24328 ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) + 24329 ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]); 24330 subchnl->cdsc_reladdr.lba = 24331 ((uchar_t)buffer[12] << 24) + ((uchar_t)buffer[13] << 16) + 24332 ((uchar_t)buffer[14] << 8) + ((uchar_t)buffer[15]); 24333 } else if (un->un_f_cfg_readsub_bcd == TRUE) { 24334 subchnl->cdsc_absaddr.msf.minute = BCD_TO_BYTE(buffer[9]); 24335 subchnl->cdsc_absaddr.msf.second = BCD_TO_BYTE(buffer[10]); 24336 subchnl->cdsc_absaddr.msf.frame = BCD_TO_BYTE(buffer[11]); 24337 subchnl->cdsc_reladdr.msf.minute = BCD_TO_BYTE(buffer[13]); 24338 subchnl->cdsc_reladdr.msf.second = BCD_TO_BYTE(buffer[14]); 24339 subchnl->cdsc_reladdr.msf.frame = BCD_TO_BYTE(buffer[15]); 24340 } else { 24341 subchnl->cdsc_absaddr.msf.minute = buffer[9]; 24342 subchnl->cdsc_absaddr.msf.second = buffer[10]; 24343 subchnl->cdsc_absaddr.msf.frame = buffer[11]; 24344 subchnl->cdsc_reladdr.msf.minute = buffer[13]; 24345 subchnl->cdsc_reladdr.msf.second = buffer[14]; 24346 subchnl->cdsc_reladdr.msf.frame = buffer[15]; 24347 } 24348 kmem_free(buffer, 16); 24349 kmem_free(com, sizeof (*com)); 24350 if (ddi_copyout(subchnl, data, sizeof (struct cdrom_subchnl), flag) 24351 != 0) { 24352 return (EFAULT); 24353 } 24354 return (rval); 24355 } 24356 24357 24358 /* 24359 * Function: sr_read_tocentry() 24360 * 
24361 * Description: This routine is the driver entry point for handling CD-ROM
24362 * ioctl requests to read from the Table of Contents (TOC)
24363 * (CDROMREADTOCENTRY). This routine provides the ADR and CTRL
24364 * fields, the starting address (LBA or MSF format per the user)
24365 * and the data mode if the user specified track is a data track.
24366 *
24367 * Note: The READ HEADER (0x44) command used in this routine is
24368 * obsolete per the SCSI MMC spec but still supported in the
24369 * MT FUJI vendor spec. Most equipment is adhering to MT FUJI,
24370 * therefore the command is still implemented in this routine.
24371 *
24372 * Arguments: dev - the device 'dev_t'
24373 * data - pointer to user provided toc entry structure,
24374 * specifying the track # and the address format
24375 * (LBA or MSF).
24376 * flag - this argument is a pass through to ddi_copyxxx()
24377 * directly from the mode argument of ioctl().
24378 *
24379 * Return Code: the code returned by sd_send_scsi_cmd()
24380 * EFAULT if ddi_copyxxx() fails
24381 * ENXIO if fail ddi_get_soft_state
24382 * EINVAL if data pointer is NULL
24383 */
24384
24385 static int
24386 sr_read_tocentry(dev_t dev, caddr_t data, int flag)
24387 {
24388 struct sd_lun *un = NULL;
24389 struct uscsi_cmd *com;
24390 struct cdrom_tocentry toc_entry;
24391 struct cdrom_tocentry *entry = &toc_entry;
24392 caddr_t buffer;
24393 int rval;
24394 char cdb[CDB_GROUP1];
24395
24396 if (data == NULL) {
24397 return (EINVAL);
24398 }
24399
24400 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
24401 (un->un_state == SD_STATE_OFFLINE)) {
24402 return (ENXIO);
24403 }
24404
24405 if (ddi_copyin(data, entry, sizeof (struct cdrom_tocentry), flag)) {
24406 return (EFAULT);
24407 }
24408
24409 /* Validate the requested track and address format */
24410 if (!(entry->cdte_format & (CDROM_LBA | CDROM_MSF))) {
24411 return (EINVAL);
24412 }
24413
24414 if (entry->cdte_track == 0) {
24415 return (EINVAL);
24416 }
24417
24418 buffer = kmem_zalloc((size_t)12, KM_SLEEP);
24419 com = kmem_zalloc(sizeof (*com), KM_SLEEP);
24420 bzero(cdb, CDB_GROUP1);
24421
24422 cdb[0] = SCMD_READ_TOC;
24423 /* Set the MSF bit based on the user requested address format */
24424 cdb[1] = ((entry->cdte_format & CDROM_LBA) ? 0 : 2);
24425 if (un->un_f_cfg_read_toc_trk_bcd == TRUE) {
24426 cdb[6] = BYTE_TO_BCD(entry->cdte_track);
24427 } else {
24428 cdb[6] = entry->cdte_track;
24429 }
24430
24431 /*
24432 * Bytes 7 & 8 are the 12 byte allocation length for a single entry.
24433 * (4 byte TOC response header + 8 byte track descriptor)
24434 */
24435 cdb[8] = 12;
24436 com->uscsi_cdb = cdb;
24437 com->uscsi_cdblen = CDB_GROUP1;
24438 com->uscsi_bufaddr = buffer;
24439 com->uscsi_buflen = 0x0C;
24440 com->uscsi_flags = (USCSI_DIAGNOSE | USCSI_SILENT | USCSI_READ);
24441 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
24442 SD_PATH_STANDARD);
24443 if (rval != 0) {
24444 kmem_free(buffer, 12);
24445 kmem_free(com, sizeof (*com));
24446 return (rval);
24447 }
24448
24449 /* Process the toc entry */
24450 entry->cdte_adr = (buffer[5] & 0xF0) >> 4;
24451 entry->cdte_ctrl = (buffer[5] & 0x0F);
24452 if (entry->cdte_format & CDROM_LBA) {
24453 entry->cdte_addr.lba =
24454 ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) +
24455 ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]);
24456 } else if (un->un_f_cfg_read_toc_addr_bcd == TRUE) {
24457 entry->cdte_addr.msf.minute = BCD_TO_BYTE(buffer[9]);
24458 entry->cdte_addr.msf.second = BCD_TO_BYTE(buffer[10]);
24459 entry->cdte_addr.msf.frame = BCD_TO_BYTE(buffer[11]);
24460 /*
24461 * Send a READ TOC command using the LBA address format to get
24462 * the LBA for the track requested so it can be used in the
24463 * READ HEADER request
24464 *
24465 * Note: The MSF bit of the READ HEADER command specifies the
24466 * output format. The block address specified in that command
24467 * must be in LBA format.
24468 */
24469 cdb[1] = 0;
24470 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
24471 SD_PATH_STANDARD);
24472 if (rval != 0) {
24473 kmem_free(buffer, 12);
24474 kmem_free(com, sizeof (*com));
24475 return (rval);
24476 }
24477 } else {
24478 entry->cdte_addr.msf.minute = buffer[9];
24479 entry->cdte_addr.msf.second = buffer[10];
24480 entry->cdte_addr.msf.frame = buffer[11];
24481 /*
24482 * Send a READ TOC command using the LBA address format to get
24483 * the LBA for the track requested so it can be used in the
24484 * READ HEADER request
24485 *
24486 * Note: The MSF bit of the READ HEADER command specifies the
24487 * output format. The block address specified in that command
24488 * must be in LBA format.
24489 */
24490 cdb[1] = 0;
24491 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
24492 SD_PATH_STANDARD);
24493 if (rval != 0) {
24494 kmem_free(buffer, 12);
24495 kmem_free(com, sizeof (*com));
24496 return (rval);
24497 }
24498 }
24499
24500 /*
24501 * Build and send the READ HEADER command to determine the data mode of
24502 * the user specified track.
24503 */
24504 if ((entry->cdte_ctrl & CDROM_DATA_TRACK) &&
24505 (entry->cdte_track != CDROM_LEADOUT)) {
24506 bzero(cdb, CDB_GROUP1);
24507 cdb[0] = SCMD_READ_HEADER;
24508 cdb[2] = buffer[8];
24509 cdb[3] = buffer[9];
24510 cdb[4] = buffer[10];
24511 cdb[5] = buffer[11];
24512 cdb[8] = 0x08;
24513 com->uscsi_buflen = 0x08;
24514 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
24515 SD_PATH_STANDARD);
24516 if (rval == 0) {
24517 entry->cdte_datamode = buffer[0];
24518 } else {
24519 /*
24520 * The READ HEADER command failed; since the command
24521 * is obsoleted in one spec, it's better to return
24522 * -1 for an invalid track so that we can still
24523 * receive the rest of the TOC data.
24524 */
24525 entry->cdte_datamode = (uchar_t)-1;
24526 }
24527 } else {
24528 entry->cdte_datamode = (uchar_t)-1;
24529 }
24530
24531 kmem_free(buffer, 12);
24532 kmem_free(com, sizeof (*com));
24533 if (ddi_copyout(entry, data, sizeof (struct cdrom_tocentry), flag) != 0)
24534 return (EFAULT);
24535
24536 return (rval);
24537 }
24538
24539
24540 /*
24541 * Function: sr_read_tochdr()
24542 *
24543 * Description: This routine is the driver entry point for handling CD-ROM
24544 * ioctl requests to read the Table of Contents (TOC) header
24545 * (CDROMREADTOCHDR). The TOC header consists of the disk starting
24546 * and ending track numbers.
24547 *
24548 * Arguments: dev - the device 'dev_t'
24549 * data - pointer to user provided toc header structure,
24550 * specifying the starting and ending track numbers.
24551 * flag - this argument is a pass through to ddi_copyxxx()
24552 * directly from the mode argument of ioctl().
24553 *
24554 * Return Code: the code returned by sd_send_scsi_cmd()
24555 * EFAULT if ddi_copyxxx() fails
24556 * ENXIO if fail ddi_get_soft_state
24557 * EINVAL if data pointer is NULL
24558 */
24559
24560 static int
24561 sr_read_tochdr(dev_t dev, caddr_t data, int flag)
24562 {
24563 struct sd_lun *un;
24564 struct uscsi_cmd *com;
24565 struct cdrom_tochdr toc_header;
24566 struct cdrom_tochdr *hdr = &toc_header;
24567 char cdb[CDB_GROUP1];
24568 int rval;
24569 caddr_t buffer;
24570
24571 if (data == NULL) {
24572 return (EINVAL);
24573 }
24574
24575 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
24576 (un->un_state == SD_STATE_OFFLINE)) {
24577 return (ENXIO);
24578 }
24579
24580 buffer = kmem_zalloc(4, KM_SLEEP);
24581 bzero(cdb, CDB_GROUP1);
24582 cdb[0] = SCMD_READ_TOC;
24583 /*
24584 * Specifying a track number of 0x00 in the READ TOC command indicates
24585 * that the TOC header should be returned
24586 */
24587 cdb[6] = 0x00;
24588 /*
24589 * Bytes 7 & 8 are the 4 byte allocation length for TOC header.
24590 * (2 byte data len + 1 byte starting track # + 1 byte ending track #)
24591 */
24592 cdb[8] = 0x04;
24593 com = kmem_zalloc(sizeof (*com), KM_SLEEP);
24594 com->uscsi_cdb = cdb;
24595 com->uscsi_cdblen = CDB_GROUP1;
24596 com->uscsi_bufaddr = buffer;
24597 com->uscsi_buflen = 0x04;
24598 com->uscsi_timeout = 300;
24599 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;
24600
24601 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
24602 SD_PATH_STANDARD);
24603 if (un->un_f_cfg_read_toc_trk_bcd == TRUE) {
24604 hdr->cdth_trk0 = BCD_TO_BYTE(buffer[2]);
24605 hdr->cdth_trk1 = BCD_TO_BYTE(buffer[3]);
24606 } else {
24607 hdr->cdth_trk0 = buffer[2];
24608 hdr->cdth_trk1 = buffer[3];
24609 }
24610 kmem_free(buffer, 4);
24611 kmem_free(com, sizeof (*com));
24612 if (ddi_copyout(hdr, data, sizeof (struct cdrom_tochdr), flag) != 0) {
24613 return (EFAULT);
24614 }
24615 return (rval);
24616 }
24617
24618
24619 /*
24620 * Note: The following sr_read_mode1(), sr_read_cd_mode2(), sr_read_mode2(),
24621 * sr_read_cdda(), and sr_read_cdxa() routines implement driver support for
24622 * handling CDROMREAD ioctl requests for mode 1 user data, mode 2 user data,
24623 * digital audio and extended architecture digital audio. These modes are
24624 * defined in the IEC908 (Red Book), ISO10149 (Yellow Book), and the SCSI3
24625 * MMC specs.
24626 * 24627 * In addition to support for the various data formats these routines also 24628 * include support for devices that implement only the direct access READ 24629 * commands (0x08, 0x28), devices that implement the READ_CD commands 24630 * (0xBE, 0xD4), and devices that implement the vendor unique READ CDDA and 24631 * READ CDXA commands (0xD8, 0xDB) 24632 */ 24633 24634 /* 24635 * Function: sr_read_mode1() 24636 * 24637 * Description: This routine is the driver entry point for handling CD-ROM 24638 * ioctl read mode1 requests (CDROMREADMODE1). 24639 * 24640 * Arguments: dev - the device 'dev_t' 24641 * data - pointer to user provided cd read structure specifying 24642 * the lba buffer address and length. 24643 * flag - this argument is a pass through to ddi_copyxxx() 24644 * directly from the mode argument of ioctl(). 24645 * 24646 * Return Code: the code returned by sd_send_scsi_cmd() 24647 * EFAULT if ddi_copyxxx() fails 24648 * ENXIO if fail ddi_get_soft_state 24649 * EINVAL if data pointer is NULL 24650 */ 24651 24652 static int 24653 sr_read_mode1(dev_t dev, caddr_t data, int flag) 24654 { 24655 struct sd_lun *un; 24656 struct cdrom_read mode1_struct; 24657 struct cdrom_read *mode1 = &mode1_struct; 24658 int rval; 24659 #ifdef _MULTI_DATAMODEL 24660 /* To support ILP32 applications in an LP64 world */ 24661 struct cdrom_read32 cdrom_read32; 24662 struct cdrom_read32 *cdrd32 = &cdrom_read32; 24663 #endif /* _MULTI_DATAMODEL */ 24664 24665 if (data == NULL) { 24666 return (EINVAL); 24667 } 24668 24669 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 24670 (un->un_state == SD_STATE_OFFLINE)) { 24671 return (ENXIO); 24672 } 24673 24674 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 24675 "sd_read_mode1: entry: un:0x%p\n", un); 24676 24677 #ifdef _MULTI_DATAMODEL 24678 switch (ddi_model_convert_from(flag & FMODELS)) { 24679 case DDI_MODEL_ILP32: 24680 if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) { 24681 return (EFAULT); 24682 } 24683 /* Convert the ILP32 uscsi data from the application to LP64 */ 24684 cdrom_read32tocdrom_read(cdrd32, mode1); 24685 break; 24686 case DDI_MODEL_NONE: 24687 if (ddi_copyin(data, mode1, sizeof (struct cdrom_read), flag)) { 24688 return (EFAULT); 24689 } 24690 } 24691 #else /* ! _MULTI_DATAMODEL */ 24692 if (ddi_copyin(data, mode1, sizeof (struct cdrom_read), flag)) { 24693 return (EFAULT); 24694 } 24695 #endif /* _MULTI_DATAMODEL */ 24696 24697 rval = sd_send_scsi_READ(un, mode1->cdread_bufaddr, 24698 mode1->cdread_buflen, mode1->cdread_lba, SD_PATH_STANDARD); 24699 24700 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 24701 "sd_read_mode1: exit: un:0x%p\n", un); 24702 24703 return (rval); 24704 } 24705 24706 24707 /* 24708 * Function: sr_read_cd_mode2() 24709 * 24710 * Description: This routine is the driver entry point for handling CD-ROM 24711 * ioctl read mode2 requests (CDROMREADMODE2) for devices that 24712 * support the READ CD (0xBE) command or the 1st generation 24713 * READ CD (0xD4) command. 24714 * 24715 * Arguments: dev - the device 'dev_t' 24716 * data - pointer to user provided cd read structure specifying 24717 * the lba buffer address and length. 24718 * flag - this argument is a pass through to ddi_copyxxx() 24719 * directly from the mode argument of ioctl(). 
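 *
 * Illustrative userland usage (a sketch only; the buffer size should
 * be a multiple of the 2336 byte mode 2 sector size consumed below):
 *
 *	char buf[2336];
 *	struct cdrom_read cr;
 *	cr.cdread_lba = lba;		-- logical block to read
 *	cr.cdread_bufaddr = buf;	-- destination buffer
 *	cr.cdread_buflen = sizeof (buf);
 *	(void) ioctl(fd, CDROMREADMODE2, &cr);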
24720 *
24721 * Return Code: the code returned by sd_send_scsi_cmd()
24722 * EFAULT if ddi_copyxxx() fails
24723 * ENXIO if fail ddi_get_soft_state
24724 * EINVAL if data pointer is NULL
24725 */
24726
24727 static int
24728 sr_read_cd_mode2(dev_t dev, caddr_t data, int flag)
24729 {
24730 struct sd_lun *un;
24731 struct uscsi_cmd *com;
24732 struct cdrom_read mode2_struct;
24733 struct cdrom_read *mode2 = &mode2_struct;
24734 uchar_t cdb[CDB_GROUP5];
24735 int nblocks;
24736 int rval;
24737 #ifdef _MULTI_DATAMODEL
24738 /* To support ILP32 applications in an LP64 world */
24739 struct cdrom_read32 cdrom_read32;
24740 struct cdrom_read32 *cdrd32 = &cdrom_read32;
24741 #endif /* _MULTI_DATAMODEL */
24742
24743 if (data == NULL) {
24744 return (EINVAL);
24745 }
24746
24747 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
24748 (un->un_state == SD_STATE_OFFLINE)) {
24749 return (ENXIO);
24750 }
24751
24752 #ifdef _MULTI_DATAMODEL
24753 switch (ddi_model_convert_from(flag & FMODELS)) {
24754 case DDI_MODEL_ILP32:
24755 if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) {
24756 return (EFAULT);
24757 }
24758 /* Convert the ILP32 uscsi data from the application to LP64 */
24759 cdrom_read32tocdrom_read(cdrd32, mode2);
24760 break;
24761 case DDI_MODEL_NONE:
24762 if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) {
24763 return (EFAULT);
24764 }
24765 break;
24766 }
24767
24768 #else /* ! _MULTI_DATAMODEL */
24769 if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) {
24770 return (EFAULT);
24771 }
24772 #endif /* _MULTI_DATAMODEL */
24773
24774 bzero(cdb, sizeof (cdb));
24775 if (un->un_f_cfg_read_cd_xd4 == TRUE) {
24776 /* Read command supported by 1st generation atapi drives */
24777 cdb[0] = SCMD_READ_CDD4;
24778 } else {
24779 /* Universal CD Access Command */
24780 cdb[0] = SCMD_READ_CD;
24781 }
24782
24783 /*
24784 * Set expected sector type to: 2336 byte, Mode 2 Yellow Book
24785 */
24786 cdb[1] = CDROM_SECTOR_TYPE_MODE2;
24787
24788 /* set the start address */
24789 cdb[2] = (uchar_t)((mode2->cdread_lba >> 24) & 0xFF);
24790 cdb[3] = (uchar_t)((mode2->cdread_lba >> 16) & 0xFF);
24791 cdb[4] = (uchar_t)((mode2->cdread_lba >> 8) & 0xFF);
24792 cdb[5] = (uchar_t)(mode2->cdread_lba & 0xFF);
24793
24794 /* set the transfer length */
24795 nblocks = mode2->cdread_buflen / 2336;
24796 cdb[6] = (uchar_t)(nblocks >> 16);
24797 cdb[7] = (uchar_t)(nblocks >> 8);
24798 cdb[8] = (uchar_t)nblocks;
24799
24800 /* set the filter bits */
24801 cdb[9] = CDROM_READ_CD_USERDATA;
24802
24803 com = kmem_zalloc(sizeof (*com), KM_SLEEP);
24804 com->uscsi_cdb = (caddr_t)cdb;
24805 com->uscsi_cdblen = sizeof (cdb);
24806 com->uscsi_bufaddr = mode2->cdread_bufaddr;
24807 com->uscsi_buflen = mode2->cdread_buflen;
24808 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;
24809
24810 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE,
24811 SD_PATH_STANDARD);
24812 kmem_free(com, sizeof (*com));
24813 return (rval);
24814 }
24815
24816
24817 /*
24818 * Function: sr_read_mode2()
24819 *
24820 * Description: This routine is the driver entry point for handling CD-ROM
24821 * ioctl read mode2 requests (CDROMREADMODE2) for devices that
24822 * do not support the READ CD (0xBE) command.
24823 *
24824 * Arguments: dev - the device 'dev_t'
24825 * data - pointer to user provided cd read structure specifying
24826 * the lba buffer address and length.
24827 * flag - this argument is a pass through to ddi_copyxxx()
24828 * directly from the mode argument of ioctl().
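 *
 * Note: because this path temporarily changes the device block size,
 * it fails with EAGAIN when other commands are in flight (see the
 * un_ncmds_in_driver check below). A caller sketch, retrying on that
 * transient error, might look like:
 *
 *	while (ioctl(fd, CDROMREADMODE2, &cr) != 0 && errno == EAGAIN)
 *		(void) sleep(1);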
24829 * 24830 * Return Code: the code returned by sd_send_scsi_cmd() 24831 * EFAULT if ddi_copyxxx() fails 24832 * ENXIO if fail ddi_get_soft_state 24833 * EINVAL if data pointer is NULL 24834 * EIO if fail to reset block size 24835 * EAGAIN if commands are in progress in the driver 24836 */ 24837 24838 static int 24839 sr_read_mode2(dev_t dev, caddr_t data, int flag) 24840 { 24841 struct sd_lun *un; 24842 struct cdrom_read mode2_struct; 24843 struct cdrom_read *mode2 = &mode2_struct; 24844 int rval; 24845 uint32_t restore_blksize; 24846 struct uscsi_cmd *com; 24847 uchar_t cdb[CDB_GROUP0]; 24848 int nblocks; 24849 24850 #ifdef _MULTI_DATAMODEL 24851 /* To support ILP32 applications in an LP64 world */ 24852 struct cdrom_read32 cdrom_read32; 24853 struct cdrom_read32 *cdrd32 = &cdrom_read32; 24854 #endif /* _MULTI_DATAMODEL */ 24855 24856 if (data == NULL) { 24857 return (EINVAL); 24858 } 24859 24860 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 24861 (un->un_state == SD_STATE_OFFLINE)) { 24862 return (ENXIO); 24863 } 24864 24865 /* 24866 * Because this routine will update the device and driver block size 24867 * being used we want to make sure there are no commands in progress. 24868 * If commands are in progress the user will have to try again. 24869 * 24870 * We check for 1 instead of 0 because we increment un_ncmds_in_driver 24871 * in sdioctl to protect commands from sdioctl through to the top of 24872 * sd_uscsi_strategy. See sdioctl for details. 24873 */ 24874 mutex_enter(SD_MUTEX(un)); 24875 if (un->un_ncmds_in_driver != 1) { 24876 mutex_exit(SD_MUTEX(un)); 24877 return (EAGAIN); 24878 } 24879 mutex_exit(SD_MUTEX(un)); 24880 24881 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 24882 "sd_read_mode2: entry: un:0x%p\n", un); 24883 24884 #ifdef _MULTI_DATAMODEL 24885 switch (ddi_model_convert_from(flag & FMODELS)) { 24886 case DDI_MODEL_ILP32: 24887 if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) { 24888 return (EFAULT); 24889 } 24890 /* Convert the ILP32 uscsi data from the application to LP64 */ 24891 cdrom_read32tocdrom_read(cdrd32, mode2); 24892 break; 24893 case DDI_MODEL_NONE: 24894 if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) { 24895 return (EFAULT); 24896 } 24897 break; 24898 } 24899 #else /* ! 
_MULTI_DATAMODEL */
24900 if (ddi_copyin(data, mode2, sizeof (*mode2), flag)) {
24901 return (EFAULT);
24902 }
24903 #endif /* _MULTI_DATAMODEL */
24904
24905 /* Store the current target block size for restoration later */
24906 restore_blksize = un->un_tgt_blocksize;
24907
24908 /* Change the device and soft state target block size to 2336 */
24909 if (sr_sector_mode(dev, SD_MODE2_BLKSIZE) != 0) {
24910 rval = EIO;
24911 goto done;
24912 }
24913
24914
24915 bzero(cdb, sizeof (cdb));
24916
24917 /* set READ operation */
24918 cdb[0] = SCMD_READ;
24919
24920 /* adjust lba for 2kbyte blocks from 512 byte blocks */
24921 mode2->cdread_lba >>= 2;
24922
24923 /* set the start address */
24924 cdb[1] = (uchar_t)((mode2->cdread_lba >> 16) & 0x1F);
24925 cdb[2] = (uchar_t)((mode2->cdread_lba >> 8) & 0xFF);
24926 cdb[3] = (uchar_t)(mode2->cdread_lba & 0xFF);
24927
24928 /* set the transfer length */
24929 nblocks = mode2->cdread_buflen / 2336;
24930 cdb[4] = (uchar_t)nblocks & 0xFF;
24931
24932 /* build command */
24933 com = kmem_zalloc(sizeof (*com), KM_SLEEP);
24934 com->uscsi_cdb = (caddr_t)cdb;
24935 com->uscsi_cdblen = sizeof (cdb);
24936 com->uscsi_bufaddr = mode2->cdread_bufaddr;
24937 com->uscsi_buflen = mode2->cdread_buflen;
24938 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;
24939
24940 /*
24941 * Issue SCSI command with user space address for read buffer.
24942 *
24943 * This sends the command through the main channel in the driver.
24944 *
24945 * Since this is accessed via an IOCTL call, we go through the
24946 * standard path, so that if the device was powered down, then
24947 * it would be 'awakened' to handle the command.
24948 */
24949 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE,
24950 SD_PATH_STANDARD);
24951
24952 kmem_free(com, sizeof (*com));
24953
24954 /* Restore the device and soft state target block size */
24955 if (sr_sector_mode(dev, restore_blksize) != 0) {
24956 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
24957 "can't do switch back to mode 1\n");
24958 /*
24959 * If sd_send_scsi_READ succeeded we still need to report
24960 * an error because we failed to reset the block size
24961 */
24962 if (rval == 0) {
24963 rval = EIO;
24964 }
24965 }
24966
24967 done:
24968 SD_TRACE(SD_LOG_ATTACH_DETACH, un,
24969 "sd_read_mode2: exit: un:0x%p\n", un);
24970
24971 return (rval);
24972 }
24973
24974
24975 /*
24976 * Function: sr_sector_mode()
24977 *
24978 * Description: This utility function is used by sr_read_mode2 to set the
24979 * target block size based on the user specified size. This is a
24980 * legacy implementation based upon a vendor specific mode page.
24981 *
24982 * Arguments: dev - the device 'dev_t'
24983 * blksize - flag indicating if block size is being set to 2336 or
24984 * 512.
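 *
 * Illustrative layout of the select data built below (inferred from
 * the code, not from a published spec): bytes 10-11 carry the
 * requested block size big endian, byte 12 (0x01) is the mode page
 * code and byte 13 (0x06) its length; bit 0 of byte 14 selects the
 * 2336 byte mode.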
24985 *
24986 * Return Code: the code returned by sd_send_scsi_MODE_SENSE() or
24987 * sd_send_scsi_MODE_SELECT()
24988 * ENXIO if fail ddi_get_soft_state
24990 */
24991
24992 static int
24993 sr_sector_mode(dev_t dev, uint32_t blksize)
24994 {
24995 struct sd_lun *un;
24996 uchar_t *sense;
24997 uchar_t *select;
24998 int rval;
24999
25000 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
25001 (un->un_state == SD_STATE_OFFLINE)) {
25002 return (ENXIO);
25003 }
25004
25005 sense = kmem_zalloc(20, KM_SLEEP);
25006
25007 /* Note: This is a vendor specific mode page (0x81) */
25008 if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, sense, 20, 0x81,
25009 SD_PATH_STANDARD)) != 0) {
25010 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un,
25011 "sr_sector_mode: Mode Sense failed\n");
25012 kmem_free(sense, 20);
25013 return (rval);
25014 }
25015 select = kmem_zalloc(20, KM_SLEEP);
25016 select[3] = 0x08;
25017 select[10] = ((blksize >> 8) & 0xff);
25018 select[11] = (blksize & 0xff);
25019 select[12] = 0x01;
25020 select[13] = 0x06;
25021 select[14] = sense[14];
25022 select[15] = sense[15];
25023 if (blksize == SD_MODE2_BLKSIZE) {
25024 select[14] |= 0x01;
25025 }
25026
25027 if ((rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, select, 20,
25028 SD_DONTSAVE_PAGE, SD_PATH_STANDARD)) != 0) {
25029 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un,
25030 "sr_sector_mode: Mode Select failed\n");
25031 } else {
25032 /*
25033 * Only update the softstate block size if we successfully
25034 * changed the device block mode.
25035 */
25036 mutex_enter(SD_MUTEX(un));
25037 sd_update_block_info(un, blksize, 0);
25038 mutex_exit(SD_MUTEX(un));
25039 }
25040 kmem_free(sense, 20);
25041 kmem_free(select, 20);
25042 return (rval);
25043 }
25044
25045
25046 /*
25047 * Function: sr_read_cdda()
25048 *
25049 * Description: This routine is the driver entry point for handling CD-ROM
25050 * ioctl requests to return CD-DA or subcode data. (CDROMCDDA) If
25051 * the target supports CDDA these requests are handled via a vendor
25052 * specific command (0xD8). If the target does not support CDDA
25053 * these requests are handled via the READ CD command (0xBE).
25054 *
25055 * Arguments: dev - the device 'dev_t'
25056 * data - pointer to user provided CD-DA structure specifying
25057 * the track starting address, transfer length, and
25058 * subcode options.
25059 * flag - this argument is a pass through to ddi_copyxxx()
25060 * directly from the mode argument of ioctl().
25061 * 25062 * Return Code: the code returned by sd_send_scsi_cmd() 25063 * EFAULT if ddi_copyxxx() fails 25064 * ENXIO if fail ddi_get_soft_state 25065 * EINVAL if invalid arguments are provided 25066 * ENOTTY 25067 */ 25068 25069 static int 25070 sr_read_cdda(dev_t dev, caddr_t data, int flag) 25071 { 25072 struct sd_lun *un; 25073 struct uscsi_cmd *com; 25074 struct cdrom_cdda *cdda; 25075 int rval; 25076 size_t buflen; 25077 char cdb[CDB_GROUP5]; 25078 25079 #ifdef _MULTI_DATAMODEL 25080 /* To support ILP32 applications in an LP64 world */ 25081 struct cdrom_cdda32 cdrom_cdda32; 25082 struct cdrom_cdda32 *cdda32 = &cdrom_cdda32; 25083 #endif /* _MULTI_DATAMODEL */ 25084 25085 if (data == NULL) { 25086 return (EINVAL); 25087 } 25088 25089 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 25090 return (ENXIO); 25091 } 25092 25093 cdda = kmem_zalloc(sizeof (struct cdrom_cdda), KM_SLEEP); 25094 25095 #ifdef _MULTI_DATAMODEL 25096 switch (ddi_model_convert_from(flag & FMODELS)) { 25097 case DDI_MODEL_ILP32: 25098 if (ddi_copyin(data, cdda32, sizeof (*cdda32), flag)) { 25099 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25100 "sr_read_cdda: ddi_copyin Failed\n"); 25101 kmem_free(cdda, sizeof (struct cdrom_cdda)); 25102 return (EFAULT); 25103 } 25104 /* Convert the ILP32 uscsi data from the application to LP64 */ 25105 cdrom_cdda32tocdrom_cdda(cdda32, cdda); 25106 break; 25107 case DDI_MODEL_NONE: 25108 if (ddi_copyin(data, cdda, sizeof (struct cdrom_cdda), flag)) { 25109 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25110 "sr_read_cdda: ddi_copyin Failed\n"); 25111 kmem_free(cdda, sizeof (struct cdrom_cdda)); 25112 return (EFAULT); 25113 } 25114 break; 25115 } 25116 #else /* ! _MULTI_DATAMODEL */ 25117 if (ddi_copyin(data, cdda, sizeof (struct cdrom_cdda), flag)) { 25118 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25119 "sr_read_cdda: ddi_copyin Failed\n"); 25120 kmem_free(cdda, sizeof (struct cdrom_cdda)); 25121 return (EFAULT); 25122 } 25123 #endif /* _MULTI_DATAMODEL */ 25124 25125 /* 25126 * Since MMC-2 expects max 3 bytes for length, check if the 25127 * length input is greater than 3 bytes 25128 */ 25129 if ((cdda->cdda_length & 0xFF000000) != 0) { 25130 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_read_cdda: " 25131 "cdrom transfer length too large: %d (limit %d)\n", 25132 cdda->cdda_length, 0xFFFFFF); 25133 kmem_free(cdda, sizeof (struct cdrom_cdda)); 25134 return (EINVAL); 25135 } 25136 25137 switch (cdda->cdda_subcode) { 25138 case CDROM_DA_NO_SUBCODE: 25139 buflen = CDROM_BLK_2352 * cdda->cdda_length; 25140 break; 25141 case CDROM_DA_SUBQ: 25142 buflen = CDROM_BLK_2368 * cdda->cdda_length; 25143 break; 25144 case CDROM_DA_ALL_SUBCODE: 25145 buflen = CDROM_BLK_2448 * cdda->cdda_length; 25146 break; 25147 case CDROM_DA_SUBCODE_ONLY: 25148 buflen = CDROM_BLK_SUBCODE * cdda->cdda_length; 25149 break; 25150 default: 25151 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25152 "sr_read_cdda: Subcode '0x%x' Not Supported\n", 25153 cdda->cdda_subcode); 25154 kmem_free(cdda, sizeof (struct cdrom_cdda)); 25155 return (EINVAL); 25156 } 25157 25158 /* Build and send the command */ 25159 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 25160 bzero(cdb, CDB_GROUP5); 25161 25162 if (un->un_f_cfg_cdda == TRUE) { 25163 cdb[0] = (char)SCMD_READ_CD; 25164 cdb[1] = 0x04; 25165 cdb[2] = (((cdda->cdda_addr) & 0xff000000) >> 24); 25166 cdb[3] = (((cdda->cdda_addr) & 0x00ff0000) >> 16); 25167 cdb[4] = (((cdda->cdda_addr) & 0x0000ff00) >> 8); 25168 cdb[5] = ((cdda->cdda_addr) & 0x000000ff); 25169 
cdb[6] = (((cdda->cdda_length) & 0x00ff0000) >> 16); 25170 cdb[7] = (((cdda->cdda_length) & 0x0000ff00) >> 8); 25171 cdb[8] = ((cdda->cdda_length) & 0x000000ff); 25172 cdb[9] = 0x10; 25173 switch (cdda->cdda_subcode) { 25174 case CDROM_DA_NO_SUBCODE : 25175 cdb[10] = 0x0; 25176 break; 25177 case CDROM_DA_SUBQ : 25178 cdb[10] = 0x2; 25179 break; 25180 case CDROM_DA_ALL_SUBCODE : 25181 cdb[10] = 0x1; 25182 break; 25183 case CDROM_DA_SUBCODE_ONLY : 25184 /* FALLTHROUGH */ 25185 default : 25186 kmem_free(cdda, sizeof (struct cdrom_cdda)); 25187 kmem_free(com, sizeof (*com)); 25188 return (ENOTTY); 25189 } 25190 } else { 25191 cdb[0] = (char)SCMD_READ_CDDA; 25192 cdb[2] = (((cdda->cdda_addr) & 0xff000000) >> 24); 25193 cdb[3] = (((cdda->cdda_addr) & 0x00ff0000) >> 16); 25194 cdb[4] = (((cdda->cdda_addr) & 0x0000ff00) >> 8); 25195 cdb[5] = ((cdda->cdda_addr) & 0x000000ff); 25196 cdb[6] = (((cdda->cdda_length) & 0xff000000) >> 24); 25197 cdb[7] = (((cdda->cdda_length) & 0x00ff0000) >> 16); 25198 cdb[8] = (((cdda->cdda_length) & 0x0000ff00) >> 8); 25199 cdb[9] = ((cdda->cdda_length) & 0x000000ff); 25200 cdb[10] = cdda->cdda_subcode; 25201 } 25202 25203 com->uscsi_cdb = cdb; 25204 com->uscsi_cdblen = CDB_GROUP5; 25205 com->uscsi_bufaddr = (caddr_t)cdda->cdda_data; 25206 com->uscsi_buflen = buflen; 25207 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 25208 25209 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE, 25210 SD_PATH_STANDARD); 25211 25212 kmem_free(cdda, sizeof (struct cdrom_cdda)); 25213 kmem_free(com, sizeof (*com)); 25214 return (rval); 25215 } 25216 25217 25218 /* 25219 * Function: sr_read_cdxa() 25220 * 25221 * Description: This routine is the driver entry point for handling CD-ROM 25222 * ioctl requests to return CD-XA (Extended Architecture) data. 25223 * (CDROMCDXA). 25224 * 25225 * Arguments: dev - the device 'dev_t' 25226 * data - pointer to user provided CD-XA structure specifying 25227 * the data starting address, transfer length, and format 25228 * flag - this argument is a pass through to ddi_copyxxx() 25229 * directly from the mode argument of ioctl(). 25230 * 25231 * Return Code: the code returned by sd_send_scsi_cmd() 25232 * EFAULT if ddi_copyxxx() fails 25233 * ENXIO if fail ddi_get_soft_state 25234 * EINVAL if data pointer is NULL 25235 */ 25236 25237 static int 25238 sr_read_cdxa(dev_t dev, caddr_t data, int flag) 25239 { 25240 struct sd_lun *un; 25241 struct uscsi_cmd *com; 25242 struct cdrom_cdxa *cdxa; 25243 int rval; 25244 size_t buflen; 25245 char cdb[CDB_GROUP5]; 25246 uchar_t read_flags; 25247 25248 #ifdef _MULTI_DATAMODEL 25249 /* To support ILP32 applications in an LP64 world */ 25250 struct cdrom_cdxa32 cdrom_cdxa32; 25251 struct cdrom_cdxa32 *cdxa32 = &cdrom_cdxa32; 25252 #endif /* _MULTI_DATAMODEL */ 25253 25254 if (data == NULL) { 25255 return (EINVAL); 25256 } 25257 25258 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 25259 return (ENXIO); 25260 } 25261 25262 cdxa = kmem_zalloc(sizeof (struct cdrom_cdxa), KM_SLEEP); 25263 25264 #ifdef _MULTI_DATAMODEL 25265 switch (ddi_model_convert_from(flag & FMODELS)) { 25266 case DDI_MODEL_ILP32: 25267 if (ddi_copyin(data, cdxa32, sizeof (*cdxa32), flag)) { 25268 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 25269 return (EFAULT); 25270 } 25271 /* 25272 * Convert the ILP32 uscsi data from the 25273 * application to LP64 for internal use. 
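 *
 * Roughly (a sketch of what the conversion helper does; see the
 * cdrom_cdxa32 definition for the authoritative field list):
 *
 *	cdxa->cdxa_addr = cdxa32->cdxa_addr;
 *	cdxa->cdxa_length = cdxa32->cdxa_length;
 *	cdxa->cdxa_format = cdxa32->cdxa_format;
 *	cdxa->cdxa_data = (caddr_t)(uintptr_t)cdxa32->cdxa_data;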
25274 */
25275 cdrom_cdxa32tocdrom_cdxa(cdxa32, cdxa);
25276 break;
25277 case DDI_MODEL_NONE:
25278 if (ddi_copyin(data, cdxa, sizeof (struct cdrom_cdxa), flag)) {
25279 kmem_free(cdxa, sizeof (struct cdrom_cdxa));
25280 return (EFAULT);
25281 }
25282 break;
25283 }
25284 #else /* ! _MULTI_DATAMODEL */
25285 if (ddi_copyin(data, cdxa, sizeof (struct cdrom_cdxa), flag)) {
25286 kmem_free(cdxa, sizeof (struct cdrom_cdxa));
25287 return (EFAULT);
25288 }
25289 #endif /* _MULTI_DATAMODEL */
25290
25291 /*
25292 * Since MMC-2 expects max 3 bytes for length, check if the
25293 * length input is greater than 3 bytes
25294 */
25295 if ((cdxa->cdxa_length & 0xFF000000) != 0) {
25296 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_read_cdxa: "
25297 "cdrom transfer length too large: %d (limit %d)\n",
25298 cdxa->cdxa_length, 0xFFFFFF);
25299 kmem_free(cdxa, sizeof (struct cdrom_cdxa));
25300 return (EINVAL);
25301 }
25302
25303 switch (cdxa->cdxa_format) {
25304 case CDROM_XA_DATA:
25305 buflen = CDROM_BLK_2048 * cdxa->cdxa_length;
25306 read_flags = 0x10;
25307 break;
25308 case CDROM_XA_SECTOR_DATA:
25309 buflen = CDROM_BLK_2352 * cdxa->cdxa_length;
25310 read_flags = 0xf8;
25311 break;
25312 case CDROM_XA_DATA_W_ERROR:
25313 buflen = CDROM_BLK_2646 * cdxa->cdxa_length;
25314 read_flags = 0xfc;
25315 break;
25316 default:
25317 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
25318 "sr_read_cdxa: Format '0x%x' Not Supported\n",
25319 cdxa->cdxa_format);
25320 kmem_free(cdxa, sizeof (struct cdrom_cdxa));
25321 return (EINVAL);
25322 }
25323
25324 com = kmem_zalloc(sizeof (*com), KM_SLEEP);
25325 bzero(cdb, CDB_GROUP5);
25326 if (un->un_f_mmc_cap == TRUE) {
25327 cdb[0] = (char)SCMD_READ_CD;
25328 cdb[2] = (((cdxa->cdxa_addr) & 0xff000000) >> 24);
25329 cdb[3] = (((cdxa->cdxa_addr) & 0x00ff0000) >> 16);
25330 cdb[4] = (((cdxa->cdxa_addr) & 0x0000ff00) >> 8);
25331 cdb[5] = ((cdxa->cdxa_addr) & 0x000000ff);
25332 cdb[6] = (((cdxa->cdxa_length) & 0x00ff0000) >> 16);
25333 cdb[7] = (((cdxa->cdxa_length) & 0x0000ff00) >> 8);
25334 cdb[8] = ((cdxa->cdxa_length) & 0x000000ff);
25335 cdb[9] = (char)read_flags;
25336 } else {
25337 /*
25338 * Note: A vendor specific command (0xDB) is being used here to
25339 * request a read of CD-XA data.
25340 */ 25341 cdb[0] = (char)SCMD_READ_CDXA; 25342 cdb[2] = (((cdxa->cdxa_addr) & 0xff000000) >> 24); 25343 cdb[3] = (((cdxa->cdxa_addr) & 0x00ff0000) >> 16); 25344 cdb[4] = (((cdxa->cdxa_addr) & 0x0000ff00) >> 8); 25345 cdb[5] = ((cdxa->cdxa_addr) & 0x000000ff); 25346 cdb[6] = (((cdxa->cdxa_length) & 0xff000000) >> 24); 25347 cdb[7] = (((cdxa->cdxa_length) & 0x00ff0000) >> 16); 25348 cdb[8] = (((cdxa->cdxa_length) & 0x0000ff00) >> 8); 25349 cdb[9] = ((cdxa->cdxa_length) & 0x000000ff); 25350 cdb[10] = cdxa->cdxa_format; 25351 } 25352 com->uscsi_cdb = cdb; 25353 com->uscsi_cdblen = CDB_GROUP5; 25354 com->uscsi_bufaddr = (caddr_t)cdxa->cdxa_data; 25355 com->uscsi_buflen = buflen; 25356 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 25357 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE, 25358 SD_PATH_STANDARD); 25359 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 25360 kmem_free(com, sizeof (*com)); 25361 return (rval); 25362 } 25363 25364 25365 /* 25366 * Function: sr_eject() 25367 * 25368 * Description: This routine is the driver entry point for handling CD-ROM 25369 * eject ioctl requests (FDEJECT, DKIOCEJECT, CDROMEJECT) 25370 * 25371 * Arguments: dev - the device 'dev_t' 25372 * 25373 * Return Code: the code returned by sd_send_scsi_cmd() 25374 */ 25375 25376 static int 25377 sr_eject(dev_t dev) 25378 { 25379 struct sd_lun *un; 25380 int rval; 25381 25382 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 25383 (un->un_state == SD_STATE_OFFLINE)) { 25384 return (ENXIO); 25385 } 25386 25387 /* 25388 * To prevent race conditions with the eject 25389 * command, keep track of an eject command as 25390 * it progresses. If we are already handling 25391 * an eject command in the driver for the given 25392 * unit and another request to eject is received 25393 * immediately return EAGAIN so we don't lose 25394 * the command if the current eject command fails. 25395 */ 25396 mutex_enter(SD_MUTEX(un)); 25397 if (un->un_f_ejecting == TRUE) { 25398 mutex_exit(SD_MUTEX(un)); 25399 return (EAGAIN); 25400 } 25401 un->un_f_ejecting = TRUE; 25402 mutex_exit(SD_MUTEX(un)); 25403 25404 if ((rval = sd_send_scsi_DOORLOCK(un, SD_REMOVAL_ALLOW, 25405 SD_PATH_STANDARD)) != 0) { 25406 mutex_enter(SD_MUTEX(un)); 25407 un->un_f_ejecting = FALSE; 25408 mutex_exit(SD_MUTEX(un)); 25409 return (rval); 25410 } 25411 25412 rval = sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_EJECT, 25413 SD_PATH_STANDARD); 25414 25415 if (rval == 0) { 25416 mutex_enter(SD_MUTEX(un)); 25417 sr_ejected(un); 25418 un->un_mediastate = DKIO_EJECTED; 25419 un->un_f_ejecting = FALSE; 25420 cv_broadcast(&un->un_state_cv); 25421 mutex_exit(SD_MUTEX(un)); 25422 } else { 25423 mutex_enter(SD_MUTEX(un)); 25424 un->un_f_ejecting = FALSE; 25425 mutex_exit(SD_MUTEX(un)); 25426 } 25427 return (rval); 25428 } 25429 25430 25431 /* 25432 * Function: sr_ejected() 25433 * 25434 * Description: This routine updates the soft state structure to invalidate the 25435 * geometry information after the media has been ejected or a 25436 * media eject has been detected. 
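 *
 * Illustrative userland view (a sketch only): after a successful
 * CDROMEJECT the new media state can be observed with DKIOCSTATE;
 * passing DKIO_NONE is assumed to return the current state:
 *
 *	#include <sys/dkio.h>
 *
 *	enum dkio_state state = DKIO_NONE;
 *	(void) ioctl(fd, CDROMEJECT);
 *	if (ioctl(fd, DKIOCSTATE, &state) == 0 &&
 *	    state == DKIO_EJECTED)
 *		(void) printf("media ejected\n");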
25437 *
25438 * Arguments: un - driver soft state (unit) structure
25439 */
25440
25441 static void
25442 sr_ejected(struct sd_lun *un)
25443 {
25444 struct sd_errstats *stp;
25445
25446 ASSERT(un != NULL);
25447 ASSERT(mutex_owned(SD_MUTEX(un)));
25448
25449 un->un_f_blockcount_is_valid = FALSE;
25450 un->un_f_tgt_blocksize_is_valid = FALSE;
25451 mutex_exit(SD_MUTEX(un));
25452 cmlb_invalidate(un->un_cmlbhandle, (void *)SD_PATH_DIRECT_PRIORITY);
25453 mutex_enter(SD_MUTEX(un));
25454
25455 if (un->un_errstats != NULL) {
25456 stp = (struct sd_errstats *)un->un_errstats->ks_data;
25457 stp->sd_capacity.value.ui64 = 0;
25458 }
25459
25460 /* remove "capacity-of-device" properties */
25461 (void) ddi_prop_remove(DDI_DEV_T_NONE, SD_DEVINFO(un),
25462 "device-nblocks");
25463 (void) ddi_prop_remove(DDI_DEV_T_NONE, SD_DEVINFO(un),
25464 "device-blksize");
25465 }
25466
25467
25468 /*
25469 * Function: sr_check_wp()
25470 *
25471 * Description: This routine checks the write protection of removable
25472 * media disks and hotpluggable devices via the write protect bit of
25473 * the Mode Page Header device specific field. Some devices choke
25474 * on an unsupported mode page. To work around this issue, this
25475 * routine uses the 0x3f mode page (request for all pages) for
25476 * all device types.
25477 *
25478 * Arguments: dev - the device 'dev_t'
25479 *
25480 * Return Code: int indicating if the device is write protected (1) or not (0)
25481 *
25482 * Context: Kernel thread.
25483 *
25484 */
25485
25486 static int
25487 sr_check_wp(dev_t dev)
25488 {
25489 struct sd_lun *un;
25490 uchar_t device_specific;
25491 uchar_t *sense;
25492 int hdrlen;
25493 int rval = FALSE;
25494
25495 /*
25496 * Note: The return codes for this routine should be reworked to
25497 * properly handle the case of a NULL softstate.
25498 */
25499 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
25500 return (FALSE);
25501 }
25502
25503 if (un->un_f_cfg_is_atapi == TRUE) {
25504 /*
25505 * The mode page contents are not required; set the allocation
25506 * length for the mode page header only
25507 */
25508 hdrlen = MODE_HEADER_LENGTH_GRP2;
25509 sense = kmem_zalloc(hdrlen, KM_SLEEP);
25510 if (sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, sense, hdrlen,
25511 MODEPAGE_ALLPAGES, SD_PATH_STANDARD) != 0)
25512 goto err_exit;
25513 device_specific =
25514 ((struct mode_header_grp2 *)sense)->device_specific;
25515 } else {
25516 hdrlen = MODE_HEADER_LENGTH;
25517 sense = kmem_zalloc(hdrlen, KM_SLEEP);
25518 if (sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, sense, hdrlen,
25519 MODEPAGE_ALLPAGES, SD_PATH_STANDARD) != 0)
25520 goto err_exit;
25521 device_specific =
25522 ((struct mode_header *)sense)->device_specific;
25523 }
25524
25525 /*
25526 * If the mode sense above failed we branched to err_exit and return
25527 * FALSE; not all disks understand this query and are assumed not to
25528 * be writable. Otherwise report the write protect bit.
25529 */
25530 if (device_specific & WRITE_PROTECT) {
25531 rval = TRUE;
25532 }
25533
25534 err_exit:
25535 kmem_free(sense, hdrlen);
25536 return (rval);
25537 }
25538
25539 /*
25540 * Function: sr_volume_ctrl()
25541 *
25542 * Description: This routine is the driver entry point for handling CD-ROM
25543 * audio output volume ioctl requests. (CDROMVOLCTRL)
25544 *
25545 * Arguments: dev - the device 'dev_t'
25546 * data - pointer to user audio volume control structure
25547 * flag - this argument is a pass through to ddi_copyxxx()
25548 * directly from the mode argument of ioctl().
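 *
 * Illustrative userland usage (a sketch only): setting the two
 * supported channels to three quarter volume; channels 2 and 3 are
 * ignored by this routine:
 *
 *	struct cdrom_volctrl v;
 *	v.channel0 = 192;	-- 0xff is maximum volume
 *	v.channel1 = 192;
 *	(void) ioctl(fd, CDROMVOLCTRL, &v);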
25549 * 25550 * Return Code: the code returned by sd_send_scsi_cmd() 25551 * EFAULT if ddi_copyxxx() fails 25552 * ENXIO if fail ddi_get_soft_state 25553 * EINVAL if data pointer is NULL 25554 * 25555 */ 25556 25557 static int 25558 sr_volume_ctrl(dev_t dev, caddr_t data, int flag) 25559 { 25560 struct sd_lun *un; 25561 struct cdrom_volctrl volume; 25562 struct cdrom_volctrl *vol = &volume; 25563 uchar_t *sense_page; 25564 uchar_t *select_page; 25565 uchar_t *sense; 25566 uchar_t *select; 25567 int sense_buflen; 25568 int select_buflen; 25569 int rval; 25570 25571 if (data == NULL) { 25572 return (EINVAL); 25573 } 25574 25575 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 25576 (un->un_state == SD_STATE_OFFLINE)) { 25577 return (ENXIO); 25578 } 25579 25580 if (ddi_copyin(data, vol, sizeof (struct cdrom_volctrl), flag)) { 25581 return (EFAULT); 25582 } 25583 25584 if ((un->un_f_cfg_is_atapi == TRUE) || (un->un_f_mmc_cap == TRUE)) { 25585 struct mode_header_grp2 *sense_mhp; 25586 struct mode_header_grp2 *select_mhp; 25587 int bd_len; 25588 25589 sense_buflen = MODE_PARAM_LENGTH_GRP2 + MODEPAGE_AUDIO_CTRL_LEN; 25590 select_buflen = MODE_HEADER_LENGTH_GRP2 + 25591 MODEPAGE_AUDIO_CTRL_LEN; 25592 sense = kmem_zalloc(sense_buflen, KM_SLEEP); 25593 select = kmem_zalloc(select_buflen, KM_SLEEP); 25594 if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, sense, 25595 sense_buflen, MODEPAGE_AUDIO_CTRL, 25596 SD_PATH_STANDARD)) != 0) { 25597 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 25598 "sr_volume_ctrl: Mode Sense Failed\n"); 25599 kmem_free(sense, sense_buflen); 25600 kmem_free(select, select_buflen); 25601 return (rval); 25602 } 25603 sense_mhp = (struct mode_header_grp2 *)sense; 25604 select_mhp = (struct mode_header_grp2 *)select; 25605 bd_len = (sense_mhp->bdesc_length_hi << 8) | 25606 sense_mhp->bdesc_length_lo; 25607 if (bd_len > MODE_BLK_DESC_LENGTH) { 25608 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25609 "sr_volume_ctrl: Mode Sense returned invalid " 25610 "block descriptor length\n"); 25611 kmem_free(sense, sense_buflen); 25612 kmem_free(select, select_buflen); 25613 return (EIO); 25614 } 25615 sense_page = (uchar_t *) 25616 (sense + MODE_HEADER_LENGTH_GRP2 + bd_len); 25617 select_page = (uchar_t *)(select + MODE_HEADER_LENGTH_GRP2); 25618 select_mhp->length_msb = 0; 25619 select_mhp->length_lsb = 0; 25620 select_mhp->bdesc_length_hi = 0; 25621 select_mhp->bdesc_length_lo = 0; 25622 } else { 25623 struct mode_header *sense_mhp, *select_mhp; 25624 25625 sense_buflen = MODE_PARAM_LENGTH + MODEPAGE_AUDIO_CTRL_LEN; 25626 select_buflen = MODE_HEADER_LENGTH + MODEPAGE_AUDIO_CTRL_LEN; 25627 sense = kmem_zalloc(sense_buflen, KM_SLEEP); 25628 select = kmem_zalloc(select_buflen, KM_SLEEP); 25629 if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, sense, 25630 sense_buflen, MODEPAGE_AUDIO_CTRL, 25631 SD_PATH_STANDARD)) != 0) { 25632 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25633 "sr_volume_ctrl: Mode Sense Failed\n"); 25634 kmem_free(sense, sense_buflen); 25635 kmem_free(select, select_buflen); 25636 return (rval); 25637 } 25638 sense_mhp = (struct mode_header *)sense; 25639 select_mhp = (struct mode_header *)select; 25640 if (sense_mhp->bdesc_length > MODE_BLK_DESC_LENGTH) { 25641 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25642 "sr_volume_ctrl: Mode Sense returned invalid " 25643 "block descriptor length\n"); 25644 kmem_free(sense, sense_buflen); 25645 kmem_free(select, select_buflen); 25646 return (EIO); 25647 } 25648 sense_page = (uchar_t *) 25649 (sense + MODE_HEADER_LENGTH + 
sense_mhp->bdesc_length); 25650 select_page = (uchar_t *)(select + MODE_HEADER_LENGTH); 25651 select_mhp->length = 0; 25652 select_mhp->bdesc_length = 0; 25653 } 25654 /* 25655 * Note: An audio control data structure could be created and overlayed 25656 * on the following in place of the array indexing method implemented. 25657 */ 25658 25659 /* Build the select data for the user volume data */ 25660 select_page[0] = MODEPAGE_AUDIO_CTRL; 25661 select_page[1] = 0xE; 25662 /* Set the immediate bit */ 25663 select_page[2] = 0x04; 25664 /* Zero out reserved fields */ 25665 select_page[3] = 0x00; 25666 select_page[4] = 0x00; 25667 /* Return sense data for fields not to be modified */ 25668 select_page[5] = sense_page[5]; 25669 select_page[6] = sense_page[6]; 25670 select_page[7] = sense_page[7]; 25671 /* Set the user specified volume levels for channel 0 and 1 */ 25672 select_page[8] = 0x01; 25673 select_page[9] = vol->channel0; 25674 select_page[10] = 0x02; 25675 select_page[11] = vol->channel1; 25676 /* Channel 2 and 3 are currently unsupported so return the sense data */ 25677 select_page[12] = sense_page[12]; 25678 select_page[13] = sense_page[13]; 25679 select_page[14] = sense_page[14]; 25680 select_page[15] = sense_page[15]; 25681 25682 if ((un->un_f_cfg_is_atapi == TRUE) || (un->un_f_mmc_cap == TRUE)) { 25683 rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP1, select, 25684 select_buflen, SD_DONTSAVE_PAGE, SD_PATH_STANDARD); 25685 } else { 25686 rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, select, 25687 select_buflen, SD_DONTSAVE_PAGE, SD_PATH_STANDARD); 25688 } 25689 25690 kmem_free(sense, sense_buflen); 25691 kmem_free(select, select_buflen); 25692 return (rval); 25693 } 25694 25695 25696 /* 25697 * Function: sr_read_sony_session_offset() 25698 * 25699 * Description: This routine is the driver entry point for handling CD-ROM 25700 * ioctl requests for session offset information. (CDROMREADOFFSET) 25701 * The address of the first track in the last session of a 25702 * multi-session CD-ROM is returned 25703 * 25704 * Note: This routine uses a vendor specific key value in the 25705 * command control field without implementing any vendor check here 25706 * or in the ioctl routine. 25707 * 25708 * Arguments: dev - the device 'dev_t' 25709 * data - pointer to an int to hold the requested address 25710 * flag - this argument is a pass through to ddi_copyxxx() 25711 * directly from the mode argument of ioctl(). 25712 * 25713 * Return Code: the code returned by sd_send_scsi_cmd() 25714 * EFAULT if ddi_copyxxx() fails 25715 * ENXIO if fail ddi_get_soft_state 25716 * EINVAL if data pointer is NULL 25717 */ 25718 25719 static int 25720 sr_read_sony_session_offset(dev_t dev, caddr_t data, int flag) 25721 { 25722 struct sd_lun *un; 25723 struct uscsi_cmd *com; 25724 caddr_t buffer; 25725 char cdb[CDB_GROUP1]; 25726 int session_offset = 0; 25727 int rval; 25728 25729 if (data == NULL) { 25730 return (EINVAL); 25731 } 25732 25733 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 25734 (un->un_state == SD_STATE_OFFLINE)) { 25735 return (ENXIO); 25736 } 25737 25738 buffer = kmem_zalloc((size_t)SONY_SESSION_OFFSET_LEN, KM_SLEEP); 25739 bzero(cdb, CDB_GROUP1); 25740 cdb[0] = SCMD_READ_TOC; 25741 /* 25742 * Bytes 7 & 8 are the 12 byte allocation length for a single entry. 25743 * (4 byte TOC response header + 8 byte response data) 25744 */ 25745 cdb[8] = SONY_SESSION_OFFSET_LEN; 25746 /* Byte 9 is the control byte. 
A vendor specific value is used */
25747	cdb[9] = SONY_SESSION_OFFSET_KEY;
25748	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
25749	com->uscsi_cdb = cdb;
25750	com->uscsi_cdblen = CDB_GROUP1;
25751	com->uscsi_bufaddr = buffer;
25752	com->uscsi_buflen = SONY_SESSION_OFFSET_LEN;
25753	com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;
25754
25755	rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
25756	    SD_PATH_STANDARD);
25757	if (rval != 0) {
25758		kmem_free(buffer, SONY_SESSION_OFFSET_LEN);
25759		kmem_free(com, sizeof (*com));
25760		return (rval);
25761	}
25762	if (buffer[1] == SONY_SESSION_OFFSET_VALID) {
25763		session_offset =
25764		    ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) +
25765		    ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]);
25766		/*
25767		 * The offset is returned in units of the current lbasize
25768		 * blocks. Convert it to 2k blocks before returning it to the user.
25769		 */
25770		if (un->un_tgt_blocksize == CDROM_BLK_512) {
25771			session_offset >>= 2;
25772		} else if (un->un_tgt_blocksize == CDROM_BLK_1024) {
25773			session_offset >>= 1;
25774		}
25775	}
25776
25777	if (ddi_copyout(&session_offset, data, sizeof (int), flag) != 0) {
25778		rval = EFAULT;
25779	}
25780
25781	kmem_free(buffer, SONY_SESSION_OFFSET_LEN);
25782	kmem_free(com, sizeof (*com));
25783	return (rval);
25784 }
25785
25786
25787 /*
25788  * Function: sd_wm_cache_constructor()
25789  *
25790  * Description: Cache Constructor for the wmap cache for the read/modify/write
25791  *		devices.
25792  *
25793  * Arguments: wm - A pointer to the sd_w_map to be initialized.
25794  *		un - sd_lun structure for the device.
25795  *		flags - the km flags passed to the constructor
25796  *
25797  * Return Code: 0 on success.
25798  *		-1 on failure.
25799  */
25800
25801 /*ARGSUSED*/
25802 static int
25803 sd_wm_cache_constructor(void *wm, void *un, int flags)
25804 {
25805	bzero(wm, sizeof (struct sd_w_map));
25806	cv_init(&((struct sd_w_map *)wm)->wm_avail, NULL, CV_DRIVER, NULL);
25807	return (0);
25808 }
25809
25810
25811 /*
25812  * Function: sd_wm_cache_destructor()
25813  *
25814  * Description: Cache destructor for the wmap cache for the read/modify/write
25815  *		devices.
25816  *
25817  * Arguments: wm - A pointer to the sd_w_map to be destroyed.
25818  *		un - sd_lun structure for the device.
25819  */
25820 /*ARGSUSED*/
25821 static void
25822 sd_wm_cache_destructor(void *wm, void *un)
25823 {
25824	cv_destroy(&((struct sd_w_map *)wm)->wm_avail);
25825 }
25826
25827
25828 /*
25829  * Function: sd_range_lock()
25830  *
25831  * Description: Lock the specified range of blocks to ensure that a
25832  *		read-modify-write operation is atomic and that no other I/O
25833  *		writes to the same location. The range is specified in terms
25834  *		of start and end blocks. Block numbers are actual media block
25835  *		numbers, not system block numbers.
25836  *
25837  * Arguments: un	- sd_lun structure for the device.
25838  *		startb - The starting block number
25839  *		endb - The end block number
25840  *		typ - type of i/o - simple/read_modify_write
25841  *
25842  * Return Code: wm  - pointer to the wmap structure.
25843  *
25844  * Context: This routine can sleep.
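 *
 *		Illustrative calling pattern (a sketch of how this lock is
 *		paired with sd_range_unlock() within this driver; not a new
 *		interface):
 *
 *			wmp = sd_range_lock(un, startb, endb, SD_WTYPE_RMW);
 *			(read the enclosing target blocks, merge in the new
 *			data, then issue the write)
 *			sd_range_unlock(un, wmp);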
25845  */
25846
25847 static struct sd_w_map *
25848 sd_range_lock(struct sd_lun *un, daddr_t startb, daddr_t endb, ushort_t typ)
25849 {
25850	struct sd_w_map *wmp = NULL;
25851	struct sd_w_map *sl_wmp = NULL;
25852	struct sd_w_map *tmp_wmp;
25853	wm_state state = SD_WM_CHK_LIST;
25854
25855
25856	ASSERT(un != NULL);
25857	ASSERT(!mutex_owned(SD_MUTEX(un)));
25858
25859	mutex_enter(SD_MUTEX(un));
25860
25861	while (state != SD_WM_DONE) {
25862
25863		switch (state) {
25864		case SD_WM_CHK_LIST:
25865			/*
25866			 * This is the starting state. Check the wmap list
25867			 * to see if the range is currently available.
25868			 */
25869			if (!(typ & SD_WTYPE_RMW) && !(un->un_rmw_count)) {
25870				/*
25871				 * If this is a simple write and no rmw
25872				 * i/o is pending then try to lock the
25873				 * range as the range should be available.
25874				 */
25875				state = SD_WM_LOCK_RANGE;
25876			} else {
25877				tmp_wmp = sd_get_range(un, startb, endb);
25878				if (tmp_wmp != NULL) {
25879					if ((wmp != NULL) && ONLIST(un, wmp)) {
25880						/*
25881						 * Should not keep onlist wmps
25882						 * while waiting; this macro
25883						 * will also set wmp = NULL.
25884						 */
25885						FREE_ONLIST_WMAP(un, wmp);
25886					}
25887					/*
25888					 * sl_wmp is the wmap on which the
25889					 * wait is done. Since tmp_wmp points
25890					 * to the in-use wmap, set sl_wmp to
25891					 * tmp_wmp and change the state to sleep.
25892					 */
25893					sl_wmp = tmp_wmp;
25894					state = SD_WM_WAIT_MAP;
25895				} else {
25896					state = SD_WM_LOCK_RANGE;
25897				}
25898
25899			}
25900			break;
25901
25902		case SD_WM_LOCK_RANGE:
25903			ASSERT(un->un_wm_cache);
25904			/*
25905			 * The range needs to be locked; try to get a wmap.
25906			 * First attempt it with KM_NOSLEEP: we want to avoid
25907			 * sleeping if possible, since we would have to release
25908			 * the sd mutex in order to sleep.
25909			 */
25910			if (wmp == NULL)
25911				wmp = kmem_cache_alloc(un->un_wm_cache,
25912				    KM_NOSLEEP);
25913			if (wmp == NULL) {
25914				mutex_exit(SD_MUTEX(un));
25915				_NOTE(DATA_READABLE_WITHOUT_LOCK
25916				    (sd_lun::un_wm_cache))
25917				wmp = kmem_cache_alloc(un->un_wm_cache,
25918				    KM_SLEEP);
25919				mutex_enter(SD_MUTEX(un));
25920				/*
25921				 * We released the mutex, so recheck by going
25922				 * back to the check list state.
25923				 */
25924				state = SD_WM_CHK_LIST;
25925			} else {
25926				/*
25927				 * We exit the state machine since we have
25928				 * the wmap. Do the housekeeping first: place
25929				 * the wmap on the wmap list if it is not on
25930				 * it already, and then set the state to done.
25931				 */
25932				wmp->wm_start = startb;
25933				wmp->wm_end = endb;
25934				wmp->wm_flags = typ | SD_WM_BUSY;
25935				if (typ & SD_WTYPE_RMW) {
25936					un->un_rmw_count++;
25937				}
25938				/*
25939				 * If not already on the list, then link it in.
25940				 */
25941				if (!ONLIST(un, wmp)) {
25942					wmp->wm_next = un->un_wm;
25943					wmp->wm_prev = NULL;
25944					if (wmp->wm_next)
25945						wmp->wm_next->wm_prev = wmp;
25946					un->un_wm = wmp;
25947				}
25948				state = SD_WM_DONE;
25949			}
25950			break;
25951
25952		case SD_WM_WAIT_MAP:
25953			ASSERT(sl_wmp->wm_flags & SD_WM_BUSY);
25954			/*
25955			 * Wait is done on sl_wmp, which is set in the
25956			 * check_list state.
25957			 */
25958			sl_wmp->wm_wanted_count++;
25959			cv_wait(&sl_wmp->wm_avail, SD_MUTEX(un));
25960			sl_wmp->wm_wanted_count--;
25961			/*
25962			 * We can reuse the memory from the completed sl_wmp
25963			 * lock range for our new lock, but only if no one is
25964			 * waiting for it.
25965			 */
25966			ASSERT(!(sl_wmp->wm_flags & SD_WM_BUSY));
25967			if (sl_wmp->wm_wanted_count == 0) {
25968				if (wmp != NULL)
25969					CHK_N_FREEWMP(un, wmp);
25970				wmp = sl_wmp;
25971			}
25972			sl_wmp = NULL;
25973			/*
25974			 * After waking up, we need to recheck the
25975			 * availability of the range.
25976			 */
25977			state = SD_WM_CHK_LIST;
25978			break;
25979
25980		default:
25981			panic("sd_range_lock: "
25982			    "Unknown state %d in sd_range_lock", state);
25983			/*NOTREACHED*/
25984		} /* switch(state) */
25985
25986	} /* while(state != SD_WM_DONE) */
25987
25988	mutex_exit(SD_MUTEX(un));
25989
25990	ASSERT(wmp != NULL);
25991
25992	return (wmp);
25993 }
25994
25995
25996 /*
25997  * Function: sd_get_range()
25998  *
25999  * Description: Find out if there is any I/O overlapping this one.
26000  *		Returns the write map of the first such I/O, NULL otherwise.
26001  *
26002  * Arguments: un	- sd_lun structure for the device.
26003  *		startb - The starting block number
26004  *		endb - The end block number
26005  *
26006  * Return Code: wm  - pointer to the wmap structure.
26007  */
26008
26009 static struct sd_w_map *
26010 sd_get_range(struct sd_lun *un, daddr_t startb, daddr_t endb)
26011 {
26012	struct sd_w_map *wmp;
26013
26014	ASSERT(un != NULL);
26015
26016	for (wmp = un->un_wm; wmp != NULL; wmp = wmp->wm_next) {
26017		if (!(wmp->wm_flags & SD_WM_BUSY)) {
26018			continue;
26019		}
26020		if ((startb >= wmp->wm_start) && (startb <= wmp->wm_end)) {
26021			break;
26022		}
26023		if ((endb >= wmp->wm_start) && (endb <= wmp->wm_end)) {
26024			break;
26025		}
26026	}
26027
26028	return (wmp);
26029 }
26030
26031
26032 /*
26033  * Function: sd_free_inlist_wmap()
26034  *
26035  * Description: Unlink and free a write map struct.
26036  *
26037  * Arguments: un	- sd_lun structure for the device.
26038  *		wmp - sd_w_map which needs to be unlinked.
26039  */
26040
26041 static void
26042 sd_free_inlist_wmap(struct sd_lun *un, struct sd_w_map *wmp)
26043 {
26044	ASSERT(un != NULL);
26045
26046	if (un->un_wm == wmp) {
26047		un->un_wm = wmp->wm_next;
26048	} else {
26049		wmp->wm_prev->wm_next = wmp->wm_next;
26050	}
26051
26052	if (wmp->wm_next) {
26053		wmp->wm_next->wm_prev = wmp->wm_prev;
26054	}
26055
26056	wmp->wm_next = wmp->wm_prev = NULL;
26057
26058	kmem_cache_free(un->un_wm_cache, wmp);
26059 }
26060
26061
26062 /*
26063  * Function: sd_range_unlock()
26064  *
26065  * Description: Unlock the range locked by wm.
26066  *		Free write map if nobody else is waiting on it.
26067  *
26068  * Arguments: un	- sd_lun structure for the device.
26069  *		wm  - sd_w_map which needs to be unlocked.
26070  */
26071
26072 static void
26073 sd_range_unlock(struct sd_lun *un, struct sd_w_map *wm)
26074 {
26075	ASSERT(un != NULL);
26076	ASSERT(wm != NULL);
26077	ASSERT(!mutex_owned(SD_MUTEX(un)));
26078
26079	mutex_enter(SD_MUTEX(un));
26080
26081	if (wm->wm_flags & SD_WTYPE_RMW) {
26082		un->un_rmw_count--;
26083	}
26084
26085	if (wm->wm_wanted_count) {
26086		wm->wm_flags = 0;
26087		/*
26088		 * Broadcast that the wmap is available now.
26089		 */
26090		cv_broadcast(&wm->wm_avail);
26091	} else {
26092		/*
26093		 * If no one is waiting on the map, it should be freed.
26094		 */
26095		sd_free_inlist_wmap(un, wm);
26096	}
26097
26098	mutex_exit(SD_MUTEX(un));
26099 }
26100
26101
26102 /*
26103  * Function: sd_read_modify_write_task
26104  *
26105  * Description: Called from a taskq thread to initiate the write phase of
26106  *		a read-modify-write request. This is used for targets where
26107  *		un->un_sys_blocksize != un->un_tgt_blocksize.
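 *
 *		Example (illustrative numbers): with un_sys_blocksize of 512
 *		and a CD-ROM un_tgt_blocksize of 2048, a 512-byte write must
 *		first read the enclosing 2048-byte target block, merge the
 *		new 512 bytes into it, and then write the full target block
 *		back; this routine sends that final write down the iostart
 *		chain.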
26108  *
26109  * Arguments: arg - a pointer to the buf(9S) struct for the write command.
26110  *
26111  * Context: Called under taskq thread context.
26112  */
26113
26114 static void
26115 sd_read_modify_write_task(void *arg)
26116 {
26117	struct sd_mapblocksize_info	*bsp;
26118	struct buf	*bp;
26119	struct sd_xbuf	*xp;
26120	struct sd_lun	*un;
26121
26122	bp = arg;	/* The bp is given in arg */
26123	ASSERT(bp != NULL);
26124
26125	/* Get the pointer to the layer-private data struct */
26126	xp = SD_GET_XBUF(bp);
26127	ASSERT(xp != NULL);
26128	bsp = xp->xb_private;
26129	ASSERT(bsp != NULL);
26130
26131	un = SD_GET_UN(bp);
26132	ASSERT(un != NULL);
26133	ASSERT(!mutex_owned(SD_MUTEX(un)));
26134
26135	SD_TRACE(SD_LOG_IO_RMMEDIA, un,
26136	    "sd_read_modify_write_task: entry: buf:0x%p\n", bp);
26137
26138	/*
26139	 * This is the write phase of a read-modify-write request, called
26140	 * under the context of a taskq thread in response to the read
26141	 * portion of the rmw request completing in interrupt context. The
26142	 * write request must be sent from here down the iostart
26143	 * chain as if it were being sent from sd_mapblocksize_iostart(), so
26144	 * we use the layer index saved in the layer-private data area.
26145	 */
26146	SD_NEXT_IOSTART(bsp->mbs_layer_index, un, bp);
26147
26148	SD_TRACE(SD_LOG_IO_RMMEDIA, un,
26149	    "sd_read_modify_write_task: exit: buf:0x%p\n", bp);
26150 }
26151
26152
26153 /*
26154  * Function: sddump_do_read_of_rmw()
26155  *
26156  * Description: This routine will be called from sddump. If sddump is called
26157  *		with an I/O that is not aligned on a device blocksize
26158  *		boundary, then the write has to be converted to a
26159  *		read-modify-write. Do the read part here in order to keep
26160  *		sddump simple. Note that the sd_mutex is held across the
26161  *		call to this routine.
26162  *
26163  * Arguments: un	- sd_lun
26164  *		blkno - block number in terms of media block size.
26165  *		nblk - number of blocks.
26166  *		bpp - pointer to pointer to the buf structure. On return
26167  *		from this function, *bpp points to the valid buffer
26168  *		to which the write has to be done.
26169  *
26170  * Return Code: 0 for success or errno-type return code
26171  */
26172
26173 static int
26174 sddump_do_read_of_rmw(struct sd_lun *un, uint64_t blkno, uint64_t nblk,
26175     struct buf **bpp)
26176 {
26177	int err;
26178	int i;
26179	int rval;
26180	struct buf *bp;
26181	struct scsi_pkt *pkt = NULL;
26182	uint32_t target_blocksize;
26183
26184	ASSERT(un != NULL);
26185	ASSERT(mutex_owned(SD_MUTEX(un)));
26186
26187	target_blocksize = un->un_tgt_blocksize;
26188
26189	mutex_exit(SD_MUTEX(un));
26190
26191	bp = scsi_alloc_consistent_buf(SD_ADDRESS(un), (struct buf *)NULL,
26192	    (size_t)(nblk * target_blocksize), B_READ, NULL_FUNC, NULL);
26193	if (bp == NULL) {
26194		scsi_log(SD_DEVINFO(un), sd_label, CE_CONT,
26195		    "no resources for dumping; giving up");
26196		err = ENOMEM;
26197		goto done;
26198	}
26199
26200	rval = sd_setup_rw_pkt(un, &pkt, bp, 0, NULL_FUNC, NULL,
26201	    blkno, nblk);
26202	if (rval != 0) {
26203		scsi_free_consistent_buf(bp);
26204		scsi_log(SD_DEVINFO(un), sd_label, CE_CONT,
26205		    "no resources for dumping; giving up");
26206		err = ENOMEM;
26207		goto done;
26208	}
26209
26210	pkt->pkt_flags |= FLAG_NOINTR;
26211
26212	err = EIO;
26213	for (i = 0; i < SD_NDUMP_RETRIES; i++) {
26214
26215		/*
26216		 * Scsi_poll returns 0 (success) if the command completes and
26217		 * the status block is STATUS_GOOD.
We should only check 26218 * errors if this condition is not true. Even then we should 26219 * send our own request sense packet only if we have a check 26220 * condition and auto request sense has not been performed by 26221 * the hba. 26222 */ 26223 SD_TRACE(SD_LOG_DUMP, un, "sddump: sending read\n"); 26224 26225 if ((sd_scsi_poll(un, pkt) == 0) && (pkt->pkt_resid == 0)) { 26226 err = 0; 26227 break; 26228 } 26229 26230 /* 26231 * Check CMD_DEV_GONE 1st, give up if device is gone, 26232 * no need to read RQS data. 26233 */ 26234 if (pkt->pkt_reason == CMD_DEV_GONE) { 26235 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 26236 "Device is gone\n"); 26237 break; 26238 } 26239 26240 if (SD_GET_PKT_STATUS(pkt) == STATUS_CHECK) { 26241 SD_INFO(SD_LOG_DUMP, un, 26242 "sddump: read failed with CHECK, try # %d\n", i); 26243 if (((pkt->pkt_state & STATE_ARQ_DONE) == 0)) { 26244 (void) sd_send_polled_RQS(un); 26245 } 26246 26247 continue; 26248 } 26249 26250 if (SD_GET_PKT_STATUS(pkt) == STATUS_BUSY) { 26251 int reset_retval = 0; 26252 26253 SD_INFO(SD_LOG_DUMP, un, 26254 "sddump: read failed with BUSY, try # %d\n", i); 26255 26256 if (un->un_f_lun_reset_enabled == TRUE) { 26257 reset_retval = scsi_reset(SD_ADDRESS(un), 26258 RESET_LUN); 26259 } 26260 if (reset_retval == 0) { 26261 (void) scsi_reset(SD_ADDRESS(un), RESET_TARGET); 26262 } 26263 (void) sd_send_polled_RQS(un); 26264 26265 } else { 26266 SD_INFO(SD_LOG_DUMP, un, 26267 "sddump: read failed with 0x%x, try # %d\n", 26268 SD_GET_PKT_STATUS(pkt), i); 26269 mutex_enter(SD_MUTEX(un)); 26270 sd_reset_target(un, pkt); 26271 mutex_exit(SD_MUTEX(un)); 26272 } 26273 26274 /* 26275 * If we are not getting anywhere with lun/target resets, 26276 * let's reset the bus. 26277 */ 26278 if (i > SD_NDUMP_RETRIES/2) { 26279 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL); 26280 (void) sd_send_polled_RQS(un); 26281 } 26282 26283 } 26284 scsi_destroy_pkt(pkt); 26285 26286 if (err != 0) { 26287 scsi_free_consistent_buf(bp); 26288 *bpp = NULL; 26289 } else { 26290 *bpp = bp; 26291 } 26292 26293 done: 26294 mutex_enter(SD_MUTEX(un)); 26295 return (err); 26296 } 26297 26298 26299 /* 26300 * Function: sd_failfast_flushq 26301 * 26302 * Description: Take all bp's on the wait queue that have B_FAILFAST set 26303 * in b_flags and move them onto the failfast queue, then kick 26304 * off a thread to return all bp's on the failfast queue to 26305 * their owners with an error set. 26306 * 26307 * Arguments: un - pointer to the soft state struct for the instance. 26308 * 26309 * Context: may execute in interrupt context. 26310 */ 26311 26312 static void 26313 sd_failfast_flushq(struct sd_lun *un) 26314 { 26315 struct buf *bp; 26316 struct buf *next_waitq_bp; 26317 struct buf *prev_waitq_bp = NULL; 26318 26319 ASSERT(un != NULL); 26320 ASSERT(mutex_owned(SD_MUTEX(un))); 26321 ASSERT(un->un_failfast_state == SD_FAILFAST_ACTIVE); 26322 ASSERT(un->un_failfast_bp == NULL); 26323 26324 SD_TRACE(SD_LOG_IO_FAILFAST, un, 26325 "sd_failfast_flushq: entry: un:0x%p\n", un); 26326 26327 /* 26328 * Check if we should flush all bufs when entering failfast state, or 26329 * just those with B_FAILFAST set. 26330 */ 26331 if (sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_BUFS) { 26332 /* 26333 * Move *all* bp's on the wait queue to the failfast flush 26334 * queue, including those that do NOT have B_FAILFAST set. 
26335 */ 26336 if (un->un_failfast_headp == NULL) { 26337 ASSERT(un->un_failfast_tailp == NULL); 26338 un->un_failfast_headp = un->un_waitq_headp; 26339 } else { 26340 ASSERT(un->un_failfast_tailp != NULL); 26341 un->un_failfast_tailp->av_forw = un->un_waitq_headp; 26342 } 26343 26344 un->un_failfast_tailp = un->un_waitq_tailp; 26345 26346 /* update kstat for each bp moved out of the waitq */ 26347 for (bp = un->un_waitq_headp; bp != NULL; bp = bp->av_forw) { 26348 SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp); 26349 } 26350 26351 /* empty the waitq */ 26352 un->un_waitq_headp = un->un_waitq_tailp = NULL; 26353 26354 } else { 26355 /* 26356 * Go thru the wait queue, pick off all entries with 26357 * B_FAILFAST set, and move these onto the failfast queue. 26358 */ 26359 for (bp = un->un_waitq_headp; bp != NULL; bp = next_waitq_bp) { 26360 /* 26361 * Save the pointer to the next bp on the wait queue, 26362 * so we get to it on the next iteration of this loop. 26363 */ 26364 next_waitq_bp = bp->av_forw; 26365 26366 /* 26367 * If this bp from the wait queue does NOT have 26368 * B_FAILFAST set, just move on to the next element 26369 * in the wait queue. Note, this is the only place 26370 * where it is correct to set prev_waitq_bp. 26371 */ 26372 if ((bp->b_flags & B_FAILFAST) == 0) { 26373 prev_waitq_bp = bp; 26374 continue; 26375 } 26376 26377 /* 26378 * Remove the bp from the wait queue. 26379 */ 26380 if (bp == un->un_waitq_headp) { 26381 /* The bp is the first element of the waitq. */ 26382 un->un_waitq_headp = next_waitq_bp; 26383 if (un->un_waitq_headp == NULL) { 26384 /* The wait queue is now empty */ 26385 un->un_waitq_tailp = NULL; 26386 } 26387 } else { 26388 /* 26389 * The bp is either somewhere in the middle 26390 * or at the end of the wait queue. 26391 */ 26392 ASSERT(un->un_waitq_headp != NULL); 26393 ASSERT(prev_waitq_bp != NULL); 26394 ASSERT((prev_waitq_bp->b_flags & B_FAILFAST) 26395 == 0); 26396 if (bp == un->un_waitq_tailp) { 26397 /* bp is the last entry on the waitq. */ 26398 ASSERT(next_waitq_bp == NULL); 26399 un->un_waitq_tailp = prev_waitq_bp; 26400 } 26401 prev_waitq_bp->av_forw = next_waitq_bp; 26402 } 26403 bp->av_forw = NULL; 26404 26405 /* 26406 * update kstat since the bp is moved out of 26407 * the waitq 26408 */ 26409 SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp); 26410 26411 /* 26412 * Now put the bp onto the failfast queue. 26413 */ 26414 if (un->un_failfast_headp == NULL) { 26415 /* failfast queue is currently empty */ 26416 ASSERT(un->un_failfast_tailp == NULL); 26417 un->un_failfast_headp = 26418 un->un_failfast_tailp = bp; 26419 } else { 26420 /* Add the bp to the end of the failfast q */ 26421 ASSERT(un->un_failfast_tailp != NULL); 26422 ASSERT(un->un_failfast_tailp->b_flags & 26423 B_FAILFAST); 26424 un->un_failfast_tailp->av_forw = bp; 26425 un->un_failfast_tailp = bp; 26426 } 26427 } 26428 } 26429 26430 /* 26431 * Now return all bp's on the failfast queue to their owners. 26432 */ 26433 while ((bp = un->un_failfast_headp) != NULL) { 26434 26435 un->un_failfast_headp = bp->av_forw; 26436 if (un->un_failfast_headp == NULL) { 26437 un->un_failfast_tailp = NULL; 26438 } 26439 26440 /* 26441 * We want to return the bp with a failure error code, but 26442 * we do not want a call to sd_start_cmds() to occur here, 26443 * so use sd_return_failed_command_no_restart() instead of 26444 * sd_return_failed_command(). 26445 */ 26446 sd_return_failed_command_no_restart(un, bp, EIO); 26447 } 26448 26449 /* Flush the xbuf queues if required. 
*/
26450	if (sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_QUEUES) {
26451		ddi_xbuf_flushq(un->un_xbuf_attr, sd_failfast_flushq_callback);
26452	}
26453
26454	SD_TRACE(SD_LOG_IO_FAILFAST, un,
26455	    "sd_failfast_flushq: exit: un:0x%p\n", un);
26456 }
26457
26458
26459 /*
26460  * Function: sd_failfast_flushq_callback
26461  *
26462  * Description: Return TRUE if the given bp meets the criteria for failfast
26463  *		flushing. Used with ddi_xbuf_flushq(9F).
26464  *
26465  * Arguments: bp - ptr to buf struct to be examined.
26466  *
26467  * Context: Any
26468  */
26469
26470 static int
26471 sd_failfast_flushq_callback(struct buf *bp)
26472 {
26473	/*
26474	 * Return TRUE if (1) we want to flush ALL bufs when the failfast
26475	 * state is entered; OR (2) the given bp has B_FAILFAST set.
26476	 */
26477	return (((sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_BUFS) ||
26478	    (bp->b_flags & B_FAILFAST)) ? TRUE : FALSE);
26479 }
26480
26481
26482
26483 #if defined(__i386) || defined(__amd64)
26484 /*
26485  * Function: sd_setup_next_xfer
26486  *
26487  * Description: Prepare next I/O operation using DMA_PARTIAL
26488  *
26489  */
26490
26491 static int
26492 sd_setup_next_xfer(struct sd_lun *un, struct buf *bp,
26493     struct scsi_pkt *pkt, struct sd_xbuf *xp)
26494 {
26495	ssize_t	num_blks_not_xfered;
26496	daddr_t	strt_blk_num;
26497	ssize_t	bytes_not_xfered;
26498	int	rval;
26499
26500	ASSERT(pkt->pkt_resid == 0);
26501
26502	/*
26503	 * Calculate next block number and amount to be transferred.
26504	 *
26505	 * How much data NOT transferred to the HBA yet.
26506	 */
26507	bytes_not_xfered = xp->xb_dma_resid;
26508
26509	/*
26510	 * Figure out how many blocks are NOT transferred to the HBA yet.
26511	 */
26512	num_blks_not_xfered = SD_BYTES2TGTBLOCKS(un, bytes_not_xfered);
26513
26514	/*
26515	 * Set the starting block number to the end of what WAS transferred.
26516	 */
26517	strt_blk_num = xp->xb_blkno +
26518	    SD_BYTES2TGTBLOCKS(un, bp->b_bcount - bytes_not_xfered);
26519
26520	/*
26521	 * Move pkt to the next portion of the xfer.  sd_setup_next_rw_pkt
26522	 * will call scsi_init_pkt with NULL_FUNC so we do not have to release
26523	 * the disk mutex here.
26524	 */
26525	rval = sd_setup_next_rw_pkt(un, pkt, bp,
26526	    strt_blk_num, num_blks_not_xfered);
26527
26528	if (rval == 0) {
26529
26530		/*
26531		 * Success.
26532		 *
26533		 * Adjust things if there are still more blocks to be
26534		 * transferred.
26535		 */
26536		xp->xb_dma_resid = pkt->pkt_resid;
26537		pkt->pkt_resid = 0;
26538
26539		return (1);
26540	}
26541
26542	/*
26543	 * There's really only one possible return value from
26544	 * sd_setup_next_rw_pkt which occurs when scsi_init_pkt
26545	 * returns NULL.
26546	 */
26547	ASSERT(rval == SD_PKT_ALLOC_FAILURE);
26548
26549	bp->b_resid = bp->b_bcount;
26550	bp->b_flags |= B_ERROR;
26551
26552	scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
26553	    "Error setting up next portion of DMA transfer\n");
26554
26555	return (0);
26556 }
26557 #endif
26558
26559 /*
26560  * Function: sd_panic_for_res_conflict
26561  *
26562  * Description: Call panic with a string formatted with "Reservation Conflict"
26563  *		and a human readable identifier indicating the SD instance
26564  *		that experienced the reservation conflict.
26565  *
26566  * Arguments: un - pointer to the soft state struct for the instance.
26567  *
26568  * Context: may execute in interrupt context.
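 *
 *		The resulting panic string has the form shown below (the
 *		device path is illustrative only):
 *
 *			Reservation Conflict
 *			Disk: /pci@1f,4000/scsi@3/sd@1,0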
26569  */
26570
26571 #define	SD_RESV_CONFLICT_FMT_LEN 40
26572 void
26573 sd_panic_for_res_conflict(struct sd_lun *un)
26574 {
26575	char panic_str[SD_RESV_CONFLICT_FMT_LEN+MAXPATHLEN];
26576	char path_str[MAXPATHLEN];
26577
26578	(void) snprintf(panic_str, sizeof (panic_str),
26579	    "Reservation Conflict\nDisk: %s",
26580	    ddi_pathname(SD_DEVINFO(un), path_str));
26581
26582	panic(panic_str);
26583 }
26584
26585 /*
26586  * Note: The following sd_faultinjection_ioctl() routines implement
26587  * driver support for fault injection, used for error analysis by
26588  * causing faults in multiple layers of the driver.
26589  *
26590  */
26591
26592 #ifdef SD_FAULT_INJECTION
26593 static uint_t   sd_fault_injection_on = 0;
26594
26595 /*
26596  * Function: sd_faultinjection_ioctl()
26597  *
26598  * Description: This routine is the driver entry point for handling
26599  *              faultinjection ioctls used to inject errors into the
26600  *              layered model
26601  *
26602  * Arguments: cmd	- the ioctl cmd received
26603  *            arg	- the argument from the user; also used to return data
26604  */
26605
26606 static void
26607 sd_faultinjection_ioctl(int cmd, intptr_t arg, struct sd_lun *un) {
26608
26609	uint_t i;
26610	uint_t rval;
26611
26612	SD_TRACE(SD_LOG_IOERR, un, "sd_faultinjection_ioctl: entry\n");
26613
26614	mutex_enter(SD_MUTEX(un));
26615
26616	switch (cmd) {
26617	case SDIOCRUN:
26618		/* Allow pushed faults to be injected */
26619		SD_INFO(SD_LOG_SDTEST, un,
26620		    "sd_faultinjection_ioctl: Injecting Fault Run\n");
26621
26622		sd_fault_injection_on = 1;
26623
26624		SD_INFO(SD_LOG_IOERR, un,
26625		    "sd_faultinjection_ioctl: run finished\n");
26626		break;
26627
26628	case SDIOCSTART:
26629		/* Start Injection Session */
26630		SD_INFO(SD_LOG_SDTEST, un,
26631		    "sd_faultinjection_ioctl: Injecting Fault Start\n");
26632
26633		sd_fault_injection_on = 0;
26634		un->sd_injection_mask = 0xFFFFFFFF;
26635		for (i = 0; i < SD_FI_MAX_ERROR; i++) {
26636			un->sd_fi_fifo_pkt[i] = NULL;
26637			un->sd_fi_fifo_xb[i] = NULL;
26638			un->sd_fi_fifo_un[i] = NULL;
26639			un->sd_fi_fifo_arq[i] = NULL;
26640		}
26641		un->sd_fi_fifo_start = 0;
26642		un->sd_fi_fifo_end = 0;
26643
26644		mutex_enter(&(un->un_fi_mutex));
26645		un->sd_fi_log[0] = '\0';
26646		un->sd_fi_buf_len = 0;
26647		mutex_exit(&(un->un_fi_mutex));
26648
26649		SD_INFO(SD_LOG_IOERR, un,
26650		    "sd_faultinjection_ioctl: start finished\n");
26651		break;
26652
26653	case SDIOCSTOP:
26654		/* Stop Injection Session */
26655		SD_INFO(SD_LOG_SDTEST, un,
26656		    "sd_faultinjection_ioctl: Injecting Fault Stop\n");
26657		sd_fault_injection_on = 0;
26658		un->sd_injection_mask = 0x0;
26659
26660		/* Empty stray or unused structs from the fifo */
26661		for (i = 0; i < SD_FI_MAX_ERROR; i++) {
26662			if (un->sd_fi_fifo_pkt[i] != NULL) {
26663				kmem_free(un->sd_fi_fifo_pkt[i],
26664				    sizeof (struct sd_fi_pkt));
26665			}
26666			if (un->sd_fi_fifo_xb[i] != NULL) {
26667				kmem_free(un->sd_fi_fifo_xb[i],
26668				    sizeof (struct sd_fi_xb));
26669			}
26670			if (un->sd_fi_fifo_un[i] != NULL) {
26671				kmem_free(un->sd_fi_fifo_un[i],
26672				    sizeof (struct sd_fi_un));
26673			}
26674			if (un->sd_fi_fifo_arq[i] != NULL) {
26675				kmem_free(un->sd_fi_fifo_arq[i],
26676				    sizeof (struct sd_fi_arq));
26677			}
26678			un->sd_fi_fifo_pkt[i] = NULL;
26679			un->sd_fi_fifo_un[i] = NULL;
26680			un->sd_fi_fifo_xb[i] = NULL;
26681			un->sd_fi_fifo_arq[i] = NULL;
26682		}
26683		un->sd_fi_fifo_start = 0;
26684		un->sd_fi_fifo_end = 0;
26685
26686		SD_INFO(SD_LOG_IOERR, un,
26687		    "sd_faultinjection_ioctl: stop finished\n");
26688		break;
26689
26690	case SDIOCINSERTPKT:
26691		/* Store a
packet struct to be pushed onto the fifo */
26692		SD_INFO(SD_LOG_SDTEST, un,
26693		    "sd_faultinjection_ioctl: Injecting Fault Insert Pkt\n");
26694
26695		i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR;
26696
26697		sd_fault_injection_on = 0;
26698
26699		/* No more than SD_FI_MAX_ERROR entries allowed in the queue */
26700		if (un->sd_fi_fifo_pkt[i] != NULL) {
26701			kmem_free(un->sd_fi_fifo_pkt[i],
26702			    sizeof (struct sd_fi_pkt));
26703		}
26704		if (arg != NULL) {
26705			un->sd_fi_fifo_pkt[i] =
26706			    kmem_alloc(sizeof (struct sd_fi_pkt), KM_NOSLEEP);
26707			if (un->sd_fi_fifo_pkt[i] == NULL) {
26708				/* Alloc failed; don't store anything */
26709				break;
26710			}
26711			rval = ddi_copyin((void *)arg, un->sd_fi_fifo_pkt[i],
26712			    sizeof (struct sd_fi_pkt), 0);
26713			if (rval == -1) {
26714				kmem_free(un->sd_fi_fifo_pkt[i],
26715				    sizeof (struct sd_fi_pkt));
26716				un->sd_fi_fifo_pkt[i] = NULL;
26717			}
26718		} else {
26719			SD_INFO(SD_LOG_IOERR, un,
26720			    "sd_faultinjection_ioctl: pkt null\n");
26721		}
26722		break;
26723
26724	case SDIOCINSERTXB:
26725		/* Store an xb struct to be pushed onto the fifo */
26726		SD_INFO(SD_LOG_SDTEST, un,
26727		    "sd_faultinjection_ioctl: Injecting Fault Insert XB\n");
26728
26729		i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR;
26730
26731		sd_fault_injection_on = 0;
26732
26733		if (un->sd_fi_fifo_xb[i] != NULL) {
26734			kmem_free(un->sd_fi_fifo_xb[i],
26735			    sizeof (struct sd_fi_xb));
26736			un->sd_fi_fifo_xb[i] = NULL;
26737		}
26738		if (arg != NULL) {
26739			un->sd_fi_fifo_xb[i] =
26740			    kmem_alloc(sizeof (struct sd_fi_xb), KM_NOSLEEP);
26741			if (un->sd_fi_fifo_xb[i] == NULL) {
26742				/* Alloc failed; don't store anything */
26743				break;
26744			}
26745			rval = ddi_copyin((void *)arg, un->sd_fi_fifo_xb[i],
26746			    sizeof (struct sd_fi_xb), 0);
26747
26748			if (rval == -1) {
26749				kmem_free(un->sd_fi_fifo_xb[i],
26750				    sizeof (struct sd_fi_xb));
26751				un->sd_fi_fifo_xb[i] = NULL;
26752			}
26753		} else {
26754			SD_INFO(SD_LOG_IOERR, un,
26755			    "sd_faultinjection_ioctl: xb null\n");
26756		}
26757		break;
26758
26759	case SDIOCINSERTUN:
26760		/* Store a un struct to be pushed onto the fifo */
26761		SD_INFO(SD_LOG_SDTEST, un,
26762		    "sd_faultinjection_ioctl: Injecting Fault Insert UN\n");
26763
26764		i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR;
26765
26766		sd_fault_injection_on = 0;
26767
26768		if (un->sd_fi_fifo_un[i] != NULL) {
26769			kmem_free(un->sd_fi_fifo_un[i],
26770			    sizeof (struct sd_fi_un));
26771			un->sd_fi_fifo_un[i] = NULL;
26772		}
26773		if (arg != NULL) {
26774			un->sd_fi_fifo_un[i] =
26775			    kmem_alloc(sizeof (struct sd_fi_un), KM_NOSLEEP);
26776			if (un->sd_fi_fifo_un[i] == NULL) {
26777				/* Alloc failed; don't store anything */
26778				break;
26779			}
26780			rval = ddi_copyin((void *)arg, un->sd_fi_fifo_un[i],
26781			    sizeof (struct sd_fi_un), 0);
26782			if (rval == -1) {
26783				kmem_free(un->sd_fi_fifo_un[i],
26784				    sizeof (struct sd_fi_un));
26785				un->sd_fi_fifo_un[i] = NULL;
26786			}
26787
26788		} else {
26789			SD_INFO(SD_LOG_IOERR, un,
26790			    "sd_faultinjection_ioctl: un null\n");
26791		}
26792
26793		break;
26794
26795	case SDIOCINSERTARQ:
26796		/* Store an arq struct to be pushed onto the fifo */
26797		SD_INFO(SD_LOG_SDTEST, un,
26798		    "sd_faultinjection_ioctl: Injecting Fault Insert ARQ\n");
26799		i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR;
26800
26801		sd_fault_injection_on = 0;
26802
26803		if (un->sd_fi_fifo_arq[i] != NULL) {
26804			kmem_free(un->sd_fi_fifo_arq[i],
26805			    sizeof (struct sd_fi_arq));
26806			un->sd_fi_fifo_arq[i] = NULL;
26807		}
26808		if (arg != NULL) {
26809			un->sd_fi_fifo_arq[i] =
26810			    kmem_alloc(sizeof (struct sd_fi_arq),
KM_NOSLEEP);
26811			if (un->sd_fi_fifo_arq[i] == NULL) {
26812				/* Alloc failed; don't store anything */
26813				break;
26814			}
26815			rval = ddi_copyin((void *)arg, un->sd_fi_fifo_arq[i],
26816			    sizeof (struct sd_fi_arq), 0);
26817			if (rval == -1) {
26818				kmem_free(un->sd_fi_fifo_arq[i],
26819				    sizeof (struct sd_fi_arq));
26820				un->sd_fi_fifo_arq[i] = NULL;
26821			}
26822
26823		} else {
26824			SD_INFO(SD_LOG_IOERR, un,
26825			    "sd_faultinjection_ioctl: arq null\n");
26826		}
26827
26828		break;
26829
26830	case SDIOCPUSH:
26831		/* Push stored xb, pkt, un, and arq onto fifo */
26832		sd_fault_injection_on = 0;
26833
26834		if (arg != NULL) {
26835			rval = ddi_copyin((void *)arg, &i, sizeof (uint_t), 0);
26836			if (rval != -1 &&
26837			    un->sd_fi_fifo_end + i < SD_FI_MAX_ERROR) {
26838				un->sd_fi_fifo_end += i;
26839			}
26840		} else {
26841			SD_INFO(SD_LOG_IOERR, un,
26842			    "sd_faultinjection_ioctl: push arg null\n");
26843			if (un->sd_fi_fifo_end + i < SD_FI_MAX_ERROR) {
26844				un->sd_fi_fifo_end++;
26845			}
26846		}
26847		SD_INFO(SD_LOG_IOERR, un,
26848		    "sd_faultinjection_ioctl: push to end=%d\n",
26849		    un->sd_fi_fifo_end);
26850		break;
26851
26852	case SDIOCRETRIEVE:
26853		/* Return buffer of the log from the injection session */
26854		SD_INFO(SD_LOG_SDTEST, un,
26855		    "sd_faultinjection_ioctl: Injecting Fault Retrieve");
26856
26857		sd_fault_injection_on = 0;
26858
26859		mutex_enter(&(un->un_fi_mutex));
26860		rval = ddi_copyout(un->sd_fi_log, (void *)arg,
26861		    un->sd_fi_buf_len+1, 0);
26862		mutex_exit(&(un->un_fi_mutex));
26863
26864		if (rval == -1) {
26865			/*
26866			 * arg is possibly invalid; set
26867			 * it to NULL for the return.
26868			 */
26869			arg = NULL;
26870		}
26871		break;
26872	}
26873
26874	mutex_exit(SD_MUTEX(un));
26875	SD_TRACE(SD_LOG_IOERR, un, "sd_faultinjection_ioctl:"
26876	    " exit\n");
26877 }
26878
26879
26880 /*
26881  * Function: sd_injection_log()
26882  *
26883  * Description: This routine adds buf to the already existing injection log
26884  *		for retrieval via sd_faultinjection_ioctl() for use in fault
26885  *		detection and recovery
26886  *
26887  * Arguments: buf - the string to add to the log
26888  */
26889
26890 static void
26891 sd_injection_log(char *buf, struct sd_lun *un)
26892 {
26893	uint_t len;
26894
26895	ASSERT(un != NULL);
26896	ASSERT(buf != NULL);
26897
26898	mutex_enter(&(un->un_fi_mutex));
26899
26900	len = min(strlen(buf), 255);
26901	/* Add logged value to Injection log to be returned later */
26902	if (len + un->sd_fi_buf_len < SD_FI_MAX_BUF) {
26903		uint_t	offset = strlen((char *)un->sd_fi_log);
26904		char *destp = (char *)un->sd_fi_log + offset;
26905		int i;
26906		for (i = 0; i < len; i++) {
26907			*destp++ = *buf++;
26908		}
26909		un->sd_fi_buf_len += len;
26910		un->sd_fi_log[un->sd_fi_buf_len] = '\0';
26911	}
26912
26913	mutex_exit(&(un->un_fi_mutex));
26914 }
26915
26916
26917 /*
26918  * Function: sd_faultinjection()
26919  *
26920  * Description: This routine takes the pkt and changes its
26921  *		content based on the error injection scenario.
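 *
 *		A typical injection session, driven through
 *		sd_faultinjection_ioctl() above (the sequence is
 *		illustrative):
 *
 *			SDIOCSTART	(reset the fifo and the log)
 *			SDIOCINSERTPKT	(stage a sd_fi_pkt at the fifo end)
 *			SDIOCPUSH	(advance sd_fi_fifo_end)
 *			SDIOCRUN	(enable injection)
 *
 *		after which this routine consumes one staged set per command
 *		completion and applies it to the pkt, xbuf, un and ARQ data.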
26922 * 26923 * Arguments: pktp - packet to be changed 26924 */ 26925 26926 static void 26927 sd_faultinjection(struct scsi_pkt *pktp) 26928 { 26929 uint_t i; 26930 struct sd_fi_pkt *fi_pkt; 26931 struct sd_fi_xb *fi_xb; 26932 struct sd_fi_un *fi_un; 26933 struct sd_fi_arq *fi_arq; 26934 struct buf *bp; 26935 struct sd_xbuf *xb; 26936 struct sd_lun *un; 26937 26938 ASSERT(pktp != NULL); 26939 26940 /* pull bp xb and un from pktp */ 26941 bp = (struct buf *)pktp->pkt_private; 26942 xb = SD_GET_XBUF(bp); 26943 un = SD_GET_UN(bp); 26944 26945 ASSERT(un != NULL); 26946 26947 mutex_enter(SD_MUTEX(un)); 26948 26949 SD_TRACE(SD_LOG_SDTEST, un, 26950 "sd_faultinjection: entry Injection from sdintr\n"); 26951 26952 /* if injection is off return */ 26953 if (sd_fault_injection_on == 0 || 26954 un->sd_fi_fifo_start == un->sd_fi_fifo_end) { 26955 mutex_exit(SD_MUTEX(un)); 26956 return; 26957 } 26958 26959 26960 /* take next set off fifo */ 26961 i = un->sd_fi_fifo_start % SD_FI_MAX_ERROR; 26962 26963 fi_pkt = un->sd_fi_fifo_pkt[i]; 26964 fi_xb = un->sd_fi_fifo_xb[i]; 26965 fi_un = un->sd_fi_fifo_un[i]; 26966 fi_arq = un->sd_fi_fifo_arq[i]; 26967 26968 26969 /* set variables accordingly */ 26970 /* set pkt if it was on fifo */ 26971 if (fi_pkt != NULL) { 26972 SD_CONDSET(pktp, pkt, pkt_flags, "pkt_flags"); 26973 SD_CONDSET(*pktp, pkt, pkt_scbp, "pkt_scbp"); 26974 SD_CONDSET(*pktp, pkt, pkt_cdbp, "pkt_cdbp"); 26975 SD_CONDSET(pktp, pkt, pkt_state, "pkt_state"); 26976 SD_CONDSET(pktp, pkt, pkt_statistics, "pkt_statistics"); 26977 SD_CONDSET(pktp, pkt, pkt_reason, "pkt_reason"); 26978 26979 } 26980 26981 /* set xb if it was on fifo */ 26982 if (fi_xb != NULL) { 26983 SD_CONDSET(xb, xb, xb_blkno, "xb_blkno"); 26984 SD_CONDSET(xb, xb, xb_dma_resid, "xb_dma_resid"); 26985 SD_CONDSET(xb, xb, xb_retry_count, "xb_retry_count"); 26986 SD_CONDSET(xb, xb, xb_victim_retry_count, 26987 "xb_victim_retry_count"); 26988 SD_CONDSET(xb, xb, xb_sense_status, "xb_sense_status"); 26989 SD_CONDSET(xb, xb, xb_sense_state, "xb_sense_state"); 26990 SD_CONDSET(xb, xb, xb_sense_resid, "xb_sense_resid"); 26991 26992 /* copy in block data from sense */ 26993 if (fi_xb->xb_sense_data[0] != -1) { 26994 bcopy(fi_xb->xb_sense_data, xb->xb_sense_data, 26995 SENSE_LENGTH); 26996 } 26997 26998 /* copy in extended sense codes */ 26999 SD_CONDSET(((struct scsi_extended_sense *)xb), xb, es_code, 27000 "es_code"); 27001 SD_CONDSET(((struct scsi_extended_sense *)xb), xb, es_key, 27002 "es_key"); 27003 SD_CONDSET(((struct scsi_extended_sense *)xb), xb, es_add_code, 27004 "es_add_code"); 27005 SD_CONDSET(((struct scsi_extended_sense *)xb), xb, 27006 es_qual_code, "es_qual_code"); 27007 } 27008 27009 /* set un if it was on fifo */ 27010 if (fi_un != NULL) { 27011 SD_CONDSET(un->un_sd->sd_inq, un, inq_rmb, "inq_rmb"); 27012 SD_CONDSET(un, un, un_ctype, "un_ctype"); 27013 SD_CONDSET(un, un, un_reset_retry_count, 27014 "un_reset_retry_count"); 27015 SD_CONDSET(un, un, un_reservation_type, "un_reservation_type"); 27016 SD_CONDSET(un, un, un_resvd_status, "un_resvd_status"); 27017 SD_CONDSET(un, un, un_f_arq_enabled, "un_f_arq_enabled"); 27018 SD_CONDSET(un, un, un_f_allow_bus_device_reset, 27019 "un_f_allow_bus_device_reset"); 27020 SD_CONDSET(un, un, un_f_opt_queueing, "un_f_opt_queueing"); 27021 27022 } 27023 27024 /* copy in auto request sense if it was on fifo */ 27025 if (fi_arq != NULL) { 27026 bcopy(fi_arq, pktp->pkt_scbp, sizeof (struct sd_fi_arq)); 27027 } 27028 27029 /* free structs */ 27030 if (un->sd_fi_fifo_pkt[i] != NULL) { 27031 
kmem_free(un->sd_fi_fifo_pkt[i], sizeof (struct sd_fi_pkt));
27032	}
27033	if (un->sd_fi_fifo_xb[i] != NULL) {
27034		kmem_free(un->sd_fi_fifo_xb[i], sizeof (struct sd_fi_xb));
27035	}
27036	if (un->sd_fi_fifo_un[i] != NULL) {
27037		kmem_free(un->sd_fi_fifo_un[i], sizeof (struct sd_fi_un));
27038	}
27039	if (un->sd_fi_fifo_arq[i] != NULL) {
27040		kmem_free(un->sd_fi_fifo_arq[i], sizeof (struct sd_fi_arq));
27041	}
27042
27043	/*
27044	 * kmem_free() does not guarantee that the freed memory is set
27045	 * to NULL. Since we use these pointers to determine whether
27046	 * injection values were set, explicitly confirm that they are
27047	 * always NULL after the free.
27048	 */
27049	un->sd_fi_fifo_pkt[i] = NULL;
27050	un->sd_fi_fifo_un[i] = NULL;
27051	un->sd_fi_fifo_xb[i] = NULL;
27052	un->sd_fi_fifo_arq[i] = NULL;
27053
27054	un->sd_fi_fifo_start++;
27055
27056	mutex_exit(SD_MUTEX(un));
27057
27058	SD_TRACE(SD_LOG_SDTEST, un, "sd_faultinjection: exit\n");
27059 }
27060
27061 #endif /* SD_FAULT_INJECTION */
27062
27063 /*
27064  * This routine is invoked in sd_unit_attach(). Before it is called, the
27065  * properties in the conf file, including the "hotpluggable" property,
27066  * should already have been processed.
27067  *
27068  * The sd driver distinguishes 3 different types of devices: removable media,
27069  * non-removable media, and hotpluggable. The differences are defined below:
27070  *
27071  * 1. Device ID
27072  *
27073  *    The device ID of a device is used to identify this device. Refer to
27074  *    ddi_devid_register(9F).
27075  *
27076  *    For a non-removable media disk device which can provide 0x80 or 0x83
27077  *    VPD page (refer to INQUIRY command of SCSI SPC specification), a unique
27078  *    device ID is created to identify this device. For other non-removable
27079  *    media devices, a default device ID is created only if this device has
27080  *    at least 2 alternate cylinders. Otherwise, this device has no devid.
27081  *
27082  *    -------------------------------------------------------
27083  *    removable media  hotpluggable  |  Can Have Device ID
27084  *    -------------------------------------------------------
27085  *        false            false     |        Yes
27086  *        false            true      |        Yes
27087  *        true               x       |        No
27088  *    -------------------------------------------------------
27089  *
27090  *
27091  * 2. SCSI group 4 commands
27092  *
27093  *    In the SCSI specs, only some commands in the group 4 command set can
27094  *    use 8-byte addresses, which are needed to access storage beyond 2TB.
27095  *    Other commands have no such capability. Without group 4 support, it
27096  *    is impossible to make full use of the storage space of a disk with
27097  *    a capacity larger than 2TB.
27098  *
27099  *    -----------------------------------------------
27100  *    removable media  hotpluggable  LP64   |  Group
27101  *    -----------------------------------------------
27102  *        false            false    false   |    1
27103  *        false            false    true    |    4
27104  *        false            true     false   |    1
27105  *        false            true     true    |    4
27106  *        true               x        x     |    5
27107  *    -----------------------------------------------
27108  *
27109  *
27110  * 3. Check for VTOC Label
27111  *
27112  *    If a direct-access disk has no EFI label, sd will check if it has a
27113  *    valid VTOC label. Now, sd also does that check for removable media
27114  *    and hotpluggable devices.
27115  *
27116  *    --------------------------------------------------------------
27117  *    Direct-Access   removable media   hotpluggable  |  Check Label
27118  *    --------------------------------------------------------------
27119  *        false            false            false     |     No
27120  *        false            false            true      |     No
27121  *        false            true             false     |     Yes
27122  *        false            true             true      |     Yes
27123  *        true               x                x       |     Yes
27124  *    --------------------------------------------------------------
27125  *
27126  *
27127  * 4. Building default VTOC label
27128  *
27129  *    As section 3 says, sd checks whether some kinds of devices have a VTOC
27130  *    label. If those devices have no valid VTOC label, sd(7d) will attempt
27131  *    to create a default VTOC for them. Currently sd creates a default VTOC
27132  *    label for all devices on the x86 platform (VTOC_16), but only for
27133  *    removable media devices on SPARC (VTOC_8).
27134  *
27135  *    -----------------------------------------------------------
27136  *    removable media  hotpluggable  platform  |  Default Label
27137  *    -----------------------------------------------------------
27138  *        false            false      sparc    |      No
27139  *        false            true        x86     |      Yes
27140  *        false            true       sparc    |      Yes
27141  *        true               x          x      |      Yes
27142  *    -----------------------------------------------------------
27143  *
27144  *
27145  * 5. Supported blocksizes of target devices
27146  *
27147  *    Sd supports non-512-byte blocksizes for removable media devices only.
27148  *    For other devices, only a 512-byte blocksize is supported. This may be
27149  *    changed in the near future because some RAID devices require
27150  *    non-512-byte blocksizes.
27151  *
27152  *    -----------------------------------------------------------
27153  *    removable media  hotpluggable  |  non-512-byte blocksize
27154  *    -----------------------------------------------------------
27155  *        false            false     |          No
27156  *        false            true      |          No
27157  *        true               x       |          Yes
27158  *    -----------------------------------------------------------
27159  *
27160  *
27161  * 6. Automatic mount & unmount
27162  *
27163  *    The sd(7d) driver provides the DKIOCREMOVABLE ioctl, which is used to
27164  *    query whether a device is a removable media device. It returns 1 for
27165  *    removable media devices, and 0 for others.
27166  *
27167  *    The automatic mounting subsystem should distinguish between the types
27168  *    of devices and apply automounting policies to each.
27169  *
27170  *
27171  * 7. fdisk partition management
27172  *
27173  *    Fdisk is the traditional partitioning method on the x86 platform. The
27174  *    sd(7d) driver supports fdisk partitions only on x86. On the SPARC
27175  *    platform, sd doesn't support fdisk partitions at all. Note: pcfs(7fs)
27176  *    can recognize fdisk partitions on both the x86 and SPARC platforms.
27177  *
27178  *    -----------------------------------------------------------
27179  *    platform   removable media   USB/1394  |  fdisk supported
27180  *    -----------------------------------------------------------
27181  *      x86            X              X      |       true
27182  *    -----------------------------------------------------------
27183  *      sparc          X              X      |       false
27184  *    -----------------------------------------------------------
27185  *
27186  *
27187  * 8. MBOOT/MBR
27188  *
27189  *    Although sd(7d) doesn't support fdisk on the SPARC platform, it does
27190  *    support reading/writing the mboot for removable media devices on SPARC.
27191  *
27192  *    -----------------------------------------------------------
27193  *    platform   removable media   USB/1394  |  mboot supported
27194  *    -----------------------------------------------------------
27195  *      x86            X              X      |       true
27196  *    -----------------------------------------------------------
27197  *      sparc        false          false    |       false
27198  *      sparc        false          true     |       true
27199  *      sparc        true           false    |       true
27200  *      sparc        true           true     |       true
27201  *    -----------------------------------------------------------
27202  *
27203  *
27204  * 9. Error handling during opening a device
27205  *
27206  *    If opening a disk device fails, an errno is returned. For some kinds
27207  *    of errors, a different errno is returned depending on whether the
27208  *    device is a removable media device. This brings USB/1394 hard disks
27209  *    in line with expected hard disk behavior. It is not expected that this
27210  *    breaks any application.
27211  *
27212  *    ------------------------------------------------------
27213  *    removable media  hotpluggable  |  errno
27214  *    ------------------------------------------------------
27215  *        false            false     |  EIO
27216  *        false            true      |  EIO
27217  *        true               x       |  ENXIO
27218  *    ------------------------------------------------------
27219  *
27220  *
27221  * 10. ioctls: DKIOCEJECT, CDROMEJECT
27222  *
27223  *    These IOCTLs are applicable only to removable media devices.
27224  *
27225  *    -----------------------------------------------------------
27226  *    removable media  hotpluggable  |  DKIOCEJECT, CDROMEJECT
27227  *    -----------------------------------------------------------
27228  *        false            false     |          No
27229  *        false            true      |          No
27230  *        true               x       |          Yes
27231  *    -----------------------------------------------------------
27232  *
27233  *
27234  * 11. Kstats for partitions
27235  *
27236  *    sd creates partition kstats for non-removable media devices. USB and
27237  *    Firewire hard disks now have partition kstats.
27238  *
27239  *    ------------------------------------------------------
27240  *    removable media  hotpluggable  |  kstat
27241  *    ------------------------------------------------------
27242  *        false            false     |  Yes
27243  *        false            true      |  Yes
27244  *        true               x       |  No
27245  *    ------------------------------------------------------
27246  *
27247  *
27248  * 12. Removable media & hotpluggable properties
27249  *
27250  *    The sd driver creates a "removable-media" property for removable media
27251  *    devices. Parent nexus drivers create a "hotpluggable" property if they
27252  *    support hotplugging.
27253  *
27254  *    ---------------------------------------------------------------------
27255  *    removable media  hotpluggable  |  "removable-media"  "hotpluggable"
27256  *    ---------------------------------------------------------------------
27257  *        false            false     |         No                No
27258  *        false            true      |         No                Yes
27259  *        true             false     |         Yes               No
27260  *        true             true      |         Yes               Yes
27261  *    ---------------------------------------------------------------------
27262  *
27263  *
27264  * 13. Power Management
27265  *
27266  *    sd only power manages removable media devices or devices that support
27267  *    LOG_SENSE or have a "pm-capable" property (PSARC/2002/250).
27268  *
27269  *    A parent nexus that supports hotplugging can also set "pm-capable"
27270  *    if the disk can be power managed.
27271  *
27272  *    ------------------------------------------------------------
27273  *    removable media  hotpluggable  pm-capable  |  power manage
27274  *    ------------------------------------------------------------
27275  *        false            false       false     |      No
27276  *        false            false       true      |      Yes
27277  *        false            true        false     |      No
27278  *        false            true        true      |      Yes
27279  *        true               x           x       |      Yes
27280  *    ------------------------------------------------------------
27281  *
27282  *    USB and firewire hard disks can now be power managed independently
27283  *    of the framebuffer.
27284  *
27285  *
27286  * 14. Support for USB disks with capacity larger than 1TB
27287  *
27288  *    Currently, sd doesn't permit a fixed disk device with a capacity
27289  *    larger than 1TB to be used in a 32-bit operating system environment.
27290  *    However, sd doesn't do that for removable media devices. Instead, it
27291  *    assumes that removable media devices cannot have a capacity larger
27292  *    than 1TB. Therefore, using those devices on a 32-bit system is only
27293  *    partially supported, which can cause some unexpected results.
27294  *
27295  *    ---------------------------------------------------------------------
27296  *    removable media    USB/1394  |  Capacity > 1TB  |  Used in 32-bit env
27297  *    ---------------------------------------------------------------------
27298  *        false            false   |       true       |         No
27299  *        false            true    |       true       |         No
27300  *        true             false   |       true       |         Yes
27301  *        true             true    |       true       |         Yes
27302  *    ---------------------------------------------------------------------
27303  *
27304  *
27305  * 15. Check write-protection at open time
27306  *
27307  *    When a removable media device is opened for writing without the NDELAY
27308  *    flag, sd will check whether the device is writable. If a
27309  *    write-protected device is opened for writing without the NDELAY flag,
27310  *    the open will abort.
27311  *
27312  *    ------------------------------------------------------------
27313  *    removable media    USB/1394   |   WP Check
27314  *    ------------------------------------------------------------
27315  *        false            false    |     No
27316  *        false            true     |     No
27317  *        true             false    |     Yes
27318  *        true             true     |     Yes
27319  *    ------------------------------------------------------------
27320  *
27321  *
27322  * 16. Syslog when a corrupted VTOC is encountered
27323  *
27324  *    Currently, if an invalid VTOC is encountered, sd only prints a syslog
27325  *    message for fixed SCSI disks.
27326  *    ------------------------------------------------------------
27327  *    removable media    USB/1394   |   print syslog
27328  *    ------------------------------------------------------------
27329  *        false            false    |     Yes
27330  *        false            true     |     No
27331  *        true             false    |     No
27332  *        true             true     |     No
27333  *    ------------------------------------------------------------
27334  */
27335 static void
27336 sd_set_unit_attributes(struct sd_lun *un, dev_info_t *devi)
27337 {
27338	int	pm_capable_prop;
27339
27340	ASSERT(un->un_sd);
27341	ASSERT(un->un_sd->sd_inq);
27342
27343	/*
27344	 * Enable SYNC CACHE support for all devices.
27345	 */
27346	un->un_f_sync_cache_supported = TRUE;
27347
27348	if (un->un_sd->sd_inq->inq_rmb) {
27349		/*
27350		 * The media of this device is removable, and for this kind
27351		 * of device it is possible to change the medium after
27351		 * opening it. Thus we should support this operation.
static void
sd_set_unit_attributes(struct sd_lun *un, dev_info_t *devi)
{
	int	pm_capable_prop;

	ASSERT(un->un_sd);
	ASSERT(un->un_sd->sd_inq);

	/*
	 * Enable SYNC CACHE support for all devices.
	 */
	un->un_f_sync_cache_supported = TRUE;

	if (un->un_sd->sd_inq->inq_rmb) {
		/*
		 * The media of this device is removable, so the medium may
		 * be changed after the device has been opened. This
		 * operation must be supported.
		 */
		un->un_f_has_removable_media = TRUE;

		/*
		 * Support non-512-byte block sizes on removable media
		 * devices.
		 */
		un->un_f_non_devbsize_supported = TRUE;

		/*
		 * Assume that all removable media devices support DOOR_LOCK.
		 */
		un->un_f_doorlock_supported = TRUE;

		/*
		 * A removable media device may be opened with the NDELAY
		 * flag when there is no media in the drive; in that case we
		 * don't care whether the device is writable. Without the
		 * NDELAY flag, however, we need to check whether the media
		 * is write-protected.
		 */
		un->un_f_chk_wp_open = TRUE;

		/*
		 * Need to start a SCSI watch thread to monitor the media
		 * state; when media is inserted or ejected, notify
		 * syseventd.
		 */
		un->un_f_monitor_media_state = TRUE;

		/*
		 * Some devices don't support the START_STOP_UNIT command,
		 * so check whether a device supports it before sending it.
		 */
		un->un_f_check_start_stop = TRUE;

		/*
		 * Support the eject media ioctls:
		 * FDEJECT, DKIOCEJECT, CDROMEJECT
		 */
		un->un_f_eject_media_supported = TRUE;

		/*
		 * Because many removable-media devices don't support
		 * LOG_SENSE, we cannot use that command to check whether
		 * a removable media device supports power management.
		 * Assume that such devices support power management via
		 * the START_STOP_UNIT command and can be spun up and down
		 * without limitations.
		 */
		un->un_f_pm_supported = TRUE;

		/*
		 * Need to create a zero-length (boolean) property
		 * "removable-media" for removable media devices.
		 * Note that the return value of the property is not
		 * checked: if the property cannot be created, we do not
		 * want the attach to fail altogether. This is consistent
		 * with other property creation during attach.
		 */
		(void) ddi_prop_create(DDI_DEV_T_NONE, devi,
		    DDI_PROP_CANSLEEP, "removable-media", NULL, 0);

	} else {
		/*
		 * Create a device ID for the device.
		 */
		un->un_f_devid_supported = TRUE;

		/*
		 * Spin up non-removable-media devices once they are
		 * attached.
		 */
		un->un_f_attach_spinup = TRUE;

		/*
		 * According to the SCSI specification, sense data comes in
		 * two formats: fixed format and descriptor format. At
		 * present, we don't support descriptor format sense data
		 * for removable media.
		 */
		if (SD_INQUIRY(un)->inq_dtype == DTYPE_DIRECT) {
			un->un_f_descr_format_supported = TRUE;
		}

		/*
		 * kstats are created only for non-removable media devices.
		 *
		 * Set "enable-partition-kstats" to 0 in sd.conf to disable
		 * partition kstats. The default is 1, so they are enabled
		 * by default.
		 */
		un->un_f_pkstats_enabled = (ddi_prop_get_int(DDI_DEV_T_ANY,
		    SD_DEVINFO(un), DDI_PROP_DONTPASS,
		    "enable-partition-kstats", 1));

		/*
		 * Check if the HBA has set the "pm-capable" property.
		 * If "pm-capable" exists and is non-zero then we can
		 * power manage the device without checking the start/stop
		 * cycle count log sense page.
		 *
		 * If "pm-capable" exists and is SD_PM_CAPABLE_FALSE (0)
		 * then we should not power manage the device.
		 *
		 * If "pm-capable" doesn't exist then pm_capable_prop will
		 * be set to SD_PM_CAPABLE_UNDEFINED (-1). In this case,
		 * sd will check the start/stop cycle count log sense page
		 * and power manage the device if the cycle count limit has
		 * not been exceeded.
		 */
		pm_capable_prop = ddi_prop_get_int(DDI_DEV_T_ANY, devi,
		    DDI_PROP_DONTPASS, "pm-capable", SD_PM_CAPABLE_UNDEFINED);
		if (pm_capable_prop == SD_PM_CAPABLE_UNDEFINED) {
			un->un_f_log_sense_supported = TRUE;
		} else {
			/*
			 * The pm-capable property exists.
			 *
			 * Convert "TRUE" values of pm_capable_prop to
			 * SD_PM_CAPABLE_TRUE (1) to make later checks
			 * easier. "TRUE" values are any values except
			 * SD_PM_CAPABLE_FALSE (0) and
			 * SD_PM_CAPABLE_UNDEFINED (-1).
			 */
			if (pm_capable_prop == SD_PM_CAPABLE_FALSE) {
				un->un_f_log_sense_supported = FALSE;
			} else {
				un->un_f_pm_supported = TRUE;
			}

			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_unit_attach: un:0x%p pm-capable "
			    "property set to %d.\n", un, un->un_f_pm_supported);
		}
	}

	if (un->un_f_is_hotpluggable) {
		/*
		 * Hotpluggable devices must be watched as well, since that
		 * is the only way for userland applications to detect hot
		 * removal while the device is busy/mounted.
		 */
		un->un_f_monitor_media_state = TRUE;

		un->un_f_check_start_stop = TRUE;
	}
}
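
/*
 * The zero-length "removable-media" property created above is a boolean:
 * its mere existence marks the device. A minimal sketch of how another
 * kernel component might test it with ddi_prop_exists(9F) (hypothetical
 * example, not used by sd itself):
 */
#if 0
static int
xx_is_removable_media(dev_info_t *dip)
{
	/* Non-zero iff the boolean property exists on this node. */
	return (ddi_prop_exists(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "removable-media"));
}
#endif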

/*
 * sd_tg_rdwr:
 *	Provides rdwr access for cmlb via sd_tgops. The start_block is
 *	in system block size, reqlength in bytes.
 */
static int
sd_tg_rdwr(dev_info_t *devi, uchar_t cmd, void *bufaddr,
    diskaddr_t start_block, size_t reqlength, void *tg_cookie)
{
	struct sd_lun *un;
	int path_flag = (int)(uintptr_t)tg_cookie;
	char *dkl = NULL;
	diskaddr_t real_addr = start_block;
	diskaddr_t first_byte, end_block;

	size_t buffer_size = reqlength;
	int rval;
	diskaddr_t cap;
	uint32_t lbasize;

	un = ddi_get_soft_state(sd_state, ddi_get_instance(devi));
	if (un == NULL)
		return (ENXIO);

	if (cmd != TG_READ && cmd != TG_WRITE)
		return (EINVAL);

	mutex_enter(SD_MUTEX(un));
	if (un->un_f_tgt_blocksize_is_valid == FALSE) {
		mutex_exit(SD_MUTEX(un));
		rval = sd_send_scsi_READ_CAPACITY(un, (uint64_t *)&cap,
		    &lbasize, path_flag);
		if (rval != 0)
			return (rval);
		mutex_enter(SD_MUTEX(un));
		sd_update_block_info(un, lbasize, cap);
		if (un->un_f_tgt_blocksize_is_valid == FALSE) {
			mutex_exit(SD_MUTEX(un));
			return (EIO);
		}
	}

	if (NOT_DEVBSIZE(un)) {
		/*
		 * sys_blocksize != tgt_blocksize: re-adjust the block
		 * address and save the offset to the beginning of dk_label.
		 */
		first_byte = SD_SYSBLOCKS2BYTES(un, start_block);
		real_addr = first_byte / un->un_tgt_blocksize;

		end_block = (first_byte + reqlength +
		    un->un_tgt_blocksize - 1) / un->un_tgt_blocksize;

		/* round up buffer size to multiple of target block size */
		buffer_size = (end_block - real_addr) * un->un_tgt_blocksize;

		SD_TRACE(SD_LOG_IO_PARTITION, un, "sd_tg_rdwr",
		    "label_addr: 0x%x allocation size: 0x%x\n",
		    real_addr, buffer_size);

		if (((first_byte % un->un_tgt_blocksize) != 0) ||
		    (reqlength % un->un_tgt_blocksize) != 0)
			/* the request is not aligned */
			dkl = kmem_zalloc(buffer_size, KM_SLEEP);
	}

	/*
	 * The MMC standard allows READ CAPACITY to be
	 * inaccurate by a bounded amount (in the interest of
	 * response latency). As a result, failed READs are
	 * commonplace (due to the reading of metadata and not
	 * data). Depending on the per-Vendor/drive Sense data,
	 * the failed READ can cause many (unnecessary) retries.
	 */
	if (ISCD(un) && (cmd == TG_READ) &&
	    (un->un_f_blockcount_is_valid == TRUE) &&
	    ((start_block == (un->un_blockcount - 1)) ||
	    (start_block == (un->un_blockcount - 2)))) {
		path_flag = SD_PATH_DIRECT_PRIORITY;
	}

	mutex_exit(SD_MUTEX(un));
	if (cmd == TG_READ) {
		rval = sd_send_scsi_READ(un, (dkl != NULL) ? dkl : bufaddr,
		    buffer_size, real_addr, path_flag);
		if (dkl != NULL)
			bcopy(dkl + SD_TGTBYTEOFFSET(un, start_block,
			    real_addr), bufaddr, reqlength);
	} else {
		if (dkl) {
			rval = sd_send_scsi_READ(un, dkl, buffer_size,
			    real_addr, path_flag);
			if (rval) {
				kmem_free(dkl, buffer_size);
				return (rval);
			}
			bcopy(bufaddr, dkl + SD_TGTBYTEOFFSET(un, start_block,
			    real_addr), reqlength);
		}
		rval = sd_send_scsi_WRITE(un, (dkl != NULL) ? dkl : bufaddr,
		    buffer_size, real_addr, path_flag);
	}

	if (dkl != NULL)
		kmem_free(dkl, buffer_size);

	return (rval);
}
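
/*
 * Worked example for the NOT_DEVBSIZE() path in sd_tg_rdwr() above
 * (illustrative numbers): assume a 512-byte system block size, a
 * 2048-byte target block size, start_block = 3 and reqlength = 1024.
 * Then:
 *
 *	first_byte  = 3 * 512                      = 1536
 *	real_addr   = 1536 / 2048                  = 0
 *	end_block   = (1536 + 1024 + 2047) / 2048  = 2
 *	buffer_size = (2 - 0) * 2048               = 4096
 *
 * Since first_byte is not a multiple of the target block size, the
 * request is unaligned: a scratch buffer (dkl) is allocated, whole
 * target blocks are transferred, and the caller's 1024 bytes are copied
 * in or out at byte offset 1536 within that buffer.
 */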

static int
sd_tg_getinfo(dev_info_t *devi, int cmd, void *arg, void *tg_cookie)
{
	struct sd_lun *un;
	diskaddr_t cap;
	uint32_t lbasize;
	int path_flag = (int)(uintptr_t)tg_cookie;
	int ret = 0;

	un = ddi_get_soft_state(sd_state, ddi_get_instance(devi));
	if (un == NULL)
		return (ENXIO);

	switch (cmd) {
	case TG_GETPHYGEOM:
	case TG_GETVIRTGEOM:
	case TG_GETCAPACITY:
	case TG_GETBLOCKSIZE:
		mutex_enter(SD_MUTEX(un));

		if ((un->un_f_blockcount_is_valid == TRUE) &&
		    (un->un_f_tgt_blocksize_is_valid == TRUE)) {
			cap = un->un_blockcount;
			lbasize = un->un_tgt_blocksize;
			mutex_exit(SD_MUTEX(un));
		} else {
			mutex_exit(SD_MUTEX(un));
			ret = sd_send_scsi_READ_CAPACITY(un, (uint64_t *)&cap,
			    &lbasize, path_flag);
			if (ret != 0)
				return (ret);
			mutex_enter(SD_MUTEX(un));
			sd_update_block_info(un, lbasize, cap);
			if ((un->un_f_blockcount_is_valid == FALSE) ||
			    (un->un_f_tgt_blocksize_is_valid == FALSE)) {
				mutex_exit(SD_MUTEX(un));
				return (EIO);
			}
			mutex_exit(SD_MUTEX(un));
		}

		if (cmd == TG_GETCAPACITY) {
			*(diskaddr_t *)arg = cap;
			return (0);
		}

		if (cmd == TG_GETBLOCKSIZE) {
			*(uint32_t *)arg = lbasize;
			return (0);
		}

		if (cmd == TG_GETPHYGEOM)
			ret = sd_get_physical_geometry(un, (cmlb_geom_t *)arg,
			    cap, lbasize, path_flag);
		else
			/* TG_GETVIRTGEOM */
			ret = sd_get_virtual_geometry(un,
			    (cmlb_geom_t *)arg, cap, lbasize);

		return (ret);

	case TG_GETATTR:
		mutex_enter(SD_MUTEX(un));
		((tg_attribute_t *)arg)->media_is_writable =
		    un->un_f_mmc_writable_media;
		mutex_exit(SD_MUTEX(un));
		return (0);

	default:
		return (ENOTTY);
	}
}
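
/*
 * For illustration, a cmlb-style consumer could retrieve the capacity
 * and block size through the callbacks above roughly as sketched below
 * (hypothetical helper; the actual cmlb implementation may differ):
 */
#if 0
static int
xx_get_disk_size(dev_info_t *devi, uint64_t *nbytes)
{
	diskaddr_t	cap;		/* capacity in target blocks */
	uint32_t	lbasize;	/* target block size in bytes */
	int		rval;

	/* tg_cookie carries the sd path flag, per sd_tg_getinfo() */
	rval = sd_tg_getinfo(devi, TG_GETCAPACITY, (void *)&cap,
	    (void *)(uintptr_t)SD_PATH_DIRECT);
	if (rval != 0)
		return (rval);

	rval = sd_tg_getinfo(devi, TG_GETBLOCKSIZE, (void *)&lbasize,
	    (void *)(uintptr_t)SD_PATH_DIRECT);
	if (rval != 0)
		return (rval);

	*nbytes = (uint64_t)cap * lbasize;
	return (0);
}
#endif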