/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * SCSI disk target driver.
 */
#include <sys/scsi/scsi.h>
#include <sys/dkbad.h>
#include <sys/dklabel.h>
#include <sys/dkio.h>
#include <sys/fdio.h>
#include <sys/cdio.h>
#include <sys/mhd.h>
#include <sys/vtoc.h>
#include <sys/dktp/fdisk.h>
#include <sys/kstat.h>
#include <sys/vtrace.h>
#include <sys/note.h>
#include <sys/thread.h>
#include <sys/proc.h>
#include <sys/efi_partition.h>
#include <sys/var.h>
#include <sys/aio_req.h>

#ifdef __lock_lint
#define	_LP64
#define	__amd64
#endif

#if (defined(__fibre))
/* Note: is there a leadville version of the following? */
#include <sys/fc4/fcal_linkapp.h>
#endif
#include <sys/taskq.h>
#include <sys/uuid.h>
#include <sys/byteorder.h>
#include <sys/sdt.h>

#include "sd_xbuf.h"

#include <sys/scsi/targets/sddef.h>
#include <sys/cmlb.h>
#include <sys/sysevent/eventdefs.h>
#include <sys/sysevent/dev.h>


/*
 * Loadable module info.
 */
#if (defined(__fibre))
#define	SD_MODULE_NAME	"SCSI SSA/FCAL Disk Driver"
char _depends_on[] = "misc/scsi misc/cmlb drv/fcp";
#else
#define	SD_MODULE_NAME	"SCSI Disk Driver"
char _depends_on[] = "misc/scsi misc/cmlb";
#endif

/*
 * Define the interconnect type, to allow the driver to distinguish
 * between parallel SCSI (sd) and fibre channel (ssd) behaviors.
 *
 * This is really for backward compatibility. In the future, the driver
 * should actually check the "interconnect-type" property as reported by
 * the HBA; however at present this property is not defined by all HBAs,
 * so we will use this #define (1) to permit the driver to run in
 * backward-compatibility mode; and (2) to print a notification message
 * if an FC HBA does not support the "interconnect-type" property. The
 * behavior of the driver will be to assume parallel SCSI behaviors unless
 * the "interconnect-type" property is defined by the HBA **AND** has a
 * value of either INTERCONNECT_FIBRE, INTERCONNECT_SSA, or
 * INTERCONNECT_FABRIC, in which case the driver will assume Fibre
 * Channel behaviors (as per the old ssd). (Note that the
 * INTERCONNECT_1394 and INTERCONNECT_USB types are not supported and
 * will result in the driver assuming parallel SCSI behaviors.)
 *
 * (see common/sys/scsi/impl/services.h)
 *
 * Note: For ssd semantics, don't use INTERCONNECT_FABRIC as the default
 * since some FC HBAs may already support that, and there is some code in
 * the driver that already looks for it.  Using INTERCONNECT_FABRIC as the
 * default would confuse that code, and besides things should work fine
 * anyway if the FC HBA already reports INTERCONNECT_FABRIC for the
 * "interconnect-type" property.
 *
 */
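/*
 * As a rough sketch (not the driver's literal code), the capability
 * described above can be queried through scsi_ifgetcap(9F); the
 * INTERCONNECT_* values come from common/sys/scsi/impl/services.h:
 *
 *	int ict = scsi_ifgetcap(SD_ADDRESS(un), "interconnect-type", 1);
 *	if ((ict == INTERCONNECT_FIBRE) || (ict == INTERCONNECT_SSA) ||
 *	    (ict == INTERCONNECT_FABRIC)) {
 *		... assume fibre channel (ssd) behaviors ...
 *	} else {
 *		... assume parallel SCSI behaviors ...
 *	}
 */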
#if (defined(__fibre))
#define	SD_DEFAULT_INTERCONNECT_TYPE	SD_INTERCONNECT_FIBRE
#else
#define	SD_DEFAULT_INTERCONNECT_TYPE	SD_INTERCONNECT_PARALLEL
#endif

/*
 * The name of the driver, established from the module name in _init.
 */
static char *sd_label = NULL;

/*
 * Driver name is unfortunately prefixed on some driver.conf properties.
 */
#if (defined(__fibre))
#define	sd_max_xfer_size	ssd_max_xfer_size
#define	sd_config_list		ssd_config_list
static char *sd_max_xfer_size = "ssd_max_xfer_size";
static char *sd_config_list = "ssd-config-list";
#else
static char *sd_max_xfer_size = "sd_max_xfer_size";
static char *sd_config_list = "sd-config-list";
#endif
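/*
 * A hypothetical driver.conf entry using the list named above (for
 * illustration only; the vid portion must be blank-padded to 8 bytes,
 * and the recognized name:value pairs are decoded by sd_set_properties()):
 *
 *	sd-config-list = "SEAGATE ST39103FC", "throttle-max:64";
 */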

/*
 * Driver global variables
 */

#if (defined(__fibre))
/*
 * These #defines are to avoid namespace collisions that occur because this
 * code is currently used to compile two separate driver modules: sd and ssd.
 * All global variables need to be treated this way (even if declared static)
 * in order to allow the debugger to resolve the names properly.
 * It is anticipated that in the near future the ssd module will be obsoleted,
 * at which time this namespace issue should go away.
 */
#define	sd_state			ssd_state
#define	sd_io_time			ssd_io_time
#define	sd_failfast_enable		ssd_failfast_enable
#define	sd_ua_retry_count		ssd_ua_retry_count
#define	sd_report_pfa			ssd_report_pfa
#define	sd_max_throttle			ssd_max_throttle
#define	sd_min_throttle			ssd_min_throttle
#define	sd_rot_delay			ssd_rot_delay

#define	sd_retry_on_reservation_conflict	\
					ssd_retry_on_reservation_conflict
#define	sd_reinstate_resv_delay		ssd_reinstate_resv_delay
#define	sd_resv_conflict_name		ssd_resv_conflict_name

#define	sd_component_mask		ssd_component_mask
#define	sd_level_mask			ssd_level_mask
#define	sd_debug_un			ssd_debug_un
#define	sd_error_level			ssd_error_level

#define	sd_xbuf_active_limit		ssd_xbuf_active_limit
#define	sd_xbuf_reserve_limit		ssd_xbuf_reserve_limit

#define	sd_tr				ssd_tr
#define	sd_reset_throttle_timeout	ssd_reset_throttle_timeout
#define	sd_qfull_throttle_timeout	ssd_qfull_throttle_timeout
#define	sd_qfull_throttle_enable	ssd_qfull_throttle_enable
#define	sd_check_media_time		ssd_check_media_time
#define	sd_wait_cmds_complete		ssd_wait_cmds_complete
#define	sd_label_mutex			ssd_label_mutex
#define	sd_detach_mutex			ssd_detach_mutex
#define	sd_log_buf			ssd_log_buf
#define	sd_log_mutex			ssd_log_mutex

#define	sd_disk_table			ssd_disk_table
#define	sd_disk_table_size		ssd_disk_table_size
#define	sd_sense_mutex			ssd_sense_mutex
#define	sd_cdbtab			ssd_cdbtab

#define	sd_cb_ops			ssd_cb_ops
#define	sd_ops				ssd_ops
#define	sd_additional_codes		ssd_additional_codes
#define	sd_tgops			ssd_tgops

#define	sd_minor_data			ssd_minor_data
#define	sd_minor_data_efi		ssd_minor_data_efi

#define	sd_tq				ssd_tq
#define	sd_wmr_tq			ssd_wmr_tq
#define	sd_taskq_name			ssd_taskq_name
#define	sd_wmr_taskq_name		ssd_wmr_taskq_name
#define	sd_taskq_minalloc		ssd_taskq_minalloc
#define	sd_taskq_maxalloc		ssd_taskq_maxalloc

#define	sd_dump_format_string		ssd_dump_format_string

#define	sd_iostart_chain		ssd_iostart_chain
#define	sd_iodone_chain			ssd_iodone_chain

#define	sd_pm_idletime			ssd_pm_idletime

#define	sd_force_pm_supported		ssd_force_pm_supported

#define	sd_dtype_optical_bind		ssd_dtype_optical_bind

#endif


#ifdef	SDDEBUG
int	sd_force_pm_supported		= 0;
#endif	/* SDDEBUG */

void	*sd_state			= NULL;
int	sd_io_time			= SD_IO_TIME;
int	sd_failfast_enable		= 1;
int	sd_ua_retry_count		= SD_UA_RETRY_COUNT;
int	sd_report_pfa			= 1;
int	sd_max_throttle			= SD_MAX_THROTTLE;
int	sd_min_throttle			= SD_MIN_THROTTLE;
int	sd_rot_delay			= 4; /* Default 4ms Rotation delay */
int	sd_qfull_throttle_enable	= TRUE;

int	sd_retry_on_reservation_conflict	= 1;
int	sd_reinstate_resv_delay		= SD_REINSTATE_RESV_DELAY;
_NOTE(SCHEME_PROTECTS_DATA("safe sharing", sd_reinstate_resv_delay))

static int sd_dtype_optical_bind	= -1;

/* Note: the following is not a bug, it really is "sd_" and not "ssd_" */
static	char *sd_resv_conflict_name	= "sd_retry_on_reservation_conflict";

/*
 * Global data for debug logging. To enable debug printing, sd_component_mask
 * and sd_level_mask should be set to the desired bit patterns as outlined in
 * sddef.h.
 */
uint_t	sd_component_mask		= 0x0;
uint_t	sd_level_mask			= 0x0;
struct	sd_lun *sd_debug_un		= NULL;
uint_t	sd_error_level			= SCSI_ERR_RETRYABLE;

/* Note: these may go away in the future... */
static uint32_t	sd_xbuf_active_limit	= 512;
static uint32_t sd_xbuf_reserve_limit	= 16;

static struct sd_resv_reclaim_request	sd_tr = { NULL, NULL, NULL, 0, 0, 0 };

/*
 * Timer value used to reset the throttle after it has been reduced
 * (typically in response to TRAN_BUSY or STATUS_QFULL)
 */
static int sd_reset_throttle_timeout	= SD_RESET_THROTTLE_TIMEOUT;
static int sd_qfull_throttle_timeout	= SD_QFULL_THROTTLE_TIMEOUT;

/*
 * Interval value associated with the media change scsi watch.
 */
static int sd_check_media_time		= 3000000;

/*
 * Wait value used for in progress operations during a DDI_SUSPEND
 */
static int sd_wait_cmds_complete	= SD_WAIT_CMDS_COMPLETE;

/*
 * sd_label_mutex protects a static buffer used in the disk label
 * component of the driver
 */
static kmutex_t sd_label_mutex;

/*
 * sd_detach_mutex protects un_layer_count, un_detach_count, and
 * un_opens_in_progress in the sd_lun structure.
 */
static kmutex_t sd_detach_mutex;

_NOTE(MUTEX_PROTECTS_DATA(sd_detach_mutex,
	sd_lun::{un_layer_count un_detach_count un_opens_in_progress}))

/*
 * Global buffer and mutex for debug logging
 */
static char	sd_log_buf[1024];
static kmutex_t	sd_log_mutex;

/*
 * Structs and globals for recording attached lun information.
 * This maintains a chain. Each node in the chain represents a SCSI controller.
 * The structure records the number of luns attached to each target connected
 * with the controller.
 * For parallel SCSI devices only.
 */
struct sd_scsi_hba_tgt_lun {
	struct sd_scsi_hba_tgt_lun	*next;
	dev_info_t			*pdip;
	int				nlun[NTARGETS_WIDE];
};

/*
 * Flag to indicate the lun is attached or detached
 */
#define	SD_SCSI_LUN_ATTACH	0
#define	SD_SCSI_LUN_DETACH	1

static kmutex_t	sd_scsi_target_lun_mutex;
static struct sd_scsi_hba_tgt_lun	*sd_scsi_target_lun_head = NULL;

_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_target_lun_mutex,
    sd_scsi_hba_tgt_lun::next sd_scsi_hba_tgt_lun::pdip))

_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_target_lun_mutex,
    sd_scsi_target_lun_head))
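/*
 * A sketch (not the exact driver code) of how the chain above is
 * searched for the node belonging to a given controller, so that the
 * per-target lun counts can be consulted or updated:
 *
 *	struct sd_scsi_hba_tgt_lun *cp;
 *
 *	mutex_enter(&sd_scsi_target_lun_mutex);
 *	for (cp = sd_scsi_target_lun_head; cp != NULL; cp = cp->next) {
 *		if (cp->pdip == pdip)
 *			break;	... cp->nlun[target] holds the count ...
 *	}
 *	mutex_exit(&sd_scsi_target_lun_mutex);
 */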
295 */ 296 struct sd_scsi_hba_tgt_lun { 297 struct sd_scsi_hba_tgt_lun *next; 298 dev_info_t *pdip; 299 int nlun[NTARGETS_WIDE]; 300 }; 301 302 /* 303 * Flag to indicate the lun is attached or detached 304 */ 305 #define SD_SCSI_LUN_ATTACH 0 306 #define SD_SCSI_LUN_DETACH 1 307 308 static kmutex_t sd_scsi_target_lun_mutex; 309 static struct sd_scsi_hba_tgt_lun *sd_scsi_target_lun_head = NULL; 310 311 _NOTE(MUTEX_PROTECTS_DATA(sd_scsi_target_lun_mutex, 312 sd_scsi_hba_tgt_lun::next sd_scsi_hba_tgt_lun::pdip)) 313 314 _NOTE(MUTEX_PROTECTS_DATA(sd_scsi_target_lun_mutex, 315 sd_scsi_target_lun_head)) 316 317 /* 318 * "Smart" Probe Caching structs, globals, #defines, etc. 319 * For parallel scsi and non-self-identify device only. 320 */ 321 322 /* 323 * The following resources and routines are implemented to support 324 * "smart" probing, which caches the scsi_probe() results in an array, 325 * in order to help avoid long probe times. 326 */ 327 struct sd_scsi_probe_cache { 328 struct sd_scsi_probe_cache *next; 329 dev_info_t *pdip; 330 int cache[NTARGETS_WIDE]; 331 }; 332 333 static kmutex_t sd_scsi_probe_cache_mutex; 334 static struct sd_scsi_probe_cache *sd_scsi_probe_cache_head = NULL; 335 336 /* 337 * Really we only need protection on the head of the linked list, but 338 * better safe than sorry. 339 */ 340 _NOTE(MUTEX_PROTECTS_DATA(sd_scsi_probe_cache_mutex, 341 sd_scsi_probe_cache::next sd_scsi_probe_cache::pdip)) 342 343 _NOTE(MUTEX_PROTECTS_DATA(sd_scsi_probe_cache_mutex, 344 sd_scsi_probe_cache_head)) 345 346 347 /* 348 * Vendor specific data name property declarations 349 */ 350 351 #if defined(__fibre) || defined(__i386) ||defined(__amd64) 352 353 static sd_tunables seagate_properties = { 354 SEAGATE_THROTTLE_VALUE, 355 0, 356 0, 357 0, 358 0, 359 0, 360 0, 361 0, 362 0 363 }; 364 365 366 static sd_tunables fujitsu_properties = { 367 FUJITSU_THROTTLE_VALUE, 368 0, 369 0, 370 0, 371 0, 372 0, 373 0, 374 0, 375 0 376 }; 377 378 static sd_tunables ibm_properties = { 379 IBM_THROTTLE_VALUE, 380 0, 381 0, 382 0, 383 0, 384 0, 385 0, 386 0, 387 0 388 }; 389 390 static sd_tunables purple_properties = { 391 PURPLE_THROTTLE_VALUE, 392 0, 393 0, 394 PURPLE_BUSY_RETRIES, 395 PURPLE_RESET_RETRY_COUNT, 396 PURPLE_RESERVE_RELEASE_TIME, 397 0, 398 0, 399 0 400 }; 401 402 static sd_tunables sve_properties = { 403 SVE_THROTTLE_VALUE, 404 0, 405 0, 406 SVE_BUSY_RETRIES, 407 SVE_RESET_RETRY_COUNT, 408 SVE_RESERVE_RELEASE_TIME, 409 SVE_MIN_THROTTLE_VALUE, 410 SVE_DISKSORT_DISABLED_FLAG, 411 0 412 }; 413 414 static sd_tunables maserati_properties = { 415 0, 416 0, 417 0, 418 0, 419 0, 420 0, 421 0, 422 MASERATI_DISKSORT_DISABLED_FLAG, 423 MASERATI_LUN_RESET_ENABLED_FLAG 424 }; 425 426 static sd_tunables pirus_properties = { 427 PIRUS_THROTTLE_VALUE, 428 0, 429 PIRUS_NRR_COUNT, 430 PIRUS_BUSY_RETRIES, 431 PIRUS_RESET_RETRY_COUNT, 432 0, 433 PIRUS_MIN_THROTTLE_VALUE, 434 PIRUS_DISKSORT_DISABLED_FLAG, 435 PIRUS_LUN_RESET_ENABLED_FLAG 436 }; 437 438 #endif 439 440 #if (defined(__sparc) && !defined(__fibre)) || \ 441 (defined(__i386) || defined(__amd64)) 442 443 444 static sd_tunables elite_properties = { 445 ELITE_THROTTLE_VALUE, 446 0, 447 0, 448 0, 449 0, 450 0, 451 0, 452 0, 453 0 454 }; 455 456 static sd_tunables st31200n_properties = { 457 ST31200N_THROTTLE_VALUE, 458 0, 459 0, 460 0, 461 0, 462 0, 463 0, 464 0, 465 0 466 }; 467 468 #endif /* Fibre or not */ 469 470 static sd_tunables lsi_properties_scsi = { 471 LSI_THROTTLE_VALUE, 472 0, 473 LSI_NOTREADY_RETRIES, 474 0, 475 0, 476 0, 477 0, 478 0, 479 

/*
 * Vendor specific data name property declarations
 */

#if defined(__fibre) || defined(__i386) || defined(__amd64)

static sd_tunables seagate_properties = {
	SEAGATE_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};


static sd_tunables fujitsu_properties = {
	FUJITSU_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables ibm_properties = {
	IBM_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables purple_properties = {
	PURPLE_THROTTLE_VALUE,
	0,
	0,
	PURPLE_BUSY_RETRIES,
	PURPLE_RESET_RETRY_COUNT,
	PURPLE_RESERVE_RELEASE_TIME,
	0,
	0,
	0
};

static sd_tunables sve_properties = {
	SVE_THROTTLE_VALUE,
	0,
	0,
	SVE_BUSY_RETRIES,
	SVE_RESET_RETRY_COUNT,
	SVE_RESERVE_RELEASE_TIME,
	SVE_MIN_THROTTLE_VALUE,
	SVE_DISKSORT_DISABLED_FLAG,
	0
};

static sd_tunables maserati_properties = {
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	MASERATI_DISKSORT_DISABLED_FLAG,
	MASERATI_LUN_RESET_ENABLED_FLAG
};

static sd_tunables pirus_properties = {
	PIRUS_THROTTLE_VALUE,
	0,
	PIRUS_NRR_COUNT,
	PIRUS_BUSY_RETRIES,
	PIRUS_RESET_RETRY_COUNT,
	0,
	PIRUS_MIN_THROTTLE_VALUE,
	PIRUS_DISKSORT_DISABLED_FLAG,
	PIRUS_LUN_RESET_ENABLED_FLAG
};

#endif

#if (defined(__sparc) && !defined(__fibre)) || \
	(defined(__i386) || defined(__amd64))


static sd_tunables elite_properties = {
	ELITE_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables st31200n_properties = {
	ST31200N_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};

#endif /* Fibre or not */

static sd_tunables lsi_properties_scsi = {
	LSI_THROTTLE_VALUE,
	0,
	LSI_NOTREADY_RETRIES,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables symbios_properties = {
	SYMBIOS_THROTTLE_VALUE,
	0,
	SYMBIOS_NOTREADY_RETRIES,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables lsi_properties = {
	0,
	0,
	LSI_NOTREADY_RETRIES,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables lsi_oem_properties = {
	0,
	0,
	LSI_OEM_NOTREADY_RETRIES,
	0,
	0,
	0,
	0,
	0,
	0,
	1
};



#if (defined(SD_PROP_TST))

#define	SD_TST_CTYPE_VAL	CTYPE_CDROM
#define	SD_TST_THROTTLE_VAL	16
#define	SD_TST_NOTREADY_VAL	12
#define	SD_TST_BUSY_VAL		60
#define	SD_TST_RST_RETRY_VAL	36
#define	SD_TST_RSV_REL_TIME	60

static sd_tunables tst_properties = {
	SD_TST_THROTTLE_VAL,
	SD_TST_CTYPE_VAL,
	SD_TST_NOTREADY_VAL,
	SD_TST_BUSY_VAL,
	SD_TST_RST_RETRY_VAL,
	SD_TST_RSV_REL_TIME,
	0,
	0,
	0
};
#endif

/* This is similar to the ANSI toupper implementation */
#define	SD_TOUPPER(C)	(((C) >= 'a' && (C) <= 'z') ? (C) - 'a' + 'A' : (C))

/*
 * Static Driver Configuration Table
 *
 * This is the table of disks which need throttle adjustment (or, perhaps
 * something else as defined by the flags at a future time). device_id
 * is a string consisting of concatenated vid (vendor), pid (product/model)
 * and revision strings as defined in the scsi_inquiry structure. Offsets of
 * the parts of the string are as defined by the sizes in the scsi_inquiry
 * structure. Device type is searched as far as the device_id string is
 * defined. Flags defines which values are to be set in the driver from the
 * properties list.
 *
 * Entries below which begin and end with a "*" are a special case.
 * These do not have a specific vendor, and the string which follows
 * can appear anywhere in the 16 byte PID portion of the inquiry data.
 *
 * Entries below which begin and end with a " " (blank) are a special
 * case. The comparison function will treat multiple consecutive blanks
 * as equivalent to a single blank. For example, this causes a
 * sd_disk_table entry of " NEC CDROM " to match a device's id string
 * of "NEC CDROM".
 *
 * Note: The MD21 controller type has been obsoleted.
 *	 ST318202F is a Legacy device
 *	 MAM3182FC, MAM3364FC, MAM3738FC do not appear to have ever been
 *	 made with an FC connection. The entries here are a legacy.
 */
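/*
 * For example, the "*CSM100_*" entry below has no vendor portion and
 * matches any device whose 16-byte inquiry PID contains "CSM100_"
 * anywhere within it.
 */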
static sd_disk_config_t sd_disk_table[] = {
#if defined(__fibre) || defined(__i386) || defined(__amd64)
	{ "SEAGATE ST34371FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST19171FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST39102FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST39103FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST118273F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST318202F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST318203F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST136403F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST318304F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST336704F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST373405F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST336605F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST336752F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST318452F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "FUJITSU MAG3091F", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAG3182F", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAA3182F", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAF3364F", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAL3364F", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAL3738F", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAM3182FC", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAM3364FC", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAM3738FC", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "IBM     DDYFT1835", SD_CONF_BSET_THROTTLE, &ibm_properties },
	{ "IBM     DDYFT3695", SD_CONF_BSET_THROTTLE, &ibm_properties },
	{ "IBM     IC35LF2D2", SD_CONF_BSET_THROTTLE, &ibm_properties },
	{ "IBM     IC35LF2PR", SD_CONF_BSET_THROTTLE, &ibm_properties },
	{ "IBM     1724-100", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1726-2xx", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1726-22x", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1726-4xx", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1726-42x", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1726-3xx", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     3526", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     3542", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     3552", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1722", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1742", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1815", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     FAStT", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1814", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1814-200", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1818", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "DELL    MD3000", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "DELL    MD3000i", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "LSI     INF", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "ENGENIO INF", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "SGI     TP", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "SGI     IS", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "*CSM100_*", SD_CONF_BSET_NRR_COUNT |
			SD_CONF_BSET_CACHE_IS_NV, &lsi_oem_properties },
	{ "*CSM200_*", SD_CONF_BSET_NRR_COUNT |
			SD_CONF_BSET_CACHE_IS_NV, &lsi_oem_properties },
	{ "Fujitsu SX300", SD_CONF_BSET_THROTTLE, &lsi_oem_properties },
	{ "LSI", SD_CONF_BSET_NRR_COUNT, &lsi_properties },
	{ "SUN     T3", SD_CONF_BSET_THROTTLE |
			SD_CONF_BSET_BSY_RETRY_COUNT |
			SD_CONF_BSET_RST_RETRIES |
			SD_CONF_BSET_RSV_REL_TIME,
		&purple_properties },
	{ "SUN     SESS01", SD_CONF_BSET_THROTTLE |
			SD_CONF_BSET_BSY_RETRY_COUNT |
			SD_CONF_BSET_RST_RETRIES |
			SD_CONF_BSET_RSV_REL_TIME |
			SD_CONF_BSET_MIN_THROTTLE |
			SD_CONF_BSET_DISKSORT_DISABLED,
		&sve_properties },
	{ "SUN     T4", SD_CONF_BSET_THROTTLE |
			SD_CONF_BSET_BSY_RETRY_COUNT |
			SD_CONF_BSET_RST_RETRIES |
			SD_CONF_BSET_RSV_REL_TIME,
		&purple_properties },
	{ "SUN     SVE01", SD_CONF_BSET_DISKSORT_DISABLED |
			SD_CONF_BSET_LUN_RESET_ENABLED,
		&maserati_properties },
	{ "SUN     SE6920", SD_CONF_BSET_THROTTLE |
			SD_CONF_BSET_NRR_COUNT |
			SD_CONF_BSET_BSY_RETRY_COUNT |
			SD_CONF_BSET_RST_RETRIES |
			SD_CONF_BSET_MIN_THROTTLE |
			SD_CONF_BSET_DISKSORT_DISABLED |
			SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN     SE6940", SD_CONF_BSET_THROTTLE |
			SD_CONF_BSET_NRR_COUNT |
			SD_CONF_BSET_BSY_RETRY_COUNT |
			SD_CONF_BSET_RST_RETRIES |
			SD_CONF_BSET_MIN_THROTTLE |
			SD_CONF_BSET_DISKSORT_DISABLED |
			SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN     StorageTek 6920", SD_CONF_BSET_THROTTLE |
			SD_CONF_BSET_NRR_COUNT |
			SD_CONF_BSET_BSY_RETRY_COUNT |
			SD_CONF_BSET_RST_RETRIES |
			SD_CONF_BSET_MIN_THROTTLE |
			SD_CONF_BSET_DISKSORT_DISABLED |
			SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN     StorageTek 6940", SD_CONF_BSET_THROTTLE |
			SD_CONF_BSET_NRR_COUNT |
			SD_CONF_BSET_BSY_RETRY_COUNT |
			SD_CONF_BSET_RST_RETRIES |
			SD_CONF_BSET_MIN_THROTTLE |
			SD_CONF_BSET_DISKSORT_DISABLED |
			SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN     PSX1000", SD_CONF_BSET_THROTTLE |
			SD_CONF_BSET_NRR_COUNT |
			SD_CONF_BSET_BSY_RETRY_COUNT |
			SD_CONF_BSET_RST_RETRIES |
			SD_CONF_BSET_MIN_THROTTLE |
			SD_CONF_BSET_DISKSORT_DISABLED |
			SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN     SE6330", SD_CONF_BSET_THROTTLE |
			SD_CONF_BSET_NRR_COUNT |
			SD_CONF_BSET_BSY_RETRY_COUNT |
			SD_CONF_BSET_RST_RETRIES |
			SD_CONF_BSET_MIN_THROTTLE |
			SD_CONF_BSET_DISKSORT_DISABLED |
			SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN     STK6580_6780", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "STK     OPENstorage", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "STK     OpenStorage", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "STK     BladeCtlr", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "STK     FLEXLINE", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "SYMBIOS", SD_CONF_BSET_NRR_COUNT, &symbios_properties },
#endif /* fibre or NON-sparc platforms */
#if ((defined(__sparc) && !defined(__fibre)) || \
	(defined(__i386) || defined(__amd64)))
	{ "SEAGATE ST42400N", SD_CONF_BSET_THROTTLE, &elite_properties },
	{ "SEAGATE ST31200N", SD_CONF_BSET_THROTTLE, &st31200n_properties },
	{ "SEAGATE ST41600N", SD_CONF_BSET_TUR_CHECK, NULL },
	{ "CONNER  CP30540", SD_CONF_BSET_NOCACHE, NULL },
	{ "*SUN0104*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0207*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0327*", SD_CONF_BSET_FAB_DEVID, NULL },
"*SUN0340*", SD_CONF_BSET_FAB_DEVID, NULL }, 715 { "*SUN0424*", SD_CONF_BSET_FAB_DEVID, NULL }, 716 { "*SUN0669*", SD_CONF_BSET_FAB_DEVID, NULL }, 717 { "*SUN1.0G*", SD_CONF_BSET_FAB_DEVID, NULL }, 718 { "SYMBIOS INF-01-00 ", SD_CONF_BSET_FAB_DEVID, NULL }, 719 { "SYMBIOS", SD_CONF_BSET_THROTTLE|SD_CONF_BSET_NRR_COUNT, 720 &symbios_properties }, 721 { "LSI", SD_CONF_BSET_THROTTLE | SD_CONF_BSET_NRR_COUNT, 722 &lsi_properties_scsi }, 723 #if defined(__i386) || defined(__amd64) 724 { " NEC CD-ROM DRIVE:260 ", (SD_CONF_BSET_PLAYMSF_BCD 725 | SD_CONF_BSET_READSUB_BCD 726 | SD_CONF_BSET_READ_TOC_ADDR_BCD 727 | SD_CONF_BSET_NO_READ_HEADER 728 | SD_CONF_BSET_READ_CD_XD4), NULL }, 729 730 { " NEC CD-ROM DRIVE:270 ", (SD_CONF_BSET_PLAYMSF_BCD 731 | SD_CONF_BSET_READSUB_BCD 732 | SD_CONF_BSET_READ_TOC_ADDR_BCD 733 | SD_CONF_BSET_NO_READ_HEADER 734 | SD_CONF_BSET_READ_CD_XD4), NULL }, 735 #endif /* __i386 || __amd64 */ 736 #endif /* sparc NON-fibre or NON-sparc platforms */ 737 738 #if (defined(SD_PROP_TST)) 739 { "VENDOR PRODUCT ", (SD_CONF_BSET_THROTTLE 740 | SD_CONF_BSET_CTYPE 741 | SD_CONF_BSET_NRR_COUNT 742 | SD_CONF_BSET_FAB_DEVID 743 | SD_CONF_BSET_NOCACHE 744 | SD_CONF_BSET_BSY_RETRY_COUNT 745 | SD_CONF_BSET_PLAYMSF_BCD 746 | SD_CONF_BSET_READSUB_BCD 747 | SD_CONF_BSET_READ_TOC_TRK_BCD 748 | SD_CONF_BSET_READ_TOC_ADDR_BCD 749 | SD_CONF_BSET_NO_READ_HEADER 750 | SD_CONF_BSET_READ_CD_XD4 751 | SD_CONF_BSET_RST_RETRIES 752 | SD_CONF_BSET_RSV_REL_TIME 753 | SD_CONF_BSET_TUR_CHECK), &tst_properties}, 754 #endif 755 }; 756 757 static const int sd_disk_table_size = 758 sizeof (sd_disk_table)/ sizeof (sd_disk_config_t); 759 760 761 762 #define SD_INTERCONNECT_PARALLEL 0 763 #define SD_INTERCONNECT_FABRIC 1 764 #define SD_INTERCONNECT_FIBRE 2 765 #define SD_INTERCONNECT_SSA 3 766 #define SD_INTERCONNECT_SATA 4 767 #define SD_IS_PARALLEL_SCSI(un) \ 768 ((un)->un_interconnect_type == SD_INTERCONNECT_PARALLEL) 769 #define SD_IS_SERIAL(un) \ 770 ((un)->un_interconnect_type == SD_INTERCONNECT_SATA) 771 772 /* 773 * Definitions used by device id registration routines 774 */ 775 #define VPD_HEAD_OFFSET 3 /* size of head for vpd page */ 776 #define VPD_PAGE_LENGTH 3 /* offset for pge length data */ 777 #define VPD_MODE_PAGE 1 /* offset into vpd pg for "page code" */ 778 779 static kmutex_t sd_sense_mutex = {0}; 780 781 /* 782 * Macros for updates of the driver state 783 */ 784 #define New_state(un, s) \ 785 (un)->un_last_state = (un)->un_state, (un)->un_state = (s) 786 #define Restore_state(un) \ 787 { uchar_t tmp = (un)->un_last_state; New_state((un), tmp); } 788 789 static struct sd_cdbinfo sd_cdbtab[] = { 790 { CDB_GROUP0, 0x00, 0x1FFFFF, 0xFF, }, 791 { CDB_GROUP1, SCMD_GROUP1, 0xFFFFFFFF, 0xFFFF, }, 792 { CDB_GROUP5, SCMD_GROUP5, 0xFFFFFFFF, 0xFFFFFFFF, }, 793 { CDB_GROUP4, SCMD_GROUP4, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFF, }, 794 }; 795 796 /* 797 * Specifies the number of seconds that must have elapsed since the last 798 * cmd. has completed for a device to be declared idle to the PM framework. 799 */ 800 static int sd_pm_idletime = 1; 801 802 /* 803 * Internal function prototypes 804 */ 805 806 #if (defined(__fibre)) 807 /* 808 * These #defines are to avoid namespace collisions that occur because this 809 * code is currently used to compile two separate driver modules: sd and ssd. 810 * All function names need to be treated this way (even if declared static) 811 * in order to allow the debugger to resolve the names properly. 
static struct sd_cdbinfo sd_cdbtab[] = {
	{ CDB_GROUP0, 0x00,	   0x1FFFFF,   0xFF,	    },
	{ CDB_GROUP1, SCMD_GROUP1, 0xFFFFFFFF, 0xFFFF,	    },
	{ CDB_GROUP5, SCMD_GROUP5, 0xFFFFFFFF, 0xFFFFFFFF, },
	{ CDB_GROUP4, SCMD_GROUP4, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFF, },
};
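/*
 * Each row above describes one CDB group: the CDB length, the group
 * code, the largest addressable LBA, and the largest block count per
 * command (field layout per struct sd_cdbinfo in sddef.h). For example,
 * a 6-byte GROUP0 CDB carries only a 21-bit LBA (0x1FFFFF) and an
 * 8-bit block count (0xFF).
 */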

/*
 * Specifies the number of seconds that must have elapsed since the last
 * command completed for a device to be declared idle to the PM framework.
 */
static int sd_pm_idletime = 1;

/*
 * Internal function prototypes
 */

#if (defined(__fibre))
/*
 * These #defines are to avoid namespace collisions that occur because this
 * code is currently used to compile two separate driver modules: sd and ssd.
 * All function names need to be treated this way (even if declared static)
 * in order to allow the debugger to resolve the names properly.
 * It is anticipated that in the near future the ssd module will be obsoleted,
 * at which time this ugliness should go away.
 */
#define	sd_log_trace			ssd_log_trace
#define	sd_log_info			ssd_log_info
#define	sd_log_err			ssd_log_err
#define	sdprobe				ssdprobe
#define	sdinfo				ssdinfo
#define	sd_prop_op			ssd_prop_op
#define	sd_scsi_probe_cache_init	ssd_scsi_probe_cache_init
#define	sd_scsi_probe_cache_fini	ssd_scsi_probe_cache_fini
#define	sd_scsi_clear_probe_cache	ssd_scsi_clear_probe_cache
#define	sd_scsi_probe_with_cache	ssd_scsi_probe_with_cache
#define	sd_scsi_target_lun_init		ssd_scsi_target_lun_init
#define	sd_scsi_target_lun_fini		ssd_scsi_target_lun_fini
#define	sd_scsi_get_target_lun_count	ssd_scsi_get_target_lun_count
#define	sd_scsi_update_lun_on_target	ssd_scsi_update_lun_on_target
#define	sd_spin_up_unit			ssd_spin_up_unit
#define	sd_enable_descr_sense		ssd_enable_descr_sense
#define	sd_reenable_dsense_task		ssd_reenable_dsense_task
#define	sd_set_mmc_caps			ssd_set_mmc_caps
#define	sd_read_unit_properties		ssd_read_unit_properties
#define	sd_process_sdconf_file		ssd_process_sdconf_file
#define	sd_process_sdconf_table		ssd_process_sdconf_table
#define	sd_sdconf_id_match		ssd_sdconf_id_match
#define	sd_blank_cmp			ssd_blank_cmp
#define	sd_chk_vers1_data		ssd_chk_vers1_data
#define	sd_set_vers1_properties		ssd_set_vers1_properties

#define	sd_get_physical_geometry	ssd_get_physical_geometry
#define	sd_get_virtual_geometry		ssd_get_virtual_geometry
#define	sd_update_block_info		ssd_update_block_info
#define	sd_register_devid		ssd_register_devid
#define	sd_get_devid			ssd_get_devid
#define	sd_create_devid			ssd_create_devid
#define	sd_write_deviceid		ssd_write_deviceid
#define	sd_check_vpd_page_support	ssd_check_vpd_page_support
#define	sd_setup_pm			ssd_setup_pm
#define	sd_create_pm_components		ssd_create_pm_components
#define	sd_ddi_suspend			ssd_ddi_suspend
#define	sd_ddi_pm_suspend		ssd_ddi_pm_suspend
#define	sd_ddi_resume			ssd_ddi_resume
#define	sd_ddi_pm_resume		ssd_ddi_pm_resume
#define	sdpower				ssdpower
#define	sdattach			ssdattach
#define	sddetach			ssddetach
#define	sd_unit_attach			ssd_unit_attach
#define	sd_unit_detach			ssd_unit_detach
#define	sd_set_unit_attributes		ssd_set_unit_attributes
#define	sd_create_errstats		ssd_create_errstats
#define	sd_set_errstats			ssd_set_errstats
#define	sd_set_pstats			ssd_set_pstats
#define	sddump				ssddump
#define	sd_scsi_poll			ssd_scsi_poll
#define	sd_send_polled_RQS		ssd_send_polled_RQS
#define	sd_ddi_scsi_poll		ssd_ddi_scsi_poll
#define	sd_init_event_callbacks		ssd_init_event_callbacks
#define	sd_event_callback		ssd_event_callback
#define	sd_cache_control		ssd_cache_control
#define	sd_get_write_cache_enabled	ssd_get_write_cache_enabled
#define	sd_get_nv_sup			ssd_get_nv_sup
#define	sd_make_device			ssd_make_device
#define	sdopen				ssdopen
#define	sdclose				ssdclose
#define	sd_ready_and_valid		ssd_ready_and_valid
#define	sdmin				ssdmin
#define	sdread				ssdread
#define	sdwrite				ssdwrite
#define	sdaread				ssdaread
#define	sdawrite			ssdawrite
#define	sdstrategy			ssdstrategy
#define	sdioctl				ssdioctl
#define	sd_mapblockaddr_iostart		ssd_mapblockaddr_iostart
#define	sd_mapblocksize_iostart		ssd_mapblocksize_iostart
#define	sd_checksum_iostart		ssd_checksum_iostart
#define	sd_checksum_uscsi_iostart	ssd_checksum_uscsi_iostart
#define	sd_pm_iostart			ssd_pm_iostart
#define	sd_core_iostart			ssd_core_iostart
#define	sd_mapblockaddr_iodone		ssd_mapblockaddr_iodone
#define	sd_mapblocksize_iodone		ssd_mapblocksize_iodone
#define	sd_checksum_iodone		ssd_checksum_iodone
#define	sd_checksum_uscsi_iodone	ssd_checksum_uscsi_iodone
#define	sd_pm_iodone			ssd_pm_iodone
#define	sd_initpkt_for_buf		ssd_initpkt_for_buf
#define	sd_destroypkt_for_buf		ssd_destroypkt_for_buf
#define	sd_setup_rw_pkt			ssd_setup_rw_pkt
#define	sd_setup_next_rw_pkt		ssd_setup_next_rw_pkt
#define	sd_buf_iodone			ssd_buf_iodone
#define	sd_uscsi_strategy		ssd_uscsi_strategy
#define	sd_initpkt_for_uscsi		ssd_initpkt_for_uscsi
#define	sd_destroypkt_for_uscsi		ssd_destroypkt_for_uscsi
#define	sd_uscsi_iodone			ssd_uscsi_iodone
#define	sd_xbuf_strategy		ssd_xbuf_strategy
#define	sd_xbuf_init			ssd_xbuf_init
#define	sd_pm_entry			ssd_pm_entry
#define	sd_pm_exit			ssd_pm_exit

#define	sd_pm_idletimeout_handler	ssd_pm_idletimeout_handler
#define	sd_pm_timeout_handler		ssd_pm_timeout_handler

#define	sd_add_buf_to_waitq		ssd_add_buf_to_waitq
#define	sdintr				ssdintr
#define	sd_start_cmds			ssd_start_cmds
#define	sd_send_scsi_cmd		ssd_send_scsi_cmd
#define	sd_bioclone_alloc		ssd_bioclone_alloc
#define	sd_bioclone_free		ssd_bioclone_free
#define	sd_shadow_buf_alloc		ssd_shadow_buf_alloc
#define	sd_shadow_buf_free		ssd_shadow_buf_free
#define	sd_print_transport_rejected_message	\
					ssd_print_transport_rejected_message
#define	sd_retry_command		ssd_retry_command
#define	sd_set_retry_bp			ssd_set_retry_bp
#define	sd_send_request_sense_command	ssd_send_request_sense_command
#define	sd_start_retry_command		ssd_start_retry_command
#define	sd_start_direct_priority_command	\
					ssd_start_direct_priority_command
#define	sd_return_failed_command	ssd_return_failed_command
#define	sd_return_failed_command_no_restart	\
					ssd_return_failed_command_no_restart
#define	sd_return_command		ssd_return_command
#define	sd_sync_with_callback		ssd_sync_with_callback
#define	sdrunout			ssdrunout
#define	sd_mark_rqs_busy		ssd_mark_rqs_busy
#define	sd_mark_rqs_idle		ssd_mark_rqs_idle
#define	sd_reduce_throttle		ssd_reduce_throttle
#define	sd_restore_throttle		ssd_restore_throttle
#define	sd_print_incomplete_msg		ssd_print_incomplete_msg
#define	sd_init_cdb_limits		ssd_init_cdb_limits
#define	sd_pkt_status_good		ssd_pkt_status_good
#define	sd_pkt_status_check_condition	ssd_pkt_status_check_condition
#define	sd_pkt_status_busy		ssd_pkt_status_busy
#define	sd_pkt_status_reservation_conflict	\
					ssd_pkt_status_reservation_conflict
#define	sd_pkt_status_qfull		ssd_pkt_status_qfull
#define	sd_handle_request_sense		ssd_handle_request_sense
#define	sd_handle_auto_request_sense	ssd_handle_auto_request_sense
#define	sd_print_sense_failed_msg	ssd_print_sense_failed_msg
#define	sd_validate_sense_data		ssd_validate_sense_data
#define	sd_decode_sense			ssd_decode_sense
#define	sd_print_sense_msg		ssd_print_sense_msg
#define	sd_sense_key_no_sense		ssd_sense_key_no_sense
#define	sd_sense_key_recoverable_error	ssd_sense_key_recoverable_error
#define	sd_sense_key_not_ready		ssd_sense_key_not_ready
#define	sd_sense_key_medium_or_hardware_error	\
					ssd_sense_key_medium_or_hardware_error
#define	sd_sense_key_illegal_request	ssd_sense_key_illegal_request
#define	sd_sense_key_unit_attention	ssd_sense_key_unit_attention
#define	sd_sense_key_fail_command	ssd_sense_key_fail_command
#define	sd_sense_key_blank_check	ssd_sense_key_blank_check
#define	sd_sense_key_aborted_command	ssd_sense_key_aborted_command
#define	sd_sense_key_default		ssd_sense_key_default
#define	sd_print_retry_msg		ssd_print_retry_msg
#define	sd_print_cmd_incomplete_msg	ssd_print_cmd_incomplete_msg
#define	sd_pkt_reason_cmd_incomplete	ssd_pkt_reason_cmd_incomplete
#define	sd_pkt_reason_cmd_tran_err	ssd_pkt_reason_cmd_tran_err
#define	sd_pkt_reason_cmd_reset		ssd_pkt_reason_cmd_reset
#define	sd_pkt_reason_cmd_aborted	ssd_pkt_reason_cmd_aborted
#define	sd_pkt_reason_cmd_timeout	ssd_pkt_reason_cmd_timeout
#define	sd_pkt_reason_cmd_unx_bus_free	ssd_pkt_reason_cmd_unx_bus_free
#define	sd_pkt_reason_cmd_tag_reject	ssd_pkt_reason_cmd_tag_reject
#define	sd_pkt_reason_default		ssd_pkt_reason_default
#define	sd_reset_target			ssd_reset_target
#define	sd_start_stop_unit_callback	ssd_start_stop_unit_callback
#define	sd_start_stop_unit_task		ssd_start_stop_unit_task
#define	sd_taskq_create			ssd_taskq_create
#define	sd_taskq_delete			ssd_taskq_delete
#define	sd_target_change_task		ssd_target_change_task
#define	sd_log_lun_expansion_event	ssd_log_lun_expansion_event
#define	sd_media_change_task		ssd_media_change_task
#define	sd_handle_mchange		ssd_handle_mchange
#define	sd_send_scsi_DOORLOCK		ssd_send_scsi_DOORLOCK
#define	sd_send_scsi_READ_CAPACITY	ssd_send_scsi_READ_CAPACITY
#define	sd_send_scsi_READ_CAPACITY_16	ssd_send_scsi_READ_CAPACITY_16
#define	sd_send_scsi_GET_CONFIGURATION	ssd_send_scsi_GET_CONFIGURATION
#define	sd_send_scsi_feature_GET_CONFIGURATION	\
					ssd_send_scsi_feature_GET_CONFIGURATION
#define	sd_send_scsi_START_STOP_UNIT	ssd_send_scsi_START_STOP_UNIT
#define	sd_send_scsi_INQUIRY		ssd_send_scsi_INQUIRY
#define	sd_send_scsi_TEST_UNIT_READY	ssd_send_scsi_TEST_UNIT_READY
#define	sd_send_scsi_PERSISTENT_RESERVE_IN	\
					ssd_send_scsi_PERSISTENT_RESERVE_IN
#define	sd_send_scsi_PERSISTENT_RESERVE_OUT	\
					ssd_send_scsi_PERSISTENT_RESERVE_OUT
#define	sd_send_scsi_SYNCHRONIZE_CACHE	ssd_send_scsi_SYNCHRONIZE_CACHE
#define	sd_send_scsi_SYNCHRONIZE_CACHE_biodone	\
					ssd_send_scsi_SYNCHRONIZE_CACHE_biodone
#define	sd_send_scsi_MODE_SENSE		ssd_send_scsi_MODE_SENSE
#define	sd_send_scsi_MODE_SELECT	ssd_send_scsi_MODE_SELECT
#define	sd_send_scsi_RDWR		ssd_send_scsi_RDWR
#define	sd_send_scsi_LOG_SENSE		ssd_send_scsi_LOG_SENSE
#define	sd_alloc_rqs			ssd_alloc_rqs
#define	sd_free_rqs			ssd_free_rqs
#define	sd_dump_memory			ssd_dump_memory
#define	sd_get_media_info		ssd_get_media_info
#define	sd_dkio_ctrl_info		ssd_dkio_ctrl_info
#define	sd_nvpair_str_decode		ssd_nvpair_str_decode
#define	sd_strtok_r			ssd_strtok_r
#define	sd_set_properties		ssd_set_properties
#define	sd_get_tunables_from_conf	ssd_get_tunables_from_conf
#define	sd_setup_next_xfer		ssd_setup_next_xfer
#define	sd_dkio_get_temp		ssd_dkio_get_temp
#define	sd_check_mhd			ssd_check_mhd
#define	sd_mhd_watch_cb			ssd_mhd_watch_cb
#define	sd_mhd_watch_incomplete		ssd_mhd_watch_incomplete
#define	sd_sname			ssd_sname
#define	sd_mhd_resvd_recover		ssd_mhd_resvd_recover
#define	sd_resv_reclaim_thread		ssd_resv_reclaim_thread
#define	sd_take_ownership		ssd_take_ownership
#define	sd_reserve_release		ssd_reserve_release
#define	sd_rmv_resv_reclaim_req		ssd_rmv_resv_reclaim_req
#define	sd_mhd_reset_notify_cb		ssd_mhd_reset_notify_cb
#define	sd_persistent_reservation_in_read_keys	\
					ssd_persistent_reservation_in_read_keys
#define	sd_persistent_reservation_in_read_resv	\
					ssd_persistent_reservation_in_read_resv
#define	sd_mhdioc_takeown		ssd_mhdioc_takeown
#define	sd_mhdioc_failfast		ssd_mhdioc_failfast
#define	sd_mhdioc_release		ssd_mhdioc_release
#define	sd_mhdioc_register_devid	ssd_mhdioc_register_devid
#define	sd_mhdioc_inkeys		ssd_mhdioc_inkeys
#define	sd_mhdioc_inresv		ssd_mhdioc_inresv
#define	sr_change_blkmode		ssr_change_blkmode
#define	sr_change_speed			ssr_change_speed
#define	sr_atapi_change_speed		ssr_atapi_change_speed
#define	sr_pause_resume			ssr_pause_resume
#define	sr_play_msf			ssr_play_msf
#define	sr_play_trkind			ssr_play_trkind
#define	sr_read_all_subcodes		ssr_read_all_subcodes
#define	sr_read_subchannel		ssr_read_subchannel
#define	sr_read_tocentry		ssr_read_tocentry
#define	sr_read_tochdr			ssr_read_tochdr
#define	sr_read_cdda			ssr_read_cdda
#define	sr_read_cdxa			ssr_read_cdxa
#define	sr_read_mode1			ssr_read_mode1
#define	sr_read_mode2			ssr_read_mode2
#define	sr_read_cd_mode2		ssr_read_cd_mode2
#define	sr_sector_mode			ssr_sector_mode
#define	sr_eject			ssr_eject
#define	sr_ejected			ssr_ejected
#define	sr_check_wp			ssr_check_wp
#define	sd_check_media			ssd_check_media
#define	sd_media_watch_cb		ssd_media_watch_cb
#define	sd_delayed_cv_broadcast		ssd_delayed_cv_broadcast
#define	sr_volume_ctrl			ssr_volume_ctrl
#define	sr_read_sony_session_offset	ssr_read_sony_session_offset
#define	sd_log_page_supported		ssd_log_page_supported
#define	sd_check_for_writable_cd	ssd_check_for_writable_cd
#define	sd_wm_cache_constructor		ssd_wm_cache_constructor
#define	sd_wm_cache_destructor		ssd_wm_cache_destructor
#define	sd_range_lock			ssd_range_lock
#define	sd_get_range			ssd_get_range
#define	sd_free_inlist_wmap		ssd_free_inlist_wmap
#define	sd_range_unlock			ssd_range_unlock
#define	sd_read_modify_write_task	ssd_read_modify_write_task
#define	sddump_do_read_of_rmw		ssddump_do_read_of_rmw

#define	sd_iostart_chain		ssd_iostart_chain
#define	sd_iodone_chain			ssd_iodone_chain
#define	sd_initpkt_map			ssd_initpkt_map
#define	sd_destroypkt_map		ssd_destroypkt_map
#define	sd_chain_type_map		ssd_chain_type_map
#define	sd_chain_index_map		ssd_chain_index_map

#define	sd_failfast_flushctl		ssd_failfast_flushctl
#define	sd_failfast_flushq		ssd_failfast_flushq
#define	sd_failfast_flushq_callback	ssd_failfast_flushq_callback

#define	sd_is_lsi			ssd_is_lsi
#define	sd_tg_rdwr			ssd_tg_rdwr
#define	sd_tg_getinfo			ssd_tg_getinfo

#endif	/* #if (defined(__fibre)) */


int _init(void);
int _fini(void);
int _info(struct modinfo *modinfop);

/*PRINTFLIKE3*/
static void sd_log_trace(uint_t comp, struct sd_lun *un, const char *fmt, ...);
/*PRINTFLIKE3*/
static void sd_log_info(uint_t comp, struct sd_lun *un, const char *fmt, ...);
/*PRINTFLIKE3*/
static void sd_log_err(uint_t comp, struct sd_lun *un, const char *fmt, ...);

static int sdprobe(dev_info_t *devi);
static int sdinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg,
    void **result);
static int sd_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
    int mod_flags, char *name, caddr_t valuep, int *lengthp);

/*
 * Smart probe for parallel scsi
 */
static void sd_scsi_probe_cache_init(void);
static void sd_scsi_probe_cache_fini(void);
static void sd_scsi_clear_probe_cache(void);
static int  sd_scsi_probe_with_cache(struct scsi_device *devp, int (*fn)());

/*
 * Attached luns on target for parallel scsi
 */
static void sd_scsi_target_lun_init(void);
static void sd_scsi_target_lun_fini(void);
static int  sd_scsi_get_target_lun_count(dev_info_t *dip, int target);
static void sd_scsi_update_lun_on_target(dev_info_t *dip, int target, int flag);

static int	sd_spin_up_unit(struct sd_lun *un);
#ifdef _LP64
static void	sd_enable_descr_sense(struct sd_lun *un);
static void	sd_reenable_dsense_task(void *arg);
#endif /* _LP64 */

static void	sd_set_mmc_caps(struct sd_lun *un);

static void	sd_read_unit_properties(struct sd_lun *un);
static int	sd_process_sdconf_file(struct sd_lun *un);
static void	sd_nvpair_str_decode(struct sd_lun *un, char *nvpair_str);
static char	*sd_strtok_r(char *string, const char *sepset, char **lasts);
static void	sd_set_properties(struct sd_lun *un, char *name, char *value);
static void	sd_get_tunables_from_conf(struct sd_lun *un, int flags,
    int *data_list, sd_tunables *values);
static void	sd_process_sdconf_table(struct sd_lun *un);
static int	sd_sdconf_id_match(struct sd_lun *un, char *id, int idlen);
static int	sd_blank_cmp(struct sd_lun *un, char *id, int idlen);
static int	sd_chk_vers1_data(struct sd_lun *un, int flags, int *prop_list,
    int list_len, char *dataname_ptr);
static void	sd_set_vers1_properties(struct sd_lun *un, int flags,
    sd_tunables *prop_list);

static void	sd_register_devid(struct sd_lun *un, dev_info_t *devi,
    int reservation_flag);
static int	sd_get_devid(struct sd_lun *un);
static ddi_devid_t sd_create_devid(struct sd_lun *un);
static int	sd_write_deviceid(struct sd_lun *un);
static int	sd_get_devid_page(struct sd_lun *un, uchar_t *wwn, int *len);
static int	sd_check_vpd_page_support(struct sd_lun *un);

static void	sd_setup_pm(struct sd_lun *un, dev_info_t *devi);
static void	sd_create_pm_components(dev_info_t *devi, struct sd_lun *un);

static int	sd_ddi_suspend(dev_info_t *devi);
static int	sd_ddi_pm_suspend(struct sd_lun *un);
static int	sd_ddi_resume(dev_info_t *devi);
static int	sd_ddi_pm_resume(struct sd_lun *un);
static int	sdpower(dev_info_t *devi, int component, int level);

static int	sdattach(dev_info_t *devi, ddi_attach_cmd_t cmd);
static int	sddetach(dev_info_t *devi, ddi_detach_cmd_t cmd);
static int	sd_unit_attach(dev_info_t *devi);
static int	sd_unit_detach(dev_info_t *devi);

static void	sd_set_unit_attributes(struct sd_lun *un, dev_info_t *devi);
static void	sd_create_errstats(struct sd_lun *un, int instance);
static void	sd_set_errstats(struct sd_lun *un);
static void	sd_set_pstats(struct sd_lun *un);

static int	sddump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk);
static int	sd_scsi_poll(struct sd_lun *un, struct scsi_pkt *pkt);
static int	sd_send_polled_RQS(struct sd_lun *un);
static int	sd_ddi_scsi_poll(struct scsi_pkt *pkt);

#if (defined(__fibre))
/*
 * Event callbacks (photon)
 */
static void	sd_init_event_callbacks(struct sd_lun *un);
static void	sd_event_callback(dev_info_t *, ddi_eventcookie_t, void *,
    void *);
#endif

/*
 * Defines for sd_cache_control
 */

#define	SD_CACHE_ENABLE		1
#define	SD_CACHE_DISABLE	0
#define	SD_CACHE_NOCHANGE	-1
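/*
 * For instance (an illustrative sketch using the flags above), enabling
 * the write cache while leaving the read cache setting untouched:
 *
 *	(void) sd_cache_control(un, SD_CACHE_NOCHANGE, SD_CACHE_ENABLE);
 */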

static int   sd_cache_control(struct sd_lun *un, int rcd_flag, int wce_flag);
static int   sd_get_write_cache_enabled(struct sd_lun *un, int *is_enabled);
static void  sd_get_nv_sup(struct sd_lun *un);
static dev_t sd_make_device(dev_info_t *devi);

static void  sd_update_block_info(struct sd_lun *un, uint32_t lbasize,
    uint64_t capacity);

/*
 * Driver entry point functions.
 */
static int  sdopen(dev_t *dev_p, int flag, int otyp, cred_t *cred_p);
static int  sdclose(dev_t dev, int flag, int otyp, cred_t *cred_p);
static int  sd_ready_and_valid(struct sd_lun *un);

static void sdmin(struct buf *bp);
static int  sdread(dev_t dev, struct uio *uio, cred_t *cred_p);
static int  sdwrite(dev_t dev, struct uio *uio, cred_t *cred_p);
static int  sdaread(dev_t dev, struct aio_req *aio, cred_t *cred_p);
static int  sdawrite(dev_t dev, struct aio_req *aio, cred_t *cred_p);

static int sdstrategy(struct buf *bp);
static int sdioctl(dev_t, int, intptr_t, int, cred_t *, int *);

/*
 * Function prototypes for layering functions in the iostart chain.
 */
static void sd_mapblockaddr_iostart(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_mapblocksize_iostart(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_checksum_iostart(int index, struct sd_lun *un, struct buf *bp);
static void sd_checksum_uscsi_iostart(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_pm_iostart(int index, struct sd_lun *un, struct buf *bp);
static void sd_core_iostart(int index, struct sd_lun *un, struct buf *bp);

/*
 * Function prototypes for layering functions in the iodone chain.
 */
static void sd_buf_iodone(int index, struct sd_lun *un, struct buf *bp);
static void sd_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp);
static void sd_mapblockaddr_iodone(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_mapblocksize_iodone(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_checksum_iodone(int index, struct sd_lun *un, struct buf *bp);
static void sd_checksum_uscsi_iodone(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_pm_iodone(int index, struct sd_lun *un, struct buf *bp);
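/*
 * Conceptually (a rough sketch; the actual chain tables appear later in
 * this file), a buf travels down the iostart functions in index order
 * and back up the matching iodone functions in reverse. For plain disk
 * I/O the path is roughly:
 *
 *	sd_mapblockaddr_iostart() -> sd_pm_iostart() -> sd_core_iostart()
 *		... command completes in sdintr() ...
 *	sd_pm_iodone() -> sd_mapblockaddr_iodone() -> sd_buf_iodone(),
 *	which ultimately calls biodone(9F).
 */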

/*
 * Prototypes for functions to support buf(9S) based IO.
 */
static void	sd_xbuf_strategy(struct buf *bp, ddi_xbuf_t xp, void *arg);
static int	sd_initpkt_for_buf(struct buf *, struct scsi_pkt **);
static void	sd_destroypkt_for_buf(struct buf *);
static int	sd_setup_rw_pkt(struct sd_lun *un, struct scsi_pkt **pktpp,
    struct buf *bp, int flags,
    int (*callback)(caddr_t), caddr_t callback_arg,
    diskaddr_t lba, uint32_t blockcount);
static int	sd_setup_next_rw_pkt(struct sd_lun *un, struct scsi_pkt *pktp,
    struct buf *bp, diskaddr_t lba, uint32_t blockcount);

/*
 * Prototypes for functions to support USCSI IO.
 */
static int	sd_uscsi_strategy(struct buf *bp);
static int	sd_initpkt_for_uscsi(struct buf *, struct scsi_pkt **);
static void	sd_destroypkt_for_uscsi(struct buf *);

static void	sd_xbuf_init(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, uchar_t chain_type, void *pktinfop);

static int	sd_pm_entry(struct sd_lun *un);
static void	sd_pm_exit(struct sd_lun *un);

static void	sd_pm_idletimeout_handler(void *arg);

/*
 * sd_core internal functions (used at the sd_core_io layer).
 */
static void	sd_add_buf_to_waitq(struct sd_lun *un, struct buf *bp);
static void	sdintr(struct scsi_pkt *pktp);
static void	sd_start_cmds(struct sd_lun *un, struct buf *immed_bp);

static int	sd_send_scsi_cmd(dev_t dev, struct uscsi_cmd *incmd, int flag,
    enum uio_seg dataspace, int path_flag);

static struct buf *sd_bioclone_alloc(struct buf *bp, size_t datalen,
    daddr_t blkno, int (*func)(struct buf *));
static struct buf *sd_shadow_buf_alloc(struct buf *bp, size_t datalen,
    uint_t bflags, daddr_t blkno, int (*func)(struct buf *));
static void	sd_bioclone_free(struct buf *bp);
static void	sd_shadow_buf_free(struct buf *bp);

static void	sd_print_transport_rejected_message(struct sd_lun *un,
    struct sd_xbuf *xp, int code);
static void	sd_print_incomplete_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int code);
static void	sd_print_sense_failed_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int code);
static void	sd_print_cmd_incomplete_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int code);

static void	sd_retry_command(struct sd_lun *un, struct buf *bp,
    int retry_check_flag,
    void (*user_funcp)(struct sd_lun *un, struct buf *bp, void *argp,
	int c),
    void *user_arg, int failure_code, clock_t retry_delay,
    void (*statp)(kstat_io_t *));

static void	sd_set_retry_bp(struct sd_lun *un, struct buf *bp,
    clock_t retry_delay, void (*statp)(kstat_io_t *));
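/*
 * A typical invocation of the retry framework (illustrative only;
 * SD_RETRIES_STANDARD and SD_RESTART_TIMEOUT are defined in sddef.h)
 * retries bp, prints a message via the supplied callback, and fails
 * the command with EIO once the retry counts are exhausted:
 *
 *	sd_retry_command(un, bp, SD_RETRIES_STANDARD,
 *	    sd_print_incomplete_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL);
 */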

static void sd_send_request_sense_command(struct sd_lun *un, struct buf *bp,
    struct scsi_pkt *pktp);
static void sd_start_retry_command(void *arg);
static void sd_start_direct_priority_command(void *arg);
static void sd_return_failed_command(struct sd_lun *un, struct buf *bp,
    int errcode);
static void sd_return_failed_command_no_restart(struct sd_lun *un,
    struct buf *bp, int errcode);
static void sd_return_command(struct sd_lun *un, struct buf *bp);
static void sd_sync_with_callback(struct sd_lun *un);
static int sdrunout(caddr_t arg);

static void sd_mark_rqs_busy(struct sd_lun *un, struct buf *bp);
static struct buf *sd_mark_rqs_idle(struct sd_lun *un, struct sd_xbuf *xp);

static void sd_reduce_throttle(struct sd_lun *un, int throttle_type);
static void sd_restore_throttle(void *arg);

static void sd_init_cdb_limits(struct sd_lun *un);

static void sd_pkt_status_good(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);

/*
 * Error handling functions
 */
static void sd_pkt_status_check_condition(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_status_busy(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_status_reservation_conflict(struct sd_lun *un,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_status_qfull(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void sd_handle_request_sense(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_handle_auto_request_sense(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static int sd_validate_sense_data(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, size_t actual_len);
static void sd_decode_sense(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void sd_print_sense_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int code);

static void sd_sense_key_no_sense(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_recoverable_error(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_not_ready(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_medium_or_hardware_error(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_illegal_request(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_unit_attention(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_fail_command(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_blank_check(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_aborted_command(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_default(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void sd_print_retry_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int flag);

static void sd_pkt_reason_cmd_incomplete(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_tran_err(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_reset(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_aborted(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_timeout(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_unx_bus_free(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_tag_reject(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_default(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void sd_reset_target(struct sd_lun *un, struct scsi_pkt *pktp);

static void sd_start_stop_unit_callback(void *arg);
static void sd_start_stop_unit_task(void *arg);

static void sd_taskq_create(void);
static void sd_taskq_delete(void);
static void sd_target_change_task(void *arg);
static void sd_log_lun_expansion_event(struct sd_lun *un, int km_flag);
static void sd_media_change_task(void *arg);

static int sd_handle_mchange(struct sd_lun *un);
static int sd_send_scsi_DOORLOCK(struct sd_lun *un, int flag, int path_flag);
static int sd_send_scsi_READ_CAPACITY(struct sd_lun *un, uint64_t *capp,
    uint32_t *lbap, int path_flag);
static int sd_send_scsi_READ_CAPACITY_16(struct sd_lun *un, uint64_t *capp,
    uint32_t *lbap, int path_flag);
static int sd_send_scsi_START_STOP_UNIT(struct sd_lun *un, int flag,
    int path_flag);
static int sd_send_scsi_INQUIRY(struct sd_lun *un, uchar_t *bufaddr,
    size_t buflen, uchar_t evpd, uchar_t page_code, size_t *residp);
static int sd_send_scsi_TEST_UNIT_READY(struct sd_lun *un, int flag);
static int sd_send_scsi_PERSISTENT_RESERVE_IN(struct sd_lun *un,
    uchar_t usr_cmd, uint16_t data_len, uchar_t *data_bufp);
static int sd_send_scsi_PERSISTENT_RESERVE_OUT(struct sd_lun *un,
    uchar_t usr_cmd, uchar_t *usr_bufp);
static int sd_send_scsi_SYNCHRONIZE_CACHE(struct sd_lun *un,
    struct dk_callback *dkc);
static int sd_send_scsi_SYNCHRONIZE_CACHE_biodone(struct buf *bp);
static int sd_send_scsi_GET_CONFIGURATION(struct sd_lun *un,
    struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen,
    uchar_t *bufaddr, uint_t buflen, int path_flag);
static int sd_send_scsi_feature_GET_CONFIGURATION(struct sd_lun *un,
    struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen,
    uchar_t *bufaddr, uint_t buflen, char feature, int path_flag);
static int sd_send_scsi_MODE_SENSE(struct sd_lun *un, int cdbsize,
    uchar_t *bufaddr, size_t buflen, uchar_t page_code, int path_flag);
static int sd_send_scsi_MODE_SELECT(struct sd_lun *un, int cdbsize,
    uchar_t *bufaddr, size_t buflen, uchar_t save_page, int path_flag);
static int sd_send_scsi_RDWR(struct sd_lun *un, uchar_t cmd, void *bufaddr,
    size_t buflen, daddr_t start_block, int path_flag);
#define	sd_send_scsi_READ(un, bufaddr, buflen, start_block, path_flag)	\
	sd_send_scsi_RDWR(un, SCMD_READ, bufaddr, buflen, start_block, \
	path_flag)
#define	sd_send_scsi_WRITE(un, bufaddr, buflen, start_block, path_flag)	\
	sd_send_scsi_RDWR(un, SCMD_WRITE, bufaddr, buflen, start_block,	\
	path_flag)

static int sd_send_scsi_LOG_SENSE(struct sd_lun *un, uchar_t *bufaddr,
    uint16_t buflen, uchar_t page_code, uchar_t page_control,
    uint16_t param_ptr, int path_flag);

static int  sd_alloc_rqs(struct scsi_device *devp, struct sd_lun *un);
static void sd_free_rqs(struct sd_lun *un);

static void sd_dump_memory(struct sd_lun *un, uint_t comp, char *title,
    uchar_t *data, int len, int fmt);
static void sd_panic_for_res_conflict(struct sd_lun *un);

/*
 * Disk Ioctl Function Prototypes
 */
static int sd_get_media_info(dev_t dev, caddr_t arg, int flag);
static int sd_dkio_ctrl_info(dev_t dev, caddr_t arg, int flag);
static int sd_dkio_get_temp(dev_t dev, caddr_t arg, int flag);

/*
 * Multi-host Ioctl Prototypes
 */
static int sd_check_mhd(dev_t dev, int interval);
static int sd_mhd_watch_cb(caddr_t arg, struct scsi_watch_result *resultp);
static void sd_mhd_watch_incomplete(struct sd_lun *un, struct scsi_pkt *pkt);
*sd_sname(uchar_t status); 1471 static void sd_mhd_resvd_recover(void *arg); 1472 static void sd_resv_reclaim_thread(void); 1473 static int sd_take_ownership(dev_t dev, struct mhioctkown *p); 1474 static int sd_reserve_release(dev_t dev, int cmd); 1475 static void sd_rmv_resv_reclaim_req(dev_t dev); 1476 static void sd_mhd_reset_notify_cb(caddr_t arg); 1477 static int sd_persistent_reservation_in_read_keys(struct sd_lun *un, 1478 mhioc_inkeys_t *usrp, int flag); 1479 static int sd_persistent_reservation_in_read_resv(struct sd_lun *un, 1480 mhioc_inresvs_t *usrp, int flag); 1481 static int sd_mhdioc_takeown(dev_t dev, caddr_t arg, int flag); 1482 static int sd_mhdioc_failfast(dev_t dev, caddr_t arg, int flag); 1483 static int sd_mhdioc_release(dev_t dev); 1484 static int sd_mhdioc_register_devid(dev_t dev); 1485 static int sd_mhdioc_inkeys(dev_t dev, caddr_t arg, int flag); 1486 static int sd_mhdioc_inresv(dev_t dev, caddr_t arg, int flag); 1487 1488 /* 1489 * SCSI removable prototypes 1490 */ 1491 static int sr_change_blkmode(dev_t dev, int cmd, intptr_t data, int flag); 1492 static int sr_change_speed(dev_t dev, int cmd, intptr_t data, int flag); 1493 static int sr_atapi_change_speed(dev_t dev, int cmd, intptr_t data, int flag); 1494 static int sr_pause_resume(dev_t dev, int mode); 1495 static int sr_play_msf(dev_t dev, caddr_t data, int flag); 1496 static int sr_play_trkind(dev_t dev, caddr_t data, int flag); 1497 static int sr_read_all_subcodes(dev_t dev, caddr_t data, int flag); 1498 static int sr_read_subchannel(dev_t dev, caddr_t data, int flag); 1499 static int sr_read_tocentry(dev_t dev, caddr_t data, int flag); 1500 static int sr_read_tochdr(dev_t dev, caddr_t data, int flag); 1501 static int sr_read_cdda(dev_t dev, caddr_t data, int flag); 1502 static int sr_read_cdxa(dev_t dev, caddr_t data, int flag); 1503 static int sr_read_mode1(dev_t dev, caddr_t data, int flag); 1504 static int sr_read_mode2(dev_t dev, caddr_t data, int flag); 1505 static int sr_read_cd_mode2(dev_t dev, caddr_t data, int flag); 1506 static int sr_sector_mode(dev_t dev, uint32_t blksize); 1507 static int sr_eject(dev_t dev); 1508 static void sr_ejected(register struct sd_lun *un); 1509 static int sr_check_wp(dev_t dev); 1510 static int sd_check_media(dev_t dev, enum dkio_state state); 1511 static int sd_media_watch_cb(caddr_t arg, struct scsi_watch_result *resultp); 1512 static void sd_delayed_cv_broadcast(void *arg); 1513 static int sr_volume_ctrl(dev_t dev, caddr_t data, int flag); 1514 static int sr_read_sony_session_offset(dev_t dev, caddr_t data, int flag); 1515 1516 static int sd_log_page_supported(struct sd_lun *un, int log_page); 1517 1518 /* 1519 * Function prototypes for the non-512 support (DVDRAM, MO etc.) functions.
1520 */ 1521 static void sd_check_for_writable_cd(struct sd_lun *un, int path_flag); 1522 static int sd_wm_cache_constructor(void *wm, void *un, int flags); 1523 static void sd_wm_cache_destructor(void *wm, void *un); 1524 static struct sd_w_map *sd_range_lock(struct sd_lun *un, daddr_t startb, 1525 daddr_t endb, ushort_t typ); 1526 static struct sd_w_map *sd_get_range(struct sd_lun *un, daddr_t startb, 1527 daddr_t endb); 1528 static void sd_free_inlist_wmap(struct sd_lun *un, struct sd_w_map *wmp); 1529 static void sd_range_unlock(struct sd_lun *un, struct sd_w_map *wm); 1530 static void sd_read_modify_write_task(void * arg); 1531 static int 1532 sddump_do_read_of_rmw(struct sd_lun *un, uint64_t blkno, uint64_t nblk, 1533 struct buf **bpp); 1534 1535 1536 /* 1537 * Function prototypes for failfast support. 1538 */ 1539 static void sd_failfast_flushq(struct sd_lun *un); 1540 static int sd_failfast_flushq_callback(struct buf *bp); 1541 1542 /* 1543 * Function prototypes to check for lsi devices 1544 */ 1545 static void sd_is_lsi(struct sd_lun *un); 1546 1547 /* 1548 * Function prototypes for partial DMA support 1549 */ 1550 static int sd_setup_next_xfer(struct sd_lun *un, struct buf *bp, 1551 struct scsi_pkt *pkt, struct sd_xbuf *xp); 1552 1553 1554 /* Function prototypes for cmlb */ 1555 static int sd_tg_rdwr(dev_info_t *devi, uchar_t cmd, void *bufaddr, 1556 diskaddr_t start_block, size_t reqlength, void *tg_cookie); 1557 1558 static int sd_tg_getinfo(dev_info_t *devi, int cmd, void *arg, void *tg_cookie); 1559 1560 /* 1561 * Constants for failfast support: 1562 * 1563 * SD_FAILFAST_INACTIVE: Instance is currently in a normal state, with NO 1564 * failfast processing being performed. 1565 * 1566 * SD_FAILFAST_ACTIVE: Instance is in the failfast state and is performing 1567 * failfast processing on all bufs with B_FAILFAST set. 1568 */ 1569 1570 #define SD_FAILFAST_INACTIVE 0 1571 #define SD_FAILFAST_ACTIVE 1 1572 1573 /* 1574 * Bitmask to control behavior of buf(9S) flushes when a transition to 1575 * the failfast state occurs. Optional bits include: 1576 * 1577 * SD_FAILFAST_FLUSH_ALL_BUFS: When set, flush ALL bufs including those that 1578 * do NOT have B_FAILFAST set. When clear, only bufs with B_FAILFAST will 1579 * be flushed. 1580 * 1581 * SD_FAILFAST_FLUSH_ALL_QUEUES: When set, flush any/all other queues in the 1582 * driver, in addition to the regular wait queue. This includes the xbuf 1583 * queues. When clear, only the driver's wait queue will be flushed. 1584 */ 1585 #define SD_FAILFAST_FLUSH_ALL_BUFS 0x01 1586 #define SD_FAILFAST_FLUSH_ALL_QUEUES 0x02 1587 1588 /* 1589 * The default behavior is to only flush bufs that have B_FAILFAST set, but 1590 * to flush all queues within the driver. 
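 *
 * For example (illustrative only, not a recommendation), a hypothetical
 * /etc/system entry to make a failfast transition also flush bufs that
 * do NOT have B_FAILFAST set would be:
 *
 *	set sd:sd_failfast_flushctl = 0x3
 *
 * i.e. (SD_FAILFAST_FLUSH_ALL_BUFS | SD_FAILFAST_FLUSH_ALL_QUEUES).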
1591 */ 1592 static int sd_failfast_flushctl = SD_FAILFAST_FLUSH_ALL_QUEUES; 1593 1594 1595 /* 1596 * SD Testing Fault Injection 1597 */ 1598 #ifdef SD_FAULT_INJECTION 1599 static void sd_faultinjection_ioctl(int cmd, intptr_t arg, struct sd_lun *un); 1600 static void sd_faultinjection(struct scsi_pkt *pktp); 1601 static void sd_injection_log(char *buf, struct sd_lun *un); 1602 #endif 1603 1604 /* 1605 * Device driver ops vector 1606 */ 1607 static struct cb_ops sd_cb_ops = { 1608 sdopen, /* open */ 1609 sdclose, /* close */ 1610 sdstrategy, /* strategy */ 1611 nodev, /* print */ 1612 sddump, /* dump */ 1613 sdread, /* read */ 1614 sdwrite, /* write */ 1615 sdioctl, /* ioctl */ 1616 nodev, /* devmap */ 1617 nodev, /* mmap */ 1618 nodev, /* segmap */ 1619 nochpoll, /* poll */ 1620 sd_prop_op, /* cb_prop_op */ 1621 0, /* streamtab */ 1622 D_64BIT | D_MP | D_NEW | D_HOTPLUG, /* Driver compatibility flags */ 1623 CB_REV, /* cb_rev */ 1624 sdaread, /* async I/O read entry point */ 1625 sdawrite /* async I/O write entry point */ 1626 }; 1627 1628 static struct dev_ops sd_ops = { 1629 DEVO_REV, /* devo_rev, */ 1630 0, /* refcnt */ 1631 sdinfo, /* info */ 1632 nulldev, /* identify */ 1633 sdprobe, /* probe */ 1634 sdattach, /* attach */ 1635 sddetach, /* detach */ 1636 nodev, /* reset */ 1637 &sd_cb_ops, /* driver operations */ 1638 NULL, /* bus operations */ 1639 sdpower /* power */ 1640 }; 1641 1642 1643 /* 1644 * This is the loadable module wrapper. 1645 */ 1646 #include <sys/modctl.h> 1647 1648 static struct modldrv modldrv = { 1649 &mod_driverops, /* Type of module. This one is a driver */ 1650 SD_MODULE_NAME, /* Module name. */ 1651 &sd_ops /* driver ops */ 1652 }; 1653 1654 1655 static struct modlinkage modlinkage = { 1656 MODREV_1, 1657 &modldrv, 1658 NULL 1659 }; 1660 1661 static cmlb_tg_ops_t sd_tgops = { 1662 TG_DK_OPS_VERSION_1, 1663 sd_tg_rdwr, 1664 sd_tg_getinfo 1665 }; 1666 1667 static struct scsi_asq_key_strings sd_additional_codes[] = { 1668 0x81, 0, "Logical Unit is Reserved", 1669 0x85, 0, "Audio Address Not Valid", 1670 0xb6, 0, "Media Load Mechanism Failed", 1671 0xB9, 0, "Audio Play Operation Aborted", 1672 0xbf, 0, "Buffer Overflow for Read All Subcodes Command", 1673 0x53, 2, "Medium removal prevented", 1674 0x6f, 0, "Authentication failed during key exchange", 1675 0x6f, 1, "Key not present", 1676 0x6f, 2, "Key not established", 1677 0x6f, 3, "Read without proper authentication", 1678 0x6f, 4, "Mismatched region to this logical unit", 1679 0x6f, 5, "Region reset count error", 1680 0xffff, 0x0, NULL 1681 }; 1682 1683 1684 /* 1685 * Struct for passing printing information for sense data messages 1686 */ 1687 struct sd_sense_info { 1688 int ssi_severity; 1689 int ssi_pfa_flag; 1690 }; 1691 1692 /* 1693 * Table of function pointers for iostart-side routines. Separate "chains" 1694 * of layered function calls are formed by placing the function pointers 1695 * sequentially in the desired order. Functions are called according to an 1696 * incrementing table index ordering. The last function in each chain must 1697 * be sd_core_iostart(). The corresponding iodone-side routines are expected 1698 * in the sd_iodone_chain[] array. 1699 * 1700 * Note: It may seem more natural to organize both the iostart and iodone 1701 * functions together, into an array of structures (or some similar 1702 * organization) with a common index, rather than two separate arrays which 1703 * must be maintained in synchronization. 
The purpose of this division is 1704 * to achieve improved performance: individual arrays allow for more 1705 * effective cache line utilization on certain platforms. 1706 */ 1707 1708 typedef void (*sd_chain_t)(int index, struct sd_lun *un, struct buf *bp); 1709 1710 1711 static sd_chain_t sd_iostart_chain[] = { 1712 1713 /* Chain for buf IO for disk drive targets (PM enabled) */ 1714 sd_mapblockaddr_iostart, /* Index: 0 */ 1715 sd_pm_iostart, /* Index: 1 */ 1716 sd_core_iostart, /* Index: 2 */ 1717 1718 /* Chain for buf IO for disk drive targets (PM disabled) */ 1719 sd_mapblockaddr_iostart, /* Index: 3 */ 1720 sd_core_iostart, /* Index: 4 */ 1721 1722 /* Chain for buf IO for removable-media targets (PM enabled) */ 1723 sd_mapblockaddr_iostart, /* Index: 5 */ 1724 sd_mapblocksize_iostart, /* Index: 6 */ 1725 sd_pm_iostart, /* Index: 7 */ 1726 sd_core_iostart, /* Index: 8 */ 1727 1728 /* Chain for buf IO for removable-media targets (PM disabled) */ 1729 sd_mapblockaddr_iostart, /* Index: 9 */ 1730 sd_mapblocksize_iostart, /* Index: 10 */ 1731 sd_core_iostart, /* Index: 11 */ 1732 1733 /* Chain for buf IO for disk drives with checksumming (PM enabled) */ 1734 sd_mapblockaddr_iostart, /* Index: 12 */ 1735 sd_checksum_iostart, /* Index: 13 */ 1736 sd_pm_iostart, /* Index: 14 */ 1737 sd_core_iostart, /* Index: 15 */ 1738 1739 /* Chain for buf IO for disk drives with checksumming (PM disabled) */ 1740 sd_mapblockaddr_iostart, /* Index: 16 */ 1741 sd_checksum_iostart, /* Index: 17 */ 1742 sd_core_iostart, /* Index: 18 */ 1743 1744 /* Chain for USCSI commands (all targets) */ 1745 sd_pm_iostart, /* Index: 19 */ 1746 sd_core_iostart, /* Index: 20 */ 1747 1748 /* Chain for checksumming USCSI commands (all targets) */ 1749 sd_checksum_uscsi_iostart, /* Index: 21 */ 1750 sd_pm_iostart, /* Index: 22 */ 1751 sd_core_iostart, /* Index: 23 */ 1752 1753 /* Chain for "direct" USCSI commands (all targets) */ 1754 sd_core_iostart, /* Index: 24 */ 1755 1756 /* Chain for "direct priority" USCSI commands (all targets) */ 1757 sd_core_iostart, /* Index: 25 */ 1758 }; 1759 1760 /* 1761 * Macros to locate the first function of each iostart chain in the 1762 * sd_iostart_chain[] array. These are located by the index in the array. 1763 */ 1764 #define SD_CHAIN_DISK_IOSTART 0 1765 #define SD_CHAIN_DISK_IOSTART_NO_PM 3 1766 #define SD_CHAIN_RMMEDIA_IOSTART 5 1767 #define SD_CHAIN_RMMEDIA_IOSTART_NO_PM 9 1768 #define SD_CHAIN_CHKSUM_IOSTART 12 1769 #define SD_CHAIN_CHKSUM_IOSTART_NO_PM 16 1770 #define SD_CHAIN_USCSI_CMD_IOSTART 19 1771 #define SD_CHAIN_USCSI_CHKSUM_IOSTART 21 1772 #define SD_CHAIN_DIRECT_CMD_IOSTART 24 1773 #define SD_CHAIN_PRIORITY_CMD_IOSTART 25 1774 1775 1776 /* 1777 * Table of function pointers for the iodone-side routines for the driver- 1778 * internal layering mechanism. The calling sequence for iodone routines 1779 * uses a decrementing table index, so the last routine called in a chain 1780 * must be at the lowest array index location for that chain. The last 1781 * routine for each chain must be either sd_buf_iodone() (for buf(9S) IOs) 1782 * or sd_uscsi_iodone() (for uscsi IOs). Other than this, the ordering 1783 * of the functions in an iodone side chain must correspond to the ordering 1784 * of the iostart routines for that chain. Note that there is no iodone 1785 * side routine that corresponds to sd_core_iostart(), so there is no 1786 * entry in the table for this.
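 *
 * For example, a buf entering at SD_CHAIN_DISK_IOSTART is processed by
 * sd_mapblockaddr_iostart (index 0), sd_pm_iostart (index 1) and
 * sd_core_iostart (index 2) in turn; at completion the iodone side is
 * entered at SD_CHAIN_DISK_IODONE (index 2, sd_pm_iodone) and walks
 * down through sd_mapblockaddr_iodone (index 1) to sd_buf_iodone
 * (index 0).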
1787 */ 1788 1789 static sd_chain_t sd_iodone_chain[] = { 1790 1791 /* Chain for buf IO for disk drive targets (PM enabled) */ 1792 sd_buf_iodone, /* Index: 0 */ 1793 sd_mapblockaddr_iodone, /* Index: 1 */ 1794 sd_pm_iodone, /* Index: 2 */ 1795 1796 /* Chain for buf IO for disk drive targets (PM disabled) */ 1797 sd_buf_iodone, /* Index: 3 */ 1798 sd_mapblockaddr_iodone, /* Index: 4 */ 1799 1800 /* Chain for buf IO for removable-media targets (PM enabled) */ 1801 sd_buf_iodone, /* Index: 5 */ 1802 sd_mapblockaddr_iodone, /* Index: 6 */ 1803 sd_mapblocksize_iodone, /* Index: 7 */ 1804 sd_pm_iodone, /* Index: 8 */ 1805 1806 /* Chain for buf IO for removable-media targets (PM disabled) */ 1807 sd_buf_iodone, /* Index: 9 */ 1808 sd_mapblockaddr_iodone, /* Index: 10 */ 1809 sd_mapblocksize_iodone, /* Index: 11 */ 1810 1811 /* Chain for buf IO for disk drives with checksumming (PM enabled) */ 1812 sd_buf_iodone, /* Index: 12 */ 1813 sd_mapblockaddr_iodone, /* Index: 13 */ 1814 sd_checksum_iodone, /* Index: 14 */ 1815 sd_pm_iodone, /* Index: 15 */ 1816 1817 /* Chain for buf IO for disk drives with checksumming (PM disabled) */ 1818 sd_buf_iodone, /* Index: 16 */ 1819 sd_mapblockaddr_iodone, /* Index: 17 */ 1820 sd_checksum_iodone, /* Index: 18 */ 1821 1822 /* Chain for USCSI commands (non-checksum targets) */ 1823 sd_uscsi_iodone, /* Index: 19 */ 1824 sd_pm_iodone, /* Index: 20 */ 1825 1826 /* Chain for USCSI commands (checksum targets) */ 1827 sd_uscsi_iodone, /* Index: 21 */ 1828 sd_checksum_uscsi_iodone, /* Index: 22 */ 1829 sd_pm_iodone, /* Index: 23 */ 1830 1831 /* Chain for "direct" USCSI commands (all targets) */ 1832 sd_uscsi_iodone, /* Index: 24 */ 1833 1834 /* Chain for "direct priority" USCSI commands (all targets) */ 1835 sd_uscsi_iodone, /* Index: 25 */ 1836 }; 1837 1838 1839 /* 1840 * Macros to locate the "first" function in the sd_iodone_chain[] array for 1841 * each iodone-side chain. These are located by the array index, but as the 1842 * iodone side functions are called in a decrementing-index order, the 1843 * highest index number in each chain must be specified (as these correspond 1844 * to the first function in the iodone chain that will be called by the core 1845 * at IO completion time). 1846 */ 1847 1848 #define SD_CHAIN_DISK_IODONE 2 1849 #define SD_CHAIN_DISK_IODONE_NO_PM 4 1850 #define SD_CHAIN_RMMEDIA_IODONE 8 1851 #define SD_CHAIN_RMMEDIA_IODONE_NO_PM 11 1852 #define SD_CHAIN_CHKSUM_IODONE 15 1853 #define SD_CHAIN_CHKSUM_IODONE_NO_PM 18 1854 #define SD_CHAIN_USCSI_CMD_IODONE 20 1855 #define SD_CHAIN_USCSI_CHKSUM_IODONE 22 1856 #define SD_CHAIN_DIRECT_CMD_IODONE 24 1857 #define SD_CHAIN_PRIORITY_CMD_IODONE 25 1858 1859 1860 1861 1862 /* 1863 * Array to map a layering chain index to the appropriate initpkt routine. 1864 * The redundant entries are present so that the index used for accessing 1865 * the above sd_iostart_chain and sd_iodone_chain tables can be used directly 1866 * with this table as well.
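 *
 * For example, every index belonging to a buf IO chain (0 through 18)
 * maps to sd_initpkt_for_buf, while the USCSI chain indexes (19 through
 * 25) all map to sd_initpkt_for_uscsi.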
1867 */ 1868 typedef int (*sd_initpkt_t)(struct buf *, struct scsi_pkt **); 1869 1870 static sd_initpkt_t sd_initpkt_map[] = { 1871 1872 /* Chain for buf IO for disk drive targets (PM enabled) */ 1873 sd_initpkt_for_buf, /* Index: 0 */ 1874 sd_initpkt_for_buf, /* Index: 1 */ 1875 sd_initpkt_for_buf, /* Index: 2 */ 1876 1877 /* Chain for buf IO for disk drive targets (PM disabled) */ 1878 sd_initpkt_for_buf, /* Index: 3 */ 1879 sd_initpkt_for_buf, /* Index: 4 */ 1880 1881 /* Chain for buf IO for removable-media targets (PM enabled) */ 1882 sd_initpkt_for_buf, /* Index: 5 */ 1883 sd_initpkt_for_buf, /* Index: 6 */ 1884 sd_initpkt_for_buf, /* Index: 7 */ 1885 sd_initpkt_for_buf, /* Index: 8 */ 1886 1887 /* Chain for buf IO for removable-media targets (PM disabled) */ 1888 sd_initpkt_for_buf, /* Index: 9 */ 1889 sd_initpkt_for_buf, /* Index: 10 */ 1890 sd_initpkt_for_buf, /* Index: 11 */ 1891 1892 /* Chain for buf IO for disk drives with checksumming (PM enabled) */ 1893 sd_initpkt_for_buf, /* Index: 12 */ 1894 sd_initpkt_for_buf, /* Index: 13 */ 1895 sd_initpkt_for_buf, /* Index: 14 */ 1896 sd_initpkt_for_buf, /* Index: 15 */ 1897 1898 /* Chain for buf IO for disk drives with checksumming (PM disabled) */ 1899 sd_initpkt_for_buf, /* Index: 16 */ 1900 sd_initpkt_for_buf, /* Index: 17 */ 1901 sd_initpkt_for_buf, /* Index: 18 */ 1902 1903 /* Chain for USCSI commands (non-checksum targets) */ 1904 sd_initpkt_for_uscsi, /* Index: 19 */ 1905 sd_initpkt_for_uscsi, /* Index: 20 */ 1906 1907 /* Chain for USCSI commands (checksum targets) */ 1908 sd_initpkt_for_uscsi, /* Index: 21 */ 1909 sd_initpkt_for_uscsi, /* Index: 22 */ 1910 sd_initpkt_for_uscsi, /* Index: 23 */ 1911 1912 /* Chain for "direct" USCSI commands (all targets) */ 1913 sd_initpkt_for_uscsi, /* Index: 24 */ 1914 1915 /* Chain for "direct priority" USCSI commands (all targets) */ 1916 sd_initpkt_for_uscsi, /* Index: 25 */ 1917 1918 }; 1919 1920 1921 /* 1922 * Array to map a layering chain index to the appropriate destroypkt routine. 1923 * The redundant entries are present so that the index used for accessing 1924 * the above sd_iostart_chain and sd_iodone_chain tables can be used directly 1925 * with this table as well.
1926 */ 1927 typedef void (*sd_destroypkt_t)(struct buf *); 1928 1929 static sd_destroypkt_t sd_destroypkt_map[] = { 1930 1931 /* Chain for buf IO for disk drive targets (PM enabled) */ 1932 sd_destroypkt_for_buf, /* Index: 0 */ 1933 sd_destroypkt_for_buf, /* Index: 1 */ 1934 sd_destroypkt_for_buf, /* Index: 2 */ 1935 1936 /* Chain for buf IO for disk drive targets (PM disabled) */ 1937 sd_destroypkt_for_buf, /* Index: 3 */ 1938 sd_destroypkt_for_buf, /* Index: 4 */ 1939 1940 /* Chain for buf IO for removable-media targets (PM enabled) */ 1941 sd_destroypkt_for_buf, /* Index: 5 */ 1942 sd_destroypkt_for_buf, /* Index: 6 */ 1943 sd_destroypkt_for_buf, /* Index: 7 */ 1944 sd_destroypkt_for_buf, /* Index: 8 */ 1945 1946 /* Chain for buf IO for removable-media targets (PM disabled) */ 1947 sd_destroypkt_for_buf, /* Index: 9 */ 1948 sd_destroypkt_for_buf, /* Index: 10 */ 1949 sd_destroypkt_for_buf, /* Index: 11 */ 1950 1951 /* Chain for buf IO for disk drives with checksumming (PM enabled) */ 1952 sd_destroypkt_for_buf, /* Index: 12 */ 1953 sd_destroypkt_for_buf, /* Index: 13 */ 1954 sd_destroypkt_for_buf, /* Index: 14 */ 1955 sd_destroypkt_for_buf, /* Index: 15 */ 1956 1957 /* Chain for buf IO for disk drives with checksumming (PM disabled) */ 1958 sd_destroypkt_for_buf, /* Index: 16 */ 1959 sd_destroypkt_for_buf, /* Index: 17 */ 1960 sd_destroypkt_for_buf, /* Index: 18 */ 1961 1962 /* Chain for USCSI commands (non-checksum targets) */ 1963 sd_destroypkt_for_uscsi, /* Index: 19 */ 1964 sd_destroypkt_for_uscsi, /* Index: 20 */ 1965 1966 /* Chain for USCSI commands (checksum targets) */ 1967 sd_destroypkt_for_uscsi, /* Index: 21 */ 1968 sd_destroypkt_for_uscsi, /* Index: 22 */ 1969 sd_destroypkt_for_uscsi, /* Index: 23 */ 1970 1971 /* Chain for "direct" USCSI commands (all targets) */ 1972 sd_destroypkt_for_uscsi, /* Index: 24 */ 1973 1974 /* Chain for "direct priority" USCSI commands (all targets) */ 1975 sd_destroypkt_for_uscsi, /* Index: 25 */ 1976 1977 }; 1978 1979 1980 1981 /* 1982 * Array to map a layering chain index to the appropriate chain "type". 1983 * The chain type indicates a specific property/usage of the chain. 1984 * The redundant entries are present so that the index used for accessing 1985 * the above sd_iostart_chain and sd_iodone_chain tables can be used directly 1986 * with this table as well.
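 *
 * For example, sd_chain_type_map[SD_CHAIN_USCSI_CMD_IOSTART] is
 * SD_CHAIN_USCSI; the SD_IS_BUFIO() and SD_IS_DIRECT_PRIORITY() macros
 * below use this table, indexed by xb_chain_iostart, to classify an IO.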
1987 */ 1988 1989 #define SD_CHAIN_NULL 0 /* for the special RQS cmd */ 1990 #define SD_CHAIN_BUFIO 1 /* regular buf IO */ 1991 #define SD_CHAIN_USCSI 2 /* regular USCSI commands */ 1992 #define SD_CHAIN_DIRECT 3 /* uscsi, w/ bypass power mgt */ 1993 #define SD_CHAIN_DIRECT_PRIORITY 4 /* uscsi, w/ bypass power mgt */ 1994 /* (for error recovery) */ 1995 1996 static int sd_chain_type_map[] = { 1997 1998 /* Chain for buf IO for disk drive targets (PM enabled) */ 1999 SD_CHAIN_BUFIO, /* Index: 0 */ 2000 SD_CHAIN_BUFIO, /* Index: 1 */ 2001 SD_CHAIN_BUFIO, /* Index: 2 */ 2002 2003 /* Chain for buf IO for disk drive targets (PM disabled) */ 2004 SD_CHAIN_BUFIO, /* Index: 3 */ 2005 SD_CHAIN_BUFIO, /* Index: 4 */ 2006 2007 /* Chain for buf IO for removable-media targets (PM enabled) */ 2008 SD_CHAIN_BUFIO, /* Index: 5 */ 2009 SD_CHAIN_BUFIO, /* Index: 6 */ 2010 SD_CHAIN_BUFIO, /* Index: 7 */ 2011 SD_CHAIN_BUFIO, /* Index: 8 */ 2012 2013 /* Chain for buf IO for removable-media targets (PM disabled) */ 2014 SD_CHAIN_BUFIO, /* Index: 9 */ 2015 SD_CHAIN_BUFIO, /* Index: 10 */ 2016 SD_CHAIN_BUFIO, /* Index: 11 */ 2017 2018 /* Chain for buf IO for disk drives with checksumming (PM enabled) */ 2019 SD_CHAIN_BUFIO, /* Index: 12 */ 2020 SD_CHAIN_BUFIO, /* Index: 13 */ 2021 SD_CHAIN_BUFIO, /* Index: 14 */ 2022 SD_CHAIN_BUFIO, /* Index: 15 */ 2023 2024 /* Chain for buf IO for disk drives with checksumming (PM disabled) */ 2025 SD_CHAIN_BUFIO, /* Index: 16 */ 2026 SD_CHAIN_BUFIO, /* Index: 17 */ 2027 SD_CHAIN_BUFIO, /* Index: 18 */ 2028 2029 /* Chain for USCSI commands (non-checksum targets) */ 2030 SD_CHAIN_USCSI, /* Index: 19 */ 2031 SD_CHAIN_USCSI, /* Index: 20 */ 2032 2033 /* Chain for USCSI commands (checksum targets) */ 2034 SD_CHAIN_USCSI, /* Index: 21 */ 2035 SD_CHAIN_USCSI, /* Index: 22 */ 2036 SD_CHAIN_USCSI, /* Index: 23 */ 2037 2038 /* Chain for "direct" USCSI commands (all targets) */ 2039 SD_CHAIN_DIRECT, /* Index: 24 */ 2040 2041 /* Chain for "direct priority" USCSI commands (all targets) */ 2042 SD_CHAIN_DIRECT_PRIORITY, /* Index: 25 */ 2043 }; 2044 2045 2046 /* Macro to return TRUE if the IO has come from the sd_buf_iostart() chain. */ 2047 #define SD_IS_BUFIO(xp) \ 2048 (sd_chain_type_map[(xp)->xb_chain_iostart] == SD_CHAIN_BUFIO) 2049 2050 /* Macro to return TRUE if the IO has come from the "direct priority" chain. */ 2051 #define SD_IS_DIRECT_PRIORITY(xp) \ 2052 (sd_chain_type_map[(xp)->xb_chain_iostart] == SD_CHAIN_DIRECT_PRIORITY) 2053 2054 2055 2056 /* 2057 * Struct, array, and macros to map a specific chain to the appropriate 2058 * layering indexes in the sd_iostart_chain[] and sd_iodone_chain[] arrays. 2059 * 2060 * The sd_chain_index_map[] array is used at attach time to set the various 2061 * un_xxx_chain type members of the sd_lun softstate to the specific layering 2062 * chain to be used with the instance. This allows different instances to use 2063 * different chains for buf IO, uscsi IO, etc. Also, since the xb_chain_iostart 2064 * and xb_chain_iodone index values in the sd_xbuf are initialized to these 2065 * values at sd_xbuf init time, this allows (1) layering chains to be changed 2066 * dynamically and without the use of locking; and (2) a layer to update the 2067 * xb_chain_io[start|done] member in a given xbuf with its current index value, 2068 * to allow for deferred processing of an IO within the same chain from a 2069 * different execution context.
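 *
 * For example, an instance attached as a plain disk with PM enabled uses
 * entry 0 of sd_chain_index_map[] below, so its xbufs are initialized
 * with xb_chain_iostart = SD_CHAIN_DISK_IOSTART (0) and
 * xb_chain_iodone = SD_CHAIN_DISK_IODONE (2).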
2070 */ 2071 2072 struct sd_chain_index { 2073 int sci_iostart_index; 2074 int sci_iodone_index; 2075 }; 2076 2077 static struct sd_chain_index sd_chain_index_map[] = { 2078 { SD_CHAIN_DISK_IOSTART, SD_CHAIN_DISK_IODONE }, 2079 { SD_CHAIN_DISK_IOSTART_NO_PM, SD_CHAIN_DISK_IODONE_NO_PM }, 2080 { SD_CHAIN_RMMEDIA_IOSTART, SD_CHAIN_RMMEDIA_IODONE }, 2081 { SD_CHAIN_RMMEDIA_IOSTART_NO_PM, SD_CHAIN_RMMEDIA_IODONE_NO_PM }, 2082 { SD_CHAIN_CHKSUM_IOSTART, SD_CHAIN_CHKSUM_IODONE }, 2083 { SD_CHAIN_CHKSUM_IOSTART_NO_PM, SD_CHAIN_CHKSUM_IODONE_NO_PM }, 2084 { SD_CHAIN_USCSI_CMD_IOSTART, SD_CHAIN_USCSI_CMD_IODONE }, 2085 { SD_CHAIN_USCSI_CHKSUM_IOSTART, SD_CHAIN_USCSI_CHKSUM_IODONE }, 2086 { SD_CHAIN_DIRECT_CMD_IOSTART, SD_CHAIN_DIRECT_CMD_IODONE }, 2087 { SD_CHAIN_PRIORITY_CMD_IOSTART, SD_CHAIN_PRIORITY_CMD_IODONE }, 2088 }; 2089 2090 2091 /* 2092 * The following are indexes into the sd_chain_index_map[] array. 2093 */ 2094 2095 /* un->un_buf_chain_type must be set to one of these */ 2096 #define SD_CHAIN_INFO_DISK 0 2097 #define SD_CHAIN_INFO_DISK_NO_PM 1 2098 #define SD_CHAIN_INFO_RMMEDIA 2 2099 #define SD_CHAIN_INFO_RMMEDIA_NO_PM 3 2100 #define SD_CHAIN_INFO_CHKSUM 4 2101 #define SD_CHAIN_INFO_CHKSUM_NO_PM 5 2102 2103 /* un->un_uscsi_chain_type must be set to one of these */ 2104 #define SD_CHAIN_INFO_USCSI_CMD 6 2105 /* USCSI with PM disabled is the same as DIRECT */ 2106 #define SD_CHAIN_INFO_USCSI_CMD_NO_PM 8 2107 #define SD_CHAIN_INFO_USCSI_CHKSUM 7 2108 2109 /* un->un_direct_chain_type must be set to one of these */ 2110 #define SD_CHAIN_INFO_DIRECT_CMD 8 2111 2112 /* un->un_priority_chain_type must be set to one of these */ 2113 #define SD_CHAIN_INFO_PRIORITY_CMD 9 2114 2115 /* size for devid inquiries */ 2116 #define MAX_INQUIRY_SIZE 0xF0 2117 2118 /* 2119 * Macros used by functions to pass a given buf(9S) struct along to the 2120 * next function in the layering chain for further processing. 2121 * 2122 * In the following macros, passing more than three arguments to the called 2123 * routines causes the optimizer for the SPARC compiler to stop doing tail 2124 * call elimination which results in significant performance degradation. 2125 */ 2126 #define SD_BEGIN_IOSTART(index, un, bp) \ 2127 ((*(sd_iostart_chain[index]))(index, un, bp)) 2128 2129 #define SD_BEGIN_IODONE(index, un, bp) \ 2130 ((*(sd_iodone_chain[index]))(index, un, bp)) 2131 2132 #define SD_NEXT_IOSTART(index, un, bp) \ 2133 ((*(sd_iostart_chain[(index) + 1]))((index) + 1, un, bp)) 2134 2135 #define SD_NEXT_IODONE(index, un, bp) \ 2136 ((*(sd_iodone_chain[(index) - 1]))((index) - 1, un, bp)) 2137 2138 /* 2139 * Function: _init 2140 * 2141 * Description: This is the driver _init(9E) entry point. 2142 * 2143 * Return Code: Returns the value from mod_install(9F) or 2144 * ddi_soft_state_init(9F) as appropriate. 2145 * 2146 * Context: Called when driver module loaded. 
2147 */ 2148 2149 int 2150 _init(void) 2151 { 2152 int err; 2153 2154 /* establish driver name from module name */ 2155 sd_label = (char *)mod_modname(&modlinkage); 2156 2157 err = ddi_soft_state_init(&sd_state, sizeof (struct sd_lun), 2158 SD_MAXUNIT); 2159 2160 if (err != 0) { 2161 return (err); 2162 } 2163 2164 mutex_init(&sd_detach_mutex, NULL, MUTEX_DRIVER, NULL); 2165 mutex_init(&sd_log_mutex, NULL, MUTEX_DRIVER, NULL); 2166 mutex_init(&sd_label_mutex, NULL, MUTEX_DRIVER, NULL); 2167 2168 mutex_init(&sd_tr.srq_resv_reclaim_mutex, NULL, MUTEX_DRIVER, NULL); 2169 cv_init(&sd_tr.srq_resv_reclaim_cv, NULL, CV_DRIVER, NULL); 2170 cv_init(&sd_tr.srq_inprocess_cv, NULL, CV_DRIVER, NULL); 2171 2172 /* 2173 * it's ok to init here even for fibre device 2174 */ 2175 sd_scsi_probe_cache_init(); 2176 2177 sd_scsi_target_lun_init(); 2178 2179 /* 2180 * Creating taskq before mod_install ensures that all callers (threads) 2181 * that enter the module after a successful mod_install encounter 2182 * a valid taskq. 2183 */ 2184 sd_taskq_create(); 2185 2186 err = mod_install(&modlinkage); 2187 if (err != 0) { 2188 /* delete taskq if install fails */ 2189 sd_taskq_delete(); 2190 2191 mutex_destroy(&sd_detach_mutex); 2192 mutex_destroy(&sd_log_mutex); 2193 mutex_destroy(&sd_label_mutex); 2194 2195 mutex_destroy(&sd_tr.srq_resv_reclaim_mutex); 2196 cv_destroy(&sd_tr.srq_resv_reclaim_cv); 2197 cv_destroy(&sd_tr.srq_inprocess_cv); 2198 2199 sd_scsi_probe_cache_fini(); 2200 2201 sd_scsi_target_lun_fini(); 2202 2203 ddi_soft_state_fini(&sd_state); 2204 return (err); 2205 } 2206 2207 return (err); 2208 } 2209 2210 2211 /* 2212 * Function: _fini 2213 * 2214 * Description: This is the driver _fini(9E) entry point. 2215 * 2216 * Return Code: Returns the value from mod_remove(9F) 2217 * 2218 * Context: Called when driver module is unloaded. 2219 */ 2220 2221 int 2222 _fini(void) 2223 { 2224 int err; 2225 2226 if ((err = mod_remove(&modlinkage)) != 0) { 2227 return (err); 2228 } 2229 2230 sd_taskq_delete(); 2231 2232 mutex_destroy(&sd_detach_mutex); 2233 mutex_destroy(&sd_log_mutex); 2234 mutex_destroy(&sd_label_mutex); 2235 mutex_destroy(&sd_tr.srq_resv_reclaim_mutex); 2236 2237 sd_scsi_probe_cache_fini(); 2238 2239 sd_scsi_target_lun_fini(); 2240 2241 cv_destroy(&sd_tr.srq_resv_reclaim_cv); 2242 cv_destroy(&sd_tr.srq_inprocess_cv); 2243 2244 ddi_soft_state_fini(&sd_state); 2245 2246 return (err); 2247 } 2248 2249 2250 /* 2251 * Function: _info 2252 * 2253 * Description: This is the driver _info(9E) entry point. 2254 * 2255 * Arguments: modinfop - pointer to the driver modinfo structure 2256 * 2257 * Return Code: Returns the value from mod_info(9F). 2258 * 2259 * Context: Kernel thread context 2260 */ 2261 2262 int 2263 _info(struct modinfo *modinfop) 2264 { 2265 return (mod_info(&modlinkage, modinfop)); 2266 } 2267 2268 2269 /* 2270 * The following routines implement the driver message logging facility. 2271 * They provide component- and level- based debug output filtering. 2272 * Output may also be restricted to messages for a single instance by 2273 * specifying a soft state pointer in sd_debug_un. If sd_debug_un is set 2274 * to NULL, then messages for all instances are printed. 2275 * 2276 * These routines have been cloned from each other due to the language 2277 * constraints of macros and variable argument list processing. 2278 */ 2279 2280 2281 /* 2282 * Function: sd_log_err 2283 * 2284 * Description: This routine is called by the SD_ERROR macro for debug 2285 * logging of error conditions. 
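 *
 *		For example (illustrative settings only): patching
 *		sd_level_mask to SD_LOGMASK_ERROR and sd_debug_un to one
 *		instance's soft state pointer (e.g. with mdb -kw)
 *		restricts output to error-level messages from that
 *		single instance.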
2286 * 2287 * Arguments: comp - driver component being logged 2288 * dev - pointer to driver info structure 2289 * fmt - error string and format to be logged 2290 */ 2291 2292 static void 2293 sd_log_err(uint_t comp, struct sd_lun *un, const char *fmt, ...) 2294 { 2295 va_list ap; 2296 dev_info_t *dev; 2297 2298 ASSERT(un != NULL); 2299 dev = SD_DEVINFO(un); 2300 ASSERT(dev != NULL); 2301 2302 /* 2303 * Filter messages based on the global component and level masks. 2304 * Also print if un matches the value of sd_debug_un, or if 2305 * sd_debug_un is set to NULL. 2306 */ 2307 if ((sd_component_mask & comp) && (sd_level_mask & SD_LOGMASK_ERROR) && 2308 ((sd_debug_un == NULL) || (sd_debug_un == un))) { 2309 mutex_enter(&sd_log_mutex); 2310 va_start(ap, fmt); 2311 (void) vsprintf(sd_log_buf, fmt, ap); 2312 va_end(ap); 2313 scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf); 2314 mutex_exit(&sd_log_mutex); 2315 } 2316 #ifdef SD_FAULT_INJECTION 2317 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask)); 2318 if (un->sd_injection_mask & comp) { 2319 mutex_enter(&sd_log_mutex); 2320 va_start(ap, fmt); 2321 (void) vsprintf(sd_log_buf, fmt, ap); 2322 va_end(ap); 2323 sd_injection_log(sd_log_buf, un); 2324 mutex_exit(&sd_log_mutex); 2325 } 2326 #endif 2327 } 2328 2329 2330 /* 2331 * Function: sd_log_info 2332 * 2333 * Description: This routine is called by the SD_INFO macro for debug 2334 * logging of general purpose informational conditions. 2335 * 2336 * Arguments: comp - driver component being logged 2337 * dev - pointer to driver info structure 2338 * fmt - info string and format to be logged 2339 */ 2340 2341 static void 2342 sd_log_info(uint_t component, struct sd_lun *un, const char *fmt, ...) 2343 { 2344 va_list ap; 2345 dev_info_t *dev; 2346 2347 ASSERT(un != NULL); 2348 dev = SD_DEVINFO(un); 2349 ASSERT(dev != NULL); 2350 2351 /* 2352 * Filter messages based on the global component and level masks. 2353 * Also print if un matches the value of sd_debug_un, or if 2354 * sd_debug_un is set to NULL. 2355 */ 2356 if ((sd_component_mask & component) && 2357 (sd_level_mask & SD_LOGMASK_INFO) && 2358 ((sd_debug_un == NULL) || (sd_debug_un == un))) { 2359 mutex_enter(&sd_log_mutex); 2360 va_start(ap, fmt); 2361 (void) vsprintf(sd_log_buf, fmt, ap); 2362 va_end(ap); 2363 scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf); 2364 mutex_exit(&sd_log_mutex); 2365 } 2366 #ifdef SD_FAULT_INJECTION 2367 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask)); 2368 if (un->sd_injection_mask & component) { 2369 mutex_enter(&sd_log_mutex); 2370 va_start(ap, fmt); 2371 (void) vsprintf(sd_log_buf, fmt, ap); 2372 va_end(ap); 2373 sd_injection_log(sd_log_buf, un); 2374 mutex_exit(&sd_log_mutex); 2375 } 2376 #endif 2377 } 2378 2379 2380 /* 2381 * Function: sd_log_trace 2382 * 2383 * Description: This routine is called by the SD_TRACE macro for debug 2384 * logging of trace conditions (i.e. function entry/exit). 2385 * 2386 * Arguments: comp - driver component being logged 2387 * dev - pointer to driver info structure 2388 * fmt - trace string and format to be logged 2389 */ 2390 2391 static void 2392 sd_log_trace(uint_t component, struct sd_lun *un, const char *fmt, ...) 2393 { 2394 va_list ap; 2395 dev_info_t *dev; 2396 2397 ASSERT(un != NULL); 2398 dev = SD_DEVINFO(un); 2399 ASSERT(dev != NULL); 2400 2401 /* 2402 * Filter messages based on the global component and level masks. 2403 * Also print if un matches the value of sd_debug_un, or if 2404 * sd_debug_un is set to NULL. 
2405 */ 2406 if ((sd_component_mask & component) && 2407 (sd_level_mask & SD_LOGMASK_TRACE) && 2408 ((sd_debug_un == NULL) || (sd_debug_un == un))) { 2409 mutex_enter(&sd_log_mutex); 2410 va_start(ap, fmt); 2411 (void) vsprintf(sd_log_buf, fmt, ap); 2412 va_end(ap); 2413 scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf); 2414 mutex_exit(&sd_log_mutex); 2415 } 2416 #ifdef SD_FAULT_INJECTION 2417 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask)); 2418 if (un->sd_injection_mask & component) { 2419 mutex_enter(&sd_log_mutex); 2420 va_start(ap, fmt); 2421 (void) vsprintf(sd_log_buf, fmt, ap); 2422 va_end(ap); 2423 sd_injection_log(sd_log_buf, un); 2424 mutex_exit(&sd_log_mutex); 2425 } 2426 #endif 2427 } 2428 2429 2430 /* 2431 * Function: sdprobe 2432 * 2433 * Description: This is the driver probe(9e) entry point function. 2434 * 2435 * Arguments: devi - opaque device info handle 2436 * 2437 * Return Code: DDI_PROBE_SUCCESS: If the probe was successful. 2438 * DDI_PROBE_FAILURE: If the probe failed. 2439 * DDI_PROBE_PARTIAL: If the instance is not present now, 2440 * but may be present in the future. 2441 */ 2442 2443 static int 2444 sdprobe(dev_info_t *devi) 2445 { 2446 struct scsi_device *devp; 2447 int rval; 2448 int instance; 2449 2450 /* 2451 * if it wasn't for pln, sdprobe could actually be nulldev 2452 * in the "__fibre" case. 2453 */ 2454 if (ddi_dev_is_sid(devi) == DDI_SUCCESS) { 2455 return (DDI_PROBE_DONTCARE); 2456 } 2457 2458 devp = ddi_get_driver_private(devi); 2459 2460 if (devp == NULL) { 2461 /* Ooops... nexus driver is mis-configured... */ 2462 return (DDI_PROBE_FAILURE); 2463 } 2464 2465 instance = ddi_get_instance(devi); 2466 2467 if (ddi_get_soft_state(sd_state, instance) != NULL) { 2468 return (DDI_PROBE_PARTIAL); 2469 } 2470 2471 /* 2472 * Call the SCSA utility probe routine to see if we actually 2473 * have a target at this SCSI nexus. 2474 */ 2475 switch (sd_scsi_probe_with_cache(devp, NULL_FUNC)) { 2476 case SCSIPROBE_EXISTS: 2477 switch (devp->sd_inq->inq_dtype) { 2478 case DTYPE_DIRECT: 2479 rval = DDI_PROBE_SUCCESS; 2480 break; 2481 case DTYPE_RODIRECT: 2482 /* CDs etc. Can be removable media */ 2483 rval = DDI_PROBE_SUCCESS; 2484 break; 2485 case DTYPE_OPTICAL: 2486 /* 2487 * Rewritable optical driver HP115AA 2488 * Can also be removable media 2489 */ 2490 2491 /* 2492 * Do not attempt to bind to DTYPE_OPTICAL if 2493 * pre solaris 9 sparc sd behavior is required 2494 * 2495 * If first time through and sd_dtype_optical_bind 2496 * has not been set in /etc/system check properties 2497 */ 2498 2499 if (sd_dtype_optical_bind < 0) { 2500 sd_dtype_optical_bind = ddi_prop_get_int 2501 (DDI_DEV_T_ANY, devi, 0, 2502 "optical-device-bind", 1); 2503 } 2504 2505 if (sd_dtype_optical_bind == 0) { 2506 rval = DDI_PROBE_FAILURE; 2507 } else { 2508 rval = DDI_PROBE_SUCCESS; 2509 } 2510 break; 2511 2512 case DTYPE_NOTPRESENT: 2513 default: 2514 rval = DDI_PROBE_FAILURE; 2515 break; 2516 } 2517 break; 2518 default: 2519 rval = DDI_PROBE_PARTIAL; 2520 break; 2521 } 2522 2523 /* 2524 * This routine checks for resource allocation prior to freeing, 2525 * so it will take care of the "smart probing" case where a 2526 * scsi_probe() may or may not have been issued and will *not* 2527 * free previously-freed resources. 2528 */ 2529 scsi_unprobe(devp); 2530 return (rval); 2531 } 2532 2533 2534 /* 2535 * Function: sdinfo 2536 * 2537 * Description: This is the driver getinfo(9e) entry point function. 
2538 * Given the device number, return the devinfo pointer from 2539 * the scsi_device structure or the instance number 2540 * associated with the dev_t. 2541 * 2542 * Arguments: dip - pointer to device info structure 2543 * infocmd - command argument (DDI_INFO_DEVT2DEVINFO, 2544 * DDI_INFO_DEVT2INSTANCE) 2545 * arg - driver dev_t 2546 * resultp - user buffer for request response 2547 * 2548 * Return Code: DDI_SUCCESS 2549 * DDI_FAILURE 2550 */ 2551 /* ARGSUSED */ 2552 static int 2553 sdinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result) 2554 { 2555 struct sd_lun *un; 2556 dev_t dev; 2557 int instance; 2558 int error; 2559 2560 switch (infocmd) { 2561 case DDI_INFO_DEVT2DEVINFO: 2562 dev = (dev_t)arg; 2563 instance = SDUNIT(dev); 2564 if ((un = ddi_get_soft_state(sd_state, instance)) == NULL) { 2565 return (DDI_FAILURE); 2566 } 2567 *result = (void *) SD_DEVINFO(un); 2568 error = DDI_SUCCESS; 2569 break; 2570 case DDI_INFO_DEVT2INSTANCE: 2571 dev = (dev_t)arg; 2572 instance = SDUNIT(dev); 2573 *result = (void *)(uintptr_t)instance; 2574 error = DDI_SUCCESS; 2575 break; 2576 default: 2577 error = DDI_FAILURE; 2578 } 2579 return (error); 2580 } 2581 2582 /* 2583 * Function: sd_prop_op 2584 * 2585 * Description: This is the driver prop_op(9e) entry point function. 2586 * Return the number of blocks for the partition in question 2587 * or forward the request to the property facilities. 2588 * 2589 * Arguments: dev - device number 2590 * dip - pointer to device info structure 2591 * prop_op - property operator 2592 * mod_flags - DDI_PROP_DONTPASS, don't pass to parent 2593 * name - pointer to property name 2594 * valuep - pointer or address of the user buffer 2595 * lengthp - property length 2596 * 2597 * Return Code: DDI_PROP_SUCCESS 2598 * DDI_PROP_NOT_FOUND 2599 * DDI_PROP_UNDEFINED 2600 * DDI_PROP_NO_MEMORY 2601 * DDI_PROP_BUF_TOO_SMALL 2602 */ 2603 2604 static int 2605 sd_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, int mod_flags, 2606 char *name, caddr_t valuep, int *lengthp) 2607 { 2608 struct sd_lun *un; 2609 2610 if ((un = ddi_get_soft_state(sd_state, ddi_get_instance(dip))) == NULL) 2611 return (ddi_prop_op(dev, dip, prop_op, mod_flags, 2612 name, valuep, lengthp)); 2613 2614 return (cmlb_prop_op(un->un_cmlbhandle, 2615 dev, dip, prop_op, mod_flags, name, valuep, lengthp, 2616 SDPART(dev), (void *)SD_PATH_DIRECT)); 2617 } 2618 2619 /* 2620 * The following functions are for smart probing: 2621 * sd_scsi_probe_cache_init() 2622 * sd_scsi_probe_cache_fini() 2623 * sd_scsi_clear_probe_cache() 2624 * sd_scsi_probe_with_cache() 2625 */ 2626 2627 /* 2628 * Function: sd_scsi_probe_cache_init 2629 * 2630 * Description: Initializes the probe response cache mutex and head pointer. 2631 * 2632 * Context: Kernel thread context 2633 */ 2634 2635 static void 2636 sd_scsi_probe_cache_init(void) 2637 { 2638 mutex_init(&sd_scsi_probe_cache_mutex, NULL, MUTEX_DRIVER, NULL); 2639 sd_scsi_probe_cache_head = NULL; 2640 } 2641 2642 2643 /* 2644 * Function: sd_scsi_probe_cache_fini 2645 * 2646 * Description: Frees all resources associated with the probe response cache. 
2647 * 2648 * Context: Kernel thread context 2649 */ 2650 2651 static void 2652 sd_scsi_probe_cache_fini(void) 2653 { 2654 struct sd_scsi_probe_cache *cp; 2655 struct sd_scsi_probe_cache *ncp; 2656 2657 /* Clean up our smart probing linked list */ 2658 for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = ncp) { 2659 ncp = cp->next; 2660 kmem_free(cp, sizeof (struct sd_scsi_probe_cache)); 2661 } 2662 sd_scsi_probe_cache_head = NULL; 2663 mutex_destroy(&sd_scsi_probe_cache_mutex); 2664 } 2665 2666 2667 /* 2668 * Function: sd_scsi_clear_probe_cache 2669 * 2670 * Description: This routine clears the probe response cache. This is 2671 * done when open() returns ENXIO so that when deferred 2672 * attach is attempted (possibly after a device has been 2673 * turned on) we will retry the probe. Since we don't know 2674 * which target we failed to open, we just clear the 2675 * entire cache. 2676 * 2677 * Context: Kernel thread context 2678 */ 2679 2680 static void 2681 sd_scsi_clear_probe_cache(void) 2682 { 2683 struct sd_scsi_probe_cache *cp; 2684 int i; 2685 2686 mutex_enter(&sd_scsi_probe_cache_mutex); 2687 for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = cp->next) { 2688 /* 2689 * Reset all entries to SCSIPROBE_EXISTS. This will 2690 * force probing to be performed the next time 2691 * sd_scsi_probe_with_cache is called. 2692 */ 2693 for (i = 0; i < NTARGETS_WIDE; i++) { 2694 cp->cache[i] = SCSIPROBE_EXISTS; 2695 } 2696 } 2697 mutex_exit(&sd_scsi_probe_cache_mutex); 2698 } 2699 2700 2701 /* 2702 * Function: sd_scsi_probe_with_cache 2703 * 2704 * Description: This routine implements support for a scsi device probe 2705 * with cache. The driver maintains a cache of the target 2706 * responses to scsi probes. If we get no response from a 2707 * target during a probe inquiry, we remember that, and we 2708 * avoid additional calls to scsi_probe on non-zero LUNs 2709 * on the same target until the cache is cleared. By doing 2710 * so we avoid the 1/4 sec selection timeout for nonzero 2711 * LUNs. lun0 of a target is always probed. 2712 * 2713 * Arguments: devp - Pointer to a scsi_device(9S) structure 2714 * waitfunc - indicates what the allocator routines should 2715 * do when resources are not available. This value 2716 * is passed on to scsi_probe() when that routine 2717 * is called. 2718 * 2719 * Return Code: SCSIPROBE_NORESP if a NORESP in probe response cache; 2720 * otherwise the value returned by scsi_probe(9F). 
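 *
 *		For example, if the probe of lun0 of a target gets no
 *		response, that result is cached; subsequent probes of
 *		nonzero luns on the same target then return
 *		SCSIPROBE_NORESP immediately, avoiding the selection
 *		timeout, until sd_scsi_clear_probe_cache() resets the
 *		cache.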
2721 * 2722 * Context: Kernel thread context 2723 */ 2724 2725 static int 2726 sd_scsi_probe_with_cache(struct scsi_device *devp, int (*waitfn)()) 2727 { 2728 struct sd_scsi_probe_cache *cp; 2729 dev_info_t *pdip = ddi_get_parent(devp->sd_dev); 2730 int lun, tgt; 2731 2732 lun = ddi_prop_get_int(DDI_DEV_T_ANY, devp->sd_dev, DDI_PROP_DONTPASS, 2733 SCSI_ADDR_PROP_LUN, 0); 2734 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devp->sd_dev, DDI_PROP_DONTPASS, 2735 SCSI_ADDR_PROP_TARGET, -1); 2736 2737 /* Make sure caching is enabled and the target is in range */ 2738 if ((tgt < 0) || (tgt >= NTARGETS_WIDE)) { 2739 /* do it the old way (no cache) */ 2740 return (scsi_probe(devp, waitfn)); 2741 } 2742 2743 mutex_enter(&sd_scsi_probe_cache_mutex); 2744 2745 /* Find the cache for this scsi bus instance */ 2746 for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = cp->next) { 2747 if (cp->pdip == pdip) { 2748 break; 2749 } 2750 } 2751 2752 /* If we can't find a cache for this pdip, create one */ 2753 if (cp == NULL) { 2754 int i; 2755 2756 cp = kmem_zalloc(sizeof (struct sd_scsi_probe_cache), 2757 KM_SLEEP); 2758 cp->pdip = pdip; 2759 cp->next = sd_scsi_probe_cache_head; 2760 sd_scsi_probe_cache_head = cp; 2761 for (i = 0; i < NTARGETS_WIDE; i++) { 2762 cp->cache[i] = SCSIPROBE_EXISTS; 2763 } 2764 } 2765 2766 mutex_exit(&sd_scsi_probe_cache_mutex); 2767 2768 /* Recompute the cache for this target if LUN zero */ 2769 if (lun == 0) { 2770 cp->cache[tgt] = SCSIPROBE_EXISTS; 2771 } 2772 2773 /* Don't probe if cache remembers a NORESP from a previous LUN. */ 2774 if (cp->cache[tgt] != SCSIPROBE_EXISTS) { 2775 return (SCSIPROBE_NORESP); 2776 } 2777 2778 /* Do the actual probe; save & return the result */ 2779 return (cp->cache[tgt] = scsi_probe(devp, waitfn)); 2780 } 2781 2782 2783 /* 2784 * Function: sd_scsi_target_lun_init 2785 * 2786 * Description: Initializes the attached lun chain mutex and head pointer. 2787 * 2788 * Context: Kernel thread context 2789 */ 2790 2791 static void 2792 sd_scsi_target_lun_init(void) 2793 { 2794 mutex_init(&sd_scsi_target_lun_mutex, NULL, MUTEX_DRIVER, NULL); 2795 sd_scsi_target_lun_head = NULL; 2796 } 2797 2798 2799 /* 2800 * Function: sd_scsi_target_lun_fini 2801 * 2802 * Description: Frees all resources associated with the attached lun 2803 * chain. 2804 * 2805 * Context: Kernel thread context 2806 */ 2807 2808 static void 2809 sd_scsi_target_lun_fini(void) 2810 { 2811 struct sd_scsi_hba_tgt_lun *cp; 2812 struct sd_scsi_hba_tgt_lun *ncp; 2813 2814 for (cp = sd_scsi_target_lun_head; cp != NULL; cp = ncp) { 2815 ncp = cp->next; 2816 kmem_free(cp, sizeof (struct sd_scsi_hba_tgt_lun)); 2817 } 2818 sd_scsi_target_lun_head = NULL; 2819 mutex_destroy(&sd_scsi_target_lun_mutex); 2820 } 2821 2822 2823 /* 2824 * Function: sd_scsi_get_target_lun_count 2825 * 2826 * Description: This routine will check in the attached lun chain to see 2827 * how many luns are attached on the required SCSI controller 2828 * and target. Currently, some capabilities like tagged queueing 2829 * are supported per target by the HBA, so all luns in a 2830 * target have the same capabilities. Based on this assumption, 2831 * sd should only set these capabilities once per target. This 2832 * function is called when sd needs to decide how many luns 2833 * are already attached on a target. 2834 * 2835 * Arguments: dip - Pointer to the system's dev_info_t for the SCSI 2836 * controller device. 2837 * target - The target ID on the controller's SCSI bus.
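 *
 *		For example (hypothetical), if luns 0 and 1 of target 3
 *		are already attached, calling this routine with that
 *		controller's dip and target 3 returns 2, telling sd that
 *		the per-target capabilities have already been set up.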
2838 * 2839 * Return Code: The number of luns attached on the required target and 2840 * controller. 2841 * -1 if target ID is not in parallel SCSI scope or the given 2842 * dip is not in the chain. 2843 * 2844 * Context: Kernel thread context 2845 */ 2846 2847 static int 2848 sd_scsi_get_target_lun_count(dev_info_t *dip, int target) 2849 { 2850 struct sd_scsi_hba_tgt_lun *cp; 2851 2852 if ((target < 0) || (target >= NTARGETS_WIDE)) { 2853 return (-1); 2854 } 2855 2856 mutex_enter(&sd_scsi_target_lun_mutex); 2857 2858 for (cp = sd_scsi_target_lun_head; cp != NULL; cp = cp->next) { 2859 if (cp->pdip == dip) { 2860 break; 2861 } 2862 } 2863 2864 mutex_exit(&sd_scsi_target_lun_mutex); 2865 2866 if (cp == NULL) { 2867 return (-1); 2868 } 2869 2870 return (cp->nlun[target]); 2871 } 2872 2873 2874 /* 2875 * Function: sd_scsi_update_lun_on_target 2876 * 2877 * Description: This routine is used to update the attached lun chain when a 2878 * lun is attached or detached on a target. 2879 * 2880 * Arguments: dip - Pointer to the system's dev_info_t for the SCSI 2881 * controller device. 2882 * target - The target ID on the controller's SCSI bus. 2883 * flag - Indicates whether the lun is attached or detached. 2884 * 2885 * Context: Kernel thread context 2886 */ 2887 2888 static void 2889 sd_scsi_update_lun_on_target(dev_info_t *dip, int target, int flag) 2890 { 2891 struct sd_scsi_hba_tgt_lun *cp; 2892 2893 mutex_enter(&sd_scsi_target_lun_mutex); 2894 2895 for (cp = sd_scsi_target_lun_head; cp != NULL; cp = cp->next) { 2896 if (cp->pdip == dip) { 2897 break; 2898 } 2899 } 2900 2901 if ((cp == NULL) && (flag == SD_SCSI_LUN_ATTACH)) { 2902 cp = kmem_zalloc(sizeof (struct sd_scsi_hba_tgt_lun), 2903 KM_SLEEP); 2904 cp->pdip = dip; 2905 cp->next = sd_scsi_target_lun_head; 2906 sd_scsi_target_lun_head = cp; 2907 } 2908 2909 mutex_exit(&sd_scsi_target_lun_mutex); 2910 2911 if (cp != NULL) { 2912 if (flag == SD_SCSI_LUN_ATTACH) { 2913 cp->nlun[target]++; 2914 } else { 2915 cp->nlun[target]--; 2916 } 2917 } 2918 } 2919 2920 2921 /* 2922 * Function: sd_spin_up_unit 2923 * 2924 * Description: Issues the following commands to spin up the device: 2925 * START STOP UNIT, and INQUIRY. 2926 * 2927 * Arguments: un - driver soft state (unit) structure 2928 * 2929 * Return Code: 0 - success 2930 * EIO - failure 2931 * EACCES - reservation conflict 2932 * 2933 * Context: Kernel thread context 2934 */ 2935 2936 static int 2937 sd_spin_up_unit(struct sd_lun *un) 2938 { 2939 size_t resid = 0; 2940 int has_conflict = FALSE; 2941 uchar_t *bufaddr; 2942 2943 ASSERT(un != NULL); 2944 2945 /* 2946 * Send a throwaway START UNIT command. 2947 * 2948 * If we fail on this, we don't care presently what precisely 2949 * is wrong. EMC's arrays will also fail this with a check 2950 * condition (0x2/0x4/0x3) if the device is "inactive," but 2951 * we don't want to fail the attach because it may become 2952 * "active" later. 2953 */ 2954 if (sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_START, SD_PATH_DIRECT) 2955 == EACCES) 2956 has_conflict = TRUE; 2957 2958 /* 2959 * Send another INQUIRY command to the target. This is necessary for 2960 * non-removable media direct access devices because their INQUIRY data 2961 * may not be fully qualified until they are spun up (perhaps via the 2962 * START command above). (Note: This seems to be needed for some 2963 * legacy devices only.) The INQUIRY command should succeed even if a 2964 * Reservation Conflict is present.
2965 */ 2966 bufaddr = kmem_zalloc(SUN_INQSIZE, KM_SLEEP); 2967 if (sd_send_scsi_INQUIRY(un, bufaddr, SUN_INQSIZE, 0, 0, &resid) != 0) { 2968 kmem_free(bufaddr, SUN_INQSIZE); 2969 return (EIO); 2970 } 2971 2972 /* 2973 * If we got enough INQUIRY data, copy it over the old INQUIRY data. 2974 * Note that this routine does not return a failure here even if the 2975 * INQUIRY command did not return any data. This is a legacy behavior. 2976 */ 2977 if ((SUN_INQSIZE - resid) >= SUN_MIN_INQLEN) { 2978 bcopy(bufaddr, SD_INQUIRY(un), SUN_INQSIZE); 2979 } 2980 2981 kmem_free(bufaddr, SUN_INQSIZE); 2982 2983 /* If we hit a reservation conflict above, tell the caller. */ 2984 if (has_conflict == TRUE) { 2985 return (EACCES); 2986 } 2987 2988 return (0); 2989 } 2990 2991 #ifdef _LP64 2992 /* 2993 * Function: sd_enable_descr_sense 2994 * 2995 * Description: This routine attempts to select descriptor sense format 2996 * using the Control mode page. Devices that support 64 bit 2997 * LBAs (for >2TB luns) should also implement descriptor 2998 * sense data so we will call this function whenever we see 2999 * a lun larger than 2TB. If for some reason the device 3000 * supports 64 bit LBAs but doesn't support descriptor sense 3001 * presumably the mode select will fail. Everything will 3002 * continue to work normally except that we will not get 3003 * complete sense data for commands that fail with an LBA 3004 * larger than 32 bits. 3005 * 3006 * Arguments: un - driver soft state (unit) structure 3007 * 3008 * Context: Kernel thread context only 3009 */ 3010 3011 static void 3012 sd_enable_descr_sense(struct sd_lun *un) 3013 { 3014 uchar_t *header; 3015 struct mode_control_scsi3 *ctrl_bufp; 3016 size_t buflen; 3017 size_t bd_len; 3018 3019 /* 3020 * Read MODE SENSE page 0xA, Control Mode Page 3021 */ 3022 buflen = MODE_HEADER_LENGTH + MODE_BLK_DESC_LENGTH + 3023 sizeof (struct mode_control_scsi3); 3024 header = kmem_zalloc(buflen, KM_SLEEP); 3025 if (sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, header, buflen, 3026 MODEPAGE_CTRL_MODE, SD_PATH_DIRECT) != 0) { 3027 SD_ERROR(SD_LOG_COMMON, un, 3028 "sd_enable_descr_sense: mode sense ctrl page failed\n"); 3029 goto eds_exit; 3030 } 3031 3032 /* 3033 * Determine size of Block Descriptors in order to locate 3034 * the mode page data. ATAPI devices return 0, SCSI devices 3035 * should return MODE_BLK_DESC_LENGTH. 3036 */ 3037 bd_len = ((struct mode_header *)header)->bdesc_length; 3038 3039 /* Clear the mode data length field for MODE SELECT */ 3040 ((struct mode_header *)header)->length = 0; 3041 3042 ctrl_bufp = (struct mode_control_scsi3 *) 3043 (header + MODE_HEADER_LENGTH + bd_len); 3044 3045 /* 3046 * If the page length is smaller than the expected value, 3047 * the target device doesn't support D_SENSE. Bail out here. 3048 */ 3049 if (ctrl_bufp->mode_page.length < 3050 sizeof (struct mode_control_scsi3) - 2) { 3051 SD_ERROR(SD_LOG_COMMON, un, 3052 "sd_enable_descr_sense: enable D_SENSE failed\n"); 3053 goto eds_exit; 3054 } 3055 3056 /* 3057 * Clear PS bit for MODE SELECT 3058 */ 3059 ctrl_bufp->mode_page.ps = 0; 3060 3061 /* 3062 * Set D_SENSE to enable descriptor sense format. 
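 * (For reference, and as a note added here rather than taken from the
 * original comment: D_SENSE is bit 2 of byte 2 of the Control mode
 * page (0x0A); per SPC-3, setting it asks the target to return
 * descriptor-format rather than fixed-format sense data.)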
3063 */ 3064 ctrl_bufp->d_sense = 1; 3065 3066 /* 3067 * Use MODE SELECT to commit the change to the D_SENSE bit 3068 */ 3069 if (sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, header, 3070 buflen, SD_DONTSAVE_PAGE, SD_PATH_DIRECT) != 0) { 3071 SD_INFO(SD_LOG_COMMON, un, 3072 "sd_enable_descr_sense: mode select ctrl page failed\n"); 3073 goto eds_exit; 3074 } 3075 3076 eds_exit: 3077 kmem_free(header, buflen); 3078 } 3079 3080 /* 3081 * Function: sd_reenable_dsense_task 3082 * 3083 * Description: Re-enable descriptor sense after device or bus reset 3084 * 3085 * Context: Executes in a taskq() thread context 3086 */ 3087 static void 3088 sd_reenable_dsense_task(void *arg) 3089 { 3090 struct sd_lun *un = arg; 3091 3092 ASSERT(un != NULL); 3093 sd_enable_descr_sense(un); 3094 } 3095 #endif /* _LP64 */ 3096 3097 /* 3098 * Function: sd_set_mmc_caps 3099 * 3100 * Description: This routine determines if the device is MMC compliant and if 3101 * the device supports CDDA via a mode sense of the CDVD 3102 * capabilities mode page. Also checks if the device is a 3103 * dvdram writable device. 3104 * 3105 * Arguments: un - driver soft state (unit) structure 3106 * 3107 * Context: Kernel thread context only 3108 */ 3109 3110 static void 3111 sd_set_mmc_caps(struct sd_lun *un) 3112 { 3113 struct mode_header_grp2 *sense_mhp; 3114 uchar_t *sense_page; 3115 caddr_t buf; 3116 int bd_len; 3117 int status; 3118 struct uscsi_cmd com; 3119 int rtn; 3120 uchar_t *out_data_rw, *out_data_hd; 3121 uchar_t *rqbuf_rw, *rqbuf_hd; 3122 3123 ASSERT(un != NULL); 3124 3125 /* 3126 * The flags which will be set in this function are - mmc compliant, 3127 * dvdram writable device, cdda support. Initialize them to FALSE 3128 * and if a capability is detected - it will be set to TRUE. 3129 */ 3130 un->un_f_mmc_cap = FALSE; 3131 un->un_f_dvdram_writable_device = FALSE; 3132 un->un_f_cfg_cdda = FALSE; 3133 3134 buf = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP); 3135 status = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, (uchar_t *)buf, 3136 BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP, SD_PATH_DIRECT); 3137 3138 if (status != 0) { 3139 /* command failed; just return */ 3140 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3141 return; 3142 } 3143 /* 3144 * If the mode sense request for the CDROM CAPABILITIES 3145 * page (0x2A) succeeds the device is assumed to be MMC. 3146 */ 3147 un->un_f_mmc_cap = TRUE; 3148 3149 /* Get to the page data */ 3150 sense_mhp = (struct mode_header_grp2 *)buf; 3151 bd_len = (sense_mhp->bdesc_length_hi << 8) | 3152 sense_mhp->bdesc_length_lo; 3153 if (bd_len > MODE_BLK_DESC_LENGTH) { 3154 /* 3155 * We did not get back the expected block descriptor 3156 * length so we cannot determine if the device supports 3157 * CDDA. However, we still indicate the device is MMC 3158 * according to the successful response to the page 3159 * 0x2A mode sense request. 3160 */ 3161 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3162 "sd_set_mmc_caps: Mode Sense returned " 3163 "invalid block descriptor length\n"); 3164 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3165 return; 3166 } 3167 3168 /* See if read CDDA is supported */ 3169 sense_page = (uchar_t *)(buf + MODE_HEADER_LENGTH_GRP2 + 3170 bd_len); 3171 un->un_f_cfg_cdda = (sense_page[5] & 0x01) ? TRUE : FALSE; 3172 3173 /* See if writing DVD RAM is supported. */ 3174 un->un_f_dvdram_writable_device = (sense_page[3] & 0x20) ? 
TRUE : FALSE; 3175 if (un->un_f_dvdram_writable_device == TRUE) { 3176 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3177 return; 3178 } 3179 3180 /* 3181 * If the device presents DVD or CD capabilities in the mode 3182 * page, we can return here since an RRD will not have 3183 * these capabilities. 3184 */ 3185 if ((sense_page[2] & 0x3f) || (sense_page[3] & 0x3f)) { 3186 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3187 return; 3188 } 3189 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3190 3191 /* 3192 * If un->un_f_dvdram_writable_device is still FALSE, 3193 * check for a Removable Rigid Disk (RRD). An RRD 3194 * device is identified by the features RANDOM_WRITABLE and 3195 * HARDWARE_DEFECT_MANAGEMENT. 3196 */ 3197 out_data_rw = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP); 3198 rqbuf_rw = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 3199 3200 rtn = sd_send_scsi_feature_GET_CONFIGURATION(un, &com, rqbuf_rw, 3201 SENSE_LENGTH, out_data_rw, SD_CURRENT_FEATURE_LEN, 3202 RANDOM_WRITABLE, SD_PATH_STANDARD); 3203 if (rtn != 0) { 3204 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN); 3205 kmem_free(rqbuf_rw, SENSE_LENGTH); 3206 return; 3207 } 3208 3209 out_data_hd = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP); 3210 rqbuf_hd = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 3211 3212 rtn = sd_send_scsi_feature_GET_CONFIGURATION(un, &com, rqbuf_hd, 3213 SENSE_LENGTH, out_data_hd, SD_CURRENT_FEATURE_LEN, 3214 HARDWARE_DEFECT_MANAGEMENT, SD_PATH_STANDARD); 3215 if (rtn == 0) { 3216 /* 3217 * We have good information, check for random writable 3218 * and hardware defect features. 3219 */ 3220 if ((out_data_rw[9] & RANDOM_WRITABLE) && 3221 (out_data_hd[9] & HARDWARE_DEFECT_MANAGEMENT)) { 3222 un->un_f_dvdram_writable_device = TRUE; 3223 } 3224 } 3225 3226 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN); 3227 kmem_free(rqbuf_rw, SENSE_LENGTH); 3228 kmem_free(out_data_hd, SD_CURRENT_FEATURE_LEN); 3229 kmem_free(rqbuf_hd, SENSE_LENGTH); 3230 } 3231 3232 /* 3233 * Function: sd_check_for_writable_cd 3234 * 3235 * Description: This routine determines if the media in the device is 3236 * writable or not. It uses the GET CONFIGURATION command (0x46) 3237 * to make this determination. 3238 * 3239 * Arguments: un - driver soft state (unit) structure 3240 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" 3241 * chain and the normal command waitq, or 3242 * SD_PATH_DIRECT_PRIORITY to use the USCSI 3243 * "direct" chain and bypass the normal command 3244 * waitq. 3245 * 3246 * Context: Never called at interrupt context. 3247 */ 3248 3249 static void 3250 sd_check_for_writable_cd(struct sd_lun *un, int path_flag) 3251 { 3252 struct uscsi_cmd com; 3253 uchar_t *out_data; 3254 uchar_t *rqbuf; 3255 int rtn; 3256 uchar_t *out_data_rw, *out_data_hd; 3257 uchar_t *rqbuf_rw, *rqbuf_hd; 3258 struct mode_header_grp2 *sense_mhp; 3259 uchar_t *sense_page; 3260 caddr_t buf; 3261 int bd_len; 3262 int status; 3263 3264 ASSERT(un != NULL); 3265 ASSERT(mutex_owned(SD_MUTEX(un))); 3266 3267 /* 3268 * Initialize the writable media flag to FALSE; it is set to TRUE 3269 * below only if the configuration info tells us the media is writable. 3270 */ 3271 un->un_f_mmc_writable_media = FALSE; 3272 mutex_exit(SD_MUTEX(un)); 3273 3274 out_data = kmem_zalloc(SD_PROFILE_HEADER_LEN, KM_SLEEP); 3275 rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 3276 3277 rtn = sd_send_scsi_GET_CONFIGURATION(un, &com, rqbuf, SENSE_LENGTH, 3278 out_data, SD_PROFILE_HEADER_LEN, path_flag); 3279 3280 mutex_enter(SD_MUTEX(un)); 3281 if (rtn == 0) { 3282 /* 3283 * We have good information, check for writable DVD.
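 * (out_data[6] and out_data[7] hold the current-profile field of the
 * GET CONFIGURATION feature header; profile 0x0012 is DVD-RAM, which
 * is what the check below tests for.)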
3284 */ 3285 if ((out_data[6] == 0) && (out_data[7] == 0x12)) { 3286 un->un_f_mmc_writable_media = TRUE; 3287 kmem_free(out_data, SD_PROFILE_HEADER_LEN); 3288 kmem_free(rqbuf, SENSE_LENGTH); 3289 return; 3290 } 3291 } 3292 3293 kmem_free(out_data, SD_PROFILE_HEADER_LEN); 3294 kmem_free(rqbuf, SENSE_LENGTH); 3295 3296 /* 3297 * Determine if this is a RRD type device. 3298 */ 3299 mutex_exit(SD_MUTEX(un)); 3300 buf = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP); 3301 status = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, (uchar_t *)buf, 3302 BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP, path_flag); 3303 mutex_enter(SD_MUTEX(un)); 3304 if (status != 0) { 3305 /* command failed; just return */ 3306 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3307 return; 3308 } 3309 3310 /* Get to the page data */ 3311 sense_mhp = (struct mode_header_grp2 *)buf; 3312 bd_len = (sense_mhp->bdesc_length_hi << 8) | sense_mhp->bdesc_length_lo; 3313 if (bd_len > MODE_BLK_DESC_LENGTH) { 3314 /* 3315 * We did not get back the expected block descriptor length so 3316 * we cannot check the mode page. 3317 */ 3318 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3319 "sd_check_for_writable_cd: Mode Sense returned " 3320 "invalid block descriptor length\n"); 3321 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3322 return; 3323 } 3324 3325 /* 3326 * If the device presents DVD or CD capabilities in the mode 3327 * page, we can return here since a RRD device will not have 3328 * these capabilities. 3329 */ 3330 sense_page = (uchar_t *)(buf + MODE_HEADER_LENGTH_GRP2 + bd_len); 3331 if ((sense_page[2] & 0x3f) || (sense_page[3] & 0x3f)) { 3332 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3333 return; 3334 } 3335 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3336 3337 /* 3338 * If un->un_f_mmc_writable_media is still FALSE, 3339 * check for RRD type media. A RRD device is identified 3340 * by the features RANDOM_WRITABLE and HARDWARE_DEFECT_MANAGEMENT. 3341 */ 3342 mutex_exit(SD_MUTEX(un)); 3343 out_data_rw = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP); 3344 rqbuf_rw = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 3345 3346 rtn = sd_send_scsi_feature_GET_CONFIGURATION(un, &com, rqbuf_rw, 3347 SENSE_LENGTH, out_data_rw, SD_CURRENT_FEATURE_LEN, 3348 RANDOM_WRITABLE, path_flag); 3349 if (rtn != 0) { 3350 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN); 3351 kmem_free(rqbuf_rw, SENSE_LENGTH); 3352 mutex_enter(SD_MUTEX(un)); 3353 return; 3354 } 3355 3356 out_data_hd = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP); 3357 rqbuf_hd = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 3358 3359 rtn = sd_send_scsi_feature_GET_CONFIGURATION(un, &com, rqbuf_hd, 3360 SENSE_LENGTH, out_data_hd, SD_CURRENT_FEATURE_LEN, 3361 HARDWARE_DEFECT_MANAGEMENT, path_flag); 3362 mutex_enter(SD_MUTEX(un)); 3363 if (rtn == 0) { 3364 /* 3365 * We have good information, check for random writable 3366 * and hardware defect features as current. 3367 */ 3368 if ((out_data_rw[9] & RANDOM_WRITABLE) && 3369 (out_data_rw[10] & 0x1) && 3370 (out_data_hd[9] & HARDWARE_DEFECT_MANAGEMENT) && 3371 (out_data_hd[10] & 0x1)) { 3372 un->un_f_mmc_writable_media = TRUE; 3373 } 3374 } 3375 3376 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN); 3377 kmem_free(rqbuf_rw, SENSE_LENGTH); 3378 kmem_free(out_data_hd, SD_CURRENT_FEATURE_LEN); 3379 kmem_free(rqbuf_hd, SENSE_LENGTH); 3380 } 3381 3382 /* 3383 * Function: sd_read_unit_properties 3384 * 3385 * Description: The following implements a property lookup mechanism. 
3386  * Properties for particular disks (keyed on vendor, model
3387  * and rev numbers) are sought in the sd.conf file via
3388  * sd_process_sdconf_file(), and if not found there, are
3389  * looked for in a list hardcoded in this driver via
3390  * sd_process_sdconf_table(). Once located, the properties
3391  * are used to update the driver unit structure.
3392  *
3393  * Arguments: un - driver soft state (unit) structure
3394  */
3395
3396 static void
3397 sd_read_unit_properties(struct sd_lun *un)
3398 {
3399     /*
3400      * sd_process_sdconf_file returns SD_FAILURE if it cannot find
3401      * the "sd-config-list" property (from the sd.conf file) or if
3402      * there was not a match for the inquiry vid/pid. If this event
3403      * occurs the static driver configuration table is searched for
3404      * a match.
3405      */
3406     ASSERT(un != NULL);
3407     if (sd_process_sdconf_file(un) == SD_FAILURE) {
3408         sd_process_sdconf_table(un);
3409     }
3410
3411     /* check for LSI device */
3412     sd_is_lsi(un);
3413
3414
3415 }
3416
3417
3418 /*
3419  * Function: sd_process_sdconf_file
3420  *
3421  * Description: Use ddi_prop_lookup(9F) to obtain the properties from the
3422  *              driver's config file (ie, sd.conf) and update the driver
3423  *              soft state structure accordingly.
3424  *
3425  * Arguments: un - driver soft state (unit) structure
3426  *
3427  * Return Code: SD_SUCCESS - The properties were successfully set according
3428  *              to the driver configuration file.
3429  *              SD_FAILURE - The driver config list was not obtained or
3430  *              there was no vid/pid match. This indicates that
3431  *              the static config table should be used.
3432  *
3433  * The config file has a property, "sd-config-list". Currently we support
3434  * two kinds of formats. For both formats, the value of this property
3435  * is a list of duplets:
3436  *
3437  * sd-config-list=
3438  * <duplet>,
3439  * [,<duplet>]*;
3440  *
3441  * For the improved format:
3442  *
3443  * <duplet>:= "<vid+pid>","<tunable-list>"
3444  *
3445  * and
3446  *
3447  * <tunable-list>:= <tunable> [, <tunable> ]*;
3448  * <tunable> = <name> : <value>
3449  *
3450  * The <vid+pid> is the string that is returned by the target device on a
3451  * SCSI inquiry command; the <tunable-list> contains one or more tunables
3452  * to apply to all target devices with the specified <vid+pid>.
3453  *
3454  * Each <tunable> is a "<name> : <value>" pair.
3455  *
3456  * For the old format, the structure of each duplet is as follows:
3457  *
3458  * <duplet>:= "<vid+pid>","<data-property-name-list>"
3459  *
3460  * The first entry of the duplet is the device ID string (the concatenated
3461  * vid & pid; not to be confused with a device_id). This is defined in
3462  * the same way as in the sd_disk_table.
3463  *
3464  * The second part of the duplet is a string that identifies a
3465  * data-property-name-list. The data-property-name-list is defined as
3466  * follows:
3467  *
3468  * <data-property-name-list>:=<data-property-name> [<data-property-name>]
3469  *
3470  * The syntax of <data-property-name> depends on the <version> field.
3471  *
3472  * If version = SD_CONF_VERSION_1, we have the following syntax:
3473  *
3474  * <data-property-name>:=<version>,<flags>,<prop0>,<prop1>,.....<propN>
3475  *
3476  * where the prop0 value will be used to set prop0 if bit0 is set in the
3477  * flags, prop1 if bit1 is set, etc.,
and N = SD_CONF_MAX_ITEMS -1 3478 * 3479 */ 3480 3481 static int 3482 sd_process_sdconf_file(struct sd_lun *un) 3483 { 3484 char **config_list = NULL; 3485 uint_t nelements; 3486 char *vidptr; 3487 int vidlen; 3488 char *dnlist_ptr; 3489 char *dataname_ptr; 3490 char *dataname_lasts; 3491 int *data_list = NULL; 3492 uint_t data_list_len; 3493 int rval = SD_FAILURE; 3494 int i; 3495 3496 ASSERT(un != NULL); 3497 3498 /* Obtain the configuration list associated with the .conf file */ 3499 if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, SD_DEVINFO(un), 3500 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, sd_config_list, 3501 &config_list, &nelements) != DDI_PROP_SUCCESS) { 3502 return (SD_FAILURE); 3503 } 3504 3505 /* 3506 * Compare vids in each duplet to the inquiry vid - if a match is 3507 * made, get the data value and update the soft state structure 3508 * accordingly. 3509 * 3510 * Each duplet should show as a pair of strings, return SD_FAILURE 3511 * otherwise. 3512 */ 3513 if (nelements & 1) { 3514 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3515 "sd-config-list should show as pairs of strings.\n"); 3516 if (config_list) 3517 ddi_prop_free(config_list); 3518 return (SD_FAILURE); 3519 } 3520 3521 for (i = 0; i < nelements; i += 2) { 3522 /* 3523 * Note: The assumption here is that each vid entry is on 3524 * a unique line from its associated duplet. 3525 */ 3526 vidptr = config_list[i]; 3527 vidlen = (int)strlen(vidptr); 3528 if ((vidlen == 0) || 3529 (sd_sdconf_id_match(un, vidptr, vidlen) != SD_SUCCESS)) { 3530 continue; 3531 } 3532 3533 /* 3534 * dnlist contains 1 or more blank separated 3535 * data-property-name entries 3536 */ 3537 dnlist_ptr = config_list[i + 1]; 3538 3539 if (strchr(dnlist_ptr, ':') != NULL) { 3540 /* 3541 * Decode the improved format sd-config-list. 3542 */ 3543 sd_nvpair_str_decode(un, dnlist_ptr); 3544 } else { 3545 /* 3546 * The old format sd-config-list, loop through all 3547 * data-property-name entries in the 3548 * data-property-name-list 3549 * setting the properties for each. 3550 */ 3551 for (dataname_ptr = sd_strtok_r(dnlist_ptr, " \t", 3552 &dataname_lasts); dataname_ptr != NULL; 3553 dataname_ptr = sd_strtok_r(NULL, " \t", 3554 &dataname_lasts)) { 3555 int version; 3556 3557 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3558 "sd_process_sdconf_file: disk:%s, " 3559 "data:%s\n", vidptr, dataname_ptr); 3560 3561 /* Get the data list */ 3562 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, 3563 SD_DEVINFO(un), 0, dataname_ptr, &data_list, 3564 &data_list_len) != DDI_PROP_SUCCESS) { 3565 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3566 "sd_process_sdconf_file: data " 3567 "property (%s) has no value\n", 3568 dataname_ptr); 3569 continue; 3570 } 3571 3572 version = data_list[0]; 3573 3574 if (version == SD_CONF_VERSION_1) { 3575 sd_tunables values; 3576 3577 /* Set the properties */ 3578 if (sd_chk_vers1_data(un, data_list[1], 3579 &data_list[2], data_list_len, 3580 dataname_ptr) == SD_SUCCESS) { 3581 sd_get_tunables_from_conf(un, 3582 data_list[1], &data_list[2], 3583 &values); 3584 sd_set_vers1_properties(un, 3585 data_list[1], &values); 3586 rval = SD_SUCCESS; 3587 } else { 3588 rval = SD_FAILURE; 3589 } 3590 } else { 3591 scsi_log(SD_DEVINFO(un), sd_label, 3592 CE_WARN, "data property %s version " 3593 "0x%x is invalid.", 3594 dataname_ptr, version); 3595 rval = SD_FAILURE; 3596 } 3597 if (data_list) 3598 ddi_prop_free(data_list); 3599 } 3600 } 3601 } 3602 3603 /* free up the memory allocated by ddi_prop_lookup_string_array(). 
*/ 3604 if (config_list) { 3605 ddi_prop_free(config_list); 3606 } 3607 3608 return (rval); 3609 } 3610 3611 /* 3612 * Function: sd_nvpair_str_decode() 3613 * 3614 * Description: Parse the improved format sd-config-list to get 3615 * each entry of tunable, which includes a name-value pair. 3616 * Then call sd_set_properties() to set the property. 3617 * 3618 * Arguments: un - driver soft state (unit) structure 3619 * nvpair_str - the tunable list 3620 */ 3621 static void 3622 sd_nvpair_str_decode(struct sd_lun *un, char *nvpair_str) 3623 { 3624 char *nv, *name, *value, *token; 3625 char *nv_lasts, *v_lasts, *x_lasts; 3626 3627 for (nv = sd_strtok_r(nvpair_str, ",", &nv_lasts); nv != NULL; 3628 nv = sd_strtok_r(NULL, ",", &nv_lasts)) { 3629 token = sd_strtok_r(nv, ":", &v_lasts); 3630 name = sd_strtok_r(token, " \t", &x_lasts); 3631 token = sd_strtok_r(NULL, ":", &v_lasts); 3632 value = sd_strtok_r(token, " \t", &x_lasts); 3633 if (name == NULL || value == NULL) { 3634 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3635 "sd_nvpair_str_decode: " 3636 "name or value is not valid!\n"); 3637 } else { 3638 sd_set_properties(un, name, value); 3639 } 3640 } 3641 } 3642 3643 /* 3644 * Function: sd_strtok_r() 3645 * 3646 * Description: This function uses strpbrk and strspn to break 3647 * string into tokens on sequentially subsequent calls. Return 3648 * NULL when no non-separator characters remain. The first 3649 * argument is NULL for subsequent calls. 3650 */ 3651 static char * 3652 sd_strtok_r(char *string, const char *sepset, char **lasts) 3653 { 3654 char *q, *r; 3655 3656 /* First or subsequent call */ 3657 if (string == NULL) 3658 string = *lasts; 3659 3660 if (string == NULL) 3661 return (NULL); 3662 3663 /* Skip leading separators */ 3664 q = string + strspn(string, sepset); 3665 3666 if (*q == '\0') 3667 return (NULL); 3668 3669 if ((r = strpbrk(q, sepset)) == NULL) 3670 *lasts = NULL; 3671 else { 3672 *r = '\0'; 3673 *lasts = r + 1; 3674 } 3675 return (q); 3676 } 3677 3678 /* 3679 * Function: sd_set_properties() 3680 * 3681 * Description: Set device properties based on the improved 3682 * format sd-config-list. 
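 *
 * For example, a hypothetical sd.conf entry of
 *
 *   sd-config-list = "VENDOR  MODEL", "throttle-max:64, disksort:false";
 *
 * results in two calls to this routine, one per name:value pair.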
3683 * 3684 * Arguments: un - driver soft state (unit) structure 3685 * name - supported tunable name 3686 * value - tunable value 3687 */ 3688 static void 3689 sd_set_properties(struct sd_lun *un, char *name, char *value) 3690 { 3691 char *endptr = NULL; 3692 long val = 0; 3693 3694 if (strcasecmp(name, "cache-nonvolatile") == 0) { 3695 if (strcasecmp(value, "true") == 0) { 3696 un->un_f_suppress_cache_flush = TRUE; 3697 } else if (strcasecmp(value, "false") == 0) { 3698 un->un_f_suppress_cache_flush = FALSE; 3699 } else { 3700 goto value_invalid; 3701 } 3702 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 3703 "suppress_cache_flush flag set to %d\n", 3704 un->un_f_suppress_cache_flush); 3705 return; 3706 } 3707 3708 if (strcasecmp(name, "controller-type") == 0) { 3709 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 3710 un->un_ctype = val; 3711 } else { 3712 goto value_invalid; 3713 } 3714 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 3715 "ctype set to %d\n", un->un_ctype); 3716 return; 3717 } 3718 3719 if (strcasecmp(name, "delay-busy") == 0) { 3720 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 3721 un->un_busy_timeout = drv_usectohz(val / 1000); 3722 } else { 3723 goto value_invalid; 3724 } 3725 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 3726 "busy_timeout set to %d\n", un->un_busy_timeout); 3727 return; 3728 } 3729 3730 if (strcasecmp(name, "disksort") == 0) { 3731 if (strcasecmp(value, "true") == 0) { 3732 un->un_f_disksort_disabled = FALSE; 3733 } else if (strcasecmp(value, "false") == 0) { 3734 un->un_f_disksort_disabled = TRUE; 3735 } else { 3736 goto value_invalid; 3737 } 3738 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 3739 "disksort disabled flag set to %d\n", 3740 un->un_f_disksort_disabled); 3741 return; 3742 } 3743 3744 if (strcasecmp(name, "timeout-releasereservation") == 0) { 3745 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 3746 un->un_reserve_release_time = val; 3747 } else { 3748 goto value_invalid; 3749 } 3750 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 3751 "reservation release timeout set to %d\n", 3752 un->un_reserve_release_time); 3753 return; 3754 } 3755 3756 if (strcasecmp(name, "reset-lun") == 0) { 3757 if (strcasecmp(value, "true") == 0) { 3758 un->un_f_lun_reset_enabled = TRUE; 3759 } else if (strcasecmp(value, "false") == 0) { 3760 un->un_f_lun_reset_enabled = FALSE; 3761 } else { 3762 goto value_invalid; 3763 } 3764 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 3765 "lun reset enabled flag set to %d\n", 3766 un->un_f_lun_reset_enabled); 3767 return; 3768 } 3769 3770 if (strcasecmp(name, "retries-busy") == 0) { 3771 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 3772 un->un_busy_retry_count = val; 3773 } else { 3774 goto value_invalid; 3775 } 3776 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 3777 "busy retry count set to %d\n", un->un_busy_retry_count); 3778 return; 3779 } 3780 3781 if (strcasecmp(name, "retries-timeout") == 0) { 3782 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 3783 un->un_retry_count = val; 3784 } else { 3785 goto value_invalid; 3786 } 3787 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 3788 "timeout retry count set to %d\n", un->un_retry_count); 3789 return; 3790 } 3791 3792 if (strcasecmp(name, "retries-notready") == 0) { 3793 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 3794 un->un_notready_retry_count = val; 3795 } else { 3796 goto value_invalid; 3797 } 3798 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 3799 "notready retry count 
set to %d\n", 3800 un->un_notready_retry_count); 3801 return; 3802 } 3803 3804 if (strcasecmp(name, "retries-reset") == 0) { 3805 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 3806 un->un_reset_retry_count = val; 3807 } else { 3808 goto value_invalid; 3809 } 3810 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 3811 "reset retry count set to %d\n", 3812 un->un_reset_retry_count); 3813 return; 3814 } 3815 3816 if (strcasecmp(name, "throttle-max") == 0) { 3817 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 3818 un->un_saved_throttle = un->un_throttle = val; 3819 } else { 3820 goto value_invalid; 3821 } 3822 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 3823 "throttle set to %d\n", un->un_throttle); 3824 } 3825 3826 if (strcasecmp(name, "throttle-min") == 0) { 3827 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 3828 un->un_min_throttle = val; 3829 } else { 3830 goto value_invalid; 3831 } 3832 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 3833 "min throttle set to %d\n", un->un_min_throttle); 3834 } 3835 3836 /* 3837 * Validate the throttle values. 3838 * If any of the numbers are invalid, set everything to defaults. 3839 */ 3840 if ((un->un_throttle < SD_LOWEST_VALID_THROTTLE) || 3841 (un->un_min_throttle < SD_LOWEST_VALID_THROTTLE) || 3842 (un->un_min_throttle > un->un_throttle)) { 3843 un->un_saved_throttle = un->un_throttle = sd_max_throttle; 3844 un->un_min_throttle = sd_min_throttle; 3845 } 3846 return; 3847 3848 value_invalid: 3849 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 3850 "value of prop %s is invalid\n", name); 3851 } 3852 3853 /* 3854 * Function: sd_get_tunables_from_conf() 3855 * 3856 * 3857 * This function reads the data list from the sd.conf file and pulls 3858 * the values that can have numeric values as arguments and places 3859 * the values in the appropriate sd_tunables member. 3860 * Since the order of the data list members varies across platforms 3861 * This function reads them from the data list in a platform specific 3862 * order and places them into the correct sd_tunable member that is 3863 * consistent across all platforms. 
3864  */
3865 static void
3866 sd_get_tunables_from_conf(struct sd_lun *un, int flags, int *data_list,
3867     sd_tunables *values)
3868 {
3869     int i;
3870     int mask;
3871
3872     bzero(values, sizeof (sd_tunables));
3873
3874     for (i = 0; i < SD_CONF_MAX_ITEMS; i++) {
3875
3876         mask = 1 << i;
3877         if (mask > flags) {
3878             break;
3879         }
3880
3881         switch (mask & flags) {
3882         case 0: /* This mask bit not set in flags */
3883             continue;
3884         case SD_CONF_BSET_THROTTLE:
3885             values->sdt_throttle = data_list[i];
3886             SD_INFO(SD_LOG_ATTACH_DETACH, un,
3887                 "sd_get_tunables_from_conf: throttle = %d\n",
3888                 values->sdt_throttle);
3889             break;
3890         case SD_CONF_BSET_CTYPE:
3891             values->sdt_ctype = data_list[i];
3892             SD_INFO(SD_LOG_ATTACH_DETACH, un,
3893                 "sd_get_tunables_from_conf: ctype = %d\n",
3894                 values->sdt_ctype);
3895             break;
3896         case SD_CONF_BSET_NRR_COUNT:
3897             values->sdt_not_rdy_retries = data_list[i];
3898             SD_INFO(SD_LOG_ATTACH_DETACH, un,
3899                 "sd_get_tunables_from_conf: not_rdy_retries = %d\n",
3900                 values->sdt_not_rdy_retries);
3901             break;
3902         case SD_CONF_BSET_BSY_RETRY_COUNT:
3903             values->sdt_busy_retries = data_list[i];
3904             SD_INFO(SD_LOG_ATTACH_DETACH, un,
3905                 "sd_get_tunables_from_conf: busy_retries = %d\n",
3906                 values->sdt_busy_retries);
3907             break;
3908         case SD_CONF_BSET_RST_RETRIES:
3909             values->sdt_reset_retries = data_list[i];
3910             SD_INFO(SD_LOG_ATTACH_DETACH, un,
3911                 "sd_get_tunables_from_conf: reset_retries = %d\n",
3912                 values->sdt_reset_retries);
3913             break;
3914         case SD_CONF_BSET_RSV_REL_TIME:
3915             values->sdt_reserv_rel_time = data_list[i];
3916             SD_INFO(SD_LOG_ATTACH_DETACH, un,
3917                 "sd_get_tunables_from_conf: reserv_rel_time = %d\n",
3918                 values->sdt_reserv_rel_time);
3919             break;
3920         case SD_CONF_BSET_MIN_THROTTLE:
3921             values->sdt_min_throttle = data_list[i];
3922             SD_INFO(SD_LOG_ATTACH_DETACH, un,
3923                 "sd_get_tunables_from_conf: min_throttle = %d\n",
3924                 values->sdt_min_throttle);
3925             break;
3926         case SD_CONF_BSET_DISKSORT_DISABLED:
3927             values->sdt_disk_sort_dis = data_list[i];
3928             SD_INFO(SD_LOG_ATTACH_DETACH, un,
3929                 "sd_get_tunables_from_conf: disk_sort_dis = %d\n",
3930                 values->sdt_disk_sort_dis);
3931             break;
3932         case SD_CONF_BSET_LUN_RESET_ENABLED:
3933             values->sdt_lun_reset_enable = data_list[i];
3934             SD_INFO(SD_LOG_ATTACH_DETACH, un,
3935                 "sd_get_tunables_from_conf: lun_reset_enable = %d"
3936                 "\n", values->sdt_lun_reset_enable);
3937             break;
3938         case SD_CONF_BSET_CACHE_IS_NV:
3939             values->sdt_suppress_cache_flush = data_list[i];
3940             SD_INFO(SD_LOG_ATTACH_DETACH, un,
3941                 "sd_get_tunables_from_conf: "
3942                 "suppress_cache_flush = %d"
3943                 "\n", values->sdt_suppress_cache_flush);
3944             break;
3945         }
3946     }
3947 }
3948
3949 /*
3950  * Function: sd_process_sdconf_table
3951  *
3952  * Description: Search the static configuration table for a match on the
3953  *              inquiry vid/pid and update the driver soft state structure
3954  *              according to the table property values for the device.
3955  *
3956  * The form of a configuration table entry is:
3957  *   <vid+pid>,<flags>,<property-data>
3958  *   "SEAGATE ST42400N",1,0x40000,
3959  *   0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1;
3960  *
3961  * Arguments: un - driver soft state (unit) structure
3962  */
3963
3964 static void
3965 sd_process_sdconf_table(struct sd_lun *un)
3966 {
3967     char *id = NULL;
3968     int table_index;
3969     int idlen;
3970
3971     ASSERT(un != NULL);
3972     for (table_index = 0; table_index < sd_disk_table_size;
3973         table_index++) {
3974         id = sd_disk_table[table_index].device_id;
3975         idlen = strlen(id);
3976         if (idlen == 0) {
3977             continue;
3978         }
3979
3980         /*
3981          * The static configuration table currently does not
3982          * implement version 10 properties. Additionally,
3983          * multiple data-property-name entries are not
3984          * implemented in the static configuration table.
3985          */
3986         if (sd_sdconf_id_match(un, id, idlen) == SD_SUCCESS) {
3987             SD_INFO(SD_LOG_ATTACH_DETACH, un,
3988                 "sd_process_sdconf_table: disk %s\n", id);
3989             sd_set_vers1_properties(un,
3990                 sd_disk_table[table_index].flags,
3991                 sd_disk_table[table_index].properties);
3992             break;
3993         }
3994     }
3995 }
3996
3997
3998 /*
3999  * Function: sd_sdconf_id_match
4000  *
4001  * Description: This local function implements a case-insensitive vid/pid
4002  *              comparison as well as the boundary cases of wild card and
4003  *              multiple blanks.
4004  *
4005  * Note: An implicit assumption made here is that the scsi
4006  *       inquiry structure will always keep the vid, pid and
4007  *       revision strings in consecutive sequence, so they can be
4008  *       read as a single string. If this assumption is not the
4009  *       case, a separate string, to be used for the check, needs
4010  *       to be built with these strings concatenated.
4011  *
4012  * Arguments: un - driver soft state (unit) structure
4013  *            id - table or config file vid/pid
4014  *            idlen - length of the vid/pid (bytes)
4015  *
4016  * Return Code: SD_SUCCESS - Indicates a match with the inquiry vid/pid
4017  *              SD_FAILURE - Indicates no match with the inquiry vid/pid
4018  */
4019
4020 static int
4021 sd_sdconf_id_match(struct sd_lun *un, char *id, int idlen)
4022 {
4023     struct scsi_inquiry *sd_inq;
4024     int rval = SD_SUCCESS;
4025
4026     ASSERT(un != NULL);
4027     sd_inq = un->un_sd->sd_inq;
4028     ASSERT(id != NULL);
4029
4030     /*
4031      * We use the inq_vid as a pointer to a buffer containing the
4032      * vid and pid and use the entire vid/pid length of the table
4033      * entry for the comparison. This works because the inq_pid
4034      * data member follows inq_vid in the scsi_inquiry structure.
4035      */
4036     if (strncasecmp(sd_inq->inq_vid, id, idlen) != 0) {
4037         /*
4038          * The user id string is compared to the inquiry vid/pid
4039          * using a case insensitive comparison and ignoring
4040          * multiple spaces.
4041          */
4042         rval = sd_blank_cmp(un, id, idlen);
4043         if (rval != SD_SUCCESS) {
4044             /*
4045              * User id strings that start and end with a "*"
4046              * are a special case. These do not have a
4047              * specific vendor, and the product string can
4048              * appear anywhere in the 16 byte PID portion of
4049              * the inquiry data. This is a simple strstr()
4050              * type search for the user id in the inquiry data.
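             * For example, a hypothetical id of "*ST34371*" matches
             * any device whose inquiry pid field contains the
             * substring "ST34371", regardless of the vendor.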
4051 */ 4052 if ((id[0] == '*') && (id[idlen - 1] == '*')) { 4053 char *pidptr = &id[1]; 4054 int i; 4055 int j; 4056 int pidstrlen = idlen - 2; 4057 j = sizeof (SD_INQUIRY(un)->inq_pid) - 4058 pidstrlen; 4059 4060 if (j < 0) { 4061 return (SD_FAILURE); 4062 } 4063 for (i = 0; i < j; i++) { 4064 if (bcmp(&SD_INQUIRY(un)->inq_pid[i], 4065 pidptr, pidstrlen) == 0) { 4066 rval = SD_SUCCESS; 4067 break; 4068 } 4069 } 4070 } 4071 } 4072 } 4073 return (rval); 4074 } 4075 4076 4077 /* 4078 * Function: sd_blank_cmp 4079 * 4080 * Description: If the id string starts and ends with a space, treat 4081 * multiple consecutive spaces as equivalent to a single 4082 * space. For example, this causes a sd_disk_table entry 4083 * of " NEC CDROM " to match a device's id string of 4084 * "NEC CDROM". 4085 * 4086 * Note: The success exit condition for this routine is if 4087 * the pointer to the table entry is '\0' and the cnt of 4088 * the inquiry length is zero. This will happen if the inquiry 4089 * string returned by the device is padded with spaces to be 4090 * exactly 24 bytes in length (8 byte vid + 16 byte pid). The 4091 * SCSI spec states that the inquiry string is to be padded with 4092 * spaces. 4093 * 4094 * Arguments: un - driver soft state (unit) structure 4095 * id - table or config file vid/pid 4096 * idlen - length of the vid/pid (bytes) 4097 * 4098 * Return Code: SD_SUCCESS - Indicates a match with the inquiry vid/pid 4099 * SD_FAILURE - Indicates no match with the inquiry vid/pid 4100 */ 4101 4102 static int 4103 sd_blank_cmp(struct sd_lun *un, char *id, int idlen) 4104 { 4105 char *p1; 4106 char *p2; 4107 int cnt; 4108 cnt = sizeof (SD_INQUIRY(un)->inq_vid) + 4109 sizeof (SD_INQUIRY(un)->inq_pid); 4110 4111 ASSERT(un != NULL); 4112 p2 = un->un_sd->sd_inq->inq_vid; 4113 ASSERT(id != NULL); 4114 p1 = id; 4115 4116 if ((id[0] == ' ') && (id[idlen - 1] == ' ')) { 4117 /* 4118 * Note: string p1 is terminated by a NUL but string p2 4119 * isn't. The end of p2 is determined by cnt. 4120 */ 4121 for (;;) { 4122 /* skip over any extra blanks in both strings */ 4123 while ((*p1 != '\0') && (*p1 == ' ')) { 4124 p1++; 4125 } 4126 while ((cnt != 0) && (*p2 == ' ')) { 4127 p2++; 4128 cnt--; 4129 } 4130 4131 /* compare the two strings */ 4132 if ((cnt == 0) || 4133 (SD_TOUPPER(*p1) != SD_TOUPPER(*p2))) { 4134 break; 4135 } 4136 while ((cnt > 0) && 4137 (SD_TOUPPER(*p1) == SD_TOUPPER(*p2))) { 4138 p1++; 4139 p2++; 4140 cnt--; 4141 } 4142 } 4143 } 4144 4145 /* return SD_SUCCESS if both strings match */ 4146 return (((*p1 == '\0') && (cnt == 0)) ? 
SD_SUCCESS : SD_FAILURE);
4147 }
4148
4149
4150 /*
4151  * Function: sd_chk_vers1_data
4152  *
4153  * Description: Verify the version 1 device properties provided by the
4154  *              user via the configuration file.
4155  *
4156  * Arguments: un - driver soft state (unit) structure
4157  *            flags - integer mask indicating properties to be set
4158  *            prop_list - integer list of property values
4159  *            list_len - number of elements in the list
4160  *
4161  * Return Code: SD_SUCCESS - Indicates the user provided data is valid
4162  *              SD_FAILURE - Indicates the user provided data is invalid
4163  */
4164
4165 static int
4166 sd_chk_vers1_data(struct sd_lun *un, int flags, int *prop_list,
4167     int list_len, char *dataname_ptr)
4168 {
4169     int i;
4170     int mask;
4171     int index = 0;
4172
4173     ASSERT(un != NULL);
4174
4175     /* Check for a NULL property name and list */
4176     if (dataname_ptr == NULL) {
4177         scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
4178             "sd_chk_vers1_data: NULL data property name.");
4179         return (SD_FAILURE);
4180     }
4181     if (prop_list == NULL) {
4182         scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
4183             "sd_chk_vers1_data: %s NULL data property list.",
4184             dataname_ptr);
4185         return (SD_FAILURE);
4186     }
4187
4188     /* Display a warning if undefined bits are set in the flags */
4189     if (flags & ~SD_CONF_BIT_MASK) {
4190         scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
4191             "sd_chk_vers1_data: invalid bits 0x%x in data list %s. "
4192             "Properties not set.",
4193             (flags & ~SD_CONF_BIT_MASK), dataname_ptr);
4194         return (SD_FAILURE);
4195     }
4196
4197     /*
4198      * Verify the length of the list by identifying the highest bit set
4199      * in the flags and validating that the property list has a length
4200      * up to the index of this bit.
4201      */
4202     for (i = 0; i < SD_CONF_MAX_ITEMS; i++) {
4203         mask = 1 << i;
4204         if (flags & mask) {
4205             index = i + 1;  /* index of the highest bit set, plus one */
4206         }
4207     }
4208     if (list_len < (index + 2)) {
4209         scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
4210             "sd_chk_vers1_data: "
4211             "Data property list %s size is incorrect. "
4212             "Properties not set.", dataname_ptr);
4213         scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, "Size expected: "
4214             "version + 1 flagword + %d properties", SD_CONF_MAX_ITEMS);
4215         return (SD_FAILURE);
4216     }
4217     return (SD_SUCCESS);
4218 }
4219
4220
4221 /*
4222  * Function: sd_set_vers1_properties
4223  *
4224  * Description: Set version 1 device properties based on a property list
4225  *              retrieved from the driver configuration file or static
4226  *              configuration table. Version 1 properties have the format:
4227  *
4228  * <data-property-name>:=<version>,<flags>,<prop0>,<prop1>,.....<propN>
4229  *
4230  * where the prop0 value will be used to set prop0 if bit0
4231  * is set in the flags
4232  *
4233  * Arguments: un - driver soft state (unit) structure
4234  *            flags - integer mask indicating properties to be set
4235  *            prop_list - integer list of property values
4236  */
4237
4238 static void
4239 sd_set_vers1_properties(struct sd_lun *un, int flags, sd_tunables *prop_list)
4240 {
4241     ASSERT(un != NULL);
4242
4243     /*
4244      * Set the flag to indicate cache is to be disabled. An attempt
4245      * to disable the cache via sd_cache_control() will be made
4246      * later during attach once the basic initialization is complete.
4247 */ 4248 if (flags & SD_CONF_BSET_NOCACHE) { 4249 un->un_f_opt_disable_cache = TRUE; 4250 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4251 "sd_set_vers1_properties: caching disabled flag set\n"); 4252 } 4253 4254 /* CD-specific configuration parameters */ 4255 if (flags & SD_CONF_BSET_PLAYMSF_BCD) { 4256 un->un_f_cfg_playmsf_bcd = TRUE; 4257 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4258 "sd_set_vers1_properties: playmsf_bcd set\n"); 4259 } 4260 if (flags & SD_CONF_BSET_READSUB_BCD) { 4261 un->un_f_cfg_readsub_bcd = TRUE; 4262 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4263 "sd_set_vers1_properties: readsub_bcd set\n"); 4264 } 4265 if (flags & SD_CONF_BSET_READ_TOC_TRK_BCD) { 4266 un->un_f_cfg_read_toc_trk_bcd = TRUE; 4267 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4268 "sd_set_vers1_properties: read_toc_trk_bcd set\n"); 4269 } 4270 if (flags & SD_CONF_BSET_READ_TOC_ADDR_BCD) { 4271 un->un_f_cfg_read_toc_addr_bcd = TRUE; 4272 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4273 "sd_set_vers1_properties: read_toc_addr_bcd set\n"); 4274 } 4275 if (flags & SD_CONF_BSET_NO_READ_HEADER) { 4276 un->un_f_cfg_no_read_header = TRUE; 4277 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4278 "sd_set_vers1_properties: no_read_header set\n"); 4279 } 4280 if (flags & SD_CONF_BSET_READ_CD_XD4) { 4281 un->un_f_cfg_read_cd_xd4 = TRUE; 4282 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4283 "sd_set_vers1_properties: read_cd_xd4 set\n"); 4284 } 4285 4286 /* Support for devices which do not have valid/unique serial numbers */ 4287 if (flags & SD_CONF_BSET_FAB_DEVID) { 4288 un->un_f_opt_fab_devid = TRUE; 4289 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4290 "sd_set_vers1_properties: fab_devid bit set\n"); 4291 } 4292 4293 /* Support for user throttle configuration */ 4294 if (flags & SD_CONF_BSET_THROTTLE) { 4295 ASSERT(prop_list != NULL); 4296 un->un_saved_throttle = un->un_throttle = 4297 prop_list->sdt_throttle; 4298 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4299 "sd_set_vers1_properties: throttle set to %d\n", 4300 prop_list->sdt_throttle); 4301 } 4302 4303 /* Set the per disk retry count according to the conf file or table. 
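 * Note that a zero sdt_not_rdy_retries value is ignored below,
 * leaving the default not-ready retry count in place.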
*/ 4304 if (flags & SD_CONF_BSET_NRR_COUNT) { 4305 ASSERT(prop_list != NULL); 4306 if (prop_list->sdt_not_rdy_retries) { 4307 un->un_notready_retry_count = 4308 prop_list->sdt_not_rdy_retries; 4309 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4310 "sd_set_vers1_properties: not ready retry count" 4311 " set to %d\n", un->un_notready_retry_count); 4312 } 4313 } 4314 4315 /* The controller type is reported for generic disk driver ioctls */ 4316 if (flags & SD_CONF_BSET_CTYPE) { 4317 ASSERT(prop_list != NULL); 4318 switch (prop_list->sdt_ctype) { 4319 case CTYPE_CDROM: 4320 un->un_ctype = prop_list->sdt_ctype; 4321 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4322 "sd_set_vers1_properties: ctype set to " 4323 "CTYPE_CDROM\n"); 4324 break; 4325 case CTYPE_CCS: 4326 un->un_ctype = prop_list->sdt_ctype; 4327 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4328 "sd_set_vers1_properties: ctype set to " 4329 "CTYPE_CCS\n"); 4330 break; 4331 case CTYPE_ROD: /* RW optical */ 4332 un->un_ctype = prop_list->sdt_ctype; 4333 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4334 "sd_set_vers1_properties: ctype set to " 4335 "CTYPE_ROD\n"); 4336 break; 4337 default: 4338 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 4339 "sd_set_vers1_properties: Could not set " 4340 "invalid ctype value (%d)", 4341 prop_list->sdt_ctype); 4342 } 4343 } 4344 4345 /* Purple failover timeout */ 4346 if (flags & SD_CONF_BSET_BSY_RETRY_COUNT) { 4347 ASSERT(prop_list != NULL); 4348 un->un_busy_retry_count = 4349 prop_list->sdt_busy_retries; 4350 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4351 "sd_set_vers1_properties: " 4352 "busy retry count set to %d\n", 4353 un->un_busy_retry_count); 4354 } 4355 4356 /* Purple reset retry count */ 4357 if (flags & SD_CONF_BSET_RST_RETRIES) { 4358 ASSERT(prop_list != NULL); 4359 un->un_reset_retry_count = 4360 prop_list->sdt_reset_retries; 4361 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4362 "sd_set_vers1_properties: " 4363 "reset retry count set to %d\n", 4364 un->un_reset_retry_count); 4365 } 4366 4367 /* Purple reservation release timeout */ 4368 if (flags & SD_CONF_BSET_RSV_REL_TIME) { 4369 ASSERT(prop_list != NULL); 4370 un->un_reserve_release_time = 4371 prop_list->sdt_reserv_rel_time; 4372 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4373 "sd_set_vers1_properties: " 4374 "reservation release timeout set to %d\n", 4375 un->un_reserve_release_time); 4376 } 4377 4378 /* 4379 * Driver flag telling the driver to verify that no commands are pending 4380 * for a device before issuing a Test Unit Ready. This is a workaround 4381 * for a firmware bug in some Seagate eliteI drives. 4382 */ 4383 if (flags & SD_CONF_BSET_TUR_CHECK) { 4384 un->un_f_cfg_tur_check = TRUE; 4385 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4386 "sd_set_vers1_properties: tur queue check set\n"); 4387 } 4388 4389 if (flags & SD_CONF_BSET_MIN_THROTTLE) { 4390 un->un_min_throttle = prop_list->sdt_min_throttle; 4391 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4392 "sd_set_vers1_properties: min throttle set to %d\n", 4393 un->un_min_throttle); 4394 } 4395 4396 if (flags & SD_CONF_BSET_DISKSORT_DISABLED) { 4397 un->un_f_disksort_disabled = 4398 (prop_list->sdt_disk_sort_dis != 0) ? 4399 TRUE : FALSE; 4400 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4401 "sd_set_vers1_properties: disksort disabled " 4402 "flag set to %d\n", 4403 prop_list->sdt_disk_sort_dis); 4404 } 4405 4406 if (flags & SD_CONF_BSET_LUN_RESET_ENABLED) { 4407 un->un_f_lun_reset_enabled = 4408 (prop_list->sdt_lun_reset_enable != 0) ? 
4409 TRUE : FALSE; 4410 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4411 "sd_set_vers1_properties: lun reset enabled " 4412 "flag set to %d\n", 4413 prop_list->sdt_lun_reset_enable); 4414 } 4415 4416 if (flags & SD_CONF_BSET_CACHE_IS_NV) { 4417 un->un_f_suppress_cache_flush = 4418 (prop_list->sdt_suppress_cache_flush != 0) ? 4419 TRUE : FALSE; 4420 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4421 "sd_set_vers1_properties: suppress_cache_flush " 4422 "flag set to %d\n", 4423 prop_list->sdt_suppress_cache_flush); 4424 } 4425 4426 /* 4427 * Validate the throttle values. 4428 * If any of the numbers are invalid, set everything to defaults. 4429 */ 4430 if ((un->un_throttle < SD_LOWEST_VALID_THROTTLE) || 4431 (un->un_min_throttle < SD_LOWEST_VALID_THROTTLE) || 4432 (un->un_min_throttle > un->un_throttle)) { 4433 un->un_saved_throttle = un->un_throttle = sd_max_throttle; 4434 un->un_min_throttle = sd_min_throttle; 4435 } 4436 } 4437 4438 /* 4439 * Function: sd_is_lsi() 4440 * 4441 * Description: Check for lsi devices, step through the static device 4442 * table to match vid/pid. 4443 * 4444 * Args: un - ptr to sd_lun 4445 * 4446 * Notes: When creating new LSI property, need to add the new LSI property 4447 * to this function. 4448 */ 4449 static void 4450 sd_is_lsi(struct sd_lun *un) 4451 { 4452 char *id = NULL; 4453 int table_index; 4454 int idlen; 4455 void *prop; 4456 4457 ASSERT(un != NULL); 4458 for (table_index = 0; table_index < sd_disk_table_size; 4459 table_index++) { 4460 id = sd_disk_table[table_index].device_id; 4461 idlen = strlen(id); 4462 if (idlen == 0) { 4463 continue; 4464 } 4465 4466 if (sd_sdconf_id_match(un, id, idlen) == SD_SUCCESS) { 4467 prop = sd_disk_table[table_index].properties; 4468 if (prop == &lsi_properties || 4469 prop == &lsi_oem_properties || 4470 prop == &lsi_properties_scsi || 4471 prop == &symbios_properties) { 4472 un->un_f_cfg_is_lsi = TRUE; 4473 } 4474 break; 4475 } 4476 } 4477 } 4478 4479 /* 4480 * Function: sd_get_physical_geometry 4481 * 4482 * Description: Retrieve the MODE SENSE page 3 (Format Device Page) and 4483 * MODE SENSE page 4 (Rigid Disk Drive Geometry Page) from the 4484 * target, and use this information to initialize the physical 4485 * geometry cache specified by pgeom_p. 4486 * 4487 * MODE SENSE is an optional command, so failure in this case 4488 * does not necessarily denote an error. We want to use the 4489 * MODE SENSE commands to derive the physical geometry of the 4490 * device, but if either command fails, the logical geometry is 4491 * used as the fallback for disk label geometry in cmlb. 4492 * 4493 * This requires that un->un_blockcount and un->un_tgt_blocksize 4494 * have already been initialized for the current target and 4495 * that the current values be passed as args so that we don't 4496 * end up ever trying to use -1 as a valid value. This could 4497 * happen if either value is reset while we're not holding 4498 * the mutex. 4499 * 4500 * Arguments: un - driver soft state (unit) structure 4501 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 4502 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 4503 * to use the USCSI "direct" chain and bypass the normal 4504 * command waitq. 4505 * 4506 * Context: Kernel thread only (can sleep). 
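 *
 * Note: the MODE SENSE data parsed below is laid out as
 *   <mode header><block descriptor(s)><mode page>
 * so each page is located at headerp + mode_header_length + bd_len.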
4507 */ 4508 4509 static int 4510 sd_get_physical_geometry(struct sd_lun *un, cmlb_geom_t *pgeom_p, 4511 diskaddr_t capacity, int lbasize, int path_flag) 4512 { 4513 struct mode_format *page3p; 4514 struct mode_geometry *page4p; 4515 struct mode_header *headerp; 4516 int sector_size; 4517 int nsect; 4518 int nhead; 4519 int ncyl; 4520 int intrlv; 4521 int spc; 4522 diskaddr_t modesense_capacity; 4523 int rpm; 4524 int bd_len; 4525 int mode_header_length; 4526 uchar_t *p3bufp; 4527 uchar_t *p4bufp; 4528 int cdbsize; 4529 int ret = EIO; 4530 4531 ASSERT(un != NULL); 4532 4533 if (lbasize == 0) { 4534 if (ISCD(un)) { 4535 lbasize = 2048; 4536 } else { 4537 lbasize = un->un_sys_blocksize; 4538 } 4539 } 4540 pgeom_p->g_secsize = (unsigned short)lbasize; 4541 4542 /* 4543 * If the unit is a cd/dvd drive MODE SENSE page three 4544 * and MODE SENSE page four are reserved (see SBC spec 4545 * and MMC spec). To prevent soft errors just return 4546 * using the default LBA size. 4547 */ 4548 if (ISCD(un)) 4549 return (ret); 4550 4551 cdbsize = (un->un_f_cfg_is_atapi == TRUE) ? CDB_GROUP2 : CDB_GROUP0; 4552 4553 /* 4554 * Retrieve MODE SENSE page 3 - Format Device Page 4555 */ 4556 p3bufp = kmem_zalloc(SD_MODE_SENSE_PAGE3_LENGTH, KM_SLEEP); 4557 if (sd_send_scsi_MODE_SENSE(un, cdbsize, p3bufp, 4558 SD_MODE_SENSE_PAGE3_LENGTH, SD_MODE_SENSE_PAGE3_CODE, path_flag) 4559 != 0) { 4560 SD_ERROR(SD_LOG_COMMON, un, 4561 "sd_get_physical_geometry: mode sense page 3 failed\n"); 4562 goto page3_exit; 4563 } 4564 4565 /* 4566 * Determine size of Block Descriptors in order to locate the mode 4567 * page data. ATAPI devices return 0, SCSI devices should return 4568 * MODE_BLK_DESC_LENGTH. 4569 */ 4570 headerp = (struct mode_header *)p3bufp; 4571 if (un->un_f_cfg_is_atapi == TRUE) { 4572 struct mode_header_grp2 *mhp = 4573 (struct mode_header_grp2 *)headerp; 4574 mode_header_length = MODE_HEADER_LENGTH_GRP2; 4575 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 4576 } else { 4577 mode_header_length = MODE_HEADER_LENGTH; 4578 bd_len = ((struct mode_header *)headerp)->bdesc_length; 4579 } 4580 4581 if (bd_len > MODE_BLK_DESC_LENGTH) { 4582 SD_ERROR(SD_LOG_COMMON, un, "sd_get_physical_geometry: " 4583 "received unexpected bd_len of %d, page3\n", bd_len); 4584 goto page3_exit; 4585 } 4586 4587 page3p = (struct mode_format *) 4588 ((caddr_t)headerp + mode_header_length + bd_len); 4589 4590 if (page3p->mode_page.code != SD_MODE_SENSE_PAGE3_CODE) { 4591 SD_ERROR(SD_LOG_COMMON, un, "sd_get_physical_geometry: " 4592 "mode sense pg3 code mismatch %d\n", 4593 page3p->mode_page.code); 4594 goto page3_exit; 4595 } 4596 4597 /* 4598 * Use this physical geometry data only if BOTH MODE SENSE commands 4599 * complete successfully; otherwise, revert to the logical geometry. 4600 * So, we need to save everything in temporary variables. 
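 * (sector_size, nsect and intrlv below are those temporaries;
 * pgeom_p is not updated until page 4 has also been fetched and
 * validated.)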
4601 */ 4602 sector_size = BE_16(page3p->data_bytes_sect); 4603 4604 /* 4605 * 1243403: The NEC D38x7 drives do not support MODE SENSE sector size 4606 */ 4607 if (sector_size == 0) { 4608 sector_size = un->un_sys_blocksize; 4609 } else { 4610 sector_size &= ~(un->un_sys_blocksize - 1); 4611 } 4612 4613 nsect = BE_16(page3p->sect_track); 4614 intrlv = BE_16(page3p->interleave); 4615 4616 SD_INFO(SD_LOG_COMMON, un, 4617 "sd_get_physical_geometry: Format Parameters (page 3)\n"); 4618 SD_INFO(SD_LOG_COMMON, un, 4619 " mode page: %d; nsect: %d; sector size: %d;\n", 4620 page3p->mode_page.code, nsect, sector_size); 4621 SD_INFO(SD_LOG_COMMON, un, 4622 " interleave: %d; track skew: %d; cylinder skew: %d;\n", intrlv, 4623 BE_16(page3p->track_skew), 4624 BE_16(page3p->cylinder_skew)); 4625 4626 4627 /* 4628 * Retrieve MODE SENSE page 4 - Rigid Disk Drive Geometry Page 4629 */ 4630 p4bufp = kmem_zalloc(SD_MODE_SENSE_PAGE4_LENGTH, KM_SLEEP); 4631 if (sd_send_scsi_MODE_SENSE(un, cdbsize, p4bufp, 4632 SD_MODE_SENSE_PAGE4_LENGTH, SD_MODE_SENSE_PAGE4_CODE, path_flag) 4633 != 0) { 4634 SD_ERROR(SD_LOG_COMMON, un, 4635 "sd_get_physical_geometry: mode sense page 4 failed\n"); 4636 goto page4_exit; 4637 } 4638 4639 /* 4640 * Determine size of Block Descriptors in order to locate the mode 4641 * page data. ATAPI devices return 0, SCSI devices should return 4642 * MODE_BLK_DESC_LENGTH. 4643 */ 4644 headerp = (struct mode_header *)p4bufp; 4645 if (un->un_f_cfg_is_atapi == TRUE) { 4646 struct mode_header_grp2 *mhp = 4647 (struct mode_header_grp2 *)headerp; 4648 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 4649 } else { 4650 bd_len = ((struct mode_header *)headerp)->bdesc_length; 4651 } 4652 4653 if (bd_len > MODE_BLK_DESC_LENGTH) { 4654 SD_ERROR(SD_LOG_COMMON, un, "sd_get_physical_geometry: " 4655 "received unexpected bd_len of %d, page4\n", bd_len); 4656 goto page4_exit; 4657 } 4658 4659 page4p = (struct mode_geometry *) 4660 ((caddr_t)headerp + mode_header_length + bd_len); 4661 4662 if (page4p->mode_page.code != SD_MODE_SENSE_PAGE4_CODE) { 4663 SD_ERROR(SD_LOG_COMMON, un, "sd_get_physical_geometry: " 4664 "mode sense pg4 code mismatch %d\n", 4665 page4p->mode_page.code); 4666 goto page4_exit; 4667 } 4668 4669 /* 4670 * Stash the data now, after we know that both commands completed. 4671 */ 4672 4673 4674 nhead = (int)page4p->heads; /* uchar, so no conversion needed */ 4675 spc = nhead * nsect; 4676 ncyl = (page4p->cyl_ub << 16) + (page4p->cyl_mb << 8) + page4p->cyl_lb; 4677 rpm = BE_16(page4p->rpm); 4678 4679 modesense_capacity = spc * ncyl; 4680 4681 SD_INFO(SD_LOG_COMMON, un, 4682 "sd_get_physical_geometry: Geometry Parameters (page 4)\n"); 4683 SD_INFO(SD_LOG_COMMON, un, 4684 " cylinders: %d; heads: %d; rpm: %d;\n", ncyl, nhead, rpm); 4685 SD_INFO(SD_LOG_COMMON, un, 4686 " computed capacity(h*s*c): %d;\n", modesense_capacity); 4687 SD_INFO(SD_LOG_COMMON, un, " pgeom_p: %p; read cap: %d\n", 4688 (void *)pgeom_p, capacity); 4689 4690 /* 4691 * Compensate if the drive's geometry is not rectangular, i.e., 4692 * the product of C * H * S returned by MODE SENSE >= that returned 4693 * by read capacity. This is an idiosyncrasy of the original x86 4694 * disk subsystem. 
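 * For example (hypothetical numbers): with nhead = 16 and nsect = 63,
 * spc = 1008; with ncyl = 1000, modesense_capacity = 1008000. A READ
 * CAPACITY value of 1000000 blocks then gives g_acyl = (1008000 -
 * 1000000 + 1007) / 1008 = 8 and g_ncyl = 1000 - 8 = 992.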
4695 */ 4696 if (modesense_capacity >= capacity) { 4697 SD_INFO(SD_LOG_COMMON, un, 4698 "sd_get_physical_geometry: adjusting acyl; " 4699 "old: %d; new: %d\n", pgeom_p->g_acyl, 4700 (modesense_capacity - capacity + spc - 1) / spc); 4701 if (sector_size != 0) { 4702 /* 1243403: NEC D38x7 drives don't support sec size */ 4703 pgeom_p->g_secsize = (unsigned short)sector_size; 4704 } 4705 pgeom_p->g_nsect = (unsigned short)nsect; 4706 pgeom_p->g_nhead = (unsigned short)nhead; 4707 pgeom_p->g_capacity = capacity; 4708 pgeom_p->g_acyl = 4709 (modesense_capacity - pgeom_p->g_capacity + spc - 1) / spc; 4710 pgeom_p->g_ncyl = ncyl - pgeom_p->g_acyl; 4711 } 4712 4713 pgeom_p->g_rpm = (unsigned short)rpm; 4714 pgeom_p->g_intrlv = (unsigned short)intrlv; 4715 ret = 0; 4716 4717 SD_INFO(SD_LOG_COMMON, un, 4718 "sd_get_physical_geometry: mode sense geometry:\n"); 4719 SD_INFO(SD_LOG_COMMON, un, 4720 " nsect: %d; sector size: %d; interlv: %d\n", 4721 nsect, sector_size, intrlv); 4722 SD_INFO(SD_LOG_COMMON, un, 4723 " nhead: %d; ncyl: %d; rpm: %d; capacity(ms): %d\n", 4724 nhead, ncyl, rpm, modesense_capacity); 4725 SD_INFO(SD_LOG_COMMON, un, 4726 "sd_get_physical_geometry: (cached)\n"); 4727 SD_INFO(SD_LOG_COMMON, un, 4728 " ncyl: %ld; acyl: %d; nhead: %d; nsect: %d\n", 4729 pgeom_p->g_ncyl, pgeom_p->g_acyl, 4730 pgeom_p->g_nhead, pgeom_p->g_nsect); 4731 SD_INFO(SD_LOG_COMMON, un, 4732 " lbasize: %d; capacity: %ld; intrlv: %d; rpm: %d\n", 4733 pgeom_p->g_secsize, pgeom_p->g_capacity, 4734 pgeom_p->g_intrlv, pgeom_p->g_rpm); 4735 4736 page4_exit: 4737 kmem_free(p4bufp, SD_MODE_SENSE_PAGE4_LENGTH); 4738 page3_exit: 4739 kmem_free(p3bufp, SD_MODE_SENSE_PAGE3_LENGTH); 4740 4741 return (ret); 4742 } 4743 4744 /* 4745 * Function: sd_get_virtual_geometry 4746 * 4747 * Description: Ask the controller to tell us about the target device. 4748 * 4749 * Arguments: un - pointer to softstate 4750 * capacity - disk capacity in #blocks 4751 * lbasize - disk block size in bytes 4752 * 4753 * Context: Kernel thread only 4754 */ 4755 4756 static int 4757 sd_get_virtual_geometry(struct sd_lun *un, cmlb_geom_t *lgeom_p, 4758 diskaddr_t capacity, int lbasize) 4759 { 4760 uint_t geombuf; 4761 int spc; 4762 4763 ASSERT(un != NULL); 4764 4765 /* Set sector size, and total number of sectors */ 4766 (void) scsi_ifsetcap(SD_ADDRESS(un), "sector-size", lbasize, 1); 4767 (void) scsi_ifsetcap(SD_ADDRESS(un), "total-sectors", capacity, 1); 4768 4769 /* Let the HBA tell us its geometry */ 4770 geombuf = (uint_t)scsi_ifgetcap(SD_ADDRESS(un), "geometry", 1); 4771 4772 /* A value of -1 indicates an undefined "geometry" property */ 4773 if (geombuf == (-1)) { 4774 return (EINVAL); 4775 } 4776 4777 /* Initialize the logical geometry cache. */ 4778 lgeom_p->g_nhead = (geombuf >> 16) & 0xffff; 4779 lgeom_p->g_nsect = geombuf & 0xffff; 4780 lgeom_p->g_secsize = un->un_sys_blocksize; 4781 4782 spc = lgeom_p->g_nhead * lgeom_p->g_nsect; 4783 4784 /* 4785 * Note: The driver originally converted the capacity value from 4786 * target blocks to system blocks. However, the capacity value passed 4787 * to this routine is already in terms of system blocks (this scaling 4788 * is done when the READ CAPACITY command is issued and processed). 4789 * This 'error' may have gone undetected because the usage of g_ncyl 4790 * (which is based upon g_capacity) is very limited within the driver 4791 */ 4792 lgeom_p->g_capacity = capacity; 4793 4794 /* 4795 * Set ncyl to zero if the hba returned a zero nhead or nsect value. 
The 4796 * hba may return zero values if the device has been removed. 4797 */ 4798 if (spc == 0) { 4799 lgeom_p->g_ncyl = 0; 4800 } else { 4801 lgeom_p->g_ncyl = lgeom_p->g_capacity / spc; 4802 } 4803 lgeom_p->g_acyl = 0; 4804 4805 SD_INFO(SD_LOG_COMMON, un, "sd_get_virtual_geometry: (cached)\n"); 4806 return (0); 4807 4808 } 4809 /* 4810 * Function: sd_update_block_info 4811 * 4812 * Description: Calculate a byte count to sector count bitshift value 4813 * from sector size. 4814 * 4815 * Arguments: un: unit struct. 4816 * lbasize: new target sector size 4817 * capacity: new target capacity, ie. block count 4818 * 4819 * Context: Kernel thread context 4820 */ 4821 4822 static void 4823 sd_update_block_info(struct sd_lun *un, uint32_t lbasize, uint64_t capacity) 4824 { 4825 if (lbasize != 0) { 4826 un->un_tgt_blocksize = lbasize; 4827 un->un_f_tgt_blocksize_is_valid = TRUE; 4828 } 4829 4830 if (capacity != 0) { 4831 un->un_blockcount = capacity; 4832 un->un_f_blockcount_is_valid = TRUE; 4833 } 4834 } 4835 4836 4837 /* 4838 * Function: sd_register_devid 4839 * 4840 * Description: This routine will obtain the device id information from the 4841 * target, obtain the serial number, and register the device 4842 * id with the ddi framework. 4843 * 4844 * Arguments: devi - the system's dev_info_t for the device. 4845 * un - driver soft state (unit) structure 4846 * reservation_flag - indicates if a reservation conflict 4847 * occurred during attach 4848 * 4849 * Context: Kernel Thread 4850 */ 4851 static void 4852 sd_register_devid(struct sd_lun *un, dev_info_t *devi, int reservation_flag) 4853 { 4854 int rval = 0; 4855 uchar_t *inq80 = NULL; 4856 size_t inq80_len = MAX_INQUIRY_SIZE; 4857 size_t inq80_resid = 0; 4858 uchar_t *inq83 = NULL; 4859 size_t inq83_len = MAX_INQUIRY_SIZE; 4860 size_t inq83_resid = 0; 4861 int dlen, len; 4862 char *sn; 4863 4864 ASSERT(un != NULL); 4865 ASSERT(mutex_owned(SD_MUTEX(un))); 4866 ASSERT((SD_DEVINFO(un)) == devi); 4867 4868 /* 4869 * If transport has already registered a devid for this target 4870 * then that takes precedence over the driver's determination 4871 * of the devid. 4872 */ 4873 if (ddi_devid_get(SD_DEVINFO(un), &un->un_devid) == DDI_SUCCESS) { 4874 ASSERT(un->un_devid); 4875 return; /* use devid registered by the transport */ 4876 } 4877 4878 /* 4879 * This is the case of antiquated Sun disk drives that have the 4880 * FAB_DEVID property set in the disk_table. These drives 4881 * manage the devid's by storing them in last 2 available sectors 4882 * on the drive and have them fabricated by the ddi layer by calling 4883 * ddi_devid_init and passing the DEVID_FAB flag. 4884 */ 4885 if (un->un_f_opt_fab_devid == TRUE) { 4886 /* 4887 * Depending on EINVAL isn't reliable, since a reserved disk 4888 * may result in invalid geometry, so check to make sure a 4889 * reservation conflict did not occur during attach. 4890 */ 4891 if ((sd_get_devid(un) == EINVAL) && 4892 (reservation_flag != SD_TARGET_IS_RESERVED)) { 4893 /* 4894 * The devid is invalid AND there is no reservation 4895 * conflict. Fabricate a new devid. 
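             * (sd_create_devid() obtains the fabricated devid from
             * ddi_devid_init() with the DEVID_FAB flag and writes it
             * to the reserved sector via sd_write_deviceid().)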
4896              */
4897             (void) sd_create_devid(un);
4898         }
4899
4900         /* Register the devid if it exists */
4901         if (un->un_devid != NULL) {
4902             (void) ddi_devid_register(SD_DEVINFO(un),
4903                 un->un_devid);
4904             SD_INFO(SD_LOG_ATTACH_DETACH, un,
4905                 "sd_register_devid: Devid Fabricated\n");
4906         }
4907         return;
4908     }
4909
4910     /*
4911      * We check the availability of the World Wide Name (0x83) and Unit
4912      * Serial Number (0x80) pages in sd_check_vpd_page_support(), and using
4913      * un_vpd_page_mask from them, we decide which way to get the WWN. If
4914      * 0x83 is available, that is the best choice. Our next choice is
4915      * 0x80. If neither is available, we munge the devid from the device
4916      * vid/pid/serial # for Sun qualified disks, or use the ddi framework
4917      * to fabricate a devid for non-Sun qualified disks.
4918      */
4919     if (sd_check_vpd_page_support(un) == 0) {
4920         /* collect page 80 data if available */
4921         if (un->un_vpd_page_mask & SD_VPD_UNIT_SERIAL_PG) {
4922
4923             mutex_exit(SD_MUTEX(un));
4924             inq80 = kmem_zalloc(inq80_len, KM_SLEEP);
4925             rval = sd_send_scsi_INQUIRY(un, inq80, inq80_len,
4926                 0x01, 0x80, &inq80_resid);
4927
4928             if (rval != 0) {
4929                 kmem_free(inq80, inq80_len);
4930                 inq80 = NULL;
4931                 inq80_len = 0;
4932             } else if (ddi_prop_exists(
4933                 DDI_DEV_T_NONE, SD_DEVINFO(un),
4934                 DDI_PROP_NOTPROM | DDI_PROP_DONTPASS,
4935                 INQUIRY_SERIAL_NO) == 0) {
4936                 /*
4937                  * If we don't already have a serial number
4938                  * property, do a quick verify of the data
4939                  * returned and define the property.
4940                  */
4941                 dlen = inq80_len - inq80_resid;
4942                 len = (size_t)inq80[3];
4943                 if ((dlen >= 4) && ((len + 4) <= dlen)) {
4944                     /*
4945                      * Ensure sn termination, skip leading
4946                      * blanks, and create property
4947                      * 'inquiry-serial-no'.
4948                      */
4949                     sn = (char *)&inq80[4];
4950                     sn[len] = 0;
4951                     while (*sn && (*sn == ' '))
4952                         sn++;
4953                     if (*sn) {
4954                         (void) ddi_prop_update_string(
4955                             DDI_DEV_T_NONE,
4956                             SD_DEVINFO(un),
4957                             INQUIRY_SERIAL_NO, sn);
4958                     }
4959                 }
4960             }
4961             mutex_enter(SD_MUTEX(un));
4962         }
4963
4964         /* collect page 83 data if available */
4965         if (un->un_vpd_page_mask & SD_VPD_DEVID_WWN_PG) {
4966             mutex_exit(SD_MUTEX(un));
4967             inq83 = kmem_zalloc(inq83_len, KM_SLEEP);
4968             rval = sd_send_scsi_INQUIRY(un, inq83, inq83_len,
4969                 0x01, 0x83, &inq83_resid);
4970
4971             if (rval != 0) {
4972                 kmem_free(inq83, inq83_len);
4973                 inq83 = NULL;
4974                 inq83_len = 0;
4975             }
4976             mutex_enter(SD_MUTEX(un));
4977         }
4978     }
4979
4980     /* encode best devid possible based on data available */
4981     if (ddi_devid_scsi_encode(DEVID_SCSI_ENCODE_VERSION_LATEST,
4982         (char *)ddi_driver_name(SD_DEVINFO(un)),
4983         (uchar_t *)SD_INQUIRY(un), sizeof (*SD_INQUIRY(un)),
4984         inq80, inq80_len - inq80_resid, inq83, inq83_len -
4985         inq83_resid, &un->un_devid) == DDI_SUCCESS) {
4986
4987         /* devid successfully encoded, register devid */
4988         (void) ddi_devid_register(SD_DEVINFO(un), un->un_devid);
4989
4990     } else {
4991         /*
4992          * Unable to encode a devid based on data available.
4993          * This is not a Sun qualified disk. Older Sun disk
4994          * drives that have the SD_FAB_DEVID property
4995          * set in the disk_table and non-Sun qualified
4996          * disks are treated in the same manner. These
4997          * drives manage the devids by storing them in the
4998          * last 2 available sectors on the drive and
4999          * have them fabricated by the ddi layer by
5000          * calling ddi_devid_init and passing the
5001          * DEVID_FAB flag.
5002          * Create a fabricated devid only if no fabricated
5003          * devid already exists.
5004 */ 5005 if (sd_get_devid(un) == EINVAL) { 5006 (void) sd_create_devid(un); 5007 } 5008 un->un_f_opt_fab_devid = TRUE; 5009 5010 /* Register the devid if it exists */ 5011 if (un->un_devid != NULL) { 5012 (void) ddi_devid_register(SD_DEVINFO(un), 5013 un->un_devid); 5014 SD_INFO(SD_LOG_ATTACH_DETACH, un, 5015 "sd_register_devid: devid fabricated using " 5016 "ddi framework\n"); 5017 } 5018 } 5019 5020 /* clean up resources */ 5021 if (inq80 != NULL) { 5022 kmem_free(inq80, inq80_len); 5023 } 5024 if (inq83 != NULL) { 5025 kmem_free(inq83, inq83_len); 5026 } 5027 } 5028 5029 5030 5031 /* 5032 * Function: sd_get_devid 5033 * 5034 * Description: This routine will return 0 if a valid device id has been 5035 * obtained from the target and stored in the soft state. If a 5036 * valid device id has not been previously read and stored, a 5037 * read attempt will be made. 5038 * 5039 * Arguments: un - driver soft state (unit) structure 5040 * 5041 * Return Code: 0 if we successfully get the device id 5042 * 5043 * Context: Kernel Thread 5044 */ 5045 5046 static int 5047 sd_get_devid(struct sd_lun *un) 5048 { 5049 struct dk_devid *dkdevid; 5050 ddi_devid_t tmpid; 5051 uint_t *ip; 5052 size_t sz; 5053 diskaddr_t blk; 5054 int status; 5055 int chksum; 5056 int i; 5057 size_t buffer_size; 5058 5059 ASSERT(un != NULL); 5060 ASSERT(mutex_owned(SD_MUTEX(un))); 5061 5062 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_get_devid: entry: un: 0x%p\n", 5063 un); 5064 5065 if (un->un_devid != NULL) { 5066 return (0); 5067 } 5068 5069 mutex_exit(SD_MUTEX(un)); 5070 if (cmlb_get_devid_block(un->un_cmlbhandle, &blk, 5071 (void *)SD_PATH_DIRECT) != 0) { 5072 mutex_enter(SD_MUTEX(un)); 5073 return (EINVAL); 5074 } 5075 5076 /* 5077 * Read and verify device id, stored in the reserved cylinders at the 5078 * end of the disk. Backup label is on the odd sectors of the last 5079 * track of the last cylinder. Device id will be on track of the next 5080 * to last cylinder. 
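 * The dk_devid block read below begins with a two-byte revision
 * (dkd_rev_hi/dkd_rev_lo) followed by the encoded devid; the last
 * four bytes of the block carry an XOR checksum of the preceding
 * words, which is what the DKD_GETCHKSUM() comparison verifies.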
5081 */ 5082 mutex_enter(SD_MUTEX(un)); 5083 buffer_size = SD_REQBYTES2TGTBYTES(un, sizeof (struct dk_devid)); 5084 mutex_exit(SD_MUTEX(un)); 5085 dkdevid = kmem_alloc(buffer_size, KM_SLEEP); 5086 status = sd_send_scsi_READ(un, dkdevid, buffer_size, blk, 5087 SD_PATH_DIRECT); 5088 if (status != 0) { 5089 goto error; 5090 } 5091 5092 /* Validate the revision */ 5093 if ((dkdevid->dkd_rev_hi != DK_DEVID_REV_MSB) || 5094 (dkdevid->dkd_rev_lo != DK_DEVID_REV_LSB)) { 5095 status = EINVAL; 5096 goto error; 5097 } 5098 5099 /* Calculate the checksum */ 5100 chksum = 0; 5101 ip = (uint_t *)dkdevid; 5102 for (i = 0; i < ((un->un_sys_blocksize - sizeof (int))/sizeof (int)); 5103 i++) { 5104 chksum ^= ip[i]; 5105 } 5106 5107 /* Compare the checksums */ 5108 if (DKD_GETCHKSUM(dkdevid) != chksum) { 5109 status = EINVAL; 5110 goto error; 5111 } 5112 5113 /* Validate the device id */ 5114 if (ddi_devid_valid((ddi_devid_t)&dkdevid->dkd_devid) != DDI_SUCCESS) { 5115 status = EINVAL; 5116 goto error; 5117 } 5118 5119 /* 5120 * Store the device id in the driver soft state 5121 */ 5122 sz = ddi_devid_sizeof((ddi_devid_t)&dkdevid->dkd_devid); 5123 tmpid = kmem_alloc(sz, KM_SLEEP); 5124 5125 mutex_enter(SD_MUTEX(un)); 5126 5127 un->un_devid = tmpid; 5128 bcopy(&dkdevid->dkd_devid, un->un_devid, sz); 5129 5130 kmem_free(dkdevid, buffer_size); 5131 5132 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_get_devid: exit: un:0x%p\n", un); 5133 5134 return (status); 5135 error: 5136 mutex_enter(SD_MUTEX(un)); 5137 kmem_free(dkdevid, buffer_size); 5138 return (status); 5139 } 5140 5141 5142 /* 5143 * Function: sd_create_devid 5144 * 5145 * Description: This routine will fabricate the device id and write it 5146 * to the disk. 5147 * 5148 * Arguments: un - driver soft state (unit) structure 5149 * 5150 * Return Code: value of the fabricated device id 5151 * 5152 * Context: Kernel Thread 5153 */ 5154 5155 static ddi_devid_t 5156 sd_create_devid(struct sd_lun *un) 5157 { 5158 ASSERT(un != NULL); 5159 5160 /* Fabricate the devid */ 5161 if (ddi_devid_init(SD_DEVINFO(un), DEVID_FAB, 0, NULL, &un->un_devid) 5162 == DDI_FAILURE) { 5163 return (NULL); 5164 } 5165 5166 /* Write the devid to disk */ 5167 if (sd_write_deviceid(un) != 0) { 5168 ddi_devid_free(un->un_devid); 5169 un->un_devid = NULL; 5170 } 5171 5172 return (un->un_devid); 5173 } 5174 5175 5176 /* 5177 * Function: sd_write_deviceid 5178 * 5179 * Description: This routine will write the device id to the disk 5180 * reserved sector. 
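 *
 *		The sector is protected by a simple XOR checksum over
 *		every 32-bit word except the final one, which holds the
 *		checksum itself; a reader recomputes the same loop and
 *		compares. A sketch of the invariant (mirroring the code
 *		below and in sd_get_devid()):
 *
 *			chksum = 0;
 *			for (i = 0; i < (blocksize - sizeof (int)) /
 *			    sizeof (int); i++)
 *				chksum ^= ip[i];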
5181 * 5182 * Arguments: un - driver soft state (unit) structure 5183 * 5184 * Return Code: EINVAL 5185 * value returned by sd_send_scsi_cmd 5186 * 5187 * Context: Kernel Thread 5188 */ 5189 5190 static int 5191 sd_write_deviceid(struct sd_lun *un) 5192 { 5193 struct dk_devid *dkdevid; 5194 diskaddr_t blk; 5195 uint_t *ip, chksum; 5196 int status; 5197 int i; 5198 5199 ASSERT(mutex_owned(SD_MUTEX(un))); 5200 5201 mutex_exit(SD_MUTEX(un)); 5202 if (cmlb_get_devid_block(un->un_cmlbhandle, &blk, 5203 (void *)SD_PATH_DIRECT) != 0) { 5204 mutex_enter(SD_MUTEX(un)); 5205 return (-1); 5206 } 5207 5208 5209 /* Allocate the buffer */ 5210 dkdevid = kmem_zalloc(un->un_sys_blocksize, KM_SLEEP); 5211 5212 /* Fill in the revision */ 5213 dkdevid->dkd_rev_hi = DK_DEVID_REV_MSB; 5214 dkdevid->dkd_rev_lo = DK_DEVID_REV_LSB; 5215 5216 /* Copy in the device id */ 5217 mutex_enter(SD_MUTEX(un)); 5218 bcopy(un->un_devid, &dkdevid->dkd_devid, 5219 ddi_devid_sizeof(un->un_devid)); 5220 mutex_exit(SD_MUTEX(un)); 5221 5222 /* Calculate the checksum */ 5223 chksum = 0; 5224 ip = (uint_t *)dkdevid; 5225 for (i = 0; i < ((un->un_sys_blocksize - sizeof (int))/sizeof (int)); 5226 i++) { 5227 chksum ^= ip[i]; 5228 } 5229 5230 /* Fill-in checksum */ 5231 DKD_FORMCHKSUM(chksum, dkdevid); 5232 5233 /* Write the reserved sector */ 5234 status = sd_send_scsi_WRITE(un, dkdevid, un->un_sys_blocksize, blk, 5235 SD_PATH_DIRECT); 5236 5237 kmem_free(dkdevid, un->un_sys_blocksize); 5238 5239 mutex_enter(SD_MUTEX(un)); 5240 return (status); 5241 } 5242 5243 5244 /* 5245 * Function: sd_check_vpd_page_support 5246 * 5247 * Description: This routine sends an inquiry command with the EVPD bit set and 5248 * a page code of 0x00 to the device. It is used to determine which 5249 * vital product pages are available to find the devid. We are 5250 * looking for pages 0x83 or 0x80. If we return a negative 1, the 5251 * device does not support that command. 5252 * 5253 * Arguments: un - driver soft state (unit) structure 5254 * 5255 * Return Code: 0 - success 5256 * 1 - check condition 5257 * 5258 * Context: This routine can sleep. 5259 */ 5260 5261 static int 5262 sd_check_vpd_page_support(struct sd_lun *un) 5263 { 5264 uchar_t *page_list = NULL; 5265 uchar_t page_length = 0xff; /* Use max possible length */ 5266 uchar_t evpd = 0x01; /* Set the EVPD bit */ 5267 uchar_t page_code = 0x00; /* Supported VPD Pages */ 5268 int rval = 0; 5269 int counter; 5270 5271 ASSERT(un != NULL); 5272 ASSERT(mutex_owned(SD_MUTEX(un))); 5273 5274 mutex_exit(SD_MUTEX(un)); 5275 5276 /* 5277 * We'll set the page length to the maximum to save figuring it out 5278 * with an additional call. 5279 */ 5280 page_list = kmem_zalloc(page_length, KM_SLEEP); 5281 5282 rval = sd_send_scsi_INQUIRY(un, page_list, page_length, evpd, 5283 page_code, NULL); 5284 5285 mutex_enter(SD_MUTEX(un)); 5286 5287 /* 5288 * Now we must validate that the device accepted the command, as some 5289 * drives do not support it. If the drive does support it, we will 5290 * return 0, and the supported pages will be in un_vpd_page_mask. If 5291 * not, we return -1. 5292 */ 5293 if ((rval == 0) && (page_list[VPD_MODE_PAGE] == 0x00)) { 5294 /* Loop to find one of the 2 pages we need */ 5295 counter = 4; /* Supported pages start at byte 4, with 0x00 */ 5296 5297 /* 5298 * Pages are returned in ascending order, and 0x83 is what we 5299 * are hoping for. 
5300 */ 5301 while ((page_list[counter] <= 0x86) && 5302 (counter <= (page_list[VPD_PAGE_LENGTH] + 5303 VPD_HEAD_OFFSET))) { 5304 /* 5305 * page_list[VPD_PAGE_LENGTH] is the count of page codes; 5306 * the list starts after the header, hence VPD_HEAD_OFFSET. 5307 */ 5308 5309 switch (page_list[counter]) { 5310 case 0x00: 5311 un->un_vpd_page_mask |= SD_VPD_SUPPORTED_PG; 5312 break; 5313 case 0x80: 5314 un->un_vpd_page_mask |= SD_VPD_UNIT_SERIAL_PG; 5315 break; 5316 case 0x81: 5317 un->un_vpd_page_mask |= SD_VPD_OPERATING_PG; 5318 break; 5319 case 0x82: 5320 un->un_vpd_page_mask |= SD_VPD_ASCII_OP_PG; 5321 break; 5322 case 0x83: 5323 un->un_vpd_page_mask |= SD_VPD_DEVID_WWN_PG; 5324 break; 5325 case 0x86: 5326 un->un_vpd_page_mask |= SD_VPD_EXTENDED_DATA_PG; 5327 break; 5328 } 5329 counter++; 5330 } 5331 5332 } else { 5333 rval = -1; 5334 5335 SD_INFO(SD_LOG_ATTACH_DETACH, un, 5336 "sd_check_vpd_page_support: This drive does not implement " 5337 "VPD pages.\n"); 5338 } 5339 5340 kmem_free(page_list, page_length); 5341 5342 return (rval); 5343 } 5344 5345 5346 /* 5347 * Function: sd_setup_pm 5348 * 5349 * Description: Initialize Power Management on the device 5350 * 5351 * Context: Kernel Thread 5352 */ 5353 5354 static void 5355 sd_setup_pm(struct sd_lun *un, dev_info_t *devi) 5356 { 5357 uint_t log_page_size; 5358 uchar_t *log_page_data; 5359 int rval; 5360 5361 /* 5362 * Since we are called from attach, holding a mutex for 5363 * un is unnecessary. Because some of the routines called 5364 * from here require SD_MUTEX to not be held, assert this 5365 * right up front. 5366 */ 5367 ASSERT(!mutex_owned(SD_MUTEX(un))); 5368 /* 5369 * Since the sd device does not have the 'reg' property, 5370 * cpr will not call its DDI_SUSPEND/DDI_RESUME entries. 5371 * The following code is to tell cpr that this device 5372 * DOES need to be suspended and resumed. 5373 */ 5374 (void) ddi_prop_update_string(DDI_DEV_T_NONE, devi, 5375 "pm-hardware-state", "needs-suspend-resume"); 5376 5377 /* 5378 * This complies with the new power management framework 5379 * for certain desktop machines. Create the pm_components 5380 * property as a string array property. 5381 */ 5382 if (un->un_f_pm_supported) { 5383 /* 5384 * Not all devices have a motor, so try that first; 5385 * some devices may return ILLEGAL REQUEST and some 5386 * will hang. 5387 * The following START_STOP_UNIT is used to check if the 5388 * target device has a motor. 5389 */ 5390 un->un_f_start_stop_supported = TRUE; 5391 if (sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_START, 5392 SD_PATH_DIRECT) != 0) { 5393 un->un_f_start_stop_supported = FALSE; 5394 } 5395 5396 /* 5397 * Create the pm properties anyway; otherwise the parent 5398 * can't go to sleep. 5399 */ 5400 (void) sd_create_pm_components(devi, un); 5401 un->un_f_pm_is_enabled = TRUE; 5402 return; 5403 } 5404 5405 if (!un->un_f_log_sense_supported) { 5406 un->un_power_level = SD_SPINDLE_ON; 5407 un->un_f_pm_is_enabled = FALSE; 5408 return; 5409 } 5410 5411 rval = sd_log_page_supported(un, START_STOP_CYCLE_PAGE); 5412 5413 #ifdef SDDEBUG 5414 if (sd_force_pm_supported) { 5415 /* Force a successful result */ 5416 rval = 1; 5417 } 5418 #endif 5419 5420 /* 5421 * If the start-stop cycle counter log page is not supported 5422 * or if the pm-capable property is SD_PM_CAPABLE_FALSE (0), 5423 * then we should not create the pm_components property. 5424 */ 5425 if (rval == -1) { 5426 /* 5427 * Error. 5428 * Reading log sense failed, most likely this is 5429 * an older drive that does not support log sense. 5430 * If this fails, auto-pm is not supported.
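 *
 * To summarize the three-way result of sd_log_page_supported()
 * handled here and below (a sketch of the contract implied by this
 * code: -1 means the LOG SENSE itself failed, 0 means the page was
 * not listed, 1 means the page is supported):
 *
 *	-1 -> disable auto-pm (no usable log sense)
 *	 0 -> retry with the vendor-unique page 0x31
 *	 1 -> use the standard page 0x0e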
5431 */ 5432 un->un_power_level = SD_SPINDLE_ON; 5433 un->un_f_pm_is_enabled = FALSE; 5434 5435 } else if (rval == 0) { 5436 /* 5437 * Page not found. 5438 * The start-stop cycle counter is implemented as page 5439 * START_STOP_CYCLE_VU_PAGE (0x31) in older disks. For 5440 * newer disks it is implemented as START_STOP_CYCLE_PAGE (0xE). 5441 */ 5442 if (sd_log_page_supported(un, START_STOP_CYCLE_VU_PAGE) == 1) { 5443 /* 5444 * Page found, use this one. 5445 */ 5446 un->un_start_stop_cycle_page = START_STOP_CYCLE_VU_PAGE; 5447 un->un_f_pm_is_enabled = TRUE; 5448 } else { 5449 /* 5450 * Error or page not found. 5451 * auto-pm is not supported for this device. 5452 */ 5453 un->un_power_level = SD_SPINDLE_ON; 5454 un->un_f_pm_is_enabled = FALSE; 5455 } 5456 } else { 5457 /* 5458 * Page found, use it. 5459 */ 5460 un->un_start_stop_cycle_page = START_STOP_CYCLE_PAGE; 5461 un->un_f_pm_is_enabled = TRUE; 5462 } 5463 5464 5465 if (un->un_f_pm_is_enabled == TRUE) { 5466 log_page_size = START_STOP_CYCLE_COUNTER_PAGE_SIZE; 5467 log_page_data = kmem_zalloc(log_page_size, KM_SLEEP); 5468 5469 rval = sd_send_scsi_LOG_SENSE(un, log_page_data, 5470 log_page_size, un->un_start_stop_cycle_page, 5471 0x01, 0, SD_PATH_DIRECT); 5472 #ifdef SDDEBUG 5473 if (sd_force_pm_supported) { 5474 /* Force a successful result */ 5475 rval = 0; 5476 } 5477 #endif 5478 5479 /* 5480 * If the Log Sense for the start/stop cycle counter page 5481 * succeeds, then power management is supported and we can 5482 * enable auto-pm. 5483 */ 5484 if (rval == 0) { 5485 (void) sd_create_pm_components(devi, un); 5486 } else { 5487 un->un_power_level = SD_SPINDLE_ON; 5488 un->un_f_pm_is_enabled = FALSE; 5489 } 5490 5491 kmem_free(log_page_data, log_page_size); 5492 } 5493 } 5494 5495 5496 /* 5497 * Function: sd_create_pm_components 5498 * 5499 * Description: Initialize PM property. 5500 * 5501 * Context: Kernel thread context 5502 */ 5503 5504 static void 5505 sd_create_pm_components(dev_info_t *devi, struct sd_lun *un) 5506 { 5507 char *pm_comp[] = { "NAME=spindle-motor", "0=off", "1=on", NULL }; 5508 5509 ASSERT(!mutex_owned(SD_MUTEX(un))); 5510 5511 if (ddi_prop_update_string_array(DDI_DEV_T_NONE, devi, 5512 "pm-components", pm_comp, 3) == DDI_PROP_SUCCESS) { 5513 /* 5514 * When components are initially created they are idle; 5515 * power up any non-removables. 5516 * Note: the return value of pm_raise_power can't be used 5517 * for determining if PM should be enabled for this device. 5518 * Even if you check the return values and remove the 5519 * property created above, the PM framework will not honor the 5520 * change after the first call to pm_raise_power. Hence, 5521 * removal of that property does not help if pm_raise_power 5522 * fails. In the case of removable media, the start/stop 5523 * will fail if the media is not present. 5524 */ 5525 if (un->un_f_attach_spinup && (pm_raise_power(SD_DEVINFO(un), 0, 5526 SD_SPINDLE_ON) == DDI_SUCCESS)) { 5527 mutex_enter(SD_MUTEX(un)); 5528 un->un_power_level = SD_SPINDLE_ON; 5529 mutex_enter(&un->un_pm_mutex); 5530 /* Set to on and not busy. */ 5531 un->un_pm_count = 0; 5532 } else { 5533 mutex_enter(SD_MUTEX(un)); 5534 un->un_power_level = SD_SPINDLE_OFF; 5535 mutex_enter(&un->un_pm_mutex); 5536 /* Set to off.
*/ 5537 un->un_pm_count = -1; 5538 } 5539 mutex_exit(&un->un_pm_mutex); 5540 mutex_exit(SD_MUTEX(un)); 5541 } else { 5542 un->un_power_level = SD_SPINDLE_ON; 5543 un->un_f_pm_is_enabled = FALSE; 5544 } 5545 } 5546 5547 5548 /* 5549 * Function: sd_ddi_suspend 5550 * 5551 * Description: Performs system power-down operations. This includes 5552 * setting the drive state to indicate it is suspended so 5553 * that no new commands will be accepted. Also, wait for 5554 * all commands that are in transport or queued to a timer 5555 * for retry to complete. All timeout threads are cancelled. 5556 * 5557 * Return Code: DDI_FAILURE or DDI_SUCCESS 5558 * 5559 * Context: Kernel thread context 5560 */ 5561 5562 static int 5563 sd_ddi_suspend(dev_info_t *devi) 5564 { 5565 struct sd_lun *un; 5566 clock_t wait_cmds_complete; 5567 5568 un = ddi_get_soft_state(sd_state, ddi_get_instance(devi)); 5569 if (un == NULL) { 5570 return (DDI_FAILURE); 5571 } 5572 5573 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: entry\n"); 5574 5575 mutex_enter(SD_MUTEX(un)); 5576 5577 /* Return success if the device is already suspended. */ 5578 if (un->un_state == SD_STATE_SUSPENDED) { 5579 mutex_exit(SD_MUTEX(un)); 5580 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: " 5581 "device already suspended, exiting\n"); 5582 return (DDI_SUCCESS); 5583 } 5584 5585 /* Return failure if the device is being used by HA */ 5586 if (un->un_resvd_status & 5587 (SD_RESERVE | SD_WANT_RESERVE | SD_LOST_RESERVE)) { 5588 mutex_exit(SD_MUTEX(un)); 5589 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: " 5590 "device in use by HA, exiting\n"); 5591 return (DDI_FAILURE); 5592 } 5593 5594 /* 5595 * Return failure if the device is in a resource wait 5596 * or power changing state. 5597 */ 5598 if ((un->un_state == SD_STATE_RWAIT) || 5599 (un->un_state == SD_STATE_PM_CHANGING)) { 5600 mutex_exit(SD_MUTEX(un)); 5601 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: " 5602 "device in resource wait state, exiting\n"); 5603 return (DDI_FAILURE); 5604 } 5605 5606 5607 un->un_save_state = un->un_last_state; 5608 New_state(un, SD_STATE_SUSPENDED); 5609 5610 /* 5611 * Wait for all commands that are in transport or queued to a timer 5612 * for retry to complete. 5613 * 5614 * While waiting, no new commands will be accepted or sent because of 5615 * the new state we set above. 5616 * 5617 * Wait till the current operation has completed. If we are in the 5618 * resource wait state (with an intr outstanding) then we need to wait 5619 * till the intr completes and starts the next cmd. We want to wait for 5620 * SD_WAIT_CMDS_COMPLETE seconds before failing the DDI_SUSPEND. 5621 */ 5622 wait_cmds_complete = ddi_get_lbolt() + 5623 (sd_wait_cmds_complete * drv_usectohz(1000000)); 5624 5625 while (un->un_ncmds_in_transport != 0) { 5626 /* 5627 * Fail if commands do not finish in the specified time. 5628 */ 5629 if (cv_timedwait(&un->un_disk_busy_cv, SD_MUTEX(un), 5630 wait_cmds_complete) == -1) { 5631 /* 5632 * Undo the state changes made above. Everything 5633 * must go back to its original value. 5634 */ 5635 Restore_state(un); 5636 un->un_last_state = un->un_save_state; 5637 /* Wake up any threads that might be waiting.
*/ 5638 cv_broadcast(&un->un_suspend_cv); 5639 mutex_exit(SD_MUTEX(un)); 5640 SD_ERROR(SD_LOG_IO_PM, un, 5641 "sd_ddi_suspend: failed due to outstanding cmds\n"); 5642 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: exiting\n"); 5643 return (DDI_FAILURE); 5644 } 5645 } 5646 5647 /* 5648 * Cancel SCSI watch thread and timeouts, if any are active 5649 */ 5650 5651 if (SD_OK_TO_SUSPEND_SCSI_WATCHER(un)) { 5652 opaque_t temp_token = un->un_swr_token; 5653 mutex_exit(SD_MUTEX(un)); 5654 scsi_watch_suspend(temp_token); 5655 mutex_enter(SD_MUTEX(un)); 5656 } 5657 5658 if (un->un_reset_throttle_timeid != NULL) { 5659 timeout_id_t temp_id = un->un_reset_throttle_timeid; 5660 un->un_reset_throttle_timeid = NULL; 5661 mutex_exit(SD_MUTEX(un)); 5662 (void) untimeout(temp_id); 5663 mutex_enter(SD_MUTEX(un)); 5664 } 5665 5666 if (un->un_dcvb_timeid != NULL) { 5667 timeout_id_t temp_id = un->un_dcvb_timeid; 5668 un->un_dcvb_timeid = NULL; 5669 mutex_exit(SD_MUTEX(un)); 5670 (void) untimeout(temp_id); 5671 mutex_enter(SD_MUTEX(un)); 5672 } 5673 5674 mutex_enter(&un->un_pm_mutex); 5675 if (un->un_pm_timeid != NULL) { 5676 timeout_id_t temp_id = un->un_pm_timeid; 5677 un->un_pm_timeid = NULL; 5678 mutex_exit(&un->un_pm_mutex); 5679 mutex_exit(SD_MUTEX(un)); 5680 (void) untimeout(temp_id); 5681 mutex_enter(SD_MUTEX(un)); 5682 } else { 5683 mutex_exit(&un->un_pm_mutex); 5684 } 5685 5686 if (un->un_retry_timeid != NULL) { 5687 timeout_id_t temp_id = un->un_retry_timeid; 5688 un->un_retry_timeid = NULL; 5689 mutex_exit(SD_MUTEX(un)); 5690 (void) untimeout(temp_id); 5691 mutex_enter(SD_MUTEX(un)); 5692 5693 if (un->un_retry_bp != NULL) { 5694 un->un_retry_bp->av_forw = un->un_waitq_headp; 5695 un->un_waitq_headp = un->un_retry_bp; 5696 if (un->un_waitq_tailp == NULL) { 5697 un->un_waitq_tailp = un->un_retry_bp; 5698 } 5699 un->un_retry_bp = NULL; 5700 un->un_retry_statp = NULL; 5701 } 5702 } 5703 5704 if (un->un_direct_priority_timeid != NULL) { 5705 timeout_id_t temp_id = un->un_direct_priority_timeid; 5706 un->un_direct_priority_timeid = NULL; 5707 mutex_exit(SD_MUTEX(un)); 5708 (void) untimeout(temp_id); 5709 mutex_enter(SD_MUTEX(un)); 5710 } 5711 5712 if (un->un_f_is_fibre == TRUE) { 5713 /* 5714 * Remove callbacks for insert and remove events 5715 */ 5716 if (un->un_insert_event != NULL) { 5717 mutex_exit(SD_MUTEX(un)); 5718 (void) ddi_remove_event_handler(un->un_insert_cb_id); 5719 mutex_enter(SD_MUTEX(un)); 5720 un->un_insert_event = NULL; 5721 } 5722 5723 if (un->un_remove_event != NULL) { 5724 mutex_exit(SD_MUTEX(un)); 5725 (void) ddi_remove_event_handler(un->un_remove_cb_id); 5726 mutex_enter(SD_MUTEX(un)); 5727 un->un_remove_event = NULL; 5728 } 5729 } 5730 5731 mutex_exit(SD_MUTEX(un)); 5732 5733 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: exit\n"); 5734 5735 return (DDI_SUCCESS); 5736 } 5737 5738 5739 /* 5740 * Function: sd_ddi_pm_suspend 5741 * 5742 * Description: Set the drive state to low power. 5743 * Someone else is required to actually change the drive 5744 * power level. 5745 * 5746 * Arguments: un - driver soft state (unit) structure 5747 * 5748 * Return Code: DDI_FAILURE or DDI_SUCCESS 5749 * 5750 * Context: Kernel thread context 5751 */ 5752 5753 static int 5754 sd_ddi_pm_suspend(struct sd_lun *un) 5755 { 5756 ASSERT(un != NULL); 5757 SD_TRACE(SD_LOG_POWER, un, "sd_ddi_pm_suspend: entry\n"); 5758 5759 ASSERT(!mutex_owned(SD_MUTEX(un))); 5760 mutex_enter(SD_MUTEX(un)); 5761 5762 /* 5763 * Exit if power management is not enabled for this device, or if 5764 * the device is being used by HA. 
5765 */ 5766 if ((un->un_f_pm_is_enabled == FALSE) || (un->un_resvd_status & 5767 (SD_RESERVE | SD_WANT_RESERVE | SD_LOST_RESERVE))) { 5768 mutex_exit(SD_MUTEX(un)); 5769 SD_TRACE(SD_LOG_POWER, un, "sd_ddi_pm_suspend: exiting\n"); 5770 return (DDI_SUCCESS); 5771 } 5772 5773 SD_INFO(SD_LOG_POWER, un, "sd_ddi_pm_suspend: un_ncmds_in_driver=%ld\n", 5774 un->un_ncmds_in_driver); 5775 5776 /* 5777 * See if the device is not busy, i.e.: 5778 * - we have no commands in the driver for this device 5779 * - not waiting for resources 5780 */ 5781 if ((un->un_ncmds_in_driver == 0) && 5782 (un->un_state != SD_STATE_RWAIT)) { 5783 /* 5784 * The device is not busy, so it is OK to go to low power state. 5785 * Indicate low power, but rely on someone else to actually 5786 * change it. 5787 */ 5788 mutex_enter(&un->un_pm_mutex); 5789 un->un_pm_count = -1; 5790 mutex_exit(&un->un_pm_mutex); 5791 un->un_power_level = SD_SPINDLE_OFF; 5792 } 5793 5794 mutex_exit(SD_MUTEX(un)); 5795 5796 SD_TRACE(SD_LOG_POWER, un, "sd_ddi_pm_suspend: exit\n"); 5797 5798 return (DDI_SUCCESS); 5799 } 5800 5801 5802 /* 5803 * Function: sd_ddi_resume 5804 * 5805 * Description: Performs system power-up operations. 5806 * 5807 * Return Code: DDI_SUCCESS 5808 * DDI_FAILURE 5809 * 5810 * Context: Kernel thread context 5811 */ 5812 5813 static int 5814 sd_ddi_resume(dev_info_t *devi) 5815 { 5816 struct sd_lun *un; 5817 5818 un = ddi_get_soft_state(sd_state, ddi_get_instance(devi)); 5819 if (un == NULL) { 5820 return (DDI_FAILURE); 5821 } 5822 5823 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_resume: entry\n"); 5824 5825 mutex_enter(SD_MUTEX(un)); 5826 Restore_state(un); 5827 5828 /* 5829 * Restore the state which was saved so that 5830 * un_last_state holds the right value. 5831 */ 5832 un->un_last_state = un->un_save_state; 5833 /* 5834 * Note: throttle comes back at full. 5835 * Also note: this MUST be done before calling pm_raise_power, 5836 * otherwise the system can get hung in biowait. The scenario where 5837 * this will happen is under cpr suspend. Writing of the system 5838 * state goes through sddump, which writes 0 to un_throttle. If 5839 * writing the system state then fails, for example if the partition 5840 * is too small, then cpr attempts a resume. If the throttle isn't 5841 * restored from the saved value until after calling pm_raise_power, 5842 * then cmds sent in sdpower are not transported and sd_send_scsi_cmd 5843 * hangs in biowait. 5844 */ 5845 un->un_throttle = un->un_saved_throttle; 5846 5847 /* 5848 * The chance of failure is very rare, as the only command issued in 5849 * the power entry point is START, when transitioning from 0->1 or 5850 * unknown->1. Put the device in the SPINDLE ON state irrespective of 5851 * the state in which suspend was done. Ignore the return value, as 5852 * the resume should not be failed. In the case of removable media, 5853 * the media need not be inserted, so there is a chance that raise 5854 * power will fail with media not present. 5855 */ 5856 if (un->un_f_attach_spinup) { 5857 mutex_exit(SD_MUTEX(un)); 5858 (void) pm_raise_power(SD_DEVINFO(un), 0, SD_SPINDLE_ON); 5859 mutex_enter(SD_MUTEX(un)); 5860 } 5861 5862 /* 5863 * Don't broadcast to the suspend cv and therefore possibly 5864 * start I/O until after power has been restored.
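 *
 * The ordering this resume path relies on, as a sketch:
 *
 *	un->un_throttle = un->un_saved_throttle;	1) throttle first
 *	(void) pm_raise_power(SD_DEVINFO(un), 0,	2) then power
 *	    SD_SPINDLE_ON);
 *	cv_broadcast(&un->un_suspend_cv);		3) then waiters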
5865 */ 5866 cv_broadcast(&un->un_suspend_cv); 5867 cv_broadcast(&un->un_state_cv); 5868 5869 /* restart thread */ 5870 if (SD_OK_TO_RESUME_SCSI_WATCHER(un)) { 5871 scsi_watch_resume(un->un_swr_token); 5872 } 5873 5874 #if (defined(__fibre)) 5875 if (un->un_f_is_fibre == TRUE) { 5876 /* 5877 * Add callbacks for insert and remove events 5878 */ 5879 if (strcmp(un->un_node_type, DDI_NT_BLOCK_CHAN)) { 5880 sd_init_event_callbacks(un); 5881 } 5882 } 5883 #endif 5884 5885 /* 5886 * Transport any pending commands to the target. 5887 * 5888 * If this is a low-activity device commands in queue will have to wait 5889 * until new commands come in, which may take awhile. Also, we 5890 * specifically don't check un_ncmds_in_transport because we know that 5891 * there really are no commands in progress after the unit was 5892 * suspended and we could have reached the throttle level, been 5893 * suspended, and have no new commands coming in for awhile. Highly 5894 * unlikely, but so is the low-activity disk scenario. 5895 */ 5896 ddi_xbuf_dispatch(un->un_xbuf_attr); 5897 5898 sd_start_cmds(un, NULL); 5899 mutex_exit(SD_MUTEX(un)); 5900 5901 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_resume: exit\n"); 5902 5903 return (DDI_SUCCESS); 5904 } 5905 5906 5907 /* 5908 * Function: sd_ddi_pm_resume 5909 * 5910 * Description: Set the drive state to powered on. 5911 * Someone else is required to actually change the drive 5912 * power level. 5913 * 5914 * Arguments: un - driver soft state (unit) structure 5915 * 5916 * Return Code: DDI_SUCCESS 5917 * 5918 * Context: Kernel thread context 5919 */ 5920 5921 static int 5922 sd_ddi_pm_resume(struct sd_lun *un) 5923 { 5924 ASSERT(un != NULL); 5925 5926 ASSERT(!mutex_owned(SD_MUTEX(un))); 5927 mutex_enter(SD_MUTEX(un)); 5928 un->un_power_level = SD_SPINDLE_ON; 5929 5930 ASSERT(!mutex_owned(&un->un_pm_mutex)); 5931 mutex_enter(&un->un_pm_mutex); 5932 if (SD_DEVICE_IS_IN_LOW_POWER(un)) { 5933 un->un_pm_count++; 5934 ASSERT(un->un_pm_count == 0); 5935 /* 5936 * Note: no longer do the cv_broadcast on un_suspend_cv. The 5937 * un_suspend_cv is for a system resume, not a power management 5938 * device resume. (4297749) 5939 * cv_broadcast(&un->un_suspend_cv); 5940 */ 5941 } 5942 mutex_exit(&un->un_pm_mutex); 5943 mutex_exit(SD_MUTEX(un)); 5944 5945 return (DDI_SUCCESS); 5946 } 5947 5948 5949 /* 5950 * Function: sd_pm_idletimeout_handler 5951 * 5952 * Description: A timer routine that's active only while a device is busy. 5953 * The purpose is to extend slightly the pm framework's busy 5954 * view of the device to prevent busy/idle thrashing for 5955 * back-to-back commands. Do this by comparing the current time 5956 * to the time at which the last command completed and when the 5957 * difference is greater than sd_pm_idletime, call 5958 * pm_idle_component. In addition to indicating idle to the pm 5959 * framework, update the chain type to again use the internal pm 5960 * layers of the driver. 
5961 * 5962 * Arguments: arg - driver soft state (unit) structure 5963 * 5964 * Context: Executes in a timeout(9F) thread context 5965 */ 5966 5967 static void 5968 sd_pm_idletimeout_handler(void *arg) 5969 { 5970 struct sd_lun *un = arg; 5971 5972 time_t now; 5973 5974 mutex_enter(&sd_detach_mutex); 5975 if (un->un_detach_count != 0) { 5976 /* Abort if the instance is detaching */ 5977 mutex_exit(&sd_detach_mutex); 5978 return; 5979 } 5980 mutex_exit(&sd_detach_mutex); 5981 5982 now = ddi_get_time(); 5983 /* 5984 * Grab both mutexes, in the proper order, since we're accessing 5985 * both PM and softstate variables. 5986 */ 5987 mutex_enter(SD_MUTEX(un)); 5988 mutex_enter(&un->un_pm_mutex); 5989 if (((now - un->un_pm_idle_time) > sd_pm_idletime) && 5990 (un->un_ncmds_in_driver == 0) && (un->un_pm_count == 0)) { 5991 /* 5992 * Update the chain types. 5993 * This takes affect on the next new command received. 5994 */ 5995 if (un->un_f_non_devbsize_supported) { 5996 un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA; 5997 } else { 5998 un->un_buf_chain_type = SD_CHAIN_INFO_DISK; 5999 } 6000 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD; 6001 6002 SD_TRACE(SD_LOG_IO_PM, un, 6003 "sd_pm_idletimeout_handler: idling device\n"); 6004 (void) pm_idle_component(SD_DEVINFO(un), 0); 6005 un->un_pm_idle_timeid = NULL; 6006 } else { 6007 un->un_pm_idle_timeid = 6008 timeout(sd_pm_idletimeout_handler, un, 6009 (drv_usectohz((clock_t)300000))); /* 300 ms. */ 6010 } 6011 mutex_exit(&un->un_pm_mutex); 6012 mutex_exit(SD_MUTEX(un)); 6013 } 6014 6015 6016 /* 6017 * Function: sd_pm_timeout_handler 6018 * 6019 * Description: Callback to tell framework we are idle. 6020 * 6021 * Context: timeout(9f) thread context. 6022 */ 6023 6024 static void 6025 sd_pm_timeout_handler(void *arg) 6026 { 6027 struct sd_lun *un = arg; 6028 6029 (void) pm_idle_component(SD_DEVINFO(un), 0); 6030 mutex_enter(&un->un_pm_mutex); 6031 un->un_pm_timeid = NULL; 6032 mutex_exit(&un->un_pm_mutex); 6033 } 6034 6035 6036 /* 6037 * Function: sdpower 6038 * 6039 * Description: PM entry point. 6040 * 6041 * Return Code: DDI_SUCCESS 6042 * DDI_FAILURE 6043 * 6044 * Context: Kernel thread context 6045 */ 6046 6047 static int 6048 sdpower(dev_info_t *devi, int component, int level) 6049 { 6050 struct sd_lun *un; 6051 int instance; 6052 int rval = DDI_SUCCESS; 6053 uint_t i, log_page_size, maxcycles, ncycles; 6054 uchar_t *log_page_data; 6055 int log_sense_page; 6056 int medium_present; 6057 time_t intvlp; 6058 dev_t dev; 6059 struct pm_trans_data sd_pm_tran_data; 6060 uchar_t save_state; 6061 int sval; 6062 uchar_t state_before_pm; 6063 int got_semaphore_here; 6064 6065 instance = ddi_get_instance(devi); 6066 6067 if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) || 6068 (SD_SPINDLE_OFF > level) || (level > SD_SPINDLE_ON) || 6069 component != 0) { 6070 return (DDI_FAILURE); 6071 } 6072 6073 dev = sd_make_device(SD_DEVINFO(un)); 6074 6075 SD_TRACE(SD_LOG_IO_PM, un, "sdpower: entry, level = %d\n", level); 6076 6077 /* 6078 * Must synchronize power down with close. 6079 * Attempt to decrement/acquire the open/close semaphore, 6080 * but do NOT wait on it. If it's not greater than zero, 6081 * ie. it can't be decremented without waiting, then 6082 * someone else, either open or close, already has it 6083 * and the try returns 0. Use that knowledge here to determine 6084 * if it's OK to change the device power level. 6085 * Also, only increment it on exit if it was decremented, ie. gotten, 6086 * here. 
6087 */ 6088 got_semaphore_here = sema_tryp(&un->un_semoclose); 6089 6090 mutex_enter(SD_MUTEX(un)); 6091 6092 SD_INFO(SD_LOG_POWER, un, "sdpower: un_ncmds_in_driver = %ld\n", 6093 un->un_ncmds_in_driver); 6094 6095 /* 6096 * If un_ncmds_in_driver is non-zero it indicates commands are 6097 * already being processed in the driver; if the semaphore was 6098 * not acquired here, an open or close is being processed. 6099 * In either case a request to go to low power cannot be honored 6100 * right now, so return failure. 6101 */ 6102 if ((level == SD_SPINDLE_OFF) && 6103 ((un->un_ncmds_in_driver != 0) || (got_semaphore_here == 0))) { 6104 mutex_exit(SD_MUTEX(un)); 6105 6106 if (got_semaphore_here != 0) { 6107 sema_v(&un->un_semoclose); 6108 } 6109 SD_TRACE(SD_LOG_IO_PM, un, 6110 "sdpower: exit, device has queued cmds.\n"); 6111 return (DDI_FAILURE); 6112 } 6113 6114 /* 6115 * If the device is OFFLINE the disk is completely dead; turning it 6116 * on or off would require sending commands, and of course those 6117 * would fail anyway, so return failure here. 6118 * 6119 * Power changes to a device that's OFFLINE or SUSPENDED 6120 * are not allowed. 6121 */ 6122 if ((un->un_state == SD_STATE_OFFLINE) || 6123 (un->un_state == SD_STATE_SUSPENDED)) { 6124 mutex_exit(SD_MUTEX(un)); 6125 6126 if (got_semaphore_here != 0) { 6127 sema_v(&un->un_semoclose); 6128 } 6129 SD_TRACE(SD_LOG_IO_PM, un, 6130 "sdpower: exit, device is off-line.\n"); 6131 return (DDI_FAILURE); 6132 } 6133 6134 /* 6135 * Change the device's state to indicate its power level 6136 * is being changed. Do this to prevent a power off in the 6137 * middle of commands, which is especially bad on devices 6138 * that are really powered off instead of just spun down. 6139 */ 6140 state_before_pm = un->un_state; 6141 un->un_state = SD_STATE_PM_CHANGING; 6142 6143 mutex_exit(SD_MUTEX(un)); 6144 6145 /* 6146 * If the "pm-capable" property is set to TRUE by HBA drivers, 6147 * bypass the following checking; otherwise, check the log 6148 * sense information for this device. 6149 */ 6150 if ((level == SD_SPINDLE_OFF) && un->un_f_log_sense_supported) { 6151 /* 6152 * Get the log sense information to understand whether the 6153 * power-cycle counts have gone beyond the threshold. 6154 */ 6155 log_page_size = START_STOP_CYCLE_COUNTER_PAGE_SIZE; 6156 log_page_data = kmem_zalloc(log_page_size, KM_SLEEP); 6157 6158 mutex_enter(SD_MUTEX(un)); 6159 log_sense_page = un->un_start_stop_cycle_page; 6160 mutex_exit(SD_MUTEX(un)); 6161 6162 rval = sd_send_scsi_LOG_SENSE(un, log_page_data, 6163 log_page_size, log_sense_page, 0x01, 0, SD_PATH_DIRECT); 6164 #ifdef SDDEBUG 6165 if (sd_force_pm_supported) { 6166 /* Force a successful result */ 6167 rval = 0; 6168 } 6169 #endif 6170 if (rval != 0) { 6171 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 6172 "Log Sense Failed\n"); 6173 kmem_free(log_page_data, log_page_size); 6174 /* Cannot support power management on those drives */ 6175 6176 if (got_semaphore_here != 0) { 6177 sema_v(&un->un_semoclose); 6178 } 6179 /* 6180 * On exit put the state back to its original value 6181 * and broadcast to anyone waiting for the power 6182 * change completion.
6183 */ 6184 mutex_enter(SD_MUTEX(un)); 6185 un->un_state = state_before_pm; 6186 cv_broadcast(&un->un_suspend_cv); 6187 mutex_exit(SD_MUTEX(un)); 6188 SD_TRACE(SD_LOG_IO_PM, un, 6189 "sdpower: exit, Log Sense Failed.\n"); 6190 return (DDI_FAILURE); 6191 } 6192 6193 /* 6194 * From the page data, convert the essential information to 6195 * pm_trans_data. 6196 */ 6197 maxcycles = 6198 (log_page_data[0x1c] << 24) | (log_page_data[0x1d] << 16) | 6199 (log_page_data[0x1E] << 8) | log_page_data[0x1F]; 6200 6201 sd_pm_tran_data.un.scsi_cycles.lifemax = maxcycles; 6202 6203 ncycles = 6204 (log_page_data[0x24] << 24) | (log_page_data[0x25] << 16) | 6205 (log_page_data[0x26] << 8) | log_page_data[0x27]; 6206 6207 sd_pm_tran_data.un.scsi_cycles.ncycles = ncycles; 6208 6209 for (i = 0; i < DC_SCSI_MFR_LEN; i++) { 6210 sd_pm_tran_data.un.scsi_cycles.svc_date[i] = 6211 log_page_data[8+i]; 6212 } 6213 6214 kmem_free(log_page_data, log_page_size); 6215 6216 /* 6217 * Call the pm_trans_check routine to get the OK from 6218 * the global policy. 6219 */ 6220 6221 sd_pm_tran_data.format = DC_SCSI_FORMAT; 6222 sd_pm_tran_data.un.scsi_cycles.flag = 0; 6223 6224 rval = pm_trans_check(&sd_pm_tran_data, &intvlp); 6225 #ifdef SDDEBUG 6226 if (sd_force_pm_supported) { 6227 /* Force a successful result */ 6228 rval = 1; 6229 } 6230 #endif 6231 switch (rval) { 6232 case 0: 6233 /* 6234 * It is not OK to power cycle, or there was an error in 6235 * the parameters passed; pm_trans_check has returned (in 6236 * intvlp) the advised time to wait before a power cycle. 6237 * Until then we are supposed to pretend we are busy so the 6238 * pm framework will not call our power entry point. To do 6239 * that, install a timeout handler and wait for the 6240 * recommended time to elapse so that power management 6241 * can become effective again. 6242 * 6243 * To effect this behavior, call pm_busy_component to 6244 * indicate to the framework that this device is busy. 6245 * By not adjusting un_pm_count, the rest of PM in 6246 * the driver will function normally, independent 6247 * of this; but because the framework is told the device 6248 * is busy it won't attempt powering down until it gets 6249 * a matching idle, which the timeout handler sends. 6250 * Note: sd_pm_entry can't be called here to do this 6251 * because sdpower may have been called as a result 6252 * of a call to pm_raise_power from within sd_pm_entry. 6253 * 6254 * If a timeout handler is already active then 6255 * don't install another. 6256 */ 6257 mutex_enter(&un->un_pm_mutex); 6258 if (un->un_pm_timeid == NULL) { 6259 un->un_pm_timeid = 6260 timeout(sd_pm_timeout_handler, 6261 un, intvlp * drv_usectohz(1000000)); 6262 mutex_exit(&un->un_pm_mutex); 6263 (void) pm_busy_component(SD_DEVINFO(un), 0); 6264 } else { 6265 mutex_exit(&un->un_pm_mutex); 6266 } 6267 if (got_semaphore_here != 0) { 6268 sema_v(&un->un_semoclose); 6269 } 6270 /* 6271 * On exit put the state back to its original value 6272 * and broadcast to anyone waiting for the power 6273 * change completion. 6274 */ 6275 mutex_enter(SD_MUTEX(un)); 6276 un->un_state = state_before_pm; 6277 cv_broadcast(&un->un_suspend_cv); 6278 mutex_exit(SD_MUTEX(un)); 6279 6280 SD_TRACE(SD_LOG_IO_PM, un, "sdpower: exit, " 6281 "trans check Failed, not ok to power cycle.\n"); 6282 return (DDI_FAILURE); 6283 6284 case -1: 6285 if (got_semaphore_here != 0) { 6286 sema_v(&un->un_semoclose); 6287 } 6288 /* 6289 * On exit put the state back to its original value 6290 * and broadcast to anyone waiting for the power 6291 * change completion.
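 *
 * (This restore-and-broadcast idiom closes every early exit from
 * sdpower(); the broadcast lets threads blocked on the transient
 * SD_STATE_PM_CHANGING state re-evaluate:
 *
 *	mutex_enter(SD_MUTEX(un));
 *	un->un_state = state_before_pm;
 *	cv_broadcast(&un->un_suspend_cv);
 *	mutex_exit(SD_MUTEX(un));
 *
 * as at each failure return above and below.)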
6292 */ 6293 mutex_enter(SD_MUTEX(un)); 6294 un->un_state = state_before_pm; 6295 cv_broadcast(&un->un_suspend_cv); 6296 mutex_exit(SD_MUTEX(un)); 6297 SD_TRACE(SD_LOG_IO_PM, un, 6298 "sdpower: exit, trans check command Failed.\n"); 6299 return (DDI_FAILURE); 6300 } 6301 } 6302 6303 if (level == SD_SPINDLE_OFF) { 6304 /* 6305 * Save the last state... if the STOP FAILS we need it 6306 * for restoring. 6307 */ 6308 mutex_enter(SD_MUTEX(un)); 6309 save_state = un->un_last_state; 6310 /* 6311 * There must not be any cmds getting processed 6312 * in the driver when we get here. Power to the 6313 * device is potentially going off. 6314 */ 6315 ASSERT(un->un_ncmds_in_driver == 0); 6316 mutex_exit(SD_MUTEX(un)); 6317 6318 /* 6319 * For now suspend the device completely before the spindle 6320 * is turned off. 6321 */ 6322 if ((rval = sd_ddi_pm_suspend(un)) == DDI_FAILURE) { 6323 if (got_semaphore_here != 0) { 6324 sema_v(&un->un_semoclose); 6325 } 6326 /* 6327 * On exit put the state back to its original value 6328 * and broadcast to anyone waiting for the power 6329 * change completion. 6330 */ 6331 mutex_enter(SD_MUTEX(un)); 6332 un->un_state = state_before_pm; 6333 cv_broadcast(&un->un_suspend_cv); 6334 mutex_exit(SD_MUTEX(un)); 6335 SD_TRACE(SD_LOG_IO_PM, un, 6336 "sdpower: exit, PM suspend Failed.\n"); 6337 return (DDI_FAILURE); 6338 } 6339 } 6340 6341 /* 6342 * The transition from SPINDLE_OFF to SPINDLE_ON can happen in open, 6343 * close, or strategy. Dump no longer uses this routine; it uses its 6344 * own code so it can be done in polled mode. 6345 */ 6346 6347 medium_present = TRUE; 6348 6349 /* 6350 * When powering up, issue a TUR in case the device is at unit 6351 * attention. Don't do retries. Bypass the PM layer, otherwise 6352 * a deadlock on un_pm_busy_cv will occur. 6353 */ 6354 if (level == SD_SPINDLE_ON) { 6355 (void) sd_send_scsi_TEST_UNIT_READY(un, 6356 SD_DONT_RETRY_TUR | SD_BYPASS_PM); 6357 } 6358 6359 SD_TRACE(SD_LOG_IO_PM, un, "sdpower: sending \'%s\' unit\n", 6360 ((level == SD_SPINDLE_ON) ? "START" : "STOP")); 6361 6362 sval = sd_send_scsi_START_STOP_UNIT(un, 6363 ((level == SD_SPINDLE_ON) ? SD_TARGET_START : SD_TARGET_STOP), 6364 SD_PATH_DIRECT); 6365 /* Command failed, check for media present. */ 6366 if ((sval == ENXIO) && un->un_f_has_removable_media) { 6367 medium_present = FALSE; 6368 } 6369 6370 /* 6371 * The conditions of interest here are: 6372 * if a spindle off with media present fails, 6373 * then restore the state and return an error. 6374 * else if a spindle on fails, 6375 * then return an error (there's no state to restore). 6376 * In all other cases we set up for the new state 6377 * and return success. 6378 */ 6379 switch (level) { 6380 case SD_SPINDLE_OFF: 6381 if ((medium_present == TRUE) && (sval != 0)) { 6382 /* The stop command from above failed */ 6383 rval = DDI_FAILURE; 6384 /* 6385 * The stop command failed, and we have media 6386 * present. Put the level back by calling 6387 * sd_ddi_pm_resume() and set the state back to 6388 * its previous value. 6389 */ 6390 (void) sd_ddi_pm_resume(un); 6391 mutex_enter(SD_MUTEX(un)); 6392 un->un_last_state = save_state; 6393 mutex_exit(SD_MUTEX(un)); 6394 break; 6395 } 6396 /* 6397 * The stop command from above succeeded. 6398 */ 6399 if (un->un_f_monitor_media_state) { 6400 /* 6401 * Terminate the watch thread when removable-media 6402 * devices go into the low power state.
This is as per 6403 * the requirements of pm framework, otherwise commands 6404 * will be generated for the device (through watch 6405 * thread), even when the device is in low power state. 6406 */ 6407 mutex_enter(SD_MUTEX(un)); 6408 un->un_f_watcht_stopped = FALSE; 6409 if (un->un_swr_token != NULL) { 6410 opaque_t temp_token = un->un_swr_token; 6411 un->un_f_watcht_stopped = TRUE; 6412 un->un_swr_token = NULL; 6413 mutex_exit(SD_MUTEX(un)); 6414 (void) scsi_watch_request_terminate(temp_token, 6415 SCSI_WATCH_TERMINATE_ALL_WAIT); 6416 } else { 6417 mutex_exit(SD_MUTEX(un)); 6418 } 6419 } 6420 break; 6421 6422 default: /* The level requested is spindle on... */ 6423 /* 6424 * Legacy behavior: return success on a failed spinup 6425 * if there is no media in the drive. 6426 * Do this by looking at medium_present here. 6427 */ 6428 if ((sval != 0) && medium_present) { 6429 /* The start command from above failed */ 6430 rval = DDI_FAILURE; 6431 break; 6432 } 6433 /* 6434 * The start command from above succeeded 6435 * Resume the devices now that we have 6436 * started the disks 6437 */ 6438 (void) sd_ddi_pm_resume(un); 6439 6440 /* 6441 * Resume the watch thread since it was suspended 6442 * when the device went into low power mode. 6443 */ 6444 if (un->un_f_monitor_media_state) { 6445 mutex_enter(SD_MUTEX(un)); 6446 if (un->un_f_watcht_stopped == TRUE) { 6447 opaque_t temp_token; 6448 6449 un->un_f_watcht_stopped = FALSE; 6450 mutex_exit(SD_MUTEX(un)); 6451 temp_token = scsi_watch_request_submit( 6452 SD_SCSI_DEVP(un), 6453 sd_check_media_time, 6454 SENSE_LENGTH, sd_media_watch_cb, 6455 (caddr_t)dev); 6456 mutex_enter(SD_MUTEX(un)); 6457 un->un_swr_token = temp_token; 6458 } 6459 mutex_exit(SD_MUTEX(un)); 6460 } 6461 } 6462 if (got_semaphore_here != 0) { 6463 sema_v(&un->un_semoclose); 6464 } 6465 /* 6466 * On exit put the state back to it's original value 6467 * and broadcast to anyone waiting for the power 6468 * change completion. 6469 */ 6470 mutex_enter(SD_MUTEX(un)); 6471 un->un_state = state_before_pm; 6472 cv_broadcast(&un->un_suspend_cv); 6473 mutex_exit(SD_MUTEX(un)); 6474 6475 SD_TRACE(SD_LOG_IO_PM, un, "sdpower: exit, status = 0x%x\n", rval); 6476 6477 return (rval); 6478 } 6479 6480 6481 6482 /* 6483 * Function: sdattach 6484 * 6485 * Description: Driver's attach(9e) entry point function. 6486 * 6487 * Arguments: devi - opaque device info handle 6488 * cmd - attach type 6489 * 6490 * Return Code: DDI_SUCCESS 6491 * DDI_FAILURE 6492 * 6493 * Context: Kernel thread context 6494 */ 6495 6496 static int 6497 sdattach(dev_info_t *devi, ddi_attach_cmd_t cmd) 6498 { 6499 switch (cmd) { 6500 case DDI_ATTACH: 6501 return (sd_unit_attach(devi)); 6502 case DDI_RESUME: 6503 return (sd_ddi_resume(devi)); 6504 default: 6505 break; 6506 } 6507 return (DDI_FAILURE); 6508 } 6509 6510 6511 /* 6512 * Function: sddetach 6513 * 6514 * Description: Driver's detach(9E) entry point function. 
6515 * 6516 * Arguments: devi - opaque device info handle 6517 * cmd - detach type 6518 * 6519 * Return Code: DDI_SUCCESS 6520 * DDI_FAILURE 6521 * 6522 * Context: Kernel thread context 6523 */ 6524 6525 static int 6526 sddetach(dev_info_t *devi, ddi_detach_cmd_t cmd) 6527 { 6528 switch (cmd) { 6529 case DDI_DETACH: 6530 return (sd_unit_detach(devi)); 6531 case DDI_SUSPEND: 6532 return (sd_ddi_suspend(devi)); 6533 default: 6534 break; 6535 } 6536 return (DDI_FAILURE); 6537 } 6538 6539 6540 /* 6541 * Function: sd_sync_with_callback 6542 * 6543 * Description: Prevents sd_unit_attach or sd_unit_detach from freeing the soft 6544 * state while the callback routine is active. 6545 * 6546 * Arguments: un: softstate structure for the instance 6547 * 6548 * Context: Kernel thread context 6549 */ 6550 6551 static void 6552 sd_sync_with_callback(struct sd_lun *un) 6553 { 6554 ASSERT(un != NULL); 6555 6556 mutex_enter(SD_MUTEX(un)); 6557 6558 ASSERT(un->un_in_callback >= 0); 6559 6560 while (un->un_in_callback > 0) { 6561 mutex_exit(SD_MUTEX(un)); 6562 delay(2); 6563 mutex_enter(SD_MUTEX(un)); 6564 } 6565 6566 mutex_exit(SD_MUTEX(un)); 6567 } 6568 6569 /* 6570 * Function: sd_unit_attach 6571 * 6572 * Description: Performs DDI_ATTACH processing for sdattach(). Allocates 6573 * the soft state structure for the device and performs 6574 * all necessary structure and device initializations. 6575 * 6576 * Arguments: devi: the system's dev_info_t for the device. 6577 * 6578 * Return Code: DDI_SUCCESS if attach is successful. 6579 * DDI_FAILURE if any part of the attach fails. 6580 * 6581 * Context: Called at attach(9e) time for the DDI_ATTACH flag. 6582 * Kernel thread context only. Can sleep. 6583 */ 6584 6585 static int 6586 sd_unit_attach(dev_info_t *devi) 6587 { 6588 struct scsi_device *devp; 6589 struct sd_lun *un; 6590 char *variantp; 6591 int reservation_flag = SD_TARGET_IS_UNRESERVED; 6592 int instance; 6593 int rval; 6594 int wc_enabled; 6595 int tgt; 6596 uint64_t capacity; 6597 uint_t lbasize = 0; 6598 dev_info_t *pdip = ddi_get_parent(devi); 6599 int offbyone = 0; 6600 int geom_label_valid = 0; 6601 #if defined(__sparc) 6602 int max_xfer_size; 6603 #endif 6604 6605 /* 6606 * Retrieve the target driver's private data area. This was set 6607 * up by the HBA. 6608 */ 6609 devp = ddi_get_driver_private(devi); 6610 6611 /* 6612 * Retrieve the target ID of the device. 6613 */ 6614 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS, 6615 SCSI_ADDR_PROP_TARGET, -1); 6616 6617 /* 6618 * Since we have no idea what state things were left in by the last 6619 * user of the device, set up some 'default' settings, ie. turn 'em 6620 * off. The scsi_ifsetcap calls force re-negotiations with the drive. 6621 * Do this before the scsi_probe, which sends an inquiry. 6622 * This is a fix for bug (4430280). 6623 * Of special importance is wide-xfer. The drive could have been left 6624 * in wide transfer mode by the last driver to communicate with it, 6625 * this includes us. If that's the case, and if the following is not 6626 * setup properly or we don't re-negotiate with the drive prior to 6627 * transferring data to/from the drive, it causes bus parity errors, 6628 * data overruns, and unexpected interrupts. This first occurred when 6629 * the fix for bug (4378686) was made. 
6630 */ 6631 (void) scsi_ifsetcap(&devp->sd_address, "lun-reset", 0, 1); 6632 (void) scsi_ifsetcap(&devp->sd_address, "wide-xfer", 0, 1); 6633 (void) scsi_ifsetcap(&devp->sd_address, "auto-rqsense", 0, 1); 6634 6635 /* 6636 * Currently, scsi_ifsetcap sets the tagged-qing capability for all 6637 * LUNs on a target. Setting it per lun instance actually sets the 6638 * capability of this target, which affects those luns already 6639 * attached on the same target. So during attach, we can only disable 6640 * this capability when no other lun has been attached on this 6641 * target. By doing this, we assume a target has the same tagged-qing 6642 * capability for every lun. The condition can be removed when the HBA 6643 * is changed to support per-lun based tagged-qing capability. 6644 */ 6645 if (sd_scsi_get_target_lun_count(pdip, tgt) < 1) { 6646 (void) scsi_ifsetcap(&devp->sd_address, "tagged-qing", 0, 1); 6647 } 6648 6649 /* 6650 * Use scsi_probe() to issue an INQUIRY command to the device. 6651 * This call will allocate and fill in the scsi_inquiry structure 6652 * and point the sd_inq member of the scsi_device structure to it. 6653 * If the attach succeeds, then this memory will not be de-allocated 6654 * (via scsi_unprobe()) until the instance is detached. 6655 */ 6656 if (scsi_probe(devp, SLEEP_FUNC) != SCSIPROBE_EXISTS) { 6657 goto probe_failed; 6658 } 6659 6660 /* 6661 * Check the device type as specified in the inquiry data and 6662 * claim it if it is of a type that we support. 6663 */ 6664 switch (devp->sd_inq->inq_dtype) { 6665 case DTYPE_DIRECT: 6666 break; 6667 case DTYPE_RODIRECT: 6668 break; 6669 case DTYPE_OPTICAL: 6670 break; 6671 case DTYPE_NOTPRESENT: 6672 default: 6673 /* Unsupported device type; fail the attach. */ 6674 goto probe_failed; 6675 } 6676 6677 /* 6678 * Allocate the soft state structure for this unit. 6679 * 6680 * We rely upon this memory being set to all zeroes by 6681 * ddi_soft_state_zalloc(). We assume that any member of the 6682 * soft state structure that is not explicitly initialized by 6683 * this routine will have a value of zero. 6684 */ 6685 instance = ddi_get_instance(devp->sd_dev); 6686 if (ddi_soft_state_zalloc(sd_state, instance) != DDI_SUCCESS) { 6687 goto probe_failed; 6688 } 6689 6690 /* 6691 * Retrieve a pointer to the newly-allocated soft state. 6692 * 6693 * This should NEVER fail if the ddi_soft_state_zalloc() call above 6694 * was successful, unless something has gone horribly wrong and the 6695 * ddi's soft state internals are corrupt (in which case it is 6696 * probably better to halt here than just fail the attach...) 6697 */ 6698 if ((un = ddi_get_soft_state(sd_state, instance)) == NULL) { 6699 panic("sd_unit_attach: NULL soft state on instance:0x%x", 6700 instance); 6701 /*NOTREACHED*/ 6702 } 6703 6704 /* 6705 * Link the back ptr of the driver soft state to the scsi_device 6706 * struct for this lun. 6707 * Save a pointer to the softstate in the driver-private area of 6708 * the scsi_device struct. 6709 * Note: We cannot call SD_INFO, SD_TRACE, SD_ERROR, or SD_DIAG until 6710 * we first set un->un_sd below. 6711 */ 6712 un->un_sd = devp; 6713 devp->sd_private = (opaque_t)un; 6714 6715 /* 6716 * The following must be after devp is stored in the soft state struct. 6717 */ 6718 #ifdef SDDEBUG 6719 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 6720 "%s_unit_attach: un:0x%p instance:%d\n", 6721 ddi_driver_name(devi), un, instance); 6722 #endif 6723 6724 /* 6725 * Set up the device type and node type (for the minor nodes).
6726 * By default we assume that the device can at least support the 6727 * Common Command Set. Call it a CD-ROM if it reports itself 6728 * as a RODIRECT device. 6729 */ 6730 switch (devp->sd_inq->inq_dtype) { 6731 case DTYPE_RODIRECT: 6732 un->un_node_type = DDI_NT_CD_CHAN; 6733 un->un_ctype = CTYPE_CDROM; 6734 break; 6735 case DTYPE_OPTICAL: 6736 un->un_node_type = DDI_NT_BLOCK_CHAN; 6737 un->un_ctype = CTYPE_ROD; 6738 break; 6739 default: 6740 un->un_node_type = DDI_NT_BLOCK_CHAN; 6741 un->un_ctype = CTYPE_CCS; 6742 break; 6743 } 6744 6745 /* 6746 * Try to read the interconnect type from the HBA. 6747 * 6748 * Note: This driver is currently compiled as two binaries, a parallel 6749 * scsi version (sd) and a fibre channel version (ssd). All functional 6750 * differences are determined at compile time. In the future a single 6751 * binary will be provided and the interconnect type will be used to 6752 * differentiate between fibre and parallel scsi behaviors. At that time 6753 * it will be necessary for all fibre channel HBAs to support this 6754 * property. 6755 * 6756 * set un_f_is_fiber to TRUE ( default fiber ) 6757 */ 6758 un->un_f_is_fibre = TRUE; 6759 switch (scsi_ifgetcap(SD_ADDRESS(un), "interconnect-type", -1)) { 6760 case INTERCONNECT_SSA: 6761 un->un_interconnect_type = SD_INTERCONNECT_SSA; 6762 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6763 "sd_unit_attach: un:0x%p SD_INTERCONNECT_SSA\n", un); 6764 break; 6765 case INTERCONNECT_PARALLEL: 6766 un->un_f_is_fibre = FALSE; 6767 un->un_interconnect_type = SD_INTERCONNECT_PARALLEL; 6768 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6769 "sd_unit_attach: un:0x%p SD_INTERCONNECT_PARALLEL\n", un); 6770 break; 6771 case INTERCONNECT_SATA: 6772 un->un_f_is_fibre = FALSE; 6773 un->un_interconnect_type = SD_INTERCONNECT_SATA; 6774 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6775 "sd_unit_attach: un:0x%p SD_INTERCONNECT_SATA\n", un); 6776 break; 6777 case INTERCONNECT_FIBRE: 6778 un->un_interconnect_type = SD_INTERCONNECT_FIBRE; 6779 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6780 "sd_unit_attach: un:0x%p SD_INTERCONNECT_FIBRE\n", un); 6781 break; 6782 case INTERCONNECT_FABRIC: 6783 un->un_interconnect_type = SD_INTERCONNECT_FABRIC; 6784 un->un_node_type = DDI_NT_BLOCK_FABRIC; 6785 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6786 "sd_unit_attach: un:0x%p SD_INTERCONNECT_FABRIC\n", un); 6787 break; 6788 default: 6789 #ifdef SD_DEFAULT_INTERCONNECT_TYPE 6790 /* 6791 * The HBA does not support the "interconnect-type" property 6792 * (or did not provide a recognized type). 6793 * 6794 * Note: This will be obsoleted when a single fibre channel 6795 * and parallel scsi driver is delivered. In the meantime the 6796 * interconnect type will be set to the platform default.If that 6797 * type is not parallel SCSI, it means that we should be 6798 * assuming "ssd" semantics. However, here this also means that 6799 * the FC HBA is not supporting the "interconnect-type" property 6800 * like we expect it to, so log this occurrence. 6801 */ 6802 un->un_interconnect_type = SD_DEFAULT_INTERCONNECT_TYPE; 6803 if (!SD_IS_PARALLEL_SCSI(un)) { 6804 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6805 "sd_unit_attach: un:0x%p Assuming " 6806 "INTERCONNECT_FIBRE\n", un); 6807 } else { 6808 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6809 "sd_unit_attach: un:0x%p Assuming " 6810 "INTERCONNECT_PARALLEL\n", un); 6811 un->un_f_is_fibre = FALSE; 6812 } 6813 #else 6814 /* 6815 * Note: This source will be implemented when a single fibre 6816 * channel and parallel scsi driver is delivered. 
The default 6817 * will be to assume that if a device does not support the 6818 * "interconnect-type" property it is a parallel SCSI HBA, and 6819 * we will set the interconnect type accordingly. 6820 */ 6821 un->un_interconnect_type = SD_INTERCONNECT_PARALLEL; 6822 un->un_f_is_fibre = FALSE; 6823 #endif 6824 break; 6825 } 6826 6827 if (un->un_f_is_fibre == TRUE) { 6828 if (scsi_ifgetcap(SD_ADDRESS(un), "scsi-version", 1) == 6829 SCSI_VERSION_3) { 6830 switch (un->un_interconnect_type) { 6831 case SD_INTERCONNECT_FIBRE: 6832 case SD_INTERCONNECT_SSA: 6833 un->un_node_type = DDI_NT_BLOCK_WWN; 6834 break; 6835 default: 6836 break; 6837 } 6838 } 6839 } 6840 6841 /* 6842 * Initialize the Request Sense command for the target. 6843 */ 6844 if (sd_alloc_rqs(devp, un) != DDI_SUCCESS) { 6845 goto alloc_rqs_failed; 6846 } 6847 6848 /* 6849 * Set un_retry_count to SD_RETRY_COUNT; this is OK for SPARC, 6850 * with separate binaries for sd and ssd. 6851 * 6852 * x86 has one binary, and un_retry_count is set based on the 6853 * connection type. The hardcoded values will go away when SPARC 6854 * uses one binary for sd and ssd. These hardcoded values need to 6855 * match SD_RETRY_COUNT in sddef.h. 6856 * The value used is based on the interconnect type: 6857 * fibre = 3, parallel = 5. 6858 */ 6859 #if defined(__i386) || defined(__amd64) 6860 un->un_retry_count = un->un_f_is_fibre ? 3 : 5; 6861 #else 6862 un->un_retry_count = SD_RETRY_COUNT; 6863 #endif 6864 6865 /* 6866 * Set the per disk retry count to the default number of retries 6867 * for disks and CDROMs. This value can be overridden by the 6868 * disk property list or an entry in sd.conf. 6869 */ 6870 un->un_notready_retry_count = 6871 ISCD(un) ? CD_NOT_READY_RETRY_COUNT(un) 6872 : DISK_NOT_READY_RETRY_COUNT(un); 6873 6874 /* 6875 * Set the busy retry count to the default value of un_retry_count. 6876 * This can be overridden by entries in sd.conf or the device 6877 * config table. 6878 */ 6879 un->un_busy_retry_count = un->un_retry_count; 6880 6881 /* 6882 * Init the reset threshold for retries. This number determines 6883 * how many retries must be performed before a reset can be issued 6884 * (for certain error conditions). This can be overridden by entries 6885 * in sd.conf or the device config table. 6886 */ 6887 un->un_reset_retry_count = (un->un_retry_count / 2); 6888 6889 /* 6890 * Set the victim_retry_count to the default un_retry_count 6891 */ 6892 un->un_victim_retry_count = (2 * un->un_retry_count); 6893 6894 /* 6895 * Set the reservation release timeout to the default value of 6896 * 5 seconds. This can be overridden by entries in ssd.conf or the 6897 * device config table. 6898 */ 6899 un->un_reserve_release_time = 5; 6900 6901 /* 6902 * Set up the default maximum transfer size. Note that this may 6903 * get updated later in the attach, when setting up default wide 6904 * operations for disks. 6905 */ 6906 #if defined(__i386) || defined(__amd64) 6907 un->un_max_xfer_size = (uint_t)SD_DEFAULT_MAX_XFER_SIZE; 6908 un->un_partial_dma_supported = 1; 6909 #else 6910 un->un_max_xfer_size = (uint_t)maxphys; 6911 #endif 6912 6913 /* 6914 * Get the "allow bus device reset" property (defaults to "enabled" if 6915 * the property was not defined). This is to disable bus resets for 6916 * certain kinds of error recovery. Note: In the future when a run-time 6917 * fibre check is available the soft state flag should default to 6918 * enabled.
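 *
 * ddi_getprop(9F) returns its last argument as the default when a
 * property does not exist, which is how "defaults to enabled" is
 * realized below (sketch):
 *
 *	ddi_getprop(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS,
 *	    "allow-bus-device-reset", 1)	== 1 when undefined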
6919 */ 6920 if (un->un_f_is_fibre == TRUE) { 6921 un->un_f_allow_bus_device_reset = TRUE; 6922 } else { 6923 if (ddi_getprop(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS, 6924 "allow-bus-device-reset", 1) != 0) { 6925 un->un_f_allow_bus_device_reset = TRUE; 6926 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6927 "sd_unit_attach: un:0x%p Bus device reset " 6928 "enabled\n", un); 6929 } else { 6930 un->un_f_allow_bus_device_reset = FALSE; 6931 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6932 "sd_unit_attach: un:0x%p Bus device reset " 6933 "disabled\n", un); 6934 } 6935 } 6936 6937 /* 6938 * Check if this is an ATAPI device. ATAPI devices use Group 1 6939 * Read/Write commands and Group 2 Mode Sense/Select commands. 6940 * 6941 * Note: The "obsolete" way of doing this is to check for the "atapi" 6942 * property. The new "variant" property with a value of "atapi" has been 6943 * introduced so that future 'variants' of standard SCSI behavior (like 6944 * atapi) could be specified by the underlying HBA drivers by supplying 6945 * a new value for the "variant" property, instead of having to define a 6946 * new property. 6947 */ 6948 if (ddi_prop_get_int(DDI_DEV_T_ANY, devi, 0, "atapi", -1) != -1) { 6949 un->un_f_cfg_is_atapi = TRUE; 6950 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6951 "sd_unit_attach: un:0x%p Atapi device\n", un); 6952 } 6953 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, devi, 0, "variant", 6954 &variantp) == DDI_PROP_SUCCESS) { 6955 if (strcmp(variantp, "atapi") == 0) { 6956 un->un_f_cfg_is_atapi = TRUE; 6957 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6958 "sd_unit_attach: un:0x%p Atapi device\n", un); 6959 } 6960 ddi_prop_free(variantp); 6961 } 6962 6963 un->un_cmd_timeout = SD_IO_TIME; 6964 6965 un->un_busy_timeout = SD_BSY_TIMEOUT; 6966 6967 /* Info on current states, statuses, etc. (Updated frequently) */ 6968 un->un_state = SD_STATE_NORMAL; 6969 un->un_last_state = SD_STATE_NORMAL; 6970 6971 /* Control & status info for command throttling */ 6972 un->un_throttle = sd_max_throttle; 6973 un->un_saved_throttle = sd_max_throttle; 6974 un->un_min_throttle = sd_min_throttle; 6975 6976 if (un->un_f_is_fibre == TRUE) { 6977 un->un_f_use_adaptive_throttle = TRUE; 6978 } else { 6979 un->un_f_use_adaptive_throttle = FALSE; 6980 } 6981 6982 /* Removable media support. */ 6983 cv_init(&un->un_state_cv, NULL, CV_DRIVER, NULL); 6984 un->un_mediastate = DKIO_NONE; 6985 un->un_specified_mediastate = DKIO_NONE; 6986 6987 /* CVs for suspend/resume (PM or DR) */ 6988 cv_init(&un->un_suspend_cv, NULL, CV_DRIVER, NULL); 6989 cv_init(&un->un_disk_busy_cv, NULL, CV_DRIVER, NULL); 6990 6991 /* Power management support. */ 6992 un->un_power_level = SD_SPINDLE_UNINIT; 6993 6994 cv_init(&un->un_wcc_cv, NULL, CV_DRIVER, NULL); 6995 un->un_f_wcc_inprog = 0; 6996 6997 /* 6998 * The open/close semaphore is used to serialize threads executing 6999 * in the driver's open & close entry point routines for a given 7000 * instance. 7001 */ 7002 (void) sema_init(&un->un_semoclose, 1, NULL, SEMA_DRIVER, NULL); 7003 7004 /* 7005 * The conf file entry and softstate variable is a forceful override, 7006 * meaning a non-zero value must be entered to change the default. 7007 */ 7008 un->un_f_disksort_disabled = FALSE; 7009 7010 /* 7011 * Retrieve the properties from the static driver table or the driver 7012 * configuration file (.conf) for this unit and update the soft state 7013 * for the device as needed for the indicated properties. 
7014 * Note: the property configuration needs to occur here as some of the 7015 * following routines may have dependencies on soft state flags set 7016 * as part of the driver property configuration. 7017 */ 7018 sd_read_unit_properties(un); 7019 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7020 "sd_unit_attach: un:0x%p property configuration complete.\n", un); 7021 7022 /* 7023 * A device is treated as hotpluggable only if it has the 7024 * "hotpluggable" property. Otherwise, it is 7025 * regarded as non-hotpluggable. 7026 */ 7027 if (ddi_prop_get_int(DDI_DEV_T_ANY, devi, 0, "hotpluggable", 7028 -1) != -1) { 7029 un->un_f_is_hotpluggable = TRUE; 7030 } 7031 7032 /* 7033 * Set the unit's attributes (flags) according to "hotpluggable" and 7034 * the RMB bit in the INQUIRY data. 7035 */ 7036 sd_set_unit_attributes(un, devi); 7037 7038 /* 7039 * By default, we mark the capacity, lbasize, and geometry 7040 * as invalid. Only if we successfully read a valid capacity 7041 * will we update the un_blockcount and un_tgt_blocksize with the 7042 * valid values (the geometry will be validated later). 7043 */ 7044 un->un_f_blockcount_is_valid = FALSE; 7045 un->un_f_tgt_blocksize_is_valid = FALSE; 7046 7047 /* 7048 * Use DEV_BSIZE and DEV_BSHIFT as defaults, until we can determine 7049 * otherwise. 7050 */ 7051 un->un_tgt_blocksize = un->un_sys_blocksize = DEV_BSIZE; 7052 un->un_blockcount = 0; 7053 7054 /* 7055 * Set up the per-instance info needed to determine the correct 7056 * CDBs and other info for issuing commands to the target. 7057 */ 7058 sd_init_cdb_limits(un); 7059 7060 /* 7061 * Set up the IO chains to use, based upon the target type. 7062 */ 7063 if (un->un_f_non_devbsize_supported) { 7064 un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA; 7065 } else { 7066 un->un_buf_chain_type = SD_CHAIN_INFO_DISK; 7067 } 7068 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD; 7069 un->un_direct_chain_type = SD_CHAIN_INFO_DIRECT_CMD; 7070 un->un_priority_chain_type = SD_CHAIN_INFO_PRIORITY_CMD; 7071 7072 un->un_xbuf_attr = ddi_xbuf_attr_create(sizeof (struct sd_xbuf), 7073 sd_xbuf_strategy, un, sd_xbuf_active_limit, sd_xbuf_reserve_limit, 7074 ddi_driver_major(devi), DDI_XBUF_QTHREAD_DRIVER); 7075 ddi_xbuf_attr_register_devinfo(un->un_xbuf_attr, devi); 7076 7077 7078 if (ISCD(un)) { 7079 un->un_additional_codes = sd_additional_codes; 7080 } else { 7081 un->un_additional_codes = NULL; 7082 } 7083 7084 /* 7085 * Create the kstats here so they can be available for attach-time 7086 * routines that send commands to the unit (either polled or via 7087 * sd_send_scsi_cmd). 7088 * 7089 * Note: This is a critical sequence that needs to be maintained: 7090 * 1) Instantiate the kstats here, before any routines using the 7091 * iopath (i.e. sd_send_scsi_cmd). 7092 * 2) Instantiate and initialize the partition stats 7093 * (sd_set_pstats). 7094 * 3) Initialize the error stats (sd_set_errstats), following 7095 * sd_validate_geometry(), sd_register_devid(), 7096 * and sd_cache_control().
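 *
 * (Illustrative note: the I/O kstat created below is registered under
 * the module name in sd_label plus the instance number; assuming
 * sd_label is "sd", instance 1 is what iostat reports as sd1.)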
7097 */ 7098 7099 un->un_stats = kstat_create(sd_label, instance, 7100 NULL, "disk", KSTAT_TYPE_IO, 1, KSTAT_FLAG_PERSISTENT); 7101 if (un->un_stats != NULL) { 7102 un->un_stats->ks_lock = SD_MUTEX(un); 7103 kstat_install(un->un_stats); 7104 } 7105 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7106 "sd_unit_attach: un:0x%p un_stats created\n", un); 7107 7108 sd_create_errstats(un, instance); 7109 if (un->un_errstats == NULL) { 7110 goto create_errstats_failed; 7111 } 7112 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7113 "sd_unit_attach: un:0x%p errstats created\n", un); 7114 7115 /* 7116 * The following if/else code was relocated here from below as part 7117 * of the fix for bug (4430280). However with the default setup added 7118 * on entry to this routine, it's no longer absolutely necessary for 7119 * this to be before the call to sd_spin_up_unit. 7120 */ 7121 if (SD_IS_PARALLEL_SCSI(un) || SD_IS_SERIAL(un)) { 7122 int tq_trigger_flag = (((devp->sd_inq->inq_ansi == 4) || 7123 (devp->sd_inq->inq_ansi == 5)) && 7124 devp->sd_inq->inq_bque) || devp->sd_inq->inq_cmdque; 7125 7126 /* 7127 * If tagged queueing is supported by the target 7128 * and by the host adapter then we will enable it 7129 */ 7130 un->un_tagflags = 0; 7131 if ((devp->sd_inq->inq_rdf == RDF_SCSI2) && tq_trigger_flag && 7132 (un->un_f_arq_enabled == TRUE)) { 7133 if (scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 7134 1, 1) == 1) { 7135 un->un_tagflags = FLAG_STAG; 7136 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7137 "sd_unit_attach: un:0x%p tag queueing " 7138 "enabled\n", un); 7139 } else if (scsi_ifgetcap(SD_ADDRESS(un), 7140 "untagged-qing", 0) == 1) { 7141 un->un_f_opt_queueing = TRUE; 7142 un->un_saved_throttle = un->un_throttle = 7143 min(un->un_throttle, 3); 7144 } else { 7145 un->un_f_opt_queueing = FALSE; 7146 un->un_saved_throttle = un->un_throttle = 1; 7147 } 7148 } else if ((scsi_ifgetcap(SD_ADDRESS(un), "untagged-qing", 0) 7149 == 1) && (un->un_f_arq_enabled == TRUE)) { 7150 /* The Host Adapter supports internal queueing. */ 7151 un->un_f_opt_queueing = TRUE; 7152 un->un_saved_throttle = un->un_throttle = 7153 min(un->un_throttle, 3); 7154 } else { 7155 un->un_f_opt_queueing = FALSE; 7156 un->un_saved_throttle = un->un_throttle = 1; 7157 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7158 "sd_unit_attach: un:0x%p no tag queueing\n", un); 7159 } 7160 7161 /* 7162 * Enable large transfers for SATA/SAS drives 7163 */ 7164 if (SD_IS_SERIAL(un)) { 7165 un->un_max_xfer_size = 7166 ddi_getprop(DDI_DEV_T_ANY, devi, 0, 7167 sd_max_xfer_size, SD_MAX_XFER_SIZE); 7168 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7169 "sd_unit_attach: un:0x%p max transfer " 7170 "size=0x%x\n", un, un->un_max_xfer_size); 7171 7172 } 7173 7174 /* Setup or tear down default wide operations for disks */ 7175 7176 /* 7177 * Note: Legacy: it may be possible for both "sd_max_xfer_size" 7178 * and "ssd_max_xfer_size" to exist simultaneously on the same 7179 * system and be set to different values. In the future this 7180 * code may need to be updated when the ssd module is 7181 * obsoleted and removed from the system. 
(4299588) 7182 */ 7183 if (SD_IS_PARALLEL_SCSI(un) && 7184 (devp->sd_inq->inq_rdf == RDF_SCSI2) && 7185 (devp->sd_inq->inq_wbus16 || devp->sd_inq->inq_wbus32)) { 7186 if (scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 7187 1, 1) == 1) { 7188 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7189 "sd_unit_attach: un:0x%p Wide Transfer " 7190 "enabled\n", un); 7191 } 7192 7193 /* 7194 * If tagged queuing has also been enabled, then 7195 * enable large xfers 7196 */ 7197 if (un->un_saved_throttle == sd_max_throttle) { 7198 un->un_max_xfer_size = 7199 ddi_getprop(DDI_DEV_T_ANY, devi, 0, 7200 sd_max_xfer_size, SD_MAX_XFER_SIZE); 7201 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7202 "sd_unit_attach: un:0x%p max transfer " 7203 "size=0x%x\n", un, un->un_max_xfer_size); 7204 } 7205 } else { 7206 if (scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 7207 0, 1) == 1) { 7208 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7209 "sd_unit_attach: un:0x%p " 7210 "Wide Transfer disabled\n", un); 7211 } 7212 } 7213 } else { 7214 un->un_tagflags = FLAG_STAG; 7215 un->un_max_xfer_size = ddi_getprop(DDI_DEV_T_ANY, 7216 devi, 0, sd_max_xfer_size, SD_MAX_XFER_SIZE); 7217 } 7218 7219 /* 7220 * If this target supports LUN reset, try to enable it. 7221 */ 7222 if (un->un_f_lun_reset_enabled) { 7223 if (scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 1, 1) == 1) { 7224 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_unit_attach: " 7225 "un:0x%p lun_reset capability set\n", un); 7226 } else { 7227 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_unit_attach: " 7228 "un:0x%p lun-reset capability not set\n", un); 7229 } 7230 } 7231 7232 /* 7233 * Adjust the maximum transfer size. This is to fix 7234 * the problem of partial DMA support on SPARC. Some 7235 * HBA drivers, such as aac, have a very small dma_attr_maxxfer 7236 * size, which requires partial DMA support on SPARC. 7237 * In the future the SPARC pci nexus driver may solve 7238 * the problem instead of this fix. 7239 */ 7240 #if defined(__sparc) 7241 max_xfer_size = scsi_ifgetcap(SD_ADDRESS(un), "dma-max", 1); 7242 if ((max_xfer_size > 0) && (max_xfer_size < un->un_max_xfer_size)) { 7243 un->un_max_xfer_size = max_xfer_size; 7244 un->un_partial_dma_supported = 1; 7245 } 7246 #endif 7247 7248 /* 7249 * Set PKT_DMA_PARTIAL flag. 7250 */ 7251 if (un->un_partial_dma_supported == 1) { 7252 un->un_pkt_flags = PKT_DMA_PARTIAL; 7253 } else { 7254 un->un_pkt_flags = 0; 7255 } 7256 7257 /* 7258 * At this point in the attach, we have enough info in the 7259 * soft state to be able to issue commands to the target. 7260 * 7261 * All command paths used below MUST issue their commands as 7262 * SD_PATH_DIRECT. This is important as intermediate layers 7263 * are not all initialized yet (such as PM). 7264 */ 7265 7266 /* 7267 * Send a TEST UNIT READY command to the device. This should clear 7268 * any outstanding UNIT ATTENTION that may be present. 7269 * 7270 * Note: Don't check for success; just track whether there is a 7271 * reservation. This is a throw-away command to clear any unit attentions. 7272 * 7273 * Note: This MUST be the first command issued to the target during 7274 * attach to ensure power on UNIT ATTENTIONS are cleared. 7275 * Pass in flag SD_DONT_RETRY_TUR to prevent the long delays associated 7276 * with attempts at spinning up a device with no media.
7277 */ 7278 if (sd_send_scsi_TEST_UNIT_READY(un, SD_DONT_RETRY_TUR) == EACCES) { 7279 reservation_flag = SD_TARGET_IS_RESERVED; 7280 } 7281 7282 /* 7283 * If the device is NOT a removable media device, attempt to spin 7284 * it up (using the START_STOP_UNIT command) and read its capacity 7285 * (using the READ CAPACITY command). Note, however, that either 7286 * of these could fail and in some cases we would continue with 7287 * the attach despite the failure (see below). 7288 */ 7289 if (un->un_f_descr_format_supported) { 7290 switch (sd_spin_up_unit(un)) { 7291 case 0: 7292 /* 7293 * Spin-up was successful; now try to read the 7294 * capacity. If successful then save the results 7295 * and mark the capacity & lbasize as valid. 7296 */ 7297 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7298 "sd_unit_attach: un:0x%p spin-up successful\n", un); 7299 7300 switch (sd_send_scsi_READ_CAPACITY(un, &capacity, 7301 &lbasize, SD_PATH_DIRECT)) { 7302 case 0: { 7303 if (capacity > DK_MAX_BLOCKS) { 7304 #ifdef _LP64 7305 if (capacity + 1 > 7306 SD_GROUP1_MAX_ADDRESS) { 7307 /* 7308 * Enable descriptor format 7309 * sense data so that we can 7310 * get 64 bit sense data 7311 * fields. 7312 */ 7313 sd_enable_descr_sense(un); 7314 } 7315 #else 7316 /* 32-bit kernels can't handle this */ 7317 scsi_log(SD_DEVINFO(un), 7318 sd_label, CE_WARN, 7319 "disk has %llu blocks, which " 7320 "is too large for a 32-bit " 7321 "kernel", capacity); 7322 7323 #if defined(__i386) || defined(__amd64) 7324 /* 7325 * A 1TB disk was treated as (1T - 512)B 7326 * in the past, so it might have a 7327 * valid VTOC and Solaris partitions; 7328 * we have to allow it to continue to 7329 * work. 7330 */ 7331 if (capacity - 1 > DK_MAX_BLOCKS) 7332 #endif 7333 goto spinup_failed; 7334 #endif 7335 } 7336 7337 /* 7338 * Here it's not necessary to check whether 7339 * the capacity of the device is bigger than 7340 * what the max HBA CDB can support, because 7341 * sd_send_scsi_READ_CAPACITY retrieves 7342 * the capacity by sending a USCSI command, which 7343 * is constrained by the max HBA CDB. In fact, 7344 * sd_send_scsi_READ_CAPACITY returns 7345 * EINVAL when using a bigger CDB than the 7346 * required CDB length allows. This case is 7347 * handled in "case EINVAL" below. 7348 */ 7349 7350 /* 7351 * The following relies on 7352 * sd_send_scsi_READ_CAPACITY never 7353 * returning 0 for capacity and/or lbasize. 7354 */ 7355 sd_update_block_info(un, lbasize, capacity); 7356 7357 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7358 "sd_unit_attach: un:0x%p capacity = %ld " 7359 "blocks; lbasize= %ld.\n", un, 7360 un->un_blockcount, un->un_tgt_blocksize); 7361 7362 break; 7363 } 7364 case EINVAL: 7365 /* 7366 * In the case where the max-cdb-length property 7367 * is smaller than the required CDB length for 7368 * a SCSI device, a target driver can fail to 7369 * attach to that device. 7370 */ 7371 scsi_log(SD_DEVINFO(un), 7372 sd_label, CE_WARN, 7373 "disk capacity is too large " 7374 "for current cdb length"); 7375 goto spinup_failed; 7376 case EACCES: 7377 /* 7378 * Should never get here if the spin-up 7379 * succeeded, but code it in anyway. 7380 * From here, just continue with the attach... 7381 */ 7382 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7383 "sd_unit_attach: un:0x%p " 7384 "sd_send_scsi_READ_CAPACITY " 7385 "returned reservation conflict\n", un); 7386 reservation_flag = SD_TARGET_IS_RESERVED; 7387 break; 7388 default: 7389 /* 7390 * Likewise, should never get here if the 7391 * spin-up succeeded. Just continue with 7392 * the attach...
7393 */ 7394 break; 7395 } 7396 break; 7397 case EACCES: 7398 /* 7399 * Device is reserved by another host. In this case 7400 * we could not spin it up or read the capacity, but 7401 * we continue with the attach anyway. 7402 */ 7403 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7404 "sd_unit_attach: un:0x%p spin-up reservation " 7405 "conflict.\n", un); 7406 reservation_flag = SD_TARGET_IS_RESERVED; 7407 break; 7408 default: 7409 /* Fail the attach if the spin-up failed. */ 7410 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7411 "sd_unit_attach: un:0x%p spin-up failed.", un); 7412 goto spinup_failed; 7413 } 7414 } 7415 7416 /* 7417 * Check to see if this is a MMC drive 7418 */ 7419 if (ISCD(un)) { 7420 sd_set_mmc_caps(un); 7421 } 7422 7423 7424 /* 7425 * Add a zero-length attribute to tell the world we support 7426 * kernel ioctls (for layered drivers) 7427 */ 7428 (void) ddi_prop_create(DDI_DEV_T_NONE, devi, DDI_PROP_CANSLEEP, 7429 DDI_KERNEL_IOCTL, NULL, 0); 7430 7431 /* 7432 * Add a boolean property to tell the world we support 7433 * the B_FAILFAST flag (for layered drivers) 7434 */ 7435 (void) ddi_prop_create(DDI_DEV_T_NONE, devi, DDI_PROP_CANSLEEP, 7436 "ddi-failfast-supported", NULL, 0); 7437 7438 /* 7439 * Initialize power management 7440 */ 7441 mutex_init(&un->un_pm_mutex, NULL, MUTEX_DRIVER, NULL); 7442 cv_init(&un->un_pm_busy_cv, NULL, CV_DRIVER, NULL); 7443 sd_setup_pm(un, devi); 7444 if (un->un_f_pm_is_enabled == FALSE) { 7445 /* 7446 * For performance, point to a jump table that does 7447 * not include pm. 7448 * The direct and priority chains don't change with PM. 7449 * 7450 * Note: this is currently done based on individual device 7451 * capabilities. When an interface for determining system 7452 * power enabled state becomes available, or when additional 7453 * layers are added to the command chain, these values will 7454 * have to be re-evaluated for correctness. 7455 */ 7456 if (un->un_f_non_devbsize_supported) { 7457 un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA_NO_PM; 7458 } else { 7459 un->un_buf_chain_type = SD_CHAIN_INFO_DISK_NO_PM; 7460 } 7461 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD_NO_PM; 7462 } 7463 7464 /* 7465 * This property is set to 0 by HA software to avoid retries 7466 * on a reserved disk. (The preferred property name is 7467 * "retry-on-reservation-conflict") (1189689) 7468 * 7469 * Note: The use of a global here can have unintended consequences. A 7470 * per instance variable is preferable to match the capabilities of 7471 * different underlying hba's (4402600) 7472 */ 7473 sd_retry_on_reservation_conflict = ddi_getprop(DDI_DEV_T_ANY, devi, 7474 DDI_PROP_DONTPASS, "retry-on-reservation-conflict", 7475 sd_retry_on_reservation_conflict); 7476 if (sd_retry_on_reservation_conflict != 0) { 7477 sd_retry_on_reservation_conflict = ddi_getprop(DDI_DEV_T_ANY, 7478 devi, DDI_PROP_DONTPASS, sd_resv_conflict_name, 7479 sd_retry_on_reservation_conflict); 7480 } 7481 7482 /* Set up options for QFULL handling. */ 7483 if ((rval = ddi_getprop(DDI_DEV_T_ANY, devi, 0, 7484 "qfull-retries", -1)) != -1) { 7485 (void) scsi_ifsetcap(SD_ADDRESS(un), "qfull-retries", 7486 rval, 1); 7487 } 7488 if ((rval = ddi_getprop(DDI_DEV_T_ANY, devi, 0, 7489 "qfull-retry-interval", -1)) != -1) { 7490 (void) scsi_ifsetcap(SD_ADDRESS(un), "qfull-retry-interval", 7491 rval, 1); 7492 } 7493 7494 /* 7495 * This just prints a message that announces the existence of the 7496 * device. 
The message is always printed in the system logfile, but 7497 * only appears on the console if the system is booted with the 7498 * -v (verbose) argument. 7499 */ 7500 ddi_report_dev(devi); 7501 7502 un->un_mediastate = DKIO_NONE; 7503 7504 cmlb_alloc_handle(&un->un_cmlbhandle); 7505 7506 #if defined(__i386) || defined(__amd64) 7507 /* 7508 * On x86, compensate for off-by-1 legacy error 7509 */ 7510 if (!un->un_f_has_removable_media && !un->un_f_is_hotpluggable && 7511 (lbasize == un->un_sys_blocksize)) 7512 offbyone = CMLB_OFF_BY_ONE; 7513 #endif 7514 7515 if (cmlb_attach(devi, &sd_tgops, (int)devp->sd_inq->inq_dtype, 7516 un->un_f_has_removable_media, un->un_f_is_hotpluggable, 7517 un->un_node_type, offbyone, un->un_cmlbhandle, 7518 (void *)SD_PATH_DIRECT) != 0) { 7519 goto cmlb_attach_failed; 7520 } 7521 7522 7523 /* 7524 * Read and validate the device's geometry (ie, disk label) 7525 * A new unformatted drive will not have a valid geometry, but 7526 * the driver needs to successfully attach to this device so 7527 * the drive can be formatted via ioctls. 7528 */ 7529 geom_label_valid = (cmlb_validate(un->un_cmlbhandle, 0, 7530 (void *)SD_PATH_DIRECT) == 0) ? 1: 0; 7531 7532 mutex_enter(SD_MUTEX(un)); 7533 7534 /* 7535 * Read and initialize the devid for the unit. 7536 */ 7537 if (un->un_f_devid_supported) { 7538 sd_register_devid(un, devi, reservation_flag); 7539 } 7540 mutex_exit(SD_MUTEX(un)); 7541 7542 #if (defined(__fibre)) 7543 /* 7544 * Register callbacks for fibre only. You can't do this solely 7545 * on the basis of the devid_type because this is hba specific. 7546 * We need to query our hba capabilities to find out whether to 7547 * register or not. 7548 */ 7549 if (un->un_f_is_fibre) { 7550 if (strcmp(un->un_node_type, DDI_NT_BLOCK_CHAN)) { 7551 sd_init_event_callbacks(un); 7552 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7553 "sd_unit_attach: un:0x%p event callbacks inserted", 7554 un); 7555 } 7556 } 7557 #endif 7558 7559 if (un->un_f_opt_disable_cache == TRUE) { 7560 /* 7561 * Disable both read cache and write cache. This is 7562 * the historic behavior of the keywords in the config file. 7563 */ 7564 if (sd_cache_control(un, SD_CACHE_DISABLE, SD_CACHE_DISABLE) != 7565 0) { 7566 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 7567 "sd_unit_attach: un:0x%p Could not disable " 7568 "caching", un); 7569 goto devid_failed; 7570 } 7571 } 7572 7573 /* 7574 * Check the value of the WCE bit now and 7575 * set un_f_write_cache_enabled accordingly. 7576 */ 7577 (void) sd_get_write_cache_enabled(un, &wc_enabled); 7578 mutex_enter(SD_MUTEX(un)); 7579 un->un_f_write_cache_enabled = (wc_enabled != 0); 7580 mutex_exit(SD_MUTEX(un)); 7581 7582 /* 7583 * Check the value of the NV_SUP bit and set 7584 * un_f_suppress_cache_flush accordingly. 7585 */ 7586 sd_get_nv_sup(un); 7587 7588 /* 7589 * Find out what type of reservation this disk supports. 7590 */ 7591 switch (sd_send_scsi_PERSISTENT_RESERVE_IN(un, SD_READ_KEYS, 0, NULL)) { 7592 case 0: 7593 /* 7594 * SCSI-3 reservations are supported. 7595 */ 7596 un->un_reservation_type = SD_SCSI3_RESERVATION; 7597 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7598 "sd_unit_attach: un:0x%p SCSI-3 reservations\n", un); 7599 break; 7600 case ENOTSUP: 7601 /* 7602 * The PERSISTENT RESERVE IN command would not be recognized by 7603 * a SCSI-2 device, so assume the reservation type is SCSI-2. 
7604 */ 7605 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7606 "sd_unit_attach: un:0x%p SCSI-2 reservations\n", un); 7607 un->un_reservation_type = SD_SCSI2_RESERVATION; 7608 break; 7609 default: 7610 /* 7611 * default to SCSI-3 reservations 7612 */ 7613 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7614 "sd_unit_attach: un:0x%p default SCSI3 reservations\n", un); 7615 un->un_reservation_type = SD_SCSI3_RESERVATION; 7616 break; 7617 } 7618 7619 /* 7620 * Set the pstat and error stat values here, so data obtained during the 7621 * previous attach-time routines is available. 7622 * 7623 * Note: This is a critical sequence that needs to be maintained: 7624 * 1) Instantiate the kstats before any routines using the iopath 7625 * (i.e. sd_send_scsi_cmd). 7626 * 2) Initialize the error stats (sd_set_errstats) and partition 7627 * stats (sd_set_pstats) here, following 7628 * cmlb_validate_geometry(), sd_register_devid(), and 7629 * sd_cache_control(). 7630 */ 7631 7632 if (un->un_f_pkstats_enabled && geom_label_valid) { 7633 sd_set_pstats(un); 7634 SD_TRACE(SD_LOG_IO_PARTITION, un, 7635 "sd_unit_attach: un:0x%p pstats created and set\n", un); 7636 } 7637 7638 sd_set_errstats(un); 7639 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7640 "sd_unit_attach: un:0x%p errstats set\n", un); 7641 7642 7643 /* 7644 * After successfully attaching an instance, we record the information 7645 * of how many luns have been attached on the corresponding target and 7646 * controller for parallel SCSI. This information is used when sd tries 7647 * to set the tagged queuing capability in the HBA. 7648 */ 7649 if (SD_IS_PARALLEL_SCSI(un) && (tgt >= 0) && (tgt < NTARGETS_WIDE)) { 7650 sd_scsi_update_lun_on_target(pdip, tgt, SD_SCSI_LUN_ATTACH); 7651 } 7652 7653 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7654 "sd_unit_attach: un:0x%p exit success\n", un); 7655 7656 return (DDI_SUCCESS); 7657 7658 /* 7659 * An error occurred during the attach; clean up & return failure. 7660 */ 7661 7662 devid_failed: 7663 7664 setup_pm_failed: 7665 ddi_remove_minor_node(devi, NULL); 7666 7667 cmlb_attach_failed: 7668 /* 7669 * Cleanup from the scsi_ifsetcap() calls (437868) 7670 */ 7671 (void) scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 0, 1); 7672 (void) scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 0, 1); 7673 7674 /* 7675 * Refer to the comments of setting tagged-qing in the beginning of 7676 * sd_unit_attach. We can only disable tagged queuing when there is 7677 * no lun attached on the target. 7678 */ 7679 if (sd_scsi_get_target_lun_count(pdip, tgt) < 1) { 7680 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1); 7681 } 7682 7683 if (un->un_f_is_fibre == FALSE) { 7684 (void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 0, 1); 7685 } 7686 7687 spinup_failed: 7688 7689 mutex_enter(SD_MUTEX(un)); 7690 7691 /* Cancel callback for SD_PATH_DIRECT_PRIORITY cmd.
restart */ 7692 if (un->un_direct_priority_timeid != NULL) { 7693 timeout_id_t temp_id = un->un_direct_priority_timeid; 7694 un->un_direct_priority_timeid = NULL; 7695 mutex_exit(SD_MUTEX(un)); 7696 (void) untimeout(temp_id); 7697 mutex_enter(SD_MUTEX(un)); 7698 } 7699 7700 /* Cancel any pending start/stop timeouts */ 7701 if (un->un_startstop_timeid != NULL) { 7702 timeout_id_t temp_id = un->un_startstop_timeid; 7703 un->un_startstop_timeid = NULL; 7704 mutex_exit(SD_MUTEX(un)); 7705 (void) untimeout(temp_id); 7706 mutex_enter(SD_MUTEX(un)); 7707 } 7708 7709 /* Cancel any pending reset-throttle timeouts */ 7710 if (un->un_reset_throttle_timeid != NULL) { 7711 timeout_id_t temp_id = un->un_reset_throttle_timeid; 7712 un->un_reset_throttle_timeid = NULL; 7713 mutex_exit(SD_MUTEX(un)); 7714 (void) untimeout(temp_id); 7715 mutex_enter(SD_MUTEX(un)); 7716 } 7717 7718 /* Cancel any pending retry timeouts */ 7719 if (un->un_retry_timeid != NULL) { 7720 timeout_id_t temp_id = un->un_retry_timeid; 7721 un->un_retry_timeid = NULL; 7722 mutex_exit(SD_MUTEX(un)); 7723 (void) untimeout(temp_id); 7724 mutex_enter(SD_MUTEX(un)); 7725 } 7726 7727 /* Cancel any pending delayed cv broadcast timeouts */ 7728 if (un->un_dcvb_timeid != NULL) { 7729 timeout_id_t temp_id = un->un_dcvb_timeid; 7730 un->un_dcvb_timeid = NULL; 7731 mutex_exit(SD_MUTEX(un)); 7732 (void) untimeout(temp_id); 7733 mutex_enter(SD_MUTEX(un)); 7734 } 7735 7736 mutex_exit(SD_MUTEX(un)); 7737 7738 /* There should not be any in-progress I/O so ASSERT this check */ 7739 ASSERT(un->un_ncmds_in_transport == 0); 7740 ASSERT(un->un_ncmds_in_driver == 0); 7741 7742 /* Do not free the softstate if the callback routine is active */ 7743 sd_sync_with_callback(un); 7744 7745 /* 7746 * Partition stats apparently are not used with removables. These would 7747 * not have been created during attach, so no need to clean them up... 7748 */ 7749 if (un->un_errstats != NULL) { 7750 kstat_delete(un->un_errstats); 7751 un->un_errstats = NULL; 7752 } 7753 7754 create_errstats_failed: 7755 7756 if (un->un_stats != NULL) { 7757 kstat_delete(un->un_stats); 7758 un->un_stats = NULL; 7759 } 7760 7761 ddi_xbuf_attr_unregister_devinfo(un->un_xbuf_attr, devi); 7762 ddi_xbuf_attr_destroy(un->un_xbuf_attr); 7763 7764 ddi_prop_remove_all(devi); 7765 sema_destroy(&un->un_semoclose); 7766 cv_destroy(&un->un_state_cv); 7767 7768 getrbuf_failed: 7769 7770 sd_free_rqs(un); 7771 7772 alloc_rqs_failed: 7773 7774 devp->sd_private = NULL; 7775 bzero(un, sizeof (struct sd_lun)); /* Clear any stale data! */ 7776 7777 get_softstate_failed: 7778 /* 7779 * Note: the man pages are unclear as to whether or not doing a 7780 * ddi_soft_state_free(sd_state, instance) is the right way to 7781 * clean up after the ddi_soft_state_zalloc() if the subsequent 7782 * ddi_get_soft_state() fails. The implication seems to be 7783 * that the get_soft_state cannot fail if the zalloc succeeds. 7784 */ 7785 ddi_soft_state_free(sd_state, instance); 7786 7787 probe_failed: 7788 scsi_unprobe(devp); 7789 7790 return (DDI_FAILURE); 7791 } 7792 7793 7794 /* 7795 * Function: sd_unit_detach 7796 * 7797 * Description: Performs DDI_DETACH processing for sddetach(). 
7798 * 7799 * Return Code: DDI_SUCCESS 7800 * DDI_FAILURE 7801 * 7802 * Context: Kernel thread context 7803 */ 7804 7805 static int 7806 sd_unit_detach(dev_info_t *devi) 7807 { 7808 struct scsi_device *devp; 7809 struct sd_lun *un; 7810 int i; 7811 int tgt; 7812 dev_t dev; 7813 dev_info_t *pdip = ddi_get_parent(devi); 7814 int instance = ddi_get_instance(devi); 7815 7816 mutex_enter(&sd_detach_mutex); 7817 7818 /* 7819 * Fail the detach for any of the following: 7820 * - Unable to get the sd_lun struct for the instance 7821 * - A layered driver has an outstanding open on the instance 7822 * - Another thread is already detaching this instance 7823 * - Another thread is currently performing an open 7824 */ 7825 devp = ddi_get_driver_private(devi); 7826 if ((devp == NULL) || 7827 ((un = (struct sd_lun *)devp->sd_private) == NULL) || 7828 (un->un_ncmds_in_driver != 0) || (un->un_layer_count != 0) || 7829 (un->un_detach_count != 0) || (un->un_opens_in_progress != 0)) { 7830 mutex_exit(&sd_detach_mutex); 7831 return (DDI_FAILURE); 7832 } 7833 7834 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_unit_detach: entry 0x%p\n", un); 7835 7836 /* 7837 * Mark this instance as currently in a detach, to inhibit any 7838 * opens from a layered driver. 7839 */ 7840 un->un_detach_count++; 7841 mutex_exit(&sd_detach_mutex); 7842 7843 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS, 7844 SCSI_ADDR_PROP_TARGET, -1); 7845 7846 dev = sd_make_device(SD_DEVINFO(un)); 7847 7848 #ifndef lint 7849 _NOTE(COMPETING_THREADS_NOW); 7850 #endif 7851 7852 mutex_enter(SD_MUTEX(un)); 7853 7854 /* 7855 * Fail the detach if there are any outstanding layered 7856 * opens on this device. 7857 */ 7858 for (i = 0; i < NDKMAP; i++) { 7859 if (un->un_ocmap.lyropen[i] != 0) { 7860 goto err_notclosed; 7861 } 7862 } 7863 7864 /* 7865 * Verify there are NO outstanding commands issued to this device. 7866 * ie, un_ncmds_in_transport == 0. 7867 * It's possible to have outstanding commands through the physio 7868 * code path, even though everything's closed. 7869 */ 7870 if ((un->un_ncmds_in_transport != 0) || (un->un_retry_timeid != NULL) || 7871 (un->un_direct_priority_timeid != NULL) || 7872 (un->un_state == SD_STATE_RWAIT)) { 7873 mutex_exit(SD_MUTEX(un)); 7874 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 7875 "sd_dr_detach: Detach failure due to outstanding cmds\n"); 7876 goto err_stillbusy; 7877 } 7878 7879 /* 7880 * If we have the device reserved, release the reservation. 7881 */ 7882 if ((un->un_resvd_status & SD_RESERVE) && 7883 !(un->un_resvd_status & SD_LOST_RESERVE)) { 7884 mutex_exit(SD_MUTEX(un)); 7885 /* 7886 * Note: sd_reserve_release sends a command to the device 7887 * via the sd_ioctlcmd() path, and can sleep. 7888 */ 7889 if (sd_reserve_release(dev, SD_RELEASE) != 0) { 7890 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 7891 "sd_dr_detach: Cannot release reservation \n"); 7892 } 7893 } else { 7894 mutex_exit(SD_MUTEX(un)); 7895 } 7896 7897 /* 7898 * Untimeout any reserve recover, throttle reset, restart unit 7899 * and delayed broadcast timeout threads. Protect the timeout pointer 7900 * from getting nulled by their callback functions. 
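 * The cancellation pattern used below is, in sketch form (un_foo_timeid
 * is a stand-in for each of the specific timeout ids, not a real field):
 *
 *	if (un->un_foo_timeid != NULL) {
 *		timeout_id_t temp_id = un->un_foo_timeid;
 *		un->un_foo_timeid = NULL;
 *		mutex_exit(SD_MUTEX(un));
 *		(void) untimeout(temp_id);
 *		mutex_enter(SD_MUTEX(un));
 *	}
 *
 * The id is captured and cleared while SD_MUTEX is held, and then
 * untimeout() is called with the mutex dropped, so a callback that is
 * already running and needs SD_MUTEX cannot deadlock against us.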
7901 */ 7902 mutex_enter(SD_MUTEX(un)); 7903 if (un->un_resvd_timeid != NULL) { 7904 timeout_id_t temp_id = un->un_resvd_timeid; 7905 un->un_resvd_timeid = NULL; 7906 mutex_exit(SD_MUTEX(un)); 7907 (void) untimeout(temp_id); 7908 mutex_enter(SD_MUTEX(un)); 7909 } 7910 7911 if (un->un_reset_throttle_timeid != NULL) { 7912 timeout_id_t temp_id = un->un_reset_throttle_timeid; 7913 un->un_reset_throttle_timeid = NULL; 7914 mutex_exit(SD_MUTEX(un)); 7915 (void) untimeout(temp_id); 7916 mutex_enter(SD_MUTEX(un)); 7917 } 7918 7919 if (un->un_startstop_timeid != NULL) { 7920 timeout_id_t temp_id = un->un_startstop_timeid; 7921 un->un_startstop_timeid = NULL; 7922 mutex_exit(SD_MUTEX(un)); 7923 (void) untimeout(temp_id); 7924 mutex_enter(SD_MUTEX(un)); 7925 } 7926 7927 if (un->un_dcvb_timeid != NULL) { 7928 timeout_id_t temp_id = un->un_dcvb_timeid; 7929 un->un_dcvb_timeid = NULL; 7930 mutex_exit(SD_MUTEX(un)); 7931 (void) untimeout(temp_id); 7932 } else { 7933 mutex_exit(SD_MUTEX(un)); 7934 } 7935 7936 /* Remove any pending reservation reclaim requests for this device */ 7937 sd_rmv_resv_reclaim_req(dev); 7938 7939 mutex_enter(SD_MUTEX(un)); 7940 7941 /* Cancel any pending callbacks for SD_PATH_DIRECT_PRIORITY cmd. */ 7942 if (un->un_direct_priority_timeid != NULL) { 7943 timeout_id_t temp_id = un->un_direct_priority_timeid; 7944 un->un_direct_priority_timeid = NULL; 7945 mutex_exit(SD_MUTEX(un)); 7946 (void) untimeout(temp_id); 7947 mutex_enter(SD_MUTEX(un)); 7948 } 7949 7950 /* Cancel any active multi-host disk watch thread requests */ 7951 if (un->un_mhd_token != NULL) { 7952 mutex_exit(SD_MUTEX(un)); 7953 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_mhd_token)); 7954 if (scsi_watch_request_terminate(un->un_mhd_token, 7955 SCSI_WATCH_TERMINATE_NOWAIT)) { 7956 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 7957 "sd_dr_detach: Cannot cancel mhd watch request\n"); 7958 /* 7959 * Note: We are returning here after having removed 7960 * some driver timeouts above. This is consistent with 7961 * the legacy implementation but perhaps the watch 7962 * terminate call should be made with the wait flag set. 7963 */ 7964 goto err_stillbusy; 7965 } 7966 mutex_enter(SD_MUTEX(un)); 7967 un->un_mhd_token = NULL; 7968 } 7969 7970 if (un->un_swr_token != NULL) { 7971 mutex_exit(SD_MUTEX(un)); 7972 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_swr_token)); 7973 if (scsi_watch_request_terminate(un->un_swr_token, 7974 SCSI_WATCH_TERMINATE_NOWAIT)) { 7975 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 7976 "sd_dr_detach: Cannot cancel swr watch request\n"); 7977 /* 7978 * Note: We are returning here after having removed 7979 * some driver timeouts above. This is consistent with 7980 * the legacy implementation but perhaps the watch 7981 * terminate call should be made with the wait flag set. 7982 */ 7983 goto err_stillbusy; 7984 } 7985 mutex_enter(SD_MUTEX(un)); 7986 un->un_swr_token = NULL; 7987 } 7988 7989 mutex_exit(SD_MUTEX(un)); 7990 7991 /* 7992 * Clear any scsi_reset_notifies. We clear the reset notifies 7993 * if we have not registered one. 7994 * Note: The sd_mhd_reset_notify_cb() fn tries to acquire SD_MUTEX! 7995 */ 7996 (void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_CANCEL, 7997 sd_mhd_reset_notify_cb, (caddr_t)un); 7998 7999 /* 8000 * protect the timeout pointers from getting nulled by 8001 * their callback functions during the cancellation process. 8002 * In such a scenario untimeout can be invoked with a null value. 
8003 */ 8004 _NOTE(NO_COMPETING_THREADS_NOW); 8005 8006 mutex_enter(&un->un_pm_mutex); 8007 if (un->un_pm_idle_timeid != NULL) { 8008 timeout_id_t temp_id = un->un_pm_idle_timeid; 8009 un->un_pm_idle_timeid = NULL; 8010 mutex_exit(&un->un_pm_mutex); 8011 8012 /* 8013 * Timeout is active; cancel it. 8014 * Note that it'll never be active on a device 8015 * that does not support PM; therefore we don't 8016 * have to check before calling pm_idle_component. 8017 */ 8018 (void) untimeout(temp_id); 8019 (void) pm_idle_component(SD_DEVINFO(un), 0); 8020 mutex_enter(&un->un_pm_mutex); 8021 } 8022 8023 /* 8024 * Check whether there is already a timeout scheduled for power 8025 * management. If so, don't lower the power here; that's 8026 * the timeout handler's job. 8027 */ 8028 if (un->un_pm_timeid != NULL) { 8029 timeout_id_t temp_id = un->un_pm_timeid; 8030 un->un_pm_timeid = NULL; 8031 mutex_exit(&un->un_pm_mutex); 8032 /* 8033 * Timeout is active; cancel it. 8034 * Note that it'll never be active on a device 8035 * that does not support PM; therefore we don't 8036 * have to check before calling pm_idle_component. 8037 */ 8038 (void) untimeout(temp_id); 8039 (void) pm_idle_component(SD_DEVINFO(un), 0); 8040 8041 } else { 8042 mutex_exit(&un->un_pm_mutex); 8043 if ((un->un_f_pm_is_enabled == TRUE) && 8044 (pm_lower_power(SD_DEVINFO(un), 0, SD_SPINDLE_OFF) != 8045 DDI_SUCCESS)) { 8046 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8047 "sd_dr_detach: Lower power request failed, ignoring.\n"); 8048 /* 8049 * Fix for bug: 4297749, item # 13 8050 * The above test now includes a check to see if PM is 8051 * supported by this device before calling 8052 * pm_lower_power(). 8053 * Note, the following is not dead code. The call to 8054 * pm_lower_power above will generate a call back into 8055 * our sdpower routine which might result in a timeout 8056 * handler getting activated. Therefore the following 8057 * code is valid and necessary. 8058 */ 8059 mutex_enter(&un->un_pm_mutex); 8060 if (un->un_pm_timeid != NULL) { 8061 timeout_id_t temp_id = un->un_pm_timeid; 8062 un->un_pm_timeid = NULL; 8063 mutex_exit(&un->un_pm_mutex); 8064 (void) untimeout(temp_id); 8065 (void) pm_idle_component(SD_DEVINFO(un), 0); 8066 } else { 8067 mutex_exit(&un->un_pm_mutex); 8068 } 8069 } 8070 } 8071 8072 /* 8073 * Cleanup from the scsi_ifsetcap() calls (437868) 8074 * Relocated here from above to be after the call to 8075 * pm_lower_power, which was getting errors. 8076 */ 8077 (void) scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 0, 1); 8078 (void) scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 0, 1); 8079 8080 /* 8081 * Currently, tagged queuing is supported per target by the HBA. 8082 * Setting this per lun instance actually sets the capability of this 8083 * target in the HBA, which affects those luns already attached on the 8084 * same target. So during detach, we can disable this capability 8085 * only when this is the last lun left on this target. By doing 8086 * this, we assume a target has the same tagged queuing capability 8087 * for every lun. The condition can be removed when the HBA is changed 8088 * to support per-lun tagged queuing capability.
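 * For example, if luns 0 and 1 are both attached on target 2, detaching
 * lun 1 must leave tagged queuing enabled for lun 0; only when the last
 * lun on the target is detached may the capability be cleared, which is
 * what the lun-count check below enforces.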
8089 */ 8090 if (sd_scsi_get_target_lun_count(pdip, tgt) <= 1) { 8091 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1); 8092 } 8093 8094 if (un->un_f_is_fibre == FALSE) { 8095 (void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 0, 1); 8096 } 8097 8098 /* 8099 * Remove any event callbacks, fibre only 8100 */ 8101 if (un->un_f_is_fibre == TRUE) { 8102 if ((un->un_insert_event != NULL) && 8103 (ddi_remove_event_handler(un->un_insert_cb_id) != 8104 DDI_SUCCESS)) { 8105 /* 8106 * Note: We are returning here after having done 8107 * substantial cleanup above. This is consistent 8108 * with the legacy implementation but this may not 8109 * be the right thing to do. 8110 */ 8111 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8112 "sd_dr_detach: Cannot cancel insert event\n"); 8113 goto err_remove_event; 8114 } 8115 un->un_insert_event = NULL; 8116 8117 if ((un->un_remove_event != NULL) && 8118 (ddi_remove_event_handler(un->un_remove_cb_id) != 8119 DDI_SUCCESS)) { 8120 /* 8121 * Note: We are returning here after having done 8122 * substantial cleanup above. This is consistent 8123 * with the legacy implementation but this may not 8124 * be the right thing to do. 8125 */ 8126 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8127 "sd_dr_detach: Cannot cancel remove event\n"); 8128 goto err_remove_event; 8129 } 8130 un->un_remove_event = NULL; 8131 } 8132 8133 /* Do not free the softstate if the callback routine is active */ 8134 sd_sync_with_callback(un); 8135 8136 cmlb_detach(un->un_cmlbhandle, (void *)SD_PATH_DIRECT); 8137 cmlb_free_handle(&un->un_cmlbhandle); 8138 8139 /* 8140 * Hold the detach mutex here, to make sure that no other threads ever 8141 * can access a (partially) freed soft state structure. 8142 */ 8143 mutex_enter(&sd_detach_mutex); 8144 8145 /* 8146 * Clean up the soft state struct. 8147 * Cleanup is done in reverse order of allocs/inits. 8148 * At this point there should be no competing threads anymore. 8149 */ 8150 8151 /* Unregister and free device id. */ 8152 ddi_devid_unregister(devi); 8153 if (un->un_devid) { 8154 ddi_devid_free(un->un_devid); 8155 un->un_devid = NULL; 8156 } 8157 8158 /* 8159 * Destroy wmap cache if it exists. 8160 */ 8161 if (un->un_wm_cache != NULL) { 8162 kmem_cache_destroy(un->un_wm_cache); 8163 un->un_wm_cache = NULL; 8164 } 8165 8166 /* 8167 * kstat cleanup is done in detach for all device types (4363169). 8168 * We do not want to fail detach if the device kstats are not deleted 8169 * since there is a confusion about the devo_refcnt for the device. 8170 * We just delete the kstats and let detach complete successfully. 8171 */ 8172 if (un->un_stats != NULL) { 8173 kstat_delete(un->un_stats); 8174 un->un_stats = NULL; 8175 } 8176 if (un->un_errstats != NULL) { 8177 kstat_delete(un->un_errstats); 8178 un->un_errstats = NULL; 8179 } 8180 8181 /* Remove partition stats */ 8182 if (un->un_f_pkstats_enabled) { 8183 for (i = 0; i < NSDMAP; i++) { 8184 if (un->un_pstats[i] != NULL) { 8185 kstat_delete(un->un_pstats[i]); 8186 un->un_pstats[i] = NULL; 8187 } 8188 } 8189 } 8190 8191 /* Remove xbuf registration */ 8192 ddi_xbuf_attr_unregister_devinfo(un->un_xbuf_attr, devi); 8193 ddi_xbuf_attr_destroy(un->un_xbuf_attr); 8194 8195 /* Remove driver properties */ 8196 ddi_prop_remove_all(devi); 8197 8198 mutex_destroy(&un->un_pm_mutex); 8199 cv_destroy(&un->un_pm_busy_cv); 8200 8201 cv_destroy(&un->un_wcc_cv); 8202 8203 /* Open/close semaphore */ 8204 sema_destroy(&un->un_semoclose); 8205 8206 /* Removable media condvar. 
*/ 8207 cv_destroy(&un->un_state_cv); 8208 8209 /* Suspend/resume condvar. */ 8210 cv_destroy(&un->un_suspend_cv); 8211 cv_destroy(&un->un_disk_busy_cv); 8212 8213 sd_free_rqs(un); 8214 8215 /* Free up soft state */ 8216 devp->sd_private = NULL; 8217 8218 bzero(un, sizeof (struct sd_lun)); 8219 ddi_soft_state_free(sd_state, instance); 8220 8221 mutex_exit(&sd_detach_mutex); 8222 8223 /* This frees up the INQUIRY data associated with the device. */ 8224 scsi_unprobe(devp); 8225 8226 /* 8227 * After successfully detaching an instance, we update the information 8228 * of how many luns have been attached in the relative target and 8229 * controller for parallel SCSI. This information is used when sd tries 8230 * to set the tagged queuing capability in HBA. 8231 * Since un has been released, we can't use SD_IS_PARALLEL_SCSI(un) to 8232 * check if the device is parallel SCSI. However, we don't need to 8233 * check here because we've already checked during attach. No device 8234 * that is not parallel SCSI is in the chain. 8235 */ 8236 if ((tgt >= 0) && (tgt < NTARGETS_WIDE)) { 8237 sd_scsi_update_lun_on_target(pdip, tgt, SD_SCSI_LUN_DETACH); 8238 } 8239 8240 return (DDI_SUCCESS); 8241 8242 err_notclosed: 8243 mutex_exit(SD_MUTEX(un)); 8244 8245 err_stillbusy: 8246 _NOTE(NO_COMPETING_THREADS_NOW); 8247 8248 err_remove_event: 8249 mutex_enter(&sd_detach_mutex); 8250 un->un_detach_count--; 8251 mutex_exit(&sd_detach_mutex); 8252 8253 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_unit_detach: exit failure\n"); 8254 return (DDI_FAILURE); 8255 } 8256 8257 8258 /* 8259 * Function: sd_create_errstats 8260 * 8261 * Description: This routine instantiates the device error stats. 8262 * 8263 * Note: During attach the stats are instantiated first so they are 8264 * available for attach-time routines that utilize the driver 8265 * iopath to send commands to the device. The stats are initialized 8266 * separately so data obtained during some attach-time routines is 8267 * available. 
(4362483) 8268 * 8269 * Arguments: un - driver soft state (unit) structure 8270 * instance - driver instance 8271 * 8272 * Context: Kernel thread context 8273 */ 8274 8275 static void 8276 sd_create_errstats(struct sd_lun *un, int instance) 8277 { 8278 struct sd_errstats *stp; 8279 char kstatmodule_err[KSTAT_STRLEN]; 8280 char kstatname[KSTAT_STRLEN]; 8281 int ndata = (sizeof (struct sd_errstats) / sizeof (kstat_named_t)); 8282 8283 ASSERT(un != NULL); 8284 8285 if (un->un_errstats != NULL) { 8286 return; 8287 } 8288 8289 (void) snprintf(kstatmodule_err, sizeof (kstatmodule_err), 8290 "%serr", sd_label); 8291 (void) snprintf(kstatname, sizeof (kstatname), 8292 "%s%d,err", sd_label, instance); 8293 8294 un->un_errstats = kstat_create(kstatmodule_err, instance, kstatname, 8295 "device_error", KSTAT_TYPE_NAMED, ndata, KSTAT_FLAG_PERSISTENT); 8296 8297 if (un->un_errstats == NULL) { 8298 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8299 "sd_create_errstats: Failed kstat_create\n"); 8300 return; 8301 } 8302 8303 stp = (struct sd_errstats *)un->un_errstats->ks_data; 8304 kstat_named_init(&stp->sd_softerrs, "Soft Errors", 8305 KSTAT_DATA_UINT32); 8306 kstat_named_init(&stp->sd_harderrs, "Hard Errors", 8307 KSTAT_DATA_UINT32); 8308 kstat_named_init(&stp->sd_transerrs, "Transport Errors", 8309 KSTAT_DATA_UINT32); 8310 kstat_named_init(&stp->sd_vid, "Vendor", 8311 KSTAT_DATA_CHAR); 8312 kstat_named_init(&stp->sd_pid, "Product", 8313 KSTAT_DATA_CHAR); 8314 kstat_named_init(&stp->sd_revision, "Revision", 8315 KSTAT_DATA_CHAR); 8316 kstat_named_init(&stp->sd_serial, "Serial No", 8317 KSTAT_DATA_CHAR); 8318 kstat_named_init(&stp->sd_capacity, "Size", 8319 KSTAT_DATA_ULONGLONG); 8320 kstat_named_init(&stp->sd_rq_media_err, "Media Error", 8321 KSTAT_DATA_UINT32); 8322 kstat_named_init(&stp->sd_rq_ntrdy_err, "Device Not Ready", 8323 KSTAT_DATA_UINT32); 8324 kstat_named_init(&stp->sd_rq_nodev_err, "No Device", 8325 KSTAT_DATA_UINT32); 8326 kstat_named_init(&stp->sd_rq_recov_err, "Recoverable", 8327 KSTAT_DATA_UINT32); 8328 kstat_named_init(&stp->sd_rq_illrq_err, "Illegal Request", 8329 KSTAT_DATA_UINT32); 8330 kstat_named_init(&stp->sd_rq_pfa_err, "Predictive Failure Analysis", 8331 KSTAT_DATA_UINT32); 8332 8333 un->un_errstats->ks_private = un; 8334 un->un_errstats->ks_update = nulldev; 8335 8336 kstat_install(un->un_errstats); 8337 } 8338 8339 8340 /* 8341 * Function: sd_set_errstats 8342 * 8343 * Description: This routine sets the value of the vendor id, product id, 8344 * revision, serial number, and capacity device error stats. 8345 * 8346 * Note: During attach the stats are instantiated first so they are 8347 * available for attach-time routines that utilize the driver 8348 * iopath to send commands to the device. The stats are initialized 8349 * separately so data obtained during some attach-time routines is 8350 * available. 
(4362483) 8351 * 8352 * Arguments: un - driver soft state (unit) structure 8353 * 8354 * Context: Kernel thread context 8355 */ 8356 8357 static void 8358 sd_set_errstats(struct sd_lun *un) 8359 { 8360 struct sd_errstats *stp; 8361 8362 ASSERT(un != NULL); 8363 ASSERT(un->un_errstats != NULL); 8364 stp = (struct sd_errstats *)un->un_errstats->ks_data; 8365 ASSERT(stp != NULL); 8366 (void) strncpy(stp->sd_vid.value.c, un->un_sd->sd_inq->inq_vid, 8); 8367 (void) strncpy(stp->sd_pid.value.c, un->un_sd->sd_inq->inq_pid, 16); 8368 (void) strncpy(stp->sd_revision.value.c, 8369 un->un_sd->sd_inq->inq_revision, 4); 8370 8371 /* 8372 * All the errstats are persistent across detach/attach, 8373 * so reset them all here in case of the hot 8374 * replacement of disk drives, except for unchanged 8375 * Sun-qualified drives. 8376 */ 8377 if ((bcmp(&SD_INQUIRY(un)->inq_pid[9], "SUN", 3) != 0) || 8378 (bcmp(&SD_INQUIRY(un)->inq_serial, stp->sd_serial.value.c, 8379 sizeof (SD_INQUIRY(un)->inq_serial)) != 0)) { 8380 stp->sd_softerrs.value.ui32 = 0; 8381 stp->sd_harderrs.value.ui32 = 0; 8382 stp->sd_transerrs.value.ui32 = 0; 8383 stp->sd_rq_media_err.value.ui32 = 0; 8384 stp->sd_rq_ntrdy_err.value.ui32 = 0; 8385 stp->sd_rq_nodev_err.value.ui32 = 0; 8386 stp->sd_rq_recov_err.value.ui32 = 0; 8387 stp->sd_rq_illrq_err.value.ui32 = 0; 8388 stp->sd_rq_pfa_err.value.ui32 = 0; 8389 } 8390 8391 /* 8392 * Set the "Serial No" kstat for Sun qualified drives (indicated by 8393 * "SUN" in bytes 25-27 of the inquiry data (bytes 9-11 of the pid) 8394 * (4376302)) 8395 */ 8396 if (bcmp(&SD_INQUIRY(un)->inq_pid[9], "SUN", 3) == 0) { 8397 bcopy(&SD_INQUIRY(un)->inq_serial, stp->sd_serial.value.c, 8398 sizeof (SD_INQUIRY(un)->inq_serial)); 8399 } 8400 8401 if (un->un_f_blockcount_is_valid != TRUE) { 8402 /* 8403 * Set capacity error stat to 0 for no media. This ensures 8404 * a valid capacity is displayed in response to 'iostat -E' 8405 * when no media is present in the device. 8406 */ 8407 stp->sd_capacity.value.ui64 = 0; 8408 } else { 8409 /* 8410 * Multiply un_blockcount by un->un_sys_blocksize to get 8411 * capacity. 8412 * 8413 * Note: for non-512 blocksize devices "un_blockcount" has been 8414 * "scaled" in sd_send_scsi_READ_CAPACITY by multiplying by 8415 * (un_tgt_blocksize / un->un_sys_blocksize). 8416 */ 8417 stp->sd_capacity.value.ui64 = (uint64_t) 8418 ((uint64_t)un->un_blockcount * un->un_sys_blocksize); 8419 } 8420 } 8421 8422 8423 /* 8424 * Function: sd_set_pstats 8425 * 8426 * Description: This routine instantiates and initializes the partition 8427 * stats for each partition with more than zero blocks. 8428 * (4363169) 8429 * 8430 * Arguments: un - driver soft state (unit) structure 8431 * 8432 * Context: Kernel thread context 8433 */ 8434 8435 static void 8436 sd_set_pstats(struct sd_lun *un) 8437 { 8438 char kstatname[KSTAT_STRLEN]; 8439 int instance; 8440 int i; 8441 diskaddr_t nblks = 0; 8442 char *partname = NULL; 8443 8444 ASSERT(un != NULL); 8445 8446 instance = ddi_get_instance(SD_DEVINFO(un)); 8447 8448 /* Note: x86: is this a VTOC8/VTOC16 difference?
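 *
 * (For illustration: with sd_label "sd", instance 0, and a partition
 * named "a" by cmlb, the snprintf below builds the kstat name "sd0,a".)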
*/ 8449 for (i = 0; i < NSDMAP; i++) { 8450 8451 if (cmlb_partinfo(un->un_cmlbhandle, i, 8452 &nblks, NULL, &partname, NULL, (void *)SD_PATH_DIRECT) != 0) 8453 continue; 8454 mutex_enter(SD_MUTEX(un)); 8455 8456 if ((un->un_pstats[i] == NULL) && 8457 (nblks != 0)) { 8458 8459 (void) snprintf(kstatname, sizeof (kstatname), 8460 "%s%d,%s", sd_label, instance, 8461 partname); 8462 8463 un->un_pstats[i] = kstat_create(sd_label, 8464 instance, kstatname, "partition", KSTAT_TYPE_IO, 8465 1, KSTAT_FLAG_PERSISTENT); 8466 if (un->un_pstats[i] != NULL) { 8467 un->un_pstats[i]->ks_lock = SD_MUTEX(un); 8468 kstat_install(un->un_pstats[i]); 8469 } 8470 } 8471 mutex_exit(SD_MUTEX(un)); 8472 } 8473 } 8474 8475 8476 #if (defined(__fibre)) 8477 /* 8478 * Function: sd_init_event_callbacks 8479 * 8480 * Description: This routine initializes the insertion and removal event 8481 * callbacks. (fibre only) 8482 * 8483 * Arguments: un - driver soft state (unit) structure 8484 * 8485 * Context: Kernel thread context 8486 */ 8487 8488 static void 8489 sd_init_event_callbacks(struct sd_lun *un) 8490 { 8491 ASSERT(un != NULL); 8492 8493 if ((un->un_insert_event == NULL) && 8494 (ddi_get_eventcookie(SD_DEVINFO(un), FCAL_INSERT_EVENT, 8495 &un->un_insert_event) == DDI_SUCCESS)) { 8496 /* 8497 * Add the callback for an insertion event 8498 */ 8499 (void) ddi_add_event_handler(SD_DEVINFO(un), 8500 un->un_insert_event, sd_event_callback, (void *)un, 8501 &(un->un_insert_cb_id)); 8502 } 8503 8504 if ((un->un_remove_event == NULL) && 8505 (ddi_get_eventcookie(SD_DEVINFO(un), FCAL_REMOVE_EVENT, 8506 &un->un_remove_event) == DDI_SUCCESS)) { 8507 /* 8508 * Add the callback for a removal event 8509 */ 8510 (void) ddi_add_event_handler(SD_DEVINFO(un), 8511 un->un_remove_event, sd_event_callback, (void *)un, 8512 &(un->un_remove_cb_id)); 8513 } 8514 } 8515 8516 8517 /* 8518 * Function: sd_event_callback 8519 * 8520 * Description: This routine handles insert/remove events (photon). The 8521 * state is changed to OFFLINE which can be used to suppress 8522 * error msgs. (fibre only) 8523 * 8524 * Arguments: un - driver soft state (unit) structure 8525 * 8526 * Context: Callout thread context 8527 */ 8528 /* ARGSUSED */ 8529 static void 8530 sd_event_callback(dev_info_t *dip, ddi_eventcookie_t event, void *arg, 8531 void *bus_impldata) 8532 { 8533 struct sd_lun *un = (struct sd_lun *)arg; 8534 8535 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_insert_event)); 8536 if (event == un->un_insert_event) { 8537 SD_TRACE(SD_LOG_COMMON, un, "sd_event_callback: insert event"); 8538 mutex_enter(SD_MUTEX(un)); 8539 if (un->un_state == SD_STATE_OFFLINE) { 8540 if (un->un_last_state != SD_STATE_SUSPENDED) { 8541 un->un_state = un->un_last_state; 8542 } else { 8543 /* 8544 * We have gone through SUSPEND/RESUME while 8545 * we were offline. Restore the last state. 8546 */ 8547 un->un_state = un->un_save_state; 8548 } 8549 } 8550 mutex_exit(SD_MUTEX(un)); 8551 8552 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_remove_event)); 8553 } else if (event == un->un_remove_event) { 8554 SD_TRACE(SD_LOG_COMMON, un, "sd_event_callback: remove event"); 8555 mutex_enter(SD_MUTEX(un)); 8556 /* 8557 * We need to handle an event callback that occurs during 8558 * the suspend operation, since we don't prevent it.
8559 */ 8560 if (un->un_state != SD_STATE_OFFLINE) { 8561 if (un->un_state != SD_STATE_SUSPENDED) { 8562 New_state(un, SD_STATE_OFFLINE); 8563 } else { 8564 un->un_last_state = SD_STATE_OFFLINE; 8565 } 8566 } 8567 mutex_exit(SD_MUTEX(un)); 8568 } else { 8569 scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE, 8570 "!Unknown event\n"); 8571 } 8572 8573 } 8574 #endif 8575 8576 /* 8577 * Function: sd_cache_control() 8578 * 8579 * Description: This routine is the driver entry point for setting 8580 * read and write caching by modifying the WCE (write cache 8581 * enable) and RCD (read cache disable) bits of mode 8582 * page 8 (MODEPAGE_CACHING). 8583 * 8584 * Arguments: un - driver soft state (unit) structure 8585 * rcd_flag - flag for controlling the read cache 8586 * wce_flag - flag for controlling the write cache 8587 * 8588 * Return Code: EIO 8589 * code returned by sd_send_scsi_MODE_SENSE and 8590 * sd_send_scsi_MODE_SELECT 8591 * 8592 * Context: Kernel Thread 8593 */ 8594 8595 static int 8596 sd_cache_control(struct sd_lun *un, int rcd_flag, int wce_flag) 8597 { 8598 struct mode_caching *mode_caching_page; 8599 uchar_t *header; 8600 size_t buflen; 8601 int hdrlen; 8602 int bd_len; 8603 int rval = 0; 8604 struct mode_header_grp2 *mhp; 8605 8606 ASSERT(un != NULL); 8607 8608 /* 8609 * Do a test unit ready; otherwise, a mode sense may not work if this 8610 * is the first command sent to the device after boot. 8611 */ 8612 (void) sd_send_scsi_TEST_UNIT_READY(un, 0); 8613 8614 if (un->un_f_cfg_is_atapi == TRUE) { 8615 hdrlen = MODE_HEADER_LENGTH_GRP2; 8616 } else { 8617 hdrlen = MODE_HEADER_LENGTH; 8618 } 8619 8620 /* 8621 * Allocate memory for the retrieved mode page and its headers. Set 8622 * a pointer to the page itself. Use mode_cache_scsi3 to ensure 8623 * we get all of the mode sense data; otherwise, the mode select 8624 * will fail. mode_cache_scsi3 is a superset of mode_caching. 8625 */ 8626 buflen = hdrlen + MODE_BLK_DESC_LENGTH + 8627 sizeof (struct mode_cache_scsi3); 8628 8629 header = kmem_zalloc(buflen, KM_SLEEP); 8630 8631 /* Get the information from the device. */ 8632 if (un->un_f_cfg_is_atapi == TRUE) { 8633 rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, header, buflen, 8634 MODEPAGE_CACHING, SD_PATH_DIRECT); 8635 } else { 8636 rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, header, buflen, 8637 MODEPAGE_CACHING, SD_PATH_DIRECT); 8638 } 8639 if (rval != 0) { 8640 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 8641 "sd_cache_control: Mode Sense Failed\n"); 8642 kmem_free(header, buflen); 8643 return (rval); 8644 } 8645 8646 /* 8647 * Determine size of Block Descriptors in order to locate 8648 * the mode page data. ATAPI devices return 0, SCSI devices 8649 * should return MODE_BLK_DESC_LENGTH.
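 *
 * A sketch of the buffer parsed here (the offsets follow from the code
 * below; bd_len is 0 for ATAPI):
 *
 *	header                      mode header (hdrlen bytes)
 *	header + hdrlen             block descriptor (bd_len bytes)
 *	header + hdrlen + bd_len    caching mode page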
8650 */ 8651 if (un->un_f_cfg_is_atapi == TRUE) { 8652 mhp = (struct mode_header_grp2 *)header; 8653 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 8654 } else { 8655 bd_len = ((struct mode_header *)header)->bdesc_length; 8656 } 8657 8658 if (bd_len > MODE_BLK_DESC_LENGTH) { 8659 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 8660 "sd_cache_control: Mode Sense returned invalid " 8661 "block descriptor length\n"); 8662 kmem_free(header, buflen); 8663 return (EIO); 8664 } 8665 8666 mode_caching_page = (struct mode_caching *)(header + hdrlen + bd_len); 8667 if (mode_caching_page->mode_page.code != MODEPAGE_CACHING) { 8668 SD_ERROR(SD_LOG_COMMON, un, "sd_cache_control: Mode Sense" 8669 " caching page code mismatch %d\n", 8670 mode_caching_page->mode_page.code); 8671 kmem_free(header, buflen); 8672 return (EIO); 8673 } 8674 8675 /* Check the relevant bits on successful mode sense. */ 8676 if ((mode_caching_page->rcd && rcd_flag == SD_CACHE_ENABLE) || 8677 (!mode_caching_page->rcd && rcd_flag == SD_CACHE_DISABLE) || 8678 (mode_caching_page->wce && wce_flag == SD_CACHE_DISABLE) || 8679 (!mode_caching_page->wce && wce_flag == SD_CACHE_ENABLE)) { 8680 8681 size_t sbuflen; 8682 uchar_t save_pg; 8683 8684 /* 8685 * Construct select buffer length based on the 8686 * length of the sense data returned. 8687 */ 8688 sbuflen = hdrlen + MODE_BLK_DESC_LENGTH + 8689 sizeof (struct mode_page) + 8690 (int)mode_caching_page->mode_page.length; 8691 8692 /* 8693 * Set the caching bits as requested. 8694 */ 8695 if (rcd_flag == SD_CACHE_ENABLE) 8696 mode_caching_page->rcd = 0; 8697 else if (rcd_flag == SD_CACHE_DISABLE) 8698 mode_caching_page->rcd = 1; 8699 8700 if (wce_flag == SD_CACHE_ENABLE) 8701 mode_caching_page->wce = 1; 8702 else if (wce_flag == SD_CACHE_DISABLE) 8703 mode_caching_page->wce = 0; 8704 8705 /* 8706 * Save the page if the mode sense says the 8707 * drive supports it. 8708 */ 8709 save_pg = mode_caching_page->mode_page.ps ? 8710 SD_SAVE_PAGE : SD_DONTSAVE_PAGE; 8711 8712 /* Clear reserved bits before mode select. */ 8713 mode_caching_page->mode_page.ps = 0; 8714 8715 /* 8716 * Clear out mode header for mode select. 8717 * The rest of the retrieved page will be reused. 8718 */ 8719 bzero(header, hdrlen); 8720 8721 if (un->un_f_cfg_is_atapi == TRUE) { 8722 mhp = (struct mode_header_grp2 *)header; 8723 mhp->bdesc_length_hi = bd_len >> 8; 8724 mhp->bdesc_length_lo = (uchar_t)bd_len & 0xff; 8725 } else { 8726 ((struct mode_header *)header)->bdesc_length = bd_len; 8727 } 8728 8729 /* Issue mode select to change the cache settings */ 8730 if (un->un_f_cfg_is_atapi == TRUE) { 8731 rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP1, header, 8732 sbuflen, save_pg, SD_PATH_DIRECT); 8733 } else { 8734 rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, header, 8735 sbuflen, save_pg, SD_PATH_DIRECT); 8736 } 8737 } 8738 8739 kmem_free(header, buflen); 8740 return (rval); 8741 } 8742 8743 8744 /* 8745 * Function: sd_get_write_cache_enabled() 8746 * 8747 * Description: This routine is the driver entry point for determining if 8748 * write caching is enabled. It examines the WCE (write cache 8749 * enable) bits of mode page 8 (MODEPAGE_CACHING). 
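 * (Background note, assuming the standard SBC caching-page layout: WCE
 * is bit 2 and RCD is bit 0 of the first byte after the two-byte page
 * header; the code below relies on the struct mode_caching bitfields
 * rather than raw byte offsets.)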
8750 * 8751 * Arguments: un - driver soft state (unit) structure 8752 * is_enabled - pointer to int where write cache enabled state 8753 * is returned (non-zero -> write cache enabled) 8754 * 8755 * 8756 * Return Code: EIO 8757 * code returned by sd_send_scsi_MODE_SENSE 8758 * 8759 * Context: Kernel Thread 8760 * 8761 * NOTE: If ioctl is added to disable write cache, this sequence should 8762 * be followed so that no locking is required for accesses to 8763 * un->un_f_write_cache_enabled: 8764 * do mode select to clear wce 8765 * do synchronize cache to flush cache 8766 * set un->un_f_write_cache_enabled = FALSE 8767 * 8768 * Conversely, an ioctl to enable the write cache should be done 8769 * in this order: 8770 * set un->un_f_write_cache_enabled = TRUE 8771 * do mode select to set wce 8772 */ 8773 8774 static int 8775 sd_get_write_cache_enabled(struct sd_lun *un, int *is_enabled) 8776 { 8777 struct mode_caching *mode_caching_page; 8778 uchar_t *header; 8779 size_t buflen; 8780 int hdrlen; 8781 int bd_len; 8782 int rval = 0; 8783 8784 ASSERT(un != NULL); 8785 ASSERT(is_enabled != NULL); 8786 8787 /* in case of error, flag as enabled */ 8788 *is_enabled = TRUE; 8789 8790 /* 8791 * Do a test unit ready, otherwise a mode sense may not work if this 8792 * is the first command sent to the device after boot. 8793 */ 8794 (void) sd_send_scsi_TEST_UNIT_READY(un, 0); 8795 8796 if (un->un_f_cfg_is_atapi == TRUE) { 8797 hdrlen = MODE_HEADER_LENGTH_GRP2; 8798 } else { 8799 hdrlen = MODE_HEADER_LENGTH; 8800 } 8801 8802 /* 8803 * Allocate memory for the retrieved mode page and its headers. Set 8804 * a pointer to the page itself. 8805 */ 8806 buflen = hdrlen + MODE_BLK_DESC_LENGTH + sizeof (struct mode_caching); 8807 header = kmem_zalloc(buflen, KM_SLEEP); 8808 8809 /* Get the information from the device. */ 8810 if (un->un_f_cfg_is_atapi == TRUE) { 8811 rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, header, buflen, 8812 MODEPAGE_CACHING, SD_PATH_DIRECT); 8813 } else { 8814 rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, header, buflen, 8815 MODEPAGE_CACHING, SD_PATH_DIRECT); 8816 } 8817 if (rval != 0) { 8818 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 8819 "sd_get_write_cache_enabled: Mode Sense Failed\n"); 8820 kmem_free(header, buflen); 8821 return (rval); 8822 } 8823 8824 /* 8825 * Determine size of Block Descriptors in order to locate 8826 * the mode page data. ATAPI devices return 0, SCSI devices 8827 * should return MODE_BLK_DESC_LENGTH. 
8828 */ 8829 if (un->un_f_cfg_is_atapi == TRUE) { 8830 struct mode_header_grp2 *mhp; 8831 mhp = (struct mode_header_grp2 *)header; 8832 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 8833 } else { 8834 bd_len = ((struct mode_header *)header)->bdesc_length; 8835 } 8836 8837 if (bd_len > MODE_BLK_DESC_LENGTH) { 8838 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 8839 "sd_get_write_cache_enabled: Mode Sense returned invalid " 8840 "block descriptor length\n"); 8841 kmem_free(header, buflen); 8842 return (EIO); 8843 } 8844 8845 mode_caching_page = (struct mode_caching *)(header + hdrlen + bd_len); 8846 if (mode_caching_page->mode_page.code != MODEPAGE_CACHING) { 8847 SD_ERROR(SD_LOG_COMMON, un, "sd_get_write_cache_enabled: Mode Sense" 8848 " caching page code mismatch %d\n", 8849 mode_caching_page->mode_page.code); 8850 kmem_free(header, buflen); 8851 return (EIO); 8852 } 8853 *is_enabled = mode_caching_page->wce; 8854 8855 kmem_free(header, buflen); 8856 return (0); 8857 } 8858 8859 /* 8860 * Function: sd_get_nv_sup() 8861 * 8862 * Description: This routine is the driver entry point for 8863 * determining whether a non-volatile cache is supported. The 8864 * determination process works as follows: 8865 * 8866 * 1. sd first queries sd.conf to see whether the 8867 * suppress_cache_flush bit is set for this device. 8868 * 8869 * 2. if it is not set there, sd queries the internal disk table. 8870 * 8871 * 3. if either sd.conf or the internal disk table specifies that 8872 * cache flush be suppressed, we don't bother checking the 8873 * NV_SUP bit. 8874 * 8875 * If the SUPPRESS_CACHE_FLUSH bit is not set to 1, sd queries 8876 * the optional INQUIRY VPD page 0x86. If the device 8877 * supports VPD page 0x86, sd examines the NV_SUP 8878 * (non-volatile cache support) bit in the INQUIRY VPD page 8879 * 0x86: 8880 * o If the NV_SUP bit is set, sd assumes the device has a 8881 * non-volatile cache and sets 8882 * un_f_sync_nv_supported to TRUE. 8883 * o Otherwise the cache is not non-volatile, and 8884 * un_f_sync_nv_supported is set to FALSE. 8885 * 8886 * Arguments: un - driver soft state (unit) structure 8887 * 8888 * Return Code: none 8889 * 8890 * Context: Kernel Thread 8891 */ 8892 8893 static void 8894 sd_get_nv_sup(struct sd_lun *un) 8895 { 8896 int rval = 0; 8897 uchar_t *inq86 = NULL; 8898 size_t inq86_len = MAX_INQUIRY_SIZE; 8899 size_t inq86_resid = 0; 8900 struct dk_callback *dkc; 8901 8902 ASSERT(un != NULL); 8903 8904 mutex_enter(SD_MUTEX(un)); 8905 8906 /* 8907 * Be conservative about the device's support of the 8908 * SYNC_NV bit: un_f_sync_nv_supported is 8909 * initialized to FALSE. 8910 */ 8911 un->un_f_sync_nv_supported = FALSE; 8912 8913 /* 8914 * If either sd.conf or the internal disk table 8915 * specifies that cache flush be suppressed, then 8916 * we don't bother checking the NV_SUP bit.
8917 */ 8918 if (un->un_f_suppress_cache_flush == TRUE) { 8919 mutex_exit(SD_MUTEX(un)); 8920 return; 8921 } 8922 8923 if (sd_check_vpd_page_support(un) == 0 && 8924 un->un_vpd_page_mask & SD_VPD_EXTENDED_DATA_PG) { 8925 mutex_exit(SD_MUTEX(un)); 8926 /* collect page 86 data if available */ 8927 inq86 = kmem_zalloc(inq86_len, KM_SLEEP); 8928 rval = sd_send_scsi_INQUIRY(un, inq86, inq86_len, 8929 0x01, 0x86, &inq86_resid); 8930 8931 if (rval == 0 && (inq86_len - inq86_resid > 6)) { 8932 SD_TRACE(SD_LOG_COMMON, un, 8933 "sd_get_nv_sup: successfully got VPD page: %x " 8934 "PAGE LENGTH: %x BYTE 6: %x\n", 8935 inq86[1], inq86[3], inq86[6]); 8936 8937 8938 mutex_enter(SD_MUTEX(un)); 8939 /* 8940 * Check the value of the NV_SUP bit: only if the device 8941 * reports NV_SUP as 1 will the 8942 * un_f_sync_nv_supported flag be set to TRUE. 8943 */ 8944 if (inq86[6] & SD_VPD_NV_SUP) { 8945 un->un_f_sync_nv_supported = TRUE; 8946 } 8947 mutex_exit(SD_MUTEX(un)); 8948 } 8949 kmem_free(inq86, inq86_len); 8950 } else { 8951 mutex_exit(SD_MUTEX(un)); 8952 } 8953 8954 /* 8955 * Send a SYNC CACHE command to check whether the 8956 * SYNC_NV bit is supported. By the time this command is issued, 8957 * un_f_sync_nv_supported holds its correct value. 8958 */ 8959 mutex_enter(SD_MUTEX(un)); 8960 if (un->un_f_sync_nv_supported) { 8961 mutex_exit(SD_MUTEX(un)); 8962 dkc = kmem_zalloc(sizeof (struct dk_callback), KM_SLEEP); 8963 dkc->dkc_flag = FLUSH_VOLATILE; 8964 (void) sd_send_scsi_SYNCHRONIZE_CACHE(un, dkc); 8965 8966 /* 8967 * Send a TEST UNIT READY command to the device. This should 8968 * clear any outstanding UNIT ATTENTION that may be present. 8969 */ 8970 (void) sd_send_scsi_TEST_UNIT_READY(un, SD_DONT_RETRY_TUR); 8971 8972 kmem_free(dkc, sizeof (struct dk_callback)); 8973 } else { 8974 mutex_exit(SD_MUTEX(un)); 8975 } 8976 8977 SD_TRACE(SD_LOG_COMMON, un, "sd_get_nv_sup: " 8978 "un_f_suppress_cache_flush is set to %d\n", 8979 un->un_f_suppress_cache_flush); 8980 } 8981 8982 /* 8983 * Function: sd_make_device 8984 * 8985 * Description: Utility routine to return the Solaris device number from 8986 * the data in the device's dev_info structure. 8987 * 8988 * Return Code: The Solaris device number 8989 * 8990 * Context: Any 8991 */ 8992 8993 static dev_t 8994 sd_make_device(dev_info_t *devi) 8995 { 8996 return (makedevice(ddi_name_to_major(ddi_get_name(devi)), 8997 ddi_get_instance(devi) << SDUNIT_SHIFT)); 8998 } 8999 9000 9001 /* 9002 * Function: sd_pm_entry 9003 * 9004 * Description: Called at the start of a new command to manage power 9005 * and busy status of a device. This includes determining whether 9006 * the current power state of the device is sufficient for 9007 * performing the command or whether it must be changed. 9008 * The PM framework is notified appropriately. 9009 * Only with a return status of DDI_SUCCESS will the 9010 * component be marked busy to the framework. 9011 * 9012 * All callers of sd_pm_entry must check the return status 9013 * and only call sd_pm_exit if it was DDI_SUCCESS. A status 9014 * of DDI_FAILURE indicates the device failed to power up. 9015 * In this case un_pm_count has been adjusted so the result 9016 * on exit is still powered down, i.e. count is less than 0. 9017 * Calling sd_pm_exit with this count value hits an ASSERT. 9018 * 9019 * Return Code: DDI_SUCCESS or DDI_FAILURE 9020 * 9021 * Context: Kernel thread context.
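 *
 *		A minimal usage sketch (illustrative; this is the pattern
 *		used by callers such as sdopen and sdclose in this file):
 *
 *			if (sd_pm_entry(un) != DDI_SUCCESS)
 *				return (EIO);	-- device failed to power up
 *			-- ... issue the command(s) ...
 *			sd_pm_exit(un);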
9022 */ 9023 9024 static int 9025 sd_pm_entry(struct sd_lun *un) 9026 { 9027 int return_status = DDI_SUCCESS; 9028 9029 ASSERT(!mutex_owned(SD_MUTEX(un))); 9030 ASSERT(!mutex_owned(&un->un_pm_mutex)); 9031 9032 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_entry: entry\n"); 9033 9034 if (un->un_f_pm_is_enabled == FALSE) { 9035 SD_TRACE(SD_LOG_IO_PM, un, 9036 "sd_pm_entry: exiting, PM not enabled\n"); 9037 return (return_status); 9038 } 9039 9040 /* 9041 * Just increment a counter if PM is enabled. On the transition from 9042 * 0 ==> 1, mark the device as busy. The iodone side will decrement 9043 * the count with each IO and mark the device as idle when the count 9044 * hits 0. 9045 * 9046 * If the count is less than 0 the device is powered down. If a powered 9047 * down device is successfully powered up then the count must be 9048 * incremented to reflect the power up. Note that it'll get incremented 9049 * a second time to become busy. 9050 * 9051 * Because the following has the potential to change the device state 9052 * and must release the un_pm_mutex to do so, only one thread can be 9053 * allowed through at a time. 9054 */ 9055 9056 mutex_enter(&un->un_pm_mutex); 9057 while (un->un_pm_busy == TRUE) { 9058 cv_wait(&un->un_pm_busy_cv, &un->un_pm_mutex); 9059 } 9060 un->un_pm_busy = TRUE; 9061 9062 if (un->un_pm_count < 1) { 9063 9064 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_entry: busy component\n"); 9065 9066 /* 9067 * Indicate we are now busy so the framework won't attempt to 9068 * power down the device. This call will only fail if either 9069 * we passed a bad component number or the device has no 9070 * components. Neither of these should ever happen. 9071 */ 9072 mutex_exit(&un->un_pm_mutex); 9073 return_status = pm_busy_component(SD_DEVINFO(un), 0); 9074 ASSERT(return_status == DDI_SUCCESS); 9075 9076 mutex_enter(&un->un_pm_mutex); 9077 9078 if (un->un_pm_count < 0) { 9079 mutex_exit(&un->un_pm_mutex); 9080 9081 SD_TRACE(SD_LOG_IO_PM, un, 9082 "sd_pm_entry: power up component\n"); 9083 9084 /* 9085 * pm_raise_power will cause sdpower to be called 9086 * which brings the device power level to the 9087 * desired state, ON in this case. If successful, 9088 * un_pm_count and un_power_level will be updated 9089 * appropriately. 9090 */ 9091 return_status = pm_raise_power(SD_DEVINFO(un), 0, 9092 SD_SPINDLE_ON); 9093 9094 mutex_enter(&un->un_pm_mutex); 9095 9096 if (return_status != DDI_SUCCESS) { 9097 /* 9098 * Power up failed. 9099 * Idle the device and adjust the count 9100 * so the result on exit is that we're 9101 * still powered down, ie. count is less than 0. 9102 */ 9103 SD_TRACE(SD_LOG_IO_PM, un, 9104 "sd_pm_entry: power up failed," 9105 " idle the component\n"); 9106 9107 (void) pm_idle_component(SD_DEVINFO(un), 0); 9108 un->un_pm_count--; 9109 } else { 9110 /* 9111 * Device is powered up, verify the 9112 * count is non-negative. 9113 * This is debug only. 9114 */ 9115 ASSERT(un->un_pm_count == 0); 9116 } 9117 } 9118 9119 if (return_status == DDI_SUCCESS) { 9120 /* 9121 * For performance, now that the device has been tagged 9122 * as busy, and it's known to be powered up, update the 9123 * chain types to use jump tables that do not include 9124 * pm. This significantly lowers the overhead and 9125 * therefore improves performance. 
9126 */ 9127 9128 mutex_exit(&un->un_pm_mutex); 9129 mutex_enter(SD_MUTEX(un)); 9130 SD_TRACE(SD_LOG_IO_PM, un, 9131 "sd_pm_entry: changing uscsi_chain_type from %d\n", 9132 un->un_uscsi_chain_type); 9133 9134 if (un->un_f_non_devbsize_supported) { 9135 un->un_buf_chain_type = 9136 SD_CHAIN_INFO_RMMEDIA_NO_PM; 9137 } else { 9138 un->un_buf_chain_type = 9139 SD_CHAIN_INFO_DISK_NO_PM; 9140 } 9141 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD_NO_PM; 9142 9143 SD_TRACE(SD_LOG_IO_PM, un, 9144 " changed uscsi_chain_type to %d\n", 9145 un->un_uscsi_chain_type); 9146 mutex_exit(SD_MUTEX(un)); 9147 mutex_enter(&un->un_pm_mutex); 9148 9149 if (un->un_pm_idle_timeid == NULL) { 9150 /* 300 ms. */ 9151 un->un_pm_idle_timeid = 9152 timeout(sd_pm_idletimeout_handler, un, 9153 (drv_usectohz((clock_t)300000))); 9154 /* 9155 * Include an extra call to busy which keeps the 9156 * device busy with-respect-to the PM layer 9157 * until the timer fires, at which time it'll 9158 * get the extra idle call. 9159 */ 9160 (void) pm_busy_component(SD_DEVINFO(un), 0); 9161 } 9162 } 9163 } 9164 un->un_pm_busy = FALSE; 9165 /* Next... */ 9166 cv_signal(&un->un_pm_busy_cv); 9167 9168 un->un_pm_count++; 9169 9170 SD_TRACE(SD_LOG_IO_PM, un, 9171 "sd_pm_entry: exiting, un_pm_count = %d\n", un->un_pm_count); 9172 9173 mutex_exit(&un->un_pm_mutex); 9174 9175 return (return_status); 9176 } 9177 9178 9179 /* 9180 * Function: sd_pm_exit 9181 * 9182 * Description: Called at the completion of a command to manage busy 9183 * status for the device. If the device becomes idle the 9184 * PM framework is notified. 9185 * 9186 * Context: Kernel thread context 9187 */ 9188 9189 static void 9190 sd_pm_exit(struct sd_lun *un) 9191 { 9192 ASSERT(!mutex_owned(SD_MUTEX(un))); 9193 ASSERT(!mutex_owned(&un->un_pm_mutex)); 9194 9195 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_exit: entry\n"); 9196 9197 /* 9198 * After attach the following flag is only read, so don't 9199 * take the penalty of acquiring a mutex for it. 9200 */ 9201 if (un->un_f_pm_is_enabled == TRUE) { 9202 9203 mutex_enter(&un->un_pm_mutex); 9204 un->un_pm_count--; 9205 9206 SD_TRACE(SD_LOG_IO_PM, un, 9207 "sd_pm_exit: un_pm_count = %d\n", un->un_pm_count); 9208 9209 ASSERT(un->un_pm_count >= 0); 9210 if (un->un_pm_count == 0) { 9211 mutex_exit(&un->un_pm_mutex); 9212 9213 SD_TRACE(SD_LOG_IO_PM, un, 9214 "sd_pm_exit: idle component\n"); 9215 9216 (void) pm_idle_component(SD_DEVINFO(un), 0); 9217 9218 } else { 9219 mutex_exit(&un->un_pm_mutex); 9220 } 9221 } 9222 9223 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_exit: exiting\n"); 9224 } 9225 9226 9227 /* 9228 * Function: sdopen 9229 * 9230 * Description: Driver's open(9e) entry point function. 
9231 * 9232 * Arguments: dev_p - pointer to device number 9233 * flag - how to open file (FEXCL, FNDELAY, FREAD, FWRITE) 9234 * otyp - open type (OTYP_BLK, OTYP_CHR, OTYP_LYR) 9235 * cred_p - user credential pointer 9236 * 9237 * Return Code: EINVAL 9238 * ENXIO 9239 * EIO 9240 * EROFS 9241 * EBUSY 9242 * 9243 * Context: Kernel thread context 9244 */ 9245 /* ARGSUSED */ 9246 static int 9247 sdopen(dev_t *dev_p, int flag, int otyp, cred_t *cred_p) 9248 { 9249 struct sd_lun *un; 9250 int nodelay; 9251 int part; 9252 uint64_t partmask; 9253 int instance; 9254 dev_t dev; 9255 int rval = EIO; 9256 diskaddr_t nblks = 0; 9257 diskaddr_t label_cap; 9258 9259 /* Validate the open type */ 9260 if (otyp >= OTYPCNT) { 9261 return (EINVAL); 9262 } 9263 9264 dev = *dev_p; 9265 instance = SDUNIT(dev); 9266 mutex_enter(&sd_detach_mutex); 9267 9268 /* 9269 * Fail the open if there is no softstate for the instance, or 9270 * if another thread somewhere is trying to detach the instance. 9271 */ 9272 if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) || 9273 (un->un_detach_count != 0)) { 9274 mutex_exit(&sd_detach_mutex); 9275 /* 9276 * The probe cache only needs to be cleared when open (9e) fails 9277 * with ENXIO (4238046). 9278 */ 9279 /* 9280 * Unconditionally clearing the probe cache is ok with 9281 * separate sd/ssd binaries; 9282 * on the x86 platform, which has both parallel 9283 * and fibre support in one binary, it can be an issue. 9284 */ 9285 sd_scsi_clear_probe_cache(); 9286 return (ENXIO); 9287 } 9288 9289 /* 9290 * The un_layer_count is to prevent another thread in specfs from 9291 * trying to detach the instance, which can happen when we are 9292 * called from a higher-layer driver instead of thru specfs. 9293 * This will not be needed when DDI provides a layered driver 9294 * interface that allows specfs to know that an instance is in 9295 * use by a layered driver & should not be detached. 9296 * 9297 * Note: the semantics for layered driver opens are exactly one 9298 * close for every open. 9299 */ 9300 if (otyp == OTYP_LYR) { 9301 un->un_layer_count++; 9302 } 9303 9304 /* 9305 * Keep a count of the current # of opens in progress. This is because 9306 * some layered drivers try to call us as a regular open. This can 9307 * cause problems that we cannot prevent; however, by keeping this count 9308 * we can at least keep our open and detach routines from racing against 9309 * each other under such conditions. 9310 */ 9311 un->un_opens_in_progress++; 9312 mutex_exit(&sd_detach_mutex); 9313 9314 nodelay = (flag & (FNDELAY | FNONBLOCK)); 9315 part = SDPART(dev); 9316 partmask = 1 << part; 9317 9318 /* 9319 * We use a semaphore here in order to serialize 9320 * open and close requests on the device. 9321 */ 9322 sema_p(&un->un_semoclose); 9323 9324 mutex_enter(SD_MUTEX(un)); 9325 9326 /* 9327 * All device accesses go thru sdstrategy() where we check 9328 * on suspend status, but there could be a scsi_poll command, 9329 * which bypasses sdstrategy(), so we need to check the pm 9330 * status.
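 *
 * As an aside (inferred from the nodelay handling below): an open(2)
 * with O_NDELAY/O_NONBLOCK set skips both the sd_pm_entry() power-up
 * and the ready/label checks, e.g.
 *
 *	fd = open("/dev/rdsk/c0t0d0s2", O_RDONLY | O_NDELAY);
 *
 * can succeed even when no media is present; readiness is then
 * checked as part of subsequent I/O.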
9331 */ 9332 9333 if (!nodelay) { 9334 while ((un->un_state == SD_STATE_SUSPENDED) || 9335 (un->un_state == SD_STATE_PM_CHANGING)) { 9336 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 9337 } 9338 9339 mutex_exit(SD_MUTEX(un)); 9340 if (sd_pm_entry(un) != DDI_SUCCESS) { 9341 rval = EIO; 9342 SD_ERROR(SD_LOG_OPEN_CLOSE, un, 9343 "sdopen: sd_pm_entry failed\n"); 9344 goto open_failed_with_pm; 9345 } 9346 mutex_enter(SD_MUTEX(un)); 9347 } 9348 9349 /* check for previous exclusive open */ 9350 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: un=%p\n", (void *)un); 9351 SD_TRACE(SD_LOG_OPEN_CLOSE, un, 9352 "sdopen: exclopen=%x, flag=%x, regopen=%x\n", 9353 un->un_exclopen, flag, un->un_ocmap.regopen[otyp]); 9354 9355 if (un->un_exclopen & (partmask)) { 9356 goto excl_open_fail; 9357 } 9358 9359 if (flag & FEXCL) { 9360 int i; 9361 if (un->un_ocmap.lyropen[part]) { 9362 goto excl_open_fail; 9363 } 9364 for (i = 0; i < (OTYPCNT - 1); i++) { 9365 if (un->un_ocmap.regopen[i] & (partmask)) { 9366 goto excl_open_fail; 9367 } 9368 } 9369 } 9370 9371 /* 9372 * Check the write permission if this is a removable media device, 9373 * NDELAY has not been set, and writable permission is requested. 9374 * 9375 * Note: If NDELAY was set and this is write-protected media, the WRITE 9376 * attempt will fail with EIO as part of the I/O processing. This is a 9377 * more permissive implementation that allows the open to succeed and 9378 * WRITE attempts to fail when appropriate. 9379 */ 9380 if (un->un_f_chk_wp_open) { 9381 if ((flag & FWRITE) && (!nodelay)) { 9382 mutex_exit(SD_MUTEX(un)); 9383 /* 9384 * Defer the check for write permission on a writable 9385 * DVD drive till sdstrategy; do not fail the open even 9386 * if FWRITE is set, as the device can be writable 9387 * depending upon the media, and the media can change 9388 * after the call to open(). 9389 */ 9390 if (un->un_f_dvdram_writable_device == FALSE) { 9391 if (ISCD(un) || sr_check_wp(dev)) { 9392 rval = EROFS; 9393 mutex_enter(SD_MUTEX(un)); 9394 SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: " 9395 "write to cd or write protected media\n"); 9396 goto open_fail; 9397 } 9398 } 9399 mutex_enter(SD_MUTEX(un)); 9400 } 9401 } 9402 9403 /* 9404 * If opening in NDELAY/NONBLOCK mode, just return; whether the disk 9405 * is ready and has a valid geometry will be checked later. 9406 */ 9407 if (!nodelay) { 9408 mutex_exit(SD_MUTEX(un)); 9409 rval = sd_ready_and_valid(un); 9410 mutex_enter(SD_MUTEX(un)); 9411 /* 9412 * Fail if device is not ready or if the number of disk 9413 * blocks is zero or negative for non-CD devices. 9414 */ 9415 9416 nblks = 0; 9417 9418 if (rval == SD_READY_VALID && (!ISCD(un))) { 9419 /* if cmlb_partinfo fails, nblks remains 0 */ 9420 mutex_exit(SD_MUTEX(un)); 9421 (void) cmlb_partinfo(un->un_cmlbhandle, part, &nblks, 9422 NULL, NULL, NULL, (void *)SD_PATH_DIRECT); 9423 mutex_enter(SD_MUTEX(un)); 9424 } 9425 9426 if ((rval != SD_READY_VALID) || 9427 (!ISCD(un) && nblks <= 0)) { 9428 rval = un->un_f_has_removable_media ? ENXIO : EIO; 9429 SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: " 9430 "device not ready or invalid disk block value\n"); 9431 goto open_fail; 9432 } 9433 #if defined(__i386) || defined(__amd64) 9434 } else { 9435 uchar_t *cp; 9436 /* 9437 * x86 requires special nodelay handling, so that p0 is 9438 * always defined and accessible. 9439 * Invalidate geometry only if device is not already open.
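 * (un_ocmap.chkd[] effectively overlays the per-type open maps as
 * raw bytes, so the scan below asks "is any partition currently
 * open by any open type?"; the geometry is invalidated only if the
 * answer is no.)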
9440 */ 9441 cp = &un->un_ocmap.chkd[0]; 9442 while (cp < &un->un_ocmap.chkd[OCSIZE]) { 9443 if (*cp != (uchar_t)0) { 9444 break; 9445 } 9446 cp++; 9447 } 9448 if (cp == &un->un_ocmap.chkd[OCSIZE]) { 9449 mutex_exit(SD_MUTEX(un)); 9450 cmlb_invalidate(un->un_cmlbhandle, 9451 (void *)SD_PATH_DIRECT); 9452 mutex_enter(SD_MUTEX(un)); 9453 } 9454 9455 #endif 9456 } 9457 9458 if (otyp == OTYP_LYR) { 9459 un->un_ocmap.lyropen[part]++; 9460 } else { 9461 un->un_ocmap.regopen[otyp] |= partmask; 9462 } 9463 9464 /* Set up open and exclusive open flags */ 9465 if (flag & FEXCL) { 9466 un->un_exclopen |= (partmask); 9467 } 9468 9469 /* 9470 * If the lun is EFI labeled and lun capacity is greater than the 9471 * capacity contained in the label, log a sys-event to notify the 9472 * interested module. 9473 * To avoid an infinite loop of logging sys-event, we only log the 9474 * event when the lun is not opened in NDELAY mode. The event handler 9475 * should open the lun in NDELAY mode. 9476 */ 9477 if (!(flag & FNDELAY)) { 9478 mutex_exit(SD_MUTEX(un)); 9479 if (cmlb_efi_label_capacity(un->un_cmlbhandle, &label_cap, 9480 (void*)SD_PATH_DIRECT) == 0) { 9481 mutex_enter(SD_MUTEX(un)); 9482 if (un->un_f_blockcount_is_valid && 9483 un->un_blockcount > label_cap) { 9484 mutex_exit(SD_MUTEX(un)); 9485 sd_log_lun_expansion_event(un, 9486 (nodelay ? KM_NOSLEEP : KM_SLEEP)); 9487 mutex_enter(SD_MUTEX(un)); 9488 } 9489 } else { 9490 mutex_enter(SD_MUTEX(un)); 9491 } 9492 } 9493 9494 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: " 9495 "open of part %d type %d\n", part, otyp); 9496 9497 mutex_exit(SD_MUTEX(un)); 9498 if (!nodelay) { 9499 sd_pm_exit(un); 9500 } 9501 9502 sema_v(&un->un_semoclose); 9503 9504 mutex_enter(&sd_detach_mutex); 9505 un->un_opens_in_progress--; 9506 mutex_exit(&sd_detach_mutex); 9507 9508 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: exit success\n"); 9509 return (DDI_SUCCESS); 9510 9511 excl_open_fail: 9512 SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: fail exclusive open\n"); 9513 rval = EBUSY; 9514 9515 open_fail: 9516 mutex_exit(SD_MUTEX(un)); 9517 9518 /* 9519 * On a failed open we must exit the pm management. 9520 */ 9521 if (!nodelay) { 9522 sd_pm_exit(un); 9523 } 9524 open_failed_with_pm: 9525 sema_v(&un->un_semoclose); 9526 9527 mutex_enter(&sd_detach_mutex); 9528 un->un_opens_in_progress--; 9529 if (otyp == OTYP_LYR) { 9530 un->un_layer_count--; 9531 } 9532 mutex_exit(&sd_detach_mutex); 9533 9534 return (rval); 9535 } 9536 9537 9538 /* 9539 * Function: sdclose 9540 * 9541 * Description: Driver's close(9e) entry point function. 9542 * 9543 * Arguments: dev - device number 9544 * flag - file status flag, informational only 9545 * otyp - close type (OTYP_BLK, OTYP_CHR, OTYP_LYR) 9546 * cred_p - user credential pointer 9547 * 9548 * Return Code: ENXIO 9549 * 9550 * Context: Kernel thread context 9551 */ 9552 /* ARGSUSED */ 9553 static int 9554 sdclose(dev_t dev, int flag, int otyp, cred_t *cred_p) 9555 { 9556 struct sd_lun *un; 9557 uchar_t *cp; 9558 int part; 9559 int nodelay; 9560 int rval = 0; 9561 9562 /* Validate the open type */ 9563 if (otyp >= OTYPCNT) { 9564 return (ENXIO); 9565 } 9566 9567 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 9568 return (ENXIO); 9569 } 9570 9571 part = SDPART(dev); 9572 nodelay = flag & (FNDELAY | FNONBLOCK); 9573 9574 SD_TRACE(SD_LOG_OPEN_CLOSE, un, 9575 "sdclose: close of part %d type %d\n", part, otyp); 9576 9577 /* 9578 * We use a semaphore here in order to serialize 9579 * open and close requests on the device. 
9580 */ 9581 sema_p(&un->un_semoclose); 9582 9583 mutex_enter(SD_MUTEX(un)); 9584 9585 /* Don't proceed if power is being changed. */ 9586 while (un->un_state == SD_STATE_PM_CHANGING) { 9587 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 9588 } 9589 9590 if (un->un_exclopen & (1 << part)) { 9591 un->un_exclopen &= ~(1 << part); 9592 } 9593 9594 /* Update the open partition map */ 9595 if (otyp == OTYP_LYR) { 9596 un->un_ocmap.lyropen[part] -= 1; 9597 } else { 9598 un->un_ocmap.regopen[otyp] &= ~(1 << part); 9599 } 9600 9601 cp = &un->un_ocmap.chkd[0]; 9602 while (cp < &un->un_ocmap.chkd[OCSIZE]) { 9603 if (*cp != (uchar_t)0) { 9604 break; 9605 } 9606 cp++; 9607 } 9608 9609 if (cp == &un->un_ocmap.chkd[OCSIZE]) { 9610 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdclose: last close\n"); 9611 9612 /* 9613 * We avoid persistence upon the last close, and set 9614 * the throttle back to the maximum. 9615 */ 9616 un->un_throttle = un->un_saved_throttle; 9617 9618 if (un->un_state == SD_STATE_OFFLINE) { 9619 if (un->un_f_is_fibre == FALSE) { 9620 scsi_log(SD_DEVINFO(un), sd_label, 9621 CE_WARN, "offline\n"); 9622 } 9623 mutex_exit(SD_MUTEX(un)); 9624 cmlb_invalidate(un->un_cmlbhandle, 9625 (void *)SD_PATH_DIRECT); 9626 mutex_enter(SD_MUTEX(un)); 9627 9628 } else { 9629 /* 9630 * Flush any outstanding writes in NVRAM cache. 9631 * Note: SYNCHRONIZE CACHE is an optional SCSI-2 9632 * command; it may not work for non-Pluto devices. 9633 * SYNCHRONIZE CACHE is not required for removables, 9634 * except DVD-RAM drives. 9635 * 9636 * Also note: because SYNCHRONIZE CACHE is currently 9637 * the only command issued here that requires the 9638 * drive be powered up, only do the power up before 9639 * sending the Sync Cache command. If additional 9640 * commands are added which require a powered up 9641 * drive, the following sequence may have to change. 9642 * 9643 * And finally, note that parallel SCSI on SPARC 9644 * only issues a Sync Cache to DVD-RAM, a newly 9645 * supported device. 9646 */ 9647 #if defined(__i386) || defined(__amd64) 9648 if ((un->un_f_sync_cache_supported && 9649 un->un_f_sync_cache_required) || 9650 un->un_f_dvdram_writable_device == TRUE) { 9651 #else 9652 if (un->un_f_dvdram_writable_device == TRUE) { 9653 #endif 9654 mutex_exit(SD_MUTEX(un)); 9655 if (sd_pm_entry(un) == DDI_SUCCESS) { 9656 rval = 9657 sd_send_scsi_SYNCHRONIZE_CACHE(un, 9658 NULL); 9659 /* ignore error if not supported */ 9660 if (rval == ENOTSUP) { 9661 rval = 0; 9662 } else if (rval != 0) { 9663 rval = EIO; 9664 } 9665 sd_pm_exit(un); 9666 } else { 9667 rval = EIO; 9668 } 9669 mutex_enter(SD_MUTEX(un)); 9670 } 9671 9672 /* 9673 * For devices which support DOOR_LOCK, send an ALLOW 9674 * MEDIA REMOVAL command, but don't get upset if it 9675 * fails. We need to raise the power of the drive before 9676 * we can call sd_send_scsi_DOORLOCK(). 9677 */ 9678 if (un->un_f_doorlock_supported) { 9679 mutex_exit(SD_MUTEX(un)); 9680 if (sd_pm_entry(un) == DDI_SUCCESS) { 9681 rval = sd_send_scsi_DOORLOCK(un, 9682 SD_REMOVAL_ALLOW, SD_PATH_DIRECT); 9683 9684 sd_pm_exit(un); 9685 if (ISCD(un) && (rval != 0) && 9686 (nodelay != 0)) { 9687 rval = ENXIO; 9688 } 9689 } else { 9690 rval = EIO; 9691 } 9692 mutex_enter(SD_MUTEX(un)); 9693 } 9694 9695 /* 9696 * If a device has removable media, invalidate all 9697 * parameters related to media, such as geometry, 9698 * blocksize, and blockcount.
9699 */ 9700 if (un->un_f_has_removable_media) { 9701 sr_ejected(un); 9702 } 9703 9704 /* 9705 * Destroy the cache (if it exists) which was 9706 * allocated for the write maps, since this is 9707 * the last close for this media. 9708 */ 9709 if (un->un_wm_cache) { 9710 /* 9711 * Check if there are pending commands; 9712 * if there are, give a warning and 9713 * do not destroy the cache. 9714 */ 9715 if (un->un_ncmds_in_driver > 0) { 9716 scsi_log(SD_DEVINFO(un), 9717 sd_label, CE_WARN, 9718 "Unable to clean up memory " 9719 "because of pending I/O\n"); 9720 } else { 9721 kmem_cache_destroy( 9722 un->un_wm_cache); 9723 un->un_wm_cache = NULL; 9724 } 9725 } 9726 } 9727 } 9728 9729 mutex_exit(SD_MUTEX(un)); 9730 sema_v(&un->un_semoclose); 9731 9732 if (otyp == OTYP_LYR) { 9733 mutex_enter(&sd_detach_mutex); 9734 /* 9735 * The detach routine may run when the layer count 9736 * drops to zero. 9737 */ 9738 un->un_layer_count--; 9739 mutex_exit(&sd_detach_mutex); 9740 } 9741 9742 return (rval); 9743 } 9744 9745 9746 /* 9747 * Function: sd_ready_and_valid 9748 * 9749 * Description: Test if device is ready and has a valid geometry. 9750 * 9751 * Arguments: un - driver soft state (unit) structure 9752 * 9753 * 9754 * Return Code: SD_READY_VALID ready and valid label 9755 * SD_NOT_READY_VALID not ready, no label 9756 * SD_RESERVED_BY_OTHERS reservation conflict 9757 * 9758 * Context: Never called at interrupt context. 9759 */ 9760 9761 static int 9762 sd_ready_and_valid(struct sd_lun *un) 9763 { 9764 struct sd_errstats *stp; 9765 uint64_t capacity; 9766 uint_t lbasize; 9767 int rval = SD_READY_VALID; 9768 char name_str[48]; 9769 int is_valid; 9770 9771 ASSERT(un != NULL); 9772 ASSERT(!mutex_owned(SD_MUTEX(un))); 9773 9774 mutex_enter(SD_MUTEX(un)); 9775 /* 9776 * If a device has removable media, we must check if media is 9777 * ready when checking if this device is ready and valid. 9778 */ 9779 if (un->un_f_has_removable_media) { 9780 mutex_exit(SD_MUTEX(un)); 9781 if (sd_send_scsi_TEST_UNIT_READY(un, 0) != 0) { 9782 rval = SD_NOT_READY_VALID; 9783 mutex_enter(SD_MUTEX(un)); 9784 goto done; 9785 } 9786 9787 is_valid = SD_IS_VALID_LABEL(un); 9788 mutex_enter(SD_MUTEX(un)); 9789 if (!is_valid || 9790 (un->un_f_blockcount_is_valid == FALSE) || 9791 (un->un_f_tgt_blocksize_is_valid == FALSE)) { 9792 9793 /* Capacity has to be read at every open. */ 9794 mutex_exit(SD_MUTEX(un)); 9795 if (sd_send_scsi_READ_CAPACITY(un, &capacity, 9796 &lbasize, SD_PATH_DIRECT) != 0) { 9797 cmlb_invalidate(un->un_cmlbhandle, 9798 (void *)SD_PATH_DIRECT); 9799 mutex_enter(SD_MUTEX(un)); 9800 rval = SD_NOT_READY_VALID; 9801 goto done; 9802 } else { 9803 mutex_enter(SD_MUTEX(un)); 9804 sd_update_block_info(un, lbasize, capacity); 9805 } 9806 } 9807 9808 /* 9809 * Check if the media in the device is writable or not. 9810 */ 9811 if (!is_valid && ISCD(un)) { 9812 sd_check_for_writable_cd(un, SD_PATH_DIRECT); 9813 } 9814 9815 } else { 9816 /* 9817 * Do a test unit ready to clear any unit attention from non-cd 9818 * devices. 9819 */ 9820 mutex_exit(SD_MUTEX(un)); 9821 (void) sd_send_scsi_TEST_UNIT_READY(un, 0); 9822 mutex_enter(SD_MUTEX(un)); 9823 } 9824 9825 9826 /* 9827 * If this is a non-512-block device, allocate space for 9828 * the wmap cache. This is done here since this routine is called 9829 * every time the media is changed, and the 9830 * block size is a function of the media rather than the device.
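 * (For example, with the naming scheme used below, instance 0 of
 * the sd driver gets a kmem cache named "sd0_cache"; the cache
 * supplies the struct sd_w_map entries used by the read-modify-
 * write path for non-512-byte-block media.)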
9831 */ 9832 if (un->un_f_non_devbsize_supported && NOT_DEVBSIZE(un)) { 9833 if (!(un->un_wm_cache)) { 9834 (void) snprintf(name_str, sizeof (name_str), 9835 "%s%d_cache", 9836 ddi_driver_name(SD_DEVINFO(un)), 9837 ddi_get_instance(SD_DEVINFO(un))); 9838 un->un_wm_cache = kmem_cache_create( 9839 name_str, sizeof (struct sd_w_map), 9840 8, sd_wm_cache_constructor, 9841 sd_wm_cache_destructor, NULL, 9842 (void *)un, NULL, 0); 9843 if (!(un->un_wm_cache)) { 9844 rval = ENOMEM; 9845 goto done; 9846 } 9847 } 9848 } 9849 9850 if (un->un_state == SD_STATE_NORMAL) { 9851 /* 9852 * If the target is not yet ready here (defined by a TUR 9853 * failure), invalidate the geometry and print an 'offline' 9854 * message. This is a legacy message, as the state of the 9855 * target is not actually changed to SD_STATE_OFFLINE. 9856 * 9857 * If the TUR fails for EACCES (Reservation Conflict), 9858 * SD_RESERVED_BY_OTHERS will be returned to indicate 9859 * reservation conflict. If the TUR fails for other 9860 * reasons, SD_NOT_READY_VALID will be returned. 9861 */ 9862 int err; 9863 9864 mutex_exit(SD_MUTEX(un)); 9865 err = sd_send_scsi_TEST_UNIT_READY(un, 0); 9866 mutex_enter(SD_MUTEX(un)); 9867 9868 if (err != 0) { 9869 mutex_exit(SD_MUTEX(un)); 9870 cmlb_invalidate(un->un_cmlbhandle, 9871 (void *)SD_PATH_DIRECT); 9872 mutex_enter(SD_MUTEX(un)); 9873 if (err == EACCES) { 9874 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 9875 "reservation conflict\n"); 9876 rval = SD_RESERVED_BY_OTHERS; 9877 } else { 9878 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 9879 "drive offline\n"); 9880 rval = SD_NOT_READY_VALID; 9881 } 9882 goto done; 9883 } 9884 } 9885 9886 if (un->un_f_format_in_progress == FALSE) { 9887 mutex_exit(SD_MUTEX(un)); 9888 if (cmlb_validate(un->un_cmlbhandle, 0, 9889 (void *)SD_PATH_DIRECT) != 0) { 9890 rval = SD_NOT_READY_VALID; 9891 mutex_enter(SD_MUTEX(un)); 9892 goto done; 9893 } 9894 if (un->un_f_pkstats_enabled) { 9895 sd_set_pstats(un); 9896 SD_TRACE(SD_LOG_IO_PARTITION, un, 9897 "sd_ready_and_valid: un:0x%p pstats created and " 9898 "set\n", un); 9899 } 9900 mutex_enter(SD_MUTEX(un)); 9901 } 9902 9903 /* 9904 * If this device supports the DOOR_LOCK command, try to send 9905 * this command to PREVENT MEDIA REMOVAL, but don't get upset 9906 * if it fails. For a CD, however, it is an error. 9907 */ 9908 if (un->un_f_doorlock_supported) { 9909 mutex_exit(SD_MUTEX(un)); 9910 if ((sd_send_scsi_DOORLOCK(un, SD_REMOVAL_PREVENT, 9911 SD_PATH_DIRECT) != 0) && ISCD(un)) { 9912 rval = SD_NOT_READY_VALID; 9913 mutex_enter(SD_MUTEX(un)); 9914 goto done; 9915 } 9916 mutex_enter(SD_MUTEX(un)); 9917 } 9918 9919 /* The state has changed, inform the media watch routines */ 9920 un->un_mediastate = DKIO_INSERTED; 9921 cv_broadcast(&un->un_state_cv); 9922 rval = SD_READY_VALID; 9923 9924 done: 9925 9926 /* 9927 * Initialize the capacity kstat value, if there was no media 9928 * previously (capacity kstat is 0) and media has now been inserted 9929 * (un_blockcount > 0). 9930 */ 9931 if (un->un_errstats != NULL) { 9932 stp = (struct sd_errstats *)un->un_errstats->ks_data; 9933 if ((stp->sd_capacity.value.ui64 == 0) && 9934 (un->un_f_blockcount_is_valid == TRUE)) { 9935 stp->sd_capacity.value.ui64 = 9936 (uint64_t)((uint64_t)un->un_blockcount * 9937 un->un_sys_blocksize); 9938 } 9939 } 9940 9941 mutex_exit(SD_MUTEX(un)); 9942 return (rval); 9943 } 9944 9945 9946 /* 9947 * Function: sdmin 9948 * 9949 * Description: Routine to limit the size of a data transfer. Used in 9950 * conjunction with physio(9F).
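 *
 *		Worked example (numbers illustrative): if un_max_xfer_size
 *		is 1 MB and a caller issues a 4 MB read through physio(9F),
 *		physio calls sdmin() before each transfer; b_bcount is
 *		clipped to 1 MB and physio loops, issuing four transfers,
 *		until the request is satisfied.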
9951 * 9952 * Arguments: bp - pointer to the indicated buf(9S) struct. 9953 * 9954 * Context: Kernel thread context. 9955 */ 9956 9957 static void 9958 sdmin(struct buf *bp) 9959 { 9960 struct sd_lun *un; 9961 int instance; 9962 9963 instance = SDUNIT(bp->b_edev); 9964 9965 un = ddi_get_soft_state(sd_state, instance); 9966 ASSERT(un != NULL); 9967 9968 if (bp->b_bcount > un->un_max_xfer_size) { 9969 bp->b_bcount = un->un_max_xfer_size; 9970 } 9971 } 9972 9973 9974 /* 9975 * Function: sdread 9976 * 9977 * Description: Driver's read(9e) entry point function. 9978 * 9979 * Arguments: dev - device number 9980 * uio - structure pointer describing where data is to be stored 9981 * in user's space 9982 * cred_p - user credential pointer 9983 * 9984 * Return Code: ENXIO 9985 * EIO 9986 * EINVAL 9987 * value returned by physio 9988 * 9989 * Context: Kernel thread context. 9990 */ 9991 /* ARGSUSED */ 9992 static int 9993 sdread(dev_t dev, struct uio *uio, cred_t *cred_p) 9994 { 9995 struct sd_lun *un = NULL; 9996 int secmask; 9997 int err; 9998 9999 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 10000 return (ENXIO); 10001 } 10002 10003 ASSERT(!mutex_owned(SD_MUTEX(un))); 10004 10005 if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) { 10006 mutex_enter(SD_MUTEX(un)); 10007 /* 10008 * Because the call to sd_ready_and_valid will issue I/O, we 10009 * must wait here if either the device is suspended or 10010 * its power level is changing. 10011 */ 10012 while ((un->un_state == SD_STATE_SUSPENDED) || 10013 (un->un_state == SD_STATE_PM_CHANGING)) { 10014 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 10015 } 10016 un->un_ncmds_in_driver++; 10017 mutex_exit(SD_MUTEX(un)); 10018 if ((sd_ready_and_valid(un)) != SD_READY_VALID) { 10019 mutex_enter(SD_MUTEX(un)); 10020 un->un_ncmds_in_driver--; 10021 ASSERT(un->un_ncmds_in_driver >= 0); 10022 mutex_exit(SD_MUTEX(un)); 10023 return (EIO); 10024 } 10025 mutex_enter(SD_MUTEX(un)); 10026 un->un_ncmds_in_driver--; 10027 ASSERT(un->un_ncmds_in_driver >= 0); 10028 mutex_exit(SD_MUTEX(un)); 10029 } 10030 10031 /* 10032 * Read requests are restricted to multiples of the system block size. 10033 */ 10034 secmask = un->un_sys_blocksize - 1; 10035 10036 if (uio->uio_loffset & ((offset_t)(secmask))) { 10037 SD_ERROR(SD_LOG_READ_WRITE, un, 10038 "sdread: file offset not modulo %d\n", 10039 un->un_sys_blocksize); 10040 err = EINVAL; 10041 } else if (uio->uio_iov->iov_len & (secmask)) { 10042 SD_ERROR(SD_LOG_READ_WRITE, un, 10043 "sdread: transfer length not modulo %d\n", 10044 un->un_sys_blocksize); 10045 err = EINVAL; 10046 } else { 10047 err = physio(sdstrategy, NULL, dev, B_READ, sdmin, uio); 10048 } 10049 return (err); 10050 } 10051 10052 10053 /* 10054 * Function: sdwrite 10055 * 10056 * Description: Driver's write(9e) entry point function. 10057 * 10058 * Arguments: dev - device number 10059 * uio - structure pointer describing where data is stored in 10060 * user's space 10061 * cred_p - user credential pointer 10062 * 10063 * Return Code: ENXIO 10064 * EIO 10065 * EINVAL 10066 * value returned by physio 10067 * 10068 * Context: Kernel thread context.
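 *
 *		Example of the alignment rule enforced below (assuming a
 *		512-byte system block size, so secmask == 0x1ff): a write of
 *		8192 bytes at offset 4096 passes both checks, while a write
 *		of 100 bytes, or one at offset 1000, fails the corresponding
 *		check and returns EINVAL.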
10069 */ 10070 /* ARGSUSED */ 10071 static int 10072 sdwrite(dev_t dev, struct uio *uio, cred_t *cred_p) 10073 { 10074 struct sd_lun *un = NULL; 10075 int secmask; 10076 int err; 10077 10078 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 10079 return (ENXIO); 10080 } 10081 10082 ASSERT(!mutex_owned(SD_MUTEX(un))); 10083 10084 if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) { 10085 mutex_enter(SD_MUTEX(un)); 10086 /* 10087 * Because the call to sd_ready_and_valid will issue I/O, we 10088 * must wait here if either the device is suspended or 10089 * its power level is changing. 10090 */ 10091 while ((un->un_state == SD_STATE_SUSPENDED) || 10092 (un->un_state == SD_STATE_PM_CHANGING)) { 10093 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 10094 } 10095 un->un_ncmds_in_driver++; 10096 mutex_exit(SD_MUTEX(un)); 10097 if ((sd_ready_and_valid(un)) != SD_READY_VALID) { 10098 mutex_enter(SD_MUTEX(un)); 10099 un->un_ncmds_in_driver--; 10100 ASSERT(un->un_ncmds_in_driver >= 0); 10101 mutex_exit(SD_MUTEX(un)); 10102 return (EIO); 10103 } 10104 mutex_enter(SD_MUTEX(un)); 10105 un->un_ncmds_in_driver--; 10106 ASSERT(un->un_ncmds_in_driver >= 0); 10107 mutex_exit(SD_MUTEX(un)); 10108 } 10109 10110 /* 10111 * Write requests are restricted to multiples of the system block size. 10112 */ 10113 secmask = un->un_sys_blocksize - 1; 10114 10115 if (uio->uio_loffset & ((offset_t)(secmask))) { 10116 SD_ERROR(SD_LOG_READ_WRITE, un, 10117 "sdwrite: file offset not modulo %d\n", 10118 un->un_sys_blocksize); 10119 err = EINVAL; 10120 } else if (uio->uio_iov->iov_len & (secmask)) { 10121 SD_ERROR(SD_LOG_READ_WRITE, un, 10122 "sdwrite: transfer length not modulo %d\n", 10123 un->un_sys_blocksize); 10124 err = EINVAL; 10125 } else { 10126 err = physio(sdstrategy, NULL, dev, B_WRITE, sdmin, uio); 10127 } 10128 return (err); 10129 } 10130 10131 10132 /* 10133 * Function: sdaread 10134 * 10135 * Description: Driver's aread(9e) entry point function. 10136 * 10137 * Arguments: dev - device number 10138 * aio - structure pointer describing where data is to be stored 10139 * cred_p - user credential pointer 10140 * 10141 * Return Code: ENXIO 10142 * EIO 10143 * EINVAL 10144 * value returned by aphysio 10145 * 10146 * Context: Kernel thread context. 10147 */ 10148 /* ARGSUSED */ 10149 static int 10150 sdaread(dev_t dev, struct aio_req *aio, cred_t *cred_p) 10151 { 10152 struct sd_lun *un = NULL; 10153 struct uio *uio = aio->aio_uio; 10154 int secmask; 10155 int err; 10156 10157 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 10158 return (ENXIO); 10159 } 10160 10161 ASSERT(!mutex_owned(SD_MUTEX(un))); 10162 10163 if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) { 10164 mutex_enter(SD_MUTEX(un)); 10165 /* 10166 * Because the call to sd_ready_and_valid will issue I/O, we 10167 * must wait here if either the device is suspended or 10168 * its power level is changing.
10169 */ 10170 while ((un->un_state == SD_STATE_SUSPENDED) || 10171 (un->un_state == SD_STATE_PM_CHANGING)) { 10172 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 10173 } 10174 un->un_ncmds_in_driver++; 10175 mutex_exit(SD_MUTEX(un)); 10176 if ((sd_ready_and_valid(un)) != SD_READY_VALID) { 10177 mutex_enter(SD_MUTEX(un)); 10178 un->un_ncmds_in_driver--; 10179 ASSERT(un->un_ncmds_in_driver >= 0); 10180 mutex_exit(SD_MUTEX(un)); 10181 return (EIO); 10182 } 10183 mutex_enter(SD_MUTEX(un)); 10184 un->un_ncmds_in_driver--; 10185 ASSERT(un->un_ncmds_in_driver >= 0); 10186 mutex_exit(SD_MUTEX(un)); 10187 } 10188 10189 /* 10190 * Read requests are restricted to multiples of the system block size. 10191 */ 10192 secmask = un->un_sys_blocksize - 1; 10193 10194 if (uio->uio_loffset & ((offset_t)(secmask))) { 10195 SD_ERROR(SD_LOG_READ_WRITE, un, 10196 "sdaread: file offset not modulo %d\n", 10197 un->un_sys_blocksize); 10198 err = EINVAL; 10199 } else if (uio->uio_iov->iov_len & (secmask)) { 10200 SD_ERROR(SD_LOG_READ_WRITE, un, 10201 "sdaread: transfer length not modulo %d\n", 10202 un->un_sys_blocksize); 10203 err = EINVAL; 10204 } else { 10205 err = aphysio(sdstrategy, anocancel, dev, B_READ, sdmin, aio); 10206 } 10207 return (err); 10208 } 10209 10210 10211 /* 10212 * Function: sdawrite 10213 * 10214 * Description: Driver's awrite(9e) entry point function. 10215 * 10216 * Arguments: dev - device number 10217 * aio - structure pointer describing where data is stored 10218 * cred_p - user credential pointer 10219 * 10220 * Return Code: ENXIO 10221 * EIO 10222 * EINVAL 10223 * value returned by aphysio 10224 * 10225 * Context: Kernel thread context. 10226 */ 10227 /* ARGSUSED */ 10228 static int 10229 sdawrite(dev_t dev, struct aio_req *aio, cred_t *cred_p) 10230 { 10231 struct sd_lun *un = NULL; 10232 struct uio *uio = aio->aio_uio; 10233 int secmask; 10234 int err; 10235 10236 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 10237 return (ENXIO); 10238 } 10239 10240 ASSERT(!mutex_owned(SD_MUTEX(un))); 10241 10242 if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) { 10243 mutex_enter(SD_MUTEX(un)); 10244 /* 10245 * Because the call to sd_ready_and_valid will issue I/O, we 10246 * must wait here if either the device is suspended or 10247 * its power level is changing. 10248 */ 10249 while ((un->un_state == SD_STATE_SUSPENDED) || 10250 (un->un_state == SD_STATE_PM_CHANGING)) { 10251 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 10252 } 10253 un->un_ncmds_in_driver++; 10254 mutex_exit(SD_MUTEX(un)); 10255 if ((sd_ready_and_valid(un)) != SD_READY_VALID) { 10256 mutex_enter(SD_MUTEX(un)); 10257 un->un_ncmds_in_driver--; 10258 ASSERT(un->un_ncmds_in_driver >= 0); 10259 mutex_exit(SD_MUTEX(un)); 10260 return (EIO); 10261 } 10262 mutex_enter(SD_MUTEX(un)); 10263 un->un_ncmds_in_driver--; 10264 ASSERT(un->un_ncmds_in_driver >= 0); 10265 mutex_exit(SD_MUTEX(un)); 10266 } 10267 10268 /* 10269 * Write requests are restricted to multiples of the system block size.
10270 */ 10271 secmask = un->un_sys_blocksize - 1; 10272 10273 if (uio->uio_loffset & ((offset_t)(secmask))) { 10274 SD_ERROR(SD_LOG_READ_WRITE, un, 10275 "sdawrite: file offset not modulo %d\n", 10276 un->un_sys_blocksize); 10277 err = EINVAL; 10278 } else if (uio->uio_iov->iov_len & (secmask)) { 10279 SD_ERROR(SD_LOG_READ_WRITE, un, 10280 "sdawrite: transfer length not modulo %d\n", 10281 un->un_sys_blocksize); 10282 err = EINVAL; 10283 } else { 10284 err = aphysio(sdstrategy, anocancel, dev, B_WRITE, sdmin, aio); 10285 } 10286 return (err); 10287 } 10288 10289 10290 10291 10292 10293 /* 10294 * Driver IO processing follows the following sequence: 10295 * 10296 * sdioctl(9E) sdstrategy(9E) biodone(9F) 10297 * | | ^ 10298 * v v | 10299 * sd_send_scsi_cmd() ddi_xbuf_qstrategy() +-------------------+ 10300 * | | | | 10301 * v | | | 10302 * sd_uscsi_strategy() sd_xbuf_strategy() sd_buf_iodone() sd_uscsi_iodone() 10303 * | | ^ ^ 10304 * v v | | 10305 * SD_BEGIN_IOSTART() SD_BEGIN_IOSTART() | | 10306 * | | | | 10307 * +---+ | +------------+ +-------+ 10308 * | | | | 10309 * | SD_NEXT_IOSTART()| SD_NEXT_IODONE()| | 10310 * | v | | 10311 * | sd_mapblockaddr_iostart() sd_mapblockaddr_iodone() | 10312 * | | ^ | 10313 * | SD_NEXT_IOSTART()| SD_NEXT_IODONE()| | 10314 * | v | | 10315 * | sd_mapblocksize_iostart() sd_mapblocksize_iodone() | 10316 * | | ^ | 10317 * | SD_NEXT_IOSTART()| SD_NEXT_IODONE()| | 10318 * | v | | 10319 * | sd_checksum_iostart() sd_checksum_iodone() | 10320 * | | ^ | 10321 * +-> SD_NEXT_IOSTART()| SD_NEXT_IODONE()+------------->+ 10322 * | v | | 10323 * | sd_pm_iostart() sd_pm_iodone() | 10324 * | | ^ | 10325 * | | | | 10326 * +-> SD_NEXT_IOSTART()| SD_BEGIN_IODONE()--+--------------+ 10327 * | ^ 10328 * v | 10329 * sd_core_iostart() | 10330 * | | 10331 * | +------>(*destroypkt)() 10332 * +-> sd_start_cmds() <-+ | | 10333 * | | | v 10334 * | | | scsi_destroy_pkt(9F) 10335 * | | | 10336 * +->(*initpkt)() +- sdintr() 10337 * | | | | 10338 * | +-> scsi_init_pkt(9F) | +-> sd_handle_xxx() 10339 * | +-> scsi_setup_cdb(9F) | 10340 * | | 10341 * +--> scsi_transport(9F) | 10342 * | | 10343 * +----> SCSA ---->+ 10344 * 10345 * 10346 * This code is based upon the following presumptions: 10347 * 10348 * - iostart and iodone functions operate on buf(9S) structures. These 10349 * functions perform the necessary operations on the buf(9S) and pass 10350 * them along to the next function in the chain by using the macros 10351 * SD_NEXT_IOSTART() (for iostart side functions) and SD_NEXT_IODONE() 10352 * (for iodone side functions). 10353 * 10354 * - The iostart side functions may sleep. The iodone side functions 10355 * are called under interrupt context and may NOT sleep. Therefore 10356 * iodone side functions also may not call iostart side functions. 10357 * (NOTE: iostart side functions should NOT sleep for memory, as 10358 * this could result in deadlock.) 10359 * 10360 * - An iostart side function may call its corresponding iodone side 10361 * function directly (if necessary). 10362 * 10363 * - In the event of an error, an iostart side function can return a buf(9S) 10364 * to its caller by calling SD_BEGIN_IODONE() (after setting B_ERROR and 10365 * b_error in the usual way of course). 10366 * 10367 * - The taskq mechanism may be used by the iodone side functions to dispatch 10368 * requests to the iostart side functions. The iostart side functions in 10369 * this case would be called under the context of a taskq thread, so it's 10370 * OK for them to block/sleep/spin in this case. 
10371 * 10372 * - iostart side functions may allocate "shadow" buf(9S) structs and 10373 * pass them along to the next function in the chain. The corresponding 10374 * iodone side functions must coalesce the "shadow" bufs and return 10375 * the "original" buf to the next higher layer. 10376 * 10377 * - The b_private field of the buf(9S) struct holds a pointer to 10378 * an sd_xbuf struct, which contains information needed to 10379 * construct the scsi_pkt for the command. 10380 * 10381 * - The SD_MUTEX(un) is NOT held across calls to the next layer. Each 10382 * layer must acquire & release the SD_MUTEX(un) as needed. 10383 */ 10384 10385 10386 /* 10387 * Create taskq for all targets in the system. This is created at 10388 * _init(9E) and destroyed at _fini(9E). 10389 * 10390 * Note: here we set the minalloc to a reasonably high number to ensure that 10391 * we will have an adequate supply of task entries available at interrupt time. 10392 * This is used in conjunction with the TASKQ_PREPOPULATE flag in 10393 * sd_taskq_create(). Since we do not want to sleep for allocations at 10394 * interrupt time, set maxalloc equal to minalloc. That way we will just fail 10395 * the command if we ever try to dispatch more than SD_TASKQ_MAXALLOC taskq 10396 * requests at any one instant in time. 10397 */ 10398 #define SD_TASKQ_NUMTHREADS 8 10399 #define SD_TASKQ_MINALLOC 256 10400 #define SD_TASKQ_MAXALLOC 256 10401 10402 static taskq_t *sd_tq = NULL; 10403 _NOTE(SCHEME_PROTECTS_DATA("stable data", sd_tq)) 10404 10405 static int sd_taskq_minalloc = SD_TASKQ_MINALLOC; 10406 static int sd_taskq_maxalloc = SD_TASKQ_MAXALLOC; 10407 10408 /* 10409 * The following task queue is being created for the write part of 10410 * read-modify-write of non-512 block size devices. 10411 * Limit the number of threads to 1 for now. This number has been chosen 10412 * considering the fact that it currently applies only to DVD-RAM and MO 10413 * drives, for which performance is not the main criterion at this stage. 10414 * Note: it needs to be explored whether we can use a single taskq in future. 10415 */ 10416 #define SD_WMR_TASKQ_NUMTHREADS 1 10417 static taskq_t *sd_wmr_tq = NULL; 10418 _NOTE(SCHEME_PROTECTS_DATA("stable data", sd_wmr_tq)) 10419 10420 /* 10421 * Function: sd_taskq_create 10422 * 10423 * Description: Create taskq thread(s) and preallocate task entries 10424 * 10425 * Return Code: none; the allocated taskqs are stored in sd_tq and sd_wmr_tq. 10426 * 10427 * Context: Can sleep. Requires blockable context. 10428 * 10429 * Notes: - The taskq() facility currently is NOT part of the DDI. 10430 * (definitely NOT recommended for 3rd-party drivers!) :-) 10431 * - taskq_create() will block for memory; also, it will panic 10432 * if it cannot create the requested number of threads. 10433 * - Currently taskq_create() creates threads that cannot be 10434 * swapped.
10435 * - We use TASKQ_PREPOPULATE to ensure we have an adequate 10436 * supply of taskq entries at interrupt time (i.e., so that we 10437 * do not have to sleep for memory) 10438 */ 10439 10440 static void 10441 sd_taskq_create(void) 10442 { 10443 char taskq_name[TASKQ_NAMELEN]; 10444 10445 ASSERT(sd_tq == NULL); 10446 ASSERT(sd_wmr_tq == NULL); 10447 10448 (void) snprintf(taskq_name, sizeof (taskq_name), 10449 "%s_drv_taskq", sd_label); 10450 sd_tq = (taskq_create(taskq_name, SD_TASKQ_NUMTHREADS, 10451 (v.v_maxsyspri - 2), sd_taskq_minalloc, sd_taskq_maxalloc, 10452 TASKQ_PREPOPULATE)); 10453 10454 (void) snprintf(taskq_name, sizeof (taskq_name), 10455 "%s_rmw_taskq", sd_label); 10456 sd_wmr_tq = (taskq_create(taskq_name, SD_WMR_TASKQ_NUMTHREADS, 10457 (v.v_maxsyspri - 2), sd_taskq_minalloc, sd_taskq_maxalloc, 10458 TASKQ_PREPOPULATE)); 10459 } 10460 10461 10462 /* 10463 * Function: sd_taskq_delete 10464 * 10465 * Description: Complementary cleanup routine for sd_taskq_create(). 10466 * 10467 * Context: Kernel thread context. 10468 */ 10469 10470 static void 10471 sd_taskq_delete(void) 10472 { 10473 ASSERT(sd_tq != NULL); 10474 ASSERT(sd_wmr_tq != NULL); 10475 taskq_destroy(sd_tq); 10476 taskq_destroy(sd_wmr_tq); 10477 sd_tq = NULL; 10478 sd_wmr_tq = NULL; 10479 } 10480 10481 10482 /* 10483 * Function: sdstrategy 10484 * 10485 * Description: Driver's strategy (9E) entry point function. 10486 * 10487 * Arguments: bp - pointer to buf(9S) 10488 * 10489 * Return Code: Always returns zero 10490 * 10491 * Context: Kernel thread context. 10492 */ 10493 10494 static int 10495 sdstrategy(struct buf *bp) 10496 { 10497 struct sd_lun *un; 10498 10499 un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp)); 10500 if (un == NULL) { 10501 bioerror(bp, EIO); 10502 bp->b_resid = bp->b_bcount; 10503 biodone(bp); 10504 return (0); 10505 } 10506 /* As was done in the past, fail new cmds if the state is dumping. */ 10507 if (un->un_state == SD_STATE_DUMPING) { 10508 bioerror(bp, ENXIO); 10509 bp->b_resid = bp->b_bcount; 10510 biodone(bp); 10511 return (0); 10512 } 10513 10514 ASSERT(!mutex_owned(SD_MUTEX(un))); 10515 10516 /* 10517 * Commands may sneak in while we released the mutex in 10518 * DDI_SUSPEND; we should block new commands. However, old 10519 * commands that are still in the driver at this point should 10520 * still be allowed to drain. 10521 */ 10522 mutex_enter(SD_MUTEX(un)); 10523 /* 10524 * Must wait here if either the device is suspended or 10525 * its power level is changing. 10526 */ 10527 while ((un->un_state == SD_STATE_SUSPENDED) || 10528 (un->un_state == SD_STATE_PM_CHANGING)) { 10529 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 10530 } 10531 10532 un->un_ncmds_in_driver++; 10533 10534 /* 10535 * atapi: Since we are running the CD in PIO mode for now, we need to 10536 * call bp_mapin here to avoid bp_mapin being called in interrupt 10537 * context from the HBA's init_pkt routine. 10538 */ 10539 if (un->un_f_cfg_is_atapi == TRUE) { 10540 mutex_exit(SD_MUTEX(un)); 10541 bp_mapin(bp); 10542 mutex_enter(SD_MUTEX(un)); 10543 } 10544 SD_INFO(SD_LOG_IO, un, "sdstrategy: un_ncmds_in_driver = %ld\n", 10545 un->un_ncmds_in_driver); 10546 10547 if (bp->b_flags & B_WRITE) 10548 un->un_f_sync_cache_required = TRUE; 10549 10550 mutex_exit(SD_MUTEX(un)); 10551 10552 /* 10553 * This will (eventually) allocate the sd_xbuf area and 10554 * call sd_xbuf_strategy(). We just want to return the 10555 * result of ddi_xbuf_qstrategy so that we have an 10556 * optimized tail call which saves us a stack frame.
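 * (That is, because the call below is the last action in
 * sdstrategy() and its return value is passed through unchanged,
 * the compiler can emit a direct jump to ddi_xbuf_qstrategy()
 * rather than a call/return pair.)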
10557 */ 10558 return (ddi_xbuf_qstrategy(bp, un->un_xbuf_attr)); 10559 } 10560 10561 10562 /* 10563 * Function: sd_xbuf_strategy 10564 * 10565 * Description: Function for initiating IO operations via the 10566 * ddi_xbuf_qstrategy() mechanism. 10567 * 10568 * Context: Kernel thread context. 10569 */ 10570 10571 static void 10572 sd_xbuf_strategy(struct buf *bp, ddi_xbuf_t xp, void *arg) 10573 { 10574 struct sd_lun *un = arg; 10575 10576 ASSERT(bp != NULL); 10577 ASSERT(xp != NULL); 10578 ASSERT(un != NULL); 10579 ASSERT(!mutex_owned(SD_MUTEX(un))); 10580 10581 /* 10582 * Initialize the fields in the xbuf and save a pointer to the 10583 * xbuf in bp->b_private. 10584 */ 10585 sd_xbuf_init(un, bp, xp, SD_CHAIN_BUFIO, NULL); 10586 10587 /* Send the buf down the iostart chain */ 10588 SD_BEGIN_IOSTART(((struct sd_xbuf *)xp)->xb_chain_iostart, un, bp); 10589 } 10590 10591 10592 /* 10593 * Function: sd_xbuf_init 10594 * 10595 * Description: Prepare the given sd_xbuf struct for use. 10596 * 10597 * Arguments: un - ptr to softstate 10598 * bp - ptr to associated buf(9S) 10599 * xp - ptr to associated sd_xbuf 10600 * chain_type - IO chain type to use: 10601 * SD_CHAIN_NULL 10602 * SD_CHAIN_BUFIO 10603 * SD_CHAIN_USCSI 10604 * SD_CHAIN_DIRECT 10605 * SD_CHAIN_DIRECT_PRIORITY 10606 * pktinfop - ptr to private data struct for scsi_pkt(9S) 10607 * initialization; may be NULL if none. 10608 * 10609 * Context: Kernel thread context 10610 */ 10611 10612 static void 10613 sd_xbuf_init(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp, 10614 uchar_t chain_type, void *pktinfop) 10615 { 10616 int index; 10617 10618 ASSERT(un != NULL); 10619 ASSERT(bp != NULL); 10620 ASSERT(xp != NULL); 10621 10622 SD_INFO(SD_LOG_IO, un, "sd_xbuf_init: buf:0x%p chain type:0x%x\n", 10623 bp, chain_type); 10624 10625 xp->xb_un = un; 10626 xp->xb_pktp = NULL; 10627 xp->xb_pktinfo = pktinfop; 10628 xp->xb_private = bp->b_private; 10629 xp->xb_blkno = (daddr_t)bp->b_blkno; 10630 10631 /* 10632 * Set up the iostart and iodone chain indexes in the xbuf, based 10633 * upon the specified chain type to use. 10634 */ 10635 switch (chain_type) { 10636 case SD_CHAIN_NULL: 10637 /* 10638 * Fall thru to just use the values for the buf type, even 10639 * tho for the NULL chain these values will never be used. 10640 */ 10641 /* FALLTHRU */ 10642 case SD_CHAIN_BUFIO: 10643 index = un->un_buf_chain_type; 10644 break; 10645 case SD_CHAIN_USCSI: 10646 index = un->un_uscsi_chain_type; 10647 break; 10648 case SD_CHAIN_DIRECT: 10649 index = un->un_direct_chain_type; 10650 break; 10651 case SD_CHAIN_DIRECT_PRIORITY: 10652 index = un->un_priority_chain_type; 10653 break; 10654 default: 10655 /* We're really broken if we ever get here... */ 10656 panic("sd_xbuf_init: illegal chain type!"); 10657 /*NOTREACHED*/ 10658 } 10659 10660 xp->xb_chain_iostart = sd_chain_index_map[index].sci_iostart_index; 10661 xp->xb_chain_iodone = sd_chain_index_map[index].sci_iodone_index; 10662 10663 /* 10664 * It might be a bit easier to simply bzero the entire xbuf above, 10665 * but it turns out that since we init a fair number of members anyway, 10666 * we save a fair number cycles by doing explicit assignment of zero. 
10667 */ 10668 xp->xb_pkt_flags = 0; 10669 xp->xb_dma_resid = 0; 10670 xp->xb_retry_count = 0; 10671 xp->xb_victim_retry_count = 0; 10672 xp->xb_ua_retry_count = 0; 10673 xp->xb_nr_retry_count = 0; 10674 xp->xb_sense_bp = NULL; 10675 xp->xb_sense_status = 0; 10676 xp->xb_sense_state = 0; 10677 xp->xb_sense_resid = 0; 10678 10679 bp->b_private = xp; 10680 bp->b_flags &= ~(B_DONE | B_ERROR); 10681 bp->b_resid = 0; 10682 bp->av_forw = NULL; 10683 bp->av_back = NULL; 10684 bioerror(bp, 0); 10685 10686 SD_INFO(SD_LOG_IO, un, "sd_xbuf_init: done.\n"); 10687 } 10688 10689 10690 /* 10691 * Function: sd_uscsi_strategy 10692 * 10693 * Description: Wrapper for calling into the USCSI chain via physio(9F) 10694 * 10695 * Arguments: bp - buf struct ptr 10696 * 10697 * Return Code: Always returns 0 10698 * 10699 * Context: Kernel thread context 10700 */ 10701 10702 static int 10703 sd_uscsi_strategy(struct buf *bp) 10704 { 10705 struct sd_lun *un; 10706 struct sd_uscsi_info *uip; 10707 struct sd_xbuf *xp; 10708 uchar_t chain_type; 10709 uchar_t cmd; 10710 10711 ASSERT(bp != NULL); 10712 10713 un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp)); 10714 if (un == NULL) { 10715 bioerror(bp, EIO); 10716 bp->b_resid = bp->b_bcount; 10717 biodone(bp); 10718 return (0); 10719 } 10720 10721 ASSERT(!mutex_owned(SD_MUTEX(un))); 10722 10723 SD_TRACE(SD_LOG_IO, un, "sd_uscsi_strategy: entry: buf:0x%p\n", bp); 10724 10725 /* 10726 * A pointer to a struct sd_uscsi_info is expected in bp->b_private 10727 */ 10728 ASSERT(bp->b_private != NULL); 10729 uip = (struct sd_uscsi_info *)bp->b_private; 10730 cmd = ((struct uscsi_cmd *)(uip->ui_cmdp))->uscsi_cdb[0]; 10731 10732 mutex_enter(SD_MUTEX(un)); 10733 /* 10734 * atapi: Since we are running the CD for now in PIO mode we need to 10735 * call bp_mapin here to avoid bp_mapin called interrupt context under 10736 * the HBA's init_pkt routine. 10737 */ 10738 if (un->un_f_cfg_is_atapi == TRUE) { 10739 mutex_exit(SD_MUTEX(un)); 10740 bp_mapin(bp); 10741 mutex_enter(SD_MUTEX(un)); 10742 } 10743 un->un_ncmds_in_driver++; 10744 SD_INFO(SD_LOG_IO, un, "sd_uscsi_strategy: un_ncmds_in_driver = %ld\n", 10745 un->un_ncmds_in_driver); 10746 10747 if ((bp->b_flags & B_WRITE) && (bp->b_bcount != 0) && 10748 (cmd != SCMD_MODE_SELECT) && (cmd != SCMD_MODE_SELECT_G1)) 10749 un->un_f_sync_cache_required = TRUE; 10750 10751 mutex_exit(SD_MUTEX(un)); 10752 10753 switch (uip->ui_flags) { 10754 case SD_PATH_DIRECT: 10755 chain_type = SD_CHAIN_DIRECT; 10756 break; 10757 case SD_PATH_DIRECT_PRIORITY: 10758 chain_type = SD_CHAIN_DIRECT_PRIORITY; 10759 break; 10760 default: 10761 chain_type = SD_CHAIN_USCSI; 10762 break; 10763 } 10764 10765 /* 10766 * We may allocate extra buf for external USCSI commands. If the 10767 * application asks for bigger than 20-byte sense data via USCSI, 10768 * SCSA layer will allocate 252 bytes sense buf for that command. 
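 * For example (using the values named above, where SENSE_LENGTH is
 * 20 bytes and the SCSA maximum is 252): a USCSI request with
 * uscsi_rqlen of 64 takes the first branch below, and the xbuf is
 * allocated with an extra 232 bytes so that a full 252-byte sense
 * buffer fits in place of the default 20-byte one.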
	if (((struct uscsi_cmd *)(uip->ui_cmdp))->uscsi_rqlen >
	    SENSE_LENGTH) {
		xp = kmem_zalloc(sizeof (struct sd_xbuf) - SENSE_LENGTH +
		    MAX_SENSE_LENGTH, KM_SLEEP);
	} else {
		xp = kmem_zalloc(sizeof (struct sd_xbuf), KM_SLEEP);
	}

	sd_xbuf_init(un, bp, xp, chain_type, uip->ui_cmdp);

	/* Use the index obtained within xbuf_init */
	SD_BEGIN_IOSTART(xp->xb_chain_iostart, un, bp);

	SD_TRACE(SD_LOG_IO, un, "sd_uscsi_strategy: exit: buf:0x%p\n", bp);

	return (0);
}

/*
 * Function: sd_send_scsi_cmd
 *
 * Description: Runs a USCSI command for user (when called thru sdioctl),
 *		or for the driver
 *
 * Arguments: dev - the dev_t for the device
 *	incmd - ptr to a valid uscsi_cmd struct
 *	flag - bit flag, indicating open settings, 32/64 bit type
 *	dataspace - UIO_USERSPACE or UIO_SYSSPACE
 *	path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
 *		the normal command waitq, or SD_PATH_DIRECT_PRIORITY
 *		to use the USCSI "direct" chain and bypass the normal
 *		command waitq.
 *
 * Return Code: 0 - successful completion of the given command
 *	EIO - scsi_uscsi_handle_command() failed
 *	ENXIO - soft state not found for specified dev
 *	EINVAL
 *	EFAULT - copyin/copyout error
 *	return code of scsi_uscsi_handle_command():
 *		EIO
 *		ENXIO
 *		EACCES
 *
 * Context: Waits for command to complete. Can sleep.
 */

static int
sd_send_scsi_cmd(dev_t dev, struct uscsi_cmd *incmd, int flag,
    enum uio_seg dataspace, int path_flag)
{
	struct sd_uscsi_info	*uip;
	struct uscsi_cmd	*uscmd;
	struct sd_lun		*un;
	int	format = 0;
	int	rval;

	un = ddi_get_soft_state(sd_state, SDUNIT(dev));
	if (un == NULL) {
		return (ENXIO);
	}

	ASSERT(!mutex_owned(SD_MUTEX(un)));

#ifdef SDDEBUG
	switch (dataspace) {
	case UIO_USERSPACE:
		SD_TRACE(SD_LOG_IO, un,
		    "sd_send_scsi_cmd: entry: un:0x%p UIO_USERSPACE\n", un);
		break;
	case UIO_SYSSPACE:
		SD_TRACE(SD_LOG_IO, un,
		    "sd_send_scsi_cmd: entry: un:0x%p UIO_SYSSPACE\n", un);
		break;
	default:
		SD_TRACE(SD_LOG_IO, un,
		    "sd_send_scsi_cmd: entry: un:0x%p UNEXPECTED SPACE\n", un);
		break;
	}
#endif

	rval = scsi_uscsi_alloc_and_copyin((intptr_t)incmd, flag,
	    SD_ADDRESS(un), &uscmd);
	if (rval != 0) {
		SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_cmd: "
		    "scsi_uscsi_alloc_and_copyin failed\n");
		return (rval);
	}

	if ((uscmd->uscsi_cdb != NULL) &&
	    (uscmd->uscsi_cdb[0] == SCMD_FORMAT)) {
		mutex_enter(SD_MUTEX(un));
		un->un_f_format_in_progress = TRUE;
		mutex_exit(SD_MUTEX(un));
		format = 1;
	}

	/*
	 * Allocate an sd_uscsi_info struct and fill it with the info
	 * needed by sd_initpkt_for_uscsi(). Then put the pointer into
	 * b_private in the buf for sd_initpkt_for_uscsi(). Note that
	 * since we allocate the buf here in this function, we do not
	 * need to preserve the prior contents of b_private.
	 * The sd_uscsi_info struct is also used by sd_uscsi_strategy().
	 */
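	/*
	 * (Note, tying together the two functions above: the path_flag
	 * saved in ui_flags below is what sd_uscsi_strategy() later maps
	 * onto SD_CHAIN_DIRECT, SD_CHAIN_DIRECT_PRIORITY, or the default
	 * SD_CHAIN_USCSI iostart chain.)
	 */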
	uip = kmem_zalloc(sizeof (struct sd_uscsi_info), KM_SLEEP);
	uip->ui_flags = path_flag;
	uip->ui_cmdp = uscmd;

	/*
	 * Commands sent with priority are intended for error recovery
	 * situations, and do not have retries performed.
	 */
	if (path_flag == SD_PATH_DIRECT_PRIORITY) {
		uscmd->uscsi_flags |= USCSI_DIAGNOSE;
	}
	uscmd->uscsi_flags &= ~USCSI_NOINTR;

	rval = scsi_uscsi_handle_cmd(dev, dataspace, uscmd,
	    sd_uscsi_strategy, NULL, uip);

#ifdef SDDEBUG
	SD_INFO(SD_LOG_IO, un, "sd_send_scsi_cmd: "
	    "uscsi_status: 0x%02x uscsi_resid:0x%x\n",
	    uscmd->uscsi_status, uscmd->uscsi_resid);
	if (uscmd->uscsi_bufaddr != NULL) {
		SD_INFO(SD_LOG_IO, un, "sd_send_scsi_cmd: "
		    "uscmd->uscsi_bufaddr: 0x%p uscmd->uscsi_buflen:%d\n",
		    uscmd->uscsi_bufaddr, uscmd->uscsi_buflen);
		if (dataspace == UIO_SYSSPACE) {
			SD_DUMP_MEMORY(un, SD_LOG_IO,
			    "data", (uchar_t *)uscmd->uscsi_bufaddr,
			    uscmd->uscsi_buflen, SD_LOG_HEX);
		}
	}
#endif

	if (format == 1) {
		mutex_enter(SD_MUTEX(un));
		un->un_f_format_in_progress = FALSE;
		mutex_exit(SD_MUTEX(un));
	}

	(void) scsi_uscsi_copyout_and_free((intptr_t)incmd, uscmd);
	kmem_free(uip, sizeof (struct sd_uscsi_info));

	return (rval);
}


/*
 * Function: sd_buf_iodone
 *
 * Description: Frees the sd_xbuf & returns the buf to its originator.
 *
 * Context: May be called from interrupt context.
 */
/* ARGSUSED */
static void
sd_buf_iodone(int index, struct sd_lun *un, struct buf *bp)
{
	struct sd_xbuf *xp;

	ASSERT(un != NULL);
	ASSERT(bp != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	SD_TRACE(SD_LOG_IO_CORE, un, "sd_buf_iodone: entry.\n");

	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);

	mutex_enter(SD_MUTEX(un));

	/*
	 * Grab the time when the cmd completed. This is used to determine
	 * whether the device has been idle long enough to be declared idle
	 * to the PM framework; tracking it here keeps the per-I/O overhead
	 * low and therefore improves performance.
	 */
	un->un_pm_idle_time = ddi_get_time();

	un->un_ncmds_in_driver--;
	ASSERT(un->un_ncmds_in_driver >= 0);
	SD_INFO(SD_LOG_IO, un, "sd_buf_iodone: un_ncmds_in_driver = %ld\n",
	    un->un_ncmds_in_driver);

	mutex_exit(SD_MUTEX(un));

	ddi_xbuf_done(bp, un->un_xbuf_attr);	/* xbuf is gone after this */
	biodone(bp);				/* bp is gone after this */

	SD_TRACE(SD_LOG_IO_CORE, un, "sd_buf_iodone: exit.\n");
}


/*
 * Function: sd_uscsi_iodone
 *
 * Description: Frees the sd_xbuf & returns the buf to its originator.
 *
 * Context: May be called from interrupt context.
 */
/* ARGSUSED */
static void
sd_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp)
{
	struct sd_xbuf *xp;

	ASSERT(un != NULL);
	ASSERT(bp != NULL);

	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: entry.\n");

	bp->b_private = xp->xb_private;

	mutex_enter(SD_MUTEX(un));

	/*
	 * Grab the time when the cmd completed. This is used to determine
	 * whether the device has been idle long enough to be declared idle
	 * to the PM framework; tracking it here keeps the per-I/O overhead
	 * low and therefore improves performance.
	 */
	un->un_pm_idle_time = ddi_get_time();

	un->un_ncmds_in_driver--;
	ASSERT(un->un_ncmds_in_driver >= 0);
	SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: un_ncmds_in_driver = %ld\n",
	    un->un_ncmds_in_driver);

	mutex_exit(SD_MUTEX(un));

	if (((struct uscsi_cmd *)(xp->xb_pktinfo))->uscsi_rqlen >
	    SENSE_LENGTH) {
		kmem_free(xp, sizeof (struct sd_xbuf) - SENSE_LENGTH +
		    MAX_SENSE_LENGTH);
	} else {
		kmem_free(xp, sizeof (struct sd_xbuf));
	}

	biodone(bp);

	SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: exit.\n");
}


/*
 * Function: sd_mapblockaddr_iostart
 *
 * Description: Verify request lies within the partition limits for
 *		the indicated minor device. Issue "overrun" buf if
 *		request would exceed partition range. Converts
 *		partition-relative block address to absolute.
 *
 * Context: Can sleep
 *
 * Issues: This follows what the old code did, in terms of accessing
 *	some of the partition info in the unit struct without holding
 *	the mutex. This is a general issue, if the partition info
 *	can be altered while IO is in progress... as soon as we send
 *	a buf, its partitioning can be invalid before it gets to the
 *	device. Probably the right fix is to move partitioning out
 *	of the driver entirely.
 */

static void
sd_mapblockaddr_iostart(int index, struct sd_lun *un, struct buf *bp)
{
	diskaddr_t	nblocks;	/* #blocks in the given partition */
	daddr_t		blocknum;	/* Block number specified by the buf */
	size_t		requested_nblocks;
	size_t		available_nblocks;
	int		partition;
	diskaddr_t	partition_offset;
	struct sd_xbuf *xp;


	ASSERT(un != NULL);
	ASSERT(bp != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	SD_TRACE(SD_LOG_IO_PARTITION, un,
	    "sd_mapblockaddr_iostart: entry: buf:0x%p\n", bp);

	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);

	/*
	 * If the geometry is not indicated as valid, attempt to access
	 * the unit & verify the geometry/label. This can be the case for
	 * removable-media devices, or if the device was opened in
	 * NDELAY/NONBLOCK mode.
	 */
	if (!SD_IS_VALID_LABEL(un) &&
	    (sd_ready_and_valid(un) != SD_READY_VALID)) {
		/*
		 * For removable devices it is possible to start an I/O
		 * without media by opening the device in nodelay mode.
		 * Also for writable CDs there can be many scenarios where
		 * there is no geometry yet but volume manager is trying to
		 * issue a read() just because it can see TOC on the CD. So
		 * do not print a message for removables.
		 */
		if (!un->un_f_has_removable_media) {
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "i/o to invalid geometry\n");
		}
		bioerror(bp, EIO);
		bp->b_resid = bp->b_bcount;
		SD_BEGIN_IODONE(index, un, bp);
		return;
	}

	partition = SDPART(bp->b_edev);

	nblocks = 0;
	(void) cmlb_partinfo(un->un_cmlbhandle, partition,
	    &nblocks, &partition_offset, NULL, NULL, (void *)SD_PATH_DIRECT);

	/*
	 * blocknum is the starting block number of the request. At this
	 * point it is still relative to the start of the minor device.
	 */
	blocknum = xp->xb_blkno;

	/*
	 * Legacy: If the starting block number is one past the last block
	 * in the partition, do not set B_ERROR in the buf.
	 */
	if (blocknum == nblocks) {
		goto error_exit;
	}

	/*
	 * Confirm that the first block of the request lies within the
	 * partition limits. Also the requested number of bytes must be
	 * a multiple of the system block size.
	 */
	if ((blocknum < 0) || (blocknum >= nblocks) ||
	    ((bp->b_bcount & (un->un_sys_blocksize - 1)) != 0)) {
		bp->b_flags |= B_ERROR;
		goto error_exit;
	}

	/*
	 * If the requested # blocks exceeds the available # blocks, that
	 * is an overrun of the partition.
	 */
	requested_nblocks = SD_BYTES2SYSBLOCKS(un, bp->b_bcount);
	available_nblocks = (size_t)(nblocks - blocknum);
	ASSERT(nblocks >= blocknum);

	if (requested_nblocks > available_nblocks) {
		/*
		 * Allocate an "overrun" buf to allow the request to proceed
		 * for the amount of space available in the partition. The
		 * amount not transferred will be added into the b_resid
		 * when the operation is complete. The overrun buf
		 * replaces the original buf here, and the original buf
		 * is saved inside the overrun buf, for later use.
		 */
		size_t resid = SD_SYSBLOCKS2BYTES(un,
		    (offset_t)(requested_nblocks - available_nblocks));
		size_t count = bp->b_bcount - resid;
		/*
		 * Note: count is an unsigned entity and thus can never
		 * be negative, so ASSERT that the original values are
		 * consistent.
		 */
		ASSERT(bp->b_bcount >= resid);

		bp = sd_bioclone_alloc(bp, count, blocknum,
		    (int (*)(struct buf *)) sd_mapblockaddr_iodone);
		xp = SD_GET_XBUF(bp); /* Update for 'new' bp! */
		ASSERT(xp != NULL);
	}

	/* At this point there should be no residual for this buf. */
	ASSERT(bp->b_resid == 0);

	/* Convert the block number to an absolute address. */
	xp->xb_blkno += partition_offset;

	SD_NEXT_IOSTART(index, un, bp);

	SD_TRACE(SD_LOG_IO_PARTITION, un,
	    "sd_mapblockaddr_iostart: exit 0: buf:0x%p\n", bp);

	return;

error_exit:
	bp->b_resid = bp->b_bcount;
	SD_BEGIN_IODONE(index, un, bp);
	SD_TRACE(SD_LOG_IO_PARTITION, un,
	    "sd_mapblockaddr_iostart: exit 1: buf:0x%p\n", bp);
}


/*
 * Function: sd_mapblockaddr_iodone
 *
 * Description: Completion-side processing for partition management.
 *
 * Context: May be called under interrupt context
 */

static void
sd_mapblockaddr_iodone(int index, struct sd_lun *un, struct buf *bp)
{
	/* int	partition; */	/* Not used, see below. */
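	/*
	 * (Worked example, illustrative only: if the original buf asked
	 * for 0x20000 bytes but only 0x10000 bytes fit in the partition,
	 * the overrun buf allocated above carries b_bcount = 0x10000.
	 * On completion, obp->b_resid = 0x20000 - (0x10000 - bp->b_resid),
	 * so even a fully successful partial transfer still reports the
	 * 0x10000 bytes that were never attempted.)
	 */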
	ASSERT(un != NULL);
	ASSERT(bp != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	SD_TRACE(SD_LOG_IO_PARTITION, un,
	    "sd_mapblockaddr_iodone: entry: buf:0x%p\n", bp);

	if (bp->b_iodone == (int (*)(struct buf *)) sd_mapblockaddr_iodone) {
		/*
		 * We have an "overrun" buf to deal with...
		 */
		struct sd_xbuf	*xp;
		struct buf	*obp;	/* ptr to the original buf */

		xp = SD_GET_XBUF(bp);
		ASSERT(xp != NULL);

		/* Retrieve the pointer to the original buf */
		obp = (struct buf *)xp->xb_private;
		ASSERT(obp != NULL);

		obp->b_resid = obp->b_bcount - (bp->b_bcount - bp->b_resid);
		bioerror(obp, bp->b_error);

		sd_bioclone_free(bp);

		/*
		 * Get back the original buf.
		 * Note that since the restoration of xb_blkno below
		 * was removed, the sd_xbuf is not needed.
		 */
		bp = obp;
		/*
		 * xp = SD_GET_XBUF(bp);
		 * ASSERT(xp != NULL);
		 */
	}

	/*
	 * Convert xp->xb_blkno back to a minor-device relative value.
	 * Note: this has been commented out, as it is not needed in the
	 * current implementation of the driver (ie, this function is at
	 * the top of the layering chains, so the info would be discarded)
	 * and it is in the "hot" IO path.
	 *
	 * partition = getminor(bp->b_edev) & SDPART_MASK;
	 * xp->xb_blkno -= un->un_offset[partition];
	 */

	SD_NEXT_IODONE(index, un, bp);

	SD_TRACE(SD_LOG_IO_PARTITION, un,
	    "sd_mapblockaddr_iodone: exit: buf:0x%p\n", bp);
}


/*
 * Function: sd_mapblocksize_iostart
 *
 * Description: Convert between system block size (un->un_sys_blocksize)
 *		and target block size (un->un_tgt_blocksize).
 *
 * Context: Can sleep to allocate resources.
 *
 * Assumptions: A higher layer has already performed any partition validation,
 *	and converted the xp->xb_blkno to an absolute value relative
 *	to the start of the device.
 *
 *	It is also assumed that the higher layer has implemented
 *	an "overrun" mechanism for the case where the request would
 *	read/write beyond the end of a partition. In this case we
 *	assume (and ASSERT) that bp->b_resid == 0.
 *
 *	Note: The implementation for this routine assumes the target
 *	block size remains constant between allocation and transport.
 */

static void
sd_mapblocksize_iostart(int index, struct sd_lun *un, struct buf *bp)
{
	struct sd_mapblocksize_info	*bsp;
	struct sd_xbuf			*xp;
	offset_t	first_byte;
	daddr_t		start_block, end_block;
	daddr_t		request_bytes;
	ushort_t	is_aligned = FALSE;

	ASSERT(un != NULL);
	ASSERT(bp != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT(bp->b_resid == 0);

	SD_TRACE(SD_LOG_IO_RMMEDIA, un,
	    "sd_mapblocksize_iostart: entry: buf:0x%p\n", bp);

	/*
	 * For a non-writable CD, a write request is an error
	 */
	if (ISCD(un) && ((bp->b_flags & B_READ) == 0) &&
	    (un->un_f_mmc_writable_media == FALSE)) {
		bioerror(bp, EIO);
		bp->b_resid = bp->b_bcount;
		SD_BEGIN_IODONE(index, un, bp);
		return;
	}

	/*
	 * We do not need a shadow buf if the device is using
	 * un->un_sys_blocksize as its block size or if bcount == 0.
	 * In this case there is no layer-private data block allocated.
	 */
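	/*
	 * (Illustration, assuming a 512-byte system block size and a
	 * 2048-byte target block size: a request for 1024 bytes at
	 * xb_blkno 3 gives first_byte = 1536, start_block = 0,
	 * end_block = 2, and request_bytes = 4096 in the computations
	 * below. Since 1536 is not a multiple of 2048 the request is
	 * unaligned, so a shadow buf is allocated and mbs_copy_offset
	 * is set to 1536.)
	 */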
	if ((un->un_tgt_blocksize == un->un_sys_blocksize) ||
	    (bp->b_bcount == 0)) {
		goto done;
	}

#if defined(__i386) || defined(__amd64)
	/* We do not support non-block-aligned transfers for ROD devices */
	ASSERT(!ISROD(un));
#endif

	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);

	SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: "
	    "tgt_blocksize:0x%x sys_blocksize: 0x%x\n",
	    un->un_tgt_blocksize, un->un_sys_blocksize);
	SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: "
	    "request start block:0x%x\n", xp->xb_blkno);
	SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: "
	    "request len:0x%x\n", bp->b_bcount);

	/*
	 * Allocate the layer-private data area for the mapblocksize layer.
	 * Layers are allowed to use the xb_private member of the sd_xbuf
	 * struct to store the pointer to their layer-private data block, but
	 * each layer also has the responsibility of restoring the prior
	 * contents of xb_private before returning the buf/xbuf to the
	 * higher layer that sent it.
	 *
	 * Here we save the prior contents of xp->xb_private into the
	 * bsp->mbs_oprivate field of our layer-private data area. This value
	 * is restored by sd_mapblocksize_iodone() just prior to freeing up
	 * the layer-private area and returning the buf/xbuf to the layer
	 * that sent it.
	 *
	 * Note that here we use kmem_zalloc for the allocation as there are
	 * parts of the mapblocksize code that expect certain fields to be
	 * zero unless explicitly set to a required value.
	 */
	bsp = kmem_zalloc(sizeof (struct sd_mapblocksize_info), KM_SLEEP);
	bsp->mbs_oprivate = xp->xb_private;
	xp->xb_private = bsp;

	/*
	 * This treats the data on the disk (target) as an array of bytes.
	 * first_byte is the byte offset, from the beginning of the device,
	 * to the location of the request. This is converted from a
	 * un->un_sys_blocksize block address to a byte offset, and then back
	 * to a block address based upon a un->un_tgt_blocksize block size.
	 *
	 * xp->xb_blkno should be absolute upon entry into this function,
	 * but it is based upon partitions that use the "system"
	 * block size. It must be adjusted to reflect the block size of
	 * the target.
	 *
	 * Note that end_block is actually the block that follows the last
	 * block of the request, but that's what is needed for the computation.
	 */
	first_byte = SD_SYSBLOCKS2BYTES(un, (offset_t)xp->xb_blkno);
	start_block = xp->xb_blkno = first_byte / un->un_tgt_blocksize;
	end_block = (first_byte + bp->b_bcount + un->un_tgt_blocksize - 1) /
	    un->un_tgt_blocksize;

	/* request_bytes is rounded up to a multiple of the target block size */
	request_bytes = (end_block - start_block) * un->un_tgt_blocksize;

	/*
	 * See if the starting address of the request and the request
	 * length are aligned on a un->un_tgt_blocksize boundary. If aligned
	 * then we do not need to allocate a shadow buf to handle the request.
	 */
	if (((first_byte % un->un_tgt_blocksize) == 0) &&
	    ((bp->b_bcount % un->un_tgt_blocksize) == 0)) {
		is_aligned = TRUE;
	}

	if ((bp->b_flags & B_READ) == 0) {
		/*
		 * Lock the range for a write operation. An aligned request is
		 * considered a simple write; otherwise the request must be a
		 * read-modify-write.
		 */
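		/*
		 * (With the 512/2048 illustration above, the unaligned
		 * write locks target blocks 0 through 1 with SD_WTYPE_RMW;
		 * the matching sd_range_unlock() is issued later in
		 * sd_mapblocksize_iodone().)
		 */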
		bsp->mbs_wmp = sd_range_lock(un, start_block, end_block - 1,
		    (is_aligned == TRUE) ? SD_WTYPE_SIMPLE : SD_WTYPE_RMW);
	}

	/*
	 * Alloc a shadow buf if the request is not aligned. Also, this is
	 * where the READ command is generated for a read-modify-write. (The
	 * write phase is deferred until after the read completes.)
	 */
	if (is_aligned == FALSE) {

		struct sd_mapblocksize_info	*shadow_bsp;
		struct sd_xbuf	*shadow_xp;
		struct buf	*shadow_bp;

		/*
		 * Allocate the shadow buf and its associated xbuf. Note that
		 * after this call the xb_blkno value in both the original
		 * buf's sd_xbuf _and_ the shadow buf's sd_xbuf will be the
		 * same: absolute, relative to the start of the device, and
		 * adjusted for the target block size. The b_blkno in the
		 * shadow buf will also be set to this value. We should never
		 * change b_blkno in the original bp however.
		 *
		 * Note also that the shadow buf will always need to be a
		 * READ command, regardless of whether the incoming command
		 * is a READ or a WRITE.
		 */
		shadow_bp = sd_shadow_buf_alloc(bp, request_bytes, B_READ,
		    xp->xb_blkno,
		    (int (*)(struct buf *)) sd_mapblocksize_iodone);

		shadow_xp = SD_GET_XBUF(shadow_bp);

		/*
		 * Allocate the layer-private data for the shadow buf.
		 * (No need to preserve xb_private in the shadow xbuf.)
		 */
		shadow_xp->xb_private = shadow_bsp =
		    kmem_zalloc(sizeof (struct sd_mapblocksize_info), KM_SLEEP);

		/*
		 * bsp->mbs_copy_offset is used later by sd_mapblocksize_iodone
		 * to figure out where the start of the user data is (based
		 * upon the system block size) in the data returned by the
		 * READ command (which will be based upon the target
		 * blocksize). Note that this is only really used if the
		 * request is unaligned.
		 */
		bsp->mbs_copy_offset = (ssize_t)(first_byte -
		    ((offset_t)xp->xb_blkno * un->un_tgt_blocksize));
		ASSERT((bsp->mbs_copy_offset >= 0) &&
		    (bsp->mbs_copy_offset < un->un_tgt_blocksize));

		shadow_bsp->mbs_copy_offset = bsp->mbs_copy_offset;

		shadow_bsp->mbs_layer_index = bsp->mbs_layer_index = index;

		/* Transfer the wmap (if any) to the shadow buf */
		shadow_bsp->mbs_wmp = bsp->mbs_wmp;
		bsp->mbs_wmp = NULL;

		/*
		 * The shadow buf goes on from here in place of the
		 * original buf.
		 */
		shadow_bsp->mbs_orig_bp = bp;
		bp = shadow_bp;
	}

	SD_INFO(SD_LOG_IO_RMMEDIA, un,
	    "sd_mapblocksize_iostart: tgt start block:0x%x\n", xp->xb_blkno);
	SD_INFO(SD_LOG_IO_RMMEDIA, un,
	    "sd_mapblocksize_iostart: tgt request len:0x%x\n",
	    request_bytes);
	SD_INFO(SD_LOG_IO_RMMEDIA, un,
	    "sd_mapblocksize_iostart: shadow buf:0x%p\n", bp);

done:
	SD_NEXT_IOSTART(index, un, bp);

	SD_TRACE(SD_LOG_IO_RMMEDIA, un,
	    "sd_mapblocksize_iostart: exit: buf:0x%p\n", bp);
}


/*
 * Function: sd_mapblocksize_iodone
 *
 * Description: Completion side processing for block-size mapping.
 *
 * Context: May be called under interrupt context
 */

static void
sd_mapblocksize_iodone(int index, struct sd_lun *un, struct buf *bp)
{
	struct sd_mapblocksize_info	*bsp;
	struct sd_xbuf	*xp;
	struct sd_xbuf	*orig_xp;	/* sd_xbuf for the original buf */
	struct buf	*orig_bp;	/* ptr to the original buf */
	offset_t	shadow_end;
	offset_t	request_end;
	offset_t	shadow_start;
	ssize_t		copy_offset;
	size_t		copy_length;
	size_t		shortfall;
	uint_t		is_write;	/* TRUE if this bp is a WRITE */
	uint_t		has_wmap;	/* TRUE if this bp has a wmap */

	ASSERT(un != NULL);
	ASSERT(bp != NULL);

	SD_TRACE(SD_LOG_IO_RMMEDIA, un,
	    "sd_mapblocksize_iodone: entry: buf:0x%p\n", bp);

	/*
	 * There is no shadow buf or layer-private data if the target is
	 * using un->un_sys_blocksize as its block size or if bcount == 0.
	 */
	if ((un->un_tgt_blocksize == un->un_sys_blocksize) ||
	    (bp->b_bcount == 0)) {
		goto exit;
	}

	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);

	/* Retrieve the pointer to the layer-private data area from the xbuf. */
	bsp = xp->xb_private;

	is_write = ((bp->b_flags & B_READ) == 0) ? TRUE : FALSE;
	has_wmap = (bsp->mbs_wmp != NULL) ? TRUE : FALSE;

	if (is_write) {
		/*
		 * For a WRITE request we must free up the block range that
		 * we have locked up. This holds regardless of whether this is
		 * an aligned write request or a read-modify-write request.
		 */
		sd_range_unlock(un, bsp->mbs_wmp);
		bsp->mbs_wmp = NULL;
	}

	if ((bp->b_iodone != (int(*)(struct buf *))sd_mapblocksize_iodone)) {
		/*
		 * An aligned read or write command will have no shadow buf;
		 * there is not much else to do with it.
		 */
		goto done;
	}

	orig_bp = bsp->mbs_orig_bp;
	ASSERT(orig_bp != NULL);
	orig_xp = SD_GET_XBUF(orig_bp);
	ASSERT(orig_xp != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	if (!is_write && has_wmap) {
		/*
		 * A READ with a wmap means this is the READ phase of a
		 * read-modify-write. If an error occurred on the READ then
		 * we do not proceed with the WRITE phase or copy any data.
		 * Just release the write maps and return with an error.
		 */
		if ((bp->b_resid != 0) || (bp->b_error != 0)) {
			orig_bp->b_resid = orig_bp->b_bcount;
			bioerror(orig_bp, bp->b_error);
			sd_range_unlock(un, bsp->mbs_wmp);
			goto freebuf_done;
		}
	}

	/*
	 * Here is where we set up to copy the data from the shadow buf
	 * into the space associated with the original buf.
	 *
	 * To deal with the conversion between block sizes, these
	 * computations treat the data as an array of bytes, with the
	 * first byte (byte 0) corresponding to the first byte in the
	 * first block on the disk.
	 */

	/*
	 * shadow_start and shadow_end delimit the byte range of the data
	 * returned with the shadow IO request.
	 */
	shadow_start = SD_TGTBLOCKS2BYTES(un, (offset_t)xp->xb_blkno);
	shadow_end = shadow_start + bp->b_bcount - bp->b_resid;

	/*
	 * copy_offset gives the offset (in bytes) from the start of the
	 * first block of the READ request to the beginning of the data.
	 * We retrieve this value from the shadow buf's layer-private data,
	 * where it was saved by sd_mapblocksize_iostart(). copy_length
	 * gives the amount of data to be copied (in bytes).
	 */
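	/*
	 * (Continuing the earlier 512/2048 illustration: with copy_offset
	 * 1536 and an original request of 1024 bytes, the shadow buf holds
	 * 4096 bytes and the user's data occupies bytes [1536, 2560) of
	 * it; request_end below is shadow_start + 1536 + 1024.)
	 */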
	copy_offset = bsp->mbs_copy_offset;
	ASSERT((copy_offset >= 0) && (copy_offset < un->un_tgt_blocksize));
	copy_length = orig_bp->b_bcount;
	request_end = shadow_start + copy_offset + orig_bp->b_bcount;

	/*
	 * Set up the resid and error fields of orig_bp as appropriate.
	 */
	if (shadow_end >= request_end) {
		/* We got all the requested data; set resid to zero */
		orig_bp->b_resid = 0;
	} else {
		/*
		 * We failed to get enough data to fully satisfy the original
		 * request. Just copy back whatever data we got and set
		 * up the residual and error code as required.
		 *
		 * 'shortfall' is the amount by which the data received with
		 * the shadow buf has "fallen short" of the requested amount.
		 */
		shortfall = (size_t)(request_end - shadow_end);

		if (shortfall > orig_bp->b_bcount) {
			/*
			 * We did not get enough data to even partially
			 * fulfill the original request. The residual is
			 * equal to the amount requested.
			 */
			orig_bp->b_resid = orig_bp->b_bcount;
		} else {
			/*
			 * We did not get all the data that we requested
			 * from the device, but we will try to return what
			 * portion we did get.
			 */
			orig_bp->b_resid = shortfall;
		}
		ASSERT(copy_length >= orig_bp->b_resid);
		copy_length -= orig_bp->b_resid;
	}

	/* Propagate the error code from the shadow buf to the original buf */
	bioerror(orig_bp, bp->b_error);

	if (is_write) {
		goto freebuf_done;	/* No data copying for a WRITE */
	}

	if (has_wmap) {
		/*
		 * This is a READ command from the READ phase of a
		 * read-modify-write request. We have to copy the data given
		 * by the user OVER the data returned by the READ command,
		 * then convert the command from a READ to a WRITE and send
		 * it back to the target.
		 */
		bcopy(orig_bp->b_un.b_addr, bp->b_un.b_addr + copy_offset,
		    copy_length);

		bp->b_flags &= ~((int)B_READ);	/* Convert to a WRITE */

		/*
		 * Dispatch the WRITE command to the taskq thread, which
		 * will in turn send the command to the target. When the
		 * WRITE command completes, we (sd_mapblocksize_iodone())
		 * will get called again as part of the iodone chain
		 * processing for it. Note that we will still be dealing
		 * with the shadow buf at that point.
		 */
		if (taskq_dispatch(sd_wmr_tq, sd_read_modify_write_task, bp,
		    KM_NOSLEEP) != 0) {
			/*
			 * Dispatch was successful so we are done. Return
			 * without going any higher up the iodone chain. Do
			 * not free up any layer-private data until after the
			 * WRITE completes.
			 */
			return;
		}

		/*
		 * Dispatch of the WRITE command failed; set up the error
		 * condition and send this IO back up the iodone chain.
		 */
		bioerror(orig_bp, EIO);
		orig_bp->b_resid = orig_bp->b_bcount;

	} else {
		/*
		 * This is a regular READ request (ie, not a RMW). Copy the
		 * data from the shadow buf into the original buf. The
		 * copy_offset compensates for any "misalignment" between the
		 * shadow buf (with its un->un_tgt_blocksize blocks) and the
		 * original buf (with its un->un_sys_blocksize blocks).
		 */
		bcopy(bp->b_un.b_addr + copy_offset, orig_bp->b_un.b_addr,
		    copy_length);
	}

freebuf_done:

	/*
	 * At this point we still have both the shadow buf AND the original
	 * buf to deal with, as well as the layer-private data area in each.
	 * Local variables are as follows:
	 *
	 * bp -- points to shadow buf
	 * xp -- points to xbuf of shadow buf
	 * bsp -- points to layer-private data area of shadow buf
	 * orig_bp -- points to original buf
	 *
	 * First free the shadow buf and its associated xbuf, then free the
	 * layer-private data area from the shadow buf. There is no need to
	 * restore xb_private in the shadow xbuf.
	 */
	sd_shadow_buf_free(bp);
	kmem_free(bsp, sizeof (struct sd_mapblocksize_info));

	/*
	 * Now update the local variables to point to the original buf, xbuf,
	 * and layer-private area.
	 */
	bp = orig_bp;
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);
	ASSERT(xp == orig_xp);
	bsp = xp->xb_private;
	ASSERT(bsp != NULL);

done:
	/*
	 * Restore xb_private to whatever it was set to by the next higher
	 * layer in the chain, then free the layer-private data area.
	 */
	xp->xb_private = bsp->mbs_oprivate;
	kmem_free(bsp, sizeof (struct sd_mapblocksize_info));

exit:
	SD_TRACE(SD_LOG_IO_RMMEDIA, SD_GET_UN(bp),
	    "sd_mapblocksize_iodone: calling SD_NEXT_IODONE: buf:0x%p\n", bp);

	SD_NEXT_IODONE(index, un, bp);
}


/*
 * Function: sd_checksum_iostart
 *
 * Description: A stub function for a layer that's currently not used.
 *		For now just a placeholder.
 *
 * Context: Kernel thread context
 */

static void
sd_checksum_iostart(int index, struct sd_lun *un, struct buf *bp)
{
	ASSERT(un != NULL);
	ASSERT(bp != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	SD_NEXT_IOSTART(index, un, bp);
}


/*
 * Function: sd_checksum_iodone
 *
 * Description: A stub function for a layer that's currently not used.
 *		For now just a placeholder.
 *
 * Context: May be called under interrupt context
 */

static void
sd_checksum_iodone(int index, struct sd_lun *un, struct buf *bp)
{
	ASSERT(un != NULL);
	ASSERT(bp != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	SD_NEXT_IODONE(index, un, bp);
}


/*
 * Function: sd_checksum_uscsi_iostart
 *
 * Description: A stub function for a layer that's currently not used.
 *		For now just a placeholder.
 *
 * Context: Kernel thread context
 */

static void
sd_checksum_uscsi_iostart(int index, struct sd_lun *un, struct buf *bp)
{
	ASSERT(un != NULL);
	ASSERT(bp != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	SD_NEXT_IOSTART(index, un, bp);
}


/*
 * Function: sd_checksum_uscsi_iodone
 *
 * Description: A stub function for a layer that's currently not used.
 *		For now just a placeholder.
 *
 * Context: May be called under interrupt context
 */

static void
sd_checksum_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp)
{
	ASSERT(un != NULL);
	ASSERT(bp != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	SD_NEXT_IODONE(index, un, bp);
}


/*
 * Function: sd_pm_iostart
 *
 * Description: iostart-side routine for power management.
 *
 * Context: Kernel thread context
 */

static void
sd_pm_iostart(int index, struct sd_lun *un, struct buf *bp)
{
	ASSERT(un != NULL);
	ASSERT(bp != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT(!mutex_owned(&un->un_pm_mutex));

	SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: entry\n");

	if (sd_pm_entry(un) != DDI_SUCCESS) {
		/*
		 * Set up to return the failed buf back up the 'iodone'
		 * side of the calling chain.
		 */
		bioerror(bp, EIO);
		bp->b_resid = bp->b_bcount;

		SD_BEGIN_IODONE(index, un, bp);

		SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: exit\n");
		return;
	}

	SD_NEXT_IOSTART(index, un, bp);

	SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: exit\n");
}


/*
 * Function: sd_pm_iodone
 *
 * Description: iodone-side routine for power management.
 *
 * Context: may be called from interrupt context
 */

static void
sd_pm_iodone(int index, struct sd_lun *un, struct buf *bp)
{
	ASSERT(un != NULL);
	ASSERT(bp != NULL);
	ASSERT(!mutex_owned(&un->un_pm_mutex));

	SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iodone: entry\n");

	/*
	 * After attach the following flag is only read, so don't
	 * take the penalty of acquiring a mutex for it.
	 */
	if (un->un_f_pm_is_enabled == TRUE) {
		sd_pm_exit(un);
	}

	SD_NEXT_IODONE(index, un, bp);

	SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iodone: exit\n");
}


/*
 * Function: sd_core_iostart
 *
 * Description: Primary driver function for enqueuing buf(9S) structs from
 *		the system and initiating IO to the target device
 *
 * Context: Kernel thread context. Can sleep.
 *
 * Assumptions: - The given xp->xb_blkno is absolute
 *		(ie, relative to the start of the device).
 *	- The IO is to be done using the native blocksize of
 *		the device, as specified in un->un_tgt_blocksize.
 */
/* ARGSUSED */
static void
sd_core_iostart(int index, struct sd_lun *un, struct buf *bp)
{
	struct sd_xbuf *xp;

	ASSERT(un != NULL);
	ASSERT(bp != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT(bp->b_resid == 0);

	SD_TRACE(SD_LOG_IO_CORE, un, "sd_core_iostart: entry: bp:0x%p\n", bp);

	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);

	mutex_enter(SD_MUTEX(un));

	/*
	 * If we are currently in the failfast state, fail any new IO
	 * that has B_FAILFAST set, then return.
	 */
	if ((bp->b_flags & B_FAILFAST) &&
	    (un->un_failfast_state == SD_FAILFAST_ACTIVE)) {
		mutex_exit(SD_MUTEX(un));
		bioerror(bp, EIO);
		bp->b_resid = bp->b_bcount;
		SD_BEGIN_IODONE(index, un, bp);
		return;
	}

	if (SD_IS_DIRECT_PRIORITY(xp)) {
		/*
		 * Priority command -- transport it immediately.
		 *
		 * Note: We may want to assert that USCSI_DIAGNOSE is set,
		 * because all direct priority commands should be associated
		 * with error recovery actions which we don't want to retry.
		 */
		sd_start_cmds(un, bp);
	} else {
		/*
		 * Normal command -- add it to the wait queue, then start
		 * transporting commands from the wait queue.
		 */
		sd_add_buf_to_waitq(un, bp);
		SD_UPDATE_KSTATS(un, kstat_waitq_enter, bp);
		sd_start_cmds(un, NULL);
	}

	mutex_exit(SD_MUTEX(un));

	SD_TRACE(SD_LOG_IO_CORE, un, "sd_core_iostart: exit: bp:0x%p\n", bp);
}


/*
 * Function: sd_init_cdb_limits
 *
 * Description: This is to handle scsi_pkt initialization differences
 *		between the driver platforms.
 *
 *	Legacy behaviors:
 *
 *	If the block number or the sector count exceeds the
 *	capabilities of a Group 0 command, shift over to a
 *	Group 1 command. We don't blindly use Group 1
 *	commands because a) some drives (CDC Wren IVs) get a
 *	bit confused, and b) there is probably a fair amount
 *	of speed difference for a target to receive and decode
 *	a 10 byte command instead of a 6 byte command.
 *
 *	The xfer time difference of 6 vs 10 byte CDBs is
 *	still significant so this code is still worthwhile.
 *	10 byte CDBs are very inefficient with the fas HBA driver
 *	and older disks. Each CDB byte took 1 usec with some
 *	popular disks.
 *
 * Context: Must be called at attach time
 */

static void
sd_init_cdb_limits(struct sd_lun *un)
{
	int hba_cdb_limit;

	/*
	 * Use CDB_GROUP1 commands for most devices except for
	 * parallel SCSI fixed drives in which case we get better
	 * performance using CDB_GROUP0 commands (where applicable).
	 */
	un->un_mincdb = SD_CDB_GROUP1;
#if !defined(__fibre)
	if (!un->un_f_is_fibre && !un->un_f_cfg_is_atapi && !ISROD(un) &&
	    !un->un_f_has_removable_media) {
		un->un_mincdb = SD_CDB_GROUP0;
	}
#endif

	/*
	 * Try to read the max-cdb-length supported by HBA.
	 */
	un->un_max_hba_cdb = scsi_ifgetcap(SD_ADDRESS(un), "max-cdb-length", 1);
	if (0 >= un->un_max_hba_cdb) {
		un->un_max_hba_cdb = CDB_GROUP4;
		hba_cdb_limit = SD_CDB_GROUP4;
	} else if (0 < un->un_max_hba_cdb &&
	    un->un_max_hba_cdb < CDB_GROUP1) {
		hba_cdb_limit = SD_CDB_GROUP0;
	} else if (CDB_GROUP1 <= un->un_max_hba_cdb &&
	    un->un_max_hba_cdb < CDB_GROUP5) {
		hba_cdb_limit = SD_CDB_GROUP1;
	} else if (CDB_GROUP5 <= un->un_max_hba_cdb &&
	    un->un_max_hba_cdb < CDB_GROUP4) {
		hba_cdb_limit = SD_CDB_GROUP5;
	} else {
		hba_cdb_limit = SD_CDB_GROUP4;
	}

	/*
	 * Use CDB_GROUP5 commands for removable devices. Use CDB_GROUP4
	 * commands for fixed disks unless we are building for a 32 bit
	 * kernel.
	 */
#ifdef _LP64
	un->un_maxcdb = (un->un_f_has_removable_media) ? SD_CDB_GROUP5 :
	    min(hba_cdb_limit, SD_CDB_GROUP4);
#else
	un->un_maxcdb = (un->un_f_has_removable_media) ? SD_CDB_GROUP5 :
	    min(hba_cdb_limit, SD_CDB_GROUP1);
#endif

	un->un_status_len = (int)((un->un_f_arq_enabled == TRUE)
	    ? sizeof (struct scsi_arq_status) : 1);
	un->un_cmd_timeout = (ushort_t)sd_io_time;
	un->un_uscsi_timeout = ((ISCD(un)) ? 2 : 1) * un->un_cmd_timeout;
}
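/*
 * (Illustration, not driver code: an HBA whose "max-cdb-length"
 * capability reports 10 falls into the CDB_GROUP1 range above, so
 * hba_cdb_limit becomes SD_CDB_GROUP1 and a fixed disk on a 64-bit
 * kernel ends up with un_maxcdb = SD_CDB_GROUP1, i.e. at most
 * 10-byte READ(10)/WRITE(10) CDBs.)
 */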

/*
 * Function: sd_initpkt_for_buf
 *
 * Description: Allocate and initialize for transport a scsi_pkt struct,
 *		based upon the info specified in the given buf struct.
 *
 *	Assumes the xb_blkno in the request is absolute (ie,
 *	relative to the start of the device (NOT partition!)).
 *	Also assumes that the request is using the native block
 *	size of the device (as returned by the READ CAPACITY
 *	command).
 *
 * Return Code: SD_PKT_ALLOC_SUCCESS
 *	SD_PKT_ALLOC_FAILURE
 *	SD_PKT_ALLOC_FAILURE_NO_DMA
 *	SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL
 *
 * Context: Kernel thread and may be called from software interrupt context
 *	as part of a sdrunout callback. This function may not block or
 *	call routines that block
 */

static int
sd_initpkt_for_buf(struct buf *bp, struct scsi_pkt **pktpp)
{
	struct sd_xbuf	*xp;
	struct scsi_pkt *pktp = NULL;
	struct sd_lun	*un;
	size_t		blockcount;
	daddr_t		startblock;
	int		rval;
	int		cmd_flags;

	ASSERT(bp != NULL);
	ASSERT(pktpp != NULL);
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);
	un = SD_GET_UN(bp);
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp->b_resid == 0);

	SD_TRACE(SD_LOG_IO_CORE, un,
	    "sd_initpkt_for_buf: entry: buf:0x%p\n", bp);

	mutex_exit(SD_MUTEX(un));

#if defined(__i386) || defined(__amd64)	/* DMAFREE for x86 only */
	if (xp->xb_pkt_flags & SD_XB_DMA_FREED) {
		/*
		 * Already have a scsi_pkt -- just need DMA resources.
		 * We must recompute the CDB in case the mapping returns
		 * a nonzero pkt_resid.
		 * Note: if this is a portion of a PKT_DMA_PARTIAL transfer
		 * that is being retried, the unmap/remap of the DMA resources
		 * will result in the entire transfer starting over again
		 * from the very first block.
		 */
		ASSERT(xp->xb_pktp != NULL);
		pktp = xp->xb_pktp;
	} else {
		pktp = NULL;
	}
#endif /* __i386 || __amd64 */

	startblock = xp->xb_blkno;	/* Absolute block num. */
	blockcount = SD_BYTES2TGTBLOCKS(un, bp->b_bcount);

	cmd_flags = un->un_pkt_flags | (xp->xb_pkt_flags & SD_XB_INITPKT_MASK);

	/*
	 * sd_setup_rw_pkt will determine the appropriate CDB group to use,
	 * call scsi_init_pkt, and build the CDB.
	 */
	rval = sd_setup_rw_pkt(un, &pktp, bp,
	    cmd_flags, sdrunout, (caddr_t)un,
	    startblock, blockcount);

	if (rval == 0) {
		/*
		 * Success.
		 *
		 * If partial DMA is being used and required for this transfer,
		 * set it up here.
		 */
		if ((un->un_pkt_flags & PKT_DMA_PARTIAL) != 0 &&
		    (pktp->pkt_resid != 0)) {

			/*
			 * Save the CDB length and pkt_resid for the
			 * next xfer
			 */
			xp->xb_dma_resid = pktp->pkt_resid;

			/* rezero resid */
			pktp->pkt_resid = 0;

		} else {
			xp->xb_dma_resid = 0;
		}

		pktp->pkt_flags = un->un_tagflags;
		pktp->pkt_time  = un->un_cmd_timeout;
		pktp->pkt_comp  = sdintr;

		pktp->pkt_private = bp;
		*pktpp = pktp;

		SD_TRACE(SD_LOG_IO_CORE, un,
		    "sd_initpkt_for_buf: exit: buf:0x%p\n", bp);

#if defined(__i386) || defined(__amd64)	/* DMAFREE for x86 only */
		xp->xb_pkt_flags &= ~SD_XB_DMA_FREED;
#endif

		mutex_enter(SD_MUTEX(un));
		return (SD_PKT_ALLOC_SUCCESS);

	}

	/*
	 * SD_PKT_ALLOC_FAILURE is the only expected failure code
	 * from sd_setup_rw_pkt.
	 */
	ASSERT(rval == SD_PKT_ALLOC_FAILURE);

	if (rval == SD_PKT_ALLOC_FAILURE) {
		*pktpp = NULL;
		/*
		 * Set the driver state to RWAIT to indicate the driver
		 * is waiting on resource allocations. The driver will not
		 * suspend, pm_suspend, or detach while the state is RWAIT.
		 */
		mutex_enter(SD_MUTEX(un));
		New_state(un, SD_STATE_RWAIT);

		SD_ERROR(SD_LOG_IO_CORE, un,
		    "sd_initpkt_for_buf: No pktp. exit bp:0x%p\n", bp);

		if ((bp->b_flags & B_ERROR) != 0) {
			return (SD_PKT_ALLOC_FAILURE_NO_DMA);
		}
		return (SD_PKT_ALLOC_FAILURE);
	} else {
		/*
		 * PKT_ALLOC_FAILURE_CDB_TOO_SMALL
		 *
		 * This should never happen. Maybe someone messed with the
		 * kernel's minphys?
		 */
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "Request rejected: too large for CDB: "
		    "lba:0x%08lx len:0x%08lx\n", startblock, blockcount);
		SD_ERROR(SD_LOG_IO_CORE, un,
		    "sd_initpkt_for_buf: No cp. exit bp:0x%p\n", bp);
		mutex_enter(SD_MUTEX(un));
		return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL);

	}
}


/*
 * Function: sd_destroypkt_for_buf
 *
 * Description: Free the scsi_pkt(9S) for the given bp (buf IO processing).
 *
 * Context: Kernel thread or interrupt context
 */

static void
sd_destroypkt_for_buf(struct buf *bp)
{
	ASSERT(bp != NULL);
	ASSERT(SD_GET_UN(bp) != NULL);

	SD_TRACE(SD_LOG_IO_CORE, SD_GET_UN(bp),
	    "sd_destroypkt_for_buf: entry: buf:0x%p\n", bp);

	ASSERT(SD_GET_PKTP(bp) != NULL);
	scsi_destroy_pkt(SD_GET_PKTP(bp));

	SD_TRACE(SD_LOG_IO_CORE, SD_GET_UN(bp),
	    "sd_destroypkt_for_buf: exit: buf:0x%p\n", bp);
}

/*
 * Function: sd_setup_rw_pkt
 *
 * Description: Determines appropriate CDB group for the requested LBA
 *		and transfer length, calls scsi_init_pkt, and builds
 *		the CDB. Do not use for partial DMA transfers except
 *		for the initial transfer since the CDB size must
 *		remain constant.
 *
 * Context: Kernel thread and may be called from software interrupt
 *	context as part of a sdrunout callback. This function may not
 *	block or call routines that block
 */


int
sd_setup_rw_pkt(struct sd_lun *un,
    struct scsi_pkt **pktpp, struct buf *bp, int flags,
    int (*callback)(caddr_t), caddr_t callback_arg,
    diskaddr_t lba, uint32_t blockcount)
{
	struct scsi_pkt *return_pktp;
	union scsi_cdb *cdbp;
	struct sd_cdbinfo *cp = NULL;
	int i;

	/*
	 * See which size CDB to use, based upon the request.
	 */
	for (i = un->un_mincdb; i <= un->un_maxcdb; i++) {

		/*
		 * Check lba and block count against sd_cdbtab limits.
		 * In the partial DMA case, we have to use the same size
		 * CDB for all the transfers. Check lba + blockcount
		 * against the max LBA so we know that segment of the
		 * transfer can use the CDB we select.
		 */
		if ((lba + blockcount - 1 <= sd_cdbtab[i].sc_maxlba) &&
		    (blockcount <= sd_cdbtab[i].sc_maxlen)) {

			/*
			 * The command will fit into the CDB type
			 * specified by sd_cdbtab[i].
			 */
			cp = sd_cdbtab + i;

			/*
			 * Call scsi_init_pkt so we can fill in the
			 * CDB.
			 */
			return_pktp = scsi_init_pkt(SD_ADDRESS(un), *pktpp,
			    bp, cp->sc_grpcode, un->un_status_len, 0,
			    flags, callback, callback_arg);

			if (return_pktp != NULL) {

				/*
				 * Return new value of pkt
				 */
				*pktpp = return_pktp;

				/*
				 * To be safe, zero the CDB ensuring there is
				 * no leftover data from a previous command.
				 */
				bzero(return_pktp->pkt_cdbp, cp->sc_grpcode);

				/*
				 * Handle partial DMA mapping
				 */
				if (return_pktp->pkt_resid != 0) {

					/*
					 * Not going to xfer as many blocks as
					 * originally expected
					 */
					blockcount -=
					    SD_BYTES2TGTBLOCKS(un,
					    return_pktp->pkt_resid);
				}

				cdbp = (union scsi_cdb *)return_pktp->pkt_cdbp;

				/*
				 * Set command byte based on the CDB
				 * type we matched.
				 */
				cdbp->scc_cmd = cp->sc_grpmask |
				    ((bp->b_flags & B_READ) ?
				    SCMD_READ : SCMD_WRITE);

				SD_FILL_SCSI1_LUN(un, return_pktp);

				/*
				 * Fill in LBA and length
				 */
				ASSERT((cp->sc_grpcode == CDB_GROUP1) ||
				    (cp->sc_grpcode == CDB_GROUP4) ||
				    (cp->sc_grpcode == CDB_GROUP0) ||
				    (cp->sc_grpcode == CDB_GROUP5));

				if (cp->sc_grpcode == CDB_GROUP1) {
					FORMG1ADDR(cdbp, lba);
					FORMG1COUNT(cdbp, blockcount);
					return (0);
				} else if (cp->sc_grpcode == CDB_GROUP4) {
					FORMG4LONGADDR(cdbp, lba);
					FORMG4COUNT(cdbp, blockcount);
					return (0);
				} else if (cp->sc_grpcode == CDB_GROUP0) {
					FORMG0ADDR(cdbp, lba);
					FORMG0COUNT(cdbp, blockcount);
					return (0);
				} else if (cp->sc_grpcode == CDB_GROUP5) {
					FORMG5ADDR(cdbp, lba);
					FORMG5COUNT(cdbp, blockcount);
					return (0);
				}

				/*
				 * It should be impossible to not match one
				 * of the CDB types above, so we should never
				 * reach this point. Set the CDB command byte
				 * to test-unit-ready to avoid writing
				 * to somewhere we don't intend.
				 */
				cdbp->scc_cmd = SCMD_TEST_UNIT_READY;
				return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL);
			} else {
				/*
				 * Couldn't get scsi_pkt
				 */
				return (SD_PKT_ALLOC_FAILURE);
			}
		}
	}

	/*
	 * None of the available CDB types were suitable. This really
	 * should never happen: on a 64 bit system we support
	 * READ16/WRITE16, which will hold an entire 64 bit disk address,
	 * and on a 32 bit system we will refuse to bind to a device
	 * larger than 2TB, so addresses will never be larger than 32 bits.
	 */
	return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL);
}
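/*
 * (Sketch of the calling pattern used by sd_initpkt_for_buf() above,
 * illustrative only:
 *
 *	struct scsi_pkt *pktp = NULL;
 *
 *	if (sd_setup_rw_pkt(un, &pktp, bp, cmd_flags, sdrunout,
 *	    (caddr_t)un, startblock, blockcount) == 0) {
 *		pktp->pkt_comp = sdintr;
 *		pktp->pkt_private = bp;
 *	}
 * )
 */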

/*
 * Function: sd_setup_next_rw_pkt
 *
 * Description: Setup packet for partial DMA transfers, except for the
 *		initial transfer. sd_setup_rw_pkt should be used for
 *		the initial transfer.
 *
 * Context: Kernel thread and may be called from interrupt context.
 */

int
sd_setup_next_rw_pkt(struct sd_lun *un,
    struct scsi_pkt *pktp, struct buf *bp,
    diskaddr_t lba, uint32_t blockcount)
{
	uchar_t com;
	union scsi_cdb *cdbp;
	uchar_t cdb_group_id;

	ASSERT(pktp != NULL);
	ASSERT(pktp->pkt_cdbp != NULL);

	cdbp = (union scsi_cdb *)pktp->pkt_cdbp;
	com = cdbp->scc_cmd;
	cdb_group_id = CDB_GROUPID(com);

	ASSERT((cdb_group_id == CDB_GROUPID_0) ||
	    (cdb_group_id == CDB_GROUPID_1) ||
	    (cdb_group_id == CDB_GROUPID_4) ||
	    (cdb_group_id == CDB_GROUPID_5));

	/*
	 * Move pkt to the next portion of the xfer.
	 * func is NULL_FUNC so we do not have to release
	 * the disk mutex here.
	 */
	if (scsi_init_pkt(SD_ADDRESS(un), pktp, bp, 0, 0, 0, 0,
	    NULL_FUNC, NULL) == pktp) {
		/* Success. Handle partial DMA */
		if (pktp->pkt_resid != 0) {
			blockcount -=
			    SD_BYTES2TGTBLOCKS(un, pktp->pkt_resid);
		}

		cdbp->scc_cmd = com;
		SD_FILL_SCSI1_LUN(un, pktp);
		if (cdb_group_id == CDB_GROUPID_1) {
			FORMG1ADDR(cdbp, lba);
			FORMG1COUNT(cdbp, blockcount);
			return (0);
		} else if (cdb_group_id == CDB_GROUPID_4) {
			FORMG4LONGADDR(cdbp, lba);
			FORMG4COUNT(cdbp, blockcount);
			return (0);
		} else if (cdb_group_id == CDB_GROUPID_0) {
			FORMG0ADDR(cdbp, lba);
			FORMG0COUNT(cdbp, blockcount);
			return (0);
		} else if (cdb_group_id == CDB_GROUPID_5) {
			FORMG5ADDR(cdbp, lba);
			FORMG5COUNT(cdbp, blockcount);
			return (0);
		}

		/* Unreachable */
		return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL);
	}

	/*
	 * Error setting up next portion of cmd transfer.
	 * Something is definitely very wrong and this
	 * should not happen.
	 */
	return (SD_PKT_ALLOC_FAILURE);
}

/*
 * Function: sd_initpkt_for_uscsi
 *
 * Description: Allocate and initialize for transport a scsi_pkt struct,
 *		based upon the info specified in the given uscsi_cmd struct.
 *
 * Return Code: SD_PKT_ALLOC_SUCCESS
 *	SD_PKT_ALLOC_FAILURE
 *	SD_PKT_ALLOC_FAILURE_NO_DMA
 *	SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL
 *
 * Context: Kernel thread and may be called from software interrupt context
 *	as part of a sdrunout callback. This function may not block or
 *	call routines that block
 */

static int
sd_initpkt_for_uscsi(struct buf *bp, struct scsi_pkt **pktpp)
{
	struct uscsi_cmd *uscmd;
	struct sd_xbuf	*xp;
	struct scsi_pkt	*pktp;
	struct sd_lun	*un;
	uint32_t	flags = 0;

	ASSERT(bp != NULL);
	ASSERT(pktpp != NULL);
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);
	un = SD_GET_UN(bp);
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));

	/* The pointer to the uscsi_cmd struct is expected in xb_pktinfo */
	uscmd = (struct uscsi_cmd *)xp->xb_pktinfo;
	ASSERT(uscmd != NULL);

	SD_TRACE(SD_LOG_IO_CORE, un,
	    "sd_initpkt_for_uscsi: entry: buf:0x%p\n", bp);

	/*
	 * Allocate the scsi_pkt for the command.
	 * Note: If PKT_DMA_PARTIAL flag is set, scsi_vhci binds a path
	 *	 during scsi_init_pkt time and will continue to use the
	 *	 same path as long as the same scsi_pkt is used without
	 *	 intervening scsi_dma_free(). Since uscsi commands do
	 *	 not call scsi_dmafree() before retrying a failed command,
	 *	 it is necessary to make sure the PKT_DMA_PARTIAL flag is
	 *	 NOT set, such that scsi_vhci can use another available
	 *	 path for retry. Besides, uscsi commands do not allow DMA
	 *	 breakup, so there is no need to set PKT_DMA_PARTIAL.
	 */
	if (uscmd->uscsi_rqlen > SENSE_LENGTH) {
		pktp = scsi_init_pkt(SD_ADDRESS(un), NULL,
		    ((bp->b_bcount != 0) ? bp : NULL), uscmd->uscsi_cdblen,
		    ((int)(uscmd->uscsi_rqlen) + sizeof (struct scsi_arq_status)
		    - sizeof (struct scsi_extended_sense)), 0,
		    (un->un_pkt_flags & ~PKT_DMA_PARTIAL) | PKT_XARQ,
		    sdrunout, (caddr_t)un);
	} else {
		pktp = scsi_init_pkt(SD_ADDRESS(un), NULL,
		    ((bp->b_bcount != 0) ? bp : NULL), uscmd->uscsi_cdblen,
		    sizeof (struct scsi_arq_status), 0,
		    (un->un_pkt_flags & ~PKT_DMA_PARTIAL),
		    sdrunout, (caddr_t)un);
	}

	if (pktp == NULL) {
		*pktpp = NULL;
		/*
		 * Set the driver state to RWAIT to indicate the driver
		 * is waiting on resource allocations. The driver will not
		 * suspend, pm_suspend, or detach while the state is RWAIT.
		 */
		New_state(un, SD_STATE_RWAIT);

		SD_ERROR(SD_LOG_IO_CORE, un,
		    "sd_initpkt_for_uscsi: No pktp. exit bp:0x%p\n", bp);

		if ((bp->b_flags & B_ERROR) != 0) {
			return (SD_PKT_ALLOC_FAILURE_NO_DMA);
		}
		return (SD_PKT_ALLOC_FAILURE);
	}

	/*
	 * We do not do DMA breakup for USCSI commands, so return failure
	 * here if all the needed DMA resources were not allocated.
	 */
	if ((un->un_pkt_flags & PKT_DMA_PARTIAL) &&
	    (bp->b_bcount != 0) && (pktp->pkt_resid != 0)) {
		scsi_destroy_pkt(pktp);
		SD_ERROR(SD_LOG_IO_CORE, un, "sd_initpkt_for_uscsi: "
		    "No partial DMA for USCSI. exit: buf:0x%p\n", bp);
		return (SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL);
	}

	/* Init the cdb from the given uscsi struct */
	(void) scsi_setup_cdb((union scsi_cdb *)pktp->pkt_cdbp,
	    uscmd->uscsi_cdb[0], 0, 0, 0);

	SD_FILL_SCSI1_LUN(un, pktp);

	/*
	 * Set up the optional USCSI flags. See the uscsi(7I) man page
	 * for a listing of the supported flags.
	 */
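	/*
	 * (Example, illustrative only: a caller that sets
	 * USCSI_SILENT | USCSI_DIAGNOSE in uscsi_flags ends up with
	 * FLAG_SILENT | FLAG_DIAGNOSE in pkt_flags via the
	 * translations below.)
	 */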
	if (uscmd->uscsi_flags & USCSI_SILENT) {
		flags |= FLAG_SILENT;
	}

	if (uscmd->uscsi_flags & USCSI_DIAGNOSE) {
		flags |= FLAG_DIAGNOSE;
	}

	if (uscmd->uscsi_flags & USCSI_ISOLATE) {
		flags |= FLAG_ISOLATE;
	}

	if (un->un_f_is_fibre == FALSE) {
		if (uscmd->uscsi_flags & USCSI_RENEGOT) {
			flags |= FLAG_RENEGOTIATE_WIDE_SYNC;
		}
	}

	/*
	 * Set the pkt flags here so we save time later.
	 * Note: These flags are NOT in the uscsi man page!!!
	 */
	if (uscmd->uscsi_flags & USCSI_HEAD) {
		flags |= FLAG_HEAD;
	}

	if (uscmd->uscsi_flags & USCSI_NOINTR) {
		flags |= FLAG_NOINTR;
	}

	/*
	 * For tagged queueing, things get a bit complicated.
	 * Check first for head of queue and last for ordered queue.
	 * If neither head nor ordered, use the default driver tag flags.
	 */
	if ((uscmd->uscsi_flags & USCSI_NOTAG) == 0) {
		if (uscmd->uscsi_flags & USCSI_HTAG) {
			flags |= FLAG_HTAG;
		} else if (uscmd->uscsi_flags & USCSI_OTAG) {
			flags |= FLAG_OTAG;
		} else {
			flags |= un->un_tagflags & FLAG_TAGMASK;
		}
	}

	if (uscmd->uscsi_flags & USCSI_NODISCON) {
		flags = (flags & ~FLAG_TAGMASK) | FLAG_NODISCON;
	}

	pktp->pkt_flags = flags;

	/* Transfer uscsi information to scsi_pkt */
	(void) scsi_uscsi_pktinit(uscmd, pktp);

	/* Copy the caller's CDB into the pkt... */
	bcopy(uscmd->uscsi_cdb, pktp->pkt_cdbp, uscmd->uscsi_cdblen);

	if (uscmd->uscsi_timeout == 0) {
		pktp->pkt_time = un->un_uscsi_timeout;
	} else {
		pktp->pkt_time = uscmd->uscsi_timeout;
	}

	/* need it later to identify USCSI request in sdintr */
	xp->xb_pkt_flags |= SD_XB_USCSICMD;

	xp->xb_sense_resid = uscmd->uscsi_rqresid;

	pktp->pkt_private = bp;
	pktp->pkt_comp = sdintr;
	*pktpp = pktp;

	SD_TRACE(SD_LOG_IO_CORE, un,
	    "sd_initpkt_for_uscsi: exit: buf:0x%p\n", bp);

	return (SD_PKT_ALLOC_SUCCESS);
}
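/*
 * Added illustration (not in the original source): a minimal sketch of how
 * a userland caller might drive the path above via the uscsi(7I) ioctl.
 * The buffer sizes and flag choices here are illustrative assumptions.
 *
 *	#include <sys/scsi/impl/uscsi.h>
 *
 *	char cdb[CDB_GROUP0] = { 0 };		// TEST UNIT READY (0x00)
 *	char rqbuf[255];
 *	struct uscsi_cmd ucmd = { 0 };
 *
 *	ucmd.uscsi_cdb = cdb;
 *	ucmd.uscsi_cdblen = CDB_GROUP0;
 *	ucmd.uscsi_bufaddr = NULL;		// no data phase
 *	ucmd.uscsi_buflen = 0;
 *	ucmd.uscsi_rqbuf = rqbuf;		// request sense on error
 *	ucmd.uscsi_rqlen = sizeof (rqbuf);
 *	ucmd.uscsi_flags = USCSI_SILENT | USCSI_RQENABLE;
 *	ucmd.uscsi_timeout = 30;		// seconds
 *
 *	if (ioctl(fd, USCSICMD, &ucmd) != 0)	// fd: open()ed raw device
 *		// inspect ucmd.uscsi_status / ucmd.uscsi_rqstatus
 *
 * A uscsi_rqlen larger than SENSE_LENGTH is what selects the PKT_XARQ
 * allocation path in sd_initpkt_for_uscsi() above.
 */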
/*
 *    Function: sd_destroypkt_for_uscsi
 *
 * Description: Free the scsi_pkt(9S) struct for the given bp, for uscsi
 *		IOs. Also saves relevant info into the associated uscsi_cmd
 *		struct.
 *
 *     Context: May be called under interrupt context
 */

static void
sd_destroypkt_for_uscsi(struct buf *bp)
{
	struct uscsi_cmd *uscmd;
	struct sd_xbuf	*xp;
	struct scsi_pkt	*pktp;
	struct sd_lun	*un;

	ASSERT(bp != NULL);
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);
	un = SD_GET_UN(bp);
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	pktp = SD_GET_PKTP(bp);
	ASSERT(pktp != NULL);

	SD_TRACE(SD_LOG_IO_CORE, un,
	    "sd_destroypkt_for_uscsi: entry: buf:0x%p\n", bp);

	/* The pointer to the uscsi_cmd struct is expected in xb_pktinfo */
	uscmd = (struct uscsi_cmd *)xp->xb_pktinfo;
	ASSERT(uscmd != NULL);

	/* Save the status and the residual into the uscsi_cmd struct */
	uscmd->uscsi_status = ((*(pktp)->pkt_scbp) & STATUS_MASK);
	uscmd->uscsi_resid = bp->b_resid;

	/* Transfer scsi_pkt information to uscsi */
	(void) scsi_uscsi_pktfini(pktp, uscmd);

	/*
	 * If enabled, copy any saved sense data into the area specified
	 * by the uscsi command.
	 */
	if (((uscmd->uscsi_flags & USCSI_RQENABLE) != 0) &&
	    (uscmd->uscsi_rqlen != 0) && (uscmd->uscsi_rqbuf != NULL)) {
		/*
		 * Note: uscmd->uscsi_rqbuf should always point to a buffer
		 * at least SENSE_LENGTH bytes in size (see sd_send_scsi_cmd())
		 */
		uscmd->uscsi_rqstatus = xp->xb_sense_status;
		uscmd->uscsi_rqresid = xp->xb_sense_resid;
		if (uscmd->uscsi_rqlen > SENSE_LENGTH) {
			bcopy(xp->xb_sense_data, uscmd->uscsi_rqbuf,
			    MAX_SENSE_LENGTH);
		} else {
			bcopy(xp->xb_sense_data, uscmd->uscsi_rqbuf,
			    SENSE_LENGTH);
		}
	}

	/* We are done with the scsi_pkt; free it now */
	ASSERT(SD_GET_PKTP(bp) != NULL);
	scsi_destroy_pkt(SD_GET_PKTP(bp));

	SD_TRACE(SD_LOG_IO_CORE, un,
	    "sd_destroypkt_for_uscsi: exit: buf:0x%p\n", bp);
}
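/*
 * Added note (not in the original source): the two bcopy() sizes above
 * mirror the allocation in sd_initpkt_for_uscsi(). When the caller's
 * uscsi_rqlen exceeds SENSE_LENGTH the packet was allocated with PKT_XARQ,
 * so extended sense (up to MAX_SENSE_LENGTH) may have been captured;
 * otherwise only the default SENSE_LENGTH bytes are meaningful. Either
 * way, uscsi_rqresid tells the caller how many requested sense bytes were
 * NOT transferred.
 */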
/*
 *    Function: sd_bioclone_alloc
 *
 * Description: Allocate a buf(9S) and init it as per the given buf
 *		and the various arguments. The associated sd_xbuf
 *		struct is (nearly) duplicated. The struct buf *bp
 *		argument is saved in new_xp->xb_private.
 *
 *   Arguments: bp - ptr to the buf(9S) to be "shadowed"
 *		datalen - size of data area for the shadow bp
 *		blkno - starting LBA
 *		func - function pointer for b_iodone in the shadow buf. (May
 *		be NULL if none.)
 *
 * Return Code: Pointer to the allocated buf(9S) struct
 *
 *     Context: Can sleep.
 */

static struct buf *
sd_bioclone_alloc(struct buf *bp, size_t datalen,
    daddr_t blkno, int (*func)(struct buf *))
{
	struct sd_lun	*un;
	struct sd_xbuf	*xp;
	struct sd_xbuf	*new_xp;
	struct buf	*new_bp;

	ASSERT(bp != NULL);
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);
	un = SD_GET_UN(bp);
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	new_bp = bioclone(bp, 0, datalen, SD_GET_DEV(un), blkno, func,
	    NULL, KM_SLEEP);

	new_bp->b_lblkno = blkno;

	/*
	 * Allocate an xbuf for the shadow bp and copy the contents of the
	 * original xbuf into it.
	 */
	new_xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP);
	bcopy(xp, new_xp, sizeof (struct sd_xbuf));

	/*
	 * The given bp is automatically saved in the xb_private member
	 * of the new xbuf. Callers are allowed to depend on this.
	 */
	new_xp->xb_private = bp;

	new_bp->b_private = new_xp;

	return (new_bp);
}
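/*
 * Added note (not in the original source): because xb_private carries the
 * original bp, an iodone routine installed on the shadow buf can recover
 * its originator with something like:
 *
 *	struct sd_xbuf *sxp = SD_GET_XBUF(shadow_bp);
 *	struct buf *orig_bp = sxp->xb_private;
 *
 * (shadow_bp here is a hypothetical name for the buf returned above.)
 */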
/*
 *    Function: sd_shadow_buf_alloc
 *
 * Description: Allocate a buf(9S) and init it as per the given buf
 *		and the various arguments. The associated sd_xbuf
 *		struct is (nearly) duplicated. The struct buf *bp
 *		argument is saved in new_xp->xb_private.
 *
 *   Arguments: bp - ptr to the buf(9S) to be "shadowed"
 *		datalen - size of data area for the shadow bp
 *		bflags - B_READ or B_WRITE (pseudo flag)
 *		blkno - starting LBA
 *		func - function pointer for b_iodone in the shadow buf. (May
 *		be NULL if none.)
 *
 * Return Code: Pointer to the allocated buf(9S) struct
 *
 *     Context: Can sleep.
 */

static struct buf *
sd_shadow_buf_alloc(struct buf *bp, size_t datalen, uint_t bflags,
    daddr_t blkno, int (*func)(struct buf *))
{
	struct sd_lun	*un;
	struct sd_xbuf	*xp;
	struct sd_xbuf	*new_xp;
	struct buf	*new_bp;

	ASSERT(bp != NULL);
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);
	un = SD_GET_UN(bp);
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	if (bp->b_flags & (B_PAGEIO | B_PHYS)) {
		bp_mapin(bp);
	}

	bflags &= (B_READ | B_WRITE);
#if defined(__i386) || defined(__amd64)
	new_bp = getrbuf(KM_SLEEP);
	new_bp->b_un.b_addr = kmem_zalloc(datalen, KM_SLEEP);
	new_bp->b_bcount = datalen;
	new_bp->b_flags = bflags |
	    (bp->b_flags & ~(B_PAGEIO | B_PHYS | B_REMAPPED | B_SHADOW));
#else
	new_bp = scsi_alloc_consistent_buf(SD_ADDRESS(un), NULL,
	    datalen, bflags, SLEEP_FUNC, NULL);
#endif
	new_bp->av_forw = NULL;
	new_bp->av_back = NULL;
	new_bp->b_dev = bp->b_dev;
	new_bp->b_blkno = blkno;
	new_bp->b_iodone = func;
	new_bp->b_edev = bp->b_edev;
	new_bp->b_resid = 0;

	/* We need to preserve the B_FAILFAST flag */
	if (bp->b_flags & B_FAILFAST) {
		new_bp->b_flags |= B_FAILFAST;
	}

	/*
	 * Allocate an xbuf for the shadow bp and copy the contents of the
	 * original xbuf into it.
	 */
	new_xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP);
	bcopy(xp, new_xp, sizeof (struct sd_xbuf));

	/* Need later to copy data between the shadow buf & original buf! */
	new_xp->xb_pkt_flags |= PKT_CONSISTENT;

	/*
	 * The given bp is automatically saved in the xb_private member
	 * of the new xbuf. Callers are allowed to depend on this.
	 */
	new_xp->xb_private = bp;

	new_bp->b_private = new_xp;

	return (new_bp);
}
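/*
 * Added note (not in the original source): the two allocators above differ
 * only in how the shadow data area is obtained (bioclone(9F) vs. a private
 * kmem/consistent buffer), so each must be released by its matching free
 * routine. A hypothetical read-modify-write sequence would pair them as:
 *
 *	shadow = sd_shadow_buf_alloc(bp, tgt_blocksize, B_READ, blkno, fn);
 *	// ... transport shadow; fn copies data to/from bp on completion ...
 *	sd_shadow_buf_free(shadow);
 *
 * and likewise sd_bioclone_alloc() with sd_bioclone_free().
 */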
/*
 *    Function: sd_bioclone_free
 *
 * Description: Deallocate a buf(9S) that was used for 'shadow' IO operations
 *		in the larger-than-partition operation.
 *
 *     Context: May be called under interrupt context
 */

static void
sd_bioclone_free(struct buf *bp)
{
	struct sd_xbuf *xp;

	ASSERT(bp != NULL);
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);

	/*
	 * Call bp_mapout() before freeing the buf, in case a lower
	 * layer or HBA had done a bp_mapin(). We must do this here
	 * as we are the "originator" of the shadow buf.
	 */
	bp_mapout(bp);

	/*
	 * Null out b_iodone before freeing the bp, to ensure that the driver
	 * never gets confused by a stale value in this field. (Just a little
	 * extra defensiveness here.)
	 */
	bp->b_iodone = NULL;

	freerbuf(bp);

	kmem_free(xp, sizeof (struct sd_xbuf));
}

/*
 *    Function: sd_shadow_buf_free
 *
 * Description: Deallocate a buf(9S) that was used for 'shadow' IO operations.
 *
 *     Context: May be called under interrupt context
 */

static void
sd_shadow_buf_free(struct buf *bp)
{
	struct sd_xbuf *xp;

	ASSERT(bp != NULL);
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);

#if defined(__sparc)
	/*
	 * Call bp_mapout() before freeing the buf, in case a lower
	 * layer or HBA had done a bp_mapin(). We must do this here
	 * as we are the "originator" of the shadow buf.
	 */
	bp_mapout(bp);
#endif

	/*
	 * Null out b_iodone before freeing the bp, to ensure that the driver
	 * never gets confused by a stale value in this field. (Just a little
	 * extra defensiveness here.)
	 */
	bp->b_iodone = NULL;

#if defined(__i386) || defined(__amd64)
	kmem_free(bp->b_un.b_addr, bp->b_bcount);
	freerbuf(bp);
#else
	scsi_free_consistent_buf(bp);
#endif

	kmem_free(xp, sizeof (struct sd_xbuf));
}


/*
 *    Function: sd_print_transport_rejected_message
 *
 * Description: This implements the ludicrously complex rules for printing
 *		a "transport rejected" message. This is to address the
 *		specific problem of having a flood of this error message
 *		produced when a failover occurs.
 *
 *     Context: Any.
 */

static void
sd_print_transport_rejected_message(struct sd_lun *un, struct sd_xbuf *xp,
    int code)
{
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(xp != NULL);

	/*
	 * Print the "transport rejected" message under the following
	 * conditions:
	 *
	 * - Whenever the SD_LOGMASK_DIAG bit of sd_level_mask is set
	 * - The error code from scsi_transport() is NOT a TRAN_FATAL_ERROR.
	 * - If the error code IS a TRAN_FATAL_ERROR, then the message is
	 *   printed the FIRST time a TRAN_FATAL_ERROR is returned from
	 *   scsi_transport(9F) (which indicates that the target might have
	 *   gone off-line). This uses the un->un_tran_fatal_count
	 *   count, which is incremented whenever a TRAN_FATAL_ERROR is
	 *   received, and reset to zero whenever a TRAN_ACCEPT is returned
	 *   from scsi_transport().
	 *
	 * The FLAG_SILENT in the scsi_pkt must be CLEARED in ALL of
	 * the preceding cases in order for the message to be printed.
	 */
	if ((xp->xb_pktp->pkt_flags & FLAG_SILENT) == 0) {
		if ((sd_level_mask & SD_LOGMASK_DIAG) ||
		    (code != TRAN_FATAL_ERROR) ||
		    (un->un_tran_fatal_count == 1)) {
			switch (code) {
			case TRAN_BADPKT:
				scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
				    "transport rejected bad packet\n");
				break;
			case TRAN_FATAL_ERROR:
				scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
				    "transport rejected fatal error\n");
				break;
			default:
				scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
				    "transport rejected (%d)\n", code);
				break;
			}
		}
	}
}
/*
 *    Function: sd_add_buf_to_waitq
 *
 * Description: Add the given buf(9S) struct to the wait queue for the
 *		instance. If sorting is enabled, then the buf is added
 *		to the queue via an elevator sort algorithm (a la
 *		disksort(9F)). The SD_GET_BLKNO(bp) is used as the sort key.
 *		If sorting is not enabled, then the buf is just added
 *		to the end of the wait queue.
 *
 * Return Code: void
 *
 *     Context: Does not sleep/block, therefore technically can be called
 *		from any context. However if sorting is enabled then the
 *		execution time is indeterminate, and may take long if
 *		the wait queue grows large. A worked example follows the
 *		function.
 */

static void
sd_add_buf_to_waitq(struct sd_lun *un, struct buf *bp)
{
	struct buf *ap;

	ASSERT(bp != NULL);
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));

	/* If the queue is empty, add the buf as the only entry & return. */
	if (un->un_waitq_headp == NULL) {
		ASSERT(un->un_waitq_tailp == NULL);
		un->un_waitq_headp = un->un_waitq_tailp = bp;
		bp->av_forw = NULL;
		return;
	}

	ASSERT(un->un_waitq_tailp != NULL);

	/*
	 * If sorting is disabled, just add the buf to the tail end of
	 * the wait queue and return.
	 */
	if (un->un_f_disksort_disabled) {
		un->un_waitq_tailp->av_forw = bp;
		un->un_waitq_tailp = bp;
		bp->av_forw = NULL;
		return;
	}

	/*
	 * Sort through the list of requests currently on the wait queue
	 * and add the new buf request at the appropriate position.
	 *
	 * The un->un_waitq_headp is an activity chain pointer on which
	 * we keep two queues, sorted in ascending SD_GET_BLKNO() order. The
	 * first queue holds those requests which are positioned after
	 * the current SD_GET_BLKNO() (in the first request); the second holds
	 * requests which came in after their SD_GET_BLKNO() number was passed.
	 * Thus we implement a one-way scan, retracting after reaching
	 * the end of the drive to the first request on the second
	 * queue, at which time it becomes the first queue.
	 * A one-way scan is natural because of the way UNIX read-ahead
	 * blocks are allocated.
	 *
	 * If we lie after the first request, then we must locate the
	 * second request list and add ourselves to it.
	 */
	ap = un->un_waitq_headp;
	if (SD_GET_BLKNO(bp) < SD_GET_BLKNO(ap)) {
		while (ap->av_forw != NULL) {
			/*
			 * Look for an "inversion" in the (normally
			 * ascending) block numbers. This indicates
			 * the start of the second request list.
			 */
			if (SD_GET_BLKNO(ap->av_forw) < SD_GET_BLKNO(ap)) {
				/*
				 * Search the second request list for the
				 * first request at a larger block number.
				 * We go before that; however if there is
				 * no such request, we go at the end.
				 */
				do {
					if (SD_GET_BLKNO(bp) <
					    SD_GET_BLKNO(ap->av_forw)) {
						goto insert;
					}
					ap = ap->av_forw;
				} while (ap->av_forw != NULL);
				goto insert;	/* after last */
			}
			ap = ap->av_forw;
		}

		/*
		 * No inversions... we will go after the last, and
		 * be the first request in the second request list.
		 */
		goto insert;
	}

	/*
	 * Request is at/after the current request...
	 * sort in the first request list.
	 */
	while (ap->av_forw != NULL) {
		/*
		 * We want to go after the current request (1) if
		 * there is an inversion after it (i.e. it is the end
		 * of the first request list), or (2) if the next
		 * request is at a larger block number than our request.
		 */
		if ((SD_GET_BLKNO(ap->av_forw) < SD_GET_BLKNO(ap)) ||
		    (SD_GET_BLKNO(bp) < SD_GET_BLKNO(ap->av_forw))) {
			goto insert;
		}
		ap = ap->av_forw;
	}

	/*
	 * Neither a second list nor a larger request, therefore
	 * we go at the end of the first list (which is the same
	 * as the end of the whole shebang).
	 */
insert:
	bp->av_forw = ap->av_forw;
	ap->av_forw = bp;

	/*
	 * If we inserted onto the tail end of the waitq, make sure the
	 * tail pointer is updated.
	 */
	if (ap == un->un_waitq_tailp) {
		un->un_waitq_tailp = bp;
	}
}
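/*
 * Added illustration (not in the original source): a worked example of the
 * two-list elevator above, with SD_GET_BLKNO() values shown. Suppose the
 * waitq currently holds:
 *
 *	100 -> 340 -> 700 | 40 -> 60
 *
 * where '|' marks the inversion (40 < 700) that starts the second,
 * wrapped list. Then, tracing the code above:
 *
 *	insert blkno 500:  100 -> 340 -> 500 -> 700 | 40 -> 60
 *	insert blkno  50:  100 -> 340 -> 700 | 40 -> 50 -> 60
 *	insert blkno  20:  100 -> 340 -> 700 | 20 -> 40 -> 60
 *
 * 500 sorts into the first list; 50 and 20 are below the head of the
 * first list, so they sort into the second (wrapped) list.
 */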
/*
 *    Function: sd_start_cmds
 *
 * Description: Remove and transport cmds from the driver queues.
 *
 *   Arguments: un - pointer to the unit (soft state) struct for the target.
 *
 *		immed_bp - ptr to a buf to be transported immediately. Only
 *		the immed_bp is transported; bufs on the waitq are not
 *		processed and the un_retry_bp is not checked. If immed_bp is
 *		NULL, then normal queue processing is performed.
 *
 *     Context: May be called from kernel thread context, interrupt context,
 *		or runout callback context. This function may not block or
 *		call routines that block.
 */

static void
sd_start_cmds(struct sd_lun *un, struct buf *immed_bp)
{
	struct	sd_xbuf	*xp;
	struct	buf	*bp;
	void	(*statp)(kstat_io_t *);
#if defined(__i386) || defined(__amd64)	/* DMAFREE for x86 only */
	void	(*saved_statp)(kstat_io_t *);
#endif
	int	rval;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(un->un_ncmds_in_transport >= 0);
	ASSERT(un->un_throttle >= 0);

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_start_cmds: entry\n");

	do {
#if defined(__i386) || defined(__amd64)	/* DMAFREE for x86 only */
		saved_statp = NULL;
#endif

		/*
		 * If we are syncing or dumping, fail the command to
		 * avoid recursively calling back into scsi_transport().
		 * The dump I/O itself uses a separate code path so this
		 * only prevents non-dump I/O from being sent while dumping.
		 * File system sync takes place before dumping begins.
		 * During panic, filesystem I/O is allowed provided
		 * un_in_callback is <= 1. This is to prevent recursion
		 * such as sd_start_cmds -> scsi_transport -> sdintr ->
		 * sd_start_cmds and so on. See panic.c for more information
		 * about the states the system can be in during panic.
		 */
		if ((un->un_state == SD_STATE_DUMPING) ||
		    (ddi_in_panic() && (un->un_in_callback > 1))) {
			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
			    "sd_start_cmds: panicking\n");
			goto exit;
		}

		if ((bp = immed_bp) != NULL) {
			/*
			 * We have a bp that must be transported immediately.
			 * It's OK to transport the immed_bp here without doing
			 * the throttle limit check because the immed_bp is
			 * always used in a retry/recovery case. This means
			 * that we know we are not at the throttle limit by
			 * virtue of the fact that to get here we must have
			 * already gotten a command back via sdintr(). This also
			 * relies on (1) the command on un_retry_bp preventing
			 * further commands from the waitq from being issued;
			 * and (2) the code in sd_retry_command checking the
			 * throttle limit before issuing a delayed or immediate
			 * retry. This holds even if the throttle limit is
			 * currently ratcheted down from its maximum value.
			 */
			statp = kstat_runq_enter;
			if (bp == un->un_retry_bp) {
				ASSERT((un->un_retry_statp == NULL) ||
				    (un->un_retry_statp == kstat_waitq_enter) ||
				    (un->un_retry_statp ==
				    kstat_runq_back_to_waitq));
				/*
				 * If the waitq kstat was incremented when
				 * sd_set_retry_bp() queued this bp for a retry,
				 * then we must set up statp so that the waitq
				 * count will get decremented correctly below.
				 * Also we must clear un->un_retry_statp to
				 * ensure that we do not act on a stale value
				 * in this field.
				 */
				if ((un->un_retry_statp == kstat_waitq_enter) ||
				    (un->un_retry_statp ==
				    kstat_runq_back_to_waitq)) {
					statp = kstat_waitq_to_runq;
				}
#if defined(__i386) || defined(__amd64)	/* DMAFREE for x86 only */
				saved_statp = un->un_retry_statp;
#endif
				un->un_retry_statp = NULL;

				SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un,
				    "sd_start_cmds: un:0x%p: GOT retry_bp:0x%p "
				    "un_throttle:%d un_ncmds_in_transport:%d\n",
				    un, un->un_retry_bp, un->un_throttle,
				    un->un_ncmds_in_transport);
			} else {
				SD_TRACE(SD_LOG_IO_CORE, un, "sd_start_cmds: "
				    "processing priority bp:0x%p\n", bp);
			}

		} else if ((bp = un->un_waitq_headp) != NULL) {
			/*
			 * A command on the waitq is ready to go, but do not
			 * send it if:
			 *
			 * (1) the throttle limit has been reached, or
			 * (2) a retry is pending, or
			 * (3) a START_STOP_UNIT callback is pending, or
			 * (4) a callback for a SD_PATH_DIRECT_PRIORITY
			 *     command is pending.
			 *
			 * For all of these conditions, IO processing will
			 * restart after the condition is cleared.
			 */
			if (un->un_ncmds_in_transport >= un->un_throttle) {
				SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
				    "sd_start_cmds: exiting, "
				    "throttle limit reached!\n");
				goto exit;
			}
			if (un->un_retry_bp != NULL) {
				SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
				    "sd_start_cmds: exiting, retry pending!\n");
				goto exit;
			}
			if (un->un_startstop_timeid != NULL) {
				SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
				    "sd_start_cmds: exiting, "
				    "START_STOP pending!\n");
				goto exit;
			}
			if (un->un_direct_priority_timeid != NULL) {
				SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
				    "sd_start_cmds: exiting, "
				    "SD_PATH_DIRECT_PRIORITY cmd. pending!\n");
				goto exit;
			}

			/* Dequeue the command */
			un->un_waitq_headp = bp->av_forw;
			if (un->un_waitq_headp == NULL) {
				un->un_waitq_tailp = NULL;
			}
			bp->av_forw = NULL;
			statp = kstat_waitq_to_runq;
			SD_TRACE(SD_LOG_IO_CORE, un,
			    "sd_start_cmds: processing waitq bp:0x%p\n", bp);

		} else {
			/* No work to do so bail out now */
			SD_TRACE(SD_LOG_IO_CORE, un,
			    "sd_start_cmds: no more work, exiting!\n");
			goto exit;
		}
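		/*
		 * Added note (not in the original source): at this point bp
		 * has been chosen from one of three sources, in priority
		 * order: the caller-supplied immed_bp (retry/recovery),
		 * the un_retry_bp (passed in as immed_bp), or the head of
		 * the sorted wait queue. The code below treats all three
		 * identically except for kstat accounting (statp) and the
		 * FLAG_HEAD handling at got_pkt.
		 */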
		/*
		 * Reset the state to normal. This is the mechanism by which
		 * the state transitions from either SD_STATE_RWAIT or
		 * SD_STATE_OFFLINE to SD_STATE_NORMAL.
		 * If state is SD_STATE_PM_CHANGING then this command is
		 * part of the device power control and the state must
		 * not be put back to normal. Doing so would allow
		 * new commands to proceed when they shouldn't, as the
		 * device may be powering down.
		 */
		if ((un->un_state != SD_STATE_SUSPENDED) &&
		    (un->un_state != SD_STATE_PM_CHANGING)) {
			New_state(un, SD_STATE_NORMAL);
		}

		xp = SD_GET_XBUF(bp);
		ASSERT(xp != NULL);

#if defined(__i386) || defined(__amd64)	/* DMAFREE for x86 only */
		/*
		 * Allocate the scsi_pkt if we need one, or attach DMA
		 * resources if we have a scsi_pkt that needs them. The
		 * latter should only occur for commands that are being
		 * retried.
		 */
		if ((xp->xb_pktp == NULL) ||
		    ((xp->xb_pkt_flags & SD_XB_DMA_FREED) != 0)) {
#else
		if (xp->xb_pktp == NULL) {
#endif
			/*
			 * There is no scsi_pkt allocated for this buf. Call
			 * the initpkt function to allocate & init one.
			 *
			 * The scsi_init_pkt runout callback functionality is
			 * implemented as follows:
			 *
			 * 1) The initpkt function always calls
			 *    scsi_init_pkt(9F) with sdrunout specified as the
			 *    callback routine.
			 * 2) A successful packet allocation is initialized and
			 *    the I/O is transported.
			 * 3) The I/O associated with an allocation resource
			 *    failure is left on its queue to be retried via
			 *    runout or the next I/O.
			 * 4) The I/O associated with a DMA error is removed
			 *    from the queue and failed with EIO. Processing of
			 *    the transport queues is also halted to be
			 *    restarted via runout or the next I/O.
			 * 5) The I/O associated with a CDB size or packet
			 *    size error is removed from the queue and failed
			 *    with EIO. Processing of the transport queues is
			 *    continued.
			 *
			 * Note: there is no interface for canceling a runout
			 * callback. To prevent the driver from detaching or
			 * suspending while a runout is pending the driver
			 * state is set to SD_STATE_RWAIT.
			 *
			 * Note: using the scsi_init_pkt callback facility can
			 * result in an I/O request persisting at the head of
			 * the list which cannot be satisfied even after
			 * multiple retries. In the future the driver may
			 * implement some kind of maximum runout count before
			 * failing an I/O.
			 *
			 * Note: the use of funcp below may seem superfluous,
			 * but it helps warlock figure out the correct
			 * initpkt function calls (see [s]sd.wlcmd).
			 */
			struct scsi_pkt	*pktp;
			int (*funcp)(struct buf *bp, struct scsi_pkt **pktp);

			ASSERT(bp != un->un_rqs_bp);

			funcp = sd_initpkt_map[xp->xb_chain_iostart];
			switch ((*funcp)(bp, &pktp)) {
			case SD_PKT_ALLOC_SUCCESS:
				xp->xb_pktp = pktp;
				SD_TRACE(SD_LOG_IO_CORE, un,
				    "sd_start_cmd: SD_PKT_ALLOC_SUCCESS 0x%p\n",
				    pktp);
				goto got_pkt;

			case SD_PKT_ALLOC_FAILURE:
				/*
				 * Temporary (hopefully) resource depletion.
				 * Since retries and RQS commands always have a
				 * scsi_pkt allocated, these cases should never
				 * get here. So the only cases this needs to
				 * handle are a bp from the waitq (which we put
				 * back onto the waitq for sdrunout), or a bp
				 * sent as an immed_bp (which we just fail).
				 */
				SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
				    "sd_start_cmds: SD_PKT_ALLOC_FAILURE\n");

#if defined(__i386) || defined(__amd64)	/* DMAFREE for x86 only */

				if (bp == immed_bp) {
					/*
					 * If SD_XB_DMA_FREED is clear, then
					 * this is a failure to allocate a
					 * scsi_pkt, and we must fail the
					 * command.
					 */
					if ((xp->xb_pkt_flags &
					    SD_XB_DMA_FREED) == 0) {
						break;
					}

					/*
					 * If this immediate command is NOT our
					 * un_retry_bp, then we must fail it.
					 */
					if (bp != un->un_retry_bp) {
						break;
					}

					/*
					 * We get here if this cmd is our
					 * un_retry_bp that was DMAFREED, but
					 * scsi_init_pkt() failed to reallocate
					 * DMA resources when we attempted to
					 * retry it. This can happen when an
					 * mpxio failover is in progress, but
					 * we don't want to just fail the
					 * command in this case.
					 *
					 * Use timeout(9F) to restart it after
					 * a 100ms delay. We don't want to
					 * let sdrunout() restart it, because
					 * sdrunout() is just supposed to start
					 * commands that are sitting on the
					 * wait queue. The un_retry_bp stays
					 * set until the command completes, but
					 * sdrunout can be called many times
					 * before that happens. Since sdrunout
					 * cannot tell if the un_retry_bp is
					 * already in the transport, it could
					 * end up calling scsi_transport() for
					 * the un_retry_bp multiple times.
					 *
					 * Also: don't schedule the callback
					 * if some other callback is already
					 * pending.
					 */
					if (un->un_retry_statp == NULL) {
						/*
						 * restore the kstat pointer to
						 * keep kstat counts coherent
						 * when we do retry the command.
						 */
						un->un_retry_statp =
						    saved_statp;
					}

					if ((un->un_startstop_timeid == NULL) &&
					    (un->un_retry_timeid == NULL) &&
					    (un->un_direct_priority_timeid ==
					    NULL)) {

						un->un_retry_timeid =
						    timeout(
						    sd_start_retry_command,
						    un, SD_RESTART_TIMEOUT);
					}
					goto exit;
				}

#else
				if (bp == immed_bp) {
					break;	/* Just fail the command */
				}
#endif

				/* Add the buf back to the head of the waitq */
				bp->av_forw = un->un_waitq_headp;
				un->un_waitq_headp = bp;
				if (un->un_waitq_tailp == NULL) {
					un->un_waitq_tailp = bp;
				}
				goto exit;
			case SD_PKT_ALLOC_FAILURE_NO_DMA:
				/*
				 * HBA DMA resource failure. Fail the command
				 * and continue processing of the queues.
				 */
				SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
				    "sd_start_cmds: "
				    "SD_PKT_ALLOC_FAILURE_NO_DMA\n");
				break;

			case SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL:
				/*
				 * Note:x86: Partial DMA mapping not supported
				 * for USCSI commands, and all the needed DMA
				 * resources were not allocated.
				 */
				SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
				    "sd_start_cmds: "
				    "SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL\n");
				break;

			case SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL:
				/*
				 * Note:x86: Request cannot fit into CDB based
				 * on lba and len.
				 */
				SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
				    "sd_start_cmds: "
				    "SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL\n");
				break;

			default:
				/* Should NEVER get here! */
				panic("scsi_initpkt error");
				/*NOTREACHED*/
			}

			/*
			 * Fatal error in allocating a scsi_pkt for this buf.
			 * Update kstats & return the buf with an error code.
			 * We must use sd_return_failed_command_no_restart() to
			 * avoid a recursive call back into sd_start_cmds().
			 * However this also means that we must keep processing
			 * the waitq here in order to avoid stalling.
			 */
			if (statp == kstat_waitq_to_runq) {
				SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp);
			}
			sd_return_failed_command_no_restart(un, bp, EIO);
			if (bp == immed_bp) {
				/* immed_bp is gone by now, so clear this */
				immed_bp = NULL;
			}
			continue;
		}
got_pkt:
		if (bp == immed_bp) {
			/* goto the head of the class.... */
			xp->xb_pktp->pkt_flags |= FLAG_HEAD;
		}

		un->un_ncmds_in_transport++;
		SD_UPDATE_KSTATS(un, statp, bp);

		/*
		 * Call scsi_transport() to send the command to the target.
		 * According to SCSA architecture, we must drop the mutex here
		 * before calling scsi_transport() in order to avoid deadlock.
		 * Note that the scsi_pkt's completion routine can be executed
		 * (from interrupt context) even before the call to
		 * scsi_transport() returns.
		 */
		SD_TRACE(SD_LOG_IO_CORE, un,
		    "sd_start_cmds: calling scsi_transport()\n");
		DTRACE_PROBE1(scsi__transport__dispatch, struct buf *, bp);

		mutex_exit(SD_MUTEX(un));
		rval = scsi_transport(xp->xb_pktp);
		mutex_enter(SD_MUTEX(un));

		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sd_start_cmds: scsi_transport() returned %d\n", rval);

		switch (rval) {
		case TRAN_ACCEPT:
			/* Clear this with every pkt accepted by the HBA */
			un->un_tran_fatal_count = 0;
			break;	/* Success; try the next cmd (if any) */

		case TRAN_BUSY:
			un->un_ncmds_in_transport--;
			ASSERT(un->un_ncmds_in_transport >= 0);

			/*
			 * Don't retry request sense, the sense data
			 * is lost when another request is sent.
			 * Free up the rqs buf and retry
			 * the original failed cmd. Update kstat.
			 */
			if (bp == un->un_rqs_bp) {
				SD_UPDATE_KSTATS(un, kstat_runq_exit, bp);
				bp = sd_mark_rqs_idle(un, xp);
				sd_retry_command(un, bp, SD_RETRIES_STANDARD,
				    NULL, NULL, EIO, un->un_busy_timeout / 500,
				    kstat_waitq_enter);
				goto exit;
			}
#if defined(__i386) || defined(__amd64)	/* DMAFREE for x86 only */
			/*
			 * Free the DMA resources for the scsi_pkt. This will
			 * allow mpxio to select another path the next time
			 * we call scsi_transport() with this scsi_pkt.
			 * See sdintr() for the rationalization behind this.
			 */
			if ((un->un_f_is_fibre == TRUE) &&
			    ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) &&
			    ((xp->xb_pktp->pkt_flags & FLAG_SENSING) == 0)) {
				scsi_dmafree(xp->xb_pktp);
				xp->xb_pkt_flags |= SD_XB_DMA_FREED;
			}
#endif

			if (SD_IS_DIRECT_PRIORITY(SD_GET_XBUF(bp))) {
				/*
				 * Commands that are SD_PATH_DIRECT_PRIORITY
				 * are for error recovery situations. These do
				 * not use the normal command waitq, so if they
				 * get a TRAN_BUSY we cannot put them back onto
				 * the waitq for later retry. One possible
				 * problem is that there could already be some
				 * other command on un_retry_bp that is waiting
				 * for this one to complete, so we would be
				 * deadlocked if we put this command back onto
				 * the waitq for later retry (since un_retry_bp
				 * must complete before the driver gets back to
				 * commands on the waitq).
				 *
				 * To avoid deadlock we must schedule a callback
				 * that will restart this command after a set
				 * interval. This should keep retrying for as
				 * long as the underlying transport keeps
				 * returning TRAN_BUSY (just like for other
				 * commands). Use the same timeout interval as
				 * for the ordinary TRAN_BUSY retry.
				 */
				SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
				    "sd_start_cmds: scsi_transport() returned "
				    "TRAN_BUSY for DIRECT_PRIORITY cmd!\n");

				SD_UPDATE_KSTATS(un, kstat_runq_exit, bp);
				un->un_direct_priority_timeid =
				    timeout(sd_start_direct_priority_command,
				    bp, un->un_busy_timeout / 500);

				goto exit;
			}

			/*
			 * For TRAN_BUSY, we want to reduce the throttle value,
			 * unless we are retrying a command.
			 */
			if (bp != un->un_retry_bp) {
				sd_reduce_throttle(un, SD_THROTTLE_TRAN_BUSY);
			}

			/*
			 * Set up the bp to be tried again after a short
			 * delay (un_busy_timeout / 500).
			 * Note:x86: Is there a timeout value in the sd_lun
			 * for this condition?
			 */
			sd_set_retry_bp(un, bp, un->un_busy_timeout / 500,
			    kstat_runq_back_to_waitq);
			goto exit;

		case TRAN_FATAL_ERROR:
			un->un_tran_fatal_count++;
			/* FALLTHRU */

		case TRAN_BADPKT:
		default:
			un->un_ncmds_in_transport--;
			ASSERT(un->un_ncmds_in_transport >= 0);

			/*
			 * If this is our REQUEST SENSE command with a
			 * transport error, we must get back the pointers
			 * to the original buf, and mark the REQUEST
			 * SENSE command as "available".
			 */
			if (bp == un->un_rqs_bp) {
				bp = sd_mark_rqs_idle(un, xp);
				xp = SD_GET_XBUF(bp);
			} else {
				/*
				 * Legacy behavior: do not update transport
				 * error count for request sense commands.
				 */
				SD_UPDATE_ERRSTATS(un, sd_transerrs);
			}

			SD_UPDATE_KSTATS(un, kstat_runq_exit, bp);
			sd_print_transport_rejected_message(un, xp, rval);

			/*
			 * We must use sd_return_failed_command_no_restart() to
			 * avoid a recursive call back into sd_start_cmds().
			 * However this also means that we must keep processing
			 * the waitq here in order to avoid stalling.
			 */
			sd_return_failed_command_no_restart(un, bp, EIO);

			/*
			 * Notify any threads waiting in sd_ddi_suspend() that
			 * a command completion has occurred.
			 */
			if (un->un_state == SD_STATE_SUSPENDED) {
				cv_broadcast(&un->un_disk_busy_cv);
			}

			if (bp == immed_bp) {
				/* immed_bp is gone by now, so clear this */
				immed_bp = NULL;
			}
			break;
		}

	} while (immed_bp == NULL);

exit:
	ASSERT(mutex_owned(SD_MUTEX(un)));
	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_start_cmds: exit\n");
}
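/*
 * Added summary (not in the original source) of how sd_start_cmds()
 * disposes of each scsi_transport(9F) return value, as implemented above:
 *
 *	TRAN_ACCEPT	  command is in flight; reset un_tran_fatal_count
 *			  and loop for the next command.
 *	TRAN_BUSY	  back off: requeue via sd_set_retry_bp() (or via a
 *			  timeout for DIRECT_PRIORITY cmds), and reduce the
 *			  throttle unless this was already a retry.
 *	TRAN_FATAL_ERROR  bump un_tran_fatal_count, then fall through.
 *	TRAN_BADPKT etc.  fail the buf with EIO without restarting, so the
 *			  loop can keep draining the waitq.
 */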
/*
 *    Function: sd_return_command
 *
 * Description: Returns a command to its originator (with or without an
 *		error). Also starts commands waiting to be transported
 *		to the target.
 *
 *     Context: May be called from interrupt, kernel, or timeout context
 */

static void
sd_return_command(struct sd_lun *un, struct buf *bp)
{
	struct sd_xbuf *xp;
	struct scsi_pkt *pktp;

	ASSERT(bp != NULL);
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != un->un_rqs_bp);
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);

	pktp = SD_GET_PKTP(bp);

	SD_TRACE(SD_LOG_IO_CORE, un, "sd_return_command: entry\n");

	/*
	 * Note: check for the "sdrestart failed" case.
	 */
	if ((un->un_partial_dma_supported == 1) &&
	    ((xp->xb_pkt_flags & SD_XB_USCSICMD) != SD_XB_USCSICMD) &&
	    (geterror(bp) == 0) && (xp->xb_dma_resid != 0) &&
	    (xp->xb_pktp->pkt_resid == 0)) {

		if (sd_setup_next_xfer(un, bp, pktp, xp) != 0) {
			/*
			 * Successfully set up next portion of cmd
			 * transfer, try sending it
			 */
			sd_retry_command(un, bp, SD_RETRIES_NOCHECK,
			    NULL, NULL, 0, (clock_t)0, NULL);
			sd_start_cmds(un, NULL);
			return;	/* Note:x86: need a return here? */
		}
	}

	/*
	 * If this is the failfast bp, clear it from un_failfast_bp. This
	 * can happen if upon being re-tried the failfast bp either
	 * succeeded or encountered another error (possibly even a different
	 * error than the one that precipitated the failfast state, but in
	 * that case it would have had to exhaust retries as well). Regardless,
	 * this should not occur whenever the instance is in the active
	 * failfast state.
	 */
	if (bp == un->un_failfast_bp) {
		ASSERT(un->un_failfast_state == SD_FAILFAST_INACTIVE);
		un->un_failfast_bp = NULL;
	}

	/*
	 * Clear the failfast state upon successful completion of ANY cmd.
	 */
	if (bp->b_error == 0) {
		un->un_failfast_state = SD_FAILFAST_INACTIVE;
	}

	/*
	 * This is used if the command was retried one or more times. Show that
	 * we are done with it, and allow processing of the waitq to resume.
	 */
	if (bp == un->un_retry_bp) {
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sd_return_command: un:0x%p: "
		    "RETURNING retry_bp:0x%p\n", un, un->un_retry_bp);
		un->un_retry_bp = NULL;
		un->un_retry_statp = NULL;
	}

	SD_UPDATE_RDWR_STATS(un, bp);
	SD_UPDATE_PARTITION_STATS(un, bp);

	switch (un->un_state) {
	case SD_STATE_SUSPENDED:
		/*
		 * Notify any threads waiting in sd_ddi_suspend() that
		 * a command completion has occurred.
		 */
		cv_broadcast(&un->un_disk_busy_cv);
		break;
	default:
		sd_start_cmds(un, NULL);
		break;
	}

	/* Return this command up the iodone chain to its originator. */
	mutex_exit(SD_MUTEX(un));

	(*(sd_destroypkt_map[xp->xb_chain_iodone]))(bp);
	xp->xb_pktp = NULL;

	SD_BEGIN_IODONE(xp->xb_chain_iodone, un, bp);

	ASSERT(!mutex_owned(SD_MUTEX(un)));
	mutex_enter(SD_MUTEX(un));

	SD_TRACE(SD_LOG_IO_CORE, un, "sd_return_command: exit\n");
}


/*
 *    Function: sd_return_failed_command
 *
 * Description: Command completion when an error occurred.
 *
 *     Context: May be called from interrupt context
 */

static void
sd_return_failed_command(struct sd_lun *un, struct buf *bp, int errcode)
{
	ASSERT(bp != NULL);
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
	    "sd_return_failed_command: entry\n");

	/*
	 * b_resid could already be nonzero due to a partial data
	 * transfer, so do not change it here.
	 */
	SD_BIOERROR(bp, errcode);

	sd_return_command(un, bp);
	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
	    "sd_return_failed_command: exit\n");
}


/*
 *    Function: sd_return_failed_command_no_restart
 *
 * Description: Same as sd_return_failed_command, but ensures that no
 *		call back into sd_start_cmds will be issued.
 *
 *     Context: May be called from interrupt context
 */

static void
sd_return_failed_command_no_restart(struct sd_lun *un, struct buf *bp,
    int errcode)
{
	struct sd_xbuf *xp;

	ASSERT(bp != NULL);
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);
	ASSERT(errcode != 0);

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
	    "sd_return_failed_command_no_restart: entry\n");

	/*
	 * b_resid could already be nonzero due to a partial data
	 * transfer, so do not change it here.
	 */
	SD_BIOERROR(bp, errcode);

	/*
	 * If this is the failfast bp, clear it. This can happen if the
	 * failfast bp encountered a fatal error when we attempted to
	 * re-try it (such as a scsi_transport(9F) failure). However
	 * we should NOT be in an active failfast state if the failfast
	 * bp is not NULL.
	 */
	if (bp == un->un_failfast_bp) {
		ASSERT(un->un_failfast_state == SD_FAILFAST_INACTIVE);
		un->un_failfast_bp = NULL;
	}

	if (bp == un->un_retry_bp) {
		/*
		 * This command was retried one or more times. Show that we are
		 * done with it, and allow processing of the waitq to resume.
		 */
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sd_return_failed_command_no_restart: "
		    "un:0x%p: RETURNING retry_bp:0x%p\n", un, un->un_retry_bp);
		un->un_retry_bp = NULL;
		un->un_retry_statp = NULL;
	}

	SD_UPDATE_RDWR_STATS(un, bp);
	SD_UPDATE_PARTITION_STATS(un, bp);

	mutex_exit(SD_MUTEX(un));

	if (xp->xb_pktp != NULL) {
		(*(sd_destroypkt_map[xp->xb_chain_iodone]))(bp);
		xp->xb_pktp = NULL;
	}

	SD_BEGIN_IODONE(xp->xb_chain_iodone, un, bp);

	mutex_enter(SD_MUTEX(un));

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
	    "sd_return_failed_command_no_restart: exit\n");
}


/*
 *    Function: sd_retry_command
 *
 * Description: Queue up a command for retry, or (optionally) fail it
 *		if retry counts are exhausted.
 *
 *   Arguments: un - Pointer to the sd_lun struct for the target.
 *
 *		bp - Pointer to the buf for the command to be retried.
 *
 *		retry_check_flag - Flag to see which (if any) of the retry
 *		counts should be decremented/checked. If the indicated
 *		retry count is exhausted, then the command will not be
 *		retried; it will be failed instead. This should use a
 *		value equal to one of the following:
 *
 *			SD_RETRIES_NOCHECK
 *			SD_RETRIES_STANDARD
 *			SD_RETRIES_VICTIM
 *
 *		Optionally may be bitwise-OR'ed with SD_RETRIES_ISOLATE
 *		if the check should be made to see if FLAG_ISOLATE is set
 *		in the pkt. If FLAG_ISOLATE is set, then the command is
 *		not retried, it is simply failed.
 *
 *		user_funcp - Ptr to function to call before dispatching the
 *		command. May be NULL if no action needs to be performed.
 *		(Primarily intended for printing messages.)
 *
 *		user_arg - Optional argument to be passed along to
 *		the user_funcp call.
 *
 *		failure_code - errno return code to set in the bp if the
 *		command is going to be failed.
 *
 *		retry_delay - Retry delay interval in (clock_t) units. May
 *		be zero, which indicates that the retry should be issued
 *		immediately (ie, without an intervening delay).
 *
 *		statp - Ptr to kstat function to be updated if the command
 *		is queued for a delayed retry. May be NULL if no kstat
 *		update is desired.
 *
 *     Context: May be called from interrupt context.
 */

static void
sd_retry_command(struct sd_lun *un, struct buf *bp, int retry_check_flag,
    void (*user_funcp)(struct sd_lun *un, struct buf *bp, void *argp, int
    code), void *user_arg, int failure_code, clock_t retry_delay,
    void (*statp)(kstat_io_t *))
{
	struct sd_xbuf *xp;
	struct scsi_pkt *pktp;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);
	pktp = SD_GET_PKTP(bp);
	ASSERT(pktp != NULL);

	SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un,
	    "sd_retry_command: entry: bp:0x%p xp:0x%p\n", bp, xp);

	/*
	 * If we are syncing or dumping, fail the command to avoid
	 * recursively calling back into scsi_transport().
	 */
	if (ddi_in_panic()) {
		goto fail_command_no_log;
	}

	/*
	 * We should never be retrying a command with FLAG_DIAGNOSE set, so
	 * log an error and fail the command.
	 */
	if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE,
		    "ERROR, retrying FLAG_DIAGNOSE command.\n");
		sd_dump_memory(un, SD_LOG_IO, "CDB",
		    (uchar_t *)pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX);
		sd_dump_memory(un, SD_LOG_IO, "Sense Data",
		    (uchar_t *)xp->xb_sense_data, SENSE_LENGTH, SD_LOG_HEX);
		goto fail_command;
	}

	/*
	 * If we are suspended or dumping, put the command onto the head
	 * of the wait queue, since we don't want to start more commands,
	 * and clear un_retry_bp. The next time we are resumed we will
	 * handle the commands on the wait queue.
	 */
	switch (un->un_state) {
	case SD_STATE_SUSPENDED:
	case SD_STATE_DUMPING:
		bp->av_forw = un->un_waitq_headp;
		un->un_waitq_headp = bp;
		if (un->un_waitq_tailp == NULL) {
			un->un_waitq_tailp = bp;
		}
		if (bp == un->un_retry_bp) {
			un->un_retry_bp = NULL;
			un->un_retry_statp = NULL;
		}
		SD_UPDATE_KSTATS(un, kstat_waitq_enter, bp);
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: "
		    "exiting; cmd bp:0x%p requeued for SUSPEND/DUMP\n", bp);
		return;
	default:
		break;
	}

	/*
	 * If the caller wants us to check FLAG_ISOLATE, then see if that
	 * is set; if it is then we do not want to retry the command.
	 * Normally, FLAG_ISOLATE is only used with USCSI cmds.
	 */
	if ((retry_check_flag & SD_RETRIES_ISOLATE) != 0) {
		if ((pktp->pkt_flags & FLAG_ISOLATE) != 0) {
			goto fail_command;
		}
	}


	/*
	 * If SD_RETRIES_FAILFAST is set, it indicates that either a
	 * command timeout or a selection timeout has occurred. This means
	 * that we were unable to establish any kind of communication with
	 * the target, and subsequent retries and/or commands are likely
	 * to encounter similar results and take a long time to complete.
	 *
	 * If this is a failfast error condition, we need to update the
	 * failfast state, even if this bp does not have B_FAILFAST set.
	 */
	if (retry_check_flag & SD_RETRIES_FAILFAST) {
		if (un->un_failfast_state == SD_FAILFAST_ACTIVE) {
			ASSERT(un->un_failfast_bp == NULL);
			/*
			 * If we are already in the active failfast state, and
			 * another failfast error condition has been detected,
			 * then fail this command if it has B_FAILFAST set.
			 * If B_FAILFAST is clear, then maintain the legacy
			 * behavior of retrying heroically, even though this
			 * will take a lot more time to fail the command.
			 */
			if (bp->b_flags & B_FAILFAST) {
				goto fail_command;
			}
		} else {
			/*
			 * We're not in the active failfast state, but we
			 * have a failfast error condition, so we must begin
			 * transition to the next state. We do this regardless
			 * of whether or not this bp has B_FAILFAST set.
			 */
			if (un->un_failfast_bp == NULL) {
				/*
				 * This is the first bp to meet a failfast
				 * condition so save it on un_failfast_bp &
				 * do normal retry processing. Do not enter
				 * active failfast state yet. This marks
				 * entry into the "failfast pending" state.
				 */
				un->un_failfast_bp = bp;

			} else if (un->un_failfast_bp == bp) {
				/*
				 * This is the second time *this* bp has
				 * encountered a failfast error condition,
				 * so enter active failfast state & flush
				 * queues as appropriate.
				 */
				un->un_failfast_state = SD_FAILFAST_ACTIVE;
				un->un_failfast_bp = NULL;
				sd_failfast_flushq(un);

				/*
				 * Fail this bp now if B_FAILFAST set;
				 * otherwise continue with retries. (It would
				 * be pretty ironic if this bp succeeded on a
				 * subsequent retry after we just flushed all
				 * the queues).
				 */
				if (bp->b_flags & B_FAILFAST) {
					goto fail_command;
				}

#if !defined(lint) && !defined(__lint)
			} else {
				/*
				 * If neither of the preceding conditionals
				 * was true, it means that there is some
				 * *other* bp that has met an initial failfast
				 * condition and is currently either being
				 * retried or is waiting to be retried. In
				 * that case we should perform normal retry
				 * processing on *this* bp, since there is a
				 * chance that the current failfast condition
				 * is transient and recoverable. If that does
				 * not turn out to be the case, then retries
				 * will be cleared when the wait queue is
				 * flushed anyway.
				 */
#endif
			}
		}
	} else {
		/*
		 * SD_RETRIES_FAILFAST is clear, which indicates that we
		 * likely were able to at least establish some level of
		 * communication with the target, and subsequent commands
		 * and/or retries are likely to get through to the target.
		 * In this case we want to be aggressive about clearing
		 * the failfast state. Note that this does not affect
		 * the "failfast pending" condition.
		 */
		un->un_failfast_state = SD_FAILFAST_INACTIVE;
	}
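	/*
	 * Added summary (not in the original source): the failfast logic
	 * above is a small state machine keyed on (un_failfast_state,
	 * un_failfast_bp):
	 *
	 *	INACTIVE, bp == NULL	normal operation; the first failfast
	 *				error records the bp ("pending").
	 *	INACTIVE, bp != NULL	pending; a second failfast error on
	 *				the *same* bp goes ACTIVE and flushes
	 *				the queues via sd_failfast_flushq().
	 *	ACTIVE			B_FAILFAST bufs fail immediately;
	 *				others retry heroically. Any success,
	 *				or a non-failfast retry, drops the
	 *				state back to INACTIVE.
	 */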
	/*
	 * Check the specified retry count to see if we can still do
	 * any retries with this pkt before we should fail it.
	 */
	switch (retry_check_flag & SD_RETRIES_MASK) {
	case SD_RETRIES_VICTIM:
		/*
		 * Check the victim retry count. If exhausted, then fall
		 * through & check against the standard retry count.
		 */
		if (xp->xb_victim_retry_count < un->un_victim_retry_count) {
			/* Increment count & proceed with the retry */
			xp->xb_victim_retry_count++;
			break;
		}
		/* Victim retries exhausted, fall back to std. retries... */
		/* FALLTHRU */

	case SD_RETRIES_STANDARD:
		if (xp->xb_retry_count >= un->un_retry_count) {
			/* Retries exhausted, fail the command */
			SD_TRACE(SD_LOG_IO_CORE, un,
			    "sd_retry_command: retries exhausted!\n");
			/*
			 * update b_resid for failed SCMD_READ & SCMD_WRITE
			 * commands with nonzero pkt_resid.
			 */
			if ((pktp->pkt_reason == CMD_CMPLT) &&
			    (SD_GET_PKT_STATUS(pktp) == STATUS_GOOD) &&
			    (pktp->pkt_resid != 0)) {
				uchar_t op = SD_GET_PKT_OPCODE(pktp) & 0x1F;
				if ((op == SCMD_READ) || (op == SCMD_WRITE)) {
					SD_UPDATE_B_RESID(bp, pktp);
				}
			}
			goto fail_command;
		}
		xp->xb_retry_count++;
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sd_retry_command: retry count:%d\n", xp->xb_retry_count);
		break;

	case SD_RETRIES_UA:
		if (xp->xb_ua_retry_count >= sd_ua_retry_count) {
			/* Retries exhausted, fail the command */
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "Unit Attention retries exhausted. "
			    "Check the target.\n");
			goto fail_command;
		}
		xp->xb_ua_retry_count++;
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sd_retry_command: retry count:%d\n",
		    xp->xb_ua_retry_count);
		break;

	case SD_RETRIES_BUSY:
		if (xp->xb_retry_count >= un->un_busy_retry_count) {
			/* Retries exhausted, fail the command */
			SD_TRACE(SD_LOG_IO_CORE, un,
			    "sd_retry_command: retries exhausted!\n");
			goto fail_command;
		}
		xp->xb_retry_count++;
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sd_retry_command: retry count:%d\n", xp->xb_retry_count);
		break;

	case SD_RETRIES_NOCHECK:
	default:
		/* No retry count to check. Just proceed with the retry */
		break;
	}
	xp->xb_pktp->pkt_flags |= FLAG_HEAD;

	/*
	 * If we were given a zero timeout, we must attempt to retry the
	 * command immediately (ie, without a delay).
	 */
	if (retry_delay == 0) {
		/*
		 * Check some limiting conditions to see if we can actually
		 * do the immediate retry. If we cannot, then we must
		 * fall back to queueing up a delayed retry.
		 */
		if (un->un_ncmds_in_transport >= un->un_throttle) {
			/*
			 * We are at the throttle limit for the target,
			 * fall back to delayed retry.
			 */
			retry_delay = un->un_busy_timeout;
			statp = kstat_waitq_enter;
			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
			    "sd_retry_command: immed. retry hit "
			    "throttle!\n");
		} else {
			/*
			 * We're clear to proceed with the immediate retry.
			 * First call the user-provided function (if any)
			 */
			if (user_funcp != NULL) {
				(*user_funcp)(un, bp, user_arg,
				    SD_IMMEDIATE_RETRY_ISSUED);
#ifdef __lock_lint
				sd_print_incomplete_msg(un, bp, user_arg,
				    SD_IMMEDIATE_RETRY_ISSUED);
				sd_print_cmd_incomplete_msg(un, bp, user_arg,
				    SD_IMMEDIATE_RETRY_ISSUED);
				sd_print_sense_failed_msg(un, bp, user_arg,
				    SD_IMMEDIATE_RETRY_ISSUED);
#endif
			}

			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
			    "sd_retry_command: issuing immediate retry\n");

			/*
			 * Call sd_start_cmds() to transport the command to
			 * the target.
			 */
			sd_start_cmds(un, bp);

			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
			    "sd_retry_command exit\n");
			return;
		}
	}

	/*
	 * Set up to retry the command after a delay.
	 * First call the user-provided function (if any)
	 */
	if (user_funcp != NULL) {
		(*user_funcp)(un, bp, user_arg, SD_DELAYED_RETRY_ISSUED);
	}

	sd_set_retry_bp(un, bp, retry_delay, statp);

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: exit\n");
	return;

fail_command:

	if (user_funcp != NULL) {
		(*user_funcp)(un, bp, user_arg, SD_NO_RETRY_ISSUED);
	}

fail_command_no_log:

	SD_INFO(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
	    "sd_retry_command: returning failed command\n");

	sd_return_failed_command(un, bp, failure_code);

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: exit\n");
}
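/*
 * Added illustration (not in the original source): a typical delayed-retry
 * invocation, as used for the TRAN_BUSY request-sense case earlier in this
 * file -- check the standard retry budget, no user callback, fail with EIO
 * if retries are exhausted, and re-enter the waitq kstat while parked:
 *
 *	sd_retry_command(un, bp, SD_RETRIES_STANDARD,
 *	    NULL, NULL, EIO, un->un_busy_timeout / 500,
 *	    kstat_waitq_enter);
 *
 * An immediate retry would instead pass a retry_delay of (clock_t)0.
 */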
/*
 *    Function: sd_set_retry_bp
 *
 * Description: Set up the given bp for retry.
 *
 *   Arguments: un - ptr to associated softstate
 *		bp - ptr to buf(9S) for the command
 *		retry_delay - time interval before issuing retry (may be 0)
 *		statp - optional pointer to kstat function
 *
 *     Context: May be called under interrupt context
 */

static void
sd_set_retry_bp(struct sd_lun *un, struct buf *bp, clock_t retry_delay,
    void (*statp)(kstat_io_t *))
{
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);

	SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un,
	    "sd_set_retry_bp: entry: un:0x%p bp:0x%p\n", un, bp);

	/*
	 * Indicate that the command is being retried. This will not allow any
	 * other commands on the wait queue to be transported to the target
	 * until this command has been completed (success or failure). The
	 * "retry command" is not transported to the target until the given
	 * time delay expires, unless the user specified a 0 retry_delay.
	 *
	 * Note: the timeout(9F) callback routine is what actually calls
	 * sd_start_cmds() to transport the command, with the exception of a
	 * zero retry_delay. The only current implementor of a zero retry delay
	 * is the case where a START_STOP_UNIT is sent to spin-up a device.
	 */
	if (un->un_retry_bp == NULL) {
		ASSERT(un->un_retry_statp == NULL);
		un->un_retry_bp = bp;

		/*
		 * If the user has not specified a delay the command should
		 * be queued and no timeout should be scheduled.
		 */
		if (retry_delay == 0) {
			/*
			 * Save the kstat pointer that will be used in the
			 * call to SD_UPDATE_KSTATS() below, so that
			 * sd_start_cmds() can correctly decrement the waitq
			 * count when it is time to transport this command.
			 */
			un->un_retry_statp = statp;
			goto done;
		}
	}

	if (un->un_retry_bp == bp) {
		/*
		 * Save the kstat pointer that will be used in the call to
		 * SD_UPDATE_KSTATS() below, so that sd_start_cmds() can
		 * correctly decrement the waitq count when it is time to
		 * transport this command.
		 */
		un->un_retry_statp = statp;

		/*
		 * Schedule a timeout if:
		 *   1) The user has specified a delay.
		 *   2) There is not a START_STOP_UNIT callback pending.
		 *
		 * If no delay has been specified, then it is up to the caller
		 * to ensure that IO processing continues without stalling.
		 * Effectively, this means that the caller will issue the
		 * required call to sd_start_cmds(). The START_STOP_UNIT
		 * callback does this after the START STOP UNIT command has
		 * completed. In either of these cases we should not schedule
		 * a timeout callback here. Also don't schedule the timeout if
		 * an SD_PATH_DIRECT_PRIORITY command is waiting to restart.
		 */
		if ((retry_delay != 0) && (un->un_startstop_timeid == NULL) &&
		    (un->un_direct_priority_timeid == NULL)) {
			un->un_retry_timeid =
			    timeout(sd_start_retry_command, un, retry_delay);
			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
			    "sd_set_retry_bp: setting timeout: un: 0x%p"
			    " bp:0x%p un_retry_timeid:0x%p\n",
			    un, bp, un->un_retry_timeid);
		}
	} else {
		/*
		 * We only get in here if there is already another command
		 * waiting to be retried. In this case, we just put the
		 * given command onto the wait queue, so it can be transported
		 * after the current retry command has completed.
		 *
		 * Also we have to make sure that if the command at the head
		 * of the wait queue is the un_failfast_bp, that we do not
		 * put ahead of it any other commands that are to be retried.
		 */
		if ((un->un_failfast_bp != NULL) &&
		    (un->un_failfast_bp == un->un_waitq_headp)) {
			/*
			 * Enqueue this command AFTER the first command on
			 * the wait queue (which is also un_failfast_bp).
			 */
			bp->av_forw = un->un_waitq_headp->av_forw;
			un->un_waitq_headp->av_forw = bp;
			if (un->un_waitq_headp == un->un_waitq_tailp) {
				un->un_waitq_tailp = bp;
			}
		} else {
			/* Enqueue this command at the head of the waitq. */
			bp->av_forw = un->un_waitq_headp;
			un->un_waitq_headp = bp;
			if (un->un_waitq_tailp == NULL) {
				un->un_waitq_tailp = bp;
			}
		}

		if (statp == NULL) {
			statp = kstat_waitq_enter;
		}
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sd_set_retry_bp: un:0x%p already delayed retry\n", un);
	}

done:
	if (statp != NULL) {
		SD_UPDATE_KSTATS(un, statp, bp);
	}

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
	    "sd_set_retry_bp: exit un:0x%p\n", un);
}
The START_STOP_UNIT 14359 * callback does this after the START STOP UNIT command has 14360 * completed. In either of these cases we should not schedule 14361 * a timeout callback here. Also don't schedule the timeout if 14362 * an SD_PATH_DIRECT_PRIORITY command is waiting to restart. 14363 */ 14364 if ((retry_delay != 0) && (un->un_startstop_timeid == NULL) && 14365 (un->un_direct_priority_timeid == NULL)) { 14366 un->un_retry_timeid = 14367 timeout(sd_start_retry_command, un, retry_delay); 14368 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14369 "sd_set_retry_bp: setting timeout: un: 0x%p" 14370 " bp:0x%p un_retry_timeid:0x%p\n", 14371 un, bp, un->un_retry_timeid); 14372 } 14373 } else { 14374 /* 14375 * We only get in here if there is already another command 14376 * waiting to be retried. In this case, we just put the 14377 * given command onto the wait queue, so it can be transported 14378 * after the current retry command has completed. 14379 * 14380 * Also we have to make sure that if the command at the head 14381 * of the wait queue is the un_failfast_bp, we do not put any 14382 * other commands that are to be retried ahead of it. 14383 */ 14384 if ((un->un_failfast_bp != NULL) && 14385 (un->un_failfast_bp == un->un_waitq_headp)) { 14386 /* 14387 * Enqueue this command AFTER the first command on 14388 * the wait queue (which is also un_failfast_bp). 14389 */ 14390 bp->av_forw = un->un_waitq_headp->av_forw; 14391 un->un_waitq_headp->av_forw = bp; 14392 if (un->un_waitq_headp == un->un_waitq_tailp) { 14393 un->un_waitq_tailp = bp; 14394 } 14395 } else { 14396 /* Enqueue this command at the head of the waitq. */ 14397 bp->av_forw = un->un_waitq_headp; 14398 un->un_waitq_headp = bp; 14399 if (un->un_waitq_tailp == NULL) { 14400 un->un_waitq_tailp = bp; 14401 } 14402 } 14403 14404 if (statp == NULL) { 14405 statp = kstat_waitq_enter; 14406 } 14407 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14408 "sd_set_retry_bp: un:0x%p already delayed retry\n", un); 14409 } 14410 14411 done: 14412 if (statp != NULL) { 14413 SD_UPDATE_KSTATS(un, statp, bp); 14414 } 14415 14416 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14417 "sd_set_retry_bp: exit un:0x%p\n", un); 14418 } 14419 14420 14421 /* 14422 * Function: sd_start_retry_command 14423 * 14424 * Description: Start the command that has been waiting on the target's 14425 * retry queue. Called from timeout(9F) context after the 14426 * retry delay interval has expired. 14427 * 14428 * Arguments: arg - pointer to associated softstate for the device. 14429 * 14430 * Context: timeout(9F) thread context. May not sleep.
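 *
 *		As a minimal sketch of the timeout(9F) lifecycle for this
 *		callback (the arming side is in sd_set_retry_bp() above;
 *		the cancel fragment below is illustrative only, not a
 *		verbatim copy of the driver's teardown code):
 *
 *			un->un_retry_timeid =
 *			    timeout(sd_start_retry_command, un, retry_delay);
 *			...
 *			if (un->un_retry_timeid != NULL) {
 *				timeout_id_t temp_id = un->un_retry_timeid;
 *				un->un_retry_timeid = NULL;
 *				mutex_exit(SD_MUTEX(un));
 *				(void) untimeout(temp_id);
 *				mutex_enter(SD_MUTEX(un));
 *			}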
14431 */ 14432 14433 static void 14434 sd_start_retry_command(void *arg) 14435 { 14436 struct sd_lun *un = arg; 14437 14438 ASSERT(un != NULL); 14439 ASSERT(!mutex_owned(SD_MUTEX(un))); 14440 14441 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14442 "sd_start_retry_command: entry\n"); 14443 14444 mutex_enter(SD_MUTEX(un)); 14445 14446 un->un_retry_timeid = NULL; 14447 14448 if (un->un_retry_bp != NULL) { 14449 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14450 "sd_start_retry_command: un:0x%p STARTING bp:0x%p\n", 14451 un, un->un_retry_bp); 14452 sd_start_cmds(un, un->un_retry_bp); 14453 } 14454 14455 mutex_exit(SD_MUTEX(un)); 14456 14457 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14458 "sd_start_retry_command: exit\n"); 14459 } 14460 14461 14462 /* 14463 * Function: sd_start_direct_priority_command 14464 * 14465 * Description: Used to re-start an SD_PATH_DIRECT_PRIORITY command that had 14466 * received TRAN_BUSY when we called scsi_transport() to send it 14467 * to the underlying HBA. This function is called from timeout(9F) 14468 * context after the delay interval has expired. 14469 * 14470 * Arguments: arg - pointer to associated buf(9S) to be restarted. 14471 * 14472 * Context: timeout(9F) thread context. May not sleep. 14473 */ 14474 14475 static void 14476 sd_start_direct_priority_command(void *arg) 14477 { 14478 struct buf *priority_bp = arg; 14479 struct sd_lun *un; 14480 14481 ASSERT(priority_bp != NULL); 14482 un = SD_GET_UN(priority_bp); 14483 ASSERT(un != NULL); 14484 ASSERT(!mutex_owned(SD_MUTEX(un))); 14485 14486 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14487 "sd_start_direct_priority_command: entry\n"); 14488 14489 mutex_enter(SD_MUTEX(un)); 14490 un->un_direct_priority_timeid = NULL; 14491 sd_start_cmds(un, priority_bp); 14492 mutex_exit(SD_MUTEX(un)); 14493 14494 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14495 "sd_start_direct_priority_command: exit\n"); 14496 } 14497 14498 14499 /* 14500 * Function: sd_send_request_sense_command 14501 * 14502 * Description: Sends a REQUEST SENSE command to the target 14503 * 14504 * Context: May be called from interrupt context. 14505 */ 14506 14507 static void 14508 sd_send_request_sense_command(struct sd_lun *un, struct buf *bp, 14509 struct scsi_pkt *pktp) 14510 { 14511 ASSERT(bp != NULL); 14512 ASSERT(un != NULL); 14513 ASSERT(mutex_owned(SD_MUTEX(un))); 14514 14515 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_send_request_sense_command: " 14516 "entry: buf:0x%p\n", bp); 14517 14518 /* 14519 * If we are syncing or dumping, then fail the command to avoid a 14520 * recursive callback into scsi_transport(). Also fail the command 14521 * if we are suspended (legacy behavior). 14522 */ 14523 if (ddi_in_panic() || (un->un_state == SD_STATE_SUSPENDED) || 14524 (un->un_state == SD_STATE_DUMPING)) { 14525 sd_return_failed_command(un, bp, EIO); 14526 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14527 "sd_send_request_sense_command: syncing/dumping, exit\n"); 14528 return; 14529 } 14530 14531 /* 14532 * Retry the failed command and don't issue the request sense if: 14533 * 1) the sense buf is busy 14534 * 2) we have 1 or more outstanding commands on the target 14535 * (the sense data will be cleared or invalidated anyway) 14536 * 14537 * Note: There could be an issue with not checking a retry limit here; 14538 * the problem is determining which retry limit to check.
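 *
 *		As a minimal sketch of how the single pre-allocated RQS
 *		packet is serialized (un_sense_isbusy is the gate, only
 *		ever changed under SD_MUTEX; see sd_mark_rqs_busy() and
 *		sd_mark_rqs_idle() below):
 *
 *			mutex_enter(SD_MUTEX(un));
 *			if (un->un_sense_isbusy == 0) {
 *				sd_mark_rqs_busy(un, bp);
 *				sd_start_cmds(un, un->un_rqs_bp);
 *			}
 *			mutex_exit(SD_MUTEX(un));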
14539 */ 14540 if ((un->un_sense_isbusy != 0) || (un->un_ncmds_in_transport > 0)) { 14541 /* Don't retry if the command is flagged as non-retryable */ 14542 if ((pktp->pkt_flags & FLAG_DIAGNOSE) == 0) { 14543 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, 14544 NULL, NULL, 0, un->un_busy_timeout, 14545 kstat_waitq_enter); 14546 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14547 "sd_send_request_sense_command: " 14548 "at full throttle, retrying exit\n"); 14549 } else { 14550 sd_return_failed_command(un, bp, EIO); 14551 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14552 "sd_send_request_sense_command: " 14553 "at full throttle, non-retryable exit\n"); 14554 } 14555 return; 14556 } 14557 14558 sd_mark_rqs_busy(un, bp); 14559 sd_start_cmds(un, un->un_rqs_bp); 14560 14561 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14562 "sd_send_request_sense_command: exit\n"); 14563 } 14564 14565 14566 /* 14567 * Function: sd_mark_rqs_busy 14568 * 14569 * Description: Indicate that the request sense bp for this instance is 14570 * in use. 14571 * 14572 * Context: May be called under interrupt context 14573 */ 14574 14575 static void 14576 sd_mark_rqs_busy(struct sd_lun *un, struct buf *bp) 14577 { 14578 struct sd_xbuf *sense_xp; 14579 14580 ASSERT(un != NULL); 14581 ASSERT(bp != NULL); 14582 ASSERT(mutex_owned(SD_MUTEX(un))); 14583 ASSERT(un->un_sense_isbusy == 0); 14584 14585 SD_TRACE(SD_LOG_IO_CORE, un, "sd_mark_rqs_busy: entry: " 14586 "buf:0x%p xp:0x%p un:0x%p\n", bp, SD_GET_XBUF(bp), un); 14587 14588 sense_xp = SD_GET_XBUF(un->un_rqs_bp); 14589 ASSERT(sense_xp != NULL); 14590 14591 SD_INFO(SD_LOG_IO, un, 14592 "sd_mark_rqs_busy: entry: sense_xp:0x%p\n", sense_xp); 14593 14594 ASSERT(sense_xp->xb_pktp != NULL); 14595 ASSERT((sense_xp->xb_pktp->pkt_flags & (FLAG_SENSING | FLAG_HEAD)) 14596 == (FLAG_SENSING | FLAG_HEAD)); 14597 14598 un->un_sense_isbusy = 1; 14599 un->un_rqs_bp->b_resid = 0; 14600 sense_xp->xb_pktp->pkt_resid = 0; 14601 sense_xp->xb_pktp->pkt_reason = 0; 14602 14603 /* So we can get back the bp at interrupt time! */ 14604 sense_xp->xb_sense_bp = bp; 14605 14606 bzero(un->un_rqs_bp->b_un.b_addr, SENSE_LENGTH); 14607 14608 /* 14609 * Mark this buf as awaiting sense data. (This is already set in 14610 * the pkt_flags for the RQS packet.) 14611 */ 14612 ((SD_GET_XBUF(bp))->xb_pktp)->pkt_flags |= FLAG_SENSING; 14613 14614 sense_xp->xb_retry_count = 0; 14615 sense_xp->xb_victim_retry_count = 0; 14616 sense_xp->xb_ua_retry_count = 0; 14617 sense_xp->xb_nr_retry_count = 0; 14618 sense_xp->xb_dma_resid = 0; 14619 14620 /* Clean up the fields for auto-request sense */ 14621 sense_xp->xb_sense_status = 0; 14622 sense_xp->xb_sense_state = 0; 14623 sense_xp->xb_sense_resid = 0; 14624 bzero(sense_xp->xb_sense_data, sizeof (sense_xp->xb_sense_data)); 14625 14626 SD_TRACE(SD_LOG_IO_CORE, un, "sd_mark_rqs_busy: exit\n"); 14627 } 14628 14629 14630 /* 14631 * Function: sd_mark_rqs_idle 14632 * 14633 * Description: SD_MUTEX must be held continuously through this routine 14634 * to prevent reuse of the rqs struct before the caller can 14635 * complete its processing.
14636 * 14637 * Return Code: Pointer to the RQS buf 14638 * 14639 * Context: May be called under interrupt context 14640 */ 14641 14642 static struct buf * 14643 sd_mark_rqs_idle(struct sd_lun *un, struct sd_xbuf *sense_xp) 14644 { 14645 struct buf *bp; 14646 ASSERT(un != NULL); 14647 ASSERT(sense_xp != NULL); 14648 ASSERT(mutex_owned(SD_MUTEX(un))); 14649 ASSERT(un->un_sense_isbusy != 0); 14650 14651 un->un_sense_isbusy = 0; 14652 bp = sense_xp->xb_sense_bp; 14653 sense_xp->xb_sense_bp = NULL; 14654 14655 /* This pkt is no longer interested in getting sense data */ 14656 ((SD_GET_XBUF(bp))->xb_pktp)->pkt_flags &= ~FLAG_SENSING; 14657 14658 return (bp); 14659 } 14660 14661 14662 14663 /* 14664 * Function: sd_alloc_rqs 14665 * 14666 * Description: Set up the unit to receive auto request sense data 14667 * 14668 * Return Code: DDI_SUCCESS or DDI_FAILURE 14669 * 14670 * Context: Called under attach(9E) context 14671 */ 14672 14673 static int 14674 sd_alloc_rqs(struct scsi_device *devp, struct sd_lun *un) 14675 { 14676 struct sd_xbuf *xp; 14677 14678 ASSERT(un != NULL); 14679 ASSERT(!mutex_owned(SD_MUTEX(un))); 14680 ASSERT(un->un_rqs_bp == NULL); 14681 ASSERT(un->un_rqs_pktp == NULL); 14682 14683 /* 14684 * First allocate the required buf and scsi_pkt structs, then set up 14685 * the CDB in the scsi_pkt for a REQUEST SENSE command. 14686 */ 14687 un->un_rqs_bp = scsi_alloc_consistent_buf(&devp->sd_address, NULL, 14688 MAX_SENSE_LENGTH, B_READ, SLEEP_FUNC, NULL); 14689 if (un->un_rqs_bp == NULL) { 14690 return (DDI_FAILURE); 14691 } 14692 14693 un->un_rqs_pktp = scsi_init_pkt(&devp->sd_address, NULL, un->un_rqs_bp, 14694 CDB_GROUP0, 1, 0, PKT_CONSISTENT, SLEEP_FUNC, NULL); 14695 14696 if (un->un_rqs_pktp == NULL) { 14697 sd_free_rqs(un); 14698 return (DDI_FAILURE); 14699 } 14700 14701 /* Set up the CDB in the scsi_pkt for a REQUEST SENSE command. */ 14702 (void) scsi_setup_cdb((union scsi_cdb *)un->un_rqs_pktp->pkt_cdbp, 14703 SCMD_REQUEST_SENSE, 0, MAX_SENSE_LENGTH, 0); 14704 14705 SD_FILL_SCSI1_LUN(un, un->un_rqs_pktp); 14706 14707 /* Set up the other needed members in the ARQ scsi_pkt. */ 14708 un->un_rqs_pktp->pkt_comp = sdintr; 14709 un->un_rqs_pktp->pkt_time = sd_io_time; 14710 un->un_rqs_pktp->pkt_flags |= 14711 (FLAG_SENSING | FLAG_HEAD); /* (1222170) */ 14712 14713 /* 14714 * Allocate & init the sd_xbuf struct for the RQS command. Do not 14715 * provide any intpkt, destroypkt routines as we take care of 14716 * scsi_pkt allocation/freeing here and in sd_free_rqs(). 14717 */ 14718 xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP); 14719 sd_xbuf_init(un, un->un_rqs_bp, xp, SD_CHAIN_NULL, NULL); 14720 xp->xb_pktp = un->un_rqs_pktp; 14721 SD_INFO(SD_LOG_ATTACH_DETACH, un, 14722 "sd_alloc_rqs: un 0x%p, rqs xp 0x%p, pkt 0x%p, buf 0x%p\n", 14723 un, xp, un->un_rqs_pktp, un->un_rqs_bp); 14724 14725 /* 14726 * Save the pointer to the request sense private bp so it can 14727 * be retrieved in sdintr. 14728 */ 14729 un->un_rqs_pktp->pkt_private = un->un_rqs_bp; 14730 ASSERT(un->un_rqs_bp->b_private == xp); 14731 14732 /* 14733 * See if the HBA supports auto-request sense for the specified 14734 * target/lun. If it does, then try to enable it (if not already 14735 * enabled). 14736 * 14737 * Note: For some HBAs (ifp & sf), scsi_ifsetcap will always return 14738 * failure, while for other HBAs (pln) scsi_ifsetcap will always 14739 * return success. However, in both of these cases ARQ is always 14740 * enabled and scsi_ifgetcap will always return true. 
The best approach 14741 * is to issue the scsi_ifgetcap() first, then try the scsi_ifsetcap(). 14742 * 14743 * The third case is an HBA (adp) that always returns enabled on 14744 * scsi_ifgetcap even when ARQ is not enabled; the best approach 14745 * there is to issue a scsi_ifsetcap followed by a scsi_ifgetcap. 14746 * Note: this case is to circumvent the Adaptec bug. (x86 only) 14747 */ 14748 14749 if (un->un_f_is_fibre == TRUE) { 14750 un->un_f_arq_enabled = TRUE; 14751 } else { 14752 #if defined(__i386) || defined(__amd64) 14753 /* 14754 * Circumvent the Adaptec bug, remove this code when 14755 * the bug is fixed 14756 */ 14757 (void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 1, 1); 14758 #endif 14759 switch (scsi_ifgetcap(SD_ADDRESS(un), "auto-rqsense", 1)) { 14760 case 0: 14761 SD_INFO(SD_LOG_ATTACH_DETACH, un, 14762 "sd_alloc_rqs: HBA supports ARQ\n"); 14763 /* 14764 * ARQ is supported by this HBA but currently is not 14765 * enabled. Attempt to enable it and if successful then 14766 * mark this instance as ARQ enabled. 14767 */ 14768 if (scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 1, 1) 14769 == 1) { 14770 /* Successfully enabled ARQ in the HBA */ 14771 SD_INFO(SD_LOG_ATTACH_DETACH, un, 14772 "sd_alloc_rqs: ARQ enabled\n"); 14773 un->un_f_arq_enabled = TRUE; 14774 } else { 14775 /* Could not enable ARQ in the HBA */ 14776 SD_INFO(SD_LOG_ATTACH_DETACH, un, 14777 "sd_alloc_rqs: failed ARQ enable\n"); 14778 un->un_f_arq_enabled = FALSE; 14779 } 14780 break; 14781 case 1: 14782 /* 14783 * ARQ is supported by this HBA and is already enabled. 14784 * Just mark ARQ as enabled for this instance. 14785 */ 14786 SD_INFO(SD_LOG_ATTACH_DETACH, un, 14787 "sd_alloc_rqs: ARQ already enabled\n"); 14788 un->un_f_arq_enabled = TRUE; 14789 break; 14790 default: 14791 /* 14792 * ARQ is not supported by this HBA; disable it for this 14793 * instance. 14794 */ 14795 SD_INFO(SD_LOG_ATTACH_DETACH, un, 14796 "sd_alloc_rqs: HBA does not support ARQ\n"); 14797 un->un_f_arq_enabled = FALSE; 14798 break; 14799 } 14800 } 14801 14802 return (DDI_SUCCESS); 14803 } 14804 14805 14806 /* 14807 * Function: sd_free_rqs 14808 * 14809 * Description: Cleanup for the per-instance RQS command. 14810 * 14811 * Context: Kernel thread context 14812 */ 14813 14814 static void 14815 sd_free_rqs(struct sd_lun *un) 14816 { 14817 ASSERT(un != NULL); 14818 14819 SD_TRACE(SD_LOG_IO_CORE, un, "sd_free_rqs: entry\n"); 14820 14821 /* 14822 * If consistent memory is bound to a scsi_pkt, the pkt 14823 * has to be destroyed *before* freeing the consistent memory. 14824 * Don't change the sequence of these operations. 14825 * scsi_destroy_pkt() might access memory, which isn't allowed, 14826 * after it was freed in scsi_free_consistent_buf(). 14827 */ 14828 if (un->un_rqs_pktp != NULL) { 14829 scsi_destroy_pkt(un->un_rqs_pktp); 14830 un->un_rqs_pktp = NULL; 14831 } 14832 14833 if (un->un_rqs_bp != NULL) { 14834 struct sd_xbuf *xp = SD_GET_XBUF(un->un_rqs_bp); 14835 if (xp != NULL) { 14836 kmem_free(xp, sizeof (struct sd_xbuf)); 14837 } 14838 scsi_free_consistent_buf(un->un_rqs_bp); 14839 un->un_rqs_bp = NULL; 14840 } 14841 SD_TRACE(SD_LOG_IO_CORE, un, "sd_free_rqs: exit\n"); 14842 } 14843 14844 14845 14846 /* 14847 * Function: sd_reduce_throttle 14848 * 14849 * Description: Reduces the maximum # of outstanding commands on a 14850 * target to the current number of outstanding commands. 14851 * Queues a timeout(9F) callback to restore the limit 14852 * after a specified interval has elapsed.
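 *
 *		As a minimal sketch of the adaptive reduction performed
 *		below (field names as in this driver):
 *
 *			if ((throttle_type == SD_THROTTLE_TRAN_BUSY) &&
 *			    (un->un_busy_throttle == 0))
 *				un->un_busy_throttle = un->un_throttle;
 *			if (un->un_ncmds_in_transport > 0)
 *				un->un_throttle = un->un_ncmds_in_transport;
 *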
14853 * Typically used when we get a TRAN_BUSY return code 14854 * back from scsi_transport(). 14855 * 14856 * Arguments: un - ptr to the sd_lun softstate struct 14857 * throttle_type: SD_THROTTLE_TRAN_BUSY or SD_THROTTLE_QFULL 14858 * 14859 * Context: May be called from interrupt context 14860 */ 14861 14862 static void 14863 sd_reduce_throttle(struct sd_lun *un, int throttle_type) 14864 { 14865 ASSERT(un != NULL); 14866 ASSERT(mutex_owned(SD_MUTEX(un))); 14867 ASSERT(un->un_ncmds_in_transport >= 0); 14868 14869 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reduce_throttle: " 14870 "entry: un:0x%p un_throttle:%d un_ncmds_in_transport:%d\n", 14871 un, un->un_throttle, un->un_ncmds_in_transport); 14872 14873 if (un->un_throttle > 1) { 14874 if (un->un_f_use_adaptive_throttle == TRUE) { 14875 switch (throttle_type) { 14876 case SD_THROTTLE_TRAN_BUSY: 14877 if (un->un_busy_throttle == 0) { 14878 un->un_busy_throttle = un->un_throttle; 14879 } 14880 break; 14881 case SD_THROTTLE_QFULL: 14882 un->un_busy_throttle = 0; 14883 break; 14884 default: 14885 ASSERT(FALSE); 14886 } 14887 14888 if (un->un_ncmds_in_transport > 0) { 14889 un->un_throttle = un->un_ncmds_in_transport; 14890 } 14891 14892 } else { 14893 if (un->un_ncmds_in_transport == 0) { 14894 un->un_throttle = 1; 14895 } else { 14896 un->un_throttle = un->un_ncmds_in_transport; 14897 } 14898 } 14899 } 14900 14901 /* Reschedule the timeout if none is currently active */ 14902 if (un->un_reset_throttle_timeid == NULL) { 14903 un->un_reset_throttle_timeid = timeout(sd_restore_throttle, 14904 un, SD_THROTTLE_RESET_INTERVAL); 14905 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14906 "sd_reduce_throttle: timeout scheduled!\n"); 14907 } 14908 14909 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reduce_throttle: " 14910 "exit: un:0x%p un_throttle:%d\n", un, un->un_throttle); 14911 } 14912 14913 14914 14915 /* 14916 * Function: sd_restore_throttle 14917 * 14918 * Description: Callback function for timeout(9F). Resets the current 14919 * value of un->un_throttle to its default. 14920 * 14921 * Arguments: arg - pointer to associated softstate for the device. 14922 * 14923 * Context: May be called from interrupt context 14924 */ 14925 14926 static void 14927 sd_restore_throttle(void *arg) 14928 { 14929 struct sd_lun *un = arg; 14930 14931 ASSERT(un != NULL); 14932 ASSERT(!mutex_owned(SD_MUTEX(un))); 14933 14934 mutex_enter(SD_MUTEX(un)); 14935 14936 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: " 14937 "entry: un:0x%p un_throttle:%d\n", un, un->un_throttle); 14938 14939 un->un_reset_throttle_timeid = NULL; 14940 14941 if (un->un_f_use_adaptive_throttle == TRUE) { 14942 /* 14943 * If un_busy_throttle is nonzero, then it contains the 14944 * value that un_throttle had when we got a TRAN_BUSY back 14945 * from scsi_transport(). We want to revert to this 14946 * value. 14947 * 14948 * In the QFULL case, the throttle limit will incrementally 14949 * increase until it reaches max throttle. 14950 */ 14951 if (un->un_busy_throttle > 0) { 14952 un->un_throttle = un->un_busy_throttle; 14953 un->un_busy_throttle = 0; 14954 } else { 14955 /* 14956 * Increase the throttle by 10% to open the gate 14957 * slowly; schedule another restore if the saved 14958 * throttle has not been reached. 14959 */ 14960 short throttle; 14961 if (sd_qfull_throttle_enable) { 14962 throttle = un->un_throttle + 14963 max((un->un_throttle / 10), 1); 14964 un->un_throttle = 14965 (throttle < un->un_saved_throttle) ?
14966 throttle : un->un_saved_throttle; 14967 if (un->un_throttle < un->un_saved_throttle) { 14968 un->un_reset_throttle_timeid = 14969 timeout(sd_restore_throttle, 14970 un, 14971 SD_QFULL_THROTTLE_RESET_INTERVAL); 14972 } 14973 } 14974 } 14975 14976 /* 14977 * If un_throttle has fallen below the low-water mark, we 14978 * restore the maximum value here (and allow it to ratchet 14979 * down again if necessary). 14980 */ 14981 if (un->un_throttle < un->un_min_throttle) { 14982 un->un_throttle = un->un_saved_throttle; 14983 } 14984 } else { 14985 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: " 14986 "restoring limit from 0x%x to 0x%x\n", 14987 un->un_throttle, un->un_saved_throttle); 14988 un->un_throttle = un->un_saved_throttle; 14989 } 14990 14991 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, 14992 "sd_restore_throttle: calling sd_start_cmds!\n"); 14993 14994 sd_start_cmds(un, NULL); 14995 14996 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, 14997 "sd_restore_throttle: exit: un:0x%p un_throttle:%d\n", 14998 un, un->un_throttle); 14999 15000 mutex_exit(SD_MUTEX(un)); 15001 15002 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: exit\n"); 15003 } 15004 15005 /* 15006 * Function: sdrunout 15007 * 15008 * Description: Callback routine for scsi_init_pkt when a resource allocation 15009 * fails. 15010 * 15011 * Arguments: arg - a pointer to the sd_lun unit struct for the particular 15012 * soft state instance. 15013 * 15014 * Return Code: The scsi_init_pkt routine allows for the callback function to 15015 * return a 0 indicating the callback should be rescheduled or a 1 15016 * indicating not to reschedule. This routine always returns 1 15017 * because the driver always provides a callback function to 15018 * scsi_init_pkt. This results in a callback always being scheduled 15019 * (via the scsi_init_pkt callback implementation) if a resource 15020 * failure occurs. 15021 * 15022 * Context: This callback function may not block or call routines that block. 15023 * 15024 * Note: Using the scsi_init_pkt callback facility can result in an I/O 15025 * request persisting at the head of the list which cannot be 15026 * satisfied even after multiple retries. In the future the driver 15027 * may implement some type of maximum runout count before failing 15028 * an I/O. 15029 */ 15030 15031 static int 15032 sdrunout(caddr_t arg) 15033 { 15034 struct sd_lun *un = (struct sd_lun *)arg; 15035 15036 ASSERT(un != NULL); 15037 ASSERT(!mutex_owned(SD_MUTEX(un))); 15038 15039 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdrunout: entry\n"); 15040 15041 mutex_enter(SD_MUTEX(un)); 15042 sd_start_cmds(un, NULL); 15043 mutex_exit(SD_MUTEX(un)); 15044 /* 15045 * This callback routine always returns 1 (i.e. do not reschedule) 15046 * because we always specify sdrunout as the callback handler for 15047 * scsi_init_pkt inside the call to sd_start_cmds. 15048 */ 15049 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdrunout: exit\n"); 15050 return (1); 15051 } 15052 15053 15054 /* 15055 * Function: sdintr 15056 * 15057 * Description: Completion callback routine for scsi_pkt(9S) structs 15058 * sent to the HBA driver via scsi_transport(9F).
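 *
 *		As a minimal sketch of how this completion routine gets
 *		wired up (cf. sd_alloc_rqs() above; transport retries and
 *		error handling omitted):
 *
 *			pktp->pkt_comp = sdintr;
 *			pktp->pkt_time = sd_io_time;
 *			if (scsi_transport(pktp) != TRAN_ACCEPT)
 *				handle TRAN_BUSY et al.;
 *
 *		sdintr() is then invoked in interrupt context once the
 *		HBA driver completes the packet.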
15059 * 15060 * Context: Interrupt context 15061 */ 15062 15063 static void 15064 sdintr(struct scsi_pkt *pktp) 15065 { 15066 struct buf *bp; 15067 struct sd_xbuf *xp; 15068 struct sd_lun *un; 15069 size_t actual_len; 15070 15071 ASSERT(pktp != NULL); 15072 bp = (struct buf *)pktp->pkt_private; 15073 ASSERT(bp != NULL); 15074 xp = SD_GET_XBUF(bp); 15075 ASSERT(xp != NULL); 15076 ASSERT(xp->xb_pktp != NULL); 15077 un = SD_GET_UN(bp); 15078 ASSERT(un != NULL); 15079 ASSERT(!mutex_owned(SD_MUTEX(un))); 15080 15081 #ifdef SD_FAULT_INJECTION 15082 15083 SD_INFO(SD_LOG_IOERR, un, "sdintr: sdintr calling Fault injection\n"); 15084 /* SD FaultInjection */ 15085 sd_faultinjection(pktp); 15086 15087 #endif /* SD_FAULT_INJECTION */ 15088 15089 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdintr: entry: buf:0x%p," 15090 " xp:0x%p, un:0x%p\n", bp, xp, un); 15091 15092 mutex_enter(SD_MUTEX(un)); 15093 15094 /* Reduce the count of the #commands currently in transport */ 15095 un->un_ncmds_in_transport--; 15096 ASSERT(un->un_ncmds_in_transport >= 0); 15097 15098 /* Increment counter to indicate that the callback routine is active */ 15099 un->un_in_callback++; 15100 15101 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 15102 15103 #ifdef SDDEBUG 15104 if (bp == un->un_retry_bp) { 15105 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sdintr: " 15106 "un:0x%p: GOT retry_bp:0x%p un_ncmds_in_transport:%d\n", 15107 un, un->un_retry_bp, un->un_ncmds_in_transport); 15108 } 15109 #endif 15110 15111 /* 15112 * If pkt_reason is CMD_DEV_GONE, fail the command, and update the media 15113 * state if needed. 15114 */ 15115 if (pktp->pkt_reason == CMD_DEV_GONE) { 15116 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 15117 "Command failed to complete...Device is gone\n"); 15118 if (un->un_mediastate != DKIO_DEV_GONE) { 15119 un->un_mediastate = DKIO_DEV_GONE; 15120 cv_broadcast(&un->un_state_cv); 15121 } 15122 sd_return_failed_command(un, bp, EIO); 15123 goto exit; 15124 } 15125 15126 if (pktp->pkt_state & STATE_XARQ_DONE) { 15127 SD_TRACE(SD_LOG_COMMON, un, 15128 "sdintr: extra sense data received. pkt=%p\n", pktp); 15129 } 15130 15131 /* 15132 * First see if the pkt has auto-request sense data with it.... 15133 * Look at the packet state first so we don't take a performance 15134 * hit looking at the arq enabled flag unless absolutely necessary. 15135 */ 15136 if ((pktp->pkt_state & STATE_ARQ_DONE) && 15137 (un->un_f_arq_enabled == TRUE)) { 15138 /* 15139 * The HBA did an auto request sense for this command so check 15140 * for FLAG_DIAGNOSE. If set this indicates a uscsi or internal 15141 * driver command that should not be retried. 15142 */ 15143 if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) { 15144 /* 15145 * Save the relevant sense info into the xp for the 15146 * original cmd. 
15147 */ 15148 struct scsi_arq_status *asp; 15149 asp = (struct scsi_arq_status *)(pktp->pkt_scbp); 15150 xp->xb_sense_status = 15151 *((uchar_t *)(&(asp->sts_rqpkt_status))); 15152 xp->xb_sense_state = asp->sts_rqpkt_state; 15153 xp->xb_sense_resid = asp->sts_rqpkt_resid; 15154 if (pktp->pkt_state & STATE_XARQ_DONE) { 15155 actual_len = MAX_SENSE_LENGTH - 15156 xp->xb_sense_resid; 15157 bcopy(&asp->sts_sensedata, xp->xb_sense_data, 15158 MAX_SENSE_LENGTH); 15159 } else { 15160 if (xp->xb_sense_resid > SENSE_LENGTH) { 15161 actual_len = MAX_SENSE_LENGTH - 15162 xp->xb_sense_resid; 15163 } else { 15164 actual_len = SENSE_LENGTH - 15165 xp->xb_sense_resid; 15166 } 15167 if (xp->xb_pkt_flags & SD_XB_USCSICMD) { 15168 if ((((struct uscsi_cmd *) 15169 (xp->xb_pktinfo))->uscsi_rqlen) > 15170 actual_len) { 15171 xp->xb_sense_resid = 15172 (((struct uscsi_cmd *) 15173 (xp->xb_pktinfo))-> 15174 uscsi_rqlen) - actual_len; 15175 } else { 15176 xp->xb_sense_resid = 0; 15177 } 15178 } 15179 bcopy(&asp->sts_sensedata, xp->xb_sense_data, 15180 SENSE_LENGTH); 15181 } 15182 15183 /* fail the command */ 15184 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15185 "sdintr: arq done and FLAG_DIAGNOSE set\n"); 15186 sd_return_failed_command(un, bp, EIO); 15187 goto exit; 15188 } 15189 15190 #if (defined(__i386) || defined(__amd64)) /* DMAFREE for x86 only */ 15191 /* 15192 * We want to either retry or fail this command, so free 15193 * the DMA resources here. If we retry the command then 15194 * the DMA resources will be reallocated in sd_start_cmds(). 15195 * Note that when PKT_DMA_PARTIAL is used, this reallocation 15196 * causes the *entire* transfer to start over again from the 15197 * beginning of the request, even for PARTIAL chunks that 15198 * have already transferred successfully. 15199 */ 15200 if ((un->un_f_is_fibre == TRUE) && 15201 ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) && 15202 ((pktp->pkt_flags & FLAG_SENSING) == 0)) { 15203 scsi_dmafree(pktp); 15204 xp->xb_pkt_flags |= SD_XB_DMA_FREED; 15205 } 15206 #endif 15207 15208 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15209 "sdintr: arq done, sd_handle_auto_request_sense\n"); 15210 15211 sd_handle_auto_request_sense(un, bp, xp, pktp); 15212 goto exit; 15213 } 15214 15215 /* Next see if this is the REQUEST SENSE pkt for the instance */ 15216 if (pktp->pkt_flags & FLAG_SENSING) { 15217 /* This pktp is from the unit's REQUEST_SENSE command */ 15218 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15219 "sdintr: sd_handle_request_sense\n"); 15220 sd_handle_request_sense(un, bp, xp, pktp); 15221 goto exit; 15222 } 15223 15224 /* 15225 * Check to see if the command successfully completed as requested; 15226 * this is the most common case (and also the hot performance path). 15227 * 15228 * Requirements for successful completion are: 15229 * pkt_reason is CMD_CMPLT and packet status is status good. 15230 * In addition: 15231 * - A residual of zero indicates successful completion no matter what 15232 * the command is. 15233 * - If the residual is not zero and the command is not a read or 15234 * write, then it's still defined as successful completion. In other 15235 * words, if the command is a read or write the residual must be 15236 * zero for successful completion. 15237 * - If the residual is not zero and the command is a read or 15238 * write, and it's a USCSICMD, then it's still defined as 15239 * successful completion. 
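 *
 * As a sketch, with "op" standing for the low 5 bits of the opcode
 * (SD_GET_PKT_OPCODE(pktp) & 0x1F), the tests below reduce to:
 *
 *	if ((pktp->pkt_resid == 0) ||
 *	    ((op != SCMD_READ) && (op != SCMD_WRITE)) ||
 *	    (xp->xb_pkt_flags & SD_XB_USCSICMD))
 *		treat the command as successful;
 *	else
 *		fall through to not_successful below;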
15240 */ 15241 if ((pktp->pkt_reason == CMD_CMPLT) && 15242 (SD_GET_PKT_STATUS(pktp) == STATUS_GOOD)) { 15243 15244 /* 15245 * Since this command is returned with a good status, we 15246 * can reset the count for Sonoma failover. 15247 */ 15248 un->un_sonoma_failure_count = 0; 15249 15250 /* 15251 * Return all USCSI commands on good status 15252 */ 15253 if (pktp->pkt_resid == 0) { 15254 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15255 "sdintr: returning command for resid == 0\n"); 15256 } else if (((SD_GET_PKT_OPCODE(pktp) & 0x1F) != SCMD_READ) && 15257 ((SD_GET_PKT_OPCODE(pktp) & 0x1F) != SCMD_WRITE)) { 15258 SD_UPDATE_B_RESID(bp, pktp); 15259 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15260 "sdintr: returning command for resid != 0\n"); 15261 } else if (xp->xb_pkt_flags & SD_XB_USCSICMD) { 15262 SD_UPDATE_B_RESID(bp, pktp); 15263 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15264 "sdintr: returning uscsi command\n"); 15265 } else { 15266 goto not_successful; 15267 } 15268 sd_return_command(un, bp); 15269 15270 /* 15271 * Decrement counter to indicate that the callback routine 15272 * is done. 15273 */ 15274 un->un_in_callback--; 15275 ASSERT(un->un_in_callback >= 0); 15276 mutex_exit(SD_MUTEX(un)); 15277 15278 return; 15279 } 15280 15281 not_successful: 15282 15283 #if (defined(__i386) || defined(__amd64)) /* DMAFREE for x86 only */ 15284 /* 15285 * The following is based upon knowledge of the underlying transport 15286 * and its use of DMA resources. This code should be removed when 15287 * PKT_DMA_PARTIAL support is taken out of the disk driver in favor 15288 * of the new PKT_CMD_BREAKUP protocol. See also sd_initpkt_for_buf() 15289 * and sd_start_cmds(). 15290 * 15291 * Free any DMA resources associated with this command if there 15292 * is a chance it could be retried or enqueued for later retry. 15293 * If we keep the DMA binding then mpxio cannot reissue the 15294 * command on another path whenever a path failure occurs. 15295 * 15296 * Note that when PKT_DMA_PARTIAL is used, free/reallocation 15297 * causes the *entire* transfer to start over again from the 15298 * beginning of the request, even for PARTIAL chunks that 15299 * have already transferred successfully. 15300 * 15301 * This is only done for non-uscsi commands (and also skipped for the 15302 * driver's internal RQS command). Also just do this for Fibre Channel 15303 * devices as these are the only ones that support mpxio. 15304 */ 15305 if ((un->un_f_is_fibre == TRUE) && 15306 ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) && 15307 ((pktp->pkt_flags & FLAG_SENSING) == 0)) { 15308 scsi_dmafree(pktp); 15309 xp->xb_pkt_flags |= SD_XB_DMA_FREED; 15310 } 15311 #endif 15312 15313 /* 15314 * The command did not successfully complete as requested so check 15315 * for FLAG_DIAGNOSE. If set this indicates a uscsi or internal 15316 * driver command that should not be retried so just return. If 15317 * FLAG_DIAGNOSE is not set the error will be processed below. 15318 */ 15319 if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) { 15320 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15321 "sdintr: FLAG_DIAGNOSE: sd_return_failed_command\n"); 15322 /* 15323 * Issue a request sense if a check condition caused the error 15324 * (we handle the auto request sense case above), otherwise 15325 * just fail the command. 
15326 */ 15327 if ((pktp->pkt_reason == CMD_CMPLT) && 15328 (SD_GET_PKT_STATUS(pktp) == STATUS_CHECK)) { 15329 sd_send_request_sense_command(un, bp, pktp); 15330 } else { 15331 sd_return_failed_command(un, bp, EIO); 15332 } 15333 goto exit; 15334 } 15335 15336 /* 15337 * The command did not successfully complete as requested so process 15338 * the error, retry, and/or attempt recovery. 15339 */ 15340 switch (pktp->pkt_reason) { 15341 case CMD_CMPLT: 15342 switch (SD_GET_PKT_STATUS(pktp)) { 15343 case STATUS_GOOD: 15344 /* 15345 * The command completed successfully with a non-zero 15346 * residual 15347 */ 15348 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15349 "sdintr: STATUS_GOOD \n"); 15350 sd_pkt_status_good(un, bp, xp, pktp); 15351 break; 15352 15353 case STATUS_CHECK: 15354 case STATUS_TERMINATED: 15355 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15356 "sdintr: STATUS_TERMINATED | STATUS_CHECK\n"); 15357 sd_pkt_status_check_condition(un, bp, xp, pktp); 15358 break; 15359 15360 case STATUS_BUSY: 15361 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15362 "sdintr: STATUS_BUSY\n"); 15363 sd_pkt_status_busy(un, bp, xp, pktp); 15364 break; 15365 15366 case STATUS_RESERVATION_CONFLICT: 15367 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15368 "sdintr: STATUS_RESERVATION_CONFLICT\n"); 15369 sd_pkt_status_reservation_conflict(un, bp, xp, pktp); 15370 break; 15371 15372 case STATUS_QFULL: 15373 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15374 "sdintr: STATUS_QFULL\n"); 15375 sd_pkt_status_qfull(un, bp, xp, pktp); 15376 break; 15377 15378 case STATUS_MET: 15379 case STATUS_INTERMEDIATE: 15380 case STATUS_SCSI2: 15381 case STATUS_INTERMEDIATE_MET: 15382 case STATUS_ACA_ACTIVE: 15383 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 15384 "Unexpected SCSI status received: 0x%x\n", 15385 SD_GET_PKT_STATUS(pktp)); 15386 sd_return_failed_command(un, bp, EIO); 15387 break; 15388 15389 default: 15390 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 15391 "Invalid SCSI status received: 0x%x\n", 15392 SD_GET_PKT_STATUS(pktp)); 15393 sd_return_failed_command(un, bp, EIO); 15394 break; 15395 15396 } 15397 break; 15398 15399 case CMD_INCOMPLETE: 15400 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15401 "sdintr: CMD_INCOMPLETE\n"); 15402 sd_pkt_reason_cmd_incomplete(un, bp, xp, pktp); 15403 break; 15404 case CMD_TRAN_ERR: 15405 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15406 "sdintr: CMD_TRAN_ERR\n"); 15407 sd_pkt_reason_cmd_tran_err(un, bp, xp, pktp); 15408 break; 15409 case CMD_RESET: 15410 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15411 "sdintr: CMD_RESET \n"); 15412 sd_pkt_reason_cmd_reset(un, bp, xp, pktp); 15413 break; 15414 case CMD_ABORTED: 15415 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15416 "sdintr: CMD_ABORTED \n"); 15417 sd_pkt_reason_cmd_aborted(un, bp, xp, pktp); 15418 break; 15419 case CMD_TIMEOUT: 15420 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15421 "sdintr: CMD_TIMEOUT\n"); 15422 sd_pkt_reason_cmd_timeout(un, bp, xp, pktp); 15423 break; 15424 case CMD_UNX_BUS_FREE: 15425 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15426 "sdintr: CMD_UNX_BUS_FREE \n"); 15427 sd_pkt_reason_cmd_unx_bus_free(un, bp, xp, pktp); 15428 break; 15429 case CMD_TAG_REJECT: 15430 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15431 "sdintr: CMD_TAG_REJECT\n"); 15432 sd_pkt_reason_cmd_tag_reject(un, bp, xp, pktp); 15433 break; 15434 default: 15435 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15436 "sdintr: default\n"); 15437 sd_pkt_reason_default(un, bp, xp, pktp); 15438 break; 15439 } 15440 15441 exit: 15442 
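	/*
	 * As a minimal sketch of why un_in_callback is maintained: a
	 * thread that must wait for completion callbacks to drain (for
	 * example during teardown) can poll it under SD_MUTEX. This is
	 * an illustrative fragment, not the driver's actual teardown
	 * code:
	 *
	 *	mutex_enter(SD_MUTEX(un));
	 *	while (un->un_in_callback > 0) {
	 *		mutex_exit(SD_MUTEX(un));
	 *		delay(drv_usectohz(10000));
	 *		mutex_enter(SD_MUTEX(un));
	 *	}
	 *	mutex_exit(SD_MUTEX(un));
	 */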
SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdintr: exit\n"); 15443 15444 /* Decrement counter to indicate that the callback routine is done. */ 15445 un->un_in_callback--; 15446 ASSERT(un->un_in_callback >= 0); 15447 15448 /* 15449 * At this point, the pkt has been dispatched, i.e., it is either 15450 * being retried or has been returned to its caller and should 15451 * not be referenced. 15452 */ 15453 15454 mutex_exit(SD_MUTEX(un)); 15455 } 15456 15457 15458 /* 15459 * Function: sd_print_incomplete_msg 15460 * 15461 * Description: Prints the error message for a CMD_INCOMPLETE error. 15462 * 15463 * Arguments: un - ptr to associated softstate for the device. 15464 * bp - ptr to the buf(9S) for the command. 15465 * arg - message string ptr 15466 * code - SD_DELAYED_RETRY_ISSUED, SD_IMMEDIATE_RETRY_ISSUED, 15467 * or SD_NO_RETRY_ISSUED. 15468 * 15469 * Context: May be called under interrupt context 15470 */ 15471 15472 static void 15473 sd_print_incomplete_msg(struct sd_lun *un, struct buf *bp, void *arg, int code) 15474 { 15475 struct scsi_pkt *pktp; 15476 char *msgp; 15477 char *cmdp = arg; 15478 15479 ASSERT(un != NULL); 15480 ASSERT(mutex_owned(SD_MUTEX(un))); 15481 ASSERT(bp != NULL); 15482 ASSERT(arg != NULL); 15483 pktp = SD_GET_PKTP(bp); 15484 ASSERT(pktp != NULL); 15485 15486 switch (code) { 15487 case SD_DELAYED_RETRY_ISSUED: 15488 case SD_IMMEDIATE_RETRY_ISSUED: 15489 msgp = "retrying"; 15490 break; 15491 case SD_NO_RETRY_ISSUED: 15492 default: 15493 msgp = "giving up"; 15494 break; 15495 } 15496 15497 if ((pktp->pkt_flags & FLAG_SILENT) == 0) { 15498 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 15499 "incomplete %s- %s\n", cmdp, msgp); 15500 } 15501 } 15502 15503 15504 15505 /* 15506 * Function: sd_pkt_status_good 15507 * 15508 * Description: Processing for a STATUS_GOOD code in pkt_status. 15509 * 15510 * Context: May be called under interrupt context 15511 */ 15512 15513 static void 15514 sd_pkt_status_good(struct sd_lun *un, struct buf *bp, 15515 struct sd_xbuf *xp, struct scsi_pkt *pktp) 15516 { 15517 char *cmdp; 15518 15519 ASSERT(un != NULL); 15520 ASSERT(mutex_owned(SD_MUTEX(un))); 15521 ASSERT(bp != NULL); 15522 ASSERT(xp != NULL); 15523 ASSERT(pktp != NULL); 15524 ASSERT(pktp->pkt_reason == CMD_CMPLT); 15525 ASSERT(SD_GET_PKT_STATUS(pktp) == STATUS_GOOD); 15526 ASSERT(pktp->pkt_resid != 0); 15527 15528 SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: entry\n"); 15529 15530 SD_UPDATE_ERRSTATS(un, sd_harderrs); 15531 switch (SD_GET_PKT_OPCODE(pktp) & 0x1F) { 15532 case SCMD_READ: 15533 cmdp = "read"; 15534 break; 15535 case SCMD_WRITE: 15536 cmdp = "write"; 15537 break; 15538 default: 15539 SD_UPDATE_B_RESID(bp, pktp); 15540 sd_return_command(un, bp); 15541 SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: exit\n"); 15542 return; 15543 } 15544 15545 /* 15546 * See if we can retry the read/write, preferably immediately. 15547 * If retries are exhausted, then sd_retry_command() will update 15548 * the b_resid count. 15549 */ 15550 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_incomplete_msg, 15551 cmdp, EIO, (clock_t)0, NULL); 15552 15553 SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: exit\n"); 15554 } 15555 15556 15557 15558 15559 15560 /* 15561 * Function: sd_handle_request_sense 15562 * 15563 * Description: Processing for non-auto Request Sense command.
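 *
 *		The bp/xp/pktp passed in describe the RQS command itself;
 *		the original command is recovered through the xbuf, as set
 *		up in sd_mark_rqs_busy(). A minimal sketch (mirroring the
 *		code below):
 *
 *			cmd_bp   = sense_xp->xb_sense_bp;
 *			cmd_xp   = SD_GET_XBUF(cmd_bp);
 *			cmd_pktp = SD_GET_PKTP(cmd_bp);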
15564 * 15565 * Arguments: un - ptr to associated softstate 15566 * sense_bp - ptr to buf(9S) for the RQS command 15567 * sense_xp - ptr to the sd_xbuf for the RQS command 15568 * sense_pktp - ptr to the scsi_pkt(9S) for the RQS command 15569 * 15570 * Context: May be called under interrupt context 15571 */ 15572 15573 static void 15574 sd_handle_request_sense(struct sd_lun *un, struct buf *sense_bp, 15575 struct sd_xbuf *sense_xp, struct scsi_pkt *sense_pktp) 15576 { 15577 struct buf *cmd_bp; /* buf for the original command */ 15578 struct sd_xbuf *cmd_xp; /* sd_xbuf for the original command */ 15579 struct scsi_pkt *cmd_pktp; /* pkt for the original command */ 15580 size_t actual_len; /* actual sense data length */ 15581 15582 ASSERT(un != NULL); 15583 ASSERT(mutex_owned(SD_MUTEX(un))); 15584 ASSERT(sense_bp != NULL); 15585 ASSERT(sense_xp != NULL); 15586 ASSERT(sense_pktp != NULL); 15587 15588 /* 15589 * Note the sense_bp, sense_xp, and sense_pktp here are for the 15590 * RQS command and not the original command. 15591 */ 15592 ASSERT(sense_pktp == un->un_rqs_pktp); 15593 ASSERT(sense_bp == un->un_rqs_bp); 15594 ASSERT((sense_pktp->pkt_flags & (FLAG_SENSING | FLAG_HEAD)) == 15595 (FLAG_SENSING | FLAG_HEAD)); 15596 ASSERT((((SD_GET_XBUF(sense_xp->xb_sense_bp))->xb_pktp->pkt_flags) & 15597 FLAG_SENSING) == FLAG_SENSING); 15598 15599 /* These are the bp, xp, and pktp for the original command */ 15600 cmd_bp = sense_xp->xb_sense_bp; 15601 cmd_xp = SD_GET_XBUF(cmd_bp); 15602 cmd_pktp = SD_GET_PKTP(cmd_bp); 15603 15604 if (sense_pktp->pkt_reason != CMD_CMPLT) { 15605 /* 15606 * The REQUEST SENSE command failed. Release the REQUEST 15607 * SENSE command for re-use, get back the bp for the original 15608 * command, and attempt to re-try the original command if 15609 * FLAG_DIAGNOSE is not set in the original packet. 15610 */ 15611 SD_UPDATE_ERRSTATS(un, sd_harderrs); 15612 if ((cmd_pktp->pkt_flags & FLAG_DIAGNOSE) == 0) { 15613 cmd_bp = sd_mark_rqs_idle(un, sense_xp); 15614 sd_retry_command(un, cmd_bp, SD_RETRIES_STANDARD, 15615 NULL, NULL, EIO, (clock_t)0, NULL); 15616 return; 15617 } 15618 } 15619 15620 /* 15621 * Save the relevant sense info into the xp for the original cmd. 15622 * 15623 * Note: if the request sense failed the state info will be zero 15624 * as set in sd_mark_rqs_busy() 15625 */ 15626 cmd_xp->xb_sense_status = *(sense_pktp->pkt_scbp); 15627 cmd_xp->xb_sense_state = sense_pktp->pkt_state; 15628 actual_len = MAX_SENSE_LENGTH - sense_pktp->pkt_resid; 15629 if ((cmd_xp->xb_pkt_flags & SD_XB_USCSICMD) && 15630 (((struct uscsi_cmd *)cmd_xp->xb_pktinfo)->uscsi_rqlen > 15631 SENSE_LENGTH)) { 15632 bcopy(sense_bp->b_un.b_addr, cmd_xp->xb_sense_data, 15633 MAX_SENSE_LENGTH); 15634 cmd_xp->xb_sense_resid = sense_pktp->pkt_resid; 15635 } else { 15636 bcopy(sense_bp->b_un.b_addr, cmd_xp->xb_sense_data, 15637 SENSE_LENGTH); 15638 if (actual_len < SENSE_LENGTH) { 15639 cmd_xp->xb_sense_resid = SENSE_LENGTH - actual_len; 15640 } else { 15641 cmd_xp->xb_sense_resid = 0; 15642 } 15643 } 15644 15645 /* 15646 * Free up the RQS command.... 15647 * NOTE: 15648 * Must do this BEFORE calling sd_validate_sense_data! 15649 * sd_validate_sense_data may return the original command in 15650 * which case the pkt will be freed and the flags can no 15651 * longer be touched. 15652 * SD_MUTEX is held through this process until the command 15653 * is dispatched based upon the sense data, so there are 15654 * no race conditions. 
15655 */ 15656 (void) sd_mark_rqs_idle(un, sense_xp); 15657 15658 /* 15659 * For a retryable command see if we have valid sense data, if so then 15660 * turn it over to sd_decode_sense() to figure out the right course of 15661 * action. Just fail a non-retryable command. 15662 */ 15663 if ((cmd_pktp->pkt_flags & FLAG_DIAGNOSE) == 0) { 15664 if (sd_validate_sense_data(un, cmd_bp, cmd_xp, actual_len) == 15665 SD_SENSE_DATA_IS_VALID) { 15666 sd_decode_sense(un, cmd_bp, cmd_xp, cmd_pktp); 15667 } 15668 } else { 15669 SD_DUMP_MEMORY(un, SD_LOG_IO_CORE, "Failed CDB", 15670 (uchar_t *)cmd_pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX); 15671 SD_DUMP_MEMORY(un, SD_LOG_IO_CORE, "Sense Data", 15672 (uchar_t *)cmd_xp->xb_sense_data, SENSE_LENGTH, SD_LOG_HEX); 15673 sd_return_failed_command(un, cmd_bp, EIO); 15674 } 15675 } 15676 15677 15678 15679 15680 /* 15681 * Function: sd_handle_auto_request_sense 15682 * 15683 * Description: Processing for auto-request sense information. 15684 * 15685 * Arguments: un - ptr to associated softstate 15686 * bp - ptr to buf(9S) for the command 15687 * xp - ptr to the sd_xbuf for the command 15688 * pktp - ptr to the scsi_pkt(9S) for the command 15689 * 15690 * Context: May be called under interrupt context 15691 */ 15692 15693 static void 15694 sd_handle_auto_request_sense(struct sd_lun *un, struct buf *bp, 15695 struct sd_xbuf *xp, struct scsi_pkt *pktp) 15696 { 15697 struct scsi_arq_status *asp; 15698 size_t actual_len; 15699 15700 ASSERT(un != NULL); 15701 ASSERT(mutex_owned(SD_MUTEX(un))); 15702 ASSERT(bp != NULL); 15703 ASSERT(xp != NULL); 15704 ASSERT(pktp != NULL); 15705 ASSERT(pktp != un->un_rqs_pktp); 15706 ASSERT(bp != un->un_rqs_bp); 15707 15708 /* 15709 * For auto-request sense, we get a scsi_arq_status back from 15710 * the HBA, with the sense data in the sts_sensedata member. 15711 * The pkt_scbp of the packet points to this scsi_arq_status. 15712 */ 15713 asp = (struct scsi_arq_status *)(pktp->pkt_scbp); 15714 15715 if (asp->sts_rqpkt_reason != CMD_CMPLT) { 15716 /* 15717 * The auto REQUEST SENSE failed; see if we can re-try 15718 * the original command. 15719 */ 15720 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 15721 "auto request sense failed (reason=%s)\n", 15722 scsi_rname(asp->sts_rqpkt_reason)); 15723 15724 sd_reset_target(un, pktp); 15725 15726 sd_retry_command(un, bp, SD_RETRIES_STANDARD, 15727 NULL, NULL, EIO, (clock_t)0, NULL); 15728 return; 15729 } 15730 15731 /* Save the relevant sense info into the xp for the original cmd. 
*/ 15732 xp->xb_sense_status = *((uchar_t *)(&(asp->sts_rqpkt_status))); 15733 xp->xb_sense_state = asp->sts_rqpkt_state; 15734 xp->xb_sense_resid = asp->sts_rqpkt_resid; 15735 if (xp->xb_sense_state & STATE_XARQ_DONE) { 15736 actual_len = MAX_SENSE_LENGTH - xp->xb_sense_resid; 15737 bcopy(&asp->sts_sensedata, xp->xb_sense_data, 15738 MAX_SENSE_LENGTH); 15739 } else { 15740 if (xp->xb_sense_resid > SENSE_LENGTH) { 15741 actual_len = MAX_SENSE_LENGTH - xp->xb_sense_resid; 15742 } else { 15743 actual_len = SENSE_LENGTH - xp->xb_sense_resid; 15744 } 15745 if (xp->xb_pkt_flags & SD_XB_USCSICMD) { 15746 if ((((struct uscsi_cmd *) 15747 (xp->xb_pktinfo))->uscsi_rqlen) > actual_len) { 15748 xp->xb_sense_resid = (((struct uscsi_cmd *) 15749 (xp->xb_pktinfo))->uscsi_rqlen) - 15750 actual_len; 15751 } else { 15752 xp->xb_sense_resid = 0; 15753 } 15754 } 15755 bcopy(&asp->sts_sensedata, xp->xb_sense_data, SENSE_LENGTH); 15756 } 15757 15758 /* 15759 * See if we have valid sense data, if so then turn it over to 15760 * sd_decode_sense() to figure out the right course of action. 15761 */ 15762 if (sd_validate_sense_data(un, bp, xp, actual_len) == 15763 SD_SENSE_DATA_IS_VALID) { 15764 sd_decode_sense(un, bp, xp, pktp); 15765 } 15766 } 15767 15768 15769 /* 15770 * Function: sd_print_sense_failed_msg 15771 * 15772 * Description: Print log message when RQS has failed. 15773 * 15774 * Arguments: un - ptr to associated softstate 15775 * bp - ptr to buf(9S) for the command 15776 * arg - generic message string ptr 15777 * code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED, 15778 * or SD_NO_RETRY_ISSUED 15779 * 15780 * Context: May be called from interrupt context 15781 */ 15782 15783 static void 15784 sd_print_sense_failed_msg(struct sd_lun *un, struct buf *bp, void *arg, 15785 int code) 15786 { 15787 char *msgp = arg; 15788 15789 ASSERT(un != NULL); 15790 ASSERT(mutex_owned(SD_MUTEX(un))); 15791 ASSERT(bp != NULL); 15792 15793 if ((code == SD_NO_RETRY_ISSUED) && (msgp != NULL)) { 15794 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, msgp); 15795 } 15796 } 15797 15798 15799 /* 15800 * Function: sd_validate_sense_data 15801 * 15802 * Description: Check the given sense data for validity. 15803 * If the sense data is not valid, the command will 15804 * be either failed or retried! 15805 * 15806 * Return Code: SD_SENSE_DATA_IS_INVALID 15807 * SD_SENSE_DATA_IS_VALID 15808 * 15809 * Context: May be called from interrupt context 15810 */ 15811 15812 static int 15813 sd_validate_sense_data(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp, 15814 size_t actual_len) 15815 { 15816 struct scsi_extended_sense *esp; 15817 struct scsi_pkt *pktp; 15818 char *msgp = NULL; 15819 15820 ASSERT(un != NULL); 15821 ASSERT(mutex_owned(SD_MUTEX(un))); 15822 ASSERT(bp != NULL); 15823 ASSERT(bp != un->un_rqs_bp); 15824 ASSERT(xp != NULL); 15825 15826 pktp = SD_GET_PKTP(bp); 15827 ASSERT(pktp != NULL); 15828 15829 /* 15830 * Check the status of the RQS command (auto or manual). 
15831 */ 15832 switch (xp->xb_sense_status & STATUS_MASK) { 15833 case STATUS_GOOD: 15834 break; 15835 15836 case STATUS_RESERVATION_CONFLICT: 15837 sd_pkt_status_reservation_conflict(un, bp, xp, pktp); 15838 return (SD_SENSE_DATA_IS_INVALID); 15839 15840 case STATUS_BUSY: 15841 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 15842 "Busy Status on REQUEST SENSE\n"); 15843 sd_retry_command(un, bp, SD_RETRIES_BUSY, NULL, 15844 NULL, EIO, un->un_busy_timeout / 500, kstat_waitq_enter); 15845 return (SD_SENSE_DATA_IS_INVALID); 15846 15847 case STATUS_QFULL: 15848 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 15849 "QFULL Status on REQUEST SENSE\n"); 15850 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, 15851 NULL, EIO, un->un_busy_timeout / 500, kstat_waitq_enter); 15852 return (SD_SENSE_DATA_IS_INVALID); 15853 15854 case STATUS_CHECK: 15855 case STATUS_TERMINATED: 15856 msgp = "Check Condition on REQUEST SENSE\n"; 15857 goto sense_failed; 15858 15859 default: 15860 msgp = "Not STATUS_GOOD on REQUEST_SENSE\n"; 15861 goto sense_failed; 15862 } 15863 15864 /* 15865 * See if we got the minimum required amount of sense data. 15866 * Note: We are assuming the returned sense data is SENSE_LENGTH bytes 15867 * or less. 15868 */ 15869 if (((xp->xb_sense_state & STATE_XFERRED_DATA) == 0) || 15870 (actual_len == 0)) { 15871 msgp = "Request Sense couldn't get sense data\n"; 15872 goto sense_failed; 15873 } 15874 15875 if (actual_len < SUN_MIN_SENSE_LENGTH) { 15876 msgp = "Not enough sense information\n"; 15877 goto sense_failed; 15878 } 15879 15880 /* 15881 * We require the extended sense data 15882 */ 15883 esp = (struct scsi_extended_sense *)xp->xb_sense_data; 15884 if (esp->es_class != CLASS_EXTENDED_SENSE) { 15885 if ((pktp->pkt_flags & FLAG_SILENT) == 0) { 15886 static char tmp[8]; 15887 static char buf[148]; 15888 char *p = (char *)(xp->xb_sense_data); 15889 int i; 15890 15891 mutex_enter(&sd_sense_mutex); 15892 (void) strcpy(buf, "undecodable sense information:"); 15893 for (i = 0; i < actual_len; i++) { 15894 (void) sprintf(tmp, " 0x%x", *(p++)&0xff); 15895 (void) strcpy(&buf[strlen(buf)], tmp); 15896 } 15897 i = strlen(buf); 15898 (void) strcpy(&buf[i], "-(assumed fatal)\n"); 15899 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, buf); 15900 mutex_exit(&sd_sense_mutex); 15901 } 15902 /* Note: Legacy behavior, fail the command with no retry */ 15903 sd_return_failed_command(un, bp, EIO); 15904 return (SD_SENSE_DATA_IS_INVALID); 15905 } 15906 15907 /* 15908 * Check that es_code is valid (es_class concatenated with es_code 15909 * make up the "response code" field). es_class will always be 7, so 15910 * make sure es_code is 0, 1, 2, 3 or 0xf. es_code will indicate the 15911 * format. 15912 */ 15913 if ((esp->es_code != CODE_FMT_FIXED_CURRENT) && 15914 (esp->es_code != CODE_FMT_FIXED_DEFERRED) && 15915 (esp->es_code != CODE_FMT_DESCR_CURRENT) && 15916 (esp->es_code != CODE_FMT_DESCR_DEFERRED) && 15917 (esp->es_code != CODE_FMT_VENDOR_SPECIFIC)) { 15918 goto sense_failed; 15919 } 15920 15921 return (SD_SENSE_DATA_IS_VALID); 15922 15923 sense_failed: 15924 /* 15925 * If the request sense failed (for whatever reason), attempt 15926 * to retry the original command. 15927 */ 15928 #if defined(__i386) || defined(__amd64) 15929 /* 15930 * SD_RETRY_DELAY is conditionally compiled (#if fibre) in 15931 * sddef.h for the SPARC platform, while x86 uses one binary 15932 * for both SCSI and FC.
The SD_RETRY_DELAY value needs to be adjusted here 15934 * whenever SD_RETRY_DELAY changes in sddef.h. 15935 */ 15936 sd_retry_command(un, bp, SD_RETRIES_STANDARD, 15937 sd_print_sense_failed_msg, msgp, EIO, 15938 un->un_f_is_fibre?drv_usectohz(100000):(clock_t)0, NULL); 15939 #else 15940 sd_retry_command(un, bp, SD_RETRIES_STANDARD, 15941 sd_print_sense_failed_msg, msgp, EIO, SD_RETRY_DELAY, NULL); 15942 #endif 15943 15944 return (SD_SENSE_DATA_IS_INVALID); 15945 } 15946 15947 15948 15949 /* 15950 * Function: sd_decode_sense 15951 * 15952 * Description: Take recovery action(s) when SCSI Sense Data is received. 15953 * 15954 * Context: Interrupt context. 15955 */ 15956 15957 static void 15958 sd_decode_sense(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp, 15959 struct scsi_pkt *pktp) 15960 { 15961 uint8_t sense_key; 15962 15963 ASSERT(un != NULL); 15964 ASSERT(mutex_owned(SD_MUTEX(un))); 15965 ASSERT(bp != NULL); 15966 ASSERT(bp != un->un_rqs_bp); 15967 ASSERT(xp != NULL); 15968 ASSERT(pktp != NULL); 15969 15970 sense_key = scsi_sense_key(xp->xb_sense_data); 15971 15972 switch (sense_key) { 15973 case KEY_NO_SENSE: 15974 sd_sense_key_no_sense(un, bp, xp, pktp); 15975 break; 15976 case KEY_RECOVERABLE_ERROR: 15977 sd_sense_key_recoverable_error(un, xp->xb_sense_data, 15978 bp, xp, pktp); 15979 break; 15980 case KEY_NOT_READY: 15981 sd_sense_key_not_ready(un, xp->xb_sense_data, 15982 bp, xp, pktp); 15983 break; 15984 case KEY_MEDIUM_ERROR: 15985 case KEY_HARDWARE_ERROR: 15986 sd_sense_key_medium_or_hardware_error(un, 15987 xp->xb_sense_data, bp, xp, pktp); 15988 break; 15989 case KEY_ILLEGAL_REQUEST: 15990 sd_sense_key_illegal_request(un, bp, xp, pktp); 15991 break; 15992 case KEY_UNIT_ATTENTION: 15993 sd_sense_key_unit_attention(un, xp->xb_sense_data, 15994 bp, xp, pktp); 15995 break; 15996 case KEY_WRITE_PROTECT: 15997 case KEY_VOLUME_OVERFLOW: 15998 case KEY_MISCOMPARE: 15999 sd_sense_key_fail_command(un, bp, xp, pktp); 16000 break; 16001 case KEY_BLANK_CHECK: 16002 sd_sense_key_blank_check(un, bp, xp, pktp); 16003 break; 16004 case KEY_ABORTED_COMMAND: 16005 sd_sense_key_aborted_command(un, bp, xp, pktp); 16006 break; 16007 case KEY_VENDOR_UNIQUE: 16008 case KEY_COPY_ABORTED: 16009 case KEY_EQUAL: 16010 case KEY_RESERVED: 16011 default: 16012 sd_sense_key_default(un, xp->xb_sense_data, 16013 bp, xp, pktp); 16014 break; 16015 } 16016 } 16017 16018 16019 /* 16020 * Function: sd_dump_memory 16021 * 16022 * Description: Debug logging routine to print the contents of a user-provided 16023 * buffer. The output of the buffer is broken up into 256-byte 16024 * segments due to a size constraint of the scsi_log
16026 * 16027 * Arguments: un - ptr to softstate 16028 * comp - component mask 16029 * title - "title" string to preceed data when printed 16030 * data - ptr to data block to be printed 16031 * len - size of data block to be printed 16032 * fmt - SD_LOG_HEX (use 0x%02x format) or SD_LOG_CHAR (use %c) 16033 * 16034 * Context: May be called from interrupt context 16035 */ 16036 16037 #define SD_DUMP_MEMORY_BUF_SIZE 256 16038 16039 static char *sd_dump_format_string[] = { 16040 " 0x%02x", 16041 " %c" 16042 }; 16043 16044 static void 16045 sd_dump_memory(struct sd_lun *un, uint_t comp, char *title, uchar_t *data, 16046 int len, int fmt) 16047 { 16048 int i, j; 16049 int avail_count; 16050 int start_offset; 16051 int end_offset; 16052 size_t entry_len; 16053 char *bufp; 16054 char *local_buf; 16055 char *format_string; 16056 16057 ASSERT((fmt == SD_LOG_HEX) || (fmt == SD_LOG_CHAR)); 16058 16059 /* 16060 * In the debug version of the driver, this function is called from a 16061 * number of places which are NOPs in the release driver. 16062 * The debug driver therefore has additional methods of filtering 16063 * debug output. 16064 */ 16065 #ifdef SDDEBUG 16066 /* 16067 * In the debug version of the driver we can reduce the amount of debug 16068 * messages by setting sd_error_level to something other than 16069 * SCSI_ERR_ALL and clearing bits in sd_level_mask and 16070 * sd_component_mask. 16071 */ 16072 if (((sd_level_mask & (SD_LOGMASK_DUMP_MEM | SD_LOGMASK_DIAG)) == 0) || 16073 (sd_error_level != SCSI_ERR_ALL)) { 16074 return; 16075 } 16076 if (((sd_component_mask & comp) == 0) || 16077 (sd_error_level != SCSI_ERR_ALL)) { 16078 return; 16079 } 16080 #else 16081 if (sd_error_level != SCSI_ERR_ALL) { 16082 return; 16083 } 16084 #endif 16085 16086 local_buf = kmem_zalloc(SD_DUMP_MEMORY_BUF_SIZE, KM_SLEEP); 16087 bufp = local_buf; 16088 /* 16089 * Available length is the length of local_buf[], minus the 16090 * length of the title string, minus one for the ":", minus 16091 * one for the newline, minus one for the NULL terminator. 16092 * This gives the #bytes available for holding the printed 16093 * values from the given data buffer. 16094 */ 16095 if (fmt == SD_LOG_HEX) { 16096 format_string = sd_dump_format_string[0]; 16097 } else /* SD_LOG_CHAR */ { 16098 format_string = sd_dump_format_string[1]; 16099 } 16100 /* 16101 * Available count is the number of elements from the given 16102 * data buffer that we can fit into the available length. 16103 * This is based upon the size of the format string used. 16104 * Make one entry and find it's size. 16105 */ 16106 (void) sprintf(bufp, format_string, data[0]); 16107 entry_len = strlen(bufp); 16108 avail_count = (SD_DUMP_MEMORY_BUF_SIZE - strlen(title) - 3) / entry_len; 16109 16110 j = 0; 16111 while (j < len) { 16112 bufp = local_buf; 16113 bzero(bufp, SD_DUMP_MEMORY_BUF_SIZE); 16114 start_offset = j; 16115 16116 end_offset = start_offset + avail_count; 16117 16118 (void) sprintf(bufp, "%s:", title); 16119 bufp += strlen(bufp); 16120 for (i = start_offset; ((i < end_offset) && (j < len)); 16121 i++, j++) { 16122 (void) sprintf(bufp, format_string, data[i]); 16123 bufp += entry_len; 16124 } 16125 (void) sprintf(bufp, "\n"); 16126 16127 scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE, "%s", local_buf); 16128 } 16129 kmem_free(local_buf, SD_DUMP_MEMORY_BUF_SIZE); 16130 } 16131 16132 /* 16133 * Function: sd_print_sense_msg 16134 * 16135 * Description: Log a message based upon the given sense data. 
16136  *
16137  * Arguments: un - ptr to associated softstate
16138  *    bp - ptr to buf(9S) for the command
16139  *    arg - ptr to associated sd_sense_info struct
16140  *    code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED,
16141  *    or SD_NO_RETRY_ISSUED
16142  *
16143  * Context: May be called from interrupt context
16144  */
16145
16146 static void
16147 sd_print_sense_msg(struct sd_lun *un, struct buf *bp, void *arg, int code)
16148 {
16149     struct sd_xbuf *xp;
16150     struct scsi_pkt *pktp;
16151     uint8_t *sensep;
16152     daddr_t request_blkno;
16153     diskaddr_t err_blkno;
16154     int severity;
16155     int pfa_flag;
16156     extern struct scsi_key_strings scsi_cmds[];
16157
16158     ASSERT(un != NULL);
16159     ASSERT(mutex_owned(SD_MUTEX(un)));
16160     ASSERT(bp != NULL);
16161     xp = SD_GET_XBUF(bp);
16162     ASSERT(xp != NULL);
16163     pktp = SD_GET_PKTP(bp);
16164     ASSERT(pktp != NULL);
16165     ASSERT(arg != NULL);
16166
16167     severity = ((struct sd_sense_info *)(arg))->ssi_severity;
16168     pfa_flag = ((struct sd_sense_info *)(arg))->ssi_pfa_flag;
16169
16170     if ((code == SD_DELAYED_RETRY_ISSUED) ||
16171         (code == SD_IMMEDIATE_RETRY_ISSUED)) {
16172         severity = SCSI_ERR_RETRYABLE;
16173     }
16174
16175     /* Use absolute block number for the request block number */
16176     request_blkno = xp->xb_blkno;
16177
16178     /*
16179      * Now try to get the error block number from the sense data
16180      */
16181     sensep = xp->xb_sense_data;
16182
16183     if (scsi_sense_info_uint64(sensep, SENSE_LENGTH,
16184         (uint64_t *)&err_blkno)) {
16185         /*
16186          * We retrieved the error block number from the information
16187          * portion of the sense data.
16188          *
16189          * For USCSI commands we are better off using the error
16190          * block no. as the requested block no. (This is the best
16191          * we can estimate.)
16192          */
16193         if ((SD_IS_BUFIO(xp) == FALSE) &&
16194             ((pktp->pkt_flags & FLAG_SILENT) == 0)) {
16195             request_blkno = err_blkno;
16196         }
16197     } else {
16198         /*
16199          * Without the es_valid bit set (for fixed format) or an
16200          * information descriptor (for descriptor format) we cannot
16201          * be certain of the error blkno, so just use the
16202          * request_blkno.
16203          */
16204         err_blkno = (diskaddr_t)request_blkno;
16205     }
16206
16207     /*
16208      * The following will log the buffer contents for the release driver
16209      * if the SD_LOGMASK_DIAG bit of sd_level_mask is set, or the error
16210      * level is set to verbose.
16211 */ 16212 sd_dump_memory(un, SD_LOG_IO, "Failed CDB", 16213 (uchar_t *)pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX); 16214 sd_dump_memory(un, SD_LOG_IO, "Sense Data", 16215 (uchar_t *)sensep, SENSE_LENGTH, SD_LOG_HEX); 16216 16217 if (pfa_flag == FALSE) { 16218 /* This is normally only set for USCSI */ 16219 if ((pktp->pkt_flags & FLAG_SILENT) != 0) { 16220 return; 16221 } 16222 16223 if ((SD_IS_BUFIO(xp) == TRUE) && 16224 (((sd_level_mask & SD_LOGMASK_DIAG) == 0) && 16225 (severity < sd_error_level))) { 16226 return; 16227 } 16228 } 16229 16230 /* 16231 * Check for Sonoma Failover and keep a count of how many failed I/O's 16232 */ 16233 if ((SD_IS_LSI(un)) && 16234 (scsi_sense_key(sensep) == KEY_ILLEGAL_REQUEST) && 16235 (scsi_sense_asc(sensep) == 0x94) && 16236 (scsi_sense_ascq(sensep) == 0x01)) { 16237 un->un_sonoma_failure_count++; 16238 if (un->un_sonoma_failure_count > 1) { 16239 return; 16240 } 16241 } 16242 16243 scsi_vu_errmsg(SD_SCSI_DEVP(un), pktp, sd_label, severity, 16244 request_blkno, err_blkno, scsi_cmds, 16245 (struct scsi_extended_sense *)sensep, 16246 un->un_additional_codes, NULL); 16247 } 16248 16249 /* 16250 * Function: sd_sense_key_no_sense 16251 * 16252 * Description: Recovery action when sense data was not received. 16253 * 16254 * Context: May be called from interrupt context 16255 */ 16256 16257 static void 16258 sd_sense_key_no_sense(struct sd_lun *un, struct buf *bp, 16259 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16260 { 16261 struct sd_sense_info si; 16262 16263 ASSERT(un != NULL); 16264 ASSERT(mutex_owned(SD_MUTEX(un))); 16265 ASSERT(bp != NULL); 16266 ASSERT(xp != NULL); 16267 ASSERT(pktp != NULL); 16268 16269 si.ssi_severity = SCSI_ERR_FATAL; 16270 si.ssi_pfa_flag = FALSE; 16271 16272 SD_UPDATE_ERRSTATS(un, sd_softerrs); 16273 16274 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 16275 &si, EIO, (clock_t)0, NULL); 16276 } 16277 16278 16279 /* 16280 * Function: sd_sense_key_recoverable_error 16281 * 16282 * Description: Recovery actions for a SCSI "Recovered Error" sense key. 16283 * 16284 * Context: May be called from interrupt context 16285 */ 16286 16287 static void 16288 sd_sense_key_recoverable_error(struct sd_lun *un, 16289 uint8_t *sense_datap, 16290 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 16291 { 16292 struct sd_sense_info si; 16293 uint8_t asc = scsi_sense_asc(sense_datap); 16294 16295 ASSERT(un != NULL); 16296 ASSERT(mutex_owned(SD_MUTEX(un))); 16297 ASSERT(bp != NULL); 16298 ASSERT(xp != NULL); 16299 ASSERT(pktp != NULL); 16300 16301 /* 16302 * 0x5D: FAILURE PREDICTION THRESHOLD EXCEEDED 16303 */ 16304 if ((asc == 0x5D) && (sd_report_pfa != 0)) { 16305 SD_UPDATE_ERRSTATS(un, sd_rq_pfa_err); 16306 si.ssi_severity = SCSI_ERR_INFO; 16307 si.ssi_pfa_flag = TRUE; 16308 } else { 16309 SD_UPDATE_ERRSTATS(un, sd_softerrs); 16310 SD_UPDATE_ERRSTATS(un, sd_rq_recov_err); 16311 si.ssi_severity = SCSI_ERR_RECOVERED; 16312 si.ssi_pfa_flag = FALSE; 16313 } 16314 16315 if (pktp->pkt_resid == 0) { 16316 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 16317 sd_return_command(un, bp); 16318 return; 16319 } 16320 16321 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 16322 &si, EIO, (clock_t)0, NULL); 16323 } 16324 16325 16326 16327 16328 /* 16329 * Function: sd_sense_key_not_ready 16330 * 16331 * Description: Recovery actions for a SCSI "Not Ready" sense key. 
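 *
 * (An editorial aside: a condensed sketch of the err_blkno extraction
 * performed by sd_print_sense_msg() above. The wrapper name is ours;
 * scsi_sense_info_uint64() is called exactly as the driver calls it
 * and copes with both fixed and descriptor sense formats.)
 */
#ifdef SD_EXAMPLE_SKETCH    /* hypothetical guard; never compiled */
static diskaddr_t
example_err_blkno(uint8_t *sensep, daddr_t request_blkno)
{
    uint64_t info;

    /*
     * True only when the sense data carries a valid information
     * field (es_valid set, or an information descriptor present).
     */
    if (scsi_sense_info_uint64(sensep, SENSE_LENGTH, &info)) {
        return ((diskaddr_t)info);
    }

    /* Otherwise fall back to the block the command requested. */
    return ((diskaddr_t)request_blkno);
}
#endif    /* SD_EXAMPLE_SKETCH */
/*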
16332  *
16333  * Context: May be called from interrupt context
16334  */
16335
16336 static void
16337 sd_sense_key_not_ready(struct sd_lun *un,
16338     uint8_t *sense_datap,
16339     struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp)
16340 {
16341     struct sd_sense_info si;
16342     uint8_t asc = scsi_sense_asc(sense_datap);
16343     uint8_t ascq = scsi_sense_ascq(sense_datap);
16344
16345     ASSERT(un != NULL);
16346     ASSERT(mutex_owned(SD_MUTEX(un)));
16347     ASSERT(bp != NULL);
16348     ASSERT(xp != NULL);
16349     ASSERT(pktp != NULL);
16350
16351     si.ssi_severity = SCSI_ERR_FATAL;
16352     si.ssi_pfa_flag = FALSE;
16353
16354     /*
16355      * Update error stats after first NOT READY error. Disks may have
16356      * been powered down and may need to be restarted. For CDROMs,
16357      * report NOT READY errors only if media is present.
16358      */
16359     if ((ISCD(un) && (asc == 0x3A)) ||
16360         (xp->xb_nr_retry_count > 0)) {
16361         SD_UPDATE_ERRSTATS(un, sd_harderrs);
16362         SD_UPDATE_ERRSTATS(un, sd_rq_ntrdy_err);
16363     }
16364
16365     /*
16366      * Just fail if the "not ready" retry limit has been reached.
16367      */
16368     if (xp->xb_nr_retry_count >= un->un_notready_retry_count) {
16369         /* Special check for error message printing for removables. */
16370         if (un->un_f_has_removable_media && (asc == 0x04) &&
16371             (ascq >= 0x04)) {
16372             si.ssi_severity = SCSI_ERR_ALL;
16373         }
16374         goto fail_command;
16375     }
16376
16377     /*
16378      * Check the ASC and ASCQ in the sense data as needed, to determine
16379      * what to do.
16380      */
16381     switch (asc) {
16382     case 0x04:  /* LOGICAL UNIT NOT READY */
16383         /*
16384          * disk drives that don't spin up result in a very long delay
16385          * in format without warning messages. We will log a message
16386          * if the error level is set to verbose.
16387          */
16388         if (sd_error_level < SCSI_ERR_RETRYABLE) {
16389             scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
16390                 "logical unit not ready, resetting disk\n");
16391         }
16392
16393         /*
16394          * There are different requirements for CDROMs and disks for
16395          * the number of retries. If a CD-ROM is giving this, it is
16396          * probably reading TOC and is in the process of getting
16397          * ready, so we should keep on trying for a long time to make
16398          * sure that all types of media are taken into account (for
16399          * some media the drive takes a long time to read TOC). For
16400          * disks we do not want to retry this too many times as this
16401          * can cause a long hang in format when the drive refuses to
16402          * spin up (a very common failure).
16403          */
16404         switch (ascq) {
16405         case 0x00:  /* LUN NOT READY, CAUSE NOT REPORTABLE */
16406             /*
16407              * Disk drives frequently refuse to spin up which
16408              * results in a very long hang in format without
16409              * warning messages.
16410              *
16411              * Note: This code preserves the legacy behavior of
16412              * comparing xb_nr_retry_count against zero for fibre
16413              * channel targets instead of comparing against the
16414              * un_reset_retry_count value. The reason for this
16415              * discrepancy has been so utterly lost beneath the
16416              * Sands of Time that even Indiana Jones could not
16417              * find it.
16418  */
16419             if (un->un_f_is_fibre == TRUE) {
16420                 if (((sd_level_mask & SD_LOGMASK_DIAG) ||
16421                     (xp->xb_nr_retry_count > 0)) &&
16422                     (un->un_startstop_timeid == NULL)) {
16423                     scsi_log(SD_DEVINFO(un), sd_label,
16424                         CE_WARN, "logical unit not ready, "
16425                         "resetting disk\n");
16426                     sd_reset_target(un, pktp);
16427                 }
16428             } else {
16429                 if (((sd_level_mask & SD_LOGMASK_DIAG) ||
16430                     (xp->xb_nr_retry_count >
16431                     un->un_reset_retry_count)) &&
16432                     (un->un_startstop_timeid == NULL)) {
16433                     scsi_log(SD_DEVINFO(un), sd_label,
16434                         CE_WARN, "logical unit not ready, "
16435                         "resetting disk\n");
16436                     sd_reset_target(un, pktp);
16437                 }
16438             }
16439             break;
16440
16441         case 0x01:  /* LUN IS IN PROCESS OF BECOMING READY */
16442             /*
16443              * If the target is in the process of becoming
16444              * ready, just proceed with the retry. This can
16445              * happen with CD-ROMs that take a long time to
16446              * read TOC after a power cycle or reset.
16447              */
16448             goto do_retry;
16449
16450         case 0x02:  /* LUN NOT READY, INITIALIZING CMD REQUIRED */
16451             break;
16452
16453         case 0x03:  /* LUN NOT READY, MANUAL INTERVENTION REQUIRED */
16454             /*
16455              * Retries cannot help here so just fail right away.
16456              */
16457             goto fail_command;
16458
16459         case 0x88:
16460             /*
16461              * Vendor-unique code for T3/T4: it indicates a
16462              * path problem in a multipathed config, but as far as
16463              * the target driver is concerned it equates to a fatal
16464              * error, so we should just fail the command right away
16465              * (without printing anything to the console). If this
16466              * is not a T3/T4, fall thru to the default recovery
16467              * action.
16468              * T3/T4 is FC only, don't need to check is_fibre
16469              */
16470             if (SD_IS_T3(un) || SD_IS_T4(un)) {
16471                 sd_return_failed_command(un, bp, EIO);
16472                 return;
16473             }
16474             /* FALLTHRU */
16475
16476         case 0x04:  /* LUN NOT READY, FORMAT IN PROGRESS */
16477         case 0x05:  /* LUN NOT READY, REBUILD IN PROGRESS */
16478         case 0x06:  /* LUN NOT READY, RECALCULATION IN PROGRESS */
16479         case 0x07:  /* LUN NOT READY, OPERATION IN PROGRESS */
16480         case 0x08:  /* LUN NOT READY, LONG WRITE IN PROGRESS */
16481         default:    /* Possible future codes in SCSI spec? */
16482             /*
16483              * For removable-media devices, do not retry if
16484              * ASCQ > 2 as these result mostly from USCSI commands
16485              * on MMC devices issued to check status of an
16486              * operation initiated in immediate mode. Also for
16487              * ASCQ >= 4 do not print console messages as these
16488              * mainly represent a user-initiated operation
16489              * instead of a system failure.
16490              */
16491             if (un->un_f_has_removable_media) {
16492                 si.ssi_severity = SCSI_ERR_ALL;
16493                 goto fail_command;
16494             }
16495             break;
16496         }
16497
16498         /*
16499          * As part of our recovery attempt for the NOT READY
16500          * condition, we issue a START STOP UNIT command. However
16501          * we want to wait for a short delay before attempting this
16502          * as there may still be more commands coming back from the
16503          * target with the check condition. To do this we use
16504          * timeout(9F) to call sd_start_stop_unit_callback() after
16505          * the delay interval expires. (sd_start_stop_unit_callback()
16506          * dispatches sd_start_stop_unit_task(), which will issue
16507          * the actual START STOP UNIT command.) The delay interval
16508          * is one-half of the delay that we will use to retry the
16509          * command that generated the NOT READY condition.
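 *
 * (An editorial aside: the callout-then-taskq chain described here is
 * a common deferral idiom. A condensed sketch with hypothetical
 * helper names; timeout(9F) fires once in soft-interrupt context,
 * where sleeping is not allowed, so the callback only dispatches the
 * sleepable work:)
 */
#ifdef SD_EXAMPLE_SKETCH    /* hypothetical guard; never compiled */
static void example_sleepable_work(void *);

static void
example_callback(void *arg)    /* runs at callout (soft-interrupt) level */
{
    /* Must not sleep here; hand the real work to a taskq thread. */
    (void) taskq_dispatch(sd_tq, example_sleepable_work, arg,
        KM_NOSLEEP);
}

static void
example_arm(struct sd_lun *un)
{
    /* One-shot: example_callback() runs once after half the delay. */
    un->un_startstop_timeid = timeout(example_callback, un,
        un->un_busy_timeout / 2);
}
#endif    /* SD_EXAMPLE_SKETCH */
/*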
16510          *
16511          * Note that we could just dispatch sd_start_stop_unit_task()
16512          * from here and allow it to sleep for the delay interval,
16513          * but then we would be tying up the taskq thread
16514          * unnecessarily for the duration of the delay.
16515          *
16516          * Do not issue the START STOP UNIT if the current command
16517          * is already a START STOP UNIT.
16518          */
16519         if (pktp->pkt_cdbp[0] == SCMD_START_STOP) {
16520             break;
16521         }
16522
16523         /*
16524          * Do not schedule the timeout if one is already pending.
16525          */
16526         if (un->un_startstop_timeid != NULL) {
16527             SD_INFO(SD_LOG_ERROR, un,
16528                 "sd_sense_key_not_ready: restart already issued to"
16529                 " %s%d\n", ddi_driver_name(SD_DEVINFO(un)),
16530                 ddi_get_instance(SD_DEVINFO(un)));
16531             break;
16532         }
16533
16534         /*
16535          * Schedule the START STOP UNIT command, then queue the command
16536          * for a retry.
16537          *
16538          * Note: A timeout is not scheduled for this retry because we
16539          * want the retry to be serial with the START_STOP_UNIT. The
16540          * retry will be started when the START_STOP_UNIT is completed
16541          * in sd_start_stop_unit_task.
16542          */
16543         un->un_startstop_timeid = timeout(sd_start_stop_unit_callback,
16544             un, un->un_busy_timeout / 2);
16545         xp->xb_nr_retry_count++;
16546         sd_set_retry_bp(un, bp, 0, kstat_waitq_enter);
16547         return;
16548
16549     case 0x05:  /* LOGICAL UNIT DOES NOT RESPOND TO SELECTION */
16550         if (sd_error_level < SCSI_ERR_RETRYABLE) {
16551             scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
16552                 "unit does not respond to selection\n");
16553         }
16554         break;
16555
16556     case 0x3A:  /* MEDIUM NOT PRESENT */
16557         if (sd_error_level >= SCSI_ERR_FATAL) {
16558             scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
16559                 "Caddy not inserted in drive\n");
16560         }
16561
16562         sr_ejected(un);
16563         un->un_mediastate = DKIO_EJECTED;
16564         /* The state has changed, inform the media watch routines */
16565         cv_broadcast(&un->un_state_cv);
16566         /* Just fail if no media is present in the drive. */
16567         goto fail_command;
16568
16569     default:
16570         if (sd_error_level < SCSI_ERR_RETRYABLE) {
16571             scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE,
16572                 "Unit not Ready. Additional sense code 0x%x\n",
16573                 asc);
16574         }
16575         break;
16576     }
16577
16578 do_retry:
16579
16580     /*
16581      * Retry the command, as some targets may report NOT READY for
16582      * several seconds after being reset.
16583      */
16584     xp->xb_nr_retry_count++;
16585     si.ssi_severity = SCSI_ERR_RETRYABLE;
16586     sd_retry_command(un, bp, SD_RETRIES_NOCHECK, sd_print_sense_msg,
16587         &si, EIO, un->un_busy_timeout, NULL);
16588
16589     return;
16590
16591 fail_command:
16592     sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
16593     sd_return_failed_command(un, bp, EIO);
16594 }
16595
16596
16597
16598 /*
16599  * Function: sd_sense_key_medium_or_hardware_error
16600  *
16601  * Description: Recovery actions for a SCSI "Medium Error" or "Hardware Error"
16602  *    sense key.
16603 * 16604 * Context: May be called from interrupt context 16605 */ 16606 16607 static void 16608 sd_sense_key_medium_or_hardware_error(struct sd_lun *un, 16609 uint8_t *sense_datap, 16610 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 16611 { 16612 struct sd_sense_info si; 16613 uint8_t sense_key = scsi_sense_key(sense_datap); 16614 uint8_t asc = scsi_sense_asc(sense_datap); 16615 16616 ASSERT(un != NULL); 16617 ASSERT(mutex_owned(SD_MUTEX(un))); 16618 ASSERT(bp != NULL); 16619 ASSERT(xp != NULL); 16620 ASSERT(pktp != NULL); 16621 16622 si.ssi_severity = SCSI_ERR_FATAL; 16623 si.ssi_pfa_flag = FALSE; 16624 16625 if (sense_key == KEY_MEDIUM_ERROR) { 16626 SD_UPDATE_ERRSTATS(un, sd_rq_media_err); 16627 } 16628 16629 SD_UPDATE_ERRSTATS(un, sd_harderrs); 16630 16631 if ((un->un_reset_retry_count != 0) && 16632 (xp->xb_retry_count == un->un_reset_retry_count)) { 16633 mutex_exit(SD_MUTEX(un)); 16634 /* Do NOT do a RESET_ALL here: too intrusive. (4112858) */ 16635 if (un->un_f_allow_bus_device_reset == TRUE) { 16636 16637 boolean_t try_resetting_target = B_TRUE; 16638 16639 /* 16640 * We need to be able to handle specific ASC when we are 16641 * handling a KEY_HARDWARE_ERROR. In particular 16642 * taking the default action of resetting the target may 16643 * not be the appropriate way to attempt recovery. 16644 * Resetting a target because of a single LUN failure 16645 * victimizes all LUNs on that target. 16646 * 16647 * This is true for the LSI arrays, if an LSI 16648 * array controller returns an ASC of 0x84 (LUN Dead) we 16649 * should trust it. 16650 */ 16651 16652 if (sense_key == KEY_HARDWARE_ERROR) { 16653 switch (asc) { 16654 case 0x84: 16655 if (SD_IS_LSI(un)) { 16656 try_resetting_target = B_FALSE; 16657 } 16658 break; 16659 default: 16660 break; 16661 } 16662 } 16663 16664 if (try_resetting_target == B_TRUE) { 16665 int reset_retval = 0; 16666 if (un->un_f_lun_reset_enabled == TRUE) { 16667 SD_TRACE(SD_LOG_IO_CORE, un, 16668 "sd_sense_key_medium_or_hardware_" 16669 "error: issuing RESET_LUN\n"); 16670 reset_retval = 16671 scsi_reset(SD_ADDRESS(un), 16672 RESET_LUN); 16673 } 16674 if (reset_retval == 0) { 16675 SD_TRACE(SD_LOG_IO_CORE, un, 16676 "sd_sense_key_medium_or_hardware_" 16677 "error: issuing RESET_TARGET\n"); 16678 (void) scsi_reset(SD_ADDRESS(un), 16679 RESET_TARGET); 16680 } 16681 } 16682 } 16683 mutex_enter(SD_MUTEX(un)); 16684 } 16685 16686 /* 16687 * This really ought to be a fatal error, but we will retry anyway 16688 * as some drives report this as a spurious error. 16689 */ 16690 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 16691 &si, EIO, (clock_t)0, NULL); 16692 } 16693 16694 16695 16696 /* 16697 * Function: sd_sense_key_illegal_request 16698 * 16699 * Description: Recovery actions for a SCSI "Illegal Request" sense key. 
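 *
 * (An editorial aside: the LUN-before-target escalation used in the
 * medium/hardware error handler above, reduced to its core. The
 * wrapper name is ours; scsi_reset(9F) is called as the driver calls
 * it, and the caller is expected to have dropped SD_MUTEX first:)
 */
#ifdef SD_EXAMPLE_SKETCH    /* hypothetical guard; never compiled */
static void
example_escalating_reset(struct sd_lun *un)
{
    int rval = 0;

    /* Narrowest scope first: reset just this LUN if supported. */
    if (un->un_f_lun_reset_enabled == TRUE) {
        rval = scsi_reset(SD_ADDRESS(un), RESET_LUN);
    }

    /* If that failed or was unavailable, reset the whole target. */
    if (rval == 0) {
        (void) scsi_reset(SD_ADDRESS(un), RESET_TARGET);
    }
}
#endif    /* SD_EXAMPLE_SKETCH */
/*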
16700 * 16701 * Context: May be called from interrupt context 16702 */ 16703 16704 static void 16705 sd_sense_key_illegal_request(struct sd_lun *un, struct buf *bp, 16706 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16707 { 16708 struct sd_sense_info si; 16709 16710 ASSERT(un != NULL); 16711 ASSERT(mutex_owned(SD_MUTEX(un))); 16712 ASSERT(bp != NULL); 16713 ASSERT(xp != NULL); 16714 ASSERT(pktp != NULL); 16715 16716 SD_UPDATE_ERRSTATS(un, sd_rq_illrq_err); 16717 16718 si.ssi_severity = SCSI_ERR_INFO; 16719 si.ssi_pfa_flag = FALSE; 16720 16721 /* Pointless to retry if the target thinks it's an illegal request */ 16722 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 16723 sd_return_failed_command(un, bp, EIO); 16724 } 16725 16726 16727 16728 16729 /* 16730 * Function: sd_sense_key_unit_attention 16731 * 16732 * Description: Recovery actions for a SCSI "Unit Attention" sense key. 16733 * 16734 * Context: May be called from interrupt context 16735 */ 16736 16737 static void 16738 sd_sense_key_unit_attention(struct sd_lun *un, 16739 uint8_t *sense_datap, 16740 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 16741 { 16742 /* 16743 * For UNIT ATTENTION we allow retries for one minute. Devices 16744 * like Sonoma can return UNIT ATTENTION close to a minute 16745 * under certain conditions. 16746 */ 16747 int retry_check_flag = SD_RETRIES_UA; 16748 boolean_t kstat_updated = B_FALSE; 16749 struct sd_sense_info si; 16750 uint8_t asc = scsi_sense_asc(sense_datap); 16751 uint8_t ascq = scsi_sense_ascq(sense_datap); 16752 16753 ASSERT(un != NULL); 16754 ASSERT(mutex_owned(SD_MUTEX(un))); 16755 ASSERT(bp != NULL); 16756 ASSERT(xp != NULL); 16757 ASSERT(pktp != NULL); 16758 16759 si.ssi_severity = SCSI_ERR_INFO; 16760 si.ssi_pfa_flag = FALSE; 16761 16762 16763 switch (asc) { 16764 case 0x5D: /* FAILURE PREDICTION THRESHOLD EXCEEDED */ 16765 if (sd_report_pfa != 0) { 16766 SD_UPDATE_ERRSTATS(un, sd_rq_pfa_err); 16767 si.ssi_pfa_flag = TRUE; 16768 retry_check_flag = SD_RETRIES_STANDARD; 16769 goto do_retry; 16770 } 16771 16772 break; 16773 16774 case 0x29: /* POWER ON, RESET, OR BUS DEVICE RESET OCCURRED */ 16775 if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) { 16776 un->un_resvd_status |= 16777 (SD_LOST_RESERVE | SD_WANT_RESERVE); 16778 } 16779 #ifdef _LP64 16780 if (un->un_blockcount + 1 > SD_GROUP1_MAX_ADDRESS) { 16781 if (taskq_dispatch(sd_tq, sd_reenable_dsense_task, 16782 un, KM_NOSLEEP) == 0) { 16783 /* 16784 * If we can't dispatch the task we'll just 16785 * live without descriptor sense. We can 16786 * try again on the next "unit attention" 16787 */ 16788 SD_ERROR(SD_LOG_ERROR, un, 16789 "sd_sense_key_unit_attention: " 16790 "Could not dispatch " 16791 "sd_reenable_dsense_task\n"); 16792 } 16793 } 16794 #endif /* _LP64 */ 16795 /* FALLTHRU */ 16796 16797 case 0x28: /* NOT READY TO READY CHANGE, MEDIUM MAY HAVE CHANGED */ 16798 if (!un->un_f_has_removable_media) { 16799 break; 16800 } 16801 16802 /* 16803 * When we get a unit attention from a removable-media device, 16804 * it may be in a state that will take a long time to recover 16805 * (e.g., from a reset). Since we are executing in interrupt 16806 * context here, we cannot wait around for the device to come 16807 * back. So hand this command off to sd_media_change_task() 16808 * for deferred processing under taskq thread context. (Note 16809 * that the command still may be failed if a problem is 16810 * encountered at a later time.) 
16811  */
16812         if (taskq_dispatch(sd_tq, sd_media_change_task, pktp,
16813             KM_NOSLEEP) == 0) {
16814             /*
16815              * Cannot dispatch the request so fail the command.
16816              */
16817             SD_UPDATE_ERRSTATS(un, sd_harderrs);
16818             SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err);
16819             si.ssi_severity = SCSI_ERR_FATAL;
16820             sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
16821             sd_return_failed_command(un, bp, EIO);
16822         }
16823
16824         /*
16825          * If the dispatch of sd_media_change_task() failed, the kstats
16826          * were already updated above. If it succeeded, the kstats will
16827          * be updated later, should the deferred processing encounter an
16828          * error. Either way, set the kstat_updated flag here.
16829          */
16830         kstat_updated = B_TRUE;
16831
16832         /*
16833          * Either the command has been successfully dispatched to a
16834          * taskq for retrying, or the dispatch failed. In either case
16835          * do NOT retry again by calling sd_retry_command. This sets up
16836          * two retries of the same command and when one completes and
16837          * frees the resources the other will access freed memory,
16838          * a bad thing.
16839          */
16840         return;
16841
16842     default:
16843         break;
16844     }
16845
16846     /*
16847      *    ASC  ASCQ
16848      *     2A   09    Capacity data has changed
16849      *     2A   01    Mode parameters changed
16850      *     3F   0E    Reported luns data has changed
16851      * Arrays that support logical unit expansion should report
16852      * capacity changes (2Ah/09). "Mode parameters changed" and
16853      * "reported luns data has changed" serve as approximations.
16854      */
16855     if (((asc == 0x2a) && (ascq == 0x09)) ||
16856         ((asc == 0x2a) && (ascq == 0x01)) ||
16857         ((asc == 0x3f) && (ascq == 0x0e))) {
16858         if (taskq_dispatch(sd_tq, sd_target_change_task, un,
16859             KM_NOSLEEP) == 0) {
16860             SD_ERROR(SD_LOG_ERROR, un,
16861                 "sd_sense_key_unit_attention: "
16862                 "Could not dispatch sd_target_change_task\n");
16863         }
16864     }
16865
16866     /*
16867      * Update kstat if we haven't done so already.
16868      */
16869     if (!kstat_updated) {
16870         SD_UPDATE_ERRSTATS(un, sd_harderrs);
16871         SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err);
16872     }
16873
16874 do_retry:
16875     sd_retry_command(un, bp, retry_check_flag, sd_print_sense_msg, &si,
16876         EIO, SD_UA_RETRY_DELAY, NULL);
16877 }
16878
16879
16880
16881 /*
16882  * Function: sd_sense_key_fail_command
16883  *
16884  * Description: Used to fail a command when we don't like the sense key that
16885  *    was returned.
16886  *
16887  * Context: May be called from interrupt context
16888  */
16889
16890 static void
16891 sd_sense_key_fail_command(struct sd_lun *un, struct buf *bp,
16892     struct sd_xbuf *xp, struct scsi_pkt *pktp)
16893 {
16894     struct sd_sense_info si;
16895
16896     ASSERT(un != NULL);
16897     ASSERT(mutex_owned(SD_MUTEX(un)));
16898     ASSERT(bp != NULL);
16899     ASSERT(xp != NULL);
16900     ASSERT(pktp != NULL);
16901
16902     si.ssi_severity = SCSI_ERR_FATAL;
16903     si.ssi_pfa_flag = FALSE;
16904
16905     sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
16906     sd_return_failed_command(un, bp, EIO);
16907 }
16908
16909
16910
16911 /*
16912  * Function: sd_sense_key_blank_check
16913  *
16914  * Description: Recovery actions for a SCSI "Blank Check" sense key.
16915  *    Has no monetary connotation.
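 *
 * (An editorial aside on the taskq hand-offs in the unit-attention
 * path above: KM_NOSLEEP is the only choice in interrupt context, so
 * a return of 0 from taskq_dispatch(9F) (no memory) must always be
 * handled. A condensed sketch; the task function is hypothetical:)
 */
#ifdef SD_EXAMPLE_SKETCH    /* hypothetical guard; never compiled */
static void example_task(void *);

static void
example_defer(struct sd_lun *un)
{
    if (taskq_dispatch(sd_tq, example_task, un, KM_NOSLEEP) == 0) {
        /*
         * Dispatch failed; fail or retry the work here instead
         * of silently dropping it.
         */
        SD_ERROR(SD_LOG_ERROR, un,
            "example_defer: could not dispatch task\n");
    }
}
#endif    /* SD_EXAMPLE_SKETCH */
/*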
16916 * 16917 * Context: May be called from interrupt context 16918 */ 16919 16920 static void 16921 sd_sense_key_blank_check(struct sd_lun *un, struct buf *bp, 16922 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16923 { 16924 struct sd_sense_info si; 16925 16926 ASSERT(un != NULL); 16927 ASSERT(mutex_owned(SD_MUTEX(un))); 16928 ASSERT(bp != NULL); 16929 ASSERT(xp != NULL); 16930 ASSERT(pktp != NULL); 16931 16932 /* 16933 * Blank check is not fatal for removable devices, therefore 16934 * it does not require a console message. 16935 */ 16936 si.ssi_severity = (un->un_f_has_removable_media) ? SCSI_ERR_ALL : 16937 SCSI_ERR_FATAL; 16938 si.ssi_pfa_flag = FALSE; 16939 16940 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 16941 sd_return_failed_command(un, bp, EIO); 16942 } 16943 16944 16945 16946 16947 /* 16948 * Function: sd_sense_key_aborted_command 16949 * 16950 * Description: Recovery actions for a SCSI "Aborted Command" sense key. 16951 * 16952 * Context: May be called from interrupt context 16953 */ 16954 16955 static void 16956 sd_sense_key_aborted_command(struct sd_lun *un, struct buf *bp, 16957 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16958 { 16959 struct sd_sense_info si; 16960 16961 ASSERT(un != NULL); 16962 ASSERT(mutex_owned(SD_MUTEX(un))); 16963 ASSERT(bp != NULL); 16964 ASSERT(xp != NULL); 16965 ASSERT(pktp != NULL); 16966 16967 si.ssi_severity = SCSI_ERR_FATAL; 16968 si.ssi_pfa_flag = FALSE; 16969 16970 SD_UPDATE_ERRSTATS(un, sd_harderrs); 16971 16972 /* 16973 * This really ought to be a fatal error, but we will retry anyway 16974 * as some drives report this as a spurious error. 16975 */ 16976 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 16977 &si, EIO, drv_usectohz(100000), NULL); 16978 } 16979 16980 16981 16982 /* 16983 * Function: sd_sense_key_default 16984 * 16985 * Description: Default recovery action for several SCSI sense keys (basically 16986 * attempts a retry). 16987 * 16988 * Context: May be called from interrupt context 16989 */ 16990 16991 static void 16992 sd_sense_key_default(struct sd_lun *un, 16993 uint8_t *sense_datap, 16994 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 16995 { 16996 struct sd_sense_info si; 16997 uint8_t sense_key = scsi_sense_key(sense_datap); 16998 16999 ASSERT(un != NULL); 17000 ASSERT(mutex_owned(SD_MUTEX(un))); 17001 ASSERT(bp != NULL); 17002 ASSERT(xp != NULL); 17003 ASSERT(pktp != NULL); 17004 17005 SD_UPDATE_ERRSTATS(un, sd_harderrs); 17006 17007 /* 17008 * Undecoded sense key. Attempt retries and hope that will fix 17009 * the problem. Otherwise, we're dead. 17010 */ 17011 if ((pktp->pkt_flags & FLAG_SILENT) == 0) { 17012 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 17013 "Unhandled Sense Key '%s'\n", sense_keys[sense_key]); 17014 } 17015 17016 si.ssi_severity = SCSI_ERR_FATAL; 17017 si.ssi_pfa_flag = FALSE; 17018 17019 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 17020 &si, EIO, (clock_t)0, NULL); 17021 } 17022 17023 17024 17025 /* 17026 * Function: sd_print_retry_msg 17027 * 17028 * Description: Print a message indicating the retry action being taken. 17029 * 17030 * Arguments: un - ptr to associated softstate 17031 * bp - ptr to buf(9S) for the command 17032 * arg - not used. 
17033 * flag - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED, 17034 * or SD_NO_RETRY_ISSUED 17035 * 17036 * Context: May be called from interrupt context 17037 */ 17038 /* ARGSUSED */ 17039 static void 17040 sd_print_retry_msg(struct sd_lun *un, struct buf *bp, void *arg, int flag) 17041 { 17042 struct sd_xbuf *xp; 17043 struct scsi_pkt *pktp; 17044 char *reasonp; 17045 char *msgp; 17046 17047 ASSERT(un != NULL); 17048 ASSERT(mutex_owned(SD_MUTEX(un))); 17049 ASSERT(bp != NULL); 17050 pktp = SD_GET_PKTP(bp); 17051 ASSERT(pktp != NULL); 17052 xp = SD_GET_XBUF(bp); 17053 ASSERT(xp != NULL); 17054 17055 ASSERT(!mutex_owned(&un->un_pm_mutex)); 17056 mutex_enter(&un->un_pm_mutex); 17057 if ((un->un_state == SD_STATE_SUSPENDED) || 17058 (SD_DEVICE_IS_IN_LOW_POWER(un)) || 17059 (pktp->pkt_flags & FLAG_SILENT)) { 17060 mutex_exit(&un->un_pm_mutex); 17061 goto update_pkt_reason; 17062 } 17063 mutex_exit(&un->un_pm_mutex); 17064 17065 /* 17066 * Suppress messages if they are all the same pkt_reason; with 17067 * TQ, many (up to 256) are returned with the same pkt_reason. 17068 * If we are in panic, then suppress the retry messages. 17069 */ 17070 switch (flag) { 17071 case SD_NO_RETRY_ISSUED: 17072 msgp = "giving up"; 17073 break; 17074 case SD_IMMEDIATE_RETRY_ISSUED: 17075 case SD_DELAYED_RETRY_ISSUED: 17076 if (ddi_in_panic() || (un->un_state == SD_STATE_OFFLINE) || 17077 ((pktp->pkt_reason == un->un_last_pkt_reason) && 17078 (sd_error_level != SCSI_ERR_ALL))) { 17079 return; 17080 } 17081 msgp = "retrying command"; 17082 break; 17083 default: 17084 goto update_pkt_reason; 17085 } 17086 17087 reasonp = (((pktp->pkt_statistics & STAT_PERR) != 0) ? "parity error" : 17088 scsi_rname(pktp->pkt_reason)); 17089 17090 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 17091 "SCSI transport failed: reason '%s': %s\n", reasonp, msgp); 17092 17093 update_pkt_reason: 17094 /* 17095 * Update un->un_last_pkt_reason with the value in pktp->pkt_reason. 17096 * This is to prevent multiple console messages for the same failure 17097 * condition. Note that un->un_last_pkt_reason is NOT restored if & 17098 * when the command is retried successfully because there still may be 17099 * more commands coming back with the same value of pktp->pkt_reason. 17100 */ 17101 if ((pktp->pkt_reason != CMD_CMPLT) || (xp->xb_retry_count == 0)) { 17102 un->un_last_pkt_reason = pktp->pkt_reason; 17103 } 17104 } 17105 17106 17107 /* 17108 * Function: sd_print_cmd_incomplete_msg 17109 * 17110 * Description: Message logging fn. for a SCSA "CMD_INCOMPLETE" pkt_reason. 17111 * 17112 * Arguments: un - ptr to associated softstate 17113 * bp - ptr to buf(9S) for the command 17114 * arg - passed to sd_print_retry_msg() 17115 * code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED, 17116 * or SD_NO_RETRY_ISSUED 17117 * 17118 * Context: May be called from interrupt context 17119 */ 17120 17121 static void 17122 sd_print_cmd_incomplete_msg(struct sd_lun *un, struct buf *bp, void *arg, 17123 int code) 17124 { 17125 dev_info_t *dip; 17126 17127 ASSERT(un != NULL); 17128 ASSERT(mutex_owned(SD_MUTEX(un))); 17129 ASSERT(bp != NULL); 17130 17131 switch (code) { 17132 case SD_NO_RETRY_ISSUED: 17133 /* Command was failed. Someone turned off this target? 
*/ 17134 if (un->un_state != SD_STATE_OFFLINE) { 17135 /* 17136 * Suppress message if we are detaching and 17137 * device has been disconnected 17138 * Note that DEVI_IS_DEVICE_REMOVED is a consolidation 17139 * private interface and not part of the DDI 17140 */ 17141 dip = un->un_sd->sd_dev; 17142 if (!(DEVI_IS_DETACHING(dip) && 17143 DEVI_IS_DEVICE_REMOVED(dip))) { 17144 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 17145 "disk not responding to selection\n"); 17146 } 17147 New_state(un, SD_STATE_OFFLINE); 17148 } 17149 break; 17150 17151 case SD_DELAYED_RETRY_ISSUED: 17152 case SD_IMMEDIATE_RETRY_ISSUED: 17153 default: 17154 /* Command was successfully queued for retry */ 17155 sd_print_retry_msg(un, bp, arg, code); 17156 break; 17157 } 17158 } 17159 17160 17161 /* 17162 * Function: sd_pkt_reason_cmd_incomplete 17163 * 17164 * Description: Recovery actions for a SCSA "CMD_INCOMPLETE" pkt_reason. 17165 * 17166 * Context: May be called from interrupt context 17167 */ 17168 17169 static void 17170 sd_pkt_reason_cmd_incomplete(struct sd_lun *un, struct buf *bp, 17171 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17172 { 17173 int flag = SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE; 17174 17175 ASSERT(un != NULL); 17176 ASSERT(mutex_owned(SD_MUTEX(un))); 17177 ASSERT(bp != NULL); 17178 ASSERT(xp != NULL); 17179 ASSERT(pktp != NULL); 17180 17181 /* Do not do a reset if selection did not complete */ 17182 /* Note: Should this not just check the bit? */ 17183 if (pktp->pkt_state != STATE_GOT_BUS) { 17184 SD_UPDATE_ERRSTATS(un, sd_transerrs); 17185 sd_reset_target(un, pktp); 17186 } 17187 17188 /* 17189 * If the target was not successfully selected, then set 17190 * SD_RETRIES_FAILFAST to indicate that we lost communication 17191 * with the target, and further retries and/or commands are 17192 * likely to take a long time. 17193 */ 17194 if ((pktp->pkt_state & STATE_GOT_TARGET) == 0) { 17195 flag |= SD_RETRIES_FAILFAST; 17196 } 17197 17198 SD_UPDATE_RESERVATION_STATUS(un, pktp); 17199 17200 sd_retry_command(un, bp, flag, 17201 sd_print_cmd_incomplete_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 17202 } 17203 17204 17205 17206 /* 17207 * Function: sd_pkt_reason_cmd_tran_err 17208 * 17209 * Description: Recovery actions for a SCSA "CMD_TRAN_ERR" pkt_reason. 17210 * 17211 * Context: May be called from interrupt context 17212 */ 17213 17214 static void 17215 sd_pkt_reason_cmd_tran_err(struct sd_lun *un, struct buf *bp, 17216 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17217 { 17218 ASSERT(un != NULL); 17219 ASSERT(mutex_owned(SD_MUTEX(un))); 17220 ASSERT(bp != NULL); 17221 ASSERT(xp != NULL); 17222 ASSERT(pktp != NULL); 17223 17224 /* 17225 * Do not reset if we got a parity error, or if 17226 * selection did not complete. 17227 */ 17228 SD_UPDATE_ERRSTATS(un, sd_harderrs); 17229 /* Note: Should this not just check the bit for pkt_state? */ 17230 if (((pktp->pkt_statistics & STAT_PERR) == 0) && 17231 (pktp->pkt_state != STATE_GOT_BUS)) { 17232 SD_UPDATE_ERRSTATS(un, sd_transerrs); 17233 sd_reset_target(un, pktp); 17234 } 17235 17236 SD_UPDATE_RESERVATION_STATUS(un, pktp); 17237 17238 sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE), 17239 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 17240 } 17241 17242 17243 17244 /* 17245 * Function: sd_pkt_reason_cmd_reset 17246 * 17247 * Description: Recovery actions for a SCSA "CMD_RESET" pkt_reason. 
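 *
 * (An editorial aside on the "should this not just check the bit?"
 * notes above: pkt_state is a bit mask of the phases a command
 * reached, so the equality test matches only when STATE_GOT_BUS is
 * the sole bit set, while a bit test matches whenever the bus was
 * obtained at all. Both forms, for contrast:)
 */
#ifdef SD_EXAMPLE_SKETCH    /* hypothetical guard; never compiled */
static void
example_pkt_state_tests(struct scsi_pkt *pktp)
{
    /* Legacy form: progress was anything other than bus-only. */
    if (pktp->pkt_state != STATE_GOT_BUS) {
        /* some later phase bit is also set, or none at all */
    }

    /* Bit form: the bus was never obtained. */
    if ((pktp->pkt_state & STATE_GOT_BUS) == 0) {
        /* selection never even got the bus */
    }
}
#endif    /* SD_EXAMPLE_SKETCH */
/*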
17248 * 17249 * Context: May be called from interrupt context 17250 */ 17251 17252 static void 17253 sd_pkt_reason_cmd_reset(struct sd_lun *un, struct buf *bp, 17254 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17255 { 17256 ASSERT(un != NULL); 17257 ASSERT(mutex_owned(SD_MUTEX(un))); 17258 ASSERT(bp != NULL); 17259 ASSERT(xp != NULL); 17260 ASSERT(pktp != NULL); 17261 17262 /* The target may still be running the command, so try to reset. */ 17263 SD_UPDATE_ERRSTATS(un, sd_transerrs); 17264 sd_reset_target(un, pktp); 17265 17266 SD_UPDATE_RESERVATION_STATUS(un, pktp); 17267 17268 /* 17269 * If pkt_reason is CMD_RESET chances are that this pkt got 17270 * reset because another target on this bus caused it. The target 17271 * that caused it should get CMD_TIMEOUT with pkt_statistics 17272 * of STAT_TIMEOUT/STAT_DEV_RESET. 17273 */ 17274 17275 sd_retry_command(un, bp, (SD_RETRIES_VICTIM | SD_RETRIES_ISOLATE), 17276 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 17277 } 17278 17279 17280 17281 17282 /* 17283 * Function: sd_pkt_reason_cmd_aborted 17284 * 17285 * Description: Recovery actions for a SCSA "CMD_ABORTED" pkt_reason. 17286 * 17287 * Context: May be called from interrupt context 17288 */ 17289 17290 static void 17291 sd_pkt_reason_cmd_aborted(struct sd_lun *un, struct buf *bp, 17292 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17293 { 17294 ASSERT(un != NULL); 17295 ASSERT(mutex_owned(SD_MUTEX(un))); 17296 ASSERT(bp != NULL); 17297 ASSERT(xp != NULL); 17298 ASSERT(pktp != NULL); 17299 17300 /* The target may still be running the command, so try to reset. */ 17301 SD_UPDATE_ERRSTATS(un, sd_transerrs); 17302 sd_reset_target(un, pktp); 17303 17304 SD_UPDATE_RESERVATION_STATUS(un, pktp); 17305 17306 /* 17307 * If pkt_reason is CMD_ABORTED chances are that this pkt got 17308 * aborted because another target on this bus caused it. The target 17309 * that caused it should get CMD_TIMEOUT with pkt_statistics 17310 * of STAT_TIMEOUT/STAT_DEV_RESET. 17311 */ 17312 17313 sd_retry_command(un, bp, (SD_RETRIES_VICTIM | SD_RETRIES_ISOLATE), 17314 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 17315 } 17316 17317 17318 17319 /* 17320 * Function: sd_pkt_reason_cmd_timeout 17321 * 17322 * Description: Recovery actions for a SCSA "CMD_TIMEOUT" pkt_reason. 17323 * 17324 * Context: May be called from interrupt context 17325 */ 17326 17327 static void 17328 sd_pkt_reason_cmd_timeout(struct sd_lun *un, struct buf *bp, 17329 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17330 { 17331 ASSERT(un != NULL); 17332 ASSERT(mutex_owned(SD_MUTEX(un))); 17333 ASSERT(bp != NULL); 17334 ASSERT(xp != NULL); 17335 ASSERT(pktp != NULL); 17336 17337 17338 SD_UPDATE_ERRSTATS(un, sd_transerrs); 17339 sd_reset_target(un, pktp); 17340 17341 SD_UPDATE_RESERVATION_STATUS(un, pktp); 17342 17343 /* 17344 * A command timeout indicates that we could not establish 17345 * communication with the target, so set SD_RETRIES_FAILFAST 17346 * as further retries/commands are likely to take a long time. 17347 */ 17348 sd_retry_command(un, bp, 17349 (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE | SD_RETRIES_FAILFAST), 17350 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 17351 } 17352 17353 17354 17355 /* 17356 * Function: sd_pkt_reason_cmd_unx_bus_free 17357 * 17358 * Description: Recovery actions for a SCSA "CMD_UNX_BUS_FREE" pkt_reason. 
17359 * 17360 * Context: May be called from interrupt context 17361 */ 17362 17363 static void 17364 sd_pkt_reason_cmd_unx_bus_free(struct sd_lun *un, struct buf *bp, 17365 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17366 { 17367 void (*funcp)(struct sd_lun *un, struct buf *bp, void *arg, int code); 17368 17369 ASSERT(un != NULL); 17370 ASSERT(mutex_owned(SD_MUTEX(un))); 17371 ASSERT(bp != NULL); 17372 ASSERT(xp != NULL); 17373 ASSERT(pktp != NULL); 17374 17375 SD_UPDATE_ERRSTATS(un, sd_harderrs); 17376 SD_UPDATE_RESERVATION_STATUS(un, pktp); 17377 17378 funcp = ((pktp->pkt_statistics & STAT_PERR) == 0) ? 17379 sd_print_retry_msg : NULL; 17380 17381 sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE), 17382 funcp, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 17383 } 17384 17385 17386 /* 17387 * Function: sd_pkt_reason_cmd_tag_reject 17388 * 17389 * Description: Recovery actions for a SCSA "CMD_TAG_REJECT" pkt_reason. 17390 * 17391 * Context: May be called from interrupt context 17392 */ 17393 17394 static void 17395 sd_pkt_reason_cmd_tag_reject(struct sd_lun *un, struct buf *bp, 17396 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17397 { 17398 ASSERT(un != NULL); 17399 ASSERT(mutex_owned(SD_MUTEX(un))); 17400 ASSERT(bp != NULL); 17401 ASSERT(xp != NULL); 17402 ASSERT(pktp != NULL); 17403 17404 SD_UPDATE_ERRSTATS(un, sd_harderrs); 17405 pktp->pkt_flags = 0; 17406 un->un_tagflags = 0; 17407 if (un->un_f_opt_queueing == TRUE) { 17408 un->un_throttle = min(un->un_throttle, 3); 17409 } else { 17410 un->un_throttle = 1; 17411 } 17412 mutex_exit(SD_MUTEX(un)); 17413 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1); 17414 mutex_enter(SD_MUTEX(un)); 17415 17416 SD_UPDATE_RESERVATION_STATUS(un, pktp); 17417 17418 /* Legacy behavior not to check retry counts here. */ 17419 sd_retry_command(un, bp, (SD_RETRIES_NOCHECK | SD_RETRIES_ISOLATE), 17420 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 17421 } 17422 17423 17424 /* 17425 * Function: sd_pkt_reason_default 17426 * 17427 * Description: Default recovery actions for SCSA pkt_reason values that 17428 * do not have more explicit recovery actions. 17429 * 17430 * Context: May be called from interrupt context 17431 */ 17432 17433 static void 17434 sd_pkt_reason_default(struct sd_lun *un, struct buf *bp, 17435 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17436 { 17437 ASSERT(un != NULL); 17438 ASSERT(mutex_owned(SD_MUTEX(un))); 17439 ASSERT(bp != NULL); 17440 ASSERT(xp != NULL); 17441 ASSERT(pktp != NULL); 17442 17443 SD_UPDATE_ERRSTATS(un, sd_transerrs); 17444 sd_reset_target(un, pktp); 17445 17446 SD_UPDATE_RESERVATION_STATUS(un, pktp); 17447 17448 sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE), 17449 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 17450 } 17451 17452 17453 17454 /* 17455 * Function: sd_pkt_status_check_condition 17456 * 17457 * Description: Recovery actions for a "STATUS_CHECK" SCSI command status. 
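 *
 * (An editorial aside on the tag-reject recovery above: tagged
 * queueing is turned off through the generic capability interface. A
 * condensed sketch with a hypothetical wrapper; scsi_ifsetcap(9F) is
 * called as the driver calls it, with SD_MUTEX dropped around it:)
 */
#ifdef SD_EXAMPLE_SKETCH    /* hypothetical guard; never compiled */
static void
example_disable_tagged_queueing(struct sd_lun *un)
{
    /* Without tags, allow only one outstanding command. */
    un->un_throttle = 1;

    mutex_exit(SD_MUTEX(un));
    /* "tagged-qing" = 0 disables tagging; whom = 1 means this target. */
    (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1);
    mutex_enter(SD_MUTEX(un));
}
#endif    /* SD_EXAMPLE_SKETCH */
/*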
17458  *
17459  * Context: May be called from interrupt context
17460  */
17461
17462 static void
17463 sd_pkt_status_check_condition(struct sd_lun *un, struct buf *bp,
17464     struct sd_xbuf *xp, struct scsi_pkt *pktp)
17465 {
17466     ASSERT(un != NULL);
17467     ASSERT(mutex_owned(SD_MUTEX(un)));
17468     ASSERT(bp != NULL);
17469     ASSERT(xp != NULL);
17470     ASSERT(pktp != NULL);
17471
17472     SD_TRACE(SD_LOG_IO, un, "sd_pkt_status_check_condition: "
17473         "entry: buf:0x%p xp:0x%p\n", bp, xp);
17474
17475     /*
17476      * If ARQ is NOT enabled, then issue a REQUEST SENSE command (the
17477      * command will be retried after the request sense). Otherwise, retry
17478      * the command. Note: we are issuing the request sense even though the
17479      * retry limit may have been reached for the failed command.
17480      */
17481     if (un->un_f_arq_enabled == FALSE) {
17482         SD_INFO(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: "
17483             "no ARQ, sending request sense command\n");
17484         sd_send_request_sense_command(un, bp, pktp);
17485     } else {
17486         SD_INFO(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: "
17487             "ARQ,retrying request sense command\n");
17488 #if defined(__i386) || defined(__amd64)
17489         /*
17490          * The SD_RETRY_DELAY value needs to be adjusted here
17491          * when SD_RETRY_DELAY changes in sddef.h
17492          */
17493         sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL, EIO,
17494             un->un_f_is_fibre ? drv_usectohz(100000) : (clock_t)0,
17495             NULL);
17496 #else
17497         sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL,
17498             EIO, SD_RETRY_DELAY, NULL);
17499 #endif
17500     }
17501
17502     SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: exit\n");
17503 }
17504
17505
17506 /*
17507  * Function: sd_pkt_status_busy
17508  *
17509  * Description: Recovery actions for a "STATUS_BUSY" SCSI command status.
17510  *
17511  * Context: May be called from interrupt context
17512  */
17513
17514 static void
17515 sd_pkt_status_busy(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
17516     struct scsi_pkt *pktp)
17517 {
17518     ASSERT(un != NULL);
17519     ASSERT(mutex_owned(SD_MUTEX(un)));
17520     ASSERT(bp != NULL);
17521     ASSERT(xp != NULL);
17522     ASSERT(pktp != NULL);
17523
17524     SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
17525         "sd_pkt_status_busy: entry\n");
17526
17527     /* If retries are exhausted, just fail the command. */
17528     if (xp->xb_retry_count >= un->un_busy_retry_count) {
17529         scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
17530             "device busy too long\n");
17531         sd_return_failed_command(un, bp, EIO);
17532         SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
17533             "sd_pkt_status_busy: exit\n");
17534         return;
17535     }
17536     xp->xb_retry_count++;
17537
17538     /*
17539      * Try to reset the target. However, we do not want to perform
17540      * more than one reset if the device continues to fail. The reset
17541      * will be performed when the retry count reaches the reset
17542      * threshold. This threshold should be set such that at least
17543      * one retry is issued before the reset is performed.
17544      */
17545     if (xp->xb_retry_count ==
17546         ((un->un_reset_retry_count < 2) ? 2 : un->un_reset_retry_count)) {
17547         int rval = 0;
17548         mutex_exit(SD_MUTEX(un));
17549         if (un->un_f_allow_bus_device_reset == TRUE) {
17550             /*
17551              * First try to reset the LUN; if we cannot then
17552              * try to reset the target.
17553 */ 17554 if (un->un_f_lun_reset_enabled == TRUE) { 17555 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17556 "sd_pkt_status_busy: RESET_LUN\n"); 17557 rval = scsi_reset(SD_ADDRESS(un), RESET_LUN); 17558 } 17559 if (rval == 0) { 17560 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17561 "sd_pkt_status_busy: RESET_TARGET\n"); 17562 rval = scsi_reset(SD_ADDRESS(un), RESET_TARGET); 17563 } 17564 } 17565 if (rval == 0) { 17566 /* 17567 * If the RESET_LUN and/or RESET_TARGET failed, 17568 * try RESET_ALL 17569 */ 17570 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17571 "sd_pkt_status_busy: RESET_ALL\n"); 17572 rval = scsi_reset(SD_ADDRESS(un), RESET_ALL); 17573 } 17574 mutex_enter(SD_MUTEX(un)); 17575 if (rval == 0) { 17576 /* 17577 * The RESET_LUN, RESET_TARGET, and/or RESET_ALL failed. 17578 * At this point we give up & fail the command. 17579 */ 17580 sd_return_failed_command(un, bp, EIO); 17581 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17582 "sd_pkt_status_busy: exit (failed cmd)\n"); 17583 return; 17584 } 17585 } 17586 17587 /* 17588 * Retry the command. Be sure to specify SD_RETRIES_NOCHECK as 17589 * we have already checked the retry counts above. 17590 */ 17591 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, NULL, NULL, 17592 EIO, un->un_busy_timeout, NULL); 17593 17594 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17595 "sd_pkt_status_busy: exit\n"); 17596 } 17597 17598 17599 /* 17600 * Function: sd_pkt_status_reservation_conflict 17601 * 17602 * Description: Recovery actions for a "STATUS_RESERVATION_CONFLICT" SCSI 17603 * command status. 17604 * 17605 * Context: May be called from interrupt context 17606 */ 17607 17608 static void 17609 sd_pkt_status_reservation_conflict(struct sd_lun *un, struct buf *bp, 17610 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17611 { 17612 ASSERT(un != NULL); 17613 ASSERT(mutex_owned(SD_MUTEX(un))); 17614 ASSERT(bp != NULL); 17615 ASSERT(xp != NULL); 17616 ASSERT(pktp != NULL); 17617 17618 /* 17619 * If the command was PERSISTENT_RESERVATION_[IN|OUT] then reservation 17620 * conflict could be due to various reasons like incorrect keys, not 17621 * registered or not reserved etc. So, we return EACCES to the caller. 17622 */ 17623 if (un->un_reservation_type == SD_SCSI3_RESERVATION) { 17624 int cmd = SD_GET_PKT_OPCODE(pktp); 17625 if ((cmd == SCMD_PERSISTENT_RESERVE_IN) || 17626 (cmd == SCMD_PERSISTENT_RESERVE_OUT)) { 17627 sd_return_failed_command(un, bp, EACCES); 17628 return; 17629 } 17630 } 17631 17632 un->un_resvd_status |= SD_RESERVATION_CONFLICT; 17633 17634 if ((un->un_resvd_status & SD_FAILFAST) != 0) { 17635 if (sd_failfast_enable != 0) { 17636 /* By definition, we must panic here.... */ 17637 sd_panic_for_res_conflict(un); 17638 /*NOTREACHED*/ 17639 } 17640 SD_ERROR(SD_LOG_IO, un, 17641 "sd_handle_resv_conflict: Disk Reserved\n"); 17642 sd_return_failed_command(un, bp, EACCES); 17643 return; 17644 } 17645 17646 /* 17647 * 1147670: retry only if sd_retry_on_reservation_conflict 17648 * property is set (default is 1). Retries will not succeed 17649 * on a disk reserved by another initiator. HA systems 17650 * may reset this via sd.conf to avoid these retries. 17651 * 17652 * Note: The legacy return code for this failure is EIO, however EACCES 17653 * seems more appropriate for a reservation conflict. 
17654 */ 17655 if (sd_retry_on_reservation_conflict == 0) { 17656 SD_ERROR(SD_LOG_IO, un, 17657 "sd_handle_resv_conflict: Device Reserved\n"); 17658 sd_return_failed_command(un, bp, EIO); 17659 return; 17660 } 17661 17662 /* 17663 * Retry the command if we can. 17664 * 17665 * Note: The legacy return code for this failure is EIO, however EACCES 17666 * seems more appropriate for a reservation conflict. 17667 */ 17668 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL, EIO, 17669 (clock_t)2, NULL); 17670 } 17671 17672 17673 17674 /* 17675 * Function: sd_pkt_status_qfull 17676 * 17677 * Description: Handle a QUEUE FULL condition from the target. This can 17678 * occur if the HBA does not handle the queue full condition. 17679 * (Basically this means third-party HBAs as Sun HBAs will 17680 * handle the queue full condition.) Note that if there are 17681 * some commands already in the transport, then the queue full 17682 * has occurred because the queue for this nexus is actually 17683 * full. If there are no commands in the transport, then the 17684 * queue full is resulting from some other initiator or lun 17685 * consuming all the resources at the target. 17686 * 17687 * Context: May be called from interrupt context 17688 */ 17689 17690 static void 17691 sd_pkt_status_qfull(struct sd_lun *un, struct buf *bp, 17692 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17693 { 17694 ASSERT(un != NULL); 17695 ASSERT(mutex_owned(SD_MUTEX(un))); 17696 ASSERT(bp != NULL); 17697 ASSERT(xp != NULL); 17698 ASSERT(pktp != NULL); 17699 17700 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17701 "sd_pkt_status_qfull: entry\n"); 17702 17703 /* 17704 * Just lower the QFULL throttle and retry the command. Note that 17705 * we do not limit the number of retries here. 17706 */ 17707 sd_reduce_throttle(un, SD_THROTTLE_QFULL); 17708 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, NULL, NULL, 0, 17709 SD_RESTART_TIMEOUT, NULL); 17710 17711 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17712 "sd_pkt_status_qfull: exit\n"); 17713 } 17714 17715 17716 /* 17717 * Function: sd_reset_target 17718 * 17719 * Description: Issue a scsi_reset(9F), with either RESET_LUN, 17720 * RESET_TARGET, or RESET_ALL. 17721 * 17722 * Context: May be called under interrupt context. 17723 */ 17724 17725 static void 17726 sd_reset_target(struct sd_lun *un, struct scsi_pkt *pktp) 17727 { 17728 int rval = 0; 17729 17730 ASSERT(un != NULL); 17731 ASSERT(mutex_owned(SD_MUTEX(un))); 17732 ASSERT(pktp != NULL); 17733 17734 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reset_target: entry\n"); 17735 17736 /* 17737 * No need to reset if the transport layer has already done so. 
17738 */ 17739 if ((pktp->pkt_statistics & 17740 (STAT_BUS_RESET | STAT_DEV_RESET | STAT_ABORTED)) != 0) { 17741 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17742 "sd_reset_target: no reset\n"); 17743 return; 17744 } 17745 17746 mutex_exit(SD_MUTEX(un)); 17747 17748 if (un->un_f_allow_bus_device_reset == TRUE) { 17749 if (un->un_f_lun_reset_enabled == TRUE) { 17750 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17751 "sd_reset_target: RESET_LUN\n"); 17752 rval = scsi_reset(SD_ADDRESS(un), RESET_LUN); 17753 } 17754 if (rval == 0) { 17755 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17756 "sd_reset_target: RESET_TARGET\n"); 17757 rval = scsi_reset(SD_ADDRESS(un), RESET_TARGET); 17758 } 17759 } 17760 17761 if (rval == 0) { 17762 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17763 "sd_reset_target: RESET_ALL\n"); 17764 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL); 17765 } 17766 17767 mutex_enter(SD_MUTEX(un)); 17768 17769 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reset_target: exit\n"); 17770 } 17771 17772 /* 17773 * Function: sd_target_change_task 17774 * 17775 * Description: Handle dynamic target change 17776 * 17777 * Context: Executes in a taskq() thread context 17778 */ 17779 static void 17780 sd_target_change_task(void *arg) 17781 { 17782 struct sd_lun *un = arg; 17783 uint64_t capacity; 17784 diskaddr_t label_cap; 17785 uint_t lbasize; 17786 17787 ASSERT(un != NULL); 17788 ASSERT(!mutex_owned(SD_MUTEX(un))); 17789 17790 if ((un->un_f_blockcount_is_valid == FALSE) || 17791 (un->un_f_tgt_blocksize_is_valid == FALSE)) { 17792 return; 17793 } 17794 17795 if (sd_send_scsi_READ_CAPACITY(un, &capacity, 17796 &lbasize, SD_PATH_DIRECT) != 0) { 17797 SD_ERROR(SD_LOG_ERROR, un, 17798 "sd_target_change_task: fail to read capacity\n"); 17799 return; 17800 } 17801 17802 mutex_enter(SD_MUTEX(un)); 17803 if (capacity <= un->un_blockcount) { 17804 mutex_exit(SD_MUTEX(un)); 17805 return; 17806 } 17807 17808 sd_update_block_info(un, lbasize, capacity); 17809 mutex_exit(SD_MUTEX(un)); 17810 17811 /* 17812 * If lun is EFI labeled and lun capacity is greater than the 17813 * capacity contained in the label, log a sys event. 17814 */ 17815 if (cmlb_efi_label_capacity(un->un_cmlbhandle, &label_cap, 17816 (void*)SD_PATH_DIRECT) == 0) { 17817 mutex_enter(SD_MUTEX(un)); 17818 if (un->un_f_blockcount_is_valid && 17819 un->un_blockcount > label_cap) { 17820 mutex_exit(SD_MUTEX(un)); 17821 sd_log_lun_expansion_event(un, KM_SLEEP); 17822 } else { 17823 mutex_exit(SD_MUTEX(un)); 17824 } 17825 } 17826 } 17827 17828 /* 17829 * Function: sd_log_lun_expansion_event 17830 * 17831 * Description: Log lun expansion sys event 17832 * 17833 * Context: Never called from interrupt context 17834 */ 17835 static void 17836 sd_log_lun_expansion_event(struct sd_lun *un, int km_flag) 17837 { 17838 int err; 17839 char *path; 17840 nvlist_t *dle_attr_list; 17841 17842 /* Allocate and build sysevent attribute list */ 17843 err = nvlist_alloc(&dle_attr_list, NV_UNIQUE_NAME_TYPE, km_flag); 17844 if (err != 0) { 17845 SD_ERROR(SD_LOG_ERROR, un, 17846 "sd_log_lun_expansion_event: fail to allocate space\n"); 17847 return; 17848 } 17849 17850 path = kmem_alloc(MAXPATHLEN, km_flag); 17851 if (path == NULL) { 17852 nvlist_free(dle_attr_list); 17853 SD_ERROR(SD_LOG_ERROR, un, 17854 "sd_log_lun_expansion_event: fail to allocate space\n"); 17855 return; 17856 } 17857 /* 17858 * Add path attribute to identify the lun. 17859 * We are using minor node 'a' as the sysevent attribute. 
17860 */ 17861 (void) snprintf(path, MAXPATHLEN, "/devices"); 17862 (void) ddi_pathname(SD_DEVINFO(un), path + strlen(path)); 17863 (void) snprintf(path + strlen(path), MAXPATHLEN - strlen(path), 17864 ":a"); 17865 17866 err = nvlist_add_string(dle_attr_list, DEV_PHYS_PATH, path); 17867 if (err != 0) { 17868 nvlist_free(dle_attr_list); 17869 kmem_free(path, MAXPATHLEN); 17870 SD_ERROR(SD_LOG_ERROR, un, 17871 "sd_log_lun_expansion_event: fail to add attribute\n"); 17872 return; 17873 } 17874 17875 /* Log dynamic lun expansion sysevent */ 17876 err = ddi_log_sysevent(SD_DEVINFO(un), SUNW_VENDOR, EC_DEV_STATUS, 17877 ESC_DEV_DLE, dle_attr_list, NULL, km_flag); 17878 if (err != DDI_SUCCESS) { 17879 SD_ERROR(SD_LOG_ERROR, un, 17880 "sd_log_lun_expansion_event: fail to log sysevent\n"); 17881 } 17882 17883 nvlist_free(dle_attr_list); 17884 kmem_free(path, MAXPATHLEN); 17885 } 17886 17887 /* 17888 * Function: sd_media_change_task 17889 * 17890 * Description: Recovery action for CDROM to become available. 17891 * 17892 * Context: Executes in a taskq() thread context 17893 */ 17894 17895 static void 17896 sd_media_change_task(void *arg) 17897 { 17898 struct scsi_pkt *pktp = arg; 17899 struct sd_lun *un; 17900 struct buf *bp; 17901 struct sd_xbuf *xp; 17902 int err = 0; 17903 int retry_count = 0; 17904 int retry_limit = SD_UNIT_ATTENTION_RETRY/10; 17905 struct sd_sense_info si; 17906 17907 ASSERT(pktp != NULL); 17908 bp = (struct buf *)pktp->pkt_private; 17909 ASSERT(bp != NULL); 17910 xp = SD_GET_XBUF(bp); 17911 ASSERT(xp != NULL); 17912 un = SD_GET_UN(bp); 17913 ASSERT(un != NULL); 17914 ASSERT(!mutex_owned(SD_MUTEX(un))); 17915 ASSERT(un->un_f_monitor_media_state); 17916 17917 si.ssi_severity = SCSI_ERR_INFO; 17918 si.ssi_pfa_flag = FALSE; 17919 17920 /* 17921 * When a reset is issued on a CDROM, it takes a long time to 17922 * recover. First few attempts to read capacity and other things 17923 * related to handling unit attention fail (with a ASC 0x4 and 17924 * ASCQ 0x1). In that case we want to do enough retries and we want 17925 * to limit the retries in other cases of genuine failures like 17926 * no media in drive. 17927 */ 17928 while (retry_count++ < retry_limit) { 17929 if ((err = sd_handle_mchange(un)) == 0) { 17930 break; 17931 } 17932 if (err == EAGAIN) { 17933 retry_limit = SD_UNIT_ATTENTION_RETRY; 17934 } 17935 /* Sleep for 0.5 sec. & try again */ 17936 delay(drv_usectohz(500000)); 17937 } 17938 17939 /* 17940 * Dispatch (retry or fail) the original command here, 17941 * along with appropriate console messages.... 17942 * 17943 * Must grab the mutex before calling sd_retry_command, 17944 * sd_print_sense_msg and sd_return_failed_command. 17945 */ 17946 mutex_enter(SD_MUTEX(un)); 17947 if (err != SD_CMD_SUCCESS) { 17948 SD_UPDATE_ERRSTATS(un, sd_harderrs); 17949 SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err); 17950 si.ssi_severity = SCSI_ERR_FATAL; 17951 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 17952 sd_return_failed_command(un, bp, EIO); 17953 } else { 17954 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, sd_print_sense_msg, 17955 &si, EIO, (clock_t)0, NULL); 17956 } 17957 mutex_exit(SD_MUTEX(un)); 17958 } 17959 17960 17961 17962 /* 17963 * Function: sd_handle_mchange 17964 * 17965 * Description: Perform geometry validation & other recovery when CDROM 17966 * has been removed from drive. 
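 *
 * (An editorial aside on sd_log_lun_expansion_event() above: the
 * nvlist_alloc(9F) / nvlist_add_string(9F) / ddi_log_sysevent(9F)
 * sequence is the standard sysevent pattern. A condensed sketch with
 * a hypothetical placeholder path; every step can fail, so each
 * return value is checked:)
 */
#ifdef SD_EXAMPLE_SKETCH    /* hypothetical guard; never compiled */
static void
example_log_sysevent(dev_info_t *dip)
{
    nvlist_t *attr;

    if (nvlist_alloc(&attr, NV_UNIQUE_NAME_TYPE, KM_SLEEP) != 0) {
        return;
    }
    if (nvlist_add_string(attr, DEV_PHYS_PATH, "/devices/...") == 0) {
        /* Class and subclass select which consumers see the event. */
        (void) ddi_log_sysevent(dip, SUNW_VENDOR, EC_DEV_STATUS,
            ESC_DEV_DLE, attr, NULL, KM_SLEEP);
    }
    nvlist_free(attr);
}
#endif    /* SD_EXAMPLE_SKETCH */
/*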
17967 * 17968 * Return Code: 0 for success 17969 * errno-type return code of either sd_send_scsi_DOORLOCK() or 17970 * sd_send_scsi_READ_CAPACITY() 17971 * 17972 * Context: Executes in a taskq() thread context 17973 */ 17974 17975 static int 17976 sd_handle_mchange(struct sd_lun *un) 17977 { 17978 uint64_t capacity; 17979 uint32_t lbasize; 17980 int rval; 17981 17982 ASSERT(!mutex_owned(SD_MUTEX(un))); 17983 ASSERT(un->un_f_monitor_media_state); 17984 17985 if ((rval = sd_send_scsi_READ_CAPACITY(un, &capacity, &lbasize, 17986 SD_PATH_DIRECT_PRIORITY)) != 0) { 17987 return (rval); 17988 } 17989 17990 mutex_enter(SD_MUTEX(un)); 17991 sd_update_block_info(un, lbasize, capacity); 17992 17993 if (un->un_errstats != NULL) { 17994 struct sd_errstats *stp = 17995 (struct sd_errstats *)un->un_errstats->ks_data; 17996 stp->sd_capacity.value.ui64 = (uint64_t) 17997 ((uint64_t)un->un_blockcount * 17998 (uint64_t)un->un_tgt_blocksize); 17999 } 18000 18001 18002 /* 18003 * Check if the media in the device is writable or not 18004 */ 18005 if (ISCD(un)) 18006 sd_check_for_writable_cd(un, SD_PATH_DIRECT_PRIORITY); 18007 18008 /* 18009 * Note: Maybe let the strategy/partitioning chain worry about getting 18010 * valid geometry. 18011 */ 18012 mutex_exit(SD_MUTEX(un)); 18013 cmlb_invalidate(un->un_cmlbhandle, (void *)SD_PATH_DIRECT_PRIORITY); 18014 18015 18016 if (cmlb_validate(un->un_cmlbhandle, 0, 18017 (void *)SD_PATH_DIRECT_PRIORITY) != 0) { 18018 return (EIO); 18019 } else { 18020 if (un->un_f_pkstats_enabled) { 18021 sd_set_pstats(un); 18022 SD_TRACE(SD_LOG_IO_PARTITION, un, 18023 "sd_handle_mchange: un:0x%p pstats created and " 18024 "set\n", un); 18025 } 18026 } 18027 18028 18029 /* 18030 * Try to lock the door 18031 */ 18032 return (sd_send_scsi_DOORLOCK(un, SD_REMOVAL_PREVENT, 18033 SD_PATH_DIRECT_PRIORITY)); 18034 } 18035 18036 18037 /* 18038 * Function: sd_send_scsi_DOORLOCK 18039 * 18040 * Description: Issue the scsi DOOR LOCK command 18041 * 18042 * Arguments: un - pointer to driver soft state (unit) structure for 18043 * this target. 18044 * flag - SD_REMOVAL_ALLOW 18045 * SD_REMOVAL_PREVENT 18046 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 18047 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 18048 * to use the USCSI "direct" chain and bypass the normal 18049 * command waitq. SD_PATH_DIRECT_PRIORITY is used when this 18050 * command is issued as part of an error recovery action. 18051 * 18052 * Return Code: 0 - Success 18053 * errno return code from sd_send_scsi_cmd() 18054 * 18055 * Context: Can sleep. 18056 */ 18057 18058 static int 18059 sd_send_scsi_DOORLOCK(struct sd_lun *un, int flag, int path_flag) 18060 { 18061 union scsi_cdb cdb; 18062 struct uscsi_cmd ucmd_buf; 18063 struct scsi_extended_sense sense_buf; 18064 int status; 18065 18066 ASSERT(un != NULL); 18067 ASSERT(!mutex_owned(SD_MUTEX(un))); 18068 18069 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_DOORLOCK: entry: un:0x%p\n", un); 18070 18071 /* already determined doorlock is not supported, fake success */ 18072 if (un->un_f_doorlock_supported == FALSE) { 18073 return (0); 18074 } 18075 18076 /* 18077 * If we are ejecting and see an SD_REMOVAL_PREVENT 18078 * ignore the command so we can complete the eject 18079 * operation. 
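 *
 * For reference, the Group 0 PREVENT/ALLOW MEDIUM REMOVAL CDB built
 * below carries the flag in the Prevent field of byte 4 (layout per
 * SPC; SD_REMOVAL_ALLOW/SD_REMOVAL_PREVENT are assumed to be the
 * standard 0/1 values of that field):
 *
 *	byte 0:    opcode (SCMD_DOORLOCK)
 *	bytes 1-3: reserved
 *	byte 4:    Prevent field (0 = allow removal, 1 = prevent)
 *	byte 5:    control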
18080 */
18081 if (flag == SD_REMOVAL_PREVENT) {
18082 mutex_enter(SD_MUTEX(un));
18083 if (un->un_f_ejecting == TRUE) {
18084 mutex_exit(SD_MUTEX(un));
18085 return (EAGAIN);
18086 }
18087 mutex_exit(SD_MUTEX(un));
18088 }
18089
18090 bzero(&cdb, sizeof (cdb));
18091 bzero(&ucmd_buf, sizeof (ucmd_buf));
18092
18093 cdb.scc_cmd = SCMD_DOORLOCK;
18094 cdb.cdb_opaque[4] = (uchar_t)flag;
18095
18096 ucmd_buf.uscsi_cdb = (char *)&cdb;
18097 ucmd_buf.uscsi_cdblen = CDB_GROUP0;
18098 ucmd_buf.uscsi_bufaddr = NULL;
18099 ucmd_buf.uscsi_buflen = 0;
18100 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
18101 ucmd_buf.uscsi_rqlen = sizeof (sense_buf);
18102 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT;
18103 ucmd_buf.uscsi_timeout = 15;
18104
18105 SD_TRACE(SD_LOG_IO, un,
18106 "sd_send_scsi_DOORLOCK: returning sd_send_scsi_cmd()\n");
18107
18108 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL,
18109 UIO_SYSSPACE, path_flag);
18110
18111 if ((status == EIO) && (ucmd_buf.uscsi_status == STATUS_CHECK) &&
18112 (ucmd_buf.uscsi_rqstatus == STATUS_GOOD) &&
18113 (scsi_sense_key((uint8_t *)&sense_buf) == KEY_ILLEGAL_REQUEST)) {
18114 /* fake success and skip subsequent doorlock commands */
18115 un->un_f_doorlock_supported = FALSE;
18116 return (0);
18117 }
18118
18119 return (status);
18120 }
18121
18122 /*
18123 * Function: sd_send_scsi_READ_CAPACITY
18124 *
18125 * Description: This routine uses the scsi READ CAPACITY command to determine
18126 * the device capacity in number of blocks and the device native
18127 * block size. If this function returns a failure, then the
18128 * values in *capp and *lbap are undefined. If the capacity
18129 * returned is 0xffffffff then the lun is too large for a
18130 * normal READ CAPACITY command and the results of a
18131 * READ CAPACITY 16 will be used instead.
18132 *
18133 * Arguments: un - ptr to soft state struct for the target
18134 * capp - ptr to unsigned 64-bit variable to receive the
18135 * capacity value from the command.
18136 * lbap - ptr to unsigned 32-bit variable to receive the
18137 * block size value from the command
18138 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
18139 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY
18140 * to use the USCSI "direct" chain and bypass the normal
18141 * command waitq. SD_PATH_DIRECT_PRIORITY is used when this
18142 * command is issued as part of an error recovery action.
18143 *
18144 * Return Code: 0 - Success
18145 * EIO - IO error
18146 * EACCES - Reservation conflict detected
18147 * EAGAIN - Device is becoming ready
18148 * errno return code from sd_send_scsi_cmd()
18149 *
18150 * Context: Can sleep. Blocks until command completes.
18151 */
18152
18153 #define SD_CAPACITY_SIZE sizeof (struct scsi_capacity)
18154
18155 static int
18156 sd_send_scsi_READ_CAPACITY(struct sd_lun *un, uint64_t *capp, uint32_t *lbap,
18157 int path_flag)
18158 {
18159 struct scsi_extended_sense sense_buf;
18160 struct uscsi_cmd ucmd_buf;
18161 union scsi_cdb cdb;
18162 uint32_t *capacity_buf;
18163 uint64_t capacity;
18164 uint32_t lbasize;
18165 int status;
18166
18167 ASSERT(un != NULL);
18168 ASSERT(!mutex_owned(SD_MUTEX(un)));
18169 ASSERT(capp != NULL);
18170 ASSERT(lbap != NULL);
18171
18172 SD_TRACE(SD_LOG_IO, un,
18173 "sd_send_scsi_READ_CAPACITY: entry: un:0x%p\n", un);
18174
18175 /*
18176 * First send a READ_CAPACITY command to the target.
18177 * (This command is mandatory under SCSI-2.)
18178 *
18179 * Set up the CDB for the READ_CAPACITY command.
The Partial 18180 * Medium Indicator bit is cleared. The address field must be 18181 * zero if the PMI bit is zero. 18182 */ 18183 bzero(&cdb, sizeof (cdb)); 18184 bzero(&ucmd_buf, sizeof (ucmd_buf)); 18185 18186 capacity_buf = kmem_zalloc(SD_CAPACITY_SIZE, KM_SLEEP); 18187 18188 cdb.scc_cmd = SCMD_READ_CAPACITY; 18189 18190 ucmd_buf.uscsi_cdb = (char *)&cdb; 18191 ucmd_buf.uscsi_cdblen = CDB_GROUP1; 18192 ucmd_buf.uscsi_bufaddr = (caddr_t)capacity_buf; 18193 ucmd_buf.uscsi_buflen = SD_CAPACITY_SIZE; 18194 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 18195 ucmd_buf.uscsi_rqlen = sizeof (sense_buf); 18196 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; 18197 ucmd_buf.uscsi_timeout = 60; 18198 18199 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 18200 UIO_SYSSPACE, path_flag); 18201 18202 switch (status) { 18203 case 0: 18204 /* Return failure if we did not get valid capacity data. */ 18205 if (ucmd_buf.uscsi_resid != 0) { 18206 kmem_free(capacity_buf, SD_CAPACITY_SIZE); 18207 return (EIO); 18208 } 18209 18210 /* 18211 * Read capacity and block size from the READ CAPACITY 10 data. 18212 * This data may be adjusted later due to device specific 18213 * issues. 18214 * 18215 * According to the SCSI spec, the READ CAPACITY 10 18216 * command returns the following: 18217 * 18218 * bytes 0-3: Maximum logical block address available. 18219 * (MSB in byte:0 & LSB in byte:3) 18220 * 18221 * bytes 4-7: Block length in bytes 18222 * (MSB in byte:4 & LSB in byte:7) 18223 * 18224 */ 18225 capacity = BE_32(capacity_buf[0]); 18226 lbasize = BE_32(capacity_buf[1]); 18227 18228 /* 18229 * Done with capacity_buf 18230 */ 18231 kmem_free(capacity_buf, SD_CAPACITY_SIZE); 18232 18233 /* 18234 * if the reported capacity is set to all 0xf's, then 18235 * this disk is too large and requires SBC-2 commands. 18236 * Reissue the request using READ CAPACITY 16. 18237 */ 18238 if (capacity == 0xffffffff) { 18239 status = sd_send_scsi_READ_CAPACITY_16(un, &capacity, 18240 &lbasize, path_flag); 18241 if (status != 0) { 18242 return (status); 18243 } 18244 } 18245 break; /* Success! */ 18246 case EIO: 18247 switch (ucmd_buf.uscsi_status) { 18248 case STATUS_RESERVATION_CONFLICT: 18249 status = EACCES; 18250 break; 18251 case STATUS_CHECK: 18252 /* 18253 * Check condition; look for ASC/ASCQ of 0x04/0x01 18254 * (LOGICAL UNIT IS IN PROCESS OF BECOMING READY) 18255 */ 18256 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 18257 (scsi_sense_asc((uint8_t *)&sense_buf) == 0x04) && 18258 (scsi_sense_ascq((uint8_t *)&sense_buf) == 0x01)) { 18259 kmem_free(capacity_buf, SD_CAPACITY_SIZE); 18260 return (EAGAIN); 18261 } 18262 break; 18263 default: 18264 break; 18265 } 18266 /* FALLTHRU */ 18267 default: 18268 kmem_free(capacity_buf, SD_CAPACITY_SIZE); 18269 return (status); 18270 } 18271 18272 /* 18273 * Some ATAPI CD-ROM drives report inaccurate LBA size values 18274 * (2352 and 0 are common) so for these devices always force the value 18275 * to 2048 as required by the ATAPI specs. 18276 */ 18277 if ((un->un_f_cfg_is_atapi == TRUE) && (ISCD(un))) { 18278 lbasize = 2048; 18279 } 18280 18281 /* 18282 * Get the maximum LBA value from the READ CAPACITY data. 18283 * Here we assume that the Partial Medium Indicator (PMI) bit 18284 * was cleared when issuing the command. This means that the LBA 18285 * returned from the device is the LBA of the last logical block 18286 * on the logical unit. The actual logical block count will be 18287 * this value plus one. 
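 *
 * For example (hypothetical values): a returned maximum LBA of
 * 0x1FFFFF with a 512-byte block length describes 0x200000
 * (2097152) blocks, i.e. a 1 GiB logical unit.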
18288 *
18289 * Currently the capacity is saved in terms of un->un_sys_blocksize,
18290 * so scale the capacity value to reflect this.
18291 */
18292 capacity = (capacity + 1) * (lbasize / un->un_sys_blocksize);
18293
18294 /*
18295 * Copy the values from the READ CAPACITY command into the space
18296 * provided by the caller.
18297 */
18298 *capp = capacity;
18299 *lbap = lbasize;
18300
18301 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_READ_CAPACITY: "
18302 "capacity:0x%llx lbasize:0x%x\n", capacity, lbasize);
18303
18304 /*
18305 * Both the lbasize and capacity from the device must be nonzero,
18306 * otherwise we assume that the values are not valid and return
18307 * failure to the caller. (4203735)
18308 */
18309 if ((capacity == 0) || (lbasize == 0)) {
18310 return (EIO);
18311 }
18312
18313 return (0);
18314 }
18315
18316 /*
18317 * Function: sd_send_scsi_READ_CAPACITY_16
18318 *
18319 * Description: This routine uses the scsi READ CAPACITY 16 command to
18320 * determine the device capacity in number of blocks and the
18321 * device native block size. If this function returns a failure,
18322 * then the values in *capp and *lbap are undefined.
18323 * This routine should always be called by
18324 * sd_send_scsi_READ_CAPACITY which will apply any device
18325 * specific adjustments to capacity and lbasize.
18326 *
18327 * Arguments: un - ptr to soft state struct for the target
18328 * capp - ptr to unsigned 64-bit variable to receive the
18329 * capacity value from the command.
18330 * lbap - ptr to unsigned 32-bit variable to receive the
18331 * block size value from the command
18332 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
18333 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY
18334 * to use the USCSI "direct" chain and bypass the normal
18335 * command waitq. SD_PATH_DIRECT_PRIORITY is used when
18336 * this command is issued as part of an error recovery
18337 * action.
18338 *
18339 * Return Code: 0 - Success
18340 * EIO - IO error
18341 * EACCES - Reservation conflict detected
18342 * EAGAIN - Device is becoming ready
18343 * errno return code from sd_send_scsi_cmd()
18344 *
18345 * Context: Can sleep. Blocks until command completes.
18346 */
18347
18348 #define SD_CAPACITY_16_SIZE sizeof (struct scsi_capacity_16)
18349
18350 static int
18351 sd_send_scsi_READ_CAPACITY_16(struct sd_lun *un, uint64_t *capp,
18352 uint32_t *lbap, int path_flag)
18353 {
18354 struct scsi_extended_sense sense_buf;
18355 struct uscsi_cmd ucmd_buf;
18356 union scsi_cdb cdb;
18357 uint64_t *capacity16_buf;
18358 uint64_t capacity;
18359 uint32_t lbasize;
18360 int status;
18361
18362 ASSERT(un != NULL);
18363 ASSERT(!mutex_owned(SD_MUTEX(un)));
18364 ASSERT(capp != NULL);
18365 ASSERT(lbap != NULL);
18366
18367 SD_TRACE(SD_LOG_IO, un,
18368 "sd_send_scsi_READ_CAPACITY_16: entry: un:0x%p\n", un);
18369
18370 /*
18371 * First send a READ_CAPACITY_16 command to the target.
18372 *
18373 * Set up the CDB for the READ_CAPACITY_16 command. The Partial
18374 * Medium Indicator bit is cleared. The address field must be
18375 * zero if the PMI bit is zero.
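 *
 * For reference, the SERVICE ACTION IN(16) CDB built below is laid
 * out as follows (per SBC-2):
 *
 *	byte 0:      opcode (SCMD_SVC_ACTION_IN_G4)
 *	byte 1:      service action (SSVC_ACTION_READ_CAPACITY_G4)
 *	bytes 2-9:   logical block address (zero, since PMI is zero)
 *	bytes 10-13: allocation length
 *	byte 14:     PMI bit (zero)
 *	byte 15:     control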
18376 */
18377 bzero(&cdb, sizeof (cdb));
18378 bzero(&ucmd_buf, sizeof (ucmd_buf));
18379
18380 capacity16_buf = kmem_zalloc(SD_CAPACITY_16_SIZE, KM_SLEEP);
18381
18382 ucmd_buf.uscsi_cdb = (char *)&cdb;
18383 ucmd_buf.uscsi_cdblen = CDB_GROUP4;
18384 ucmd_buf.uscsi_bufaddr = (caddr_t)capacity16_buf;
18385 ucmd_buf.uscsi_buflen = SD_CAPACITY_16_SIZE;
18386 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
18387 ucmd_buf.uscsi_rqlen = sizeof (sense_buf);
18388 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT;
18389 ucmd_buf.uscsi_timeout = 60;
18390
18391 /*
18392 * Read Capacity (16) is a Service Action In command. One
18393 * command byte (0x9E) is overloaded for multiple operations,
18394 * with the second CDB byte specifying the desired operation.
18395 */
18396 cdb.scc_cmd = SCMD_SVC_ACTION_IN_G4;
18397 cdb.cdb_opaque[1] = SSVC_ACTION_READ_CAPACITY_G4;
18398
18399 /*
18400 * Fill in allocation length field
18401 */
18402 FORMG4COUNT(&cdb, ucmd_buf.uscsi_buflen);
18403
18404 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL,
18405 UIO_SYSSPACE, path_flag);
18406
18407 switch (status) {
18408 case 0:
18409 /* Return failure if we did not get valid capacity data. */
18410 if (ucmd_buf.uscsi_resid > 20) {
18411 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE);
18412 return (EIO);
18413 }
18414
18415 /*
18416 * Read capacity and block size from the READ CAPACITY 16 data.
18417 * This data may be adjusted later due to device specific
18418 * issues.
18419 *
18420 * According to the SCSI spec, the READ CAPACITY 16
18421 * command returns the following:
18422 *
18423 * bytes 0-7: Maximum logical block address available.
18424 * (MSB in byte:0 & LSB in byte:7)
18425 *
18426 * bytes 8-11: Block length in bytes
18427 * (MSB in byte:8 & LSB in byte:11)
18428 *
18429 */
18430 capacity = BE_64(capacity16_buf[0]);
18431 lbasize = BE_32(*(uint32_t *)&capacity16_buf[1]);
18432
18433 /*
18434 * Done with capacity16_buf
18435 */
18436 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE);
18437
18438 /*
18439 * If the reported capacity is set to all 0xf's, then
18440 * this disk is too large. This could only happen with
18441 * a device that supports LBAs larger than 64 bits which
18442 * are not defined by any current T10 standards.
18443 */
18444 if (capacity == 0xffffffffffffffff) {
18445 return (EIO);
18446 }
18447 break; /* Success! */
18448 case EIO:
18449 switch (ucmd_buf.uscsi_status) {
18450 case STATUS_RESERVATION_CONFLICT:
18451 status = EACCES;
18452 break;
18453 case STATUS_CHECK:
18454 /*
18455 * Check condition; look for ASC/ASCQ of 0x04/0x01
18456 * (LOGICAL UNIT IS IN PROCESS OF BECOMING READY)
18457 */
18458 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) &&
18459 (scsi_sense_asc((uint8_t *)&sense_buf) == 0x04) &&
18460 (scsi_sense_ascq((uint8_t *)&sense_buf) == 0x01)) {
18461 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE);
18462 return (EAGAIN);
18463 }
18464 break;
18465 default:
18466 break;
18467 }
18468 /* FALLTHRU */
18469 default:
18470 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE);
18471 return (status);
18472 }
18473
18474 *capp = capacity;
18475 *lbap = lbasize;
18476
18477 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_READ_CAPACITY_16: "
18478 "capacity:0x%llx lbasize:0x%x\n", capacity, lbasize);
18479
18480 return (0);
18481 }
18482
18483
18484 /*
18485 * Function: sd_send_scsi_START_STOP_UNIT
18486 *
18487 * Description: Issue a scsi START STOP UNIT command to the target.
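 * (For reference: the flag value is placed in byte 4 of the CDB,
 * which holds the START and LOEJ bits defined by the SCSI spec;
 * an eject, for example, is LOEJ set with START clear. The
 * SD_TARGET_* values are assumed to encode these bits directly.)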
18488 * 18489 * Arguments: un - pointer to driver soft state (unit) structure for 18490 * this target. 18491 * flag - SD_TARGET_START 18492 * SD_TARGET_STOP 18493 * SD_TARGET_EJECT 18494 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 18495 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 18496 * to use the USCSI "direct" chain and bypass the normal 18497 * command waitq. SD_PATH_DIRECT_PRIORITY is used when this 18498 * command is issued as part of an error recovery action. 18499 * 18500 * Return Code: 0 - Success 18501 * EIO - IO error 18502 * EACCES - Reservation conflict detected 18503 * ENXIO - Not Ready, medium not present 18504 * errno return code from sd_send_scsi_cmd() 18505 * 18506 * Context: Can sleep. 18507 */ 18508 18509 static int 18510 sd_send_scsi_START_STOP_UNIT(struct sd_lun *un, int flag, int path_flag) 18511 { 18512 struct scsi_extended_sense sense_buf; 18513 union scsi_cdb cdb; 18514 struct uscsi_cmd ucmd_buf; 18515 int status; 18516 18517 ASSERT(un != NULL); 18518 ASSERT(!mutex_owned(SD_MUTEX(un))); 18519 18520 SD_TRACE(SD_LOG_IO, un, 18521 "sd_send_scsi_START_STOP_UNIT: entry: un:0x%p\n", un); 18522 18523 if (un->un_f_check_start_stop && 18524 ((flag == SD_TARGET_START) || (flag == SD_TARGET_STOP)) && 18525 (un->un_f_start_stop_supported != TRUE)) { 18526 return (0); 18527 } 18528 18529 /* 18530 * If we are performing an eject operation and 18531 * we receive any command other than SD_TARGET_EJECT 18532 * we should immediately return. 18533 */ 18534 if (flag != SD_TARGET_EJECT) { 18535 mutex_enter(SD_MUTEX(un)); 18536 if (un->un_f_ejecting == TRUE) { 18537 mutex_exit(SD_MUTEX(un)); 18538 return (EAGAIN); 18539 } 18540 mutex_exit(SD_MUTEX(un)); 18541 } 18542 18543 bzero(&cdb, sizeof (cdb)); 18544 bzero(&ucmd_buf, sizeof (ucmd_buf)); 18545 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 18546 18547 cdb.scc_cmd = SCMD_START_STOP; 18548 cdb.cdb_opaque[4] = (uchar_t)flag; 18549 18550 ucmd_buf.uscsi_cdb = (char *)&cdb; 18551 ucmd_buf.uscsi_cdblen = CDB_GROUP0; 18552 ucmd_buf.uscsi_bufaddr = NULL; 18553 ucmd_buf.uscsi_buflen = 0; 18554 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 18555 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 18556 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT; 18557 ucmd_buf.uscsi_timeout = 200; 18558 18559 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 18560 UIO_SYSSPACE, path_flag); 18561 18562 switch (status) { 18563 case 0: 18564 break; /* Success! */ 18565 case EIO: 18566 switch (ucmd_buf.uscsi_status) { 18567 case STATUS_RESERVATION_CONFLICT: 18568 status = EACCES; 18569 break; 18570 case STATUS_CHECK: 18571 if (ucmd_buf.uscsi_rqstatus == STATUS_GOOD) { 18572 switch (scsi_sense_key( 18573 (uint8_t *)&sense_buf)) { 18574 case KEY_ILLEGAL_REQUEST: 18575 status = ENOTSUP; 18576 break; 18577 case KEY_NOT_READY: 18578 if (scsi_sense_asc( 18579 (uint8_t *)&sense_buf) 18580 == 0x3A) { 18581 status = ENXIO; 18582 } 18583 break; 18584 default: 18585 break; 18586 } 18587 } 18588 break; 18589 default: 18590 break; 18591 } 18592 break; 18593 default: 18594 break; 18595 } 18596 18597 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_START_STOP_UNIT: exit\n"); 18598 18599 return (status); 18600 } 18601 18602 18603 /* 18604 * Function: sd_start_stop_unit_callback 18605 * 18606 * Description: timeout(9F) callback to begin recovery process for a 18607 * device that has spun down. 18608 * 18609 * Arguments: arg - pointer to associated softstate struct. 
18610 * 18611 * Context: Executes in a timeout(9F) thread context 18612 */ 18613 18614 static void 18615 sd_start_stop_unit_callback(void *arg) 18616 { 18617 struct sd_lun *un = arg; 18618 ASSERT(un != NULL); 18619 ASSERT(!mutex_owned(SD_MUTEX(un))); 18620 18621 SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_callback: entry\n"); 18622 18623 (void) taskq_dispatch(sd_tq, sd_start_stop_unit_task, un, KM_NOSLEEP); 18624 } 18625 18626 18627 /* 18628 * Function: sd_start_stop_unit_task 18629 * 18630 * Description: Recovery procedure when a drive is spun down. 18631 * 18632 * Arguments: arg - pointer to associated softstate struct. 18633 * 18634 * Context: Executes in a taskq() thread context 18635 */ 18636 18637 static void 18638 sd_start_stop_unit_task(void *arg) 18639 { 18640 struct sd_lun *un = arg; 18641 18642 ASSERT(un != NULL); 18643 ASSERT(!mutex_owned(SD_MUTEX(un))); 18644 18645 SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_task: entry\n"); 18646 18647 /* 18648 * Some unformatted drives report not ready error, no need to 18649 * restart if format has been initiated. 18650 */ 18651 mutex_enter(SD_MUTEX(un)); 18652 if (un->un_f_format_in_progress == TRUE) { 18653 mutex_exit(SD_MUTEX(un)); 18654 return; 18655 } 18656 mutex_exit(SD_MUTEX(un)); 18657 18658 /* 18659 * When a START STOP command is issued from here, it is part of a 18660 * failure recovery operation and must be issued before any other 18661 * commands, including any pending retries. Thus it must be sent 18662 * using SD_PATH_DIRECT_PRIORITY. It doesn't matter if the spin up 18663 * succeeds or not, we will start I/O after the attempt. 18664 */ 18665 (void) sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_START, 18666 SD_PATH_DIRECT_PRIORITY); 18667 18668 /* 18669 * The above call blocks until the START_STOP_UNIT command completes. 18670 * Now that it has completed, we must re-try the original IO that 18671 * received the NOT READY condition in the first place. There are 18672 * three possible conditions here: 18673 * 18674 * (1) The original IO is on un_retry_bp. 18675 * (2) The original IO is on the regular wait queue, and un_retry_bp 18676 * is NULL. 18677 * (3) The original IO is on the regular wait queue, and un_retry_bp 18678 * points to some other, unrelated bp. 18679 * 18680 * For each case, we must call sd_start_cmds() with un_retry_bp 18681 * as the argument. If un_retry_bp is NULL, this will initiate 18682 * processing of the regular wait queue. If un_retry_bp is not NULL, 18683 * then this will process the bp on un_retry_bp. That may or may not 18684 * be the original IO, but that does not matter: the important thing 18685 * is to keep the IO processing going at this point. 18686 * 18687 * Note: This is a very specific error recovery sequence associated 18688 * with a drive that is not spun up. We attempt a START_STOP_UNIT and 18689 * serialize the I/O with completion of the spin-up. 18690 */ 18691 mutex_enter(SD_MUTEX(un)); 18692 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 18693 "sd_start_stop_unit_task: un:0x%p starting bp:0x%p\n", 18694 un, un->un_retry_bp); 18695 un->un_startstop_timeid = NULL; /* Timeout is no longer pending */ 18696 sd_start_cmds(un, un->un_retry_bp); 18697 mutex_exit(SD_MUTEX(un)); 18698 18699 SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_task: exit\n"); 18700 } 18701 18702 18703 /* 18704 * Function: sd_send_scsi_INQUIRY 18705 * 18706 * Description: Issue the scsi INQUIRY command. 
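 * When evpd is set, page_code selects a Vital Product Data
 * page; for example, evpd == 0x01 with page_code == 0x80
 * requests the Unit Serial Number VPD page (per SPC).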
18707 *
18708 * Arguments: un - ptr to soft state struct for the target
18709 * bufaddr - buffer to receive the inquiry data
18710 * buflen - length of bufaddr
18711 * evpd - EVPD bit for byte 1 of the CDB
18712 * page_code - page code for byte 2 of the CDB
18713 * residp - ptr to receive the command residual, may be NULL
18714 *
18715 * Return Code: 0 - Success
18716 * errno return code from sd_send_scsi_cmd()
18717 *
18718 * Context: Can sleep. Does not return until command is completed.
18719 */
18720
18721 static int
18722 sd_send_scsi_INQUIRY(struct sd_lun *un, uchar_t *bufaddr, size_t buflen,
18723 uchar_t evpd, uchar_t page_code, size_t *residp)
18724 {
18725 union scsi_cdb cdb;
18726 struct uscsi_cmd ucmd_buf;
18727 int status;
18728
18729 ASSERT(un != NULL);
18730 ASSERT(!mutex_owned(SD_MUTEX(un)));
18731 ASSERT(bufaddr != NULL);
18732
18733 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_INQUIRY: entry: un:0x%p\n", un);
18734
18735 bzero(&cdb, sizeof (cdb));
18736 bzero(&ucmd_buf, sizeof (ucmd_buf));
18737 bzero(bufaddr, buflen);
18738
18739 cdb.scc_cmd = SCMD_INQUIRY;
18740 cdb.cdb_opaque[1] = evpd;
18741 cdb.cdb_opaque[2] = page_code;
18742 FORMG0COUNT(&cdb, buflen);
18743
18744 ucmd_buf.uscsi_cdb = (char *)&cdb;
18745 ucmd_buf.uscsi_cdblen = CDB_GROUP0;
18746 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr;
18747 ucmd_buf.uscsi_buflen = buflen;
18748 ucmd_buf.uscsi_rqbuf = NULL;
18749 ucmd_buf.uscsi_rqlen = 0;
18750 ucmd_buf.uscsi_flags = USCSI_READ | USCSI_SILENT;
18751 ucmd_buf.uscsi_timeout = 200; /* Excessive legacy value */
18752
18753 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL,
18754 UIO_SYSSPACE, SD_PATH_DIRECT);
18755
18756 if ((status == 0) && (residp != NULL)) {
18757 *residp = ucmd_buf.uscsi_resid;
18758 }
18759
18760 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_INQUIRY: exit\n");
18761
18762 return (status);
18763 }
18764
18765
18766 /*
18767 * Function: sd_send_scsi_TEST_UNIT_READY
18768 *
18769 * Description: Issue the scsi TEST UNIT READY command.
18770 * This routine can be told to set the flag USCSI_DIAGNOSE to
18771 * prevent retrying failed commands. Use this when the intent
18772 * is either to check for device readiness, to clear a Unit
18773 * Attention, or to clear any outstanding sense data.
18774 * However under specific conditions the expected behavior
18775 * is for retries to bring a device ready, so use the flag
18776 * with caution.
18777 *
18778 * Arguments: un
18779 * flag: SD_CHECK_FOR_MEDIA: return ENXIO if no media present
18780 * SD_DONT_RETRY_TUR: include uscsi flag USCSI_DIAGNOSE.
18781 * 0: don't check for media present, do retries on cmd.
18782 *
18783 * Return Code: 0 - Success
18784 * EIO - IO error
18785 * EACCES - Reservation conflict detected
18786 * ENXIO - Not Ready, medium not present
18787 * errno return code from sd_send_scsi_cmd()
18788 *
18789 * Context: Can sleep. Does not return until command is completed.
18790 */
18791
18792 static int
18793 sd_send_scsi_TEST_UNIT_READY(struct sd_lun *un, int flag)
18794 {
18795 struct scsi_extended_sense sense_buf;
18796 union scsi_cdb cdb;
18797 struct uscsi_cmd ucmd_buf;
18798 int status;
18799
18800 ASSERT(un != NULL);
18801 ASSERT(!mutex_owned(SD_MUTEX(un)));
18802
18803 SD_TRACE(SD_LOG_IO, un,
18804 "sd_send_scsi_TEST_UNIT_READY: entry: un:0x%p\n", un);
18805
18806 /*
18807 * Some Seagate elite1 TQ devices get hung with disconnect/reconnect
18808 * timeouts when they receive a TUR and the queue is not empty. Check
18809 * the configuration flag set during attach (indicating the drive has
18810 * this firmware bug) and un_ncmds_in_transport before issuing the
18811 * TUR. If there are
18812 * pending commands, return success; this is a bit arbitrary but is OK
18813 * for non-removables (i.e.
the eliteI disks) and non-clustering 18814 * configurations. 18815 */ 18816 if (un->un_f_cfg_tur_check == TRUE) { 18817 mutex_enter(SD_MUTEX(un)); 18818 if (un->un_ncmds_in_transport != 0) { 18819 mutex_exit(SD_MUTEX(un)); 18820 return (0); 18821 } 18822 mutex_exit(SD_MUTEX(un)); 18823 } 18824 18825 bzero(&cdb, sizeof (cdb)); 18826 bzero(&ucmd_buf, sizeof (ucmd_buf)); 18827 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 18828 18829 cdb.scc_cmd = SCMD_TEST_UNIT_READY; 18830 18831 ucmd_buf.uscsi_cdb = (char *)&cdb; 18832 ucmd_buf.uscsi_cdblen = CDB_GROUP0; 18833 ucmd_buf.uscsi_bufaddr = NULL; 18834 ucmd_buf.uscsi_buflen = 0; 18835 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 18836 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 18837 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT; 18838 18839 /* Use flag USCSI_DIAGNOSE to prevent retries if it fails. */ 18840 if ((flag & SD_DONT_RETRY_TUR) != 0) { 18841 ucmd_buf.uscsi_flags |= USCSI_DIAGNOSE; 18842 } 18843 ucmd_buf.uscsi_timeout = 60; 18844 18845 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 18846 UIO_SYSSPACE, ((flag & SD_BYPASS_PM) ? SD_PATH_DIRECT : 18847 SD_PATH_STANDARD)); 18848 18849 switch (status) { 18850 case 0: 18851 break; /* Success! */ 18852 case EIO: 18853 switch (ucmd_buf.uscsi_status) { 18854 case STATUS_RESERVATION_CONFLICT: 18855 status = EACCES; 18856 break; 18857 case STATUS_CHECK: 18858 if ((flag & SD_CHECK_FOR_MEDIA) == 0) { 18859 break; 18860 } 18861 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 18862 (scsi_sense_key((uint8_t *)&sense_buf) == 18863 KEY_NOT_READY) && 18864 (scsi_sense_asc((uint8_t *)&sense_buf) == 0x3A)) { 18865 status = ENXIO; 18866 } 18867 break; 18868 default: 18869 break; 18870 } 18871 break; 18872 default: 18873 break; 18874 } 18875 18876 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_TEST_UNIT_READY: exit\n"); 18877 18878 return (status); 18879 } 18880 18881 18882 /* 18883 * Function: sd_send_scsi_PERSISTENT_RESERVE_IN 18884 * 18885 * Description: Issue the scsi PERSISTENT RESERVE IN command. 18886 * 18887 * Arguments: un 18888 * 18889 * Return Code: 0 - Success 18890 * EACCES 18891 * ENOTSUP 18892 * errno return code from sd_send_scsi_cmd() 18893 * 18894 * Context: Can sleep. Does not return until command is completed. 
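 *
 * For reference, the parameter data returned by PERSISTENT RESERVE IN
 * begins with an 8-byte header (layout per SPC-3):
 *
 *	bytes 0-3: PRgeneration counter
 *	bytes 4-7: additional length (bytes of data that follow)
 *
 * followed, for SD_READ_KEYS, by the list of 8-byte registered
 * reservation keys.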
18895 */ 18896 18897 static int 18898 sd_send_scsi_PERSISTENT_RESERVE_IN(struct sd_lun *un, uchar_t usr_cmd, 18899 uint16_t data_len, uchar_t *data_bufp) 18900 { 18901 struct scsi_extended_sense sense_buf; 18902 union scsi_cdb cdb; 18903 struct uscsi_cmd ucmd_buf; 18904 int status; 18905 int no_caller_buf = FALSE; 18906 18907 ASSERT(un != NULL); 18908 ASSERT(!mutex_owned(SD_MUTEX(un))); 18909 ASSERT((usr_cmd == SD_READ_KEYS) || (usr_cmd == SD_READ_RESV)); 18910 18911 SD_TRACE(SD_LOG_IO, un, 18912 "sd_send_scsi_PERSISTENT_RESERVE_IN: entry: un:0x%p\n", un); 18913 18914 bzero(&cdb, sizeof (cdb)); 18915 bzero(&ucmd_buf, sizeof (ucmd_buf)); 18916 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 18917 if (data_bufp == NULL) { 18918 /* Allocate a default buf if the caller did not give one */ 18919 ASSERT(data_len == 0); 18920 data_len = MHIOC_RESV_KEY_SIZE; 18921 data_bufp = kmem_zalloc(MHIOC_RESV_KEY_SIZE, KM_SLEEP); 18922 no_caller_buf = TRUE; 18923 } 18924 18925 cdb.scc_cmd = SCMD_PERSISTENT_RESERVE_IN; 18926 cdb.cdb_opaque[1] = usr_cmd; 18927 FORMG1COUNT(&cdb, data_len); 18928 18929 ucmd_buf.uscsi_cdb = (char *)&cdb; 18930 ucmd_buf.uscsi_cdblen = CDB_GROUP1; 18931 ucmd_buf.uscsi_bufaddr = (caddr_t)data_bufp; 18932 ucmd_buf.uscsi_buflen = data_len; 18933 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 18934 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 18935 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; 18936 ucmd_buf.uscsi_timeout = 60; 18937 18938 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 18939 UIO_SYSSPACE, SD_PATH_STANDARD); 18940 18941 switch (status) { 18942 case 0: 18943 break; /* Success! */ 18944 case EIO: 18945 switch (ucmd_buf.uscsi_status) { 18946 case STATUS_RESERVATION_CONFLICT: 18947 status = EACCES; 18948 break; 18949 case STATUS_CHECK: 18950 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 18951 (scsi_sense_key((uint8_t *)&sense_buf) == 18952 KEY_ILLEGAL_REQUEST)) { 18953 status = ENOTSUP; 18954 } 18955 break; 18956 default: 18957 break; 18958 } 18959 break; 18960 default: 18961 break; 18962 } 18963 18964 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_PERSISTENT_RESERVE_IN: exit\n"); 18965 18966 if (no_caller_buf == TRUE) { 18967 kmem_free(data_bufp, data_len); 18968 } 18969 18970 return (status); 18971 } 18972 18973 18974 /* 18975 * Function: sd_send_scsi_PERSISTENT_RESERVE_OUT 18976 * 18977 * Description: This routine is the driver entry point for handling CD-ROM 18978 * multi-host persistent reservation requests (MHIOCGRP_INKEYS, 18979 * MHIOCGRP_INRESV) by sending the SCSI-3 PROUT commands to the 18980 * device. 18981 * 18982 * Arguments: un - Pointer to soft state struct for the target. 18983 * usr_cmd SCSI-3 reservation facility command (one of 18984 * SD_SCSI3_REGISTER, SD_SCSI3_RESERVE, SD_SCSI3_RELEASE, 18985 * SD_SCSI3_PREEMPTANDABORT) 18986 * usr_bufp - user provided pointer register, reserve descriptor or 18987 * preempt and abort structure (mhioc_register_t, 18988 * mhioc_resv_desc_t, mhioc_preemptandabort_t) 18989 * 18990 * Return Code: 0 - Success 18991 * EACCES 18992 * ENOTSUP 18993 * errno return code from sd_send_scsi_cmd() 18994 * 18995 * Context: Can sleep. Does not return until command is completed. 
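 *
 * For reference, the 24-byte parameter list built below (see
 * sd_prout_t) follows the SPC-3 PERSISTENT RESERVE OUT layout:
 *
 *	bytes 0-7:   reservation key
 *	bytes 8-15:  service action reservation key
 *	bytes 16-19: scope-specific address (obsolete in later specs)
 *	byte 20:     flags, including the APTPL bit
 *	bytes 21-23: reserved/obsolete extent fields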
18996 */ 18997 18998 static int 18999 sd_send_scsi_PERSISTENT_RESERVE_OUT(struct sd_lun *un, uchar_t usr_cmd, 19000 uchar_t *usr_bufp) 19001 { 19002 struct scsi_extended_sense sense_buf; 19003 union scsi_cdb cdb; 19004 struct uscsi_cmd ucmd_buf; 19005 int status; 19006 uchar_t data_len = sizeof (sd_prout_t); 19007 sd_prout_t *prp; 19008 19009 ASSERT(un != NULL); 19010 ASSERT(!mutex_owned(SD_MUTEX(un))); 19011 ASSERT(data_len == 24); /* required by scsi spec */ 19012 19013 SD_TRACE(SD_LOG_IO, un, 19014 "sd_send_scsi_PERSISTENT_RESERVE_OUT: entry: un:0x%p\n", un); 19015 19016 if (usr_bufp == NULL) { 19017 return (EINVAL); 19018 } 19019 19020 bzero(&cdb, sizeof (cdb)); 19021 bzero(&ucmd_buf, sizeof (ucmd_buf)); 19022 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 19023 prp = kmem_zalloc(data_len, KM_SLEEP); 19024 19025 cdb.scc_cmd = SCMD_PERSISTENT_RESERVE_OUT; 19026 cdb.cdb_opaque[1] = usr_cmd; 19027 FORMG1COUNT(&cdb, data_len); 19028 19029 ucmd_buf.uscsi_cdb = (char *)&cdb; 19030 ucmd_buf.uscsi_cdblen = CDB_GROUP1; 19031 ucmd_buf.uscsi_bufaddr = (caddr_t)prp; 19032 ucmd_buf.uscsi_buflen = data_len; 19033 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 19034 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 19035 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_WRITE | USCSI_SILENT; 19036 ucmd_buf.uscsi_timeout = 60; 19037 19038 switch (usr_cmd) { 19039 case SD_SCSI3_REGISTER: { 19040 mhioc_register_t *ptr = (mhioc_register_t *)usr_bufp; 19041 19042 bcopy(ptr->oldkey.key, prp->res_key, MHIOC_RESV_KEY_SIZE); 19043 bcopy(ptr->newkey.key, prp->service_key, 19044 MHIOC_RESV_KEY_SIZE); 19045 prp->aptpl = ptr->aptpl; 19046 break; 19047 } 19048 case SD_SCSI3_RESERVE: 19049 case SD_SCSI3_RELEASE: { 19050 mhioc_resv_desc_t *ptr = (mhioc_resv_desc_t *)usr_bufp; 19051 19052 bcopy(ptr->key.key, prp->res_key, MHIOC_RESV_KEY_SIZE); 19053 prp->scope_address = BE_32(ptr->scope_specific_addr); 19054 cdb.cdb_opaque[2] = ptr->type; 19055 break; 19056 } 19057 case SD_SCSI3_PREEMPTANDABORT: { 19058 mhioc_preemptandabort_t *ptr = 19059 (mhioc_preemptandabort_t *)usr_bufp; 19060 19061 bcopy(ptr->resvdesc.key.key, prp->res_key, MHIOC_RESV_KEY_SIZE); 19062 bcopy(ptr->victim_key.key, prp->service_key, 19063 MHIOC_RESV_KEY_SIZE); 19064 prp->scope_address = BE_32(ptr->resvdesc.scope_specific_addr); 19065 cdb.cdb_opaque[2] = ptr->resvdesc.type; 19066 ucmd_buf.uscsi_flags |= USCSI_HEAD; 19067 break; 19068 } 19069 case SD_SCSI3_REGISTERANDIGNOREKEY: 19070 { 19071 mhioc_registerandignorekey_t *ptr; 19072 ptr = (mhioc_registerandignorekey_t *)usr_bufp; 19073 bcopy(ptr->newkey.key, 19074 prp->service_key, MHIOC_RESV_KEY_SIZE); 19075 prp->aptpl = ptr->aptpl; 19076 break; 19077 } 19078 default: 19079 ASSERT(FALSE); 19080 break; 19081 } 19082 19083 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 19084 UIO_SYSSPACE, SD_PATH_STANDARD); 19085 19086 switch (status) { 19087 case 0: 19088 break; /* Success! 
*/ 19089 case EIO: 19090 switch (ucmd_buf.uscsi_status) { 19091 case STATUS_RESERVATION_CONFLICT: 19092 status = EACCES; 19093 break; 19094 case STATUS_CHECK: 19095 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 19096 (scsi_sense_key((uint8_t *)&sense_buf) == 19097 KEY_ILLEGAL_REQUEST)) { 19098 status = ENOTSUP; 19099 } 19100 break; 19101 default: 19102 break; 19103 } 19104 break; 19105 default: 19106 break; 19107 } 19108 19109 kmem_free(prp, data_len); 19110 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_PERSISTENT_RESERVE_OUT: exit\n"); 19111 return (status); 19112 } 19113 19114 19115 /* 19116 * Function: sd_send_scsi_SYNCHRONIZE_CACHE 19117 * 19118 * Description: Issues a scsi SYNCHRONIZE CACHE command to the target 19119 * 19120 * Arguments: un - pointer to the target's soft state struct 19121 * dkc - pointer to the callback structure 19122 * 19123 * Return Code: 0 - success 19124 * errno-type error code 19125 * 19126 * Context: kernel thread context only. 19127 * 19128 * _______________________________________________________________ 19129 * | dkc_flag & | dkc_callback | DKIOCFLUSHWRITECACHE | 19130 * |FLUSH_VOLATILE| | operation | 19131 * |______________|______________|_________________________________| 19132 * | 0 | NULL | Synchronous flush on both | 19133 * | | | volatile and non-volatile cache | 19134 * |______________|______________|_________________________________| 19135 * | 1 | NULL | Synchronous flush on volatile | 19136 * | | | cache; disk drivers may suppress| 19137 * | | | flush if disk table indicates | 19138 * | | | non-volatile cache | 19139 * |______________|______________|_________________________________| 19140 * | 0 | !NULL | Asynchronous flush on both | 19141 * | | | volatile and non-volatile cache;| 19142 * |______________|______________|_________________________________| 19143 * | 1 | !NULL | Asynchronous flush on volatile | 19144 * | | | cache; disk drivers may suppress| 19145 * | | | flush if disk table indicates | 19146 * | | | non-volatile cache | 19147 * |______________|______________|_________________________________| 19148 * 19149 */ 19150 19151 static int 19152 sd_send_scsi_SYNCHRONIZE_CACHE(struct sd_lun *un, struct dk_callback *dkc) 19153 { 19154 struct sd_uscsi_info *uip; 19155 struct uscsi_cmd *uscmd; 19156 union scsi_cdb *cdb; 19157 struct buf *bp; 19158 int rval = 0; 19159 int is_async; 19160 19161 SD_TRACE(SD_LOG_IO, un, 19162 "sd_send_scsi_SYNCHRONIZE_CACHE: entry: un:0x%p\n", un); 19163 19164 ASSERT(un != NULL); 19165 ASSERT(!mutex_owned(SD_MUTEX(un))); 19166 19167 if (dkc == NULL || dkc->dkc_callback == NULL) { 19168 is_async = FALSE; 19169 } else { 19170 is_async = TRUE; 19171 } 19172 19173 mutex_enter(SD_MUTEX(un)); 19174 /* check whether cache flush should be suppressed */ 19175 if (un->un_f_suppress_cache_flush == TRUE) { 19176 mutex_exit(SD_MUTEX(un)); 19177 /* 19178 * suppress the cache flush if the device is told to do 19179 * so by sd.conf or disk table 19180 */ 19181 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_SYNCHRONIZE_CACHE: \ 19182 skip the cache flush since suppress_cache_flush is %d!\n", 19183 un->un_f_suppress_cache_flush); 19184 19185 if (is_async == TRUE) { 19186 /* invoke callback for asynchronous flush */ 19187 (*dkc->dkc_callback)(dkc->dkc_cookie, 0); 19188 } 19189 return (rval); 19190 } 19191 mutex_exit(SD_MUTEX(un)); 19192 19193 /* 19194 * check dkc_flag & FLUSH_VOLATILE so SYNC_NV bit can be 19195 * set properly 19196 */ 19197 cdb = kmem_zalloc(CDB_GROUP1, KM_SLEEP); 19198 cdb->scc_cmd = SCMD_SYNCHRONIZE_CACHE; 19199 19200 
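/*
* Per SBC-2, a SYNCHRONIZE CACHE with the SYNC_NV bit set asks the
* device to synchronize only its volatile cache; with SYNC_NV clear
* the device must also synchronize any non-volatile cache (see the
* table above).
*/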
mutex_enter(SD_MUTEX(un)); 19201 if (dkc != NULL && un->un_f_sync_nv_supported && 19202 (dkc->dkc_flag & FLUSH_VOLATILE)) { 19203 /* 19204 * if the device supports SYNC_NV bit, turn on 19205 * the SYNC_NV bit to only flush volatile cache 19206 */ 19207 cdb->cdb_un.tag |= SD_SYNC_NV_BIT; 19208 } 19209 mutex_exit(SD_MUTEX(un)); 19210 19211 /* 19212 * First get some memory for the uscsi_cmd struct and cdb 19213 * and initialize for SYNCHRONIZE_CACHE cmd. 19214 */ 19215 uscmd = kmem_zalloc(sizeof (struct uscsi_cmd), KM_SLEEP); 19216 uscmd->uscsi_cdblen = CDB_GROUP1; 19217 uscmd->uscsi_cdb = (caddr_t)cdb; 19218 uscmd->uscsi_bufaddr = NULL; 19219 uscmd->uscsi_buflen = 0; 19220 uscmd->uscsi_rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 19221 uscmd->uscsi_rqlen = SENSE_LENGTH; 19222 uscmd->uscsi_rqresid = SENSE_LENGTH; 19223 uscmd->uscsi_flags = USCSI_RQENABLE | USCSI_SILENT; 19224 uscmd->uscsi_timeout = sd_io_time; 19225 19226 /* 19227 * Allocate an sd_uscsi_info struct and fill it with the info 19228 * needed by sd_initpkt_for_uscsi(). Then put the pointer into 19229 * b_private in the buf for sd_initpkt_for_uscsi(). Note that 19230 * since we allocate the buf here in this function, we do not 19231 * need to preserve the prior contents of b_private. 19232 * The sd_uscsi_info struct is also used by sd_uscsi_strategy() 19233 */ 19234 uip = kmem_zalloc(sizeof (struct sd_uscsi_info), KM_SLEEP); 19235 uip->ui_flags = SD_PATH_DIRECT; 19236 uip->ui_cmdp = uscmd; 19237 19238 bp = getrbuf(KM_SLEEP); 19239 bp->b_private = uip; 19240 19241 /* 19242 * Setup buffer to carry uscsi request. 19243 */ 19244 bp->b_flags = B_BUSY; 19245 bp->b_bcount = 0; 19246 bp->b_blkno = 0; 19247 19248 if (is_async == TRUE) { 19249 bp->b_iodone = sd_send_scsi_SYNCHRONIZE_CACHE_biodone; 19250 uip->ui_dkc = *dkc; 19251 } 19252 19253 bp->b_edev = SD_GET_DEV(un); 19254 bp->b_dev = cmpdev(bp->b_edev); /* maybe unnecessary? */ 19255 19256 /* 19257 * Unset un_f_sync_cache_required flag 19258 */ 19259 mutex_enter(SD_MUTEX(un)); 19260 un->un_f_sync_cache_required = FALSE; 19261 mutex_exit(SD_MUTEX(un)); 19262 19263 (void) sd_uscsi_strategy(bp); 19264 19265 /* 19266 * If synchronous request, wait for completion 19267 * If async just return and let b_iodone callback 19268 * cleanup. 19269 * NOTE: On return, u_ncmds_in_driver will be decremented, 19270 * but it was also incremented in sd_uscsi_strategy(), so 19271 * we should be ok. 19272 */ 19273 if (is_async == FALSE) { 19274 (void) biowait(bp); 19275 rval = sd_send_scsi_SYNCHRONIZE_CACHE_biodone(bp); 19276 } 19277 19278 return (rval); 19279 } 19280 19281 19282 static int 19283 sd_send_scsi_SYNCHRONIZE_CACHE_biodone(struct buf *bp) 19284 { 19285 struct sd_uscsi_info *uip; 19286 struct uscsi_cmd *uscmd; 19287 uint8_t *sense_buf; 19288 struct sd_lun *un; 19289 int status; 19290 union scsi_cdb *cdb; 19291 19292 uip = (struct sd_uscsi_info *)(bp->b_private); 19293 ASSERT(uip != NULL); 19294 19295 uscmd = uip->ui_cmdp; 19296 ASSERT(uscmd != NULL); 19297 19298 sense_buf = (uint8_t *)uscmd->uscsi_rqbuf; 19299 ASSERT(sense_buf != NULL); 19300 19301 un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp)); 19302 ASSERT(un != NULL); 19303 19304 cdb = (union scsi_cdb *)uscmd->uscsi_cdb; 19305 19306 status = geterror(bp); 19307 switch (status) { 19308 case 0: 19309 break; /* Success! 
*/
19310 case EIO:
19311 switch (uscmd->uscsi_status) {
19312 case STATUS_RESERVATION_CONFLICT:
19313 /* Ignore reservation conflict */
19314 status = 0;
19315 goto done;
19316
19317 case STATUS_CHECK:
19318 if ((uscmd->uscsi_rqstatus == STATUS_GOOD) &&
19319 (scsi_sense_key(sense_buf) ==
19320 KEY_ILLEGAL_REQUEST)) {
19321 /* Ignore Illegal Request error */
19322 if (cdb->cdb_un.tag&SD_SYNC_NV_BIT) {
19323 mutex_enter(SD_MUTEX(un));
19324 un->un_f_sync_nv_supported = FALSE;
19325 mutex_exit(SD_MUTEX(un));
19326 status = 0;
19327 SD_TRACE(SD_LOG_IO, un,
19328 "un_f_sync_nv_supported \
19329 is set to false.\n");
19330 goto done;
19331 }
19332
19333 mutex_enter(SD_MUTEX(un));
19334 un->un_f_sync_cache_supported = FALSE;
19335 mutex_exit(SD_MUTEX(un));
19336 SD_TRACE(SD_LOG_IO, un,
19337 "sd_send_scsi_SYNCHRONIZE_CACHE_biodone: \
19338 un_f_sync_cache_supported set to false \
19339 with asc = %x, ascq = %x\n",
19340 scsi_sense_asc(sense_buf),
19341 scsi_sense_ascq(sense_buf));
19342 status = ENOTSUP;
19343 goto done;
19344 }
19345 break;
19346 default:
19347 break;
19348 }
19349 /* FALLTHRU */
19350 default:
19351 /*
19352 * Turn on the un_f_sync_cache_required flag
19353 * since the SYNC CACHE command failed
19354 */
19355 mutex_enter(SD_MUTEX(un));
19356 un->un_f_sync_cache_required = TRUE;
19357 mutex_exit(SD_MUTEX(un));
19358
19359 /*
19360 * Don't log an error message if this device
19361 * has removable media.
19362 */
19363 if (!un->un_f_has_removable_media) {
19364 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
19365 "SYNCHRONIZE CACHE command failed (%d)\n", status);
19366 }
19367 break;
19368 }
19369
19370 done:
19371 if (uip->ui_dkc.dkc_callback != NULL) {
19372 (*uip->ui_dkc.dkc_callback)(uip->ui_dkc.dkc_cookie, status);
19373 }
19374
19375 ASSERT((bp->b_flags & B_REMAPPED) == 0);
19376 freerbuf(bp);
19377 kmem_free(uip, sizeof (struct sd_uscsi_info));
19378 kmem_free(uscmd->uscsi_rqbuf, SENSE_LENGTH);
19379 kmem_free(uscmd->uscsi_cdb, (size_t)uscmd->uscsi_cdblen);
19380 kmem_free(uscmd, sizeof (struct uscsi_cmd));
19381
19382 return (status);
19383 }
19384
19385
19386 /*
19387 * Function: sd_send_scsi_GET_CONFIGURATION
19388 *
19389 * Description: Issues the get configuration command to the device.
19390 * Called from sd_check_for_writable_cd & sd_get_media_info;
19391 * the caller needs to ensure that buflen == SD_PROFILE_HEADER_LEN.
19392 * Arguments: un
19393 * ucmdbuf
19394 * rqbuf
19395 * rqbuflen
19396 * bufaddr
19397 * buflen
19398 * path_flag
19399 *
19400 * Return Code: 0 - Success
19401 * errno return code from sd_send_scsi_cmd()
19402 *
19403 * Context: Can sleep. Does not return until command is completed.
19404 *
19405 */
19406
19407 static int
19408 sd_send_scsi_GET_CONFIGURATION(struct sd_lun *un, struct uscsi_cmd *ucmdbuf,
19409 uchar_t *rqbuf, uint_t rqbuflen, uchar_t *bufaddr, uint_t buflen,
19410 int path_flag)
19411 {
19412 char cdb[CDB_GROUP1];
19413 int status;
19414
19415 ASSERT(un != NULL);
19416 ASSERT(!mutex_owned(SD_MUTEX(un)));
19417 ASSERT(bufaddr != NULL);
19418 ASSERT(ucmdbuf != NULL);
19419 ASSERT(rqbuf != NULL);
19420
19421 SD_TRACE(SD_LOG_IO, un,
19422 "sd_send_scsi_GET_CONFIGURATION: entry: un:0x%p\n", un);
19423
19424 bzero(cdb, sizeof (cdb));
19425 bzero(ucmdbuf, sizeof (struct uscsi_cmd));
19426 bzero(rqbuf, rqbuflen);
19427 bzero(bufaddr, buflen);
19428
19429 /*
19430 * Set up cdb field for the get configuration command.
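 * (Per MMC, a Requested Type of 0x02 in byte 1 asks for the feature
 * header plus the single feature named by the Starting Feature Number
 * in bytes 2-3; byte 8 is the low byte of the allocation length.)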
19431 */ 19432 cdb[0] = SCMD_GET_CONFIGURATION; 19433 cdb[1] = 0x02; /* Requested Type */ 19434 cdb[8] = SD_PROFILE_HEADER_LEN; 19435 ucmdbuf->uscsi_cdb = cdb; 19436 ucmdbuf->uscsi_cdblen = CDB_GROUP1; 19437 ucmdbuf->uscsi_bufaddr = (caddr_t)bufaddr; 19438 ucmdbuf->uscsi_buflen = buflen; 19439 ucmdbuf->uscsi_timeout = sd_io_time; 19440 ucmdbuf->uscsi_rqbuf = (caddr_t)rqbuf; 19441 ucmdbuf->uscsi_rqlen = rqbuflen; 19442 ucmdbuf->uscsi_flags = USCSI_RQENABLE|USCSI_SILENT|USCSI_READ; 19443 19444 status = sd_send_scsi_cmd(SD_GET_DEV(un), ucmdbuf, FKIOCTL, 19445 UIO_SYSSPACE, path_flag); 19446 19447 switch (status) { 19448 case 0: 19449 break; /* Success! */ 19450 case EIO: 19451 switch (ucmdbuf->uscsi_status) { 19452 case STATUS_RESERVATION_CONFLICT: 19453 status = EACCES; 19454 break; 19455 default: 19456 break; 19457 } 19458 break; 19459 default: 19460 break; 19461 } 19462 19463 if (status == 0) { 19464 SD_DUMP_MEMORY(un, SD_LOG_IO, 19465 "sd_send_scsi_GET_CONFIGURATION: data", 19466 (uchar_t *)bufaddr, SD_PROFILE_HEADER_LEN, SD_LOG_HEX); 19467 } 19468 19469 SD_TRACE(SD_LOG_IO, un, 19470 "sd_send_scsi_GET_CONFIGURATION: exit\n"); 19471 19472 return (status); 19473 } 19474 19475 /* 19476 * Function: sd_send_scsi_feature_GET_CONFIGURATION 19477 * 19478 * Description: Issues the get configuration command to the device to 19479 * retrieve a specific feature. Called from 19480 * sd_check_for_writable_cd & sd_set_mmc_caps. 19481 * Arguments: un 19482 * ucmdbuf 19483 * rqbuf 19484 * rqbuflen 19485 * bufaddr 19486 * buflen 19487 * feature 19488 * 19489 * Return Code: 0 - Success 19490 * errno return code from sd_send_scsi_cmd() 19491 * 19492 * Context: Can sleep. Does not return until command is completed. 19493 * 19494 */ 19495 static int 19496 sd_send_scsi_feature_GET_CONFIGURATION(struct sd_lun *un, 19497 struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen, 19498 uchar_t *bufaddr, uint_t buflen, char feature, int path_flag) 19499 { 19500 char cdb[CDB_GROUP1]; 19501 int status; 19502 19503 ASSERT(un != NULL); 19504 ASSERT(!mutex_owned(SD_MUTEX(un))); 19505 ASSERT(bufaddr != NULL); 19506 ASSERT(ucmdbuf != NULL); 19507 ASSERT(rqbuf != NULL); 19508 19509 SD_TRACE(SD_LOG_IO, un, 19510 "sd_send_scsi_feature_GET_CONFIGURATION: entry: un:0x%p\n", un); 19511 19512 bzero(cdb, sizeof (cdb)); 19513 bzero(ucmdbuf, sizeof (struct uscsi_cmd)); 19514 bzero(rqbuf, rqbuflen); 19515 bzero(bufaddr, buflen); 19516 19517 /* 19518 * Set up cdb field for the get configuration command. 19519 */ 19520 cdb[0] = SCMD_GET_CONFIGURATION; 19521 cdb[1] = 0x02; /* Requested Type */ 19522 cdb[3] = feature; 19523 cdb[8] = buflen; 19524 ucmdbuf->uscsi_cdb = cdb; 19525 ucmdbuf->uscsi_cdblen = CDB_GROUP1; 19526 ucmdbuf->uscsi_bufaddr = (caddr_t)bufaddr; 19527 ucmdbuf->uscsi_buflen = buflen; 19528 ucmdbuf->uscsi_timeout = sd_io_time; 19529 ucmdbuf->uscsi_rqbuf = (caddr_t)rqbuf; 19530 ucmdbuf->uscsi_rqlen = rqbuflen; 19531 ucmdbuf->uscsi_flags = USCSI_RQENABLE|USCSI_SILENT|USCSI_READ; 19532 19533 status = sd_send_scsi_cmd(SD_GET_DEV(un), ucmdbuf, FKIOCTL, 19534 UIO_SYSSPACE, path_flag); 19535 19536 switch (status) { 19537 case 0: 19538 break; /* Success! 
*/
19539 case EIO:
19540 switch (ucmdbuf->uscsi_status) {
19541 case STATUS_RESERVATION_CONFLICT:
19542 status = EACCES;
19543 break;
19544 default:
19545 break;
19546 }
19547 break;
19548 default:
19549 break;
19550 }
19551
19552 if (status == 0) {
19553 SD_DUMP_MEMORY(un, SD_LOG_IO,
19554 "sd_send_scsi_feature_GET_CONFIGURATION: data",
19555 (uchar_t *)bufaddr, SD_PROFILE_HEADER_LEN, SD_LOG_HEX);
19556 }
19557
19558 SD_TRACE(SD_LOG_IO, un,
19559 "sd_send_scsi_feature_GET_CONFIGURATION: exit\n");
19560
19561 return (status);
19562 }
19563
19564
19565 /*
19566 * Function: sd_send_scsi_MODE_SENSE
19567 *
19568 * Description: Utility function for issuing a scsi MODE SENSE command.
19569 * Note: This routine uses a consistent implementation for Group0,
19570 * Group1, and Group2 commands across all platforms. ATAPI devices
19571 * use Group 1 Read/Write commands and Group 2 Mode Sense/Select.
19572 *
19573 * Arguments: un - pointer to the softstate struct for the target.
19574 * cdbsize - size CDB to be used (CDB_GROUP0 (6 byte), or
19575 * CDB_GROUP[1|2] (10 byte)).
19576 * bufaddr - buffer for page data retrieved from the target.
19577 * buflen - size of page to be retrieved.
19578 * page_code - page code of data to be retrieved from the target.
19579 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
19580 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY
19581 * to use the USCSI "direct" chain and bypass the normal
19582 * command waitq.
19583 *
19584 * Return Code: 0 - Success
19585 * errno return code from sd_send_scsi_cmd()
19586 *
19587 * Context: Can sleep. Does not return until command is completed.
19588 */
19589
19590 static int
19591 sd_send_scsi_MODE_SENSE(struct sd_lun *un, int cdbsize, uchar_t *bufaddr,
19592 size_t buflen, uchar_t page_code, int path_flag)
19593 {
19594 struct scsi_extended_sense sense_buf;
19595 union scsi_cdb cdb;
19596 struct uscsi_cmd ucmd_buf;
19597 int status;
19598 int headlen;
19599
19600 ASSERT(un != NULL);
19601 ASSERT(!mutex_owned(SD_MUTEX(un)));
19602 ASSERT(bufaddr != NULL);
19603 ASSERT((cdbsize == CDB_GROUP0) || (cdbsize == CDB_GROUP1) ||
19604 (cdbsize == CDB_GROUP2));
19605
19606 SD_TRACE(SD_LOG_IO, un,
19607 "sd_send_scsi_MODE_SENSE: entry: un:0x%p\n", un);
19608
19609 bzero(&cdb, sizeof (cdb));
19610 bzero(&ucmd_buf, sizeof (ucmd_buf));
19611 bzero(&sense_buf, sizeof (struct scsi_extended_sense));
19612 bzero(bufaddr, buflen);
19613
19614 if (cdbsize == CDB_GROUP0) {
19615 cdb.scc_cmd = SCMD_MODE_SENSE;
19616 cdb.cdb_opaque[2] = page_code;
19617 FORMG0COUNT(&cdb, buflen);
19618 headlen = MODE_HEADER_LENGTH;
19619 } else {
19620 cdb.scc_cmd = SCMD_MODE_SENSE_G1;
19621 cdb.cdb_opaque[2] = page_code;
19622 FORMG1COUNT(&cdb, buflen);
19623 headlen = MODE_HEADER_LENGTH_GRP2;
19624 }
19625
19626 ASSERT(headlen <= buflen);
19627 SD_FILL_SCSI1_LUN_CDB(un, &cdb);
19628
19629 ucmd_buf.uscsi_cdb = (char *)&cdb;
19630 ucmd_buf.uscsi_cdblen = (uchar_t)cdbsize;
19631 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr;
19632 ucmd_buf.uscsi_buflen = buflen;
19633 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
19634 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense);
19635 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT;
19636 ucmd_buf.uscsi_timeout = 60;
19637
19638 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL,
19639 UIO_SYSSPACE, path_flag);
19640
19641 switch (status) {
19642 case 0:
19643 /*
19644 * sr_check_wp() uses the 0x3f page code and checks the header
19645 * of the mode page to determine if the target device is
19646 * write-protected. But some USB devices return 0 bytes for the
19647 * 0x3f page code. For this case, make sure that at least the
19648 * mode page header is returned.
19649 */
19650 if (buflen - ucmd_buf.uscsi_resid < headlen)
19651 status = EIO;
19652 break; /* Success! */
19653 case EIO:
19654 switch (ucmd_buf.uscsi_status) {
19655 case STATUS_RESERVATION_CONFLICT:
19656 status = EACCES;
19657 break;
19658 default:
19659 break;
19660 }
19661 break;
19662 default:
19663 break;
19664 }
19665
19666 if (status == 0) {
19667 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_MODE_SENSE: data",
19668 (uchar_t *)bufaddr, buflen, SD_LOG_HEX);
19669 }
19670 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_MODE_SENSE: exit\n");
19671
19672 return (status);
19673 }
19674
19675
19676 /*
19677 * Function: sd_send_scsi_MODE_SELECT
19678 *
19679 * Description: Utility function for issuing a scsi MODE SELECT command.
19680 * Note: This routine uses a consistent implementation for Group0,
19681 * Group1, and Group2 commands across all platforms. ATAPI devices
19682 * use Group 1 Read/Write commands and Group 2 Mode Sense/Select.
19683 *
19684 * Arguments: un - pointer to the softstate struct for the target.
19685 * cdbsize - size CDB to be used (CDB_GROUP0 (6 byte), or
19686 * CDB_GROUP[1|2] (10 byte)).
19687 * bufaddr - buffer for page data retrieved from the target.
19688 * buflen - size of page to be retrieved.
19689 * save_page - boolean to determine if the SP bit should be set.
19690 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
19691 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY
19692 * to use the USCSI "direct" chain and bypass the normal
19693 * command waitq.
19694 *
19695 * Return Code: 0 - Success
19696 * errno return code from sd_send_scsi_cmd()
19697 *
19698 * Context: Can sleep. Does not return until command is completed.
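 *
 * A typical (hypothetical) invocation, writing back a mode page that
 * was previously fetched with sd_send_scsi_MODE_SENSE and asking the
 * device to save it across power cycles:
 *
 *	rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP1, pagebuf,
 *	    pagelen, SD_SAVE_PAGE, SD_PATH_DIRECT);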
19699 */
19700
19701 static int
19702 sd_send_scsi_MODE_SELECT(struct sd_lun *un, int cdbsize, uchar_t *bufaddr,
19703 size_t buflen, uchar_t save_page, int path_flag)
19704 {
19705 struct scsi_extended_sense sense_buf;
19706 union scsi_cdb cdb;
19707 struct uscsi_cmd ucmd_buf;
19708 int status;
19709
19710 ASSERT(un != NULL);
19711 ASSERT(!mutex_owned(SD_MUTEX(un)));
19712 ASSERT(bufaddr != NULL);
19713 ASSERT((cdbsize == CDB_GROUP0) || (cdbsize == CDB_GROUP1) ||
19714 (cdbsize == CDB_GROUP2));
19715
19716 SD_TRACE(SD_LOG_IO, un,
19717 "sd_send_scsi_MODE_SELECT: entry: un:0x%p\n", un);
19718
19719 bzero(&cdb, sizeof (cdb));
19720 bzero(&ucmd_buf, sizeof (ucmd_buf));
19721 bzero(&sense_buf, sizeof (struct scsi_extended_sense));
19722
19723 /* Set the PF bit for many third party drives */
19724 cdb.cdb_opaque[1] = 0x10;
19725
19726 /* Set the savepage(SP) bit if given */
19727 if (save_page == SD_SAVE_PAGE) {
19728 cdb.cdb_opaque[1] |= 0x01;
19729 }
19730
19731 if (cdbsize == CDB_GROUP0) {
19732 cdb.scc_cmd = SCMD_MODE_SELECT;
19733 FORMG0COUNT(&cdb, buflen);
19734 } else {
19735 cdb.scc_cmd = SCMD_MODE_SELECT_G1;
19736 FORMG1COUNT(&cdb, buflen);
19737 }
19738
19739 SD_FILL_SCSI1_LUN_CDB(un, &cdb);
19740
19741 ucmd_buf.uscsi_cdb = (char *)&cdb;
19742 ucmd_buf.uscsi_cdblen = (uchar_t)cdbsize;
19743 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr;
19744 ucmd_buf.uscsi_buflen = buflen;
19745 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
19746 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense);
19747 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_WRITE | USCSI_SILENT;
19748 ucmd_buf.uscsi_timeout = 60;
19749
19750 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL,
19751 UIO_SYSSPACE, path_flag);
19752
19753 switch (status) {
19754 case 0:
19755 break; /* Success! */
19756 case EIO:
19757 switch (ucmd_buf.uscsi_status) {
19758 case STATUS_RESERVATION_CONFLICT:
19759 status = EACCES;
19760 break;
19761 default:
19762 break;
19763 }
19764 break;
19765 default:
19766 break;
19767 }
19768
19769 if (status == 0) {
19770 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_MODE_SELECT: data",
19771 (uchar_t *)bufaddr, buflen, SD_LOG_HEX);
19772 }
19773 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_MODE_SELECT: exit\n");
19774
19775 return (status);
19776 }
19777
19778
19779 /*
19780 * Function: sd_send_scsi_RDWR
19781 *
19782 * Description: Issue a scsi READ or WRITE command with the given parameters.
19783 *
19784 * Arguments: un: Pointer to the sd_lun struct for the target.
19785 * cmd: SCMD_READ or SCMD_WRITE
19786 * bufaddr: Address of caller's buffer to receive the RDWR data
19787 * buflen: Length of caller's buffer to receive the RDWR data.
19788 * start_block: Block number for the start of the RDWR operation.
19789 * (Assumes target-native block size.)
19790 * residp: Pointer to variable to receive the residual of the
19791 * RDWR operation (may be NULL if no residual is requested).
19792 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
19793 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY
19794 * to use the USCSI "direct" chain and bypass the normal
19795 * command waitq.
19796 *
19797 * Return Code: 0 - Success
19798 * errno return code from sd_send_scsi_cmd()
19799 *
19800 * Context: Can sleep. Does not return until command is completed.
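 *
 * Note: the CDB group is derived from start_block below; for example
 * (hypothetical values, non-ATAPI target) block 0x1000 fits the
 * 21-bit address of a 6-byte Group 0 CDB, block 0x200000 requires a
 * 10-byte Group 1 CDB, and any block above 0xFFFFFFFF requires a
 * 16-byte Group 4 CDB.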
19801 */ 19802 19803 static int 19804 sd_send_scsi_RDWR(struct sd_lun *un, uchar_t cmd, void *bufaddr, 19805 size_t buflen, daddr_t start_block, int path_flag) 19806 { 19807 struct scsi_extended_sense sense_buf; 19808 union scsi_cdb cdb; 19809 struct uscsi_cmd ucmd_buf; 19810 uint32_t block_count; 19811 int status; 19812 int cdbsize; 19813 uchar_t flag; 19814 19815 ASSERT(un != NULL); 19816 ASSERT(!mutex_owned(SD_MUTEX(un))); 19817 ASSERT(bufaddr != NULL); 19818 ASSERT((cmd == SCMD_READ) || (cmd == SCMD_WRITE)); 19819 19820 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_RDWR: entry: un:0x%p\n", un); 19821 19822 if (un->un_f_tgt_blocksize_is_valid != TRUE) { 19823 return (EINVAL); 19824 } 19825 19826 mutex_enter(SD_MUTEX(un)); 19827 block_count = SD_BYTES2TGTBLOCKS(un, buflen); 19828 mutex_exit(SD_MUTEX(un)); 19829 19830 flag = (cmd == SCMD_READ) ? USCSI_READ : USCSI_WRITE; 19831 19832 SD_INFO(SD_LOG_IO, un, "sd_send_scsi_RDWR: " 19833 "bufaddr:0x%p buflen:0x%x start_block:0x%p block_count:0x%x\n", 19834 bufaddr, buflen, start_block, block_count); 19835 19836 bzero(&cdb, sizeof (cdb)); 19837 bzero(&ucmd_buf, sizeof (ucmd_buf)); 19838 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 19839 19840 /* Compute CDB size to use */ 19841 if (start_block > 0xffffffff) 19842 cdbsize = CDB_GROUP4; 19843 else if ((start_block & 0xFFE00000) || 19844 (un->un_f_cfg_is_atapi == TRUE)) 19845 cdbsize = CDB_GROUP1; 19846 else 19847 cdbsize = CDB_GROUP0; 19848 19849 switch (cdbsize) { 19850 case CDB_GROUP0: /* 6-byte CDBs */ 19851 cdb.scc_cmd = cmd; 19852 FORMG0ADDR(&cdb, start_block); 19853 FORMG0COUNT(&cdb, block_count); 19854 break; 19855 case CDB_GROUP1: /* 10-byte CDBs */ 19856 cdb.scc_cmd = cmd | SCMD_GROUP1; 19857 FORMG1ADDR(&cdb, start_block); 19858 FORMG1COUNT(&cdb, block_count); 19859 break; 19860 case CDB_GROUP4: /* 16-byte CDBs */ 19861 cdb.scc_cmd = cmd | SCMD_GROUP4; 19862 FORMG4LONGADDR(&cdb, (uint64_t)start_block); 19863 FORMG4COUNT(&cdb, block_count); 19864 break; 19865 case CDB_GROUP5: /* 12-byte CDBs (currently unsupported) */ 19866 default: 19867 /* All others reserved */ 19868 return (EINVAL); 19869 } 19870 19871 /* Set LUN bit(s) in CDB if this is a SCSI-1 device */ 19872 SD_FILL_SCSI1_LUN_CDB(un, &cdb); 19873 19874 ucmd_buf.uscsi_cdb = (char *)&cdb; 19875 ucmd_buf.uscsi_cdblen = (uchar_t)cdbsize; 19876 ucmd_buf.uscsi_bufaddr = bufaddr; 19877 ucmd_buf.uscsi_buflen = buflen; 19878 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 19879 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 19880 ucmd_buf.uscsi_flags = flag | USCSI_RQENABLE | USCSI_SILENT; 19881 ucmd_buf.uscsi_timeout = 60; 19882 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 19883 UIO_SYSSPACE, path_flag); 19884 switch (status) { 19885 case 0: 19886 break; /* Success! */ 19887 case EIO: 19888 switch (ucmd_buf.uscsi_status) { 19889 case STATUS_RESERVATION_CONFLICT: 19890 status = EACCES; 19891 break; 19892 default: 19893 break; 19894 } 19895 break; 19896 default: 19897 break; 19898 } 19899 19900 if (status == 0) { 19901 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_RDWR: data", 19902 (uchar_t *)bufaddr, buflen, SD_LOG_HEX); 19903 } 19904 19905 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_RDWR: exit\n"); 19906 19907 return (status); 19908 } 19909 19910 19911 /* 19912 * Function: sd_send_scsi_LOG_SENSE 19913 * 19914 * Description: Issue a scsi LOG_SENSE command with the given parameters. 19915 * 19916 * Arguments: un: Pointer to the sd_lun struct for the target. 
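 *		bufaddr: Buffer to receive the log page data.
 *		buflen: Allocation length for the returned log data.
 *		page_code: Log page to be retrieved.
 *		page_control: Page control field; placed in byte 2 of
 *			the CDB together with the page code.
 *		param_ptr: Parameter pointer (first parameter code to
 *			be returned).
 *		path_flag: SD_PATH_DIRECT to use the USCSI "direct"
 *			chain and the normal command waitq, or
 *			SD_PATH_DIRECT_PRIORITY to use the USCSI
 *			"direct" chain and bypass the normal command
 *			waitq.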
19917 * 19918 * Return Code: 0 - Success 19919 * errno return code from sd_send_scsi_cmd() 19920 * 19921 * Context: Can sleep. Does not return until command is completed. 19922 */ 19923 19924 static int 19925 sd_send_scsi_LOG_SENSE(struct sd_lun *un, uchar_t *bufaddr, uint16_t buflen, 19926 uchar_t page_code, uchar_t page_control, uint16_t param_ptr, 19927 int path_flag) 19928 19929 { 19930 struct scsi_extended_sense sense_buf; 19931 union scsi_cdb cdb; 19932 struct uscsi_cmd ucmd_buf; 19933 int status; 19934 19935 ASSERT(un != NULL); 19936 ASSERT(!mutex_owned(SD_MUTEX(un))); 19937 19938 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_LOG_SENSE: entry: un:0x%p\n", un); 19939 19940 bzero(&cdb, sizeof (cdb)); 19941 bzero(&ucmd_buf, sizeof (ucmd_buf)); 19942 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 19943 19944 cdb.scc_cmd = SCMD_LOG_SENSE_G1; 19945 cdb.cdb_opaque[2] = (page_control << 6) | page_code; 19946 cdb.cdb_opaque[5] = (uchar_t)((param_ptr & 0xFF00) >> 8); 19947 cdb.cdb_opaque[6] = (uchar_t)(param_ptr & 0x00FF); 19948 FORMG1COUNT(&cdb, buflen); 19949 19950 ucmd_buf.uscsi_cdb = (char *)&cdb; 19951 ucmd_buf.uscsi_cdblen = CDB_GROUP1; 19952 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr; 19953 ucmd_buf.uscsi_buflen = buflen; 19954 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 19955 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 19956 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; 19957 ucmd_buf.uscsi_timeout = 60; 19958 19959 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 19960 UIO_SYSSPACE, path_flag); 19961 19962 switch (status) { 19963 case 0: 19964 break; 19965 case EIO: 19966 switch (ucmd_buf.uscsi_status) { 19967 case STATUS_RESERVATION_CONFLICT: 19968 status = EACCES; 19969 break; 19970 case STATUS_CHECK: 19971 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 19972 (scsi_sense_key((uint8_t *)&sense_buf) == 19973 KEY_ILLEGAL_REQUEST) && 19974 (scsi_sense_asc((uint8_t *)&sense_buf) == 0x24)) { 19975 /* 19976 * ASC 0x24: INVALID FIELD IN CDB 19977 */ 19978 switch (page_code) { 19979 case START_STOP_CYCLE_PAGE: 19980 /* 19981 * The start stop cycle counter is 19982 * implemented as page 0x31 in earlier 19983 * generation disks. In new generation 19984 * disks the start stop cycle counter is 19985 * implemented as page 0xE. To properly 19986 * handle this case if an attempt for 19987 * log page 0xE is made and fails we 19988 * will try again using page 0x31. 19989 * 19990 * Network storage BU committed to 19991 * maintain the page 0x31 for this 19992 * purpose and will not have any other 19993 * page implemented with page code 0x31 19994 * until all disks transition to the 19995 * standard page. 
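 *
 * For example, if a LOG SENSE for log page 0xE fails with
 * ILLEGAL REQUEST/ASC 0x24, the command is reissued below
 * with the vendor-unique page 0x31
 * (START_STOP_CYCLE_VU_PAGE) in byte 2 of the CDB.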
19996 */ 19997 mutex_enter(SD_MUTEX(un)); 19998 un->un_start_stop_cycle_page = 19999 START_STOP_CYCLE_VU_PAGE; 20000 cdb.cdb_opaque[2] = 20001 (char)(page_control << 6) | 20002 un->un_start_stop_cycle_page; 20003 mutex_exit(SD_MUTEX(un)); 20004 status = sd_send_scsi_cmd( 20005 SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 20006 UIO_SYSSPACE, path_flag); 20007 20008 break; 20009 case TEMPERATURE_PAGE: 20010 status = ENOTTY; 20011 break; 20012 default: 20013 break; 20014 } 20015 } 20016 break; 20017 default: 20018 break; 20019 } 20020 break; 20021 default: 20022 break; 20023 } 20024 20025 if (status == 0) { 20026 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_LOG_SENSE: data", 20027 (uchar_t *)bufaddr, buflen, SD_LOG_HEX); 20028 } 20029 20030 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_LOG_SENSE: exit\n"); 20031 20032 return (status); 20033 } 20034 20035 20036 /* 20037 * Function: sdioctl 20038 * 20039 * Description: Driver's ioctl(9e) entry point function. 20040 * 20041 * Arguments: dev - device number 20042 * cmd - ioctl operation to be performed 20043 * arg - user argument, contains data to be set or reference 20044 * parameter for get 20045 * flag - bit flag, indicating open settings, 32/64 bit type 20046 * cred_p - user credential pointer 20047 * rval_p - calling process return value (OPT) 20048 * 20049 * Return Code: EINVAL 20050 * ENOTTY 20051 * ENXIO 20052 * EIO 20053 * EFAULT 20054 * ENOTSUP 20055 * EPERM 20056 * 20057 * Context: Called from the device switch at normal priority. 20058 */ 20059 20060 static int 20061 sdioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cred_p, int *rval_p) 20062 { 20063 struct sd_lun *un = NULL; 20064 int err = 0; 20065 int i = 0; 20066 cred_t *cr; 20067 int tmprval = EINVAL; 20068 int is_valid; 20069 20070 /* 20071 * All device accesses go thru sdstrategy where we check on suspend 20072 * status 20073 */ 20074 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 20075 return (ENXIO); 20076 } 20077 20078 ASSERT(!mutex_owned(SD_MUTEX(un))); 20079 20080 20081 is_valid = SD_IS_VALID_LABEL(un); 20082 20083 /* 20084 * Moved this wait from sd_uscsi_strategy to here for 20085 * reasons of deadlock prevention. Internal driver commands, 20086 * specifically those to change a devices power level, result 20087 * in a call to sd_uscsi_strategy. 20088 */ 20089 mutex_enter(SD_MUTEX(un)); 20090 while ((un->un_state == SD_STATE_SUSPENDED) || 20091 (un->un_state == SD_STATE_PM_CHANGING)) { 20092 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 20093 } 20094 /* 20095 * Twiddling the counter here protects commands from now 20096 * through to the top of sd_uscsi_strategy. Without the 20097 * counter inc. a power down, for example, could get in 20098 * after the above check for state is made and before 20099 * execution gets to the top of sd_uscsi_strategy. 20100 * That would cause problems. 
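 * The counter is decremented again at the bottom of sdioctl()
 * once the request has been processed.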
20101 */ 20102 un->un_ncmds_in_driver++; 20103 20104 if (!is_valid && 20105 (flag & (FNDELAY | FNONBLOCK))) { 20106 switch (cmd) { 20107 case DKIOCGGEOM: /* SD_PATH_DIRECT */ 20108 case DKIOCGVTOC: 20109 case DKIOCGAPART: 20110 case DKIOCPARTINFO: 20111 case DKIOCSGEOM: 20112 case DKIOCSAPART: 20113 case DKIOCGETEFI: 20114 case DKIOCPARTITION: 20115 case DKIOCSVTOC: 20116 case DKIOCSETEFI: 20117 case DKIOCGMBOOT: 20118 case DKIOCSMBOOT: 20119 case DKIOCG_PHYGEOM: 20120 case DKIOCG_VIRTGEOM: 20121 /* let cmlb handle it */ 20122 goto skip_ready_valid; 20123 20124 case CDROMPAUSE: 20125 case CDROMRESUME: 20126 case CDROMPLAYMSF: 20127 case CDROMPLAYTRKIND: 20128 case CDROMREADTOCHDR: 20129 case CDROMREADTOCENTRY: 20130 case CDROMSTOP: 20131 case CDROMSTART: 20132 case CDROMVOLCTRL: 20133 case CDROMSUBCHNL: 20134 case CDROMREADMODE2: 20135 case CDROMREADMODE1: 20136 case CDROMREADOFFSET: 20137 case CDROMSBLKMODE: 20138 case CDROMGBLKMODE: 20139 case CDROMGDRVSPEED: 20140 case CDROMSDRVSPEED: 20141 case CDROMCDDA: 20142 case CDROMCDXA: 20143 case CDROMSUBCODE: 20144 if (!ISCD(un)) { 20145 un->un_ncmds_in_driver--; 20146 ASSERT(un->un_ncmds_in_driver >= 0); 20147 mutex_exit(SD_MUTEX(un)); 20148 return (ENOTTY); 20149 } 20150 break; 20151 case FDEJECT: 20152 case DKIOCEJECT: 20153 case CDROMEJECT: 20154 if (!un->un_f_eject_media_supported) { 20155 un->un_ncmds_in_driver--; 20156 ASSERT(un->un_ncmds_in_driver >= 0); 20157 mutex_exit(SD_MUTEX(un)); 20158 return (ENOTTY); 20159 } 20160 break; 20161 case DKIOCFLUSHWRITECACHE: 20162 mutex_exit(SD_MUTEX(un)); 20163 err = sd_send_scsi_TEST_UNIT_READY(un, 0); 20164 if (err != 0) { 20165 mutex_enter(SD_MUTEX(un)); 20166 un->un_ncmds_in_driver--; 20167 ASSERT(un->un_ncmds_in_driver >= 0); 20168 mutex_exit(SD_MUTEX(un)); 20169 return (EIO); 20170 } 20171 mutex_enter(SD_MUTEX(un)); 20172 /* FALLTHROUGH */ 20173 case DKIOCREMOVABLE: 20174 case DKIOCHOTPLUGGABLE: 20175 case DKIOCINFO: 20176 case DKIOCGMEDIAINFO: 20177 case MHIOCENFAILFAST: 20178 case MHIOCSTATUS: 20179 case MHIOCTKOWN: 20180 case MHIOCRELEASE: 20181 case MHIOCGRP_INKEYS: 20182 case MHIOCGRP_INRESV: 20183 case MHIOCGRP_REGISTER: 20184 case MHIOCGRP_RESERVE: 20185 case MHIOCGRP_PREEMPTANDABORT: 20186 case MHIOCGRP_REGISTERANDIGNOREKEY: 20187 case CDROMCLOSETRAY: 20188 case USCSICMD: 20189 goto skip_ready_valid; 20190 default: 20191 break; 20192 } 20193 20194 mutex_exit(SD_MUTEX(un)); 20195 err = sd_ready_and_valid(un); 20196 mutex_enter(SD_MUTEX(un)); 20197 20198 if (err != SD_READY_VALID) { 20199 switch (cmd) { 20200 case DKIOCSTATE: 20201 case CDROMGDRVSPEED: 20202 case CDROMSDRVSPEED: 20203 case FDEJECT: /* for eject command */ 20204 case DKIOCEJECT: 20205 case CDROMEJECT: 20206 case DKIOCREMOVABLE: 20207 case DKIOCHOTPLUGGABLE: 20208 break; 20209 default: 20210 if (un->un_f_has_removable_media) { 20211 err = ENXIO; 20212 } else { 20213 /* Do not map SD_RESERVED_BY_OTHERS to EIO */ 20214 if (err == SD_RESERVED_BY_OTHERS) { 20215 err = EACCES; 20216 } else { 20217 err = EIO; 20218 } 20219 } 20220 un->un_ncmds_in_driver--; 20221 ASSERT(un->un_ncmds_in_driver >= 0); 20222 mutex_exit(SD_MUTEX(un)); 20223 return (err); 20224 } 20225 } 20226 } 20227 20228 skip_ready_valid: 20229 mutex_exit(SD_MUTEX(un)); 20230 20231 switch (cmd) { 20232 case DKIOCINFO: 20233 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCINFO\n"); 20234 err = sd_dkio_ctrl_info(dev, (caddr_t)arg, flag); 20235 break; 20236 20237 case DKIOCGMEDIAINFO: 20238 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCGMEDIAINFO\n"); 20239 err = sd_get_media_info(dev, 
(caddr_t)arg, flag); 20240 break; 20241 20242 case DKIOCGGEOM: 20243 case DKIOCGVTOC: 20244 case DKIOCGAPART: 20245 case DKIOCPARTINFO: 20246 case DKIOCSGEOM: 20247 case DKIOCSAPART: 20248 case DKIOCGETEFI: 20249 case DKIOCPARTITION: 20250 case DKIOCSVTOC: 20251 case DKIOCSETEFI: 20252 case DKIOCGMBOOT: 20253 case DKIOCSMBOOT: 20254 case DKIOCG_PHYGEOM: 20255 case DKIOCG_VIRTGEOM: 20256 SD_TRACE(SD_LOG_IOCTL, un, "DKIOC %d\n", cmd); 20257 20258 /* TUR should spin up */ 20259 20260 if (un->un_f_has_removable_media) 20261 err = sd_send_scsi_TEST_UNIT_READY(un, 20262 SD_CHECK_FOR_MEDIA); 20263 else 20264 err = sd_send_scsi_TEST_UNIT_READY(un, 0); 20265 20266 if (err != 0) 20267 break; 20268 20269 err = cmlb_ioctl(un->un_cmlbhandle, dev, 20270 cmd, arg, flag, cred_p, rval_p, (void *)SD_PATH_DIRECT); 20271 20272 if ((err == 0) && 20273 ((cmd == DKIOCSETEFI) || 20274 (un->un_f_pkstats_enabled) && 20275 (cmd == DKIOCSAPART || cmd == DKIOCSVTOC))) { 20276 20277 tmprval = cmlb_validate(un->un_cmlbhandle, CMLB_SILENT, 20278 (void *)SD_PATH_DIRECT); 20279 if ((tmprval == 0) && un->un_f_pkstats_enabled) { 20280 sd_set_pstats(un); 20281 SD_TRACE(SD_LOG_IO_PARTITION, un, 20282 "sd_ioctl: un:0x%p pstats created and " 20283 "set\n", un); 20284 } 20285 } 20286 20287 if ((cmd == DKIOCSVTOC) || 20288 ((cmd == DKIOCSETEFI) && (tmprval == 0))) { 20289 20290 mutex_enter(SD_MUTEX(un)); 20291 if (un->un_f_devid_supported && 20292 (un->un_f_opt_fab_devid == TRUE)) { 20293 if (un->un_devid == NULL) { 20294 sd_register_devid(un, SD_DEVINFO(un), 20295 SD_TARGET_IS_UNRESERVED); 20296 } else { 20297 /* 20298 * The device id for this disk 20299 * has been fabricated. The 20300 * device id must be preserved 20301 * by writing it back out to 20302 * disk. 20303 */ 20304 if (sd_write_deviceid(un) != 0) { 20305 ddi_devid_free(un->un_devid); 20306 un->un_devid = NULL; 20307 } 20308 } 20309 } 20310 mutex_exit(SD_MUTEX(un)); 20311 } 20312 20313 break; 20314 20315 case DKIOCLOCK: 20316 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCLOCK\n"); 20317 err = sd_send_scsi_DOORLOCK(un, SD_REMOVAL_PREVENT, 20318 SD_PATH_STANDARD); 20319 break; 20320 20321 case DKIOCUNLOCK: 20322 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCUNLOCK\n"); 20323 err = sd_send_scsi_DOORLOCK(un, SD_REMOVAL_ALLOW, 20324 SD_PATH_STANDARD); 20325 break; 20326 20327 case DKIOCSTATE: { 20328 enum dkio_state state; 20329 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCSTATE\n"); 20330 20331 if (ddi_copyin((void *)arg, &state, sizeof (int), flag) != 0) { 20332 err = EFAULT; 20333 } else { 20334 err = sd_check_media(dev, state); 20335 if (err == 0) { 20336 if (ddi_copyout(&un->un_mediastate, (void *)arg, 20337 sizeof (int), flag) != 0) 20338 err = EFAULT; 20339 } 20340 } 20341 break; 20342 } 20343 20344 case DKIOCREMOVABLE: 20345 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCREMOVABLE\n"); 20346 i = un->un_f_has_removable_media ? 1 : 0; 20347 if (ddi_copyout(&i, (void *)arg, sizeof (int), flag) != 0) { 20348 err = EFAULT; 20349 } else { 20350 err = 0; 20351 } 20352 break; 20353 20354 case DKIOCHOTPLUGGABLE: 20355 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCHOTPLUGGABLE\n"); 20356 i = un->un_f_is_hotpluggable ? 
1 : 0; 20357 if (ddi_copyout(&i, (void *)arg, sizeof (int), flag) != 0) { 20358 err = EFAULT; 20359 } else { 20360 err = 0; 20361 } 20362 break; 20363 20364 case DKIOCGTEMPERATURE: 20365 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCGTEMPERATURE\n"); 20366 err = sd_dkio_get_temp(dev, (caddr_t)arg, flag); 20367 break; 20368 20369 case MHIOCENFAILFAST: 20370 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCENFAILFAST\n"); 20371 if ((err = drv_priv(cred_p)) == 0) { 20372 err = sd_mhdioc_failfast(dev, (caddr_t)arg, flag); 20373 } 20374 break; 20375 20376 case MHIOCTKOWN: 20377 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCTKOWN\n"); 20378 if ((err = drv_priv(cred_p)) == 0) { 20379 err = sd_mhdioc_takeown(dev, (caddr_t)arg, flag); 20380 } 20381 break; 20382 20383 case MHIOCRELEASE: 20384 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCRELEASE\n"); 20385 if ((err = drv_priv(cred_p)) == 0) { 20386 err = sd_mhdioc_release(dev); 20387 } 20388 break; 20389 20390 case MHIOCSTATUS: 20391 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCSTATUS\n"); 20392 if ((err = drv_priv(cred_p)) == 0) { 20393 switch (sd_send_scsi_TEST_UNIT_READY(un, 0)) { 20394 case 0: 20395 err = 0; 20396 break; 20397 case EACCES: 20398 *rval_p = 1; 20399 err = 0; 20400 break; 20401 default: 20402 err = EIO; 20403 break; 20404 } 20405 } 20406 break; 20407 20408 case MHIOCQRESERVE: 20409 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCQRESERVE\n"); 20410 if ((err = drv_priv(cred_p)) == 0) { 20411 err = sd_reserve_release(dev, SD_RESERVE); 20412 } 20413 break; 20414 20415 case MHIOCREREGISTERDEVID: 20416 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCREREGISTERDEVID\n"); 20417 if (drv_priv(cred_p) == EPERM) { 20418 err = EPERM; 20419 } else if (!un->un_f_devid_supported) { 20420 err = ENOTTY; 20421 } else { 20422 err = sd_mhdioc_register_devid(dev); 20423 } 20424 break; 20425 20426 case MHIOCGRP_INKEYS: 20427 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_INKEYS\n"); 20428 if (((err = drv_priv(cred_p)) != EPERM) && arg != NULL) { 20429 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 20430 err = ENOTSUP; 20431 } else { 20432 err = sd_mhdioc_inkeys(dev, (caddr_t)arg, 20433 flag); 20434 } 20435 } 20436 break; 20437 20438 case MHIOCGRP_INRESV: 20439 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_INRESV\n"); 20440 if (((err = drv_priv(cred_p)) != EPERM) && arg != NULL) { 20441 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 20442 err = ENOTSUP; 20443 } else { 20444 err = sd_mhdioc_inresv(dev, (caddr_t)arg, flag); 20445 } 20446 } 20447 break; 20448 20449 case MHIOCGRP_REGISTER: 20450 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_REGISTER\n"); 20451 if ((err = drv_priv(cred_p)) != EPERM) { 20452 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 20453 err = ENOTSUP; 20454 } else if (arg != NULL) { 20455 mhioc_register_t reg; 20456 if (ddi_copyin((void *)arg, ®, 20457 sizeof (mhioc_register_t), flag) != 0) { 20458 err = EFAULT; 20459 } else { 20460 err = 20461 sd_send_scsi_PERSISTENT_RESERVE_OUT( 20462 un, SD_SCSI3_REGISTER, 20463 (uchar_t *)®); 20464 } 20465 } 20466 } 20467 break; 20468 20469 case MHIOCGRP_RESERVE: 20470 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_RESERVE\n"); 20471 if ((err = drv_priv(cred_p)) != EPERM) { 20472 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 20473 err = ENOTSUP; 20474 } else if (arg != NULL) { 20475 mhioc_resv_desc_t resv_desc; 20476 if (ddi_copyin((void *)arg, &resv_desc, 20477 sizeof (mhioc_resv_desc_t), flag) != 0) { 20478 err = EFAULT; 20479 } else { 20480 err = 20481 sd_send_scsi_PERSISTENT_RESERVE_OUT( 20482 un, SD_SCSI3_RESERVE, 20483 (uchar_t *)&resv_desc); 20484 } 20485 } 20486 } 20487 break; 20488 20489 
case MHIOCGRP_PREEMPTANDABORT: 20490 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_PREEMPTANDABORT\n"); 20491 if ((err = drv_priv(cred_p)) != EPERM) { 20492 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 20493 err = ENOTSUP; 20494 } else if (arg != NULL) { 20495 mhioc_preemptandabort_t preempt_abort; 20496 if (ddi_copyin((void *)arg, &preempt_abort, 20497 sizeof (mhioc_preemptandabort_t), 20498 flag) != 0) { 20499 err = EFAULT; 20500 } else { 20501 err = 20502 sd_send_scsi_PERSISTENT_RESERVE_OUT( 20503 un, SD_SCSI3_PREEMPTANDABORT, 20504 (uchar_t *)&preempt_abort); 20505 } 20506 } 20507 } 20508 break; 20509 20510 case MHIOCGRP_REGISTERANDIGNOREKEY: 20511 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_REGISTERANDIGNOREKEY\n"); 20512 if ((err = drv_priv(cred_p)) != EPERM) { 20513 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 20514 err = ENOTSUP; 20515 } else if (arg != NULL) { 20516 mhioc_registerandignorekey_t r_and_i; 20517 if (ddi_copyin((void *)arg, (void *)&r_and_i, 20518 sizeof (mhioc_registerandignorekey_t), 20519 flag) != 0) { 20520 err = EFAULT; 20521 } else { 20522 err = 20523 sd_send_scsi_PERSISTENT_RESERVE_OUT( 20524 un, SD_SCSI3_REGISTERANDIGNOREKEY, 20525 (uchar_t *)&r_and_i); 20526 } 20527 } 20528 } 20529 break; 20530 20531 case USCSICMD: 20532 SD_TRACE(SD_LOG_IOCTL, un, "USCSICMD\n"); 20533 cr = ddi_get_cred(); 20534 if ((drv_priv(cred_p) != 0) && (drv_priv(cr) != 0)) { 20535 err = EPERM; 20536 } else { 20537 enum uio_seg uioseg; 20538 uioseg = (flag & FKIOCTL) ? UIO_SYSSPACE : 20539 UIO_USERSPACE; 20540 if (un->un_f_format_in_progress == TRUE) { 20541 err = EAGAIN; 20542 break; 20543 } 20544 err = sd_send_scsi_cmd(dev, (struct uscsi_cmd *)arg, 20545 flag, uioseg, SD_PATH_STANDARD); 20546 } 20547 break; 20548 20549 case CDROMPAUSE: 20550 case CDROMRESUME: 20551 SD_TRACE(SD_LOG_IOCTL, un, "PAUSE-RESUME\n"); 20552 if (!ISCD(un)) { 20553 err = ENOTTY; 20554 } else { 20555 err = sr_pause_resume(dev, cmd); 20556 } 20557 break; 20558 20559 case CDROMPLAYMSF: 20560 SD_TRACE(SD_LOG_IOCTL, un, "CDROMPLAYMSF\n"); 20561 if (!ISCD(un)) { 20562 err = ENOTTY; 20563 } else { 20564 err = sr_play_msf(dev, (caddr_t)arg, flag); 20565 } 20566 break; 20567 20568 case CDROMPLAYTRKIND: 20569 SD_TRACE(SD_LOG_IOCTL, un, "CDROMPLAYTRKIND\n"); 20570 #if defined(__i386) || defined(__amd64) 20571 /* 20572 * not supported on ATAPI CD drives, use CDROMPLAYMSF instead 20573 */ 20574 if (!ISCD(un) || (un->un_f_cfg_is_atapi == TRUE)) { 20575 #else 20576 if (!ISCD(un)) { 20577 #endif 20578 err = ENOTTY; 20579 } else { 20580 err = sr_play_trkind(dev, (caddr_t)arg, flag); 20581 } 20582 break; 20583 20584 case CDROMREADTOCHDR: 20585 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADTOCHDR\n"); 20586 if (!ISCD(un)) { 20587 err = ENOTTY; 20588 } else { 20589 err = sr_read_tochdr(dev, (caddr_t)arg, flag); 20590 } 20591 break; 20592 20593 case CDROMREADTOCENTRY: 20594 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADTOCENTRY\n"); 20595 if (!ISCD(un)) { 20596 err = ENOTTY; 20597 } else { 20598 err = sr_read_tocentry(dev, (caddr_t)arg, flag); 20599 } 20600 break; 20601 20602 case CDROMSTOP: 20603 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSTOP\n"); 20604 if (!ISCD(un)) { 20605 err = ENOTTY; 20606 } else { 20607 err = sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_STOP, 20608 SD_PATH_STANDARD); 20609 } 20610 break; 20611 20612 case CDROMSTART: 20613 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSTART\n"); 20614 if (!ISCD(un)) { 20615 err = ENOTTY; 20616 } else { 20617 err = sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_START, 20618 SD_PATH_STANDARD); 20619 } 20620 break; 
20621
20622 case CDROMCLOSETRAY:
20623 SD_TRACE(SD_LOG_IOCTL, un, "CDROMCLOSETRAY\n");
20624 if (!ISCD(un)) {
20625 err = ENOTTY;
20626 } else {
20627 err = sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_CLOSE,
20628 SD_PATH_STANDARD);
20629 }
20630 break;
20631
20632 case FDEJECT: /* for eject command */
20633 case DKIOCEJECT:
20634 case CDROMEJECT:
20635 SD_TRACE(SD_LOG_IOCTL, un, "EJECT\n");
20636 if (!un->un_f_eject_media_supported) {
20637 err = ENOTTY;
20638 } else {
20639 err = sr_eject(dev);
20640 }
20641 break;
20642
20643 case CDROMVOLCTRL:
20644 SD_TRACE(SD_LOG_IOCTL, un, "CDROMVOLCTRL\n");
20645 if (!ISCD(un)) {
20646 err = ENOTTY;
20647 } else {
20648 err = sr_volume_ctrl(dev, (caddr_t)arg, flag);
20649 }
20650 break;
20651
20652 case CDROMSUBCHNL:
20653 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSUBCHNL\n");
20654 if (!ISCD(un)) {
20655 err = ENOTTY;
20656 } else {
20657 err = sr_read_subchannel(dev, (caddr_t)arg, flag);
20658 }
20659 break;
20660
20661 case CDROMREADMODE2:
20662 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADMODE2\n");
20663 if (!ISCD(un)) {
20664 err = ENOTTY;
20665 } else if (un->un_f_cfg_is_atapi == TRUE) {
20666 /*
20667 * If the drive supports READ CD, use that instead of
20668 * switching the LBA size via a MODE SELECT
20669 * Block Descriptor
20670 */
20671 err = sr_read_cd_mode2(dev, (caddr_t)arg, flag);
20672 } else {
20673 err = sr_read_mode2(dev, (caddr_t)arg, flag);
20674 }
20675 break;
20676
20677 case CDROMREADMODE1:
20678 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADMODE1\n");
20679 if (!ISCD(un)) {
20680 err = ENOTTY;
20681 } else {
20682 err = sr_read_mode1(dev, (caddr_t)arg, flag);
20683 }
20684 break;
20685
20686 case CDROMREADOFFSET:
20687 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADOFFSET\n");
20688 if (!ISCD(un)) {
20689 err = ENOTTY;
20690 } else {
20691 err = sr_read_sony_session_offset(dev, (caddr_t)arg,
20692 flag);
20693 }
20694 break;
20695
20696 case CDROMSBLKMODE:
20697 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSBLKMODE\n");
20698 /*
20699 * There is no means of changing the block size on atapi
20700 * drives, thus return ENOTTY if the drive type is atapi.
20701 */
20702 if (!ISCD(un) || (un->un_f_cfg_is_atapi == TRUE)) {
20703 err = ENOTTY;
20704 } else if (un->un_f_mmc_cap == TRUE) {
20705
20706 /*
20707 * MMC devices do not support changing the
20708 * logical block size.
20709 *
20710 * Note: EINVAL is being returned instead of ENOTTY to
20711 * maintain consistency with the original mmc
20712 * driver update.
20713 */
20714 err = EINVAL;
20715 } else {
20716 mutex_enter(SD_MUTEX(un));
20717 if ((!(un->un_exclopen & (1<<SDPART(dev)))) ||
20718 (un->un_ncmds_in_transport > 0)) {
20719 mutex_exit(SD_MUTEX(un));
20720 err = EINVAL;
20721 } else {
20722 mutex_exit(SD_MUTEX(un));
20723 err = sr_change_blkmode(dev, cmd, arg, flag);
20724 }
20725 }
20726 break;
20727
20728 case CDROMGBLKMODE:
20729 SD_TRACE(SD_LOG_IOCTL, un, "CDROMGBLKMODE\n");
20730 if (!ISCD(un)) {
20731 err = ENOTTY;
20732 } else if ((un->un_f_cfg_is_atapi != FALSE) &&
20733 (un->un_f_blockcount_is_valid != FALSE)) {
20734 /*
20735 * Drive is an ATAPI drive so return target block
20736 * size for ATAPI drives since we cannot change the
20737 * blocksize on ATAPI drives. Used primarily to detect
20738 * if an ATAPI cdrom is present.
20739 */
20740 if (ddi_copyout(&un->un_tgt_blocksize, (void *)arg,
20741 sizeof (int), flag) != 0) {
20742 err = EFAULT;
20743 } else {
20744 err = 0;
20745 }
20746
20747 } else {
20748 /*
20749 * Drive supports changing block sizes via a Mode
20750 * Select.
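 * (sr_change_blkmode() below handles both getting and setting
 * the logical block size for such drives.)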
20751 */ 20752 err = sr_change_blkmode(dev, cmd, arg, flag); 20753 } 20754 break; 20755 20756 case CDROMGDRVSPEED: 20757 case CDROMSDRVSPEED: 20758 SD_TRACE(SD_LOG_IOCTL, un, "CDROMXDRVSPEED\n"); 20759 if (!ISCD(un)) { 20760 err = ENOTTY; 20761 } else if (un->un_f_mmc_cap == TRUE) { 20762 /* 20763 * Note: In the future the driver implementation 20764 * for getting and 20765 * setting cd speed should entail: 20766 * 1) If non-mmc try the Toshiba mode page 20767 * (sr_change_speed) 20768 * 2) If mmc but no support for Real Time Streaming try 20769 * the SET CD SPEED (0xBB) command 20770 * (sr_atapi_change_speed) 20771 * 3) If mmc and support for Real Time Streaming 20772 * try the GET PERFORMANCE and SET STREAMING 20773 * commands (not yet implemented, 4380808) 20774 */ 20775 /* 20776 * As per recent MMC spec, CD-ROM speed is variable 20777 * and changes with LBA. Since there is no such 20778 * things as drive speed now, fail this ioctl. 20779 * 20780 * Note: EINVAL is returned for consistancy of original 20781 * implementation which included support for getting 20782 * the drive speed of mmc devices but not setting 20783 * the drive speed. Thus EINVAL would be returned 20784 * if a set request was made for an mmc device. 20785 * We no longer support get or set speed for 20786 * mmc but need to remain consistent with regard 20787 * to the error code returned. 20788 */ 20789 err = EINVAL; 20790 } else if (un->un_f_cfg_is_atapi == TRUE) { 20791 err = sr_atapi_change_speed(dev, cmd, arg, flag); 20792 } else { 20793 err = sr_change_speed(dev, cmd, arg, flag); 20794 } 20795 break; 20796 20797 case CDROMCDDA: 20798 SD_TRACE(SD_LOG_IOCTL, un, "CDROMCDDA\n"); 20799 if (!ISCD(un)) { 20800 err = ENOTTY; 20801 } else { 20802 err = sr_read_cdda(dev, (void *)arg, flag); 20803 } 20804 break; 20805 20806 case CDROMCDXA: 20807 SD_TRACE(SD_LOG_IOCTL, un, "CDROMCDXA\n"); 20808 if (!ISCD(un)) { 20809 err = ENOTTY; 20810 } else { 20811 err = sr_read_cdxa(dev, (caddr_t)arg, flag); 20812 } 20813 break; 20814 20815 case CDROMSUBCODE: 20816 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSUBCODE\n"); 20817 if (!ISCD(un)) { 20818 err = ENOTTY; 20819 } else { 20820 err = sr_read_all_subcodes(dev, (caddr_t)arg, flag); 20821 } 20822 break; 20823 20824 20825 #ifdef SDDEBUG 20826 /* RESET/ABORTS testing ioctls */ 20827 case DKIOCRESET: { 20828 int reset_level; 20829 20830 if (ddi_copyin((void *)arg, &reset_level, sizeof (int), flag)) { 20831 err = EFAULT; 20832 } else { 20833 SD_INFO(SD_LOG_IOCTL, un, "sdioctl: DKIOCRESET: " 20834 "reset_level = 0x%lx\n", reset_level); 20835 if (scsi_reset(SD_ADDRESS(un), reset_level)) { 20836 err = 0; 20837 } else { 20838 err = EIO; 20839 } 20840 } 20841 break; 20842 } 20843 20844 case DKIOCABORT: 20845 SD_INFO(SD_LOG_IOCTL, un, "sdioctl: DKIOCABORT:\n"); 20846 if (scsi_abort(SD_ADDRESS(un), NULL)) { 20847 err = 0; 20848 } else { 20849 err = EIO; 20850 } 20851 break; 20852 #endif 20853 20854 #ifdef SD_FAULT_INJECTION 20855 /* SDIOC FaultInjection testing ioctls */ 20856 case SDIOCSTART: 20857 case SDIOCSTOP: 20858 case SDIOCINSERTPKT: 20859 case SDIOCINSERTXB: 20860 case SDIOCINSERTUN: 20861 case SDIOCINSERTARQ: 20862 case SDIOCPUSH: 20863 case SDIOCRETRIEVE: 20864 case SDIOCRUN: 20865 SD_INFO(SD_LOG_SDTEST, un, "sdioctl:" 20866 "SDIOC detected cmd:0x%X:\n", cmd); 20867 /* call error generator */ 20868 sd_faultinjection_ioctl(cmd, arg, un); 20869 err = 0; 20870 break; 20871 20872 #endif /* SD_FAULT_INJECTION */ 20873 20874 case DKIOCFLUSHWRITECACHE: 20875 { 20876 struct dk_callback *dkc = (struct 
dk_callback *)arg; 20877 20878 mutex_enter(SD_MUTEX(un)); 20879 if (!un->un_f_sync_cache_supported || 20880 !un->un_f_write_cache_enabled) { 20881 err = un->un_f_sync_cache_supported ? 20882 0 : ENOTSUP; 20883 mutex_exit(SD_MUTEX(un)); 20884 if ((flag & FKIOCTL) && dkc != NULL && 20885 dkc->dkc_callback != NULL) { 20886 (*dkc->dkc_callback)(dkc->dkc_cookie, 20887 err); 20888 /* 20889 * Did callback and reported error. 20890 * Since we did a callback, ioctl 20891 * should return 0. 20892 */ 20893 err = 0; 20894 } 20895 break; 20896 } 20897 mutex_exit(SD_MUTEX(un)); 20898 20899 if ((flag & FKIOCTL) && dkc != NULL && 20900 dkc->dkc_callback != NULL) { 20901 /* async SYNC CACHE request */ 20902 err = sd_send_scsi_SYNCHRONIZE_CACHE(un, dkc); 20903 } else { 20904 /* synchronous SYNC CACHE request */ 20905 err = sd_send_scsi_SYNCHRONIZE_CACHE(un, NULL); 20906 } 20907 } 20908 break; 20909 20910 case DKIOCGETWCE: { 20911 20912 int wce; 20913 20914 if ((err = sd_get_write_cache_enabled(un, &wce)) != 0) { 20915 break; 20916 } 20917 20918 if (ddi_copyout(&wce, (void *)arg, sizeof (wce), flag)) { 20919 err = EFAULT; 20920 } 20921 break; 20922 } 20923 20924 case DKIOCSETWCE: { 20925 20926 int wce, sync_supported; 20927 20928 if (ddi_copyin((void *)arg, &wce, sizeof (wce), flag)) { 20929 err = EFAULT; 20930 break; 20931 } 20932 20933 /* 20934 * Synchronize multiple threads trying to enable 20935 * or disable the cache via the un_f_wcc_cv 20936 * condition variable. 20937 */ 20938 mutex_enter(SD_MUTEX(un)); 20939 20940 /* 20941 * Don't allow the cache to be enabled if the 20942 * config file has it disabled. 20943 */ 20944 if (un->un_f_opt_disable_cache && wce) { 20945 mutex_exit(SD_MUTEX(un)); 20946 err = EINVAL; 20947 break; 20948 } 20949 20950 /* 20951 * Wait for write cache change in progress 20952 * bit to be clear before proceeding. 20953 */ 20954 while (un->un_f_wcc_inprog) 20955 cv_wait(&un->un_wcc_cv, SD_MUTEX(un)); 20956 20957 un->un_f_wcc_inprog = 1; 20958 20959 if (un->un_f_write_cache_enabled && wce == 0) { 20960 /* 20961 * Disable the write cache. Don't clear 20962 * un_f_write_cache_enabled until after 20963 * the mode select and flush are complete. 20964 */ 20965 sync_supported = un->un_f_sync_cache_supported; 20966 20967 /* 20968 * If cache flush is suppressed, we assume that the 20969 * controller firmware will take care of managing the 20970 * write cache for us: no need to explicitly 20971 * disable it. 20972 */ 20973 if (!un->un_f_suppress_cache_flush) { 20974 mutex_exit(SD_MUTEX(un)); 20975 if ((err = sd_cache_control(un, 20976 SD_CACHE_NOCHANGE, 20977 SD_CACHE_DISABLE)) == 0 && 20978 sync_supported) { 20979 err = sd_send_scsi_SYNCHRONIZE_CACHE(un, 20980 NULL); 20981 } 20982 } else { 20983 mutex_exit(SD_MUTEX(un)); 20984 } 20985 20986 mutex_enter(SD_MUTEX(un)); 20987 if (err == 0) { 20988 un->un_f_write_cache_enabled = 0; 20989 } 20990 20991 } else if (!un->un_f_write_cache_enabled && wce != 0) { 20992 /* 20993 * Set un_f_write_cache_enabled first, so there is 20994 * no window where the cache is enabled, but the 20995 * bit says it isn't. 20996 */ 20997 un->un_f_write_cache_enabled = 1; 20998 20999 /* 21000 * If cache flush is suppressed, we assume that the 21001 * controller firmware will take care of managing the 21002 * write cache for us: no need to explicitly 21003 * enable it. 
21004 */ 21005 if (!un->un_f_suppress_cache_flush) { 21006 mutex_exit(SD_MUTEX(un)); 21007 err = sd_cache_control(un, SD_CACHE_NOCHANGE, 21008 SD_CACHE_ENABLE); 21009 } else { 21010 mutex_exit(SD_MUTEX(un)); 21011 } 21012 21013 mutex_enter(SD_MUTEX(un)); 21014 21015 if (err) { 21016 un->un_f_write_cache_enabled = 0; 21017 } 21018 } 21019 21020 un->un_f_wcc_inprog = 0; 21021 cv_broadcast(&un->un_wcc_cv); 21022 mutex_exit(SD_MUTEX(un)); 21023 break; 21024 } 21025 21026 default: 21027 err = ENOTTY; 21028 break; 21029 } 21030 mutex_enter(SD_MUTEX(un)); 21031 un->un_ncmds_in_driver--; 21032 ASSERT(un->un_ncmds_in_driver >= 0); 21033 mutex_exit(SD_MUTEX(un)); 21034 21035 SD_TRACE(SD_LOG_IOCTL, un, "sdioctl: exit: %d\n", err); 21036 return (err); 21037 } 21038 21039 21040 /* 21041 * Function: sd_dkio_ctrl_info 21042 * 21043 * Description: This routine is the driver entry point for handling controller 21044 * information ioctl requests (DKIOCINFO). 21045 * 21046 * Arguments: dev - the device number 21047 * arg - pointer to user provided dk_cinfo structure 21048 * specifying the controller type and attributes. 21049 * flag - this argument is a pass through to ddi_copyxxx() 21050 * directly from the mode argument of ioctl(). 21051 * 21052 * Return Code: 0 21053 * EFAULT 21054 * ENXIO 21055 */ 21056 21057 static int 21058 sd_dkio_ctrl_info(dev_t dev, caddr_t arg, int flag) 21059 { 21060 struct sd_lun *un = NULL; 21061 struct dk_cinfo *info; 21062 dev_info_t *pdip; 21063 int lun, tgt; 21064 21065 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21066 return (ENXIO); 21067 } 21068 21069 info = (struct dk_cinfo *) 21070 kmem_zalloc(sizeof (struct dk_cinfo), KM_SLEEP); 21071 21072 switch (un->un_ctype) { 21073 case CTYPE_CDROM: 21074 info->dki_ctype = DKC_CDROM; 21075 break; 21076 default: 21077 info->dki_ctype = DKC_SCSI_CCS; 21078 break; 21079 } 21080 pdip = ddi_get_parent(SD_DEVINFO(un)); 21081 info->dki_cnum = ddi_get_instance(pdip); 21082 if (strlen(ddi_get_name(pdip)) < DK_DEVLEN) { 21083 (void) strcpy(info->dki_cname, ddi_get_name(pdip)); 21084 } else { 21085 (void) strncpy(info->dki_cname, ddi_node_name(pdip), 21086 DK_DEVLEN - 1); 21087 } 21088 21089 lun = ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un), 21090 DDI_PROP_DONTPASS, SCSI_ADDR_PROP_LUN, 0); 21091 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un), 21092 DDI_PROP_DONTPASS, SCSI_ADDR_PROP_TARGET, 0); 21093 21094 /* Unit Information */ 21095 info->dki_unit = ddi_get_instance(SD_DEVINFO(un)); 21096 info->dki_slave = ((tgt << 3) | lun); 21097 (void) strncpy(info->dki_dname, ddi_driver_name(SD_DEVINFO(un)), 21098 DK_DEVLEN - 1); 21099 info->dki_flags = DKI_FMTVOL; 21100 info->dki_partition = SDPART(dev); 21101 21102 /* Max Transfer size of this device in blocks */ 21103 info->dki_maxtransfer = un->un_max_xfer_size / un->un_sys_blocksize; 21104 info->dki_addr = 0; 21105 info->dki_space = 0; 21106 info->dki_prio = 0; 21107 info->dki_vec = 0; 21108 21109 if (ddi_copyout(info, arg, sizeof (struct dk_cinfo), flag) != 0) { 21110 kmem_free(info, sizeof (struct dk_cinfo)); 21111 return (EFAULT); 21112 } else { 21113 kmem_free(info, sizeof (struct dk_cinfo)); 21114 return (0); 21115 } 21116 } 21117 21118 21119 /* 21120 * Function: sd_get_media_info 21121 * 21122 * Description: This routine is the driver entry point for handling ioctl 21123 * requests for the media type or command set profile used by the 21124 * drive to operate on the media (DKIOCGMEDIAINFO). 
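 *
 * An application-level sketch (illustrative only; "fd" is an
 * assumed open file descriptor for the device):
 *
 *	struct dk_minfo mi;
 *
 *	if (ioctl(fd, DKIOCGMEDIAINFO, &mi) == 0)
 *		(void) printf("type %u lbsize %u\n",
 *		    mi.dki_media_type, mi.dki_lbsize);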
21125 * 21126 * Arguments: dev - the device number 21127 * arg - pointer to user provided dk_minfo structure 21128 * specifying the media type, logical block size and 21129 * drive capacity. 21130 * flag - this argument is a pass through to ddi_copyxxx() 21131 * directly from the mode argument of ioctl(). 21132 * 21133 * Return Code: 0 21134 * EACCESS 21135 * EFAULT 21136 * ENXIO 21137 * EIO 21138 */ 21139 21140 static int 21141 sd_get_media_info(dev_t dev, caddr_t arg, int flag) 21142 { 21143 struct sd_lun *un = NULL; 21144 struct uscsi_cmd com; 21145 struct scsi_inquiry *sinq; 21146 struct dk_minfo media_info; 21147 u_longlong_t media_capacity; 21148 uint64_t capacity; 21149 uint_t lbasize; 21150 uchar_t *out_data; 21151 uchar_t *rqbuf; 21152 int rval = 0; 21153 int rtn; 21154 21155 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 21156 (un->un_state == SD_STATE_OFFLINE)) { 21157 return (ENXIO); 21158 } 21159 21160 SD_TRACE(SD_LOG_IOCTL_DKIO, un, "sd_get_media_info: entry\n"); 21161 21162 out_data = kmem_zalloc(SD_PROFILE_HEADER_LEN, KM_SLEEP); 21163 rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 21164 21165 /* Issue a TUR to determine if the drive is ready with media present */ 21166 rval = sd_send_scsi_TEST_UNIT_READY(un, SD_CHECK_FOR_MEDIA); 21167 if (rval == ENXIO) { 21168 goto done; 21169 } 21170 21171 /* Now get configuration data */ 21172 if (ISCD(un)) { 21173 media_info.dki_media_type = DK_CDROM; 21174 21175 /* Allow SCMD_GET_CONFIGURATION to MMC devices only */ 21176 if (un->un_f_mmc_cap == TRUE) { 21177 rtn = sd_send_scsi_GET_CONFIGURATION(un, &com, rqbuf, 21178 SENSE_LENGTH, out_data, SD_PROFILE_HEADER_LEN, 21179 SD_PATH_STANDARD); 21180 21181 if (rtn) { 21182 /* 21183 * Failed for other than an illegal request 21184 * or command not supported 21185 */ 21186 if ((com.uscsi_status == STATUS_CHECK) && 21187 (com.uscsi_rqstatus == STATUS_GOOD)) { 21188 if ((rqbuf[2] != KEY_ILLEGAL_REQUEST) || 21189 (rqbuf[12] != 0x20)) { 21190 rval = EIO; 21191 goto done; 21192 } 21193 } 21194 } else { 21195 /* 21196 * The GET CONFIGURATION command succeeded 21197 * so set the media type according to the 21198 * returned data 21199 */ 21200 media_info.dki_media_type = out_data[6]; 21201 media_info.dki_media_type <<= 8; 21202 media_info.dki_media_type |= out_data[7]; 21203 } 21204 } 21205 } else { 21206 /* 21207 * The profile list is not available, so we attempt to identify 21208 * the media type based on the inquiry data 21209 */ 21210 sinq = un->un_sd->sd_inq; 21211 if ((sinq->inq_dtype == DTYPE_DIRECT) || 21212 (sinq->inq_dtype == DTYPE_OPTICAL)) { 21213 /* This is a direct access device or optical disk */ 21214 media_info.dki_media_type = DK_FIXED_DISK; 21215 21216 if ((bcmp(sinq->inq_vid, "IOMEGA", 6) == 0) || 21217 (bcmp(sinq->inq_vid, "iomega", 6) == 0)) { 21218 if ((bcmp(sinq->inq_pid, "ZIP", 3) == 0)) { 21219 media_info.dki_media_type = DK_ZIP; 21220 } else if ( 21221 (bcmp(sinq->inq_pid, "jaz", 3) == 0)) { 21222 media_info.dki_media_type = DK_JAZ; 21223 } 21224 } 21225 } else { 21226 /* 21227 * Not a CD, direct access or optical disk so return 21228 * unknown media 21229 */ 21230 media_info.dki_media_type = DK_UNKNOWN; 21231 } 21232 } 21233 21234 /* Now read the capacity so we can provide the lbasize and capacity */ 21235 switch (sd_send_scsi_READ_CAPACITY(un, &capacity, &lbasize, 21236 SD_PATH_DIRECT)) { 21237 case 0: 21238 break; 21239 case EACCES: 21240 rval = EACCES; 21241 goto done; 21242 default: 21243 rval = EIO; 21244 goto done; 21245 } 21246 21247 /* 21248 * If lun 
is expanded dynamically, update the un structure.
21249 */
21250 mutex_enter(SD_MUTEX(un));
21251 if ((un->un_f_blockcount_is_valid == TRUE) &&
21252 (un->un_f_tgt_blocksize_is_valid == TRUE) &&
21253 (capacity > un->un_blockcount)) {
21254 sd_update_block_info(un, lbasize, capacity);
21255 }
21256 mutex_exit(SD_MUTEX(un));
21257
21258 media_info.dki_lbsize = lbasize;
21259 media_capacity = capacity;
21260
21261 /*
21262 * sd_send_scsi_READ_CAPACITY() reports capacity in
21263 * un->un_sys_blocksize chunks, so we need to convert it
21264 * into dki_lbsize (lbasize) chunks.
21265 */
21266 media_capacity *= un->un_sys_blocksize;
21267 media_capacity /= lbasize;
21268 media_info.dki_capacity = media_capacity;
21269
21270 if (ddi_copyout(&media_info, arg, sizeof (struct dk_minfo), flag)) {
21271 rval = EFAULT;
21272 /* Keep the goto; code might be added below this point in the future */
21273 goto done;
21274 }
21275 done:
21276 kmem_free(out_data, SD_PROFILE_HEADER_LEN);
21277 kmem_free(rqbuf, SENSE_LENGTH);
21278 return (rval);
21279 }
21280
21281
21282 /*
21283 * Function: sd_check_media
21284 *
21285 * Description: This utility routine implements the functionality for the
21286 * DKIOCSTATE ioctl. This ioctl blocks the user thread until the
21287 * driver state changes from that specified by the user
21288 * (inserted or ejected). For example, if the user specifies
21289 * DKIO_EJECTED and the current media state is inserted, this
21290 * routine will immediately return DKIO_INSERTED. However, if the
21291 * current media state is not inserted, the user thread will be
21292 * blocked until the drive state changes. If DKIO_NONE is specified
21293 * the user thread will block until a drive state change occurs.
21294 *
21295 * Arguments: dev - the device number
21296 * state - the media state specified by the user; this routine
21297 * blocks while the current media state equals that value.
21298 *
21299 * Return Code: ENXIO
21300 * EIO
21301 * EAGAIN
21302 * EINTR
21303 */
21304
21305 static int
21306 sd_check_media(dev_t dev, enum dkio_state state)
21307 {
21308 struct sd_lun *un = NULL;
21309 enum dkio_state prev_state;
21310 opaque_t token = NULL;
21311 int rval = 0;
21312
21313 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
21314 return (ENXIO);
21315 }
21316
21317 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: entry\n");
21318
21319 mutex_enter(SD_MUTEX(un));
21320
21321 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: "
21322 "state=%x, mediastate=%x\n", state, un->un_mediastate);
21323
21324 prev_state = un->un_mediastate;
21325
21326 /* is there anything to do? */
21327 if (state == un->un_mediastate || un->un_mediastate == DKIO_NONE) {
21328 /*
21329 * submit the request to the scsi_watch service;
21330 * sd_media_watch_cb() does the real work
21331 */
21332 mutex_exit(SD_MUTEX(un));
21333
21334 /*
21335 * This change handles the case where a scsi watch request is
21336 * added to a device that is powered down. To accomplish this,
21337 * we power up the device before adding the scsi watch request,
21338 * since the scsi watch sends a TUR directly to the device,
21339 * which the device cannot handle if it is powered down.
21340 */ 21341 if (sd_pm_entry(un) != DDI_SUCCESS) { 21342 mutex_enter(SD_MUTEX(un)); 21343 goto done; 21344 } 21345 21346 token = scsi_watch_request_submit(SD_SCSI_DEVP(un), 21347 sd_check_media_time, SENSE_LENGTH, sd_media_watch_cb, 21348 (caddr_t)dev); 21349 21350 sd_pm_exit(un); 21351 21352 mutex_enter(SD_MUTEX(un)); 21353 if (token == NULL) { 21354 rval = EAGAIN; 21355 goto done; 21356 } 21357 21358 /* 21359 * This is a special case IOCTL that doesn't return 21360 * until the media state changes. Routine sdpower 21361 * knows about and handles this so don't count it 21362 * as an active cmd in the driver, which would 21363 * keep the device busy to the pm framework. 21364 * If the count isn't decremented the device can't 21365 * be powered down. 21366 */ 21367 un->un_ncmds_in_driver--; 21368 ASSERT(un->un_ncmds_in_driver >= 0); 21369 21370 /* 21371 * if a prior request had been made, this will be the same 21372 * token, as scsi_watch was designed that way. 21373 */ 21374 un->un_swr_token = token; 21375 un->un_specified_mediastate = state; 21376 21377 /* 21378 * now wait for media change 21379 * we will not be signalled unless mediastate == state but it is 21380 * still better to test for this condition, since there is a 21381 * 2 sec cv_broadcast delay when mediastate == DKIO_INSERTED 21382 */ 21383 SD_TRACE(SD_LOG_COMMON, un, 21384 "sd_check_media: waiting for media state change\n"); 21385 while (un->un_mediastate == state) { 21386 if (cv_wait_sig(&un->un_state_cv, SD_MUTEX(un)) == 0) { 21387 SD_TRACE(SD_LOG_COMMON, un, 21388 "sd_check_media: waiting for media state " 21389 "was interrupted\n"); 21390 un->un_ncmds_in_driver++; 21391 rval = EINTR; 21392 goto done; 21393 } 21394 SD_TRACE(SD_LOG_COMMON, un, 21395 "sd_check_media: received signal, state=%x\n", 21396 un->un_mediastate); 21397 } 21398 /* 21399 * Inc the counter to indicate the device once again 21400 * has an active outstanding cmd. 21401 */ 21402 un->un_ncmds_in_driver++; 21403 } 21404 21405 /* invalidate geometry */ 21406 if (prev_state == DKIO_INSERTED && un->un_mediastate == DKIO_EJECTED) { 21407 sr_ejected(un); 21408 } 21409 21410 if (un->un_mediastate == DKIO_INSERTED && prev_state != DKIO_INSERTED) { 21411 uint64_t capacity; 21412 uint_t lbasize; 21413 21414 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: media inserted\n"); 21415 mutex_exit(SD_MUTEX(un)); 21416 /* 21417 * Since the following routines use SD_PATH_DIRECT, we must 21418 * call PM directly before the upcoming disk accesses. This 21419 * may cause the disk to be power/spin up. 
21420 */ 21421 21422 if (sd_pm_entry(un) == DDI_SUCCESS) { 21423 rval = sd_send_scsi_READ_CAPACITY(un, 21424 &capacity, 21425 &lbasize, SD_PATH_DIRECT); 21426 if (rval != 0) { 21427 sd_pm_exit(un); 21428 mutex_enter(SD_MUTEX(un)); 21429 goto done; 21430 } 21431 } else { 21432 rval = EIO; 21433 mutex_enter(SD_MUTEX(un)); 21434 goto done; 21435 } 21436 mutex_enter(SD_MUTEX(un)); 21437 21438 sd_update_block_info(un, lbasize, capacity); 21439 21440 /* 21441 * Check if the media in the device is writable or not 21442 */ 21443 if (ISCD(un)) 21444 sd_check_for_writable_cd(un, SD_PATH_DIRECT); 21445 21446 mutex_exit(SD_MUTEX(un)); 21447 cmlb_invalidate(un->un_cmlbhandle, (void *)SD_PATH_DIRECT); 21448 if ((cmlb_validate(un->un_cmlbhandle, 0, 21449 (void *)SD_PATH_DIRECT) == 0) && un->un_f_pkstats_enabled) { 21450 sd_set_pstats(un); 21451 SD_TRACE(SD_LOG_IO_PARTITION, un, 21452 "sd_check_media: un:0x%p pstats created and " 21453 "set\n", un); 21454 } 21455 21456 rval = sd_send_scsi_DOORLOCK(un, SD_REMOVAL_PREVENT, 21457 SD_PATH_DIRECT); 21458 sd_pm_exit(un); 21459 21460 mutex_enter(SD_MUTEX(un)); 21461 } 21462 done: 21463 un->un_f_watcht_stopped = FALSE; 21464 /* 21465 * Use of this local token and the mutex ensures that we avoid 21466 * some race conditions associated with terminating the 21467 * scsi watch. 21468 */ 21469 if (token) { 21470 un->un_swr_token = (opaque_t)NULL; 21471 mutex_exit(SD_MUTEX(un)); 21472 (void) scsi_watch_request_terminate(token, 21473 SCSI_WATCH_TERMINATE_WAIT); 21474 mutex_enter(SD_MUTEX(un)); 21475 } 21476 21477 /* 21478 * Update the capacity kstat value, if no media previously 21479 * (capacity kstat is 0) and a media has been inserted 21480 * (un_f_blockcount_is_valid == TRUE) 21481 */ 21482 if (un->un_errstats) { 21483 struct sd_errstats *stp = NULL; 21484 21485 stp = (struct sd_errstats *)un->un_errstats->ks_data; 21486 if ((stp->sd_capacity.value.ui64 == 0) && 21487 (un->un_f_blockcount_is_valid == TRUE)) { 21488 stp->sd_capacity.value.ui64 = 21489 (uint64_t)((uint64_t)un->un_blockcount * 21490 un->un_sys_blocksize); 21491 } 21492 } 21493 mutex_exit(SD_MUTEX(un)); 21494 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: done\n"); 21495 return (rval); 21496 } 21497 21498 21499 /* 21500 * Function: sd_delayed_cv_broadcast 21501 * 21502 * Description: Delayed cv_broadcast to allow for target to recover from media 21503 * insertion. 21504 * 21505 * Arguments: arg - driver soft state (unit) structure 21506 */ 21507 21508 static void 21509 sd_delayed_cv_broadcast(void *arg) 21510 { 21511 struct sd_lun *un = arg; 21512 21513 SD_TRACE(SD_LOG_COMMON, un, "sd_delayed_cv_broadcast\n"); 21514 21515 mutex_enter(SD_MUTEX(un)); 21516 un->un_dcvb_timeid = NULL; 21517 cv_broadcast(&un->un_state_cv); 21518 mutex_exit(SD_MUTEX(un)); 21519 } 21520 21521 21522 /* 21523 * Function: sd_media_watch_cb 21524 * 21525 * Description: Callback routine used for support of the DKIOCSTATE ioctl. This 21526 * routine processes the TUR sense data and updates the driver 21527 * state if a transition has occurred. The user thread 21528 * (sd_check_media) is then signalled. 
21529 *
21530 * Arguments: arg - the device 'dev_t' is used for context to discriminate
21531 * among multiple watches that share this callback function.
21532 * resultp - scsi watch facility result packet containing the
21533 * scsi packet, status byte and sense data.
21534 *
21535 * Return Code: 0 for success, -1 for failure
21536 */
21537
21538 static int
21539 sd_media_watch_cb(caddr_t arg, struct scsi_watch_result *resultp)
21540 {
21541 struct sd_lun *un;
21542 struct scsi_status *statusp = resultp->statusp;
21543 uint8_t *sensep = (uint8_t *)resultp->sensep;
21544 enum dkio_state state = DKIO_NONE;
21545 dev_t dev = (dev_t)arg;
21546 uchar_t actual_sense_length;
21547 uint8_t skey, asc, ascq;
21548
21549 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
21550 return (-1);
21551 }
21552 actual_sense_length = resultp->actual_sense_length;
21553
21554 mutex_enter(SD_MUTEX(un));
21555 SD_TRACE(SD_LOG_COMMON, un,
21556 "sd_media_watch_cb: status=%x, sensep=%p, len=%x\n",
21557 *((char *)statusp), (void *)sensep, actual_sense_length);
21558
21559 if (resultp->pkt->pkt_reason == CMD_DEV_GONE) {
21560 un->un_mediastate = DKIO_DEV_GONE;
21561 cv_broadcast(&un->un_state_cv);
21562 mutex_exit(SD_MUTEX(un));
21563
21564 return (0);
21565 }
21566
21567 /*
21568 * If there was a check condition, then sensep points to valid sense data;
21569 * if status was not a check condition but a reservation or busy status,
21570 * then the new state is DKIO_NONE.
21571 */
21572 if (sensep != NULL) {
21573 skey = scsi_sense_key(sensep);
21574 asc = scsi_sense_asc(sensep);
21575 ascq = scsi_sense_ascq(sensep);
21576
21577 SD_INFO(SD_LOG_COMMON, un,
21578 "sd_media_watch_cb: sense KEY=%x, ASC=%x, ASCQ=%x\n",
21579 skey, asc, ascq);
21580 /* This routine only uses up to 13 bytes of sense data. */
21581 if (actual_sense_length >= 13) {
21582 if (skey == KEY_UNIT_ATTENTION) {
21583 if (asc == 0x28) {
21584 state = DKIO_INSERTED;
21585 }
21586 } else if (skey == KEY_NOT_READY) {
21587 /*
21588 * A sense key/ASC/ASCQ of 02/04/02 means that the
21589 * host should send a start command. Explicitly
21590 * leave the media state as is (inserted), since
21591 * the media is present and the host has stopped
21592 * the device for PM reasons. The next true
21593 * read/write to this media will bring the
21594 * device back to the right state for media
21595 * access.
21597 */
21598 if (asc == 0x3a) {
21599 state = DKIO_EJECTED;
21600 } else {
21601 /*
21602 * If the drive is busy with an
21603 * operation or long write, keep the
21604 * media in an inserted state.
21605 */
21606
21607 if ((asc == 0x04) &&
21608 ((ascq == 0x02) ||
21609 (ascq == 0x07) ||
21610 (ascq == 0x08))) {
21611 state = DKIO_INSERTED;
21612 }
21613 }
21614 } else if (skey == KEY_NO_SENSE) {
21615 if ((asc == 0x00) && (ascq == 0x00)) {
21616 /*
21617 * Sense Data 00/00/00 does not provide
21618 * any information about the state of
21619 * the media. Ignore it.
21620 */
21621 mutex_exit(SD_MUTEX(un));
21622 return (0);
21623 }
21624 }
21625 }
21626 } else if ((*((char *)statusp) == STATUS_GOOD) &&
21627 (resultp->pkt->pkt_reason == CMD_CMPLT)) {
21628 state = DKIO_INSERTED;
21629 }
21630
21631 SD_TRACE(SD_LOG_COMMON, un,
21632 "sd_media_watch_cb: state=%x, specified=%x\n",
21633 state, un->un_specified_mediastate);
21634
21635 /*
21636 * now signal the waiting thread if this is *not* the specified state;
21637 * delay the signal if the state is DKIO_INSERTED to allow the target
21638 * to recover
21639 */
21640 if (state != un->un_specified_mediastate) {
21641 un->un_mediastate = state;
21642 if (state == DKIO_INSERTED) {
21643 /*
21644 * delay the signal to give the drive a chance
21645 * to do what it apparently needs to do
21646 */
21647 SD_TRACE(SD_LOG_COMMON, un,
21648 "sd_media_watch_cb: delayed cv_broadcast\n");
21649 if (un->un_dcvb_timeid == NULL) {
21650 un->un_dcvb_timeid =
21651 timeout(sd_delayed_cv_broadcast, un,
21652 drv_usectohz((clock_t)MEDIA_ACCESS_DELAY));
21653 }
21654 } else {
21655 SD_TRACE(SD_LOG_COMMON, un,
21656 "sd_media_watch_cb: immediate cv_broadcast\n");
21657 cv_broadcast(&un->un_state_cv);
21658 }
21659 }
21660 mutex_exit(SD_MUTEX(un));
21661 return (0);
21662 }
21663
21664
21665 /*
21666 * Function: sd_dkio_get_temp
21667 *
21668 * Description: This routine is the driver entry point for handling ioctl
21669 * requests to get the disk temperature.
21670 *
21671 * Arguments: dev - the device number
21672 * arg - pointer to user provided dk_temperature structure.
21673 * flag - this argument is a pass through to ddi_copyxxx()
21674 * directly from the mode argument of ioctl().
21675 *
21676 * Return Code: 0
21677 * EFAULT
21678 * ENXIO
21679 * EAGAIN
21680 */
21681
21682 static int
21683 sd_dkio_get_temp(dev_t dev, caddr_t arg, int flag)
21684 {
21685 struct sd_lun *un = NULL;
21686 struct dk_temperature *dktemp = NULL;
21687 uchar_t *temperature_page;
21688 int rval = 0;
21689 int path_flag = SD_PATH_STANDARD;
21690
21691 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
21692 return (ENXIO);
21693 }
21694
21695 dktemp = kmem_zalloc(sizeof (struct dk_temperature), KM_SLEEP);
21696
21697 /* copyin the disk temp argument to get the user flags */
21698 if (ddi_copyin((void *)arg, dktemp,
21699 sizeof (struct dk_temperature), flag) != 0) {
21700 rval = EFAULT;
21701 goto done;
21702 }
21703
21704 /* Initialize the temperature to invalid. */
21705 dktemp->dkt_cur_temp = (short)DKT_INVALID_TEMP;
21706 dktemp->dkt_ref_temp = (short)DKT_INVALID_TEMP;
21707
21708 /*
21709 * Note: Investigate removing the "bypass pm" semantic.
21710 * Can we just bypass PM always?
21711 */
21712 if (dktemp->dkt_flags & DKT_BYPASS_PM) {
21713 path_flag = SD_PATH_DIRECT;
21714 ASSERT(!mutex_owned(&un->un_pm_mutex));
21715 mutex_enter(&un->un_pm_mutex);
21716 if (SD_DEVICE_IS_IN_LOW_POWER(un)) {
21717 /*
21718 * If DKT_BYPASS_PM is set, and the drive happens to be
21719 * in low power mode, we cannot wake it up; we need to
21720 * return EAGAIN.
21721 */
21722 mutex_exit(&un->un_pm_mutex);
21723 rval = EAGAIN;
21724 goto done;
21725 } else {
21726 /*
21727 * Indicate to PM the device is busy. This is required
21728 * to avoid a race - i.e. the ioctl is issuing a
21729 * command and the pm framework brings down the device
21730 * to low power mode (possible power cut-off on some
21731 * platforms).
21732 */ 21733 mutex_exit(&un->un_pm_mutex); 21734 if (sd_pm_entry(un) != DDI_SUCCESS) { 21735 rval = EAGAIN; 21736 goto done; 21737 } 21738 } 21739 } 21740 21741 temperature_page = kmem_zalloc(TEMPERATURE_PAGE_SIZE, KM_SLEEP); 21742 21743 if ((rval = sd_send_scsi_LOG_SENSE(un, temperature_page, 21744 TEMPERATURE_PAGE_SIZE, TEMPERATURE_PAGE, 1, 0, path_flag)) != 0) { 21745 goto done2; 21746 } 21747 21748 /* 21749 * For the current temperature verify that the parameter length is 0x02 21750 * and the parameter code is 0x00 21751 */ 21752 if ((temperature_page[7] == 0x02) && (temperature_page[4] == 0x00) && 21753 (temperature_page[5] == 0x00)) { 21754 if (temperature_page[9] == 0xFF) { 21755 dktemp->dkt_cur_temp = (short)DKT_INVALID_TEMP; 21756 } else { 21757 dktemp->dkt_cur_temp = (short)(temperature_page[9]); 21758 } 21759 } 21760 21761 /* 21762 * For the reference temperature verify that the parameter 21763 * length is 0x02 and the parameter code is 0x01 21764 */ 21765 if ((temperature_page[13] == 0x02) && (temperature_page[10] == 0x00) && 21766 (temperature_page[11] == 0x01)) { 21767 if (temperature_page[15] == 0xFF) { 21768 dktemp->dkt_ref_temp = (short)DKT_INVALID_TEMP; 21769 } else { 21770 dktemp->dkt_ref_temp = (short)(temperature_page[15]); 21771 } 21772 } 21773 21774 /* Do the copyout regardless of the temperature commands status. */ 21775 if (ddi_copyout(dktemp, (void *)arg, sizeof (struct dk_temperature), 21776 flag) != 0) { 21777 rval = EFAULT; 21778 } 21779 21780 done2: 21781 if (path_flag == SD_PATH_DIRECT) { 21782 sd_pm_exit(un); 21783 } 21784 21785 kmem_free(temperature_page, TEMPERATURE_PAGE_SIZE); 21786 done: 21787 if (dktemp != NULL) { 21788 kmem_free(dktemp, sizeof (struct dk_temperature)); 21789 } 21790 21791 return (rval); 21792 } 21793 21794 21795 /* 21796 * Function: sd_log_page_supported 21797 * 21798 * Description: This routine uses sd_send_scsi_LOG_SENSE to find the list of 21799 * supported log pages. 21800 * 21801 * Arguments: un - 21802 * log_page - 21803 * 21804 * Return Code: -1 - on error (log sense is optional and may not be supported). 21805 * 0 - log page not found. 21806 * 1 - log page found. 21807 */ 21808 21809 static int 21810 sd_log_page_supported(struct sd_lun *un, int log_page) 21811 { 21812 uchar_t *log_page_data; 21813 int i; 21814 int match = 0; 21815 int log_size; 21816 21817 log_page_data = kmem_zalloc(0xFF, KM_SLEEP); 21818 21819 if (sd_send_scsi_LOG_SENSE(un, log_page_data, 0xFF, 0, 0x01, 0, 21820 SD_PATH_DIRECT) != 0) { 21821 SD_ERROR(SD_LOG_COMMON, un, 21822 "sd_log_page_supported: failed log page retrieval\n"); 21823 kmem_free(log_page_data, 0xFF); 21824 return (-1); 21825 } 21826 log_size = log_page_data[3]; 21827 21828 /* 21829 * The list of supported log pages start from the fourth byte. Check 21830 * until we run out of log pages or a match is found. 21831 */ 21832 for (i = 4; (i < (log_size + 4)) && !match; i++) { 21833 if (log_page_data[i] == log_page) { 21834 match++; 21835 } 21836 } 21837 kmem_free(log_page_data, 0xFF); 21838 return (match); 21839 } 21840 21841 21842 /* 21843 * Function: sd_mhdioc_failfast 21844 * 21845 * Description: This routine is the driver entry point for handling ioctl 21846 * requests to enable/disable the multihost failfast option. 21847 * (MHIOCENFAILFAST) 21848 * 21849 * Arguments: dev - the device number 21850 * arg - user specified probing interval. 21851 * flag - this argument is a pass through to ddi_copyxxx() 21852 * directly from the mode argument of ioctl(). 
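 *
 * An application-level sketch (illustrative only; "fd" is an assumed
 * open descriptor, and the interval is in milliseconds per mhd(7I)):
 *
 *	int mh_time = 1000;
 *
 *	if (ioctl(fd, MHIOCENFAILFAST, &mh_time) != 0)
 *		perror("MHIOCENFAILFAST");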
 *
 * Return Code: 0
 *		EFAULT
 *		ENXIO
 */

static int
sd_mhdioc_failfast(dev_t dev, caddr_t arg, int flag)
{
	struct sd_lun	*un = NULL;
	int		mh_time;
	int		rval = 0;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	if (ddi_copyin((void *)arg, &mh_time, sizeof (int), flag))
		return (EFAULT);

	if (mh_time) {
		mutex_enter(SD_MUTEX(un));
		un->un_resvd_status |= SD_FAILFAST;
		mutex_exit(SD_MUTEX(un));
		/*
		 * If mh_time is INT_MAX, then this ioctl is being used for
		 * SCSI-3 PGR purposes, and we don't need to spawn the watch
		 * thread.
		 */
		if (mh_time != INT_MAX) {
			rval = sd_check_mhd(dev, mh_time);
		}
	} else {
		(void) sd_check_mhd(dev, 0);
		mutex_enter(SD_MUTEX(un));
		un->un_resvd_status &= ~SD_FAILFAST;
		mutex_exit(SD_MUTEX(un));
	}
	return (rval);
}


/*
 * Function: sd_mhdioc_takeown
 *
 * Description: This routine is the driver entry point for handling ioctl
 *		requests to forcefully acquire exclusive access rights to the
 *		multihost disk (MHIOCTKOWN).
 *
 * Arguments:	dev	- the device number
 *		arg	- user provided structure specifying the delay
 *			  parameters in milliseconds
 *		flag	- this argument is a pass through to ddi_copyxxx()
 *			  directly from the mode argument of ioctl().
 *
 * Return Code: 0
 *		EFAULT
 *		ENXIO
 */

static int
sd_mhdioc_takeown(dev_t dev, caddr_t arg, int flag)
{
	struct sd_lun		*un = NULL;
	struct mhioctkown	*tkown = NULL;
	int			rval = 0;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	if (arg != NULL) {
		tkown = (struct mhioctkown *)
		    kmem_zalloc(sizeof (struct mhioctkown), KM_SLEEP);
		rval = ddi_copyin(arg, tkown, sizeof (struct mhioctkown),
		    flag);
		if (rval != 0) {
			rval = EFAULT;
			goto error;
		}
	}

	rval = sd_take_ownership(dev, tkown);
	mutex_enter(SD_MUTEX(un));
	if (rval == 0) {
		un->un_resvd_status |= SD_RESERVE;
		if (tkown != NULL && tkown->reinstate_resv_delay != 0) {
			sd_reinstate_resv_delay =
			    tkown->reinstate_resv_delay * 1000;
		} else {
			sd_reinstate_resv_delay = SD_REINSTATE_RESV_DELAY;
		}
		/*
		 * Give the scsi_watch routine interval set by
		 * the MHIOCENFAILFAST ioctl precedence here.
		 */
		if ((un->un_resvd_status & SD_FAILFAST) == 0) {
			mutex_exit(SD_MUTEX(un));
			(void) sd_check_mhd(dev,
			    sd_reinstate_resv_delay / 1000);
			SD_TRACE(SD_LOG_IOCTL_MHD, un,
			    "sd_mhdioc_takeown : %d\n",
			    sd_reinstate_resv_delay);
		} else {
			mutex_exit(SD_MUTEX(un));
		}
		(void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_NOTIFY,
		    sd_mhd_reset_notify_cb, (caddr_t)un);
	} else {
		un->un_resvd_status &= ~SD_RESERVE;
		mutex_exit(SD_MUTEX(un));
	}

error:
	if (tkown != NULL) {
		kmem_free(tkown, sizeof (struct mhioctkown));
	}
	return (rval);
}


/*
 * Function: sd_mhdioc_release
 *
 * Description: This routine is the driver entry point for handling ioctl
 *		requests to release exclusive access rights to the multihost
 *		disk (MHIOCRELEASE).
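 *
 *		For illustration only, the expected (hypothetical) caller
 *		sequence pairs this ioctl with MHIOCTKOWN:
 *
 *			(void) ioctl(fd, MHIOCTKOWN, &tkown);
 *			... exclusive use of the multihost disk ...
 *			(void) ioctl(fd, MHIOCRELEASE);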
 *
 * Arguments:	dev	- the device number
 *
 * Return Code: 0
 *		ENXIO
 */

static int
sd_mhdioc_release(dev_t dev)
{
	struct sd_lun		*un = NULL;
	timeout_id_t		resvd_timeid_save;
	int			resvd_status_save;
	int			rval = 0;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	mutex_enter(SD_MUTEX(un));
	resvd_status_save = un->un_resvd_status;
	un->un_resvd_status &=
	    ~(SD_RESERVE | SD_LOST_RESERVE | SD_WANT_RESERVE);
	if (un->un_resvd_timeid) {
		resvd_timeid_save = un->un_resvd_timeid;
		un->un_resvd_timeid = NULL;
		mutex_exit(SD_MUTEX(un));
		(void) untimeout(resvd_timeid_save);
	} else {
		mutex_exit(SD_MUTEX(un));
	}

	/*
	 * destroy any pending timeout thread that may be attempting to
	 * reinstate reservation on this device.
	 */
	sd_rmv_resv_reclaim_req(dev);

	if ((rval = sd_reserve_release(dev, SD_RELEASE)) == 0) {
		mutex_enter(SD_MUTEX(un));
		if ((un->un_mhd_token) &&
		    ((un->un_resvd_status & SD_FAILFAST) == 0)) {
			mutex_exit(SD_MUTEX(un));
			(void) sd_check_mhd(dev, 0);
		} else {
			mutex_exit(SD_MUTEX(un));
		}
		(void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_CANCEL,
		    sd_mhd_reset_notify_cb, (caddr_t)un);
	} else {
		/*
		 * sd_mhd_watch_cb will restart the resvd recover timeout
		 * thread
		 */
		mutex_enter(SD_MUTEX(un));
		un->un_resvd_status = resvd_status_save;
		mutex_exit(SD_MUTEX(un));
	}
	return (rval);
}


/*
 * Function: sd_mhdioc_register_devid
 *
 * Description: This routine is the driver entry point for handling ioctl
 *		requests to register the device id (MHIOCREREGISTERDEVID).
 *
 *		Note: The implementation for this ioctl has been updated to
 *		be consistent with the original PSARC case (1999/357)
 *		(4375899, 4241671, 4220005)
 *
 * Arguments:	dev	- the device number
 *
 * Return Code: 0
 *		ENXIO
 */

static int
sd_mhdioc_register_devid(dev_t dev)
{
	struct sd_lun	*un = NULL;
	int		rval = 0;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	ASSERT(!mutex_owned(SD_MUTEX(un)));

	mutex_enter(SD_MUTEX(un));

	/* If a devid already exists, de-register it */
	if (un->un_devid != NULL) {
		ddi_devid_unregister(SD_DEVINFO(un));
		/*
		 * After unregistering the devid, we also need to free
		 * the devid memory.
		 */
		ddi_devid_free(un->un_devid);
		un->un_devid = NULL;
	}

	/* Check for reservation conflict */
	mutex_exit(SD_MUTEX(un));
	rval = sd_send_scsi_TEST_UNIT_READY(un, 0);
	mutex_enter(SD_MUTEX(un));

	switch (rval) {
	case 0:
		sd_register_devid(un, SD_DEVINFO(un), SD_TARGET_IS_UNRESERVED);
		break;
	case EACCES:
		break;
	default:
		rval = EIO;
	}

	mutex_exit(SD_MUTEX(un));
	return (rval);
}


/*
 * Function: sd_mhdioc_inkeys
 *
 * Description: This routine is the driver entry point for handling ioctl
 *		requests to issue the SCSI-3 Persistent In Read Keys command
 *		to the device (MHIOCGRP_INKEYS).
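 *
 *		A rough illustration of the argument setup (hypothetical
 *		caller; see sys/mhd.h for the structure definitions):
 *
 *			mhioc_resv_key_t keys[4];
 *			mhioc_key_list_t kl;
 *			mhioc_inkeys_t   ik;
 *
 *			kl.listsize = 4;
 *			kl.list = keys;
 *			ik.li = &kl;
 *			(void) ioctl(fd, MHIOCGRP_INKEYS, &ik);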
 *
 * Arguments:	dev	- the device number
 *		arg	- user provided in_keys structure
 *		flag	- this argument is a pass through to ddi_copyxxx()
 *			  directly from the mode argument of ioctl().
 *
 * Return Code: code returned by sd_persistent_reservation_in_read_keys()
 *		ENXIO
 *		EFAULT
 */

static int
sd_mhdioc_inkeys(dev_t dev, caddr_t arg, int flag)
{
	struct sd_lun	*un;
	mhioc_inkeys_t	inkeys;
	int		rval = 0;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

#ifdef _MULTI_DATAMODEL
	switch (ddi_model_convert_from(flag & FMODELS)) {
	case DDI_MODEL_ILP32: {
		struct mhioc_inkeys32	inkeys32;

		if (ddi_copyin(arg, &inkeys32,
		    sizeof (struct mhioc_inkeys32), flag) != 0) {
			return (EFAULT);
		}
		inkeys.li = (mhioc_key_list_t *)(uintptr_t)inkeys32.li;
		if ((rval = sd_persistent_reservation_in_read_keys(un,
		    &inkeys, flag)) != 0) {
			return (rval);
		}
		inkeys32.generation = inkeys.generation;
		if (ddi_copyout(&inkeys32, arg, sizeof (struct mhioc_inkeys32),
		    flag) != 0) {
			return (EFAULT);
		}
		break;
	}
	case DDI_MODEL_NONE:
		if (ddi_copyin(arg, &inkeys, sizeof (mhioc_inkeys_t),
		    flag) != 0) {
			return (EFAULT);
		}
		if ((rval = sd_persistent_reservation_in_read_keys(un,
		    &inkeys, flag)) != 0) {
			return (rval);
		}
		if (ddi_copyout(&inkeys, arg, sizeof (mhioc_inkeys_t),
		    flag) != 0) {
			return (EFAULT);
		}
		break;
	}

#else /* ! _MULTI_DATAMODEL */

	if (ddi_copyin(arg, &inkeys, sizeof (mhioc_inkeys_t), flag) != 0) {
		return (EFAULT);
	}
	rval = sd_persistent_reservation_in_read_keys(un, &inkeys, flag);
	if (rval != 0) {
		return (rval);
	}
	if (ddi_copyout(&inkeys, arg, sizeof (mhioc_inkeys_t), flag) != 0) {
		return (EFAULT);
	}

#endif /* _MULTI_DATAMODEL */

	return (rval);
}


/*
 * Function: sd_mhdioc_inresv
 *
 * Description: This routine is the driver entry point for handling ioctl
 *		requests to issue the SCSI-3 Persistent In Read Reservations
 *		command to the device (MHIOCGRP_INRESV).
 *
 * Arguments:	dev	- the device number
 *		arg	- user provided in_resv structure
 *		flag	- this argument is a pass through to ddi_copyxxx()
 *			  directly from the mode argument of ioctl().
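 *
 *		Note: a caller whose descriptor list is too small is
 *		generally not failed outright; listlen reports the number of
 *		descriptors available so the caller can retry with adequate
 *		storage (see sd_persistent_reservation_in_read_resv() for
 *		the authoritative behavior).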
 *
 * Return Code: code returned by sd_persistent_reservation_in_read_resv()
 *		ENXIO
 *		EFAULT
 */

static int
sd_mhdioc_inresv(dev_t dev, caddr_t arg, int flag)
{
	struct sd_lun		*un;
	mhioc_inresvs_t		inresvs;
	int			rval = 0;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

#ifdef _MULTI_DATAMODEL

	switch (ddi_model_convert_from(flag & FMODELS)) {
	case DDI_MODEL_ILP32: {
		struct mhioc_inresvs32	inresvs32;

		if (ddi_copyin(arg, &inresvs32,
		    sizeof (struct mhioc_inresvs32), flag) != 0) {
			return (EFAULT);
		}
		inresvs.li = (mhioc_resv_desc_list_t *)(uintptr_t)inresvs32.li;
		if ((rval = sd_persistent_reservation_in_read_resv(un,
		    &inresvs, flag)) != 0) {
			return (rval);
		}
		inresvs32.generation = inresvs.generation;
		if (ddi_copyout(&inresvs32, arg,
		    sizeof (struct mhioc_inresvs32), flag) != 0) {
			return (EFAULT);
		}
		break;
	}
	case DDI_MODEL_NONE:
		if (ddi_copyin(arg, &inresvs,
		    sizeof (mhioc_inresvs_t), flag) != 0) {
			return (EFAULT);
		}
		if ((rval = sd_persistent_reservation_in_read_resv(un,
		    &inresvs, flag)) != 0) {
			return (rval);
		}
		if (ddi_copyout(&inresvs, arg,
		    sizeof (mhioc_inresvs_t), flag) != 0) {
			return (EFAULT);
		}
		break;
	}

#else /* ! _MULTI_DATAMODEL */

	if (ddi_copyin(arg, &inresvs, sizeof (mhioc_inresvs_t), flag) != 0) {
		return (EFAULT);
	}
	rval = sd_persistent_reservation_in_read_resv(un, &inresvs, flag);
	if (rval != 0) {
		return (rval);
	}
	if (ddi_copyout(&inresvs, arg, sizeof (mhioc_inresvs_t), flag)) {
		return (EFAULT);
	}

#endif /* ! _MULTI_DATAMODEL */

	return (rval);
}


/*
 * The following routines support the clustering functionality described below
 * and implement lost reservation reclaim functionality.
 *
 * Clustering
 * ----------
 * The clustering code uses two different, independent forms of SCSI
 * reservation: traditional SCSI-2 Reserve/Release and the newer SCSI-3
 * Persistent Group Reservations. For any particular disk, it will use either
 * SCSI-2 or SCSI-3 PGR, but never both at the same time for the same disk.
 *
 * SCSI-2
 * The cluster software takes ownership of a multi-hosted disk by issuing the
 * MHIOCTKOWN ioctl to the disk driver. It releases ownership by issuing the
 * MHIOCRELEASE ioctl. Closely related is the MHIOCENFAILFAST ioctl -- a
 * cluster, just after taking ownership of the disk with the MHIOCTKOWN ioctl,
 * then issues the MHIOCENFAILFAST ioctl. This ioctl "enables failfast" in the
 * driver. The meaning of failfast is that if the driver (on this host) ever
 * encounters the scsi error return code RESERVATION_CONFLICT from the device,
 * it should immediately panic the host. The motivation for this ioctl is that
 * if this host does encounter reservation conflict, the underlying cause is
 * that some other host of the cluster has decided that this host is no longer
 * in the cluster and has seized control of the disks for itself. Since this
 * host is no longer in the cluster, it ought to panic itself.
 * The MHIOCENFAILFAST ioctl does two things:
 *	(a) it sets a flag that will cause any returned RESERVATION_CONFLICT
 *	    error to panic the host
 *	(b) it sets up a periodic timer to test whether this host still has
 *	    "access" (in that no other host has reserved the device): if the
 *	    periodic timer gets RESERVATION_CONFLICT, the host is panicked.
 *	    The purpose of that periodic timer is to handle scenarios where
 *	    the host is otherwise temporarily quiescent, temporarily doing
 *	    no real i/o.
 * The MHIOCTKOWN ioctl will "break" a reservation that is held by another
 * host, by issuing a SCSI Bus Device Reset. It will then issue a SCSI
 * Reserve for the device itself.
 *
 * SCSI-3 PGR
 * A direct semantic implementation of the SCSI-3 Persistent Reservation
 * facility is supported through the shared multihost disk ioctls
 * (MHIOCGRP_INKEYS, MHIOCGRP_INRESV, MHIOCGRP_REGISTER, MHIOCGRP_RESERVE,
 * MHIOCGRP_PREEMPTANDABORT).
 *
 * Reservation Reclaim:
 * --------------------
 * To support the lost reservation reclaim operations this driver creates a
 * single thread to handle reinstating reservations on all devices that have
 * lost reservations. sd_resv_reclaim_requests are logged for all devices
 * that have LOST RESERVATIONS when the scsi watch facility calls back
 * sd_mhd_watch_cb, and the reservation reclaim thread loops through the
 * requests to regain the lost reservations.
 */

/*
 * Function: sd_check_mhd()
 *
 * Description: This function sets up and submits a scsi watch request or
 *		terminates an existing watch request. This routine is used in
 *		support of reservation reclaim.
 *
 * Arguments:	dev - the device 'dev_t' is used for context to discriminate
 *		      among multiple watches that share the callback function
 *		interval - the number of milliseconds specifying the watch
 *			   interval for issuing TEST UNIT READY commands. If
 *			   set to 0 the watch should be terminated. If the
 *			   interval is set to 0 and the device is required
 *			   to hold reservation while disabling failfast, the
 *			   watch is restarted with an interval of
 *			   reinstate_resv_delay.
 *
 * Return Code: 0	- Successful submit/terminate of scsi watch request
 *		ENXIO	- Indicates an invalid device was specified
 *		EAGAIN	- Unable to submit the scsi watch request
 */

static int
sd_check_mhd(dev_t dev, int interval)
{
	struct sd_lun	*un;
	opaque_t	token;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	/* is this a watch termination request? */
	if (interval == 0) {
		mutex_enter(SD_MUTEX(un));
		/* if there is an existing watch task then terminate it */
		if (un->un_mhd_token) {
			token = un->un_mhd_token;
			un->un_mhd_token = NULL;
			mutex_exit(SD_MUTEX(un));
			(void) scsi_watch_request_terminate(token,
			    SCSI_WATCH_TERMINATE_ALL_WAIT);
			mutex_enter(SD_MUTEX(un));
		} else {
			mutex_exit(SD_MUTEX(un));
			/*
			 * Note: If we return here we don't check for the
			 * failfast case. This is the original legacy
			 * implementation, but perhaps we should be checking
			 * the failfast case.
			 */
			return (0);
		}
		/*
		 * If the device is required to hold reservation while
		 * disabling failfast, we need to restart the scsi_watch
		 * routine with an interval of reinstate_resv_delay.
		 */
		if (un->un_resvd_status & SD_RESERVE) {
			interval = sd_reinstate_resv_delay / 1000;
		} else {
			/* no failfast so bail */
			mutex_exit(SD_MUTEX(un));
			return (0);
		}
		mutex_exit(SD_MUTEX(un));
	}

	/*
	 * adjust minimum time interval to 1 second,
	 * and convert from msecs to usecs
	 */
	if (interval > 0 && interval < 1000) {
		interval = 1000;
	}
	interval *= 1000;

	/*
	 * submit the request to the scsi_watch service
	 */
	token = scsi_watch_request_submit(SD_SCSI_DEVP(un), interval,
	    SENSE_LENGTH, sd_mhd_watch_cb, (caddr_t)dev);
	if (token == NULL) {
		return (EAGAIN);
	}

	/*
	 * save token for termination later on
	 */
	mutex_enter(SD_MUTEX(un));
	un->un_mhd_token = token;
	mutex_exit(SD_MUTEX(un));
	return (0);
}


/*
 * Function: sd_mhd_watch_cb()
 *
 * Description: This function is the callback function used by the scsi watch
 *		facility. The scsi watch facility sends the "Test Unit Ready"
 *		and processes the status. If applicable (i.e. a "Unit
 *		Attention" status and automatic "Request Sense" not used) the
 *		scsi watch facility will send a "Request Sense" and retrieve
 *		the sense data to be passed to this callback function. In
 *		either case (the HBA performing the automatic "Request Sense"
 *		or the facility submitting one), this callback is passed the
 *		status and sense data.
 *
 * Arguments:	arg	- the device 'dev_t' is used for context to
 *			  discriminate among multiple watches that share this
 *			  callback function
 *		resultp	- scsi watch facility result packet containing scsi
 *			  packet, status byte and sense data
 *
 * Return Code: 0	 - continue the watch task
 *		non-zero - terminate the watch task
 */

static int
sd_mhd_watch_cb(caddr_t arg, struct scsi_watch_result *resultp)
{
	struct sd_lun		*un;
	struct scsi_status	*statusp;
	uint8_t			*sensep;
	struct scsi_pkt		*pkt;
	uchar_t			actual_sense_length;
	dev_t			dev = (dev_t)arg;

	ASSERT(resultp != NULL);
	statusp			= resultp->statusp;
	sensep			= (uint8_t *)resultp->sensep;
	pkt			= resultp->pkt;
	actual_sense_length	= resultp->actual_sense_length;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	SD_TRACE(SD_LOG_IOCTL_MHD, un,
	    "sd_mhd_watch_cb: reason '%s', status '%s'\n",
	    scsi_rname(pkt->pkt_reason), sd_sname(*((unsigned char *)statusp)));

	/* Begin processing of the status and/or sense data */
	if (pkt->pkt_reason != CMD_CMPLT) {
		/* Handle the incomplete packet */
		sd_mhd_watch_incomplete(un, pkt);
		return (0);
	} else if (*((unsigned char *)statusp) != STATUS_GOOD) {
		if (*((unsigned char *)statusp)
		    == STATUS_RESERVATION_CONFLICT) {
			/*
			 * Handle a reservation conflict by panicking if
			 * configured for failfast or by logging the conflict
			 * and updating the reservation status
			 */
			mutex_enter(SD_MUTEX(un));
			if ((un->un_resvd_status & SD_FAILFAST) &&
			    (sd_failfast_enable)) {
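				/*
				 * Both the per-device failfast flag and the
				 * global sd_failfast_enable tunable are set:
				 * panic the host; this call does not return.
				 */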
				sd_panic_for_res_conflict(un);
				/*NOTREACHED*/
			}
			SD_INFO(SD_LOG_IOCTL_MHD, un,
			    "sd_mhd_watch_cb: Reservation Conflict\n");
			un->un_resvd_status |= SD_RESERVATION_CONFLICT;
			mutex_exit(SD_MUTEX(un));
		}
	}

	if (sensep != NULL) {
		if (actual_sense_length >= (SENSE_LENGTH - 2)) {
			mutex_enter(SD_MUTEX(un));
			if ((scsi_sense_asc(sensep) ==
			    SD_SCSI_RESET_SENSE_CODE) &&
			    (un->un_resvd_status & SD_RESERVE)) {
				/*
				 * The additional sense code indicates a power
				 * on or bus device reset has occurred; update
				 * the reservation status.
				 */
				un->un_resvd_status |=
				    (SD_LOST_RESERVE | SD_WANT_RESERVE);
				SD_INFO(SD_LOG_IOCTL_MHD, un,
				    "sd_mhd_watch_cb: Lost Reservation\n");
			}
		} else {
			return (0);
		}
	} else {
		mutex_enter(SD_MUTEX(un));
	}

	if ((un->un_resvd_status & SD_RESERVE) &&
	    (un->un_resvd_status & SD_LOST_RESERVE)) {
		if (un->un_resvd_status & SD_WANT_RESERVE) {
			/*
			 * A reset occurred in between the last probe and this
			 * one, so if a timeout is pending cancel it.
			 */
			if (un->un_resvd_timeid) {
				timeout_id_t temp_id = un->un_resvd_timeid;
				un->un_resvd_timeid = NULL;
				mutex_exit(SD_MUTEX(un));
				(void) untimeout(temp_id);
				mutex_enter(SD_MUTEX(un));
			}
			un->un_resvd_status &= ~SD_WANT_RESERVE;
		}
		if (un->un_resvd_timeid == 0) {
			/* Schedule a timeout to handle the lost reservation */
			un->un_resvd_timeid = timeout(sd_mhd_resvd_recover,
			    (void *)dev,
			    drv_usectohz(sd_reinstate_resv_delay));
		}
	}
	mutex_exit(SD_MUTEX(un));
	return (0);
}


/*
 * Function: sd_mhd_watch_incomplete()
 *
 * Description: This function is used to find out why a scsi pkt sent by the
 *		scsi watch facility was not completed. Under some scenarios
 *		this routine will return. Otherwise it will send a bus reset
 *		to see if the drive is still online.
 *
 * Arguments:	un  - driver soft state (unit) structure
 *		pkt - incomplete scsi pkt
 */

static void
sd_mhd_watch_incomplete(struct sd_lun *un, struct scsi_pkt *pkt)
{
	int	be_chatty;
	int	perr;

	ASSERT(pkt != NULL);
	ASSERT(un != NULL);
	be_chatty	= (!(pkt->pkt_flags & FLAG_SILENT));
	perr		= (pkt->pkt_statistics & STAT_PERR);

	mutex_enter(SD_MUTEX(un));
	if (un->un_state == SD_STATE_DUMPING) {
		mutex_exit(SD_MUTEX(un));
		return;
	}

	switch (pkt->pkt_reason) {
	case CMD_UNX_BUS_FREE:
		/*
		 * If we had a parity error that caused the target to drop
		 * BSY*, don't be chatty about it.
		 */
		if (perr && be_chatty) {
			be_chatty = 0;
		}
		break;
	case CMD_TAG_REJECT:
		/*
		 * The SCSI-2 spec states that a tag reject will be sent by the
		 * target if tagged queuing is not supported. A tag reject may
		 * also be sent during certain initialization periods or to
		 * control internal resources. For the latter case the target
		 * may also return Queue Full.
		 *
		 * If this driver receives a tag reject from a target that is
		 * going through an init period or controlling internal
		 * resources, tagged queuing will be disabled.
		 * This is a less than optimal behavior, but the driver is
		 * unable to determine the target state and assumes tagged
		 * queueing is not supported.
		 */
		pkt->pkt_flags = 0;
		un->un_tagflags = 0;

		if (un->un_f_opt_queueing == TRUE) {
			un->un_throttle = min(un->un_throttle, 3);
		} else {
			un->un_throttle = 1;
		}
		mutex_exit(SD_MUTEX(un));
		(void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1);
		mutex_enter(SD_MUTEX(un));
		break;
	case CMD_INCOMPLETE:
		/*
		 * The transport stopped with an abnormal state; fall through
		 * and reset the target and/or bus, unless selection did not
		 * complete (indicated by STATE_GOT_BUS), in which case we
		 * don't want to go through a target/bus reset.
		 */
		if (pkt->pkt_state == STATE_GOT_BUS) {
			break;
		}
		/*FALLTHROUGH*/

	case CMD_TIMEOUT:
	default:
		/*
		 * The lun may still be running the command, so a lun reset
		 * should be attempted. If the lun reset fails or cannot be
		 * issued, then try a target reset. Lastly try a bus reset.
		 */
		if ((pkt->pkt_statistics &
		    (STAT_BUS_RESET | STAT_DEV_RESET | STAT_ABORTED)) == 0) {
			int reset_retval = 0;
			mutex_exit(SD_MUTEX(un));
			if (un->un_f_allow_bus_device_reset == TRUE) {
				if (un->un_f_lun_reset_enabled == TRUE) {
					reset_retval =
					    scsi_reset(SD_ADDRESS(un),
					    RESET_LUN);
				}
				if (reset_retval == 0) {
					reset_retval =
					    scsi_reset(SD_ADDRESS(un),
					    RESET_TARGET);
				}
			}
			if (reset_retval == 0) {
				(void) scsi_reset(SD_ADDRESS(un), RESET_ALL);
			}
			mutex_enter(SD_MUTEX(un));
		}
		break;
	}

	/* A device/bus reset has occurred; update the reservation status. */
	if ((pkt->pkt_reason == CMD_RESET) || (pkt->pkt_statistics &
	    (STAT_BUS_RESET | STAT_DEV_RESET))) {
		if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) {
			un->un_resvd_status |=
			    (SD_LOST_RESERVE | SD_WANT_RESERVE);
			SD_INFO(SD_LOG_IOCTL_MHD, un,
			    "sd_mhd_watch_incomplete: Lost Reservation\n");
		}
	}

	/*
	 * The disk has been turned off; update the device state.
	 *
	 * Note: Should we be offlining the disk here?
	 */
	if (pkt->pkt_state == STATE_GOT_BUS) {
		SD_INFO(SD_LOG_IOCTL_MHD, un, "sd_mhd_watch_incomplete: "
		    "Disk not responding to selection\n");
		if (un->un_state != SD_STATE_OFFLINE) {
			New_state(un, SD_STATE_OFFLINE);
		}
	} else if (be_chatty) {
		/*
		 * suppress messages if they are all the same pkt_reason;
		 * with TQ, many (up to 256) are returned with the same
		 * pkt_reason
		 */
		if (pkt->pkt_reason != un->un_last_pkt_reason) {
			SD_ERROR(SD_LOG_IOCTL_MHD, un,
			    "sd_mhd_watch_incomplete: "
			    "SCSI transport failed: reason '%s'\n",
			    scsi_rname(pkt->pkt_reason));
		}
	}
	un->un_last_pkt_reason = pkt->pkt_reason;
	mutex_exit(SD_MUTEX(un));
}


/*
 * Function: sd_sname()
 *
 * Description: This is a simple little routine to return a string containing
 *		a printable description of a command status byte for use in
 *		logging.
 *
 * Arguments:	status - the status byte
 *
 * Return Code: char * - string containing the status description.
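 *
 *		Note: the returned strings are string constants; callers
 *		must not modify or free them.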
 */

static char *
sd_sname(uchar_t status)
{
	switch (status & STATUS_MASK) {
	case STATUS_GOOD:
		return ("good status");
	case STATUS_CHECK:
		return ("check condition");
	case STATUS_MET:
		return ("condition met");
	case STATUS_BUSY:
		return ("busy");
	case STATUS_INTERMEDIATE:
		return ("intermediate");
	case STATUS_INTERMEDIATE_MET:
		return ("intermediate - condition met");
	case STATUS_RESERVATION_CONFLICT:
		return ("reservation_conflict");
	case STATUS_TERMINATED:
		return ("command terminated");
	case STATUS_QFULL:
		return ("queue full");
	default:
		return ("<unknown status>");
	}
}


/*
 * Function: sd_mhd_resvd_recover()
 *
 * Description: This function adds a reservation entry to the
 *		sd_resv_reclaim_request list and signals the reservation
 *		reclaim thread that there is work pending. If the reservation
 *		reclaim thread has not been previously created, this function
 *		will kick it off.
 *
 * Arguments:	arg - the device 'dev_t' is used for context to discriminate
 *		      among multiple watches that share this callback function
 *
 * Context: This routine is called by timeout() and is run in interrupt
 *	    context. It must not sleep or call other functions which may
 *	    sleep.
 */

static void
sd_mhd_resvd_recover(void *arg)
{
	dev_t			dev = (dev_t)arg;
	struct sd_lun		*un;
	struct sd_thr_request	*sd_treq = NULL;
	struct sd_thr_request	*sd_cur = NULL;
	struct sd_thr_request	*sd_prev = NULL;
	int			already_there = 0;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return;
	}

	mutex_enter(SD_MUTEX(un));
	un->un_resvd_timeid = NULL;
	if (un->un_resvd_status & SD_WANT_RESERVE) {
		/*
		 * There was a reset, so don't issue the reserve; allow the
		 * sd_mhd_watch_cb callback function to notice this and
		 * reschedule the timeout for reservation.
		 */
		mutex_exit(SD_MUTEX(un));
		return;
	}
	mutex_exit(SD_MUTEX(un));

	/*
	 * Add this device to the sd_resv_reclaim_request list and the
	 * sd_resv_reclaim_thread should take care of the rest.
	 *
	 * Note: We can't sleep in this context so if the memory allocation
	 * fails, allow the sd_mhd_watch_cb callback function to notice this
	 * and reschedule the timeout for reservation.
	 * (4378460)
	 */
	sd_treq = (struct sd_thr_request *)
	    kmem_zalloc(sizeof (struct sd_thr_request), KM_NOSLEEP);
	if (sd_treq == NULL) {
		return;
	}

	sd_treq->sd_thr_req_next = NULL;
	sd_treq->dev = dev;
	mutex_enter(&sd_tr.srq_resv_reclaim_mutex);
	if (sd_tr.srq_thr_req_head == NULL) {
		sd_tr.srq_thr_req_head = sd_treq;
	} else {
		sd_cur = sd_prev = sd_tr.srq_thr_req_head;
		for (; sd_cur != NULL; sd_cur = sd_cur->sd_thr_req_next) {
			if (sd_cur->dev == dev) {
				/*
				 * already in Queue so don't log
				 * another request for the device
				 */
				already_there = 1;
				break;
			}
			sd_prev = sd_cur;
		}
		if (!already_there) {
			SD_INFO(SD_LOG_IOCTL_MHD, un, "sd_mhd_resvd_recover: "
			    "logging request for %lx\n", dev);
			sd_prev->sd_thr_req_next = sd_treq;
		} else {
			kmem_free(sd_treq, sizeof (struct sd_thr_request));
		}
	}

	/*
	 * Create a kernel thread to do the reservation reclaim and free up
	 * this thread. We cannot block this thread while we go away to do
	 * the reservation reclaim.
	 */
	if (sd_tr.srq_resv_reclaim_thread == NULL)
		sd_tr.srq_resv_reclaim_thread = thread_create(NULL, 0,
		    sd_resv_reclaim_thread, NULL,
		    0, &p0, TS_RUN, v.v_maxsyspri - 2);

	/* Tell the reservation reclaim thread that it has work to do */
	cv_signal(&sd_tr.srq_resv_reclaim_cv);
	mutex_exit(&sd_tr.srq_resv_reclaim_mutex);
}

/*
 * Function: sd_resv_reclaim_thread()
 *
 * Description: This function implements the reservation reclaim operations.
 *
 * Arguments: none; the queue of pending reclaim requests lives in the
 *	      global sd_tr structure.
 */

static void
sd_resv_reclaim_thread()
{
	struct sd_lun		*un;
	struct sd_thr_request	*sd_mhreq;

	/* Wait for work */
	mutex_enter(&sd_tr.srq_resv_reclaim_mutex);
	if (sd_tr.srq_thr_req_head == NULL) {
		cv_wait(&sd_tr.srq_resv_reclaim_cv,
		    &sd_tr.srq_resv_reclaim_mutex);
	}

	/* Loop while we have work */
	while ((sd_tr.srq_thr_cur_req = sd_tr.srq_thr_req_head) != NULL) {
		un = ddi_get_soft_state(sd_state,
		    SDUNIT(sd_tr.srq_thr_cur_req->dev));
		if (un == NULL) {
			/*
			 * softstate structure is NULL so just
			 * dequeue the request and continue
			 */
			sd_tr.srq_thr_req_head =
			    sd_tr.srq_thr_cur_req->sd_thr_req_next;
			kmem_free(sd_tr.srq_thr_cur_req,
			    sizeof (struct sd_thr_request));
			continue;
		}

		/* dequeue the request */
		sd_mhreq = sd_tr.srq_thr_cur_req;
		sd_tr.srq_thr_req_head =
		    sd_tr.srq_thr_cur_req->sd_thr_req_next;
		mutex_exit(&sd_tr.srq_resv_reclaim_mutex);

		/*
		 * Reclaim reservation only if SD_RESERVE is still set. There
		 * may have been a call to MHIOCRELEASE before we got here.
		 */
		mutex_enter(SD_MUTEX(un));
		if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) {
			/*
			 * Note: The SD_LOST_RESERVE flag is cleared before
			 * reclaiming the reservation.
			 * If this is done after the call to
			 * sd_reserve_release, a reservation loss in the
			 * window between pkt completion of the reserve cmd
			 * and the mutex_enter below may not be recognized.
			 */
			un->un_resvd_status &= ~SD_LOST_RESERVE;
			mutex_exit(SD_MUTEX(un));

			if (sd_reserve_release(sd_mhreq->dev,
			    SD_RESERVE) == 0) {
				mutex_enter(SD_MUTEX(un));
				un->un_resvd_status |= SD_RESERVE;
				mutex_exit(SD_MUTEX(un));
				SD_INFO(SD_LOG_IOCTL_MHD, un,
				    "sd_resv_reclaim_thread: "
				    "Reservation Recovered\n");
			} else {
				mutex_enter(SD_MUTEX(un));
				un->un_resvd_status |= SD_LOST_RESERVE;
				mutex_exit(SD_MUTEX(un));
				SD_INFO(SD_LOG_IOCTL_MHD, un,
				    "sd_resv_reclaim_thread: Failed "
				    "Reservation Recovery\n");
			}
		} else {
			mutex_exit(SD_MUTEX(un));
		}
		mutex_enter(&sd_tr.srq_resv_reclaim_mutex);
		ASSERT(sd_mhreq == sd_tr.srq_thr_cur_req);
		kmem_free(sd_mhreq, sizeof (struct sd_thr_request));
		sd_mhreq = sd_tr.srq_thr_cur_req = NULL;
		/*
		 * wakeup the destroy thread if anyone is waiting on
		 * us to complete.
		 */
		cv_signal(&sd_tr.srq_inprocess_cv);
		SD_TRACE(SD_LOG_IOCTL_MHD, un,
		    "sd_resv_reclaim_thread: cv_signalling current request \n");
	}

	/*
	 * cleanup the sd_tr structure now that this thread will not exist
	 */
	ASSERT(sd_tr.srq_thr_req_head == NULL);
	ASSERT(sd_tr.srq_thr_cur_req == NULL);
	sd_tr.srq_resv_reclaim_thread = NULL;
	mutex_exit(&sd_tr.srq_resv_reclaim_mutex);
	thread_exit();
}


/*
 * Function: sd_rmv_resv_reclaim_req()
 *
 * Description: This function removes any pending reservation reclaim requests
 *		for the specified device.
 *
 * Arguments:	dev - the device 'dev_t'
 */

static void
sd_rmv_resv_reclaim_req(dev_t dev)
{
	struct sd_thr_request *sd_mhreq;
	struct sd_thr_request *sd_prev;

	/* Remove a reservation reclaim request from the list */
	mutex_enter(&sd_tr.srq_resv_reclaim_mutex);
	if (sd_tr.srq_thr_cur_req && sd_tr.srq_thr_cur_req->dev == dev) {
		/*
		 * We are attempting to reinstate reservation for
		 * this device. We wait for sd_reserve_release()
		 * to return before we return.
		 */
		cv_wait(&sd_tr.srq_inprocess_cv,
		    &sd_tr.srq_resv_reclaim_mutex);
	} else {
		sd_prev = sd_mhreq = sd_tr.srq_thr_req_head;
		if (sd_mhreq && sd_mhreq->dev == dev) {
			sd_tr.srq_thr_req_head = sd_mhreq->sd_thr_req_next;
			kmem_free(sd_mhreq, sizeof (struct sd_thr_request));
			mutex_exit(&sd_tr.srq_resv_reclaim_mutex);
			return;
		}
		for (; sd_mhreq != NULL;
		    sd_mhreq = sd_mhreq->sd_thr_req_next) {
			if (sd_mhreq && sd_mhreq->dev == dev) {
				break;
			}
			sd_prev = sd_mhreq;
		}
		if (sd_mhreq != NULL) {
			sd_prev->sd_thr_req_next = sd_mhreq->sd_thr_req_next;
			kmem_free(sd_mhreq, sizeof (struct sd_thr_request));
		}
	}
	mutex_exit(&sd_tr.srq_resv_reclaim_mutex);
}


/*
 * Function: sd_mhd_reset_notify_cb()
 *
 * Description: This is a call back function for scsi_reset_notify. This
 *		function updates the softstate reserved status and logs the
 *		reset. The driver scsi watch facility callback function
 *		(sd_mhd_watch_cb) and reservation reclaim thread functionality
 *		will reclaim the reservation.
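 *
 *		This callback is registered via scsi_reset_notify()
 *		(SCSI_RESET_NOTIFY) in sd_mhdioc_takeown() and cancelled
 *		(SCSI_RESET_CANCEL) in sd_mhdioc_release().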
 *
 * Arguments: arg - driver soft state (unit) structure
 */

static void
sd_mhd_reset_notify_cb(caddr_t arg)
{
	struct sd_lun *un = (struct sd_lun *)arg;

	mutex_enter(SD_MUTEX(un));
	if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) {
		un->un_resvd_status |= (SD_LOST_RESERVE | SD_WANT_RESERVE);
		SD_INFO(SD_LOG_IOCTL_MHD, un,
		    "sd_mhd_reset_notify_cb: Lost Reservation\n");
	}
	mutex_exit(SD_MUTEX(un));
}


/*
 * Function: sd_take_ownership()
 *
 * Description: This routine implements an algorithm to achieve a stable
 *		reservation on disks which don't implement priority reserve,
 *		and makes sure that other hosts lose any re-reservation
 *		attempts. This algorithm consists of a loop that keeps
 *		issuing the RESERVE for some period of time
 *		(min_ownership_delay, default 6 seconds). During that loop,
 *		it looks to see if there has been a bus device reset or bus
 *		reset (both of which cause an existing reservation to be
 *		lost). If the reservation is lost, issue RESERVE until a
 *		period of min_ownership_delay with no resets has gone by, or
 *		until max_ownership_delay has expired. This loop ensures that
 *		the host really did manage to reserve the device, in spite of
 *		resets. The looping for min_ownership_delay (default six
 *		seconds) is important to early generation clustering
 *		products, Solstice HA 1.x and Sun Cluster 2.x. Those products
 *		use an MHIOCENFAILFAST periodic timer of two seconds. By
 *		having MHIOCTKOWN issue Reserves in a loop for six seconds,
 *		and having MHIOCENFAILFAST poll every two seconds, the idea
 *		is that by the time the MHIOCTKOWN ioctl returns, the other
 *		host (if any) will have already noticed, via the
 *		MHIOCENFAILFAST polling, that it no longer "owns" the disk
 *		and will have panicked itself. Thus, the host issuing the
 *		MHIOCTKOWN is assured (with timing dependencies) that by the
 *		time it actually starts to use the disk for real work, the
 *		old owner is no longer accessing it.
 *
 *		min_ownership_delay is the minimum amount of time for which
 *		the disk must be reserved continuously devoid of resets
 *		before the MHIOCTKOWN ioctl will return success.
 *
 *		max_ownership_delay indicates the amount of time by which the
 *		take ownership should succeed or timeout with an error.
 *
 * Arguments:	dev - the device 'dev_t'
 *		*p  - struct containing timing info.
 *
 * Return Code: 0 for success or error code
 */

static int
sd_take_ownership(dev_t dev, struct mhioctkown *p)
{
	struct sd_lun	*un;
	int		rval;
	int		err;
	int		reservation_count = 0;
	int		min_ownership_delay =  6000000; /* in usec */
	int		max_ownership_delay = 30000000; /* in usec */
	clock_t		start_time;	/* starting time of this algorithm */
	clock_t		end_time;	/* time limit for giving up */
	clock_t		ownership_time;	/* time limit for stable ownership */
	clock_t		current_time;
	clock_t		previous_current_time;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	/*
	 * Attempt a device reservation. A priority reservation is requested.
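	 * A priority reservation may "break" a reservation held by another
	 * host: if the first attempt gets a reservation conflict, the lun
	 * (or target, or bus) is reset and the RESERVE is reissued; see
	 * sd_reserve_release() below.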
	 */
	if ((rval = sd_reserve_release(dev, SD_PRIORITY_RESERVE))
	    != SD_SUCCESS) {
		SD_ERROR(SD_LOG_IOCTL_MHD, un,
		    "sd_take_ownership: return(1)=%d\n", rval);
		return (rval);
	}

	/* Update the softstate reserved status to indicate the reservation */
	mutex_enter(SD_MUTEX(un));
	un->un_resvd_status |= SD_RESERVE;
	un->un_resvd_status &=
	    ~(SD_LOST_RESERVE | SD_WANT_RESERVE | SD_RESERVATION_CONFLICT);
	mutex_exit(SD_MUTEX(un));

	if (p != NULL) {
		if (p->min_ownership_delay != 0) {
			min_ownership_delay = p->min_ownership_delay * 1000;
		}
		if (p->max_ownership_delay != 0) {
			max_ownership_delay = p->max_ownership_delay * 1000;
		}
	}
	SD_INFO(SD_LOG_IOCTL_MHD, un,
	    "sd_take_ownership: min, max delays: %d, %d\n",
	    min_ownership_delay, max_ownership_delay);

	start_time = ddi_get_lbolt();
	current_time = start_time;
	ownership_time = current_time + drv_usectohz(min_ownership_delay);
	end_time = start_time + drv_usectohz(max_ownership_delay);

	while (current_time - end_time < 0) {
		delay(drv_usectohz(500000));

		if ((err = sd_reserve_release(dev, SD_RESERVE)) != 0) {
			if ((sd_reserve_release(dev, SD_RESERVE)) != 0) {
				mutex_enter(SD_MUTEX(un));
				rval = (un->un_resvd_status &
				    SD_RESERVATION_CONFLICT) ? EACCES : EIO;
				mutex_exit(SD_MUTEX(un));
				break;
			}
		}
		previous_current_time = current_time;
		current_time = ddi_get_lbolt();
		mutex_enter(SD_MUTEX(un));
		if (err || (un->un_resvd_status & SD_LOST_RESERVE)) {
			ownership_time = ddi_get_lbolt() +
			    drv_usectohz(min_ownership_delay);
			reservation_count = 0;
		} else {
			reservation_count++;
		}
		un->un_resvd_status |= SD_RESERVE;
		un->un_resvd_status &= ~(SD_LOST_RESERVE | SD_WANT_RESERVE);
		mutex_exit(SD_MUTEX(un));

		SD_INFO(SD_LOG_IOCTL_MHD, un,
		    "sd_take_ownership: ticks for loop iteration=%ld, "
		    "reservation=%s\n", (current_time - previous_current_time),
		    reservation_count ?
"ok" : "reclaimed"); 23126 23127 if (current_time - ownership_time >= 0 && 23128 reservation_count >= 4) { 23129 rval = 0; /* Achieved a stable ownership */ 23130 break; 23131 } 23132 if (current_time - end_time >= 0) { 23133 rval = EACCES; /* No ownership in max possible time */ 23134 break; 23135 } 23136 } 23137 SD_TRACE(SD_LOG_IOCTL_MHD, un, 23138 "sd_take_ownership: return(2)=%d\n", rval); 23139 return (rval); 23140 } 23141 23142 23143 /* 23144 * Function: sd_reserve_release() 23145 * 23146 * Description: This function builds and sends scsi RESERVE, RELEASE, and 23147 * PRIORITY RESERVE commands based on a user specified command type 23148 * 23149 * Arguments: dev - the device 'dev_t' 23150 * cmd - user specified command type; one of SD_PRIORITY_RESERVE, 23151 * SD_RESERVE, SD_RELEASE 23152 * 23153 * Return Code: 0 or Error Code 23154 */ 23155 23156 static int 23157 sd_reserve_release(dev_t dev, int cmd) 23158 { 23159 struct uscsi_cmd *com = NULL; 23160 struct sd_lun *un = NULL; 23161 char cdb[CDB_GROUP0]; 23162 int rval; 23163 23164 ASSERT((cmd == SD_RELEASE) || (cmd == SD_RESERVE) || 23165 (cmd == SD_PRIORITY_RESERVE)); 23166 23167 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 23168 return (ENXIO); 23169 } 23170 23171 /* instantiate and initialize the command and cdb */ 23172 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 23173 bzero(cdb, CDB_GROUP0); 23174 com->uscsi_flags = USCSI_SILENT; 23175 com->uscsi_timeout = un->un_reserve_release_time; 23176 com->uscsi_cdblen = CDB_GROUP0; 23177 com->uscsi_cdb = cdb; 23178 if (cmd == SD_RELEASE) { 23179 cdb[0] = SCMD_RELEASE; 23180 } else { 23181 cdb[0] = SCMD_RESERVE; 23182 } 23183 23184 /* Send the command. */ 23185 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 23186 SD_PATH_STANDARD); 23187 23188 /* 23189 * "break" a reservation that is held by another host, by issuing a 23190 * reset if priority reserve is desired, and we could not get the 23191 * device. 23192 */ 23193 if ((cmd == SD_PRIORITY_RESERVE) && 23194 (rval != 0) && (com->uscsi_status == STATUS_RESERVATION_CONFLICT)) { 23195 /* 23196 * First try to reset the LUN. If we cannot, then try a target 23197 * reset, followed by a bus reset if the target reset fails. 23198 */ 23199 int reset_retval = 0; 23200 if (un->un_f_lun_reset_enabled == TRUE) { 23201 reset_retval = scsi_reset(SD_ADDRESS(un), RESET_LUN); 23202 } 23203 if (reset_retval == 0) { 23204 /* The LUN reset either failed or was not issued */ 23205 reset_retval = scsi_reset(SD_ADDRESS(un), RESET_TARGET); 23206 } 23207 if ((reset_retval == 0) && 23208 (scsi_reset(SD_ADDRESS(un), RESET_ALL) == 0)) { 23209 rval = EIO; 23210 kmem_free(com, sizeof (*com)); 23211 return (rval); 23212 } 23213 23214 bzero(com, sizeof (struct uscsi_cmd)); 23215 com->uscsi_flags = USCSI_SILENT; 23216 com->uscsi_cdb = cdb; 23217 com->uscsi_cdblen = CDB_GROUP0; 23218 com->uscsi_timeout = 5; 23219 23220 /* 23221 * Reissue the last reserve command, this time without request 23222 * sense. Assume that it is just a regular reserve command. 23223 */ 23224 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 23225 SD_PATH_STANDARD); 23226 } 23227 23228 /* Return an error if still getting a reservation conflict. 
	 */
	if ((rval != 0) &&
	    (com->uscsi_status == STATUS_RESERVATION_CONFLICT)) {
		rval = EACCES;
	}

	kmem_free(com, sizeof (*com));
	return (rval);
}


#define	SD_NDUMP_RETRIES	12
/*
 * System Crash Dump routine
 */

static int
sddump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk)
{
	int		instance;
	int		partition;
	int		i;
	int		err;
	struct sd_lun	*un;
	struct scsi_pkt *wr_pktp;
	struct buf	*wr_bp;
	struct buf	wr_buf;
	daddr_t		tgt_byte_offset; /* rmw - byte offset for target */
	daddr_t		tgt_blkno;	/* rmw - blkno for target */
	size_t		tgt_byte_count; /* rmw - # of bytes to xfer */
	size_t		tgt_nblk;	/* rmw - # of tgt blks to xfer */
	size_t		io_start_offset;
	int		doing_rmw = FALSE;
	int		rval;
	ssize_t		dma_resid;
	daddr_t		oblkno;
	diskaddr_t	nblks = 0;
	diskaddr_t	start_block;

	instance = SDUNIT(dev);
	if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) ||
	    !SD_IS_VALID_LABEL(un) || ISCD(un)) {
		return (ENXIO);
	}

	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*un))

	SD_TRACE(SD_LOG_DUMP, un, "sddump: entry\n");

	partition = SDPART(dev);
	SD_INFO(SD_LOG_DUMP, un, "sddump: partition = %d\n", partition);

	/* Validate blocks to dump at against partition size. */

	(void) cmlb_partinfo(un->un_cmlbhandle, partition,
	    &nblks, &start_block, NULL, NULL, (void *)SD_PATH_DIRECT);

	if ((blkno + nblk) > nblks) {
		SD_TRACE(SD_LOG_DUMP, un,
		    "sddump: dump range larger than partition: "
		    "blkno = 0x%x, nblk = 0x%x, dkl_nblk = 0x%x\n",
		    blkno, nblk, nblks);
		return (EINVAL);
	}

	mutex_enter(&un->un_pm_mutex);
	if (SD_DEVICE_IS_IN_LOW_POWER(un)) {
		struct scsi_pkt *start_pktp;

		mutex_exit(&un->un_pm_mutex);

		/*
		 * use pm framework to power on HBA 1st
		 */
		(void) pm_raise_power(SD_DEVINFO(un), 0, SD_SPINDLE_ON);

		/*
		 * Dump no longer uses sdpower to power on a device; it's
		 * done in-line here so it can be done in polled mode.
		 */

		SD_INFO(SD_LOG_DUMP, un, "sddump: starting device\n");

		start_pktp = scsi_init_pkt(SD_ADDRESS(un), NULL, NULL,
		    CDB_GROUP0, un->un_status_len, 0, 0, NULL_FUNC, NULL);

		if (start_pktp == NULL) {
			/* We were not given a SCSI packet, fail. */
			return (EIO);
		}
		bzero(start_pktp->pkt_cdbp, CDB_GROUP0);
		start_pktp->pkt_cdbp[0] = SCMD_START_STOP;
		start_pktp->pkt_cdbp[4] = SD_TARGET_START;
		start_pktp->pkt_flags = FLAG_NOINTR;

		mutex_enter(SD_MUTEX(un));
		SD_FILL_SCSI1_LUN(un, start_pktp);
		mutex_exit(SD_MUTEX(un));
		/*
		 * Scsi_poll returns 0 (success) if the command completes and
		 * the status block is STATUS_GOOD.
		 */
		if (sd_scsi_poll(un, start_pktp) != 0) {
			scsi_destroy_pkt(start_pktp);
			return (EIO);
		}
		scsi_destroy_pkt(start_pktp);
		(void) sd_ddi_pm_resume(un);
	} else {
		mutex_exit(&un->un_pm_mutex);
	}

	mutex_enter(SD_MUTEX(un));
	un->un_throttle = 0;

	/*
	 * The first time through, reset the specific target device.
	 * However, when cpr calls sddump we know that sd is in a
	 * good state, so no bus reset is required.
	 * Clear sense data via Request Sense cmd.
	 * In sddump we don't care about allow_bus_device_reset anymore.
	 */

	if ((un->un_state != SD_STATE_SUSPENDED) &&
	    (un->un_state != SD_STATE_DUMPING)) {

		New_state(un, SD_STATE_DUMPING);

		if (un->un_f_is_fibre == FALSE) {
			mutex_exit(SD_MUTEX(un));
			/*
			 * Attempt a bus reset for parallel scsi.
			 *
			 * Note: A bus reset is required because on some host
			 *	 systems (i.e. E420R) a bus device reset is
			 *	 insufficient to reset the state of the
			 *	 target.
			 *
			 * Note: Don't issue the reset for fibre-channel,
			 *	 because this tends to hang the bus (loop)
			 *	 for too long while everyone is logging out
			 *	 and in, and the deadman timer for dumping
			 *	 will fire before the dump is complete.
			 */
			if (scsi_reset(SD_ADDRESS(un), RESET_ALL) == 0) {
				mutex_enter(SD_MUTEX(un));
				Restore_state(un);
				mutex_exit(SD_MUTEX(un));
				return (EIO);
			}

			/* Delay to give the device some recovery time. */
			drv_usecwait(10000);

			if (sd_send_polled_RQS(un) == SD_FAILURE) {
				SD_INFO(SD_LOG_DUMP, un,
				    "sddump: sd_send_polled_RQS failed\n");
			}
			mutex_enter(SD_MUTEX(un));
		}
	}

	/*
	 * Convert the partition-relative block number to a
	 * disk physical block number.
	 */
	blkno += start_block;

	SD_INFO(SD_LOG_DUMP, un, "sddump: disk blkno = 0x%x\n", blkno);


	/*
	 * Check if the device has a non-512 block size.
	 */
	wr_bp = NULL;
	if (NOT_DEVBSIZE(un)) {
		tgt_byte_offset = blkno * un->un_sys_blocksize;
		tgt_byte_count = nblk * un->un_sys_blocksize;
		if ((tgt_byte_offset % un->un_tgt_blocksize) ||
		    (tgt_byte_count % un->un_tgt_blocksize)) {
			doing_rmw = TRUE;
			/*
			 * Calculate the block number and number of blocks
			 * in terms of the media block size.
			 */
			tgt_blkno = tgt_byte_offset / un->un_tgt_blocksize;
			tgt_nblk =
			    ((tgt_byte_offset + tgt_byte_count +
			    (un->un_tgt_blocksize - 1)) /
			    un->un_tgt_blocksize) - tgt_blkno;

			/*
			 * Invoke the routine which is going to do the read
			 * part of the read-modify-write.
			 * Note that this routine returns a pointer to
			 * a valid bp in wr_bp.
			 */
			err = sddump_do_read_of_rmw(un, tgt_blkno, tgt_nblk,
			    &wr_bp);
			if (err) {
				mutex_exit(SD_MUTEX(un));
				return (err);
			}
			/*
			 * Offset is being calculated as -
			 * (original block # * system block size) -
			 * (new block # * target block size)
			 */
			io_start_offset =
			    ((uint64_t)(blkno * un->un_sys_blocksize)) -
			    ((uint64_t)(tgt_blkno * un->un_tgt_blocksize));

			ASSERT((io_start_offset >= 0) &&
			    (io_start_offset < un->un_tgt_blocksize));
			/*
			 * Do the modify portion of the read-modify-write.
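			 * As a worked example (illustrative numbers only):
			 * with 512-byte system blocks, a 4096-byte target
			 * block size, blkno = 7 and nblk = 2, the code above
			 * computes tgt_byte_offset = 3584, tgt_blkno = 0,
			 * tgt_nblk = 2 and io_start_offset = 3584, so the
			 * 1024 bytes of dump data land at offset 3584 of
			 * the buffer read back from the media.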
			 */
			bcopy(addr, &wr_bp->b_un.b_addr[io_start_offset],
			    (size_t)nblk * un->un_sys_blocksize);
		} else {
			doing_rmw = FALSE;
			tgt_blkno = tgt_byte_offset / un->un_tgt_blocksize;
			tgt_nblk = tgt_byte_count / un->un_tgt_blocksize;
		}

		/* Convert blkno and nblk to target blocks */
		blkno = tgt_blkno;
		nblk = tgt_nblk;
	} else {
		wr_bp = &wr_buf;
		bzero(wr_bp, sizeof (struct buf));
		wr_bp->b_flags		= B_BUSY;
		wr_bp->b_un.b_addr	= addr;
		wr_bp->b_bcount		= nblk << DEV_BSHIFT;
		wr_bp->b_resid		= 0;
	}

	mutex_exit(SD_MUTEX(un));

	/*
	 * Obtain a SCSI packet for the write command.
	 * It should be safe to call the allocator here without
	 * worrying about being locked for DVMA mapping because
	 * the address we're passed is already a DVMA mapping.
	 *
	 * We are also not going to worry about semaphore ownership
	 * in the dump buffer. Dumping is single threaded at present.
	 */

	wr_pktp = NULL;

	dma_resid = wr_bp->b_bcount;
	oblkno = blkno;

	while (dma_resid != 0) {

		for (i = 0; i < SD_NDUMP_RETRIES; i++) {
			wr_bp->b_flags &= ~B_ERROR;

			if (un->un_partial_dma_supported == 1) {
				blkno = oblkno +
				    ((wr_bp->b_bcount - dma_resid) /
				    un->un_tgt_blocksize);
				nblk = dma_resid / un->un_tgt_blocksize;

				if (wr_pktp) {
					/*
					 * Partial DMA transfers after the
					 * initial transfer
					 */
					rval = sd_setup_next_rw_pkt(un,
					    wr_pktp, wr_bp, blkno, nblk);
				} else {
					/* Initial transfer */
					rval = sd_setup_rw_pkt(un, &wr_pktp,
					    wr_bp, un->un_pkt_flags,
					    NULL_FUNC, NULL, blkno, nblk);
				}
			} else {
				rval = sd_setup_rw_pkt(un, &wr_pktp, wr_bp,
				    0, NULL_FUNC, NULL, blkno, nblk);
			}

			if (rval == 0) {
				/*
				 * We were given a SCSI packet, continue.
				 */
				break;
			}

			if (i == 0) {
				if (wr_bp->b_flags & B_ERROR) {
					scsi_log(SD_DEVINFO(un), sd_label,
					    CE_WARN,
					    "no resources for dumping; "
					    "error code: 0x%x, retrying",
					    geterror(wr_bp));
				} else {
					scsi_log(SD_DEVINFO(un), sd_label,
					    CE_WARN,
					    "no resources for dumping; "
					    "retrying");
				}
			} else if (i != (SD_NDUMP_RETRIES - 1)) {
				if (wr_bp->b_flags & B_ERROR) {
					scsi_log(SD_DEVINFO(un), sd_label,
					    CE_CONT,
					    "no resources for dumping; "
					    "error code: 0x%x, retrying\n",
					    geterror(wr_bp));
				}
			} else {
				if (wr_bp->b_flags & B_ERROR) {
					scsi_log(SD_DEVINFO(un), sd_label,
					    CE_CONT,
					    "no resources for dumping; "
					    "error code: 0x%x, retries "
					    "failed, giving up.\n",
					    geterror(wr_bp));
				} else {
					scsi_log(SD_DEVINFO(un), sd_label,
					    CE_CONT,
					    "no resources for dumping; "
					    "retries failed, giving up.\n");
				}
				mutex_enter(SD_MUTEX(un));
				Restore_state(un);
				if (NOT_DEVBSIZE(un) &&
				    (doing_rmw == TRUE)) {
					mutex_exit(SD_MUTEX(un));
					scsi_free_consistent_buf(wr_bp);
				} else {
					mutex_exit(SD_MUTEX(un));
				}
				return (EIO);
			}
			drv_usecwait(10000);
		}

		if (un->un_partial_dma_supported == 1) {
			/*
			 * save the resid from PARTIAL_DMA
			 */
			dma_resid = wr_pktp->pkt_resid;
			if (dma_resid != 0)
				nblk -= SD_BYTES2TGTBLOCKS(un, dma_resid);
			wr_pktp->pkt_resid = 0;
		} else {
			dma_resid = 0;
		}

		/* SunBug 1222170 */
		wr_pktp->pkt_flags = FLAG_NOINTR;

		err = EIO;
		for (i = 0; i < SD_NDUMP_RETRIES; i++) {

			/*
			 * Scsi_poll returns 0 (success) if the command
			 * completes and the status block is STATUS_GOOD.
			 * We should only check errors if this condition is
			 * not true. Even then we should send our own request
			 * sense packet only if we have a check condition and
			 * auto request sense has not been performed by the
			 * hba.
			 */
			SD_TRACE(SD_LOG_DUMP, un, "sddump: sending write\n");

			if ((sd_scsi_poll(un, wr_pktp) == 0) &&
			    (wr_pktp->pkt_resid == 0)) {
				err = SD_SUCCESS;
				break;
			}

			/*
			 * Check CMD_DEV_GONE 1st, give up if device is gone.
			 */
			if (wr_pktp->pkt_reason == CMD_DEV_GONE) {
				scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
				    "Error while dumping state..."
				    "Device is gone\n");
				break;
			}

			if (SD_GET_PKT_STATUS(wr_pktp) == STATUS_CHECK) {
				SD_INFO(SD_LOG_DUMP, un,
				    "sddump: write failed with CHECK, "
				    "try # %d\n", i);
				if ((wr_pktp->pkt_state & STATE_ARQ_DONE)
				    == 0) {
					(void) sd_send_polled_RQS(un);
				}

				continue;
			}

			if (SD_GET_PKT_STATUS(wr_pktp) == STATUS_BUSY) {
				int reset_retval = 0;

				SD_INFO(SD_LOG_DUMP, un,
				    "sddump: write failed with BUSY, "
				    "try # %d\n", i);

				if (un->un_f_lun_reset_enabled == TRUE) {
					reset_retval =
					    scsi_reset(SD_ADDRESS(un),
					    RESET_LUN);
				}
				if (reset_retval == 0) {
					(void) scsi_reset(SD_ADDRESS(un),
					    RESET_TARGET);
				}
				(void) sd_send_polled_RQS(un);

			} else {
				SD_INFO(SD_LOG_DUMP, un,
				    "sddump: write failed with 0x%x, "
				    "try # %d\n",
				    SD_GET_PKT_STATUS(wr_pktp), i);
				mutex_enter(SD_MUTEX(un));
				sd_reset_target(un, wr_pktp);
				mutex_exit(SD_MUTEX(un));
			}

			/*
			 * If we are not getting anywhere with lun/target
			 * resets, let's reset the bus.
			 */
			if (i == SD_NDUMP_RETRIES / 2) {
				(void) scsi_reset(SD_ADDRESS(un), RESET_ALL);
				(void) sd_send_polled_RQS(un);
			}
		}
	}

	scsi_destroy_pkt(wr_pktp);
	mutex_enter(SD_MUTEX(un));
	if ((NOT_DEVBSIZE(un)) && (doing_rmw == TRUE)) {
		mutex_exit(SD_MUTEX(un));
		scsi_free_consistent_buf(wr_bp);
	} else {
		mutex_exit(SD_MUTEX(un));
	}
	SD_TRACE(SD_LOG_DUMP, un, "sddump: exit: err = %d\n", err);
	return (err);
}

/*
 * Function: sd_scsi_poll()
 *
 * Description: This is a wrapper for the scsi_poll call.
 *
 * Arguments:	sd_lun	 - The unit structure
 *		scsi_pkt - The scsi packet being sent to the device.
 *
 * Return Code: 0  - Command completed successfully with good status
 *		-1 - Command failed. This could indicate a check condition
 *		     or other status value requiring recovery action.
 *
 * NOTE: This code is only called off sddump().
 */

static int
sd_scsi_poll(struct sd_lun *un, struct scsi_pkt *pktp)
{
	int status;

	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT(pktp != NULL);

	status = SD_SUCCESS;

	if (scsi_ifgetcap(&pktp->pkt_address, "tagged-qing", 1) == 1) {
		pktp->pkt_flags |= un->un_tagflags;
		pktp->pkt_flags &= ~FLAG_NODISCON;
	}

	status = sd_ddi_scsi_poll(pktp);
	/*
	 * Scsi_poll returns 0 (success) if the command completes and the
	 * status block is STATUS_GOOD. We should only check errors if this
	 * condition is not true. Even then we should send our own request
	 * sense packet only if we have a check condition and auto
	 * request sense has not been performed by the hba.
	 * Don't get RQS data if pkt_reason is CMD_DEV_GONE.
	 */
	if ((status != SD_SUCCESS) &&
	    (SD_GET_PKT_STATUS(pktp) == STATUS_CHECK) &&
	    (pktp->pkt_state & STATE_ARQ_DONE) == 0 &&
	    (pktp->pkt_reason != CMD_DEV_GONE))
		(void) sd_send_polled_RQS(un);

	return (status);
}

/*
 * Function: sd_send_polled_RQS()
 *
 * Description: This sends the request sense command to a device.
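 *
 *		The preallocated request sense packet and buffer
 *		(un_rqs_pktp and un_rqs_bp) are reused here, serialized by
 *		the un_sense_isbusy flag, so no allocation is needed at
 *		crash dump time.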
23706 * 23707 * Arguments: sd_lun - The unit structure 23708 * 23709 * Return Code: 0 - Command completed successfully with good status 23710 * -1 - Command failed. 23711 * 23712 */ 23713 23714 static int 23715 sd_send_polled_RQS(struct sd_lun *un) 23716 { 23717 int ret_val; 23718 struct scsi_pkt *rqs_pktp; 23719 struct buf *rqs_bp; 23720 23721 ASSERT(un != NULL); 23722 ASSERT(!mutex_owned(SD_MUTEX(un))); 23723 23724 ret_val = SD_SUCCESS; 23725 23726 rqs_pktp = un->un_rqs_pktp; 23727 rqs_bp = un->un_rqs_bp; 23728 23729 mutex_enter(SD_MUTEX(un)); 23730 23731 if (un->un_sense_isbusy) { 23732 ret_val = SD_FAILURE; 23733 mutex_exit(SD_MUTEX(un)); 23734 return (ret_val); 23735 } 23736 23737 /* 23738 * If the request sense buffer (and packet) is not in use, 23739 * let's set the un_sense_isbusy and send our packet 23740 */ 23741 un->un_sense_isbusy = 1; 23742 rqs_pktp->pkt_resid = 0; 23743 rqs_pktp->pkt_reason = 0; 23744 rqs_pktp->pkt_flags |= FLAG_NOINTR; 23745 bzero(rqs_bp->b_un.b_addr, SENSE_LENGTH); 23746 23747 mutex_exit(SD_MUTEX(un)); 23748 23749 SD_INFO(SD_LOG_COMMON, un, "sd_send_polled_RQS: req sense buf at" 23750 " 0x%p\n", rqs_bp->b_un.b_addr); 23751 23752 /* 23753 * Can't send this to sd_scsi_poll, we wrap ourselves around the 23754 * axle - it has a call into us! 23755 */ 23756 if ((ret_val = sd_ddi_scsi_poll(rqs_pktp)) != 0) { 23757 SD_INFO(SD_LOG_COMMON, un, 23758 "sd_send_polled_RQS: RQS failed\n"); 23759 } 23760 23761 SD_DUMP_MEMORY(un, SD_LOG_COMMON, "sd_send_polled_RQS:", 23762 (uchar_t *)rqs_bp->b_un.b_addr, SENSE_LENGTH, SD_LOG_HEX); 23763 23764 mutex_enter(SD_MUTEX(un)); 23765 un->un_sense_isbusy = 0; 23766 mutex_exit(SD_MUTEX(un)); 23767 23768 return (ret_val); 23769 } 23770 23771 /* 23772 * Defines needed for localized version of the scsi_poll routine. 23773 */ 23774 #define CSEC 10000 /* usecs */ 23775 #define SEC_TO_CSEC (1000000/CSEC) 23776 23777 /* 23778 * Function: sd_ddi_scsi_poll() 23779 * 23780 * Description: Localized version of the scsi_poll routine. The purpose is to 23781 * send a scsi_pkt to a device as a polled command. This version 23782 * is to ensure more robust handling of transport errors. 23783 * Specifically, this routine cures the not-ready to ready 23784 * transition seen on power-up and reset of Sonoma LUNs; this can 23785 * take up to 45 seconds for power-on and 20 seconds for reset 23786 * of a Sonoma LUN. 23787 * 23788 * Arguments: scsi_pkt - The scsi_pkt being sent to a device 23789 * 23790 * Return Code: 0 - Command completed successfully with good status 23791 * -1 - Command failed. 23792 * 23793 * NOTE: This code is almost identical to scsi_poll, however before 6668774 can 23794 * be fixed (removing this code), we need to determine how to handle the 23795 * KEY_UNIT_ATTENTION condition below in conditions not as limited as sddump(). 23796 * 23797 * NOTE: This code is only called off sddump(). 23798 */ 23799 static int 23800 sd_ddi_scsi_poll(struct scsi_pkt *pkt) 23801 { 23802 int rval = -1; 23803 int savef; 23804 long savet; 23805 void (*savec)(); 23806 int timeout; 23807 int busy_count; 23808 int poll_delay; 23809 int rc; 23810 uint8_t *sensep; 23811 struct scsi_arq_status *arqstat; 23812 extern int do_polled_io; 23813 23814 ASSERT(pkt->pkt_scbp); 23815 23816 /* 23817 * save old flags..
23818 */ 23819 savef = pkt->pkt_flags; 23820 savec = pkt->pkt_comp; 23821 savet = pkt->pkt_time; 23822 23823 pkt->pkt_flags |= FLAG_NOINTR; 23824 23825 /* 23826 * XXX there is nothing in the SCSA spec that states that we should not 23827 * do a callback for polled cmds; however, removing this will break sd 23828 * and probably other target drivers 23829 */ 23830 pkt->pkt_comp = NULL; 23831 23832 /* 23833 * we don't like a polled command without timeout. 23834 * 60 seconds seems long enough. 23835 */ 23836 if (pkt->pkt_time == 0) 23837 pkt->pkt_time = SCSI_POLL_TIMEOUT; 23838 23839 /* 23840 * Send polled cmd. 23841 * 23842 * We do some error recovery for various errors. Tran_busy, 23843 * queue full, and non-dispatched commands are retried every 10 msec. 23844 * as they are typically transient failures. Busy status and Not 23845 * Ready are retried every second as this status takes a while to 23846 * change. 23847 */ 23848 timeout = pkt->pkt_time * SEC_TO_CSEC; 23849 23850 for (busy_count = 0; busy_count < timeout; busy_count++) { 23851 /* 23852 * Initialize pkt status variables. 23853 */ 23854 *pkt->pkt_scbp = pkt->pkt_reason = pkt->pkt_state = 0; 23855 23856 if ((rc = scsi_transport(pkt)) != TRAN_ACCEPT) { 23857 if (rc != TRAN_BUSY) { 23858 /* Transport failed - give up. */ 23859 break; 23860 } else { 23861 /* Transport busy - try again. */ 23862 poll_delay = 1 * CSEC; /* 10 msec. */ 23863 } 23864 } else { 23865 /* 23866 * Transport accepted - check pkt status. 23867 */ 23868 rc = (*pkt->pkt_scbp) & STATUS_MASK; 23869 if ((pkt->pkt_reason == CMD_CMPLT) && 23870 (rc == STATUS_CHECK) && 23871 (pkt->pkt_state & STATE_ARQ_DONE)) { 23872 arqstat = 23873 (struct scsi_arq_status *)(pkt->pkt_scbp); 23874 sensep = (uint8_t *)&arqstat->sts_sensedata; 23875 } else { 23876 sensep = NULL; 23877 } 23878 23879 if ((pkt->pkt_reason == CMD_CMPLT) && 23880 (rc == STATUS_GOOD)) { 23881 /* No error - we're done */ 23882 rval = 0; 23883 break; 23884 23885 } else if (pkt->pkt_reason == CMD_DEV_GONE) { 23886 /* Lost connection - give up */ 23887 break; 23888 23889 } else if ((pkt->pkt_reason == CMD_INCOMPLETE) && 23890 (pkt->pkt_state == 0)) { 23891 /* Pkt not dispatched - try again. */ 23892 poll_delay = 1 * CSEC; /* 10 msec. */ 23893 23894 } else if ((pkt->pkt_reason == CMD_CMPLT) && 23895 (rc == STATUS_QFULL)) { 23896 /* Queue full - try again. */ 23897 poll_delay = 1 * CSEC; /* 10 msec. */ 23898 23899 } else if ((pkt->pkt_reason == CMD_CMPLT) && 23900 (rc == STATUS_BUSY)) { 23901 /* Busy - try again. */ 23902 poll_delay = 100 * CSEC; /* 1 sec. */ 23903 busy_count += (SEC_TO_CSEC - 1); 23904 23905 } else if ((sensep != NULL) && 23906 (scsi_sense_key(sensep) == KEY_UNIT_ATTENTION)) { 23907 /* 23908 * Unit Attention - try again. 23909 * Pretend it took 1 sec. 23910 * NOTE: 'continue' avoids poll_delay 23911 */ 23912 busy_count += (SEC_TO_CSEC - 1); 23913 continue; 23914 23915 } else if ((sensep != NULL) && 23916 (scsi_sense_key(sensep) == KEY_NOT_READY) && 23917 (scsi_sense_asc(sensep) == 0x04) && 23918 (scsi_sense_ascq(sensep) == 0x01)) { 23919 /* 23920 * Not ready -> ready - try again. 23921 * 04h/01h: LUN IS IN PROCESS OF BECOMING READY 23922 * ...same as STATUS_BUSY 23923 */ 23924 poll_delay = 100 * CSEC; /* 1 sec. */ 23925 busy_count += (SEC_TO_CSEC - 1); 23926 23927 } else { 23928 /* BAD status - give up. 
*/ 23929 break; 23930 } 23931 } 23932 23933 if (((curthread->t_flag & T_INTR_THREAD) == 0) && 23934 !do_polled_io) { 23935 delay(drv_usectohz(poll_delay)); 23936 } else { 23937 /* we busy wait during cpr_dump or interrupt threads */ 23938 drv_usecwait(poll_delay); 23939 } 23940 } 23941 23942 pkt->pkt_flags = savef; 23943 pkt->pkt_comp = savec; 23944 pkt->pkt_time = savet; 23945 23946 /* return on error */ 23947 if (rval) 23948 return (rval); 23949 23950 /* 23951 * This is not a performance critical code path. 23952 * 23953 * As an accommodation for scsi_poll callers, to avoid ddi_dma_sync() 23954 * issues associated with looking at DMA memory prior to 23955 * scsi_pkt_destroy(), we scsi_sync_pkt() prior to return. 23956 */ 23957 scsi_sync_pkt(pkt); 23958 return (0); 23959 } 23960 23961 23962 23963 /* 23964 * Function: sd_persistent_reservation_in_read_keys 23965 * 23966 * Description: This routine is the driver entry point for handling CD-ROM 23967 * multi-host persistent reservation requests (MHIOCGRP_INKEYS) 23968 * by sending the SCSI-3 PRIN commands to the device. 23969 * Processes the read keys command response by copying the 23970 * reservation key information into the user provided buffer. 23971 * Support for the 32/64 bit _MULTI_DATAMODEL is implemented. 23972 * 23973 * Arguments: un - Pointer to soft state struct for the target. 23974 * usrp - user provided pointer to multihost Persistent In Read 23975 * Keys structure (mhioc_inkeys_t) 23976 * flag - this argument is a pass through to ddi_copyxxx() 23977 * directly from the mode argument of ioctl(). 23978 * 23979 * Return Code: 0 - Success 23980 * EACCES 23981 * ENOTSUP 23982 * errno return code from sd_send_scsi_cmd() 23983 * 23984 * Context: Can sleep. Does not return until command is completed. 23985 */ 23986 23987 static int 23988 sd_persistent_reservation_in_read_keys(struct sd_lun *un, 23989 mhioc_inkeys_t *usrp, int flag) 23990 { 23991 #ifdef _MULTI_DATAMODEL 23992 struct mhioc_key_list32 li32; 23993 #endif 23994 sd_prin_readkeys_t *in; 23995 mhioc_inkeys_t *ptr; 23996 mhioc_key_list_t li; 23997 uchar_t *data_bufp; 23998 int data_len; 23999 int rval; 24000 size_t copysz; 24001 24002 if ((ptr = (mhioc_inkeys_t *)usrp) == NULL) { 24003 return (EINVAL); 24004 } 24005 bzero(&li, sizeof (mhioc_key_list_t)); 24006 24007 /* 24008 * Get the listsize from user 24009 */ 24010 #ifdef _MULTI_DATAMODEL 24011 24012 switch (ddi_model_convert_from(flag & FMODELS)) { 24013 case DDI_MODEL_ILP32: 24014 copysz = sizeof (struct mhioc_key_list32); 24015 if (ddi_copyin(ptr->li, &li32, copysz, flag)) { 24016 SD_ERROR(SD_LOG_IOCTL_MHD, un, 24017 "sd_persistent_reservation_in_read_keys: " 24018 "failed ddi_copyin: mhioc_key_list32_t\n"); 24019 rval = EFAULT; 24020 return (rval); /* data_bufp not yet allocated; must not goto done */ 24021 } 24022 li.listsize = li32.listsize; 24023 li.list = (mhioc_resv_key_t *)(uintptr_t)li32.list; 24024 break; 24025 24026 case DDI_MODEL_NONE: 24027 copysz = sizeof (mhioc_key_list_t); 24028 if (ddi_copyin(ptr->li, &li, copysz, flag)) { 24029 SD_ERROR(SD_LOG_IOCTL_MHD, un, 24030 "sd_persistent_reservation_in_read_keys: " 24031 "failed ddi_copyin: mhioc_key_list_t\n"); 24032 rval = EFAULT; 24033 return (rval); /* data_bufp not yet allocated; must not goto done */ 24034 } 24035 break; 24036 } 24037 24038 #else /* !
_MULTI_DATAMODEL */ 24039 copysz = sizeof (mhioc_key_list_t); 24040 if (ddi_copyin(ptr->li, &li, copysz, flag)) { 24041 SD_ERROR(SD_LOG_IOCTL_MHD, un, 24042 "sd_persistent_reservation_in_read_keys: " 24043 "failed ddi_copyin: mhioc_key_list_t\n"); 24044 rval = EFAULT; 24045 return (rval); /* data_bufp not yet allocated; must not goto done */ 24046 } 24047 #endif 24048 24049 data_len = li.listsize * MHIOC_RESV_KEY_SIZE; 24050 data_len += (sizeof (sd_prin_readkeys_t) - sizeof (caddr_t)); 24051 data_bufp = kmem_zalloc(data_len, KM_SLEEP); 24052 24053 if ((rval = sd_send_scsi_PERSISTENT_RESERVE_IN(un, SD_READ_KEYS, 24054 data_len, data_bufp)) != 0) { 24055 goto done; 24056 } 24057 in = (sd_prin_readkeys_t *)data_bufp; 24058 ptr->generation = BE_32(in->generation); 24059 li.listlen = BE_32(in->len) / MHIOC_RESV_KEY_SIZE; 24060 24061 /* 24062 * Return the min(listsize, listlen) keys 24063 */ 24064 #ifdef _MULTI_DATAMODEL 24065 24066 switch (ddi_model_convert_from(flag & FMODELS)) { 24067 case DDI_MODEL_ILP32: 24068 li32.listlen = li.listlen; 24069 if (ddi_copyout(&li32, ptr->li, copysz, flag)) { 24070 SD_ERROR(SD_LOG_IOCTL_MHD, un, 24071 "sd_persistent_reservation_in_read_keys: " 24072 "failed ddi_copyout: mhioc_key_list32_t\n"); 24073 rval = EFAULT; 24074 goto done; 24075 } 24076 break; 24077 24078 case DDI_MODEL_NONE: 24079 if (ddi_copyout(&li, ptr->li, copysz, flag)) { 24080 SD_ERROR(SD_LOG_IOCTL_MHD, un, 24081 "sd_persistent_reservation_in_read_keys: " 24082 "failed ddi_copyout: mhioc_key_list_t\n"); 24083 rval = EFAULT; 24084 goto done; 24085 } 24086 break; 24087 } 24088 24089 #else /* ! _MULTI_DATAMODEL */ 24090 24091 if (ddi_copyout(&li, ptr->li, copysz, flag)) { 24092 SD_ERROR(SD_LOG_IOCTL_MHD, un, 24093 "sd_persistent_reservation_in_read_keys: " 24094 "failed ddi_copyout: mhioc_key_list_t\n"); 24095 rval = EFAULT; 24096 goto done; 24097 } 24098 24099 #endif /* _MULTI_DATAMODEL */ 24100 24101 copysz = min(li.listlen * MHIOC_RESV_KEY_SIZE, 24102 li.listsize * MHIOC_RESV_KEY_SIZE); 24103 if (ddi_copyout(&in->keylist, li.list, copysz, flag)) { 24104 SD_ERROR(SD_LOG_IOCTL_MHD, un, 24105 "sd_persistent_reservation_in_read_keys: " 24106 "failed ddi_copyout: keylist\n"); 24107 rval = EFAULT; 24108 } 24109 done: 24110 kmem_free(data_bufp, data_len); 24111 return (rval); 24112 } 24113 24114 24115 /* 24116 * Function: sd_persistent_reservation_in_read_resv 24117 * 24118 * Description: This routine is the driver entry point for handling CD-ROM 24119 * multi-host persistent reservation requests (MHIOCGRP_INRESV) 24120 * by sending the SCSI-3 PRIN commands to the device. 24121 * Process the read persistent reservations command response by 24122 * copying the reservation information into the user provided 24123 * buffer. Support for the 32/64 _MULTI_DATAMODEL is implemented. 24124 * 24125 * Arguments: un - Pointer to soft state struct for the target. 24126 * usrp - user provided pointer to multihost Persistent In Read 24127 * Reservations structure (mhioc_inresvs_t) 24128 * flag - this argument is a pass through to ddi_copyxxx() 24129 * directly from the mode argument of ioctl(). 24130 * 24131 * Return Code: 0 - Success 24132 * EACCES 24133 * ENOTSUP 24134 * errno return code from sd_send_scsi_cmd() 24135 * 24136 * Context: Can sleep. Does not return until command is completed.
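 *
 *		Illustrative sketch of the matching userland request
 *		(hypothetical variable names); the MHIOCGRP_INKEYS path
 *		above is used the same way:
 *
 *		    mhioc_resv_desc_t      descs[4];
 *		    mhioc_resv_desc_list_t rl = { .listsize = 4, .list = descs };
 *		    mhioc_inresvs_t        ir = { .li = &rl };
 *		    if (ioctl(fd, MHIOCGRP_INRESV, &ir) == 0) {
 *			    ... rl.listlen reservations were available ...
 *		    }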
24137 */ 24138 24139 static int 24140 sd_persistent_reservation_in_read_resv(struct sd_lun *un, 24141 mhioc_inresvs_t *usrp, int flag) 24142 { 24143 #ifdef _MULTI_DATAMODEL 24144 struct mhioc_resv_desc_list32 resvlist32; 24145 #endif 24146 sd_prin_readresv_t *in; 24147 mhioc_inresvs_t *ptr; 24148 sd_readresv_desc_t *readresv_ptr; 24149 mhioc_resv_desc_list_t resvlist; 24150 mhioc_resv_desc_t resvdesc; 24151 uchar_t *data_bufp; 24152 int data_len; 24153 int rval; 24154 int i; 24155 size_t copysz; 24156 mhioc_resv_desc_t *bufp; 24157 24158 if ((ptr = usrp) == NULL) { 24159 return (EINVAL); 24160 } 24161 24162 /* 24163 * Get the listsize from user 24164 */ 24165 #ifdef _MULTI_DATAMODEL 24166 switch (ddi_model_convert_from(flag & FMODELS)) { 24167 case DDI_MODEL_ILP32: 24168 copysz = sizeof (struct mhioc_resv_desc_list32); 24169 if (ddi_copyin(ptr->li, &resvlist32, copysz, flag)) { 24170 SD_ERROR(SD_LOG_IOCTL_MHD, un, 24171 "sd_persistent_reservation_in_read_resv: " 24172 "failed ddi_copyin: mhioc_resv_desc_list_t\n"); 24173 rval = EFAULT; 24174 return (rval); /* data_bufp not yet allocated; must not goto done */ 24175 } 24176 resvlist.listsize = resvlist32.listsize; 24177 resvlist.list = (mhioc_resv_desc_t *)(uintptr_t)resvlist32.list; 24178 break; 24179 24180 case DDI_MODEL_NONE: 24181 copysz = sizeof (mhioc_resv_desc_list_t); 24182 if (ddi_copyin(ptr->li, &resvlist, copysz, flag)) { 24183 SD_ERROR(SD_LOG_IOCTL_MHD, un, 24184 "sd_persistent_reservation_in_read_resv: " 24185 "failed ddi_copyin: mhioc_resv_desc_list_t\n"); 24186 rval = EFAULT; 24187 return (rval); /* data_bufp not yet allocated; must not goto done */ 24188 } 24189 break; 24190 } 24191 #else /* ! _MULTI_DATAMODEL */ 24192 copysz = sizeof (mhioc_resv_desc_list_t); 24193 if (ddi_copyin(ptr->li, &resvlist, copysz, flag)) { 24194 SD_ERROR(SD_LOG_IOCTL_MHD, un, 24195 "sd_persistent_reservation_in_read_resv: " 24196 "failed ddi_copyin: mhioc_resv_desc_list_t\n"); 24197 rval = EFAULT; 24198 return (rval); /* data_bufp not yet allocated; must not goto done */ 24199 } 24200 #endif /* ! _MULTI_DATAMODEL */ 24201 24202 data_len = resvlist.listsize * SCSI3_RESV_DESC_LEN; 24203 data_len += (sizeof (sd_prin_readresv_t) - sizeof (caddr_t)); 24204 data_bufp = kmem_zalloc(data_len, KM_SLEEP); 24205 24206 if ((rval = sd_send_scsi_PERSISTENT_RESERVE_IN(un, SD_READ_RESV, 24207 data_len, data_bufp)) != 0) { 24208 goto done; 24209 } 24210 in = (sd_prin_readresv_t *)data_bufp; 24211 ptr->generation = BE_32(in->generation); 24212 resvlist.listlen = BE_32(in->len) / SCSI3_RESV_DESC_LEN; 24213 24214 /* 24215 * Return the min(listsize, listlen) keys 24216 */ 24217 #ifdef _MULTI_DATAMODEL 24218 24219 switch (ddi_model_convert_from(flag & FMODELS)) { 24220 case DDI_MODEL_ILP32: 24221 resvlist32.listlen = resvlist.listlen; 24222 if (ddi_copyout(&resvlist32, ptr->li, copysz, flag)) { 24223 SD_ERROR(SD_LOG_IOCTL_MHD, un, 24224 "sd_persistent_reservation_in_read_resv: " 24225 "failed ddi_copyout: mhioc_resv_desc_list_t\n"); 24226 rval = EFAULT; 24227 goto done; 24228 } 24229 break; 24230 24231 case DDI_MODEL_NONE: 24232 if (ddi_copyout(&resvlist, ptr->li, copysz, flag)) { 24233 SD_ERROR(SD_LOG_IOCTL_MHD, un, 24234 "sd_persistent_reservation_in_read_resv: " 24235 "failed ddi_copyout: mhioc_resv_desc_list_t\n"); 24236 rval = EFAULT; 24237 goto done; 24238 } 24239 break; 24240 } 24241 24242 #else /* ! _MULTI_DATAMODEL */ 24243 24244 if (ddi_copyout(&resvlist, ptr->li, copysz, flag)) { 24245 SD_ERROR(SD_LOG_IOCTL_MHD, un, 24246 "sd_persistent_reservation_in_read_resv: " 24247 "failed ddi_copyout: mhioc_resv_desc_list_t\n"); 24248 rval = EFAULT; 24249 goto done; 24250 } 24251 24252 #endif /* !
_MULTI_DATAMODEL */ 24253 24254 readresv_ptr = (sd_readresv_desc_t *)&in->readresv_desc; 24255 bufp = resvlist.list; 24256 copysz = sizeof (mhioc_resv_desc_t); 24257 for (i = 0; i < min(resvlist.listlen, resvlist.listsize); 24258 i++, readresv_ptr++, bufp++) { 24259 24260 bcopy(&readresv_ptr->resvkey, &resvdesc.key, 24261 MHIOC_RESV_KEY_SIZE); 24262 resvdesc.type = readresv_ptr->type; 24263 resvdesc.scope = readresv_ptr->scope; 24264 resvdesc.scope_specific_addr = 24265 BE_32(readresv_ptr->scope_specific_addr); 24266 24267 if (ddi_copyout(&resvdesc, bufp, copysz, flag)) { 24268 SD_ERROR(SD_LOG_IOCTL_MHD, un, 24269 "sd_persistent_reservation_in_read_resv: " 24270 "failed ddi_copyout: resvlist\n"); 24271 rval = EFAULT; 24272 goto done; 24273 } 24274 } 24275 done: 24276 kmem_free(data_bufp, data_len); 24277 return (rval); 24278 } 24279 24280 24281 /* 24282 * Function: sr_change_blkmode() 24283 * 24284 * Description: This routine is the driver entry point for handling CD-ROM 24285 * block mode ioctl requests. Support for returning and changing 24286 * the current block size in use by the device is implemented. The 24287 * LBA size is changed via a MODE SELECT Block Descriptor. 24288 * 24289 * This routine issues a mode sense with an allocation length of 24290 * 12 bytes for the mode page header and a single block descriptor. 24291 * 24292 * Arguments: dev - the device 'dev_t' 24293 * cmd - the request type; one of CDROMGBLKMODE (get) or 24294 * CDROMSBLKMODE (set) 24295 * data - current block size or requested block size 24296 * flag - this argument is a pass through to ddi_copyxxx() directly 24297 * from the mode argument of ioctl(). 24298 * 24299 * Return Code: the code returned by sd_send_scsi_cmd() 24300 * EINVAL if invalid arguments are provided 24301 * EFAULT if ddi_copyxxx() fails 24302 * ENXIO if fail ddi_get_soft_state 24303 * EIO if invalid mode sense block descriptor length 24304 * 24305 */ 24306 24307 static int 24308 sr_change_blkmode(dev_t dev, int cmd, intptr_t data, int flag) 24309 { 24310 struct sd_lun *un = NULL; 24311 struct mode_header *sense_mhp, *select_mhp; 24312 struct block_descriptor *sense_desc, *select_desc; 24313 int current_bsize; 24314 int rval = EINVAL; 24315 uchar_t *sense = NULL; 24316 uchar_t *select = NULL; 24317 24318 ASSERT((cmd == CDROMGBLKMODE) || (cmd == CDROMSBLKMODE)); 24319 24320 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24321 return (ENXIO); 24322 } 24323 24324 /* 24325 * The block length is changed via the Mode Select block descriptor, the 24326 * "Read/Write Error Recovery" mode page (0x1) contents are not actually 24327 * required as part of this routine. Therefore the mode sense allocation 24328 * length is specified to be the length of a mode page header and a 24329 * block descriptor. 
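 *
 * That is, BUFLEN_CHG_BLK_MODE works out to MODE_HEADER_LENGTH (4 bytes
 * for the 6-byte MODE SENSE/SELECT commands) plus MODE_BLK_DESC_LENGTH
 * (8 bytes), giving the 12 byte allocation length mentioned above.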
24330 */ 24331 sense = kmem_zalloc(BUFLEN_CHG_BLK_MODE, KM_SLEEP); 24332 24333 if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, sense, 24334 BUFLEN_CHG_BLK_MODE, MODEPAGE_ERR_RECOV, SD_PATH_STANDARD)) != 0) { 24335 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24336 "sr_change_blkmode: Mode Sense Failed\n"); 24337 kmem_free(sense, BUFLEN_CHG_BLK_MODE); 24338 return (rval); 24339 } 24340 24341 /* Check the block descriptor len to handle only 1 block descriptor */ 24342 sense_mhp = (struct mode_header *)sense; 24343 if ((sense_mhp->bdesc_length == 0) || 24344 (sense_mhp->bdesc_length > MODE_BLK_DESC_LENGTH)) { 24345 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24346 "sr_change_blkmode: Mode Sense returned invalid block" 24347 " descriptor length\n"); 24348 kmem_free(sense, BUFLEN_CHG_BLK_MODE); 24349 return (EIO); 24350 } 24351 sense_desc = (struct block_descriptor *)(sense + MODE_HEADER_LENGTH); 24352 current_bsize = ((sense_desc->blksize_hi << 16) | 24353 (sense_desc->blksize_mid << 8) | sense_desc->blksize_lo); 24354 24355 /* Process command */ 24356 switch (cmd) { 24357 case CDROMGBLKMODE: 24358 /* Return the block size obtained during the mode sense */ 24359 if (ddi_copyout(&current_bsize, (void *)data, 24360 sizeof (int), flag) != 0) 24361 rval = EFAULT; 24362 break; 24363 case CDROMSBLKMODE: 24364 /* Validate the requested block size */ 24365 switch (data) { 24366 case CDROM_BLK_512: 24367 case CDROM_BLK_1024: 24368 case CDROM_BLK_2048: 24369 case CDROM_BLK_2056: 24370 case CDROM_BLK_2336: 24371 case CDROM_BLK_2340: 24372 case CDROM_BLK_2352: 24373 case CDROM_BLK_2368: 24374 case CDROM_BLK_2448: 24375 case CDROM_BLK_2646: 24376 case CDROM_BLK_2647: 24377 break; 24378 default: 24379 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24380 "sr_change_blkmode: " 24381 "Block Size '%ld' Not Supported\n", data); 24382 kmem_free(sense, BUFLEN_CHG_BLK_MODE); 24383 return (EINVAL); 24384 } 24385 24386 /* 24387 * The current block size matches the requested block size so 24388 * there is no need to send the mode select to change the size 24389 */ 24390 if (current_bsize == data) { 24391 break; 24392 } 24393 24394 /* Build the select data for the requested block size */ 24395 select = kmem_zalloc(BUFLEN_CHG_BLK_MODE, KM_SLEEP); 24396 select_mhp = (struct mode_header *)select; 24397 select_desc = 24398 (struct block_descriptor *)(select + MODE_HEADER_LENGTH); 24399 /* 24400 * The LBA size is changed via the block descriptor, so the 24401 * descriptor is built according to the user data 24402 */ 24403 select_mhp->bdesc_length = MODE_BLK_DESC_LENGTH; 24404 select_desc->blksize_hi = (char)(((data) & 0x00ff0000) >> 16); 24405 select_desc->blksize_mid = (char)(((data) & 0x0000ff00) >> 8); 24406 select_desc->blksize_lo = (char)((data) & 0x000000ff); 24407 24408 /* Send the mode select for the requested block size */ 24409 if ((rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, 24410 select, BUFLEN_CHG_BLK_MODE, SD_DONTSAVE_PAGE, 24411 SD_PATH_STANDARD)) != 0) { 24412 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24413 "sr_change_blkmode: Mode Select Failed\n"); 24414 /* 24415 * The mode select failed for the requested block size, 24416 * so reset the data for the original block size and 24417 * send it to the target. The error is indicated by the 24418 * return value for the failed mode select.
24419 */ 24420 select_desc->blksize_hi = sense_desc->blksize_hi; 24421 select_desc->blksize_mid = sense_desc->blksize_mid; 24422 select_desc->blksize_lo = sense_desc->blksize_lo; 24423 (void) sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, 24424 select, BUFLEN_CHG_BLK_MODE, SD_DONTSAVE_PAGE, 24425 SD_PATH_STANDARD); 24426 } else { 24427 ASSERT(!mutex_owned(SD_MUTEX(un))); 24428 mutex_enter(SD_MUTEX(un)); 24429 sd_update_block_info(un, (uint32_t)data, 0); 24430 mutex_exit(SD_MUTEX(un)); 24431 } 24432 break; 24433 default: 24434 /* should not reach here, but check anyway */ 24435 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24436 "sr_change_blkmode: Command '%x' Not Supported\n", cmd); 24437 rval = EINVAL; 24438 break; 24439 } 24440 24441 if (select) { 24442 kmem_free(select, BUFLEN_CHG_BLK_MODE); 24443 } 24444 if (sense) { 24445 kmem_free(sense, BUFLEN_CHG_BLK_MODE); 24446 } 24447 return (rval); 24448 } 24449 24450 24451 /* 24452 * Note: The following sr_change_speed() and sr_atapi_change_speed() routines 24453 * implement driver support for getting and setting the CD speed. The command 24454 * set used will be based on the device type. If the device has not been 24455 * identified as MMC the Toshiba vendor specific mode page will be used. If 24456 * the device is MMC but does not support the Real Time Streaming feature 24457 * the SET CD SPEED command will be used to set speed and mode page 0x2A will 24458 * be used to read the speed. 24459 */ 24460 24461 /* 24462 * Function: sr_change_speed() 24463 * 24464 * Description: This routine is the driver entry point for handling CD-ROM 24465 * drive speed ioctl requests for devices supporting the Toshiba 24466 * vendor specific drive speed mode page. Support for returning 24467 * and changing the current drive speed in use by the device is 24468 * implemented. 24469 * 24470 * Arguments: dev - the device 'dev_t' 24471 * cmd - the request type; one of CDROMGDRVSPEED (get) or 24472 * CDROMSDRVSPEED (set) 24473 * data - current drive speed or requested drive speed 24474 * flag - this argument is a pass through to ddi_copyxxx() directly 24475 * from the mode argument of ioctl(). 24476 * 24477 * Return Code: the code returned by sd_send_scsi_cmd() 24478 * EINVAL if invalid arguments are provided 24479 * EFAULT if ddi_copyxxx() fails 24480 * ENXIO if fail ddi_get_soft_state 24481 * EIO if invalid mode sense block descriptor length 24482 */ 24483 24484 static int 24485 sr_change_speed(dev_t dev, int cmd, intptr_t data, int flag) 24486 { 24487 struct sd_lun *un = NULL; 24488 struct mode_header *sense_mhp, *select_mhp; 24489 struct mode_speed *sense_page, *select_page; 24490 int current_speed; 24491 int rval = EINVAL; 24492 int bd_len; 24493 uchar_t *sense = NULL; 24494 uchar_t *select = NULL; 24495 24496 ASSERT((cmd == CDROMGDRVSPEED) || (cmd == CDROMSDRVSPEED)); 24497 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24498 return (ENXIO); 24499 } 24500 24501 /* 24502 * Note: The drive speed is being modified here according to a Toshiba 24503 * vendor specific mode page (0x31). 
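 *
 * The select data built below is therefore just a mode header with no
 * block descriptor (bdesc_length = 0) followed by the page itself:
 * mode_page.code = CDROM_MODE_SPEED (0x31), mode_page.length = 2, and a
 * single speed byte.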
24504 */ 24505 sense = kmem_zalloc(BUFLEN_MODE_CDROM_SPEED, KM_SLEEP); 24506 24507 if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, sense, 24508 BUFLEN_MODE_CDROM_SPEED, CDROM_MODE_SPEED, 24509 SD_PATH_STANDARD)) != 0) { 24510 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24511 "sr_change_speed: Mode Sense Failed\n"); 24512 kmem_free(sense, BUFLEN_MODE_CDROM_SPEED); 24513 return (rval); 24514 } 24515 sense_mhp = (struct mode_header *)sense; 24516 24517 /* Check the block descriptor len to handle only 1 block descriptor */ 24518 bd_len = sense_mhp->bdesc_length; 24519 if (bd_len > MODE_BLK_DESC_LENGTH) { 24520 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24521 "sr_change_speed: Mode Sense returned invalid block " 24522 "descriptor length\n"); 24523 kmem_free(sense, BUFLEN_MODE_CDROM_SPEED); 24524 return (EIO); 24525 } 24526 24527 sense_page = (struct mode_speed *) 24528 (sense + MODE_HEADER_LENGTH + sense_mhp->bdesc_length); 24529 current_speed = sense_page->speed; 24530 24531 /* Process command */ 24532 switch (cmd) { 24533 case CDROMGDRVSPEED: 24534 /* Return the drive speed obtained during the mode sense */ 24535 if (current_speed == 0x2) { 24536 current_speed = CDROM_TWELVE_SPEED; 24537 } 24538 if (ddi_copyout(&current_speed, (void *)data, 24539 sizeof (int), flag) != 0) { 24540 rval = EFAULT; 24541 } 24542 break; 24543 case CDROMSDRVSPEED: 24544 /* Validate the requested drive speed */ 24545 switch ((uchar_t)data) { 24546 case CDROM_TWELVE_SPEED: 24547 data = 0x2; 24548 /*FALLTHROUGH*/ 24549 case CDROM_NORMAL_SPEED: 24550 case CDROM_DOUBLE_SPEED: 24551 case CDROM_QUAD_SPEED: 24552 case CDROM_MAXIMUM_SPEED: 24553 break; 24554 default: 24555 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24556 "sr_change_speed: " 24557 "Drive Speed '%d' Not Supported\n", (uchar_t)data); 24558 kmem_free(sense, BUFLEN_MODE_CDROM_SPEED); 24559 return (EINVAL); 24560 } 24561 24562 /* 24563 * The current drive speed matches the requested drive speed so 24564 * there is no need to send the mode select to change the speed 24565 */ 24566 if (current_speed == data) { 24567 break; 24568 } 24569 24570 /* Build the select data for the requested drive speed */ 24571 select = kmem_zalloc(BUFLEN_MODE_CDROM_SPEED, KM_SLEEP); 24572 select_mhp = (struct mode_header *)select; 24573 select_mhp->bdesc_length = 0; 24574 select_page = 24575 (struct mode_speed *)(select + MODE_HEADER_LENGTH); 24576 24577 24578 select_page->mode_page.code = CDROM_MODE_SPEED; 24579 select_page->mode_page.length = 2; 24580 select_page->speed = (uchar_t)data; 24581 24582 /* Send the mode select for the requested drive speed */ 24583 if ((rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, select, 24584 MODEPAGE_CDROM_SPEED_LEN + MODE_HEADER_LENGTH, 24585 SD_DONTSAVE_PAGE, SD_PATH_STANDARD)) != 0) { 24586 /* 24587 * The mode select failed for the requested drive speed, 24588 * so reset the data for the original drive speed and 24589 * send it to the target. The error is indicated by the 24590 * return value for the failed mode select.
24591 */ 24592 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24593 "sr_change_speed: Mode Select Failed\n"); 24594 select_page->speed = sense_page->speed; 24595 (void) sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, select, 24596 MODEPAGE_CDROM_SPEED_LEN + MODE_HEADER_LENGTH, 24597 SD_DONTSAVE_PAGE, SD_PATH_STANDARD); 24598 } 24599 break; 24600 default: 24601 /* should not reach here, but check anyway */ 24602 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24603 "sr_change_speed: Command '%x' Not Supported\n", cmd); 24604 rval = EINVAL; 24605 break; 24606 } 24607 24608 if (select) { 24609 kmem_free(select, BUFLEN_MODE_CDROM_SPEED); 24610 } 24611 if (sense) { 24612 kmem_free(sense, BUFLEN_MODE_CDROM_SPEED); 24613 } 24614 24615 return (rval); 24616 } 24617 24618 24619 /* 24620 * Function: sr_atapi_change_speed() 24621 * 24622 * Description: This routine is the driver entry point for handling CD-ROM 24623 * drive speed ioctl requests for MMC devices that do not support 24624 * the Real Time Streaming feature (0x107). 24625 * 24626 * Note: This routine will use the SET SPEED command which may not 24627 * be supported by all devices. 24628 * 24629 * Arguments: dev - the device 'dev_t' 24630 * cmd - the request type; one of CDROMGDRVSPEED (get) or 24631 * CDROMSDRVSPEED (set) 24632 * data - current drive speed or requested drive speed 24633 * flag - this argument is a pass through to ddi_copyxxx() directly 24634 * from the mode argument of ioctl(). 24635 * 24636 * Return Code: the code returned by sd_send_scsi_cmd() 24637 * EINVAL if invalid arguments are provided 24638 * EFAULT if ddi_copyxxx() fails 24639 * ENXIO if fail ddi_get_soft_state 24640 * EIO if invalid mode sense block descriptor length 24641 */ 24642 24643 static int 24644 sr_atapi_change_speed(dev_t dev, int cmd, intptr_t data, int flag) 24645 { 24646 struct sd_lun *un; 24647 struct uscsi_cmd *com = NULL; 24648 struct mode_header_grp2 *sense_mhp; 24649 uchar_t *sense_page; 24650 uchar_t *sense = NULL; 24651 char cdb[CDB_GROUP5]; 24652 int bd_len; 24653 int current_speed = 0; 24654 int max_speed = 0; 24655 int rval; 24656 24657 ASSERT((cmd == CDROMGDRVSPEED) || (cmd == CDROMSDRVSPEED)); 24658 24659 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24660 return (ENXIO); 24661 } 24662 24663 sense = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP); 24664 24665 if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, sense, 24666 BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP, 24667 SD_PATH_STANDARD)) != 0) { 24668 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24669 "sr_atapi_change_speed: Mode Sense Failed\n"); 24670 kmem_free(sense, BUFLEN_MODE_CDROM_CAP); 24671 return (rval); 24672 } 24673 24674 /* Check the block descriptor len to handle only 1 block descriptor */ 24675 sense_mhp = (struct mode_header_grp2 *)sense; 24676 bd_len = (sense_mhp->bdesc_length_hi << 8) | sense_mhp->bdesc_length_lo; 24677 if (bd_len > MODE_BLK_DESC_LENGTH) { 24678 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24679 "sr_atapi_change_speed: Mode Sense returned invalid " 24680 "block descriptor length\n"); 24681 kmem_free(sense, BUFLEN_MODE_CDROM_CAP); 24682 return (EIO); 24683 } 24684 24685 /* Calculate the current and maximum drive speeds */ 24686 sense_page = (uchar_t *)(sense + MODE_HEADER_LENGTH_GRP2 + bd_len); 24687 current_speed = (sense_page[14] << 8) | sense_page[15]; 24688 max_speed = (sense_page[8] << 8) | sense_page[9]; 24689 24690 /* Process the command */ 24691 switch (cmd) { 24692 case CDROMGDRVSPEED: 24693 current_speed /= SD_SPEED_1X; 24694 if
(ddi_copyout(¤t_speed, (void *)data, 24695 sizeof (int), flag) != 0) 24696 rval = EFAULT; 24697 break; 24698 case CDROMSDRVSPEED: 24699 /* Convert the speed code to KB/sec */ 24700 switch ((uchar_t)data) { 24701 case CDROM_NORMAL_SPEED: 24702 current_speed = SD_SPEED_1X; 24703 break; 24704 case CDROM_DOUBLE_SPEED: 24705 current_speed = 2 * SD_SPEED_1X; 24706 break; 24707 case CDROM_QUAD_SPEED: 24708 current_speed = 4 * SD_SPEED_1X; 24709 break; 24710 case CDROM_TWELVE_SPEED: 24711 current_speed = 12 * SD_SPEED_1X; 24712 break; 24713 case CDROM_MAXIMUM_SPEED: 24714 current_speed = 0xffff; 24715 break; 24716 default: 24717 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24718 "sr_atapi_change_speed: invalid drive speed %d\n", 24719 (uchar_t)data); 24720 kmem_free(sense, BUFLEN_MODE_CDROM_CAP); 24721 return (EINVAL); 24722 } 24723 24724 /* Check the request against the drive's max speed. */ 24725 if (current_speed != 0xffff) { 24726 if (current_speed > max_speed) { 24727 kmem_free(sense, BUFLEN_MODE_CDROM_CAP); 24728 return (EINVAL); 24729 } 24730 } 24731 24732 /* 24733 * Build and send the SET SPEED command 24734 * 24735 * Note: The SET SPEED (0xBB) command used in this routine is 24736 * obsolete per the SCSI MMC spec but still supported in the 24737 * MT FUJI vendor spec. Most equipment is adhereing to MT FUJI 24738 * therefore the command is still implemented in this routine. 24739 */ 24740 bzero(cdb, sizeof (cdb)); 24741 cdb[0] = (char)SCMD_SET_CDROM_SPEED; 24742 cdb[2] = (uchar_t)(current_speed >> 8); 24743 cdb[3] = (uchar_t)current_speed; 24744 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 24745 com->uscsi_cdb = (caddr_t)cdb; 24746 com->uscsi_cdblen = CDB_GROUP5; 24747 com->uscsi_bufaddr = NULL; 24748 com->uscsi_buflen = 0; 24749 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT; 24750 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, 0, SD_PATH_STANDARD); 24751 break; 24752 default: 24753 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24754 "sr_atapi_change_speed: Command '%x' Not Supported\n", cmd); 24755 rval = EINVAL; 24756 } 24757 24758 if (sense) { 24759 kmem_free(sense, BUFLEN_MODE_CDROM_CAP); 24760 } 24761 if (com) { 24762 kmem_free(com, sizeof (*com)); 24763 } 24764 return (rval); 24765 } 24766 24767 24768 /* 24769 * Function: sr_pause_resume() 24770 * 24771 * Description: This routine is the driver entry point for handling CD-ROM 24772 * pause/resume ioctl requests. This only affects the audio play 24773 * operation. 24774 * 24775 * Arguments: dev - the device 'dev_t' 24776 * cmd - the request type; one of CDROMPAUSE or CDROMRESUME, used 24777 * for setting the resume bit of the cdb. 
24778 * 24779 * Return Code: the code returned by sd_send_scsi_cmd() 24780 * EINVAL if invalid mode specified 24781 * 24782 */ 24783 24784 static int 24785 sr_pause_resume(dev_t dev, int cmd) 24786 { 24787 struct sd_lun *un; 24788 struct uscsi_cmd *com; 24789 char cdb[CDB_GROUP1]; 24790 int rval; 24791 24792 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24793 return (ENXIO); 24794 } 24795 24796 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 24797 bzero(cdb, CDB_GROUP1); 24798 cdb[0] = SCMD_PAUSE_RESUME; 24799 switch (cmd) { 24800 case CDROMRESUME: 24801 cdb[8] = 1; 24802 break; 24803 case CDROMPAUSE: 24804 cdb[8] = 0; 24805 break; 24806 default: 24807 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_pause_resume:" 24808 " Command '%x' Not Supported\n", cmd); 24809 rval = EINVAL; 24810 goto done; 24811 } 24812 24813 com->uscsi_cdb = cdb; 24814 com->uscsi_cdblen = CDB_GROUP1; 24815 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT; 24816 24817 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 24818 SD_PATH_STANDARD); 24819 24820 done: 24821 kmem_free(com, sizeof (*com)); 24822 return (rval); 24823 } 24824 24825 24826 /* 24827 * Function: sr_play_msf() 24828 * 24829 * Description: This routine is the driver entry point for handling CD-ROM 24830 * ioctl requests to output the audio signals at the specified 24831 * starting address and continue the audio play until the specified 24832 * ending address (CDROMPLAYMSF). The address is in Minute Second 24833 * Frame (MSF) format. 24834 * 24835 * Arguments: dev - the device 'dev_t' 24836 * data - pointer to user provided audio msf structure, 24837 * specifying start/end addresses. 24838 * flag - this argument is a pass through to ddi_copyxxx() 24839 * directly from the mode argument of ioctl().
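 *
 *		Illustrative userland sketch (hypothetical names): play
 *		from 2:00.00 through 4:30.00 on the disc:
 *
 *		    struct cdrom_msf m = { 0 };
 *		    m.cdmsf_min0 = 2;   m.cdmsf_sec0 = 0;   m.cdmsf_frame0 = 0;
 *		    m.cdmsf_min1 = 4;   m.cdmsf_sec1 = 30;  m.cdmsf_frame1 = 0;
 *		    (void) ioctl(fd, CDROMPLAYMSF, &m);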
24840 * 24841 * Return Code: the code returned by sd_send_scsi_cmd() 24842 * EFAULT if ddi_copyxxx() fails 24843 * ENXIO if fail ddi_get_soft_state 24844 * EINVAL if data pointer is NULL 24845 */ 24846 24847 static int 24848 sr_play_msf(dev_t dev, caddr_t data, int flag) 24849 { 24850 struct sd_lun *un; 24851 struct uscsi_cmd *com; 24852 struct cdrom_msf msf_struct; 24853 struct cdrom_msf *msf = &msf_struct; 24854 char cdb[CDB_GROUP1]; 24855 int rval; 24856 24857 if (data == NULL) { 24858 return (EINVAL); 24859 } 24860 24861 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24862 return (ENXIO); 24863 } 24864 24865 if (ddi_copyin(data, msf, sizeof (struct cdrom_msf), flag)) { 24866 return (EFAULT); 24867 } 24868 24869 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 24870 bzero(cdb, CDB_GROUP1); 24871 cdb[0] = SCMD_PLAYAUDIO_MSF; 24872 if (un->un_f_cfg_playmsf_bcd == TRUE) { 24873 cdb[3] = BYTE_TO_BCD(msf->cdmsf_min0); 24874 cdb[4] = BYTE_TO_BCD(msf->cdmsf_sec0); 24875 cdb[5] = BYTE_TO_BCD(msf->cdmsf_frame0); 24876 cdb[6] = BYTE_TO_BCD(msf->cdmsf_min1); 24877 cdb[7] = BYTE_TO_BCD(msf->cdmsf_sec1); 24878 cdb[8] = BYTE_TO_BCD(msf->cdmsf_frame1); 24879 } else { 24880 cdb[3] = msf->cdmsf_min0; 24881 cdb[4] = msf->cdmsf_sec0; 24882 cdb[5] = msf->cdmsf_frame0; 24883 cdb[6] = msf->cdmsf_min1; 24884 cdb[7] = msf->cdmsf_sec1; 24885 cdb[8] = msf->cdmsf_frame1; 24886 } 24887 com->uscsi_cdb = cdb; 24888 com->uscsi_cdblen = CDB_GROUP1; 24889 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT; 24890 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 24891 SD_PATH_STANDARD); 24892 kmem_free(com, sizeof (*com)); 24893 return (rval); 24894 } 24895 24896 24897 /* 24898 * Function: sr_play_trkind() 24899 * 24900 * Description: This routine is the driver entry point for handling CD-ROM 24901 * ioctl requests to output the audio signals at the specified 24902 * starting address and continue the audio play until the specified 24903 * ending address (CDROMPLAYTRKIND). The address is in Track Index 24904 * format. 24905 * 24906 * Arguments: dev - the device 'dev_t' 24907 * data - pointer to user provided audio track/index structure, 24908 * specifying start/end addresses. 24909 * flag - this argument is a pass through to ddi_copyxxx() 24910 * directly from the mode argument of ioctl(). 
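 *
 *		Illustrative userland sketch (hypothetical names): play
 *		from track 1 index 1 through track 3 index 1:
 *
 *		    struct cdrom_ti t = { 1, 1, 3, 1 };
 *		    (void) ioctl(fd, CDROMPLAYTRKIND, &t);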
24911 * 24912 * Return Code: the code returned by sd_send_scsi_cmd() 24913 * EFAULT if ddi_copyxxx() fails 24914 * ENXIO if fail ddi_get_soft_state 24915 * EINVAL if data pointer is NULL 24916 */ 24917 24918 static int 24919 sr_play_trkind(dev_t dev, caddr_t data, int flag) 24920 { 24921 struct cdrom_ti ti_struct; 24922 struct cdrom_ti *ti = &ti_struct; 24923 struct uscsi_cmd *com = NULL; 24924 char cdb[CDB_GROUP1]; 24925 int rval; 24926 24927 if (data == NULL) { 24928 return (EINVAL); 24929 } 24930 24931 if (ddi_copyin(data, ti, sizeof (struct cdrom_ti), flag)) { 24932 return (EFAULT); 24933 } 24934 24935 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 24936 bzero(cdb, CDB_GROUP1); 24937 cdb[0] = SCMD_PLAYAUDIO_TI; 24938 cdb[4] = ti->cdti_trk0; 24939 cdb[5] = ti->cdti_ind0; 24940 cdb[7] = ti->cdti_trk1; 24941 cdb[8] = ti->cdti_ind1; 24942 com->uscsi_cdb = cdb; 24943 com->uscsi_cdblen = CDB_GROUP1; 24944 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT; 24945 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 24946 SD_PATH_STANDARD); 24947 kmem_free(com, sizeof (*com)); 24948 return (rval); 24949 } 24950 24951 24952 /* 24953 * Function: sr_read_all_subcodes() 24954 * 24955 * Description: This routine is the driver entry point for handling CD-ROM 24956 * ioctl requests to return raw subcode data while the target is 24957 * playing audio (CDROMSUBCODE). 24958 * 24959 * Arguments: dev - the device 'dev_t' 24960 * data - pointer to user provided cdrom subcode structure, 24961 * specifying the transfer length and address. 24962 * flag - this argument is a pass through to ddi_copyxxx() 24963 * directly from the mode argument of ioctl(). 24964 * 24965 * Return Code: the code returned by sd_send_scsi_cmd() 24966 * EFAULT if ddi_copyxxx() fails 24967 * ENXIO if fail ddi_get_soft_state 24968 * EINVAL if data pointer is NULL 24969 */ 24970 24971 static int 24972 sr_read_all_subcodes(dev_t dev, caddr_t data, int flag) 24973 { 24974 struct sd_lun *un = NULL; 24975 struct uscsi_cmd *com = NULL; 24976 struct cdrom_subcode *subcode = NULL; 24977 int rval; 24978 size_t buflen; 24979 char cdb[CDB_GROUP5]; 24980 24981 #ifdef _MULTI_DATAMODEL 24982 /* To support ILP32 applications in an LP64 world */ 24983 struct cdrom_subcode32 cdrom_subcode32; 24984 struct cdrom_subcode32 *cdsc32 = &cdrom_subcode32; 24985 #endif 24986 if (data == NULL) { 24987 return (EINVAL); 24988 } 24989 24990 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24991 return (ENXIO); 24992 } 24993 24994 subcode = kmem_zalloc(sizeof (struct cdrom_subcode), KM_SLEEP); 24995 24996 #ifdef _MULTI_DATAMODEL 24997 switch (ddi_model_convert_from(flag & FMODELS)) { 24998 case DDI_MODEL_ILP32: 24999 if (ddi_copyin(data, cdsc32, sizeof (*cdsc32), flag)) { 25000 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25001 "sr_read_all_subcodes: ddi_copyin Failed\n"); 25002 kmem_free(subcode, sizeof (struct cdrom_subcode)); 25003 return (EFAULT); 25004 } 25005 /* Convert the ILP32 uscsi data from the application to LP64 */ 25006 cdrom_subcode32tocdrom_subcode(cdsc32, subcode); 25007 break; 25008 case DDI_MODEL_NONE: 25009 if (ddi_copyin(data, subcode, 25010 sizeof (struct cdrom_subcode), flag)) { 25011 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25012 "sr_read_all_subcodes: ddi_copyin Failed\n"); 25013 kmem_free(subcode, sizeof (struct cdrom_subcode)); 25014 return (EFAULT); 25015 } 25016 break; 25017 } 25018 #else /* ! 
_MULTI_DATAMODEL */ 25019 if (ddi_copyin(data, subcode, sizeof (struct cdrom_subcode), flag)) { 25020 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25021 "sr_read_all_subcodes: ddi_copyin Failed\n"); 25022 kmem_free(subcode, sizeof (struct cdrom_subcode)); 25023 return (EFAULT); 25024 } 25025 #endif /* _MULTI_DATAMODEL */ 25026 25027 /* 25028 * Since MMC-2 expects max 3 bytes for length, check if the 25029 * length input is greater than 3 bytes 25030 */ 25031 if ((subcode->cdsc_length & 0xFF000000) != 0) { 25032 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25033 "sr_read_all_subcodes: " 25034 "cdrom transfer length too large: %d (limit %d)\n", 25035 subcode->cdsc_length, 0xFFFFFF); 25036 kmem_free(subcode, sizeof (struct cdrom_subcode)); 25037 return (EINVAL); 25038 } 25039 25040 buflen = CDROM_BLK_SUBCODE * subcode->cdsc_length; 25041 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 25042 bzero(cdb, CDB_GROUP5); 25043 25044 if (un->un_f_mmc_cap == TRUE) { 25045 cdb[0] = (char)SCMD_READ_CD; 25046 cdb[2] = (char)0xff; 25047 cdb[3] = (char)0xff; 25048 cdb[4] = (char)0xff; 25049 cdb[5] = (char)0xff; 25050 cdb[6] = (((subcode->cdsc_length) & 0x00ff0000) >> 16); 25051 cdb[7] = (((subcode->cdsc_length) & 0x0000ff00) >> 8); 25052 cdb[8] = ((subcode->cdsc_length) & 0x000000ff); 25053 cdb[10] = 1; 25054 } else { 25055 /* 25056 * Note: A vendor specific command (0xDF) is being used here to 25057 * request a read of all subcodes. 25058 */ 25059 cdb[0] = (char)SCMD_READ_ALL_SUBCODES; 25060 cdb[6] = (((subcode->cdsc_length) & 0xff000000) >> 24); 25061 cdb[7] = (((subcode->cdsc_length) & 0x00ff0000) >> 16); 25062 cdb[8] = (((subcode->cdsc_length) & 0x0000ff00) >> 8); 25063 cdb[9] = ((subcode->cdsc_length) & 0x000000ff); 25064 } 25065 com->uscsi_cdb = cdb; 25066 com->uscsi_cdblen = CDB_GROUP5; 25067 com->uscsi_bufaddr = (caddr_t)subcode->cdsc_addr; 25068 com->uscsi_buflen = buflen; 25069 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 25070 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE, 25071 SD_PATH_STANDARD); 25072 kmem_free(subcode, sizeof (struct cdrom_subcode)); 25073 kmem_free(com, sizeof (*com)); 25074 return (rval); 25075 } 25076 25077 25078 /* 25079 * Function: sr_read_subchannel() 25080 * 25081 * Description: This routine is the driver entry point for handling CD-ROM 25082 * ioctl requests to return the Q sub-channel data of the CD 25083 * current position block (CDROMSUBCHNL). The data includes the 25084 * track number, index number, absolute CD-ROM address (LBA or MSF 25085 * format per the user), track relative CD-ROM address (LBA or MSF 25086 * format per the user), control data and audio status. 25087 * 25088 * Arguments: dev - the device 'dev_t' 25089 * data - pointer to user provided cdrom sub-channel structure 25090 * flag - this argument is a pass through to ddi_copyxxx() 25091 * directly from the mode argument of ioctl().
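 *
 *		The 16 byte response parsed below is laid out as: byte 1
 *		audio status, byte 5 ADR/CTRL, byte 6 track, byte 7 index,
 *		bytes 8-11 the absolute address and bytes 12-15 the track
 *		relative address (LBA or MSF per the requested format).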
25092 * 25093 * Return Code: the code returned by sd_send_scsi_cmd() 25094 * EFAULT if ddi_copyxxx() fails 25095 * ENXIO if fail ddi_get_soft_state 25096 * EINVAL if data pointer is NULL 25097 */ 25098 25099 static int 25100 sr_read_subchannel(dev_t dev, caddr_t data, int flag) 25101 { 25102 struct sd_lun *un; 25103 struct uscsi_cmd *com; 25104 struct cdrom_subchnl subchannel; 25105 struct cdrom_subchnl *subchnl = &subchannel; 25106 char cdb[CDB_GROUP1]; 25107 caddr_t buffer; 25108 int rval; 25109 25110 if (data == NULL) { 25111 return (EINVAL); 25112 } 25113 25114 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 25115 (un->un_state == SD_STATE_OFFLINE)) { 25116 return (ENXIO); 25117 } 25118 25119 if (ddi_copyin(data, subchnl, sizeof (struct cdrom_subchnl), flag)) { 25120 return (EFAULT); 25121 } 25122 25123 buffer = kmem_zalloc((size_t)16, KM_SLEEP); 25124 bzero(cdb, CDB_GROUP1); 25125 cdb[0] = SCMD_READ_SUBCHANNEL; 25126 /* Set the MSF bit based on the user requested address format */ 25127 cdb[1] = (subchnl->cdsc_format & CDROM_LBA) ? 0 : 0x02; 25128 /* 25129 * Set the Q bit in byte 2 to indicate that Q sub-channel data should 25130 * be returned 25131 */ 25132 cdb[2] = 0x40; 25133 /* 25134 * Set byte 3 to specify the return data format. A value of 0x01 25135 * indicates that the CD-ROM current position should be returned. 25136 */ 25137 cdb[3] = 0x01; 25138 cdb[8] = 0x10; 25139 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 25140 com->uscsi_cdb = cdb; 25141 com->uscsi_cdblen = CDB_GROUP1; 25142 com->uscsi_bufaddr = buffer; 25143 com->uscsi_buflen = 16; 25144 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 25145 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 25146 SD_PATH_STANDARD); 25147 if (rval != 0) { 25148 kmem_free(buffer, 16); 25149 kmem_free(com, sizeof (*com)); 25150 return (rval); 25151 } 25152 25153 /* Process the returned Q sub-channel data */ 25154 subchnl->cdsc_audiostatus = buffer[1]; 25155 subchnl->cdsc_adr = (buffer[5] & 0xF0); 25156 subchnl->cdsc_ctrl = (buffer[5] & 0x0F); 25157 subchnl->cdsc_trk = buffer[6]; 25158 subchnl->cdsc_ind = buffer[7]; 25159 if (subchnl->cdsc_format & CDROM_LBA) { 25160 subchnl->cdsc_absaddr.lba = 25161 ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) + 25162 ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]); 25163 subchnl->cdsc_reladdr.lba = 25164 ((uchar_t)buffer[12] << 24) + ((uchar_t)buffer[13] << 16) + 25165 ((uchar_t)buffer[14] << 8) + ((uchar_t)buffer[15]); 25166 } else if (un->un_f_cfg_readsub_bcd == TRUE) { 25167 subchnl->cdsc_absaddr.msf.minute = BCD_TO_BYTE(buffer[9]); 25168 subchnl->cdsc_absaddr.msf.second = BCD_TO_BYTE(buffer[10]); 25169 subchnl->cdsc_absaddr.msf.frame = BCD_TO_BYTE(buffer[11]); 25170 subchnl->cdsc_reladdr.msf.minute = BCD_TO_BYTE(buffer[13]); 25171 subchnl->cdsc_reladdr.msf.second = BCD_TO_BYTE(buffer[14]); 25172 subchnl->cdsc_reladdr.msf.frame = BCD_TO_BYTE(buffer[15]); 25173 } else { 25174 subchnl->cdsc_absaddr.msf.minute = buffer[9]; 25175 subchnl->cdsc_absaddr.msf.second = buffer[10]; 25176 subchnl->cdsc_absaddr.msf.frame = buffer[11]; 25177 subchnl->cdsc_reladdr.msf.minute = buffer[13]; 25178 subchnl->cdsc_reladdr.msf.second = buffer[14]; 25179 subchnl->cdsc_reladdr.msf.frame = buffer[15]; 25180 } 25181 kmem_free(buffer, 16); 25182 kmem_free(com, sizeof (*com)); 25183 if (ddi_copyout(subchnl, data, sizeof (struct cdrom_subchnl), flag) 25184 != 0) { 25185 return (EFAULT); 25186 } 25187 return (rval); 25188 } 25189 25190 25191 /* 25192 * Function: sr_read_tocentry() 25193 *
25194 * Description: This routine is the driver entry point for handling CD-ROM 25195 * ioctl requests to read from the Table of Contents (TOC) 25196 * (CDROMREADTOCENTRY). This routine provides the ADR and CTRL 25197 * fields, the starting address (LBA or MSF format per the user) 25198 * and the data mode if the user specified track is a data track. 25199 * 25200 * Note: The READ HEADER (0x44) command used in this routine is 25201 * obsolete per the SCSI MMC spec but still supported in the 25202 * MT FUJI vendor spec. Most equipment is adhering to MT FUJI; 25203 * therefore the command is still implemented in this routine. 25204 * 25205 * Arguments: dev - the device 'dev_t' 25206 * data - pointer to user provided toc entry structure, 25207 * specifying the track # and the address format 25208 * (LBA or MSF). 25209 * flag - this argument is a pass through to ddi_copyxxx() 25210 * directly from the mode argument of ioctl(). 25211 * 25212 * Return Code: the code returned by sd_send_scsi_cmd() 25213 * EFAULT if ddi_copyxxx() fails 25214 * ENXIO if fail ddi_get_soft_state 25215 * EINVAL if data pointer is NULL 25216 */ 25217 25218 static int 25219 sr_read_tocentry(dev_t dev, caddr_t data, int flag) 25220 { 25221 struct sd_lun *un = NULL; 25222 struct uscsi_cmd *com; 25223 struct cdrom_tocentry toc_entry; 25224 struct cdrom_tocentry *entry = &toc_entry; 25225 caddr_t buffer; 25226 int rval; 25227 char cdb[CDB_GROUP1]; 25228 25229 if (data == NULL) { 25230 return (EINVAL); 25231 } 25232 25233 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 25234 (un->un_state == SD_STATE_OFFLINE)) { 25235 return (ENXIO); 25236 } 25237 25238 if (ddi_copyin(data, entry, sizeof (struct cdrom_tocentry), flag)) { 25239 return (EFAULT); 25240 } 25241 25242 /* Validate the requested track and address format */ 25243 if (!(entry->cdte_format & (CDROM_LBA | CDROM_MSF))) { 25244 return (EINVAL); 25245 } 25246 25247 if (entry->cdte_track == 0) { 25248 return (EINVAL); 25249 } 25250 25251 buffer = kmem_zalloc((size_t)12, KM_SLEEP); 25252 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 25253 bzero(cdb, CDB_GROUP1); 25254 25255 cdb[0] = SCMD_READ_TOC; 25256 /* Set the MSF bit based on the user requested address format */ 25257 cdb[1] = ((entry->cdte_format & CDROM_LBA) ? 0 : 2); 25258 if (un->un_f_cfg_read_toc_trk_bcd == TRUE) { 25259 cdb[6] = BYTE_TO_BCD(entry->cdte_track); 25260 } else { 25261 cdb[6] = entry->cdte_track; 25262 } 25263 25264 /* 25265 * Bytes 7 & 8 are the 12 byte allocation length for a single entry.
25266 * (4 byte TOC response header + 8 byte track descriptor) 25267 */ 25268 cdb[8] = 12; 25269 com->uscsi_cdb = cdb; 25270 com->uscsi_cdblen = CDB_GROUP1; 25271 com->uscsi_bufaddr = buffer; 25272 com->uscsi_buflen = 0x0C; 25273 com->uscsi_flags = (USCSI_DIAGNOSE | USCSI_SILENT | USCSI_READ); 25274 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 25275 SD_PATH_STANDARD); 25276 if (rval != 0) { 25277 kmem_free(buffer, 12); 25278 kmem_free(com, sizeof (*com)); 25279 return (rval); 25280 } 25281 25282 /* Process the toc entry */ 25283 entry->cdte_adr = (buffer[5] & 0xF0) >> 4; 25284 entry->cdte_ctrl = (buffer[5] & 0x0F); 25285 if (entry->cdte_format & CDROM_LBA) { 25286 entry->cdte_addr.lba = 25287 ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) + 25288 ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]); 25289 } else if (un->un_f_cfg_read_toc_addr_bcd == TRUE) { 25290 entry->cdte_addr.msf.minute = BCD_TO_BYTE(buffer[9]); 25291 entry->cdte_addr.msf.second = BCD_TO_BYTE(buffer[10]); 25292 entry->cdte_addr.msf.frame = BCD_TO_BYTE(buffer[11]); 25293 /* 25294 * Send a READ TOC command using the LBA address format to get 25295 * the LBA for the track requested so it can be used in the 25296 * READ HEADER request 25297 * 25298 * Note: The MSF bit of the READ HEADER command specifies the 25299 * output format. The block address specified in that command 25300 * must be in LBA format. 25301 */ 25302 cdb[1] = 0; 25303 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 25304 SD_PATH_STANDARD); 25305 if (rval != 0) { 25306 kmem_free(buffer, 12); 25307 kmem_free(com, sizeof (*com)); 25308 return (rval); 25309 } 25310 } else { 25311 entry->cdte_addr.msf.minute = buffer[9]; 25312 entry->cdte_addr.msf.second = buffer[10]; 25313 entry->cdte_addr.msf.frame = buffer[11]; 25314 /* 25315 * Send a READ TOC command using the LBA address format to get 25316 * the LBA for the track requested so it can be used in the 25317 * READ HEADER request 25318 * 25319 * Note: The MSF bit of the READ HEADER command specifies the 25320 * output format. The block address specified in that command 25321 * must be in LBA format. 25322 */ 25323 cdb[1] = 0; 25324 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 25325 SD_PATH_STANDARD); 25326 if (rval != 0) { 25327 kmem_free(buffer, 12); 25328 kmem_free(com, sizeof (*com)); 25329 return (rval); 25330 } 25331 } 25332 25333 /* 25334 * Build and send the READ HEADER command to determine the data mode of 25335 * the user specified track. 25336 */ 25337 if ((entry->cdte_ctrl & CDROM_DATA_TRACK) && 25338 (entry->cdte_track != CDROM_LEADOUT)) { 25339 bzero(cdb, CDB_GROUP1); 25340 cdb[0] = SCMD_READ_HEADER; 25341 cdb[2] = buffer[8]; 25342 cdb[3] = buffer[9]; 25343 cdb[4] = buffer[10]; 25344 cdb[5] = buffer[11]; 25345 cdb[8] = 0x08; 25346 com->uscsi_buflen = 0x08; 25347 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 25348 SD_PATH_STANDARD); 25349 if (rval == 0) { 25350 entry->cdte_datamode = buffer[0]; 25351 } else { 25352 /* 25353 * The READ HEADER command failed; since it is 25354 * obsoleted in one spec, it's better to return 25355 * -1 for an invalid track so that we can still 25356 * receive the rest of the TOC data.
25357 */ 25358 entry->cdte_datamode = (uchar_t)-1; 25359 } 25360 } else { 25361 entry->cdte_datamode = (uchar_t)-1; 25362 } 25363 25364 kmem_free(buffer, 12); 25365 kmem_free(com, sizeof (*com)); 25366 if (ddi_copyout(entry, data, sizeof (struct cdrom_tocentry), flag) != 0) 25367 return (EFAULT); 25368 25369 return (rval); 25370 } 25371 25372 25373 /* 25374 * Function: sr_read_tochdr() 25375 * 25376 * Description: This routine is the driver entry point for handling CD-ROM 25377 * ioctl requests to read the Table of Contents (TOC) header 25378 * (CDROMREADTOHDR). The TOC header consists of the disk starting 25379 * and ending track numbers 25380 * 25381 * Arguments: dev - the device 'dev_t' 25382 * data - pointer to user provided toc header structure, 25383 * specifying the starting and ending track numbers. 25384 * flag - this argument is a pass through to ddi_copyxxx() 25385 * directly from the mode argument of ioctl(). 25386 * 25387 * Return Code: the code returned by sd_send_scsi_cmd() 25388 * EFAULT if ddi_copyxxx() fails 25389 * ENXIO if fail ddi_get_soft_state 25390 * EINVAL if data pointer is NULL 25391 */ 25392 25393 static int 25394 sr_read_tochdr(dev_t dev, caddr_t data, int flag) 25395 { 25396 struct sd_lun *un; 25397 struct uscsi_cmd *com; 25398 struct cdrom_tochdr toc_header; 25399 struct cdrom_tochdr *hdr = &toc_header; 25400 char cdb[CDB_GROUP1]; 25401 int rval; 25402 caddr_t buffer; 25403 25404 if (data == NULL) { 25405 return (EINVAL); 25406 } 25407 25408 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 25409 (un->un_state == SD_STATE_OFFLINE)) { 25410 return (ENXIO); 25411 } 25412 25413 buffer = kmem_zalloc(4, KM_SLEEP); 25414 bzero(cdb, CDB_GROUP1); 25415 cdb[0] = SCMD_READ_TOC; 25416 /* 25417 * Specifying a track number of 0x00 in the READ TOC command indicates 25418 * that the TOC header should be returned 25419 */ 25420 cdb[6] = 0x00; 25421 /* 25422 * Bytes 7 & 8 are the 4 byte allocation length for TOC header. 25423 * (2 byte data len + 1 byte starting track # + 1 byte ending track #) 25424 */ 25425 cdb[8] = 0x04; 25426 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 25427 com->uscsi_cdb = cdb; 25428 com->uscsi_cdblen = CDB_GROUP1; 25429 com->uscsi_bufaddr = buffer; 25430 com->uscsi_buflen = 0x04; 25431 com->uscsi_timeout = 300; 25432 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 25433 25434 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 25435 SD_PATH_STANDARD); 25436 if (un->un_f_cfg_read_toc_trk_bcd == TRUE) { 25437 hdr->cdth_trk0 = BCD_TO_BYTE(buffer[2]); 25438 hdr->cdth_trk1 = BCD_TO_BYTE(buffer[3]); 25439 } else { 25440 hdr->cdth_trk0 = buffer[2]; 25441 hdr->cdth_trk1 = buffer[3]; 25442 } 25443 kmem_free(buffer, 4); 25444 kmem_free(com, sizeof (*com)); 25445 if (ddi_copyout(hdr, data, sizeof (struct cdrom_tochdr), flag) != 0) { 25446 return (EFAULT); 25447 } 25448 return (rval); 25449 } 25450 25451 25452 /* 25453 * Note: The following sr_read_mode1(), sr_read_cd_mode2(), sr_read_mode2(), 25454 * sr_read_cdda(), sr_read_cdxa(), routines implement driver support for 25455 * handling CDROMREAD ioctl requests for mode 1 user data, mode 2 user data, 25456 * digital audio and extended architecture digital audio. These modes are 25457 * defined in the IEC908 (Red Book), ISO10149 (Yellow Book), and the SCSI3 25458 * MMC specs. 
25459 * 25460 * In addition to support for the various data formats these routines also 25461 * include support for devices that implement only the direct access READ 25462 * commands (0x08, 0x28), devices that implement the READ_CD commands 25463 * (0xBE, 0xD4), and devices that implement the vendor unique READ CDDA and 25464 * READ CDXA commands (0xD8, 0xDB) 25465 */ 25466 25467 /* 25468 * Function: sr_read_mode1() 25469 * 25470 * Description: This routine is the driver entry point for handling CD-ROM 25471 * ioctl read mode1 requests (CDROMREADMODE1). 25472 * 25473 * Arguments: dev - the device 'dev_t' 25474 * data - pointer to user provided cd read structure specifying 25475 * the lba buffer address and length. 25476 * flag - this argument is a pass through to ddi_copyxxx() 25477 * directly from the mode argument of ioctl(). 25478 * 25479 * Return Code: the code returned by sd_send_scsi_cmd() 25480 * EFAULT if ddi_copyxxx() fails 25481 * ENXIO if fail ddi_get_soft_state 25482 * EINVAL if data pointer is NULL 25483 */ 25484 25485 static int 25486 sr_read_mode1(dev_t dev, caddr_t data, int flag) 25487 { 25488 struct sd_lun *un; 25489 struct cdrom_read mode1_struct; 25490 struct cdrom_read *mode1 = &mode1_struct; 25491 int rval; 25492 #ifdef _MULTI_DATAMODEL 25493 /* To support ILP32 applications in an LP64 world */ 25494 struct cdrom_read32 cdrom_read32; 25495 struct cdrom_read32 *cdrd32 = &cdrom_read32; 25496 #endif /* _MULTI_DATAMODEL */ 25497 25498 if (data == NULL) { 25499 return (EINVAL); 25500 } 25501 25502 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 25503 (un->un_state == SD_STATE_OFFLINE)) { 25504 return (ENXIO); 25505 } 25506 25507 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 25508 "sd_read_mode1: entry: un:0x%p\n", un); 25509 25510 #ifdef _MULTI_DATAMODEL 25511 switch (ddi_model_convert_from(flag & FMODELS)) { 25512 case DDI_MODEL_ILP32: 25513 if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) { 25514 return (EFAULT); 25515 } 25516 /* Convert the ILP32 uscsi data from the application to LP64 */ 25517 cdrom_read32tocdrom_read(cdrd32, mode1); 25518 break; 25519 case DDI_MODEL_NONE: 25520 if (ddi_copyin(data, mode1, sizeof (struct cdrom_read), flag)) { 25521 return (EFAULT); 25522 } 25523 } 25524 #else /* ! _MULTI_DATAMODEL */ 25525 if (ddi_copyin(data, mode1, sizeof (struct cdrom_read), flag)) { 25526 return (EFAULT); 25527 } 25528 #endif /* _MULTI_DATAMODEL */ 25529 25530 rval = sd_send_scsi_READ(un, mode1->cdread_bufaddr, 25531 mode1->cdread_buflen, mode1->cdread_lba, SD_PATH_STANDARD); 25532 25533 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 25534 "sd_read_mode1: exit: un:0x%p\n", un); 25535 25536 return (rval); 25537 } 25538 25539 25540 /* 25541 * Function: sr_read_cd_mode2() 25542 * 25543 * Description: This routine is the driver entry point for handling CD-ROM 25544 * ioctl read mode2 requests (CDROMREADMODE2) for devices that 25545 * support the READ CD (0xBE) command or the 1st generation 25546 * READ CD (0xD4) command. 25547 * 25548 * Arguments: dev - the device 'dev_t' 25549 * data - pointer to user provided cd read structure specifying 25550 * the lba buffer address and length. 25551 * flag - this argument is a pass through to ddi_copyxxx() 25552 * directly from the mode argument of ioctl(). 
25553 * 25554 * Return Code: the code returned by sd_send_scsi_cmd() 25555 * EFAULT if ddi_copyxxx() fails 25556 * ENXIO if fail ddi_get_soft_state 25557 * EINVAL if data pointer is NULL 25558 */ 25559 25560 static int 25561 sr_read_cd_mode2(dev_t dev, caddr_t data, int flag) 25562 { 25563 struct sd_lun *un; 25564 struct uscsi_cmd *com; 25565 struct cdrom_read mode2_struct; 25566 struct cdrom_read *mode2 = &mode2_struct; 25567 uchar_t cdb[CDB_GROUP5]; 25568 int nblocks; 25569 int rval; 25570 #ifdef _MULTI_DATAMODEL 25571 /* To support ILP32 applications in an LP64 world */ 25572 struct cdrom_read32 cdrom_read32; 25573 struct cdrom_read32 *cdrd32 = &cdrom_read32; 25574 #endif /* _MULTI_DATAMODEL */ 25575 25576 if (data == NULL) { 25577 return (EINVAL); 25578 } 25579 25580 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 25581 (un->un_state == SD_STATE_OFFLINE)) { 25582 return (ENXIO); 25583 } 25584 25585 #ifdef _MULTI_DATAMODEL 25586 switch (ddi_model_convert_from(flag & FMODELS)) { 25587 case DDI_MODEL_ILP32: 25588 if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) { 25589 return (EFAULT); 25590 } 25591 /* Convert the ILP32 uscsi data from the application to LP64 */ 25592 cdrom_read32tocdrom_read(cdrd32, mode2); 25593 break; 25594 case DDI_MODEL_NONE: 25595 if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) { 25596 return (EFAULT); 25597 } 25598 break; 25599 } 25600 25601 #else /* ! _MULTI_DATAMODEL */ 25602 if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) { 25603 return (EFAULT); 25604 } 25605 #endif /* _MULTI_DATAMODEL */ 25606 25607 bzero(cdb, sizeof (cdb)); 25608 if (un->un_f_cfg_read_cd_xd4 == TRUE) { 25609 /* Read command supported by 1st generation atapi drives */ 25610 cdb[0] = SCMD_READ_CDD4; 25611 } else { 25612 /* Universal CD Access Command */ 25613 cdb[0] = SCMD_READ_CD; 25614 } 25615 25616 /* 25617 * Set expected sector type to: 2336s byte, Mode 2 Yellow Book 25618 */ 25619 cdb[1] = CDROM_SECTOR_TYPE_MODE2; 25620 25621 /* set the start address */ 25622 cdb[2] = (uchar_t)((mode2->cdread_lba >> 24) & 0XFF); 25623 cdb[3] = (uchar_t)((mode2->cdread_lba >> 16) & 0XFF); 25624 cdb[4] = (uchar_t)((mode2->cdread_lba >> 8) & 0xFF); 25625 cdb[5] = (uchar_t)(mode2->cdread_lba & 0xFF); 25626 25627 /* set the transfer length */ 25628 nblocks = mode2->cdread_buflen / 2336; 25629 cdb[6] = (uchar_t)(nblocks >> 16); 25630 cdb[7] = (uchar_t)(nblocks >> 8); 25631 cdb[8] = (uchar_t)nblocks; 25632 25633 /* set the filter bits */ 25634 cdb[9] = CDROM_READ_CD_USERDATA; 25635 25636 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 25637 com->uscsi_cdb = (caddr_t)cdb; 25638 com->uscsi_cdblen = sizeof (cdb); 25639 com->uscsi_bufaddr = mode2->cdread_bufaddr; 25640 com->uscsi_buflen = mode2->cdread_buflen; 25641 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 25642 25643 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE, 25644 SD_PATH_STANDARD); 25645 kmem_free(com, sizeof (*com)); 25646 return (rval); 25647 } 25648 25649 25650 /* 25651 * Function: sr_read_mode2() 25652 * 25653 * Description: This routine is the driver entry point for handling CD-ROM 25654 * ioctl read mode2 requests (CDROMREADMODE2) for devices that 25655 * do not support the READ CD (0xBE) command. 25656 * 25657 * Arguments: dev - the device 'dev_t' 25658 * data - pointer to user provided cd read structure specifying 25659 * the lba buffer address and length. 25660 * flag - this argument is a pass through to ddi_copyxxx() 25661 * directly from the mode argument of ioctl(). 
25662 * 25663 * Return Code: the code returned by sd_send_scsi_cmd() 25664 * EFAULT if ddi_copyxxx() fails 25665 * ENXIO if fail ddi_get_soft_state 25666 * EINVAL if data pointer is NULL 25667 * EIO if fail to reset block size 25668 * EAGAIN if commands are in progress in the driver 25669 */ 25670 25671 static int 25672 sr_read_mode2(dev_t dev, caddr_t data, int flag) 25673 { 25674 struct sd_lun *un; 25675 struct cdrom_read mode2_struct; 25676 struct cdrom_read *mode2 = &mode2_struct; 25677 int rval; 25678 uint32_t restore_blksize; 25679 struct uscsi_cmd *com; 25680 uchar_t cdb[CDB_GROUP0]; 25681 int nblocks; 25682 25683 #ifdef _MULTI_DATAMODEL 25684 /* To support ILP32 applications in an LP64 world */ 25685 struct cdrom_read32 cdrom_read32; 25686 struct cdrom_read32 *cdrd32 = &cdrom_read32; 25687 #endif /* _MULTI_DATAMODEL */ 25688 25689 if (data == NULL) { 25690 return (EINVAL); 25691 } 25692 25693 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 25694 (un->un_state == SD_STATE_OFFLINE)) { 25695 return (ENXIO); 25696 } 25697 25698 /* 25699 * Because this routine will update the device and driver block size 25700 * being used we want to make sure there are no commands in progress. 25701 * If commands are in progress the user will have to try again. 25702 * 25703 * We check for 1 instead of 0 because we increment un_ncmds_in_driver 25704 * in sdioctl to protect commands from sdioctl through to the top of 25705 * sd_uscsi_strategy. See sdioctl for details. 25706 */ 25707 mutex_enter(SD_MUTEX(un)); 25708 if (un->un_ncmds_in_driver != 1) { 25709 mutex_exit(SD_MUTEX(un)); 25710 return (EAGAIN); 25711 } 25712 mutex_exit(SD_MUTEX(un)); 25713 25714 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 25715 "sd_read_mode2: entry: un:0x%p\n", un); 25716 25717 #ifdef _MULTI_DATAMODEL 25718 switch (ddi_model_convert_from(flag & FMODELS)) { 25719 case DDI_MODEL_ILP32: 25720 if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) { 25721 return (EFAULT); 25722 } 25723 /* Convert the ILP32 uscsi data from the application to LP64 */ 25724 cdrom_read32tocdrom_read(cdrd32, mode2); 25725 break; 25726 case DDI_MODEL_NONE: 25727 if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) { 25728 return (EFAULT); 25729 } 25730 break; 25731 } 25732 #else /* ! 
_MULTI_DATAMODEL */ 25733 if (ddi_copyin(data, mode2, sizeof (*mode2), flag)) { 25734 return (EFAULT); 25735 } 25736 #endif /* _MULTI_DATAMODEL */ 25737 25738 /* Store the current target block size for restoration later */ 25739 restore_blksize = un->un_tgt_blocksize; 25740 25741 /* Change the device and soft state target block size to 2336 */ 25742 if (sr_sector_mode(dev, SD_MODE2_BLKSIZE) != 0) { 25743 rval = EIO; 25744 goto done; 25745 } 25746 25747 25748 bzero(cdb, sizeof (cdb)); 25749 25750 /* set READ operation */ 25751 cdb[0] = SCMD_READ; 25752 25753 /* adjust lba for 2kbyte blocks from 512 byte blocks */ 25754 mode2->cdread_lba >>= 2; 25755 25756 /* set the start address */ 25757 cdb[1] = (uchar_t)((mode2->cdread_lba >> 16) & 0x1F); 25758 cdb[2] = (uchar_t)((mode2->cdread_lba >> 8) & 0xFF); 25759 cdb[3] = (uchar_t)(mode2->cdread_lba & 0xFF); 25760 25761 /* set the transfer length */ 25762 nblocks = mode2->cdread_buflen / 2336; 25763 cdb[4] = (uchar_t)nblocks & 0xFF; 25764 25765 /* build command */ 25766 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 25767 com->uscsi_cdb = (caddr_t)cdb; 25768 com->uscsi_cdblen = sizeof (cdb); 25769 com->uscsi_bufaddr = mode2->cdread_bufaddr; 25770 com->uscsi_buflen = mode2->cdread_buflen; 25771 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 25772 25773 /* 25774 * Issue SCSI command with user space address for read buffer. 25775 * 25776 * This sends the command through the main channel in the driver. 25777 * 25778 * Since this is accessed via an IOCTL call, we go through the 25779 * standard path, so that if the device was powered down, then 25780 * it would be 'awakened' to handle the command. 25781 */ 25782 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE, 25783 SD_PATH_STANDARD); 25784 25785 kmem_free(com, sizeof (*com)); 25786 25787 /* Restore the device and soft state target block size */ 25788 if (sr_sector_mode(dev, restore_blksize) != 0) { 25789 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25790 "can't switch back to mode 1\n"); 25791 /* 25792 * If the read succeeded we still need to report 25793 * an error because we failed to restore the block size 25794 */ 25795 if (rval == 0) { 25796 rval = EIO; 25797 } 25798 } 25799 25800 done: 25801 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 25802 "sd_read_mode2: exit: un:0x%p\n", un); 25803 25804 return (rval); 25805 } 25806 25807 25808 /* 25809 * Function: sr_sector_mode() 25810 * 25811 * Description: This utility function is used by sr_read_mode2 to set the target 25812 * block size based on the user specified size. This is a legacy 25813 * implementation based upon a vendor specific mode page. 25814 * 25815 * Arguments: dev - the device 'dev_t' 25816 * blksize - the block size to be set: either 2336 or 25817 * 512. 
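 *
 *		As an illustrative sketch, the intended usage (as in
 *		sr_read_mode2() above) pairs a switch to the mode 2 block
 *		size with a later restore of the saved block size:
 *
 *			if (sr_sector_mode(dev, SD_MODE2_BLKSIZE) != 0)
 *				return (EIO);
 *			/* ... issue the 2336 byte block READ ... */
 *			(void) sr_sector_mode(dev, restore_blksize);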
25818 * 25819 * Return Code: the code returned by sd_send_scsi_cmd() 25820 * EFAULT if ddi_copyxxx() fails 25821 * ENXIO if fail ddi_get_soft_state 25822 * EINVAL if data pointer is NULL 25823 */ 25824 25825 static int 25826 sr_sector_mode(dev_t dev, uint32_t blksize) 25827 { 25828 struct sd_lun *un; 25829 uchar_t *sense; 25830 uchar_t *select; 25831 int rval; 25832 25833 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 25834 (un->un_state == SD_STATE_OFFLINE)) { 25835 return (ENXIO); 25836 } 25837 25838 sense = kmem_zalloc(20, KM_SLEEP); 25839 25840 /* Note: This is a vendor specific mode page (0x81) */ 25841 if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, sense, 20, 0x81, 25842 SD_PATH_STANDARD)) != 0) { 25843 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 25844 "sr_sector_mode: Mode Sense failed\n"); 25845 kmem_free(sense, 20); 25846 return (rval); 25847 } 25848 select = kmem_zalloc(20, KM_SLEEP); 25849 select[3] = 0x08; 25850 select[10] = ((blksize >> 8) & 0xff); 25851 select[11] = (blksize & 0xff); 25852 select[12] = 0x01; 25853 select[13] = 0x06; 25854 select[14] = sense[14]; 25855 select[15] = sense[15]; 25856 if (blksize == SD_MODE2_BLKSIZE) { 25857 select[14] |= 0x01; 25858 } 25859 25860 if ((rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, select, 20, 25861 SD_DONTSAVE_PAGE, SD_PATH_STANDARD)) != 0) { 25862 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 25863 "sr_sector_mode: Mode Select failed\n"); 25864 } else { 25865 /* 25866 * Only update the softstate block size if we successfully 25867 * changed the device block mode. 25868 */ 25869 mutex_enter(SD_MUTEX(un)); 25870 sd_update_block_info(un, blksize, 0); 25871 mutex_exit(SD_MUTEX(un)); 25872 } 25873 kmem_free(sense, 20); 25874 kmem_free(select, 20); 25875 return (rval); 25876 } 25877 25878 25879 /* 25880 * Function: sr_read_cdda() 25881 * 25882 * Description: This routine is the driver entry point for handling CD-ROM 25883 * ioctl requests to return CD-DA or subcode data. (CDROMCDDA) If 25884 * the target supports CDDA these requests are handled via a vendor 25885 * specific command (0xD8) If the target does not support CDDA 25886 * these requests are handled via the READ CD command (0xBE). 25887 * 25888 * Arguments: dev - the device 'dev_t' 25889 * data - pointer to user provided CD-DA structure specifying 25890 * the track starting address, transfer length, and 25891 * subcode options. 25892 * flag - this argument is a pass through to ddi_copyxxx() 25893 * directly from the mode argument of ioctl(). 
25894 * 25895 * Return Code: the code returned by sd_send_scsi_cmd() 25896 * EFAULT if ddi_copyxxx() fails 25897 * ENXIO if fail ddi_get_soft_state 25898 * EINVAL if invalid arguments are provided 25899 * ENOTTY 25900 */ 25901 25902 static int 25903 sr_read_cdda(dev_t dev, caddr_t data, int flag) 25904 { 25905 struct sd_lun *un; 25906 struct uscsi_cmd *com; 25907 struct cdrom_cdda *cdda; 25908 int rval; 25909 size_t buflen; 25910 char cdb[CDB_GROUP5]; 25911 25912 #ifdef _MULTI_DATAMODEL 25913 /* To support ILP32 applications in an LP64 world */ 25914 struct cdrom_cdda32 cdrom_cdda32; 25915 struct cdrom_cdda32 *cdda32 = &cdrom_cdda32; 25916 #endif /* _MULTI_DATAMODEL */ 25917 25918 if (data == NULL) { 25919 return (EINVAL); 25920 } 25921 25922 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 25923 return (ENXIO); 25924 } 25925 25926 cdda = kmem_zalloc(sizeof (struct cdrom_cdda), KM_SLEEP); 25927 25928 #ifdef _MULTI_DATAMODEL 25929 switch (ddi_model_convert_from(flag & FMODELS)) { 25930 case DDI_MODEL_ILP32: 25931 if (ddi_copyin(data, cdda32, sizeof (*cdda32), flag)) { 25932 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25933 "sr_read_cdda: ddi_copyin Failed\n"); 25934 kmem_free(cdda, sizeof (struct cdrom_cdda)); 25935 return (EFAULT); 25936 } 25937 /* Convert the ILP32 uscsi data from the application to LP64 */ 25938 cdrom_cdda32tocdrom_cdda(cdda32, cdda); 25939 break; 25940 case DDI_MODEL_NONE: 25941 if (ddi_copyin(data, cdda, sizeof (struct cdrom_cdda), flag)) { 25942 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25943 "sr_read_cdda: ddi_copyin Failed\n"); 25944 kmem_free(cdda, sizeof (struct cdrom_cdda)); 25945 return (EFAULT); 25946 } 25947 break; 25948 } 25949 #else /* ! _MULTI_DATAMODEL */ 25950 if (ddi_copyin(data, cdda, sizeof (struct cdrom_cdda), flag)) { 25951 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25952 "sr_read_cdda: ddi_copyin Failed\n"); 25953 kmem_free(cdda, sizeof (struct cdrom_cdda)); 25954 return (EFAULT); 25955 } 25956 #endif /* _MULTI_DATAMODEL */ 25957 25958 /* 25959 * Since MMC-2 expects max 3 bytes for length, check if the 25960 * length input is greater than 3 bytes 25961 */ 25962 if ((cdda->cdda_length & 0xFF000000) != 0) { 25963 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_read_cdda: " 25964 "cdrom transfer length too large: %d (limit %d)\n", 25965 cdda->cdda_length, 0xFFFFFF); 25966 kmem_free(cdda, sizeof (struct cdrom_cdda)); 25967 return (EINVAL); 25968 } 25969 25970 switch (cdda->cdda_subcode) { 25971 case CDROM_DA_NO_SUBCODE: 25972 buflen = CDROM_BLK_2352 * cdda->cdda_length; 25973 break; 25974 case CDROM_DA_SUBQ: 25975 buflen = CDROM_BLK_2368 * cdda->cdda_length; 25976 break; 25977 case CDROM_DA_ALL_SUBCODE: 25978 buflen = CDROM_BLK_2448 * cdda->cdda_length; 25979 break; 25980 case CDROM_DA_SUBCODE_ONLY: 25981 buflen = CDROM_BLK_SUBCODE * cdda->cdda_length; 25982 break; 25983 default: 25984 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25985 "sr_read_cdda: Subcode '0x%x' Not Supported\n", 25986 cdda->cdda_subcode); 25987 kmem_free(cdda, sizeof (struct cdrom_cdda)); 25988 return (EINVAL); 25989 } 25990 25991 /* Build and send the command */ 25992 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 25993 bzero(cdb, CDB_GROUP5); 25994 25995 if (un->un_f_cfg_cdda == TRUE) { 25996 cdb[0] = (char)SCMD_READ_CD; 25997 cdb[1] = 0x04; 25998 cdb[2] = (((cdda->cdda_addr) & 0xff000000) >> 24); 25999 cdb[3] = (((cdda->cdda_addr) & 0x00ff0000) >> 16); 26000 cdb[4] = (((cdda->cdda_addr) & 0x0000ff00) >> 8); 26001 cdb[5] = ((cdda->cdda_addr) & 0x000000ff); 26002 
cdb[6] = (((cdda->cdda_length) & 0x00ff0000) >> 16); 26003 cdb[7] = (((cdda->cdda_length) & 0x0000ff00) >> 8); 26004 cdb[8] = ((cdda->cdda_length) & 0x000000ff); 26005 cdb[9] = 0x10; 26006 switch (cdda->cdda_subcode) { 26007 case CDROM_DA_NO_SUBCODE : 26008 cdb[10] = 0x0; 26009 break; 26010 case CDROM_DA_SUBQ : 26011 cdb[10] = 0x2; 26012 break; 26013 case CDROM_DA_ALL_SUBCODE : 26014 cdb[10] = 0x1; 26015 break; 26016 case CDROM_DA_SUBCODE_ONLY : 26017 /* FALLTHROUGH */ 26018 default : 26019 kmem_free(cdda, sizeof (struct cdrom_cdda)); 26020 kmem_free(com, sizeof (*com)); 26021 return (ENOTTY); 26022 } 26023 } else { 26024 cdb[0] = (char)SCMD_READ_CDDA; 26025 cdb[2] = (((cdda->cdda_addr) & 0xff000000) >> 24); 26026 cdb[3] = (((cdda->cdda_addr) & 0x00ff0000) >> 16); 26027 cdb[4] = (((cdda->cdda_addr) & 0x0000ff00) >> 8); 26028 cdb[5] = ((cdda->cdda_addr) & 0x000000ff); 26029 cdb[6] = (((cdda->cdda_length) & 0xff000000) >> 24); 26030 cdb[7] = (((cdda->cdda_length) & 0x00ff0000) >> 16); 26031 cdb[8] = (((cdda->cdda_length) & 0x0000ff00) >> 8); 26032 cdb[9] = ((cdda->cdda_length) & 0x000000ff); 26033 cdb[10] = cdda->cdda_subcode; 26034 } 26035 26036 com->uscsi_cdb = cdb; 26037 com->uscsi_cdblen = CDB_GROUP5; 26038 com->uscsi_bufaddr = (caddr_t)cdda->cdda_data; 26039 com->uscsi_buflen = buflen; 26040 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 26041 26042 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE, 26043 SD_PATH_STANDARD); 26044 26045 kmem_free(cdda, sizeof (struct cdrom_cdda)); 26046 kmem_free(com, sizeof (*com)); 26047 return (rval); 26048 } 26049 26050 26051 /* 26052 * Function: sr_read_cdxa() 26053 * 26054 * Description: This routine is the driver entry point for handling CD-ROM 26055 * ioctl requests to return CD-XA (Extended Architecture) data. 26056 * (CDROMCDXA). 26057 * 26058 * Arguments: dev - the device 'dev_t' 26059 * data - pointer to user provided CD-XA structure specifying 26060 * the data starting address, transfer length, and format 26061 * flag - this argument is a pass through to ddi_copyxxx() 26062 * directly from the mode argument of ioctl(). 26063 * 26064 * Return Code: the code returned by sd_send_scsi_cmd() 26065 * EFAULT if ddi_copyxxx() fails 26066 * ENXIO if fail ddi_get_soft_state 26067 * EINVAL if data pointer is NULL 26068 */ 26069 26070 static int 26071 sr_read_cdxa(dev_t dev, caddr_t data, int flag) 26072 { 26073 struct sd_lun *un; 26074 struct uscsi_cmd *com; 26075 struct cdrom_cdxa *cdxa; 26076 int rval; 26077 size_t buflen; 26078 char cdb[CDB_GROUP5]; 26079 uchar_t read_flags; 26080 26081 #ifdef _MULTI_DATAMODEL 26082 /* To support ILP32 applications in an LP64 world */ 26083 struct cdrom_cdxa32 cdrom_cdxa32; 26084 struct cdrom_cdxa32 *cdxa32 = &cdrom_cdxa32; 26085 #endif /* _MULTI_DATAMODEL */ 26086 26087 if (data == NULL) { 26088 return (EINVAL); 26089 } 26090 26091 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 26092 return (ENXIO); 26093 } 26094 26095 cdxa = kmem_zalloc(sizeof (struct cdrom_cdxa), KM_SLEEP); 26096 26097 #ifdef _MULTI_DATAMODEL 26098 switch (ddi_model_convert_from(flag & FMODELS)) { 26099 case DDI_MODEL_ILP32: 26100 if (ddi_copyin(data, cdxa32, sizeof (*cdxa32), flag)) { 26101 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 26102 return (EFAULT); 26103 } 26104 /* 26105 * Convert the ILP32 uscsi data from the 26106 * application to LP64 for internal use. 
26107 */ 26108 cdrom_cdxa32tocdrom_cdxa(cdxa32, cdxa); 26109 break; 26110 case DDI_MODEL_NONE: 26111 if (ddi_copyin(data, cdxa, sizeof (struct cdrom_cdxa), flag)) { 26112 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 26113 return (EFAULT); 26114 } 26115 break; 26116 } 26117 #else /* ! _MULTI_DATAMODEL */ 26118 if (ddi_copyin(data, cdxa, sizeof (struct cdrom_cdxa), flag)) { 26119 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 26120 return (EFAULT); 26121 } 26122 #endif /* _MULTI_DATAMODEL */ 26123 26124 /* 26125 * Since MMC-2 expects max 3 bytes for length, check if the 26126 * length input is greater than 3 bytes 26127 */ 26128 if ((cdxa->cdxa_length & 0xFF000000) != 0) { 26129 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_read_cdxa: " 26130 "cdrom transfer length too large: %d (limit %d)\n", 26131 cdxa->cdxa_length, 0xFFFFFF); 26132 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 26133 return (EINVAL); 26134 } 26135 26136 switch (cdxa->cdxa_format) { 26137 case CDROM_XA_DATA: 26138 buflen = CDROM_BLK_2048 * cdxa->cdxa_length; 26139 read_flags = 0x10; 26140 break; 26141 case CDROM_XA_SECTOR_DATA: 26142 buflen = CDROM_BLK_2352 * cdxa->cdxa_length; 26143 read_flags = 0xf8; 26144 break; 26145 case CDROM_XA_DATA_W_ERROR: 26146 buflen = CDROM_BLK_2646 * cdxa->cdxa_length; 26147 read_flags = 0xfc; 26148 break; 26149 default: 26150 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26151 "sr_read_cdxa: Format '0x%x' Not Supported\n", 26152 cdxa->cdxa_format); 26153 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 26154 return (EINVAL); 26155 } 26156 26157 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 26158 bzero(cdb, CDB_GROUP5); 26159 if (un->un_f_mmc_cap == TRUE) { 26160 cdb[0] = (char)SCMD_READ_CD; 26161 cdb[2] = (((cdxa->cdxa_addr) & 0xff000000) >> 24); 26162 cdb[3] = (((cdxa->cdxa_addr) & 0x00ff0000) >> 16); 26163 cdb[4] = (((cdxa->cdxa_addr) & 0x0000ff00) >> 8); 26164 cdb[5] = ((cdxa->cdxa_addr) & 0x000000ff); 26165 cdb[6] = (((cdxa->cdxa_length) & 0x00ff0000) >> 16); 26166 cdb[7] = (((cdxa->cdxa_length) & 0x0000ff00) >> 8); 26167 cdb[8] = ((cdxa->cdxa_length) & 0x000000ff); 26168 cdb[9] = (char)read_flags; 26169 } else { 26170 /* 26171 * Note: A vendor specific command (0xDB) is being used her to 26172 * request a read of all subcodes. 
26173 */ 26174 cdb[0] = (char)SCMD_READ_CDXA; 26175 cdb[2] = (((cdxa->cdxa_addr) & 0xff000000) >> 24); 26176 cdb[3] = (((cdxa->cdxa_addr) & 0x00ff0000) >> 16); 26177 cdb[4] = (((cdxa->cdxa_addr) & 0x0000ff00) >> 8); 26178 cdb[5] = ((cdxa->cdxa_addr) & 0x000000ff); 26179 cdb[6] = (((cdxa->cdxa_length) & 0xff000000) >> 24); 26180 cdb[7] = (((cdxa->cdxa_length) & 0x00ff0000) >> 16); 26181 cdb[8] = (((cdxa->cdxa_length) & 0x0000ff00) >> 8); 26182 cdb[9] = ((cdxa->cdxa_length) & 0x000000ff); 26183 cdb[10] = cdxa->cdxa_format; 26184 } 26185 com->uscsi_cdb = cdb; 26186 com->uscsi_cdblen = CDB_GROUP5; 26187 com->uscsi_bufaddr = (caddr_t)cdxa->cdxa_data; 26188 com->uscsi_buflen = buflen; 26189 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 26190 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE, 26191 SD_PATH_STANDARD); 26192 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 26193 kmem_free(com, sizeof (*com)); 26194 return (rval); 26195 } 26196 26197 26198 /* 26199 * Function: sr_eject() 26200 * 26201 * Description: This routine is the driver entry point for handling CD-ROM 26202 * eject ioctl requests (FDEJECT, DKIOCEJECT, CDROMEJECT) 26203 * 26204 * Arguments: dev - the device 'dev_t' 26205 * 26206 * Return Code: the code returned by sd_send_scsi_cmd() 26207 */ 26208 26209 static int 26210 sr_eject(dev_t dev) 26211 { 26212 struct sd_lun *un; 26213 int rval; 26214 26215 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 26216 (un->un_state == SD_STATE_OFFLINE)) { 26217 return (ENXIO); 26218 } 26219 26220 /* 26221 * To prevent race conditions with the eject 26222 * command, keep track of an eject command as 26223 * it progresses. If we are already handling 26224 * an eject command in the driver for the given 26225 * unit and another request to eject is received, 26226 * immediately return EAGAIN so we don't lose 26227 * the command if the current eject command fails. 26228 */ 26229 mutex_enter(SD_MUTEX(un)); 26230 if (un->un_f_ejecting == TRUE) { 26231 mutex_exit(SD_MUTEX(un)); 26232 return (EAGAIN); 26233 } 26234 un->un_f_ejecting = TRUE; 26235 mutex_exit(SD_MUTEX(un)); 26236 26237 if ((rval = sd_send_scsi_DOORLOCK(un, SD_REMOVAL_ALLOW, 26238 SD_PATH_STANDARD)) != 0) { 26239 mutex_enter(SD_MUTEX(un)); 26240 un->un_f_ejecting = FALSE; 26241 mutex_exit(SD_MUTEX(un)); 26242 return (rval); 26243 } 26244 26245 rval = sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_EJECT, 26246 SD_PATH_STANDARD); 26247 26248 if (rval == 0) { 26249 mutex_enter(SD_MUTEX(un)); 26250 sr_ejected(un); 26251 un->un_mediastate = DKIO_EJECTED; 26252 un->un_f_ejecting = FALSE; 26253 cv_broadcast(&un->un_state_cv); 26254 mutex_exit(SD_MUTEX(un)); 26255 } else { 26256 mutex_enter(SD_MUTEX(un)); 26257 un->un_f_ejecting = FALSE; 26258 mutex_exit(SD_MUTEX(un)); 26259 } 26260 return (rval); 26261 } 26262 26263 26264 /* 26265 * Function: sr_ejected() 26266 * 26267 * Description: This routine updates the soft state structure to invalidate the 26268 * geometry information after the media has been ejected or a 26269 * media eject has been detected. 
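 *
 *		Note: called with SD_MUTEX held; the mutex is dropped and
 *		re-acquired around the cmlb_invalidate() call below.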
26270 * 26271 * Arguments: un - driver soft state (unit) structure 26272 */ 26273 26274 static void 26275 sr_ejected(struct sd_lun *un) 26276 { 26277 struct sd_errstats *stp; 26278 26279 ASSERT(un != NULL); 26280 ASSERT(mutex_owned(SD_MUTEX(un))); 26281 26282 un->un_f_blockcount_is_valid = FALSE; 26283 un->un_f_tgt_blocksize_is_valid = FALSE; 26284 mutex_exit(SD_MUTEX(un)); 26285 cmlb_invalidate(un->un_cmlbhandle, (void *)SD_PATH_DIRECT_PRIORITY); 26286 mutex_enter(SD_MUTEX(un)); 26287 26288 if (un->un_errstats != NULL) { 26289 stp = (struct sd_errstats *)un->un_errstats->ks_data; 26290 stp->sd_capacity.value.ui64 = 0; 26291 } 26292 } 26293 26294 26295 /* 26296 * Function: sr_check_wp() 26297 * 26298 * Description: This routine checks the write protection of a removable 26299 * media disk and hotpluggable devices via the write protect bit of 26300 * the Mode Page Header device specific field. Some devices choke 26301 * on unsupported mode page. In order to workaround this issue, 26302 * this routine has been implemented to use 0x3f mode page(request 26303 * for all pages) for all device types. 26304 * 26305 * Arguments: dev - the device 'dev_t' 26306 * 26307 * Return Code: int indicating if the device is write protected (1) or not (0) 26308 * 26309 * Context: Kernel thread. 26310 * 26311 */ 26312 26313 static int 26314 sr_check_wp(dev_t dev) 26315 { 26316 struct sd_lun *un; 26317 uchar_t device_specific; 26318 uchar_t *sense; 26319 int hdrlen; 26320 int rval = FALSE; 26321 26322 /* 26323 * Note: The return codes for this routine should be reworked to 26324 * properly handle the case of a NULL softstate. 26325 */ 26326 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 26327 return (FALSE); 26328 } 26329 26330 if (un->un_f_cfg_is_atapi == TRUE) { 26331 /* 26332 * The mode page contents are not required; set the allocation 26333 * length for the mode page header only 26334 */ 26335 hdrlen = MODE_HEADER_LENGTH_GRP2; 26336 sense = kmem_zalloc(hdrlen, KM_SLEEP); 26337 if (sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, sense, hdrlen, 26338 MODEPAGE_ALLPAGES, SD_PATH_STANDARD) != 0) 26339 goto err_exit; 26340 device_specific = 26341 ((struct mode_header_grp2 *)sense)->device_specific; 26342 } else { 26343 hdrlen = MODE_HEADER_LENGTH; 26344 sense = kmem_zalloc(hdrlen, KM_SLEEP); 26345 if (sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, sense, hdrlen, 26346 MODEPAGE_ALLPAGES, SD_PATH_STANDARD) != 0) 26347 goto err_exit; 26348 device_specific = 26349 ((struct mode_header *)sense)->device_specific; 26350 } 26351 26352 /* 26353 * Write protect mode sense failed; not all disks 26354 * understand this query. Return FALSE assuming that 26355 * these devices are not writable. 26356 */ 26357 if (device_specific & WRITE_PROTECT) { 26358 rval = TRUE; 26359 } 26360 26361 err_exit: 26362 kmem_free(sense, hdrlen); 26363 return (rval); 26364 } 26365 26366 /* 26367 * Function: sr_volume_ctrl() 26368 * 26369 * Description: This routine is the driver entry point for handling CD-ROM 26370 * audio output volume ioctl requests. (CDROMVOLCTRL) 26371 * 26372 * Arguments: dev - the device 'dev_t' 26373 * data - pointer to user audio volume control structure 26374 * flag - this argument is a pass through to ddi_copyxxx() 26375 * directly from the mode argument of ioctl(). 
26376 * 26377 * Return Code: the code returned by sd_send_scsi_cmd() 26378 * EFAULT if ddi_copyxxx() fails 26379 * ENXIO if fail ddi_get_soft_state 26380 * EINVAL if data pointer is NULL 26381 * 26382 */ 26383 26384 static int 26385 sr_volume_ctrl(dev_t dev, caddr_t data, int flag) 26386 { 26387 struct sd_lun *un; 26388 struct cdrom_volctrl volume; 26389 struct cdrom_volctrl *vol = &volume; 26390 uchar_t *sense_page; 26391 uchar_t *select_page; 26392 uchar_t *sense; 26393 uchar_t *select; 26394 int sense_buflen; 26395 int select_buflen; 26396 int rval; 26397 26398 if (data == NULL) { 26399 return (EINVAL); 26400 } 26401 26402 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 26403 (un->un_state == SD_STATE_OFFLINE)) { 26404 return (ENXIO); 26405 } 26406 26407 if (ddi_copyin(data, vol, sizeof (struct cdrom_volctrl), flag)) { 26408 return (EFAULT); 26409 } 26410 26411 if ((un->un_f_cfg_is_atapi == TRUE) || (un->un_f_mmc_cap == TRUE)) { 26412 struct mode_header_grp2 *sense_mhp; 26413 struct mode_header_grp2 *select_mhp; 26414 int bd_len; 26415 26416 sense_buflen = MODE_PARAM_LENGTH_GRP2 + MODEPAGE_AUDIO_CTRL_LEN; 26417 select_buflen = MODE_HEADER_LENGTH_GRP2 + 26418 MODEPAGE_AUDIO_CTRL_LEN; 26419 sense = kmem_zalloc(sense_buflen, KM_SLEEP); 26420 select = kmem_zalloc(select_buflen, KM_SLEEP); 26421 if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, sense, 26422 sense_buflen, MODEPAGE_AUDIO_CTRL, 26423 SD_PATH_STANDARD)) != 0) { 26424 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 26425 "sr_volume_ctrl: Mode Sense Failed\n"); 26426 kmem_free(sense, sense_buflen); 26427 kmem_free(select, select_buflen); 26428 return (rval); 26429 } 26430 sense_mhp = (struct mode_header_grp2 *)sense; 26431 select_mhp = (struct mode_header_grp2 *)select; 26432 bd_len = (sense_mhp->bdesc_length_hi << 8) | 26433 sense_mhp->bdesc_length_lo; 26434 if (bd_len > MODE_BLK_DESC_LENGTH) { 26435 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26436 "sr_volume_ctrl: Mode Sense returned invalid " 26437 "block descriptor length\n"); 26438 kmem_free(sense, sense_buflen); 26439 kmem_free(select, select_buflen); 26440 return (EIO); 26441 } 26442 sense_page = (uchar_t *) 26443 (sense + MODE_HEADER_LENGTH_GRP2 + bd_len); 26444 select_page = (uchar_t *)(select + MODE_HEADER_LENGTH_GRP2); 26445 select_mhp->length_msb = 0; 26446 select_mhp->length_lsb = 0; 26447 select_mhp->bdesc_length_hi = 0; 26448 select_mhp->bdesc_length_lo = 0; 26449 } else { 26450 struct mode_header *sense_mhp, *select_mhp; 26451 26452 sense_buflen = MODE_PARAM_LENGTH + MODEPAGE_AUDIO_CTRL_LEN; 26453 select_buflen = MODE_HEADER_LENGTH + MODEPAGE_AUDIO_CTRL_LEN; 26454 sense = kmem_zalloc(sense_buflen, KM_SLEEP); 26455 select = kmem_zalloc(select_buflen, KM_SLEEP); 26456 if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, sense, 26457 sense_buflen, MODEPAGE_AUDIO_CTRL, 26458 SD_PATH_STANDARD)) != 0) { 26459 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26460 "sr_volume_ctrl: Mode Sense Failed\n"); 26461 kmem_free(sense, sense_buflen); 26462 kmem_free(select, select_buflen); 26463 return (rval); 26464 } 26465 sense_mhp = (struct mode_header *)sense; 26466 select_mhp = (struct mode_header *)select; 26467 if (sense_mhp->bdesc_length > MODE_BLK_DESC_LENGTH) { 26468 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26469 "sr_volume_ctrl: Mode Sense returned invalid " 26470 "block descriptor length\n"); 26471 kmem_free(sense, sense_buflen); 26472 kmem_free(select, select_buflen); 26473 return (EIO); 26474 } 26475 sense_page = (uchar_t *) 26476 (sense + MODE_HEADER_LENGTH + 
sense_mhp->bdesc_length); 26477 select_page = (uchar_t *)(select + MODE_HEADER_LENGTH); 26478 select_mhp->length = 0; 26479 select_mhp->bdesc_length = 0; 26480 } 26481 /* 26482 * Note: An audio control data structure could be created and overlaid 26483 * on the following in place of the array indexing method implemented. 26484 */ 26485 26486 /* Build the select data for the user volume data */ 26487 select_page[0] = MODEPAGE_AUDIO_CTRL; 26488 select_page[1] = 0xE; 26489 /* Set the immediate bit */ 26490 select_page[2] = 0x04; 26491 /* Zero out reserved fields */ 26492 select_page[3] = 0x00; 26493 select_page[4] = 0x00; 26494 /* Return sense data for fields not to be modified */ 26495 select_page[5] = sense_page[5]; 26496 select_page[6] = sense_page[6]; 26497 select_page[7] = sense_page[7]; 26498 /* Set the user specified volume levels for channel 0 and 1 */ 26499 select_page[8] = 0x01; 26500 select_page[9] = vol->channel0; 26501 select_page[10] = 0x02; 26502 select_page[11] = vol->channel1; 26503 /* Channels 2 and 3 are currently unsupported so return the sense data */ 26504 select_page[12] = sense_page[12]; 26505 select_page[13] = sense_page[13]; 26506 select_page[14] = sense_page[14]; 26507 select_page[15] = sense_page[15]; 26508 26509 if ((un->un_f_cfg_is_atapi == TRUE) || (un->un_f_mmc_cap == TRUE)) { 26510 rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP1, select, 26511 select_buflen, SD_DONTSAVE_PAGE, SD_PATH_STANDARD); 26512 } else { 26513 rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, select, 26514 select_buflen, SD_DONTSAVE_PAGE, SD_PATH_STANDARD); 26515 } 26516 26517 kmem_free(sense, sense_buflen); 26518 kmem_free(select, select_buflen); 26519 return (rval); 26520 } 26521 26522 26523 /* 26524 * Function: sr_read_sony_session_offset() 26525 * 26526 * Description: This routine is the driver entry point for handling CD-ROM 26527 * ioctl requests for session offset information (CDROMREADOFFSET). 26528 * The address of the first track in the last session of a 26529 * multi-session CD-ROM is returned. 26530 * 26531 * Note: This routine uses a vendor specific key value in the 26532 * command control field without implementing any vendor check here 26533 * or in the ioctl routine. 26534 * 26535 * Arguments: dev - the device 'dev_t' 26536 * data - pointer to an int to hold the requested address 26537 * flag - this argument is a pass through to ddi_copyxxx() 26538 * directly from the mode argument of ioctl(). 26539 * 26540 * Return Code: the code returned by sd_send_scsi_cmd() 26541 * EFAULT if ddi_copyxxx() fails 26542 * ENXIO if fail ddi_get_soft_state 26543 * EINVAL if data pointer is NULL 26544 */ 26545 26546 static int 26547 sr_read_sony_session_offset(dev_t dev, caddr_t data, int flag) 26548 { 26549 struct sd_lun *un; 26550 struct uscsi_cmd *com; 26551 caddr_t buffer; 26552 char cdb[CDB_GROUP1]; 26553 int session_offset = 0; 26554 int rval; 26555 26556 if (data == NULL) { 26557 return (EINVAL); 26558 } 26559 26560 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 26561 (un->un_state == SD_STATE_OFFLINE)) { 26562 return (ENXIO); 26563 } 26564 26565 buffer = kmem_zalloc((size_t)SONY_SESSION_OFFSET_LEN, KM_SLEEP); 26566 bzero(cdb, CDB_GROUP1); 26567 cdb[0] = SCMD_READ_TOC; 26568 /* 26569 * Bytes 7 & 8 are the 12 byte allocation length for a single entry. 26570 * (4 byte TOC response header + 8 byte response data) 26571 */ 26572 cdb[8] = SONY_SESSION_OFFSET_LEN; 26573 /* Byte 9 is the control byte. 
A vendor specific value is used */ 26574 cdb[9] = SONY_SESSION_OFFSET_KEY; 26575 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 26576 com->uscsi_cdb = cdb; 26577 com->uscsi_cdblen = CDB_GROUP1; 26578 com->uscsi_bufaddr = buffer; 26579 com->uscsi_buflen = SONY_SESSION_OFFSET_LEN; 26580 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 26581 26582 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 26583 SD_PATH_STANDARD); 26584 if (rval != 0) { 26585 kmem_free(buffer, SONY_SESSION_OFFSET_LEN); 26586 kmem_free(com, sizeof (*com)); 26587 return (rval); 26588 } 26589 if (buffer[1] == SONY_SESSION_OFFSET_VALID) { 26590 session_offset = 26591 ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) + 26592 ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]); 26593 /* 26594 * The drive returns the offset in units of the current lbasize 26595 * blocks. Convert to 2k blocks before returning it to the user. 26596 */ 26597 if (un->un_tgt_blocksize == CDROM_BLK_512) { 26598 session_offset >>= 2; 26599 } else if (un->un_tgt_blocksize == CDROM_BLK_1024) { 26600 session_offset >>= 1; 26601 } 26602 } 26603 26604 if (ddi_copyout(&session_offset, data, sizeof (int), flag) != 0) { 26605 rval = EFAULT; 26606 } 26607 26608 kmem_free(buffer, SONY_SESSION_OFFSET_LEN); 26609 kmem_free(com, sizeof (*com)); 26610 return (rval); 26611 } 26612 26613 26614 /* 26615 * Function: sd_wm_cache_constructor() 26616 * 26617 * Description: Cache Constructor for the wmap cache for the read/modify/write 26618 * devices. 26619 * 26620 * Arguments: wm - A pointer to the sd_w_map to be initialized. 26621 * un - sd_lun structure for the device. 26622 * flag - the km flags passed to constructor 26623 * 26624 * Return Code: 0 on success. 26625 * -1 on failure. 26626 */ 26627 26628 /*ARGSUSED*/ 26629 static int 26630 sd_wm_cache_constructor(void *wm, void *un, int flags) 26631 { 26632 bzero(wm, sizeof (struct sd_w_map)); 26633 cv_init(&((struct sd_w_map *)wm)->wm_avail, NULL, CV_DRIVER, NULL); 26634 return (0); 26635 } 26636 26637 26638 /* 26639 * Function: sd_wm_cache_destructor() 26640 * 26641 * Description: Cache destructor for the wmap cache for the read/modify/write 26642 * devices. 26643 * 26644 * Arguments: wm - A pointer to the sd_w_map to be destroyed. 26645 * un - sd_lun structure for the device. 26646 */ 26647 /*ARGSUSED*/ 26648 static void 26649 sd_wm_cache_destructor(void *wm, void *un) 26650 { 26651 cv_destroy(&((struct sd_w_map *)wm)->wm_avail); 26652 } 26653 26654 26655 /* 26656 * Function: sd_range_lock() 26657 * 26658 * Description: Lock the range of blocks specified as parameter to ensure 26659 * that a read-modify-write is atomic and no other I/O writes 26660 * to the same location. The range is specified in terms 26661 * of start and end blocks. Block numbers are the actual 26662 * media block numbers and not system block numbers. 26663 * 26664 * Arguments: un - sd_lun structure for the device. 26665 * startb - The starting block number 26666 * endb - The end block number 26667 * typ - type of i/o - simple/read_modify_write 26668 * 26669 * Return Code: wm - pointer to the wmap structure. 26670 * 26671 * Context: This routine can sleep. 
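 *
 * As an illustrative sketch (blkno and nblk are hypothetical values),
 * a caller brackets a read-modify-write with the lock/unlock pair:
 *
 *	struct sd_w_map *wmp;
 *
 *	wmp = sd_range_lock(un, blkno, blkno + nblk - 1, SD_WTYPE_RMW);
 *	/* ... read the enclosing target blocks, merge, write back ... */
 *	sd_range_unlock(un, wmp);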
26672 */ 26673 26674 static struct sd_w_map * 26675 sd_range_lock(struct sd_lun *un, daddr_t startb, daddr_t endb, ushort_t typ) 26676 { 26677 struct sd_w_map *wmp = NULL; 26678 struct sd_w_map *sl_wmp = NULL; 26679 struct sd_w_map *tmp_wmp; 26680 wm_state state = SD_WM_CHK_LIST; 26681 26682 26683 ASSERT(un != NULL); 26684 ASSERT(!mutex_owned(SD_MUTEX(un))); 26685 26686 mutex_enter(SD_MUTEX(un)); 26687 26688 while (state != SD_WM_DONE) { 26689 26690 switch (state) { 26691 case SD_WM_CHK_LIST: 26692 /* 26693 * This is the starting state. Check the wmap list 26694 * to see if the range is currently available. 26695 */ 26696 if (!(typ & SD_WTYPE_RMW) && !(un->un_rmw_count)) { 26697 /* 26698 * If this is a simple write and no rmw 26699 * i/o is pending then try to lock the 26700 * range as the range should be available. 26701 */ 26702 state = SD_WM_LOCK_RANGE; 26703 } else { 26704 tmp_wmp = sd_get_range(un, startb, endb); 26705 if (tmp_wmp != NULL) { 26706 if ((wmp != NULL) && ONLIST(un, wmp)) { 26707 /* 26708 * Should not keep onlist wmps 26709 * while waiting this macro 26710 * will also do wmp = NULL; 26711 */ 26712 FREE_ONLIST_WMAP(un, wmp); 26713 } 26714 /* 26715 * sl_wmp is the wmap on which wait 26716 * is done, since the tmp_wmp points 26717 * to the inuse wmap, set sl_wmp to 26718 * tmp_wmp and change the state to sleep 26719 */ 26720 sl_wmp = tmp_wmp; 26721 state = SD_WM_WAIT_MAP; 26722 } else { 26723 state = SD_WM_LOCK_RANGE; 26724 } 26725 26726 } 26727 break; 26728 26729 case SD_WM_LOCK_RANGE: 26730 ASSERT(un->un_wm_cache); 26731 /* 26732 * The range need to be locked, try to get a wmap. 26733 * First attempt it with NO_SLEEP, want to avoid a sleep 26734 * if possible as we will have to release the sd mutex 26735 * if we have to sleep. 26736 */ 26737 if (wmp == NULL) 26738 wmp = kmem_cache_alloc(un->un_wm_cache, 26739 KM_NOSLEEP); 26740 if (wmp == NULL) { 26741 mutex_exit(SD_MUTEX(un)); 26742 _NOTE(DATA_READABLE_WITHOUT_LOCK 26743 (sd_lun::un_wm_cache)) 26744 wmp = kmem_cache_alloc(un->un_wm_cache, 26745 KM_SLEEP); 26746 mutex_enter(SD_MUTEX(un)); 26747 /* 26748 * we released the mutex so recheck and go to 26749 * check list state. 26750 */ 26751 state = SD_WM_CHK_LIST; 26752 } else { 26753 /* 26754 * We exit out of state machine since we 26755 * have the wmap. Do the housekeeping first. 26756 * place the wmap on the wmap list if it is not 26757 * on it already and then set the state to done. 26758 */ 26759 wmp->wm_start = startb; 26760 wmp->wm_end = endb; 26761 wmp->wm_flags = typ | SD_WM_BUSY; 26762 if (typ & SD_WTYPE_RMW) { 26763 un->un_rmw_count++; 26764 } 26765 /* 26766 * If not already on the list then link 26767 */ 26768 if (!ONLIST(un, wmp)) { 26769 wmp->wm_next = un->un_wm; 26770 wmp->wm_prev = NULL; 26771 if (wmp->wm_next) 26772 wmp->wm_next->wm_prev = wmp; 26773 un->un_wm = wmp; 26774 } 26775 state = SD_WM_DONE; 26776 } 26777 break; 26778 26779 case SD_WM_WAIT_MAP: 26780 ASSERT(sl_wmp->wm_flags & SD_WM_BUSY); 26781 /* 26782 * Wait is done on sl_wmp, which is set in the 26783 * check_list state. 26784 */ 26785 sl_wmp->wm_wanted_count++; 26786 cv_wait(&sl_wmp->wm_avail, SD_MUTEX(un)); 26787 sl_wmp->wm_wanted_count--; 26788 /* 26789 * We can reuse the memory from the completed sl_wmp 26790 * lock range for our new lock, but only if noone is 26791 * waiting for it. 
26792 */ 26793 ASSERT(!(sl_wmp->wm_flags & SD_WM_BUSY)); 26794 if (sl_wmp->wm_wanted_count == 0) { 26795 if (wmp != NULL) 26796 CHK_N_FREEWMP(un, wmp); 26797 wmp = sl_wmp; 26798 } 26799 sl_wmp = NULL; 26800 /* 26801 * After waking up, need to recheck for availability of 26802 * range. 26803 */ 26804 state = SD_WM_CHK_LIST; 26805 break; 26806 26807 default: 26808 panic("sd_range_lock: " 26809 "Unknown state %d in sd_range_lock", state); 26810 /*NOTREACHED*/ 26811 } /* switch(state) */ 26812 26813 } /* while(state != SD_WM_DONE) */ 26814 26815 mutex_exit(SD_MUTEX(un)); 26816 26817 ASSERT(wmp != NULL); 26818 26819 return (wmp); 26820 } 26821 26822 26823 /* 26824 * Function: sd_get_range() 26825 * 26826 * Description: Find if there any overlapping I/O to this one 26827 * Returns the write-map of 1st such I/O, NULL otherwise. 26828 * 26829 * Arguments: un - sd_lun structure for the device. 26830 * startb - The starting block number 26831 * endb - The end block number 26832 * 26833 * Return Code: wm - pointer to the wmap structure. 26834 */ 26835 26836 static struct sd_w_map * 26837 sd_get_range(struct sd_lun *un, daddr_t startb, daddr_t endb) 26838 { 26839 struct sd_w_map *wmp; 26840 26841 ASSERT(un != NULL); 26842 26843 for (wmp = un->un_wm; wmp != NULL; wmp = wmp->wm_next) { 26844 if (!(wmp->wm_flags & SD_WM_BUSY)) { 26845 continue; 26846 } 26847 if ((startb >= wmp->wm_start) && (startb <= wmp->wm_end)) { 26848 break; 26849 } 26850 if ((endb >= wmp->wm_start) && (endb <= wmp->wm_end)) { 26851 break; 26852 } 26853 } 26854 26855 return (wmp); 26856 } 26857 26858 26859 /* 26860 * Function: sd_free_inlist_wmap() 26861 * 26862 * Description: Unlink and free a write map struct. 26863 * 26864 * Arguments: un - sd_lun structure for the device. 26865 * wmp - sd_w_map which needs to be unlinked. 26866 */ 26867 26868 static void 26869 sd_free_inlist_wmap(struct sd_lun *un, struct sd_w_map *wmp) 26870 { 26871 ASSERT(un != NULL); 26872 26873 if (un->un_wm == wmp) { 26874 un->un_wm = wmp->wm_next; 26875 } else { 26876 wmp->wm_prev->wm_next = wmp->wm_next; 26877 } 26878 26879 if (wmp->wm_next) { 26880 wmp->wm_next->wm_prev = wmp->wm_prev; 26881 } 26882 26883 wmp->wm_next = wmp->wm_prev = NULL; 26884 26885 kmem_cache_free(un->un_wm_cache, wmp); 26886 } 26887 26888 26889 /* 26890 * Function: sd_range_unlock() 26891 * 26892 * Description: Unlock the range locked by wm. 26893 * Free write map if nobody else is waiting on it. 26894 * 26895 * Arguments: un - sd_lun structure for the device. 26896 * wmp - sd_w_map which needs to be unlinked. 26897 */ 26898 26899 static void 26900 sd_range_unlock(struct sd_lun *un, struct sd_w_map *wm) 26901 { 26902 ASSERT(un != NULL); 26903 ASSERT(wm != NULL); 26904 ASSERT(!mutex_owned(SD_MUTEX(un))); 26905 26906 mutex_enter(SD_MUTEX(un)); 26907 26908 if (wm->wm_flags & SD_WTYPE_RMW) { 26909 un->un_rmw_count--; 26910 } 26911 26912 if (wm->wm_wanted_count) { 26913 wm->wm_flags = 0; 26914 /* 26915 * Broadcast that the wmap is available now. 26916 */ 26917 cv_broadcast(&wm->wm_avail); 26918 } else { 26919 /* 26920 * If no one is waiting on the map, it should be free'ed. 26921 */ 26922 sd_free_inlist_wmap(un, wm); 26923 } 26924 26925 mutex_exit(SD_MUTEX(un)); 26926 } 26927 26928 26929 /* 26930 * Function: sd_read_modify_write_task 26931 * 26932 * Description: Called from a taskq thread to initiate the write phase of 26933 * a read-modify-write request. This is used for targets where 26934 * un->un_sys_blocksize != un->un_tgt_blocksize. 
26935 * 26936 * Arguments: arg - a pointer to the buf(9S) struct for the write command. 26937 * 26938 * Context: Called under taskq thread context. 26939 */ 26940 26941 static void 26942 sd_read_modify_write_task(void *arg) 26943 { 26944 struct sd_mapblocksize_info *bsp; 26945 struct buf *bp; 26946 struct sd_xbuf *xp; 26947 struct sd_lun *un; 26948 26949 bp = arg; /* The bp is given in arg */ 26950 ASSERT(bp != NULL); 26951 26952 /* Get the pointer to the layer-private data struct */ 26953 xp = SD_GET_XBUF(bp); 26954 ASSERT(xp != NULL); 26955 bsp = xp->xb_private; 26956 ASSERT(bsp != NULL); 26957 26958 un = SD_GET_UN(bp); 26959 ASSERT(un != NULL); 26960 ASSERT(!mutex_owned(SD_MUTEX(un))); 26961 26962 SD_TRACE(SD_LOG_IO_RMMEDIA, un, 26963 "sd_read_modify_write_task: entry: buf:0x%p\n", bp); 26964 26965 /* 26966 * This is the write phase of a read-modify-write request, called 26967 * under the context of a taskq thread in response to the completion 26968 * of the read portion of the rmw request completing under interrupt 26969 * context. The write request must be sent from here down the iostart 26970 * chain as if it were being sent from sd_mapblocksize_iostart(), so 26971 * we use the layer index saved in the layer-private data area. 26972 */ 26973 SD_NEXT_IOSTART(bsp->mbs_layer_index, un, bp); 26974 26975 SD_TRACE(SD_LOG_IO_RMMEDIA, un, 26976 "sd_read_modify_write_task: exit: buf:0x%p\n", bp); 26977 } 26978 26979 26980 /* 26981 * Function: sddump_do_read_of_rmw() 26982 * 26983 * Description: This routine will be called from sddump, If sddump is called 26984 * with an I/O which not aligned on device blocksize boundary 26985 * then the write has to be converted to read-modify-write. 26986 * Do the read part here in order to keep sddump simple. 26987 * Note - That the sd_mutex is held across the call to this 26988 * routine. 26989 * 26990 * Arguments: un - sd_lun 26991 * blkno - block number in terms of media block size. 26992 * nblk - number of blocks. 26993 * bpp - pointer to pointer to the buf structure. On return 26994 * from this function, *bpp points to the valid buffer 26995 * to which the write has to be done. 26996 * 26997 * Return Code: 0 for success or errno-type return code 26998 */ 26999 27000 static int 27001 sddump_do_read_of_rmw(struct sd_lun *un, uint64_t blkno, uint64_t nblk, 27002 struct buf **bpp) 27003 { 27004 int err; 27005 int i; 27006 int rval; 27007 struct buf *bp; 27008 struct scsi_pkt *pkt = NULL; 27009 uint32_t target_blocksize; 27010 27011 ASSERT(un != NULL); 27012 ASSERT(mutex_owned(SD_MUTEX(un))); 27013 27014 target_blocksize = un->un_tgt_blocksize; 27015 27016 mutex_exit(SD_MUTEX(un)); 27017 27018 bp = scsi_alloc_consistent_buf(SD_ADDRESS(un), (struct buf *)NULL, 27019 (size_t)(nblk * target_blocksize), B_READ, NULL_FUNC, NULL); 27020 if (bp == NULL) { 27021 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27022 "no resources for dumping; giving up"); 27023 err = ENOMEM; 27024 goto done; 27025 } 27026 27027 rval = sd_setup_rw_pkt(un, &pkt, bp, 0, NULL_FUNC, NULL, 27028 blkno, nblk); 27029 if (rval != 0) { 27030 scsi_free_consistent_buf(bp); 27031 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27032 "no resources for dumping; giving up"); 27033 err = ENOMEM; 27034 goto done; 27035 } 27036 27037 pkt->pkt_flags |= FLAG_NOINTR; 27038 27039 err = EIO; 27040 for (i = 0; i < SD_NDUMP_RETRIES; i++) { 27041 27042 /* 27043 * Scsi_poll returns 0 (success) if the command completes and 27044 * the status block is STATUS_GOOD. 
We should only check 27045 * errors if this condition is not true. Even then we should 27046 * send our own request sense packet only if we have a check 27047 * condition and auto request sense has not been performed by 27048 * the hba. 27049 */ 27050 SD_TRACE(SD_LOG_DUMP, un, "sddump: sending read\n"); 27051 27052 if ((sd_scsi_poll(un, pkt) == 0) && (pkt->pkt_resid == 0)) { 27053 err = 0; 27054 break; 27055 } 27056 27057 /* 27058 * Check CMD_DEV_GONE 1st, give up if device is gone, 27059 * no need to read RQS data. 27060 */ 27061 if (pkt->pkt_reason == CMD_DEV_GONE) { 27062 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27063 "Error while dumping state with rmw..." 27064 "Device is gone\n"); 27065 break; 27066 } 27067 27068 if (SD_GET_PKT_STATUS(pkt) == STATUS_CHECK) { 27069 SD_INFO(SD_LOG_DUMP, un, 27070 "sddump: read failed with CHECK, try # %d\n", i); 27071 if (((pkt->pkt_state & STATE_ARQ_DONE) == 0)) { 27072 (void) sd_send_polled_RQS(un); 27073 } 27074 27075 continue; 27076 } 27077 27078 if (SD_GET_PKT_STATUS(pkt) == STATUS_BUSY) { 27079 int reset_retval = 0; 27080 27081 SD_INFO(SD_LOG_DUMP, un, 27082 "sddump: read failed with BUSY, try # %d\n", i); 27083 27084 if (un->un_f_lun_reset_enabled == TRUE) { 27085 reset_retval = scsi_reset(SD_ADDRESS(un), 27086 RESET_LUN); 27087 } 27088 if (reset_retval == 0) { 27089 (void) scsi_reset(SD_ADDRESS(un), RESET_TARGET); 27090 } 27091 (void) sd_send_polled_RQS(un); 27092 27093 } else { 27094 SD_INFO(SD_LOG_DUMP, un, 27095 "sddump: read failed with 0x%x, try # %d\n", 27096 SD_GET_PKT_STATUS(pkt), i); 27097 mutex_enter(SD_MUTEX(un)); 27098 sd_reset_target(un, pkt); 27099 mutex_exit(SD_MUTEX(un)); 27100 } 27101 27102 /* 27103 * If we are not getting anywhere with lun/target resets, 27104 * let's reset the bus. 27105 */ 27106 if (i > SD_NDUMP_RETRIES/2) { 27107 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL); 27108 (void) sd_send_polled_RQS(un); 27109 } 27110 27111 } 27112 scsi_destroy_pkt(pkt); 27113 27114 if (err != 0) { 27115 scsi_free_consistent_buf(bp); 27116 *bpp = NULL; 27117 } else { 27118 *bpp = bp; 27119 } 27120 27121 done: 27122 mutex_enter(SD_MUTEX(un)); 27123 return (err); 27124 } 27125 27126 27127 /* 27128 * Function: sd_failfast_flushq 27129 * 27130 * Description: Take all bp's on the wait queue that have B_FAILFAST set 27131 * in b_flags and move them onto the failfast queue, then kick 27132 * off a thread to return all bp's on the failfast queue to 27133 * their owners with an error set. 27134 * 27135 * Arguments: un - pointer to the soft state struct for the instance. 27136 * 27137 * Context: may execute in interrupt context. 27138 */ 27139 27140 static void 27141 sd_failfast_flushq(struct sd_lun *un) 27142 { 27143 struct buf *bp; 27144 struct buf *next_waitq_bp; 27145 struct buf *prev_waitq_bp = NULL; 27146 27147 ASSERT(un != NULL); 27148 ASSERT(mutex_owned(SD_MUTEX(un))); 27149 ASSERT(un->un_failfast_state == SD_FAILFAST_ACTIVE); 27150 ASSERT(un->un_failfast_bp == NULL); 27151 27152 SD_TRACE(SD_LOG_IO_FAILFAST, un, 27153 "sd_failfast_flushq: entry: un:0x%p\n", un); 27154 27155 /* 27156 * Check if we should flush all bufs when entering failfast state, or 27157 * just those with B_FAILFAST set. 27158 */ 27159 if (sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_BUFS) { 27160 /* 27161 * Move *all* bp's on the wait queue to the failfast flush 27162 * queue, including those that do NOT have B_FAILFAST set. 
27163 */ 27164 if (un->un_failfast_headp == NULL) { 27165 ASSERT(un->un_failfast_tailp == NULL); 27166 un->un_failfast_headp = un->un_waitq_headp; 27167 } else { 27168 ASSERT(un->un_failfast_tailp != NULL); 27169 un->un_failfast_tailp->av_forw = un->un_waitq_headp; 27170 } 27171 27172 un->un_failfast_tailp = un->un_waitq_tailp; 27173 27174 /* update kstat for each bp moved out of the waitq */ 27175 for (bp = un->un_waitq_headp; bp != NULL; bp = bp->av_forw) { 27176 SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp); 27177 } 27178 27179 /* empty the waitq */ 27180 un->un_waitq_headp = un->un_waitq_tailp = NULL; 27181 27182 } else { 27183 /* 27184 * Go thru the wait queue, pick off all entries with 27185 * B_FAILFAST set, and move these onto the failfast queue. 27186 */ 27187 for (bp = un->un_waitq_headp; bp != NULL; bp = next_waitq_bp) { 27188 /* 27189 * Save the pointer to the next bp on the wait queue, 27190 * so we get to it on the next iteration of this loop. 27191 */ 27192 next_waitq_bp = bp->av_forw; 27193 27194 /* 27195 * If this bp from the wait queue does NOT have 27196 * B_FAILFAST set, just move on to the next element 27197 * in the wait queue. Note, this is the only place 27198 * where it is correct to set prev_waitq_bp. 27199 */ 27200 if ((bp->b_flags & B_FAILFAST) == 0) { 27201 prev_waitq_bp = bp; 27202 continue; 27203 } 27204 27205 /* 27206 * Remove the bp from the wait queue. 27207 */ 27208 if (bp == un->un_waitq_headp) { 27209 /* The bp is the first element of the waitq. */ 27210 un->un_waitq_headp = next_waitq_bp; 27211 if (un->un_waitq_headp == NULL) { 27212 /* The wait queue is now empty */ 27213 un->un_waitq_tailp = NULL; 27214 } 27215 } else { 27216 /* 27217 * The bp is either somewhere in the middle 27218 * or at the end of the wait queue. 27219 */ 27220 ASSERT(un->un_waitq_headp != NULL); 27221 ASSERT(prev_waitq_bp != NULL); 27222 ASSERT((prev_waitq_bp->b_flags & B_FAILFAST) 27223 == 0); 27224 if (bp == un->un_waitq_tailp) { 27225 /* bp is the last entry on the waitq. */ 27226 ASSERT(next_waitq_bp == NULL); 27227 un->un_waitq_tailp = prev_waitq_bp; 27228 } 27229 prev_waitq_bp->av_forw = next_waitq_bp; 27230 } 27231 bp->av_forw = NULL; 27232 27233 /* 27234 * update kstat since the bp is moved out of 27235 * the waitq 27236 */ 27237 SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp); 27238 27239 /* 27240 * Now put the bp onto the failfast queue. 27241 */ 27242 if (un->un_failfast_headp == NULL) { 27243 /* failfast queue is currently empty */ 27244 ASSERT(un->un_failfast_tailp == NULL); 27245 un->un_failfast_headp = 27246 un->un_failfast_tailp = bp; 27247 } else { 27248 /* Add the bp to the end of the failfast q */ 27249 ASSERT(un->un_failfast_tailp != NULL); 27250 ASSERT(un->un_failfast_tailp->b_flags & 27251 B_FAILFAST); 27252 un->un_failfast_tailp->av_forw = bp; 27253 un->un_failfast_tailp = bp; 27254 } 27255 } 27256 } 27257 27258 /* 27259 * Now return all bp's on the failfast queue to their owners. 27260 */ 27261 while ((bp = un->un_failfast_headp) != NULL) { 27262 27263 un->un_failfast_headp = bp->av_forw; 27264 if (un->un_failfast_headp == NULL) { 27265 un->un_failfast_tailp = NULL; 27266 } 27267 27268 /* 27269 * We want to return the bp with a failure error code, but 27270 * we do not want a call to sd_start_cmds() to occur here, 27271 * so use sd_return_failed_command_no_restart() instead of 27272 * sd_return_failed_command(). 27273 */ 27274 sd_return_failed_command_no_restart(un, bp, EIO); 27275 } 27276 27277 /* Flush the xbuf queues if required. 
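	 * ddi_xbuf_flushq() applies sd_failfast_flushq_callback() (below)
	 * to each queued xbuf to decide which ones to flush.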


/*
 *    Function: sd_setup_next_xfer
 *
 * Description: Prepare the next I/O operation using DMA_PARTIAL.
 */

static int
sd_setup_next_xfer(struct sd_lun *un, struct buf *bp,
    struct scsi_pkt *pkt, struct sd_xbuf *xp)
{
    ssize_t num_blks_not_xfered;
    daddr_t strt_blk_num;
    ssize_t bytes_not_xfered;
    int     rval;

    ASSERT(pkt->pkt_resid == 0);

    /*
     * Calculate next block number and amount to be transferred.
     *
     * How much data has NOT been transferred to the HBA yet.
     */
    bytes_not_xfered = xp->xb_dma_resid;

    /*
     * Figure out how many blocks have NOT been transferred to the HBA yet.
     */
    num_blks_not_xfered = SD_BYTES2TGTBLOCKS(un, bytes_not_xfered);

    /*
     * Set the starting block number to the end of what WAS transferred.
     */
    strt_blk_num = xp->xb_blkno +
        SD_BYTES2TGTBLOCKS(un, bp->b_bcount - bytes_not_xfered);

    /*
     * Move pkt to the next portion of the xfer. sd_setup_next_rw_pkt
     * will call scsi_init_pkt with NULL_FUNC so we do not have to release
     * the disk mutex here.
     */
    rval = sd_setup_next_rw_pkt(un, pkt, bp,
        strt_blk_num, num_blks_not_xfered);

    if (rval == 0) {

        /*
         * Success.
         *
         * Adjust things if there are still more blocks to be
         * transferred.
         */
        xp->xb_dma_resid = pkt->pkt_resid;
        pkt->pkt_resid = 0;

        return (1);
    }

    /*
     * There's really only one possible failure return value from
     * sd_setup_next_rw_pkt, which occurs when scsi_init_pkt
     * returns NULL.
     */
    ASSERT(rval == SD_PKT_ALLOC_FAILURE);

    bp->b_resid = bp->b_bcount;
    bp->b_flags |= B_ERROR;

    scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
        "Error setting up next portion of DMA transfer\n");

    return (0);
}
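
/*
 * Illustrative only (not compiled): a worked example of the arithmetic
 * above, assuming a 512-byte target block size. If b_bcount is 1048576
 * bytes and the HBA accepted all but xb_dma_resid = 262144 bytes, then:
 *
 *      bytes_not_xfered    = 262144
 *      num_blks_not_xfered = SD_BYTES2TGTBLOCKS(un, 262144) = 512 blocks
 *      strt_blk_num        = xb_blkno +
 *          SD_BYTES2TGTBLOCKS(un, 1048576 - 262144) = xb_blkno + 1536
 *
 * so the next chunk of the partial DMA transfer resumes 1536 blocks past
 * the original starting block and covers the remaining 512 blocks.
 */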
27395 */ 27396 27397 #define SD_RESV_CONFLICT_FMT_LEN 40 27398 void 27399 sd_panic_for_res_conflict(struct sd_lun *un) 27400 { 27401 char panic_str[SD_RESV_CONFLICT_FMT_LEN+MAXPATHLEN]; 27402 char path_str[MAXPATHLEN]; 27403 27404 (void) snprintf(panic_str, sizeof (panic_str), 27405 "Reservation Conflict\nDisk: %s", 27406 ddi_pathname(SD_DEVINFO(un), path_str)); 27407 27408 panic(panic_str); 27409 } 27410 27411 /* 27412 * Note: The following sd_faultinjection_ioctl( ) routines implement 27413 * driver support for handling fault injection for error analysis 27414 * causing faults in multiple layers of the driver. 27415 * 27416 */ 27417 27418 #ifdef SD_FAULT_INJECTION 27419 static uint_t sd_fault_injection_on = 0; 27420 27421 /* 27422 * Function: sd_faultinjection_ioctl() 27423 * 27424 * Description: This routine is the driver entry point for handling 27425 * faultinjection ioctls to inject errors into the 27426 * layer model 27427 * 27428 * Arguments: cmd - the ioctl cmd received 27429 * arg - the arguments from user and returns 27430 */ 27431 27432 static void 27433 sd_faultinjection_ioctl(int cmd, intptr_t arg, struct sd_lun *un) { 27434 27435 uint_t i; 27436 uint_t rval; 27437 27438 SD_TRACE(SD_LOG_IOERR, un, "sd_faultinjection_ioctl: entry\n"); 27439 27440 mutex_enter(SD_MUTEX(un)); 27441 27442 switch (cmd) { 27443 case SDIOCRUN: 27444 /* Allow pushed faults to be injected */ 27445 SD_INFO(SD_LOG_SDTEST, un, 27446 "sd_faultinjection_ioctl: Injecting Fault Run\n"); 27447 27448 sd_fault_injection_on = 1; 27449 27450 SD_INFO(SD_LOG_IOERR, un, 27451 "sd_faultinjection_ioctl: run finished\n"); 27452 break; 27453 27454 case SDIOCSTART: 27455 /* Start Injection Session */ 27456 SD_INFO(SD_LOG_SDTEST, un, 27457 "sd_faultinjection_ioctl: Injecting Fault Start\n"); 27458 27459 sd_fault_injection_on = 0; 27460 un->sd_injection_mask = 0xFFFFFFFF; 27461 for (i = 0; i < SD_FI_MAX_ERROR; i++) { 27462 un->sd_fi_fifo_pkt[i] = NULL; 27463 un->sd_fi_fifo_xb[i] = NULL; 27464 un->sd_fi_fifo_un[i] = NULL; 27465 un->sd_fi_fifo_arq[i] = NULL; 27466 } 27467 un->sd_fi_fifo_start = 0; 27468 un->sd_fi_fifo_end = 0; 27469 27470 mutex_enter(&(un->un_fi_mutex)); 27471 un->sd_fi_log[0] = '\0'; 27472 un->sd_fi_buf_len = 0; 27473 mutex_exit(&(un->un_fi_mutex)); 27474 27475 SD_INFO(SD_LOG_IOERR, un, 27476 "sd_faultinjection_ioctl: start finished\n"); 27477 break; 27478 27479 case SDIOCSTOP: 27480 /* Stop Injection Session */ 27481 SD_INFO(SD_LOG_SDTEST, un, 27482 "sd_faultinjection_ioctl: Injecting Fault Stop\n"); 27483 sd_fault_injection_on = 0; 27484 un->sd_injection_mask = 0x0; 27485 27486 /* Empty stray or unuseds structs from fifo */ 27487 for (i = 0; i < SD_FI_MAX_ERROR; i++) { 27488 if (un->sd_fi_fifo_pkt[i] != NULL) { 27489 kmem_free(un->sd_fi_fifo_pkt[i], 27490 sizeof (struct sd_fi_pkt)); 27491 } 27492 if (un->sd_fi_fifo_xb[i] != NULL) { 27493 kmem_free(un->sd_fi_fifo_xb[i], 27494 sizeof (struct sd_fi_xb)); 27495 } 27496 if (un->sd_fi_fifo_un[i] != NULL) { 27497 kmem_free(un->sd_fi_fifo_un[i], 27498 sizeof (struct sd_fi_un)); 27499 } 27500 if (un->sd_fi_fifo_arq[i] != NULL) { 27501 kmem_free(un->sd_fi_fifo_arq[i], 27502 sizeof (struct sd_fi_arq)); 27503 } 27504 un->sd_fi_fifo_pkt[i] = NULL; 27505 un->sd_fi_fifo_un[i] = NULL; 27506 un->sd_fi_fifo_xb[i] = NULL; 27507 un->sd_fi_fifo_arq[i] = NULL; 27508 } 27509 un->sd_fi_fifo_start = 0; 27510 un->sd_fi_fifo_end = 0; 27511 27512 SD_INFO(SD_LOG_IOERR, un, 27513 "sd_faultinjection_ioctl: stop finished\n"); 27514 break; 27515 27516 case SDIOCINSERTPKT: 27517 /* Store a 
packet struct to be pushed onto the fifo */
        SD_INFO(SD_LOG_SDTEST, un,
            "sd_faultinjection_ioctl: Injecting Fault Insert Pkt\n");

        i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR;

        sd_fault_injection_on = 0;

        /* No more than SD_FI_MAX_ERROR entries allowed in the queue */
        if (un->sd_fi_fifo_pkt[i] != NULL) {
            kmem_free(un->sd_fi_fifo_pkt[i],
                sizeof (struct sd_fi_pkt));
        }
        if (arg != NULL) {
            un->sd_fi_fifo_pkt[i] =
                kmem_alloc(sizeof (struct sd_fi_pkt), KM_NOSLEEP);
            if (un->sd_fi_fifo_pkt[i] == NULL) {
                /* Alloc failed; don't store anything */
                break;
            }
            rval = ddi_copyin((void *)arg, un->sd_fi_fifo_pkt[i],
                sizeof (struct sd_fi_pkt), 0);
            if (rval == -1) {
                kmem_free(un->sd_fi_fifo_pkt[i],
                    sizeof (struct sd_fi_pkt));
                un->sd_fi_fifo_pkt[i] = NULL;
            }
        } else {
            SD_INFO(SD_LOG_IOERR, un,
                "sd_faultinjection_ioctl: pkt null\n");
        }
        break;

    case SDIOCINSERTXB:
        /* Store an xb struct to be pushed onto the fifo */
        SD_INFO(SD_LOG_SDTEST, un,
            "sd_faultinjection_ioctl: Injecting Fault Insert XB\n");

        i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR;

        sd_fault_injection_on = 0;

        if (un->sd_fi_fifo_xb[i] != NULL) {
            kmem_free(un->sd_fi_fifo_xb[i],
                sizeof (struct sd_fi_xb));
            un->sd_fi_fifo_xb[i] = NULL;
        }
        if (arg != NULL) {
            un->sd_fi_fifo_xb[i] =
                kmem_alloc(sizeof (struct sd_fi_xb), KM_NOSLEEP);
            if (un->sd_fi_fifo_xb[i] == NULL) {
                /* Alloc failed; don't store anything */
                break;
            }
            rval = ddi_copyin((void *)arg, un->sd_fi_fifo_xb[i],
                sizeof (struct sd_fi_xb), 0);

            if (rval == -1) {
                kmem_free(un->sd_fi_fifo_xb[i],
                    sizeof (struct sd_fi_xb));
                un->sd_fi_fifo_xb[i] = NULL;
            }
        } else {
            SD_INFO(SD_LOG_IOERR, un,
                "sd_faultinjection_ioctl: xb null\n");
        }
        break;

    case SDIOCINSERTUN:
        /* Store an un struct to be pushed onto the fifo */
        SD_INFO(SD_LOG_SDTEST, un,
            "sd_faultinjection_ioctl: Injecting Fault Insert UN\n");

        i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR;

        sd_fault_injection_on = 0;

        if (un->sd_fi_fifo_un[i] != NULL) {
            kmem_free(un->sd_fi_fifo_un[i],
                sizeof (struct sd_fi_un));
            un->sd_fi_fifo_un[i] = NULL;
        }
        if (arg != NULL) {
            un->sd_fi_fifo_un[i] =
                kmem_alloc(sizeof (struct sd_fi_un), KM_NOSLEEP);
            if (un->sd_fi_fifo_un[i] == NULL) {
                /* Alloc failed; don't store anything */
                break;
            }
            rval = ddi_copyin((void *)arg, un->sd_fi_fifo_un[i],
                sizeof (struct sd_fi_un), 0);
            if (rval == -1) {
                kmem_free(un->sd_fi_fifo_un[i],
                    sizeof (struct sd_fi_un));
                un->sd_fi_fifo_un[i] = NULL;
            }

        } else {
            SD_INFO(SD_LOG_IOERR, un,
                "sd_faultinjection_ioctl: un null\n");
        }

        break;

    case SDIOCINSERTARQ:
        /* Store an arq struct to be pushed onto the fifo */
        SD_INFO(SD_LOG_SDTEST, un,
            "sd_faultinjection_ioctl: Injecting Fault Insert ARQ\n");
        i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR;

        sd_fault_injection_on = 0;

        if (un->sd_fi_fifo_arq[i] != NULL) {
            kmem_free(un->sd_fi_fifo_arq[i],
                sizeof (struct sd_fi_arq));
            un->sd_fi_fifo_arq[i] = NULL;
        }
        if (arg != NULL) {
            un->sd_fi_fifo_arq[i] =
                kmem_alloc(sizeof (struct sd_fi_arq),
KM_NOSLEEP);
            if (un->sd_fi_fifo_arq[i] == NULL) {
                /* Alloc failed; don't store anything */
                break;
            }
            rval = ddi_copyin((void *)arg, un->sd_fi_fifo_arq[i],
                sizeof (struct sd_fi_arq), 0);
            if (rval == -1) {
                kmem_free(un->sd_fi_fifo_arq[i],
                    sizeof (struct sd_fi_arq));
                un->sd_fi_fifo_arq[i] = NULL;
            }

        } else {
            SD_INFO(SD_LOG_IOERR, un,
                "sd_faultinjection_ioctl: arq null\n");
        }

        break;

    case SDIOCPUSH:
        /* Push stored xb, pkt, un, and arq onto fifo */
        sd_fault_injection_on = 0;

        if (arg != NULL) {
            rval = ddi_copyin((void *)arg, &i, sizeof (uint_t), 0);
            if (rval != -1 &&
                un->sd_fi_fifo_end + i < SD_FI_MAX_ERROR) {
                un->sd_fi_fifo_end += i;
            }
        } else {
            SD_INFO(SD_LOG_IOERR, un,
                "sd_faultinjection_ioctl: push arg null\n");
            /*
             * A null arg means push a single staged entry; note
             * that 'i' is not initialized on this path, so it
             * must not be used here.
             */
            if (un->sd_fi_fifo_end + 1 < SD_FI_MAX_ERROR) {
                un->sd_fi_fifo_end++;
            }
        }
        SD_INFO(SD_LOG_IOERR, un,
            "sd_faultinjection_ioctl: push to end=%d\n",
            un->sd_fi_fifo_end);
        break;

    case SDIOCRETRIEVE:
        /* Return buffer of log from Injection session */
        SD_INFO(SD_LOG_SDTEST, un,
            "sd_faultinjection_ioctl: Injecting Fault Retrieve");

        sd_fault_injection_on = 0;

        mutex_enter(&(un->un_fi_mutex));
        rval = ddi_copyout(un->sd_fi_log, (void *)arg,
            un->sd_fi_buf_len + 1, 0);
        mutex_exit(&(un->un_fi_mutex));

        if (rval == -1) {
            /*
             * arg is possibly invalid; set it to NULL
             * for return.
             */
            arg = NULL;
        }
        break;
    }

    mutex_exit(SD_MUTEX(un));
    SD_TRACE(SD_LOG_IOERR, un, "sd_faultinjection_ioctl: exit\n");
}
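
/*
 * A minimal sketch (illustrative only, not part of the driver) of how a
 * user-level test harness might drive these ioctls, assuming it has the
 * SDIOC* definitions and the sd_fi_* structures available and an open
 * descriptor on the device node:
 *
 *      struct sd_fi_pkt fi_pkt = { ... };      // fault to inject
 *      uint_t n = 1;
 *
 *      ioctl(fd, SDIOCSTART, NULL);            // reset fifo, clear log
 *      ioctl(fd, SDIOCINSERTPKT, &fi_pkt);     // stage a pkt fault
 *      ioctl(fd, SDIOCPUSH, &n);               // commit staged entries
 *      ioctl(fd, SDIOCRUN, NULL);              // arm injection
 *      // ... issue I/O to the device to trigger the fault ...
 *      ioctl(fd, SDIOCRETRIEVE, logbuf);       // collect injection log
 *      ioctl(fd, SDIOCSTOP, NULL);             // tear down the session
 */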


/*
 *    Function: sd_injection_log()
 *
 * Description: This routine adds buf to the already existing injection log
 *              for retrieval via faultinjection_ioctl for use in fault
 *              detection and recovery.
 *
 *   Arguments: buf - the string to add to the log
 */

static void
sd_injection_log(char *buf, struct sd_lun *un)
{
    uint_t len;

    ASSERT(un != NULL);
    ASSERT(buf != NULL);

    mutex_enter(&(un->un_fi_mutex));

    len = min(strlen(buf), 255);
    /* Add logged value to Injection log to be returned later */
    if (len + un->sd_fi_buf_len < SD_FI_MAX_BUF) {
        uint_t offset = strlen((char *)un->sd_fi_log);
        char *destp = (char *)un->sd_fi_log + offset;
        int i;
        for (i = 0; i < len; i++) {
            *destp++ = *buf++;
        }
        un->sd_fi_buf_len += len;
        un->sd_fi_log[un->sd_fi_buf_len] = '\0';
    }

    mutex_exit(&(un->un_fi_mutex));
}


/*
 *    Function: sd_faultinjection()
 *
 * Description: This routine takes the pkt and changes its
 *              content based on the error injection scenario.
 *
 *   Arguments: pktp - packet to be changed
 */

static void
sd_faultinjection(struct scsi_pkt *pktp)
{
    uint_t i;
    struct sd_fi_pkt *fi_pkt;
    struct sd_fi_xb *fi_xb;
    struct sd_fi_un *fi_un;
    struct sd_fi_arq *fi_arq;
    struct buf *bp;
    struct sd_xbuf *xb;
    struct sd_lun *un;

    ASSERT(pktp != NULL);

    /* pull bp, xb and un from pktp */
    bp = (struct buf *)pktp->pkt_private;
    xb = SD_GET_XBUF(bp);
    un = SD_GET_UN(bp);

    ASSERT(un != NULL);

    mutex_enter(SD_MUTEX(un));

    SD_TRACE(SD_LOG_SDTEST, un,
        "sd_faultinjection: entry Injection from sdintr\n");

    /* if injection is off return */
    if (sd_fault_injection_on == 0 ||
        un->sd_fi_fifo_start == un->sd_fi_fifo_end) {
        mutex_exit(SD_MUTEX(un));
        return;
    }


    /* take next set off fifo */
    i = un->sd_fi_fifo_start % SD_FI_MAX_ERROR;

    fi_pkt = un->sd_fi_fifo_pkt[i];
    fi_xb = un->sd_fi_fifo_xb[i];
    fi_un = un->sd_fi_fifo_un[i];
    fi_arq = un->sd_fi_fifo_arq[i];


    /* set variables accordingly */
    /* set pkt if it was on fifo */
    if (fi_pkt != NULL) {
        SD_CONDSET(pktp, pkt, pkt_flags, "pkt_flags");
        SD_CONDSET(*pktp, pkt, pkt_scbp, "pkt_scbp");
        SD_CONDSET(*pktp, pkt, pkt_cdbp, "pkt_cdbp");
        SD_CONDSET(pktp, pkt, pkt_state, "pkt_state");
        SD_CONDSET(pktp, pkt, pkt_statistics, "pkt_statistics");
        SD_CONDSET(pktp, pkt, pkt_reason, "pkt_reason");

    }

    /* set xb if it was on fifo */
    if (fi_xb != NULL) {
        SD_CONDSET(xb, xb, xb_blkno, "xb_blkno");
        SD_CONDSET(xb, xb, xb_dma_resid, "xb_dma_resid");
        SD_CONDSET(xb, xb, xb_retry_count, "xb_retry_count");
        SD_CONDSET(xb, xb, xb_victim_retry_count,
            "xb_victim_retry_count");
        SD_CONDSET(xb, xb, xb_sense_status, "xb_sense_status");
        SD_CONDSET(xb, xb, xb_sense_state, "xb_sense_state");
        SD_CONDSET(xb, xb, xb_sense_resid, "xb_sense_resid");

        /* copy in block data from sense */
        if (fi_xb->xb_sense_data[0] != -1) {
            bcopy(fi_xb->xb_sense_data, xb->xb_sense_data,
                SENSE_LENGTH);
        }

        /* copy in extended sense codes */
        SD_CONDSET(((struct scsi_extended_sense *)xb), xb, es_code,
            "es_code");
        SD_CONDSET(((struct scsi_extended_sense *)xb), xb, es_key,
            "es_key");
        SD_CONDSET(((struct scsi_extended_sense *)xb), xb, es_add_code,
            "es_add_code");
        SD_CONDSET(((struct scsi_extended_sense *)xb), xb,
            es_qual_code, "es_qual_code");
    }

    /* set un if it was on fifo */
    if (fi_un != NULL) {
        SD_CONDSET(un->un_sd->sd_inq, un, inq_rmb, "inq_rmb");
        SD_CONDSET(un, un, un_ctype, "un_ctype");
        SD_CONDSET(un, un, un_reset_retry_count,
            "un_reset_retry_count");
        SD_CONDSET(un, un, un_reservation_type, "un_reservation_type");
        SD_CONDSET(un, un, un_resvd_status, "un_resvd_status");
        SD_CONDSET(un, un, un_f_arq_enabled, "un_f_arq_enabled");
        SD_CONDSET(un, un, un_f_allow_bus_device_reset,
            "un_f_allow_bus_device_reset");
        SD_CONDSET(un, un, un_f_opt_queueing, "un_f_opt_queueing");

    }

    /* copy in auto request sense if it was on fifo */
    if (fi_arq != NULL) {
        bcopy(fi_arq, pktp->pkt_scbp, sizeof (struct sd_fi_arq));
    }

    /* free structs */
    if (un->sd_fi_fifo_pkt[i] != NULL) {
        kmem_free(un->sd_fi_fifo_pkt[i], sizeof (struct sd_fi_pkt));
    }
    if (un->sd_fi_fifo_xb[i] != NULL) {
        kmem_free(un->sd_fi_fifo_xb[i], sizeof (struct sd_fi_xb));
    }
    if (un->sd_fi_fifo_un[i] != NULL) {
        kmem_free(un->sd_fi_fifo_un[i], sizeof (struct sd_fi_un));
    }
    if (un->sd_fi_fifo_arq[i] != NULL) {
        kmem_free(un->sd_fi_fifo_arq[i], sizeof (struct sd_fi_arq));
    }

    /*
     * kmem_free does not guarantee that these pointers are set to NULL,
     * and since we use them to determine whether or not values were set,
     * make sure they are always NULL after being freed.
     */
    un->sd_fi_fifo_pkt[i] = NULL;
    un->sd_fi_fifo_un[i] = NULL;
    un->sd_fi_fifo_xb[i] = NULL;
    un->sd_fi_fifo_arq[i] = NULL;

    un->sd_fi_fifo_start++;

    mutex_exit(SD_MUTEX(un));

    SD_TRACE(SD_LOG_SDTEST, un, "sd_faultinjection: exit\n");
}

#endif /* SD_FAULT_INJECTION */

/*
 * This routine is invoked in sd_unit_attach(). Before calling it, the
 * properties in the conf file should already have been processed, including
 * the "hotpluggable" property.
 *
 * The sd driver distinguishes 3 different types of devices: removable media,
 * non-removable media, and hotpluggable. Below the differences are defined:
 *
 * 1. Device ID
 *
 *    The device ID of a device is used to identify this device. Refer to
 *    ddi_devid_register(9F).
 *
 *    For a non-removable media disk device that can provide the 0x80 or 0x83
 *    VPD page (refer to the INQUIRY command of the SCSI SPC specification),
 *    a unique device ID is created to identify this device. For other
 *    non-removable media devices, a default device ID is created only if
 *    this device has at least 2 alternate cylinders. Otherwise, this device
 *    has no devid.
 *
 *    -------------------------------------------------------
 *    removable media   hotpluggable  |  Can Have Device ID
 *    -------------------------------------------------------
 *        false             false     |        Yes
 *        false             true      |        Yes
 *        true                x       |        No
 *    -------------------------------------------------------
 *
 *
 * 2. SCSI group 4 commands
 *
 *    In the SCSI specs, only some commands in the group 4 command set can
 *    use 8-byte addresses that can be used to access >2TB storage spaces.
 *    Other commands have no such capability. Without supporting group 4,
 *    it is impossible to make full use of the storage space of a disk with
 *    a capacity larger than 2TB.
 *
 *    -----------------------------------------------
 *    removable media   hotpluggable   LP64  |  Group
 *    -----------------------------------------------
 *        false             false      false |    1
 *        false             false      true  |    4
 *        false             true       false |    1
 *        false             true       true  |    4
 *        true                x          x   |    5
 *    -----------------------------------------------
 *
 *
 * 3. Check for VTOC Label
 *
 *    If a direct-access disk has no EFI label, sd will check if it has a
 *    valid VTOC label. Now, sd also does that check for removable media
 *    and hotpluggable devices.
 *
 *    --------------------------------------------------------------
 *    Direct-Access   removable media   hotpluggable  |  Check Label
 *    --------------------------------------------------------------
 *        false            false            false     |      No
 *        false            false            true      |      No
 *        false            true             false     |      Yes
 *        false            true             true      |      Yes
 *        true               x                x       |      Yes
 *    --------------------------------------------------------------
 *
 *
 * 4. Building default VTOC label
 *
 *    As section 3 says, sd checks whether some kinds of devices have a VTOC
 *    label. If those devices have no valid VTOC label, sd(7d) will attempt
 *    to create a default VTOC for them. Currently sd creates a default VTOC
 *    label for all devices on the x86 platform (VTOC_16), but only for
 *    removable media devices on SPARC (VTOC_8).
 *
 *    -----------------------------------------------------------
 *    removable media   hotpluggable   platform  |  Default Label
 *    -----------------------------------------------------------
 *        false             false       sparc    |      No
 *        false             false       x86      |      Yes
 *        false             true        x86      |      Yes
 *        false             true        sparc    |      Yes
 *        true                x          x       |      Yes
 *    -----------------------------------------------------------
 *
 *
 * 5. Supported blocksizes of target devices
 *
 *    Sd supports non-512-byte blocksizes for removable media devices only.
 *    For other devices, only a 512-byte blocksize is supported. This may be
 *    changed in the near future because some RAID devices require a
 *    non-512-byte blocksize.
 *
 *    -----------------------------------------------------------
 *    removable media   hotpluggable  |  non-512-byte blocksize
 *    -----------------------------------------------------------
 *        false             false     |          No
 *        false             true      |          No
 *        true                x       |          Yes
 *    -----------------------------------------------------------
 *
 *
 * 6. Automatic mount & unmount
 *
 *    The sd(7d) driver provides the DKIOCREMOVABLE ioctl, which is used to
 *    query whether a device is a removable media device. It returns 1 for
 *    removable media devices, and 0 for others; see the sketch following
 *    this comment block for an example.
 *
 *    The automatic mounting subsystem should distinguish between the types
 *    of devices and apply automounting policies to each.
 *
 *
 * 7. fdisk partition management
 *
 *    Fdisk is the traditional partitioning method on the x86 platform. The
 *    sd(7d) driver supports fdisk partitions only on x86; on SPARC, sd
 *    doesn't support fdisk partitions at all. Note: pcfs(7fs) can recognize
 *    fdisk partitions on both the x86 and SPARC platforms.
 *
 *    -----------------------------------------------------------
 *    platform   removable media   USB/1394  |  fdisk supported
 *    -----------------------------------------------------------
 *      x86            X               X     |      true
 *    -----------------------------------------------------------
 *      sparc          X               X     |      false
 *    -----------------------------------------------------------
 *
 *
 * 8. MBOOT/MBR
 *
 *    Although sd(7d) doesn't support fdisk on the SPARC platform, it does
 *    support read/write of the mboot for removable media devices on SPARC.
 *
 *    -----------------------------------------------------------
 *    platform   removable media   USB/1394  |  mboot supported
 *    -----------------------------------------------------------
 *      x86            X               X     |      true
 *    -----------------------------------------------------------
 *      sparc        false           false   |      false
 *      sparc        false           true    |      true
 *      sparc        true            false   |      true
 *      sparc        true            true    |      true
 *    -----------------------------------------------------------
 *
 *
 * 9. Error handling during device open
 *
 *    If a disk device fails to open, an errno is returned. For some kinds
 *    of errors, different errnos are returned depending on whether the
 *    device is a removable media device. This brings USB/1394 hard disks
 *    in line with expected hard disk behavior. It is not expected that
 *    this breaks any application.
 *
 *    ------------------------------------------------------
 *    removable media   hotpluggable  |  errno
 *    ------------------------------------------------------
 *        false             false     |   EIO
 *        false             true      |   EIO
 *        true                x       |   ENXIO
 *    ------------------------------------------------------
 *
 *
 * 10. ioctls: DKIOCEJECT, CDROMEJECT
 *
 *    These IOCTLs are applicable only to removable media devices.
 *
 *    -----------------------------------------------------------
 *    removable media   hotpluggable  |  DKIOCEJECT, CDROMEJECT
 *    -----------------------------------------------------------
 *        false             false     |          No
 *        false             true      |          No
 *        true                x       |          Yes
 *    -----------------------------------------------------------
 *
 *
 * 11. Kstats for partitions
 *
 *    sd creates partition kstats for non-removable media devices. USB and
 *    Firewire hard disks now have partition kstats.
 *
 *    ------------------------------------------------------
 *    removable media   hotpluggable  |  kstat
 *    ------------------------------------------------------
 *        false             false     |   Yes
 *        false             true      |   Yes
 *        true                x       |   No
 *    ------------------------------------------------------
 *
 *
 * 12. Removable media & hotpluggable properties
 *
 *    The sd driver creates a "removable-media" property for removable media
 *    devices. Parent nexus drivers create a "hotpluggable" property if they
 *    support hotplugging.
 *
 *    ---------------------------------------------------------------------
 *    removable media   hotpluggable  |  "removable-media"  "hotpluggable"
 *    ---------------------------------------------------------------------
 *        false             false     |        No                No
 *        false             true      |        No                Yes
 *        true              false     |        Yes               No
 *        true              true      |        Yes               Yes
 *    ---------------------------------------------------------------------
 *
 *
 * 13. Power Management
 *
 *    sd only power manages removable media devices or devices that support
 *    LOG_SENSE or have a "pm-capable" property (PSARC/2002/250).
 *
 *    A parent nexus that supports hotplugging can also set "pm-capable"
 *    if the disk can be power managed.
 *
 *    ------------------------------------------------------------
 *    removable media   hotpluggable   pm-capable  |  power manage
 *    ------------------------------------------------------------
 *        false             false        false     |      No
 *        false             false        true      |      Yes
 *        false             true         false     |      No
 *        false             true         true      |      Yes
 *        true                x            x       |      Yes
 *    ------------------------------------------------------------
 *
 *    USB and Firewire hard disks can now be power managed independently
 *    of the framebuffer.
 *
 *
 * 14. Support for USB disks with capacity larger than 1TB
 *
 *    Currently, sd doesn't permit a fixed disk device with a capacity
 *    larger than 1TB to be used in a 32-bit operating system environment.
 *    However, sd doesn't do that for removable media devices. Instead, it
 *    assumes that removable media devices cannot have a capacity larger
 *    than 1TB. Therefore, using those devices on a 32-bit system is
 *    partially supported, which can cause some unexpected results.
 *
 *    ---------------------------------------------------------------------
 *    removable media   USB/1394  |  Capacity > 1TB  |  Used in 32-bit env
 *    ---------------------------------------------------------------------
 *        false           false   |       true       |         No
 *        false           true    |       true       |         No
 *        true            false   |       true       |         Yes
 *        true            true    |       true       |         Yes
 *    ---------------------------------------------------------------------
 *
 *
 * 15. Check write-protection at open time
 *
 *    When a removable media device is being opened for writing without the
 *    NDELAY flag, sd will check if the device is writable. If a
 *    write-protected device is opened for writing without the NDELAY flag,
 *    the open will fail.
 *
 *    ------------------------------------------------------------
 *    removable media   USB/1394  |  WP Check
 *    ------------------------------------------------------------
 *        false           false   |     No
 *        false           true    |     No
 *        true            false   |     Yes
 *        true            true    |     Yes
 *    ------------------------------------------------------------
 *
 *
 * 16. syslog when a corrupted VTOC is encountered
 *
 *    Currently, if an invalid VTOC is encountered, sd only prints a syslog
 *    message for fixed SCSI disks.
 *
 *    ------------------------------------------------------------
 *    removable media   USB/1394  |  print syslog
 *    ------------------------------------------------------------
 *        false           false   |     Yes
 *        false           true    |     No
 *        true            false   |     No
 *        true            true    |     No
 *    ------------------------------------------------------------
 */
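
/*
 * A minimal user-level sketch (illustrative only, not part of the driver)
 * of the DKIOCREMOVABLE query described in section 6 above; the device
 * path is hypothetical:
 *
 *      #include <sys/dkio.h>
 *
 *      int removable = 0;
 *      int fd = open("/dev/rdsk/c0t0d0s2", O_RDONLY | O_NDELAY);
 *      if (fd >= 0 && ioctl(fd, DKIOCREMOVABLE, &removable) == 0) {
 *              // removable is 1 for removable media devices, else 0
 *      }
 */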
28185 */ 28186 un->un_f_has_removable_media = TRUE; 28187 28188 /* 28189 * support non-512-byte blocksize of removable media devices 28190 */ 28191 un->un_f_non_devbsize_supported = TRUE; 28192 28193 /* 28194 * Assume that all removable media devices support DOOR_LOCK 28195 */ 28196 un->un_f_doorlock_supported = TRUE; 28197 28198 /* 28199 * For a removable media device, it is possible to be opened 28200 * with NDELAY flag when there is no media in drive, in this 28201 * case we don't care if device is writable. But if without 28202 * NDELAY flag, we need to check if media is write-protected. 28203 */ 28204 un->un_f_chk_wp_open = TRUE; 28205 28206 /* 28207 * need to start a SCSI watch thread to monitor media state, 28208 * when media is being inserted or ejected, notify syseventd. 28209 */ 28210 un->un_f_monitor_media_state = TRUE; 28211 28212 /* 28213 * Some devices don't support START_STOP_UNIT command. 28214 * Therefore, we'd better check if a device supports it 28215 * before sending it. 28216 */ 28217 un->un_f_check_start_stop = TRUE; 28218 28219 /* 28220 * support eject media ioctl: 28221 * FDEJECT, DKIOCEJECT, CDROMEJECT 28222 */ 28223 un->un_f_eject_media_supported = TRUE; 28224 28225 /* 28226 * Because many removable-media devices don't support 28227 * LOG_SENSE, we couldn't use this command to check if 28228 * a removable media device support power-management. 28229 * We assume that they support power-management via 28230 * START_STOP_UNIT command and can be spun up and down 28231 * without limitations. 28232 */ 28233 un->un_f_pm_supported = TRUE; 28234 28235 /* 28236 * Need to create a zero length (Boolean) property 28237 * removable-media for the removable media devices. 28238 * Note that the return value of the property is not being 28239 * checked, since if unable to create the property 28240 * then do not want the attach to fail altogether. Consistent 28241 * with other property creation in attach. 28242 */ 28243 (void) ddi_prop_create(DDI_DEV_T_NONE, devi, 28244 DDI_PROP_CANSLEEP, "removable-media", NULL, 0); 28245 28246 } else { 28247 /* 28248 * create device ID for device 28249 */ 28250 un->un_f_devid_supported = TRUE; 28251 28252 /* 28253 * Spin up non-removable-media devices once it is attached 28254 */ 28255 un->un_f_attach_spinup = TRUE; 28256 28257 /* 28258 * According to SCSI specification, Sense data has two kinds of 28259 * format: fixed format, and descriptor format. At present, we 28260 * don't support descriptor format sense data for removable 28261 * media. 28262 */ 28263 if (SD_INQUIRY(un)->inq_dtype == DTYPE_DIRECT) { 28264 un->un_f_descr_format_supported = TRUE; 28265 } 28266 28267 /* 28268 * kstats are created only for non-removable media devices. 28269 * 28270 * Set this in sd.conf to 0 in order to disable kstats. The 28271 * default is 1, so they are enabled by default. 28272 */ 28273 un->un_f_pkstats_enabled = (ddi_prop_get_int(DDI_DEV_T_ANY, 28274 SD_DEVINFO(un), DDI_PROP_DONTPASS, 28275 "enable-partition-kstats", 1)); 28276 28277 /* 28278 * Check if HBA has set the "pm-capable" property. 28279 * If "pm-capable" exists and is non-zero then we can 28280 * power manage the device without checking the start/stop 28281 * cycle count log sense page. 28282 * 28283 * If "pm-capable" exists and is SD_PM_CAPABLE_FALSE (0) 28284 * then we should not power manage the device. 28285 * 28286 * If "pm-capable" doesn't exist then pm_capable_prop will 28287 * be set to SD_PM_CAPABLE_UNDEFINED (-1). 

        /*
         * Check if the HBA has set the "pm-capable" property.
         * If "pm-capable" exists and is non-zero then we can
         * power manage the device without checking the start/stop
         * cycle count log sense page.
         *
         * If "pm-capable" exists and is SD_PM_CAPABLE_FALSE (0)
         * then we should not power manage the device.
         *
         * If "pm-capable" doesn't exist then pm_capable_prop will
         * be set to SD_PM_CAPABLE_UNDEFINED (-1). In this case,
         * sd will check the start/stop cycle count log sense page
         * and power manage the device if the cycle count limit has
         * not been exceeded.
         */
        pm_capable_prop = ddi_prop_get_int(DDI_DEV_T_ANY, devi,
            DDI_PROP_DONTPASS, "pm-capable", SD_PM_CAPABLE_UNDEFINED);
        if (pm_capable_prop == SD_PM_CAPABLE_UNDEFINED) {
            un->un_f_log_sense_supported = TRUE;
        } else {
            /*
             * pm-capable property exists.
             *
             * Convert "TRUE" values for pm_capable_prop to
             * SD_PM_CAPABLE_TRUE (1) to make it easier to check
             * later. "TRUE" values are any values except
             * SD_PM_CAPABLE_FALSE (0) and
             * SD_PM_CAPABLE_UNDEFINED (-1).
             */
            if (pm_capable_prop == SD_PM_CAPABLE_FALSE) {
                un->un_f_log_sense_supported = FALSE;
            } else {
                un->un_f_pm_supported = TRUE;
            }

            SD_INFO(SD_LOG_ATTACH_DETACH, un,
                "sd_unit_attach: un:0x%p pm-capable "
                "property set to %d.\n", un, un->un_f_pm_supported);
        }
    }

    if (un->un_f_is_hotpluggable) {

        /*
         * Have to watch hotpluggable devices as well, since
         * that's the only way for userland applications to
         * detect hot removal while the device is busy/mounted.
         */
        un->un_f_monitor_media_state = TRUE;

        un->un_f_check_start_stop = TRUE;

    }
}

/*
 * sd_tg_rdwr:
 *    Provides rdwr access for cmlb via sd_tgops. The start_block is
 *    in sys block size, reqlength in bytes.
 */
static int
sd_tg_rdwr(dev_info_t *devi, uchar_t cmd, void *bufaddr,
    diskaddr_t start_block, size_t reqlength, void *tg_cookie)
{
    struct sd_lun *un;
    int path_flag = (int)(uintptr_t)tg_cookie;
    char *dkl = NULL;
    diskaddr_t real_addr = start_block;
    diskaddr_t first_byte, end_block;

    size_t buffer_size = reqlength;
    int rval;
    diskaddr_t cap;
    uint32_t lbasize;

    un = ddi_get_soft_state(sd_state, ddi_get_instance(devi));
    if (un == NULL)
        return (ENXIO);

    if (cmd != TG_READ && cmd != TG_WRITE)
        return (EINVAL);

    mutex_enter(SD_MUTEX(un));
    if (un->un_f_tgt_blocksize_is_valid == FALSE) {
        mutex_exit(SD_MUTEX(un));
        rval = sd_send_scsi_READ_CAPACITY(un, (uint64_t *)&cap,
            &lbasize, path_flag);
        if (rval != 0)
            return (rval);
        mutex_enter(SD_MUTEX(un));
        sd_update_block_info(un, lbasize, cap);
        if ((un->un_f_tgt_blocksize_is_valid == FALSE)) {
            mutex_exit(SD_MUTEX(un));
            return (EIO);
        }
    }

    if (NOT_DEVBSIZE(un)) {
        /*
         * sys_blocksize != tgt_blocksize, need to re-adjust
         * blkno and save the index to beginning of dk_label
         */
        first_byte = SD_SYSBLOCKS2BYTES(un, start_block);
        real_addr = first_byte / un->un_tgt_blocksize;

        end_block = (first_byte + reqlength +
            un->un_tgt_blocksize - 1) / un->un_tgt_blocksize;

        /* round up buffer size to multiple of target block size */
        buffer_size = (end_block - real_addr) * un->un_tgt_blocksize;

        SD_TRACE(SD_LOG_IO_PARTITION, un, "sd_tg_rdwr",
            "label_addr: 0x%x allocation size: 0x%x\n",
            real_addr, buffer_size);

        if (((first_byte % un->un_tgt_blocksize) != 0) ||
            (reqlength % un->un_tgt_blocksize) != 0)
            /* the request is not aligned */
            dkl = kmem_zalloc(buffer_size, KM_SLEEP);
    }

    /*
     * The MMC standard allows READ CAPACITY to be
     * inaccurate by a bounded amount (in the interest of
     * response latency). As a result, failed READs are
     * commonplace (due to the reading of metadata and not
     * data). Depending on the per-Vendor/drive Sense data,
     * the failed READ can cause many (unnecessary) retries.
     */

    if (ISCD(un) && (cmd == TG_READ) &&
        (un->un_f_blockcount_is_valid == TRUE) &&
        ((start_block == (un->un_blockcount - 1)) ||
        (start_block == (un->un_blockcount - 2)))) {
        path_flag = SD_PATH_DIRECT_PRIORITY;
    }

    mutex_exit(SD_MUTEX(un));
    if (cmd == TG_READ) {
        rval = sd_send_scsi_READ(un, (dkl != NULL) ? dkl : bufaddr,
            buffer_size, real_addr, path_flag);
        if (dkl != NULL)
            bcopy(dkl + SD_TGTBYTEOFFSET(un, start_block,
                real_addr), bufaddr, reqlength);
    } else {
        if (dkl) {
            rval = sd_send_scsi_READ(un, dkl, buffer_size,
                real_addr, path_flag);
            if (rval) {
                kmem_free(dkl, buffer_size);
                return (rval);
            }
            bcopy(bufaddr, dkl + SD_TGTBYTEOFFSET(un, start_block,
                real_addr), reqlength);
        }
        rval = sd_send_scsi_WRITE(un, (dkl != NULL) ? dkl : bufaddr,
            buffer_size, real_addr, path_flag);
    }

    if (dkl != NULL)
        kmem_free(dkl, buffer_size);

    return (rval);
}
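
/*
 * Illustrative only (not compiled): a worked example of the unaligned-access
 * math in sd_tg_rdwr() above, assuming a 512-byte system block size and a
 * 2048-byte target block size. For start_block = 3 and reqlength = 1024:
 *
 *      first_byte  = 3 * 512                      = 1536
 *      real_addr   = 1536 / 2048                  = 0
 *      end_block   = (1536 + 1024 + 2047) / 2048  = 2
 *      buffer_size = (2 - 0) * 2048               = 4096
 *
 * Since 1536 % 2048 != 0 the request is unaligned, so a bounce buffer (dkl)
 * is used and, for a write, the surrounding target blocks are first read in
 * (read-modify-write) before the modified region is written back.
 */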


static int
sd_tg_getinfo(dev_info_t *devi, int cmd, void *arg, void *tg_cookie)
{
    struct sd_lun *un;
    diskaddr_t cap;
    uint32_t lbasize;
    int path_flag = (int)(uintptr_t)tg_cookie;
    int ret = 0;

    un = ddi_get_soft_state(sd_state, ddi_get_instance(devi));
    if (un == NULL)
        return (ENXIO);

    switch (cmd) {
    case TG_GETPHYGEOM:
    case TG_GETVIRTGEOM:
    case TG_GETCAPACITY:
    case TG_GETBLOCKSIZE:
        mutex_enter(SD_MUTEX(un));

        if ((un->un_f_blockcount_is_valid == TRUE) &&
            (un->un_f_tgt_blocksize_is_valid == TRUE)) {
            cap = un->un_blockcount;
            lbasize = un->un_tgt_blocksize;
            mutex_exit(SD_MUTEX(un));
        } else {
            mutex_exit(SD_MUTEX(un));
            ret = sd_send_scsi_READ_CAPACITY(un, (uint64_t *)&cap,
                &lbasize, path_flag);
            if (ret != 0)
                return (ret);
            mutex_enter(SD_MUTEX(un));
            sd_update_block_info(un, lbasize, cap);
            if ((un->un_f_blockcount_is_valid == FALSE) ||
                (un->un_f_tgt_blocksize_is_valid == FALSE)) {
                mutex_exit(SD_MUTEX(un));
                return (EIO);
            }
            mutex_exit(SD_MUTEX(un));
        }

        if (cmd == TG_GETCAPACITY) {
            *(diskaddr_t *)arg = cap;
            return (0);
        }

        if (cmd == TG_GETBLOCKSIZE) {
            *(uint32_t *)arg = lbasize;
            return (0);
        }

        if (cmd == TG_GETPHYGEOM)
            ret = sd_get_physical_geometry(un, (cmlb_geom_t *)arg,
                cap, lbasize, path_flag);
        else
            /* TG_GETVIRTGEOM */
            ret = sd_get_virtual_geometry(un,
                (cmlb_geom_t *)arg, cap, lbasize);

        return (ret);

    case TG_GETATTR:
        mutex_enter(SD_MUTEX(un));
        ((tg_attribute_t *)arg)->media_is_writable =
            un->un_f_mmc_writable_media;
        mutex_exit(SD_MUTEX(un));
        return (0);
    default:
        return (ENOTTY);
    }
}
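
/*
 * A minimal sketch (illustrative only, not part of the driver) of how these
 * tg_ops entry points might be queried, with hypothetical local variables;
 * the tg_cookie encodes the sd path flag as shown in the functions above:
 *
 *      diskaddr_t capacity;
 *      uint32_t blocksize;
 *      void *cookie = (void *)(uintptr_t)SD_PATH_DIRECT;
 *
 *      if (sd_tg_getinfo(devi, TG_GETCAPACITY, &capacity, cookie) == 0 &&
 *          sd_tg_getinfo(devi, TG_GETBLOCKSIZE, &blocksize, cookie) == 0) {
 *              // capacity is in target blocks of size blocksize
 *      }
 */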