/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * SCSI disk target driver.
 */
#include <sys/scsi/scsi.h>
#include <sys/dkbad.h>
#include <sys/dklabel.h>
#include <sys/dkio.h>
#include <sys/fdio.h>
#include <sys/cdio.h>
#include <sys/mhd.h>
#include <sys/vtoc.h>
#include <sys/dktp/fdisk.h>
#include <sys/kstat.h>
#include <sys/vtrace.h>
#include <sys/note.h>
#include <sys/thread.h>
#include <sys/proc.h>
#include <sys/efi_partition.h>
#include <sys/var.h>
#include <sys/aio_req.h>

#ifdef __lock_lint
#define	_LP64
#define	__amd64
#endif

#if (defined(__fibre))
/* Note: is there a leadville version of the following? */
#include <sys/fc4/fcal_linkapp.h>
#endif
#include <sys/taskq.h>
#include <sys/uuid.h>
#include <sys/byteorder.h>
#include <sys/sdt.h>

#include "sd_xbuf.h"

#include <sys/scsi/targets/sddef.h>
#include <sys/cmlb.h>


/*
 * Loadable module info.
 */
#if (defined(__fibre))
#define	SD_MODULE_NAME	"SCSI SSA/FCAL Disk Driver %I%"
char _depends_on[]	= "misc/scsi misc/cmlb drv/fcp";
#else
#define	SD_MODULE_NAME	"SCSI Disk Driver %I%"
char _depends_on[]	= "misc/scsi misc/cmlb";
#endif

/*
 * Define the interconnect type, to allow the driver to distinguish
 * between parallel SCSI (sd) and fibre channel (ssd) behaviors.
 *
 * This is really for backward compatibility. In the future, the driver
 * should actually check the "interconnect-type" property as reported by
 * the HBA; however at present this property is not defined by all HBAs,
 * so we will use this #define (1) to permit the driver to run in
 * backward-compatibility mode; and (2) to print a notification message
 * if an FC HBA does not support the "interconnect-type" property. The
 * behavior of the driver will be to assume parallel SCSI behaviors unless
 * the "interconnect-type" property is defined by the HBA **AND** has a
 * value of either INTERCONNECT_FIBRE, INTERCONNECT_SSA, or
 * INTERCONNECT_FABRIC, in which case the driver will assume Fibre
 * Channel behaviors (as per the old ssd). (Note that the
 * INTERCONNECT_1394 and INTERCONNECT_USB types are not supported and
 * will result in the driver assuming parallel SCSI behaviors.)
 *
 * (see common/sys/scsi/impl/services.h)
 *
 * Note: For ssd semantics, don't use INTERCONNECT_FABRIC as the default
 * since some FC HBAs may already support that, and there is some code in
 * the driver that already looks for it. Using INTERCONNECT_FABRIC as the
 * default would confuse that code, and besides things should work fine
 * anyway if the FC HBA already reports INTERCONNECT_FABRIC for the
 * "interconnect-type" property.
 */
#if (defined(__fibre))
#define	SD_DEFAULT_INTERCONNECT_TYPE	SD_INTERCONNECT_FIBRE
#else
#define	SD_DEFAULT_INTERCONNECT_TYPE	SD_INTERCONNECT_PARALLEL
#endif
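/*
 * Illustrative sketch (not part of the driver): one way the
 * "interconnect-type" property discussed above could be queried through
 * the DDI property interfaces. The helper name is hypothetical; the
 * property name and the INTERCONNECT_* values come from
 * common/sys/scsi/impl/services.h as referenced in the comment.
 *
 *	static int
 *	sd_example_get_interconnect(dev_info_t *dip)
 *	{
 *		int itype = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
 *		    DDI_PROP_DONTPASS, "interconnect-type", -1);
 *
 *		if ((itype == INTERCONNECT_FIBRE) ||
 *		    (itype == INTERCONNECT_SSA) ||
 *		    (itype == INTERCONNECT_FABRIC))
 *			return (SD_INTERCONNECT_FIBRE);
 *		return (SD_DEFAULT_INTERCONNECT_TYPE);
 *	}
 */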
/*
 * The name of the driver, established from the module name in _init.
 */
static char *sd_label			= NULL;

/*
 * Driver name is unfortunately prefixed on some driver.conf properties.
 */
#if (defined(__fibre))
#define	sd_max_xfer_size		ssd_max_xfer_size
#define	sd_config_list			ssd_config_list
static char *sd_max_xfer_size		= "ssd_max_xfer_size";
static char *sd_config_list		= "ssd-config-list";
#else
static char *sd_max_xfer_size		= "sd_max_xfer_size";
static char *sd_config_list		= "sd-config-list";
#endif

/*
 * Driver global variables
 */

#if (defined(__fibre))
/*
 * These #defines are to avoid namespace collisions that occur because this
 * code is currently used to compile two separate driver modules: sd and ssd.
 * All global variables need to be treated this way (even if declared static)
 * in order to allow the debugger to resolve the names properly.
 * It is anticipated that in the near future the ssd module will be obsoleted,
 * at which time this namespace issue should go away.
 */
#define	sd_state			ssd_state
#define	sd_io_time			ssd_io_time
#define	sd_failfast_enable		ssd_failfast_enable
#define	sd_ua_retry_count		ssd_ua_retry_count
#define	sd_report_pfa			ssd_report_pfa
#define	sd_max_throttle			ssd_max_throttle
#define	sd_min_throttle			ssd_min_throttle
#define	sd_rot_delay			ssd_rot_delay

#define	sd_retry_on_reservation_conflict	\
					ssd_retry_on_reservation_conflict
#define	sd_reinstate_resv_delay		ssd_reinstate_resv_delay
#define	sd_resv_conflict_name		ssd_resv_conflict_name

#define	sd_component_mask		ssd_component_mask
#define	sd_level_mask			ssd_level_mask
#define	sd_debug_un			ssd_debug_un
#define	sd_error_level			ssd_error_level

#define	sd_xbuf_active_limit		ssd_xbuf_active_limit
#define	sd_xbuf_reserve_limit		ssd_xbuf_reserve_limit

#define	sd_tr				ssd_tr
#define	sd_reset_throttle_timeout	ssd_reset_throttle_timeout
#define	sd_qfull_throttle_timeout	ssd_qfull_throttle_timeout
#define	sd_qfull_throttle_enable	ssd_qfull_throttle_enable
#define	sd_check_media_time		ssd_check_media_time
#define	sd_wait_cmds_complete		ssd_wait_cmds_complete
#define	sd_label_mutex			ssd_label_mutex
#define	sd_detach_mutex			ssd_detach_mutex
#define	sd_log_buf			ssd_log_buf
#define	sd_log_mutex			ssd_log_mutex

#define	sd_disk_table			ssd_disk_table
#define	sd_disk_table_size		ssd_disk_table_size
#define	sd_sense_mutex			ssd_sense_mutex
#define	sd_cdbtab			ssd_cdbtab

#define	sd_cb_ops			ssd_cb_ops
#define	sd_ops				ssd_ops
#define	sd_additional_codes		ssd_additional_codes
#define	sd_tgops			ssd_tgops

#define	sd_minor_data			ssd_minor_data
#define	sd_minor_data_efi		ssd_minor_data_efi

#define	sd_tq				ssd_tq
#define	sd_wmr_tq			ssd_wmr_tq
#define	sd_taskq_name			ssd_taskq_name
#define	sd_wmr_taskq_name		ssd_wmr_taskq_name
#define	sd_taskq_minalloc		ssd_taskq_minalloc
#define	sd_taskq_maxalloc		ssd_taskq_maxalloc

#define	sd_dump_format_string		ssd_dump_format_string

#define	sd_iostart_chain		ssd_iostart_chain
#define	sd_iodone_chain			ssd_iodone_chain

#define	sd_pm_idletime			ssd_pm_idletime

#define	sd_force_pm_supported		ssd_force_pm_supported

#define	sd_dtype_optical_bind		ssd_dtype_optical_bind

#endif


#ifdef	SDDEBUG
int	sd_force_pm_supported		= 0;
#endif	/* SDDEBUG */

void	*sd_state			= NULL;
int	sd_io_time			= SD_IO_TIME;
int	sd_failfast_enable		= 1;
int	sd_ua_retry_count		= SD_UA_RETRY_COUNT;
int	sd_report_pfa			= 1;
int	sd_max_throttle			= SD_MAX_THROTTLE;
int	sd_min_throttle			= SD_MIN_THROTTLE;
int	sd_rot_delay			= 4; /* Default 4ms Rotation delay */
int	sd_qfull_throttle_enable	= TRUE;

int	sd_retry_on_reservation_conflict	= 1;
int	sd_reinstate_resv_delay		= SD_REINSTATE_RESV_DELAY;
_NOTE(SCHEME_PROTECTS_DATA("safe sharing", sd_reinstate_resv_delay))

static int	sd_dtype_optical_bind	= -1;

/* Note: the following is not a bug, it really is "sd_" and not "ssd_" */
static char	*sd_resv_conflict_name	= "sd_retry_on_reservation_conflict";

/*
 * Global data for debug logging. To enable debug printing, sd_component_mask
 * and sd_level_mask should be set to the desired bit patterns as outlined in
 * sddef.h.
 */
uint_t	sd_component_mask		= 0x0;
uint_t	sd_level_mask			= 0x0;
struct	sd_lun *sd_debug_un		= NULL;
uint_t	sd_error_level			= SCSI_ERR_RETRYABLE;

/* Note: these may go away in the future... */
static uint32_t	sd_xbuf_active_limit	= 512;
static uint32_t sd_xbuf_reserve_limit	= 16;

static struct sd_resv_reclaim_request	sd_tr = { NULL, NULL, NULL, 0, 0, 0 };

/*
 * Timer value used to reset the throttle after it has been reduced
 * (typically in response to TRAN_BUSY or STATUS_QFULL)
 */
static int	sd_reset_throttle_timeout	= SD_RESET_THROTTLE_TIMEOUT;
static int	sd_qfull_throttle_timeout	= SD_QFULL_THROTTLE_TIMEOUT;

/*
 * Interval value associated with the media change scsi watch.
 */
static int	sd_check_media_time		= 3000000;

/*
 * Wait value used for in progress operations during a DDI_SUSPEND
 */
static int	sd_wait_cmds_complete		= SD_WAIT_CMDS_COMPLETE;

/*
 * sd_label_mutex protects a static buffer used in the disk label
 * component of the driver
 */
static kmutex_t	sd_label_mutex;

/*
 * sd_detach_mutex protects un_layer_count, un_detach_count, and
 * un_opens_in_progress in the sd_lun structure.
 */
static kmutex_t	sd_detach_mutex;

_NOTE(MUTEX_PROTECTS_DATA(sd_detach_mutex,
	sd_lun::{un_layer_count un_detach_count un_opens_in_progress}))
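/*
 * Illustrative sketch (not part of the driver): the locking protocol the
 * comment above describes. Any update to the counters named in the _NOTE
 * is bracketed by mutex_enter()/mutex_exit() on sd_detach_mutex, e.g.:
 *
 *	mutex_enter(&sd_detach_mutex);
 *	un->un_opens_in_progress++;
 *	mutex_exit(&sd_detach_mutex);
 */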
/*
 * Global buffer and mutex for debug logging
 */
static char	sd_log_buf[1024];
static kmutex_t	sd_log_mutex;

/*
 * Structs and globals for recording attached lun information.
 * This maintains a chain. Each node in the chain represents a SCSI controller.
 * The structure records the number of luns attached to each target connected
 * with the controller.
 * For parallel scsi device only.
 */
struct sd_scsi_hba_tgt_lun {
	struct sd_scsi_hba_tgt_lun	*next;
	dev_info_t			*pdip;
	int				nlun[NTARGETS_WIDE];
};

/*
 * Flag to indicate the lun is attached or detached
 */
#define	SD_SCSI_LUN_ATTACH	0
#define	SD_SCSI_LUN_DETACH	1

static kmutex_t	sd_scsi_target_lun_mutex;
static struct sd_scsi_hba_tgt_lun	*sd_scsi_target_lun_head = NULL;

_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_target_lun_mutex,
    sd_scsi_hba_tgt_lun::next sd_scsi_hba_tgt_lun::pdip))

_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_target_lun_mutex,
    sd_scsi_target_lun_head))

/*
 * "Smart" Probe Caching structs, globals, #defines, etc.
 * For parallel scsi and non-self-identify device only.
 */

/*
 * The following resources and routines are implemented to support
 * "smart" probing, which caches the scsi_probe() results in an array,
 * in order to help avoid long probe times.
 */
struct sd_scsi_probe_cache {
	struct	sd_scsi_probe_cache	*next;
	dev_info_t	*pdip;
	int		cache[NTARGETS_WIDE];
};

static kmutex_t	sd_scsi_probe_cache_mutex;
static struct	sd_scsi_probe_cache *sd_scsi_probe_cache_head = NULL;

/*
 * Really we only need protection on the head of the linked list, but
 * better safe than sorry.
 */
_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_probe_cache_mutex,
    sd_scsi_probe_cache::next sd_scsi_probe_cache::pdip))

_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_probe_cache_mutex,
    sd_scsi_probe_cache_head))


/*
 * Vendor specific data name property declarations
 */

#if defined(__fibre) || defined(__i386) || defined(__amd64)

static sd_tunables seagate_properties = {
	SEAGATE_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};


static sd_tunables fujitsu_properties = {
	FUJITSU_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables ibm_properties = {
	IBM_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables purple_properties = {
	PURPLE_THROTTLE_VALUE,
	0,
	0,
	PURPLE_BUSY_RETRIES,
	PURPLE_RESET_RETRY_COUNT,
	PURPLE_RESERVE_RELEASE_TIME,
	0,
	0,
	0
};

static sd_tunables sve_properties = {
	SVE_THROTTLE_VALUE,
	0,
	0,
	SVE_BUSY_RETRIES,
	SVE_RESET_RETRY_COUNT,
	SVE_RESERVE_RELEASE_TIME,
	SVE_MIN_THROTTLE_VALUE,
	SVE_DISKSORT_DISABLED_FLAG,
	0
};

static sd_tunables maserati_properties = {
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	MASERATI_DISKSORT_DISABLED_FLAG,
	MASERATI_LUN_RESET_ENABLED_FLAG
};

static sd_tunables pirus_properties = {
	PIRUS_THROTTLE_VALUE,
	0,
	PIRUS_NRR_COUNT,
	PIRUS_BUSY_RETRIES,
	PIRUS_RESET_RETRY_COUNT,
	0,
	PIRUS_MIN_THROTTLE_VALUE,
	PIRUS_DISKSORT_DISABLED_FLAG,
	PIRUS_LUN_RESET_ENABLED_FLAG
};

#endif
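/*
 * Illustrative note (not part of the driver): the sd_tunables initializers
 * above and below are positional. One hypothetical entry, annotated with
 * the meaning of each slot as inferred from the SD_CONF_BSET_* flags these
 * structures are paired with in sd_disk_table:
 *
 *	static sd_tunables example_properties = {
 *		16,	-- throttle value
 *		0,	-- ctype
 *		12,	-- not-ready retry count
 *		60,	-- busy retry count
 *		36,	-- reset retry count
 *		60,	-- reserve release time
 *		0,	-- minimum throttle
 *		0,	-- disksort disabled flag
 *		0	-- LUN reset enabled flag
 *	};
 *
 * Some entries (e.g. lsi_oem_properties below) carry an additional
 * trailing flag; see its pairing with SD_CONF_BSET_CACHE_IS_NV.
 */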

#if (defined(__sparc) && !defined(__fibre)) || \
	(defined(__i386) || defined(__amd64))


static sd_tunables elite_properties = {
	ELITE_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables st31200n_properties = {
	ST31200N_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};

#endif /* Fibre or not */

static sd_tunables lsi_properties_scsi = {
	LSI_THROTTLE_VALUE,
	0,
	LSI_NOTREADY_RETRIES,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables symbios_properties = {
	SYMBIOS_THROTTLE_VALUE,
	0,
	SYMBIOS_NOTREADY_RETRIES,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables lsi_properties = {
	0,
	0,
	LSI_NOTREADY_RETRIES,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables lsi_oem_properties = {
	0,
	0,
	LSI_OEM_NOTREADY_RETRIES,
	0,
	0,
	0,
	0,
	0,
	0,
	1
};



#if (defined(SD_PROP_TST))

#define	SD_TST_CTYPE_VAL	CTYPE_CDROM
#define	SD_TST_THROTTLE_VAL	16
#define	SD_TST_NOTREADY_VAL	12
#define	SD_TST_BUSY_VAL		60
#define	SD_TST_RST_RETRY_VAL	36
#define	SD_TST_RSV_REL_TIME	60

static sd_tunables tst_properties = {
	SD_TST_THROTTLE_VAL,
	SD_TST_CTYPE_VAL,
	SD_TST_NOTREADY_VAL,
	SD_TST_BUSY_VAL,
	SD_TST_RST_RETRY_VAL,
	SD_TST_RSV_REL_TIME,
	0,
	0,
	0
};
#endif

/* This is similar to the ANSI toupper implementation */
#define	SD_TOUPPER(C)	(((C) >= 'a' && (C) <= 'z') ? (C) - 'a' + 'A' : (C))

/*
 * Static Driver Configuration Table
 *
 * This is the table of disks which need throttle adjustment (or, perhaps
 * something else as defined by the flags at a future time.) device_id
 * is a string consisting of concatenated vid (vendor), pid (product/model)
 * and revision strings as defined in the scsi_inquiry structure. Offsets of
 * the parts of the string are as defined by the sizes in the scsi_inquiry
 * structure. Device type is searched as far as the device_id string is
 * defined. Flags defines which values are to be set in the driver from the
 * properties list.
 *
 * Entries below which begin and end with a "*" are a special case.
 * These do not have a specific vendor, and the string which follows
 * can appear anywhere in the 16 byte PID portion of the inquiry data.
 *
 * Entries below which begin and end with a " " (blank) are a special
 * case. The comparison function will treat multiple consecutive blanks
 * as equivalent to a single blank. For example, this causes a
 * sd_disk_table entry of " NEC CDROM " to match a device's id string
 * of "NEC CDROM".
 *
 * Note: The MD21 controller type has been obsoleted.
 *	 ST318202F is a Legacy device
 *	 MAM3182FC, MAM3364FC, MAM3738FC do not appear to have ever been
 *	 made with an FC connection. The entries here are a legacy.
 */
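/*
 * Illustrative sketch (not part of the driver): the blank-collapsing rule
 * described above, written as a hypothetical helper. The real comparison
 * is done by sd_blank_cmp() and sd_sdconf_id_match(), declared later in
 * this file.
 *
 *	static int
 *	example_blank_cmp(const char *tbl, const char *id)
 *	{
 *		while (*tbl != '\0' || *id != '\0') {
 *			if (*tbl == ' ' || *id == ' ') {
 *				while (*tbl == ' ')
 *					tbl++;
 *				while (*id == ' ')
 *					id++;
 *				if (*tbl != *id)
 *					return (0);
 *				continue;
 *			}
 *			if (*tbl != *id)
 *				return (0);
 *			tbl++;
 *			id++;
 *		}
 *		return (1);
 *	}
 */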
static sd_disk_config_t sd_disk_table[] = {
#if defined(__fibre) || defined(__i386) || defined(__amd64)
	{ "SEAGATE ST34371FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST19171FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST39102FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST39103FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST118273F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST318202F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST318203F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST136403F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST318304F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST336704F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST373405F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST336605F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST336752F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST318452F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "FUJITSU MAG3091F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAG3182F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAA3182F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAF3364F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAL3364F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAL3738F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAM3182FC", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAM3364FC", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAM3738FC", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "IBM DDYFT1835",     SD_CONF_BSET_THROTTLE, &ibm_properties },
	{ "IBM DDYFT3695",     SD_CONF_BSET_THROTTLE, &ibm_properties },
	{ "IBM IC35LF2D2",     SD_CONF_BSET_THROTTLE, &ibm_properties },
	{ "IBM IC35LF2PR",     SD_CONF_BSET_THROTTLE, &ibm_properties },
	{ "IBM 1724-100",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM 1726-2xx",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM 1726-22x",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM 1726-4xx",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM 1726-42x",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM 1726-3xx",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM 3526",          SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM 3542",          SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM 3552",          SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM 1722",          SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM 1742",          SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM 1815",          SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM FAStT",         SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM 1814",          SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM 1814-200",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM 1818",          SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "LSI INF",           SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "ENGENIO INF",       SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "SGI TP",            SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "SGI IS",            SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "*CSM100_*",         SD_CONF_BSET_NRR_COUNT |
			SD_CONF_BSET_CACHE_IS_NV, &lsi_oem_properties },
	{ "*CSM200_*",         SD_CONF_BSET_NRR_COUNT |
			SD_CONF_BSET_CACHE_IS_NV, &lsi_oem_properties },
	{ "Fujitsu SX300",     SD_CONF_BSET_THROTTLE,  &lsi_oem_properties },
	{ "LSI",               SD_CONF_BSET_NRR_COUNT, &lsi_properties },
	{ "SUN T3", SD_CONF_BSET_THROTTLE |
			SD_CONF_BSET_BSY_RETRY_COUNT|
			SD_CONF_BSET_RST_RETRIES|
			SD_CONF_BSET_RSV_REL_TIME,
		&purple_properties },
	{ "SUN SESS01", SD_CONF_BSET_THROTTLE |
		SD_CONF_BSET_BSY_RETRY_COUNT|
		SD_CONF_BSET_RST_RETRIES|
		SD_CONF_BSET_RSV_REL_TIME|
		SD_CONF_BSET_MIN_THROTTLE|
		SD_CONF_BSET_DISKSORT_DISABLED,
		&sve_properties },
	{ "SUN T4", SD_CONF_BSET_THROTTLE |
			SD_CONF_BSET_BSY_RETRY_COUNT|
			SD_CONF_BSET_RST_RETRIES|
			SD_CONF_BSET_RSV_REL_TIME,
		&purple_properties },
	{ "SUN SVE01", SD_CONF_BSET_DISKSORT_DISABLED |
		SD_CONF_BSET_LUN_RESET_ENABLED,
		&maserati_properties },
	{ "SUN SE6920", SD_CONF_BSET_THROTTLE |
		SD_CONF_BSET_NRR_COUNT|
		SD_CONF_BSET_BSY_RETRY_COUNT|
		SD_CONF_BSET_RST_RETRIES|
		SD_CONF_BSET_MIN_THROTTLE|
		SD_CONF_BSET_DISKSORT_DISABLED|
		SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN SE6940", SD_CONF_BSET_THROTTLE |
		SD_CONF_BSET_NRR_COUNT|
		SD_CONF_BSET_BSY_RETRY_COUNT|
		SD_CONF_BSET_RST_RETRIES|
		SD_CONF_BSET_MIN_THROTTLE|
		SD_CONF_BSET_DISKSORT_DISABLED|
		SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN StorageTek 6920", SD_CONF_BSET_THROTTLE |
		SD_CONF_BSET_NRR_COUNT|
		SD_CONF_BSET_BSY_RETRY_COUNT|
		SD_CONF_BSET_RST_RETRIES|
		SD_CONF_BSET_MIN_THROTTLE|
		SD_CONF_BSET_DISKSORT_DISABLED|
		SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN StorageTek 6940", SD_CONF_BSET_THROTTLE |
		SD_CONF_BSET_NRR_COUNT|
		SD_CONF_BSET_BSY_RETRY_COUNT|
		SD_CONF_BSET_RST_RETRIES|
		SD_CONF_BSET_MIN_THROTTLE|
		SD_CONF_BSET_DISKSORT_DISABLED|
		SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN PSX1000", SD_CONF_BSET_THROTTLE |
		SD_CONF_BSET_NRR_COUNT|
		SD_CONF_BSET_BSY_RETRY_COUNT|
		SD_CONF_BSET_RST_RETRIES|
		SD_CONF_BSET_MIN_THROTTLE|
		SD_CONF_BSET_DISKSORT_DISABLED|
		SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN SE6330", SD_CONF_BSET_THROTTLE |
		SD_CONF_BSET_NRR_COUNT|
		SD_CONF_BSET_BSY_RETRY_COUNT|
		SD_CONF_BSET_RST_RETRIES|
		SD_CONF_BSET_MIN_THROTTLE|
		SD_CONF_BSET_DISKSORT_DISABLED|
		SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN STK6580_6780", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "STK OPENstorage",  SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "STK OpenStorage",  SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "STK BladeCtlr",    SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "STK FLEXLINE",     SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "SYMBIOS",          SD_CONF_BSET_NRR_COUNT, &symbios_properties },
#endif /* fibre or NON-sparc platforms */
"*SUN1.0G*", SD_CONF_BSET_FAB_DEVID, NULL }, 716 { "SYMBIOS INF-01-00 ", SD_CONF_BSET_FAB_DEVID, NULL }, 717 { "SYMBIOS", SD_CONF_BSET_THROTTLE|SD_CONF_BSET_NRR_COUNT, 718 &symbios_properties }, 719 { "LSI", SD_CONF_BSET_THROTTLE | SD_CONF_BSET_NRR_COUNT, 720 &lsi_properties_scsi }, 721 #if defined(__i386) || defined(__amd64) 722 { " NEC CD-ROM DRIVE:260 ", (SD_CONF_BSET_PLAYMSF_BCD 723 | SD_CONF_BSET_READSUB_BCD 724 | SD_CONF_BSET_READ_TOC_ADDR_BCD 725 | SD_CONF_BSET_NO_READ_HEADER 726 | SD_CONF_BSET_READ_CD_XD4), NULL }, 727 728 { " NEC CD-ROM DRIVE:270 ", (SD_CONF_BSET_PLAYMSF_BCD 729 | SD_CONF_BSET_READSUB_BCD 730 | SD_CONF_BSET_READ_TOC_ADDR_BCD 731 | SD_CONF_BSET_NO_READ_HEADER 732 | SD_CONF_BSET_READ_CD_XD4), NULL }, 733 #endif /* __i386 || __amd64 */ 734 #endif /* sparc NON-fibre or NON-sparc platforms */ 735 736 #if (defined(SD_PROP_TST)) 737 { "VENDOR PRODUCT ", (SD_CONF_BSET_THROTTLE 738 | SD_CONF_BSET_CTYPE 739 | SD_CONF_BSET_NRR_COUNT 740 | SD_CONF_BSET_FAB_DEVID 741 | SD_CONF_BSET_NOCACHE 742 | SD_CONF_BSET_BSY_RETRY_COUNT 743 | SD_CONF_BSET_PLAYMSF_BCD 744 | SD_CONF_BSET_READSUB_BCD 745 | SD_CONF_BSET_READ_TOC_TRK_BCD 746 | SD_CONF_BSET_READ_TOC_ADDR_BCD 747 | SD_CONF_BSET_NO_READ_HEADER 748 | SD_CONF_BSET_READ_CD_XD4 749 | SD_CONF_BSET_RST_RETRIES 750 | SD_CONF_BSET_RSV_REL_TIME 751 | SD_CONF_BSET_TUR_CHECK), &tst_properties}, 752 #endif 753 }; 754 755 static const int sd_disk_table_size = 756 sizeof (sd_disk_table)/ sizeof (sd_disk_config_t); 757 758 759 760 #define SD_INTERCONNECT_PARALLEL 0 761 #define SD_INTERCONNECT_FABRIC 1 762 #define SD_INTERCONNECT_FIBRE 2 763 #define SD_INTERCONNECT_SSA 3 764 #define SD_INTERCONNECT_SATA 4 765 #define SD_IS_PARALLEL_SCSI(un) \ 766 ((un)->un_interconnect_type == SD_INTERCONNECT_PARALLEL) 767 #define SD_IS_SERIAL(un) \ 768 ((un)->un_interconnect_type == SD_INTERCONNECT_SATA) 769 770 /* 771 * Definitions used by device id registration routines 772 */ 773 #define VPD_HEAD_OFFSET 3 /* size of head for vpd page */ 774 #define VPD_PAGE_LENGTH 3 /* offset for pge length data */ 775 #define VPD_MODE_PAGE 1 /* offset into vpd pg for "page code" */ 776 777 static kmutex_t sd_sense_mutex = {0}; 778 779 /* 780 * Macros for updates of the driver state 781 */ 782 #define New_state(un, s) \ 783 (un)->un_last_state = (un)->un_state, (un)->un_state = (s) 784 #define Restore_state(un) \ 785 { uchar_t tmp = (un)->un_last_state; New_state((un), tmp); } 786 787 static struct sd_cdbinfo sd_cdbtab[] = { 788 { CDB_GROUP0, 0x00, 0x1FFFFF, 0xFF, }, 789 { CDB_GROUP1, SCMD_GROUP1, 0xFFFFFFFF, 0xFFFF, }, 790 { CDB_GROUP5, SCMD_GROUP5, 0xFFFFFFFF, 0xFFFFFFFF, }, 791 { CDB_GROUP4, SCMD_GROUP4, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFF, }, 792 }; 793 794 /* 795 * Specifies the number of seconds that must have elapsed since the last 796 * cmd. has completed for a device to be declared idle to the PM framework. 797 */ 798 static int sd_pm_idletime = 1; 799 800 /* 801 * Internal function prototypes 802 */ 803 804 #if (defined(__fibre)) 805 /* 806 * These #defines are to avoid namespace collisions that occur because this 807 * code is currently used to compile two separate driver modules: sd and ssd. 808 * All function names need to be treated this way (even if declared static) 809 * in order to allow the debugger to resolve the names properly. 810 * It is anticipated that in the near future the ssd module will be obsoleted, 811 * at which time this ugliness should go away. 

/*
 * Internal function prototypes
 */

#if (defined(__fibre))
/*
 * These #defines are to avoid namespace collisions that occur because this
 * code is currently used to compile two separate driver modules: sd and ssd.
 * All function names need to be treated this way (even if declared static)
 * in order to allow the debugger to resolve the names properly.
 * It is anticipated that in the near future the ssd module will be obsoleted,
 * at which time this ugliness should go away.
 */
#define	sd_log_trace			ssd_log_trace
#define	sd_log_info			ssd_log_info
#define	sd_log_err			ssd_log_err
#define	sdprobe				ssdprobe
#define	sdinfo				ssdinfo
#define	sd_prop_op			ssd_prop_op
#define	sd_scsi_probe_cache_init	ssd_scsi_probe_cache_init
#define	sd_scsi_probe_cache_fini	ssd_scsi_probe_cache_fini
#define	sd_scsi_clear_probe_cache	ssd_scsi_clear_probe_cache
#define	sd_scsi_probe_with_cache	ssd_scsi_probe_with_cache
#define	sd_scsi_target_lun_init		ssd_scsi_target_lun_init
#define	sd_scsi_target_lun_fini		ssd_scsi_target_lun_fini
#define	sd_scsi_get_target_lun_count	ssd_scsi_get_target_lun_count
#define	sd_scsi_update_lun_on_target	ssd_scsi_update_lun_on_target
#define	sd_spin_up_unit			ssd_spin_up_unit
#define	sd_enable_descr_sense		ssd_enable_descr_sense
#define	sd_reenable_dsense_task		ssd_reenable_dsense_task
#define	sd_set_mmc_caps			ssd_set_mmc_caps
#define	sd_read_unit_properties		ssd_read_unit_properties
#define	sd_process_sdconf_file		ssd_process_sdconf_file
#define	sd_process_sdconf_table		ssd_process_sdconf_table
#define	sd_sdconf_id_match		ssd_sdconf_id_match
#define	sd_blank_cmp			ssd_blank_cmp
#define	sd_chk_vers1_data		ssd_chk_vers1_data
#define	sd_set_vers1_properties		ssd_set_vers1_properties

#define	sd_get_physical_geometry	ssd_get_physical_geometry
#define	sd_get_virtual_geometry		ssd_get_virtual_geometry
#define	sd_update_block_info		ssd_update_block_info
#define	sd_register_devid		ssd_register_devid
#define	sd_get_devid			ssd_get_devid
#define	sd_create_devid			ssd_create_devid
#define	sd_write_deviceid		ssd_write_deviceid
#define	sd_check_vpd_page_support	ssd_check_vpd_page_support
#define	sd_setup_pm			ssd_setup_pm
#define	sd_create_pm_components		ssd_create_pm_components
#define	sd_ddi_suspend			ssd_ddi_suspend
#define	sd_ddi_pm_suspend		ssd_ddi_pm_suspend
#define	sd_ddi_resume			ssd_ddi_resume
#define	sd_ddi_pm_resume		ssd_ddi_pm_resume
#define	sdpower				ssdpower
#define	sdattach			ssdattach
#define	sddetach			ssddetach
#define	sd_unit_attach			ssd_unit_attach
#define	sd_unit_detach			ssd_unit_detach
#define	sd_set_unit_attributes		ssd_set_unit_attributes
#define	sd_create_errstats		ssd_create_errstats
#define	sd_set_errstats			ssd_set_errstats
#define	sd_set_pstats			ssd_set_pstats
#define	sddump				ssddump
#define	sd_scsi_poll			ssd_scsi_poll
#define	sd_send_polled_RQS		ssd_send_polled_RQS
#define	sd_ddi_scsi_poll		ssd_ddi_scsi_poll
#define	sd_init_event_callbacks		ssd_init_event_callbacks
#define	sd_event_callback		ssd_event_callback
#define	sd_cache_control		ssd_cache_control
#define	sd_get_write_cache_enabled	ssd_get_write_cache_enabled
#define	sd_get_nv_sup			ssd_get_nv_sup
#define	sd_make_device			ssd_make_device
#define	sdopen				ssdopen
#define	sdclose				ssdclose
#define	sd_ready_and_valid		ssd_ready_and_valid
#define	sdmin				ssdmin
#define	sdread				ssdread
#define	sdwrite				ssdwrite
#define	sdaread				ssdaread
#define	sdawrite			ssdawrite
#define	sdstrategy			ssdstrategy
#define	sdioctl				ssdioctl
#define	sd_mapblockaddr_iostart		ssd_mapblockaddr_iostart
#define	sd_mapblocksize_iostart		ssd_mapblocksize_iostart
#define	sd_checksum_iostart		ssd_checksum_iostart
#define	sd_checksum_uscsi_iostart	ssd_checksum_uscsi_iostart
#define	sd_pm_iostart			ssd_pm_iostart
#define	sd_core_iostart			ssd_core_iostart
#define	sd_mapblockaddr_iodone		ssd_mapblockaddr_iodone
#define	sd_mapblocksize_iodone		ssd_mapblocksize_iodone
#define	sd_checksum_iodone		ssd_checksum_iodone
#define	sd_checksum_uscsi_iodone	ssd_checksum_uscsi_iodone
#define	sd_pm_iodone			ssd_pm_iodone
#define	sd_initpkt_for_buf		ssd_initpkt_for_buf
#define	sd_destroypkt_for_buf		ssd_destroypkt_for_buf
#define	sd_setup_rw_pkt			ssd_setup_rw_pkt
#define	sd_setup_next_rw_pkt		ssd_setup_next_rw_pkt
#define	sd_buf_iodone			ssd_buf_iodone
#define	sd_uscsi_strategy		ssd_uscsi_strategy
#define	sd_initpkt_for_uscsi		ssd_initpkt_for_uscsi
#define	sd_destroypkt_for_uscsi		ssd_destroypkt_for_uscsi
#define	sd_uscsi_iodone			ssd_uscsi_iodone
#define	sd_xbuf_strategy		ssd_xbuf_strategy
#define	sd_xbuf_init			ssd_xbuf_init
#define	sd_pm_entry			ssd_pm_entry
#define	sd_pm_exit			ssd_pm_exit

#define	sd_pm_idletimeout_handler	ssd_pm_idletimeout_handler
#define	sd_pm_timeout_handler		ssd_pm_timeout_handler

#define	sd_add_buf_to_waitq		ssd_add_buf_to_waitq
#define	sdintr				ssdintr
#define	sd_start_cmds			ssd_start_cmds
#define	sd_send_scsi_cmd		ssd_send_scsi_cmd
#define	sd_bioclone_alloc		ssd_bioclone_alloc
#define	sd_bioclone_free		ssd_bioclone_free
#define	sd_shadow_buf_alloc		ssd_shadow_buf_alloc
#define	sd_shadow_buf_free		ssd_shadow_buf_free
#define	sd_print_transport_rejected_message	\
					ssd_print_transport_rejected_message
#define	sd_retry_command		ssd_retry_command
#define	sd_set_retry_bp			ssd_set_retry_bp
#define	sd_send_request_sense_command	ssd_send_request_sense_command
#define	sd_start_retry_command		ssd_start_retry_command
#define	sd_start_direct_priority_command	\
					ssd_start_direct_priority_command
#define	sd_return_failed_command	ssd_return_failed_command
#define	sd_return_failed_command_no_restart	\
					ssd_return_failed_command_no_restart
#define	sd_return_command		ssd_return_command
#define	sd_sync_with_callback		ssd_sync_with_callback
#define	sdrunout			ssdrunout
#define	sd_mark_rqs_busy		ssd_mark_rqs_busy
#define	sd_mark_rqs_idle		ssd_mark_rqs_idle
#define	sd_reduce_throttle		ssd_reduce_throttle
#define	sd_restore_throttle		ssd_restore_throttle
#define	sd_print_incomplete_msg		ssd_print_incomplete_msg
#define	sd_init_cdb_limits		ssd_init_cdb_limits
#define	sd_pkt_status_good		ssd_pkt_status_good
#define	sd_pkt_status_check_condition	ssd_pkt_status_check_condition
#define	sd_pkt_status_busy		ssd_pkt_status_busy
#define	sd_pkt_status_reservation_conflict	\
					ssd_pkt_status_reservation_conflict
#define	sd_pkt_status_qfull		ssd_pkt_status_qfull
#define	sd_handle_request_sense		ssd_handle_request_sense
#define	sd_handle_auto_request_sense	ssd_handle_auto_request_sense
#define	sd_print_sense_failed_msg	ssd_print_sense_failed_msg
#define	sd_validate_sense_data		ssd_validate_sense_data
#define	sd_decode_sense			ssd_decode_sense
#define	sd_print_sense_msg		ssd_print_sense_msg
#define	sd_sense_key_no_sense		ssd_sense_key_no_sense
#define	sd_sense_key_recoverable_error	ssd_sense_key_recoverable_error
#define	sd_sense_key_not_ready		ssd_sense_key_not_ready
#define	sd_sense_key_medium_or_hardware_error	\
					ssd_sense_key_medium_or_hardware_error
#define	sd_sense_key_illegal_request	ssd_sense_key_illegal_request
#define	sd_sense_key_unit_attention	ssd_sense_key_unit_attention
#define	sd_sense_key_fail_command	ssd_sense_key_fail_command
#define	sd_sense_key_blank_check	ssd_sense_key_blank_check
#define	sd_sense_key_aborted_command	ssd_sense_key_aborted_command
#define	sd_sense_key_default		ssd_sense_key_default
#define	sd_print_retry_msg		ssd_print_retry_msg
#define	sd_print_cmd_incomplete_msg	ssd_print_cmd_incomplete_msg
#define	sd_pkt_reason_cmd_incomplete	ssd_pkt_reason_cmd_incomplete
#define	sd_pkt_reason_cmd_tran_err	ssd_pkt_reason_cmd_tran_err
#define	sd_pkt_reason_cmd_reset		ssd_pkt_reason_cmd_reset
#define	sd_pkt_reason_cmd_aborted	ssd_pkt_reason_cmd_aborted
#define	sd_pkt_reason_cmd_timeout	ssd_pkt_reason_cmd_timeout
#define	sd_pkt_reason_cmd_unx_bus_free	ssd_pkt_reason_cmd_unx_bus_free
#define	sd_pkt_reason_cmd_tag_reject	ssd_pkt_reason_cmd_tag_reject
#define	sd_pkt_reason_default		ssd_pkt_reason_default
#define	sd_reset_target			ssd_reset_target
#define	sd_start_stop_unit_callback	ssd_start_stop_unit_callback
#define	sd_start_stop_unit_task		ssd_start_stop_unit_task
#define	sd_taskq_create			ssd_taskq_create
#define	sd_taskq_delete			ssd_taskq_delete
#define	sd_media_change_task		ssd_media_change_task
#define	sd_handle_mchange		ssd_handle_mchange
#define	sd_send_scsi_DOORLOCK		ssd_send_scsi_DOORLOCK
#define	sd_send_scsi_READ_CAPACITY	ssd_send_scsi_READ_CAPACITY
#define	sd_send_scsi_READ_CAPACITY_16	ssd_send_scsi_READ_CAPACITY_16
#define	sd_send_scsi_GET_CONFIGURATION	ssd_send_scsi_GET_CONFIGURATION
#define	sd_send_scsi_feature_GET_CONFIGURATION	\
					ssd_send_scsi_feature_GET_CONFIGURATION
#define	sd_send_scsi_START_STOP_UNIT	ssd_send_scsi_START_STOP_UNIT
#define	sd_send_scsi_INQUIRY		ssd_send_scsi_INQUIRY
#define	sd_send_scsi_TEST_UNIT_READY	ssd_send_scsi_TEST_UNIT_READY
#define	sd_send_scsi_PERSISTENT_RESERVE_IN	\
					ssd_send_scsi_PERSISTENT_RESERVE_IN
#define	sd_send_scsi_PERSISTENT_RESERVE_OUT	\
					ssd_send_scsi_PERSISTENT_RESERVE_OUT
#define	sd_send_scsi_SYNCHRONIZE_CACHE	ssd_send_scsi_SYNCHRONIZE_CACHE
#define	sd_send_scsi_SYNCHRONIZE_CACHE_biodone	\
					ssd_send_scsi_SYNCHRONIZE_CACHE_biodone
#define	sd_send_scsi_MODE_SENSE		ssd_send_scsi_MODE_SENSE
#define	sd_send_scsi_MODE_SELECT	ssd_send_scsi_MODE_SELECT
#define	sd_send_scsi_RDWR		ssd_send_scsi_RDWR
#define	sd_send_scsi_LOG_SENSE		ssd_send_scsi_LOG_SENSE
#define	sd_alloc_rqs			ssd_alloc_rqs
#define	sd_free_rqs			ssd_free_rqs
#define	sd_dump_memory			ssd_dump_memory
#define	sd_get_media_info		ssd_get_media_info
#define	sd_dkio_ctrl_info		ssd_dkio_ctrl_info
#define	sd_get_tunables_from_conf	ssd_get_tunables_from_conf
#define	sd_setup_next_xfer		ssd_setup_next_xfer
#define	sd_dkio_get_temp		ssd_dkio_get_temp
#define	sd_check_mhd			ssd_check_mhd
#define	sd_mhd_watch_cb			ssd_mhd_watch_cb
#define	sd_mhd_watch_incomplete		ssd_mhd_watch_incomplete
#define	sd_sname			ssd_sname
#define	sd_mhd_resvd_recover		ssd_mhd_resvd_recover
#define	sd_resv_reclaim_thread		ssd_resv_reclaim_thread
#define	sd_take_ownership		ssd_take_ownership
#define	sd_reserve_release		ssd_reserve_release
#define	sd_rmv_resv_reclaim_req		ssd_rmv_resv_reclaim_req
#define	sd_mhd_reset_notify_cb		ssd_mhd_reset_notify_cb
#define	sd_persistent_reservation_in_read_keys	\
					ssd_persistent_reservation_in_read_keys
#define	sd_persistent_reservation_in_read_resv	\
					ssd_persistent_reservation_in_read_resv
#define	sd_mhdioc_takeown		ssd_mhdioc_takeown
#define	sd_mhdioc_failfast		ssd_mhdioc_failfast
#define	sd_mhdioc_release		ssd_mhdioc_release
#define	sd_mhdioc_register_devid	ssd_mhdioc_register_devid
#define	sd_mhdioc_inkeys		ssd_mhdioc_inkeys
#define	sd_mhdioc_inresv		ssd_mhdioc_inresv
#define	sr_change_blkmode		ssr_change_blkmode
#define	sr_change_speed			ssr_change_speed
#define	sr_atapi_change_speed		ssr_atapi_change_speed
#define	sr_pause_resume			ssr_pause_resume
#define	sr_play_msf			ssr_play_msf
#define	sr_play_trkind			ssr_play_trkind
#define	sr_read_all_subcodes		ssr_read_all_subcodes
#define	sr_read_subchannel		ssr_read_subchannel
#define	sr_read_tocentry		ssr_read_tocentry
#define	sr_read_tochdr			ssr_read_tochdr
#define	sr_read_cdda			ssr_read_cdda
#define	sr_read_cdxa			ssr_read_cdxa
#define	sr_read_mode1			ssr_read_mode1
#define	sr_read_mode2			ssr_read_mode2
#define	sr_read_cd_mode2		ssr_read_cd_mode2
#define	sr_sector_mode			ssr_sector_mode
#define	sr_eject			ssr_eject
#define	sr_ejected			ssr_ejected
#define	sr_check_wp			ssr_check_wp
#define	sd_check_media			ssd_check_media
#define	sd_media_watch_cb		ssd_media_watch_cb
#define	sd_delayed_cv_broadcast		ssd_delayed_cv_broadcast
#define	sr_volume_ctrl			ssr_volume_ctrl
#define	sr_read_sony_session_offset	ssr_read_sony_session_offset
#define	sd_log_page_supported		ssd_log_page_supported
#define	sd_check_for_writable_cd	ssd_check_for_writable_cd
#define	sd_wm_cache_constructor		ssd_wm_cache_constructor
#define	sd_wm_cache_destructor		ssd_wm_cache_destructor
#define	sd_range_lock			ssd_range_lock
#define	sd_get_range			ssd_get_range
#define	sd_free_inlist_wmap		ssd_free_inlist_wmap
#define	sd_range_unlock			ssd_range_unlock
#define	sd_read_modify_write_task	ssd_read_modify_write_task
#define	sddump_do_read_of_rmw		ssddump_do_read_of_rmw

#define	sd_iostart_chain		ssd_iostart_chain
#define	sd_iodone_chain			ssd_iodone_chain
#define	sd_initpkt_map			ssd_initpkt_map
#define	sd_destroypkt_map		ssd_destroypkt_map
#define	sd_chain_type_map		ssd_chain_type_map
#define	sd_chain_index_map		ssd_chain_index_map

#define	sd_failfast_flushctl		ssd_failfast_flushctl
#define	sd_failfast_flushq		ssd_failfast_flushq
#define	sd_failfast_flushq_callback	ssd_failfast_flushq_callback

#define	sd_is_lsi			ssd_is_lsi
#define	sd_tg_rdwr			ssd_tg_rdwr
#define	sd_tg_getinfo			ssd_tg_getinfo

#endif	/* #if (defined(__fibre)) */


int _init(void);
int _fini(void);
int _info(struct modinfo *modinfop);

/*PRINTFLIKE3*/
static void sd_log_trace(uint_t comp, struct sd_lun *un, const char *fmt, ...);
/*PRINTFLIKE3*/
static void sd_log_info(uint_t comp, struct sd_lun *un, const char *fmt, ...);
/*PRINTFLIKE3*/
static void sd_log_err(uint_t comp, struct sd_lun *un, const char *fmt, ...);

static int sdprobe(dev_info_t *devi);
static int sdinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg,
    void **result);
static int sd_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
    int mod_flags, char *name, caddr_t valuep, int *lengthp);

/*
 * Smart probe for parallel scsi
 */
static void sd_scsi_probe_cache_init(void);
static void sd_scsi_probe_cache_fini(void);
static void sd_scsi_clear_probe_cache(void);
static int sd_scsi_probe_with_cache(struct scsi_device *devp, int (*fn)());
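/*
 * Illustrative sketch (not part of the driver): how a probe routine can
 * route scsi_probe() through the cache declared above. The wrapper shown
 * here is hypothetical; the real caller is sdprobe().
 *
 *	static int
 *	example_probe(dev_info_t *devi)
 *	{
 *		struct scsi_device *devp = ddi_get_driver_private(devi);
 *
 *		return (sd_scsi_probe_with_cache(devp, NULL_FUNC));
 *	}
 */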

/*
 * Attached luns on target for parallel scsi
 */
static void sd_scsi_target_lun_init(void);
static void sd_scsi_target_lun_fini(void);
static int sd_scsi_get_target_lun_count(dev_info_t *dip, int target);
static void sd_scsi_update_lun_on_target(dev_info_t *dip, int target, int flag);

static int sd_spin_up_unit(struct sd_lun *un);
#ifdef _LP64
static void sd_enable_descr_sense(struct sd_lun *un);
static void sd_reenable_dsense_task(void *arg);
#endif /* _LP64 */

static void sd_set_mmc_caps(struct sd_lun *un);

static void sd_read_unit_properties(struct sd_lun *un);
static int sd_process_sdconf_file(struct sd_lun *un);
static void sd_get_tunables_from_conf(struct sd_lun *un, int flags,
    int *data_list, sd_tunables *values);
static void sd_process_sdconf_table(struct sd_lun *un);
static int sd_sdconf_id_match(struct sd_lun *un, char *id, int idlen);
static int sd_blank_cmp(struct sd_lun *un, char *id, int idlen);
static int sd_chk_vers1_data(struct sd_lun *un, int flags, int *prop_list,
    int list_len, char *dataname_ptr);
static void sd_set_vers1_properties(struct sd_lun *un, int flags,
    sd_tunables *prop_list);

static void sd_register_devid(struct sd_lun *un, dev_info_t *devi,
    int reservation_flag);
static int sd_get_devid(struct sd_lun *un);
static ddi_devid_t sd_create_devid(struct sd_lun *un);
static int sd_write_deviceid(struct sd_lun *un);
static int sd_get_devid_page(struct sd_lun *un, uchar_t *wwn, int *len);
static int sd_check_vpd_page_support(struct sd_lun *un);

static void sd_setup_pm(struct sd_lun *un, dev_info_t *devi);
static void sd_create_pm_components(dev_info_t *devi, struct sd_lun *un);

static int sd_ddi_suspend(dev_info_t *devi);
static int sd_ddi_pm_suspend(struct sd_lun *un);
static int sd_ddi_resume(dev_info_t *devi);
static int sd_ddi_pm_resume(struct sd_lun *un);
static int sdpower(dev_info_t *devi, int component, int level);

static int sdattach(dev_info_t *devi, ddi_attach_cmd_t cmd);
static int sddetach(dev_info_t *devi, ddi_detach_cmd_t cmd);
static int sd_unit_attach(dev_info_t *devi);
static int sd_unit_detach(dev_info_t *devi);

static void sd_set_unit_attributes(struct sd_lun *un, dev_info_t *devi);
static void sd_create_errstats(struct sd_lun *un, int instance);
static void sd_set_errstats(struct sd_lun *un);
static void sd_set_pstats(struct sd_lun *un);

static int sddump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk);
static int sd_scsi_poll(struct sd_lun *un, struct scsi_pkt *pkt);
static int sd_send_polled_RQS(struct sd_lun *un);
static int sd_ddi_scsi_poll(struct scsi_pkt *pkt);

#if (defined(__fibre))
/*
 * Event callbacks (photon)
 */
static void sd_init_event_callbacks(struct sd_lun *un);
static void sd_event_callback(dev_info_t *, ddi_eventcookie_t, void *, void *);
#endif

/*
 * Defines for sd_cache_control
 */

#define	SD_CACHE_ENABLE		1
#define	SD_CACHE_DISABLE	0
#define	SD_CACHE_NOCHANGE	-1

static int sd_cache_control(struct sd_lun *un, int rcd_flag, int wce_flag);
static int sd_get_write_cache_enabled(struct sd_lun *un, int *is_enabled);
static void sd_get_nv_sup(struct sd_lun *un);
static dev_t sd_make_device(dev_info_t *devi);
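/*
 * Illustrative sketch (not part of the driver): enabling the write cache
 * while leaving the read cache setting untouched, using the flags defined
 * above (the call site shown is hypothetical):
 *
 *	(void) sd_cache_control(un, SD_CACHE_NOCHANGE, SD_CACHE_ENABLE);
 */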

static void sd_update_block_info(struct sd_lun *un, uint32_t lbasize,
    uint64_t capacity);

/*
 * Driver entry point functions.
 */
static int sdopen(dev_t *dev_p, int flag, int otyp, cred_t *cred_p);
static int sdclose(dev_t dev, int flag, int otyp, cred_t *cred_p);
static int sd_ready_and_valid(struct sd_lun *un);

static void sdmin(struct buf *bp);
static int sdread(dev_t dev, struct uio *uio, cred_t *cred_p);
static int sdwrite(dev_t dev, struct uio *uio, cred_t *cred_p);
static int sdaread(dev_t dev, struct aio_req *aio, cred_t *cred_p);
static int sdawrite(dev_t dev, struct aio_req *aio, cred_t *cred_p);

static int sdstrategy(struct buf *bp);
static int sdioctl(dev_t, int, intptr_t, int, cred_t *, int *);

/*
 * Function prototypes for layering functions in the iostart chain.
 */
static void sd_mapblockaddr_iostart(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_mapblocksize_iostart(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_checksum_iostart(int index, struct sd_lun *un, struct buf *bp);
static void sd_checksum_uscsi_iostart(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_pm_iostart(int index, struct sd_lun *un, struct buf *bp);
static void sd_core_iostart(int index, struct sd_lun *un, struct buf *bp);

/*
 * Function prototypes for layering functions in the iodone chain.
 */
static void sd_buf_iodone(int index, struct sd_lun *un, struct buf *bp);
static void sd_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp);
static void sd_mapblockaddr_iodone(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_mapblocksize_iodone(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_checksum_iodone(int index, struct sd_lun *un, struct buf *bp);
static void sd_checksum_uscsi_iodone(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_pm_iodone(int index, struct sd_lun *un, struct buf *bp);
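/*
 * Illustrative note (not part of the driver): the iostart/iodone
 * prototypes above form a layered pipeline. A buf descends the iostart
 * side on its way to the device and climbs the matching iodone side on
 * completion, conceptually:
 *
 *	sd_mapblockaddr_iostart -> sd_mapblocksize_iostart ->
 *	sd_checksum_iostart -> sd_pm_iostart -> sd_core_iostart
 *
 * with the *_iodone functions run in the reverse order. Which layers are
 * actually present is selected per-I/O by the chain tables elsewhere in
 * the driver (see sd_iostart_chain/sd_iodone_chain).
 */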

/*
 * Prototypes for functions to support buf(9S) based IO.
 */
static void sd_xbuf_strategy(struct buf *bp, ddi_xbuf_t xp, void *arg);
static int sd_initpkt_for_buf(struct buf *, struct scsi_pkt **);
static void sd_destroypkt_for_buf(struct buf *);
static int sd_setup_rw_pkt(struct sd_lun *un, struct scsi_pkt **pktpp,
    struct buf *bp, int flags,
    int (*callback)(caddr_t), caddr_t callback_arg,
    diskaddr_t lba, uint32_t blockcount);
static int sd_setup_next_rw_pkt(struct sd_lun *un, struct scsi_pkt *pktp,
    struct buf *bp, diskaddr_t lba, uint32_t blockcount);

/*
 * Prototypes for functions to support USCSI IO.
 */
static int sd_uscsi_strategy(struct buf *bp);
static int sd_initpkt_for_uscsi(struct buf *, struct scsi_pkt **);
static void sd_destroypkt_for_uscsi(struct buf *);

static void sd_xbuf_init(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
    uchar_t chain_type, void *pktinfop);

static int sd_pm_entry(struct sd_lun *un);
static void sd_pm_exit(struct sd_lun *un);

static void sd_pm_idletimeout_handler(void *arg);

/*
 * sd_core internal functions (used at the sd_core_io layer).
 */
static void sd_add_buf_to_waitq(struct sd_lun *un, struct buf *bp);
static void sdintr(struct scsi_pkt *pktp);
static void sd_start_cmds(struct sd_lun *un, struct buf *immed_bp);

static int sd_send_scsi_cmd(dev_t dev, struct uscsi_cmd *incmd, int flag,
    enum uio_seg dataspace, int path_flag);

static struct buf *sd_bioclone_alloc(struct buf *bp, size_t datalen,
    daddr_t blkno, int (*func)(struct buf *));
static struct buf *sd_shadow_buf_alloc(struct buf *bp, size_t datalen,
    uint_t bflags, daddr_t blkno, int (*func)(struct buf *));
static void sd_bioclone_free(struct buf *bp);
static void sd_shadow_buf_free(struct buf *bp);

static void sd_print_transport_rejected_message(struct sd_lun *un,
    struct sd_xbuf *xp, int code);
static void sd_print_incomplete_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int code);
static void sd_print_sense_failed_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int code);
static void sd_print_cmd_incomplete_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int code);

static void sd_retry_command(struct sd_lun *un, struct buf *bp,
    int retry_check_flag,
    void (*user_funcp)(struct sd_lun *un, struct buf *bp, void *argp,
	int c),
    void *user_arg, int failure_code, clock_t retry_delay,
    void (*statp)(kstat_io_t *));

static void sd_set_retry_bp(struct sd_lun *un, struct buf *bp,
    clock_t retry_delay, void (*statp)(kstat_io_t *));
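/*
 * Illustrative sketch (not part of the driver): a typical invocation
 * shape for sd_retry_command() as declared above; the sense-info argument
 * "si" and the specific flag/delay values are hypothetical placeholders.
 *
 *	sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg,
 *	    &si, EIO, (clock_t)0, NULL);
 */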

static void sd_send_request_sense_command(struct sd_lun *un, struct buf *bp,
    struct scsi_pkt *pktp);
static void sd_start_retry_command(void *arg);
static void sd_start_direct_priority_command(void *arg);
static void sd_return_failed_command(struct sd_lun *un, struct buf *bp,
    int errcode);
static void sd_return_failed_command_no_restart(struct sd_lun *un,
    struct buf *bp, int errcode);
static void sd_return_command(struct sd_lun *un, struct buf *bp);
static void sd_sync_with_callback(struct sd_lun *un);
static int sdrunout(caddr_t arg);

static void sd_mark_rqs_busy(struct sd_lun *un, struct buf *bp);
static struct buf *sd_mark_rqs_idle(struct sd_lun *un, struct sd_xbuf *xp);

static void sd_reduce_throttle(struct sd_lun *un, int throttle_type);
static void sd_restore_throttle(void *arg);

static void sd_init_cdb_limits(struct sd_lun *un);

static void sd_pkt_status_good(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);

/*
 * Error handling functions
 */
static void sd_pkt_status_check_condition(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_status_busy(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_status_reservation_conflict(struct sd_lun *un,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_status_qfull(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void sd_handle_request_sense(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_handle_auto_request_sense(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static int sd_validate_sense_data(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, size_t actual_len);
static void sd_decode_sense(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void sd_print_sense_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int code);

static void sd_sense_key_no_sense(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_recoverable_error(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_not_ready(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_medium_or_hardware_error(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_illegal_request(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_unit_attention(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_fail_command(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_blank_check(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_aborted_command(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_default(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void sd_print_retry_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int flag);

static void sd_pkt_reason_cmd_incomplete(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_tran_err(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_reset(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_aborted(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_timeout(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_unx_bus_free(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_tag_reject(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_default(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void sd_reset_target(struct sd_lun *un, struct scsi_pkt *pktp);

static void sd_start_stop_unit_callback(void *arg);
static void sd_start_stop_unit_task(void *arg);

static void sd_taskq_create(void);
static void sd_taskq_delete(void);
static void sd_media_change_task(void *arg);

static int sd_handle_mchange(struct sd_lun *un);
static int sd_send_scsi_DOORLOCK(struct sd_lun *un, int flag, int path_flag);
static int sd_send_scsi_READ_CAPACITY(struct sd_lun *un, uint64_t *capp,
    uint32_t *lbap, int path_flag);
static int sd_send_scsi_READ_CAPACITY_16(struct sd_lun *un, uint64_t *capp,
    uint32_t *lbap, int path_flag);
static int sd_send_scsi_START_STOP_UNIT(struct sd_lun *un, int flag,
    int path_flag);
static int sd_send_scsi_INQUIRY(struct sd_lun *un, uchar_t *bufaddr,
    size_t buflen, uchar_t evpd, uchar_t page_code, size_t *residp);
static int sd_send_scsi_TEST_UNIT_READY(struct sd_lun *un, int flag);
static int sd_send_scsi_PERSISTENT_RESERVE_IN(struct sd_lun *un,
    uchar_t usr_cmd, uint16_t data_len, uchar_t *data_bufp);
static int sd_send_scsi_PERSISTENT_RESERVE_OUT(struct sd_lun *un,
    uchar_t usr_cmd, uchar_t *usr_bufp);
static int sd_send_scsi_SYNCHRONIZE_CACHE(struct sd_lun *un,
    struct dk_callback *dkc);
static int sd_send_scsi_SYNCHRONIZE_CACHE_biodone(struct buf *bp);
static int sd_send_scsi_GET_CONFIGURATION(struct sd_lun *un,
    struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen,
    uchar_t *bufaddr, uint_t buflen, int path_flag);
static int sd_send_scsi_feature_GET_CONFIGURATION(struct sd_lun *un,
    struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen,
    uchar_t *bufaddr, uint_t buflen, char feature, int path_flag);
static int sd_send_scsi_MODE_SENSE(struct sd_lun *un, int cdbsize,
    uchar_t *bufaddr, size_t buflen, uchar_t page_code, int path_flag);
static int sd_send_scsi_MODE_SELECT(struct sd_lun *un, int cdbsize,
    uchar_t *bufaddr, size_t buflen, uchar_t save_page, int path_flag);
static int sd_send_scsi_RDWR(struct sd_lun *un, uchar_t cmd, void *bufaddr,
    size_t buflen, daddr_t start_block, int path_flag);
#define	sd_send_scsi_READ(un, bufaddr, buflen, start_block, path_flag)	\
	sd_send_scsi_RDWR(un, SCMD_READ, bufaddr, buflen, start_block, \
	path_flag)
#define	sd_send_scsi_WRITE(un, bufaddr, buflen, start_block, path_flag)	\
	sd_send_scsi_RDWR(un, SCMD_WRITE, bufaddr, buflen, start_block,\
	path_flag)
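/*
 * Illustrative sketch (not part of the driver): the convenience macros
 * above expand to sd_send_scsi_RDWR() calls; e.g. a hypothetical
 * one-block read at LBA 0 through the standard path:
 *
 *	rval = sd_send_scsi_READ(un, bufaddr, un->un_sys_blocksize,
 *	    (daddr_t)0, SD_PATH_STANDARD);
 */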
caddr_t arg, int flag); 1471 static int sd_mhdioc_release(dev_t dev); 1472 static int sd_mhdioc_register_devid(dev_t dev); 1473 static int sd_mhdioc_inkeys(dev_t dev, caddr_t arg, int flag); 1474 static int sd_mhdioc_inresv(dev_t dev, caddr_t arg, int flag); 1475 1476 /* 1477 * SCSI removable prototypes 1478 */ 1479 static int sr_change_blkmode(dev_t dev, int cmd, intptr_t data, int flag); 1480 static int sr_change_speed(dev_t dev, int cmd, intptr_t data, int flag); 1481 static int sr_atapi_change_speed(dev_t dev, int cmd, intptr_t data, int flag); 1482 static int sr_pause_resume(dev_t dev, int mode); 1483 static int sr_play_msf(dev_t dev, caddr_t data, int flag); 1484 static int sr_play_trkind(dev_t dev, caddr_t data, int flag); 1485 static int sr_read_all_subcodes(dev_t dev, caddr_t data, int flag); 1486 static int sr_read_subchannel(dev_t dev, caddr_t data, int flag); 1487 static int sr_read_tocentry(dev_t dev, caddr_t data, int flag); 1488 static int sr_read_tochdr(dev_t dev, caddr_t data, int flag); 1489 static int sr_read_cdda(dev_t dev, caddr_t data, int flag); 1490 static int sr_read_cdxa(dev_t dev, caddr_t data, int flag); 1491 static int sr_read_mode1(dev_t dev, caddr_t data, int flag); 1492 static int sr_read_mode2(dev_t dev, caddr_t data, int flag); 1493 static int sr_read_cd_mode2(dev_t dev, caddr_t data, int flag); 1494 static int sr_sector_mode(dev_t dev, uint32_t blksize); 1495 static int sr_eject(dev_t dev); 1496 static void sr_ejected(register struct sd_lun *un); 1497 static int sr_check_wp(dev_t dev); 1498 static int sd_check_media(dev_t dev, enum dkio_state state); 1499 static int sd_media_watch_cb(caddr_t arg, struct scsi_watch_result *resultp); 1500 static void sd_delayed_cv_broadcast(void *arg); 1501 static int sr_volume_ctrl(dev_t dev, caddr_t data, int flag); 1502 static int sr_read_sony_session_offset(dev_t dev, caddr_t data, int flag); 1503 1504 static int sd_log_page_supported(struct sd_lun *un, int log_page); 1505 1506 /* 1507 * Function Prototype for the non-512 support (DVDRAM, MO etc.) functions. 1508 */ 1509 static void sd_check_for_writable_cd(struct sd_lun *un, int path_flag); 1510 static int sd_wm_cache_constructor(void *wm, void *un, int flags); 1511 static void sd_wm_cache_destructor(void *wm, void *un); 1512 static struct sd_w_map *sd_range_lock(struct sd_lun *un, daddr_t startb, 1513 daddr_t endb, ushort_t typ); 1514 static struct sd_w_map *sd_get_range(struct sd_lun *un, daddr_t startb, 1515 daddr_t endb); 1516 static void sd_free_inlist_wmap(struct sd_lun *un, struct sd_w_map *wmp); 1517 static void sd_range_unlock(struct sd_lun *un, struct sd_w_map *wm); 1518 static void sd_read_modify_write_task(void * arg); 1519 static int 1520 sddump_do_read_of_rmw(struct sd_lun *un, uint64_t blkno, uint64_t nblk, 1521 struct buf **bpp); 1522 1523 1524 /* 1525 * Function prototypes for failfast support. 
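 *
 * (The failfast flush policy itself is controlled by the SD_FAILFAST_*
 * constants and the sd_failfast_flushctl bitmask defined further below.
 * As an illustrative example, and assuming the stock sd module name, an
 * administrator could force flushing of all bufs and all queues by adding
 * the following to /etc/system (use the ssd: prefix for the ssd module):
 *
 *	set sd:sd_failfast_flushctl = 0x3
 *
 * where 0x3 is SD_FAILFAST_FLUSH_ALL_BUFS | SD_FAILFAST_FLUSH_ALL_QUEUES.)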
1526 */ 1527 static void sd_failfast_flushq(struct sd_lun *un); 1528 static int sd_failfast_flushq_callback(struct buf *bp); 1529 1530 /* 1531 * Function prototypes to check for lsi devices 1532 */ 1533 static void sd_is_lsi(struct sd_lun *un); 1534 1535 /* 1536 * Function prototypes for partial DMA support 1537 */ 1538 static int sd_setup_next_xfer(struct sd_lun *un, struct buf *bp, 1539 struct scsi_pkt *pkt, struct sd_xbuf *xp); 1540 1541 1542 /* Function prototypes for cmlb */ 1543 static int sd_tg_rdwr(dev_info_t *devi, uchar_t cmd, void *bufaddr, 1544 diskaddr_t start_block, size_t reqlength, void *tg_cookie); 1545 1546 static int sd_tg_getinfo(dev_info_t *devi, int cmd, void *arg, void *tg_cookie); 1547 1548 /* 1549 * Constants for failfast support: 1550 * 1551 * SD_FAILFAST_INACTIVE: Instance is currently in a normal state, with NO 1552 * failfast processing being performed. 1553 * 1554 * SD_FAILFAST_ACTIVE: Instance is in the failfast state and is performing 1555 * failfast processing on all bufs with B_FAILFAST set. 1556 */ 1557 1558 #define SD_FAILFAST_INACTIVE 0 1559 #define SD_FAILFAST_ACTIVE 1 1560 1561 /* 1562 * Bitmask to control behavior of buf(9S) flushes when a transition to 1563 * the failfast state occurs. Optional bits include: 1564 * 1565 * SD_FAILFAST_FLUSH_ALL_BUFS: When set, flush ALL bufs including those that 1566 * do NOT have B_FAILFAST set. When clear, only bufs with B_FAILFAST will 1567 * be flushed. 1568 * 1569 * SD_FAILFAST_FLUSH_ALL_QUEUES: When set, flush any/all other queues in the 1570 * driver, in addition to the regular wait queue. This includes the xbuf 1571 * queues. When clear, only the driver's wait queue will be flushed. 1572 */ 1573 #define SD_FAILFAST_FLUSH_ALL_BUFS 0x01 1574 #define SD_FAILFAST_FLUSH_ALL_QUEUES 0x02 1575 1576 /* 1577 * The default behavior is to only flush bufs that have B_FAILFAST set, but 1578 * to flush all queues within the driver. 1579 */ 1580 static int sd_failfast_flushctl = SD_FAILFAST_FLUSH_ALL_QUEUES; 1581 1582 1583 /* 1584 * SD Testing Fault Injection 1585 */ 1586 #ifdef SD_FAULT_INJECTION 1587 static void sd_faultinjection_ioctl(int cmd, intptr_t arg, struct sd_lun *un); 1588 static void sd_faultinjection(struct scsi_pkt *pktp); 1589 static void sd_injection_log(char *buf, struct sd_lun *un); 1590 #endif 1591 1592 /* 1593 * Device driver ops vector 1594 */ 1595 static struct cb_ops sd_cb_ops = { 1596 sdopen, /* open */ 1597 sdclose, /* close */ 1598 sdstrategy, /* strategy */ 1599 nodev, /* print */ 1600 sddump, /* dump */ 1601 sdread, /* read */ 1602 sdwrite, /* write */ 1603 sdioctl, /* ioctl */ 1604 nodev, /* devmap */ 1605 nodev, /* mmap */ 1606 nodev, /* segmap */ 1607 nochpoll, /* poll */ 1608 sd_prop_op, /* cb_prop_op */ 1609 0, /* streamtab */ 1610 D_64BIT | D_MP | D_NEW | D_HOTPLUG, /* Driver compatibility flags */ 1611 CB_REV, /* cb_rev */ 1612 sdaread, /* async I/O read entry point */ 1613 sdawrite /* async I/O write entry point */ 1614 }; 1615 1616 static struct dev_ops sd_ops = { 1617 DEVO_REV, /* devo_rev, */ 1618 0, /* refcnt */ 1619 sdinfo, /* info */ 1620 nulldev, /* identify */ 1621 sdprobe, /* probe */ 1622 sdattach, /* attach */ 1623 sddetach, /* detach */ 1624 nodev, /* reset */ 1625 &sd_cb_ops, /* driver operations */ 1626 NULL, /* bus operations */ 1627 sdpower /* power */ 1628 }; 1629 1630 1631 /* 1632 * This is the loadable module wrapper. 1633 */ 1634 #include <sys/modctl.h> 1635 1636 static struct modldrv modldrv = { 1637 &mod_driverops, /* Type of module. 
This one is a driver */
1638 SD_MODULE_NAME, /* Module name. */
1639 &sd_ops /* driver ops */
1640 };
1641
1642
1643 static struct modlinkage modlinkage = {
1644 MODREV_1,
1645 &modldrv,
1646 NULL
1647 };
1648
1649 static cmlb_tg_ops_t sd_tgops = {
1650 TG_DK_OPS_VERSION_1,
1651 sd_tg_rdwr,
1652 sd_tg_getinfo
1653 };
1654
1655 static struct scsi_asq_key_strings sd_additional_codes[] = {
1656 0x81, 0, "Logical Unit is Reserved",
1657 0x85, 0, "Audio Address Not Valid",
1658 0xb6, 0, "Media Load Mechanism Failed",
1659 0xb9, 0, "Audio Play Operation Aborted",
1660 0xbf, 0, "Buffer Overflow for Read All Subcodes Command",
1661 0x53, 2, "Medium removal prevented",
1662 0x6f, 0, "Authentication failed during key exchange",
1663 0x6f, 1, "Key not present",
1664 0x6f, 2, "Key not established",
1665 0x6f, 3, "Read without proper authentication",
1666 0x6f, 4, "Mismatched region to this logical unit",
1667 0x6f, 5, "Region reset count error",
1668 0xffff, 0x0, NULL
1669 };
1670
1671
1672 /*
1673 * Struct for passing printing information for sense data messages
1674 */
1675 struct sd_sense_info {
1676 int ssi_severity;
1677 int ssi_pfa_flag;
1678 };
1679
1680 /*
1681 * Table of function pointers for iostart-side routines. Separate "chains"
1682 * of layered function calls are formed by placing the function pointers
1683 * sequentially in the desired order. Functions are called in order of
1684 * increasing table index. The last function in each chain must
1685 * be sd_core_iostart(). The corresponding iodone-side routines are expected
1686 * in the sd_iodone_chain[] array.
1687 *
1688 * Note: It may seem more natural to organize both the iostart and iodone
1689 * functions together, into an array of structures (or some similar
1690 * organization) with a common index, rather than two separate arrays which
1691 * must be maintained in synchronization. The purpose of this division is
1692 * to achieve improved performance: individual arrays allow for more
1693 * effective cache line utilization on certain platforms.
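 *
 * As a minimal illustrative sketch (not an actual routine in this driver),
 * a layer in an iostart chain has the following shape, where the
 * SD_NEXT_IOSTART() macro (defined further below) simply invokes
 * sd_iostart_chain[index + 1] with an incremented index:
 *
 *	static void
 *	sd_example_iostart(int index, struct sd_lun *un, struct buf *bp)
 *	{
 *		(perform this layer's processing of bp here)
 *		SD_NEXT_IOSTART(index, un, bp);
 *	}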
1694 */ 1695 1696 typedef void (*sd_chain_t)(int index, struct sd_lun *un, struct buf *bp); 1697 1698 1699 static sd_chain_t sd_iostart_chain[] = { 1700 1701 /* Chain for buf IO for disk drive targets (PM enabled) */ 1702 sd_mapblockaddr_iostart, /* Index: 0 */ 1703 sd_pm_iostart, /* Index: 1 */ 1704 sd_core_iostart, /* Index: 2 */ 1705 1706 /* Chain for buf IO for disk drive targets (PM disabled) */ 1707 sd_mapblockaddr_iostart, /* Index: 3 */ 1708 sd_core_iostart, /* Index: 4 */ 1709 1710 /* Chain for buf IO for removable-media targets (PM enabled) */ 1711 sd_mapblockaddr_iostart, /* Index: 5 */ 1712 sd_mapblocksize_iostart, /* Index: 6 */ 1713 sd_pm_iostart, /* Index: 7 */ 1714 sd_core_iostart, /* Index: 8 */ 1715 1716 /* Chain for buf IO for removable-media targets (PM disabled) */ 1717 sd_mapblockaddr_iostart, /* Index: 9 */ 1718 sd_mapblocksize_iostart, /* Index: 10 */ 1719 sd_core_iostart, /* Index: 11 */ 1720 1721 /* Chain for buf IO for disk drives with checksumming (PM enabled) */ 1722 sd_mapblockaddr_iostart, /* Index: 12 */ 1723 sd_checksum_iostart, /* Index: 13 */ 1724 sd_pm_iostart, /* Index: 14 */ 1725 sd_core_iostart, /* Index: 15 */ 1726 1727 /* Chain for buf IO for disk drives with checksumming (PM disabled) */ 1728 sd_mapblockaddr_iostart, /* Index: 16 */ 1729 sd_checksum_iostart, /* Index: 17 */ 1730 sd_core_iostart, /* Index: 18 */ 1731 1732 /* Chain for USCSI commands (all targets) */ 1733 sd_pm_iostart, /* Index: 19 */ 1734 sd_core_iostart, /* Index: 20 */ 1735 1736 /* Chain for checksumming USCSI commands (all targets) */ 1737 sd_checksum_uscsi_iostart, /* Index: 21 */ 1738 sd_pm_iostart, /* Index: 22 */ 1739 sd_core_iostart, /* Index: 23 */ 1740 1741 /* Chain for "direct" USCSI commands (all targets) */ 1742 sd_core_iostart, /* Index: 24 */ 1743 1744 /* Chain for "direct priority" USCSI commands (all targets) */ 1745 sd_core_iostart, /* Index: 25 */ 1746 }; 1747 1748 /* 1749 * Macros to locate the first function of each iostart chain in the 1750 * sd_iostart_chain[] array. These are located by the index in the array. 1751 */ 1752 #define SD_CHAIN_DISK_IOSTART 0 1753 #define SD_CHAIN_DISK_IOSTART_NO_PM 3 1754 #define SD_CHAIN_RMMEDIA_IOSTART 5 1755 #define SD_CHAIN_RMMEDIA_IOSTART_NO_PM 9 1756 #define SD_CHAIN_CHKSUM_IOSTART 12 1757 #define SD_CHAIN_CHKSUM_IOSTART_NO_PM 16 1758 #define SD_CHAIN_USCSI_CMD_IOSTART 19 1759 #define SD_CHAIN_USCSI_CHKSUM_IOSTART 21 1760 #define SD_CHAIN_DIRECT_CMD_IOSTART 24 1761 #define SD_CHAIN_PRIORITY_CMD_IOSTART 25 1762 1763 1764 /* 1765 * Table of function pointers for the iodone-side routines for the driver- 1766 * internal layering mechanism. The calling sequence for iodone routines 1767 * uses a decrementing table index, so the last routine called in a chain 1768 * must be at the lowest array index location for that chain. The last 1769 * routine for each chain must be either sd_buf_iodone() (for buf(9S) IOs) 1770 * or sd_uscsi_iodone() (for uscsi IOs). Other than this, the ordering 1771 * of the functions in an iodone side chain must correspond to the ordering 1772 * of the iostart routines for that chain. Note that there is no iodone 1773 * side routine that corresponds to sd_core_iostart(), so there is no 1774 * entry in the table for this. 
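 *
 * As a sketch of the completion side (again, not an actual routine in this
 * driver), an iodone layer undoes its iostart-side processing and then
 * calls SD_NEXT_IODONE(), which invokes sd_iodone_chain[index - 1],
 * walking the chain back toward sd_buf_iodone() or sd_uscsi_iodone() at
 * the lowest index:
 *
 *	static void
 *	sd_example_iodone(int index, struct sd_lun *un, struct buf *bp)
 *	{
 *		(undo this layer's iostart-side processing of bp here)
 *		SD_NEXT_IODONE(index, un, bp);
 *	}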
1775 */
1776
1777 static sd_chain_t sd_iodone_chain[] = {
1778
1779 /* Chain for buf IO for disk drive targets (PM enabled) */
1780 sd_buf_iodone, /* Index: 0 */
1781 sd_mapblockaddr_iodone, /* Index: 1 */
1782 sd_pm_iodone, /* Index: 2 */
1783
1784 /* Chain for buf IO for disk drive targets (PM disabled) */
1785 sd_buf_iodone, /* Index: 3 */
1786 sd_mapblockaddr_iodone, /* Index: 4 */
1787
1788 /* Chain for buf IO for removable-media targets (PM enabled) */
1789 sd_buf_iodone, /* Index: 5 */
1790 sd_mapblockaddr_iodone, /* Index: 6 */
1791 sd_mapblocksize_iodone, /* Index: 7 */
1792 sd_pm_iodone, /* Index: 8 */
1793
1794 /* Chain for buf IO for removable-media targets (PM disabled) */
1795 sd_buf_iodone, /* Index: 9 */
1796 sd_mapblockaddr_iodone, /* Index: 10 */
1797 sd_mapblocksize_iodone, /* Index: 11 */
1798
1799 /* Chain for buf IO for disk drives with checksumming (PM enabled) */
1800 sd_buf_iodone, /* Index: 12 */
1801 sd_mapblockaddr_iodone, /* Index: 13 */
1802 sd_checksum_iodone, /* Index: 14 */
1803 sd_pm_iodone, /* Index: 15 */
1804
1805 /* Chain for buf IO for disk drives with checksumming (PM disabled) */
1806 sd_buf_iodone, /* Index: 16 */
1807 sd_mapblockaddr_iodone, /* Index: 17 */
1808 sd_checksum_iodone, /* Index: 18 */
1809
1810 /* Chain for USCSI commands (non-checksum targets) */
1811 sd_uscsi_iodone, /* Index: 19 */
1812 sd_pm_iodone, /* Index: 20 */
1813
1814 /* Chain for USCSI commands (checksum targets) */
1815 sd_uscsi_iodone, /* Index: 21 */
1816 sd_checksum_uscsi_iodone, /* Index: 22 */
1817 sd_pm_iodone, /* Index: 23 */
1818
1819 /* Chain for "direct" USCSI commands (all targets) */
1820 sd_uscsi_iodone, /* Index: 24 */
1821
1822 /* Chain for "direct priority" USCSI commands (all targets) */
1823 sd_uscsi_iodone, /* Index: 25 */
1824 };
1825
1826
1827 /*
1828 * Macros to locate the "first" function in the sd_iodone_chain[] array for
1829 * each iodone-side chain. These are located by the array index, but as the
1830 * iodone side functions are called in a decrementing-index order, the
1831 * highest index number in each chain must be specified (as these correspond
1832 * to the first function in the iodone chain that will be called by the core
1833 * at IO completion time).
1834 */
1835
1836 #define SD_CHAIN_DISK_IODONE 2
1837 #define SD_CHAIN_DISK_IODONE_NO_PM 4
1838 #define SD_CHAIN_RMMEDIA_IODONE 8
1839 #define SD_CHAIN_RMMEDIA_IODONE_NO_PM 11
1840 #define SD_CHAIN_CHKSUM_IODONE 15
1841 #define SD_CHAIN_CHKSUM_IODONE_NO_PM 18
1842 #define SD_CHAIN_USCSI_CMD_IODONE 20
1843 #define SD_CHAIN_USCSI_CHKSUM_IODONE 23
1844 #define SD_CHAIN_DIRECT_CMD_IODONE 24
1845 #define SD_CHAIN_PRIORITY_CMD_IODONE 25
1846
1847
1848
1849
1850 /*
1851 * Array to map a layering chain index to the appropriate initpkt routine.
1852 * The redundant entries are present so that the index used for accessing
1853 * the above sd_iostart_chain and sd_iodone_chain tables can be used directly
1854 * with this table as well.
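 *
 * Conceptually (a sketch only; the actual call sites differ in detail),
 * the transport setup code resolves the init routine for an xbuf xp as:
 *
 *	sd_initpkt_t initfunc = sd_initpkt_map[xp->xb_chain_iostart];
 *	rval = (*initfunc)(bp, &pktp);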
1855 */
1856 typedef int (*sd_initpkt_t)(struct buf *, struct scsi_pkt **);
1857
1858 static sd_initpkt_t sd_initpkt_map[] = {
1859
1860 /* Chain for buf IO for disk drive targets (PM enabled) */
1861 sd_initpkt_for_buf, /* Index: 0 */
1862 sd_initpkt_for_buf, /* Index: 1 */
1863 sd_initpkt_for_buf, /* Index: 2 */
1864
1865 /* Chain for buf IO for disk drive targets (PM disabled) */
1866 sd_initpkt_for_buf, /* Index: 3 */
1867 sd_initpkt_for_buf, /* Index: 4 */
1868
1869 /* Chain for buf IO for removable-media targets (PM enabled) */
1870 sd_initpkt_for_buf, /* Index: 5 */
1871 sd_initpkt_for_buf, /* Index: 6 */
1872 sd_initpkt_for_buf, /* Index: 7 */
1873 sd_initpkt_for_buf, /* Index: 8 */
1874
1875 /* Chain for buf IO for removable-media targets (PM disabled) */
1876 sd_initpkt_for_buf, /* Index: 9 */
1877 sd_initpkt_for_buf, /* Index: 10 */
1878 sd_initpkt_for_buf, /* Index: 11 */
1879
1880 /* Chain for buf IO for disk drives with checksumming (PM enabled) */
1881 sd_initpkt_for_buf, /* Index: 12 */
1882 sd_initpkt_for_buf, /* Index: 13 */
1883 sd_initpkt_for_buf, /* Index: 14 */
1884 sd_initpkt_for_buf, /* Index: 15 */
1885
1886 /* Chain for buf IO for disk drives with checksumming (PM disabled) */
1887 sd_initpkt_for_buf, /* Index: 16 */
1888 sd_initpkt_for_buf, /* Index: 17 */
1889 sd_initpkt_for_buf, /* Index: 18 */
1890
1891 /* Chain for USCSI commands (non-checksum targets) */
1892 sd_initpkt_for_uscsi, /* Index: 19 */
1893 sd_initpkt_for_uscsi, /* Index: 20 */
1894
1895 /* Chain for USCSI commands (checksum targets) */
1896 sd_initpkt_for_uscsi, /* Index: 21 */
1897 sd_initpkt_for_uscsi, /* Index: 22 */
1898 sd_initpkt_for_uscsi, /* Index: 23 */
1899
1900 /* Chain for "direct" USCSI commands (all targets) */
1901 sd_initpkt_for_uscsi, /* Index: 24 */
1902
1903 /* Chain for "direct priority" USCSI commands (all targets) */
1904 sd_initpkt_for_uscsi, /* Index: 25 */
1905
1906 };
1907
1908
1909 /*
1910 * Array to map a layering chain index to the appropriate destroypkt routine.
1911 * The redundant entries are present so that the index used for accessing
1912 * the above sd_iostart_chain and sd_iodone_chain tables can be used directly
1913 * with this table as well.
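 *
 * Symmetrically (again only a sketch), the completion path would resolve
 * the teardown routine via the iodone-side index, e.g.:
 *
 *	(*(sd_destroypkt_map[xp->xb_chain_iodone]))(bp);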
1914 */
1915 typedef void (*sd_destroypkt_t)(struct buf *);
1916
1917 static sd_destroypkt_t sd_destroypkt_map[] = {
1918
1919 /* Chain for buf IO for disk drive targets (PM enabled) */
1920 sd_destroypkt_for_buf, /* Index: 0 */
1921 sd_destroypkt_for_buf, /* Index: 1 */
1922 sd_destroypkt_for_buf, /* Index: 2 */
1923
1924 /* Chain for buf IO for disk drive targets (PM disabled) */
1925 sd_destroypkt_for_buf, /* Index: 3 */
1926 sd_destroypkt_for_buf, /* Index: 4 */
1927
1928 /* Chain for buf IO for removable-media targets (PM enabled) */
1929 sd_destroypkt_for_buf, /* Index: 5 */
1930 sd_destroypkt_for_buf, /* Index: 6 */
1931 sd_destroypkt_for_buf, /* Index: 7 */
1932 sd_destroypkt_for_buf, /* Index: 8 */
1933
1934 /* Chain for buf IO for removable-media targets (PM disabled) */
1935 sd_destroypkt_for_buf, /* Index: 9 */
1936 sd_destroypkt_for_buf, /* Index: 10 */
1937 sd_destroypkt_for_buf, /* Index: 11 */
1938
1939 /* Chain for buf IO for disk drives with checksumming (PM enabled) */
1940 sd_destroypkt_for_buf, /* Index: 12 */
1941 sd_destroypkt_for_buf, /* Index: 13 */
1942 sd_destroypkt_for_buf, /* Index: 14 */
1943 sd_destroypkt_for_buf, /* Index: 15 */
1944
1945 /* Chain for buf IO for disk drives with checksumming (PM disabled) */
1946 sd_destroypkt_for_buf, /* Index: 16 */
1947 sd_destroypkt_for_buf, /* Index: 17 */
1948 sd_destroypkt_for_buf, /* Index: 18 */
1949
1950 /* Chain for USCSI commands (non-checksum targets) */
1951 sd_destroypkt_for_uscsi, /* Index: 19 */
1952 sd_destroypkt_for_uscsi, /* Index: 20 */
1953
1954 /* Chain for USCSI commands (checksum targets) */
1955 sd_destroypkt_for_uscsi, /* Index: 21 */
1956 sd_destroypkt_for_uscsi, /* Index: 22 */
1957 sd_destroypkt_for_uscsi, /* Index: 23 */
1958
1959 /* Chain for "direct" USCSI commands (all targets) */
1960 sd_destroypkt_for_uscsi, /* Index: 24 */
1961
1962 /* Chain for "direct priority" USCSI commands (all targets) */
1963 sd_destroypkt_for_uscsi, /* Index: 25 */
1964
1965 };
1966
1967
1968
1969 /*
1970 * Array to map a layering chain index to the appropriate chain "type".
1971 * The chain type indicates a specific property/usage of the chain.
1972 * The redundant entries are present so that the index used for accessing
1973 * the above sd_iostart_chain and sd_iodone_chain tables can be used directly
1974 * with this table as well.
1975 */
1976
1977 #define SD_CHAIN_NULL 0 /* for the special RQS cmd */
1978 #define SD_CHAIN_BUFIO 1 /* regular buf IO */
1979 #define SD_CHAIN_USCSI 2 /* regular USCSI commands */
1980 #define SD_CHAIN_DIRECT 3 /* uscsi, w/ bypass power mgt */
1981 #define SD_CHAIN_DIRECT_PRIORITY 4 /* uscsi, w/ bypass power mgt */
1982 /* (for error recovery) */
1983
1984 static int sd_chain_type_map[] = {
1985
1986 /* Chain for buf IO for disk drive targets (PM enabled) */
1987 SD_CHAIN_BUFIO, /* Index: 0 */
1988 SD_CHAIN_BUFIO, /* Index: 1 */
1989 SD_CHAIN_BUFIO, /* Index: 2 */
1990
1991 /* Chain for buf IO for disk drive targets (PM disabled) */
1992 SD_CHAIN_BUFIO, /* Index: 3 */
1993 SD_CHAIN_BUFIO, /* Index: 4 */
1994
1995 /* Chain for buf IO for removable-media targets (PM enabled) */
1996 SD_CHAIN_BUFIO, /* Index: 5 */
1997 SD_CHAIN_BUFIO, /* Index: 6 */
1998 SD_CHAIN_BUFIO, /* Index: 7 */
1999 SD_CHAIN_BUFIO, /* Index: 8 */
2000
2001 /* Chain for buf IO for removable-media targets (PM disabled) */
2002 SD_CHAIN_BUFIO, /* Index: 9 */
2003 SD_CHAIN_BUFIO, /* Index: 10 */
2004 SD_CHAIN_BUFIO, /* Index: 11 */
2005
2006 /* Chain for buf IO for disk drives with checksumming (PM enabled) */
2007 SD_CHAIN_BUFIO, /* Index: 12 */
2008 SD_CHAIN_BUFIO, /* Index: 13 */
2009 SD_CHAIN_BUFIO, /* Index: 14 */
2010 SD_CHAIN_BUFIO, /* Index: 15 */
2011
2012 /* Chain for buf IO for disk drives with checksumming (PM disabled) */
2013 SD_CHAIN_BUFIO, /* Index: 16 */
2014 SD_CHAIN_BUFIO, /* Index: 17 */
2015 SD_CHAIN_BUFIO, /* Index: 18 */
2016
2017 /* Chain for USCSI commands (non-checksum targets) */
2018 SD_CHAIN_USCSI, /* Index: 19 */
2019 SD_CHAIN_USCSI, /* Index: 20 */
2020
2021 /* Chain for USCSI commands (checksum targets) */
2022 SD_CHAIN_USCSI, /* Index: 21 */
2023 SD_CHAIN_USCSI, /* Index: 22 */
2024 SD_CHAIN_USCSI, /* Index: 23 */
2025
2026 /* Chain for "direct" USCSI commands (all targets) */
2027 SD_CHAIN_DIRECT, /* Index: 24 */
2028
2029 /* Chain for "direct priority" USCSI commands (all targets) */
2030 SD_CHAIN_DIRECT_PRIORITY, /* Index: 25 */
2031 };
2032
2033
2034 /* Macro to return TRUE if the IO has come from the sd_buf_iostart() chain. */
2035 #define SD_IS_BUFIO(xp) \
2036 (sd_chain_type_map[(xp)->xb_chain_iostart] == SD_CHAIN_BUFIO)
2037
2038 /* Macro to return TRUE if the IO has come from the "direct priority" chain. */
2039 #define SD_IS_DIRECT_PRIORITY(xp) \
2040 (sd_chain_type_map[(xp)->xb_chain_iostart] == SD_CHAIN_DIRECT_PRIORITY)
2041
2042
2043
2044 /*
2045 * Struct, array, and macros to map a specific chain to the appropriate
2046 * layering indexes in the sd_iostart_chain[] and sd_iodone_chain[] arrays.
2047 *
2048 * The sd_chain_index_map[] array is used at attach time to set the various
2049 * un_xxx_chain type members of the sd_lun softstate to the specific layering
2050 * chain to be used with the instance. This allows different instances to use
2051 * different chains for buf IO, uscsi IO, etc. Also, since the xb_chain_iostart
2052 * and xb_chain_iodone index values in the sd_xbuf are initialized to these
2053 * values at sd_xbuf init time, this allows (1) layering chains to be changed
2054 * dynamically and without the use of locking; and (2) a layer to update the
2055 * xb_chain_io[start|done] member in a given xbuf with its current index value,
2056 * to allow for deferred processing of an IO within the same chain from a
2057 * different execution context.
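 *
 * As an illustrative sketch of the attach-time selection (assuming a
 * non-removable, non-checksum disk with power management enabled):
 *
 *	un->un_buf_chain_type = SD_CHAIN_INFO_DISK;
 *
 * and then, at sd_xbuf init time:
 *
 *	index = un->un_buf_chain_type;
 *	xp->xb_chain_iostart = sd_chain_index_map[index].sci_iostart_index;
 *	xp->xb_chain_iodone = sd_chain_index_map[index].sci_iodone_index;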
2058 */ 2059 2060 struct sd_chain_index { 2061 int sci_iostart_index; 2062 int sci_iodone_index; 2063 }; 2064 2065 static struct sd_chain_index sd_chain_index_map[] = { 2066 { SD_CHAIN_DISK_IOSTART, SD_CHAIN_DISK_IODONE }, 2067 { SD_CHAIN_DISK_IOSTART_NO_PM, SD_CHAIN_DISK_IODONE_NO_PM }, 2068 { SD_CHAIN_RMMEDIA_IOSTART, SD_CHAIN_RMMEDIA_IODONE }, 2069 { SD_CHAIN_RMMEDIA_IOSTART_NO_PM, SD_CHAIN_RMMEDIA_IODONE_NO_PM }, 2070 { SD_CHAIN_CHKSUM_IOSTART, SD_CHAIN_CHKSUM_IODONE }, 2071 { SD_CHAIN_CHKSUM_IOSTART_NO_PM, SD_CHAIN_CHKSUM_IODONE_NO_PM }, 2072 { SD_CHAIN_USCSI_CMD_IOSTART, SD_CHAIN_USCSI_CMD_IODONE }, 2073 { SD_CHAIN_USCSI_CHKSUM_IOSTART, SD_CHAIN_USCSI_CHKSUM_IODONE }, 2074 { SD_CHAIN_DIRECT_CMD_IOSTART, SD_CHAIN_DIRECT_CMD_IODONE }, 2075 { SD_CHAIN_PRIORITY_CMD_IOSTART, SD_CHAIN_PRIORITY_CMD_IODONE }, 2076 }; 2077 2078 2079 /* 2080 * The following are indexes into the sd_chain_index_map[] array. 2081 */ 2082 2083 /* un->un_buf_chain_type must be set to one of these */ 2084 #define SD_CHAIN_INFO_DISK 0 2085 #define SD_CHAIN_INFO_DISK_NO_PM 1 2086 #define SD_CHAIN_INFO_RMMEDIA 2 2087 #define SD_CHAIN_INFO_RMMEDIA_NO_PM 3 2088 #define SD_CHAIN_INFO_CHKSUM 4 2089 #define SD_CHAIN_INFO_CHKSUM_NO_PM 5 2090 2091 /* un->un_uscsi_chain_type must be set to one of these */ 2092 #define SD_CHAIN_INFO_USCSI_CMD 6 2093 /* USCSI with PM disabled is the same as DIRECT */ 2094 #define SD_CHAIN_INFO_USCSI_CMD_NO_PM 8 2095 #define SD_CHAIN_INFO_USCSI_CHKSUM 7 2096 2097 /* un->un_direct_chain_type must be set to one of these */ 2098 #define SD_CHAIN_INFO_DIRECT_CMD 8 2099 2100 /* un->un_priority_chain_type must be set to one of these */ 2101 #define SD_CHAIN_INFO_PRIORITY_CMD 9 2102 2103 /* size for devid inquiries */ 2104 #define MAX_INQUIRY_SIZE 0xF0 2105 2106 /* 2107 * Macros used by functions to pass a given buf(9S) struct along to the 2108 * next function in the layering chain for further processing. 2109 * 2110 * In the following macros, passing more than three arguments to the called 2111 * routines causes the optimizer for the SPARC compiler to stop doing tail 2112 * call elimination which results in significant performance degradation. 2113 */ 2114 #define SD_BEGIN_IOSTART(index, un, bp) \ 2115 ((*(sd_iostart_chain[index]))(index, un, bp)) 2116 2117 #define SD_BEGIN_IODONE(index, un, bp) \ 2118 ((*(sd_iodone_chain[index]))(index, un, bp)) 2119 2120 #define SD_NEXT_IOSTART(index, un, bp) \ 2121 ((*(sd_iostart_chain[(index) + 1]))((index) + 1, un, bp)) 2122 2123 #define SD_NEXT_IODONE(index, un, bp) \ 2124 ((*(sd_iodone_chain[(index) - 1]))((index) - 1, un, bp)) 2125 2126 /* 2127 * Function: _init 2128 * 2129 * Description: This is the driver _init(9E) entry point. 2130 * 2131 * Return Code: Returns the value from mod_install(9F) or 2132 * ddi_soft_state_init(9F) as appropriate. 2133 * 2134 * Context: Called when driver module loaded. 
2135 */
2136
2137 int
2138 _init(void)
2139 {
2140 int err;
2141
2142 /* establish driver name from module name */
2143 sd_label = mod_modname(&modlinkage);
2144
2145 err = ddi_soft_state_init(&sd_state, sizeof (struct sd_lun),
2146 SD_MAXUNIT);
2147
2148 if (err != 0) {
2149 return (err);
2150 }
2151
2152 mutex_init(&sd_detach_mutex, NULL, MUTEX_DRIVER, NULL);
2153 mutex_init(&sd_log_mutex, NULL, MUTEX_DRIVER, NULL);
2154 mutex_init(&sd_label_mutex, NULL, MUTEX_DRIVER, NULL);
2155
2156 mutex_init(&sd_tr.srq_resv_reclaim_mutex, NULL, MUTEX_DRIVER, NULL);
2157 cv_init(&sd_tr.srq_resv_reclaim_cv, NULL, CV_DRIVER, NULL);
2158 cv_init(&sd_tr.srq_inprocess_cv, NULL, CV_DRIVER, NULL);
2159
2160 /*
2161 * It is OK to initialize these here, even for fibre devices.
2162 */
2163 sd_scsi_probe_cache_init();
2164
2165 sd_scsi_target_lun_init();
2166
2167 /*
2168 * Creating the taskq before mod_install ensures that all callers (threads)
2169 * that enter the module after a successful mod_install encounter
2170 * a valid taskq.
2171 */
2172 sd_taskq_create();
2173
2174 err = mod_install(&modlinkage);
2175 if (err != 0) {
2176 /* delete taskq if install fails */
2177 sd_taskq_delete();
2178
2179 mutex_destroy(&sd_detach_mutex);
2180 mutex_destroy(&sd_log_mutex);
2181 mutex_destroy(&sd_label_mutex);
2182
2183 mutex_destroy(&sd_tr.srq_resv_reclaim_mutex);
2184 cv_destroy(&sd_tr.srq_resv_reclaim_cv);
2185 cv_destroy(&sd_tr.srq_inprocess_cv);
2186
2187 sd_scsi_probe_cache_fini();
2188
2189 sd_scsi_target_lun_fini();
2190
2191 ddi_soft_state_fini(&sd_state);
2192 return (err);
2193 }
2194
2195 return (err);
2196 }
2197
2198
2199 /*
2200 * Function: _fini
2201 *
2202 * Description: This is the driver _fini(9E) entry point.
2203 *
2204 * Return Code: Returns the value from mod_remove(9F).
2205 *
2206 * Context: Called when driver module is unloaded.
2207 */
2208
2209 int
2210 _fini(void)
2211 {
2212 int err;
2213
2214 if ((err = mod_remove(&modlinkage)) != 0) {
2215 return (err);
2216 }
2217
2218 sd_taskq_delete();
2219
2220 mutex_destroy(&sd_detach_mutex);
2221 mutex_destroy(&sd_log_mutex);
2222 mutex_destroy(&sd_label_mutex);
2223 mutex_destroy(&sd_tr.srq_resv_reclaim_mutex);
2224
2225 sd_scsi_probe_cache_fini();
2226
2227 sd_scsi_target_lun_fini();
2228
2229 cv_destroy(&sd_tr.srq_resv_reclaim_cv);
2230 cv_destroy(&sd_tr.srq_inprocess_cv);
2231
2232 ddi_soft_state_fini(&sd_state);
2233
2234 return (err);
2235 }
2236
2237
2238 /*
2239 * Function: _info
2240 *
2241 * Description: This is the driver _info(9E) entry point.
2242 *
2243 * Arguments: modinfop - pointer to the driver modinfo structure
2244 *
2245 * Return Code: Returns the value from mod_info(9F).
2246 *
2247 * Context: Kernel thread context
2248 */
2249
2250 int
2251 _info(struct modinfo *modinfop)
2252 {
2253 return (mod_info(&modlinkage, modinfop));
2254 }
2255
2256
2257 /*
2258 * The following routines implement the driver message logging facility.
2259 * They provide component- and level-based debug output filtering.
2260 * Output may also be restricted to messages for a single instance by
2261 * specifying a soft state pointer in sd_debug_un. If sd_debug_un is set
2262 * to NULL, then messages for all instances are printed.
2263 *
2264 * These routines have been cloned from each other due to the language
2265 * constraints of macros and variable argument list processing.
2266 */
2267
2268
2269 /*
2270 * Function: sd_log_err
2271 *
2272 * Description: This routine is called by the SD_ERROR macro for debug
2273 * logging of error conditions.
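 *
 * Typical usage, as seen elsewhere in this file:
 *
 *	SD_ERROR(SD_LOG_COMMON, un,
 *	    "sd_enable_descr_sense: mode sense ctrl page failed\n");
 *
 * where the component argument (SD_LOG_COMMON here) is matched against
 * sd_component_mask to decide whether the message is emitted.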
2274 *
2275 * Arguments: comp - driver component being logged
2276 * un - driver soft state (unit) structure
2277 * fmt - error string and format to be logged
2278 */
2279
2280 static void
2281 sd_log_err(uint_t comp, struct sd_lun *un, const char *fmt, ...)
2282 {
2283 va_list ap;
2284 dev_info_t *dev;
2285
2286 ASSERT(un != NULL);
2287 dev = SD_DEVINFO(un);
2288 ASSERT(dev != NULL);
2289
2290 /*
2291 * Filter messages based on the global component and level masks.
2292 * Also print if un matches the value of sd_debug_un, or if
2293 * sd_debug_un is set to NULL.
2294 */
2295 if ((sd_component_mask & comp) && (sd_level_mask & SD_LOGMASK_ERROR) &&
2296 ((sd_debug_un == NULL) || (sd_debug_un == un))) {
2297 mutex_enter(&sd_log_mutex);
2298 va_start(ap, fmt);
2299 (void) vsprintf(sd_log_buf, fmt, ap);
2300 va_end(ap);
2301 scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf);
2302 mutex_exit(&sd_log_mutex);
2303 }
2304 #ifdef SD_FAULT_INJECTION
2305 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask));
2306 if (un->sd_injection_mask & comp) {
2307 mutex_enter(&sd_log_mutex);
2308 va_start(ap, fmt);
2309 (void) vsprintf(sd_log_buf, fmt, ap);
2310 va_end(ap);
2311 sd_injection_log(sd_log_buf, un);
2312 mutex_exit(&sd_log_mutex);
2313 }
2314 #endif
2315 }
2316
2317
2318 /*
2319 * Function: sd_log_info
2320 *
2321 * Description: This routine is called by the SD_INFO macro for debug
2322 * logging of general purpose informational conditions.
2323 *
2324 * Arguments: component - driver component being logged
2325 * un - driver soft state (unit) structure
2326 * fmt - info string and format to be logged
2327 */
2328
2329 static void
2330 sd_log_info(uint_t component, struct sd_lun *un, const char *fmt, ...)
2331 {
2332 va_list ap;
2333 dev_info_t *dev;
2334
2335 ASSERT(un != NULL);
2336 dev = SD_DEVINFO(un);
2337 ASSERT(dev != NULL);
2338
2339 /*
2340 * Filter messages based on the global component and level masks.
2341 * Also print if un matches the value of sd_debug_un, or if
2342 * sd_debug_un is set to NULL.
2343 */
2344 if ((sd_component_mask & component) &&
2345 (sd_level_mask & SD_LOGMASK_INFO) &&
2346 ((sd_debug_un == NULL) || (sd_debug_un == un))) {
2347 mutex_enter(&sd_log_mutex);
2348 va_start(ap, fmt);
2349 (void) vsprintf(sd_log_buf, fmt, ap);
2350 va_end(ap);
2351 scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf);
2352 mutex_exit(&sd_log_mutex);
2353 }
2354 #ifdef SD_FAULT_INJECTION
2355 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask));
2356 if (un->sd_injection_mask & component) {
2357 mutex_enter(&sd_log_mutex);
2358 va_start(ap, fmt);
2359 (void) vsprintf(sd_log_buf, fmt, ap);
2360 va_end(ap);
2361 sd_injection_log(sd_log_buf, un);
2362 mutex_exit(&sd_log_mutex);
2363 }
2364 #endif
2365 }
2366
2367
2368 /*
2369 * Function: sd_log_trace
2370 *
2371 * Description: This routine is called by the SD_TRACE macro for debug
2372 * logging of trace conditions (i.e. function entry/exit).
2373 *
2374 * Arguments: component - driver component being logged
2375 * un - driver soft state (unit) structure
2376 * fmt - trace string and format to be logged
2377 */
2378
2379 static void
2380 sd_log_trace(uint_t component, struct sd_lun *un, const char *fmt, ...)
2381 {
2382 va_list ap;
2383 dev_info_t *dev;
2384
2385 ASSERT(un != NULL);
2386 dev = SD_DEVINFO(un);
2387 ASSERT(dev != NULL);
2388
2389 /*
2390 * Filter messages based on the global component and level masks.
2391 * Also print if un matches the value of sd_debug_un, or if
2392 * sd_debug_un is set to NULL.
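 *
 * As a hypothetical example, verbose output for a debug kernel could be
 * requested at boot time by widening both masks in /etc/system:
 *
 *	set sd:sd_component_mask = 0xffffffff
 *	set sd:sd_level_mask = 0xffffffff
 *
 * leaving sd_debug_un at NULL so that messages for all instances print.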
2393 */ 2394 if ((sd_component_mask & component) && 2395 (sd_level_mask & SD_LOGMASK_TRACE) && 2396 ((sd_debug_un == NULL) || (sd_debug_un == un))) { 2397 mutex_enter(&sd_log_mutex); 2398 va_start(ap, fmt); 2399 (void) vsprintf(sd_log_buf, fmt, ap); 2400 va_end(ap); 2401 scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf); 2402 mutex_exit(&sd_log_mutex); 2403 } 2404 #ifdef SD_FAULT_INJECTION 2405 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask)); 2406 if (un->sd_injection_mask & component) { 2407 mutex_enter(&sd_log_mutex); 2408 va_start(ap, fmt); 2409 (void) vsprintf(sd_log_buf, fmt, ap); 2410 va_end(ap); 2411 sd_injection_log(sd_log_buf, un); 2412 mutex_exit(&sd_log_mutex); 2413 } 2414 #endif 2415 } 2416 2417 2418 /* 2419 * Function: sdprobe 2420 * 2421 * Description: This is the driver probe(9e) entry point function. 2422 * 2423 * Arguments: devi - opaque device info handle 2424 * 2425 * Return Code: DDI_PROBE_SUCCESS: If the probe was successful. 2426 * DDI_PROBE_FAILURE: If the probe failed. 2427 * DDI_PROBE_PARTIAL: If the instance is not present now, 2428 * but may be present in the future. 2429 */ 2430 2431 static int 2432 sdprobe(dev_info_t *devi) 2433 { 2434 struct scsi_device *devp; 2435 int rval; 2436 int instance; 2437 2438 /* 2439 * if it wasn't for pln, sdprobe could actually be nulldev 2440 * in the "__fibre" case. 2441 */ 2442 if (ddi_dev_is_sid(devi) == DDI_SUCCESS) { 2443 return (DDI_PROBE_DONTCARE); 2444 } 2445 2446 devp = ddi_get_driver_private(devi); 2447 2448 if (devp == NULL) { 2449 /* Ooops... nexus driver is mis-configured... */ 2450 return (DDI_PROBE_FAILURE); 2451 } 2452 2453 instance = ddi_get_instance(devi); 2454 2455 if (ddi_get_soft_state(sd_state, instance) != NULL) { 2456 return (DDI_PROBE_PARTIAL); 2457 } 2458 2459 /* 2460 * Call the SCSA utility probe routine to see if we actually 2461 * have a target at this SCSI nexus. 2462 */ 2463 switch (sd_scsi_probe_with_cache(devp, NULL_FUNC)) { 2464 case SCSIPROBE_EXISTS: 2465 switch (devp->sd_inq->inq_dtype) { 2466 case DTYPE_DIRECT: 2467 rval = DDI_PROBE_SUCCESS; 2468 break; 2469 case DTYPE_RODIRECT: 2470 /* CDs etc. Can be removable media */ 2471 rval = DDI_PROBE_SUCCESS; 2472 break; 2473 case DTYPE_OPTICAL: 2474 /* 2475 * Rewritable optical driver HP115AA 2476 * Can also be removable media 2477 */ 2478 2479 /* 2480 * Do not attempt to bind to DTYPE_OPTICAL if 2481 * pre solaris 9 sparc sd behavior is required 2482 * 2483 * If first time through and sd_dtype_optical_bind 2484 * has not been set in /etc/system check properties 2485 */ 2486 2487 if (sd_dtype_optical_bind < 0) { 2488 sd_dtype_optical_bind = ddi_prop_get_int 2489 (DDI_DEV_T_ANY, devi, 0, 2490 "optical-device-bind", 1); 2491 } 2492 2493 if (sd_dtype_optical_bind == 0) { 2494 rval = DDI_PROBE_FAILURE; 2495 } else { 2496 rval = DDI_PROBE_SUCCESS; 2497 } 2498 break; 2499 2500 case DTYPE_NOTPRESENT: 2501 default: 2502 rval = DDI_PROBE_FAILURE; 2503 break; 2504 } 2505 break; 2506 default: 2507 rval = DDI_PROBE_PARTIAL; 2508 break; 2509 } 2510 2511 /* 2512 * This routine checks for resource allocation prior to freeing, 2513 * so it will take care of the "smart probing" case where a 2514 * scsi_probe() may or may not have been issued and will *not* 2515 * free previously-freed resources. 2516 */ 2517 scsi_unprobe(devp); 2518 return (rval); 2519 } 2520 2521 2522 /* 2523 * Function: sdinfo 2524 * 2525 * Description: This is the driver getinfo(9e) entry point function. 
2526 * Given the device number, return the devinfo pointer from 2527 * the scsi_device structure or the instance number 2528 * associated with the dev_t. 2529 * 2530 * Arguments: dip - pointer to device info structure 2531 * infocmd - command argument (DDI_INFO_DEVT2DEVINFO, 2532 * DDI_INFO_DEVT2INSTANCE) 2533 * arg - driver dev_t 2534 * resultp - user buffer for request response 2535 * 2536 * Return Code: DDI_SUCCESS 2537 * DDI_FAILURE 2538 */ 2539 /* ARGSUSED */ 2540 static int 2541 sdinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result) 2542 { 2543 struct sd_lun *un; 2544 dev_t dev; 2545 int instance; 2546 int error; 2547 2548 switch (infocmd) { 2549 case DDI_INFO_DEVT2DEVINFO: 2550 dev = (dev_t)arg; 2551 instance = SDUNIT(dev); 2552 if ((un = ddi_get_soft_state(sd_state, instance)) == NULL) { 2553 return (DDI_FAILURE); 2554 } 2555 *result = (void *) SD_DEVINFO(un); 2556 error = DDI_SUCCESS; 2557 break; 2558 case DDI_INFO_DEVT2INSTANCE: 2559 dev = (dev_t)arg; 2560 instance = SDUNIT(dev); 2561 *result = (void *)(uintptr_t)instance; 2562 error = DDI_SUCCESS; 2563 break; 2564 default: 2565 error = DDI_FAILURE; 2566 } 2567 return (error); 2568 } 2569 2570 /* 2571 * Function: sd_prop_op 2572 * 2573 * Description: This is the driver prop_op(9e) entry point function. 2574 * Return the number of blocks for the partition in question 2575 * or forward the request to the property facilities. 2576 * 2577 * Arguments: dev - device number 2578 * dip - pointer to device info structure 2579 * prop_op - property operator 2580 * mod_flags - DDI_PROP_DONTPASS, don't pass to parent 2581 * name - pointer to property name 2582 * valuep - pointer or address of the user buffer 2583 * lengthp - property length 2584 * 2585 * Return Code: DDI_PROP_SUCCESS 2586 * DDI_PROP_NOT_FOUND 2587 * DDI_PROP_UNDEFINED 2588 * DDI_PROP_NO_MEMORY 2589 * DDI_PROP_BUF_TOO_SMALL 2590 */ 2591 2592 static int 2593 sd_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, int mod_flags, 2594 char *name, caddr_t valuep, int *lengthp) 2595 { 2596 int instance = ddi_get_instance(dip); 2597 struct sd_lun *un; 2598 uint64_t nblocks64; 2599 uint_t dblk; 2600 2601 /* 2602 * Our dynamic properties are all device specific and size oriented. 2603 * Requests issued under conditions where size is valid are passed 2604 * to ddi_prop_op_nblocks with the size information, otherwise the 2605 * request is passed to ddi_prop_op. Size depends on valid geometry. 
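 *
 * (The dynamic, size-oriented properties serviced this way include the
 * standard "Nblocks" property; see ddi_prop_op_nblocks(9F). As a
 * hypothetical example, these are the values reported when a utility
 * such as prtconf -v is run against a disk instance with a valid label.)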
2606 */ 2607 un = ddi_get_soft_state(sd_state, instance); 2608 if ((dev == DDI_DEV_T_ANY) || (un == NULL)) { 2609 return (ddi_prop_op(dev, dip, prop_op, mod_flags, 2610 name, valuep, lengthp)); 2611 } else if (!SD_IS_VALID_LABEL(un)) { 2612 return (ddi_prop_op(dev, dip, prop_op, mod_flags, name, 2613 valuep, lengthp)); 2614 } 2615 2616 /* get nblocks value */ 2617 ASSERT(!mutex_owned(SD_MUTEX(un))); 2618 2619 (void) cmlb_partinfo(un->un_cmlbhandle, SDPART(dev), 2620 (diskaddr_t *)&nblocks64, NULL, NULL, NULL, (void *)SD_PATH_DIRECT); 2621 2622 /* report size in target size blocks */ 2623 dblk = un->un_tgt_blocksize / un->un_sys_blocksize; 2624 return (ddi_prop_op_nblocks_blksize(dev, dip, prop_op, mod_flags, 2625 name, valuep, lengthp, nblocks64 / dblk, un->un_tgt_blocksize)); 2626 } 2627 2628 /* 2629 * The following functions are for smart probing: 2630 * sd_scsi_probe_cache_init() 2631 * sd_scsi_probe_cache_fini() 2632 * sd_scsi_clear_probe_cache() 2633 * sd_scsi_probe_with_cache() 2634 */ 2635 2636 /* 2637 * Function: sd_scsi_probe_cache_init 2638 * 2639 * Description: Initializes the probe response cache mutex and head pointer. 2640 * 2641 * Context: Kernel thread context 2642 */ 2643 2644 static void 2645 sd_scsi_probe_cache_init(void) 2646 { 2647 mutex_init(&sd_scsi_probe_cache_mutex, NULL, MUTEX_DRIVER, NULL); 2648 sd_scsi_probe_cache_head = NULL; 2649 } 2650 2651 2652 /* 2653 * Function: sd_scsi_probe_cache_fini 2654 * 2655 * Description: Frees all resources associated with the probe response cache. 2656 * 2657 * Context: Kernel thread context 2658 */ 2659 2660 static void 2661 sd_scsi_probe_cache_fini(void) 2662 { 2663 struct sd_scsi_probe_cache *cp; 2664 struct sd_scsi_probe_cache *ncp; 2665 2666 /* Clean up our smart probing linked list */ 2667 for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = ncp) { 2668 ncp = cp->next; 2669 kmem_free(cp, sizeof (struct sd_scsi_probe_cache)); 2670 } 2671 sd_scsi_probe_cache_head = NULL; 2672 mutex_destroy(&sd_scsi_probe_cache_mutex); 2673 } 2674 2675 2676 /* 2677 * Function: sd_scsi_clear_probe_cache 2678 * 2679 * Description: This routine clears the probe response cache. This is 2680 * done when open() returns ENXIO so that when deferred 2681 * attach is attempted (possibly after a device has been 2682 * turned on) we will retry the probe. Since we don't know 2683 * which target we failed to open, we just clear the 2684 * entire cache. 2685 * 2686 * Context: Kernel thread context 2687 */ 2688 2689 static void 2690 sd_scsi_clear_probe_cache(void) 2691 { 2692 struct sd_scsi_probe_cache *cp; 2693 int i; 2694 2695 mutex_enter(&sd_scsi_probe_cache_mutex); 2696 for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = cp->next) { 2697 /* 2698 * Reset all entries to SCSIPROBE_EXISTS. This will 2699 * force probing to be performed the next time 2700 * sd_scsi_probe_with_cache is called. 2701 */ 2702 for (i = 0; i < NTARGETS_WIDE; i++) { 2703 cp->cache[i] = SCSIPROBE_EXISTS; 2704 } 2705 } 2706 mutex_exit(&sd_scsi_probe_cache_mutex); 2707 } 2708 2709 2710 /* 2711 * Function: sd_scsi_probe_with_cache 2712 * 2713 * Description: This routine implements support for a scsi device probe 2714 * with cache. The driver maintains a cache of the target 2715 * responses to scsi probes. If we get no response from a 2716 * target during a probe inquiry, we remember that, and we 2717 * avoid additional calls to scsi_probe on non-zero LUNs 2718 * on the same target until the cache is cleared. 
By doing 2719 * so we avoid the 1/4 sec selection timeout for nonzero 2720 * LUNs. lun0 of a target is always probed. 2721 * 2722 * Arguments: devp - Pointer to a scsi_device(9S) structure 2723 * waitfunc - indicates what the allocator routines should 2724 * do when resources are not available. This value 2725 * is passed on to scsi_probe() when that routine 2726 * is called. 2727 * 2728 * Return Code: SCSIPROBE_NORESP if a NORESP in probe response cache; 2729 * otherwise the value returned by scsi_probe(9F). 2730 * 2731 * Context: Kernel thread context 2732 */ 2733 2734 static int 2735 sd_scsi_probe_with_cache(struct scsi_device *devp, int (*waitfn)()) 2736 { 2737 struct sd_scsi_probe_cache *cp; 2738 dev_info_t *pdip = ddi_get_parent(devp->sd_dev); 2739 int lun, tgt; 2740 2741 lun = ddi_prop_get_int(DDI_DEV_T_ANY, devp->sd_dev, DDI_PROP_DONTPASS, 2742 SCSI_ADDR_PROP_LUN, 0); 2743 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devp->sd_dev, DDI_PROP_DONTPASS, 2744 SCSI_ADDR_PROP_TARGET, -1); 2745 2746 /* Make sure caching enabled and target in range */ 2747 if ((tgt < 0) || (tgt >= NTARGETS_WIDE)) { 2748 /* do it the old way (no cache) */ 2749 return (scsi_probe(devp, waitfn)); 2750 } 2751 2752 mutex_enter(&sd_scsi_probe_cache_mutex); 2753 2754 /* Find the cache for this scsi bus instance */ 2755 for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = cp->next) { 2756 if (cp->pdip == pdip) { 2757 break; 2758 } 2759 } 2760 2761 /* If we can't find a cache for this pdip, create one */ 2762 if (cp == NULL) { 2763 int i; 2764 2765 cp = kmem_zalloc(sizeof (struct sd_scsi_probe_cache), 2766 KM_SLEEP); 2767 cp->pdip = pdip; 2768 cp->next = sd_scsi_probe_cache_head; 2769 sd_scsi_probe_cache_head = cp; 2770 for (i = 0; i < NTARGETS_WIDE; i++) { 2771 cp->cache[i] = SCSIPROBE_EXISTS; 2772 } 2773 } 2774 2775 mutex_exit(&sd_scsi_probe_cache_mutex); 2776 2777 /* Recompute the cache for this target if LUN zero */ 2778 if (lun == 0) { 2779 cp->cache[tgt] = SCSIPROBE_EXISTS; 2780 } 2781 2782 /* Don't probe if cache remembers a NORESP from a previous LUN. */ 2783 if (cp->cache[tgt] != SCSIPROBE_EXISTS) { 2784 return (SCSIPROBE_NORESP); 2785 } 2786 2787 /* Do the actual probe; save & return the result */ 2788 return (cp->cache[tgt] = scsi_probe(devp, waitfn)); 2789 } 2790 2791 2792 /* 2793 * Function: sd_scsi_target_lun_init 2794 * 2795 * Description: Initializes the attached lun chain mutex and head pointer. 2796 * 2797 * Context: Kernel thread context 2798 */ 2799 2800 static void 2801 sd_scsi_target_lun_init(void) 2802 { 2803 mutex_init(&sd_scsi_target_lun_mutex, NULL, MUTEX_DRIVER, NULL); 2804 sd_scsi_target_lun_head = NULL; 2805 } 2806 2807 2808 /* 2809 * Function: sd_scsi_target_lun_fini 2810 * 2811 * Description: Frees all resources associated with the attached lun 2812 * chain 2813 * 2814 * Context: Kernel thread context 2815 */ 2816 2817 static void 2818 sd_scsi_target_lun_fini(void) 2819 { 2820 struct sd_scsi_hba_tgt_lun *cp; 2821 struct sd_scsi_hba_tgt_lun *ncp; 2822 2823 for (cp = sd_scsi_target_lun_head; cp != NULL; cp = ncp) { 2824 ncp = cp->next; 2825 kmem_free(cp, sizeof (struct sd_scsi_hba_tgt_lun)); 2826 } 2827 sd_scsi_target_lun_head = NULL; 2828 mutex_destroy(&sd_scsi_target_lun_mutex); 2829 } 2830 2831 2832 /* 2833 * Function: sd_scsi_get_target_lun_count 2834 * 2835 * Description: This routine will check in the attached lun chain to see 2836 * how many luns are attached on the required SCSI controller 2837 * and target. 
Currently, some capabilities, such as tagged queuing,
2838 * are supported per target by the HBA, so all luns in a
2839 * target have the same capabilities. Based on this assumption,
2840 * sd should only set these capabilities once per target. This
2841 * function is called when sd needs to decide how many luns are
2842 * already attached on a target.
2843 *
2844 * Arguments: dip - Pointer to the system's dev_info_t for the SCSI
2845 * controller device.
2846 * target - The target ID on the controller's SCSI bus.
2847 *
2848 * Return Code: The number of luns attached on the required target and
2849 * controller.
2850 * -1 if target ID is not in parallel SCSI scope or the given
2851 * dip is not in the chain.
2852 *
2853 * Context: Kernel thread context
2854 */
2855
2856 static int
2857 sd_scsi_get_target_lun_count(dev_info_t *dip, int target)
2858 {
2859 struct sd_scsi_hba_tgt_lun *cp;
2860
2861 if ((target < 0) || (target >= NTARGETS_WIDE)) {
2862 return (-1);
2863 }
2864
2865 mutex_enter(&sd_scsi_target_lun_mutex);
2866
2867 for (cp = sd_scsi_target_lun_head; cp != NULL; cp = cp->next) {
2868 if (cp->pdip == dip) {
2869 break;
2870 }
2871 }
2872
2873 mutex_exit(&sd_scsi_target_lun_mutex);
2874
2875 if (cp == NULL) {
2876 return (-1);
2877 }
2878
2879 return (cp->nlun[target]);
2880 }
2881
2882
2883 /*
2884 * Function: sd_scsi_update_lun_on_target
2885 *
2886 * Description: This routine is used to update the attached lun chain when a
2887 * lun is attached or detached on a target.
2888 *
2889 * Arguments: dip - Pointer to the system's dev_info_t for the SCSI
2890 * controller device.
2891 * target - The target ID on the controller's SCSI bus.
2892 * flag - Indicates whether the lun is being attached or detached.
2893 *
2894 * Context: Kernel thread context
2895 */
2896
2897 static void
2898 sd_scsi_update_lun_on_target(dev_info_t *dip, int target, int flag)
2899 {
2900 struct sd_scsi_hba_tgt_lun *cp;
2901
2902 mutex_enter(&sd_scsi_target_lun_mutex);
2903
2904 for (cp = sd_scsi_target_lun_head; cp != NULL; cp = cp->next) {
2905 if (cp->pdip == dip) {
2906 break;
2907 }
2908 }
2909
2910 if ((cp == NULL) && (flag == SD_SCSI_LUN_ATTACH)) {
2911 cp = kmem_zalloc(sizeof (struct sd_scsi_hba_tgt_lun),
2912 KM_SLEEP);
2913 cp->pdip = dip;
2914 cp->next = sd_scsi_target_lun_head;
2915 sd_scsi_target_lun_head = cp;
2916 }
2917
2918 mutex_exit(&sd_scsi_target_lun_mutex);
2919
2920 if (cp != NULL) {
2921 if (flag == SD_SCSI_LUN_ATTACH) {
2922 cp->nlun[target]++;
2923 } else {
2924 cp->nlun[target]--;
2925 }
2926 }
2927 }
2928
2929
2930 /*
2931 * Function: sd_spin_up_unit
2932 *
2933 * Description: Issues the following commands to spin up the device:
2934 * START STOP UNIT, and INQUIRY.
2935 *
2936 * Arguments: un - driver soft state (unit) structure
2937 *
2938 * Return Code: 0 - success
2939 * EIO - failure
2940 * EACCES - reservation conflict
2941 *
2942 * Context: Kernel thread context
2943 */
2944
2945 static int
2946 sd_spin_up_unit(struct sd_lun *un)
2947 {
2948 size_t resid = 0;
2949 int has_conflict = FALSE;
2950 uchar_t *bufaddr;
2951
2952 ASSERT(un != NULL);
2953
2954 /*
2955 * Send a throwaway START UNIT command.
2956 *
2957 * If we fail on this, we don't care presently what precisely
2958 * is wrong. EMC's arrays will also fail this with a check
2959 * condition (0x2/0x4/0x3) if the device is "inactive," but
2960 * we don't want to fail the attach because it may become
2961 * "active" later.
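 *
 * (For reference, the check condition cited above decodes as sense key
 * 0x2 (NOT READY), ASC 0x4, ASCQ 0x3: "Logical unit not ready, manual
 * intervention required".)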
2962 */ 2963 if (sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_START, SD_PATH_DIRECT) 2964 == EACCES) 2965 has_conflict = TRUE; 2966 2967 /* 2968 * Send another INQUIRY command to the target. This is necessary for 2969 * non-removable media direct access devices because their INQUIRY data 2970 * may not be fully qualified until they are spun up (perhaps via the 2971 * START command above). Note: This seems to be needed for some 2972 * legacy devices only.) The INQUIRY command should succeed even if a 2973 * Reservation Conflict is present. 2974 */ 2975 bufaddr = kmem_zalloc(SUN_INQSIZE, KM_SLEEP); 2976 if (sd_send_scsi_INQUIRY(un, bufaddr, SUN_INQSIZE, 0, 0, &resid) != 0) { 2977 kmem_free(bufaddr, SUN_INQSIZE); 2978 return (EIO); 2979 } 2980 2981 /* 2982 * If we got enough INQUIRY data, copy it over the old INQUIRY data. 2983 * Note that this routine does not return a failure here even if the 2984 * INQUIRY command did not return any data. This is a legacy behavior. 2985 */ 2986 if ((SUN_INQSIZE - resid) >= SUN_MIN_INQLEN) { 2987 bcopy(bufaddr, SD_INQUIRY(un), SUN_INQSIZE); 2988 } 2989 2990 kmem_free(bufaddr, SUN_INQSIZE); 2991 2992 /* If we hit a reservation conflict above, tell the caller. */ 2993 if (has_conflict == TRUE) { 2994 return (EACCES); 2995 } 2996 2997 return (0); 2998 } 2999 3000 #ifdef _LP64 3001 /* 3002 * Function: sd_enable_descr_sense 3003 * 3004 * Description: This routine attempts to select descriptor sense format 3005 * using the Control mode page. Devices that support 64 bit 3006 * LBAs (for >2TB luns) should also implement descriptor 3007 * sense data so we will call this function whenever we see 3008 * a lun larger than 2TB. If for some reason the device 3009 * supports 64 bit LBAs but doesn't support descriptor sense 3010 * presumably the mode select will fail. Everything will 3011 * continue to work normally except that we will not get 3012 * complete sense data for commands that fail with an LBA 3013 * larger than 32 bits. 3014 * 3015 * Arguments: un - driver soft state (unit) structure 3016 * 3017 * Context: Kernel thread context only 3018 */ 3019 3020 static void 3021 sd_enable_descr_sense(struct sd_lun *un) 3022 { 3023 uchar_t *header; 3024 struct mode_control_scsi3 *ctrl_bufp; 3025 size_t buflen; 3026 size_t bd_len; 3027 3028 /* 3029 * Read MODE SENSE page 0xA, Control Mode Page 3030 */ 3031 buflen = MODE_HEADER_LENGTH + MODE_BLK_DESC_LENGTH + 3032 sizeof (struct mode_control_scsi3); 3033 header = kmem_zalloc(buflen, KM_SLEEP); 3034 if (sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, header, buflen, 3035 MODEPAGE_CTRL_MODE, SD_PATH_DIRECT) != 0) { 3036 SD_ERROR(SD_LOG_COMMON, un, 3037 "sd_enable_descr_sense: mode sense ctrl page failed\n"); 3038 goto eds_exit; 3039 } 3040 3041 /* 3042 * Determine size of Block Descriptors in order to locate 3043 * the mode page data. ATAPI devices return 0, SCSI devices 3044 * should return MODE_BLK_DESC_LENGTH. 3045 */ 3046 bd_len = ((struct mode_header *)header)->bdesc_length; 3047 3048 /* Clear the mode data length field for MODE SELECT */ 3049 ((struct mode_header *)header)->length = 0; 3050 3051 ctrl_bufp = (struct mode_control_scsi3 *) 3052 (header + MODE_HEADER_LENGTH + bd_len); 3053 3054 /* 3055 * If the page length is smaller than the expected value, 3056 * the target device doesn't support D_SENSE. Bail out here. 
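 *
 * (The page length field of a mode page counts only the bytes that
 * follow it, excluding the two-byte page header, which is why the
 * comparison below subtracts 2 from sizeof (struct mode_control_scsi3).)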
3057 */ 3058 if (ctrl_bufp->mode_page.length < 3059 sizeof (struct mode_control_scsi3) - 2) { 3060 SD_ERROR(SD_LOG_COMMON, un, 3061 "sd_enable_descr_sense: enable D_SENSE failed\n"); 3062 goto eds_exit; 3063 } 3064 3065 /* 3066 * Clear PS bit for MODE SELECT 3067 */ 3068 ctrl_bufp->mode_page.ps = 0; 3069 3070 /* 3071 * Set D_SENSE to enable descriptor sense format. 3072 */ 3073 ctrl_bufp->d_sense = 1; 3074 3075 /* 3076 * Use MODE SELECT to commit the change to the D_SENSE bit 3077 */ 3078 if (sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, header, 3079 buflen, SD_DONTSAVE_PAGE, SD_PATH_DIRECT) != 0) { 3080 SD_INFO(SD_LOG_COMMON, un, 3081 "sd_enable_descr_sense: mode select ctrl page failed\n"); 3082 goto eds_exit; 3083 } 3084 3085 eds_exit: 3086 kmem_free(header, buflen); 3087 } 3088 3089 /* 3090 * Function: sd_reenable_dsense_task 3091 * 3092 * Description: Re-enable descriptor sense after device or bus reset 3093 * 3094 * Context: Executes in a taskq() thread context 3095 */ 3096 static void 3097 sd_reenable_dsense_task(void *arg) 3098 { 3099 struct sd_lun *un = arg; 3100 3101 ASSERT(un != NULL); 3102 sd_enable_descr_sense(un); 3103 } 3104 #endif /* _LP64 */ 3105 3106 /* 3107 * Function: sd_set_mmc_caps 3108 * 3109 * Description: This routine determines if the device is MMC compliant and if 3110 * the device supports CDDA via a mode sense of the CDVD 3111 * capabilities mode page. Also checks if the device is a 3112 * dvdram writable device. 3113 * 3114 * Arguments: un - driver soft state (unit) structure 3115 * 3116 * Context: Kernel thread context only 3117 */ 3118 3119 static void 3120 sd_set_mmc_caps(struct sd_lun *un) 3121 { 3122 struct mode_header_grp2 *sense_mhp; 3123 uchar_t *sense_page; 3124 caddr_t buf; 3125 int bd_len; 3126 int status; 3127 struct uscsi_cmd com; 3128 int rtn; 3129 uchar_t *out_data_rw, *out_data_hd; 3130 uchar_t *rqbuf_rw, *rqbuf_hd; 3131 3132 ASSERT(un != NULL); 3133 3134 /* 3135 * The flags which will be set in this function are - mmc compliant, 3136 * dvdram writable device, cdda support. Initialize them to FALSE 3137 * and if a capability is detected - it will be set to TRUE. 3138 */ 3139 un->un_f_mmc_cap = FALSE; 3140 un->un_f_dvdram_writable_device = FALSE; 3141 un->un_f_cfg_cdda = FALSE; 3142 3143 buf = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP); 3144 status = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, (uchar_t *)buf, 3145 BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP, SD_PATH_DIRECT); 3146 3147 if (status != 0) { 3148 /* command failed; just return */ 3149 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3150 return; 3151 } 3152 /* 3153 * If the mode sense request for the CDROM CAPABILITIES 3154 * page (0x2A) succeeds the device is assumed to be MMC. 3155 */ 3156 un->un_f_mmc_cap = TRUE; 3157 3158 /* Get to the page data */ 3159 sense_mhp = (struct mode_header_grp2 *)buf; 3160 bd_len = (sense_mhp->bdesc_length_hi << 8) | 3161 sense_mhp->bdesc_length_lo; 3162 if (bd_len > MODE_BLK_DESC_LENGTH) { 3163 /* 3164 * We did not get back the expected block descriptor 3165 * length so we cannot determine if the device supports 3166 * CDDA. However, we still indicate the device is MMC 3167 * according to the successful response to the page 3168 * 0x2A mode sense request. 
3169 */ 3170 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3171 "sd_set_mmc_caps: Mode Sense returned " 3172 "invalid block descriptor length\n"); 3173 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3174 return; 3175 } 3176 3177 /* See if read CDDA is supported */ 3178 sense_page = (uchar_t *)(buf + MODE_HEADER_LENGTH_GRP2 + 3179 bd_len); 3180 un->un_f_cfg_cdda = (sense_page[5] & 0x01) ? TRUE : FALSE; 3181 3182 /* See if writing DVD RAM is supported. */ 3183 un->un_f_dvdram_writable_device = (sense_page[3] & 0x20) ? TRUE : FALSE; 3184 if (un->un_f_dvdram_writable_device == TRUE) { 3185 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3186 return; 3187 } 3188 3189 /* 3190 * If the device presents DVD or CD capabilities in the mode 3191 * page, we can return here since a RRD will not have 3192 * these capabilities. 3193 */ 3194 if ((sense_page[2] & 0x3f) || (sense_page[3] & 0x3f)) { 3195 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3196 return; 3197 } 3198 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3199 3200 /* 3201 * If un->un_f_dvdram_writable_device is still FALSE, 3202 * check for a Removable Rigid Disk (RRD). A RRD 3203 * device is identified by the features RANDOM_WRITABLE and 3204 * HARDWARE_DEFECT_MANAGEMENT. 3205 */ 3206 out_data_rw = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP); 3207 rqbuf_rw = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 3208 3209 rtn = sd_send_scsi_feature_GET_CONFIGURATION(un, &com, rqbuf_rw, 3210 SENSE_LENGTH, out_data_rw, SD_CURRENT_FEATURE_LEN, 3211 RANDOM_WRITABLE, SD_PATH_STANDARD); 3212 if (rtn != 0) { 3213 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN); 3214 kmem_free(rqbuf_rw, SENSE_LENGTH); 3215 return; 3216 } 3217 3218 out_data_hd = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP); 3219 rqbuf_hd = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 3220 3221 rtn = sd_send_scsi_feature_GET_CONFIGURATION(un, &com, rqbuf_hd, 3222 SENSE_LENGTH, out_data_hd, SD_CURRENT_FEATURE_LEN, 3223 HARDWARE_DEFECT_MANAGEMENT, SD_PATH_STANDARD); 3224 if (rtn == 0) { 3225 /* 3226 * We have good information, check for random writable 3227 * and hardware defect features. 3228 */ 3229 if ((out_data_rw[9] & RANDOM_WRITABLE) && 3230 (out_data_hd[9] & HARDWARE_DEFECT_MANAGEMENT)) { 3231 un->un_f_dvdram_writable_device = TRUE; 3232 } 3233 } 3234 3235 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN); 3236 kmem_free(rqbuf_rw, SENSE_LENGTH); 3237 kmem_free(out_data_hd, SD_CURRENT_FEATURE_LEN); 3238 kmem_free(rqbuf_hd, SENSE_LENGTH); 3239 } 3240 3241 /* 3242 * Function: sd_check_for_writable_cd 3243 * 3244 * Description: This routine determines if the media in the device is 3245 * writable or not. It uses the get configuration command (0x46) 3246 * to determine if the media is writable 3247 * 3248 * Arguments: un - driver soft state (unit) structure 3249 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" 3250 * chain and the normal command waitq, or 3251 * SD_PATH_DIRECT_PRIORITY to use the USCSI 3252 * "direct" chain and bypass the normal command 3253 * waitq. 3254 * 3255 * Context: Never called at interrupt context. 
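 *
 * (Per MMC, bytes 6-7 of the GET CONFIGURATION response header hold the
 * current profile; the check below for out_data[7] == 0x12 therefore
 * matches the DVD-RAM profile, which is inherently writable.)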
3256 */
3257
3258 static void
3259 sd_check_for_writable_cd(struct sd_lun *un, int path_flag)
3260 {
3261 struct uscsi_cmd com;
3262 uchar_t *out_data;
3263 uchar_t *rqbuf;
3264 int rtn;
3265 uchar_t *out_data_rw, *out_data_hd;
3266 uchar_t *rqbuf_rw, *rqbuf_hd;
3267 struct mode_header_grp2 *sense_mhp;
3268 uchar_t *sense_page;
3269 caddr_t buf;
3270 int bd_len;
3271 int status;
3272
3273 ASSERT(un != NULL);
3274 ASSERT(mutex_owned(SD_MUTEX(un)));
3275
3276 /*
3277 * Initialize un_f_mmc_writable_media to FALSE; it is set to TRUE
3278 * only if the configuration info tells us the media is writable.
3279 */
3280 un->un_f_mmc_writable_media = FALSE;
3281 mutex_exit(SD_MUTEX(un));
3282
3283 out_data = kmem_zalloc(SD_PROFILE_HEADER_LEN, KM_SLEEP);
3284 rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);
3285
3286 rtn = sd_send_scsi_GET_CONFIGURATION(un, &com, rqbuf, SENSE_LENGTH,
3287 out_data, SD_PROFILE_HEADER_LEN, path_flag);
3288
3289 mutex_enter(SD_MUTEX(un));
3290 if (rtn == 0) {
3291 /*
3292 * We have good information, check for writable DVD.
3293 */
3294 if ((out_data[6] == 0) && (out_data[7] == 0x12)) {
3295 un->un_f_mmc_writable_media = TRUE;
3296 kmem_free(out_data, SD_PROFILE_HEADER_LEN);
3297 kmem_free(rqbuf, SENSE_LENGTH);
3298 return;
3299 }
3300 }
3301
3302 kmem_free(out_data, SD_PROFILE_HEADER_LEN);
3303 kmem_free(rqbuf, SENSE_LENGTH);
3304
3305 /*
3306 * Determine if this is an RRD type device.
3307 */
3308 mutex_exit(SD_MUTEX(un));
3309 buf = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP);
3310 status = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, (uchar_t *)buf,
3311 BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP, path_flag);
3312 mutex_enter(SD_MUTEX(un));
3313 if (status != 0) {
3314 /* command failed; just return */
3315 kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
3316 return;
3317 }
3318
3319 /* Get to the page data */
3320 sense_mhp = (struct mode_header_grp2 *)buf;
3321 bd_len = (sense_mhp->bdesc_length_hi << 8) | sense_mhp->bdesc_length_lo;
3322 if (bd_len > MODE_BLK_DESC_LENGTH) {
3323 /*
3324 * We did not get back the expected block descriptor length so
3325 * we cannot check the mode page.
3326 */
3327 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
3328 "sd_check_for_writable_cd: Mode Sense returned "
3329 "invalid block descriptor length\n");
3330 kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
3331 return;
3332 }
3333
3334 /*
3335 * If the device presents DVD or CD capabilities in the mode
3336 * page, we can return here since an RRD device will not have
3337 * these capabilities.
3338 */
3339 sense_page = (uchar_t *)(buf + MODE_HEADER_LENGTH_GRP2 + bd_len);
3340 if ((sense_page[2] & 0x3f) || (sense_page[3] & 0x3f)) {
3341 kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
3342 return;
3343 }
3344 kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
3345
3346 /*
3347 * If un->un_f_mmc_writable_media is still FALSE,
3348 * check for RRD type media. An RRD device is identified
3349 * by the features RANDOM_WRITABLE and HARDWARE_DEFECT_MANAGEMENT.
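 * (In the GET CONFIGURATION reply the feature descriptor follows the
 * 8-byte feature header, so byte 9 holds the low byte of the feature
 * code and bit 0 of byte 10 is the "current" flag; those are the [9]
 * and [10] subscripts tested below.)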
3350 */
3351 mutex_exit(SD_MUTEX(un));
3352 out_data_rw = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP);
3353 rqbuf_rw = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);
3354
3355 rtn = sd_send_scsi_feature_GET_CONFIGURATION(un, &com, rqbuf_rw,
3356 SENSE_LENGTH, out_data_rw, SD_CURRENT_FEATURE_LEN,
3357 RANDOM_WRITABLE, path_flag);
3358 if (rtn != 0) {
3359 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN);
3360 kmem_free(rqbuf_rw, SENSE_LENGTH);
3361 mutex_enter(SD_MUTEX(un));
3362 return;
3363 }
3364
3365 out_data_hd = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP);
3366 rqbuf_hd = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);
3367
3368 rtn = sd_send_scsi_feature_GET_CONFIGURATION(un, &com, rqbuf_hd,
3369 SENSE_LENGTH, out_data_hd, SD_CURRENT_FEATURE_LEN,
3370 HARDWARE_DEFECT_MANAGEMENT, path_flag);
3371 mutex_enter(SD_MUTEX(un));
3372 if (rtn == 0) {
3373 /*
3374 * We have good information, check for random writable
3375 * and hardware defect features as current.
3376 */
3377 if ((out_data_rw[9] & RANDOM_WRITABLE) &&
3378 (out_data_rw[10] & 0x1) &&
3379 (out_data_hd[9] & HARDWARE_DEFECT_MANAGEMENT) &&
3380 (out_data_hd[10] & 0x1)) {
3381 un->un_f_mmc_writable_media = TRUE;
3382 }
3383 }
3384
3385 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN);
3386 kmem_free(rqbuf_rw, SENSE_LENGTH);
3387 kmem_free(out_data_hd, SD_CURRENT_FEATURE_LEN);
3388 kmem_free(rqbuf_hd, SENSE_LENGTH);
3389 }
3390
3391 /*
3392 * Function: sd_read_unit_properties
3393 *
3394 * Description: The following implements a property lookup mechanism.
3395 * Properties for particular disks (keyed on vendor, model
3396 * and rev numbers) are sought in the sd.conf file via
3397 * sd_process_sdconf_file(), and if not found there, are
3398 * looked for in a list hardcoded in this driver via
3399 * sd_process_sdconf_table(). Once located, the properties
3400 * are used to update the driver unit structure.
3401 *
3402 * Arguments: un - driver soft state (unit) structure
3403 */
3404
3405 static void
3406 sd_read_unit_properties(struct sd_lun *un)
3407 {
3408 /*
3409 * sd_process_sdconf_file returns SD_FAILURE if it cannot find
3410 * the "sd-config-list" property (from the sd.conf file) or if
3411 * there was not a match for the inquiry vid/pid. If this event
3412 * occurs, the static driver configuration table is searched for
3413 * a match.
3414 */
3415 ASSERT(un != NULL);
3416 if (sd_process_sdconf_file(un) == SD_FAILURE) {
3417 sd_process_sdconf_table(un);
3418 }
3419
3420 /* check for LSI device */
3421 sd_is_lsi(un);
3422
3423
3424 }
3425
3426
3427 /*
3428 * Function: sd_process_sdconf_file
3429 *
3430 * Description: Use ddi_getlongprop to obtain the properties from the
3431 * driver's config file (i.e., sd.conf) and update the driver
3432 * soft state structure accordingly.
3433 *
3434 * Arguments: un - driver soft state (unit) structure
3435 *
3436 * Return Code: SD_SUCCESS - The properties were successfully set according
3437 * to the driver configuration file.
3438 * SD_FAILURE - The driver config list was not obtained or
3439 * there was no vid/pid match. This indicates that
3440 * the static config table should be used.
3441 * 3442 * The config file has a property, "sd-config-list", which consists of 3443 * one or more duplets as follows: 3444 * 3445 * sd-config-list= 3446 * <duplet>, 3447 * [<duplet>,] 3448 * [<duplet>]; 3449 * 3450 * The structure of each duplet is as follows: 3451 * 3452 * <duplet>:= <vid+pid>,<data-property-name_list> 3453 * 3454 * The first entry of the duplet is the device ID string (the concatenated 3455 * vid & pid; not to be confused with a device_id). This is defined in 3456 * the same way as in the sd_disk_table. 3457 * 3458 * The second part of the duplet is a string that identifies a 3459 * data-property-name-list. The data-property-name-list is defined as 3460 * follows: 3461 * 3462 * <data-property-name-list>:=<data-property-name> [<data-property-name>] 3463 * 3464 * The syntax of <data-property-name> depends on the <version> field. 3465 * 3466 * If version = SD_CONF_VERSION_1 we have the following syntax: 3467 * 3468 * <data-property-name>:=<version>,<flags>,<prop0>,<prop1>,.....<propN> 3469 * 3470 * where the prop0 value will be used to set prop0 if bit0 set in the 3471 * flags, prop1 if bit1 set, etc. and N = SD_CONF_MAX_ITEMS -1 3472 * 3473 */ 3474 3475 static int 3476 sd_process_sdconf_file(struct sd_lun *un) 3477 { 3478 char *config_list = NULL; 3479 int config_list_len; 3480 int len; 3481 int dupletlen = 0; 3482 char *vidptr; 3483 int vidlen; 3484 char *dnlist_ptr; 3485 char *dataname_ptr; 3486 int dnlist_len; 3487 int dataname_len; 3488 int *data_list; 3489 int data_list_len; 3490 int rval = SD_FAILURE; 3491 int i; 3492 3493 ASSERT(un != NULL); 3494 3495 /* Obtain the configuration list associated with the .conf file */ 3496 if (ddi_getlongprop(DDI_DEV_T_ANY, SD_DEVINFO(un), DDI_PROP_DONTPASS, 3497 sd_config_list, (caddr_t)&config_list, &config_list_len) 3498 != DDI_PROP_SUCCESS) { 3499 return (SD_FAILURE); 3500 } 3501 3502 /* 3503 * Compare vids in each duplet to the inquiry vid - if a match is 3504 * made, get the data value and update the soft state structure 3505 * accordingly. 3506 * 3507 * Note: This algorithm is complex and difficult to maintain. It should 3508 * be replaced with a more robust implementation. 3509 */ 3510 for (len = config_list_len, vidptr = config_list; len > 0; 3511 vidptr += dupletlen, len -= dupletlen) { 3512 /* 3513 * Note: The assumption here is that each vid entry is on 3514 * a unique line from its associated duplet. 3515 */ 3516 vidlen = dupletlen = (int)strlen(vidptr); 3517 if ((vidlen == 0) || 3518 (sd_sdconf_id_match(un, vidptr, vidlen) != SD_SUCCESS)) { 3519 dupletlen++; 3520 continue; 3521 } 3522 3523 /* 3524 * dnlist contains 1 or more blank separated 3525 * data-property-name entries 3526 */ 3527 dnlist_ptr = vidptr + vidlen + 1; 3528 dnlist_len = (int)strlen(dnlist_ptr); 3529 dupletlen += dnlist_len + 2; 3530 3531 /* 3532 * Set a pointer for the first data-property-name 3533 * entry in the list 3534 */ 3535 dataname_ptr = dnlist_ptr; 3536 dataname_len = 0; 3537 3538 /* 3539 * Loop through all data-property-name entries in the 3540 * data-property-name-list setting the properties for each. 3541 */ 3542 while (dataname_len < dnlist_len) { 3543 int version; 3544 3545 /* 3546 * Determine the length of the current 3547 * data-property-name entry by indexing until a 3548 * blank or NULL is encountered. When the space is 3549 * encountered reset it to a NULL for compliance 3550 * with ddi_getlongprop(). 
3551 */
3552 for (i = 0; ((dataname_ptr[i] != ' ') &&
3553 (dataname_ptr[i] != '\0')); i++) {
3554 ;
3555 }
3556
3557 dataname_len += i;
3558 /* If not null terminated, make it so */
3559 if (dataname_ptr[i] == ' ') {
3560 dataname_ptr[i] = '\0';
3561 }
3562 dataname_len++;
3563 SD_INFO(SD_LOG_ATTACH_DETACH, un,
3564 "sd_process_sdconf_file: disk:%s, data:%s\n",
3565 vidptr, dataname_ptr);
3566
3567 /* Get the data list */
3568 if (ddi_getlongprop(DDI_DEV_T_ANY, SD_DEVINFO(un), 0,
3569 dataname_ptr, (caddr_t)&data_list, &data_list_len)
3570 != DDI_PROP_SUCCESS) {
3571 SD_INFO(SD_LOG_ATTACH_DETACH, un,
3572 "sd_process_sdconf_file: data property (%s)"
3573 " has no value\n", dataname_ptr);
3574 dataname_ptr = dnlist_ptr + dataname_len;
3575 continue;
3576 }
3577
3578 version = data_list[0];
3579
3580 if (version == SD_CONF_VERSION_1) {
3581 sd_tunables values;
3582
3583 /* Set the properties */
3584 if (sd_chk_vers1_data(un, data_list[1],
3585 &data_list[2], data_list_len, dataname_ptr)
3586 == SD_SUCCESS) {
3587 sd_get_tunables_from_conf(un,
3588 data_list[1], &data_list[2],
3589 &values);
3590 sd_set_vers1_properties(un,
3591 data_list[1], &values);
3592 rval = SD_SUCCESS;
3593 } else {
3594 rval = SD_FAILURE;
3595 }
3596 } else {
3597 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
3598 "data property %s version 0x%x is invalid.",
3599 dataname_ptr, version);
3600 rval = SD_FAILURE;
3601 }
3602 kmem_free(data_list, data_list_len);
3603 dataname_ptr = dnlist_ptr + dataname_len;
3604 }
3605 }
3606
3607 /* free up the memory allocated by ddi_getlongprop */
3608 if (config_list) {
3609 kmem_free(config_list, config_list_len);
3610 }
3611
3612 return (rval);
3613 }
3614
3615 /*
3616 * Function: sd_get_tunables_from_conf()
3617 *
3618 *
3619 * This function reads the data list from the sd.conf file and pulls
3620 * the numeric property values from it, placing each value in the
3621 * appropriate sd_tunables member.
3622 * Since the order of the data list members varies across platforms,
3623 * this function reads them from the data list in a platform-specific
3624 * order and places them into the sd_tunables member that is
3625 * consistent across all platforms.
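 *
 * For example, with a hypothetical sd.conf entry such as (vid/pid
 * and values invented for illustration, assuming SD_CONF_VERSION_1
 * is 1 and SD_CONF_BSET_THROTTLE is bit 0, as the switch ordering
 * below suggests):
 *
 *	sd-config-list= "ACME    SUPERDISK", "acme-data";
 *	acme-data= 1,0x1,10;
 *
 * the flags word 0x1 has only bit 0 set, so the loop below stores
 * data_list[0] (10) in sdt_throttle and leaves the other members
 * zeroed by the bzero().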
3626 */
3627 static void
3628 sd_get_tunables_from_conf(struct sd_lun *un, int flags, int *data_list,
3629 sd_tunables *values)
3630 {
3631 int i;
3632 int mask;
3633
3634 bzero(values, sizeof (sd_tunables));
3635
3636 for (i = 0; i < SD_CONF_MAX_ITEMS; i++) {
3637
3638 mask = 1 << i;
3639 if (mask > flags) {
3640 break;
3641 }
3642
3643 switch (mask & flags) {
3644 case 0: /* This mask bit not set in flags */
3645 continue;
3646 case SD_CONF_BSET_THROTTLE:
3647 values->sdt_throttle = data_list[i];
3648 SD_INFO(SD_LOG_ATTACH_DETACH, un,
3649 "sd_get_tunables_from_conf: throttle = %d\n",
3650 values->sdt_throttle);
3651 break;
3652 case SD_CONF_BSET_CTYPE:
3653 values->sdt_ctype = data_list[i];
3654 SD_INFO(SD_LOG_ATTACH_DETACH, un,
3655 "sd_get_tunables_from_conf: ctype = %d\n",
3656 values->sdt_ctype);
3657 break;
3658 case SD_CONF_BSET_NRR_COUNT:
3659 values->sdt_not_rdy_retries = data_list[i];
3660 SD_INFO(SD_LOG_ATTACH_DETACH, un,
3661 "sd_get_tunables_from_conf: not_rdy_retries = %d\n",
3662 values->sdt_not_rdy_retries);
3663 break;
3664 case SD_CONF_BSET_BSY_RETRY_COUNT:
3665 values->sdt_busy_retries = data_list[i];
3666 SD_INFO(SD_LOG_ATTACH_DETACH, un,
3667 "sd_get_tunables_from_conf: busy_retries = %d\n",
3668 values->sdt_busy_retries);
3669 break;
3670 case SD_CONF_BSET_RST_RETRIES:
3671 values->sdt_reset_retries = data_list[i];
3672 SD_INFO(SD_LOG_ATTACH_DETACH, un,
3673 "sd_get_tunables_from_conf: reset_retries = %d\n",
3674 values->sdt_reset_retries);
3675 break;
3676 case SD_CONF_BSET_RSV_REL_TIME:
3677 values->sdt_reserv_rel_time = data_list[i];
3678 SD_INFO(SD_LOG_ATTACH_DETACH, un,
3679 "sd_get_tunables_from_conf: reserv_rel_time = %d\n",
3680 values->sdt_reserv_rel_time);
3681 break;
3682 case SD_CONF_BSET_MIN_THROTTLE:
3683 values->sdt_min_throttle = data_list[i];
3684 SD_INFO(SD_LOG_ATTACH_DETACH, un,
3685 "sd_get_tunables_from_conf: min_throttle = %d\n",
3686 values->sdt_min_throttle);
3687 break;
3688 case SD_CONF_BSET_DISKSORT_DISABLED:
3689 values->sdt_disk_sort_dis = data_list[i];
3690 SD_INFO(SD_LOG_ATTACH_DETACH, un,
3691 "sd_get_tunables_from_conf: disk_sort_dis = %d\n",
3692 values->sdt_disk_sort_dis);
3693 break;
3694 case SD_CONF_BSET_LUN_RESET_ENABLED:
3695 values->sdt_lun_reset_enable = data_list[i];
3696 SD_INFO(SD_LOG_ATTACH_DETACH, un,
3697 "sd_get_tunables_from_conf: lun_reset_enable = %d"
3698 "\n", values->sdt_lun_reset_enable);
3699 break;
3700 case SD_CONF_BSET_CACHE_IS_NV:
3701 values->sdt_suppress_cache_flush = data_list[i];
3702 SD_INFO(SD_LOG_ATTACH_DETACH, un,
3703 "sd_get_tunables_from_conf: "
3704 "suppress_cache_flush = %d\n",
3705 values->sdt_suppress_cache_flush);
3706 break;
3707 }
3708 }
3709 }
3710
3711 /*
3712 * Function: sd_process_sdconf_table
3713 *
3714 * Description: Search the static configuration table for a match on the
3715 * inquiry vid/pid and update the driver soft state structure
3716 * according to the table property values for the device.
3717 *
3718 * The form of a configuration table entry is:
3719 * <vid+pid>,<flags>,<property-data>
3720 * "SEAGATE ST42400N",1,0x40000,
3721 * 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1;
3722 *
3723 * Arguments: un - driver soft state (unit) structure
3724 */
3725
3726 static void
3727 sd_process_sdconf_table(struct sd_lun *un)
3728 {
3729 char *id = NULL;
3730 int table_index;
3731 int idlen;
3732
3733 ASSERT(un != NULL);
3734 for (table_index = 0; table_index < sd_disk_table_size;
3735 table_index++) {
3736 id = sd_disk_table[table_index].device_id;
3737 idlen = strlen(id);
3738 if (idlen == 0) {
3739 continue;
3740 }
3741
3742 /*
3743 * The static configuration table currently does not
3744 * implement version 10 properties. Additionally,
3745 * multiple data-property-name entries are not
3746 * implemented in the static configuration table.
3747 */
3748 if (sd_sdconf_id_match(un, id, idlen) == SD_SUCCESS) {
3749 SD_INFO(SD_LOG_ATTACH_DETACH, un,
3750 "sd_process_sdconf_table: disk %s\n", id);
3751 sd_set_vers1_properties(un,
3752 sd_disk_table[table_index].flags,
3753 sd_disk_table[table_index].properties);
3754 break;
3755 }
3756 }
3757 }
3758
3759
3760 /*
3761 * Function: sd_sdconf_id_match
3762 *
3763 * Description: This local function implements a case-insensitive vid/pid
3764 * comparison as well as the boundary cases of wild card and
3765 * multiple blanks.
3766 *
3767 * Note: An implicit assumption made here is that the scsi
3768 * inquiry structure will always keep the vid, pid and
3769 * revision strings in consecutive sequence, so they can be
3770 * read as a single string. If this assumption is not the
3771 * case, a separate string, to be used for the check, needs
3772 * to be built with these strings concatenated.
3773 *
3774 * Arguments: un - driver soft state (unit) structure
3775 * id - table or config file vid/pid
3776 * idlen - length of the vid/pid (bytes)
3777 *
3778 * Return Code: SD_SUCCESS - Indicates a match with the inquiry vid/pid
3779 * SD_FAILURE - Indicates no match with the inquiry vid/pid
3780 */
3781
3782 static int
3783 sd_sdconf_id_match(struct sd_lun *un, char *id, int idlen)
3784 {
3785 struct scsi_inquiry *sd_inq;
3786 int rval = SD_SUCCESS;
3787
3788 ASSERT(un != NULL);
3789 sd_inq = un->un_sd->sd_inq;
3790 ASSERT(id != NULL);
3791
3792 /*
3793 * We use the inq_vid as a pointer to a buffer containing the
3794 * vid and pid and use the entire vid/pid length of the table
3795 * entry for the comparison. This works because the inq_pid
3796 * data member follows inq_vid in the scsi_inquiry structure.
3797 */
3798 if (strncasecmp(sd_inq->inq_vid, id, idlen) != 0) {
3799 /*
3800 * The user id string is compared to the inquiry vid/pid
3801 * using a case insensitive comparison and ignoring
3802 * multiple spaces.
3803 */
3804 rval = sd_blank_cmp(un, id, idlen);
3805 if (rval != SD_SUCCESS) {
3806 /*
3807 * User id strings that start and end with a "*"
3808 * are a special case. These do not have a
3809 * specific vendor, and the product string can
3810 * appear anywhere in the 16 byte PID portion of
3811 * the inquiry data. This is a simple strstr()
3812 * type search for the user id in the inquiry data.
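 * For example, a hypothetical entry of "*ST32550*" matches any
 * device whose 16-byte inquiry PID contains "ST32550" anywhere,
 * regardless of the vendor field.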
3813 */ 3814 if ((id[0] == '*') && (id[idlen - 1] == '*')) { 3815 char *pidptr = &id[1]; 3816 int i; 3817 int j; 3818 int pidstrlen = idlen - 2; 3819 j = sizeof (SD_INQUIRY(un)->inq_pid) - 3820 pidstrlen; 3821 3822 if (j < 0) { 3823 return (SD_FAILURE); 3824 } 3825 for (i = 0; i < j; i++) { 3826 if (bcmp(&SD_INQUIRY(un)->inq_pid[i], 3827 pidptr, pidstrlen) == 0) { 3828 rval = SD_SUCCESS; 3829 break; 3830 } 3831 } 3832 } 3833 } 3834 } 3835 return (rval); 3836 } 3837 3838 3839 /* 3840 * Function: sd_blank_cmp 3841 * 3842 * Description: If the id string starts and ends with a space, treat 3843 * multiple consecutive spaces as equivalent to a single 3844 * space. For example, this causes a sd_disk_table entry 3845 * of " NEC CDROM " to match a device's id string of 3846 * "NEC CDROM". 3847 * 3848 * Note: The success exit condition for this routine is if 3849 * the pointer to the table entry is '\0' and the cnt of 3850 * the inquiry length is zero. This will happen if the inquiry 3851 * string returned by the device is padded with spaces to be 3852 * exactly 24 bytes in length (8 byte vid + 16 byte pid). The 3853 * SCSI spec states that the inquiry string is to be padded with 3854 * spaces. 3855 * 3856 * Arguments: un - driver soft state (unit) structure 3857 * id - table or config file vid/pid 3858 * idlen - length of the vid/pid (bytes) 3859 * 3860 * Return Code: SD_SUCCESS - Indicates a match with the inquiry vid/pid 3861 * SD_FAILURE - Indicates no match with the inquiry vid/pid 3862 */ 3863 3864 static int 3865 sd_blank_cmp(struct sd_lun *un, char *id, int idlen) 3866 { 3867 char *p1; 3868 char *p2; 3869 int cnt; 3870 cnt = sizeof (SD_INQUIRY(un)->inq_vid) + 3871 sizeof (SD_INQUIRY(un)->inq_pid); 3872 3873 ASSERT(un != NULL); 3874 p2 = un->un_sd->sd_inq->inq_vid; 3875 ASSERT(id != NULL); 3876 p1 = id; 3877 3878 if ((id[0] == ' ') && (id[idlen - 1] == ' ')) { 3879 /* 3880 * Note: string p1 is terminated by a NUL but string p2 3881 * isn't. The end of p2 is determined by cnt. 3882 */ 3883 for (;;) { 3884 /* skip over any extra blanks in both strings */ 3885 while ((*p1 != '\0') && (*p1 == ' ')) { 3886 p1++; 3887 } 3888 while ((cnt != 0) && (*p2 == ' ')) { 3889 p2++; 3890 cnt--; 3891 } 3892 3893 /* compare the two strings */ 3894 if ((cnt == 0) || 3895 (SD_TOUPPER(*p1) != SD_TOUPPER(*p2))) { 3896 break; 3897 } 3898 while ((cnt > 0) && 3899 (SD_TOUPPER(*p1) == SD_TOUPPER(*p2))) { 3900 p1++; 3901 p2++; 3902 cnt--; 3903 } 3904 } 3905 } 3906 3907 /* return SD_SUCCESS if both strings match */ 3908 return (((*p1 == '\0') && (cnt == 0)) ? 
SD_SUCCESS : SD_FAILURE);
3909 }
3910
3911
3912 /*
3913 * Function: sd_chk_vers1_data
3914 *
3915 * Description: Verify the version 1 device properties provided by the
3916 * user via the configuration file
3917 *
3918 * Arguments: un - driver soft state (unit) structure
3919 * flags - integer mask indicating properties to be set
3920 * prop_list - integer list of property values
3921 * list_len - length of user provided data
3922 *
3923 * Return Code: SD_SUCCESS - Indicates the user provided data is valid
3924 * SD_FAILURE - Indicates the user provided data is invalid
3925 */
3926
3927 static int
3928 sd_chk_vers1_data(struct sd_lun *un, int flags, int *prop_list,
3929 int list_len, char *dataname_ptr)
3930 {
3931 int i;
3932 int mask = 1;
3933 int index = 0;
3934
3935 ASSERT(un != NULL);
3936
3937 /* Check for a NULL property name and list */
3938 if (dataname_ptr == NULL) {
3939 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
3940 "sd_chk_vers1_data: NULL data property name.");
3941 return (SD_FAILURE);
3942 }
3943 if (prop_list == NULL) {
3944 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
3945 "sd_chk_vers1_data: %s NULL data property list.",
3946 dataname_ptr);
3947 return (SD_FAILURE);
3948 }
3949
3950 /* Display a warning if undefined bits are set in the flags */
3951 if (flags & ~SD_CONF_BIT_MASK) {
3952 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
3953 "sd_chk_vers1_data: invalid bits 0x%x in data list %s. "
3954 "Properties not set.",
3955 (flags & ~SD_CONF_BIT_MASK), dataname_ptr);
3956 return (SD_FAILURE);
3957 }
3958
3959 /*
3960 * Verify the length of the list by counting the property bits set
3961 * in the flags and validating that the property list is long
3962 * enough to supply a value for each of them.
3963 */
3964 for (i = 0; i < SD_CONF_MAX_ITEMS; i++) {
3965 if (flags & mask) {
3966 index++;
3967 }
3968 mask <<= 1;
3969 }
3970 if ((list_len / sizeof (int)) < (index + 2)) {
3971 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
3972 "sd_chk_vers1_data: "
3973 "Data property list %s size is incorrect. "
3974 "Properties not set.", dataname_ptr);
3975 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, "Size expected: "
3976 "version + 1 flagword + %d properties", SD_CONF_MAX_ITEMS);
3977 return (SD_FAILURE);
3978 }
3979 return (SD_SUCCESS);
3980 }
3981
3982
3983 /*
3984 * Function: sd_set_vers1_properties
3985 *
3986 * Description: Set version 1 device properties based on a property list
3987 * retrieved from the driver configuration file or static
3988 * configuration table. Version 1 properties have the format:
3989 *
3990 * <data-property-name>:=<version>,<flags>,<prop0>,<prop1>,.....<propN>
3991 *
3992 * where the prop0 value will be used to set prop0 if bit0
3993 * is set in the flags
3994 *
3995 * Arguments: un - driver soft state (unit) structure
3996 * flags - integer mask indicating properties to be set
3997 * prop_list - integer list of property values
3998 */
3999
4000 static void
4001 sd_set_vers1_properties(struct sd_lun *un, int flags, sd_tunables *prop_list)
4002 {
4003 ASSERT(un != NULL);
4004
4005 /*
4006 * Set the flag to indicate cache is to be disabled. An attempt
4007 * to disable the cache via sd_cache_control() will be made
4008 * later during attach once the basic initialization is complete.
4009 */ 4010 if (flags & SD_CONF_BSET_NOCACHE) { 4011 un->un_f_opt_disable_cache = TRUE; 4012 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4013 "sd_set_vers1_properties: caching disabled flag set\n"); 4014 } 4015 4016 /* CD-specific configuration parameters */ 4017 if (flags & SD_CONF_BSET_PLAYMSF_BCD) { 4018 un->un_f_cfg_playmsf_bcd = TRUE; 4019 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4020 "sd_set_vers1_properties: playmsf_bcd set\n"); 4021 } 4022 if (flags & SD_CONF_BSET_READSUB_BCD) { 4023 un->un_f_cfg_readsub_bcd = TRUE; 4024 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4025 "sd_set_vers1_properties: readsub_bcd set\n"); 4026 } 4027 if (flags & SD_CONF_BSET_READ_TOC_TRK_BCD) { 4028 un->un_f_cfg_read_toc_trk_bcd = TRUE; 4029 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4030 "sd_set_vers1_properties: read_toc_trk_bcd set\n"); 4031 } 4032 if (flags & SD_CONF_BSET_READ_TOC_ADDR_BCD) { 4033 un->un_f_cfg_read_toc_addr_bcd = TRUE; 4034 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4035 "sd_set_vers1_properties: read_toc_addr_bcd set\n"); 4036 } 4037 if (flags & SD_CONF_BSET_NO_READ_HEADER) { 4038 un->un_f_cfg_no_read_header = TRUE; 4039 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4040 "sd_set_vers1_properties: no_read_header set\n"); 4041 } 4042 if (flags & SD_CONF_BSET_READ_CD_XD4) { 4043 un->un_f_cfg_read_cd_xd4 = TRUE; 4044 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4045 "sd_set_vers1_properties: read_cd_xd4 set\n"); 4046 } 4047 4048 /* Support for devices which do not have valid/unique serial numbers */ 4049 if (flags & SD_CONF_BSET_FAB_DEVID) { 4050 un->un_f_opt_fab_devid = TRUE; 4051 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4052 "sd_set_vers1_properties: fab_devid bit set\n"); 4053 } 4054 4055 /* Support for user throttle configuration */ 4056 if (flags & SD_CONF_BSET_THROTTLE) { 4057 ASSERT(prop_list != NULL); 4058 un->un_saved_throttle = un->un_throttle = 4059 prop_list->sdt_throttle; 4060 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4061 "sd_set_vers1_properties: throttle set to %d\n", 4062 prop_list->sdt_throttle); 4063 } 4064 4065 /* Set the per disk retry count according to the conf file or table. 
*/ 4066 if (flags & SD_CONF_BSET_NRR_COUNT) { 4067 ASSERT(prop_list != NULL); 4068 if (prop_list->sdt_not_rdy_retries) { 4069 un->un_notready_retry_count = 4070 prop_list->sdt_not_rdy_retries; 4071 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4072 "sd_set_vers1_properties: not ready retry count" 4073 " set to %d\n", un->un_notready_retry_count); 4074 } 4075 } 4076 4077 /* The controller type is reported for generic disk driver ioctls */ 4078 if (flags & SD_CONF_BSET_CTYPE) { 4079 ASSERT(prop_list != NULL); 4080 switch (prop_list->sdt_ctype) { 4081 case CTYPE_CDROM: 4082 un->un_ctype = prop_list->sdt_ctype; 4083 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4084 "sd_set_vers1_properties: ctype set to " 4085 "CTYPE_CDROM\n"); 4086 break; 4087 case CTYPE_CCS: 4088 un->un_ctype = prop_list->sdt_ctype; 4089 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4090 "sd_set_vers1_properties: ctype set to " 4091 "CTYPE_CCS\n"); 4092 break; 4093 case CTYPE_ROD: /* RW optical */ 4094 un->un_ctype = prop_list->sdt_ctype; 4095 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4096 "sd_set_vers1_properties: ctype set to " 4097 "CTYPE_ROD\n"); 4098 break; 4099 default: 4100 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 4101 "sd_set_vers1_properties: Could not set " 4102 "invalid ctype value (%d)", 4103 prop_list->sdt_ctype); 4104 } 4105 } 4106 4107 /* Purple failover timeout */ 4108 if (flags & SD_CONF_BSET_BSY_RETRY_COUNT) { 4109 ASSERT(prop_list != NULL); 4110 un->un_busy_retry_count = 4111 prop_list->sdt_busy_retries; 4112 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4113 "sd_set_vers1_properties: " 4114 "busy retry count set to %d\n", 4115 un->un_busy_retry_count); 4116 } 4117 4118 /* Purple reset retry count */ 4119 if (flags & SD_CONF_BSET_RST_RETRIES) { 4120 ASSERT(prop_list != NULL); 4121 un->un_reset_retry_count = 4122 prop_list->sdt_reset_retries; 4123 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4124 "sd_set_vers1_properties: " 4125 "reset retry count set to %d\n", 4126 un->un_reset_retry_count); 4127 } 4128 4129 /* Purple reservation release timeout */ 4130 if (flags & SD_CONF_BSET_RSV_REL_TIME) { 4131 ASSERT(prop_list != NULL); 4132 un->un_reserve_release_time = 4133 prop_list->sdt_reserv_rel_time; 4134 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4135 "sd_set_vers1_properties: " 4136 "reservation release timeout set to %d\n", 4137 un->un_reserve_release_time); 4138 } 4139 4140 /* 4141 * Driver flag telling the driver to verify that no commands are pending 4142 * for a device before issuing a Test Unit Ready. This is a workaround 4143 * for a firmware bug in some Seagate eliteI drives. 4144 */ 4145 if (flags & SD_CONF_BSET_TUR_CHECK) { 4146 un->un_f_cfg_tur_check = TRUE; 4147 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4148 "sd_set_vers1_properties: tur queue check set\n"); 4149 } 4150 4151 if (flags & SD_CONF_BSET_MIN_THROTTLE) { 4152 un->un_min_throttle = prop_list->sdt_min_throttle; 4153 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4154 "sd_set_vers1_properties: min throttle set to %d\n", 4155 un->un_min_throttle); 4156 } 4157 4158 if (flags & SD_CONF_BSET_DISKSORT_DISABLED) { 4159 un->un_f_disksort_disabled = 4160 (prop_list->sdt_disk_sort_dis != 0) ? 4161 TRUE : FALSE; 4162 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4163 "sd_set_vers1_properties: disksort disabled " 4164 "flag set to %d\n", 4165 prop_list->sdt_disk_sort_dis); 4166 } 4167 4168 if (flags & SD_CONF_BSET_LUN_RESET_ENABLED) { 4169 un->un_f_lun_reset_enabled = 4170 (prop_list->sdt_lun_reset_enable != 0) ? 
4171 TRUE : FALSE; 4172 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4173 "sd_set_vers1_properties: lun reset enabled " 4174 "flag set to %d\n", 4175 prop_list->sdt_lun_reset_enable); 4176 } 4177 4178 if (flags & SD_CONF_BSET_CACHE_IS_NV) { 4179 un->un_f_suppress_cache_flush = 4180 (prop_list->sdt_suppress_cache_flush != 0) ? 4181 TRUE : FALSE; 4182 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4183 "sd_set_vers1_properties: suppress_cache_flush " 4184 "flag set to %d\n", 4185 prop_list->sdt_suppress_cache_flush); 4186 } 4187 4188 /* 4189 * Validate the throttle values. 4190 * If any of the numbers are invalid, set everything to defaults. 4191 */ 4192 if ((un->un_throttle < SD_LOWEST_VALID_THROTTLE) || 4193 (un->un_min_throttle < SD_LOWEST_VALID_THROTTLE) || 4194 (un->un_min_throttle > un->un_throttle)) { 4195 un->un_saved_throttle = un->un_throttle = sd_max_throttle; 4196 un->un_min_throttle = sd_min_throttle; 4197 } 4198 } 4199 4200 /* 4201 * Function: sd_is_lsi() 4202 * 4203 * Description: Check for lsi devices, step through the static device 4204 * table to match vid/pid. 4205 * 4206 * Args: un - ptr to sd_lun 4207 * 4208 * Notes: When creating new LSI property, need to add the new LSI property 4209 * to this function. 4210 */ 4211 static void 4212 sd_is_lsi(struct sd_lun *un) 4213 { 4214 char *id = NULL; 4215 int table_index; 4216 int idlen; 4217 void *prop; 4218 4219 ASSERT(un != NULL); 4220 for (table_index = 0; table_index < sd_disk_table_size; 4221 table_index++) { 4222 id = sd_disk_table[table_index].device_id; 4223 idlen = strlen(id); 4224 if (idlen == 0) { 4225 continue; 4226 } 4227 4228 if (sd_sdconf_id_match(un, id, idlen) == SD_SUCCESS) { 4229 prop = sd_disk_table[table_index].properties; 4230 if (prop == &lsi_properties || 4231 prop == &lsi_oem_properties || 4232 prop == &lsi_properties_scsi || 4233 prop == &symbios_properties) { 4234 un->un_f_cfg_is_lsi = TRUE; 4235 } 4236 break; 4237 } 4238 } 4239 } 4240 4241 /* 4242 * Function: sd_get_physical_geometry 4243 * 4244 * Description: Retrieve the MODE SENSE page 3 (Format Device Page) and 4245 * MODE SENSE page 4 (Rigid Disk Drive Geometry Page) from the 4246 * target, and use this information to initialize the physical 4247 * geometry cache specified by pgeom_p. 4248 * 4249 * MODE SENSE is an optional command, so failure in this case 4250 * does not necessarily denote an error. We want to use the 4251 * MODE SENSE commands to derive the physical geometry of the 4252 * device, but if either command fails, the logical geometry is 4253 * used as the fallback for disk label geometry in cmlb. 4254 * 4255 * This requires that un->un_blockcount and un->un_tgt_blocksize 4256 * have already been initialized for the current target and 4257 * that the current values be passed as args so that we don't 4258 * end up ever trying to use -1 as a valid value. This could 4259 * happen if either value is reset while we're not holding 4260 * the mutex. 4261 * 4262 * Arguments: un - driver soft state (unit) structure 4263 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 4264 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 4265 * to use the USCSI "direct" chain and bypass the normal 4266 * command waitq. 4267 * 4268 * Context: Kernel thread only (can sleep). 
4269 */ 4270 4271 static int 4272 sd_get_physical_geometry(struct sd_lun *un, cmlb_geom_t *pgeom_p, 4273 diskaddr_t capacity, int lbasize, int path_flag) 4274 { 4275 struct mode_format *page3p; 4276 struct mode_geometry *page4p; 4277 struct mode_header *headerp; 4278 int sector_size; 4279 int nsect; 4280 int nhead; 4281 int ncyl; 4282 int intrlv; 4283 int spc; 4284 diskaddr_t modesense_capacity; 4285 int rpm; 4286 int bd_len; 4287 int mode_header_length; 4288 uchar_t *p3bufp; 4289 uchar_t *p4bufp; 4290 int cdbsize; 4291 int ret = EIO; 4292 4293 ASSERT(un != NULL); 4294 4295 if (lbasize == 0) { 4296 if (ISCD(un)) { 4297 lbasize = 2048; 4298 } else { 4299 lbasize = un->un_sys_blocksize; 4300 } 4301 } 4302 pgeom_p->g_secsize = (unsigned short)lbasize; 4303 4304 /* 4305 * If the unit is a cd/dvd drive MODE SENSE page three 4306 * and MODE SENSE page four are reserved (see SBC spec 4307 * and MMC spec). To prevent soft errors just return 4308 * using the default LBA size. 4309 */ 4310 if (ISCD(un)) 4311 return (ret); 4312 4313 cdbsize = (un->un_f_cfg_is_atapi == TRUE) ? CDB_GROUP2 : CDB_GROUP0; 4314 4315 /* 4316 * Retrieve MODE SENSE page 3 - Format Device Page 4317 */ 4318 p3bufp = kmem_zalloc(SD_MODE_SENSE_PAGE3_LENGTH, KM_SLEEP); 4319 if (sd_send_scsi_MODE_SENSE(un, cdbsize, p3bufp, 4320 SD_MODE_SENSE_PAGE3_LENGTH, SD_MODE_SENSE_PAGE3_CODE, path_flag) 4321 != 0) { 4322 SD_ERROR(SD_LOG_COMMON, un, 4323 "sd_get_physical_geometry: mode sense page 3 failed\n"); 4324 goto page3_exit; 4325 } 4326 4327 /* 4328 * Determine size of Block Descriptors in order to locate the mode 4329 * page data. ATAPI devices return 0, SCSI devices should return 4330 * MODE_BLK_DESC_LENGTH. 4331 */ 4332 headerp = (struct mode_header *)p3bufp; 4333 if (un->un_f_cfg_is_atapi == TRUE) { 4334 struct mode_header_grp2 *mhp = 4335 (struct mode_header_grp2 *)headerp; 4336 mode_header_length = MODE_HEADER_LENGTH_GRP2; 4337 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 4338 } else { 4339 mode_header_length = MODE_HEADER_LENGTH; 4340 bd_len = ((struct mode_header *)headerp)->bdesc_length; 4341 } 4342 4343 if (bd_len > MODE_BLK_DESC_LENGTH) { 4344 SD_ERROR(SD_LOG_COMMON, un, "sd_get_physical_geometry: " 4345 "received unexpected bd_len of %d, page3\n", bd_len); 4346 goto page3_exit; 4347 } 4348 4349 page3p = (struct mode_format *) 4350 ((caddr_t)headerp + mode_header_length + bd_len); 4351 4352 if (page3p->mode_page.code != SD_MODE_SENSE_PAGE3_CODE) { 4353 SD_ERROR(SD_LOG_COMMON, un, "sd_get_physical_geometry: " 4354 "mode sense pg3 code mismatch %d\n", 4355 page3p->mode_page.code); 4356 goto page3_exit; 4357 } 4358 4359 /* 4360 * Use this physical geometry data only if BOTH MODE SENSE commands 4361 * complete successfully; otherwise, revert to the logical geometry. 4362 * So, we need to save everything in temporary variables. 
4363 */ 4364 sector_size = BE_16(page3p->data_bytes_sect); 4365 4366 /* 4367 * 1243403: The NEC D38x7 drives do not support MODE SENSE sector size 4368 */ 4369 if (sector_size == 0) { 4370 sector_size = un->un_sys_blocksize; 4371 } else { 4372 sector_size &= ~(un->un_sys_blocksize - 1); 4373 } 4374 4375 nsect = BE_16(page3p->sect_track); 4376 intrlv = BE_16(page3p->interleave); 4377 4378 SD_INFO(SD_LOG_COMMON, un, 4379 "sd_get_physical_geometry: Format Parameters (page 3)\n"); 4380 SD_INFO(SD_LOG_COMMON, un, 4381 " mode page: %d; nsect: %d; sector size: %d;\n", 4382 page3p->mode_page.code, nsect, sector_size); 4383 SD_INFO(SD_LOG_COMMON, un, 4384 " interleave: %d; track skew: %d; cylinder skew: %d;\n", intrlv, 4385 BE_16(page3p->track_skew), 4386 BE_16(page3p->cylinder_skew)); 4387 4388 4389 /* 4390 * Retrieve MODE SENSE page 4 - Rigid Disk Drive Geometry Page 4391 */ 4392 p4bufp = kmem_zalloc(SD_MODE_SENSE_PAGE4_LENGTH, KM_SLEEP); 4393 if (sd_send_scsi_MODE_SENSE(un, cdbsize, p4bufp, 4394 SD_MODE_SENSE_PAGE4_LENGTH, SD_MODE_SENSE_PAGE4_CODE, path_flag) 4395 != 0) { 4396 SD_ERROR(SD_LOG_COMMON, un, 4397 "sd_get_physical_geometry: mode sense page 4 failed\n"); 4398 goto page4_exit; 4399 } 4400 4401 /* 4402 * Determine size of Block Descriptors in order to locate the mode 4403 * page data. ATAPI devices return 0, SCSI devices should return 4404 * MODE_BLK_DESC_LENGTH. 4405 */ 4406 headerp = (struct mode_header *)p4bufp; 4407 if (un->un_f_cfg_is_atapi == TRUE) { 4408 struct mode_header_grp2 *mhp = 4409 (struct mode_header_grp2 *)headerp; 4410 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 4411 } else { 4412 bd_len = ((struct mode_header *)headerp)->bdesc_length; 4413 } 4414 4415 if (bd_len > MODE_BLK_DESC_LENGTH) { 4416 SD_ERROR(SD_LOG_COMMON, un, "sd_get_physical_geometry: " 4417 "received unexpected bd_len of %d, page4\n", bd_len); 4418 goto page4_exit; 4419 } 4420 4421 page4p = (struct mode_geometry *) 4422 ((caddr_t)headerp + mode_header_length + bd_len); 4423 4424 if (page4p->mode_page.code != SD_MODE_SENSE_PAGE4_CODE) { 4425 SD_ERROR(SD_LOG_COMMON, un, "sd_get_physical_geometry: " 4426 "mode sense pg4 code mismatch %d\n", 4427 page4p->mode_page.code); 4428 goto page4_exit; 4429 } 4430 4431 /* 4432 * Stash the data now, after we know that both commands completed. 4433 */ 4434 4435 4436 nhead = (int)page4p->heads; /* uchar, so no conversion needed */ 4437 spc = nhead * nsect; 4438 ncyl = (page4p->cyl_ub << 16) + (page4p->cyl_mb << 8) + page4p->cyl_lb; 4439 rpm = BE_16(page4p->rpm); 4440 4441 modesense_capacity = spc * ncyl; 4442 4443 SD_INFO(SD_LOG_COMMON, un, 4444 "sd_get_physical_geometry: Geometry Parameters (page 4)\n"); 4445 SD_INFO(SD_LOG_COMMON, un, 4446 " cylinders: %d; heads: %d; rpm: %d;\n", ncyl, nhead, rpm); 4447 SD_INFO(SD_LOG_COMMON, un, 4448 " computed capacity(h*s*c): %d;\n", modesense_capacity); 4449 SD_INFO(SD_LOG_COMMON, un, " pgeom_p: %p; read cap: %d\n", 4450 (void *)pgeom_p, capacity); 4451 4452 /* 4453 * Compensate if the drive's geometry is not rectangular, i.e., 4454 * the product of C * H * S returned by MODE SENSE >= that returned 4455 * by read capacity. This is an idiosyncrasy of the original x86 4456 * disk subsystem. 
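 * For example (illustrative numbers): with nsect = 63 and nhead = 16,
 * spc = 1008; if MODE SENSE implies ncyl = 2100 (modesense_capacity =
 * 2116800) while READ CAPACITY returned 2097152 blocks, then g_acyl =
 * (2116800 - 2097152 + 1007) / 1008 = 20 and g_ncyl = 2100 - 20 = 2080.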
4457 */ 4458 if (modesense_capacity >= capacity) { 4459 SD_INFO(SD_LOG_COMMON, un, 4460 "sd_get_physical_geometry: adjusting acyl; " 4461 "old: %d; new: %d\n", pgeom_p->g_acyl, 4462 (modesense_capacity - capacity + spc - 1) / spc); 4463 if (sector_size != 0) { 4464 /* 1243403: NEC D38x7 drives don't support sec size */ 4465 pgeom_p->g_secsize = (unsigned short)sector_size; 4466 } 4467 pgeom_p->g_nsect = (unsigned short)nsect; 4468 pgeom_p->g_nhead = (unsigned short)nhead; 4469 pgeom_p->g_capacity = capacity; 4470 pgeom_p->g_acyl = 4471 (modesense_capacity - pgeom_p->g_capacity + spc - 1) / spc; 4472 pgeom_p->g_ncyl = ncyl - pgeom_p->g_acyl; 4473 } 4474 4475 pgeom_p->g_rpm = (unsigned short)rpm; 4476 pgeom_p->g_intrlv = (unsigned short)intrlv; 4477 ret = 0; 4478 4479 SD_INFO(SD_LOG_COMMON, un, 4480 "sd_get_physical_geometry: mode sense geometry:\n"); 4481 SD_INFO(SD_LOG_COMMON, un, 4482 " nsect: %d; sector size: %d; interlv: %d\n", 4483 nsect, sector_size, intrlv); 4484 SD_INFO(SD_LOG_COMMON, un, 4485 " nhead: %d; ncyl: %d; rpm: %d; capacity(ms): %d\n", 4486 nhead, ncyl, rpm, modesense_capacity); 4487 SD_INFO(SD_LOG_COMMON, un, 4488 "sd_get_physical_geometry: (cached)\n"); 4489 SD_INFO(SD_LOG_COMMON, un, 4490 " ncyl: %ld; acyl: %d; nhead: %d; nsect: %d\n", 4491 pgeom_p->g_ncyl, pgeom_p->g_acyl, 4492 pgeom_p->g_nhead, pgeom_p->g_nsect); 4493 SD_INFO(SD_LOG_COMMON, un, 4494 " lbasize: %d; capacity: %ld; intrlv: %d; rpm: %d\n", 4495 pgeom_p->g_secsize, pgeom_p->g_capacity, 4496 pgeom_p->g_intrlv, pgeom_p->g_rpm); 4497 4498 page4_exit: 4499 kmem_free(p4bufp, SD_MODE_SENSE_PAGE4_LENGTH); 4500 page3_exit: 4501 kmem_free(p3bufp, SD_MODE_SENSE_PAGE3_LENGTH); 4502 4503 return (ret); 4504 } 4505 4506 /* 4507 * Function: sd_get_virtual_geometry 4508 * 4509 * Description: Ask the controller to tell us about the target device. 4510 * 4511 * Arguments: un - pointer to softstate 4512 * capacity - disk capacity in #blocks 4513 * lbasize - disk block size in bytes 4514 * 4515 * Context: Kernel thread only 4516 */ 4517 4518 static int 4519 sd_get_virtual_geometry(struct sd_lun *un, cmlb_geom_t *lgeom_p, 4520 diskaddr_t capacity, int lbasize) 4521 { 4522 uint_t geombuf; 4523 int spc; 4524 4525 ASSERT(un != NULL); 4526 4527 /* Set sector size, and total number of sectors */ 4528 (void) scsi_ifsetcap(SD_ADDRESS(un), "sector-size", lbasize, 1); 4529 (void) scsi_ifsetcap(SD_ADDRESS(un), "total-sectors", capacity, 1); 4530 4531 /* Let the HBA tell us its geometry */ 4532 geombuf = (uint_t)scsi_ifgetcap(SD_ADDRESS(un), "geometry", 1); 4533 4534 /* A value of -1 indicates an undefined "geometry" property */ 4535 if (geombuf == (-1)) { 4536 return (EINVAL); 4537 } 4538 4539 /* Initialize the logical geometry cache. */ 4540 lgeom_p->g_nhead = (geombuf >> 16) & 0xffff; 4541 lgeom_p->g_nsect = geombuf & 0xffff; 4542 lgeom_p->g_secsize = un->un_sys_blocksize; 4543 4544 spc = lgeom_p->g_nhead * lgeom_p->g_nsect; 4545 4546 /* 4547 * Note: The driver originally converted the capacity value from 4548 * target blocks to system blocks. However, the capacity value passed 4549 * to this routine is already in terms of system blocks (this scaling 4550 * is done when the READ CAPACITY command is issued and processed). 4551 * This 'error' may have gone undetected because the usage of g_ncyl 4552 * (which is based upon g_capacity) is very limited within the driver 4553 */ 4554 lgeom_p->g_capacity = capacity; 4555 4556 /* 4557 * Set ncyl to zero if the hba returned a zero nhead or nsect value. 
The
4558 * hba may return zero values if the device has been removed.
4559 */
4560 if (spc == 0) {
4561 lgeom_p->g_ncyl = 0;
4562 } else {
4563 lgeom_p->g_ncyl = lgeom_p->g_capacity / spc;
4564 }
4565 lgeom_p->g_acyl = 0;
4566
4567 SD_INFO(SD_LOG_COMMON, un, "sd_get_virtual_geometry: (cached)\n");
4568 return (0);
4569
4570 }
4571 /*
4572 * Function: sd_update_block_info
4573 *
4574 * Description: Calculate a byte count to sector count bitshift value
4575 * from sector size.
4576 *
4577 * Arguments: un: unit struct.
4578 * lbasize: new target sector size
4579 * capacity: new target capacity, i.e., block count
4580 *
4581 * Context: Kernel thread context
4582 */
4583
4584 static void
4585 sd_update_block_info(struct sd_lun *un, uint32_t lbasize, uint64_t capacity)
4586 {
4587 uint_t dblk;
4588
4589 if (lbasize != 0) {
4590 un->un_tgt_blocksize = lbasize;
4591 un->un_f_tgt_blocksize_is_valid = TRUE;
4592 }
4593
4594 if (capacity != 0) {
4595 un->un_blockcount = capacity;
4596 un->un_f_blockcount_is_valid = TRUE;
4597 }
4598
4599 /*
4600 * Update device capacity properties.
4601 *
4602 * 'device-nblocks' number of blocks in target's units
4603 * 'device-blksize' data bearing size of target's block
4604 *
4605 * NOTE: math is complicated by the fact that un_tgt_blocksize may
4606 * not be a power of two for checksumming disks with 520/528 byte
4607 * sectors.
4608 */
4609 if (un->un_f_tgt_blocksize_is_valid &&
4610 un->un_f_blockcount_is_valid &&
4611 un->un_sys_blocksize) {
4612 dblk = un->un_tgt_blocksize / un->un_sys_blocksize;
4613 (void) ddi_prop_update_int64(DDI_DEV_T_NONE, SD_DEVINFO(un),
4614 "device-nblocks", un->un_blockcount / dblk);
4615 /*
4616 * To save memory, only define "device-blksize" when its
4617 * value is different from the default DEV_BSIZE value.
4618 */
4619 if ((un->un_sys_blocksize * dblk) != DEV_BSIZE)
4620 (void) ddi_prop_update_int(DDI_DEV_T_NONE,
4621 SD_DEVINFO(un), "device-blksize",
4622 un->un_sys_blocksize * dblk);
4623 }
4624 }
4625
4626
4627 /*
4628 * Function: sd_register_devid
4629 *
4630 * Description: This routine will obtain the device id information from the
4631 * target, obtain the serial number, and register the device
4632 * id with the ddi framework.
4633 *
4634 * Arguments: devi - the system's dev_info_t for the device.
4635 * un - driver soft state (unit) structure
4636 * reservation_flag - indicates if a reservation conflict
4637 * occurred during attach
4638 *
4639 * Context: Kernel Thread
4640 */
4641 static void
4642 sd_register_devid(struct sd_lun *un, dev_info_t *devi, int reservation_flag)
4643 {
4644 int rval = 0;
4645 uchar_t *inq80 = NULL;
4646 size_t inq80_len = MAX_INQUIRY_SIZE;
4647 size_t inq80_resid = 0;
4648 uchar_t *inq83 = NULL;
4649 size_t inq83_len = MAX_INQUIRY_SIZE;
4650 size_t inq83_resid = 0;
4651 int dlen, len;
4652 char *sn;
4653
4654 ASSERT(un != NULL);
4655 ASSERT(mutex_owned(SD_MUTEX(un)));
4656 ASSERT((SD_DEVINFO(un)) == devi);
4657
4658 /*
4659 * If transport has already registered a devid for this target
4660 * then that takes precedence over the driver's determination
4661 * of the devid.
4662 */
4663 if (ddi_devid_get(SD_DEVINFO(un), &un->un_devid) == DDI_SUCCESS) {
4664 ASSERT(un->un_devid);
4665 return; /* use devid registered by the transport */
4666 }
4667
4668 /*
4669 * This is the case of antiquated Sun disk drives that have the
4670 * FAB_DEVID property set in the disk_table.
These drives
4671 * manage their devids by storing them in the last 2 available sectors
4672 * on the drive and have them fabricated by the ddi layer by calling
4673 * ddi_devid_init and passing the DEVID_FAB flag.
4674 */
4675 if (un->un_f_opt_fab_devid == TRUE) {
4676 /*
4677 * Depending on EINVAL isn't reliable, since a reserved disk
4678 * may result in invalid geometry, so check to make sure a
4679 * reservation conflict did not occur during attach.
4680 */
4681 if ((sd_get_devid(un) == EINVAL) &&
4682 (reservation_flag != SD_TARGET_IS_RESERVED)) {
4683 /*
4684 * The devid is invalid AND there is no reservation
4685 * conflict. Fabricate a new devid.
4686 */
4687 (void) sd_create_devid(un);
4688 }
4689
4690 /* Register the devid if it exists */
4691 if (un->un_devid != NULL) {
4692 (void) ddi_devid_register(SD_DEVINFO(un),
4693 un->un_devid);
4694 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4695 "sd_register_devid: Devid Fabricated\n");
4696 }
4697 return;
4698 }
4699
4700 /*
4701 * We check the availability of the World Wide Name (0x83) and Unit
4702 * Serial Number (0x80) pages in sd_check_vpd_page_support(), and using
4703 * un_vpd_page_mask from them, we decide which way to get the WWN. If
4704 * 0x83 is available, that is the best choice. Our next choice is
4705 * 0x80. If neither is available, we munge the devid from the device
4706 * vid/pid/serial # for Sun qualified disks, or use the ddi framework
4707 * to fabricate a devid for non-Sun qualified disks.
4708 */
4709 if (sd_check_vpd_page_support(un) == 0) {
4710 /* collect page 80 data if available */
4711 if (un->un_vpd_page_mask & SD_VPD_UNIT_SERIAL_PG) {
4712
4713 mutex_exit(SD_MUTEX(un));
4714 inq80 = kmem_zalloc(inq80_len, KM_SLEEP);
4715 rval = sd_send_scsi_INQUIRY(un, inq80, inq80_len,
4716 0x01, 0x80, &inq80_resid);
4717
4718 if (rval != 0) {
4719 kmem_free(inq80, inq80_len);
4720 inq80 = NULL;
4721 inq80_len = 0;
4722 } else if (ddi_prop_exists(
4723 DDI_DEV_T_NONE, SD_DEVINFO(un),
4724 DDI_PROP_NOTPROM | DDI_PROP_DONTPASS,
4725 INQUIRY_SERIAL_NO) == 0) {
4726 /*
4727 * If we don't already have a serial number
4728 * property, do quick verify of data returned
4729 * and define property.
4730 */
4731 dlen = inq80_len - inq80_resid;
4732 len = (size_t)inq80[3];
4733 if ((dlen >= 4) && ((len + 4) <= dlen)) {
4734 /*
4735 * Ensure sn termination, skip leading
4736 * blanks, and create property
4737 * 'inquiry-serial-no'.
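 * (Per the unit serial number VPD page layout, byte 3
 * holds the page length and the ASCII serial number
 * begins at byte 4; dlen and len above come from those
 * offsets.)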
4738 */
4739 sn = (char *)&inq80[4];
4740 sn[len] = 0;
4741 while (*sn && (*sn == ' '))
4742 sn++;
4743 if (*sn) {
4744 (void) ddi_prop_update_string(
4745 DDI_DEV_T_NONE,
4746 SD_DEVINFO(un),
4747 INQUIRY_SERIAL_NO, sn);
4748 }
4749 }
4750 }
4751 mutex_enter(SD_MUTEX(un));
4752 }
4753
4754 /* collect page 83 data if available */
4755 if (un->un_vpd_page_mask & SD_VPD_DEVID_WWN_PG) {
4756 mutex_exit(SD_MUTEX(un));
4757 inq83 = kmem_zalloc(inq83_len, KM_SLEEP);
4758 rval = sd_send_scsi_INQUIRY(un, inq83, inq83_len,
4759 0x01, 0x83, &inq83_resid);
4760
4761 if (rval != 0) {
4762 kmem_free(inq83, inq83_len);
4763 inq83 = NULL;
4764 inq83_len = 0;
4765 }
4766 mutex_enter(SD_MUTEX(un));
4767 }
4768 }
4769
4770 /* encode best devid possible based on data available */
4771 if (ddi_devid_scsi_encode(DEVID_SCSI_ENCODE_VERSION_LATEST,
4772 (char *)ddi_driver_name(SD_DEVINFO(un)),
4773 (uchar_t *)SD_INQUIRY(un), sizeof (*SD_INQUIRY(un)),
4774 inq80, inq80_len - inq80_resid, inq83, inq83_len -
4775 inq83_resid, &un->un_devid) == DDI_SUCCESS) {
4776
4777 /* devid successfully encoded, register devid */
4778 (void) ddi_devid_register(SD_DEVINFO(un), un->un_devid);
4779
4780 } else {
4781 /*
4782 * Unable to encode a devid based on data available.
4783 * This is not a Sun qualified disk. Older Sun disk
4784 * drives that have the SD_FAB_DEVID property
4785 * set in the disk_table and non-Sun qualified
4786 * disks are treated in the same manner. These
4787 * drives manage their devids by storing them in
4788 * the last 2 available sectors on the drive and
4789 * have them fabricated by the ddi layer by
4790 * calling ddi_devid_init and passing the
4791 * DEVID_FAB flag.
4792 * Create a fabricated devid only if one does not
4793 * already exist.
4794 */
4795 if (sd_get_devid(un) == EINVAL) {
4796 (void) sd_create_devid(un);
4797 }
4798 un->un_f_opt_fab_devid = TRUE;
4799
4800 /* Register the devid if it exists */
4801 if (un->un_devid != NULL) {
4802 (void) ddi_devid_register(SD_DEVINFO(un),
4803 un->un_devid);
4804 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4805 "sd_register_devid: devid fabricated using "
4806 "ddi framework\n");
4807 }
4808 }
4809
4810 /* clean up resources */
4811 if (inq80 != NULL) {
4812 kmem_free(inq80, inq80_len);
4813 }
4814 if (inq83 != NULL) {
4815 kmem_free(inq83, inq83_len);
4816 }
4817 }
4818
4819
4820
4821 /*
4822 * Function: sd_get_devid
4823 *
4824 * Description: This routine will return 0 if a valid device id has been
4825 * obtained from the target and stored in the soft state. If a
4826 * valid device id has not been previously read and stored, a
4827 * read attempt will be made.
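 *
 * The on-disk format is a struct dk_devid: revision bytes,
 * the encoded devid, and a checksum in the final word,
 * computed as the XOR of all preceding 4-byte words of the
 * sector (see the loops below and DKD_GETCHKSUM /
 * DKD_FORMCHKSUM).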
4828 * 4829 * Arguments: un - driver soft state (unit) structure 4830 * 4831 * Return Code: 0 if we successfully get the device id 4832 * 4833 * Context: Kernel Thread 4834 */ 4835 4836 static int 4837 sd_get_devid(struct sd_lun *un) 4838 { 4839 struct dk_devid *dkdevid; 4840 ddi_devid_t tmpid; 4841 uint_t *ip; 4842 size_t sz; 4843 diskaddr_t blk; 4844 int status; 4845 int chksum; 4846 int i; 4847 size_t buffer_size; 4848 4849 ASSERT(un != NULL); 4850 ASSERT(mutex_owned(SD_MUTEX(un))); 4851 4852 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_get_devid: entry: un: 0x%p\n", 4853 un); 4854 4855 if (un->un_devid != NULL) { 4856 return (0); 4857 } 4858 4859 mutex_exit(SD_MUTEX(un)); 4860 if (cmlb_get_devid_block(un->un_cmlbhandle, &blk, 4861 (void *)SD_PATH_DIRECT) != 0) { 4862 mutex_enter(SD_MUTEX(un)); 4863 return (EINVAL); 4864 } 4865 4866 /* 4867 * Read and verify device id, stored in the reserved cylinders at the 4868 * end of the disk. Backup label is on the odd sectors of the last 4869 * track of the last cylinder. Device id will be on track of the next 4870 * to last cylinder. 4871 */ 4872 mutex_enter(SD_MUTEX(un)); 4873 buffer_size = SD_REQBYTES2TGTBYTES(un, sizeof (struct dk_devid)); 4874 mutex_exit(SD_MUTEX(un)); 4875 dkdevid = kmem_alloc(buffer_size, KM_SLEEP); 4876 status = sd_send_scsi_READ(un, dkdevid, buffer_size, blk, 4877 SD_PATH_DIRECT); 4878 if (status != 0) { 4879 goto error; 4880 } 4881 4882 /* Validate the revision */ 4883 if ((dkdevid->dkd_rev_hi != DK_DEVID_REV_MSB) || 4884 (dkdevid->dkd_rev_lo != DK_DEVID_REV_LSB)) { 4885 status = EINVAL; 4886 goto error; 4887 } 4888 4889 /* Calculate the checksum */ 4890 chksum = 0; 4891 ip = (uint_t *)dkdevid; 4892 for (i = 0; i < ((un->un_sys_blocksize - sizeof (int))/sizeof (int)); 4893 i++) { 4894 chksum ^= ip[i]; 4895 } 4896 4897 /* Compare the checksums */ 4898 if (DKD_GETCHKSUM(dkdevid) != chksum) { 4899 status = EINVAL; 4900 goto error; 4901 } 4902 4903 /* Validate the device id */ 4904 if (ddi_devid_valid((ddi_devid_t)&dkdevid->dkd_devid) != DDI_SUCCESS) { 4905 status = EINVAL; 4906 goto error; 4907 } 4908 4909 /* 4910 * Store the device id in the driver soft state 4911 */ 4912 sz = ddi_devid_sizeof((ddi_devid_t)&dkdevid->dkd_devid); 4913 tmpid = kmem_alloc(sz, KM_SLEEP); 4914 4915 mutex_enter(SD_MUTEX(un)); 4916 4917 un->un_devid = tmpid; 4918 bcopy(&dkdevid->dkd_devid, un->un_devid, sz); 4919 4920 kmem_free(dkdevid, buffer_size); 4921 4922 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_get_devid: exit: un:0x%p\n", un); 4923 4924 return (status); 4925 error: 4926 mutex_enter(SD_MUTEX(un)); 4927 kmem_free(dkdevid, buffer_size); 4928 return (status); 4929 } 4930 4931 4932 /* 4933 * Function: sd_create_devid 4934 * 4935 * Description: This routine will fabricate the device id and write it 4936 * to the disk. 
4937 *
4938 * Arguments: un - driver soft state (unit) structure
4939 *
4940 * Return Code: value of the fabricated device id
4941 *
4942 * Context: Kernel Thread
4943 */
4944
4945 static ddi_devid_t
4946 sd_create_devid(struct sd_lun *un)
4947 {
4948 ASSERT(un != NULL);
4949
4950 /* Fabricate the devid */
4951 if (ddi_devid_init(SD_DEVINFO(un), DEVID_FAB, 0, NULL, &un->un_devid)
4952 == DDI_FAILURE) {
4953 return (NULL);
4954 }
4955
4956 /* Write the devid to disk */
4957 if (sd_write_deviceid(un) != 0) {
4958 ddi_devid_free(un->un_devid);
4959 un->un_devid = NULL;
4960 }
4961
4962 return (un->un_devid);
4963 }
4964
4965
4966 /*
4967 * Function: sd_write_deviceid
4968 *
4969 * Description: This routine will write the device id to the disk
4970 * reserved sector.
4971 *
4972 * Arguments: un - driver soft state (unit) structure
4973 *
4974 * Return Code: -1 if the devid block cannot be determined, otherwise
4975 * the value returned by sd_send_scsi_WRITE
4976 *
4977 * Context: Kernel Thread
4978 */
4979
4980 static int
4981 sd_write_deviceid(struct sd_lun *un)
4982 {
4983 struct dk_devid *dkdevid;
4984 diskaddr_t blk;
4985 uint_t *ip, chksum;
4986 int status;
4987 int i;
4988
4989 ASSERT(mutex_owned(SD_MUTEX(un)));
4990
4991 mutex_exit(SD_MUTEX(un));
4992 if (cmlb_get_devid_block(un->un_cmlbhandle, &blk,
4993 (void *)SD_PATH_DIRECT) != 0) {
4994 mutex_enter(SD_MUTEX(un));
4995 return (-1);
4996 }
4997
4998
4999 /* Allocate the buffer */
5000 dkdevid = kmem_zalloc(un->un_sys_blocksize, KM_SLEEP);
5001
5002 /* Fill in the revision */
5003 dkdevid->dkd_rev_hi = DK_DEVID_REV_MSB;
5004 dkdevid->dkd_rev_lo = DK_DEVID_REV_LSB;
5005
5006 /* Copy in the device id */
5007 mutex_enter(SD_MUTEX(un));
5008 bcopy(un->un_devid, &dkdevid->dkd_devid,
5009 ddi_devid_sizeof(un->un_devid));
5010 mutex_exit(SD_MUTEX(un));
5011
5012 /* Calculate the checksum */
5013 chksum = 0;
5014 ip = (uint_t *)dkdevid;
5015 for (i = 0; i < ((un->un_sys_blocksize - sizeof (int))/sizeof (int));
5016 i++) {
5017 chksum ^= ip[i];
5018 }
5019
5020 /* Fill-in checksum */
5021 DKD_FORMCHKSUM(chksum, dkdevid);
5022
5023 /* Write the reserved sector */
5024 status = sd_send_scsi_WRITE(un, dkdevid, un->un_sys_blocksize, blk,
5025 SD_PATH_DIRECT);
5026
5027 kmem_free(dkdevid, un->un_sys_blocksize);
5028
5029 mutex_enter(SD_MUTEX(un));
5030 return (status);
5031 }
5032
5033
5034 /*
5035 * Function: sd_check_vpd_page_support
5036 *
5037 * Description: This routine sends an inquiry command with the EVPD bit set and
5038 * a page code of 0x00 to the device. It is used to determine which
5039 * vital product pages are available to find the devid. We are
5040 * looking for pages 0x83 or 0x80. If we return -1, the
5041 * device does not support VPD pages.
5042 *
5043 * Arguments: un - driver soft state (unit) structure
5044 *
5045 * Return Code: 0 - success
5046 * -1 - device does not support VPD pages
5047 *
5048 * Context: This routine can sleep.
5049 */
5050
5051 static int
5052 sd_check_vpd_page_support(struct sd_lun *un)
5053 {
5054 uchar_t *page_list = NULL;
5055 uchar_t page_length = 0xff; /* Use max possible length */
5056 uchar_t evpd = 0x01; /* Set the EVPD bit */
5057 uchar_t page_code = 0x00; /* Supported VPD Pages */
5058 int rval = 0;
5059 int counter;
5060
5061 ASSERT(un != NULL);
5062 ASSERT(mutex_owned(SD_MUTEX(un)));
5063
5064 mutex_exit(SD_MUTEX(un));
5065
5066 /*
5067 * We'll set the page length to the maximum to save figuring it out
5068 * with an additional call.
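 *
 * For reference, a typical (illustrative) page 0x00 response
 * begins: 00 00 00 04 00 80 83 86 - byte 3 gives the length of
 * the page list and the supported page codes follow in
 * ascending order starting at byte 4.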
5069 */ 5070 page_list = kmem_zalloc(page_length, KM_SLEEP); 5071 5072 rval = sd_send_scsi_INQUIRY(un, page_list, page_length, evpd, 5073 page_code, NULL); 5074 5075 mutex_enter(SD_MUTEX(un)); 5076 5077 /* 5078 * Now we must validate that the device accepted the command, as some 5079 * drives do not support it. If the drive does support it, we will 5080 * return 0, and the supported pages will be in un_vpd_page_mask. If 5081 * not, we return -1. 5082 */ 5083 if ((rval == 0) && (page_list[VPD_MODE_PAGE] == 0x00)) { 5084 /* Loop to find one of the 2 pages we need */ 5085 counter = 4; /* Supported pages start at byte 4, with 0x00 */ 5086 5087 /* 5088 * Pages are returned in ascending order, and 0x83 is what we 5089 * are hoping for. 5090 */ 5091 while ((page_list[counter] <= 0x86) && 5092 (counter <= (page_list[VPD_PAGE_LENGTH] + 5093 VPD_HEAD_OFFSET))) { 5094 /* 5095 * Add 3 because page_list[3] is the number of 5096 * pages minus 3 5097 */ 5098 5099 switch (page_list[counter]) { 5100 case 0x00: 5101 un->un_vpd_page_mask |= SD_VPD_SUPPORTED_PG; 5102 break; 5103 case 0x80: 5104 un->un_vpd_page_mask |= SD_VPD_UNIT_SERIAL_PG; 5105 break; 5106 case 0x81: 5107 un->un_vpd_page_mask |= SD_VPD_OPERATING_PG; 5108 break; 5109 case 0x82: 5110 un->un_vpd_page_mask |= SD_VPD_ASCII_OP_PG; 5111 break; 5112 case 0x83: 5113 un->un_vpd_page_mask |= SD_VPD_DEVID_WWN_PG; 5114 break; 5115 case 0x86: 5116 un->un_vpd_page_mask |= SD_VPD_EXTENDED_DATA_PG; 5117 break; 5118 } 5119 counter++; 5120 } 5121 5122 } else { 5123 rval = -1; 5124 5125 SD_INFO(SD_LOG_ATTACH_DETACH, un, 5126 "sd_check_vpd_page_support: This drive does not implement " 5127 "VPD pages.\n"); 5128 } 5129 5130 kmem_free(page_list, page_length); 5131 5132 return (rval); 5133 } 5134 5135 5136 /* 5137 * Function: sd_setup_pm 5138 * 5139 * Description: Initialize Power Management on the device 5140 * 5141 * Context: Kernel Thread 5142 */ 5143 5144 static void 5145 sd_setup_pm(struct sd_lun *un, dev_info_t *devi) 5146 { 5147 uint_t log_page_size; 5148 uchar_t *log_page_data; 5149 int rval; 5150 5151 /* 5152 * Since we are called from attach, holding a mutex for 5153 * un is unnecessary. Because some of the routines called 5154 * from here require SD_MUTEX to not be held, assert this 5155 * right up front. 5156 */ 5157 ASSERT(!mutex_owned(SD_MUTEX(un))); 5158 /* 5159 * Since the sd device does not have the 'reg' property, 5160 * cpr will not call its DDI_SUSPEND/DDI_RESUME entries. 5161 * The following code is to tell cpr that this device 5162 * DOES need to be suspended and resumed. 5163 */ 5164 (void) ddi_prop_update_string(DDI_DEV_T_NONE, devi, 5165 "pm-hardware-state", "needs-suspend-resume"); 5166 5167 /* 5168 * This complies with the new power management framework 5169 * for certain desktop machines. Create the pm_components 5170 * property as a string array property. 5171 */ 5172 if (un->un_f_pm_supported) { 5173 /* 5174 * not all devices have a motor, try it first. 5175 * some devices may return ILLEGAL REQUEST, some 5176 * will hang 5177 * The following START_STOP_UNIT is used to check if target 5178 * device has a motor. 
        un->un_f_start_stop_supported = TRUE;
        if (sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_START,
            SD_PATH_DIRECT) != 0) {
            un->un_f_start_stop_supported = FALSE;
        }

        /*
         * Create the pm properties anyway, otherwise the parent
         * can't go to sleep.
         */
        (void) sd_create_pm_components(devi, un);
        un->un_f_pm_is_enabled = TRUE;
        return;
    }

    if (!un->un_f_log_sense_supported) {
        un->un_power_level = SD_SPINDLE_ON;
        un->un_f_pm_is_enabled = FALSE;
        return;
    }

    rval = sd_log_page_supported(un, START_STOP_CYCLE_PAGE);

#ifdef SDDEBUG
    if (sd_force_pm_supported) {
        /* Force a successful result */
        rval = 1;
    }
#endif

    /*
     * If the start-stop cycle counter log page is not supported
     * or if the pm-capable property is SD_PM_CAPABLE_FALSE (0)
     * then we should not create the pm_components property.
     */
    if (rval == -1) {
        /*
         * Error.
         * Reading log sense failed, most likely this is
         * an older drive that does not support log sense.
         * If this fails auto-pm is not supported.
         */
        un->un_power_level = SD_SPINDLE_ON;
        un->un_f_pm_is_enabled = FALSE;

    } else if (rval == 0) {
        /*
         * Page not found.
         * The start stop cycle counter is implemented as page
         * START_STOP_CYCLE_PAGE_VU_PAGE (0x31) in older disks. For
         * newer disks it is implemented as START_STOP_CYCLE_PAGE (0xE).
         */
        if (sd_log_page_supported(un, START_STOP_CYCLE_VU_PAGE) == 1) {
            /*
             * Page found, use this one.
             */
            un->un_start_stop_cycle_page = START_STOP_CYCLE_VU_PAGE;
            un->un_f_pm_is_enabled = TRUE;
        } else {
            /*
             * Error or page not found.
             * auto-pm is not supported for this device.
             */
            un->un_power_level = SD_SPINDLE_ON;
            un->un_f_pm_is_enabled = FALSE;
        }
    } else {
        /*
         * Page found, use it.
         */
        un->un_start_stop_cycle_page = START_STOP_CYCLE_PAGE;
        un->un_f_pm_is_enabled = TRUE;
    }


    if (un->un_f_pm_is_enabled == TRUE) {
        log_page_size = START_STOP_CYCLE_COUNTER_PAGE_SIZE;
        log_page_data = kmem_zalloc(log_page_size, KM_SLEEP);

        rval = sd_send_scsi_LOG_SENSE(un, log_page_data,
            log_page_size, un->un_start_stop_cycle_page,
            0x01, 0, SD_PATH_DIRECT);
#ifdef SDDEBUG
        if (sd_force_pm_supported) {
            /* Force a successful result */
            rval = 0;
        }
#endif

        /*
         * If the Log Sense for the start/stop cycle counter page
         * succeeds, then power management is supported and we can
         * enable auto-pm.
         */
        if (rval == 0) {
            (void) sd_create_pm_components(devi, un);
        } else {
            un->un_power_level = SD_SPINDLE_ON;
            un->un_f_pm_is_enabled = FALSE;
        }

        kmem_free(log_page_data, log_page_size);
    }
}


/*
 * Function: sd_create_pm_components
 *
 * Description: Initialize PM property.
 *
 * Context: Kernel thread context
 */

static void
sd_create_pm_components(dev_info_t *devi, struct sd_lun *un)
{
    char *pm_comp[] = { "NAME=spindle-motor", "0=off", "1=on", NULL };

    ASSERT(!mutex_owned(SD_MUTEX(un)));

    if (ddi_prop_update_string_array(DDI_DEV_T_NONE, devi,
        "pm-components", pm_comp, 3) == DDI_PROP_SUCCESS) {
        /*
         * When components are initially created they are idle,
         * power up any non-removables.
         */
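        /*
         * For reference: the pm_comp strings above describe a single
         * component ("spindle-motor") whose power level 0 means off and
         * level 1 means on; these are the same level values the framework
         * later passes to sdpower().
         */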
        /*
         * Note: the return value of pm_raise_power can't be used
         * for determining if PM should be enabled for this device.
         * Even if you check the return values and remove this
         * property created above, the PM framework will not honor the
         * change after the first call to pm_raise_power. Hence,
         * removal of that property does not help if pm_raise_power
         * fails. In the case of removable media, the start/stop
         * will fail if the media is not present.
         */
        if (un->un_f_attach_spinup && (pm_raise_power(SD_DEVINFO(un), 0,
            SD_SPINDLE_ON) == DDI_SUCCESS)) {
            mutex_enter(SD_MUTEX(un));
            un->un_power_level = SD_SPINDLE_ON;
            mutex_enter(&un->un_pm_mutex);
            /* Set to on and not busy. */
            un->un_pm_count = 0;
        } else {
            mutex_enter(SD_MUTEX(un));
            un->un_power_level = SD_SPINDLE_OFF;
            mutex_enter(&un->un_pm_mutex);
            /* Set to off. */
            un->un_pm_count = -1;
        }
        mutex_exit(&un->un_pm_mutex);
        mutex_exit(SD_MUTEX(un));
    } else {
        un->un_power_level = SD_SPINDLE_ON;
        un->un_f_pm_is_enabled = FALSE;
    }
}


/*
 * Function: sd_ddi_suspend
 *
 * Description: Performs system power-down operations. This includes
 *		setting the drive state to indicate it is suspended so
 *		that no new commands will be accepted. Also, wait for
 *		all commands that are in transport or queued to a timer
 *		for retry to complete. All timeout threads are cancelled.
 *
 * Return Code: DDI_FAILURE or DDI_SUCCESS
 *
 * Context: Kernel thread context
 */

static int
sd_ddi_suspend(dev_info_t *devi)
{
    struct sd_lun	*un;
    clock_t		wait_cmds_complete;

    un = ddi_get_soft_state(sd_state, ddi_get_instance(devi));
    if (un == NULL) {
        return (DDI_FAILURE);
    }

    SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: entry\n");

    mutex_enter(SD_MUTEX(un));

    /* Return success if the device is already suspended. */
    if (un->un_state == SD_STATE_SUSPENDED) {
        mutex_exit(SD_MUTEX(un));
        SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: "
            "device already suspended, exiting\n");
        return (DDI_SUCCESS);
    }

    /* Return failure if the device is being used by HA */
    if (un->un_resvd_status &
        (SD_RESERVE | SD_WANT_RESERVE | SD_LOST_RESERVE)) {
        mutex_exit(SD_MUTEX(un));
        SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: "
            "device in use by HA, exiting\n");
        return (DDI_FAILURE);
    }

    /*
     * Return failure if the device is in a resource wait
     * or power changing state.
     */
    if ((un->un_state == SD_STATE_RWAIT) ||
        (un->un_state == SD_STATE_PM_CHANGING)) {
        mutex_exit(SD_MUTEX(un));
        SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: "
            "device in resource wait state, exiting\n");
        return (DDI_FAILURE);
    }


    un->un_save_state = un->un_last_state;
    New_state(un, SD_STATE_SUSPENDED);

    /*
     * Wait for all commands that are in transport or queued to a timer
     * for retry to complete.
     *
     * While waiting, no new commands will be accepted or sent because of
     * the new state we set above.
     *
     * Wait till current operation has completed. If we are in the resource
     * wait state (with an intr outstanding) then we need to wait till the
     * intr completes and starts the next cmd. We want to wait for
     * SD_WAIT_CMDS_COMPLETE seconds before failing the DDI_SUSPEND.
     */
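    /*
     * Note: cv_timedwait(9F) takes an absolute wakeup time expressed in
     * lbolt ticks and returns -1 once that time has passed, which is how
     * the loop below detects that outstanding commands did not drain in
     * time.
     */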
    wait_cmds_complete = ddi_get_lbolt() +
        (sd_wait_cmds_complete * drv_usectohz(1000000));

    while (un->un_ncmds_in_transport != 0) {
        /*
         * Fail if commands do not finish in the specified time.
         */
        if (cv_timedwait(&un->un_disk_busy_cv, SD_MUTEX(un),
            wait_cmds_complete) == -1) {
            /*
             * Undo the state changes made above. Everything
             * must go back to its original value.
             */
            Restore_state(un);
            un->un_last_state = un->un_save_state;
            /* Wake up any threads that might be waiting. */
            cv_broadcast(&un->un_suspend_cv);
            mutex_exit(SD_MUTEX(un));
            SD_ERROR(SD_LOG_IO_PM, un,
                "sd_ddi_suspend: failed due to outstanding cmds\n");
            SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: exiting\n");
            return (DDI_FAILURE);
        }
    }

    /*
     * Cancel SCSI watch thread and timeouts, if any are active
     */

    if (SD_OK_TO_SUSPEND_SCSI_WATCHER(un)) {
        opaque_t temp_token = un->un_swr_token;
        mutex_exit(SD_MUTEX(un));
        scsi_watch_suspend(temp_token);
        mutex_enter(SD_MUTEX(un));
    }

    if (un->un_reset_throttle_timeid != NULL) {
        timeout_id_t temp_id = un->un_reset_throttle_timeid;
        un->un_reset_throttle_timeid = NULL;
        mutex_exit(SD_MUTEX(un));
        (void) untimeout(temp_id);
        mutex_enter(SD_MUTEX(un));
    }

    if (un->un_dcvb_timeid != NULL) {
        timeout_id_t temp_id = un->un_dcvb_timeid;
        un->un_dcvb_timeid = NULL;
        mutex_exit(SD_MUTEX(un));
        (void) untimeout(temp_id);
        mutex_enter(SD_MUTEX(un));
    }

    mutex_enter(&un->un_pm_mutex);
    if (un->un_pm_timeid != NULL) {
        timeout_id_t temp_id = un->un_pm_timeid;
        un->un_pm_timeid = NULL;
        mutex_exit(&un->un_pm_mutex);
        mutex_exit(SD_MUTEX(un));
        (void) untimeout(temp_id);
        mutex_enter(SD_MUTEX(un));
    } else {
        mutex_exit(&un->un_pm_mutex);
    }

    if (un->un_retry_timeid != NULL) {
        timeout_id_t temp_id = un->un_retry_timeid;
        un->un_retry_timeid = NULL;
        mutex_exit(SD_MUTEX(un));
        (void) untimeout(temp_id);
        mutex_enter(SD_MUTEX(un));
    }

    if (un->un_direct_priority_timeid != NULL) {
        timeout_id_t temp_id = un->un_direct_priority_timeid;
        un->un_direct_priority_timeid = NULL;
        mutex_exit(SD_MUTEX(un));
        (void) untimeout(temp_id);
        mutex_enter(SD_MUTEX(un));
    }

    if (un->un_f_is_fibre == TRUE) {
        /*
         * Remove callbacks for insert and remove events
         */
        if (un->un_insert_event != NULL) {
            mutex_exit(SD_MUTEX(un));
            (void) ddi_remove_event_handler(un->un_insert_cb_id);
            mutex_enter(SD_MUTEX(un));
            un->un_insert_event = NULL;
        }

        if (un->un_remove_event != NULL) {
            mutex_exit(SD_MUTEX(un));
            (void) ddi_remove_event_handler(un->un_remove_cb_id);
            mutex_enter(SD_MUTEX(un));
            un->un_remove_event = NULL;
        }
    }

    mutex_exit(SD_MUTEX(un));

    SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: exit\n");

    return (DDI_SUCCESS);
}


/*
 * Function: sd_ddi_pm_suspend
 *
 * Description: Set the drive state to low power.
 *		Someone else is required to actually change the drive
 *		power level.
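 *
 *		Note: unlike sd_ddi_suspend() above, which handles a full
 *		system (cpr) suspend, this routine only marks the soft state
 *		for a device power-level transition; the STOP command itself
 *		is issued later by sdpower().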
 *
 * Arguments: un - driver soft state (unit) structure
 *
 * Return Code: DDI_FAILURE or DDI_SUCCESS
 *
 * Context: Kernel thread context
 */

static int
sd_ddi_pm_suspend(struct sd_lun *un)
{
    ASSERT(un != NULL);
    SD_TRACE(SD_LOG_POWER, un, "sd_ddi_pm_suspend: entry\n");

    ASSERT(!mutex_owned(SD_MUTEX(un)));
    mutex_enter(SD_MUTEX(un));

    /*
     * Exit if power management is not enabled for this device, or if
     * the device is being used by HA.
     */
    if ((un->un_f_pm_is_enabled == FALSE) || (un->un_resvd_status &
        (SD_RESERVE | SD_WANT_RESERVE | SD_LOST_RESERVE))) {
        mutex_exit(SD_MUTEX(un));
        SD_TRACE(SD_LOG_POWER, un, "sd_ddi_pm_suspend: exiting\n");
        return (DDI_SUCCESS);
    }

    SD_INFO(SD_LOG_POWER, un, "sd_ddi_pm_suspend: un_ncmds_in_driver=%ld\n",
        un->un_ncmds_in_driver);

    /*
     * See if the device is not busy, i.e.:
     *    - we have no commands in the driver for this device
     *    - not waiting for resources
     */
    if ((un->un_ncmds_in_driver == 0) &&
        (un->un_state != SD_STATE_RWAIT)) {
        /*
         * The device is not busy, so it is OK to go to low power state.
         * Indicate low power, but rely on someone else to actually
         * change it.
         */
        mutex_enter(&un->un_pm_mutex);
        un->un_pm_count = -1;
        mutex_exit(&un->un_pm_mutex);
        un->un_power_level = SD_SPINDLE_OFF;
    }

    mutex_exit(SD_MUTEX(un));

    SD_TRACE(SD_LOG_POWER, un, "sd_ddi_pm_suspend: exit\n");

    return (DDI_SUCCESS);
}


/*
 * Function: sd_ddi_resume
 *
 * Description: Performs system power-up operations.
 *
 * Return Code: DDI_SUCCESS
 *		DDI_FAILURE
 *
 * Context: Kernel thread context
 */

static int
sd_ddi_resume(dev_info_t *devi)
{
    struct sd_lun	*un;

    un = ddi_get_soft_state(sd_state, ddi_get_instance(devi));
    if (un == NULL) {
        return (DDI_FAILURE);
    }

    SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_resume: entry\n");

    mutex_enter(SD_MUTEX(un));
    Restore_state(un);

    /*
     * Restore the state which was saved to give the
     * right state in un_last_state
     */
    un->un_last_state = un->un_save_state;
    /*
     * Note: throttle comes back at full.
     * Also note: this MUST be done before calling pm_raise_power,
     * otherwise the system can get hung in biowait. The scenario where
     * this'll happen is under cpr suspend. Writing of the system
     * state goes through sddump, which writes 0 to un_throttle. If
     * writing the system state then fails, for example if the partition
     * is too small, then cpr attempts a resume. If throttle isn't restored
     * from the saved value until after calling pm_raise_power then
     * cmds sent in sdpower are not transported and sd_send_scsi_cmd hangs
     * in biowait.
     */
    un->un_throttle = un->un_saved_throttle;

    /*
     * The chance of failure is very rare as the only command done in power
     * entry point is START command when you transition from 0->1 or
     * unknown->1. Put it to SPINDLE ON state irrespective of the state at
     * which suspend was done. Ignore the return value as the resume should
     * not be failed. In the case of removable media the media need not be
     * inserted and hence there is a chance that raise power will fail with
     * media not present.
     */
    if (un->un_f_attach_spinup) {
        mutex_exit(SD_MUTEX(un));
        (void) pm_raise_power(SD_DEVINFO(un), 0, SD_SPINDLE_ON);
        mutex_enter(SD_MUTEX(un));
    }

    /*
     * Don't broadcast to the suspend cv and therefore possibly
     * start I/O until after power has been restored.
     */
    cv_broadcast(&un->un_suspend_cv);
    cv_broadcast(&un->un_state_cv);

    /* restart thread */
    if (SD_OK_TO_RESUME_SCSI_WATCHER(un)) {
        scsi_watch_resume(un->un_swr_token);
    }

#if (defined(__fibre))
    if (un->un_f_is_fibre == TRUE) {
        /*
         * Add callbacks for insert and remove events
         */
        if (strcmp(un->un_node_type, DDI_NT_BLOCK_CHAN)) {
            sd_init_event_callbacks(un);
        }
    }
#endif

    /*
     * Transport any pending commands to the target.
     *
     * If this is a low-activity device, commands in the queue will have
     * to wait until new commands come in, which may take a while. Also, we
     * specifically don't check un_ncmds_in_transport because we know that
     * there really are no commands in progress after the unit was
     * suspended and we could have reached the throttle level, been
     * suspended, and have no new commands coming in for a while. Highly
     * unlikely, but so is the low-activity disk scenario.
     */
    ddi_xbuf_dispatch(un->un_xbuf_attr);

    sd_start_cmds(un, NULL);
    mutex_exit(SD_MUTEX(un));

    SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_resume: exit\n");

    return (DDI_SUCCESS);
}


/*
 * Function: sd_ddi_pm_resume
 *
 * Description: Set the drive state to powered on.
 *		Someone else is required to actually change the drive
 *		power level.
 *
 * Arguments: un - driver soft state (unit) structure
 *
 * Return Code: DDI_SUCCESS
 *
 * Context: Kernel thread context
 */

static int
sd_ddi_pm_resume(struct sd_lun *un)
{
    ASSERT(un != NULL);

    ASSERT(!mutex_owned(SD_MUTEX(un)));
    mutex_enter(SD_MUTEX(un));
    un->un_power_level = SD_SPINDLE_ON;

    ASSERT(!mutex_owned(&un->un_pm_mutex));
    mutex_enter(&un->un_pm_mutex);
    if (SD_DEVICE_IS_IN_LOW_POWER(un)) {
        un->un_pm_count++;
        ASSERT(un->un_pm_count == 0);
        /*
         * Note: no longer do the cv_broadcast on un_suspend_cv. The
         * un_suspend_cv is for a system resume, not a power management
         * device resume. (4297749)
         *    cv_broadcast(&un->un_suspend_cv);
         */
    }
    mutex_exit(&un->un_pm_mutex);
    mutex_exit(SD_MUTEX(un));

    return (DDI_SUCCESS);
}


/*
 * Function: sd_pm_idletimeout_handler
 *
 * Description: A timer routine that's active only while a device is busy.
 *		The purpose is to extend slightly the pm framework's busy
 *		view of the device to prevent busy/idle thrashing for
 *		back-to-back commands. Do this by comparing the current time
 *		to the time at which the last command completed and when the
 *		difference is greater than sd_pm_idletime, call
 *		pm_idle_component. In addition to indicating idle to the pm
 *		framework, update the chain type to again use the internal pm
 *		layers of the driver.
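 *
 *		Note: while the device remains busy, the handler simply
 *		re-arms itself every 300 ms (see below) instead of marking
 *		the component idle; this is what smooths out back-to-back
 *		command streams.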
 *
 * Arguments: arg - driver soft state (unit) structure
 *
 * Context: Executes in a timeout(9F) thread context
 */

static void
sd_pm_idletimeout_handler(void *arg)
{
    struct sd_lun *un = arg;

    time_t	now;

    mutex_enter(&sd_detach_mutex);
    if (un->un_detach_count != 0) {
        /* Abort if the instance is detaching */
        mutex_exit(&sd_detach_mutex);
        return;
    }
    mutex_exit(&sd_detach_mutex);

    now = ddi_get_time();
    /*
     * Grab both mutexes, in the proper order, since we're accessing
     * both PM and softstate variables.
     */
    mutex_enter(SD_MUTEX(un));
    mutex_enter(&un->un_pm_mutex);
    if (((now - un->un_pm_idle_time) > sd_pm_idletime) &&
        (un->un_ncmds_in_driver == 0) && (un->un_pm_count == 0)) {
        /*
         * Update the chain types.
         * This takes effect on the next new command received.
         */
        if (un->un_f_non_devbsize_supported) {
            un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA;
        } else {
            un->un_buf_chain_type = SD_CHAIN_INFO_DISK;
        }
        un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD;

        SD_TRACE(SD_LOG_IO_PM, un,
            "sd_pm_idletimeout_handler: idling device\n");
        (void) pm_idle_component(SD_DEVINFO(un), 0);
        un->un_pm_idle_timeid = NULL;
    } else {
        un->un_pm_idle_timeid =
            timeout(sd_pm_idletimeout_handler, un,
            (drv_usectohz((clock_t)300000))); /* 300 ms. */
    }
    mutex_exit(&un->un_pm_mutex);
    mutex_exit(SD_MUTEX(un));
}


/*
 * Function: sd_pm_timeout_handler
 *
 * Description: Callback to tell framework we are idle.
 *
 * Context: timeout(9F) thread context.
 */

static void
sd_pm_timeout_handler(void *arg)
{
    struct sd_lun *un = arg;

    (void) pm_idle_component(SD_DEVINFO(un), 0);
    mutex_enter(&un->un_pm_mutex);
    un->un_pm_timeid = NULL;
    mutex_exit(&un->un_pm_mutex);
}


/*
 * Function: sdpower
 *
 * Description: PM entry point.
 *
 * Return Code: DDI_SUCCESS
 *		DDI_FAILURE
 *
 * Context: Kernel thread context
 */

static int
sdpower(dev_info_t *devi, int component, int level)
{
    struct sd_lun	*un;
    int		instance;
    int		rval = DDI_SUCCESS;
    uint_t		i, log_page_size, maxcycles, ncycles;
    uchar_t		*log_page_data;
    int		log_sense_page;
    int		medium_present;
    time_t		intvlp;
    dev_t		dev;
    struct pm_trans_data	sd_pm_tran_data;
    uchar_t		save_state;
    int		sval;
    uchar_t		state_before_pm;
    int		got_semaphore_here;

    instance = ddi_get_instance(devi);

    if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) ||
        (SD_SPINDLE_OFF > level) || (level > SD_SPINDLE_ON) ||
        component != 0) {
        return (DDI_FAILURE);
    }

    dev = sd_make_device(SD_DEVINFO(un));

    SD_TRACE(SD_LOG_IO_PM, un, "sdpower: entry, level = %d\n", level);

    /*
     * Must synchronize power down with close.
     * Attempt to decrement/acquire the open/close semaphore,
     * but do NOT wait on it. If it's not greater than zero,
     * i.e. it can't be decremented without waiting, then
     * someone else, either open or close, already has it
     * and the try returns 0. Use that knowledge here to determine
     * if it's OK to change the device power level.
     * Also, only increment it on exit if it was decremented, i.e. gotten,
     * here.
     */
    got_semaphore_here = sema_tryp(&un->un_semoclose);

    mutex_enter(SD_MUTEX(un));

    SD_INFO(SD_LOG_POWER, un, "sdpower: un_ncmds_in_driver = %ld\n",
        un->un_ncmds_in_driver);

    /*
     * If un_ncmds_in_driver is non-zero it indicates commands are
     * already being processed in the driver, or if the semaphore was
     * not gotten here it indicates an open or close is being processed.
     * At the same time somebody is requesting to go low power which
     * can't happen, therefore we need to return failure.
     */
    if ((level == SD_SPINDLE_OFF) &&
        ((un->un_ncmds_in_driver != 0) || (got_semaphore_here == 0))) {
        mutex_exit(SD_MUTEX(un));

        if (got_semaphore_here != 0) {
            sema_v(&un->un_semoclose);
        }
        SD_TRACE(SD_LOG_IO_PM, un,
            "sdpower: exit, device has queued cmds.\n");
        return (DDI_FAILURE);
    }

    /*
     * If the device is OFFLINE, that means the disk is completely dead.
     * In our case we would have to power the disk on or off by sending it
     * commands, and of course those would fail anyway, so return failure
     * here.
     *
     * Power changes to a device that's OFFLINE or SUSPENDED
     * are not allowed.
     */
    if ((un->un_state == SD_STATE_OFFLINE) ||
        (un->un_state == SD_STATE_SUSPENDED)) {
        mutex_exit(SD_MUTEX(un));

        if (got_semaphore_here != 0) {
            sema_v(&un->un_semoclose);
        }
        SD_TRACE(SD_LOG_IO_PM, un,
            "sdpower: exit, device is off-line.\n");
        return (DDI_FAILURE);
    }

    /*
     * Change the device's state to indicate its power level
     * is being changed. Do this to prevent a power off in the
     * middle of commands, which is especially bad on devices
     * that are really powered off instead of just spun down.
     */
    state_before_pm = un->un_state;
    un->un_state = SD_STATE_PM_CHANGING;

    mutex_exit(SD_MUTEX(un));

    /*
     * If the "pm-capable" property is set to TRUE by HBA drivers,
     * bypass the following checking; otherwise, check the log
     * sense information for this device.
     */
    if ((level == SD_SPINDLE_OFF) && un->un_f_log_sense_supported) {
        /*
         * Get the log sense information to understand whether the
         * power cycle counts have gone beyond the threshold.
         */
        log_page_size = START_STOP_CYCLE_COUNTER_PAGE_SIZE;
        log_page_data = kmem_zalloc(log_page_size, KM_SLEEP);

        mutex_enter(SD_MUTEX(un));
        log_sense_page = un->un_start_stop_cycle_page;
        mutex_exit(SD_MUTEX(un));

        rval = sd_send_scsi_LOG_SENSE(un, log_page_data,
            log_page_size, log_sense_page, 0x01, 0, SD_PATH_DIRECT);
#ifdef SDDEBUG
        if (sd_force_pm_supported) {
            /* Force a successful result */
            rval = 0;
        }
#endif
        if (rval != 0) {
            scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
                "Log Sense Failed\n");
            kmem_free(log_page_data, log_page_size);
            /* Cannot support power management on those drives */

            if (got_semaphore_here != 0) {
                sema_v(&un->un_semoclose);
            }
            /*
             * On exit put the state back to its original value
             * and broadcast to anyone waiting for the power
             * change completion.
             */
            mutex_enter(SD_MUTEX(un));
            un->un_state = state_before_pm;
            cv_broadcast(&un->un_suspend_cv);
            mutex_exit(SD_MUTEX(un));
            SD_TRACE(SD_LOG_IO_PM, un,
                "sdpower: exit, Log Sense Failed.\n");
            return (DDI_FAILURE);
        }

        /*
         * From the page data - Convert the essential information to
         * pm_trans_data
         */
        maxcycles =
            (log_page_data[0x1c] << 24) | (log_page_data[0x1d] << 16) |
            (log_page_data[0x1E] << 8)  | log_page_data[0x1F];

        sd_pm_tran_data.un.scsi_cycles.lifemax = maxcycles;

        ncycles =
            (log_page_data[0x24] << 24) | (log_page_data[0x25] << 16) |
            (log_page_data[0x26] << 8)  | log_page_data[0x27];

        sd_pm_tran_data.un.scsi_cycles.ncycles = ncycles;

        for (i = 0; i < DC_SCSI_MFR_LEN; i++) {
            sd_pm_tran_data.un.scsi_cycles.svc_date[i] =
                log_page_data[8+i];
        }

        kmem_free(log_page_data, log_page_size);

        /*
         * Call pm_trans_check routine to get the Ok from
         * the global policy
         */

        sd_pm_tran_data.format = DC_SCSI_FORMAT;
        sd_pm_tran_data.un.scsi_cycles.flag = 0;

        rval = pm_trans_check(&sd_pm_tran_data, &intvlp);
#ifdef SDDEBUG
        if (sd_force_pm_supported) {
            /* Force a successful result */
            rval = 1;
        }
#endif
        switch (rval) {
        case 0:
            /*
             * Not OK to power cycle, or an error occurred in the
             * parameters passed; pm_trans_check has returned the
             * advised time (intvlp) to wait before a power cycle
             * should be considered again. Based on that intvlp
             * parameter we are supposed to pretend we are busy so
             * that the pm framework will never call our power entry
             * point. To do that, install a timeout handler and wait
             * for the recommended time to elapse so that power
             * management can become effective again.
             *
             * To effect this behavior, call pm_busy_component to
             * indicate to the framework this device is busy.
             * By not adjusting un_pm_count, the rest of PM in
             * the driver will function normally, and independently
             * of this; but because the framework is told the device
             * is busy it won't attempt powering down until it gets
             * a matching idle. The timeout handler sends this.
             * Note: sd_pm_entry can't be called here to do this
             * because sdpower may have been called as a result
             * of a call to pm_raise_power from within sd_pm_entry.
             *
             * If a timeout handler is already active then
             * don't install another.
             */
            mutex_enter(&un->un_pm_mutex);
            if (un->un_pm_timeid == NULL) {
                un->un_pm_timeid =
                    timeout(sd_pm_timeout_handler,
                    un, intvlp * drv_usectohz(1000000));
                mutex_exit(&un->un_pm_mutex);
                (void) pm_busy_component(SD_DEVINFO(un), 0);
            } else {
                mutex_exit(&un->un_pm_mutex);
            }
            if (got_semaphore_here != 0) {
                sema_v(&un->un_semoclose);
            }
            /*
             * On exit put the state back to its original value
             * and broadcast to anyone waiting for the power
             * change completion.
             */
            mutex_enter(SD_MUTEX(un));
            un->un_state = state_before_pm;
            cv_broadcast(&un->un_suspend_cv);
            mutex_exit(SD_MUTEX(un));

            SD_TRACE(SD_LOG_IO_PM, un, "sdpower: exit, "
                "trans check Failed, not ok to power cycle.\n");
            return (DDI_FAILURE);

        case -1:
            if (got_semaphore_here != 0) {
                sema_v(&un->un_semoclose);
            }
            /*
             * On exit put the state back to its original value
             * and broadcast to anyone waiting for the power
             * change completion.
             */
            mutex_enter(SD_MUTEX(un));
            un->un_state = state_before_pm;
            cv_broadcast(&un->un_suspend_cv);
            mutex_exit(SD_MUTEX(un));
            SD_TRACE(SD_LOG_IO_PM, un,
                "sdpower: exit, trans check command Failed.\n");
            return (DDI_FAILURE);
        }
    }

    if (level == SD_SPINDLE_OFF) {
        /*
         * Save the last state... if the STOP FAILS we need it
         * for restoring
         */
        mutex_enter(SD_MUTEX(un));
        save_state = un->un_last_state;
        /*
         * There must not be any cmds getting processed
         * in the driver when we get here. Power to the
         * device is potentially going off.
         */
        ASSERT(un->un_ncmds_in_driver == 0);
        mutex_exit(SD_MUTEX(un));

        /*
         * For now suspend the device completely before spindle is
         * turned off
         */
        if ((rval = sd_ddi_pm_suspend(un)) == DDI_FAILURE) {
            if (got_semaphore_here != 0) {
                sema_v(&un->un_semoclose);
            }
            /*
             * On exit put the state back to its original value
             * and broadcast to anyone waiting for the power
             * change completion.
             */
            mutex_enter(SD_MUTEX(un));
            un->un_state = state_before_pm;
            cv_broadcast(&un->un_suspend_cv);
            mutex_exit(SD_MUTEX(un));
            SD_TRACE(SD_LOG_IO_PM, un,
                "sdpower: exit, PM suspend Failed.\n");
            return (DDI_FAILURE);
        }
    }

    /*
     * The transition from SPINDLE_OFF to SPINDLE_ON can happen in open,
     * close, or strategy. Dump no longer uses this routine; it uses its
     * own code so it can be done in polled mode.
     */

    medium_present = TRUE;

    /*
     * When powering up, issue a TUR in case the device is at unit
     * attention. Don't do retries. Bypass the PM layer, otherwise
     * a deadlock on un_pm_busy_cv will occur.
     */
    if (level == SD_SPINDLE_ON) {
        (void) sd_send_scsi_TEST_UNIT_READY(un,
            SD_DONT_RETRY_TUR | SD_BYPASS_PM);
    }

    SD_TRACE(SD_LOG_IO_PM, un, "sdpower: sending \'%s\' unit\n",
        ((level == SD_SPINDLE_ON) ? "START" : "STOP"));

    sval = sd_send_scsi_START_STOP_UNIT(un,
        ((level == SD_SPINDLE_ON) ? SD_TARGET_START : SD_TARGET_STOP),
        SD_PATH_DIRECT);
    /* Command failed, check for media present. */
    if ((sval == ENXIO) && un->un_f_has_removable_media) {
        medium_present = FALSE;
    }

    /*
     * The conditions of interest here are:
     *   if a spindle off with media present fails,
     *	then restore the state and return an error.
     *   else if a spindle on fails,
     *	then return an error (there's no state to restore).
     * In all other cases we set up for the new state
     * and return success.
     */
    switch (level) {
    case SD_SPINDLE_OFF:
        if ((medium_present == TRUE) && (sval != 0)) {
            /* The stop command from above failed */
            rval = DDI_FAILURE;
            /*
             * The stop command failed, and we have media
             * present. Put the level back by calling the
             * sd_pm_resume() and set the state back to
             * its previous value.
             */
            (void) sd_ddi_pm_resume(un);
            mutex_enter(SD_MUTEX(un));
            un->un_last_state = save_state;
            mutex_exit(SD_MUTEX(un));
            break;
        }
        /*
         * The stop command from above succeeded.
         */
        if (un->un_f_monitor_media_state) {
            /*
             * Terminate the watch thread when removable media
             * devices go into low power state. This is required
             * by the pm framework; otherwise commands will be
             * generated for the device (through the watch thread)
             * even when the device is in low power state.
             */
            mutex_enter(SD_MUTEX(un));
            un->un_f_watcht_stopped = FALSE;
            if (un->un_swr_token != NULL) {
                opaque_t temp_token = un->un_swr_token;
                un->un_f_watcht_stopped = TRUE;
                un->un_swr_token = NULL;
                mutex_exit(SD_MUTEX(un));
                (void) scsi_watch_request_terminate(temp_token,
                    SCSI_WATCH_TERMINATE_WAIT);
            } else {
                mutex_exit(SD_MUTEX(un));
            }
        }
        break;

    default:	/* The level requested is spindle on... */
        /*
         * Legacy behavior: return success on a failed spinup
         * if there is no media in the drive.
         * Do this by looking at medium_present here.
         */
        if ((sval != 0) && medium_present) {
            /* The start command from above failed */
            rval = DDI_FAILURE;
            break;
        }
        /*
         * The start command from above succeeded.
         * Resume the devices now that we have
         * started the disks.
         */
        (void) sd_ddi_pm_resume(un);

        /*
         * Resume the watch thread since it was suspended
         * when the device went into low power mode.
         */
        if (un->un_f_monitor_media_state) {
            mutex_enter(SD_MUTEX(un));
            if (un->un_f_watcht_stopped == TRUE) {
                opaque_t temp_token;

                un->un_f_watcht_stopped = FALSE;
                mutex_exit(SD_MUTEX(un));
                temp_token = scsi_watch_request_submit(
                    SD_SCSI_DEVP(un),
                    sd_check_media_time,
                    SENSE_LENGTH, sd_media_watch_cb,
                    (caddr_t)dev);
                mutex_enter(SD_MUTEX(un));
                un->un_swr_token = temp_token;
            }
            mutex_exit(SD_MUTEX(un));
        }
    }
    if (got_semaphore_here != 0) {
        sema_v(&un->un_semoclose);
    }
    /*
     * On exit put the state back to its original value
     * and broadcast to anyone waiting for the power
     * change completion.
     */
    mutex_enter(SD_MUTEX(un));
    un->un_state = state_before_pm;
    cv_broadcast(&un->un_suspend_cv);
    mutex_exit(SD_MUTEX(un));

    SD_TRACE(SD_LOG_IO_PM, un, "sdpower: exit, status = 0x%x\n", rval);

    return (rval);
}



/*
 * Function: sdattach
 *
 * Description: Driver's attach(9e) entry point function.
 *
 * Arguments: devi - opaque device info handle
 *	      cmd  - attach type
 *
 * Return Code: DDI_SUCCESS
 *		DDI_FAILURE
 *
 * Context: Kernel thread context
 */

static int
sdattach(dev_info_t *devi, ddi_attach_cmd_t cmd)
{
    switch (cmd) {
    case DDI_ATTACH:
        return (sd_unit_attach(devi));
    case DDI_RESUME:
        return (sd_ddi_resume(devi));
    default:
        break;
    }
    return (DDI_FAILURE);
}


/*
 * Function: sddetach
 *
 * Description: Driver's detach(9E) entry point function.
 *
 * Arguments: devi - opaque device info handle
 *	      cmd  - detach type
 *
 * Return Code: DDI_SUCCESS
 *		DDI_FAILURE
 *
 * Context: Kernel thread context
 */

static int
sddetach(dev_info_t *devi, ddi_detach_cmd_t cmd)
{
    switch (cmd) {
    case DDI_DETACH:
        return (sd_unit_detach(devi));
    case DDI_SUSPEND:
        return (sd_ddi_suspend(devi));
    default:
        break;
    }
    return (DDI_FAILURE);
}


/*
 * Function: sd_sync_with_callback
 *
 * Description: Prevents sd_unit_attach or sd_unit_detach from freeing the soft
 *		state while the callback routine is active.
 *
 * Arguments: un: softstate structure for the instance
 *
 * Context: Kernel thread context
 */

static void
sd_sync_with_callback(struct sd_lun *un)
{
    ASSERT(un != NULL);

    mutex_enter(SD_MUTEX(un));

    ASSERT(un->un_in_callback >= 0);

    while (un->un_in_callback > 0) {
        mutex_exit(SD_MUTEX(un));
        delay(2);
        mutex_enter(SD_MUTEX(un));
    }

    mutex_exit(SD_MUTEX(un));
}

/*
 * Function: sd_unit_attach
 *
 * Description: Performs DDI_ATTACH processing for sdattach(). Allocates
 *		the soft state structure for the device and performs
 *		all necessary structure and device initializations.
 *
 * Arguments: devi: the system's dev_info_t for the device.
 *
 * Return Code: DDI_SUCCESS if attach is successful.
 *		DDI_FAILURE if any part of the attach fails.
 *
 * Context: Called at attach(9e) time for the DDI_ATTACH flag.
 *		Kernel thread context only. Can sleep.
 */

static int
sd_unit_attach(dev_info_t *devi)
{
    struct	scsi_device	*devp;
    struct	sd_lun		*un;
    char			*variantp;
    int	reservation_flag = SD_TARGET_IS_UNRESERVED;
    int	instance;
    int	rval;
    int	wc_enabled;
    int	tgt;
    uint64_t	capacity;
    uint_t		lbasize = 0;
    dev_info_t	*pdip = ddi_get_parent(devi);
    int		offbyone = 0;
    int		geom_label_valid = 0;
#if defined(__sparc)
    int		max_xfer_size;
#endif

    /*
     * Retrieve the target driver's private data area. This was set
     * up by the HBA.
     */
    devp = ddi_get_driver_private(devi);

    /*
     * Retrieve the target ID of the device.
     */
    tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS,
        SCSI_ADDR_PROP_TARGET, -1);

    /*
     * Since we have no idea what state things were left in by the last
     * user of the device, set up some 'default' settings, i.e. turn 'em
     * off. The scsi_ifsetcap calls force re-negotiations with the drive.
     * Do this before the scsi_probe, which sends an inquiry.
     * This is a fix for bug (4430280).
     * Of special importance is wide-xfer. The drive could have been left
     * in wide transfer mode by the last driver to communicate with it,
     * and this includes us. If that's the case, and if the following is
     * not set up properly or we don't re-negotiate with the drive prior
     * to transferring data to/from the drive, it causes bus parity errors,
     * data overruns, and unexpected interrupts. This first occurred when
     * the fix for bug (4378686) was made.
     */
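    /*
     * A note on the scsi_ifsetcap() calls below: the third argument is the
     * new capability value (0 = disabled here) and the fourth argument of 1
     * applies the change to this particular target rather than to all
     * targets on the bus (per scsi_ifsetcap(9F)).
     */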
    (void) scsi_ifsetcap(&devp->sd_address, "lun-reset", 0, 1);
    (void) scsi_ifsetcap(&devp->sd_address, "wide-xfer", 0, 1);
    (void) scsi_ifsetcap(&devp->sd_address, "auto-rqsense", 0, 1);

    /*
     * Currently, scsi_ifsetcap sets tagged-qing capability for all LUNs
     * on a target. Setting it per lun instance actually sets the
     * capability of this target, which affects those luns already
     * attached on the same target. So during attach, we can disable
     * this capability only when no other lun has been attached on this
     * target. By doing this, we assume a target has the same tagged-qing
     * capability for every lun. The condition can be removed when HBA
     * is changed to support per lun based tagged-qing capability.
     */
    if (sd_scsi_get_target_lun_count(pdip, tgt) < 1) {
        (void) scsi_ifsetcap(&devp->sd_address, "tagged-qing", 0, 1);
    }

    /*
     * Use scsi_probe() to issue an INQUIRY command to the device.
     * This call will allocate and fill in the scsi_inquiry structure
     * and point the sd_inq member of the scsi_device structure to it.
     * If the attach succeeds, then this memory will not be de-allocated
     * (via scsi_unprobe()) until the instance is detached.
     */
    if (scsi_probe(devp, SLEEP_FUNC) != SCSIPROBE_EXISTS) {
        goto probe_failed;
    }

    /*
     * Check the device type as specified in the inquiry data and
     * claim it if it is of a type that we support.
     */
    switch (devp->sd_inq->inq_dtype) {
    case DTYPE_DIRECT:
        break;
    case DTYPE_RODIRECT:
        break;
    case DTYPE_OPTICAL:
        break;
    case DTYPE_NOTPRESENT:
    default:
        /* Unsupported device type; fail the attach. */
        goto probe_failed;
    }

    /*
     * Allocate the soft state structure for this unit.
     *
     * We rely upon this memory being set to all zeroes by
     * ddi_soft_state_zalloc().  We assume that any member of the
     * soft state structure that is not explicitly initialized by
     * this routine will have a value of zero.
     */
    instance = ddi_get_instance(devp->sd_dev);
    if (ddi_soft_state_zalloc(sd_state, instance) != DDI_SUCCESS) {
        goto probe_failed;
    }

    /*
     * Retrieve a pointer to the newly-allocated soft state.
     *
     * This should NEVER fail if the ddi_soft_state_zalloc() call above
     * was successful, unless something has gone horribly wrong and the
     * ddi's soft state internals are corrupt (in which case it is
     * probably better to halt here than just fail the attach....)
     */
    if ((un = ddi_get_soft_state(sd_state, instance)) == NULL) {
        panic("sd_unit_attach: NULL soft state on instance:0x%x",
            instance);
        /*NOTREACHED*/
    }

    /*
     * Link the back ptr of the driver soft state to the scsi_device
     * struct for this lun.
     * Save a pointer to the softstate in the driver-private area of
     * the scsi_device struct.
     * Note: We cannot call SD_INFO, SD_TRACE, SD_ERROR, or SD_DIAG until
     * we first set un->un_sd below.
     */
    un->un_sd = devp;
    devp->sd_private = (opaque_t)un;

    /*
     * The following must be after devp is stored in the soft state struct.
     */
#ifdef SDDEBUG
    SD_TRACE(SD_LOG_ATTACH_DETACH, un,
        "%s_unit_attach: un:0x%p instance:%d\n",
        ddi_driver_name(devi), un, instance);
#endif

    /*
     * Set up the device type and node type (for the minor nodes).
     * By default we assume that the device can at least support the
     * Common Command Set. Call it a CD-ROM if it reports itself
     * as a RODIRECT device.
     */
    switch (devp->sd_inq->inq_dtype) {
    case DTYPE_RODIRECT:
        un->un_node_type = DDI_NT_CD_CHAN;
        un->un_ctype	 = CTYPE_CDROM;
        break;
    case DTYPE_OPTICAL:
        un->un_node_type = DDI_NT_BLOCK_CHAN;
        un->un_ctype	 = CTYPE_ROD;
        break;
    default:
        un->un_node_type = DDI_NT_BLOCK_CHAN;
        un->un_ctype	 = CTYPE_CCS;
        break;
    }

    /*
     * Try to read the interconnect type from the HBA.
     *
     * Note: This driver is currently compiled as two binaries, a parallel
     * scsi version (sd) and a fibre channel version (ssd). All functional
     * differences are determined at compile time. In the future a single
     * binary will be provided and the interconnect type will be used to
     * differentiate between fibre and parallel scsi behaviors. At that
     * time it will be necessary for all fibre channel HBAs to support
     * this property.
     *
     * Set un_f_is_fibre to TRUE (default fibre).
     */
    un->un_f_is_fibre = TRUE;
    switch (scsi_ifgetcap(SD_ADDRESS(un), "interconnect-type", -1)) {
    case INTERCONNECT_SSA:
        un->un_interconnect_type = SD_INTERCONNECT_SSA;
        SD_INFO(SD_LOG_ATTACH_DETACH, un,
            "sd_unit_attach: un:0x%p SD_INTERCONNECT_SSA\n", un);
        break;
    case INTERCONNECT_PARALLEL:
        un->un_f_is_fibre = FALSE;
        un->un_interconnect_type = SD_INTERCONNECT_PARALLEL;
        SD_INFO(SD_LOG_ATTACH_DETACH, un,
            "sd_unit_attach: un:0x%p SD_INTERCONNECT_PARALLEL\n", un);
        break;
    case INTERCONNECT_SATA:
        un->un_f_is_fibre = FALSE;
        un->un_interconnect_type = SD_INTERCONNECT_SATA;
        SD_INFO(SD_LOG_ATTACH_DETACH, un,
            "sd_unit_attach: un:0x%p SD_INTERCONNECT_SATA\n", un);
        break;
    case INTERCONNECT_FIBRE:
        un->un_interconnect_type = SD_INTERCONNECT_FIBRE;
        SD_INFO(SD_LOG_ATTACH_DETACH, un,
            "sd_unit_attach: un:0x%p SD_INTERCONNECT_FIBRE\n", un);
        break;
    case INTERCONNECT_FABRIC:
        un->un_interconnect_type = SD_INTERCONNECT_FABRIC;
        un->un_node_type = DDI_NT_BLOCK_FABRIC;
        SD_INFO(SD_LOG_ATTACH_DETACH, un,
            "sd_unit_attach: un:0x%p SD_INTERCONNECT_FABRIC\n", un);
        break;
    default:
#ifdef SD_DEFAULT_INTERCONNECT_TYPE
        /*
         * The HBA does not support the "interconnect-type" property
         * (or did not provide a recognized type).
         *
         * Note: This will be obsoleted when a single fibre channel
         * and parallel scsi driver is delivered. In the meantime the
         * interconnect type will be set to the platform default. If
         * that type is not parallel SCSI, it means that we should be
         * assuming "ssd" semantics. However, here this also means that
         * the FC HBA is not supporting the "interconnect-type" property
         * like we expect it to, so log this occurrence.
         */
        un->un_interconnect_type = SD_DEFAULT_INTERCONNECT_TYPE;
        if (!SD_IS_PARALLEL_SCSI(un)) {
            SD_INFO(SD_LOG_ATTACH_DETACH, un,
                "sd_unit_attach: un:0x%p Assuming "
                "INTERCONNECT_FIBRE\n", un);
        } else {
            SD_INFO(SD_LOG_ATTACH_DETACH, un,
                "sd_unit_attach: un:0x%p Assuming "
                "INTERCONNECT_PARALLEL\n", un);
            un->un_f_is_fibre = FALSE;
        }
#else
        /*
         * Note: This source will be implemented when a single fibre
         * channel and parallel scsi driver is delivered. The default
         * will be to assume that if a device does not support the
         * "interconnect-type" property it is a parallel SCSI HBA and
         * we will set the interconnect type for parallel scsi.
         */
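        /*
         * Summary of the mapping applied by this switch: SSA and FIBRE
         * leave un_f_is_fibre set; PARALLEL and SATA clear it; FABRIC
         * additionally switches the node type to DDI_NT_BLOCK_FABRIC;
         * any unrecognized type falls through to this default branch.
         */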
        un->un_interconnect_type = SD_INTERCONNECT_PARALLEL;
        un->un_f_is_fibre = FALSE;
#endif
        break;
    }

    if (un->un_f_is_fibre == TRUE) {
        if (scsi_ifgetcap(SD_ADDRESS(un), "scsi-version", 1) ==
            SCSI_VERSION_3) {
            switch (un->un_interconnect_type) {
            case SD_INTERCONNECT_FIBRE:
            case SD_INTERCONNECT_SSA:
                un->un_node_type = DDI_NT_BLOCK_WWN;
                break;
            default:
                break;
            }
        }
    }

    /*
     * Initialize the Request Sense command for the target
     */
    if (sd_alloc_rqs(devp, un) != DDI_SUCCESS) {
        goto alloc_rqs_failed;
    }

    /*
     * Set un_retry_count with SD_RETRY_COUNT; this is OK for SPARC
     * with separate binaries for sd and ssd.
     *
     * x86 has one binary, and un_retry_count is set based on connection
     * type. The hardcoded values will go away when SPARC uses one binary
     * for sd and ssd. These hardcoded values need to match SD_RETRY_COUNT
     * in sddef.h. The value used is based on interconnect type:
     * fibre = 3, parallel = 5.
     */
#if defined(__i386) || defined(__amd64)
    un->un_retry_count = un->un_f_is_fibre ? 3 : 5;
#else
    un->un_retry_count = SD_RETRY_COUNT;
#endif

    /*
     * Set the per disk retry count to the default number of retries
     * for disks and CDROMs. This value can be overridden by the
     * disk property list or an entry in sd.conf.
     */
    un->un_notready_retry_count =
        ISCD(un) ? CD_NOT_READY_RETRY_COUNT(un)
        : DISK_NOT_READY_RETRY_COUNT(un);

    /*
     * Set the busy retry count to the default value of un_retry_count.
     * This can be overridden by entries in sd.conf or the device
     * config table.
     */
    un->un_busy_retry_count = un->un_retry_count;

    /*
     * Init the reset threshold for retries. This number determines
     * how many retries must be performed before a reset can be issued
     * (for certain error conditions). This can be overridden by entries
     * in sd.conf or the device config table.
     */
    un->un_reset_retry_count = (un->un_retry_count / 2);

    /*
     * Set the victim_retry_count to the default un_retry_count
     */
    un->un_victim_retry_count = (2 * un->un_retry_count);

    /*
     * Set the reservation release timeout to the default value of
     * 5 seconds. This can be overridden by entries in ssd.conf or the
     * device config table.
     */
    un->un_reserve_release_time = 5;

    /*
     * Set up the default maximum transfer size. Note that this may
     * get updated later in the attach, when setting up default wide
     * operations for disks.
     */
#if defined(__i386) || defined(__amd64)
    un->un_max_xfer_size = (uint_t)SD_DEFAULT_MAX_XFER_SIZE;
    un->un_partial_dma_supported = 1;
#else
    un->un_max_xfer_size = (uint_t)maxphys;
#endif

    /*
     * Get "allow bus device reset" property (defaults to "enabled" if
     * the property was not defined). This is to disable bus resets for
     * certain kinds of error recovery. Note: In the future when a run-time
     * fibre check is available the soft state flag should default to
     * enabled.
     */
    if (un->un_f_is_fibre == TRUE) {
        un->un_f_allow_bus_device_reset = TRUE;
    } else {
        if (ddi_getprop(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS,
            "allow-bus-device-reset", 1) != 0) {
            un->un_f_allow_bus_device_reset = TRUE;
            SD_INFO(SD_LOG_ATTACH_DETACH, un,
                "sd_unit_attach: un:0x%p Bus device reset "
                "enabled\n", un);
        } else {
            un->un_f_allow_bus_device_reset = FALSE;
            SD_INFO(SD_LOG_ATTACH_DETACH, un,
                "sd_unit_attach: un:0x%p Bus device reset "
                "disabled\n", un);
        }
    }

    /*
     * Check if this is an ATAPI device. ATAPI devices use Group 1
     * Read/Write commands and Group 2 Mode Sense/Select commands.
     *
     * Note: The "obsolete" way of doing this is to check for the "atapi"
     * property. The new "variant" property with a value of "atapi" has
     * been introduced so that future 'variants' of standard SCSI behavior
     * (like atapi) could be specified by the underlying HBA drivers by
     * supplying a new value for the "variant" property, instead of having
     * to define a new property.
     */
    if (ddi_prop_get_int(DDI_DEV_T_ANY, devi, 0, "atapi", -1) != -1) {
        un->un_f_cfg_is_atapi = TRUE;
        SD_INFO(SD_LOG_ATTACH_DETACH, un,
            "sd_unit_attach: un:0x%p Atapi device\n", un);
    }
    if (ddi_prop_lookup_string(DDI_DEV_T_ANY, devi, 0, "variant",
        &variantp) == DDI_PROP_SUCCESS) {
        if (strcmp(variantp, "atapi") == 0) {
            un->un_f_cfg_is_atapi = TRUE;
            SD_INFO(SD_LOG_ATTACH_DETACH, un,
                "sd_unit_attach: un:0x%p Atapi device\n", un);
        }
        ddi_prop_free(variantp);
    }

    un->un_cmd_timeout	= SD_IO_TIME;

    /* Info on current states, statuses, etc. (Updated frequently) */
    un->un_state		= SD_STATE_NORMAL;
    un->un_last_state	= SD_STATE_NORMAL;

    /* Control & status info for command throttling */
    un->un_throttle		= sd_max_throttle;
    un->un_saved_throttle	= sd_max_throttle;
    un->un_min_throttle	= sd_min_throttle;

    if (un->un_f_is_fibre == TRUE) {
        un->un_f_use_adaptive_throttle = TRUE;
    } else {
        un->un_f_use_adaptive_throttle = FALSE;
    }

    /* Removable media support. */
    cv_init(&un->un_state_cv, NULL, CV_DRIVER, NULL);
    un->un_mediastate		= DKIO_NONE;
    un->un_specified_mediastate	= DKIO_NONE;

    /* CVs for suspend/resume (PM or DR) */
    cv_init(&un->un_suspend_cv,   NULL, CV_DRIVER, NULL);
    cv_init(&un->un_disk_busy_cv, NULL, CV_DRIVER, NULL);

    /* Power management support. */
    un->un_power_level = SD_SPINDLE_UNINIT;

    cv_init(&un->un_wcc_cv,   NULL, CV_DRIVER, NULL);
    un->un_f_wcc_inprog = 0;

    /*
     * The open/close semaphore is used to serialize threads executing
     * in the driver's open & close entry point routines for a given
     * instance.
     */
    (void) sema_init(&un->un_semoclose, 1, NULL, SEMA_DRIVER, NULL);

    /*
     * The conf file entry and softstate variable is a forceful override,
     * meaning a non-zero value must be entered to change the default.
     */
    un->un_f_disksort_disabled = FALSE;

    /*
     * Retrieve the properties from the static driver table or the driver
     * configuration file (.conf) for this unit and update the soft state
     * for the device as needed for the indicated properties.
     * Note: the property configuration needs to occur here as some of the
     * following routines may have dependencies on soft state flags set
     * as part of the driver property configuration.
     */
    sd_read_unit_properties(un);
    SD_TRACE(SD_LOG_ATTACH_DETACH, un,
        "sd_unit_attach: un:0x%p property configuration complete.\n", un);

    /*
     * Only if a device has the "hotpluggable" property is it treated as
     * a hotpluggable device; otherwise it is regarded as
     * non-hotpluggable.
     */
    if (ddi_prop_get_int(DDI_DEV_T_ANY, devi, 0, "hotpluggable",
        -1) != -1) {
        un->un_f_is_hotpluggable = TRUE;
    }

    /*
     * Set the unit's attributes (flags) according to the "hotpluggable"
     * property and the RMB bit in the INQUIRY data.
     */
    sd_set_unit_attributes(un, devi);

    /*
     * By default, we mark the capacity, lbasize, and geometry
     * as invalid. Only if we successfully read a valid capacity
     * will we update the un_blockcount and un_tgt_blocksize with the
     * valid values (the geometry will be validated later).
     */
    un->un_f_blockcount_is_valid	= FALSE;
    un->un_f_tgt_blocksize_is_valid	= FALSE;

    /*
     * Use DEV_BSIZE and DEV_BSHIFT as defaults, until we can determine
     * otherwise.
     */
    un->un_tgt_blocksize  = un->un_sys_blocksize  = DEV_BSIZE;
    un->un_blockcount = 0;

    /*
     * Set up the per-instance info needed to determine the correct
     * CDBs and other info for issuing commands to the target.
     */
    sd_init_cdb_limits(un);

    /*
     * Set up the IO chains to use, based upon the target type.
     */
    if (un->un_f_non_devbsize_supported) {
        un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA;
    } else {
        un->un_buf_chain_type = SD_CHAIN_INFO_DISK;
    }
    un->un_uscsi_chain_type  = SD_CHAIN_INFO_USCSI_CMD;
    un->un_direct_chain_type = SD_CHAIN_INFO_DIRECT_CMD;
    un->un_priority_chain_type = SD_CHAIN_INFO_PRIORITY_CMD;

    un->un_xbuf_attr = ddi_xbuf_attr_create(sizeof (struct sd_xbuf),
        sd_xbuf_strategy, un, sd_xbuf_active_limit, sd_xbuf_reserve_limit,
        ddi_driver_major(devi), DDI_XBUF_QTHREAD_DRIVER);
    ddi_xbuf_attr_register_devinfo(un->un_xbuf_attr, devi);


    if (ISCD(un)) {
        un->un_additional_codes = sd_additional_codes;
    } else {
        un->un_additional_codes = NULL;
    }

    /*
     * Create the kstats here so they can be available for attach-time
     * routines that send commands to the unit (either polled or via
     * sd_send_scsi_cmd).
     *
     * Note: This is a critical sequence that needs to be maintained:
     *	1) Instantiate the kstats here, before any routines using the
     *	   iopath (i.e. sd_send_scsi_cmd).
     *	2) Instantiate and initialize the partition stats
     *	   (sd_set_pstats).
     *	3) Initialize the error stats (sd_set_errstats), following
     *	   sd_validate_geometry(), sd_register_devid(),
     *	   and sd_cache_control().
     */

    un->un_stats = kstat_create(sd_label, instance,
        NULL, "disk", KSTAT_TYPE_IO, 1, KSTAT_FLAG_PERSISTENT);
    if (un->un_stats != NULL) {
        un->un_stats->ks_lock = SD_MUTEX(un);
        kstat_install(un->un_stats);
    }
    SD_TRACE(SD_LOG_ATTACH_DETACH, un,
        "sd_unit_attach: un:0x%p un_stats created\n", un);

    sd_create_errstats(un, instance);
    if (un->un_errstats == NULL) {
        goto create_errstats_failed;
    }
    SD_TRACE(SD_LOG_ATTACH_DETACH, un,
        "sd_unit_attach: un:0x%p errstats created\n", un);

    /*
     * The following if/else code was relocated here from below as part
     * of the fix for bug (4430280). However with the default setup added
     * on entry to this routine, it's no longer absolutely necessary for
     * this to be before the call to sd_spin_up_unit.
     */
    if (SD_IS_PARALLEL_SCSI(un) || SD_IS_SERIAL(un)) {
        int tq_trigger_flag = (((devp->sd_inq->inq_ansi == 4) ||
            (devp->sd_inq->inq_ansi == 5)) &&
            devp->sd_inq->inq_bque) || devp->sd_inq->inq_cmdque;

        /*
         * If tagged queueing is supported by the target
         * and by the host adapter then we will enable it
         */
        un->un_tagflags = 0;
        if ((devp->sd_inq->inq_rdf == RDF_SCSI2) && tq_trigger_flag &&
            (un->un_f_arq_enabled == TRUE)) {
            if (scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing",
                1, 1) == 1) {
                un->un_tagflags = FLAG_STAG;
                SD_INFO(SD_LOG_ATTACH_DETACH, un,
                    "sd_unit_attach: un:0x%p tag queueing "
                    "enabled\n", un);
            } else if (scsi_ifgetcap(SD_ADDRESS(un),
                "untagged-qing", 0) == 1) {
                un->un_f_opt_queueing = TRUE;
                un->un_saved_throttle = un->un_throttle =
                    min(un->un_throttle, 3);
            } else {
                un->un_f_opt_queueing = FALSE;
                un->un_saved_throttle = un->un_throttle = 1;
            }
        } else if ((scsi_ifgetcap(SD_ADDRESS(un), "untagged-qing", 0)
            == 1) && (un->un_f_arq_enabled == TRUE)) {
            /* The Host Adapter supports internal queueing. */
            un->un_f_opt_queueing = TRUE;
            un->un_saved_throttle = un->un_throttle =
                min(un->un_throttle, 3);
        } else {
            un->un_f_opt_queueing = FALSE;
            un->un_saved_throttle = un->un_throttle = 1;
            SD_INFO(SD_LOG_ATTACH_DETACH, un,
                "sd_unit_attach: un:0x%p no tag queueing\n", un);
        }

        /*
         * Enable large transfers for SATA/SAS drives
         */
        if (SD_IS_SERIAL(un)) {
            un->un_max_xfer_size =
                ddi_getprop(DDI_DEV_T_ANY, devi, 0,
                sd_max_xfer_size, SD_MAX_XFER_SIZE);
            SD_INFO(SD_LOG_ATTACH_DETACH, un,
                "sd_unit_attach: un:0x%p max transfer "
                "size=0x%x\n", un, un->un_max_xfer_size);

        }

        /* Set up or tear down default wide operations for disks */

        /*
         * Note: Legacy: it may be possible for both "sd_max_xfer_size"
         * and "ssd_max_xfer_size" to exist simultaneously on the same
         * system and be set to different values. In the future this
         * code may need to be updated when the ssd module is
         * obsoleted and removed from the system. (4299588)
         */
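        /*
         * The wide transfer negotiation below applies only to parallel
         * SCSI targets that report SCSI-2 response data format and claim
         * 16- or 32-bit wide bus support in their INQUIRY data; for
         * everything else wide transfers are torn down.
         */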
(4299588) */ 6961 if (SD_IS_PARALLEL_SCSI(un) && 6962 (devp->sd_inq->inq_rdf == RDF_SCSI2) && 6963 (devp->sd_inq->inq_wbus16 || devp->sd_inq->inq_wbus32)) { 6964 if (scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 6965 1, 1) == 1) { 6966 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6967 "sd_unit_attach: un:0x%p Wide Transfer " 6968 "enabled\n", un); 6969 } 6970 6971 /* 6972 * If tagged queuing has also been enabled, then 6973 * enable large xfers 6974 */ 6975 if (un->un_saved_throttle == sd_max_throttle) { 6976 un->un_max_xfer_size = 6977 ddi_getprop(DDI_DEV_T_ANY, devi, 0, 6978 sd_max_xfer_size, SD_MAX_XFER_SIZE); 6979 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6980 "sd_unit_attach: un:0x%p max transfer " 6981 "size=0x%x\n", un, un->un_max_xfer_size); 6982 } 6983 } else { 6984 if (scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 6985 0, 1) == 1) { 6986 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6987 "sd_unit_attach: un:0x%p " 6988 "Wide Transfer disabled\n", un); 6989 } 6990 } 6991 } else { 6992 un->un_tagflags = FLAG_STAG; 6993 un->un_max_xfer_size = ddi_getprop(DDI_DEV_T_ANY, 6994 devi, 0, sd_max_xfer_size, SD_MAX_XFER_SIZE); 6995 } 6996 6997 /* 6998 * If this target supports LUN reset, try to enable it. 6999 */ 7000 if (un->un_f_lun_reset_enabled) { 7001 if (scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 1, 1) == 1) { 7002 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_unit_attach: " 7003 "un:0x%p lun_reset capability set\n", un); 7004 } else { 7005 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_unit_attach: " 7006 "un:0x%p lun-reset capability not set\n", un); 7007 } 7008 } 7009 7010 /* 7011 * Adjust the maximum transfer size. This is to fix 7012 * the problem of partial DMA support on SPARC. Some 7013 * HBA drivers, like aac, have a very small dma_attr_maxxfer 7014 * size, which requires partial DMA support on SPARC. 7015 * In the future the SPARC pci nexus driver may solve 7016 * the problem instead of this fix. 7017 */ 7018 #if defined(__sparc) 7019 max_xfer_size = scsi_ifgetcap(SD_ADDRESS(un), "dma-max", 1); 7020 if ((max_xfer_size > 0) && (max_xfer_size < un->un_max_xfer_size)) { 7021 un->un_max_xfer_size = max_xfer_size; 7022 un->un_partial_dma_supported = 1; 7023 } 7024 #endif 7025 7026 /* 7027 * Set PKT_DMA_PARTIAL flag. 7028 */ 7029 if (un->un_partial_dma_supported == 1) { 7030 un->un_pkt_flags = PKT_DMA_PARTIAL; 7031 } else { 7032 un->un_pkt_flags = 0; 7033 } 7034 7035 /* 7036 * At this point in the attach, we have enough info in the 7037 * soft state to be able to issue commands to the target. 7038 * 7039 * All command paths used below MUST issue their commands as 7040 * SD_PATH_DIRECT. This is important as intermediate layers 7041 * are not all initialized yet (such as PM). 7042 */ 7043 7044 /* 7045 * Send a TEST UNIT READY command to the device. This should clear 7046 * any outstanding UNIT ATTENTION that may be present. 7047 * 7048 * Note: Don't check for success; just track whether there is a reservation. 7049 * This is a throwaway command to clear any unit attentions. 7050 * 7051 * Note: This MUST be the first command issued to the target during 7052 * attach to ensure power on UNIT ATTENTIONS are cleared. 7053 * Pass in flag SD_DONT_RETRY_TUR to prevent the long delays associated 7054 * with attempts at spinning up a device with no media.
7055 */ 7056 if (sd_send_scsi_TEST_UNIT_READY(un, SD_DONT_RETRY_TUR) == EACCES) { 7057 reservation_flag = SD_TARGET_IS_RESERVED; 7058 } 7059 7060 /* 7061 * If the device is NOT a removable media device, attempt to spin 7062 * it up (using the START_STOP_UNIT command) and read its capacity 7063 * (using the READ CAPACITY command). Note, however, that either 7064 * of these could fail and in some cases we would continue with 7065 * the attach despite the failure (see below). 7066 */ 7067 if (un->un_f_descr_format_supported) { 7068 switch (sd_spin_up_unit(un)) { 7069 case 0: 7070 /* 7071 * Spin-up was successful; now try to read the 7072 * capacity. If successful then save the results 7073 * and mark the capacity & lbasize as valid. 7074 */ 7075 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7076 "sd_unit_attach: un:0x%p spin-up successful\n", un); 7077 7078 switch (sd_send_scsi_READ_CAPACITY(un, &capacity, 7079 &lbasize, SD_PATH_DIRECT)) { 7080 case 0: { 7081 if (capacity > DK_MAX_BLOCKS) { 7082 #ifdef _LP64 7083 if (capacity + 1 > 7084 SD_GROUP1_MAX_ADDRESS) { 7085 /* 7086 * Enable descriptor format 7087 * sense data so that we can 7088 * get 64 bit sense data 7089 * fields. 7090 */ 7091 sd_enable_descr_sense(un); 7092 } 7093 #else 7094 /* 32-bit kernels can't handle this */ 7095 scsi_log(SD_DEVINFO(un), 7096 sd_label, CE_WARN, 7097 "disk has %llu blocks, which " 7098 "is too large for a 32-bit " 7099 "kernel", capacity); 7100 7101 #if defined(__i386) || defined(__amd64) 7102 /* 7103 * A 1TB disk was treated as (1T - 512)B 7104 * in the past, so it might have a 7105 * valid VTOC and Solaris partitions; 7106 * we have to allow it to continue to 7107 * work. 7108 */ 7109 if (capacity - 1 > DK_MAX_BLOCKS) 7110 #endif 7111 goto spinup_failed; 7112 #endif 7113 } 7114 7115 /* 7116 * Here it is not necessary to check whether 7117 * the capacity of the device is bigger than 7118 * what the max hba cdb can support, because 7119 * sd_send_scsi_READ_CAPACITY retrieves 7120 * the capacity by sending a USCSI command, which 7121 * is constrained by the max hba cdb. In fact, 7122 * sd_send_scsi_READ_CAPACITY returns 7123 * EINVAL when a bigger cdb than the required 7124 * cdb length is used. That case is handled in 7125 * "case EINVAL" below. 7126 */ 7127 7128 /* 7129 * The following relies on 7130 * sd_send_scsi_READ_CAPACITY never 7131 * returning 0 for capacity and/or lbasize. 7132 */ 7133 sd_update_block_info(un, lbasize, capacity); 7134 7135 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7136 "sd_unit_attach: un:0x%p capacity = %ld " 7137 "blocks; lbasize= %ld.\n", un, 7138 un->un_blockcount, un->un_tgt_blocksize); 7139 7140 break; 7141 } 7142 case EINVAL: 7143 /* 7144 * In the case where the max-cdb-length property 7145 * is smaller than the required CDB length for 7146 * a SCSI device, a target driver can fail to 7147 * attach to that device. 7148 */ 7149 scsi_log(SD_DEVINFO(un), 7150 sd_label, CE_WARN, 7151 "disk capacity is too large " 7152 "for current cdb length"); 7153 goto spinup_failed; 7154 case EACCES: 7155 /* 7156 * Should never get here if the spin-up 7157 * succeeded, but code it in anyway. 7158 * From here, just continue with the attach... 7159 */ 7160 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7161 "sd_unit_attach: un:0x%p " 7162 "sd_send_scsi_READ_CAPACITY " 7163 "returned reservation conflict\n", un); 7164 reservation_flag = SD_TARGET_IS_RESERVED; 7165 break; 7166 default: 7167 /* 7168 * Likewise, should never get here if the 7169 * spin-up succeeded. Just continue with 7170 * the attach...
7171 */ 7172 break; 7173 } 7174 break; 7175 case EACCES: 7176 /* 7177 * Device is reserved by another host. In this case 7178 * we could not spin it up or read the capacity, but 7179 * we continue with the attach anyway. 7180 */ 7181 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7182 "sd_unit_attach: un:0x%p spin-up reservation " 7183 "conflict.\n", un); 7184 reservation_flag = SD_TARGET_IS_RESERVED; 7185 break; 7186 default: 7187 /* Fail the attach if the spin-up failed. */ 7188 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7189 "sd_unit_attach: un:0x%p spin-up failed.", un); 7190 goto spinup_failed; 7191 } 7192 } 7193 7194 /* 7195 * Check to see if this is a MMC drive 7196 */ 7197 if (ISCD(un)) { 7198 sd_set_mmc_caps(un); 7199 } 7200 7201 7202 /* 7203 * Add a zero-length attribute to tell the world we support 7204 * kernel ioctls (for layered drivers) 7205 */ 7206 (void) ddi_prop_create(DDI_DEV_T_NONE, devi, DDI_PROP_CANSLEEP, 7207 DDI_KERNEL_IOCTL, NULL, 0); 7208 7209 /* 7210 * Add a boolean property to tell the world we support 7211 * the B_FAILFAST flag (for layered drivers) 7212 */ 7213 (void) ddi_prop_create(DDI_DEV_T_NONE, devi, DDI_PROP_CANSLEEP, 7214 "ddi-failfast-supported", NULL, 0); 7215 7216 /* 7217 * Initialize power management 7218 */ 7219 mutex_init(&un->un_pm_mutex, NULL, MUTEX_DRIVER, NULL); 7220 cv_init(&un->un_pm_busy_cv, NULL, CV_DRIVER, NULL); 7221 sd_setup_pm(un, devi); 7222 if (un->un_f_pm_is_enabled == FALSE) { 7223 /* 7224 * For performance, point to a jump table that does 7225 * not include pm. 7226 * The direct and priority chains don't change with PM. 7227 * 7228 * Note: this is currently done based on individual device 7229 * capabilities. When an interface for determining system 7230 * power enabled state becomes available, or when additional 7231 * layers are added to the command chain, these values will 7232 * have to be re-evaluated for correctness. 7233 */ 7234 if (un->un_f_non_devbsize_supported) { 7235 un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA_NO_PM; 7236 } else { 7237 un->un_buf_chain_type = SD_CHAIN_INFO_DISK_NO_PM; 7238 } 7239 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD_NO_PM; 7240 } 7241 7242 /* 7243 * This property is set to 0 by HA software to avoid retries 7244 * on a reserved disk. (The preferred property name is 7245 * "retry-on-reservation-conflict") (1189689) 7246 * 7247 * Note: The use of a global here can have unintended consequences. A 7248 * per-instance variable is preferable to match the capabilities of 7249 * different underlying HBAs (4402600) 7250 */ 7251 sd_retry_on_reservation_conflict = ddi_getprop(DDI_DEV_T_ANY, devi, 7252 DDI_PROP_DONTPASS, "retry-on-reservation-conflict", 7253 sd_retry_on_reservation_conflict); 7254 if (sd_retry_on_reservation_conflict != 0) { 7255 sd_retry_on_reservation_conflict = ddi_getprop(DDI_DEV_T_ANY, 7256 devi, DDI_PROP_DONTPASS, sd_resv_conflict_name, 7257 sd_retry_on_reservation_conflict); 7258 } 7259 7260 /* Set up options for QFULL handling. */ 7261 if ((rval = ddi_getprop(DDI_DEV_T_ANY, devi, 0, 7262 "qfull-retries", -1)) != -1) { 7263 (void) scsi_ifsetcap(SD_ADDRESS(un), "qfull-retries", 7264 rval, 1); 7265 } 7266 if ((rval = ddi_getprop(DDI_DEV_T_ANY, devi, 0, 7267 "qfull-retry-interval", -1)) != -1) { 7268 (void) scsi_ifsetcap(SD_ADDRESS(un), "qfull-retry-interval", 7269 rval, 1); 7270 } 7271 7272 /* 7273 * This just prints a message that announces the existence of the 7274 * device.
The message is always printed in the system logfile, but 7275 * only appears on the console if the system is booted with the 7276 * -v (verbose) argument. 7277 */ 7278 ddi_report_dev(devi); 7279 7280 un->un_mediastate = DKIO_NONE; 7281 7282 cmlb_alloc_handle(&un->un_cmlbhandle); 7283 7284 #if defined(__i386) || defined(__amd64) 7285 /* 7286 * On x86, compensate for off-by-1 legacy error 7287 */ 7288 if (!un->un_f_has_removable_media && !un->un_f_is_hotpluggable && 7289 (lbasize == un->un_sys_blocksize)) 7290 offbyone = CMLB_OFF_BY_ONE; 7291 #endif 7292 7293 if (cmlb_attach(devi, &sd_tgops, (int)devp->sd_inq->inq_dtype, 7294 un->un_f_has_removable_media, un->un_f_is_hotpluggable, 7295 un->un_node_type, offbyone, un->un_cmlbhandle, 7296 (void *)SD_PATH_DIRECT) != 0) { 7297 goto cmlb_attach_failed; 7298 } 7299 7300 7301 /* 7302 * Read and validate the device's geometry (i.e., disk label). 7303 * A new unformatted drive will not have a valid geometry, but 7304 * the driver needs to successfully attach to this device so 7305 * the drive can be formatted via ioctls. 7306 */ 7307 geom_label_valid = (cmlb_validate(un->un_cmlbhandle, 0, 7308 (void *)SD_PATH_DIRECT) == 0) ? 1 : 0; 7309 7310 mutex_enter(SD_MUTEX(un)); 7311 7312 /* 7313 * Read and initialize the devid for the unit. 7314 */ 7315 if (un->un_f_devid_supported) { 7316 sd_register_devid(un, devi, reservation_flag); 7317 } 7318 mutex_exit(SD_MUTEX(un)); 7319 7320 #if (defined(__fibre)) 7321 /* 7322 * Register callbacks for fibre only. You can't do this solely 7323 * on the basis of the devid_type because this is HBA specific. 7324 * We need to query our HBA capabilities to find out whether to 7325 * register or not. 7326 */ 7327 if (un->un_f_is_fibre) { 7328 if (strcmp(un->un_node_type, DDI_NT_BLOCK_CHAN)) { 7329 sd_init_event_callbacks(un); 7330 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7331 "sd_unit_attach: un:0x%p event callbacks inserted", 7332 un); 7333 } 7334 } 7335 #endif 7336 7337 if (un->un_f_opt_disable_cache == TRUE) { 7338 /* 7339 * Disable both read cache and write cache. This is 7340 * the historic behavior of the keywords in the config file. 7341 */ 7342 if (sd_cache_control(un, SD_CACHE_DISABLE, SD_CACHE_DISABLE) != 7343 0) { 7344 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 7345 "sd_unit_attach: un:0x%p Could not disable " 7346 "caching", un); 7347 goto devid_failed; 7348 } 7349 } 7350 7351 /* 7352 * Check the value of the WCE bit now and 7353 * set un_f_write_cache_enabled accordingly. 7354 */ 7355 (void) sd_get_write_cache_enabled(un, &wc_enabled); 7356 mutex_enter(SD_MUTEX(un)); 7357 un->un_f_write_cache_enabled = (wc_enabled != 0); 7358 mutex_exit(SD_MUTEX(un)); 7359 7360 /* 7361 * Check the value of the NV_SUP bit and set 7362 * un_f_suppress_cache_flush accordingly. 7363 */ 7364 sd_get_nv_sup(un); 7365 7366 /* 7367 * Find out what type of reservation this disk supports. 7368 */ 7369 switch (sd_send_scsi_PERSISTENT_RESERVE_IN(un, SD_READ_KEYS, 0, NULL)) { 7370 case 0: 7371 /* 7372 * SCSI-3 reservations are supported. 7373 */ 7374 un->un_reservation_type = SD_SCSI3_RESERVATION; 7375 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7376 "sd_unit_attach: un:0x%p SCSI-3 reservations\n", un); 7377 break; 7378 case ENOTSUP: 7379 /* 7380 * The PERSISTENT RESERVE IN command would not be recognized by 7381 * a SCSI-2 device, so assume the reservation type is SCSI-2.
7382 */ 7383 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7384 "sd_unit_attach: un:0x%p SCSI-2 reservations\n", un); 7385 un->un_reservation_type = SD_SCSI2_RESERVATION; 7386 break; 7387 default: 7388 /* 7389 * Default to SCSI-3 reservations. 7390 */ 7391 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7392 "sd_unit_attach: un:0x%p default SCSI3 reservations\n", un); 7393 un->un_reservation_type = SD_SCSI3_RESERVATION; 7394 break; 7395 } 7396 7397 /* 7398 * Set the pstat and error stat values here, so data obtained during the 7399 * previous attach-time routines is available. 7400 * 7401 * Note: This is a critical sequence that needs to be maintained: 7402 * 1) Instantiate the kstats before any routines using the iopath 7403 * (i.e. sd_send_scsi_cmd). 7404 * 2) Initialize the error stats (sd_set_errstats) and partition 7405 * stats (sd_set_pstats) here, following 7406 * cmlb_validate_geometry(), sd_register_devid(), and 7407 * sd_cache_control(). 7408 */ 7409 7410 if (un->un_f_pkstats_enabled && geom_label_valid) { 7411 sd_set_pstats(un); 7412 SD_TRACE(SD_LOG_IO_PARTITION, un, 7413 "sd_unit_attach: un:0x%p pstats created and set\n", un); 7414 } 7415 7416 sd_set_errstats(un); 7417 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7418 "sd_unit_attach: un:0x%p errstats set\n", un); 7419 7420 7421 /* 7422 * After successfully attaching an instance, we record the information 7423 * of how many luns have been attached on the relative target and 7424 * controller for parallel SCSI. This information is used when sd tries 7425 * to set the tagged queuing capability in the HBA. 7426 */ 7427 if (SD_IS_PARALLEL_SCSI(un) && (tgt >= 0) && (tgt < NTARGETS_WIDE)) { 7428 sd_scsi_update_lun_on_target(pdip, tgt, SD_SCSI_LUN_ATTACH); 7429 } 7430 7431 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7432 "sd_unit_attach: un:0x%p exit success\n", un); 7433 7434 return (DDI_SUCCESS); 7435 7436 /* 7437 * An error occurred during the attach; clean up & return failure. 7438 */ 7439 7440 devid_failed: 7441 7442 setup_pm_failed: 7443 ddi_remove_minor_node(devi, NULL); 7444 7445 cmlb_attach_failed: 7446 /* 7447 * Cleanup from the scsi_ifsetcap() calls (437868) 7448 */ 7449 (void) scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 0, 1); 7450 (void) scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 0, 1); 7451 7452 /* 7453 * Refer to the comments on setting tagged-qing at the beginning of 7454 * sd_unit_attach. We can only disable tagged queuing when there is 7455 * no lun attached on the target. 7456 */ 7457 if (sd_scsi_get_target_lun_count(pdip, tgt) < 1) { 7458 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1); 7459 } 7460 7461 if (un->un_f_is_fibre == FALSE) { 7462 (void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 0, 1); 7463 } 7464 7465 spinup_failed: 7466 7467 mutex_enter(SD_MUTEX(un)); 7468 7469 /* Cancel callback for SD_PATH_DIRECT_PRIORITY cmd.
restart */ 7470 if (un->un_direct_priority_timeid != NULL) { 7471 timeout_id_t temp_id = un->un_direct_priority_timeid; 7472 un->un_direct_priority_timeid = NULL; 7473 mutex_exit(SD_MUTEX(un)); 7474 (void) untimeout(temp_id); 7475 mutex_enter(SD_MUTEX(un)); 7476 } 7477 7478 /* Cancel any pending start/stop timeouts */ 7479 if (un->un_startstop_timeid != NULL) { 7480 timeout_id_t temp_id = un->un_startstop_timeid; 7481 un->un_startstop_timeid = NULL; 7482 mutex_exit(SD_MUTEX(un)); 7483 (void) untimeout(temp_id); 7484 mutex_enter(SD_MUTEX(un)); 7485 } 7486 7487 /* Cancel any pending reset-throttle timeouts */ 7488 if (un->un_reset_throttle_timeid != NULL) { 7489 timeout_id_t temp_id = un->un_reset_throttle_timeid; 7490 un->un_reset_throttle_timeid = NULL; 7491 mutex_exit(SD_MUTEX(un)); 7492 (void) untimeout(temp_id); 7493 mutex_enter(SD_MUTEX(un)); 7494 } 7495 7496 /* Cancel any pending retry timeouts */ 7497 if (un->un_retry_timeid != NULL) { 7498 timeout_id_t temp_id = un->un_retry_timeid; 7499 un->un_retry_timeid = NULL; 7500 mutex_exit(SD_MUTEX(un)); 7501 (void) untimeout(temp_id); 7502 mutex_enter(SD_MUTEX(un)); 7503 } 7504 7505 /* Cancel any pending delayed cv broadcast timeouts */ 7506 if (un->un_dcvb_timeid != NULL) { 7507 timeout_id_t temp_id = un->un_dcvb_timeid; 7508 un->un_dcvb_timeid = NULL; 7509 mutex_exit(SD_MUTEX(un)); 7510 (void) untimeout(temp_id); 7511 mutex_enter(SD_MUTEX(un)); 7512 } 7513 7514 mutex_exit(SD_MUTEX(un)); 7515 7516 /* There should not be any in-progress I/O so ASSERT this check */ 7517 ASSERT(un->un_ncmds_in_transport == 0); 7518 ASSERT(un->un_ncmds_in_driver == 0); 7519 7520 /* Do not free the softstate if the callback routine is active */ 7521 sd_sync_with_callback(un); 7522 7523 /* 7524 * Partition stats apparently are not used with removables. These would 7525 * not have been created during attach, so no need to clean them up... 7526 */ 7527 if (un->un_errstats != NULL) { 7528 kstat_delete(un->un_errstats); 7529 un->un_errstats = NULL; 7530 } 7531 7532 create_errstats_failed: 7533 7534 if (un->un_stats != NULL) { 7535 kstat_delete(un->un_stats); 7536 un->un_stats = NULL; 7537 } 7538 7539 ddi_xbuf_attr_unregister_devinfo(un->un_xbuf_attr, devi); 7540 ddi_xbuf_attr_destroy(un->un_xbuf_attr); 7541 7542 ddi_prop_remove_all(devi); 7543 sema_destroy(&un->un_semoclose); 7544 cv_destroy(&un->un_state_cv); 7545 7546 getrbuf_failed: 7547 7548 sd_free_rqs(un); 7549 7550 alloc_rqs_failed: 7551 7552 devp->sd_private = NULL; 7553 bzero(un, sizeof (struct sd_lun)); /* Clear any stale data! */ 7554 7555 get_softstate_failed: 7556 /* 7557 * Note: the man pages are unclear as to whether or not doing a 7558 * ddi_soft_state_free(sd_state, instance) is the right way to 7559 * clean up after the ddi_soft_state_zalloc() if the subsequent 7560 * ddi_get_soft_state() fails. The implication seems to be 7561 * that the get_soft_state cannot fail if the zalloc succeeds. 7562 */ 7563 ddi_soft_state_free(sd_state, instance); 7564 7565 probe_failed: 7566 scsi_unprobe(devp); 7567 7568 return (DDI_FAILURE); 7569 } 7570 7571 7572 /* 7573 * Function: sd_unit_detach 7574 * 7575 * Description: Performs DDI_DETACH processing for sddetach(). 
7576 * 7577 * Return Code: DDI_SUCCESS 7578 * DDI_FAILURE 7579 * 7580 * Context: Kernel thread context 7581 */ 7582 7583 static int 7584 sd_unit_detach(dev_info_t *devi) 7585 { 7586 struct scsi_device *devp; 7587 struct sd_lun *un; 7588 int i; 7589 int tgt; 7590 dev_t dev; 7591 dev_info_t *pdip = ddi_get_parent(devi); 7592 int instance = ddi_get_instance(devi); 7593 7594 mutex_enter(&sd_detach_mutex); 7595 7596 /* 7597 * Fail the detach for any of the following: 7598 * - Unable to get the sd_lun struct for the instance 7599 * - A layered driver has an outstanding open on the instance 7600 * - Another thread is already detaching this instance 7601 * - Another thread is currently performing an open 7602 */ 7603 devp = ddi_get_driver_private(devi); 7604 if ((devp == NULL) || 7605 ((un = (struct sd_lun *)devp->sd_private) == NULL) || 7606 (un->un_ncmds_in_driver != 0) || (un->un_layer_count != 0) || 7607 (un->un_detach_count != 0) || (un->un_opens_in_progress != 0)) { 7608 mutex_exit(&sd_detach_mutex); 7609 return (DDI_FAILURE); 7610 } 7611 7612 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_unit_detach: entry 0x%p\n", un); 7613 7614 /* 7615 * Mark this instance as currently in a detach, to inhibit any 7616 * opens from a layered driver. 7617 */ 7618 un->un_detach_count++; 7619 mutex_exit(&sd_detach_mutex); 7620 7621 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS, 7622 SCSI_ADDR_PROP_TARGET, -1); 7623 7624 dev = sd_make_device(SD_DEVINFO(un)); 7625 7626 #ifndef lint 7627 _NOTE(COMPETING_THREADS_NOW); 7628 #endif 7629 7630 mutex_enter(SD_MUTEX(un)); 7631 7632 /* 7633 * Fail the detach if there are any outstanding layered 7634 * opens on this device. 7635 */ 7636 for (i = 0; i < NDKMAP; i++) { 7637 if (un->un_ocmap.lyropen[i] != 0) { 7638 goto err_notclosed; 7639 } 7640 } 7641 7642 /* 7643 * Verify there are NO outstanding commands issued to this device. 7644 * ie, un_ncmds_in_transport == 0. 7645 * It's possible to have outstanding commands through the physio 7646 * code path, even though everything's closed. 7647 */ 7648 if ((un->un_ncmds_in_transport != 0) || (un->un_retry_timeid != NULL) || 7649 (un->un_direct_priority_timeid != NULL) || 7650 (un->un_state == SD_STATE_RWAIT)) { 7651 mutex_exit(SD_MUTEX(un)); 7652 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 7653 "sd_dr_detach: Detach failure due to outstanding cmds\n"); 7654 goto err_stillbusy; 7655 } 7656 7657 /* 7658 * If we have the device reserved, release the reservation. 7659 */ 7660 if ((un->un_resvd_status & SD_RESERVE) && 7661 !(un->un_resvd_status & SD_LOST_RESERVE)) { 7662 mutex_exit(SD_MUTEX(un)); 7663 /* 7664 * Note: sd_reserve_release sends a command to the device 7665 * via the sd_ioctlcmd() path, and can sleep. 7666 */ 7667 if (sd_reserve_release(dev, SD_RELEASE) != 0) { 7668 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 7669 "sd_dr_detach: Cannot release reservation \n"); 7670 } 7671 } else { 7672 mutex_exit(SD_MUTEX(un)); 7673 } 7674 7675 /* 7676 * Untimeout any reserve recover, throttle reset, restart unit 7677 * and delayed broadcast timeout threads. Protect the timeout pointer 7678 * from getting nulled by their callback functions. 
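 *
 * Each cancellation below follows the same idiom, sketched here with
 * un_foo_timeid as a stand-in for the actual timeout id field:
 * snapshot the id and NULL the field while holding SD_MUTEX, then
 * drop the mutex around the untimeout(9F) call so that a callback
 * which is already running can acquire the mutex and complete:
 *
 *	if (un->un_foo_timeid != NULL) {
 *		timeout_id_t temp_id = un->un_foo_timeid;
 *		un->un_foo_timeid = NULL;
 *		mutex_exit(SD_MUTEX(un));
 *		(void) untimeout(temp_id);
 *		mutex_enter(SD_MUTEX(un));
 *	}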
7679 */ 7680 mutex_enter(SD_MUTEX(un)); 7681 if (un->un_resvd_timeid != NULL) { 7682 timeout_id_t temp_id = un->un_resvd_timeid; 7683 un->un_resvd_timeid = NULL; 7684 mutex_exit(SD_MUTEX(un)); 7685 (void) untimeout(temp_id); 7686 mutex_enter(SD_MUTEX(un)); 7687 } 7688 7689 if (un->un_reset_throttle_timeid != NULL) { 7690 timeout_id_t temp_id = un->un_reset_throttle_timeid; 7691 un->un_reset_throttle_timeid = NULL; 7692 mutex_exit(SD_MUTEX(un)); 7693 (void) untimeout(temp_id); 7694 mutex_enter(SD_MUTEX(un)); 7695 } 7696 7697 if (un->un_startstop_timeid != NULL) { 7698 timeout_id_t temp_id = un->un_startstop_timeid; 7699 un->un_startstop_timeid = NULL; 7700 mutex_exit(SD_MUTEX(un)); 7701 (void) untimeout(temp_id); 7702 mutex_enter(SD_MUTEX(un)); 7703 } 7704 7705 if (un->un_dcvb_timeid != NULL) { 7706 timeout_id_t temp_id = un->un_dcvb_timeid; 7707 un->un_dcvb_timeid = NULL; 7708 mutex_exit(SD_MUTEX(un)); 7709 (void) untimeout(temp_id); 7710 } else { 7711 mutex_exit(SD_MUTEX(un)); 7712 } 7713 7714 /* Remove any pending reservation reclaim requests for this device */ 7715 sd_rmv_resv_reclaim_req(dev); 7716 7717 mutex_enter(SD_MUTEX(un)); 7718 7719 /* Cancel any pending callbacks for SD_PATH_DIRECT_PRIORITY cmd. */ 7720 if (un->un_direct_priority_timeid != NULL) { 7721 timeout_id_t temp_id = un->un_direct_priority_timeid; 7722 un->un_direct_priority_timeid = NULL; 7723 mutex_exit(SD_MUTEX(un)); 7724 (void) untimeout(temp_id); 7725 mutex_enter(SD_MUTEX(un)); 7726 } 7727 7728 /* Cancel any active multi-host disk watch thread requests */ 7729 if (un->un_mhd_token != NULL) { 7730 mutex_exit(SD_MUTEX(un)); 7731 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_mhd_token)); 7732 if (scsi_watch_request_terminate(un->un_mhd_token, 7733 SCSI_WATCH_TERMINATE_NOWAIT)) { 7734 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 7735 "sd_dr_detach: Cannot cancel mhd watch request\n"); 7736 /* 7737 * Note: We are returning here after having removed 7738 * some driver timeouts above. This is consistent with 7739 * the legacy implementation but perhaps the watch 7740 * terminate call should be made with the wait flag set. 7741 */ 7742 goto err_stillbusy; 7743 } 7744 mutex_enter(SD_MUTEX(un)); 7745 un->un_mhd_token = NULL; 7746 } 7747 7748 if (un->un_swr_token != NULL) { 7749 mutex_exit(SD_MUTEX(un)); 7750 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_swr_token)); 7751 if (scsi_watch_request_terminate(un->un_swr_token, 7752 SCSI_WATCH_TERMINATE_NOWAIT)) { 7753 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 7754 "sd_dr_detach: Cannot cancel swr watch request\n"); 7755 /* 7756 * Note: We are returning here after having removed 7757 * some driver timeouts above. This is consistent with 7758 * the legacy implementation but perhaps the watch 7759 * terminate call should be made with the wait flag set. 7760 */ 7761 goto err_stillbusy; 7762 } 7763 mutex_enter(SD_MUTEX(un)); 7764 un->un_swr_token = NULL; 7765 } 7766 7767 mutex_exit(SD_MUTEX(un)); 7768 7769 /* 7770 * Clear any scsi_reset_notifies. We cancel the reset notification 7771 * even if we have not registered one. 7772 * Note: The sd_mhd_reset_notify_cb() fn tries to acquire SD_MUTEX! 7773 */ 7774 (void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_CANCEL, 7775 sd_mhd_reset_notify_cb, (caddr_t)un); 7776 7777 /* 7778 * Protect the timeout pointers from getting nulled by 7779 * their callback functions during the cancellation process. 7780 * In such a scenario untimeout could be invoked with a null value.
7781 */ 7782 _NOTE(NO_COMPETING_THREADS_NOW); 7783 7784 mutex_enter(&un->un_pm_mutex); 7785 if (un->un_pm_idle_timeid != NULL) { 7786 timeout_id_t temp_id = un->un_pm_idle_timeid; 7787 un->un_pm_idle_timeid = NULL; 7788 mutex_exit(&un->un_pm_mutex); 7789 7790 /* 7791 * Timeout is active; cancel it. 7792 * Note that it'll never be active on a device 7793 * that does not support PM; therefore we don't 7794 * have to check before calling pm_idle_component. 7795 */ 7796 (void) untimeout(temp_id); 7797 (void) pm_idle_component(SD_DEVINFO(un), 0); 7798 mutex_enter(&un->un_pm_mutex); 7799 } 7800 7801 /* 7802 * Check whether there is already a timeout scheduled for power 7803 * management. If so, then don't lower the power here; that's 7804 * the timeout handler's job. 7805 */ 7806 if (un->un_pm_timeid != NULL) { 7807 timeout_id_t temp_id = un->un_pm_timeid; 7808 un->un_pm_timeid = NULL; 7809 mutex_exit(&un->un_pm_mutex); 7810 /* 7811 * Timeout is active; cancel it. 7812 * Note that it'll never be active on a device 7813 * that does not support PM; therefore we don't 7814 * have to check before calling pm_idle_component. 7815 */ 7816 (void) untimeout(temp_id); 7817 (void) pm_idle_component(SD_DEVINFO(un), 0); 7818 7819 } else { 7820 mutex_exit(&un->un_pm_mutex); 7821 if ((un->un_f_pm_is_enabled == TRUE) && 7822 (pm_lower_power(SD_DEVINFO(un), 0, SD_SPINDLE_OFF) != 7823 DDI_SUCCESS)) { 7824 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 7825 "sd_dr_detach: Lower power request failed, ignoring.\n"); 7826 /* 7827 * Fix for bug: 4297749, item # 13 7828 * The above test now includes a check to see if PM is 7829 * supported by this device before calling 7830 * pm_lower_power(). 7831 * Note, the following is not dead code. The call to 7832 * pm_lower_power above will generate a call back into 7833 * our sdpower routine which might result in a timeout 7834 * handler getting activated. Therefore the following 7835 * code is valid and necessary. 7836 */ 7837 mutex_enter(&un->un_pm_mutex); 7838 if (un->un_pm_timeid != NULL) { 7839 timeout_id_t temp_id = un->un_pm_timeid; 7840 un->un_pm_timeid = NULL; 7841 mutex_exit(&un->un_pm_mutex); 7842 (void) untimeout(temp_id); 7843 (void) pm_idle_component(SD_DEVINFO(un), 0); 7844 } else { 7845 mutex_exit(&un->un_pm_mutex); 7846 } 7847 } 7848 } 7849 7850 /* 7851 * Cleanup from the scsi_ifsetcap() calls (437868) 7852 * Relocated here from above to be after the call to 7853 * pm_lower_power, which was getting errors. 7854 */ 7855 (void) scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 0, 1); 7856 (void) scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 0, 1); 7857 7858 /* 7859 * Currently, tagged queuing is supported per target by the HBA. 7860 * Setting this per lun instance actually sets the capability of this 7861 * target in the HBA, which affects those luns already attached on the 7862 * same target. So during detach, we can disable this capability 7863 * only when this is the last lun left on this target. By doing 7864 * this, we assume a target has the same tagged queuing capability 7865 * for every lun. The condition can be removed when the HBA is changed 7866 * to support a per-lun tagged queuing capability.
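 *
 * For example (illustrative): with luns 0 and 1 of a target both
 * attached, detaching lun 1 leaves tagged-qing untouched; only the
 * detach of the last remaining lun clears the capability, as the
 * lun count check below shows.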
7867 */ 7868 if (sd_scsi_get_target_lun_count(pdip, tgt) <= 1) { 7869 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1); 7870 } 7871 7872 if (un->un_f_is_fibre == FALSE) { 7873 (void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 0, 1); 7874 } 7875 7876 /* 7877 * Remove any event callbacks, fibre only 7878 */ 7879 if (un->un_f_is_fibre == TRUE) { 7880 if ((un->un_insert_event != NULL) && 7881 (ddi_remove_event_handler(un->un_insert_cb_id) != 7882 DDI_SUCCESS)) { 7883 /* 7884 * Note: We are returning here after having done 7885 * substantial cleanup above. This is consistent 7886 * with the legacy implementation but this may not 7887 * be the right thing to do. 7888 */ 7889 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 7890 "sd_dr_detach: Cannot cancel insert event\n"); 7891 goto err_remove_event; 7892 } 7893 un->un_insert_event = NULL; 7894 7895 if ((un->un_remove_event != NULL) && 7896 (ddi_remove_event_handler(un->un_remove_cb_id) != 7897 DDI_SUCCESS)) { 7898 /* 7899 * Note: We are returning here after having done 7900 * substantial cleanup above. This is consistent 7901 * with the legacy implementation but this may not 7902 * be the right thing to do. 7903 */ 7904 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 7905 "sd_dr_detach: Cannot cancel remove event\n"); 7906 goto err_remove_event; 7907 } 7908 un->un_remove_event = NULL; 7909 } 7910 7911 /* Do not free the softstate if the callback routine is active */ 7912 sd_sync_with_callback(un); 7913 7914 cmlb_detach(un->un_cmlbhandle, (void *)SD_PATH_DIRECT); 7915 cmlb_free_handle(&un->un_cmlbhandle); 7916 7917 /* 7918 * Hold the detach mutex here, to make sure that no other threads 7919 * can ever access a (partially) freed soft state structure. 7920 */ 7921 mutex_enter(&sd_detach_mutex); 7922 7923 /* 7924 * Clean up the soft state struct. 7925 * Cleanup is done in reverse order of allocs/inits. 7926 * At this point there should be no competing threads anymore. 7927 */ 7928 7929 /* Unregister and free device id. */ 7930 ddi_devid_unregister(devi); 7931 if (un->un_devid) { 7932 ddi_devid_free(un->un_devid); 7933 un->un_devid = NULL; 7934 } 7935 7936 /* 7937 * Destroy wmap cache if it exists. 7938 */ 7939 if (un->un_wm_cache != NULL) { 7940 kmem_cache_destroy(un->un_wm_cache); 7941 un->un_wm_cache = NULL; 7942 } 7943 7944 /* 7945 * kstat cleanup is done in detach for all device types (4363169). 7946 * We do not want to fail detach if the device kstats are not deleted 7947 * since there is confusion about the devo_refcnt for the device. 7948 * We just delete the kstats and let detach complete successfully. 7949 */ 7950 if (un->un_stats != NULL) { 7951 kstat_delete(un->un_stats); 7952 un->un_stats = NULL; 7953 } 7954 if (un->un_errstats != NULL) { 7955 kstat_delete(un->un_errstats); 7956 un->un_errstats = NULL; 7957 } 7958 7959 /* Remove partition stats */ 7960 if (un->un_f_pkstats_enabled) { 7961 for (i = 0; i < NSDMAP; i++) { 7962 if (un->un_pstats[i] != NULL) { 7963 kstat_delete(un->un_pstats[i]); 7964 un->un_pstats[i] = NULL; 7965 } 7966 } 7967 } 7968 7969 /* Remove xbuf registration */ 7970 ddi_xbuf_attr_unregister_devinfo(un->un_xbuf_attr, devi); 7971 ddi_xbuf_attr_destroy(un->un_xbuf_attr); 7972 7973 /* Remove driver properties */ 7974 ddi_prop_remove_all(devi); 7975 7976 mutex_destroy(&un->un_pm_mutex); 7977 cv_destroy(&un->un_pm_busy_cv); 7978 7979 cv_destroy(&un->un_wcc_cv); 7980 7981 /* Open/close semaphore */ 7982 sema_destroy(&un->un_semoclose); 7983 7984 /* Removable media condvar.
*/ 7985 cv_destroy(&un->un_state_cv); 7986 7987 /* Suspend/resume condvar. */ 7988 cv_destroy(&un->un_suspend_cv); 7989 cv_destroy(&un->un_disk_busy_cv); 7990 7991 sd_free_rqs(un); 7992 7993 /* Free up soft state */ 7994 devp->sd_private = NULL; 7995 7996 bzero(un, sizeof (struct sd_lun)); 7997 ddi_soft_state_free(sd_state, instance); 7998 7999 mutex_exit(&sd_detach_mutex); 8000 8001 /* This frees up the INQUIRY data associated with the device. */ 8002 scsi_unprobe(devp); 8003 8004 /* 8005 * After successfully detaching an instance, we update the information 8006 * of how many luns have been attached in the relative target and 8007 * controller for parallel SCSI. This information is used when sd tries 8008 * to set the tagged queuing capability in HBA. 8009 * Since un has been released, we can't use SD_IS_PARALLEL_SCSI(un) to 8010 * check if the device is parallel SCSI. However, we don't need to 8011 * check here because we've already checked during attach. No device 8012 * that is not parallel SCSI is in the chain. 8013 */ 8014 if ((tgt >= 0) && (tgt < NTARGETS_WIDE)) { 8015 sd_scsi_update_lun_on_target(pdip, tgt, SD_SCSI_LUN_DETACH); 8016 } 8017 8018 return (DDI_SUCCESS); 8019 8020 err_notclosed: 8021 mutex_exit(SD_MUTEX(un)); 8022 8023 err_stillbusy: 8024 _NOTE(NO_COMPETING_THREADS_NOW); 8025 8026 err_remove_event: 8027 mutex_enter(&sd_detach_mutex); 8028 un->un_detach_count--; 8029 mutex_exit(&sd_detach_mutex); 8030 8031 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_unit_detach: exit failure\n"); 8032 return (DDI_FAILURE); 8033 } 8034 8035 8036 /* 8037 * Function: sd_create_errstats 8038 * 8039 * Description: This routine instantiates the device error stats. 8040 * 8041 * Note: During attach the stats are instantiated first so they are 8042 * available for attach-time routines that utilize the driver 8043 * iopath to send commands to the device. The stats are initialized 8044 * separately so data obtained during some attach-time routines is 8045 * available. 
(4362483) 8046 * 8047 * Arguments: un - driver soft state (unit) structure 8048 * instance - driver instance 8049 * 8050 * Context: Kernel thread context 8051 */ 8052 8053 static void 8054 sd_create_errstats(struct sd_lun *un, int instance) 8055 { 8056 struct sd_errstats *stp; 8057 char kstatmodule_err[KSTAT_STRLEN]; 8058 char kstatname[KSTAT_STRLEN]; 8059 int ndata = (sizeof (struct sd_errstats) / sizeof (kstat_named_t)); 8060 8061 ASSERT(un != NULL); 8062 8063 if (un->un_errstats != NULL) { 8064 return; 8065 } 8066 8067 (void) snprintf(kstatmodule_err, sizeof (kstatmodule_err), 8068 "%serr", sd_label); 8069 (void) snprintf(kstatname, sizeof (kstatname), 8070 "%s%d,err", sd_label, instance); 8071 8072 un->un_errstats = kstat_create(kstatmodule_err, instance, kstatname, 8073 "device_error", KSTAT_TYPE_NAMED, ndata, KSTAT_FLAG_PERSISTENT); 8074 8075 if (un->un_errstats == NULL) { 8076 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8077 "sd_create_errstats: Failed kstat_create\n"); 8078 return; 8079 } 8080 8081 stp = (struct sd_errstats *)un->un_errstats->ks_data; 8082 kstat_named_init(&stp->sd_softerrs, "Soft Errors", 8083 KSTAT_DATA_UINT32); 8084 kstat_named_init(&stp->sd_harderrs, "Hard Errors", 8085 KSTAT_DATA_UINT32); 8086 kstat_named_init(&stp->sd_transerrs, "Transport Errors", 8087 KSTAT_DATA_UINT32); 8088 kstat_named_init(&stp->sd_vid, "Vendor", 8089 KSTAT_DATA_CHAR); 8090 kstat_named_init(&stp->sd_pid, "Product", 8091 KSTAT_DATA_CHAR); 8092 kstat_named_init(&stp->sd_revision, "Revision", 8093 KSTAT_DATA_CHAR); 8094 kstat_named_init(&stp->sd_serial, "Serial No", 8095 KSTAT_DATA_CHAR); 8096 kstat_named_init(&stp->sd_capacity, "Size", 8097 KSTAT_DATA_ULONGLONG); 8098 kstat_named_init(&stp->sd_rq_media_err, "Media Error", 8099 KSTAT_DATA_UINT32); 8100 kstat_named_init(&stp->sd_rq_ntrdy_err, "Device Not Ready", 8101 KSTAT_DATA_UINT32); 8102 kstat_named_init(&stp->sd_rq_nodev_err, "No Device", 8103 KSTAT_DATA_UINT32); 8104 kstat_named_init(&stp->sd_rq_recov_err, "Recoverable", 8105 KSTAT_DATA_UINT32); 8106 kstat_named_init(&stp->sd_rq_illrq_err, "Illegal Request", 8107 KSTAT_DATA_UINT32); 8108 kstat_named_init(&stp->sd_rq_pfa_err, "Predictive Failure Analysis", 8109 KSTAT_DATA_UINT32); 8110 8111 un->un_errstats->ks_private = un; 8112 un->un_errstats->ks_update = nulldev; 8113 8114 kstat_install(un->un_errstats); 8115 } 8116 8117 8118 /* 8119 * Function: sd_set_errstats 8120 * 8121 * Description: This routine sets the value of the vendor id, product id, 8122 * revision, serial number, and capacity device error stats. 8123 * 8124 * Note: During attach the stats are instantiated first so they are 8125 * available for attach-time routines that utilize the driver 8126 * iopath to send commands to the device. The stats are initialized 8127 * separately so data obtained during some attach-time routines is 8128 * available. 
(4362483) 8129 * 8130 * Arguments: un - driver soft state (unit) structure 8131 * 8132 * Context: Kernel thread context 8133 */ 8134 8135 static void 8136 sd_set_errstats(struct sd_lun *un) 8137 { 8138 struct sd_errstats *stp; 8139 8140 ASSERT(un != NULL); 8141 ASSERT(un->un_errstats != NULL); 8142 stp = (struct sd_errstats *)un->un_errstats->ks_data; 8143 ASSERT(stp != NULL); 8144 (void) strncpy(stp->sd_vid.value.c, un->un_sd->sd_inq->inq_vid, 8); 8145 (void) strncpy(stp->sd_pid.value.c, un->un_sd->sd_inq->inq_pid, 16); 8146 (void) strncpy(stp->sd_revision.value.c, 8147 un->un_sd->sd_inq->inq_revision, 4); 8148 8149 /* 8150 * All the errstats are persistent across detach/attach, 8151 * so reset all the errstats here in case a disk drive 8152 * has been hot-replaced, except for Sun qualified drives 8153 * whose serial number has not changed. 8154 */ 8155 if ((bcmp(&SD_INQUIRY(un)->inq_pid[9], "SUN", 3) != 0) || 8156 (bcmp(&SD_INQUIRY(un)->inq_serial, stp->sd_serial.value.c, 8157 sizeof (SD_INQUIRY(un)->inq_serial)) != 0)) { 8158 stp->sd_softerrs.value.ui32 = 0; 8159 stp->sd_harderrs.value.ui32 = 0; 8160 stp->sd_transerrs.value.ui32 = 0; 8161 stp->sd_rq_media_err.value.ui32 = 0; 8162 stp->sd_rq_ntrdy_err.value.ui32 = 0; 8163 stp->sd_rq_nodev_err.value.ui32 = 0; 8164 stp->sd_rq_recov_err.value.ui32 = 0; 8165 stp->sd_rq_illrq_err.value.ui32 = 0; 8166 stp->sd_rq_pfa_err.value.ui32 = 0; 8167 } 8168 8169 /* 8170 * Set the "Serial No" kstat for Sun qualified drives (indicated by 8171 * "SUN" in bytes 25-27 of the inquiry data (bytes 9-11 of the pid) 8172 * (4376302)) 8173 */ 8174 if (bcmp(&SD_INQUIRY(un)->inq_pid[9], "SUN", 3) == 0) { 8175 bcopy(&SD_INQUIRY(un)->inq_serial, stp->sd_serial.value.c, 8176 sizeof (SD_INQUIRY(un)->inq_serial)); 8177 } 8178 8179 if (un->un_f_blockcount_is_valid != TRUE) { 8180 /* 8181 * Set capacity error stat to 0 for no media. This ensures 8182 * a valid capacity is displayed in response to 'iostat -E' 8183 * when no media is present in the device. 8184 */ 8185 stp->sd_capacity.value.ui64 = 0; 8186 } else { 8187 /* 8188 * Multiply un_blockcount by un->un_sys_blocksize to get 8189 * capacity. 8190 * 8191 * Note: for non-512 blocksize devices "un_blockcount" has been 8192 * "scaled" in sd_send_scsi_READ_CAPACITY by multiplying by 8193 * (un_tgt_blocksize / un->un_sys_blocksize). 8194 */ 8195 stp->sd_capacity.value.ui64 = (uint64_t) 8196 ((uint64_t)un->un_blockcount * un->un_sys_blocksize); 8197 } 8198 } 8199 8200 8201 /* 8202 * Function: sd_set_pstats 8203 * 8204 * Description: This routine instantiates and initializes the partition 8205 * stats for each partition with more than zero blocks. 8206 * (4363169) 8207 * 8208 * Arguments: un - driver soft state (unit) structure 8209 * 8210 * Context: Kernel thread context 8211 */ 8212 8213 static void 8214 sd_set_pstats(struct sd_lun *un) 8215 { 8216 char kstatname[KSTAT_STRLEN]; 8217 int instance; 8218 int i; 8219 diskaddr_t nblks = 0; 8220 char *partname = NULL; 8221 8222 ASSERT(un != NULL); 8223 8224 instance = ddi_get_instance(SD_DEVINFO(un)); 8225 8226 /* Note:x86: is this a VTOC8/VTOC16 difference?
*/ 8227 for (i = 0; i < NSDMAP; i++) { 8228 8229 if (cmlb_partinfo(un->un_cmlbhandle, i, 8230 &nblks, NULL, &partname, NULL, (void *)SD_PATH_DIRECT) != 0) 8231 continue; 8232 mutex_enter(SD_MUTEX(un)); 8233 8234 if ((un->un_pstats[i] == NULL) && 8235 (nblks != 0)) { 8236 8237 (void) snprintf(kstatname, sizeof (kstatname), 8238 "%s%d,%s", sd_label, instance, 8239 partname); 8240 8241 un->un_pstats[i] = kstat_create(sd_label, 8242 instance, kstatname, "partition", KSTAT_TYPE_IO, 8243 1, KSTAT_FLAG_PERSISTENT); 8244 if (un->un_pstats[i] != NULL) { 8245 un->un_pstats[i]->ks_lock = SD_MUTEX(un); 8246 kstat_install(un->un_pstats[i]); 8247 } 8248 } 8249 mutex_exit(SD_MUTEX(un)); 8250 } 8251 } 8252 8253 8254 #if (defined(__fibre)) 8255 /* 8256 * Function: sd_init_event_callbacks 8257 * 8258 * Description: This routine initializes the insertion and removal event 8259 * callbacks. (fibre only) 8260 * 8261 * Arguments: un - driver soft state (unit) structure 8262 * 8263 * Context: Kernel thread context 8264 */ 8265 8266 static void 8267 sd_init_event_callbacks(struct sd_lun *un) 8268 { 8269 ASSERT(un != NULL); 8270 8271 if ((un->un_insert_event == NULL) && 8272 (ddi_get_eventcookie(SD_DEVINFO(un), FCAL_INSERT_EVENT, 8273 &un->un_insert_event) == DDI_SUCCESS)) { 8274 /* 8275 * Add the callback for an insertion event 8276 */ 8277 (void) ddi_add_event_handler(SD_DEVINFO(un), 8278 un->un_insert_event, sd_event_callback, (void *)un, 8279 &(un->un_insert_cb_id)); 8280 } 8281 8282 if ((un->un_remove_event == NULL) && 8283 (ddi_get_eventcookie(SD_DEVINFO(un), FCAL_REMOVE_EVENT, 8284 &un->un_remove_event) == DDI_SUCCESS)) { 8285 /* 8286 * Add the callback for a removal event 8287 */ 8288 (void) ddi_add_event_handler(SD_DEVINFO(un), 8289 un->un_remove_event, sd_event_callback, (void *)un, 8290 &(un->un_remove_cb_id)); 8291 } 8292 } 8293 8294 8295 /* 8296 * Function: sd_event_callback 8297 * 8298 * Description: This routine handles insert/remove events (photon). The 8299 * state is changed to OFFLINE, which can be used to suppress 8300 * error msgs. (fibre only) 8301 * 8302 * Arguments: un - driver soft state (unit) structure 8303 * 8304 * Context: Callout thread context 8305 */ 8306 /* ARGSUSED */ 8307 static void 8308 sd_event_callback(dev_info_t *dip, ddi_eventcookie_t event, void *arg, 8309 void *bus_impldata) 8310 { 8311 struct sd_lun *un = (struct sd_lun *)arg; 8312 8313 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_insert_event)); 8314 if (event == un->un_insert_event) { 8315 SD_TRACE(SD_LOG_COMMON, un, "sd_event_callback: insert event"); 8316 mutex_enter(SD_MUTEX(un)); 8317 if (un->un_state == SD_STATE_OFFLINE) { 8318 if (un->un_last_state != SD_STATE_SUSPENDED) { 8319 un->un_state = un->un_last_state; 8320 } else { 8321 /* 8322 * We have gone through SUSPEND/RESUME while 8323 * we were offline. Restore the last state. 8324 */ 8325 un->un_state = un->un_save_state; 8326 } 8327 } 8328 mutex_exit(SD_MUTEX(un)); 8329 8330 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_remove_event)); 8331 } else if (event == un->un_remove_event) { 8332 SD_TRACE(SD_LOG_COMMON, un, "sd_event_callback: remove event"); 8333 mutex_enter(SD_MUTEX(un)); 8334 /* 8335 * We need to handle an event callback that occurs during 8336 * the suspend operation, since we don't prevent it.
8337 */ 8338 if (un->un_state != SD_STATE_OFFLINE) { 8339 if (un->un_state != SD_STATE_SUSPENDED) { 8340 New_state(un, SD_STATE_OFFLINE); 8341 } else { 8342 un->un_last_state = SD_STATE_OFFLINE; 8343 } 8344 } 8345 mutex_exit(SD_MUTEX(un)); 8346 } else { 8347 scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE, 8348 "!Unknown event\n"); 8349 } 8350 8351 } 8352 #endif 8353 8354 /* 8355 * Function: sd_cache_control() 8356 * 8357 * Description: This routine is the driver entry point for setting 8358 * read and write caching by modifying the WCE (write cache 8359 * enable) and RCD (read cache disable) bits of mode 8360 * page 8 (MODEPAGE_CACHING). 8361 * 8362 * Arguments: un - driver soft state (unit) structure 8363 * rcd_flag - flag for controlling the read cache 8364 * wce_flag - flag for controlling the write cache 8365 * 8366 * Return Code: EIO 8367 * code returned by sd_send_scsi_MODE_SENSE and 8368 * sd_send_scsi_MODE_SELECT 8369 * 8370 * Context: Kernel Thread 8371 */ 8372 8373 static int 8374 sd_cache_control(struct sd_lun *un, int rcd_flag, int wce_flag) 8375 { 8376 struct mode_caching *mode_caching_page; 8377 uchar_t *header; 8378 size_t buflen; 8379 int hdrlen; 8380 int bd_len; 8381 int rval = 0; 8382 struct mode_header_grp2 *mhp; 8383 8384 ASSERT(un != NULL); 8385 8386 /* 8387 * Do a test unit ready; otherwise a mode sense may not work if this 8388 * is the first command sent to the device after boot. 8389 */ 8390 (void) sd_send_scsi_TEST_UNIT_READY(un, 0); 8391 8392 if (un->un_f_cfg_is_atapi == TRUE) { 8393 hdrlen = MODE_HEADER_LENGTH_GRP2; 8394 } else { 8395 hdrlen = MODE_HEADER_LENGTH; 8396 } 8397 8398 /* 8399 * Allocate memory for the retrieved mode page and its headers. Set 8400 * a pointer to the page itself. Use mode_cache_scsi3 to ensure 8401 * we get all of the mode sense data; otherwise, the mode select 8402 * will fail. mode_cache_scsi3 is a superset of mode_caching. 8403 */ 8404 buflen = hdrlen + MODE_BLK_DESC_LENGTH + 8405 sizeof (struct mode_cache_scsi3); 8406 8407 header = kmem_zalloc(buflen, KM_SLEEP); 8408 8409 /* Get the information from the device. */ 8410 if (un->un_f_cfg_is_atapi == TRUE) { 8411 rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, header, buflen, 8412 MODEPAGE_CACHING, SD_PATH_DIRECT); 8413 } else { 8414 rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, header, buflen, 8415 MODEPAGE_CACHING, SD_PATH_DIRECT); 8416 } 8417 if (rval != 0) { 8418 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 8419 "sd_cache_control: Mode Sense Failed\n"); 8420 kmem_free(header, buflen); 8421 return (rval); 8422 } 8423 8424 /* 8425 * Determine size of Block Descriptors in order to locate 8426 * the mode page data. ATAPI devices return 0, SCSI devices 8427 * should return MODE_BLK_DESC_LENGTH.
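 *
 * For orientation, the returned buffer is laid out as follows (an
 * illustrative sketch; offsets are relative to "header"):
 *
 *	header				mode header (hdrlen bytes)
 *	header + hdrlen			block descriptor(s) (bd_len
 *					bytes; 0 for ATAPI)
 *	header + hdrlen + bd_len	caching page (MODEPAGE_CACHING)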
8428 */ 8429 if (un->un_f_cfg_is_atapi == TRUE) { 8430 mhp = (struct mode_header_grp2 *)header; 8431 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 8432 } else { 8433 bd_len = ((struct mode_header *)header)->bdesc_length; 8434 } 8435 8436 if (bd_len > MODE_BLK_DESC_LENGTH) { 8437 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 8438 "sd_cache_control: Mode Sense returned invalid " 8439 "block descriptor length\n"); 8440 kmem_free(header, buflen); 8441 return (EIO); 8442 } 8443 8444 mode_caching_page = (struct mode_caching *)(header + hdrlen + bd_len); 8445 if (mode_caching_page->mode_page.code != MODEPAGE_CACHING) { 8446 SD_ERROR(SD_LOG_COMMON, un, "sd_cache_control: Mode Sense" 8447 " caching page code mismatch %d\n", 8448 mode_caching_page->mode_page.code); 8449 kmem_free(header, buflen); 8450 return (EIO); 8451 } 8452 8453 /* Check the relevant bits on successful mode sense. */ 8454 if ((mode_caching_page->rcd && rcd_flag == SD_CACHE_ENABLE) || 8455 (!mode_caching_page->rcd && rcd_flag == SD_CACHE_DISABLE) || 8456 (mode_caching_page->wce && wce_flag == SD_CACHE_DISABLE) || 8457 (!mode_caching_page->wce && wce_flag == SD_CACHE_ENABLE)) { 8458 8459 size_t sbuflen; 8460 uchar_t save_pg; 8461 8462 /* 8463 * Construct select buffer length based on the 8464 * length of the sense data returned. 8465 */ 8466 sbuflen = hdrlen + MODE_BLK_DESC_LENGTH + 8467 sizeof (struct mode_page) + 8468 (int)mode_caching_page->mode_page.length; 8469 8470 /* 8471 * Set the caching bits as requested. 8472 */ 8473 if (rcd_flag == SD_CACHE_ENABLE) 8474 mode_caching_page->rcd = 0; 8475 else if (rcd_flag == SD_CACHE_DISABLE) 8476 mode_caching_page->rcd = 1; 8477 8478 if (wce_flag == SD_CACHE_ENABLE) 8479 mode_caching_page->wce = 1; 8480 else if (wce_flag == SD_CACHE_DISABLE) 8481 mode_caching_page->wce = 0; 8482 8483 /* 8484 * Save the page if the mode sense says the 8485 * drive supports it. 8486 */ 8487 save_pg = mode_caching_page->mode_page.ps ? 8488 SD_SAVE_PAGE : SD_DONTSAVE_PAGE; 8489 8490 /* Clear reserved bits before mode select. */ 8491 mode_caching_page->mode_page.ps = 0; 8492 8493 /* 8494 * Clear out mode header for mode select. 8495 * The rest of the retrieved page will be reused. 8496 */ 8497 bzero(header, hdrlen); 8498 8499 if (un->un_f_cfg_is_atapi == TRUE) { 8500 mhp = (struct mode_header_grp2 *)header; 8501 mhp->bdesc_length_hi = bd_len >> 8; 8502 mhp->bdesc_length_lo = (uchar_t)bd_len & 0xff; 8503 } else { 8504 ((struct mode_header *)header)->bdesc_length = bd_len; 8505 } 8506 8507 /* Issue mode select to change the cache settings */ 8508 if (un->un_f_cfg_is_atapi == TRUE) { 8509 rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP1, header, 8510 sbuflen, save_pg, SD_PATH_DIRECT); 8511 } else { 8512 rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, header, 8513 sbuflen, save_pg, SD_PATH_DIRECT); 8514 } 8515 } 8516 8517 kmem_free(header, buflen); 8518 return (rval); 8519 } 8520 8521 8522 /* 8523 * Function: sd_get_write_cache_enabled() 8524 * 8525 * Description: This routine is the driver entry point for determining if 8526 * write caching is enabled. It examines the WCE (write cache 8527 * enable) bits of mode page 8 (MODEPAGE_CACHING). 
8528 * 8529 * Arguments: un - driver soft state (unit) structure 8530 * is_enabled - pointer to int where write cache enabled state 8531 * is returned (non-zero -> write cache enabled) 8532 * 8533 * 8534 * Return Code: EIO 8535 * code returned by sd_send_scsi_MODE_SENSE 8536 * 8537 * Context: Kernel Thread 8538 * 8539 * NOTE: If an ioctl is added to disable the write cache, this sequence 8540 * should be followed so that no locking is required for accesses to 8541 * un->un_f_write_cache_enabled: 8542 * do mode select to clear wce 8543 * do synchronize cache to flush cache 8544 * set un->un_f_write_cache_enabled = FALSE 8545 * 8546 * Conversely, an ioctl to enable the write cache should be done 8547 * in this order: 8548 * set un->un_f_write_cache_enabled = TRUE 8549 * do mode select to set wce 8550 */ 8551 8552 static int 8553 sd_get_write_cache_enabled(struct sd_lun *un, int *is_enabled) 8554 { 8555 struct mode_caching *mode_caching_page; 8556 uchar_t *header; 8557 size_t buflen; 8558 int hdrlen; 8559 int bd_len; 8560 int rval = 0; 8561 8562 ASSERT(un != NULL); 8563 ASSERT(is_enabled != NULL); 8564 8565 /* in case of error, flag as enabled */ 8566 *is_enabled = TRUE; 8567 8568 /* 8569 * Do a test unit ready; otherwise a mode sense may not work if this 8570 * is the first command sent to the device after boot. 8571 */ 8572 (void) sd_send_scsi_TEST_UNIT_READY(un, 0); 8573 8574 if (un->un_f_cfg_is_atapi == TRUE) { 8575 hdrlen = MODE_HEADER_LENGTH_GRP2; 8576 } else { 8577 hdrlen = MODE_HEADER_LENGTH; 8578 } 8579 8580 /* 8581 * Allocate memory for the retrieved mode page and its headers. Set 8582 * a pointer to the page itself. 8583 */ 8584 buflen = hdrlen + MODE_BLK_DESC_LENGTH + sizeof (struct mode_caching); 8585 header = kmem_zalloc(buflen, KM_SLEEP); 8586 8587 /* Get the information from the device. */ 8588 if (un->un_f_cfg_is_atapi == TRUE) { 8589 rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, header, buflen, 8590 MODEPAGE_CACHING, SD_PATH_DIRECT); 8591 } else { 8592 rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, header, buflen, 8593 MODEPAGE_CACHING, SD_PATH_DIRECT); 8594 } 8595 if (rval != 0) { 8596 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 8597 "sd_get_write_cache_enabled: Mode Sense Failed\n"); 8598 kmem_free(header, buflen); 8599 return (rval); 8600 } 8601 8602 /* 8603 * Determine size of Block Descriptors in order to locate 8604 * the mode page data. ATAPI devices return 0, SCSI devices 8605 * should return MODE_BLK_DESC_LENGTH.
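 *
 * (The same mode header/block descriptor/mode page arithmetic as in
 * sd_cache_control() above applies here; see the layout sketch there.)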
8606 */ 8607 if (un->un_f_cfg_is_atapi == TRUE) { 8608 struct mode_header_grp2 *mhp; 8609 mhp = (struct mode_header_grp2 *)header; 8610 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 8611 } else { 8612 bd_len = ((struct mode_header *)header)->bdesc_length; 8613 } 8614 8615 if (bd_len > MODE_BLK_DESC_LENGTH) { 8616 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 8617 "sd_get_write_cache_enabled: Mode Sense returned invalid " 8618 "block descriptor length\n"); 8619 kmem_free(header, buflen); 8620 return (EIO); 8621 } 8622 8623 mode_caching_page = (struct mode_caching *)(header + hdrlen + bd_len); 8624 if (mode_caching_page->mode_page.code != MODEPAGE_CACHING) { 8625 SD_ERROR(SD_LOG_COMMON, un, "sd_get_write_cache_enabled: Mode Sense" 8626 " caching page code mismatch %d\n", 8627 mode_caching_page->mode_page.code); 8628 kmem_free(header, buflen); 8629 return (EIO); 8630 } 8631 *is_enabled = mode_caching_page->wce; 8632 8633 kmem_free(header, buflen); 8634 return (0); 8635 } 8636 8637 /* 8638 * Function: sd_get_nv_sup() 8639 * 8640 * Description: This routine is the driver entry point for 8641 * determining whether non-volatile cache is supported. This 8642 * determination process works as follows: 8643 * 8644 * 1. sd first queries sd.conf to see whether the 8645 * suppress_cache_flush bit is set for this device. 8646 * 8647 * 2. if it is not set there, sd then queries the internal disk table. 8648 * 8649 * 3. if either sd.conf or the internal disk table specifies 8650 * that cache flushes be suppressed, we don't bother checking 8651 * the NV_SUP bit. 8652 * 8653 * If SUPPRESS_CACHE_FLUSH bit is not set to 1, sd queries 8654 * the optional INQUIRY VPD page 0x86. If the device 8655 * supports VPD page 0x86, sd examines the NV_SUP 8656 * (non-volatile cache support) bit in the INQUIRY VPD page 8657 * 0x86: 8658 * o If NV_SUP bit is set, sd assumes the device has a 8659 * non-volatile cache and sets 8660 * un_f_sync_nv_supported to TRUE. 8661 * o Otherwise the cache is not non-volatile, and 8662 * un_f_sync_nv_supported is set to FALSE. 8663 * 8664 * Arguments: un - driver soft state (unit) structure 8665 * 8666 * Return Code: 8667 * 8668 * Context: Kernel Thread 8669 */ 8670 8671 static void 8672 sd_get_nv_sup(struct sd_lun *un) 8673 { 8674 int rval = 0; 8675 uchar_t *inq86 = NULL; 8676 size_t inq86_len = MAX_INQUIRY_SIZE; 8677 size_t inq86_resid = 0; 8678 struct dk_callback *dkc; 8679 8680 ASSERT(un != NULL); 8681 8682 mutex_enter(SD_MUTEX(un)); 8683 8684 /* 8685 * Be conservative about the device's support of 8686 * the SYNC_NV bit: un_f_sync_nv_supported is 8687 * initialized to FALSE. 8688 */ 8689 un->un_f_sync_nv_supported = FALSE; 8690 8691 /* 8692 * If either sd.conf or the internal disk table 8693 * specifies that cache flushes be suppressed, then 8694 * we don't bother checking the NV_SUP bit.
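 *
 * Otherwise the Extended INQUIRY Data VPD page (0x86) is fetched
 * below. The bytes of interest to the code that follows are, in an
 * illustrative summary (not a full page definition):
 *
 *	inq86[1]	page code (0x86)
 *	inq86[3]	page length
 *	inq86[6]	flags byte tested against SD_VPD_NV_SUP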
8695 */ 8696 if (un->un_f_suppress_cache_flush == TRUE) { 8697 mutex_exit(SD_MUTEX(un)); 8698 return; 8699 } 8700 8701 if (sd_check_vpd_page_support(un) == 0 && 8702 un->un_vpd_page_mask & SD_VPD_EXTENDED_DATA_PG) { 8703 mutex_exit(SD_MUTEX(un)); 8704 /* collect page 86 data if available */ 8705 inq86 = kmem_zalloc(inq86_len, KM_SLEEP); 8706 rval = sd_send_scsi_INQUIRY(un, inq86, inq86_len, 8707 0x01, 0x86, &inq86_resid); 8708 8709 if (rval == 0 && (inq86_len - inq86_resid > 6)) { 8710 SD_TRACE(SD_LOG_COMMON, un, 8711 "sd_get_nv_sup: \ 8712 successfully got VPD page: %x \ 8713 PAGE LENGTH: %x BYTE 6: %x\n", 8714 inq86[1], inq86[3], inq86[6]); 8715 8716 mutex_enter(SD_MUTEX(un)); 8717 /* 8718 * Check the value of the NV_SUP bit: only if the device 8719 * reports NV_SUP as 1 will the 8720 * un_f_sync_nv_supported bit be set to TRUE. 8721 */ 8722 if (inq86[6] & SD_VPD_NV_SUP) { 8723 un->un_f_sync_nv_supported = TRUE; 8724 } 8725 mutex_exit(SD_MUTEX(un)); 8726 } 8727 kmem_free(inq86, inq86_len); 8728 } else { 8729 mutex_exit(SD_MUTEX(un)); 8730 } 8731 8732 /* 8733 * Send a SYNC CACHE command to check whether the 8734 * SYNC_NV bit is supported. By this point 8735 * un_f_sync_nv_supported should have been set to the correct value. 8736 */ 8737 mutex_enter(SD_MUTEX(un)); 8738 if (un->un_f_sync_nv_supported) { 8739 mutex_exit(SD_MUTEX(un)); 8740 dkc = kmem_zalloc(sizeof (struct dk_callback), KM_SLEEP); 8741 dkc->dkc_flag = FLUSH_VOLATILE; 8742 (void) sd_send_scsi_SYNCHRONIZE_CACHE(un, dkc); 8743 8744 /* 8745 * Send a TEST UNIT READY command to the device. This should 8746 * clear any outstanding UNIT ATTENTION that may be present. 8747 */ 8748 (void) sd_send_scsi_TEST_UNIT_READY(un, SD_DONT_RETRY_TUR); 8749 8750 kmem_free(dkc, sizeof (struct dk_callback)); 8751 } else { 8752 mutex_exit(SD_MUTEX(un)); 8753 } 8754 8755 SD_TRACE(SD_LOG_COMMON, un, "sd_get_nv_sup: \ 8756 un_f_suppress_cache_flush is set to %d\n", 8757 un->un_f_suppress_cache_flush); 8758 } 8759 8760 /* 8761 * Function: sd_make_device 8762 * 8763 * Description: Utility routine to return the Solaris device number from 8764 * the data in the device's dev_info structure. 8765 * 8766 * Return Code: The Solaris device number 8767 * 8768 * Context: Any 8769 */ 8770 8771 static dev_t 8772 sd_make_device(dev_info_t *devi) 8773 { 8774 return (makedevice(ddi_name_to_major(ddi_get_name(devi)), 8775 ddi_get_instance(devi) << SDUNIT_SHIFT)); 8776 } 8777 8778 8779 /* 8780 * Function: sd_pm_entry 8781 * 8782 * Description: Called at the start of a new command to manage power 8783 * and busy status of a device. This includes determining whether 8784 * the current power state of the device is sufficient for 8785 * performing the command or whether it must be changed. 8786 * The PM framework is notified appropriately. 8787 * Only with a return status of DDI_SUCCESS will the 8788 * component be marked busy to the framework. 8789 * 8790 * All callers of sd_pm_entry must check the return status 8791 * and only call sd_pm_exit if it was DDI_SUCCESS. A status 8792 * of DDI_FAILURE indicates the device failed to power up. 8793 * In this case un_pm_count has been adjusted so the result 8794 * on exit is still powered down, i.e. count is less than 0. 8795 * Calling sd_pm_exit with this count value hits an ASSERT. 8796 * 8797 * Return Code: DDI_SUCCESS or DDI_FAILURE 8798 * 8799 * Context: Kernel thread context.
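 *
 * A minimal caller sketch (illustrative only):
 *
 *	if (sd_pm_entry(un) == DDI_SUCCESS) {
 *		... issue the command ...
 *		sd_pm_exit(un);
 *	}
 *
 * On DDI_FAILURE the command is not issued and sd_pm_exit
 * must not be called.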
8800 */ 8801 8802 static int 8803 sd_pm_entry(struct sd_lun *un) 8804 { 8805 int return_status = DDI_SUCCESS; 8806 8807 ASSERT(!mutex_owned(SD_MUTEX(un))); 8808 ASSERT(!mutex_owned(&un->un_pm_mutex)); 8809 8810 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_entry: entry\n"); 8811 8812 if (un->un_f_pm_is_enabled == FALSE) { 8813 SD_TRACE(SD_LOG_IO_PM, un, 8814 "sd_pm_entry: exiting, PM not enabled\n"); 8815 return (return_status); 8816 } 8817 8818 /* 8819 * Just increment a counter if PM is enabled. On the transition from 8820 * 0 ==> 1, mark the device as busy. The iodone side will decrement 8821 * the count with each IO and mark the device as idle when the count 8822 * hits 0. 8823 * 8824 * If the count is less than 0 the device is powered down. If a powered 8825 * down device is successfully powered up then the count must be 8826 * incremented to reflect the power up. Note that it'll get incremented 8827 * a second time to become busy. 8828 * 8829 * Because the following has the potential to change the device state 8830 * and must release the un_pm_mutex to do so, only one thread can be 8831 * allowed through at a time. 8832 */ 8833 8834 mutex_enter(&un->un_pm_mutex); 8835 while (un->un_pm_busy == TRUE) { 8836 cv_wait(&un->un_pm_busy_cv, &un->un_pm_mutex); 8837 } 8838 un->un_pm_busy = TRUE; 8839 8840 if (un->un_pm_count < 1) { 8841 8842 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_entry: busy component\n"); 8843 8844 /* 8845 * Indicate we are now busy so the framework won't attempt to 8846 * power down the device. This call will only fail if either 8847 * we passed a bad component number or the device has no 8848 * components. Neither of these should ever happen. 8849 */ 8850 mutex_exit(&un->un_pm_mutex); 8851 return_status = pm_busy_component(SD_DEVINFO(un), 0); 8852 ASSERT(return_status == DDI_SUCCESS); 8853 8854 mutex_enter(&un->un_pm_mutex); 8855 8856 if (un->un_pm_count < 0) { 8857 mutex_exit(&un->un_pm_mutex); 8858 8859 SD_TRACE(SD_LOG_IO_PM, un, 8860 "sd_pm_entry: power up component\n"); 8861 8862 /* 8863 * pm_raise_power will cause sdpower to be called 8864 * which brings the device power level to the 8865 * desired state, ON in this case. If successful, 8866 * un_pm_count and un_power_level will be updated 8867 * appropriately. 8868 */ 8869 return_status = pm_raise_power(SD_DEVINFO(un), 0, 8870 SD_SPINDLE_ON); 8871 8872 mutex_enter(&un->un_pm_mutex); 8873 8874 if (return_status != DDI_SUCCESS) { 8875 /* 8876 * Power up failed. 8877 * Idle the device and adjust the count 8878 * so the result on exit is that we're 8879 * still powered down, ie. count is less than 0. 8880 */ 8881 SD_TRACE(SD_LOG_IO_PM, un, 8882 "sd_pm_entry: power up failed," 8883 " idle the component\n"); 8884 8885 (void) pm_idle_component(SD_DEVINFO(un), 0); 8886 un->un_pm_count--; 8887 } else { 8888 /* 8889 * Device is powered up, verify the 8890 * count is non-negative. 8891 * This is debug only. 8892 */ 8893 ASSERT(un->un_pm_count == 0); 8894 } 8895 } 8896 8897 if (return_status == DDI_SUCCESS) { 8898 /* 8899 * For performance, now that the device has been tagged 8900 * as busy, and it's known to be powered up, update the 8901 * chain types to use jump tables that do not include 8902 * pm. This significantly lowers the overhead and 8903 * therefore improves performance. 
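			 * (The *_NO_PM chain variants skip the
			 * sd_pm_iostart()/sd_pm_iodone() layers entirely;
			 * while un_pm_count is held above 0 the device
			 * cannot be powered down, so those per-command
			 * checks are redundant.)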
8904 */ 8905 8906 mutex_exit(&un->un_pm_mutex); 8907 mutex_enter(SD_MUTEX(un)); 8908 SD_TRACE(SD_LOG_IO_PM, un, 8909 "sd_pm_entry: changing uscsi_chain_type from %d\n", 8910 un->un_uscsi_chain_type); 8911 8912 if (un->un_f_non_devbsize_supported) { 8913 un->un_buf_chain_type = 8914 SD_CHAIN_INFO_RMMEDIA_NO_PM; 8915 } else { 8916 un->un_buf_chain_type = 8917 SD_CHAIN_INFO_DISK_NO_PM; 8918 } 8919 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD_NO_PM; 8920 8921 SD_TRACE(SD_LOG_IO_PM, un, 8922 " changed uscsi_chain_type to %d\n", 8923 un->un_uscsi_chain_type); 8924 mutex_exit(SD_MUTEX(un)); 8925 mutex_enter(&un->un_pm_mutex); 8926 8927 if (un->un_pm_idle_timeid == NULL) { 8928 /* 300 ms. */ 8929 un->un_pm_idle_timeid = 8930 timeout(sd_pm_idletimeout_handler, un, 8931 (drv_usectohz((clock_t)300000))); 8932 /* 8933 * Include an extra call to busy which keeps the 8934 * device busy with-respect-to the PM layer 8935 * until the timer fires, at which time it'll 8936 * get the extra idle call. 8937 */ 8938 (void) pm_busy_component(SD_DEVINFO(un), 0); 8939 } 8940 } 8941 } 8942 un->un_pm_busy = FALSE; 8943 /* Next... */ 8944 cv_signal(&un->un_pm_busy_cv); 8945 8946 un->un_pm_count++; 8947 8948 SD_TRACE(SD_LOG_IO_PM, un, 8949 "sd_pm_entry: exiting, un_pm_count = %d\n", un->un_pm_count); 8950 8951 mutex_exit(&un->un_pm_mutex); 8952 8953 return (return_status); 8954 } 8955 8956 8957 /* 8958 * Function: sd_pm_exit 8959 * 8960 * Description: Called at the completion of a command to manage busy 8961 * status for the device. If the device becomes idle the 8962 * PM framework is notified. 8963 * 8964 * Context: Kernel thread context 8965 */ 8966 8967 static void 8968 sd_pm_exit(struct sd_lun *un) 8969 { 8970 ASSERT(!mutex_owned(SD_MUTEX(un))); 8971 ASSERT(!mutex_owned(&un->un_pm_mutex)); 8972 8973 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_exit: entry\n"); 8974 8975 /* 8976 * After attach the following flag is only read, so don't 8977 * take the penalty of acquiring a mutex for it. 8978 */ 8979 if (un->un_f_pm_is_enabled == TRUE) { 8980 8981 mutex_enter(&un->un_pm_mutex); 8982 un->un_pm_count--; 8983 8984 SD_TRACE(SD_LOG_IO_PM, un, 8985 "sd_pm_exit: un_pm_count = %d\n", un->un_pm_count); 8986 8987 ASSERT(un->un_pm_count >= 0); 8988 if (un->un_pm_count == 0) { 8989 mutex_exit(&un->un_pm_mutex); 8990 8991 SD_TRACE(SD_LOG_IO_PM, un, 8992 "sd_pm_exit: idle component\n"); 8993 8994 (void) pm_idle_component(SD_DEVINFO(un), 0); 8995 8996 } else { 8997 mutex_exit(&un->un_pm_mutex); 8998 } 8999 } 9000 9001 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_exit: exiting\n"); 9002 } 9003 9004 9005 /* 9006 * Function: sdopen 9007 * 9008 * Description: Driver's open(9e) entry point function. 
9009  *
9010  * Arguments: dev_p - pointer to device number
9011  *		flag - how to open file (FEXCL, FNDELAY, FREAD, FWRITE)
9012  *		otyp - open type (OTYP_BLK, OTYP_CHR, OTYP_LYR)
9013  *		cred_p - user credential pointer
9014  *
9015  * Return Code: EINVAL
9016  *		ENXIO
9017  *		EIO
9018  *		EROFS
9019  *		EBUSY
9020  *
9021  * Context: Kernel thread context
9022  */
9023 /* ARGSUSED */
9024 static int
9025 sdopen(dev_t *dev_p, int flag, int otyp, cred_t *cred_p)
9026 {
9027 	struct sd_lun	*un;
9028 	int		nodelay;
9029 	int		part;
9030 	uint64_t	partmask;
9031 	int		instance;
9032 	dev_t		dev;
9033 	int		rval = EIO;
9034 	diskaddr_t	nblks = 0;
9035 
9036 	/* Validate the open type */
9037 	if (otyp >= OTYPCNT) {
9038 		return (EINVAL);
9039 	}
9040 
9041 	dev = *dev_p;
9042 	instance = SDUNIT(dev);
9043 	mutex_enter(&sd_detach_mutex);
9044 
9045 	/*
9046 	 * Fail the open if there is no softstate for the instance, or
9047 	 * if another thread somewhere is trying to detach the instance.
9048 	 */
9049 	if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) ||
9050 	    (un->un_detach_count != 0)) {
9051 		mutex_exit(&sd_detach_mutex);
9052 		/*
9053 		 * The probe cache only needs to be cleared when open (9e) fails
9054 		 * with ENXIO (4238046).
9055 		 */
9056 		/*
9057 		 * Unconditionally clearing the probe cache is fine with
9058 		 * separate sd/ssd binaries; on the x86 platform it can be
9059 		 * an issue because both parallel and fibre are handled
9060 		 * in one binary.
9061 		 */
9062 		sd_scsi_clear_probe_cache();
9063 		return (ENXIO);
9064 	}
9065 
9066 	/*
9067 	 * The un_layer_count is to prevent another thread in specfs from
9068 	 * trying to detach the instance, which can happen when we are
9069 	 * called from a higher-layer driver instead of through specfs.
9070 	 * This will not be needed when DDI provides a layered driver
9071 	 * interface that allows specfs to know that an instance is in
9072 	 * use by a layered driver & should not be detached.
9073 	 *
9074 	 * Note: the semantics for layered driver opens are exactly one
9075 	 * close for every open.
9076 	 */
9077 	if (otyp == OTYP_LYR) {
9078 		un->un_layer_count++;
9079 	}
9080 
9081 	/*
9082 	 * Keep a count of the current # of opens in progress. This is because
9083 	 * some layered drivers try to call us as a regular open. This can
9084 	 * cause problems that we cannot prevent, however by keeping this count
9085 	 * we can at least keep our open and detach routines from racing
9086 	 * with each other under such conditions.
9087 	 */
9088 	un->un_opens_in_progress++;
9089 	mutex_exit(&sd_detach_mutex);
9090 
9091 	nodelay = (flag & (FNDELAY | FNONBLOCK));
9092 	part = SDPART(dev);
9093 	partmask = 1 << part;
9094 
9095 	/*
9096 	 * We use a semaphore here in order to serialize
9097 	 * open and close requests on the device.
9098 	 */
9099 	sema_p(&un->un_semoclose);
9100 
9101 	mutex_enter(SD_MUTEX(un));
9102 
9103 	/*
9104 	 * All device accesses go through sdstrategy() where we check
9105 	 * on suspend status, but there could be a scsi_poll command,
9106 	 * which bypasses sdstrategy(), so we need to check pm
9107 	 * status.
9108 	 */
9109 
9110 	if (!nodelay) {
9111 		while ((un->un_state == SD_STATE_SUSPENDED) ||
9112 		    (un->un_state == SD_STATE_PM_CHANGING)) {
9113 			cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
9114 		}
9115 
9116 		mutex_exit(SD_MUTEX(un));
9117 		if (sd_pm_entry(un) != DDI_SUCCESS) {
9118 			rval = EIO;
9119 			SD_ERROR(SD_LOG_OPEN_CLOSE, un,
9120 			    "sdopen: sd_pm_entry failed\n");
9121 			goto open_failed_with_pm;
9122 		}
9123 		mutex_enter(SD_MUTEX(un));
9124 	}
9125 
9126 	/* check for previous exclusive open */
9127 	SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: un=%p\n", (void *)un);
9128 	SD_TRACE(SD_LOG_OPEN_CLOSE, un,
9129 	    "sdopen: exclopen=%x, flag=%x, regopen=%x\n",
9130 	    un->un_exclopen, flag, un->un_ocmap.regopen[otyp]);
9131 
9132 	if (un->un_exclopen & (partmask)) {
9133 		goto excl_open_fail;
9134 	}
9135 
9136 	if (flag & FEXCL) {
9137 		int i;
9138 		if (un->un_ocmap.lyropen[part]) {
9139 			goto excl_open_fail;
9140 		}
9141 		for (i = 0; i < (OTYPCNT - 1); i++) {
9142 			if (un->un_ocmap.regopen[i] & (partmask)) {
9143 				goto excl_open_fail;
9144 			}
9145 		}
9146 	}
9147 
9148 	/*
9149 	 * Check the write permission if this is a removable media device,
9150 	 * NDELAY has not been set, and writable permission is requested.
9151 	 *
9152 	 * Note: If NDELAY was set and this is write-protected media the WRITE
9153 	 * attempt will fail with EIO as part of the I/O processing. This is a
9154 	 * more permissive implementation that allows the open to succeed and
9155 	 * WRITE attempts to fail when appropriate.
9156 	 */
9157 	if (un->un_f_chk_wp_open) {
9158 		if ((flag & FWRITE) && (!nodelay)) {
9159 			mutex_exit(SD_MUTEX(un));
9160 			/*
9161 			 * For a writable DVD drive, defer the write-permission
9162 			 * check until sdstrategy; do not fail the open even if
9163 			 * FWRITE is set, since writability depends upon the
9164 			 * media, and the media can change after the call
9165 			 * to open().
9166 			 */
9167 			if (un->un_f_dvdram_writable_device == FALSE) {
9168 				if (ISCD(un) || sr_check_wp(dev)) {
9169 					rval = EROFS;
9170 					mutex_enter(SD_MUTEX(un));
9171 					SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: "
9172 					    "write to cd or write protected media\n");
9173 					goto open_fail;
9174 				}
9175 			}
9176 			mutex_enter(SD_MUTEX(un));
9177 		}
9178 	}
9179 
9180 	/*
9181 	 * If opening in NDELAY/NONBLOCK mode, just return; checking
9182 	 * whether the disk is ready and has a valid geometry comes later.
9183 	 */
9184 	if (!nodelay) {
9185 		mutex_exit(SD_MUTEX(un));
9186 		rval = sd_ready_and_valid(un);
9187 		mutex_enter(SD_MUTEX(un));
9188 		/*
9189 		 * Fail if device is not ready or if the number of disk
9190 		 * blocks is zero or negative for non-CD devices.
9191 		 */
9192 
9193 		nblks = 0;
9194 
9195 		if (rval == SD_READY_VALID && (!ISCD(un))) {
9196 			/* if cmlb_partinfo fails, nblks remains 0 */
9197 			mutex_exit(SD_MUTEX(un));
9198 			(void) cmlb_partinfo(un->un_cmlbhandle, part, &nblks,
9199 			    NULL, NULL, NULL, (void *)SD_PATH_DIRECT);
9200 			mutex_enter(SD_MUTEX(un));
9201 		}
9202 
9203 		if ((rval != SD_READY_VALID) ||
9204 		    (!ISCD(un) && nblks <= 0)) {
9205 			rval = un->un_f_has_removable_media ? ENXIO : EIO;
9206 			SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: "
9207 			    "device not ready or invalid disk block value\n");
9208 			goto open_fail;
9209 		}
9210 #if defined(__i386) || defined(__amd64)
9211 	} else {
9212 		uchar_t *cp;
9213 		/*
9214 		 * x86 requires special nodelay handling, so that p0 is
9215 		 * always defined and accessible.
9216 		 * Invalidate the geometry only if the device is not already open.
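		 * (The chkd[] scan below works because chkd[] overlays the
		 * regopen[]/lyropen[] open maps within un_ocmap, so any
		 * nonzero byte means some partition is already open through
		 * some open type.)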
9217 */ 9218 cp = &un->un_ocmap.chkd[0]; 9219 while (cp < &un->un_ocmap.chkd[OCSIZE]) { 9220 if (*cp != (uchar_t)0) { 9221 break; 9222 } 9223 cp++; 9224 } 9225 if (cp == &un->un_ocmap.chkd[OCSIZE]) { 9226 mutex_exit(SD_MUTEX(un)); 9227 cmlb_invalidate(un->un_cmlbhandle, 9228 (void *)SD_PATH_DIRECT); 9229 mutex_enter(SD_MUTEX(un)); 9230 } 9231 9232 #endif 9233 } 9234 9235 if (otyp == OTYP_LYR) { 9236 un->un_ocmap.lyropen[part]++; 9237 } else { 9238 un->un_ocmap.regopen[otyp] |= partmask; 9239 } 9240 9241 /* Set up open and exclusive open flags */ 9242 if (flag & FEXCL) { 9243 un->un_exclopen |= (partmask); 9244 } 9245 9246 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: " 9247 "open of part %d type %d\n", part, otyp); 9248 9249 mutex_exit(SD_MUTEX(un)); 9250 if (!nodelay) { 9251 sd_pm_exit(un); 9252 } 9253 9254 sema_v(&un->un_semoclose); 9255 9256 mutex_enter(&sd_detach_mutex); 9257 un->un_opens_in_progress--; 9258 mutex_exit(&sd_detach_mutex); 9259 9260 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: exit success\n"); 9261 return (DDI_SUCCESS); 9262 9263 excl_open_fail: 9264 SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: fail exclusive open\n"); 9265 rval = EBUSY; 9266 9267 open_fail: 9268 mutex_exit(SD_MUTEX(un)); 9269 9270 /* 9271 * On a failed open we must exit the pm management. 9272 */ 9273 if (!nodelay) { 9274 sd_pm_exit(un); 9275 } 9276 open_failed_with_pm: 9277 sema_v(&un->un_semoclose); 9278 9279 mutex_enter(&sd_detach_mutex); 9280 un->un_opens_in_progress--; 9281 if (otyp == OTYP_LYR) { 9282 un->un_layer_count--; 9283 } 9284 mutex_exit(&sd_detach_mutex); 9285 9286 return (rval); 9287 } 9288 9289 9290 /* 9291 * Function: sdclose 9292 * 9293 * Description: Driver's close(9e) entry point function. 9294 * 9295 * Arguments: dev - device number 9296 * flag - file status flag, informational only 9297 * otyp - close type (OTYP_BLK, OTYP_CHR, OTYP_LYR) 9298 * cred_p - user credential pointer 9299 * 9300 * Return Code: ENXIO 9301 * 9302 * Context: Kernel thread context 9303 */ 9304 /* ARGSUSED */ 9305 static int 9306 sdclose(dev_t dev, int flag, int otyp, cred_t *cred_p) 9307 { 9308 struct sd_lun *un; 9309 uchar_t *cp; 9310 int part; 9311 int nodelay; 9312 int rval = 0; 9313 9314 /* Validate the open type */ 9315 if (otyp >= OTYPCNT) { 9316 return (ENXIO); 9317 } 9318 9319 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 9320 return (ENXIO); 9321 } 9322 9323 part = SDPART(dev); 9324 nodelay = flag & (FNDELAY | FNONBLOCK); 9325 9326 SD_TRACE(SD_LOG_OPEN_CLOSE, un, 9327 "sdclose: close of part %d type %d\n", part, otyp); 9328 9329 /* 9330 * We use a semaphore here in order to serialize 9331 * open and close requests on the device. 9332 */ 9333 sema_p(&un->un_semoclose); 9334 9335 mutex_enter(SD_MUTEX(un)); 9336 9337 /* Don't proceed if power is being changed. 
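	 * (The waiters below are released by a cv_broadcast on
	 * un_suspend_cv once the power-level transition completes.)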
	 */
9338 	while (un->un_state == SD_STATE_PM_CHANGING) {
9339 		cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
9340 	}
9341 
9342 	if (un->un_exclopen & (1 << part)) {
9343 		un->un_exclopen &= ~(1 << part);
9344 	}
9345 
9346 	/* Update the open partition map */
9347 	if (otyp == OTYP_LYR) {
9348 		un->un_ocmap.lyropen[part] -= 1;
9349 	} else {
9350 		un->un_ocmap.regopen[otyp] &= ~(1 << part);
9351 	}
9352 
9353 	cp = &un->un_ocmap.chkd[0];
9354 	while (cp < &un->un_ocmap.chkd[OCSIZE]) {
9355 		if (*cp != (uchar_t)0) {
9356 			break;
9357 		}
9358 		cp++;
9359 	}
9360 
9361 	if (cp == &un->un_ocmap.chkd[OCSIZE]) {
9362 		SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdclose: last close\n");
9363 
9364 		/*
9365 		 * We avoid persistence upon the last close, and set
9366 		 * the throttle back to the maximum.
9367 		 */
9368 		un->un_throttle = un->un_saved_throttle;
9369 
9370 		if (un->un_state == SD_STATE_OFFLINE) {
9371 			if (un->un_f_is_fibre == FALSE) {
9372 				scsi_log(SD_DEVINFO(un), sd_label,
9373 				    CE_WARN, "offline\n");
9374 			}
9375 			mutex_exit(SD_MUTEX(un));
9376 			cmlb_invalidate(un->un_cmlbhandle,
9377 			    (void *)SD_PATH_DIRECT);
9378 			mutex_enter(SD_MUTEX(un));
9379 
9380 		} else {
9381 			/*
9382 			 * Flush any outstanding writes in the NVRAM cache.
9383 			 * Note: SYNCHRONIZE CACHE is an optional SCSI-2
9384 			 * command; it may not work for non-Pluto devices.
9385 			 * SYNCHRONIZE CACHE is not required for removables,
9386 			 * except DVD-RAM drives.
9387 			 *
9388 			 * Also note: because SYNCHRONIZE CACHE is currently
9389 			 * the only command issued here that requires the
9390 			 * drive to be powered up, only do the power up before
9391 			 * sending the Sync Cache command. If additional
9392 			 * commands are added which require a powered-up
9393 			 * drive, the following sequence may have to change.
9394 			 *
9395 			 * And finally, note that parallel SCSI on SPARC
9396 			 * only issues a Sync Cache to DVD-RAM, a newly
9397 			 * supported device.
9398 			 */
9399 #if defined(__i386) || defined(__amd64)
9400 			if (un->un_f_sync_cache_supported ||
9401 			    un->un_f_dvdram_writable_device == TRUE) {
9402 #else
9403 			if (un->un_f_dvdram_writable_device == TRUE) {
9404 #endif
9405 				mutex_exit(SD_MUTEX(un));
9406 				if (sd_pm_entry(un) == DDI_SUCCESS) {
9407 					rval =
9408 					    sd_send_scsi_SYNCHRONIZE_CACHE(un,
9409 					    NULL);
9410 					/* ignore error if not supported */
9411 					if (rval == ENOTSUP) {
9412 						rval = 0;
9413 					} else if (rval != 0) {
9414 						rval = EIO;
9415 					}
9416 					sd_pm_exit(un);
9417 				} else {
9418 					rval = EIO;
9419 				}
9420 				mutex_enter(SD_MUTEX(un));
9421 			}
9422 
9423 			/*
9424 			 * For devices that support DOOR_LOCK, send an ALLOW
9425 			 * MEDIA REMOVAL command, but don't get upset if it
9426 			 * fails. The drive must be powered up before
9427 			 * sd_send_scsi_DOORLOCK() can be called.
9428 			 */
9429 			if (un->un_f_doorlock_supported) {
9430 				mutex_exit(SD_MUTEX(un));
9431 				if (sd_pm_entry(un) == DDI_SUCCESS) {
9432 					rval = sd_send_scsi_DOORLOCK(un,
9433 					    SD_REMOVAL_ALLOW, SD_PATH_DIRECT);
9434 
9435 					sd_pm_exit(un);
9436 					if (ISCD(un) && (rval != 0) &&
9437 					    (nodelay != 0)) {
9438 						rval = ENXIO;
9439 					}
9440 				} else {
9441 					rval = EIO;
9442 				}
9443 				mutex_enter(SD_MUTEX(un));
9444 			}
9445 
9446 			/*
9447 			 * If a device has removable media, invalidate all
9448 			 * parameters related to media, such as geometry,
9449 			 * blocksize, and blockcount.
9450 			 */
9451 			if (un->un_f_has_removable_media) {
9452 				sr_ejected(un);
9453 			}
9454 
9455 			/*
9456 			 * Destroy the cache (if it exists) which was
9457 			 * allocated for the write maps, since this is
9458 			 * the last close for this media.
9459 			 */
9460 			if (un->un_wm_cache) {
9461 				/*
9462 				 * Check if there are pending commands,
9463 				 * and if there are, give a warning and
9464 				 * do not destroy the cache.
9465 				 */
9466 				if (un->un_ncmds_in_driver > 0) {
9467 					scsi_log(SD_DEVINFO(un),
9468 					    sd_label, CE_WARN,
9469 					    "Unable to clean up memory "
9470 					    "because of pending I/O\n");
9471 				} else {
9472 					kmem_cache_destroy(
9473 					    un->un_wm_cache);
9474 					un->un_wm_cache = NULL;
9475 				}
9476 			}
9477 		}
9478 	}
9479 
9480 	mutex_exit(SD_MUTEX(un));
9481 	sema_v(&un->un_semoclose);
9482 
9483 	if (otyp == OTYP_LYR) {
9484 		mutex_enter(&sd_detach_mutex);
9485 		/*
9486 		 * The detach routine may run when the layer count
9487 		 * drops to zero.
9488 		 */
9489 		un->un_layer_count--;
9490 		mutex_exit(&sd_detach_mutex);
9491 	}
9492 
9493 	return (rval);
9494 }
9495 
9496 
9497 /*
9498  * Function: sd_ready_and_valid
9499  *
9500  * Description: Test if device is ready and has a valid geometry.
9501  *
9502  * Arguments: un - driver soft state (unit) structure
9503  *
9504  *
9505  * Return Code: SD_READY_VALID		ready and valid label
9506  *		SD_NOT_READY_VALID	not ready, no label
9507  *		SD_RESERVED_BY_OTHERS	reservation conflict
9508  *
9509  * Context: Never called at interrupt context.
9510  */
9511 
9512 static int
9513 sd_ready_and_valid(struct sd_lun *un)
9514 {
9515 	struct sd_errstats *stp;
9516 	uint64_t	capacity;
9517 	uint_t		lbasize;
9518 	int		rval = SD_READY_VALID;
9519 	char		name_str[48];
9520 	int		is_valid;
9521 
9522 	ASSERT(un != NULL);
9523 	ASSERT(!mutex_owned(SD_MUTEX(un)));
9524 
9525 	mutex_enter(SD_MUTEX(un));
9526 	/*
9527 	 * If a device has removable media, we must check whether media is
9528 	 * present and ready when checking if this device is ready and valid.
9529 	 */
9530 	if (un->un_f_has_removable_media) {
9531 		mutex_exit(SD_MUTEX(un));
9532 		if (sd_send_scsi_TEST_UNIT_READY(un, 0) != 0) {
9533 			rval = SD_NOT_READY_VALID;
9534 			mutex_enter(SD_MUTEX(un));
9535 			goto done;
9536 		}
9537 
9538 		is_valid = SD_IS_VALID_LABEL(un);
9539 		mutex_enter(SD_MUTEX(un));
9540 		if (!is_valid ||
9541 		    (un->un_f_blockcount_is_valid == FALSE) ||
9542 		    (un->un_f_tgt_blocksize_is_valid == FALSE)) {
9543 
9544 			/* capacity has to be read on every open. */
9545 			mutex_exit(SD_MUTEX(un));
9546 			if (sd_send_scsi_READ_CAPACITY(un, &capacity,
9547 			    &lbasize, SD_PATH_DIRECT) != 0) {
9548 				cmlb_invalidate(un->un_cmlbhandle,
9549 				    (void *)SD_PATH_DIRECT);
9550 				mutex_enter(SD_MUTEX(un));
9551 				rval = SD_NOT_READY_VALID;
9552 				goto done;
9553 			} else {
9554 				mutex_enter(SD_MUTEX(un));
9555 				sd_update_block_info(un, lbasize, capacity);
9556 			}
9557 		}
9558 
9559 		/*
9560 		 * Check if the media in the device is writable or not.
9561 		 */
9562 		if (!is_valid && ISCD(un)) {
9563 			sd_check_for_writable_cd(un, SD_PATH_DIRECT);
9564 		}
9565 
9566 	} else {
9567 		/*
9568 		 * Do a test unit ready to clear any unit attention from non-CD
9569 		 * devices.
9570 		 */
9571 		mutex_exit(SD_MUTEX(un));
9572 		(void) sd_send_scsi_TEST_UNIT_READY(un, 0);
9573 		mutex_enter(SD_MUTEX(un));
9574 	}
9575 
9576 
9577 	/*
9578 	 * If this is a non-512 block size device, allocate space for
9579 	 * the wmap cache. This is done here because this routine is
9580 	 * called on every media change, and the block size is a function
9581 	 * of the media rather than the device.
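	 *
	 * (Each instance gets its own cache, named after the driver and
	 * instance number per the snprintf below, e.g. "sd3_cache" for
	 * instance 3 of sd.)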
9582 */ 9583 if (un->un_f_non_devbsize_supported && NOT_DEVBSIZE(un)) { 9584 if (!(un->un_wm_cache)) { 9585 (void) snprintf(name_str, sizeof (name_str), 9586 "%s%d_cache", 9587 ddi_driver_name(SD_DEVINFO(un)), 9588 ddi_get_instance(SD_DEVINFO(un))); 9589 un->un_wm_cache = kmem_cache_create( 9590 name_str, sizeof (struct sd_w_map), 9591 8, sd_wm_cache_constructor, 9592 sd_wm_cache_destructor, NULL, 9593 (void *)un, NULL, 0); 9594 if (!(un->un_wm_cache)) { 9595 rval = ENOMEM; 9596 goto done; 9597 } 9598 } 9599 } 9600 9601 if (un->un_state == SD_STATE_NORMAL) { 9602 /* 9603 * If the target is not yet ready here (defined by a TUR 9604 * failure), invalidate the geometry and print an 'offline' 9605 * message. This is a legacy message, as the state of the 9606 * target is not actually changed to SD_STATE_OFFLINE. 9607 * 9608 * If the TUR fails for EACCES (Reservation Conflict), 9609 * SD_RESERVED_BY_OTHERS will be returned to indicate 9610 * reservation conflict. If the TUR fails for other 9611 * reasons, SD_NOT_READY_VALID will be returned. 9612 */ 9613 int err; 9614 9615 mutex_exit(SD_MUTEX(un)); 9616 err = sd_send_scsi_TEST_UNIT_READY(un, 0); 9617 mutex_enter(SD_MUTEX(un)); 9618 9619 if (err != 0) { 9620 mutex_exit(SD_MUTEX(un)); 9621 cmlb_invalidate(un->un_cmlbhandle, 9622 (void *)SD_PATH_DIRECT); 9623 mutex_enter(SD_MUTEX(un)); 9624 if (err == EACCES) { 9625 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 9626 "reservation conflict\n"); 9627 rval = SD_RESERVED_BY_OTHERS; 9628 } else { 9629 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 9630 "drive offline\n"); 9631 rval = SD_NOT_READY_VALID; 9632 } 9633 goto done; 9634 } 9635 } 9636 9637 if (un->un_f_format_in_progress == FALSE) { 9638 mutex_exit(SD_MUTEX(un)); 9639 if (cmlb_validate(un->un_cmlbhandle, 0, 9640 (void *)SD_PATH_DIRECT) != 0) { 9641 rval = SD_NOT_READY_VALID; 9642 mutex_enter(SD_MUTEX(un)); 9643 goto done; 9644 } 9645 if (un->un_f_pkstats_enabled) { 9646 sd_set_pstats(un); 9647 SD_TRACE(SD_LOG_IO_PARTITION, un, 9648 "sd_ready_and_valid: un:0x%p pstats created and " 9649 "set\n", un); 9650 } 9651 mutex_enter(SD_MUTEX(un)); 9652 } 9653 9654 /* 9655 * If this device supports DOOR_LOCK command, try and send 9656 * this command to PREVENT MEDIA REMOVAL, but don't get upset 9657 * if it fails. For a CD, however, it is an error 9658 */ 9659 if (un->un_f_doorlock_supported) { 9660 mutex_exit(SD_MUTEX(un)); 9661 if ((sd_send_scsi_DOORLOCK(un, SD_REMOVAL_PREVENT, 9662 SD_PATH_DIRECT) != 0) && ISCD(un)) { 9663 rval = SD_NOT_READY_VALID; 9664 mutex_enter(SD_MUTEX(un)); 9665 goto done; 9666 } 9667 mutex_enter(SD_MUTEX(un)); 9668 } 9669 9670 /* The state has changed, inform the media watch routines */ 9671 un->un_mediastate = DKIO_INSERTED; 9672 cv_broadcast(&un->un_state_cv); 9673 rval = SD_READY_VALID; 9674 9675 done: 9676 9677 /* 9678 * Initialize the capacity kstat value, if no media previously 9679 * (capacity kstat is 0) and a media has been inserted 9680 * (un_blockcount > 0). 9681 */ 9682 if (un->un_errstats != NULL) { 9683 stp = (struct sd_errstats *)un->un_errstats->ks_data; 9684 if ((stp->sd_capacity.value.ui64 == 0) && 9685 (un->un_f_blockcount_is_valid == TRUE)) { 9686 stp->sd_capacity.value.ui64 = 9687 (uint64_t)((uint64_t)un->un_blockcount * 9688 un->un_sys_blocksize); 9689 } 9690 } 9691 9692 mutex_exit(SD_MUTEX(un)); 9693 return (rval); 9694 } 9695 9696 9697 /* 9698 * Function: sdmin 9699 * 9700 * Description: Routine to limit the size of a data transfer. Used in 9701 * conjunction with physio(9F). 
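 *		physio(9F) invokes this as its "minphys" routine before
 *		each transfer, so b_bcount is clamped to un_max_xfer_size
 *		and larger requests are simply carried out as several
 *		consecutive transfers.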
9702 * 9703 * Arguments: bp - pointer to the indicated buf(9S) struct. 9704 * 9705 * Context: Kernel thread context. 9706 */ 9707 9708 static void 9709 sdmin(struct buf *bp) 9710 { 9711 struct sd_lun *un; 9712 int instance; 9713 9714 instance = SDUNIT(bp->b_edev); 9715 9716 un = ddi_get_soft_state(sd_state, instance); 9717 ASSERT(un != NULL); 9718 9719 if (bp->b_bcount > un->un_max_xfer_size) { 9720 bp->b_bcount = un->un_max_xfer_size; 9721 } 9722 } 9723 9724 9725 /* 9726 * Function: sdread 9727 * 9728 * Description: Driver's read(9e) entry point function. 9729 * 9730 * Arguments: dev - device number 9731 * uio - structure pointer describing where data is to be stored 9732 * in user's space 9733 * cred_p - user credential pointer 9734 * 9735 * Return Code: ENXIO 9736 * EIO 9737 * EINVAL 9738 * value returned by physio 9739 * 9740 * Context: Kernel thread context. 9741 */ 9742 /* ARGSUSED */ 9743 static int 9744 sdread(dev_t dev, struct uio *uio, cred_t *cred_p) 9745 { 9746 struct sd_lun *un = NULL; 9747 int secmask; 9748 int err; 9749 9750 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 9751 return (ENXIO); 9752 } 9753 9754 ASSERT(!mutex_owned(SD_MUTEX(un))); 9755 9756 if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) { 9757 mutex_enter(SD_MUTEX(un)); 9758 /* 9759 * Because the call to sd_ready_and_valid will issue I/O we 9760 * must wait here if either the device is suspended or 9761 * if it's power level is changing. 9762 */ 9763 while ((un->un_state == SD_STATE_SUSPENDED) || 9764 (un->un_state == SD_STATE_PM_CHANGING)) { 9765 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 9766 } 9767 un->un_ncmds_in_driver++; 9768 mutex_exit(SD_MUTEX(un)); 9769 if ((sd_ready_and_valid(un)) != SD_READY_VALID) { 9770 mutex_enter(SD_MUTEX(un)); 9771 un->un_ncmds_in_driver--; 9772 ASSERT(un->un_ncmds_in_driver >= 0); 9773 mutex_exit(SD_MUTEX(un)); 9774 return (EIO); 9775 } 9776 mutex_enter(SD_MUTEX(un)); 9777 un->un_ncmds_in_driver--; 9778 ASSERT(un->un_ncmds_in_driver >= 0); 9779 mutex_exit(SD_MUTEX(un)); 9780 } 9781 9782 /* 9783 * Read requests are restricted to multiples of the system block size. 9784 */ 9785 secmask = un->un_sys_blocksize - 1; 9786 9787 if (uio->uio_loffset & ((offset_t)(secmask))) { 9788 SD_ERROR(SD_LOG_READ_WRITE, un, 9789 "sdread: file offset not modulo %d\n", 9790 un->un_sys_blocksize); 9791 err = EINVAL; 9792 } else if (uio->uio_iov->iov_len & (secmask)) { 9793 SD_ERROR(SD_LOG_READ_WRITE, un, 9794 "sdread: transfer length not modulo %d\n", 9795 un->un_sys_blocksize); 9796 err = EINVAL; 9797 } else { 9798 err = physio(sdstrategy, NULL, dev, B_READ, sdmin, uio); 9799 } 9800 return (err); 9801 } 9802 9803 9804 /* 9805 * Function: sdwrite 9806 * 9807 * Description: Driver's write(9e) entry point function. 9808 * 9809 * Arguments: dev - device number 9810 * uio - structure pointer describing where data is stored in 9811 * user's space 9812 * cred_p - user credential pointer 9813 * 9814 * Return Code: ENXIO 9815 * EIO 9816 * EINVAL 9817 * value returned by physio 9818 * 9819 * Context: Kernel thread context. 
9820 */ 9821 /* ARGSUSED */ 9822 static int 9823 sdwrite(dev_t dev, struct uio *uio, cred_t *cred_p) 9824 { 9825 struct sd_lun *un = NULL; 9826 int secmask; 9827 int err; 9828 9829 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 9830 return (ENXIO); 9831 } 9832 9833 ASSERT(!mutex_owned(SD_MUTEX(un))); 9834 9835 if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) { 9836 mutex_enter(SD_MUTEX(un)); 9837 /* 9838 * Because the call to sd_ready_and_valid will issue I/O we 9839 * must wait here if either the device is suspended or 9840 * if it's power level is changing. 9841 */ 9842 while ((un->un_state == SD_STATE_SUSPENDED) || 9843 (un->un_state == SD_STATE_PM_CHANGING)) { 9844 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 9845 } 9846 un->un_ncmds_in_driver++; 9847 mutex_exit(SD_MUTEX(un)); 9848 if ((sd_ready_and_valid(un)) != SD_READY_VALID) { 9849 mutex_enter(SD_MUTEX(un)); 9850 un->un_ncmds_in_driver--; 9851 ASSERT(un->un_ncmds_in_driver >= 0); 9852 mutex_exit(SD_MUTEX(un)); 9853 return (EIO); 9854 } 9855 mutex_enter(SD_MUTEX(un)); 9856 un->un_ncmds_in_driver--; 9857 ASSERT(un->un_ncmds_in_driver >= 0); 9858 mutex_exit(SD_MUTEX(un)); 9859 } 9860 9861 /* 9862 * Write requests are restricted to multiples of the system block size. 9863 */ 9864 secmask = un->un_sys_blocksize - 1; 9865 9866 if (uio->uio_loffset & ((offset_t)(secmask))) { 9867 SD_ERROR(SD_LOG_READ_WRITE, un, 9868 "sdwrite: file offset not modulo %d\n", 9869 un->un_sys_blocksize); 9870 err = EINVAL; 9871 } else if (uio->uio_iov->iov_len & (secmask)) { 9872 SD_ERROR(SD_LOG_READ_WRITE, un, 9873 "sdwrite: transfer length not modulo %d\n", 9874 un->un_sys_blocksize); 9875 err = EINVAL; 9876 } else { 9877 err = physio(sdstrategy, NULL, dev, B_WRITE, sdmin, uio); 9878 } 9879 return (err); 9880 } 9881 9882 9883 /* 9884 * Function: sdaread 9885 * 9886 * Description: Driver's aread(9e) entry point function. 9887 * 9888 * Arguments: dev - device number 9889 * aio - structure pointer describing where data is to be stored 9890 * cred_p - user credential pointer 9891 * 9892 * Return Code: ENXIO 9893 * EIO 9894 * EINVAL 9895 * value returned by aphysio 9896 * 9897 * Context: Kernel thread context. 9898 */ 9899 /* ARGSUSED */ 9900 static int 9901 sdaread(dev_t dev, struct aio_req *aio, cred_t *cred_p) 9902 { 9903 struct sd_lun *un = NULL; 9904 struct uio *uio = aio->aio_uio; 9905 int secmask; 9906 int err; 9907 9908 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 9909 return (ENXIO); 9910 } 9911 9912 ASSERT(!mutex_owned(SD_MUTEX(un))); 9913 9914 if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) { 9915 mutex_enter(SD_MUTEX(un)); 9916 /* 9917 * Because the call to sd_ready_and_valid will issue I/O we 9918 * must wait here if either the device is suspended or 9919 * if it's power level is changing. 9920 */ 9921 while ((un->un_state == SD_STATE_SUSPENDED) || 9922 (un->un_state == SD_STATE_PM_CHANGING)) { 9923 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 9924 } 9925 un->un_ncmds_in_driver++; 9926 mutex_exit(SD_MUTEX(un)); 9927 if ((sd_ready_and_valid(un)) != SD_READY_VALID) { 9928 mutex_enter(SD_MUTEX(un)); 9929 un->un_ncmds_in_driver--; 9930 ASSERT(un->un_ncmds_in_driver >= 0); 9931 mutex_exit(SD_MUTEX(un)); 9932 return (EIO); 9933 } 9934 mutex_enter(SD_MUTEX(un)); 9935 un->un_ncmds_in_driver--; 9936 ASSERT(un->un_ncmds_in_driver >= 0); 9937 mutex_exit(SD_MUTEX(un)); 9938 } 9939 9940 /* 9941 * Read requests are restricted to multiples of the system block size. 
9942 */ 9943 secmask = un->un_sys_blocksize - 1; 9944 9945 if (uio->uio_loffset & ((offset_t)(secmask))) { 9946 SD_ERROR(SD_LOG_READ_WRITE, un, 9947 "sdaread: file offset not modulo %d\n", 9948 un->un_sys_blocksize); 9949 err = EINVAL; 9950 } else if (uio->uio_iov->iov_len & (secmask)) { 9951 SD_ERROR(SD_LOG_READ_WRITE, un, 9952 "sdaread: transfer length not modulo %d\n", 9953 un->un_sys_blocksize); 9954 err = EINVAL; 9955 } else { 9956 err = aphysio(sdstrategy, anocancel, dev, B_READ, sdmin, aio); 9957 } 9958 return (err); 9959 } 9960 9961 9962 /* 9963 * Function: sdawrite 9964 * 9965 * Description: Driver's awrite(9e) entry point function. 9966 * 9967 * Arguments: dev - device number 9968 * aio - structure pointer describing where data is stored 9969 * cred_p - user credential pointer 9970 * 9971 * Return Code: ENXIO 9972 * EIO 9973 * EINVAL 9974 * value returned by aphysio 9975 * 9976 * Context: Kernel thread context. 9977 */ 9978 /* ARGSUSED */ 9979 static int 9980 sdawrite(dev_t dev, struct aio_req *aio, cred_t *cred_p) 9981 { 9982 struct sd_lun *un = NULL; 9983 struct uio *uio = aio->aio_uio; 9984 int secmask; 9985 int err; 9986 9987 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 9988 return (ENXIO); 9989 } 9990 9991 ASSERT(!mutex_owned(SD_MUTEX(un))); 9992 9993 if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) { 9994 mutex_enter(SD_MUTEX(un)); 9995 /* 9996 * Because the call to sd_ready_and_valid will issue I/O we 9997 * must wait here if either the device is suspended or 9998 * if it's power level is changing. 9999 */ 10000 while ((un->un_state == SD_STATE_SUSPENDED) || 10001 (un->un_state == SD_STATE_PM_CHANGING)) { 10002 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 10003 } 10004 un->un_ncmds_in_driver++; 10005 mutex_exit(SD_MUTEX(un)); 10006 if ((sd_ready_and_valid(un)) != SD_READY_VALID) { 10007 mutex_enter(SD_MUTEX(un)); 10008 un->un_ncmds_in_driver--; 10009 ASSERT(un->un_ncmds_in_driver >= 0); 10010 mutex_exit(SD_MUTEX(un)); 10011 return (EIO); 10012 } 10013 mutex_enter(SD_MUTEX(un)); 10014 un->un_ncmds_in_driver--; 10015 ASSERT(un->un_ncmds_in_driver >= 0); 10016 mutex_exit(SD_MUTEX(un)); 10017 } 10018 10019 /* 10020 * Write requests are restricted to multiples of the system block size. 
10021 */ 10022 secmask = un->un_sys_blocksize - 1; 10023 10024 if (uio->uio_loffset & ((offset_t)(secmask))) { 10025 SD_ERROR(SD_LOG_READ_WRITE, un, 10026 "sdawrite: file offset not modulo %d\n", 10027 un->un_sys_blocksize); 10028 err = EINVAL; 10029 } else if (uio->uio_iov->iov_len & (secmask)) { 10030 SD_ERROR(SD_LOG_READ_WRITE, un, 10031 "sdawrite: transfer length not modulo %d\n", 10032 un->un_sys_blocksize); 10033 err = EINVAL; 10034 } else { 10035 err = aphysio(sdstrategy, anocancel, dev, B_WRITE, sdmin, aio); 10036 } 10037 return (err); 10038 } 10039 10040 10041 10042 10043 10044 /* 10045 * Driver IO processing follows the following sequence: 10046 * 10047 * sdioctl(9E) sdstrategy(9E) biodone(9F) 10048 * | | ^ 10049 * v v | 10050 * sd_send_scsi_cmd() ddi_xbuf_qstrategy() +-------------------+ 10051 * | | | | 10052 * v | | | 10053 * sd_uscsi_strategy() sd_xbuf_strategy() sd_buf_iodone() sd_uscsi_iodone() 10054 * | | ^ ^ 10055 * v v | | 10056 * SD_BEGIN_IOSTART() SD_BEGIN_IOSTART() | | 10057 * | | | | 10058 * +---+ | +------------+ +-------+ 10059 * | | | | 10060 * | SD_NEXT_IOSTART()| SD_NEXT_IODONE()| | 10061 * | v | | 10062 * | sd_mapblockaddr_iostart() sd_mapblockaddr_iodone() | 10063 * | | ^ | 10064 * | SD_NEXT_IOSTART()| SD_NEXT_IODONE()| | 10065 * | v | | 10066 * | sd_mapblocksize_iostart() sd_mapblocksize_iodone() | 10067 * | | ^ | 10068 * | SD_NEXT_IOSTART()| SD_NEXT_IODONE()| | 10069 * | v | | 10070 * | sd_checksum_iostart() sd_checksum_iodone() | 10071 * | | ^ | 10072 * +-> SD_NEXT_IOSTART()| SD_NEXT_IODONE()+------------->+ 10073 * | v | | 10074 * | sd_pm_iostart() sd_pm_iodone() | 10075 * | | ^ | 10076 * | | | | 10077 * +-> SD_NEXT_IOSTART()| SD_BEGIN_IODONE()--+--------------+ 10078 * | ^ 10079 * v | 10080 * sd_core_iostart() | 10081 * | | 10082 * | +------>(*destroypkt)() 10083 * +-> sd_start_cmds() <-+ | | 10084 * | | | v 10085 * | | | scsi_destroy_pkt(9F) 10086 * | | | 10087 * +->(*initpkt)() +- sdintr() 10088 * | | | | 10089 * | +-> scsi_init_pkt(9F) | +-> sd_handle_xxx() 10090 * | +-> scsi_setup_cdb(9F) | 10091 * | | 10092 * +--> scsi_transport(9F) | 10093 * | | 10094 * +----> SCSA ---->+ 10095 * 10096 * 10097 * This code is based upon the following presumptions: 10098 * 10099 * - iostart and iodone functions operate on buf(9S) structures. These 10100 * functions perform the necessary operations on the buf(9S) and pass 10101 * them along to the next function in the chain by using the macros 10102 * SD_NEXT_IOSTART() (for iostart side functions) and SD_NEXT_IODONE() 10103 * (for iodone side functions). 10104 * 10105 * - The iostart side functions may sleep. The iodone side functions 10106 * are called under interrupt context and may NOT sleep. Therefore 10107 * iodone side functions also may not call iostart side functions. 10108 * (NOTE: iostart side functions should NOT sleep for memory, as 10109 * this could result in deadlock.) 10110 * 10111 * - An iostart side function may call its corresponding iodone side 10112 * function directly (if necessary). 10113 * 10114 * - In the event of an error, an iostart side function can return a buf(9S) 10115 * to its caller by calling SD_BEGIN_IODONE() (after setting B_ERROR and 10116 * b_error in the usual way of course). 10117 * 10118 * - The taskq mechanism may be used by the iodone side functions to dispatch 10119 * requests to the iostart side functions. The iostart side functions in 10120 * this case would be called under the context of a taskq thread, so it's 10121 * OK for them to block/sleep/spin in this case. 
10122  *
10123  * - iostart side functions may allocate "shadow" buf(9S) structs and
10124  *   pass them along to the next function in the chain. The corresponding
10125  *   iodone side functions must coalesce the "shadow" bufs and return
10126  *   the "original" buf to the next higher layer.
10127  *
10128  * - The b_private field of the buf(9S) struct holds a pointer to
10129  *   an sd_xbuf struct, which contains information needed to
10130  *   construct the scsi_pkt for the command.
10131  *
10132  * - The SD_MUTEX(un) is NOT held across calls to the next layer. Each
10133  *   layer must acquire & release the SD_MUTEX(un) as needed.
10134  */
10135 
10136 
10137 /*
10138  * Create taskq for all targets in the system. This is created at
10139  * _init(9E) and destroyed at _fini(9E).
10140  *
10141  * Note: here we set the minalloc to a reasonably high number to ensure that
10142  * we will have an adequate supply of task entries available at interrupt time.
10143  * This is used in conjunction with the TASKQ_PREPOPULATE flag in
10144  * sd_taskq_create(). Since we do not want to sleep for allocations at
10145  * interrupt time, set maxalloc equal to minalloc. That way we will just fail
10146  * the command if we ever try to dispatch more than SD_TASKQ_MAXALLOC taskq
10147  * requests at any one instant in time.
10148  */
10149 #define	SD_TASKQ_NUMTHREADS	8
10150 #define	SD_TASKQ_MINALLOC	256
10151 #define	SD_TASKQ_MAXALLOC	256
10152 
10153 static taskq_t	*sd_tq = NULL;
10154 _NOTE(SCHEME_PROTECTS_DATA("stable data", sd_tq))
10155 
10156 static int	sd_taskq_minalloc = SD_TASKQ_MINALLOC;
10157 static int	sd_taskq_maxalloc = SD_TASKQ_MAXALLOC;
10158 
10159 /*
10160  * The following task queue is being created for the write part of
10161  * read-modify-write of non-512 block size devices.
10162  * Limit the number of threads to 1 for now; this applies only to
10163  * DVD-RAM and MO drives at present, and performance for those is
10164  * not the main criterion at this stage.
10165  * Note: whether a single taskq could be used instead remains to be explored.
10166  */
10167 #define	SD_WMR_TASKQ_NUMTHREADS	1
10168 static taskq_t	*sd_wmr_tq = NULL;
10169 _NOTE(SCHEME_PROTECTS_DATA("stable data", sd_wmr_tq))
10170 
10171 /*
10172  * Function: sd_taskq_create
10173  *
10174  * Description: Create taskq thread(s) and preallocate task entries
10175  *
10176  * Return Code: None (the created taskqs are stored in sd_tq and sd_wmr_tq).
10177  *
10178  * Context: Can sleep. Requires blockable context.
10179  *
10180  * Notes: - The taskq() facility currently is NOT part of the DDI.
10181  *	    (definitely NOT recommended for 3rd-party drivers!) :-)
10182  *	  - taskq_create() will block for memory; it will also panic
10183  *	    if it cannot create the requested number of threads.
10184  *	  - Currently taskq_create() creates threads that cannot be
10185  *	    swapped.
10186  *	  - We use TASKQ_PREPOPULATE to ensure we have an adequate
10187  *	    supply of taskq entries at interrupt time (i.e., so that we
10188  *	    do not have to sleep for memory)
10189  */
10190 
10191 static void
10192 sd_taskq_create(void)
10193 {
10194 	char	taskq_name[TASKQ_NAMELEN];
10195 
10196 	ASSERT(sd_tq == NULL);
10197 	ASSERT(sd_wmr_tq == NULL);
10198 
10199 	(void) snprintf(taskq_name, sizeof (taskq_name),
10200 	    "%s_drv_taskq", sd_label);
10201 	sd_tq = (taskq_create(taskq_name, SD_TASKQ_NUMTHREADS,
10202 	    (v.v_maxsyspri - 2), sd_taskq_minalloc, sd_taskq_maxalloc,
10203 	    TASKQ_PREPOPULATE));
10204 
10205 	(void) snprintf(taskq_name, sizeof (taskq_name),
10206 	    "%s_rmw_taskq", sd_label);
10207 	sd_wmr_tq = (taskq_create(taskq_name, SD_WMR_TASKQ_NUMTHREADS,
10208 	    (v.v_maxsyspri - 2), sd_taskq_minalloc, sd_taskq_maxalloc,
10209 	    TASKQ_PREPOPULATE));
10210 }
10211 
10212 
10213 /*
10214  * Function: sd_taskq_delete
10215  *
10216  * Description: Complementary cleanup routine for sd_taskq_create().
10217  *
10218  * Context: Kernel thread context.
10219  */
10220 
10221 static void
10222 sd_taskq_delete(void)
10223 {
10224 	ASSERT(sd_tq != NULL);
10225 	ASSERT(sd_wmr_tq != NULL);
10226 	taskq_destroy(sd_tq);
10227 	taskq_destroy(sd_wmr_tq);
10228 	sd_tq = NULL;
10229 	sd_wmr_tq = NULL;
10230 }
10231 
10232 
10233 /*
10234  * Function: sdstrategy
10235  *
10236  * Description: Driver's strategy (9E) entry point function.
10237  *
10238  * Arguments: bp - pointer to buf(9S)
10239  *
10240  * Return Code: Always returns zero
10241  *
10242  * Context: Kernel thread context.
10243  */
10244 
10245 static int
10246 sdstrategy(struct buf *bp)
10247 {
10248 	struct sd_lun *un;
10249 
10250 	un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp));
10251 	if (un == NULL) {
10252 		bioerror(bp, EIO);
10253 		bp->b_resid = bp->b_bcount;
10254 		biodone(bp);
10255 		return (0);
10256 	}
10257 	/* As was done in the past, fail new commands if the state is dumping. */
10258 	if (un->un_state == SD_STATE_DUMPING) {
10259 		bioerror(bp, ENXIO);
10260 		bp->b_resid = bp->b_bcount;
10261 		biodone(bp);
10262 		return (0);
10263 	}
10264 
10265 	ASSERT(!mutex_owned(SD_MUTEX(un)));
10266 
10267 	/*
10268 	 * Commands may sneak in while we released the mutex in
10269 	 * DDI_SUSPEND, so we should block new commands. However, old
10270 	 * commands that are still in the driver at this point should
10271 	 * still be allowed to drain.
10272 	 */
10273 	mutex_enter(SD_MUTEX(un));
10274 	/*
10275 	 * Must wait here if either the device is suspended or
10276 	 * if its power level is changing.
10277 	 */
10278 	while ((un->un_state == SD_STATE_SUSPENDED) ||
10279 	    (un->un_state == SD_STATE_PM_CHANGING)) {
10280 		cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
10281 	}
10282 
10283 	un->un_ncmds_in_driver++;
10284 
10285 	/*
10286 	 * atapi: Since we are running the CD for now in PIO mode we need to
10287 	 * call bp_mapin here to avoid bp_mapin being called in interrupt
10288 	 * context under the HBA's init_pkt routine.
10289 	 */
10290 	if (un->un_f_cfg_is_atapi == TRUE) {
10291 		mutex_exit(SD_MUTEX(un));
10292 		bp_mapin(bp);
10293 		mutex_enter(SD_MUTEX(un));
10294 	}
10295 	SD_INFO(SD_LOG_IO, un, "sdstrategy: un_ncmds_in_driver = %ld\n",
10296 	    un->un_ncmds_in_driver);
10297 
10298 	mutex_exit(SD_MUTEX(un));
10299 
10300 	/*
10301 	 * This will (eventually) allocate the sd_xbuf area and
10302 	 * call sd_xbuf_strategy(). We just want to return the
10303 	 * result of ddi_xbuf_qstrategy so that we have an
10304 	 * optimized tail call which saves us a stack frame.
10305 	 */
10306 	return (ddi_xbuf_qstrategy(bp, un->un_xbuf_attr));
10307 }
10308 
10309 
10310 /*
10311  * Function: sd_xbuf_strategy
10312  *
10313  * Description: Function for initiating IO operations via the
10314  *		ddi_xbuf_qstrategy() mechanism.
10315  *
10316  * Context: Kernel thread context.
10317  */
10318 
10319 static void
10320 sd_xbuf_strategy(struct buf *bp, ddi_xbuf_t xp, void *arg)
10321 {
10322 	struct sd_lun *un = arg;
10323 
10324 	ASSERT(bp != NULL);
10325 	ASSERT(xp != NULL);
10326 	ASSERT(un != NULL);
10327 	ASSERT(!mutex_owned(SD_MUTEX(un)));
10328 
10329 	/*
10330 	 * Initialize the fields in the xbuf and save a pointer to the
10331 	 * xbuf in bp->b_private.
10332 	 */
10333 	sd_xbuf_init(un, bp, xp, SD_CHAIN_BUFIO, NULL);
10334 
10335 	/* Send the buf down the iostart chain */
10336 	SD_BEGIN_IOSTART(((struct sd_xbuf *)xp)->xb_chain_iostart, un, bp);
10337 }
10338 
10339 
10340 /*
10341  * Function: sd_xbuf_init
10342  *
10343  * Description: Prepare the given sd_xbuf struct for use.
10344  *
10345  * Arguments: un - ptr to softstate
10346  *		bp - ptr to associated buf(9S)
10347  *		xp - ptr to associated sd_xbuf
10348  *		chain_type - IO chain type to use:
10349  *			SD_CHAIN_NULL
10350  *			SD_CHAIN_BUFIO
10351  *			SD_CHAIN_USCSI
10352  *			SD_CHAIN_DIRECT
10353  *			SD_CHAIN_DIRECT_PRIORITY
10354  *		pktinfop - ptr to private data struct for scsi_pkt(9S)
10355  *			   initialization; may be NULL if none.
10356  *
10357  * Context: Kernel thread context
10358  */
10359 
10360 static void
10361 sd_xbuf_init(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
10362     uchar_t chain_type, void *pktinfop)
10363 {
10364 	int index;
10365 
10366 	ASSERT(un != NULL);
10367 	ASSERT(bp != NULL);
10368 	ASSERT(xp != NULL);
10369 
10370 	SD_INFO(SD_LOG_IO, un, "sd_xbuf_init: buf:0x%p chain type:0x%x\n",
10371 	    bp, chain_type);
10372 
10373 	xp->xb_un	= un;
10374 	xp->xb_pktp	= NULL;
10375 	xp->xb_pktinfo	= pktinfop;
10376 	xp->xb_private	= bp->b_private;
10377 	xp->xb_blkno	= (daddr_t)bp->b_blkno;
10378 
10379 	/*
10380 	 * Set up the iostart and iodone chain indexes in the xbuf, based
10381 	 * upon the specified chain type to use.
10382 	 */
10383 	switch (chain_type) {
10384 	case SD_CHAIN_NULL:
10385 		/*
10386 		 * Fall through to just use the values for the buf type, even
10387 		 * though for the NULL chain these values will never be used.
10388 		 */
10389 		/* FALLTHRU */
10390 	case SD_CHAIN_BUFIO:
10391 		index = un->un_buf_chain_type;
10392 		break;
10393 	case SD_CHAIN_USCSI:
10394 		index = un->un_uscsi_chain_type;
10395 		break;
10396 	case SD_CHAIN_DIRECT:
10397 		index = un->un_direct_chain_type;
10398 		break;
10399 	case SD_CHAIN_DIRECT_PRIORITY:
10400 		index = un->un_priority_chain_type;
10401 		break;
10402 	default:
10403 		/* We're really broken if we ever get here... */
10404 		panic("sd_xbuf_init: illegal chain type!");
10405 		/*NOTREACHED*/
10406 	}
10407 
10408 	xp->xb_chain_iostart = sd_chain_index_map[index].sci_iostart_index;
10409 	xp->xb_chain_iodone = sd_chain_index_map[index].sci_iodone_index;
10410 
10411 	/*
10412 	 * It might be a bit easier to simply bzero the entire xbuf above,
10413 	 * but it turns out that since we init a fair number of members anyway,
10414 	 * we save a fair number of cycles by doing explicit assignments of zero.
10415 */ 10416 xp->xb_pkt_flags = 0; 10417 xp->xb_dma_resid = 0; 10418 xp->xb_retry_count = 0; 10419 xp->xb_victim_retry_count = 0; 10420 xp->xb_ua_retry_count = 0; 10421 xp->xb_nr_retry_count = 0; 10422 xp->xb_sense_bp = NULL; 10423 xp->xb_sense_status = 0; 10424 xp->xb_sense_state = 0; 10425 xp->xb_sense_resid = 0; 10426 10427 bp->b_private = xp; 10428 bp->b_flags &= ~(B_DONE | B_ERROR); 10429 bp->b_resid = 0; 10430 bp->av_forw = NULL; 10431 bp->av_back = NULL; 10432 bioerror(bp, 0); 10433 10434 SD_INFO(SD_LOG_IO, un, "sd_xbuf_init: done.\n"); 10435 } 10436 10437 10438 /* 10439 * Function: sd_uscsi_strategy 10440 * 10441 * Description: Wrapper for calling into the USCSI chain via physio(9F) 10442 * 10443 * Arguments: bp - buf struct ptr 10444 * 10445 * Return Code: Always returns 0 10446 * 10447 * Context: Kernel thread context 10448 */ 10449 10450 static int 10451 sd_uscsi_strategy(struct buf *bp) 10452 { 10453 struct sd_lun *un; 10454 struct sd_uscsi_info *uip; 10455 struct sd_xbuf *xp; 10456 uchar_t chain_type; 10457 10458 ASSERT(bp != NULL); 10459 10460 un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp)); 10461 if (un == NULL) { 10462 bioerror(bp, EIO); 10463 bp->b_resid = bp->b_bcount; 10464 biodone(bp); 10465 return (0); 10466 } 10467 10468 ASSERT(!mutex_owned(SD_MUTEX(un))); 10469 10470 SD_TRACE(SD_LOG_IO, un, "sd_uscsi_strategy: entry: buf:0x%p\n", bp); 10471 10472 mutex_enter(SD_MUTEX(un)); 10473 /* 10474 * atapi: Since we are running the CD for now in PIO mode we need to 10475 * call bp_mapin here to avoid bp_mapin called interrupt context under 10476 * the HBA's init_pkt routine. 10477 */ 10478 if (un->un_f_cfg_is_atapi == TRUE) { 10479 mutex_exit(SD_MUTEX(un)); 10480 bp_mapin(bp); 10481 mutex_enter(SD_MUTEX(un)); 10482 } 10483 un->un_ncmds_in_driver++; 10484 SD_INFO(SD_LOG_IO, un, "sd_uscsi_strategy: un_ncmds_in_driver = %ld\n", 10485 un->un_ncmds_in_driver); 10486 mutex_exit(SD_MUTEX(un)); 10487 10488 /* 10489 * A pointer to a struct sd_uscsi_info is expected in bp->b_private 10490 */ 10491 ASSERT(bp->b_private != NULL); 10492 uip = (struct sd_uscsi_info *)bp->b_private; 10493 10494 switch (uip->ui_flags) { 10495 case SD_PATH_DIRECT: 10496 chain_type = SD_CHAIN_DIRECT; 10497 break; 10498 case SD_PATH_DIRECT_PRIORITY: 10499 chain_type = SD_CHAIN_DIRECT_PRIORITY; 10500 break; 10501 default: 10502 chain_type = SD_CHAIN_USCSI; 10503 break; 10504 } 10505 10506 /* 10507 * We may allocate extra buf for external USCSI commands. If the 10508 * application asks for bigger than 20-byte sense data via USCSI, 10509 * SCSA layer will allocate 252 bytes sense buf for that command. 
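	 * To match that, the sd_xbuf allocated below trades its embedded
	 * SENSE_LENGTH-byte sense area for a MAX_SENSE_LENGTH-byte one;
	 * the same size computation must be (and is) repeated when the
	 * xbuf is freed in sd_uscsi_iodone().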
10510 	 */
10511 	if (((struct uscsi_cmd *)(uip->ui_cmdp))->uscsi_rqlen >
10512 	    SENSE_LENGTH) {
10513 		xp = kmem_zalloc(sizeof (struct sd_xbuf) - SENSE_LENGTH +
10514 		    MAX_SENSE_LENGTH, KM_SLEEP);
10515 	} else {
10516 		xp = kmem_zalloc(sizeof (struct sd_xbuf), KM_SLEEP);
10517 	}
10518 
10519 	sd_xbuf_init(un, bp, xp, chain_type, uip->ui_cmdp);
10520 
10521 	/* Use the index obtained within xbuf_init */
10522 	SD_BEGIN_IOSTART(xp->xb_chain_iostart, un, bp);
10523 
10524 	SD_TRACE(SD_LOG_IO, un, "sd_uscsi_strategy: exit: buf:0x%p\n", bp);
10525 
10526 	return (0);
10527 }
10528 
10529 /*
10530  * Function: sd_send_scsi_cmd
10531  *
10532  * Description: Runs a USCSI command for user (when called through sdioctl),
10533  *		or for the driver
10534  *
10535  * Arguments: dev - the dev_t for the device
10536  *		incmd - ptr to a valid uscsi_cmd struct
10537  *		flag - bit flag, indicating open settings, 32/64 bit type
10538  *		dataspace - UIO_USERSPACE or UIO_SYSSPACE
10539  *		path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
10540  *			the normal command waitq, or SD_PATH_DIRECT_PRIORITY
10541  *			to use the USCSI "direct" chain and bypass the normal
10542  *			command waitq.
10543  *
10544  * Return Code: 0 - successful completion of the given command
10545  *		EIO - scsi_uscsi_handle_command() failed
10546  *		ENXIO - soft state not found for specified dev
10547  *		EINVAL
10548  *		EFAULT - copyin/copyout error
10549  *		return code of scsi_uscsi_handle_command():
10550  *			EIO
10551  *			ENXIO
10552  *			EACCES
10553  *
10554  * Context: Waits for command to complete. Can sleep.
10555  */
10556 
10557 static int
10558 sd_send_scsi_cmd(dev_t dev, struct uscsi_cmd *incmd, int flag,
10559     enum uio_seg dataspace, int path_flag)
10560 {
10561 	struct sd_uscsi_info	*uip;
10562 	struct uscsi_cmd	*uscmd;
10563 	struct sd_lun		*un;
10564 	int	format = 0;
10565 	int	rval;
10566 
10567 	un = ddi_get_soft_state(sd_state, SDUNIT(dev));
10568 	if (un == NULL) {
10569 		return (ENXIO);
10570 	}
10571 
10572 	ASSERT(!mutex_owned(SD_MUTEX(un)));
10573 
10574 #ifdef SDDEBUG
10575 	switch (dataspace) {
10576 	case UIO_USERSPACE:
10577 		SD_TRACE(SD_LOG_IO, un,
10578 		    "sd_send_scsi_cmd: entry: un:0x%p UIO_USERSPACE\n", un);
10579 		break;
10580 	case UIO_SYSSPACE:
10581 		SD_TRACE(SD_LOG_IO, un,
10582 		    "sd_send_scsi_cmd: entry: un:0x%p UIO_SYSSPACE\n", un);
10583 		break;
10584 	default:
10585 		SD_TRACE(SD_LOG_IO, un,
10586 		    "sd_send_scsi_cmd: entry: un:0x%p UNEXPECTED SPACE\n", un);
10587 		break;
10588 	}
10589 #endif
10590 
10591 	rval = scsi_uscsi_alloc_and_copyin((intptr_t)incmd, flag,
10592 	    SD_ADDRESS(un), &uscmd);
10593 	if (rval != 0) {
10594 		SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_cmd: "
10595 		    "scsi_uscsi_alloc_and_copyin failed\n");
10596 		return (rval);
10597 	}
10598 
10599 	if ((uscmd->uscsi_cdb != NULL) &&
10600 	    (uscmd->uscsi_cdb[0] == SCMD_FORMAT)) {
10601 		mutex_enter(SD_MUTEX(un));
10602 		un->un_f_format_in_progress = TRUE;
10603 		mutex_exit(SD_MUTEX(un));
10604 		format = 1;
10605 	}
10606 
10607 	/*
10608 	 * Allocate an sd_uscsi_info struct and fill it with the info
10609 	 * needed by sd_initpkt_for_uscsi(). Then put the pointer into
10610 	 * b_private in the buf for sd_initpkt_for_uscsi(). Note that
10611 	 * since we allocate the buf here in this function, we do not
10612 	 * need to preserve the prior contents of b_private.
10613 * The sd_uscsi_info struct is also used by sd_uscsi_strategy() 10614 */ 10615 uip = kmem_zalloc(sizeof (struct sd_uscsi_info), KM_SLEEP); 10616 uip->ui_flags = path_flag; 10617 uip->ui_cmdp = uscmd; 10618 10619 /* 10620 * Commands sent with priority are intended for error recovery 10621 * situations, and do not have retries performed. 10622 */ 10623 if (path_flag == SD_PATH_DIRECT_PRIORITY) { 10624 uscmd->uscsi_flags |= USCSI_DIAGNOSE; 10625 } 10626 uscmd->uscsi_flags &= ~USCSI_NOINTR; 10627 10628 rval = scsi_uscsi_handle_cmd(dev, dataspace, uscmd, 10629 sd_uscsi_strategy, NULL, uip); 10630 10631 #ifdef SDDEBUG 10632 SD_INFO(SD_LOG_IO, un, "sd_send_scsi_cmd: " 10633 "uscsi_status: 0x%02x uscsi_resid:0x%x\n", 10634 uscmd->uscsi_status, uscmd->uscsi_resid); 10635 if (uscmd->uscsi_bufaddr != NULL) { 10636 SD_INFO(SD_LOG_IO, un, "sd_send_scsi_cmd: " 10637 "uscmd->uscsi_bufaddr: 0x%p uscmd->uscsi_buflen:%d\n", 10638 uscmd->uscsi_bufaddr, uscmd->uscsi_buflen); 10639 if (dataspace == UIO_SYSSPACE) { 10640 SD_DUMP_MEMORY(un, SD_LOG_IO, 10641 "data", (uchar_t *)uscmd->uscsi_bufaddr, 10642 uscmd->uscsi_buflen, SD_LOG_HEX); 10643 } 10644 } 10645 #endif 10646 10647 if (format == 1) { 10648 mutex_enter(SD_MUTEX(un)); 10649 un->un_f_format_in_progress = FALSE; 10650 mutex_exit(SD_MUTEX(un)); 10651 } 10652 10653 (void) scsi_uscsi_copyout_and_free((intptr_t)incmd, uscmd); 10654 kmem_free(uip, sizeof (struct sd_uscsi_info)); 10655 10656 return (rval); 10657 } 10658 10659 10660 /* 10661 * Function: sd_buf_iodone 10662 * 10663 * Description: Frees the sd_xbuf & returns the buf to its originator. 10664 * 10665 * Context: May be called from interrupt context. 10666 */ 10667 /* ARGSUSED */ 10668 static void 10669 sd_buf_iodone(int index, struct sd_lun *un, struct buf *bp) 10670 { 10671 struct sd_xbuf *xp; 10672 10673 ASSERT(un != NULL); 10674 ASSERT(bp != NULL); 10675 ASSERT(!mutex_owned(SD_MUTEX(un))); 10676 10677 SD_TRACE(SD_LOG_IO_CORE, un, "sd_buf_iodone: entry.\n"); 10678 10679 xp = SD_GET_XBUF(bp); 10680 ASSERT(xp != NULL); 10681 10682 mutex_enter(SD_MUTEX(un)); 10683 10684 /* 10685 * Grab time when the cmd completed. 10686 * This is used for determining if the system has been 10687 * idle long enough to make it idle to the PM framework. 10688 * This is for lowering the overhead, and therefore improving 10689 * performance per I/O operation. 10690 */ 10691 un->un_pm_idle_time = ddi_get_time(); 10692 10693 un->un_ncmds_in_driver--; 10694 ASSERT(un->un_ncmds_in_driver >= 0); 10695 SD_INFO(SD_LOG_IO, un, "sd_buf_iodone: un_ncmds_in_driver = %ld\n", 10696 un->un_ncmds_in_driver); 10697 10698 mutex_exit(SD_MUTEX(un)); 10699 10700 ddi_xbuf_done(bp, un->un_xbuf_attr); /* xbuf is gone after this */ 10701 biodone(bp); /* bp is gone after this */ 10702 10703 SD_TRACE(SD_LOG_IO_CORE, un, "sd_buf_iodone: exit.\n"); 10704 } 10705 10706 10707 /* 10708 * Function: sd_uscsi_iodone 10709 * 10710 * Description: Frees the sd_xbuf & returns the buf to its originator. 10711 * 10712 * Context: May be called from interrupt context. 
10713 */ 10714 /* ARGSUSED */ 10715 static void 10716 sd_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp) 10717 { 10718 struct sd_xbuf *xp; 10719 10720 ASSERT(un != NULL); 10721 ASSERT(bp != NULL); 10722 10723 xp = SD_GET_XBUF(bp); 10724 ASSERT(xp != NULL); 10725 ASSERT(!mutex_owned(SD_MUTEX(un))); 10726 10727 SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: entry.\n"); 10728 10729 bp->b_private = xp->xb_private; 10730 10731 mutex_enter(SD_MUTEX(un)); 10732 10733 /* 10734 * Grab time when the cmd completed. 10735 * This is used for determining if the system has been 10736 * idle long enough to make it idle to the PM framework. 10737 * This is for lowering the overhead, and therefore improving 10738 * performance per I/O operation. 10739 */ 10740 un->un_pm_idle_time = ddi_get_time(); 10741 10742 un->un_ncmds_in_driver--; 10743 ASSERT(un->un_ncmds_in_driver >= 0); 10744 SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: un_ncmds_in_driver = %ld\n", 10745 un->un_ncmds_in_driver); 10746 10747 mutex_exit(SD_MUTEX(un)); 10748 10749 if (((struct uscsi_cmd *)(xp->xb_pktinfo))->uscsi_rqlen > 10750 SENSE_LENGTH) { 10751 kmem_free(xp, sizeof (struct sd_xbuf) - SENSE_LENGTH + 10752 MAX_SENSE_LENGTH); 10753 } else { 10754 kmem_free(xp, sizeof (struct sd_xbuf)); 10755 } 10756 10757 biodone(bp); 10758 10759 SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: exit.\n"); 10760 } 10761 10762 10763 /* 10764 * Function: sd_mapblockaddr_iostart 10765 * 10766 * Description: Verify request lies within the partition limits for 10767 * the indicated minor device. Issue "overrun" buf if 10768 * request would exceed partition range. Converts 10769 * partition-relative block address to absolute. 10770 * 10771 * Context: Can sleep 10772 * 10773 * Issues: This follows what the old code did, in terms of accessing 10774 * some of the partition info in the unit struct without holding 10775 * the mutext. This is a general issue, if the partition info 10776 * can be altered while IO is in progress... as soon as we send 10777 * a buf, its partitioning can be invalid before it gets to the 10778 * device. Probably the right fix is to move partitioning out 10779 * of the driver entirely. 10780 */ 10781 10782 static void 10783 sd_mapblockaddr_iostart(int index, struct sd_lun *un, struct buf *bp) 10784 { 10785 diskaddr_t nblocks; /* #blocks in the given partition */ 10786 daddr_t blocknum; /* Block number specified by the buf */ 10787 size_t requested_nblocks; 10788 size_t available_nblocks; 10789 int partition; 10790 diskaddr_t partition_offset; 10791 struct sd_xbuf *xp; 10792 10793 10794 ASSERT(un != NULL); 10795 ASSERT(bp != NULL); 10796 ASSERT(!mutex_owned(SD_MUTEX(un))); 10797 10798 SD_TRACE(SD_LOG_IO_PARTITION, un, 10799 "sd_mapblockaddr_iostart: entry: buf:0x%p\n", bp); 10800 10801 xp = SD_GET_XBUF(bp); 10802 ASSERT(xp != NULL); 10803 10804 /* 10805 * If the geometry is not indicated as valid, attempt to access 10806 * the unit & verify the geometry/label. This can be the case for 10807 * removable-media devices, of if the device was opened in 10808 * NDELAY/NONBLOCK mode. 10809 */ 10810 if (!SD_IS_VALID_LABEL(un) && 10811 (sd_ready_and_valid(un) != SD_READY_VALID)) { 10812 /* 10813 * For removable devices it is possible to start an I/O 10814 * without a media by opening the device in nodelay mode. 10815 * Also for writable CDs there can be many scenarios where 10816 * there is no geometry yet but volume manager is trying to 10817 * issue a read() just because it can see TOC on the CD. So 10818 * do not print a message for removables. 
10819 */ 10820 if (!un->un_f_has_removable_media) { 10821 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 10822 "i/o to invalid geometry\n"); 10823 } 10824 bioerror(bp, EIO); 10825 bp->b_resid = bp->b_bcount; 10826 SD_BEGIN_IODONE(index, un, bp); 10827 return; 10828 } 10829 10830 partition = SDPART(bp->b_edev); 10831 10832 nblocks = 0; 10833 (void) cmlb_partinfo(un->un_cmlbhandle, partition, 10834 &nblocks, &partition_offset, NULL, NULL, (void *)SD_PATH_DIRECT); 10835 10836 /* 10837 * blocknum is the starting block number of the request. At this 10838 * point it is still relative to the start of the minor device. 10839 */ 10840 blocknum = xp->xb_blkno; 10841 10842 /* 10843 * Legacy: If the starting block number is one past the last block 10844 * in the partition, do not set B_ERROR in the buf. 10845 */ 10846 if (blocknum == nblocks) { 10847 goto error_exit; 10848 } 10849 10850 /* 10851 * Confirm that the first block of the request lies within the 10852 * partition limits. Also the requested number of bytes must be 10853 * a multiple of the system block size. 10854 */ 10855 if ((blocknum < 0) || (blocknum >= nblocks) || 10856 ((bp->b_bcount & (un->un_sys_blocksize - 1)) != 0)) { 10857 bp->b_flags |= B_ERROR; 10858 goto error_exit; 10859 } 10860 10861 /* 10862 * If the requested # blocks exceeds the available # blocks, that 10863 * is an overrun of the partition. 10864 */ 10865 requested_nblocks = SD_BYTES2SYSBLOCKS(un, bp->b_bcount); 10866 available_nblocks = (size_t)(nblocks - blocknum); 10867 ASSERT(nblocks >= blocknum); 10868 10869 if (requested_nblocks > available_nblocks) { 10870 /* 10871 * Allocate an "overrun" buf to allow the request to proceed 10872 * for the amount of space available in the partition. The 10873 * amount not transferred will be added into the b_resid 10874 * when the operation is complete. The overrun buf 10875 * replaces the original buf here, and the original buf 10876 * is saved inside the overrun buf, for later use. 10877 */ 10878 size_t resid = SD_SYSBLOCKS2BYTES(un, 10879 (offset_t)(requested_nblocks - available_nblocks)); 10880 size_t count = bp->b_bcount - resid; 10881 /* 10882 * Note: count is unsigned and thus can NEVER be 10883 * less than 0, so ASSERT that the original values 10884 * are consistent. 10885 */ 10886 ASSERT(bp->b_bcount >= resid); 10887 10888 bp = sd_bioclone_alloc(bp, count, blocknum, 10889 (int (*)(struct buf *)) sd_mapblockaddr_iodone); 10890 xp = SD_GET_XBUF(bp); /* Update for 'new' bp! */ 10891 ASSERT(xp != NULL); 10892 } 10893 10894 /* At this point there should be no residual for this buf. */ 10895 ASSERT(bp->b_resid == 0); 10896 10897 /* Convert the block number to an absolute address. */ 10898 xp->xb_blkno += partition_offset; 10899 10900 SD_NEXT_IOSTART(index, un, bp); 10901 10902 SD_TRACE(SD_LOG_IO_PARTITION, un, 10903 "sd_mapblockaddr_iostart: exit 0: buf:0x%p\n", bp); 10904 10905 return; 10906 10907 error_exit: 10908 bp->b_resid = bp->b_bcount; 10909 SD_BEGIN_IODONE(index, un, bp); 10910 SD_TRACE(SD_LOG_IO_PARTITION, un, 10911 "sd_mapblockaddr_iostart: exit 1: buf:0x%p\n", bp); 10912 } 10913 10914 10915 /* 10916 * Function: sd_mapblockaddr_iodone 10917 * 10918 * Description: Completion-side processing for partition management. 10919 * 10920 * Context: May be called under interrupt context 10921 */ 10922 10923 static void 10924 sd_mapblockaddr_iodone(int index, struct sd_lun *un, struct buf *bp) 10925 { 10926 /* int partition; */ /* Not used, see below.
*/ 10927 ASSERT(un != NULL); 10928 ASSERT(bp != NULL); 10929 ASSERT(!mutex_owned(SD_MUTEX(un))); 10930 10931 SD_TRACE(SD_LOG_IO_PARTITION, un, 10932 "sd_mapblockaddr_iodone: entry: buf:0x%p\n", bp); 10933 10934 if (bp->b_iodone == (int (*)(struct buf *)) sd_mapblockaddr_iodone) { 10935 /* 10936 * We have an "overrun" buf to deal with... 10937 */ 10938 struct sd_xbuf *xp; 10939 struct buf *obp; /* ptr to the original buf */ 10940 10941 xp = SD_GET_XBUF(bp); 10942 ASSERT(xp != NULL); 10943 10944 /* Retrieve the pointer to the original buf */ 10945 obp = (struct buf *)xp->xb_private; 10946 ASSERT(obp != NULL); 10947 10948 obp->b_resid = obp->b_bcount - (bp->b_bcount - bp->b_resid); 10949 bioerror(obp, bp->b_error); 10950 10951 sd_bioclone_free(bp); 10952 10953 /* 10954 * Get back the original buf. 10955 * Note that since the restoration of xb_blkno below 10956 * was removed, the sd_xbuf is not needed. 10957 */ 10958 bp = obp; 10959 /* 10960 * xp = SD_GET_XBUF(bp); 10961 * ASSERT(xp != NULL); 10962 */ 10963 } 10964 10965 /* 10966 * Convert xp->xb_blkno back to a minor-device relative value. 10967 * Note: this has been commented out, as it is not needed in the 10968 * current implementation of the driver (ie, this function is at 10969 * the top of the layering chains, so the info would simply be 10970 * discarded) and it is in the "hot" IO path. 10971 * 10972 * partition = getminor(bp->b_edev) & SDPART_MASK; 10973 * xp->xb_blkno -= un->un_offset[partition]; 10974 */ 10975 10976 SD_NEXT_IODONE(index, un, bp); 10977 10978 SD_TRACE(SD_LOG_IO_PARTITION, un, 10979 "sd_mapblockaddr_iodone: exit: buf:0x%p\n", bp); 10980 } 10981 10982 10983 /* 10984 * Function: sd_mapblocksize_iostart 10985 * 10986 * Description: Convert between system block size (un->un_sys_blocksize) 10987 * and target block size (un->un_tgt_blocksize). 10988 * 10989 * Context: Can sleep to allocate resources. 10990 * 10991 * Assumptions: A higher layer has already performed any partition validation, 10992 * and converted the xp->xb_blkno to an absolute value relative 10993 * to the start of the device. 10994 * 10995 * It is also assumed that the higher layer has implemented 10996 * an "overrun" mechanism for the case where the request would 10997 * read/write beyond the end of a partition. In this case we 10998 * assume (and ASSERT) that bp->b_resid == 0. 10999 * 11000 * Note: The implementation for this routine assumes the target 11001 * block size remains constant between allocation and transport. 11002 */ 11003 11004 static void 11005 sd_mapblocksize_iostart(int index, struct sd_lun *un, struct buf *bp) 11006 { 11007 struct sd_mapblocksize_info *bsp; 11008 struct sd_xbuf *xp; 11009 offset_t first_byte; 11010 daddr_t start_block, end_block; 11011 daddr_t request_bytes; 11012 ushort_t is_aligned = FALSE; 11013 11014 ASSERT(un != NULL); 11015 ASSERT(bp != NULL); 11016 ASSERT(!mutex_owned(SD_MUTEX(un))); 11017 ASSERT(bp->b_resid == 0); 11018 11019 SD_TRACE(SD_LOG_IO_RMMEDIA, un, 11020 "sd_mapblocksize_iostart: entry: buf:0x%p\n", bp); 11021 11022 /* 11023 * For a non-writable CD, a write request is an error 11024 */ 11025 if (ISCD(un) && ((bp->b_flags & B_READ) == 0) && 11026 (un->un_f_mmc_writable_media == FALSE)) { 11027 bioerror(bp, EIO); 11028 bp->b_resid = bp->b_bcount; 11029 SD_BEGIN_IODONE(index, un, bp); 11030 return; 11031 } 11032 11033 /* 11034 * We do not need a shadow buf if the device is using 11035 * un->un_sys_blocksize as its block size or if bcount == 0.
11036 * In this case there is no layer-private data block allocated. 11037 */ 11038 if ((un->un_tgt_blocksize == un->un_sys_blocksize) || 11039 (bp->b_bcount == 0)) { 11040 goto done; 11041 } 11042 11043 #if defined(__i386) || defined(__amd64) 11044 /* We do not support non-block-aligned transfers for ROD devices */ 11045 ASSERT(!ISROD(un)); 11046 #endif 11047 11048 xp = SD_GET_XBUF(bp); 11049 ASSERT(xp != NULL); 11050 11051 SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: " 11052 "tgt_blocksize:0x%x sys_blocksize: 0x%x\n", 11053 un->un_tgt_blocksize, un->un_sys_blocksize); 11054 SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: " 11055 "request start block:0x%x\n", xp->xb_blkno); 11056 SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: " 11057 "request len:0x%x\n", bp->b_bcount); 11058 11059 /* 11060 * Allocate the layer-private data area for the mapblocksize layer. 11061 * Layers are allowed to use the xb_private member of the sd_xbuf 11062 * struct to store the pointer to their layer-private data block, but 11063 * each layer also has the responsibility of restoring the prior 11064 * contents of xb_private before returning the buf/xbuf to the 11065 * higher layer that sent it. 11066 * 11067 * Here we save the prior contents of xp->xb_private into the 11068 * bsp->mbs_oprivate field of our layer-private data area. This value 11069 * is restored by sd_mapblocksize_iodone() just prior to freeing up 11070 * the layer-private area and returning the buf/xbuf to the layer 11071 * that sent it. 11072 * 11073 * Note that here we use kmem_zalloc for the allocation as there are 11074 * parts of the mapblocksize code that expect certain fields to be 11075 * zero unless explicitly set to a required value. 11076 */ 11077 bsp = kmem_zalloc(sizeof (struct sd_mapblocksize_info), KM_SLEEP); 11078 bsp->mbs_oprivate = xp->xb_private; 11079 xp->xb_private = bsp; 11080 11081 /* 11082 * This treats the data on the disk (target) as an array of bytes. 11083 * first_byte is the byte offset, from the beginning of the device, 11084 * to the location of the request. This is converted from a 11085 * un->un_sys_blocksize block address to a byte offset, and then back 11086 * to a block address based upon a un->un_tgt_blocksize block size. 11087 * 11088 * xp->xb_blkno should be absolute upon entry into this function, 11089 * but it is based upon partitions that use the "system" 11090 * block size. It must be adjusted to reflect the block size of 11091 * the target. 11092 * 11093 * Note that end_block is actually the block that follows the last 11094 * block of the request, but that's what is needed for the computation. 11095 */ 11096 first_byte = SD_SYSBLOCKS2BYTES(un, (offset_t)xp->xb_blkno); 11097 start_block = xp->xb_blkno = first_byte / un->un_tgt_blocksize; 11098 end_block = (first_byte + bp->b_bcount + un->un_tgt_blocksize - 1) / 11099 un->un_tgt_blocksize; 11100 11101 /* request_bytes is rounded up to a multiple of the target block size */ 11102 request_bytes = (end_block - start_block) * un->un_tgt_blocksize; 11103 11104 /* 11105 * See if the starting address of the request and the request 11106 * length are aligned on a un->un_tgt_blocksize boundary. If aligned 11107 * then we do not need to allocate a shadow buf to handle the request. 11108 */ 11109 if (((first_byte % un->un_tgt_blocksize) == 0) && 11110 ((bp->b_bcount % un->un_tgt_blocksize) == 0)) { 11111 is_aligned = TRUE; 11112 } 11113 11114 if ((bp->b_flags & B_READ) == 0) { 11115 /* 11116 * Lock the range for a write operation.
An aligned request is 11117 * considered a simple write; otherwise the request must be a 11118 * read-modify-write. 11119 */ 11120 bsp->mbs_wmp = sd_range_lock(un, start_block, end_block - 1, 11121 (is_aligned == TRUE) ? SD_WTYPE_SIMPLE : SD_WTYPE_RMW); 11122 } 11123 11124 /* 11125 * Alloc a shadow buf if the request is not aligned. Also, this is 11126 * where the READ command is generated for a read-modify-write. (The 11127 * write phase is deferred until after the read completes.) 11128 */ 11129 if (is_aligned == FALSE) { 11130 11131 struct sd_mapblocksize_info *shadow_bsp; 11132 struct sd_xbuf *shadow_xp; 11133 struct buf *shadow_bp; 11134 11135 /* 11136 * Allocate the shadow buf and its associated xbuf. Note that 11137 * after this call the xb_blkno value in both the original 11138 * buf's sd_xbuf _and_ the shadow buf's sd_xbuf will be the 11139 * same: absolute (relative to the start of the device) and 11140 * adjusted for the target block size. The b_blkno in the 11141 * shadow buf will also be set to this value. We should never 11142 * change b_blkno in the original bp however. 11143 * 11144 * Note also that the shadow buf will always need to be a 11145 * READ command, regardless of whether the incoming command 11146 * is a READ or a WRITE. 11147 */ 11148 shadow_bp = sd_shadow_buf_alloc(bp, request_bytes, B_READ, 11149 xp->xb_blkno, 11150 (int (*)(struct buf *)) sd_mapblocksize_iodone); 11151 11152 shadow_xp = SD_GET_XBUF(shadow_bp); 11153 11154 /* 11155 * Allocate the layer-private data for the shadow buf. 11156 * (No need to preserve xb_private in the shadow xbuf.) 11157 */ 11158 shadow_xp->xb_private = shadow_bsp = 11159 kmem_zalloc(sizeof (struct sd_mapblocksize_info), KM_SLEEP); 11160 11161 /* 11162 * bsp->mbs_copy_offset is used later by sd_mapblocksize_iodone 11163 * to figure out where the start of the user data is (based upon 11164 * the system block size) in the data returned by the READ 11165 * command (which will be based upon the target blocksize). Note 11166 * that this is only really used if the request is unaligned. 11167 */ 11168 bsp->mbs_copy_offset = (ssize_t)(first_byte - 11169 ((offset_t)xp->xb_blkno * un->un_tgt_blocksize)); 11170 ASSERT((bsp->mbs_copy_offset >= 0) && 11171 (bsp->mbs_copy_offset < un->un_tgt_blocksize)); 11172 11173 shadow_bsp->mbs_copy_offset = bsp->mbs_copy_offset; 11174 11175 shadow_bsp->mbs_layer_index = bsp->mbs_layer_index = index; 11176 11177 /* Transfer the wmap (if any) to the shadow buf */ 11178 shadow_bsp->mbs_wmp = bsp->mbs_wmp; 11179 bsp->mbs_wmp = NULL; 11180 11181 /* 11182 * The shadow buf goes on from here in place of the 11183 * original buf. 11184 */ 11185 shadow_bsp->mbs_orig_bp = bp; 11186 bp = shadow_bp; 11187 } 11188 11189 SD_INFO(SD_LOG_IO_RMMEDIA, un, 11190 "sd_mapblocksize_iostart: tgt start block:0x%x\n", xp->xb_blkno); 11191 SD_INFO(SD_LOG_IO_RMMEDIA, un, 11192 "sd_mapblocksize_iostart: tgt request len:0x%x\n", 11193 request_bytes); 11194 SD_INFO(SD_LOG_IO_RMMEDIA, un, 11195 "sd_mapblocksize_iostart: shadow buf:0x%p\n", bp); 11196 11197 done: 11198 SD_NEXT_IOSTART(index, un, bp); 11199 11200 SD_TRACE(SD_LOG_IO_RMMEDIA, un, 11201 "sd_mapblocksize_iostart: exit: buf:0x%p\n", bp); 11202 } 11203 11204 11205 /* 11206 * Function: sd_mapblocksize_iodone 11207 * 11208 * Description: Completion-side processing for block-size mapping.
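 * For an aligned request there is no shadow buf; only the
 * write-range lock (if any) is released. For an unaligned READ,
 * the data is copied from the shadow buf back into the original
 * buf at mbs_copy_offset. For the READ phase of a
 * read-modify-write, the caller's data is copied over the data
 * just read, the shadow buf is converted to a WRITE, and it is
 * redispatched via the sd_wmr_tq taskq.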
11209 * 11210 * Context: May be called under interrupt context 11211 */ 11212 11213 static void 11214 sd_mapblocksize_iodone(int index, struct sd_lun *un, struct buf *bp) 11215 { 11216 struct sd_mapblocksize_info *bsp; 11217 struct sd_xbuf *xp; 11218 struct sd_xbuf *orig_xp; /* sd_xbuf for the original buf */ 11219 struct buf *orig_bp; /* ptr to the original buf */ 11220 offset_t shadow_end; 11221 offset_t request_end; 11222 offset_t shadow_start; 11223 ssize_t copy_offset; 11224 size_t copy_length; 11225 size_t shortfall; 11226 uint_t is_write; /* TRUE if this bp is a WRITE */ 11227 uint_t has_wmap; /* TRUE if this bp has a wmap */ 11228 11229 ASSERT(un != NULL); 11230 ASSERT(bp != NULL); 11231 11232 SD_TRACE(SD_LOG_IO_RMMEDIA, un, 11233 "sd_mapblocksize_iodone: entry: buf:0x%p\n", bp); 11234 11235 /* 11236 * There is no shadow buf or layer-private data if the target is 11237 * using un->un_sys_blocksize as its block size or if bcount == 0. 11238 */ 11239 if ((un->un_tgt_blocksize == un->un_sys_blocksize) || 11240 (bp->b_bcount == 0)) { 11241 goto exit; 11242 } 11243 11244 xp = SD_GET_XBUF(bp); 11245 ASSERT(xp != NULL); 11246 11247 /* Retrieve the pointer to the layer-private data area from the xbuf. */ 11248 bsp = xp->xb_private; 11249 11250 is_write = ((bp->b_flags & B_READ) == 0) ? TRUE : FALSE; 11251 has_wmap = (bsp->mbs_wmp != NULL) ? TRUE : FALSE; 11252 11253 if (is_write) { 11254 /* 11255 * For a WRITE request we must free up the block range that 11256 * we have locked up. This holds regardless of whether this is 11257 * an aligned write request or a read-modify-write request. 11258 */ 11259 sd_range_unlock(un, bsp->mbs_wmp); 11260 bsp->mbs_wmp = NULL; 11261 } 11262 11263 if ((bp->b_iodone != (int(*)(struct buf *))sd_mapblocksize_iodone)) { 11264 /* 11265 * An aligned read or write command will have no shadow buf; 11266 * there is not much else to do with it. 11267 */ 11268 goto done; 11269 } 11270 11271 orig_bp = bsp->mbs_orig_bp; 11272 ASSERT(orig_bp != NULL); 11273 orig_xp = SD_GET_XBUF(orig_bp); 11274 ASSERT(orig_xp != NULL); 11275 ASSERT(!mutex_owned(SD_MUTEX(un))); 11276 11277 if (!is_write && has_wmap) { 11278 /* 11279 * A READ with a wmap means this is the READ phase of a 11280 * read-modify-write. If an error occurred on the READ then 11281 * we do not proceed with the WRITE phase or copy any data. 11282 * Just release the write maps and return with an error. 11283 */ 11284 if ((bp->b_resid != 0) || (bp->b_error != 0)) { 11285 orig_bp->b_resid = orig_bp->b_bcount; 11286 bioerror(orig_bp, bp->b_error); 11287 sd_range_unlock(un, bsp->mbs_wmp); 11288 goto freebuf_done; 11289 } 11290 } 11291 11292 /* 11293 * Here is where we set up to copy the data from the shadow buf 11294 * into the space associated with the original buf. 11295 * 11296 * To deal with the conversion between block sizes, these 11297 * computations treat the data as an array of bytes, with the 11298 * first byte (byte 0) corresponding to the first byte in the 11299 * first block on the disk. 11300 */ 11301 11302 /* 11303 * shadow_start and shadow_end delimit the byte range of the 11304 * data returned with the shadow IO request. 11305 */ 11306 shadow_start = SD_TGTBLOCKS2BYTES(un, (offset_t)xp->xb_blkno); 11307 shadow_end = shadow_start + bp->b_bcount - bp->b_resid; 11308 11309 /* 11310 * copy_offset gives the offset (in bytes) from the start of the first 11311 * block of the READ request to the beginning of the data.
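 * As an illustration (hypothetical numbers, not from the code):
 * with a 512-byte un->un_sys_blocksize and a 2048-byte
 * un->un_tgt_blocksize, a request starting at system block 5
 * begins 2560 bytes into the device, which is 512 bytes into
 * target block 1; copy_offset is therefore 512.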
We retrieve 11312 * this value from the layer-private data (mbs_copy_offset), as it was 11313 * saved there by sd_mapblocksize_iostart(). copy_length gives the amount of 11314 * data to be copied (in bytes). 11315 */ 11316 copy_offset = bsp->mbs_copy_offset; 11317 ASSERT((copy_offset >= 0) && (copy_offset < un->un_tgt_blocksize)); 11318 copy_length = orig_bp->b_bcount; 11319 request_end = shadow_start + copy_offset + orig_bp->b_bcount; 11320 11321 /* 11322 * Set up the resid and error fields of orig_bp as appropriate. 11323 */ 11324 if (shadow_end >= request_end) { 11325 /* We got all the requested data; set resid to zero */ 11326 orig_bp->b_resid = 0; 11327 } else { 11328 /* 11329 * We failed to get enough data to fully satisfy the original 11330 * request. Just copy back whatever data we got and set 11331 * up the residual and error code as required. 11332 * 11333 * 'shortfall' is the amount by which the data received with the 11334 * shadow buf has "fallen short" of the requested amount. 11335 */ 11336 shortfall = (size_t)(request_end - shadow_end); 11337 11338 if (shortfall > orig_bp->b_bcount) { 11339 /* 11340 * We did not get enough data to even partially 11341 * fulfill the original request. The residual is 11342 * equal to the amount requested. 11343 */ 11344 orig_bp->b_resid = orig_bp->b_bcount; 11345 } else { 11346 /* 11347 * We did not get all the data that we requested 11348 * from the device, but we will try to return what 11349 * portion we did get. 11350 */ 11351 orig_bp->b_resid = shortfall; 11352 } 11353 ASSERT(copy_length >= orig_bp->b_resid); 11354 copy_length -= orig_bp->b_resid; 11355 } 11356 11357 /* Propagate the error code from the shadow buf to the original buf */ 11358 bioerror(orig_bp, bp->b_error); 11359 11360 if (is_write) { 11361 goto freebuf_done; /* No data copying for a WRITE */ 11362 } 11363 11364 if (has_wmap) { 11365 /* 11366 * This is a READ command from the READ phase of a 11367 * read-modify-write request. We have to copy the data given 11368 * by the user OVER the data returned by the READ command, 11369 * then convert the command from a READ to a WRITE and send 11370 * it back to the target. 11371 */ 11372 bcopy(orig_bp->b_un.b_addr, bp->b_un.b_addr + copy_offset, 11373 copy_length); 11374 11375 bp->b_flags &= ~((int)B_READ); /* Convert to a WRITE */ 11376 11377 /* 11378 * Dispatch the WRITE command to the taskq thread, which 11379 * will in turn send the command to the target. When the 11380 * WRITE command completes, we (sd_mapblocksize_iodone()) 11381 * will get called again as part of the iodone chain 11382 * processing for it. Note that we will still be dealing 11383 * with the shadow buf at that point. 11384 */ 11385 if (taskq_dispatch(sd_wmr_tq, sd_read_modify_write_task, bp, 11386 KM_NOSLEEP) != 0) { 11387 /* 11388 * Dispatch was successful so we are done. Return 11389 * without going any higher up the iodone chain. Do 11390 * not free up any layer-private data until after the 11391 * WRITE completes. 11392 */ 11393 return; 11394 } 11395 11396 /* 11397 * Dispatch of the WRITE command failed; set up the error 11398 * condition and send this IO back up the iodone chain. 11399 */ 11400 bioerror(orig_bp, EIO); 11401 orig_bp->b_resid = orig_bp->b_bcount; 11402 11403 } else { 11404 /* 11405 * This is a regular READ request (ie, not a RMW). Copy the 11406 * data from the shadow buf into the original buf.
The 11407 * copy_offset compensates for any "misalignment" between the 11408 * shadow buf (with its un->un_tgt_blocksize blocks) and the 11409 * original buf (with its un->un_sys_blocksize blocks). 11410 */ 11411 bcopy(bp->b_un.b_addr + copy_offset, orig_bp->b_un.b_addr, 11412 copy_length); 11413 } 11414 11415 freebuf_done: 11416 11417 /* 11418 * At this point we still have both the shadow buf AND the original 11419 * buf to deal with, as well as the layer-private data area in each. 11420 * Local variables are as follows: 11421 * 11422 * bp -- points to shadow buf 11423 * xp -- points to xbuf of shadow buf 11424 * bsp -- points to layer-private data area of shadow buf 11425 * orig_bp -- points to original buf 11426 * 11427 * First free the shadow buf and its associated xbuf, then free the 11428 * layer-private data area from the shadow buf. There is no need to 11429 * restore xb_private in the shadow xbuf. 11430 */ 11431 sd_shadow_buf_free(bp); 11432 kmem_free(bsp, sizeof (struct sd_mapblocksize_info)); 11433 11434 /* 11435 * Now update the local variables to point to the original buf, xbuf, 11436 * and layer-private area. 11437 */ 11438 bp = orig_bp; 11439 xp = SD_GET_XBUF(bp); 11440 ASSERT(xp != NULL); 11441 ASSERT(xp == orig_xp); 11442 bsp = xp->xb_private; 11443 ASSERT(bsp != NULL); 11444 11445 done: 11446 /* 11447 * Restore xb_private to whatever it was set to by the next higher 11448 * layer in the chain, then free the layer-private data area. 11449 */ 11450 xp->xb_private = bsp->mbs_oprivate; 11451 kmem_free(bsp, sizeof (struct sd_mapblocksize_info)); 11452 11453 exit: 11454 SD_TRACE(SD_LOG_IO_RMMEDIA, SD_GET_UN(bp), 11455 "sd_mapblocksize_iodone: calling SD_NEXT_IODONE: buf:0x%p\n", bp); 11456 11457 SD_NEXT_IODONE(index, un, bp); 11458 } 11459 11460 11461 /* 11462 * Function: sd_checksum_iostart 11463 * 11464 * Description: A stub function for a layer that's currently not used. 11465 * For now just a placeholder. 11466 * 11467 * Context: Kernel thread context 11468 */ 11469 11470 static void 11471 sd_checksum_iostart(int index, struct sd_lun *un, struct buf *bp) 11472 { 11473 ASSERT(un != NULL); 11474 ASSERT(bp != NULL); 11475 ASSERT(!mutex_owned(SD_MUTEX(un))); 11476 SD_NEXT_IOSTART(index, un, bp); 11477 } 11478 11479 11480 /* 11481 * Function: sd_checksum_iodone 11482 * 11483 * Description: A stub function for a layer that's currently not used. 11484 * For now just a placeholder. 11485 * 11486 * Context: May be called under interrupt context 11487 */ 11488 11489 static void 11490 sd_checksum_iodone(int index, struct sd_lun *un, struct buf *bp) 11491 { 11492 ASSERT(un != NULL); 11493 ASSERT(bp != NULL); 11494 ASSERT(!mutex_owned(SD_MUTEX(un))); 11495 SD_NEXT_IODONE(index, un, bp); 11496 } 11497 11498 11499 /* 11500 * Function: sd_checksum_uscsi_iostart 11501 * 11502 * Description: A stub function for a layer that's currently not used. 11503 * For now just a placeholder. 11504 * 11505 * Context: Kernel thread context 11506 */ 11507 11508 static void 11509 sd_checksum_uscsi_iostart(int index, struct sd_lun *un, struct buf *bp) 11510 { 11511 ASSERT(un != NULL); 11512 ASSERT(bp != NULL); 11513 ASSERT(!mutex_owned(SD_MUTEX(un))); 11514 SD_NEXT_IOSTART(index, un, bp); 11515 } 11516 11517 11518 /* 11519 * Function: sd_checksum_uscsi_iodone 11520 * 11521 * Description: A stub function for a layer that's currently not used. 11522 * For now just a placeholder. 
11523 * 11524 * Context: May be called under interrupt context 11525 */ 11526 11527 static void 11528 sd_checksum_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp) 11529 { 11530 ASSERT(un != NULL); 11531 ASSERT(bp != NULL); 11532 ASSERT(!mutex_owned(SD_MUTEX(un))); 11533 SD_NEXT_IODONE(index, un, bp); 11534 } 11535 11536 11537 /* 11538 * Function: sd_pm_iostart 11539 * 11540 * Description: iostart-side routine for power management. 11541 * 11542 * Context: Kernel thread context 11543 */ 11544 11545 static void 11546 sd_pm_iostart(int index, struct sd_lun *un, struct buf *bp) 11547 { 11548 ASSERT(un != NULL); 11549 ASSERT(bp != NULL); 11550 ASSERT(!mutex_owned(SD_MUTEX(un))); 11551 ASSERT(!mutex_owned(&un->un_pm_mutex)); 11552 11553 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: entry\n"); 11554 11555 if (sd_pm_entry(un) != DDI_SUCCESS) { 11556 /* 11557 * Set up to return the failed buf back up the 'iodone' 11558 * side of the calling chain. 11559 */ 11560 bioerror(bp, EIO); 11561 bp->b_resid = bp->b_bcount; 11562 11563 SD_BEGIN_IODONE(index, un, bp); 11564 11565 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: exit\n"); 11566 return; 11567 } 11568 11569 SD_NEXT_IOSTART(index, un, bp); 11570 11571 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: exit\n"); 11572 } 11573 11574 11575 /* 11576 * Function: sd_pm_iodone 11577 * 11578 * Description: iodone-side routine for power management. 11579 * 11580 * Context: may be called from interrupt context 11581 */ 11582 11583 static void 11584 sd_pm_iodone(int index, struct sd_lun *un, struct buf *bp) 11585 { 11586 ASSERT(un != NULL); 11587 ASSERT(bp != NULL); 11588 ASSERT(!mutex_owned(&un->un_pm_mutex)); 11589 11590 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iodone: entry\n"); 11591 11592 /* 11593 * After attach the following flag is only read, so don't 11594 * take the penalty of acquiring a mutex for it. 11595 */ 11596 if (un->un_f_pm_is_enabled == TRUE) { 11597 sd_pm_exit(un); 11598 } 11599 11600 SD_NEXT_IODONE(index, un, bp); 11601 11602 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iodone: exit\n"); 11603 } 11604 11605 11606 /* 11607 * Function: sd_core_iostart 11608 * 11609 * Description: Primary driver function for enqueuing buf(9S) structs from 11610 * the system and initiating IO to the target device 11611 * 11612 * Context: Kernel thread context. Can sleep. 11613 * 11614 * Assumptions: - The given xp->xb_blkno is absolute 11615 * (ie, relative to the start of the device). 11616 * - The IO is to be done using the native blocksize of 11617 * the device, as specified in un->un_tgt_blocksize. 11618 */ 11619 /* ARGSUSED */ 11620 static void 11621 sd_core_iostart(int index, struct sd_lun *un, struct buf *bp) 11622 { 11623 struct sd_xbuf *xp; 11624 11625 ASSERT(un != NULL); 11626 ASSERT(bp != NULL); 11627 ASSERT(!mutex_owned(SD_MUTEX(un))); 11628 ASSERT(bp->b_resid == 0); 11629 11630 SD_TRACE(SD_LOG_IO_CORE, un, "sd_core_iostart: entry: bp:0x%p\n", bp); 11631 11632 xp = SD_GET_XBUF(bp); 11633 ASSERT(xp != NULL); 11634 11635 mutex_enter(SD_MUTEX(un)); 11636 11637 /* 11638 * If we are currently in the failfast state, fail any new IO 11639 * that has B_FAILFAST set, then return. 11640 */ 11641 if ((bp->b_flags & B_FAILFAST) && 11642 (un->un_failfast_state == SD_FAILFAST_ACTIVE)) { 11643 mutex_exit(SD_MUTEX(un)); 11644 bioerror(bp, EIO); 11645 bp->b_resid = bp->b_bcount; 11646 SD_BEGIN_IODONE(index, un, bp); 11647 return; 11648 } 11649 11650 if (SD_IS_DIRECT_PRIORITY(xp)) { 11651 /* 11652 * Priority command -- transport it immediately.
11653 * 11654 * Note: We may want to assert that USCSI_DIAGNOSE is set, 11655 * because all direct priority commands should be associated 11656 * with error recovery actions which we don't want to retry. 11657 */ 11658 sd_start_cmds(un, bp); 11659 } else { 11660 /* 11661 * Normal command -- add it to the wait queue, then start 11662 * transporting commands from the wait queue. 11663 */ 11664 sd_add_buf_to_waitq(un, bp); 11665 SD_UPDATE_KSTATS(un, kstat_waitq_enter, bp); 11666 sd_start_cmds(un, NULL); 11667 } 11668 11669 mutex_exit(SD_MUTEX(un)); 11670 11671 SD_TRACE(SD_LOG_IO_CORE, un, "sd_core_iostart: exit: bp:0x%p\n", bp); 11672 } 11673 11674 11675 /* 11676 * Function: sd_init_cdb_limits 11677 * 11678 * Description: This is to handle scsi_pkt initialization differences 11679 * between the driver platforms. 11680 * 11681 * Legacy behaviors: 11682 * 11683 * If the block number or the sector count exceeds the 11684 * capabilities of a Group 0 command, shift over to a 11685 * Group 1 command. We don't blindly use Group 1 11686 * commands because a) some drives (CDC Wren IVs) get a 11687 * bit confused, and b) there is probably a fair amount 11688 * of speed difference for a target to receive and decode 11689 * a 10 byte command instead of a 6 byte command. 11690 * 11691 * The xfer time difference of 6 vs 10 byte CDBs is 11692 * still significant so this code is still worthwhile. 11693 * 10 byte CDBs are very inefficient with the fas HBA driver 11694 * and older disks. Each CDB byte took 1 usec with some 11695 * popular disks. 11696 * 11697 * Context: Must be called at attach time 11698 */ 11699 11700 static void 11701 sd_init_cdb_limits(struct sd_lun *un) 11702 { 11703 int hba_cdb_limit; 11704 11705 /* 11706 * Use CDB_GROUP1 commands for most devices except for 11707 * parallel SCSI fixed drives in which case we get better 11708 * performance using CDB_GROUP0 commands (where applicable). 11709 */ 11710 un->un_mincdb = SD_CDB_GROUP1; 11711 #if !defined(__fibre) 11712 if (!un->un_f_is_fibre && !un->un_f_cfg_is_atapi && !ISROD(un) && 11713 !un->un_f_has_removable_media) { 11714 un->un_mincdb = SD_CDB_GROUP0; 11715 } 11716 #endif 11717 11718 /* 11719 * Try to read the max-cdb-length supported by HBA. 11720 */ 11721 un->un_max_hba_cdb = scsi_ifgetcap(SD_ADDRESS(un), "max-cdb-length", 1); 11722 if (0 >= un->un_max_hba_cdb) { 11723 un->un_max_hba_cdb = CDB_GROUP4; 11724 hba_cdb_limit = SD_CDB_GROUP4; 11725 } else if (0 < un->un_max_hba_cdb && 11726 un->un_max_hba_cdb < CDB_GROUP1) { 11727 hba_cdb_limit = SD_CDB_GROUP0; 11728 } else if (CDB_GROUP1 <= un->un_max_hba_cdb && 11729 un->un_max_hba_cdb < CDB_GROUP5) { 11730 hba_cdb_limit = SD_CDB_GROUP1; 11731 } else if (CDB_GROUP5 <= un->un_max_hba_cdb && 11732 un->un_max_hba_cdb < CDB_GROUP4) { 11733 hba_cdb_limit = SD_CDB_GROUP5; 11734 } else { 11735 hba_cdb_limit = SD_CDB_GROUP4; 11736 } 11737 11738 /* 11739 * Use CDB_GROUP5 commands for removable devices. Use CDB_GROUP4 11740 * commands for fixed disks unless we are building for a 32 bit 11741 * kernel. 11742 */ 11743 #ifdef _LP64 11744 un->un_maxcdb = (un->un_f_has_removable_media) ? SD_CDB_GROUP5 : 11745 min(hba_cdb_limit, SD_CDB_GROUP4); 11746 #else 11747 un->un_maxcdb = (un->un_f_has_removable_media) ? SD_CDB_GROUP5 : 11748 min(hba_cdb_limit, SD_CDB_GROUP1); 11749 #endif 11750 11751 un->un_status_len = (int)((un->un_f_arq_enabled == TRUE) 11752 ? sizeof (struct scsi_arq_status) : 1); 11753 un->un_cmd_timeout = (ushort_t)sd_io_time; 11754 un->un_uscsi_timeout = ((ISCD(un)) ? 
2 : 1) * un->un_cmd_timeout; 11755 } 11756 11757 11758 /* 11759 * Function: sd_initpkt_for_buf 11760 * 11761 * Description: Allocate and initialize for transport a scsi_pkt struct, 11762 * based upon the info specified in the given buf struct. 11763 * 11764 * Assumes the xb_blkno in the request is absolute (ie, 11765 * relative to the start of the device, NOT the partition!). 11766 * Also assumes that the request is using the native block 11767 * size of the device (as returned by the READ CAPACITY 11768 * command). 11769 * 11770 * Return Code: SD_PKT_ALLOC_SUCCESS 11771 * SD_PKT_ALLOC_FAILURE 11772 * SD_PKT_ALLOC_FAILURE_NO_DMA 11773 * SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL 11774 * 11775 * Context: Kernel thread and may be called from software interrupt context 11776 * as part of a sdrunout callback. This function may not block or 11777 * call routines that block. 11778 */ 11779 11780 static int 11781 sd_initpkt_for_buf(struct buf *bp, struct scsi_pkt **pktpp) 11782 { 11783 struct sd_xbuf *xp; 11784 struct scsi_pkt *pktp = NULL; 11785 struct sd_lun *un; 11786 size_t blockcount; 11787 daddr_t startblock; 11788 int rval; 11789 int cmd_flags; 11790 11791 ASSERT(bp != NULL); 11792 ASSERT(pktpp != NULL); 11793 xp = SD_GET_XBUF(bp); 11794 ASSERT(xp != NULL); 11795 un = SD_GET_UN(bp); 11796 ASSERT(un != NULL); 11797 ASSERT(mutex_owned(SD_MUTEX(un))); 11798 ASSERT(bp->b_resid == 0); 11799 11800 SD_TRACE(SD_LOG_IO_CORE, un, 11801 "sd_initpkt_for_buf: entry: buf:0x%p\n", bp); 11802 11803 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 11804 if (xp->xb_pkt_flags & SD_XB_DMA_FREED) { 11805 /* 11806 * Already have a scsi_pkt -- just need DMA resources. 11807 * We must recompute the CDB in case the mapping returns 11808 * a nonzero pkt_resid. 11809 * Note: if this is a portion of a PKT_DMA_PARTIAL transfer 11810 * that is being retried, the unmap/remap of the DMA resources 11811 * will result in the entire transfer starting over again 11812 * from the very first block. 11813 */ 11814 ASSERT(xp->xb_pktp != NULL); 11815 pktp = xp->xb_pktp; 11816 } else { 11817 pktp = NULL; 11818 } 11819 #endif /* __i386 || __amd64 */ 11820 11821 startblock = xp->xb_blkno; /* Absolute block num. */ 11822 blockcount = SD_BYTES2TGTBLOCKS(un, bp->b_bcount); 11823 11824 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 11825 11826 cmd_flags = un->un_pkt_flags | (xp->xb_pkt_flags & SD_XB_INITPKT_MASK); 11827 11828 #else 11829 11830 cmd_flags = un->un_pkt_flags | xp->xb_pkt_flags; 11831 11832 #endif 11833 11834 /* 11835 * sd_setup_rw_pkt will determine the appropriate CDB group to use, 11836 * call scsi_init_pkt, and build the CDB. 11837 */ 11838 rval = sd_setup_rw_pkt(un, &pktp, bp, 11839 cmd_flags, sdrunout, (caddr_t)un, 11840 startblock, blockcount); 11841 11842 if (rval == 0) { 11843 /* 11844 * Success. 11845 * 11846 * If partial DMA is being used and required for this transfer, 11847 * set it up here.
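 * Only part of the request will be transported by this packet;
 * the number of blocks not transferred is carried in xb_dma_resid,
 * and the remainder of the request is set up later via
 * sd_setup_next_rw_pkt() (see below) using the same size CDB.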
11848 */ 11849 if ((un->un_pkt_flags & PKT_DMA_PARTIAL) != 0 && 11850 (pktp->pkt_resid != 0)) { 11851 11852 /* 11853 * Save the CDB length and pkt_resid for the 11854 * next xfer 11855 */ 11856 xp->xb_dma_resid = pktp->pkt_resid; 11857 11858 /* rezero resid */ 11859 pktp->pkt_resid = 0; 11860 11861 } else { 11862 xp->xb_dma_resid = 0; 11863 } 11864 11865 pktp->pkt_flags = un->un_tagflags; 11866 pktp->pkt_time = un->un_cmd_timeout; 11867 pktp->pkt_comp = sdintr; 11868 11869 pktp->pkt_private = bp; 11870 *pktpp = pktp; 11871 11872 SD_TRACE(SD_LOG_IO_CORE, un, 11873 "sd_initpkt_for_buf: exit: buf:0x%p\n", bp); 11874 11875 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 11876 xp->xb_pkt_flags &= ~SD_XB_DMA_FREED; 11877 #endif 11878 11879 return (SD_PKT_ALLOC_SUCCESS); 11880 11881 } 11882 11883 /* 11884 * SD_PKT_ALLOC_FAILURE is the only expected failure code 11885 * from sd_setup_rw_pkt. 11886 */ 11887 ASSERT(rval == SD_PKT_ALLOC_FAILURE); 11888 11889 if (rval == SD_PKT_ALLOC_FAILURE) { 11890 *pktpp = NULL; 11891 /* 11892 * Set the driver state to RWAIT to indicate the driver 11893 * is waiting on resource allocations. The driver will not 11894 * suspend, pm_suspend, or detatch while the state is RWAIT. 11895 */ 11896 New_state(un, SD_STATE_RWAIT); 11897 11898 SD_ERROR(SD_LOG_IO_CORE, un, 11899 "sd_initpkt_for_buf: No pktp. exit bp:0x%p\n", bp); 11900 11901 if ((bp->b_flags & B_ERROR) != 0) { 11902 return (SD_PKT_ALLOC_FAILURE_NO_DMA); 11903 } 11904 return (SD_PKT_ALLOC_FAILURE); 11905 } else { 11906 /* 11907 * PKT_ALLOC_FAILURE_CDB_TOO_SMALL 11908 * 11909 * This should never happen. Maybe someone messed with the 11910 * kernel's minphys? 11911 */ 11912 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 11913 "Request rejected: too large for CDB: " 11914 "lba:0x%08lx len:0x%08lx\n", startblock, blockcount); 11915 SD_ERROR(SD_LOG_IO_CORE, un, 11916 "sd_initpkt_for_buf: No cp. exit bp:0x%p\n", bp); 11917 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL); 11918 11919 } 11920 } 11921 11922 11923 /* 11924 * Function: sd_destroypkt_for_buf 11925 * 11926 * Description: Free the scsi_pkt(9S) for the given bp (buf IO processing). 11927 * 11928 * Context: Kernel thread or interrupt context 11929 */ 11930 11931 static void 11932 sd_destroypkt_for_buf(struct buf *bp) 11933 { 11934 ASSERT(bp != NULL); 11935 ASSERT(SD_GET_UN(bp) != NULL); 11936 11937 SD_TRACE(SD_LOG_IO_CORE, SD_GET_UN(bp), 11938 "sd_destroypkt_for_buf: entry: buf:0x%p\n", bp); 11939 11940 ASSERT(SD_GET_PKTP(bp) != NULL); 11941 scsi_destroy_pkt(SD_GET_PKTP(bp)); 11942 11943 SD_TRACE(SD_LOG_IO_CORE, SD_GET_UN(bp), 11944 "sd_destroypkt_for_buf: exit: buf:0x%p\n", bp); 11945 } 11946 11947 /* 11948 * Function: sd_setup_rw_pkt 11949 * 11950 * Description: Determines appropriate CDB group for the requested LBA 11951 * and transfer length, calls scsi_init_pkt, and builds 11952 * the CDB. Do not use for partial DMA transfers except 11953 * for the initial transfer since the CDB size must 11954 * remain constant. 11955 * 11956 * Context: Kernel thread and may be called from software interrupt 11957 * context as part of a sdrunout callback. 
This function may not 11958 * block or call routines that block. 11959 */ 11960 11961 11962 int 11963 sd_setup_rw_pkt(struct sd_lun *un, 11964 struct scsi_pkt **pktpp, struct buf *bp, int flags, 11965 int (*callback)(caddr_t), caddr_t callback_arg, 11966 diskaddr_t lba, uint32_t blockcount) 11967 { 11968 struct scsi_pkt *return_pktp; 11969 union scsi_cdb *cdbp; 11970 struct sd_cdbinfo *cp = NULL; 11971 int i; 11972 11973 /* 11974 * See which size CDB to use, based upon the request. 11975 */ 11976 for (i = un->un_mincdb; i <= un->un_maxcdb; i++) { 11977 11978 /* 11979 * Check lba and block count against sd_cdbtab limits. 11980 * In the partial DMA case, we have to use the same size 11981 * CDB for all the transfers. Check lba + blockcount 11982 * against the max LBA so we know that segment of the 11983 * transfer can use the CDB we select. 11984 */ 11985 if ((lba + blockcount - 1 <= sd_cdbtab[i].sc_maxlba) && 11986 (blockcount <= sd_cdbtab[i].sc_maxlen)) { 11987 11988 /* 11989 * The command will fit into the CDB type 11990 * specified by sd_cdbtab[i]. 11991 */ 11992 cp = sd_cdbtab + i; 11993 11994 /* 11995 * Call scsi_init_pkt so we can fill in the 11996 * CDB. 11997 */ 11998 return_pktp = scsi_init_pkt(SD_ADDRESS(un), *pktpp, 11999 bp, cp->sc_grpcode, un->un_status_len, 0, 12000 flags, callback, callback_arg); 12001 12002 if (return_pktp != NULL) { 12003 12004 /* 12005 * Return new value of pkt 12006 */ 12007 *pktpp = return_pktp; 12008 12009 /* 12010 * To be safe, zero the CDB, ensuring there is 12011 * no leftover data from a previous command. 12012 */ 12013 bzero(return_pktp->pkt_cdbp, cp->sc_grpcode); 12014 12015 /* 12016 * Handle partial DMA mapping 12017 */ 12018 if (return_pktp->pkt_resid != 0) { 12019 12020 /* 12021 * Not going to xfer as many blocks as 12022 * originally expected 12023 */ 12024 blockcount -= 12025 SD_BYTES2TGTBLOCKS(un, 12026 return_pktp->pkt_resid); 12027 } 12028 12029 cdbp = (union scsi_cdb *)return_pktp->pkt_cdbp; 12030 12031 /* 12032 * Set command byte based on the CDB 12033 * type we matched. 12034 */ 12035 cdbp->scc_cmd = cp->sc_grpmask | 12036 ((bp->b_flags & B_READ) ? 12037 SCMD_READ : SCMD_WRITE); 12038 12039 SD_FILL_SCSI1_LUN(un, return_pktp); 12040 12041 /* 12042 * Fill in LBA and length 12043 */ 12044 ASSERT((cp->sc_grpcode == CDB_GROUP1) || 12045 (cp->sc_grpcode == CDB_GROUP4) || 12046 (cp->sc_grpcode == CDB_GROUP0) || 12047 (cp->sc_grpcode == CDB_GROUP5)); 12048 12049 if (cp->sc_grpcode == CDB_GROUP1) { 12050 FORMG1ADDR(cdbp, lba); 12051 FORMG1COUNT(cdbp, blockcount); 12052 return (0); 12053 } else if (cp->sc_grpcode == CDB_GROUP4) { 12054 FORMG4LONGADDR(cdbp, lba); 12055 FORMG4COUNT(cdbp, blockcount); 12056 return (0); 12057 } else if (cp->sc_grpcode == CDB_GROUP0) { 12058 FORMG0ADDR(cdbp, lba); 12059 FORMG0COUNT(cdbp, blockcount); 12060 return (0); 12061 } else if (cp->sc_grpcode == CDB_GROUP5) { 12062 FORMG5ADDR(cdbp, lba); 12063 FORMG5COUNT(cdbp, blockcount); 12064 return (0); 12065 } 12066 12067 /* 12068 * It should be impossible to not match one 12069 * of the CDB types above, so we should never 12070 * reach this point. Set the CDB command byte 12071 * to test-unit-ready to avoid writing 12072 * to somewhere we don't intend. 12073 */ 12074 cdbp->scc_cmd = SCMD_TEST_UNIT_READY; 12075 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL); 12076 } else { 12077 /* 12078 * Couldn't get scsi_pkt 12079 */ 12080 return (SD_PKT_ALLOC_FAILURE); 12081 } 12082 } 12083 } 12084 12085 /* 12086 * None of the available CDB types were suitable.
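 * (For reference, the standard CDB capacities: a 6-byte Group 0
 * CDB carries a 21-bit LBA and an 8-bit count; a 10-byte Group 1
 * CDB a 32-bit LBA and a 16-bit count; a 12-byte Group 5 CDB a
 * 32-bit LBA and a 32-bit count; and a 16-byte Group 4 CDB a
 * 64-bit LBA and a 32-bit count.)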
This really 12087 * should never happen: on a 64-bit system we support 12088 * READ16/WRITE16, which can hold an entire 64-bit disk address, 12089 * and on a 32-bit system we refuse to bind to a device 12090 * larger than 2TB, so addresses will never be larger than 32 bits. 12091 */ 12092 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL); 12093 } 12094 12095 /* 12096 * Function: sd_setup_next_rw_pkt 12097 * 12098 * Description: Setup packet for partial DMA transfers, except for the 12099 * initial transfer. sd_setup_rw_pkt should be used for 12100 * the initial transfer. 12101 * 12102 * Context: Kernel thread and may be called from interrupt context. 12103 */ 12104 12105 int 12106 sd_setup_next_rw_pkt(struct sd_lun *un, 12107 struct scsi_pkt *pktp, struct buf *bp, 12108 diskaddr_t lba, uint32_t blockcount) 12109 { 12110 uchar_t com; 12111 union scsi_cdb *cdbp; 12112 uchar_t cdb_group_id; 12113 12114 ASSERT(pktp != NULL); 12115 ASSERT(pktp->pkt_cdbp != NULL); 12116 12117 cdbp = (union scsi_cdb *)pktp->pkt_cdbp; 12118 com = cdbp->scc_cmd; 12119 cdb_group_id = CDB_GROUPID(com); 12120 12121 ASSERT((cdb_group_id == CDB_GROUPID_0) || 12122 (cdb_group_id == CDB_GROUPID_1) || 12123 (cdb_group_id == CDB_GROUPID_4) || 12124 (cdb_group_id == CDB_GROUPID_5)); 12125 12126 /* 12127 * Move pkt to the next portion of the xfer. 12128 * func is NULL_FUNC so we do not have to release 12129 * the disk mutex here. 12130 */ 12131 if (scsi_init_pkt(SD_ADDRESS(un), pktp, bp, 0, 0, 0, 0, 12132 NULL_FUNC, NULL) == pktp) { 12133 /* Success. Handle partial DMA */ 12134 if (pktp->pkt_resid != 0) { 12135 blockcount -= 12136 SD_BYTES2TGTBLOCKS(un, pktp->pkt_resid); 12137 } 12138 12139 cdbp->scc_cmd = com; 12140 SD_FILL_SCSI1_LUN(un, pktp); 12141 if (cdb_group_id == CDB_GROUPID_1) { 12142 FORMG1ADDR(cdbp, lba); 12143 FORMG1COUNT(cdbp, blockcount); 12144 return (0); 12145 } else if (cdb_group_id == CDB_GROUPID_4) { 12146 FORMG4LONGADDR(cdbp, lba); 12147 FORMG4COUNT(cdbp, blockcount); 12148 return (0); 12149 } else if (cdb_group_id == CDB_GROUPID_0) { 12150 FORMG0ADDR(cdbp, lba); 12151 FORMG0COUNT(cdbp, blockcount); 12152 return (0); 12153 } else if (cdb_group_id == CDB_GROUPID_5) { 12154 FORMG5ADDR(cdbp, lba); 12155 FORMG5COUNT(cdbp, blockcount); 12156 return (0); 12157 } 12158 12159 /* Unreachable */ 12160 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL); 12161 } 12162 12163 /* 12164 * Error setting up next portion of cmd transfer. 12165 * Something is definitely very wrong and this 12166 * should not happen. 12167 */ 12168 return (SD_PKT_ALLOC_FAILURE); 12169 } 12170 12171 /* 12172 * Function: sd_initpkt_for_uscsi 12173 * 12174 * Description: Allocate and initialize for transport a scsi_pkt struct, 12175 * based upon the info specified in the given uscsi_cmd struct. 12176 * 12177 * Return Code: SD_PKT_ALLOC_SUCCESS 12178 * SD_PKT_ALLOC_FAILURE 12179 * SD_PKT_ALLOC_FAILURE_NO_DMA 12180 * SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL 12181 * 12182 * Context: Kernel thread and may be called from software interrupt context 12183 * as part of a sdrunout callback.
This function may not block or 12184 * call routines that block. 12185 */ 12186 12187 static int 12188 sd_initpkt_for_uscsi(struct buf *bp, struct scsi_pkt **pktpp) 12189 { 12190 struct uscsi_cmd *uscmd; 12191 struct sd_xbuf *xp; 12192 struct scsi_pkt *pktp; 12193 struct sd_lun *un; 12194 uint32_t flags = 0; 12195 12196 ASSERT(bp != NULL); 12197 ASSERT(pktpp != NULL); 12198 xp = SD_GET_XBUF(bp); 12199 ASSERT(xp != NULL); 12200 un = SD_GET_UN(bp); 12201 ASSERT(un != NULL); 12202 ASSERT(mutex_owned(SD_MUTEX(un))); 12203 12204 /* The pointer to the uscsi_cmd struct is expected in xb_pktinfo */ 12205 uscmd = (struct uscsi_cmd *)xp->xb_pktinfo; 12206 ASSERT(uscmd != NULL); 12207 12208 SD_TRACE(SD_LOG_IO_CORE, un, 12209 "sd_initpkt_for_uscsi: entry: buf:0x%p\n", bp); 12210 12211 /* 12212 * Allocate the scsi_pkt for the command. 12213 * Note: If PKT_DMA_PARTIAL flag is set, scsi_vhci binds a path 12214 * during scsi_init_pkt time and will continue to use the 12215 * same path as long as the same scsi_pkt is used without 12216 * intervening scsi_dmafree(). Since a uscsi command does 12217 * not call scsi_dmafree() before retrying a failed command, it 12218 * is necessary to make sure PKT_DMA_PARTIAL flag is NOT 12219 * set, so that scsi_vhci can use another available path for the 12220 * retry. Besides, uscsi commands do not allow DMA breakup, 12221 * so there is no need to set PKT_DMA_PARTIAL flag. 12222 */ 12223 if (uscmd->uscsi_rqlen > SENSE_LENGTH) { 12224 pktp = scsi_init_pkt(SD_ADDRESS(un), NULL, 12225 ((bp->b_bcount != 0) ? bp : NULL), uscmd->uscsi_cdblen, 12226 ((int)(uscmd->uscsi_rqlen) + sizeof (struct scsi_arq_status) 12227 - sizeof (struct scsi_extended_sense)), 0, 12228 (un->un_pkt_flags & ~PKT_DMA_PARTIAL) | PKT_XARQ, 12229 sdrunout, (caddr_t)un); 12230 } else { 12231 pktp = scsi_init_pkt(SD_ADDRESS(un), NULL, 12232 ((bp->b_bcount != 0) ? bp : NULL), uscmd->uscsi_cdblen, 12233 sizeof (struct scsi_arq_status), 0, 12234 (un->un_pkt_flags & ~PKT_DMA_PARTIAL), 12235 sdrunout, (caddr_t)un); 12236 } 12237 12238 if (pktp == NULL) { 12239 *pktpp = NULL; 12240 /* 12241 * Set the driver state to RWAIT to indicate the driver 12242 * is waiting on resource allocations. The driver will not 12243 * suspend, pm_suspend, or detach while the state is RWAIT. 12244 */ 12245 New_state(un, SD_STATE_RWAIT); 12246 12247 SD_ERROR(SD_LOG_IO_CORE, un, 12248 "sd_initpkt_for_uscsi: No pktp. exit bp:0x%p\n", bp); 12249 12250 if ((bp->b_flags & B_ERROR) != 0) { 12251 return (SD_PKT_ALLOC_FAILURE_NO_DMA); 12252 } 12253 return (SD_PKT_ALLOC_FAILURE); 12254 } 12255 12256 /* 12257 * We do not do DMA breakup for USCSI commands, so return failure 12258 * here if all the needed DMA resources were not allocated. 12259 */ 12260 if ((un->un_pkt_flags & PKT_DMA_PARTIAL) && 12261 (bp->b_bcount != 0) && (pktp->pkt_resid != 0)) { 12262 scsi_destroy_pkt(pktp); 12263 SD_ERROR(SD_LOG_IO_CORE, un, "sd_initpkt_for_uscsi: " 12264 "No partial DMA for USCSI. exit: buf:0x%p\n", bp); 12265 return (SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL); 12266 } 12267 12268 /* Init the cdb from the given uscsi struct */ 12269 (void) scsi_setup_cdb((union scsi_cdb *)pktp->pkt_cdbp, 12270 uscmd->uscsi_cdb[0], 0, 0, 0); 12271 12272 SD_FILL_SCSI1_LUN(un, pktp); 12273 12274 /* 12275 * Set up the optional USCSI flags. See the uscsi(7I) man page 12276 * for a listing of the supported flags.
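 *
 * For illustration only -- a hypothetical userland caller (not part
 * of this driver) might issue a TEST UNIT READY through the USCSI
 * interface roughly as follows, where fd is assumed to be an open
 * descriptor for the device node:
 *
 *	struct uscsi_cmd ucmd;
 *	union scsi_cdb cdb;
 *
 *	bzero(&ucmd, sizeof (ucmd));
 *	bzero(&cdb, sizeof (cdb));
 *	cdb.scc_cmd = SCMD_TEST_UNIT_READY;
 *	ucmd.uscsi_cdb = (caddr_t)&cdb;
 *	ucmd.uscsi_cdblen = CDB_GROUP0;
 *	ucmd.uscsi_flags = USCSI_SILENT | USCSI_DIAGNOSE;
 *	ucmd.uscsi_timeout = 30;
 *	if (ioctl(fd, USCSICMD, &ucmd) != 0)
 *		perror("USCSICMD");
 *
 * The flag translations below map such uscsi_flags onto the
 * corresponding pkt_flags.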
12277 */ 12278 12279 if (uscmd->uscsi_flags & USCSI_SILENT) { 12280 flags |= FLAG_SILENT; 12281 } 12282 12283 if (uscmd->uscsi_flags & USCSI_DIAGNOSE) { 12284 flags |= FLAG_DIAGNOSE; 12285 } 12286 12287 if (uscmd->uscsi_flags & USCSI_ISOLATE) { 12288 flags |= FLAG_ISOLATE; 12289 } 12290 12291 if (un->un_f_is_fibre == FALSE) { 12292 if (uscmd->uscsi_flags & USCSI_RENEGOT) { 12293 flags |= FLAG_RENEGOTIATE_WIDE_SYNC; 12294 } 12295 } 12296 12297 /* 12298 * Set the pkt flags here so we save time later. 12299 * Note: These flags are NOT in the uscsi man page!!! 12300 */ 12301 if (uscmd->uscsi_flags & USCSI_HEAD) { 12302 flags |= FLAG_HEAD; 12303 } 12304 12305 if (uscmd->uscsi_flags & USCSI_NOINTR) { 12306 flags |= FLAG_NOINTR; 12307 } 12308 12309 /* 12310 * For tagged queueing, things get a bit complicated. 12311 * Check first for head of queue and last for ordered queue. 12312 * If neither head nor order, use the default driver tag flags. 12313 */ 12314 if ((uscmd->uscsi_flags & USCSI_NOTAG) == 0) { 12315 if (uscmd->uscsi_flags & USCSI_HTAG) { 12316 flags |= FLAG_HTAG; 12317 } else if (uscmd->uscsi_flags & USCSI_OTAG) { 12318 flags |= FLAG_OTAG; 12319 } else { 12320 flags |= un->un_tagflags & FLAG_TAGMASK; 12321 } 12322 } 12323 12324 if (uscmd->uscsi_flags & USCSI_NODISCON) { 12325 flags = (flags & ~FLAG_TAGMASK) | FLAG_NODISCON; 12326 } 12327 12328 pktp->pkt_flags = flags; 12329 12330 /* Copy the caller's CDB into the pkt... */ 12331 bcopy(uscmd->uscsi_cdb, pktp->pkt_cdbp, uscmd->uscsi_cdblen); 12332 12333 if (uscmd->uscsi_timeout == 0) { 12334 pktp->pkt_time = un->un_uscsi_timeout; 12335 } else { 12336 pktp->pkt_time = uscmd->uscsi_timeout; 12337 } 12338 12339 /* need it later to identify USCSI request in sdintr */ 12340 xp->xb_pkt_flags |= SD_XB_USCSICMD; 12341 12342 xp->xb_sense_resid = uscmd->uscsi_rqresid; 12343 12344 pktp->pkt_private = bp; 12345 pktp->pkt_comp = sdintr; 12346 *pktpp = pktp; 12347 12348 SD_TRACE(SD_LOG_IO_CORE, un, 12349 "sd_initpkt_for_uscsi: exit: buf:0x%p\n", bp); 12350 12351 return (SD_PKT_ALLOC_SUCCESS); 12352 } 12353 12354 12355 /* 12356 * Function: sd_destroypkt_for_uscsi 12357 * 12358 * Description: Free the scsi_pkt(9S) struct for the given bp, for uscsi 12359 * IOs.. Also saves relevant info into the associated uscsi_cmd 12360 * struct. 12361 * 12362 * Context: May be called under interrupt context 12363 */ 12364 12365 static void 12366 sd_destroypkt_for_uscsi(struct buf *bp) 12367 { 12368 struct uscsi_cmd *uscmd; 12369 struct sd_xbuf *xp; 12370 struct scsi_pkt *pktp; 12371 struct sd_lun *un; 12372 12373 ASSERT(bp != NULL); 12374 xp = SD_GET_XBUF(bp); 12375 ASSERT(xp != NULL); 12376 un = SD_GET_UN(bp); 12377 ASSERT(un != NULL); 12378 ASSERT(!mutex_owned(SD_MUTEX(un))); 12379 pktp = SD_GET_PKTP(bp); 12380 ASSERT(pktp != NULL); 12381 12382 SD_TRACE(SD_LOG_IO_CORE, un, 12383 "sd_destroypkt_for_uscsi: entry: buf:0x%p\n", bp); 12384 12385 /* The pointer to the uscsi_cmd struct is expected in xb_pktinfo */ 12386 uscmd = (struct uscsi_cmd *)xp->xb_pktinfo; 12387 ASSERT(uscmd != NULL); 12388 12389 /* Save the status and the residual into the uscsi_cmd struct */ 12390 uscmd->uscsi_status = ((*(pktp)->pkt_scbp) & STATUS_MASK); 12391 uscmd->uscsi_resid = bp->b_resid; 12392 12393 /* 12394 * If enabled, copy any saved sense data into the area specified 12395 * by the uscsi command. 
12396 */ 12397 if (((uscmd->uscsi_flags & USCSI_RQENABLE) != 0) && 12398 (uscmd->uscsi_rqlen != 0) && (uscmd->uscsi_rqbuf != NULL)) { 12399 /* 12400 * Note: uscmd->uscsi_rqbuf should always point to a buffer 12401 * at least SENSE_LENGTH bytes in size (see sd_send_scsi_cmd()) 12402 */ 12403 uscmd->uscsi_rqstatus = xp->xb_sense_status; 12404 uscmd->uscsi_rqresid = xp->xb_sense_resid; 12405 if (uscmd->uscsi_rqlen > SENSE_LENGTH) { 12406 bcopy(xp->xb_sense_data, uscmd->uscsi_rqbuf, 12407 MAX_SENSE_LENGTH); 12408 } else { 12409 bcopy(xp->xb_sense_data, uscmd->uscsi_rqbuf, 12410 SENSE_LENGTH); 12411 } 12412 } 12413 12414 /* We are done with the scsi_pkt; free it now */ 12415 ASSERT(SD_GET_PKTP(bp) != NULL); 12416 scsi_destroy_pkt(SD_GET_PKTP(bp)); 12417 12418 SD_TRACE(SD_LOG_IO_CORE, un, 12419 "sd_destroypkt_for_uscsi: exit: buf:0x%p\n", bp); 12420 } 12421 12422 12423 /* 12424 * Function: sd_bioclone_alloc 12425 * 12426 * Description: Allocate a buf(9S) and init it as per the given buf 12427 * and the various arguments. The associated sd_xbuf 12428 * struct is (nearly) duplicated. The struct buf *bp 12429 * argument is saved in new_xp->xb_private. 12430 * 12431 * Arguments: bp - ptr the the buf(9S) to be "shadowed" 12432 * datalen - size of data area for the shadow bp 12433 * blkno - starting LBA 12434 * func - function pointer for b_iodone in the shadow buf. (May 12435 * be NULL if none.) 12436 * 12437 * Return Code: Pointer to allocates buf(9S) struct 12438 * 12439 * Context: Can sleep. 12440 */ 12441 12442 static struct buf * 12443 sd_bioclone_alloc(struct buf *bp, size_t datalen, 12444 daddr_t blkno, int (*func)(struct buf *)) 12445 { 12446 struct sd_lun *un; 12447 struct sd_xbuf *xp; 12448 struct sd_xbuf *new_xp; 12449 struct buf *new_bp; 12450 12451 ASSERT(bp != NULL); 12452 xp = SD_GET_XBUF(bp); 12453 ASSERT(xp != NULL); 12454 un = SD_GET_UN(bp); 12455 ASSERT(un != NULL); 12456 ASSERT(!mutex_owned(SD_MUTEX(un))); 12457 12458 new_bp = bioclone(bp, 0, datalen, SD_GET_DEV(un), blkno, func, 12459 NULL, KM_SLEEP); 12460 12461 new_bp->b_lblkno = blkno; 12462 12463 /* 12464 * Allocate an xbuf for the shadow bp and copy the contents of the 12465 * original xbuf into it. 12466 */ 12467 new_xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP); 12468 bcopy(xp, new_xp, sizeof (struct sd_xbuf)); 12469 12470 /* 12471 * The given bp is automatically saved in the xb_private member 12472 * of the new xbuf. Callers are allowed to depend on this. 12473 */ 12474 new_xp->xb_private = bp; 12475 12476 new_bp->b_private = new_xp; 12477 12478 return (new_bp); 12479 } 12480 12481 /* 12482 * Function: sd_shadow_buf_alloc 12483 * 12484 * Description: Allocate a buf(9S) and init it as per the given buf 12485 * and the various arguments. The associated sd_xbuf 12486 * struct is (nearly) duplicated. The struct buf *bp 12487 * argument is saved in new_xp->xb_private. 12488 * 12489 * Arguments: bp - ptr the the buf(9S) to be "shadowed" 12490 * datalen - size of data area for the shadow bp 12491 * bflags - B_READ or B_WRITE (pseudo flag) 12492 * blkno - starting LBA 12493 * func - function pointer for b_iodone in the shadow buf. (May 12494 * be NULL if none.) 12495 * 12496 * Return Code: Pointer to allocates buf(9S) struct 12497 * 12498 * Context: Can sleep. 
12499 */ 12500 12501 static struct buf * 12502 sd_shadow_buf_alloc(struct buf *bp, size_t datalen, uint_t bflags, 12503 daddr_t blkno, int (*func)(struct buf *)) 12504 { 12505 struct sd_lun *un; 12506 struct sd_xbuf *xp; 12507 struct sd_xbuf *new_xp; 12508 struct buf *new_bp; 12509 12510 ASSERT(bp != NULL); 12511 xp = SD_GET_XBUF(bp); 12512 ASSERT(xp != NULL); 12513 un = SD_GET_UN(bp); 12514 ASSERT(un != NULL); 12515 ASSERT(!mutex_owned(SD_MUTEX(un))); 12516 12517 if (bp->b_flags & (B_PAGEIO | B_PHYS)) { 12518 bp_mapin(bp); 12519 } 12520 12521 bflags &= (B_READ | B_WRITE); 12522 #if defined(__i386) || defined(__amd64) 12523 new_bp = getrbuf(KM_SLEEP); 12524 new_bp->b_un.b_addr = kmem_zalloc(datalen, KM_SLEEP); 12525 new_bp->b_bcount = datalen; 12526 new_bp->b_flags = bflags | 12527 (bp->b_flags & ~(B_PAGEIO | B_PHYS | B_REMAPPED | B_SHADOW)); 12528 #else 12529 new_bp = scsi_alloc_consistent_buf(SD_ADDRESS(un), NULL, 12530 datalen, bflags, SLEEP_FUNC, NULL); 12531 #endif 12532 new_bp->av_forw = NULL; 12533 new_bp->av_back = NULL; 12534 new_bp->b_dev = bp->b_dev; 12535 new_bp->b_blkno = blkno; 12536 new_bp->b_iodone = func; 12537 new_bp->b_edev = bp->b_edev; 12538 new_bp->b_resid = 0; 12539 12540 /* We need to preserve the B_FAILFAST flag */ 12541 if (bp->b_flags & B_FAILFAST) { 12542 new_bp->b_flags |= B_FAILFAST; 12543 } 12544 12545 /* 12546 * Allocate an xbuf for the shadow bp and copy the contents of the 12547 * original xbuf into it. 12548 */ 12549 new_xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP); 12550 bcopy(xp, new_xp, sizeof (struct sd_xbuf)); 12551 12552 /* Need later to copy data between the shadow buf & original buf! */ 12553 new_xp->xb_pkt_flags |= PKT_CONSISTENT; 12554 12555 /* 12556 * The given bp is automatically saved in the xb_private member 12557 * of the new xbuf. Callers are allowed to depend on this. 12558 */ 12559 new_xp->xb_private = bp; 12560 12561 new_bp->b_private = new_xp; 12562 12563 return (new_bp); 12564 } 12565 12566 /* 12567 * Function: sd_bioclone_free 12568 * 12569 * Description: Deallocate a buf(9S) that was used for 'shadow' IO operations 12570 * in the larger than partition operation. 12571 * 12572 * Context: May be called under interrupt context 12573 */ 12574 12575 static void 12576 sd_bioclone_free(struct buf *bp) 12577 { 12578 struct sd_xbuf *xp; 12579 12580 ASSERT(bp != NULL); 12581 xp = SD_GET_XBUF(bp); 12582 ASSERT(xp != NULL); 12583 12584 /* 12585 * Call bp_mapout() before freeing the buf, in case a lower 12586 * layer or HBA had done a bp_mapin(). we must do this here 12587 * as we are the "originator" of the shadow buf. 12588 */ 12589 bp_mapout(bp); 12590 12591 /* 12592 * Null out b_iodone before freeing the bp, to ensure that the driver 12593 * never gets confused by a stale value in this field. (Just a little 12594 * extra defensiveness here.) 12595 */ 12596 bp->b_iodone = NULL; 12597 12598 freerbuf(bp); 12599 12600 kmem_free(xp, sizeof (struct sd_xbuf)); 12601 } 12602 12603 /* 12604 * Function: sd_shadow_buf_free 12605 * 12606 * Description: Deallocate a buf(9S) that was used for 'shadow' IO operations. 12607 * 12608 * Context: May be called under interrupt context 12609 */ 12610 12611 static void 12612 sd_shadow_buf_free(struct buf *bp) 12613 { 12614 struct sd_xbuf *xp; 12615 12616 ASSERT(bp != NULL); 12617 xp = SD_GET_XBUF(bp); 12618 ASSERT(xp != NULL); 12619 12620 #if defined(__sparc) 12621 /* 12622 * Call bp_mapout() before freeing the buf, in case a lower 12623 * layer or HBA had done a bp_mapin(). 
We must do this here 12624 * as we are the "originator" of the shadow buf. 12625 */ 12626 bp_mapout(bp); 12627 #endif 12628 12629 /* 12630 * Null out b_iodone before freeing the bp, to ensure that the driver 12631 * never gets confused by a stale value in this field. (Just a little 12632 * extra defensiveness here.) 12633 */ 12634 bp->b_iodone = NULL; 12635 12636 #if defined(__i386) || defined(__amd64) 12637 kmem_free(bp->b_un.b_addr, bp->b_bcount); 12638 freerbuf(bp); 12639 #else 12640 scsi_free_consistent_buf(bp); 12641 #endif 12642 12643 kmem_free(xp, sizeof (struct sd_xbuf)); 12644 } 12645 12646 12647 /* 12648 * Function: sd_print_transport_rejected_message 12649 * 12650 * Description: This implements the ludicrously complex rules for printing 12651 * a "transport rejected" message. This is to address the 12652 * specific problem of having a flood of this error message 12653 * produced when a failover occurs. 12654 * 12655 * Context: Any. 12656 */ 12657 12658 static void 12659 sd_print_transport_rejected_message(struct sd_lun *un, struct sd_xbuf *xp, 12660 int code) 12661 { 12662 ASSERT(un != NULL); 12663 ASSERT(mutex_owned(SD_MUTEX(un))); 12664 ASSERT(xp != NULL); 12665 12666 /* 12667 * Print the "transport rejected" message under the following 12668 * conditions: 12669 * 12670 * - Whenever the SD_LOGMASK_DIAG bit of sd_level_mask is set 12671 * - The error code from scsi_transport() is NOT a TRAN_FATAL_ERROR. 12672 * - If the error code IS a TRAN_FATAL_ERROR, then the message is 12673 * printed the FIRST time a TRAN_FATAL_ERROR is returned from 12674 * scsi_transport(9F) (which indicates that the target might have 12675 * gone off-line). This uses the un->un_tran_fatal_count 12676 * count, which is incremented whenever a TRAN_FATAL_ERROR is 12677 * received, and reset to zero whenever a TRAN_ACCEPT is returned 12678 * from scsi_transport(). 12679 * 12680 * The FLAG_SILENT in the scsi_pkt must be CLEARED in ALL of 12681 * the preceding cases in order for the message to be printed. 12682 */ 12683 if ((xp->xb_pktp->pkt_flags & FLAG_SILENT) == 0) { 12684 if ((sd_level_mask & SD_LOGMASK_DIAG) || 12685 (code != TRAN_FATAL_ERROR) || 12686 (un->un_tran_fatal_count == 1)) { 12687 switch (code) { 12688 case TRAN_BADPKT: 12689 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 12690 "transport rejected bad packet\n"); 12691 break; 12692 case TRAN_FATAL_ERROR: 12693 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 12694 "transport rejected fatal error\n"); 12695 break; 12696 default: 12697 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 12698 "transport rejected (%d)\n", code); 12699 break; 12700 } 12701 } 12702 } 12703 } 12704 12705 12706 /* 12707 * Function: sd_add_buf_to_waitq 12708 * 12709 * Description: Add the given buf(9S) struct to the wait queue for the 12710 * instance. If sorting is enabled, then the buf is added 12711 * to the queue via an elevator sort algorithm (a la 12712 * disksort(9F)). The SD_GET_BLKNO(bp) is used as the sort key. 12713 * If sorting is not enabled, then the buf is just added 12714 * to the end of the wait queue. 12715 * 12716 * Return Code: void 12717 * 12718 * Context: Does not sleep/block, therefore technically can be called 12719 * from any context. However if sorting is enabled then the 12720 * execution time is indeterminate, and may take a long time if 12721 * the wait queue grows large.
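 *
 * A hedged worked example of the two-queue elevator sort implemented
 * below (block numbers are illustrative only): if the waitq holds
 * requests at blknos 40 -> 70 -> 10 -> 20, the 70 -> 10 inversion
 * marks the start of the second queue. A new buf at blkno 50 is then
 * inserted between 40 and 70 on the first queue, while a new buf at
 * blkno 15 is inserted between 10 and 20 on the second queue.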
12722 */ 12723 12724 static void 12725 sd_add_buf_to_waitq(struct sd_lun *un, struct buf *bp) 12726 { 12727 struct buf *ap; 12728 12729 ASSERT(bp != NULL); 12730 ASSERT(un != NULL); 12731 ASSERT(mutex_owned(SD_MUTEX(un))); 12732 12733 /* If the queue is empty, add the buf as the only entry & return. */ 12734 if (un->un_waitq_headp == NULL) { 12735 ASSERT(un->un_waitq_tailp == NULL); 12736 un->un_waitq_headp = un->un_waitq_tailp = bp; 12737 bp->av_forw = NULL; 12738 return; 12739 } 12740 12741 ASSERT(un->un_waitq_tailp != NULL); 12742 12743 /* 12744 * If sorting is disabled, just add the buf to the tail end of 12745 * the wait queue and return. 12746 */ 12747 if (un->un_f_disksort_disabled) { 12748 un->un_waitq_tailp->av_forw = bp; 12749 un->un_waitq_tailp = bp; 12750 bp->av_forw = NULL; 12751 return; 12752 } 12753 12754 /* 12755 * Sort thru the list of requests currently on the wait queue 12756 * and add the new buf request at the appropriate position. 12757 * 12758 * The un->un_waitq_headp is an activity chain pointer on which 12759 * we keep two queues, sorted in ascending SD_GET_BLKNO() order. The 12760 * first queue holds those requests which are positioned after 12761 * the current SD_GET_BLKNO() (in the first request); the second holds 12762 * requests which came in after their SD_GET_BLKNO() number was passed. 12763 * Thus we implement a one way scan, retracting after reaching 12764 * the end of the drive to the first request on the second 12765 * queue, at which time it becomes the first queue. 12766 * A one-way scan is natural because of the way UNIX read-ahead 12767 * blocks are allocated. 12768 * 12769 * If we lie after the first request, then we must locate the 12770 * second request list and add ourselves to it. 12771 */ 12772 ap = un->un_waitq_headp; 12773 if (SD_GET_BLKNO(bp) < SD_GET_BLKNO(ap)) { 12774 while (ap->av_forw != NULL) { 12775 /* 12776 * Look for an "inversion" in the (normally 12777 * ascending) block numbers. This indicates 12778 * the start of the second request list. 12779 */ 12780 if (SD_GET_BLKNO(ap->av_forw) < SD_GET_BLKNO(ap)) { 12781 /* 12782 * Search the second request list for the 12783 * first request at a larger block number. 12784 * We go before that; however if there is 12785 * no such request, we go at the end. 12786 */ 12787 do { 12788 if (SD_GET_BLKNO(bp) < 12789 SD_GET_BLKNO(ap->av_forw)) { 12790 goto insert; 12791 } 12792 ap = ap->av_forw; 12793 } while (ap->av_forw != NULL); 12794 goto insert; /* after last */ 12795 } 12796 ap = ap->av_forw; 12797 } 12798 12799 /* 12800 * No inversions... we will go after the last, and 12801 * be the first request in the second request list. 12802 */ 12803 goto insert; 12804 } 12805 12806 /* 12807 * Request is at/after the current request... 12808 * sort in the first request list. 12809 */ 12810 while (ap->av_forw != NULL) { 12811 /* 12812 * We want to go after the current request (1) if 12813 * there is an inversion after it (i.e. it is the end 12814 * of the first request list), or (2) if the next 12815 * request is a larger block no. than our request. 12816 */ 12817 if ((SD_GET_BLKNO(ap->av_forw) < SD_GET_BLKNO(ap)) || 12818 (SD_GET_BLKNO(bp) < SD_GET_BLKNO(ap->av_forw))) { 12819 goto insert; 12820 } 12821 ap = ap->av_forw; 12822 } 12823 12824 /* 12825 * Neither a second list nor a larger request, therefore 12826 * we go at the end of the first list (which is the same 12827 * as the end of the whole shebang).
12828 */ 12829 insert: 12830 bp->av_forw = ap->av_forw; 12831 ap->av_forw = bp; 12832 12833 /* 12834 * If we inserted onto the tail end of the waitq, make sure the 12835 * tail pointer is updated. 12836 */ 12837 if (ap == un->un_waitq_tailp) { 12838 un->un_waitq_tailp = bp; 12839 } 12840 } 12841 12842 12843 /* 12844 * Function: sd_start_cmds 12845 * 12846 * Description: Remove and transport cmds from the driver queues. 12847 * 12848 * Arguments: un - pointer to the unit (soft state) struct for the target. 12849 * 12850 * immed_bp - ptr to a buf to be transported immediately. Only 12851 * the immed_bp is transported; bufs on the waitq are not 12852 * processed and the un_retry_bp is not checked. If immed_bp is 12853 * NULL, then normal queue processing is performed. 12854 * 12855 * Context: May be called from kernel thread context, interrupt context, 12856 * or runout callback context. This function may not block or 12857 * call routines that block. 12858 */ 12859 12860 static void 12861 sd_start_cmds(struct sd_lun *un, struct buf *immed_bp) 12862 { 12863 struct sd_xbuf *xp; 12864 struct buf *bp; 12865 void (*statp)(kstat_io_t *); 12866 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 12867 void (*saved_statp)(kstat_io_t *); 12868 #endif 12869 int rval; 12870 12871 ASSERT(un != NULL); 12872 ASSERT(mutex_owned(SD_MUTEX(un))); 12873 ASSERT(un->un_ncmds_in_transport >= 0); 12874 ASSERT(un->un_throttle >= 0); 12875 12876 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_start_cmds: entry\n"); 12877 12878 do { 12879 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 12880 saved_statp = NULL; 12881 #endif 12882 12883 /* 12884 * If we are syncing or dumping, fail the command to 12885 * avoid recursively calling back into scsi_transport(). 12886 * The dump I/O itself uses a separate code path so this 12887 * only prevents non-dump I/O from being sent while dumping. 12888 * File system sync takes place before dumping begins. 12889 * During panic, filesystem I/O is allowed provided 12890 * un_in_callback is <= 1. This is to prevent recursion 12891 * such as sd_start_cmds -> scsi_transport -> sdintr -> 12892 * sd_start_cmds and so on. See panic.c for more information 12893 * about the states the system can be in during panic. 12894 */ 12895 if ((un->un_state == SD_STATE_DUMPING) || 12896 (ddi_in_panic() && (un->un_in_callback > 1))) { 12897 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 12898 "sd_start_cmds: panicking\n"); 12899 goto exit; 12900 } 12901 12902 if ((bp = immed_bp) != NULL) { 12903 /* 12904 * We have a bp that must be transported immediately. 12905 * It's OK to transport the immed_bp here without doing 12906 * the throttle limit check because the immed_bp is 12907 * always used in a retry/recovery case. This means 12908 * that we know we are not at the throttle limit by 12909 * virtue of the fact that to get here we must have 12910 * already gotten a command back via sdintr(). This also 12911 * relies on (1) the command on un_retry_bp preventing 12912 * further commands from the waitq from being issued; 12913 * and (2) the code in sd_retry_command checking the 12914 * throttle limit before issuing a delayed or immediate 12915 * retry. This holds even if the throttle limit is 12916 * currently ratcheted down from its maximum value. 
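 *
 * In hedged sketch form, the bp selection order implemented by
 * this loop:
 *
 *	if ((bp = immed_bp) != NULL)			priority/retry case
 *	else if ((bp = un->un_waitq_headp) != NULL)	normal waitq case
 *	else						no work; exit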
12917 */ 12918 statp = kstat_runq_enter; 12919 if (bp == un->un_retry_bp) { 12920 ASSERT((un->un_retry_statp == NULL) || 12921 (un->un_retry_statp == kstat_waitq_enter) || 12922 (un->un_retry_statp == 12923 kstat_runq_back_to_waitq)); 12924 /* 12925 * If the waitq kstat was incremented when 12926 * sd_set_retry_bp() queued this bp for a retry, 12927 * then we must set up statp so that the waitq 12928 * count will get decremented correctly below. 12929 * Also we must clear un->un_retry_statp to 12930 * ensure that we do not act on a stale value 12931 * in this field. 12932 */ 12933 if ((un->un_retry_statp == kstat_waitq_enter) || 12934 (un->un_retry_statp == 12935 kstat_runq_back_to_waitq)) { 12936 statp = kstat_waitq_to_runq; 12937 } 12938 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 12939 saved_statp = un->un_retry_statp; 12940 #endif 12941 un->un_retry_statp = NULL; 12942 12943 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, 12944 "sd_start_cmds: un:0x%p: GOT retry_bp:0x%p " 12945 "un_throttle:%d un_ncmds_in_transport:%d\n", 12946 un, un->un_retry_bp, un->un_throttle, 12947 un->un_ncmds_in_transport); 12948 } else { 12949 SD_TRACE(SD_LOG_IO_CORE, un, "sd_start_cmds: " 12950 "processing priority bp:0x%p\n", bp); 12951 } 12952 12953 } else if ((bp = un->un_waitq_headp) != NULL) { 12954 /* 12955 * A command on the waitq is ready to go, but do not 12956 * send it if: 12957 * 12958 * (1) the throttle limit has been reached, or 12959 * (2) a retry is pending, or 12960 * (3) a START_STOP_UNIT callback is pending, or 12961 * (4) a callback for a SD_PATH_DIRECT_PRIORITY 12962 * command is pending. 12963 * 12964 * For all of these conditions, IO processing will 12965 * restart after the condition is cleared. 12966 */ 12967 if (un->un_ncmds_in_transport >= un->un_throttle) { 12968 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 12969 "sd_start_cmds: exiting, " 12970 "throttle limit reached!\n"); 12971 goto exit; 12972 } 12973 if (un->un_retry_bp != NULL) { 12974 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 12975 "sd_start_cmds: exiting, retry pending!\n"); 12976 goto exit; 12977 } 12978 if (un->un_startstop_timeid != NULL) { 12979 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 12980 "sd_start_cmds: exiting, " 12981 "START_STOP pending!\n"); 12982 goto exit; 12983 } 12984 if (un->un_direct_priority_timeid != NULL) { 12985 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 12986 "sd_start_cmds: exiting, " 12987 "SD_PATH_DIRECT_PRIORITY cmd. pending!\n"); 12988 goto exit; 12989 } 12990 12991 /* Dequeue the command */ 12992 un->un_waitq_headp = bp->av_forw; 12993 if (un->un_waitq_headp == NULL) { 12994 un->un_waitq_tailp = NULL; 12995 } 12996 bp->av_forw = NULL; 12997 statp = kstat_waitq_to_runq; 12998 SD_TRACE(SD_LOG_IO_CORE, un, 12999 "sd_start_cmds: processing waitq bp:0x%p\n", bp); 13000 13001 } else { 13002 /* No work to do so bail out now */ 13003 SD_TRACE(SD_LOG_IO_CORE, un, 13004 "sd_start_cmds: no more work, exiting!\n"); 13005 goto exit; 13006 } 13007 13008 /* 13009 * Reset the state to normal. This is the mechanism by which 13010 * the state transitions from either SD_STATE_RWAIT or 13011 * SD_STATE_OFFLINE to SD_STATE_NORMAL. 13012 * If state is SD_STATE_PM_CHANGING then this command is 13013 * part of the device power control and the state must 13014 * not be put back to normal. Doing so would allow 13015 * new commands to proceed when they shouldn't, as 13016 * the device may be going off.
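 *
 * In sketch form (the parenthetical readings are informal
 * interpretations, not authoritative):
 *
 *	SD_STATE_RWAIT   -> SD_STATE_NORMAL  (resources available again)
 *	SD_STATE_OFFLINE -> SD_STATE_NORMAL  (target responding again)
 *	SD_STATE_SUSPENDED, SD_STATE_PM_CHANGING: left unchanged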
13017 */ 13018 if ((un->un_state != SD_STATE_SUSPENDED) && 13019 (un->un_state != SD_STATE_PM_CHANGING)) { 13020 New_state(un, SD_STATE_NORMAL); 13021 } 13022 13023 xp = SD_GET_XBUF(bp); 13024 ASSERT(xp != NULL); 13025 13026 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 13027 /* 13028 * Allocate the scsi_pkt if we need one, or attach DMA 13029 * resources if we have a scsi_pkt that needs them. The 13030 * latter should only occur for commands that are being 13031 * retried. 13032 */ 13033 if ((xp->xb_pktp == NULL) || 13034 ((xp->xb_pkt_flags & SD_XB_DMA_FREED) != 0)) { 13035 #else 13036 if (xp->xb_pktp == NULL) { 13037 #endif 13038 /* 13039 * There is no scsi_pkt allocated for this buf. Call 13040 * the initpkt function to allocate & init one. 13041 * 13042 * The scsi_init_pkt runout callback functionality is 13043 * implemented as follows: 13044 * 13045 * 1) The initpkt function always calls 13046 * scsi_init_pkt(9F) with sdrunout specified as the 13047 * callback routine. 13048 * 2) A successful packet allocation is initialized and 13049 * the I/O is transported. 13050 * 3) The I/O associated with an allocation resource 13051 * failure is left on its queue to be retried via 13052 * runout or the next I/O. 13053 * 4) The I/O associated with a DMA error is removed 13054 * from the queue and failed with EIO. Processing of 13055 * the transport queues is also halted to be 13056 * restarted via runout or the next I/O. 13057 * 5) The I/O associated with a CDB size or packet 13058 * size error is removed from the queue and failed 13059 * with EIO. Processing of the transport queues is 13060 * continued. 13061 * 13062 * Note: there is no interface for canceling a runout 13063 * callback. To prevent the driver from detaching or 13064 * suspending while a runout is pending, the driver 13065 * state is set to SD_STATE_RWAIT. 13066 * 13067 * Note: using the scsi_init_pkt callback facility can 13068 * result in an I/O request persisting at the head of 13069 * the list which cannot be satisfied even after 13070 * multiple retries. In the future the driver may 13071 * implement some kind of maximum runout count before 13072 * failing an I/O. 13073 * 13074 * Note: the use of funcp below may seem superfluous, 13075 * but it helps warlock figure out the correct 13076 * initpkt function calls (see [s]sd.wlcmd). 13077 */ 13078 struct scsi_pkt *pktp; 13079 int (*funcp)(struct buf *bp, struct scsi_pkt **pktp); 13080 13081 ASSERT(bp != un->un_rqs_bp); 13082 13083 funcp = sd_initpkt_map[xp->xb_chain_iostart]; 13084 switch ((*funcp)(bp, &pktp)) { 13085 case SD_PKT_ALLOC_SUCCESS: 13086 xp->xb_pktp = pktp; 13087 SD_TRACE(SD_LOG_IO_CORE, un, 13088 "sd_start_cmds: SD_PKT_ALLOC_SUCCESS 0x%p\n", 13089 pktp); 13090 goto got_pkt; 13091 13092 case SD_PKT_ALLOC_FAILURE: 13093 /* 13094 * Temporary (hopefully) resource depletion. 13095 * Since retries and RQS commands always have a 13096 * scsi_pkt allocated, these cases should never 13097 * get here. So the only cases this needs to 13098 * handle are a bp from the waitq (which we put 13099 * back onto the waitq for sdrunout), or a bp 13100 * sent as an immed_bp (which we just fail). 13101 */ 13102 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13103 "sd_start_cmds: SD_PKT_ALLOC_FAILURE\n"); 13104 13105 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 13106 13107 if (bp == immed_bp) { 13108 /* 13109 * If SD_XB_DMA_FREED is clear, then 13110 * this is a failure to allocate a 13111 * scsi_pkt, and we must fail the 13112 * command.
13113 */ 13114 if ((xp->xb_pkt_flags & 13115 SD_XB_DMA_FREED) == 0) { 13116 break; 13117 } 13118 13119 /* 13120 * If this immediate command is NOT our 13121 * un_retry_bp, then we must fail it. 13122 */ 13123 if (bp != un->un_retry_bp) { 13124 break; 13125 } 13126 13127 /* 13128 * We get here if this cmd is our 13129 * un_retry_bp that was DMAFREED, but 13130 * scsi_init_pkt() failed to reallocate 13131 * DMA resources when we attempted to 13132 * retry it. This can happen when an 13133 * mpxio failover is in progress, but 13134 * we don't want to just fail the 13135 * command in this case. 13136 * 13137 * Use timeout(9F) to restart it after 13138 * a 100ms delay. We don't want to 13139 * let sdrunout() restart it, because 13140 * sdrunout() is just supposed to start 13141 * commands that are sitting on the 13142 * wait queue. The un_retry_bp stays 13143 * set until the command completes, but 13144 * sdrunout can be called many times 13145 * before that happens. Since sdrunout 13146 * cannot tell if the un_retry_bp is 13147 * already in the transport, it could 13148 * end up calling scsi_transport() for 13149 * the un_retry_bp multiple times. 13150 * 13151 * Also: don't schedule the callback 13152 * if some other callback is already 13153 * pending. 13154 */ 13155 if (un->un_retry_statp == NULL) { 13156 /* 13157 * restore the kstat pointer to 13158 * keep kstat counts coherent 13159 * when we do retry the command. 13160 */ 13161 un->un_retry_statp = 13162 saved_statp; 13163 } 13164 13165 if ((un->un_startstop_timeid == NULL) && 13166 (un->un_retry_timeid == NULL) && 13167 (un->un_direct_priority_timeid == 13168 NULL)) { 13169 13170 un->un_retry_timeid = 13171 timeout( 13172 sd_start_retry_command, 13173 un, SD_RESTART_TIMEOUT); 13174 } 13175 goto exit; 13176 } 13177 13178 #else 13179 if (bp == immed_bp) { 13180 break; /* Just fail the command */ 13181 } 13182 #endif 13183 13184 /* Add the buf back to the head of the waitq */ 13185 bp->av_forw = un->un_waitq_headp; 13186 un->un_waitq_headp = bp; 13187 if (un->un_waitq_tailp == NULL) { 13188 un->un_waitq_tailp = bp; 13189 } 13190 goto exit; 13191 13192 case SD_PKT_ALLOC_FAILURE_NO_DMA: 13193 /* 13194 * HBA DMA resource failure. Fail the command 13195 * and continue processing of the queues. 13196 */ 13197 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13198 "sd_start_cmds: " 13199 "SD_PKT_ALLOC_FAILURE_NO_DMA\n"); 13200 break; 13201 13202 case SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL: 13203 /* 13204 * Note:x86: Partial DMA mapping not supported 13205 * for USCSI commands, and all the needed DMA 13206 * resources were not allocated. 13207 */ 13208 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13209 "sd_start_cmds: " 13210 "SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL\n"); 13211 break; 13212 13213 case SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL: 13214 /* 13215 * Note:x86: Request cannot fit into CDB based 13216 * on lba and len. 13217 */ 13218 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13219 "sd_start_cmds: " 13220 "SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL\n"); 13221 break; 13222 13223 default: 13224 /* Should NEVER get here! */ 13225 panic("scsi_initpkt error"); 13226 /*NOTREACHED*/ 13227 } 13228 13229 /* 13230 * Fatal error in allocating a scsi_pkt for this buf. 13231 * Update kstats & return the buf with an error code. 13232 * We must use sd_return_failed_command_no_restart() to 13233 * avoid a recursive call back into sd_start_cmds(). 13234 * However this also means that we must keep processing 13235 * the waitq here in order to avoid stalling. 
13236 */ 13237 if (statp == kstat_waitq_to_runq) { 13238 SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp); 13239 } 13240 sd_return_failed_command_no_restart(un, bp, EIO); 13241 if (bp == immed_bp) { 13242 /* immed_bp is gone by now, so clear this */ 13243 immed_bp = NULL; 13244 } 13245 continue; 13246 } 13247 got_pkt: 13248 if (bp == immed_bp) { 13249 /* goto the head of the class.... */ 13250 xp->xb_pktp->pkt_flags |= FLAG_HEAD; 13251 } 13252 13253 un->un_ncmds_in_transport++; 13254 SD_UPDATE_KSTATS(un, statp, bp); 13255 13256 /* 13257 * Call scsi_transport() to send the command to the target. 13258 * According to SCSA architecture, we must drop the mutex here 13259 * before calling scsi_transport() in order to avoid deadlock. 13260 * Note that the scsi_pkt's completion routine can be executed 13261 * (from interrupt context) even before the call to 13262 * scsi_transport() returns. 13263 */ 13264 SD_TRACE(SD_LOG_IO_CORE, un, 13265 "sd_start_cmds: calling scsi_transport()\n"); 13266 DTRACE_PROBE1(scsi__transport__dispatch, struct buf *, bp); 13267 13268 mutex_exit(SD_MUTEX(un)); 13269 rval = scsi_transport(xp->xb_pktp); 13270 mutex_enter(SD_MUTEX(un)); 13271 13272 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13273 "sd_start_cmds: scsi_transport() returned %d\n", rval); 13274 13275 switch (rval) { 13276 case TRAN_ACCEPT: 13277 /* Clear this with every pkt accepted by the HBA */ 13278 un->un_tran_fatal_count = 0; 13279 break; /* Success; try the next cmd (if any) */ 13280 13281 case TRAN_BUSY: 13282 un->un_ncmds_in_transport--; 13283 ASSERT(un->un_ncmds_in_transport >= 0); 13284 13285 /* 13286 * Don't retry request sense; the sense data 13287 * is lost when another request is sent. 13288 * Free up the rqs buf and retry 13289 * the original failed cmd. Update kstat. 13290 */ 13291 if (bp == un->un_rqs_bp) { 13292 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 13293 bp = sd_mark_rqs_idle(un, xp); 13294 sd_retry_command(un, bp, SD_RETRIES_STANDARD, 13295 NULL, NULL, EIO, SD_BSY_TIMEOUT / 500, 13296 kstat_waitq_enter); 13297 goto exit; 13298 } 13299 13300 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 13301 /* 13302 * Free the DMA resources for the scsi_pkt. This will 13303 * allow mpxio to select another path the next time 13304 * we call scsi_transport() with this scsi_pkt. 13305 * See sdintr() for the rationale behind this. 13306 */ 13307 if ((un->un_f_is_fibre == TRUE) && 13308 ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) && 13309 ((xp->xb_pktp->pkt_flags & FLAG_SENSING) == 0)) { 13310 scsi_dmafree(xp->xb_pktp); 13311 xp->xb_pkt_flags |= SD_XB_DMA_FREED; 13312 } 13313 #endif 13314 13315 if (SD_IS_DIRECT_PRIORITY(SD_GET_XBUF(bp))) { 13316 /* 13317 * Commands that are SD_PATH_DIRECT_PRIORITY 13318 * are for error recovery situations. These do 13319 * not use the normal command waitq, so if they 13320 * get a TRAN_BUSY we cannot put them back onto 13321 * the waitq for later retry. One possible 13322 * problem is that there could already be some 13323 * other command on un_retry_bp that is waiting 13324 * for this one to complete, so we would be 13325 * deadlocked if we put this command back onto 13326 * the waitq for later retry (since un_retry_bp 13327 * must complete before the driver gets back to 13328 * commands on the waitq). 13329 * 13330 * To avoid deadlock we must schedule a callback 13331 * that will restart this command after a set 13332 * interval.
This should keep retrying for as 13333 * long as the underlying transport keeps 13334 * returning TRAN_BUSY (just like for other 13335 * commands). Use the same timeout interval as 13336 * for the ordinary TRAN_BUSY retry. 13337 */ 13338 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13339 "sd_start_cmds: scsi_transport() returned " 13340 "TRAN_BUSY for DIRECT_PRIORITY cmd!\n"); 13341 13342 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 13343 un->un_direct_priority_timeid = 13344 timeout(sd_start_direct_priority_command, 13345 bp, SD_BSY_TIMEOUT / 500); 13346 13347 goto exit; 13348 } 13349 13350 /* 13351 * For TRAN_BUSY, we want to reduce the throttle value, 13352 * unless we are retrying a command. 13353 */ 13354 if (bp != un->un_retry_bp) { 13355 sd_reduce_throttle(un, SD_THROTTLE_TRAN_BUSY); 13356 } 13357 13358 /* 13359 * Set up the bp to be tried again 10 ms later. 13360 * Note:x86: Is there a timeout value in the sd_lun 13361 * for this condition? 13362 */ 13363 sd_set_retry_bp(un, bp, SD_BSY_TIMEOUT / 500, 13364 kstat_runq_back_to_waitq); 13365 goto exit; 13366 13367 case TRAN_FATAL_ERROR: 13368 un->un_tran_fatal_count++; 13369 /* FALLTHRU */ 13370 13371 case TRAN_BADPKT: 13372 default: 13373 un->un_ncmds_in_transport--; 13374 ASSERT(un->un_ncmds_in_transport >= 0); 13375 13376 /* 13377 * If this is our REQUEST SENSE command with a 13378 * transport error, we must get back the pointers 13379 * to the original buf, and mark the REQUEST 13380 * SENSE command as "available". 13381 */ 13382 if (bp == un->un_rqs_bp) { 13383 bp = sd_mark_rqs_idle(un, xp); 13384 xp = SD_GET_XBUF(bp); 13385 } else { 13386 /* 13387 * Legacy behavior: do not update transport 13388 * error count for request sense commands. 13389 */ 13390 SD_UPDATE_ERRSTATS(un, sd_transerrs); 13391 } 13392 13393 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 13394 sd_print_transport_rejected_message(un, xp, rval); 13395 13396 /* 13397 * We must use sd_return_failed_command_no_restart() to 13398 * avoid a recursive call back into sd_start_cmds(). 13399 * However this also means that we must keep processing 13400 * the waitq here in order to avoid stalling. 13401 */ 13402 sd_return_failed_command_no_restart(un, bp, EIO); 13403 13404 /* 13405 * Notify any threads waiting in sd_ddi_suspend() that 13406 * a command completion has occurred. 13407 */ 13408 if (un->un_state == SD_STATE_SUSPENDED) { 13409 cv_broadcast(&un->un_disk_busy_cv); 13410 } 13411 13412 if (bp == immed_bp) { 13413 /* immed_bp is gone by now, so clear this */ 13414 immed_bp = NULL; 13415 } 13416 break; 13417 } 13418 13419 } while (immed_bp == NULL); 13420 13421 exit: 13422 ASSERT(mutex_owned(SD_MUTEX(un))); 13423 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_start_cmds: exit\n"); 13424 } 13425 13426 13427 /* 13428 * Function: sd_return_command 13429 * 13430 * Description: Returns a command to its originator (with or without an 13431 * error). Also starts commands waiting to be transported 13432 * to the target. 
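 *
 *		In hedged sketch form (which destroypkt routine runs
 *		depends on xp->xb_chain_iodone; see sd_destroypkt_map):
 *
 *		sd_return_command(un, bp)
 *		    -> sd_start_cmds(un, NULL)	(restart waitq processing)
 *		    -> (*sd_destroypkt_map[xp->xb_chain_iodone])(bp)
 *		    -> SD_BEGIN_IODONE(xp->xb_chain_iodone, un, bp)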
13433 * 13434 * Context: May be called from interrupt, kernel, or timeout context 13435 */ 13436 13437 static void 13438 sd_return_command(struct sd_lun *un, struct buf *bp) 13439 { 13440 struct sd_xbuf *xp; 13441 struct scsi_pkt *pktp; 13442 13443 ASSERT(bp != NULL); 13444 ASSERT(un != NULL); 13445 ASSERT(mutex_owned(SD_MUTEX(un))); 13446 ASSERT(bp != un->un_rqs_bp); 13447 xp = SD_GET_XBUF(bp); 13448 ASSERT(xp != NULL); 13449 13450 pktp = SD_GET_PKTP(bp); 13451 13452 SD_TRACE(SD_LOG_IO_CORE, un, "sd_return_command: entry\n"); 13453 13454 /* 13455 * Note: check for the "sdrestart failed" case. 13456 */ 13457 if ((un->un_partial_dma_supported == 1) && 13458 ((xp->xb_pkt_flags & SD_XB_USCSICMD) != SD_XB_USCSICMD) && 13459 (geterror(bp) == 0) && (xp->xb_dma_resid != 0) && 13460 (xp->xb_pktp->pkt_resid == 0)) { 13461 13462 if (sd_setup_next_xfer(un, bp, pktp, xp) != 0) { 13463 /* 13464 * Successfully set up next portion of cmd 13465 * transfer, try sending it 13466 */ 13467 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, 13468 NULL, NULL, 0, (clock_t)0, NULL); 13469 sd_start_cmds(un, NULL); 13470 return; /* Note:x86: need a return here? */ 13471 } 13472 } 13473 13474 /* 13475 * If this is the failfast bp, clear it from un_failfast_bp. This 13476 * can happen if upon being re-tried the failfast bp either 13477 * succeeded or encountered another error (possibly even a different 13478 * error than the one that precipitated the failfast state, but in 13479 * that case it would have had to exhaust retries as well). Regardless, 13480 * this should not occur whenever the instance is in the active 13481 * failfast state. 13482 */ 13483 if (bp == un->un_failfast_bp) { 13484 ASSERT(un->un_failfast_state == SD_FAILFAST_INACTIVE); 13485 un->un_failfast_bp = NULL; 13486 } 13487 13488 /* 13489 * Clear the failfast state upon successful completion of ANY cmd. 13490 */ 13491 if (bp->b_error == 0) { 13492 un->un_failfast_state = SD_FAILFAST_INACTIVE; 13493 } 13494 13495 /* 13496 * This is used if the command was retried one or more times. Show that 13497 * we are done with it, and allow processing of the waitq to resume. 13498 */ 13499 if (bp == un->un_retry_bp) { 13500 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13501 "sd_return_command: un:0x%p: " 13502 "RETURNING retry_bp:0x%p\n", un, un->un_retry_bp); 13503 un->un_retry_bp = NULL; 13504 un->un_retry_statp = NULL; 13505 } 13506 13507 SD_UPDATE_RDWR_STATS(un, bp); 13508 SD_UPDATE_PARTITION_STATS(un, bp); 13509 13510 switch (un->un_state) { 13511 case SD_STATE_SUSPENDED: 13512 /* 13513 * Notify any threads waiting in sd_ddi_suspend() that 13514 * a command completion has occurred. 13515 */ 13516 cv_broadcast(&un->un_disk_busy_cv); 13517 break; 13518 default: 13519 sd_start_cmds(un, NULL); 13520 break; 13521 } 13522 13523 /* Return this command up the iodone chain to its originator. */ 13524 mutex_exit(SD_MUTEX(un)); 13525 13526 (*(sd_destroypkt_map[xp->xb_chain_iodone]))(bp); 13527 xp->xb_pktp = NULL; 13528 13529 SD_BEGIN_IODONE(xp->xb_chain_iodone, un, bp); 13530 13531 ASSERT(!mutex_owned(SD_MUTEX(un))); 13532 mutex_enter(SD_MUTEX(un)); 13533 13534 SD_TRACE(SD_LOG_IO_CORE, un, "sd_return_command: exit\n"); 13535 } 13536 13537 13538 /* 13539 * Function: sd_return_failed_command 13540 * 13541 * Description: Command completion when an error occurred. 
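 *
 *		Typical invocation, as used elsewhere in this file:
 *
 *		sd_return_failed_command(un, bp, EIO);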
13542 * 13543 * Context: May be called from interrupt context 13544 */ 13545 13546 static void 13547 sd_return_failed_command(struct sd_lun *un, struct buf *bp, int errcode) 13548 { 13549 ASSERT(bp != NULL); 13550 ASSERT(un != NULL); 13551 ASSERT(mutex_owned(SD_MUTEX(un))); 13552 13553 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13554 "sd_return_failed_command: entry\n"); 13555 13556 /* 13557 * b_resid could already be nonzero due to a partial data 13558 * transfer, so do not change it here. 13559 */ 13560 SD_BIOERROR(bp, errcode); 13561 13562 sd_return_command(un, bp); 13563 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13564 "sd_return_failed_command: exit\n"); 13565 } 13566 13567 13568 /* 13569 * Function: sd_return_failed_command_no_restart 13570 * 13571 * Description: Same as sd_return_failed_command, but ensures that no 13572 * call back into sd_start_cmds will be issued. 13573 * 13574 * Context: May be called from interrupt context 13575 */ 13576 13577 static void 13578 sd_return_failed_command_no_restart(struct sd_lun *un, struct buf *bp, 13579 int errcode) 13580 { 13581 struct sd_xbuf *xp; 13582 13583 ASSERT(bp != NULL); 13584 ASSERT(un != NULL); 13585 ASSERT(mutex_owned(SD_MUTEX(un))); 13586 xp = SD_GET_XBUF(bp); 13587 ASSERT(xp != NULL); 13588 ASSERT(errcode != 0); 13589 13590 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13591 "sd_return_failed_command_no_restart: entry\n"); 13592 13593 /* 13594 * b_resid could already be nonzero due to a partial data 13595 * transfer, so do not change it here. 13596 */ 13597 SD_BIOERROR(bp, errcode); 13598 13599 /* 13600 * If this is the failfast bp, clear it. This can happen if the 13601 * failfast bp encountered a fatal error when we attempted to 13602 * re-try it (such as a scsi_transport(9F) failure). However 13603 * we should NOT be in an active failfast state if the failfast 13604 * bp is not NULL. 13605 */ 13606 if (bp == un->un_failfast_bp) { 13607 ASSERT(un->un_failfast_state == SD_FAILFAST_INACTIVE); 13608 un->un_failfast_bp = NULL; 13609 } 13610 13611 if (bp == un->un_retry_bp) { 13612 /* 13613 * This command was retried one or more times. Show that we are 13614 * done with it, and allow processing of the waitq to resume. 13615 */ 13616 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13617 "sd_return_failed_command_no_restart: " 13618 " un:0x%p: RETURNING retry_bp:0x%p\n", un, un->un_retry_bp); 13619 un->un_retry_bp = NULL; 13620 un->un_retry_statp = NULL; 13621 } 13622 13623 SD_UPDATE_RDWR_STATS(un, bp); 13624 SD_UPDATE_PARTITION_STATS(un, bp); 13625 13626 mutex_exit(SD_MUTEX(un)); 13627 13628 if (xp->xb_pktp != NULL) { 13629 (*(sd_destroypkt_map[xp->xb_chain_iodone]))(bp); 13630 xp->xb_pktp = NULL; 13631 } 13632 13633 SD_BEGIN_IODONE(xp->xb_chain_iodone, un, bp); 13634 13635 mutex_enter(SD_MUTEX(un)); 13636 13637 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13638 "sd_return_failed_command_no_restart: exit\n"); 13639 } 13640 13641 13642 /* 13643 * Function: sd_retry_command 13644 * 13645 * Description: Queue up a command for retry, or (optionally) fail it 13646 * if retry counts are exhausted. 13647 * 13648 * Arguments: un - Pointer to the sd_lun struct for the target. 13649 * 13650 * bp - Pointer to the buf for the command to be retried. 13651 * 13652 * retry_check_flag - Flag to see which (if any) of the retry 13653 * counts should be decremented/checked. If the indicated 13654 * retry count is exhausted, then the command will not be 13655 * retried; it will be failed instead.
This should use a 13656 * value equal to one of the following: 13657 * 13658 * SD_RETRIES_NOCHECK 13659 * SD_RETRIES_STANDARD 13660 * SD_RETRIES_VICTIM 13661 * 13662 * Optionally may be bitwise-OR'ed with SD_RETRIES_ISOLATE 13663 * if the check should be made to see if FLAG_ISOLATE is set 13664 * in the pkt. If FLAG_ISOLATE is set, then the command is 13665 * not retried, it is simply failed. 13666 * 13667 * user_funcp - Ptr to function to call before dispatching the 13668 * command. May be NULL if no action needs to be performed. 13669 * (Primarily intended for printing messages.) 13670 * 13671 * user_arg - Optional argument to be passed along to 13672 * the user_funcp call. 13673 * 13674 * failure_code - errno return code to set in the bp if the 13675 * command is going to be failed. 13676 * 13677 * retry_delay - Retry delay interval in (clock_t) units. May 13678 * be zero which indicates that the command should be retried 13679 * immediately (ie, without an intervening delay). 13680 * 13681 * statp - Ptr to kstat function to be updated if the command 13682 * is queued for a delayed retry. May be NULL if no kstat 13683 * update is desired. 13684 * 13685 * Context: May be called from interrupt context. 13686 */ 13687 13688 static void 13689 sd_retry_command(struct sd_lun *un, struct buf *bp, int retry_check_flag, 13690 void (*user_funcp)(struct sd_lun *un, struct buf *bp, void *argp, int 13691 code), void *user_arg, int failure_code, clock_t retry_delay, 13692 void (*statp)(kstat_io_t *)) 13693 { 13694 struct sd_xbuf *xp; 13695 struct scsi_pkt *pktp; 13696 13697 ASSERT(un != NULL); 13698 ASSERT(mutex_owned(SD_MUTEX(un))); 13699 ASSERT(bp != NULL); 13700 xp = SD_GET_XBUF(bp); 13701 ASSERT(xp != NULL); 13702 pktp = SD_GET_PKTP(bp); 13703 ASSERT(pktp != NULL); 13704 13705 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, 13706 "sd_retry_command: entry: bp:0x%p xp:0x%p\n", bp, xp); 13707 13708 /* 13709 * If we are syncing or dumping, fail the command to avoid 13710 * recursively calling back into scsi_transport(). 13711 */ 13712 if (ddi_in_panic()) { 13713 goto fail_command_no_log; 13714 } 13715 13716 /* 13717 * We should never be retrying a command with FLAG_DIAGNOSE set, so 13718 * log an error and fail the command. 13719 */ 13720 if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) { 13721 scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE, 13722 "ERROR, retrying FLAG_DIAGNOSE command.\n"); 13723 sd_dump_memory(un, SD_LOG_IO, "CDB", 13724 (uchar_t *)pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX); 13725 sd_dump_memory(un, SD_LOG_IO, "Sense Data", 13726 (uchar_t *)xp->xb_sense_data, SENSE_LENGTH, SD_LOG_HEX); 13727 goto fail_command; 13728 } 13729 13730 /* 13731 * If we are suspended, then put the command onto the head of the 13732 * wait queue since we don't want to start more commands, and 13733 * clear the un_retry_bp. The next time we are resumed, the 13734 * command in the wait queue will be handled.
13735 */ 13736 switch (un->un_state) { 13737 case SD_STATE_SUSPENDED: 13738 case SD_STATE_DUMPING: 13739 bp->av_forw = un->un_waitq_headp; 13740 un->un_waitq_headp = bp; 13741 if (un->un_waitq_tailp == NULL) { 13742 un->un_waitq_tailp = bp; 13743 } 13744 if (bp == un->un_retry_bp) { 13745 un->un_retry_bp = NULL; 13746 un->un_retry_statp = NULL; 13747 } 13748 SD_UPDATE_KSTATS(un, kstat_waitq_enter, bp); 13749 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: " 13750 "exiting; cmd bp:0x%p requeued for SUSPEND/DUMP\n", bp); 13751 return; 13752 default: 13753 break; 13754 } 13755 13756 /* 13757 * If the caller wants us to check FLAG_ISOLATE, then see if that 13758 * is set; if it is then we do not want to retry the command. 13759 * Normally, FLAG_ISOLATE is only used with USCSI cmds. 13760 */ 13761 if ((retry_check_flag & SD_RETRIES_ISOLATE) != 0) { 13762 if ((pktp->pkt_flags & FLAG_ISOLATE) != 0) { 13763 goto fail_command; 13764 } 13765 } 13766 13767 13768 /* 13769 * If SD_RETRIES_FAILFAST is set, it indicates that either a 13770 * command timeout or a selection timeout has occurred. This means 13771 * that we were unable to establish any kind of communication with 13772 * the target, and subsequent retries and/or commands are likely 13773 * to encounter similar results and take a long time to complete. 13774 * 13775 * If this is a failfast error condition, we need to update the 13776 * failfast state, even if this bp does not have B_FAILFAST set. 13777 */ 13778 if (retry_check_flag & SD_RETRIES_FAILFAST) { 13779 if (un->un_failfast_state == SD_FAILFAST_ACTIVE) { 13780 ASSERT(un->un_failfast_bp == NULL); 13781 /* 13782 * If we are already in the active failfast state, and 13783 * another failfast error condition has been detected, 13784 * then fail this command if it has B_FAILFAST set. 13785 * If B_FAILFAST is clear, then maintain the legacy 13786 * behavior of retrying heroically, even though this will 13787 * take a lot more time to fail the command. 13788 */ 13789 if (bp->b_flags & B_FAILFAST) { 13790 goto fail_command; 13791 } 13792 } else { 13793 /* 13794 * We're not in the active failfast state, but we 13795 * have a failfast error condition, so we must begin 13796 * transition to the next state. We do this regardless 13797 * of whether or not this bp has B_FAILFAST set. 13798 */ 13799 if (un->un_failfast_bp == NULL) { 13800 /* 13801 * This is the first bp to meet a failfast 13802 * condition so save it on un_failfast_bp & 13803 * do normal retry processing. Do not enter 13804 * active failfast state yet. This marks 13805 * entry into the "failfast pending" state. 13806 */ 13807 un->un_failfast_bp = bp; 13808 13809 } else if (un->un_failfast_bp == bp) { 13810 /* 13811 * This is the second time *this* bp has 13812 * encountered a failfast error condition, 13813 * so enter active failfast state & flush 13814 * queues as appropriate. 13815 */ 13816 un->un_failfast_state = SD_FAILFAST_ACTIVE; 13817 un->un_failfast_bp = NULL; 13818 sd_failfast_flushq(un); 13819 13820 /* 13821 * Fail this bp now if B_FAILFAST set; 13822 * otherwise continue with retries. (It would 13823 * be pretty ironic if this bp succeeded on a 13824 * subsequent retry after we just flushed all 13825 * the queues).
13826 */ 13827 if (bp->b_flags & B_FAILFAST) { 13828 goto fail_command; 13829 } 13830 13831 #if !defined(lint) && !defined(__lint) 13832 } else { 13833 /* 13834 * If neither of the preceding conditionals 13835 * was true, it means that there is some 13836 * *other* bp that has met an initial failfast 13837 * condition and is currently either being 13838 * retried or is waiting to be retried. In 13839 * that case we should perform normal retry 13840 * processing on *this* bp, since there is a 13841 * chance that the current failfast condition 13842 * is transient and recoverable. If that does 13843 * not turn out to be the case, then retries 13844 * will be cleared when the wait queue is 13845 * flushed anyway. 13846 */ 13847 #endif 13848 } 13849 } 13850 } else { 13851 /* 13852 * SD_RETRIES_FAILFAST is clear, which indicates that we 13853 * likely were able to at least establish some level of 13854 * communication with the target and subsequent commands 13855 * and/or retries are likely to get through to the target. 13856 * In this case we want to be aggressive about clearing 13857 * the failfast state. Note that this does not affect 13858 * the "failfast pending" condition. 13859 */ 13860 un->un_failfast_state = SD_FAILFAST_INACTIVE; 13861 } 13862 13863 13864 /* 13865 * Check the specified retry count to see if we can still do 13866 * any retries with this pkt before we should fail it. 13867 */ 13868 switch (retry_check_flag & SD_RETRIES_MASK) { 13869 case SD_RETRIES_VICTIM: 13870 /* 13871 * Check the victim retry count. If exhausted, then fall 13872 * thru & check against the standard retry count. 13873 */ 13874 if (xp->xb_victim_retry_count < un->un_victim_retry_count) { 13875 /* Increment count & proceed with the retry */ 13876 xp->xb_victim_retry_count++; 13877 break; 13878 } 13879 /* Victim retries exhausted, fall back to std. retries... */ 13880 /* FALLTHRU */ 13881 13882 case SD_RETRIES_STANDARD: 13883 if (xp->xb_retry_count >= un->un_retry_count) { 13884 /* Retries exhausted, fail the command */ 13885 SD_TRACE(SD_LOG_IO_CORE, un, 13886 "sd_retry_command: retries exhausted!\n"); 13887 /* 13888 * Update b_resid for failed SCMD_READ & SCMD_WRITE 13889 * commands with nonzero pkt_resid. 13890 */ 13891 if ((pktp->pkt_reason == CMD_CMPLT) && 13892 (SD_GET_PKT_STATUS(pktp) == STATUS_GOOD) && 13893 (pktp->pkt_resid != 0)) { 13894 uchar_t op = SD_GET_PKT_OPCODE(pktp) & 0x1F; 13895 if ((op == SCMD_READ) || (op == SCMD_WRITE)) { 13896 SD_UPDATE_B_RESID(bp, pktp); 13897 } 13898 } 13899 goto fail_command; 13900 } 13901 xp->xb_retry_count++; 13902 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13903 "sd_retry_command: retry count:%d\n", xp->xb_retry_count); 13904 break; 13905 13906 case SD_RETRIES_UA: 13907 if (xp->xb_ua_retry_count >= sd_ua_retry_count) { 13908 /* Retries exhausted, fail the command */ 13909 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 13910 "Unit Attention retries exhausted. 
" 13911 "Check the target.\n"); 13912 goto fail_command; 13913 } 13914 xp->xb_ua_retry_count++; 13915 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13916 "sd_retry_command: retry count:%d\n", 13917 xp->xb_ua_retry_count); 13918 break; 13919 13920 case SD_RETRIES_BUSY: 13921 if (xp->xb_retry_count >= un->un_busy_retry_count) { 13922 /* Retries exhausted, fail the command */ 13923 SD_TRACE(SD_LOG_IO_CORE, un, 13924 "sd_retry_command: retries exhausted!\n"); 13925 goto fail_command; 13926 } 13927 xp->xb_retry_count++; 13928 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13929 "sd_retry_command: retry count:%d\n", xp->xb_retry_count); 13930 break; 13931 13932 case SD_RETRIES_NOCHECK: 13933 default: 13934 /* No retry count to check. Just proceed with the retry */ 13935 break; 13936 } 13937 13938 xp->xb_pktp->pkt_flags |= FLAG_HEAD; 13939 13940 /* 13941 * If we were given a zero timeout, we must attempt to retry the 13942 * command immediately (ie, without a delay). 13943 */ 13944 if (retry_delay == 0) { 13945 /* 13946 * Check some limiting conditions to see if we can actually 13947 * do the immediate retry. If we cannot, then we must 13948 * fall back to queueing up a delayed retry. 13949 */ 13950 if (un->un_ncmds_in_transport >= un->un_throttle) { 13951 /* 13952 * We are at the throttle limit for the target, 13953 * fall back to delayed retry. 13954 */ 13955 retry_delay = SD_BSY_TIMEOUT; 13956 statp = kstat_waitq_enter; 13957 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13958 "sd_retry_command: immed. retry hit " 13959 "throttle!\n"); 13960 } else { 13961 /* 13962 * We're clear to proceed with the immediate retry. 13963 * First call the user-provided function (if any) 13964 */ 13965 if (user_funcp != NULL) { 13966 (*user_funcp)(un, bp, user_arg, 13967 SD_IMMEDIATE_RETRY_ISSUED); 13968 #ifdef __lock_lint 13969 sd_print_incomplete_msg(un, bp, user_arg, 13970 SD_IMMEDIATE_RETRY_ISSUED); 13971 sd_print_cmd_incomplete_msg(un, bp, user_arg, 13972 SD_IMMEDIATE_RETRY_ISSUED); 13973 sd_print_sense_failed_msg(un, bp, user_arg, 13974 SD_IMMEDIATE_RETRY_ISSUED); 13975 #endif 13976 } 13977 13978 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13979 "sd_retry_command: issuing immediate retry\n"); 13980 13981 /* 13982 * Call sd_start_cmds() to transport the command to 13983 * the target. 13984 */ 13985 sd_start_cmds(un, bp); 13986 13987 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13988 "sd_retry_command exit\n"); 13989 return; 13990 } 13991 } 13992 13993 /* 13994 * Set up to retry the command after a delay. 13995 * First call the user-provided function (if any) 13996 */ 13997 if (user_funcp != NULL) { 13998 (*user_funcp)(un, bp, user_arg, SD_DELAYED_RETRY_ISSUED); 13999 } 14000 14001 sd_set_retry_bp(un, bp, retry_delay, statp); 14002 14003 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: exit\n"); 14004 return; 14005 14006 fail_command: 14007 14008 if (user_funcp != NULL) { 14009 (*user_funcp)(un, bp, user_arg, SD_NO_RETRY_ISSUED); 14010 } 14011 14012 fail_command_no_log: 14013 14014 SD_INFO(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14015 "sd_retry_command: returning failed command\n"); 14016 14017 sd_return_failed_command(un, bp, failure_code); 14018 14019 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: exit\n"); 14020 } 14021 14022 14023 /* 14024 * Function: sd_set_retry_bp 14025 * 14026 * Description: Set up the given bp for retry. 
14027 * 14028 * Arguments: un - ptr to associated softstate 14029 * bp - ptr to buf(9S) for the command 14030 * retry_delay - time interval before issuing retry (may be 0) 14031 * statp - optional pointer to kstat function 14032 * 14033 * Context: May be called under interrupt context 14034 */ 14035 14036 static void 14037 sd_set_retry_bp(struct sd_lun *un, struct buf *bp, clock_t retry_delay, 14038 void (*statp)(kstat_io_t *)) 14039 { 14040 ASSERT(un != NULL); 14041 ASSERT(mutex_owned(SD_MUTEX(un))); 14042 ASSERT(bp != NULL); 14043 14044 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, 14045 "sd_set_retry_bp: entry: un:0x%p bp:0x%p\n", un, bp); 14046 14047 /* 14048 * Indicate that the command is being retried. This will not allow any 14049 * other commands on the wait queue to be transported to the target 14050 * until this command has been completed (success or failure). The 14051 * "retry command" is not transported to the target until the given 14052 * time delay expires, unless the user specified a 0 retry_delay. 14053 * 14054 * Note: the timeout(9F) callback routine is what actually calls 14055 * sd_start_cmds() to transport the command, with the exception of a 14056 * zero retry_delay. The only current implementor of a zero retry delay 14057 * is the case where a START_STOP_UNIT is sent to spin-up a device. 14058 */ 14059 if (un->un_retry_bp == NULL) { 14060 ASSERT(un->un_retry_statp == NULL); 14061 un->un_retry_bp = bp; 14062 14063 /* 14064 * If the user has not specified a delay the command should 14065 * be queued and no timeout should be scheduled. 14066 */ 14067 if (retry_delay == 0) { 14068 /* 14069 * Save the kstat pointer that will be used in the 14070 * call to SD_UPDATE_KSTATS() below, so that 14071 * sd_start_cmds() can correctly decrement the waitq 14072 * count when it is time to transport this command. 14073 */ 14074 un->un_retry_statp = statp; 14075 goto done; 14076 } 14077 } 14078 14079 if (un->un_retry_bp == bp) { 14080 /* 14081 * Save the kstat pointer that will be used in the call to 14082 * SD_UPDATE_KSTATS() below, so that sd_start_cmds() can 14083 * correctly decrement the waitq count when it is time to 14084 * transport this command. 14085 */ 14086 un->un_retry_statp = statp; 14087 14088 /* 14089 * Schedule a timeout if: 14090 * 1) The user has specified a delay. 14091 * 2) There is not a START_STOP_UNIT callback pending. 14092 * 14093 * If no delay has been specified, then it is up to the caller 14094 * to ensure that IO processing continues without stalling. 14095 * Effectively, this means that the caller will issue the 14096 * required call to sd_start_cmds(). The START_STOP_UNIT 14097 * callback does this after the START STOP UNIT command has 14098 * completed. In either of these cases we should not schedule 14099 * a timeout callback here. Also don't schedule the timeout if 14100 * an SD_PATH_DIRECT_PRIORITY command is waiting to restart. 14101 */ 14102 if ((retry_delay != 0) && (un->un_startstop_timeid == NULL) && 14103 (un->un_direct_priority_timeid == NULL)) { 14104 un->un_retry_timeid = 14105 timeout(sd_start_retry_command, un, retry_delay); 14106 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14107 "sd_set_retry_bp: setting timeout: un: 0x%p" 14108 " bp:0x%p un_retry_timeid:0x%p\n", 14109 un, bp, un->un_retry_timeid); 14110 } 14111 } else { 14112 /* 14113 * We only get in here if there is already another command 14114 * waiting to be retried. 
In this case, we just put the 14115 * given command onto the wait queue, so it can be transported 14116 * after the current retry command has completed. 14117 * 14118 * Also we have to make sure that if the command at the head 14119 * of the wait queue is the un_failfast_bp, we do not put 14120 * any other commands that are to be retried ahead of it. 14121 */ 14122 if ((un->un_failfast_bp != NULL) && 14123 (un->un_failfast_bp == un->un_waitq_headp)) { 14124 /* 14125 * Enqueue this command AFTER the first command on 14126 * the wait queue (which is also un_failfast_bp). 14127 */ 14128 bp->av_forw = un->un_waitq_headp->av_forw; 14129 un->un_waitq_headp->av_forw = bp; 14130 if (un->un_waitq_headp == un->un_waitq_tailp) { 14131 un->un_waitq_tailp = bp; 14132 } 14133 } else { 14134 /* Enqueue this command at the head of the waitq. */ 14135 bp->av_forw = un->un_waitq_headp; 14136 un->un_waitq_headp = bp; 14137 if (un->un_waitq_tailp == NULL) { 14138 un->un_waitq_tailp = bp; 14139 } 14140 } 14141 14142 if (statp == NULL) { 14143 statp = kstat_waitq_enter; 14144 } 14145 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14146 "sd_set_retry_bp: un:0x%p already delayed retry\n", un); 14147 } 14148 14149 done: 14150 if (statp != NULL) { 14151 SD_UPDATE_KSTATS(un, statp, bp); 14152 } 14153 14154 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14155 "sd_set_retry_bp: exit un:0x%p\n", un); 14156 } 14157 14158 14159 /* 14160 * Function: sd_start_retry_command 14161 * 14162 * Description: Start the command that has been waiting on the target's 14163 * retry queue. Called from timeout(9F) context after the 14164 * retry delay interval has expired. 14165 * 14166 * Arguments: arg - pointer to associated softstate for the device. 14167 * 14168 * Context: timeout(9F) thread context. May not sleep. 14169 */ 14170 14171 static void 14172 sd_start_retry_command(void *arg) 14173 { 14174 struct sd_lun *un = arg; 14175 14176 ASSERT(un != NULL); 14177 ASSERT(!mutex_owned(SD_MUTEX(un))); 14178 14179 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14180 "sd_start_retry_command: entry\n"); 14181 14182 mutex_enter(SD_MUTEX(un)); 14183 14184 un->un_retry_timeid = NULL; 14185 14186 if (un->un_retry_bp != NULL) { 14187 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14188 "sd_start_retry_command: un:0x%p STARTING bp:0x%p\n", 14189 un, un->un_retry_bp); 14190 sd_start_cmds(un, un->un_retry_bp); 14191 } 14192 14193 mutex_exit(SD_MUTEX(un)); 14194 14195 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14196 "sd_start_retry_command: exit\n"); 14197 } 14198 14199 14200 /* 14201 * Function: sd_start_direct_priority_command 14202 * 14203 * Description: Used to re-start an SD_PATH_DIRECT_PRIORITY command that had 14204 * received TRAN_BUSY when we called scsi_transport() to send it 14205 * to the underlying HBA. This function is called from timeout(9F) 14206 * context after the delay interval has expired. 14207 * 14208 * Arguments: arg - pointer to associated buf(9S) to be restarted. 14209 * 14210 * Context: timeout(9F) thread context. May not sleep.
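 *
 *		Scheduled from the TRAN_BUSY handling in sd_start_cmds(),
 *		in sketch form:
 *
 *		un->un_direct_priority_timeid =
 *		    timeout(sd_start_direct_priority_command,
 *		    bp, SD_BSY_TIMEOUT / 500);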
14211 */ 14212 14213 static void 14214 sd_start_direct_priority_command(void *arg) 14215 { 14216 struct buf *priority_bp = arg; 14217 struct sd_lun *un; 14218 14219 ASSERT(priority_bp != NULL); 14220 un = SD_GET_UN(priority_bp); 14221 ASSERT(un != NULL); 14222 ASSERT(!mutex_owned(SD_MUTEX(un))); 14223 14224 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14225 "sd_start_direct_priority_command: entry\n"); 14226 14227 mutex_enter(SD_MUTEX(un)); 14228 un->un_direct_priority_timeid = NULL; 14229 sd_start_cmds(un, priority_bp); 14230 mutex_exit(SD_MUTEX(un)); 14231 14232 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14233 "sd_start_direct_priority_command: exit\n"); 14234 } 14235 14236 14237 /* 14238 * Function: sd_send_request_sense_command 14239 * 14240 * Description: Sends a REQUEST SENSE command to the target 14241 * 14242 * Context: May be called from interrupt context. 14243 */ 14244 14245 static void 14246 sd_send_request_sense_command(struct sd_lun *un, struct buf *bp, 14247 struct scsi_pkt *pktp) 14248 { 14249 ASSERT(bp != NULL); 14250 ASSERT(un != NULL); 14251 ASSERT(mutex_owned(SD_MUTEX(un))); 14252 14253 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_send_request_sense_command: " 14254 "entry: buf:0x%p\n", bp); 14255 14256 /* 14257 * If we are syncing or dumping, then fail the command to avoid a 14258 * recursive callback into scsi_transport(). Also fail the command 14259 * if we are suspended (legacy behavior). 14260 */ 14261 if (ddi_in_panic() || (un->un_state == SD_STATE_SUSPENDED) || 14262 (un->un_state == SD_STATE_DUMPING)) { 14263 sd_return_failed_command(un, bp, EIO); 14264 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14265 "sd_send_request_sense_command: syncing/dumping, exit\n"); 14266 return; 14267 } 14268 14269 /* 14270 * Retry the failed command and don't issue the request sense if: 14271 * 1) the sense buf is busy 14272 * 2) we have 1 or more outstanding commands on the target 14273 * (the sense data will be cleared or invalidated anyway) 14274 * 14275 * Note: There could be an issue with not checking a retry limit here; 14276 * the problem is determining which retry limit to check. 14277 */ 14278 if ((un->un_sense_isbusy != 0) || (un->un_ncmds_in_transport > 0)) { 14279 /* Don't retry if the command is flagged as non-retryable */ 14280 if ((pktp->pkt_flags & FLAG_DIAGNOSE) == 0) { 14281 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, 14282 NULL, NULL, 0, SD_BSY_TIMEOUT, kstat_waitq_enter); 14283 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14284 "sd_send_request_sense_command: " 14285 "at full throttle, retrying exit\n"); 14286 } else { 14287 sd_return_failed_command(un, bp, EIO); 14288 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14289 "sd_send_request_sense_command: " 14290 "at full throttle, non-retryable exit\n"); 14291 } 14292 return; 14293 } 14294 14295 sd_mark_rqs_busy(un, bp); 14296 sd_start_cmds(un, un->un_rqs_bp); 14297 14298 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14299 "sd_send_request_sense_command: exit\n"); 14300 } 14301 14302 14303 /* 14304 * Function: sd_mark_rqs_busy 14305 * 14306 * Description: Indicate that the request sense bp for this instance is 14307 * in use.
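 *
 *		The RQS buf lifecycle, in hedged sketch form (see
 *		sd_send_request_sense_command() and sd_mark_rqs_idle()):
 *
 *		sd_send_request_sense_command()
 *		    -> sd_mark_rqs_busy(un, bp)
 *			   (un_sense_isbusy = 1; xb_sense_bp = bp)
 *		    -> sd_start_cmds(un, un->un_rqs_bp)
 *		... sense data returned; completion routine runs ...
 *		    -> bp = sd_mark_rqs_idle(un, sense_xp)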
14308 * 14309 * Context: May be called under interrupt context 14310 */ 14311 14312 static void 14313 sd_mark_rqs_busy(struct sd_lun *un, struct buf *bp) 14314 { 14315 struct sd_xbuf *sense_xp; 14316 14317 ASSERT(un != NULL); 14318 ASSERT(bp != NULL); 14319 ASSERT(mutex_owned(SD_MUTEX(un))); 14320 ASSERT(un->un_sense_isbusy == 0); 14321 14322 SD_TRACE(SD_LOG_IO_CORE, un, "sd_mark_rqs_busy: entry: " 14323 "buf:0x%p xp:0x%p un:0x%p\n", bp, SD_GET_XBUF(bp), un); 14324 14325 sense_xp = SD_GET_XBUF(un->un_rqs_bp); 14326 ASSERT(sense_xp != NULL); 14327 14328 SD_INFO(SD_LOG_IO, un, 14329 "sd_mark_rqs_busy: entry: sense_xp:0x%p\n", sense_xp); 14330 14331 ASSERT(sense_xp->xb_pktp != NULL); 14332 ASSERT((sense_xp->xb_pktp->pkt_flags & (FLAG_SENSING | FLAG_HEAD)) 14333 == (FLAG_SENSING | FLAG_HEAD)); 14334 14335 un->un_sense_isbusy = 1; 14336 un->un_rqs_bp->b_resid = 0; 14337 sense_xp->xb_pktp->pkt_resid = 0; 14338 sense_xp->xb_pktp->pkt_reason = 0; 14339 14340 /* So we can get back the bp at interrupt time! */ 14341 sense_xp->xb_sense_bp = bp; 14342 14343 bzero(un->un_rqs_bp->b_un.b_addr, SENSE_LENGTH); 14344 14345 /* 14346 * Mark this buf as awaiting sense data. (This is already set in 14347 * the pkt_flags for the RQS packet.) 14348 */ 14349 ((SD_GET_XBUF(bp))->xb_pktp)->pkt_flags |= FLAG_SENSING; 14350 14351 sense_xp->xb_retry_count = 0; 14352 sense_xp->xb_victim_retry_count = 0; 14353 sense_xp->xb_ua_retry_count = 0; 14354 sense_xp->xb_nr_retry_count = 0; 14355 sense_xp->xb_dma_resid = 0; 14356 14357 /* Clean up the fields for auto-request sense */ 14358 sense_xp->xb_sense_status = 0; 14359 sense_xp->xb_sense_state = 0; 14360 sense_xp->xb_sense_resid = 0; 14361 bzero(sense_xp->xb_sense_data, sizeof (sense_xp->xb_sense_data)); 14362 14363 SD_TRACE(SD_LOG_IO_CORE, un, "sd_mark_rqs_busy: exit\n"); 14364 } 14365 14366 14367 /* 14368 * Function: sd_mark_rqs_idle 14369 * 14370 * Description: SD_MUTEX must be held continuously through this routine 14371 * to prevent reuse of the rqs struct before the caller can 14372 * complete its processing. 14373 * 14374 * Return Code: Pointer to the RQS buf 14375 * 14376 * Context: May be called under interrupt context 14377 */ 14378 14379 static struct buf * 14380 sd_mark_rqs_idle(struct sd_lun *un, struct sd_xbuf *sense_xp) 14381 { 14382 struct buf *bp; 14383 ASSERT(un != NULL); 14384 ASSERT(sense_xp != NULL); 14385 ASSERT(mutex_owned(SD_MUTEX(un))); 14386 ASSERT(un->un_sense_isbusy != 0); 14387 14388 un->un_sense_isbusy = 0; 14389 bp = sense_xp->xb_sense_bp; 14390 sense_xp->xb_sense_bp = NULL; 14391 14392 /* This pkt is no longer interested in getting sense data */ 14393 ((SD_GET_XBUF(bp))->xb_pktp)->pkt_flags &= ~FLAG_SENSING; 14394 14395 return (bp); 14396 } 14397 14398 14399 14400 /* 14401 * Function: sd_alloc_rqs 14402 * 14403 * Description: Set up the unit to receive auto request sense data 14404 * 14405 * Return Code: DDI_SUCCESS or DDI_FAILURE 14406 * 14407 * Context: Called under attach(9E) context 14408 */ 14409 14410 static int 14411 sd_alloc_rqs(struct scsi_device *devp, struct sd_lun *un) 14412 { 14413 struct sd_xbuf *xp; 14414 14415 ASSERT(un != NULL); 14416 ASSERT(!mutex_owned(SD_MUTEX(un))); 14417 ASSERT(un->un_rqs_bp == NULL); 14418 ASSERT(un->un_rqs_pktp == NULL); 14419 14420 /* 14421 * First allocate the required buf and scsi_pkt structs, then set up 14422 * the CDB in the scsi_pkt for a REQUEST SENSE command.
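 *
 * In hedged sketch form, the resulting 6-byte GROUP0 CDB
 * (SCMD_REQUEST_SENSE is the standard 0x03 opcode; byte 4 is the
 * allocation length):
 *
 *	{ 0x03, 0x00, 0x00, 0x00, MAX_SENSE_LENGTH, 0x00 }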
14423 */ 14424 un->un_rqs_bp = scsi_alloc_consistent_buf(&devp->sd_address, NULL, 14425 MAX_SENSE_LENGTH, B_READ, SLEEP_FUNC, NULL); 14426 if (un->un_rqs_bp == NULL) { 14427 return (DDI_FAILURE); 14428 } 14429 14430 un->un_rqs_pktp = scsi_init_pkt(&devp->sd_address, NULL, un->un_rqs_bp, 14431 CDB_GROUP0, 1, 0, PKT_CONSISTENT, SLEEP_FUNC, NULL); 14432 14433 if (un->un_rqs_pktp == NULL) { 14434 sd_free_rqs(un); 14435 return (DDI_FAILURE); 14436 } 14437 14438 /* Set up the CDB in the scsi_pkt for a REQUEST SENSE command. */ 14439 (void) scsi_setup_cdb((union scsi_cdb *)un->un_rqs_pktp->pkt_cdbp, 14440 SCMD_REQUEST_SENSE, 0, MAX_SENSE_LENGTH, 0); 14441 14442 SD_FILL_SCSI1_LUN(un, un->un_rqs_pktp); 14443 14444 /* Set up the other needed members in the ARQ scsi_pkt. */ 14445 un->un_rqs_pktp->pkt_comp = sdintr; 14446 un->un_rqs_pktp->pkt_time = sd_io_time; 14447 un->un_rqs_pktp->pkt_flags |= 14448 (FLAG_SENSING | FLAG_HEAD); /* (1222170) */ 14449 14450 /* 14451 * Allocate & init the sd_xbuf struct for the RQS command. Do not 14452 * provide any initpkt or destroypkt routines, as we take care of 14453 * scsi_pkt allocation/freeing here and in sd_free_rqs(). 14454 */ 14455 xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP); 14456 sd_xbuf_init(un, un->un_rqs_bp, xp, SD_CHAIN_NULL, NULL); 14457 xp->xb_pktp = un->un_rqs_pktp; 14458 SD_INFO(SD_LOG_ATTACH_DETACH, un, 14459 "sd_alloc_rqs: un 0x%p, rqs xp 0x%p, pkt 0x%p, buf 0x%p\n", 14460 un, xp, un->un_rqs_pktp, un->un_rqs_bp); 14461 14462 /* 14463 * Save the pointer to the request sense private bp so it can 14464 * be retrieved in sdintr. 14465 */ 14466 un->un_rqs_pktp->pkt_private = un->un_rqs_bp; 14467 ASSERT(un->un_rqs_bp->b_private == xp); 14468 14469 /* 14470 * See if the HBA supports auto-request sense for the specified 14471 * target/lun. If it does, then try to enable it (if not already 14472 * enabled). 14473 * 14474 * Note: For some HBAs (ifp & sf), scsi_ifsetcap will always return 14475 * failure, while for other HBAs (pln) scsi_ifsetcap will always 14476 * return success. However, in both of these cases ARQ is always 14477 * enabled and scsi_ifgetcap will always return true. The best approach 14478 * is to issue the scsi_ifgetcap() first, then try the scsi_ifsetcap(). 14479 * 14480 * The third case is an HBA (adp) that always returns enabled on 14481 * scsi_ifgetcap even when ARQ is not enabled; the best approach 14482 * there is to issue a scsi_ifsetcap and then a scsi_ifgetcap. 14483 * Note: this case is to circumvent the Adaptec bug. (x86 only) 14484 */ 14485 14486 if (un->un_f_is_fibre == TRUE) { 14487 un->un_f_arq_enabled = TRUE; 14488 } else { 14489 #if defined(__i386) || defined(__amd64) 14490 /* 14491 * Circumvent the Adaptec bug, remove this code when 14492 * the bug is fixed 14493 */ 14494 (void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 1, 1); 14495 #endif 14496 switch (scsi_ifgetcap(SD_ADDRESS(un), "auto-rqsense", 1)) { 14497 case 0: 14498 SD_INFO(SD_LOG_ATTACH_DETACH, un, 14499 "sd_alloc_rqs: HBA supports ARQ\n"); 14500 /* 14501 * ARQ is supported by this HBA but currently is not 14502 * enabled. Attempt to enable it and if successful then 14503 * mark this instance as ARQ enabled.
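 * (For reference, the DDI convention relied upon in this switch: scsi_ifgetcap() returns 1 when "auto-rqsense" is currently enabled, 0 when it is supported but disabled, and -1 when the capability is undefined, which is why the default case treats the HBA as not supporting ARQ; scsi_ifsetcap() returns 1 on success.)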
14504 */ 14505 if (scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 1, 1) 14506 == 1) { 14507 /* Successfully enabled ARQ in the HBA */ 14508 SD_INFO(SD_LOG_ATTACH_DETACH, un, 14509 "sd_alloc_rqs: ARQ enabled\n"); 14510 un->un_f_arq_enabled = TRUE; 14511 } else { 14512 /* Could not enable ARQ in the HBA */ 14513 SD_INFO(SD_LOG_ATTACH_DETACH, un, 14514 "sd_alloc_rqs: failed ARQ enable\n"); 14515 un->un_f_arq_enabled = FALSE; 14516 } 14517 break; 14518 case 1: 14519 /* 14520 * ARQ is supported by this HBA and is already enabled. 14521 * Just mark ARQ as enabled for this instance. 14522 */ 14523 SD_INFO(SD_LOG_ATTACH_DETACH, un, 14524 "sd_alloc_rqs: ARQ already enabled\n"); 14525 un->un_f_arq_enabled = TRUE; 14526 break; 14527 default: 14528 /* 14529 * ARQ is not supported by this HBA; disable it for this 14530 * instance. 14531 */ 14532 SD_INFO(SD_LOG_ATTACH_DETACH, un, 14533 "sd_alloc_rqs: HBA does not support ARQ\n"); 14534 un->un_f_arq_enabled = FALSE; 14535 break; 14536 } 14537 } 14538 14539 return (DDI_SUCCESS); 14540 } 14541 14542 14543 /* 14544 * Function: sd_free_rqs 14545 * 14546 * Description: Cleanup for the per-instance RQS command. 14547 * 14548 * Context: Kernel thread context 14549 */ 14550 14551 static void 14552 sd_free_rqs(struct sd_lun *un) 14553 { 14554 ASSERT(un != NULL); 14555 14556 SD_TRACE(SD_LOG_IO_CORE, un, "sd_free_rqs: entry\n"); 14557 14558 /* 14559 * If consistent memory is bound to a scsi_pkt, the pkt 14560 * has to be destroyed *before* freeing the consistent memory. 14561 * Don't change the sequence of these operations: 14562 * scsi_destroy_pkt() might access the memory, which is not 14563 * allowed after it has been freed by scsi_free_consistent_buf(). 14564 */ 14565 if (un->un_rqs_pktp != NULL) { 14566 scsi_destroy_pkt(un->un_rqs_pktp); 14567 un->un_rqs_pktp = NULL; 14568 } 14569 14570 if (un->un_rqs_bp != NULL) { 14571 struct sd_xbuf *xp = SD_GET_XBUF(un->un_rqs_bp); 14572 if (xp != NULL) { 14573 kmem_free(xp, sizeof (struct sd_xbuf)); 14574 } 14575 scsi_free_consistent_buf(un->un_rqs_bp); 14576 un->un_rqs_bp = NULL; 14577 } 14578 SD_TRACE(SD_LOG_IO_CORE, un, "sd_free_rqs: exit\n"); 14579 } 14580 14581 14582 14583 /* 14584 * Function: sd_reduce_throttle 14585 * 14586 * Description: Reduces the maximum # of outstanding commands on a 14587 * target to the current number of outstanding commands. 14588 * Queues a timeout(9F) callback to restore the limit 14589 * after a specified interval has elapsed. 14590 * Typically used when we get a TRAN_BUSY return code 14591 * back from scsi_transport().
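 * For illustration (hypothetical numbers): if un_throttle is 16 and five commands are still outstanding when a TRAN_BUSY comes back, the adaptive path below saves 16 in un_busy_throttle and lowers un_throttle to 5; the queued sd_restore_throttle() callback later raises the limit again.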
14592 * 14593 * Arguments: un - ptr to the sd_lun softstate struct 14594 * throttle_type: SD_THROTTLE_TRAN_BUSY or SD_THROTTLE_QFULL 14595 * 14596 * Context: May be called from interrupt context 14597 */ 14598 14599 static void 14600 sd_reduce_throttle(struct sd_lun *un, int throttle_type) 14601 { 14602 ASSERT(un != NULL); 14603 ASSERT(mutex_owned(SD_MUTEX(un))); 14604 ASSERT(un->un_ncmds_in_transport >= 0); 14605 14606 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reduce_throttle: " 14607 "entry: un:0x%p un_throttle:%d un_ncmds_in_transport:%d\n", 14608 un, un->un_throttle, un->un_ncmds_in_transport); 14609 14610 if (un->un_throttle > 1) { 14611 if (un->un_f_use_adaptive_throttle == TRUE) { 14612 switch (throttle_type) { 14613 case SD_THROTTLE_TRAN_BUSY: 14614 if (un->un_busy_throttle == 0) { 14615 un->un_busy_throttle = un->un_throttle; 14616 } 14617 break; 14618 case SD_THROTTLE_QFULL: 14619 un->un_busy_throttle = 0; 14620 break; 14621 default: 14622 ASSERT(FALSE); 14623 } 14624 14625 if (un->un_ncmds_in_transport > 0) { 14626 un->un_throttle = un->un_ncmds_in_transport; 14627 } 14628 14629 } else { 14630 if (un->un_ncmds_in_transport == 0) { 14631 un->un_throttle = 1; 14632 } else { 14633 un->un_throttle = un->un_ncmds_in_transport; 14634 } 14635 } 14636 } 14637 14638 /* Reschedule the timeout if none is currently active */ 14639 if (un->un_reset_throttle_timeid == NULL) { 14640 un->un_reset_throttle_timeid = timeout(sd_restore_throttle, 14641 un, SD_THROTTLE_RESET_INTERVAL); 14642 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14643 "sd_reduce_throttle: timeout scheduled!\n"); 14644 } 14645 14646 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reduce_throttle: " 14647 "exit: un:0x%p un_throttle:%d\n", un, un->un_throttle); 14648 } 14649 14650 14651 14652 /* 14653 * Function: sd_restore_throttle 14654 * 14655 * Description: Callback function for timeout(9F). Resets the current 14656 * value of un->un_throttle to its default. 14657 * 14658 * Arguments: arg - pointer to associated softstate for the device. 14659 * 14660 * Context: May be called from interrupt context 14661 */ 14662 14663 static void 14664 sd_restore_throttle(void *arg) 14665 { 14666 struct sd_lun *un = arg; 14667 14668 ASSERT(un != NULL); 14669 ASSERT(!mutex_owned(SD_MUTEX(un))); 14670 14671 mutex_enter(SD_MUTEX(un)); 14672 14673 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: " 14674 "entry: un:0x%p un_throttle:%d\n", un, un->un_throttle); 14675 14676 un->un_reset_throttle_timeid = NULL; 14677 14678 if (un->un_f_use_adaptive_throttle == TRUE) { 14679 /* 14680 * If un_busy_throttle is nonzero, then it contains the 14681 * value that un_throttle was when we got a TRAN_BUSY back 14682 * from scsi_transport(). We want to revert back to this 14683 * value. 14684 * 14685 * In the QFULL case, the throttle limit will incrementally 14686 * increase until it reaches max throttle. 14687 */ 14688 if (un->un_busy_throttle > 0) { 14689 un->un_throttle = un->un_busy_throttle; 14690 un->un_busy_throttle = 0; 14691 } else { 14692 /* 14693 * Increase the throttle by 10% to open the gate 14694 * slowly; schedule another restore if the saved 14695 * throttle has not yet been reached. 14696 */ 14697 short throttle; 14698 if (sd_qfull_throttle_enable) { 14699 throttle = un->un_throttle + 14700 max((un->un_throttle / 10), 1); 14701 un->un_throttle = 14702 (throttle < un->un_saved_throttle) ?
14703 throttle : un->un_saved_throttle; 14704 if (un->un_throttle < un->un_saved_throttle) { 14705 un->un_reset_throttle_timeid = 14706 timeout(sd_restore_throttle, 14707 un, 14708 SD_QFULL_THROTTLE_RESET_INTERVAL); 14709 } 14710 } 14711 } 14712 14713 /* 14714 * If un_throttle has fallen below the low-water mark, we 14715 * restore the maximum value here (and allow it to ratchet 14716 * down again if necessary). 14717 */ 14718 if (un->un_throttle < un->un_min_throttle) { 14719 un->un_throttle = un->un_saved_throttle; 14720 } 14721 } else { 14722 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: " 14723 "restoring limit from 0x%x to 0x%x\n", 14724 un->un_throttle, un->un_saved_throttle); 14725 un->un_throttle = un->un_saved_throttle; 14726 } 14727 14728 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, 14729 "sd_restore_throttle: calling sd_start_cmds!\n"); 14730 14731 sd_start_cmds(un, NULL); 14732 14733 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, 14734 "sd_restore_throttle: exit: un:0x%p un_throttle:%d\n", 14735 un, un->un_throttle); 14736 14737 mutex_exit(SD_MUTEX(un)); 14738 14739 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: exit\n"); 14740 } 14741 14742 /* 14743 * Function: sdrunout 14744 * 14745 * Description: Callback routine for scsi_init_pkt when a resource allocation 14746 * fails. 14747 * 14748 * Arguments: arg - a pointer to the sd_lun unit struct for the particular 14749 * soft state instance. 14750 * 14751 * Return Code: The scsi_init_pkt routine allows for the callback function to 14752 * return a 0 indicating the callback should be rescheduled or a 1 14753 * indicating not to reschedule. This routine always returns 1 14754 * because the driver always provides a callback function to 14755 * scsi_init_pkt. This results in a callback always being scheduled 14756 * (via the scsi_init_pkt callback implementation) if a resource 14757 * failure occurs. 14758 * 14759 * Context: This callback function may not block or call routines that block 14760 * 14761 * Note: Using the scsi_init_pkt callback facility can result in an I/O 14762 * request persisting at the head of the list which cannot be 14763 * satisfied even after multiple retries. In the future the driver 14764 * may implement some type of maximum runout count before failing 14765 * an I/O. 14766 */ 14767 14768 static int 14769 sdrunout(caddr_t arg) 14770 { 14771 struct sd_lun *un = (struct sd_lun *)arg; 14772 14773 ASSERT(un != NULL); 14774 ASSERT(!mutex_owned(SD_MUTEX(un))); 14775 14776 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdrunout: entry\n"); 14777 14778 mutex_enter(SD_MUTEX(un)); 14779 sd_start_cmds(un, NULL); 14780 mutex_exit(SD_MUTEX(un)); 14781 /* 14782 * This callback routine always returns 1 (i.e. do not reschedule) 14783 * because we always specify sdrunout as the callback handler for 14784 * scsi_init_pkt inside the call to sd_start_cmds. 14785 */ 14786 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdrunout: exit\n"); 14787 return (1); 14788 } 14789 14790 14791 /* 14792 * Function: sdintr 14793 * 14794 * Description: Completion callback routine for scsi_pkt(9S) structs 14795 * sent to the HBA driver via scsi_transport(9F).
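 * In outline, the checks below run in this order: CMD_DEV_GONE handling, auto request sense data (STATE_ARQ_DONE), the driver's own REQUEST SENSE packet (FLAG_SENSING), the common successful-completion fast path, and finally per-pkt_reason error recovery.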
14796 * 14797 * Context: Interrupt context 14798 */ 14799 14800 static void 14801 sdintr(struct scsi_pkt *pktp) 14802 { 14803 struct buf *bp; 14804 struct sd_xbuf *xp; 14805 struct sd_lun *un; 14806 size_t actual_len; 14807 14808 ASSERT(pktp != NULL); 14809 bp = (struct buf *)pktp->pkt_private; 14810 ASSERT(bp != NULL); 14811 xp = SD_GET_XBUF(bp); 14812 ASSERT(xp != NULL); 14813 ASSERT(xp->xb_pktp != NULL); 14814 un = SD_GET_UN(bp); 14815 ASSERT(un != NULL); 14816 ASSERT(!mutex_owned(SD_MUTEX(un))); 14817 14818 #ifdef SD_FAULT_INJECTION 14819 14820 SD_INFO(SD_LOG_IOERR, un, "sdintr: sdintr calling Fault injection\n"); 14821 /* SD FaultInjection */ 14822 sd_faultinjection(pktp); 14823 14824 #endif /* SD_FAULT_INJECTION */ 14825 14826 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdintr: entry: buf:0x%p," 14827 " xp:0x%p, un:0x%p\n", bp, xp, un); 14828 14829 mutex_enter(SD_MUTEX(un)); 14830 14831 /* Reduce the count of the #commands currently in transport */ 14832 un->un_ncmds_in_transport--; 14833 ASSERT(un->un_ncmds_in_transport >= 0); 14834 14835 /* Increment counter to indicate that the callback routine is active */ 14836 un->un_in_callback++; 14837 14838 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 14839 14840 #ifdef SDDEBUG 14841 if (bp == un->un_retry_bp) { 14842 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sdintr: " 14843 "un:0x%p: GOT retry_bp:0x%p un_ncmds_in_transport:%d\n", 14844 un, un->un_retry_bp, un->un_ncmds_in_transport); 14845 } 14846 #endif 14847 14848 /* 14849 * If pkt_reason is CMD_DEV_GONE, fail the command, and update the media 14850 * state if needed. 14851 */ 14852 if (pktp->pkt_reason == CMD_DEV_GONE) { 14853 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 14854 "Command failed to complete...Device is gone\n"); 14855 if (un->un_mediastate != DKIO_DEV_GONE) { 14856 un->un_mediastate = DKIO_DEV_GONE; 14857 cv_broadcast(&un->un_state_cv); 14858 } 14859 sd_return_failed_command(un, bp, EIO); 14860 goto exit; 14861 } 14862 14863 if (pktp->pkt_state & STATE_XARQ_DONE) { 14864 SD_TRACE(SD_LOG_COMMON, un, 14865 "sdintr: extra sense data received. pkt=%p\n", pktp); 14866 } 14867 14868 /* 14869 * First see if the pkt has auto-request sense data with it.... 14870 * Look at the packet state first so we don't take a performance 14871 * hit looking at the arq enabled flag unless absolutely necessary. 14872 */ 14873 if ((pktp->pkt_state & STATE_ARQ_DONE) && 14874 (un->un_f_arq_enabled == TRUE)) { 14875 /* 14876 * The HBA did an auto request sense for this command so check 14877 * for FLAG_DIAGNOSE. If set this indicates a uscsi or internal 14878 * driver command that should not be retried. 14879 */ 14880 if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) { 14881 /* 14882 * Save the relevant sense info into the xp for the 14883 * original cmd. 
14884 */ 14885 struct scsi_arq_status *asp; 14886 asp = (struct scsi_arq_status *)(pktp->pkt_scbp); 14887 xp->xb_sense_status = 14888 *((uchar_t *)(&(asp->sts_rqpkt_status))); 14889 xp->xb_sense_state = asp->sts_rqpkt_state; 14890 xp->xb_sense_resid = asp->sts_rqpkt_resid; 14891 if (pktp->pkt_state & STATE_XARQ_DONE) { 14892 actual_len = MAX_SENSE_LENGTH - 14893 xp->xb_sense_resid; 14894 bcopy(&asp->sts_sensedata, xp->xb_sense_data, 14895 MAX_SENSE_LENGTH); 14896 } else { 14897 if (xp->xb_sense_resid > SENSE_LENGTH) { 14898 actual_len = MAX_SENSE_LENGTH - 14899 xp->xb_sense_resid; 14900 } else { 14901 actual_len = SENSE_LENGTH - 14902 xp->xb_sense_resid; 14903 } 14904 if (xp->xb_pkt_flags & SD_XB_USCSICMD) { 14905 xp->xb_sense_resid = 14906 (int)(((struct uscsi_cmd *) 14907 (xp->xb_pktinfo))-> 14908 uscsi_rqlen) - actual_len; 14909 } 14910 bcopy(&asp->sts_sensedata, xp->xb_sense_data, 14911 SENSE_LENGTH); 14912 } 14913 14914 /* fail the command */ 14915 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14916 "sdintr: arq done and FLAG_DIAGNOSE set\n"); 14917 sd_return_failed_command(un, bp, EIO); 14918 goto exit; 14919 } 14920 14921 #if (defined(__i386) || defined(__amd64)) /* DMAFREE for x86 only */ 14922 /* 14923 * We want to either retry or fail this command, so free 14924 * the DMA resources here. If we retry the command then 14925 * the DMA resources will be reallocated in sd_start_cmds(). 14926 * Note that when PKT_DMA_PARTIAL is used, this reallocation 14927 * causes the *entire* transfer to start over again from the 14928 * beginning of the request, even for PARTIAL chunks that 14929 * have already transferred successfully. 14930 */ 14931 if ((un->un_f_is_fibre == TRUE) && 14932 ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) && 14933 ((pktp->pkt_flags & FLAG_SENSING) == 0)) { 14934 scsi_dmafree(pktp); 14935 xp->xb_pkt_flags |= SD_XB_DMA_FREED; 14936 } 14937 #endif 14938 14939 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14940 "sdintr: arq done, sd_handle_auto_request_sense\n"); 14941 14942 sd_handle_auto_request_sense(un, bp, xp, pktp); 14943 goto exit; 14944 } 14945 14946 /* Next see if this is the REQUEST SENSE pkt for the instance */ 14947 if (pktp->pkt_flags & FLAG_SENSING) { 14948 /* This pktp is from the unit's REQUEST_SENSE command */ 14949 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14950 "sdintr: sd_handle_request_sense\n"); 14951 sd_handle_request_sense(un, bp, xp, pktp); 14952 goto exit; 14953 } 14954 14955 /* 14956 * Check to see if the command successfully completed as requested; 14957 * this is the most common case (and also the hot performance path). 14958 * 14959 * Requirements for successful completion are: 14960 * pkt_reason is CMD_CMPLT and packet status is status good. 14961 * In addition: 14962 * - A residual of zero indicates successful completion no matter what 14963 * the command is. 14964 * - If the residual is not zero and the command is not a read or 14965 * write, then it's still defined as successful completion. In other 14966 * words, if the command is a read or write the residual must be 14967 * zero for successful completion. 14968 * - If the residual is not zero and the command is a read or 14969 * write, and it's a USCSICMD, then it's still defined as 14970 * successful completion. 14971 */ 14972 if ((pktp->pkt_reason == CMD_CMPLT) && 14973 (SD_GET_PKT_STATUS(pktp) == STATUS_GOOD)) { 14974 14975 /* 14976 * Since this command is returned with a good status, we 14977 * can reset the count for Sonoma failover. 
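 * (The count, un_sonoma_failure_count, is incremented in sd_print_sense_msg() when an LSI array returns the vendor-specific ILLEGAL REQUEST with ASC/ASCQ 0x94/0x01.)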
14978 */ 14979 un->un_sonoma_failure_count = 0; 14980 14981 /* 14982 * Return all USCSI commands on good status 14983 */ 14984 if (pktp->pkt_resid == 0) { 14985 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14986 "sdintr: returning command for resid == 0\n"); 14987 } else if (((SD_GET_PKT_OPCODE(pktp) & 0x1F) != SCMD_READ) && 14988 ((SD_GET_PKT_OPCODE(pktp) & 0x1F) != SCMD_WRITE)) { 14989 SD_UPDATE_B_RESID(bp, pktp); 14990 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14991 "sdintr: returning command for resid != 0\n"); 14992 } else if (xp->xb_pkt_flags & SD_XB_USCSICMD) { 14993 SD_UPDATE_B_RESID(bp, pktp); 14994 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14995 "sdintr: returning uscsi command\n"); 14996 } else { 14997 goto not_successful; 14998 } 14999 sd_return_command(un, bp); 15000 15001 /* 15002 * Decrement counter to indicate that the callback routine 15003 * is done. 15004 */ 15005 un->un_in_callback--; 15006 ASSERT(un->un_in_callback >= 0); 15007 mutex_exit(SD_MUTEX(un)); 15008 15009 return; 15010 } 15011 15012 not_successful: 15013 15014 #if (defined(__i386) || defined(__amd64)) /* DMAFREE for x86 only */ 15015 /* 15016 * The following is based upon knowledge of the underlying transport 15017 * and its use of DMA resources. This code should be removed when 15018 * PKT_DMA_PARTIAL support is taken out of the disk driver in favor 15019 * of the new PKT_CMD_BREAKUP protocol. See also sd_initpkt_for_buf() 15020 * and sd_start_cmds(). 15021 * 15022 * Free any DMA resources associated with this command if there 15023 * is a chance it could be retried or enqueued for later retry. 15024 * If we keep the DMA binding then mpxio cannot reissue the 15025 * command on another path whenever a path failure occurs. 15026 * 15027 * Note that when PKT_DMA_PARTIAL is used, free/reallocation 15028 * causes the *entire* transfer to start over again from the 15029 * beginning of the request, even for PARTIAL chunks that 15030 * have already transferred successfully. 15031 * 15032 * This is only done for non-uscsi commands (and also skipped for the 15033 * driver's internal RQS command). Also just do this for Fibre Channel 15034 * devices as these are the only ones that support mpxio. 15035 */ 15036 if ((un->un_f_is_fibre == TRUE) && 15037 ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) && 15038 ((pktp->pkt_flags & FLAG_SENSING) == 0)) { 15039 scsi_dmafree(pktp); 15040 xp->xb_pkt_flags |= SD_XB_DMA_FREED; 15041 } 15042 #endif 15043 15044 /* 15045 * The command did not successfully complete as requested so check 15046 * for FLAG_DIAGNOSE. If set this indicates a uscsi or internal 15047 * driver command that should not be retried so just return. If 15048 * FLAG_DIAGNOSE is not set the error will be processed below. 15049 */ 15050 if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) { 15051 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15052 "sdintr: FLAG_DIAGNOSE: sd_return_failed_command\n"); 15053 /* 15054 * Issue a request sense if a check condition caused the error 15055 * (we handle the auto request sense case above), otherwise 15056 * just fail the command. 15057 */ 15058 if ((pktp->pkt_reason == CMD_CMPLT) && 15059 (SD_GET_PKT_STATUS(pktp) == STATUS_CHECK)) { 15060 sd_send_request_sense_command(un, bp, pktp); 15061 } else { 15062 sd_return_failed_command(un, bp, EIO); 15063 } 15064 goto exit; 15065 } 15066 15067 /* 15068 * The command did not successfully complete as requested so process 15069 * the error, retry, and/or attempt recovery. 
15070 */ 15071 switch (pktp->pkt_reason) { 15072 case CMD_CMPLT: 15073 switch (SD_GET_PKT_STATUS(pktp)) { 15074 case STATUS_GOOD: 15075 /* 15076 * The command completed successfully with a non-zero 15077 * residual 15078 */ 15079 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15080 "sdintr: STATUS_GOOD \n"); 15081 sd_pkt_status_good(un, bp, xp, pktp); 15082 break; 15083 15084 case STATUS_CHECK: 15085 case STATUS_TERMINATED: 15086 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15087 "sdintr: STATUS_TERMINATED | STATUS_CHECK\n"); 15088 sd_pkt_status_check_condition(un, bp, xp, pktp); 15089 break; 15090 15091 case STATUS_BUSY: 15092 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15093 "sdintr: STATUS_BUSY\n"); 15094 sd_pkt_status_busy(un, bp, xp, pktp); 15095 break; 15096 15097 case STATUS_RESERVATION_CONFLICT: 15098 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15099 "sdintr: STATUS_RESERVATION_CONFLICT\n"); 15100 sd_pkt_status_reservation_conflict(un, bp, xp, pktp); 15101 break; 15102 15103 case STATUS_QFULL: 15104 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15105 "sdintr: STATUS_QFULL\n"); 15106 sd_pkt_status_qfull(un, bp, xp, pktp); 15107 break; 15108 15109 case STATUS_MET: 15110 case STATUS_INTERMEDIATE: 15111 case STATUS_SCSI2: 15112 case STATUS_INTERMEDIATE_MET: 15113 case STATUS_ACA_ACTIVE: 15114 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 15115 "Unexpected SCSI status received: 0x%x\n", 15116 SD_GET_PKT_STATUS(pktp)); 15117 sd_return_failed_command(un, bp, EIO); 15118 break; 15119 15120 default: 15121 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 15122 "Invalid SCSI status received: 0x%x\n", 15123 SD_GET_PKT_STATUS(pktp)); 15124 sd_return_failed_command(un, bp, EIO); 15125 break; 15126 15127 } 15128 break; 15129 15130 case CMD_INCOMPLETE: 15131 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15132 "sdintr: CMD_INCOMPLETE\n"); 15133 sd_pkt_reason_cmd_incomplete(un, bp, xp, pktp); 15134 break; 15135 case CMD_TRAN_ERR: 15136 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15137 "sdintr: CMD_TRAN_ERR\n"); 15138 sd_pkt_reason_cmd_tran_err(un, bp, xp, pktp); 15139 break; 15140 case CMD_RESET: 15141 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15142 "sdintr: CMD_RESET \n"); 15143 sd_pkt_reason_cmd_reset(un, bp, xp, pktp); 15144 break; 15145 case CMD_ABORTED: 15146 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15147 "sdintr: CMD_ABORTED \n"); 15148 sd_pkt_reason_cmd_aborted(un, bp, xp, pktp); 15149 break; 15150 case CMD_TIMEOUT: 15151 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15152 "sdintr: CMD_TIMEOUT\n"); 15153 sd_pkt_reason_cmd_timeout(un, bp, xp, pktp); 15154 break; 15155 case CMD_UNX_BUS_FREE: 15156 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15157 "sdintr: CMD_UNX_BUS_FREE \n"); 15158 sd_pkt_reason_cmd_unx_bus_free(un, bp, xp, pktp); 15159 break; 15160 case CMD_TAG_REJECT: 15161 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15162 "sdintr: CMD_TAG_REJECT\n"); 15163 sd_pkt_reason_cmd_tag_reject(un, bp, xp, pktp); 15164 break; 15165 default: 15166 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15167 "sdintr: default\n"); 15168 sd_pkt_reason_default(un, bp, xp, pktp); 15169 break; 15170 } 15171 15172 exit: 15173 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdintr: exit\n"); 15174 15175 /* Decrement counter to indicate that the callback routine is done. */ 15176 un->un_in_callback--; 15177 ASSERT(un->un_in_callback >= 0); 15178 15179 /* 15180 * At this point, the pkt has been dispatched, ie, it is either 15181 * being re-tried or has been returned to its caller and should 15182 * not be referenced. 
15183 */ 15184 15185 mutex_exit(SD_MUTEX(un)); 15186 } 15187 15188 15189 /* 15190 * Function: sd_print_incomplete_msg 15191 * 15192 * Description: Prints the error message for a CMD_INCOMPLETE error. 15193 * 15194 * Arguments: un - ptr to associated softstate for the device. 15195 * bp - ptr to the buf(9S) for the command. 15196 * arg - message string ptr 15197 * code - SD_DELAYED_RETRY_ISSUED, SD_IMMEDIATE_RETRY_ISSUED, 15198 * or SD_NO_RETRY_ISSUED. 15199 * 15200 * Context: May be called under interrupt context 15201 */ 15202 15203 static void 15204 sd_print_incomplete_msg(struct sd_lun *un, struct buf *bp, void *arg, int code) 15205 { 15206 struct scsi_pkt *pktp; 15207 char *msgp; 15208 char *cmdp = arg; 15209 15210 ASSERT(un != NULL); 15211 ASSERT(mutex_owned(SD_MUTEX(un))); 15212 ASSERT(bp != NULL); 15213 ASSERT(arg != NULL); 15214 pktp = SD_GET_PKTP(bp); 15215 ASSERT(pktp != NULL); 15216 15217 switch (code) { 15218 case SD_DELAYED_RETRY_ISSUED: 15219 case SD_IMMEDIATE_RETRY_ISSUED: 15220 msgp = "retrying"; 15221 break; 15222 case SD_NO_RETRY_ISSUED: 15223 default: 15224 msgp = "giving up"; 15225 break; 15226 } 15227 15228 if ((pktp->pkt_flags & FLAG_SILENT) == 0) { 15229 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 15230 "incomplete %s- %s\n", cmdp, msgp); 15231 } 15232 } 15233 15234 15235 15236 /* 15237 * Function: sd_pkt_status_good 15238 * 15239 * Description: Processing for a STATUS_GOOD code in pkt_status. 15240 * 15241 * Context: May be called under interrupt context 15242 */ 15243 15244 static void 15245 sd_pkt_status_good(struct sd_lun *un, struct buf *bp, 15246 struct sd_xbuf *xp, struct scsi_pkt *pktp) 15247 { 15248 char *cmdp; 15249 15250 ASSERT(un != NULL); 15251 ASSERT(mutex_owned(SD_MUTEX(un))); 15252 ASSERT(bp != NULL); 15253 ASSERT(xp != NULL); 15254 ASSERT(pktp != NULL); 15255 ASSERT(pktp->pkt_reason == CMD_CMPLT); 15256 ASSERT(SD_GET_PKT_STATUS(pktp) == STATUS_GOOD); 15257 ASSERT(pktp->pkt_resid != 0); 15258 15259 SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: entry\n"); 15260 15261 SD_UPDATE_ERRSTATS(un, sd_harderrs); 15262 switch (SD_GET_PKT_OPCODE(pktp) & 0x1F) { 15263 case SCMD_READ: 15264 cmdp = "read"; 15265 break; 15266 case SCMD_WRITE: 15267 cmdp = "write"; 15268 break; 15269 default: 15270 SD_UPDATE_B_RESID(bp, pktp); 15271 sd_return_command(un, bp); 15272 SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: exit\n"); 15273 return; 15274 } 15275 15276 /* 15277 * See if we can retry the read/write, preferably immediately. 15278 * If retries are exhausted, then sd_retry_command() will update 15279 * the b_resid count. 15280 */ 15281 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_incomplete_msg, 15282 cmdp, EIO, (clock_t)0, NULL); 15283 15284 SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: exit\n"); 15285 } 15286 15287 15288 15289 15290 15291 /* 15292 * Function: sd_handle_request_sense 15293 * 15294 * Description: Processing for non-auto Request Sense command.
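 * (A note on the linkage assumed here: the RQS packet's sd_xbuf keeps a back pointer, xb_sense_bp, set in sd_mark_rqs_busy(), to the buf of the original command whose CHECK CONDITION triggered this sense fetch.)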
15295 * 15296 * Arguments: un - ptr to associated softstate 15297 * sense_bp - ptr to buf(9S) for the RQS command 15298 * sense_xp - ptr to the sd_xbuf for the RQS command 15299 * sense_pktp - ptr to the scsi_pkt(9S) for the RQS command 15300 * 15301 * Context: May be called under interrupt context 15302 */ 15303 15304 static void 15305 sd_handle_request_sense(struct sd_lun *un, struct buf *sense_bp, 15306 struct sd_xbuf *sense_xp, struct scsi_pkt *sense_pktp) 15307 { 15308 struct buf *cmd_bp; /* buf for the original command */ 15309 struct sd_xbuf *cmd_xp; /* sd_xbuf for the original command */ 15310 struct scsi_pkt *cmd_pktp; /* pkt for the original command */ 15311 size_t actual_len; /* actual sense data length */ 15312 15313 ASSERT(un != NULL); 15314 ASSERT(mutex_owned(SD_MUTEX(un))); 15315 ASSERT(sense_bp != NULL); 15316 ASSERT(sense_xp != NULL); 15317 ASSERT(sense_pktp != NULL); 15318 15319 /* 15320 * Note the sense_bp, sense_xp, and sense_pktp here are for the 15321 * RQS command and not the original command. 15322 */ 15323 ASSERT(sense_pktp == un->un_rqs_pktp); 15324 ASSERT(sense_bp == un->un_rqs_bp); 15325 ASSERT((sense_pktp->pkt_flags & (FLAG_SENSING | FLAG_HEAD)) == 15326 (FLAG_SENSING | FLAG_HEAD)); 15327 ASSERT((((SD_GET_XBUF(sense_xp->xb_sense_bp))->xb_pktp->pkt_flags) & 15328 FLAG_SENSING) == FLAG_SENSING); 15329 15330 /* These are the bp, xp, and pktp for the original command */ 15331 cmd_bp = sense_xp->xb_sense_bp; 15332 cmd_xp = SD_GET_XBUF(cmd_bp); 15333 cmd_pktp = SD_GET_PKTP(cmd_bp); 15334 15335 if (sense_pktp->pkt_reason != CMD_CMPLT) { 15336 /* 15337 * The REQUEST SENSE command failed. Release the REQUEST 15338 * SENSE command for re-use, get back the bp for the original 15339 * command, and attempt to re-try the original command if 15340 * FLAG_DIAGNOSE is not set in the original packet. 15341 */ 15342 SD_UPDATE_ERRSTATS(un, sd_harderrs); 15343 if ((cmd_pktp->pkt_flags & FLAG_DIAGNOSE) == 0) { 15344 cmd_bp = sd_mark_rqs_idle(un, sense_xp); 15345 sd_retry_command(un, cmd_bp, SD_RETRIES_STANDARD, 15346 NULL, NULL, EIO, (clock_t)0, NULL); 15347 return; 15348 } 15349 } 15350 15351 /* 15352 * Save the relevant sense info into the xp for the original cmd. 15353 * 15354 * Note: if the request sense failed the state info will be zero 15355 * as set in sd_mark_rqs_busy() 15356 */ 15357 cmd_xp->xb_sense_status = *(sense_pktp->pkt_scbp); 15358 cmd_xp->xb_sense_state = sense_pktp->pkt_state; 15359 actual_len = MAX_SENSE_LENGTH - sense_pktp->pkt_resid; 15360 if ((cmd_xp->xb_pkt_flags & SD_XB_USCSICMD) && 15361 (((struct uscsi_cmd *)cmd_xp->xb_pktinfo)->uscsi_rqlen > 15362 SENSE_LENGTH)) { 15363 bcopy(sense_bp->b_un.b_addr, cmd_xp->xb_sense_data, 15364 MAX_SENSE_LENGTH); 15365 cmd_xp->xb_sense_resid = sense_pktp->pkt_resid; 15366 } else { 15367 bcopy(sense_bp->b_un.b_addr, cmd_xp->xb_sense_data, 15368 SENSE_LENGTH); 15369 if (actual_len < SENSE_LENGTH) { 15370 cmd_xp->xb_sense_resid = SENSE_LENGTH - actual_len; 15371 } else { 15372 cmd_xp->xb_sense_resid = 0; 15373 } 15374 } 15375 15376 /* 15377 * Free up the RQS command.... 15378 * NOTE: 15379 * Must do this BEFORE calling sd_validate_sense_data! 15380 * sd_validate_sense_data may return the original command in 15381 * which case the pkt will be freed and the flags can no 15382 * longer be touched. 15383 * SD_MUTEX is held through this process until the command 15384 * is dispatched based upon the sense data, so there are 15385 * no race conditions. 
15386 */ 15387 (void) sd_mark_rqs_idle(un, sense_xp); 15388 15389 /* 15390 * For a retryable command see if we have valid sense data, if so then 15391 * turn it over to sd_decode_sense() to figure out the right course of 15392 * action. Just fail a non-retryable command. 15393 */ 15394 if ((cmd_pktp->pkt_flags & FLAG_DIAGNOSE) == 0) { 15395 if (sd_validate_sense_data(un, cmd_bp, cmd_xp, actual_len) == 15396 SD_SENSE_DATA_IS_VALID) { 15397 sd_decode_sense(un, cmd_bp, cmd_xp, cmd_pktp); 15398 } 15399 } else { 15400 SD_DUMP_MEMORY(un, SD_LOG_IO_CORE, "Failed CDB", 15401 (uchar_t *)cmd_pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX); 15402 SD_DUMP_MEMORY(un, SD_LOG_IO_CORE, "Sense Data", 15403 (uchar_t *)cmd_xp->xb_sense_data, SENSE_LENGTH, SD_LOG_HEX); 15404 sd_return_failed_command(un, cmd_bp, EIO); 15405 } 15406 } 15407 15408 15409 15410 15411 /* 15412 * Function: sd_handle_auto_request_sense 15413 * 15414 * Description: Processing for auto-request sense information. 15415 * 15416 * Arguments: un - ptr to associated softstate 15417 * bp - ptr to buf(9S) for the command 15418 * xp - ptr to the sd_xbuf for the command 15419 * pktp - ptr to the scsi_pkt(9S) for the command 15420 * 15421 * Context: May be called under interrupt context 15422 */ 15423 15424 static void 15425 sd_handle_auto_request_sense(struct sd_lun *un, struct buf *bp, 15426 struct sd_xbuf *xp, struct scsi_pkt *pktp) 15427 { 15428 struct scsi_arq_status *asp; 15429 size_t actual_len; 15430 15431 ASSERT(un != NULL); 15432 ASSERT(mutex_owned(SD_MUTEX(un))); 15433 ASSERT(bp != NULL); 15434 ASSERT(xp != NULL); 15435 ASSERT(pktp != NULL); 15436 ASSERT(pktp != un->un_rqs_pktp); 15437 ASSERT(bp != un->un_rqs_bp); 15438 15439 /* 15440 * For auto-request sense, we get a scsi_arq_status back from 15441 * the HBA, with the sense data in the sts_sensedata member. 15442 * The pkt_scbp of the packet points to this scsi_arq_status. 15443 */ 15444 asp = (struct scsi_arq_status *)(pktp->pkt_scbp); 15445 15446 if (asp->sts_rqpkt_reason != CMD_CMPLT) { 15447 /* 15448 * The auto REQUEST SENSE failed; see if we can re-try 15449 * the original command. 15450 */ 15451 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 15452 "auto request sense failed (reason=%s)\n", 15453 scsi_rname(asp->sts_rqpkt_reason)); 15454 15455 sd_reset_target(un, pktp); 15456 15457 sd_retry_command(un, bp, SD_RETRIES_STANDARD, 15458 NULL, NULL, EIO, (clock_t)0, NULL); 15459 return; 15460 } 15461 15462 /* Save the relevant sense info into the xp for the original cmd. */ 15463 xp->xb_sense_status = *((uchar_t *)(&(asp->sts_rqpkt_status))); 15464 xp->xb_sense_state = asp->sts_rqpkt_state; 15465 xp->xb_sense_resid = asp->sts_rqpkt_resid; 15466 if (xp->xb_sense_state & STATE_XARQ_DONE) { 15467 actual_len = MAX_SENSE_LENGTH - xp->xb_sense_resid; 15468 bcopy(&asp->sts_sensedata, xp->xb_sense_data, 15469 MAX_SENSE_LENGTH); 15470 } else { 15471 if (xp->xb_sense_resid > SENSE_LENGTH) { 15472 actual_len = MAX_SENSE_LENGTH - xp->xb_sense_resid; 15473 } else { 15474 actual_len = SENSE_LENGTH - xp->xb_sense_resid; 15475 } 15476 if (xp->xb_pkt_flags & SD_XB_USCSICMD) { 15477 xp->xb_sense_resid = (int)(((struct uscsi_cmd *) 15478 (xp->xb_pktinfo))->uscsi_rqlen) - actual_len; 15479 } 15480 bcopy(&asp->sts_sensedata, xp->xb_sense_data, SENSE_LENGTH); 15481 } 15482 15483 /* 15484 * See if we have valid sense data, if so then turn it over to 15485 * sd_decode_sense() to figure out the right course of action. 
15486 */ 15487 if (sd_validate_sense_data(un, bp, xp, actual_len) == 15488 SD_SENSE_DATA_IS_VALID) { 15489 sd_decode_sense(un, bp, xp, pktp); 15490 } 15491 } 15492 15493 15494 /* 15495 * Function: sd_print_sense_failed_msg 15496 * 15497 * Description: Print log message when RQS has failed. 15498 * 15499 * Arguments: un - ptr to associated softstate 15500 * bp - ptr to buf(9S) for the command 15501 * arg - generic message string ptr 15502 * code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED, 15503 * or SD_NO_RETRY_ISSUED 15504 * 15505 * Context: May be called from interrupt context 15506 */ 15507 15508 static void 15509 sd_print_sense_failed_msg(struct sd_lun *un, struct buf *bp, void *arg, 15510 int code) 15511 { 15512 char *msgp = arg; 15513 15514 ASSERT(un != NULL); 15515 ASSERT(mutex_owned(SD_MUTEX(un))); 15516 ASSERT(bp != NULL); 15517 15518 if ((code == SD_NO_RETRY_ISSUED) && (msgp != NULL)) { 15519 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, msgp); 15520 } 15521 } 15522 15523 15524 /* 15525 * Function: sd_validate_sense_data 15526 * 15527 * Description: Check the given sense data for validity. 15528 * If the sense data is not valid, the command will 15529 * be either failed or retried! 15530 * 15531 * Return Code: SD_SENSE_DATA_IS_INVALID 15532 * SD_SENSE_DATA_IS_VALID 15533 * 15534 * Context: May be called from interrupt context 15535 */ 15536 15537 static int 15538 sd_validate_sense_data(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp, 15539 size_t actual_len) 15540 { 15541 struct scsi_extended_sense *esp; 15542 struct scsi_pkt *pktp; 15543 char *msgp = NULL; 15544 15545 ASSERT(un != NULL); 15546 ASSERT(mutex_owned(SD_MUTEX(un))); 15547 ASSERT(bp != NULL); 15548 ASSERT(bp != un->un_rqs_bp); 15549 ASSERT(xp != NULL); 15550 15551 pktp = SD_GET_PKTP(bp); 15552 ASSERT(pktp != NULL); 15553 15554 /* 15555 * Check the status of the RQS command (auto or manual). 15556 */ 15557 switch (xp->xb_sense_status & STATUS_MASK) { 15558 case STATUS_GOOD: 15559 break; 15560 15561 case STATUS_RESERVATION_CONFLICT: 15562 sd_pkt_status_reservation_conflict(un, bp, xp, pktp); 15563 return (SD_SENSE_DATA_IS_INVALID); 15564 15565 case STATUS_BUSY: 15566 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 15567 "Busy Status on REQUEST SENSE\n"); 15568 sd_retry_command(un, bp, SD_RETRIES_BUSY, NULL, 15569 NULL, EIO, SD_BSY_TIMEOUT / 500, kstat_waitq_enter); 15570 return (SD_SENSE_DATA_IS_INVALID); 15571 15572 case STATUS_QFULL: 15573 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 15574 "QFULL Status on REQUEST SENSE\n"); 15575 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, 15576 NULL, EIO, SD_BSY_TIMEOUT / 500, kstat_waitq_enter); 15577 return (SD_SENSE_DATA_IS_INVALID); 15578 15579 case STATUS_CHECK: 15580 case STATUS_TERMINATED: 15581 msgp = "Check Condition on REQUEST SENSE\n"; 15582 goto sense_failed; 15583 15584 default: 15585 msgp = "Not STATUS_GOOD on REQUEST_SENSE\n"; 15586 goto sense_failed; 15587 } 15588 15589 /* 15590 * See if we got the minimum required amount of sense data. 15591 * Note: We are assuming the returned sense data is SENSE_LENGTH bytes 15592 * or less. 
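 * (For reference, the response codes accepted further below correspond to the SCSI spec values 0x70/0x71 for fixed-format current/deferred errors and 0x72/0x73 for descriptor-format current/deferred errors, i.e. es_class 7 with es_code 0 through 3, plus the vendor-specific es_code 0xf.)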
15593 */ 15594 if (((xp->xb_sense_state & STATE_XFERRED_DATA) == 0) || 15595 (actual_len == 0)) { 15596 msgp = "Request Sense couldn't get sense data\n"; 15597 goto sense_failed; 15598 } 15599 15600 if (actual_len < SUN_MIN_SENSE_LENGTH) { 15601 msgp = "Not enough sense information\n"; 15602 goto sense_failed; 15603 } 15604 15605 /* 15606 * We require the extended sense data 15607 */ 15608 esp = (struct scsi_extended_sense *)xp->xb_sense_data; 15609 if (esp->es_class != CLASS_EXTENDED_SENSE) { 15610 if ((pktp->pkt_flags & FLAG_SILENT) == 0) { 15611 static char tmp[8]; 15612 static char buf[148]; 15613 char *p = (char *)(xp->xb_sense_data); 15614 int i; 15615 15616 mutex_enter(&sd_sense_mutex); 15617 (void) strcpy(buf, "undecodable sense information:"); 15618 for (i = 0; i < actual_len; i++) { 15619 (void) sprintf(tmp, " 0x%x", *(p++)&0xff); 15620 (void) strcpy(&buf[strlen(buf)], tmp); 15621 } 15622 i = strlen(buf); 15623 (void) strcpy(&buf[i], "-(assumed fatal)\n"); 15624 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, buf); 15625 mutex_exit(&sd_sense_mutex); 15626 } 15627 /* Note: Legacy behavior, fail the command with no retry */ 15628 sd_return_failed_command(un, bp, EIO); 15629 return (SD_SENSE_DATA_IS_INVALID); 15630 } 15631 15632 /* 15633 * Check that es_code is valid (es_class concatenated with es_code 15634 * makes up the "response code" field; es_class will always be 7, so 15635 * make sure es_code is 0, 1, 2, 3 or 0xf). es_code indicates the 15636 * format. 15637 */ 15638 if ((esp->es_code != CODE_FMT_FIXED_CURRENT) && 15639 (esp->es_code != CODE_FMT_FIXED_DEFERRED) && 15640 (esp->es_code != CODE_FMT_DESCR_CURRENT) && 15641 (esp->es_code != CODE_FMT_DESCR_DEFERRED) && 15642 (esp->es_code != CODE_FMT_VENDOR_SPECIFIC)) { 15643 goto sense_failed; 15644 } 15645 15646 return (SD_SENSE_DATA_IS_VALID); 15647 15648 sense_failed: 15649 /* 15650 * If the request sense failed (for whatever reason), attempt 15651 * to retry the original command. 15652 */ 15653 #if defined(__i386) || defined(__amd64) 15654 /* 15655 * SD_RETRY_DELAY is conditionally compiled (#if fibre) in 15656 * sddef.h for the SPARC platform, while x86 uses one binary 15657 * for both SCSI and FC. 15658 * The SD_RETRY_DELAY value needs to be adjusted here 15659 * whenever SD_RETRY_DELAY changes in sddef.h 15660 */ 15661 sd_retry_command(un, bp, SD_RETRIES_STANDARD, 15662 sd_print_sense_failed_msg, msgp, EIO, 15663 un->un_f_is_fibre?drv_usectohz(100000):(clock_t)0, NULL); 15664 #else 15665 sd_retry_command(un, bp, SD_RETRIES_STANDARD, 15666 sd_print_sense_failed_msg, msgp, EIO, SD_RETRY_DELAY, NULL); 15667 #endif 15668 15669 return (SD_SENSE_DATA_IS_INVALID); 15670 } 15671 15672 15673 15674 /* 15675 * Function: sd_decode_sense 15676 * 15677 * Description: Take recovery action(s) when SCSI Sense Data is received. 15678 * 15679 * Context: Interrupt context.
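 * Note: scsi_sense_key() is expected to extract the key from either fixed- or descriptor-format sense data, so the dispatch below should work for both formats.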
15680 */ 15681 15682 static void 15683 sd_decode_sense(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp, 15684 struct scsi_pkt *pktp) 15685 { 15686 uint8_t sense_key; 15687 15688 ASSERT(un != NULL); 15689 ASSERT(mutex_owned(SD_MUTEX(un))); 15690 ASSERT(bp != NULL); 15691 ASSERT(bp != un->un_rqs_bp); 15692 ASSERT(xp != NULL); 15693 ASSERT(pktp != NULL); 15694 15695 sense_key = scsi_sense_key(xp->xb_sense_data); 15696 15697 switch (sense_key) { 15698 case KEY_NO_SENSE: 15699 sd_sense_key_no_sense(un, bp, xp, pktp); 15700 break; 15701 case KEY_RECOVERABLE_ERROR: 15702 sd_sense_key_recoverable_error(un, xp->xb_sense_data, 15703 bp, xp, pktp); 15704 break; 15705 case KEY_NOT_READY: 15706 sd_sense_key_not_ready(un, xp->xb_sense_data, 15707 bp, xp, pktp); 15708 break; 15709 case KEY_MEDIUM_ERROR: 15710 case KEY_HARDWARE_ERROR: 15711 sd_sense_key_medium_or_hardware_error(un, 15712 xp->xb_sense_data, bp, xp, pktp); 15713 break; 15714 case KEY_ILLEGAL_REQUEST: 15715 sd_sense_key_illegal_request(un, bp, xp, pktp); 15716 break; 15717 case KEY_UNIT_ATTENTION: 15718 sd_sense_key_unit_attention(un, xp->xb_sense_data, 15719 bp, xp, pktp); 15720 break; 15721 case KEY_WRITE_PROTECT: 15722 case KEY_VOLUME_OVERFLOW: 15723 case KEY_MISCOMPARE: 15724 sd_sense_key_fail_command(un, bp, xp, pktp); 15725 break; 15726 case KEY_BLANK_CHECK: 15727 sd_sense_key_blank_check(un, bp, xp, pktp); 15728 break; 15729 case KEY_ABORTED_COMMAND: 15730 sd_sense_key_aborted_command(un, bp, xp, pktp); 15731 break; 15732 case KEY_VENDOR_UNIQUE: 15733 case KEY_COPY_ABORTED: 15734 case KEY_EQUAL: 15735 case KEY_RESERVED: 15736 default: 15737 sd_sense_key_default(un, xp->xb_sense_data, 15738 bp, xp, pktp); 15739 break; 15740 } 15741 } 15742 15743 15744 /* 15745 * Function: sd_dump_memory 15746 * 15747 * Description: Debug logging routine to print the contents of a user provided 15748 * buffer. The output of the buffer is broken up into 256-byte 15749 * segments due to a size constraint of the scsi_log 15750 * implementation. 15751 * 15752 * Arguments: un - ptr to softstate 15753 * comp - component mask 15754 * title - "title" string to precede data when printed 15755 * data - ptr to data block to be printed 15756 * len - size of data block to be printed 15757 * fmt - SD_LOG_HEX (use 0x%02x format) or SD_LOG_CHAR (use %c) 15758 * 15759 * Context: May be called from interrupt context 15760 */ 15761 15762 #define SD_DUMP_MEMORY_BUF_SIZE 256 15763 15764 static char *sd_dump_format_string[] = { 15765 " 0x%02x", 15766 " %c" 15767 }; 15768 15769 static void 15770 sd_dump_memory(struct sd_lun *un, uint_t comp, char *title, uchar_t *data, 15771 int len, int fmt) 15772 { 15773 int i, j; 15774 int avail_count; 15775 int start_offset; 15776 int end_offset; 15777 size_t entry_len; 15778 char *bufp; 15779 char *local_buf; 15780 char *format_string; 15781 15782 ASSERT((fmt == SD_LOG_HEX) || (fmt == SD_LOG_CHAR)); 15783 15784 /* 15785 * In the debug version of the driver, this function is called from a 15786 * number of places which are NOPs in the release driver. 15787 * The debug driver therefore has additional methods of filtering 15788 * debug output. 15789 */ 15790 #ifdef SDDEBUG 15791 /* 15792 * In the debug version of the driver we can reduce the amount of debug 15793 * messages by setting sd_error_level to something other than 15794 * SCSI_ERR_ALL and clearing bits in sd_level_mask and 15795 * sd_component_mask.
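 * (Sizing note for the code below, with hypothetical numbers: in SD_LOG_HEX mode each entry prints as five characters, e.g. " 0xab", so a 10-character title leaves roughly (256 - 10 - 3) / 5 = 48 data bytes per scsi_log() line; longer dumps span multiple lines.)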
15796 */ 15797 if (((sd_level_mask & (SD_LOGMASK_DUMP_MEM | SD_LOGMASK_DIAG)) == 0) || 15798 (sd_error_level != SCSI_ERR_ALL)) { 15799 return; 15800 } 15801 if (((sd_component_mask & comp) == 0) || 15802 (sd_error_level != SCSI_ERR_ALL)) { 15803 return; 15804 } 15805 #else 15806 if (sd_error_level != SCSI_ERR_ALL) { 15807 return; 15808 } 15809 #endif 15810 15811 local_buf = kmem_zalloc(SD_DUMP_MEMORY_BUF_SIZE, KM_SLEEP); 15812 bufp = local_buf; 15813 /* 15814 * Available length is the length of local_buf[], minus the 15815 * length of the title string, minus one for the ":", minus 15816 * one for the newline, minus one for the NULL terminator. 15817 * This gives the #bytes available for holding the printed 15818 * values from the given data buffer. 15819 */ 15820 if (fmt == SD_LOG_HEX) { 15821 format_string = sd_dump_format_string[0]; 15822 } else /* SD_LOG_CHAR */ { 15823 format_string = sd_dump_format_string[1]; 15824 } 15825 /* 15826 * Available count is the number of elements from the given 15827 * data buffer that we can fit into the available length. 15828 * This is based upon the size of the format string used. 15829 * Make one entry and find its size. 15830 */ 15831 (void) sprintf(bufp, format_string, data[0]); 15832 entry_len = strlen(bufp); 15833 avail_count = (SD_DUMP_MEMORY_BUF_SIZE - strlen(title) - 3) / entry_len; 15834 15835 j = 0; 15836 while (j < len) { 15837 bufp = local_buf; 15838 bzero(bufp, SD_DUMP_MEMORY_BUF_SIZE); 15839 start_offset = j; 15840 15841 end_offset = start_offset + avail_count; 15842 15843 (void) sprintf(bufp, "%s:", title); 15844 bufp += strlen(bufp); 15845 for (i = start_offset; ((i < end_offset) && (j < len)); 15846 i++, j++) { 15847 (void) sprintf(bufp, format_string, data[i]); 15848 bufp += entry_len; 15849 } 15850 (void) sprintf(bufp, "\n"); 15851 15852 scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE, "%s", local_buf); 15853 } 15854 kmem_free(local_buf, SD_DUMP_MEMORY_BUF_SIZE); 15855 } 15856 15857 /* 15858 * Function: sd_print_sense_msg 15859 * 15860 * Description: Log a message based upon the given sense data.
15861 * 15862 * Arguments: un - ptr to associated softstate 15863 * bp - ptr to buf(9S) for the command 15864 * arg - ptr to associated sd_sense_info struct 15865 * code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED, 15866 * or SD_NO_RETRY_ISSUED 15867 * 15868 * Context: May be called from interrupt context 15869 */ 15870 15871 static void 15872 sd_print_sense_msg(struct sd_lun *un, struct buf *bp, void *arg, int code) 15873 { 15874 struct sd_xbuf *xp; 15875 struct scsi_pkt *pktp; 15876 uint8_t *sensep; 15877 daddr_t request_blkno; 15878 diskaddr_t err_blkno; 15879 int severity; 15880 int pfa_flag; 15881 extern struct scsi_key_strings scsi_cmds[]; 15882 15883 ASSERT(un != NULL); 15884 ASSERT(mutex_owned(SD_MUTEX(un))); 15885 ASSERT(bp != NULL); 15886 xp = SD_GET_XBUF(bp); 15887 ASSERT(xp != NULL); 15888 pktp = SD_GET_PKTP(bp); 15889 ASSERT(pktp != NULL); 15890 ASSERT(arg != NULL); 15891 15892 severity = ((struct sd_sense_info *)(arg))->ssi_severity; 15893 pfa_flag = ((struct sd_sense_info *)(arg))->ssi_pfa_flag; 15894 15895 if ((code == SD_DELAYED_RETRY_ISSUED) || 15896 (code == SD_IMMEDIATE_RETRY_ISSUED)) { 15897 severity = SCSI_ERR_RETRYABLE; 15898 } 15899 15900 /* Use absolute block number for the request block number */ 15901 request_blkno = xp->xb_blkno; 15902 15903 /* 15904 * Now try to get the error block number from the sense data 15905 */ 15906 sensep = xp->xb_sense_data; 15907 15908 if (scsi_sense_info_uint64(sensep, SENSE_LENGTH, 15909 (uint64_t *)&err_blkno)) { 15910 /* 15911 * We retrieved the error block number from the information 15912 * portion of the sense data. 15913 * 15914 * For USCSI commands we are better off using the error 15915 * block no. as the requested block no. (This is the best 15916 * we can estimate.) 15917 */ 15918 if ((SD_IS_BUFIO(xp) == FALSE) && 15919 ((pktp->pkt_flags & FLAG_SILENT) == 0)) { 15920 request_blkno = err_blkno; 15921 } 15922 } else { 15923 /* 15924 * Without the es_valid bit set (for fixed format) or an 15925 * information descriptor (for descriptor format) we cannot 15926 * be certain of the error blkno, so just use the 15927 * request_blkno. 15928 */ 15929 err_blkno = (diskaddr_t)request_blkno; 15930 } 15931 15932 /* 15933 * The following will log the buffer contents for the release driver 15934 * if the SD_LOGMASK_DIAG bit of sd_level_mask is set, or the error 15935 * level is set to verbose.
15936 */ 15937 sd_dump_memory(un, SD_LOG_IO, "Failed CDB", 15938 (uchar_t *)pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX); 15939 sd_dump_memory(un, SD_LOG_IO, "Sense Data", 15940 (uchar_t *)sensep, SENSE_LENGTH, SD_LOG_HEX); 15941 15942 if (pfa_flag == FALSE) { 15943 /* This is normally only set for USCSI */ 15944 if ((pktp->pkt_flags & FLAG_SILENT) != 0) { 15945 return; 15946 } 15947 15948 if ((SD_IS_BUFIO(xp) == TRUE) && 15949 (((sd_level_mask & SD_LOGMASK_DIAG) == 0) && 15950 (severity < sd_error_level))) { 15951 return; 15952 } 15953 } 15954 15955 /* 15956 * Check for Sonoma Failover and keep a count of how many failed I/O's 15957 */ 15958 if ((SD_IS_LSI(un)) && 15959 (scsi_sense_key(sensep) == KEY_ILLEGAL_REQUEST) && 15960 (scsi_sense_asc(sensep) == 0x94) && 15961 (scsi_sense_ascq(sensep) == 0x01)) { 15962 un->un_sonoma_failure_count++; 15963 if (un->un_sonoma_failure_count > 1) { 15964 return; 15965 } 15966 } 15967 15968 scsi_vu_errmsg(SD_SCSI_DEVP(un), pktp, sd_label, severity, 15969 request_blkno, err_blkno, scsi_cmds, 15970 (struct scsi_extended_sense *)sensep, 15971 un->un_additional_codes, NULL); 15972 } 15973 15974 /* 15975 * Function: sd_sense_key_no_sense 15976 * 15977 * Description: Recovery action when sense data was not received. 15978 * 15979 * Context: May be called from interrupt context 15980 */ 15981 15982 static void 15983 sd_sense_key_no_sense(struct sd_lun *un, struct buf *bp, 15984 struct sd_xbuf *xp, struct scsi_pkt *pktp) 15985 { 15986 struct sd_sense_info si; 15987 15988 ASSERT(un != NULL); 15989 ASSERT(mutex_owned(SD_MUTEX(un))); 15990 ASSERT(bp != NULL); 15991 ASSERT(xp != NULL); 15992 ASSERT(pktp != NULL); 15993 15994 si.ssi_severity = SCSI_ERR_FATAL; 15995 si.ssi_pfa_flag = FALSE; 15996 15997 SD_UPDATE_ERRSTATS(un, sd_softerrs); 15998 15999 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 16000 &si, EIO, (clock_t)0, NULL); 16001 } 16002 16003 16004 /* 16005 * Function: sd_sense_key_recoverable_error 16006 * 16007 * Description: Recovery actions for a SCSI "Recovered Error" sense key. 16008 * 16009 * Context: May be called from interrupt context 16010 */ 16011 16012 static void 16013 sd_sense_key_recoverable_error(struct sd_lun *un, 16014 uint8_t *sense_datap, 16015 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 16016 { 16017 struct sd_sense_info si; 16018 uint8_t asc = scsi_sense_asc(sense_datap); 16019 16020 ASSERT(un != NULL); 16021 ASSERT(mutex_owned(SD_MUTEX(un))); 16022 ASSERT(bp != NULL); 16023 ASSERT(xp != NULL); 16024 ASSERT(pktp != NULL); 16025 16026 /* 16027 * 0x5D: FAILURE PREDICTION THRESHOLD EXCEEDED 16028 */ 16029 if ((asc == 0x5D) && (sd_report_pfa != 0)) { 16030 SD_UPDATE_ERRSTATS(un, sd_rq_pfa_err); 16031 si.ssi_severity = SCSI_ERR_INFO; 16032 si.ssi_pfa_flag = TRUE; 16033 } else { 16034 SD_UPDATE_ERRSTATS(un, sd_softerrs); 16035 SD_UPDATE_ERRSTATS(un, sd_rq_recov_err); 16036 si.ssi_severity = SCSI_ERR_RECOVERED; 16037 si.ssi_pfa_flag = FALSE; 16038 } 16039 16040 if (pktp->pkt_resid == 0) { 16041 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 16042 sd_return_command(un, bp); 16043 return; 16044 } 16045 16046 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 16047 &si, EIO, (clock_t)0, NULL); 16048 } 16049 16050 16051 16052 16053 /* 16054 * Function: sd_sense_key_not_ready 16055 * 16056 * Description: Recovery actions for a SCSI "Not Ready" sense key. 
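 * (The ASC/ASCQ pairs handled below include 0x04/0x00-0x08 for the various LOGICAL UNIT NOT READY causes, 0x05 for a unit that does not respond to selection, and 0x3A for medium not present.)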
16057 * 16058 * Context: May be called from interrupt context 16059 */ 16060 16061 static void 16062 sd_sense_key_not_ready(struct sd_lun *un, 16063 uint8_t *sense_datap, 16064 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 16065 { 16066 struct sd_sense_info si; 16067 uint8_t asc = scsi_sense_asc(sense_datap); 16068 uint8_t ascq = scsi_sense_ascq(sense_datap); 16069 16070 ASSERT(un != NULL); 16071 ASSERT(mutex_owned(SD_MUTEX(un))); 16072 ASSERT(bp != NULL); 16073 ASSERT(xp != NULL); 16074 ASSERT(pktp != NULL); 16075 16076 si.ssi_severity = SCSI_ERR_FATAL; 16077 si.ssi_pfa_flag = FALSE; 16078 16079 /* 16080 * Update error stats after first NOT READY error. Disks may have 16081 * been powered down and may need to be restarted. For CDROMs, 16082 * report NOT READY errors only if media is present. 16083 */ 16084 if ((ISCD(un) && (asc == 0x3A)) || 16085 (xp->xb_nr_retry_count > 0)) { 16086 SD_UPDATE_ERRSTATS(un, sd_harderrs); 16087 SD_UPDATE_ERRSTATS(un, sd_rq_ntrdy_err); 16088 } 16089 16090 /* 16091 * Just fail if the "not ready" retry limit has been reached. 16092 */ 16093 if (xp->xb_nr_retry_count >= un->un_notready_retry_count) { 16094 /* Special check for error message printing for removables. */ 16095 if (un->un_f_has_removable_media && (asc == 0x04) && 16096 (ascq >= 0x04)) { 16097 si.ssi_severity = SCSI_ERR_ALL; 16098 } 16099 goto fail_command; 16100 } 16101 16102 /* 16103 * Check the ASC and ASCQ in the sense data as needed, to determine 16104 * what to do. 16105 */ 16106 switch (asc) { 16107 case 0x04: /* LOGICAL UNIT NOT READY */ 16108 /* 16109 * Disk drives that don't spin up result in a very long delay 16110 * in format without warning messages. We will log a message 16111 * if the error level is set to verbose. 16112 */ 16113 if (sd_error_level < SCSI_ERR_RETRYABLE) { 16114 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16115 "logical unit not ready, resetting disk\n"); 16116 } 16117 16118 /* 16119 * There are different requirements for CDROMs and disks for 16120 * the number of retries. If a CD-ROM is giving this, it is 16121 * probably reading the TOC and is in the process of getting 16122 * ready, so we should keep on trying for a long time to make 16123 * sure that all types of media are taken into account (for 16124 * some media the drive takes a long time to read the TOC). For 16125 * disks we do not want to retry this too many times as this 16126 * can cause a long hang in format when the drive refuses to 16127 * spin up (a very common failure). 16128 */ 16129 switch (ascq) { 16130 case 0x00: /* LUN NOT READY, CAUSE NOT REPORTABLE */ 16131 /* 16132 * Disk drives frequently refuse to spin up which 16133 * results in a very long hang in format without 16134 * warning messages. 16135 * 16136 * Note: This code preserves the legacy behavior of 16137 * comparing xb_nr_retry_count against zero for fibre 16138 * channel targets instead of comparing against the 16139 * un_reset_retry_count value. The reason for this 16140 * discrepancy has been so utterly lost beneath the 16141 * Sands of Time that even Indiana Jones could not 16142 * find it.
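 * (In outline: fibre targets are reset once any NOT READY retry has occurred, while parallel targets are reset only after xb_nr_retry_count exceeds un_reset_retry_count.)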
16143 */ 16144 if (un->un_f_is_fibre == TRUE) { 16145 if (((sd_level_mask & SD_LOGMASK_DIAG) || 16146 (xp->xb_nr_retry_count > 0)) && 16147 (un->un_startstop_timeid == NULL)) { 16148 scsi_log(SD_DEVINFO(un), sd_label, 16149 CE_WARN, "logical unit not ready, " 16150 "resetting disk\n"); 16151 sd_reset_target(un, pktp); 16152 } 16153 } else { 16154 if (((sd_level_mask & SD_LOGMASK_DIAG) || 16155 (xp->xb_nr_retry_count > 16156 un->un_reset_retry_count)) && 16157 (un->un_startstop_timeid == NULL)) { 16158 scsi_log(SD_DEVINFO(un), sd_label, 16159 CE_WARN, "logical unit not ready, " 16160 "resetting disk\n"); 16161 sd_reset_target(un, pktp); 16162 } 16163 } 16164 break; 16165 16166 case 0x01: /* LUN IS IN PROCESS OF BECOMING READY */ 16167 /* 16168 * If the target is in the process of becoming 16169 * ready, just proceed with the retry. This can 16170 * happen with CD-ROMs that take a long time to 16171 * read TOC after a power cycle or reset. 16172 */ 16173 goto do_retry; 16174 16175 case 0x02: /* LUN NOT READY, INITIALIZING CMD REQUIRED */ 16176 break; 16177 16178 case 0x03: /* LUN NOT READY, MANUAL INTERVENTION REQUIRED */ 16179 /* 16180 * Retries cannot help here so just fail right away. 16181 */ 16182 goto fail_command; 16183 16184 case 0x88: 16185 /* 16186 * Vendor-unique code for T3/T4: it indicates a 16187 * path problem in a multipathed config, but as far as 16188 * the target driver is concerned it equates to a fatal 16189 * error, so we should just fail the command right away 16190 * (without printing anything to the console). If this 16191 * is not a T3/T4, fall thru to the default recovery 16192 * action. 16193 * T3/T4 is FC only; no need to check is_fibre. 16194 */ 16195 if (SD_IS_T3(un) || SD_IS_T4(un)) { 16196 sd_return_failed_command(un, bp, EIO); 16197 return; 16198 } 16199 /* FALLTHRU */ 16200 16201 case 0x04: /* LUN NOT READY, FORMAT IN PROGRESS */ 16202 case 0x05: /* LUN NOT READY, REBUILD IN PROGRESS */ 16203 case 0x06: /* LUN NOT READY, RECALCULATION IN PROGRESS */ 16204 case 0x07: /* LUN NOT READY, OPERATION IN PROGRESS */ 16205 case 0x08: /* LUN NOT READY, LONG WRITE IN PROGRESS */ 16206 default: /* Possible future codes in SCSI spec? */ 16207 /* 16208 * For removable-media devices, do not retry if 16209 * ASCQ > 2 as these result mostly from USCSI commands 16210 * on MMC devices issued to check status of an 16211 * operation initiated in immediate mode. Also for 16212 * ASCQ >= 4 do not print console messages as these 16213 * mainly represent a user-initiated operation 16214 * instead of a system failure. 16215 */ 16216 if (un->un_f_has_removable_media) { 16217 si.ssi_severity = SCSI_ERR_ALL; 16218 goto fail_command; 16219 } 16220 break; 16221 } 16222 16223 /* 16224 * As part of our recovery attempt for the NOT READY 16225 * condition, we issue a START STOP UNIT command. However 16226 * we want to wait for a short delay before attempting this 16227 * as there may still be more commands coming back from the 16228 * target with the check condition. To do this we use 16229 * timeout(9F) to call sd_start_stop_unit_callback() after 16230 * the delay interval expires. (sd_start_stop_unit_callback() 16231 * dispatches sd_start_stop_unit_task(), which will issue 16232 * the actual START STOP UNIT command.) The delay interval 16233 * is one-half of the delay that we will use to retry the 16234 * command that generated the NOT READY condition.
		 *
		 * Note that we could just dispatch sd_start_stop_unit_task()
		 * from here and allow it to sleep for the delay interval,
		 * but then we would be tying up the taskq thread
		 * unnecessarily for the duration of the delay.
		 *
		 * Do not issue the START STOP UNIT if the current command
		 * is already a START STOP UNIT.
		 */
16244 		if (pktp->pkt_cdbp[0] == SCMD_START_STOP) {
16245 			break;
16246 		}
16247 
16248 		/*
16249 		 * Do not schedule the timeout if one is already pending.
16250 		 */
16251 		if (un->un_startstop_timeid != NULL) {
16252 			SD_INFO(SD_LOG_ERROR, un,
16253 			    "sd_sense_key_not_ready: restart already issued to"
16254 			    " %s%d\n", ddi_driver_name(SD_DEVINFO(un)),
16255 			    ddi_get_instance(SD_DEVINFO(un)));
16256 			break;
16257 		}
16258 
16259 		/*
16260 		 * Schedule the START STOP UNIT command, then queue the command
16261 		 * for a retry.
16262 		 *
16263 		 * Note: A timeout is not scheduled for this retry because we
16264 		 * want the retry to be serial with the START_STOP_UNIT. The
16265 		 * retry will be started when the START_STOP_UNIT is completed
16266 		 * in sd_start_stop_unit_task.
16267 		 */
16268 		un->un_startstop_timeid = timeout(sd_start_stop_unit_callback,
16269 		    un, SD_BSY_TIMEOUT / 2);
16270 		xp->xb_nr_retry_count++;
16271 		sd_set_retry_bp(un, bp, 0, kstat_waitq_enter);
16272 		return;
16273 
16274 	case 0x05:	/* LOGICAL UNIT DOES NOT RESPOND TO SELECTION */
16275 		if (sd_error_level < SCSI_ERR_RETRYABLE) {
16276 			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
16277 			    "unit does not respond to selection\n");
16278 		}
16279 		break;
16280 
16281 	case 0x3A:	/* MEDIUM NOT PRESENT */
16282 		if (sd_error_level >= SCSI_ERR_FATAL) {
16283 			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
16284 			    "Caddy not inserted in drive\n");
16285 		}
16286 
16287 		sr_ejected(un);
16288 		un->un_mediastate = DKIO_EJECTED;
16289 		/* The state has changed, inform the media watch routines */
16290 		cv_broadcast(&un->un_state_cv);
16291 		/* Just fail if no media is present in the drive. */
16292 		goto fail_command;
16293 
16294 	default:
16295 		if (sd_error_level < SCSI_ERR_RETRYABLE) {
16296 			scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE,
16297 			    "Unit not Ready. Additional sense code 0x%x\n",
16298 			    asc);
16299 		}
16300 		break;
16301 	}
16302 
16303 do_retry:
16304 
16305 	/*
16306 	 * Retry the command, as some targets may report NOT READY for
16307 	 * several seconds after being reset.
16308 	 */
16309 	xp->xb_nr_retry_count++;
16310 	si.ssi_severity = SCSI_ERR_RETRYABLE;
16311 	sd_retry_command(un, bp, SD_RETRIES_NOCHECK, sd_print_sense_msg,
16312 	    &si, EIO, SD_BSY_TIMEOUT, NULL);
16313 
16314 	return;
16315 
16316 fail_command:
16317 	sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
16318 	sd_return_failed_command(un, bp, EIO);
16319 }
16320 
16321 
16322 
16323 /*
16324  * Function: sd_sense_key_medium_or_hardware_error
16325  *
16326  * Description: Recovery actions for a SCSI "Medium Error" or "Hardware Error"
16327  *		sense key.
16328  *
16329  * Context: May be called from interrupt context
16330  */
16331 
16332 static void
16333 sd_sense_key_medium_or_hardware_error(struct sd_lun *un,
16334 	uint8_t *sense_datap,
16335 	struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp)
16336 {
16337 	struct sd_sense_info	si;
16338 	uint8_t sense_key = scsi_sense_key(sense_datap);
16339 	uint8_t asc = scsi_sense_asc(sense_datap);
16340 
16341 	ASSERT(un != NULL);
16342 	ASSERT(mutex_owned(SD_MUTEX(un)));
16343 	ASSERT(bp != NULL);
16344 	ASSERT(xp != NULL);
16345 	ASSERT(pktp != NULL);
16346 
16347 	si.ssi_severity = SCSI_ERR_FATAL;
16348 	si.ssi_pfa_flag = FALSE;
16349 
16350 	if (sense_key == KEY_MEDIUM_ERROR) {
16351 		SD_UPDATE_ERRSTATS(un, sd_rq_media_err);
16352 	}
16353 
16354 	SD_UPDATE_ERRSTATS(un, sd_harderrs);
16355 
16356 	if ((un->un_reset_retry_count != 0) &&
16357 	    (xp->xb_retry_count == un->un_reset_retry_count)) {
16358 		mutex_exit(SD_MUTEX(un));
16359 		/* Do NOT do a RESET_ALL here: too intrusive. (4112858) */
16360 		if (un->un_f_allow_bus_device_reset == TRUE) {
16361 
16362 			boolean_t try_resetting_target = B_TRUE;
16363 
16364 			/*
16365 			 * We need to be able to handle specific ASC values
16366 			 * when we are handling a KEY_HARDWARE_ERROR. In
16367 			 * particular, taking the default action of resetting
16368 			 * the target may not be the appropriate way to
16369 			 * attempt recovery. Resetting a target because of a
16370 			 * single LUN failure victimizes all LUNs on that
16371 			 * target.
16372 			 *
16373 			 * This is true for LSI arrays: if an LSI array
16374 			 * controller returns an ASC of 0x84 (LUN Dead), we
16375 			 * should trust it.
16376 			 */
16377 			if (sense_key == KEY_HARDWARE_ERROR) {
16378 				switch (asc) {
16379 				case 0x84:
16380 					if (SD_IS_LSI(un)) {
16381 						try_resetting_target = B_FALSE;
16382 					}
16383 					break;
16384 				default:
16385 					break;
16386 				}
16387 			}
16388 
16389 			if (try_resetting_target == B_TRUE) {
16390 				int reset_retval = 0;
16391 				if (un->un_f_lun_reset_enabled == TRUE) {
16392 					SD_TRACE(SD_LOG_IO_CORE, un,
16393 					    "sd_sense_key_medium_or_hardware_"
16394 					    "error: issuing RESET_LUN\n");
16395 					reset_retval =
16396 					    scsi_reset(SD_ADDRESS(un),
16397 					    RESET_LUN);
16398 				}
16399 				if (reset_retval == 0) {
16400 					SD_TRACE(SD_LOG_IO_CORE, un,
16401 					    "sd_sense_key_medium_or_hardware_"
16402 					    "error: issuing RESET_TARGET\n");
16403 					(void) scsi_reset(SD_ADDRESS(un),
16404 					    RESET_TARGET);
16405 				}
16406 			}
16407 		}
16408 		mutex_enter(SD_MUTEX(un));
16409 	}
16410 
16411 	/*
16412 	 * This really ought to be a fatal error, but we will retry anyway
16413 	 * as some drives report this as a spurious error.
16414 	 */
16415 	sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg,
16416 	    &si, EIO, (clock_t)0, NULL);
16417 }
16418 
16419 
16420 
16421 /*
16422  * Function: sd_sense_key_illegal_request
16423  *
16424  * Description: Recovery actions for a SCSI "Illegal Request" sense key.
16425 * 16426 * Context: May be called from interrupt context 16427 */ 16428 16429 static void 16430 sd_sense_key_illegal_request(struct sd_lun *un, struct buf *bp, 16431 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16432 { 16433 struct sd_sense_info si; 16434 16435 ASSERT(un != NULL); 16436 ASSERT(mutex_owned(SD_MUTEX(un))); 16437 ASSERT(bp != NULL); 16438 ASSERT(xp != NULL); 16439 ASSERT(pktp != NULL); 16440 16441 SD_UPDATE_ERRSTATS(un, sd_rq_illrq_err); 16442 16443 si.ssi_severity = SCSI_ERR_INFO; 16444 si.ssi_pfa_flag = FALSE; 16445 16446 /* Pointless to retry if the target thinks it's an illegal request */ 16447 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 16448 sd_return_failed_command(un, bp, EIO); 16449 } 16450 16451 16452 16453 16454 /* 16455 * Function: sd_sense_key_unit_attention 16456 * 16457 * Description: Recovery actions for a SCSI "Unit Attention" sense key. 16458 * 16459 * Context: May be called from interrupt context 16460 */ 16461 16462 static void 16463 sd_sense_key_unit_attention(struct sd_lun *un, 16464 uint8_t *sense_datap, 16465 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 16466 { 16467 /* 16468 * For UNIT ATTENTION we allow retries for one minute. Devices 16469 * like Sonoma can return UNIT ATTENTION close to a minute 16470 * under certain conditions. 16471 */ 16472 int retry_check_flag = SD_RETRIES_UA; 16473 boolean_t kstat_updated = B_FALSE; 16474 struct sd_sense_info si; 16475 uint8_t asc = scsi_sense_asc(sense_datap); 16476 16477 ASSERT(un != NULL); 16478 ASSERT(mutex_owned(SD_MUTEX(un))); 16479 ASSERT(bp != NULL); 16480 ASSERT(xp != NULL); 16481 ASSERT(pktp != NULL); 16482 16483 si.ssi_severity = SCSI_ERR_INFO; 16484 si.ssi_pfa_flag = FALSE; 16485 16486 16487 switch (asc) { 16488 case 0x5D: /* FAILURE PREDICTION THRESHOLD EXCEEDED */ 16489 if (sd_report_pfa != 0) { 16490 SD_UPDATE_ERRSTATS(un, sd_rq_pfa_err); 16491 si.ssi_pfa_flag = TRUE; 16492 retry_check_flag = SD_RETRIES_STANDARD; 16493 goto do_retry; 16494 } 16495 16496 break; 16497 16498 case 0x29: /* POWER ON, RESET, OR BUS DEVICE RESET OCCURRED */ 16499 if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) { 16500 un->un_resvd_status |= 16501 (SD_LOST_RESERVE | SD_WANT_RESERVE); 16502 } 16503 #ifdef _LP64 16504 if (un->un_blockcount + 1 > SD_GROUP1_MAX_ADDRESS) { 16505 if (taskq_dispatch(sd_tq, sd_reenable_dsense_task, 16506 un, KM_NOSLEEP) == 0) { 16507 /* 16508 * If we can't dispatch the task we'll just 16509 * live without descriptor sense. We can 16510 * try again on the next "unit attention" 16511 */ 16512 SD_ERROR(SD_LOG_ERROR, un, 16513 "sd_sense_key_unit_attention: " 16514 "Could not dispatch " 16515 "sd_reenable_dsense_task\n"); 16516 } 16517 } 16518 #endif /* _LP64 */ 16519 /* FALLTHRU */ 16520 16521 case 0x28: /* NOT READY TO READY CHANGE, MEDIUM MAY HAVE CHANGED */ 16522 if (!un->un_f_has_removable_media) { 16523 break; 16524 } 16525 16526 /* 16527 * When we get a unit attention from a removable-media device, 16528 * it may be in a state that will take a long time to recover 16529 * (e.g., from a reset). Since we are executing in interrupt 16530 * context here, we cannot wait around for the device to come 16531 * back. So hand this command off to sd_media_change_task() 16532 * for deferred processing under taskq thread context. (Note 16533 * that the command still may be failed if a problem is 16534 * encountered at a later time.) 
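	 *
	 * A note on the dispatch below: taskq_dispatch(9F) is called with
	 * KM_NOSLEEP because we cannot block for memory in interrupt
	 * context. It returns a task ID, or 0 (NULL) if the task could not
	 * be dispatched, which is why the failure branch below tests for
	 * a zero return value.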
16535 	 */
16536 	if (taskq_dispatch(sd_tq, sd_media_change_task, pktp,
16537 	    KM_NOSLEEP) == 0) {
16538 		/*
16539 		 * Cannot dispatch the request so fail the command.
16540 		 */
16541 		SD_UPDATE_ERRSTATS(un, sd_harderrs);
16542 		SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err);
16543 		si.ssi_severity = SCSI_ERR_FATAL;
16544 		sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
16545 		sd_return_failed_command(un, bp, EIO);
16546 	}
16547 
16548 	/*
16549 	 * If the dispatch of sd_media_change_task() failed, we have already
16550 	 * updated the kstats above. If the dispatch succeeded, then
16551 	 * sd_media_change_task() will update the kstats later if it
16552 	 * encounters an error. Either way, set the kstat_updated flag here.
16553 	 */
16554 	kstat_updated = B_TRUE;
16555 
16556 	/*
16557 	 * Either the command has been successfully dispatched to a
16558 	 * taskq for retrying, or the dispatch failed. In either case
16559 	 * do NOT retry again by calling sd_retry_command. This sets up
16560 	 * two retries of the same command; when one completes and
16561 	 * frees the resources, the other will access freed memory,
16562 	 * a bad thing.
16563 	 */
16564 	return;
16565 
16566 	default:
16567 		break;
16568 	}
16569 
16570 	/*
16571 	 * Update the kstats if we haven't done that yet.
16572 	 */
16573 	if (!kstat_updated) {
16574 		SD_UPDATE_ERRSTATS(un, sd_harderrs);
16575 		SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err);
16576 	}
16577 
16578 do_retry:
16579 	sd_retry_command(un, bp, retry_check_flag, sd_print_sense_msg, &si,
16580 	    EIO, SD_UA_RETRY_DELAY, NULL);
16581 }
16582 
16583 
16584 
16585 /*
16586  * Function: sd_sense_key_fail_command
16587  *
16588  * Description: Used to fail a command when we don't like the sense key that
16589  *		was returned.
16590  *
16591  * Context: May be called from interrupt context
16592  */
16593 
16594 static void
16595 sd_sense_key_fail_command(struct sd_lun *un, struct buf *bp,
16596 	struct sd_xbuf *xp, struct scsi_pkt *pktp)
16597 {
16598 	struct sd_sense_info	si;
16599 
16600 	ASSERT(un != NULL);
16601 	ASSERT(mutex_owned(SD_MUTEX(un)));
16602 	ASSERT(bp != NULL);
16603 	ASSERT(xp != NULL);
16604 	ASSERT(pktp != NULL);
16605 
16606 	si.ssi_severity = SCSI_ERR_FATAL;
16607 	si.ssi_pfa_flag = FALSE;
16608 
16609 	sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
16610 	sd_return_failed_command(un, bp, EIO);
16611 }
16612 
16613 
16614 
16615 /*
16616  * Function: sd_sense_key_blank_check
16617  *
16618  * Description: Recovery actions for a SCSI "Blank Check" sense key.
16619  *		Has no monetary connotation.
16620  *
16621  * Context: May be called from interrupt context
16622  */
16623 
16624 static void
16625 sd_sense_key_blank_check(struct sd_lun *un, struct buf *bp,
16626 	struct sd_xbuf *xp, struct scsi_pkt *pktp)
16627 {
16628 	struct sd_sense_info	si;
16629 
16630 	ASSERT(un != NULL);
16631 	ASSERT(mutex_owned(SD_MUTEX(un)));
16632 	ASSERT(bp != NULL);
16633 	ASSERT(xp != NULL);
16634 	ASSERT(pktp != NULL);
16635 
16636 	/*
16637 	 * Blank check is not fatal for removable devices, therefore
16638 	 * it does not require a console message.
16639 	 */
16640 	si.ssi_severity = (un->un_f_has_removable_media) ? SCSI_ERR_ALL :
16641 	    SCSI_ERR_FATAL;
16642 	si.ssi_pfa_flag = FALSE;
16643 
16644 	sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
16645 	sd_return_failed_command(un, bp, EIO);
16646 }
16647 
16648 
16649 
16650 
16651 /*
16652  * Function: sd_sense_key_aborted_command
16653  *
16654  * Description: Recovery actions for a SCSI "Aborted Command" sense key.
16655  *
16656  * Context: May be called from interrupt context
16657  */
16658 
16659 static void
16660 sd_sense_key_aborted_command(struct sd_lun *un, struct buf *bp,
16661 	struct sd_xbuf *xp, struct scsi_pkt *pktp)
16662 {
16663 	struct sd_sense_info	si;
16664 
16665 	ASSERT(un != NULL);
16666 	ASSERT(mutex_owned(SD_MUTEX(un)));
16667 	ASSERT(bp != NULL);
16668 	ASSERT(xp != NULL);
16669 	ASSERT(pktp != NULL);
16670 
16671 	si.ssi_severity = SCSI_ERR_FATAL;
16672 	si.ssi_pfa_flag = FALSE;
16673 
16674 	SD_UPDATE_ERRSTATS(un, sd_harderrs);
16675 
16676 	/*
16677 	 * This really ought to be a fatal error, but we will retry anyway
16678 	 * as some drives report this as a spurious error.
16679 	 */
16680 	sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg,
16681 	    &si, EIO, drv_usectohz(100000), NULL);
16682 }
16683 
16684 
16685 
16686 /*
16687  * Function: sd_sense_key_default
16688  *
16689  * Description: Default recovery action for several SCSI sense keys (basically
16690  *		attempts a retry).
16691  *
16692  * Context: May be called from interrupt context
16693  */
16694 
16695 static void
16696 sd_sense_key_default(struct sd_lun *un,
16697 	uint8_t *sense_datap,
16698 	struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp)
16699 {
16700 	struct sd_sense_info	si;
16701 	uint8_t sense_key = scsi_sense_key(sense_datap);
16702 
16703 	ASSERT(un != NULL);
16704 	ASSERT(mutex_owned(SD_MUTEX(un)));
16705 	ASSERT(bp != NULL);
16706 	ASSERT(xp != NULL);
16707 	ASSERT(pktp != NULL);
16708 
16709 	SD_UPDATE_ERRSTATS(un, sd_harderrs);
16710 
16711 	/*
16712 	 * Undecoded sense key. Attempt retries and hope that they will fix
16713 	 * the problem. Otherwise, we're dead.
16714 	 */
16715 	if ((pktp->pkt_flags & FLAG_SILENT) == 0) {
16716 		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
16717 		    "Unhandled Sense Key '%s'\n", sense_keys[sense_key]);
16718 	}
16719 
16720 	si.ssi_severity = SCSI_ERR_FATAL;
16721 	si.ssi_pfa_flag = FALSE;
16722 
16723 	sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg,
16724 	    &si, EIO, (clock_t)0, NULL);
16725 }
16726 
16727 
16728 
16729 /*
16730  * Function: sd_print_retry_msg
16731  *
16732  * Description: Print a message indicating the retry action being taken.
16733  *
16734  * Arguments: un - ptr to associated softstate
16735  *	bp - ptr to buf(9S) for the command
16736  *	arg - not used.
16737  *	flag - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED,
16738  *	    or SD_NO_RETRY_ISSUED
16739  *
16740  * Context: May be called from interrupt context
16741  */
16742 /* ARGSUSED */
16743 static void
16744 sd_print_retry_msg(struct sd_lun *un, struct buf *bp, void *arg, int flag)
16745 {
16746 	struct sd_xbuf *xp;
16747 	struct scsi_pkt *pktp;
16748 	char *reasonp;
16749 	char *msgp;
16750 
16751 	ASSERT(un != NULL);
16752 	ASSERT(mutex_owned(SD_MUTEX(un)));
16753 	ASSERT(bp != NULL);
16754 	pktp = SD_GET_PKTP(bp);
16755 	ASSERT(pktp != NULL);
16756 	xp = SD_GET_XBUF(bp);
16757 	ASSERT(xp != NULL);
16758 
16759 	ASSERT(!mutex_owned(&un->un_pm_mutex));
16760 	mutex_enter(&un->un_pm_mutex);
16761 	if ((un->un_state == SD_STATE_SUSPENDED) ||
16762 	    (SD_DEVICE_IS_IN_LOW_POWER(un)) ||
16763 	    (pktp->pkt_flags & FLAG_SILENT)) {
16764 		mutex_exit(&un->un_pm_mutex);
16765 		goto update_pkt_reason;
16766 	}
16767 	mutex_exit(&un->un_pm_mutex);
16768 
16769 	/*
16770 	 * Suppress messages if they all have the same pkt_reason; with
16771 	 * tagged queueing (TQ), many commands (up to 256) can be returned
16772 	 * with the same pkt_reason. If we are in panic, suppress the retry
	 * messages.
16773 	 */
16774 	switch (flag) {
16775 	case SD_NO_RETRY_ISSUED:
16776 		msgp = "giving up";
16777 		break;
16778 	case SD_IMMEDIATE_RETRY_ISSUED:
16779 	case SD_DELAYED_RETRY_ISSUED:
16780 		if (ddi_in_panic() || (un->un_state == SD_STATE_OFFLINE) ||
16781 		    ((pktp->pkt_reason == un->un_last_pkt_reason) &&
16782 		    (sd_error_level != SCSI_ERR_ALL))) {
16783 			return;
16784 		}
16785 		msgp = "retrying command";
16786 		break;
16787 	default:
16788 		goto update_pkt_reason;
16789 	}
16790 
16791 	reasonp = (((pktp->pkt_statistics & STAT_PERR) != 0) ? "parity error" :
16792 	    scsi_rname(pktp->pkt_reason));
16793 
16794 	scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
16795 	    "SCSI transport failed: reason '%s': %s\n", reasonp, msgp);
16796 
16797 update_pkt_reason:
16798 	/*
16799 	 * Update un->un_last_pkt_reason with the value in pktp->pkt_reason.
16800 	 * This is to prevent multiple console messages for the same failure
16801 	 * condition. Note that un->un_last_pkt_reason is NOT restored if &
16802 	 * when the command is retried successfully because there still may be
16803 	 * more commands coming back with the same value of pktp->pkt_reason.
16804 	 */
16805 	if ((pktp->pkt_reason != CMD_CMPLT) || (xp->xb_retry_count == 0)) {
16806 		un->un_last_pkt_reason = pktp->pkt_reason;
16807 	}
16808 }
16809 
16810 
16811 /*
16812  * Function: sd_print_cmd_incomplete_msg
16813  *
16814  * Description: Message logging function for a SCSA "CMD_INCOMPLETE"
16815  *		pkt_reason.
16816  *
16817  * Arguments: un - ptr to associated softstate
16818  *	bp - ptr to buf(9S) for the command
16819  *	arg - passed to sd_print_retry_msg()
16820  *	code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED,
16821  *	    or SD_NO_RETRY_ISSUED
16822  *
16823  * Context: May be called from interrupt context
16824  */
16825 
16826 static void
16827 sd_print_cmd_incomplete_msg(struct sd_lun *un, struct buf *bp, void *arg,
16828 	int code)
16829 {
16830 	dev_info_t *dip;
16831 
16832 	ASSERT(un != NULL);
16833 	ASSERT(mutex_owned(SD_MUTEX(un)));
16834 	ASSERT(bp != NULL);
16835 
16836 	switch (code) {
16837 	case SD_NO_RETRY_ISSUED:
16838 		/* Command was failed. Someone turned off this target? */
16839 		if (un->un_state != SD_STATE_OFFLINE) {
16840 			/*
16841 			 * Suppress the message if we are detaching and the
16842 			 * device has been disconnected. Note that
16843 			 * DEVI_IS_DEVICE_REMOVED is a consolidation-private
16844 			 * interface and not part of the DDI.
16845 			 */
16846 			dip = un->un_sd->sd_dev;
16847 			if (!(DEVI_IS_DETACHING(dip) &&
16848 			    DEVI_IS_DEVICE_REMOVED(dip))) {
16849 				scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
16850 				    "disk not responding to selection\n");
16851 			}
16852 			New_state(un, SD_STATE_OFFLINE);
16853 		}
16854 		break;
16855 
16856 	case SD_DELAYED_RETRY_ISSUED:
16857 	case SD_IMMEDIATE_RETRY_ISSUED:
16858 	default:
16859 		/* Command was successfully queued for retry */
16860 		sd_print_retry_msg(un, bp, arg, code);
16861 		break;
16862 	}
16863 }
16864 
16865 
16866 /*
16867  * Function: sd_pkt_reason_cmd_incomplete
16868  *
16869  * Description: Recovery actions for a SCSA "CMD_INCOMPLETE" pkt_reason.
16869 * 16870 * Context: May be called from interrupt context 16871 */ 16872 16873 static void 16874 sd_pkt_reason_cmd_incomplete(struct sd_lun *un, struct buf *bp, 16875 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16876 { 16877 int flag = SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE; 16878 16879 ASSERT(un != NULL); 16880 ASSERT(mutex_owned(SD_MUTEX(un))); 16881 ASSERT(bp != NULL); 16882 ASSERT(xp != NULL); 16883 ASSERT(pktp != NULL); 16884 16885 /* Do not do a reset if selection did not complete */ 16886 /* Note: Should this not just check the bit? */ 16887 if (pktp->pkt_state != STATE_GOT_BUS) { 16888 SD_UPDATE_ERRSTATS(un, sd_transerrs); 16889 sd_reset_target(un, pktp); 16890 } 16891 16892 /* 16893 * If the target was not successfully selected, then set 16894 * SD_RETRIES_FAILFAST to indicate that we lost communication 16895 * with the target, and further retries and/or commands are 16896 * likely to take a long time. 16897 */ 16898 if ((pktp->pkt_state & STATE_GOT_TARGET) == 0) { 16899 flag |= SD_RETRIES_FAILFAST; 16900 } 16901 16902 SD_UPDATE_RESERVATION_STATUS(un, pktp); 16903 16904 sd_retry_command(un, bp, flag, 16905 sd_print_cmd_incomplete_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 16906 } 16907 16908 16909 16910 /* 16911 * Function: sd_pkt_reason_cmd_tran_err 16912 * 16913 * Description: Recovery actions for a SCSA "CMD_TRAN_ERR" pkt_reason. 16914 * 16915 * Context: May be called from interrupt context 16916 */ 16917 16918 static void 16919 sd_pkt_reason_cmd_tran_err(struct sd_lun *un, struct buf *bp, 16920 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16921 { 16922 ASSERT(un != NULL); 16923 ASSERT(mutex_owned(SD_MUTEX(un))); 16924 ASSERT(bp != NULL); 16925 ASSERT(xp != NULL); 16926 ASSERT(pktp != NULL); 16927 16928 /* 16929 * Do not reset if we got a parity error, or if 16930 * selection did not complete. 16931 */ 16932 SD_UPDATE_ERRSTATS(un, sd_harderrs); 16933 /* Note: Should this not just check the bit for pkt_state? */ 16934 if (((pktp->pkt_statistics & STAT_PERR) == 0) && 16935 (pktp->pkt_state != STATE_GOT_BUS)) { 16936 SD_UPDATE_ERRSTATS(un, sd_transerrs); 16937 sd_reset_target(un, pktp); 16938 } 16939 16940 SD_UPDATE_RESERVATION_STATUS(un, pktp); 16941 16942 sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE), 16943 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 16944 } 16945 16946 16947 16948 /* 16949 * Function: sd_pkt_reason_cmd_reset 16950 * 16951 * Description: Recovery actions for a SCSA "CMD_RESET" pkt_reason. 16952 * 16953 * Context: May be called from interrupt context 16954 */ 16955 16956 static void 16957 sd_pkt_reason_cmd_reset(struct sd_lun *un, struct buf *bp, 16958 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16959 { 16960 ASSERT(un != NULL); 16961 ASSERT(mutex_owned(SD_MUTEX(un))); 16962 ASSERT(bp != NULL); 16963 ASSERT(xp != NULL); 16964 ASSERT(pktp != NULL); 16965 16966 /* The target may still be running the command, so try to reset. */ 16967 SD_UPDATE_ERRSTATS(un, sd_transerrs); 16968 sd_reset_target(un, pktp); 16969 16970 SD_UPDATE_RESERVATION_STATUS(un, pktp); 16971 16972 /* 16973 * If pkt_reason is CMD_RESET chances are that this pkt got 16974 * reset because another target on this bus caused it. The target 16975 * that caused it should get CMD_TIMEOUT with pkt_statistics 16976 * of STAT_TIMEOUT/STAT_DEV_RESET. 
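	 *
	 * A hedged note on the retry flags below: SD_RETRIES_VICTIM is
	 * taken here to select the victim retry limit rather than the
	 * standard limit, giving the victim of a third-party reset some
	 * extra attempts. This is an assumption based on the flag name;
	 * see the retry count definitions in sddef.h for the specifics.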
16977 */ 16978 16979 sd_retry_command(un, bp, (SD_RETRIES_VICTIM | SD_RETRIES_ISOLATE), 16980 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 16981 } 16982 16983 16984 16985 16986 /* 16987 * Function: sd_pkt_reason_cmd_aborted 16988 * 16989 * Description: Recovery actions for a SCSA "CMD_ABORTED" pkt_reason. 16990 * 16991 * Context: May be called from interrupt context 16992 */ 16993 16994 static void 16995 sd_pkt_reason_cmd_aborted(struct sd_lun *un, struct buf *bp, 16996 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16997 { 16998 ASSERT(un != NULL); 16999 ASSERT(mutex_owned(SD_MUTEX(un))); 17000 ASSERT(bp != NULL); 17001 ASSERT(xp != NULL); 17002 ASSERT(pktp != NULL); 17003 17004 /* The target may still be running the command, so try to reset. */ 17005 SD_UPDATE_ERRSTATS(un, sd_transerrs); 17006 sd_reset_target(un, pktp); 17007 17008 SD_UPDATE_RESERVATION_STATUS(un, pktp); 17009 17010 /* 17011 * If pkt_reason is CMD_ABORTED chances are that this pkt got 17012 * aborted because another target on this bus caused it. The target 17013 * that caused it should get CMD_TIMEOUT with pkt_statistics 17014 * of STAT_TIMEOUT/STAT_DEV_RESET. 17015 */ 17016 17017 sd_retry_command(un, bp, (SD_RETRIES_VICTIM | SD_RETRIES_ISOLATE), 17018 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 17019 } 17020 17021 17022 17023 /* 17024 * Function: sd_pkt_reason_cmd_timeout 17025 * 17026 * Description: Recovery actions for a SCSA "CMD_TIMEOUT" pkt_reason. 17027 * 17028 * Context: May be called from interrupt context 17029 */ 17030 17031 static void 17032 sd_pkt_reason_cmd_timeout(struct sd_lun *un, struct buf *bp, 17033 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17034 { 17035 ASSERT(un != NULL); 17036 ASSERT(mutex_owned(SD_MUTEX(un))); 17037 ASSERT(bp != NULL); 17038 ASSERT(xp != NULL); 17039 ASSERT(pktp != NULL); 17040 17041 17042 SD_UPDATE_ERRSTATS(un, sd_transerrs); 17043 sd_reset_target(un, pktp); 17044 17045 SD_UPDATE_RESERVATION_STATUS(un, pktp); 17046 17047 /* 17048 * A command timeout indicates that we could not establish 17049 * communication with the target, so set SD_RETRIES_FAILFAST 17050 * as further retries/commands are likely to take a long time. 17051 */ 17052 sd_retry_command(un, bp, 17053 (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE | SD_RETRIES_FAILFAST), 17054 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 17055 } 17056 17057 17058 17059 /* 17060 * Function: sd_pkt_reason_cmd_unx_bus_free 17061 * 17062 * Description: Recovery actions for a SCSA "CMD_UNX_BUS_FREE" pkt_reason. 17063 * 17064 * Context: May be called from interrupt context 17065 */ 17066 17067 static void 17068 sd_pkt_reason_cmd_unx_bus_free(struct sd_lun *un, struct buf *bp, 17069 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17070 { 17071 void (*funcp)(struct sd_lun *un, struct buf *bp, void *arg, int code); 17072 17073 ASSERT(un != NULL); 17074 ASSERT(mutex_owned(SD_MUTEX(un))); 17075 ASSERT(bp != NULL); 17076 ASSERT(xp != NULL); 17077 ASSERT(pktp != NULL); 17078 17079 SD_UPDATE_ERRSTATS(un, sd_harderrs); 17080 SD_UPDATE_RESERVATION_STATUS(un, pktp); 17081 17082 funcp = ((pktp->pkt_statistics & STAT_PERR) == 0) ? 17083 sd_print_retry_msg : NULL; 17084 17085 sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE), 17086 funcp, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 17087 } 17088 17089 17090 /* 17091 * Function: sd_pkt_reason_cmd_tag_reject 17092 * 17093 * Description: Recovery actions for a SCSA "CMD_TAG_REJECT" pkt_reason. 
17094 * 17095 * Context: May be called from interrupt context 17096 */ 17097 17098 static void 17099 sd_pkt_reason_cmd_tag_reject(struct sd_lun *un, struct buf *bp, 17100 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17101 { 17102 ASSERT(un != NULL); 17103 ASSERT(mutex_owned(SD_MUTEX(un))); 17104 ASSERT(bp != NULL); 17105 ASSERT(xp != NULL); 17106 ASSERT(pktp != NULL); 17107 17108 SD_UPDATE_ERRSTATS(un, sd_harderrs); 17109 pktp->pkt_flags = 0; 17110 un->un_tagflags = 0; 17111 if (un->un_f_opt_queueing == TRUE) { 17112 un->un_throttle = min(un->un_throttle, 3); 17113 } else { 17114 un->un_throttle = 1; 17115 } 17116 mutex_exit(SD_MUTEX(un)); 17117 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1); 17118 mutex_enter(SD_MUTEX(un)); 17119 17120 SD_UPDATE_RESERVATION_STATUS(un, pktp); 17121 17122 /* Legacy behavior not to check retry counts here. */ 17123 sd_retry_command(un, bp, (SD_RETRIES_NOCHECK | SD_RETRIES_ISOLATE), 17124 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 17125 } 17126 17127 17128 /* 17129 * Function: sd_pkt_reason_default 17130 * 17131 * Description: Default recovery actions for SCSA pkt_reason values that 17132 * do not have more explicit recovery actions. 17133 * 17134 * Context: May be called from interrupt context 17135 */ 17136 17137 static void 17138 sd_pkt_reason_default(struct sd_lun *un, struct buf *bp, 17139 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17140 { 17141 ASSERT(un != NULL); 17142 ASSERT(mutex_owned(SD_MUTEX(un))); 17143 ASSERT(bp != NULL); 17144 ASSERT(xp != NULL); 17145 ASSERT(pktp != NULL); 17146 17147 SD_UPDATE_ERRSTATS(un, sd_transerrs); 17148 sd_reset_target(un, pktp); 17149 17150 SD_UPDATE_RESERVATION_STATUS(un, pktp); 17151 17152 sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE), 17153 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 17154 } 17155 17156 17157 17158 /* 17159 * Function: sd_pkt_status_check_condition 17160 * 17161 * Description: Recovery actions for a "STATUS_CHECK" SCSI command status. 17162 * 17163 * Context: May be called from interrupt context 17164 */ 17165 17166 static void 17167 sd_pkt_status_check_condition(struct sd_lun *un, struct buf *bp, 17168 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17169 { 17170 ASSERT(un != NULL); 17171 ASSERT(mutex_owned(SD_MUTEX(un))); 17172 ASSERT(bp != NULL); 17173 ASSERT(xp != NULL); 17174 ASSERT(pktp != NULL); 17175 17176 SD_TRACE(SD_LOG_IO, un, "sd_pkt_status_check_condition: " 17177 "entry: buf:0x%p xp:0x%p\n", bp, xp); 17178 17179 /* 17180 * If ARQ is NOT enabled, then issue a REQUEST SENSE command (the 17181 * command will be retried after the request sense). Otherwise, retry 17182 * the command. Note: we are issuing the request sense even though the 17183 * retry limit may have been reached for the failed command. 
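	 *
	 * Background for the ARQ branch below: with auto request sense
	 * (ARQ) enabled, the HBA fetches the sense data automatically
	 * along with the CHECK CONDITION status, so no explicit REQUEST
	 * SENSE command is needed and the failed command can simply be
	 * retried.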
17184 	 */
17185 	if (un->un_f_arq_enabled == FALSE) {
17186 		SD_INFO(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: "
17187 		    "no ARQ, sending request sense command\n");
17188 		sd_send_request_sense_command(un, bp, pktp);
17189 	} else {
17190 		SD_INFO(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: "
17191 		    "ARQ, retrying request sense command\n");
17192 #if defined(__i386) || defined(__amd64)
17193 		/*
17194 		 * The SD_RETRY_DELAY value needs to be adjusted here
17195 		 * whenever SD_RETRY_DELAY changes in sddef.h.
17196 		 */
17197 		sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL, EIO,
17198 		    un->un_f_is_fibre ? drv_usectohz(100000) : (clock_t)0,
17199 		    NULL);
17200 #else
17201 		sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL,
17202 		    EIO, SD_RETRY_DELAY, NULL);
17203 #endif
17204 	}
17205 
17206 	SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: exit\n");
17207 }
17208 
17209 
17210 /*
17211  * Function: sd_pkt_status_busy
17212  *
17213  * Description: Recovery actions for a "STATUS_BUSY" SCSI command status.
17214  *
17215  * Context: May be called from interrupt context
17216  */
17217 
17218 static void
17219 sd_pkt_status_busy(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
17220 	struct scsi_pkt *pktp)
17221 {
17222 	ASSERT(un != NULL);
17223 	ASSERT(mutex_owned(SD_MUTEX(un)));
17224 	ASSERT(bp != NULL);
17225 	ASSERT(xp != NULL);
17226 	ASSERT(pktp != NULL);
17227 
17228 	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
17229 	    "sd_pkt_status_busy: entry\n");
17230 
17231 	/* If retries are exhausted, just fail the command. */
17232 	if (xp->xb_retry_count >= un->un_busy_retry_count) {
17233 		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
17234 		    "device busy too long\n");
17235 		sd_return_failed_command(un, bp, EIO);
17236 		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
17237 		    "sd_pkt_status_busy: exit\n");
17238 		return;
17239 	}
17240 	xp->xb_retry_count++;
17241 
17242 	/*
17243 	 * Try to reset the target. However, we do not want to perform
17244 	 * more than one reset if the device continues to fail. The reset
17245 	 * will be performed when the retry count reaches the reset
17246 	 * threshold. This threshold should be set such that at least
17247 	 * one retry is issued before the reset is performed.
17248 	 */
17249 	if (xp->xb_retry_count ==
17250 	    ((un->un_reset_retry_count < 2) ? 2 : un->un_reset_retry_count)) {
17251 		int rval = 0;
17252 		mutex_exit(SD_MUTEX(un));
17253 		if (un->un_f_allow_bus_device_reset == TRUE) {
17254 			/*
17255 			 * First try to reset the LUN; if we cannot then
17256 			 * try to reset the target.
17257 			 */
17258 			if (un->un_f_lun_reset_enabled == TRUE) {
17259 				SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
17260 				    "sd_pkt_status_busy: RESET_LUN\n");
17261 				rval = scsi_reset(SD_ADDRESS(un), RESET_LUN);
17262 			}
17263 			if (rval == 0) {
17264 				SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
17265 				    "sd_pkt_status_busy: RESET_TARGET\n");
17266 				rval = scsi_reset(SD_ADDRESS(un), RESET_TARGET);
17267 			}
17268 		}
17269 		if (rval == 0) {
17270 			/*
17271 			 * If the RESET_LUN and/or RESET_TARGET failed,
17272 			 * try RESET_ALL.
17273 			 */
17274 			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
17275 			    "sd_pkt_status_busy: RESET_ALL\n");
17276 			rval = scsi_reset(SD_ADDRESS(un), RESET_ALL);
17277 		}
17278 		mutex_enter(SD_MUTEX(un));
17279 		if (rval == 0) {
17280 			/*
17281 			 * The RESET_LUN, RESET_TARGET, and/or RESET_ALL failed.
17282 			 * At this point we give up & fail the command.
17283 			 */
17284 			sd_return_failed_command(un, bp, EIO);
17285 			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
17286 			    "sd_pkt_status_busy: exit (failed cmd)\n");
17287 			return;
17288 		}
17289 	}
17290 
17291 	/*
17292 	 * Retry the command. Be sure to specify SD_RETRIES_NOCHECK as
17293 	 * we have already checked the retry counts above.
17294 	 */
17295 	sd_retry_command(un, bp, SD_RETRIES_NOCHECK, NULL, NULL,
17296 	    EIO, SD_BSY_TIMEOUT, NULL);
17297 
17298 	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
17299 	    "sd_pkt_status_busy: exit\n");
17300 }
17301 
17302 
17303 /*
17304  * Function: sd_pkt_status_reservation_conflict
17305  *
17306  * Description: Recovery actions for a "STATUS_RESERVATION_CONFLICT" SCSI
17307  *		command status.
17308  *
17309  * Context: May be called from interrupt context
17310  */
17311 
17312 static void
17313 sd_pkt_status_reservation_conflict(struct sd_lun *un, struct buf *bp,
17314 	struct sd_xbuf *xp, struct scsi_pkt *pktp)
17315 {
17316 	ASSERT(un != NULL);
17317 	ASSERT(mutex_owned(SD_MUTEX(un)));
17318 	ASSERT(bp != NULL);
17319 	ASSERT(xp != NULL);
17320 	ASSERT(pktp != NULL);
17321 
17322 	/*
17323 	 * If the command was PERSISTENT_RESERVATION_[IN|OUT] then the
17324 	 * reservation conflict could be due to various reasons, such as
17325 	 * incorrect keys, the initiator not being registered, or the device
17326 	 * not being reserved. So, we return EACCES to the caller.
17327 	 */
17328 	if (un->un_reservation_type == SD_SCSI3_RESERVATION) {
17329 		int cmd = SD_GET_PKT_OPCODE(pktp);
17330 		if ((cmd == SCMD_PERSISTENT_RESERVE_IN) ||
17331 		    (cmd == SCMD_PERSISTENT_RESERVE_OUT)) {
17332 			sd_return_failed_command(un, bp, EACCES);
17333 			return;
17334 		}
17335 	}
17336 
17337 	un->un_resvd_status |= SD_RESERVATION_CONFLICT;
17338 
17339 	if ((un->un_resvd_status & SD_FAILFAST) != 0) {
17340 		if (sd_failfast_enable != 0) {
17341 			/* By definition, we must panic here.... */
17342 			sd_panic_for_res_conflict(un);
17343 			/*NOTREACHED*/
17344 		}
17345 		SD_ERROR(SD_LOG_IO, un,
17346 		    "sd_handle_resv_conflict: Disk Reserved\n");
17347 		sd_return_failed_command(un, bp, EACCES);
17348 		return;
17349 	}
17350 
17351 	/*
17352 	 * 1147670: retry only if sd_retry_on_reservation_conflict
17353 	 * property is set (default is 1). Retries will not succeed
17354 	 * on a disk reserved by another initiator. HA systems
17355 	 * may reset this via sd.conf to avoid these retries.
17356 	 *
17357 	 * Note: The legacy return code for this failure is EIO, however EACCES
17358 	 * seems more appropriate for a reservation conflict.
17359 	 */
17360 	if (sd_retry_on_reservation_conflict == 0) {
17361 		SD_ERROR(SD_LOG_IO, un,
17362 		    "sd_handle_resv_conflict: Device Reserved\n");
17363 		sd_return_failed_command(un, bp, EIO);
17364 		return;
17365 	}
17366 
17367 	/*
17368 	 * Retry the command if we can.
17369 	 *
17370 	 * Note: The legacy return code for this failure is EIO, however EACCES
17371 	 * seems more appropriate for a reservation conflict.
17372 	 */
17373 	sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL, EIO,
17374 	    (clock_t)2, NULL);
17375 }
17376 
17377 
17378 
17379 /*
17380  * Function: sd_pkt_status_qfull
17381  *
17382  * Description: Handle a QUEUE FULL condition from the target. This can
17383  *		occur if the HBA does not handle the queue full condition.
17384  *		(Basically this means third-party HBAs, as Sun HBAs will
17385  *		handle the queue full condition.) Note that if there are
17386  *		some commands already in the transport, then the queue full
17387  *		has occurred because the queue for this nexus is actually full.
If there are no commands in the transport, then the 17388 * queue full is resulting from some other initiator or lun 17389 * consuming all the resources at the target. 17390 * 17391 * Context: May be called from interrupt context 17392 */ 17393 17394 static void 17395 sd_pkt_status_qfull(struct sd_lun *un, struct buf *bp, 17396 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17397 { 17398 ASSERT(un != NULL); 17399 ASSERT(mutex_owned(SD_MUTEX(un))); 17400 ASSERT(bp != NULL); 17401 ASSERT(xp != NULL); 17402 ASSERT(pktp != NULL); 17403 17404 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17405 "sd_pkt_status_qfull: entry\n"); 17406 17407 /* 17408 * Just lower the QFULL throttle and retry the command. Note that 17409 * we do not limit the number of retries here. 17410 */ 17411 sd_reduce_throttle(un, SD_THROTTLE_QFULL); 17412 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, NULL, NULL, 0, 17413 SD_RESTART_TIMEOUT, NULL); 17414 17415 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17416 "sd_pkt_status_qfull: exit\n"); 17417 } 17418 17419 17420 /* 17421 * Function: sd_reset_target 17422 * 17423 * Description: Issue a scsi_reset(9F), with either RESET_LUN, 17424 * RESET_TARGET, or RESET_ALL. 17425 * 17426 * Context: May be called under interrupt context. 17427 */ 17428 17429 static void 17430 sd_reset_target(struct sd_lun *un, struct scsi_pkt *pktp) 17431 { 17432 int rval = 0; 17433 17434 ASSERT(un != NULL); 17435 ASSERT(mutex_owned(SD_MUTEX(un))); 17436 ASSERT(pktp != NULL); 17437 17438 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reset_target: entry\n"); 17439 17440 /* 17441 * No need to reset if the transport layer has already done so. 17442 */ 17443 if ((pktp->pkt_statistics & 17444 (STAT_BUS_RESET | STAT_DEV_RESET | STAT_ABORTED)) != 0) { 17445 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17446 "sd_reset_target: no reset\n"); 17447 return; 17448 } 17449 17450 mutex_exit(SD_MUTEX(un)); 17451 17452 if (un->un_f_allow_bus_device_reset == TRUE) { 17453 if (un->un_f_lun_reset_enabled == TRUE) { 17454 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17455 "sd_reset_target: RESET_LUN\n"); 17456 rval = scsi_reset(SD_ADDRESS(un), RESET_LUN); 17457 } 17458 if (rval == 0) { 17459 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17460 "sd_reset_target: RESET_TARGET\n"); 17461 rval = scsi_reset(SD_ADDRESS(un), RESET_TARGET); 17462 } 17463 } 17464 17465 if (rval == 0) { 17466 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17467 "sd_reset_target: RESET_ALL\n"); 17468 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL); 17469 } 17470 17471 mutex_enter(SD_MUTEX(un)); 17472 17473 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reset_target: exit\n"); 17474 } 17475 17476 17477 /* 17478 * Function: sd_media_change_task 17479 * 17480 * Description: Recovery action for CDROM to become available. 
17481  *
17482  * Context: Executes in a taskq() thread context
17483  */
17484 
17485 static void
17486 sd_media_change_task(void *arg)
17487 {
17488 	struct scsi_pkt *pktp = arg;
17489 	struct sd_lun *un;
17490 	struct buf *bp;
17491 	struct sd_xbuf *xp;
17492 	int err = 0;
17493 	int retry_count = 0;
17494 	int retry_limit = SD_UNIT_ATTENTION_RETRY/10;
17495 	struct sd_sense_info si;
17496 
17497 	ASSERT(pktp != NULL);
17498 	bp = (struct buf *)pktp->pkt_private;
17499 	ASSERT(bp != NULL);
17500 	xp = SD_GET_XBUF(bp);
17501 	ASSERT(xp != NULL);
17502 	un = SD_GET_UN(bp);
17503 	ASSERT(un != NULL);
17504 	ASSERT(!mutex_owned(SD_MUTEX(un)));
17505 	ASSERT(un->un_f_monitor_media_state);
17506 
17507 	si.ssi_severity = SCSI_ERR_INFO;
17508 	si.ssi_pfa_flag = FALSE;
17509 
17510 	/*
17511 	 * When a reset is issued on a CDROM, it takes a long time to
17512 	 * recover. The first few attempts to read capacity and other things
17513 	 * related to handling unit attention fail (with an ASC of 0x04 and
17514 	 * an ASCQ of 0x01). In that case we want to allow enough retries,
17515 	 * while limiting the retries in other cases of genuine failure,
17516 	 * such as no media in the drive.
17517 	 */
17518 	while (retry_count++ < retry_limit) {
17519 		if ((err = sd_handle_mchange(un)) == 0) {
17520 			break;
17521 		}
17522 		if (err == EAGAIN) {
17523 			retry_limit = SD_UNIT_ATTENTION_RETRY;
17524 		}
17525 		/* Sleep for 0.5 sec. & try again */
17526 		delay(drv_usectohz(500000));
17527 	}
17528 
17529 	/*
17530 	 * Dispatch (retry or fail) the original command here,
17531 	 * along with appropriate console messages....
17532 	 *
17533 	 * Must grab the mutex before calling sd_retry_command,
17534 	 * sd_print_sense_msg and sd_return_failed_command.
17535 	 */
17536 	mutex_enter(SD_MUTEX(un));
17537 	if (err != SD_CMD_SUCCESS) {
17538 		SD_UPDATE_ERRSTATS(un, sd_harderrs);
17539 		SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err);
17540 		si.ssi_severity = SCSI_ERR_FATAL;
17541 		sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
17542 		sd_return_failed_command(un, bp, EIO);
17543 	} else {
17544 		sd_retry_command(un, bp, SD_RETRIES_NOCHECK, sd_print_sense_msg,
17545 		    &si, EIO, (clock_t)0, NULL);
17546 	}
17547 	mutex_exit(SD_MUTEX(un));
17548 }
17549 
17550 
17551 
17552 /*
17553  * Function: sd_handle_mchange
17554  *
17555  * Description: Perform geometry validation and other recovery when a CDROM
17556  *		has been removed from the drive.
17557  *
17558  * Return Code: 0 for success
17559  *	errno-type return code of either sd_send_scsi_DOORLOCK() or
17560  *	sd_send_scsi_READ_CAPACITY()
17561  *
17562  * Context: Executes in a taskq() thread context
17563  */
17564 
17565 static int
17566 sd_handle_mchange(struct sd_lun *un)
17567 {
17568 	uint64_t capacity;
17569 	uint32_t lbasize;
17570 	int rval;
17571 
17572 	ASSERT(!mutex_owned(SD_MUTEX(un)));
17573 	ASSERT(un->un_f_monitor_media_state);
17574 
17575 	if ((rval = sd_send_scsi_READ_CAPACITY(un, &capacity, &lbasize,
17576 	    SD_PATH_DIRECT_PRIORITY)) != 0) {
17577 		return (rval);
17578 	}
17579 
17580 	mutex_enter(SD_MUTEX(un));
17581 	sd_update_block_info(un, lbasize, capacity);
17582 
17583 	if (un->un_errstats != NULL) {
17584 		struct sd_errstats *stp =
17585 		    (struct sd_errstats *)un->un_errstats->ks_data;
17586 		stp->sd_capacity.value.ui64 = (uint64_t)
17587 		    ((uint64_t)un->un_blockcount *
17588 		    (uint64_t)un->un_tgt_blocksize);
17589 	}
17590 
17591 
17592 	/*
17593 	 * Check if the media in the device is writable or not.
17594 	 */
17595 	if (ISCD(un))
17596 		sd_check_for_writable_cd(un, SD_PATH_DIRECT_PRIORITY);
17597 
17598 	/*
17599 	 * Note: Maybe let the strategy/partitioning chain worry about getting
17600 	 * valid geometry.
17601 	 */
17602 	mutex_exit(SD_MUTEX(un));
17603 	cmlb_invalidate(un->un_cmlbhandle, (void *)SD_PATH_DIRECT_PRIORITY);
17604 
17605 
17606 	if (cmlb_validate(un->un_cmlbhandle, 0,
17607 	    (void *)SD_PATH_DIRECT_PRIORITY) != 0) {
17608 		return (EIO);
17609 	} else {
17610 		if (un->un_f_pkstats_enabled) {
17611 			sd_set_pstats(un);
17612 			SD_TRACE(SD_LOG_IO_PARTITION, un,
17613 			    "sd_handle_mchange: un:0x%p pstats created and "
17614 			    "set\n", un);
17615 		}
17616 	}
17617 
17618 
17619 	/*
17620 	 * Try to lock the door.
17621 	 */
17622 	return (sd_send_scsi_DOORLOCK(un, SD_REMOVAL_PREVENT,
17623 	    SD_PATH_DIRECT_PRIORITY));
17624 }
17625 
17626 
17627 /*
17628  * Function: sd_send_scsi_DOORLOCK
17629  *
17630  * Description: Issue the scsi DOOR LOCK command
17631  *
17632  * Arguments: un - pointer to driver soft state (unit) structure for
17633  *	    this target.
17634  *	flag - SD_REMOVAL_ALLOW
17635  *	       SD_REMOVAL_PREVENT
17636  *	path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
17637  *	    the normal command waitq, or SD_PATH_DIRECT_PRIORITY
17638  *	    to use the USCSI "direct" chain and bypass the normal
17639  *	    command waitq. SD_PATH_DIRECT_PRIORITY is used when this
17640  *	    command is issued as part of an error recovery action.
17641  *
17642  * Return Code: 0 - Success
17643  *	errno return code from sd_send_scsi_cmd()
17644  *
17645  * Context: Can sleep.
17646  */
17647 
17648 static int
17649 sd_send_scsi_DOORLOCK(struct sd_lun *un, int flag, int path_flag)
17650 {
17651 	union scsi_cdb cdb;
17652 	struct uscsi_cmd ucmd_buf;
17653 	struct scsi_extended_sense sense_buf;
17654 	int status;
17655 
17656 	ASSERT(un != NULL);
17657 	ASSERT(!mutex_owned(SD_MUTEX(un)));
17658 
17659 	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_DOORLOCK: entry: un:0x%p\n", un);
17660 
17661 	/* already determined doorlock is not supported, fake success */
17662 	if (un->un_f_doorlock_supported == FALSE) {
17663 		return (0);
17664 	}
17665 
17666 	/*
17667 	 * If we are ejecting and see an SD_REMOVAL_PREVENT,
17668 	 * ignore the command so we can complete the eject
17669 	 * operation.
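	 *
	 * For reference, a sketch of the PREVENT ALLOW MEDIUM REMOVAL CDB
	 * built below (byte values inferred from the flag definitions and
	 * the code, not quoted from the spec text):
	 *
	 *	byte 0: SCMD_DOORLOCK (the PREVENT ALLOW MEDIUM REMOVAL opcode)
	 *	byte 4: the flag itself, i.e. SD_REMOVAL_ALLOW to unlock or
	 *		SD_REMOVAL_PREVENT to lock the door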
17670 	 */
17671 	if (flag == SD_REMOVAL_PREVENT) {
17672 		mutex_enter(SD_MUTEX(un));
17673 		if (un->un_f_ejecting == TRUE) {
17674 			mutex_exit(SD_MUTEX(un));
17675 			return (EAGAIN);
17676 		}
17677 		mutex_exit(SD_MUTEX(un));
17678 	}
17679 
17680 	bzero(&cdb, sizeof (cdb));
17681 	bzero(&ucmd_buf, sizeof (ucmd_buf));
17682 
17683 	cdb.scc_cmd = SCMD_DOORLOCK;
17684 	cdb.cdb_opaque[4] = (uchar_t)flag;
17685 
17686 	ucmd_buf.uscsi_cdb = (char *)&cdb;
17687 	ucmd_buf.uscsi_cdblen = CDB_GROUP0;
17688 	ucmd_buf.uscsi_bufaddr = NULL;
17689 	ucmd_buf.uscsi_buflen = 0;
17690 	ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
17691 	ucmd_buf.uscsi_rqlen = sizeof (sense_buf);
17692 	ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT;
17693 	ucmd_buf.uscsi_timeout = 15;
17694 
17695 	SD_TRACE(SD_LOG_IO, un,
17696 	    "sd_send_scsi_DOORLOCK: returning sd_send_scsi_cmd()\n");
17697 
17698 	status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL,
17699 	    UIO_SYSSPACE, path_flag);
17700 
17701 	if ((status == EIO) && (ucmd_buf.uscsi_status == STATUS_CHECK) &&
17702 	    (ucmd_buf.uscsi_rqstatus == STATUS_GOOD) &&
17703 	    (scsi_sense_key((uint8_t *)&sense_buf) == KEY_ILLEGAL_REQUEST)) {
17704 		/* fake success and skip subsequent doorlock commands */
17705 		un->un_f_doorlock_supported = FALSE;
17706 		return (0);
17707 	}
17708 
17709 	return (status);
17710 }
17711 
17712 /*
17713  * Function: sd_send_scsi_READ_CAPACITY
17714  *
17715  * Description: This routine uses the scsi READ CAPACITY command to determine
17716  *	the device capacity in number of blocks and the device native
17717  *	block size. If this function returns a failure, then the
17718  *	values in *capp and *lbap are undefined. If the capacity
17719  *	returned is 0xffffffff then the lun is too large for a
17720  *	normal READ CAPACITY command and the results of a
17721  *	READ CAPACITY 16 will be used instead.
17722  *
17723  * Arguments: un - ptr to soft state struct for the target
17724  *	capp - ptr to unsigned 64-bit variable to receive the
17725  *	    capacity value from the command.
17726  *	lbap - ptr to unsigned 32-bit variable to receive the
17727  *	    block size value from the command
17728  *	path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
17729  *	    the normal command waitq, or SD_PATH_DIRECT_PRIORITY
17730  *	    to use the USCSI "direct" chain and bypass the normal
17731  *	    command waitq. SD_PATH_DIRECT_PRIORITY is used when this
17732  *	    command is issued as part of an error recovery action.
17733  *
17734  * Return Code: 0 - Success
17735  *	EIO - IO error
17736  *	EACCES - Reservation conflict detected
17737  *	EAGAIN - Device is becoming ready
17738  *	errno return code from sd_send_scsi_cmd()
17739  *
17740  * Context: Can sleep. Blocks until command completes.
17741  */
17742 
17743 #define SD_CAPACITY_SIZE sizeof (struct scsi_capacity)
17744 
17745 static int
17746 sd_send_scsi_READ_CAPACITY(struct sd_lun *un, uint64_t *capp, uint32_t *lbap,
17747 	int path_flag)
17748 {
17749 	struct scsi_extended_sense sense_buf;
17750 	struct uscsi_cmd ucmd_buf;
17751 	union scsi_cdb cdb;
17752 	uint32_t *capacity_buf;
17753 	uint64_t capacity;
17754 	uint32_t lbasize;
17755 	int status;
17756 
17757 	ASSERT(un != NULL);
17758 	ASSERT(!mutex_owned(SD_MUTEX(un)));
17759 	ASSERT(capp != NULL);
17760 	ASSERT(lbap != NULL);
17761 
17762 	SD_TRACE(SD_LOG_IO, un,
17763 	    "sd_send_scsi_READ_CAPACITY: entry: un:0x%p\n", un);
17764 
17765 	/*
17766 	 * First send a READ_CAPACITY command to the target.
17767 	 * (This command is mandatory under SCSI-2.)
17768 	 *
17769 	 * Set up the CDB for the READ_CAPACITY command.
The Partial 17770 * Medium Indicator bit is cleared. The address field must be 17771 * zero if the PMI bit is zero. 17772 */ 17773 bzero(&cdb, sizeof (cdb)); 17774 bzero(&ucmd_buf, sizeof (ucmd_buf)); 17775 17776 capacity_buf = kmem_zalloc(SD_CAPACITY_SIZE, KM_SLEEP); 17777 17778 cdb.scc_cmd = SCMD_READ_CAPACITY; 17779 17780 ucmd_buf.uscsi_cdb = (char *)&cdb; 17781 ucmd_buf.uscsi_cdblen = CDB_GROUP1; 17782 ucmd_buf.uscsi_bufaddr = (caddr_t)capacity_buf; 17783 ucmd_buf.uscsi_buflen = SD_CAPACITY_SIZE; 17784 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 17785 ucmd_buf.uscsi_rqlen = sizeof (sense_buf); 17786 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; 17787 ucmd_buf.uscsi_timeout = 60; 17788 17789 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 17790 UIO_SYSSPACE, path_flag); 17791 17792 switch (status) { 17793 case 0: 17794 /* Return failure if we did not get valid capacity data. */ 17795 if (ucmd_buf.uscsi_resid != 0) { 17796 kmem_free(capacity_buf, SD_CAPACITY_SIZE); 17797 return (EIO); 17798 } 17799 17800 /* 17801 * Read capacity and block size from the READ CAPACITY 10 data. 17802 * This data may be adjusted later due to device specific 17803 * issues. 17804 * 17805 * According to the SCSI spec, the READ CAPACITY 10 17806 * command returns the following: 17807 * 17808 * bytes 0-3: Maximum logical block address available. 17809 * (MSB in byte:0 & LSB in byte:3) 17810 * 17811 * bytes 4-7: Block length in bytes 17812 * (MSB in byte:4 & LSB in byte:7) 17813 * 17814 */ 17815 capacity = BE_32(capacity_buf[0]); 17816 lbasize = BE_32(capacity_buf[1]); 17817 17818 /* 17819 * Done with capacity_buf 17820 */ 17821 kmem_free(capacity_buf, SD_CAPACITY_SIZE); 17822 17823 /* 17824 * if the reported capacity is set to all 0xf's, then 17825 * this disk is too large and requires SBC-2 commands. 17826 * Reissue the request using READ CAPACITY 16. 17827 */ 17828 if (capacity == 0xffffffff) { 17829 status = sd_send_scsi_READ_CAPACITY_16(un, &capacity, 17830 &lbasize, path_flag); 17831 if (status != 0) { 17832 return (status); 17833 } 17834 } 17835 break; /* Success! */ 17836 case EIO: 17837 switch (ucmd_buf.uscsi_status) { 17838 case STATUS_RESERVATION_CONFLICT: 17839 status = EACCES; 17840 break; 17841 case STATUS_CHECK: 17842 /* 17843 * Check condition; look for ASC/ASCQ of 0x04/0x01 17844 * (LOGICAL UNIT IS IN PROCESS OF BECOMING READY) 17845 */ 17846 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 17847 (scsi_sense_asc((uint8_t *)&sense_buf) == 0x04) && 17848 (scsi_sense_ascq((uint8_t *)&sense_buf) == 0x01)) { 17849 kmem_free(capacity_buf, SD_CAPACITY_SIZE); 17850 return (EAGAIN); 17851 } 17852 break; 17853 default: 17854 break; 17855 } 17856 /* FALLTHRU */ 17857 default: 17858 kmem_free(capacity_buf, SD_CAPACITY_SIZE); 17859 return (status); 17860 } 17861 17862 /* 17863 * Some ATAPI CD-ROM drives report inaccurate LBA size values 17864 * (2352 and 0 are common) so for these devices always force the value 17865 * to 2048 as required by the ATAPI specs. 17866 */ 17867 if ((un->un_f_cfg_is_atapi == TRUE) && (ISCD(un))) { 17868 lbasize = 2048; 17869 } 17870 17871 /* 17872 * Get the maximum LBA value from the READ CAPACITY data. 17873 * Here we assume that the Partial Medium Indicator (PMI) bit 17874 * was cleared when issuing the command. This means that the LBA 17875 * returned from the device is the LBA of the last logical block 17876 * on the logical unit. The actual logical block count will be 17877 * this value plus one. 
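	 *
	 * A worked example with illustrative values: a device reporting a
	 * maximum LBA of 0x3FFFF with a 2048-byte block length holds
	 * 0x40000 logical blocks; with a 512-byte un_sys_blocksize the
	 * scaling below yields 0x40000 * (2048 / 512) = 0x100000 system
	 * blocks.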
17878 	 *
17879 	 * Currently the capacity is saved in terms of un->un_sys_blocksize,
17880 	 * so scale the capacity value to reflect this.
17881 	 */
17882 	capacity = (capacity + 1) * (lbasize / un->un_sys_blocksize);
17883 
17884 	/*
17885 	 * Copy the values from the READ CAPACITY command into the space
17886 	 * provided by the caller.
17887 	 */
17888 	*capp = capacity;
17889 	*lbap = lbasize;
17890 
17891 	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_READ_CAPACITY: "
17892 	    "capacity:0x%llx lbasize:0x%x\n", capacity, lbasize);
17893 
17894 	/*
17895 	 * Both the lbasize and capacity from the device must be nonzero,
17896 	 * otherwise we assume that the values are not valid and return
17897 	 * failure to the caller. (4203735)
17898 	 */
17899 	if ((capacity == 0) || (lbasize == 0)) {
17900 		return (EIO);
17901 	}
17902 
17903 	return (0);
17904 }
17905 
17906 /*
17907  * Function: sd_send_scsi_READ_CAPACITY_16
17908  *
17909  * Description: This routine uses the scsi READ CAPACITY 16 command to
17910  *	determine the device capacity in number of blocks and the
17911  *	device native block size. If this function returns a failure,
17912  *	then the values in *capp and *lbap are undefined.
17913  *	This routine should always be called by
17914  *	sd_send_scsi_READ_CAPACITY, which will apply any device
17915  *	specific adjustments to capacity and lbasize.
17916  *
17917  * Arguments: un - ptr to soft state struct for the target
17918  *	capp - ptr to unsigned 64-bit variable to receive the
17919  *	    capacity value from the command.
17920  *	lbap - ptr to unsigned 32-bit variable to receive the
17921  *	    block size value from the command
17922  *	path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
17923  *	    the normal command waitq, or SD_PATH_DIRECT_PRIORITY
17924  *	    to use the USCSI "direct" chain and bypass the normal
17925  *	    command waitq. SD_PATH_DIRECT_PRIORITY is used when
17926  *	    this command is issued as part of an error recovery
17927  *	    action.
17928  *
17929  * Return Code: 0 - Success
17930  *	EIO - IO error
17931  *	EACCES - Reservation conflict detected
17932  *	EAGAIN - Device is becoming ready
17933  *	errno return code from sd_send_scsi_cmd()
17934  *
17935  * Context: Can sleep. Blocks until command completes.
17936  */
17937 
17938 #define SD_CAPACITY_16_SIZE sizeof (struct scsi_capacity_16)
17939 
17940 static int
17941 sd_send_scsi_READ_CAPACITY_16(struct sd_lun *un, uint64_t *capp,
17942 	uint32_t *lbap, int path_flag)
17943 {
17944 	struct scsi_extended_sense sense_buf;
17945 	struct uscsi_cmd ucmd_buf;
17946 	union scsi_cdb cdb;
17947 	uint64_t *capacity16_buf;
17948 	uint64_t capacity;
17949 	uint32_t lbasize;
17950 	int status;
17951 
17952 	ASSERT(un != NULL);
17953 	ASSERT(!mutex_owned(SD_MUTEX(un)));
17954 	ASSERT(capp != NULL);
17955 	ASSERT(lbap != NULL);
17956 
17957 	SD_TRACE(SD_LOG_IO, un,
17958 	    "sd_send_scsi_READ_CAPACITY_16: entry: un:0x%p\n", un);
17959 
17960 	/*
17961 	 * First send a READ_CAPACITY_16 command to the target.
17962 	 *
17963 	 * Set up the CDB for the READ_CAPACITY_16 command. The Partial
17964 	 * Medium Indicator bit is cleared. The address field must be
17965 	 * zero if the PMI bit is zero.
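	 *
	 * A sketch of the resulting CDB, assembled from the code below (the
	 * service action value is whatever SSVC_ACTION_READ_CAPACITY_G4
	 * expands to in the headers):
	 *
	 *	byte 0:       SCMD_SVC_ACTION_IN_G4 (0x9E)
	 *	byte 1:       SSVC_ACTION_READ_CAPACITY_G4
	 *	bytes 2-9:    logical block address (zero, since PMI is zero)
	 *	bytes 10-13:  allocation length, filled in via FORMG4COUNT()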
17966 	 */
17967 	bzero(&cdb, sizeof (cdb));
17968 	bzero(&ucmd_buf, sizeof (ucmd_buf));
17969 
17970 	capacity16_buf = kmem_zalloc(SD_CAPACITY_16_SIZE, KM_SLEEP);
17971 
17972 	ucmd_buf.uscsi_cdb = (char *)&cdb;
17973 	ucmd_buf.uscsi_cdblen = CDB_GROUP4;
17974 	ucmd_buf.uscsi_bufaddr = (caddr_t)capacity16_buf;
17975 	ucmd_buf.uscsi_buflen = SD_CAPACITY_16_SIZE;
17976 	ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
17977 	ucmd_buf.uscsi_rqlen = sizeof (sense_buf);
17978 	ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT;
17979 	ucmd_buf.uscsi_timeout = 60;
17980 
17981 	/*
17982 	 * Read Capacity (16) is a Service Action In command. One
17983 	 * command byte (0x9E) is overloaded for multiple operations,
17984 	 * with the second CDB byte specifying the desired operation.
17985 	 */
17986 	cdb.scc_cmd = SCMD_SVC_ACTION_IN_G4;
17987 	cdb.cdb_opaque[1] = SSVC_ACTION_READ_CAPACITY_G4;
17988 
17989 	/*
17990 	 * Fill in the allocation length field.
17991 	 */
17992 	FORMG4COUNT(&cdb, ucmd_buf.uscsi_buflen);
17993 
17994 	status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL,
17995 	    UIO_SYSSPACE, path_flag);
17996 
17997 	switch (status) {
17998 	case 0:
17999 		/* Return failure if we did not get valid capacity data. */
18000 		if (ucmd_buf.uscsi_resid > 20) {
18001 			kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE);
18002 			return (EIO);
18003 		}
18004 
18005 		/*
18006 		 * Read capacity and block size from the READ CAPACITY 16 data.
18007 		 * This data may be adjusted later due to device specific
18008 		 * issues.
18009 		 *
18010 		 * According to the SCSI spec, the READ CAPACITY 16
18011 		 * command returns the following:
18012 		 *
18013 		 *  bytes 0-7: Maximum logical block address available.
18014 		 *		(MSB in byte:0 & LSB in byte:7)
18015 		 *
18016 		 *  bytes 8-11: Block length in bytes
18017 		 *		(MSB in byte:8 & LSB in byte:11)
18018 		 *
18019 		 */
18020 		capacity = BE_64(capacity16_buf[0]);
18021 		lbasize = BE_32(*(uint32_t *)&capacity16_buf[1]);
18022 
18023 		/*
18024 		 * Done with capacity16_buf.
18025 		 */
18026 		kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE);
18027 
18028 		/*
18029 		 * If the reported capacity is set to all 0xf's, then
18030 		 * this disk is too large. This could only happen with
18031 		 * a device that supports LBAs larger than 64 bits, which
18032 		 * are not defined by any current T10 standards.
18033 		 */
18034 		if (capacity == 0xffffffffffffffff) {
18035 			return (EIO);
18036 		}
18037 		break;	/* Success! */
18038 	case EIO:
18039 		switch (ucmd_buf.uscsi_status) {
18040 		case STATUS_RESERVATION_CONFLICT:
18041 			status = EACCES;
18042 			break;
18043 		case STATUS_CHECK:
18044 			/*
18045 			 * Check condition; look for ASC/ASCQ of 0x04/0x01
18046 			 * (LOGICAL UNIT IS IN PROCESS OF BECOMING READY)
18047 			 */
18048 			if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) &&
18049 			    (scsi_sense_asc((uint8_t *)&sense_buf) == 0x04) &&
18050 			    (scsi_sense_ascq((uint8_t *)&sense_buf) == 0x01)) {
18051 				kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE);
18052 				return (EAGAIN);
18053 			}
18054 			break;
18055 		default:
18056 			break;
18057 		}
18058 		/* FALLTHRU */
18059 	default:
18060 		kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE);
18061 		return (status);
18062 	}
18063 
18064 	*capp = capacity;
18065 	*lbap = lbasize;
18066 
18067 	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_READ_CAPACITY_16: "
18068 	    "capacity:0x%llx lbasize:0x%x\n", capacity, lbasize);
18069 
18070 	return (0);
18071 }
18072 
18073 
18074 /*
18075  * Function: sd_send_scsi_START_STOP_UNIT
18076  *
18077  * Description: Issue a scsi START STOP UNIT command to the target.
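 *		The flag value is placed directly into byte 4 of the
 *		START STOP UNIT CDB by the code below, so the flag
 *		encodings are assumed to line up with the CDB's
 *		START/LOEJ bits (e.g. SD_TARGET_EJECT presumably sets
 *		LOEJ); see the SD_TARGET_* definitions in sddef.h.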
18078 * 18079 * Arguments: un - pointer to driver soft state (unit) structure for 18080 * this target. 18081 * flag - SD_TARGET_START 18082 * SD_TARGET_STOP 18083 * SD_TARGET_EJECT 18084 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 18085 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 18086 * to use the USCSI "direct" chain and bypass the normal 18087 * command waitq. SD_PATH_DIRECT_PRIORITY is used when this 18088 * command is issued as part of an error recovery action. 18089 * 18090 * Return Code: 0 - Success 18091 * EIO - IO error 18092 * EACCES - Reservation conflict detected 18093 * ENXIO - Not Ready, medium not present 18094 * errno return code from sd_send_scsi_cmd() 18095 * 18096 * Context: Can sleep. 18097 */ 18098 18099 static int 18100 sd_send_scsi_START_STOP_UNIT(struct sd_lun *un, int flag, int path_flag) 18101 { 18102 struct scsi_extended_sense sense_buf; 18103 union scsi_cdb cdb; 18104 struct uscsi_cmd ucmd_buf; 18105 int status; 18106 18107 ASSERT(un != NULL); 18108 ASSERT(!mutex_owned(SD_MUTEX(un))); 18109 18110 SD_TRACE(SD_LOG_IO, un, 18111 "sd_send_scsi_START_STOP_UNIT: entry: un:0x%p\n", un); 18112 18113 if (un->un_f_check_start_stop && 18114 ((flag == SD_TARGET_START) || (flag == SD_TARGET_STOP)) && 18115 (un->un_f_start_stop_supported != TRUE)) { 18116 return (0); 18117 } 18118 18119 /* 18120 * If we are performing an eject operation and 18121 * we receive any command other than SD_TARGET_EJECT 18122 * we should immediately return. 18123 */ 18124 if (flag != SD_TARGET_EJECT) { 18125 mutex_enter(SD_MUTEX(un)); 18126 if (un->un_f_ejecting == TRUE) { 18127 mutex_exit(SD_MUTEX(un)); 18128 return (EAGAIN); 18129 } 18130 mutex_exit(SD_MUTEX(un)); 18131 } 18132 18133 bzero(&cdb, sizeof (cdb)); 18134 bzero(&ucmd_buf, sizeof (ucmd_buf)); 18135 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 18136 18137 cdb.scc_cmd = SCMD_START_STOP; 18138 cdb.cdb_opaque[4] = (uchar_t)flag; 18139 18140 ucmd_buf.uscsi_cdb = (char *)&cdb; 18141 ucmd_buf.uscsi_cdblen = CDB_GROUP0; 18142 ucmd_buf.uscsi_bufaddr = NULL; 18143 ucmd_buf.uscsi_buflen = 0; 18144 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 18145 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 18146 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT; 18147 ucmd_buf.uscsi_timeout = 200; 18148 18149 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 18150 UIO_SYSSPACE, path_flag); 18151 18152 switch (status) { 18153 case 0: 18154 break; /* Success! */ 18155 case EIO: 18156 switch (ucmd_buf.uscsi_status) { 18157 case STATUS_RESERVATION_CONFLICT: 18158 status = EACCES; 18159 break; 18160 case STATUS_CHECK: 18161 if (ucmd_buf.uscsi_rqstatus == STATUS_GOOD) { 18162 switch (scsi_sense_key( 18163 (uint8_t *)&sense_buf)) { 18164 case KEY_ILLEGAL_REQUEST: 18165 status = ENOTSUP; 18166 break; 18167 case KEY_NOT_READY: 18168 if (scsi_sense_asc( 18169 (uint8_t *)&sense_buf) 18170 == 0x3A) { 18171 status = ENXIO; 18172 } 18173 break; 18174 default: 18175 break; 18176 } 18177 } 18178 break; 18179 default: 18180 break; 18181 } 18182 break; 18183 default: 18184 break; 18185 } 18186 18187 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_START_STOP_UNIT: exit\n"); 18188 18189 return (status); 18190 } 18191 18192 18193 /* 18194 * Function: sd_start_stop_unit_callback 18195 * 18196 * Description: timeout(9F) callback to begin recovery process for a 18197 * device that has spun down. 18198 * 18199 * Arguments: arg - pointer to associated softstate struct. 
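 *
 *		A hedged sketch of how the timeout is typically armed from
 *		the retry logic (SD_SPINUP_DELAY_TICKS is a placeholder for
 *		the driver's chosen delay, not a name from this file):
 *
 *		    un->un_startstop_timeid = timeout(
 *			sd_start_stop_unit_callback, un,
 *			SD_SPINUP_DELAY_TICKS);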
18200 * 18201 * Context: Executes in a timeout(9F) thread context 18202 */ 18203 18204 static void 18205 sd_start_stop_unit_callback(void *arg) 18206 { 18207 struct sd_lun *un = arg; 18208 ASSERT(un != NULL); 18209 ASSERT(!mutex_owned(SD_MUTEX(un))); 18210 18211 SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_callback: entry\n"); 18212 18213 (void) taskq_dispatch(sd_tq, sd_start_stop_unit_task, un, KM_NOSLEEP); 18214 } 18215 18216 18217 /* 18218 * Function: sd_start_stop_unit_task 18219 * 18220 * Description: Recovery procedure when a drive is spun down. 18221 * 18222 * Arguments: arg - pointer to associated softstate struct. 18223 * 18224 * Context: Executes in a taskq() thread context 18225 */ 18226 18227 static void 18228 sd_start_stop_unit_task(void *arg) 18229 { 18230 struct sd_lun *un = arg; 18231 18232 ASSERT(un != NULL); 18233 ASSERT(!mutex_owned(SD_MUTEX(un))); 18234 18235 SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_task: entry\n"); 18236 18237 /* 18238 * Some unformatted drives report not ready error, no need to 18239 * restart if format has been initiated. 18240 */ 18241 mutex_enter(SD_MUTEX(un)); 18242 if (un->un_f_format_in_progress == TRUE) { 18243 mutex_exit(SD_MUTEX(un)); 18244 return; 18245 } 18246 mutex_exit(SD_MUTEX(un)); 18247 18248 /* 18249 * When a START STOP command is issued from here, it is part of a 18250 * failure recovery operation and must be issued before any other 18251 * commands, including any pending retries. Thus it must be sent 18252 * using SD_PATH_DIRECT_PRIORITY. It doesn't matter if the spin up 18253 * succeeds or not, we will start I/O after the attempt. 18254 */ 18255 (void) sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_START, 18256 SD_PATH_DIRECT_PRIORITY); 18257 18258 /* 18259 * The above call blocks until the START_STOP_UNIT command completes. 18260 * Now that it has completed, we must re-try the original IO that 18261 * received the NOT READY condition in the first place. There are 18262 * three possible conditions here: 18263 * 18264 * (1) The original IO is on un_retry_bp. 18265 * (2) The original IO is on the regular wait queue, and un_retry_bp 18266 * is NULL. 18267 * (3) The original IO is on the regular wait queue, and un_retry_bp 18268 * points to some other, unrelated bp. 18269 * 18270 * For each case, we must call sd_start_cmds() with un_retry_bp 18271 * as the argument. If un_retry_bp is NULL, this will initiate 18272 * processing of the regular wait queue. If un_retry_bp is not NULL, 18273 * then this will process the bp on un_retry_bp. That may or may not 18274 * be the original IO, but that does not matter: the important thing 18275 * is to keep the IO processing going at this point. 18276 * 18277 * Note: This is a very specific error recovery sequence associated 18278 * with a drive that is not spun up. We attempt a START_STOP_UNIT and 18279 * serialize the I/O with completion of the spin-up. 18280 */ 18281 mutex_enter(SD_MUTEX(un)); 18282 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 18283 "sd_start_stop_unit_task: un:0x%p starting bp:0x%p\n", 18284 un, un->un_retry_bp); 18285 un->un_startstop_timeid = NULL; /* Timeout is no longer pending */ 18286 sd_start_cmds(un, un->un_retry_bp); 18287 mutex_exit(SD_MUTEX(un)); 18288 18289 SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_task: exit\n"); 18290 } 18291 18292 18293 /* 18294 * Function: sd_send_scsi_INQUIRY 18295 * 18296 * Description: Issue the scsi INQUIRY command. 
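 *
 *		A minimal usage sketch (hypothetical caller; "inqb" and
 *		"resid" are illustrative locals; EVPD=0x01 with page 0x83
 *		requests the Device Identification VPD page):
 *
 *		    uchar_t inqb[0xff];
 *		    size_t resid;
 *
 *		    status = sd_send_scsi_INQUIRY(un, inqb, sizeof (inqb),
 *			0x01, 0x83, &resid);
 *		    (on success, sizeof (inqb) - resid bytes are valid)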
18297  *
18298  * Arguments: un
18299  *		bufaddr
18300  *		buflen
18301  *		evpd
18302  *		page_code
18303  *		residp - optional pointer to receive the command residual
18304  *
18305  * Return Code: 0 - Success
18306  *		errno return code from sd_send_scsi_cmd()
18307  *
18308  * Context: Can sleep. Does not return until command is completed.
18309  */
18310 
18311 static int
18312 sd_send_scsi_INQUIRY(struct sd_lun *un, uchar_t *bufaddr, size_t buflen,
18313 	uchar_t evpd, uchar_t page_code, size_t *residp)
18314 {
18315 	union scsi_cdb		cdb;
18316 	struct uscsi_cmd	ucmd_buf;
18317 	int			status;
18318 
18319 	ASSERT(un != NULL);
18320 	ASSERT(!mutex_owned(SD_MUTEX(un)));
18321 	ASSERT(bufaddr != NULL);
18322 
18323 	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_INQUIRY: entry: un:0x%p\n", un);
18324 
18325 	bzero(&cdb, sizeof (cdb));
18326 	bzero(&ucmd_buf, sizeof (ucmd_buf));
18327 	bzero(bufaddr, buflen);
18328 
18329 	cdb.scc_cmd = SCMD_INQUIRY;
18330 	cdb.cdb_opaque[1] = evpd;
18331 	cdb.cdb_opaque[2] = page_code;
18332 	FORMG0COUNT(&cdb, buflen);
18333 
18334 	ucmd_buf.uscsi_cdb	= (char *)&cdb;
18335 	ucmd_buf.uscsi_cdblen	= CDB_GROUP0;
18336 	ucmd_buf.uscsi_bufaddr	= (caddr_t)bufaddr;
18337 	ucmd_buf.uscsi_buflen	= buflen;
18338 	ucmd_buf.uscsi_rqbuf	= NULL;
18339 	ucmd_buf.uscsi_rqlen	= 0;
18340 	ucmd_buf.uscsi_flags	= USCSI_READ | USCSI_SILENT;
18341 	ucmd_buf.uscsi_timeout	= 200;	/* Excessive legacy value */
18342 
18343 	status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL,
18344 	    UIO_SYSSPACE, SD_PATH_DIRECT);
18345 
18346 	if ((status == 0) && (residp != NULL)) {
18347 		*residp = ucmd_buf.uscsi_resid;
18348 	}
18349 
18350 	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_INQUIRY: exit\n");
18351 
18352 	return (status);
18353 }
18354 
18355 
18356 /*
18357  * Function: sd_send_scsi_TEST_UNIT_READY
18358  *
18359  * Description: Issue the scsi TEST UNIT READY command.
18360  *		This routine can be told to set the flag USCSI_DIAGNOSE to
18361  *		prevent retrying failed commands. Use this when the intent
18362  *		is either to check for device readiness, to clear a Unit
18363  *		Attention, or to clear any outstanding sense data.
18364  *		However under specific conditions the expected behavior
18365  *		is for retries to bring a device ready, so use the flag
18366  *		with caution.
18367  *
18368  * Arguments: un
18369  *		flag:	SD_CHECK_FOR_MEDIA: return ENXIO if no media present
18370  *			SD_DONT_RETRY_TUR: include uscsi flag USCSI_DIAGNOSE.
18371  *			0: don't check for media present, do retries on cmd.
18372  *
18373  * Return Code: 0 - Success
18374  *		EIO - IO error
18375  *		EACCES - Reservation conflict detected
18376  *		ENXIO - Not Ready, medium not present
18377  *		errno return code from sd_send_scsi_cmd()
18378  *
18379  * Context: Can sleep. Does not return until command is completed.
18380  */
18381 
18382 static int
18383 sd_send_scsi_TEST_UNIT_READY(struct sd_lun *un, int flag)
18384 {
18385 	struct	scsi_extended_sense	sense_buf;
18386 	union	scsi_cdb		cdb;
18387 	struct	uscsi_cmd		ucmd_buf;
18388 	int				status;
18389 
18390 	ASSERT(un != NULL);
18391 	ASSERT(!mutex_owned(SD_MUTEX(un)));
18392 
18393 	SD_TRACE(SD_LOG_IO, un,
18394 	    "sd_send_scsi_TEST_UNIT_READY: entry: un:0x%p\n", un);
18395 
18396 	/*
18397 	 * Some Seagate elite1 TQ devices get hung with disconnect/reconnect
18398 	 * timeouts when they receive a TUR and the queue is not empty. Check
18399 	 * the configuration flag set during attach (indicating the drive has
18400 	 * this firmware bug) and un_ncmds_in_transport before issuing the
18401 	 * TUR. If there are
18402 	 * pending commands, return success; this is a bit arbitrary but is ok
18403 	 * for non-removables (i.e.
the eliteI disks) and non-clustering 18404 * configurations. 18405 */ 18406 if (un->un_f_cfg_tur_check == TRUE) { 18407 mutex_enter(SD_MUTEX(un)); 18408 if (un->un_ncmds_in_transport != 0) { 18409 mutex_exit(SD_MUTEX(un)); 18410 return (0); 18411 } 18412 mutex_exit(SD_MUTEX(un)); 18413 } 18414 18415 bzero(&cdb, sizeof (cdb)); 18416 bzero(&ucmd_buf, sizeof (ucmd_buf)); 18417 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 18418 18419 cdb.scc_cmd = SCMD_TEST_UNIT_READY; 18420 18421 ucmd_buf.uscsi_cdb = (char *)&cdb; 18422 ucmd_buf.uscsi_cdblen = CDB_GROUP0; 18423 ucmd_buf.uscsi_bufaddr = NULL; 18424 ucmd_buf.uscsi_buflen = 0; 18425 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 18426 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 18427 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT; 18428 18429 /* Use flag USCSI_DIAGNOSE to prevent retries if it fails. */ 18430 if ((flag & SD_DONT_RETRY_TUR) != 0) { 18431 ucmd_buf.uscsi_flags |= USCSI_DIAGNOSE; 18432 } 18433 ucmd_buf.uscsi_timeout = 60; 18434 18435 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 18436 UIO_SYSSPACE, ((flag & SD_BYPASS_PM) ? SD_PATH_DIRECT : 18437 SD_PATH_STANDARD)); 18438 18439 switch (status) { 18440 case 0: 18441 break; /* Success! */ 18442 case EIO: 18443 switch (ucmd_buf.uscsi_status) { 18444 case STATUS_RESERVATION_CONFLICT: 18445 status = EACCES; 18446 break; 18447 case STATUS_CHECK: 18448 if ((flag & SD_CHECK_FOR_MEDIA) == 0) { 18449 break; 18450 } 18451 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 18452 (scsi_sense_key((uint8_t *)&sense_buf) == 18453 KEY_NOT_READY) && 18454 (scsi_sense_asc((uint8_t *)&sense_buf) == 0x3A)) { 18455 status = ENXIO; 18456 } 18457 break; 18458 default: 18459 break; 18460 } 18461 break; 18462 default: 18463 break; 18464 } 18465 18466 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_TEST_UNIT_READY: exit\n"); 18467 18468 return (status); 18469 } 18470 18471 18472 /* 18473 * Function: sd_send_scsi_PERSISTENT_RESERVE_IN 18474 * 18475 * Description: Issue the scsi PERSISTENT RESERVE IN command. 18476 * 18477 * Arguments: un 18478 * 18479 * Return Code: 0 - Success 18480 * EACCES 18481 * ENOTSUP 18482 * errno return code from sd_send_scsi_cmd() 18483 * 18484 * Context: Can sleep. Does not return until command is completed. 
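 *
 *		For example, a caller probing for SCSI-3 support can pass no
 *		buffer at all; with data_len == 0 and data_bufp == NULL this
 *		routine allocates and frees a scratch buffer internally:
 *
 *		    status = sd_send_scsi_PERSISTENT_RESERVE_IN(un,
 *			SD_READ_KEYS, 0, NULL);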
18485 */ 18486 18487 static int 18488 sd_send_scsi_PERSISTENT_RESERVE_IN(struct sd_lun *un, uchar_t usr_cmd, 18489 uint16_t data_len, uchar_t *data_bufp) 18490 { 18491 struct scsi_extended_sense sense_buf; 18492 union scsi_cdb cdb; 18493 struct uscsi_cmd ucmd_buf; 18494 int status; 18495 int no_caller_buf = FALSE; 18496 18497 ASSERT(un != NULL); 18498 ASSERT(!mutex_owned(SD_MUTEX(un))); 18499 ASSERT((usr_cmd == SD_READ_KEYS) || (usr_cmd == SD_READ_RESV)); 18500 18501 SD_TRACE(SD_LOG_IO, un, 18502 "sd_send_scsi_PERSISTENT_RESERVE_IN: entry: un:0x%p\n", un); 18503 18504 bzero(&cdb, sizeof (cdb)); 18505 bzero(&ucmd_buf, sizeof (ucmd_buf)); 18506 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 18507 if (data_bufp == NULL) { 18508 /* Allocate a default buf if the caller did not give one */ 18509 ASSERT(data_len == 0); 18510 data_len = MHIOC_RESV_KEY_SIZE; 18511 data_bufp = kmem_zalloc(MHIOC_RESV_KEY_SIZE, KM_SLEEP); 18512 no_caller_buf = TRUE; 18513 } 18514 18515 cdb.scc_cmd = SCMD_PERSISTENT_RESERVE_IN; 18516 cdb.cdb_opaque[1] = usr_cmd; 18517 FORMG1COUNT(&cdb, data_len); 18518 18519 ucmd_buf.uscsi_cdb = (char *)&cdb; 18520 ucmd_buf.uscsi_cdblen = CDB_GROUP1; 18521 ucmd_buf.uscsi_bufaddr = (caddr_t)data_bufp; 18522 ucmd_buf.uscsi_buflen = data_len; 18523 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 18524 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 18525 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; 18526 ucmd_buf.uscsi_timeout = 60; 18527 18528 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 18529 UIO_SYSSPACE, SD_PATH_STANDARD); 18530 18531 switch (status) { 18532 case 0: 18533 break; /* Success! */ 18534 case EIO: 18535 switch (ucmd_buf.uscsi_status) { 18536 case STATUS_RESERVATION_CONFLICT: 18537 status = EACCES; 18538 break; 18539 case STATUS_CHECK: 18540 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 18541 (scsi_sense_key((uint8_t *)&sense_buf) == 18542 KEY_ILLEGAL_REQUEST)) { 18543 status = ENOTSUP; 18544 } 18545 break; 18546 default: 18547 break; 18548 } 18549 break; 18550 default: 18551 break; 18552 } 18553 18554 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_PERSISTENT_RESERVE_IN: exit\n"); 18555 18556 if (no_caller_buf == TRUE) { 18557 kmem_free(data_bufp, data_len); 18558 } 18559 18560 return (status); 18561 } 18562 18563 18564 /* 18565 * Function: sd_send_scsi_PERSISTENT_RESERVE_OUT 18566 * 18567 * Description: This routine is the driver entry point for handling CD-ROM 18568 * multi-host persistent reservation requests (MHIOCGRP_INKEYS, 18569 * MHIOCGRP_INRESV) by sending the SCSI-3 PROUT commands to the 18570 * device. 18571 * 18572 * Arguments: un - Pointer to soft state struct for the target. 18573 * usr_cmd SCSI-3 reservation facility command (one of 18574 * SD_SCSI3_REGISTER, SD_SCSI3_RESERVE, SD_SCSI3_RELEASE, 18575 * SD_SCSI3_PREEMPTANDABORT) 18576 * usr_bufp - user provided pointer register, reserve descriptor or 18577 * preempt and abort structure (mhioc_register_t, 18578 * mhioc_resv_desc_t, mhioc_preemptandabort_t) 18579 * 18580 * Return Code: 0 - Success 18581 * EACCES 18582 * ENOTSUP 18583 * errno return code from sd_send_scsi_cmd() 18584 * 18585 * Context: Can sleep. Does not return until command is completed. 
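 *
 *		A sketch of the register case, mirroring the MHIOCGRP_REGISTER
 *		ioctl path later in this file ("reg" and "mykey" are
 *		hypothetical, caller-supplied values):
 *
 *		    mhioc_register_t reg;
 *
 *		    bzero(&reg, sizeof (reg));
 *		    bcopy(mykey, reg.newkey.key, MHIOC_RESV_KEY_SIZE);
 *		    err = sd_send_scsi_PERSISTENT_RESERVE_OUT(un,
 *			SD_SCSI3_REGISTER, (uchar_t *)&reg);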
18586 */ 18587 18588 static int 18589 sd_send_scsi_PERSISTENT_RESERVE_OUT(struct sd_lun *un, uchar_t usr_cmd, 18590 uchar_t *usr_bufp) 18591 { 18592 struct scsi_extended_sense sense_buf; 18593 union scsi_cdb cdb; 18594 struct uscsi_cmd ucmd_buf; 18595 int status; 18596 uchar_t data_len = sizeof (sd_prout_t); 18597 sd_prout_t *prp; 18598 18599 ASSERT(un != NULL); 18600 ASSERT(!mutex_owned(SD_MUTEX(un))); 18601 ASSERT(data_len == 24); /* required by scsi spec */ 18602 18603 SD_TRACE(SD_LOG_IO, un, 18604 "sd_send_scsi_PERSISTENT_RESERVE_OUT: entry: un:0x%p\n", un); 18605 18606 if (usr_bufp == NULL) { 18607 return (EINVAL); 18608 } 18609 18610 bzero(&cdb, sizeof (cdb)); 18611 bzero(&ucmd_buf, sizeof (ucmd_buf)); 18612 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 18613 prp = kmem_zalloc(data_len, KM_SLEEP); 18614 18615 cdb.scc_cmd = SCMD_PERSISTENT_RESERVE_OUT; 18616 cdb.cdb_opaque[1] = usr_cmd; 18617 FORMG1COUNT(&cdb, data_len); 18618 18619 ucmd_buf.uscsi_cdb = (char *)&cdb; 18620 ucmd_buf.uscsi_cdblen = CDB_GROUP1; 18621 ucmd_buf.uscsi_bufaddr = (caddr_t)prp; 18622 ucmd_buf.uscsi_buflen = data_len; 18623 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 18624 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 18625 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_WRITE | USCSI_SILENT; 18626 ucmd_buf.uscsi_timeout = 60; 18627 18628 switch (usr_cmd) { 18629 case SD_SCSI3_REGISTER: { 18630 mhioc_register_t *ptr = (mhioc_register_t *)usr_bufp; 18631 18632 bcopy(ptr->oldkey.key, prp->res_key, MHIOC_RESV_KEY_SIZE); 18633 bcopy(ptr->newkey.key, prp->service_key, 18634 MHIOC_RESV_KEY_SIZE); 18635 prp->aptpl = ptr->aptpl; 18636 break; 18637 } 18638 case SD_SCSI3_RESERVE: 18639 case SD_SCSI3_RELEASE: { 18640 mhioc_resv_desc_t *ptr = (mhioc_resv_desc_t *)usr_bufp; 18641 18642 bcopy(ptr->key.key, prp->res_key, MHIOC_RESV_KEY_SIZE); 18643 prp->scope_address = BE_32(ptr->scope_specific_addr); 18644 cdb.cdb_opaque[2] = ptr->type; 18645 break; 18646 } 18647 case SD_SCSI3_PREEMPTANDABORT: { 18648 mhioc_preemptandabort_t *ptr = 18649 (mhioc_preemptandabort_t *)usr_bufp; 18650 18651 bcopy(ptr->resvdesc.key.key, prp->res_key, MHIOC_RESV_KEY_SIZE); 18652 bcopy(ptr->victim_key.key, prp->service_key, 18653 MHIOC_RESV_KEY_SIZE); 18654 prp->scope_address = BE_32(ptr->resvdesc.scope_specific_addr); 18655 cdb.cdb_opaque[2] = ptr->resvdesc.type; 18656 ucmd_buf.uscsi_flags |= USCSI_HEAD; 18657 break; 18658 } 18659 case SD_SCSI3_REGISTERANDIGNOREKEY: 18660 { 18661 mhioc_registerandignorekey_t *ptr; 18662 ptr = (mhioc_registerandignorekey_t *)usr_bufp; 18663 bcopy(ptr->newkey.key, 18664 prp->service_key, MHIOC_RESV_KEY_SIZE); 18665 prp->aptpl = ptr->aptpl; 18666 break; 18667 } 18668 default: 18669 ASSERT(FALSE); 18670 break; 18671 } 18672 18673 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 18674 UIO_SYSSPACE, SD_PATH_STANDARD); 18675 18676 switch (status) { 18677 case 0: 18678 break; /* Success! 
*/ 18679 case EIO: 18680 switch (ucmd_buf.uscsi_status) { 18681 case STATUS_RESERVATION_CONFLICT: 18682 status = EACCES; 18683 break; 18684 case STATUS_CHECK: 18685 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 18686 (scsi_sense_key((uint8_t *)&sense_buf) == 18687 KEY_ILLEGAL_REQUEST)) { 18688 status = ENOTSUP; 18689 } 18690 break; 18691 default: 18692 break; 18693 } 18694 break; 18695 default: 18696 break; 18697 } 18698 18699 kmem_free(prp, data_len); 18700 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_PERSISTENT_RESERVE_OUT: exit\n"); 18701 return (status); 18702 } 18703 18704 18705 /* 18706 * Function: sd_send_scsi_SYNCHRONIZE_CACHE 18707 * 18708 * Description: Issues a scsi SYNCHRONIZE CACHE command to the target 18709 * 18710 * Arguments: un - pointer to the target's soft state struct 18711 * dkc - pointer to the callback structure 18712 * 18713 * Return Code: 0 - success 18714 * errno-type error code 18715 * 18716 * Context: kernel thread context only. 18717 * 18718 * _______________________________________________________________ 18719 * | dkc_flag & | dkc_callback | DKIOCFLUSHWRITECACHE | 18720 * |FLUSH_VOLATILE| | operation | 18721 * |______________|______________|_________________________________| 18722 * | 0 | NULL | Synchronous flush on both | 18723 * | | | volatile and non-volatile cache | 18724 * |______________|______________|_________________________________| 18725 * | 1 | NULL | Synchronous flush on volatile | 18726 * | | | cache; disk drivers may suppress| 18727 * | | | flush if disk table indicates | 18728 * | | | non-volatile cache | 18729 * |______________|______________|_________________________________| 18730 * | 0 | !NULL | Asynchronous flush on both | 18731 * | | | volatile and non-volatile cache;| 18732 * |______________|______________|_________________________________| 18733 * | 1 | !NULL | Asynchronous flush on volatile | 18734 * | | | cache; disk drivers may suppress| 18735 * | | | flush if disk table indicates | 18736 * | | | non-volatile cache | 18737 * |______________|______________|_________________________________| 18738 * 18739 */ 18740 18741 static int 18742 sd_send_scsi_SYNCHRONIZE_CACHE(struct sd_lun *un, struct dk_callback *dkc) 18743 { 18744 struct sd_uscsi_info *uip; 18745 struct uscsi_cmd *uscmd; 18746 union scsi_cdb *cdb; 18747 struct buf *bp; 18748 int rval = 0; 18749 int is_async; 18750 18751 SD_TRACE(SD_LOG_IO, un, 18752 "sd_send_scsi_SYNCHRONIZE_CACHE: entry: un:0x%p\n", un); 18753 18754 ASSERT(un != NULL); 18755 ASSERT(!mutex_owned(SD_MUTEX(un))); 18756 18757 if (dkc == NULL || dkc->dkc_callback == NULL) { 18758 is_async = FALSE; 18759 } else { 18760 is_async = TRUE; 18761 } 18762 18763 mutex_enter(SD_MUTEX(un)); 18764 /* check whether cache flush should be suppressed */ 18765 if (un->un_f_suppress_cache_flush == TRUE) { 18766 mutex_exit(SD_MUTEX(un)); 18767 /* 18768 * suppress the cache flush if the device is told to do 18769 * so by sd.conf or disk table 18770 */ 18771 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_SYNCHRONIZE_CACHE: \ 18772 skip the cache flush since suppress_cache_flush is %d!\n", 18773 un->un_f_suppress_cache_flush); 18774 18775 if (is_async == TRUE) { 18776 /* invoke callback for asynchronous flush */ 18777 (*dkc->dkc_callback)(dkc->dkc_cookie, 0); 18778 } 18779 return (rval); 18780 } 18781 mutex_exit(SD_MUTEX(un)); 18782 18783 /* 18784 * check dkc_flag & FLUSH_VOLATILE so SYNC_NV bit can be 18785 * set properly 18786 */ 18787 cdb = kmem_zalloc(CDB_GROUP1, KM_SLEEP); 18788 cdb->scc_cmd = SCMD_SYNCHRONIZE_CACHE; 18789 18790 
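	/*
	 * A note on the SYNC_NV bit (a hedged summary of SBC-2 behavior,
	 * not from the original comments): when the bit is set, SYNCHRONIZE
	 * CACHE asks the device to flush only its volatile cache; when it
	 * is clear, any non-volatile cache must be flushed to the medium as
	 * well.  The code below therefore sets the bit only for
	 * FLUSH_VOLATILE requests on devices known to support it.
	 */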
mutex_enter(SD_MUTEX(un)); 18791 if (dkc != NULL && un->un_f_sync_nv_supported && 18792 (dkc->dkc_flag & FLUSH_VOLATILE)) { 18793 /* 18794 * if the device supports SYNC_NV bit, turn on 18795 * the SYNC_NV bit to only flush volatile cache 18796 */ 18797 cdb->cdb_un.tag |= SD_SYNC_NV_BIT; 18798 } 18799 mutex_exit(SD_MUTEX(un)); 18800 18801 /* 18802 * First get some memory for the uscsi_cmd struct and cdb 18803 * and initialize for SYNCHRONIZE_CACHE cmd. 18804 */ 18805 uscmd = kmem_zalloc(sizeof (struct uscsi_cmd), KM_SLEEP); 18806 uscmd->uscsi_cdblen = CDB_GROUP1; 18807 uscmd->uscsi_cdb = (caddr_t)cdb; 18808 uscmd->uscsi_bufaddr = NULL; 18809 uscmd->uscsi_buflen = 0; 18810 uscmd->uscsi_rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 18811 uscmd->uscsi_rqlen = SENSE_LENGTH; 18812 uscmd->uscsi_rqresid = SENSE_LENGTH; 18813 uscmd->uscsi_flags = USCSI_RQENABLE | USCSI_SILENT; 18814 uscmd->uscsi_timeout = sd_io_time; 18815 18816 /* 18817 * Allocate an sd_uscsi_info struct and fill it with the info 18818 * needed by sd_initpkt_for_uscsi(). Then put the pointer into 18819 * b_private in the buf for sd_initpkt_for_uscsi(). Note that 18820 * since we allocate the buf here in this function, we do not 18821 * need to preserve the prior contents of b_private. 18822 * The sd_uscsi_info struct is also used by sd_uscsi_strategy() 18823 */ 18824 uip = kmem_zalloc(sizeof (struct sd_uscsi_info), KM_SLEEP); 18825 uip->ui_flags = SD_PATH_DIRECT; 18826 uip->ui_cmdp = uscmd; 18827 18828 bp = getrbuf(KM_SLEEP); 18829 bp->b_private = uip; 18830 18831 /* 18832 * Setup buffer to carry uscsi request. 18833 */ 18834 bp->b_flags = B_BUSY; 18835 bp->b_bcount = 0; 18836 bp->b_blkno = 0; 18837 18838 if (is_async == TRUE) { 18839 bp->b_iodone = sd_send_scsi_SYNCHRONIZE_CACHE_biodone; 18840 uip->ui_dkc = *dkc; 18841 } 18842 18843 bp->b_edev = SD_GET_DEV(un); 18844 bp->b_dev = cmpdev(bp->b_edev); /* maybe unnecessary? */ 18845 18846 (void) sd_uscsi_strategy(bp); 18847 18848 /* 18849 * If synchronous request, wait for completion 18850 * If async just return and let b_iodone callback 18851 * cleanup. 18852 * NOTE: On return, u_ncmds_in_driver will be decremented, 18853 * but it was also incremented in sd_uscsi_strategy(), so 18854 * we should be ok. 18855 */ 18856 if (is_async == FALSE) { 18857 (void) biowait(bp); 18858 rval = sd_send_scsi_SYNCHRONIZE_CACHE_biodone(bp); 18859 } 18860 18861 return (rval); 18862 } 18863 18864 18865 static int 18866 sd_send_scsi_SYNCHRONIZE_CACHE_biodone(struct buf *bp) 18867 { 18868 struct sd_uscsi_info *uip; 18869 struct uscsi_cmd *uscmd; 18870 uint8_t *sense_buf; 18871 struct sd_lun *un; 18872 int status; 18873 union scsi_cdb *cdb; 18874 18875 uip = (struct sd_uscsi_info *)(bp->b_private); 18876 ASSERT(uip != NULL); 18877 18878 uscmd = uip->ui_cmdp; 18879 ASSERT(uscmd != NULL); 18880 18881 sense_buf = (uint8_t *)uscmd->uscsi_rqbuf; 18882 ASSERT(sense_buf != NULL); 18883 18884 un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp)); 18885 ASSERT(un != NULL); 18886 18887 cdb = (union scsi_cdb *)uscmd->uscsi_cdb; 18888 18889 status = geterror(bp); 18890 switch (status) { 18891 case 0: 18892 break; /* Success! 
 */
18893 	case EIO:
18894 		switch (uscmd->uscsi_status) {
18895 		case STATUS_RESERVATION_CONFLICT:
18896 			/* Ignore reservation conflict */
18897 			status = 0;
18898 			goto done;
18899 
18900 		case STATUS_CHECK:
18901 			if ((uscmd->uscsi_rqstatus == STATUS_GOOD) &&
18902 			    (scsi_sense_key(sense_buf) ==
18903 			    KEY_ILLEGAL_REQUEST)) {
18904 				/* Ignore Illegal Request error */
18905 				if (cdb->cdb_un.tag & SD_SYNC_NV_BIT) {
18906 					mutex_enter(SD_MUTEX(un));
18907 					un->un_f_sync_nv_supported = FALSE;
18908 					mutex_exit(SD_MUTEX(un));
18909 					status = 0;
18910 					SD_TRACE(SD_LOG_IO, un,
18911 					    "un_f_sync_nv_supported "
18912 					    "is set to false.\n");
18913 					goto done;
18914 				}
18915 
18916 				mutex_enter(SD_MUTEX(un));
18917 				un->un_f_sync_cache_supported = FALSE;
18918 				mutex_exit(SD_MUTEX(un));
18919 				SD_TRACE(SD_LOG_IO, un,
18920 				    "sd_send_scsi_SYNCHRONIZE_CACHE_biodone: "
18921 				    "un_f_sync_cache_supported set to false "
18922 				    "with asc = %x, ascq = %x\n",
18923 				    scsi_sense_asc(sense_buf),
18924 				    scsi_sense_ascq(sense_buf));
18925 				status = ENOTSUP;
18926 				goto done;
18927 			}
18928 			break;
18929 		default:
18930 			break;
18931 		}
18932 		/* FALLTHRU */
18933 	default:
18934 		/*
18935 		 * Don't log an error message if this device
18936 		 * has removable media.
18937 		 */
18938 		if (!un->un_f_has_removable_media) {
18939 			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
18940 			    "SYNCHRONIZE CACHE command failed (%d)\n", status);
18941 		}
18942 		break;
18943 	}
18944 
18945 done:
18946 	if (uip->ui_dkc.dkc_callback != NULL) {
18947 		(*uip->ui_dkc.dkc_callback)(uip->ui_dkc.dkc_cookie, status);
18948 	}
18949 
18950 	ASSERT((bp->b_flags & B_REMAPPED) == 0);
18951 	freerbuf(bp);
18952 	kmem_free(uip, sizeof (struct sd_uscsi_info));
18953 	kmem_free(uscmd->uscsi_rqbuf, SENSE_LENGTH);
18954 	kmem_free(uscmd->uscsi_cdb, (size_t)uscmd->uscsi_cdblen);
18955 	kmem_free(uscmd, sizeof (struct uscsi_cmd));
18956 
18957 	return (status);
18958 }
18959 
18960 
18961 /*
18962  * Function: sd_send_scsi_GET_CONFIGURATION
18963  *
18964  * Description: Issues the get configuration command to the device.
18965  *		Called from sd_check_for_writable_cd & sd_get_media_info.
18966  *		The caller needs to ensure that buflen = SD_PROFILE_HEADER_LEN.
18967  * Arguments: un
18968  *		ucmdbuf
18969  *		rqbuf
18970  *		rqbuflen
18971  *		bufaddr
18972  *		buflen
18973  *		path_flag
18974  *
18975  * Return Code: 0 - Success
18976  *		errno return code from sd_send_scsi_cmd()
18977  *
18978  * Context: Can sleep. Does not return until command is completed.
18979  *
18980  */
18981 
18982 static int
18983 sd_send_scsi_GET_CONFIGURATION(struct sd_lun *un, struct uscsi_cmd *ucmdbuf,
18984 	uchar_t *rqbuf, uint_t rqbuflen, uchar_t *bufaddr, uint_t buflen,
18985 	int path_flag)
18986 {
18987 	char	cdb[CDB_GROUP1];
18988 	int	status;
18989 
18990 	ASSERT(un != NULL);
18991 	ASSERT(!mutex_owned(SD_MUTEX(un)));
18992 	ASSERT(bufaddr != NULL);
18993 	ASSERT(ucmdbuf != NULL);
18994 	ASSERT(rqbuf != NULL);
18995 
18996 	SD_TRACE(SD_LOG_IO, un,
18997 	    "sd_send_scsi_GET_CONFIGURATION: entry: un:0x%p\n", un);
18998 
18999 	bzero(cdb, sizeof (cdb));
19000 	bzero(ucmdbuf, sizeof (struct uscsi_cmd));
19001 	bzero(rqbuf, rqbuflen);
19002 	bzero(bufaddr, buflen);
19003 
19004 	/*
19005 	 * Set up cdb field for the get configuration command.
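	 *
	 * A hedged byte-level summary (per MMC, not from the original
	 * comment): cdb[0] is the GET CONFIGURATION opcode (0x46);
	 * cdb[1] = 0x02 sets RT so the device returns the feature header
	 * plus only the feature named by the Starting Feature Number; the
	 * allocation length lives in cdb[7-8], and only cdb[8] is set here
	 * because SD_PROFILE_HEADER_LEN fits in a single byte.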
19006 */ 19007 cdb[0] = SCMD_GET_CONFIGURATION; 19008 cdb[1] = 0x02; /* Requested Type */ 19009 cdb[8] = SD_PROFILE_HEADER_LEN; 19010 ucmdbuf->uscsi_cdb = cdb; 19011 ucmdbuf->uscsi_cdblen = CDB_GROUP1; 19012 ucmdbuf->uscsi_bufaddr = (caddr_t)bufaddr; 19013 ucmdbuf->uscsi_buflen = buflen; 19014 ucmdbuf->uscsi_timeout = sd_io_time; 19015 ucmdbuf->uscsi_rqbuf = (caddr_t)rqbuf; 19016 ucmdbuf->uscsi_rqlen = rqbuflen; 19017 ucmdbuf->uscsi_flags = USCSI_RQENABLE|USCSI_SILENT|USCSI_READ; 19018 19019 status = sd_send_scsi_cmd(SD_GET_DEV(un), ucmdbuf, FKIOCTL, 19020 UIO_SYSSPACE, path_flag); 19021 19022 switch (status) { 19023 case 0: 19024 break; /* Success! */ 19025 case EIO: 19026 switch (ucmdbuf->uscsi_status) { 19027 case STATUS_RESERVATION_CONFLICT: 19028 status = EACCES; 19029 break; 19030 default: 19031 break; 19032 } 19033 break; 19034 default: 19035 break; 19036 } 19037 19038 if (status == 0) { 19039 SD_DUMP_MEMORY(un, SD_LOG_IO, 19040 "sd_send_scsi_GET_CONFIGURATION: data", 19041 (uchar_t *)bufaddr, SD_PROFILE_HEADER_LEN, SD_LOG_HEX); 19042 } 19043 19044 SD_TRACE(SD_LOG_IO, un, 19045 "sd_send_scsi_GET_CONFIGURATION: exit\n"); 19046 19047 return (status); 19048 } 19049 19050 /* 19051 * Function: sd_send_scsi_feature_GET_CONFIGURATION 19052 * 19053 * Description: Issues the get configuration command to the device to 19054 * retrieve a specific feature. Called from 19055 * sd_check_for_writable_cd & sd_set_mmc_caps. 19056 * Arguments: un 19057 * ucmdbuf 19058 * rqbuf 19059 * rqbuflen 19060 * bufaddr 19061 * buflen 19062 * feature 19063 * 19064 * Return Code: 0 - Success 19065 * errno return code from sd_send_scsi_cmd() 19066 * 19067 * Context: Can sleep. Does not return until command is completed. 19068 * 19069 */ 19070 static int 19071 sd_send_scsi_feature_GET_CONFIGURATION(struct sd_lun *un, 19072 struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen, 19073 uchar_t *bufaddr, uint_t buflen, char feature, int path_flag) 19074 { 19075 char cdb[CDB_GROUP1]; 19076 int status; 19077 19078 ASSERT(un != NULL); 19079 ASSERT(!mutex_owned(SD_MUTEX(un))); 19080 ASSERT(bufaddr != NULL); 19081 ASSERT(ucmdbuf != NULL); 19082 ASSERT(rqbuf != NULL); 19083 19084 SD_TRACE(SD_LOG_IO, un, 19085 "sd_send_scsi_feature_GET_CONFIGURATION: entry: un:0x%p\n", un); 19086 19087 bzero(cdb, sizeof (cdb)); 19088 bzero(ucmdbuf, sizeof (struct uscsi_cmd)); 19089 bzero(rqbuf, rqbuflen); 19090 bzero(bufaddr, buflen); 19091 19092 /* 19093 * Set up cdb field for the get configuration command. 19094 */ 19095 cdb[0] = SCMD_GET_CONFIGURATION; 19096 cdb[1] = 0x02; /* Requested Type */ 19097 cdb[3] = feature; 19098 cdb[8] = buflen; 19099 ucmdbuf->uscsi_cdb = cdb; 19100 ucmdbuf->uscsi_cdblen = CDB_GROUP1; 19101 ucmdbuf->uscsi_bufaddr = (caddr_t)bufaddr; 19102 ucmdbuf->uscsi_buflen = buflen; 19103 ucmdbuf->uscsi_timeout = sd_io_time; 19104 ucmdbuf->uscsi_rqbuf = (caddr_t)rqbuf; 19105 ucmdbuf->uscsi_rqlen = rqbuflen; 19106 ucmdbuf->uscsi_flags = USCSI_RQENABLE|USCSI_SILENT|USCSI_READ; 19107 19108 status = sd_send_scsi_cmd(SD_GET_DEV(un), ucmdbuf, FKIOCTL, 19109 UIO_SYSSPACE, path_flag); 19110 19111 switch (status) { 19112 case 0: 19113 break; /* Success! 
*/ 19114 case EIO: 19115 switch (ucmdbuf->uscsi_status) { 19116 case STATUS_RESERVATION_CONFLICT: 19117 status = EACCES; 19118 break; 19119 default: 19120 break; 19121 } 19122 break; 19123 default: 19124 break; 19125 } 19126 19127 if (status == 0) { 19128 SD_DUMP_MEMORY(un, SD_LOG_IO, 19129 "sd_send_scsi_feature_GET_CONFIGURATION: data", 19130 (uchar_t *)bufaddr, SD_PROFILE_HEADER_LEN, SD_LOG_HEX); 19131 } 19132 19133 SD_TRACE(SD_LOG_IO, un, 19134 "sd_send_scsi_feature_GET_CONFIGURATION: exit\n"); 19135 19136 return (status); 19137 } 19138 19139 19140 /* 19141 * Function: sd_send_scsi_MODE_SENSE 19142 * 19143 * Description: Utility function for issuing a scsi MODE SENSE command. 19144 * Note: This routine uses a consistent implementation for Group0, 19145 * Group1, and Group2 commands across all platforms. ATAPI devices 19146 * use Group 1 Read/Write commands and Group 2 Mode Sense/Select 19147 * 19148 * Arguments: un - pointer to the softstate struct for the target. 19149 * cdbsize - size CDB to be used (CDB_GROUP0 (6 byte), or 19150 * CDB_GROUP[1|2] (10 byte). 19151 * bufaddr - buffer for page data retrieved from the target. 19152 * buflen - size of page to be retrieved. 19153 * page_code - page code of data to be retrieved from the target. 19154 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 19155 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 19156 * to use the USCSI "direct" chain and bypass the normal 19157 * command waitq. 19158 * 19159 * Return Code: 0 - Success 19160 * errno return code from sd_send_scsi_cmd() 19161 * 19162 * Context: Can sleep. Does not return until command is completed. 19163 */ 19164 19165 static int 19166 sd_send_scsi_MODE_SENSE(struct sd_lun *un, int cdbsize, uchar_t *bufaddr, 19167 size_t buflen, uchar_t page_code, int path_flag) 19168 { 19169 struct scsi_extended_sense sense_buf; 19170 union scsi_cdb cdb; 19171 struct uscsi_cmd ucmd_buf; 19172 int status; 19173 int headlen; 19174 19175 ASSERT(un != NULL); 19176 ASSERT(!mutex_owned(SD_MUTEX(un))); 19177 ASSERT(bufaddr != NULL); 19178 ASSERT((cdbsize == CDB_GROUP0) || (cdbsize == CDB_GROUP1) || 19179 (cdbsize == CDB_GROUP2)); 19180 19181 SD_TRACE(SD_LOG_IO, un, 19182 "sd_send_scsi_MODE_SENSE: entry: un:0x%p\n", un); 19183 19184 bzero(&cdb, sizeof (cdb)); 19185 bzero(&ucmd_buf, sizeof (ucmd_buf)); 19186 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 19187 bzero(bufaddr, buflen); 19188 19189 if (cdbsize == CDB_GROUP0) { 19190 cdb.scc_cmd = SCMD_MODE_SENSE; 19191 cdb.cdb_opaque[2] = page_code; 19192 FORMG0COUNT(&cdb, buflen); 19193 headlen = MODE_HEADER_LENGTH; 19194 } else { 19195 cdb.scc_cmd = SCMD_MODE_SENSE_G1; 19196 cdb.cdb_opaque[2] = page_code; 19197 FORMG1COUNT(&cdb, buflen); 19198 headlen = MODE_HEADER_LENGTH_GRP2; 19199 } 19200 19201 ASSERT(headlen <= buflen); 19202 SD_FILL_SCSI1_LUN_CDB(un, &cdb); 19203 19204 ucmd_buf.uscsi_cdb = (char *)&cdb; 19205 ucmd_buf.uscsi_cdblen = (uchar_t)cdbsize; 19206 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr; 19207 ucmd_buf.uscsi_buflen = buflen; 19208 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 19209 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 19210 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; 19211 ucmd_buf.uscsi_timeout = 60; 19212 19213 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 19214 UIO_SYSSPACE, path_flag); 19215 19216 switch (status) { 19217 case 0: 19218 /* 19219 * sr_check_wp() uses 0x3f page code and check the header of 19220 * mode page to determine if target device is 
write-protected.
19221 		 * But some USB devices return 0 bytes for 0x3f page code. For
19222 		 * this case, make sure that at least the mode page header is
19223 		 * returned.
19224 		 */
19225 		if (buflen - ucmd_buf.uscsi_resid < headlen)
19226 			status = EIO;
19227 		break;	/* Success! */
19228 	case EIO:
19229 		switch (ucmd_buf.uscsi_status) {
19230 		case STATUS_RESERVATION_CONFLICT:
19231 			status = EACCES;
19232 			break;
19233 		default:
19234 			break;
19235 		}
19236 		break;
19237 	default:
19238 		break;
19239 	}
19240 
19241 	if (status == 0) {
19242 		SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_MODE_SENSE: data",
19243 		    (uchar_t *)bufaddr, buflen, SD_LOG_HEX);
19244 	}
19245 	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_MODE_SENSE: exit\n");
19246 
19247 	return (status);
19248 }
19249 
19250 
19251 /*
19252  * Function: sd_send_scsi_MODE_SELECT
19253  *
19254  * Description: Utility function for issuing a scsi MODE SELECT command.
19255  *		Note: This routine uses a consistent implementation for Group0,
19256  *		Group1, and Group2 commands across all platforms. ATAPI devices
19257  *		use Group 1 Read/Write commands and Group 2 Mode Sense/Select.
19258  *
19259  * Arguments: un - pointer to the softstate struct for the target.
19260  *		cdbsize - size of CDB to be used (CDB_GROUP0 (6 byte), or
19261  *		CDB_GROUP[1|2] (10 byte)).
19262  *		bufaddr - buffer for page data retrieved from the target.
19263  *		buflen - size of page to be retrieved.
19264  *		save_page - boolean to determine if SP bit should be set.
19265  *		path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
19266  *			the normal command waitq, or SD_PATH_DIRECT_PRIORITY
19267  *			to use the USCSI "direct" chain and bypass the normal
19268  *			command waitq.
19269  *
19270  * Return Code: 0 - Success
19271  *		errno return code from sd_send_scsi_cmd()
19272  *
19273  * Context: Can sleep. Does not return until command is completed.
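 *
 *		A hedged usage sketch ("pagebuf" and "pagelen" are
 *		hypothetical; the caller supplies a mode parameter list it
 *		previously built, typically from a MODE SENSE of the same
 *		page):
 *
 *		    status = sd_send_scsi_MODE_SELECT(un, CDB_GROUP1,
 *			pagebuf, pagelen, SD_SAVE_PAGE, SD_PATH_DIRECT);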
19274  */
19275 
19276 static int
19277 sd_send_scsi_MODE_SELECT(struct sd_lun *un, int cdbsize, uchar_t *bufaddr,
19278 	size_t buflen, uchar_t save_page, int path_flag)
19279 {
19280 	struct	scsi_extended_sense	sense_buf;
19281 	union scsi_cdb		cdb;
19282 	struct uscsi_cmd	ucmd_buf;
19283 	int			status;
19284 
19285 	ASSERT(un != NULL);
19286 	ASSERT(!mutex_owned(SD_MUTEX(un)));
19287 	ASSERT(bufaddr != NULL);
19288 	ASSERT((cdbsize == CDB_GROUP0) || (cdbsize == CDB_GROUP1) ||
19289 	    (cdbsize == CDB_GROUP2));
19290 
19291 	SD_TRACE(SD_LOG_IO, un,
19292 	    "sd_send_scsi_MODE_SELECT: entry: un:0x%p\n", un);
19293 
19294 	bzero(&cdb, sizeof (cdb));
19295 	bzero(&ucmd_buf, sizeof (ucmd_buf));
19296 	bzero(&sense_buf, sizeof (struct scsi_extended_sense));
19297 
19298 	/* Set the PF bit for many third party drives */
19299 	cdb.cdb_opaque[1] = 0x10;
19300 
19301 	/* Set the savepage(SP) bit if given */
19302 	if (save_page == SD_SAVE_PAGE) {
19303 		cdb.cdb_opaque[1] |= 0x01;
19304 	}
19305 
19306 	if (cdbsize == CDB_GROUP0) {
19307 		cdb.scc_cmd = SCMD_MODE_SELECT;
19308 		FORMG0COUNT(&cdb, buflen);
19309 	} else {
19310 		cdb.scc_cmd = SCMD_MODE_SELECT_G1;
19311 		FORMG1COUNT(&cdb, buflen);
19312 	}
19313 
19314 	SD_FILL_SCSI1_LUN_CDB(un, &cdb);
19315 
19316 	ucmd_buf.uscsi_cdb	= (char *)&cdb;
19317 	ucmd_buf.uscsi_cdblen	= (uchar_t)cdbsize;
19318 	ucmd_buf.uscsi_bufaddr	= (caddr_t)bufaddr;
19319 	ucmd_buf.uscsi_buflen	= buflen;
19320 	ucmd_buf.uscsi_rqbuf	= (caddr_t)&sense_buf;
19321 	ucmd_buf.uscsi_rqlen	= sizeof (struct scsi_extended_sense);
19322 	ucmd_buf.uscsi_flags	= USCSI_RQENABLE | USCSI_WRITE | USCSI_SILENT;
19323 	ucmd_buf.uscsi_timeout	= 60;
19324 
19325 	status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL,
19326 	    UIO_SYSSPACE, path_flag);
19327 
19328 	switch (status) {
19329 	case 0:
19330 		break;	/* Success! */
19331 	case EIO:
19332 		switch (ucmd_buf.uscsi_status) {
19333 		case STATUS_RESERVATION_CONFLICT:
19334 			status = EACCES;
19335 			break;
19336 		default:
19337 			break;
19338 		}
19339 		break;
19340 	default:
19341 		break;
19342 	}
19343 
19344 	if (status == 0) {
19345 		SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_MODE_SELECT: data",
19346 		    (uchar_t *)bufaddr, buflen, SD_LOG_HEX);
19347 	}
19348 	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_MODE_SELECT: exit\n");
19349 
19350 	return (status);
19351 }
19352 
19353 
19354 /*
19355  * Function: sd_send_scsi_RDWR
19356  *
19357  * Description: Issue a scsi READ or WRITE command with the given parameters.
19358  *
19359  * Arguments: un:	Pointer to the sd_lun struct for the target.
19360  *		cmd:	SCMD_READ or SCMD_WRITE
19361  *		bufaddr:	Address of caller's buffer to receive the RDWR data
19362  *		buflen:	Length of caller's buffer to receive the RDWR data.
19363  *		start_block:	Block number for the start of the RDWR operation.
19364  *			(Assumes target-native block size.)
19365  *		residp:	Pointer to variable to receive the residual of the
19366  *			RDWR operation (may be NULL if no residual requested).
19367  *		path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
19368  *			the normal command waitq, or SD_PATH_DIRECT_PRIORITY
19369  *			to use the USCSI "direct" chain and bypass the normal
19370  *			command waitq.
19371  *
19372  * Return Code: 0 - Success
19373  *		errno return code from sd_send_scsi_cmd()
19374  *
19375  * Context: Can sleep. Does not return until command is completed.
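 *
 *		Example sketch (hypothetical: read one target-native block
 *		into a caller buffer "blkbuf" at block "start_block"):
 *
 *		    status = sd_send_scsi_RDWR(un, SCMD_READ, blkbuf,
 *			un->un_tgt_blocksize, start_block, SD_PATH_DIRECT);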
19376 */ 19377 19378 static int 19379 sd_send_scsi_RDWR(struct sd_lun *un, uchar_t cmd, void *bufaddr, 19380 size_t buflen, daddr_t start_block, int path_flag) 19381 { 19382 struct scsi_extended_sense sense_buf; 19383 union scsi_cdb cdb; 19384 struct uscsi_cmd ucmd_buf; 19385 uint32_t block_count; 19386 int status; 19387 int cdbsize; 19388 uchar_t flag; 19389 19390 ASSERT(un != NULL); 19391 ASSERT(!mutex_owned(SD_MUTEX(un))); 19392 ASSERT(bufaddr != NULL); 19393 ASSERT((cmd == SCMD_READ) || (cmd == SCMD_WRITE)); 19394 19395 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_RDWR: entry: un:0x%p\n", un); 19396 19397 if (un->un_f_tgt_blocksize_is_valid != TRUE) { 19398 return (EINVAL); 19399 } 19400 19401 mutex_enter(SD_MUTEX(un)); 19402 block_count = SD_BYTES2TGTBLOCKS(un, buflen); 19403 mutex_exit(SD_MUTEX(un)); 19404 19405 flag = (cmd == SCMD_READ) ? USCSI_READ : USCSI_WRITE; 19406 19407 SD_INFO(SD_LOG_IO, un, "sd_send_scsi_RDWR: " 19408 "bufaddr:0x%p buflen:0x%x start_block:0x%p block_count:0x%x\n", 19409 bufaddr, buflen, start_block, block_count); 19410 19411 bzero(&cdb, sizeof (cdb)); 19412 bzero(&ucmd_buf, sizeof (ucmd_buf)); 19413 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 19414 19415 /* Compute CDB size to use */ 19416 if (start_block > 0xffffffff) 19417 cdbsize = CDB_GROUP4; 19418 else if ((start_block & 0xFFE00000) || 19419 (un->un_f_cfg_is_atapi == TRUE)) 19420 cdbsize = CDB_GROUP1; 19421 else 19422 cdbsize = CDB_GROUP0; 19423 19424 switch (cdbsize) { 19425 case CDB_GROUP0: /* 6-byte CDBs */ 19426 cdb.scc_cmd = cmd; 19427 FORMG0ADDR(&cdb, start_block); 19428 FORMG0COUNT(&cdb, block_count); 19429 break; 19430 case CDB_GROUP1: /* 10-byte CDBs */ 19431 cdb.scc_cmd = cmd | SCMD_GROUP1; 19432 FORMG1ADDR(&cdb, start_block); 19433 FORMG1COUNT(&cdb, block_count); 19434 break; 19435 case CDB_GROUP4: /* 16-byte CDBs */ 19436 cdb.scc_cmd = cmd | SCMD_GROUP4; 19437 FORMG4LONGADDR(&cdb, (uint64_t)start_block); 19438 FORMG4COUNT(&cdb, block_count); 19439 break; 19440 case CDB_GROUP5: /* 12-byte CDBs (currently unsupported) */ 19441 default: 19442 /* All others reserved */ 19443 return (EINVAL); 19444 } 19445 19446 /* Set LUN bit(s) in CDB if this is a SCSI-1 device */ 19447 SD_FILL_SCSI1_LUN_CDB(un, &cdb); 19448 19449 ucmd_buf.uscsi_cdb = (char *)&cdb; 19450 ucmd_buf.uscsi_cdblen = (uchar_t)cdbsize; 19451 ucmd_buf.uscsi_bufaddr = bufaddr; 19452 ucmd_buf.uscsi_buflen = buflen; 19453 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 19454 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 19455 ucmd_buf.uscsi_flags = flag | USCSI_RQENABLE | USCSI_SILENT; 19456 ucmd_buf.uscsi_timeout = 60; 19457 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 19458 UIO_SYSSPACE, path_flag); 19459 switch (status) { 19460 case 0: 19461 break; /* Success! */ 19462 case EIO: 19463 switch (ucmd_buf.uscsi_status) { 19464 case STATUS_RESERVATION_CONFLICT: 19465 status = EACCES; 19466 break; 19467 default: 19468 break; 19469 } 19470 break; 19471 default: 19472 break; 19473 } 19474 19475 if (status == 0) { 19476 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_RDWR: data", 19477 (uchar_t *)bufaddr, buflen, SD_LOG_HEX); 19478 } 19479 19480 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_RDWR: exit\n"); 19481 19482 return (status); 19483 } 19484 19485 19486 /* 19487 * Function: sd_send_scsi_LOG_SENSE 19488 * 19489 * Description: Issue a scsi LOG_SENSE command with the given parameters. 19490 * 19491 * Arguments: un: Pointer to the sd_lun struct for the target. 
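 *		bufaddr:	Buffer to receive the log page data.
 *		buflen:	Length of the caller's buffer.
 *		page_code/page_control:	Combined into CDB byte 2 as
 *			(page_control << 6) | page_code.
 *		param_ptr:	Parameter pointer, placed in CDB bytes 5-6.
 *		path_flag:	SD_PATH_DIRECT or SD_PATH_DIRECT_PRIORITY,
 *			as described for the other routines above.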
19492 * 19493 * Return Code: 0 - Success 19494 * errno return code from sd_send_scsi_cmd() 19495 * 19496 * Context: Can sleep. Does not return until command is completed. 19497 */ 19498 19499 static int 19500 sd_send_scsi_LOG_SENSE(struct sd_lun *un, uchar_t *bufaddr, uint16_t buflen, 19501 uchar_t page_code, uchar_t page_control, uint16_t param_ptr, 19502 int path_flag) 19503 19504 { 19505 struct scsi_extended_sense sense_buf; 19506 union scsi_cdb cdb; 19507 struct uscsi_cmd ucmd_buf; 19508 int status; 19509 19510 ASSERT(un != NULL); 19511 ASSERT(!mutex_owned(SD_MUTEX(un))); 19512 19513 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_LOG_SENSE: entry: un:0x%p\n", un); 19514 19515 bzero(&cdb, sizeof (cdb)); 19516 bzero(&ucmd_buf, sizeof (ucmd_buf)); 19517 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 19518 19519 cdb.scc_cmd = SCMD_LOG_SENSE_G1; 19520 cdb.cdb_opaque[2] = (page_control << 6) | page_code; 19521 cdb.cdb_opaque[5] = (uchar_t)((param_ptr & 0xFF00) >> 8); 19522 cdb.cdb_opaque[6] = (uchar_t)(param_ptr & 0x00FF); 19523 FORMG1COUNT(&cdb, buflen); 19524 19525 ucmd_buf.uscsi_cdb = (char *)&cdb; 19526 ucmd_buf.uscsi_cdblen = CDB_GROUP1; 19527 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr; 19528 ucmd_buf.uscsi_buflen = buflen; 19529 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 19530 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 19531 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; 19532 ucmd_buf.uscsi_timeout = 60; 19533 19534 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 19535 UIO_SYSSPACE, path_flag); 19536 19537 switch (status) { 19538 case 0: 19539 break; 19540 case EIO: 19541 switch (ucmd_buf.uscsi_status) { 19542 case STATUS_RESERVATION_CONFLICT: 19543 status = EACCES; 19544 break; 19545 case STATUS_CHECK: 19546 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 19547 (scsi_sense_key((uint8_t *)&sense_buf) == 19548 KEY_ILLEGAL_REQUEST) && 19549 (scsi_sense_asc((uint8_t *)&sense_buf) == 0x24)) { 19550 /* 19551 * ASC 0x24: INVALID FIELD IN CDB 19552 */ 19553 switch (page_code) { 19554 case START_STOP_CYCLE_PAGE: 19555 /* 19556 * The start stop cycle counter is 19557 * implemented as page 0x31 in earlier 19558 * generation disks. In new generation 19559 * disks the start stop cycle counter is 19560 * implemented as page 0xE. To properly 19561 * handle this case if an attempt for 19562 * log page 0xE is made and fails we 19563 * will try again using page 0x31. 19564 * 19565 * Network storage BU committed to 19566 * maintain the page 0x31 for this 19567 * purpose and will not have any other 19568 * page implemented with page code 0x31 19569 * until all disks transition to the 19570 * standard page. 
19571 */ 19572 mutex_enter(SD_MUTEX(un)); 19573 un->un_start_stop_cycle_page = 19574 START_STOP_CYCLE_VU_PAGE; 19575 cdb.cdb_opaque[2] = 19576 (char)(page_control << 6) | 19577 un->un_start_stop_cycle_page; 19578 mutex_exit(SD_MUTEX(un)); 19579 status = sd_send_scsi_cmd( 19580 SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 19581 UIO_SYSSPACE, path_flag); 19582 19583 break; 19584 case TEMPERATURE_PAGE: 19585 status = ENOTTY; 19586 break; 19587 default: 19588 break; 19589 } 19590 } 19591 break; 19592 default: 19593 break; 19594 } 19595 break; 19596 default: 19597 break; 19598 } 19599 19600 if (status == 0) { 19601 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_LOG_SENSE: data", 19602 (uchar_t *)bufaddr, buflen, SD_LOG_HEX); 19603 } 19604 19605 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_LOG_SENSE: exit\n"); 19606 19607 return (status); 19608 } 19609 19610 19611 /* 19612 * Function: sdioctl 19613 * 19614 * Description: Driver's ioctl(9e) entry point function. 19615 * 19616 * Arguments: dev - device number 19617 * cmd - ioctl operation to be performed 19618 * arg - user argument, contains data to be set or reference 19619 * parameter for get 19620 * flag - bit flag, indicating open settings, 32/64 bit type 19621 * cred_p - user credential pointer 19622 * rval_p - calling process return value (OPT) 19623 * 19624 * Return Code: EINVAL 19625 * ENOTTY 19626 * ENXIO 19627 * EIO 19628 * EFAULT 19629 * ENOTSUP 19630 * EPERM 19631 * 19632 * Context: Called from the device switch at normal priority. 19633 */ 19634 19635 static int 19636 sdioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cred_p, int *rval_p) 19637 { 19638 struct sd_lun *un = NULL; 19639 int err = 0; 19640 int i = 0; 19641 cred_t *cr; 19642 int tmprval = EINVAL; 19643 int is_valid; 19644 19645 /* 19646 * All device accesses go thru sdstrategy where we check on suspend 19647 * status 19648 */ 19649 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 19650 return (ENXIO); 19651 } 19652 19653 ASSERT(!mutex_owned(SD_MUTEX(un))); 19654 19655 19656 is_valid = SD_IS_VALID_LABEL(un); 19657 19658 /* 19659 * Moved this wait from sd_uscsi_strategy to here for 19660 * reasons of deadlock prevention. Internal driver commands, 19661 * specifically those to change a devices power level, result 19662 * in a call to sd_uscsi_strategy. 19663 */ 19664 mutex_enter(SD_MUTEX(un)); 19665 while ((un->un_state == SD_STATE_SUSPENDED) || 19666 (un->un_state == SD_STATE_PM_CHANGING)) { 19667 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 19668 } 19669 /* 19670 * Twiddling the counter here protects commands from now 19671 * through to the top of sd_uscsi_strategy. Without the 19672 * counter inc. a power down, for example, could get in 19673 * after the above check for state is made and before 19674 * execution gets to the top of sd_uscsi_strategy. 19675 * That would cause problems. 
19676 */ 19677 un->un_ncmds_in_driver++; 19678 19679 if (!is_valid && 19680 (flag & (FNDELAY | FNONBLOCK))) { 19681 switch (cmd) { 19682 case DKIOCGGEOM: /* SD_PATH_DIRECT */ 19683 case DKIOCGVTOC: 19684 case DKIOCGAPART: 19685 case DKIOCPARTINFO: 19686 case DKIOCSGEOM: 19687 case DKIOCSAPART: 19688 case DKIOCGETEFI: 19689 case DKIOCPARTITION: 19690 case DKIOCSVTOC: 19691 case DKIOCSETEFI: 19692 case DKIOCGMBOOT: 19693 case DKIOCSMBOOT: 19694 case DKIOCG_PHYGEOM: 19695 case DKIOCG_VIRTGEOM: 19696 /* let cmlb handle it */ 19697 goto skip_ready_valid; 19698 19699 case CDROMPAUSE: 19700 case CDROMRESUME: 19701 case CDROMPLAYMSF: 19702 case CDROMPLAYTRKIND: 19703 case CDROMREADTOCHDR: 19704 case CDROMREADTOCENTRY: 19705 case CDROMSTOP: 19706 case CDROMSTART: 19707 case CDROMVOLCTRL: 19708 case CDROMSUBCHNL: 19709 case CDROMREADMODE2: 19710 case CDROMREADMODE1: 19711 case CDROMREADOFFSET: 19712 case CDROMSBLKMODE: 19713 case CDROMGBLKMODE: 19714 case CDROMGDRVSPEED: 19715 case CDROMSDRVSPEED: 19716 case CDROMCDDA: 19717 case CDROMCDXA: 19718 case CDROMSUBCODE: 19719 if (!ISCD(un)) { 19720 un->un_ncmds_in_driver--; 19721 ASSERT(un->un_ncmds_in_driver >= 0); 19722 mutex_exit(SD_MUTEX(un)); 19723 return (ENOTTY); 19724 } 19725 break; 19726 case FDEJECT: 19727 case DKIOCEJECT: 19728 case CDROMEJECT: 19729 if (!un->un_f_eject_media_supported) { 19730 un->un_ncmds_in_driver--; 19731 ASSERT(un->un_ncmds_in_driver >= 0); 19732 mutex_exit(SD_MUTEX(un)); 19733 return (ENOTTY); 19734 } 19735 break; 19736 case DKIOCFLUSHWRITECACHE: 19737 mutex_exit(SD_MUTEX(un)); 19738 err = sd_send_scsi_TEST_UNIT_READY(un, 0); 19739 if (err != 0) { 19740 mutex_enter(SD_MUTEX(un)); 19741 un->un_ncmds_in_driver--; 19742 ASSERT(un->un_ncmds_in_driver >= 0); 19743 mutex_exit(SD_MUTEX(un)); 19744 return (EIO); 19745 } 19746 mutex_enter(SD_MUTEX(un)); 19747 /* FALLTHROUGH */ 19748 case DKIOCREMOVABLE: 19749 case DKIOCHOTPLUGGABLE: 19750 case DKIOCINFO: 19751 case DKIOCGMEDIAINFO: 19752 case MHIOCENFAILFAST: 19753 case MHIOCSTATUS: 19754 case MHIOCTKOWN: 19755 case MHIOCRELEASE: 19756 case MHIOCGRP_INKEYS: 19757 case MHIOCGRP_INRESV: 19758 case MHIOCGRP_REGISTER: 19759 case MHIOCGRP_RESERVE: 19760 case MHIOCGRP_PREEMPTANDABORT: 19761 case MHIOCGRP_REGISTERANDIGNOREKEY: 19762 case CDROMCLOSETRAY: 19763 case USCSICMD: 19764 goto skip_ready_valid; 19765 default: 19766 break; 19767 } 19768 19769 mutex_exit(SD_MUTEX(un)); 19770 err = sd_ready_and_valid(un); 19771 mutex_enter(SD_MUTEX(un)); 19772 19773 if (err != SD_READY_VALID) { 19774 switch (cmd) { 19775 case DKIOCSTATE: 19776 case CDROMGDRVSPEED: 19777 case CDROMSDRVSPEED: 19778 case FDEJECT: /* for eject command */ 19779 case DKIOCEJECT: 19780 case CDROMEJECT: 19781 case DKIOCREMOVABLE: 19782 case DKIOCHOTPLUGGABLE: 19783 break; 19784 default: 19785 if (un->un_f_has_removable_media) { 19786 err = ENXIO; 19787 } else { 19788 /* Do not map SD_RESERVED_BY_OTHERS to EIO */ 19789 if (err == SD_RESERVED_BY_OTHERS) { 19790 err = EACCES; 19791 } else { 19792 err = EIO; 19793 } 19794 } 19795 un->un_ncmds_in_driver--; 19796 ASSERT(un->un_ncmds_in_driver >= 0); 19797 mutex_exit(SD_MUTEX(un)); 19798 return (err); 19799 } 19800 } 19801 } 19802 19803 skip_ready_valid: 19804 mutex_exit(SD_MUTEX(un)); 19805 19806 switch (cmd) { 19807 case DKIOCINFO: 19808 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCINFO\n"); 19809 err = sd_dkio_ctrl_info(dev, (caddr_t)arg, flag); 19810 break; 19811 19812 case DKIOCGMEDIAINFO: 19813 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCGMEDIAINFO\n"); 19814 err = sd_get_media_info(dev, 
(caddr_t)arg, flag); 19815 break; 19816 19817 case DKIOCGGEOM: 19818 case DKIOCGVTOC: 19819 case DKIOCGAPART: 19820 case DKIOCPARTINFO: 19821 case DKIOCSGEOM: 19822 case DKIOCSAPART: 19823 case DKIOCGETEFI: 19824 case DKIOCPARTITION: 19825 case DKIOCSVTOC: 19826 case DKIOCSETEFI: 19827 case DKIOCGMBOOT: 19828 case DKIOCSMBOOT: 19829 case DKIOCG_PHYGEOM: 19830 case DKIOCG_VIRTGEOM: 19831 SD_TRACE(SD_LOG_IOCTL, un, "DKIOC %d\n", cmd); 19832 19833 /* TUR should spin up */ 19834 19835 if (un->un_f_has_removable_media) 19836 err = sd_send_scsi_TEST_UNIT_READY(un, 19837 SD_CHECK_FOR_MEDIA); 19838 else 19839 err = sd_send_scsi_TEST_UNIT_READY(un, 0); 19840 19841 if (err != 0) 19842 break; 19843 19844 err = cmlb_ioctl(un->un_cmlbhandle, dev, 19845 cmd, arg, flag, cred_p, rval_p, (void *)SD_PATH_DIRECT); 19846 19847 if ((err == 0) && 19848 ((cmd == DKIOCSETEFI) || 19849 (un->un_f_pkstats_enabled) && 19850 (cmd == DKIOCSAPART || cmd == DKIOCSVTOC))) { 19851 19852 tmprval = cmlb_validate(un->un_cmlbhandle, CMLB_SILENT, 19853 (void *)SD_PATH_DIRECT); 19854 if ((tmprval == 0) && un->un_f_pkstats_enabled) { 19855 sd_set_pstats(un); 19856 SD_TRACE(SD_LOG_IO_PARTITION, un, 19857 "sd_ioctl: un:0x%p pstats created and " 19858 "set\n", un); 19859 } 19860 } 19861 19862 if ((cmd == DKIOCSVTOC) || 19863 ((cmd == DKIOCSETEFI) && (tmprval == 0))) { 19864 19865 mutex_enter(SD_MUTEX(un)); 19866 if (un->un_f_devid_supported && 19867 (un->un_f_opt_fab_devid == TRUE)) { 19868 if (un->un_devid == NULL) { 19869 sd_register_devid(un, SD_DEVINFO(un), 19870 SD_TARGET_IS_UNRESERVED); 19871 } else { 19872 /* 19873 * The device id for this disk 19874 * has been fabricated. The 19875 * device id must be preserved 19876 * by writing it back out to 19877 * disk. 19878 */ 19879 if (sd_write_deviceid(un) != 0) { 19880 ddi_devid_free(un->un_devid); 19881 un->un_devid = NULL; 19882 } 19883 } 19884 } 19885 mutex_exit(SD_MUTEX(un)); 19886 } 19887 19888 break; 19889 19890 case DKIOCLOCK: 19891 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCLOCK\n"); 19892 err = sd_send_scsi_DOORLOCK(un, SD_REMOVAL_PREVENT, 19893 SD_PATH_STANDARD); 19894 break; 19895 19896 case DKIOCUNLOCK: 19897 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCUNLOCK\n"); 19898 err = sd_send_scsi_DOORLOCK(un, SD_REMOVAL_ALLOW, 19899 SD_PATH_STANDARD); 19900 break; 19901 19902 case DKIOCSTATE: { 19903 enum dkio_state state; 19904 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCSTATE\n"); 19905 19906 if (ddi_copyin((void *)arg, &state, sizeof (int), flag) != 0) { 19907 err = EFAULT; 19908 } else { 19909 err = sd_check_media(dev, state); 19910 if (err == 0) { 19911 if (ddi_copyout(&un->un_mediastate, (void *)arg, 19912 sizeof (int), flag) != 0) 19913 err = EFAULT; 19914 } 19915 } 19916 break; 19917 } 19918 19919 case DKIOCREMOVABLE: 19920 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCREMOVABLE\n"); 19921 i = un->un_f_has_removable_media ? 1 : 0; 19922 if (ddi_copyout(&i, (void *)arg, sizeof (int), flag) != 0) { 19923 err = EFAULT; 19924 } else { 19925 err = 0; 19926 } 19927 break; 19928 19929 case DKIOCHOTPLUGGABLE: 19930 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCHOTPLUGGABLE\n"); 19931 i = un->un_f_is_hotpluggable ? 
1 : 0; 19932 if (ddi_copyout(&i, (void *)arg, sizeof (int), flag) != 0) { 19933 err = EFAULT; 19934 } else { 19935 err = 0; 19936 } 19937 break; 19938 19939 case DKIOCGTEMPERATURE: 19940 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCGTEMPERATURE\n"); 19941 err = sd_dkio_get_temp(dev, (caddr_t)arg, flag); 19942 break; 19943 19944 case MHIOCENFAILFAST: 19945 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCENFAILFAST\n"); 19946 if ((err = drv_priv(cred_p)) == 0) { 19947 err = sd_mhdioc_failfast(dev, (caddr_t)arg, flag); 19948 } 19949 break; 19950 19951 case MHIOCTKOWN: 19952 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCTKOWN\n"); 19953 if ((err = drv_priv(cred_p)) == 0) { 19954 err = sd_mhdioc_takeown(dev, (caddr_t)arg, flag); 19955 } 19956 break; 19957 19958 case MHIOCRELEASE: 19959 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCRELEASE\n"); 19960 if ((err = drv_priv(cred_p)) == 0) { 19961 err = sd_mhdioc_release(dev); 19962 } 19963 break; 19964 19965 case MHIOCSTATUS: 19966 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCSTATUS\n"); 19967 if ((err = drv_priv(cred_p)) == 0) { 19968 switch (sd_send_scsi_TEST_UNIT_READY(un, 0)) { 19969 case 0: 19970 err = 0; 19971 break; 19972 case EACCES: 19973 *rval_p = 1; 19974 err = 0; 19975 break; 19976 default: 19977 err = EIO; 19978 break; 19979 } 19980 } 19981 break; 19982 19983 case MHIOCQRESERVE: 19984 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCQRESERVE\n"); 19985 if ((err = drv_priv(cred_p)) == 0) { 19986 err = sd_reserve_release(dev, SD_RESERVE); 19987 } 19988 break; 19989 19990 case MHIOCREREGISTERDEVID: 19991 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCREREGISTERDEVID\n"); 19992 if (drv_priv(cred_p) == EPERM) { 19993 err = EPERM; 19994 } else if (!un->un_f_devid_supported) { 19995 err = ENOTTY; 19996 } else { 19997 err = sd_mhdioc_register_devid(dev); 19998 } 19999 break; 20000 20001 case MHIOCGRP_INKEYS: 20002 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_INKEYS\n"); 20003 if (((err = drv_priv(cred_p)) != EPERM) && arg != NULL) { 20004 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 20005 err = ENOTSUP; 20006 } else { 20007 err = sd_mhdioc_inkeys(dev, (caddr_t)arg, 20008 flag); 20009 } 20010 } 20011 break; 20012 20013 case MHIOCGRP_INRESV: 20014 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_INRESV\n"); 20015 if (((err = drv_priv(cred_p)) != EPERM) && arg != NULL) { 20016 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 20017 err = ENOTSUP; 20018 } else { 20019 err = sd_mhdioc_inresv(dev, (caddr_t)arg, flag); 20020 } 20021 } 20022 break; 20023 20024 case MHIOCGRP_REGISTER: 20025 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_REGISTER\n"); 20026 if ((err = drv_priv(cred_p)) != EPERM) { 20027 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 20028 err = ENOTSUP; 20029 } else if (arg != NULL) { 20030 mhioc_register_t reg; 20031 if (ddi_copyin((void *)arg, ®, 20032 sizeof (mhioc_register_t), flag) != 0) { 20033 err = EFAULT; 20034 } else { 20035 err = 20036 sd_send_scsi_PERSISTENT_RESERVE_OUT( 20037 un, SD_SCSI3_REGISTER, 20038 (uchar_t *)®); 20039 } 20040 } 20041 } 20042 break; 20043 20044 case MHIOCGRP_RESERVE: 20045 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_RESERVE\n"); 20046 if ((err = drv_priv(cred_p)) != EPERM) { 20047 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 20048 err = ENOTSUP; 20049 } else if (arg != NULL) { 20050 mhioc_resv_desc_t resv_desc; 20051 if (ddi_copyin((void *)arg, &resv_desc, 20052 sizeof (mhioc_resv_desc_t), flag) != 0) { 20053 err = EFAULT; 20054 } else { 20055 err = 20056 sd_send_scsi_PERSISTENT_RESERVE_OUT( 20057 un, SD_SCSI3_RESERVE, 20058 (uchar_t *)&resv_desc); 20059 } 20060 } 20061 } 20062 break; 20063 20064 
case MHIOCGRP_PREEMPTANDABORT: 20065 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_PREEMPTANDABORT\n"); 20066 if ((err = drv_priv(cred_p)) != EPERM) { 20067 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 20068 err = ENOTSUP; 20069 } else if (arg != NULL) { 20070 mhioc_preemptandabort_t preempt_abort; 20071 if (ddi_copyin((void *)arg, &preempt_abort, 20072 sizeof (mhioc_preemptandabort_t), 20073 flag) != 0) { 20074 err = EFAULT; 20075 } else { 20076 err = 20077 sd_send_scsi_PERSISTENT_RESERVE_OUT( 20078 un, SD_SCSI3_PREEMPTANDABORT, 20079 (uchar_t *)&preempt_abort); 20080 } 20081 } 20082 } 20083 break; 20084 20085 case MHIOCGRP_REGISTERANDIGNOREKEY: 20086 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_REGISTERANDIGNOREKEY\n"); 20087 if ((err = drv_priv(cred_p)) != EPERM) { 20088 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 20089 err = ENOTSUP; 20090 } else if (arg != NULL) { 20091 mhioc_registerandignorekey_t r_and_i; 20092 if (ddi_copyin((void *)arg, (void *)&r_and_i, 20093 sizeof (mhioc_registerandignorekey_t), 20094 flag) != 0) { 20095 err = EFAULT; 20096 } else { 20097 err = 20098 sd_send_scsi_PERSISTENT_RESERVE_OUT( 20099 un, SD_SCSI3_REGISTERANDIGNOREKEY, 20100 (uchar_t *)&r_and_i); 20101 } 20102 } 20103 } 20104 break; 20105 20106 case USCSICMD: 20107 SD_TRACE(SD_LOG_IOCTL, un, "USCSICMD\n"); 20108 cr = ddi_get_cred(); 20109 if ((drv_priv(cred_p) != 0) && (drv_priv(cr) != 0)) { 20110 err = EPERM; 20111 } else { 20112 enum uio_seg uioseg; 20113 uioseg = (flag & FKIOCTL) ? UIO_SYSSPACE : 20114 UIO_USERSPACE; 20115 if (un->un_f_format_in_progress == TRUE) { 20116 err = EAGAIN; 20117 break; 20118 } 20119 err = sd_send_scsi_cmd(dev, (struct uscsi_cmd *)arg, 20120 flag, uioseg, SD_PATH_STANDARD); 20121 } 20122 break; 20123 20124 case CDROMPAUSE: 20125 case CDROMRESUME: 20126 SD_TRACE(SD_LOG_IOCTL, un, "PAUSE-RESUME\n"); 20127 if (!ISCD(un)) { 20128 err = ENOTTY; 20129 } else { 20130 err = sr_pause_resume(dev, cmd); 20131 } 20132 break; 20133 20134 case CDROMPLAYMSF: 20135 SD_TRACE(SD_LOG_IOCTL, un, "CDROMPLAYMSF\n"); 20136 if (!ISCD(un)) { 20137 err = ENOTTY; 20138 } else { 20139 err = sr_play_msf(dev, (caddr_t)arg, flag); 20140 } 20141 break; 20142 20143 case CDROMPLAYTRKIND: 20144 SD_TRACE(SD_LOG_IOCTL, un, "CDROMPLAYTRKIND\n"); 20145 #if defined(__i386) || defined(__amd64) 20146 /* 20147 * not supported on ATAPI CD drives, use CDROMPLAYMSF instead 20148 */ 20149 if (!ISCD(un) || (un->un_f_cfg_is_atapi == TRUE)) { 20150 #else 20151 if (!ISCD(un)) { 20152 #endif 20153 err = ENOTTY; 20154 } else { 20155 err = sr_play_trkind(dev, (caddr_t)arg, flag); 20156 } 20157 break; 20158 20159 case CDROMREADTOCHDR: 20160 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADTOCHDR\n"); 20161 if (!ISCD(un)) { 20162 err = ENOTTY; 20163 } else { 20164 err = sr_read_tochdr(dev, (caddr_t)arg, flag); 20165 } 20166 break; 20167 20168 case CDROMREADTOCENTRY: 20169 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADTOCENTRY\n"); 20170 if (!ISCD(un)) { 20171 err = ENOTTY; 20172 } else { 20173 err = sr_read_tocentry(dev, (caddr_t)arg, flag); 20174 } 20175 break; 20176 20177 case CDROMSTOP: 20178 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSTOP\n"); 20179 if (!ISCD(un)) { 20180 err = ENOTTY; 20181 } else { 20182 err = sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_STOP, 20183 SD_PATH_STANDARD); 20184 } 20185 break; 20186 20187 case CDROMSTART: 20188 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSTART\n"); 20189 if (!ISCD(un)) { 20190 err = ENOTTY; 20191 } else { 20192 err = sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_START, 20193 SD_PATH_STANDARD); 20194 } 20195 break; 
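	/*
	 * Illustrative sketch (not part of the driver): the CDROMSTOP and
	 * CDROMSTART cases above, and the CDROMCLOSETRAY and eject cases
	 * below, all resolve to sd_send_scsi_START_STOP_UNIT() or
	 * sr_eject(). A user program drives them with plain ioctls on the
	 * raw device, e.g. (device path illustrative, error handling
	 * omitted):
	 *
	 *	int fd = open("/dev/rdsk/c0t6d0s2", O_RDONLY | O_NDELAY);
	 *
	 *	(void) ioctl(fd, CDROMSTART, 0);
	 *	(void) ioctl(fd, CDROMEJECT, 0);
	 */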
20196 20197 case CDROMCLOSETRAY: 20198 SD_TRACE(SD_LOG_IOCTL, un, "CDROMCLOSETRAY\n"); 20199 if (!ISCD(un)) { 20200 err = ENOTTY; 20201 } else { 20202 err = sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_CLOSE, 20203 SD_PATH_STANDARD); 20204 } 20205 break; 20206 20207 case FDEJECT: /* for eject command */ 20208 case DKIOCEJECT: 20209 case CDROMEJECT: 20210 SD_TRACE(SD_LOG_IOCTL, un, "EJECT\n"); 20211 if (!un->un_f_eject_media_supported) { 20212 err = ENOTTY; 20213 } else { 20214 err = sr_eject(dev); 20215 } 20216 break; 20217 20218 case CDROMVOLCTRL: 20219 SD_TRACE(SD_LOG_IOCTL, un, "CDROMVOLCTRL\n"); 20220 if (!ISCD(un)) { 20221 err = ENOTTY; 20222 } else { 20223 err = sr_volume_ctrl(dev, (caddr_t)arg, flag); 20224 } 20225 break; 20226 20227 case CDROMSUBCHNL: 20228 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSUBCHNL\n"); 20229 if (!ISCD(un)) { 20230 err = ENOTTY; 20231 } else { 20232 err = sr_read_subchannel(dev, (caddr_t)arg, flag); 20233 } 20234 break; 20235 20236 case CDROMREADMODE2: 20237 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADMODE2\n"); 20238 if (!ISCD(un)) { 20239 err = ENOTTY; 20240 } else if (un->un_f_cfg_is_atapi == TRUE) { 20241 /* 20242 * If the drive supports READ CD, use that instead of 20243 * switching the LBA size via a MODE SELECT 20244 * Block Descriptor 20245 */ 20246 err = sr_read_cd_mode2(dev, (caddr_t)arg, flag); 20247 } else { 20248 err = sr_read_mode2(dev, (caddr_t)arg, flag); 20249 } 20250 break; 20251 20252 case CDROMREADMODE1: 20253 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADMODE1\n"); 20254 if (!ISCD(un)) { 20255 err = ENOTTY; 20256 } else { 20257 err = sr_read_mode1(dev, (caddr_t)arg, flag); 20258 } 20259 break; 20260 20261 case CDROMREADOFFSET: 20262 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADOFFSET\n"); 20263 if (!ISCD(un)) { 20264 err = ENOTTY; 20265 } else { 20266 err = sr_read_sony_session_offset(dev, (caddr_t)arg, 20267 flag); 20268 } 20269 break; 20270 20271 case CDROMSBLKMODE: 20272 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSBLKMODE\n"); 20273 /* 20274 * There is no means of changing block size in case of atapi 20275 * drives, thus return ENOTTY if drive type is atapi 20276 */ 20277 if (!ISCD(un) || (un->un_f_cfg_is_atapi == TRUE)) { 20278 err = ENOTTY; 20279 } else if (un->un_f_mmc_cap == TRUE) { 20280 20281 /* 20282 * MMC Devices do not support changing the 20283 * logical block size 20284 * 20285 * Note: EINVAL is being returned instead of ENOTTY to 20286 * maintain consistancy with the original mmc 20287 * driver update. 20288 */ 20289 err = EINVAL; 20290 } else { 20291 mutex_enter(SD_MUTEX(un)); 20292 if ((!(un->un_exclopen & (1<<SDPART(dev)))) || 20293 (un->un_ncmds_in_transport > 0)) { 20294 mutex_exit(SD_MUTEX(un)); 20295 err = EINVAL; 20296 } else { 20297 mutex_exit(SD_MUTEX(un)); 20298 err = sr_change_blkmode(dev, cmd, arg, flag); 20299 } 20300 } 20301 break; 20302 20303 case CDROMGBLKMODE: 20304 SD_TRACE(SD_LOG_IOCTL, un, "CDROMGBLKMODE\n"); 20305 if (!ISCD(un)) { 20306 err = ENOTTY; 20307 } else if ((un->un_f_cfg_is_atapi != FALSE) && 20308 (un->un_f_blockcount_is_valid != FALSE)) { 20309 /* 20310 * Drive is an ATAPI drive so return target block 20311 * size for ATAPI drives since we cannot change the 20312 * blocksize on ATAPI drives. Used primarily to detect 20313 * if an ATAPI cdrom is present. 20314 */ 20315 if (ddi_copyout(&un->un_tgt_blocksize, (void *)arg, 20316 sizeof (int), flag) != 0) { 20317 err = EFAULT; 20318 } else { 20319 err = 0; 20320 } 20321 20322 } else { 20323 /* 20324 * Drive supports changing block sizes via a Mode 20325 * Select. 
20326 */ 20327 err = sr_change_blkmode(dev, cmd, arg, flag); 20328 } 20329 break; 20330 20331 case CDROMGDRVSPEED: 20332 case CDROMSDRVSPEED: 20333 SD_TRACE(SD_LOG_IOCTL, un, "CDROMXDRVSPEED\n"); 20334 if (!ISCD(un)) { 20335 err = ENOTTY; 20336 } else if (un->un_f_mmc_cap == TRUE) { 20337 /* 20338 * Note: In the future the driver implementation 20339 * for getting and 20340 * setting cd speed should entail: 20341 * 1) If non-mmc try the Toshiba mode page 20342 * (sr_change_speed) 20343 * 2) If mmc but no support for Real Time Streaming try 20344 * the SET CD SPEED (0xBB) command 20345 * (sr_atapi_change_speed) 20346 * 3) If mmc and support for Real Time Streaming 20347 * try the GET PERFORMANCE and SET STREAMING 20348 * commands (not yet implemented, 4380808) 20349 */ 20350 /* 20351 * As per recent MMC spec, CD-ROM speed is variable 20352 * and changes with LBA. Since there is no such 20353 * things as drive speed now, fail this ioctl. 20354 * 20355 * Note: EINVAL is returned for consistancy of original 20356 * implementation which included support for getting 20357 * the drive speed of mmc devices but not setting 20358 * the drive speed. Thus EINVAL would be returned 20359 * if a set request was made for an mmc device. 20360 * We no longer support get or set speed for 20361 * mmc but need to remain consistent with regard 20362 * to the error code returned. 20363 */ 20364 err = EINVAL; 20365 } else if (un->un_f_cfg_is_atapi == TRUE) { 20366 err = sr_atapi_change_speed(dev, cmd, arg, flag); 20367 } else { 20368 err = sr_change_speed(dev, cmd, arg, flag); 20369 } 20370 break; 20371 20372 case CDROMCDDA: 20373 SD_TRACE(SD_LOG_IOCTL, un, "CDROMCDDA\n"); 20374 if (!ISCD(un)) { 20375 err = ENOTTY; 20376 } else { 20377 err = sr_read_cdda(dev, (void *)arg, flag); 20378 } 20379 break; 20380 20381 case CDROMCDXA: 20382 SD_TRACE(SD_LOG_IOCTL, un, "CDROMCDXA\n"); 20383 if (!ISCD(un)) { 20384 err = ENOTTY; 20385 } else { 20386 err = sr_read_cdxa(dev, (caddr_t)arg, flag); 20387 } 20388 break; 20389 20390 case CDROMSUBCODE: 20391 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSUBCODE\n"); 20392 if (!ISCD(un)) { 20393 err = ENOTTY; 20394 } else { 20395 err = sr_read_all_subcodes(dev, (caddr_t)arg, flag); 20396 } 20397 break; 20398 20399 20400 #ifdef SDDEBUG 20401 /* RESET/ABORTS testing ioctls */ 20402 case DKIOCRESET: { 20403 int reset_level; 20404 20405 if (ddi_copyin((void *)arg, &reset_level, sizeof (int), flag)) { 20406 err = EFAULT; 20407 } else { 20408 SD_INFO(SD_LOG_IOCTL, un, "sdioctl: DKIOCRESET: " 20409 "reset_level = 0x%lx\n", reset_level); 20410 if (scsi_reset(SD_ADDRESS(un), reset_level)) { 20411 err = 0; 20412 } else { 20413 err = EIO; 20414 } 20415 } 20416 break; 20417 } 20418 20419 case DKIOCABORT: 20420 SD_INFO(SD_LOG_IOCTL, un, "sdioctl: DKIOCABORT:\n"); 20421 if (scsi_abort(SD_ADDRESS(un), NULL)) { 20422 err = 0; 20423 } else { 20424 err = EIO; 20425 } 20426 break; 20427 #endif 20428 20429 #ifdef SD_FAULT_INJECTION 20430 /* SDIOC FaultInjection testing ioctls */ 20431 case SDIOCSTART: 20432 case SDIOCSTOP: 20433 case SDIOCINSERTPKT: 20434 case SDIOCINSERTXB: 20435 case SDIOCINSERTUN: 20436 case SDIOCINSERTARQ: 20437 case SDIOCPUSH: 20438 case SDIOCRETRIEVE: 20439 case SDIOCRUN: 20440 SD_INFO(SD_LOG_SDTEST, un, "sdioctl:" 20441 "SDIOC detected cmd:0x%X:\n", cmd); 20442 /* call error generator */ 20443 sd_faultinjection_ioctl(cmd, arg, un); 20444 err = 0; 20445 break; 20446 20447 #endif /* SD_FAULT_INJECTION */ 20448 20449 case DKIOCFLUSHWRITECACHE: 20450 { 20451 struct dk_callback *dkc = (struct 
dk_callback *)arg; 20452 20453 mutex_enter(SD_MUTEX(un)); 20454 if (!un->un_f_sync_cache_supported || 20455 !un->un_f_write_cache_enabled) { 20456 err = un->un_f_sync_cache_supported ? 20457 0 : ENOTSUP; 20458 mutex_exit(SD_MUTEX(un)); 20459 if ((flag & FKIOCTL) && dkc != NULL && 20460 dkc->dkc_callback != NULL) { 20461 (*dkc->dkc_callback)(dkc->dkc_cookie, 20462 err); 20463 /* 20464 * Did callback and reported error. 20465 * Since we did a callback, ioctl 20466 * should return 0. 20467 */ 20468 err = 0; 20469 } 20470 break; 20471 } 20472 mutex_exit(SD_MUTEX(un)); 20473 20474 if ((flag & FKIOCTL) && dkc != NULL && 20475 dkc->dkc_callback != NULL) { 20476 /* async SYNC CACHE request */ 20477 err = sd_send_scsi_SYNCHRONIZE_CACHE(un, dkc); 20478 } else { 20479 /* synchronous SYNC CACHE request */ 20480 err = sd_send_scsi_SYNCHRONIZE_CACHE(un, NULL); 20481 } 20482 } 20483 break; 20484 20485 case DKIOCGETWCE: { 20486 20487 int wce; 20488 20489 if ((err = sd_get_write_cache_enabled(un, &wce)) != 0) { 20490 break; 20491 } 20492 20493 if (ddi_copyout(&wce, (void *)arg, sizeof (wce), flag)) { 20494 err = EFAULT; 20495 } 20496 break; 20497 } 20498 20499 case DKIOCSETWCE: { 20500 20501 int wce, sync_supported; 20502 20503 if (ddi_copyin((void *)arg, &wce, sizeof (wce), flag)) { 20504 err = EFAULT; 20505 break; 20506 } 20507 20508 /* 20509 * Synchronize multiple threads trying to enable 20510 * or disable the cache via the un_f_wcc_cv 20511 * condition variable. 20512 */ 20513 mutex_enter(SD_MUTEX(un)); 20514 20515 /* 20516 * Don't allow the cache to be enabled if the 20517 * config file has it disabled. 20518 */ 20519 if (un->un_f_opt_disable_cache && wce) { 20520 mutex_exit(SD_MUTEX(un)); 20521 err = EINVAL; 20522 break; 20523 } 20524 20525 /* 20526 * Wait for write cache change in progress 20527 * bit to be clear before proceeding. 20528 */ 20529 while (un->un_f_wcc_inprog) 20530 cv_wait(&un->un_wcc_cv, SD_MUTEX(un)); 20531 20532 un->un_f_wcc_inprog = 1; 20533 20534 if (un->un_f_write_cache_enabled && wce == 0) { 20535 /* 20536 * Disable the write cache. Don't clear 20537 * un_f_write_cache_enabled until after 20538 * the mode select and flush are complete. 20539 */ 20540 sync_supported = un->un_f_sync_cache_supported; 20541 20542 /* 20543 * If cache flush is suppressed, we assume that the 20544 * controller firmware will take care of managing the 20545 * write cache for us: no need to explicitly 20546 * disable it. 20547 */ 20548 if (!un->un_f_suppress_cache_flush) { 20549 mutex_exit(SD_MUTEX(un)); 20550 if ((err = sd_cache_control(un, 20551 SD_CACHE_NOCHANGE, 20552 SD_CACHE_DISABLE)) == 0 && 20553 sync_supported) { 20554 err = sd_send_scsi_SYNCHRONIZE_CACHE(un, 20555 NULL); 20556 } 20557 } else { 20558 mutex_exit(SD_MUTEX(un)); 20559 } 20560 20561 mutex_enter(SD_MUTEX(un)); 20562 if (err == 0) { 20563 un->un_f_write_cache_enabled = 0; 20564 } 20565 20566 } else if (!un->un_f_write_cache_enabled && wce != 0) { 20567 /* 20568 * Set un_f_write_cache_enabled first, so there is 20569 * no window where the cache is enabled, but the 20570 * bit says it isn't. 20571 */ 20572 un->un_f_write_cache_enabled = 1; 20573 20574 /* 20575 * If cache flush is suppressed, we assume that the 20576 * controller firmware will take care of managing the 20577 * write cache for us: no need to explicitly 20578 * enable it. 
20579 */ 20580 if (!un->un_f_suppress_cache_flush) { 20581 mutex_exit(SD_MUTEX(un)); 20582 err = sd_cache_control(un, SD_CACHE_NOCHANGE, 20583 SD_CACHE_ENABLE); 20584 } else { 20585 mutex_exit(SD_MUTEX(un)); 20586 } 20587 20588 mutex_enter(SD_MUTEX(un)); 20589 20590 if (err) { 20591 un->un_f_write_cache_enabled = 0; 20592 } 20593 } 20594 20595 un->un_f_wcc_inprog = 0; 20596 cv_broadcast(&un->un_wcc_cv); 20597 mutex_exit(SD_MUTEX(un)); 20598 break; 20599 } 20600 20601 default: 20602 err = ENOTTY; 20603 break; 20604 } 20605 mutex_enter(SD_MUTEX(un)); 20606 un->un_ncmds_in_driver--; 20607 ASSERT(un->un_ncmds_in_driver >= 0); 20608 mutex_exit(SD_MUTEX(un)); 20609 20610 SD_TRACE(SD_LOG_IOCTL, un, "sdioctl: exit: %d\n", err); 20611 return (err); 20612 } 20613 20614 20615 /* 20616 * Function: sd_dkio_ctrl_info 20617 * 20618 * Description: This routine is the driver entry point for handling controller 20619 * information ioctl requests (DKIOCINFO). 20620 * 20621 * Arguments: dev - the device number 20622 * arg - pointer to user provided dk_cinfo structure 20623 * specifying the controller type and attributes. 20624 * flag - this argument is a pass through to ddi_copyxxx() 20625 * directly from the mode argument of ioctl(). 20626 * 20627 * Return Code: 0 20628 * EFAULT 20629 * ENXIO 20630 */ 20631 20632 static int 20633 sd_dkio_ctrl_info(dev_t dev, caddr_t arg, int flag) 20634 { 20635 struct sd_lun *un = NULL; 20636 struct dk_cinfo *info; 20637 dev_info_t *pdip; 20638 int lun, tgt; 20639 20640 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 20641 return (ENXIO); 20642 } 20643 20644 info = (struct dk_cinfo *) 20645 kmem_zalloc(sizeof (struct dk_cinfo), KM_SLEEP); 20646 20647 switch (un->un_ctype) { 20648 case CTYPE_CDROM: 20649 info->dki_ctype = DKC_CDROM; 20650 break; 20651 default: 20652 info->dki_ctype = DKC_SCSI_CCS; 20653 break; 20654 } 20655 pdip = ddi_get_parent(SD_DEVINFO(un)); 20656 info->dki_cnum = ddi_get_instance(pdip); 20657 if (strlen(ddi_get_name(pdip)) < DK_DEVLEN) { 20658 (void) strcpy(info->dki_cname, ddi_get_name(pdip)); 20659 } else { 20660 (void) strncpy(info->dki_cname, ddi_node_name(pdip), 20661 DK_DEVLEN - 1); 20662 } 20663 20664 lun = ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un), 20665 DDI_PROP_DONTPASS, SCSI_ADDR_PROP_LUN, 0); 20666 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un), 20667 DDI_PROP_DONTPASS, SCSI_ADDR_PROP_TARGET, 0); 20668 20669 /* Unit Information */ 20670 info->dki_unit = ddi_get_instance(SD_DEVINFO(un)); 20671 info->dki_slave = ((tgt << 3) | lun); 20672 (void) strncpy(info->dki_dname, ddi_driver_name(SD_DEVINFO(un)), 20673 DK_DEVLEN - 1); 20674 info->dki_flags = DKI_FMTVOL; 20675 info->dki_partition = SDPART(dev); 20676 20677 /* Max Transfer size of this device in blocks */ 20678 info->dki_maxtransfer = un->un_max_xfer_size / un->un_sys_blocksize; 20679 info->dki_addr = 0; 20680 info->dki_space = 0; 20681 info->dki_prio = 0; 20682 info->dki_vec = 0; 20683 20684 if (ddi_copyout(info, arg, sizeof (struct dk_cinfo), flag) != 0) { 20685 kmem_free(info, sizeof (struct dk_cinfo)); 20686 return (EFAULT); 20687 } else { 20688 kmem_free(info, sizeof (struct dk_cinfo)); 20689 return (0); 20690 } 20691 } 20692 20693 20694 /* 20695 * Function: sd_get_media_info 20696 * 20697 * Description: This routine is the driver entry point for handling ioctl 20698 * requests for the media type or command set profile used by the 20699 * drive to operate on the media (DKIOCGMEDIAINFO). 
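 *
 *		Example (illustrative, user-space; error handling omitted):
 *		the returned dk_minfo can be consumed as follows, using the
 *		same dki_* fields this routine fills in below:
 *
 *		    struct dk_minfo mi;
 *
 *		    if (ioctl(fd, DKIOCGMEDIAINFO, &mi) == 0)
 *			(void) printf("type=0x%x lbsize=%u cap=%llu\n",
 *			    mi.dki_media_type, mi.dki_lbsize,
 *			    (u_longlong_t)mi.dki_capacity);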
20700 * 20701 * Arguments: dev - the device number 20702 * arg - pointer to user provided dk_minfo structure 20703 * specifying the media type, logical block size and 20704 * drive capacity. 20705 * flag - this argument is a pass through to ddi_copyxxx() 20706 * directly from the mode argument of ioctl(). 20707 * 20708 * Return Code: 0 20709 * EACCESS 20710 * EFAULT 20711 * ENXIO 20712 * EIO 20713 */ 20714 20715 static int 20716 sd_get_media_info(dev_t dev, caddr_t arg, int flag) 20717 { 20718 struct sd_lun *un = NULL; 20719 struct uscsi_cmd com; 20720 struct scsi_inquiry *sinq; 20721 struct dk_minfo media_info; 20722 u_longlong_t media_capacity; 20723 uint64_t capacity; 20724 uint_t lbasize; 20725 uchar_t *out_data; 20726 uchar_t *rqbuf; 20727 int rval = 0; 20728 int rtn; 20729 20730 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 20731 (un->un_state == SD_STATE_OFFLINE)) { 20732 return (ENXIO); 20733 } 20734 20735 SD_TRACE(SD_LOG_IOCTL_DKIO, un, "sd_get_media_info: entry\n"); 20736 20737 out_data = kmem_zalloc(SD_PROFILE_HEADER_LEN, KM_SLEEP); 20738 rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 20739 20740 /* Issue a TUR to determine if the drive is ready with media present */ 20741 rval = sd_send_scsi_TEST_UNIT_READY(un, SD_CHECK_FOR_MEDIA); 20742 if (rval == ENXIO) { 20743 goto done; 20744 } 20745 20746 /* Now get configuration data */ 20747 if (ISCD(un)) { 20748 media_info.dki_media_type = DK_CDROM; 20749 20750 /* Allow SCMD_GET_CONFIGURATION to MMC devices only */ 20751 if (un->un_f_mmc_cap == TRUE) { 20752 rtn = sd_send_scsi_GET_CONFIGURATION(un, &com, rqbuf, 20753 SENSE_LENGTH, out_data, SD_PROFILE_HEADER_LEN, 20754 SD_PATH_STANDARD); 20755 20756 if (rtn) { 20757 /* 20758 * Failed for other than an illegal request 20759 * or command not supported 20760 */ 20761 if ((com.uscsi_status == STATUS_CHECK) && 20762 (com.uscsi_rqstatus == STATUS_GOOD)) { 20763 if ((rqbuf[2] != KEY_ILLEGAL_REQUEST) || 20764 (rqbuf[12] != 0x20)) { 20765 rval = EIO; 20766 goto done; 20767 } 20768 } 20769 } else { 20770 /* 20771 * The GET CONFIGURATION command succeeded 20772 * so set the media type according to the 20773 * returned data 20774 */ 20775 media_info.dki_media_type = out_data[6]; 20776 media_info.dki_media_type <<= 8; 20777 media_info.dki_media_type |= out_data[7]; 20778 } 20779 } 20780 } else { 20781 /* 20782 * The profile list is not available, so we attempt to identify 20783 * the media type based on the inquiry data 20784 */ 20785 sinq = un->un_sd->sd_inq; 20786 if ((sinq->inq_dtype == DTYPE_DIRECT) || 20787 (sinq->inq_dtype == DTYPE_OPTICAL)) { 20788 /* This is a direct access device or optical disk */ 20789 media_info.dki_media_type = DK_FIXED_DISK; 20790 20791 if ((bcmp(sinq->inq_vid, "IOMEGA", 6) == 0) || 20792 (bcmp(sinq->inq_vid, "iomega", 6) == 0)) { 20793 if ((bcmp(sinq->inq_pid, "ZIP", 3) == 0)) { 20794 media_info.dki_media_type = DK_ZIP; 20795 } else if ( 20796 (bcmp(sinq->inq_pid, "jaz", 3) == 0)) { 20797 media_info.dki_media_type = DK_JAZ; 20798 } 20799 } 20800 } else { 20801 /* 20802 * Not a CD, direct access or optical disk so return 20803 * unknown media 20804 */ 20805 media_info.dki_media_type = DK_UNKNOWN; 20806 } 20807 } 20808 20809 /* Now read the capacity so we can provide the lbasize and capacity */ 20810 switch (sd_send_scsi_READ_CAPACITY(un, &capacity, &lbasize, 20811 SD_PATH_DIRECT)) { 20812 case 0: 20813 break; 20814 case EACCES: 20815 rval = EACCES; 20816 goto done; 20817 default: 20818 rval = EIO; 20819 goto done; 20820 } 20821 20822 
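	/*
	 * Worked example for the unit conversion below (illustrative):
	 * with un_sys_blocksize == 512 (the typical system block size) and
	 * a CD reporting lbasize == 2048, a reported capacity of 1433600
	 * system blocks becomes 1433600 * 512 / 2048 == 358400 blocks of
	 * 2K each, i.e. 700MB of media.
	 */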
media_info.dki_lbsize = lbasize; 20823 media_capacity = capacity; 20824 20825 /* 20826 * sd_send_scsi_READ_CAPACITY() reports capacity in 20827 * un->un_sys_blocksize chunks. So we need to convert it into 20828 * cap.lbasize chunks. 20829 */ 20830 media_capacity *= un->un_sys_blocksize; 20831 media_capacity /= lbasize; 20832 media_info.dki_capacity = media_capacity; 20833 20834 if (ddi_copyout(&media_info, arg, sizeof (struct dk_minfo), flag)) { 20835 rval = EFAULT; 20836 /* Put goto. Anybody might add some code below in future */ 20837 goto done; 20838 } 20839 done: 20840 kmem_free(out_data, SD_PROFILE_HEADER_LEN); 20841 kmem_free(rqbuf, SENSE_LENGTH); 20842 return (rval); 20843 } 20844 20845 20846 /* 20847 * Function: sd_check_media 20848 * 20849 * Description: This utility routine implements the functionality for the 20850 * DKIOCSTATE ioctl. This ioctl blocks the user thread until the 20851 * driver state changes from that specified by the user 20852 * (inserted or ejected). For example, if the user specifies 20853 * DKIO_EJECTED and the current media state is inserted this 20854 * routine will immediately return DKIO_INSERTED. However, if the 20855 * current media state is not inserted the user thread will be 20856 * blocked until the drive state changes. If DKIO_NONE is specified 20857 * the user thread will block until a drive state change occurs. 20858 * 20859 * Arguments: dev - the device number 20860 * state - user pointer to a dkio_state, updated with the current 20861 * drive state at return. 20862 * 20863 * Return Code: ENXIO 20864 * EIO 20865 * EAGAIN 20866 * EINTR 20867 */ 20868 20869 static int 20870 sd_check_media(dev_t dev, enum dkio_state state) 20871 { 20872 struct sd_lun *un = NULL; 20873 enum dkio_state prev_state; 20874 opaque_t token = NULL; 20875 int rval = 0; 20876 20877 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 20878 return (ENXIO); 20879 } 20880 20881 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: entry\n"); 20882 20883 mutex_enter(SD_MUTEX(un)); 20884 20885 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: " 20886 "state=%x, mediastate=%x\n", state, un->un_mediastate); 20887 20888 prev_state = un->un_mediastate; 20889 20890 /* is there anything to do? */ 20891 if (state == un->un_mediastate || un->un_mediastate == DKIO_NONE) { 20892 /* 20893 * submit the request to the scsi_watch service; 20894 * scsi_media_watch_cb() does the real work 20895 */ 20896 mutex_exit(SD_MUTEX(un)); 20897 20898 /* 20899 * This change handles the case where a scsi watch request is 20900 * added to a device that is powered down. To accomplish this 20901 * we power up the device before adding the scsi watch request, 20902 * since the scsi watch sends a TUR directly to the device 20903 * which the device cannot handle if it is powered down. 20904 */ 20905 if (sd_pm_entry(un) != DDI_SUCCESS) { 20906 mutex_enter(SD_MUTEX(un)); 20907 goto done; 20908 } 20909 20910 token = scsi_watch_request_submit(SD_SCSI_DEVP(un), 20911 sd_check_media_time, SENSE_LENGTH, sd_media_watch_cb, 20912 (caddr_t)dev); 20913 20914 sd_pm_exit(un); 20915 20916 mutex_enter(SD_MUTEX(un)); 20917 if (token == NULL) { 20918 rval = EAGAIN; 20919 goto done; 20920 } 20921 20922 /* 20923 * This is a special case IOCTL that doesn't return 20924 * until the media state changes. Routine sdpower 20925 * knows about and handles this so don't count it 20926 * as an active cmd in the driver, which would 20927 * keep the device busy to the pm framework. 
20928 * If the count isn't decremented the device can't 20929 * be powered down. 20930 */ 20931 un->un_ncmds_in_driver--; 20932 ASSERT(un->un_ncmds_in_driver >= 0); 20933 20934 /* 20935 * if a prior request had been made, this will be the same 20936 * token, as scsi_watch was designed that way. 20937 */ 20938 un->un_swr_token = token; 20939 un->un_specified_mediastate = state; 20940 20941 /* 20942 * now wait for media change 20943 * we will not be signalled unless mediastate == state but it is 20944 * still better to test for this condition, since there is a 20945 * 2 sec cv_broadcast delay when mediastate == DKIO_INSERTED 20946 */ 20947 SD_TRACE(SD_LOG_COMMON, un, 20948 "sd_check_media: waiting for media state change\n"); 20949 while (un->un_mediastate == state) { 20950 if (cv_wait_sig(&un->un_state_cv, SD_MUTEX(un)) == 0) { 20951 SD_TRACE(SD_LOG_COMMON, un, 20952 "sd_check_media: waiting for media state " 20953 "was interrupted\n"); 20954 un->un_ncmds_in_driver++; 20955 rval = EINTR; 20956 goto done; 20957 } 20958 SD_TRACE(SD_LOG_COMMON, un, 20959 "sd_check_media: received signal, state=%x\n", 20960 un->un_mediastate); 20961 } 20962 /* 20963 * Inc the counter to indicate the device once again 20964 * has an active outstanding cmd. 20965 */ 20966 un->un_ncmds_in_driver++; 20967 } 20968 20969 /* invalidate geometry */ 20970 if (prev_state == DKIO_INSERTED && un->un_mediastate == DKIO_EJECTED) { 20971 sr_ejected(un); 20972 } 20973 20974 if (un->un_mediastate == DKIO_INSERTED && prev_state != DKIO_INSERTED) { 20975 uint64_t capacity; 20976 uint_t lbasize; 20977 20978 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: media inserted\n"); 20979 mutex_exit(SD_MUTEX(un)); 20980 /* 20981 * Since the following routines use SD_PATH_DIRECT, we must 20982 * call PM directly before the upcoming disk accesses. This 20983 * may cause the disk to be power/spin up. 20984 */ 20985 20986 if (sd_pm_entry(un) == DDI_SUCCESS) { 20987 rval = sd_send_scsi_READ_CAPACITY(un, 20988 &capacity, 20989 &lbasize, SD_PATH_DIRECT); 20990 if (rval != 0) { 20991 sd_pm_exit(un); 20992 mutex_enter(SD_MUTEX(un)); 20993 goto done; 20994 } 20995 } else { 20996 rval = EIO; 20997 mutex_enter(SD_MUTEX(un)); 20998 goto done; 20999 } 21000 mutex_enter(SD_MUTEX(un)); 21001 21002 sd_update_block_info(un, lbasize, capacity); 21003 21004 /* 21005 * Check if the media in the device is writable or not 21006 */ 21007 if (ISCD(un)) 21008 sd_check_for_writable_cd(un, SD_PATH_DIRECT); 21009 21010 mutex_exit(SD_MUTEX(un)); 21011 cmlb_invalidate(un->un_cmlbhandle, (void *)SD_PATH_DIRECT); 21012 if ((cmlb_validate(un->un_cmlbhandle, 0, 21013 (void *)SD_PATH_DIRECT) == 0) && un->un_f_pkstats_enabled) { 21014 sd_set_pstats(un); 21015 SD_TRACE(SD_LOG_IO_PARTITION, un, 21016 "sd_check_media: un:0x%p pstats created and " 21017 "set\n", un); 21018 } 21019 21020 rval = sd_send_scsi_DOORLOCK(un, SD_REMOVAL_PREVENT, 21021 SD_PATH_DIRECT); 21022 sd_pm_exit(un); 21023 21024 mutex_enter(SD_MUTEX(un)); 21025 } 21026 done: 21027 un->un_f_watcht_stopped = FALSE; 21028 if (un->un_swr_token) { 21029 /* 21030 * Use of this local token and the mutex ensures that we avoid 21031 * some race conditions associated with terminating the 21032 * scsi watch. 
21033 */ 21034 token = un->un_swr_token; 21035 un->un_swr_token = (opaque_t)NULL; 21036 mutex_exit(SD_MUTEX(un)); 21037 (void) scsi_watch_request_terminate(token, 21038 SCSI_WATCH_TERMINATE_WAIT); 21039 mutex_enter(SD_MUTEX(un)); 21040 } 21041 21042 /* 21043 * Update the capacity kstat value, if no media previously 21044 * (capacity kstat is 0) and a media has been inserted 21045 * (un_f_blockcount_is_valid == TRUE) 21046 */ 21047 if (un->un_errstats) { 21048 struct sd_errstats *stp = NULL; 21049 21050 stp = (struct sd_errstats *)un->un_errstats->ks_data; 21051 if ((stp->sd_capacity.value.ui64 == 0) && 21052 (un->un_f_blockcount_is_valid == TRUE)) { 21053 stp->sd_capacity.value.ui64 = 21054 (uint64_t)((uint64_t)un->un_blockcount * 21055 un->un_sys_blocksize); 21056 } 21057 } 21058 mutex_exit(SD_MUTEX(un)); 21059 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: done\n"); 21060 return (rval); 21061 } 21062 21063 21064 /* 21065 * Function: sd_delayed_cv_broadcast 21066 * 21067 * Description: Delayed cv_broadcast to allow for target to recover from media 21068 * insertion. 21069 * 21070 * Arguments: arg - driver soft state (unit) structure 21071 */ 21072 21073 static void 21074 sd_delayed_cv_broadcast(void *arg) 21075 { 21076 struct sd_lun *un = arg; 21077 21078 SD_TRACE(SD_LOG_COMMON, un, "sd_delayed_cv_broadcast\n"); 21079 21080 mutex_enter(SD_MUTEX(un)); 21081 un->un_dcvb_timeid = NULL; 21082 cv_broadcast(&un->un_state_cv); 21083 mutex_exit(SD_MUTEX(un)); 21084 } 21085 21086 21087 /* 21088 * Function: sd_media_watch_cb 21089 * 21090 * Description: Callback routine used for support of the DKIOCSTATE ioctl. This 21091 * routine processes the TUR sense data and updates the driver 21092 * state if a transition has occurred. The user thread 21093 * (sd_check_media) is then signalled. 
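 *
 *		In outline, the watch result is mapped to a dkio_state as
 *		follows (see the body below):
 *		    pkt_reason CMD_DEV_GONE                  -> DKIO_DEV_GONE
 *		    UNIT ATTENTION, ASC 0x28 (media changed) -> DKIO_INSERTED
 *		    NOT READY, ASC 0x3a (no media)           -> DKIO_EJECTED
 *		    NOT READY, ASC/ASCQ 04/02, 04/07, 04/08  -> DKIO_INSERTED
 *		    GOOD status with CMD_CMPLT               -> DKIO_INSERTED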
21094 * 21095 * Arguments: arg - the device 'dev_t' is used for context to discriminate 21096 * among multiple watches that share this callback function 21097 * resultp - scsi watch facility result packet containing scsi 21098 * packet, status byte and sense data 21099 * 21100 * Return Code: 0 for success, -1 for failure 21101 */ 21102 21103 static int 21104 sd_media_watch_cb(caddr_t arg, struct scsi_watch_result *resultp) 21105 { 21106 struct sd_lun *un; 21107 struct scsi_status *statusp = resultp->statusp; 21108 uint8_t *sensep = (uint8_t *)resultp->sensep; 21109 enum dkio_state state = DKIO_NONE; 21110 dev_t dev = (dev_t)arg; 21111 uchar_t actual_sense_length; 21112 uint8_t skey, asc, ascq; 21113 21114 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21115 return (-1); 21116 } 21117 actual_sense_length = resultp->actual_sense_length; 21118 21119 mutex_enter(SD_MUTEX(un)); 21120 SD_TRACE(SD_LOG_COMMON, un, 21121 "sd_media_watch_cb: status=%x, sensep=%p, len=%x\n", 21122 *((char *)statusp), (void *)sensep, actual_sense_length); 21123 21124 if (resultp->pkt->pkt_reason == CMD_DEV_GONE) { 21125 un->un_mediastate = DKIO_DEV_GONE; 21126 cv_broadcast(&un->un_state_cv); 21127 mutex_exit(SD_MUTEX(un)); 21128 21129 return (0); 21130 } 21131 21132 /* 21133 * If there was a check condition, then sensep points to valid sense 21134 * data. If the status was not a check condition but a reservation or 21135 * busy status, then the new state is DKIO_NONE. 21136 */ 21137 if (sensep != NULL) { 21138 skey = scsi_sense_key(sensep); 21139 asc = scsi_sense_asc(sensep); 21140 ascq = scsi_sense_ascq(sensep); 21141 21142 SD_INFO(SD_LOG_COMMON, un, 21143 "sd_media_watch_cb: sense KEY=%x, ASC=%x, ASCQ=%x\n", 21144 skey, asc, ascq); 21145 /* This routine only uses up to 13 bytes of sense data. */ 21146 if (actual_sense_length >= 13) { 21147 if (skey == KEY_UNIT_ATTENTION) { 21148 if (asc == 0x28) { 21149 state = DKIO_INSERTED; 21150 } 21151 } else if (skey == KEY_NOT_READY) { 21152 /* 21153 * Sense data 02/04/02 means that the host 21154 * should send a start command. Explicitly 21155 * leave the media state as is 21156 * (inserted), since the media is present 21157 * and the host has stopped the device for 21158 * PM reasons. The next real read/write 21159 * to this media will bring the 21160 * device back to the proper state for 21161 * media access. 21162 */ 21163 if (asc == 0x3a) { 21164 state = DKIO_EJECTED; 21165 } else { 21166 /* 21167 * If the drive is busy with an 21168 * operation or long write, keep the 21169 * media in an inserted state. 21170 */ 21171 21172 if ((asc == 0x04) && 21173 ((ascq == 0x02) || 21174 (ascq == 0x07) || 21175 (ascq == 0x08))) { 21176 state = DKIO_INSERTED; 21177 } 21178 } 21179 } else if (skey == KEY_NO_SENSE) { 21180 if ((asc == 0x00) && (ascq == 0x00)) { 21181 /* 21182 * Sense Data 00/00/00 does not provide 21183 * any information about the state of 21184 * the media. Ignore it.
21185 */ 21186 mutex_exit(SD_MUTEX(un)); 21187 return (0); 21188 } 21189 } 21190 } 21191 } else if ((*((char *)statusp) == STATUS_GOOD) && 21192 (resultp->pkt->pkt_reason == CMD_CMPLT)) { 21193 state = DKIO_INSERTED; 21194 } 21195 21196 SD_TRACE(SD_LOG_COMMON, un, 21197 "sd_media_watch_cb: state=%x, specified=%x\n", 21198 state, un->un_specified_mediastate); 21199 21200 /* 21201 * now signal the waiting thread if this is *not* the specified state; 21202 * delay the signal if the state is DKIO_INSERTED to allow the target 21203 * to recover 21204 */ 21205 if (state != un->un_specified_mediastate) { 21206 un->un_mediastate = state; 21207 if (state == DKIO_INSERTED) { 21208 /* 21209 * delay the signal to give the drive a chance 21210 * to do what it apparently needs to do 21211 */ 21212 SD_TRACE(SD_LOG_COMMON, un, 21213 "sd_media_watch_cb: delayed cv_broadcast\n"); 21214 if (un->un_dcvb_timeid == NULL) { 21215 un->un_dcvb_timeid = 21216 timeout(sd_delayed_cv_broadcast, un, 21217 drv_usectohz((clock_t)MEDIA_ACCESS_DELAY)); 21218 } 21219 } else { 21220 SD_TRACE(SD_LOG_COMMON, un, 21221 "sd_media_watch_cb: immediate cv_broadcast\n"); 21222 cv_broadcast(&un->un_state_cv); 21223 } 21224 } 21225 mutex_exit(SD_MUTEX(un)); 21226 return (0); 21227 } 21228 21229 21230 /* 21231 * Function: sd_dkio_get_temp 21232 * 21233 * Description: This routine is the driver entry point for handling ioctl 21234 * requests to get the disk temperature. 21235 * 21236 * Arguments: dev - the device number 21237 * arg - pointer to user provided dk_temperature structure. 21238 * flag - this argument is a pass through to ddi_copyxxx() 21239 * directly from the mode argument of ioctl(). 21240 * 21241 * Return Code: 0 21242 * EFAULT 21243 * ENXIO 21244 * EAGAIN 21245 */ 21246 21247 static int 21248 sd_dkio_get_temp(dev_t dev, caddr_t arg, int flag) 21249 { 21250 struct sd_lun *un = NULL; 21251 struct dk_temperature *dktemp = NULL; 21252 uchar_t *temperature_page; 21253 int rval = 0; 21254 int path_flag = SD_PATH_STANDARD; 21255 21256 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21257 return (ENXIO); 21258 } 21259 21260 dktemp = kmem_zalloc(sizeof (struct dk_temperature), KM_SLEEP); 21261 21262 /* copyin the disk temp argument to get the user flags */ 21263 if (ddi_copyin((void *)arg, dktemp, 21264 sizeof (struct dk_temperature), flag) != 0) { 21265 rval = EFAULT; 21266 goto done; 21267 } 21268 21269 /* Initialize the temperature to invalid. */ 21270 dktemp->dkt_cur_temp = (short)DKT_INVALID_TEMP; 21271 dktemp->dkt_ref_temp = (short)DKT_INVALID_TEMP; 21272 21273 /* 21274 * Note: Investigate removing the "bypass pm" semantic. 21275 * Can we just bypass PM always? 21276 */ 21277 if (dktemp->dkt_flags & DKT_BYPASS_PM) { 21278 path_flag = SD_PATH_DIRECT; 21279 ASSERT(!mutex_owned(&un->un_pm_mutex)); 21280 mutex_enter(&un->un_pm_mutex); 21281 if (SD_DEVICE_IS_IN_LOW_POWER(un)) { 21282 /* 21283 * If DKT_BYPASS_PM is set, and the drive happens to be 21284 * in low power mode, we can not wake it up, Need to 21285 * return EAGAIN. 21286 */ 21287 mutex_exit(&un->un_pm_mutex); 21288 rval = EAGAIN; 21289 goto done; 21290 } else { 21291 /* 21292 * Indicate to PM the device is busy. This is required 21293 * to avoid a race - i.e. the ioctl is issuing a 21294 * command and the pm framework brings down the device 21295 * to low power mode (possible power cut-off on some 21296 * platforms). 
21297 */ 21298 mutex_exit(&un->un_pm_mutex); 21299 if (sd_pm_entry(un) != DDI_SUCCESS) { 21300 rval = EAGAIN; 21301 goto done; 21302 } 21303 } 21304 } 21305 21306 temperature_page = kmem_zalloc(TEMPERATURE_PAGE_SIZE, KM_SLEEP); 21307 21308 if ((rval = sd_send_scsi_LOG_SENSE(un, temperature_page, 21309 TEMPERATURE_PAGE_SIZE, TEMPERATURE_PAGE, 1, 0, path_flag)) != 0) { 21310 goto done2; 21311 } 21312 21313 /* 21314 * For the current temperature verify that the parameter length is 0x02 21315 * and the parameter code is 0x00 21316 */ 21317 if ((temperature_page[7] == 0x02) && (temperature_page[4] == 0x00) && 21318 (temperature_page[5] == 0x00)) { 21319 if (temperature_page[9] == 0xFF) { 21320 dktemp->dkt_cur_temp = (short)DKT_INVALID_TEMP; 21321 } else { 21322 dktemp->dkt_cur_temp = (short)(temperature_page[9]); 21323 } 21324 } 21325 21326 /* 21327 * For the reference temperature verify that the parameter 21328 * length is 0x02 and the parameter code is 0x01 21329 */ 21330 if ((temperature_page[13] == 0x02) && (temperature_page[10] == 0x00) && 21331 (temperature_page[11] == 0x01)) { 21332 if (temperature_page[15] == 0xFF) { 21333 dktemp->dkt_ref_temp = (short)DKT_INVALID_TEMP; 21334 } else { 21335 dktemp->dkt_ref_temp = (short)(temperature_page[15]); 21336 } 21337 } 21338 21339 /* Do the copyout regardless of the temperature commands status. */ 21340 if (ddi_copyout(dktemp, (void *)arg, sizeof (struct dk_temperature), 21341 flag) != 0) { 21342 rval = EFAULT; 21343 } 21344 21345 done2: 21346 if (path_flag == SD_PATH_DIRECT) { 21347 sd_pm_exit(un); 21348 } 21349 21350 kmem_free(temperature_page, TEMPERATURE_PAGE_SIZE); 21351 done: 21352 if (dktemp != NULL) { 21353 kmem_free(dktemp, sizeof (struct dk_temperature)); 21354 } 21355 21356 return (rval); 21357 } 21358 21359 21360 /* 21361 * Function: sd_log_page_supported 21362 * 21363 * Description: This routine uses sd_send_scsi_LOG_SENSE to find the list of 21364 * supported log pages. 21365 * 21366 * Arguments: un - 21367 * log_page - 21368 * 21369 * Return Code: -1 - on error (log sense is optional and may not be supported). 21370 * 0 - log page not found. 21371 * 1 - log page found. 21372 */ 21373 21374 static int 21375 sd_log_page_supported(struct sd_lun *un, int log_page) 21376 { 21377 uchar_t *log_page_data; 21378 int i; 21379 int match = 0; 21380 int log_size; 21381 21382 log_page_data = kmem_zalloc(0xFF, KM_SLEEP); 21383 21384 if (sd_send_scsi_LOG_SENSE(un, log_page_data, 0xFF, 0, 0x01, 0, 21385 SD_PATH_DIRECT) != 0) { 21386 SD_ERROR(SD_LOG_COMMON, un, 21387 "sd_log_page_supported: failed log page retrieval\n"); 21388 kmem_free(log_page_data, 0xFF); 21389 return (-1); 21390 } 21391 log_size = log_page_data[3]; 21392 21393 /* 21394 * The list of supported log pages start from the fourth byte. Check 21395 * until we run out of log pages or a match is found. 21396 */ 21397 for (i = 4; (i < (log_size + 4)) && !match; i++) { 21398 if (log_page_data[i] == log_page) { 21399 match++; 21400 } 21401 } 21402 kmem_free(log_page_data, 0xFF); 21403 return (match); 21404 } 21405 21406 21407 /* 21408 * Function: sd_mhdioc_failfast 21409 * 21410 * Description: This routine is the driver entry point for handling ioctl 21411 * requests to enable/disable the multihost failfast option. 21412 * (MHIOCENFAILFAST) 21413 * 21414 * Arguments: dev - the device number 21415 * arg - user specified probing interval. 21416 * flag - this argument is a pass through to ddi_copyxxx() 21417 * directly from the mode argument of ioctl(). 
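 *
 *		Example (illustrative, user-space): enable failfast with a
 *		one second probe interval (the interval is in milliseconds,
 *		see sd_check_mhd()), then disable it again; error handling
 *		omitted:
 *
 *		    int ms = 1000;
 *
 *		    (void) ioctl(fd, MHIOCENFAILFAST, &ms);
 *		    ms = 0;
 *		    (void) ioctl(fd, MHIOCENFAILFAST, &ms);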
21418 * 21419 * Return Code: 0 21420 * EFAULT 21421 * ENXIO 21422 */ 21423 21424 static int 21425 sd_mhdioc_failfast(dev_t dev, caddr_t arg, int flag) 21426 { 21427 struct sd_lun *un = NULL; 21428 int mh_time; 21429 int rval = 0; 21430 21431 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21432 return (ENXIO); 21433 } 21434 21435 if (ddi_copyin((void *)arg, &mh_time, sizeof (int), flag)) 21436 return (EFAULT); 21437 21438 if (mh_time) { 21439 mutex_enter(SD_MUTEX(un)); 21440 un->un_resvd_status |= SD_FAILFAST; 21441 mutex_exit(SD_MUTEX(un)); 21442 /* 21443 * If mh_time is INT_MAX, then this ioctl is being used for 21444 * SCSI-3 PGR purposes, and we don't need to spawn watch thread. 21445 */ 21446 if (mh_time != INT_MAX) { 21447 rval = sd_check_mhd(dev, mh_time); 21448 } 21449 } else { 21450 (void) sd_check_mhd(dev, 0); 21451 mutex_enter(SD_MUTEX(un)); 21452 un->un_resvd_status &= ~SD_FAILFAST; 21453 mutex_exit(SD_MUTEX(un)); 21454 } 21455 return (rval); 21456 } 21457 21458 21459 /* 21460 * Function: sd_mhdioc_takeown 21461 * 21462 * Description: This routine is the driver entry point for handling ioctl 21463 * requests to forcefully acquire exclusive access rights to the 21464 * multihost disk (MHIOCTKOWN). 21465 * 21466 * Arguments: dev - the device number 21467 * arg - user provided structure specifying the delay 21468 * parameters in milliseconds 21469 * flag - this argument is a pass through to ddi_copyxxx() 21470 * directly from the mode argument of ioctl(). 21471 * 21472 * Return Code: 0 21473 * EFAULT 21474 * ENXIO 21475 */ 21476 21477 static int 21478 sd_mhdioc_takeown(dev_t dev, caddr_t arg, int flag) 21479 { 21480 struct sd_lun *un = NULL; 21481 struct mhioctkown *tkown = NULL; 21482 int rval = 0; 21483 21484 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21485 return (ENXIO); 21486 } 21487 21488 if (arg != NULL) { 21489 tkown = (struct mhioctkown *) 21490 kmem_zalloc(sizeof (struct mhioctkown), KM_SLEEP); 21491 rval = ddi_copyin(arg, tkown, sizeof (struct mhioctkown), flag); 21492 if (rval != 0) { 21493 rval = EFAULT; 21494 goto error; 21495 } 21496 } 21497 21498 rval = sd_take_ownership(dev, tkown); 21499 mutex_enter(SD_MUTEX(un)); 21500 if (rval == 0) { 21501 un->un_resvd_status |= SD_RESERVE; 21502 if (tkown != NULL && tkown->reinstate_resv_delay != 0) { 21503 sd_reinstate_resv_delay = 21504 tkown->reinstate_resv_delay * 1000; 21505 } else { 21506 sd_reinstate_resv_delay = SD_REINSTATE_RESV_DELAY; 21507 } 21508 /* 21509 * Give the scsi_watch routine interval set by 21510 * the MHIOCENFAILFAST ioctl precedence here. 21511 */ 21512 if ((un->un_resvd_status & SD_FAILFAST) == 0) { 21513 mutex_exit(SD_MUTEX(un)); 21514 (void) sd_check_mhd(dev, sd_reinstate_resv_delay/1000); 21515 SD_TRACE(SD_LOG_IOCTL_MHD, un, 21516 "sd_mhdioc_takeown : %d\n", 21517 sd_reinstate_resv_delay); 21518 } else { 21519 mutex_exit(SD_MUTEX(un)); 21520 } 21521 (void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_NOTIFY, 21522 sd_mhd_reset_notify_cb, (caddr_t)un); 21523 } else { 21524 un->un_resvd_status &= ~SD_RESERVE; 21525 mutex_exit(SD_MUTEX(un)); 21526 } 21527 21528 error: 21529 if (tkown != NULL) { 21530 kmem_free(tkown, sizeof (struct mhioctkown)); 21531 } 21532 return (rval); 21533 } 21534 21535 21536 /* 21537 * Function: sd_mhdioc_release 21538 * 21539 * Description: This routine is the driver entry point for handling ioctl 21540 * requests to release exclusive access rights to the multihost 21541 * disk (MHIOCRELEASE). 
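 *
 *		In outline (see the body below): the SD_RESERVE,
 *		SD_LOST_RESERVE and SD_WANT_RESERVE status bits are cleared,
 *		any pending reservation reclaim timeout and request are
 *		cancelled, the SCSI release itself is issued, and, unless
 *		failfast is still enabled, the scsi_watch request is
 *		terminated as well.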
21542 * 21543 * Arguments: dev - the device number 21544 * 21545 * Return Code: 0 21546 * ENXIO 21547 */ 21548 21549 static int 21550 sd_mhdioc_release(dev_t dev) 21551 { 21552 struct sd_lun *un = NULL; 21553 timeout_id_t resvd_timeid_save; 21554 int resvd_status_save; 21555 int rval = 0; 21556 21557 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21558 return (ENXIO); 21559 } 21560 21561 mutex_enter(SD_MUTEX(un)); 21562 resvd_status_save = un->un_resvd_status; 21563 un->un_resvd_status &= 21564 ~(SD_RESERVE | SD_LOST_RESERVE | SD_WANT_RESERVE); 21565 if (un->un_resvd_timeid) { 21566 resvd_timeid_save = un->un_resvd_timeid; 21567 un->un_resvd_timeid = NULL; 21568 mutex_exit(SD_MUTEX(un)); 21569 (void) untimeout(resvd_timeid_save); 21570 } else { 21571 mutex_exit(SD_MUTEX(un)); 21572 } 21573 21574 /* 21575 * destroy any pending timeout thread that may be attempting to 21576 * reinstate reservation on this device. 21577 */ 21578 sd_rmv_resv_reclaim_req(dev); 21579 21580 if ((rval = sd_reserve_release(dev, SD_RELEASE)) == 0) { 21581 mutex_enter(SD_MUTEX(un)); 21582 if ((un->un_mhd_token) && 21583 ((un->un_resvd_status & SD_FAILFAST) == 0)) { 21584 mutex_exit(SD_MUTEX(un)); 21585 (void) sd_check_mhd(dev, 0); 21586 } else { 21587 mutex_exit(SD_MUTEX(un)); 21588 } 21589 (void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_CANCEL, 21590 sd_mhd_reset_notify_cb, (caddr_t)un); 21591 } else { 21592 /* 21593 * sd_mhd_watch_cb will restart the resvd recover timeout thread 21594 */ 21595 mutex_enter(SD_MUTEX(un)); 21596 un->un_resvd_status = resvd_status_save; 21597 mutex_exit(SD_MUTEX(un)); 21598 } 21599 return (rval); 21600 } 21601 21602 21603 /* 21604 * Function: sd_mhdioc_register_devid 21605 * 21606 * Description: This routine is the driver entry point for handling ioctl 21607 * requests to register the device id (MHIOCREREGISTERDEVID). 21608 * 21609 * Note: The implementation for this ioctl has been updated to 21610 * be consistent with the original PSARC case (1999/357) 21611 * (4375899, 4241671, 4220005) 21612 * 21613 * Arguments: dev - the device number 21614 * 21615 * Return Code: 0 21616 * ENXIO 21617 */ 21618 21619 static int 21620 sd_mhdioc_register_devid(dev_t dev) 21621 { 21622 struct sd_lun *un = NULL; 21623 int rval = 0; 21624 21625 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21626 return (ENXIO); 21627 } 21628 21629 ASSERT(!mutex_owned(SD_MUTEX(un))); 21630 21631 mutex_enter(SD_MUTEX(un)); 21632 21633 /* If a devid already exists, de-register it */ 21634 if (un->un_devid != NULL) { 21635 ddi_devid_unregister(SD_DEVINFO(un)); 21636 /* 21637 * After unregister devid, needs to free devid memory 21638 */ 21639 ddi_devid_free(un->un_devid); 21640 un->un_devid = NULL; 21641 } 21642 21643 /* Check for reservation conflict */ 21644 mutex_exit(SD_MUTEX(un)); 21645 rval = sd_send_scsi_TEST_UNIT_READY(un, 0); 21646 mutex_enter(SD_MUTEX(un)); 21647 21648 switch (rval) { 21649 case 0: 21650 sd_register_devid(un, SD_DEVINFO(un), SD_TARGET_IS_UNRESERVED); 21651 break; 21652 case EACCES: 21653 break; 21654 default: 21655 rval = EIO; 21656 } 21657 21658 mutex_exit(SD_MUTEX(un)); 21659 return (rval); 21660 } 21661 21662 21663 /* 21664 * Function: sd_mhdioc_inkeys 21665 * 21666 * Description: This routine is the driver entry point for handling ioctl 21667 * requests to issue the SCSI-3 Persistent In Read Keys command 21668 * to the device (MHIOCGRP_INKEYS). 
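 *
 *		Example (illustrative, user-space): the .li and .generation
 *		fields are handled as in the code below; the key list field
 *		names follow <sys/mhd.h> and should be treated as
 *		illustrative, error handling omitted:
 *
 *		    mhioc_resv_key_t keys[8];
 *		    mhioc_key_list_t kl;
 *		    mhioc_inkeys_t ik;
 *
 *		    kl.listsize = 8;
 *		    kl.list = keys;
 *		    ik.li = &kl;
 *		    if (ioctl(fd, MHIOCGRP_INKEYS, &ik) == 0)
 *			(void) printf("generation %u, %u keys\n",
 *			    ik.generation, kl.listlen);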
21669 * 21670 * Arguments: dev - the device number 21671 * arg - user provided in_keys structure 21672 * flag - this argument is a pass through to ddi_copyxxx() 21673 * directly from the mode argument of ioctl(). 21674 * 21675 * Return Code: code returned by sd_persistent_reservation_in_read_keys() 21676 * ENXIO 21677 * EFAULT 21678 */ 21679 21680 static int 21681 sd_mhdioc_inkeys(dev_t dev, caddr_t arg, int flag) 21682 { 21683 struct sd_lun *un; 21684 mhioc_inkeys_t inkeys; 21685 int rval = 0; 21686 21687 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21688 return (ENXIO); 21689 } 21690 21691 #ifdef _MULTI_DATAMODEL 21692 switch (ddi_model_convert_from(flag & FMODELS)) { 21693 case DDI_MODEL_ILP32: { 21694 struct mhioc_inkeys32 inkeys32; 21695 21696 if (ddi_copyin(arg, &inkeys32, 21697 sizeof (struct mhioc_inkeys32), flag) != 0) { 21698 return (EFAULT); 21699 } 21700 inkeys.li = (mhioc_key_list_t *)(uintptr_t)inkeys32.li; 21701 if ((rval = sd_persistent_reservation_in_read_keys(un, 21702 &inkeys, flag)) != 0) { 21703 return (rval); 21704 } 21705 inkeys32.generation = inkeys.generation; 21706 if (ddi_copyout(&inkeys32, arg, sizeof (struct mhioc_inkeys32), 21707 flag) != 0) { 21708 return (EFAULT); 21709 } 21710 break; 21711 } 21712 case DDI_MODEL_NONE: 21713 if (ddi_copyin(arg, &inkeys, sizeof (mhioc_inkeys_t), 21714 flag) != 0) { 21715 return (EFAULT); 21716 } 21717 if ((rval = sd_persistent_reservation_in_read_keys(un, 21718 &inkeys, flag)) != 0) { 21719 return (rval); 21720 } 21721 if (ddi_copyout(&inkeys, arg, sizeof (mhioc_inkeys_t), 21722 flag) != 0) { 21723 return (EFAULT); 21724 } 21725 break; 21726 } 21727 21728 #else /* ! _MULTI_DATAMODEL */ 21729 21730 if (ddi_copyin(arg, &inkeys, sizeof (mhioc_inkeys_t), flag) != 0) { 21731 return (EFAULT); 21732 } 21733 rval = sd_persistent_reservation_in_read_keys(un, &inkeys, flag); 21734 if (rval != 0) { 21735 return (rval); 21736 } 21737 if (ddi_copyout(&inkeys, arg, sizeof (mhioc_inkeys_t), flag) != 0) { 21738 return (EFAULT); 21739 } 21740 21741 #endif /* _MULTI_DATAMODEL */ 21742 21743 return (rval); 21744 } 21745 21746 21747 /* 21748 * Function: sd_mhdioc_inresv 21749 * 21750 * Description: This routine is the driver entry point for handling ioctl 21751 * requests to issue the SCSI-3 Persistent In Read Reservations 21752 * command to the device (MHIOCGRP_INRESV). 21753 * 21754 * Arguments: dev - the device number 21755 * arg - user provided in_resv structure 21756 * flag - this argument is a pass through to ddi_copyxxx() 21757 * directly from the mode argument of ioctl(). 21758 * 21759 * Return Code: code returned by sd_persistent_reservation_in_read_resv() 21760 * ENXIO 21761 * EFAULT 21762 */ 21763 21764 static int 21765 sd_mhdioc_inresv(dev_t dev, caddr_t arg, int flag) 21766 { 21767 struct sd_lun *un; 21768 mhioc_inresvs_t inresvs; 21769 int rval = 0; 21770 21771 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21772 return (ENXIO); 21773 } 21774 21775 #ifdef _MULTI_DATAMODEL 21776 21777 switch (ddi_model_convert_from(flag & FMODELS)) { 21778 case DDI_MODEL_ILP32: { 21779 struct mhioc_inresvs32 inresvs32; 21780 21781 if (ddi_copyin(arg, &inresvs32, 21782 sizeof (struct mhioc_inresvs32), flag) != 0) { 21783 return (EFAULT); 21784 } 21785 inresvs.li = (mhioc_resv_desc_list_t *)(uintptr_t)inresvs32.li; 21786 if ((rval = sd_persistent_reservation_in_read_resv(un, 21787 &inresvs, flag)) != 0) { 21788 return (rval); 21789 } 21790 inresvs32.generation = inresvs.generation; 21791 if (ddi_copyout(&inresvs32, arg, 21792 sizeof (struct mhioc_inresvs32), flag) != 0) { 21793 return (EFAULT); 21794 } 21795 break; 21796 } 21797 case DDI_MODEL_NONE: 21798 if (ddi_copyin(arg, &inresvs, 21799 sizeof (mhioc_inresvs_t), flag) != 0) { 21800 return (EFAULT); 21801 } 21802 if ((rval = sd_persistent_reservation_in_read_resv(un, 21803 &inresvs, flag)) != 0) { 21804 return (rval); 21805 } 21806 if (ddi_copyout(&inresvs, arg, 21807 sizeof (mhioc_inresvs_t), flag) != 0) { 21808 return (EFAULT); 21809 } 21810 break; 21811 } 21812 21813 #else /* ! _MULTI_DATAMODEL */ 21814 21815 if (ddi_copyin(arg, &inresvs, sizeof (mhioc_inresvs_t), flag) != 0) { 21816 return (EFAULT); 21817 } 21818 rval = sd_persistent_reservation_in_read_resv(un, &inresvs, flag); 21819 if (rval != 0) { 21820 return (rval); 21821 } 21822 if (ddi_copyout(&inresvs, arg, sizeof (mhioc_inresvs_t), flag) != 0) { 21823 return (EFAULT); 21824 } 21825 21826 #endif /* ! _MULTI_DATAMODEL */ 21827 21828 return (rval); 21829 } 21830 21831 21832 /* 21833 * The following routines support the clustering functionality described below 21834 * and implement lost reservation reclaim functionality. 21835 * 21836 * Clustering 21837 * ---------- 21838 * The clustering code uses two different, independent forms of SCSI 21839 * reservation: traditional SCSI-2 Reserve/Release and the newer SCSI-3 21840 * Persistent Group Reservations. For any particular disk, it will use either 21841 * SCSI-2 or SCSI-3 PGR but never both at the same time for the same disk. 21842 * 21843 * SCSI-2 21844 * The cluster software takes ownership of a multi-hosted disk by issuing the 21845 * MHIOCTKOWN ioctl to the disk driver. It releases ownership by issuing the 21846 * MHIOCRELEASE ioctl. Closely related is the MHIOCENFAILFAST ioctl -- a 21847 * cluster host, just after taking ownership of the disk with the MHIOCTKOWN 21848 * ioctl, then issues the MHIOCENFAILFAST ioctl. This ioctl "enables failfast" 21849 * in the driver. The meaning of failfast is that if the driver (on this host) 21850 * ever encounters the scsi error return code RESERVATION_CONFLICT from the 21851 * device, it should immediately panic the host. The motivation for this ioctl 21852 * is that if this host does encounter reservation conflict, the underlying 21853 * cause is that some other host of the cluster has decided that this host is 21854 * no longer in the cluster and has seized control of the disks for itself. 21855 * Since this host is no longer in the cluster, it ought to panic itself. The 21856 * MHIOCENFAILFAST ioctl does two things: 21857 * (a) it sets a flag that will cause any returned RESERVATION_CONFLICT 21858 * error to panic the host 21859 * (b) it sets up a periodic timer to test whether this host still has 21860 * "access" (in that no other host has reserved the device): if the 21861 * periodic timer gets RESERVATION_CONFLICT, the host is panicked. The 21862 * purpose of that periodic timer is to handle scenarios where the host is 21863 * otherwise temporarily quiescent, temporarily doing no real i/o. 21864 * The MHIOCTKOWN ioctl will "break" a reservation that is held by another host, 21865 * by issuing a SCSI Bus Device Reset. It will then issue a SCSI Reserve for 21866 * the device itself. 21867 * 21868 * SCSI-3 PGR 21869 * A direct semantic implementation of the SCSI-3 Persistent Reservation 21870 * facility is supported through the shared multihost disk ioctls 21871 * (MHIOCGRP_INKEYS, MHIOCGRP_INRESV, MHIOCGRP_REGISTER, MHIOCGRP_RESERVE, 21872 * MHIOCGRP_PREEMPTANDABORT) 21873 * 21874 * Reservation Reclaim: 21875 * -------------------- 21876 * To support the lost reservation reclaim operations this driver creates a 21877 * single thread to handle reinstating reservations on all devices that have 21878 * lost reservations. sd_resv_reclaim_requests are logged for all devices that 21879 * have LOST RESERVATIONS when the scsi watch facility calls back 21880 * sd_mhd_watch_cb, and the reservation reclaim thread loops through the 21881 * requests to regain the lost reservations. 21882 */ 21883 21884 /* 21885 * Function: sd_check_mhd() 21886 * 21887 * Description: This function sets up and submits a scsi watch request or 21888 * terminates an existing watch request. This routine is used in 21889 * support of reservation reclaim. 21890 * 21891 * Arguments: dev - the device 'dev_t' is used for context to discriminate 21892 * among multiple watches that share the callback function 21893 * interval - the number of milliseconds specifying the watch 21894 * interval for issuing TEST UNIT READY commands. If 21895 * set to 0 the watch should be terminated. If the 21896 * interval is set to 0 and if the device is required 21897 * to hold reservation while disabling failfast, the 21898 * watch is restarted with an interval of 21899 * reinstate_resv_delay. 21900 * 21901 * Return Code: 0 - Successful submit/terminate of scsi watch request 21902 * ENXIO - Indicates an invalid device was specified 21903 * EAGAIN - Unable to submit the scsi watch request 21904 */ 21905 21906 static int 21907 sd_check_mhd(dev_t dev, int interval) 21908 { 21909 struct sd_lun *un; 21910 opaque_t token; 21911 21912 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21913 return (ENXIO); 21914 } 21915 21916 /* is this a watch termination request? */ 21917 if (interval == 0) { 21918 mutex_enter(SD_MUTEX(un)); 21919 /* if there is an existing watch task then terminate it */ 21920 if (un->un_mhd_token) { 21921 token = un->un_mhd_token; 21922 un->un_mhd_token = NULL; 21923 mutex_exit(SD_MUTEX(un)); 21924 (void) scsi_watch_request_terminate(token, 21925 SCSI_WATCH_TERMINATE_WAIT); 21926 mutex_enter(SD_MUTEX(un)); 21927 } else { 21928 mutex_exit(SD_MUTEX(un)); 21929 /* 21930 * Note: If we return here we don't check for the 21931 * failfast case. This is the original legacy 21932 * implementation but perhaps we should be checking 21933 * the failfast case.
21934 */ 21935 return (0); 21936 } 21937 /* 21938 * If the device is required to hold reservation while 21939 * disabling failfast, we need to restart the scsi_watch 21940 * routine with an interval of reinstate_resv_delay. 21941 */ 21942 if (un->un_resvd_status & SD_RESERVE) { 21943 interval = sd_reinstate_resv_delay/1000; 21944 } else { 21945 /* no failfast so bail */ 21946 mutex_exit(SD_MUTEX(un)); 21947 return (0); 21948 } 21949 mutex_exit(SD_MUTEX(un)); 21950 } 21951 21952 /* 21953 * adjust minimum time interval to 1 second, 21954 * and convert from msecs to usecs 21955 */ 21956 if (interval > 0 && interval < 1000) { 21957 interval = 1000; 21958 } 21959 interval *= 1000; 21960 21961 /* 21962 * submit the request to the scsi_watch service 21963 */ 21964 token = scsi_watch_request_submit(SD_SCSI_DEVP(un), interval, 21965 SENSE_LENGTH, sd_mhd_watch_cb, (caddr_t)dev); 21966 if (token == NULL) { 21967 return (EAGAIN); 21968 } 21969 21970 /* 21971 * save token for termination later on 21972 */ 21973 mutex_enter(SD_MUTEX(un)); 21974 un->un_mhd_token = token; 21975 mutex_exit(SD_MUTEX(un)); 21976 return (0); 21977 } 21978 21979 21980 /* 21981 * Function: sd_mhd_watch_cb() 21982 * 21983 * Description: This function is the call back function used by the scsi watch 21984 * facility. The scsi watch facility sends the "Test Unit Ready" 21985 * and processes the status. If applicable (i.e. a "Unit Attention" 21986 * status and automatic "Request Sense" not used) the scsi watch 21987 * facility will send a "Request Sense" and retrieve the sense data 21988 * to be passed to this callback function. In either case the 21989 * automatic "Request Sense" or the facility submitting one, this 21990 * callback is passed the status and sense data. 21991 * 21992 * Arguments: arg - the device 'dev_t' is used for context to discriminate 21993 * among multiple watches that share this callback function 21994 * resultp - scsi watch facility result packet containing scsi 21995 * packet, status byte and sense data 21996 * 21997 * Return Code: 0 - continue the watch task 21998 * non-zero - terminate the watch task 21999 */ 22000 22001 static int 22002 sd_mhd_watch_cb(caddr_t arg, struct scsi_watch_result *resultp) 22003 { 22004 struct sd_lun *un; 22005 struct scsi_status *statusp; 22006 uint8_t *sensep; 22007 struct scsi_pkt *pkt; 22008 uchar_t actual_sense_length; 22009 dev_t dev = (dev_t)arg; 22010 22011 ASSERT(resultp != NULL); 22012 statusp = resultp->statusp; 22013 sensep = (uint8_t *)resultp->sensep; 22014 pkt = resultp->pkt; 22015 actual_sense_length = resultp->actual_sense_length; 22016 22017 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 22018 return (ENXIO); 22019 } 22020 22021 SD_TRACE(SD_LOG_IOCTL_MHD, un, 22022 "sd_mhd_watch_cb: reason '%s', status '%s'\n", 22023 scsi_rname(pkt->pkt_reason), sd_sname(*((unsigned char *)statusp))); 22024 22025 /* Begin processing of the status and/or sense data */ 22026 if (pkt->pkt_reason != CMD_CMPLT) { 22027 /* Handle the incomplete packet */ 22028 sd_mhd_watch_incomplete(un, pkt); 22029 return (0); 22030 } else if (*((unsigned char *)statusp) != STATUS_GOOD) { 22031 if (*((unsigned char *)statusp) 22032 == STATUS_RESERVATION_CONFLICT) { 22033 /* 22034 * Handle a reservation conflict by panicking if 22035 * configured for failfast or by logging the conflict 22036 * and updating the reservation status 22037 */ 22038 mutex_enter(SD_MUTEX(un)); 22039 if ((un->un_resvd_status & SD_FAILFAST) && 22040 (sd_failfast_enable)) { 22041 
sd_panic_for_res_conflict(un); 22042 /*NOTREACHED*/ 22043 } 22044 SD_INFO(SD_LOG_IOCTL_MHD, un, 22045 "sd_mhd_watch_cb: Reservation Conflict\n"); 22046 un->un_resvd_status |= SD_RESERVATION_CONFLICT; 22047 mutex_exit(SD_MUTEX(un)); 22048 } 22049 } 22050 22051 if (sensep != NULL) { 22052 if (actual_sense_length >= (SENSE_LENGTH - 2)) { 22053 mutex_enter(SD_MUTEX(un)); 22054 if ((scsi_sense_asc(sensep) == 22055 SD_SCSI_RESET_SENSE_CODE) && 22056 (un->un_resvd_status & SD_RESERVE)) { 22057 /* 22058 * The additional sense code indicates a power 22059 * on or bus device reset has occurred; update 22060 * the reservation status. 22061 */ 22062 un->un_resvd_status |= 22063 (SD_LOST_RESERVE | SD_WANT_RESERVE); 22064 SD_INFO(SD_LOG_IOCTL_MHD, un, 22065 "sd_mhd_watch_cb: Lost Reservation\n"); 22066 } 22067 } else { 22068 return (0); 22069 } 22070 } else { 22071 mutex_enter(SD_MUTEX(un)); 22072 } 22073 22074 if ((un->un_resvd_status & SD_RESERVE) && 22075 (un->un_resvd_status & SD_LOST_RESERVE)) { 22076 if (un->un_resvd_status & SD_WANT_RESERVE) { 22077 /* 22078 * A reset occurred between the last probe and this 22079 * one, so if a timeout is pending, cancel it. 22080 */ 22081 if (un->un_resvd_timeid) { 22082 timeout_id_t temp_id = un->un_resvd_timeid; 22083 un->un_resvd_timeid = NULL; 22084 mutex_exit(SD_MUTEX(un)); 22085 (void) untimeout(temp_id); 22086 mutex_enter(SD_MUTEX(un)); 22087 } 22088 un->un_resvd_status &= ~SD_WANT_RESERVE; 22089 } 22090 if (un->un_resvd_timeid == 0) { 22091 /* Schedule a timeout to handle the lost reservation */ 22092 un->un_resvd_timeid = timeout(sd_mhd_resvd_recover, 22093 (void *)dev, 22094 drv_usectohz(sd_reinstate_resv_delay)); 22095 } 22096 } 22097 mutex_exit(SD_MUTEX(un)); 22098 return (0); 22099 } 22100 22101 22102 /* 22103 * Function: sd_mhd_watch_incomplete() 22104 * 22105 * Description: This function is used to find out why a scsi pkt sent by the 22106 * scsi watch facility was not completed. In some scenarios this 22107 * routine simply returns; otherwise it will send a bus reset to see 22108 * if the drive is still online. 22109 * 22110 * Arguments: un - driver soft state (unit) structure 22111 * pkt - incomplete scsi pkt 22112 */ 22113 22114 static void 22115 sd_mhd_watch_incomplete(struct sd_lun *un, struct scsi_pkt *pkt) 22116 { 22117 int be_chatty; 22118 int perr; 22119 22120 ASSERT(pkt != NULL); 22121 ASSERT(un != NULL); 22122 be_chatty = (!(pkt->pkt_flags & FLAG_SILENT)); 22123 perr = (pkt->pkt_statistics & STAT_PERR); 22124 22125 mutex_enter(SD_MUTEX(un)); 22126 if (un->un_state == SD_STATE_DUMPING) { 22127 mutex_exit(SD_MUTEX(un)); 22128 return; 22129 } 22130 22131 switch (pkt->pkt_reason) { 22132 case CMD_UNX_BUS_FREE: 22133 /* 22134 * If we had a parity error that caused the target to drop BSY*, 22135 * don't be chatty about it. 22136 */ 22137 if (perr && be_chatty) { 22138 be_chatty = 0; 22139 } 22140 break; 22141 case CMD_TAG_REJECT: 22142 /* 22143 * The SCSI-2 spec states that a tag reject will be sent by the 22144 * target if tagged queuing is not supported. A tag reject may 22145 * also be sent during certain initialization periods or to 22146 * control internal resources. In the latter case the target 22147 * may also return Queue Full. 22148 * 22149 * If this driver receives a tag reject from a target that is 22150 * going through an init period or controlling internal 22151 * resources, tagged queuing will be disabled.
This is less 22152 * than optimal behavior, but the driver is unable to determine 22153 * the target state and assumes tagged queuing is not supported. 22154 */ 22155 pkt->pkt_flags = 0; 22156 un->un_tagflags = 0; 22157 22158 if (un->un_f_opt_queueing == TRUE) { 22159 un->un_throttle = min(un->un_throttle, 3); 22160 } else { 22161 un->un_throttle = 1; 22162 } 22163 mutex_exit(SD_MUTEX(un)); 22164 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1); 22165 mutex_enter(SD_MUTEX(un)); 22166 break; 22167 case CMD_INCOMPLETE: 22168 /* 22169 * The transport stopped in an abnormal state; fall through and 22170 * reset the target and/or bus, unless selection did not complete 22171 * (indicated by STATE_GOT_BUS), in which case we don't want to 22172 * go through a target/bus reset. 22173 */ 22174 if (pkt->pkt_state == STATE_GOT_BUS) { 22175 break; 22176 } 22177 /*FALLTHROUGH*/ 22178 22179 case CMD_TIMEOUT: 22180 default: 22181 /* 22182 * The lun may still be running the command, so a lun reset 22183 * should be attempted. If the lun reset fails or cannot be 22184 * issued, then try a target reset. Lastly, try a bus reset. 22185 */ 22186 if ((pkt->pkt_statistics & 22187 (STAT_BUS_RESET|STAT_DEV_RESET|STAT_ABORTED)) == 0) { 22188 int reset_retval = 0; 22189 mutex_exit(SD_MUTEX(un)); 22190 if (un->un_f_allow_bus_device_reset == TRUE) { 22191 if (un->un_f_lun_reset_enabled == TRUE) { 22192 reset_retval = 22193 scsi_reset(SD_ADDRESS(un), 22194 RESET_LUN); 22195 } 22196 if (reset_retval == 0) { 22197 reset_retval = 22198 scsi_reset(SD_ADDRESS(un), 22199 RESET_TARGET); 22200 } 22201 } 22202 if (reset_retval == 0) { 22203 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL); 22204 } 22205 mutex_enter(SD_MUTEX(un)); 22206 } 22207 break; 22208 } 22209 22210 /* A device/bus reset has occurred; update the reservation status. */ 22211 if ((pkt->pkt_reason == CMD_RESET) || (pkt->pkt_statistics & 22212 (STAT_BUS_RESET | STAT_DEV_RESET))) { 22213 if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) { 22214 un->un_resvd_status |= 22215 (SD_LOST_RESERVE | SD_WANT_RESERVE); 22216 SD_INFO(SD_LOG_IOCTL_MHD, un, 22217 "sd_mhd_watch_incomplete: Lost Reservation\n"); 22218 } 22219 } 22220 22221 /* 22222 * The disk has been turned off; update the device state. 22223 * 22224 * Note: Should we be offlining the disk here? 22225 */ 22226 if (pkt->pkt_state == STATE_GOT_BUS) { 22227 SD_INFO(SD_LOG_IOCTL_MHD, un, "sd_mhd_watch_incomplete: " 22228 "Disk not responding to selection\n"); 22229 if (un->un_state != SD_STATE_OFFLINE) { 22230 New_state(un, SD_STATE_OFFLINE); 22231 } 22232 } else if (be_chatty) { 22233 /* 22234 * Suppress messages if they all have the same pkt reason; 22235 * with TQ, many (up to 256) are returned with the same 22236 * pkt_reason. 22237 */ 22238 if (pkt->pkt_reason != un->un_last_pkt_reason) { 22239 SD_ERROR(SD_LOG_IOCTL_MHD, un, 22240 "sd_mhd_watch_incomplete: " 22241 "SCSI transport failed: reason '%s'\n", 22242 scsi_rname(pkt->pkt_reason)); 22243 } 22244 } 22245 un->un_last_pkt_reason = pkt->pkt_reason; 22246 mutex_exit(SD_MUTEX(un)); 22247 } 22248 22249 22250 /* 22251 * Function: sd_sname() 22252 * 22253 * Description: This is a simple little routine to return a string containing 22254 * a printable description of the command status byte for use in 22255 * logging. 22256 * 22257 * Arguments: status - the command status byte 22258 * 22259 * Return Code: char * - string containing status description.
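* * For example, sd_sname(STATUS_RESERVATION_CONFLICT) returns "reservation_conflict", which is how the reservation watch logs a conflict status via SD_TRACE in sd_mhd_watch_cb above.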
22260 */ 22261 22262 static char * 22263 sd_sname(uchar_t status) 22264 { 22265 switch (status & STATUS_MASK) { 22266 case STATUS_GOOD: 22267 return ("good status"); 22268 case STATUS_CHECK: 22269 return ("check condition"); 22270 case STATUS_MET: 22271 return ("condition met"); 22272 case STATUS_BUSY: 22273 return ("busy"); 22274 case STATUS_INTERMEDIATE: 22275 return ("intermediate"); 22276 case STATUS_INTERMEDIATE_MET: 22277 return ("intermediate - condition met"); 22278 case STATUS_RESERVATION_CONFLICT: 22279 return ("reservation_conflict"); 22280 case STATUS_TERMINATED: 22281 return ("command terminated"); 22282 case STATUS_QFULL: 22283 return ("queue full"); 22284 default: 22285 return ("<unknown status>"); 22286 } 22287 } 22288 22289 22290 /* 22291 * Function: sd_mhd_resvd_recover() 22292 * 22293 * Description: This function adds a reservation entry to the 22294 * sd_resv_reclaim_request list and signals the reservation 22295 * reclaim thread that there is work pending. If the reservation 22296 * reclaim thread has not been previously created, this function 22297 * will kick it off. 22298 * 22299 * Arguments: arg - the device 'dev_t' is used for context to discriminate 22300 * among multiple watches that share this callback function 22301 * 22302 * Context: This routine is called by timeout() and is run in interrupt 22303 * context. It must not sleep or call other functions which may 22304 * sleep. 22305 */ 22306 22307 static void 22308 sd_mhd_resvd_recover(void *arg) 22309 { 22310 dev_t dev = (dev_t)arg; 22311 struct sd_lun *un; 22312 struct sd_thr_request *sd_treq = NULL; 22313 struct sd_thr_request *sd_cur = NULL; 22314 struct sd_thr_request *sd_prev = NULL; 22315 int already_there = 0; 22316 22317 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 22318 return; 22319 } 22320 22321 mutex_enter(SD_MUTEX(un)); 22322 un->un_resvd_timeid = NULL; 22323 if (un->un_resvd_status & SD_WANT_RESERVE) { 22324 /* 22325 * There was a reset, so don't issue the reserve; allow the 22326 * sd_mhd_watch_cb callback function to notice this and 22327 * reschedule the timeout for reservation. 22328 */ 22329 mutex_exit(SD_MUTEX(un)); 22330 return; 22331 } 22332 mutex_exit(SD_MUTEX(un)); 22333 22334 /* 22335 * Add this device to the sd_resv_reclaim_request list and the 22336 * sd_resv_reclaim_thread should take care of the rest. 22337 * 22338 * Note: We can't sleep in this context, so if the memory allocation 22339 * fails, allow the sd_mhd_watch_cb callback function to notice this and 22340 * reschedule the timeout for reservation.
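* (This is why the kmem_zalloc() below uses KM_NOSLEEP rather than KM_SLEEP.)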
(4378460) 22341 */ 22342 sd_treq = (struct sd_thr_request *) 22343 kmem_zalloc(sizeof (struct sd_thr_request), KM_NOSLEEP); 22344 if (sd_treq == NULL) { 22345 return; 22346 } 22347 22348 sd_treq->sd_thr_req_next = NULL; 22349 sd_treq->dev = dev; 22350 mutex_enter(&sd_tr.srq_resv_reclaim_mutex); 22351 if (sd_tr.srq_thr_req_head == NULL) { 22352 sd_tr.srq_thr_req_head = sd_treq; 22353 } else { 22354 sd_cur = sd_prev = sd_tr.srq_thr_req_head; 22355 for (; sd_cur != NULL; sd_cur = sd_cur->sd_thr_req_next) { 22356 if (sd_cur->dev == dev) { 22357 /* 22358 * already in Queue so don't log 22359 * another request for the device 22360 */ 22361 already_there = 1; 22362 break; 22363 } 22364 sd_prev = sd_cur; 22365 } 22366 if (!already_there) { 22367 SD_INFO(SD_LOG_IOCTL_MHD, un, "sd_mhd_resvd_recover: " 22368 "logging request for %lx\n", dev); 22369 sd_prev->sd_thr_req_next = sd_treq; 22370 } else { 22371 kmem_free(sd_treq, sizeof (struct sd_thr_request)); 22372 } 22373 } 22374 22375 /* 22376 * Create a kernel thread to do the reservation reclaim and free up this 22377 * thread. We cannot block this thread while we go away to do the 22378 * reservation reclaim 22379 */ 22380 if (sd_tr.srq_resv_reclaim_thread == NULL) 22381 sd_tr.srq_resv_reclaim_thread = thread_create(NULL, 0, 22382 sd_resv_reclaim_thread, NULL, 22383 0, &p0, TS_RUN, v.v_maxsyspri - 2); 22384 22385 /* Tell the reservation reclaim thread that it has work to do */ 22386 cv_signal(&sd_tr.srq_resv_reclaim_cv); 22387 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 22388 } 22389 22390 /* 22391 * Function: sd_resv_reclaim_thread() 22392 * 22393 * Description: This function implements the reservation reclaim operations 22394 * 22395 * Arguments: arg - the device 'dev_t' is used for context to discriminate 22396 * among multiple watches that share this callback function 22397 */ 22398 22399 static void 22400 sd_resv_reclaim_thread() 22401 { 22402 struct sd_lun *un; 22403 struct sd_thr_request *sd_mhreq; 22404 22405 /* Wait for work */ 22406 mutex_enter(&sd_tr.srq_resv_reclaim_mutex); 22407 if (sd_tr.srq_thr_req_head == NULL) { 22408 cv_wait(&sd_tr.srq_resv_reclaim_cv, 22409 &sd_tr.srq_resv_reclaim_mutex); 22410 } 22411 22412 /* Loop while we have work */ 22413 while ((sd_tr.srq_thr_cur_req = sd_tr.srq_thr_req_head) != NULL) { 22414 un = ddi_get_soft_state(sd_state, 22415 SDUNIT(sd_tr.srq_thr_cur_req->dev)); 22416 if (un == NULL) { 22417 /* 22418 * softstate structure is NULL so just 22419 * dequeue the request and continue 22420 */ 22421 sd_tr.srq_thr_req_head = 22422 sd_tr.srq_thr_cur_req->sd_thr_req_next; 22423 kmem_free(sd_tr.srq_thr_cur_req, 22424 sizeof (struct sd_thr_request)); 22425 continue; 22426 } 22427 22428 /* dequeue the request */ 22429 sd_mhreq = sd_tr.srq_thr_cur_req; 22430 sd_tr.srq_thr_req_head = 22431 sd_tr.srq_thr_cur_req->sd_thr_req_next; 22432 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 22433 22434 /* 22435 * Reclaim reservation only if SD_RESERVE is still set. There 22436 * may have been a call to MHIOCRELEASE before we got here. 22437 */ 22438 mutex_enter(SD_MUTEX(un)); 22439 if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) { 22440 /* 22441 * Note: The SD_LOST_RESERVE flag is cleared before 22442 * reclaiming the reservation. 
If this is done after the 22443 * call to sd_reserve_release a reservation loss in the 22444 * window between pkt completion of reserve cmd and 22445 * mutex_enter below may not be recognized 22446 */ 22447 un->un_resvd_status &= ~SD_LOST_RESERVE; 22448 mutex_exit(SD_MUTEX(un)); 22449 22450 if (sd_reserve_release(sd_mhreq->dev, 22451 SD_RESERVE) == 0) { 22452 mutex_enter(SD_MUTEX(un)); 22453 un->un_resvd_status |= SD_RESERVE; 22454 mutex_exit(SD_MUTEX(un)); 22455 SD_INFO(SD_LOG_IOCTL_MHD, un, 22456 "sd_resv_reclaim_thread: " 22457 "Reservation Recovered\n"); 22458 } else { 22459 mutex_enter(SD_MUTEX(un)); 22460 un->un_resvd_status |= SD_LOST_RESERVE; 22461 mutex_exit(SD_MUTEX(un)); 22462 SD_INFO(SD_LOG_IOCTL_MHD, un, 22463 "sd_resv_reclaim_thread: Failed " 22464 "Reservation Recovery\n"); 22465 } 22466 } else { 22467 mutex_exit(SD_MUTEX(un)); 22468 } 22469 mutex_enter(&sd_tr.srq_resv_reclaim_mutex); 22470 ASSERT(sd_mhreq == sd_tr.srq_thr_cur_req); 22471 kmem_free(sd_mhreq, sizeof (struct sd_thr_request)); 22472 sd_mhreq = sd_tr.srq_thr_cur_req = NULL; 22473 /* 22474 * wakeup the destroy thread if anyone is waiting on 22475 * us to complete. 22476 */ 22477 cv_signal(&sd_tr.srq_inprocess_cv); 22478 SD_TRACE(SD_LOG_IOCTL_MHD, un, 22479 "sd_resv_reclaim_thread: cv_signalling current request \n"); 22480 } 22481 22482 /* 22483 * cleanup the sd_tr structure now that this thread will not exist 22484 */ 22485 ASSERT(sd_tr.srq_thr_req_head == NULL); 22486 ASSERT(sd_tr.srq_thr_cur_req == NULL); 22487 sd_tr.srq_resv_reclaim_thread = NULL; 22488 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 22489 thread_exit(); 22490 } 22491 22492 22493 /* 22494 * Function: sd_rmv_resv_reclaim_req() 22495 * 22496 * Description: This function removes any pending reservation reclaim requests 22497 * for the specified device. 22498 * 22499 * Arguments: dev - the device 'dev_t' 22500 */ 22501 22502 static void 22503 sd_rmv_resv_reclaim_req(dev_t dev) 22504 { 22505 struct sd_thr_request *sd_mhreq; 22506 struct sd_thr_request *sd_prev; 22507 22508 /* Remove a reservation reclaim request from the list */ 22509 mutex_enter(&sd_tr.srq_resv_reclaim_mutex); 22510 if (sd_tr.srq_thr_cur_req && sd_tr.srq_thr_cur_req->dev == dev) { 22511 /* 22512 * We are attempting to reinstate reservation for 22513 * this device. We wait for sd_reserve_release() 22514 * to return before we return. 22515 */ 22516 cv_wait(&sd_tr.srq_inprocess_cv, 22517 &sd_tr.srq_resv_reclaim_mutex); 22518 } else { 22519 sd_prev = sd_mhreq = sd_tr.srq_thr_req_head; 22520 if (sd_mhreq && sd_mhreq->dev == dev) { 22521 sd_tr.srq_thr_req_head = sd_mhreq->sd_thr_req_next; 22522 kmem_free(sd_mhreq, sizeof (struct sd_thr_request)); 22523 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 22524 return; 22525 } 22526 for (; sd_mhreq != NULL; sd_mhreq = sd_mhreq->sd_thr_req_next) { 22527 if (sd_mhreq && sd_mhreq->dev == dev) { 22528 break; 22529 } 22530 sd_prev = sd_mhreq; 22531 } 22532 if (sd_mhreq != NULL) { 22533 sd_prev->sd_thr_req_next = sd_mhreq->sd_thr_req_next; 22534 kmem_free(sd_mhreq, sizeof (struct sd_thr_request)); 22535 } 22536 } 22537 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 22538 } 22539 22540 22541 /* 22542 * Function: sd_mhd_reset_notify_cb() 22543 * 22544 * Description: This is a call back function for scsi_reset_notify. This 22545 * function updates the softstate reserved status and logs the 22546 * reset. The driver scsi watch facility callback function 22547 * (sd_mhd_watch_cb) and reservation reclaim thread functionality 22548 * will reclaim the reservation. 
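* * Note: the transport invokes this callback only after it has been registered via scsi_reset_notify(9F); the registration (performed elsewhere in the driver, not in this excerpt) looks roughly like: * *	scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_NOTIFY, *	    sd_mhd_reset_notify_cb, (caddr_t)un);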
22549 * 22550 * Arguments: arg - driver soft state (unit) structure 22551 */ 22552 22553 static void 22554 sd_mhd_reset_notify_cb(caddr_t arg) 22555 { 22556 struct sd_lun *un = (struct sd_lun *)arg; 22557 22558 mutex_enter(SD_MUTEX(un)); 22559 if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) { 22560 un->un_resvd_status |= (SD_LOST_RESERVE | SD_WANT_RESERVE); 22561 SD_INFO(SD_LOG_IOCTL_MHD, un, 22562 "sd_mhd_reset_notify_cb: Lost Reservation\n"); 22563 } 22564 mutex_exit(SD_MUTEX(un)); 22565 } 22566 22567 22568 /* 22569 * Function: sd_take_ownership() 22570 * 22571 * Description: This routine implements an algorithm to achieve a stable 22572 * reservation on disks which don't implement priority reserve, 22573 * and makes sure that other hosts' re-reservation attempts fail. 22574 * This algorithm consists of a loop that keeps issuing the RESERVE 22575 * for some period of time (min_ownership_delay, default 6 seconds). 22576 * During that loop, it looks to see if there has been a bus device 22577 * reset or bus reset (both of which cause an existing reservation 22578 * to be lost). If the reservation is lost, reissue the RESERVE until a 22579 * period of min_ownership_delay with no resets has gone by, or 22580 * until max_ownership_delay has expired. This loop ensures that 22581 * the host really did manage to reserve the device, in spite of 22582 * resets. The looping for min_ownership_delay (default six 22583 * seconds) is important to early generation clustering products, 22584 * Solstice HA 1.x and Sun Cluster 2.x. Those products use an 22585 * MHIOCENFAILFAST periodic timer of two seconds. By having 22586 * MHIOCTKOWN issue Reserves in a loop for six seconds, and having 22587 * MHIOCENFAILFAST poll every two seconds, the idea is that by the 22588 * time the MHIOCTKOWN ioctl returns, the other host (if any) will 22589 * have already noticed, via the MHIOCENFAILFAST polling, that it 22590 * no longer "owns" the disk and will have panicked itself. Thus, 22591 * the host issuing the MHIOCTKOWN is assured (with timing 22592 * dependencies) that by the time it actually starts to use the 22593 * disk for real work, the old owner is no longer accessing it. 22594 * 22595 * min_ownership_delay is the minimum amount of time for which the 22596 * disk must be reserved continuously devoid of resets before the 22597 * MHIOCTKOWN ioctl will return success. 22598 * 22599 * max_ownership_delay indicates the amount of time by which the 22600 * take ownership should succeed or time out with an error. 22601 * 22602 * Arguments: dev - the device 'dev_t' 22603 * *p - struct containing timing info. 22604 * 22605 * Return Code: 0 for success or error code 22606 */ 22607 22608 static int 22609 sd_take_ownership(dev_t dev, struct mhioctkown *p) 22610 { 22611 struct sd_lun *un; 22612 int rval; 22613 int err; 22614 int reservation_count = 0; 22615 int min_ownership_delay = 6000000; /* in usec */ 22616 int max_ownership_delay = 30000000; /* in usec */ 22617 clock_t start_time; /* starting time of this algorithm */ 22618 clock_t end_time; /* time limit for giving up */ 22619 clock_t ownership_time; /* time limit for stable ownership */ 22620 clock_t current_time; 22621 clock_t previous_current_time; 22622 22623 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 22624 return (ENXIO); 22625 } 22626 22627 /* 22628 * Attempt a device reservation. A priority reservation is requested.
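* With the default delays the loop below then reissues a plain RESERVE roughly every 500 msec, and stable ownership is declared only once at least four consecutive reserves have succeeded with no reset seen for min_ownership_delay, giving up with EACCES once max_ownership_delay expires.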
22629 */ 22630 if ((rval = sd_reserve_release(dev, SD_PRIORITY_RESERVE)) 22631 != SD_SUCCESS) { 22632 SD_ERROR(SD_LOG_IOCTL_MHD, un, 22633 "sd_take_ownership: return(1)=%d\n", rval); 22634 return (rval); 22635 } 22636 22637 /* Update the softstate reserved status to indicate the reservation */ 22638 mutex_enter(SD_MUTEX(un)); 22639 un->un_resvd_status |= SD_RESERVE; 22640 un->un_resvd_status &= 22641 ~(SD_LOST_RESERVE | SD_WANT_RESERVE | SD_RESERVATION_CONFLICT); 22642 mutex_exit(SD_MUTEX(un)); 22643 22644 if (p != NULL) { 22645 if (p->min_ownership_delay != 0) { 22646 min_ownership_delay = p->min_ownership_delay * 1000; 22647 } 22648 if (p->max_ownership_delay != 0) { 22649 max_ownership_delay = p->max_ownership_delay * 1000; 22650 } 22651 } 22652 SD_INFO(SD_LOG_IOCTL_MHD, un, 22653 "sd_take_ownership: min, max delays: %d, %d\n", 22654 min_ownership_delay, max_ownership_delay); 22655 22656 start_time = ddi_get_lbolt(); 22657 current_time = start_time; 22658 ownership_time = current_time + drv_usectohz(min_ownership_delay); 22659 end_time = start_time + drv_usectohz(max_ownership_delay); 22660 22661 while (current_time - end_time < 0) { 22662 delay(drv_usectohz(500000)); 22663 22664 if ((err = sd_reserve_release(dev, SD_RESERVE)) != 0) { 22665 if ((sd_reserve_release(dev, SD_RESERVE)) != 0) { 22666 mutex_enter(SD_MUTEX(un)); 22667 rval = (un->un_resvd_status & 22668 SD_RESERVATION_CONFLICT) ? EACCES : EIO; 22669 mutex_exit(SD_MUTEX(un)); 22670 break; 22671 } 22672 } 22673 previous_current_time = current_time; 22674 current_time = ddi_get_lbolt(); 22675 mutex_enter(SD_MUTEX(un)); 22676 if (err || (un->un_resvd_status & SD_LOST_RESERVE)) { 22677 ownership_time = ddi_get_lbolt() + 22678 drv_usectohz(min_ownership_delay); 22679 reservation_count = 0; 22680 } else { 22681 reservation_count++; 22682 } 22683 un->un_resvd_status |= SD_RESERVE; 22684 un->un_resvd_status &= ~(SD_LOST_RESERVE | SD_WANT_RESERVE); 22685 mutex_exit(SD_MUTEX(un)); 22686 22687 SD_INFO(SD_LOG_IOCTL_MHD, un, 22688 "sd_take_ownership: ticks for loop iteration=%ld, " 22689 "reservation=%s\n", (current_time - previous_current_time), 22690 reservation_count ? 
"ok" : "reclaimed"); 22691 22692 if (current_time - ownership_time >= 0 && 22693 reservation_count >= 4) { 22694 rval = 0; /* Achieved a stable ownership */ 22695 break; 22696 } 22697 if (current_time - end_time >= 0) { 22698 rval = EACCES; /* No ownership in max possible time */ 22699 break; 22700 } 22701 } 22702 SD_TRACE(SD_LOG_IOCTL_MHD, un, 22703 "sd_take_ownership: return(2)=%d\n", rval); 22704 return (rval); 22705 } 22706 22707 22708 /* 22709 * Function: sd_reserve_release() 22710 * 22711 * Description: This function builds and sends scsi RESERVE, RELEASE, and 22712 * PRIORITY RESERVE commands based on a user specified command type 22713 * 22714 * Arguments: dev - the device 'dev_t' 22715 * cmd - user specified command type; one of SD_PRIORITY_RESERVE, 22716 * SD_RESERVE, SD_RELEASE 22717 * 22718 * Return Code: 0 or Error Code 22719 */ 22720 22721 static int 22722 sd_reserve_release(dev_t dev, int cmd) 22723 { 22724 struct uscsi_cmd *com = NULL; 22725 struct sd_lun *un = NULL; 22726 char cdb[CDB_GROUP0]; 22727 int rval; 22728 22729 ASSERT((cmd == SD_RELEASE) || (cmd == SD_RESERVE) || 22730 (cmd == SD_PRIORITY_RESERVE)); 22731 22732 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 22733 return (ENXIO); 22734 } 22735 22736 /* instantiate and initialize the command and cdb */ 22737 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 22738 bzero(cdb, CDB_GROUP0); 22739 com->uscsi_flags = USCSI_SILENT; 22740 com->uscsi_timeout = un->un_reserve_release_time; 22741 com->uscsi_cdblen = CDB_GROUP0; 22742 com->uscsi_cdb = cdb; 22743 if (cmd == SD_RELEASE) { 22744 cdb[0] = SCMD_RELEASE; 22745 } else { 22746 cdb[0] = SCMD_RESERVE; 22747 } 22748 22749 /* Send the command. */ 22750 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 22751 SD_PATH_STANDARD); 22752 22753 /* 22754 * "break" a reservation that is held by another host, by issuing a 22755 * reset if priority reserve is desired, and we could not get the 22756 * device. 22757 */ 22758 if ((cmd == SD_PRIORITY_RESERVE) && 22759 (rval != 0) && (com->uscsi_status == STATUS_RESERVATION_CONFLICT)) { 22760 /* 22761 * First try to reset the LUN. If we cannot, then try a target 22762 * reset, followed by a bus reset if the target reset fails. 22763 */ 22764 int reset_retval = 0; 22765 if (un->un_f_lun_reset_enabled == TRUE) { 22766 reset_retval = scsi_reset(SD_ADDRESS(un), RESET_LUN); 22767 } 22768 if (reset_retval == 0) { 22769 /* The LUN reset either failed or was not issued */ 22770 reset_retval = scsi_reset(SD_ADDRESS(un), RESET_TARGET); 22771 } 22772 if ((reset_retval == 0) && 22773 (scsi_reset(SD_ADDRESS(un), RESET_ALL) == 0)) { 22774 rval = EIO; 22775 kmem_free(com, sizeof (*com)); 22776 return (rval); 22777 } 22778 22779 bzero(com, sizeof (struct uscsi_cmd)); 22780 com->uscsi_flags = USCSI_SILENT; 22781 com->uscsi_cdb = cdb; 22782 com->uscsi_cdblen = CDB_GROUP0; 22783 com->uscsi_timeout = 5; 22784 22785 /* 22786 * Reissue the last reserve command, this time without request 22787 * sense. Assume that it is just a regular reserve command. 22788 */ 22789 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 22790 SD_PATH_STANDARD); 22791 } 22792 22793 /* Return an error if still getting a reservation conflict. 
*/ 22794 if ((rval != 0) && (com->uscsi_status == STATUS_RESERVATION_CONFLICT)) { 22795 rval = EACCES; 22796 } 22797 22798 kmem_free(com, sizeof (*com)); 22799 return (rval); 22800 } 22801 22802 22803 #define SD_NDUMP_RETRIES 12 22804 /* 22805 * System Crash Dump routine 22806 */ 22807 22808 static int 22809 sddump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk) 22810 { 22811 int instance; 22812 int partition; 22813 int i; 22814 int err; 22815 struct sd_lun *un; 22816 struct scsi_pkt *wr_pktp; 22817 struct buf *wr_bp; 22818 struct buf wr_buf; 22819 daddr_t tgt_byte_offset; /* rmw - byte offset for target */ 22820 daddr_t tgt_blkno; /* rmw - blkno for target */ 22821 size_t tgt_byte_count; /* rmw - # of bytes to xfer */ 22822 size_t tgt_nblk; /* rmw - # of tgt blks to xfer */ 22823 size_t io_start_offset; 22824 int doing_rmw = FALSE; 22825 int rval; 22826 ssize_t dma_resid; 22827 daddr_t oblkno; 22828 diskaddr_t nblks = 0; 22829 diskaddr_t start_block; 22830 22831 instance = SDUNIT(dev); 22832 if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) || 22833 !SD_IS_VALID_LABEL(un) || ISCD(un)) { 22834 return (ENXIO); 22835 } 22836 22837 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*un)) 22838 22839 SD_TRACE(SD_LOG_DUMP, un, "sddump: entry\n"); 22840 22841 partition = SDPART(dev); 22842 SD_INFO(SD_LOG_DUMP, un, "sddump: partition = %d\n", partition); 22843 22844 /* Validate the blocks to dump against the partition size. */ 22845 22846 (void) cmlb_partinfo(un->un_cmlbhandle, partition, 22847 &nblks, &start_block, NULL, NULL, (void *)SD_PATH_DIRECT); 22848 22849 if ((blkno + nblk) > nblks) { 22850 SD_TRACE(SD_LOG_DUMP, un, 22851 "sddump: dump range larger than partition: " 22852 "blkno = 0x%x, nblk = 0x%x, dkl_nblk = 0x%x\n", 22853 blkno, nblk, nblks); 22854 return (EINVAL); 22855 } 22856 22857 mutex_enter(&un->un_pm_mutex); 22858 if (SD_DEVICE_IS_IN_LOW_POWER(un)) { 22859 struct scsi_pkt *start_pktp; 22860 22861 mutex_exit(&un->un_pm_mutex); 22862 22863 /* 22864 * use the pm framework to power on the HBA first 22865 */ 22866 (void) pm_raise_power(SD_DEVINFO(un), 0, SD_SPINDLE_ON); 22867 22868 /* 22869 * Dump no longer uses sdpower to power on a device; it's 22870 * done in-line here so it can be done in polled mode. 22871 */ 22872 22873 SD_INFO(SD_LOG_DUMP, un, "sddump: starting device\n"); 22874 22875 start_pktp = scsi_init_pkt(SD_ADDRESS(un), NULL, NULL, 22876 CDB_GROUP0, un->un_status_len, 0, 0, NULL_FUNC, NULL); 22877 22878 if (start_pktp == NULL) { 22879 /* We were not given a SCSI packet, fail. */ 22880 return (EIO); 22881 } 22882 bzero(start_pktp->pkt_cdbp, CDB_GROUP0); 22883 start_pktp->pkt_cdbp[0] = SCMD_START_STOP; 22884 start_pktp->pkt_cdbp[4] = SD_TARGET_START; 22885 start_pktp->pkt_flags = FLAG_NOINTR; 22886 22887 mutex_enter(SD_MUTEX(un)); 22888 SD_FILL_SCSI1_LUN(un, start_pktp); 22889 mutex_exit(SD_MUTEX(un)); 22890 /* 22891 * Scsi_poll returns 0 (success) if the command completes and 22892 * the status block is STATUS_GOOD. 22893 */ 22894 if (sd_scsi_poll(un, start_pktp) != 0) { 22895 scsi_destroy_pkt(start_pktp); 22896 return (EIO); 22897 } 22898 scsi_destroy_pkt(start_pktp); 22899 (void) sd_ddi_pm_resume(un); 22900 } else { 22901 mutex_exit(&un->un_pm_mutex); 22902 } 22903 22904 mutex_enter(SD_MUTEX(un)); 22905 un->un_throttle = 0; 22906 22907 /* 22908 * The first time through, reset the specific target device. 22909 * However, when cpr calls sddump we know that sd is in 22910 * a good state, so no bus reset is required. 22911 * Clear sense data via Request Sense cmd.
22912 * In sddump we don't care about allow_bus_device_reset anymore. 22913 */ 22914 22915 if ((un->un_state != SD_STATE_SUSPENDED) && 22916 (un->un_state != SD_STATE_DUMPING)) { 22917 22918 New_state(un, SD_STATE_DUMPING); 22919 22920 if (un->un_f_is_fibre == FALSE) { 22921 mutex_exit(SD_MUTEX(un)); 22922 /* 22923 * Attempt a bus reset for parallel scsi. 22924 * 22925 * Note: A bus reset is required because on some host 22926 * systems (e.g. the E420R) a bus device reset is 22927 * insufficient to reset the state of the target. 22928 * 22929 * Note: Don't issue the reset for fibre-channel, 22930 * because this tends to hang the bus (loop) for 22931 * too long while everyone is logging out and in 22932 * and the deadman timer for dumping will fire 22933 * before the dump is complete. 22934 */ 22935 if (scsi_reset(SD_ADDRESS(un), RESET_ALL) == 0) { 22936 mutex_enter(SD_MUTEX(un)); 22937 Restore_state(un); 22938 mutex_exit(SD_MUTEX(un)); 22939 return (EIO); 22940 } 22941 22942 /* Delay to give the device some recovery time. */ 22943 drv_usecwait(10000); 22944 22945 if (sd_send_polled_RQS(un) == SD_FAILURE) { 22946 SD_INFO(SD_LOG_DUMP, un, 22947 "sddump: sd_send_polled_RQS failed\n"); 22948 } 22949 mutex_enter(SD_MUTEX(un)); 22950 } 22951 } 22952 22953 /* 22954 * Convert the partition-relative block number to a 22955 * disk physical block number. 22956 */ 22957 blkno += start_block; 22958 22959 SD_INFO(SD_LOG_DUMP, un, "sddump: disk blkno = 0x%x\n", blkno); 22960 22961 22962 /* 22963 * Check if the device has a non-512 block size. 22964 */ 22965 wr_bp = NULL; 22966 if (NOT_DEVBSIZE(un)) { 22967 tgt_byte_offset = blkno * un->un_sys_blocksize; 22968 tgt_byte_count = nblk * un->un_sys_blocksize; 22969 if ((tgt_byte_offset % un->un_tgt_blocksize) || 22970 (tgt_byte_count % un->un_tgt_blocksize)) { 22971 doing_rmw = TRUE; 22972 /* 22973 * Calculate the block number and number of blocks 22974 * in terms of the media block size. 22975 */ 22976 tgt_blkno = tgt_byte_offset / un->un_tgt_blocksize; 22977 tgt_nblk = 22978 ((tgt_byte_offset + tgt_byte_count + 22979 (un->un_tgt_blocksize - 1)) / 22980 un->un_tgt_blocksize) - tgt_blkno; 22981 22982 /* 22983 * Invoke the routine which is going to do the read part 22984 * of the read-modify-write. 22985 * Note that this routine returns a pointer to 22986 * a valid bp in wr_bp. 22987 */ 22988 err = sddump_do_read_of_rmw(un, tgt_blkno, tgt_nblk, 22989 &wr_bp); 22990 if (err) { 22991 mutex_exit(SD_MUTEX(un)); 22992 return (err); 22993 } 22994 /* 22995 * Offset is being calculated as: 22996 * (original block # * system block size) - 22997 * (new block # * target block size) 22998 */ 22999 io_start_offset = 23000 ((uint64_t)(blkno * un->un_sys_blocksize)) - 23001 ((uint64_t)(tgt_blkno * un->un_tgt_blocksize)); 23002 23003 ASSERT((io_start_offset >= 0) && 23004 (io_start_offset < un->un_tgt_blocksize)); 23005 /* 23006 * Do the modify portion of the read-modify-write.
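* * Worked example (assuming 512-byte system blocks on 2048-byte media, with blkno = 3 and nblk = 2): tgt_byte_offset = 1536 and tgt_byte_count = 1024, so tgt_blkno = 0, tgt_nblk = 2 and io_start_offset = 1536; the bcopy below then overlays 1024 bytes of dump data at offset 1536 of the 4096-byte buffer that was read in.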
23007 */ 23008 bcopy(addr, &wr_bp->b_un.b_addr[io_start_offset], 23009 (size_t)nblk * un->un_sys_blocksize); 23010 } else { 23011 doing_rmw = FALSE; 23012 tgt_blkno = tgt_byte_offset / un->un_tgt_blocksize; 23013 tgt_nblk = tgt_byte_count / un->un_tgt_blocksize; 23014 } 23015 23016 /* Convert blkno and nblk to target blocks */ 23017 blkno = tgt_blkno; 23018 nblk = tgt_nblk; 23019 } else { 23020 wr_bp = &wr_buf; 23021 bzero(wr_bp, sizeof (struct buf)); 23022 wr_bp->b_flags = B_BUSY; 23023 wr_bp->b_un.b_addr = addr; 23024 wr_bp->b_bcount = nblk << DEV_BSHIFT; 23025 wr_bp->b_resid = 0; 23026 } 23027 23028 mutex_exit(SD_MUTEX(un)); 23029 23030 /* 23031 * Obtain a SCSI packet for the write command. 23032 * It should be safe to call the allocator here without 23033 * worrying about being locked for DVMA mapping because 23034 * the address we're passed is already a DVMA mapping 23035 * 23036 * We are also not going to worry about semaphore ownership 23037 * in the dump buffer. Dumping is single threaded at present. 23038 */ 23039 23040 wr_pktp = NULL; 23041 23042 dma_resid = wr_bp->b_bcount; 23043 oblkno = blkno; 23044 23045 while (dma_resid != 0) { 23046 23047 for (i = 0; i < SD_NDUMP_RETRIES; i++) { 23048 wr_bp->b_flags &= ~B_ERROR; 23049 23050 if (un->un_partial_dma_supported == 1) { 23051 blkno = oblkno + 23052 ((wr_bp->b_bcount - dma_resid) / 23053 un->un_tgt_blocksize); 23054 nblk = dma_resid / un->un_tgt_blocksize; 23055 23056 if (wr_pktp) { 23057 /* 23058 * Partial DMA transfers after initial transfer 23059 */ 23060 rval = sd_setup_next_rw_pkt(un, wr_pktp, wr_bp, 23061 blkno, nblk); 23062 } else { 23063 /* Initial transfer */ 23064 rval = sd_setup_rw_pkt(un, &wr_pktp, wr_bp, 23065 un->un_pkt_flags, NULL_FUNC, NULL, 23066 blkno, nblk); 23067 } 23068 } else { 23069 rval = sd_setup_rw_pkt(un, &wr_pktp, wr_bp, 23070 0, NULL_FUNC, NULL, blkno, nblk); 23071 } 23072 23073 if (rval == 0) { 23074 /* We were given a SCSI packet, continue. 
*/ 23075 break; 23076 } 23077 23078 if (i == 0) { 23079 if (wr_bp->b_flags & B_ERROR) { 23080 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 23081 "no resources for dumping; " 23082 "error code: 0x%x, retrying", 23083 geterror(wr_bp)); 23084 } else { 23085 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 23086 "no resources for dumping; retrying"); 23087 } 23088 } else if (i != (SD_NDUMP_RETRIES - 1)) { 23089 if (wr_bp->b_flags & B_ERROR) { 23090 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 23091 "no resources for dumping; error code: " 23092 "0x%x, retrying\n", geterror(wr_bp)); 23093 } 23094 } else { 23095 if (wr_bp->b_flags & B_ERROR) { 23096 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 23097 "no resources for dumping; " 23098 "error code: 0x%x, retries failed, " 23099 "giving up.\n", geterror(wr_bp)); 23100 } else { 23101 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 23102 "no resources for dumping; " 23103 "retries failed, giving up.\n"); 23104 } 23105 mutex_enter(SD_MUTEX(un)); 23106 Restore_state(un); 23107 if (NOT_DEVBSIZE(un) && (doing_rmw == TRUE)) { 23108 mutex_exit(SD_MUTEX(un)); 23109 scsi_free_consistent_buf(wr_bp); 23110 } else { 23111 mutex_exit(SD_MUTEX(un)); 23112 } 23113 return (EIO); 23114 } 23115 drv_usecwait(10000); 23116 } 23117 23118 if (un->un_partial_dma_supported == 1) { 23119 /* 23120 * save the resid from PARTIAL_DMA 23121 */ 23122 dma_resid = wr_pktp->pkt_resid; 23123 if (dma_resid != 0) 23124 nblk -= SD_BYTES2TGTBLOCKS(un, dma_resid); 23125 wr_pktp->pkt_resid = 0; 23126 } else { 23127 dma_resid = 0; 23128 } 23129 23130 /* SunBug 1222170 */ 23131 wr_pktp->pkt_flags = FLAG_NOINTR; 23132 23133 err = EIO; 23134 for (i = 0; i < SD_NDUMP_RETRIES; i++) { 23135 23136 /* 23137 * Scsi_poll returns 0 (success) if the command completes and 23138 * the status block is STATUS_GOOD. We should only check 23139 * errors if this condition is not true. Even then we should 23140 * send our own request sense packet only if we have a check 23141 * condition and auto request sense has not been performed by 23142 * the hba. 23143 */ 23144 SD_TRACE(SD_LOG_DUMP, un, "sddump: sending write\n"); 23145 23146 if ((sd_scsi_poll(un, wr_pktp) == 0) && 23147 (wr_pktp->pkt_resid == 0)) { 23148 err = SD_SUCCESS; 23149 break; 23150 } 23151 23152 /* 23153 * Check CMD_DEV_GONE 1st, give up if device is gone. 
23154 */ 23155 if (wr_pktp->pkt_reason == CMD_DEV_GONE) { 23156 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 23157 "Error while dumping state...Device is gone\n"); 23158 break; 23159 } 23160 23161 if (SD_GET_PKT_STATUS(wr_pktp) == STATUS_CHECK) { 23162 SD_INFO(SD_LOG_DUMP, un, 23163 "sddump: write failed with CHECK, try # %d\n", i); 23164 if (((wr_pktp->pkt_state & STATE_ARQ_DONE) == 0)) { 23165 (void) sd_send_polled_RQS(un); 23166 } 23167 23168 continue; 23169 } 23170 23171 if (SD_GET_PKT_STATUS(wr_pktp) == STATUS_BUSY) { 23172 int reset_retval = 0; 23173 23174 SD_INFO(SD_LOG_DUMP, un, 23175 "sddump: write failed with BUSY, try # %d\n", i); 23176 23177 if (un->un_f_lun_reset_enabled == TRUE) { 23178 reset_retval = scsi_reset(SD_ADDRESS(un), 23179 RESET_LUN); 23180 } 23181 if (reset_retval == 0) { 23182 (void) scsi_reset(SD_ADDRESS(un), RESET_TARGET); 23183 } 23184 (void) sd_send_polled_RQS(un); 23185 23186 } else { 23187 SD_INFO(SD_LOG_DUMP, un, 23188 "sddump: write failed with 0x%x, try # %d\n", 23189 SD_GET_PKT_STATUS(wr_pktp), i); 23190 mutex_enter(SD_MUTEX(un)); 23191 sd_reset_target(un, wr_pktp); 23192 mutex_exit(SD_MUTEX(un)); 23193 } 23194 23195 /* 23196 * If we are not getting anywhere with lun/target resets, 23197 * let's reset the bus. 23198 */ 23199 if (i == SD_NDUMP_RETRIES/2) { 23200 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL); 23201 (void) sd_send_polled_RQS(un); 23202 } 23203 } 23204 } 23205 23206 scsi_destroy_pkt(wr_pktp); 23207 mutex_enter(SD_MUTEX(un)); 23208 if ((NOT_DEVBSIZE(un)) && (doing_rmw == TRUE)) { 23209 mutex_exit(SD_MUTEX(un)); 23210 scsi_free_consistent_buf(wr_bp); 23211 } else { 23212 mutex_exit(SD_MUTEX(un)); 23213 } 23214 SD_TRACE(SD_LOG_DUMP, un, "sddump: exit: err = %d\n", err); 23215 return (err); 23216 } 23217 23218 /* 23219 * Function: sd_scsi_poll() 23220 * 23221 * Description: This is a wrapper for the scsi_poll call. 23222 * 23223 * Arguments: sd_lun - The unit structure 23224 * scsi_pkt - The scsi packet being sent to the device. 23225 * 23226 * Return Code: 0 - Command completed successfully with good status 23227 * -1 - Command failed. This could indicate a check condition 23228 * or other status value requiring recovery action. 23229 * 23230 * NOTE: This code is only called off sddump(). 23231 */ 23232 23233 static int 23234 sd_scsi_poll(struct sd_lun *un, struct scsi_pkt *pktp) 23235 { 23236 int status; 23237 23238 ASSERT(un != NULL); 23239 ASSERT(!mutex_owned(SD_MUTEX(un))); 23240 ASSERT(pktp != NULL); 23241 23242 status = SD_SUCCESS; 23243 23244 if (scsi_ifgetcap(&pktp->pkt_address, "tagged-qing", 1) == 1) { 23245 pktp->pkt_flags |= un->un_tagflags; 23246 pktp->pkt_flags &= ~FLAG_NODISCON; 23247 } 23248 23249 status = sd_ddi_scsi_poll(pktp); 23250 /* 23251 * Scsi_poll returns 0 (success) if the command completes and the 23252 * status block is STATUS_GOOD. We should only check errors if this 23253 * condition is not true. Even then we should send our own request 23254 * sense packet only if we have a check condition and auto 23255 * request sense has not been performed by the hba. 23256 * Don't get RQS data if pkt_reason is CMD_DEV_GONE. 23257 */ 23258 if ((status != SD_SUCCESS) && 23259 (SD_GET_PKT_STATUS(pktp) == STATUS_CHECK) && 23260 (pktp->pkt_state & STATE_ARQ_DONE) == 0 && 23261 (pktp->pkt_reason != CMD_DEV_GONE)) 23262 (void) sd_send_polled_RQS(un); 23263 23264 return (status); 23265 } 23266 23267 /* 23268 * Function: sd_send_polled_RQS() 23269 * 23270 * Description: This sends the request sense command to a device. 
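* It reuses the pre-allocated un_rqs_pktp/un_rqs_bp and polls via sd_ddi_scsi_poll() directly; it cannot go through sd_scsi_poll(), which may itself call back into this routine (see the "wrap ourselves around the axle" comment in the body below).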
23271 * 23272 * Arguments: sd_lun - The unit structure 23273 * 23274 * Return Code: 0 - Command completed successfully with good status 23275 * -1 - Command failed. 23276 * 23277 */ 23278 23279 static int 23280 sd_send_polled_RQS(struct sd_lun *un) 23281 { 23282 int ret_val; 23283 struct scsi_pkt *rqs_pktp; 23284 struct buf *rqs_bp; 23285 23286 ASSERT(un != NULL); 23287 ASSERT(!mutex_owned(SD_MUTEX(un))); 23288 23289 ret_val = SD_SUCCESS; 23290 23291 rqs_pktp = un->un_rqs_pktp; 23292 rqs_bp = un->un_rqs_bp; 23293 23294 mutex_enter(SD_MUTEX(un)); 23295 23296 if (un->un_sense_isbusy) { 23297 ret_val = SD_FAILURE; 23298 mutex_exit(SD_MUTEX(un)); 23299 return (ret_val); 23300 } 23301 23302 /* 23303 * If the request sense buffer (and packet) is not in use, 23304 * let's set un_sense_isbusy and send our packet. 23305 */ 23306 un->un_sense_isbusy = 1; 23307 rqs_pktp->pkt_resid = 0; 23308 rqs_pktp->pkt_reason = 0; 23309 rqs_pktp->pkt_flags |= FLAG_NOINTR; 23310 bzero(rqs_bp->b_un.b_addr, SENSE_LENGTH); 23311 23312 mutex_exit(SD_MUTEX(un)); 23313 23314 SD_INFO(SD_LOG_COMMON, un, "sd_send_polled_RQS: req sense buf at" 23315 " 0x%p\n", rqs_bp->b_un.b_addr); 23316 23317 /* 23318 * Can't send this to sd_scsi_poll, we wrap ourselves around the 23319 * axle - it has a call into us! 23320 */ 23321 if ((ret_val = sd_ddi_scsi_poll(rqs_pktp)) != 0) { 23322 SD_INFO(SD_LOG_COMMON, un, 23323 "sd_send_polled_RQS: RQS failed\n"); 23324 } 23325 23326 SD_DUMP_MEMORY(un, SD_LOG_COMMON, "sd_send_polled_RQS:", 23327 (uchar_t *)rqs_bp->b_un.b_addr, SENSE_LENGTH, SD_LOG_HEX); 23328 23329 mutex_enter(SD_MUTEX(un)); 23330 un->un_sense_isbusy = 0; 23331 mutex_exit(SD_MUTEX(un)); 23332 23333 return (ret_val); 23334 } 23335 23336 /* 23337 * Defines needed for localized version of the scsi_poll routine. 23338 */ 23339 #define CSEC 10000 /* usecs */ 23340 #define SEC_TO_CSEC (1000000/CSEC) 23341 23342 /* 23343 * Function: sd_ddi_scsi_poll() 23344 * 23345 * Description: Localized version of the scsi_poll routine. The purpose is to 23346 * send a scsi_pkt to a device as a polled command. This version 23347 * is to ensure more robust handling of transport errors. 23348 * Specifically, this routine handles the "not ready to ready" 23349 * transition seen at power up and reset of sonoma devices; this can 23350 * take up to 45 seconds for power-on and 20 seconds for reset of a 23351 * sonoma lun. 23352 * 23353 * Arguments: scsi_pkt - The scsi_pkt being sent to a device 23354 * 23355 * Return Code: 0 - Command completed successfully with good status 23356 * -1 - Command failed. 23357 * 23358 * NOTE: This code is almost identical to scsi_poll; however, before 6668774 can 23359 * be fixed (removing this code), we need to determine how to handle the 23360 * KEY_UNIT_ATTENTION condition below in conditions not as limited as sddump(). 23361 * 23362 * NOTE: This code is only called off sddump(). 23363 */ 23364 static int 23365 sd_ddi_scsi_poll(struct scsi_pkt *pkt) 23366 { 23367 int rval = -1; 23368 int savef; 23369 long savet; 23370 void (*savec)(); 23371 int timeout; 23372 int busy_count; 23373 int poll_delay; 23374 int rc; 23375 uint8_t *sensep; 23376 struct scsi_arq_status *arqstat; 23377 extern int do_polled_io; 23378 23379 ASSERT(pkt->pkt_scbp); 23380 23381 /* 23382 * save old flags.
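* (pkt_flags, pkt_comp and pkt_time are all restored just before this routine returns)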
23383 */ 23384 savef = pkt->pkt_flags; 23385 savec = pkt->pkt_comp; 23386 savet = pkt->pkt_time; 23387 23388 pkt->pkt_flags |= FLAG_NOINTR; 23389 23390 /* 23391 * XXX there is nothing in the SCSA spec that states that we should not 23392 * do a callback for polled cmds; however, removing this will break sd 23393 * and probably other target drivers 23394 */ 23395 pkt->pkt_comp = NULL; 23396 23397 /* 23398 * we don't like a polled command without timeout. 23399 * 60 seconds seems long enough. 23400 */ 23401 if (pkt->pkt_time == 0) 23402 pkt->pkt_time = SCSI_POLL_TIMEOUT; 23403 23404 /* 23405 * Send polled cmd. 23406 * 23407 * We do some error recovery for various errors. Tran_busy, 23408 * queue full, and non-dispatched commands are retried every 10 msec. 23409 * as they are typically transient failures. Busy status and Not 23410 * Ready are retried every second as this status takes a while to 23411 * change. 23412 */ 23413 timeout = pkt->pkt_time * SEC_TO_CSEC; 23414 23415 for (busy_count = 0; busy_count < timeout; busy_count++) { 23416 /* 23417 * Initialize pkt status variables. 23418 */ 23419 *pkt->pkt_scbp = pkt->pkt_reason = pkt->pkt_state = 0; 23420 23421 if ((rc = scsi_transport(pkt)) != TRAN_ACCEPT) { 23422 if (rc != TRAN_BUSY) { 23423 /* Transport failed - give up. */ 23424 break; 23425 } else { 23426 /* Transport busy - try again. */ 23427 poll_delay = 1 * CSEC; /* 10 msec. */ 23428 } 23429 } else { 23430 /* 23431 * Transport accepted - check pkt status. 23432 */ 23433 rc = (*pkt->pkt_scbp) & STATUS_MASK; 23434 if ((pkt->pkt_reason == CMD_CMPLT) && 23435 (rc == STATUS_CHECK) && 23436 (pkt->pkt_state & STATE_ARQ_DONE)) { 23437 arqstat = 23438 (struct scsi_arq_status *)(pkt->pkt_scbp); 23439 sensep = (uint8_t *)&arqstat->sts_sensedata; 23440 } else { 23441 sensep = NULL; 23442 } 23443 23444 if ((pkt->pkt_reason == CMD_CMPLT) && 23445 (rc == STATUS_GOOD)) { 23446 /* No error - we're done */ 23447 rval = 0; 23448 break; 23449 23450 } else if (pkt->pkt_reason == CMD_DEV_GONE) { 23451 /* Lost connection - give up */ 23452 break; 23453 23454 } else if ((pkt->pkt_reason == CMD_INCOMPLETE) && 23455 (pkt->pkt_state == 0)) { 23456 /* Pkt not dispatched - try again. */ 23457 poll_delay = 1 * CSEC; /* 10 msec. */ 23458 23459 } else if ((pkt->pkt_reason == CMD_CMPLT) && 23460 (rc == STATUS_QFULL)) { 23461 /* Queue full - try again. */ 23462 poll_delay = 1 * CSEC; /* 10 msec. */ 23463 23464 } else if ((pkt->pkt_reason == CMD_CMPLT) && 23465 (rc == STATUS_BUSY)) { 23466 /* Busy - try again. */ 23467 poll_delay = 100 * CSEC; /* 1 sec. */ 23468 busy_count += (SEC_TO_CSEC - 1); 23469 23470 } else if ((sensep != NULL) && 23471 (scsi_sense_key(sensep) == KEY_UNIT_ATTENTION)) { 23472 /* 23473 * Unit Attention - try again. 23474 * Pretend it took 1 sec. 23475 * NOTE: 'continue' avoids poll_delay 23476 */ 23477 busy_count += (SEC_TO_CSEC - 1); 23478 continue; 23479 23480 } else if ((sensep != NULL) && 23481 (scsi_sense_key(sensep) == KEY_NOT_READY) && 23482 (scsi_sense_asc(sensep) == 0x04) && 23483 (scsi_sense_ascq(sensep) == 0x01)) { 23484 /* 23485 * Not ready -> ready - try again. 23486 * 04h/01h: LUN IS IN PROCESS OF BECOMING READY 23487 * ...same as STATUS_BUSY 23488 */ 23489 poll_delay = 100 * CSEC; /* 1 sec. */ 23490 busy_count += (SEC_TO_CSEC - 1); 23491 23492 } else { 23493 /* BAD status - give up. 
*/ 23494 break; 23495 } 23496 } 23497 23498 if (((curthread->t_flag & T_INTR_THREAD) == 0) && 23499 !do_polled_io) { 23500 delay(drv_usectohz(poll_delay)); 23501 } else { 23502 /* we busy wait during cpr_dump or interrupt threads */ 23503 drv_usecwait(poll_delay); 23504 } 23505 } 23506 23507 pkt->pkt_flags = savef; 23508 pkt->pkt_comp = savec; 23509 pkt->pkt_time = savet; 23510 23511 /* return on error */ 23512 if (rval) 23513 return (rval); 23514 23515 /* 23516 * This is not a performance critical code path. 23517 * 23518 * As an accommodation for scsi_poll callers, to avoid ddi_dma_sync() 23519 * issues associated with looking at DMA memory prior to 23520 * scsi_pkt_destroy(), we scsi_sync_pkt() prior to return. 23521 */ 23522 scsi_sync_pkt(pkt); 23523 return (0); 23524 } 23525 23526 23527 23528 /* 23529 * Function: sd_persistent_reservation_in_read_keys 23530 * 23531 * Description: This routine is the driver entry point for handling CD-ROM 23532 * multi-host persistent reservation requests (MHIOCGRP_INKEYS) 23533 * by sending the SCSI-3 PRIN commands to the device. 23534 * Processes the read keys command response by copying the 23535 * reservation key information into the user provided buffer. 23536 * Support for the 32/64 bit _MULTI_DATAMODEL is implemented. 23537 * 23538 * Arguments: un - Pointer to soft state struct for the target. 23539 * usrp - user provided pointer to multihost Persistent In Read 23540 * Keys structure (mhioc_inkeys_t) 23541 * flag - this argument is a pass through to ddi_copyxxx() 23542 * directly from the mode argument of ioctl(). 23543 * 23544 * Return Code: 0 - Success 23545 * EACCES 23546 * ENOTSUP 23547 * errno return code from sd_send_scsi_cmd() 23548 * 23549 * Context: Can sleep. Does not return until command is completed. 23550 */ 23551 23552 static int 23553 sd_persistent_reservation_in_read_keys(struct sd_lun *un, 23554 mhioc_inkeys_t *usrp, int flag) 23555 { 23556 #ifdef _MULTI_DATAMODEL 23557 struct mhioc_key_list32 li32; 23558 #endif 23559 sd_prin_readkeys_t *in; 23560 mhioc_inkeys_t *ptr; 23561 mhioc_key_list_t li; 23562 uchar_t *data_bufp; 23563 int data_len; 23564 int rval; 23565 size_t copysz; 23566 23567 if ((ptr = (mhioc_inkeys_t *)usrp) == NULL) { 23568 return (EINVAL); 23569 } 23570 bzero(&li, sizeof (mhioc_key_list_t)); 23571 23572 /* 23573 * Get the listsize from user 23574 */ 23575 #ifdef _MULTI_DATAMODEL 23576 23577 switch (ddi_model_convert_from(flag & FMODELS)) { 23578 case DDI_MODEL_ILP32: 23579 copysz = sizeof (struct mhioc_key_list32); 23580 if (ddi_copyin(ptr->li, &li32, copysz, flag)) { 23581 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23582 "sd_persistent_reservation_in_read_keys: " 23583 "failed ddi_copyin: mhioc_key_list32_t\n"); 23584 rval = EFAULT; 23585 goto done; 23586 } 23587 li.listsize = li32.listsize; 23588 li.list = (mhioc_resv_key_t *)(uintptr_t)li32.list; 23589 break; 23590 23591 case DDI_MODEL_NONE: 23592 copysz = sizeof (mhioc_key_list_t); 23593 if (ddi_copyin(ptr->li, &li, copysz, flag)) { 23594 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23595 "sd_persistent_reservation_in_read_keys: " 23596 "failed ddi_copyin: mhioc_key_list_t\n"); 23597 rval = EFAULT; 23598 goto done; 23599 } 23600 break; 23601 } 23602 23603 #else /* ! 
_MULTI_DATAMODEL */ 23604 copysz = sizeof (mhioc_key_list_t); 23605 if (ddi_copyin(ptr->li, &li, copysz, flag)) { 23606 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23607 "sd_persistent_reservation_in_read_keys: " 23608 "failed ddi_copyin: mhioc_key_list_t\n"); 23609 rval = EFAULT; 23610 goto done; 23611 } 23612 #endif 23613 23614 data_len = li.listsize * MHIOC_RESV_KEY_SIZE; 23615 data_len += (sizeof (sd_prin_readkeys_t) - sizeof (caddr_t)); 23616 data_bufp = kmem_zalloc(data_len, KM_SLEEP); 23617 23618 if ((rval = sd_send_scsi_PERSISTENT_RESERVE_IN(un, SD_READ_KEYS, 23619 data_len, data_bufp)) != 0) { 23620 goto done; 23621 } 23622 in = (sd_prin_readkeys_t *)data_bufp; 23623 ptr->generation = BE_32(in->generation); 23624 li.listlen = BE_32(in->len) / MHIOC_RESV_KEY_SIZE; 23625 23626 /* 23627 * Return the min(listsize, listlen) keys 23628 */ 23629 #ifdef _MULTI_DATAMODEL 23630 23631 switch (ddi_model_convert_from(flag & FMODELS)) { 23632 case DDI_MODEL_ILP32: 23633 li32.listlen = li.listlen; 23634 if (ddi_copyout(&li32, ptr->li, copysz, flag)) { 23635 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23636 "sd_persistent_reservation_in_read_keys: " 23637 "failed ddi_copyout: mhioc_key_list32_t\n"); 23638 rval = EFAULT; 23639 goto done; 23640 } 23641 break; 23642 23643 case DDI_MODEL_NONE: 23644 if (ddi_copyout(&li, ptr->li, copysz, flag)) { 23645 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23646 "sd_persistent_reservation_in_read_keys: " 23647 "failed ddi_copyout: mhioc_key_list_t\n"); 23648 rval = EFAULT; 23649 goto done; 23650 } 23651 break; 23652 } 23653 23654 #else /* ! _MULTI_DATAMODEL */ 23655 23656 if (ddi_copyout(&li, ptr->li, copysz, flag)) { 23657 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23658 "sd_persistent_reservation_in_read_keys: " 23659 "failed ddi_copyout: mhioc_key_list_t\n"); 23660 rval = EFAULT; 23661 goto done; 23662 } 23663 23664 #endif /* _MULTI_DATAMODEL */ 23665 23666 copysz = min(li.listlen * MHIOC_RESV_KEY_SIZE, 23667 li.listsize * MHIOC_RESV_KEY_SIZE); 23668 if (ddi_copyout(&in->keylist, li.list, copysz, flag)) { 23669 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23670 "sd_persistent_reservation_in_read_keys: " 23671 "failed ddi_copyout: keylist\n"); 23672 rval = EFAULT; 23673 } 23674 done: 23675 kmem_free(data_bufp, data_len); 23676 return (rval); 23677 } 23678 23679 23680 /* 23681 * Function: sd_persistent_reservation_in_read_resv 23682 * 23683 * Description: This routine is the driver entry point for handling CD-ROM 23684 * multi-host persistent reservation requests (MHIOCGRP_INRESV) 23685 * by sending the SCSI-3 PRIN commands to the device. 23686 * Process the read persistent reservations command response by 23687 * copying the reservation information into the user provided 23688 * buffer. Support for the 32/64 bit _MULTI_DATAMODEL is implemented. 23689 * 23690 * Arguments: un - Pointer to soft state struct for the target. 23691 * usrp - user provided pointer to multihost Persistent In Read 23692 * Reservations structure (mhioc_inresvs_t) 23693 * flag - this argument is a pass through to ddi_copyxxx() 23694 * directly from the mode argument of ioctl(). 23695 * 23696 * Return Code: 0 - Success 23697 * EACCES 23698 * ENOTSUP 23699 * errno return code from sd_send_scsi_cmd() 23700 * 23701 * Context: Can sleep. Does not return until command is completed.
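* * A minimal user-level sketch of the calling convention (hypothetical descriptor fd, error handling elided): * *	mhioc_resv_desc_t descs[4]; *	mhioc_resv_desc_list_t dl; *	mhioc_inresvs_t inresv; * *	dl.listsize = 4; *	dl.list = descs; *	inresv.li = &dl; *	(void) ioctl(fd, MHIOCGRP_INRESV, &inresv); * * On return dl.listlen holds the total reservation count and the first * min(listlen, listsize) descriptors have been copied into descs[].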
23702 */ 23703 23704 static int 23705 sd_persistent_reservation_in_read_resv(struct sd_lun *un, 23706 mhioc_inresvs_t *usrp, int flag) 23707 { 23708 #ifdef _MULTI_DATAMODEL 23709 struct mhioc_resv_desc_list32 resvlist32; 23710 #endif 23711 sd_prin_readresv_t *in; 23712 mhioc_inresvs_t *ptr; 23713 sd_readresv_desc_t *readresv_ptr; 23714 mhioc_resv_desc_list_t resvlist; 23715 mhioc_resv_desc_t resvdesc; 23716 uchar_t *data_bufp; 23717 int data_len; 23718 int rval; 23719 int i; 23720 size_t copysz; 23721 mhioc_resv_desc_t *bufp; 23722 23723 if ((ptr = usrp) == NULL) { 23724 return (EINVAL); 23725 } 23726 23727 /* 23728 * Get the listsize from user 23729 */ 23730 #ifdef _MULTI_DATAMODEL 23731 switch (ddi_model_convert_from(flag & FMODELS)) { 23732 case DDI_MODEL_ILP32: 23733 copysz = sizeof (struct mhioc_resv_desc_list32); 23734 if (ddi_copyin(ptr->li, &resvlist32, copysz, flag)) { 23735 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23736 "sd_persistent_reservation_in_read_resv: " 23737 "failed ddi_copyin: mhioc_resv_desc_list_t\n"); 23738 rval = EFAULT; 23739 goto done; 23740 } 23741 resvlist.listsize = resvlist32.listsize; 23742 resvlist.list = (mhioc_resv_desc_t *)(uintptr_t)resvlist32.list; 23743 break; 23744 23745 case DDI_MODEL_NONE: 23746 copysz = sizeof (mhioc_resv_desc_list_t); 23747 if (ddi_copyin(ptr->li, &resvlist, copysz, flag)) { 23748 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23749 "sd_persistent_reservation_in_read_resv: " 23750 "failed ddi_copyin: mhioc_resv_desc_list_t\n"); 23751 rval = EFAULT; 23752 goto done; 23753 } 23754 break; 23755 } 23756 #else /* ! _MULTI_DATAMODEL */ 23757 copysz = sizeof (mhioc_resv_desc_list_t); 23758 if (ddi_copyin(ptr->li, &resvlist, copysz, flag)) { 23759 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23760 "sd_persistent_reservation_in_read_resv: " 23761 "failed ddi_copyin: mhioc_resv_desc_list_t\n"); 23762 rval = EFAULT; 23763 goto done; 23764 } 23765 #endif /* ! _MULTI_DATAMODEL */ 23766 23767 data_len = resvlist.listsize * SCSI3_RESV_DESC_LEN; 23768 data_len += (sizeof (sd_prin_readresv_t) - sizeof (caddr_t)); 23769 data_bufp = kmem_zalloc(data_len, KM_SLEEP); 23770 23771 if ((rval = sd_send_scsi_PERSISTENT_RESERVE_IN(un, SD_READ_RESV, 23772 data_len, data_bufp)) != 0) { 23773 goto done; 23774 } 23775 in = (sd_prin_readresv_t *)data_bufp; 23776 ptr->generation = BE_32(in->generation); 23777 resvlist.listlen = BE_32(in->len) / SCSI3_RESV_DESC_LEN; 23778 23779 /* 23780 * Return the min(listsize, listlen) keys 23781 */ 23782 #ifdef _MULTI_DATAMODEL 23783 23784 switch (ddi_model_convert_from(flag & FMODELS)) { 23785 case DDI_MODEL_ILP32: 23786 resvlist32.listlen = resvlist.listlen; 23787 if (ddi_copyout(&resvlist32, ptr->li, copysz, flag)) { 23788 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23789 "sd_persistent_reservation_in_read_resv: " 23790 "failed ddi_copyout: mhioc_resv_desc_list_t\n"); 23791 rval = EFAULT; 23792 goto done; 23793 } 23794 break; 23795 23796 case DDI_MODEL_NONE: 23797 if (ddi_copyout(&resvlist, ptr->li, copysz, flag)) { 23798 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23799 "sd_persistent_reservation_in_read_resv: " 23800 "failed ddi_copyout: mhioc_resv_desc_list_t\n"); 23801 rval = EFAULT; 23802 goto done; 23803 } 23804 break; 23805 } 23806 23807 #else /* ! _MULTI_DATAMODEL */ 23808 23809 if (ddi_copyout(&resvlist, ptr->li, copysz, flag)) { 23810 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23811 "sd_persistent_reservation_in_read_resv: " 23812 "failed ddi_copyout: mhioc_resv_desc_list_t\n"); 23813 rval = EFAULT; 23814 goto done; 23815 } 23816 23817 #endif /* !
_MULTI_DATAMODEL */ 23818 23819 readresv_ptr = (sd_readresv_desc_t *)&in->readresv_desc; 23820 bufp = resvlist.list; 23821 copysz = sizeof (mhioc_resv_desc_t); 23822 for (i = 0; i < min(resvlist.listlen, resvlist.listsize); 23823 i++, readresv_ptr++, bufp++) { 23824 23825 bcopy(&readresv_ptr->resvkey, &resvdesc.key, 23826 MHIOC_RESV_KEY_SIZE); 23827 resvdesc.type = readresv_ptr->type; 23828 resvdesc.scope = readresv_ptr->scope; 23829 resvdesc.scope_specific_addr = 23830 BE_32(readresv_ptr->scope_specific_addr); 23831 23832 if (ddi_copyout(&resvdesc, bufp, copysz, flag)) { 23833 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23834 "sd_persistent_reservation_in_read_resv: " 23835 "failed ddi_copyout: resvlist\n"); 23836 rval = EFAULT; 23837 goto done; 23838 } 23839 } 23840 done: 23841 kmem_free(data_bufp, data_len); 23842 return (rval); 23843 } 23844 23845 23846 /* 23847 * Function: sr_change_blkmode() 23848 * 23849 * Description: This routine is the driver entry point for handling CD-ROM 23850 * block mode ioctl requests. Support for returning and changing 23851 * the current block size in use by the device is implemented. The 23852 * LBA size is changed via a MODE SELECT Block Descriptor. 23853 * 23854 * This routine issues a mode sense with an allocation length of 23855 * 12 bytes for the mode page header and a single block descriptor. 23856 * 23857 * Arguments: dev - the device 'dev_t' 23858 * cmd - the request type; one of CDROMGBLKMODE (get) or 23859 * CDROMSBLKMODE (set) 23860 * data - current block size or requested block size 23861 * flag - this argument is a pass through to ddi_copyxxx() directly 23862 * from the mode argument of ioctl(). 23863 * 23864 * Return Code: the code returned by sd_send_scsi_cmd() 23865 * EINVAL if invalid arguments are provided 23866 * EFAULT if ddi_copyxxx() fails 23867 * ENXIO if ddi_get_soft_state fails 23868 * EIO if invalid mode sense block descriptor length 23869 * 23870 */ 23871 23872 static int 23873 sr_change_blkmode(dev_t dev, int cmd, intptr_t data, int flag) 23874 { 23875 struct sd_lun *un = NULL; 23876 struct mode_header *sense_mhp, *select_mhp; 23877 struct block_descriptor *sense_desc, *select_desc; 23878 int current_bsize; 23879 int rval = EINVAL; 23880 uchar_t *sense = NULL; 23881 uchar_t *select = NULL; 23882 23883 ASSERT((cmd == CDROMGBLKMODE) || (cmd == CDROMSBLKMODE)); 23884 23885 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 23886 return (ENXIO); 23887 } 23888 23889 /* 23890 * The block length is changed via the Mode Select block descriptor; 23891 * the "Read/Write Error Recovery" mode page (0x1) contents are not 23892 * actually required as part of this routine. Therefore the mode sense 23893 * allocation length is specified to be the length of a mode page header 23894 * and a block descriptor.
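* (That is, BUFLEN_CHG_BLK_MODE covers the 4-byte mode parameter header plus one 8-byte block descriptor; the block length itself lives in the descriptor's three blksize bytes, e.g. 2048 is encoded as blksize_hi/mid/lo = 0x00/0x08/0x00.)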
 */
	sense = kmem_zalloc(BUFLEN_CHG_BLK_MODE, KM_SLEEP);

	if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, sense,
	    BUFLEN_CHG_BLK_MODE, MODEPAGE_ERR_RECOV, SD_PATH_STANDARD)) != 0) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sr_change_blkmode: Mode Sense Failed\n");
		kmem_free(sense, BUFLEN_CHG_BLK_MODE);
		return (rval);
	}

	/* Check the block descriptor len to handle only 1 block descriptor */
	sense_mhp = (struct mode_header *)sense;
	if ((sense_mhp->bdesc_length == 0) ||
	    (sense_mhp->bdesc_length > MODE_BLK_DESC_LENGTH)) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sr_change_blkmode: Mode Sense returned invalid block"
		    " descriptor length\n");
		kmem_free(sense, BUFLEN_CHG_BLK_MODE);
		return (EIO);
	}
	sense_desc = (struct block_descriptor *)(sense + MODE_HEADER_LENGTH);
	current_bsize = ((sense_desc->blksize_hi << 16) |
	    (sense_desc->blksize_mid << 8) | sense_desc->blksize_lo);

	/* Process command */
	switch (cmd) {
	case CDROMGBLKMODE:
		/* Return the block size obtained during the mode sense */
		if (ddi_copyout(&current_bsize, (void *)data,
		    sizeof (int), flag) != 0)
			rval = EFAULT;
		break;
	case CDROMSBLKMODE:
		/* Validate the requested block size */
		switch (data) {
		case CDROM_BLK_512:
		case CDROM_BLK_1024:
		case CDROM_BLK_2048:
		case CDROM_BLK_2056:
		case CDROM_BLK_2336:
		case CDROM_BLK_2340:
		case CDROM_BLK_2352:
		case CDROM_BLK_2368:
		case CDROM_BLK_2448:
		case CDROM_BLK_2646:
		case CDROM_BLK_2647:
			break;
		default:
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "sr_change_blkmode: "
			    "Block Size '%ld' Not Supported\n", data);
			kmem_free(sense, BUFLEN_CHG_BLK_MODE);
			return (EINVAL);
		}

		/*
		 * The current block size matches the requested block size so
		 * there is no need to send the mode select to change the size
		 */
		if (current_bsize == data) {
			break;
		}

		/* Build the select data for the requested block size */
		select = kmem_zalloc(BUFLEN_CHG_BLK_MODE, KM_SLEEP);
		select_mhp = (struct mode_header *)select;
		select_desc =
		    (struct block_descriptor *)(select + MODE_HEADER_LENGTH);
		/*
		 * The LBA size is changed via the block descriptor, so the
		 * descriptor is built according to the user data
		 */
		select_mhp->bdesc_length = MODE_BLK_DESC_LENGTH;
		select_desc->blksize_hi = (char)(((data) & 0x00ff0000) >> 16);
		select_desc->blksize_mid = (char)(((data) & 0x0000ff00) >> 8);
		select_desc->blksize_lo = (char)((data) & 0x000000ff);

		/* Send the mode select for the requested block size */
		if ((rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP0,
		    select, BUFLEN_CHG_BLK_MODE, SD_DONTSAVE_PAGE,
		    SD_PATH_STANDARD)) != 0) {
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "sr_change_blkmode: Mode Select Failed\n");
			/*
			 * The mode select failed for the requested block size,
			 * so reset the data for the original block size and
			 * send it to the target. The error is indicated by the
			 * return value for the failed mode select.
23984 */ 23985 select_desc->blksize_hi = sense_desc->blksize_hi; 23986 select_desc->blksize_mid = sense_desc->blksize_mid; 23987 select_desc->blksize_lo = sense_desc->blksize_lo; 23988 (void) sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, 23989 select, BUFLEN_CHG_BLK_MODE, SD_DONTSAVE_PAGE, 23990 SD_PATH_STANDARD); 23991 } else { 23992 ASSERT(!mutex_owned(SD_MUTEX(un))); 23993 mutex_enter(SD_MUTEX(un)); 23994 sd_update_block_info(un, (uint32_t)data, 0); 23995 mutex_exit(SD_MUTEX(un)); 23996 } 23997 break; 23998 default: 23999 /* should not reach here, but check anyway */ 24000 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24001 "sr_change_blkmode: Command '%x' Not Supported\n", cmd); 24002 rval = EINVAL; 24003 break; 24004 } 24005 24006 if (select) { 24007 kmem_free(select, BUFLEN_CHG_BLK_MODE); 24008 } 24009 if (sense) { 24010 kmem_free(sense, BUFLEN_CHG_BLK_MODE); 24011 } 24012 return (rval); 24013 } 24014 24015 24016 /* 24017 * Note: The following sr_change_speed() and sr_atapi_change_speed() routines 24018 * implement driver support for getting and setting the CD speed. The command 24019 * set used will be based on the device type. If the device has not been 24020 * identified as MMC the Toshiba vendor specific mode page will be used. If 24021 * the device is MMC but does not support the Real Time Streaming feature 24022 * the SET CD SPEED command will be used to set speed and mode page 0x2A will 24023 * be used to read the speed. 24024 */ 24025 24026 /* 24027 * Function: sr_change_speed() 24028 * 24029 * Description: This routine is the driver entry point for handling CD-ROM 24030 * drive speed ioctl requests for devices supporting the Toshiba 24031 * vendor specific drive speed mode page. Support for returning 24032 * and changing the current drive speed in use by the device is 24033 * implemented. 24034 * 24035 * Arguments: dev - the device 'dev_t' 24036 * cmd - the request type; one of CDROMGDRVSPEED (get) or 24037 * CDROMSDRVSPEED (set) 24038 * data - current drive speed or requested drive speed 24039 * flag - this argument is a pass through to ddi_copyxxx() directly 24040 * from the mode argument of ioctl(). 24041 * 24042 * Return Code: the code returned by sd_send_scsi_cmd() 24043 * EINVAL if invalid arguments are provided 24044 * EFAULT if ddi_copyxxx() fails 24045 * ENXIO if fail ddi_get_soft_state 24046 * EIO if invalid mode sense block descriptor length 24047 */ 24048 24049 static int 24050 sr_change_speed(dev_t dev, int cmd, intptr_t data, int flag) 24051 { 24052 struct sd_lun *un = NULL; 24053 struct mode_header *sense_mhp, *select_mhp; 24054 struct mode_speed *sense_page, *select_page; 24055 int current_speed; 24056 int rval = EINVAL; 24057 int bd_len; 24058 uchar_t *sense = NULL; 24059 uchar_t *select = NULL; 24060 24061 ASSERT((cmd == CDROMGDRVSPEED) || (cmd == CDROMSDRVSPEED)); 24062 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24063 return (ENXIO); 24064 } 24065 24066 /* 24067 * Note: The drive speed is being modified here according to a Toshiba 24068 * vendor specific mode page (0x31). 
 */
	sense = kmem_zalloc(BUFLEN_MODE_CDROM_SPEED, KM_SLEEP);

	if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, sense,
	    BUFLEN_MODE_CDROM_SPEED, CDROM_MODE_SPEED,
	    SD_PATH_STANDARD)) != 0) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sr_change_speed: Mode Sense Failed\n");
		kmem_free(sense, BUFLEN_MODE_CDROM_SPEED);
		return (rval);
	}
	sense_mhp = (struct mode_header *)sense;

	/* Check the block descriptor len to handle only 1 block descriptor */
	bd_len = sense_mhp->bdesc_length;
	if (bd_len > MODE_BLK_DESC_LENGTH) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sr_change_speed: Mode Sense returned invalid block "
		    "descriptor length\n");
		kmem_free(sense, BUFLEN_MODE_CDROM_SPEED);
		return (EIO);
	}

	sense_page = (struct mode_speed *)
	    (sense + MODE_HEADER_LENGTH + sense_mhp->bdesc_length);
	current_speed = sense_page->speed;

	/* Process command */
	switch (cmd) {
	case CDROMGDRVSPEED:
		/* Return the drive speed obtained during the mode sense */
		if (current_speed == 0x2) {
			current_speed = CDROM_TWELVE_SPEED;
		}
		if (ddi_copyout(&current_speed, (void *)data,
		    sizeof (int), flag) != 0) {
			rval = EFAULT;
		}
		break;
	case CDROMSDRVSPEED:
		/* Validate the requested drive speed */
		switch ((uchar_t)data) {
		case CDROM_TWELVE_SPEED:
			data = 0x2;
			/*FALLTHROUGH*/
		case CDROM_NORMAL_SPEED:
		case CDROM_DOUBLE_SPEED:
		case CDROM_QUAD_SPEED:
		case CDROM_MAXIMUM_SPEED:
			break;
		default:
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "sr_change_speed: "
			    "Drive Speed '%d' Not Supported\n", (uchar_t)data);
			kmem_free(sense, BUFLEN_MODE_CDROM_SPEED);
			return (EINVAL);
		}

		/*
		 * The current drive speed matches the requested drive speed so
		 * there is no need to send the mode select to change the speed
		 */
		if (current_speed == data) {
			break;
		}

		/* Build the select data for the requested drive speed */
		select = kmem_zalloc(BUFLEN_MODE_CDROM_SPEED, KM_SLEEP);
		select_mhp = (struct mode_header *)select;
		select_mhp->bdesc_length = 0;
		select_page =
		    (struct mode_speed *)(select + MODE_HEADER_LENGTH);
		select_page->mode_page.code = CDROM_MODE_SPEED;
		select_page->mode_page.length = 2;
		select_page->speed = (uchar_t)data;

		/* Send the mode select for the requested drive speed */
		if ((rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, select,
		    MODEPAGE_CDROM_SPEED_LEN + MODE_HEADER_LENGTH,
		    SD_DONTSAVE_PAGE, SD_PATH_STANDARD)) != 0) {
			/*
			 * The mode select failed for the requested drive speed,
			 * so reset the data for the original drive speed and
			 * send it to the target. The error is indicated by the
			 * return value for the failed mode select.
 */
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "sr_change_speed: Mode Select Failed\n");
			select_page->speed = sense_page->speed;
			(void) sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, select,
			    MODEPAGE_CDROM_SPEED_LEN + MODE_HEADER_LENGTH,
			    SD_DONTSAVE_PAGE, SD_PATH_STANDARD);
		}
		break;
	default:
		/* should not reach here, but check anyway */
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sr_change_speed: Command '%x' Not Supported\n", cmd);
		rval = EINVAL;
		break;
	}

	if (select) {
		kmem_free(select, BUFLEN_MODE_CDROM_SPEED);
	}
	if (sense) {
		kmem_free(sense, BUFLEN_MODE_CDROM_SPEED);
	}

	return (rval);
}


/*
 * Function: sr_atapi_change_speed()
 *
 * Description: This routine is the driver entry point for handling CD-ROM
 *		drive speed ioctl requests for MMC devices that do not support
 *		the Real Time Streaming feature (0x107).
 *
 *		Note: This routine will use the SET SPEED command which may not
 *		be supported by all devices.
 *
 * Arguments: dev - the device 'dev_t'
 *	      cmd - the request type; one of CDROMGDRVSPEED (get) or
 *		    CDROMSDRVSPEED (set)
 *	      data - current drive speed or requested drive speed
 *	      flag - this argument is a pass through to ddi_copyxxx() directly
 *		     from the mode argument of ioctl().
 *
 * Return Code: the code returned by sd_send_scsi_cmd()
 *		EINVAL if invalid arguments are provided
 *		EFAULT if ddi_copyxxx() fails
 *		ENXIO if ddi_get_soft_state fails
 *		EIO if invalid mode sense block descriptor length
 */

static int
sr_atapi_change_speed(dev_t dev, int cmd, intptr_t data, int flag)
{
	struct sd_lun			*un;
	struct uscsi_cmd		*com = NULL;
	struct mode_header_grp2		*sense_mhp;
	uchar_t				*sense_page;
	uchar_t				*sense = NULL;
	char				cdb[CDB_GROUP5];
	int				bd_len;
	int				current_speed = 0;
	int				max_speed = 0;
	int				rval;

	ASSERT((cmd == CDROMGDRVSPEED) || (cmd == CDROMSDRVSPEED));

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	sense = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP);

	if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, sense,
	    BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP,
	    SD_PATH_STANDARD)) != 0) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sr_atapi_change_speed: Mode Sense Failed\n");
		kmem_free(sense, BUFLEN_MODE_CDROM_CAP);
		return (rval);
	}

	/* Check the block descriptor len to handle only 1 block descriptor */
	sense_mhp = (struct mode_header_grp2 *)sense;
	bd_len = (sense_mhp->bdesc_length_hi << 8) | sense_mhp->bdesc_length_lo;
	if (bd_len > MODE_BLK_DESC_LENGTH) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sr_atapi_change_speed: Mode Sense returned invalid "
		    "block descriptor length\n");
		kmem_free(sense, BUFLEN_MODE_CDROM_CAP);
		return (EIO);
	}

	/* Calculate the current and maximum drive speeds */
	sense_page = (uchar_t *)(sense + MODE_HEADER_LENGTH_GRP2 + bd_len);
	current_speed = (sense_page[14] << 8) | sense_page[15];
	max_speed = (sense_page[8] << 8) | sense_page[9];

	/* Process the command */
	switch (cmd) {
	case CDROMGDRVSPEED:
		current_speed /= SD_SPEED_1X;
		if (ddi_copyout(&current_speed, (void *)data,
		    sizeof (int), flag) != 0)
			rval = EFAULT;
		break;
	case CDROMSDRVSPEED:
		/* Convert the speed code to KB/sec */
		switch ((uchar_t)data) {
		case CDROM_NORMAL_SPEED:
			current_speed = SD_SPEED_1X;
			break;
		case CDROM_DOUBLE_SPEED:
			current_speed = 2 * SD_SPEED_1X;
			break;
		case CDROM_QUAD_SPEED:
			current_speed = 4 * SD_SPEED_1X;
			break;
		case CDROM_TWELVE_SPEED:
			current_speed = 12 * SD_SPEED_1X;
			break;
		case CDROM_MAXIMUM_SPEED:
			current_speed = 0xffff;
			break;
		default:
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "sr_atapi_change_speed: invalid drive speed %d\n",
			    (uchar_t)data);
			kmem_free(sense, BUFLEN_MODE_CDROM_CAP);
			return (EINVAL);
		}

		/* Check the request against the drive's max speed. */
		if (current_speed != 0xffff) {
			if (current_speed > max_speed) {
				kmem_free(sense, BUFLEN_MODE_CDROM_CAP);
				return (EINVAL);
			}
		}

		/*
		 * Build and send the SET SPEED command
		 *
		 * Note: The SET SPEED (0xBB) command used in this routine is
		 * obsolete per the SCSI MMC spec but still supported in the
		 * MT FUJI vendor spec. Most equipment is adhering to MT FUJI,
		 * therefore the command is still implemented in this routine.
		 */
		bzero(cdb, sizeof (cdb));
		cdb[0] = (char)SCMD_SET_CDROM_SPEED;
		cdb[2] = (uchar_t)(current_speed >> 8);
		cdb[3] = (uchar_t)current_speed;
		com = kmem_zalloc(sizeof (*com), KM_SLEEP);
		com->uscsi_cdb = (caddr_t)cdb;
		com->uscsi_cdblen = CDB_GROUP5;
		com->uscsi_bufaddr = NULL;
		com->uscsi_buflen = 0;
		com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT;
		rval = sd_send_scsi_cmd(dev, com, FKIOCTL, 0, SD_PATH_STANDARD);
		break;
	default:
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sr_atapi_change_speed: Command '%x' Not Supported\n", cmd);
		rval = EINVAL;
	}

	if (sense) {
		kmem_free(sense, BUFLEN_MODE_CDROM_CAP);
	}
	if (com) {
		kmem_free(com, sizeof (*com));
	}
	return (rval);
}


/*
 * Function: sr_pause_resume()
 *
 * Description: This routine is the driver entry point for handling CD-ROM
 *		pause/resume ioctl requests. This only affects the audio play
 *		operation.
 *
 * Arguments: dev - the device 'dev_t'
 *	      cmd - the request type; one of CDROMPAUSE or CDROMRESUME, used
 *		    for setting the resume bit of the cdb.
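 *
 *	      A user-level sketch (hypothetical fd; the third ioctl
 *	      argument is unused for these two requests):
 *
 *		(void) ioctl(fd, CDROMPAUSE, 0);
 *		(void) ioctl(fd, CDROMRESUME, 0);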
24343 * 24344 * Return Code: the code returned by sd_send_scsi_cmd() 24345 * EINVAL if invalid mode specified 24346 * 24347 */ 24348 24349 static int 24350 sr_pause_resume(dev_t dev, int cmd) 24351 { 24352 struct sd_lun *un; 24353 struct uscsi_cmd *com; 24354 char cdb[CDB_GROUP1]; 24355 int rval; 24356 24357 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24358 return (ENXIO); 24359 } 24360 24361 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 24362 bzero(cdb, CDB_GROUP1); 24363 cdb[0] = SCMD_PAUSE_RESUME; 24364 switch (cmd) { 24365 case CDROMRESUME: 24366 cdb[8] = 1; 24367 break; 24368 case CDROMPAUSE: 24369 cdb[8] = 0; 24370 break; 24371 default: 24372 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_pause_resume:" 24373 " Command '%x' Not Supported\n", cmd); 24374 rval = EINVAL; 24375 goto done; 24376 } 24377 24378 com->uscsi_cdb = cdb; 24379 com->uscsi_cdblen = CDB_GROUP1; 24380 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT; 24381 24382 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 24383 SD_PATH_STANDARD); 24384 24385 done: 24386 kmem_free(com, sizeof (*com)); 24387 return (rval); 24388 } 24389 24390 24391 /* 24392 * Function: sr_play_msf() 24393 * 24394 * Description: This routine is the driver entry point for handling CD-ROM 24395 * ioctl requests to output the audio signals at the specified 24396 * starting address and continue the audio play until the specified 24397 * ending address (CDROMPLAYMSF) The address is in Minute Second 24398 * Frame (MSF) format. 24399 * 24400 * Arguments: dev - the device 'dev_t' 24401 * data - pointer to user provided audio msf structure, 24402 * specifying start/end addresses. 24403 * flag - this argument is a pass through to ddi_copyxxx() 24404 * directly from the mode argument of ioctl(). 
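 *
 *	      A user-level sketch (hypothetical fd; plays from 0:02.00 to
 *	      1:00.00, using the same field names consumed by the cdb
 *	      setup below):
 *
 *		struct cdrom_msf msf;
 *		msf.cdmsf_min0 = 0; msf.cdmsf_sec0 = 2; msf.cdmsf_frame0 = 0;
 *		msf.cdmsf_min1 = 1; msf.cdmsf_sec1 = 0; msf.cdmsf_frame1 = 0;
 *		(void) ioctl(fd, CDROMPLAYMSF, &msf);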
24405 * 24406 * Return Code: the code returned by sd_send_scsi_cmd() 24407 * EFAULT if ddi_copyxxx() fails 24408 * ENXIO if fail ddi_get_soft_state 24409 * EINVAL if data pointer is NULL 24410 */ 24411 24412 static int 24413 sr_play_msf(dev_t dev, caddr_t data, int flag) 24414 { 24415 struct sd_lun *un; 24416 struct uscsi_cmd *com; 24417 struct cdrom_msf msf_struct; 24418 struct cdrom_msf *msf = &msf_struct; 24419 char cdb[CDB_GROUP1]; 24420 int rval; 24421 24422 if (data == NULL) { 24423 return (EINVAL); 24424 } 24425 24426 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24427 return (ENXIO); 24428 } 24429 24430 if (ddi_copyin(data, msf, sizeof (struct cdrom_msf), flag)) { 24431 return (EFAULT); 24432 } 24433 24434 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 24435 bzero(cdb, CDB_GROUP1); 24436 cdb[0] = SCMD_PLAYAUDIO_MSF; 24437 if (un->un_f_cfg_playmsf_bcd == TRUE) { 24438 cdb[3] = BYTE_TO_BCD(msf->cdmsf_min0); 24439 cdb[4] = BYTE_TO_BCD(msf->cdmsf_sec0); 24440 cdb[5] = BYTE_TO_BCD(msf->cdmsf_frame0); 24441 cdb[6] = BYTE_TO_BCD(msf->cdmsf_min1); 24442 cdb[7] = BYTE_TO_BCD(msf->cdmsf_sec1); 24443 cdb[8] = BYTE_TO_BCD(msf->cdmsf_frame1); 24444 } else { 24445 cdb[3] = msf->cdmsf_min0; 24446 cdb[4] = msf->cdmsf_sec0; 24447 cdb[5] = msf->cdmsf_frame0; 24448 cdb[6] = msf->cdmsf_min1; 24449 cdb[7] = msf->cdmsf_sec1; 24450 cdb[8] = msf->cdmsf_frame1; 24451 } 24452 com->uscsi_cdb = cdb; 24453 com->uscsi_cdblen = CDB_GROUP1; 24454 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT; 24455 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 24456 SD_PATH_STANDARD); 24457 kmem_free(com, sizeof (*com)); 24458 return (rval); 24459 } 24460 24461 24462 /* 24463 * Function: sr_play_trkind() 24464 * 24465 * Description: This routine is the driver entry point for handling CD-ROM 24466 * ioctl requests to output the audio signals at the specified 24467 * starting address and continue the audio play until the specified 24468 * ending address (CDROMPLAYTRKIND). The address is in Track Index 24469 * format. 24470 * 24471 * Arguments: dev - the device 'dev_t' 24472 * data - pointer to user provided audio track/index structure, 24473 * specifying start/end addresses. 24474 * flag - this argument is a pass through to ddi_copyxxx() 24475 * directly from the mode argument of ioctl(). 
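 *
 *	      A user-level sketch (hypothetical fd; plays track 1 index 1
 *	      through track 2 index 1, using the field names consumed by
 *	      the cdb setup below):
 *
 *		struct cdrom_ti ti;
 *		ti.cdti_trk0 = 1; ti.cdti_ind0 = 1;
 *		ti.cdti_trk1 = 2; ti.cdti_ind1 = 1;
 *		(void) ioctl(fd, CDROMPLAYTRKIND, &ti);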
24476 * 24477 * Return Code: the code returned by sd_send_scsi_cmd() 24478 * EFAULT if ddi_copyxxx() fails 24479 * ENXIO if fail ddi_get_soft_state 24480 * EINVAL if data pointer is NULL 24481 */ 24482 24483 static int 24484 sr_play_trkind(dev_t dev, caddr_t data, int flag) 24485 { 24486 struct cdrom_ti ti_struct; 24487 struct cdrom_ti *ti = &ti_struct; 24488 struct uscsi_cmd *com = NULL; 24489 char cdb[CDB_GROUP1]; 24490 int rval; 24491 24492 if (data == NULL) { 24493 return (EINVAL); 24494 } 24495 24496 if (ddi_copyin(data, ti, sizeof (struct cdrom_ti), flag)) { 24497 return (EFAULT); 24498 } 24499 24500 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 24501 bzero(cdb, CDB_GROUP1); 24502 cdb[0] = SCMD_PLAYAUDIO_TI; 24503 cdb[4] = ti->cdti_trk0; 24504 cdb[5] = ti->cdti_ind0; 24505 cdb[7] = ti->cdti_trk1; 24506 cdb[8] = ti->cdti_ind1; 24507 com->uscsi_cdb = cdb; 24508 com->uscsi_cdblen = CDB_GROUP1; 24509 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT; 24510 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 24511 SD_PATH_STANDARD); 24512 kmem_free(com, sizeof (*com)); 24513 return (rval); 24514 } 24515 24516 24517 /* 24518 * Function: sr_read_all_subcodes() 24519 * 24520 * Description: This routine is the driver entry point for handling CD-ROM 24521 * ioctl requests to return raw subcode data while the target is 24522 * playing audio (CDROMSUBCODE). 24523 * 24524 * Arguments: dev - the device 'dev_t' 24525 * data - pointer to user provided cdrom subcode structure, 24526 * specifying the transfer length and address. 24527 * flag - this argument is a pass through to ddi_copyxxx() 24528 * directly from the mode argument of ioctl(). 24529 * 24530 * Return Code: the code returned by sd_send_scsi_cmd() 24531 * EFAULT if ddi_copyxxx() fails 24532 * ENXIO if fail ddi_get_soft_state 24533 * EINVAL if data pointer is NULL 24534 */ 24535 24536 static int 24537 sr_read_all_subcodes(dev_t dev, caddr_t data, int flag) 24538 { 24539 struct sd_lun *un = NULL; 24540 struct uscsi_cmd *com = NULL; 24541 struct cdrom_subcode *subcode = NULL; 24542 int rval; 24543 size_t buflen; 24544 char cdb[CDB_GROUP5]; 24545 24546 #ifdef _MULTI_DATAMODEL 24547 /* To support ILP32 applications in an LP64 world */ 24548 struct cdrom_subcode32 cdrom_subcode32; 24549 struct cdrom_subcode32 *cdsc32 = &cdrom_subcode32; 24550 #endif 24551 if (data == NULL) { 24552 return (EINVAL); 24553 } 24554 24555 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24556 return (ENXIO); 24557 } 24558 24559 subcode = kmem_zalloc(sizeof (struct cdrom_subcode), KM_SLEEP); 24560 24561 #ifdef _MULTI_DATAMODEL 24562 switch (ddi_model_convert_from(flag & FMODELS)) { 24563 case DDI_MODEL_ILP32: 24564 if (ddi_copyin(data, cdsc32, sizeof (*cdsc32), flag)) { 24565 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24566 "sr_read_all_subcodes: ddi_copyin Failed\n"); 24567 kmem_free(subcode, sizeof (struct cdrom_subcode)); 24568 return (EFAULT); 24569 } 24570 /* Convert the ILP32 uscsi data from the application to LP64 */ 24571 cdrom_subcode32tocdrom_subcode(cdsc32, subcode); 24572 break; 24573 case DDI_MODEL_NONE: 24574 if (ddi_copyin(data, subcode, 24575 sizeof (struct cdrom_subcode), flag)) { 24576 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24577 "sr_read_all_subcodes: ddi_copyin Failed\n"); 24578 kmem_free(subcode, sizeof (struct cdrom_subcode)); 24579 return (EFAULT); 24580 } 24581 break; 24582 } 24583 #else /* ! 
_MULTI_DATAMODEL */
	if (ddi_copyin(data, subcode, sizeof (struct cdrom_subcode), flag)) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sr_read_all_subcodes: ddi_copyin Failed\n");
		kmem_free(subcode, sizeof (struct cdrom_subcode));
		return (EFAULT);
	}
#endif /* _MULTI_DATAMODEL */

	/*
	 * Since MMC-2 expects max 3 bytes for length, check if the
	 * length input is greater than 3 bytes
	 */
	if ((subcode->cdsc_length & 0xFF000000) != 0) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sr_read_all_subcodes: "
		    "cdrom transfer length too large: %d (limit %d)\n",
		    subcode->cdsc_length, 0xFFFFFF);
		kmem_free(subcode, sizeof (struct cdrom_subcode));
		return (EINVAL);
	}

	buflen = CDROM_BLK_SUBCODE * subcode->cdsc_length;
	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
	bzero(cdb, CDB_GROUP5);

	if (un->un_f_mmc_cap == TRUE) {
		cdb[0] = (char)SCMD_READ_CD;
		cdb[2] = (char)0xff;
		cdb[3] = (char)0xff;
		cdb[4] = (char)0xff;
		cdb[5] = (char)0xff;
		cdb[6] = (((subcode->cdsc_length) & 0x00ff0000) >> 16);
		cdb[7] = (((subcode->cdsc_length) & 0x0000ff00) >> 8);
		cdb[8] = ((subcode->cdsc_length) & 0x000000ff);
		cdb[10] = 1;
	} else {
		/*
		 * Note: A vendor specific command (0xDF) is being used here to
		 * request a read of all subcodes.
		 */
		cdb[0] = (char)SCMD_READ_ALL_SUBCODES;
		cdb[6] = (((subcode->cdsc_length) & 0xff000000) >> 24);
		cdb[7] = (((subcode->cdsc_length) & 0x00ff0000) >> 16);
		cdb[8] = (((subcode->cdsc_length) & 0x0000ff00) >> 8);
		cdb[9] = ((subcode->cdsc_length) & 0x000000ff);
	}
	com->uscsi_cdb = cdb;
	com->uscsi_cdblen = CDB_GROUP5;
	com->uscsi_bufaddr = (caddr_t)subcode->cdsc_addr;
	com->uscsi_buflen = buflen;
	com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;
	rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE,
	    SD_PATH_STANDARD);
	kmem_free(subcode, sizeof (struct cdrom_subcode));
	kmem_free(com, sizeof (*com));
	return (rval);
}


/*
 * Function: sr_read_subchannel()
 *
 * Description: This routine is the driver entry point for handling CD-ROM
 *		ioctl requests to return the Q sub-channel data of the CD
 *		current position block (CDROMSUBCHNL). The data includes the
 *		track number, index number, absolute CD-ROM address (LBA or MSF
 *		format per the user), track relative CD-ROM address (LBA or MSF
 *		format per the user), control data and audio status.
 *
 * Arguments: dev - the device 'dev_t'
 *	      data - pointer to user provided cdrom sub-channel structure
 *	      flag - this argument is a pass through to ddi_copyxxx()
 *		     directly from the mode argument of ioctl().
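 *
 *	      A user-level sketch (hypothetical fd; requests the current
 *	      position in MSF format, as decoded by this routine):
 *
 *		struct cdrom_subchnl sc;
 *		sc.cdsc_format = CDROM_MSF;
 *		if (ioctl(fd, CDROMSUBCHNL, &sc) == 0)
 *			(void) printf("track %d index %d\n",
 *			    sc.cdsc_trk, sc.cdsc_ind);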
24657 * 24658 * Return Code: the code returned by sd_send_scsi_cmd() 24659 * EFAULT if ddi_copyxxx() fails 24660 * ENXIO if fail ddi_get_soft_state 24661 * EINVAL if data pointer is NULL 24662 */ 24663 24664 static int 24665 sr_read_subchannel(dev_t dev, caddr_t data, int flag) 24666 { 24667 struct sd_lun *un; 24668 struct uscsi_cmd *com; 24669 struct cdrom_subchnl subchanel; 24670 struct cdrom_subchnl *subchnl = &subchanel; 24671 char cdb[CDB_GROUP1]; 24672 caddr_t buffer; 24673 int rval; 24674 24675 if (data == NULL) { 24676 return (EINVAL); 24677 } 24678 24679 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 24680 (un->un_state == SD_STATE_OFFLINE)) { 24681 return (ENXIO); 24682 } 24683 24684 if (ddi_copyin(data, subchnl, sizeof (struct cdrom_subchnl), flag)) { 24685 return (EFAULT); 24686 } 24687 24688 buffer = kmem_zalloc((size_t)16, KM_SLEEP); 24689 bzero(cdb, CDB_GROUP1); 24690 cdb[0] = SCMD_READ_SUBCHANNEL; 24691 /* Set the MSF bit based on the user requested address format */ 24692 cdb[1] = (subchnl->cdsc_format & CDROM_LBA) ? 0 : 0x02; 24693 /* 24694 * Set the Q bit in byte 2 to indicate that Q sub-channel data be 24695 * returned 24696 */ 24697 cdb[2] = 0x40; 24698 /* 24699 * Set byte 3 to specify the return data format. A value of 0x01 24700 * indicates that the CD-ROM current position should be returned. 24701 */ 24702 cdb[3] = 0x01; 24703 cdb[8] = 0x10; 24704 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 24705 com->uscsi_cdb = cdb; 24706 com->uscsi_cdblen = CDB_GROUP1; 24707 com->uscsi_bufaddr = buffer; 24708 com->uscsi_buflen = 16; 24709 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 24710 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 24711 SD_PATH_STANDARD); 24712 if (rval != 0) { 24713 kmem_free(buffer, 16); 24714 kmem_free(com, sizeof (*com)); 24715 return (rval); 24716 } 24717 24718 /* Process the returned Q sub-channel data */ 24719 subchnl->cdsc_audiostatus = buffer[1]; 24720 subchnl->cdsc_adr = (buffer[5] & 0xF0); 24721 subchnl->cdsc_ctrl = (buffer[5] & 0x0F); 24722 subchnl->cdsc_trk = buffer[6]; 24723 subchnl->cdsc_ind = buffer[7]; 24724 if (subchnl->cdsc_format & CDROM_LBA) { 24725 subchnl->cdsc_absaddr.lba = 24726 ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) + 24727 ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]); 24728 subchnl->cdsc_reladdr.lba = 24729 ((uchar_t)buffer[12] << 24) + ((uchar_t)buffer[13] << 16) + 24730 ((uchar_t)buffer[14] << 8) + ((uchar_t)buffer[15]); 24731 } else if (un->un_f_cfg_readsub_bcd == TRUE) { 24732 subchnl->cdsc_absaddr.msf.minute = BCD_TO_BYTE(buffer[9]); 24733 subchnl->cdsc_absaddr.msf.second = BCD_TO_BYTE(buffer[10]); 24734 subchnl->cdsc_absaddr.msf.frame = BCD_TO_BYTE(buffer[11]); 24735 subchnl->cdsc_reladdr.msf.minute = BCD_TO_BYTE(buffer[13]); 24736 subchnl->cdsc_reladdr.msf.second = BCD_TO_BYTE(buffer[14]); 24737 subchnl->cdsc_reladdr.msf.frame = BCD_TO_BYTE(buffer[15]); 24738 } else { 24739 subchnl->cdsc_absaddr.msf.minute = buffer[9]; 24740 subchnl->cdsc_absaddr.msf.second = buffer[10]; 24741 subchnl->cdsc_absaddr.msf.frame = buffer[11]; 24742 subchnl->cdsc_reladdr.msf.minute = buffer[13]; 24743 subchnl->cdsc_reladdr.msf.second = buffer[14]; 24744 subchnl->cdsc_reladdr.msf.frame = buffer[15]; 24745 } 24746 kmem_free(buffer, 16); 24747 kmem_free(com, sizeof (*com)); 24748 if (ddi_copyout(subchnl, data, sizeof (struct cdrom_subchnl), flag) 24749 != 0) { 24750 return (EFAULT); 24751 } 24752 return (rval); 24753 } 24754 24755 24756 /* 24757 * Function: sr_read_tocentry() 24758 * 
 * Description: This routine is the driver entry point for handling CD-ROM
 *		ioctl requests to read from the Table of Contents (TOC)
 *		(CDROMREADTOCENTRY). This routine provides the ADR and CTRL
 *		fields, the starting address (LBA or MSF format per the user)
 *		and the data mode if the user specified track is a data track.
 *
 *		Note: The READ HEADER (0x44) command used in this routine is
 *		obsolete per the SCSI MMC spec but still supported in the
 *		MT FUJI vendor spec. Most equipment is adhering to MT FUJI,
 *		therefore the command is still implemented in this routine.
 *
 * Arguments: dev - the device 'dev_t'
 *	      data - pointer to user provided toc entry structure,
 *		     specifying the track # and the address format
 *		     (LBA or MSF).
 *	      flag - this argument is a pass through to ddi_copyxxx()
 *		     directly from the mode argument of ioctl().
 *
 * Return Code: the code returned by sd_send_scsi_cmd()
 *		EFAULT if ddi_copyxxx() fails
 *		ENXIO if ddi_get_soft_state fails
 *		EINVAL if data pointer is NULL
 */

static int
sr_read_tocentry(dev_t dev, caddr_t data, int flag)
{
	struct sd_lun		*un = NULL;
	struct uscsi_cmd	*com;
	struct cdrom_tocentry	toc_entry;
	struct cdrom_tocentry	*entry = &toc_entry;
	caddr_t			buffer;
	int			rval;
	char			cdb[CDB_GROUP1];

	if (data == NULL) {
		return (EINVAL);
	}

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
	    (un->un_state == SD_STATE_OFFLINE)) {
		return (ENXIO);
	}

	if (ddi_copyin(data, entry, sizeof (struct cdrom_tocentry), flag)) {
		return (EFAULT);
	}

	/* Validate the requested track and address format */
	if (!(entry->cdte_format & (CDROM_LBA | CDROM_MSF))) {
		return (EINVAL);
	}

	if (entry->cdte_track == 0) {
		return (EINVAL);
	}

	buffer = kmem_zalloc((size_t)12, KM_SLEEP);
	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
	bzero(cdb, CDB_GROUP1);

	cdb[0] = SCMD_READ_TOC;
	/* Set the MSF bit based on the user requested address format */
	cdb[1] = ((entry->cdte_format & CDROM_LBA) ? 0 : 2);
	if (un->un_f_cfg_read_toc_trk_bcd == TRUE) {
		cdb[6] = BYTE_TO_BCD(entry->cdte_track);
	} else {
		cdb[6] = entry->cdte_track;
	}

	/*
	 * Bytes 7 & 8 are the 12 byte allocation length for a single entry.
	 * (4 byte TOC response header + 8 byte track descriptor)
	 */
	cdb[8] = 12;
	com->uscsi_cdb = cdb;
	com->uscsi_cdblen = CDB_GROUP1;
	com->uscsi_bufaddr = buffer;
	com->uscsi_buflen = 0x0C;
	com->uscsi_flags = (USCSI_DIAGNOSE | USCSI_SILENT | USCSI_READ);
	rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
	    SD_PATH_STANDARD);
	if (rval != 0) {
		kmem_free(buffer, 12);
		kmem_free(com, sizeof (*com));
		return (rval);
	}

	/* Process the toc entry */
	entry->cdte_adr = (buffer[5] & 0xF0) >> 4;
	entry->cdte_ctrl = (buffer[5] & 0x0F);
	if (entry->cdte_format & CDROM_LBA) {
		entry->cdte_addr.lba =
		    ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) +
		    ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]);
	} else if (un->un_f_cfg_read_toc_addr_bcd == TRUE) {
		entry->cdte_addr.msf.minute = BCD_TO_BYTE(buffer[9]);
		entry->cdte_addr.msf.second = BCD_TO_BYTE(buffer[10]);
		entry->cdte_addr.msf.frame = BCD_TO_BYTE(buffer[11]);
		/*
		 * Send a READ TOC command using the LBA address format to get
		 * the LBA for the track requested so it can be used in the
		 * READ HEADER request
		 *
		 * Note: The MSF bit of the READ HEADER command specifies the
		 * output format. The block address specified in that command
		 * must be in LBA format.
		 */
		cdb[1] = 0;
		rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
		    SD_PATH_STANDARD);
		if (rval != 0) {
			kmem_free(buffer, 12);
			kmem_free(com, sizeof (*com));
			return (rval);
		}
	} else {
		entry->cdte_addr.msf.minute = buffer[9];
		entry->cdte_addr.msf.second = buffer[10];
		entry->cdte_addr.msf.frame = buffer[11];
		/*
		 * Send a READ TOC command using the LBA address format to get
		 * the LBA for the track requested so it can be used in the
		 * READ HEADER request
		 *
		 * Note: The MSF bit of the READ HEADER command specifies the
		 * output format. The block address specified in that command
		 * must be in LBA format.
		 */
		cdb[1] = 0;
		rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
		    SD_PATH_STANDARD);
		if (rval != 0) {
			kmem_free(buffer, 12);
			kmem_free(com, sizeof (*com));
			return (rval);
		}
	}

	/*
	 * Build and send the READ HEADER command to determine the data mode of
	 * the user specified track.
	 */
	if ((entry->cdte_ctrl & CDROM_DATA_TRACK) &&
	    (entry->cdte_track != CDROM_LEADOUT)) {
		bzero(cdb, CDB_GROUP1);
		cdb[0] = SCMD_READ_HEADER;
		cdb[2] = buffer[8];
		cdb[3] = buffer[9];
		cdb[4] = buffer[10];
		cdb[5] = buffer[11];
		cdb[8] = 0x08;
		com->uscsi_buflen = 0x08;
		rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
		    SD_PATH_STANDARD);
		if (rval == 0) {
			entry->cdte_datamode = buffer[0];
		} else {
			/*
			 * The READ HEADER command failed; since the command is
			 * obsolete in one spec, it's better to return -1 for an
			 * invalid track so that we can still
			 * receive the rest of the TOC data.
			 */
			entry->cdte_datamode = (uchar_t)-1;
		}
	} else {
		entry->cdte_datamode = (uchar_t)-1;
	}

	kmem_free(buffer, 12);
	kmem_free(com, sizeof (*com));
	if (ddi_copyout(entry, data, sizeof (struct cdrom_tocentry), flag) != 0)
		return (EFAULT);

	return (rval);
}


/*
 * Function: sr_read_tochdr()
 *
 * Description: This routine is the driver entry point for handling CD-ROM
 *		ioctl requests to read the Table of Contents (TOC) header
 *		(CDROMREADTOCHDR). The TOC header consists of the disk starting
 *		and ending track numbers.
 *
 * Arguments: dev - the device 'dev_t'
 *	      data - pointer to user provided toc header structure,
 *		     specifying the starting and ending track numbers.
 *	      flag - this argument is a pass through to ddi_copyxxx()
 *		     directly from the mode argument of ioctl().
 *
 * Return Code: the code returned by sd_send_scsi_cmd()
 *		EFAULT if ddi_copyxxx() fails
 *		ENXIO if ddi_get_soft_state fails
 *		EINVAL if data pointer is NULL
 */

static int
sr_read_tochdr(dev_t dev, caddr_t data, int flag)
{
	struct sd_lun		*un;
	struct uscsi_cmd	*com;
	struct cdrom_tochdr	toc_header;
	struct cdrom_tochdr	*hdr = &toc_header;
	char			cdb[CDB_GROUP1];
	int			rval;
	caddr_t			buffer;

	if (data == NULL) {
		return (EINVAL);
	}

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
	    (un->un_state == SD_STATE_OFFLINE)) {
		return (ENXIO);
	}

	buffer = kmem_zalloc(4, KM_SLEEP);
	bzero(cdb, CDB_GROUP1);
	cdb[0] = SCMD_READ_TOC;
	/*
	 * Specifying a track number of 0x00 in the READ TOC command indicates
	 * that the TOC header should be returned
	 */
	cdb[6] = 0x00;
	/*
	 * Bytes 7 & 8 are the 4 byte allocation length for TOC header.
	 * (2 byte data len + 1 byte starting track # + 1 byte ending track #)
	 */
	cdb[8] = 0x04;
	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
	com->uscsi_cdb = cdb;
	com->uscsi_cdblen = CDB_GROUP1;
	com->uscsi_bufaddr = buffer;
	com->uscsi_buflen = 0x04;
	com->uscsi_timeout = 300;
	com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;

	rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
	    SD_PATH_STANDARD);
	if (un->un_f_cfg_read_toc_trk_bcd == TRUE) {
		hdr->cdth_trk0 = BCD_TO_BYTE(buffer[2]);
		hdr->cdth_trk1 = BCD_TO_BYTE(buffer[3]);
	} else {
		hdr->cdth_trk0 = buffer[2];
		hdr->cdth_trk1 = buffer[3];
	}
	kmem_free(buffer, 4);
	kmem_free(com, sizeof (*com));
	if (ddi_copyout(hdr, data, sizeof (struct cdrom_tochdr), flag) != 0) {
		return (EFAULT);
	}
	return (rval);
}


/*
 * Note: The following sr_read_mode1(), sr_read_cd_mode2(), sr_read_mode2(),
 * sr_read_cdda(), and sr_read_cdxa() routines implement driver support for
 * handling CDROMREAD ioctl requests for mode 1 user data, mode 2 user data,
 * digital audio and extended architecture digital audio. These modes are
 * defined in the IEC908 (Red Book), ISO10149 (Yellow Book), and the SCSI3
 * MMC specs.
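 *
 * For example, a CDROMREADMODE1 caller fills in a cdrom_read structure
 * (a hypothetical user-level sketch; reads 2048 bytes of mode 1 user
 * data from the given lba):
 *
 *	struct cdrom_read cr;
 *	char buf[2048];
 *	cr.cdread_lba = 16;
 *	cr.cdread_bufaddr = buf;
 *	cr.cdread_buflen = sizeof (buf);
 *	(void) ioctl(fd, CDROMREADMODE1, &cr);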
25024 * 25025 * In addition to support for the various data formats these routines also 25026 * include support for devices that implement only the direct access READ 25027 * commands (0x08, 0x28), devices that implement the READ_CD commands 25028 * (0xBE, 0xD4), and devices that implement the vendor unique READ CDDA and 25029 * READ CDXA commands (0xD8, 0xDB) 25030 */ 25031 25032 /* 25033 * Function: sr_read_mode1() 25034 * 25035 * Description: This routine is the driver entry point for handling CD-ROM 25036 * ioctl read mode1 requests (CDROMREADMODE1). 25037 * 25038 * Arguments: dev - the device 'dev_t' 25039 * data - pointer to user provided cd read structure specifying 25040 * the lba buffer address and length. 25041 * flag - this argument is a pass through to ddi_copyxxx() 25042 * directly from the mode argument of ioctl(). 25043 * 25044 * Return Code: the code returned by sd_send_scsi_cmd() 25045 * EFAULT if ddi_copyxxx() fails 25046 * ENXIO if fail ddi_get_soft_state 25047 * EINVAL if data pointer is NULL 25048 */ 25049 25050 static int 25051 sr_read_mode1(dev_t dev, caddr_t data, int flag) 25052 { 25053 struct sd_lun *un; 25054 struct cdrom_read mode1_struct; 25055 struct cdrom_read *mode1 = &mode1_struct; 25056 int rval; 25057 #ifdef _MULTI_DATAMODEL 25058 /* To support ILP32 applications in an LP64 world */ 25059 struct cdrom_read32 cdrom_read32; 25060 struct cdrom_read32 *cdrd32 = &cdrom_read32; 25061 #endif /* _MULTI_DATAMODEL */ 25062 25063 if (data == NULL) { 25064 return (EINVAL); 25065 } 25066 25067 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 25068 (un->un_state == SD_STATE_OFFLINE)) { 25069 return (ENXIO); 25070 } 25071 25072 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 25073 "sd_read_mode1: entry: un:0x%p\n", un); 25074 25075 #ifdef _MULTI_DATAMODEL 25076 switch (ddi_model_convert_from(flag & FMODELS)) { 25077 case DDI_MODEL_ILP32: 25078 if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) { 25079 return (EFAULT); 25080 } 25081 /* Convert the ILP32 uscsi data from the application to LP64 */ 25082 cdrom_read32tocdrom_read(cdrd32, mode1); 25083 break; 25084 case DDI_MODEL_NONE: 25085 if (ddi_copyin(data, mode1, sizeof (struct cdrom_read), flag)) { 25086 return (EFAULT); 25087 } 25088 } 25089 #else /* ! _MULTI_DATAMODEL */ 25090 if (ddi_copyin(data, mode1, sizeof (struct cdrom_read), flag)) { 25091 return (EFAULT); 25092 } 25093 #endif /* _MULTI_DATAMODEL */ 25094 25095 rval = sd_send_scsi_READ(un, mode1->cdread_bufaddr, 25096 mode1->cdread_buflen, mode1->cdread_lba, SD_PATH_STANDARD); 25097 25098 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 25099 "sd_read_mode1: exit: un:0x%p\n", un); 25100 25101 return (rval); 25102 } 25103 25104 25105 /* 25106 * Function: sr_read_cd_mode2() 25107 * 25108 * Description: This routine is the driver entry point for handling CD-ROM 25109 * ioctl read mode2 requests (CDROMREADMODE2) for devices that 25110 * support the READ CD (0xBE) command or the 1st generation 25111 * READ CD (0xD4) command. 25112 * 25113 * Arguments: dev - the device 'dev_t' 25114 * data - pointer to user provided cd read structure specifying 25115 * the lba buffer address and length. 25116 * flag - this argument is a pass through to ddi_copyxxx() 25117 * directly from the mode argument of ioctl(). 
 *
 * Return Code: the code returned by sd_send_scsi_cmd()
 *		EFAULT if ddi_copyxxx() fails
 *		ENXIO if ddi_get_soft_state fails
 *		EINVAL if data pointer is NULL
 */

static int
sr_read_cd_mode2(dev_t dev, caddr_t data, int flag)
{
	struct sd_lun		*un;
	struct uscsi_cmd	*com;
	struct cdrom_read	mode2_struct;
	struct cdrom_read	*mode2 = &mode2_struct;
	uchar_t			cdb[CDB_GROUP5];
	int			nblocks;
	int			rval;
#ifdef _MULTI_DATAMODEL
	/* To support ILP32 applications in an LP64 world */
	struct cdrom_read32	cdrom_read32;
	struct cdrom_read32	*cdrd32 = &cdrom_read32;
#endif /* _MULTI_DATAMODEL */

	if (data == NULL) {
		return (EINVAL);
	}

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
	    (un->un_state == SD_STATE_OFFLINE)) {
		return (ENXIO);
	}

#ifdef _MULTI_DATAMODEL
	switch (ddi_model_convert_from(flag & FMODELS)) {
	case DDI_MODEL_ILP32:
		if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) {
			return (EFAULT);
		}
		/* Convert the ILP32 uscsi data from the application to LP64 */
		cdrom_read32tocdrom_read(cdrd32, mode2);
		break;
	case DDI_MODEL_NONE:
		if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) {
			return (EFAULT);
		}
		break;
	}

#else /* ! _MULTI_DATAMODEL */
	if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) {
		return (EFAULT);
	}
#endif /* _MULTI_DATAMODEL */

	bzero(cdb, sizeof (cdb));
	if (un->un_f_cfg_read_cd_xd4 == TRUE) {
		/* Read command supported by 1st generation atapi drives */
		cdb[0] = SCMD_READ_CDD4;
	} else {
		/* Universal CD Access Command */
		cdb[0] = SCMD_READ_CD;
	}

	/*
	 * Set expected sector type to: 2336 byte, Mode 2 Yellow Book
	 */
	cdb[1] = CDROM_SECTOR_TYPE_MODE2;

	/* set the start address */
	cdb[2] = (uchar_t)((mode2->cdread_lba >> 24) & 0XFF);
	cdb[3] = (uchar_t)((mode2->cdread_lba >> 16) & 0XFF);
	cdb[4] = (uchar_t)((mode2->cdread_lba >> 8) & 0xFF);
	cdb[5] = (uchar_t)(mode2->cdread_lba & 0xFF);

	/* set the transfer length */
	nblocks = mode2->cdread_buflen / 2336;
	cdb[6] = (uchar_t)(nblocks >> 16);
	cdb[7] = (uchar_t)(nblocks >> 8);
	cdb[8] = (uchar_t)nblocks;

	/* set the filter bits */
	cdb[9] = CDROM_READ_CD_USERDATA;

	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
	com->uscsi_cdb = (caddr_t)cdb;
	com->uscsi_cdblen = sizeof (cdb);
	com->uscsi_bufaddr = mode2->cdread_bufaddr;
	com->uscsi_buflen = mode2->cdread_buflen;
	com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;

	rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE,
	    SD_PATH_STANDARD);
	kmem_free(com, sizeof (*com));
	return (rval);
}


/*
 * Function: sr_read_mode2()
 *
 * Description: This routine is the driver entry point for handling CD-ROM
 *		ioctl read mode2 requests (CDROMREADMODE2) for devices that
 *		do not support the READ CD (0xBE) command.
 *
 * Arguments: dev - the device 'dev_t'
 *	      data - pointer to user provided cd read structure specifying
 *		     the lba buffer address and length.
 *	      flag - this argument is a pass through to ddi_copyxxx()
 *		     directly from the mode argument of ioctl().
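 *
 *	      Worked example (hedged): cdread_lba is given in 512-byte
 *	      units, so a caller wanting the second 2336-byte sector
 *	      passes cdread_lba = 4; the lba >>= 2 adjustment in the body
 *	      of this routine maps that to target block 1 once the device
 *	      has been switched to the 2336-byte block size.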
25227 * 25228 * Return Code: the code returned by sd_send_scsi_cmd() 25229 * EFAULT if ddi_copyxxx() fails 25230 * ENXIO if fail ddi_get_soft_state 25231 * EINVAL if data pointer is NULL 25232 * EIO if fail to reset block size 25233 * EAGAIN if commands are in progress in the driver 25234 */ 25235 25236 static int 25237 sr_read_mode2(dev_t dev, caddr_t data, int flag) 25238 { 25239 struct sd_lun *un; 25240 struct cdrom_read mode2_struct; 25241 struct cdrom_read *mode2 = &mode2_struct; 25242 int rval; 25243 uint32_t restore_blksize; 25244 struct uscsi_cmd *com; 25245 uchar_t cdb[CDB_GROUP0]; 25246 int nblocks; 25247 25248 #ifdef _MULTI_DATAMODEL 25249 /* To support ILP32 applications in an LP64 world */ 25250 struct cdrom_read32 cdrom_read32; 25251 struct cdrom_read32 *cdrd32 = &cdrom_read32; 25252 #endif /* _MULTI_DATAMODEL */ 25253 25254 if (data == NULL) { 25255 return (EINVAL); 25256 } 25257 25258 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 25259 (un->un_state == SD_STATE_OFFLINE)) { 25260 return (ENXIO); 25261 } 25262 25263 /* 25264 * Because this routine will update the device and driver block size 25265 * being used we want to make sure there are no commands in progress. 25266 * If commands are in progress the user will have to try again. 25267 * 25268 * We check for 1 instead of 0 because we increment un_ncmds_in_driver 25269 * in sdioctl to protect commands from sdioctl through to the top of 25270 * sd_uscsi_strategy. See sdioctl for details. 25271 */ 25272 mutex_enter(SD_MUTEX(un)); 25273 if (un->un_ncmds_in_driver != 1) { 25274 mutex_exit(SD_MUTEX(un)); 25275 return (EAGAIN); 25276 } 25277 mutex_exit(SD_MUTEX(un)); 25278 25279 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 25280 "sd_read_mode2: entry: un:0x%p\n", un); 25281 25282 #ifdef _MULTI_DATAMODEL 25283 switch (ddi_model_convert_from(flag & FMODELS)) { 25284 case DDI_MODEL_ILP32: 25285 if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) { 25286 return (EFAULT); 25287 } 25288 /* Convert the ILP32 uscsi data from the application to LP64 */ 25289 cdrom_read32tocdrom_read(cdrd32, mode2); 25290 break; 25291 case DDI_MODEL_NONE: 25292 if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) { 25293 return (EFAULT); 25294 } 25295 break; 25296 } 25297 #else /* ! 
_MULTI_DATAMODEL */ 25298 if (ddi_copyin(data, mode2, sizeof (*mode2), flag)) { 25299 return (EFAULT); 25300 } 25301 #endif /* _MULTI_DATAMODEL */ 25302 25303 /* Store the current target block size for restoration later */ 25304 restore_blksize = un->un_tgt_blocksize; 25305 25306 /* Change the device and soft state target block size to 2336 */ 25307 if (sr_sector_mode(dev, SD_MODE2_BLKSIZE) != 0) { 25308 rval = EIO; 25309 goto done; 25310 } 25311 25312 25313 bzero(cdb, sizeof (cdb)); 25314 25315 /* set READ operation */ 25316 cdb[0] = SCMD_READ; 25317 25318 /* adjust lba for 2kbyte blocks from 512 byte blocks */ 25319 mode2->cdread_lba >>= 2; 25320 25321 /* set the start address */ 25322 cdb[1] = (uchar_t)((mode2->cdread_lba >> 16) & 0X1F); 25323 cdb[2] = (uchar_t)((mode2->cdread_lba >> 8) & 0xFF); 25324 cdb[3] = (uchar_t)(mode2->cdread_lba & 0xFF); 25325 25326 /* set the transfer length */ 25327 nblocks = mode2->cdread_buflen / 2336; 25328 cdb[4] = (uchar_t)nblocks & 0xFF; 25329 25330 /* build command */ 25331 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 25332 com->uscsi_cdb = (caddr_t)cdb; 25333 com->uscsi_cdblen = sizeof (cdb); 25334 com->uscsi_bufaddr = mode2->cdread_bufaddr; 25335 com->uscsi_buflen = mode2->cdread_buflen; 25336 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 25337 25338 /* 25339 * Issue SCSI command with user space address for read buffer. 25340 * 25341 * This sends the command through main channel in the driver. 25342 * 25343 * Since this is accessed via an IOCTL call, we go through the 25344 * standard path, so that if the device was powered down, then 25345 * it would be 'awakened' to handle the command. 25346 */ 25347 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE, 25348 SD_PATH_STANDARD); 25349 25350 kmem_free(com, sizeof (*com)); 25351 25352 /* Restore the device and soft state target block size */ 25353 if (sr_sector_mode(dev, restore_blksize) != 0) { 25354 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25355 "can't do switch back to mode 1\n"); 25356 /* 25357 * If sd_send_scsi_READ succeeded we still need to report 25358 * an error because we failed to reset the block size 25359 */ 25360 if (rval == 0) { 25361 rval = EIO; 25362 } 25363 } 25364 25365 done: 25366 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 25367 "sd_read_mode2: exit: un:0x%p\n", un); 25368 25369 return (rval); 25370 } 25371 25372 25373 /* 25374 * Function: sr_sector_mode() 25375 * 25376 * Description: This utility function is used by sr_read_mode2 to set the target 25377 * block size based on the user specified size. This is a legacy 25378 * implementation based upon a vendor specific mode page 25379 * 25380 * Arguments: dev - the device 'dev_t' 25381 * data - flag indicating if block size is being set to 2336 or 25382 * 512. 
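 *
 *	      Called internally; a sketch of the existing call pattern in
 *	      sr_read_mode2() (not a new interface):
 *
 *		(void) sr_sector_mode(dev, SD_MODE2_BLKSIZE);
 *		... issue the 2336-byte reads ...
 *		(void) sr_sector_mode(dev, restore_blksize);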
25383 * 25384 * Return Code: the code returned by sd_send_scsi_cmd() 25385 * EFAULT if ddi_copyxxx() fails 25386 * ENXIO if fail ddi_get_soft_state 25387 * EINVAL if data pointer is NULL 25388 */ 25389 25390 static int 25391 sr_sector_mode(dev_t dev, uint32_t blksize) 25392 { 25393 struct sd_lun *un; 25394 uchar_t *sense; 25395 uchar_t *select; 25396 int rval; 25397 25398 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 25399 (un->un_state == SD_STATE_OFFLINE)) { 25400 return (ENXIO); 25401 } 25402 25403 sense = kmem_zalloc(20, KM_SLEEP); 25404 25405 /* Note: This is a vendor specific mode page (0x81) */ 25406 if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, sense, 20, 0x81, 25407 SD_PATH_STANDARD)) != 0) { 25408 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 25409 "sr_sector_mode: Mode Sense failed\n"); 25410 kmem_free(sense, 20); 25411 return (rval); 25412 } 25413 select = kmem_zalloc(20, KM_SLEEP); 25414 select[3] = 0x08; 25415 select[10] = ((blksize >> 8) & 0xff); 25416 select[11] = (blksize & 0xff); 25417 select[12] = 0x01; 25418 select[13] = 0x06; 25419 select[14] = sense[14]; 25420 select[15] = sense[15]; 25421 if (blksize == SD_MODE2_BLKSIZE) { 25422 select[14] |= 0x01; 25423 } 25424 25425 if ((rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, select, 20, 25426 SD_DONTSAVE_PAGE, SD_PATH_STANDARD)) != 0) { 25427 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 25428 "sr_sector_mode: Mode Select failed\n"); 25429 } else { 25430 /* 25431 * Only update the softstate block size if we successfully 25432 * changed the device block mode. 25433 */ 25434 mutex_enter(SD_MUTEX(un)); 25435 sd_update_block_info(un, blksize, 0); 25436 mutex_exit(SD_MUTEX(un)); 25437 } 25438 kmem_free(sense, 20); 25439 kmem_free(select, 20); 25440 return (rval); 25441 } 25442 25443 25444 /* 25445 * Function: sr_read_cdda() 25446 * 25447 * Description: This routine is the driver entry point for handling CD-ROM 25448 * ioctl requests to return CD-DA or subcode data. (CDROMCDDA) If 25449 * the target supports CDDA these requests are handled via a vendor 25450 * specific command (0xD8) If the target does not support CDDA 25451 * these requests are handled via the READ CD command (0xBE). 25452 * 25453 * Arguments: dev - the device 'dev_t' 25454 * data - pointer to user provided CD-DA structure specifying 25455 * the track starting address, transfer length, and 25456 * subcode options. 25457 * flag - this argument is a pass through to ddi_copyxxx() 25458 * directly from the mode argument of ioctl(). 
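 *
 *	      A user-level sketch (hypothetical fd; reads one raw
 *	      2352-byte audio frame with no subcode, using the field
 *	      names consumed by this routine):
 *
 *		struct cdrom_cdda cdda;
 *		uchar_t buf[2352];
 *		cdda.cdda_addr = 0;
 *		cdda.cdda_length = 1;
 *		cdda.cdda_data = (caddr_t)buf;
 *		cdda.cdda_subcode = CDROM_DA_NO_SUBCODE;
 *		(void) ioctl(fd, CDROMCDDA, &cdda);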
25459 * 25460 * Return Code: the code returned by sd_send_scsi_cmd() 25461 * EFAULT if ddi_copyxxx() fails 25462 * ENXIO if fail ddi_get_soft_state 25463 * EINVAL if invalid arguments are provided 25464 * ENOTTY 25465 */ 25466 25467 static int 25468 sr_read_cdda(dev_t dev, caddr_t data, int flag) 25469 { 25470 struct sd_lun *un; 25471 struct uscsi_cmd *com; 25472 struct cdrom_cdda *cdda; 25473 int rval; 25474 size_t buflen; 25475 char cdb[CDB_GROUP5]; 25476 25477 #ifdef _MULTI_DATAMODEL 25478 /* To support ILP32 applications in an LP64 world */ 25479 struct cdrom_cdda32 cdrom_cdda32; 25480 struct cdrom_cdda32 *cdda32 = &cdrom_cdda32; 25481 #endif /* _MULTI_DATAMODEL */ 25482 25483 if (data == NULL) { 25484 return (EINVAL); 25485 } 25486 25487 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 25488 return (ENXIO); 25489 } 25490 25491 cdda = kmem_zalloc(sizeof (struct cdrom_cdda), KM_SLEEP); 25492 25493 #ifdef _MULTI_DATAMODEL 25494 switch (ddi_model_convert_from(flag & FMODELS)) { 25495 case DDI_MODEL_ILP32: 25496 if (ddi_copyin(data, cdda32, sizeof (*cdda32), flag)) { 25497 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25498 "sr_read_cdda: ddi_copyin Failed\n"); 25499 kmem_free(cdda, sizeof (struct cdrom_cdda)); 25500 return (EFAULT); 25501 } 25502 /* Convert the ILP32 uscsi data from the application to LP64 */ 25503 cdrom_cdda32tocdrom_cdda(cdda32, cdda); 25504 break; 25505 case DDI_MODEL_NONE: 25506 if (ddi_copyin(data, cdda, sizeof (struct cdrom_cdda), flag)) { 25507 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25508 "sr_read_cdda: ddi_copyin Failed\n"); 25509 kmem_free(cdda, sizeof (struct cdrom_cdda)); 25510 return (EFAULT); 25511 } 25512 break; 25513 } 25514 #else /* ! _MULTI_DATAMODEL */ 25515 if (ddi_copyin(data, cdda, sizeof (struct cdrom_cdda), flag)) { 25516 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25517 "sr_read_cdda: ddi_copyin Failed\n"); 25518 kmem_free(cdda, sizeof (struct cdrom_cdda)); 25519 return (EFAULT); 25520 } 25521 #endif /* _MULTI_DATAMODEL */ 25522 25523 /* 25524 * Since MMC-2 expects max 3 bytes for length, check if the 25525 * length input is greater than 3 bytes 25526 */ 25527 if ((cdda->cdda_length & 0xFF000000) != 0) { 25528 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_read_cdda: " 25529 "cdrom transfer length too large: %d (limit %d)\n", 25530 cdda->cdda_length, 0xFFFFFF); 25531 kmem_free(cdda, sizeof (struct cdrom_cdda)); 25532 return (EINVAL); 25533 } 25534 25535 switch (cdda->cdda_subcode) { 25536 case CDROM_DA_NO_SUBCODE: 25537 buflen = CDROM_BLK_2352 * cdda->cdda_length; 25538 break; 25539 case CDROM_DA_SUBQ: 25540 buflen = CDROM_BLK_2368 * cdda->cdda_length; 25541 break; 25542 case CDROM_DA_ALL_SUBCODE: 25543 buflen = CDROM_BLK_2448 * cdda->cdda_length; 25544 break; 25545 case CDROM_DA_SUBCODE_ONLY: 25546 buflen = CDROM_BLK_SUBCODE * cdda->cdda_length; 25547 break; 25548 default: 25549 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25550 "sr_read_cdda: Subcode '0x%x' Not Supported\n", 25551 cdda->cdda_subcode); 25552 kmem_free(cdda, sizeof (struct cdrom_cdda)); 25553 return (EINVAL); 25554 } 25555 25556 /* Build and send the command */ 25557 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 25558 bzero(cdb, CDB_GROUP5); 25559 25560 if (un->un_f_cfg_cdda == TRUE) { 25561 cdb[0] = (char)SCMD_READ_CD; 25562 cdb[1] = 0x04; 25563 cdb[2] = (((cdda->cdda_addr) & 0xff000000) >> 24); 25564 cdb[3] = (((cdda->cdda_addr) & 0x00ff0000) >> 16); 25565 cdb[4] = (((cdda->cdda_addr) & 0x0000ff00) >> 8); 25566 cdb[5] = ((cdda->cdda_addr) & 0x000000ff); 25567 
cdb[6] = (((cdda->cdda_length) & 0x00ff0000) >> 16); 25568 cdb[7] = (((cdda->cdda_length) & 0x0000ff00) >> 8); 25569 cdb[8] = ((cdda->cdda_length) & 0x000000ff); 25570 cdb[9] = 0x10; 25571 switch (cdda->cdda_subcode) { 25572 case CDROM_DA_NO_SUBCODE : 25573 cdb[10] = 0x0; 25574 break; 25575 case CDROM_DA_SUBQ : 25576 cdb[10] = 0x2; 25577 break; 25578 case CDROM_DA_ALL_SUBCODE : 25579 cdb[10] = 0x1; 25580 break; 25581 case CDROM_DA_SUBCODE_ONLY : 25582 /* FALLTHROUGH */ 25583 default : 25584 kmem_free(cdda, sizeof (struct cdrom_cdda)); 25585 kmem_free(com, sizeof (*com)); 25586 return (ENOTTY); 25587 } 25588 } else { 25589 cdb[0] = (char)SCMD_READ_CDDA; 25590 cdb[2] = (((cdda->cdda_addr) & 0xff000000) >> 24); 25591 cdb[3] = (((cdda->cdda_addr) & 0x00ff0000) >> 16); 25592 cdb[4] = (((cdda->cdda_addr) & 0x0000ff00) >> 8); 25593 cdb[5] = ((cdda->cdda_addr) & 0x000000ff); 25594 cdb[6] = (((cdda->cdda_length) & 0xff000000) >> 24); 25595 cdb[7] = (((cdda->cdda_length) & 0x00ff0000) >> 16); 25596 cdb[8] = (((cdda->cdda_length) & 0x0000ff00) >> 8); 25597 cdb[9] = ((cdda->cdda_length) & 0x000000ff); 25598 cdb[10] = cdda->cdda_subcode; 25599 } 25600 25601 com->uscsi_cdb = cdb; 25602 com->uscsi_cdblen = CDB_GROUP5; 25603 com->uscsi_bufaddr = (caddr_t)cdda->cdda_data; 25604 com->uscsi_buflen = buflen; 25605 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 25606 25607 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE, 25608 SD_PATH_STANDARD); 25609 25610 kmem_free(cdda, sizeof (struct cdrom_cdda)); 25611 kmem_free(com, sizeof (*com)); 25612 return (rval); 25613 } 25614 25615 25616 /* 25617 * Function: sr_read_cdxa() 25618 * 25619 * Description: This routine is the driver entry point for handling CD-ROM 25620 * ioctl requests to return CD-XA (Extended Architecture) data. 25621 * (CDROMCDXA). 25622 * 25623 * Arguments: dev - the device 'dev_t' 25624 * data - pointer to user provided CD-XA structure specifying 25625 * the data starting address, transfer length, and format 25626 * flag - this argument is a pass through to ddi_copyxxx() 25627 * directly from the mode argument of ioctl(). 25628 * 25629 * Return Code: the code returned by sd_send_scsi_cmd() 25630 * EFAULT if ddi_copyxxx() fails 25631 * ENXIO if fail ddi_get_soft_state 25632 * EINVAL if data pointer is NULL 25633 */ 25634 25635 static int 25636 sr_read_cdxa(dev_t dev, caddr_t data, int flag) 25637 { 25638 struct sd_lun *un; 25639 struct uscsi_cmd *com; 25640 struct cdrom_cdxa *cdxa; 25641 int rval; 25642 size_t buflen; 25643 char cdb[CDB_GROUP5]; 25644 uchar_t read_flags; 25645 25646 #ifdef _MULTI_DATAMODEL 25647 /* To support ILP32 applications in an LP64 world */ 25648 struct cdrom_cdxa32 cdrom_cdxa32; 25649 struct cdrom_cdxa32 *cdxa32 = &cdrom_cdxa32; 25650 #endif /* _MULTI_DATAMODEL */ 25651 25652 if (data == NULL) { 25653 return (EINVAL); 25654 } 25655 25656 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 25657 return (ENXIO); 25658 } 25659 25660 cdxa = kmem_zalloc(sizeof (struct cdrom_cdxa), KM_SLEEP); 25661 25662 #ifdef _MULTI_DATAMODEL 25663 switch (ddi_model_convert_from(flag & FMODELS)) { 25664 case DDI_MODEL_ILP32: 25665 if (ddi_copyin(data, cdxa32, sizeof (*cdxa32), flag)) { 25666 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 25667 return (EFAULT); 25668 } 25669 /* 25670 * Convert the ILP32 uscsi data from the 25671 * application to LP64 for internal use. 
		 */
		cdrom_cdxa32tocdrom_cdxa(cdxa32, cdxa);
		break;
	case DDI_MODEL_NONE:
		if (ddi_copyin(data, cdxa, sizeof (struct cdrom_cdxa), flag)) {
			kmem_free(cdxa, sizeof (struct cdrom_cdxa));
			return (EFAULT);
		}
		break;
	}
#else /* ! _MULTI_DATAMODEL */
	if (ddi_copyin(data, cdxa, sizeof (struct cdrom_cdxa), flag)) {
		kmem_free(cdxa, sizeof (struct cdrom_cdxa));
		return (EFAULT);
	}
#endif /* _MULTI_DATAMODEL */

	/*
	 * MMC-2 allows at most three bytes for the transfer length, so
	 * reject any length that does not fit in 24 bits.
	 */
	if ((cdxa->cdxa_length & 0xFF000000) != 0) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_read_cdxa: "
		    "cdrom transfer length too large: %d (limit %d)\n",
		    cdxa->cdxa_length, 0xFFFFFF);
		kmem_free(cdxa, sizeof (struct cdrom_cdxa));
		return (EINVAL);
	}

	switch (cdxa->cdxa_format) {
	case CDROM_XA_DATA:
		buflen = CDROM_BLK_2048 * cdxa->cdxa_length;
		read_flags = 0x10;
		break;
	case CDROM_XA_SECTOR_DATA:
		buflen = CDROM_BLK_2352 * cdxa->cdxa_length;
		read_flags = 0xf8;
		break;
	case CDROM_XA_DATA_W_ERROR:
		buflen = CDROM_BLK_2646 * cdxa->cdxa_length;
		read_flags = 0xfc;
		break;
	default:
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sr_read_cdxa: Format '0x%x' Not Supported\n",
		    cdxa->cdxa_format);
		kmem_free(cdxa, sizeof (struct cdrom_cdxa));
		return (EINVAL);
	}

	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
	bzero(cdb, CDB_GROUP5);
	if (un->un_f_mmc_cap == TRUE) {
		cdb[0] = (char)SCMD_READ_CD;
		cdb[2] = (((cdxa->cdxa_addr) & 0xff000000) >> 24);
		cdb[3] = (((cdxa->cdxa_addr) & 0x00ff0000) >> 16);
		cdb[4] = (((cdxa->cdxa_addr) & 0x0000ff00) >> 8);
		cdb[5] = ((cdxa->cdxa_addr) & 0x000000ff);
		cdb[6] = (((cdxa->cdxa_length) & 0x00ff0000) >> 16);
		cdb[7] = (((cdxa->cdxa_length) & 0x0000ff00) >> 8);
		cdb[8] = ((cdxa->cdxa_length) & 0x000000ff);
		cdb[9] = (char)read_flags;
	} else {
		/*
		 * Note: A vendor specific command (0xDB) is being used here
		 * to request a read of all subcodes.
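		 * Such pre-MMC drives are assumed to accept the same
		 * big-endian starting address, but with a full four-byte
		 * transfer length and the raw cdxa_format value in
		 * cdb[10], which is how the CDB is built below.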
25738 */ 25739 cdb[0] = (char)SCMD_READ_CDXA; 25740 cdb[2] = (((cdxa->cdxa_addr) & 0xff000000) >> 24); 25741 cdb[3] = (((cdxa->cdxa_addr) & 0x00ff0000) >> 16); 25742 cdb[4] = (((cdxa->cdxa_addr) & 0x0000ff00) >> 8); 25743 cdb[5] = ((cdxa->cdxa_addr) & 0x000000ff); 25744 cdb[6] = (((cdxa->cdxa_length) & 0xff000000) >> 24); 25745 cdb[7] = (((cdxa->cdxa_length) & 0x00ff0000) >> 16); 25746 cdb[8] = (((cdxa->cdxa_length) & 0x0000ff00) >> 8); 25747 cdb[9] = ((cdxa->cdxa_length) & 0x000000ff); 25748 cdb[10] = cdxa->cdxa_format; 25749 } 25750 com->uscsi_cdb = cdb; 25751 com->uscsi_cdblen = CDB_GROUP5; 25752 com->uscsi_bufaddr = (caddr_t)cdxa->cdxa_data; 25753 com->uscsi_buflen = buflen; 25754 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 25755 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE, 25756 SD_PATH_STANDARD); 25757 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 25758 kmem_free(com, sizeof (*com)); 25759 return (rval); 25760 } 25761 25762 25763 /* 25764 * Function: sr_eject() 25765 * 25766 * Description: This routine is the driver entry point for handling CD-ROM 25767 * eject ioctl requests (FDEJECT, DKIOCEJECT, CDROMEJECT) 25768 * 25769 * Arguments: dev - the device 'dev_t' 25770 * 25771 * Return Code: the code returned by sd_send_scsi_cmd() 25772 */ 25773 25774 static int 25775 sr_eject(dev_t dev) 25776 { 25777 struct sd_lun *un; 25778 int rval; 25779 25780 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 25781 (un->un_state == SD_STATE_OFFLINE)) { 25782 return (ENXIO); 25783 } 25784 25785 /* 25786 * To prevent race conditions with the eject 25787 * command, keep track of an eject command as 25788 * it progresses. If we are already handling 25789 * an eject command in the driver for the given 25790 * unit and another request to eject is received 25791 * immediately return EAGAIN so we don't lose 25792 * the command if the current eject command fails. 25793 */ 25794 mutex_enter(SD_MUTEX(un)); 25795 if (un->un_f_ejecting == TRUE) { 25796 mutex_exit(SD_MUTEX(un)); 25797 return (EAGAIN); 25798 } 25799 un->un_f_ejecting = TRUE; 25800 mutex_exit(SD_MUTEX(un)); 25801 25802 if ((rval = sd_send_scsi_DOORLOCK(un, SD_REMOVAL_ALLOW, 25803 SD_PATH_STANDARD)) != 0) { 25804 mutex_enter(SD_MUTEX(un)); 25805 un->un_f_ejecting = FALSE; 25806 mutex_exit(SD_MUTEX(un)); 25807 return (rval); 25808 } 25809 25810 rval = sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_EJECT, 25811 SD_PATH_STANDARD); 25812 25813 if (rval == 0) { 25814 mutex_enter(SD_MUTEX(un)); 25815 sr_ejected(un); 25816 un->un_mediastate = DKIO_EJECTED; 25817 un->un_f_ejecting = FALSE; 25818 cv_broadcast(&un->un_state_cv); 25819 mutex_exit(SD_MUTEX(un)); 25820 } else { 25821 mutex_enter(SD_MUTEX(un)); 25822 un->un_f_ejecting = FALSE; 25823 mutex_exit(SD_MUTEX(un)); 25824 } 25825 return (rval); 25826 } 25827 25828 25829 /* 25830 * Function: sr_ejected() 25831 * 25832 * Description: This routine updates the soft state structure to invalidate the 25833 * geometry information after the media has been ejected or a 25834 * media eject has been detected. 
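 *		Specifically (from the body below): the cached block count
 *		and target blocksize are marked invalid, the cmlb label is
 *		invalidated, the capacity error-statistic is zeroed, and the
 *		"device-nblocks" and "device-blksize" properties are removed.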
25835 * 25836 * Arguments: un - driver soft state (unit) structure 25837 */ 25838 25839 static void 25840 sr_ejected(struct sd_lun *un) 25841 { 25842 struct sd_errstats *stp; 25843 25844 ASSERT(un != NULL); 25845 ASSERT(mutex_owned(SD_MUTEX(un))); 25846 25847 un->un_f_blockcount_is_valid = FALSE; 25848 un->un_f_tgt_blocksize_is_valid = FALSE; 25849 mutex_exit(SD_MUTEX(un)); 25850 cmlb_invalidate(un->un_cmlbhandle, (void *)SD_PATH_DIRECT_PRIORITY); 25851 mutex_enter(SD_MUTEX(un)); 25852 25853 if (un->un_errstats != NULL) { 25854 stp = (struct sd_errstats *)un->un_errstats->ks_data; 25855 stp->sd_capacity.value.ui64 = 0; 25856 } 25857 25858 /* remove "capacity-of-device" properties */ 25859 (void) ddi_prop_remove(DDI_DEV_T_NONE, SD_DEVINFO(un), 25860 "device-nblocks"); 25861 (void) ddi_prop_remove(DDI_DEV_T_NONE, SD_DEVINFO(un), 25862 "device-blksize"); 25863 } 25864 25865 25866 /* 25867 * Function: sr_check_wp() 25868 * 25869 * Description: This routine checks the write protection of a removable 25870 * media disk and hotpluggable devices via the write protect bit of 25871 * the Mode Page Header device specific field. Some devices choke 25872 * on unsupported mode page. In order to workaround this issue, 25873 * this routine has been implemented to use 0x3f mode page(request 25874 * for all pages) for all device types. 25875 * 25876 * Arguments: dev - the device 'dev_t' 25877 * 25878 * Return Code: int indicating if the device is write protected (1) or not (0) 25879 * 25880 * Context: Kernel thread. 25881 * 25882 */ 25883 25884 static int 25885 sr_check_wp(dev_t dev) 25886 { 25887 struct sd_lun *un; 25888 uchar_t device_specific; 25889 uchar_t *sense; 25890 int hdrlen; 25891 int rval = FALSE; 25892 25893 /* 25894 * Note: The return codes for this routine should be reworked to 25895 * properly handle the case of a NULL softstate. 25896 */ 25897 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 25898 return (FALSE); 25899 } 25900 25901 if (un->un_f_cfg_is_atapi == TRUE) { 25902 /* 25903 * The mode page contents are not required; set the allocation 25904 * length for the mode page header only 25905 */ 25906 hdrlen = MODE_HEADER_LENGTH_GRP2; 25907 sense = kmem_zalloc(hdrlen, KM_SLEEP); 25908 if (sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, sense, hdrlen, 25909 MODEPAGE_ALLPAGES, SD_PATH_STANDARD) != 0) 25910 goto err_exit; 25911 device_specific = 25912 ((struct mode_header_grp2 *)sense)->device_specific; 25913 } else { 25914 hdrlen = MODE_HEADER_LENGTH; 25915 sense = kmem_zalloc(hdrlen, KM_SLEEP); 25916 if (sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, sense, hdrlen, 25917 MODEPAGE_ALLPAGES, SD_PATH_STANDARD) != 0) 25918 goto err_exit; 25919 device_specific = 25920 ((struct mode_header *)sense)->device_specific; 25921 } 25922 25923 /* 25924 * Write protect mode sense failed; not all disks 25925 * understand this query. Return FALSE assuming that 25926 * these devices are not writable. 25927 */ 25928 if (device_specific & WRITE_PROTECT) { 25929 rval = TRUE; 25930 } 25931 25932 err_exit: 25933 kmem_free(sense, hdrlen); 25934 return (rval); 25935 } 25936 25937 /* 25938 * Function: sr_volume_ctrl() 25939 * 25940 * Description: This routine is the driver entry point for handling CD-ROM 25941 * audio output volume ioctl requests. (CDROMVOLCTRL) 25942 * 25943 * Arguments: dev - the device 'dev_t' 25944 * data - pointer to user audio volume control structure 25945 * flag - this argument is a pass through to ddi_copyxxx() 25946 * directly from the mode argument of ioctl(). 
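 *
 *		A minimal user-level sketch of this ioctl (illustrative
 *		only; the device path and volume value are hypothetical):
 *
 *			struct cdrom_volctrl v;
 *			int fd = open("/dev/rdsk/c0t6d0s2", O_RDONLY);
 *			v.channel0 = v.channel1 = 128;
 *			(void) ioctl(fd, CDROMVOLCTRL, &v);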
25947 * 25948 * Return Code: the code returned by sd_send_scsi_cmd() 25949 * EFAULT if ddi_copyxxx() fails 25950 * ENXIO if fail ddi_get_soft_state 25951 * EINVAL if data pointer is NULL 25952 * 25953 */ 25954 25955 static int 25956 sr_volume_ctrl(dev_t dev, caddr_t data, int flag) 25957 { 25958 struct sd_lun *un; 25959 struct cdrom_volctrl volume; 25960 struct cdrom_volctrl *vol = &volume; 25961 uchar_t *sense_page; 25962 uchar_t *select_page; 25963 uchar_t *sense; 25964 uchar_t *select; 25965 int sense_buflen; 25966 int select_buflen; 25967 int rval; 25968 25969 if (data == NULL) { 25970 return (EINVAL); 25971 } 25972 25973 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 25974 (un->un_state == SD_STATE_OFFLINE)) { 25975 return (ENXIO); 25976 } 25977 25978 if (ddi_copyin(data, vol, sizeof (struct cdrom_volctrl), flag)) { 25979 return (EFAULT); 25980 } 25981 25982 if ((un->un_f_cfg_is_atapi == TRUE) || (un->un_f_mmc_cap == TRUE)) { 25983 struct mode_header_grp2 *sense_mhp; 25984 struct mode_header_grp2 *select_mhp; 25985 int bd_len; 25986 25987 sense_buflen = MODE_PARAM_LENGTH_GRP2 + MODEPAGE_AUDIO_CTRL_LEN; 25988 select_buflen = MODE_HEADER_LENGTH_GRP2 + 25989 MODEPAGE_AUDIO_CTRL_LEN; 25990 sense = kmem_zalloc(sense_buflen, KM_SLEEP); 25991 select = kmem_zalloc(select_buflen, KM_SLEEP); 25992 if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, sense, 25993 sense_buflen, MODEPAGE_AUDIO_CTRL, 25994 SD_PATH_STANDARD)) != 0) { 25995 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 25996 "sr_volume_ctrl: Mode Sense Failed\n"); 25997 kmem_free(sense, sense_buflen); 25998 kmem_free(select, select_buflen); 25999 return (rval); 26000 } 26001 sense_mhp = (struct mode_header_grp2 *)sense; 26002 select_mhp = (struct mode_header_grp2 *)select; 26003 bd_len = (sense_mhp->bdesc_length_hi << 8) | 26004 sense_mhp->bdesc_length_lo; 26005 if (bd_len > MODE_BLK_DESC_LENGTH) { 26006 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26007 "sr_volume_ctrl: Mode Sense returned invalid " 26008 "block descriptor length\n"); 26009 kmem_free(sense, sense_buflen); 26010 kmem_free(select, select_buflen); 26011 return (EIO); 26012 } 26013 sense_page = (uchar_t *) 26014 (sense + MODE_HEADER_LENGTH_GRP2 + bd_len); 26015 select_page = (uchar_t *)(select + MODE_HEADER_LENGTH_GRP2); 26016 select_mhp->length_msb = 0; 26017 select_mhp->length_lsb = 0; 26018 select_mhp->bdesc_length_hi = 0; 26019 select_mhp->bdesc_length_lo = 0; 26020 } else { 26021 struct mode_header *sense_mhp, *select_mhp; 26022 26023 sense_buflen = MODE_PARAM_LENGTH + MODEPAGE_AUDIO_CTRL_LEN; 26024 select_buflen = MODE_HEADER_LENGTH + MODEPAGE_AUDIO_CTRL_LEN; 26025 sense = kmem_zalloc(sense_buflen, KM_SLEEP); 26026 select = kmem_zalloc(select_buflen, KM_SLEEP); 26027 if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, sense, 26028 sense_buflen, MODEPAGE_AUDIO_CTRL, 26029 SD_PATH_STANDARD)) != 0) { 26030 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26031 "sr_volume_ctrl: Mode Sense Failed\n"); 26032 kmem_free(sense, sense_buflen); 26033 kmem_free(select, select_buflen); 26034 return (rval); 26035 } 26036 sense_mhp = (struct mode_header *)sense; 26037 select_mhp = (struct mode_header *)select; 26038 if (sense_mhp->bdesc_length > MODE_BLK_DESC_LENGTH) { 26039 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26040 "sr_volume_ctrl: Mode Sense returned invalid " 26041 "block descriptor length\n"); 26042 kmem_free(sense, sense_buflen); 26043 kmem_free(select, select_buflen); 26044 return (EIO); 26045 } 26046 sense_page = (uchar_t *) 26047 (sense + MODE_HEADER_LENGTH + 
sense_mhp->bdesc_length); 26048 select_page = (uchar_t *)(select + MODE_HEADER_LENGTH); 26049 select_mhp->length = 0; 26050 select_mhp->bdesc_length = 0; 26051 } 26052 /* 26053 * Note: An audio control data structure could be created and overlayed 26054 * on the following in place of the array indexing method implemented. 26055 */ 26056 26057 /* Build the select data for the user volume data */ 26058 select_page[0] = MODEPAGE_AUDIO_CTRL; 26059 select_page[1] = 0xE; 26060 /* Set the immediate bit */ 26061 select_page[2] = 0x04; 26062 /* Zero out reserved fields */ 26063 select_page[3] = 0x00; 26064 select_page[4] = 0x00; 26065 /* Return sense data for fields not to be modified */ 26066 select_page[5] = sense_page[5]; 26067 select_page[6] = sense_page[6]; 26068 select_page[7] = sense_page[7]; 26069 /* Set the user specified volume levels for channel 0 and 1 */ 26070 select_page[8] = 0x01; 26071 select_page[9] = vol->channel0; 26072 select_page[10] = 0x02; 26073 select_page[11] = vol->channel1; 26074 /* Channel 2 and 3 are currently unsupported so return the sense data */ 26075 select_page[12] = sense_page[12]; 26076 select_page[13] = sense_page[13]; 26077 select_page[14] = sense_page[14]; 26078 select_page[15] = sense_page[15]; 26079 26080 if ((un->un_f_cfg_is_atapi == TRUE) || (un->un_f_mmc_cap == TRUE)) { 26081 rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP1, select, 26082 select_buflen, SD_DONTSAVE_PAGE, SD_PATH_STANDARD); 26083 } else { 26084 rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, select, 26085 select_buflen, SD_DONTSAVE_PAGE, SD_PATH_STANDARD); 26086 } 26087 26088 kmem_free(sense, sense_buflen); 26089 kmem_free(select, select_buflen); 26090 return (rval); 26091 } 26092 26093 26094 /* 26095 * Function: sr_read_sony_session_offset() 26096 * 26097 * Description: This routine is the driver entry point for handling CD-ROM 26098 * ioctl requests for session offset information. (CDROMREADOFFSET) 26099 * The address of the first track in the last session of a 26100 * multi-session CD-ROM is returned 26101 * 26102 * Note: This routine uses a vendor specific key value in the 26103 * command control field without implementing any vendor check here 26104 * or in the ioctl routine. 26105 * 26106 * Arguments: dev - the device 'dev_t' 26107 * data - pointer to an int to hold the requested address 26108 * flag - this argument is a pass through to ddi_copyxxx() 26109 * directly from the mode argument of ioctl(). 26110 * 26111 * Return Code: the code returned by sd_send_scsi_cmd() 26112 * EFAULT if ddi_copyxxx() fails 26113 * ENXIO if fail ddi_get_soft_state 26114 * EINVAL if data pointer is NULL 26115 */ 26116 26117 static int 26118 sr_read_sony_session_offset(dev_t dev, caddr_t data, int flag) 26119 { 26120 struct sd_lun *un; 26121 struct uscsi_cmd *com; 26122 caddr_t buffer; 26123 char cdb[CDB_GROUP1]; 26124 int session_offset = 0; 26125 int rval; 26126 26127 if (data == NULL) { 26128 return (EINVAL); 26129 } 26130 26131 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 26132 (un->un_state == SD_STATE_OFFLINE)) { 26133 return (ENXIO); 26134 } 26135 26136 buffer = kmem_zalloc((size_t)SONY_SESSION_OFFSET_LEN, KM_SLEEP); 26137 bzero(cdb, CDB_GROUP1); 26138 cdb[0] = SCMD_READ_TOC; 26139 /* 26140 * Bytes 7 & 8 are the 12 byte allocation length for a single entry. 26141 * (4 byte TOC response header + 8 byte response data) 26142 */ 26143 cdb[8] = SONY_SESSION_OFFSET_LEN; 26144 /* Byte 9 is the control byte. 
A vendor-specific value is used. */
	cdb[9] = SONY_SESSION_OFFSET_KEY;
	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
	com->uscsi_cdb = cdb;
	com->uscsi_cdblen = CDB_GROUP1;
	com->uscsi_bufaddr = buffer;
	com->uscsi_buflen = SONY_SESSION_OFFSET_LEN;
	com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;

	rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
	    SD_PATH_STANDARD);
	if (rval != 0) {
		kmem_free(buffer, SONY_SESSION_OFFSET_LEN);
		kmem_free(com, sizeof (*com));
		return (rval);
	}
	if (buffer[1] == SONY_SESSION_OFFSET_VALID) {
		session_offset =
		    ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) +
		    ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]);
		/*
		 * The drive reports the offset in units of the current
		 * lbasize blocks; convert it to 2K blocks before
		 * returning it to the user.
		 */
		if (un->un_tgt_blocksize == CDROM_BLK_512) {
			session_offset >>= 2;
		} else if (un->un_tgt_blocksize == CDROM_BLK_1024) {
			session_offset >>= 1;
		}
	}

	if (ddi_copyout(&session_offset, data, sizeof (int), flag) != 0) {
		rval = EFAULT;
	}

	kmem_free(buffer, SONY_SESSION_OFFSET_LEN);
	kmem_free(com, sizeof (*com));
	return (rval);
}


/*
 * Function: sd_wm_cache_constructor()
 *
 * Description: Cache constructor for the wmap cache used by
 *		read-modify-write devices.
 *
 * Arguments: wm - A pointer to the sd_w_map to be initialized.
 *		un - sd_lun structure for the device.
 *		flags - the km flags passed to the constructor
 *
 * Return Code: 0 on success.
 *		-1 on failure.
 */

/*ARGSUSED*/
static int
sd_wm_cache_constructor(void *wm, void *un, int flags)
{
	bzero(wm, sizeof (struct sd_w_map));
	cv_init(&((struct sd_w_map *)wm)->wm_avail, NULL, CV_DRIVER, NULL);
	return (0);
}


/*
 * Function: sd_wm_cache_destructor()
 *
 * Description: Cache destructor for the wmap cache used by
 *		read-modify-write devices.
 *
 * Arguments: wm - A pointer to the sd_w_map to be destroyed.
 *		un - sd_lun structure for the device.
 */
/*ARGSUSED*/
static void
sd_wm_cache_destructor(void *wm, void *un)
{
	cv_destroy(&((struct sd_w_map *)wm)->wm_avail);
}


/*
 * Function: sd_range_lock()
 *
 * Description: Lock the specified range of blocks to ensure that a
 *		read-modify-write is atomic and that no other I/O writes
 *		to the same location. The range is specified in terms of
 *		start and end blocks. Block numbers are the actual media
 *		block numbers, not system block numbers.
 *
 * Arguments: un - sd_lun structure for the device.
 *		startb - The starting block number
 *		endb - The end block number
 *		typ - type of i/o - simple/read_modify_write
 *
 * Return Code: wm - pointer to the wmap structure.
 *
 * Context: This routine can sleep.
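 *
 *		The body below is a small state machine; its transitions,
 *		taken from the code that follows, are roughly:
 *
 *		  SD_WM_CHK_LIST   - range free ->   SD_WM_LOCK_RANGE
 *		  SD_WM_CHK_LIST   - range in use -> SD_WM_WAIT_MAP
 *		  SD_WM_LOCK_RANGE - alloc slept ->  SD_WM_CHK_LIST
 *		  SD_WM_LOCK_RANGE - wmap linked ->  SD_WM_DONE
 *		  SD_WM_WAIT_MAP   - woken up ->     SD_WM_CHK_LIST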
26243 */ 26244 26245 static struct sd_w_map * 26246 sd_range_lock(struct sd_lun *un, daddr_t startb, daddr_t endb, ushort_t typ) 26247 { 26248 struct sd_w_map *wmp = NULL; 26249 struct sd_w_map *sl_wmp = NULL; 26250 struct sd_w_map *tmp_wmp; 26251 wm_state state = SD_WM_CHK_LIST; 26252 26253 26254 ASSERT(un != NULL); 26255 ASSERT(!mutex_owned(SD_MUTEX(un))); 26256 26257 mutex_enter(SD_MUTEX(un)); 26258 26259 while (state != SD_WM_DONE) { 26260 26261 switch (state) { 26262 case SD_WM_CHK_LIST: 26263 /* 26264 * This is the starting state. Check the wmap list 26265 * to see if the range is currently available. 26266 */ 26267 if (!(typ & SD_WTYPE_RMW) && !(un->un_rmw_count)) { 26268 /* 26269 * If this is a simple write and no rmw 26270 * i/o is pending then try to lock the 26271 * range as the range should be available. 26272 */ 26273 state = SD_WM_LOCK_RANGE; 26274 } else { 26275 tmp_wmp = sd_get_range(un, startb, endb); 26276 if (tmp_wmp != NULL) { 26277 if ((wmp != NULL) && ONLIST(un, wmp)) { 26278 /* 26279 * Should not keep onlist wmps 26280 * while waiting this macro 26281 * will also do wmp = NULL; 26282 */ 26283 FREE_ONLIST_WMAP(un, wmp); 26284 } 26285 /* 26286 * sl_wmp is the wmap on which wait 26287 * is done, since the tmp_wmp points 26288 * to the inuse wmap, set sl_wmp to 26289 * tmp_wmp and change the state to sleep 26290 */ 26291 sl_wmp = tmp_wmp; 26292 state = SD_WM_WAIT_MAP; 26293 } else { 26294 state = SD_WM_LOCK_RANGE; 26295 } 26296 26297 } 26298 break; 26299 26300 case SD_WM_LOCK_RANGE: 26301 ASSERT(un->un_wm_cache); 26302 /* 26303 * The range need to be locked, try to get a wmap. 26304 * First attempt it with NO_SLEEP, want to avoid a sleep 26305 * if possible as we will have to release the sd mutex 26306 * if we have to sleep. 26307 */ 26308 if (wmp == NULL) 26309 wmp = kmem_cache_alloc(un->un_wm_cache, 26310 KM_NOSLEEP); 26311 if (wmp == NULL) { 26312 mutex_exit(SD_MUTEX(un)); 26313 _NOTE(DATA_READABLE_WITHOUT_LOCK 26314 (sd_lun::un_wm_cache)) 26315 wmp = kmem_cache_alloc(un->un_wm_cache, 26316 KM_SLEEP); 26317 mutex_enter(SD_MUTEX(un)); 26318 /* 26319 * we released the mutex so recheck and go to 26320 * check list state. 26321 */ 26322 state = SD_WM_CHK_LIST; 26323 } else { 26324 /* 26325 * We exit out of state machine since we 26326 * have the wmap. Do the housekeeping first. 26327 * place the wmap on the wmap list if it is not 26328 * on it already and then set the state to done. 26329 */ 26330 wmp->wm_start = startb; 26331 wmp->wm_end = endb; 26332 wmp->wm_flags = typ | SD_WM_BUSY; 26333 if (typ & SD_WTYPE_RMW) { 26334 un->un_rmw_count++; 26335 } 26336 /* 26337 * If not already on the list then link 26338 */ 26339 if (!ONLIST(un, wmp)) { 26340 wmp->wm_next = un->un_wm; 26341 wmp->wm_prev = NULL; 26342 if (wmp->wm_next) 26343 wmp->wm_next->wm_prev = wmp; 26344 un->un_wm = wmp; 26345 } 26346 state = SD_WM_DONE; 26347 } 26348 break; 26349 26350 case SD_WM_WAIT_MAP: 26351 ASSERT(sl_wmp->wm_flags & SD_WM_BUSY); 26352 /* 26353 * Wait is done on sl_wmp, which is set in the 26354 * check_list state. 26355 */ 26356 sl_wmp->wm_wanted_count++; 26357 cv_wait(&sl_wmp->wm_avail, SD_MUTEX(un)); 26358 sl_wmp->wm_wanted_count--; 26359 /* 26360 * We can reuse the memory from the completed sl_wmp 26361 * lock range for our new lock, but only if noone is 26362 * waiting for it. 
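			 * (Reusing sl_wmp avoids another trip through the
			 * kmem cache; if other threads are still waiting on
			 * it, its memory must be left intact so they can
			 * wake up on its wm_avail cv.)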
26363 */ 26364 ASSERT(!(sl_wmp->wm_flags & SD_WM_BUSY)); 26365 if (sl_wmp->wm_wanted_count == 0) { 26366 if (wmp != NULL) 26367 CHK_N_FREEWMP(un, wmp); 26368 wmp = sl_wmp; 26369 } 26370 sl_wmp = NULL; 26371 /* 26372 * After waking up, need to recheck for availability of 26373 * range. 26374 */ 26375 state = SD_WM_CHK_LIST; 26376 break; 26377 26378 default: 26379 panic("sd_range_lock: " 26380 "Unknown state %d in sd_range_lock", state); 26381 /*NOTREACHED*/ 26382 } /* switch(state) */ 26383 26384 } /* while(state != SD_WM_DONE) */ 26385 26386 mutex_exit(SD_MUTEX(un)); 26387 26388 ASSERT(wmp != NULL); 26389 26390 return (wmp); 26391 } 26392 26393 26394 /* 26395 * Function: sd_get_range() 26396 * 26397 * Description: Find if there any overlapping I/O to this one 26398 * Returns the write-map of 1st such I/O, NULL otherwise. 26399 * 26400 * Arguments: un - sd_lun structure for the device. 26401 * startb - The starting block number 26402 * endb - The end block number 26403 * 26404 * Return Code: wm - pointer to the wmap structure. 26405 */ 26406 26407 static struct sd_w_map * 26408 sd_get_range(struct sd_lun *un, daddr_t startb, daddr_t endb) 26409 { 26410 struct sd_w_map *wmp; 26411 26412 ASSERT(un != NULL); 26413 26414 for (wmp = un->un_wm; wmp != NULL; wmp = wmp->wm_next) { 26415 if (!(wmp->wm_flags & SD_WM_BUSY)) { 26416 continue; 26417 } 26418 if ((startb >= wmp->wm_start) && (startb <= wmp->wm_end)) { 26419 break; 26420 } 26421 if ((endb >= wmp->wm_start) && (endb <= wmp->wm_end)) { 26422 break; 26423 } 26424 } 26425 26426 return (wmp); 26427 } 26428 26429 26430 /* 26431 * Function: sd_free_inlist_wmap() 26432 * 26433 * Description: Unlink and free a write map struct. 26434 * 26435 * Arguments: un - sd_lun structure for the device. 26436 * wmp - sd_w_map which needs to be unlinked. 26437 */ 26438 26439 static void 26440 sd_free_inlist_wmap(struct sd_lun *un, struct sd_w_map *wmp) 26441 { 26442 ASSERT(un != NULL); 26443 26444 if (un->un_wm == wmp) { 26445 un->un_wm = wmp->wm_next; 26446 } else { 26447 wmp->wm_prev->wm_next = wmp->wm_next; 26448 } 26449 26450 if (wmp->wm_next) { 26451 wmp->wm_next->wm_prev = wmp->wm_prev; 26452 } 26453 26454 wmp->wm_next = wmp->wm_prev = NULL; 26455 26456 kmem_cache_free(un->un_wm_cache, wmp); 26457 } 26458 26459 26460 /* 26461 * Function: sd_range_unlock() 26462 * 26463 * Description: Unlock the range locked by wm. 26464 * Free write map if nobody else is waiting on it. 26465 * 26466 * Arguments: un - sd_lun structure for the device. 26467 * wmp - sd_w_map which needs to be unlinked. 26468 */ 26469 26470 static void 26471 sd_range_unlock(struct sd_lun *un, struct sd_w_map *wm) 26472 { 26473 ASSERT(un != NULL); 26474 ASSERT(wm != NULL); 26475 ASSERT(!mutex_owned(SD_MUTEX(un))); 26476 26477 mutex_enter(SD_MUTEX(un)); 26478 26479 if (wm->wm_flags & SD_WTYPE_RMW) { 26480 un->un_rmw_count--; 26481 } 26482 26483 if (wm->wm_wanted_count) { 26484 wm->wm_flags = 0; 26485 /* 26486 * Broadcast that the wmap is available now. 26487 */ 26488 cv_broadcast(&wm->wm_avail); 26489 } else { 26490 /* 26491 * If no one is waiting on the map, it should be free'ed. 26492 */ 26493 sd_free_inlist_wmap(un, wm); 26494 } 26495 26496 mutex_exit(SD_MUTEX(un)); 26497 } 26498 26499 26500 /* 26501 * Function: sd_read_modify_write_task 26502 * 26503 * Description: Called from a taskq thread to initiate the write phase of 26504 * a read-modify-write request. This is used for targets where 26505 * un->un_sys_blocksize != un->un_tgt_blocksize. 
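 *		For example (illustrative sizes): with a 512-byte system
 *		blocksize and a 2048-byte target blocksize, a single
 *		512-byte system-block write first reads the containing
 *		2048-byte target block; the write phase initiated here
 *		then sends the merged 2048-byte block back down.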
26506 * 26507 * Arguments: arg - a pointer to the buf(9S) struct for the write command. 26508 * 26509 * Context: Called under taskq thread context. 26510 */ 26511 26512 static void 26513 sd_read_modify_write_task(void *arg) 26514 { 26515 struct sd_mapblocksize_info *bsp; 26516 struct buf *bp; 26517 struct sd_xbuf *xp; 26518 struct sd_lun *un; 26519 26520 bp = arg; /* The bp is given in arg */ 26521 ASSERT(bp != NULL); 26522 26523 /* Get the pointer to the layer-private data struct */ 26524 xp = SD_GET_XBUF(bp); 26525 ASSERT(xp != NULL); 26526 bsp = xp->xb_private; 26527 ASSERT(bsp != NULL); 26528 26529 un = SD_GET_UN(bp); 26530 ASSERT(un != NULL); 26531 ASSERT(!mutex_owned(SD_MUTEX(un))); 26532 26533 SD_TRACE(SD_LOG_IO_RMMEDIA, un, 26534 "sd_read_modify_write_task: entry: buf:0x%p\n", bp); 26535 26536 /* 26537 * This is the write phase of a read-modify-write request, called 26538 * under the context of a taskq thread in response to the completion 26539 * of the read portion of the rmw request completing under interrupt 26540 * context. The write request must be sent from here down the iostart 26541 * chain as if it were being sent from sd_mapblocksize_iostart(), so 26542 * we use the layer index saved in the layer-private data area. 26543 */ 26544 SD_NEXT_IOSTART(bsp->mbs_layer_index, un, bp); 26545 26546 SD_TRACE(SD_LOG_IO_RMMEDIA, un, 26547 "sd_read_modify_write_task: exit: buf:0x%p\n", bp); 26548 } 26549 26550 26551 /* 26552 * Function: sddump_do_read_of_rmw() 26553 * 26554 * Description: This routine will be called from sddump, If sddump is called 26555 * with an I/O which not aligned on device blocksize boundary 26556 * then the write has to be converted to read-modify-write. 26557 * Do the read part here in order to keep sddump simple. 26558 * Note - That the sd_mutex is held across the call to this 26559 * routine. 26560 * 26561 * Arguments: un - sd_lun 26562 * blkno - block number in terms of media block size. 26563 * nblk - number of blocks. 26564 * bpp - pointer to pointer to the buf structure. On return 26565 * from this function, *bpp points to the valid buffer 26566 * to which the write has to be done. 26567 * 26568 * Return Code: 0 for success or errno-type return code 26569 */ 26570 26571 static int 26572 sddump_do_read_of_rmw(struct sd_lun *un, uint64_t blkno, uint64_t nblk, 26573 struct buf **bpp) 26574 { 26575 int err; 26576 int i; 26577 int rval; 26578 struct buf *bp; 26579 struct scsi_pkt *pkt = NULL; 26580 uint32_t target_blocksize; 26581 26582 ASSERT(un != NULL); 26583 ASSERT(mutex_owned(SD_MUTEX(un))); 26584 26585 target_blocksize = un->un_tgt_blocksize; 26586 26587 mutex_exit(SD_MUTEX(un)); 26588 26589 bp = scsi_alloc_consistent_buf(SD_ADDRESS(un), (struct buf *)NULL, 26590 (size_t)(nblk * target_blocksize), B_READ, NULL_FUNC, NULL); 26591 if (bp == NULL) { 26592 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 26593 "no resources for dumping; giving up"); 26594 err = ENOMEM; 26595 goto done; 26596 } 26597 26598 rval = sd_setup_rw_pkt(un, &pkt, bp, 0, NULL_FUNC, NULL, 26599 blkno, nblk); 26600 if (rval != 0) { 26601 scsi_free_consistent_buf(bp); 26602 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 26603 "no resources for dumping; giving up"); 26604 err = ENOMEM; 26605 goto done; 26606 } 26607 26608 pkt->pkt_flags |= FLAG_NOINTR; 26609 26610 err = EIO; 26611 for (i = 0; i < SD_NDUMP_RETRIES; i++) { 26612 26613 /* 26614 * Scsi_poll returns 0 (success) if the command completes and 26615 * the status block is STATUS_GOOD. 
We should only check 26616 * errors if this condition is not true. Even then we should 26617 * send our own request sense packet only if we have a check 26618 * condition and auto request sense has not been performed by 26619 * the hba. 26620 */ 26621 SD_TRACE(SD_LOG_DUMP, un, "sddump: sending read\n"); 26622 26623 if ((sd_scsi_poll(un, pkt) == 0) && (pkt->pkt_resid == 0)) { 26624 err = 0; 26625 break; 26626 } 26627 26628 /* 26629 * Check CMD_DEV_GONE 1st, give up if device is gone, 26630 * no need to read RQS data. 26631 */ 26632 if (pkt->pkt_reason == CMD_DEV_GONE) { 26633 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26634 "Error while dumping state with rmw..." 26635 "Device is gone\n"); 26636 break; 26637 } 26638 26639 if (SD_GET_PKT_STATUS(pkt) == STATUS_CHECK) { 26640 SD_INFO(SD_LOG_DUMP, un, 26641 "sddump: read failed with CHECK, try # %d\n", i); 26642 if (((pkt->pkt_state & STATE_ARQ_DONE) == 0)) { 26643 (void) sd_send_polled_RQS(un); 26644 } 26645 26646 continue; 26647 } 26648 26649 if (SD_GET_PKT_STATUS(pkt) == STATUS_BUSY) { 26650 int reset_retval = 0; 26651 26652 SD_INFO(SD_LOG_DUMP, un, 26653 "sddump: read failed with BUSY, try # %d\n", i); 26654 26655 if (un->un_f_lun_reset_enabled == TRUE) { 26656 reset_retval = scsi_reset(SD_ADDRESS(un), 26657 RESET_LUN); 26658 } 26659 if (reset_retval == 0) { 26660 (void) scsi_reset(SD_ADDRESS(un), RESET_TARGET); 26661 } 26662 (void) sd_send_polled_RQS(un); 26663 26664 } else { 26665 SD_INFO(SD_LOG_DUMP, un, 26666 "sddump: read failed with 0x%x, try # %d\n", 26667 SD_GET_PKT_STATUS(pkt), i); 26668 mutex_enter(SD_MUTEX(un)); 26669 sd_reset_target(un, pkt); 26670 mutex_exit(SD_MUTEX(un)); 26671 } 26672 26673 /* 26674 * If we are not getting anywhere with lun/target resets, 26675 * let's reset the bus. 26676 */ 26677 if (i > SD_NDUMP_RETRIES/2) { 26678 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL); 26679 (void) sd_send_polled_RQS(un); 26680 } 26681 26682 } 26683 scsi_destroy_pkt(pkt); 26684 26685 if (err != 0) { 26686 scsi_free_consistent_buf(bp); 26687 *bpp = NULL; 26688 } else { 26689 *bpp = bp; 26690 } 26691 26692 done: 26693 mutex_enter(SD_MUTEX(un)); 26694 return (err); 26695 } 26696 26697 26698 /* 26699 * Function: sd_failfast_flushq 26700 * 26701 * Description: Take all bp's on the wait queue that have B_FAILFAST set 26702 * in b_flags and move them onto the failfast queue, then kick 26703 * off a thread to return all bp's on the failfast queue to 26704 * their owners with an error set. 26705 * 26706 * Arguments: un - pointer to the soft state struct for the instance. 26707 * 26708 * Context: may execute in interrupt context. 26709 */ 26710 26711 static void 26712 sd_failfast_flushq(struct sd_lun *un) 26713 { 26714 struct buf *bp; 26715 struct buf *next_waitq_bp; 26716 struct buf *prev_waitq_bp = NULL; 26717 26718 ASSERT(un != NULL); 26719 ASSERT(mutex_owned(SD_MUTEX(un))); 26720 ASSERT(un->un_failfast_state == SD_FAILFAST_ACTIVE); 26721 ASSERT(un->un_failfast_bp == NULL); 26722 26723 SD_TRACE(SD_LOG_IO_FAILFAST, un, 26724 "sd_failfast_flushq: entry: un:0x%p\n", un); 26725 26726 /* 26727 * Check if we should flush all bufs when entering failfast state, or 26728 * just those with B_FAILFAST set. 26729 */ 26730 if (sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_BUFS) { 26731 /* 26732 * Move *all* bp's on the wait queue to the failfast flush 26733 * queue, including those that do NOT have B_FAILFAST set. 
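		 * (This matches the policy test in
		 * sd_failfast_flushq_callback() below: when
		 * SD_FAILFAST_FLUSH_ALL_BUFS is set in sd_failfast_flushctl,
		 * every queued buf is failed, not just those marked
		 * B_FAILFAST.)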
26734 */ 26735 if (un->un_failfast_headp == NULL) { 26736 ASSERT(un->un_failfast_tailp == NULL); 26737 un->un_failfast_headp = un->un_waitq_headp; 26738 } else { 26739 ASSERT(un->un_failfast_tailp != NULL); 26740 un->un_failfast_tailp->av_forw = un->un_waitq_headp; 26741 } 26742 26743 un->un_failfast_tailp = un->un_waitq_tailp; 26744 26745 /* update kstat for each bp moved out of the waitq */ 26746 for (bp = un->un_waitq_headp; bp != NULL; bp = bp->av_forw) { 26747 SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp); 26748 } 26749 26750 /* empty the waitq */ 26751 un->un_waitq_headp = un->un_waitq_tailp = NULL; 26752 26753 } else { 26754 /* 26755 * Go thru the wait queue, pick off all entries with 26756 * B_FAILFAST set, and move these onto the failfast queue. 26757 */ 26758 for (bp = un->un_waitq_headp; bp != NULL; bp = next_waitq_bp) { 26759 /* 26760 * Save the pointer to the next bp on the wait queue, 26761 * so we get to it on the next iteration of this loop. 26762 */ 26763 next_waitq_bp = bp->av_forw; 26764 26765 /* 26766 * If this bp from the wait queue does NOT have 26767 * B_FAILFAST set, just move on to the next element 26768 * in the wait queue. Note, this is the only place 26769 * where it is correct to set prev_waitq_bp. 26770 */ 26771 if ((bp->b_flags & B_FAILFAST) == 0) { 26772 prev_waitq_bp = bp; 26773 continue; 26774 } 26775 26776 /* 26777 * Remove the bp from the wait queue. 26778 */ 26779 if (bp == un->un_waitq_headp) { 26780 /* The bp is the first element of the waitq. */ 26781 un->un_waitq_headp = next_waitq_bp; 26782 if (un->un_waitq_headp == NULL) { 26783 /* The wait queue is now empty */ 26784 un->un_waitq_tailp = NULL; 26785 } 26786 } else { 26787 /* 26788 * The bp is either somewhere in the middle 26789 * or at the end of the wait queue. 26790 */ 26791 ASSERT(un->un_waitq_headp != NULL); 26792 ASSERT(prev_waitq_bp != NULL); 26793 ASSERT((prev_waitq_bp->b_flags & B_FAILFAST) 26794 == 0); 26795 if (bp == un->un_waitq_tailp) { 26796 /* bp is the last entry on the waitq. */ 26797 ASSERT(next_waitq_bp == NULL); 26798 un->un_waitq_tailp = prev_waitq_bp; 26799 } 26800 prev_waitq_bp->av_forw = next_waitq_bp; 26801 } 26802 bp->av_forw = NULL; 26803 26804 /* 26805 * update kstat since the bp is moved out of 26806 * the waitq 26807 */ 26808 SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp); 26809 26810 /* 26811 * Now put the bp onto the failfast queue. 26812 */ 26813 if (un->un_failfast_headp == NULL) { 26814 /* failfast queue is currently empty */ 26815 ASSERT(un->un_failfast_tailp == NULL); 26816 un->un_failfast_headp = 26817 un->un_failfast_tailp = bp; 26818 } else { 26819 /* Add the bp to the end of the failfast q */ 26820 ASSERT(un->un_failfast_tailp != NULL); 26821 ASSERT(un->un_failfast_tailp->b_flags & 26822 B_FAILFAST); 26823 un->un_failfast_tailp->av_forw = bp; 26824 un->un_failfast_tailp = bp; 26825 } 26826 } 26827 } 26828 26829 /* 26830 * Now return all bp's on the failfast queue to their owners. 26831 */ 26832 while ((bp = un->un_failfast_headp) != NULL) { 26833 26834 un->un_failfast_headp = bp->av_forw; 26835 if (un->un_failfast_headp == NULL) { 26836 un->un_failfast_tailp = NULL; 26837 } 26838 26839 /* 26840 * We want to return the bp with a failure error code, but 26841 * we do not want a call to sd_start_cmds() to occur here, 26842 * so use sd_return_failed_command_no_restart() instead of 26843 * sd_return_failed_command(). 26844 */ 26845 sd_return_failed_command_no_restart(un, bp, EIO); 26846 } 26847 26848 /* Flush the xbuf queues if required. 
*/ 26849 if (sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_QUEUES) { 26850 ddi_xbuf_flushq(un->un_xbuf_attr, sd_failfast_flushq_callback); 26851 } 26852 26853 SD_TRACE(SD_LOG_IO_FAILFAST, un, 26854 "sd_failfast_flushq: exit: un:0x%p\n", un); 26855 } 26856 26857 26858 /* 26859 * Function: sd_failfast_flushq_callback 26860 * 26861 * Description: Return TRUE if the given bp meets the criteria for failfast 26862 * flushing. Used with ddi_xbuf_flushq(9F). 26863 * 26864 * Arguments: bp - ptr to buf struct to be examined. 26865 * 26866 * Context: Any 26867 */ 26868 26869 static int 26870 sd_failfast_flushq_callback(struct buf *bp) 26871 { 26872 /* 26873 * Return TRUE if (1) we want to flush ALL bufs when the failfast 26874 * state is entered; OR (2) the given bp has B_FAILFAST set. 26875 */ 26876 return (((sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_BUFS) || 26877 (bp->b_flags & B_FAILFAST)) ? TRUE : FALSE); 26878 } 26879 26880 26881 26882 /* 26883 * Function: sd_setup_next_xfer 26884 * 26885 * Description: Prepare next I/O operation using DMA_PARTIAL 26886 * 26887 */ 26888 26889 static int 26890 sd_setup_next_xfer(struct sd_lun *un, struct buf *bp, 26891 struct scsi_pkt *pkt, struct sd_xbuf *xp) 26892 { 26893 ssize_t num_blks_not_xfered; 26894 daddr_t strt_blk_num; 26895 ssize_t bytes_not_xfered; 26896 int rval; 26897 26898 ASSERT(pkt->pkt_resid == 0); 26899 26900 /* 26901 * Calculate next block number and amount to be transferred. 26902 * 26903 * How much data NOT transfered to the HBA yet. 26904 */ 26905 bytes_not_xfered = xp->xb_dma_resid; 26906 26907 /* 26908 * figure how many blocks NOT transfered to the HBA yet. 26909 */ 26910 num_blks_not_xfered = SD_BYTES2TGTBLOCKS(un, bytes_not_xfered); 26911 26912 /* 26913 * set starting block number to the end of what WAS transfered. 26914 */ 26915 strt_blk_num = xp->xb_blkno + 26916 SD_BYTES2TGTBLOCKS(un, bp->b_bcount - bytes_not_xfered); 26917 26918 /* 26919 * Move pkt to the next portion of the xfer. sd_setup_next_rw_pkt 26920 * will call scsi_initpkt with NULL_FUNC so we do not have to release 26921 * the disk mutex here. 26922 */ 26923 rval = sd_setup_next_rw_pkt(un, pkt, bp, 26924 strt_blk_num, num_blks_not_xfered); 26925 26926 if (rval == 0) { 26927 26928 /* 26929 * Success. 26930 * 26931 * Adjust things if there are still more blocks to be 26932 * transfered. 26933 */ 26934 xp->xb_dma_resid = pkt->pkt_resid; 26935 pkt->pkt_resid = 0; 26936 26937 return (1); 26938 } 26939 26940 /* 26941 * There's really only one possible return value from 26942 * sd_setup_next_rw_pkt which occurs when scsi_init_pkt 26943 * returns NULL. 26944 */ 26945 ASSERT(rval == SD_PKT_ALLOC_FAILURE); 26946 26947 bp->b_resid = bp->b_bcount; 26948 bp->b_flags |= B_ERROR; 26949 26950 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26951 "Error setting up next portion of DMA transfer\n"); 26952 26953 return (0); 26954 } 26955 26956 /* 26957 * Function: sd_panic_for_res_conflict 26958 * 26959 * Description: Call panic with a string formatted with "Reservation Conflict" 26960 * and a human readable identifier indicating the SD instance 26961 * that experienced the reservation conflict. 26962 * 26963 * Arguments: un - pointer to the soft state struct for the instance. 26964 * 26965 * Context: may execute in interrupt context. 
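 *
 *		The resulting panic string has the form (the device path
 *		shown is illustrative only):
 *
 *		  Reservation Conflict
 *		  Disk: /pci@1f,4000/scsi@3/sd@1,0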
26966 */ 26967 26968 #define SD_RESV_CONFLICT_FMT_LEN 40 26969 void 26970 sd_panic_for_res_conflict(struct sd_lun *un) 26971 { 26972 char panic_str[SD_RESV_CONFLICT_FMT_LEN+MAXPATHLEN]; 26973 char path_str[MAXPATHLEN]; 26974 26975 (void) snprintf(panic_str, sizeof (panic_str), 26976 "Reservation Conflict\nDisk: %s", 26977 ddi_pathname(SD_DEVINFO(un), path_str)); 26978 26979 panic(panic_str); 26980 } 26981 26982 /* 26983 * Note: The following sd_faultinjection_ioctl( ) routines implement 26984 * driver support for handling fault injection for error analysis 26985 * causing faults in multiple layers of the driver. 26986 * 26987 */ 26988 26989 #ifdef SD_FAULT_INJECTION 26990 static uint_t sd_fault_injection_on = 0; 26991 26992 /* 26993 * Function: sd_faultinjection_ioctl() 26994 * 26995 * Description: This routine is the driver entry point for handling 26996 * faultinjection ioctls to inject errors into the 26997 * layer model 26998 * 26999 * Arguments: cmd - the ioctl cmd received 27000 * arg - the arguments from user and returns 27001 */ 27002 27003 static void 27004 sd_faultinjection_ioctl(int cmd, intptr_t arg, struct sd_lun *un) { 27005 27006 uint_t i; 27007 uint_t rval; 27008 27009 SD_TRACE(SD_LOG_IOERR, un, "sd_faultinjection_ioctl: entry\n"); 27010 27011 mutex_enter(SD_MUTEX(un)); 27012 27013 switch (cmd) { 27014 case SDIOCRUN: 27015 /* Allow pushed faults to be injected */ 27016 SD_INFO(SD_LOG_SDTEST, un, 27017 "sd_faultinjection_ioctl: Injecting Fault Run\n"); 27018 27019 sd_fault_injection_on = 1; 27020 27021 SD_INFO(SD_LOG_IOERR, un, 27022 "sd_faultinjection_ioctl: run finished\n"); 27023 break; 27024 27025 case SDIOCSTART: 27026 /* Start Injection Session */ 27027 SD_INFO(SD_LOG_SDTEST, un, 27028 "sd_faultinjection_ioctl: Injecting Fault Start\n"); 27029 27030 sd_fault_injection_on = 0; 27031 un->sd_injection_mask = 0xFFFFFFFF; 27032 for (i = 0; i < SD_FI_MAX_ERROR; i++) { 27033 un->sd_fi_fifo_pkt[i] = NULL; 27034 un->sd_fi_fifo_xb[i] = NULL; 27035 un->sd_fi_fifo_un[i] = NULL; 27036 un->sd_fi_fifo_arq[i] = NULL; 27037 } 27038 un->sd_fi_fifo_start = 0; 27039 un->sd_fi_fifo_end = 0; 27040 27041 mutex_enter(&(un->un_fi_mutex)); 27042 un->sd_fi_log[0] = '\0'; 27043 un->sd_fi_buf_len = 0; 27044 mutex_exit(&(un->un_fi_mutex)); 27045 27046 SD_INFO(SD_LOG_IOERR, un, 27047 "sd_faultinjection_ioctl: start finished\n"); 27048 break; 27049 27050 case SDIOCSTOP: 27051 /* Stop Injection Session */ 27052 SD_INFO(SD_LOG_SDTEST, un, 27053 "sd_faultinjection_ioctl: Injecting Fault Stop\n"); 27054 sd_fault_injection_on = 0; 27055 un->sd_injection_mask = 0x0; 27056 27057 /* Empty stray or unuseds structs from fifo */ 27058 for (i = 0; i < SD_FI_MAX_ERROR; i++) { 27059 if (un->sd_fi_fifo_pkt[i] != NULL) { 27060 kmem_free(un->sd_fi_fifo_pkt[i], 27061 sizeof (struct sd_fi_pkt)); 27062 } 27063 if (un->sd_fi_fifo_xb[i] != NULL) { 27064 kmem_free(un->sd_fi_fifo_xb[i], 27065 sizeof (struct sd_fi_xb)); 27066 } 27067 if (un->sd_fi_fifo_un[i] != NULL) { 27068 kmem_free(un->sd_fi_fifo_un[i], 27069 sizeof (struct sd_fi_un)); 27070 } 27071 if (un->sd_fi_fifo_arq[i] != NULL) { 27072 kmem_free(un->sd_fi_fifo_arq[i], 27073 sizeof (struct sd_fi_arq)); 27074 } 27075 un->sd_fi_fifo_pkt[i] = NULL; 27076 un->sd_fi_fifo_un[i] = NULL; 27077 un->sd_fi_fifo_xb[i] = NULL; 27078 un->sd_fi_fifo_arq[i] = NULL; 27079 } 27080 un->sd_fi_fifo_start = 0; 27081 un->sd_fi_fifo_end = 0; 27082 27083 SD_INFO(SD_LOG_IOERR, un, 27084 "sd_faultinjection_ioctl: stop finished\n"); 27085 break; 27086 27087 case SDIOCINSERTPKT: 27088 /* Store a 
packet struct to be pushed onto fifo */ 27089 SD_INFO(SD_LOG_SDTEST, un, 27090 "sd_faultinjection_ioctl: Injecting Fault Insert Pkt\n"); 27091 27092 i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR; 27093 27094 sd_fault_injection_on = 0; 27095 27096 /* No more that SD_FI_MAX_ERROR allowed in Queue */ 27097 if (un->sd_fi_fifo_pkt[i] != NULL) { 27098 kmem_free(un->sd_fi_fifo_pkt[i], 27099 sizeof (struct sd_fi_pkt)); 27100 } 27101 if (arg != NULL) { 27102 un->sd_fi_fifo_pkt[i] = 27103 kmem_alloc(sizeof (struct sd_fi_pkt), KM_NOSLEEP); 27104 if (un->sd_fi_fifo_pkt[i] == NULL) { 27105 /* Alloc failed don't store anything */ 27106 break; 27107 } 27108 rval = ddi_copyin((void *)arg, un->sd_fi_fifo_pkt[i], 27109 sizeof (struct sd_fi_pkt), 0); 27110 if (rval == -1) { 27111 kmem_free(un->sd_fi_fifo_pkt[i], 27112 sizeof (struct sd_fi_pkt)); 27113 un->sd_fi_fifo_pkt[i] = NULL; 27114 } 27115 } else { 27116 SD_INFO(SD_LOG_IOERR, un, 27117 "sd_faultinjection_ioctl: pkt null\n"); 27118 } 27119 break; 27120 27121 case SDIOCINSERTXB: 27122 /* Store a xb struct to be pushed onto fifo */ 27123 SD_INFO(SD_LOG_SDTEST, un, 27124 "sd_faultinjection_ioctl: Injecting Fault Insert XB\n"); 27125 27126 i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR; 27127 27128 sd_fault_injection_on = 0; 27129 27130 if (un->sd_fi_fifo_xb[i] != NULL) { 27131 kmem_free(un->sd_fi_fifo_xb[i], 27132 sizeof (struct sd_fi_xb)); 27133 un->sd_fi_fifo_xb[i] = NULL; 27134 } 27135 if (arg != NULL) { 27136 un->sd_fi_fifo_xb[i] = 27137 kmem_alloc(sizeof (struct sd_fi_xb), KM_NOSLEEP); 27138 if (un->sd_fi_fifo_xb[i] == NULL) { 27139 /* Alloc failed don't store anything */ 27140 break; 27141 } 27142 rval = ddi_copyin((void *)arg, un->sd_fi_fifo_xb[i], 27143 sizeof (struct sd_fi_xb), 0); 27144 27145 if (rval == -1) { 27146 kmem_free(un->sd_fi_fifo_xb[i], 27147 sizeof (struct sd_fi_xb)); 27148 un->sd_fi_fifo_xb[i] = NULL; 27149 } 27150 } else { 27151 SD_INFO(SD_LOG_IOERR, un, 27152 "sd_faultinjection_ioctl: xb null\n"); 27153 } 27154 break; 27155 27156 case SDIOCINSERTUN: 27157 /* Store a un struct to be pushed onto fifo */ 27158 SD_INFO(SD_LOG_SDTEST, un, 27159 "sd_faultinjection_ioctl: Injecting Fault Insert UN\n"); 27160 27161 i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR; 27162 27163 sd_fault_injection_on = 0; 27164 27165 if (un->sd_fi_fifo_un[i] != NULL) { 27166 kmem_free(un->sd_fi_fifo_un[i], 27167 sizeof (struct sd_fi_un)); 27168 un->sd_fi_fifo_un[i] = NULL; 27169 } 27170 if (arg != NULL) { 27171 un->sd_fi_fifo_un[i] = 27172 kmem_alloc(sizeof (struct sd_fi_un), KM_NOSLEEP); 27173 if (un->sd_fi_fifo_un[i] == NULL) { 27174 /* Alloc failed don't store anything */ 27175 break; 27176 } 27177 rval = ddi_copyin((void *)arg, un->sd_fi_fifo_un[i], 27178 sizeof (struct sd_fi_un), 0); 27179 if (rval == -1) { 27180 kmem_free(un->sd_fi_fifo_un[i], 27181 sizeof (struct sd_fi_un)); 27182 un->sd_fi_fifo_un[i] = NULL; 27183 } 27184 27185 } else { 27186 SD_INFO(SD_LOG_IOERR, un, 27187 "sd_faultinjection_ioctl: un null\n"); 27188 } 27189 27190 break; 27191 27192 case SDIOCINSERTARQ: 27193 /* Store a arq struct to be pushed onto fifo */ 27194 SD_INFO(SD_LOG_SDTEST, un, 27195 "sd_faultinjection_ioctl: Injecting Fault Insert ARQ\n"); 27196 i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR; 27197 27198 sd_fault_injection_on = 0; 27199 27200 if (un->sd_fi_fifo_arq[i] != NULL) { 27201 kmem_free(un->sd_fi_fifo_arq[i], 27202 sizeof (struct sd_fi_arq)); 27203 un->sd_fi_fifo_arq[i] = NULL; 27204 } 27205 if (arg != NULL) { 27206 un->sd_fi_fifo_arq[i] = 27207 kmem_alloc(sizeof (struct sd_fi_arq), 
			    KM_NOSLEEP);
			if (un->sd_fi_fifo_arq[i] == NULL) {
				/* Allocation failed; don't store anything */
				break;
			}
			rval = ddi_copyin((void *)arg, un->sd_fi_fifo_arq[i],
			    sizeof (struct sd_fi_arq), 0);
			if (rval == -1) {
				kmem_free(un->sd_fi_fifo_arq[i],
				    sizeof (struct sd_fi_arq));
				un->sd_fi_fifo_arq[i] = NULL;
			}

		} else {
			SD_INFO(SD_LOG_IOERR, un,
			    "sd_faultinjection_ioctl: arq null\n");
		}

		break;

	case SDIOCPUSH:
		/* Push stored xb, pkt, un, and arq onto fifo */
		sd_fault_injection_on = 0;

		if (arg != NULL) {
			rval = ddi_copyin((void *)arg, &i, sizeof (uint_t), 0);
			if (rval != -1 &&
			    un->sd_fi_fifo_end + i < SD_FI_MAX_ERROR) {
				un->sd_fi_fifo_end += i;
			}
		} else {
			SD_INFO(SD_LOG_IOERR, un,
			    "sd_faultinjection_ioctl: push arg null\n");
			if (un->sd_fi_fifo_end + i < SD_FI_MAX_ERROR) {
				un->sd_fi_fifo_end++;
			}
		}
		SD_INFO(SD_LOG_IOERR, un,
		    "sd_faultinjection_ioctl: push to end=%d\n",
		    un->sd_fi_fifo_end);
		break;

	case SDIOCRETRIEVE:
		/* Return the log buffer from the injection session */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Retrieve");

		sd_fault_injection_on = 0;

		mutex_enter(&(un->un_fi_mutex));
		rval = ddi_copyout(un->sd_fi_log, (void *)arg,
		    un->sd_fi_buf_len+1, 0);
		mutex_exit(&(un->un_fi_mutex));

		if (rval == -1) {
			/*
			 * arg is possibly invalid; set it to NULL
			 * for the return.
			 */
			arg = NULL;
		}
		break;
	}

	mutex_exit(SD_MUTEX(un));
	SD_TRACE(SD_LOG_IOERR, un, "sd_faultinjection_ioctl: exit\n");
}


/*
 * Function: sd_injection_log()
 *
 * Description: This routine appends buf to the existing injection log,
 *		for later retrieval via sd_faultinjection_ioctl() for use
 *		in fault detection and recovery.
 *
 * Arguments: buf - the string to add to the log
 */

static void
sd_injection_log(char *buf, struct sd_lun *un)
{
	uint_t len;

	ASSERT(un != NULL);
	ASSERT(buf != NULL);

	mutex_enter(&(un->un_fi_mutex));

	len = min(strlen(buf), 255);
	/* Add logged value to injection log to be returned later */
	if (len + un->sd_fi_buf_len < SD_FI_MAX_BUF) {
		uint_t offset = strlen((char *)un->sd_fi_log);
		char *destp = (char *)un->sd_fi_log + offset;
		int i;
		for (i = 0; i < len; i++) {
			*destp++ = *buf++;
		}
		un->sd_fi_buf_len += len;
		un->sd_fi_log[un->sd_fi_buf_len] = '\0';
	}

	mutex_exit(&(un->un_fi_mutex));
}


/*
 * Function: sd_faultinjection()
 *
 * Description: This routine takes the pkt and changes its
 *		content based on the error injection scenario.
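 *
 *		A typical injection session, pieced together from the
 *		ioctl cases of sd_faultinjection_ioctl() above (a sketch
 *		only; names and error handling are hypothetical/omitted):
 *
 *			ioctl(fd, SDIOCSTART, NULL);        start a session
 *			ioctl(fd, SDIOCINSERTPKT, &fipkt);  stage pkt errors
 *			ioctl(fd, SDIOCPUSH, &nsets);       commit staged sets
 *			ioctl(fd, SDIOCRUN, NULL);          arm injection
 *			    ... run I/O against the device ...
 *			ioctl(fd, SDIOCRETRIEVE, logbuf);   collect the log
 *			ioctl(fd, SDIOCSTOP, NULL);         end the session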
27319 * 27320 * Arguments: pktp - packet to be changed 27321 */ 27322 27323 static void 27324 sd_faultinjection(struct scsi_pkt *pktp) 27325 { 27326 uint_t i; 27327 struct sd_fi_pkt *fi_pkt; 27328 struct sd_fi_xb *fi_xb; 27329 struct sd_fi_un *fi_un; 27330 struct sd_fi_arq *fi_arq; 27331 struct buf *bp; 27332 struct sd_xbuf *xb; 27333 struct sd_lun *un; 27334 27335 ASSERT(pktp != NULL); 27336 27337 /* pull bp xb and un from pktp */ 27338 bp = (struct buf *)pktp->pkt_private; 27339 xb = SD_GET_XBUF(bp); 27340 un = SD_GET_UN(bp); 27341 27342 ASSERT(un != NULL); 27343 27344 mutex_enter(SD_MUTEX(un)); 27345 27346 SD_TRACE(SD_LOG_SDTEST, un, 27347 "sd_faultinjection: entry Injection from sdintr\n"); 27348 27349 /* if injection is off return */ 27350 if (sd_fault_injection_on == 0 || 27351 un->sd_fi_fifo_start == un->sd_fi_fifo_end) { 27352 mutex_exit(SD_MUTEX(un)); 27353 return; 27354 } 27355 27356 27357 /* take next set off fifo */ 27358 i = un->sd_fi_fifo_start % SD_FI_MAX_ERROR; 27359 27360 fi_pkt = un->sd_fi_fifo_pkt[i]; 27361 fi_xb = un->sd_fi_fifo_xb[i]; 27362 fi_un = un->sd_fi_fifo_un[i]; 27363 fi_arq = un->sd_fi_fifo_arq[i]; 27364 27365 27366 /* set variables accordingly */ 27367 /* set pkt if it was on fifo */ 27368 if (fi_pkt != NULL) { 27369 SD_CONDSET(pktp, pkt, pkt_flags, "pkt_flags"); 27370 SD_CONDSET(*pktp, pkt, pkt_scbp, "pkt_scbp"); 27371 SD_CONDSET(*pktp, pkt, pkt_cdbp, "pkt_cdbp"); 27372 SD_CONDSET(pktp, pkt, pkt_state, "pkt_state"); 27373 SD_CONDSET(pktp, pkt, pkt_statistics, "pkt_statistics"); 27374 SD_CONDSET(pktp, pkt, pkt_reason, "pkt_reason"); 27375 27376 } 27377 27378 /* set xb if it was on fifo */ 27379 if (fi_xb != NULL) { 27380 SD_CONDSET(xb, xb, xb_blkno, "xb_blkno"); 27381 SD_CONDSET(xb, xb, xb_dma_resid, "xb_dma_resid"); 27382 SD_CONDSET(xb, xb, xb_retry_count, "xb_retry_count"); 27383 SD_CONDSET(xb, xb, xb_victim_retry_count, 27384 "xb_victim_retry_count"); 27385 SD_CONDSET(xb, xb, xb_sense_status, "xb_sense_status"); 27386 SD_CONDSET(xb, xb, xb_sense_state, "xb_sense_state"); 27387 SD_CONDSET(xb, xb, xb_sense_resid, "xb_sense_resid"); 27388 27389 /* copy in block data from sense */ 27390 if (fi_xb->xb_sense_data[0] != -1) { 27391 bcopy(fi_xb->xb_sense_data, xb->xb_sense_data, 27392 SENSE_LENGTH); 27393 } 27394 27395 /* copy in extended sense codes */ 27396 SD_CONDSET(((struct scsi_extended_sense *)xb), xb, es_code, 27397 "es_code"); 27398 SD_CONDSET(((struct scsi_extended_sense *)xb), xb, es_key, 27399 "es_key"); 27400 SD_CONDSET(((struct scsi_extended_sense *)xb), xb, es_add_code, 27401 "es_add_code"); 27402 SD_CONDSET(((struct scsi_extended_sense *)xb), xb, 27403 es_qual_code, "es_qual_code"); 27404 } 27405 27406 /* set un if it was on fifo */ 27407 if (fi_un != NULL) { 27408 SD_CONDSET(un->un_sd->sd_inq, un, inq_rmb, "inq_rmb"); 27409 SD_CONDSET(un, un, un_ctype, "un_ctype"); 27410 SD_CONDSET(un, un, un_reset_retry_count, 27411 "un_reset_retry_count"); 27412 SD_CONDSET(un, un, un_reservation_type, "un_reservation_type"); 27413 SD_CONDSET(un, un, un_resvd_status, "un_resvd_status"); 27414 SD_CONDSET(un, un, un_f_arq_enabled, "un_f_arq_enabled"); 27415 SD_CONDSET(un, un, un_f_allow_bus_device_reset, 27416 "un_f_allow_bus_device_reset"); 27417 SD_CONDSET(un, un, un_f_opt_queueing, "un_f_opt_queueing"); 27418 27419 } 27420 27421 /* copy in auto request sense if it was on fifo */ 27422 if (fi_arq != NULL) { 27423 bcopy(fi_arq, pktp->pkt_scbp, sizeof (struct sd_fi_arq)); 27424 } 27425 27426 /* free structs */ 27427 if (un->sd_fi_fifo_pkt[i] != NULL) { 27428 
		kmem_free(un->sd_fi_fifo_pkt[i], sizeof (struct sd_fi_pkt));
	}
	if (un->sd_fi_fifo_xb[i] != NULL) {
		kmem_free(un->sd_fi_fifo_xb[i], sizeof (struct sd_fi_xb));
	}
	if (un->sd_fi_fifo_un[i] != NULL) {
		kmem_free(un->sd_fi_fifo_un[i], sizeof (struct sd_fi_un));
	}
	if (un->sd_fi_fifo_arq[i] != NULL) {
		kmem_free(un->sd_fi_fifo_arq[i], sizeof (struct sd_fi_arq));
	}

	/*
	 * kmem_free() does not guarantee that the freed pointers are set
	 * to NULL. Since these pointers are used to determine whether
	 * values were set, make sure they are always NULL after the free.
	 */
	un->sd_fi_fifo_pkt[i] = NULL;
	un->sd_fi_fifo_un[i] = NULL;
	un->sd_fi_fifo_xb[i] = NULL;
	un->sd_fi_fifo_arq[i] = NULL;

	un->sd_fi_fifo_start++;

	mutex_exit(SD_MUTEX(un));

	SD_TRACE(SD_LOG_SDTEST, un, "sd_faultinjection: exit\n");
}

#endif /* SD_FAULT_INJECTION */

/*
 * This routine is invoked from sd_unit_attach(). By the time it is called,
 * the properties in the conf file, including the "hotpluggable" property,
 * must already have been processed.
 *
 * The sd driver distinguishes three different types of devices: removable
 * media, non-removable media, and hotpluggable. The differences are
 * defined below:
 *
 * 1. Device ID
 *
 * The device ID of a device is used to identify this device. Refer to
 * ddi_devid_register(9F).
 *
 * For a non-removable media disk device which can provide a 0x80 or 0x83
 * VPD page (refer to the INQUIRY command of the SCSI SPC specification),
 * a unique device ID is created to identify this device. For other
 * non-removable media devices, a default device ID is created only if the
 * device has at least 2 alternate cylinders. Otherwise, the device has
 * no devid.
 *
 * -------------------------------------------------------
 * removable media   hotpluggable  | Can Have Device ID
 * -------------------------------------------------------
 *     false             false     |        Yes
 *     false             true      |        Yes
 *     true                x       |        No
 * -------------------------------------------------------
 *
 *
 * 2. SCSI group 4 commands
 *
 * In the SCSI specs, only some commands in the group 4 command set can
 * use 8-byte addresses, which are required to access storage beyond 2TB.
 * Other commands have no such capability. Without group 4 support it is
 * impossible to make full use of a disk with a capacity larger than 2TB.
 *
 * -----------------------------------------------
 * removable media   hotpluggable   LP64  |  Group
 * -----------------------------------------------
 *     false             false     false  |    1
 *     false             false     true   |    4
 *     false             true      false  |    1
 *     false             true      true   |    4
 *     true                x         x    |    5
 * -----------------------------------------------
 *
 *
 * 3. Check for VTOC Label
 *
 * If a direct-access disk has no EFI label, sd will check whether it has
 * a valid VTOC label. Now, sd also does that check for removable media
 * and hotpluggable devices.
 *
 * --------------------------------------------------------------
 * Direct-Access   removable media   hotpluggable  |  Check Label
 * --------------------------------------------------------------
 *     false           false             false     |      No
 *     false           false             true      |      No
 *     false           true              false     |      Yes
 *     false           true              true      |      Yes
 *     true              x                 x       |      Yes
 * --------------------------------------------------------------
 *
 *
 * 4. Building default VTOC label
 *
 * As section 3 says, sd checks whether certain kinds of devices have a
 * VTOC label. If those devices have no valid VTOC label, sd(7d) will
 * attempt to create a default VTOC for them. Currently sd creates a
 * default VTOC label for all devices on the x86 platform (VTOC_16), but
 * only for removable media devices on SPARC (VTOC_8).
 *
 * -----------------------------------------------------------
 * removable media   hotpluggable   platform  |  Default Label
 * -----------------------------------------------------------
 *     false             false       sparc    |      No
 *     false             true        x86      |      Yes
 *     false             true        sparc    |      Yes
 *     true                x           x      |      Yes
 * -----------------------------------------------------------
 *
 *
 * 5. Supported blocksizes of target devices
 *
 * Sd supports non-512-byte blocksizes for removable media devices only.
 * For other devices, only a 512-byte blocksize is supported. This may
 * change in the near future because some RAID devices require a
 * non-512-byte blocksize.
 *
 * -----------------------------------------------------------
 * removable media   hotpluggable  | non-512-byte blocksize
 * -----------------------------------------------------------
 *     false             false     |         No
 *     false             true      |         No
 *     true                x       |         Yes
 * -----------------------------------------------------------
 *
 *
 * 6. Automatic mount & unmount
 *
 * The sd(7d) driver provides the DKIOCREMOVABLE ioctl, which queries
 * whether a device is a removable media device. It returns 1 for
 * removable media devices and 0 for others.
 *
 * The automatic mounting subsystem should distinguish between the types
 * of devices and apply automounting policies to each.
 *
 *
 * 7. fdisk partition management
 *
 * Fdisk is the traditional partitioning method on the x86 platform. The
 * sd(7d) driver supports fdisk partitions only on x86; on SPARC, sd does
 * not support fdisk partitions at all. Note: pcfs(7fs) can recognize
 * fdisk partitions on both x86 and SPARC platforms.
 *
 * -----------------------------------------------------------
 * platform   removable media   USB/1394  |  fdisk supported
 * -----------------------------------------------------------
 *    x86            X              X     |       true
 * -----------------------------------------------------------
 *   sparc           X              X     |       false
 * -----------------------------------------------------------
 *
 *
 * 8. MBOOT/MBR
 *
 * Although sd(7d) does not support fdisk on the SPARC platform, it does
 * support reading/writing the mboot for removable media devices on SPARC.
 *
 *
 * 3. Check for VTOC Label
 *
 *     If a direct-access disk has no EFI label, sd checks whether it has
 *     a valid VTOC label.  sd now performs this check for removable media
 *     and hotpluggable devices as well.
 *
 *     --------------------------------------------------------------
 *     Direct-Access   removable media   hotpluggable |  Check Label
 *     --------------------------------------------------------------
 *         false           false            false     |   No
 *         false           false            true      |   No
 *         false           true             false     |   Yes
 *         false           true             true      |   Yes
 *         true              x                x       |   Yes
 *     --------------------------------------------------------------
 *
 *
 * 4. Building a default VTOC label
 *
 *     As section 3 describes, sd checks whether certain kinds of devices
 *     have a VTOC label.  If such a device has no valid VTOC label,
 *     sd(7d) attempts to create a default VTOC for it.  Currently, sd
 *     creates a default VTOC label for all devices on the x86 platform
 *     (VTOC_16), but only for removable media devices on SPARC (VTOC_8).
 *
 *     -----------------------------------------------------------
 *     removable media   hotpluggable   platform  |  Default Label
 *     -----------------------------------------------------------
 *         false             false        x86     |     Yes
 *         false             false       sparc    |     No
 *         false             true         x86     |     Yes
 *         false             true        sparc    |     Yes
 *         true                x           x      |     Yes
 *     -----------------------------------------------------------
 *
 *
 * 5. Supported blocksizes of target devices
 *
 *     sd supports non-512-byte blocksizes only for removable media
 *     devices; for all other devices, only a 512-byte blocksize is
 *     supported.  This may change in the near future because some RAID
 *     devices require a non-512-byte blocksize.
 *
 *     -----------------------------------------------------------
 *     removable media   hotpluggable  | non-512-byte blocksize
 *     -----------------------------------------------------------
 *         false             false     |        No
 *         false             true      |        No
 *         true                x       |        Yes
 *     -----------------------------------------------------------
 *
 *
 * 6. Automatic mount & unmount
 *
 *     The sd(7d) driver provides the DKIOCREMOVABLE ioctl, which queries
 *     whether a device is a removable media device.  It returns 1 for
 *     removable media devices and 0 for all others.
 *
 *     The automatic mounting subsystem should distinguish between the
 *     types of devices and apply the appropriate automounting policy to
 *     each (see the sketch below).
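 *
 *     For illustration only: a userland consumer might query this ioctl
 *     with a minimal sketch along these lines ("/dev/rdsk/c0t0d0s2" is
 *     just an example path):
 *
 *         int fd, removable = 0;
 *
 *         fd = open("/dev/rdsk/c0t0d0s2", O_RDONLY | O_NDELAY);
 *         if (fd >= 0 &&
 *             ioctl(fd, DKIOCREMOVABLE, &removable) == 0 &&
 *             removable != 0)
 *                 (apply the removable-media automount policy)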
 *
 *
 * 7. fdisk partition management
 *
 *     fdisk is the traditional partitioning method on the x86 platform.
 *     The sd(7d) driver supports fdisk partitions only on x86; on SPARC,
 *     sd does not support fdisk partitions at all.  Note: pcfs(7fs) can
 *     recognize fdisk partitions on both the x86 and SPARC platforms.
 *
 *     -----------------------------------------------------------
 *      platform   removable media   USB/1394  |  fdisk supported
 *     -----------------------------------------------------------
 *        x86            X               X     |       true
 *     -----------------------------------------------------------
 *       sparc           X               X     |       false
 *     -----------------------------------------------------------
 *
 *
 * 8. MBOOT/MBR
 *
 *     Although sd(7d) does not support fdisk on the SPARC platform, it
 *     does support reading and writing the mboot (master boot) sector for
 *     removable media and USB/1394 devices on SPARC.
 *
 *     -----------------------------------------------------------
 *      platform   removable media   USB/1394  |  mboot supported
 *     -----------------------------------------------------------
 *        x86            X               X     |       true
 *     -----------------------------------------------------------
 *       sparc         false           false   |       false
 *       sparc         false           true    |       true
 *       sparc         true            false   |       true
 *       sparc         true            true    |       true
 *     -----------------------------------------------------------
 *
 *
 * 9. Error handling during device open
 *
 *     If opening a disk device fails, an errno is returned.  For some
 *     kinds of errors, the errno returned depends on whether the device
 *     is a removable media device.  This brings USB/1394 hard disks in
 *     line with expected hard disk behavior, and it is not expected to
 *     break any application.
 *
 *     ------------------------------------------------------
 *     removable media   hotpluggable  |  errno
 *     ------------------------------------------------------
 *         false             false     |  EIO
 *         false             true      |  EIO
 *         true                x       |  ENXIO
 *     ------------------------------------------------------
 *
 *
 * 10. Ioctls: DKIOCEJECT, CDROMEJECT
 *
 *     These ioctls apply only to removable media devices.
 *
 *     -----------------------------------------------------------
 *     removable media   hotpluggable  | DKIOCEJECT, CDROMEJECT
 *     -----------------------------------------------------------
 *         false             false     |        No
 *         false             true      |        No
 *         true                x       |        Yes
 *     -----------------------------------------------------------
 *
 *
 * 11. Kstats for partitions
 *
 *     sd creates partition kstats for non-removable media devices; as a
 *     result, USB and FireWire hard disks now have partition kstats.
 *
 *     ------------------------------------------------------
 *     removable media   hotpluggable  |  kstat
 *     ------------------------------------------------------
 *         false             false     |  Yes
 *         false             true      |  Yes
 *         true                x       |  No
 *     ------------------------------------------------------
 *
 *
 * 12. Removable media & hotpluggable properties
 *
 *     The sd driver creates a "removable-media" property for removable
 *     media devices.  A parent nexus driver creates a "hotpluggable"
 *     property if it supports hotplugging.  (A property-creation sketch
 *     follows the table.)
 *
 *     ---------------------------------------------------------------
 *     removable media   hotpluggable | "removable-media" "hotpluggable"
 *     ---------------------------------------------------------------
 *         false             false    |        No               No
 *         false             true     |        No               Yes
 *         true              false    |        Yes              No
 *         true              true     |        Yes              Yes
 *     ---------------------------------------------------------------
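 *
 *     For illustration only: both are zero-length (boolean) properties,
 *     so a parent nexus that supports hotplugging might publish its
 *     property with a sketch like the following ("dip" is the child's
 *     devinfo node; the return value is ignored, just as it is for the
 *     "removable-media" property created below):
 *
 *         (void) ddi_prop_create(DDI_DEV_T_NONE, dip,
 *             DDI_PROP_CANSLEEP, "hotpluggable", NULL, 0);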
 *
 *
 * 13. Power Management
 *
 *     sd only power manages removable media devices and devices that
 *     support LOG_SENSE or have a "pm-capable" property (PSARC/2002/250).
 *
 *     A parent nexus that supports hotplugging can also set "pm-capable"
 *     if the disk can be power managed.
 *
 *     ------------------------------------------------------------
 *     removable media  hotpluggable  pm-capable  |  power manage
 *     ------------------------------------------------------------
 *         false            false        false    |     No
 *         false            false        true     |     Yes
 *         false            true         false    |     No
 *         false            true         true     |     Yes
 *         true               x            x      |     Yes
 *     ------------------------------------------------------------
 *
 *     USB and FireWire hard disks can now be power managed independently
 *     of the framebuffer.
 *
 *
 * 14. Support for USB disks with capacity larger than 1TB
 *
 *     Currently, sd does not permit a fixed disk device with a capacity
 *     larger than 1TB to be used in a 32-bit operating system
 *     environment.  However, sd does not enforce this for removable media
 *     devices; instead, it assumes that a removable media device cannot
 *     have a capacity larger than 1TB.  Using such a device on a 32-bit
 *     system is therefore only partially supported, which can cause
 *     unexpected results.
 *
 *     ---------------------------------------------------------------
 *     removable media   USB/1394 | Capacity > 1TB | Used in 32-bit env
 *     ---------------------------------------------------------------
 *         false            false |      true      |        No
 *         false            true  |      true      |        No
 *         true             false |      true      |        Yes
 *         true             true  |      true      |        Yes
 *     ---------------------------------------------------------------
 *
 *
 * 15. Check write-protection at open time
 *
 *     When a removable media device is opened for writing without the
 *     NDELAY flag, sd checks whether the device is writable.  If the
 *     device is write-protected, the open fails.
 *
 *     ------------------------------------------------------------
 *     removable media   USB/1394  |  WP Check
 *     ------------------------------------------------------------
 *         false            false  |  No
 *         false            true   |  No
 *         true             false  |  Yes
 *         true             true   |  Yes
 *     ------------------------------------------------------------
 *
 *
 * 16. Syslog when a corrupted VTOC is encountered
 *
 *     Currently, if an invalid VTOC is encountered, sd logs a message to
 *     syslog only for fixed SCSI disks.
 *
 *     ------------------------------------------------------------
 *     removable media   USB/1394  |  print syslog
 *     ------------------------------------------------------------
 *         false            false  |  Yes
 *         false            true   |  No
 *         true             false  |  No
 *         true             true   |  No
 *     ------------------------------------------------------------
 */
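/*
 * For illustration only: the partition-kstat and power-management behavior
 * described above can be influenced from configuration files via the
 * properties this routine reads.  A hypothetical sketch (the values shown
 * are examples, not recommended defaults):
 *
 *	# sd.conf: disable partition kstats on non-removable devices
 *	enable-partition-kstats=0;
 *
 *	# HBA driver.conf: declare attached disks power manageable
 *	pm-capable=1;
 */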
static void
sd_set_unit_attributes(struct sd_lun *un, dev_info_t *devi)
{
	int	pm_capable_prop;

	ASSERT(un->un_sd);
	ASSERT(un->un_sd->sd_inq);

	/*
	 * Enable SYNC CACHE support for all devices.
	 */
	un->un_f_sync_cache_supported = TRUE;

	if (un->un_sd->sd_inq->inq_rmb) {
		/*
		 * The media of this device is removable, so it is possible
		 * to change the medium after the device has been opened.
		 * This operation must therefore be supported.
		 */
		un->un_f_has_removable_media = TRUE;

		/*
		 * Support non-512-byte blocksizes on removable media
		 * devices.
		 */
		un->un_f_non_devbsize_supported = TRUE;

		/*
		 * Assume that all removable media devices support DOOR_LOCK.
		 */
		un->un_f_doorlock_supported = TRUE;

		/*
		 * A removable media device may be opened with the NDELAY
		 * flag when there is no medium in the drive, in which case
		 * we do not care whether the device is writable.  Without
		 * the NDELAY flag, however, we need to check whether the
		 * medium is write-protected.
		 */
		un->un_f_chk_wp_open = TRUE;

		/*
		 * A SCSI watch thread must be started to monitor the media
		 * state, so that syseventd can be notified when media is
		 * inserted or ejected.
		 */
		un->un_f_monitor_media_state = TRUE;

		/*
		 * Some devices do not support the START_STOP_UNIT command,
		 * so check whether a device supports it before sending it.
		 */
		un->un_f_check_start_stop = TRUE;

		/*
		 * Support the eject media ioctls:
		 * FDEJECT, DKIOCEJECT, CDROMEJECT
		 */
		un->un_f_eject_media_supported = TRUE;

		/*
		 * Because many removable-media devices do not support
		 * LOG_SENSE, that command cannot be used to determine
		 * whether a removable media device supports power
		 * management.  Assume instead that such devices support
		 * power management via the START_STOP_UNIT command and can
		 * be spun up and down without limitation.
		 */
		un->un_f_pm_supported = TRUE;

		/*
		 * Create a zero-length (boolean) "removable-media" property
		 * for removable media devices.  Note that the return value
		 * is not checked: if the property cannot be created, the
		 * attach should not fail altogether.  This is consistent
		 * with other property creation during attach.
		 */
		(void) ddi_prop_create(DDI_DEV_T_NONE, devi,
		    DDI_PROP_CANSLEEP, "removable-media", NULL, 0);

	} else {
		/*
		 * Create a device ID for the device.
		 */
		un->un_f_devid_supported = TRUE;

		/*
		 * Spin up non-removable-media devices once they are
		 * attached.
		 */
		un->un_f_attach_spinup = TRUE;

		/*
		 * According to the SCSI specification, sense data has two
		 * formats: fixed format and descriptor format.  At present,
		 * descriptor format sense data is not supported for
		 * removable media.
		 */
		if (SD_INQUIRY(un)->inq_dtype == DTYPE_DIRECT) {
			un->un_f_descr_format_supported = TRUE;
		}

		/*
		 * kstats are created only for non-removable media devices.
		 *
		 * Set "enable-partition-kstats" to 0 in sd.conf to disable
		 * partition kstats.  The default is 1, so they are enabled
		 * by default.
		 */
		un->un_f_pkstats_enabled = (ddi_prop_get_int(DDI_DEV_T_ANY,
		    SD_DEVINFO(un), DDI_PROP_DONTPASS,
		    "enable-partition-kstats", 1));

		/*
		 * Check if the HBA has set the "pm-capable" property.
		 * If "pm-capable" exists and is non-zero, the device can be
		 * power managed without checking the start/stop cycle count
		 * log sense page.
		 *
		 * If "pm-capable" exists and is SD_PM_CAPABLE_FALSE (0),
		 * the device should not be power managed.
		 *
		 * If "pm-capable" does not exist, pm_capable_prop is set to
		 * SD_PM_CAPABLE_UNDEFINED (-1).  In this case, sd checks
		 * the start/stop cycle count log sense page and power
		 * manages the device if the cycle count limit has not been
		 * exceeded.
		 */
		pm_capable_prop = ddi_prop_get_int(DDI_DEV_T_ANY, devi,
		    DDI_PROP_DONTPASS, "pm-capable", SD_PM_CAPABLE_UNDEFINED);
		if (pm_capable_prop == SD_PM_CAPABLE_UNDEFINED) {
			un->un_f_log_sense_supported = TRUE;
		} else {
			/*
			 * The "pm-capable" property exists.
			 *
			 * Convert "TRUE" values of pm_capable_prop to
			 * SD_PM_CAPABLE_TRUE (1) to make later checks
			 * easier.  "TRUE" values are any values except
			 * SD_PM_CAPABLE_FALSE (0) and
			 * SD_PM_CAPABLE_UNDEFINED (-1).
			 */
			if (pm_capable_prop == SD_PM_CAPABLE_FALSE) {
				un->un_f_log_sense_supported = FALSE;
			} else {
				un->un_f_pm_supported = TRUE;
			}

			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_unit_attach: un:0x%p pm-capable "
			    "property set to %d.\n", un, un->un_f_pm_supported);
		}
	}

	if (un->un_f_is_hotpluggable) {

		/*
		 * Hotpluggable devices must be watched as well, since that
		 * is the only way for userland applications to detect hot
		 * removal while the device is busy or mounted.
		 */
		un->un_f_monitor_media_state = TRUE;

		un->un_f_check_start_stop = TRUE;
	}
}

/*
 * sd_tg_rdwr:
 *	Provides read/write access for cmlb via sd_tgops.  start_block is
 *	in system block size, reqlength is in bytes.
 */
static int
sd_tg_rdwr(dev_info_t *devi, uchar_t cmd, void *bufaddr,
    diskaddr_t start_block, size_t reqlength, void *tg_cookie)
{
	struct sd_lun	*un;
	int		path_flag = (int)(uintptr_t)tg_cookie;
	char		*dkl = NULL;
	diskaddr_t	real_addr = start_block;
	diskaddr_t	first_byte, end_block;

	size_t		buffer_size = reqlength;
	int		rval;
	diskaddr_t	cap;
	uint32_t	lbasize;

	un = ddi_get_soft_state(sd_state, ddi_get_instance(devi));
	if (un == NULL)
		return (ENXIO);

	if (cmd != TG_READ && cmd != TG_WRITE)
		return (EINVAL);

	mutex_enter(SD_MUTEX(un));
	if (un->un_f_tgt_blocksize_is_valid == FALSE) {
		mutex_exit(SD_MUTEX(un));
		rval = sd_send_scsi_READ_CAPACITY(un, (uint64_t *)&cap,
		    &lbasize, path_flag);
		if (rval != 0)
			return (rval);
		mutex_enter(SD_MUTEX(un));
		sd_update_block_info(un, lbasize, cap);
		if (un->un_f_tgt_blocksize_is_valid == FALSE) {
			mutex_exit(SD_MUTEX(un));
			return (EIO);
		}
	}

	if (NOT_DEVBSIZE(un)) {
		/*
		 * sys_blocksize != tgt_blocksize: re-adjust the block
		 * number and save the offset to the beginning of dk_label.
		 * (See the worked example after this function.)
		 */
		first_byte = SD_SYSBLOCKS2BYTES(un, start_block);
		real_addr = first_byte / un->un_tgt_blocksize;

		end_block = (first_byte + reqlength +
		    un->un_tgt_blocksize - 1) / un->un_tgt_blocksize;

		/* round up buffer size to multiple of target block size */
		buffer_size = (end_block - real_addr) * un->un_tgt_blocksize;

		SD_TRACE(SD_LOG_IO_PARTITION, un,
		    "sd_tg_rdwr: label_addr: 0x%x allocation size: 0x%x\n",
		    real_addr, buffer_size);

		if (((first_byte % un->un_tgt_blocksize) != 0) ||
		    (reqlength % un->un_tgt_blocksize) != 0)
			/* the request is not aligned */
			dkl = kmem_zalloc(buffer_size, KM_SLEEP);
	}

	/*
	 * The MMC standard allows READ CAPACITY to be inaccurate by a
	 * bounded amount (in the interest of response latency).  As a
	 * result, failed READs are commonplace (due to the reading of
	 * metadata rather than data).  Depending on the per-vendor/drive
	 * sense data, a failed READ can cause many (unnecessary) retries.
	 */
	if (ISCD(un) && (cmd == TG_READ) &&
	    (un->un_f_blockcount_is_valid == TRUE) &&
	    ((start_block == (un->un_blockcount - 1)) ||
	    (start_block == (un->un_blockcount - 2)))) {
		path_flag = SD_PATH_DIRECT_PRIORITY;
	}

	mutex_exit(SD_MUTEX(un));
	if (cmd == TG_READ) {
		rval = sd_send_scsi_READ(un, (dkl != NULL) ? dkl : bufaddr,
		    buffer_size, real_addr, path_flag);
		if (dkl != NULL)
			bcopy(dkl + SD_TGTBYTEOFFSET(un, start_block,
			    real_addr), bufaddr, reqlength);
	} else {
		if (dkl) {
			rval = sd_send_scsi_READ(un, dkl, buffer_size,
			    real_addr, path_flag);
			if (rval) {
				kmem_free(dkl, buffer_size);
				return (rval);
			}
			bcopy(bufaddr, dkl + SD_TGTBYTEOFFSET(un, start_block,
			    real_addr), reqlength);
		}
		rval = sd_send_scsi_WRITE(un, (dkl != NULL) ? dkl : bufaddr,
		    buffer_size, real_addr, path_flag);
	}

	if (dkl != NULL)
		kmem_free(dkl, buffer_size);

	return (rval);
}
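/*
 * Worked example of the NOT_DEVBSIZE() adjustment above (illustrative
 * numbers only): with a 512-byte system block size, a 2048-byte target
 * block size, start_block = 3, and reqlength = 1024:
 *
 *	first_byte  = 3 * 512                     = 1536
 *	real_addr   = 1536 / 2048                 = 0
 *	end_block   = (1536 + 1024 + 2047) / 2048 = 2
 *	buffer_size = (2 - 0) * 2048              = 4096
 *
 * Since first_byte is not a multiple of 2048, the request is unaligned:
 * a 4096-byte bounce buffer (dkl) is read starting at target block 0,
 * and the 1024 requested bytes at offset 1536 within it are then copied
 * back to the caller's buffer.
 */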
static int
sd_tg_getinfo(dev_info_t *devi, int cmd, void *arg, void *tg_cookie)
{
	struct sd_lun	*un;
	diskaddr_t	cap;
	uint32_t	lbasize;
	int		path_flag = (int)(uintptr_t)tg_cookie;
	int		ret = 0;

	un = ddi_get_soft_state(sd_state, ddi_get_instance(devi));
	if (un == NULL)
		return (ENXIO);

	switch (cmd) {
	case TG_GETPHYGEOM:
	case TG_GETVIRTGEOM:
	case TG_GETCAPACITY:
	case TG_GETBLOCKSIZE:
		mutex_enter(SD_MUTEX(un));

		if ((un->un_f_blockcount_is_valid == TRUE) &&
		    (un->un_f_tgt_blocksize_is_valid == TRUE)) {
			cap = un->un_blockcount;
			lbasize = un->un_tgt_blocksize;
			mutex_exit(SD_MUTEX(un));
		} else {
			mutex_exit(SD_MUTEX(un));
			ret = sd_send_scsi_READ_CAPACITY(un, (uint64_t *)&cap,
			    &lbasize, path_flag);
			if (ret != 0)
				return (ret);
			mutex_enter(SD_MUTEX(un));
			sd_update_block_info(un, lbasize, cap);
			if ((un->un_f_blockcount_is_valid == FALSE) ||
			    (un->un_f_tgt_blocksize_is_valid == FALSE)) {
				mutex_exit(SD_MUTEX(un));
				return (EIO);
			}
			mutex_exit(SD_MUTEX(un));
		}

		if (cmd == TG_GETCAPACITY) {
			*(diskaddr_t *)arg = cap;
			return (0);
		}

		if (cmd == TG_GETBLOCKSIZE) {
			*(uint32_t *)arg = lbasize;
			return (0);
		}

		if (cmd == TG_GETPHYGEOM)
			ret = sd_get_physical_geometry(un, (cmlb_geom_t *)arg,
			    cap, lbasize, path_flag);
		else
			/* TG_GETVIRTGEOM */
			ret = sd_get_virtual_geometry(un,
			    (cmlb_geom_t *)arg, cap, lbasize);

		return (ret);

	case TG_GETATTR:
		mutex_enter(SD_MUTEX(un));
		((tg_attribute_t *)arg)->media_is_writable =
		    un->un_f_mmc_writable_media;
		mutex_exit(SD_MUTEX(un));
		return (0);
	default:
		return (ENOTTY);
	}
}
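/*
 * For illustration only: cmlb reaches sd_tg_rdwr() and sd_tg_getinfo()
 * through the tg_ops vector (sd_tgops) registered with cmlb at attach
 * time, so a capacity query from the common label code amounts to a
 * sketch like the following ("tgops", "dip", and "tg_cookie" are
 * illustrative names, and the exact vector layout is assumed):
 *
 *	diskaddr_t cap;
 *
 *	if (tgops->tg_getinfo(dip, TG_GETCAPACITY, &cap, tg_cookie) == 0)
 *		(cap now holds the device block count)
 */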