/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * SCSI disk target driver.
 */
#include <sys/scsi/scsi.h>
#include <sys/dkbad.h>
#include <sys/dklabel.h>
#include <sys/dkio.h>
#include <sys/fdio.h>
#include <sys/cdio.h>
#include <sys/mhd.h>
#include <sys/vtoc.h>
#include <sys/dktp/fdisk.h>
#include <sys/kstat.h>
#include <sys/vtrace.h>
#include <sys/note.h>
#include <sys/thread.h>
#include <sys/proc.h>
#include <sys/efi_partition.h>
#include <sys/var.h>
#include <sys/aio_req.h>

#ifdef __lock_lint
#define	_LP64
#define	__amd64
#endif

#if (defined(__fibre))
/* Note: is there a leadville version of the following? */
#include <sys/fc4/fcal_linkapp.h>
#endif
#include <sys/taskq.h>
#include <sys/uuid.h>
#include <sys/byteorder.h>
#include <sys/sdt.h>

#include "sd_xbuf.h"

#include <sys/scsi/targets/sddef.h>
#include <sys/cmlb.h>


/*
 * Loadable module info.
 */
#if (defined(__fibre))
#define	SD_MODULE_NAME	"SCSI SSA/FCAL Disk Driver %I%"
char _depends_on[] = "misc/scsi misc/cmlb drv/fcp";
#else
#define	SD_MODULE_NAME	"SCSI Disk Driver %I%"
char _depends_on[] = "misc/scsi misc/cmlb";
#endif

/*
 * Define the interconnect type, to allow the driver to distinguish
 * between parallel SCSI (sd) and fibre channel (ssd) behaviors.
 *
 * This is really for backward compatibility. In the future, the driver
 * should actually check the "interconnect-type" property as reported by
 * the HBA; however at present this property is not defined by all HBAs,
 * so we will use this #define (1) to permit the driver to run in
 * backward-compatibility mode; and (2) to print a notification message
 * if an FC HBA does not support the "interconnect-type" property. The
 * behavior of the driver will be to assume parallel SCSI behaviors unless
 * the "interconnect-type" property is defined by the HBA **AND** has a
 * value of either INTERCONNECT_FIBRE, INTERCONNECT_SSA, or
 * INTERCONNECT_FABRIC, in which case the driver will assume Fibre
 * Channel behaviors (as per the old ssd). (Note that the
 * INTERCONNECT_1394 and INTERCONNECT_USB types are not supported and
 * will result in the driver assuming parallel SCSI behaviors.)
 *
 * (see common/sys/scsi/impl/services.h)
 *
 * Note: For ssd semantics, don't use INTERCONNECT_FABRIC as the default
 * since some FC HBAs may already support that, and there is some code in
 * the driver that already looks for it. Using INTERCONNECT_FABRIC as the
 * default would confuse that code, and besides things should work fine
 * anyway if the FC HBA already reports INTERCONNECT_FABRIC for the
 * "interconnect-type" property.
 */
#if (defined(__fibre))
#define	SD_DEFAULT_INTERCONNECT_TYPE	SD_INTERCONNECT_FIBRE
#else
#define	SD_DEFAULT_INTERCONNECT_TYPE	SD_INTERCONNECT_PARALLEL
#endif
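/*
 * Illustrative sketch only (assumed shape, not the driver's verbatim
 * code): the attach path applies the policy described above roughly as
 *
 *	un->un_interconnect_type = SD_DEFAULT_INTERCONNECT_TYPE;
 *	if (scsi_ifgetcap(SD_ADDRESS(un), "interconnect-type", -1) ==
 *	    INTERCONNECT_FIBRE)
 *		un->un_interconnect_type = SD_INTERCONNECT_FIBRE;
 *
 * with INTERCONNECT_SSA and INTERCONNECT_FABRIC handled the same way,
 * and any other (or missing) value falling back to parallel SCSI
 * behavior.
 */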

/*
 * The name of the driver, established from the module name in _init.
 */
static char *sd_label = NULL;

/*
 * Driver name is unfortunately prefixed on some driver.conf properties.
 */
#if (defined(__fibre))
#define	sd_max_xfer_size	ssd_max_xfer_size
#define	sd_config_list		ssd_config_list
static char *sd_max_xfer_size = "ssd_max_xfer_size";
static char *sd_config_list = "ssd-config-list";
#else
static char *sd_max_xfer_size = "sd_max_xfer_size";
static char *sd_config_list = "sd-config-list";
#endif

/*
 * Driver global variables
 */

#if (defined(__fibre))
/*
 * These #defines are to avoid namespace collisions that occur because this
 * code is currently used to compile two separate driver modules: sd and ssd.
 * All global variables need to be treated this way (even if declared static)
 * in order to allow the debugger to resolve the names properly.
 * It is anticipated that in the near future the ssd module will be obsoleted,
 * at which time this namespace issue should go away.
 */
#define	sd_state			ssd_state
#define	sd_io_time			ssd_io_time
#define	sd_failfast_enable		ssd_failfast_enable
#define	sd_ua_retry_count		ssd_ua_retry_count
#define	sd_report_pfa			ssd_report_pfa
#define	sd_max_throttle			ssd_max_throttle
#define	sd_min_throttle			ssd_min_throttle
#define	sd_rot_delay			ssd_rot_delay

#define	sd_retry_on_reservation_conflict \
	ssd_retry_on_reservation_conflict
#define	sd_reinstate_resv_delay		ssd_reinstate_resv_delay
#define	sd_resv_conflict_name		ssd_resv_conflict_name

#define	sd_component_mask		ssd_component_mask
#define	sd_level_mask			ssd_level_mask
#define	sd_debug_un			ssd_debug_un
#define	sd_error_level			ssd_error_level

#define	sd_xbuf_active_limit		ssd_xbuf_active_limit
#define	sd_xbuf_reserve_limit		ssd_xbuf_reserve_limit

#define	sd_tr				ssd_tr
#define	sd_reset_throttle_timeout	ssd_reset_throttle_timeout
#define	sd_qfull_throttle_timeout	ssd_qfull_throttle_timeout
#define	sd_qfull_throttle_enable	ssd_qfull_throttle_enable
#define	sd_check_media_time		ssd_check_media_time
#define	sd_wait_cmds_complete		ssd_wait_cmds_complete
#define	sd_label_mutex			ssd_label_mutex
#define	sd_detach_mutex			ssd_detach_mutex
#define	sd_log_buf			ssd_log_buf
#define	sd_log_mutex			ssd_log_mutex

#define	sd_disk_table			ssd_disk_table
#define	sd_disk_table_size		ssd_disk_table_size
#define	sd_sense_mutex			ssd_sense_mutex
#define	sd_cdbtab			ssd_cdbtab

#define	sd_cb_ops			ssd_cb_ops
#define	sd_ops				ssd_ops
#define	sd_additional_codes		ssd_additional_codes
#define	sd_tgops			ssd_tgops

#define	sd_minor_data			ssd_minor_data
#define	sd_minor_data_efi		ssd_minor_data_efi

#define	sd_tq				ssd_tq
#define	sd_wmr_tq			ssd_wmr_tq
#define	sd_taskq_name			ssd_taskq_name
#define	sd_wmr_taskq_name		ssd_wmr_taskq_name
#define	sd_taskq_minalloc		ssd_taskq_minalloc
#define	sd_taskq_maxalloc		ssd_taskq_maxalloc
#define	sd_dump_format_string		ssd_dump_format_string

#define	sd_iostart_chain		ssd_iostart_chain
#define	sd_iodone_chain			ssd_iodone_chain

#define	sd_pm_idletime			ssd_pm_idletime

#define	sd_force_pm_supported		ssd_force_pm_supported

#define	sd_dtype_optical_bind		ssd_dtype_optical_bind

#endif


#ifdef SDDEBUG
int sd_force_pm_supported = 0;
#endif /* SDDEBUG */

void *sd_state = NULL;
int sd_io_time = SD_IO_TIME;
int sd_failfast_enable = 1;
int sd_ua_retry_count = SD_UA_RETRY_COUNT;
int sd_report_pfa = 1;
int sd_max_throttle = SD_MAX_THROTTLE;
int sd_min_throttle = SD_MIN_THROTTLE;
int sd_rot_delay = 4; /* Default 4ms Rotation delay */
int sd_qfull_throttle_enable = TRUE;

int sd_retry_on_reservation_conflict = 1;
int sd_reinstate_resv_delay = SD_REINSTATE_RESV_DELAY;
_NOTE(SCHEME_PROTECTS_DATA("safe sharing", sd_reinstate_resv_delay))

static int sd_dtype_optical_bind = -1;

/* Note: the following is not a bug, it really is "sd_" and not "ssd_" */
static char *sd_resv_conflict_name = "sd_retry_on_reservation_conflict";

/*
 * Global data for debug logging. To enable debug printing, sd_component_mask
 * and sd_level_mask should be set to the desired bit patterns as outlined in
 * sddef.h.
 */
uint_t sd_component_mask = 0x0;
uint_t sd_level_mask = 0x0;
struct sd_lun *sd_debug_un = NULL;
uint_t sd_error_level = SCSI_ERR_RETRYABLE;

/* Note: these may go away in the future... */
static uint32_t sd_xbuf_active_limit = 512;
static uint32_t sd_xbuf_reserve_limit = 16;

static struct sd_resv_reclaim_request sd_tr = { NULL, NULL, NULL, 0, 0, 0 };

/*
 * Timer value used to reset the throttle after it has been reduced
 * (typically in response to TRAN_BUSY or STATUS_QFULL)
 */
static int sd_reset_throttle_timeout = SD_RESET_THROTTLE_TIMEOUT;
static int sd_qfull_throttle_timeout = SD_QFULL_THROTTLE_TIMEOUT;

/*
 * Interval value associated with the media change scsi watch.
 */
static int sd_check_media_time = 3000000;

/*
 * Wait value used for in progress operations during a DDI_SUSPEND
 */
static int sd_wait_cmds_complete = SD_WAIT_CMDS_COMPLETE;

/*
 * sd_label_mutex protects a static buffer used in the disk label
 * component of the driver
 */
static kmutex_t sd_label_mutex;

/*
 * sd_detach_mutex protects un_layer_count, un_detach_count, and
 * un_opens_in_progress in the sd_lun structure.
 */
static kmutex_t sd_detach_mutex;

_NOTE(MUTEX_PROTECTS_DATA(sd_detach_mutex,
	sd_lun::{un_layer_count un_detach_count un_opens_in_progress}))

/*
 * Global buffer and mutex for debug logging
 */
static char sd_log_buf[1024];
static kmutex_t sd_log_mutex;

/*
 * Structs and globals for recording attached lun information.
 * This maintains a chain; each node in the chain represents a SCSI
 * controller. The structure records the number of luns attached to each
 * target connected to the controller.
 * For parallel SCSI devices only.
 */
struct sd_scsi_hba_tgt_lun {
	struct sd_scsi_hba_tgt_lun	*next;
	dev_info_t			*pdip;
	int				nlun[NTARGETS_WIDE];
};

/*
 * Flags to indicate whether a lun is attached or detached
 */
#define	SD_SCSI_LUN_ATTACH	0
#define	SD_SCSI_LUN_DETACH	1

static kmutex_t sd_scsi_target_lun_mutex;
static struct sd_scsi_hba_tgt_lun *sd_scsi_target_lun_head = NULL;

_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_target_lun_mutex,
    sd_scsi_hba_tgt_lun::next sd_scsi_hba_tgt_lun::pdip))

_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_target_lun_mutex,
    sd_scsi_target_lun_head))
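/*
 * Illustrative annotation (assumed flow, consistent with the routines
 * declared later in this file): sd_scsi_update_lun_on_target() walks
 * this chain for the node whose pdip matches the lun's parent HBA node,
 * adding a node on first use, and increments or decrements nlun[target]
 * as luns attach or detach; sd_scsi_get_target_lun_count() then reports
 * how many luns are currently attached to a given target.
 */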

/*
 * "Smart" Probe Caching structs, globals, #defines, etc.
 * For parallel SCSI and non-self-identifying devices only.
 */

/*
 * The following resources and routines are implemented to support
 * "smart" probing, which caches the scsi_probe() results in an array,
 * in order to help avoid long probe times.
 */
struct sd_scsi_probe_cache {
	struct sd_scsi_probe_cache	*next;
	dev_info_t			*pdip;
	int				cache[NTARGETS_WIDE];
};

static kmutex_t sd_scsi_probe_cache_mutex;
static struct sd_scsi_probe_cache *sd_scsi_probe_cache_head = NULL;

/*
 * Really we only need protection on the head of the linked list, but
 * better safe than sorry.
 */
_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_probe_cache_mutex,
    sd_scsi_probe_cache::next sd_scsi_probe_cache::pdip))

_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_probe_cache_mutex,
    sd_scsi_probe_cache_head))
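/*
 * Illustrative annotation (assumed flow, consistent with the comment
 * above): sd_scsi_probe_with_cache() looks up the cache node for the
 * lun's parent HBA and consults cache[target] before calling
 * scsi_probe(), so a target that has already failed a probe is not
 * probed again; fresh results are recorded in the array, and
 * sd_scsi_clear_probe_cache() resets it so subsequent probes go back
 * to the hardware.
 */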

/*
 * Vendor specific data name property declarations
 */

#if defined(__fibre) || defined(__i386) || defined(__amd64)

static sd_tunables seagate_properties = {
	SEAGATE_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};


static sd_tunables fujitsu_properties = {
	FUJITSU_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables ibm_properties = {
	IBM_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables purple_properties = {
	PURPLE_THROTTLE_VALUE,
	0,
	0,
	PURPLE_BUSY_RETRIES,
	PURPLE_RESET_RETRY_COUNT,
	PURPLE_RESERVE_RELEASE_TIME,
	0,
	0,
	0
};

static sd_tunables sve_properties = {
	SVE_THROTTLE_VALUE,
	0,
	0,
	SVE_BUSY_RETRIES,
	SVE_RESET_RETRY_COUNT,
	SVE_RESERVE_RELEASE_TIME,
	SVE_MIN_THROTTLE_VALUE,
	SVE_DISKSORT_DISABLED_FLAG,
	0
};

static sd_tunables maserati_properties = {
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	MASERATI_DISKSORT_DISABLED_FLAG,
	MASERATI_LUN_RESET_ENABLED_FLAG
};

static sd_tunables pirus_properties = {
	PIRUS_THROTTLE_VALUE,
	0,
	PIRUS_NRR_COUNT,
	PIRUS_BUSY_RETRIES,
	PIRUS_RESET_RETRY_COUNT,
	0,
	PIRUS_MIN_THROTTLE_VALUE,
	PIRUS_DISKSORT_DISABLED_FLAG,
	PIRUS_LUN_RESET_ENABLED_FLAG
};

#endif

#if (defined(__sparc) && !defined(__fibre)) || \
	(defined(__i386) || defined(__amd64))


static sd_tunables elite_properties = {
	ELITE_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables st31200n_properties = {
	ST31200N_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};

#endif /* Fibre or not */

static sd_tunables lsi_properties_scsi = {
	LSI_THROTTLE_VALUE,
	0,
	LSI_NOTREADY_RETRIES,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables symbios_properties = {
	SYMBIOS_THROTTLE_VALUE,
	0,
	SYMBIOS_NOTREADY_RETRIES,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables lsi_properties = {
	0,
	0,
	LSI_NOTREADY_RETRIES,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables lsi_oem_properties = {
	0,
	0,
	LSI_OEM_NOTREADY_RETRIES,
	0,
	0,
	0,
	0,
	0,
	0,
	1
};



#if (defined(SD_PROP_TST))

#define	SD_TST_CTYPE_VAL	CTYPE_CDROM
#define	SD_TST_THROTTLE_VAL	16
#define	SD_TST_NOTREADY_VAL	12
#define	SD_TST_BUSY_VAL		60
#define	SD_TST_RST_RETRY_VAL	36
#define	SD_TST_RSV_REL_TIME	60

static sd_tunables tst_properties = {
	SD_TST_THROTTLE_VAL,
	SD_TST_CTYPE_VAL,
	SD_TST_NOTREADY_VAL,
	SD_TST_BUSY_VAL,
	SD_TST_RST_RETRY_VAL,
	SD_TST_RSV_REL_TIME,
	0,
	0,
	0
};
#endif
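/*
 * Annotation (not part of the original source): the sd_tunables
 * initializers above are positional. Reading the named values used in
 * tst_properties, sve_properties and maserati_properties gives the
 * field order as: throttle, ctype, not-ready retry count, busy retry
 * count, reset retry count, reserve-release time, min throttle,
 * disksort-disabled flag, and lun-reset-enabled flag.
 */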

/* This is similar to the ANSI toupper implementation */
#define	SD_TOUPPER(C)	(((C) >= 'a' && (C) <= 'z') ? (C) - 'a' + 'A' : (C))

/*
 * Static Driver Configuration Table
 *
 * This is the table of disks which need throttle adjustment (or, perhaps
 * something else as defined by the flags at a future time.) device_id
 * is a string consisting of concatenated vid (vendor), pid (product/model)
 * and revision strings as defined in the scsi_inquiry structure. Offsets of
 * the parts of the string are as defined by the sizes in the scsi_inquiry
 * structure. Device type is searched as far as the device_id string is
 * defined. Flags defines which values are to be set in the driver from the
 * properties list.
 *
 * Entries below which begin and end with a "*" are a special case.
 * These do not have a specific vendor, and the string which follows
 * can appear anywhere in the 16 byte PID portion of the inquiry data.
 *
 * Entries below which begin and end with a " " (blank) are a special
 * case. The comparison function will treat multiple consecutive blanks
 * as equivalent to a single blank. For example, this causes a
 * sd_disk_table entry of " NEC CDROM " to match a device's id string
 * of "NEC CDROM".
 *
 * Note: The MD21 controller type has been obsoleted.
 *	 ST318202F is a Legacy device.
 *	 MAM3182FC, MAM3364FC, MAM3738FC do not appear to have ever been
 *	 made with an FC connection. The entries here are a legacy.
 */
static sd_disk_config_t sd_disk_table[] = {
#if defined(__fibre) || defined(__i386) || defined(__amd64)
	{ "SEAGATE ST34371FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST19171FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST39102FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST39103FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST118273F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST318202F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST318203F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST136403F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST318304F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST336704F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST373405F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST336605F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST336752F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST318452F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "FUJITSU MAG3091F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAG3182F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAA3182F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAF3364F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAL3364F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAL3738F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAM3182FC", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAM3364FC", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAM3738FC", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "IBM     DDYFT1835",  SD_CONF_BSET_THROTTLE, &ibm_properties },
	{ "IBM     DDYFT3695",  SD_CONF_BSET_THROTTLE, &ibm_properties },
	{ "IBM     IC35LF2D2",  SD_CONF_BSET_THROTTLE, &ibm_properties },
	{ "IBM     IC35LF2PR",  SD_CONF_BSET_THROTTLE, &ibm_properties },
	{ "IBM     1724-100",   SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1726-2xx",   SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1726-22x",   SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1726-4xx",   SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1726-42x",   SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1726-3xx",   SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     3526",       SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     3542",       SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     3552",       SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1722",       SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1742",       SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1815",       SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     FAStT",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1814",       SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1814-200",   SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1818",       SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "LSI     INF",        SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "ENGENIO INF",        SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "SGI     TP",         SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "SGI     IS",         SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "*CSM100_*",          SD_CONF_BSET_NRR_COUNT |
	    SD_CONF_BSET_CACHE_IS_NV, &lsi_oem_properties },
	{ "*CSM200_*",          SD_CONF_BSET_NRR_COUNT |
	    SD_CONF_BSET_CACHE_IS_NV, &lsi_oem_properties },
	{ "Fujitsu SX300",      SD_CONF_BSET_THROTTLE, &lsi_oem_properties },
	{ "LSI", SD_CONF_BSET_NRR_COUNT, &lsi_properties },
	{ "SUN     T3", SD_CONF_BSET_THROTTLE |
	    SD_CONF_BSET_BSY_RETRY_COUNT |
	    SD_CONF_BSET_RST_RETRIES |
	    SD_CONF_BSET_RSV_REL_TIME,
	    &purple_properties },
	{ "SUN     SESS01", SD_CONF_BSET_THROTTLE |
	    SD_CONF_BSET_BSY_RETRY_COUNT |
	    SD_CONF_BSET_RST_RETRIES |
	    SD_CONF_BSET_RSV_REL_TIME |
	    SD_CONF_BSET_MIN_THROTTLE |
	    SD_CONF_BSET_DISKSORT_DISABLED,
	    &sve_properties },
	{ "SUN     T4", SD_CONF_BSET_THROTTLE |
	    SD_CONF_BSET_BSY_RETRY_COUNT |
	    SD_CONF_BSET_RST_RETRIES |
	    SD_CONF_BSET_RSV_REL_TIME,
	    &purple_properties },
	{ "SUN     SVE01", SD_CONF_BSET_DISKSORT_DISABLED |
	    SD_CONF_BSET_LUN_RESET_ENABLED,
	    &maserati_properties },
	{ "SUN     SE6920", SD_CONF_BSET_THROTTLE |
	    SD_CONF_BSET_NRR_COUNT |
	    SD_CONF_BSET_BSY_RETRY_COUNT |
	    SD_CONF_BSET_RST_RETRIES |
	    SD_CONF_BSET_MIN_THROTTLE |
	    SD_CONF_BSET_DISKSORT_DISABLED |
	    SD_CONF_BSET_LUN_RESET_ENABLED,
	    &pirus_properties },
	{ "SUN     SE6940", SD_CONF_BSET_THROTTLE |
	    SD_CONF_BSET_NRR_COUNT |
	    SD_CONF_BSET_BSY_RETRY_COUNT |
	    SD_CONF_BSET_RST_RETRIES |
	    SD_CONF_BSET_MIN_THROTTLE |
	    SD_CONF_BSET_DISKSORT_DISABLED |
	    SD_CONF_BSET_LUN_RESET_ENABLED,
	    &pirus_properties },
	{ "SUN     StorageTek 6920", SD_CONF_BSET_THROTTLE |
	    SD_CONF_BSET_NRR_COUNT |
	    SD_CONF_BSET_BSY_RETRY_COUNT |
	    SD_CONF_BSET_RST_RETRIES |
	    SD_CONF_BSET_MIN_THROTTLE |
	    SD_CONF_BSET_DISKSORT_DISABLED |
	    SD_CONF_BSET_LUN_RESET_ENABLED,
	    &pirus_properties },
	{ "SUN     StorageTek 6940", SD_CONF_BSET_THROTTLE |
	    SD_CONF_BSET_NRR_COUNT |
	    SD_CONF_BSET_BSY_RETRY_COUNT |
	    SD_CONF_BSET_RST_RETRIES |
	    SD_CONF_BSET_MIN_THROTTLE |
	    SD_CONF_BSET_DISKSORT_DISABLED |
	    SD_CONF_BSET_LUN_RESET_ENABLED,
	    &pirus_properties },
	{ "SUN     PSX1000", SD_CONF_BSET_THROTTLE |
	    SD_CONF_BSET_NRR_COUNT |
	    SD_CONF_BSET_BSY_RETRY_COUNT |
	    SD_CONF_BSET_RST_RETRIES |
	    SD_CONF_BSET_MIN_THROTTLE |
	    SD_CONF_BSET_DISKSORT_DISABLED |
	    SD_CONF_BSET_LUN_RESET_ENABLED,
	    &pirus_properties },
	{ "SUN     SE6330", SD_CONF_BSET_THROTTLE |
	    SD_CONF_BSET_NRR_COUNT |
	    SD_CONF_BSET_BSY_RETRY_COUNT |
	    SD_CONF_BSET_RST_RETRIES |
	    SD_CONF_BSET_MIN_THROTTLE |
	    SD_CONF_BSET_DISKSORT_DISABLED |
	    SD_CONF_BSET_LUN_RESET_ENABLED,
	    &pirus_properties },
	{ "SUN     STK6580_6780", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "STK     OPENstorage", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "STK     OpenStorage", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "STK     BladeCtlr",  SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "STK     FLEXLINE",   SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "SYMBIOS", SD_CONF_BSET_NRR_COUNT, &symbios_properties },
#endif /* fibre or NON-sparc platforms */
#if ((defined(__sparc) && !defined(__fibre)) || \
	(defined(__i386) || defined(__amd64)))
	{ "SEAGATE ST42400N", SD_CONF_BSET_THROTTLE, &elite_properties },
	{ "SEAGATE ST31200N", SD_CONF_BSET_THROTTLE, &st31200n_properties },
	{ "SEAGATE ST41600N", SD_CONF_BSET_TUR_CHECK, NULL },
	{ "CONNER  CP30540",  SD_CONF_BSET_NOCACHE, NULL },
	{ "*SUN0104*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0207*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0327*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0340*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0424*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0669*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN1.0G*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "SYMBIOS INF-01-00 ", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "SYMBIOS", SD_CONF_BSET_THROTTLE | SD_CONF_BSET_NRR_COUNT,
	    &symbios_properties },
	{ "LSI", SD_CONF_BSET_THROTTLE | SD_CONF_BSET_NRR_COUNT,
	    &lsi_properties_scsi },
#if defined(__i386) || defined(__amd64)
	{ " NEC CD-ROM DRIVE:260 ", (SD_CONF_BSET_PLAYMSF_BCD
	    | SD_CONF_BSET_READSUB_BCD
	    | SD_CONF_BSET_READ_TOC_ADDR_BCD
	    | SD_CONF_BSET_NO_READ_HEADER
	    | SD_CONF_BSET_READ_CD_XD4), NULL },

	{ " NEC CD-ROM DRIVE:270 ", (SD_CONF_BSET_PLAYMSF_BCD
	    | SD_CONF_BSET_READSUB_BCD
	    | SD_CONF_BSET_READ_TOC_ADDR_BCD
	    | SD_CONF_BSET_NO_READ_HEADER
	    | SD_CONF_BSET_READ_CD_XD4), NULL },
#endif /* __i386 || __amd64 */
#endif /* sparc NON-fibre or NON-sparc platforms */

#if (defined(SD_PROP_TST))
	{ "VENDOR  PRODUCT ", (SD_CONF_BSET_THROTTLE
	    | SD_CONF_BSET_CTYPE
	    | SD_CONF_BSET_NRR_COUNT
	    | SD_CONF_BSET_FAB_DEVID
	    | SD_CONF_BSET_NOCACHE
	    | SD_CONF_BSET_BSY_RETRY_COUNT
	    | SD_CONF_BSET_PLAYMSF_BCD
	    | SD_CONF_BSET_READSUB_BCD
	    | SD_CONF_BSET_READ_TOC_TRK_BCD
	    | SD_CONF_BSET_READ_TOC_ADDR_BCD
	    | SD_CONF_BSET_NO_READ_HEADER
	    | SD_CONF_BSET_READ_CD_XD4
	    | SD_CONF_BSET_RST_RETRIES
	    | SD_CONF_BSET_RSV_REL_TIME
	    | SD_CONF_BSET_TUR_CHECK), &tst_properties },
#endif
};

static const int sd_disk_table_size =
	sizeof (sd_disk_table) / sizeof (sd_disk_config_t);



#define	SD_INTERCONNECT_PARALLEL	0
#define	SD_INTERCONNECT_FABRIC		1
#define	SD_INTERCONNECT_FIBRE		2
#define	SD_INTERCONNECT_SSA		3
#define	SD_INTERCONNECT_SATA		4
#define	SD_IS_PARALLEL_SCSI(un)		\
	((un)->un_interconnect_type == SD_INTERCONNECT_PARALLEL)
#define	SD_IS_SERIAL(un)		\
	((un)->un_interconnect_type == SD_INTERCONNECT_SATA)

/*
 * Definitions used by device id registration routines
 */
#define	VPD_HEAD_OFFSET		3	/* size of head for vpd page */
#define	VPD_PAGE_LENGTH		3	/* offset for page length data */
#define	VPD_MODE_PAGE		1	/* offset into vpd pg for "page code" */

static kmutex_t sd_sense_mutex = {0};

/*
 * Macros for updates of the driver state
 */
#define	New_state(un, s)	\
	(un)->un_last_state = (un)->un_state, (un)->un_state = (s)
#define	Restore_state(un)	\
	{ uchar_t tmp = (un)->un_last_state; New_state((un), tmp); }

static struct sd_cdbinfo sd_cdbtab[] = {
	{ CDB_GROUP0, 0x00,	   0x1FFFFF,   0xFF },
	{ CDB_GROUP1, SCMD_GROUP1, 0xFFFFFFFF, 0xFFFF },
	{ CDB_GROUP5, SCMD_GROUP5, 0xFFFFFFFF, 0xFFFFFFFF },
	{ CDB_GROUP4, SCMD_GROUP4, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFF },
};
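/*
 * Annotation (not part of the original source): each sd_cdbtab row
 * above pairs a CDB length (CDB_GROUPn) with that group's opcode group
 * code and the largest block address and transfer length the format can
 * express: 6-byte GROUP0 commands are limited to a 21-bit LBA (0x1FFFFF)
 * and an 8-bit block count, while 16-byte GROUP4 commands allow a 64-bit
 * LBA and a 32-bit count.
 */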

/*
 * Specifies the number of seconds that must have elapsed since the last
 * command has completed for a device to be declared idle to the PM framework.
 */
static int sd_pm_idletime = 1;

/*
 * Internal function prototypes
 */

#if (defined(__fibre))
/*
 * These #defines are to avoid namespace collisions that occur because this
 * code is currently used to compile two separate driver modules: sd and ssd.
 * All function names need to be treated this way (even if declared static)
 * in order to allow the debugger to resolve the names properly.
 * It is anticipated that in the near future the ssd module will be obsoleted,
 * at which time this ugliness should go away.
 */
#define	sd_log_trace			ssd_log_trace
#define	sd_log_info			ssd_log_info
#define	sd_log_err			ssd_log_err
#define	sdprobe				ssdprobe
#define	sdinfo				ssdinfo
#define	sd_prop_op			ssd_prop_op
#define	sd_scsi_probe_cache_init	ssd_scsi_probe_cache_init
#define	sd_scsi_probe_cache_fini	ssd_scsi_probe_cache_fini
#define	sd_scsi_clear_probe_cache	ssd_scsi_clear_probe_cache
#define	sd_scsi_probe_with_cache	ssd_scsi_probe_with_cache
#define	sd_scsi_target_lun_init		ssd_scsi_target_lun_init
#define	sd_scsi_target_lun_fini		ssd_scsi_target_lun_fini
#define	sd_scsi_get_target_lun_count	ssd_scsi_get_target_lun_count
#define	sd_scsi_update_lun_on_target	ssd_scsi_update_lun_on_target
#define	sd_spin_up_unit			ssd_spin_up_unit
#define	sd_enable_descr_sense		ssd_enable_descr_sense
#define	sd_reenable_dsense_task		ssd_reenable_dsense_task
#define	sd_set_mmc_caps			ssd_set_mmc_caps
#define	sd_read_unit_properties		ssd_read_unit_properties
#define	sd_process_sdconf_file		ssd_process_sdconf_file
#define	sd_process_sdconf_table		ssd_process_sdconf_table
#define	sd_sdconf_id_match		ssd_sdconf_id_match
#define	sd_blank_cmp			ssd_blank_cmp
#define	sd_chk_vers1_data		ssd_chk_vers1_data
#define	sd_set_vers1_properties		ssd_set_vers1_properties

#define	sd_get_physical_geometry	ssd_get_physical_geometry
#define	sd_get_virtual_geometry		ssd_get_virtual_geometry
#define	sd_update_block_info		ssd_update_block_info
#define	sd_register_devid		ssd_register_devid
#define	sd_get_devid			ssd_get_devid
#define	sd_create_devid			ssd_create_devid
#define	sd_write_deviceid		ssd_write_deviceid
#define	sd_check_vpd_page_support	ssd_check_vpd_page_support
#define	sd_setup_pm			ssd_setup_pm
#define	sd_create_pm_components		ssd_create_pm_components
#define	sd_ddi_suspend			ssd_ddi_suspend
#define	sd_ddi_pm_suspend		ssd_ddi_pm_suspend
#define	sd_ddi_resume			ssd_ddi_resume
#define	sd_ddi_pm_resume		ssd_ddi_pm_resume
#define	sdpower				ssdpower
#define	sdattach			ssdattach
#define	sddetach			ssddetach
#define	sd_unit_attach			ssd_unit_attach
#define	sd_unit_detach			ssd_unit_detach
#define	sd_set_unit_attributes		ssd_set_unit_attributes
#define	sd_create_errstats		ssd_create_errstats
#define	sd_set_errstats			ssd_set_errstats
#define	sd_set_pstats			ssd_set_pstats
#define	sddump				ssddump
#define	sd_scsi_poll			ssd_scsi_poll
#define	sd_send_polled_RQS		ssd_send_polled_RQS
#define	sd_ddi_scsi_poll		ssd_ddi_scsi_poll
#define	sd_init_event_callbacks		ssd_init_event_callbacks
#define	sd_event_callback		ssd_event_callback
#define	sd_cache_control		ssd_cache_control
#define	sd_get_write_cache_enabled	ssd_get_write_cache_enabled
#define	sd_get_nv_sup			ssd_get_nv_sup
#define	sd_make_device			ssd_make_device
#define	sdopen				ssdopen
#define	sdclose				ssdclose
#define	sd_ready_and_valid		ssd_ready_and_valid
#define	sdmin				ssdmin
#define	sdread				ssdread
#define	sdwrite				ssdwrite
#define	sdaread				ssdaread
#define	sdawrite			ssdawrite
#define	sdstrategy			ssdstrategy
#define	sdioctl				ssdioctl
#define	sd_mapblockaddr_iostart		ssd_mapblockaddr_iostart
#define	sd_mapblocksize_iostart		ssd_mapblocksize_iostart
#define	sd_checksum_iostart		ssd_checksum_iostart
#define	sd_checksum_uscsi_iostart	ssd_checksum_uscsi_iostart
#define	sd_pm_iostart			ssd_pm_iostart
#define	sd_core_iostart			ssd_core_iostart
#define	sd_mapblockaddr_iodone		ssd_mapblockaddr_iodone
#define	sd_mapblocksize_iodone		ssd_mapblocksize_iodone
#define	sd_checksum_iodone		ssd_checksum_iodone
#define	sd_checksum_uscsi_iodone	ssd_checksum_uscsi_iodone
#define	sd_pm_iodone			ssd_pm_iodone
#define	sd_initpkt_for_buf		ssd_initpkt_for_buf
#define	sd_destroypkt_for_buf		ssd_destroypkt_for_buf
#define	sd_setup_rw_pkt			ssd_setup_rw_pkt
#define	sd_setup_next_rw_pkt		ssd_setup_next_rw_pkt
#define	sd_buf_iodone			ssd_buf_iodone
#define	sd_uscsi_strategy		ssd_uscsi_strategy
#define	sd_initpkt_for_uscsi		ssd_initpkt_for_uscsi
#define	sd_destroypkt_for_uscsi		ssd_destroypkt_for_uscsi
#define	sd_uscsi_iodone			ssd_uscsi_iodone
#define	sd_xbuf_strategy		ssd_xbuf_strategy
#define	sd_xbuf_init			ssd_xbuf_init
#define	sd_pm_entry			ssd_pm_entry
#define	sd_pm_exit			ssd_pm_exit

#define	sd_pm_idletimeout_handler	ssd_pm_idletimeout_handler
#define	sd_pm_timeout_handler		ssd_pm_timeout_handler

#define	sd_add_buf_to_waitq		ssd_add_buf_to_waitq
#define	sdintr				ssdintr
#define	sd_start_cmds			ssd_start_cmds
#define	sd_send_scsi_cmd		ssd_send_scsi_cmd
#define	sd_bioclone_alloc		ssd_bioclone_alloc
#define	sd_bioclone_free		ssd_bioclone_free
#define	sd_shadow_buf_alloc		ssd_shadow_buf_alloc
#define	sd_shadow_buf_free		ssd_shadow_buf_free
#define	sd_print_transport_rejected_message \
	ssd_print_transport_rejected_message
#define	sd_retry_command		ssd_retry_command
#define	sd_set_retry_bp			ssd_set_retry_bp
#define	sd_send_request_sense_command	ssd_send_request_sense_command
#define	sd_start_retry_command		ssd_start_retry_command
#define	sd_start_direct_priority_command \
	ssd_start_direct_priority_command
#define	sd_return_failed_command	ssd_return_failed_command
#define	sd_return_failed_command_no_restart \
	ssd_return_failed_command_no_restart
#define	sd_return_command		ssd_return_command
#define	sd_sync_with_callback		ssd_sync_with_callback
#define	sdrunout			ssdrunout
#define	sd_mark_rqs_busy		ssd_mark_rqs_busy
#define	sd_mark_rqs_idle		ssd_mark_rqs_idle
#define	sd_reduce_throttle		ssd_reduce_throttle
#define	sd_restore_throttle		ssd_restore_throttle
#define	sd_print_incomplete_msg		ssd_print_incomplete_msg
#define	sd_init_cdb_limits		ssd_init_cdb_limits
#define	sd_pkt_status_good		ssd_pkt_status_good
#define	sd_pkt_status_check_condition	ssd_pkt_status_check_condition
#define	sd_pkt_status_busy		ssd_pkt_status_busy
#define	sd_pkt_status_reservation_conflict \
	ssd_pkt_status_reservation_conflict
#define	sd_pkt_status_qfull		ssd_pkt_status_qfull
#define	sd_handle_request_sense		ssd_handle_request_sense
#define	sd_handle_auto_request_sense	ssd_handle_auto_request_sense
#define	sd_print_sense_failed_msg	ssd_print_sense_failed_msg
#define	sd_validate_sense_data		ssd_validate_sense_data
#define	sd_decode_sense			ssd_decode_sense
#define	sd_print_sense_msg		ssd_print_sense_msg
#define	sd_sense_key_no_sense		ssd_sense_key_no_sense
#define	sd_sense_key_recoverable_error	ssd_sense_key_recoverable_error
#define	sd_sense_key_not_ready		ssd_sense_key_not_ready
#define	sd_sense_key_medium_or_hardware_error \
	ssd_sense_key_medium_or_hardware_error
#define	sd_sense_key_illegal_request	ssd_sense_key_illegal_request
#define	sd_sense_key_unit_attention	ssd_sense_key_unit_attention
#define	sd_sense_key_fail_command	ssd_sense_key_fail_command
#define	sd_sense_key_blank_check	ssd_sense_key_blank_check
#define	sd_sense_key_aborted_command	ssd_sense_key_aborted_command
#define	sd_sense_key_default		ssd_sense_key_default
#define	sd_print_retry_msg		ssd_print_retry_msg
#define	sd_print_cmd_incomplete_msg	ssd_print_cmd_incomplete_msg
#define	sd_pkt_reason_cmd_incomplete	ssd_pkt_reason_cmd_incomplete
#define	sd_pkt_reason_cmd_tran_err	ssd_pkt_reason_cmd_tran_err
#define	sd_pkt_reason_cmd_reset		ssd_pkt_reason_cmd_reset
#define	sd_pkt_reason_cmd_aborted	ssd_pkt_reason_cmd_aborted
#define	sd_pkt_reason_cmd_timeout	ssd_pkt_reason_cmd_timeout
#define	sd_pkt_reason_cmd_unx_bus_free	ssd_pkt_reason_cmd_unx_bus_free
#define	sd_pkt_reason_cmd_tag_reject	ssd_pkt_reason_cmd_tag_reject
#define	sd_pkt_reason_default		ssd_pkt_reason_default
#define	sd_reset_target			ssd_reset_target
#define	sd_start_stop_unit_callback	ssd_start_stop_unit_callback
#define	sd_start_stop_unit_task		ssd_start_stop_unit_task
#define	sd_taskq_create			ssd_taskq_create
#define	sd_taskq_delete			ssd_taskq_delete
#define	sd_media_change_task		ssd_media_change_task
#define	sd_handle_mchange		ssd_handle_mchange
#define	sd_send_scsi_DOORLOCK		ssd_send_scsi_DOORLOCK
#define	sd_send_scsi_READ_CAPACITY	ssd_send_scsi_READ_CAPACITY
#define	sd_send_scsi_READ_CAPACITY_16	ssd_send_scsi_READ_CAPACITY_16
#define	sd_send_scsi_GET_CONFIGURATION	ssd_send_scsi_GET_CONFIGURATION
#define	sd_send_scsi_feature_GET_CONFIGURATION \
	ssd_send_scsi_feature_GET_CONFIGURATION
#define	sd_send_scsi_START_STOP_UNIT	ssd_send_scsi_START_STOP_UNIT
#define	sd_send_scsi_INQUIRY		ssd_send_scsi_INQUIRY
#define	sd_send_scsi_TEST_UNIT_READY	ssd_send_scsi_TEST_UNIT_READY
#define	sd_send_scsi_PERSISTENT_RESERVE_IN \
	ssd_send_scsi_PERSISTENT_RESERVE_IN
#define	sd_send_scsi_PERSISTENT_RESERVE_OUT \
	ssd_send_scsi_PERSISTENT_RESERVE_OUT
#define	sd_send_scsi_SYNCHRONIZE_CACHE	ssd_send_scsi_SYNCHRONIZE_CACHE
#define	sd_send_scsi_SYNCHRONIZE_CACHE_biodone \
	ssd_send_scsi_SYNCHRONIZE_CACHE_biodone
#define	sd_send_scsi_MODE_SENSE		ssd_send_scsi_MODE_SENSE
#define	sd_send_scsi_MODE_SELECT	ssd_send_scsi_MODE_SELECT
#define	sd_send_scsi_RDWR		ssd_send_scsi_RDWR
#define	sd_send_scsi_LOG_SENSE		ssd_send_scsi_LOG_SENSE
#define	sd_alloc_rqs			ssd_alloc_rqs
#define	sd_free_rqs			ssd_free_rqs
#define	sd_dump_memory			ssd_dump_memory
#define	sd_get_media_info		ssd_get_media_info
#define	sd_dkio_ctrl_info		ssd_dkio_ctrl_info
#define	sd_get_tunables_from_conf	ssd_get_tunables_from_conf
#define	sd_setup_next_xfer		ssd_setup_next_xfer
#define	sd_dkio_get_temp		ssd_dkio_get_temp
#define	sd_check_mhd			ssd_check_mhd
#define	sd_mhd_watch_cb			ssd_mhd_watch_cb
#define	sd_mhd_watch_incomplete		ssd_mhd_watch_incomplete
#define	sd_sname			ssd_sname
#define	sd_mhd_resvd_recover		ssd_mhd_resvd_recover
#define	sd_resv_reclaim_thread		ssd_resv_reclaim_thread
#define	sd_take_ownership		ssd_take_ownership
#define	sd_reserve_release		ssd_reserve_release
#define	sd_rmv_resv_reclaim_req		ssd_rmv_resv_reclaim_req
#define	sd_mhd_reset_notify_cb		ssd_mhd_reset_notify_cb
#define	sd_persistent_reservation_in_read_keys \
	ssd_persistent_reservation_in_read_keys
#define	sd_persistent_reservation_in_read_resv \
	ssd_persistent_reservation_in_read_resv
#define	sd_mhdioc_takeown		ssd_mhdioc_takeown
#define	sd_mhdioc_failfast		ssd_mhdioc_failfast
#define	sd_mhdioc_release		ssd_mhdioc_release
#define	sd_mhdioc_register_devid	ssd_mhdioc_register_devid
#define	sd_mhdioc_inkeys		ssd_mhdioc_inkeys
#define	sd_mhdioc_inresv		ssd_mhdioc_inresv
#define	sr_change_blkmode		ssr_change_blkmode
#define	sr_change_speed			ssr_change_speed
#define	sr_atapi_change_speed		ssr_atapi_change_speed
#define	sr_pause_resume			ssr_pause_resume
#define	sr_play_msf			ssr_play_msf
#define	sr_play_trkind			ssr_play_trkind
#define	sr_read_all_subcodes		ssr_read_all_subcodes
#define	sr_read_subchannel		ssr_read_subchannel
#define	sr_read_tocentry		ssr_read_tocentry
#define	sr_read_tochdr			ssr_read_tochdr
#define	sr_read_cdda			ssr_read_cdda
#define	sr_read_cdxa			ssr_read_cdxa
#define	sr_read_mode1			ssr_read_mode1
#define	sr_read_mode2			ssr_read_mode2
#define	sr_read_cd_mode2		ssr_read_cd_mode2
#define	sr_sector_mode			ssr_sector_mode
#define	sr_eject			ssr_eject
#define	sr_ejected			ssr_ejected
#define	sr_check_wp			ssr_check_wp
#define	sd_check_media			ssd_check_media
#define	sd_media_watch_cb		ssd_media_watch_cb
#define	sd_delayed_cv_broadcast		ssd_delayed_cv_broadcast
#define	sr_volume_ctrl			ssr_volume_ctrl
#define	sr_read_sony_session_offset	ssr_read_sony_session_offset
#define	sd_log_page_supported		ssd_log_page_supported
#define	sd_check_for_writable_cd	ssd_check_for_writable_cd
#define	sd_wm_cache_constructor		ssd_wm_cache_constructor
#define	sd_wm_cache_destructor		ssd_wm_cache_destructor
#define	sd_range_lock			ssd_range_lock
#define	sd_get_range			ssd_get_range
#define	sd_free_inlist_wmap		ssd_free_inlist_wmap
#define	sd_range_unlock			ssd_range_unlock
#define	sd_read_modify_write_task	ssd_read_modify_write_task
#define	sddump_do_read_of_rmw		ssddump_do_read_of_rmw

#define	sd_iostart_chain		ssd_iostart_chain
#define	sd_iodone_chain			ssd_iodone_chain
#define	sd_initpkt_map			ssd_initpkt_map
#define	sd_destroypkt_map		ssd_destroypkt_map
#define	sd_chain_type_map		ssd_chain_type_map
#define	sd_chain_index_map		ssd_chain_index_map

#define	sd_failfast_flushctl		ssd_failfast_flushctl
#define	sd_failfast_flushq		ssd_failfast_flushq
#define	sd_failfast_flushq_callback	ssd_failfast_flushq_callback

#define	sd_is_lsi			ssd_is_lsi
#define	sd_tg_rdwr			ssd_tg_rdwr
#define	sd_tg_getinfo			ssd_tg_getinfo

#endif /* #if (defined(__fibre)) */


int _init(void);
int _fini(void);
int _info(struct modinfo *modinfop);

/*PRINTFLIKE3*/
static void sd_log_trace(uint_t comp, struct sd_lun *un, const char *fmt, ...);
/*PRINTFLIKE3*/
static void sd_log_info(uint_t comp, struct sd_lun *un, const char *fmt, ...);
/*PRINTFLIKE3*/
static void sd_log_err(uint_t comp, struct sd_lun *un, const char *fmt, ...);

static int sdprobe(dev_info_t *devi);
static int sdinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg,
    void **result);
static int sd_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
    int mod_flags, char *name, caddr_t valuep, int *lengthp);

/*
 * Smart probe for parallel scsi
 */
static void sd_scsi_probe_cache_init(void);
static void sd_scsi_probe_cache_fini(void);
static void sd_scsi_clear_probe_cache(void);
static int sd_scsi_probe_with_cache(struct scsi_device *devp, int (*fn)());

/*
 * Attached luns on target for parallel scsi
 */
static void sd_scsi_target_lun_init(void);
static void sd_scsi_target_lun_fini(void);
static int sd_scsi_get_target_lun_count(dev_info_t *dip, int target);
static void sd_scsi_update_lun_on_target(dev_info_t *dip, int target, int flag);

static int sd_spin_up_unit(struct sd_lun *un);
#ifdef _LP64
static void sd_enable_descr_sense(struct sd_lun *un);
static void sd_reenable_dsense_task(void *arg);
#endif /* _LP64 */

static void sd_set_mmc_caps(struct sd_lun *un);

static void sd_read_unit_properties(struct sd_lun *un);
static int sd_process_sdconf_file(struct sd_lun *un);
static void sd_get_tunables_from_conf(struct sd_lun *un, int flags,
    int *data_list, sd_tunables *values);
static void sd_process_sdconf_table(struct sd_lun *un);
static int sd_sdconf_id_match(struct sd_lun *un, char *id, int idlen);
static int sd_blank_cmp(struct sd_lun *un, char *id, int idlen);
static int sd_chk_vers1_data(struct sd_lun *un, int flags, int *prop_list,
    int list_len, char *dataname_ptr);
static void sd_set_vers1_properties(struct sd_lun *un, int flags,
    sd_tunables *prop_list);

static void sd_register_devid(struct sd_lun *un, dev_info_t *devi,
    int reservation_flag);
static int sd_get_devid(struct sd_lun *un);
static ddi_devid_t sd_create_devid(struct sd_lun *un);
static int sd_write_deviceid(struct sd_lun *un);
static int sd_get_devid_page(struct sd_lun *un, uchar_t *wwn, int *len);
static int sd_check_vpd_page_support(struct sd_lun *un);

static void sd_setup_pm(struct sd_lun *un, dev_info_t *devi);
static void sd_create_pm_components(dev_info_t *devi, struct sd_lun *un);

static int sd_ddi_suspend(dev_info_t *devi);
static int sd_ddi_pm_suspend(struct sd_lun *un);
static int sd_ddi_resume(dev_info_t *devi);
static int sd_ddi_pm_resume(struct sd_lun *un);
static int sdpower(dev_info_t *devi, int component, int level);

static int sdattach(dev_info_t *devi, ddi_attach_cmd_t cmd);
static int sddetach(dev_info_t *devi, ddi_detach_cmd_t cmd);
static int sd_unit_attach(dev_info_t *devi);
static int sd_unit_detach(dev_info_t *devi);

static void sd_set_unit_attributes(struct sd_lun *un, dev_info_t *devi);
static void sd_create_errstats(struct sd_lun *un, int instance);
static void sd_set_errstats(struct sd_lun *un);
static void sd_set_pstats(struct sd_lun *un);

static int sddump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk);
static int sd_scsi_poll(struct sd_lun *un, struct scsi_pkt *pkt);
static int sd_send_polled_RQS(struct sd_lun *un);
static int sd_ddi_scsi_poll(struct scsi_pkt *pkt);

#if (defined(__fibre))
/*
 * Event callbacks (photon)
 */
static void sd_init_event_callbacks(struct sd_lun *un);
static void sd_event_callback(dev_info_t *, ddi_eventcookie_t, void *, void *);
#endif

/*
 * Defines for sd_cache_control
 */

#define	SD_CACHE_ENABLE		1
#define	SD_CACHE_DISABLE	0
#define	SD_CACHE_NOCHANGE	-1
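/*
 * Annotation (not part of the original source): sd_cache_control()
 * below takes one of these three values for each of its rcd_flag
 * (read cache disable) and wce_flag (write cache enable) arguments;
 * SD_CACHE_NOCHANGE leaves the corresponding caching mode page bit
 * untouched.
 */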

static int sd_cache_control(struct sd_lun *un, int rcd_flag, int wce_flag);
static int sd_get_write_cache_enabled(struct sd_lun *un, int *is_enabled);
static void sd_get_nv_sup(struct sd_lun *un);
static dev_t sd_make_device(dev_info_t *devi);

static void sd_update_block_info(struct sd_lun *un, uint32_t lbasize,
    uint64_t capacity);

/*
 * Driver entry point functions.
 */
static int sdopen(dev_t *dev_p, int flag, int otyp, cred_t *cred_p);
static int sdclose(dev_t dev, int flag, int otyp, cred_t *cred_p);
static int sd_ready_and_valid(struct sd_lun *un);

static void sdmin(struct buf *bp);
static int sdread(dev_t dev, struct uio *uio, cred_t *cred_p);
static int sdwrite(dev_t dev, struct uio *uio, cred_t *cred_p);
static int sdaread(dev_t dev, struct aio_req *aio, cred_t *cred_p);
static int sdawrite(dev_t dev, struct aio_req *aio, cred_t *cred_p);

static int sdstrategy(struct buf *bp);
static int sdioctl(dev_t, int, intptr_t, int, cred_t *, int *);

/*
 * Function prototypes for layering functions in the iostart chain.
 */
static void sd_mapblockaddr_iostart(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_mapblocksize_iostart(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_checksum_iostart(int index, struct sd_lun *un, struct buf *bp);
static void sd_checksum_uscsi_iostart(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_pm_iostart(int index, struct sd_lun *un, struct buf *bp);
static void sd_core_iostart(int index, struct sd_lun *un, struct buf *bp);

/*
 * Function prototypes for layering functions in the iodone chain.
 */
static void sd_buf_iodone(int index, struct sd_lun *un, struct buf *bp);
static void sd_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp);
static void sd_mapblockaddr_iodone(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_mapblocksize_iodone(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_checksum_iodone(int index, struct sd_lun *un, struct buf *bp);
static void sd_checksum_uscsi_iodone(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_pm_iodone(int index, struct sd_lun *un, struct buf *bp);
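/*
 * Annotation (not part of the original source): the iostart and iodone
 * prototypes above make up the driver's layered I/O chains. Each layer
 * receives its own chain index and passes the buf to the next layer by
 * index, so a request can travel down through (at most) mapblockaddr,
 * mapblocksize, checksum, and pm processing to the core layer on its
 * way to the HBA, and back up through the matching iodone layers on
 * completion.
 */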

/*
 * Prototypes for functions to support buf(9S) based IO.
 */
static void sd_xbuf_strategy(struct buf *bp, ddi_xbuf_t xp, void *arg);
static int sd_initpkt_for_buf(struct buf *, struct scsi_pkt **);
static void sd_destroypkt_for_buf(struct buf *);
static int sd_setup_rw_pkt(struct sd_lun *un, struct scsi_pkt **pktpp,
    struct buf *bp, int flags,
    int (*callback)(caddr_t), caddr_t callback_arg,
    diskaddr_t lba, uint32_t blockcount);
static int sd_setup_next_rw_pkt(struct sd_lun *un, struct scsi_pkt *pktp,
    struct buf *bp, diskaddr_t lba, uint32_t blockcount);

/*
 * Prototypes for functions to support USCSI IO.
 */
static int sd_uscsi_strategy(struct buf *bp);
static int sd_initpkt_for_uscsi(struct buf *, struct scsi_pkt **);
static void sd_destroypkt_for_uscsi(struct buf *);

static void sd_xbuf_init(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
    uchar_t chain_type, void *pktinfop);

static int sd_pm_entry(struct sd_lun *un);
static void sd_pm_exit(struct sd_lun *un);

static void sd_pm_idletimeout_handler(void *arg);

/*
 * sd_core internal functions (used at the sd_core_io layer).
 */
static void sd_add_buf_to_waitq(struct sd_lun *un, struct buf *bp);
static void sdintr(struct scsi_pkt *pktp);
static void sd_start_cmds(struct sd_lun *un, struct buf *immed_bp);

static int sd_send_scsi_cmd(dev_t dev, struct uscsi_cmd *incmd, int flag,
    enum uio_seg dataspace, int path_flag);

static struct buf *sd_bioclone_alloc(struct buf *bp, size_t datalen,
    daddr_t blkno, int (*func)(struct buf *));
static struct buf *sd_shadow_buf_alloc(struct buf *bp, size_t datalen,
    uint_t bflags, daddr_t blkno, int (*func)(struct buf *));
static void sd_bioclone_free(struct buf *bp);
static void sd_shadow_buf_free(struct buf *bp);

static void sd_print_transport_rejected_message(struct sd_lun *un,
    struct sd_xbuf *xp, int code);
static void sd_print_incomplete_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int code);
static void sd_print_sense_failed_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int code);
static void sd_print_cmd_incomplete_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int code);

static void sd_retry_command(struct sd_lun *un, struct buf *bp,
    int retry_check_flag,
    void (*user_funcp)(struct sd_lun *un, struct buf *bp, void *argp,
	int c),
    void *user_arg, int failure_code, clock_t retry_delay,
    void (*statp)(kstat_io_t *));

static void sd_set_retry_bp(struct sd_lun *un, struct buf *bp,
    clock_t retry_delay, void (*statp)(kstat_io_t *));

static void sd_send_request_sense_command(struct sd_lun *un, struct buf *bp,
    struct scsi_pkt *pktp);
static void sd_start_retry_command(void *arg);
static void sd_start_direct_priority_command(void *arg);
static void sd_return_failed_command(struct sd_lun *un, struct buf *bp,
    int errcode);
static void sd_return_failed_command_no_restart(struct sd_lun *un,
    struct buf *bp, int errcode);
static void sd_return_command(struct sd_lun *un, struct buf *bp);
static void sd_sync_with_callback(struct sd_lun *un);
static int sdrunout(caddr_t arg);

static void sd_mark_rqs_busy(struct sd_lun *un, struct buf *bp);
static struct buf *sd_mark_rqs_idle(struct sd_lun *un, struct sd_xbuf *xp);

static void sd_reduce_throttle(struct sd_lun *un, int throttle_type);
static void sd_restore_throttle(void *arg);

static void sd_init_cdb_limits(struct sd_lun *un);

static void sd_pkt_status_good(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);

/*
 * Error handling functions
 */
static void sd_pkt_status_check_condition(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_status_busy(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_status_reservation_conflict(struct sd_lun *un,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_status_qfull(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void sd_handle_request_sense(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_handle_auto_request_sense(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static int sd_validate_sense_data(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, size_t actual_len);
static void sd_decode_sense(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void sd_print_sense_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int code);

static void sd_sense_key_no_sense(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_recoverable_error(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_not_ready(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_medium_or_hardware_error(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_illegal_request(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_unit_attention(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_fail_command(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_blank_check(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_aborted_command(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_default(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void sd_print_retry_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int flag);

static void sd_pkt_reason_cmd_incomplete(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_tran_err(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_reset(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_aborted(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_timeout(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_unx_bus_free(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_tag_reject(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_default(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void sd_reset_target(struct sd_lun *un, struct scsi_pkt *pktp);

static void sd_start_stop_unit_callback(void *arg);
static void sd_start_stop_unit_task(void *arg);

static void sd_taskq_create(void);
static void sd_taskq_delete(void);
static void sd_media_change_task(void *arg);

static int sd_handle_mchange(struct sd_lun *un);
static int sd_send_scsi_DOORLOCK(struct sd_lun *un, int flag, int path_flag);
static int sd_send_scsi_READ_CAPACITY(struct sd_lun *un, uint64_t *capp,
    uint32_t *lbap, int path_flag);
static int sd_send_scsi_READ_CAPACITY_16(struct sd_lun *un, uint64_t *capp,
    uint32_t *lbap, int path_flag);
static int sd_send_scsi_START_STOP_UNIT(struct sd_lun *un, int flag,
    int path_flag);
static int sd_send_scsi_INQUIRY(struct sd_lun *un, uchar_t *bufaddr,
    size_t buflen, uchar_t evpd, uchar_t page_code, size_t *residp);
static int sd_send_scsi_TEST_UNIT_READY(struct sd_lun *un, int flag);
static int sd_send_scsi_PERSISTENT_RESERVE_IN(struct sd_lun *un,
    uchar_t usr_cmd, uint16_t data_len, uchar_t *data_bufp);
static int sd_send_scsi_PERSISTENT_RESERVE_OUT(struct sd_lun *un,
    uchar_t usr_cmd, uchar_t *usr_bufp);
static int sd_send_scsi_SYNCHRONIZE_CACHE(struct sd_lun *un,
    struct dk_callback *dkc);
static int sd_send_scsi_SYNCHRONIZE_CACHE_biodone(struct buf *bp);
static int sd_send_scsi_GET_CONFIGURATION(struct sd_lun *un,
    struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen,
    uchar_t *bufaddr, uint_t buflen, int path_flag);
static int sd_send_scsi_feature_GET_CONFIGURATION(struct sd_lun *un,
    struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen,
    uchar_t *bufaddr, uint_t buflen, char feature, int path_flag);
static int sd_send_scsi_MODE_SENSE(struct sd_lun *un, int cdbsize,
    uchar_t *bufaddr, size_t buflen, uchar_t page_code, int path_flag);
static int sd_send_scsi_MODE_SELECT(struct sd_lun *un, int cdbsize,
    uchar_t *bufaddr, size_t buflen, uchar_t save_page, int path_flag);
static int sd_send_scsi_RDWR(struct sd_lun *un, uchar_t cmd, void *bufaddr,
    size_t buflen, daddr_t start_block, int path_flag);
#define	sd_send_scsi_READ(un, bufaddr, buflen, start_block, path_flag) \
	sd_send_scsi_RDWR(un, SCMD_READ, bufaddr, buflen, start_block, \
	    path_flag)
#define	sd_send_scsi_WRITE(un, bufaddr, buflen, start_block, path_flag) \
	sd_send_scsi_RDWR(un, SCMD_WRITE, bufaddr, buflen, start_block, \
	    path_flag)
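/*
 * Illustrative use of the macros above (SD_PATH_STANDARD is one of the
 * sddef.h path_flag values; the variables are hypothetical):
 *
 *	rval = sd_send_scsi_READ(un, bufaddr, buflen, start_block,
 *	    SD_PATH_STANDARD);
 *
 * simply expands to the equivalent sd_send_scsi_RDWR() call with
 * SCMD_READ as the command.
 */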
caddr_t arg, int flag); 1471 static int sd_mhdioc_release(dev_t dev); 1472 static int sd_mhdioc_register_devid(dev_t dev); 1473 static int sd_mhdioc_inkeys(dev_t dev, caddr_t arg, int flag); 1474 static int sd_mhdioc_inresv(dev_t dev, caddr_t arg, int flag); 1475 1476 /* 1477 * SCSI removable prototypes 1478 */ 1479 static int sr_change_blkmode(dev_t dev, int cmd, intptr_t data, int flag); 1480 static int sr_change_speed(dev_t dev, int cmd, intptr_t data, int flag); 1481 static int sr_atapi_change_speed(dev_t dev, int cmd, intptr_t data, int flag); 1482 static int sr_pause_resume(dev_t dev, int mode); 1483 static int sr_play_msf(dev_t dev, caddr_t data, int flag); 1484 static int sr_play_trkind(dev_t dev, caddr_t data, int flag); 1485 static int sr_read_all_subcodes(dev_t dev, caddr_t data, int flag); 1486 static int sr_read_subchannel(dev_t dev, caddr_t data, int flag); 1487 static int sr_read_tocentry(dev_t dev, caddr_t data, int flag); 1488 static int sr_read_tochdr(dev_t dev, caddr_t data, int flag); 1489 static int sr_read_cdda(dev_t dev, caddr_t data, int flag); 1490 static int sr_read_cdxa(dev_t dev, caddr_t data, int flag); 1491 static int sr_read_mode1(dev_t dev, caddr_t data, int flag); 1492 static int sr_read_mode2(dev_t dev, caddr_t data, int flag); 1493 static int sr_read_cd_mode2(dev_t dev, caddr_t data, int flag); 1494 static int sr_sector_mode(dev_t dev, uint32_t blksize); 1495 static int sr_eject(dev_t dev); 1496 static void sr_ejected(register struct sd_lun *un); 1497 static int sr_check_wp(dev_t dev); 1498 static int sd_check_media(dev_t dev, enum dkio_state state); 1499 static int sd_media_watch_cb(caddr_t arg, struct scsi_watch_result *resultp); 1500 static void sd_delayed_cv_broadcast(void *arg); 1501 static int sr_volume_ctrl(dev_t dev, caddr_t data, int flag); 1502 static int sr_read_sony_session_offset(dev_t dev, caddr_t data, int flag); 1503 1504 static int sd_log_page_supported(struct sd_lun *un, int log_page); 1505 1506 /* 1507 * Function Prototype for the non-512 support (DVDRAM, MO etc.) functions. 1508 */ 1509 static void sd_check_for_writable_cd(struct sd_lun *un, int path_flag); 1510 static int sd_wm_cache_constructor(void *wm, void *un, int flags); 1511 static void sd_wm_cache_destructor(void *wm, void *un); 1512 static struct sd_w_map *sd_range_lock(struct sd_lun *un, daddr_t startb, 1513 daddr_t endb, ushort_t typ); 1514 static struct sd_w_map *sd_get_range(struct sd_lun *un, daddr_t startb, 1515 daddr_t endb); 1516 static void sd_free_inlist_wmap(struct sd_lun *un, struct sd_w_map *wmp); 1517 static void sd_range_unlock(struct sd_lun *un, struct sd_w_map *wm); 1518 static void sd_read_modify_write_task(void * arg); 1519 static int 1520 sddump_do_read_of_rmw(struct sd_lun *un, uint64_t blkno, uint64_t nblk, 1521 struct buf **bpp); 1522 1523 1524 /* 1525 * Function prototypes for failfast support. 
1526 */ 1527 static void sd_failfast_flushq(struct sd_lun *un); 1528 static int sd_failfast_flushq_callback(struct buf *bp); 1529 1530 /* 1531 * Function prototypes to check for lsi devices 1532 */ 1533 static void sd_is_lsi(struct sd_lun *un); 1534 1535 /* 1536 * Function prototypes for partial DMA support 1537 */ 1538 static int sd_setup_next_xfer(struct sd_lun *un, struct buf *bp, 1539 struct scsi_pkt *pkt, struct sd_xbuf *xp); 1540 1541 1542 /* Function prototypes for cmlb */ 1543 static int sd_tg_rdwr(dev_info_t *devi, uchar_t cmd, void *bufaddr, 1544 diskaddr_t start_block, size_t reqlength, void *tg_cookie); 1545 1546 static int sd_tg_getinfo(dev_info_t *devi, int cmd, void *arg, void *tg_cookie); 1547 1548 /* 1549 * Constants for failfast support: 1550 * 1551 * SD_FAILFAST_INACTIVE: Instance is currently in a normal state, with NO 1552 * failfast processing being performed. 1553 * 1554 * SD_FAILFAST_ACTIVE: Instance is in the failfast state and is performing 1555 * failfast processing on all bufs with B_FAILFAST set. 1556 */ 1557 1558 #define SD_FAILFAST_INACTIVE 0 1559 #define SD_FAILFAST_ACTIVE 1 1560 1561 /* 1562 * Bitmask to control behavior of buf(9S) flushes when a transition to 1563 * the failfast state occurs. Optional bits include: 1564 * 1565 * SD_FAILFAST_FLUSH_ALL_BUFS: When set, flush ALL bufs including those that 1566 * do NOT have B_FAILFAST set. When clear, only bufs with B_FAILFAST will 1567 * be flushed. 1568 * 1569 * SD_FAILFAST_FLUSH_ALL_QUEUES: When set, flush any/all other queues in the 1570 * driver, in addition to the regular wait queue. This includes the xbuf 1571 * queues. When clear, only the driver's wait queue will be flushed. 1572 */ 1573 #define SD_FAILFAST_FLUSH_ALL_BUFS 0x01 1574 #define SD_FAILFAST_FLUSH_ALL_QUEUES 0x02 1575 1576 /* 1577 * The default behavior is to only flush bufs that have B_FAILFAST set, but 1578 * to flush all queues within the driver. 1579 */ 1580 static int sd_failfast_flushctl = SD_FAILFAST_FLUSH_ALL_QUEUES; 1581 1582 1583 /* 1584 * SD Testing Fault Injection 1585 */ 1586 #ifdef SD_FAULT_INJECTION 1587 static void sd_faultinjection_ioctl(int cmd, intptr_t arg, struct sd_lun *un); 1588 static void sd_faultinjection(struct scsi_pkt *pktp); 1589 static void sd_injection_log(char *buf, struct sd_lun *un); 1590 #endif 1591 1592 /* 1593 * Device driver ops vector 1594 */ 1595 static struct cb_ops sd_cb_ops = { 1596 sdopen, /* open */ 1597 sdclose, /* close */ 1598 sdstrategy, /* strategy */ 1599 nodev, /* print */ 1600 sddump, /* dump */ 1601 sdread, /* read */ 1602 sdwrite, /* write */ 1603 sdioctl, /* ioctl */ 1604 nodev, /* devmap */ 1605 nodev, /* mmap */ 1606 nodev, /* segmap */ 1607 nochpoll, /* poll */ 1608 sd_prop_op, /* cb_prop_op */ 1609 0, /* streamtab */ 1610 D_64BIT | D_MP | D_NEW | D_HOTPLUG, /* Driver compatibility flags */ 1611 CB_REV, /* cb_rev */ 1612 sdaread, /* async I/O read entry point */ 1613 sdawrite /* async I/O write entry point */ 1614 }; 1615 1616 static struct dev_ops sd_ops = { 1617 DEVO_REV, /* devo_rev, */ 1618 0, /* refcnt */ 1619 sdinfo, /* info */ 1620 nulldev, /* identify */ 1621 sdprobe, /* probe */ 1622 sdattach, /* attach */ 1623 sddetach, /* detach */ 1624 nodev, /* reset */ 1625 &sd_cb_ops, /* driver operations */ 1626 NULL, /* bus operations */ 1627 sdpower /* power */ 1628 }; 1629 1630 1631 /* 1632 * This is the loadable module wrapper. 1633 */ 1634 #include <sys/modctl.h> 1635 1636 static struct modldrv modldrv = { 1637 &mod_driverops, /* Type of module. 
This one is a driver */
1638 	SD_MODULE_NAME,		/* Module name. */
1639 	&sd_ops			/* driver ops */
1640 };
1641 
1642 
1643 static struct modlinkage modlinkage = {
1644 	MODREV_1,
1645 	&modldrv,
1646 	NULL
1647 };
1648 
1649 static cmlb_tg_ops_t sd_tgops = {
1650 	TG_DK_OPS_VERSION_1,
1651 	sd_tg_rdwr,
1652 	sd_tg_getinfo
1653 };
1654 
1655 static struct scsi_asq_key_strings sd_additional_codes[] = {
1656 	0x81, 0, "Logical Unit is Reserved",
1657 	0x85, 0, "Audio Address Not Valid",
1658 	0xb6, 0, "Media Load Mechanism Failed",
1659 	0xB9, 0, "Audio Play Operation Aborted",
1660 	0xbf, 0, "Buffer Overflow for Read All Subcodes Command",
1661 	0x53, 2, "Medium removal prevented",
1662 	0x6f, 0, "Authentication failed during key exchange",
1663 	0x6f, 1, "Key not present",
1664 	0x6f, 2, "Key not established",
1665 	0x6f, 3, "Read without proper authentication",
1666 	0x6f, 4, "Mismatched region to this logical unit",
1667 	0x6f, 5, "Region reset count error",
1668 	0xffff, 0x0, NULL
1669 };
1670 
1671 
1672 /*
1673  * Struct for passing printing information for sense data messages
1674  */
1675 struct sd_sense_info {
1676 	int	ssi_severity;
1677 	int	ssi_pfa_flag;
1678 };
1679 
1680 /*
1681  * Table of function pointers for iostart-side routines. Separate "chains"
1682  * of layered function calls are formed by placing the function pointers
1683  * sequentially in the desired order. Functions are called in order of
1684  * incrementing table index. The last function in each chain must
1685  * be sd_core_iostart(). The corresponding iodone-side routines are expected
1686  * in the sd_iodone_chain[] array.
1687  *
1688  * Note: It may seem more natural to organize both the iostart and iodone
1689  * functions together, into an array of structures (or some similar
1690  * organization) with a common index, rather than two separate arrays which
1691  * must be maintained in synchronization. The purpose of this division is
1692  * to achieve improved performance: individual arrays allow for more
1693  * effective cache line utilization on certain platforms.
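 *
 * To illustrate the traversal (a minimal sketch only; layer_a is a
 * hypothetical layer, not one of the driver's routines), each layer hands
 * the buf to the next entry by incrementing the shared index, which is
 * exactly what the SD_NEXT_IOSTART() macro defined later in this file does:
 *
 *	static void layer_a(int index, struct sd_lun *un, struct buf *bp)
 *	{
 *		... this layer's processing of bp ...
 *		(*(sd_iostart_chain[index + 1]))(index + 1, un, bp);
 *	}
 *
 * An IO entering at index 0 thus visits indexes 0, 1 and 2 in turn,
 * ending in sd_core_iostart().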
1694 */ 1695 1696 typedef void (*sd_chain_t)(int index, struct sd_lun *un, struct buf *bp); 1697 1698 1699 static sd_chain_t sd_iostart_chain[] = { 1700 1701 /* Chain for buf IO for disk drive targets (PM enabled) */ 1702 sd_mapblockaddr_iostart, /* Index: 0 */ 1703 sd_pm_iostart, /* Index: 1 */ 1704 sd_core_iostart, /* Index: 2 */ 1705 1706 /* Chain for buf IO for disk drive targets (PM disabled) */ 1707 sd_mapblockaddr_iostart, /* Index: 3 */ 1708 sd_core_iostart, /* Index: 4 */ 1709 1710 /* Chain for buf IO for removable-media targets (PM enabled) */ 1711 sd_mapblockaddr_iostart, /* Index: 5 */ 1712 sd_mapblocksize_iostart, /* Index: 6 */ 1713 sd_pm_iostart, /* Index: 7 */ 1714 sd_core_iostart, /* Index: 8 */ 1715 1716 /* Chain for buf IO for removable-media targets (PM disabled) */ 1717 sd_mapblockaddr_iostart, /* Index: 9 */ 1718 sd_mapblocksize_iostart, /* Index: 10 */ 1719 sd_core_iostart, /* Index: 11 */ 1720 1721 /* Chain for buf IO for disk drives with checksumming (PM enabled) */ 1722 sd_mapblockaddr_iostart, /* Index: 12 */ 1723 sd_checksum_iostart, /* Index: 13 */ 1724 sd_pm_iostart, /* Index: 14 */ 1725 sd_core_iostart, /* Index: 15 */ 1726 1727 /* Chain for buf IO for disk drives with checksumming (PM disabled) */ 1728 sd_mapblockaddr_iostart, /* Index: 16 */ 1729 sd_checksum_iostart, /* Index: 17 */ 1730 sd_core_iostart, /* Index: 18 */ 1731 1732 /* Chain for USCSI commands (all targets) */ 1733 sd_pm_iostart, /* Index: 19 */ 1734 sd_core_iostart, /* Index: 20 */ 1735 1736 /* Chain for checksumming USCSI commands (all targets) */ 1737 sd_checksum_uscsi_iostart, /* Index: 21 */ 1738 sd_pm_iostart, /* Index: 22 */ 1739 sd_core_iostart, /* Index: 23 */ 1740 1741 /* Chain for "direct" USCSI commands (all targets) */ 1742 sd_core_iostart, /* Index: 24 */ 1743 1744 /* Chain for "direct priority" USCSI commands (all targets) */ 1745 sd_core_iostart, /* Index: 25 */ 1746 }; 1747 1748 /* 1749 * Macros to locate the first function of each iostart chain in the 1750 * sd_iostart_chain[] array. These are located by the index in the array. 1751 */ 1752 #define SD_CHAIN_DISK_IOSTART 0 1753 #define SD_CHAIN_DISK_IOSTART_NO_PM 3 1754 #define SD_CHAIN_RMMEDIA_IOSTART 5 1755 #define SD_CHAIN_RMMEDIA_IOSTART_NO_PM 9 1756 #define SD_CHAIN_CHKSUM_IOSTART 12 1757 #define SD_CHAIN_CHKSUM_IOSTART_NO_PM 16 1758 #define SD_CHAIN_USCSI_CMD_IOSTART 19 1759 #define SD_CHAIN_USCSI_CHKSUM_IOSTART 21 1760 #define SD_CHAIN_DIRECT_CMD_IOSTART 24 1761 #define SD_CHAIN_PRIORITY_CMD_IOSTART 25 1762 1763 1764 /* 1765 * Table of function pointers for the iodone-side routines for the driver- 1766 * internal layering mechanism. The calling sequence for iodone routines 1767 * uses a decrementing table index, so the last routine called in a chain 1768 * must be at the lowest array index location for that chain. The last 1769 * routine for each chain must be either sd_buf_iodone() (for buf(9S) IOs) 1770 * or sd_uscsi_iodone() (for uscsi IOs). Other than this, the ordering 1771 * of the functions in an iodone side chain must correspond to the ordering 1772 * of the iostart routines for that chain. Note that there is no iodone 1773 * side routine that corresponds to sd_core_iostart(), so there is no 1774 * entry in the table for this. 
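 *
 * As a worked example (values read off the tables in this file): a disk
 * buf IO with PM enabled enters the iodone side at index 2 (sd_pm_iodone)
 * and is handed upward by decrementing the index, i.e. 2 -> 1 -> 0,
 * finishing in sd_buf_iodone(). Each step is the
 *
 *	(*(sd_iodone_chain[(index) - 1]))((index) - 1, un, bp)
 *
 * call that the SD_NEXT_IODONE() macro defined later in this file
 * expands to.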
1775  */
1776 
1777 static sd_chain_t sd_iodone_chain[] = {
1778 
1779 	/* Chain for buf IO for disk drive targets (PM enabled) */
1780 	sd_buf_iodone,			/* Index: 0 */
1781 	sd_mapblockaddr_iodone,		/* Index: 1 */
1782 	sd_pm_iodone,			/* Index: 2 */
1783 
1784 	/* Chain for buf IO for disk drive targets (PM disabled) */
1785 	sd_buf_iodone,			/* Index: 3 */
1786 	sd_mapblockaddr_iodone,		/* Index: 4 */
1787 
1788 	/* Chain for buf IO for removable-media targets (PM enabled) */
1789 	sd_buf_iodone,			/* Index: 5 */
1790 	sd_mapblockaddr_iodone,		/* Index: 6 */
1791 	sd_mapblocksize_iodone,		/* Index: 7 */
1792 	sd_pm_iodone,			/* Index: 8 */
1793 
1794 	/* Chain for buf IO for removable-media targets (PM disabled) */
1795 	sd_buf_iodone,			/* Index: 9 */
1796 	sd_mapblockaddr_iodone,		/* Index: 10 */
1797 	sd_mapblocksize_iodone,		/* Index: 11 */
1798 
1799 	/* Chain for buf IO for disk drives with checksumming (PM enabled) */
1800 	sd_buf_iodone,			/* Index: 12 */
1801 	sd_mapblockaddr_iodone,		/* Index: 13 */
1802 	sd_checksum_iodone,		/* Index: 14 */
1803 	sd_pm_iodone,			/* Index: 15 */
1804 
1805 	/* Chain for buf IO for disk drives with checksumming (PM disabled) */
1806 	sd_buf_iodone,			/* Index: 16 */
1807 	sd_mapblockaddr_iodone,		/* Index: 17 */
1808 	sd_checksum_iodone,		/* Index: 18 */
1809 
1810 	/* Chain for USCSI commands (non-checksum targets) */
1811 	sd_uscsi_iodone,		/* Index: 19 */
1812 	sd_pm_iodone,			/* Index: 20 */
1813 
1814 	/* Chain for USCSI commands (checksum targets) */
1815 	sd_uscsi_iodone,		/* Index: 21 */
1816 	sd_checksum_uscsi_iodone,	/* Index: 22 */
1817 	sd_pm_iodone,			/* Index: 23 */
1818 
1819 	/* Chain for "direct" USCSI commands (all targets) */
1820 	sd_uscsi_iodone,		/* Index: 24 */
1821 
1822 	/* Chain for "direct priority" USCSI commands (all targets) */
1823 	sd_uscsi_iodone,		/* Index: 25 */
1824 };
1825 
1826 
1827 /*
1828  * Macros to locate the "first" function in the sd_iodone_chain[] array for
1829  * each iodone-side chain. These are located by the array index, but as the
1830  * iodone side functions are called in a decrementing-index order, the
1831  * highest index number in each chain must be specified (as these correspond
1832  * to the first function in the iodone chain that will be called by the core
1833  * at IO completion time).
1834  */
1835 
1836 #define	SD_CHAIN_DISK_IODONE			2
1837 #define	SD_CHAIN_DISK_IODONE_NO_PM		4
1838 #define	SD_CHAIN_RMMEDIA_IODONE			8
1839 #define	SD_CHAIN_RMMEDIA_IODONE_NO_PM		11
1840 #define	SD_CHAIN_CHKSUM_IODONE			15
1841 #define	SD_CHAIN_CHKSUM_IODONE_NO_PM		18
1842 #define	SD_CHAIN_USCSI_CMD_IODONE		20
1843 #define	SD_CHAIN_USCSI_CHKSUM_IODONE		23
1844 #define	SD_CHAIN_DIRECT_CMD_IODONE		24
1845 #define	SD_CHAIN_PRIORITY_CMD_IODONE		25
1846 
1847 
1848 
1849 
1850 /*
1851  * Array to map a layering chain index to the appropriate initpkt routine.
1852  * The redundant entries are present so that the index used for accessing
1853  * the above sd_iostart_chain and sd_iodone_chain tables can be used directly
1854  * with this table as well.
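 *
 * For example (values read off the tables above): a regular disk buf IO
 * enters at SD_CHAIN_DISK_IOSTART (index 0), so its xbuf records
 * xb_chain_iostart == 0, and that same index later selects
 * sd_initpkt_map[0] == sd_initpkt_for_buf when the transport packet is
 * built. No separate per-map index needs to be carried in the xbuf.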
1855  */
1856 typedef int (*sd_initpkt_t)(struct buf *, struct scsi_pkt **);
1857 
1858 static sd_initpkt_t	sd_initpkt_map[] = {
1859 
1860 	/* Chain for buf IO for disk drive targets (PM enabled) */
1861 	sd_initpkt_for_buf,		/* Index: 0 */
1862 	sd_initpkt_for_buf,		/* Index: 1 */
1863 	sd_initpkt_for_buf,		/* Index: 2 */
1864 
1865 	/* Chain for buf IO for disk drive targets (PM disabled) */
1866 	sd_initpkt_for_buf,		/* Index: 3 */
1867 	sd_initpkt_for_buf,		/* Index: 4 */
1868 
1869 	/* Chain for buf IO for removable-media targets (PM enabled) */
1870 	sd_initpkt_for_buf,		/* Index: 5 */
1871 	sd_initpkt_for_buf,		/* Index: 6 */
1872 	sd_initpkt_for_buf,		/* Index: 7 */
1873 	sd_initpkt_for_buf,		/* Index: 8 */
1874 
1875 	/* Chain for buf IO for removable-media targets (PM disabled) */
1876 	sd_initpkt_for_buf,		/* Index: 9 */
1877 	sd_initpkt_for_buf,		/* Index: 10 */
1878 	sd_initpkt_for_buf,		/* Index: 11 */
1879 
1880 	/* Chain for buf IO for disk drives with checksumming (PM enabled) */
1881 	sd_initpkt_for_buf,		/* Index: 12 */
1882 	sd_initpkt_for_buf,		/* Index: 13 */
1883 	sd_initpkt_for_buf,		/* Index: 14 */
1884 	sd_initpkt_for_buf,		/* Index: 15 */
1885 
1886 	/* Chain for buf IO for disk drives with checksumming (PM disabled) */
1887 	sd_initpkt_for_buf,		/* Index: 16 */
1888 	sd_initpkt_for_buf,		/* Index: 17 */
1889 	sd_initpkt_for_buf,		/* Index: 18 */
1890 
1891 	/* Chain for USCSI commands (non-checksum targets) */
1892 	sd_initpkt_for_uscsi,		/* Index: 19 */
1893 	sd_initpkt_for_uscsi,		/* Index: 20 */
1894 
1895 	/* Chain for USCSI commands (checksum targets) */
1896 	sd_initpkt_for_uscsi,		/* Index: 21 */
1897 	sd_initpkt_for_uscsi,		/* Index: 22 */
1898 	sd_initpkt_for_uscsi,		/* Index: 23 */
1899 
1900 	/* Chain for "direct" USCSI commands (all targets) */
1901 	sd_initpkt_for_uscsi,		/* Index: 24 */
1902 
1903 	/* Chain for "direct priority" USCSI commands (all targets) */
1904 	sd_initpkt_for_uscsi,		/* Index: 25 */
1905 
1906 };
1907 
1908 
1909 /*
1910  * Array to map a layering chain index to the appropriate destroypkt routine.
1911  * The redundant entries are present so that the index used for accessing
1912  * the above sd_iostart_chain and sd_iodone_chain tables can be used directly
1913  * with this table as well.
1914  */
1915 typedef void (*sd_destroypkt_t)(struct buf *);
1916 
1917 static sd_destroypkt_t	sd_destroypkt_map[] = {
1918 
1919 	/* Chain for buf IO for disk drive targets (PM enabled) */
1920 	sd_destroypkt_for_buf,		/* Index: 0 */
1921 	sd_destroypkt_for_buf,		/* Index: 1 */
1922 	sd_destroypkt_for_buf,		/* Index: 2 */
1923 
1924 	/* Chain for buf IO for disk drive targets (PM disabled) */
1925 	sd_destroypkt_for_buf,		/* Index: 3 */
1926 	sd_destroypkt_for_buf,		/* Index: 4 */
1927 
1928 	/* Chain for buf IO for removable-media targets (PM enabled) */
1929 	sd_destroypkt_for_buf,		/* Index: 5 */
1930 	sd_destroypkt_for_buf,		/* Index: 6 */
1931 	sd_destroypkt_for_buf,		/* Index: 7 */
1932 	sd_destroypkt_for_buf,		/* Index: 8 */
1933 
1934 	/* Chain for buf IO for removable-media targets (PM disabled) */
1935 	sd_destroypkt_for_buf,		/* Index: 9 */
1936 	sd_destroypkt_for_buf,		/* Index: 10 */
1937 	sd_destroypkt_for_buf,		/* Index: 11 */
1938 
1939 	/* Chain for buf IO for disk drives with checksumming (PM enabled) */
1940 	sd_destroypkt_for_buf,		/* Index: 12 */
1941 	sd_destroypkt_for_buf,		/* Index: 13 */
1942 	sd_destroypkt_for_buf,		/* Index: 14 */
1943 	sd_destroypkt_for_buf,		/* Index: 15 */
1944 
1945 	/* Chain for buf IO for disk drives with checksumming (PM disabled) */
1946 	sd_destroypkt_for_buf,		/* Index: 16 */
1947 	sd_destroypkt_for_buf,		/* Index: 17 */
1948 	sd_destroypkt_for_buf,		/* Index: 18 */
1949 
1950 	/* Chain for USCSI commands (non-checksum targets) */
1951 	sd_destroypkt_for_uscsi,	/* Index: 19 */
1952 	sd_destroypkt_for_uscsi,	/* Index: 20 */
1953 
1954 	/* Chain for USCSI commands (checksum targets) */
1955 	sd_destroypkt_for_uscsi,	/* Index: 21 */
1956 	sd_destroypkt_for_uscsi,	/* Index: 22 */
1957 	sd_destroypkt_for_uscsi,	/* Index: 23 */
1958 
1959 	/* Chain for "direct" USCSI commands (all targets) */
1960 	sd_destroypkt_for_uscsi,	/* Index: 24 */
1961 
1962 	/* Chain for "direct priority" USCSI commands (all targets) */
1963 	sd_destroypkt_for_uscsi,	/* Index: 25 */
1964 
1965 };
1966 
1967 
1968 
1969 /*
1970  * Array to map a layering chain index to the appropriate chain "type".
1971  * The chain type indicates a specific property/usage of the chain.
1972  * The redundant entries are present so that the index used for accessing
1973  * the above sd_iostart_chain and sd_iodone_chain tables can be used directly
1974  * with this table as well.
1975  */
1976 
1977 #define	SD_CHAIN_NULL			0	/* for the special RQS cmd */
1978 #define	SD_CHAIN_BUFIO			1	/* regular buf IO */
1979 #define	SD_CHAIN_USCSI			2	/* regular USCSI commands */
1980 #define	SD_CHAIN_DIRECT			3	/* uscsi, w/ bypass power mgt */
1981 #define	SD_CHAIN_DIRECT_PRIORITY	4	/* uscsi, w/ bypass power mgt */
1982 						/* (for error recovery) */
1983 
1984 static int sd_chain_type_map[] = {
1985 
1986 	/* Chain for buf IO for disk drive targets (PM enabled) */
1987 	SD_CHAIN_BUFIO,			/* Index: 0 */
1988 	SD_CHAIN_BUFIO,			/* Index: 1 */
1989 	SD_CHAIN_BUFIO,			/* Index: 2 */
1990 
1991 	/* Chain for buf IO for disk drive targets (PM disabled) */
1992 	SD_CHAIN_BUFIO,			/* Index: 3 */
1993 	SD_CHAIN_BUFIO,			/* Index: 4 */
1994 
1995 	/* Chain for buf IO for removable-media targets (PM enabled) */
1996 	SD_CHAIN_BUFIO,			/* Index: 5 */
1997 	SD_CHAIN_BUFIO,			/* Index: 6 */
1998 	SD_CHAIN_BUFIO,			/* Index: 7 */
1999 	SD_CHAIN_BUFIO,			/* Index: 8 */
2000 
2001 	/* Chain for buf IO for removable-media targets (PM disabled) */
2002 	SD_CHAIN_BUFIO,			/* Index: 9 */
2003 	SD_CHAIN_BUFIO,			/* Index: 10 */
2004 	SD_CHAIN_BUFIO,			/* Index: 11 */
2005 
2006 	/* Chain for buf IO for disk drives with checksumming (PM enabled) */
2007 	SD_CHAIN_BUFIO,			/* Index: 12 */
2008 	SD_CHAIN_BUFIO,			/* Index: 13 */
2009 	SD_CHAIN_BUFIO,			/* Index: 14 */
2010 	SD_CHAIN_BUFIO,			/* Index: 15 */
2011 
2012 	/* Chain for buf IO for disk drives with checksumming (PM disabled) */
2013 	SD_CHAIN_BUFIO,			/* Index: 16 */
2014 	SD_CHAIN_BUFIO,			/* Index: 17 */
2015 	SD_CHAIN_BUFIO,			/* Index: 18 */
2016 
2017 	/* Chain for USCSI commands (non-checksum targets) */
2018 	SD_CHAIN_USCSI,			/* Index: 19 */
2019 	SD_CHAIN_USCSI,			/* Index: 20 */
2020 
2021 	/* Chain for USCSI commands (checksum targets) */
2022 	SD_CHAIN_USCSI,			/* Index: 21 */
2023 	SD_CHAIN_USCSI,			/* Index: 22 */
2024 	SD_CHAIN_USCSI,			/* Index: 23 */
2025 
2026 	/* Chain for "direct" USCSI commands (all targets) */
2027 	SD_CHAIN_DIRECT,		/* Index: 24 */
2028 
2029 	/* Chain for "direct priority" USCSI commands (all targets) */
2030 	SD_CHAIN_DIRECT_PRIORITY,	/* Index: 25 */
2031 };
2032 
2033 
2034 /* Macro to return TRUE if the IO has come from the sd_buf_iostart() chain. */
2035 #define	SD_IS_BUFIO(xp)			\
2036 	(sd_chain_type_map[(xp)->xb_chain_iostart] == SD_CHAIN_BUFIO)
2037 
2038 /* Macro to return TRUE if the IO has come from the "direct priority" chain. */
2039 #define	SD_IS_DIRECT_PRIORITY(xp)	\
2040 	(sd_chain_type_map[(xp)->xb_chain_iostart] == SD_CHAIN_DIRECT_PRIORITY)
2041 
2042 
2043 
2044 /*
2045  * Struct, array, and macros to map a specific chain to the appropriate
2046  * layering indexes in the sd_iostart_chain[] and sd_iodone_chain[] arrays.
2047  *
2048  * The sd_chain_index_map[] array is used at attach time to set the various
2049  * un_xxx_chain type members of the sd_lun softstate to the specific layering
2050  * chain to be used with the instance. This allows different instances to use
2051  * different chains for buf IO, uscsi IO, etc. Also, since the xb_chain_iostart
2052  * and xb_chain_iodone index values in the sd_xbuf are initialized to these
2053  * values at sd_xbuf init time, this allows (1) layering chains to be changed
2054  * dynamically and without locking; and (2) a layer to update the
2055  * xb_chain_io[start|done] member in a given xbuf with its current index value,
2056  * allowing deferred processing of an IO within the same chain from a
2057  * different execution context.
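 *
 * A minimal sketch of the intended usage (illustrative only; the actual
 * selection logic lives in the attach and xbuf-init code later in this
 * file): attach picks one of the SD_CHAIN_INFO_* values defined below
 * for the instance,
 *
 *	un->un_buf_chain_type = SD_CHAIN_INFO_DISK;
 *
 * and each new xbuf is then stamped with that chain's two indexes:
 *
 *	struct sd_chain_index *ip =
 *	    &sd_chain_index_map[un->un_buf_chain_type];
 *	xp->xb_chain_iostart = ip->sci_iostart_index;
 *	xp->xb_chain_iodone = ip->sci_iodone_index;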
2058  */
2059 
2060 struct sd_chain_index {
2061 	int	sci_iostart_index;
2062 	int	sci_iodone_index;
2063 };
2064 
2065 static struct sd_chain_index	sd_chain_index_map[] = {
2066 	{ SD_CHAIN_DISK_IOSTART,		SD_CHAIN_DISK_IODONE },
2067 	{ SD_CHAIN_DISK_IOSTART_NO_PM,		SD_CHAIN_DISK_IODONE_NO_PM },
2068 	{ SD_CHAIN_RMMEDIA_IOSTART,		SD_CHAIN_RMMEDIA_IODONE },
2069 	{ SD_CHAIN_RMMEDIA_IOSTART_NO_PM,	SD_CHAIN_RMMEDIA_IODONE_NO_PM },
2070 	{ SD_CHAIN_CHKSUM_IOSTART,		SD_CHAIN_CHKSUM_IODONE },
2071 	{ SD_CHAIN_CHKSUM_IOSTART_NO_PM,	SD_CHAIN_CHKSUM_IODONE_NO_PM },
2072 	{ SD_CHAIN_USCSI_CMD_IOSTART,		SD_CHAIN_USCSI_CMD_IODONE },
2073 	{ SD_CHAIN_USCSI_CHKSUM_IOSTART,	SD_CHAIN_USCSI_CHKSUM_IODONE },
2074 	{ SD_CHAIN_DIRECT_CMD_IOSTART,		SD_CHAIN_DIRECT_CMD_IODONE },
2075 	{ SD_CHAIN_PRIORITY_CMD_IOSTART,	SD_CHAIN_PRIORITY_CMD_IODONE },
2076 };
2077 
2078 
2079 /*
2080  * The following are indexes into the sd_chain_index_map[] array.
2081  */
2082 
2083 /* un->un_buf_chain_type must be set to one of these */
2084 #define	SD_CHAIN_INFO_DISK		0
2085 #define	SD_CHAIN_INFO_DISK_NO_PM	1
2086 #define	SD_CHAIN_INFO_RMMEDIA		2
2087 #define	SD_CHAIN_INFO_RMMEDIA_NO_PM	3
2088 #define	SD_CHAIN_INFO_CHKSUM		4
2089 #define	SD_CHAIN_INFO_CHKSUM_NO_PM	5
2090 
2091 /* un->un_uscsi_chain_type must be set to one of these */
2092 #define	SD_CHAIN_INFO_USCSI_CMD		6
2093 /* USCSI with PM disabled is the same as DIRECT */
2094 #define	SD_CHAIN_INFO_USCSI_CMD_NO_PM	8
2095 #define	SD_CHAIN_INFO_USCSI_CHKSUM	7
2096 
2097 /* un->un_direct_chain_type must be set to one of these */
2098 #define	SD_CHAIN_INFO_DIRECT_CMD	8
2099 
2100 /* un->un_priority_chain_type must be set to one of these */
2101 #define	SD_CHAIN_INFO_PRIORITY_CMD	9
2102 
2103 /* size for devid inquiries */
2104 #define	MAX_INQUIRY_SIZE	0xF0
2105 
2106 /*
2107  * Macros used by functions to pass a given buf(9S) struct along to the
2108  * next function in the layering chain for further processing.
2109  *
2110  * In the following macros, passing more than three arguments to the called
2111  * routines causes the optimizer for the SPARC compiler to stop doing tail
2112  * call elimination, which results in significant performance degradation.
2113  */
2114 #define	SD_BEGIN_IOSTART(index, un, bp)	\
2115 	((*(sd_iostart_chain[index]))(index, un, bp))
2116 
2117 #define	SD_BEGIN_IODONE(index, un, bp)	\
2118 	((*(sd_iodone_chain[index]))(index, un, bp))
2119 
2120 #define	SD_NEXT_IOSTART(index, un, bp)	\
2121 	((*(sd_iostart_chain[(index) + 1]))((index) + 1, un, bp))
2122 
2123 #define	SD_NEXT_IODONE(index, un, bp)	\
2124 	((*(sd_iodone_chain[(index) - 1]))((index) - 1, un, bp))
2125 
2126 /*
2127  * Function: _init
2128  *
2129  * Description: This is the driver _init(9E) entry point.
2130  *
2131  * Return Code: Returns the value from mod_install(9F) or
2132  *		ddi_soft_state_init(9F) as appropriate.
2133  *
2134  * Context: Called when the driver module is loaded.
2135  */
2136 
2137 int
2138 _init(void)
2139 {
2140 	int err;
2141 
2142 	/* establish driver name from module name */
2143 	sd_label = mod_modname(&modlinkage);
2144 
2145 	err = ddi_soft_state_init(&sd_state, sizeof (struct sd_lun),
2146 	    SD_MAXUNIT);
2147 
2148 	if (err != 0) {
2149 		return (err);
2150 	}
2151 
2152 	mutex_init(&sd_detach_mutex, NULL, MUTEX_DRIVER, NULL);
2153 	mutex_init(&sd_log_mutex, NULL, MUTEX_DRIVER, NULL);
2154 	mutex_init(&sd_label_mutex, NULL, MUTEX_DRIVER, NULL);
2155 
2156 	mutex_init(&sd_tr.srq_resv_reclaim_mutex, NULL, MUTEX_DRIVER, NULL);
2157 	cv_init(&sd_tr.srq_resv_reclaim_cv, NULL, CV_DRIVER, NULL);
2158 	cv_init(&sd_tr.srq_inprocess_cv, NULL, CV_DRIVER, NULL);
2159 
2160 	/*
2161 	 * it's ok to init here even for fibre device
2162 	 */
2163 	sd_scsi_probe_cache_init();
2164 
2165 	sd_scsi_target_lun_init();
2166 
2167 	/*
2168 	 * Creating taskq before mod_install ensures that all callers (threads)
2169 	 * that enter the module after a successful mod_install encounter
2170 	 * a valid taskq.
2171 	 */
2172 	sd_taskq_create();
2173 
2174 	err = mod_install(&modlinkage);
2175 	if (err != 0) {
2176 		/* delete taskq if install fails */
2177 		sd_taskq_delete();
2178 
2179 		mutex_destroy(&sd_detach_mutex);
2180 		mutex_destroy(&sd_log_mutex);
2181 		mutex_destroy(&sd_label_mutex);
2182 
2183 		mutex_destroy(&sd_tr.srq_resv_reclaim_mutex);
2184 		cv_destroy(&sd_tr.srq_resv_reclaim_cv);
2185 		cv_destroy(&sd_tr.srq_inprocess_cv);
2186 
2187 		sd_scsi_probe_cache_fini();
2188 
2189 		sd_scsi_target_lun_fini();
2190 
2191 		ddi_soft_state_fini(&sd_state);
2192 		return (err);
2193 	}
2194 
2195 	return (err);
2196 }
2197 
2198 
2199 /*
2200  * Function: _fini
2201  *
2202  * Description: This is the driver _fini(9E) entry point.
2203  *
2204  * Return Code: Returns the value from mod_remove(9F)
2205  *
2206  * Context: Called when driver module is unloaded.
2207  */
2208 
2209 int
2210 _fini(void)
2211 {
2212 	int err;
2213 
2214 	if ((err = mod_remove(&modlinkage)) != 0) {
2215 		return (err);
2216 	}
2217 
2218 	sd_taskq_delete();
2219 
2220 	mutex_destroy(&sd_detach_mutex);
2221 	mutex_destroy(&sd_log_mutex);
2222 	mutex_destroy(&sd_label_mutex);
2223 	mutex_destroy(&sd_tr.srq_resv_reclaim_mutex);
2224 
2225 	sd_scsi_probe_cache_fini();
2226 
2227 	sd_scsi_target_lun_fini();
2228 
2229 	cv_destroy(&sd_tr.srq_resv_reclaim_cv);
2230 	cv_destroy(&sd_tr.srq_inprocess_cv);
2231 
2232 	ddi_soft_state_fini(&sd_state);
2233 
2234 	return (err);
2235 }
2236 
2237 
2238 /*
2239  * Function: _info
2240  *
2241  * Description: This is the driver _info(9E) entry point.
2242  *
2243  * Arguments: modinfop - pointer to the driver modinfo structure
2244  *
2245  * Return Code: Returns the value from mod_info(9F).
2246  *
2247  * Context: Kernel thread context
2248  */
2249 
2250 int
2251 _info(struct modinfo *modinfop)
2252 {
2253 	return (mod_info(&modlinkage, modinfop));
2254 }
2255 
2256 
2257 /*
2258  * The following routines implement the driver message logging facility.
2259  * They provide component- and level-based debug output filtering.
2260  * Output may also be restricted to messages for a single instance by
2261  * specifying a soft state pointer in sd_debug_un. If sd_debug_un is set
2262  * to NULL, then messages for all instances are printed.
2263  *
2264  * These routines have been cloned from each other due to the language
2265  * constraints of macros and variable argument list processing.
2266  */
2267 
2268 
2269 /*
2270  * Function: sd_log_err
2271  *
2272  * Description: This routine is called by the SD_ERROR macro for debug
2273  *		logging of error conditions.
2274  *
2275  * Arguments: comp - driver component being logged
2276  *		un - pointer to driver soft state (unit) structure
2277  *		fmt - error string and format to be logged
2278  */
2279 
2280 static void
2281 sd_log_err(uint_t comp, struct sd_lun *un, const char *fmt, ...)
2282 {
2283 	va_list ap;
2284 	dev_info_t *dev;
2285 
2286 	ASSERT(un != NULL);
2287 	dev = SD_DEVINFO(un);
2288 	ASSERT(dev != NULL);
2289 
2290 	/*
2291 	 * Filter messages based on the global component and level masks.
2292 	 * Also print if un matches the value of sd_debug_un, or if
2293 	 * sd_debug_un is set to NULL.
2294 	 */
2295 	if ((sd_component_mask & comp) && (sd_level_mask & SD_LOGMASK_ERROR) &&
2296 	    ((sd_debug_un == NULL) || (sd_debug_un == un))) {
2297 		mutex_enter(&sd_log_mutex);
2298 		va_start(ap, fmt);
2299 		(void) vsprintf(sd_log_buf, fmt, ap);
2300 		va_end(ap);
2301 		scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf);
2302 		mutex_exit(&sd_log_mutex);
2303 	}
2304 #ifdef SD_FAULT_INJECTION
2305 	_NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask));
2306 	if (un->sd_injection_mask & comp) {
2307 		mutex_enter(&sd_log_mutex);
2308 		va_start(ap, fmt);
2309 		(void) vsprintf(sd_log_buf, fmt, ap);
2310 		va_end(ap);
2311 		sd_injection_log(sd_log_buf, un);
2312 		mutex_exit(&sd_log_mutex);
2313 	}
2314 #endif
2315 }
2316 
2317 
2318 /*
2319  * Function: sd_log_info
2320  *
2321  * Description: This routine is called by the SD_INFO macro for debug
2322  *		logging of general purpose informational conditions.
2323  *
2324  * Arguments: component - driver component being logged
2325  *		un - pointer to driver soft state (unit) structure
2326  *		fmt - info string and format to be logged
2327  */
2328 
2329 static void
2330 sd_log_info(uint_t component, struct sd_lun *un, const char *fmt, ...)
2331 {
2332 	va_list ap;
2333 	dev_info_t *dev;
2334 
2335 	ASSERT(un != NULL);
2336 	dev = SD_DEVINFO(un);
2337 	ASSERT(dev != NULL);
2338 
2339 	/*
2340 	 * Filter messages based on the global component and level masks.
2341 	 * Also print if un matches the value of sd_debug_un, or if
2342 	 * sd_debug_un is set to NULL.
2343 	 */
2344 	if ((sd_component_mask & component) &&
2345 	    (sd_level_mask & SD_LOGMASK_INFO) &&
2346 	    ((sd_debug_un == NULL) || (sd_debug_un == un))) {
2347 		mutex_enter(&sd_log_mutex);
2348 		va_start(ap, fmt);
2349 		(void) vsprintf(sd_log_buf, fmt, ap);
2350 		va_end(ap);
2351 		scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf);
2352 		mutex_exit(&sd_log_mutex);
2353 	}
2354 #ifdef SD_FAULT_INJECTION
2355 	_NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask));
2356 	if (un->sd_injection_mask & component) {
2357 		mutex_enter(&sd_log_mutex);
2358 		va_start(ap, fmt);
2359 		(void) vsprintf(sd_log_buf, fmt, ap);
2360 		va_end(ap);
2361 		sd_injection_log(sd_log_buf, un);
2362 		mutex_exit(&sd_log_mutex);
2363 	}
2364 #endif
2365 }
2366 
2367 
2368 /*
2369  * Function: sd_log_trace
2370  *
2371  * Description: This routine is called by the SD_TRACE macro for debug
2372  *		logging of trace conditions (i.e. function entry/exit).
2373  *
2374  * Arguments: component - driver component being logged
2375  *		un - pointer to driver soft state (unit) structure
2376  *		fmt - trace string and format to be logged
2377  */
2378 
2379 static void
2380 sd_log_trace(uint_t component, struct sd_lun *un, const char *fmt, ...)
2381 {
2382 	va_list ap;
2383 	dev_info_t *dev;
2384 
2385 	ASSERT(un != NULL);
2386 	dev = SD_DEVINFO(un);
2387 	ASSERT(dev != NULL);
2388 
2389 	/*
2390 	 * Filter messages based on the global component and level masks.
2391 	 * Also print if un matches the value of sd_debug_un, or if
2392 	 * sd_debug_un is set to NULL.
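	 *
	 * For example (a hypothetical debugging setup, not a shipped
	 * default): patching sd_level_mask to include SD_LOGMASK_TRACE
	 * and pointing sd_debug_un at one instance's soft state (e.g.
	 * from a kernel debugger) yields trace output for that single
	 * unit only, since the predicate below then reduces to
	 * (sd_debug_un == un).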
2393 */ 2394 if ((sd_component_mask & component) && 2395 (sd_level_mask & SD_LOGMASK_TRACE) && 2396 ((sd_debug_un == NULL) || (sd_debug_un == un))) { 2397 mutex_enter(&sd_log_mutex); 2398 va_start(ap, fmt); 2399 (void) vsprintf(sd_log_buf, fmt, ap); 2400 va_end(ap); 2401 scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf); 2402 mutex_exit(&sd_log_mutex); 2403 } 2404 #ifdef SD_FAULT_INJECTION 2405 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask)); 2406 if (un->sd_injection_mask & component) { 2407 mutex_enter(&sd_log_mutex); 2408 va_start(ap, fmt); 2409 (void) vsprintf(sd_log_buf, fmt, ap); 2410 va_end(ap); 2411 sd_injection_log(sd_log_buf, un); 2412 mutex_exit(&sd_log_mutex); 2413 } 2414 #endif 2415 } 2416 2417 2418 /* 2419 * Function: sdprobe 2420 * 2421 * Description: This is the driver probe(9e) entry point function. 2422 * 2423 * Arguments: devi - opaque device info handle 2424 * 2425 * Return Code: DDI_PROBE_SUCCESS: If the probe was successful. 2426 * DDI_PROBE_FAILURE: If the probe failed. 2427 * DDI_PROBE_PARTIAL: If the instance is not present now, 2428 * but may be present in the future. 2429 */ 2430 2431 static int 2432 sdprobe(dev_info_t *devi) 2433 { 2434 struct scsi_device *devp; 2435 int rval; 2436 int instance; 2437 2438 /* 2439 * if it wasn't for pln, sdprobe could actually be nulldev 2440 * in the "__fibre" case. 2441 */ 2442 if (ddi_dev_is_sid(devi) == DDI_SUCCESS) { 2443 return (DDI_PROBE_DONTCARE); 2444 } 2445 2446 devp = ddi_get_driver_private(devi); 2447 2448 if (devp == NULL) { 2449 /* Ooops... nexus driver is mis-configured... */ 2450 return (DDI_PROBE_FAILURE); 2451 } 2452 2453 instance = ddi_get_instance(devi); 2454 2455 if (ddi_get_soft_state(sd_state, instance) != NULL) { 2456 return (DDI_PROBE_PARTIAL); 2457 } 2458 2459 /* 2460 * Call the SCSA utility probe routine to see if we actually 2461 * have a target at this SCSI nexus. 2462 */ 2463 switch (sd_scsi_probe_with_cache(devp, NULL_FUNC)) { 2464 case SCSIPROBE_EXISTS: 2465 switch (devp->sd_inq->inq_dtype) { 2466 case DTYPE_DIRECT: 2467 rval = DDI_PROBE_SUCCESS; 2468 break; 2469 case DTYPE_RODIRECT: 2470 /* CDs etc. Can be removable media */ 2471 rval = DDI_PROBE_SUCCESS; 2472 break; 2473 case DTYPE_OPTICAL: 2474 /* 2475 * Rewritable optical driver HP115AA 2476 * Can also be removable media 2477 */ 2478 2479 /* 2480 * Do not attempt to bind to DTYPE_OPTICAL if 2481 * pre solaris 9 sparc sd behavior is required 2482 * 2483 * If first time through and sd_dtype_optical_bind 2484 * has not been set in /etc/system check properties 2485 */ 2486 2487 if (sd_dtype_optical_bind < 0) { 2488 sd_dtype_optical_bind = ddi_prop_get_int 2489 (DDI_DEV_T_ANY, devi, 0, 2490 "optical-device-bind", 1); 2491 } 2492 2493 if (sd_dtype_optical_bind == 0) { 2494 rval = DDI_PROBE_FAILURE; 2495 } else { 2496 rval = DDI_PROBE_SUCCESS; 2497 } 2498 break; 2499 2500 case DTYPE_NOTPRESENT: 2501 default: 2502 rval = DDI_PROBE_FAILURE; 2503 break; 2504 } 2505 break; 2506 default: 2507 rval = DDI_PROBE_PARTIAL; 2508 break; 2509 } 2510 2511 /* 2512 * This routine checks for resource allocation prior to freeing, 2513 * so it will take care of the "smart probing" case where a 2514 * scsi_probe() may or may not have been issued and will *not* 2515 * free previously-freed resources. 2516 */ 2517 scsi_unprobe(devp); 2518 return (rval); 2519 } 2520 2521 2522 /* 2523 * Function: sdinfo 2524 * 2525 * Description: This is the driver getinfo(9e) entry point function. 
2526 * Given the device number, return the devinfo pointer from 2527 * the scsi_device structure or the instance number 2528 * associated with the dev_t. 2529 * 2530 * Arguments: dip - pointer to device info structure 2531 * infocmd - command argument (DDI_INFO_DEVT2DEVINFO, 2532 * DDI_INFO_DEVT2INSTANCE) 2533 * arg - driver dev_t 2534 * resultp - user buffer for request response 2535 * 2536 * Return Code: DDI_SUCCESS 2537 * DDI_FAILURE 2538 */ 2539 /* ARGSUSED */ 2540 static int 2541 sdinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result) 2542 { 2543 struct sd_lun *un; 2544 dev_t dev; 2545 int instance; 2546 int error; 2547 2548 switch (infocmd) { 2549 case DDI_INFO_DEVT2DEVINFO: 2550 dev = (dev_t)arg; 2551 instance = SDUNIT(dev); 2552 if ((un = ddi_get_soft_state(sd_state, instance)) == NULL) { 2553 return (DDI_FAILURE); 2554 } 2555 *result = (void *) SD_DEVINFO(un); 2556 error = DDI_SUCCESS; 2557 break; 2558 case DDI_INFO_DEVT2INSTANCE: 2559 dev = (dev_t)arg; 2560 instance = SDUNIT(dev); 2561 *result = (void *)(uintptr_t)instance; 2562 error = DDI_SUCCESS; 2563 break; 2564 default: 2565 error = DDI_FAILURE; 2566 } 2567 return (error); 2568 } 2569 2570 /* 2571 * Function: sd_prop_op 2572 * 2573 * Description: This is the driver prop_op(9e) entry point function. 2574 * Return the number of blocks for the partition in question 2575 * or forward the request to the property facilities. 2576 * 2577 * Arguments: dev - device number 2578 * dip - pointer to device info structure 2579 * prop_op - property operator 2580 * mod_flags - DDI_PROP_DONTPASS, don't pass to parent 2581 * name - pointer to property name 2582 * valuep - pointer or address of the user buffer 2583 * lengthp - property length 2584 * 2585 * Return Code: DDI_PROP_SUCCESS 2586 * DDI_PROP_NOT_FOUND 2587 * DDI_PROP_UNDEFINED 2588 * DDI_PROP_NO_MEMORY 2589 * DDI_PROP_BUF_TOO_SMALL 2590 */ 2591 2592 static int 2593 sd_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, int mod_flags, 2594 char *name, caddr_t valuep, int *lengthp) 2595 { 2596 int instance = ddi_get_instance(dip); 2597 struct sd_lun *un; 2598 uint64_t nblocks64; 2599 uint_t dblk; 2600 2601 /* 2602 * Our dynamic properties are all device specific and size oriented. 2603 * Requests issued under conditions where size is valid are passed 2604 * to ddi_prop_op_nblocks with the size information, otherwise the 2605 * request is passed to ddi_prop_op. Size depends on valid geometry. 
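	 *
	 * For example, with a valid label a request for the "Nblocks"
	 * or "Size" dynamic property is answered from the partition
	 * size computed below via ddi_prop_op_nblocks_blksize() (the
	 * block-size-aware variant of ddi_prop_op_nblocks); with no
	 * valid label, or for dev == DDI_DEV_T_ANY, the request simply
	 * falls through to the generic ddi_prop_op() path.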
2606 */ 2607 un = ddi_get_soft_state(sd_state, instance); 2608 if ((dev == DDI_DEV_T_ANY) || (un == NULL)) { 2609 return (ddi_prop_op(dev, dip, prop_op, mod_flags, 2610 name, valuep, lengthp)); 2611 } else if (!SD_IS_VALID_LABEL(un)) { 2612 return (ddi_prop_op(dev, dip, prop_op, mod_flags, name, 2613 valuep, lengthp)); 2614 } 2615 2616 /* get nblocks value */ 2617 ASSERT(!mutex_owned(SD_MUTEX(un))); 2618 2619 (void) cmlb_partinfo(un->un_cmlbhandle, SDPART(dev), 2620 (diskaddr_t *)&nblocks64, NULL, NULL, NULL, (void *)SD_PATH_DIRECT); 2621 2622 /* report size in target size blocks */ 2623 dblk = un->un_tgt_blocksize / un->un_sys_blocksize; 2624 return (ddi_prop_op_nblocks_blksize(dev, dip, prop_op, mod_flags, 2625 name, valuep, lengthp, nblocks64 / dblk, un->un_tgt_blocksize)); 2626 } 2627 2628 /* 2629 * The following functions are for smart probing: 2630 * sd_scsi_probe_cache_init() 2631 * sd_scsi_probe_cache_fini() 2632 * sd_scsi_clear_probe_cache() 2633 * sd_scsi_probe_with_cache() 2634 */ 2635 2636 /* 2637 * Function: sd_scsi_probe_cache_init 2638 * 2639 * Description: Initializes the probe response cache mutex and head pointer. 2640 * 2641 * Context: Kernel thread context 2642 */ 2643 2644 static void 2645 sd_scsi_probe_cache_init(void) 2646 { 2647 mutex_init(&sd_scsi_probe_cache_mutex, NULL, MUTEX_DRIVER, NULL); 2648 sd_scsi_probe_cache_head = NULL; 2649 } 2650 2651 2652 /* 2653 * Function: sd_scsi_probe_cache_fini 2654 * 2655 * Description: Frees all resources associated with the probe response cache. 2656 * 2657 * Context: Kernel thread context 2658 */ 2659 2660 static void 2661 sd_scsi_probe_cache_fini(void) 2662 { 2663 struct sd_scsi_probe_cache *cp; 2664 struct sd_scsi_probe_cache *ncp; 2665 2666 /* Clean up our smart probing linked list */ 2667 for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = ncp) { 2668 ncp = cp->next; 2669 kmem_free(cp, sizeof (struct sd_scsi_probe_cache)); 2670 } 2671 sd_scsi_probe_cache_head = NULL; 2672 mutex_destroy(&sd_scsi_probe_cache_mutex); 2673 } 2674 2675 2676 /* 2677 * Function: sd_scsi_clear_probe_cache 2678 * 2679 * Description: This routine clears the probe response cache. This is 2680 * done when open() returns ENXIO so that when deferred 2681 * attach is attempted (possibly after a device has been 2682 * turned on) we will retry the probe. Since we don't know 2683 * which target we failed to open, we just clear the 2684 * entire cache. 2685 * 2686 * Context: Kernel thread context 2687 */ 2688 2689 static void 2690 sd_scsi_clear_probe_cache(void) 2691 { 2692 struct sd_scsi_probe_cache *cp; 2693 int i; 2694 2695 mutex_enter(&sd_scsi_probe_cache_mutex); 2696 for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = cp->next) { 2697 /* 2698 * Reset all entries to SCSIPROBE_EXISTS. This will 2699 * force probing to be performed the next time 2700 * sd_scsi_probe_with_cache is called. 2701 */ 2702 for (i = 0; i < NTARGETS_WIDE; i++) { 2703 cp->cache[i] = SCSIPROBE_EXISTS; 2704 } 2705 } 2706 mutex_exit(&sd_scsi_probe_cache_mutex); 2707 } 2708 2709 2710 /* 2711 * Function: sd_scsi_probe_with_cache 2712 * 2713 * Description: This routine implements support for a scsi device probe 2714 * with cache. The driver maintains a cache of the target 2715 * responses to scsi probes. If we get no response from a 2716 * target during a probe inquiry, we remember that, and we 2717 * avoid additional calls to scsi_probe on non-zero LUNs 2718 * on the same target until the cache is cleared. 
By doing 2719 * so we avoid the 1/4 sec selection timeout for nonzero 2720 * LUNs. lun0 of a target is always probed. 2721 * 2722 * Arguments: devp - Pointer to a scsi_device(9S) structure 2723 * waitfunc - indicates what the allocator routines should 2724 * do when resources are not available. This value 2725 * is passed on to scsi_probe() when that routine 2726 * is called. 2727 * 2728 * Return Code: SCSIPROBE_NORESP if a NORESP in probe response cache; 2729 * otherwise the value returned by scsi_probe(9F). 2730 * 2731 * Context: Kernel thread context 2732 */ 2733 2734 static int 2735 sd_scsi_probe_with_cache(struct scsi_device *devp, int (*waitfn)()) 2736 { 2737 struct sd_scsi_probe_cache *cp; 2738 dev_info_t *pdip = ddi_get_parent(devp->sd_dev); 2739 int lun, tgt; 2740 2741 lun = ddi_prop_get_int(DDI_DEV_T_ANY, devp->sd_dev, DDI_PROP_DONTPASS, 2742 SCSI_ADDR_PROP_LUN, 0); 2743 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devp->sd_dev, DDI_PROP_DONTPASS, 2744 SCSI_ADDR_PROP_TARGET, -1); 2745 2746 /* Make sure caching enabled and target in range */ 2747 if ((tgt < 0) || (tgt >= NTARGETS_WIDE)) { 2748 /* do it the old way (no cache) */ 2749 return (scsi_probe(devp, waitfn)); 2750 } 2751 2752 mutex_enter(&sd_scsi_probe_cache_mutex); 2753 2754 /* Find the cache for this scsi bus instance */ 2755 for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = cp->next) { 2756 if (cp->pdip == pdip) { 2757 break; 2758 } 2759 } 2760 2761 /* If we can't find a cache for this pdip, create one */ 2762 if (cp == NULL) { 2763 int i; 2764 2765 cp = kmem_zalloc(sizeof (struct sd_scsi_probe_cache), 2766 KM_SLEEP); 2767 cp->pdip = pdip; 2768 cp->next = sd_scsi_probe_cache_head; 2769 sd_scsi_probe_cache_head = cp; 2770 for (i = 0; i < NTARGETS_WIDE; i++) { 2771 cp->cache[i] = SCSIPROBE_EXISTS; 2772 } 2773 } 2774 2775 mutex_exit(&sd_scsi_probe_cache_mutex); 2776 2777 /* Recompute the cache for this target if LUN zero */ 2778 if (lun == 0) { 2779 cp->cache[tgt] = SCSIPROBE_EXISTS; 2780 } 2781 2782 /* Don't probe if cache remembers a NORESP from a previous LUN. */ 2783 if (cp->cache[tgt] != SCSIPROBE_EXISTS) { 2784 return (SCSIPROBE_NORESP); 2785 } 2786 2787 /* Do the actual probe; save & return the result */ 2788 return (cp->cache[tgt] = scsi_probe(devp, waitfn)); 2789 } 2790 2791 2792 /* 2793 * Function: sd_scsi_target_lun_init 2794 * 2795 * Description: Initializes the attached lun chain mutex and head pointer. 2796 * 2797 * Context: Kernel thread context 2798 */ 2799 2800 static void 2801 sd_scsi_target_lun_init(void) 2802 { 2803 mutex_init(&sd_scsi_target_lun_mutex, NULL, MUTEX_DRIVER, NULL); 2804 sd_scsi_target_lun_head = NULL; 2805 } 2806 2807 2808 /* 2809 * Function: sd_scsi_target_lun_fini 2810 * 2811 * Description: Frees all resources associated with the attached lun 2812 * chain 2813 * 2814 * Context: Kernel thread context 2815 */ 2816 2817 static void 2818 sd_scsi_target_lun_fini(void) 2819 { 2820 struct sd_scsi_hba_tgt_lun *cp; 2821 struct sd_scsi_hba_tgt_lun *ncp; 2822 2823 for (cp = sd_scsi_target_lun_head; cp != NULL; cp = ncp) { 2824 ncp = cp->next; 2825 kmem_free(cp, sizeof (struct sd_scsi_hba_tgt_lun)); 2826 } 2827 sd_scsi_target_lun_head = NULL; 2828 mutex_destroy(&sd_scsi_target_lun_mutex); 2829 } 2830 2831 2832 /* 2833 * Function: sd_scsi_get_target_lun_count 2834 * 2835 * Description: This routine will check in the attached lun chain to see 2836 * how many luns are attached on the required SCSI controller 2837 * and target. 
Currently, some capabilities like tagged queue 2838 * are supported per target based by HBA. So all luns in a 2839 * target have the same capabilities. Based on this assumption, 2840 * sd should only set these capabilities once per target. This 2841 * function is called when sd needs to decide how many luns 2842 * already attached on a target. 2843 * 2844 * Arguments: dip - Pointer to the system's dev_info_t for the SCSI 2845 * controller device. 2846 * target - The target ID on the controller's SCSI bus. 2847 * 2848 * Return Code: The number of luns attached on the required target and 2849 * controller. 2850 * -1 if target ID is not in parallel SCSI scope or the given 2851 * dip is not in the chain. 2852 * 2853 * Context: Kernel thread context 2854 */ 2855 2856 static int 2857 sd_scsi_get_target_lun_count(dev_info_t *dip, int target) 2858 { 2859 struct sd_scsi_hba_tgt_lun *cp; 2860 2861 if ((target < 0) || (target >= NTARGETS_WIDE)) { 2862 return (-1); 2863 } 2864 2865 mutex_enter(&sd_scsi_target_lun_mutex); 2866 2867 for (cp = sd_scsi_target_lun_head; cp != NULL; cp = cp->next) { 2868 if (cp->pdip == dip) { 2869 break; 2870 } 2871 } 2872 2873 mutex_exit(&sd_scsi_target_lun_mutex); 2874 2875 if (cp == NULL) { 2876 return (-1); 2877 } 2878 2879 return (cp->nlun[target]); 2880 } 2881 2882 2883 /* 2884 * Function: sd_scsi_update_lun_on_target 2885 * 2886 * Description: This routine is used to update the attached lun chain when a 2887 * lun is attached or detached on a target. 2888 * 2889 * Arguments: dip - Pointer to the system's dev_info_t for the SCSI 2890 * controller device. 2891 * target - The target ID on the controller's SCSI bus. 2892 * flag - Indicate the lun is attached or detached. 2893 * 2894 * Context: Kernel thread context 2895 */ 2896 2897 static void 2898 sd_scsi_update_lun_on_target(dev_info_t *dip, int target, int flag) 2899 { 2900 struct sd_scsi_hba_tgt_lun *cp; 2901 2902 mutex_enter(&sd_scsi_target_lun_mutex); 2903 2904 for (cp = sd_scsi_target_lun_head; cp != NULL; cp = cp->next) { 2905 if (cp->pdip == dip) { 2906 break; 2907 } 2908 } 2909 2910 if ((cp == NULL) && (flag == SD_SCSI_LUN_ATTACH)) { 2911 cp = kmem_zalloc(sizeof (struct sd_scsi_hba_tgt_lun), 2912 KM_SLEEP); 2913 cp->pdip = dip; 2914 cp->next = sd_scsi_target_lun_head; 2915 sd_scsi_target_lun_head = cp; 2916 } 2917 2918 mutex_exit(&sd_scsi_target_lun_mutex); 2919 2920 if (cp != NULL) { 2921 if (flag == SD_SCSI_LUN_ATTACH) { 2922 cp->nlun[target] ++; 2923 } else { 2924 cp->nlun[target] --; 2925 } 2926 } 2927 } 2928 2929 2930 /* 2931 * Function: sd_spin_up_unit 2932 * 2933 * Description: Issues the following commands to spin-up the device: 2934 * START STOP UNIT, and INQUIRY. 2935 * 2936 * Arguments: un - driver soft state (unit) structure 2937 * 2938 * Return Code: 0 - success 2939 * EIO - failure 2940 * EACCES - reservation conflict 2941 * 2942 * Context: Kernel thread context 2943 */ 2944 2945 static int 2946 sd_spin_up_unit(struct sd_lun *un) 2947 { 2948 size_t resid = 0; 2949 int has_conflict = FALSE; 2950 uchar_t *bufaddr; 2951 2952 ASSERT(un != NULL); 2953 2954 /* 2955 * Send a throwaway START UNIT command. 2956 * 2957 * If we fail on this, we don't care presently what precisely 2958 * is wrong. EMC's arrays will also fail this with a check 2959 * condition (0x2/0x4/0x3) if the device is "inactive," but 2960 * we don't want to fail the attach because it may become 2961 * "active" later. 
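	 *
	 * A sketch of how the result is interpreted (matching the code
	 * below):
	 *
	 *	rval = sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_START,
	 *	    SD_PATH_DIRECT);
	 *
	 * rval == 0:	   unit started, or was already spinning
	 * rval == EACCES: reservation conflict; remembered here and
	 *		   returned to the caller after the INQUIRY below
	 * anything else:  deliberately ignored at attach time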
2962 	 */
2963 	if (sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_START, SD_PATH_DIRECT)
2964 	    == EACCES)
2965 		has_conflict = TRUE;
2966 
2967 	/*
2968 	 * Send another INQUIRY command to the target. This is necessary for
2969 	 * non-removable media direct access devices because their INQUIRY data
2970 	 * may not be fully qualified until they are spun up (perhaps via the
2971 	 * START command above). (Note: This seems to be needed for some
2972 	 * legacy devices only.) The INQUIRY command should succeed even if a
2973 	 * Reservation Conflict is present.
2974 	 */
2975 	bufaddr = kmem_zalloc(SUN_INQSIZE, KM_SLEEP);
2976 	if (sd_send_scsi_INQUIRY(un, bufaddr, SUN_INQSIZE, 0, 0, &resid) != 0) {
2977 		kmem_free(bufaddr, SUN_INQSIZE);
2978 		return (EIO);
2979 	}
2980 
2981 	/*
2982 	 * If we got enough INQUIRY data, copy it over the old INQUIRY data.
2983 	 * Note that this routine does not return a failure here even if the
2984 	 * INQUIRY command did not return any data. This is a legacy behavior.
2985 	 */
2986 	if ((SUN_INQSIZE - resid) >= SUN_MIN_INQLEN) {
2987 		bcopy(bufaddr, SD_INQUIRY(un), SUN_INQSIZE);
2988 	}
2989 
2990 	kmem_free(bufaddr, SUN_INQSIZE);
2991 
2992 	/* If we hit a reservation conflict above, tell the caller. */
2993 	if (has_conflict == TRUE) {
2994 		return (EACCES);
2995 	}
2996 
2997 	return (0);
2998 }
2999 
3000 #ifdef _LP64
3001 /*
3002  * Function: sd_enable_descr_sense
3003  *
3004  * Description: This routine attempts to select descriptor sense format
3005  *		using the Control mode page. Devices that support 64 bit
3006  *		LBAs (for >2TB luns) should also implement descriptor
3007  *		sense data, so we will call this function whenever we see
3008  *		a lun larger than 2TB. If for some reason the device
3009  *		supports 64 bit LBAs but doesn't support descriptor sense,
3010  *		presumably the mode select will fail. Everything will
3011  *		continue to work normally except that we will not get
3012  *		complete sense data for commands that fail with an LBA
3013  *		larger than 32 bits.
3014  *
3015  * Arguments: un - driver soft state (unit) structure
3016  *
3017  * Context: Kernel thread context only
3018  */
3019 
3020 static void
3021 sd_enable_descr_sense(struct sd_lun *un)
3022 {
3023 	uchar_t *header;
3024 	struct mode_control_scsi3 *ctrl_bufp;
3025 	size_t buflen;
3026 	size_t bd_len;
3027 
3028 	/*
3029 	 * Read MODE SENSE page 0xA, Control Mode Page
3030 	 */
3031 	buflen = MODE_HEADER_LENGTH + MODE_BLK_DESC_LENGTH +
3032 	    sizeof (struct mode_control_scsi3);
3033 	header = kmem_zalloc(buflen, KM_SLEEP);
3034 	if (sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, header, buflen,
3035 	    MODEPAGE_CTRL_MODE, SD_PATH_DIRECT) != 0) {
3036 		SD_ERROR(SD_LOG_COMMON, un,
3037 		    "sd_enable_descr_sense: mode sense ctrl page failed\n");
3038 		goto eds_exit;
3039 	}
3040 
3041 	/*
3042 	 * Determine size of Block Descriptors in order to locate
3043 	 * the mode page data. ATAPI devices return 0, SCSI devices
3044 	 * should return MODE_BLK_DESC_LENGTH.
3045 	 */
3046 	bd_len = ((struct mode_header *)header)->bdesc_length;
3047 
3048 	/* Clear the mode data length field for MODE SELECT */
3049 	((struct mode_header *)header)->length = 0;
3050 
3051 	ctrl_bufp = (struct mode_control_scsi3 *)
3052 	    (header + MODE_HEADER_LENGTH + bd_len);
3053 
3054 	/*
3055 	 * If the page length is smaller than the expected value,
3056 	 * the target device doesn't support D_SENSE. Bail out here.
3057 	 */
3058 	if (ctrl_bufp->mode_page.length <
3059 	    sizeof (struct mode_control_scsi3) - 2) {
3060 		SD_ERROR(SD_LOG_COMMON, un,
3061 		    "sd_enable_descr_sense: enable D_SENSE failed\n");
3062 		goto eds_exit;
3063 	}
3064 
3065 	/*
3066 	 * Clear PS bit for MODE SELECT
3067 	 */
3068 	ctrl_bufp->mode_page.ps = 0;
3069 
3070 	/*
3071 	 * Set D_SENSE to enable descriptor sense format.
3072 	 */
3073 	ctrl_bufp->d_sense = 1;
3074 
3075 	/*
3076 	 * Use MODE SELECT to commit the change to the D_SENSE bit
3077 	 */
3078 	if (sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, header,
3079 	    buflen, SD_DONTSAVE_PAGE, SD_PATH_DIRECT) != 0) {
3080 		SD_INFO(SD_LOG_COMMON, un,
3081 		    "sd_enable_descr_sense: mode select ctrl page failed\n");
3082 		goto eds_exit;
3083 	}
3084 
3085 eds_exit:
3086 	kmem_free(header, buflen);
3087 }
3088 
3089 /*
3090  * Function: sd_reenable_dsense_task
3091  *
3092  * Description: Re-enable descriptor sense after device or bus reset
3093  *
3094  * Context: Executes in a taskq() thread context
3095  */
3096 static void
3097 sd_reenable_dsense_task(void *arg)
3098 {
3099 	struct sd_lun *un = arg;
3100 
3101 	ASSERT(un != NULL);
3102 	sd_enable_descr_sense(un);
3103 }
3104 #endif /* _LP64 */
3105 
3106 /*
3107  * Function: sd_set_mmc_caps
3108  *
3109  * Description: This routine determines if the device is MMC compliant and if
3110  *		the device supports CDDA via a mode sense of the CDVD
3111  *		capabilities mode page. Also checks if the device is a
3112  *		dvdram writable device.
3113  *
3114  * Arguments: un - driver soft state (unit) structure
3115  *
3116  * Context: Kernel thread context only
3117  */
3118 
3119 static void
3120 sd_set_mmc_caps(struct sd_lun *un)
3121 {
3122 	struct mode_header_grp2 *sense_mhp;
3123 	uchar_t *sense_page;
3124 	caddr_t buf;
3125 	int bd_len;
3126 	int status;
3127 	struct uscsi_cmd com;
3128 	int rtn;
3129 	uchar_t *out_data_rw, *out_data_hd;
3130 	uchar_t *rqbuf_rw, *rqbuf_hd;
3131 
3132 	ASSERT(un != NULL);
3133 
3134 	/*
3135 	 * The flags set in this function are: MMC compliant, DVD-RAM
3136 	 * writable device, and CDDA support. Each is initialized to FALSE
3137 	 * and set to TRUE only if the corresponding capability is detected.
3138 	 */
3139 	un->un_f_mmc_cap = FALSE;
3140 	un->un_f_dvdram_writable_device = FALSE;
3141 	un->un_f_cfg_cdda = FALSE;
3142 
3143 	buf = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP);
3144 	status = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, (uchar_t *)buf,
3145 	    BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP, SD_PATH_DIRECT);
3146 
3147 	if (status != 0) {
3148 		/* command failed; just return */
3149 		kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
3150 		return;
3151 	}
3152 	/*
3153 	 * If the mode sense request for the CDROM CAPABILITIES
3154 	 * page (0x2A) succeeds, the device is assumed to be MMC.
3155 	 */
3156 	un->un_f_mmc_cap = TRUE;
3157 
3158 	/* Get to the page data */
3159 	sense_mhp = (struct mode_header_grp2 *)buf;
3160 	bd_len = (sense_mhp->bdesc_length_hi << 8) |
3161 	    sense_mhp->bdesc_length_lo;
3162 	if (bd_len > MODE_BLK_DESC_LENGTH) {
3163 		/*
3164 		 * We did not get back the expected block descriptor
3165 		 * length, so we cannot determine if the device supports
3166 		 * CDDA. However, we still indicate the device is MMC
3167 		 * according to the successful response to the page
3168 		 * 0x2A mode sense request.
3169 */ 3170 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3171 "sd_set_mmc_caps: Mode Sense returned " 3172 "invalid block descriptor length\n"); 3173 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3174 return; 3175 } 3176 3177 /* See if read CDDA is supported */ 3178 sense_page = (uchar_t *)(buf + MODE_HEADER_LENGTH_GRP2 + 3179 bd_len); 3180 un->un_f_cfg_cdda = (sense_page[5] & 0x01) ? TRUE : FALSE; 3181 3182 /* See if writing DVD RAM is supported. */ 3183 un->un_f_dvdram_writable_device = (sense_page[3] & 0x20) ? TRUE : FALSE; 3184 if (un->un_f_dvdram_writable_device == TRUE) { 3185 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3186 return; 3187 } 3188 3189 /* 3190 * If the device presents DVD or CD capabilities in the mode 3191 * page, we can return here since a RRD will not have 3192 * these capabilities. 3193 */ 3194 if ((sense_page[2] & 0x3f) || (sense_page[3] & 0x3f)) { 3195 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3196 return; 3197 } 3198 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3199 3200 /* 3201 * If un->un_f_dvdram_writable_device is still FALSE, 3202 * check for a Removable Rigid Disk (RRD). A RRD 3203 * device is identified by the features RANDOM_WRITABLE and 3204 * HARDWARE_DEFECT_MANAGEMENT. 3205 */ 3206 out_data_rw = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP); 3207 rqbuf_rw = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 3208 3209 rtn = sd_send_scsi_feature_GET_CONFIGURATION(un, &com, rqbuf_rw, 3210 SENSE_LENGTH, out_data_rw, SD_CURRENT_FEATURE_LEN, 3211 RANDOM_WRITABLE, SD_PATH_STANDARD); 3212 if (rtn != 0) { 3213 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN); 3214 kmem_free(rqbuf_rw, SENSE_LENGTH); 3215 return; 3216 } 3217 3218 out_data_hd = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP); 3219 rqbuf_hd = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 3220 3221 rtn = sd_send_scsi_feature_GET_CONFIGURATION(un, &com, rqbuf_hd, 3222 SENSE_LENGTH, out_data_hd, SD_CURRENT_FEATURE_LEN, 3223 HARDWARE_DEFECT_MANAGEMENT, SD_PATH_STANDARD); 3224 if (rtn == 0) { 3225 /* 3226 * We have good information, check for random writable 3227 * and hardware defect features. 3228 */ 3229 if ((out_data_rw[9] & RANDOM_WRITABLE) && 3230 (out_data_hd[9] & HARDWARE_DEFECT_MANAGEMENT)) { 3231 un->un_f_dvdram_writable_device = TRUE; 3232 } 3233 } 3234 3235 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN); 3236 kmem_free(rqbuf_rw, SENSE_LENGTH); 3237 kmem_free(out_data_hd, SD_CURRENT_FEATURE_LEN); 3238 kmem_free(rqbuf_hd, SENSE_LENGTH); 3239 } 3240 3241 /* 3242 * Function: sd_check_for_writable_cd 3243 * 3244 * Description: This routine determines if the media in the device is 3245 * writable or not. It uses the get configuration command (0x46) 3246 * to determine if the media is writable 3247 * 3248 * Arguments: un - driver soft state (unit) structure 3249 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" 3250 * chain and the normal command waitq, or 3251 * SD_PATH_DIRECT_PRIORITY to use the USCSI 3252 * "direct" chain and bypass the normal command 3253 * waitq. 3254 * 3255 * Context: Never called at interrupt context. 
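 *
 * For reference (MMC GET CONFIGURATION semantics, on which this routine
 * relies): bytes 6-7 of the returned feature header hold the device's
 * current profile, so the check in the body below,
 *
 *	(out_data[6] == 0) && (out_data[7] == 0x12)
 *
 * accepts current profile 0x0012 (DVD-RAM), i.e. writable media.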
3256 */
3257
3258 static void
3259 sd_check_for_writable_cd(struct sd_lun *un, int path_flag)
3260 {
3261 struct uscsi_cmd com;
3262 uchar_t *out_data;
3263 uchar_t *rqbuf;
3264 int rtn;
3265 uchar_t *out_data_rw, *out_data_hd;
3266 uchar_t *rqbuf_rw, *rqbuf_hd;
3267 struct mode_header_grp2 *sense_mhp;
3268 uchar_t *sense_page;
3269 caddr_t buf;
3270 int bd_len;
3271 int status;
3272
3273 ASSERT(un != NULL);
3274 ASSERT(mutex_owned(SD_MUTEX(un)));
3275
3276 /*
3277 * Initialize un_f_mmc_writable_media to FALSE; it is set to TRUE
3278 * only if the configuration info tells us the media is writable.
3279 */
3280 un->un_f_mmc_writable_media = FALSE;
3281 mutex_exit(SD_MUTEX(un));
3282
3283 out_data = kmem_zalloc(SD_PROFILE_HEADER_LEN, KM_SLEEP);
3284 rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);
3285
3286 rtn = sd_send_scsi_GET_CONFIGURATION(un, &com, rqbuf, SENSE_LENGTH,
3287 out_data, SD_PROFILE_HEADER_LEN, path_flag);
3288
3289 mutex_enter(SD_MUTEX(un));
3290 if (rtn == 0) {
3291 /*
3292 * We have good information, check for writable DVD.
3293 */
3294 if ((out_data[6] == 0) && (out_data[7] == 0x12)) {
3295 un->un_f_mmc_writable_media = TRUE;
3296 kmem_free(out_data, SD_PROFILE_HEADER_LEN);
3297 kmem_free(rqbuf, SENSE_LENGTH);
3298 return;
3299 }
3300 }
3301
3302 kmem_free(out_data, SD_PROFILE_HEADER_LEN);
3303 kmem_free(rqbuf, SENSE_LENGTH);
3304
3305 /*
3306 * Determine if this is an RRD-type device.
3307 */
3308 mutex_exit(SD_MUTEX(un));
3309 buf = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP);
3310 status = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, (uchar_t *)buf,
3311 BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP, path_flag);
3312 mutex_enter(SD_MUTEX(un));
3313 if (status != 0) {
3314 /* command failed; just return */
3315 kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
3316 return;
3317 }
3318
3319 /* Get to the page data */
3320 sense_mhp = (struct mode_header_grp2 *)buf;
3321 bd_len = (sense_mhp->bdesc_length_hi << 8) | sense_mhp->bdesc_length_lo;
3322 if (bd_len > MODE_BLK_DESC_LENGTH) {
3323 /*
3324 * We did not get back the expected block descriptor length so
3325 * we cannot check the mode page.
3326 */
3327 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
3328 "sd_check_for_writable_cd: Mode Sense returned "
3329 "invalid block descriptor length\n");
3330 kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
3331 return;
3332 }
3333
3334 /*
3335 * If the device presents DVD or CD capabilities in the mode
3336 * page, we can return here since an RRD device will not have
3337 * these capabilities.
3338 */
3339 sense_page = (uchar_t *)(buf + MODE_HEADER_LENGTH_GRP2 + bd_len);
3340 if ((sense_page[2] & 0x3f) || (sense_page[3] & 0x3f)) {
3341 kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
3342 return;
3343 }
3344 kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
3345
3346 /*
3347 * If un->un_f_mmc_writable_media is still FALSE,
3348 * check for RRD type media. An RRD device is identified
3349 * by the features RANDOM_WRITABLE and HARDWARE_DEFECT_MANAGEMENT.
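 * Both features must also be reported as current (bit 0 of byte 10
 * in each returned feature descriptor) before the media is treated
 * as writable; the checks below test exactly that.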
3350 */
3351 mutex_exit(SD_MUTEX(un));
3352 out_data_rw = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP);
3353 rqbuf_rw = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);
3354
3355 rtn = sd_send_scsi_feature_GET_CONFIGURATION(un, &com, rqbuf_rw,
3356 SENSE_LENGTH, out_data_rw, SD_CURRENT_FEATURE_LEN,
3357 RANDOM_WRITABLE, path_flag);
3358 if (rtn != 0) {
3359 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN);
3360 kmem_free(rqbuf_rw, SENSE_LENGTH);
3361 mutex_enter(SD_MUTEX(un));
3362 return;
3363 }
3364
3365 out_data_hd = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP);
3366 rqbuf_hd = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);
3367
3368 rtn = sd_send_scsi_feature_GET_CONFIGURATION(un, &com, rqbuf_hd,
3369 SENSE_LENGTH, out_data_hd, SD_CURRENT_FEATURE_LEN,
3370 HARDWARE_DEFECT_MANAGEMENT, path_flag);
3371 mutex_enter(SD_MUTEX(un));
3372 if (rtn == 0) {
3373 /*
3374 * We have good information, check for random writable
3375 * and hardware defect features as current.
3376 */
3377 if ((out_data_rw[9] & RANDOM_WRITABLE) &&
3378 (out_data_rw[10] & 0x1) &&
3379 (out_data_hd[9] & HARDWARE_DEFECT_MANAGEMENT) &&
3380 (out_data_hd[10] & 0x1)) {
3381 un->un_f_mmc_writable_media = TRUE;
3382 }
3383 }
3384
3385 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN);
3386 kmem_free(rqbuf_rw, SENSE_LENGTH);
3387 kmem_free(out_data_hd, SD_CURRENT_FEATURE_LEN);
3388 kmem_free(rqbuf_hd, SENSE_LENGTH);
3389 }
3390
3391 /*
3392 * Function: sd_read_unit_properties
3393 *
3394 * Description: The following implements a property lookup mechanism.
3395 * Properties for particular disks (keyed on vendor, model
3396 * and rev numbers) are sought in the sd.conf file via
3397 * sd_process_sdconf_file(), and if not found there, are
3398 * looked for in a list hardcoded in this driver via
3399 * sd_process_sdconf_table(). Once located, the properties
3400 * are used to update the driver unit structure.
3401 *
3402 * Arguments: un - driver soft state (unit) structure
3403 */
3404
3405 static void
3406 sd_read_unit_properties(struct sd_lun *un)
3407 {
3408 /*
3409 * sd_process_sdconf_file returns SD_FAILURE if it cannot find
3410 * the "sd-config-list" property (from the sd.conf file) or if
3411 * there was not a match for the inquiry vid/pid. If this event
3412 * occurs the static driver configuration table is searched for
3413 * a match.
3414 */
3415 ASSERT(un != NULL);
3416 if (sd_process_sdconf_file(un) == SD_FAILURE) {
3417 sd_process_sdconf_table(un);
3418 }
3419
3420 /* check for LSI device */
3421 sd_is_lsi(un);
3422
3423
3424 }
3425
3426
3427 /*
3428 * Function: sd_process_sdconf_file
3429 *
3430 * Description: Use ddi_getlongprop to obtain the properties from the
3431 * driver's config file (i.e., sd.conf) and update the driver
3432 * soft state structure accordingly.
3433 *
3434 * Arguments: un - driver soft state (unit) structure
3435 *
3436 * Return Code: SD_SUCCESS - The properties were successfully set according
3437 * to the driver configuration file.
3438 * SD_FAILURE - The driver config list was not obtained or
3439 * there was no vid/pid match. This indicates that
3440 * the static config table should be used.
3441 * 3442 * The config file has a property, "sd-config-list", which consists of 3443 * one or more duplets as follows: 3444 * 3445 * sd-config-list= 3446 * <duplet>, 3447 * [<duplet>,] 3448 * [<duplet>]; 3449 * 3450 * The structure of each duplet is as follows: 3451 * 3452 * <duplet>:= <vid+pid>,<data-property-name_list> 3453 * 3454 * The first entry of the duplet is the device ID string (the concatenated 3455 * vid & pid; not to be confused with a device_id). This is defined in 3456 * the same way as in the sd_disk_table. 3457 * 3458 * The second part of the duplet is a string that identifies a 3459 * data-property-name-list. The data-property-name-list is defined as 3460 * follows: 3461 * 3462 * <data-property-name-list>:=<data-property-name> [<data-property-name>] 3463 * 3464 * The syntax of <data-property-name> depends on the <version> field. 3465 * 3466 * If version = SD_CONF_VERSION_1 we have the following syntax: 3467 * 3468 * <data-property-name>:=<version>,<flags>,<prop0>,<prop1>,.....<propN> 3469 * 3470 * where the prop0 value will be used to set prop0 if bit0 set in the 3471 * flags, prop1 if bit1 set, etc. and N = SD_CONF_MAX_ITEMS -1 3472 * 3473 */ 3474 3475 static int 3476 sd_process_sdconf_file(struct sd_lun *un) 3477 { 3478 char *config_list = NULL; 3479 int config_list_len; 3480 int len; 3481 int dupletlen = 0; 3482 char *vidptr; 3483 int vidlen; 3484 char *dnlist_ptr; 3485 char *dataname_ptr; 3486 int dnlist_len; 3487 int dataname_len; 3488 int *data_list; 3489 int data_list_len; 3490 int rval = SD_FAILURE; 3491 int i; 3492 3493 ASSERT(un != NULL); 3494 3495 /* Obtain the configuration list associated with the .conf file */ 3496 if (ddi_getlongprop(DDI_DEV_T_ANY, SD_DEVINFO(un), DDI_PROP_DONTPASS, 3497 sd_config_list, (caddr_t)&config_list, &config_list_len) 3498 != DDI_PROP_SUCCESS) { 3499 return (SD_FAILURE); 3500 } 3501 3502 /* 3503 * Compare vids in each duplet to the inquiry vid - if a match is 3504 * made, get the data value and update the soft state structure 3505 * accordingly. 3506 * 3507 * Note: This algorithm is complex and difficult to maintain. It should 3508 * be replaced with a more robust implementation. 3509 */ 3510 for (len = config_list_len, vidptr = config_list; len > 0; 3511 vidptr += dupletlen, len -= dupletlen) { 3512 /* 3513 * Note: The assumption here is that each vid entry is on 3514 * a unique line from its associated duplet. 3515 */ 3516 vidlen = dupletlen = (int)strlen(vidptr); 3517 if ((vidlen == 0) || 3518 (sd_sdconf_id_match(un, vidptr, vidlen) != SD_SUCCESS)) { 3519 dupletlen++; 3520 continue; 3521 } 3522 3523 /* 3524 * dnlist contains 1 or more blank separated 3525 * data-property-name entries 3526 */ 3527 dnlist_ptr = vidptr + vidlen + 1; 3528 dnlist_len = (int)strlen(dnlist_ptr); 3529 dupletlen += dnlist_len + 2; 3530 3531 /* 3532 * Set a pointer for the first data-property-name 3533 * entry in the list 3534 */ 3535 dataname_ptr = dnlist_ptr; 3536 dataname_len = 0; 3537 3538 /* 3539 * Loop through all data-property-name entries in the 3540 * data-property-name-list setting the properties for each. 3541 */ 3542 while (dataname_len < dnlist_len) { 3543 int version; 3544 3545 /* 3546 * Determine the length of the current 3547 * data-property-name entry by indexing until a 3548 * blank or NULL is encountered. When the space is 3549 * encountered reset it to a NULL for compliance 3550 * with ddi_getlongprop(). 
3551 */ 3552 for (i = 0; ((dataname_ptr[i] != ' ') && 3553 (dataname_ptr[i] != '\0')); i++) { 3554 ; 3555 } 3556 3557 dataname_len += i; 3558 /* If not null terminated, Make it so */ 3559 if (dataname_ptr[i] == ' ') { 3560 dataname_ptr[i] = '\0'; 3561 } 3562 dataname_len++; 3563 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3564 "sd_process_sdconf_file: disk:%s, data:%s\n", 3565 vidptr, dataname_ptr); 3566 3567 /* Get the data list */ 3568 if (ddi_getlongprop(DDI_DEV_T_ANY, SD_DEVINFO(un), 0, 3569 dataname_ptr, (caddr_t)&data_list, &data_list_len) 3570 != DDI_PROP_SUCCESS) { 3571 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3572 "sd_process_sdconf_file: data property (%s)" 3573 " has no value\n", dataname_ptr); 3574 dataname_ptr = dnlist_ptr + dataname_len; 3575 continue; 3576 } 3577 3578 version = data_list[0]; 3579 3580 if (version == SD_CONF_VERSION_1) { 3581 sd_tunables values; 3582 3583 /* Set the properties */ 3584 if (sd_chk_vers1_data(un, data_list[1], 3585 &data_list[2], data_list_len, dataname_ptr) 3586 == SD_SUCCESS) { 3587 sd_get_tunables_from_conf(un, 3588 data_list[1], &data_list[2], 3589 &values); 3590 sd_set_vers1_properties(un, 3591 data_list[1], &values); 3592 rval = SD_SUCCESS; 3593 } else { 3594 rval = SD_FAILURE; 3595 } 3596 } else { 3597 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3598 "data property %s version 0x%x is invalid.", 3599 dataname_ptr, version); 3600 rval = SD_FAILURE; 3601 } 3602 kmem_free(data_list, data_list_len); 3603 dataname_ptr = dnlist_ptr + dataname_len; 3604 } 3605 } 3606 3607 /* free up the memory allocated by ddi_getlongprop */ 3608 if (config_list) { 3609 kmem_free(config_list, config_list_len); 3610 } 3611 3612 return (rval); 3613 } 3614 3615 /* 3616 * Function: sd_get_tunables_from_conf() 3617 * 3618 * 3619 * This function reads the data list from the sd.conf file and pulls 3620 * the values that can have numeric values as arguments and places 3621 * the values in the appropriate sd_tunables member. 3622 * Since the order of the data list members varies across platforms 3623 * This function reads them from the data list in a platform specific 3624 * order and places them into the correct sd_tunable member that is 3625 * consistent across all platforms. 
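 *
 * For example (hypothetical values): a flags word of 0x1 has only
 * bit 0 (SD_CONF_BSET_THROTTLE) set, so data_list[0] is copied into
 * sdt_throttle and every other sd_tunables member stays zero. With
 * flags of 0x5, data_list[0] still sets sdt_throttle, data_list[1]
 * is skipped, and data_list[2] sets the member selected by bit 2,
 * since the list entries are positional by bit number.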
3626 */ 3627 static void 3628 sd_get_tunables_from_conf(struct sd_lun *un, int flags, int *data_list, 3629 sd_tunables *values) 3630 { 3631 int i; 3632 int mask; 3633 3634 bzero(values, sizeof (sd_tunables)); 3635 3636 for (i = 0; i < SD_CONF_MAX_ITEMS; i++) { 3637 3638 mask = 1 << i; 3639 if (mask > flags) { 3640 break; 3641 } 3642 3643 switch (mask & flags) { 3644 case 0: /* This mask bit not set in flags */ 3645 continue; 3646 case SD_CONF_BSET_THROTTLE: 3647 values->sdt_throttle = data_list[i]; 3648 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3649 "sd_get_tunables_from_conf: throttle = %d\n", 3650 values->sdt_throttle); 3651 break; 3652 case SD_CONF_BSET_CTYPE: 3653 values->sdt_ctype = data_list[i]; 3654 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3655 "sd_get_tunables_from_conf: ctype = %d\n", 3656 values->sdt_ctype); 3657 break; 3658 case SD_CONF_BSET_NRR_COUNT: 3659 values->sdt_not_rdy_retries = data_list[i]; 3660 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3661 "sd_get_tunables_from_conf: not_rdy_retries = %d\n", 3662 values->sdt_not_rdy_retries); 3663 break; 3664 case SD_CONF_BSET_BSY_RETRY_COUNT: 3665 values->sdt_busy_retries = data_list[i]; 3666 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3667 "sd_get_tunables_from_conf: busy_retries = %d\n", 3668 values->sdt_busy_retries); 3669 break; 3670 case SD_CONF_BSET_RST_RETRIES: 3671 values->sdt_reset_retries = data_list[i]; 3672 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3673 "sd_get_tunables_from_conf: reset_retries = %d\n", 3674 values->sdt_reset_retries); 3675 break; 3676 case SD_CONF_BSET_RSV_REL_TIME: 3677 values->sdt_reserv_rel_time = data_list[i]; 3678 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3679 "sd_get_tunables_from_conf: reserv_rel_time = %d\n", 3680 values->sdt_reserv_rel_time); 3681 break; 3682 case SD_CONF_BSET_MIN_THROTTLE: 3683 values->sdt_min_throttle = data_list[i]; 3684 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3685 "sd_get_tunables_from_conf: min_throttle = %d\n", 3686 values->sdt_min_throttle); 3687 break; 3688 case SD_CONF_BSET_DISKSORT_DISABLED: 3689 values->sdt_disk_sort_dis = data_list[i]; 3690 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3691 "sd_get_tunables_from_conf: disk_sort_dis = %d\n", 3692 values->sdt_disk_sort_dis); 3693 break; 3694 case SD_CONF_BSET_LUN_RESET_ENABLED: 3695 values->sdt_lun_reset_enable = data_list[i]; 3696 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3697 "sd_get_tunables_from_conf: lun_reset_enable = %d" 3698 "\n", values->sdt_lun_reset_enable); 3699 break; 3700 case SD_CONF_BSET_CACHE_IS_NV: 3701 values->sdt_suppress_cache_flush = data_list[i]; 3702 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3703 "sd_get_tunables_from_conf: \ 3704 suppress_cache_flush = %d" 3705 "\n", values->sdt_suppress_cache_flush); 3706 break; 3707 } 3708 } 3709 } 3710 3711 /* 3712 * Function: sd_process_sdconf_table 3713 * 3714 * Description: Search the static configuration table for a match on the 3715 * inquiry vid/pid and update the driver soft state structure 3716 * according to the table property values for the device. 
3717 * 3718 * The form of a configuration table entry is: 3719 * <vid+pid>,<flags>,<property-data> 3720 * "SEAGATE ST42400N",1,0x40000, 3721 * 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1; 3722 * 3723 * Arguments: un - driver soft state (unit) structure 3724 */ 3725 3726 static void 3727 sd_process_sdconf_table(struct sd_lun *un) 3728 { 3729 char *id = NULL; 3730 int table_index; 3731 int idlen; 3732 3733 ASSERT(un != NULL); 3734 for (table_index = 0; table_index < sd_disk_table_size; 3735 table_index++) { 3736 id = sd_disk_table[table_index].device_id; 3737 idlen = strlen(id); 3738 if (idlen == 0) { 3739 continue; 3740 } 3741 3742 /* 3743 * The static configuration table currently does not 3744 * implement version 10 properties. Additionally, 3745 * multiple data-property-name entries are not 3746 * implemented in the static configuration table. 3747 */ 3748 if (sd_sdconf_id_match(un, id, idlen) == SD_SUCCESS) { 3749 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3750 "sd_process_sdconf_table: disk %s\n", id); 3751 sd_set_vers1_properties(un, 3752 sd_disk_table[table_index].flags, 3753 sd_disk_table[table_index].properties); 3754 break; 3755 } 3756 } 3757 } 3758 3759 3760 /* 3761 * Function: sd_sdconf_id_match 3762 * 3763 * Description: This local function implements a case sensitive vid/pid 3764 * comparison as well as the boundary cases of wild card and 3765 * multiple blanks. 3766 * 3767 * Note: An implicit assumption made here is that the scsi 3768 * inquiry structure will always keep the vid, pid and 3769 * revision strings in consecutive sequence, so they can be 3770 * read as a single string. If this assumption is not the 3771 * case, a separate string, to be used for the check, needs 3772 * to be built with these strings concatenated. 3773 * 3774 * Arguments: un - driver soft state (unit) structure 3775 * id - table or config file vid/pid 3776 * idlen - length of the vid/pid (bytes) 3777 * 3778 * Return Code: SD_SUCCESS - Indicates a match with the inquiry vid/pid 3779 * SD_FAILURE - Indicates no match with the inquiry vid/pid 3780 */ 3781 3782 static int 3783 sd_sdconf_id_match(struct sd_lun *un, char *id, int idlen) 3784 { 3785 struct scsi_inquiry *sd_inq; 3786 int rval = SD_SUCCESS; 3787 3788 ASSERT(un != NULL); 3789 sd_inq = un->un_sd->sd_inq; 3790 ASSERT(id != NULL); 3791 3792 /* 3793 * We use the inq_vid as a pointer to a buffer containing the 3794 * vid and pid and use the entire vid/pid length of the table 3795 * entry for the comparison. This works because the inq_pid 3796 * data member follows inq_vid in the scsi_inquiry structure. 3797 */ 3798 if (strncasecmp(sd_inq->inq_vid, id, idlen) != 0) { 3799 /* 3800 * The user id string is compared to the inquiry vid/pid 3801 * using a case insensitive comparison and ignoring 3802 * multiple spaces. 3803 */ 3804 rval = sd_blank_cmp(un, id, idlen); 3805 if (rval != SD_SUCCESS) { 3806 /* 3807 * User id strings that start and end with a "*" 3808 * are a special case. These do not have a 3809 * specific vendor, and the product string can 3810 * appear anywhere in the 16 byte PID portion of 3811 * the inquiry data. This is a simple strstr() 3812 * type search for the user id in the inquiry data. 
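 * For example, a hypothetical table entry of "*DiskUnit*" matches
 * any device whose 16-byte inquiry PID contains the substring
 * "DiskUnit", regardless of the vendor field.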
3813 */ 3814 if ((id[0] == '*') && (id[idlen - 1] == '*')) { 3815 char *pidptr = &id[1]; 3816 int i; 3817 int j; 3818 int pidstrlen = idlen - 2; 3819 j = sizeof (SD_INQUIRY(un)->inq_pid) - 3820 pidstrlen; 3821 3822 if (j < 0) { 3823 return (SD_FAILURE); 3824 } 3825 for (i = 0; i < j; i++) { 3826 if (bcmp(&SD_INQUIRY(un)->inq_pid[i], 3827 pidptr, pidstrlen) == 0) { 3828 rval = SD_SUCCESS; 3829 break; 3830 } 3831 } 3832 } 3833 } 3834 } 3835 return (rval); 3836 } 3837 3838 3839 /* 3840 * Function: sd_blank_cmp 3841 * 3842 * Description: If the id string starts and ends with a space, treat 3843 * multiple consecutive spaces as equivalent to a single 3844 * space. For example, this causes a sd_disk_table entry 3845 * of " NEC CDROM " to match a device's id string of 3846 * "NEC CDROM". 3847 * 3848 * Note: The success exit condition for this routine is if 3849 * the pointer to the table entry is '\0' and the cnt of 3850 * the inquiry length is zero. This will happen if the inquiry 3851 * string returned by the device is padded with spaces to be 3852 * exactly 24 bytes in length (8 byte vid + 16 byte pid). The 3853 * SCSI spec states that the inquiry string is to be padded with 3854 * spaces. 3855 * 3856 * Arguments: un - driver soft state (unit) structure 3857 * id - table or config file vid/pid 3858 * idlen - length of the vid/pid (bytes) 3859 * 3860 * Return Code: SD_SUCCESS - Indicates a match with the inquiry vid/pid 3861 * SD_FAILURE - Indicates no match with the inquiry vid/pid 3862 */ 3863 3864 static int 3865 sd_blank_cmp(struct sd_lun *un, char *id, int idlen) 3866 { 3867 char *p1; 3868 char *p2; 3869 int cnt; 3870 cnt = sizeof (SD_INQUIRY(un)->inq_vid) + 3871 sizeof (SD_INQUIRY(un)->inq_pid); 3872 3873 ASSERT(un != NULL); 3874 p2 = un->un_sd->sd_inq->inq_vid; 3875 ASSERT(id != NULL); 3876 p1 = id; 3877 3878 if ((id[0] == ' ') && (id[idlen - 1] == ' ')) { 3879 /* 3880 * Note: string p1 is terminated by a NUL but string p2 3881 * isn't. The end of p2 is determined by cnt. 3882 */ 3883 for (;;) { 3884 /* skip over any extra blanks in both strings */ 3885 while ((*p1 != '\0') && (*p1 == ' ')) { 3886 p1++; 3887 } 3888 while ((cnt != 0) && (*p2 == ' ')) { 3889 p2++; 3890 cnt--; 3891 } 3892 3893 /* compare the two strings */ 3894 if ((cnt == 0) || 3895 (SD_TOUPPER(*p1) != SD_TOUPPER(*p2))) { 3896 break; 3897 } 3898 while ((cnt > 0) && 3899 (SD_TOUPPER(*p1) == SD_TOUPPER(*p2))) { 3900 p1++; 3901 p2++; 3902 cnt--; 3903 } 3904 } 3905 } 3906 3907 /* return SD_SUCCESS if both strings match */ 3908 return (((*p1 == '\0') && (cnt == 0)) ? 
SD_SUCCESS : SD_FAILURE); 3909 } 3910 3911 3912 /* 3913 * Function: sd_chk_vers1_data 3914 * 3915 * Description: Verify the version 1 device properties provided by the 3916 * user via the configuration file 3917 * 3918 * Arguments: un - driver soft state (unit) structure 3919 * flags - integer mask indicating properties to be set 3920 * prop_list - integer list of property values 3921 * list_len - length of user provided data 3922 * 3923 * Return Code: SD_SUCCESS - Indicates the user provided data is valid 3924 * SD_FAILURE - Indicates the user provided data is invalid 3925 */ 3926 3927 static int 3928 sd_chk_vers1_data(struct sd_lun *un, int flags, int *prop_list, 3929 int list_len, char *dataname_ptr) 3930 { 3931 int i; 3932 int mask = 1; 3933 int index = 0; 3934 3935 ASSERT(un != NULL); 3936 3937 /* Check for a NULL property name and list */ 3938 if (dataname_ptr == NULL) { 3939 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3940 "sd_chk_vers1_data: NULL data property name."); 3941 return (SD_FAILURE); 3942 } 3943 if (prop_list == NULL) { 3944 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3945 "sd_chk_vers1_data: %s NULL data property list.", 3946 dataname_ptr); 3947 return (SD_FAILURE); 3948 } 3949 3950 /* Display a warning if undefined bits are set in the flags */ 3951 if (flags & ~SD_CONF_BIT_MASK) { 3952 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3953 "sd_chk_vers1_data: invalid bits 0x%x in data list %s. " 3954 "Properties not set.", 3955 (flags & ~SD_CONF_BIT_MASK), dataname_ptr); 3956 return (SD_FAILURE); 3957 } 3958 3959 /* 3960 * Verify the length of the list by identifying the highest bit set 3961 * in the flags and validating that the property list has a length 3962 * up to the index of this bit. 3963 */ 3964 for (i = 0; i < SD_CONF_MAX_ITEMS; i++) { 3965 if (flags & mask) { 3966 index++; 3967 } 3968 mask = 1 << i; 3969 } 3970 if ((list_len / sizeof (int)) < (index + 2)) { 3971 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3972 "sd_chk_vers1_data: " 3973 "Data property list %s size is incorrect. " 3974 "Properties not set.", dataname_ptr); 3975 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, "Size expected: " 3976 "version + 1 flagword + %d properties", SD_CONF_MAX_ITEMS); 3977 return (SD_FAILURE); 3978 } 3979 return (SD_SUCCESS); 3980 } 3981 3982 3983 /* 3984 * Function: sd_set_vers1_properties 3985 * 3986 * Description: Set version 1 device properties based on a property list 3987 * retrieved from the driver configuration file or static 3988 * configuration table. Version 1 properties have the format: 3989 * 3990 * <data-property-name>:=<version>,<flags>,<prop0>,<prop1>,.....<propN> 3991 * 3992 * where the prop0 value will be used to set prop0 if bit0 3993 * is set in the flags 3994 * 3995 * Arguments: un - driver soft state (unit) structure 3996 * flags - integer mask indicating properties to be set 3997 * prop_list - integer list of property values 3998 */ 3999 4000 static void 4001 sd_set_vers1_properties(struct sd_lun *un, int flags, sd_tunables *prop_list) 4002 { 4003 ASSERT(un != NULL); 4004 4005 /* 4006 * Set the flag to indicate cache is to be disabled. An attempt 4007 * to disable the cache via sd_cache_control() will be made 4008 * later during attach once the basic initialization is complete. 
4009 */ 4010 if (flags & SD_CONF_BSET_NOCACHE) { 4011 un->un_f_opt_disable_cache = TRUE; 4012 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4013 "sd_set_vers1_properties: caching disabled flag set\n"); 4014 } 4015 4016 /* CD-specific configuration parameters */ 4017 if (flags & SD_CONF_BSET_PLAYMSF_BCD) { 4018 un->un_f_cfg_playmsf_bcd = TRUE; 4019 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4020 "sd_set_vers1_properties: playmsf_bcd set\n"); 4021 } 4022 if (flags & SD_CONF_BSET_READSUB_BCD) { 4023 un->un_f_cfg_readsub_bcd = TRUE; 4024 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4025 "sd_set_vers1_properties: readsub_bcd set\n"); 4026 } 4027 if (flags & SD_CONF_BSET_READ_TOC_TRK_BCD) { 4028 un->un_f_cfg_read_toc_trk_bcd = TRUE; 4029 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4030 "sd_set_vers1_properties: read_toc_trk_bcd set\n"); 4031 } 4032 if (flags & SD_CONF_BSET_READ_TOC_ADDR_BCD) { 4033 un->un_f_cfg_read_toc_addr_bcd = TRUE; 4034 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4035 "sd_set_vers1_properties: read_toc_addr_bcd set\n"); 4036 } 4037 if (flags & SD_CONF_BSET_NO_READ_HEADER) { 4038 un->un_f_cfg_no_read_header = TRUE; 4039 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4040 "sd_set_vers1_properties: no_read_header set\n"); 4041 } 4042 if (flags & SD_CONF_BSET_READ_CD_XD4) { 4043 un->un_f_cfg_read_cd_xd4 = TRUE; 4044 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4045 "sd_set_vers1_properties: read_cd_xd4 set\n"); 4046 } 4047 4048 /* Support for devices which do not have valid/unique serial numbers */ 4049 if (flags & SD_CONF_BSET_FAB_DEVID) { 4050 un->un_f_opt_fab_devid = TRUE; 4051 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4052 "sd_set_vers1_properties: fab_devid bit set\n"); 4053 } 4054 4055 /* Support for user throttle configuration */ 4056 if (flags & SD_CONF_BSET_THROTTLE) { 4057 ASSERT(prop_list != NULL); 4058 un->un_saved_throttle = un->un_throttle = 4059 prop_list->sdt_throttle; 4060 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4061 "sd_set_vers1_properties: throttle set to %d\n", 4062 prop_list->sdt_throttle); 4063 } 4064 4065 /* Set the per disk retry count according to the conf file or table. 
*/ 4066 if (flags & SD_CONF_BSET_NRR_COUNT) { 4067 ASSERT(prop_list != NULL); 4068 if (prop_list->sdt_not_rdy_retries) { 4069 un->un_notready_retry_count = 4070 prop_list->sdt_not_rdy_retries; 4071 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4072 "sd_set_vers1_properties: not ready retry count" 4073 " set to %d\n", un->un_notready_retry_count); 4074 } 4075 } 4076 4077 /* The controller type is reported for generic disk driver ioctls */ 4078 if (flags & SD_CONF_BSET_CTYPE) { 4079 ASSERT(prop_list != NULL); 4080 switch (prop_list->sdt_ctype) { 4081 case CTYPE_CDROM: 4082 un->un_ctype = prop_list->sdt_ctype; 4083 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4084 "sd_set_vers1_properties: ctype set to " 4085 "CTYPE_CDROM\n"); 4086 break; 4087 case CTYPE_CCS: 4088 un->un_ctype = prop_list->sdt_ctype; 4089 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4090 "sd_set_vers1_properties: ctype set to " 4091 "CTYPE_CCS\n"); 4092 break; 4093 case CTYPE_ROD: /* RW optical */ 4094 un->un_ctype = prop_list->sdt_ctype; 4095 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4096 "sd_set_vers1_properties: ctype set to " 4097 "CTYPE_ROD\n"); 4098 break; 4099 default: 4100 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 4101 "sd_set_vers1_properties: Could not set " 4102 "invalid ctype value (%d)", 4103 prop_list->sdt_ctype); 4104 } 4105 } 4106 4107 /* Purple failover timeout */ 4108 if (flags & SD_CONF_BSET_BSY_RETRY_COUNT) { 4109 ASSERT(prop_list != NULL); 4110 un->un_busy_retry_count = 4111 prop_list->sdt_busy_retries; 4112 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4113 "sd_set_vers1_properties: " 4114 "busy retry count set to %d\n", 4115 un->un_busy_retry_count); 4116 } 4117 4118 /* Purple reset retry count */ 4119 if (flags & SD_CONF_BSET_RST_RETRIES) { 4120 ASSERT(prop_list != NULL); 4121 un->un_reset_retry_count = 4122 prop_list->sdt_reset_retries; 4123 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4124 "sd_set_vers1_properties: " 4125 "reset retry count set to %d\n", 4126 un->un_reset_retry_count); 4127 } 4128 4129 /* Purple reservation release timeout */ 4130 if (flags & SD_CONF_BSET_RSV_REL_TIME) { 4131 ASSERT(prop_list != NULL); 4132 un->un_reserve_release_time = 4133 prop_list->sdt_reserv_rel_time; 4134 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4135 "sd_set_vers1_properties: " 4136 "reservation release timeout set to %d\n", 4137 un->un_reserve_release_time); 4138 } 4139 4140 /* 4141 * Driver flag telling the driver to verify that no commands are pending 4142 * for a device before issuing a Test Unit Ready. This is a workaround 4143 * for a firmware bug in some Seagate eliteI drives. 4144 */ 4145 if (flags & SD_CONF_BSET_TUR_CHECK) { 4146 un->un_f_cfg_tur_check = TRUE; 4147 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4148 "sd_set_vers1_properties: tur queue check set\n"); 4149 } 4150 4151 if (flags & SD_CONF_BSET_MIN_THROTTLE) { 4152 un->un_min_throttle = prop_list->sdt_min_throttle; 4153 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4154 "sd_set_vers1_properties: min throttle set to %d\n", 4155 un->un_min_throttle); 4156 } 4157 4158 if (flags & SD_CONF_BSET_DISKSORT_DISABLED) { 4159 un->un_f_disksort_disabled = 4160 (prop_list->sdt_disk_sort_dis != 0) ? 4161 TRUE : FALSE; 4162 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4163 "sd_set_vers1_properties: disksort disabled " 4164 "flag set to %d\n", 4165 prop_list->sdt_disk_sort_dis); 4166 } 4167 4168 if (flags & SD_CONF_BSET_LUN_RESET_ENABLED) { 4169 un->un_f_lun_reset_enabled = 4170 (prop_list->sdt_lun_reset_enable != 0) ? 
4171 TRUE : FALSE; 4172 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4173 "sd_set_vers1_properties: lun reset enabled " 4174 "flag set to %d\n", 4175 prop_list->sdt_lun_reset_enable); 4176 } 4177 4178 if (flags & SD_CONF_BSET_CACHE_IS_NV) { 4179 un->un_f_suppress_cache_flush = 4180 (prop_list->sdt_suppress_cache_flush != 0) ? 4181 TRUE : FALSE; 4182 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4183 "sd_set_vers1_properties: suppress_cache_flush " 4184 "flag set to %d\n", 4185 prop_list->sdt_suppress_cache_flush); 4186 } 4187 4188 /* 4189 * Validate the throttle values. 4190 * If any of the numbers are invalid, set everything to defaults. 4191 */ 4192 if ((un->un_throttle < SD_LOWEST_VALID_THROTTLE) || 4193 (un->un_min_throttle < SD_LOWEST_VALID_THROTTLE) || 4194 (un->un_min_throttle > un->un_throttle)) { 4195 un->un_saved_throttle = un->un_throttle = sd_max_throttle; 4196 un->un_min_throttle = sd_min_throttle; 4197 } 4198 } 4199 4200 /* 4201 * Function: sd_is_lsi() 4202 * 4203 * Description: Check for lsi devices, step through the static device 4204 * table to match vid/pid. 4205 * 4206 * Args: un - ptr to sd_lun 4207 * 4208 * Notes: When creating new LSI property, need to add the new LSI property 4209 * to this function. 4210 */ 4211 static void 4212 sd_is_lsi(struct sd_lun *un) 4213 { 4214 char *id = NULL; 4215 int table_index; 4216 int idlen; 4217 void *prop; 4218 4219 ASSERT(un != NULL); 4220 for (table_index = 0; table_index < sd_disk_table_size; 4221 table_index++) { 4222 id = sd_disk_table[table_index].device_id; 4223 idlen = strlen(id); 4224 if (idlen == 0) { 4225 continue; 4226 } 4227 4228 if (sd_sdconf_id_match(un, id, idlen) == SD_SUCCESS) { 4229 prop = sd_disk_table[table_index].properties; 4230 if (prop == &lsi_properties || 4231 prop == &lsi_oem_properties || 4232 prop == &lsi_properties_scsi || 4233 prop == &symbios_properties) { 4234 un->un_f_cfg_is_lsi = TRUE; 4235 } 4236 break; 4237 } 4238 } 4239 } 4240 4241 /* 4242 * Function: sd_get_physical_geometry 4243 * 4244 * Description: Retrieve the MODE SENSE page 3 (Format Device Page) and 4245 * MODE SENSE page 4 (Rigid Disk Drive Geometry Page) from the 4246 * target, and use this information to initialize the physical 4247 * geometry cache specified by pgeom_p. 4248 * 4249 * MODE SENSE is an optional command, so failure in this case 4250 * does not necessarily denote an error. We want to use the 4251 * MODE SENSE commands to derive the physical geometry of the 4252 * device, but if either command fails, the logical geometry is 4253 * used as the fallback for disk label geometry in cmlb. 4254 * 4255 * This requires that un->un_blockcount and un->un_tgt_blocksize 4256 * have already been initialized for the current target and 4257 * that the current values be passed as args so that we don't 4258 * end up ever trying to use -1 as a valid value. This could 4259 * happen if either value is reset while we're not holding 4260 * the mutex. 4261 * 4262 * Arguments: un - driver soft state (unit) structure 4263 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 4264 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 4265 * to use the USCSI "direct" chain and bypass the normal 4266 * command waitq. 4267 * 4268 * Context: Kernel thread only (can sleep). 
4269 */ 4270 4271 static int 4272 sd_get_physical_geometry(struct sd_lun *un, cmlb_geom_t *pgeom_p, 4273 diskaddr_t capacity, int lbasize, int path_flag) 4274 { 4275 struct mode_format *page3p; 4276 struct mode_geometry *page4p; 4277 struct mode_header *headerp; 4278 int sector_size; 4279 int nsect; 4280 int nhead; 4281 int ncyl; 4282 int intrlv; 4283 int spc; 4284 diskaddr_t modesense_capacity; 4285 int rpm; 4286 int bd_len; 4287 int mode_header_length; 4288 uchar_t *p3bufp; 4289 uchar_t *p4bufp; 4290 int cdbsize; 4291 int ret = EIO; 4292 4293 ASSERT(un != NULL); 4294 4295 if (lbasize == 0) { 4296 if (ISCD(un)) { 4297 lbasize = 2048; 4298 } else { 4299 lbasize = un->un_sys_blocksize; 4300 } 4301 } 4302 pgeom_p->g_secsize = (unsigned short)lbasize; 4303 4304 /* 4305 * If the unit is a cd/dvd drive MODE SENSE page three 4306 * and MODE SENSE page four are reserved (see SBC spec 4307 * and MMC spec). To prevent soft errors just return 4308 * using the default LBA size. 4309 */ 4310 if (ISCD(un)) 4311 return (ret); 4312 4313 cdbsize = (un->un_f_cfg_is_atapi == TRUE) ? CDB_GROUP2 : CDB_GROUP0; 4314 4315 /* 4316 * Retrieve MODE SENSE page 3 - Format Device Page 4317 */ 4318 p3bufp = kmem_zalloc(SD_MODE_SENSE_PAGE3_LENGTH, KM_SLEEP); 4319 if (sd_send_scsi_MODE_SENSE(un, cdbsize, p3bufp, 4320 SD_MODE_SENSE_PAGE3_LENGTH, SD_MODE_SENSE_PAGE3_CODE, path_flag) 4321 != 0) { 4322 SD_ERROR(SD_LOG_COMMON, un, 4323 "sd_get_physical_geometry: mode sense page 3 failed\n"); 4324 goto page3_exit; 4325 } 4326 4327 /* 4328 * Determine size of Block Descriptors in order to locate the mode 4329 * page data. ATAPI devices return 0, SCSI devices should return 4330 * MODE_BLK_DESC_LENGTH. 4331 */ 4332 headerp = (struct mode_header *)p3bufp; 4333 if (un->un_f_cfg_is_atapi == TRUE) { 4334 struct mode_header_grp2 *mhp = 4335 (struct mode_header_grp2 *)headerp; 4336 mode_header_length = MODE_HEADER_LENGTH_GRP2; 4337 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 4338 } else { 4339 mode_header_length = MODE_HEADER_LENGTH; 4340 bd_len = ((struct mode_header *)headerp)->bdesc_length; 4341 } 4342 4343 if (bd_len > MODE_BLK_DESC_LENGTH) { 4344 SD_ERROR(SD_LOG_COMMON, un, "sd_get_physical_geometry: " 4345 "received unexpected bd_len of %d, page3\n", bd_len); 4346 goto page3_exit; 4347 } 4348 4349 page3p = (struct mode_format *) 4350 ((caddr_t)headerp + mode_header_length + bd_len); 4351 4352 if (page3p->mode_page.code != SD_MODE_SENSE_PAGE3_CODE) { 4353 SD_ERROR(SD_LOG_COMMON, un, "sd_get_physical_geometry: " 4354 "mode sense pg3 code mismatch %d\n", 4355 page3p->mode_page.code); 4356 goto page3_exit; 4357 } 4358 4359 /* 4360 * Use this physical geometry data only if BOTH MODE SENSE commands 4361 * complete successfully; otherwise, revert to the logical geometry. 4362 * So, we need to save everything in temporary variables. 
4363 */ 4364 sector_size = BE_16(page3p->data_bytes_sect); 4365 4366 /* 4367 * 1243403: The NEC D38x7 drives do not support MODE SENSE sector size 4368 */ 4369 if (sector_size == 0) { 4370 sector_size = un->un_sys_blocksize; 4371 } else { 4372 sector_size &= ~(un->un_sys_blocksize - 1); 4373 } 4374 4375 nsect = BE_16(page3p->sect_track); 4376 intrlv = BE_16(page3p->interleave); 4377 4378 SD_INFO(SD_LOG_COMMON, un, 4379 "sd_get_physical_geometry: Format Parameters (page 3)\n"); 4380 SD_INFO(SD_LOG_COMMON, un, 4381 " mode page: %d; nsect: %d; sector size: %d;\n", 4382 page3p->mode_page.code, nsect, sector_size); 4383 SD_INFO(SD_LOG_COMMON, un, 4384 " interleave: %d; track skew: %d; cylinder skew: %d;\n", intrlv, 4385 BE_16(page3p->track_skew), 4386 BE_16(page3p->cylinder_skew)); 4387 4388 4389 /* 4390 * Retrieve MODE SENSE page 4 - Rigid Disk Drive Geometry Page 4391 */ 4392 p4bufp = kmem_zalloc(SD_MODE_SENSE_PAGE4_LENGTH, KM_SLEEP); 4393 if (sd_send_scsi_MODE_SENSE(un, cdbsize, p4bufp, 4394 SD_MODE_SENSE_PAGE4_LENGTH, SD_MODE_SENSE_PAGE4_CODE, path_flag) 4395 != 0) { 4396 SD_ERROR(SD_LOG_COMMON, un, 4397 "sd_get_physical_geometry: mode sense page 4 failed\n"); 4398 goto page4_exit; 4399 } 4400 4401 /* 4402 * Determine size of Block Descriptors in order to locate the mode 4403 * page data. ATAPI devices return 0, SCSI devices should return 4404 * MODE_BLK_DESC_LENGTH. 4405 */ 4406 headerp = (struct mode_header *)p4bufp; 4407 if (un->un_f_cfg_is_atapi == TRUE) { 4408 struct mode_header_grp2 *mhp = 4409 (struct mode_header_grp2 *)headerp; 4410 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 4411 } else { 4412 bd_len = ((struct mode_header *)headerp)->bdesc_length; 4413 } 4414 4415 if (bd_len > MODE_BLK_DESC_LENGTH) { 4416 SD_ERROR(SD_LOG_COMMON, un, "sd_get_physical_geometry: " 4417 "received unexpected bd_len of %d, page4\n", bd_len); 4418 goto page4_exit; 4419 } 4420 4421 page4p = (struct mode_geometry *) 4422 ((caddr_t)headerp + mode_header_length + bd_len); 4423 4424 if (page4p->mode_page.code != SD_MODE_SENSE_PAGE4_CODE) { 4425 SD_ERROR(SD_LOG_COMMON, un, "sd_get_physical_geometry: " 4426 "mode sense pg4 code mismatch %d\n", 4427 page4p->mode_page.code); 4428 goto page4_exit; 4429 } 4430 4431 /* 4432 * Stash the data now, after we know that both commands completed. 4433 */ 4434 4435 4436 nhead = (int)page4p->heads; /* uchar, so no conversion needed */ 4437 spc = nhead * nsect; 4438 ncyl = (page4p->cyl_ub << 16) + (page4p->cyl_mb << 8) + page4p->cyl_lb; 4439 rpm = BE_16(page4p->rpm); 4440 4441 modesense_capacity = spc * ncyl; 4442 4443 SD_INFO(SD_LOG_COMMON, un, 4444 "sd_get_physical_geometry: Geometry Parameters (page 4)\n"); 4445 SD_INFO(SD_LOG_COMMON, un, 4446 " cylinders: %d; heads: %d; rpm: %d;\n", ncyl, nhead, rpm); 4447 SD_INFO(SD_LOG_COMMON, un, 4448 " computed capacity(h*s*c): %d;\n", modesense_capacity); 4449 SD_INFO(SD_LOG_COMMON, un, " pgeom_p: %p; read cap: %d\n", 4450 (void *)pgeom_p, capacity); 4451 4452 /* 4453 * Compensate if the drive's geometry is not rectangular, i.e., 4454 * the product of C * H * S returned by MODE SENSE >= that returned 4455 * by read capacity. This is an idiosyncrasy of the original x86 4456 * disk subsystem. 
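 *
 * For example (illustrative figures only): with nhead 16 and nsect
 * 63, spc is 1008; if MODE SENSE reports ncyl 10000 (a computed
 * capacity of 10,080,000 blocks) while READ CAPACITY reports
 * 10,000,000 blocks, the code below sets g_acyl to
 * (80000 + 1007) / 1008 = 80 and g_ncyl to 10000 - 80 = 9920.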
4457 */ 4458 if (modesense_capacity >= capacity) { 4459 SD_INFO(SD_LOG_COMMON, un, 4460 "sd_get_physical_geometry: adjusting acyl; " 4461 "old: %d; new: %d\n", pgeom_p->g_acyl, 4462 (modesense_capacity - capacity + spc - 1) / spc); 4463 if (sector_size != 0) { 4464 /* 1243403: NEC D38x7 drives don't support sec size */ 4465 pgeom_p->g_secsize = (unsigned short)sector_size; 4466 } 4467 pgeom_p->g_nsect = (unsigned short)nsect; 4468 pgeom_p->g_nhead = (unsigned short)nhead; 4469 pgeom_p->g_capacity = capacity; 4470 pgeom_p->g_acyl = 4471 (modesense_capacity - pgeom_p->g_capacity + spc - 1) / spc; 4472 pgeom_p->g_ncyl = ncyl - pgeom_p->g_acyl; 4473 } 4474 4475 pgeom_p->g_rpm = (unsigned short)rpm; 4476 pgeom_p->g_intrlv = (unsigned short)intrlv; 4477 ret = 0; 4478 4479 SD_INFO(SD_LOG_COMMON, un, 4480 "sd_get_physical_geometry: mode sense geometry:\n"); 4481 SD_INFO(SD_LOG_COMMON, un, 4482 " nsect: %d; sector size: %d; interlv: %d\n", 4483 nsect, sector_size, intrlv); 4484 SD_INFO(SD_LOG_COMMON, un, 4485 " nhead: %d; ncyl: %d; rpm: %d; capacity(ms): %d\n", 4486 nhead, ncyl, rpm, modesense_capacity); 4487 SD_INFO(SD_LOG_COMMON, un, 4488 "sd_get_physical_geometry: (cached)\n"); 4489 SD_INFO(SD_LOG_COMMON, un, 4490 " ncyl: %ld; acyl: %d; nhead: %d; nsect: %d\n", 4491 pgeom_p->g_ncyl, pgeom_p->g_acyl, 4492 pgeom_p->g_nhead, pgeom_p->g_nsect); 4493 SD_INFO(SD_LOG_COMMON, un, 4494 " lbasize: %d; capacity: %ld; intrlv: %d; rpm: %d\n", 4495 pgeom_p->g_secsize, pgeom_p->g_capacity, 4496 pgeom_p->g_intrlv, pgeom_p->g_rpm); 4497 4498 page4_exit: 4499 kmem_free(p4bufp, SD_MODE_SENSE_PAGE4_LENGTH); 4500 page3_exit: 4501 kmem_free(p3bufp, SD_MODE_SENSE_PAGE3_LENGTH); 4502 4503 return (ret); 4504 } 4505 4506 /* 4507 * Function: sd_get_virtual_geometry 4508 * 4509 * Description: Ask the controller to tell us about the target device. 4510 * 4511 * Arguments: un - pointer to softstate 4512 * capacity - disk capacity in #blocks 4513 * lbasize - disk block size in bytes 4514 * 4515 * Context: Kernel thread only 4516 */ 4517 4518 static int 4519 sd_get_virtual_geometry(struct sd_lun *un, cmlb_geom_t *lgeom_p, 4520 diskaddr_t capacity, int lbasize) 4521 { 4522 uint_t geombuf; 4523 int spc; 4524 4525 ASSERT(un != NULL); 4526 4527 /* Set sector size, and total number of sectors */ 4528 (void) scsi_ifsetcap(SD_ADDRESS(un), "sector-size", lbasize, 1); 4529 (void) scsi_ifsetcap(SD_ADDRESS(un), "total-sectors", capacity, 1); 4530 4531 /* Let the HBA tell us its geometry */ 4532 geombuf = (uint_t)scsi_ifgetcap(SD_ADDRESS(un), "geometry", 1); 4533 4534 /* A value of -1 indicates an undefined "geometry" property */ 4535 if (geombuf == (-1)) { 4536 return (EINVAL); 4537 } 4538 4539 /* Initialize the logical geometry cache. */ 4540 lgeom_p->g_nhead = (geombuf >> 16) & 0xffff; 4541 lgeom_p->g_nsect = geombuf & 0xffff; 4542 lgeom_p->g_secsize = un->un_sys_blocksize; 4543 4544 spc = lgeom_p->g_nhead * lgeom_p->g_nsect; 4545 4546 /* 4547 * Note: The driver originally converted the capacity value from 4548 * target blocks to system blocks. However, the capacity value passed 4549 * to this routine is already in terms of system blocks (this scaling 4550 * is done when the READ CAPACITY command is issued and processed). 4551 * This 'error' may have gone undetected because the usage of g_ncyl 4552 * (which is based upon g_capacity) is very limited within the driver 4553 */ 4554 lgeom_p->g_capacity = capacity; 4555 4556 /* 4557 * Set ncyl to zero if the hba returned a zero nhead or nsect value. 
The 4558 * hba may return zero values if the device has been removed.
4559 */
4560 if (spc == 0) {
4561 lgeom_p->g_ncyl = 0;
4562 } else {
4563 lgeom_p->g_ncyl = lgeom_p->g_capacity / spc;
4564 }
4565 lgeom_p->g_acyl = 0;
4566
4567 SD_INFO(SD_LOG_COMMON, un, "sd_get_virtual_geometry: (cached)\n");
4568 return (0);
4569
4570 }
4571 /*
4572 * Function: sd_update_block_info
4573 *
4574 * Description: Calculate a byte count to sector count bitshift value
4575 * from sector size.
4576 *
4577 * Arguments: un: unit struct.
4578 * lbasize: new target sector size
4579 * capacity: new target capacity, i.e., block count
4580 *
4581 * Context: Kernel thread context
4582 */
4583
4584 static void
4585 sd_update_block_info(struct sd_lun *un, uint32_t lbasize, uint64_t capacity)
4586 {
4587 uint_t dblk;
4588
4589 if (lbasize != 0) {
4590 un->un_tgt_blocksize = lbasize;
4591 un->un_f_tgt_blocksize_is_valid = TRUE;
4592 }
4593
4594 if (capacity != 0) {
4595 un->un_blockcount = capacity;
4596 un->un_f_blockcount_is_valid = TRUE;
4597 }
4598
4599 /*
4600 * Update device capacity properties.
4601 *
4602 * 'device-nblocks' number of blocks in target's units
4603 * 'device-blksize' data bearing size of target's block
4604 *
4605 * NOTE: math is complicated by the fact that un_tgt_blocksize may
4606 * not be a power of two for checksumming disks with 520/528 byte
4607 * sectors.
4608 */
4609 if (un->un_f_tgt_blocksize_is_valid &&
4610 un->un_f_blockcount_is_valid &&
4611 un->un_sys_blocksize) {
4612 dblk = un->un_tgt_blocksize / un->un_sys_blocksize;
4613 (void) ddi_prop_update_int64(DDI_DEV_T_NONE, SD_DEVINFO(un),
4614 "device-nblocks", un->un_blockcount / dblk);
4615 /*
4616 * To save memory, only define "device-blksize" when its
4617 * value is different from the default DEV_BSIZE value.
4618 */
4619 if ((un->un_sys_blocksize * dblk) != DEV_BSIZE)
4620 (void) ddi_prop_update_int(DDI_DEV_T_NONE,
4621 SD_DEVINFO(un), "device-blksize",
4622 un->un_sys_blocksize * dblk);
4623 }
4624 }
4625
4626
4627 /*
4628 * Function: sd_register_devid
4629 *
4630 * Description: This routine will obtain the device id information from the
4631 * target, obtain the serial number, and register the device
4632 * id with the ddi framework.
4633 *
4634 * Arguments: devi - the system's dev_info_t for the device.
4635 * un - driver soft state (unit) structure
4636 * reservation_flag - indicates if a reservation conflict
4637 * occurred during attach
4638 *
4639 * Context: Kernel Thread
4640 */
4641 static void
4642 sd_register_devid(struct sd_lun *un, dev_info_t *devi, int reservation_flag)
4643 {
4644 int rval = 0;
4645 uchar_t *inq80 = NULL;
4646 size_t inq80_len = MAX_INQUIRY_SIZE;
4647 size_t inq80_resid = 0;
4648 uchar_t *inq83 = NULL;
4649 size_t inq83_len = MAX_INQUIRY_SIZE;
4650 size_t inq83_resid = 0;
4651 int dlen, len;
4652 char *sn;
4653
4654 ASSERT(un != NULL);
4655 ASSERT(mutex_owned(SD_MUTEX(un)));
4656 ASSERT((SD_DEVINFO(un)) == devi);
4657
4658 /*
4659 * If the transport has already registered a devid for this target
4660 * then that takes precedence over the driver's determination
4661 * of the devid.
4662 */
4663 if (ddi_devid_get(SD_DEVINFO(un), &un->un_devid) == DDI_SUCCESS) {
4664 ASSERT(un->un_devid);
4665 return; /* use devid registered by the transport */
4666 }
4667
4668 /*
4669 * This is the case of antiquated Sun disk drives that have the
4670 * FAB_DEVID property set in the disk_table.
These drives 4671 * manage the devids by storing them in the last 2 available sectors
4672 * on the drive and have them fabricated by the ddi layer by calling
4673 * ddi_devid_init and passing the DEVID_FAB flag.
4674 */
4675 if (un->un_f_opt_fab_devid == TRUE) {
4676 /*
4677 * Depending on EINVAL isn't reliable, since a reserved disk
4678 * may result in invalid geometry, so check to make sure a
4679 * reservation conflict did not occur during attach.
4680 */
4681 if ((sd_get_devid(un) == EINVAL) &&
4682 (reservation_flag != SD_TARGET_IS_RESERVED)) {
4683 /*
4684 * The devid is invalid AND there is no reservation
4685 * conflict. Fabricate a new devid.
4686 */
4687 (void) sd_create_devid(un);
4688 }
4689
4690 /* Register the devid if it exists */
4691 if (un->un_devid != NULL) {
4692 (void) ddi_devid_register(SD_DEVINFO(un),
4693 un->un_devid);
4694 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4695 "sd_register_devid: Devid Fabricated\n");
4696 }
4697 return;
4698 }
4699
4700 /*
4701 * We check the availability of the World Wide Name (0x83) and Unit
4702 * Serial Number (0x80) pages in sd_check_vpd_page_support(), and using
4703 * un_vpd_page_mask from them, we decide which way to get the WWN. If
4704 * 0x83 is available, that is the best choice. Our next choice is
4705 * 0x80. If neither is available, we munge the devid from the device
4706 * vid/pid/serial # for Sun qualified disks, or use the ddi framework
4707 * to fabricate a devid for non-Sun qualified disks.
4708 */
4709 if (sd_check_vpd_page_support(un) == 0) {
4710 /* collect page 80 data if available */
4711 if (un->un_vpd_page_mask & SD_VPD_UNIT_SERIAL_PG) {
4712
4713 mutex_exit(SD_MUTEX(un));
4714 inq80 = kmem_zalloc(inq80_len, KM_SLEEP);
4715 rval = sd_send_scsi_INQUIRY(un, inq80, inq80_len,
4716 0x01, 0x80, &inq80_resid);
4717
4718 if (rval != 0) {
4719 kmem_free(inq80, inq80_len);
4720 inq80 = NULL;
4721 inq80_len = 0;
4722 } else if (ddi_prop_exists(
4723 DDI_DEV_T_NONE, SD_DEVINFO(un),
4724 DDI_PROP_NOTPROM | DDI_PROP_DONTPASS,
4725 INQUIRY_SERIAL_NO) == 0) {
4726 /*
4727 * If we don't already have a serial number
4728 * property, do a quick verification of the
4729 * data returned and define the property.
4730 */
4731 dlen = inq80_len - inq80_resid;
4732 len = (size_t)inq80[3];
4733 if ((dlen >= 4) && ((len + 4) <= dlen)) {
4734 /*
4735 * Ensure sn termination, skip leading
4736 * blanks, and create property
4737 * 'inquiry-serial-no'.
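 * For example, a hypothetical page 0x80 payload with a length byte
 * (inq80[3]) of 8 and serial data "  ABC123" (two leading blanks)
 * yields the property value "ABC123".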
4738 */
4739 sn = (char *)&inq80[4];
4740 sn[len] = 0;
4741 while (*sn && (*sn == ' '))
4742 sn++;
4743 if (*sn) {
4744 (void) ddi_prop_update_string(
4745 DDI_DEV_T_NONE,
4746 SD_DEVINFO(un),
4747 INQUIRY_SERIAL_NO, sn);
4748 }
4749 }
4750 }
4751 mutex_enter(SD_MUTEX(un));
4752 }
4753
4754 /* collect page 83 data if available */
4755 if (un->un_vpd_page_mask & SD_VPD_DEVID_WWN_PG) {
4756 mutex_exit(SD_MUTEX(un));
4757 inq83 = kmem_zalloc(inq83_len, KM_SLEEP);
4758 rval = sd_send_scsi_INQUIRY(un, inq83, inq83_len,
4759 0x01, 0x83, &inq83_resid);
4760
4761 if (rval != 0) {
4762 kmem_free(inq83, inq83_len);
4763 inq83 = NULL;
4764 inq83_len = 0;
4765 }
4766 mutex_enter(SD_MUTEX(un));
4767 }
4768 }
4769
4770 /* encode best devid possible based on data available */
4771 if (ddi_devid_scsi_encode(DEVID_SCSI_ENCODE_VERSION_LATEST,
4772 (char *)ddi_driver_name(SD_DEVINFO(un)),
4773 (uchar_t *)SD_INQUIRY(un), sizeof (*SD_INQUIRY(un)),
4774 inq80, inq80_len - inq80_resid, inq83, inq83_len -
4775 inq83_resid, &un->un_devid) == DDI_SUCCESS) {
4776
4777 /* devid successfully encoded, register devid */
4778 (void) ddi_devid_register(SD_DEVINFO(un), un->un_devid);
4779
4780 } else {
4781 /*
4782 * Unable to encode a devid based on data available.
4783 * This is not a Sun qualified disk. Older Sun disk
4784 * drives that have the SD_FAB_DEVID property
4785 * set in the disk_table and non-Sun qualified
4786 * disks are treated in the same manner. These
4787 * drives manage the devids by storing them in
4788 * the last 2 available sectors on the drive and
4789 * have them fabricated by the ddi layer by
4790 * calling ddi_devid_init and passing the
4791 * DEVID_FAB flag.
4792 * Only fabricate a devid if a fabricated devid
4793 * does not already exist.
4794 */
4795 if (sd_get_devid(un) == EINVAL) {
4796 (void) sd_create_devid(un);
4797 }
4798 un->un_f_opt_fab_devid = TRUE;
4799
4800 /* Register the devid if it exists */
4801 if (un->un_devid != NULL) {
4802 (void) ddi_devid_register(SD_DEVINFO(un),
4803 un->un_devid);
4804 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4805 "sd_register_devid: devid fabricated using "
4806 "ddi framework\n");
4807 }
4808 }
4809
4810 /* clean up resources */
4811 if (inq80 != NULL) {
4812 kmem_free(inq80, inq80_len);
4813 }
4814 if (inq83 != NULL) {
4815 kmem_free(inq83, inq83_len);
4816 }
4817 }
4818
4819
4820
4821 /*
4822 * Function: sd_get_devid
4823 *
4824 * Description: This routine will return 0 if a valid device id has been
4825 * obtained from the target and stored in the soft state. If a
4826 * valid device id has not been previously read and stored, a
4827 * read attempt will be made.
4828 * 4829 * Arguments: un - driver soft state (unit) structure 4830 * 4831 * Return Code: 0 if we successfully get the device id 4832 * 4833 * Context: Kernel Thread 4834 */ 4835 4836 static int 4837 sd_get_devid(struct sd_lun *un) 4838 { 4839 struct dk_devid *dkdevid; 4840 ddi_devid_t tmpid; 4841 uint_t *ip; 4842 size_t sz; 4843 diskaddr_t blk; 4844 int status; 4845 int chksum; 4846 int i; 4847 size_t buffer_size; 4848 4849 ASSERT(un != NULL); 4850 ASSERT(mutex_owned(SD_MUTEX(un))); 4851 4852 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_get_devid: entry: un: 0x%p\n", 4853 un); 4854 4855 if (un->un_devid != NULL) { 4856 return (0); 4857 } 4858 4859 mutex_exit(SD_MUTEX(un)); 4860 if (cmlb_get_devid_block(un->un_cmlbhandle, &blk, 4861 (void *)SD_PATH_DIRECT) != 0) { 4862 mutex_enter(SD_MUTEX(un)); 4863 return (EINVAL); 4864 } 4865 4866 /* 4867 * Read and verify device id, stored in the reserved cylinders at the 4868 * end of the disk. Backup label is on the odd sectors of the last 4869 * track of the last cylinder. Device id will be on track of the next 4870 * to last cylinder. 4871 */ 4872 mutex_enter(SD_MUTEX(un)); 4873 buffer_size = SD_REQBYTES2TGTBYTES(un, sizeof (struct dk_devid)); 4874 mutex_exit(SD_MUTEX(un)); 4875 dkdevid = kmem_alloc(buffer_size, KM_SLEEP); 4876 status = sd_send_scsi_READ(un, dkdevid, buffer_size, blk, 4877 SD_PATH_DIRECT); 4878 if (status != 0) { 4879 goto error; 4880 } 4881 4882 /* Validate the revision */ 4883 if ((dkdevid->dkd_rev_hi != DK_DEVID_REV_MSB) || 4884 (dkdevid->dkd_rev_lo != DK_DEVID_REV_LSB)) { 4885 status = EINVAL; 4886 goto error; 4887 } 4888 4889 /* Calculate the checksum */ 4890 chksum = 0; 4891 ip = (uint_t *)dkdevid; 4892 for (i = 0; i < ((un->un_sys_blocksize - sizeof (int))/sizeof (int)); 4893 i++) { 4894 chksum ^= ip[i]; 4895 } 4896 4897 /* Compare the checksums */ 4898 if (DKD_GETCHKSUM(dkdevid) != chksum) { 4899 status = EINVAL; 4900 goto error; 4901 } 4902 4903 /* Validate the device id */ 4904 if (ddi_devid_valid((ddi_devid_t)&dkdevid->dkd_devid) != DDI_SUCCESS) { 4905 status = EINVAL; 4906 goto error; 4907 } 4908 4909 /* 4910 * Store the device id in the driver soft state 4911 */ 4912 sz = ddi_devid_sizeof((ddi_devid_t)&dkdevid->dkd_devid); 4913 tmpid = kmem_alloc(sz, KM_SLEEP); 4914 4915 mutex_enter(SD_MUTEX(un)); 4916 4917 un->un_devid = tmpid; 4918 bcopy(&dkdevid->dkd_devid, un->un_devid, sz); 4919 4920 kmem_free(dkdevid, buffer_size); 4921 4922 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_get_devid: exit: un:0x%p\n", un); 4923 4924 return (status); 4925 error: 4926 mutex_enter(SD_MUTEX(un)); 4927 kmem_free(dkdevid, buffer_size); 4928 return (status); 4929 } 4930 4931 4932 /* 4933 * Function: sd_create_devid 4934 * 4935 * Description: This routine will fabricate the device id and write it 4936 * to the disk. 
4937 *
4938 * Arguments: un - driver soft state (unit) structure
4939 *
4940 * Return Code: the fabricated device id, or NULL on failure
4941 *
4942 * Context: Kernel Thread
4943 */
4944
4945 static ddi_devid_t
4946 sd_create_devid(struct sd_lun *un)
4947 {
4948 ASSERT(un != NULL);
4949
4950 /* Fabricate the devid */
4951 if (ddi_devid_init(SD_DEVINFO(un), DEVID_FAB, 0, NULL, &un->un_devid)
4952 == DDI_FAILURE) {
4953 return (NULL);
4954 }
4955
4956 /* Write the devid to disk */
4957 if (sd_write_deviceid(un) != 0) {
4958 ddi_devid_free(un->un_devid);
4959 un->un_devid = NULL;
4960 }
4961
4962 return (un->un_devid);
4963 }
4964
4965
4966 /*
4967 * Function: sd_write_deviceid
4968 *
4969 * Description: This routine will write the device id to the disk
4970 * reserved sector.
4971 *
4972 * Arguments: un - driver soft state (unit) structure
4973 *
4974 * Return Code: -1 if the devid block cannot be located, otherwise
4975 * the value returned by sd_send_scsi_WRITE
4976 *
4977 * Context: Kernel Thread
4978 */
4979
4980 static int
4981 sd_write_deviceid(struct sd_lun *un)
4982 {
4983 struct dk_devid *dkdevid;
4984 diskaddr_t blk;
4985 uint_t *ip, chksum;
4986 int status;
4987 int i;
4988
4989 ASSERT(mutex_owned(SD_MUTEX(un)));
4990
4991 mutex_exit(SD_MUTEX(un));
4992 if (cmlb_get_devid_block(un->un_cmlbhandle, &blk,
4993 (void *)SD_PATH_DIRECT) != 0) {
4994 mutex_enter(SD_MUTEX(un));
4995 return (-1);
4996 }
4997
4998
4999 /* Allocate the buffer */
5000 dkdevid = kmem_zalloc(un->un_sys_blocksize, KM_SLEEP);
5001
5002 /* Fill in the revision */
5003 dkdevid->dkd_rev_hi = DK_DEVID_REV_MSB;
5004 dkdevid->dkd_rev_lo = DK_DEVID_REV_LSB;
5005
5006 /* Copy in the device id */
5007 mutex_enter(SD_MUTEX(un));
5008 bcopy(un->un_devid, &dkdevid->dkd_devid,
5009 ddi_devid_sizeof(un->un_devid));
5010 mutex_exit(SD_MUTEX(un));
5011
5012 /* Calculate the checksum */
5013 chksum = 0;
5014 ip = (uint_t *)dkdevid;
5015 for (i = 0; i < ((un->un_sys_blocksize - sizeof (int))/sizeof (int));
5016 i++) {
5017 chksum ^= ip[i];
5018 }
5019
5020 /* Fill-in checksum */
5021 DKD_FORMCHKSUM(chksum, dkdevid);
5022
5023 /* Write the reserved sector */
5024 status = sd_send_scsi_WRITE(un, dkdevid, un->un_sys_blocksize, blk,
5025 SD_PATH_DIRECT);
5026
5027 kmem_free(dkdevid, un->un_sys_blocksize);
5028
5029 mutex_enter(SD_MUTEX(un));
5030 return (status);
5031 }
5032
5033
5034 /*
5035 * Function: sd_check_vpd_page_support
5036 *
5037 * Description: This routine sends an inquiry command with the EVPD bit set and
5038 * a page code of 0x00 to the device. It is used to determine which
5039 * vital product data pages are available to find the devid. We are
5040 * looking for pages 0x83 or 0x80. A return value of -1 indicates
5041 * that the device does not support the command.
5042 *
5043 * Arguments: un - driver soft state (unit) structure
5044 *
5045 * Return Code: 0 - success
5046 * -1 - the device does not support VPD pages
5047 *
5048 * Context: This routine can sleep.
5049 */
5050
5051 static int
5052 sd_check_vpd_page_support(struct sd_lun *un)
5053 {
5054 uchar_t *page_list = NULL;
5055 uchar_t page_length = 0xff; /* Use max possible length */
5056 uchar_t evpd = 0x01; /* Set the EVPD bit */
5057 uchar_t page_code = 0x00; /* Supported VPD Pages */
5058 int rval = 0;
5059 int counter;
5060
5061 ASSERT(un != NULL);
5062 ASSERT(mutex_owned(SD_MUTEX(un)));
5063
5064 mutex_exit(SD_MUTEX(un));
5065
5066 /*
5067 * We'll set the page length to the maximum to save figuring it out
5068 * with an additional call.
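 *
 * For example, a hypothetical supported-pages response of
 * 00 00 00 03 00 80 83 (a three-entry page list starting at byte 4)
 * would leave SD_VPD_SUPPORTED_PG, SD_VPD_UNIT_SERIAL_PG and
 * SD_VPD_DEVID_WWN_PG set in un_vpd_page_mask, assuming the usual
 * supported-pages layout with the page-list length in byte 3.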
5069 */ 5070 page_list = kmem_zalloc(page_length, KM_SLEEP); 5071 5072 rval = sd_send_scsi_INQUIRY(un, page_list, page_length, evpd, 5073 page_code, NULL); 5074 5075 mutex_enter(SD_MUTEX(un)); 5076 5077 /* 5078 * Now we must validate that the device accepted the command, as some 5079 * drives do not support it. If the drive does support it, we will 5080 * return 0, and the supported pages will be in un_vpd_page_mask. If 5081 * not, we return -1. 5082 */ 5083 if ((rval == 0) && (page_list[VPD_MODE_PAGE] == 0x00)) { 5084 /* Loop to find one of the 2 pages we need */ 5085 counter = 4; /* Supported pages start at byte 4, with 0x00 */ 5086 5087 /* 5088 * Pages are returned in ascending order, and 0x83 is what we 5089 * are hoping for. 5090 */ 5091 while ((page_list[counter] <= 0x86) && 5092 (counter <= (page_list[VPD_PAGE_LENGTH] + 5093 VPD_HEAD_OFFSET))) { 5094 /* 5095 * Add 3 because page_list[3] is the number of 5096 * pages minus 3 5097 */ 5098 5099 switch (page_list[counter]) { 5100 case 0x00: 5101 un->un_vpd_page_mask |= SD_VPD_SUPPORTED_PG; 5102 break; 5103 case 0x80: 5104 un->un_vpd_page_mask |= SD_VPD_UNIT_SERIAL_PG; 5105 break; 5106 case 0x81: 5107 un->un_vpd_page_mask |= SD_VPD_OPERATING_PG; 5108 break; 5109 case 0x82: 5110 un->un_vpd_page_mask |= SD_VPD_ASCII_OP_PG; 5111 break; 5112 case 0x83: 5113 un->un_vpd_page_mask |= SD_VPD_DEVID_WWN_PG; 5114 break; 5115 case 0x86: 5116 un->un_vpd_page_mask |= SD_VPD_EXTENDED_DATA_PG; 5117 break; 5118 } 5119 counter++; 5120 } 5121 5122 } else { 5123 rval = -1; 5124 5125 SD_INFO(SD_LOG_ATTACH_DETACH, un, 5126 "sd_check_vpd_page_support: This drive does not implement " 5127 "VPD pages.\n"); 5128 } 5129 5130 kmem_free(page_list, page_length); 5131 5132 return (rval); 5133 } 5134 5135 5136 /* 5137 * Function: sd_setup_pm 5138 * 5139 * Description: Initialize Power Management on the device 5140 * 5141 * Context: Kernel Thread 5142 */ 5143 5144 static void 5145 sd_setup_pm(struct sd_lun *un, dev_info_t *devi) 5146 { 5147 uint_t log_page_size; 5148 uchar_t *log_page_data; 5149 int rval; 5150 5151 /* 5152 * Since we are called from attach, holding a mutex for 5153 * un is unnecessary. Because some of the routines called 5154 * from here require SD_MUTEX to not be held, assert this 5155 * right up front. 5156 */ 5157 ASSERT(!mutex_owned(SD_MUTEX(un))); 5158 /* 5159 * Since the sd device does not have the 'reg' property, 5160 * cpr will not call its DDI_SUSPEND/DDI_RESUME entries. 5161 * The following code is to tell cpr that this device 5162 * DOES need to be suspended and resumed. 5163 */ 5164 (void) ddi_prop_update_string(DDI_DEV_T_NONE, devi, 5165 "pm-hardware-state", "needs-suspend-resume"); 5166 5167 /* 5168 * This complies with the new power management framework 5169 * for certain desktop machines. Create the pm_components 5170 * property as a string array property. 5171 */ 5172 if (un->un_f_pm_supported) { 5173 /* 5174 * not all devices have a motor, try it first. 5175 * some devices may return ILLEGAL REQUEST, some 5176 * will hang 5177 * The following START_STOP_UNIT is used to check if target 5178 * device has a motor. 
5179 */ 5180 un->un_f_start_stop_supported = TRUE; 5181 if (sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_START, 5182 SD_PATH_DIRECT) != 0) { 5183 un->un_f_start_stop_supported = FALSE; 5184 } 5185 5186 /* 5187 * create pm properties anyways otherwise the parent can't 5188 * go to sleep 5189 */ 5190 (void) sd_create_pm_components(devi, un); 5191 un->un_f_pm_is_enabled = TRUE; 5192 return; 5193 } 5194 5195 if (!un->un_f_log_sense_supported) { 5196 un->un_power_level = SD_SPINDLE_ON; 5197 un->un_f_pm_is_enabled = FALSE; 5198 return; 5199 } 5200 5201 rval = sd_log_page_supported(un, START_STOP_CYCLE_PAGE); 5202 5203 #ifdef SDDEBUG 5204 if (sd_force_pm_supported) { 5205 /* Force a successful result */ 5206 rval = 1; 5207 } 5208 #endif 5209 5210 /* 5211 * If the start-stop cycle counter log page is not supported 5212 * or if the pm-capable property is SD_PM_CAPABLE_FALSE (0) 5213 * then we should not create the pm_components property. 5214 */ 5215 if (rval == -1) { 5216 /* 5217 * Error. 5218 * Reading log sense failed, most likely this is 5219 * an older drive that does not support log sense. 5220 * If this fails auto-pm is not supported. 5221 */ 5222 un->un_power_level = SD_SPINDLE_ON; 5223 un->un_f_pm_is_enabled = FALSE; 5224 5225 } else if (rval == 0) { 5226 /* 5227 * Page not found. 5228 * The start stop cycle counter is implemented as page 5229 * START_STOP_CYCLE_PAGE_VU_PAGE (0x31) in older disks. For 5230 * newer disks it is implemented as START_STOP_CYCLE_PAGE (0xE). 5231 */ 5232 if (sd_log_page_supported(un, START_STOP_CYCLE_VU_PAGE) == 1) { 5233 /* 5234 * Page found, use this one. 5235 */ 5236 un->un_start_stop_cycle_page = START_STOP_CYCLE_VU_PAGE; 5237 un->un_f_pm_is_enabled = TRUE; 5238 } else { 5239 /* 5240 * Error or page not found. 5241 * auto-pm is not supported for this device. 5242 */ 5243 un->un_power_level = SD_SPINDLE_ON; 5244 un->un_f_pm_is_enabled = FALSE; 5245 } 5246 } else { 5247 /* 5248 * Page found, use it. 5249 */ 5250 un->un_start_stop_cycle_page = START_STOP_CYCLE_PAGE; 5251 un->un_f_pm_is_enabled = TRUE; 5252 } 5253 5254 5255 if (un->un_f_pm_is_enabled == TRUE) { 5256 log_page_size = START_STOP_CYCLE_COUNTER_PAGE_SIZE; 5257 log_page_data = kmem_zalloc(log_page_size, KM_SLEEP); 5258 5259 rval = sd_send_scsi_LOG_SENSE(un, log_page_data, 5260 log_page_size, un->un_start_stop_cycle_page, 5261 0x01, 0, SD_PATH_DIRECT); 5262 #ifdef SDDEBUG 5263 if (sd_force_pm_supported) { 5264 /* Force a successful result */ 5265 rval = 0; 5266 } 5267 #endif 5268 5269 /* 5270 * If the Log sense for Page( Start/stop cycle counter page) 5271 * succeeds, then power managment is supported and we can 5272 * enable auto-pm. 5273 */ 5274 if (rval == 0) { 5275 (void) sd_create_pm_components(devi, un); 5276 } else { 5277 un->un_power_level = SD_SPINDLE_ON; 5278 un->un_f_pm_is_enabled = FALSE; 5279 } 5280 5281 kmem_free(log_page_data, log_page_size); 5282 } 5283 } 5284 5285 5286 /* 5287 * Function: sd_create_pm_components 5288 * 5289 * Description: Initialize PM property. 5290 * 5291 * Context: Kernel thread context 5292 */ 5293 5294 static void 5295 sd_create_pm_components(dev_info_t *devi, struct sd_lun *un) 5296 { 5297 char *pm_comp[] = { "NAME=spindle-motor", "0=off", "1=on", NULL }; 5298 5299 ASSERT(!mutex_owned(SD_MUTEX(un))); 5300 5301 if (ddi_prop_update_string_array(DDI_DEV_T_NONE, devi, 5302 "pm-components", pm_comp, 3) == DDI_PROP_SUCCESS) { 5303 /* 5304 * When components are initially created they are idle, 5305 * power up any non-removables. 
5306 * Note: the return value of pm_raise_power can't be used 5307 * for determining if PM should be enabled for this device. 5308 * Even if you check the return values and remove this 5309 * property created above, the PM framework will not honor the 5310 * change after the first call to pm_raise_power. Hence, 5311 * removal of that property does not help if pm_raise_power 5312 * fails. In the case of removable media, the start/stop 5313 * will fail if the media is not present. 5314 */ 5315 if (un->un_f_attach_spinup && (pm_raise_power(SD_DEVINFO(un), 0, 5316 SD_SPINDLE_ON) == DDI_SUCCESS)) { 5317 mutex_enter(SD_MUTEX(un)); 5318 un->un_power_level = SD_SPINDLE_ON; 5319 mutex_enter(&un->un_pm_mutex); 5320 /* Set to on and not busy. */ 5321 un->un_pm_count = 0; 5322 } else { 5323 mutex_enter(SD_MUTEX(un)); 5324 un->un_power_level = SD_SPINDLE_OFF; 5325 mutex_enter(&un->un_pm_mutex); 5326 /* Set to off. */ 5327 un->un_pm_count = -1; 5328 } 5329 mutex_exit(&un->un_pm_mutex); 5330 mutex_exit(SD_MUTEX(un)); 5331 } else { 5332 un->un_power_level = SD_SPINDLE_ON; 5333 un->un_f_pm_is_enabled = FALSE; 5334 } 5335 } 5336 5337 5338 /* 5339 * Function: sd_ddi_suspend 5340 * 5341 * Description: Performs system power-down operations. This includes 5342 * setting the drive state to indicate its suspended so 5343 * that no new commands will be accepted. Also, wait for 5344 * all commands that are in transport or queued to a timer 5345 * for retry to complete. All timeout threads are cancelled. 5346 * 5347 * Return Code: DDI_FAILURE or DDI_SUCCESS 5348 * 5349 * Context: Kernel thread context 5350 */ 5351 5352 static int 5353 sd_ddi_suspend(dev_info_t *devi) 5354 { 5355 struct sd_lun *un; 5356 clock_t wait_cmds_complete; 5357 5358 un = ddi_get_soft_state(sd_state, ddi_get_instance(devi)); 5359 if (un == NULL) { 5360 return (DDI_FAILURE); 5361 } 5362 5363 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: entry\n"); 5364 5365 mutex_enter(SD_MUTEX(un)); 5366 5367 /* Return success if the device is already suspended. */ 5368 if (un->un_state == SD_STATE_SUSPENDED) { 5369 mutex_exit(SD_MUTEX(un)); 5370 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: " 5371 "device already suspended, exiting\n"); 5372 return (DDI_SUCCESS); 5373 } 5374 5375 /* Return failure if the device is being used by HA */ 5376 if (un->un_resvd_status & 5377 (SD_RESERVE | SD_WANT_RESERVE | SD_LOST_RESERVE)) { 5378 mutex_exit(SD_MUTEX(un)); 5379 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: " 5380 "device in use by HA, exiting\n"); 5381 return (DDI_FAILURE); 5382 } 5383 5384 /* 5385 * Return failure if the device is in a resource wait 5386 * or power changing state. 5387 */ 5388 if ((un->un_state == SD_STATE_RWAIT) || 5389 (un->un_state == SD_STATE_PM_CHANGING)) { 5390 mutex_exit(SD_MUTEX(un)); 5391 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: " 5392 "device in resource wait state, exiting\n"); 5393 return (DDI_FAILURE); 5394 } 5395 5396 5397 un->un_save_state = un->un_last_state; 5398 New_state(un, SD_STATE_SUSPENDED); 5399 5400 /* 5401 * Wait for all commands that are in transport or queued to a timer 5402 * for retry to complete. 5403 * 5404 * While waiting, no new commands will be accepted or sent because of 5405 * the new state we set above. 5406 * 5407 * Wait till current operation has completed. If we are in the resource 5408 * wait state (with an intr outstanding) then we need to wait till the 5409 * intr completes and starts the next cmd. We want to wait for 5410 * SD_WAIT_CMDS_COMPLETE seconds before failing the DDI_SUSPEND. 
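	 *
	 * Note: cv_timedwait(9F) takes an absolute deadline in lbolt ticks
	 * and returns -1 if that deadline passes without a wakeup. That is
	 * why the deadline below is computed once, up front, in the form
	 *
	 *	deadline = ddi_get_lbolt() + seconds * drv_usectohz(1000000);
	 *
	 * and then passed unchanged to cv_timedwait() on every iteration
	 * of the wait loop.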
5411 */ 5412 wait_cmds_complete = ddi_get_lbolt() + 5413 (sd_wait_cmds_complete * drv_usectohz(1000000)); 5414 5415 while (un->un_ncmds_in_transport != 0) { 5416 /* 5417 * Fail if commands do not finish in the specified time. 5418 */ 5419 if (cv_timedwait(&un->un_disk_busy_cv, SD_MUTEX(un), 5420 wait_cmds_complete) == -1) { 5421 /* 5422 * Undo the state changes made above. Everything 5423 * must go back to it's original value. 5424 */ 5425 Restore_state(un); 5426 un->un_last_state = un->un_save_state; 5427 /* Wake up any threads that might be waiting. */ 5428 cv_broadcast(&un->un_suspend_cv); 5429 mutex_exit(SD_MUTEX(un)); 5430 SD_ERROR(SD_LOG_IO_PM, un, 5431 "sd_ddi_suspend: failed due to outstanding cmds\n"); 5432 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: exiting\n"); 5433 return (DDI_FAILURE); 5434 } 5435 } 5436 5437 /* 5438 * Cancel SCSI watch thread and timeouts, if any are active 5439 */ 5440 5441 if (SD_OK_TO_SUSPEND_SCSI_WATCHER(un)) { 5442 opaque_t temp_token = un->un_swr_token; 5443 mutex_exit(SD_MUTEX(un)); 5444 scsi_watch_suspend(temp_token); 5445 mutex_enter(SD_MUTEX(un)); 5446 } 5447 5448 if (un->un_reset_throttle_timeid != NULL) { 5449 timeout_id_t temp_id = un->un_reset_throttle_timeid; 5450 un->un_reset_throttle_timeid = NULL; 5451 mutex_exit(SD_MUTEX(un)); 5452 (void) untimeout(temp_id); 5453 mutex_enter(SD_MUTEX(un)); 5454 } 5455 5456 if (un->un_dcvb_timeid != NULL) { 5457 timeout_id_t temp_id = un->un_dcvb_timeid; 5458 un->un_dcvb_timeid = NULL; 5459 mutex_exit(SD_MUTEX(un)); 5460 (void) untimeout(temp_id); 5461 mutex_enter(SD_MUTEX(un)); 5462 } 5463 5464 mutex_enter(&un->un_pm_mutex); 5465 if (un->un_pm_timeid != NULL) { 5466 timeout_id_t temp_id = un->un_pm_timeid; 5467 un->un_pm_timeid = NULL; 5468 mutex_exit(&un->un_pm_mutex); 5469 mutex_exit(SD_MUTEX(un)); 5470 (void) untimeout(temp_id); 5471 mutex_enter(SD_MUTEX(un)); 5472 } else { 5473 mutex_exit(&un->un_pm_mutex); 5474 } 5475 5476 if (un->un_retry_timeid != NULL) { 5477 timeout_id_t temp_id = un->un_retry_timeid; 5478 un->un_retry_timeid = NULL; 5479 mutex_exit(SD_MUTEX(un)); 5480 (void) untimeout(temp_id); 5481 mutex_enter(SD_MUTEX(un)); 5482 } 5483 5484 if (un->un_direct_priority_timeid != NULL) { 5485 timeout_id_t temp_id = un->un_direct_priority_timeid; 5486 un->un_direct_priority_timeid = NULL; 5487 mutex_exit(SD_MUTEX(un)); 5488 (void) untimeout(temp_id); 5489 mutex_enter(SD_MUTEX(un)); 5490 } 5491 5492 if (un->un_f_is_fibre == TRUE) { 5493 /* 5494 * Remove callbacks for insert and remove events 5495 */ 5496 if (un->un_insert_event != NULL) { 5497 mutex_exit(SD_MUTEX(un)); 5498 (void) ddi_remove_event_handler(un->un_insert_cb_id); 5499 mutex_enter(SD_MUTEX(un)); 5500 un->un_insert_event = NULL; 5501 } 5502 5503 if (un->un_remove_event != NULL) { 5504 mutex_exit(SD_MUTEX(un)); 5505 (void) ddi_remove_event_handler(un->un_remove_cb_id); 5506 mutex_enter(SD_MUTEX(un)); 5507 un->un_remove_event = NULL; 5508 } 5509 } 5510 5511 mutex_exit(SD_MUTEX(un)); 5512 5513 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: exit\n"); 5514 5515 return (DDI_SUCCESS); 5516 } 5517 5518 5519 /* 5520 * Function: sd_ddi_pm_suspend 5521 * 5522 * Description: Set the drive state to low power. 5523 * Someone else is required to actually change the drive 5524 * power level. 
5525 * 5526 * Arguments: un - driver soft state (unit) structure 5527 * 5528 * Return Code: DDI_FAILURE or DDI_SUCCESS 5529 * 5530 * Context: Kernel thread context 5531 */ 5532 5533 static int 5534 sd_ddi_pm_suspend(struct sd_lun *un) 5535 { 5536 ASSERT(un != NULL); 5537 SD_TRACE(SD_LOG_POWER, un, "sd_ddi_pm_suspend: entry\n"); 5538 5539 ASSERT(!mutex_owned(SD_MUTEX(un))); 5540 mutex_enter(SD_MUTEX(un)); 5541 5542 /* 5543 * Exit if power management is not enabled for this device, or if 5544 * the device is being used by HA. 5545 */ 5546 if ((un->un_f_pm_is_enabled == FALSE) || (un->un_resvd_status & 5547 (SD_RESERVE | SD_WANT_RESERVE | SD_LOST_RESERVE))) { 5548 mutex_exit(SD_MUTEX(un)); 5549 SD_TRACE(SD_LOG_POWER, un, "sd_ddi_pm_suspend: exiting\n"); 5550 return (DDI_SUCCESS); 5551 } 5552 5553 SD_INFO(SD_LOG_POWER, un, "sd_ddi_pm_suspend: un_ncmds_in_driver=%ld\n", 5554 un->un_ncmds_in_driver); 5555 5556 /* 5557 * See if the device is not busy, ie.: 5558 * - we have no commands in the driver for this device 5559 * - not waiting for resources 5560 */ 5561 if ((un->un_ncmds_in_driver == 0) && 5562 (un->un_state != SD_STATE_RWAIT)) { 5563 /* 5564 * The device is not busy, so it is OK to go to low power state. 5565 * Indicate low power, but rely on someone else to actually 5566 * change it. 5567 */ 5568 mutex_enter(&un->un_pm_mutex); 5569 un->un_pm_count = -1; 5570 mutex_exit(&un->un_pm_mutex); 5571 un->un_power_level = SD_SPINDLE_OFF; 5572 } 5573 5574 mutex_exit(SD_MUTEX(un)); 5575 5576 SD_TRACE(SD_LOG_POWER, un, "sd_ddi_pm_suspend: exit\n"); 5577 5578 return (DDI_SUCCESS); 5579 } 5580 5581 5582 /* 5583 * Function: sd_ddi_resume 5584 * 5585 * Description: Performs system power-up operations.. 5586 * 5587 * Return Code: DDI_SUCCESS 5588 * DDI_FAILURE 5589 * 5590 * Context: Kernel thread context 5591 */ 5592 5593 static int 5594 sd_ddi_resume(dev_info_t *devi) 5595 { 5596 struct sd_lun *un; 5597 5598 un = ddi_get_soft_state(sd_state, ddi_get_instance(devi)); 5599 if (un == NULL) { 5600 return (DDI_FAILURE); 5601 } 5602 5603 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_resume: entry\n"); 5604 5605 mutex_enter(SD_MUTEX(un)); 5606 Restore_state(un); 5607 5608 /* 5609 * Restore the state which was saved to give the 5610 * the right state in un_last_state 5611 */ 5612 un->un_last_state = un->un_save_state; 5613 /* 5614 * Note: throttle comes back at full. 5615 * Also note: this MUST be done before calling pm_raise_power 5616 * otherwise the system can get hung in biowait. The scenario where 5617 * this'll happen is under cpr suspend. Writing of the system 5618 * state goes through sddump, which writes 0 to un_throttle. If 5619 * writing the system state then fails, example if the partition is 5620 * too small, then cpr attempts a resume. If throttle isn't restored 5621 * from the saved value until after calling pm_raise_power then 5622 * cmds sent in sdpower are not transported and sd_send_scsi_cmd hangs 5623 * in biowait. 5624 */ 5625 un->un_throttle = un->un_saved_throttle; 5626 5627 /* 5628 * The chance of failure is very rare as the only command done in power 5629 * entry point is START command when you transition from 0->1 or 5630 * unknown->1. Put it to SPINDLE ON state irrespective of the state at 5631 * which suspend was done. Ignore the return value as the resume should 5632 * not be failed. In the case of removable media the media need not be 5633 * inserted and hence there is a chance that raise power will fail with 5634 * media not present. 
5635 */ 5636 if (un->un_f_attach_spinup) { 5637 mutex_exit(SD_MUTEX(un)); 5638 (void) pm_raise_power(SD_DEVINFO(un), 0, SD_SPINDLE_ON); 5639 mutex_enter(SD_MUTEX(un)); 5640 } 5641 5642 /* 5643 * Don't broadcast to the suspend cv and therefore possibly 5644 * start I/O until after power has been restored. 5645 */ 5646 cv_broadcast(&un->un_suspend_cv); 5647 cv_broadcast(&un->un_state_cv); 5648 5649 /* restart thread */ 5650 if (SD_OK_TO_RESUME_SCSI_WATCHER(un)) { 5651 scsi_watch_resume(un->un_swr_token); 5652 } 5653 5654 #if (defined(__fibre)) 5655 if (un->un_f_is_fibre == TRUE) { 5656 /* 5657 * Add callbacks for insert and remove events 5658 */ 5659 if (strcmp(un->un_node_type, DDI_NT_BLOCK_CHAN)) { 5660 sd_init_event_callbacks(un); 5661 } 5662 } 5663 #endif 5664 5665 /* 5666 * Transport any pending commands to the target. 5667 * 5668 * If this is a low-activity device commands in queue will have to wait 5669 * until new commands come in, which may take awhile. Also, we 5670 * specifically don't check un_ncmds_in_transport because we know that 5671 * there really are no commands in progress after the unit was 5672 * suspended and we could have reached the throttle level, been 5673 * suspended, and have no new commands coming in for awhile. Highly 5674 * unlikely, but so is the low-activity disk scenario. 5675 */ 5676 ddi_xbuf_dispatch(un->un_xbuf_attr); 5677 5678 sd_start_cmds(un, NULL); 5679 mutex_exit(SD_MUTEX(un)); 5680 5681 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_resume: exit\n"); 5682 5683 return (DDI_SUCCESS); 5684 } 5685 5686 5687 /* 5688 * Function: sd_ddi_pm_resume 5689 * 5690 * Description: Set the drive state to powered on. 5691 * Someone else is required to actually change the drive 5692 * power level. 5693 * 5694 * Arguments: un - driver soft state (unit) structure 5695 * 5696 * Return Code: DDI_SUCCESS 5697 * 5698 * Context: Kernel thread context 5699 */ 5700 5701 static int 5702 sd_ddi_pm_resume(struct sd_lun *un) 5703 { 5704 ASSERT(un != NULL); 5705 5706 ASSERT(!mutex_owned(SD_MUTEX(un))); 5707 mutex_enter(SD_MUTEX(un)); 5708 un->un_power_level = SD_SPINDLE_ON; 5709 5710 ASSERT(!mutex_owned(&un->un_pm_mutex)); 5711 mutex_enter(&un->un_pm_mutex); 5712 if (SD_DEVICE_IS_IN_LOW_POWER(un)) { 5713 un->un_pm_count++; 5714 ASSERT(un->un_pm_count == 0); 5715 /* 5716 * Note: no longer do the cv_broadcast on un_suspend_cv. The 5717 * un_suspend_cv is for a system resume, not a power management 5718 * device resume. (4297749) 5719 * cv_broadcast(&un->un_suspend_cv); 5720 */ 5721 } 5722 mutex_exit(&un->un_pm_mutex); 5723 mutex_exit(SD_MUTEX(un)); 5724 5725 return (DDI_SUCCESS); 5726 } 5727 5728 5729 /* 5730 * Function: sd_pm_idletimeout_handler 5731 * 5732 * Description: A timer routine that's active only while a device is busy. 5733 * The purpose is to extend slightly the pm framework's busy 5734 * view of the device to prevent busy/idle thrashing for 5735 * back-to-back commands. Do this by comparing the current time 5736 * to the time at which the last command completed and when the 5737 * difference is greater than sd_pm_idletime, call 5738 * pm_idle_component. In addition to indicating idle to the pm 5739 * framework, update the chain type to again use the internal pm 5740 * layers of the driver. 
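 *
 *		For illustration (the numbers here are assumed, not taken
 *		from this file): if sd_pm_idletime were 5 seconds, a command
 *		completing at t=0 keeps the device busy; this handler re-arms
 *		itself about every 300 msec and only calls pm_idle_component()
 *		on the first pass where no commands are outstanding and more
 *		than 5 seconds have elapsed, so a burst of back-to-back
 *		commands never thrashes the busy/idle state in between.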
5741 * 5742 * Arguments: arg - driver soft state (unit) structure 5743 * 5744 * Context: Executes in a timeout(9F) thread context 5745 */ 5746 5747 static void 5748 sd_pm_idletimeout_handler(void *arg) 5749 { 5750 struct sd_lun *un = arg; 5751 5752 time_t now; 5753 5754 mutex_enter(&sd_detach_mutex); 5755 if (un->un_detach_count != 0) { 5756 /* Abort if the instance is detaching */ 5757 mutex_exit(&sd_detach_mutex); 5758 return; 5759 } 5760 mutex_exit(&sd_detach_mutex); 5761 5762 now = ddi_get_time(); 5763 /* 5764 * Grab both mutexes, in the proper order, since we're accessing 5765 * both PM and softstate variables. 5766 */ 5767 mutex_enter(SD_MUTEX(un)); 5768 mutex_enter(&un->un_pm_mutex); 5769 if (((now - un->un_pm_idle_time) > sd_pm_idletime) && 5770 (un->un_ncmds_in_driver == 0) && (un->un_pm_count == 0)) { 5771 /* 5772 * Update the chain types. 5773 * This takes affect on the next new command received. 5774 */ 5775 if (un->un_f_non_devbsize_supported) { 5776 un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA; 5777 } else { 5778 un->un_buf_chain_type = SD_CHAIN_INFO_DISK; 5779 } 5780 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD; 5781 5782 SD_TRACE(SD_LOG_IO_PM, un, 5783 "sd_pm_idletimeout_handler: idling device\n"); 5784 (void) pm_idle_component(SD_DEVINFO(un), 0); 5785 un->un_pm_idle_timeid = NULL; 5786 } else { 5787 un->un_pm_idle_timeid = 5788 timeout(sd_pm_idletimeout_handler, un, 5789 (drv_usectohz((clock_t)300000))); /* 300 ms. */ 5790 } 5791 mutex_exit(&un->un_pm_mutex); 5792 mutex_exit(SD_MUTEX(un)); 5793 } 5794 5795 5796 /* 5797 * Function: sd_pm_timeout_handler 5798 * 5799 * Description: Callback to tell framework we are idle. 5800 * 5801 * Context: timeout(9f) thread context. 5802 */ 5803 5804 static void 5805 sd_pm_timeout_handler(void *arg) 5806 { 5807 struct sd_lun *un = arg; 5808 5809 (void) pm_idle_component(SD_DEVINFO(un), 0); 5810 mutex_enter(&un->un_pm_mutex); 5811 un->un_pm_timeid = NULL; 5812 mutex_exit(&un->un_pm_mutex); 5813 } 5814 5815 5816 /* 5817 * Function: sdpower 5818 * 5819 * Description: PM entry point. 5820 * 5821 * Return Code: DDI_SUCCESS 5822 * DDI_FAILURE 5823 * 5824 * Context: Kernel thread context 5825 */ 5826 5827 static int 5828 sdpower(dev_info_t *devi, int component, int level) 5829 { 5830 struct sd_lun *un; 5831 int instance; 5832 int rval = DDI_SUCCESS; 5833 uint_t i, log_page_size, maxcycles, ncycles; 5834 uchar_t *log_page_data; 5835 int log_sense_page; 5836 int medium_present; 5837 time_t intvlp; 5838 dev_t dev; 5839 struct pm_trans_data sd_pm_tran_data; 5840 uchar_t save_state; 5841 int sval; 5842 uchar_t state_before_pm; 5843 int got_semaphore_here; 5844 5845 instance = ddi_get_instance(devi); 5846 5847 if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) || 5848 (SD_SPINDLE_OFF > level) || (level > SD_SPINDLE_ON) || 5849 component != 0) { 5850 return (DDI_FAILURE); 5851 } 5852 5853 dev = sd_make_device(SD_DEVINFO(un)); 5854 5855 SD_TRACE(SD_LOG_IO_PM, un, "sdpower: entry, level = %d\n", level); 5856 5857 /* 5858 * Must synchronize power down with close. 5859 * Attempt to decrement/acquire the open/close semaphore, 5860 * but do NOT wait on it. If it's not greater than zero, 5861 * ie. it can't be decremented without waiting, then 5862 * someone else, either open or close, already has it 5863 * and the try returns 0. Use that knowledge here to determine 5864 * if it's OK to change the device power level. 5865 * Also, only increment it on exit if it was decremented, ie. gotten, 5866 * here. 
	 */
	got_semaphore_here = sema_tryp(&un->un_semoclose);

	mutex_enter(SD_MUTEX(un));

	SD_INFO(SD_LOG_POWER, un, "sdpower: un_ncmds_in_driver = %ld\n",
	    un->un_ncmds_in_driver);

	/*
	 * If un_ncmds_in_driver is non-zero, commands are already being
	 * processed in the driver; if the semaphore was not acquired here,
	 * an open or close is in progress. Either way the device is busy,
	 * so a concurrent request to go to low power cannot be honored and
	 * we must return failure.
	 */
	if ((level == SD_SPINDLE_OFF) &&
	    ((un->un_ncmds_in_driver != 0) || (got_semaphore_here == 0))) {
		mutex_exit(SD_MUTEX(un));

		if (got_semaphore_here != 0) {
			sema_v(&un->un_semoclose);
		}
		SD_TRACE(SD_LOG_IO_PM, un,
		    "sdpower: exit, device has queued cmds.\n");
		return (DDI_FAILURE);
	}

	/*
	 * If the device is OFFLINE, the disk is effectively dead; putting
	 * it in an on or off state requires sending it commands, which
	 * would fail anyway, so reject the request here.
	 *
	 * Power changes to a device that is OFFLINE or SUSPENDED
	 * are not allowed.
	 */
	if ((un->un_state == SD_STATE_OFFLINE) ||
	    (un->un_state == SD_STATE_SUSPENDED)) {
		mutex_exit(SD_MUTEX(un));

		if (got_semaphore_here != 0) {
			sema_v(&un->un_semoclose);
		}
		SD_TRACE(SD_LOG_IO_PM, un,
		    "sdpower: exit, device is off-line.\n");
		return (DDI_FAILURE);
	}

	/*
	 * Change the device's state to indicate its power level
	 * is being changed. Do this to prevent a power off in the
	 * middle of commands, which is especially bad on devices
	 * that are really powered off instead of just spun down.
	 */
	state_before_pm = un->un_state;
	un->un_state = SD_STATE_PM_CHANGING;

	mutex_exit(SD_MUTEX(un));

	/*
	 * If the "pm-capable" property is set to TRUE by the HBA driver,
	 * bypass the following check; otherwise, check the log sense
	 * information for this device.
	 */
	if ((level == SD_SPINDLE_OFF) && un->un_f_log_sense_supported) {
		/*
		 * Get the log sense information to understand whether the
		 * power cycle counts have gone beyond the threshold.
		 */
		log_page_size = START_STOP_CYCLE_COUNTER_PAGE_SIZE;
		log_page_data = kmem_zalloc(log_page_size, KM_SLEEP);

		mutex_enter(SD_MUTEX(un));
		log_sense_page = un->un_start_stop_cycle_page;
		mutex_exit(SD_MUTEX(un));

		rval = sd_send_scsi_LOG_SENSE(un, log_page_data,
		    log_page_size, log_sense_page, 0x01, 0, SD_PATH_DIRECT);
#ifdef	SDDEBUG
		if (sd_force_pm_supported) {
			/* Force a successful result */
			rval = 0;
		}
#endif
		if (rval != 0) {
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "Log Sense Failed\n");
			kmem_free(log_page_data, log_page_size);
			/* Cannot support power management on such drives */

			if (got_semaphore_here != 0) {
				sema_v(&un->un_semoclose);
			}
			/*
			 * On exit put the state back to its original value
			 * and broadcast to anyone waiting for the power
			 * change completion.
			 */
			mutex_enter(SD_MUTEX(un));
			un->un_state = state_before_pm;
			cv_broadcast(&un->un_suspend_cv);
			mutex_exit(SD_MUTEX(un));
			SD_TRACE(SD_LOG_IO_PM, un,
			    "sdpower: exit, Log Sense Failed.\n");
			return (DDI_FAILURE);
		}

		/*
		 * From the page data, convert the essential information to
		 * pm_trans_data.
		 */
		maxcycles =
		    (log_page_data[0x1c] << 24) | (log_page_data[0x1d] << 16) |
		    (log_page_data[0x1E] << 8)  | log_page_data[0x1F];

		sd_pm_tran_data.un.scsi_cycles.lifemax = maxcycles;

		ncycles =
		    (log_page_data[0x24] << 24) | (log_page_data[0x25] << 16) |
		    (log_page_data[0x26] << 8)  | log_page_data[0x27];

		sd_pm_tran_data.un.scsi_cycles.ncycles = ncycles;

		for (i = 0; i < DC_SCSI_MFR_LEN; i++) {
			sd_pm_tran_data.un.scsi_cycles.svc_date[i] =
			    log_page_data[8+i];
		}

		kmem_free(log_page_data, log_page_size);

		/*
		 * Call the pm_trans_check routine to get the OK from
		 * the global policy.
		 */

		sd_pm_tran_data.format = DC_SCSI_FORMAT;
		sd_pm_tran_data.un.scsi_cycles.flag = 0;

		rval = pm_trans_check(&sd_pm_tran_data, &intvlp);
#ifdef	SDDEBUG
		if (sd_force_pm_supported) {
			/* Force a successful result */
			rval = 1;
		}
#endif
		switch (rval) {
		case 0:
			/*
			 * Not OK to power cycle, or an error occurred in the
			 * parameters passed. pm_trans_check has supplied the
			 * advised time (intvlp) to wait before considering a
			 * power cycle again. Based on that interval we are
			 * supposed to pretend we are busy so that the pm
			 * framework will never call our power entry point;
			 * install a timeout handler and wait for the
			 * recommended time to elapse so that power management
			 * can become effective again.
			 *
			 * To effect this behavior, call pm_busy_component to
			 * indicate to the framework this device is busy.
			 * By not adjusting un_pm_count the rest of PM in
			 * the driver will function normally, and independent
			 * of this; but because the framework is told the
			 * device is busy it won't attempt powering down until
			 * it gets a matching idle, which the timeout handler
			 * sends. Note: sd_pm_entry can't be called here to do
			 * this because sdpower may have been called as a
			 * result of a call to pm_raise_power from within
			 * sd_pm_entry.
			 *
			 * If a timeout handler is already active then
			 * don't install another.
			 */
			mutex_enter(&un->un_pm_mutex);
			if (un->un_pm_timeid == NULL) {
				un->un_pm_timeid =
				    timeout(sd_pm_timeout_handler,
				    un, intvlp * drv_usectohz(1000000));
				mutex_exit(&un->un_pm_mutex);
				(void) pm_busy_component(SD_DEVINFO(un), 0);
			} else {
				mutex_exit(&un->un_pm_mutex);
			}
			if (got_semaphore_here != 0) {
				sema_v(&un->un_semoclose);
			}
			/*
			 * On exit put the state back to its original value
			 * and broadcast to anyone waiting for the power
			 * change completion.
			 */
			mutex_enter(SD_MUTEX(un));
			un->un_state = state_before_pm;
			cv_broadcast(&un->un_suspend_cv);
			mutex_exit(SD_MUTEX(un));

			SD_TRACE(SD_LOG_IO_PM, un, "sdpower: exit, "
			    "trans check Failed, not ok to power cycle.\n");
			return (DDI_FAILURE);

		case -1:
			if (got_semaphore_here != 0) {
				sema_v(&un->un_semoclose);
			}
			/*
			 * On exit put the state back to its original value
			 * and broadcast to anyone waiting for the power
			 * change completion.
			 */
			mutex_enter(SD_MUTEX(un));
			un->un_state = state_before_pm;
			cv_broadcast(&un->un_suspend_cv);
			mutex_exit(SD_MUTEX(un));
			SD_TRACE(SD_LOG_IO_PM, un,
			    "sdpower: exit, trans check command Failed.\n");
			return (DDI_FAILURE);
		}
	}

	if (level == SD_SPINDLE_OFF) {
		/*
		 * Save the last state... if the STOP fails we need it
		 * for restoring.
		 */
		mutex_enter(SD_MUTEX(un));
		save_state = un->un_last_state;
		/*
		 * There must not be any commands being processed in the
		 * driver when we get here. Power to the device is
		 * potentially going off.
		 */
		ASSERT(un->un_ncmds_in_driver == 0);
		mutex_exit(SD_MUTEX(un));

		/*
		 * For now suspend the device completely before the spindle
		 * is turned off.
		 */
		if ((rval = sd_ddi_pm_suspend(un)) == DDI_FAILURE) {
			if (got_semaphore_here != 0) {
				sema_v(&un->un_semoclose);
			}
			/*
			 * On exit put the state back to its original value
			 * and broadcast to anyone waiting for the power
			 * change completion.
			 */
			mutex_enter(SD_MUTEX(un));
			un->un_state = state_before_pm;
			cv_broadcast(&un->un_suspend_cv);
			mutex_exit(SD_MUTEX(un));
			SD_TRACE(SD_LOG_IO_PM, un,
			    "sdpower: exit, PM suspend Failed.\n");
			return (DDI_FAILURE);
		}
	}

	/*
	 * The transition from SPINDLE_OFF to SPINDLE_ON can happen in open,
	 * close, or strategy. Dump no longer uses this routine; it has its
	 * own code so it can be done in polled mode.
	 */

	medium_present = TRUE;

	/*
	 * When powering up, issue a TUR in case the device is at unit
	 * attention. Don't do retries. Bypass the PM layer, otherwise
	 * a deadlock on un_pm_busy_cv will occur.
	 */
	if (level == SD_SPINDLE_ON) {
		(void) sd_send_scsi_TEST_UNIT_READY(un,
		    SD_DONT_RETRY_TUR | SD_BYPASS_PM);
	}

	SD_TRACE(SD_LOG_IO_PM, un, "sdpower: sending \'%s\' unit\n",
	    ((level == SD_SPINDLE_ON) ? "START" : "STOP"));

	sval = sd_send_scsi_START_STOP_UNIT(un,
	    ((level == SD_SPINDLE_ON) ? SD_TARGET_START : SD_TARGET_STOP),
	    SD_PATH_DIRECT);
	/* Command failed, check for media present. */
	if ((sval == ENXIO) && un->un_f_has_removable_media) {
		medium_present = FALSE;
	}

	/*
	 * The conditions of interest here are:
	 *   if a spindle off with media present fails,
	 *	then restore the state and return an error.
	 *   else if a spindle on fails,
	 *	then return an error (there's no state to restore).
	 * In all other cases we set up for the new state
	 * and return success.
	 */
	switch (level) {
	case SD_SPINDLE_OFF:
		if ((medium_present == TRUE) && (sval != 0)) {
			/* The stop command from above failed */
			rval = DDI_FAILURE;
			/*
			 * The stop command failed, and we have media
			 * present. Put the level back by calling
			 * sd_ddi_pm_resume() and set the state back to
			 * its previous value.
			 */
			(void) sd_ddi_pm_resume(un);
			mutex_enter(SD_MUTEX(un));
			un->un_last_state = save_state;
			mutex_exit(SD_MUTEX(un));
			break;
		}
		/*
		 * The stop command from above succeeded.
		 */
		if (un->un_f_monitor_media_state) {
			/*
			 * Terminate watch thread in case of removable media
			 * devices going into low power state.
This is as per 6183 * the requirements of pm framework, otherwise commands 6184 * will be generated for the device (through watch 6185 * thread), even when the device is in low power state. 6186 */ 6187 mutex_enter(SD_MUTEX(un)); 6188 un->un_f_watcht_stopped = FALSE; 6189 if (un->un_swr_token != NULL) { 6190 opaque_t temp_token = un->un_swr_token; 6191 un->un_f_watcht_stopped = TRUE; 6192 un->un_swr_token = NULL; 6193 mutex_exit(SD_MUTEX(un)); 6194 (void) scsi_watch_request_terminate(temp_token, 6195 SCSI_WATCH_TERMINATE_WAIT); 6196 } else { 6197 mutex_exit(SD_MUTEX(un)); 6198 } 6199 } 6200 break; 6201 6202 default: /* The level requested is spindle on... */ 6203 /* 6204 * Legacy behavior: return success on a failed spinup 6205 * if there is no media in the drive. 6206 * Do this by looking at medium_present here. 6207 */ 6208 if ((sval != 0) && medium_present) { 6209 /* The start command from above failed */ 6210 rval = DDI_FAILURE; 6211 break; 6212 } 6213 /* 6214 * The start command from above succeeded 6215 * Resume the devices now that we have 6216 * started the disks 6217 */ 6218 (void) sd_ddi_pm_resume(un); 6219 6220 /* 6221 * Resume the watch thread since it was suspended 6222 * when the device went into low power mode. 6223 */ 6224 if (un->un_f_monitor_media_state) { 6225 mutex_enter(SD_MUTEX(un)); 6226 if (un->un_f_watcht_stopped == TRUE) { 6227 opaque_t temp_token; 6228 6229 un->un_f_watcht_stopped = FALSE; 6230 mutex_exit(SD_MUTEX(un)); 6231 temp_token = scsi_watch_request_submit( 6232 SD_SCSI_DEVP(un), 6233 sd_check_media_time, 6234 SENSE_LENGTH, sd_media_watch_cb, 6235 (caddr_t)dev); 6236 mutex_enter(SD_MUTEX(un)); 6237 un->un_swr_token = temp_token; 6238 } 6239 mutex_exit(SD_MUTEX(un)); 6240 } 6241 } 6242 if (got_semaphore_here != 0) { 6243 sema_v(&un->un_semoclose); 6244 } 6245 /* 6246 * On exit put the state back to it's original value 6247 * and broadcast to anyone waiting for the power 6248 * change completion. 6249 */ 6250 mutex_enter(SD_MUTEX(un)); 6251 un->un_state = state_before_pm; 6252 cv_broadcast(&un->un_suspend_cv); 6253 mutex_exit(SD_MUTEX(un)); 6254 6255 SD_TRACE(SD_LOG_IO_PM, un, "sdpower: exit, status = 0x%x\n", rval); 6256 6257 return (rval); 6258 } 6259 6260 6261 6262 /* 6263 * Function: sdattach 6264 * 6265 * Description: Driver's attach(9e) entry point function. 6266 * 6267 * Arguments: devi - opaque device info handle 6268 * cmd - attach type 6269 * 6270 * Return Code: DDI_SUCCESS 6271 * DDI_FAILURE 6272 * 6273 * Context: Kernel thread context 6274 */ 6275 6276 static int 6277 sdattach(dev_info_t *devi, ddi_attach_cmd_t cmd) 6278 { 6279 switch (cmd) { 6280 case DDI_ATTACH: 6281 return (sd_unit_attach(devi)); 6282 case DDI_RESUME: 6283 return (sd_ddi_resume(devi)); 6284 default: 6285 break; 6286 } 6287 return (DDI_FAILURE); 6288 } 6289 6290 6291 /* 6292 * Function: sddetach 6293 * 6294 * Description: Driver's detach(9E) entry point function. 
6295 * 6296 * Arguments: devi - opaque device info handle 6297 * cmd - detach type 6298 * 6299 * Return Code: DDI_SUCCESS 6300 * DDI_FAILURE 6301 * 6302 * Context: Kernel thread context 6303 */ 6304 6305 static int 6306 sddetach(dev_info_t *devi, ddi_detach_cmd_t cmd) 6307 { 6308 switch (cmd) { 6309 case DDI_DETACH: 6310 return (sd_unit_detach(devi)); 6311 case DDI_SUSPEND: 6312 return (sd_ddi_suspend(devi)); 6313 default: 6314 break; 6315 } 6316 return (DDI_FAILURE); 6317 } 6318 6319 6320 /* 6321 * Function: sd_sync_with_callback 6322 * 6323 * Description: Prevents sd_unit_attach or sd_unit_detach from freeing the soft 6324 * state while the callback routine is active. 6325 * 6326 * Arguments: un: softstate structure for the instance 6327 * 6328 * Context: Kernel thread context 6329 */ 6330 6331 static void 6332 sd_sync_with_callback(struct sd_lun *un) 6333 { 6334 ASSERT(un != NULL); 6335 6336 mutex_enter(SD_MUTEX(un)); 6337 6338 ASSERT(un->un_in_callback >= 0); 6339 6340 while (un->un_in_callback > 0) { 6341 mutex_exit(SD_MUTEX(un)); 6342 delay(2); 6343 mutex_enter(SD_MUTEX(un)); 6344 } 6345 6346 mutex_exit(SD_MUTEX(un)); 6347 } 6348 6349 /* 6350 * Function: sd_unit_attach 6351 * 6352 * Description: Performs DDI_ATTACH processing for sdattach(). Allocates 6353 * the soft state structure for the device and performs 6354 * all necessary structure and device initializations. 6355 * 6356 * Arguments: devi: the system's dev_info_t for the device. 6357 * 6358 * Return Code: DDI_SUCCESS if attach is successful. 6359 * DDI_FAILURE if any part of the attach fails. 6360 * 6361 * Context: Called at attach(9e) time for the DDI_ATTACH flag. 6362 * Kernel thread context only. Can sleep. 6363 */ 6364 6365 static int 6366 sd_unit_attach(dev_info_t *devi) 6367 { 6368 struct scsi_device *devp; 6369 struct sd_lun *un; 6370 char *variantp; 6371 int reservation_flag = SD_TARGET_IS_UNRESERVED; 6372 int instance; 6373 int rval; 6374 int wc_enabled; 6375 int tgt; 6376 uint64_t capacity; 6377 uint_t lbasize = 0; 6378 dev_info_t *pdip = ddi_get_parent(devi); 6379 int offbyone = 0; 6380 int geom_label_valid = 0; 6381 #if defined(__sparc) 6382 int max_xfer_size; 6383 #endif 6384 6385 /* 6386 * Retrieve the target driver's private data area. This was set 6387 * up by the HBA. 6388 */ 6389 devp = ddi_get_driver_private(devi); 6390 6391 /* 6392 * Retrieve the target ID of the device. 6393 */ 6394 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS, 6395 SCSI_ADDR_PROP_TARGET, -1); 6396 6397 /* 6398 * Since we have no idea what state things were left in by the last 6399 * user of the device, set up some 'default' settings, ie. turn 'em 6400 * off. The scsi_ifsetcap calls force re-negotiations with the drive. 6401 * Do this before the scsi_probe, which sends an inquiry. 6402 * This is a fix for bug (4430280). 6403 * Of special importance is wide-xfer. The drive could have been left 6404 * in wide transfer mode by the last driver to communicate with it, 6405 * this includes us. If that's the case, and if the following is not 6406 * setup properly or we don't re-negotiate with the drive prior to 6407 * transferring data to/from the drive, it causes bus parity errors, 6408 * data overruns, and unexpected interrupts. This first occurred when 6409 * the fix for bug (4378686) was made. 
6410 */ 6411 (void) scsi_ifsetcap(&devp->sd_address, "lun-reset", 0, 1); 6412 (void) scsi_ifsetcap(&devp->sd_address, "wide-xfer", 0, 1); 6413 (void) scsi_ifsetcap(&devp->sd_address, "auto-rqsense", 0, 1); 6414 6415 /* 6416 * Currently, scsi_ifsetcap sets tagged-qing capability for all LUNs 6417 * on a target. Setting it per lun instance actually sets the 6418 * capability of this target, which affects those luns already 6419 * attached on the same target. So during attach, we can only disable 6420 * this capability only when no other lun has been attached on this 6421 * target. By doing this, we assume a target has the same tagged-qing 6422 * capability for every lun. The condition can be removed when HBA 6423 * is changed to support per lun based tagged-qing capability. 6424 */ 6425 if (sd_scsi_get_target_lun_count(pdip, tgt) < 1) { 6426 (void) scsi_ifsetcap(&devp->sd_address, "tagged-qing", 0, 1); 6427 } 6428 6429 /* 6430 * Use scsi_probe() to issue an INQUIRY command to the device. 6431 * This call will allocate and fill in the scsi_inquiry structure 6432 * and point the sd_inq member of the scsi_device structure to it. 6433 * If the attach succeeds, then this memory will not be de-allocated 6434 * (via scsi_unprobe()) until the instance is detached. 6435 */ 6436 if (scsi_probe(devp, SLEEP_FUNC) != SCSIPROBE_EXISTS) { 6437 goto probe_failed; 6438 } 6439 6440 /* 6441 * Check the device type as specified in the inquiry data and 6442 * claim it if it is of a type that we support. 6443 */ 6444 switch (devp->sd_inq->inq_dtype) { 6445 case DTYPE_DIRECT: 6446 break; 6447 case DTYPE_RODIRECT: 6448 break; 6449 case DTYPE_OPTICAL: 6450 break; 6451 case DTYPE_NOTPRESENT: 6452 default: 6453 /* Unsupported device type; fail the attach. */ 6454 goto probe_failed; 6455 } 6456 6457 /* 6458 * Allocate the soft state structure for this unit. 6459 * 6460 * We rely upon this memory being set to all zeroes by 6461 * ddi_soft_state_zalloc(). We assume that any member of the 6462 * soft state structure that is not explicitly initialized by 6463 * this routine will have a value of zero. 6464 */ 6465 instance = ddi_get_instance(devp->sd_dev); 6466 if (ddi_soft_state_zalloc(sd_state, instance) != DDI_SUCCESS) { 6467 goto probe_failed; 6468 } 6469 6470 /* 6471 * Retrieve a pointer to the newly-allocated soft state. 6472 * 6473 * This should NEVER fail if the ddi_soft_state_zalloc() call above 6474 * was successful, unless something has gone horribly wrong and the 6475 * ddi's soft state internals are corrupt (in which case it is 6476 * probably better to halt here than just fail the attach....) 6477 */ 6478 if ((un = ddi_get_soft_state(sd_state, instance)) == NULL) { 6479 panic("sd_unit_attach: NULL soft state on instance:0x%x", 6480 instance); 6481 /*NOTREACHED*/ 6482 } 6483 6484 /* 6485 * Link the back ptr of the driver soft state to the scsi_device 6486 * struct for this lun. 6487 * Save a pointer to the softstate in the driver-private area of 6488 * the scsi_device struct. 6489 * Note: We cannot call SD_INFO, SD_TRACE, SD_ERROR, or SD_DIAG until 6490 * we first set un->un_sd below. 6491 */ 6492 un->un_sd = devp; 6493 devp->sd_private = (opaque_t)un; 6494 6495 /* 6496 * The following must be after devp is stored in the soft state struct. 6497 */ 6498 #ifdef SDDEBUG 6499 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 6500 "%s_unit_attach: un:0x%p instance:%d\n", 6501 ddi_driver_name(devi), un, instance); 6502 #endif 6503 6504 /* 6505 * Set up the device type and node type (for the minor nodes). 
6506 * By default we assume that the device can at least support the 6507 * Common Command Set. Call it a CD-ROM if it reports itself 6508 * as a RODIRECT device. 6509 */ 6510 switch (devp->sd_inq->inq_dtype) { 6511 case DTYPE_RODIRECT: 6512 un->un_node_type = DDI_NT_CD_CHAN; 6513 un->un_ctype = CTYPE_CDROM; 6514 break; 6515 case DTYPE_OPTICAL: 6516 un->un_node_type = DDI_NT_BLOCK_CHAN; 6517 un->un_ctype = CTYPE_ROD; 6518 break; 6519 default: 6520 un->un_node_type = DDI_NT_BLOCK_CHAN; 6521 un->un_ctype = CTYPE_CCS; 6522 break; 6523 } 6524 6525 /* 6526 * Try to read the interconnect type from the HBA. 6527 * 6528 * Note: This driver is currently compiled as two binaries, a parallel 6529 * scsi version (sd) and a fibre channel version (ssd). All functional 6530 * differences are determined at compile time. In the future a single 6531 * binary will be provided and the inteconnect type will be used to 6532 * differentiate between fibre and parallel scsi behaviors. At that time 6533 * it will be necessary for all fibre channel HBAs to support this 6534 * property. 6535 * 6536 * set un_f_is_fiber to TRUE ( default fiber ) 6537 */ 6538 un->un_f_is_fibre = TRUE; 6539 switch (scsi_ifgetcap(SD_ADDRESS(un), "interconnect-type", -1)) { 6540 case INTERCONNECT_SSA: 6541 un->un_interconnect_type = SD_INTERCONNECT_SSA; 6542 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6543 "sd_unit_attach: un:0x%p SD_INTERCONNECT_SSA\n", un); 6544 break; 6545 case INTERCONNECT_PARALLEL: 6546 un->un_f_is_fibre = FALSE; 6547 un->un_interconnect_type = SD_INTERCONNECT_PARALLEL; 6548 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6549 "sd_unit_attach: un:0x%p SD_INTERCONNECT_PARALLEL\n", un); 6550 break; 6551 case INTERCONNECT_SATA: 6552 un->un_f_is_fibre = FALSE; 6553 un->un_interconnect_type = SD_INTERCONNECT_SATA; 6554 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6555 "sd_unit_attach: un:0x%p SD_INTERCONNECT_SATA\n", un); 6556 break; 6557 case INTERCONNECT_FIBRE: 6558 un->un_interconnect_type = SD_INTERCONNECT_FIBRE; 6559 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6560 "sd_unit_attach: un:0x%p SD_INTERCONNECT_FIBRE\n", un); 6561 break; 6562 case INTERCONNECT_FABRIC: 6563 un->un_interconnect_type = SD_INTERCONNECT_FABRIC; 6564 un->un_node_type = DDI_NT_BLOCK_FABRIC; 6565 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6566 "sd_unit_attach: un:0x%p SD_INTERCONNECT_FABRIC\n", un); 6567 break; 6568 default: 6569 #ifdef SD_DEFAULT_INTERCONNECT_TYPE 6570 /* 6571 * The HBA does not support the "interconnect-type" property 6572 * (or did not provide a recognized type). 6573 * 6574 * Note: This will be obsoleted when a single fibre channel 6575 * and parallel scsi driver is delivered. In the meantime the 6576 * interconnect type will be set to the platform default.If that 6577 * type is not parallel SCSI, it means that we should be 6578 * assuming "ssd" semantics. However, here this also means that 6579 * the FC HBA is not supporting the "interconnect-type" property 6580 * like we expect it to, so log this occurrence. 6581 */ 6582 un->un_interconnect_type = SD_DEFAULT_INTERCONNECT_TYPE; 6583 if (!SD_IS_PARALLEL_SCSI(un)) { 6584 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6585 "sd_unit_attach: un:0x%p Assuming " 6586 "INTERCONNECT_FIBRE\n", un); 6587 } else { 6588 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6589 "sd_unit_attach: un:0x%p Assuming " 6590 "INTERCONNECT_PARALLEL\n", un); 6591 un->un_f_is_fibre = FALSE; 6592 } 6593 #else 6594 /* 6595 * Note: This source will be implemented when a single fibre 6596 * channel and parallel scsi driver is delivered. 
The default 6597 * will be to assume that if a device does not support the 6598 * "interconnect-type" property it is a parallel SCSI HBA and 6599 * we will set the interconnect type for parallel scsi. 6600 */ 6601 un->un_interconnect_type = SD_INTERCONNECT_PARALLEL; 6602 un->un_f_is_fibre = FALSE; 6603 #endif 6604 break; 6605 } 6606 6607 if (un->un_f_is_fibre == TRUE) { 6608 if (scsi_ifgetcap(SD_ADDRESS(un), "scsi-version", 1) == 6609 SCSI_VERSION_3) { 6610 switch (un->un_interconnect_type) { 6611 case SD_INTERCONNECT_FIBRE: 6612 case SD_INTERCONNECT_SSA: 6613 un->un_node_type = DDI_NT_BLOCK_WWN; 6614 break; 6615 default: 6616 break; 6617 } 6618 } 6619 } 6620 6621 /* 6622 * Initialize the Request Sense command for the target 6623 */ 6624 if (sd_alloc_rqs(devp, un) != DDI_SUCCESS) { 6625 goto alloc_rqs_failed; 6626 } 6627 6628 /* 6629 * Set un_retry_count with SD_RETRY_COUNT, this is ok for Sparc 6630 * with separate binary for sd and ssd. 6631 * 6632 * x86 has 1 binary, un_retry_count is set base on connection type. 6633 * The hardcoded values will go away when Sparc uses 1 binary 6634 * for sd and ssd. This hardcoded values need to match 6635 * SD_RETRY_COUNT in sddef.h 6636 * The value used is base on interconnect type. 6637 * fibre = 3, parallel = 5 6638 */ 6639 #if defined(__i386) || defined(__amd64) 6640 un->un_retry_count = un->un_f_is_fibre ? 3 : 5; 6641 #else 6642 un->un_retry_count = SD_RETRY_COUNT; 6643 #endif 6644 6645 /* 6646 * Set the per disk retry count to the default number of retries 6647 * for disks and CDROMs. This value can be overridden by the 6648 * disk property list or an entry in sd.conf. 6649 */ 6650 un->un_notready_retry_count = 6651 ISCD(un) ? CD_NOT_READY_RETRY_COUNT(un) 6652 : DISK_NOT_READY_RETRY_COUNT(un); 6653 6654 /* 6655 * Set the busy retry count to the default value of un_retry_count. 6656 * This can be overridden by entries in sd.conf or the device 6657 * config table. 6658 */ 6659 un->un_busy_retry_count = un->un_retry_count; 6660 6661 /* 6662 * Init the reset threshold for retries. This number determines 6663 * how many retries must be performed before a reset can be issued 6664 * (for certain error conditions). This can be overridden by entries 6665 * in sd.conf or the device config table. 6666 */ 6667 un->un_reset_retry_count = (un->un_retry_count / 2); 6668 6669 /* 6670 * Set the victim_retry_count to the default un_retry_count 6671 */ 6672 un->un_victim_retry_count = (2 * un->un_retry_count); 6673 6674 /* 6675 * Set the reservation release timeout to the default value of 6676 * 5 seconds. This can be overridden by entries in ssd.conf or the 6677 * device config table. 6678 */ 6679 un->un_reserve_release_time = 5; 6680 6681 /* 6682 * Set up the default maximum transfer size. Note that this may 6683 * get updated later in the attach, when setting up default wide 6684 * operations for disks. 6685 */ 6686 #if defined(__i386) || defined(__amd64) 6687 un->un_max_xfer_size = (uint_t)SD_DEFAULT_MAX_XFER_SIZE; 6688 un->un_partial_dma_supported = 1; 6689 #else 6690 un->un_max_xfer_size = (uint_t)maxphys; 6691 #endif 6692 6693 /* 6694 * Get "allow bus device reset" property (defaults to "enabled" if 6695 * the property was not defined). This is to disable bus resets for 6696 * certain kinds of error recovery. Note: In the future when a run-time 6697 * fibre check is available the soft state flag should default to 6698 * enabled. 
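	 *
	 * For example (an illustrative driver.conf fragment, not one
	 * shipped with the driver), bus device resets could be disabled
	 * for non-fibre instances with a line like:
	 *
	 *	allow-bus-device-reset=0;
	 *
	 * since the ddi_getprop() call below falls back to 1 only when
	 * the property is absent.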
6699 */ 6700 if (un->un_f_is_fibre == TRUE) { 6701 un->un_f_allow_bus_device_reset = TRUE; 6702 } else { 6703 if (ddi_getprop(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS, 6704 "allow-bus-device-reset", 1) != 0) { 6705 un->un_f_allow_bus_device_reset = TRUE; 6706 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6707 "sd_unit_attach: un:0x%p Bus device reset " 6708 "enabled\n", un); 6709 } else { 6710 un->un_f_allow_bus_device_reset = FALSE; 6711 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6712 "sd_unit_attach: un:0x%p Bus device reset " 6713 "disabled\n", un); 6714 } 6715 } 6716 6717 /* 6718 * Check if this is an ATAPI device. ATAPI devices use Group 1 6719 * Read/Write commands and Group 2 Mode Sense/Select commands. 6720 * 6721 * Note: The "obsolete" way of doing this is to check for the "atapi" 6722 * property. The new "variant" property with a value of "atapi" has been 6723 * introduced so that future 'variants' of standard SCSI behavior (like 6724 * atapi) could be specified by the underlying HBA drivers by supplying 6725 * a new value for the "variant" property, instead of having to define a 6726 * new property. 6727 */ 6728 if (ddi_prop_get_int(DDI_DEV_T_ANY, devi, 0, "atapi", -1) != -1) { 6729 un->un_f_cfg_is_atapi = TRUE; 6730 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6731 "sd_unit_attach: un:0x%p Atapi device\n", un); 6732 } 6733 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, devi, 0, "variant", 6734 &variantp) == DDI_PROP_SUCCESS) { 6735 if (strcmp(variantp, "atapi") == 0) { 6736 un->un_f_cfg_is_atapi = TRUE; 6737 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6738 "sd_unit_attach: un:0x%p Atapi device\n", un); 6739 } 6740 ddi_prop_free(variantp); 6741 } 6742 6743 un->un_cmd_timeout = SD_IO_TIME; 6744 6745 /* Info on current states, statuses, etc. (Updated frequently) */ 6746 un->un_state = SD_STATE_NORMAL; 6747 un->un_last_state = SD_STATE_NORMAL; 6748 6749 /* Control & status info for command throttling */ 6750 un->un_throttle = sd_max_throttle; 6751 un->un_saved_throttle = sd_max_throttle; 6752 un->un_min_throttle = sd_min_throttle; 6753 6754 if (un->un_f_is_fibre == TRUE) { 6755 un->un_f_use_adaptive_throttle = TRUE; 6756 } else { 6757 un->un_f_use_adaptive_throttle = FALSE; 6758 } 6759 6760 /* Removable media support. */ 6761 cv_init(&un->un_state_cv, NULL, CV_DRIVER, NULL); 6762 un->un_mediastate = DKIO_NONE; 6763 un->un_specified_mediastate = DKIO_NONE; 6764 6765 /* CVs for suspend/resume (PM or DR) */ 6766 cv_init(&un->un_suspend_cv, NULL, CV_DRIVER, NULL); 6767 cv_init(&un->un_disk_busy_cv, NULL, CV_DRIVER, NULL); 6768 6769 /* Power management support. */ 6770 un->un_power_level = SD_SPINDLE_UNINIT; 6771 6772 cv_init(&un->un_wcc_cv, NULL, CV_DRIVER, NULL); 6773 un->un_f_wcc_inprog = 0; 6774 6775 /* 6776 * The open/close semaphore is used to serialize threads executing 6777 * in the driver's open & close entry point routines for a given 6778 * instance. 6779 */ 6780 (void) sema_init(&un->un_semoclose, 1, NULL, SEMA_DRIVER, NULL); 6781 6782 /* 6783 * The conf file entry and softstate variable is a forceful override, 6784 * meaning a non-zero value must be entered to change the default. 6785 */ 6786 un->un_f_disksort_disabled = FALSE; 6787 6788 /* 6789 * Retrieve the properties from the static driver table or the driver 6790 * configuration file (.conf) for this unit and update the soft state 6791 * for the device as needed for the indicated properties. 
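	 *
	 * As a sketch of the .conf side of this (the entry below is an
	 * assumed example, not a real device entry; the exact tuple layout
	 * is defined by the property parsing code, not here), sd-config-list
	 * pairs a blank-padded INQUIRY vendor/product string with a named
	 * data property:
	 *
	 *	sd-config-list = "VENDOR  PRODUCT", "vendor-product-data";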
	 * Note: the property configuration needs to occur here as some of
	 * the following routines may have dependencies on soft state flags
	 * set as part of the driver property configuration.
	 */
	sd_read_unit_properties(un);
	SD_TRACE(SD_LOG_ATTACH_DETACH, un,
	    "sd_unit_attach: un:0x%p property configuration complete.\n", un);

	/*
	 * A device is treated as hotpluggable only if it has the
	 * "hotpluggable" property; otherwise it is regarded as
	 * non-hotpluggable.
	 */
	if (ddi_prop_get_int(DDI_DEV_T_ANY, devi, 0, "hotpluggable",
	    -1) != -1) {
		un->un_f_is_hotpluggable = TRUE;
	}

	/*
	 * Set the unit's attributes (flags) according to "hotpluggable"
	 * and the RMB bit in the INQUIRY data.
	 */
	sd_set_unit_attributes(un, devi);

	/*
	 * By default, we mark the capacity, lbasize, and geometry
	 * as invalid. Only if we successfully read a valid capacity
	 * will we update the un_blockcount and un_tgt_blocksize with the
	 * valid values (the geometry will be validated later).
	 */
	un->un_f_blockcount_is_valid	= FALSE;
	un->un_f_tgt_blocksize_is_valid	= FALSE;

	/*
	 * Use DEV_BSIZE and DEV_BSHIFT as defaults, until we can determine
	 * otherwise.
	 */
	un->un_tgt_blocksize = un->un_sys_blocksize = DEV_BSIZE;
	un->un_blockcount = 0;

	/*
	 * Set up the per-instance info needed to determine the correct
	 * CDBs and other info for issuing commands to the target.
	 */
	sd_init_cdb_limits(un);

	/*
	 * Set up the IO chains to use, based upon the target type.
	 */
	if (un->un_f_non_devbsize_supported) {
		un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA;
	} else {
		un->un_buf_chain_type = SD_CHAIN_INFO_DISK;
	}
	un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD;
	un->un_direct_chain_type = SD_CHAIN_INFO_DIRECT_CMD;
	un->un_priority_chain_type = SD_CHAIN_INFO_PRIORITY_CMD;

	un->un_xbuf_attr = ddi_xbuf_attr_create(sizeof (struct sd_xbuf),
	    sd_xbuf_strategy, un, sd_xbuf_active_limit, sd_xbuf_reserve_limit,
	    ddi_driver_major(devi), DDI_XBUF_QTHREAD_DRIVER);
	ddi_xbuf_attr_register_devinfo(un->un_xbuf_attr, devi);

	if (ISCD(un)) {
		un->un_additional_codes = sd_additional_codes;
	} else {
		un->un_additional_codes = NULL;
	}

	/*
	 * Create the kstats here so they can be available for attach-time
	 * routines that send commands to the unit (either polled or via
	 * sd_send_scsi_cmd).
	 *
	 * Note: This is a critical sequence that needs to be maintained:
	 *	1) Instantiate the kstats here, before any routines using the
	 *	   iopath (i.e. sd_send_scsi_cmd).
	 *	2) Instantiate and initialize the partition stats
	 *	   (sd_set_pstats).
	 *	3) Initialize the error stats (sd_set_errstats), following
	 *	   sd_validate_geometry(), sd_register_devid(),
	 *	   and sd_cache_control().
6875 */ 6876 6877 un->un_stats = kstat_create(sd_label, instance, 6878 NULL, "disk", KSTAT_TYPE_IO, 1, KSTAT_FLAG_PERSISTENT); 6879 if (un->un_stats != NULL) { 6880 un->un_stats->ks_lock = SD_MUTEX(un); 6881 kstat_install(un->un_stats); 6882 } 6883 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 6884 "sd_unit_attach: un:0x%p un_stats created\n", un); 6885 6886 sd_create_errstats(un, instance); 6887 if (un->un_errstats == NULL) { 6888 goto create_errstats_failed; 6889 } 6890 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 6891 "sd_unit_attach: un:0x%p errstats created\n", un); 6892 6893 /* 6894 * The following if/else code was relocated here from below as part 6895 * of the fix for bug (4430280). However with the default setup added 6896 * on entry to this routine, it's no longer absolutely necessary for 6897 * this to be before the call to sd_spin_up_unit. 6898 */ 6899 if (SD_IS_PARALLEL_SCSI(un) || SD_IS_SERIAL(un)) { 6900 int tq_trigger_flag = (((devp->sd_inq->inq_ansi == 4) || 6901 (devp->sd_inq->inq_ansi == 5)) && 6902 devp->sd_inq->inq_bque) || devp->sd_inq->inq_cmdque; 6903 6904 /* 6905 * If tagged queueing is supported by the target 6906 * and by the host adapter then we will enable it 6907 */ 6908 un->un_tagflags = 0; 6909 if ((devp->sd_inq->inq_rdf == RDF_SCSI2) && tq_trigger_flag && 6910 (un->un_f_arq_enabled == TRUE)) { 6911 if (scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 6912 1, 1) == 1) { 6913 un->un_tagflags = FLAG_STAG; 6914 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6915 "sd_unit_attach: un:0x%p tag queueing " 6916 "enabled\n", un); 6917 } else if (scsi_ifgetcap(SD_ADDRESS(un), 6918 "untagged-qing", 0) == 1) { 6919 un->un_f_opt_queueing = TRUE; 6920 un->un_saved_throttle = un->un_throttle = 6921 min(un->un_throttle, 3); 6922 } else { 6923 un->un_f_opt_queueing = FALSE; 6924 un->un_saved_throttle = un->un_throttle = 1; 6925 } 6926 } else if ((scsi_ifgetcap(SD_ADDRESS(un), "untagged-qing", 0) 6927 == 1) && (un->un_f_arq_enabled == TRUE)) { 6928 /* The Host Adapter supports internal queueing. */ 6929 un->un_f_opt_queueing = TRUE; 6930 un->un_saved_throttle = un->un_throttle = 6931 min(un->un_throttle, 3); 6932 } else { 6933 un->un_f_opt_queueing = FALSE; 6934 un->un_saved_throttle = un->un_throttle = 1; 6935 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6936 "sd_unit_attach: un:0x%p no tag queueing\n", un); 6937 } 6938 6939 /* 6940 * Enable large transfers for SATA/SAS drives 6941 */ 6942 if (SD_IS_SERIAL(un)) { 6943 un->un_max_xfer_size = 6944 ddi_getprop(DDI_DEV_T_ANY, devi, 0, 6945 sd_max_xfer_size, SD_MAX_XFER_SIZE); 6946 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6947 "sd_unit_attach: un:0x%p max transfer " 6948 "size=0x%x\n", un, un->un_max_xfer_size); 6949 6950 } 6951 6952 /* Setup or tear down default wide operations for disks */ 6953 6954 /* 6955 * Note: Legacy: it may be possible for both "sd_max_xfer_size" 6956 * and "ssd_max_xfer_size" to exist simultaneously on the same 6957 * system and be set to different values. In the future this 6958 * code may need to be updated when the ssd module is 6959 * obsoleted and removed from the system. 
6960		 */
6961		if (SD_IS_PARALLEL_SCSI(un) &&
6962		    (devp->sd_inq->inq_rdf == RDF_SCSI2) &&
6963		    (devp->sd_inq->inq_wbus16 || devp->sd_inq->inq_wbus32)) {
6964			if (scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer",
6965			    1, 1) == 1) {
6966				SD_INFO(SD_LOG_ATTACH_DETACH, un,
6967				    "sd_unit_attach: un:0x%p Wide Transfer "
6968				    "enabled\n", un);
6969			}
6970
6971			/*
6972			 * If tagged queuing has also been enabled, then
6973			 * enable large xfers
6974			 */
6975			if (un->un_saved_throttle == sd_max_throttle) {
6976				un->un_max_xfer_size =
6977				    ddi_getprop(DDI_DEV_T_ANY, devi, 0,
6978				    sd_max_xfer_size, SD_MAX_XFER_SIZE);
6979				SD_INFO(SD_LOG_ATTACH_DETACH, un,
6980				    "sd_unit_attach: un:0x%p max transfer "
6981				    "size=0x%x\n", un, un->un_max_xfer_size);
6982			}
6983		} else {
6984			if (scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer",
6985			    0, 1) == 1) {
6986				SD_INFO(SD_LOG_ATTACH_DETACH, un,
6987				    "sd_unit_attach: un:0x%p "
6988				    "Wide Transfer disabled\n", un);
6989			}
6990		}
6991	} else {
6992		un->un_tagflags = FLAG_STAG;
6993		un->un_max_xfer_size = ddi_getprop(DDI_DEV_T_ANY,
6994		    devi, 0, sd_max_xfer_size, SD_MAX_XFER_SIZE);
6995	}
6996
6997	/*
6998	 * If this target supports LUN reset, try to enable it.
6999	 */
7000	if (un->un_f_lun_reset_enabled) {
7001		if (scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 1, 1) == 1) {
7002			SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_unit_attach: "
7003			    "un:0x%p lun_reset capability set\n", un);
7004		} else {
7005			SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_unit_attach: "
7006			    "un:0x%p lun-reset capability not set\n", un);
7007		}
7008	}
7009
7010	/*
7011	 * Adjust the maximum transfer size. This is to fix
7012	 * the problem of partial DMA support on SPARC. Some
7013	 * HBA drivers, like aac, have a very small dma_attr_maxxfer
7014	 * size, which requires partial DMA support on SPARC.
7015	 * In the future the SPARC pci nexus driver may solve
7016	 * the problem instead of this fix.
7017	 */
7018 #if defined(__sparc)
7019	max_xfer_size = scsi_ifgetcap(SD_ADDRESS(un), "dma-max", 1);
7020	if ((max_xfer_size > 0) && (max_xfer_size < un->un_max_xfer_size)) {
7021		un->un_max_xfer_size = max_xfer_size;
7022		un->un_partial_dma_supported = 1;
7023	}
7024 #endif
7025
7026	/*
7027	 * Set PKT_DMA_PARTIAL flag.
7028	 */
7029	if (un->un_partial_dma_supported == 1) {
7030		un->un_pkt_flags = PKT_DMA_PARTIAL;
7031	} else {
7032		un->un_pkt_flags = 0;
7033	}
7034
7035	/*
7036	 * At this point in the attach, we have enough info in the
7037	 * soft state to be able to issue commands to the target.
7038	 *
7039	 * All command paths used below MUST issue their commands as
7040	 * SD_PATH_DIRECT. This is important as intermediate layers
7041	 * are not all initialized yet (such as PM).
7042	 */
7043
7044	/*
7045	 * Send a TEST UNIT READY command to the device. This should clear
7046	 * any outstanding UNIT ATTENTION that may be present.
7047	 *
7048	 * Note: Don't check for success, just track if there is a reservation;
7049	 * this is a throw-away command to clear any unit attentions.
7050	 *
7051	 * Note: This MUST be the first command issued to the target during
7052	 * attach to ensure power-on UNIT ATTENTIONs are cleared.
7053	 * Pass in flag SD_DONT_RETRY_TUR to prevent the long delays associated
7054	 * with attempts at spinning up a device with no media.
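	 *
	 * For reference, TEST UNIT READY is the simplest command in the
	 * SCSI repertoire: a Group 0 (6-byte) CDB of all zeros (opcode
	 * 0x00), with the outcome conveyed entirely through the command
	 * status and sense data. A sketch of the CDB:
	 *
	 *	uchar_t tur_cdb[CDB_GROUP0] = { 0, 0, 0, 0, 0, 0 };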
7055	 */
7056	if (sd_send_scsi_TEST_UNIT_READY(un, SD_DONT_RETRY_TUR) == EACCES) {
7057		reservation_flag = SD_TARGET_IS_RESERVED;
7058	}
7059
7060	/*
7061	 * If the device is NOT a removable media device, attempt to spin
7062	 * it up (using the START_STOP_UNIT command) and read its capacity
7063	 * (using the READ CAPACITY command). Note, however, that either
7064	 * of these could fail and in some cases we would continue with
7065	 * the attach despite the failure (see below).
7066	 */
7067	if (un->un_f_descr_format_supported) {
7068		switch (sd_spin_up_unit(un)) {
7069		case 0:
7070			/*
7071			 * Spin-up was successful; now try to read the
7072			 * capacity. If successful then save the results
7073			 * and mark the capacity & lbasize as valid.
7074			 */
7075			SD_TRACE(SD_LOG_ATTACH_DETACH, un,
7076			    "sd_unit_attach: un:0x%p spin-up successful\n", un);
7077
7078			switch (sd_send_scsi_READ_CAPACITY(un, &capacity,
7079			    &lbasize, SD_PATH_DIRECT)) {
7080			case 0: {
7081				if (capacity > DK_MAX_BLOCKS) {
7082 #ifdef _LP64
7083					if (capacity + 1 >
7084					    SD_GROUP1_MAX_ADDRESS) {
7085						/*
7086						 * Enable descriptor format
7087						 * sense data so that we can
7088						 * get 64 bit sense data
7089						 * fields.
7090						 */
7091						sd_enable_descr_sense(un);
7092					}
7093 #else
7094					/* 32-bit kernels can't handle this */
7095					scsi_log(SD_DEVINFO(un),
7096					    sd_label, CE_WARN,
7097					    "disk has %llu blocks, which "
7098					    "is too large for a 32-bit "
7099					    "kernel", capacity);
7100
7101 #if defined(__i386) || defined(__amd64)
7102					/*
7103					 * A 1TB disk was treated as
7104					 * (1T - 512)B in the past, so it
7105					 * might have a valid VTOC and
7106					 * Solaris partitions; we have to
7107					 * allow it to continue to work.
7108					 */
7109					if (capacity - 1 > DK_MAX_BLOCKS)
7110 #endif
7111						goto spinup_failed;
7112 #endif
7113				}
7114
7115				/*
7116				 * It is not necessary to check here whether
7117				 * the capacity of the device is bigger than
7118				 * what the max HBA CDB can address, because
7119				 * sd_send_scsi_READ_CAPACITY retrieves the
7120				 * capacity via a USCSI command, which is
7121				 * itself constrained by the max HBA CDB.
7122				 * sd_send_scsi_READ_CAPACITY returns EINVAL
7123				 * when a bigger CDB than the required CDB
7124				 * length is used; that case is handled in
7125				 * "case EINVAL" below.
7126				 */
7127
7128				/*
7129				 * The following relies on
7130				 * sd_send_scsi_READ_CAPACITY never
7131				 * returning 0 for capacity and/or lbasize.
7132				 */
7133				sd_update_block_info(un, lbasize, capacity);
7134
7135				SD_INFO(SD_LOG_ATTACH_DETACH, un,
7136				    "sd_unit_attach: un:0x%p capacity = %ld "
7137				    "blocks; lbasize = %ld.\n", un,
7138				    un->un_blockcount, un->un_tgt_blocksize);
7139
7140				break;
7141			}
7142			case EINVAL:
7143				/*
7144				 * In the case where the max-cdb-length property
7145				 * is smaller than the required CDB length for
7146				 * a SCSI device, a target driver can fail to
7147				 * attach to that device.
7148				 */
7149				scsi_log(SD_DEVINFO(un),
7150				    sd_label, CE_WARN,
7151				    "disk capacity is too large "
7152				    "for current cdb length");
7153				goto spinup_failed;
7154			case EACCES:
7155				/*
7156				 * Should never get here if the spin-up
7157				 * succeeded, but code it in anyway.
7158				 * From here, just continue with the attach...
7159				 */
7160				SD_INFO(SD_LOG_ATTACH_DETACH, un,
7161				    "sd_unit_attach: un:0x%p "
7162				    "sd_send_scsi_READ_CAPACITY "
7163				    "returned reservation conflict\n", un);
7164				reservation_flag = SD_TARGET_IS_RESERVED;
7165				break;
7166			default:
7167				/*
7168				 * Likewise, should never get here if the
7169				 * spin-up succeeded. Just continue with
7170				 * the attach...
7171				 */
7172				break;
7173			}
7174			break;
7175		case EACCES:
7176			/*
7177			 * Device is reserved by another host. In this case
7178			 * we could not spin it up or read the capacity, but
7179			 * we continue with the attach anyway.
7180			 */
7181			SD_INFO(SD_LOG_ATTACH_DETACH, un,
7182			    "sd_unit_attach: un:0x%p spin-up reservation "
7183			    "conflict.\n", un);
7184			reservation_flag = SD_TARGET_IS_RESERVED;
7185			break;
7186		default:
7187			/* Fail the attach if the spin-up failed. */
7188			SD_INFO(SD_LOG_ATTACH_DETACH, un,
7189			    "sd_unit_attach: un:0x%p spin-up failed.", un);
7190			goto spinup_failed;
7191		}
7192	}
7193
7194	/*
7195	 * Check to see if this is a MMC drive
7196	 */
7197	if (ISCD(un)) {
7198		sd_set_mmc_caps(un);
7199	}
7200
7201
7202	/*
7203	 * Add a zero-length attribute to tell the world we support
7204	 * kernel ioctls (for layered drivers)
7205	 */
7206	(void) ddi_prop_create(DDI_DEV_T_NONE, devi, DDI_PROP_CANSLEEP,
7207	    DDI_KERNEL_IOCTL, NULL, 0);
7208
7209	/*
7210	 * Add a boolean property to tell the world we support
7211	 * the B_FAILFAST flag (for layered drivers)
7212	 */
7213	(void) ddi_prop_create(DDI_DEV_T_NONE, devi, DDI_PROP_CANSLEEP,
7214	    "ddi-failfast-supported", NULL, 0);
7215
7216	/*
7217	 * Initialize power management
7218	 */
7219	mutex_init(&un->un_pm_mutex, NULL, MUTEX_DRIVER, NULL);
7220	cv_init(&un->un_pm_busy_cv, NULL, CV_DRIVER, NULL);
7221	sd_setup_pm(un, devi);
7222	if (un->un_f_pm_is_enabled == FALSE) {
7223		/*
7224		 * For performance, point to a jump table that does
7225		 * not include pm.
7226		 * The direct and priority chains don't change with PM.
7227		 *
7228		 * Note: this is currently done based on individual device
7229		 * capabilities. When an interface for determining system
7230		 * power enabled state becomes available, or when additional
7231		 * layers are added to the command chain, these values will
7232		 * have to be re-evaluated for correctness.
7233		 */
7234		if (un->un_f_non_devbsize_supported) {
7235			un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA_NO_PM;
7236		} else {
7237			un->un_buf_chain_type = SD_CHAIN_INFO_DISK_NO_PM;
7238		}
7239		un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD_NO_PM;
7240	}
7241
7242	/*
7243	 * This property is set to 0 by HA software to avoid retries
7244	 * on a reserved disk. (The preferred property name is
7245	 * "retry-on-reservation-conflict".) (1189689)
7246	 *
7247	 * Note: The use of a global here can have unintended consequences. A
7248	 * per-instance variable is preferable to match the capabilities of
7249	 * different underlying HBAs. (4402600)
7250	 */
7251	sd_retry_on_reservation_conflict = ddi_getprop(DDI_DEV_T_ANY, devi,
7252	    DDI_PROP_DONTPASS, "retry-on-reservation-conflict",
7253	    sd_retry_on_reservation_conflict);
7254	if (sd_retry_on_reservation_conflict != 0) {
7255		sd_retry_on_reservation_conflict = ddi_getprop(DDI_DEV_T_ANY,
7256		    devi, DDI_PROP_DONTPASS, sd_resv_conflict_name,
7257		    sd_retry_on_reservation_conflict);
7258	}
7259
7260	/* Set up options for QFULL handling. */
7261	if ((rval = ddi_getprop(DDI_DEV_T_ANY, devi, 0,
7262	    "qfull-retries", -1)) != -1) {
7263		(void) scsi_ifsetcap(SD_ADDRESS(un), "qfull-retries",
7264		    rval, 1);
7265	}
7266	if ((rval = ddi_getprop(DDI_DEV_T_ANY, devi, 0,
7267	    "qfull-retry-interval", -1)) != -1) {
7268		(void) scsi_ifsetcap(SD_ADDRESS(un), "qfull-retry-interval",
7269		    rval, 1);
7270	}
7271
7272	/*
7273	 * This just prints a message that announces the existence of the
7274	 * device. The message is always printed in the system logfile, but
7275	 * only appears on the console if the system is booted with the
7276	 * -v (verbose) argument.
7277	 */
7278	ddi_report_dev(devi);
7279
7280	un->un_mediastate = DKIO_NONE;
7281
7282	cmlb_alloc_handle(&un->un_cmlbhandle);
7283
7284 #if defined(__i386) || defined(__amd64)
7285	/*
7286	 * On x86, compensate for off-by-1 legacy error
7287	 */
7288	if (!un->un_f_has_removable_media && !un->un_f_is_hotpluggable &&
7289	    (lbasize == un->un_sys_blocksize))
7290		offbyone = CMLB_OFF_BY_ONE;
7291 #endif
7292
7293	if (cmlb_attach(devi, &sd_tgops, (int)devp->sd_inq->inq_dtype,
7294	    un->un_f_has_removable_media, un->un_f_is_hotpluggable,
7295	    un->un_node_type, offbyone, un->un_cmlbhandle,
7296	    (void *)SD_PATH_DIRECT) != 0) {
7297		goto cmlb_attach_failed;
7298	}
7299
7300
7301	/*
7302	 * Read and validate the device's geometry (i.e., the disk label).
7303	 * A new unformatted drive will not have a valid geometry, but
7304	 * the driver needs to successfully attach to this device so
7305	 * the drive can be formatted via ioctls.
7306	 */
7307	geom_label_valid = (cmlb_validate(un->un_cmlbhandle, 0,
7308	    (void *)SD_PATH_DIRECT) == 0) ? 1 : 0;
7309
7310	mutex_enter(SD_MUTEX(un));
7311
7312	/*
7313	 * Read and initialize the devid for the unit.
7314	 */
7315	if (un->un_f_devid_supported) {
7316		sd_register_devid(un, devi, reservation_flag);
7317	}
7318	mutex_exit(SD_MUTEX(un));
7319
7320 #if (defined(__fibre))
7321	/*
7322	 * Register callbacks for fibre only. You can't do this solely
7323	 * on the basis of the devid_type because this is hba specific.
7324	 * We need to query our hba capabilities to find out whether to
7325	 * register or not.
7326	 */
7327	if (un->un_f_is_fibre) {
7328		if (strcmp(un->un_node_type, DDI_NT_BLOCK_CHAN)) {
7329			sd_init_event_callbacks(un);
7330			SD_TRACE(SD_LOG_ATTACH_DETACH, un,
7331			    "sd_unit_attach: un:0x%p event callbacks inserted",
7332			    un);
7333		}
7334	}
7335 #endif
7336
7337	if (un->un_f_opt_disable_cache == TRUE) {
7338		/*
7339		 * Disable both read cache and write cache. This is
7340		 * the historic behavior of the keywords in the config file.
7341		 */
7342		if (sd_cache_control(un, SD_CACHE_DISABLE, SD_CACHE_DISABLE) !=
7343		    0) {
7344			SD_ERROR(SD_LOG_ATTACH_DETACH, un,
7345			    "sd_unit_attach: un:0x%p Could not disable "
7346			    "caching", un);
7347			goto devid_failed;
7348		}
7349	}
7350
7351	/*
7352	 * Check the value of the WCE bit now and
7353	 * set un_f_write_cache_enabled accordingly.
7354	 */
7355	(void) sd_get_write_cache_enabled(un, &wc_enabled);
7356	mutex_enter(SD_MUTEX(un));
7357	un->un_f_write_cache_enabled = (wc_enabled != 0);
7358	mutex_exit(SD_MUTEX(un));
7359
7360	/*
7361	 * Check the value of the NV_SUP bit and set
7362	 * un_f_suppress_cache_flush accordingly.
7363	 */
7364	sd_get_nv_sup(un);
7365
7366	/*
7367	 * Find out what type of reservation this disk supports.
7368	 */
7369	switch (sd_send_scsi_PERSISTENT_RESERVE_IN(un, SD_READ_KEYS, 0, NULL)) {
7370	case 0:
7371		/*
7372		 * SCSI-3 reservations are supported.
7373		 */
7374		un->un_reservation_type = SD_SCSI3_RESERVATION;
7375		SD_INFO(SD_LOG_ATTACH_DETACH, un,
7376		    "sd_unit_attach: un:0x%p SCSI-3 reservations\n", un);
7377		break;
7378	case ENOTSUP:
7379		/*
7380		 * The PERSISTENT RESERVE IN command would not be recognized by
7381		 * a SCSI-2 device, so assume the reservation type is SCSI-2.
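	 *
	 * For reference (a sketch, not code from this driver):
	 * PERSISTENT RESERVE IN is a 10-byte CDB, opcode 0x5E, with the
	 * service action carried in the low bits of byte 1; SD_READ_KEYS
	 * selects the READ KEYS service action, which returns the
	 * registered reservation keys. Roughly:
	 *
	 *	cdb[0] = 0x5E;		PERSISTENT RESERVE IN
	 *	cdb[1] = 0x00;		service action: READ KEYS
	 *	cdb[7], cdb[8]		allocation length
	 *
	 * A SCSI-2 device that implements only RESERVE/RELEASE rejects
	 * this opcode, which is why ENOTSUP is mapped to SCSI-2 below.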
7382		 */
7383		SD_INFO(SD_LOG_ATTACH_DETACH, un,
7384		    "sd_unit_attach: un:0x%p SCSI-2 reservations\n", un);
7385		un->un_reservation_type = SD_SCSI2_RESERVATION;
7386		break;
7387	default:
7388		/*
7389		 * default to SCSI-3 reservations
7390		 */
7391		SD_INFO(SD_LOG_ATTACH_DETACH, un,
7392		    "sd_unit_attach: un:0x%p default SCSI3 reservations\n", un);
7393		un->un_reservation_type = SD_SCSI3_RESERVATION;
7394		break;
7395	}
7396
7397	/*
7398	 * Set the pstat and error stat values here, so data obtained during
7399	 * the previous attach-time routines is available.
7400	 *
7401	 * Note: This is a critical sequence that needs to be maintained:
7402	 *	1) Instantiate the kstats before any routines using the iopath
7403	 *	   (i.e. sd_send_scsi_cmd).
7404	 *	2) Initialize the error stats (sd_set_errstats) and partition
7405	 *	   stats (sd_set_pstats) here, following
7406	 *	   cmlb_validate_geometry(), sd_register_devid(), and
7407	 *	   sd_cache_control().
7408	 */
7409
7410	if (un->un_f_pkstats_enabled && geom_label_valid) {
7411		sd_set_pstats(un);
7412		SD_TRACE(SD_LOG_IO_PARTITION, un,
7413		    "sd_unit_attach: un:0x%p pstats created and set\n", un);
7414	}
7415
7416	sd_set_errstats(un);
7417	SD_TRACE(SD_LOG_ATTACH_DETACH, un,
7418	    "sd_unit_attach: un:0x%p errstats set\n", un);
7419
7420
7421	/*
7422	 * After successfully attaching an instance, we record the information
7423	 * of how many luns have been attached on the corresponding target and
7424	 * controller for parallel SCSI. This information is used when sd tries
7425	 * to set the tagged queuing capability in the HBA.
7426	 */
7427	if (SD_IS_PARALLEL_SCSI(un) && (tgt >= 0) && (tgt < NTARGETS_WIDE)) {
7428		sd_scsi_update_lun_on_target(pdip, tgt, SD_SCSI_LUN_ATTACH);
7429	}
7430
7431	SD_TRACE(SD_LOG_ATTACH_DETACH, un,
7432	    "sd_unit_attach: un:0x%p exit success\n", un);
7433
7434	return (DDI_SUCCESS);
7435
7436	/*
7437	 * An error occurred during the attach; clean up & return failure.
7438	 */
7439
7440 devid_failed:
7441
7442 setup_pm_failed:
7443	ddi_remove_minor_node(devi, NULL);
7444
7445 cmlb_attach_failed:
7446	/*
7447	 * Cleanup from the scsi_ifsetcap() calls (437868)
7448	 */
7449	(void) scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 0, 1);
7450	(void) scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 0, 1);
7451
7452	/*
7453	 * Refer to the comments on setting tagged-qing at the beginning of
7454	 * sd_unit_attach. We can only disable tagged queuing when there is
7455	 * no lun attached on the target.
7456	 */
7457	if (sd_scsi_get_target_lun_count(pdip, tgt) < 1) {
7458		(void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1);
7459	}
7460
7461	if (un->un_f_is_fibre == FALSE) {
7462		(void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 0, 1);
7463	}
7464
7465 spinup_failed:
7466
7467	mutex_enter(SD_MUTEX(un));
7468
7469	/* Cancel any pending SD_PATH_DIRECT_PRIORITY cmd restart callback */
7470	if (un->un_direct_priority_timeid != NULL) {
7471		timeout_id_t temp_id = un->un_direct_priority_timeid;
7472		un->un_direct_priority_timeid = NULL;
7473		mutex_exit(SD_MUTEX(un));
7474		(void) untimeout(temp_id);
7475		mutex_enter(SD_MUTEX(un));
7476	}
7477
7478	/* Cancel any pending start/stop timeouts */
7479	if (un->un_startstop_timeid != NULL) {
7480		timeout_id_t temp_id = un->un_startstop_timeid;
7481		un->un_startstop_timeid = NULL;
7482		mutex_exit(SD_MUTEX(un));
7483		(void) untimeout(temp_id);
7484		mutex_enter(SD_MUTEX(un));
7485	}
7486
7487	/* Cancel any pending reset-throttle timeouts */
7488	if (un->un_reset_throttle_timeid != NULL) {
7489		timeout_id_t temp_id = un->un_reset_throttle_timeid;
7490		un->un_reset_throttle_timeid = NULL;
7491		mutex_exit(SD_MUTEX(un));
7492		(void) untimeout(temp_id);
7493		mutex_enter(SD_MUTEX(un));
7494	}
7495
7496	/* Cancel any pending retry timeouts */
7497	if (un->un_retry_timeid != NULL) {
7498		timeout_id_t temp_id = un->un_retry_timeid;
7499		un->un_retry_timeid = NULL;
7500		mutex_exit(SD_MUTEX(un));
7501		(void) untimeout(temp_id);
7502		mutex_enter(SD_MUTEX(un));
7503	}
7504
7505	/* Cancel any pending delayed cv broadcast timeouts */
7506	if (un->un_dcvb_timeid != NULL) {
7507		timeout_id_t temp_id = un->un_dcvb_timeid;
7508		un->un_dcvb_timeid = NULL;
7509		mutex_exit(SD_MUTEX(un));
7510		(void) untimeout(temp_id);
7511		mutex_enter(SD_MUTEX(un));
7512	}
7513
7514	mutex_exit(SD_MUTEX(un));
7515
7516	/* There should not be any in-progress I/O so ASSERT this check */
7517	ASSERT(un->un_ncmds_in_transport == 0);
7518	ASSERT(un->un_ncmds_in_driver == 0);
7519
7520	/* Do not free the softstate if the callback routine is active */
7521	sd_sync_with_callback(un);
7522
7523	/*
7524	 * Partition stats apparently are not used with removables. These would
7525	 * not have been created during attach, so no need to clean them up...
7526	 */
7527	if (un->un_errstats != NULL) {
7528		kstat_delete(un->un_errstats);
7529		un->un_errstats = NULL;
7530	}
7531
7532 create_errstats_failed:
7533
7534	if (un->un_stats != NULL) {
7535		kstat_delete(un->un_stats);
7536		un->un_stats = NULL;
7537	}
7538
7539	ddi_xbuf_attr_unregister_devinfo(un->un_xbuf_attr, devi);
7540	ddi_xbuf_attr_destroy(un->un_xbuf_attr);
7541
7542	ddi_prop_remove_all(devi);
7543	sema_destroy(&un->un_semoclose);
7544	cv_destroy(&un->un_state_cv);
7545
7546 getrbuf_failed:
7547
7548	sd_free_rqs(un);
7549
7550 alloc_rqs_failed:
7551
7552	devp->sd_private = NULL;
7553	bzero(un, sizeof (struct sd_lun));	/* Clear any stale data! */
7554
7555 get_softstate_failed:
7556	/*
7557	 * Note: the man pages are unclear as to whether or not doing a
7558	 * ddi_soft_state_free(sd_state, instance) is the right way to
7559	 * clean up after the ddi_soft_state_zalloc() if the subsequent
7560	 * ddi_get_soft_state() fails. The implication seems to be
7561	 * that the get_soft_state cannot fail if the zalloc succeeds.
7562	 */
7563	ddi_soft_state_free(sd_state, instance);
7564
7565 probe_failed:
7566	scsi_unprobe(devp);
7567
7568	return (DDI_FAILURE);
7569 }
7570
7571
7572 /*
7573  * Function:	sd_unit_detach
7574  *
7575  * Description:	Performs DDI_DETACH processing for sddetach().
7576 * 7577 * Return Code: DDI_SUCCESS 7578 * DDI_FAILURE 7579 * 7580 * Context: Kernel thread context 7581 */ 7582 7583 static int 7584 sd_unit_detach(dev_info_t *devi) 7585 { 7586 struct scsi_device *devp; 7587 struct sd_lun *un; 7588 int i; 7589 int tgt; 7590 dev_t dev; 7591 dev_info_t *pdip = ddi_get_parent(devi); 7592 int instance = ddi_get_instance(devi); 7593 7594 mutex_enter(&sd_detach_mutex); 7595 7596 /* 7597 * Fail the detach for any of the following: 7598 * - Unable to get the sd_lun struct for the instance 7599 * - A layered driver has an outstanding open on the instance 7600 * - Another thread is already detaching this instance 7601 * - Another thread is currently performing an open 7602 */ 7603 devp = ddi_get_driver_private(devi); 7604 if ((devp == NULL) || 7605 ((un = (struct sd_lun *)devp->sd_private) == NULL) || 7606 (un->un_ncmds_in_driver != 0) || (un->un_layer_count != 0) || 7607 (un->un_detach_count != 0) || (un->un_opens_in_progress != 0)) { 7608 mutex_exit(&sd_detach_mutex); 7609 return (DDI_FAILURE); 7610 } 7611 7612 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_unit_detach: entry 0x%p\n", un); 7613 7614 /* 7615 * Mark this instance as currently in a detach, to inhibit any 7616 * opens from a layered driver. 7617 */ 7618 un->un_detach_count++; 7619 mutex_exit(&sd_detach_mutex); 7620 7621 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS, 7622 SCSI_ADDR_PROP_TARGET, -1); 7623 7624 dev = sd_make_device(SD_DEVINFO(un)); 7625 7626 #ifndef lint 7627 _NOTE(COMPETING_THREADS_NOW); 7628 #endif 7629 7630 mutex_enter(SD_MUTEX(un)); 7631 7632 /* 7633 * Fail the detach if there are any outstanding layered 7634 * opens on this device. 7635 */ 7636 for (i = 0; i < NDKMAP; i++) { 7637 if (un->un_ocmap.lyropen[i] != 0) { 7638 goto err_notclosed; 7639 } 7640 } 7641 7642 /* 7643 * Verify there are NO outstanding commands issued to this device. 7644 * ie, un_ncmds_in_transport == 0. 7645 * It's possible to have outstanding commands through the physio 7646 * code path, even though everything's closed. 7647 */ 7648 if ((un->un_ncmds_in_transport != 0) || (un->un_retry_timeid != NULL) || 7649 (un->un_direct_priority_timeid != NULL) || 7650 (un->un_state == SD_STATE_RWAIT)) { 7651 mutex_exit(SD_MUTEX(un)); 7652 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 7653 "sd_dr_detach: Detach failure due to outstanding cmds\n"); 7654 goto err_stillbusy; 7655 } 7656 7657 /* 7658 * If we have the device reserved, release the reservation. 7659 */ 7660 if ((un->un_resvd_status & SD_RESERVE) && 7661 !(un->un_resvd_status & SD_LOST_RESERVE)) { 7662 mutex_exit(SD_MUTEX(un)); 7663 /* 7664 * Note: sd_reserve_release sends a command to the device 7665 * via the sd_ioctlcmd() path, and can sleep. 7666 */ 7667 if (sd_reserve_release(dev, SD_RELEASE) != 0) { 7668 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 7669 "sd_dr_detach: Cannot release reservation \n"); 7670 } 7671 } else { 7672 mutex_exit(SD_MUTEX(un)); 7673 } 7674 7675 /* 7676 * Untimeout any reserve recover, throttle reset, restart unit 7677 * and delayed broadcast timeout threads. Protect the timeout pointer 7678 * from getting nulled by their callback functions. 
7679 */ 7680 mutex_enter(SD_MUTEX(un)); 7681 if (un->un_resvd_timeid != NULL) { 7682 timeout_id_t temp_id = un->un_resvd_timeid; 7683 un->un_resvd_timeid = NULL; 7684 mutex_exit(SD_MUTEX(un)); 7685 (void) untimeout(temp_id); 7686 mutex_enter(SD_MUTEX(un)); 7687 } 7688 7689 if (un->un_reset_throttle_timeid != NULL) { 7690 timeout_id_t temp_id = un->un_reset_throttle_timeid; 7691 un->un_reset_throttle_timeid = NULL; 7692 mutex_exit(SD_MUTEX(un)); 7693 (void) untimeout(temp_id); 7694 mutex_enter(SD_MUTEX(un)); 7695 } 7696 7697 if (un->un_startstop_timeid != NULL) { 7698 timeout_id_t temp_id = un->un_startstop_timeid; 7699 un->un_startstop_timeid = NULL; 7700 mutex_exit(SD_MUTEX(un)); 7701 (void) untimeout(temp_id); 7702 mutex_enter(SD_MUTEX(un)); 7703 } 7704 7705 if (un->un_dcvb_timeid != NULL) { 7706 timeout_id_t temp_id = un->un_dcvb_timeid; 7707 un->un_dcvb_timeid = NULL; 7708 mutex_exit(SD_MUTEX(un)); 7709 (void) untimeout(temp_id); 7710 } else { 7711 mutex_exit(SD_MUTEX(un)); 7712 } 7713 7714 /* Remove any pending reservation reclaim requests for this device */ 7715 sd_rmv_resv_reclaim_req(dev); 7716 7717 mutex_enter(SD_MUTEX(un)); 7718 7719 /* Cancel any pending callbacks for SD_PATH_DIRECT_PRIORITY cmd. */ 7720 if (un->un_direct_priority_timeid != NULL) { 7721 timeout_id_t temp_id = un->un_direct_priority_timeid; 7722 un->un_direct_priority_timeid = NULL; 7723 mutex_exit(SD_MUTEX(un)); 7724 (void) untimeout(temp_id); 7725 mutex_enter(SD_MUTEX(un)); 7726 } 7727 7728 /* Cancel any active multi-host disk watch thread requests */ 7729 if (un->un_mhd_token != NULL) { 7730 mutex_exit(SD_MUTEX(un)); 7731 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_mhd_token)); 7732 if (scsi_watch_request_terminate(un->un_mhd_token, 7733 SCSI_WATCH_TERMINATE_NOWAIT)) { 7734 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 7735 "sd_dr_detach: Cannot cancel mhd watch request\n"); 7736 /* 7737 * Note: We are returning here after having removed 7738 * some driver timeouts above. This is consistent with 7739 * the legacy implementation but perhaps the watch 7740 * terminate call should be made with the wait flag set. 7741 */ 7742 goto err_stillbusy; 7743 } 7744 mutex_enter(SD_MUTEX(un)); 7745 un->un_mhd_token = NULL; 7746 } 7747 7748 if (un->un_swr_token != NULL) { 7749 mutex_exit(SD_MUTEX(un)); 7750 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_swr_token)); 7751 if (scsi_watch_request_terminate(un->un_swr_token, 7752 SCSI_WATCH_TERMINATE_NOWAIT)) { 7753 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 7754 "sd_dr_detach: Cannot cancel swr watch request\n"); 7755 /* 7756 * Note: We are returning here after having removed 7757 * some driver timeouts above. This is consistent with 7758 * the legacy implementation but perhaps the watch 7759 * terminate call should be made with the wait flag set. 7760 */ 7761 goto err_stillbusy; 7762 } 7763 mutex_enter(SD_MUTEX(un)); 7764 un->un_swr_token = NULL; 7765 } 7766 7767 mutex_exit(SD_MUTEX(un)); 7768 7769 /* 7770 * Clear any scsi_reset_notifies. We clear the reset notifies 7771 * if we have not registered one. 7772 * Note: The sd_mhd_reset_notify_cb() fn tries to acquire SD_MUTEX! 7773 */ 7774 (void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_CANCEL, 7775 sd_mhd_reset_notify_cb, (caddr_t)un); 7776 7777 /* 7778 * protect the timeout pointers from getting nulled by 7779 * their callback functions during the cancellation process. 7780 * In such a scenario untimeout can be invoked with a null value. 
7781 */ 7782 _NOTE(NO_COMPETING_THREADS_NOW); 7783 7784 mutex_enter(&un->un_pm_mutex); 7785 if (un->un_pm_idle_timeid != NULL) { 7786 timeout_id_t temp_id = un->un_pm_idle_timeid; 7787 un->un_pm_idle_timeid = NULL; 7788 mutex_exit(&un->un_pm_mutex); 7789 7790 /* 7791 * Timeout is active; cancel it. 7792 * Note that it'll never be active on a device 7793 * that does not support PM therefore we don't 7794 * have to check before calling pm_idle_component. 7795 */ 7796 (void) untimeout(temp_id); 7797 (void) pm_idle_component(SD_DEVINFO(un), 0); 7798 mutex_enter(&un->un_pm_mutex); 7799 } 7800 7801 /* 7802 * Check whether there is already a timeout scheduled for power 7803 * management. If yes then don't lower the power here, that's. 7804 * the timeout handler's job. 7805 */ 7806 if (un->un_pm_timeid != NULL) { 7807 timeout_id_t temp_id = un->un_pm_timeid; 7808 un->un_pm_timeid = NULL; 7809 mutex_exit(&un->un_pm_mutex); 7810 /* 7811 * Timeout is active; cancel it. 7812 * Note that it'll never be active on a device 7813 * that does not support PM therefore we don't 7814 * have to check before calling pm_idle_component. 7815 */ 7816 (void) untimeout(temp_id); 7817 (void) pm_idle_component(SD_DEVINFO(un), 0); 7818 7819 } else { 7820 mutex_exit(&un->un_pm_mutex); 7821 if ((un->un_f_pm_is_enabled == TRUE) && 7822 (pm_lower_power(SD_DEVINFO(un), 0, SD_SPINDLE_OFF) != 7823 DDI_SUCCESS)) { 7824 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 7825 "sd_dr_detach: Lower power request failed, ignoring.\n"); 7826 /* 7827 * Fix for bug: 4297749, item # 13 7828 * The above test now includes a check to see if PM is 7829 * supported by this device before call 7830 * pm_lower_power(). 7831 * Note, the following is not dead code. The call to 7832 * pm_lower_power above will generate a call back into 7833 * our sdpower routine which might result in a timeout 7834 * handler getting activated. Therefore the following 7835 * code is valid and necessary. 7836 */ 7837 mutex_enter(&un->un_pm_mutex); 7838 if (un->un_pm_timeid != NULL) { 7839 timeout_id_t temp_id = un->un_pm_timeid; 7840 un->un_pm_timeid = NULL; 7841 mutex_exit(&un->un_pm_mutex); 7842 (void) untimeout(temp_id); 7843 (void) pm_idle_component(SD_DEVINFO(un), 0); 7844 } else { 7845 mutex_exit(&un->un_pm_mutex); 7846 } 7847 } 7848 } 7849 7850 /* 7851 * Cleanup from the scsi_ifsetcap() calls (437868) 7852 * Relocated here from above to be after the call to 7853 * pm_lower_power, which was getting errors. 7854 */ 7855 (void) scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 0, 1); 7856 (void) scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 0, 1); 7857 7858 /* 7859 * Currently, tagged queuing is supported per target based by HBA. 7860 * Setting this per lun instance actually sets the capability of this 7861 * target in HBA, which affects those luns already attached on the 7862 * same target. So during detach, we can only disable this capability 7863 * only when this is the only lun left on this target. By doing 7864 * this, we assume a target has the same tagged queuing capability 7865 * for every lun. The condition can be removed when HBA is changed to 7866 * support per lun based tagged queuing capability. 
7867 */ 7868 if (sd_scsi_get_target_lun_count(pdip, tgt) <= 1) { 7869 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1); 7870 } 7871 7872 if (un->un_f_is_fibre == FALSE) { 7873 (void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 0, 1); 7874 } 7875 7876 /* 7877 * Remove any event callbacks, fibre only 7878 */ 7879 if (un->un_f_is_fibre == TRUE) { 7880 if ((un->un_insert_event != NULL) && 7881 (ddi_remove_event_handler(un->un_insert_cb_id) != 7882 DDI_SUCCESS)) { 7883 /* 7884 * Note: We are returning here after having done 7885 * substantial cleanup above. This is consistent 7886 * with the legacy implementation but this may not 7887 * be the right thing to do. 7888 */ 7889 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 7890 "sd_dr_detach: Cannot cancel insert event\n"); 7891 goto err_remove_event; 7892 } 7893 un->un_insert_event = NULL; 7894 7895 if ((un->un_remove_event != NULL) && 7896 (ddi_remove_event_handler(un->un_remove_cb_id) != 7897 DDI_SUCCESS)) { 7898 /* 7899 * Note: We are returning here after having done 7900 * substantial cleanup above. This is consistent 7901 * with the legacy implementation but this may not 7902 * be the right thing to do. 7903 */ 7904 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 7905 "sd_dr_detach: Cannot cancel remove event\n"); 7906 goto err_remove_event; 7907 } 7908 un->un_remove_event = NULL; 7909 } 7910 7911 /* Do not free the softstate if the callback routine is active */ 7912 sd_sync_with_callback(un); 7913 7914 cmlb_detach(un->un_cmlbhandle, (void *)SD_PATH_DIRECT); 7915 cmlb_free_handle(&un->un_cmlbhandle); 7916 7917 /* 7918 * Hold the detach mutex here, to make sure that no other threads ever 7919 * can access a (partially) freed soft state structure. 7920 */ 7921 mutex_enter(&sd_detach_mutex); 7922 7923 /* 7924 * Clean up the soft state struct. 7925 * Cleanup is done in reverse order of allocs/inits. 7926 * At this point there should be no competing threads anymore. 7927 */ 7928 7929 /* Unregister and free device id. */ 7930 ddi_devid_unregister(devi); 7931 if (un->un_devid) { 7932 ddi_devid_free(un->un_devid); 7933 un->un_devid = NULL; 7934 } 7935 7936 /* 7937 * Destroy wmap cache if it exists. 7938 */ 7939 if (un->un_wm_cache != NULL) { 7940 kmem_cache_destroy(un->un_wm_cache); 7941 un->un_wm_cache = NULL; 7942 } 7943 7944 /* 7945 * kstat cleanup is done in detach for all device types (4363169). 7946 * We do not want to fail detach if the device kstats are not deleted 7947 * since there is a confusion about the devo_refcnt for the device. 7948 * We just delete the kstats and let detach complete successfully. 7949 */ 7950 if (un->un_stats != NULL) { 7951 kstat_delete(un->un_stats); 7952 un->un_stats = NULL; 7953 } 7954 if (un->un_errstats != NULL) { 7955 kstat_delete(un->un_errstats); 7956 un->un_errstats = NULL; 7957 } 7958 7959 /* Remove partition stats */ 7960 if (un->un_f_pkstats_enabled) { 7961 for (i = 0; i < NSDMAP; i++) { 7962 if (un->un_pstats[i] != NULL) { 7963 kstat_delete(un->un_pstats[i]); 7964 un->un_pstats[i] = NULL; 7965 } 7966 } 7967 } 7968 7969 /* Remove xbuf registration */ 7970 ddi_xbuf_attr_unregister_devinfo(un->un_xbuf_attr, devi); 7971 ddi_xbuf_attr_destroy(un->un_xbuf_attr); 7972 7973 /* Remove driver properties */ 7974 ddi_prop_remove_all(devi); 7975 7976 mutex_destroy(&un->un_pm_mutex); 7977 cv_destroy(&un->un_pm_busy_cv); 7978 7979 cv_destroy(&un->un_wcc_cv); 7980 7981 /* Open/close semaphore */ 7982 sema_destroy(&un->un_semoclose); 7983 7984 /* Removable media condvar. 
7985	cv_destroy(&un->un_state_cv);
7986
7987	/* Suspend/resume condvar. */
7988	cv_destroy(&un->un_suspend_cv);
7989	cv_destroy(&un->un_disk_busy_cv);
7990
7991	sd_free_rqs(un);
7992
7993	/* Free up soft state */
7994	devp->sd_private = NULL;
7995
7996	bzero(un, sizeof (struct sd_lun));
7997	ddi_soft_state_free(sd_state, instance);
7998
7999	mutex_exit(&sd_detach_mutex);
8000
8001	/* This frees up the INQUIRY data associated with the device. */
8002	scsi_unprobe(devp);
8003
8004	/*
8005	 * After successfully detaching an instance, we update the information
8006	 * of how many luns have been attached on the corresponding target and
8007	 * controller for parallel SCSI. This information is used when sd tries
8008	 * to set the tagged queuing capability in the HBA.
8009	 * Since un has been released, we can't use SD_IS_PARALLEL_SCSI(un) to
8010	 * check if the device is parallel SCSI. However, we don't need to
8011	 * check here because we've already checked during attach. No device
8012	 * that is not parallel SCSI is in the chain.
8013	 */
8014	if ((tgt >= 0) && (tgt < NTARGETS_WIDE)) {
8015		sd_scsi_update_lun_on_target(pdip, tgt, SD_SCSI_LUN_DETACH);
8016	}
8017
8018	return (DDI_SUCCESS);
8019
8020 err_notclosed:
8021	mutex_exit(SD_MUTEX(un));
8022
8023 err_stillbusy:
8024	_NOTE(NO_COMPETING_THREADS_NOW);
8025
8026 err_remove_event:
8027	mutex_enter(&sd_detach_mutex);
8028	un->un_detach_count--;
8029	mutex_exit(&sd_detach_mutex);
8030
8031	SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_unit_detach: exit failure\n");
8032	return (DDI_FAILURE);
8033 }
8034
8035
8036 /*
8037  * Function:	sd_create_errstats
8038  *
8039  * Description:	This routine instantiates the device error stats.
8040  *
8041  * Note: During attach the stats are instantiated first so they are
8042  *	available for attach-time routines that utilize the driver
8043  *	iopath to send commands to the device. The stats are initialized
8044  *	separately so data obtained during some attach-time routines is
8045  *	available. (4362483)
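 *
 *	The names initialized below are what iostat(1M) prints with its
 *	-E option. An illustrative excerpt, with made-up placeholder
 *	values:
 *
 *	    sd0  Soft Errors: 0 Hard Errors: 0 Transport Errors: 0
 *	    Vendor: ... Product: ... Revision: ... Serial No: ...
 *	    Size: ... <... bytes>
 *	    Media Error: 0 Device Not Ready: 0 No Device: 0 Recoverable: 0
 *	    Illegal Request: 0 Predictive Failure Analysis: 0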
8046  *
8047  * Arguments:	un - driver soft state (unit) structure
8048  *		instance - driver instance
8049  *
8050  * Context:	Kernel thread context
8051  */
8052
8053 static void
8054 sd_create_errstats(struct sd_lun *un, int instance)
8055 {
8056	struct	sd_errstats	*stp;
8057	char	kstatmodule_err[KSTAT_STRLEN];
8058	char	kstatname[KSTAT_STRLEN];
8059	int	ndata = (sizeof (struct sd_errstats) / sizeof (kstat_named_t));
8060
8061	ASSERT(un != NULL);
8062
8063	if (un->un_errstats != NULL) {
8064		return;
8065	}
8066
8067	(void) snprintf(kstatmodule_err, sizeof (kstatmodule_err),
8068	    "%serr", sd_label);
8069	(void) snprintf(kstatname, sizeof (kstatname),
8070	    "%s%d,err", sd_label, instance);
8071
8072	un->un_errstats = kstat_create(kstatmodule_err, instance, kstatname,
8073	    "device_error", KSTAT_TYPE_NAMED, ndata, KSTAT_FLAG_PERSISTENT);
8074
8075	if (un->un_errstats == NULL) {
8076		SD_ERROR(SD_LOG_ATTACH_DETACH, un,
8077		    "sd_create_errstats: Failed kstat_create\n");
8078		return;
8079	}
8080
8081	stp = (struct sd_errstats *)un->un_errstats->ks_data;
8082	kstat_named_init(&stp->sd_softerrs, "Soft Errors",
8083	    KSTAT_DATA_UINT32);
8084	kstat_named_init(&stp->sd_harderrs, "Hard Errors",
8085	    KSTAT_DATA_UINT32);
8086	kstat_named_init(&stp->sd_transerrs, "Transport Errors",
8087	    KSTAT_DATA_UINT32);
8088	kstat_named_init(&stp->sd_vid, "Vendor",
8089	    KSTAT_DATA_CHAR);
8090	kstat_named_init(&stp->sd_pid, "Product",
8091	    KSTAT_DATA_CHAR);
8092	kstat_named_init(&stp->sd_revision, "Revision",
8093	    KSTAT_DATA_CHAR);
8094	kstat_named_init(&stp->sd_serial, "Serial No",
8095	    KSTAT_DATA_CHAR);
8096	kstat_named_init(&stp->sd_capacity, "Size",
8097	    KSTAT_DATA_ULONGLONG);
8098	kstat_named_init(&stp->sd_rq_media_err, "Media Error",
8099	    KSTAT_DATA_UINT32);
8100	kstat_named_init(&stp->sd_rq_ntrdy_err, "Device Not Ready",
8101	    KSTAT_DATA_UINT32);
8102	kstat_named_init(&stp->sd_rq_nodev_err, "No Device",
8103	    KSTAT_DATA_UINT32);
8104	kstat_named_init(&stp->sd_rq_recov_err, "Recoverable",
8105	    KSTAT_DATA_UINT32);
8106	kstat_named_init(&stp->sd_rq_illrq_err, "Illegal Request",
8107	    KSTAT_DATA_UINT32);
8108	kstat_named_init(&stp->sd_rq_pfa_err, "Predictive Failure Analysis",
8109	    KSTAT_DATA_UINT32);
8110
8111	un->un_errstats->ks_private = un;
8112	un->un_errstats->ks_update = nulldev;
8113
8114	kstat_install(un->un_errstats);
8115 }
8116
8117
8118 /*
8119  * Function:	sd_set_errstats
8120  *
8121  * Description:	This routine sets the value of the vendor id, product id,
8122  *	revision, serial number, and capacity device error stats.
8123  *
8124  * Note: During attach the stats are instantiated first so they are
8125  *	available for attach-time routines that utilize the driver
8126  *	iopath to send commands to the device. The stats are initialized
8127  *	separately so data obtained during some attach-time routines is
8128  *	available. (4362483)
8129  *
8130  * Arguments:	un - driver soft state (unit) structure
8131  *
8132  * Context:	Kernel thread context
8133  */
8134
8135 static void
8136 sd_set_errstats(struct sd_lun *un)
8137 {
8138	struct	sd_errstats	*stp;
8139
8140	ASSERT(un != NULL);
8141	ASSERT(un->un_errstats != NULL);
8142	stp = (struct sd_errstats *)un->un_errstats->ks_data;
8143	ASSERT(stp != NULL);
8144	(void) strncpy(stp->sd_vid.value.c, un->un_sd->sd_inq->inq_vid, 8);
8145	(void) strncpy(stp->sd_pid.value.c, un->un_sd->sd_inq->inq_pid, 16);
8146	(void) strncpy(stp->sd_revision.value.c,
8147	    un->un_sd->sd_inq->inq_revision, 4);
8148
8149	/*
8150	 * All the errstats are persistent across detach/attach, so
8151	 * reset them all here in case a disk drive has been hot-
8152	 * replaced, except for Sun-qualified drives that have not
8153	 * changed.
8154	 */
8155	if ((bcmp(&SD_INQUIRY(un)->inq_pid[9], "SUN", 3) != 0) ||
8156	    (bcmp(&SD_INQUIRY(un)->inq_serial, stp->sd_serial.value.c,
8157	    sizeof (SD_INQUIRY(un)->inq_serial)) != 0)) {
8158		stp->sd_softerrs.value.ui32 = 0;
8159		stp->sd_harderrs.value.ui32 = 0;
8160		stp->sd_transerrs.value.ui32 = 0;
8161		stp->sd_rq_media_err.value.ui32 = 0;
8162		stp->sd_rq_ntrdy_err.value.ui32 = 0;
8163		stp->sd_rq_nodev_err.value.ui32 = 0;
8164		stp->sd_rq_recov_err.value.ui32 = 0;
8165		stp->sd_rq_illrq_err.value.ui32 = 0;
8166		stp->sd_rq_pfa_err.value.ui32 = 0;
8167	}
8168
8169	/*
8170	 * Set the "Serial No" kstat for Sun qualified drives (indicated by
8171	 * "SUN" in bytes 25-27 of the inquiry data, i.e. bytes 9-11 of the
8172	 * pid). (4376302)
8173	 */
8174	if (bcmp(&SD_INQUIRY(un)->inq_pid[9], "SUN", 3) == 0) {
8175		bcopy(&SD_INQUIRY(un)->inq_serial, stp->sd_serial.value.c,
8176		    sizeof (SD_INQUIRY(un)->inq_serial));
8177	}
8178
8179	if (un->un_f_blockcount_is_valid != TRUE) {
8180		/*
8181		 * Set capacity error stat to 0 for no media. This ensures
8182		 * a valid capacity is displayed in response to 'iostat -E'
8183		 * when no media is present in the device.
8184		 */
8185		stp->sd_capacity.value.ui64 = 0;
8186	} else {
8187		/*
8188		 * Multiply un_blockcount by un_sys_blocksize to get
8189		 * capacity.
8190		 *
8191		 * Note: for non-512 blocksize devices "un_blockcount" has been
8192		 * "scaled" in sd_send_scsi_READ_CAPACITY by multiplying by
8193		 * (un_tgt_blocksize / un_sys_blocksize).
8194		 */
8195		stp->sd_capacity.value.ui64 = (uint64_t)
8196		    ((uint64_t)un->un_blockcount * un->un_sys_blocksize);
8197	}
8198 }
8199
8200
8201 /*
8202  * Function:	sd_set_pstats
8203  *
8204  * Description:	This routine instantiates and initializes the partition
8205  *	stats for each partition with more than zero blocks.
8206  *	(4363169)
8207  *
8208  * Arguments:	un - driver soft state (unit) structure
8209  *
8210  * Context:	Kernel thread context
8211  */
8212
8213 static void
8214 sd_set_pstats(struct sd_lun *un)
8215 {
8216	char	kstatname[KSTAT_STRLEN];
8217	int	instance;
8218	int	i;
8219	diskaddr_t	nblks = 0;
8220	char	*partname = NULL;
8221
8222	ASSERT(un != NULL);
8223
8224	instance = ddi_get_instance(SD_DEVINFO(un));
8225
8226	/* Note:x86: is this a VTOC8/VTOC16 difference? */
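	/*
	 * The partition kstats created in the loop below are named
	 * "<label><instance>,<partition name>", e.g. "sd0,a" for slice a
	 * of instance 0 (an illustrative name; the actual partition names
	 * come from cmlb_partinfo()).
	 */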
8227	for (i = 0; i < NSDMAP; i++) {
8228
8229		if (cmlb_partinfo(un->un_cmlbhandle, i,
8230		    &nblks, NULL, &partname, NULL, (void *)SD_PATH_DIRECT) != 0)
8231			continue;
8232		mutex_enter(SD_MUTEX(un));
8233
8234		if ((un->un_pstats[i] == NULL) &&
8235		    (nblks != 0)) {
8236
8237			(void) snprintf(kstatname, sizeof (kstatname),
8238			    "%s%d,%s", sd_label, instance,
8239			    partname);
8240
8241			un->un_pstats[i] = kstat_create(sd_label,
8242			    instance, kstatname, "partition", KSTAT_TYPE_IO,
8243			    1, KSTAT_FLAG_PERSISTENT);
8244			if (un->un_pstats[i] != NULL) {
8245				un->un_pstats[i]->ks_lock = SD_MUTEX(un);
8246				kstat_install(un->un_pstats[i]);
8247			}
8248		}
8249		mutex_exit(SD_MUTEX(un));
8250	}
8251 }
8252
8253
8254 #if (defined(__fibre))
8255 /*
8256  * Function:	sd_init_event_callbacks
8257  *
8258  * Description:	This routine initializes the insertion and removal event
8259  *	callbacks. (fibre only)
8260  *
8261  * Arguments:	un - driver soft state (unit) structure
8262  *
8263  * Context:	Kernel thread context
8264  */
8265
8266 static void
8267 sd_init_event_callbacks(struct sd_lun *un)
8268 {
8269	ASSERT(un != NULL);
8270
8271	if ((un->un_insert_event == NULL) &&
8272	    (ddi_get_eventcookie(SD_DEVINFO(un), FCAL_INSERT_EVENT,
8273	    &un->un_insert_event) == DDI_SUCCESS)) {
8274		/*
8275		 * Add the callback for an insertion event
8276		 */
8277		(void) ddi_add_event_handler(SD_DEVINFO(un),
8278		    un->un_insert_event, sd_event_callback, (void *)un,
8279		    &(un->un_insert_cb_id));
8280	}
8281
8282	if ((un->un_remove_event == NULL) &&
8283	    (ddi_get_eventcookie(SD_DEVINFO(un), FCAL_REMOVE_EVENT,
8284	    &un->un_remove_event) == DDI_SUCCESS)) {
8285		/*
8286		 * Add the callback for a removal event
8287		 */
8288		(void) ddi_add_event_handler(SD_DEVINFO(un),
8289		    un->un_remove_event, sd_event_callback, (void *)un,
8290		    &(un->un_remove_cb_id));
8291	}
8292 }
8293
8294
8295 /*
8296  * Function:	sd_event_callback
8297  *
8298  * Description:	This routine handles insert/remove events (photon). The
8299  *	state is changed to OFFLINE which can be used to suppress
8300  *	error msgs. (fibre only)
8301  *
8302  * Arguments:	un - driver soft state (unit) structure
8303  *
8304  * Context:	Callout thread context
8305  */
8306 /* ARGSUSED */
8307 static void
8308 sd_event_callback(dev_info_t *dip, ddi_eventcookie_t event, void *arg,
8309     void *bus_impldata)
8310 {
8311	struct sd_lun *un = (struct sd_lun *)arg;
8312
8313	_NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_insert_event));
8314	if (event == un->un_insert_event) {
8315		SD_TRACE(SD_LOG_COMMON, un, "sd_event_callback: insert event");
8316		mutex_enter(SD_MUTEX(un));
8317		if (un->un_state == SD_STATE_OFFLINE) {
8318			if (un->un_last_state != SD_STATE_SUSPENDED) {
8319				un->un_state = un->un_last_state;
8320			} else {
8321				/*
8322				 * We have gone through SUSPEND/RESUME while
8323				 * we were offline. Restore the last state.
8324				 */
8325				un->un_state = un->un_save_state;
8326			}
8327		}
8328		mutex_exit(SD_MUTEX(un));
8329
8330		_NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_remove_event));
8331	} else if (event == un->un_remove_event) {
8332		SD_TRACE(SD_LOG_COMMON, un, "sd_event_callback: remove event");
8333		mutex_enter(SD_MUTEX(un));
8334		/*
8335		 * We need to handle an event callback that occurs during
8336		 * the suspend operation, since we don't prevent it.
8337 */ 8338 if (un->un_state != SD_STATE_OFFLINE) { 8339 if (un->un_state != SD_STATE_SUSPENDED) { 8340 New_state(un, SD_STATE_OFFLINE); 8341 } else { 8342 un->un_last_state = SD_STATE_OFFLINE; 8343 } 8344 } 8345 mutex_exit(SD_MUTEX(un)); 8346 } else { 8347 scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE, 8348 "!Unknown event\n"); 8349 } 8350 8351 } 8352 #endif 8353 8354 /* 8355 * Function: sd_cache_control() 8356 * 8357 * Description: This routine is the driver entry point for setting 8358 * read and write caching by modifying the WCE (write cache 8359 * enable) and RCD (read cache disable) bits of mode 8360 * page 8 (MODEPAGE_CACHING). 8361 * 8362 * Arguments: un - driver soft state (unit) structure 8363 * rcd_flag - flag for controlling the read cache 8364 * wce_flag - flag for controlling the write cache 8365 * 8366 * Return Code: EIO 8367 * code returned by sd_send_scsi_MODE_SENSE and 8368 * sd_send_scsi_MODE_SELECT 8369 * 8370 * Context: Kernel Thread 8371 */ 8372 8373 static int 8374 sd_cache_control(struct sd_lun *un, int rcd_flag, int wce_flag) 8375 { 8376 struct mode_caching *mode_caching_page; 8377 uchar_t *header; 8378 size_t buflen; 8379 int hdrlen; 8380 int bd_len; 8381 int rval = 0; 8382 struct mode_header_grp2 *mhp; 8383 8384 ASSERT(un != NULL); 8385 8386 /* 8387 * Do a test unit ready, otherwise a mode sense may not work if this 8388 * is the first command sent to the device after boot. 8389 */ 8390 (void) sd_send_scsi_TEST_UNIT_READY(un, 0); 8391 8392 if (un->un_f_cfg_is_atapi == TRUE) { 8393 hdrlen = MODE_HEADER_LENGTH_GRP2; 8394 } else { 8395 hdrlen = MODE_HEADER_LENGTH; 8396 } 8397 8398 /* 8399 * Allocate memory for the retrieved mode page and its headers. Set 8400 * a pointer to the page itself. Use mode_cache_scsi3 to insure 8401 * we get all of the mode sense data otherwise, the mode select 8402 * will fail. mode_cache_scsi3 is a superset of mode_caching. 8403 */ 8404 buflen = hdrlen + MODE_BLK_DESC_LENGTH + 8405 sizeof (struct mode_cache_scsi3); 8406 8407 header = kmem_zalloc(buflen, KM_SLEEP); 8408 8409 /* Get the information from the device. */ 8410 if (un->un_f_cfg_is_atapi == TRUE) { 8411 rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, header, buflen, 8412 MODEPAGE_CACHING, SD_PATH_DIRECT); 8413 } else { 8414 rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, header, buflen, 8415 MODEPAGE_CACHING, SD_PATH_DIRECT); 8416 } 8417 if (rval != 0) { 8418 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 8419 "sd_cache_control: Mode Sense Failed\n"); 8420 kmem_free(header, buflen); 8421 return (rval); 8422 } 8423 8424 /* 8425 * Determine size of Block Descriptors in order to locate 8426 * the mode page data. ATAPI devices return 0, SCSI devices 8427 * should return MODE_BLK_DESC_LENGTH. 
8428 */ 8429 if (un->un_f_cfg_is_atapi == TRUE) { 8430 mhp = (struct mode_header_grp2 *)header; 8431 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 8432 } else { 8433 bd_len = ((struct mode_header *)header)->bdesc_length; 8434 } 8435 8436 if (bd_len > MODE_BLK_DESC_LENGTH) { 8437 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 8438 "sd_cache_control: Mode Sense returned invalid " 8439 "block descriptor length\n"); 8440 kmem_free(header, buflen); 8441 return (EIO); 8442 } 8443 8444 mode_caching_page = (struct mode_caching *)(header + hdrlen + bd_len); 8445 if (mode_caching_page->mode_page.code != MODEPAGE_CACHING) { 8446 SD_ERROR(SD_LOG_COMMON, un, "sd_cache_control: Mode Sense" 8447 " caching page code mismatch %d\n", 8448 mode_caching_page->mode_page.code); 8449 kmem_free(header, buflen); 8450 return (EIO); 8451 } 8452 8453 /* Check the relevant bits on successful mode sense. */ 8454 if ((mode_caching_page->rcd && rcd_flag == SD_CACHE_ENABLE) || 8455 (!mode_caching_page->rcd && rcd_flag == SD_CACHE_DISABLE) || 8456 (mode_caching_page->wce && wce_flag == SD_CACHE_DISABLE) || 8457 (!mode_caching_page->wce && wce_flag == SD_CACHE_ENABLE)) { 8458 8459 size_t sbuflen; 8460 uchar_t save_pg; 8461 8462 /* 8463 * Construct select buffer length based on the 8464 * length of the sense data returned. 8465 */ 8466 sbuflen = hdrlen + MODE_BLK_DESC_LENGTH + 8467 sizeof (struct mode_page) + 8468 (int)mode_caching_page->mode_page.length; 8469 8470 /* 8471 * Set the caching bits as requested. 8472 */ 8473 if (rcd_flag == SD_CACHE_ENABLE) 8474 mode_caching_page->rcd = 0; 8475 else if (rcd_flag == SD_CACHE_DISABLE) 8476 mode_caching_page->rcd = 1; 8477 8478 if (wce_flag == SD_CACHE_ENABLE) 8479 mode_caching_page->wce = 1; 8480 else if (wce_flag == SD_CACHE_DISABLE) 8481 mode_caching_page->wce = 0; 8482 8483 /* 8484 * Save the page if the mode sense says the 8485 * drive supports it. 8486 */ 8487 save_pg = mode_caching_page->mode_page.ps ? 8488 SD_SAVE_PAGE : SD_DONTSAVE_PAGE; 8489 8490 /* Clear reserved bits before mode select. */ 8491 mode_caching_page->mode_page.ps = 0; 8492 8493 /* 8494 * Clear out mode header for mode select. 8495 * The rest of the retrieved page will be reused. 8496 */ 8497 bzero(header, hdrlen); 8498 8499 if (un->un_f_cfg_is_atapi == TRUE) { 8500 mhp = (struct mode_header_grp2 *)header; 8501 mhp->bdesc_length_hi = bd_len >> 8; 8502 mhp->bdesc_length_lo = (uchar_t)bd_len & 0xff; 8503 } else { 8504 ((struct mode_header *)header)->bdesc_length = bd_len; 8505 } 8506 8507 /* Issue mode select to change the cache settings */ 8508 if (un->un_f_cfg_is_atapi == TRUE) { 8509 rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP1, header, 8510 sbuflen, save_pg, SD_PATH_DIRECT); 8511 } else { 8512 rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, header, 8513 sbuflen, save_pg, SD_PATH_DIRECT); 8514 } 8515 } 8516 8517 kmem_free(header, buflen); 8518 return (rval); 8519 } 8520 8521 8522 /* 8523 * Function: sd_get_write_cache_enabled() 8524 * 8525 * Description: This routine is the driver entry point for determining if 8526 * write caching is enabled. It examines the WCE (write cache 8527 * enable) bits of mode page 8 (MODEPAGE_CACHING). 
8528 * 8529 * Arguments: un - driver soft state (unit) structure 8530 * is_enabled - pointer to int where write cache enabled state 8531 * is returned (non-zero -> write cache enabled) 8532 * 8533 * 8534 * Return Code: EIO 8535 * code returned by sd_send_scsi_MODE_SENSE 8536 * 8537 * Context: Kernel Thread 8538 * 8539 * NOTE: If ioctl is added to disable write cache, this sequence should 8540 * be followed so that no locking is required for accesses to 8541 * un->un_f_write_cache_enabled: 8542 * do mode select to clear wce 8543 * do synchronize cache to flush cache 8544 * set un->un_f_write_cache_enabled = FALSE 8545 * 8546 * Conversely, an ioctl to enable the write cache should be done 8547 * in this order: 8548 * set un->un_f_write_cache_enabled = TRUE 8549 * do mode select to set wce 8550 */ 8551 8552 static int 8553 sd_get_write_cache_enabled(struct sd_lun *un, int *is_enabled) 8554 { 8555 struct mode_caching *mode_caching_page; 8556 uchar_t *header; 8557 size_t buflen; 8558 int hdrlen; 8559 int bd_len; 8560 int rval = 0; 8561 8562 ASSERT(un != NULL); 8563 ASSERT(is_enabled != NULL); 8564 8565 /* in case of error, flag as enabled */ 8566 *is_enabled = TRUE; 8567 8568 /* 8569 * Do a test unit ready, otherwise a mode sense may not work if this 8570 * is the first command sent to the device after boot. 8571 */ 8572 (void) sd_send_scsi_TEST_UNIT_READY(un, 0); 8573 8574 if (un->un_f_cfg_is_atapi == TRUE) { 8575 hdrlen = MODE_HEADER_LENGTH_GRP2; 8576 } else { 8577 hdrlen = MODE_HEADER_LENGTH; 8578 } 8579 8580 /* 8581 * Allocate memory for the retrieved mode page and its headers. Set 8582 * a pointer to the page itself. 8583 */ 8584 buflen = hdrlen + MODE_BLK_DESC_LENGTH + sizeof (struct mode_caching); 8585 header = kmem_zalloc(buflen, KM_SLEEP); 8586 8587 /* Get the information from the device. */ 8588 if (un->un_f_cfg_is_atapi == TRUE) { 8589 rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, header, buflen, 8590 MODEPAGE_CACHING, SD_PATH_DIRECT); 8591 } else { 8592 rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, header, buflen, 8593 MODEPAGE_CACHING, SD_PATH_DIRECT); 8594 } 8595 if (rval != 0) { 8596 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 8597 "sd_get_write_cache_enabled: Mode Sense Failed\n"); 8598 kmem_free(header, buflen); 8599 return (rval); 8600 } 8601 8602 /* 8603 * Determine size of Block Descriptors in order to locate 8604 * the mode page data. ATAPI devices return 0, SCSI devices 8605 * should return MODE_BLK_DESC_LENGTH. 
8606 */ 8607 if (un->un_f_cfg_is_atapi == TRUE) { 8608 struct mode_header_grp2 *mhp; 8609 mhp = (struct mode_header_grp2 *)header; 8610 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 8611 } else { 8612 bd_len = ((struct mode_header *)header)->bdesc_length; 8613 } 8614 8615 if (bd_len > MODE_BLK_DESC_LENGTH) { 8616 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 8617 "sd_get_write_cache_enabled: Mode Sense returned invalid " 8618 "block descriptor length\n"); 8619 kmem_free(header, buflen); 8620 return (EIO); 8621 } 8622 8623 mode_caching_page = (struct mode_caching *)(header + hdrlen + bd_len); 8624 if (mode_caching_page->mode_page.code != MODEPAGE_CACHING) { 8625 SD_ERROR(SD_LOG_COMMON, un, "sd_cache_control: Mode Sense" 8626 " caching page code mismatch %d\n", 8627 mode_caching_page->mode_page.code); 8628 kmem_free(header, buflen); 8629 return (EIO); 8630 } 8631 *is_enabled = mode_caching_page->wce; 8632 8633 kmem_free(header, buflen); 8634 return (0); 8635 } 8636 8637 /* 8638 * Function: sd_get_nv_sup() 8639 * 8640 * Description: This routine is the driver entry point for 8641 * determining whether non-volatile cache is supported. This 8642 * determination process works as follows: 8643 * 8644 * 1. sd first queries sd.conf on whether 8645 * suppress_cache_flush bit is set for this device. 8646 * 8647 * 2. if not there, then queries the internal disk table. 8648 * 8649 * 3. if either sd.conf or internal disk table specifies 8650 * cache flush be suppressed, we don't bother checking 8651 * NV_SUP bit. 8652 * 8653 * If SUPPRESS_CACHE_FLUSH bit is not set to 1, sd queries 8654 * the optional INQUIRY VPD page 0x86. If the device 8655 * supports VPD page 0x86, sd examines the NV_SUP 8656 * (non-volatile cache support) bit in the INQUIRY VPD page 8657 * 0x86: 8658 * o If NV_SUP bit is set, sd assumes the device has a 8659 * non-volatile cache and set the 8660 * un_f_sync_nv_supported to TRUE. 8661 * o Otherwise cache is not non-volatile, 8662 * un_f_sync_nv_supported is set to FALSE. 8663 * 8664 * Arguments: un - driver soft state (unit) structure 8665 * 8666 * Return Code: 8667 * 8668 * Context: Kernel Thread 8669 */ 8670 8671 static void 8672 sd_get_nv_sup(struct sd_lun *un) 8673 { 8674 int rval = 0; 8675 uchar_t *inq86 = NULL; 8676 size_t inq86_len = MAX_INQUIRY_SIZE; 8677 size_t inq86_resid = 0; 8678 struct dk_callback *dkc; 8679 8680 ASSERT(un != NULL); 8681 8682 mutex_enter(SD_MUTEX(un)); 8683 8684 /* 8685 * Be conservative on the device's support of 8686 * SYNC_NV bit: un_f_sync_nv_supported is 8687 * initialized to be false. 8688 */ 8689 un->un_f_sync_nv_supported = FALSE; 8690 8691 /* 8692 * If either sd.conf or internal disk table 8693 * specifies cache flush be suppressed, then 8694 * we don't bother checking NV_SUP bit. 
8695 */ 8696 if (un->un_f_suppress_cache_flush == TRUE) { 8697 mutex_exit(SD_MUTEX(un)); 8698 return; 8699 } 8700 8701 if (sd_check_vpd_page_support(un) == 0 && 8702 un->un_vpd_page_mask & SD_VPD_EXTENDED_DATA_PG) { 8703 mutex_exit(SD_MUTEX(un)); 8704 /* collect page 86 data if available */ 8705 inq86 = kmem_zalloc(inq86_len, KM_SLEEP); 8706 rval = sd_send_scsi_INQUIRY(un, inq86, inq86_len, 8707 0x01, 0x86, &inq86_resid); 8708 8709 if (rval == 0 && (inq86_len - inq86_resid > 6)) { 8710 SD_TRACE(SD_LOG_COMMON, un, 8711 "sd_get_nv_sup: \ 8712 successfully get VPD page: %x \ 8713 PAGE LENGTH: %x BYTE 6: %x\n", 8714 inq86[1], inq86[3], inq86[6]); 8715 8716 mutex_enter(SD_MUTEX(un)); 8717 /* 8718 * check the value of NV_SUP bit: only if the device 8719 * reports NV_SUP bit to be 1, the 8720 * un_f_sync_nv_supported bit will be set to true. 8721 */ 8722 if (inq86[6] & SD_VPD_NV_SUP) { 8723 un->un_f_sync_nv_supported = TRUE; 8724 } 8725 mutex_exit(SD_MUTEX(un)); 8726 } 8727 kmem_free(inq86, inq86_len); 8728 } else { 8729 mutex_exit(SD_MUTEX(un)); 8730 } 8731 8732 /* 8733 * Send a SYNC CACHE command to check whether 8734 * SYNC_NV bit is supported. This command should have 8735 * un_f_sync_nv_supported set to correct value. 8736 */ 8737 mutex_enter(SD_MUTEX(un)); 8738 if (un->un_f_sync_nv_supported) { 8739 mutex_exit(SD_MUTEX(un)); 8740 dkc = kmem_zalloc(sizeof (struct dk_callback), KM_SLEEP); 8741 dkc->dkc_flag = FLUSH_VOLATILE; 8742 (void) sd_send_scsi_SYNCHRONIZE_CACHE(un, dkc); 8743 8744 /* 8745 * Send a TEST UNIT READY command to the device. This should 8746 * clear any outstanding UNIT ATTENTION that may be present. 8747 */ 8748 (void) sd_send_scsi_TEST_UNIT_READY(un, SD_DONT_RETRY_TUR); 8749 8750 kmem_free(dkc, sizeof (struct dk_callback)); 8751 } else { 8752 mutex_exit(SD_MUTEX(un)); 8753 } 8754 8755 SD_TRACE(SD_LOG_COMMON, un, "sd_get_nv_sup: \ 8756 un_f_suppress_cache_flush is set to %d\n", 8757 un->un_f_suppress_cache_flush); 8758 } 8759 8760 /* 8761 * Function: sd_make_device 8762 * 8763 * Description: Utility routine to return the Solaris device number from 8764 * the data in the device's dev_info structure. 8765 * 8766 * Return Code: The Solaris device number 8767 * 8768 * Context: Any 8769 */ 8770 8771 static dev_t 8772 sd_make_device(dev_info_t *devi) 8773 { 8774 return (makedevice(ddi_name_to_major(ddi_get_name(devi)), 8775 ddi_get_instance(devi) << SDUNIT_SHIFT)); 8776 } 8777 8778 8779 /* 8780 * Function: sd_pm_entry 8781 * 8782 * Description: Called at the start of a new command to manage power 8783 * and busy status of a device. This includes determining whether 8784 * the current power state of the device is sufficient for 8785 * performing the command or whether it must be changed. 8786 * The PM framework is notified appropriately. 8787 * Only with a return status of DDI_SUCCESS will the 8788 * component be busy to the framework. 8789 * 8790 * All callers of sd_pm_entry must check the return status 8791 * and only call sd_pm_exit it it was DDI_SUCCESS. A status 8792 * of DDI_FAILURE indicates the device failed to power up. 8793 * In this case un_pm_count has been adjusted so the result 8794 * on exit is still powered down, ie. count is less than 0. 8795 * Calling sd_pm_exit with this count value hits an ASSERT. 8796 * 8797 * Return Code: DDI_SUCCESS or DDI_FAILURE 8798 * 8799 * Context: Kernel thread context. 
8800 */ 8801 8802 static int 8803 sd_pm_entry(struct sd_lun *un) 8804 { 8805 int return_status = DDI_SUCCESS; 8806 8807 ASSERT(!mutex_owned(SD_MUTEX(un))); 8808 ASSERT(!mutex_owned(&un->un_pm_mutex)); 8809 8810 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_entry: entry\n"); 8811 8812 if (un->un_f_pm_is_enabled == FALSE) { 8813 SD_TRACE(SD_LOG_IO_PM, un, 8814 "sd_pm_entry: exiting, PM not enabled\n"); 8815 return (return_status); 8816 } 8817 8818 /* 8819 * Just increment a counter if PM is enabled. On the transition from 8820 * 0 ==> 1, mark the device as busy. The iodone side will decrement 8821 * the count with each IO and mark the device as idle when the count 8822 * hits 0. 8823 * 8824 * If the count is less than 0 the device is powered down. If a powered 8825 * down device is successfully powered up then the count must be 8826 * incremented to reflect the power up. Note that it'll get incremented 8827 * a second time to become busy. 8828 * 8829 * Because the following has the potential to change the device state 8830 * and must release the un_pm_mutex to do so, only one thread can be 8831 * allowed through at a time. 8832 */ 8833 8834 mutex_enter(&un->un_pm_mutex); 8835 while (un->un_pm_busy == TRUE) { 8836 cv_wait(&un->un_pm_busy_cv, &un->un_pm_mutex); 8837 } 8838 un->un_pm_busy = TRUE; 8839 8840 if (un->un_pm_count < 1) { 8841 8842 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_entry: busy component\n"); 8843 8844 /* 8845 * Indicate we are now busy so the framework won't attempt to 8846 * power down the device. This call will only fail if either 8847 * we passed a bad component number or the device has no 8848 * components. Neither of these should ever happen. 8849 */ 8850 mutex_exit(&un->un_pm_mutex); 8851 return_status = pm_busy_component(SD_DEVINFO(un), 0); 8852 ASSERT(return_status == DDI_SUCCESS); 8853 8854 mutex_enter(&un->un_pm_mutex); 8855 8856 if (un->un_pm_count < 0) { 8857 mutex_exit(&un->un_pm_mutex); 8858 8859 SD_TRACE(SD_LOG_IO_PM, un, 8860 "sd_pm_entry: power up component\n"); 8861 8862 /* 8863 * pm_raise_power will cause sdpower to be called 8864 * which brings the device power level to the 8865 * desired state, ON in this case. If successful, 8866 * un_pm_count and un_power_level will be updated 8867 * appropriately. 8868 */ 8869 return_status = pm_raise_power(SD_DEVINFO(un), 0, 8870 SD_SPINDLE_ON); 8871 8872 mutex_enter(&un->un_pm_mutex); 8873 8874 if (return_status != DDI_SUCCESS) { 8875 /* 8876 * Power up failed. 8877 * Idle the device and adjust the count 8878 * so the result on exit is that we're 8879 * still powered down, i.e. the count is less than 0. 8880 */ 8881 SD_TRACE(SD_LOG_IO_PM, un, 8882 "sd_pm_entry: power up failed," 8883 " idle the component\n"); 8884 8885 (void) pm_idle_component(SD_DEVINFO(un), 0); 8886 un->un_pm_count--; 8887 } else { 8888 /* 8889 * Device is powered up; verify the 8890 * count is now exactly zero. 8891 * This is debug only. 8892 */ 8893 ASSERT(un->un_pm_count == 0); 8894 } 8895 } 8896 8897 if (return_status == DDI_SUCCESS) { 8898 /* 8899 * For performance, now that the device has been tagged 8900 * as busy, and it's known to be powered up, update the 8901 * chain types to use jump tables that do not include 8902 * pm. This significantly lowers the overhead and 8903 * therefore improves performance.
8904 */ 8905 8906 mutex_exit(&un->un_pm_mutex); 8907 mutex_enter(SD_MUTEX(un)); 8908 SD_TRACE(SD_LOG_IO_PM, un, 8909 "sd_pm_entry: changing uscsi_chain_type from %d\n", 8910 un->un_uscsi_chain_type); 8911 8912 if (un->un_f_non_devbsize_supported) { 8913 un->un_buf_chain_type = 8914 SD_CHAIN_INFO_RMMEDIA_NO_PM; 8915 } else { 8916 un->un_buf_chain_type = 8917 SD_CHAIN_INFO_DISK_NO_PM; 8918 } 8919 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD_NO_PM; 8920 8921 SD_TRACE(SD_LOG_IO_PM, un, 8922 " changed uscsi_chain_type to %d\n", 8923 un->un_uscsi_chain_type); 8924 mutex_exit(SD_MUTEX(un)); 8925 mutex_enter(&un->un_pm_mutex); 8926 8927 if (un->un_pm_idle_timeid == NULL) { 8928 /* 300 ms. */ 8929 un->un_pm_idle_timeid = 8930 timeout(sd_pm_idletimeout_handler, un, 8931 (drv_usectohz((clock_t)300000))); 8932 /* 8933 * Include an extra call to busy which keeps the 8934 * device busy with-respect-to the PM layer 8935 * until the timer fires, at which time it'll 8936 * get the extra idle call. 8937 */ 8938 (void) pm_busy_component(SD_DEVINFO(un), 0); 8939 } 8940 } 8941 } 8942 un->un_pm_busy = FALSE; 8943 /* Next... */ 8944 cv_signal(&un->un_pm_busy_cv); 8945 8946 un->un_pm_count++; 8947 8948 SD_TRACE(SD_LOG_IO_PM, un, 8949 "sd_pm_entry: exiting, un_pm_count = %d\n", un->un_pm_count); 8950 8951 mutex_exit(&un->un_pm_mutex); 8952 8953 return (return_status); 8954 } 8955 8956 8957 /* 8958 * Function: sd_pm_exit 8959 * 8960 * Description: Called at the completion of a command to manage busy 8961 * status for the device. If the device becomes idle the 8962 * PM framework is notified. 8963 * 8964 * Context: Kernel thread context 8965 */ 8966 8967 static void 8968 sd_pm_exit(struct sd_lun *un) 8969 { 8970 ASSERT(!mutex_owned(SD_MUTEX(un))); 8971 ASSERT(!mutex_owned(&un->un_pm_mutex)); 8972 8973 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_exit: entry\n"); 8974 8975 /* 8976 * After attach the following flag is only read, so don't 8977 * take the penalty of acquiring a mutex for it. 8978 */ 8979 if (un->un_f_pm_is_enabled == TRUE) { 8980 8981 mutex_enter(&un->un_pm_mutex); 8982 un->un_pm_count--; 8983 8984 SD_TRACE(SD_LOG_IO_PM, un, 8985 "sd_pm_exit: un_pm_count = %d\n", un->un_pm_count); 8986 8987 ASSERT(un->un_pm_count >= 0); 8988 if (un->un_pm_count == 0) { 8989 mutex_exit(&un->un_pm_mutex); 8990 8991 SD_TRACE(SD_LOG_IO_PM, un, 8992 "sd_pm_exit: idle component\n"); 8993 8994 (void) pm_idle_component(SD_DEVINFO(un), 0); 8995 8996 } else { 8997 mutex_exit(&un->un_pm_mutex); 8998 } 8999 } 9000 9001 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_exit: exiting\n"); 9002 } 9003 9004 9005 /* 9006 * Function: sdopen 9007 * 9008 * Description: Driver's open(9e) entry point function. 
9009 * 9010 * Arguments: dev_p - pointer to device number 9011 * flag - how to open file (FEXCL, FNDELAY, FREAD, FWRITE) 9012 * otyp - open type (OTYP_BLK, OTYP_CHR, OTYP_LYR) 9013 * cred_p - user credential pointer 9014 * 9015 * Return Code: EINVAL 9016 * ENXIO 9017 * EIO 9018 * EROFS 9019 * EBUSY 9020 * 9021 * Context: Kernel thread context 9022 */ 9023 /* ARGSUSED */ 9024 static int 9025 sdopen(dev_t *dev_p, int flag, int otyp, cred_t *cred_p) 9026 { 9027 struct sd_lun *un; 9028 int nodelay; 9029 int part; 9030 uint64_t partmask; 9031 int instance; 9032 dev_t dev; 9033 int rval = EIO; 9034 diskaddr_t nblks = 0; 9035 9036 /* Validate the open type */ 9037 if (otyp >= OTYPCNT) { 9038 return (EINVAL); 9039 } 9040 9041 dev = *dev_p; 9042 instance = SDUNIT(dev); 9043 mutex_enter(&sd_detach_mutex); 9044 9045 /* 9046 * Fail the open if there is no softstate for the instance, or 9047 * if another thread somewhere is trying to detach the instance. 9048 */ 9049 if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) || 9050 (un->un_detach_count != 0)) { 9051 mutex_exit(&sd_detach_mutex); 9052 /* 9053 * The probe cache only needs to be cleared when open(9e) fails 9054 * with ENXIO (4238046). 9055 */ 9056 /* 9057 * Unconditionally clearing the probe cache is fine with 9058 * separate sd/ssd binaries; on the x86 platform, where both 9059 * parallel and fibre are handled in one binary, it can be 9060 * an issue. 9061 */ 9062 sd_scsi_clear_probe_cache(); 9063 return (ENXIO); 9064 } 9065 9066 /* 9067 * The un_layer_count is to prevent another thread in specfs from 9068 * trying to detach the instance, which can happen when we are 9069 * called from a higher-layer driver instead of thru specfs. 9070 * This will not be needed when DDI provides a layered driver 9071 * interface that allows specfs to know that an instance is in 9072 * use by a layered driver & should not be detached. 9073 * 9074 * Note: the semantics for layered driver opens are exactly one 9075 * close for every open. 9076 */ 9077 if (otyp == OTYP_LYR) { 9078 un->un_layer_count++; 9079 } 9080 9081 /* 9082 * Keep a count of the current # of opens in progress. This is because 9083 * some layered drivers try to call us as a regular open. This can 9084 * cause problems that we cannot prevent; however, by keeping this count 9085 * we can at least keep our open and detach routines from racing against 9086 * each other under such conditions. 9087 */ 9088 un->un_opens_in_progress++; 9089 mutex_exit(&sd_detach_mutex); 9090 9091 nodelay = (flag & (FNDELAY | FNONBLOCK)); 9092 part = SDPART(dev); 9093 partmask = 1 << part; 9094 9095 /* 9096 * We use a semaphore here in order to serialize 9097 * open and close requests on the device. 9098 */ 9099 sema_p(&un->un_semoclose); 9100 9101 mutex_enter(SD_MUTEX(un)); 9102 9103 /* 9104 * All device accesses go thru sdstrategy(), where we check 9105 * the suspend status; but a scsi_poll command can bypass 9106 * sdstrategy(), so we also need to check the pm 9107 * status here.
9108 */ 9109 9110 if (!nodelay) { 9111 while ((un->un_state == SD_STATE_SUSPENDED) || 9112 (un->un_state == SD_STATE_PM_CHANGING)) { 9113 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 9114 } 9115 9116 mutex_exit(SD_MUTEX(un)); 9117 if (sd_pm_entry(un) != DDI_SUCCESS) { 9118 rval = EIO; 9119 SD_ERROR(SD_LOG_OPEN_CLOSE, un, 9120 "sdopen: sd_pm_entry failed\n"); 9121 goto open_failed_with_pm; 9122 } 9123 mutex_enter(SD_MUTEX(un)); 9124 } 9125 9126 /* check for previous exclusive open */ 9127 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: un=%p\n", (void *)un); 9128 SD_TRACE(SD_LOG_OPEN_CLOSE, un, 9129 "sdopen: exclopen=%x, flag=%x, regopen=%x\n", 9130 un->un_exclopen, flag, un->un_ocmap.regopen[otyp]); 9131 9132 if (un->un_exclopen & (partmask)) { 9133 goto excl_open_fail; 9134 } 9135 9136 if (flag & FEXCL) { 9137 int i; 9138 if (un->un_ocmap.lyropen[part]) { 9139 goto excl_open_fail; 9140 } 9141 for (i = 0; i < (OTYPCNT - 1); i++) { 9142 if (un->un_ocmap.regopen[i] & (partmask)) { 9143 goto excl_open_fail; 9144 } 9145 } 9146 } 9147 9148 /* 9149 * Check the write permission if this is a removable media device, 9150 * NDELAY has not been set, and writable permission is requested. 9151 * 9152 * Note: If NDELAY was set and this is write-protected media, the WRITE 9153 * attempt will fail with EIO as part of the I/O processing. This is a 9154 * more permissive implementation that allows the open to succeed and 9155 * WRITE attempts to fail when appropriate. 9156 */ 9157 if (un->un_f_chk_wp_open) { 9158 if ((flag & FWRITE) && (!nodelay)) { 9159 mutex_exit(SD_MUTEX(un)); 9160 /* 9161 * For a writable DVD drive, defer the write-permission 9162 * check until sdstrategy; do not fail the open even 9163 * if FWRITE is set, as the device may be writable 9164 * depending upon the media, and the media can change 9165 * after the call to open(). 9166 */ 9167 if (un->un_f_dvdram_writable_device == FALSE) { 9168 if (ISCD(un) || sr_check_wp(dev)) { 9169 rval = EROFS; 9170 mutex_enter(SD_MUTEX(un)); 9171 SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: " 9172 "write to cd or write protected media\n"); 9173 goto open_fail; 9174 } 9175 } 9176 mutex_enter(SD_MUTEX(un)); 9177 } 9178 } 9179 9180 /* 9181 * If opening in NDELAY/NONBLOCK mode, just return. 9182 * Check if disk is ready and has a valid geometry later. 9183 */ 9184 if (!nodelay) { 9185 mutex_exit(SD_MUTEX(un)); 9186 rval = sd_ready_and_valid(un); 9187 mutex_enter(SD_MUTEX(un)); 9188 /* 9189 * Fail if device is not ready or if the number of disk 9190 * blocks is zero or negative for non-CD devices. 9191 */ 9192 9193 nblks = 0; 9194 9195 if (rval == SD_READY_VALID && (!ISCD(un))) { 9196 /* if cmlb_partinfo fails, nblks remains 0 */ 9197 mutex_exit(SD_MUTEX(un)); 9198 (void) cmlb_partinfo(un->un_cmlbhandle, part, &nblks, 9199 NULL, NULL, NULL, (void *)SD_PATH_DIRECT); 9200 mutex_enter(SD_MUTEX(un)); 9201 } 9202 9203 if ((rval != SD_READY_VALID) || 9204 (!ISCD(un) && nblks <= 0)) { 9205 rval = un->un_f_has_removable_media ? ENXIO : EIO; 9206 SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: " 9207 "device not ready or invalid disk block value\n"); 9208 goto open_fail; 9209 } 9210 #if defined(__i386) || defined(__amd64) 9211 } else { 9212 uchar_t *cp; 9213 /* 9214 * x86 requires special nodelay handling, so that p0 is 9215 * always defined and accessible. 9216 * Invalidate geometry only if device is not already open.
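 *
 * The scan below assumes (as elsewhere in this file) that
 * un_ocmap.chkd[] overlays the regopen/lyropen maps as raw bytes,
 * so "no opens at all" reduces to every byte being zero; roughly
 * (illustrative only):
 *
 *	int any_open = 0;
 *	for (i = 0; i < OCSIZE; i++)
 *		any_open |= un->un_ocmap.chkd[i];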
9217 */ 9218 cp = &un->un_ocmap.chkd[0]; 9219 while (cp < &un->un_ocmap.chkd[OCSIZE]) { 9220 if (*cp != (uchar_t)0) { 9221 break; 9222 } 9223 cp++; 9224 } 9225 if (cp == &un->un_ocmap.chkd[OCSIZE]) { 9226 mutex_exit(SD_MUTEX(un)); 9227 cmlb_invalidate(un->un_cmlbhandle, 9228 (void *)SD_PATH_DIRECT); 9229 mutex_enter(SD_MUTEX(un)); 9230 } 9231 9232 #endif 9233 } 9234 9235 if (otyp == OTYP_LYR) { 9236 un->un_ocmap.lyropen[part]++; 9237 } else { 9238 un->un_ocmap.regopen[otyp] |= partmask; 9239 } 9240 9241 /* Set up open and exclusive open flags */ 9242 if (flag & FEXCL) { 9243 un->un_exclopen |= (partmask); 9244 } 9245 9246 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: " 9247 "open of part %d type %d\n", part, otyp); 9248 9249 mutex_exit(SD_MUTEX(un)); 9250 if (!nodelay) { 9251 sd_pm_exit(un); 9252 } 9253 9254 sema_v(&un->un_semoclose); 9255 9256 mutex_enter(&sd_detach_mutex); 9257 un->un_opens_in_progress--; 9258 mutex_exit(&sd_detach_mutex); 9259 9260 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: exit success\n"); 9261 return (DDI_SUCCESS); 9262 9263 excl_open_fail: 9264 SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: fail exclusive open\n"); 9265 rval = EBUSY; 9266 9267 open_fail: 9268 mutex_exit(SD_MUTEX(un)); 9269 9270 /* 9271 * On a failed open we must exit the pm management. 9272 */ 9273 if (!nodelay) { 9274 sd_pm_exit(un); 9275 } 9276 open_failed_with_pm: 9277 sema_v(&un->un_semoclose); 9278 9279 mutex_enter(&sd_detach_mutex); 9280 un->un_opens_in_progress--; 9281 if (otyp == OTYP_LYR) { 9282 un->un_layer_count--; 9283 } 9284 mutex_exit(&sd_detach_mutex); 9285 9286 return (rval); 9287 } 9288 9289 9290 /* 9291 * Function: sdclose 9292 * 9293 * Description: Driver's close(9e) entry point function. 9294 * 9295 * Arguments: dev - device number 9296 * flag - file status flag, informational only 9297 * otyp - close type (OTYP_BLK, OTYP_CHR, OTYP_LYR) 9298 * cred_p - user credential pointer 9299 * 9300 * Return Code: ENXIO 9301 * 9302 * Context: Kernel thread context 9303 */ 9304 /* ARGSUSED */ 9305 static int 9306 sdclose(dev_t dev, int flag, int otyp, cred_t *cred_p) 9307 { 9308 struct sd_lun *un; 9309 uchar_t *cp; 9310 int part; 9311 int nodelay; 9312 int rval = 0; 9313 9314 /* Validate the open type */ 9315 if (otyp >= OTYPCNT) { 9316 return (ENXIO); 9317 } 9318 9319 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 9320 return (ENXIO); 9321 } 9322 9323 part = SDPART(dev); 9324 nodelay = flag & (FNDELAY | FNONBLOCK); 9325 9326 SD_TRACE(SD_LOG_OPEN_CLOSE, un, 9327 "sdclose: close of part %d type %d\n", part, otyp); 9328 9329 /* 9330 * We use a semaphore here in order to serialize 9331 * open and close requests on the device. 9332 */ 9333 sema_p(&un->un_semoclose); 9334 9335 mutex_enter(SD_MUTEX(un)); 9336 9337 /* Don't proceed if power is being changed. 
*/ 9338 while (un->un_state == SD_STATE_PM_CHANGING) { 9339 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 9340 } 9341 9342 if (un->un_exclopen & (1 << part)) { 9343 un->un_exclopen &= ~(1 << part); 9344 } 9345 9346 /* Update the open partition map */ 9347 if (otyp == OTYP_LYR) { 9348 un->un_ocmap.lyropen[part] -= 1; 9349 } else { 9350 un->un_ocmap.regopen[otyp] &= ~(1 << part); 9351 } 9352 9353 cp = &un->un_ocmap.chkd[0]; 9354 while (cp < &un->un_ocmap.chkd[OCSIZE]) { 9355 if (*cp != (uchar_t)0) { 9356 break; 9357 } 9358 cp++; 9359 } 9360 9361 if (cp == &un->un_ocmap.chkd[OCSIZE]) { 9362 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdclose: last close\n"); 9363 9364 /* 9365 * We avoid persistence upon the last close, and set 9366 * the throttle back to the maximum. 9367 */ 9368 un->un_throttle = un->un_saved_throttle; 9369 9370 if (un->un_state == SD_STATE_OFFLINE) { 9371 if (un->un_f_is_fibre == FALSE) { 9372 scsi_log(SD_DEVINFO(un), sd_label, 9373 CE_WARN, "offline\n"); 9374 } 9375 mutex_exit(SD_MUTEX(un)); 9376 cmlb_invalidate(un->un_cmlbhandle, 9377 (void *)SD_PATH_DIRECT); 9378 mutex_enter(SD_MUTEX(un)); 9379 9380 } else { 9381 /* 9382 * Flush any outstanding writes in NVRAM cache. 9383 * Note: SYNCHRONIZE CACHE is an optional SCSI-2 9384 * command; it may not work for non-Pluto devices. 9385 * SYNCHRONIZE CACHE is not required for removables, 9386 * except DVD-RAM drives. 9387 * 9388 * Also note: because SYNCHRONIZE CACHE is currently 9389 * the only command issued here that requires the 9390 * drive be powered up, only do the power up before 9391 * sending the Sync Cache command. If additional 9392 * commands are added which require a powered up 9393 * drive, the following sequence may have to change. 9394 * 9395 * And finally, note that parallel SCSI on SPARC 9396 * only issues a Sync Cache to DVD-RAM, a newly 9397 * supported device. 9398 */ 9399 #if defined(__i386) || defined(__amd64) 9400 if (un->un_f_sync_cache_supported || 9401 un->un_f_dvdram_writable_device == TRUE) { 9402 #else 9403 if (un->un_f_dvdram_writable_device == TRUE) { 9404 #endif 9405 mutex_exit(SD_MUTEX(un)); 9406 if (sd_pm_entry(un) == DDI_SUCCESS) { 9407 rval = 9408 sd_send_scsi_SYNCHRONIZE_CACHE(un, 9409 NULL); 9410 /* ignore error if not supported */ 9411 if (rval == ENOTSUP) { 9412 rval = 0; 9413 } else if (rval != 0) { 9414 rval = EIO; 9415 } 9416 sd_pm_exit(un); 9417 } else { 9418 rval = EIO; 9419 } 9420 mutex_enter(SD_MUTEX(un)); 9421 } 9422 9423 /* 9424 * For devices that support DOOR_LOCK, send an ALLOW 9425 * MEDIA REMOVAL command, but don't get upset if it 9426 * fails. We need to raise the power of the drive before 9427 * we can call sd_send_scsi_DOORLOCK(). 9428 */ 9429 if (un->un_f_doorlock_supported) { 9430 mutex_exit(SD_MUTEX(un)); 9431 if (sd_pm_entry(un) == DDI_SUCCESS) { 9432 rval = sd_send_scsi_DOORLOCK(un, 9433 SD_REMOVAL_ALLOW, SD_PATH_DIRECT); 9434 9435 sd_pm_exit(un); 9436 if (ISCD(un) && (rval != 0) && 9437 (nodelay != 0)) { 9438 rval = ENXIO; 9439 } 9440 } else { 9441 rval = EIO; 9442 } 9443 mutex_enter(SD_MUTEX(un)); 9444 } 9445 9446 /* 9447 * If a device has removable media, invalidate all 9448 * parameters related to media, such as geometry, 9449 * blocksize, and blockcount. 9450 */ 9451 if (un->un_f_has_removable_media) { 9452 sr_ejected(un); 9453 } 9454 9455 /* 9456 * Destroy the cache (if it exists) which was 9457 * allocated for the write maps since this is 9458 * the last close for this media. 9459 */ 9460 if (un->un_wm_cache) { 9461 /* 9462 * Check if there are pending commands,
9463 * and if there are, give a warning and 9464 * do not destroy the cache. 9465 */ 9466 if (un->un_ncmds_in_driver > 0) { 9467 scsi_log(SD_DEVINFO(un), 9468 sd_label, CE_WARN, 9469 "Unable to clean up memory " 9470 "because of pending I/O\n"); 9471 } else { 9472 kmem_cache_destroy( 9473 un->un_wm_cache); 9474 un->un_wm_cache = NULL; 9475 } 9476 } 9477 } 9478 } 9479 9480 mutex_exit(SD_MUTEX(un)); 9481 sema_v(&un->un_semoclose); 9482 9483 if (otyp == OTYP_LYR) { 9484 mutex_enter(&sd_detach_mutex); 9485 /* 9486 * The detach routine may run when the layer count 9487 * drops to zero. 9488 */ 9489 un->un_layer_count--; 9490 mutex_exit(&sd_detach_mutex); 9491 } 9492 9493 return (rval); 9494 } 9495 9496 9497 /* 9498 * Function: sd_ready_and_valid 9499 * 9500 * Description: Test if device is ready and has a valid geometry. 9501 * 9502 * Arguments: un - driver soft state (unit) structure 9503 * 9504 * 9505 * Return Code: SD_READY_VALID ready and valid label 9506 * SD_NOT_READY_VALID not ready, no label 9507 * SD_RESERVED_BY_OTHERS reservation conflict 9508 * 9509 * Context: Never called at interrupt context. 9510 */ 9511 9512 static int 9513 sd_ready_and_valid(struct sd_lun *un) 9514 { 9515 struct sd_errstats *stp; 9516 uint64_t capacity; 9517 uint_t lbasize; 9518 int rval = SD_READY_VALID; 9519 char name_str[48]; 9520 int is_valid; 9521 9522 ASSERT(un != NULL); 9523 ASSERT(!mutex_owned(SD_MUTEX(un))); 9524 9525 mutex_enter(SD_MUTEX(un)); 9526 /* 9527 * If a device has removable media, we must check if media is 9528 * ready when checking if this device is ready and valid. 9529 */ 9530 if (un->un_f_has_removable_media) { 9531 mutex_exit(SD_MUTEX(un)); 9532 if (sd_send_scsi_TEST_UNIT_READY(un, 0) != 0) { 9533 rval = SD_NOT_READY_VALID; 9534 mutex_enter(SD_MUTEX(un)); 9535 goto done; 9536 } 9537 9538 is_valid = SD_IS_VALID_LABEL(un); 9539 mutex_enter(SD_MUTEX(un)); 9540 if (!is_valid || 9541 (un->un_f_blockcount_is_valid == FALSE) || 9542 (un->un_f_tgt_blocksize_is_valid == FALSE)) { 9543 9544 /* The capacity has to be re-read on every open. */ 9545 mutex_exit(SD_MUTEX(un)); 9546 if (sd_send_scsi_READ_CAPACITY(un, &capacity, 9547 &lbasize, SD_PATH_DIRECT) != 0) { 9548 cmlb_invalidate(un->un_cmlbhandle, 9549 (void *)SD_PATH_DIRECT); 9550 mutex_enter(SD_MUTEX(un)); 9551 rval = SD_NOT_READY_VALID; 9552 goto done; 9553 } else { 9554 mutex_enter(SD_MUTEX(un)); 9555 sd_update_block_info(un, lbasize, capacity); 9556 } 9557 } 9558 9559 /* 9560 * Check whether the media in the device is writable. 9561 */ 9562 if (!is_valid && ISCD(un)) { 9563 sd_check_for_writable_cd(un, SD_PATH_DIRECT); 9564 } 9565 9566 } else { 9567 /* 9568 * Do a test unit ready to clear any unit attention from non-CD 9569 * devices. 9570 */ 9571 mutex_exit(SD_MUTEX(un)); 9572 (void) sd_send_scsi_TEST_UNIT_READY(un, 0); 9573 mutex_enter(SD_MUTEX(un)); 9574 } 9575 9576 9577 /* 9578 * If this is a non-512 block device, allocate space for 9579 * the wmap cache. This is done here since this routine is 9580 * called every time the media is changed, and the 9581 * block size is a function of the media rather than the device.
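 *
 * A worked example of why the cache is needed (block sizes assumed
 * for illustration): with un_tgt_blocksize == 2048 and
 * un_sys_blocksize == 512, a 512-byte write must read the containing
 * 2048-byte target block, modify one quarter of it, and write the
 * whole target block back; the sd_w_map entries allocated from this
 * cache serialize such overlapping read-modify-write ranges.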
9582 */ 9583 if (un->un_f_non_devbsize_supported && NOT_DEVBSIZE(un)) { 9584 if (!(un->un_wm_cache)) { 9585 (void) snprintf(name_str, sizeof (name_str), 9586 "%s%d_cache", 9587 ddi_driver_name(SD_DEVINFO(un)), 9588 ddi_get_instance(SD_DEVINFO(un))); 9589 un->un_wm_cache = kmem_cache_create( 9590 name_str, sizeof (struct sd_w_map), 9591 8, sd_wm_cache_constructor, 9592 sd_wm_cache_destructor, NULL, 9593 (void *)un, NULL, 0); 9594 if (!(un->un_wm_cache)) { 9595 rval = ENOMEM; 9596 goto done; 9597 } 9598 } 9599 } 9600 9601 if (un->un_state == SD_STATE_NORMAL) { 9602 /* 9603 * If the target is not yet ready here (defined by a TUR 9604 * failure), invalidate the geometry and print an 'offline' 9605 * message. This is a legacy message, as the state of the 9606 * target is not actually changed to SD_STATE_OFFLINE. 9607 * 9608 * If the TUR fails for EACCES (Reservation Conflict), 9609 * SD_RESERVED_BY_OTHERS will be returned to indicate 9610 * reservation conflict. If the TUR fails for other 9611 * reasons, SD_NOT_READY_VALID will be returned. 9612 */ 9613 int err; 9614 9615 mutex_exit(SD_MUTEX(un)); 9616 err = sd_send_scsi_TEST_UNIT_READY(un, 0); 9617 mutex_enter(SD_MUTEX(un)); 9618 9619 if (err != 0) { 9620 mutex_exit(SD_MUTEX(un)); 9621 cmlb_invalidate(un->un_cmlbhandle, 9622 (void *)SD_PATH_DIRECT); 9623 mutex_enter(SD_MUTEX(un)); 9624 if (err == EACCES) { 9625 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 9626 "reservation conflict\n"); 9627 rval = SD_RESERVED_BY_OTHERS; 9628 } else { 9629 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 9630 "drive offline\n"); 9631 rval = SD_NOT_READY_VALID; 9632 } 9633 goto done; 9634 } 9635 } 9636 9637 if (un->un_f_format_in_progress == FALSE) { 9638 mutex_exit(SD_MUTEX(un)); 9639 if (cmlb_validate(un->un_cmlbhandle, 0, 9640 (void *)SD_PATH_DIRECT) != 0) { 9641 rval = SD_NOT_READY_VALID; 9642 mutex_enter(SD_MUTEX(un)); 9643 goto done; 9644 } 9645 if (un->un_f_pkstats_enabled) { 9646 sd_set_pstats(un); 9647 SD_TRACE(SD_LOG_IO_PARTITION, un, 9648 "sd_ready_and_valid: un:0x%p pstats created and " 9649 "set\n", un); 9650 } 9651 mutex_enter(SD_MUTEX(un)); 9652 } 9653 9654 /* 9655 * If this device supports DOOR_LOCK command, try and send 9656 * this command to PREVENT MEDIA REMOVAL, but don't get upset 9657 * if it fails. For a CD, however, it is an error 9658 */ 9659 if (un->un_f_doorlock_supported) { 9660 mutex_exit(SD_MUTEX(un)); 9661 if ((sd_send_scsi_DOORLOCK(un, SD_REMOVAL_PREVENT, 9662 SD_PATH_DIRECT) != 0) && ISCD(un)) { 9663 rval = SD_NOT_READY_VALID; 9664 mutex_enter(SD_MUTEX(un)); 9665 goto done; 9666 } 9667 mutex_enter(SD_MUTEX(un)); 9668 } 9669 9670 /* The state has changed, inform the media watch routines */ 9671 un->un_mediastate = DKIO_INSERTED; 9672 cv_broadcast(&un->un_state_cv); 9673 rval = SD_READY_VALID; 9674 9675 done: 9676 9677 /* 9678 * Initialize the capacity kstat value, if no media previously 9679 * (capacity kstat is 0) and a media has been inserted 9680 * (un_blockcount > 0). 9681 */ 9682 if (un->un_errstats != NULL) { 9683 stp = (struct sd_errstats *)un->un_errstats->ks_data; 9684 if ((stp->sd_capacity.value.ui64 == 0) && 9685 (un->un_f_blockcount_is_valid == TRUE)) { 9686 stp->sd_capacity.value.ui64 = 9687 (uint64_t)((uint64_t)un->un_blockcount * 9688 un->un_sys_blocksize); 9689 } 9690 } 9691 9692 mutex_exit(SD_MUTEX(un)); 9693 return (rval); 9694 } 9695 9696 9697 /* 9698 * Function: sdmin 9699 * 9700 * Description: Routine to limit the size of a data transfer. Used in 9701 * conjunction with physio(9F). 
9702 * 9703 * Arguments: bp - pointer to the indicated buf(9S) struct. 9704 * 9705 * Context: Kernel thread context. 9706 */ 9707 9708 static void 9709 sdmin(struct buf *bp) 9710 { 9711 struct sd_lun *un; 9712 int instance; 9713 9714 instance = SDUNIT(bp->b_edev); 9715 9716 un = ddi_get_soft_state(sd_state, instance); 9717 ASSERT(un != NULL); 9718 9719 if (bp->b_bcount > un->un_max_xfer_size) { 9720 bp->b_bcount = un->un_max_xfer_size; 9721 } 9722 } 9723 9724 9725 /* 9726 * Function: sdread 9727 * 9728 * Description: Driver's read(9e) entry point function. 9729 * 9730 * Arguments: dev - device number 9731 * uio - structure pointer describing where data is to be stored 9732 * in user's space 9733 * cred_p - user credential pointer 9734 * 9735 * Return Code: ENXIO 9736 * EIO 9737 * EINVAL 9738 * value returned by physio 9739 * 9740 * Context: Kernel thread context. 9741 */ 9742 /* ARGSUSED */ 9743 static int 9744 sdread(dev_t dev, struct uio *uio, cred_t *cred_p) 9745 { 9746 struct sd_lun *un = NULL; 9747 int secmask; 9748 int err; 9749 9750 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 9751 return (ENXIO); 9752 } 9753 9754 ASSERT(!mutex_owned(SD_MUTEX(un))); 9755 9756 if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) { 9757 mutex_enter(SD_MUTEX(un)); 9758 /* 9759 * Because the call to sd_ready_and_valid will issue I/O, we 9760 * must wait here if either the device is suspended or 9761 * its power level is changing. 9762 */ 9763 while ((un->un_state == SD_STATE_SUSPENDED) || 9764 (un->un_state == SD_STATE_PM_CHANGING)) { 9765 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 9766 } 9767 un->un_ncmds_in_driver++; 9768 mutex_exit(SD_MUTEX(un)); 9769 if ((sd_ready_and_valid(un)) != SD_READY_VALID) { 9770 mutex_enter(SD_MUTEX(un)); 9771 un->un_ncmds_in_driver--; 9772 ASSERT(un->un_ncmds_in_driver >= 0); 9773 mutex_exit(SD_MUTEX(un)); 9774 return (EIO); 9775 } 9776 mutex_enter(SD_MUTEX(un)); 9777 un->un_ncmds_in_driver--; 9778 ASSERT(un->un_ncmds_in_driver >= 0); 9779 mutex_exit(SD_MUTEX(un)); 9780 } 9781 9782 /* 9783 * Read requests are restricted to multiples of the system block size. 9784 */ 9785 secmask = un->un_sys_blocksize - 1; 9786 9787 if (uio->uio_loffset & ((offset_t)(secmask))) { 9788 SD_ERROR(SD_LOG_READ_WRITE, un, 9789 "sdread: file offset not modulo %d\n", 9790 un->un_sys_blocksize); 9791 err = EINVAL; 9792 } else if (uio->uio_iov->iov_len & (secmask)) { 9793 SD_ERROR(SD_LOG_READ_WRITE, un, 9794 "sdread: transfer length not modulo %d\n", 9795 un->un_sys_blocksize); 9796 err = EINVAL; 9797 } else { 9798 err = physio(sdstrategy, NULL, dev, B_READ, sdmin, uio); 9799 } 9800 return (err); 9801 } 9802 9803 9804 /* 9805 * Function: sdwrite 9806 * 9807 * Description: Driver's write(9e) entry point function. 9808 * 9809 * Arguments: dev - device number 9810 * uio - structure pointer describing where data is stored in 9811 * user's space 9812 * cred_p - user credential pointer 9813 * 9814 * Return Code: ENXIO 9815 * EIO 9816 * EINVAL 9817 * value returned by physio 9818 * 9819 * Context: Kernel thread context.
9820 */ 9821 /* ARGSUSED */ 9822 static int 9823 sdwrite(dev_t dev, struct uio *uio, cred_t *cred_p) 9824 { 9825 struct sd_lun *un = NULL; 9826 int secmask; 9827 int err; 9828 9829 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 9830 return (ENXIO); 9831 } 9832 9833 ASSERT(!mutex_owned(SD_MUTEX(un))); 9834 9835 if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) { 9836 mutex_enter(SD_MUTEX(un)); 9837 /* 9838 * Because the call to sd_ready_and_valid will issue I/O, we 9839 * must wait here if either the device is suspended or 9840 * its power level is changing. 9841 */ 9842 while ((un->un_state == SD_STATE_SUSPENDED) || 9843 (un->un_state == SD_STATE_PM_CHANGING)) { 9844 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 9845 } 9846 un->un_ncmds_in_driver++; 9847 mutex_exit(SD_MUTEX(un)); 9848 if ((sd_ready_and_valid(un)) != SD_READY_VALID) { 9849 mutex_enter(SD_MUTEX(un)); 9850 un->un_ncmds_in_driver--; 9851 ASSERT(un->un_ncmds_in_driver >= 0); 9852 mutex_exit(SD_MUTEX(un)); 9853 return (EIO); 9854 } 9855 mutex_enter(SD_MUTEX(un)); 9856 un->un_ncmds_in_driver--; 9857 ASSERT(un->un_ncmds_in_driver >= 0); 9858 mutex_exit(SD_MUTEX(un)); 9859 } 9860 9861 /* 9862 * Write requests are restricted to multiples of the system block size. 9863 */ 9864 secmask = un->un_sys_blocksize - 1; 9865 9866 if (uio->uio_loffset & ((offset_t)(secmask))) { 9867 SD_ERROR(SD_LOG_READ_WRITE, un, 9868 "sdwrite: file offset not modulo %d\n", 9869 un->un_sys_blocksize); 9870 err = EINVAL; 9871 } else if (uio->uio_iov->iov_len & (secmask)) { 9872 SD_ERROR(SD_LOG_READ_WRITE, un, 9873 "sdwrite: transfer length not modulo %d\n", 9874 un->un_sys_blocksize); 9875 err = EINVAL; 9876 } else { 9877 err = physio(sdstrategy, NULL, dev, B_WRITE, sdmin, uio); 9878 } 9879 return (err); 9880 } 9881 9882 9883 /* 9884 * Function: sdaread 9885 * 9886 * Description: Driver's aread(9e) entry point function. 9887 * 9888 * Arguments: dev - device number 9889 * aio - structure pointer describing where data is to be stored 9890 * cred_p - user credential pointer 9891 * 9892 * Return Code: ENXIO 9893 * EIO 9894 * EINVAL 9895 * value returned by aphysio 9896 * 9897 * Context: Kernel thread context. 9898 */ 9899 /* ARGSUSED */ 9900 static int 9901 sdaread(dev_t dev, struct aio_req *aio, cred_t *cred_p) 9902 { 9903 struct sd_lun *un = NULL; 9904 struct uio *uio = aio->aio_uio; 9905 int secmask; 9906 int err; 9907 9908 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 9909 return (ENXIO); 9910 } 9911 9912 ASSERT(!mutex_owned(SD_MUTEX(un))); 9913 9914 if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) { 9915 mutex_enter(SD_MUTEX(un)); 9916 /* 9917 * Because the call to sd_ready_and_valid will issue I/O, we 9918 * must wait here if either the device is suspended or 9919 * its power level is changing. 9920 */ 9921 while ((un->un_state == SD_STATE_SUSPENDED) || 9922 (un->un_state == SD_STATE_PM_CHANGING)) { 9923 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 9924 } 9925 un->un_ncmds_in_driver++; 9926 mutex_exit(SD_MUTEX(un)); 9927 if ((sd_ready_and_valid(un)) != SD_READY_VALID) { 9928 mutex_enter(SD_MUTEX(un)); 9929 un->un_ncmds_in_driver--; 9930 ASSERT(un->un_ncmds_in_driver >= 0); 9931 mutex_exit(SD_MUTEX(un)); 9932 return (EIO); 9933 } 9934 mutex_enter(SD_MUTEX(un)); 9935 un->un_ncmds_in_driver--; 9936 ASSERT(un->un_ncmds_in_driver >= 0); 9937 mutex_exit(SD_MUTEX(un)); 9938 } 9939 9940 /* 9941 * Read requests are restricted to multiples of the system block size.
9942 */ 9943 secmask = un->un_sys_blocksize - 1; 9944 9945 if (uio->uio_loffset & ((offset_t)(secmask))) { 9946 SD_ERROR(SD_LOG_READ_WRITE, un, 9947 "sdaread: file offset not modulo %d\n", 9948 un->un_sys_blocksize); 9949 err = EINVAL; 9950 } else if (uio->uio_iov->iov_len & (secmask)) { 9951 SD_ERROR(SD_LOG_READ_WRITE, un, 9952 "sdaread: transfer length not modulo %d\n", 9953 un->un_sys_blocksize); 9954 err = EINVAL; 9955 } else { 9956 err = aphysio(sdstrategy, anocancel, dev, B_READ, sdmin, aio); 9957 } 9958 return (err); 9959 } 9960 9961 9962 /* 9963 * Function: sdawrite 9964 * 9965 * Description: Driver's awrite(9e) entry point function. 9966 * 9967 * Arguments: dev - device number 9968 * aio - structure pointer describing where data is stored 9969 * cred_p - user credential pointer 9970 * 9971 * Return Code: ENXIO 9972 * EIO 9973 * EINVAL 9974 * value returned by aphysio 9975 * 9976 * Context: Kernel thread context. 9977 */ 9978 /* ARGSUSED */ 9979 static int 9980 sdawrite(dev_t dev, struct aio_req *aio, cred_t *cred_p) 9981 { 9982 struct sd_lun *un = NULL; 9983 struct uio *uio = aio->aio_uio; 9984 int secmask; 9985 int err; 9986 9987 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 9988 return (ENXIO); 9989 } 9990 9991 ASSERT(!mutex_owned(SD_MUTEX(un))); 9992 9993 if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) { 9994 mutex_enter(SD_MUTEX(un)); 9995 /* 9996 * Because the call to sd_ready_and_valid will issue I/O, we 9997 * must wait here if either the device is suspended or 9998 * its power level is changing. 9999 */ 10000 while ((un->un_state == SD_STATE_SUSPENDED) || 10001 (un->un_state == SD_STATE_PM_CHANGING)) { 10002 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 10003 } 10004 un->un_ncmds_in_driver++; 10005 mutex_exit(SD_MUTEX(un)); 10006 if ((sd_ready_and_valid(un)) != SD_READY_VALID) { 10007 mutex_enter(SD_MUTEX(un)); 10008 un->un_ncmds_in_driver--; 10009 ASSERT(un->un_ncmds_in_driver >= 0); 10010 mutex_exit(SD_MUTEX(un)); 10011 return (EIO); 10012 } 10013 mutex_enter(SD_MUTEX(un)); 10014 un->un_ncmds_in_driver--; 10015 ASSERT(un->un_ncmds_in_driver >= 0); 10016 mutex_exit(SD_MUTEX(un)); 10017 } 10018 10019 /* 10020 * Write requests are restricted to multiples of the system block size.
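 *
 * A worked example of the secmask test below (assuming
 * un_sys_blocksize == 512, so secmask == 0x1ff): a uio_loffset of
 * 4096 passes (4096 & 0x1ff == 0), while 4100 fails with EINVAL
 * (4100 & 0x1ff == 4); iov_len is checked the same way.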
10021 */ 10022 secmask = un->un_sys_blocksize - 1; 10023 10024 if (uio->uio_loffset & ((offset_t)(secmask))) { 10025 SD_ERROR(SD_LOG_READ_WRITE, un, 10026 "sdawrite: file offset not modulo %d\n", 10027 un->un_sys_blocksize); 10028 err = EINVAL; 10029 } else if (uio->uio_iov->iov_len & (secmask)) { 10030 SD_ERROR(SD_LOG_READ_WRITE, un, 10031 "sdawrite: transfer length not modulo %d\n", 10032 un->un_sys_blocksize); 10033 err = EINVAL; 10034 } else { 10035 err = aphysio(sdstrategy, anocancel, dev, B_WRITE, sdmin, aio); 10036 } 10037 return (err); 10038 } 10039 10040 10041 10042 10043 10044 /* 10045 * Driver IO processing follows the following sequence: 10046 * 10047 * sdioctl(9E) sdstrategy(9E) biodone(9F) 10048 * | | ^ 10049 * v v | 10050 * sd_send_scsi_cmd() ddi_xbuf_qstrategy() +-------------------+ 10051 * | | | | 10052 * v | | | 10053 * sd_uscsi_strategy() sd_xbuf_strategy() sd_buf_iodone() sd_uscsi_iodone() 10054 * | | ^ ^ 10055 * v v | | 10056 * SD_BEGIN_IOSTART() SD_BEGIN_IOSTART() | | 10057 * | | | | 10058 * +---+ | +------------+ +-------+ 10059 * | | | | 10060 * | SD_NEXT_IOSTART()| SD_NEXT_IODONE()| | 10061 * | v | | 10062 * | sd_mapblockaddr_iostart() sd_mapblockaddr_iodone() | 10063 * | | ^ | 10064 * | SD_NEXT_IOSTART()| SD_NEXT_IODONE()| | 10065 * | v | | 10066 * | sd_mapblocksize_iostart() sd_mapblocksize_iodone() | 10067 * | | ^ | 10068 * | SD_NEXT_IOSTART()| SD_NEXT_IODONE()| | 10069 * | v | | 10070 * | sd_checksum_iostart() sd_checksum_iodone() | 10071 * | | ^ | 10072 * +-> SD_NEXT_IOSTART()| SD_NEXT_IODONE()+------------->+ 10073 * | v | | 10074 * | sd_pm_iostart() sd_pm_iodone() | 10075 * | | ^ | 10076 * | | | | 10077 * +-> SD_NEXT_IOSTART()| SD_BEGIN_IODONE()--+--------------+ 10078 * | ^ 10079 * v | 10080 * sd_core_iostart() | 10081 * | | 10082 * | +------>(*destroypkt)() 10083 * +-> sd_start_cmds() <-+ | | 10084 * | | | v 10085 * | | | scsi_destroy_pkt(9F) 10086 * | | | 10087 * +->(*initpkt)() +- sdintr() 10088 * | | | | 10089 * | +-> scsi_init_pkt(9F) | +-> sd_handle_xxx() 10090 * | +-> scsi_setup_cdb(9F) | 10091 * | | 10092 * +--> scsi_transport(9F) | 10093 * | | 10094 * +----> SCSA ---->+ 10095 * 10096 * 10097 * This code is based upon the following presumptions: 10098 * 10099 * - iostart and iodone functions operate on buf(9S) structures. These 10100 * functions perform the necessary operations on the buf(9S) and pass 10101 * them along to the next function in the chain by using the macros 10102 * SD_NEXT_IOSTART() (for iostart side functions) and SD_NEXT_IODONE() 10103 * (for iodone side functions). 10104 * 10105 * - The iostart side functions may sleep. The iodone side functions 10106 * are called under interrupt context and may NOT sleep. Therefore 10107 * iodone side functions also may not call iostart side functions. 10108 * (NOTE: iostart side functions should NOT sleep for memory, as 10109 * this could result in deadlock.) 10110 * 10111 * - An iostart side function may call its corresponding iodone side 10112 * function directly (if necessary). 10113 * 10114 * - In the event of an error, an iostart side function can return a buf(9S) 10115 * to its caller by calling SD_BEGIN_IODONE() (after setting B_ERROR and 10116 * b_error in the usual way of course). 10117 * 10118 * - The taskq mechanism may be used by the iodone side functions to dispatch 10119 * requests to the iostart side functions. The iostart side functions in 10120 * this case would be called under the context of a taskq thread, so it's 10121 * OK for them to block/sleep/spin in this case. 
10122 * 10123 * - iostart side functions may allocate "shadow" buf(9S) structs and 10124 * pass them along to the next function in the chain. The corresponding 10125 * iodone side functions must coalesce the "shadow" bufs and return 10126 * the "original" buf to the next higher layer. 10127 * 10128 * - The b_private field of the buf(9S) struct holds a pointer to 10129 * an sd_xbuf struct, which contains information needed to 10130 * construct the scsi_pkt for the command. 10131 * 10132 * - The SD_MUTEX(un) is NOT held across calls to the next layer. Each 10133 * layer must acquire & release the SD_MUTEX(un) as needed. 10134 */ 10135 10136 10137 /* 10138 * Create taskq for all targets in the system. This is created at 10139 * _init(9E) and destroyed at _fini(9E). 10140 * 10141 * Note: here we set the minalloc to a reasonably high number to ensure that 10142 * we will have an adequate supply of task entries available at interrupt time. 10143 * This is used in conjunction with the TASKQ_PREPOPULATE flag in 10144 * sd_taskq_create(). Since we do not want to sleep for allocations at 10145 * interrupt time, set maxalloc equal to minalloc. That way we will just fail 10146 * the command if we ever try to dispatch more than SD_TASKQ_MAXALLOC taskq 10147 * requests at any one instant in time. 10148 */ 10149 #define SD_TASKQ_NUMTHREADS 8 10150 #define SD_TASKQ_MINALLOC 256 10151 #define SD_TASKQ_MAXALLOC 256 10152 10153 static taskq_t *sd_tq = NULL; 10154 _NOTE(SCHEME_PROTECTS_DATA("stable data", sd_tq)) 10155 10156 static int sd_taskq_minalloc = SD_TASKQ_MINALLOC; 10157 static int sd_taskq_maxalloc = SD_TASKQ_MAXALLOC; 10158 10159 /* 10160 * The following task queue is created for the write part of 10161 * read-modify-write on non-512 block size devices. 10162 * The number of threads is limited to 1 for now; this was chosen 10163 * because the queue currently applies only to DVD-RAM and MO drives, 10164 * for which performance is not the main criterion at this stage. 10165 * Note: whether a single taskq could be used instead remains to be explored. 10166 */ 10167 #define SD_WMR_TASKQ_NUMTHREADS 1 10168 static taskq_t *sd_wmr_tq = NULL; 10169 _NOTE(SCHEME_PROTECTS_DATA("stable data", sd_wmr_tq)) 10170 10171 /* 10172 * Function: sd_taskq_create 10173 * 10174 * Description: Create taskq thread(s) and preallocate task entries 10175 * 10176 * Return Code: none; the allocated queues are stored in sd_tq and sd_wmr_tq. 10177 * 10178 * Context: Can sleep. Requires blockable context. 10179 * 10180 * Notes: - The taskq() facility currently is NOT part of the DDI. 10181 * (definitely NOT recommended for 3rd-party drivers!) :-) 10182 * - taskq_create() will block for memory; it will also panic 10183 * if it cannot create the requested number of threads. 10184 * - Currently taskq_create() creates threads that cannot be 10185 * swapped.
10186 * - We use TASKQ_PREPOPULATE to ensure we have an adequate 10187 * supply of taskq entries at interrupt time (i.e., so that we 10188 * do not have to sleep for memory) 10189 */ 10190 10191 static void 10192 sd_taskq_create(void) 10193 { 10194 char taskq_name[TASKQ_NAMELEN]; 10195 10196 ASSERT(sd_tq == NULL); 10197 ASSERT(sd_wmr_tq == NULL); 10198 10199 (void) snprintf(taskq_name, sizeof (taskq_name), 10200 "%s_drv_taskq", sd_label); 10201 sd_tq = (taskq_create(taskq_name, SD_TASKQ_NUMTHREADS, 10202 (v.v_maxsyspri - 2), sd_taskq_minalloc, sd_taskq_maxalloc, 10203 TASKQ_PREPOPULATE)); 10204 10205 (void) snprintf(taskq_name, sizeof (taskq_name), 10206 "%s_rmw_taskq", sd_label); 10207 sd_wmr_tq = (taskq_create(taskq_name, SD_WMR_TASKQ_NUMTHREADS, 10208 (v.v_maxsyspri - 2), sd_taskq_minalloc, sd_taskq_maxalloc, 10209 TASKQ_PREPOPULATE)); 10210 } 10211 10212 10213 /* 10214 * Function: sd_taskq_delete 10215 * 10216 * Description: Complementary cleanup routine for sd_taskq_create(). 10217 * 10218 * Context: Kernel thread context. 10219 */ 10220 10221 static void 10222 sd_taskq_delete(void) 10223 { 10224 ASSERT(sd_tq != NULL); 10225 ASSERT(sd_wmr_tq != NULL); 10226 taskq_destroy(sd_tq); 10227 taskq_destroy(sd_wmr_tq); 10228 sd_tq = NULL; 10229 sd_wmr_tq = NULL; 10230 } 10231 10232 10233 /* 10234 * Function: sdstrategy 10235 * 10236 * Description: Driver's strategy (9E) entry point function. 10237 * 10238 * Arguments: bp - pointer to buf(9S) 10239 * 10240 * Return Code: Always returns zero 10241 * 10242 * Context: Kernel thread context. 10243 */ 10244 10245 static int 10246 sdstrategy(struct buf *bp) 10247 { 10248 struct sd_lun *un; 10249 10250 un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp)); 10251 if (un == NULL) { 10252 bioerror(bp, EIO); 10253 bp->b_resid = bp->b_bcount; 10254 biodone(bp); 10255 return (0); 10256 } 10257 /* As was done in the past, fail new commands if the state is dumping. */ 10258 if (un->un_state == SD_STATE_DUMPING) { 10259 bioerror(bp, ENXIO); 10260 bp->b_resid = bp->b_bcount; 10261 biodone(bp); 10262 return (0); 10263 } 10264 10265 ASSERT(!mutex_owned(SD_MUTEX(un))); 10266 10267 /* 10268 * Commands may have sneaked in while we released the mutex in 10269 * DDI_SUSPEND, so block new commands here. However, old 10270 * commands that are still in the driver at this point should 10271 * be allowed to drain. 10272 */ 10273 mutex_enter(SD_MUTEX(un)); 10274 /* 10275 * Must wait here if either the device is suspended or 10276 * its power level is changing. 10277 */ 10278 while ((un->un_state == SD_STATE_SUSPENDED) || 10279 (un->un_state == SD_STATE_PM_CHANGING)) { 10280 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 10281 } 10282 10283 un->un_ncmds_in_driver++; 10284 10285 /* 10286 * atapi: since we currently run CDs in PIO mode, we need to call 10287 * bp_mapin here so that it is not called from interrupt context 10288 * under the HBA's init_pkt routine. 10289 */ 10290 if (un->un_f_cfg_is_atapi == TRUE) { 10291 mutex_exit(SD_MUTEX(un)); 10292 bp_mapin(bp); 10293 mutex_enter(SD_MUTEX(un)); 10294 } 10295 SD_INFO(SD_LOG_IO, un, "sdstrategy: un_ncmds_in_driver = %ld\n", 10296 un->un_ncmds_in_driver); 10297 10298 mutex_exit(SD_MUTEX(un)); 10299 10300 /* 10301 * This will (eventually) allocate the sd_xbuf area and 10302 * call sd_xbuf_strategy(). We just want to return the 10303 * result of ddi_xbuf_qstrategy so that we have an 10304 * optimized tail call which saves us a stack frame.
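 *
 * (Illustratively, a compiler can emit "return (f(x));" as a jump
 * to f() that reuses the current stack frame, rather than a call
 * followed by a return.)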
10305 */ 10306 return (ddi_xbuf_qstrategy(bp, un->un_xbuf_attr)); 10307 } 10308 10309 10310 /* 10311 * Function: sd_xbuf_strategy 10312 * 10313 * Description: Function for initiating IO operations via the 10314 * ddi_xbuf_qstrategy() mechanism. 10315 * 10316 * Context: Kernel thread context. 10317 */ 10318 10319 static void 10320 sd_xbuf_strategy(struct buf *bp, ddi_xbuf_t xp, void *arg) 10321 { 10322 struct sd_lun *un = arg; 10323 10324 ASSERT(bp != NULL); 10325 ASSERT(xp != NULL); 10326 ASSERT(un != NULL); 10327 ASSERT(!mutex_owned(SD_MUTEX(un))); 10328 10329 /* 10330 * Initialize the fields in the xbuf and save a pointer to the 10331 * xbuf in bp->b_private. 10332 */ 10333 sd_xbuf_init(un, bp, xp, SD_CHAIN_BUFIO, NULL); 10334 10335 /* Send the buf down the iostart chain */ 10336 SD_BEGIN_IOSTART(((struct sd_xbuf *)xp)->xb_chain_iostart, un, bp); 10337 } 10338 10339 10340 /* 10341 * Function: sd_xbuf_init 10342 * 10343 * Description: Prepare the given sd_xbuf struct for use. 10344 * 10345 * Arguments: un - ptr to softstate 10346 * bp - ptr to associated buf(9S) 10347 * xp - ptr to associated sd_xbuf 10348 * chain_type - IO chain type to use: 10349 * SD_CHAIN_NULL 10350 * SD_CHAIN_BUFIO 10351 * SD_CHAIN_USCSI 10352 * SD_CHAIN_DIRECT 10353 * SD_CHAIN_DIRECT_PRIORITY 10354 * pktinfop - ptr to private data struct for scsi_pkt(9S) 10355 * initialization; may be NULL if none. 10356 * 10357 * Context: Kernel thread context 10358 */ 10359 10360 static void 10361 sd_xbuf_init(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp, 10362 uchar_t chain_type, void *pktinfop) 10363 { 10364 int index; 10365 10366 ASSERT(un != NULL); 10367 ASSERT(bp != NULL); 10368 ASSERT(xp != NULL); 10369 10370 SD_INFO(SD_LOG_IO, un, "sd_xbuf_init: buf:0x%p chain type:0x%x\n", 10371 bp, chain_type); 10372 10373 xp->xb_un = un; 10374 xp->xb_pktp = NULL; 10375 xp->xb_pktinfo = pktinfop; 10376 xp->xb_private = bp->b_private; 10377 xp->xb_blkno = (daddr_t)bp->b_blkno; 10378 10379 /* 10380 * Set up the iostart and iodone chain indexes in the xbuf, based 10381 * upon the specified chain type to use. 10382 */ 10383 switch (chain_type) { 10384 case SD_CHAIN_NULL: 10385 /* 10386 * Fall thru to just use the values for the buf type, even 10387 * though for the NULL chain these values will never be used. 10388 */ 10389 /* FALLTHRU */ 10390 case SD_CHAIN_BUFIO: 10391 index = un->un_buf_chain_type; 10392 break; 10393 case SD_CHAIN_USCSI: 10394 index = un->un_uscsi_chain_type; 10395 break; 10396 case SD_CHAIN_DIRECT: 10397 index = un->un_direct_chain_type; 10398 break; 10399 case SD_CHAIN_DIRECT_PRIORITY: 10400 index = un->un_priority_chain_type; 10401 break; 10402 default: 10403 /* We're really broken if we ever get here... */ 10404 panic("sd_xbuf_init: illegal chain type!"); 10405 /*NOTREACHED*/ 10406 } 10407 10408 xp->xb_chain_iostart = sd_chain_index_map[index].sci_iostart_index; 10409 xp->xb_chain_iodone = sd_chain_index_map[index].sci_iodone_index; 10410 10411 /* 10412 * It might be a bit easier to simply bzero the entire xbuf above, 10413 * but it turns out that since we init a fair number of members anyway, 10414 * we save a fair number of cycles by doing explicit assignment of zero.
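 *
 * (I.e., the explicit stores below stand in for a blanket
 * "bzero(xp, sizeof (struct sd_xbuf));" which would needlessly
 * re-clear the members already assigned above.)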
10415 */ 10416 xp->xb_pkt_flags = 0; 10417 xp->xb_dma_resid = 0; 10418 xp->xb_retry_count = 0; 10419 xp->xb_victim_retry_count = 0; 10420 xp->xb_ua_retry_count = 0; 10421 xp->xb_nr_retry_count = 0; 10422 xp->xb_sense_bp = NULL; 10423 xp->xb_sense_status = 0; 10424 xp->xb_sense_state = 0; 10425 xp->xb_sense_resid = 0; 10426 10427 bp->b_private = xp; 10428 bp->b_flags &= ~(B_DONE | B_ERROR); 10429 bp->b_resid = 0; 10430 bp->av_forw = NULL; 10431 bp->av_back = NULL; 10432 bioerror(bp, 0); 10433 10434 SD_INFO(SD_LOG_IO, un, "sd_xbuf_init: done.\n"); 10435 } 10436 10437 10438 /* 10439 * Function: sd_uscsi_strategy 10440 * 10441 * Description: Wrapper for calling into the USCSI chain via physio(9F) 10442 * 10443 * Arguments: bp - buf struct ptr 10444 * 10445 * Return Code: Always returns 0 10446 * 10447 * Context: Kernel thread context 10448 */ 10449 10450 static int 10451 sd_uscsi_strategy(struct buf *bp) 10452 { 10453 struct sd_lun *un; 10454 struct sd_uscsi_info *uip; 10455 struct sd_xbuf *xp; 10456 uchar_t chain_type; 10457 10458 ASSERT(bp != NULL); 10459 10460 un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp)); 10461 if (un == NULL) { 10462 bioerror(bp, EIO); 10463 bp->b_resid = bp->b_bcount; 10464 biodone(bp); 10465 return (0); 10466 } 10467 10468 ASSERT(!mutex_owned(SD_MUTEX(un))); 10469 10470 SD_TRACE(SD_LOG_IO, un, "sd_uscsi_strategy: entry: buf:0x%p\n", bp); 10471 10472 mutex_enter(SD_MUTEX(un)); 10473 /* 10474 * atapi: since we currently run CDs in PIO mode, we need to call 10475 * bp_mapin here so that it is not called from interrupt context 10476 * under the HBA's init_pkt routine. 10477 */ 10478 if (un->un_f_cfg_is_atapi == TRUE) { 10479 mutex_exit(SD_MUTEX(un)); 10480 bp_mapin(bp); 10481 mutex_enter(SD_MUTEX(un)); 10482 } 10483 un->un_ncmds_in_driver++; 10484 SD_INFO(SD_LOG_IO, un, "sd_uscsi_strategy: un_ncmds_in_driver = %ld\n", 10485 un->un_ncmds_in_driver); 10486 mutex_exit(SD_MUTEX(un)); 10487 10488 /* 10489 * A pointer to a struct sd_uscsi_info is expected in bp->b_private 10490 */ 10491 ASSERT(bp->b_private != NULL); 10492 uip = (struct sd_uscsi_info *)bp->b_private; 10493 10494 switch (uip->ui_flags) { 10495 case SD_PATH_DIRECT: 10496 chain_type = SD_CHAIN_DIRECT; 10497 break; 10498 case SD_PATH_DIRECT_PRIORITY: 10499 chain_type = SD_CHAIN_DIRECT_PRIORITY; 10500 break; 10501 default: 10502 chain_type = SD_CHAIN_USCSI; 10503 break; 10504 } 10505 10506 /* 10507 * We may allocate an extra-sized xbuf for external USCSI commands: 10508 * if the application asks for more than 20 bytes of sense data via 10509 * USCSI, the SCSA layer will allocate a 252-byte sense buffer for that command.
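 *
 * A sketch of the sizing arithmetic below (20 and 252 are the
 * values the comment above assumes for SENSE_LENGTH and
 * MAX_SENSE_LENGTH respectively):
 *
 *	size = sizeof (struct sd_xbuf) - SENSE_LENGTH + MAX_SENSE_LENGTH;
 *	     = sizeof (struct sd_xbuf) + 232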
10510 */ 10511 if (((struct uscsi_cmd *)(uip->ui_cmdp))->uscsi_rqlen > 10512 SENSE_LENGTH) { 10513 xp = kmem_zalloc(sizeof (struct sd_xbuf) - SENSE_LENGTH + 10514 MAX_SENSE_LENGTH, KM_SLEEP); 10515 } else { 10516 xp = kmem_zalloc(sizeof (struct sd_xbuf), KM_SLEEP); 10517 } 10518 10519 sd_xbuf_init(un, bp, xp, chain_type, uip->ui_cmdp); 10520 10521 /* Use the index obtained within xbuf_init */ 10522 SD_BEGIN_IOSTART(xp->xb_chain_iostart, un, bp); 10523 10524 SD_TRACE(SD_LOG_IO, un, "sd_uscsi_strategy: exit: buf:0x%p\n", bp); 10525 10526 return (0); 10527 } 10528 10529 /* 10530 * Function: sd_send_scsi_cmd 10531 * 10532 * Description: Runs a USCSI command for the user (when called thru sdioctl), 10533 * or for the driver 10534 * 10535 * Arguments: dev - the dev_t for the device 10536 * incmd - ptr to a valid uscsi_cmd struct 10537 * flag - bit flag, indicating open settings, 32/64 bit type 10538 * dataspace - UIO_USERSPACE or UIO_SYSSPACE 10539 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 10540 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 10541 * to use the USCSI "direct" chain and bypass the normal 10542 * command waitq. 10543 * 10544 * Return Code: 0 - successful completion of the given command 10545 * EIO - scsi_uscsi_handle_cmd() failed 10546 * ENXIO - soft state not found for specified dev 10547 * EINVAL 10548 * EFAULT - copyin/copyout error 10549 * return code of scsi_uscsi_handle_cmd(): 10550 * EIO 10551 * ENXIO 10552 * EACCES 10553 * 10554 * Context: Waits for command to complete. Can sleep. 10555 */ 10556 10557 static int 10558 sd_send_scsi_cmd(dev_t dev, struct uscsi_cmd *incmd, int flag, 10559 enum uio_seg dataspace, int path_flag) 10560 { 10561 struct sd_uscsi_info *uip; 10562 struct uscsi_cmd *uscmd; 10563 struct sd_lun *un; 10564 int format = 0; 10565 int rval; 10566 10567 un = ddi_get_soft_state(sd_state, SDUNIT(dev)); 10568 if (un == NULL) { 10569 return (ENXIO); 10570 } 10571 10572 ASSERT(!mutex_owned(SD_MUTEX(un))); 10573 10574 #ifdef SDDEBUG 10575 switch (dataspace) { 10576 case UIO_USERSPACE: 10577 SD_TRACE(SD_LOG_IO, un, 10578 "sd_send_scsi_cmd: entry: un:0x%p UIO_USERSPACE\n", un); 10579 break; 10580 case UIO_SYSSPACE: 10581 SD_TRACE(SD_LOG_IO, un, 10582 "sd_send_scsi_cmd: entry: un:0x%p UIO_SYSSPACE\n", un); 10583 break; 10584 default: 10585 SD_TRACE(SD_LOG_IO, un, 10586 "sd_send_scsi_cmd: entry: un:0x%p UNEXPECTED SPACE\n", un); 10587 break; 10588 } 10589 #endif 10590 10591 rval = scsi_uscsi_alloc_and_copyin((intptr_t)incmd, flag, 10592 SD_ADDRESS(un), &uscmd); 10593 if (rval != 0) { 10594 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_cmd: " 10595 "scsi_uscsi_alloc_and_copyin failed, un:0x%p\n", un); 10596 return (rval); 10597 } 10598 10599 if ((uscmd->uscsi_cdb != NULL) && 10600 (uscmd->uscsi_cdb[0] == SCMD_FORMAT)) { 10601 mutex_enter(SD_MUTEX(un)); 10602 un->un_f_format_in_progress = TRUE; 10603 mutex_exit(SD_MUTEX(un)); 10604 format = 1; 10605 } 10606 10607 /* 10608 * Allocate an sd_uscsi_info struct and fill it with the info 10609 * needed by sd_initpkt_for_uscsi(). Then put the pointer into 10610 * b_private in the buf for sd_initpkt_for_uscsi(). Note that 10611 * since we allocate the buf here in this function, we do not 10612 * need to preserve the prior contents of b_private.
10613 * The sd_uscsi_info struct is also used by sd_uscsi_strategy() 10614 */ 10615 uip = kmem_zalloc(sizeof (struct sd_uscsi_info), KM_SLEEP); 10616 uip->ui_flags = path_flag; 10617 uip->ui_cmdp = uscmd; 10618 10619 /* 10620 * Commands sent with priority are intended for error recovery 10621 * situations, and do not have retries performed. 10622 */ 10623 if (path_flag == SD_PATH_DIRECT_PRIORITY) { 10624 uscmd->uscsi_flags |= USCSI_DIAGNOSE; 10625 } 10626 uscmd->uscsi_flags &= ~USCSI_NOINTR; 10627 10628 rval = scsi_uscsi_handle_cmd(dev, dataspace, uscmd, 10629 sd_uscsi_strategy, NULL, uip); 10630 10631 #ifdef SDDEBUG 10632 SD_INFO(SD_LOG_IO, un, "sd_send_scsi_cmd: " 10633 "uscsi_status: 0x%02x uscsi_resid:0x%x\n", 10634 uscmd->uscsi_status, uscmd->uscsi_resid); 10635 if (uscmd->uscsi_bufaddr != NULL) { 10636 SD_INFO(SD_LOG_IO, un, "sd_send_scsi_cmd: " 10637 "uscmd->uscsi_bufaddr: 0x%p uscmd->uscsi_buflen:%d\n", 10638 uscmd->uscsi_bufaddr, uscmd->uscsi_buflen); 10639 if (dataspace == UIO_SYSSPACE) { 10640 SD_DUMP_MEMORY(un, SD_LOG_IO, 10641 "data", (uchar_t *)uscmd->uscsi_bufaddr, 10642 uscmd->uscsi_buflen, SD_LOG_HEX); 10643 } 10644 } 10645 #endif 10646 10647 if (format == 1) { 10648 mutex_enter(SD_MUTEX(un)); 10649 un->un_f_format_in_progress = FALSE; 10650 mutex_exit(SD_MUTEX(un)); 10651 } 10652 10653 (void) scsi_uscsi_copyout_and_free((intptr_t)incmd, uscmd); 10654 kmem_free(uip, sizeof (struct sd_uscsi_info)); 10655 10656 return (rval); 10657 } 10658 10659 10660 /* 10661 * Function: sd_buf_iodone 10662 * 10663 * Description: Frees the sd_xbuf & returns the buf to its originator. 10664 * 10665 * Context: May be called from interrupt context. 10666 */ 10667 /* ARGSUSED */ 10668 static void 10669 sd_buf_iodone(int index, struct sd_lun *un, struct buf *bp) 10670 { 10671 struct sd_xbuf *xp; 10672 10673 ASSERT(un != NULL); 10674 ASSERT(bp != NULL); 10675 ASSERT(!mutex_owned(SD_MUTEX(un))); 10676 10677 SD_TRACE(SD_LOG_IO_CORE, un, "sd_buf_iodone: entry.\n"); 10678 10679 xp = SD_GET_XBUF(bp); 10680 ASSERT(xp != NULL); 10681 10682 mutex_enter(SD_MUTEX(un)); 10683 10684 /* 10685 * Grab time when the cmd completed. 10686 * This is used for determining if the system has been 10687 * idle long enough to make it idle to the PM framework. 10688 * This is for lowering the overhead, and therefore improving 10689 * performance per I/O operation. 10690 */ 10691 un->un_pm_idle_time = ddi_get_time(); 10692 10693 un->un_ncmds_in_driver--; 10694 ASSERT(un->un_ncmds_in_driver >= 0); 10695 SD_INFO(SD_LOG_IO, un, "sd_buf_iodone: un_ncmds_in_driver = %ld\n", 10696 un->un_ncmds_in_driver); 10697 10698 mutex_exit(SD_MUTEX(un)); 10699 10700 ddi_xbuf_done(bp, un->un_xbuf_attr); /* xbuf is gone after this */ 10701 biodone(bp); /* bp is gone after this */ 10702 10703 SD_TRACE(SD_LOG_IO_CORE, un, "sd_buf_iodone: exit.\n"); 10704 } 10705 10706 10707 /* 10708 * Function: sd_uscsi_iodone 10709 * 10710 * Description: Frees the sd_xbuf & returns the buf to its originator. 10711 * 10712 * Context: May be called from interrupt context. 
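 *
 *		Note: the kmem_free() below must mirror the allocation
 *		sizing in sd_uscsi_strategy(); both sides compute, in
 *		effect:
 *
 *		    size = sizeof (struct sd_xbuf);
 *		    if (uscsi_rqlen > SENSE_LENGTH)
 *			    size += MAX_SENSE_LENGTH - SENSE_LENGTH;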
10713 */ 10714 /* ARGSUSED */ 10715 static void 10716 sd_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp) 10717 { 10718 struct sd_xbuf *xp; 10719 10720 ASSERT(un != NULL); 10721 ASSERT(bp != NULL); 10722 10723 xp = SD_GET_XBUF(bp); 10724 ASSERT(xp != NULL); 10725 ASSERT(!mutex_owned(SD_MUTEX(un))); 10726 10727 SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: entry.\n"); 10728 10729 bp->b_private = xp->xb_private; 10730 10731 mutex_enter(SD_MUTEX(un)); 10732 10733 /* 10734 * Grab time when the cmd completed. 10735 * This is used for determining if the system has been 10736 * idle long enough to make it idle to the PM framework. 10737 * This is for lowering the overhead, and therefore improving 10738 * performance per I/O operation. 10739 */ 10740 un->un_pm_idle_time = ddi_get_time(); 10741 10742 un->un_ncmds_in_driver--; 10743 ASSERT(un->un_ncmds_in_driver >= 0); 10744 SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: un_ncmds_in_driver = %ld\n", 10745 un->un_ncmds_in_driver); 10746 10747 mutex_exit(SD_MUTEX(un)); 10748 10749 if (((struct uscsi_cmd *)(xp->xb_pktinfo))->uscsi_rqlen > 10750 SENSE_LENGTH) { 10751 kmem_free(xp, sizeof (struct sd_xbuf) - SENSE_LENGTH + 10752 MAX_SENSE_LENGTH); 10753 } else { 10754 kmem_free(xp, sizeof (struct sd_xbuf)); 10755 } 10756 10757 biodone(bp); 10758 10759 SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: exit.\n"); 10760 } 10761 10762 10763 /* 10764 * Function: sd_mapblockaddr_iostart 10765 * 10766 * Description: Verify request lies within the partition limits for 10767 * the indicated minor device. Issue "overrun" buf if 10768 * request would exceed partition range. Converts 10769 * partition-relative block address to absolute. 10770 * 10771 * Context: Can sleep 10772 * 10773 * Issues: This follows what the old code did, in terms of accessing 10774 * some of the partition info in the unit struct without holding 10775 * the mutex. This is a general issue: if the partition info 10776 * can be altered while IO is in progress... as soon as we send 10777 * a buf, its partitioning can be invalid before it gets to the 10778 * device. Probably the right fix is to move partitioning out 10779 * of the driver entirely. 10780 */ 10781 10782 static void 10783 sd_mapblockaddr_iostart(int index, struct sd_lun *un, struct buf *bp) 10784 { 10785 diskaddr_t nblocks; /* #blocks in the given partition */ 10786 daddr_t blocknum; /* Block number specified by the buf */ 10787 size_t requested_nblocks; 10788 size_t available_nblocks; 10789 int partition; 10790 diskaddr_t partition_offset; 10791 struct sd_xbuf *xp; 10792 10793 10794 ASSERT(un != NULL); 10795 ASSERT(bp != NULL); 10796 ASSERT(!mutex_owned(SD_MUTEX(un))); 10797 10798 SD_TRACE(SD_LOG_IO_PARTITION, un, 10799 "sd_mapblockaddr_iostart: entry: buf:0x%p\n", bp); 10800 10801 xp = SD_GET_XBUF(bp); 10802 ASSERT(xp != NULL); 10803 10804 /* 10805 * If the geometry is not indicated as valid, attempt to access 10806 * the unit & verify the geometry/label. This can be the case for 10807 * removable-media devices, or if the device was opened in 10808 * NDELAY/NONBLOCK mode. 10809 */ 10810 if (!SD_IS_VALID_LABEL(un) && 10811 (sd_ready_and_valid(un) != SD_READY_VALID)) { 10812 /* 10813 * For removable devices it is possible to start an I/O 10814 * without a media by opening the device in nodelay mode. 10815 * Also for writable CDs there can be many scenarios where 10816 * there is no geometry yet but the volume manager is trying to 10817 * issue a read() just because it can see the TOC on the CD. So 10818 * do not print a message for removables.
10819 */ 10820 if (!un->un_f_has_removable_media) { 10821 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 10822 "i/o to invalid geometry\n"); 10823 } 10824 bioerror(bp, EIO); 10825 bp->b_resid = bp->b_bcount; 10826 SD_BEGIN_IODONE(index, un, bp); 10827 return; 10828 } 10829 10830 partition = SDPART(bp->b_edev); 10831 10832 nblocks = 0; 10833 (void) cmlb_partinfo(un->un_cmlbhandle, partition, 10834 &nblocks, &partition_offset, NULL, NULL, (void *)SD_PATH_DIRECT); 10835 10836 /* 10837 * blocknum is the starting block number of the request. At this 10838 * point it is still relative to the start of the minor device. 10839 */ 10840 blocknum = xp->xb_blkno; 10841 10842 /* 10843 * Legacy: If the starting block number is one past the last block 10844 * in the partition, do not set B_ERROR in the buf. 10845 */ 10846 if (blocknum == nblocks) { 10847 goto error_exit; 10848 } 10849 10850 /* 10851 * Confirm that the first block of the request lies within the 10852 * partition limits. Also the requested number of bytes must be 10853 * a multiple of the system block size. 10854 */ 10855 if ((blocknum < 0) || (blocknum >= nblocks) || 10856 ((bp->b_bcount & (un->un_sys_blocksize - 1)) != 0)) { 10857 bp->b_flags |= B_ERROR; 10858 goto error_exit; 10859 } 10860 10861 /* 10862 * If the requsted # blocks exceeds the available # blocks, that 10863 * is an overrun of the partition. 10864 */ 10865 requested_nblocks = SD_BYTES2SYSBLOCKS(un, bp->b_bcount); 10866 available_nblocks = (size_t)(nblocks - blocknum); 10867 ASSERT(nblocks >= blocknum); 10868 10869 if (requested_nblocks > available_nblocks) { 10870 /* 10871 * Allocate an "overrun" buf to allow the request to proceed 10872 * for the amount of space available in the partition. The 10873 * amount not transferred will be added into the b_resid 10874 * when the operation is complete. The overrun buf 10875 * replaces the original buf here, and the original buf 10876 * is saved inside the overrun buf, for later use. 10877 */ 10878 size_t resid = SD_SYSBLOCKS2BYTES(un, 10879 (offset_t)(requested_nblocks - available_nblocks)); 10880 size_t count = bp->b_bcount - resid; 10881 /* 10882 * Note: count is an unsigned entity thus it'll NEVER 10883 * be less than 0 so ASSERT the original values are 10884 * correct. 10885 */ 10886 ASSERT(bp->b_bcount >= resid); 10887 10888 bp = sd_bioclone_alloc(bp, count, blocknum, 10889 (int (*)(struct buf *)) sd_mapblockaddr_iodone); 10890 xp = SD_GET_XBUF(bp); /* Update for 'new' bp! */ 10891 ASSERT(xp != NULL); 10892 } 10893 10894 /* At this point there should be no residual for this buf. */ 10895 ASSERT(bp->b_resid == 0); 10896 10897 /* Convert the block number to an absolute address. */ 10898 xp->xb_blkno += partition_offset; 10899 10900 SD_NEXT_IOSTART(index, un, bp); 10901 10902 SD_TRACE(SD_LOG_IO_PARTITION, un, 10903 "sd_mapblockaddr_iostart: exit 0: buf:0x%p\n", bp); 10904 10905 return; 10906 10907 error_exit: 10908 bp->b_resid = bp->b_bcount; 10909 SD_BEGIN_IODONE(index, un, bp); 10910 SD_TRACE(SD_LOG_IO_PARTITION, un, 10911 "sd_mapblockaddr_iostart: exit 1: buf:0x%p\n", bp); 10912 } 10913 10914 10915 /* 10916 * Function: sd_mapblockaddr_iodone 10917 * 10918 * Description: Completion-side processing for partition management. 10919 * 10920 * Context: May be called under interrupt context 10921 */ 10922 10923 static void 10924 sd_mapblockaddr_iodone(int index, struct sd_lun *un, struct buf *bp) 10925 { 10926 /* int partition; */ /* Not used, see below. 
 */
	ASSERT(un != NULL);
	ASSERT(bp != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	SD_TRACE(SD_LOG_IO_PARTITION, un,
	    "sd_mapblockaddr_iodone: entry: buf:0x%p\n", bp);

	if (bp->b_iodone == (int (*)(struct buf *)) sd_mapblockaddr_iodone) {
		/*
		 * We have an "overrun" buf to deal with...
		 */
		struct sd_xbuf	*xp;
		struct buf	*obp;	/* ptr to the original buf */

		xp = SD_GET_XBUF(bp);
		ASSERT(xp != NULL);

		/* Retrieve the pointer to the original buf */
		obp = (struct buf *)xp->xb_private;
		ASSERT(obp != NULL);

		obp->b_resid = obp->b_bcount - (bp->b_bcount - bp->b_resid);
		bioerror(obp, bp->b_error);

		sd_bioclone_free(bp);

		/*
		 * Get back the original buf.
		 * Note that since the restoration of xb_blkno below
		 * was removed, the sd_xbuf is not needed.
		 */
		bp = obp;
		/*
		 * xp = SD_GET_XBUF(bp);
		 * ASSERT(xp != NULL);
		 */
	}

	/*
	 * Convert xp->xb_blkno back to a minor-device relative value.
	 * Note: this has been commented out, as it is not needed in the
	 * current implementation of the driver (ie, since this function
	 * is at the top of the layering chains the info would simply be
	 * discarded) and it is in the "hot" IO path.
	 *
	 * partition = getminor(bp->b_edev) & SDPART_MASK;
	 * xp->xb_blkno -= un->un_offset[partition];
	 */

	SD_NEXT_IODONE(index, un, bp);

	SD_TRACE(SD_LOG_IO_PARTITION, un,
	    "sd_mapblockaddr_iodone: exit: buf:0x%p\n", bp);
}


/*
 * Function: sd_mapblocksize_iostart
 *
 * Description: Convert between system block size (un->un_sys_blocksize)
 *		and target block size (un->un_tgt_blocksize).
 *
 * Context: Can sleep to allocate resources.
 *
 * Assumptions: A higher layer has already performed any partition validation,
 *		and converted the xp->xb_blkno to an absolute value relative
 *		to the start of the device.
 *
 *		It is also assumed that the higher layer has implemented
 *		an "overrun" mechanism for the case where the request would
 *		read/write beyond the end of a partition. In this case we
 *		assume (and ASSERT) that bp->b_resid == 0.
 *
 *		Note: The implementation for this routine assumes the target
 *		block size remains constant between allocation and transport.
 */

static void
sd_mapblocksize_iostart(int index, struct sd_lun *un, struct buf *bp)
{
	struct sd_mapblocksize_info *bsp;
	struct sd_xbuf *xp;
	offset_t first_byte;
	daddr_t start_block, end_block;
	daddr_t request_bytes;
	ushort_t is_aligned = FALSE;

	ASSERT(un != NULL);
	ASSERT(bp != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT(bp->b_resid == 0);

	SD_TRACE(SD_LOG_IO_RMMEDIA, un,
	    "sd_mapblocksize_iostart: entry: buf:0x%p\n", bp);

	/*
	 * For a non-writable CD, a write request is an error
	 */
	if (ISCD(un) && ((bp->b_flags & B_READ) == 0) &&
	    (un->un_f_mmc_writable_media == FALSE)) {
		bioerror(bp, EIO);
		bp->b_resid = bp->b_bcount;
		SD_BEGIN_IODONE(index, un, bp);
		return;
	}

	/*
	 * We do not need a shadow buf if the device is using
	 * un->un_sys_blocksize as its block size or if bcount == 0.
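	 * (For example, with hypothetical sizes: a CD-ROM with a
	 * 2048-byte target block size under a 512-byte system block
	 * size needs a shadow buf for any request that is not aligned
	 * to 2048 bytes, while a disk whose target block size is also
	 * 512 bytes never needs one.)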
	 * When no shadow buf is needed, no layer-private data block is
	 * allocated either.
	 */
	if ((un->un_tgt_blocksize == un->un_sys_blocksize) ||
	    (bp->b_bcount == 0)) {
		goto done;
	}

#if defined(__i386) || defined(__amd64)
	/* We do not support non-block-aligned transfers for ROD devices */
	ASSERT(!ISROD(un));
#endif

	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);

	SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: "
	    "tgt_blocksize:0x%x sys_blocksize: 0x%x\n",
	    un->un_tgt_blocksize, un->un_sys_blocksize);
	SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: "
	    "request start block:0x%x\n", xp->xb_blkno);
	SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: "
	    "request len:0x%x\n", bp->b_bcount);

	/*
	 * Allocate the layer-private data area for the mapblocksize layer.
	 * Layers are allowed to use the xb_private member of the sd_xbuf
	 * struct to store the pointer to their layer-private data block, but
	 * each layer also has the responsibility of restoring the prior
	 * contents of xb_private before returning the buf/xbuf to the
	 * higher layer that sent it.
	 *
	 * Here we save the prior contents of xp->xb_private into the
	 * bsp->mbs_oprivate field of our layer-private data area. This value
	 * is restored by sd_mapblocksize_iodone() just prior to freeing up
	 * the layer-private area and returning the buf/xbuf to the layer
	 * that sent it.
	 *
	 * Note that here we use kmem_zalloc for the allocation as there are
	 * parts of the mapblocksize code that expect certain fields to be
	 * zero unless explicitly set to a required value.
	 */
	bsp = kmem_zalloc(sizeof (struct sd_mapblocksize_info), KM_SLEEP);
	bsp->mbs_oprivate = xp->xb_private;
	xp->xb_private = bsp;

	/*
	 * This treats the data on the disk (target) as an array of bytes.
	 * first_byte is the byte offset, from the beginning of the device,
	 * to the location of the request. This is converted from a
	 * un->un_sys_blocksize block address to a byte offset, and then back
	 * to a block address based upon a un->un_tgt_blocksize block size.
	 *
	 * xp->xb_blkno should be absolute upon entry into this function,
	 * but it is based upon partitions that use the "system"
	 * block size. It must be adjusted to reflect the block size of
	 * the target.
	 *
	 * Note that end_block is actually the block that follows the last
	 * block of the request, but that's what is needed for the computation.
	 */
	first_byte = SD_SYSBLOCKS2BYTES(un, (offset_t)xp->xb_blkno);
	start_block = xp->xb_blkno = first_byte / un->un_tgt_blocksize;
	end_block = (first_byte + bp->b_bcount + un->un_tgt_blocksize - 1) /
	    un->un_tgt_blocksize;

	/* request_bytes is rounded up to a multiple of the target block size */
	request_bytes = (end_block - start_block) * un->un_tgt_blocksize;

	/*
	 * See if the starting address of the request and the request
	 * length are aligned on a un->un_tgt_blocksize boundary. If aligned
	 * then we do not need to allocate a shadow buf to handle the request.
	 */
	if (((first_byte % un->un_tgt_blocksize) == 0) &&
	    ((bp->b_bcount % un->un_tgt_blocksize) == 0)) {
		is_aligned = TRUE;
	}

	if ((bp->b_flags & B_READ) == 0) {
		/*
		 * Lock the range for a write operation.
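		 * The write map taken here also serializes overlapping
		 * requests on this block range; as we read the code, this
		 * keeps the read phase of one read-modify-write from seeing
		 * blocks that another request is still rewriting.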
		 * An aligned request is considered a simple write;
		 * otherwise the request must be a read-modify-write.
		 */
		bsp->mbs_wmp = sd_range_lock(un, start_block, end_block - 1,
		    (is_aligned == TRUE) ? SD_WTYPE_SIMPLE : SD_WTYPE_RMW);
	}

	/*
	 * Alloc a shadow buf if the request is not aligned. Also, this is
	 * where the READ command is generated for a read-modify-write. (The
	 * write phase is deferred until after the read completes.)
	 */
	if (is_aligned == FALSE) {

		struct sd_mapblocksize_info *shadow_bsp;
		struct sd_xbuf	*shadow_xp;
		struct buf	*shadow_bp;

		/*
		 * Allocate the shadow buf and its associated xbuf. Note that
		 * after this call the xb_blkno value in both the original
		 * buf's sd_xbuf _and_ the shadow buf's sd_xbuf will be the
		 * same: absolute, relative to the start of the device, and
		 * adjusted for the target block size. The b_blkno in the
		 * shadow buf will also be set to this value. We should never
		 * change b_blkno in the original bp however.
		 *
		 * Note also that the shadow buf will always need to be a
		 * READ command, regardless of whether the incoming command
		 * is a READ or a WRITE.
		 */
		shadow_bp = sd_shadow_buf_alloc(bp, request_bytes, B_READ,
		    xp->xb_blkno,
		    (int (*)(struct buf *)) sd_mapblocksize_iodone);

		shadow_xp = SD_GET_XBUF(shadow_bp);

		/*
		 * Allocate the layer-private data for the shadow buf.
		 * (No need to preserve xb_private in the shadow xbuf.)
		 */
		shadow_xp->xb_private = shadow_bsp =
		    kmem_zalloc(sizeof (struct sd_mapblocksize_info), KM_SLEEP);

		/*
		 * bsp->mbs_copy_offset is used later by sd_mapblocksize_iodone
		 * to figure out where the start of the user data is (based upon
		 * the system block size) in the data returned by the READ
		 * command (which will be based upon the target blocksize). Note
		 * that this is only really used if the request is unaligned.
		 */
		bsp->mbs_copy_offset = (ssize_t)(first_byte -
		    ((offset_t)xp->xb_blkno * un->un_tgt_blocksize));
		ASSERT((bsp->mbs_copy_offset >= 0) &&
		    (bsp->mbs_copy_offset < un->un_tgt_blocksize));

		shadow_bsp->mbs_copy_offset = bsp->mbs_copy_offset;

		shadow_bsp->mbs_layer_index = bsp->mbs_layer_index = index;

		/* Transfer the wmap (if any) to the shadow buf */
		shadow_bsp->mbs_wmp = bsp->mbs_wmp;
		bsp->mbs_wmp = NULL;

		/*
		 * The shadow buf goes on from here in place of the
		 * original buf.
		 */
		shadow_bsp->mbs_orig_bp = bp;
		bp = shadow_bp;
	}

	SD_INFO(SD_LOG_IO_RMMEDIA, un,
	    "sd_mapblocksize_iostart: tgt start block:0x%x\n", xp->xb_blkno);
	SD_INFO(SD_LOG_IO_RMMEDIA, un,
	    "sd_mapblocksize_iostart: tgt request len:0x%x\n",
	    request_bytes);
	SD_INFO(SD_LOG_IO_RMMEDIA, un,
	    "sd_mapblocksize_iostart: shadow buf:0x%x\n", bp);

done:
	SD_NEXT_IOSTART(index, un, bp);

	SD_TRACE(SD_LOG_IO_RMMEDIA, un,
	    "sd_mapblocksize_iostart: exit: buf:0x%p\n", bp);
}


/*
 * Function: sd_mapblocksize_iodone
 *
 * Description: Completion side processing for block-size mapping.
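 *
 *		For the READ phase of a read-modify-write, this routine
 *		copies the caller's data over the blocks just read,
 *		converts the shadow buf from a READ to a WRITE, and
 *		re-dispatches it via taskq; it runs again when that
 *		WRITE completes.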
 *
 * Context: May be called under interrupt context
 */

static void
sd_mapblocksize_iodone(int index, struct sd_lun *un, struct buf *bp)
{
	struct sd_mapblocksize_info *bsp;
	struct sd_xbuf *xp;
	struct sd_xbuf *orig_xp;	/* sd_xbuf for the original buf */
	struct buf *orig_bp;		/* ptr to the original buf */
	offset_t shadow_end;
	offset_t request_end;
	offset_t shadow_start;
	ssize_t copy_offset;
	size_t copy_length;
	size_t shortfall;
	uint_t is_write;	/* TRUE if this bp is a WRITE */
	uint_t has_wmap;	/* TRUE if this bp has a wmap */

	ASSERT(un != NULL);
	ASSERT(bp != NULL);

	SD_TRACE(SD_LOG_IO_RMMEDIA, un,
	    "sd_mapblocksize_iodone: entry: buf:0x%p\n", bp);

	/*
	 * There is no shadow buf or layer-private data if the target is
	 * using un->un_sys_blocksize as its block size or if bcount == 0.
	 */
	if ((un->un_tgt_blocksize == un->un_sys_blocksize) ||
	    (bp->b_bcount == 0)) {
		goto exit;
	}

	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);

	/* Retrieve the pointer to the layer-private data area from the xbuf. */
	bsp = xp->xb_private;

	is_write = ((bp->b_flags & B_READ) == 0) ? TRUE : FALSE;
	has_wmap = (bsp->mbs_wmp != NULL) ? TRUE : FALSE;

	if (is_write) {
		/*
		 * For a WRITE request we must free up the block range that
		 * we have locked up. This holds regardless of whether this is
		 * an aligned write request or a read-modify-write request.
		 */
		sd_range_unlock(un, bsp->mbs_wmp);
		bsp->mbs_wmp = NULL;
	}

	if ((bp->b_iodone != (int(*)(struct buf *))sd_mapblocksize_iodone)) {
		/*
		 * An aligned read or write command will have no shadow buf;
		 * there is not much else to do with it.
		 */
		goto done;
	}

	orig_bp = bsp->mbs_orig_bp;
	ASSERT(orig_bp != NULL);
	orig_xp = SD_GET_XBUF(orig_bp);
	ASSERT(orig_xp != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	if (!is_write && has_wmap) {
		/*
		 * A READ with a wmap means this is the READ phase of a
		 * read-modify-write. If an error occurred on the READ then
		 * we do not proceed with the WRITE phase or copy any data.
		 * Just release the write maps and return with an error.
		 */
		if ((bp->b_resid != 0) || (bp->b_error != 0)) {
			orig_bp->b_resid = orig_bp->b_bcount;
			bioerror(orig_bp, bp->b_error);
			sd_range_unlock(un, bsp->mbs_wmp);
			goto freebuf_done;
		}
	}

	/*
	 * Here is where we set up to copy the data from the shadow buf
	 * into the space associated with the original buf.
	 *
	 * To deal with the conversion between block sizes, these
	 * computations treat the data as an array of bytes, with the
	 * first byte (byte 0) corresponding to the first byte in the
	 * first block on the disk.
	 */

	/*
	 * shadow_start and shadow_end indicate the location and size of
	 * the data returned with the shadow IO request.
	 */
	shadow_start = SD_TGTBLOCKS2BYTES(un, (offset_t)xp->xb_blkno);
	shadow_end = shadow_start + bp->b_bcount - bp->b_resid;

	/*
	 * copy_offset gives the offset (in bytes) from the start of the first
	 * block of the READ request to the beginning of the data.
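	 * (Worked example, hypothetical numbers: for a request starting
	 * at system block 5 with un_sys_blocksize = 512 and
	 * un_tgt_blocksize = 2048, first_byte was 5 * 512 = 2560, the
	 * shadow READ started at target block 2560 / 2048 = 1, and
	 * copy_offset = 2560 - (1 * 2048) = 512.)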
	 * We retrieve this value from the layer-private data area, where
	 * it was saved by sd_mapblocksize_iostart(). copy_length gives
	 * the amount of data to be copied (in bytes).
	 */
	copy_offset = bsp->mbs_copy_offset;
	ASSERT((copy_offset >= 0) && (copy_offset < un->un_tgt_blocksize));
	copy_length = orig_bp->b_bcount;
	request_end = shadow_start + copy_offset + orig_bp->b_bcount;

	/*
	 * Set up the resid and error fields of orig_bp as appropriate.
	 */
	if (shadow_end >= request_end) {
		/* We got all the requested data; set resid to zero */
		orig_bp->b_resid = 0;
	} else {
		/*
		 * We failed to get enough data to fully satisfy the original
		 * request. Just copy back whatever data we got and set
		 * up the residual and error code as required.
		 *
		 * 'shortfall' is the amount by which the data received with the
		 * shadow buf has "fallen short" of the requested amount.
		 */
		shortfall = (size_t)(request_end - shadow_end);

		if (shortfall > orig_bp->b_bcount) {
			/*
			 * We did not get enough data to even partially
			 * fulfill the original request. The residual is
			 * equal to the amount requested.
			 */
			orig_bp->b_resid = orig_bp->b_bcount;
		} else {
			/*
			 * We did not get all the data that we requested
			 * from the device, but we will try to return what
			 * portion we did get.
			 */
			orig_bp->b_resid = shortfall;
		}
		ASSERT(copy_length >= orig_bp->b_resid);
		copy_length -= orig_bp->b_resid;
	}

	/* Propagate the error code from the shadow buf to the original buf */
	bioerror(orig_bp, bp->b_error);

	if (is_write) {
		goto freebuf_done;	/* No data copying for a WRITE */
	}

	if (has_wmap) {
		/*
		 * This is a READ command from the READ phase of a
		 * read-modify-write request. We have to copy the data given
		 * by the user OVER the data returned by the READ command,
		 * then convert the command from a READ to a WRITE and send
		 * it back to the target.
		 */
		bcopy(orig_bp->b_un.b_addr, bp->b_un.b_addr + copy_offset,
		    copy_length);

		bp->b_flags &= ~((int)B_READ);	/* Convert to a WRITE */

		/*
		 * Dispatch the WRITE command to the taskq thread, which
		 * will in turn send the command to the target. When the
		 * WRITE command completes, we (sd_mapblocksize_iodone())
		 * will get called again as part of the iodone chain
		 * processing for it. Note that we will still be dealing
		 * with the shadow buf at that point.
		 */
		if (taskq_dispatch(sd_wmr_tq, sd_read_modify_write_task, bp,
		    KM_NOSLEEP) != 0) {
			/*
			 * Dispatch was successful so we are done. Return
			 * without going any higher up the iodone chain. Do
			 * not free up any layer-private data until after the
			 * WRITE completes.
			 */
			return;
		}

		/*
		 * Dispatch of the WRITE command failed; set up the error
		 * condition and send this IO back up the iodone chain.
		 */
		bioerror(orig_bp, EIO);
		orig_bp->b_resid = orig_bp->b_bcount;

	} else {
		/*
		 * This is a regular READ request (ie, not a RMW). Copy the
		 * data from the shadow buf into the original buf.
		 * The copy_offset compensates for any "misalignment" between
		 * the shadow buf (with its un->un_tgt_blocksize blocks) and
		 * the original buf (with its un->un_sys_blocksize blocks).
		 */
		bcopy(bp->b_un.b_addr + copy_offset, orig_bp->b_un.b_addr,
		    copy_length);
	}

freebuf_done:

	/*
	 * At this point we still have both the shadow buf AND the original
	 * buf to deal with, as well as the layer-private data area in each.
	 * Local variables are as follows:
	 *
	 * bp -- points to shadow buf
	 * xp -- points to xbuf of shadow buf
	 * bsp -- points to layer-private data area of shadow buf
	 * orig_bp -- points to original buf
	 *
	 * First free the shadow buf and its associated xbuf, then free the
	 * layer-private data area from the shadow buf. There is no need to
	 * restore xb_private in the shadow xbuf.
	 */
	sd_shadow_buf_free(bp);
	kmem_free(bsp, sizeof (struct sd_mapblocksize_info));

	/*
	 * Now update the local variables to point to the original buf, xbuf,
	 * and layer-private area.
	 */
	bp = orig_bp;
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);
	ASSERT(xp == orig_xp);
	bsp = xp->xb_private;
	ASSERT(bsp != NULL);

done:
	/*
	 * Restore xb_private to whatever it was set to by the next higher
	 * layer in the chain, then free the layer-private data area.
	 */
	xp->xb_private = bsp->mbs_oprivate;
	kmem_free(bsp, sizeof (struct sd_mapblocksize_info));

exit:
	SD_TRACE(SD_LOG_IO_RMMEDIA, SD_GET_UN(bp),
	    "sd_mapblocksize_iodone: calling SD_NEXT_IODONE: buf:0x%p\n", bp);

	SD_NEXT_IODONE(index, un, bp);
}


/*
 * Function: sd_checksum_iostart
 *
 * Description: A stub function for a layer that's currently not used.
 *		For now just a placeholder.
 *
 * Context: Kernel thread context
 */

static void
sd_checksum_iostart(int index, struct sd_lun *un, struct buf *bp)
{
	ASSERT(un != NULL);
	ASSERT(bp != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	SD_NEXT_IOSTART(index, un, bp);
}


/*
 * Function: sd_checksum_iodone
 *
 * Description: A stub function for a layer that's currently not used.
 *		For now just a placeholder.
 *
 * Context: May be called under interrupt context
 */

static void
sd_checksum_iodone(int index, struct sd_lun *un, struct buf *bp)
{
	ASSERT(un != NULL);
	ASSERT(bp != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	SD_NEXT_IODONE(index, un, bp);
}


/*
 * Function: sd_checksum_uscsi_iostart
 *
 * Description: A stub function for a layer that's currently not used.
 *		For now just a placeholder.
 *
 * Context: Kernel thread context
 */

static void
sd_checksum_uscsi_iostart(int index, struct sd_lun *un, struct buf *bp)
{
	ASSERT(un != NULL);
	ASSERT(bp != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	SD_NEXT_IOSTART(index, un, bp);
}


/*
 * Function: sd_checksum_uscsi_iodone
 *
 * Description: A stub function for a layer that's currently not used.
 *		For now just a placeholder.
 *
 * Context: May be called under interrupt context
 */

static void
sd_checksum_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp)
{
	ASSERT(un != NULL);
	ASSERT(bp != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	SD_NEXT_IODONE(index, un, bp);
}


/*
 * Function: sd_pm_iostart
 *
 * Description: iostart-side routine for power management.
 *
 * Context: Kernel thread context
 */

static void
sd_pm_iostart(int index, struct sd_lun *un, struct buf *bp)
{
	ASSERT(un != NULL);
	ASSERT(bp != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT(!mutex_owned(&un->un_pm_mutex));

	SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: entry\n");

	if (sd_pm_entry(un) != DDI_SUCCESS) {
		/*
		 * Set up to return the failed buf back up the 'iodone'
		 * side of the calling chain.
		 */
		bioerror(bp, EIO);
		bp->b_resid = bp->b_bcount;

		SD_BEGIN_IODONE(index, un, bp);

		SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: exit\n");
		return;
	}

	SD_NEXT_IOSTART(index, un, bp);

	SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: exit\n");
}


/*
 * Function: sd_pm_iodone
 *
 * Description: iodone-side routine for power management.
 *
 * Context: may be called from interrupt context
 */

static void
sd_pm_iodone(int index, struct sd_lun *un, struct buf *bp)
{
	ASSERT(un != NULL);
	ASSERT(bp != NULL);
	ASSERT(!mutex_owned(&un->un_pm_mutex));

	SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iodone: entry\n");

	/*
	 * After attach the following flag is only read, so don't
	 * take the penalty of acquiring a mutex for it.
	 */
	if (un->un_f_pm_is_enabled == TRUE) {
		sd_pm_exit(un);
	}

	SD_NEXT_IODONE(index, un, bp);

	SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iodone: exit\n");
}


/*
 * Function: sd_core_iostart
 *
 * Description: Primary driver function for enqueuing buf(9S) structs from
 *		the system and initiating IO to the target device
 *
 * Context: Kernel thread context. Can sleep.
 *
 * Assumptions: - The given xp->xb_blkno is absolute
 *		  (ie, relative to the start of the device).
 *		- The IO is to be done using the native blocksize of
 *		  the device, as specified in un->un_tgt_blocksize.
 */
/* ARGSUSED */
static void
sd_core_iostart(int index, struct sd_lun *un, struct buf *bp)
{
	struct sd_xbuf *xp;

	ASSERT(un != NULL);
	ASSERT(bp != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT(bp->b_resid == 0);

	SD_TRACE(SD_LOG_IO_CORE, un, "sd_core_iostart: entry: bp:0x%p\n", bp);

	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);

	mutex_enter(SD_MUTEX(un));

	/*
	 * If we are currently in the failfast state, fail any new IO
	 * that has B_FAILFAST set, then return.
	 */
	if ((bp->b_flags & B_FAILFAST) &&
	    (un->un_failfast_state == SD_FAILFAST_ACTIVE)) {
		mutex_exit(SD_MUTEX(un));
		bioerror(bp, EIO);
		bp->b_resid = bp->b_bcount;
		SD_BEGIN_IODONE(index, un, bp);
		return;
	}

	if (SD_IS_DIRECT_PRIORITY(xp)) {
		/*
		 * Priority command -- transport it immediately.
		 *
		 * Note: We may want to assert that USCSI_DIAGNOSE is set,
		 * because all direct priority commands should be associated
		 * with error recovery actions which we don't want to retry.
		 */
		sd_start_cmds(un, bp);
	} else {
		/*
		 * Normal command -- add it to the wait queue, then start
		 * transporting commands from the wait queue.
		 */
		sd_add_buf_to_waitq(un, bp);
		SD_UPDATE_KSTATS(un, kstat_waitq_enter, bp);
		sd_start_cmds(un, NULL);
	}

	mutex_exit(SD_MUTEX(un));

	SD_TRACE(SD_LOG_IO_CORE, un, "sd_core_iostart: exit: bp:0x%p\n", bp);
}


/*
 * Function: sd_init_cdb_limits
 *
 * Description: This is to handle scsi_pkt initialization differences
 *		between the driver platforms.
 *
 *		Legacy behaviors:
 *
 *		If the block number or the sector count exceeds the
 *		capabilities of a Group 0 command, shift over to a
 *		Group 1 command. We don't blindly use Group 1
 *		commands because a) some drives (CDC Wren IVs) get a
 *		bit confused, and b) there is probably a fair amount
 *		of speed difference for a target to receive and decode
 *		a 10 byte command instead of a 6 byte command.
 *
 *		The xfer time difference of 6 vs 10 byte CDBs is
 *		still significant so this code is still worthwhile.
 *		10 byte CDBs are very inefficient with the fas HBA driver
 *		and older disks. Each CDB byte took 1 usec with some
 *		popular disks.
 *
 * Context: Must be called at attach time
 */

static void
sd_init_cdb_limits(struct sd_lun *un)
{
	int hba_cdb_limit;

	/*
	 * Use CDB_GROUP1 commands for most devices except for
	 * parallel SCSI fixed drives in which case we get better
	 * performance using CDB_GROUP0 commands (where applicable).
	 */
	un->un_mincdb = SD_CDB_GROUP1;
#if !defined(__fibre)
	if (!un->un_f_is_fibre && !un->un_f_cfg_is_atapi && !ISROD(un) &&
	    !un->un_f_has_removable_media) {
		un->un_mincdb = SD_CDB_GROUP0;
	}
#endif

	/*
	 * Try to read the max-cdb-length supported by the HBA.
	 */
	un->un_max_hba_cdb = scsi_ifgetcap(SD_ADDRESS(un), "max-cdb-length", 1);
	if (0 >= un->un_max_hba_cdb) {
		un->un_max_hba_cdb = CDB_GROUP4;
		hba_cdb_limit = SD_CDB_GROUP4;
	} else if (0 < un->un_max_hba_cdb &&
	    un->un_max_hba_cdb < CDB_GROUP1) {
		hba_cdb_limit = SD_CDB_GROUP0;
	} else if (CDB_GROUP1 <= un->un_max_hba_cdb &&
	    un->un_max_hba_cdb < CDB_GROUP5) {
		hba_cdb_limit = SD_CDB_GROUP1;
	} else if (CDB_GROUP5 <= un->un_max_hba_cdb &&
	    un->un_max_hba_cdb < CDB_GROUP4) {
		hba_cdb_limit = SD_CDB_GROUP5;
	} else {
		hba_cdb_limit = SD_CDB_GROUP4;
	}

	/*
	 * Use CDB_GROUP5 commands for removable devices. Use CDB_GROUP4
	 * commands for fixed disks unless we are building for a 32 bit
	 * kernel.
	 */
#ifdef _LP64
	un->un_maxcdb = (un->un_f_has_removable_media) ? SD_CDB_GROUP5 :
	    min(hba_cdb_limit, SD_CDB_GROUP4);
#else
	un->un_maxcdb = (un->un_f_has_removable_media) ? SD_CDB_GROUP5 :
	    min(hba_cdb_limit, SD_CDB_GROUP1);
#endif

	un->un_status_len = (int)((un->un_f_arq_enabled == TRUE)
	    ? sizeof (struct scsi_arq_status) : 1);
	un->un_cmd_timeout = (ushort_t)sd_io_time;
	un->un_uscsi_timeout = ((ISCD(un)) ?
	    2 : 1) * un->un_cmd_timeout;
}


/*
 * Function: sd_initpkt_for_buf
 *
 * Description: Allocate and initialize for transport a scsi_pkt struct,
 *		based upon the info specified in the given buf struct.
 *
 *		Assumes the xb_blkno in the request is absolute (ie,
 *		relative to the start of the device, NOT the partition!).
 *		Also assumes that the request is using the native block
 *		size of the device (as returned by the READ CAPACITY
 *		command).
 *
 * Return Code: SD_PKT_ALLOC_SUCCESS
 *		SD_PKT_ALLOC_FAILURE
 *		SD_PKT_ALLOC_FAILURE_NO_DMA
 *		SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL
 *
 * Context: Kernel thread and may be called from software interrupt context
 *		as part of a sdrunout callback. This function may not block or
 *		call routines that block
 */

static int
sd_initpkt_for_buf(struct buf *bp, struct scsi_pkt **pktpp)
{
	struct sd_xbuf *xp;
	struct scsi_pkt *pktp = NULL;
	struct sd_lun *un;
	size_t blockcount;
	daddr_t startblock;
	int rval;
	int cmd_flags;

	ASSERT(bp != NULL);
	ASSERT(pktpp != NULL);
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);
	un = SD_GET_UN(bp);
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp->b_resid == 0);

	SD_TRACE(SD_LOG_IO_CORE, un,
	    "sd_initpkt_for_buf: entry: buf:0x%p\n", bp);

#if defined(__i386) || defined(__amd64)	/* DMAFREE for x86 only */
	if (xp->xb_pkt_flags & SD_XB_DMA_FREED) {
		/*
		 * Already have a scsi_pkt -- just need DMA resources.
		 * We must recompute the CDB in case the mapping returns
		 * a nonzero pkt_resid.
		 * Note: if this is a portion of a PKT_DMA_PARTIAL transfer
		 * that is being retried, the unmap/remap of the DMA resources
		 * will result in the entire transfer starting over again
		 * from the very first block.
		 */
		ASSERT(xp->xb_pktp != NULL);
		pktp = xp->xb_pktp;
	} else {
		pktp = NULL;
	}
#endif /* __i386 || __amd64 */

	startblock = xp->xb_blkno;	/* Absolute block num. */
	blockcount = SD_BYTES2TGTBLOCKS(un, bp->b_bcount);

#if defined(__i386) || defined(__amd64)	/* DMAFREE for x86 only */

	cmd_flags = un->un_pkt_flags | (xp->xb_pkt_flags & SD_XB_INITPKT_MASK);

#else

	cmd_flags = un->un_pkt_flags | xp->xb_pkt_flags;

#endif

	/*
	 * sd_setup_rw_pkt will determine the appropriate CDB group to use,
	 * call scsi_init_pkt, and build the CDB.
	 */
	rval = sd_setup_rw_pkt(un, &pktp, bp,
	    cmd_flags, sdrunout, (caddr_t)un,
	    startblock, blockcount);

	if (rval == 0) {
		/*
		 * Success.
		 *
		 * If partial DMA is being used and required for this transfer,
		 * set it up here.
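		 * With PKT_DMA_PARTIAL, scsi_init_pkt(9F) may bind only
		 * part of the buffer and reports the unbound remainder in
		 * pkt_resid; that remainder is saved in xb_dma_resid and
		 * consumed later by sd_setup_next_rw_pkt().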
		 */
		if ((un->un_pkt_flags & PKT_DMA_PARTIAL) != 0 &&
		    (pktp->pkt_resid != 0)) {

			/*
			 * Save the CDB length and pkt_resid for the
			 * next xfer
			 */
			xp->xb_dma_resid = pktp->pkt_resid;

			/* rezero resid */
			pktp->pkt_resid = 0;

		} else {
			xp->xb_dma_resid = 0;
		}

		pktp->pkt_flags = un->un_tagflags;
		pktp->pkt_time = un->un_cmd_timeout;
		pktp->pkt_comp = sdintr;

		pktp->pkt_private = bp;
		*pktpp = pktp;

		SD_TRACE(SD_LOG_IO_CORE, un,
		    "sd_initpkt_for_buf: exit: buf:0x%p\n", bp);

#if defined(__i386) || defined(__amd64)	/* DMAFREE for x86 only */
		xp->xb_pkt_flags &= ~SD_XB_DMA_FREED;
#endif

		return (SD_PKT_ALLOC_SUCCESS);

	}

	/*
	 * SD_PKT_ALLOC_FAILURE is the only expected failure code
	 * from sd_setup_rw_pkt.
	 */
	ASSERT(rval == SD_PKT_ALLOC_FAILURE);

	if (rval == SD_PKT_ALLOC_FAILURE) {
		*pktpp = NULL;
		/*
		 * Set the driver state to RWAIT to indicate the driver
		 * is waiting on resource allocations. The driver will not
		 * suspend, pm_suspend, or detach while the state is RWAIT.
		 */
		New_state(un, SD_STATE_RWAIT);

		SD_ERROR(SD_LOG_IO_CORE, un,
		    "sd_initpkt_for_buf: No pktp. exit bp:0x%p\n", bp);

		if ((bp->b_flags & B_ERROR) != 0) {
			return (SD_PKT_ALLOC_FAILURE_NO_DMA);
		}
		return (SD_PKT_ALLOC_FAILURE);
	} else {
		/*
		 * SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL
		 *
		 * This should never happen. Maybe someone messed with the
		 * kernel's minphys?
		 */
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "Request rejected: too large for CDB: "
		    "lba:0x%08lx len:0x%08lx\n", startblock, blockcount);
		SD_ERROR(SD_LOG_IO_CORE, un,
		    "sd_initpkt_for_buf: No cp. exit bp:0x%p\n", bp);
		return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL);

	}
}


/*
 * Function: sd_destroypkt_for_buf
 *
 * Description: Free the scsi_pkt(9S) for the given bp (buf IO processing).
 *
 * Context: Kernel thread or interrupt context
 */

static void
sd_destroypkt_for_buf(struct buf *bp)
{
	ASSERT(bp != NULL);
	ASSERT(SD_GET_UN(bp) != NULL);

	SD_TRACE(SD_LOG_IO_CORE, SD_GET_UN(bp),
	    "sd_destroypkt_for_buf: entry: buf:0x%p\n", bp);

	ASSERT(SD_GET_PKTP(bp) != NULL);
	scsi_destroy_pkt(SD_GET_PKTP(bp));

	SD_TRACE(SD_LOG_IO_CORE, SD_GET_UN(bp),
	    "sd_destroypkt_for_buf: exit: buf:0x%p\n", bp);
}

/*
 * Function: sd_setup_rw_pkt
 *
 * Description: Determines appropriate CDB group for the requested LBA
 *		and transfer length, calls scsi_init_pkt, and builds
 *		the CDB. Do not use for partial DMA transfers except
 *		for the initial transfer since the CDB size must
 *		remain constant.
 *
 * Context: Kernel thread and may be called from software interrupt
 *		context as part of a sdrunout callback.
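 *
 *		Note (standard SCSI background): a Group 0 (6-byte) CDB
 *		carries a 21-bit LBA and an 8-bit count, Group 1 (10-byte)
 *		a 32-bit LBA and a 16-bit count, Group 5 (12-byte) a
 *		32-bit LBA and a 32-bit count, and Group 4 (16-byte) a
 *		64-bit LBA and a 32-bit count; sd_cdbtab encodes these
 *		limits.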
 *
 *		This function may not block or call routines that block.
 */

int
sd_setup_rw_pkt(struct sd_lun *un,
    struct scsi_pkt **pktpp, struct buf *bp, int flags,
    int (*callback)(caddr_t), caddr_t callback_arg,
    diskaddr_t lba, uint32_t blockcount)
{
	struct scsi_pkt *return_pktp;
	union scsi_cdb *cdbp;
	struct sd_cdbinfo *cp = NULL;
	int i;

	/*
	 * See which size CDB to use, based upon the request.
	 */
	for (i = un->un_mincdb; i <= un->un_maxcdb; i++) {

		/*
		 * Check lba and block count against sd_cdbtab limits.
		 * In the partial DMA case, we have to use the same size
		 * CDB for all the transfers. Check lba + blockcount
		 * against the max LBA so we know that segment of the
		 * transfer can use the CDB we select.
		 */
		if ((lba + blockcount - 1 <= sd_cdbtab[i].sc_maxlba) &&
		    (blockcount <= sd_cdbtab[i].sc_maxlen)) {

			/*
			 * The command will fit into the CDB type
			 * specified by sd_cdbtab[i].
			 */
			cp = sd_cdbtab + i;

			/*
			 * Call scsi_init_pkt so we can fill in the
			 * CDB.
			 */
			return_pktp = scsi_init_pkt(SD_ADDRESS(un), *pktpp,
			    bp, cp->sc_grpcode, un->un_status_len, 0,
			    flags, callback, callback_arg);

			if (return_pktp != NULL) {

				/*
				 * Return new value of pkt
				 */
				*pktpp = return_pktp;

				/*
				 * To be safe, zero the CDB, ensuring there is
				 * no leftover data from a previous command.
				 */
				bzero(return_pktp->pkt_cdbp, cp->sc_grpcode);

				/*
				 * Handle partial DMA mapping
				 */
				if (return_pktp->pkt_resid != 0) {

					/*
					 * Not going to xfer as many blocks as
					 * originally expected
					 */
					blockcount -=
					    SD_BYTES2TGTBLOCKS(un,
					    return_pktp->pkt_resid);
				}

				cdbp = (union scsi_cdb *)return_pktp->pkt_cdbp;

				/*
				 * Set command byte based on the CDB
				 * type we matched.
				 */
				cdbp->scc_cmd = cp->sc_grpmask |
				    ((bp->b_flags & B_READ) ?
				    SCMD_READ : SCMD_WRITE);

				SD_FILL_SCSI1_LUN(un, return_pktp);

				/*
				 * Fill in LBA and length
				 */
				ASSERT((cp->sc_grpcode == CDB_GROUP1) ||
				    (cp->sc_grpcode == CDB_GROUP4) ||
				    (cp->sc_grpcode == CDB_GROUP0) ||
				    (cp->sc_grpcode == CDB_GROUP5));

				if (cp->sc_grpcode == CDB_GROUP1) {
					FORMG1ADDR(cdbp, lba);
					FORMG1COUNT(cdbp, blockcount);
					return (0);
				} else if (cp->sc_grpcode == CDB_GROUP4) {
					FORMG4LONGADDR(cdbp, lba);
					FORMG4COUNT(cdbp, blockcount);
					return (0);
				} else if (cp->sc_grpcode == CDB_GROUP0) {
					FORMG0ADDR(cdbp, lba);
					FORMG0COUNT(cdbp, blockcount);
					return (0);
				} else if (cp->sc_grpcode == CDB_GROUP5) {
					FORMG5ADDR(cdbp, lba);
					FORMG5COUNT(cdbp, blockcount);
					return (0);
				}

				/*
				 * It should be impossible to not match one
				 * of the CDB types above, so we should never
				 * reach this point. Set the CDB command byte
				 * to test-unit-ready to avoid writing
				 * to somewhere we don't intend.
				 */
				cdbp->scc_cmd = SCMD_TEST_UNIT_READY;
				return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL);
			} else {
				/*
				 * Couldn't get scsi_pkt
				 */
				return (SD_PKT_ALLOC_FAILURE);
			}
		}
	}

	/*
	 * None of the available CDB types were suitable.
	 * This really should never happen: on a 64 bit system we support
	 * READ16/WRITE16 which will hold an entire 64 bit disk address
	 * and on a 32 bit system we will refuse to bind to a device
	 * larger than 2TB so addresses will never be larger than 32 bits.
	 */
	return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL);
}

/*
 * Function: sd_setup_next_rw_pkt
 *
 * Description: Setup packet for partial DMA transfers, except for the
 *		initial transfer. sd_setup_rw_pkt should be used for
 *		the initial transfer.
 *
 * Context: Kernel thread and may be called from interrupt context.
 */

int
sd_setup_next_rw_pkt(struct sd_lun *un,
    struct scsi_pkt *pktp, struct buf *bp,
    diskaddr_t lba, uint32_t blockcount)
{
	uchar_t com;
	union scsi_cdb *cdbp;
	uchar_t cdb_group_id;

	ASSERT(pktp != NULL);
	ASSERT(pktp->pkt_cdbp != NULL);

	cdbp = (union scsi_cdb *)pktp->pkt_cdbp;
	com = cdbp->scc_cmd;
	cdb_group_id = CDB_GROUPID(com);

	ASSERT((cdb_group_id == CDB_GROUPID_0) ||
	    (cdb_group_id == CDB_GROUPID_1) ||
	    (cdb_group_id == CDB_GROUPID_4) ||
	    (cdb_group_id == CDB_GROUPID_5));

	/*
	 * Move pkt to the next portion of the xfer.
	 * func is NULL_FUNC so we do not have to release
	 * the disk mutex here.
	 */
	if (scsi_init_pkt(SD_ADDRESS(un), pktp, bp, 0, 0, 0, 0,
	    NULL_FUNC, NULL) == pktp) {
		/* Success.  Handle partial DMA */
		if (pktp->pkt_resid != 0) {
			blockcount -=
			    SD_BYTES2TGTBLOCKS(un, pktp->pkt_resid);
		}

		cdbp->scc_cmd = com;
		SD_FILL_SCSI1_LUN(un, pktp);
		if (cdb_group_id == CDB_GROUPID_1) {
			FORMG1ADDR(cdbp, lba);
			FORMG1COUNT(cdbp, blockcount);
			return (0);
		} else if (cdb_group_id == CDB_GROUPID_4) {
			FORMG4LONGADDR(cdbp, lba);
			FORMG4COUNT(cdbp, blockcount);
			return (0);
		} else if (cdb_group_id == CDB_GROUPID_0) {
			FORMG0ADDR(cdbp, lba);
			FORMG0COUNT(cdbp, blockcount);
			return (0);
		} else if (cdb_group_id == CDB_GROUPID_5) {
			FORMG5ADDR(cdbp, lba);
			FORMG5COUNT(cdbp, blockcount);
			return (0);
		}

		/* Unreachable */
		return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL);
	}

	/*
	 * Error setting up next portion of cmd transfer.
	 * Something is definitely very wrong and this
	 * should not happen.
	 */
	return (SD_PKT_ALLOC_FAILURE);
}

/*
 * Function: sd_initpkt_for_uscsi
 *
 * Description: Allocate and initialize for transport a scsi_pkt struct,
 *		based upon the info specified in the given uscsi_cmd struct.
 *
 * Return Code: SD_PKT_ALLOC_SUCCESS
 *		SD_PKT_ALLOC_FAILURE
 *		SD_PKT_ALLOC_FAILURE_NO_DMA
 *		SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL
 *
 * Context: Kernel thread and may be called from software interrupt context
 *		as part of a sdrunout callback.
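 *
 *		Note: unlike the buf path, this path never uses partial
 *		DMA; PKT_DMA_PARTIAL is masked off below so that a retried
 *		command is free to take another scsi_vhci path.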
 *
 *		This function may not block or call routines that block.
 */

static int
sd_initpkt_for_uscsi(struct buf *bp, struct scsi_pkt **pktpp)
{
	struct uscsi_cmd *uscmd;
	struct sd_xbuf *xp;
	struct scsi_pkt *pktp;
	struct sd_lun *un;
	uint32_t flags = 0;

	ASSERT(bp != NULL);
	ASSERT(pktpp != NULL);
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);
	un = SD_GET_UN(bp);
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));

	/* The pointer to the uscsi_cmd struct is expected in xb_pktinfo */
	uscmd = (struct uscsi_cmd *)xp->xb_pktinfo;
	ASSERT(uscmd != NULL);

	SD_TRACE(SD_LOG_IO_CORE, un,
	    "sd_initpkt_for_uscsi: entry: buf:0x%p\n", bp);

	/*
	 * Allocate the scsi_pkt for the command.
	 * Note: If PKT_DMA_PARTIAL flag is set, scsi_vhci binds a path
	 * during scsi_init_pkt time and will continue to use the
	 * same path as long as the same scsi_pkt is used without
	 * intervening scsi_dmafree(). Since a uscsi command does
	 * not call scsi_dmafree() before retrying a failed command,
	 * it is necessary to make sure the PKT_DMA_PARTIAL flag is NOT
	 * set, so that scsi_vhci can use another available path for
	 * the retry. Besides, a uscsi command does not allow DMA breakup,
	 * so there is no need to set the PKT_DMA_PARTIAL flag.
	 */
	if (uscmd->uscsi_rqlen > SENSE_LENGTH) {
		pktp = scsi_init_pkt(SD_ADDRESS(un), NULL,
		    ((bp->b_bcount != 0) ? bp : NULL), uscmd->uscsi_cdblen,
		    ((int)(uscmd->uscsi_rqlen) + sizeof (struct scsi_arq_status)
		    - sizeof (struct scsi_extended_sense)), 0,
		    (un->un_pkt_flags & ~PKT_DMA_PARTIAL) | PKT_XARQ,
		    sdrunout, (caddr_t)un);
	} else {
		pktp = scsi_init_pkt(SD_ADDRESS(un), NULL,
		    ((bp->b_bcount != 0) ? bp : NULL), uscmd->uscsi_cdblen,
		    sizeof (struct scsi_arq_status), 0,
		    (un->un_pkt_flags & ~PKT_DMA_PARTIAL),
		    sdrunout, (caddr_t)un);
	}

	if (pktp == NULL) {
		*pktpp = NULL;
		/*
		 * Set the driver state to RWAIT to indicate the driver
		 * is waiting on resource allocations. The driver will not
		 * suspend, pm_suspend, or detach while the state is RWAIT.
		 */
		New_state(un, SD_STATE_RWAIT);

		SD_ERROR(SD_LOG_IO_CORE, un,
		    "sd_initpkt_for_uscsi: No pktp. exit bp:0x%p\n", bp);

		if ((bp->b_flags & B_ERROR) != 0) {
			return (SD_PKT_ALLOC_FAILURE_NO_DMA);
		}
		return (SD_PKT_ALLOC_FAILURE);
	}

	/*
	 * We do not do DMA breakup for USCSI commands, so return failure
	 * here if all the needed DMA resources were not allocated.
	 */
	if ((un->un_pkt_flags & PKT_DMA_PARTIAL) &&
	    (bp->b_bcount != 0) && (pktp->pkt_resid != 0)) {
		scsi_destroy_pkt(pktp);
		SD_ERROR(SD_LOG_IO_CORE, un, "sd_initpkt_for_uscsi: "
		    "No partial DMA for USCSI. exit: buf:0x%p\n", bp);
		return (SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL);
	}

	/* Init the cdb from the given uscsi struct */
	(void) scsi_setup_cdb((union scsi_cdb *)pktp->pkt_cdbp,
	    uscmd->uscsi_cdb[0], 0, 0, 0);

	SD_FILL_SCSI1_LUN(un, pktp);

	/*
	 * Set up the optional USCSI flags. See the uscsi (7I) man page
	 * for listing of the supported flags.
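	 * As a hypothetical example, a user-level caller that built its
	 * request with uscsi_flags = USCSI_READ | USCSI_SILENT |
	 * USCSI_RQENABLE arrives here with USCSI_SILENT mapped to
	 * FLAG_SILENT in the pkt flags below.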
	 */

	if (uscmd->uscsi_flags & USCSI_SILENT) {
		flags |= FLAG_SILENT;
	}

	if (uscmd->uscsi_flags & USCSI_DIAGNOSE) {
		flags |= FLAG_DIAGNOSE;
	}

	if (uscmd->uscsi_flags & USCSI_ISOLATE) {
		flags |= FLAG_ISOLATE;
	}

	if (un->un_f_is_fibre == FALSE) {
		if (uscmd->uscsi_flags & USCSI_RENEGOT) {
			flags |= FLAG_RENEGOTIATE_WIDE_SYNC;
		}
	}

	/*
	 * Set the pkt flags here so we save time later.
	 * Note: These flags are NOT in the uscsi man page!!!
	 */
	if (uscmd->uscsi_flags & USCSI_HEAD) {
		flags |= FLAG_HEAD;
	}

	if (uscmd->uscsi_flags & USCSI_NOINTR) {
		flags |= FLAG_NOINTR;
	}

	/*
	 * For tagged queueing, things get a bit complicated.
	 * Check first for head of queue and last for ordered queue.
	 * If neither head nor ordered, use the default driver tag flags.
	 */
	if ((uscmd->uscsi_flags & USCSI_NOTAG) == 0) {
		if (uscmd->uscsi_flags & USCSI_HTAG) {
			flags |= FLAG_HTAG;
		} else if (uscmd->uscsi_flags & USCSI_OTAG) {
			flags |= FLAG_OTAG;
		} else {
			flags |= un->un_tagflags & FLAG_TAGMASK;
		}
	}

	if (uscmd->uscsi_flags & USCSI_NODISCON) {
		flags = (flags & ~FLAG_TAGMASK) | FLAG_NODISCON;
	}

	pktp->pkt_flags = flags;

	/* Copy the caller's CDB into the pkt... */
	bcopy(uscmd->uscsi_cdb, pktp->pkt_cdbp, uscmd->uscsi_cdblen);

	if (uscmd->uscsi_timeout == 0) {
		pktp->pkt_time = un->un_uscsi_timeout;
	} else {
		pktp->pkt_time = uscmd->uscsi_timeout;
	}

	/* need it later to identify USCSI request in sdintr */
	xp->xb_pkt_flags |= SD_XB_USCSICMD;

	xp->xb_sense_resid = uscmd->uscsi_rqresid;

	pktp->pkt_private = bp;
	pktp->pkt_comp = sdintr;
	*pktpp = pktp;

	SD_TRACE(SD_LOG_IO_CORE, un,
	    "sd_initpkt_for_uscsi: exit: buf:0x%p\n", bp);

	return (SD_PKT_ALLOC_SUCCESS);
}


/*
 * Function: sd_destroypkt_for_uscsi
 *
 * Description: Free the scsi_pkt(9S) struct for the given bp, for uscsi
 *		IOs. Also saves relevant info into the associated uscsi_cmd
 *		struct.
 *
 * Context: May be called under interrupt context
 */

static void
sd_destroypkt_for_uscsi(struct buf *bp)
{
	struct uscsi_cmd *uscmd;
	struct sd_xbuf *xp;
	struct scsi_pkt *pktp;
	struct sd_lun *un;

	ASSERT(bp != NULL);
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);
	un = SD_GET_UN(bp);
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	pktp = SD_GET_PKTP(bp);
	ASSERT(pktp != NULL);

	SD_TRACE(SD_LOG_IO_CORE, un,
	    "sd_destroypkt_for_uscsi: entry: buf:0x%p\n", bp);

	/* The pointer to the uscsi_cmd struct is expected in xb_pktinfo */
	uscmd = (struct uscsi_cmd *)xp->xb_pktinfo;
	ASSERT(uscmd != NULL);

	/* Save the status and the residual into the uscsi_cmd struct */
	uscmd->uscsi_status = ((*(pktp)->pkt_scbp) & STATUS_MASK);
	uscmd->uscsi_resid = bp->b_resid;

	/*
	 * If enabled, copy any saved sense data into the area specified
	 * by the uscsi command.
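	 * A caller that requested extended sense (uscsi_rqlen greater
	 * than SENSE_LENGTH, which caused the pkt to be allocated with
	 * PKT_XARQ above) gets up to MAX_SENSE_LENGTH bytes copied back;
	 * all other callers get the traditional SENSE_LENGTH bytes.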
	 */
	if (((uscmd->uscsi_flags & USCSI_RQENABLE) != 0) &&
	    (uscmd->uscsi_rqlen != 0) && (uscmd->uscsi_rqbuf != NULL)) {
		/*
		 * Note: uscmd->uscsi_rqbuf should always point to a buffer
		 * at least SENSE_LENGTH bytes in size (see sd_send_scsi_cmd())
		 */
		uscmd->uscsi_rqstatus = xp->xb_sense_status;
		uscmd->uscsi_rqresid = xp->xb_sense_resid;
		if (uscmd->uscsi_rqlen > SENSE_LENGTH) {
			bcopy(xp->xb_sense_data, uscmd->uscsi_rqbuf,
			    MAX_SENSE_LENGTH);
		} else {
			bcopy(xp->xb_sense_data, uscmd->uscsi_rqbuf,
			    SENSE_LENGTH);
		}
	}

	/* We are done with the scsi_pkt; free it now */
	ASSERT(SD_GET_PKTP(bp) != NULL);
	scsi_destroy_pkt(SD_GET_PKTP(bp));

	SD_TRACE(SD_LOG_IO_CORE, un,
	    "sd_destroypkt_for_uscsi: exit: buf:0x%p\n", bp);
}


/*
 * Function: sd_bioclone_alloc
 *
 * Description: Allocate a buf(9S) and init it as per the given buf
 *		and the various arguments. The associated sd_xbuf
 *		struct is (nearly) duplicated. The struct buf *bp
 *		argument is saved in new_xp->xb_private.
 *
 * Arguments: bp - ptr to the buf(9S) to be "shadowed"
 *		datalen - size of data area for the shadow bp
 *		blkno - starting LBA
 *		func - function pointer for b_iodone in the shadow buf. (May
 *		be NULL if none.)
 *
 * Return Code: Pointer to the allocated buf(9S) struct
 *
 * Context: Can sleep.
 */

static struct buf *
sd_bioclone_alloc(struct buf *bp, size_t datalen,
    daddr_t blkno, int (*func)(struct buf *))
{
	struct sd_lun *un;
	struct sd_xbuf *xp;
	struct sd_xbuf *new_xp;
	struct buf *new_bp;

	ASSERT(bp != NULL);
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);
	un = SD_GET_UN(bp);
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	new_bp = bioclone(bp, 0, datalen, SD_GET_DEV(un), blkno, func,
	    NULL, KM_SLEEP);

	new_bp->b_lblkno = blkno;

	/*
	 * Allocate an xbuf for the shadow bp and copy the contents of the
	 * original xbuf into it.
	 */
	new_xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP);
	bcopy(xp, new_xp, sizeof (struct sd_xbuf));

	/*
	 * The given bp is automatically saved in the xb_private member
	 * of the new xbuf. Callers are allowed to depend on this.
	 */
	new_xp->xb_private = bp;

	new_bp->b_private = new_xp;

	return (new_bp);
}

/*
 * Function: sd_shadow_buf_alloc
 *
 * Description: Allocate a buf(9S) and init it as per the given buf
 *		and the various arguments. The associated sd_xbuf
 *		struct is (nearly) duplicated. The struct buf *bp
 *		argument is saved in new_xp->xb_private.
 *
 * Arguments: bp - ptr to the buf(9S) to be "shadowed"
 *		datalen - size of data area for the shadow bp
 *		bflags - B_READ or B_WRITE (pseudo flag)
 *		blkno - starting LBA
 *		func - function pointer for b_iodone in the shadow buf. (May
 *		be NULL if none.)
 *
 * Return Code: Pointer to the allocated buf(9S) struct
 *
 * Context: Can sleep.
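 *
 *		Note: on x86 the shadow data area is plain kernel memory
 *		(getrbuf(9F) plus kmem_zalloc(9F)); elsewhere it is
 *		DMA-consistent memory from scsi_alloc_consistent_buf(9F),
 *		as seen in the body below.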
 */

static struct buf *
sd_shadow_buf_alloc(struct buf *bp, size_t datalen, uint_t bflags,
    daddr_t blkno, int (*func)(struct buf *))
{
	struct sd_lun *un;
	struct sd_xbuf *xp;
	struct sd_xbuf *new_xp;
	struct buf *new_bp;

	ASSERT(bp != NULL);
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);
	un = SD_GET_UN(bp);
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	if (bp->b_flags & (B_PAGEIO | B_PHYS)) {
		bp_mapin(bp);
	}

	bflags &= (B_READ | B_WRITE);
#if defined(__i386) || defined(__amd64)
	new_bp = getrbuf(KM_SLEEP);
	new_bp->b_un.b_addr = kmem_zalloc(datalen, KM_SLEEP);
	new_bp->b_bcount = datalen;
	new_bp->b_flags = bflags |
	    (bp->b_flags & ~(B_PAGEIO | B_PHYS | B_REMAPPED | B_SHADOW));
#else
	new_bp = scsi_alloc_consistent_buf(SD_ADDRESS(un), NULL,
	    datalen, bflags, SLEEP_FUNC, NULL);
#endif
	new_bp->av_forw = NULL;
	new_bp->av_back = NULL;
	new_bp->b_dev = bp->b_dev;
	new_bp->b_blkno = blkno;
	new_bp->b_iodone = func;
	new_bp->b_edev = bp->b_edev;
	new_bp->b_resid = 0;

	/* We need to preserve the B_FAILFAST flag */
	if (bp->b_flags & B_FAILFAST) {
		new_bp->b_flags |= B_FAILFAST;
	}

	/*
	 * Allocate an xbuf for the shadow bp and copy the contents of the
	 * original xbuf into it.
	 */
	new_xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP);
	bcopy(xp, new_xp, sizeof (struct sd_xbuf));

	/* Need later to copy data between the shadow buf & original buf! */
	new_xp->xb_pkt_flags |= PKT_CONSISTENT;

	/*
	 * The given bp is automatically saved in the xb_private member
	 * of the new xbuf. Callers are allowed to depend on this.
	 */
	new_xp->xb_private = bp;

	new_bp->b_private = new_xp;

	return (new_bp);
}

/*
 * Function: sd_bioclone_free
 *
 * Description: Deallocate a buf(9S) that was used for 'shadow' IO operations
 *		in the partition-overrun case.
 *
 * Context: May be called under interrupt context
 */

static void
sd_bioclone_free(struct buf *bp)
{
	struct sd_xbuf *xp;

	ASSERT(bp != NULL);
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);

	/*
	 * Call bp_mapout() before freeing the buf, in case a lower
	 * layer or HBA had done a bp_mapin(). We must do this here
	 * as we are the "originator" of the shadow buf.
	 */
	bp_mapout(bp);

	/*
	 * Null out b_iodone before freeing the bp, to ensure that the driver
	 * never gets confused by a stale value in this field. (Just a little
	 * extra defensiveness here.)
	 */
	bp->b_iodone = NULL;

	freerbuf(bp);

	kmem_free(xp, sizeof (struct sd_xbuf));
}

/*
 * Function: sd_shadow_buf_free
 *
 * Description: Deallocate a buf(9S) that was used for 'shadow' IO operations.
 *
 * Context: May be called under interrupt context
 */

static void
sd_shadow_buf_free(struct buf *bp)
{
	struct sd_xbuf *xp;

	ASSERT(bp != NULL);
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);

#if defined(__sparc)
	/*
	 * Call bp_mapout() before freeing the buf, in case a lower
	 * layer or HBA had done a bp_mapin().
	 * We must do this here as we are the "originator" of the
	 * shadow buf.
	 */
	bp_mapout(bp);
#endif

	/*
	 * Null out b_iodone before freeing the bp, to ensure that the driver
	 * never gets confused by a stale value in this field. (Just a little
	 * extra defensiveness here.)
	 */
	bp->b_iodone = NULL;

#if defined(__i386) || defined(__amd64)
	kmem_free(bp->b_un.b_addr, bp->b_bcount);
	freerbuf(bp);
#else
	scsi_free_consistent_buf(bp);
#endif

	kmem_free(xp, sizeof (struct sd_xbuf));
}


/*
 * Function: sd_print_transport_rejected_message
 *
 * Description: This implements the ludicrously complex rules for printing
 *		a "transport rejected" message.  This is to address the
 *		specific problem of having a flood of this error message
 *		produced when a failover occurs.
 *
 * Context: Any.
 */

static void
sd_print_transport_rejected_message(struct sd_lun *un, struct sd_xbuf *xp,
    int code)
{
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(xp != NULL);

	/*
	 * Print the "transport rejected" message under the following
	 * conditions:
	 *
	 * - Whenever the SD_LOGMASK_DIAG bit of sd_level_mask is set
	 * - The error code from scsi_transport() is NOT a TRAN_FATAL_ERROR.
	 * - If the error code IS a TRAN_FATAL_ERROR, then the message is
	 *   printed the FIRST time a TRAN_FATAL_ERROR is returned from
	 *   scsi_transport(9F) (which indicates that the target might have
	 *   gone off-line).  This uses the un->un_tran_fatal_count
	 *   count, which is incremented whenever a TRAN_FATAL_ERROR is
	 *   received, and reset to zero whenever a TRAN_ACCEPT is returned
	 *   from scsi_transport().
	 *
	 * The FLAG_SILENT in the scsi_pkt must be CLEARED in ALL of
	 * the preceding cases in order for the message to be printed.
	 */
	if ((xp->xb_pktp->pkt_flags & FLAG_SILENT) == 0) {
		if ((sd_level_mask & SD_LOGMASK_DIAG) ||
		    (code != TRAN_FATAL_ERROR) ||
		    (un->un_tran_fatal_count == 1)) {
			switch (code) {
			case TRAN_BADPKT:
				scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
				    "transport rejected bad packet\n");
				break;
			case TRAN_FATAL_ERROR:
				scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
				    "transport rejected fatal error\n");
				break;
			default:
				scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
				    "transport rejected (%d)\n", code);
				break;
			}
		}
	}
}


/*
 * Function: sd_add_buf_to_waitq
 *
 * Description: Add the given buf(9S) struct to the wait queue for the
 *		instance.  If sorting is enabled, then the buf is added
 *		to the queue via an elevator sort algorithm (a la
 *		disksort(9F)).  The SD_GET_BLKNO(bp) is used as the sort key.
 *		If sorting is not enabled, then the buf is just added
 *		to the end of the wait queue.
 *
 * Return Code: void
 *
 * Context: Does not sleep/block, therefore technically can be called
 *		from any context.  However if sorting is enabled then the
 *		execution time is indeterminate, and may take a long time if
 *		the wait queue grows large.
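 *
 * Worked example (editorial illustration of the elevator ordering
 * described below): if the waitq holds requests at blocks
 * 100 -> 200 -> 300 -> 20 -> 60 (the inversion at 300 -> 20 marks
 * the start of the second list), a new request for block 250 is
 * inserted before 300 in the first list, while a new request for
 * block 40 is inserted between 20 and 60 in the second list.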
12722 */ 12723 12724 static void 12725 sd_add_buf_to_waitq(struct sd_lun *un, struct buf *bp) 12726 { 12727 struct buf *ap; 12728 12729 ASSERT(bp != NULL); 12730 ASSERT(un != NULL); 12731 ASSERT(mutex_owned(SD_MUTEX(un))); 12732 12733 /* If the queue is empty, add the buf as the only entry & return. */ 12734 if (un->un_waitq_headp == NULL) { 12735 ASSERT(un->un_waitq_tailp == NULL); 12736 un->un_waitq_headp = un->un_waitq_tailp = bp; 12737 bp->av_forw = NULL; 12738 return; 12739 } 12740 12741 ASSERT(un->un_waitq_tailp != NULL); 12742 12743 /* 12744 * If sorting is disabled, just add the buf to the tail end of 12745 * the wait queue and return. 12746 */ 12747 if (un->un_f_disksort_disabled) { 12748 un->un_waitq_tailp->av_forw = bp; 12749 un->un_waitq_tailp = bp; 12750 bp->av_forw = NULL; 12751 return; 12752 } 12753 12754 /* 12755 * Sort thru the list of requests currently on the wait queue 12756 * and add the new buf request at the appropriate position. 12757 * 12758 * The un->un_waitq_headp is an activity chain pointer on which 12759 * we keep two queues, sorted in ascending SD_GET_BLKNO() order. The 12760 * first queue holds those requests which are positioned after 12761 * the current SD_GET_BLKNO() (in the first request); the second holds 12762 * requests which came in after their SD_GET_BLKNO() number was passed. 12763 * Thus we implement a one way scan, retracting after reaching 12764 * the end of the drive to the first request on the second 12765 * queue, at which time it becomes the first queue. 12766 * A one-way scan is natural because of the way UNIX read-ahead 12767 * blocks are allocated. 12768 * 12769 * If we lie after the first request, then we must locate the 12770 * second request list and add ourselves to it. 12771 */ 12772 ap = un->un_waitq_headp; 12773 if (SD_GET_BLKNO(bp) < SD_GET_BLKNO(ap)) { 12774 while (ap->av_forw != NULL) { 12775 /* 12776 * Look for an "inversion" in the (normally 12777 * ascending) block numbers. This indicates 12778 * the start of the second request list. 12779 */ 12780 if (SD_GET_BLKNO(ap->av_forw) < SD_GET_BLKNO(ap)) { 12781 /* 12782 * Search the second request list for the 12783 * first request at a larger block number. 12784 * We go before that; however if there is 12785 * no such request, we go at the end. 12786 */ 12787 do { 12788 if (SD_GET_BLKNO(bp) < 12789 SD_GET_BLKNO(ap->av_forw)) { 12790 goto insert; 12791 } 12792 ap = ap->av_forw; 12793 } while (ap->av_forw != NULL); 12794 goto insert; /* after last */ 12795 } 12796 ap = ap->av_forw; 12797 } 12798 12799 /* 12800 * No inversions... we will go after the last, and 12801 * be the first request in the second request list. 12802 */ 12803 goto insert; 12804 } 12805 12806 /* 12807 * Request is at/after the current request... 12808 * sort in the first request list. 12809 */ 12810 while (ap->av_forw != NULL) { 12811 /* 12812 * We want to go after the current request (1) if 12813 * there is an inversion after it (i.e. it is the end 12814 * of the first request list), or (2) if the next 12815 * request is a larger block no. than our request. 12816 */ 12817 if ((SD_GET_BLKNO(ap->av_forw) < SD_GET_BLKNO(ap)) || 12818 (SD_GET_BLKNO(bp) < SD_GET_BLKNO(ap->av_forw))) { 12819 goto insert; 12820 } 12821 ap = ap->av_forw; 12822 } 12823 12824 /* 12825 * Neither a second list nor a larger request, therefore 12826 * we go at the end of the first list (which is the same 12827 * as the end of the whole schebang). 
12828 */ 12829 insert: 12830 bp->av_forw = ap->av_forw; 12831 ap->av_forw = bp; 12832 12833 /* 12834 * If we inserted onto the tail end of the waitq, make sure the 12835 * tail pointer is updated. 12836 */ 12837 if (ap == un->un_waitq_tailp) { 12838 un->un_waitq_tailp = bp; 12839 } 12840 } 12841 12842 12843 /* 12844 * Function: sd_start_cmds 12845 * 12846 * Description: Remove and transport cmds from the driver queues. 12847 * 12848 * Arguments: un - pointer to the unit (soft state) struct for the target. 12849 * 12850 * immed_bp - ptr to a buf to be transported immediately. Only 12851 * the immed_bp is transported; bufs on the waitq are not 12852 * processed and the un_retry_bp is not checked. If immed_bp is 12853 * NULL, then normal queue processing is performed. 12854 * 12855 * Context: May be called from kernel thread context, interrupt context, 12856 * or runout callback context. This function may not block or 12857 * call routines that block. 12858 */ 12859 12860 static void 12861 sd_start_cmds(struct sd_lun *un, struct buf *immed_bp) 12862 { 12863 struct sd_xbuf *xp; 12864 struct buf *bp; 12865 void (*statp)(kstat_io_t *); 12866 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 12867 void (*saved_statp)(kstat_io_t *); 12868 #endif 12869 int rval; 12870 12871 ASSERT(un != NULL); 12872 ASSERT(mutex_owned(SD_MUTEX(un))); 12873 ASSERT(un->un_ncmds_in_transport >= 0); 12874 ASSERT(un->un_throttle >= 0); 12875 12876 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_start_cmds: entry\n"); 12877 12878 do { 12879 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 12880 saved_statp = NULL; 12881 #endif 12882 12883 /* 12884 * If we are syncing or dumping, fail the command to 12885 * avoid recursively calling back into scsi_transport(). 12886 * The dump I/O itself uses a separate code path so this 12887 * only prevents non-dump I/O from being sent while dumping. 12888 * File system sync takes place before dumping begins. 12889 * During panic, filesystem I/O is allowed provided 12890 * un_in_callback is <= 1. This is to prevent recursion 12891 * such as sd_start_cmds -> scsi_transport -> sdintr -> 12892 * sd_start_cmds and so on. See panic.c for more information 12893 * about the states the system can be in during panic. 12894 */ 12895 if ((un->un_state == SD_STATE_DUMPING) || 12896 (ddi_in_panic() && (un->un_in_callback > 1))) { 12897 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 12898 "sd_start_cmds: panicking\n"); 12899 goto exit; 12900 } 12901 12902 if ((bp = immed_bp) != NULL) { 12903 /* 12904 * We have a bp that must be transported immediately. 12905 * It's OK to transport the immed_bp here without doing 12906 * the throttle limit check because the immed_bp is 12907 * always used in a retry/recovery case. This means 12908 * that we know we are not at the throttle limit by 12909 * virtue of the fact that to get here we must have 12910 * already gotten a command back via sdintr(). This also 12911 * relies on (1) the command on un_retry_bp preventing 12912 * further commands from the waitq from being issued; 12913 * and (2) the code in sd_retry_command checking the 12914 * throttle limit before issuing a delayed or immediate 12915 * retry. This holds even if the throttle limit is 12916 * currently ratcheted down from its maximum value. 
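			 *
			 * (For example, sd_start_retry_command() passes
			 * un->un_retry_bp as the immed_bp argument once its
			 * retry delay expires; see below.)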
 */
			statp = kstat_runq_enter;
			if (bp == un->un_retry_bp) {
				ASSERT((un->un_retry_statp == NULL) ||
				    (un->un_retry_statp == kstat_waitq_enter) ||
				    (un->un_retry_statp ==
				    kstat_runq_back_to_waitq));
				/*
				 * If the waitq kstat was incremented when
				 * sd_set_retry_bp() queued this bp for a retry,
				 * then we must set up statp so that the waitq
				 * count will get decremented correctly below.
				 * Also we must clear un->un_retry_statp to
				 * ensure that we do not act on a stale value
				 * in this field.
				 */
				if ((un->un_retry_statp == kstat_waitq_enter) ||
				    (un->un_retry_statp ==
				    kstat_runq_back_to_waitq)) {
					statp = kstat_waitq_to_runq;
				}
#if defined(__i386) || defined(__amd64)	/* DMAFREE for x86 only */
				saved_statp = un->un_retry_statp;
#endif
				un->un_retry_statp = NULL;

				SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un,
				    "sd_start_cmds: un:0x%p: GOT retry_bp:0x%p "
				    "un_throttle:%d un_ncmds_in_transport:%d\n",
				    un, un->un_retry_bp, un->un_throttle,
				    un->un_ncmds_in_transport);
			} else {
				SD_TRACE(SD_LOG_IO_CORE, un, "sd_start_cmds: "
				    "processing priority bp:0x%p\n", bp);
			}

		} else if ((bp = un->un_waitq_headp) != NULL) {
			/*
			 * A command on the waitq is ready to go, but do not
			 * send it if:
			 *
			 * (1) the throttle limit has been reached, or
			 * (2) a retry is pending, or
			 * (3) a START_STOP_UNIT callback is pending, or
			 * (4) a callback for a SD_PATH_DIRECT_PRIORITY
			 *	command is pending.
			 *
			 * For all of these conditions, IO processing will
			 * restart after the condition is cleared.
			 */
			if (un->un_ncmds_in_transport >= un->un_throttle) {
				SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
				    "sd_start_cmds: exiting, "
				    "throttle limit reached!\n");
				goto exit;
			}
			if (un->un_retry_bp != NULL) {
				SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
				    "sd_start_cmds: exiting, retry pending!\n");
				goto exit;
			}
			if (un->un_startstop_timeid != NULL) {
				SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
				    "sd_start_cmds: exiting, "
				    "START_STOP pending!\n");
				goto exit;
			}
			if (un->un_direct_priority_timeid != NULL) {
				SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
				    "sd_start_cmds: exiting, "
				    "SD_PATH_DIRECT_PRIORITY cmd. pending!\n");
				goto exit;
			}

			/* Dequeue the command */
			un->un_waitq_headp = bp->av_forw;
			if (un->un_waitq_headp == NULL) {
				un->un_waitq_tailp = NULL;
			}
			bp->av_forw = NULL;
			statp = kstat_waitq_to_runq;
			SD_TRACE(SD_LOG_IO_CORE, un,
			    "sd_start_cmds: processing waitq bp:0x%p\n", bp);

		} else {
			/* No work to do so bail out now */
			SD_TRACE(SD_LOG_IO_CORE, un,
			    "sd_start_cmds: no more work, exiting!\n");
			goto exit;
		}

		/*
		 * Reset the state to normal.  This is the mechanism by which
		 * the state transitions from either SD_STATE_RWAIT or
		 * SD_STATE_OFFLINE to SD_STATE_NORMAL.
		 * If state is SD_STATE_PM_CHANGING then this command is
		 * part of the device power control and the state must
		 * not be put back to normal.  Doing so would allow new
		 * commands to proceed when they shouldn't, since the
		 * device may be going off.
13017 */ 13018 if ((un->un_state != SD_STATE_SUSPENDED) && 13019 (un->un_state != SD_STATE_PM_CHANGING)) { 13020 New_state(un, SD_STATE_NORMAL); 13021 } 13022 13023 xp = SD_GET_XBUF(bp); 13024 ASSERT(xp != NULL); 13025 13026 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 13027 /* 13028 * Allocate the scsi_pkt if we need one, or attach DMA 13029 * resources if we have a scsi_pkt that needs them. The 13030 * latter should only occur for commands that are being 13031 * retried. 13032 */ 13033 if ((xp->xb_pktp == NULL) || 13034 ((xp->xb_pkt_flags & SD_XB_DMA_FREED) != 0)) { 13035 #else 13036 if (xp->xb_pktp == NULL) { 13037 #endif 13038 /* 13039 * There is no scsi_pkt allocated for this buf. Call 13040 * the initpkt function to allocate & init one. 13041 * 13042 * The scsi_init_pkt runout callback functionality is 13043 * implemented as follows: 13044 * 13045 * 1) The initpkt function always calls 13046 * scsi_init_pkt(9F) with sdrunout specified as the 13047 * callback routine. 13048 * 2) A successful packet allocation is initialized and 13049 * the I/O is transported. 13050 * 3) The I/O associated with an allocation resource 13051 * failure is left on its queue to be retried via 13052 * runout or the next I/O. 13053 * 4) The I/O associated with a DMA error is removed 13054 * from the queue and failed with EIO. Processing of 13055 * the transport queues is also halted to be 13056 * restarted via runout or the next I/O. 13057 * 5) The I/O associated with a CDB size or packet 13058 * size error is removed from the queue and failed 13059 * with EIO. Processing of the transport queues is 13060 * continued. 13061 * 13062 * Note: there is no interface for canceling a runout 13063 * callback. To prevent the driver from detaching or 13064 * suspending while a runout is pending the driver 13065 * state is set to SD_STATE_RWAIT 13066 * 13067 * Note: using the scsi_init_pkt callback facility can 13068 * result in an I/O request persisting at the head of 13069 * the list which cannot be satisfied even after 13070 * multiple retries. In the future the driver may 13071 * implement some kind of maximum runout count before 13072 * failing an I/O. 13073 * 13074 * Note: the use of funcp below may seem superfluous, 13075 * but it helps warlock figure out the correct 13076 * initpkt function calls (see [s]sd.wlcmd). 13077 */ 13078 struct scsi_pkt *pktp; 13079 int (*funcp)(struct buf *bp, struct scsi_pkt **pktp); 13080 13081 ASSERT(bp != un->un_rqs_bp); 13082 13083 funcp = sd_initpkt_map[xp->xb_chain_iostart]; 13084 switch ((*funcp)(bp, &pktp)) { 13085 case SD_PKT_ALLOC_SUCCESS: 13086 xp->xb_pktp = pktp; 13087 SD_TRACE(SD_LOG_IO_CORE, un, 13088 "sd_start_cmd: SD_PKT_ALLOC_SUCCESS 0x%p\n", 13089 pktp); 13090 goto got_pkt; 13091 13092 case SD_PKT_ALLOC_FAILURE: 13093 /* 13094 * Temporary (hopefully) resource depletion. 13095 * Since retries and RQS commands always have a 13096 * scsi_pkt allocated, these cases should never 13097 * get here. So the only cases this needs to 13098 * handle is a bp from the waitq (which we put 13099 * back onto the waitq for sdrunout), or a bp 13100 * sent as an immed_bp (which we just fail). 13101 */ 13102 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13103 "sd_start_cmds: SD_PKT_ALLOC_FAILURE\n"); 13104 13105 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 13106 13107 if (bp == immed_bp) { 13108 /* 13109 * If SD_XB_DMA_FREED is clear, then 13110 * this is a failure to allocate a 13111 * scsi_pkt, and we must fail the 13112 * command. 
13113 */ 13114 if ((xp->xb_pkt_flags & 13115 SD_XB_DMA_FREED) == 0) { 13116 break; 13117 } 13118 13119 /* 13120 * If this immediate command is NOT our 13121 * un_retry_bp, then we must fail it. 13122 */ 13123 if (bp != un->un_retry_bp) { 13124 break; 13125 } 13126 13127 /* 13128 * We get here if this cmd is our 13129 * un_retry_bp that was DMAFREED, but 13130 * scsi_init_pkt() failed to reallocate 13131 * DMA resources when we attempted to 13132 * retry it. This can happen when an 13133 * mpxio failover is in progress, but 13134 * we don't want to just fail the 13135 * command in this case. 13136 * 13137 * Use timeout(9F) to restart it after 13138 * a 100ms delay. We don't want to 13139 * let sdrunout() restart it, because 13140 * sdrunout() is just supposed to start 13141 * commands that are sitting on the 13142 * wait queue. The un_retry_bp stays 13143 * set until the command completes, but 13144 * sdrunout can be called many times 13145 * before that happens. Since sdrunout 13146 * cannot tell if the un_retry_bp is 13147 * already in the transport, it could 13148 * end up calling scsi_transport() for 13149 * the un_retry_bp multiple times. 13150 * 13151 * Also: don't schedule the callback 13152 * if some other callback is already 13153 * pending. 13154 */ 13155 if (un->un_retry_statp == NULL) { 13156 /* 13157 * restore the kstat pointer to 13158 * keep kstat counts coherent 13159 * when we do retry the command. 13160 */ 13161 un->un_retry_statp = 13162 saved_statp; 13163 } 13164 13165 if ((un->un_startstop_timeid == NULL) && 13166 (un->un_retry_timeid == NULL) && 13167 (un->un_direct_priority_timeid == 13168 NULL)) { 13169 13170 un->un_retry_timeid = 13171 timeout( 13172 sd_start_retry_command, 13173 un, SD_RESTART_TIMEOUT); 13174 } 13175 goto exit; 13176 } 13177 13178 #else 13179 if (bp == immed_bp) { 13180 break; /* Just fail the command */ 13181 } 13182 #endif 13183 13184 /* Add the buf back to the head of the waitq */ 13185 bp->av_forw = un->un_waitq_headp; 13186 un->un_waitq_headp = bp; 13187 if (un->un_waitq_tailp == NULL) { 13188 un->un_waitq_tailp = bp; 13189 } 13190 goto exit; 13191 13192 case SD_PKT_ALLOC_FAILURE_NO_DMA: 13193 /* 13194 * HBA DMA resource failure. Fail the command 13195 * and continue processing of the queues. 13196 */ 13197 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13198 "sd_start_cmds: " 13199 "SD_PKT_ALLOC_FAILURE_NO_DMA\n"); 13200 break; 13201 13202 case SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL: 13203 /* 13204 * Note:x86: Partial DMA mapping not supported 13205 * for USCSI commands, and all the needed DMA 13206 * resources were not allocated. 13207 */ 13208 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13209 "sd_start_cmds: " 13210 "SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL\n"); 13211 break; 13212 13213 case SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL: 13214 /* 13215 * Note:x86: Request cannot fit into CDB based 13216 * on lba and len. 13217 */ 13218 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13219 "sd_start_cmds: " 13220 "SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL\n"); 13221 break; 13222 13223 default: 13224 /* Should NEVER get here! */ 13225 panic("scsi_initpkt error"); 13226 /*NOTREACHED*/ 13227 } 13228 13229 /* 13230 * Fatal error in allocating a scsi_pkt for this buf. 13231 * Update kstats & return the buf with an error code. 13232 * We must use sd_return_failed_command_no_restart() to 13233 * avoid a recursive call back into sd_start_cmds(). 13234 * However this also means that we must keep processing 13235 * the waitq here in order to avoid stalling. 
13236 */ 13237 if (statp == kstat_waitq_to_runq) { 13238 SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp); 13239 } 13240 sd_return_failed_command_no_restart(un, bp, EIO); 13241 if (bp == immed_bp) { 13242 /* immed_bp is gone by now, so clear this */ 13243 immed_bp = NULL; 13244 } 13245 continue; 13246 } 13247 got_pkt: 13248 if (bp == immed_bp) { 13249 /* goto the head of the class.... */ 13250 xp->xb_pktp->pkt_flags |= FLAG_HEAD; 13251 } 13252 13253 un->un_ncmds_in_transport++; 13254 SD_UPDATE_KSTATS(un, statp, bp); 13255 13256 /* 13257 * Call scsi_transport() to send the command to the target. 13258 * According to SCSA architecture, we must drop the mutex here 13259 * before calling scsi_transport() in order to avoid deadlock. 13260 * Note that the scsi_pkt's completion routine can be executed 13261 * (from interrupt context) even before the call to 13262 * scsi_transport() returns. 13263 */ 13264 SD_TRACE(SD_LOG_IO_CORE, un, 13265 "sd_start_cmds: calling scsi_transport()\n"); 13266 DTRACE_PROBE1(scsi__transport__dispatch, struct buf *, bp); 13267 13268 mutex_exit(SD_MUTEX(un)); 13269 rval = scsi_transport(xp->xb_pktp); 13270 mutex_enter(SD_MUTEX(un)); 13271 13272 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13273 "sd_start_cmds: scsi_transport() returned %d\n", rval); 13274 13275 switch (rval) { 13276 case TRAN_ACCEPT: 13277 /* Clear this with every pkt accepted by the HBA */ 13278 un->un_tran_fatal_count = 0; 13279 break; /* Success; try the next cmd (if any) */ 13280 13281 case TRAN_BUSY: 13282 un->un_ncmds_in_transport--; 13283 ASSERT(un->un_ncmds_in_transport >= 0); 13284 13285 /* 13286 * Don't retry request sense, the sense data 13287 * is lost when another request is sent. 13288 * Free up the rqs buf and retry 13289 * the original failed cmd. Update kstat. 13290 */ 13291 if (bp == un->un_rqs_bp) { 13292 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 13293 bp = sd_mark_rqs_idle(un, xp); 13294 sd_retry_command(un, bp, SD_RETRIES_STANDARD, 13295 NULL, NULL, EIO, SD_BSY_TIMEOUT / 500, 13296 kstat_waitq_enter); 13297 goto exit; 13298 } 13299 13300 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 13301 /* 13302 * Free the DMA resources for the scsi_pkt. This will 13303 * allow mpxio to select another path the next time 13304 * we call scsi_transport() with this scsi_pkt. 13305 * See sdintr() for the rationalization behind this. 13306 */ 13307 if ((un->un_f_is_fibre == TRUE) && 13308 ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) && 13309 ((xp->xb_pktp->pkt_flags & FLAG_SENSING) == 0)) { 13310 scsi_dmafree(xp->xb_pktp); 13311 xp->xb_pkt_flags |= SD_XB_DMA_FREED; 13312 } 13313 #endif 13314 13315 if (SD_IS_DIRECT_PRIORITY(SD_GET_XBUF(bp))) { 13316 /* 13317 * Commands that are SD_PATH_DIRECT_PRIORITY 13318 * are for error recovery situations. These do 13319 * not use the normal command waitq, so if they 13320 * get a TRAN_BUSY we cannot put them back onto 13321 * the waitq for later retry. One possible 13322 * problem is that there could already be some 13323 * other command on un_retry_bp that is waiting 13324 * for this one to complete, so we would be 13325 * deadlocked if we put this command back onto 13326 * the waitq for later retry (since un_retry_bp 13327 * must complete before the driver gets back to 13328 * commands on the waitq). 13329 * 13330 * To avoid deadlock we must schedule a callback 13331 * that will restart this command after a set 13332 * interval. 
This should keep retrying for as 13333 * long as the underlying transport keeps 13334 * returning TRAN_BUSY (just like for other 13335 * commands). Use the same timeout interval as 13336 * for the ordinary TRAN_BUSY retry. 13337 */ 13338 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13339 "sd_start_cmds: scsi_transport() returned " 13340 "TRAN_BUSY for DIRECT_PRIORITY cmd!\n"); 13341 13342 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 13343 un->un_direct_priority_timeid = 13344 timeout(sd_start_direct_priority_command, 13345 bp, SD_BSY_TIMEOUT / 500); 13346 13347 goto exit; 13348 } 13349 13350 /* 13351 * For TRAN_BUSY, we want to reduce the throttle value, 13352 * unless we are retrying a command. 13353 */ 13354 if (bp != un->un_retry_bp) { 13355 sd_reduce_throttle(un, SD_THROTTLE_TRAN_BUSY); 13356 } 13357 13358 /* 13359 * Set up the bp to be tried again 10 ms later. 13360 * Note:x86: Is there a timeout value in the sd_lun 13361 * for this condition? 13362 */ 13363 sd_set_retry_bp(un, bp, SD_BSY_TIMEOUT / 500, 13364 kstat_runq_back_to_waitq); 13365 goto exit; 13366 13367 case TRAN_FATAL_ERROR: 13368 un->un_tran_fatal_count++; 13369 /* FALLTHRU */ 13370 13371 case TRAN_BADPKT: 13372 default: 13373 un->un_ncmds_in_transport--; 13374 ASSERT(un->un_ncmds_in_transport >= 0); 13375 13376 /* 13377 * If this is our REQUEST SENSE command with a 13378 * transport error, we must get back the pointers 13379 * to the original buf, and mark the REQUEST 13380 * SENSE command as "available". 13381 */ 13382 if (bp == un->un_rqs_bp) { 13383 bp = sd_mark_rqs_idle(un, xp); 13384 xp = SD_GET_XBUF(bp); 13385 } else { 13386 /* 13387 * Legacy behavior: do not update transport 13388 * error count for request sense commands. 13389 */ 13390 SD_UPDATE_ERRSTATS(un, sd_transerrs); 13391 } 13392 13393 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 13394 sd_print_transport_rejected_message(un, xp, rval); 13395 13396 /* 13397 * We must use sd_return_failed_command_no_restart() to 13398 * avoid a recursive call back into sd_start_cmds(). 13399 * However this also means that we must keep processing 13400 * the waitq here in order to avoid stalling. 13401 */ 13402 sd_return_failed_command_no_restart(un, bp, EIO); 13403 13404 /* 13405 * Notify any threads waiting in sd_ddi_suspend() that 13406 * a command completion has occurred. 13407 */ 13408 if (un->un_state == SD_STATE_SUSPENDED) { 13409 cv_broadcast(&un->un_disk_busy_cv); 13410 } 13411 13412 if (bp == immed_bp) { 13413 /* immed_bp is gone by now, so clear this */ 13414 immed_bp = NULL; 13415 } 13416 break; 13417 } 13418 13419 } while (immed_bp == NULL); 13420 13421 exit: 13422 ASSERT(mutex_owned(SD_MUTEX(un))); 13423 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_start_cmds: exit\n"); 13424 } 13425 13426 13427 /* 13428 * Function: sd_return_command 13429 * 13430 * Description: Returns a command to its originator (with or without an 13431 * error). Also starts commands waiting to be transported 13432 * to the target. 
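 *
 * For example, sd_return_failed_command() sets the error code on the
 * bp via SD_BIOERROR() and then hands the bp to this routine (see
 * below).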
13433 * 13434 * Context: May be called from interrupt, kernel, or timeout context 13435 */ 13436 13437 static void 13438 sd_return_command(struct sd_lun *un, struct buf *bp) 13439 { 13440 struct sd_xbuf *xp; 13441 struct scsi_pkt *pktp; 13442 13443 ASSERT(bp != NULL); 13444 ASSERT(un != NULL); 13445 ASSERT(mutex_owned(SD_MUTEX(un))); 13446 ASSERT(bp != un->un_rqs_bp); 13447 xp = SD_GET_XBUF(bp); 13448 ASSERT(xp != NULL); 13449 13450 pktp = SD_GET_PKTP(bp); 13451 13452 SD_TRACE(SD_LOG_IO_CORE, un, "sd_return_command: entry\n"); 13453 13454 /* 13455 * Note: check for the "sdrestart failed" case. 13456 */ 13457 if ((un->un_partial_dma_supported == 1) && 13458 ((xp->xb_pkt_flags & SD_XB_USCSICMD) != SD_XB_USCSICMD) && 13459 (geterror(bp) == 0) && (xp->xb_dma_resid != 0) && 13460 (xp->xb_pktp->pkt_resid == 0)) { 13461 13462 if (sd_setup_next_xfer(un, bp, pktp, xp) != 0) { 13463 /* 13464 * Successfully set up next portion of cmd 13465 * transfer, try sending it 13466 */ 13467 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, 13468 NULL, NULL, 0, (clock_t)0, NULL); 13469 sd_start_cmds(un, NULL); 13470 return; /* Note:x86: need a return here? */ 13471 } 13472 } 13473 13474 /* 13475 * If this is the failfast bp, clear it from un_failfast_bp. This 13476 * can happen if upon being re-tried the failfast bp either 13477 * succeeded or encountered another error (possibly even a different 13478 * error than the one that precipitated the failfast state, but in 13479 * that case it would have had to exhaust retries as well). Regardless, 13480 * this should not occur whenever the instance is in the active 13481 * failfast state. 13482 */ 13483 if (bp == un->un_failfast_bp) { 13484 ASSERT(un->un_failfast_state == SD_FAILFAST_INACTIVE); 13485 un->un_failfast_bp = NULL; 13486 } 13487 13488 /* 13489 * Clear the failfast state upon successful completion of ANY cmd. 13490 */ 13491 if (bp->b_error == 0) { 13492 un->un_failfast_state = SD_FAILFAST_INACTIVE; 13493 } 13494 13495 /* 13496 * This is used if the command was retried one or more times. Show that 13497 * we are done with it, and allow processing of the waitq to resume. 13498 */ 13499 if (bp == un->un_retry_bp) { 13500 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13501 "sd_return_command: un:0x%p: " 13502 "RETURNING retry_bp:0x%p\n", un, un->un_retry_bp); 13503 un->un_retry_bp = NULL; 13504 un->un_retry_statp = NULL; 13505 } 13506 13507 SD_UPDATE_RDWR_STATS(un, bp); 13508 SD_UPDATE_PARTITION_STATS(un, bp); 13509 13510 switch (un->un_state) { 13511 case SD_STATE_SUSPENDED: 13512 /* 13513 * Notify any threads waiting in sd_ddi_suspend() that 13514 * a command completion has occurred. 13515 */ 13516 cv_broadcast(&un->un_disk_busy_cv); 13517 break; 13518 default: 13519 sd_start_cmds(un, NULL); 13520 break; 13521 } 13522 13523 /* Return this command up the iodone chain to its originator. */ 13524 mutex_exit(SD_MUTEX(un)); 13525 13526 (*(sd_destroypkt_map[xp->xb_chain_iodone]))(bp); 13527 xp->xb_pktp = NULL; 13528 13529 SD_BEGIN_IODONE(xp->xb_chain_iodone, un, bp); 13530 13531 ASSERT(!mutex_owned(SD_MUTEX(un))); 13532 mutex_enter(SD_MUTEX(un)); 13533 13534 SD_TRACE(SD_LOG_IO_CORE, un, "sd_return_command: exit\n"); 13535 } 13536 13537 13538 /* 13539 * Function: sd_return_failed_command 13540 * 13541 * Description: Command completion when an error occurred. 
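 *		For example, sd_send_request_sense_command() (below) uses
 *		this routine to fail a command with EIO when the system
 *		is panicking, suspended, or dumping.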
 *
 * Context: May be called from interrupt context
 */

static void
sd_return_failed_command(struct sd_lun *un, struct buf *bp, int errcode)
{
	ASSERT(bp != NULL);
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
	    "sd_return_failed_command: entry\n");

	/*
	 * b_resid could already be nonzero due to a partial data
	 * transfer, so do not change it here.
	 */
	SD_BIOERROR(bp, errcode);

	sd_return_command(un, bp);
	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
	    "sd_return_failed_command: exit\n");
}


/*
 * Function: sd_return_failed_command_no_restart
 *
 * Description: Same as sd_return_failed_command, but ensures that no
 *		call back into sd_start_cmds will be issued.
 *
 * Context: May be called from interrupt context
 */

static void
sd_return_failed_command_no_restart(struct sd_lun *un, struct buf *bp,
    int errcode)
{
	struct sd_xbuf *xp;

	ASSERT(bp != NULL);
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);
	ASSERT(errcode != 0);

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
	    "sd_return_failed_command_no_restart: entry\n");

	/*
	 * b_resid could already be nonzero due to a partial data
	 * transfer, so do not change it here.
	 */
	SD_BIOERROR(bp, errcode);

	/*
	 * If this is the failfast bp, clear it.  This can happen if the
	 * failfast bp encountered a fatal error when we attempted to
	 * re-try it (such as a scsi_transport(9F) failure).  However
	 * we should NOT be in an active failfast state if the failfast
	 * bp is not NULL.
	 */
	if (bp == un->un_failfast_bp) {
		ASSERT(un->un_failfast_state == SD_FAILFAST_INACTIVE);
		un->un_failfast_bp = NULL;
	}

	if (bp == un->un_retry_bp) {
		/*
		 * This command was retried one or more times.  Show that we
		 * are done with it, and allow processing of the waitq to
		 * resume.
		 */
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sd_return_failed_command_no_restart: "
		    " un:0x%p: RETURNING retry_bp:0x%p\n", un, un->un_retry_bp);
		un->un_retry_bp = NULL;
		un->un_retry_statp = NULL;
	}

	SD_UPDATE_RDWR_STATS(un, bp);
	SD_UPDATE_PARTITION_STATS(un, bp);

	mutex_exit(SD_MUTEX(un));

	if (xp->xb_pktp != NULL) {
		(*(sd_destroypkt_map[xp->xb_chain_iodone]))(bp);
		xp->xb_pktp = NULL;
	}

	SD_BEGIN_IODONE(xp->xb_chain_iodone, un, bp);

	mutex_enter(SD_MUTEX(un));

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
	    "sd_return_failed_command_no_restart: exit\n");
}


/*
 * Function: sd_retry_command
 *
 * Description: queue up a command for retry, or (optionally) fail it
 *		if retry counts are exhausted.
 *
 * Arguments: un - Pointer to the sd_lun struct for the target.
 *
 *	bp - Pointer to the buf for the command to be retried.
 *
 *	retry_check_flag - Flag to see which (if any) of the retry
 *	    counts should be decremented/checked.  If the indicated
 *	    retry count is exhausted, then the command will not be
 *	    retried; it will be failed instead.
 *	    This should use a value equal to one of the following:
 *
 *		SD_RETRIES_NOCHECK
 *		SD_RETRIES_STANDARD
 *		SD_RETRIES_VICTIM
 *
 *	    Optionally may be bitwise-OR'ed with SD_RETRIES_ISOLATE
 *	    if the check should be made to see if FLAG_ISOLATE is set
 *	    in the pkt.  If FLAG_ISOLATE is set, then the command is
 *	    not retried, it is simply failed.
 *
 *	user_funcp - Ptr to function to call before dispatching the
 *	    command.  May be NULL if no action needs to be performed.
 *	    (Primarily intended for printing messages.)
 *
 *	user_arg - Optional argument to be passed along to
 *	    the user_funcp call.
 *
 *	failure_code - errno return code to set in the bp if the
 *	    command is going to be failed.
 *
 *	retry_delay - Retry delay interval in (clock_t) units.  May
 *	    be zero, which indicates that the command should be retried
 *	    immediately (i.e., without an intervening delay).
 *
 *	statp - Ptr to kstat function to be updated if the command
 *	    is queued for a delayed retry.  May be NULL if no kstat
 *	    update is desired.
 *
 * Context: May be called from interrupt context.
 */

static void
sd_retry_command(struct sd_lun *un, struct buf *bp, int retry_check_flag,
    void (*user_funcp)(struct sd_lun *un, struct buf *bp, void *argp, int
    code), void *user_arg, int failure_code, clock_t retry_delay,
    void (*statp)(kstat_io_t *))
{
	struct sd_xbuf	*xp;
	struct scsi_pkt	*pktp;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);
	pktp = SD_GET_PKTP(bp);
	ASSERT(pktp != NULL);

	SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un,
	    "sd_retry_command: entry: bp:0x%p xp:0x%p\n", bp, xp);

	/*
	 * If we are syncing or dumping, fail the command to avoid
	 * recursively calling back into scsi_transport().
	 */
	if (ddi_in_panic()) {
		goto fail_command_no_log;
	}

	/*
	 * We should never be retrying a command with FLAG_DIAGNOSE set, so
	 * log an error and fail the command.
	 */
	if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE,
		    "ERROR, retrying FLAG_DIAGNOSE command.\n");
		sd_dump_memory(un, SD_LOG_IO, "CDB",
		    (uchar_t *)pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX);
		sd_dump_memory(un, SD_LOG_IO, "Sense Data",
		    (uchar_t *)xp->xb_sense_data, SENSE_LENGTH, SD_LOG_HEX);
		goto fail_command;
	}

	/*
	 * If we are suspended, then put the command onto the head of the
	 * wait queue since we don't want to start any more commands, and
	 * clear the un_retry_bp.  The next time we are resumed we will
	 * handle the commands in the wait queue.
	 */
	switch (un->un_state) {
	case SD_STATE_SUSPENDED:
	case SD_STATE_DUMPING:
		bp->av_forw = un->un_waitq_headp;
		un->un_waitq_headp = bp;
		if (un->un_waitq_tailp == NULL) {
			un->un_waitq_tailp = bp;
		}
		if (bp == un->un_retry_bp) {
			un->un_retry_bp = NULL;
			un->un_retry_statp = NULL;
		}
		SD_UPDATE_KSTATS(un, kstat_waitq_enter, bp);
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: "
		    "exiting; cmd bp:0x%p requeued for SUSPEND/DUMP\n", bp);
		return;
	default:
		break;
	}

	/*
	 * If the caller wants us to check FLAG_ISOLATE, then see if that
	 * is set; if it is then we do not want to retry the command.
	 * Normally, FLAG_ISOLATE is only used with USCSI cmds.
	 */
	if ((retry_check_flag & SD_RETRIES_ISOLATE) != 0) {
		if ((pktp->pkt_flags & FLAG_ISOLATE) != 0) {
			goto fail_command;
		}
	}


	/*
	 * If SD_RETRIES_FAILFAST is set, it indicates that either a
	 * command timeout or a selection timeout has occurred.  This means
	 * that we were unable to establish any kind of communication with
	 * the target, and subsequent retries and/or commands are likely
	 * to encounter similar results and take a long time to complete.
	 *
	 * If this is a failfast error condition, we need to update the
	 * failfast state, even if this bp does not have B_FAILFAST set.
	 */
	if (retry_check_flag & SD_RETRIES_FAILFAST) {
		if (un->un_failfast_state == SD_FAILFAST_ACTIVE) {
			ASSERT(un->un_failfast_bp == NULL);
			/*
			 * If we are already in the active failfast state, and
			 * another failfast error condition has been detected,
			 * then fail this command if it has B_FAILFAST set.
			 * If B_FAILFAST is clear, then maintain the legacy
			 * behavior of retrying heroically, even though this
			 * will take a lot more time to fail the command.
			 */
			if (bp->b_flags & B_FAILFAST) {
				goto fail_command;
			}
		} else {
			/*
			 * We're not in the active failfast state, but we
			 * have a failfast error condition, so we must begin
			 * transition to the next state. We do this regardless
			 * of whether or not this bp has B_FAILFAST set.
			 */
			if (un->un_failfast_bp == NULL) {
				/*
				 * This is the first bp to meet a failfast
				 * condition so save it on un_failfast_bp &
				 * do normal retry processing. Do not enter
				 * active failfast state yet. This marks
				 * entry into the "failfast pending" state.
				 */
				un->un_failfast_bp = bp;

			} else if (un->un_failfast_bp == bp) {
				/*
				 * This is the second time *this* bp has
				 * encountered a failfast error condition,
				 * so enter active failfast state & flush
				 * queues as appropriate.
				 */
				un->un_failfast_state = SD_FAILFAST_ACTIVE;
				un->un_failfast_bp = NULL;
				sd_failfast_flushq(un);

				/*
				 * Fail this bp now if B_FAILFAST set;
				 * otherwise continue with retries.  (It would
				 * be pretty ironic if this bp succeeded on a
				 * subsequent retry after we just flushed all
				 * the queues).
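				 *
				 * To summarize the failfast state machine
				 * (editorial note): SD_FAILFAST_INACTIVE ->
				 * "failfast pending" (un_failfast_bp set on
				 * the first failfast error) ->
				 * SD_FAILFAST_ACTIVE (the same bp hits a
				 * second failfast error and the queues are
				 * flushed via sd_failfast_flushq()).  Any
				 * successful completion returns the state
				 * to SD_FAILFAST_INACTIVE.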
				 */
				if (bp->b_flags & B_FAILFAST) {
					goto fail_command;
				}

#if !defined(lint) && !defined(__lint)
			} else {
				/*
				 * If neither of the preceding conditionals
				 * was true, it means that there is some
				 * *other* bp that has met an initial failfast
				 * condition and is currently either being
				 * retried or is waiting to be retried. In
				 * that case we should perform normal retry
				 * processing on *this* bp, since there is a
				 * chance that the current failfast condition
				 * is transient and recoverable. If that does
				 * not turn out to be the case, then retries
				 * will be cleared when the wait queue is
				 * flushed anyway.
				 */
#endif
			}
		}
	} else {
		/*
		 * SD_RETRIES_FAILFAST is clear, which indicates that we
		 * likely were able to at least establish some level of
		 * communication with the target and subsequent commands
		 * and/or retries are likely to get through to the target.
		 * In this case we want to be aggressive about clearing
		 * the failfast state.  Note that this does not affect
		 * the "failfast pending" condition.
		 */
		un->un_failfast_state = SD_FAILFAST_INACTIVE;
	}


	/*
	 * Check the specified retry count to see if we can still do
	 * any retries with this pkt before we should fail it.
	 */
	switch (retry_check_flag & SD_RETRIES_MASK) {
	case SD_RETRIES_VICTIM:
		/*
		 * Check the victim retry count. If exhausted, then fall
		 * thru & check against the standard retry count.
		 */
		if (xp->xb_victim_retry_count < un->un_victim_retry_count) {
			/* Increment count & proceed with the retry */
			xp->xb_victim_retry_count++;
			break;
		}
		/* Victim retries exhausted, fall back to std. retries... */
		/* FALLTHRU */

	case SD_RETRIES_STANDARD:
		if (xp->xb_retry_count >= un->un_retry_count) {
			/* Retries exhausted, fail the command */
			SD_TRACE(SD_LOG_IO_CORE, un,
			    "sd_retry_command: retries exhausted!\n");
			/*
			 * update b_resid for failed SCMD_READ & SCMD_WRITE
			 * commands with nonzero pkt_resid.
			 */
			if ((pktp->pkt_reason == CMD_CMPLT) &&
			    (SD_GET_PKT_STATUS(pktp) == STATUS_GOOD) &&
			    (pktp->pkt_resid != 0)) {
				uchar_t op = SD_GET_PKT_OPCODE(pktp) & 0x1F;
				if ((op == SCMD_READ) || (op == SCMD_WRITE)) {
					SD_UPDATE_B_RESID(bp, pktp);
				}
			}
			goto fail_command;
		}
		xp->xb_retry_count++;
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sd_retry_command: retry count:%d\n", xp->xb_retry_count);
		break;

	case SD_RETRIES_UA:
		if (xp->xb_ua_retry_count >= sd_ua_retry_count) {
			/* Retries exhausted, fail the command */
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "Unit Attention retries exhausted. 
" 13911 "Check the target.\n"); 13912 goto fail_command; 13913 } 13914 xp->xb_ua_retry_count++; 13915 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13916 "sd_retry_command: retry count:%d\n", 13917 xp->xb_ua_retry_count); 13918 break; 13919 13920 case SD_RETRIES_BUSY: 13921 if (xp->xb_retry_count >= un->un_busy_retry_count) { 13922 /* Retries exhausted, fail the command */ 13923 SD_TRACE(SD_LOG_IO_CORE, un, 13924 "sd_retry_command: retries exhausted!\n"); 13925 goto fail_command; 13926 } 13927 xp->xb_retry_count++; 13928 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13929 "sd_retry_command: retry count:%d\n", xp->xb_retry_count); 13930 break; 13931 13932 case SD_RETRIES_NOCHECK: 13933 default: 13934 /* No retry count to check. Just proceed with the retry */ 13935 break; 13936 } 13937 13938 xp->xb_pktp->pkt_flags |= FLAG_HEAD; 13939 13940 /* 13941 * If we were given a zero timeout, we must attempt to retry the 13942 * command immediately (ie, without a delay). 13943 */ 13944 if (retry_delay == 0) { 13945 /* 13946 * Check some limiting conditions to see if we can actually 13947 * do the immediate retry. If we cannot, then we must 13948 * fall back to queueing up a delayed retry. 13949 */ 13950 if (un->un_ncmds_in_transport >= un->un_throttle) { 13951 /* 13952 * We are at the throttle limit for the target, 13953 * fall back to delayed retry. 13954 */ 13955 retry_delay = SD_BSY_TIMEOUT; 13956 statp = kstat_waitq_enter; 13957 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13958 "sd_retry_command: immed. retry hit " 13959 "throttle!\n"); 13960 } else { 13961 /* 13962 * We're clear to proceed with the immediate retry. 13963 * First call the user-provided function (if any) 13964 */ 13965 if (user_funcp != NULL) { 13966 (*user_funcp)(un, bp, user_arg, 13967 SD_IMMEDIATE_RETRY_ISSUED); 13968 #ifdef __lock_lint 13969 sd_print_incomplete_msg(un, bp, user_arg, 13970 SD_IMMEDIATE_RETRY_ISSUED); 13971 sd_print_cmd_incomplete_msg(un, bp, user_arg, 13972 SD_IMMEDIATE_RETRY_ISSUED); 13973 sd_print_sense_failed_msg(un, bp, user_arg, 13974 SD_IMMEDIATE_RETRY_ISSUED); 13975 #endif 13976 } 13977 13978 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13979 "sd_retry_command: issuing immediate retry\n"); 13980 13981 /* 13982 * Call sd_start_cmds() to transport the command to 13983 * the target. 13984 */ 13985 sd_start_cmds(un, bp); 13986 13987 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13988 "sd_retry_command exit\n"); 13989 return; 13990 } 13991 } 13992 13993 /* 13994 * Set up to retry the command after a delay. 13995 * First call the user-provided function (if any) 13996 */ 13997 if (user_funcp != NULL) { 13998 (*user_funcp)(un, bp, user_arg, SD_DELAYED_RETRY_ISSUED); 13999 } 14000 14001 sd_set_retry_bp(un, bp, retry_delay, statp); 14002 14003 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: exit\n"); 14004 return; 14005 14006 fail_command: 14007 14008 if (user_funcp != NULL) { 14009 (*user_funcp)(un, bp, user_arg, SD_NO_RETRY_ISSUED); 14010 } 14011 14012 fail_command_no_log: 14013 14014 SD_INFO(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14015 "sd_retry_command: returning failed command\n"); 14016 14017 sd_return_failed_command(un, bp, failure_code); 14018 14019 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: exit\n"); 14020 } 14021 14022 14023 /* 14024 * Function: sd_set_retry_bp 14025 * 14026 * Description: Set up the given bp for retry. 
14027 * 14028 * Arguments: un - ptr to associated softstate 14029 * bp - ptr to buf(9S) for the command 14030 * retry_delay - time interval before issuing retry (may be 0) 14031 * statp - optional pointer to kstat function 14032 * 14033 * Context: May be called under interrupt context 14034 */ 14035 14036 static void 14037 sd_set_retry_bp(struct sd_lun *un, struct buf *bp, clock_t retry_delay, 14038 void (*statp)(kstat_io_t *)) 14039 { 14040 ASSERT(un != NULL); 14041 ASSERT(mutex_owned(SD_MUTEX(un))); 14042 ASSERT(bp != NULL); 14043 14044 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, 14045 "sd_set_retry_bp: entry: un:0x%p bp:0x%p\n", un, bp); 14046 14047 /* 14048 * Indicate that the command is being retried. This will not allow any 14049 * other commands on the wait queue to be transported to the target 14050 * until this command has been completed (success or failure). The 14051 * "retry command" is not transported to the target until the given 14052 * time delay expires, unless the user specified a 0 retry_delay. 14053 * 14054 * Note: the timeout(9F) callback routine is what actually calls 14055 * sd_start_cmds() to transport the command, with the exception of a 14056 * zero retry_delay. The only current implementor of a zero retry delay 14057 * is the case where a START_STOP_UNIT is sent to spin-up a device. 14058 */ 14059 if (un->un_retry_bp == NULL) { 14060 ASSERT(un->un_retry_statp == NULL); 14061 un->un_retry_bp = bp; 14062 14063 /* 14064 * If the user has not specified a delay the command should 14065 * be queued and no timeout should be scheduled. 14066 */ 14067 if (retry_delay == 0) { 14068 /* 14069 * Save the kstat pointer that will be used in the 14070 * call to SD_UPDATE_KSTATS() below, so that 14071 * sd_start_cmds() can correctly decrement the waitq 14072 * count when it is time to transport this command. 14073 */ 14074 un->un_retry_statp = statp; 14075 goto done; 14076 } 14077 } 14078 14079 if (un->un_retry_bp == bp) { 14080 /* 14081 * Save the kstat pointer that will be used in the call to 14082 * SD_UPDATE_KSTATS() below, so that sd_start_cmds() can 14083 * correctly decrement the waitq count when it is time to 14084 * transport this command. 14085 */ 14086 un->un_retry_statp = statp; 14087 14088 /* 14089 * Schedule a timeout if: 14090 * 1) The user has specified a delay. 14091 * 2) There is not a START_STOP_UNIT callback pending. 14092 * 14093 * If no delay has been specified, then it is up to the caller 14094 * to ensure that IO processing continues without stalling. 14095 * Effectively, this means that the caller will issue the 14096 * required call to sd_start_cmds(). The START_STOP_UNIT 14097 * callback does this after the START STOP UNIT command has 14098 * completed. In either of these cases we should not schedule 14099 * a timeout callback here. Also don't schedule the timeout if 14100 * an SD_PATH_DIRECT_PRIORITY command is waiting to restart. 14101 */ 14102 if ((retry_delay != 0) && (un->un_startstop_timeid == NULL) && 14103 (un->un_direct_priority_timeid == NULL)) { 14104 un->un_retry_timeid = 14105 timeout(sd_start_retry_command, un, retry_delay); 14106 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14107 "sd_set_retry_bp: setting timeout: un: 0x%p" 14108 " bp:0x%p un_retry_timeid:0x%p\n", 14109 un, bp, un->un_retry_timeid); 14110 } 14111 } else { 14112 /* 14113 * We only get in here if there is already another command 14114 * waiting to be retried. 
		 * In this case, we just put the given command onto the
		 * wait queue, so it can be transported after the current
		 * retry command has completed.
		 *
		 * We also have to make sure that if the command at the head
		 * of the wait queue is the un_failfast_bp, we do not put any
		 * other commands to be retried ahead of it.
		 */
		if ((un->un_failfast_bp != NULL) &&
		    (un->un_failfast_bp == un->un_waitq_headp)) {
			/*
			 * Enqueue this command AFTER the first command on
			 * the wait queue (which is also un_failfast_bp).
			 */
			bp->av_forw = un->un_waitq_headp->av_forw;
			un->un_waitq_headp->av_forw = bp;
			if (un->un_waitq_headp == un->un_waitq_tailp) {
				un->un_waitq_tailp = bp;
			}
		} else {
			/* Enqueue this command at the head of the waitq. */
			bp->av_forw = un->un_waitq_headp;
			un->un_waitq_headp = bp;
			if (un->un_waitq_tailp == NULL) {
				un->un_waitq_tailp = bp;
			}
		}

		if (statp == NULL) {
			statp = kstat_waitq_enter;
		}
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sd_set_retry_bp: un:0x%p already delayed retry\n", un);
	}

done:
	if (statp != NULL) {
		SD_UPDATE_KSTATS(un, statp, bp);
	}

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
	    "sd_set_retry_bp: exit un:0x%p\n", un);
}


/*
 * Function: sd_start_retry_command
 *
 * Description: Start the command that has been waiting on the target's
 *		retry queue.  Called from timeout(9F) context after the
 *		retry delay interval has expired.
 *
 * Arguments: arg - pointer to associated softstate for the device.
 *
 * Context: timeout(9F) thread context.  May not sleep.
 */

static void
sd_start_retry_command(void *arg)
{
	struct sd_lun *un = arg;

	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
	    "sd_start_retry_command: entry\n");

	mutex_enter(SD_MUTEX(un));

	un->un_retry_timeid = NULL;

	if (un->un_retry_bp != NULL) {
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sd_start_retry_command: un:0x%p STARTING bp:0x%p\n",
		    un, un->un_retry_bp);
		sd_start_cmds(un, un->un_retry_bp);
	}

	mutex_exit(SD_MUTEX(un));

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
	    "sd_start_retry_command: exit\n");
}


/*
 * Function: sd_start_direct_priority_command
 *
 * Description: Used to re-start an SD_PATH_DIRECT_PRIORITY command that had
 *		received TRAN_BUSY when we called scsi_transport() to send it
 *		to the underlying HBA. This function is called from timeout(9F)
 *		context after the delay interval has expired.
 *
 * Arguments: arg - pointer to associated buf(9S) to be restarted.
 *
 * Context: timeout(9F) thread context.  May not sleep.
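 *
 * For example, sd_start_cmds() (above) schedules this callback when a
 * DIRECT_PRIORITY command gets TRAN_BUSY from scsi_transport():
 *
 *	un->un_direct_priority_timeid =
 *	    timeout(sd_start_direct_priority_command, bp,
 *	    SD_BSY_TIMEOUT / 500);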
 */

static void
sd_start_direct_priority_command(void *arg)
{
	struct buf	*priority_bp = arg;
	struct sd_lun	*un;

	ASSERT(priority_bp != NULL);
	un = SD_GET_UN(priority_bp);
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
	    "sd_start_direct_priority_command: entry\n");

	mutex_enter(SD_MUTEX(un));
	un->un_direct_priority_timeid = NULL;
	sd_start_cmds(un, priority_bp);
	mutex_exit(SD_MUTEX(un));

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
	    "sd_start_direct_priority_command: exit\n");
}


/*
 * Function: sd_send_request_sense_command
 *
 * Description: Sends a REQUEST SENSE command to the target
 *
 * Context: May be called from interrupt context.
 */

static void
sd_send_request_sense_command(struct sd_lun *un, struct buf *bp,
    struct scsi_pkt *pktp)
{
	ASSERT(bp != NULL);
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));

	SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_send_request_sense_command: "
	    "entry: buf:0x%p\n", bp);

	/*
	 * If we are syncing or dumping, then fail the command to avoid a
	 * recursive callback into scsi_transport().  Also fail the command
	 * if we are suspended (legacy behavior).
	 */
	if (ddi_in_panic() || (un->un_state == SD_STATE_SUSPENDED) ||
	    (un->un_state == SD_STATE_DUMPING)) {
		sd_return_failed_command(un, bp, EIO);
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sd_send_request_sense_command: syncing/dumping, exit\n");
		return;
	}

	/*
	 * Retry the failed command and don't issue the request sense if:
	 *    1) the sense buf is busy
	 *    2) we have 1 or more outstanding commands on the target
	 *       (the sense data will be cleared or invalidated anyway)
	 *
	 * Note: There could be an issue with not checking a retry limit here,
	 * the problem is determining which retry limit to check.
	 */
	if ((un->un_sense_isbusy != 0) || (un->un_ncmds_in_transport > 0)) {
		/* Don't retry if the command is flagged as non-retryable */
		if ((pktp->pkt_flags & FLAG_DIAGNOSE) == 0) {
			sd_retry_command(un, bp, SD_RETRIES_NOCHECK,
			    NULL, NULL, 0, SD_BSY_TIMEOUT, kstat_waitq_enter);
			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
			    "sd_send_request_sense_command: "
			    "at full throttle, retrying exit\n");
		} else {
			sd_return_failed_command(un, bp, EIO);
			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
			    "sd_send_request_sense_command: "
			    "at full throttle, non-retryable exit\n");
		}
		return;
	}

	sd_mark_rqs_busy(un, bp);
	sd_start_cmds(un, un->un_rqs_bp);

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
	    "sd_send_request_sense_command: exit\n");
}


/*
 * Function: sd_mark_rqs_busy
 *
 * Description: Indicate that the request sense bp for this instance is
 *		in use.
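 *		For example, sd_send_request_sense_command() (above) calls
 *		sd_mark_rqs_busy(un, bp) and then issues the request sense
 *		via sd_start_cmds(un, un->un_rqs_bp); sd_mark_rqs_idle()
 *		(below) releases the bp again at completion time.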
 *
 * Context: May be called under interrupt context
 */

static void
sd_mark_rqs_busy(struct sd_lun *un, struct buf *bp)
{
	struct sd_xbuf	*sense_xp;

	ASSERT(un != NULL);
	ASSERT(bp != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(un->un_sense_isbusy == 0);

	SD_TRACE(SD_LOG_IO_CORE, un, "sd_mark_rqs_busy: entry: "
	    "buf:0x%p xp:0x%p un:0x%p\n", bp, SD_GET_XBUF(bp), un);

	sense_xp = SD_GET_XBUF(un->un_rqs_bp);
	ASSERT(sense_xp != NULL);

	SD_INFO(SD_LOG_IO, un,
	    "sd_mark_rqs_busy: entry: sense_xp:0x%p\n", sense_xp);

	ASSERT(sense_xp->xb_pktp != NULL);
	ASSERT((sense_xp->xb_pktp->pkt_flags & (FLAG_SENSING | FLAG_HEAD))
	    == (FLAG_SENSING | FLAG_HEAD));

	un->un_sense_isbusy = 1;
	un->un_rqs_bp->b_resid = 0;
	sense_xp->xb_pktp->pkt_resid  = 0;
	sense_xp->xb_pktp->pkt_reason = 0;

	/* So we can get back the bp at interrupt time! */
	sense_xp->xb_sense_bp = bp;

	bzero(un->un_rqs_bp->b_un.b_addr, SENSE_LENGTH);

	/*
	 * Mark this buf as awaiting sense data. (This is already set in
	 * the pkt_flags for the RQS packet.)
	 */
	((SD_GET_XBUF(bp))->xb_pktp)->pkt_flags |= FLAG_SENSING;

	sense_xp->xb_retry_count	= 0;
	sense_xp->xb_victim_retry_count = 0;
	sense_xp->xb_ua_retry_count	= 0;
	sense_xp->xb_nr_retry_count	= 0;
	sense_xp->xb_dma_resid  = 0;

	/* Clean up the fields for auto-request sense */
	sense_xp->xb_sense_status = 0;
	sense_xp->xb_sense_state  = 0;
	sense_xp->xb_sense_resid  = 0;
	bzero(sense_xp->xb_sense_data, sizeof (sense_xp->xb_sense_data));

	SD_TRACE(SD_LOG_IO_CORE, un, "sd_mark_rqs_busy: exit\n");
}


/*
 * Function: sd_mark_rqs_idle
 *
 * Description: SD_MUTEX must be held continuously through this routine
 *		to prevent reuse of the rqs struct before the caller can
 *		complete its processing.
 *
 * Return Code: Pointer to the RQS buf
 *
 * Context: May be called under interrupt context
 */

static struct buf *
sd_mark_rqs_idle(struct sd_lun *un, struct sd_xbuf *sense_xp)
{
	struct buf *bp;
	ASSERT(un != NULL);
	ASSERT(sense_xp != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(un->un_sense_isbusy != 0);

	un->un_sense_isbusy = 0;
	bp = sense_xp->xb_sense_bp;
	sense_xp->xb_sense_bp = NULL;

	/* This pkt is no longer interested in getting sense data */
	((SD_GET_XBUF(bp))->xb_pktp)->pkt_flags &= ~FLAG_SENSING;

	return (bp);
}



/*
 * Function: sd_alloc_rqs
 *
 * Description: Set up the unit to receive auto request sense data
 *
 * Return Code: DDI_SUCCESS or DDI_FAILURE
 *
 * Context: Called under attach(9E) context
 */

static int
sd_alloc_rqs(struct scsi_device *devp, struct sd_lun *un)
{
	struct sd_xbuf *xp;

	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT(un->un_rqs_bp == NULL);
	ASSERT(un->un_rqs_pktp == NULL);

	/*
	 * First allocate the required buf and scsi_pkt structs, then set up
	 * the CDB in the scsi_pkt for a REQUEST SENSE command.

/*
 * Function: sd_alloc_rqs
 *
 * Description: Set up the unit to receive auto request sense data
 *
 * Return Code: DDI_SUCCESS or DDI_FAILURE
 *
 * Context: Called under attach(9E) context
 */

static int
sd_alloc_rqs(struct scsi_device *devp, struct sd_lun *un)
{
	struct sd_xbuf *xp;

	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT(un->un_rqs_bp == NULL);
	ASSERT(un->un_rqs_pktp == NULL);

	/*
	 * First allocate the required buf and scsi_pkt structs, then set up
	 * the CDB in the scsi_pkt for a REQUEST SENSE command.
	 */
	un->un_rqs_bp = scsi_alloc_consistent_buf(&devp->sd_address, NULL,
	    MAX_SENSE_LENGTH, B_READ, SLEEP_FUNC, NULL);
	if (un->un_rqs_bp == NULL) {
		return (DDI_FAILURE);
	}

	un->un_rqs_pktp = scsi_init_pkt(&devp->sd_address, NULL, un->un_rqs_bp,
	    CDB_GROUP0, 1, 0, PKT_CONSISTENT, SLEEP_FUNC, NULL);

	if (un->un_rqs_pktp == NULL) {
		sd_free_rqs(un);
		return (DDI_FAILURE);
	}

	/* Set up the CDB in the scsi_pkt for a REQUEST SENSE command. */
	(void) scsi_setup_cdb((union scsi_cdb *)un->un_rqs_pktp->pkt_cdbp,
	    SCMD_REQUEST_SENSE, 0, MAX_SENSE_LENGTH, 0);

	SD_FILL_SCSI1_LUN(un, un->un_rqs_pktp);

	/* Set up the other needed members in the ARQ scsi_pkt. */
	un->un_rqs_pktp->pkt_comp   = sdintr;
	un->un_rqs_pktp->pkt_time   = sd_io_time;
	un->un_rqs_pktp->pkt_flags |=
	    (FLAG_SENSING | FLAG_HEAD);	/* (1222170) */

	/*
	 * Allocate & init the sd_xbuf struct for the RQS command. Do not
	 * provide any initpkt or destroypkt routines, as we take care of
	 * scsi_pkt allocation/freeing here and in sd_free_rqs().
	 */
	xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP);
	sd_xbuf_init(un, un->un_rqs_bp, xp, SD_CHAIN_NULL, NULL);
	xp->xb_pktp = un->un_rqs_pktp;
	SD_INFO(SD_LOG_ATTACH_DETACH, un,
	    "sd_alloc_rqs: un 0x%p, rqs xp 0x%p, pkt 0x%p, buf 0x%p\n",
	    un, xp, un->un_rqs_pktp, un->un_rqs_bp);

	/*
	 * Save the pointer to the request sense private bp so it can
	 * be retrieved in sdintr.
	 */
	un->un_rqs_pktp->pkt_private = un->un_rqs_bp;
	ASSERT(un->un_rqs_bp->b_private == xp);

	/*
	 * See if the HBA supports auto-request sense for the specified
	 * target/lun. If it does, then try to enable it (if not already
	 * enabled).
	 *
	 * Note: For some HBAs (ifp & sf), scsi_ifsetcap will always return
	 * failure, while for other HBAs (pln) scsi_ifsetcap will always
	 * return success. However, in both of these cases ARQ is always
	 * enabled and scsi_ifgetcap will always return true. The best approach
	 * is to issue the scsi_ifgetcap() first, then try the scsi_ifsetcap().
	 *
	 * The third case is an HBA (adp) that always returns enabled on
	 * scsi_ifgetcap even when ARQ is not enabled; the best approach
	 * there is to issue a scsi_ifsetcap and then a scsi_ifgetcap.
	 * Note: this case is to circumvent the Adaptec bug. (x86 only)
	 */

	if (un->un_f_is_fibre == TRUE) {
		un->un_f_arq_enabled = TRUE;
	} else {
#if defined(__i386) || defined(__amd64)
		/*
		 * Circumvent the Adaptec bug; remove this code when
		 * the bug is fixed.
		 */
		(void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 1, 1);
#endif
		switch (scsi_ifgetcap(SD_ADDRESS(un), "auto-rqsense", 1)) {
		case 0:
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_alloc_rqs: HBA supports ARQ\n");
			/*
			 * ARQ is supported by this HBA but currently is not
			 * enabled. Attempt to enable it and if successful then
			 * mark this instance as ARQ enabled.
			 */
			if (scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 1, 1)
			    == 1) {
				/* Successfully enabled ARQ in the HBA */
				SD_INFO(SD_LOG_ATTACH_DETACH, un,
				    "sd_alloc_rqs: ARQ enabled\n");
				un->un_f_arq_enabled = TRUE;
			} else {
				/* Could not enable ARQ in the HBA */
				SD_INFO(SD_LOG_ATTACH_DETACH, un,
				    "sd_alloc_rqs: failed ARQ enable\n");
				un->un_f_arq_enabled = FALSE;
			}
			break;
		case 1:
			/*
			 * ARQ is supported by this HBA and is already enabled.
			 * Just mark ARQ as enabled for this instance.
			 */
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_alloc_rqs: ARQ already enabled\n");
			un->un_f_arq_enabled = TRUE;
			break;
		default:
			/*
			 * ARQ is not supported by this HBA; disable it for this
			 * instance.
			 */
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_alloc_rqs: HBA does not support ARQ\n");
			un->un_f_arq_enabled = FALSE;
			break;
		}
	}

	return (DDI_SUCCESS);
}

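
/*
 * Illustrative sketch (not driver code, compiled out): the capability
 * dance in sd_alloc_rqs() reduces to "get, and if supported-but-disabled,
 * set". demo_getcap()/demo_setcap() are hypothetical stand-ins for the
 * scsi_ifgetcap(9F)/scsi_ifsetcap(9F) calls on "auto-rqsense", which
 * return -1 (unsupported), 0 (disabled) or 1 (enabled/success).
 */
#if 0
static int
demo_probe_arq(int (*demo_getcap)(void), int (*demo_setcap)(void))
{
	switch (demo_getcap()) {
	case 0:				/* supported, currently disabled */
		return (demo_setcap() == 1);
	case 1:				/* supported and already enabled */
		return (1);
	default:			/* not supported by this HBA */
		return (0);
	}
}
#endif
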

/*
 * Function: sd_free_rqs
 *
 * Description: Cleanup for the per-instance RQS command.
 *
 * Context: Kernel thread context
 */

static void
sd_free_rqs(struct sd_lun *un)
{
	ASSERT(un != NULL);

	SD_TRACE(SD_LOG_IO_CORE, un, "sd_free_rqs: entry\n");

	/*
	 * If consistent memory is bound to a scsi_pkt, the pkt
	 * has to be destroyed *before* freeing the consistent memory.
	 * Don't change the sequence of these operations:
	 * scsi_destroy_pkt() might access memory after it has been freed
	 * by scsi_free_consistent_buf(), which isn't allowed.
	 */
	if (un->un_rqs_pktp != NULL) {
		scsi_destroy_pkt(un->un_rqs_pktp);
		un->un_rqs_pktp = NULL;
	}

	if (un->un_rqs_bp != NULL) {
		struct sd_xbuf *xp = SD_GET_XBUF(un->un_rqs_bp);
		if (xp != NULL) {
			kmem_free(xp, sizeof (struct sd_xbuf));
		}
		scsi_free_consistent_buf(un->un_rqs_bp);
		un->un_rqs_bp = NULL;
	}
	SD_TRACE(SD_LOG_IO_CORE, un, "sd_free_rqs: exit\n");
}



/*
 * Function: sd_reduce_throttle
 *
 * Description: Reduces the maximum # of outstanding commands on a
 *		target to the current number of outstanding commands.
 *		Queues a timeout(9F) callback to restore the limit
 *		after a specified interval has elapsed.
 *		Typically used when we get a TRAN_BUSY return code
 *		back from scsi_transport().
 *
 * Arguments: un - ptr to the sd_lun softstate struct
 *	      throttle_type: SD_THROTTLE_TRAN_BUSY or SD_THROTTLE_QFULL
 *
 * Context: May be called from interrupt context
 */

static void
sd_reduce_throttle(struct sd_lun *un, int throttle_type)
{
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(un->un_ncmds_in_transport >= 0);

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reduce_throttle: "
	    "entry: un:0x%p un_throttle:%d un_ncmds_in_transport:%d\n",
	    un, un->un_throttle, un->un_ncmds_in_transport);

	if (un->un_throttle > 1) {
		if (un->un_f_use_adaptive_throttle == TRUE) {
			switch (throttle_type) {
			case SD_THROTTLE_TRAN_BUSY:
				if (un->un_busy_throttle == 0) {
					un->un_busy_throttle = un->un_throttle;
				}
				break;
			case SD_THROTTLE_QFULL:
				un->un_busy_throttle = 0;
				break;
			default:
				ASSERT(FALSE);
			}

			if (un->un_ncmds_in_transport > 0) {
				un->un_throttle = un->un_ncmds_in_transport;
			}

		} else {
			if (un->un_ncmds_in_transport == 0) {
				un->un_throttle = 1;
			} else {
				un->un_throttle = un->un_ncmds_in_transport;
			}
		}
	}

	/* Reschedule the timeout if none is currently active */
	if (un->un_reset_throttle_timeid == NULL) {
		un->un_reset_throttle_timeid = timeout(sd_restore_throttle,
		    un, SD_THROTTLE_RESET_INTERVAL);
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sd_reduce_throttle: timeout scheduled!\n");
	}

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reduce_throttle: "
	    "exit: un:0x%p un_throttle:%d\n", un, un->un_throttle);
}


/*
 * Function: sd_restore_throttle
 *
 * Description: Callback function for timeout(9F). Resets the current
 *		value of un->un_throttle to its default.
 *
 * Arguments: arg - pointer to associated softstate for the device.
 *
 * Context: May be called from interrupt context
 */

static void
sd_restore_throttle(void *arg)
{
	struct sd_lun	*un = arg;

	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	mutex_enter(SD_MUTEX(un));

	SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: "
	    "entry: un:0x%p un_throttle:%d\n", un, un->un_throttle);

	un->un_reset_throttle_timeid = NULL;

	if (un->un_f_use_adaptive_throttle == TRUE) {
		/*
		 * If un_busy_throttle is nonzero, then it contains the
		 * value that un_throttle was when we got a TRAN_BUSY back
		 * from scsi_transport(). We want to revert back to this
		 * value.
		 *
		 * In the QFULL case, the throttle limit will incrementally
		 * increase until it reaches max throttle.
		 */
		if (un->un_busy_throttle > 0) {
			un->un_throttle = un->un_busy_throttle;
			un->un_busy_throttle = 0;
		} else {
			/*
			 * Increase the throttle by 10% to open the gate
			 * slowly; schedule another restore if the saved
			 * throttle has not yet been reached.
			 */
			short throttle;
			if (sd_qfull_throttle_enable) {
				throttle = un->un_throttle +
				    max((un->un_throttle / 10), 1);
				un->un_throttle =
				    (throttle < un->un_saved_throttle) ?
				    throttle : un->un_saved_throttle;
				if (un->un_throttle < un->un_saved_throttle) {
					un->un_reset_throttle_timeid =
					    timeout(sd_restore_throttle,
					    un,
					    SD_QFULL_THROTTLE_RESET_INTERVAL);
				}
			}
		}

		/*
		 * If un_throttle has fallen below the low-water mark, we
		 * restore the maximum value here (and allow it to ratchet
		 * down again if necessary).
		 */
		if (un->un_throttle < un->un_min_throttle) {
			un->un_throttle = un->un_saved_throttle;
		}
	} else {
		SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: "
		    "restoring limit from 0x%x to 0x%x\n",
		    un->un_throttle, un->un_saved_throttle);
		un->un_throttle = un->un_saved_throttle;
	}

	SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un,
	    "sd_restore_throttle: calling sd_start_cmds!\n");

	sd_start_cmds(un, NULL);

	SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un,
	    "sd_restore_throttle: exit: un:0x%p un_throttle:%d\n",
	    un, un->un_throttle);

	mutex_exit(SD_MUTEX(un));

	SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: exit\n");
}

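
/*
 * Illustrative sketch (not driver code, compiled out): one step of the
 * QFULL ramp above, in isolation. Starting from a throttle of 2 with a
 * saved throttle of 256, successive timeout intervals yield 3, 4, 5, ...
 * (+1 while the throttle is below 20), then roughly +10% per interval,
 * capped at the saved value. demo_ramp_throttle is a hypothetical name.
 */
#if 0
static short
demo_ramp_throttle(short cur, short saved)
{
	short step = (short)((cur / 10 > 1) ? cur / 10 : 1);
	short next = (short)(cur + step);

	return ((next < saved) ? next : saved);
}
#endif
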
/*
 * Function: sdrunout
 *
 * Description: Callback routine for scsi_init_pkt when a resource allocation
 *		fails.
 *
 * Arguments: arg - a pointer to the sd_lun unit struct for the particular
 *	      soft state instance.
 *
 * Return Code: The scsi_init_pkt routine allows for the callback function to
 *		return a 0 indicating the callback should be rescheduled or a 1
 *		indicating not to reschedule. This routine always returns 1
 *		because the driver always provides a callback function to
 *		scsi_init_pkt. This results in a callback always being scheduled
 *		(via the scsi_init_pkt callback implementation) if a resource
 *		failure occurs.
 *
 * Context: This callback function may not block or call routines that block
 *
 * Note: Using the scsi_init_pkt callback facility can result in an I/O
 *	 request persisting at the head of the list which cannot be
 *	 satisfied even after multiple retries. In the future the driver
 *	 may implement some type of maximum runout count before failing
 *	 an I/O.
 */

static int
sdrunout(caddr_t arg)
{
	struct sd_lun	*un = (struct sd_lun *)arg;

	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdrunout: entry\n");

	mutex_enter(SD_MUTEX(un));
	sd_start_cmds(un, NULL);
	mutex_exit(SD_MUTEX(un));
	/*
	 * This callback routine always returns 1 (i.e. do not reschedule)
	 * because we always specify sdrunout as the callback handler for
	 * scsi_init_pkt inside the call to sd_start_cmds.
	 */
	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdrunout: exit\n");
	return (1);
}


/*
 * Function: sdintr
 *
 * Description: Completion callback routine for scsi_pkt(9S) structs
 *		sent to the HBA driver via scsi_transport(9F).
 *
 * Context: Interrupt context
 */

static void
sdintr(struct scsi_pkt *pktp)
{
	struct buf	*bp;
	struct sd_xbuf	*xp;
	struct sd_lun	*un;
	size_t		actual_len;

	ASSERT(pktp != NULL);
	bp = (struct buf *)pktp->pkt_private;
	ASSERT(bp != NULL);
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);
	ASSERT(xp->xb_pktp != NULL);
	un = SD_GET_UN(bp);
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

#ifdef SD_FAULT_INJECTION

	SD_INFO(SD_LOG_IOERR, un, "sdintr: sdintr calling Fault injection\n");
	/* SD FaultInjection */
	sd_faultinjection(pktp);

#endif /* SD_FAULT_INJECTION */

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdintr: entry: buf:0x%p,"
	    " xp:0x%p, un:0x%p\n", bp, xp, un);

	mutex_enter(SD_MUTEX(un));

	/* Reduce the count of the #commands currently in transport */
	un->un_ncmds_in_transport--;
	ASSERT(un->un_ncmds_in_transport >= 0);

	/* Increment counter to indicate that the callback routine is active */
	un->un_in_callback++;

	SD_UPDATE_KSTATS(un, kstat_runq_exit, bp);

#ifdef SDDEBUG
	if (bp == un->un_retry_bp) {
		SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sdintr: "
		    "un:0x%p: GOT retry_bp:0x%p un_ncmds_in_transport:%d\n",
		    un, un->un_retry_bp, un->un_ncmds_in_transport);
	}
#endif

	/*
	 * If pkt_reason is CMD_DEV_GONE, fail the command, and update the media
	 * state if needed.
	 */
	if (pktp->pkt_reason == CMD_DEV_GONE) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "Command failed to complete...Device is gone\n");
		if (un->un_mediastate != DKIO_DEV_GONE) {
			un->un_mediastate = DKIO_DEV_GONE;
			cv_broadcast(&un->un_state_cv);
		}
		sd_return_failed_command(un, bp, EIO);
		goto exit;
	}

	if (pktp->pkt_state & STATE_XARQ_DONE) {
		SD_TRACE(SD_LOG_COMMON, un,
		    "sdintr: extra sense data received. pkt=%p\n", pktp);
	}

	/*
	 * First see if the pkt has auto-request sense data with it....
	 * Look at the packet state first so we don't take a performance
	 * hit looking at the arq enabled flag unless absolutely necessary.
	 */
	if ((pktp->pkt_state & STATE_ARQ_DONE) &&
	    (un->un_f_arq_enabled == TRUE)) {
		/*
		 * The HBA did an auto request sense for this command so check
		 * for FLAG_DIAGNOSE. If set this indicates a uscsi or internal
		 * driver command that should not be retried.
		 */
		if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) {
			/*
			 * Save the relevant sense info into the xp for the
			 * original cmd.
			 */
			struct scsi_arq_status *asp;
			asp = (struct scsi_arq_status *)(pktp->pkt_scbp);
			xp->xb_sense_status =
			    *((uchar_t *)(&(asp->sts_rqpkt_status)));
			xp->xb_sense_state  = asp->sts_rqpkt_state;
			xp->xb_sense_resid  = asp->sts_rqpkt_resid;
			if (pktp->pkt_state & STATE_XARQ_DONE) {
				actual_len = MAX_SENSE_LENGTH -
				    xp->xb_sense_resid;
				bcopy(&asp->sts_sensedata, xp->xb_sense_data,
				    MAX_SENSE_LENGTH);
			} else {
				if (xp->xb_sense_resid > SENSE_LENGTH) {
					actual_len = MAX_SENSE_LENGTH -
					    xp->xb_sense_resid;
				} else {
					actual_len = SENSE_LENGTH -
					    xp->xb_sense_resid;
				}
				if (xp->xb_pkt_flags & SD_XB_USCSICMD) {
					if ((((struct uscsi_cmd *)
					    (xp->xb_pktinfo))->uscsi_rqlen) >
					    actual_len) {
						xp->xb_sense_resid =
						    (((struct uscsi_cmd *)
						    (xp->xb_pktinfo))->
						    uscsi_rqlen) - actual_len;
					} else {
						xp->xb_sense_resid = 0;
					}
				}
				bcopy(&asp->sts_sensedata, xp->xb_sense_data,
				    SENSE_LENGTH);
			}

			/* fail the command */
			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
			    "sdintr: arq done and FLAG_DIAGNOSE set\n");
			sd_return_failed_command(un, bp, EIO);
			goto exit;
		}

#if (defined(__i386) || defined(__amd64))	/* DMAFREE for x86 only */
		/*
		 * We want to either retry or fail this command, so free
		 * the DMA resources here. If we retry the command then
		 * the DMA resources will be reallocated in sd_start_cmds().
		 * Note that when PKT_DMA_PARTIAL is used, this reallocation
		 * causes the *entire* transfer to start over again from the
		 * beginning of the request, even for PARTIAL chunks that
		 * have already transferred successfully.
		 */
		if ((un->un_f_is_fibre == TRUE) &&
		    ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) &&
		    ((pktp->pkt_flags & FLAG_SENSING) == 0)) {
			scsi_dmafree(pktp);
			xp->xb_pkt_flags |= SD_XB_DMA_FREED;
		}
#endif

		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sdintr: arq done, sd_handle_auto_request_sense\n");

		sd_handle_auto_request_sense(un, bp, xp, pktp);
		goto exit;
	}

	/* Next see if this is the REQUEST SENSE pkt for the instance */
	if (pktp->pkt_flags & FLAG_SENSING) {
		/* This pktp is from the unit's REQUEST_SENSE command */
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sdintr: sd_handle_request_sense\n");
		sd_handle_request_sense(un, bp, xp, pktp);
		goto exit;
	}

	/*
	 * Check to see if the command successfully completed as requested;
	 * this is the most common case (and also the hot performance path).
	 *
	 * Requirements for successful completion are:
	 * pkt_reason is CMD_CMPLT and packet status is status good.
	 * In addition:
	 * - A residual of zero indicates successful completion no matter what
	 *   the command is.
	 * - If the residual is not zero and the command is not a read or
	 *   write, then it's still defined as successful completion. In other
	 *   words, if the command is a read or write the residual must be
	 *   zero for successful completion.
	 * - If the residual is not zero and the command is a read or
	 *   write, and it's a USCSICMD, then it's still defined as
	 *   successful completion.
	 */
	if ((pktp->pkt_reason == CMD_CMPLT) &&
	    (SD_GET_PKT_STATUS(pktp) == STATUS_GOOD)) {

		/*
		 * Since this command is returned with a good status, we
		 * can reset the count for Sonoma failover.
		 */
		un->un_sonoma_failure_count = 0;

		/*
		 * Return all USCSI commands on good status
		 */
		if (pktp->pkt_resid == 0) {
			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
			    "sdintr: returning command for resid == 0\n");
		} else if (((SD_GET_PKT_OPCODE(pktp) & 0x1F) != SCMD_READ) &&
		    ((SD_GET_PKT_OPCODE(pktp) & 0x1F) != SCMD_WRITE)) {
			SD_UPDATE_B_RESID(bp, pktp);
			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
			    "sdintr: returning command for resid != 0\n");
		} else if (xp->xb_pkt_flags & SD_XB_USCSICMD) {
			SD_UPDATE_B_RESID(bp, pktp);
			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
			    "sdintr: returning uscsi command\n");
		} else {
			goto not_successful;
		}
		sd_return_command(un, bp);

		/*
		 * Decrement counter to indicate that the callback routine
		 * is done.
		 */
		un->un_in_callback--;
		ASSERT(un->un_in_callback >= 0);
		mutex_exit(SD_MUTEX(un));

		return;
	}

not_successful:

#if (defined(__i386) || defined(__amd64))	/* DMAFREE for x86 only */
	/*
	 * The following is based upon knowledge of the underlying transport
	 * and its use of DMA resources. This code should be removed when
	 * PKT_DMA_PARTIAL support is taken out of the disk driver in favor
	 * of the new PKT_CMD_BREAKUP protocol. See also sd_initpkt_for_buf()
	 * and sd_start_cmds().
	 *
	 * Free any DMA resources associated with this command if there
	 * is a chance it could be retried or enqueued for later retry.
	 * If we keep the DMA binding then mpxio cannot reissue the
	 * command on another path whenever a path failure occurs.
	 *
	 * Note that when PKT_DMA_PARTIAL is used, free/reallocation
	 * causes the *entire* transfer to start over again from the
	 * beginning of the request, even for PARTIAL chunks that
	 * have already transferred successfully.
	 *
	 * This is only done for non-uscsi commands (and also skipped for the
	 * driver's internal RQS command). Also just do this for Fibre Channel
	 * devices as these are the only ones that support mpxio.
	 */
	if ((un->un_f_is_fibre == TRUE) &&
	    ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) &&
	    ((pktp->pkt_flags & FLAG_SENSING) == 0)) {
		scsi_dmafree(pktp);
		xp->xb_pkt_flags |= SD_XB_DMA_FREED;
	}
#endif

	/*
	 * The command did not successfully complete as requested so check
	 * for FLAG_DIAGNOSE. If set this indicates a uscsi or internal
	 * driver command that should not be retried so just return. If
	 * FLAG_DIAGNOSE is not set the error will be processed below.
	 */
	if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) {
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sdintr: FLAG_DIAGNOSE: sd_return_failed_command\n");
		/*
		 * Issue a request sense if a check condition caused the error
		 * (we handle the auto request sense case above), otherwise
		 * just fail the command.
		 */
		if ((pktp->pkt_reason == CMD_CMPLT) &&
		    (SD_GET_PKT_STATUS(pktp) == STATUS_CHECK)) {
			sd_send_request_sense_command(un, bp, pktp);
		} else {
			sd_return_failed_command(un, bp, EIO);
		}
		goto exit;
	}

	/*
	 * The command did not successfully complete as requested so process
	 * the error, retry, and/or attempt recovery.
	 */
	switch (pktp->pkt_reason) {
	case CMD_CMPLT:
		switch (SD_GET_PKT_STATUS(pktp)) {
		case STATUS_GOOD:
			/*
			 * The command completed successfully with a non-zero
			 * residual
			 */
			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
			    "sdintr: STATUS_GOOD \n");
			sd_pkt_status_good(un, bp, xp, pktp);
			break;

		case STATUS_CHECK:
		case STATUS_TERMINATED:
			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
			    "sdintr: STATUS_TERMINATED | STATUS_CHECK\n");
			sd_pkt_status_check_condition(un, bp, xp, pktp);
			break;

		case STATUS_BUSY:
			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
			    "sdintr: STATUS_BUSY\n");
			sd_pkt_status_busy(un, bp, xp, pktp);
			break;

		case STATUS_RESERVATION_CONFLICT:
			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
			    "sdintr: STATUS_RESERVATION_CONFLICT\n");
			sd_pkt_status_reservation_conflict(un, bp, xp, pktp);
			break;

		case STATUS_QFULL:
			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
			    "sdintr: STATUS_QFULL\n");
			sd_pkt_status_qfull(un, bp, xp, pktp);
			break;

		case STATUS_MET:
		case STATUS_INTERMEDIATE:
		case STATUS_SCSI2:
		case STATUS_INTERMEDIATE_MET:
		case STATUS_ACA_ACTIVE:
			scsi_log(SD_DEVINFO(un), sd_label, CE_CONT,
			    "Unexpected SCSI status received: 0x%x\n",
			    SD_GET_PKT_STATUS(pktp));
			sd_return_failed_command(un, bp, EIO);
			break;

		default:
			scsi_log(SD_DEVINFO(un), sd_label, CE_CONT,
			    "Invalid SCSI status received: 0x%x\n",
			    SD_GET_PKT_STATUS(pktp));
			sd_return_failed_command(un, bp, EIO);
			break;

		}
		break;

	case CMD_INCOMPLETE:
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sdintr: CMD_INCOMPLETE\n");
		sd_pkt_reason_cmd_incomplete(un, bp, xp, pktp);
		break;
	case CMD_TRAN_ERR:
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sdintr: CMD_TRAN_ERR\n");
		sd_pkt_reason_cmd_tran_err(un, bp, xp, pktp);
		break;
	case CMD_RESET:
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sdintr: CMD_RESET \n");
		sd_pkt_reason_cmd_reset(un, bp, xp, pktp);
		break;
	case CMD_ABORTED:
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sdintr: CMD_ABORTED \n");
		sd_pkt_reason_cmd_aborted(un, bp, xp, pktp);
		break;
	case CMD_TIMEOUT:
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sdintr: CMD_TIMEOUT\n");
		sd_pkt_reason_cmd_timeout(un, bp, xp, pktp);
		break;
	case CMD_UNX_BUS_FREE:
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sdintr: CMD_UNX_BUS_FREE \n");
		sd_pkt_reason_cmd_unx_bus_free(un, bp, xp, pktp);
		break;
	case CMD_TAG_REJECT:
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sdintr: CMD_TAG_REJECT\n");
		sd_pkt_reason_cmd_tag_reject(un, bp, xp, pktp);
		break;
	default:
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sdintr: default\n");
		sd_pkt_reason_default(un, bp, xp, pktp);
		break;
	}

exit:
	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdintr: exit\n");

	/* Decrement counter to indicate that the callback routine is done. */
	un->un_in_callback--;
	ASSERT(un->un_in_callback >= 0);

	/*
	 * At this point, the pkt has been dispatched, i.e., it is either
	 * being re-tried or has been returned to its caller and should
	 * not be referenced.
	 */

	mutex_exit(SD_MUTEX(un));
}

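
/*
 * Illustrative sketch (not driver code, compiled out): the residual
 * rules in sdintr()'s hot path, restated as a predicate. The opcode is
 * the low 5 bits of the CDB opcode, as in the SD_GET_PKT_OPCODE()
 * comparisons above (SCMD_READ is 0x08 and SCMD_WRITE is 0x0a);
 * demo_completed_ok is a hypothetical name.
 */
#if 0
static int
demo_completed_ok(int opcode, int resid, int is_uscsi)
{
	if (resid == 0)
		return (1);	/* zero residual: always complete */
	if (opcode != 0x08 && opcode != 0x0a)
		return (1);	/* non-read/write may leave a residual */
	return (is_uscsi);	/* read/write residual: OK for USCSI only */
}
#endif
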

/*
 * Function: sd_print_incomplete_msg
 *
 * Description: Prints the error message for a CMD_INCOMPLETE error.
 *
 * Arguments: un - ptr to associated softstate for the device.
 *	      bp - ptr to the buf(9S) for the command.
 *	      arg - message string ptr
 *	      code - SD_DELAYED_RETRY_ISSUED, SD_IMMEDIATE_RETRY_ISSUED,
 *	      or SD_NO_RETRY_ISSUED.
 *
 * Context: May be called under interrupt context
 */

static void
sd_print_incomplete_msg(struct sd_lun *un, struct buf *bp, void *arg, int code)
{
	struct scsi_pkt	*pktp;
	char	*msgp;
	char	*cmdp = arg;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(arg != NULL);
	pktp = SD_GET_PKTP(bp);
	ASSERT(pktp != NULL);

	switch (code) {
	case SD_DELAYED_RETRY_ISSUED:
	case SD_IMMEDIATE_RETRY_ISSUED:
		msgp = "retrying";
		break;
	case SD_NO_RETRY_ISSUED:
	default:
		msgp = "giving up";
		break;
	}

	if ((pktp->pkt_flags & FLAG_SILENT) == 0) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "incomplete %s- %s\n", cmdp, msgp);
	}
}



/*
 * Function: sd_pkt_status_good
 *
 * Description: Processing for a STATUS_GOOD code in pkt_status.
 *
 * Context: May be called under interrupt context
 */

static void
sd_pkt_status_good(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
	char	*cmdp;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);
	ASSERT(pktp->pkt_reason == CMD_CMPLT);
	ASSERT(SD_GET_PKT_STATUS(pktp) == STATUS_GOOD);
	ASSERT(pktp->pkt_resid != 0);

	SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: entry\n");

	SD_UPDATE_ERRSTATS(un, sd_harderrs);
	switch (SD_GET_PKT_OPCODE(pktp) & 0x1F) {
	case SCMD_READ:
		cmdp = "read";
		break;
	case SCMD_WRITE:
		cmdp = "write";
		break;
	default:
		SD_UPDATE_B_RESID(bp, pktp);
		sd_return_command(un, bp);
		SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: exit\n");
		return;
	}

	/*
	 * See if we can retry the read/write, preferably immediately.
	 * If retries are exhausted, then sd_retry_command() will update
	 * the b_resid count.
	 */
	sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_incomplete_msg,
	    cmdp, EIO, (clock_t)0, NULL);

	SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: exit\n");
}




/*
 * Function: sd_handle_request_sense
 *
 * Description: Processing for non-auto Request Sense command.
 *
 * Arguments: un - ptr to associated softstate
 *	      sense_bp - ptr to buf(9S) for the RQS command
 *	      sense_xp - ptr to the sd_xbuf for the RQS command
 *	      sense_pktp - ptr to the scsi_pkt(9S) for the RQS command
 *
 * Context: May be called under interrupt context
 */

static void
sd_handle_request_sense(struct sd_lun *un, struct buf *sense_bp,
    struct sd_xbuf *sense_xp, struct scsi_pkt *sense_pktp)
{
	struct buf	*cmd_bp;	/* buf for the original command */
	struct sd_xbuf	*cmd_xp;	/* sd_xbuf for the original command */
	struct scsi_pkt *cmd_pktp;	/* pkt for the original command */
	size_t		actual_len;	/* actual sense data length */

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(sense_bp != NULL);
	ASSERT(sense_xp != NULL);
	ASSERT(sense_pktp != NULL);

	/*
	 * Note the sense_bp, sense_xp, and sense_pktp here are for the
	 * RQS command and not the original command.
	 */
	ASSERT(sense_pktp == un->un_rqs_pktp);
	ASSERT(sense_bp == un->un_rqs_bp);
	ASSERT((sense_pktp->pkt_flags & (FLAG_SENSING | FLAG_HEAD)) ==
	    (FLAG_SENSING | FLAG_HEAD));
	ASSERT((((SD_GET_XBUF(sense_xp->xb_sense_bp))->xb_pktp->pkt_flags) &
	    FLAG_SENSING) == FLAG_SENSING);

	/* These are the bp, xp, and pktp for the original command */
	cmd_bp = sense_xp->xb_sense_bp;
	cmd_xp = SD_GET_XBUF(cmd_bp);
	cmd_pktp = SD_GET_PKTP(cmd_bp);

	if (sense_pktp->pkt_reason != CMD_CMPLT) {
		/*
		 * The REQUEST SENSE command failed. Release the REQUEST
		 * SENSE command for re-use, get back the bp for the original
		 * command, and attempt to re-try the original command if
		 * FLAG_DIAGNOSE is not set in the original packet.
		 */
		SD_UPDATE_ERRSTATS(un, sd_harderrs);
		if ((cmd_pktp->pkt_flags & FLAG_DIAGNOSE) == 0) {
			cmd_bp = sd_mark_rqs_idle(un, sense_xp);
			sd_retry_command(un, cmd_bp, SD_RETRIES_STANDARD,
			    NULL, NULL, EIO, (clock_t)0, NULL);
			return;
		}
	}

	/*
	 * Save the relevant sense info into the xp for the original cmd.
	 *
	 * Note: if the request sense failed the state info will be zero
	 * as set in sd_mark_rqs_busy()
	 */
	cmd_xp->xb_sense_status = *(sense_pktp->pkt_scbp);
	cmd_xp->xb_sense_state  = sense_pktp->pkt_state;
	actual_len = MAX_SENSE_LENGTH - sense_pktp->pkt_resid;
	if ((cmd_xp->xb_pkt_flags & SD_XB_USCSICMD) &&
	    (((struct uscsi_cmd *)cmd_xp->xb_pktinfo)->uscsi_rqlen >
	    SENSE_LENGTH)) {
		bcopy(sense_bp->b_un.b_addr, cmd_xp->xb_sense_data,
		    MAX_SENSE_LENGTH);
		cmd_xp->xb_sense_resid = sense_pktp->pkt_resid;
	} else {
		bcopy(sense_bp->b_un.b_addr, cmd_xp->xb_sense_data,
		    SENSE_LENGTH);
		if (actual_len < SENSE_LENGTH) {
			cmd_xp->xb_sense_resid = SENSE_LENGTH - actual_len;
		} else {
			cmd_xp->xb_sense_resid = 0;
		}
	}

	/*
	 * Free up the RQS command....
	 * NOTE:
	 *	Must do this BEFORE calling sd_validate_sense_data!
	 *	sd_validate_sense_data may return the original command in
	 *	which case the pkt will be freed and the flags can no
	 *	longer be touched.
	 *	SD_MUTEX is held through this process until the command
	 *	is dispatched based upon the sense data, so there are
	 *	no race conditions.
	 */
	(void) sd_mark_rqs_idle(un, sense_xp);

	/*
	 * For a retryable command see if we have valid sense data, if so then
	 * turn it over to sd_decode_sense() to figure out the right course of
	 * action. Just fail a non-retryable command.
	 */
	if ((cmd_pktp->pkt_flags & FLAG_DIAGNOSE) == 0) {
		if (sd_validate_sense_data(un, cmd_bp, cmd_xp, actual_len) ==
		    SD_SENSE_DATA_IS_VALID) {
			sd_decode_sense(un, cmd_bp, cmd_xp, cmd_pktp);
		}
	} else {
		SD_DUMP_MEMORY(un, SD_LOG_IO_CORE, "Failed CDB",
		    (uchar_t *)cmd_pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX);
		SD_DUMP_MEMORY(un, SD_LOG_IO_CORE, "Sense Data",
		    (uchar_t *)cmd_xp->xb_sense_data, SENSE_LENGTH, SD_LOG_HEX);
		sd_return_failed_command(un, cmd_bp, EIO);
	}
}




/*
 * Function: sd_handle_auto_request_sense
 *
 * Description: Processing for auto-request sense information.
 *
 * Arguments: un - ptr to associated softstate
 *	      bp - ptr to buf(9S) for the command
 *	      xp - ptr to the sd_xbuf for the command
 *	      pktp - ptr to the scsi_pkt(9S) for the command
 *
 * Context: May be called under interrupt context
 */

static void
sd_handle_auto_request_sense(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
	struct scsi_arq_status *asp;
	size_t actual_len;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);
	ASSERT(pktp != un->un_rqs_pktp);
	ASSERT(bp != un->un_rqs_bp);

	/*
	 * For auto-request sense, we get a scsi_arq_status back from
	 * the HBA, with the sense data in the sts_sensedata member.
	 * The pkt_scbp of the packet points to this scsi_arq_status.
	 */
	asp = (struct scsi_arq_status *)(pktp->pkt_scbp);

	if (asp->sts_rqpkt_reason != CMD_CMPLT) {
		/*
		 * The auto REQUEST SENSE failed; see if we can re-try
		 * the original command.
		 */
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "auto request sense failed (reason=%s)\n",
		    scsi_rname(asp->sts_rqpkt_reason));

		sd_reset_target(un, pktp);

		sd_retry_command(un, bp, SD_RETRIES_STANDARD,
		    NULL, NULL, EIO, (clock_t)0, NULL);
		return;
	}

	/* Save the relevant sense info into the xp for the original cmd. */
	xp->xb_sense_status = *((uchar_t *)(&(asp->sts_rqpkt_status)));
	xp->xb_sense_state  = asp->sts_rqpkt_state;
	xp->xb_sense_resid  = asp->sts_rqpkt_resid;
	if (xp->xb_sense_state & STATE_XARQ_DONE) {
		actual_len = MAX_SENSE_LENGTH - xp->xb_sense_resid;
		bcopy(&asp->sts_sensedata, xp->xb_sense_data,
		    MAX_SENSE_LENGTH);
	} else {
		if (xp->xb_sense_resid > SENSE_LENGTH) {
			actual_len = MAX_SENSE_LENGTH - xp->xb_sense_resid;
		} else {
			actual_len = SENSE_LENGTH - xp->xb_sense_resid;
		}
		if (xp->xb_pkt_flags & SD_XB_USCSICMD) {
			if ((((struct uscsi_cmd *)
			    (xp->xb_pktinfo))->uscsi_rqlen) > actual_len) {
				xp->xb_sense_resid = (((struct uscsi_cmd *)
				    (xp->xb_pktinfo))->uscsi_rqlen) -
				    actual_len;
			} else {
				xp->xb_sense_resid = 0;
			}
		}
		bcopy(&asp->sts_sensedata, xp->xb_sense_data, SENSE_LENGTH);
	}

	/*
	 * See if we have valid sense data, if so then turn it over to
	 * sd_decode_sense() to figure out the right course of action.
	 */
	if (sd_validate_sense_data(un, bp, xp, actual_len) ==
	    SD_SENSE_DATA_IS_VALID) {
		sd_decode_sense(un, bp, xp, pktp);
	}
}

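
/*
 * Illustrative sketch (not driver code, compiled out): the sense-length
 * bookkeeping used by both request-sense handlers above, in isolation.
 * Assuming, purely for the example, a MAX_SENSE_LENGTH of 252 and a
 * SENSE_LENGTH of 20: an XARQ completion with a residual of 200 yields
 * an actual length of 52, while a plain ARQ completion with a residual
 * of 4 yields 16. demo_actual_sense_len is a hypothetical name.
 */
#if 0
#include <stddef.h>

static size_t
demo_actual_sense_len(int xarq_done, size_t max_len, size_t std_len,
    size_t resid)
{
	if (xarq_done || resid > std_len)
		return (max_len - resid);	/* extended sense buffer */
	return (std_len - resid);		/* standard sense buffer */
}
#endif
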

/*
 * Function: sd_print_sense_failed_msg
 *
 * Description: Print log message when RQS has failed.
 *
 * Arguments: un - ptr to associated softstate
 *	      bp - ptr to buf(9S) for the command
 *	      arg - generic message string ptr
 *	      code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED,
 *	      or SD_NO_RETRY_ISSUED
 *
 * Context: May be called from interrupt context
 */

static void
sd_print_sense_failed_msg(struct sd_lun *un, struct buf *bp, void *arg,
    int code)
{
	char	*msgp = arg;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);

	if ((code == SD_NO_RETRY_ISSUED) && (msgp != NULL)) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, msgp);
	}
}


/*
 * Function: sd_validate_sense_data
 *
 * Description: Check the given sense data for validity.
 *		If the sense data is not valid, the command will
 *		be either failed or retried!
 *
 * Return Code: SD_SENSE_DATA_IS_INVALID
 *		SD_SENSE_DATA_IS_VALID
 *
 * Context: May be called from interrupt context
 */

static int
sd_validate_sense_data(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
    size_t actual_len)
{
	struct scsi_extended_sense *esp;
	struct scsi_pkt *pktp;
	char	*msgp = NULL;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(bp != un->un_rqs_bp);
	ASSERT(xp != NULL);

	pktp = SD_GET_PKTP(bp);
	ASSERT(pktp != NULL);

	/*
	 * Check the status of the RQS command (auto or manual).
	 */
	switch (xp->xb_sense_status & STATUS_MASK) {
	case STATUS_GOOD:
		break;

	case STATUS_RESERVATION_CONFLICT:
		sd_pkt_status_reservation_conflict(un, bp, xp, pktp);
		return (SD_SENSE_DATA_IS_INVALID);

	case STATUS_BUSY:
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "Busy Status on REQUEST SENSE\n");
		sd_retry_command(un, bp, SD_RETRIES_BUSY, NULL,
		    NULL, EIO, SD_BSY_TIMEOUT / 500, kstat_waitq_enter);
		return (SD_SENSE_DATA_IS_INVALID);

	case STATUS_QFULL:
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "QFULL Status on REQUEST SENSE\n");
		sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL,
		    NULL, EIO, SD_BSY_TIMEOUT / 500, kstat_waitq_enter);
		return (SD_SENSE_DATA_IS_INVALID);

	case STATUS_CHECK:
	case STATUS_TERMINATED:
		msgp = "Check Condition on REQUEST SENSE\n";
		goto sense_failed;

	default:
		msgp = "Not STATUS_GOOD on REQUEST_SENSE\n";
		goto sense_failed;
	}

	/*
	 * See if we got the minimum required amount of sense data.
	 * Note: We are assuming the returned sense data is SENSE_LENGTH bytes
	 * or less.
	 */
	if (((xp->xb_sense_state & STATE_XFERRED_DATA) == 0) ||
	    (actual_len == 0)) {
		msgp = "Request Sense couldn't get sense data\n";
		goto sense_failed;
	}

	if (actual_len < SUN_MIN_SENSE_LENGTH) {
		msgp = "Not enough sense information\n";
		goto sense_failed;
	}

	/*
	 * We require the extended sense data
	 */
	esp = (struct scsi_extended_sense *)xp->xb_sense_data;
	if (esp->es_class != CLASS_EXTENDED_SENSE) {
		if ((pktp->pkt_flags & FLAG_SILENT) == 0) {
			static char tmp[8];
			static char buf[148];
			char *p = (char *)(xp->xb_sense_data);
			int i;

			mutex_enter(&sd_sense_mutex);
			(void) strcpy(buf, "undecodable sense information:");
			for (i = 0; i < actual_len; i++) {
				(void) sprintf(tmp, " 0x%x", *(p++)&0xff);
				(void) strcpy(&buf[strlen(buf)], tmp);
			}
			i = strlen(buf);
			(void) strcpy(&buf[i], "-(assumed fatal)\n");
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, buf);
			mutex_exit(&sd_sense_mutex);
		}
		/* Note: Legacy behavior, fail the command with no retry */
		sd_return_failed_command(un, bp, EIO);
		return (SD_SENSE_DATA_IS_INVALID);
	}

	/*
	 * Check that es_code is valid (es_class concatenated with es_code
	 * makes up the "response code" field). es_class will always be 7,
	 * so make sure es_code is 0, 1, 2, 3 or 0xf. es_code indicates the
	 * format.
	 */
	if ((esp->es_code != CODE_FMT_FIXED_CURRENT) &&
	    (esp->es_code != CODE_FMT_FIXED_DEFERRED) &&
	    (esp->es_code != CODE_FMT_DESCR_CURRENT) &&
	    (esp->es_code != CODE_FMT_DESCR_DEFERRED) &&
	    (esp->es_code != CODE_FMT_VENDOR_SPECIFIC)) {
		goto sense_failed;
	}

	return (SD_SENSE_DATA_IS_VALID);

sense_failed:
	/*
	 * If the request sense failed (for whatever reason), attempt
	 * to retry the original command.
	 */
#if defined(__i386) || defined(__amd64)
	/*
	 * SD_RETRY_DELAY is conditionally compiled (#if fibre) in
	 * sddef.h for the SPARC platform, while x86 uses one binary
	 * for both SCSI and FC. The delay value below needs to be
	 * adjusted here whenever SD_RETRY_DELAY changes in sddef.h.
	 */
	sd_retry_command(un, bp, SD_RETRIES_STANDARD,
	    sd_print_sense_failed_msg, msgp, EIO,
	    un->un_f_is_fibre ? drv_usectohz(100000) : (clock_t)0, NULL);
#else
	sd_retry_command(un, bp, SD_RETRIES_STANDARD,
	    sd_print_sense_failed_msg, msgp, EIO, SD_RETRY_DELAY, NULL);
#endif

	return (SD_SENSE_DATA_IS_INVALID);
}

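
/*
 * Illustrative sketch (not driver code, compiled out): the response-code
 * check above, restated on the raw first byte of the sense data. Per the
 * fixed scsi_extended_sense layout, es_class is bits 6:4 of byte 0 and
 * es_code is bits 3:0, so the accepted response codes work out to
 * 0x70-0x73 and 0x7f. demo_sense_format_ok is a hypothetical name.
 */
#if 0
static int
demo_sense_format_ok(unsigned char byte0)
{
	unsigned char cls = (byte0 >> 4) & 0x7;		/* es_class */
	unsigned char code = byte0 & 0xf;		/* es_code */

	if (cls != 0x7)
		return (0);		/* not extended sense */
	return (code == 0x0 || code == 0x1 || code == 0x2 ||
	    code == 0x3 || code == 0xf);
}
#endif
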


/*
 * Function: sd_decode_sense
 *
 * Description: Take recovery action(s) when SCSI Sense Data is received.
 *
 * Context: Interrupt context.
 */

static void
sd_decode_sense(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
    struct scsi_pkt *pktp)
{
	uint8_t sense_key;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(bp != un->un_rqs_bp);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	sense_key = scsi_sense_key(xp->xb_sense_data);

	switch (sense_key) {
	case KEY_NO_SENSE:
		sd_sense_key_no_sense(un, bp, xp, pktp);
		break;
	case KEY_RECOVERABLE_ERROR:
		sd_sense_key_recoverable_error(un, xp->xb_sense_data,
		    bp, xp, pktp);
		break;
	case KEY_NOT_READY:
		sd_sense_key_not_ready(un, xp->xb_sense_data,
		    bp, xp, pktp);
		break;
	case KEY_MEDIUM_ERROR:
	case KEY_HARDWARE_ERROR:
		sd_sense_key_medium_or_hardware_error(un,
		    xp->xb_sense_data, bp, xp, pktp);
		break;
	case KEY_ILLEGAL_REQUEST:
		sd_sense_key_illegal_request(un, bp, xp, pktp);
		break;
	case KEY_UNIT_ATTENTION:
		sd_sense_key_unit_attention(un, xp->xb_sense_data,
		    bp, xp, pktp);
		break;
	case KEY_WRITE_PROTECT:
	case KEY_VOLUME_OVERFLOW:
	case KEY_MISCOMPARE:
		sd_sense_key_fail_command(un, bp, xp, pktp);
		break;
	case KEY_BLANK_CHECK:
		sd_sense_key_blank_check(un, bp, xp, pktp);
		break;
	case KEY_ABORTED_COMMAND:
		sd_sense_key_aborted_command(un, bp, xp, pktp);
		break;
	case KEY_VENDOR_UNIQUE:
	case KEY_COPY_ABORTED:
	case KEY_EQUAL:
	case KEY_RESERVED:
	default:
		sd_sense_key_default(un, xp->xb_sense_data,
		    bp, xp, pktp);
		break;
	}
}


/*
 * Function: sd_dump_memory
 *
 * Description: Debug logging routine to print the contents of a user provided
 *		buffer. The output of the buffer is broken up into 256 byte
 *		segments due to a size constraint of the scsi_log
 *		implementation.
 *
 * Arguments: un - ptr to softstate
 *	      comp - component mask
 *	      title - "title" string to precede data when printed
 *	      data - ptr to data block to be printed
 *	      len - size of data block to be printed
 *	      fmt - SD_LOG_HEX (use 0x%02x format) or SD_LOG_CHAR (use %c)
 *
 * Context: May be called from interrupt context
 */

#define	SD_DUMP_MEMORY_BUF_SIZE	256

static char *sd_dump_format_string[] = {
		" 0x%02x",
		" %c"
};

static void
sd_dump_memory(struct sd_lun *un, uint_t comp, char *title, uchar_t *data,
    int len, int fmt)
{
	int	i, j;
	int	avail_count;
	int	start_offset;
	int	end_offset;
	size_t	entry_len;
	char	*bufp;
	char	*local_buf;
	char	*format_string;

	ASSERT((fmt == SD_LOG_HEX) || (fmt == SD_LOG_CHAR));

	/*
	 * In the debug version of the driver, this function is called from a
	 * number of places which are NOPs in the release driver.
	 * The debug driver therefore has additional methods of filtering
	 * debug output.
	 */
#ifdef SDDEBUG
	/*
	 * In the debug version of the driver we can reduce the amount of debug
	 * messages by setting sd_error_level to something other than
	 * SCSI_ERR_ALL and clearing bits in sd_level_mask and
	 * sd_component_mask.
	 */
	if (((sd_level_mask & (SD_LOGMASK_DUMP_MEM | SD_LOGMASK_DIAG)) == 0) ||
	    (sd_error_level != SCSI_ERR_ALL)) {
		return;
	}
	if (((sd_component_mask & comp) == 0) ||
	    (sd_error_level != SCSI_ERR_ALL)) {
		return;
	}
#else
	if (sd_error_level != SCSI_ERR_ALL) {
		return;
	}
#endif

	local_buf = kmem_zalloc(SD_DUMP_MEMORY_BUF_SIZE, KM_SLEEP);
	bufp = local_buf;
	/*
	 * Available length is the length of local_buf[], minus the
	 * length of the title string, minus one for the ":", minus
	 * one for the newline, minus one for the NULL terminator.
	 * This gives the #bytes available for holding the printed
	 * values from the given data buffer.
	 */
	if (fmt == SD_LOG_HEX) {
		format_string = sd_dump_format_string[0];
	} else /* SD_LOG_CHAR */ {
		format_string = sd_dump_format_string[1];
	}
	/*
	 * Available count is the number of elements from the given
	 * data buffer that we can fit into the available length.
	 * This is based upon the size of the format string used.
	 * Make one entry and find its size.
	 */
	(void) sprintf(bufp, format_string, data[0]);
	entry_len = strlen(bufp);
	avail_count = (SD_DUMP_MEMORY_BUF_SIZE - strlen(title) - 3) / entry_len;

	j = 0;
	while (j < len) {
		bufp = local_buf;
		bzero(bufp, SD_DUMP_MEMORY_BUF_SIZE);
		start_offset = j;

		end_offset = start_offset + avail_count;

		(void) sprintf(bufp, "%s:", title);
		bufp += strlen(bufp);
		for (i = start_offset; ((i < end_offset) && (j < len));
		    i++, j++) {
			(void) sprintf(bufp, format_string, data[i]);
			bufp += entry_len;
		}
		(void) sprintf(bufp, "\n");

		scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE, "%s", local_buf);
	}
	kmem_free(local_buf, SD_DUMP_MEMORY_BUF_SIZE);
}

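
/*
 * Illustrative sketch (not driver code, compiled out): the chunking
 * arithmetic above with concrete numbers. The hex format " 0x%02x"
 * always renders five characters, so for a title such as "Sense Data"
 * (10 chars) each 256-byte buffer holds (256 - 10 - 3) / 5 = 48 entries,
 * i.e. a 20-byte sense block fits on a single scsi_log() line.
 * demo_entries_per_line is a hypothetical name.
 */
#if 0
#include <string.h>

static int
demo_entries_per_line(size_t buf_size, const char *title, size_t entry_len)
{
	return ((int)((buf_size - strlen(title) - 3) / entry_len));
}
#endif
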

/*
 * Function: sd_print_sense_msg
 *
 * Description: Log a message based upon the given sense data.
 *
 * Arguments: un - ptr to associated softstate
 *	      bp - ptr to buf(9S) for the command
 *	      arg - ptr to associated sd_sense_info struct
 *	      code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED,
 *	      or SD_NO_RETRY_ISSUED
 *
 * Context: May be called from interrupt context
 */

static void
sd_print_sense_msg(struct sd_lun *un, struct buf *bp, void *arg, int code)
{
	struct sd_xbuf	*xp;
	struct scsi_pkt	*pktp;
	uint8_t *sensep;
	daddr_t request_blkno;
	diskaddr_t err_blkno;
	int severity;
	int pfa_flag;
	extern struct scsi_key_strings scsi_cmds[];

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);
	pktp = SD_GET_PKTP(bp);
	ASSERT(pktp != NULL);
	ASSERT(arg != NULL);

	severity = ((struct sd_sense_info *)(arg))->ssi_severity;
	pfa_flag = ((struct sd_sense_info *)(arg))->ssi_pfa_flag;

	if ((code == SD_DELAYED_RETRY_ISSUED) ||
	    (code == SD_IMMEDIATE_RETRY_ISSUED)) {
		severity = SCSI_ERR_RETRYABLE;
	}

	/* Use absolute block number for the request block number */
	request_blkno = xp->xb_blkno;

	/*
	 * Now try to get the error block number from the sense data
	 */
	sensep = xp->xb_sense_data;

	if (scsi_sense_info_uint64(sensep, SENSE_LENGTH,
	    (uint64_t *)&err_blkno)) {
		/*
		 * We retrieved the error block number from the information
		 * portion of the sense data.
		 *
		 * For USCSI commands we are better off using the error
		 * block no. as the requested block no. (This is the best
		 * we can estimate.)
		 */
		if ((SD_IS_BUFIO(xp) == FALSE) &&
		    ((pktp->pkt_flags & FLAG_SILENT) == 0)) {
			request_blkno = err_blkno;
		}
	} else {
		/*
		 * Without the es_valid bit set (for fixed format) or an
		 * information descriptor (for descriptor format) we cannot
		 * be certain of the error blkno, so just use the
		 * request_blkno.
		 */
		err_blkno = (diskaddr_t)request_blkno;
	}

	/*
	 * The following will log the buffer contents for the release driver
	 * if the SD_LOGMASK_DIAG bit of sd_level_mask is set, or the error
	 * level is set to verbose.
	 */
	sd_dump_memory(un, SD_LOG_IO, "Failed CDB",
	    (uchar_t *)pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX);
	sd_dump_memory(un, SD_LOG_IO, "Sense Data",
	    (uchar_t *)sensep, SENSE_LENGTH, SD_LOG_HEX);

	if (pfa_flag == FALSE) {
		/* This is normally only set for USCSI */
		if ((pktp->pkt_flags & FLAG_SILENT) != 0) {
			return;
		}

		if ((SD_IS_BUFIO(xp) == TRUE) &&
		    (((sd_level_mask & SD_LOGMASK_DIAG) == 0) &&
		    (severity < sd_error_level))) {
			return;
		}
	}

	/*
	 * Check for Sonoma Failover and keep a count of how many failed I/O's
	 */
	if ((SD_IS_LSI(un)) &&
	    (scsi_sense_key(sensep) == KEY_ILLEGAL_REQUEST) &&
	    (scsi_sense_asc(sensep) == 0x94) &&
	    (scsi_sense_ascq(sensep) == 0x01)) {
		un->un_sonoma_failure_count++;
		if (un->un_sonoma_failure_count > 1) {
			return;
		}
	}

	scsi_vu_errmsg(SD_SCSI_DEVP(un), pktp, sd_label, severity,
	    request_blkno, err_blkno, scsi_cmds,
	    (struct scsi_extended_sense *)sensep,
	    un->un_additional_codes, NULL);
}

/*
 * Function: sd_sense_key_no_sense
 *
 * Description: Recovery action when sense data was not received.
 *
 * Context: May be called from interrupt context
 */

static void
sd_sense_key_no_sense(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
	struct sd_sense_info	si;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	si.ssi_severity = SCSI_ERR_FATAL;
	si.ssi_pfa_flag = FALSE;

	SD_UPDATE_ERRSTATS(un, sd_softerrs);

	sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg,
	    &si, EIO, (clock_t)0, NULL);
}


/*
 * Function: sd_sense_key_recoverable_error
 *
 * Description: Recovery actions for a SCSI "Recovered Error" sense key.
 *
 * Context: May be called from interrupt context
 */

static void
sd_sense_key_recoverable_error(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
	struct sd_sense_info	si;
	uint8_t asc = scsi_sense_asc(sense_datap);

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	/*
	 * 0x5D: FAILURE PREDICTION THRESHOLD EXCEEDED
	 */
	if ((asc == 0x5D) && (sd_report_pfa != 0)) {
		SD_UPDATE_ERRSTATS(un, sd_rq_pfa_err);
		si.ssi_severity = SCSI_ERR_INFO;
		si.ssi_pfa_flag = TRUE;
	} else {
		SD_UPDATE_ERRSTATS(un, sd_softerrs);
		SD_UPDATE_ERRSTATS(un, sd_rq_recov_err);
		si.ssi_severity = SCSI_ERR_RECOVERED;
		si.ssi_pfa_flag = FALSE;
	}

	if (pktp->pkt_resid == 0) {
		sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
		sd_return_command(un, bp);
		return;
	}

	sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg,
	    &si, EIO, (clock_t)0, NULL);
}




/*
 * Function: sd_sense_key_not_ready
 *
 * Description: Recovery actions for a SCSI "Not Ready" sense key.
 *
 * Context: May be called from interrupt context
 */

static void
sd_sense_key_not_ready(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
	struct sd_sense_info	si;
	uint8_t asc = scsi_sense_asc(sense_datap);
	uint8_t ascq = scsi_sense_ascq(sense_datap);

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	si.ssi_severity = SCSI_ERR_FATAL;
	si.ssi_pfa_flag = FALSE;

	/*
	 * Update error stats after first NOT READY error. Disks may have
	 * been powered down and may need to be restarted. For CDROMs,
	 * report NOT READY errors only if media is present.
	 */
	if ((ISCD(un) && (asc == 0x3A)) ||
	    (xp->xb_nr_retry_count > 0)) {
		SD_UPDATE_ERRSTATS(un, sd_harderrs);
		SD_UPDATE_ERRSTATS(un, sd_rq_ntrdy_err);
	}

	/*
	 * Just fail if the "not ready" retry limit has been reached.
	 */
	if (xp->xb_nr_retry_count >= un->un_notready_retry_count) {
		/* Special check for error message printing for removables. */
		if (un->un_f_has_removable_media && (asc == 0x04) &&
		    (ascq >= 0x04)) {
			si.ssi_severity = SCSI_ERR_ALL;
		}
		goto fail_command;
	}

	/*
	 * Check the ASC and ASCQ in the sense data as needed, to determine
	 * what to do.
	 */
	switch (asc) {
	case 0x04:	/* LOGICAL UNIT NOT READY */
		/*
		 * Disk drives that don't spin up result in a very long delay
		 * in format without warning messages. We will log a message
		 * if the error level is set to verbose.
		 */
		if (sd_error_level < SCSI_ERR_RETRYABLE) {
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "logical unit not ready, resetting disk\n");
		}

		/*
		 * There are different requirements for CDROMs and disks for
		 * the number of retries. If a CD-ROM is giving this, it is
		 * probably reading TOC and is in the process of getting
		 * ready, so we should keep on trying for a long time to make
		 * sure that all types of media are taken into account (for
		 * some media the drive takes a long time to read TOC). For
		 * disks we do not want to retry this too many times as this
		 * can cause a long hang in format when the drive refuses to
		 * spin up (a very common failure).
		 */
		switch (ascq) {
		case 0x00:  /* LUN NOT READY, CAUSE NOT REPORTABLE */
			/*
			 * Disk drives frequently refuse to spin up which
			 * results in a very long hang in format without
			 * warning messages.
			 *
			 * Note: This code preserves the legacy behavior of
			 * comparing xb_nr_retry_count against zero for fibre
			 * channel targets instead of comparing against the
			 * un_reset_retry_count value. The reason for this
			 * discrepancy has been so utterly lost beneath the
			 * Sands of Time that even Indiana Jones could not
			 * find it.
			 */
			if (un->un_f_is_fibre == TRUE) {
				if (((sd_level_mask & SD_LOGMASK_DIAG) ||
				    (xp->xb_nr_retry_count > 0)) &&
				    (un->un_startstop_timeid == NULL)) {
					scsi_log(SD_DEVINFO(un), sd_label,
					    CE_WARN, "logical unit not ready, "
					    "resetting disk\n");
					sd_reset_target(un, pktp);
				}
			} else {
				if (((sd_level_mask & SD_LOGMASK_DIAG) ||
				    (xp->xb_nr_retry_count >
				    un->un_reset_retry_count)) &&
				    (un->un_startstop_timeid == NULL)) {
					scsi_log(SD_DEVINFO(un), sd_label,
					    CE_WARN, "logical unit not ready, "
					    "resetting disk\n");
					sd_reset_target(un, pktp);
				}
			}
			break;

		case 0x01:  /* LUN IS IN PROCESS OF BECOMING READY */
			/*
			 * If the target is in the process of becoming
			 * ready, just proceed with the retry. This can
			 * happen with CD-ROMs that take a long time to
			 * read TOC after a power cycle or reset.
			 */
			goto do_retry;

		case 0x02:  /* LUN NOT READY, INITIALIZING CMD REQUIRED */
			break;

		case 0x03:  /* LUN NOT READY, MANUAL INTERVENTION REQUIRED */
			/*
			 * Retries cannot help here so just fail right away.
			 */
			goto fail_command;

		case 0x88:
			/*
			 * Vendor-unique code for T3/T4: it indicates a
			 * path problem in a multipathed config, but as far as
			 * the target driver is concerned it equates to a fatal
			 * error, so we should just fail the command right away
			 * (without printing anything to the console). If this
			 * is not a T3/T4, fall through to the default recovery
			 * action.
			 * T3/T4 is FC only, so there is no need to check
			 * is_fibre here.
			 */
			if (SD_IS_T3(un) || SD_IS_T4(un)) {
				sd_return_failed_command(un, bp, EIO);
				return;
			}
			/* FALLTHRU */

		case 0x04:  /* LUN NOT READY, FORMAT IN PROGRESS */
		case 0x05:  /* LUN NOT READY, REBUILD IN PROGRESS */
		case 0x06:  /* LUN NOT READY, RECALCULATION IN PROGRESS */
		case 0x07:  /* LUN NOT READY, OPERATION IN PROGRESS */
		case 0x08:  /* LUN NOT READY, LONG WRITE IN PROGRESS */
		default:    /* Possible future codes in SCSI spec? */
			/*
			 * For removable-media devices, do not retry if
			 * ASCQ > 2 as these result mostly from USCSI commands
			 * on MMC devices issued to check status of an
			 * operation initiated in immediate mode. Also for
			 * ASCQ >= 4 do not print console messages as these
			 * mainly represent a user-initiated operation
			 * instead of a system failure.
			 */
			if (un->un_f_has_removable_media) {
				si.ssi_severity = SCSI_ERR_ALL;
				goto fail_command;
			}
			break;
		}

		/*
		 * As part of our recovery attempt for the NOT READY
		 * condition, we issue a START STOP UNIT command. However
		 * we want to wait for a short delay before attempting this
		 * as there may still be more commands coming back from the
		 * target with the check condition. To do this we use
		 * timeout(9F) to call sd_start_stop_unit_callback() after
		 * the delay interval expires. (sd_start_stop_unit_callback()
		 * dispatches sd_start_stop_unit_task(), which will issue
		 * the actual START STOP UNIT command.) The delay interval
		 * is one-half of the delay that we will use to retry the
		 * command that generated the NOT READY condition.
		 *
		 * Note that we could just dispatch sd_start_stop_unit_task()
		 * from here and allow it to sleep for the delay interval,
		 * but then we would be tying up the taskq thread
		 * unnecessarily for the duration of the delay.
		 *
		 * Do not issue the START STOP UNIT if the current command
		 * is already a START STOP UNIT.
		 */
16247 *
16248 * Note that we could just dispatch sd_start_stop_unit_task()
16249 * from here and allow it to sleep for the delay interval,
16250 * but then we would be tying up the taskq thread
16251 * unnecessarily for the duration of the delay.
16252 *
16253 * Do not issue the START STOP UNIT if the current command
16254 * is already a START STOP UNIT.
16255 */
16256 if (pktp->pkt_cdbp[0] == SCMD_START_STOP) {
16257 break;
16258 }
16259
16260 /*
16261 * Do not schedule the timeout if one is already pending.
16262 */
16263 if (un->un_startstop_timeid != NULL) {
16264 SD_INFO(SD_LOG_ERROR, un,
16265 "sd_sense_key_not_ready: restart already issued to"
16266 " %s%d\n", ddi_driver_name(SD_DEVINFO(un)),
16267 ddi_get_instance(SD_DEVINFO(un)));
16268 break;
16269 }
16270
16271 /*
16272 * Schedule the START STOP UNIT command, then queue the command
16273 * for a retry.
16274 *
16275 * Note: A timeout is not scheduled for this retry because we
16276 * want the retry to be serial with the START_STOP_UNIT. The
16277 * retry will be started when the START_STOP_UNIT is completed
16278 * in sd_start_stop_unit_task.
16279 */
16280 un->un_startstop_timeid = timeout(sd_start_stop_unit_callback,
16281 un, SD_BSY_TIMEOUT / 2);
16282 xp->xb_nr_retry_count++;
16283 sd_set_retry_bp(un, bp, 0, kstat_waitq_enter);
16284 return;
16285
16286 case 0x05: /* LOGICAL UNIT DOES NOT RESPOND TO SELECTION */
16287 if (sd_error_level < SCSI_ERR_RETRYABLE) {
16288 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
16289 "unit does not respond to selection\n");
16290 }
16291 break;
16292
16293 case 0x3A: /* MEDIUM NOT PRESENT */
16294 if (sd_error_level >= SCSI_ERR_FATAL) {
16295 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
16296 "Caddy not inserted in drive\n");
16297 }
16298
16299 sr_ejected(un);
16300 un->un_mediastate = DKIO_EJECTED;
16301 /* The state has changed, inform the media watch routines */
16302 cv_broadcast(&un->un_state_cv);
16303 /* Just fail if no media is present in the drive. */
16304 goto fail_command;
16305
16306 default:
16307 if (sd_error_level < SCSI_ERR_RETRYABLE) {
16308 scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE,
16309 "Unit not Ready. Additional sense code 0x%x\n",
16310 asc);
16311 }
16312 break;
16313 }
16314
16315 do_retry:
16316
16317 /*
16318 * Retry the command, as some targets may report NOT READY for
16319 * several seconds after being reset.
16320 */
16321 xp->xb_nr_retry_count++;
16322 si.ssi_severity = SCSI_ERR_RETRYABLE;
16323 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, sd_print_sense_msg,
16324 &si, EIO, SD_BSY_TIMEOUT, NULL);
16325
16326 return;
16327
16328 fail_command:
16329 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
16330 sd_return_failed_command(un, bp, EIO);
16331 }
16332
16333
16334
16335 /*
16336 * Function: sd_sense_key_medium_or_hardware_error
16337 *
16338 * Description: Recovery actions for a SCSI "Medium Error" or "Hardware Error"
16339 * sense key.
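 *		For a MEDIUM ERROR the sd_rq_media_err kstat is also
 *		updated. Once the retry count reaches the reset-retry
 *		threshold, a LUN reset (and, failing that, a target
 *		reset) may be attempted before the command is retried.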
16340 *
16341 * Context: May be called from interrupt context
16342 */
16343
16344 static void
16345 sd_sense_key_medium_or_hardware_error(struct sd_lun *un,
16346 uint8_t *sense_datap,
16347 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp)
16348 {
16349 struct sd_sense_info si;
16350 uint8_t sense_key = scsi_sense_key(sense_datap);
16351 uint8_t asc = scsi_sense_asc(sense_datap);
16352
16353 ASSERT(un != NULL);
16354 ASSERT(mutex_owned(SD_MUTEX(un)));
16355 ASSERT(bp != NULL);
16356 ASSERT(xp != NULL);
16357 ASSERT(pktp != NULL);
16358
16359 si.ssi_severity = SCSI_ERR_FATAL;
16360 si.ssi_pfa_flag = FALSE;
16361
16362 if (sense_key == KEY_MEDIUM_ERROR) {
16363 SD_UPDATE_ERRSTATS(un, sd_rq_media_err);
16364 }
16365
16366 SD_UPDATE_ERRSTATS(un, sd_harderrs);
16367
16368 if ((un->un_reset_retry_count != 0) &&
16369 (xp->xb_retry_count == un->un_reset_retry_count)) {
16370 mutex_exit(SD_MUTEX(un));
16371 /* Do NOT do a RESET_ALL here: too intrusive. (4112858) */
16372 if (un->un_f_allow_bus_device_reset == TRUE) {
16373
16374 boolean_t try_resetting_target = B_TRUE;
16375
16376 /*
16377 * We need to be able to handle specific ASC values when we
16378 * are handling a KEY_HARDWARE_ERROR. In particular,
16379 * taking the default action of resetting the target may
16380 * not be the appropriate way to attempt recovery:
16381 * resetting a target because of a single LUN failure
16382 * victimizes all LUNs on that target.
16383 *
16384 * This is true for LSI arrays: if an LSI
16385 * array controller returns an ASC of 0x84 (LUN Dead),
16386 * we should trust it.
16387 */
16388
16389 if (sense_key == KEY_HARDWARE_ERROR) {
16390 switch (asc) {
16391 case 0x84:
16392 if (SD_IS_LSI(un)) {
16393 try_resetting_target = B_FALSE;
16394 }
16395 break;
16396 default:
16397 break;
16398 }
16399 }
16400
16401 if (try_resetting_target == B_TRUE) {
16402 int reset_retval = 0;
16403 if (un->un_f_lun_reset_enabled == TRUE) {
16404 SD_TRACE(SD_LOG_IO_CORE, un,
16405 "sd_sense_key_medium_or_hardware_"
16406 "error: issuing RESET_LUN\n");
16407 reset_retval =
16408 scsi_reset(SD_ADDRESS(un),
16409 RESET_LUN);
16410 }
16411 if (reset_retval == 0) {
16412 SD_TRACE(SD_LOG_IO_CORE, un,
16413 "sd_sense_key_medium_or_hardware_"
16414 "error: issuing RESET_TARGET\n");
16415 (void) scsi_reset(SD_ADDRESS(un),
16416 RESET_TARGET);
16417 }
16418 }
16419 }
16420 mutex_enter(SD_MUTEX(un));
16421 }
16422
16423 /*
16424 * This really ought to be a fatal error, but we will retry anyway
16425 * as some drives report this as a spurious error.
16426 */
16427 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg,
16428 &si, EIO, (clock_t)0, NULL);
16429 }
16430
16431
16432
16433 /*
16434 * Function: sd_sense_key_illegal_request
16435 *
16436 * Description: Recovery actions for a SCSI "Illegal Request" sense key.
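 *		No retry is attempted: if the target considers the
 *		request illegal, reissuing the same CDB cannot succeed,
 *		so the command is failed immediately with EIO.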
16437 *
16438 * Context: May be called from interrupt context
16439 */
16440
16441 static void
16442 sd_sense_key_illegal_request(struct sd_lun *un, struct buf *bp,
16443 struct sd_xbuf *xp, struct scsi_pkt *pktp)
16444 {
16445 struct sd_sense_info si;
16446
16447 ASSERT(un != NULL);
16448 ASSERT(mutex_owned(SD_MUTEX(un)));
16449 ASSERT(bp != NULL);
16450 ASSERT(xp != NULL);
16451 ASSERT(pktp != NULL);
16452
16453 SD_UPDATE_ERRSTATS(un, sd_rq_illrq_err);
16454
16455 si.ssi_severity = SCSI_ERR_INFO;
16456 si.ssi_pfa_flag = FALSE;
16457
16458 /* Pointless to retry if the target thinks it's an illegal request */
16459 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
16460 sd_return_failed_command(un, bp, EIO);
16461 }
16462
16463
16464
16465
16466 /*
16467 * Function: sd_sense_key_unit_attention
16468 *
16469 * Description: Recovery actions for a SCSI "Unit Attention" sense key.
16470 *
16471 * Context: May be called from interrupt context
16472 */
16473
16474 static void
16475 sd_sense_key_unit_attention(struct sd_lun *un,
16476 uint8_t *sense_datap,
16477 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp)
16478 {
16479 /*
16480 * For UNIT ATTENTION we allow retries for one minute. Devices
16481 * like Sonoma can continue to return UNIT ATTENTION for close to
16482 * a minute under certain conditions.
16483 */
16484 int retry_check_flag = SD_RETRIES_UA;
16485 boolean_t kstat_updated = B_FALSE;
16486 struct sd_sense_info si;
16487 uint8_t asc = scsi_sense_asc(sense_datap);
16488
16489 ASSERT(un != NULL);
16490 ASSERT(mutex_owned(SD_MUTEX(un)));
16491 ASSERT(bp != NULL);
16492 ASSERT(xp != NULL);
16493 ASSERT(pktp != NULL);
16494
16495 si.ssi_severity = SCSI_ERR_INFO;
16496 si.ssi_pfa_flag = FALSE;
16497
16498
16499 switch (asc) {
16500 case 0x5D: /* FAILURE PREDICTION THRESHOLD EXCEEDED */
16501 if (sd_report_pfa != 0) {
16502 SD_UPDATE_ERRSTATS(un, sd_rq_pfa_err);
16503 si.ssi_pfa_flag = TRUE;
16504 retry_check_flag = SD_RETRIES_STANDARD;
16505 goto do_retry;
16506 }
16507
16508 break;
16509
16510 case 0x29: /* POWER ON, RESET, OR BUS DEVICE RESET OCCURRED */
16511 if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) {
16512 un->un_resvd_status |=
16513 (SD_LOST_RESERVE | SD_WANT_RESERVE);
16514 }
16515 #ifdef _LP64
16516 if (un->un_blockcount + 1 > SD_GROUP1_MAX_ADDRESS) {
16517 if (taskq_dispatch(sd_tq, sd_reenable_dsense_task,
16518 un, KM_NOSLEEP) == 0) {
16519 /*
16520 * If we can't dispatch the task we'll just
16521 * live without descriptor sense. We can
16522 * try again on the next "unit attention".
16523 */
16524 SD_ERROR(SD_LOG_ERROR, un,
16525 "sd_sense_key_unit_attention: "
16526 "Could not dispatch "
16527 "sd_reenable_dsense_task\n");
16528 }
16529 }
16530 #endif /* _LP64 */
16531 /* FALLTHRU */
16532
16533 case 0x28: /* NOT READY TO READY CHANGE, MEDIUM MAY HAVE CHANGED */
16534 if (!un->un_f_has_removable_media) {
16535 break;
16536 }
16537
16538 /*
16539 * When we get a unit attention from a removable-media device,
16540 * it may be in a state that will take a long time to recover
16541 * (e.g., from a reset). Since we are executing in interrupt
16542 * context here, we cannot wait around for the device to come
16543 * back. So hand this command off to sd_media_change_task()
16544 * for deferred processing under taskq thread context. (Note
16545 * that the command still may be failed if a problem is
16546 * encountered at a later time.)
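 *
 * The dispatch below uses KM_NOSLEEP because this code may be
 * running in interrupt context; if the dispatch fails, the command
 * is failed here rather than blocking for taskq resources.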
16547 */
16548 if (taskq_dispatch(sd_tq, sd_media_change_task, pktp,
16549 KM_NOSLEEP) == 0) {
16550 /*
16551 * Cannot dispatch the request so fail the command.
16552 */
16553 SD_UPDATE_ERRSTATS(un, sd_harderrs);
16554 SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err);
16555 si.ssi_severity = SCSI_ERR_FATAL;
16556 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
16557 sd_return_failed_command(un, bp, EIO);
16558 }
16559
16560 /*
16561 * If we failed to dispatch sd_media_change_task(), the kstats
16562 * were already updated above. If the dispatch succeeded, the
16563 * kstats will be updated later by the task if it encounters
16564 * an error. Either way, set the kstat_updated flag here.
16565 */
16566 kstat_updated = B_TRUE;
16567
16568 /*
16569 * Either the command has been successfully dispatched to a
16570 * taskq for retrying, or the dispatch failed. In either case
16571 * do NOT retry again by calling sd_retry_command: that would
16572 * set up two retries of the same command, and when one completed
16573 * and freed the resources the other would access freed memory,
16574 * a bad thing.
16575 */
16576 return;
16577
16578 default:
16579 break;
16580 }
16581
16582 /*
16583 * Update kstat if we haven't done that.
16584 */
16585 if (!kstat_updated) {
16586 SD_UPDATE_ERRSTATS(un, sd_harderrs);
16587 SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err);
16588 }
16589
16590 do_retry:
16591 sd_retry_command(un, bp, retry_check_flag, sd_print_sense_msg, &si,
16592 EIO, SD_UA_RETRY_DELAY, NULL);
16593 }
16594
16595
16596
16597 /*
16598 * Function: sd_sense_key_fail_command
16599 *
16600 * Description: Used to fail a command when we don't like the sense key that
16601 * was returned.
16602 *
16603 * Context: May be called from interrupt context
16604 */
16605
16606 static void
16607 sd_sense_key_fail_command(struct sd_lun *un, struct buf *bp,
16608 struct sd_xbuf *xp, struct scsi_pkt *pktp)
16609 {
16610 struct sd_sense_info si;
16611
16612 ASSERT(un != NULL);
16613 ASSERT(mutex_owned(SD_MUTEX(un)));
16614 ASSERT(bp != NULL);
16615 ASSERT(xp != NULL);
16616 ASSERT(pktp != NULL);
16617
16618 si.ssi_severity = SCSI_ERR_FATAL;
16619 si.ssi_pfa_flag = FALSE;
16620
16621 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
16622 sd_return_failed_command(un, bp, EIO);
16623 }
16624
16625
16626
16627 /*
16628 * Function: sd_sense_key_blank_check
16629 *
16630 * Description: Recovery actions for a SCSI "Blank Check" sense key.
16631 * Has no monetary connotation.
16632 *
16633 * Context: May be called from interrupt context
16634 */
16635
16636 static void
16637 sd_sense_key_blank_check(struct sd_lun *un, struct buf *bp,
16638 struct sd_xbuf *xp, struct scsi_pkt *pktp)
16639 {
16640 struct sd_sense_info si;
16641
16642 ASSERT(un != NULL);
16643 ASSERT(mutex_owned(SD_MUTEX(un)));
16644 ASSERT(bp != NULL);
16645 ASSERT(xp != NULL);
16646 ASSERT(pktp != NULL);
16647
16648 /*
16649 * Blank check is not fatal for removable devices, therefore
16650 * it does not require a console message.
16651 */
16652 si.ssi_severity = (un->un_f_has_removable_media) ? SCSI_ERR_ALL :
16653 SCSI_ERR_FATAL;
16654 si.ssi_pfa_flag = FALSE;
16655
16656 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
16657 sd_return_failed_command(un, bp, EIO);
16658 }
16659
16660
16661
16662
16663 /*
16664 * Function: sd_sense_key_aborted_command
16665 *
16666 * Description: Recovery actions for a SCSI "Aborted Command" sense key.
16667 * 16668 * Context: May be called from interrupt context 16669 */ 16670 16671 static void 16672 sd_sense_key_aborted_command(struct sd_lun *un, struct buf *bp, 16673 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16674 { 16675 struct sd_sense_info si; 16676 16677 ASSERT(un != NULL); 16678 ASSERT(mutex_owned(SD_MUTEX(un))); 16679 ASSERT(bp != NULL); 16680 ASSERT(xp != NULL); 16681 ASSERT(pktp != NULL); 16682 16683 si.ssi_severity = SCSI_ERR_FATAL; 16684 si.ssi_pfa_flag = FALSE; 16685 16686 SD_UPDATE_ERRSTATS(un, sd_harderrs); 16687 16688 /* 16689 * This really ought to be a fatal error, but we will retry anyway 16690 * as some drives report this as a spurious error. 16691 */ 16692 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 16693 &si, EIO, drv_usectohz(100000), NULL); 16694 } 16695 16696 16697 16698 /* 16699 * Function: sd_sense_key_default 16700 * 16701 * Description: Default recovery action for several SCSI sense keys (basically 16702 * attempts a retry). 16703 * 16704 * Context: May be called from interrupt context 16705 */ 16706 16707 static void 16708 sd_sense_key_default(struct sd_lun *un, 16709 uint8_t *sense_datap, 16710 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 16711 { 16712 struct sd_sense_info si; 16713 uint8_t sense_key = scsi_sense_key(sense_datap); 16714 16715 ASSERT(un != NULL); 16716 ASSERT(mutex_owned(SD_MUTEX(un))); 16717 ASSERT(bp != NULL); 16718 ASSERT(xp != NULL); 16719 ASSERT(pktp != NULL); 16720 16721 SD_UPDATE_ERRSTATS(un, sd_harderrs); 16722 16723 /* 16724 * Undecoded sense key. Attempt retries and hope that will fix 16725 * the problem. Otherwise, we're dead. 16726 */ 16727 if ((pktp->pkt_flags & FLAG_SILENT) == 0) { 16728 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16729 "Unhandled Sense Key '%s'\n", sense_keys[sense_key]); 16730 } 16731 16732 si.ssi_severity = SCSI_ERR_FATAL; 16733 si.ssi_pfa_flag = FALSE; 16734 16735 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 16736 &si, EIO, (clock_t)0, NULL); 16737 } 16738 16739 16740 16741 /* 16742 * Function: sd_print_retry_msg 16743 * 16744 * Description: Print a message indicating the retry action being taken. 16745 * 16746 * Arguments: un - ptr to associated softstate 16747 * bp - ptr to buf(9S) for the command 16748 * arg - not used. 16749 * flag - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED, 16750 * or SD_NO_RETRY_ISSUED 16751 * 16752 * Context: May be called from interrupt context 16753 */ 16754 /* ARGSUSED */ 16755 static void 16756 sd_print_retry_msg(struct sd_lun *un, struct buf *bp, void *arg, int flag) 16757 { 16758 struct sd_xbuf *xp; 16759 struct scsi_pkt *pktp; 16760 char *reasonp; 16761 char *msgp; 16762 16763 ASSERT(un != NULL); 16764 ASSERT(mutex_owned(SD_MUTEX(un))); 16765 ASSERT(bp != NULL); 16766 pktp = SD_GET_PKTP(bp); 16767 ASSERT(pktp != NULL); 16768 xp = SD_GET_XBUF(bp); 16769 ASSERT(xp != NULL); 16770 16771 ASSERT(!mutex_owned(&un->un_pm_mutex)); 16772 mutex_enter(&un->un_pm_mutex); 16773 if ((un->un_state == SD_STATE_SUSPENDED) || 16774 (SD_DEVICE_IS_IN_LOW_POWER(un)) || 16775 (pktp->pkt_flags & FLAG_SILENT)) { 16776 mutex_exit(&un->un_pm_mutex); 16777 goto update_pkt_reason; 16778 } 16779 mutex_exit(&un->un_pm_mutex); 16780 16781 /* 16782 * Suppress messages if they are all the same pkt_reason; with 16783 * TQ, many (up to 256) are returned with the same pkt_reason. 16784 * If we are in panic, then suppress the retry messages. 
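 *
 * (The checks below implement this: for a retry, an unchanged
 * pkt_reason is logged again only when sd_error_level is set to
 * SCSI_ERR_ALL, and nothing is logged while in panic or once the
 * device has been marked offline.)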
16785 */
16786 switch (flag) {
16787 case SD_NO_RETRY_ISSUED:
16788 msgp = "giving up";
16789 break;
16790 case SD_IMMEDIATE_RETRY_ISSUED:
16791 case SD_DELAYED_RETRY_ISSUED:
16792 if (ddi_in_panic() || (un->un_state == SD_STATE_OFFLINE) ||
16793 ((pktp->pkt_reason == un->un_last_pkt_reason) &&
16794 (sd_error_level != SCSI_ERR_ALL))) {
16795 return;
16796 }
16797 msgp = "retrying command";
16798 break;
16799 default:
16800 goto update_pkt_reason;
16801 }
16802
16803 reasonp = (((pktp->pkt_statistics & STAT_PERR) != 0) ? "parity error" :
16804 scsi_rname(pktp->pkt_reason));
16805
16806 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
16807 "SCSI transport failed: reason '%s': %s\n", reasonp, msgp);
16808
16809 update_pkt_reason:
16810 /*
16811 * Update un->un_last_pkt_reason with the value in pktp->pkt_reason.
16812 * This is to prevent multiple console messages for the same failure
16813 * condition. Note that un->un_last_pkt_reason is NOT restored if &
16814 * when the command is retried successfully because there still may be
16815 * more commands coming back with the same value of pktp->pkt_reason.
16816 */
16817 if ((pktp->pkt_reason != CMD_CMPLT) || (xp->xb_retry_count == 0)) {
16818 un->un_last_pkt_reason = pktp->pkt_reason;
16819 }
16820 }
16821
16822
16823 /*
16824 * Function: sd_print_cmd_incomplete_msg
16825 *
16826 * Description: Message logging fn. for a SCSA "CMD_INCOMPLETE" pkt_reason.
16827 *
16828 * Arguments: un - ptr to associated softstate
16829 * bp - ptr to buf(9S) for the command
16830 * arg - passed to sd_print_retry_msg()
16831 * code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED,
16832 * or SD_NO_RETRY_ISSUED
16833 *
16834 * Context: May be called from interrupt context
16835 */
16836
16837 static void
16838 sd_print_cmd_incomplete_msg(struct sd_lun *un, struct buf *bp, void *arg,
16839 int code)
16840 {
16841 dev_info_t *dip;
16842
16843 ASSERT(un != NULL);
16844 ASSERT(mutex_owned(SD_MUTEX(un)));
16845 ASSERT(bp != NULL);
16846
16847 switch (code) {
16848 case SD_NO_RETRY_ISSUED:
16849 /* Command was failed. Someone turned off this target? */
16850 if (un->un_state != SD_STATE_OFFLINE) {
16851 /*
16852 * Suppress the message if we are detaching and the
16853 * device has been disconnected.
16854 * Note that DEVI_IS_DEVICE_REMOVED is a consolidation-
16855 * private interface and not part of the DDI.
16856 */
16857 dip = un->un_sd->sd_dev;
16858 if (!(DEVI_IS_DETACHING(dip) &&
16859 DEVI_IS_DEVICE_REMOVED(dip))) {
16860 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
16861 "disk not responding to selection\n");
16862 }
16863 New_state(un, SD_STATE_OFFLINE);
16864 }
16865 break;
16866
16867 case SD_DELAYED_RETRY_ISSUED:
16868 case SD_IMMEDIATE_RETRY_ISSUED:
16869 default:
16870 /* Command was successfully queued for retry */
16871 sd_print_retry_msg(un, bp, arg, code);
16872 break;
16873 }
16874 }
16875
16876
16877 /*
16878 * Function: sd_pkt_reason_cmd_incomplete
16879 *
16880 * Description: Recovery actions for a SCSA "CMD_INCOMPLETE" pkt_reason.
16881 * 16882 * Context: May be called from interrupt context 16883 */ 16884 16885 static void 16886 sd_pkt_reason_cmd_incomplete(struct sd_lun *un, struct buf *bp, 16887 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16888 { 16889 int flag = SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE; 16890 16891 ASSERT(un != NULL); 16892 ASSERT(mutex_owned(SD_MUTEX(un))); 16893 ASSERT(bp != NULL); 16894 ASSERT(xp != NULL); 16895 ASSERT(pktp != NULL); 16896 16897 /* Do not do a reset if selection did not complete */ 16898 /* Note: Should this not just check the bit? */ 16899 if (pktp->pkt_state != STATE_GOT_BUS) { 16900 SD_UPDATE_ERRSTATS(un, sd_transerrs); 16901 sd_reset_target(un, pktp); 16902 } 16903 16904 /* 16905 * If the target was not successfully selected, then set 16906 * SD_RETRIES_FAILFAST to indicate that we lost communication 16907 * with the target, and further retries and/or commands are 16908 * likely to take a long time. 16909 */ 16910 if ((pktp->pkt_state & STATE_GOT_TARGET) == 0) { 16911 flag |= SD_RETRIES_FAILFAST; 16912 } 16913 16914 SD_UPDATE_RESERVATION_STATUS(un, pktp); 16915 16916 sd_retry_command(un, bp, flag, 16917 sd_print_cmd_incomplete_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 16918 } 16919 16920 16921 16922 /* 16923 * Function: sd_pkt_reason_cmd_tran_err 16924 * 16925 * Description: Recovery actions for a SCSA "CMD_TRAN_ERR" pkt_reason. 16926 * 16927 * Context: May be called from interrupt context 16928 */ 16929 16930 static void 16931 sd_pkt_reason_cmd_tran_err(struct sd_lun *un, struct buf *bp, 16932 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16933 { 16934 ASSERT(un != NULL); 16935 ASSERT(mutex_owned(SD_MUTEX(un))); 16936 ASSERT(bp != NULL); 16937 ASSERT(xp != NULL); 16938 ASSERT(pktp != NULL); 16939 16940 /* 16941 * Do not reset if we got a parity error, or if 16942 * selection did not complete. 16943 */ 16944 SD_UPDATE_ERRSTATS(un, sd_harderrs); 16945 /* Note: Should this not just check the bit for pkt_state? */ 16946 if (((pktp->pkt_statistics & STAT_PERR) == 0) && 16947 (pktp->pkt_state != STATE_GOT_BUS)) { 16948 SD_UPDATE_ERRSTATS(un, sd_transerrs); 16949 sd_reset_target(un, pktp); 16950 } 16951 16952 SD_UPDATE_RESERVATION_STATUS(un, pktp); 16953 16954 sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE), 16955 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 16956 } 16957 16958 16959 16960 /* 16961 * Function: sd_pkt_reason_cmd_reset 16962 * 16963 * Description: Recovery actions for a SCSA "CMD_RESET" pkt_reason. 16964 * 16965 * Context: May be called from interrupt context 16966 */ 16967 16968 static void 16969 sd_pkt_reason_cmd_reset(struct sd_lun *un, struct buf *bp, 16970 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16971 { 16972 ASSERT(un != NULL); 16973 ASSERT(mutex_owned(SD_MUTEX(un))); 16974 ASSERT(bp != NULL); 16975 ASSERT(xp != NULL); 16976 ASSERT(pktp != NULL); 16977 16978 /* The target may still be running the command, so try to reset. */ 16979 SD_UPDATE_ERRSTATS(un, sd_transerrs); 16980 sd_reset_target(un, pktp); 16981 16982 SD_UPDATE_RESERVATION_STATUS(un, pktp); 16983 16984 /* 16985 * If pkt_reason is CMD_RESET chances are that this pkt got 16986 * reset because another target on this bus caused it. The target 16987 * that caused it should get CMD_TIMEOUT with pkt_statistics 16988 * of STAT_TIMEOUT/STAT_DEV_RESET. 
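 *
 * Hence the SD_RETRIES_VICTIM flag in the retry below: this target
 * is retried as the victim of a reset caused by another target on
 * the bus rather than under the standard retry policy.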
16989 */ 16990 16991 sd_retry_command(un, bp, (SD_RETRIES_VICTIM | SD_RETRIES_ISOLATE), 16992 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 16993 } 16994 16995 16996 16997 16998 /* 16999 * Function: sd_pkt_reason_cmd_aborted 17000 * 17001 * Description: Recovery actions for a SCSA "CMD_ABORTED" pkt_reason. 17002 * 17003 * Context: May be called from interrupt context 17004 */ 17005 17006 static void 17007 sd_pkt_reason_cmd_aborted(struct sd_lun *un, struct buf *bp, 17008 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17009 { 17010 ASSERT(un != NULL); 17011 ASSERT(mutex_owned(SD_MUTEX(un))); 17012 ASSERT(bp != NULL); 17013 ASSERT(xp != NULL); 17014 ASSERT(pktp != NULL); 17015 17016 /* The target may still be running the command, so try to reset. */ 17017 SD_UPDATE_ERRSTATS(un, sd_transerrs); 17018 sd_reset_target(un, pktp); 17019 17020 SD_UPDATE_RESERVATION_STATUS(un, pktp); 17021 17022 /* 17023 * If pkt_reason is CMD_ABORTED chances are that this pkt got 17024 * aborted because another target on this bus caused it. The target 17025 * that caused it should get CMD_TIMEOUT with pkt_statistics 17026 * of STAT_TIMEOUT/STAT_DEV_RESET. 17027 */ 17028 17029 sd_retry_command(un, bp, (SD_RETRIES_VICTIM | SD_RETRIES_ISOLATE), 17030 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 17031 } 17032 17033 17034 17035 /* 17036 * Function: sd_pkt_reason_cmd_timeout 17037 * 17038 * Description: Recovery actions for a SCSA "CMD_TIMEOUT" pkt_reason. 17039 * 17040 * Context: May be called from interrupt context 17041 */ 17042 17043 static void 17044 sd_pkt_reason_cmd_timeout(struct sd_lun *un, struct buf *bp, 17045 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17046 { 17047 ASSERT(un != NULL); 17048 ASSERT(mutex_owned(SD_MUTEX(un))); 17049 ASSERT(bp != NULL); 17050 ASSERT(xp != NULL); 17051 ASSERT(pktp != NULL); 17052 17053 17054 SD_UPDATE_ERRSTATS(un, sd_transerrs); 17055 sd_reset_target(un, pktp); 17056 17057 SD_UPDATE_RESERVATION_STATUS(un, pktp); 17058 17059 /* 17060 * A command timeout indicates that we could not establish 17061 * communication with the target, so set SD_RETRIES_FAILFAST 17062 * as further retries/commands are likely to take a long time. 17063 */ 17064 sd_retry_command(un, bp, 17065 (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE | SD_RETRIES_FAILFAST), 17066 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 17067 } 17068 17069 17070 17071 /* 17072 * Function: sd_pkt_reason_cmd_unx_bus_free 17073 * 17074 * Description: Recovery actions for a SCSA "CMD_UNX_BUS_FREE" pkt_reason. 17075 * 17076 * Context: May be called from interrupt context 17077 */ 17078 17079 static void 17080 sd_pkt_reason_cmd_unx_bus_free(struct sd_lun *un, struct buf *bp, 17081 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17082 { 17083 void (*funcp)(struct sd_lun *un, struct buf *bp, void *arg, int code); 17084 17085 ASSERT(un != NULL); 17086 ASSERT(mutex_owned(SD_MUTEX(un))); 17087 ASSERT(bp != NULL); 17088 ASSERT(xp != NULL); 17089 ASSERT(pktp != NULL); 17090 17091 SD_UPDATE_ERRSTATS(un, sd_harderrs); 17092 SD_UPDATE_RESERVATION_STATUS(un, pktp); 17093 17094 funcp = ((pktp->pkt_statistics & STAT_PERR) == 0) ? 17095 sd_print_retry_msg : NULL; 17096 17097 sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE), 17098 funcp, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 17099 } 17100 17101 17102 /* 17103 * Function: sd_pkt_reason_cmd_tag_reject 17104 * 17105 * Description: Recovery actions for a SCSA "CMD_TAG_REJECT" pkt_reason. 
17106 * 17107 * Context: May be called from interrupt context 17108 */ 17109 17110 static void 17111 sd_pkt_reason_cmd_tag_reject(struct sd_lun *un, struct buf *bp, 17112 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17113 { 17114 ASSERT(un != NULL); 17115 ASSERT(mutex_owned(SD_MUTEX(un))); 17116 ASSERT(bp != NULL); 17117 ASSERT(xp != NULL); 17118 ASSERT(pktp != NULL); 17119 17120 SD_UPDATE_ERRSTATS(un, sd_harderrs); 17121 pktp->pkt_flags = 0; 17122 un->un_tagflags = 0; 17123 if (un->un_f_opt_queueing == TRUE) { 17124 un->un_throttle = min(un->un_throttle, 3); 17125 } else { 17126 un->un_throttle = 1; 17127 } 17128 mutex_exit(SD_MUTEX(un)); 17129 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1); 17130 mutex_enter(SD_MUTEX(un)); 17131 17132 SD_UPDATE_RESERVATION_STATUS(un, pktp); 17133 17134 /* Legacy behavior not to check retry counts here. */ 17135 sd_retry_command(un, bp, (SD_RETRIES_NOCHECK | SD_RETRIES_ISOLATE), 17136 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 17137 } 17138 17139 17140 /* 17141 * Function: sd_pkt_reason_default 17142 * 17143 * Description: Default recovery actions for SCSA pkt_reason values that 17144 * do not have more explicit recovery actions. 17145 * 17146 * Context: May be called from interrupt context 17147 */ 17148 17149 static void 17150 sd_pkt_reason_default(struct sd_lun *un, struct buf *bp, 17151 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17152 { 17153 ASSERT(un != NULL); 17154 ASSERT(mutex_owned(SD_MUTEX(un))); 17155 ASSERT(bp != NULL); 17156 ASSERT(xp != NULL); 17157 ASSERT(pktp != NULL); 17158 17159 SD_UPDATE_ERRSTATS(un, sd_transerrs); 17160 sd_reset_target(un, pktp); 17161 17162 SD_UPDATE_RESERVATION_STATUS(un, pktp); 17163 17164 sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE), 17165 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 17166 } 17167 17168 17169 17170 /* 17171 * Function: sd_pkt_status_check_condition 17172 * 17173 * Description: Recovery actions for a "STATUS_CHECK" SCSI command status. 17174 * 17175 * Context: May be called from interrupt context 17176 */ 17177 17178 static void 17179 sd_pkt_status_check_condition(struct sd_lun *un, struct buf *bp, 17180 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17181 { 17182 ASSERT(un != NULL); 17183 ASSERT(mutex_owned(SD_MUTEX(un))); 17184 ASSERT(bp != NULL); 17185 ASSERT(xp != NULL); 17186 ASSERT(pktp != NULL); 17187 17188 SD_TRACE(SD_LOG_IO, un, "sd_pkt_status_check_condition: " 17189 "entry: buf:0x%p xp:0x%p\n", bp, xp); 17190 17191 /* 17192 * If ARQ is NOT enabled, then issue a REQUEST SENSE command (the 17193 * command will be retried after the request sense). Otherwise, retry 17194 * the command. Note: we are issuing the request sense even though the 17195 * retry limit may have been reached for the failed command. 
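 *
 * (The sense data is needed to decide how to finish the failed
 * command, so the request sense is still sent in that case.)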
17196 */
17197 if (un->un_f_arq_enabled == FALSE) {
17198 SD_INFO(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: "
17199 "no ARQ, sending request sense command\n");
17200 sd_send_request_sense_command(un, bp, pktp);
17201 } else {
17202 SD_INFO(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: "
17203 "ARQ, retrying request sense command\n");
17204 #if defined(__i386) || defined(__amd64)
17205 /*
17206 * The SD_RETRY_DELAY value needs to be adjusted here
17207 * when SD_RETRY_DELAY changes in sddef.h.
17208 */
17209 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL, EIO,
17210 un->un_f_is_fibre ? drv_usectohz(100000) : (clock_t)0,
17211 NULL);
17212 #else
17213 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL,
17214 EIO, SD_RETRY_DELAY, NULL);
17215 #endif
17216 }
17217
17218 SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: exit\n");
17219 }
17220
17221
17222 /*
17223 * Function: sd_pkt_status_busy
17224 *
17225 * Description: Recovery actions for a "STATUS_BUSY" SCSI command status.
17226 *
17227 * Context: May be called from interrupt context
17228 */
17229
17230 static void
17231 sd_pkt_status_busy(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
17232 struct scsi_pkt *pktp)
17233 {
17234 ASSERT(un != NULL);
17235 ASSERT(mutex_owned(SD_MUTEX(un)));
17236 ASSERT(bp != NULL);
17237 ASSERT(xp != NULL);
17238 ASSERT(pktp != NULL);
17239
17240 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
17241 "sd_pkt_status_busy: entry\n");
17242
17243 /* If retries are exhausted, just fail the command. */
17244 if (xp->xb_retry_count >= un->un_busy_retry_count) {
17245 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
17246 "device busy too long\n");
17247 sd_return_failed_command(un, bp, EIO);
17248 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
17249 "sd_pkt_status_busy: exit\n");
17250 return;
17251 }
17252 xp->xb_retry_count++;
17253
17254 /*
17255 * Try to reset the target. However, we do not want to perform
17256 * more than one reset if the device continues to fail. The reset
17257 * will be performed when the retry count reaches the reset
17258 * threshold. This threshold should be set such that at least
17259 * one retry is issued before the reset is performed.
17260 */
17261 if (xp->xb_retry_count ==
17262 ((un->un_reset_retry_count < 2) ? 2 : un->un_reset_retry_count)) {
17263 int rval = 0;
17264 mutex_exit(SD_MUTEX(un));
17265 if (un->un_f_allow_bus_device_reset == TRUE) {
17266 /*
17267 * First try to reset the LUN; if we cannot then
17268 * try to reset the target.
17269 */
17270 if (un->un_f_lun_reset_enabled == TRUE) {
17271 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
17272 "sd_pkt_status_busy: RESET_LUN\n");
17273 rval = scsi_reset(SD_ADDRESS(un), RESET_LUN);
17274 }
17275 if (rval == 0) {
17276 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
17277 "sd_pkt_status_busy: RESET_TARGET\n");
17278 rval = scsi_reset(SD_ADDRESS(un), RESET_TARGET);
17279 }
17280 }
17281 if (rval == 0) {
17282 /*
17283 * If the RESET_LUN and/or RESET_TARGET failed,
17284 * try RESET_ALL.
17285 */
17286 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
17287 "sd_pkt_status_busy: RESET_ALL\n");
17288 rval = scsi_reset(SD_ADDRESS(un), RESET_ALL);
17289 }
17290 mutex_enter(SD_MUTEX(un));
17291 if (rval == 0) {
17292 /*
17293 * The RESET_LUN, RESET_TARGET, and/or RESET_ALL failed.
17294 * At this point we give up & fail the command.
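 *
 * To summarize the escalation above: a LUN reset is tried first
 * (if enabled), then a target reset, then a bus reset via
 * RESET_ALL; only when all of these fail is the command failed.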
17295 */ 17296 sd_return_failed_command(un, bp, EIO); 17297 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17298 "sd_pkt_status_busy: exit (failed cmd)\n"); 17299 return; 17300 } 17301 } 17302 17303 /* 17304 * Retry the command. Be sure to specify SD_RETRIES_NOCHECK as 17305 * we have already checked the retry counts above. 17306 */ 17307 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, NULL, NULL, 17308 EIO, SD_BSY_TIMEOUT, NULL); 17309 17310 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17311 "sd_pkt_status_busy: exit\n"); 17312 } 17313 17314 17315 /* 17316 * Function: sd_pkt_status_reservation_conflict 17317 * 17318 * Description: Recovery actions for a "STATUS_RESERVATION_CONFLICT" SCSI 17319 * command status. 17320 * 17321 * Context: May be called from interrupt context 17322 */ 17323 17324 static void 17325 sd_pkt_status_reservation_conflict(struct sd_lun *un, struct buf *bp, 17326 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17327 { 17328 ASSERT(un != NULL); 17329 ASSERT(mutex_owned(SD_MUTEX(un))); 17330 ASSERT(bp != NULL); 17331 ASSERT(xp != NULL); 17332 ASSERT(pktp != NULL); 17333 17334 /* 17335 * If the command was PERSISTENT_RESERVATION_[IN|OUT] then reservation 17336 * conflict could be due to various reasons like incorrect keys, not 17337 * registered or not reserved etc. So, we return EACCES to the caller. 17338 */ 17339 if (un->un_reservation_type == SD_SCSI3_RESERVATION) { 17340 int cmd = SD_GET_PKT_OPCODE(pktp); 17341 if ((cmd == SCMD_PERSISTENT_RESERVE_IN) || 17342 (cmd == SCMD_PERSISTENT_RESERVE_OUT)) { 17343 sd_return_failed_command(un, bp, EACCES); 17344 return; 17345 } 17346 } 17347 17348 un->un_resvd_status |= SD_RESERVATION_CONFLICT; 17349 17350 if ((un->un_resvd_status & SD_FAILFAST) != 0) { 17351 if (sd_failfast_enable != 0) { 17352 /* By definition, we must panic here.... */ 17353 sd_panic_for_res_conflict(un); 17354 /*NOTREACHED*/ 17355 } 17356 SD_ERROR(SD_LOG_IO, un, 17357 "sd_handle_resv_conflict: Disk Reserved\n"); 17358 sd_return_failed_command(un, bp, EACCES); 17359 return; 17360 } 17361 17362 /* 17363 * 1147670: retry only if sd_retry_on_reservation_conflict 17364 * property is set (default is 1). Retries will not succeed 17365 * on a disk reserved by another initiator. HA systems 17366 * may reset this via sd.conf to avoid these retries. 17367 * 17368 * Note: The legacy return code for this failure is EIO, however EACCES 17369 * seems more appropriate for a reservation conflict. 17370 */ 17371 if (sd_retry_on_reservation_conflict == 0) { 17372 SD_ERROR(SD_LOG_IO, un, 17373 "sd_handle_resv_conflict: Device Reserved\n"); 17374 sd_return_failed_command(un, bp, EIO); 17375 return; 17376 } 17377 17378 /* 17379 * Retry the command if we can. 17380 * 17381 * Note: The legacy return code for this failure is EIO, however EACCES 17382 * seems more appropriate for a reservation conflict. 17383 */ 17384 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL, EIO, 17385 (clock_t)2, NULL); 17386 } 17387 17388 17389 17390 /* 17391 * Function: sd_pkt_status_qfull 17392 * 17393 * Description: Handle a QUEUE FULL condition from the target. This can 17394 * occur if the HBA does not handle the queue full condition. 17395 * (Basically this means third-party HBAs as Sun HBAs will 17396 * handle the queue full condition.) Note that if there are 17397 * some commands already in the transport, then the queue full 17398 * has occurred because the queue for this nexus is actually 17399 * full. 
If there are no commands in the transport, then the 17400 * queue full is resulting from some other initiator or lun 17401 * consuming all the resources at the target. 17402 * 17403 * Context: May be called from interrupt context 17404 */ 17405 17406 static void 17407 sd_pkt_status_qfull(struct sd_lun *un, struct buf *bp, 17408 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17409 { 17410 ASSERT(un != NULL); 17411 ASSERT(mutex_owned(SD_MUTEX(un))); 17412 ASSERT(bp != NULL); 17413 ASSERT(xp != NULL); 17414 ASSERT(pktp != NULL); 17415 17416 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17417 "sd_pkt_status_qfull: entry\n"); 17418 17419 /* 17420 * Just lower the QFULL throttle and retry the command. Note that 17421 * we do not limit the number of retries here. 17422 */ 17423 sd_reduce_throttle(un, SD_THROTTLE_QFULL); 17424 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, NULL, NULL, 0, 17425 SD_RESTART_TIMEOUT, NULL); 17426 17427 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17428 "sd_pkt_status_qfull: exit\n"); 17429 } 17430 17431 17432 /* 17433 * Function: sd_reset_target 17434 * 17435 * Description: Issue a scsi_reset(9F), with either RESET_LUN, 17436 * RESET_TARGET, or RESET_ALL. 17437 * 17438 * Context: May be called under interrupt context. 17439 */ 17440 17441 static void 17442 sd_reset_target(struct sd_lun *un, struct scsi_pkt *pktp) 17443 { 17444 int rval = 0; 17445 17446 ASSERT(un != NULL); 17447 ASSERT(mutex_owned(SD_MUTEX(un))); 17448 ASSERT(pktp != NULL); 17449 17450 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reset_target: entry\n"); 17451 17452 /* 17453 * No need to reset if the transport layer has already done so. 17454 */ 17455 if ((pktp->pkt_statistics & 17456 (STAT_BUS_RESET | STAT_DEV_RESET | STAT_ABORTED)) != 0) { 17457 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17458 "sd_reset_target: no reset\n"); 17459 return; 17460 } 17461 17462 mutex_exit(SD_MUTEX(un)); 17463 17464 if (un->un_f_allow_bus_device_reset == TRUE) { 17465 if (un->un_f_lun_reset_enabled == TRUE) { 17466 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17467 "sd_reset_target: RESET_LUN\n"); 17468 rval = scsi_reset(SD_ADDRESS(un), RESET_LUN); 17469 } 17470 if (rval == 0) { 17471 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17472 "sd_reset_target: RESET_TARGET\n"); 17473 rval = scsi_reset(SD_ADDRESS(un), RESET_TARGET); 17474 } 17475 } 17476 17477 if (rval == 0) { 17478 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17479 "sd_reset_target: RESET_ALL\n"); 17480 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL); 17481 } 17482 17483 mutex_enter(SD_MUTEX(un)); 17484 17485 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reset_target: exit\n"); 17486 } 17487 17488 17489 /* 17490 * Function: sd_media_change_task 17491 * 17492 * Description: Recovery action for CDROM to become available. 
17493 *
17494 * Context: Executes in a taskq() thread context
17495 */
17496
17497 static void
17498 sd_media_change_task(void *arg)
17499 {
17500 struct scsi_pkt *pktp = arg;
17501 struct sd_lun *un;
17502 struct buf *bp;
17503 struct sd_xbuf *xp;
17504 int err = 0;
17505 int retry_count = 0;
17506 int retry_limit = SD_UNIT_ATTENTION_RETRY/10;
17507 struct sd_sense_info si;
17508
17509 ASSERT(pktp != NULL);
17510 bp = (struct buf *)pktp->pkt_private;
17511 ASSERT(bp != NULL);
17512 xp = SD_GET_XBUF(bp);
17513 ASSERT(xp != NULL);
17514 un = SD_GET_UN(bp);
17515 ASSERT(un != NULL);
17516 ASSERT(!mutex_owned(SD_MUTEX(un)));
17517 ASSERT(un->un_f_monitor_media_state);
17518
17519 si.ssi_severity = SCSI_ERR_INFO;
17520 si.ssi_pfa_flag = FALSE;
17521
17522 /*
17523 * When a reset is issued on a CDROM, it takes a long time to
17524 * recover. The first few attempts to read the capacity and other
17525 * things related to handling the unit attention fail (with an
17526 * ASC of 0x04 and an ASCQ of 0x01). In that case we want to allow
17527 * enough retries, while still limiting the retries for genuine
17528 * failures such as no media in the drive.
17529 */
17530 while (retry_count++ < retry_limit) {
17531 if ((err = sd_handle_mchange(un)) == 0) {
17532 break;
17533 }
17534 if (err == EAGAIN) {
17535 retry_limit = SD_UNIT_ATTENTION_RETRY;
17536 }
17537 /* Sleep for 0.5 sec. & try again */
17538 delay(drv_usectohz(500000));
17539 }
17540
17541 /*
17542 * Dispatch (retry or fail) the original command here,
17543 * along with appropriate console messages....
17544 *
17545 * Must grab the mutex before calling sd_retry_command,
17546 * sd_print_sense_msg and sd_return_failed_command.
17547 */
17548 mutex_enter(SD_MUTEX(un));
17549 if (err != SD_CMD_SUCCESS) {
17550 SD_UPDATE_ERRSTATS(un, sd_harderrs);
17551 SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err);
17552 si.ssi_severity = SCSI_ERR_FATAL;
17553 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
17554 sd_return_failed_command(un, bp, EIO);
17555 } else {
17556 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, sd_print_sense_msg,
17557 &si, EIO, (clock_t)0, NULL);
17558 }
17559 mutex_exit(SD_MUTEX(un));
17560 }
17561
17562
17563
17564 /*
17565 * Function: sd_handle_mchange
17566 *
17567 * Description: Perform geometry validation & other recovery when CDROM
17568 * has been removed from drive.
17569 * 17570 * Return Code: 0 for success 17571 * errno-type return code of either sd_send_scsi_DOORLOCK() or 17572 * sd_send_scsi_READ_CAPACITY() 17573 * 17574 * Context: Executes in a taskq() thread context 17575 */ 17576 17577 static int 17578 sd_handle_mchange(struct sd_lun *un) 17579 { 17580 uint64_t capacity; 17581 uint32_t lbasize; 17582 int rval; 17583 17584 ASSERT(!mutex_owned(SD_MUTEX(un))); 17585 ASSERT(un->un_f_monitor_media_state); 17586 17587 if ((rval = sd_send_scsi_READ_CAPACITY(un, &capacity, &lbasize, 17588 SD_PATH_DIRECT_PRIORITY)) != 0) { 17589 return (rval); 17590 } 17591 17592 mutex_enter(SD_MUTEX(un)); 17593 sd_update_block_info(un, lbasize, capacity); 17594 17595 if (un->un_errstats != NULL) { 17596 struct sd_errstats *stp = 17597 (struct sd_errstats *)un->un_errstats->ks_data; 17598 stp->sd_capacity.value.ui64 = (uint64_t) 17599 ((uint64_t)un->un_blockcount * 17600 (uint64_t)un->un_tgt_blocksize); 17601 } 17602 17603 17604 /* 17605 * Check if the media in the device is writable or not 17606 */ 17607 if (ISCD(un)) 17608 sd_check_for_writable_cd(un, SD_PATH_DIRECT_PRIORITY); 17609 17610 /* 17611 * Note: Maybe let the strategy/partitioning chain worry about getting 17612 * valid geometry. 17613 */ 17614 mutex_exit(SD_MUTEX(un)); 17615 cmlb_invalidate(un->un_cmlbhandle, (void *)SD_PATH_DIRECT_PRIORITY); 17616 17617 17618 if (cmlb_validate(un->un_cmlbhandle, 0, 17619 (void *)SD_PATH_DIRECT_PRIORITY) != 0) { 17620 return (EIO); 17621 } else { 17622 if (un->un_f_pkstats_enabled) { 17623 sd_set_pstats(un); 17624 SD_TRACE(SD_LOG_IO_PARTITION, un, 17625 "sd_handle_mchange: un:0x%p pstats created and " 17626 "set\n", un); 17627 } 17628 } 17629 17630 17631 /* 17632 * Try to lock the door 17633 */ 17634 return (sd_send_scsi_DOORLOCK(un, SD_REMOVAL_PREVENT, 17635 SD_PATH_DIRECT_PRIORITY)); 17636 } 17637 17638 17639 /* 17640 * Function: sd_send_scsi_DOORLOCK 17641 * 17642 * Description: Issue the scsi DOOR LOCK command 17643 * 17644 * Arguments: un - pointer to driver soft state (unit) structure for 17645 * this target. 17646 * flag - SD_REMOVAL_ALLOW 17647 * SD_REMOVAL_PREVENT 17648 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 17649 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 17650 * to use the USCSI "direct" chain and bypass the normal 17651 * command waitq. SD_PATH_DIRECT_PRIORITY is used when this 17652 * command is issued as part of an error recovery action. 17653 * 17654 * Return Code: 0 - Success 17655 * errno return code from sd_send_scsi_cmd() 17656 * 17657 * Context: Can sleep. 17658 */ 17659 17660 static int 17661 sd_send_scsi_DOORLOCK(struct sd_lun *un, int flag, int path_flag) 17662 { 17663 union scsi_cdb cdb; 17664 struct uscsi_cmd ucmd_buf; 17665 struct scsi_extended_sense sense_buf; 17666 int status; 17667 17668 ASSERT(un != NULL); 17669 ASSERT(!mutex_owned(SD_MUTEX(un))); 17670 17671 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_DOORLOCK: entry: un:0x%p\n", un); 17672 17673 /* already determined doorlock is not supported, fake success */ 17674 if (un->un_f_doorlock_supported == FALSE) { 17675 return (0); 17676 } 17677 17678 /* 17679 * If we are ejecting and see an SD_REMOVAL_PREVENT 17680 * ignore the command so we can complete the eject 17681 * operation. 
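 *
 * EAGAIN is returned in that case (rather than success) so that
 * the caller can tell that the door was not actually locked.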
17682 */
17683 if (flag == SD_REMOVAL_PREVENT) {
17684 mutex_enter(SD_MUTEX(un));
17685 if (un->un_f_ejecting == TRUE) {
17686 mutex_exit(SD_MUTEX(un));
17687 return (EAGAIN);
17688 }
17689 mutex_exit(SD_MUTEX(un));
17690 }
17691
17692 bzero(&cdb, sizeof (cdb));
17693 bzero(&ucmd_buf, sizeof (ucmd_buf));
17694
17695 cdb.scc_cmd = SCMD_DOORLOCK;
17696 cdb.cdb_opaque[4] = (uchar_t)flag;
17697
17698 ucmd_buf.uscsi_cdb = (char *)&cdb;
17699 ucmd_buf.uscsi_cdblen = CDB_GROUP0;
17700 ucmd_buf.uscsi_bufaddr = NULL;
17701 ucmd_buf.uscsi_buflen = 0;
17702 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
17703 ucmd_buf.uscsi_rqlen = sizeof (sense_buf);
17704 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT;
17705 ucmd_buf.uscsi_timeout = 15;
17706
17707 SD_TRACE(SD_LOG_IO, un,
17708 "sd_send_scsi_DOORLOCK: returning sd_send_scsi_cmd()\n");
17709
17710 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL,
17711 UIO_SYSSPACE, path_flag);
17712
17713 if ((status == EIO) && (ucmd_buf.uscsi_status == STATUS_CHECK) &&
17714 (ucmd_buf.uscsi_rqstatus == STATUS_GOOD) &&
17715 (scsi_sense_key((uint8_t *)&sense_buf) == KEY_ILLEGAL_REQUEST)) {
17716 /* fake success and skip subsequent doorlock commands */
17717 un->un_f_doorlock_supported = FALSE;
17718 return (0);
17719 }
17720
17721 return (status);
17722 }
17723
17724 /*
17725 * Function: sd_send_scsi_READ_CAPACITY
17726 *
17727 * Description: This routine uses the scsi READ CAPACITY command to determine
17728 * the device capacity in number of blocks and the device native
17729 * block size. If this function returns a failure, then the
17730 * values in *capp and *lbap are undefined. If the capacity
17731 * returned is 0xffffffff then the lun is too large for a
17732 * normal READ CAPACITY command and the results of a
17733 * READ CAPACITY 16 will be used instead.
17734 *
17735 * Arguments: un - ptr to soft state struct for the target
17736 * capp - ptr to unsigned 64-bit variable to receive the
17737 * capacity value from the command.
17738 * lbap - ptr to unsigned 32-bit variable to receive the
17739 * block size value from the command
17740 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
17741 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY
17742 * to use the USCSI "direct" chain and bypass the normal
17743 * command waitq. SD_PATH_DIRECT_PRIORITY is used when this
17744 * command is issued as part of an error recovery action.
17745 *
17746 * Return Code: 0 - Success
17747 * EIO - IO error
17748 * EACCES - Reservation conflict detected
17749 * EAGAIN - Device is becoming ready
17750 * errno return code from sd_send_scsi_cmd()
17751 *
17752 * Context: Can sleep. Blocks until command completes.
17753 */
17754
17755 #define SD_CAPACITY_SIZE sizeof (struct scsi_capacity)
17756
17757 static int
17758 sd_send_scsi_READ_CAPACITY(struct sd_lun *un, uint64_t *capp, uint32_t *lbap,
17759 int path_flag)
17760 {
17761 struct scsi_extended_sense sense_buf;
17762 struct uscsi_cmd ucmd_buf;
17763 union scsi_cdb cdb;
17764 uint32_t *capacity_buf;
17765 uint64_t capacity;
17766 uint32_t lbasize;
17767 int status;
17768
17769 ASSERT(un != NULL);
17770 ASSERT(!mutex_owned(SD_MUTEX(un)));
17771 ASSERT(capp != NULL);
17772 ASSERT(lbap != NULL);
17773
17774 SD_TRACE(SD_LOG_IO, un,
17775 "sd_send_scsi_READ_CAPACITY: entry: un:0x%p\n", un);
17776
17777 /*
17778 * First send a READ_CAPACITY command to the target.
17779 * (This command is mandatory under SCSI-2.)
17780 *
17781 * Set up the CDB for the READ_CAPACITY command.
The Partial 17782 * Medium Indicator bit is cleared. The address field must be 17783 * zero if the PMI bit is zero. 17784 */ 17785 bzero(&cdb, sizeof (cdb)); 17786 bzero(&ucmd_buf, sizeof (ucmd_buf)); 17787 17788 capacity_buf = kmem_zalloc(SD_CAPACITY_SIZE, KM_SLEEP); 17789 17790 cdb.scc_cmd = SCMD_READ_CAPACITY; 17791 17792 ucmd_buf.uscsi_cdb = (char *)&cdb; 17793 ucmd_buf.uscsi_cdblen = CDB_GROUP1; 17794 ucmd_buf.uscsi_bufaddr = (caddr_t)capacity_buf; 17795 ucmd_buf.uscsi_buflen = SD_CAPACITY_SIZE; 17796 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 17797 ucmd_buf.uscsi_rqlen = sizeof (sense_buf); 17798 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; 17799 ucmd_buf.uscsi_timeout = 60; 17800 17801 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 17802 UIO_SYSSPACE, path_flag); 17803 17804 switch (status) { 17805 case 0: 17806 /* Return failure if we did not get valid capacity data. */ 17807 if (ucmd_buf.uscsi_resid != 0) { 17808 kmem_free(capacity_buf, SD_CAPACITY_SIZE); 17809 return (EIO); 17810 } 17811 17812 /* 17813 * Read capacity and block size from the READ CAPACITY 10 data. 17814 * This data may be adjusted later due to device specific 17815 * issues. 17816 * 17817 * According to the SCSI spec, the READ CAPACITY 10 17818 * command returns the following: 17819 * 17820 * bytes 0-3: Maximum logical block address available. 17821 * (MSB in byte:0 & LSB in byte:3) 17822 * 17823 * bytes 4-7: Block length in bytes 17824 * (MSB in byte:4 & LSB in byte:7) 17825 * 17826 */ 17827 capacity = BE_32(capacity_buf[0]); 17828 lbasize = BE_32(capacity_buf[1]); 17829 17830 /* 17831 * Done with capacity_buf 17832 */ 17833 kmem_free(capacity_buf, SD_CAPACITY_SIZE); 17834 17835 /* 17836 * if the reported capacity is set to all 0xf's, then 17837 * this disk is too large and requires SBC-2 commands. 17838 * Reissue the request using READ CAPACITY 16. 17839 */ 17840 if (capacity == 0xffffffff) { 17841 status = sd_send_scsi_READ_CAPACITY_16(un, &capacity, 17842 &lbasize, path_flag); 17843 if (status != 0) { 17844 return (status); 17845 } 17846 } 17847 break; /* Success! */ 17848 case EIO: 17849 switch (ucmd_buf.uscsi_status) { 17850 case STATUS_RESERVATION_CONFLICT: 17851 status = EACCES; 17852 break; 17853 case STATUS_CHECK: 17854 /* 17855 * Check condition; look for ASC/ASCQ of 0x04/0x01 17856 * (LOGICAL UNIT IS IN PROCESS OF BECOMING READY) 17857 */ 17858 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 17859 (scsi_sense_asc((uint8_t *)&sense_buf) == 0x04) && 17860 (scsi_sense_ascq((uint8_t *)&sense_buf) == 0x01)) { 17861 kmem_free(capacity_buf, SD_CAPACITY_SIZE); 17862 return (EAGAIN); 17863 } 17864 break; 17865 default: 17866 break; 17867 } 17868 /* FALLTHRU */ 17869 default: 17870 kmem_free(capacity_buf, SD_CAPACITY_SIZE); 17871 return (status); 17872 } 17873 17874 /* 17875 * Some ATAPI CD-ROM drives report inaccurate LBA size values 17876 * (2352 and 0 are common) so for these devices always force the value 17877 * to 2048 as required by the ATAPI specs. 17878 */ 17879 if ((un->un_f_cfg_is_atapi == TRUE) && (ISCD(un))) { 17880 lbasize = 2048; 17881 } 17882 17883 /* 17884 * Get the maximum LBA value from the READ CAPACITY data. 17885 * Here we assume that the Partial Medium Indicator (PMI) bit 17886 * was cleared when issuing the command. This means that the LBA 17887 * returned from the device is the LBA of the last logical block 17888 * on the logical unit. The actual logical block count will be 17889 * this value plus one. 
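 *
 * As an illustration (hypothetical numbers), combined with the
 * un_sys_blocksize scaling described next: a returned maximum LBA
 * of 0x0003FFFF with a 2048-byte lbasize and a 512-byte
 * un_sys_blocksize yields (0x0003FFFF + 1) * (2048 / 512) =
 * 0x00100000 system blocks.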
17890 *
17891 * Currently the capacity is saved in terms of un->un_sys_blocksize,
17892 * so scale the capacity value to reflect this.
17893 */
17894 capacity = (capacity + 1) * (lbasize / un->un_sys_blocksize);
17895
17896 /*
17897 * Copy the values from the READ CAPACITY command into the space
17898 * provided by the caller.
17899 */
17900 *capp = capacity;
17901 *lbap = lbasize;
17902
17903 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_READ_CAPACITY: "
17904 "capacity:0x%llx lbasize:0x%x\n", capacity, lbasize);
17905
17906 /*
17907 * Both the lbasize and capacity from the device must be nonzero,
17908 * otherwise we assume that the values are not valid and return
17909 * failure to the caller. (4203735)
17910 */
17911 if ((capacity == 0) || (lbasize == 0)) {
17912 return (EIO);
17913 }
17914
17915 return (0);
17916 }
17917
17918 /*
17919 * Function: sd_send_scsi_READ_CAPACITY_16
17920 *
17921 * Description: This routine uses the scsi READ CAPACITY 16 command to
17922 * determine the device capacity in number of blocks and the
17923 * device native block size. If this function returns a failure,
17924 * then the values in *capp and *lbap are undefined.
17925 * This routine should always be called via
17926 * sd_send_scsi_READ_CAPACITY, which will apply any device-
17927 * specific adjustments to capacity and lbasize.
17928 *
17929 * Arguments: un - ptr to soft state struct for the target
17930 * capp - ptr to unsigned 64-bit variable to receive the
17931 * capacity value from the command.
17932 * lbap - ptr to unsigned 32-bit variable to receive the
17933 * block size value from the command
17934 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
17935 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY
17936 * to use the USCSI "direct" chain and bypass the normal
17937 * command waitq. SD_PATH_DIRECT_PRIORITY is used when
17938 * this command is issued as part of an error recovery
17939 * action.
17940 *
17941 * Return Code: 0 - Success
17942 * EIO - IO error
17943 * EACCES - Reservation conflict detected
17944 * EAGAIN - Device is becoming ready
17945 * errno return code from sd_send_scsi_cmd()
17946 *
17947 * Context: Can sleep. Blocks until command completes.
17948 */
17949
17950 #define SD_CAPACITY_16_SIZE sizeof (struct scsi_capacity_16)
17951
17952 static int
17953 sd_send_scsi_READ_CAPACITY_16(struct sd_lun *un, uint64_t *capp,
17954 uint32_t *lbap, int path_flag)
17955 {
17956 struct scsi_extended_sense sense_buf;
17957 struct uscsi_cmd ucmd_buf;
17958 union scsi_cdb cdb;
17959 uint64_t *capacity16_buf;
17960 uint64_t capacity;
17961 uint32_t lbasize;
17962 int status;
17963
17964 ASSERT(un != NULL);
17965 ASSERT(!mutex_owned(SD_MUTEX(un)));
17966 ASSERT(capp != NULL);
17967 ASSERT(lbap != NULL);
17968
17969 SD_TRACE(SD_LOG_IO, un,
17970 "sd_send_scsi_READ_CAPACITY_16: entry: un:0x%p\n", un);
17971
17972 /*
17973 * First send a READ_CAPACITY_16 command to the target.
17974 *
17975 * Set up the CDB for the READ_CAPACITY_16 command. The Partial
17976 * Medium Indicator bit is cleared. The address field must be
17977 * zero if the PMI bit is zero.
17978 */
17979 bzero(&cdb, sizeof (cdb));
17980 bzero(&ucmd_buf, sizeof (ucmd_buf));
17981
17982 capacity16_buf = kmem_zalloc(SD_CAPACITY_16_SIZE, KM_SLEEP);
17983
17984 ucmd_buf.uscsi_cdb = (char *)&cdb;
17985 ucmd_buf.uscsi_cdblen = CDB_GROUP4;
17986 ucmd_buf.uscsi_bufaddr = (caddr_t)capacity16_buf;
17987 ucmd_buf.uscsi_buflen = SD_CAPACITY_16_SIZE;
17988 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
17989 ucmd_buf.uscsi_rqlen = sizeof (sense_buf);
17990 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT;
17991 ucmd_buf.uscsi_timeout = 60;
17992
17993 /*
17994 * Read Capacity (16) is a Service Action In command. One
17995 * command byte (0x9E) is overloaded for multiple operations,
17996 * with the second CDB byte specifying the desired operation.
17997 */
17998 cdb.scc_cmd = SCMD_SVC_ACTION_IN_G4;
17999 cdb.cdb_opaque[1] = SSVC_ACTION_READ_CAPACITY_G4;
18000
18001 /*
18002 * Fill in the allocation length field.
18003 */
18004 FORMG4COUNT(&cdb, ucmd_buf.uscsi_buflen);
18005
18006 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL,
18007 UIO_SYSSPACE, path_flag);
18008
18009 switch (status) {
18010 case 0:
18011 /* Return failure if we did not get valid capacity data. */
18012 if (ucmd_buf.uscsi_resid > 20) {
18013 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE);
18014 return (EIO);
18015 }
18016
18017 /*
18018 * Read the capacity and block size from the READ CAPACITY 16
18019 * data. This data may be adjusted later due to device-specific
18020 * issues.
18021 *
18022 * According to the SCSI spec, the READ CAPACITY 16
18023 * command returns the following:
18024 *
18025 * bytes 0-7: Maximum logical block address available.
18026 * (MSB in byte:0 & LSB in byte:7)
18027 *
18028 * bytes 8-11: Block length in bytes
18029 * (MSB in byte:8 & LSB in byte:11)
18030 *
18031 */
18032 capacity = BE_64(capacity16_buf[0]);
18033 lbasize = BE_32(*(uint32_t *)&capacity16_buf[1]);
18034
18035 /*
18036 * Done with capacity16_buf
18037 */
18038 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE);
18039
18040 /*
18041 * if the reported capacity is set to all 0xf's, then
18042 * this disk is too large. This could only happen with
18043 * a device that supports LBAs larger than 64 bits which
18044 * are not defined by any current T10 standards.
18045 */
18046 if (capacity == 0xffffffffffffffff) {
18047 return (EIO);
18048 }
18049 break; /* Success! */
18050 case EIO:
18051 switch (ucmd_buf.uscsi_status) {
18052 case STATUS_RESERVATION_CONFLICT:
18053 status = EACCES;
18054 break;
18055 case STATUS_CHECK:
18056 /*
18057 * Check condition; look for ASC/ASCQ of 0x04/0x01
18058 * (LOGICAL UNIT IS IN PROCESS OF BECOMING READY)
18059 */
18060 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) &&
18061 (scsi_sense_asc((uint8_t *)&sense_buf) == 0x04) &&
18062 (scsi_sense_ascq((uint8_t *)&sense_buf) == 0x01)) {
18063 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE);
18064 return (EAGAIN);
18065 }
18066 break;
18067 default:
18068 break;
18069 }
18070 /* FALLTHRU */
18071 default:
18072 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE);
18073 return (status);
18074 }
18075
18076 *capp = capacity;
18077 *lbap = lbasize;
18078
18079 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_READ_CAPACITY_16: "
18080 "capacity:0x%llx lbasize:0x%x\n", capacity, lbasize);
18081
18082 return (0);
18083 }
18084
18085
18086 /*
18087 * Function: sd_send_scsi_START_STOP_UNIT
18088 *
18089 * Description: Issue a scsi START STOP UNIT command to the target.
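 *		The requested operation (start, stop, or eject) is
 *		passed to the device in byte 4 of the CDB, as set up
 *		below.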
18090 * 18091 * Arguments: un - pointer to driver soft state (unit) structure for 18092 * this target. 18093 * flag - SD_TARGET_START 18094 * SD_TARGET_STOP 18095 * SD_TARGET_EJECT 18096 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 18097 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 18098 * to use the USCSI "direct" chain and bypass the normal 18099 * command waitq. SD_PATH_DIRECT_PRIORITY is used when this 18100 * command is issued as part of an error recovery action. 18101 * 18102 * Return Code: 0 - Success 18103 * EIO - IO error 18104 * EACCES - Reservation conflict detected 18105 * ENXIO - Not Ready, medium not present 18106 * errno return code from sd_send_scsi_cmd() 18107 * 18108 * Context: Can sleep. 18109 */ 18110 18111 static int 18112 sd_send_scsi_START_STOP_UNIT(struct sd_lun *un, int flag, int path_flag) 18113 { 18114 struct scsi_extended_sense sense_buf; 18115 union scsi_cdb cdb; 18116 struct uscsi_cmd ucmd_buf; 18117 int status; 18118 18119 ASSERT(un != NULL); 18120 ASSERT(!mutex_owned(SD_MUTEX(un))); 18121 18122 SD_TRACE(SD_LOG_IO, un, 18123 "sd_send_scsi_START_STOP_UNIT: entry: un:0x%p\n", un); 18124 18125 if (un->un_f_check_start_stop && 18126 ((flag == SD_TARGET_START) || (flag == SD_TARGET_STOP)) && 18127 (un->un_f_start_stop_supported != TRUE)) { 18128 return (0); 18129 } 18130 18131 /* 18132 * If we are performing an eject operation and 18133 * we receive any command other than SD_TARGET_EJECT 18134 * we should immediately return. 18135 */ 18136 if (flag != SD_TARGET_EJECT) { 18137 mutex_enter(SD_MUTEX(un)); 18138 if (un->un_f_ejecting == TRUE) { 18139 mutex_exit(SD_MUTEX(un)); 18140 return (EAGAIN); 18141 } 18142 mutex_exit(SD_MUTEX(un)); 18143 } 18144 18145 bzero(&cdb, sizeof (cdb)); 18146 bzero(&ucmd_buf, sizeof (ucmd_buf)); 18147 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 18148 18149 cdb.scc_cmd = SCMD_START_STOP; 18150 cdb.cdb_opaque[4] = (uchar_t)flag; 18151 18152 ucmd_buf.uscsi_cdb = (char *)&cdb; 18153 ucmd_buf.uscsi_cdblen = CDB_GROUP0; 18154 ucmd_buf.uscsi_bufaddr = NULL; 18155 ucmd_buf.uscsi_buflen = 0; 18156 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 18157 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 18158 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT; 18159 ucmd_buf.uscsi_timeout = 200; 18160 18161 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 18162 UIO_SYSSPACE, path_flag); 18163 18164 switch (status) { 18165 case 0: 18166 break; /* Success! */ 18167 case EIO: 18168 switch (ucmd_buf.uscsi_status) { 18169 case STATUS_RESERVATION_CONFLICT: 18170 status = EACCES; 18171 break; 18172 case STATUS_CHECK: 18173 if (ucmd_buf.uscsi_rqstatus == STATUS_GOOD) { 18174 switch (scsi_sense_key( 18175 (uint8_t *)&sense_buf)) { 18176 case KEY_ILLEGAL_REQUEST: 18177 status = ENOTSUP; 18178 break; 18179 case KEY_NOT_READY: 18180 if (scsi_sense_asc( 18181 (uint8_t *)&sense_buf) 18182 == 0x3A) { 18183 status = ENXIO; 18184 } 18185 break; 18186 default: 18187 break; 18188 } 18189 } 18190 break; 18191 default: 18192 break; 18193 } 18194 break; 18195 default: 18196 break; 18197 } 18198 18199 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_START_STOP_UNIT: exit\n"); 18200 18201 return (status); 18202 } 18203 18204 18205 /* 18206 * Function: sd_start_stop_unit_callback 18207 * 18208 * Description: timeout(9F) callback to begin recovery process for a 18209 * device that has spun down. 18210 * 18211 * Arguments: arg - pointer to associated softstate struct. 
18212 * 18213 * Context: Executes in a timeout(9F) thread context 18214 */ 18215 18216 static void 18217 sd_start_stop_unit_callback(void *arg) 18218 { 18219 struct sd_lun *un = arg; 18220 ASSERT(un != NULL); 18221 ASSERT(!mutex_owned(SD_MUTEX(un))); 18222 18223 SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_callback: entry\n"); 18224 18225 (void) taskq_dispatch(sd_tq, sd_start_stop_unit_task, un, KM_NOSLEEP); 18226 } 18227 18228 18229 /* 18230 * Function: sd_start_stop_unit_task 18231 * 18232 * Description: Recovery procedure when a drive is spun down. 18233 * 18234 * Arguments: arg - pointer to associated softstate struct. 18235 * 18236 * Context: Executes in a taskq() thread context 18237 */ 18238 18239 static void 18240 sd_start_stop_unit_task(void *arg) 18241 { 18242 struct sd_lun *un = arg; 18243 18244 ASSERT(un != NULL); 18245 ASSERT(!mutex_owned(SD_MUTEX(un))); 18246 18247 SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_task: entry\n"); 18248 18249 /* 18250 * Some unformatted drives report not ready error, no need to 18251 * restart if format has been initiated. 18252 */ 18253 mutex_enter(SD_MUTEX(un)); 18254 if (un->un_f_format_in_progress == TRUE) { 18255 mutex_exit(SD_MUTEX(un)); 18256 return; 18257 } 18258 mutex_exit(SD_MUTEX(un)); 18259 18260 /* 18261 * When a START STOP command is issued from here, it is part of a 18262 * failure recovery operation and must be issued before any other 18263 * commands, including any pending retries. Thus it must be sent 18264 * using SD_PATH_DIRECT_PRIORITY. It doesn't matter if the spin up 18265 * succeeds or not, we will start I/O after the attempt. 18266 */ 18267 (void) sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_START, 18268 SD_PATH_DIRECT_PRIORITY); 18269 18270 /* 18271 * The above call blocks until the START_STOP_UNIT command completes. 18272 * Now that it has completed, we must re-try the original IO that 18273 * received the NOT READY condition in the first place. There are 18274 * three possible conditions here: 18275 * 18276 * (1) The original IO is on un_retry_bp. 18277 * (2) The original IO is on the regular wait queue, and un_retry_bp 18278 * is NULL. 18279 * (3) The original IO is on the regular wait queue, and un_retry_bp 18280 * points to some other, unrelated bp. 18281 * 18282 * For each case, we must call sd_start_cmds() with un_retry_bp 18283 * as the argument. If un_retry_bp is NULL, this will initiate 18284 * processing of the regular wait queue. If un_retry_bp is not NULL, 18285 * then this will process the bp on un_retry_bp. That may or may not 18286 * be the original IO, but that does not matter: the important thing 18287 * is to keep the IO processing going at this point. 18288 * 18289 * Note: This is a very specific error recovery sequence associated 18290 * with a drive that is not spun up. We attempt a START_STOP_UNIT and 18291 * serialize the I/O with completion of the spin-up. 18292 */ 18293 mutex_enter(SD_MUTEX(un)); 18294 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 18295 "sd_start_stop_unit_task: un:0x%p starting bp:0x%p\n", 18296 un, un->un_retry_bp); 18297 un->un_startstop_timeid = NULL; /* Timeout is no longer pending */ 18298 sd_start_cmds(un, un->un_retry_bp); 18299 mutex_exit(SD_MUTEX(un)); 18300 18301 SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_task: exit\n"); 18302 } 18303 18304 18305 /* 18306 * Function: sd_send_scsi_INQUIRY 18307 * 18308 * Description: Issue the scsi INQUIRY command. 
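 *		(Illustrative summary of the CDB setup below:) the evpd
 *		argument lands in CDB byte 1 and page_code in CDB byte 2,
 *		so a caller can fetch a vital product data page (e.g. 0x80
 *		unit serial number, 0x83 device identification) by passing
 *		evpd = 1 and the desired page code.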
18309 * 18310 * Arguments: un 18311 * bufaddr 18312 * buflen 18313 * evpd 18314 * page_code 18315 * residp - ptr to receive the command residual (may be NULL) 18316 * 18317 * Return Code: 0 - Success 18318 * errno return code from sd_send_scsi_cmd() 18319 * 18320 * Context: Can sleep. Does not return until command is completed. 18321 */ 18322 18323 static int 18324 sd_send_scsi_INQUIRY(struct sd_lun *un, uchar_t *bufaddr, size_t buflen, 18325 uchar_t evpd, uchar_t page_code, size_t *residp) 18326 { 18327 union scsi_cdb cdb; 18328 struct uscsi_cmd ucmd_buf; 18329 int status; 18330 18331 ASSERT(un != NULL); 18332 ASSERT(!mutex_owned(SD_MUTEX(un))); 18333 ASSERT(bufaddr != NULL); 18334 18335 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_INQUIRY: entry: un:0x%p\n", un); 18336 18337 bzero(&cdb, sizeof (cdb)); 18338 bzero(&ucmd_buf, sizeof (ucmd_buf)); 18339 bzero(bufaddr, buflen); 18340 18341 cdb.scc_cmd = SCMD_INQUIRY; 18342 cdb.cdb_opaque[1] = evpd; 18343 cdb.cdb_opaque[2] = page_code; 18344 FORMG0COUNT(&cdb, buflen); 18345 18346 ucmd_buf.uscsi_cdb = (char *)&cdb; 18347 ucmd_buf.uscsi_cdblen = CDB_GROUP0; 18348 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr; 18349 ucmd_buf.uscsi_buflen = buflen; 18350 ucmd_buf.uscsi_rqbuf = NULL; 18351 ucmd_buf.uscsi_rqlen = 0; 18352 ucmd_buf.uscsi_flags = USCSI_READ | USCSI_SILENT; 18353 ucmd_buf.uscsi_timeout = 200; /* Excessive legacy value */ 18354 18355 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 18356 UIO_SYSSPACE, SD_PATH_DIRECT); 18357 18358 if ((status == 0) && (residp != NULL)) { 18359 *residp = ucmd_buf.uscsi_resid; 18360 } 18361 18362 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_INQUIRY: exit\n"); 18363 18364 return (status); 18365 } 18366 18367 18368 /* 18369 * Function: sd_send_scsi_TEST_UNIT_READY 18370 * 18371 * Description: Issue the scsi TEST UNIT READY command. 18372 * This routine can be told to set the flag USCSI_DIAGNOSE to 18373 * prevent retrying failed commands. Use this when the intent 18374 * is either to check for device readiness, to clear a Unit 18375 * Attention, or to clear any outstanding sense data. 18376 * However, under specific conditions the expected behavior 18377 * is for retries to bring a device ready, so use the flag 18378 * with caution. 18379 * 18380 * Arguments: un 18381 * flag: SD_CHECK_FOR_MEDIA: return ENXIO if no media present 18382 * SD_DONT_RETRY_TUR: include uscsi flag USCSI_DIAGNOSE. 18383 * 0: don't check for media present, do retries on cmd. 18384 * 18385 * Return Code: 0 - Success 18386 * EIO - IO error 18387 * EACCES - Reservation conflict detected 18388 * ENXIO - Not Ready, medium not present 18389 * errno return code from sd_send_scsi_cmd() 18390 * 18391 * Context: Can sleep. Does not return until command is completed. 18392 */ 18393 18394 static int 18395 sd_send_scsi_TEST_UNIT_READY(struct sd_lun *un, int flag) 18396 { 18397 struct scsi_extended_sense sense_buf; 18398 union scsi_cdb cdb; 18399 struct uscsi_cmd ucmd_buf; 18400 int status; 18401 18402 ASSERT(un != NULL); 18403 ASSERT(!mutex_owned(SD_MUTEX(un))); 18404 18405 SD_TRACE(SD_LOG_IO, un, 18406 "sd_send_scsi_TEST_UNIT_READY: entry: un:0x%p\n", un); 18407 18408 /* 18409 * Some Seagate elite1 TQ devices get hung with disconnect/reconnect 18410 * timeouts when they receive a TUR and the queue is not empty. Check 18411 * the configuration flag set during attach (indicating the drive has 18412 * this firmware bug) and un_ncmds_in_transport before issuing the 18413 * TUR. If there are 18414 * pending commands, return success; this is a bit arbitrary, but it 18415 * is acceptable for non-removables (i.e.
the elite1 disks) and non-clustering 18416 * configurations. 18417 */ 18418 if (un->un_f_cfg_tur_check == TRUE) { 18419 mutex_enter(SD_MUTEX(un)); 18420 if (un->un_ncmds_in_transport != 0) { 18421 mutex_exit(SD_MUTEX(un)); 18422 return (0); 18423 } 18424 mutex_exit(SD_MUTEX(un)); 18425 } 18426 18427 bzero(&cdb, sizeof (cdb)); 18428 bzero(&ucmd_buf, sizeof (ucmd_buf)); 18429 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 18430 18431 cdb.scc_cmd = SCMD_TEST_UNIT_READY; 18432 18433 ucmd_buf.uscsi_cdb = (char *)&cdb; 18434 ucmd_buf.uscsi_cdblen = CDB_GROUP0; 18435 ucmd_buf.uscsi_bufaddr = NULL; 18436 ucmd_buf.uscsi_buflen = 0; 18437 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 18438 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 18439 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT; 18440 18441 /* Use flag USCSI_DIAGNOSE to prevent retries if it fails. */ 18442 if ((flag & SD_DONT_RETRY_TUR) != 0) { 18443 ucmd_buf.uscsi_flags |= USCSI_DIAGNOSE; 18444 } 18445 ucmd_buf.uscsi_timeout = 60; 18446 18447 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 18448 UIO_SYSSPACE, ((flag & SD_BYPASS_PM) ? SD_PATH_DIRECT : 18449 SD_PATH_STANDARD)); 18450 18451 switch (status) { 18452 case 0: 18453 break; /* Success! */ 18454 case EIO: 18455 switch (ucmd_buf.uscsi_status) { 18456 case STATUS_RESERVATION_CONFLICT: 18457 status = EACCES; 18458 break; 18459 case STATUS_CHECK: 18460 if ((flag & SD_CHECK_FOR_MEDIA) == 0) { 18461 break; 18462 } 18463 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 18464 (scsi_sense_key((uint8_t *)&sense_buf) == 18465 KEY_NOT_READY) && 18466 (scsi_sense_asc((uint8_t *)&sense_buf) == 0x3A)) { 18467 status = ENXIO; 18468 } 18469 break; 18470 default: 18471 break; 18472 } 18473 break; 18474 default: 18475 break; 18476 } 18477 18478 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_TEST_UNIT_READY: exit\n"); 18479 18480 return (status); 18481 } 18482 18483 18484 /* 18485 * Function: sd_send_scsi_PERSISTENT_RESERVE_IN 18486 * 18487 * Description: Issue the scsi PERSISTENT RESERVE IN command. 18488 * 18489 * Arguments: un - pointer to soft state struct for the target 18490 * usr_cmd - SD_READ_KEYS or SD_READ_RESV (the PRIN service 18490 * action, placed in CDB byte 1) 18490 * data_len - length of the data_bufp buffer 18490 * data_bufp - buffer to receive the PRIN data; if NULL, a 18490 * default key-sized buffer is allocated internally 18491 * 18491 * Return Code: 0 - Success 18492 * EACCES 18493 * ENOTSUP 18494 * errno return code from sd_send_scsi_cmd() 18495 * 18496 * Context: Can sleep. Does not return until command is completed.
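 *
 *		Illustrative sketch (not part of the driver): a caller
 *		retrieving the registered keys could parse the READ KEYS
 *		data returned in data_bufp, assuming the SPC-3 layout of a
 *		4-byte PR generation, a 4-byte additional length, and then
 *		a list of 8-byte keys:
 *
 *			uchar_t buf[256];
 *			if (sd_send_scsi_PERSISTENT_RESERVE_IN(un,
 *			    SD_READ_KEYS, sizeof (buf), buf) == 0) {
 *				uint32_t len = BE_32(*(uint32_t *)&buf[4]);
 *				uchar_t *key = &buf[8];
 *				-- len / MHIOC_RESV_KEY_SIZE keys follow --
 *			}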
18497 */ 18498 18499 static int 18500 sd_send_scsi_PERSISTENT_RESERVE_IN(struct sd_lun *un, uchar_t usr_cmd, 18501 uint16_t data_len, uchar_t *data_bufp) 18502 { 18503 struct scsi_extended_sense sense_buf; 18504 union scsi_cdb cdb; 18505 struct uscsi_cmd ucmd_buf; 18506 int status; 18507 int no_caller_buf = FALSE; 18508 18509 ASSERT(un != NULL); 18510 ASSERT(!mutex_owned(SD_MUTEX(un))); 18511 ASSERT((usr_cmd == SD_READ_KEYS) || (usr_cmd == SD_READ_RESV)); 18512 18513 SD_TRACE(SD_LOG_IO, un, 18514 "sd_send_scsi_PERSISTENT_RESERVE_IN: entry: un:0x%p\n", un); 18515 18516 bzero(&cdb, sizeof (cdb)); 18517 bzero(&ucmd_buf, sizeof (ucmd_buf)); 18518 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 18519 if (data_bufp == NULL) { 18520 /* Allocate a default buf if the caller did not give one */ 18521 ASSERT(data_len == 0); 18522 data_len = MHIOC_RESV_KEY_SIZE; 18523 data_bufp = kmem_zalloc(MHIOC_RESV_KEY_SIZE, KM_SLEEP); 18524 no_caller_buf = TRUE; 18525 } 18526 18527 cdb.scc_cmd = SCMD_PERSISTENT_RESERVE_IN; 18528 cdb.cdb_opaque[1] = usr_cmd; 18529 FORMG1COUNT(&cdb, data_len); 18530 18531 ucmd_buf.uscsi_cdb = (char *)&cdb; 18532 ucmd_buf.uscsi_cdblen = CDB_GROUP1; 18533 ucmd_buf.uscsi_bufaddr = (caddr_t)data_bufp; 18534 ucmd_buf.uscsi_buflen = data_len; 18535 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 18536 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 18537 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; 18538 ucmd_buf.uscsi_timeout = 60; 18539 18540 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 18541 UIO_SYSSPACE, SD_PATH_STANDARD); 18542 18543 switch (status) { 18544 case 0: 18545 break; /* Success! */ 18546 case EIO: 18547 switch (ucmd_buf.uscsi_status) { 18548 case STATUS_RESERVATION_CONFLICT: 18549 status = EACCES; 18550 break; 18551 case STATUS_CHECK: 18552 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 18553 (scsi_sense_key((uint8_t *)&sense_buf) == 18554 KEY_ILLEGAL_REQUEST)) { 18555 status = ENOTSUP; 18556 } 18557 break; 18558 default: 18559 break; 18560 } 18561 break; 18562 default: 18563 break; 18564 } 18565 18566 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_PERSISTENT_RESERVE_IN: exit\n"); 18567 18568 if (no_caller_buf == TRUE) { 18569 kmem_free(data_bufp, data_len); 18570 } 18571 18572 return (status); 18573 } 18574 18575 18576 /* 18577 * Function: sd_send_scsi_PERSISTENT_RESERVE_OUT 18578 * 18579 * Description: This routine is the driver entry point for handling 18580 * multi-host persistent reservation requests (MHIOCGRP_REGISTER, 18581 * MHIOCGRP_RESERVE, MHIOCGRP_PREEMPTANDABORT) by sending the 18582 * SCSI-3 PROUT commands to the device. 18583 * 18584 * Arguments: un - Pointer to soft state struct for the target. 18585 * usr_cmd SCSI-3 reservation facility command (one of 18586 * SD_SCSI3_REGISTER, SD_SCSI3_RESERVE, SD_SCSI3_RELEASE, 18587 * SD_SCSI3_PREEMPTANDABORT, SD_SCSI3_REGISTERANDIGNOREKEY) 18588 * usr_bufp - user provided pointer to a register, reserve descriptor, 18589 * or preempt and abort structure (mhioc_register_t, 18590 * mhioc_resv_desc_t, mhioc_preemptandabort_t) 18591 * 18592 * Return Code: 0 - Success 18593 * EACCES 18594 * ENOTSUP 18595 * errno return code from sd_send_scsi_cmd() 18596 * 18597 * Context: Can sleep. Does not return until command is completed.
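 *
 *		Illustrative note, summarized from the code below and the
 *		SPC-3 PROUT parameter list: the 24-byte sd_prout_t carries
 *		an 8-byte reservation key, an 8-byte service action
 *		(new/victim) key, a 4-byte scope-specific address and the
 *		APTPL bit, while the scope/type of the reservation travels
 *		in byte 2 of the CDB rather than in the data.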
18598 */ 18599 18600 static int 18601 sd_send_scsi_PERSISTENT_RESERVE_OUT(struct sd_lun *un, uchar_t usr_cmd, 18602 uchar_t *usr_bufp) 18603 { 18604 struct scsi_extended_sense sense_buf; 18605 union scsi_cdb cdb; 18606 struct uscsi_cmd ucmd_buf; 18607 int status; 18608 uchar_t data_len = sizeof (sd_prout_t); 18609 sd_prout_t *prp; 18610 18611 ASSERT(un != NULL); 18612 ASSERT(!mutex_owned(SD_MUTEX(un))); 18613 ASSERT(data_len == 24); /* required by scsi spec */ 18614 18615 SD_TRACE(SD_LOG_IO, un, 18616 "sd_send_scsi_PERSISTENT_RESERVE_OUT: entry: un:0x%p\n", un); 18617 18618 if (usr_bufp == NULL) { 18619 return (EINVAL); 18620 } 18621 18622 bzero(&cdb, sizeof (cdb)); 18623 bzero(&ucmd_buf, sizeof (ucmd_buf)); 18624 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 18625 prp = kmem_zalloc(data_len, KM_SLEEP); 18626 18627 cdb.scc_cmd = SCMD_PERSISTENT_RESERVE_OUT; 18628 cdb.cdb_opaque[1] = usr_cmd; 18629 FORMG1COUNT(&cdb, data_len); 18630 18631 ucmd_buf.uscsi_cdb = (char *)&cdb; 18632 ucmd_buf.uscsi_cdblen = CDB_GROUP1; 18633 ucmd_buf.uscsi_bufaddr = (caddr_t)prp; 18634 ucmd_buf.uscsi_buflen = data_len; 18635 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 18636 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 18637 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_WRITE | USCSI_SILENT; 18638 ucmd_buf.uscsi_timeout = 60; 18639 18640 switch (usr_cmd) { 18641 case SD_SCSI3_REGISTER: { 18642 mhioc_register_t *ptr = (mhioc_register_t *)usr_bufp; 18643 18644 bcopy(ptr->oldkey.key, prp->res_key, MHIOC_RESV_KEY_SIZE); 18645 bcopy(ptr->newkey.key, prp->service_key, 18646 MHIOC_RESV_KEY_SIZE); 18647 prp->aptpl = ptr->aptpl; 18648 break; 18649 } 18650 case SD_SCSI3_RESERVE: 18651 case SD_SCSI3_RELEASE: { 18652 mhioc_resv_desc_t *ptr = (mhioc_resv_desc_t *)usr_bufp; 18653 18654 bcopy(ptr->key.key, prp->res_key, MHIOC_RESV_KEY_SIZE); 18655 prp->scope_address = BE_32(ptr->scope_specific_addr); 18656 cdb.cdb_opaque[2] = ptr->type; 18657 break; 18658 } 18659 case SD_SCSI3_PREEMPTANDABORT: { 18660 mhioc_preemptandabort_t *ptr = 18661 (mhioc_preemptandabort_t *)usr_bufp; 18662 18663 bcopy(ptr->resvdesc.key.key, prp->res_key, MHIOC_RESV_KEY_SIZE); 18664 bcopy(ptr->victim_key.key, prp->service_key, 18665 MHIOC_RESV_KEY_SIZE); 18666 prp->scope_address = BE_32(ptr->resvdesc.scope_specific_addr); 18667 cdb.cdb_opaque[2] = ptr->resvdesc.type; 18668 ucmd_buf.uscsi_flags |= USCSI_HEAD; 18669 break; 18670 } 18671 case SD_SCSI3_REGISTERANDIGNOREKEY: 18672 { 18673 mhioc_registerandignorekey_t *ptr; 18674 ptr = (mhioc_registerandignorekey_t *)usr_bufp; 18675 bcopy(ptr->newkey.key, 18676 prp->service_key, MHIOC_RESV_KEY_SIZE); 18677 prp->aptpl = ptr->aptpl; 18678 break; 18679 } 18680 default: 18681 ASSERT(FALSE); 18682 break; 18683 } 18684 18685 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 18686 UIO_SYSSPACE, SD_PATH_STANDARD); 18687 18688 switch (status) { 18689 case 0: 18690 break; /* Success! 
*/ 18691 case EIO: 18692 switch (ucmd_buf.uscsi_status) { 18693 case STATUS_RESERVATION_CONFLICT: 18694 status = EACCES; 18695 break; 18696 case STATUS_CHECK: 18697 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 18698 (scsi_sense_key((uint8_t *)&sense_buf) == 18699 KEY_ILLEGAL_REQUEST)) { 18700 status = ENOTSUP; 18701 } 18702 break; 18703 default: 18704 break; 18705 } 18706 break; 18707 default: 18708 break; 18709 } 18710 18711 kmem_free(prp, data_len); 18712 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_PERSISTENT_RESERVE_OUT: exit\n"); 18713 return (status); 18714 } 18715 18716 18717 /* 18718 * Function: sd_send_scsi_SYNCHRONIZE_CACHE 18719 * 18720 * Description: Issues a scsi SYNCHRONIZE CACHE command to the target 18721 * 18722 * Arguments: un - pointer to the target's soft state struct 18723 * dkc - pointer to the callback structure 18724 * 18725 * Return Code: 0 - success 18726 * errno-type error code 18727 * 18728 * Context: kernel thread context only. 18729 * 18730 * _______________________________________________________________ 18731 * | dkc_flag & | dkc_callback | DKIOCFLUSHWRITECACHE | 18732 * |FLUSH_VOLATILE| | operation | 18733 * |______________|______________|_________________________________| 18734 * | 0 | NULL | Synchronous flush on both | 18735 * | | | volatile and non-volatile cache | 18736 * |______________|______________|_________________________________| 18737 * | 1 | NULL | Synchronous flush on volatile | 18738 * | | | cache; disk drivers may suppress| 18739 * | | | flush if disk table indicates | 18740 * | | | non-volatile cache | 18741 * |______________|______________|_________________________________| 18742 * | 0 | !NULL | Asynchronous flush on both | 18743 * | | | volatile and non-volatile cache;| 18744 * |______________|______________|_________________________________| 18745 * | 1 | !NULL | Asynchronous flush on volatile | 18746 * | | | cache; disk drivers may suppress| 18747 * | | | flush if disk table indicates | 18748 * | | | non-volatile cache | 18749 * |______________|______________|_________________________________| 18750 * 18751 */ 18752 18753 static int 18754 sd_send_scsi_SYNCHRONIZE_CACHE(struct sd_lun *un, struct dk_callback *dkc) 18755 { 18756 struct sd_uscsi_info *uip; 18757 struct uscsi_cmd *uscmd; 18758 union scsi_cdb *cdb; 18759 struct buf *bp; 18760 int rval = 0; 18761 int is_async; 18762 18763 SD_TRACE(SD_LOG_IO, un, 18764 "sd_send_scsi_SYNCHRONIZE_CACHE: entry: un:0x%p\n", un); 18765 18766 ASSERT(un != NULL); 18767 ASSERT(!mutex_owned(SD_MUTEX(un))); 18768 18769 if (dkc == NULL || dkc->dkc_callback == NULL) { 18770 is_async = FALSE; 18771 } else { 18772 is_async = TRUE; 18773 } 18774 18775 mutex_enter(SD_MUTEX(un)); 18776 /* check whether cache flush should be suppressed */ 18777 if (un->un_f_suppress_cache_flush == TRUE) { 18778 mutex_exit(SD_MUTEX(un)); 18779 /* 18780 * suppress the cache flush if the device is told to do 18781 * so by sd.conf or disk table 18782 */ 18783 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_SYNCHRONIZE_CACHE: \ 18784 skip the cache flush since suppress_cache_flush is %d!\n", 18785 un->un_f_suppress_cache_flush); 18786 18787 if (is_async == TRUE) { 18788 /* invoke callback for asynchronous flush */ 18789 (*dkc->dkc_callback)(dkc->dkc_cookie, 0); 18790 } 18791 return (rval); 18792 } 18793 mutex_exit(SD_MUTEX(un)); 18794 18795 /* 18796 * check dkc_flag & FLUSH_VOLATILE so SYNC_NV bit can be 18797 * set properly 18798 */ 18799 cdb = kmem_zalloc(CDB_GROUP1, KM_SLEEP); 18800 cdb->scc_cmd = SCMD_SYNCHRONIZE_CACHE; 18801 18802 
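	/*
	 * Illustrative note: SD_SYNC_NV_BIT below corresponds to the
	 * SYNC_NV bit of the SYNCHRONIZE CACHE(10) CDB (bit 2 of byte 1
	 * in SBC-2, which cdb_un.tag overlays). When it is set, the
	 * target need only flush the volatile portion of its cache.
	 */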
mutex_enter(SD_MUTEX(un)); 18803 if (dkc != NULL && un->un_f_sync_nv_supported && 18804 (dkc->dkc_flag & FLUSH_VOLATILE)) { 18805 /* 18806 * If the device supports the SYNC_NV bit, turn it on 18807 * to flush only the volatile cache 18808 */ 18809 cdb->cdb_un.tag |= SD_SYNC_NV_BIT; 18810 } 18811 mutex_exit(SD_MUTEX(un)); 18812 18813 /* 18814 * Get some memory for the uscsi_cmd struct and initialize it 18815 * for the SYNCHRONIZE_CACHE cmd (the cdb was allocated above). 18816 */ 18817 uscmd = kmem_zalloc(sizeof (struct uscsi_cmd), KM_SLEEP); 18818 uscmd->uscsi_cdblen = CDB_GROUP1; 18819 uscmd->uscsi_cdb = (caddr_t)cdb; 18820 uscmd->uscsi_bufaddr = NULL; 18821 uscmd->uscsi_buflen = 0; 18822 uscmd->uscsi_rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 18823 uscmd->uscsi_rqlen = SENSE_LENGTH; 18824 uscmd->uscsi_rqresid = SENSE_LENGTH; 18825 uscmd->uscsi_flags = USCSI_RQENABLE | USCSI_SILENT; 18826 uscmd->uscsi_timeout = sd_io_time; 18827 18828 /* 18829 * Allocate an sd_uscsi_info struct and fill it with the info 18830 * needed by sd_initpkt_for_uscsi(). Then put the pointer into 18831 * b_private in the buf for sd_initpkt_for_uscsi(). Note that 18832 * since we allocate the buf here in this function, we do not 18833 * need to preserve the prior contents of b_private. 18834 * The sd_uscsi_info struct is also used by sd_uscsi_strategy() 18835 */ 18836 uip = kmem_zalloc(sizeof (struct sd_uscsi_info), KM_SLEEP); 18837 uip->ui_flags = SD_PATH_DIRECT; 18838 uip->ui_cmdp = uscmd; 18839 18840 bp = getrbuf(KM_SLEEP); 18841 bp->b_private = uip; 18842 18843 /* 18844 * Setup buffer to carry uscsi request. 18845 */ 18846 bp->b_flags = B_BUSY; 18847 bp->b_bcount = 0; 18848 bp->b_blkno = 0; 18849 18850 if (is_async == TRUE) { 18851 bp->b_iodone = sd_send_scsi_SYNCHRONIZE_CACHE_biodone; 18852 uip->ui_dkc = *dkc; 18853 } 18854 18855 bp->b_edev = SD_GET_DEV(un); 18856 bp->b_dev = cmpdev(bp->b_edev); /* maybe unnecessary? */ 18857 18858 (void) sd_uscsi_strategy(bp); 18859 18860 /* 18861 * If this is a synchronous request, wait for completion; 18862 * if it is async, just return and let the b_iodone callback 18863 * clean up. 18864 * NOTE: On return, un_ncmds_in_driver will be decremented, 18865 * but it was also incremented in sd_uscsi_strategy(), so 18866 * we should be ok. 18867 */ 18868 if (is_async == FALSE) { 18869 (void) biowait(bp); 18870 rval = sd_send_scsi_SYNCHRONIZE_CACHE_biodone(bp); 18871 } 18872 18873 return (rval); 18874 } 18875 18876 18877 static int 18878 sd_send_scsi_SYNCHRONIZE_CACHE_biodone(struct buf *bp) 18879 { 18880 struct sd_uscsi_info *uip; 18881 struct uscsi_cmd *uscmd; 18882 uint8_t *sense_buf; 18883 struct sd_lun *un; 18884 int status; 18885 union scsi_cdb *cdb; 18886 18887 uip = (struct sd_uscsi_info *)(bp->b_private); 18888 ASSERT(uip != NULL); 18889 18890 uscmd = uip->ui_cmdp; 18891 ASSERT(uscmd != NULL); 18892 18893 sense_buf = (uint8_t *)uscmd->uscsi_rqbuf; 18894 ASSERT(sense_buf != NULL); 18895 18896 un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp)); 18897 ASSERT(un != NULL); 18898 18899 cdb = (union scsi_cdb *)uscmd->uscsi_cdb; 18900 18901 status = geterror(bp); 18902 switch (status) { 18903 case 0: 18904 break; /* Success!
*/ 18905 case EIO: 18906 switch (uscmd->uscsi_status) { 18907 case STATUS_RESERVATION_CONFLICT: 18908 /* Ignore reservation conflict */ 18909 status = 0; 18910 goto done; 18911 18912 case STATUS_CHECK: 18913 if ((uscmd->uscsi_rqstatus == STATUS_GOOD) && 18914 (scsi_sense_key(sense_buf) == 18915 KEY_ILLEGAL_REQUEST)) { 18916 /* Ignore Illegal Request error */ 18917 if (cdb->cdb_un.tag & SD_SYNC_NV_BIT) { 18918 mutex_enter(SD_MUTEX(un)); 18919 un->un_f_sync_nv_supported = FALSE; 18920 mutex_exit(SD_MUTEX(un)); 18921 status = 0; 18922 SD_TRACE(SD_LOG_IO, un, 18923 "un_f_sync_nv_supported \ 18924 is set to false.\n"); 18925 goto done; 18926 } 18927 18928 mutex_enter(SD_MUTEX(un)); 18929 un->un_f_sync_cache_supported = FALSE; 18930 mutex_exit(SD_MUTEX(un)); 18931 SD_TRACE(SD_LOG_IO, un, 18932 "sd_send_scsi_SYNCHRONIZE_CACHE_biodone: \ 18933 un_f_sync_cache_supported set to false \ 18934 with asc = %x, ascq = %x\n", 18935 scsi_sense_asc(sense_buf), 18936 scsi_sense_ascq(sense_buf)); 18937 status = ENOTSUP; 18938 goto done; 18939 } 18940 break; 18941 default: 18942 break; 18943 } 18944 /* FALLTHRU */ 18945 default: 18946 /* 18947 * Don't log an error message if this device 18948 * has removable media. 18949 */ 18950 if (!un->un_f_has_removable_media) { 18951 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 18952 "SYNCHRONIZE CACHE command failed (%d)\n", status); 18953 } 18954 break; 18955 } 18956 18957 done: 18958 if (uip->ui_dkc.dkc_callback != NULL) { 18959 (*uip->ui_dkc.dkc_callback)(uip->ui_dkc.dkc_cookie, status); 18960 } 18961 18962 ASSERT((bp->b_flags & B_REMAPPED) == 0); 18963 freerbuf(bp); 18964 kmem_free(uip, sizeof (struct sd_uscsi_info)); 18965 kmem_free(uscmd->uscsi_rqbuf, SENSE_LENGTH); 18966 kmem_free(uscmd->uscsi_cdb, (size_t)uscmd->uscsi_cdblen); 18967 kmem_free(uscmd, sizeof (struct uscsi_cmd)); 18968 18969 return (status); 18970 } 18971 18972 18973 /* 18974 * Function: sd_send_scsi_GET_CONFIGURATION 18975 * 18976 * Description: Issues the get configuration command to the device. 18977 * Called from sd_check_for_writable_cd & sd_get_media_info; 18978 * the caller needs to ensure that buflen = SD_PROFILE_HEADER_LEN. 18979 * Arguments: un 18980 * ucmdbuf 18981 * rqbuf 18982 * rqbuflen 18983 * bufaddr 18984 * buflen 18985 * path_flag 18986 * 18987 * Return Code: 0 - Success 18988 * errno return code from sd_send_scsi_cmd() 18989 * 18990 * Context: Can sleep. Does not return until command is completed. 18991 * 18992 */ 18993 18994 static int 18995 sd_send_scsi_GET_CONFIGURATION(struct sd_lun *un, struct uscsi_cmd *ucmdbuf, 18996 uchar_t *rqbuf, uint_t rqbuflen, uchar_t *bufaddr, uint_t buflen, 18997 int path_flag) 18998 { 18999 char cdb[CDB_GROUP1]; 19000 int status; 19001 19002 ASSERT(un != NULL); 19003 ASSERT(!mutex_owned(SD_MUTEX(un))); 19004 ASSERT(bufaddr != NULL); 19005 ASSERT(ucmdbuf != NULL); 19006 ASSERT(rqbuf != NULL); 19007 19008 SD_TRACE(SD_LOG_IO, un, 19009 "sd_send_scsi_GET_CONFIGURATION: entry: un:0x%p\n", un); 19010 19011 bzero(cdb, sizeof (cdb)); 19012 bzero(ucmdbuf, sizeof (struct uscsi_cmd)); 19013 bzero(rqbuf, rqbuflen); 19014 bzero(bufaddr, buflen); 19015 19016 /* 19017 * Set up cdb field for the get configuration command.
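	 * Illustrative layout, summarized from MMC: byte 1 carries the RT
	 * field (0x02 = return the header plus the single feature named
	 * in bytes 2-3), and bytes 7-8 carry the allocation length, so
	 * cdb[8] below is the low-order byte of that length.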
19018 */ 19019 cdb[0] = SCMD_GET_CONFIGURATION; 19020 cdb[1] = 0x02; /* Requested Type */ 19021 cdb[8] = SD_PROFILE_HEADER_LEN; 19022 ucmdbuf->uscsi_cdb = cdb; 19023 ucmdbuf->uscsi_cdblen = CDB_GROUP1; 19024 ucmdbuf->uscsi_bufaddr = (caddr_t)bufaddr; 19025 ucmdbuf->uscsi_buflen = buflen; 19026 ucmdbuf->uscsi_timeout = sd_io_time; 19027 ucmdbuf->uscsi_rqbuf = (caddr_t)rqbuf; 19028 ucmdbuf->uscsi_rqlen = rqbuflen; 19029 ucmdbuf->uscsi_flags = USCSI_RQENABLE|USCSI_SILENT|USCSI_READ; 19030 19031 status = sd_send_scsi_cmd(SD_GET_DEV(un), ucmdbuf, FKIOCTL, 19032 UIO_SYSSPACE, path_flag); 19033 19034 switch (status) { 19035 case 0: 19036 break; /* Success! */ 19037 case EIO: 19038 switch (ucmdbuf->uscsi_status) { 19039 case STATUS_RESERVATION_CONFLICT: 19040 status = EACCES; 19041 break; 19042 default: 19043 break; 19044 } 19045 break; 19046 default: 19047 break; 19048 } 19049 19050 if (status == 0) { 19051 SD_DUMP_MEMORY(un, SD_LOG_IO, 19052 "sd_send_scsi_GET_CONFIGURATION: data", 19053 (uchar_t *)bufaddr, SD_PROFILE_HEADER_LEN, SD_LOG_HEX); 19054 } 19055 19056 SD_TRACE(SD_LOG_IO, un, 19057 "sd_send_scsi_GET_CONFIGURATION: exit\n"); 19058 19059 return (status); 19060 } 19061 19062 /* 19063 * Function: sd_send_scsi_feature_GET_CONFIGURATION 19064 * 19065 * Description: Issues the get configuration command to the device to 19066 * retrieve a specific feature. Called from 19067 * sd_check_for_writable_cd & sd_set_mmc_caps. 19068 * Arguments: un 19069 * ucmdbuf 19070 * rqbuf 19071 * rqbuflen 19072 * bufaddr 19073 * buflen 19074 * feature 19075 * 19076 * Return Code: 0 - Success 19077 * errno return code from sd_send_scsi_cmd() 19078 * 19079 * Context: Can sleep. Does not return until command is completed. 19080 * 19081 */ 19082 static int 19083 sd_send_scsi_feature_GET_CONFIGURATION(struct sd_lun *un, 19084 struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen, 19085 uchar_t *bufaddr, uint_t buflen, char feature, int path_flag) 19086 { 19087 char cdb[CDB_GROUP1]; 19088 int status; 19089 19090 ASSERT(un != NULL); 19091 ASSERT(!mutex_owned(SD_MUTEX(un))); 19092 ASSERT(bufaddr != NULL); 19093 ASSERT(ucmdbuf != NULL); 19094 ASSERT(rqbuf != NULL); 19095 19096 SD_TRACE(SD_LOG_IO, un, 19097 "sd_send_scsi_feature_GET_CONFIGURATION: entry: un:0x%p\n", un); 19098 19099 bzero(cdb, sizeof (cdb)); 19100 bzero(ucmdbuf, sizeof (struct uscsi_cmd)); 19101 bzero(rqbuf, rqbuflen); 19102 bzero(bufaddr, buflen); 19103 19104 /* 19105 * Set up cdb field for the get configuration command. 19106 */ 19107 cdb[0] = SCMD_GET_CONFIGURATION; 19108 cdb[1] = 0x02; /* Requested Type */ 19109 cdb[3] = feature; 19110 cdb[8] = buflen; 19111 ucmdbuf->uscsi_cdb = cdb; 19112 ucmdbuf->uscsi_cdblen = CDB_GROUP1; 19113 ucmdbuf->uscsi_bufaddr = (caddr_t)bufaddr; 19114 ucmdbuf->uscsi_buflen = buflen; 19115 ucmdbuf->uscsi_timeout = sd_io_time; 19116 ucmdbuf->uscsi_rqbuf = (caddr_t)rqbuf; 19117 ucmdbuf->uscsi_rqlen = rqbuflen; 19118 ucmdbuf->uscsi_flags = USCSI_RQENABLE|USCSI_SILENT|USCSI_READ; 19119 19120 status = sd_send_scsi_cmd(SD_GET_DEV(un), ucmdbuf, FKIOCTL, 19121 UIO_SYSSPACE, path_flag); 19122 19123 switch (status) { 19124 case 0: 19125 break; /* Success! 
*/ 19126 case EIO: 19127 switch (ucmdbuf->uscsi_status) { 19128 case STATUS_RESERVATION_CONFLICT: 19129 status = EACCES; 19130 break; 19131 default: 19132 break; 19133 } 19134 break; 19135 default: 19136 break; 19137 } 19138 19139 if (status == 0) { 19140 SD_DUMP_MEMORY(un, SD_LOG_IO, 19141 "sd_send_scsi_feature_GET_CONFIGURATION: data", 19142 (uchar_t *)bufaddr, SD_PROFILE_HEADER_LEN, SD_LOG_HEX); 19143 } 19144 19145 SD_TRACE(SD_LOG_IO, un, 19146 "sd_send_scsi_feature_GET_CONFIGURATION: exit\n"); 19147 19148 return (status); 19149 } 19150 19151 19152 /* 19153 * Function: sd_send_scsi_MODE_SENSE 19154 * 19155 * Description: Utility function for issuing a scsi MODE SENSE command. 19156 * Note: This routine uses a consistent implementation for Group0, 19157 * Group1, and Group2 commands across all platforms. ATAPI devices 19158 * use Group 1 Read/Write commands and Group 2 Mode Sense/Select. 19159 * 19160 * Arguments: un - pointer to the softstate struct for the target. 19161 * cdbsize - size CDB to be used (CDB_GROUP0 (6 byte) or 19162 * CDB_GROUP[1|2] (10 byte)). 19163 * bufaddr - buffer for page data retrieved from the target. 19164 * buflen - size of page to be retrieved. 19165 * page_code - page code of data to be retrieved from the target. 19166 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 19167 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 19168 * to use the USCSI "direct" chain and bypass the normal 19169 * command waitq. 19170 * 19171 * Return Code: 0 - Success 19172 * errno return code from sd_send_scsi_cmd() 19173 * 19174 * Context: Can sleep. Does not return until command is completed. 19175 */ 19176 19177 static int 19178 sd_send_scsi_MODE_SENSE(struct sd_lun *un, int cdbsize, uchar_t *bufaddr, 19179 size_t buflen, uchar_t page_code, int path_flag) 19180 { 19181 struct scsi_extended_sense sense_buf; 19182 union scsi_cdb cdb; 19183 struct uscsi_cmd ucmd_buf; 19184 int status; 19185 int headlen; 19186 19187 ASSERT(un != NULL); 19188 ASSERT(!mutex_owned(SD_MUTEX(un))); 19189 ASSERT(bufaddr != NULL); 19190 ASSERT((cdbsize == CDB_GROUP0) || (cdbsize == CDB_GROUP1) || 19191 (cdbsize == CDB_GROUP2)); 19192 19193 SD_TRACE(SD_LOG_IO, un, 19194 "sd_send_scsi_MODE_SENSE: entry: un:0x%p\n", un); 19195 19196 bzero(&cdb, sizeof (cdb)); 19197 bzero(&ucmd_buf, sizeof (ucmd_buf)); 19198 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 19199 bzero(bufaddr, buflen); 19200 19201 if (cdbsize == CDB_GROUP0) { 19202 cdb.scc_cmd = SCMD_MODE_SENSE; 19203 cdb.cdb_opaque[2] = page_code; 19204 FORMG0COUNT(&cdb, buflen); 19205 headlen = MODE_HEADER_LENGTH; 19206 } else { 19207 cdb.scc_cmd = SCMD_MODE_SENSE_G1; 19208 cdb.cdb_opaque[2] = page_code; 19209 FORMG1COUNT(&cdb, buflen); 19210 headlen = MODE_HEADER_LENGTH_GRP2; 19211 } 19212 19213 ASSERT(headlen <= buflen); 19214 SD_FILL_SCSI1_LUN_CDB(un, &cdb); 19215 19216 ucmd_buf.uscsi_cdb = (char *)&cdb; 19217 ucmd_buf.uscsi_cdblen = (uchar_t)cdbsize; 19218 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr; 19219 ucmd_buf.uscsi_buflen = buflen; 19220 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 19221 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 19222 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; 19223 ucmd_buf.uscsi_timeout = 60; 19224 19225 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 19226 UIO_SYSSPACE, path_flag); 19227 19228 switch (status) { 19229 case 0: 19230 /* 19231 * sr_check_wp() uses the 0x3f page code and checks the header 19232 * of the mode page to determine whether the target device is
write-protected. 19233 * But some USB devices return 0 bytes for the 0x3f page code. 19234 * For this case, make sure that at least the mode page header 19235 * is returned. 19236 */ 19237 if (buflen - ucmd_buf.uscsi_resid < headlen) 19238 status = EIO; 19239 break; /* Success! */ 19240 case EIO: 19241 switch (ucmd_buf.uscsi_status) { 19242 case STATUS_RESERVATION_CONFLICT: 19243 status = EACCES; 19244 break; 19245 default: 19246 break; 19247 } 19248 break; 19249 default: 19250 break; 19251 } 19252 19253 if (status == 0) { 19254 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_MODE_SENSE: data", 19255 (uchar_t *)bufaddr, buflen, SD_LOG_HEX); 19256 } 19257 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_MODE_SENSE: exit\n"); 19258 19259 return (status); 19260 } 19261 19262 19263 /* 19264 * Function: sd_send_scsi_MODE_SELECT 19265 * 19266 * Description: Utility function for issuing a scsi MODE SELECT command. 19267 * Note: This routine uses a consistent implementation for Group0, 19268 * Group1, and Group2 commands across all platforms. ATAPI devices 19269 * use Group 1 Read/Write commands and Group 2 Mode Sense/Select. 19270 * 19271 * Arguments: un - pointer to the softstate struct for the target. 19272 * cdbsize - size CDB to be used (CDB_GROUP0 (6 byte) or 19273 * CDB_GROUP[1|2] (10 byte)). 19274 * bufaddr - buffer for page data retrieved from the target. 19275 * buflen - size of page to be retrieved. 19276 * save_page - boolean to determine whether the SP bit should be set. 19277 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 19278 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 19279 * to use the USCSI "direct" chain and bypass the normal 19280 * command waitq. 19281 * 19282 * Return Code: 0 - Success 19283 * errno return code from sd_send_scsi_cmd() 19284 * 19285 * Context: Can sleep. Does not return until command is completed.
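 *
 * Note (summarized from the code below): byte 1 of the MODE SELECT
 * CDB carries the PF bit (0x10, page-format-compliant data, set for
 * many third party drives) and the SP bit (0x01, save the page to
 * non-volatile storage when save_page == SD_SAVE_PAGE).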
19286 */ 19287 19288 static int 19289 sd_send_scsi_MODE_SELECT(struct sd_lun *un, int cdbsize, uchar_t *bufaddr, 19290 size_t buflen, uchar_t save_page, int path_flag) 19291 { 19292 struct scsi_extended_sense sense_buf; 19293 union scsi_cdb cdb; 19294 struct uscsi_cmd ucmd_buf; 19295 int status; 19296 19297 ASSERT(un != NULL); 19298 ASSERT(!mutex_owned(SD_MUTEX(un))); 19299 ASSERT(bufaddr != NULL); 19300 ASSERT((cdbsize == CDB_GROUP0) || (cdbsize == CDB_GROUP1) || 19301 (cdbsize == CDB_GROUP2)); 19302 19303 SD_TRACE(SD_LOG_IO, un, 19304 "sd_send_scsi_MODE_SELECT: entry: un:0x%p\n", un); 19305 19306 bzero(&cdb, sizeof (cdb)); 19307 bzero(&ucmd_buf, sizeof (ucmd_buf)); 19308 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 19309 19310 /* Set the PF bit for many third party drives */ 19311 cdb.cdb_opaque[1] = 0x10; 19312 19313 /* Set the save page (SP) bit if given */ 19314 if (save_page == SD_SAVE_PAGE) { 19315 cdb.cdb_opaque[1] |= 0x01; 19316 } 19317 19318 if (cdbsize == CDB_GROUP0) { 19319 cdb.scc_cmd = SCMD_MODE_SELECT; 19320 FORMG0COUNT(&cdb, buflen); 19321 } else { 19322 cdb.scc_cmd = SCMD_MODE_SELECT_G1; 19323 FORMG1COUNT(&cdb, buflen); 19324 } 19325 19326 SD_FILL_SCSI1_LUN_CDB(un, &cdb); 19327 19328 ucmd_buf.uscsi_cdb = (char *)&cdb; 19329 ucmd_buf.uscsi_cdblen = (uchar_t)cdbsize; 19330 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr; 19331 ucmd_buf.uscsi_buflen = buflen; 19332 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 19333 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 19334 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_WRITE | USCSI_SILENT; 19335 ucmd_buf.uscsi_timeout = 60; 19336 19337 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 19338 UIO_SYSSPACE, path_flag); 19339 19340 switch (status) { 19341 case 0: 19342 break; /* Success! */ 19343 case EIO: 19344 switch (ucmd_buf.uscsi_status) { 19345 case STATUS_RESERVATION_CONFLICT: 19346 status = EACCES; 19347 break; 19348 default: 19349 break; 19350 } 19351 break; 19352 default: 19353 break; 19354 } 19355 19356 if (status == 0) { 19357 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_MODE_SELECT: data", 19358 (uchar_t *)bufaddr, buflen, SD_LOG_HEX); 19359 } 19360 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_MODE_SELECT: exit\n"); 19361 19362 return (status); 19363 } 19364 19365 19366 /* 19367 * Function: sd_send_scsi_RDWR 19368 * 19369 * Description: Issue a scsi READ or WRITE command with the given parameters. 19370 * 19371 * Arguments: un: Pointer to the sd_lun struct for the target. 19372 * cmd: SCMD_READ or SCMD_WRITE 19373 * bufaddr: Address of caller's buffer to receive the RDWR data 19374 * buflen: Length of caller's buffer to receive the RDWR data. 19375 * start_block: Block number for the start of the RDWR operation. 19376 * (Assumes target-native block size.) 19379 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 19380 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 19381 * to use the USCSI "direct" chain and bypass the normal 19382 * command waitq. 19383 * 19384 * Return Code: 0 - Success 19385 * errno return code from sd_send_scsi_cmd() 19386 * 19387 * Context: Can sleep. Does not return until command is completed.
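 *
 * Illustrative note on the CDB selection logic below: a Group 0
 * (6-byte) CDB carries only a 21-bit LBA, so, for example, a
 * start_block of 0x200000 already forces a Group 1 (10-byte) CDB,
 * and any start_block above 0xFFFFFFFF forces a Group 4 (16-byte)
 * CDB; ATAPI devices always use at least Group 1.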
19388 */ 19389 19390 static int 19391 sd_send_scsi_RDWR(struct sd_lun *un, uchar_t cmd, void *bufaddr, 19392 size_t buflen, daddr_t start_block, int path_flag) 19393 { 19394 struct scsi_extended_sense sense_buf; 19395 union scsi_cdb cdb; 19396 struct uscsi_cmd ucmd_buf; 19397 uint32_t block_count; 19398 int status; 19399 int cdbsize; 19400 uchar_t flag; 19401 19402 ASSERT(un != NULL); 19403 ASSERT(!mutex_owned(SD_MUTEX(un))); 19404 ASSERT(bufaddr != NULL); 19405 ASSERT((cmd == SCMD_READ) || (cmd == SCMD_WRITE)); 19406 19407 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_RDWR: entry: un:0x%p\n", un); 19408 19409 if (un->un_f_tgt_blocksize_is_valid != TRUE) { 19410 return (EINVAL); 19411 } 19412 19413 mutex_enter(SD_MUTEX(un)); 19414 block_count = SD_BYTES2TGTBLOCKS(un, buflen); 19415 mutex_exit(SD_MUTEX(un)); 19416 19417 flag = (cmd == SCMD_READ) ? USCSI_READ : USCSI_WRITE; 19418 19419 SD_INFO(SD_LOG_IO, un, "sd_send_scsi_RDWR: " 19420 "bufaddr:0x%p buflen:0x%x start_block:0x%p block_count:0x%x\n", 19421 bufaddr, buflen, start_block, block_count); 19422 19423 bzero(&cdb, sizeof (cdb)); 19424 bzero(&ucmd_buf, sizeof (ucmd_buf)); 19425 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 19426 19427 /* Compute CDB size to use */ 19428 if (start_block > 0xffffffff) 19429 cdbsize = CDB_GROUP4; 19430 else if ((start_block & 0xFFE00000) || 19431 (un->un_f_cfg_is_atapi == TRUE)) 19432 cdbsize = CDB_GROUP1; 19433 else 19434 cdbsize = CDB_GROUP0; 19435 19436 switch (cdbsize) { 19437 case CDB_GROUP0: /* 6-byte CDBs */ 19438 cdb.scc_cmd = cmd; 19439 FORMG0ADDR(&cdb, start_block); 19440 FORMG0COUNT(&cdb, block_count); 19441 break; 19442 case CDB_GROUP1: /* 10-byte CDBs */ 19443 cdb.scc_cmd = cmd | SCMD_GROUP1; 19444 FORMG1ADDR(&cdb, start_block); 19445 FORMG1COUNT(&cdb, block_count); 19446 break; 19447 case CDB_GROUP4: /* 16-byte CDBs */ 19448 cdb.scc_cmd = cmd | SCMD_GROUP4; 19449 FORMG4LONGADDR(&cdb, (uint64_t)start_block); 19450 FORMG4COUNT(&cdb, block_count); 19451 break; 19452 case CDB_GROUP5: /* 12-byte CDBs (currently unsupported) */ 19453 default: 19454 /* All others reserved */ 19455 return (EINVAL); 19456 } 19457 19458 /* Set LUN bit(s) in CDB if this is a SCSI-1 device */ 19459 SD_FILL_SCSI1_LUN_CDB(un, &cdb); 19460 19461 ucmd_buf.uscsi_cdb = (char *)&cdb; 19462 ucmd_buf.uscsi_cdblen = (uchar_t)cdbsize; 19463 ucmd_buf.uscsi_bufaddr = bufaddr; 19464 ucmd_buf.uscsi_buflen = buflen; 19465 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 19466 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 19467 ucmd_buf.uscsi_flags = flag | USCSI_RQENABLE | USCSI_SILENT; 19468 ucmd_buf.uscsi_timeout = 60; 19469 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 19470 UIO_SYSSPACE, path_flag); 19471 switch (status) { 19472 case 0: 19473 break; /* Success! */ 19474 case EIO: 19475 switch (ucmd_buf.uscsi_status) { 19476 case STATUS_RESERVATION_CONFLICT: 19477 status = EACCES; 19478 break; 19479 default: 19480 break; 19481 } 19482 break; 19483 default: 19484 break; 19485 } 19486 19487 if (status == 0) { 19488 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_RDWR: data", 19489 (uchar_t *)bufaddr, buflen, SD_LOG_HEX); 19490 } 19491 19492 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_RDWR: exit\n"); 19493 19494 return (status); 19495 } 19496 19497 19498 /* 19499 * Function: sd_send_scsi_LOG_SENSE 19500 * 19501 * Description: Issue a scsi LOG_SENSE command with the given parameters. 19502 * 19503 * Arguments: un: Pointer to the sd_lun struct for the target. 
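 *		bufaddr: Buffer to receive the log page data.
 *		buflen: Length of the caller's buffer.
 *		page_code: Log page to request (low 6 bits of CDB byte 2).
 *		page_control: Page control field (upper 2 bits of CDB
 *			byte 2).
 *		param_ptr: Parameter pointer, placed in CDB bytes 5-6.
 *		path_flag: SD_PATH_* value selecting the command chain,
 *			as for the other sd_send_scsi_* routines above.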
19504 * 19505 * Return Code: 0 - Success 19506 * errno return code from sd_send_scsi_cmd() 19507 * 19508 * Context: Can sleep. Does not return until command is completed. 19509 */ 19510 19511 static int 19512 sd_send_scsi_LOG_SENSE(struct sd_lun *un, uchar_t *bufaddr, uint16_t buflen, 19513 uchar_t page_code, uchar_t page_control, uint16_t param_ptr, 19514 int path_flag) 19515 19516 { 19517 struct scsi_extended_sense sense_buf; 19518 union scsi_cdb cdb; 19519 struct uscsi_cmd ucmd_buf; 19520 int status; 19521 19522 ASSERT(un != NULL); 19523 ASSERT(!mutex_owned(SD_MUTEX(un))); 19524 19525 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_LOG_SENSE: entry: un:0x%p\n", un); 19526 19527 bzero(&cdb, sizeof (cdb)); 19528 bzero(&ucmd_buf, sizeof (ucmd_buf)); 19529 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 19530 19531 cdb.scc_cmd = SCMD_LOG_SENSE_G1; 19532 cdb.cdb_opaque[2] = (page_control << 6) | page_code; 19533 cdb.cdb_opaque[5] = (uchar_t)((param_ptr & 0xFF00) >> 8); 19534 cdb.cdb_opaque[6] = (uchar_t)(param_ptr & 0x00FF); 19535 FORMG1COUNT(&cdb, buflen); 19536 19537 ucmd_buf.uscsi_cdb = (char *)&cdb; 19538 ucmd_buf.uscsi_cdblen = CDB_GROUP1; 19539 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr; 19540 ucmd_buf.uscsi_buflen = buflen; 19541 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 19542 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 19543 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; 19544 ucmd_buf.uscsi_timeout = 60; 19545 19546 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 19547 UIO_SYSSPACE, path_flag); 19548 19549 switch (status) { 19550 case 0: 19551 break; 19552 case EIO: 19553 switch (ucmd_buf.uscsi_status) { 19554 case STATUS_RESERVATION_CONFLICT: 19555 status = EACCES; 19556 break; 19557 case STATUS_CHECK: 19558 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 19559 (scsi_sense_key((uint8_t *)&sense_buf) == 19560 KEY_ILLEGAL_REQUEST) && 19561 (scsi_sense_asc((uint8_t *)&sense_buf) == 0x24)) { 19562 /* 19563 * ASC 0x24: INVALID FIELD IN CDB 19564 */ 19565 switch (page_code) { 19566 case START_STOP_CYCLE_PAGE: 19567 /* 19568 * The start stop cycle counter is 19569 * implemented as page 0x31 in earlier 19570 * generation disks. In new generation 19571 * disks the start stop cycle counter is 19572 * implemented as page 0xE. To properly 19573 * handle this case if an attempt for 19574 * log page 0xE is made and fails we 19575 * will try again using page 0x31. 19576 * 19577 * Network storage BU committed to 19578 * maintain the page 0x31 for this 19579 * purpose and will not have any other 19580 * page implemented with page code 0x31 19581 * until all disks transition to the 19582 * standard page. 
19583 */ 19584 mutex_enter(SD_MUTEX(un)); 19585 un->un_start_stop_cycle_page = 19586 START_STOP_CYCLE_VU_PAGE; 19587 cdb.cdb_opaque[2] = 19588 (char)(page_control << 6) | 19589 un->un_start_stop_cycle_page; 19590 mutex_exit(SD_MUTEX(un)); 19591 status = sd_send_scsi_cmd( 19592 SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 19593 UIO_SYSSPACE, path_flag); 19594 19595 break; 19596 case TEMPERATURE_PAGE: 19597 status = ENOTTY; 19598 break; 19599 default: 19600 break; 19601 } 19602 } 19603 break; 19604 default: 19605 break; 19606 } 19607 break; 19608 default: 19609 break; 19610 } 19611 19612 if (status == 0) { 19613 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_LOG_SENSE: data", 19614 (uchar_t *)bufaddr, buflen, SD_LOG_HEX); 19615 } 19616 19617 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_LOG_SENSE: exit\n"); 19618 19619 return (status); 19620 } 19621 19622 19623 /* 19624 * Function: sdioctl 19625 * 19626 * Description: Driver's ioctl(9e) entry point function. 19627 * 19628 * Arguments: dev - device number 19629 * cmd - ioctl operation to be performed 19630 * arg - user argument, contains data to be set or reference 19631 * parameter for get 19632 * flag - bit flag, indicating open settings, 32/64 bit type 19633 * cred_p - user credential pointer 19634 * rval_p - calling process return value (OPT) 19635 * 19636 * Return Code: EINVAL 19637 * ENOTTY 19638 * ENXIO 19639 * EIO 19640 * EFAULT 19641 * ENOTSUP 19642 * EPERM 19643 * 19644 * Context: Called from the device switch at normal priority. 19645 */ 19646 19647 static int 19648 sdioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cred_p, int *rval_p) 19649 { 19650 struct sd_lun *un = NULL; 19651 int err = 0; 19652 int i = 0; 19653 cred_t *cr; 19654 int tmprval = EINVAL; 19655 int is_valid; 19656 19657 /* 19658 * All device accesses go thru sdstrategy where we check on suspend 19659 * status 19660 */ 19661 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 19662 return (ENXIO); 19663 } 19664 19665 ASSERT(!mutex_owned(SD_MUTEX(un))); 19666 19667 19668 is_valid = SD_IS_VALID_LABEL(un); 19669 19670 /* 19671 * Moved this wait from sd_uscsi_strategy to here for 19672 * reasons of deadlock prevention. Internal driver commands, 19673 * specifically those to change a device's power level, result 19674 * in a call to sd_uscsi_strategy. 19675 */ 19676 mutex_enter(SD_MUTEX(un)); 19677 while ((un->un_state == SD_STATE_SUSPENDED) || 19678 (un->un_state == SD_STATE_PM_CHANGING)) { 19679 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 19680 } 19681 /* 19682 * Incrementing the counter here protects commands from now 19683 * through to the top of sd_uscsi_strategy. Without the 19684 * counter increment, a power down, for example, could get in 19685 * after the above check for state is made and before 19686 * execution gets to the top of sd_uscsi_strategy. 19687 * That would cause problems.
19688 */ 19689 un->un_ncmds_in_driver++; 19690 19691 if (!is_valid && 19692 (flag & (FNDELAY | FNONBLOCK))) { 19693 switch (cmd) { 19694 case DKIOCGGEOM: /* SD_PATH_DIRECT */ 19695 case DKIOCGVTOC: 19696 case DKIOCGAPART: 19697 case DKIOCPARTINFO: 19698 case DKIOCSGEOM: 19699 case DKIOCSAPART: 19700 case DKIOCGETEFI: 19701 case DKIOCPARTITION: 19702 case DKIOCSVTOC: 19703 case DKIOCSETEFI: 19704 case DKIOCGMBOOT: 19705 case DKIOCSMBOOT: 19706 case DKIOCG_PHYGEOM: 19707 case DKIOCG_VIRTGEOM: 19708 /* let cmlb handle it */ 19709 goto skip_ready_valid; 19710 19711 case CDROMPAUSE: 19712 case CDROMRESUME: 19713 case CDROMPLAYMSF: 19714 case CDROMPLAYTRKIND: 19715 case CDROMREADTOCHDR: 19716 case CDROMREADTOCENTRY: 19717 case CDROMSTOP: 19718 case CDROMSTART: 19719 case CDROMVOLCTRL: 19720 case CDROMSUBCHNL: 19721 case CDROMREADMODE2: 19722 case CDROMREADMODE1: 19723 case CDROMREADOFFSET: 19724 case CDROMSBLKMODE: 19725 case CDROMGBLKMODE: 19726 case CDROMGDRVSPEED: 19727 case CDROMSDRVSPEED: 19728 case CDROMCDDA: 19729 case CDROMCDXA: 19730 case CDROMSUBCODE: 19731 if (!ISCD(un)) { 19732 un->un_ncmds_in_driver--; 19733 ASSERT(un->un_ncmds_in_driver >= 0); 19734 mutex_exit(SD_MUTEX(un)); 19735 return (ENOTTY); 19736 } 19737 break; 19738 case FDEJECT: 19739 case DKIOCEJECT: 19740 case CDROMEJECT: 19741 if (!un->un_f_eject_media_supported) { 19742 un->un_ncmds_in_driver--; 19743 ASSERT(un->un_ncmds_in_driver >= 0); 19744 mutex_exit(SD_MUTEX(un)); 19745 return (ENOTTY); 19746 } 19747 break; 19748 case DKIOCFLUSHWRITECACHE: 19749 mutex_exit(SD_MUTEX(un)); 19750 err = sd_send_scsi_TEST_UNIT_READY(un, 0); 19751 if (err != 0) { 19752 mutex_enter(SD_MUTEX(un)); 19753 un->un_ncmds_in_driver--; 19754 ASSERT(un->un_ncmds_in_driver >= 0); 19755 mutex_exit(SD_MUTEX(un)); 19756 return (EIO); 19757 } 19758 mutex_enter(SD_MUTEX(un)); 19759 /* FALLTHROUGH */ 19760 case DKIOCREMOVABLE: 19761 case DKIOCHOTPLUGGABLE: 19762 case DKIOCINFO: 19763 case DKIOCGMEDIAINFO: 19764 case MHIOCENFAILFAST: 19765 case MHIOCSTATUS: 19766 case MHIOCTKOWN: 19767 case MHIOCRELEASE: 19768 case MHIOCGRP_INKEYS: 19769 case MHIOCGRP_INRESV: 19770 case MHIOCGRP_REGISTER: 19771 case MHIOCGRP_RESERVE: 19772 case MHIOCGRP_PREEMPTANDABORT: 19773 case MHIOCGRP_REGISTERANDIGNOREKEY: 19774 case CDROMCLOSETRAY: 19775 case USCSICMD: 19776 goto skip_ready_valid; 19777 default: 19778 break; 19779 } 19780 19781 mutex_exit(SD_MUTEX(un)); 19782 err = sd_ready_and_valid(un); 19783 mutex_enter(SD_MUTEX(un)); 19784 19785 if (err != SD_READY_VALID) { 19786 switch (cmd) { 19787 case DKIOCSTATE: 19788 case CDROMGDRVSPEED: 19789 case CDROMSDRVSPEED: 19790 case FDEJECT: /* for eject command */ 19791 case DKIOCEJECT: 19792 case CDROMEJECT: 19793 case DKIOCREMOVABLE: 19794 case DKIOCHOTPLUGGABLE: 19795 break; 19796 default: 19797 if (un->un_f_has_removable_media) { 19798 err = ENXIO; 19799 } else { 19800 /* Do not map SD_RESERVED_BY_OTHERS to EIO */ 19801 if (err == SD_RESERVED_BY_OTHERS) { 19802 err = EACCES; 19803 } else { 19804 err = EIO; 19805 } 19806 } 19807 un->un_ncmds_in_driver--; 19808 ASSERT(un->un_ncmds_in_driver >= 0); 19809 mutex_exit(SD_MUTEX(un)); 19810 return (err); 19811 } 19812 } 19813 } 19814 19815 skip_ready_valid: 19816 mutex_exit(SD_MUTEX(un)); 19817 19818 switch (cmd) { 19819 case DKIOCINFO: 19820 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCINFO\n"); 19821 err = sd_dkio_ctrl_info(dev, (caddr_t)arg, flag); 19822 break; 19823 19824 case DKIOCGMEDIAINFO: 19825 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCGMEDIAINFO\n"); 19826 err = sd_get_media_info(dev, 
(caddr_t)arg, flag); 19827 break; 19828 19829 case DKIOCGGEOM: 19830 case DKIOCGVTOC: 19831 case DKIOCGAPART: 19832 case DKIOCPARTINFO: 19833 case DKIOCSGEOM: 19834 case DKIOCSAPART: 19835 case DKIOCGETEFI: 19836 case DKIOCPARTITION: 19837 case DKIOCSVTOC: 19838 case DKIOCSETEFI: 19839 case DKIOCGMBOOT: 19840 case DKIOCSMBOOT: 19841 case DKIOCG_PHYGEOM: 19842 case DKIOCG_VIRTGEOM: 19843 SD_TRACE(SD_LOG_IOCTL, un, "DKIOC %d\n", cmd); 19844 19845 /* TUR should spin up */ 19846 19847 if (un->un_f_has_removable_media) 19848 err = sd_send_scsi_TEST_UNIT_READY(un, 19849 SD_CHECK_FOR_MEDIA); 19850 else 19851 err = sd_send_scsi_TEST_UNIT_READY(un, 0); 19852 19853 if (err != 0) 19854 break; 19855 19856 err = cmlb_ioctl(un->un_cmlbhandle, dev, 19857 cmd, arg, flag, cred_p, rval_p, (void *)SD_PATH_DIRECT); 19858 19859 if ((err == 0) && 19860 ((cmd == DKIOCSETEFI) || 19861 ((un->un_f_pkstats_enabled) && 19862 (cmd == DKIOCSAPART || cmd == DKIOCSVTOC)))) { 19863 19864 tmprval = cmlb_validate(un->un_cmlbhandle, CMLB_SILENT, 19865 (void *)SD_PATH_DIRECT); 19866 if ((tmprval == 0) && un->un_f_pkstats_enabled) { 19867 sd_set_pstats(un); 19868 SD_TRACE(SD_LOG_IO_PARTITION, un, 19869 "sd_ioctl: un:0x%p pstats created and " 19870 "set\n", un); 19871 } 19872 } 19873 19874 if ((cmd == DKIOCSVTOC) || 19875 ((cmd == DKIOCSETEFI) && (tmprval == 0))) { 19876 19877 mutex_enter(SD_MUTEX(un)); 19878 if (un->un_f_devid_supported && 19879 (un->un_f_opt_fab_devid == TRUE)) { 19880 if (un->un_devid == NULL) { 19881 sd_register_devid(un, SD_DEVINFO(un), 19882 SD_TARGET_IS_UNRESERVED); 19883 } else { 19884 /* 19885 * The device id for this disk 19886 * has been fabricated. The 19887 * device id must be preserved 19888 * by writing it back out to 19889 * disk. 19890 */ 19891 if (sd_write_deviceid(un) != 0) { 19892 ddi_devid_free(un->un_devid); 19893 un->un_devid = NULL; 19894 } 19895 } 19896 } 19897 mutex_exit(SD_MUTEX(un)); 19898 } 19899 19900 break; 19901 19902 case DKIOCLOCK: 19903 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCLOCK\n"); 19904 err = sd_send_scsi_DOORLOCK(un, SD_REMOVAL_PREVENT, 19905 SD_PATH_STANDARD); 19906 break; 19907 19908 case DKIOCUNLOCK: 19909 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCUNLOCK\n"); 19910 err = sd_send_scsi_DOORLOCK(un, SD_REMOVAL_ALLOW, 19911 SD_PATH_STANDARD); 19912 break; 19913 19914 case DKIOCSTATE: { 19915 enum dkio_state state; 19916 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCSTATE\n"); 19917 19918 if (ddi_copyin((void *)arg, &state, sizeof (int), flag) != 0) { 19919 err = EFAULT; 19920 } else { 19921 err = sd_check_media(dev, state); 19922 if (err == 0) { 19923 if (ddi_copyout(&un->un_mediastate, (void *)arg, 19924 sizeof (int), flag) != 0) 19925 err = EFAULT; 19926 } 19927 } 19928 break; 19929 } 19930 19931 case DKIOCREMOVABLE: 19932 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCREMOVABLE\n"); 19933 i = un->un_f_has_removable_media ? 1 : 0; 19934 if (ddi_copyout(&i, (void *)arg, sizeof (int), flag) != 0) { 19935 err = EFAULT; 19936 } else { 19937 err = 0; 19938 } 19939 break; 19940 19941 case DKIOCHOTPLUGGABLE: 19942 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCHOTPLUGGABLE\n"); 19943 i = un->un_f_is_hotpluggable ?
1 : 0; 19944 if (ddi_copyout(&i, (void *)arg, sizeof (int), flag) != 0) { 19945 err = EFAULT; 19946 } else { 19947 err = 0; 19948 } 19949 break; 19950 19951 case DKIOCGTEMPERATURE: 19952 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCGTEMPERATURE\n"); 19953 err = sd_dkio_get_temp(dev, (caddr_t)arg, flag); 19954 break; 19955 19956 case MHIOCENFAILFAST: 19957 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCENFAILFAST\n"); 19958 if ((err = drv_priv(cred_p)) == 0) { 19959 err = sd_mhdioc_failfast(dev, (caddr_t)arg, flag); 19960 } 19961 break; 19962 19963 case MHIOCTKOWN: 19964 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCTKOWN\n"); 19965 if ((err = drv_priv(cred_p)) == 0) { 19966 err = sd_mhdioc_takeown(dev, (caddr_t)arg, flag); 19967 } 19968 break; 19969 19970 case MHIOCRELEASE: 19971 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCRELEASE\n"); 19972 if ((err = drv_priv(cred_p)) == 0) { 19973 err = sd_mhdioc_release(dev); 19974 } 19975 break; 19976 19977 case MHIOCSTATUS: 19978 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCSTATUS\n"); 19979 if ((err = drv_priv(cred_p)) == 0) { 19980 switch (sd_send_scsi_TEST_UNIT_READY(un, 0)) { 19981 case 0: 19982 err = 0; 19983 break; 19984 case EACCES: 19985 *rval_p = 1; 19986 err = 0; 19987 break; 19988 default: 19989 err = EIO; 19990 break; 19991 } 19992 } 19993 break; 19994 19995 case MHIOCQRESERVE: 19996 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCQRESERVE\n"); 19997 if ((err = drv_priv(cred_p)) == 0) { 19998 err = sd_reserve_release(dev, SD_RESERVE); 19999 } 20000 break; 20001 20002 case MHIOCREREGISTERDEVID: 20003 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCREREGISTERDEVID\n"); 20004 if (drv_priv(cred_p) == EPERM) { 20005 err = EPERM; 20006 } else if (!un->un_f_devid_supported) { 20007 err = ENOTTY; 20008 } else { 20009 err = sd_mhdioc_register_devid(dev); 20010 } 20011 break; 20012 20013 case MHIOCGRP_INKEYS: 20014 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_INKEYS\n"); 20015 if (((err = drv_priv(cred_p)) != EPERM) && arg != NULL) { 20016 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 20017 err = ENOTSUP; 20018 } else { 20019 err = sd_mhdioc_inkeys(dev, (caddr_t)arg, 20020 flag); 20021 } 20022 } 20023 break; 20024 20025 case MHIOCGRP_INRESV: 20026 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_INRESV\n"); 20027 if (((err = drv_priv(cred_p)) != EPERM) && arg != NULL) { 20028 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 20029 err = ENOTSUP; 20030 } else { 20031 err = sd_mhdioc_inresv(dev, (caddr_t)arg, flag); 20032 } 20033 } 20034 break; 20035 20036 case MHIOCGRP_REGISTER: 20037 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_REGISTER\n"); 20038 if ((err = drv_priv(cred_p)) != EPERM) { 20039 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 20040 err = ENOTSUP; 20041 } else if (arg != NULL) { 20042 mhioc_register_t reg; 20043 if (ddi_copyin((void *)arg, ®, 20044 sizeof (mhioc_register_t), flag) != 0) { 20045 err = EFAULT; 20046 } else { 20047 err = 20048 sd_send_scsi_PERSISTENT_RESERVE_OUT( 20049 un, SD_SCSI3_REGISTER, 20050 (uchar_t *)®); 20051 } 20052 } 20053 } 20054 break; 20055 20056 case MHIOCGRP_RESERVE: 20057 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_RESERVE\n"); 20058 if ((err = drv_priv(cred_p)) != EPERM) { 20059 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 20060 err = ENOTSUP; 20061 } else if (arg != NULL) { 20062 mhioc_resv_desc_t resv_desc; 20063 if (ddi_copyin((void *)arg, &resv_desc, 20064 sizeof (mhioc_resv_desc_t), flag) != 0) { 20065 err = EFAULT; 20066 } else { 20067 err = 20068 sd_send_scsi_PERSISTENT_RESERVE_OUT( 20069 un, SD_SCSI3_RESERVE, 20070 (uchar_t *)&resv_desc); 20071 } 20072 } 20073 } 20074 break; 20075 20076 
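	/*
	 * Illustrative userland sketch (hypothetical code, not part of
	 * the driver) of how the MHIOCGRP_RESERVE case above is driven;
	 * fd, my_key and the reservation type value are chosen for the
	 * example:
	 *
	 *	mhioc_resv_desc_t rd;
	 *	bzero(&rd, sizeof (rd));
	 *	bcopy(my_key, rd.key.key, MHIOC_RESV_KEY_SIZE);
	 *	rd.type = 0x01;		-- Write Exclusive (SPC-3 type) --
	 *	if (ioctl(fd, MHIOCGRP_RESERVE, &rd) < 0)
	 *		perror("MHIOCGRP_RESERVE");
	 */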
case MHIOCGRP_PREEMPTANDABORT: 20077 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_PREEMPTANDABORT\n"); 20078 if ((err = drv_priv(cred_p)) != EPERM) { 20079 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 20080 err = ENOTSUP; 20081 } else if (arg != NULL) { 20082 mhioc_preemptandabort_t preempt_abort; 20083 if (ddi_copyin((void *)arg, &preempt_abort, 20084 sizeof (mhioc_preemptandabort_t), 20085 flag) != 0) { 20086 err = EFAULT; 20087 } else { 20088 err = 20089 sd_send_scsi_PERSISTENT_RESERVE_OUT( 20090 un, SD_SCSI3_PREEMPTANDABORT, 20091 (uchar_t *)&preempt_abort); 20092 } 20093 } 20094 } 20095 break; 20096 20097 case MHIOCGRP_REGISTERANDIGNOREKEY: 20098 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_REGISTERANDIGNOREKEY\n"); 20099 if ((err = drv_priv(cred_p)) != EPERM) { 20100 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 20101 err = ENOTSUP; 20102 } else if (arg != NULL) { 20103 mhioc_registerandignorekey_t r_and_i; 20104 if (ddi_copyin((void *)arg, (void *)&r_and_i, 20105 sizeof (mhioc_registerandignorekey_t), 20106 flag) != 0) { 20107 err = EFAULT; 20108 } else { 20109 err = 20110 sd_send_scsi_PERSISTENT_RESERVE_OUT( 20111 un, SD_SCSI3_REGISTERANDIGNOREKEY, 20112 (uchar_t *)&r_and_i); 20113 } 20114 } 20115 } 20116 break; 20117 20118 case USCSICMD: 20119 SD_TRACE(SD_LOG_IOCTL, un, "USCSICMD\n"); 20120 cr = ddi_get_cred(); 20121 if ((drv_priv(cred_p) != 0) && (drv_priv(cr) != 0)) { 20122 err = EPERM; 20123 } else { 20124 enum uio_seg uioseg; 20125 uioseg = (flag & FKIOCTL) ? UIO_SYSSPACE : 20126 UIO_USERSPACE; 20127 if (un->un_f_format_in_progress == TRUE) { 20128 err = EAGAIN; 20129 break; 20130 } 20131 err = sd_send_scsi_cmd(dev, (struct uscsi_cmd *)arg, 20132 flag, uioseg, SD_PATH_STANDARD); 20133 } 20134 break; 20135 20136 case CDROMPAUSE: 20137 case CDROMRESUME: 20138 SD_TRACE(SD_LOG_IOCTL, un, "PAUSE-RESUME\n"); 20139 if (!ISCD(un)) { 20140 err = ENOTTY; 20141 } else { 20142 err = sr_pause_resume(dev, cmd); 20143 } 20144 break; 20145 20146 case CDROMPLAYMSF: 20147 SD_TRACE(SD_LOG_IOCTL, un, "CDROMPLAYMSF\n"); 20148 if (!ISCD(un)) { 20149 err = ENOTTY; 20150 } else { 20151 err = sr_play_msf(dev, (caddr_t)arg, flag); 20152 } 20153 break; 20154 20155 case CDROMPLAYTRKIND: 20156 SD_TRACE(SD_LOG_IOCTL, un, "CDROMPLAYTRKIND\n"); 20157 #if defined(__i386) || defined(__amd64) 20158 /* 20159 * not supported on ATAPI CD drives, use CDROMPLAYMSF instead 20160 */ 20161 if (!ISCD(un) || (un->un_f_cfg_is_atapi == TRUE)) { 20162 #else 20163 if (!ISCD(un)) { 20164 #endif 20165 err = ENOTTY; 20166 } else { 20167 err = sr_play_trkind(dev, (caddr_t)arg, flag); 20168 } 20169 break; 20170 20171 case CDROMREADTOCHDR: 20172 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADTOCHDR\n"); 20173 if (!ISCD(un)) { 20174 err = ENOTTY; 20175 } else { 20176 err = sr_read_tochdr(dev, (caddr_t)arg, flag); 20177 } 20178 break; 20179 20180 case CDROMREADTOCENTRY: 20181 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADTOCENTRY\n"); 20182 if (!ISCD(un)) { 20183 err = ENOTTY; 20184 } else { 20185 err = sr_read_tocentry(dev, (caddr_t)arg, flag); 20186 } 20187 break; 20188 20189 case CDROMSTOP: 20190 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSTOP\n"); 20191 if (!ISCD(un)) { 20192 err = ENOTTY; 20193 } else { 20194 err = sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_STOP, 20195 SD_PATH_STANDARD); 20196 } 20197 break; 20198 20199 case CDROMSTART: 20200 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSTART\n"); 20201 if (!ISCD(un)) { 20202 err = ENOTTY; 20203 } else { 20204 err = sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_START, 20205 SD_PATH_STANDARD); 20206 } 20207 break; 
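/*
 * Illustrative sketch (not part of the driver): the CD-ROM audio cases
 * above are typically exercised from userland by reading the TOC header
 * and then playing a track range. The device path is a placeholder
 * assumption; the structures are those declared in <sys/cdio.h>:
 *
 *	#include <sys/cdio.h>
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	struct cdrom_tochdr th;
 *	struct cdrom_ti ti;
 *	int fd = open("/dev/rdsk/c0t6d0s2", O_RDONLY);	(placeholder path)
 *
 *	if (ioctl(fd, CDROMREADTOCHDR, &th) == 0) {
 *		ti.cdti_trk0 = th.cdth_trk0;	(first track, index 1)
 *		ti.cdti_ind0 = 1;
 *		ti.cdti_trk1 = th.cdth_trk1;	(last track, index 1)
 *		ti.cdti_ind1 = 1;
 *		(void) ioctl(fd, CDROMPLAYTRKIND, &ti);
 *	}
 *
 * On a non-CD device each of these cases fails with ENOTTY, per the
 * ISCD() checks above.
 */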
20208 20209 case CDROMCLOSETRAY: 20210 SD_TRACE(SD_LOG_IOCTL, un, "CDROMCLOSETRAY\n"); 20211 if (!ISCD(un)) { 20212 err = ENOTTY; 20213 } else { 20214 err = sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_CLOSE, 20215 SD_PATH_STANDARD); 20216 } 20217 break; 20218 20219 case FDEJECT: /* for eject command */ 20220 case DKIOCEJECT: 20221 case CDROMEJECT: 20222 SD_TRACE(SD_LOG_IOCTL, un, "EJECT\n"); 20223 if (!un->un_f_eject_media_supported) { 20224 err = ENOTTY; 20225 } else { 20226 err = sr_eject(dev); 20227 } 20228 break; 20229 20230 case CDROMVOLCTRL: 20231 SD_TRACE(SD_LOG_IOCTL, un, "CDROMVOLCTRL\n"); 20232 if (!ISCD(un)) { 20233 err = ENOTTY; 20234 } else { 20235 err = sr_volume_ctrl(dev, (caddr_t)arg, flag); 20236 } 20237 break; 20238 20239 case CDROMSUBCHNL: 20240 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSUBCHNL\n"); 20241 if (!ISCD(un)) { 20242 err = ENOTTY; 20243 } else { 20244 err = sr_read_subchannel(dev, (caddr_t)arg, flag); 20245 } 20246 break; 20247 20248 case CDROMREADMODE2: 20249 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADMODE2\n"); 20250 if (!ISCD(un)) { 20251 err = ENOTTY; 20252 } else if (un->un_f_cfg_is_atapi == TRUE) { 20253 /* 20254 * If the drive supports READ CD, use that instead of 20255 * switching the LBA size via a MODE SELECT 20256 * Block Descriptor 20257 */ 20258 err = sr_read_cd_mode2(dev, (caddr_t)arg, flag); 20259 } else { 20260 err = sr_read_mode2(dev, (caddr_t)arg, flag); 20261 } 20262 break; 20263 20264 case CDROMREADMODE1: 20265 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADMODE1\n"); 20266 if (!ISCD(un)) { 20267 err = ENOTTY; 20268 } else { 20269 err = sr_read_mode1(dev, (caddr_t)arg, flag); 20270 } 20271 break; 20272 20273 case CDROMREADOFFSET: 20274 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADOFFSET\n"); 20275 if (!ISCD(un)) { 20276 err = ENOTTY; 20277 } else { 20278 err = sr_read_sony_session_offset(dev, (caddr_t)arg, 20279 flag); 20280 } 20281 break; 20282 20283 case CDROMSBLKMODE: 20284 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSBLKMODE\n"); 20285 /* 20286 * There is no means of changing the block size on ATAPI 20287 * drives, thus return ENOTTY if the drive type is ATAPI 20288 */ 20289 if (!ISCD(un) || (un->un_f_cfg_is_atapi == TRUE)) { 20290 err = ENOTTY; 20291 } else if (un->un_f_mmc_cap == TRUE) { 20292 20293 /* 20294 * MMC Devices do not support changing the 20295 * logical block size 20296 * 20297 * Note: EINVAL is being returned instead of ENOTTY to 20298 * maintain consistency with the original mmc 20299 * driver update. 20300 */ 20301 err = EINVAL; 20302 } else { 20303 mutex_enter(SD_MUTEX(un)); 20304 if ((!(un->un_exclopen & (1<<SDPART(dev)))) || 20305 (un->un_ncmds_in_transport > 0)) { 20306 mutex_exit(SD_MUTEX(un)); 20307 err = EINVAL; 20308 } else { 20309 mutex_exit(SD_MUTEX(un)); 20310 err = sr_change_blkmode(dev, cmd, arg, flag); 20311 } 20312 } 20313 break; 20314 20315 case CDROMGBLKMODE: 20316 SD_TRACE(SD_LOG_IOCTL, un, "CDROMGBLKMODE\n"); 20317 if (!ISCD(un)) { 20318 err = ENOTTY; 20319 } else if ((un->un_f_cfg_is_atapi != FALSE) && 20320 (un->un_f_blockcount_is_valid != FALSE)) { 20321 /* 20322 * Drive is an ATAPI drive so return target block 20323 * size for ATAPI drives since we cannot change the 20324 * blocksize on ATAPI drives. Used primarily to detect 20325 * if an ATAPI cdrom is present. 20326 */ 20327 if (ddi_copyout(&un->un_tgt_blocksize, (void *)arg, 20328 sizeof (int), flag) != 0) { 20329 err = EFAULT; 20330 } else { 20331 err = 0; 20332 } 20333 20334 } else { 20335 /* 20336 * Drive supports changing block sizes via a Mode 20337 * Select. 20338 */ 20339 err = sr_change_blkmode(dev, cmd, arg, flag); 20340 } 20341 break; 20342 20343 case CDROMGDRVSPEED: 20344 case CDROMSDRVSPEED: 20345 SD_TRACE(SD_LOG_IOCTL, un, "CDROMXDRVSPEED\n"); 20346 if (!ISCD(un)) { 20347 err = ENOTTY; 20348 } else if (un->un_f_mmc_cap == TRUE) { 20349 /* 20350 * Note: In the future the driver implementation 20351 * for getting and 20352 * setting cd speed should entail: 20353 * 1) If non-mmc try the Toshiba mode page 20354 * (sr_change_speed) 20355 * 2) If mmc but no support for Real Time Streaming try 20356 * the SET CD SPEED (0xBB) command 20357 * (sr_atapi_change_speed) 20358 * 3) If mmc and support for Real Time Streaming 20359 * try the GET PERFORMANCE and SET STREAMING 20360 * commands (not yet implemented, 4380808) 20361 */ 20362 /* 20363 * As per recent MMC spec, CD-ROM speed is variable 20364 * and changes with LBA. Since there is no such 20365 * thing as drive speed now, fail this ioctl. 20366 * 20367 * Note: EINVAL is returned for consistency with the 20368 * original implementation, which included support for 20369 * getting the drive speed of mmc devices but not 20370 * setting the drive speed. Thus EINVAL would be 20371 * returned if a set request was made for an mmc device. 20372 * We no longer support get or set speed for 20373 * mmc but need to remain consistent with regard 20374 * to the error code returned. 20375 */ 20376 err = EINVAL; 20377 } else if (un->un_f_cfg_is_atapi == TRUE) { 20378 err = sr_atapi_change_speed(dev, cmd, arg, flag); 20379 } else { 20380 err = sr_change_speed(dev, cmd, arg, flag); 20381 } 20382 break; 20383 20384 case CDROMCDDA: 20385 SD_TRACE(SD_LOG_IOCTL, un, "CDROMCDDA\n"); 20386 if (!ISCD(un)) { 20387 err = ENOTTY; 20388 } else { 20389 err = sr_read_cdda(dev, (void *)arg, flag); 20390 } 20391 break; 20392 20393 case CDROMCDXA: 20394 SD_TRACE(SD_LOG_IOCTL, un, "CDROMCDXA\n"); 20395 if (!ISCD(un)) { 20396 err = ENOTTY; 20397 } else { 20398 err = sr_read_cdxa(dev, (caddr_t)arg, flag); 20399 } 20400 break; 20401 20402 case CDROMSUBCODE: 20403 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSUBCODE\n"); 20404 if (!ISCD(un)) { 20405 err = ENOTTY; 20406 } else { 20407 err = sr_read_all_subcodes(dev, (caddr_t)arg, flag); 20408 } 20409 break; 20410 20411 20412 #ifdef SDDEBUG 20413 /* RESET/ABORTS testing ioctls */ 20414 case DKIOCRESET: { 20415 int reset_level; 20416 20417 if (ddi_copyin((void *)arg, &reset_level, sizeof (int), flag)) { 20418 err = EFAULT; 20419 } else { 20420 SD_INFO(SD_LOG_IOCTL, un, "sdioctl: DKIOCRESET: " 20421 "reset_level = 0x%x\n", reset_level); 20422 if (scsi_reset(SD_ADDRESS(un), reset_level)) { 20423 err = 0; 20424 } else { 20425 err = EIO; 20426 } 20427 } 20428 break; 20429 } 20430 20431 case DKIOCABORT: 20432 SD_INFO(SD_LOG_IOCTL, un, "sdioctl: DKIOCABORT:\n"); 20433 if (scsi_abort(SD_ADDRESS(un), NULL)) { 20434 err = 0; 20435 } else { 20436 err = EIO; 20437 } 20438 break; 20439 #endif 20440 20441 #ifdef SD_FAULT_INJECTION 20442 /* SDIOC FaultInjection testing ioctls */ 20443 case SDIOCSTART: 20444 case SDIOCSTOP: 20445 case SDIOCINSERTPKT: 20446 case SDIOCINSERTXB: 20447 case SDIOCINSERTUN: 20448 case SDIOCINSERTARQ: 20449 case SDIOCPUSH: 20450 case SDIOCRETRIEVE: 20451 case SDIOCRUN: 20452 SD_INFO(SD_LOG_SDTEST, un, "sdioctl:" 20453 "SDIOC detected cmd:0x%X:\n", cmd); 20454 /* call error generator */ 20455 sd_faultinjection_ioctl(cmd, arg, un); 20456 err = 0; 20457 break; 20458 20459 #endif /* SD_FAULT_INJECTION */ 20460 20461 case DKIOCFLUSHWRITECACHE: 20462 { 20463 struct dk_callback *dkc = (struct
dk_callback *)arg; 20464 20465 mutex_enter(SD_MUTEX(un)); 20466 if (!un->un_f_sync_cache_supported || 20467 !un->un_f_write_cache_enabled) { 20468 err = un->un_f_sync_cache_supported ? 20469 0 : ENOTSUP; 20470 mutex_exit(SD_MUTEX(un)); 20471 if ((flag & FKIOCTL) && dkc != NULL && 20472 dkc->dkc_callback != NULL) { 20473 (*dkc->dkc_callback)(dkc->dkc_cookie, 20474 err); 20475 /* 20476 * Did callback and reported error. 20477 * Since we did a callback, ioctl 20478 * should return 0. 20479 */ 20480 err = 0; 20481 } 20482 break; 20483 } 20484 mutex_exit(SD_MUTEX(un)); 20485 20486 if ((flag & FKIOCTL) && dkc != NULL && 20487 dkc->dkc_callback != NULL) { 20488 /* async SYNC CACHE request */ 20489 err = sd_send_scsi_SYNCHRONIZE_CACHE(un, dkc); 20490 } else { 20491 /* synchronous SYNC CACHE request */ 20492 err = sd_send_scsi_SYNCHRONIZE_CACHE(un, NULL); 20493 } 20494 } 20495 break; 20496 20497 case DKIOCGETWCE: { 20498 20499 int wce; 20500 20501 if ((err = sd_get_write_cache_enabled(un, &wce)) != 0) { 20502 break; 20503 } 20504 20505 if (ddi_copyout(&wce, (void *)arg, sizeof (wce), flag)) { 20506 err = EFAULT; 20507 } 20508 break; 20509 } 20510 20511 case DKIOCSETWCE: { 20512 20513 int wce, sync_supported; 20514 20515 if (ddi_copyin((void *)arg, &wce, sizeof (wce), flag)) { 20516 err = EFAULT; 20517 break; 20518 } 20519 20520 /* 20521 * Synchronize multiple threads trying to enable 20522 * or disable the cache via the un_f_wcc_cv 20523 * condition variable. 20524 */ 20525 mutex_enter(SD_MUTEX(un)); 20526 20527 /* 20528 * Don't allow the cache to be enabled if the 20529 * config file has it disabled. 20530 */ 20531 if (un->un_f_opt_disable_cache && wce) { 20532 mutex_exit(SD_MUTEX(un)); 20533 err = EINVAL; 20534 break; 20535 } 20536 20537 /* 20538 * Wait for write cache change in progress 20539 * bit to be clear before proceeding. 20540 */ 20541 while (un->un_f_wcc_inprog) 20542 cv_wait(&un->un_wcc_cv, SD_MUTEX(un)); 20543 20544 un->un_f_wcc_inprog = 1; 20545 20546 if (un->un_f_write_cache_enabled && wce == 0) { 20547 /* 20548 * Disable the write cache. Don't clear 20549 * un_f_write_cache_enabled until after 20550 * the mode select and flush are complete. 20551 */ 20552 sync_supported = un->un_f_sync_cache_supported; 20553 20554 /* 20555 * If cache flush is suppressed, we assume that the 20556 * controller firmware will take care of managing the 20557 * write cache for us: no need to explicitly 20558 * disable it. 20559 */ 20560 if (!un->un_f_suppress_cache_flush) { 20561 mutex_exit(SD_MUTEX(un)); 20562 if ((err = sd_cache_control(un, 20563 SD_CACHE_NOCHANGE, 20564 SD_CACHE_DISABLE)) == 0 && 20565 sync_supported) { 20566 err = sd_send_scsi_SYNCHRONIZE_CACHE(un, 20567 NULL); 20568 } 20569 } else { 20570 mutex_exit(SD_MUTEX(un)); 20571 } 20572 20573 mutex_enter(SD_MUTEX(un)); 20574 if (err == 0) { 20575 un->un_f_write_cache_enabled = 0; 20576 } 20577 20578 } else if (!un->un_f_write_cache_enabled && wce != 0) { 20579 /* 20580 * Set un_f_write_cache_enabled first, so there is 20581 * no window where the cache is enabled, but the 20582 * bit says it isn't. 20583 */ 20584 un->un_f_write_cache_enabled = 1; 20585 20586 /* 20587 * If cache flush is suppressed, we assume that the 20588 * controller firmware will take care of managing the 20589 * write cache for us: no need to explicitly 20590 * enable it. 
20591 */ 20592 if (!un->un_f_suppress_cache_flush) { 20593 mutex_exit(SD_MUTEX(un)); 20594 err = sd_cache_control(un, SD_CACHE_NOCHANGE, 20595 SD_CACHE_ENABLE); 20596 } else { 20597 mutex_exit(SD_MUTEX(un)); 20598 } 20599 20600 mutex_enter(SD_MUTEX(un)); 20601 20602 if (err) { 20603 un->un_f_write_cache_enabled = 0; 20604 } 20605 } 20606 20607 un->un_f_wcc_inprog = 0; 20608 cv_broadcast(&un->un_wcc_cv); 20609 mutex_exit(SD_MUTEX(un)); 20610 break; 20611 } 20612 20613 default: 20614 err = ENOTTY; 20615 break; 20616 } 20617 mutex_enter(SD_MUTEX(un)); 20618 un->un_ncmds_in_driver--; 20619 ASSERT(un->un_ncmds_in_driver >= 0); 20620 mutex_exit(SD_MUTEX(un)); 20621 20622 SD_TRACE(SD_LOG_IOCTL, un, "sdioctl: exit: %d\n", err); 20623 return (err); 20624 } 20625 20626 20627 /* 20628 * Function: sd_dkio_ctrl_info 20629 * 20630 * Description: This routine is the driver entry point for handling controller 20631 * information ioctl requests (DKIOCINFO). 20632 * 20633 * Arguments: dev - the device number 20634 * arg - pointer to user provided dk_cinfo structure 20635 * specifying the controller type and attributes. 20636 * flag - this argument is a pass through to ddi_copyxxx() 20637 * directly from the mode argument of ioctl(). 20638 * 20639 * Return Code: 0 20640 * EFAULT 20641 * ENXIO 20642 */ 20643 20644 static int 20645 sd_dkio_ctrl_info(dev_t dev, caddr_t arg, int flag) 20646 { 20647 struct sd_lun *un = NULL; 20648 struct dk_cinfo *info; 20649 dev_info_t *pdip; 20650 int lun, tgt; 20651 20652 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 20653 return (ENXIO); 20654 } 20655 20656 info = (struct dk_cinfo *) 20657 kmem_zalloc(sizeof (struct dk_cinfo), KM_SLEEP); 20658 20659 switch (un->un_ctype) { 20660 case CTYPE_CDROM: 20661 info->dki_ctype = DKC_CDROM; 20662 break; 20663 default: 20664 info->dki_ctype = DKC_SCSI_CCS; 20665 break; 20666 } 20667 pdip = ddi_get_parent(SD_DEVINFO(un)); 20668 info->dki_cnum = ddi_get_instance(pdip); 20669 if (strlen(ddi_get_name(pdip)) < DK_DEVLEN) { 20670 (void) strcpy(info->dki_cname, ddi_get_name(pdip)); 20671 } else { 20672 (void) strncpy(info->dki_cname, ddi_node_name(pdip), 20673 DK_DEVLEN - 1); 20674 } 20675 20676 lun = ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un), 20677 DDI_PROP_DONTPASS, SCSI_ADDR_PROP_LUN, 0); 20678 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un), 20679 DDI_PROP_DONTPASS, SCSI_ADDR_PROP_TARGET, 0); 20680 20681 /* Unit Information */ 20682 info->dki_unit = ddi_get_instance(SD_DEVINFO(un)); 20683 info->dki_slave = ((tgt << 3) | lun); 20684 (void) strncpy(info->dki_dname, ddi_driver_name(SD_DEVINFO(un)), 20685 DK_DEVLEN - 1); 20686 info->dki_flags = DKI_FMTVOL; 20687 info->dki_partition = SDPART(dev); 20688 20689 /* Max Transfer size of this device in blocks */ 20690 info->dki_maxtransfer = un->un_max_xfer_size / un->un_sys_blocksize; 20691 info->dki_addr = 0; 20692 info->dki_space = 0; 20693 info->dki_prio = 0; 20694 info->dki_vec = 0; 20695 20696 if (ddi_copyout(info, arg, sizeof (struct dk_cinfo), flag) != 0) { 20697 kmem_free(info, sizeof (struct dk_cinfo)); 20698 return (EFAULT); 20699 } else { 20700 kmem_free(info, sizeof (struct dk_cinfo)); 20701 return (0); 20702 } 20703 } 20704 20705 20706 /* 20707 * Function: sd_get_media_info 20708 * 20709 * Description: This routine is the driver entry point for handling ioctl 20710 * requests for the media type or command set profile used by the 20711 * drive to operate on the media (DKIOCGMEDIAINFO). 
20712 * 20713 * Arguments: dev - the device number 20714 * arg - pointer to user provided dk_minfo structure 20715 * specifying the media type, logical block size and 20716 * drive capacity. 20717 * flag - this argument is a pass through to ddi_copyxxx() 20718 * directly from the mode argument of ioctl(). 20719 * 20720 * Return Code: 0 20721 * EACCES 20722 * EFAULT 20723 * ENXIO 20724 * EIO 20725 */ 20726 20727 static int 20728 sd_get_media_info(dev_t dev, caddr_t arg, int flag) 20729 { 20730 struct sd_lun *un = NULL; 20731 struct uscsi_cmd com; 20732 struct scsi_inquiry *sinq; 20733 struct dk_minfo media_info; 20734 u_longlong_t media_capacity; 20735 uint64_t capacity; 20736 uint_t lbasize; 20737 uchar_t *out_data; 20738 uchar_t *rqbuf; 20739 int rval = 0; 20740 int rtn; 20741 20742 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 20743 (un->un_state == SD_STATE_OFFLINE)) { 20744 return (ENXIO); 20745 } 20746 20747 SD_TRACE(SD_LOG_IOCTL_DKIO, un, "sd_get_media_info: entry\n"); 20748 20749 out_data = kmem_zalloc(SD_PROFILE_HEADER_LEN, KM_SLEEP); 20750 rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 20751 20752 /* Issue a TUR to determine if the drive is ready with media present */ 20753 rval = sd_send_scsi_TEST_UNIT_READY(un, SD_CHECK_FOR_MEDIA); 20754 if (rval == ENXIO) { 20755 goto done; 20756 } 20757 20758 /* Now get configuration data */ 20759 if (ISCD(un)) { 20760 media_info.dki_media_type = DK_CDROM; 20761 20762 /* Allow SCMD_GET_CONFIGURATION to MMC devices only */ 20763 if (un->un_f_mmc_cap == TRUE) { 20764 rtn = sd_send_scsi_GET_CONFIGURATION(un, &com, rqbuf, 20765 SENSE_LENGTH, out_data, SD_PROFILE_HEADER_LEN, 20766 SD_PATH_STANDARD); 20767 20768 if (rtn) { 20769 /* 20770 * Failed for other than an illegal request 20771 * or command not supported 20772 */ 20773 if ((com.uscsi_status == STATUS_CHECK) && 20774 (com.uscsi_rqstatus == STATUS_GOOD)) { 20775 if ((rqbuf[2] != KEY_ILLEGAL_REQUEST) || 20776 (rqbuf[12] != 0x20)) { 20777 rval = EIO; 20778 goto done; 20779 } 20780 } 20781 } else { 20782 /* 20783 * The GET CONFIGURATION command succeeded 20784 * so set the media type according to the 20785 * returned data 20786 */ 20787 media_info.dki_media_type = out_data[6]; 20788 media_info.dki_media_type <<= 8; 20789 media_info.dki_media_type |= out_data[7]; 20790 } 20791 } 20792 } else { 20793 /* 20794 * The profile list is not available, so we attempt to identify 20795 * the media type based on the inquiry data 20796 */ 20797 sinq = un->un_sd->sd_inq; 20798 if ((sinq->inq_dtype == DTYPE_DIRECT) || 20799 (sinq->inq_dtype == DTYPE_OPTICAL)) { 20800 /* This is a direct access device or optical disk */ 20801 media_info.dki_media_type = DK_FIXED_DISK; 20802 20803 if ((bcmp(sinq->inq_vid, "IOMEGA", 6) == 0) || 20804 (bcmp(sinq->inq_vid, "iomega", 6) == 0)) { 20805 if ((bcmp(sinq->inq_pid, "ZIP", 3) == 0)) { 20806 media_info.dki_media_type = DK_ZIP; 20807 } else if ( 20808 (bcmp(sinq->inq_pid, "jaz", 3) == 0)) { 20809 media_info.dki_media_type = DK_JAZ; 20810 } 20811 } 20812 } else { 20813 /* 20814 * Not a CD, direct access or optical disk so return 20815 * unknown media 20816 */ 20817 media_info.dki_media_type = DK_UNKNOWN; 20818 } 20819 } 20820 20821 /* Now read the capacity so we can provide the lbasize and capacity */ 20822 switch (sd_send_scsi_READ_CAPACITY(un, &capacity, &lbasize, 20823 SD_PATH_DIRECT)) { 20824 case 0: 20825 break; 20826 case EACCES: 20827 rval = EACCES; 20828 goto done; 20829 default: 20830 rval = EIO; 20831 goto done; 20832 } 20833 20834
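/*
 * Worked example of the conversion performed below: a disk reported as
 * 4194304 blocks of un_sys_blocksize 512 with a media lbasize of 2048
 * yields dki_capacity = 4194304 * 512 / 2048 = 1048576 media blocks.
 * (The figures are illustrative only.)
 */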
media_info.dki_lbsize = lbasize; 20835 media_capacity = capacity; 20836 20837 /* 20838 * sd_send_scsi_READ_CAPACITY() reports capacity in 20839 * un->un_sys_blocksize chunks. So we need to convert it into 20840 * media lbasize chunks. 20841 */ 20842 media_capacity *= un->un_sys_blocksize; 20843 media_capacity /= lbasize; 20844 media_info.dki_capacity = media_capacity; 20845 20846 if (ddi_copyout(&media_info, arg, sizeof (struct dk_minfo), flag)) { 20847 rval = EFAULT; 20848 /* Keep the goto; any code added below in future must still reach the cleanup */ 20849 goto done; 20850 } 20851 done: 20852 kmem_free(out_data, SD_PROFILE_HEADER_LEN); 20853 kmem_free(rqbuf, SENSE_LENGTH); 20854 return (rval); 20855 } 20856 20857 20858 /* 20859 * Function: sd_check_media 20860 * 20861 * Description: This utility routine implements the functionality for the 20862 * DKIOCSTATE ioctl. This ioctl blocks the user thread until the 20863 * driver state changes from that specified by the user 20864 * (inserted or ejected). For example, if the user specifies 20865 * DKIO_EJECTED and the current media state is inserted this 20866 * routine will immediately return DKIO_INSERTED. However, if the 20867 * current media state is not inserted the user thread will be 20868 * blocked until the drive state changes. If DKIO_NONE is specified 20869 * the user thread will block until a drive state change occurs. 20870 * 20871 * Arguments: dev - the device number 20872 * state - user pointer to a dkio_state, updated with the current 20873 * drive state at return. 20874 * 20875 * Return Code: ENXIO 20876 * EIO 20877 * EAGAIN 20878 * EINTR 20879 */ 20880 20881 static int 20882 sd_check_media(dev_t dev, enum dkio_state state) 20883 { 20884 struct sd_lun *un = NULL; 20885 enum dkio_state prev_state; 20886 opaque_t token = NULL; 20887 int rval = 0; 20888 20889 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 20890 return (ENXIO); 20891 } 20892 20893 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: entry\n"); 20894 20895 mutex_enter(SD_MUTEX(un)); 20896 20897 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: " 20898 "state=%x, mediastate=%x\n", state, un->un_mediastate); 20899 20900 prev_state = un->un_mediastate; 20901 20902 /* is there anything to do? */ 20903 if (state == un->un_mediastate || un->un_mediastate == DKIO_NONE) { 20904 /* 20905 * submit the request to the scsi_watch service; 20906 * sd_media_watch_cb() does the real work 20907 */ 20908 mutex_exit(SD_MUTEX(un)); 20909 20910 /* 20911 * This change handles the case where a scsi watch request is 20912 * added to a device that is powered down. To accomplish this 20913 * we power up the device before adding the scsi watch request, 20914 * since the scsi watch sends a TUR directly to the device 20915 * which the device cannot handle if it is powered down. 20916 */ 20917 if (sd_pm_entry(un) != DDI_SUCCESS) { 20918 mutex_enter(SD_MUTEX(un)); 20919 goto done; 20920 } 20921 20922 token = scsi_watch_request_submit(SD_SCSI_DEVP(un), 20923 sd_check_media_time, SENSE_LENGTH, sd_media_watch_cb, 20924 (caddr_t)dev); 20925 20926 sd_pm_exit(un); 20927 20928 mutex_enter(SD_MUTEX(un)); 20929 if (token == NULL) { 20930 rval = EAGAIN; 20931 goto done; 20932 } 20933 20934 /* 20935 * This is a special case IOCTL that doesn't return 20936 * until the media state changes. Routine sdpower 20937 * knows about and handles this so don't count it 20938 * as an active cmd in the driver, which would 20939 * keep the device busy to the pm framework.
20940 * If the count isn't decremented the device can't 20941 * be powered down. 20942 */ 20943 un->un_ncmds_in_driver--; 20944 ASSERT(un->un_ncmds_in_driver >= 0); 20945 20946 /* 20947 * if a prior request had been made, this will be the same 20948 * token, as scsi_watch was designed that way. 20949 */ 20950 un->un_swr_token = token; 20951 un->un_specified_mediastate = state; 20952 20953 /* 20954 * now wait for the media change; we will not be signalled 20955 * until mediastate != state, but it is still better to test 20956 * for this condition, since there is a 20957 * 2 sec cv_broadcast delay when mediastate == DKIO_INSERTED 20958 */ 20959 SD_TRACE(SD_LOG_COMMON, un, 20960 "sd_check_media: waiting for media state change\n"); 20961 while (un->un_mediastate == state) { 20962 if (cv_wait_sig(&un->un_state_cv, SD_MUTEX(un)) == 0) { 20963 SD_TRACE(SD_LOG_COMMON, un, 20964 "sd_check_media: waiting for media state " 20965 "was interrupted\n"); 20966 un->un_ncmds_in_driver++; 20967 rval = EINTR; 20968 goto done; 20969 } 20970 SD_TRACE(SD_LOG_COMMON, un, 20971 "sd_check_media: received signal, state=%x\n", 20972 un->un_mediastate); 20973 } 20974 /* 20975 * Increment the counter to indicate the device once again 20976 * has an active outstanding cmd. 20977 */ 20978 un->un_ncmds_in_driver++; 20979 } 20980 20981 /* invalidate geometry */ 20982 if (prev_state == DKIO_INSERTED && un->un_mediastate == DKIO_EJECTED) { 20983 sr_ejected(un); 20984 } 20985 20986 if (un->un_mediastate == DKIO_INSERTED && prev_state != DKIO_INSERTED) { 20987 uint64_t capacity; 20988 uint_t lbasize; 20989 20990 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: media inserted\n"); 20991 mutex_exit(SD_MUTEX(un)); 20992 /* 20993 * Since the following routines use SD_PATH_DIRECT, we must 20994 * call PM directly before the upcoming disk accesses. This 20995 * may cause the disk to be powered up and spun up. 20996 */ 20997 20998 if (sd_pm_entry(un) == DDI_SUCCESS) { 20999 rval = sd_send_scsi_READ_CAPACITY(un, 21000 &capacity, 21001 &lbasize, SD_PATH_DIRECT); 21002 if (rval != 0) { 21003 sd_pm_exit(un); 21004 mutex_enter(SD_MUTEX(un)); 21005 goto done; 21006 } 21007 } else { 21008 rval = EIO; 21009 mutex_enter(SD_MUTEX(un)); 21010 goto done; 21011 } 21012 mutex_enter(SD_MUTEX(un)); 21013 21014 sd_update_block_info(un, lbasize, capacity); 21015 21016 /* 21017 * Check if the media in the device is writable or not 21018 */ 21019 if (ISCD(un)) 21020 sd_check_for_writable_cd(un, SD_PATH_DIRECT); 21021 21022 mutex_exit(SD_MUTEX(un)); 21023 cmlb_invalidate(un->un_cmlbhandle, (void *)SD_PATH_DIRECT); 21024 if ((cmlb_validate(un->un_cmlbhandle, 0, 21025 (void *)SD_PATH_DIRECT) == 0) && un->un_f_pkstats_enabled) { 21026 sd_set_pstats(un); 21027 SD_TRACE(SD_LOG_IO_PARTITION, un, 21028 "sd_check_media: un:0x%p pstats created and " 21029 "set\n", un); 21030 } 21031 21032 rval = sd_send_scsi_DOORLOCK(un, SD_REMOVAL_PREVENT, 21033 SD_PATH_DIRECT); 21034 sd_pm_exit(un); 21035 21036 mutex_enter(SD_MUTEX(un)); 21037 } 21038 done: 21039 un->un_f_watcht_stopped = FALSE; 21040 if (un->un_swr_token) { 21041 /* 21042 * Use of this local token and the mutex ensures that we avoid 21043 * some race conditions associated with terminating the 21044 * scsi watch.
21045 */ 21046 token = un->un_swr_token; 21047 un->un_swr_token = (opaque_t)NULL; 21048 mutex_exit(SD_MUTEX(un)); 21049 (void) scsi_watch_request_terminate(token, 21050 SCSI_WATCH_TERMINATE_WAIT); 21051 mutex_enter(SD_MUTEX(un)); 21052 } 21053 21054 /* 21055 * Update the capacity kstat value, if no media previously 21056 * (capacity kstat is 0) and a media has been inserted 21057 * (un_f_blockcount_is_valid == TRUE) 21058 */ 21059 if (un->un_errstats) { 21060 struct sd_errstats *stp = NULL; 21061 21062 stp = (struct sd_errstats *)un->un_errstats->ks_data; 21063 if ((stp->sd_capacity.value.ui64 == 0) && 21064 (un->un_f_blockcount_is_valid == TRUE)) { 21065 stp->sd_capacity.value.ui64 = 21066 (uint64_t)((uint64_t)un->un_blockcount * 21067 un->un_sys_blocksize); 21068 } 21069 } 21070 mutex_exit(SD_MUTEX(un)); 21071 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: done\n"); 21072 return (rval); 21073 } 21074 21075 21076 /* 21077 * Function: sd_delayed_cv_broadcast 21078 * 21079 * Description: Delayed cv_broadcast to allow for target to recover from media 21080 * insertion. 21081 * 21082 * Arguments: arg - driver soft state (unit) structure 21083 */ 21084 21085 static void 21086 sd_delayed_cv_broadcast(void *arg) 21087 { 21088 struct sd_lun *un = arg; 21089 21090 SD_TRACE(SD_LOG_COMMON, un, "sd_delayed_cv_broadcast\n"); 21091 21092 mutex_enter(SD_MUTEX(un)); 21093 un->un_dcvb_timeid = NULL; 21094 cv_broadcast(&un->un_state_cv); 21095 mutex_exit(SD_MUTEX(un)); 21096 } 21097 21098 21099 /* 21100 * Function: sd_media_watch_cb 21101 * 21102 * Description: Callback routine used for support of the DKIOCSTATE ioctl. This 21103 * routine processes the TUR sense data and updates the driver 21104 * state if a transition has occurred. The user thread 21105 * (sd_check_media) is then signalled. 
21106 * 21107 * Arguments: arg - the device 'dev_t' is used for context to discriminate 21108 * among multiple watches that share this callback function 21109 * resultp - scsi watch facility result packet containing scsi 21110 * packet, status byte and sense data 21111 * 21112 * Return Code: 0 for success, -1 for failure 21113 */ 21114 21115 static int 21116 sd_media_watch_cb(caddr_t arg, struct scsi_watch_result *resultp) 21117 { 21118 struct sd_lun *un; 21119 struct scsi_status *statusp = resultp->statusp; 21120 uint8_t *sensep = (uint8_t *)resultp->sensep; 21121 enum dkio_state state = DKIO_NONE; 21122 dev_t dev = (dev_t)arg; 21123 uchar_t actual_sense_length; 21124 uint8_t skey, asc, ascq; 21125 21126 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21127 return (-1); 21128 } 21129 actual_sense_length = resultp->actual_sense_length; 21130 21131 mutex_enter(SD_MUTEX(un)); 21132 SD_TRACE(SD_LOG_COMMON, un, 21133 "sd_media_watch_cb: status=%x, sensep=%p, len=%x\n", 21134 *((char *)statusp), (void *)sensep, actual_sense_length); 21135 21136 if (resultp->pkt->pkt_reason == CMD_DEV_GONE) { 21137 un->un_mediastate = DKIO_DEV_GONE; 21138 cv_broadcast(&un->un_state_cv); 21139 mutex_exit(SD_MUTEX(un)); 21140 21141 return (0); 21142 } 21143 21144 /* 21145 * If there was a check condition then sensep points to valid sense data 21146 * If status was not a check condition but a reservation or busy status 21147 * then the new state is DKIO_NONE 21148 */ 21149 if (sensep != NULL) { 21150 skey = scsi_sense_key(sensep); 21151 asc = scsi_sense_asc(sensep); 21152 ascq = scsi_sense_ascq(sensep); 21153 21154 SD_INFO(SD_LOG_COMMON, un, 21155 "sd_media_watch_cb: sense KEY=%x, ASC=%x, ASCQ=%x\n", 21156 skey, asc, ascq); 21157 /* This routine only uses up to 13 bytes of sense data. */ 21158 if (actual_sense_length >= 13) { 21159 if (skey == KEY_UNIT_ATTENTION) { 21160 if (asc == 0x28) { 21161 state = DKIO_INSERTED; 21162 } 21163 } else if (skey == KEY_NOT_READY) { 21164 /* 21165 * if 02/04/02 means that the host 21166 * should send start command. Explicitly 21167 * leave the media state as is 21168 * (inserted) as the media is inserted 21169 * and host has stopped device for PM 21170 * reasons. Upon next true read/write 21171 * to this media will bring the 21172 * device to the right state good for 21173 * media access. 21174 */ 21175 if (asc == 0x3a) { 21176 state = DKIO_EJECTED; 21177 } else { 21178 /* 21179 * If the drive is busy with an 21180 * operation or long write, keep the 21181 * media in an inserted state. 21182 */ 21183 21184 if ((asc == 0x04) && 21185 ((ascq == 0x02) || 21186 (ascq == 0x07) || 21187 (ascq == 0x08))) { 21188 state = DKIO_INSERTED; 21189 } 21190 } 21191 } else if (skey == KEY_NO_SENSE) { 21192 if ((asc == 0x00) && (ascq == 0x00)) { 21193 /* 21194 * Sense Data 00/00/00 does not provide 21195 * any information about the state of 21196 * the media. Ignore it. 
21197 */ 21198 mutex_exit(SD_MUTEX(un)); 21199 return (0); 21200 } 21201 } 21202 } 21203 } else if ((*((char *)statusp) == STATUS_GOOD) && 21204 (resultp->pkt->pkt_reason == CMD_CMPLT)) { 21205 state = DKIO_INSERTED; 21206 } 21207 21208 SD_TRACE(SD_LOG_COMMON, un, 21209 "sd_media_watch_cb: state=%x, specified=%x\n", 21210 state, un->un_specified_mediastate); 21211 21212 /* 21213 * now signal the waiting thread if this is *not* the specified state; 21214 * delay the signal if the state is DKIO_INSERTED to allow the target 21215 * to recover 21216 */ 21217 if (state != un->un_specified_mediastate) { 21218 un->un_mediastate = state; 21219 if (state == DKIO_INSERTED) { 21220 /* 21221 * delay the signal to give the drive a chance 21222 * to do what it apparently needs to do 21223 */ 21224 SD_TRACE(SD_LOG_COMMON, un, 21225 "sd_media_watch_cb: delayed cv_broadcast\n"); 21226 if (un->un_dcvb_timeid == NULL) { 21227 un->un_dcvb_timeid = 21228 timeout(sd_delayed_cv_broadcast, un, 21229 drv_usectohz((clock_t)MEDIA_ACCESS_DELAY)); 21230 } 21231 } else { 21232 SD_TRACE(SD_LOG_COMMON, un, 21233 "sd_media_watch_cb: immediate cv_broadcast\n"); 21234 cv_broadcast(&un->un_state_cv); 21235 } 21236 } 21237 mutex_exit(SD_MUTEX(un)); 21238 return (0); 21239 } 21240 21241 21242 /* 21243 * Function: sd_dkio_get_temp 21244 * 21245 * Description: This routine is the driver entry point for handling ioctl 21246 * requests to get the disk temperature. 21247 * 21248 * Arguments: dev - the device number 21249 * arg - pointer to user provided dk_temperature structure. 21250 * flag - this argument is a pass through to ddi_copyxxx() 21251 * directly from the mode argument of ioctl(). 21252 * 21253 * Return Code: 0 21254 * EFAULT 21255 * ENXIO 21256 * EAGAIN 21257 */ 21258 21259 static int 21260 sd_dkio_get_temp(dev_t dev, caddr_t arg, int flag) 21261 { 21262 struct sd_lun *un = NULL; 21263 struct dk_temperature *dktemp = NULL; 21264 uchar_t *temperature_page; 21265 int rval = 0; 21266 int path_flag = SD_PATH_STANDARD; 21267 21268 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21269 return (ENXIO); 21270 } 21271 21272 dktemp = kmem_zalloc(sizeof (struct dk_temperature), KM_SLEEP); 21273 21274 /* copyin the disk temp argument to get the user flags */ 21275 if (ddi_copyin((void *)arg, dktemp, 21276 sizeof (struct dk_temperature), flag) != 0) { 21277 rval = EFAULT; 21278 goto done; 21279 } 21280 21281 /* Initialize the temperature to invalid. */ 21282 dktemp->dkt_cur_temp = (short)DKT_INVALID_TEMP; 21283 dktemp->dkt_ref_temp = (short)DKT_INVALID_TEMP; 21284 21285 /* 21286 * Note: Investigate removing the "bypass pm" semantic. 21287 * Can we just bypass PM always? 21288 */ 21289 if (dktemp->dkt_flags & DKT_BYPASS_PM) { 21290 path_flag = SD_PATH_DIRECT; 21291 ASSERT(!mutex_owned(&un->un_pm_mutex)); 21292 mutex_enter(&un->un_pm_mutex); 21293 if (SD_DEVICE_IS_IN_LOW_POWER(un)) { 21294 /* 21295 * If DKT_BYPASS_PM is set, and the drive happens to be 21296 * in low power mode, we can not wake it up, Need to 21297 * return EAGAIN. 21298 */ 21299 mutex_exit(&un->un_pm_mutex); 21300 rval = EAGAIN; 21301 goto done; 21302 } else { 21303 /* 21304 * Indicate to PM the device is busy. This is required 21305 * to avoid a race - i.e. the ioctl is issuing a 21306 * command and the pm framework brings down the device 21307 * to low power mode (possible power cut-off on some 21308 * platforms). 
21309 */ 21310 mutex_exit(&un->un_pm_mutex); 21311 if (sd_pm_entry(un) != DDI_SUCCESS) { 21312 rval = EAGAIN; 21313 goto done; 21314 } 21315 } 21316 } 21317 21318 temperature_page = kmem_zalloc(TEMPERATURE_PAGE_SIZE, KM_SLEEP); 21319 21320 if ((rval = sd_send_scsi_LOG_SENSE(un, temperature_page, 21321 TEMPERATURE_PAGE_SIZE, TEMPERATURE_PAGE, 1, 0, path_flag)) != 0) { 21322 goto done2; 21323 } 21324 21325 /* 21326 * For the current temperature verify that the parameter length is 0x02 21327 * and the parameter code is 0x00 21328 */ 21329 if ((temperature_page[7] == 0x02) && (temperature_page[4] == 0x00) && 21330 (temperature_page[5] == 0x00)) { 21331 if (temperature_page[9] == 0xFF) { 21332 dktemp->dkt_cur_temp = (short)DKT_INVALID_TEMP; 21333 } else { 21334 dktemp->dkt_cur_temp = (short)(temperature_page[9]); 21335 } 21336 } 21337 21338 /* 21339 * For the reference temperature verify that the parameter 21340 * length is 0x02 and the parameter code is 0x01 21341 */ 21342 if ((temperature_page[13] == 0x02) && (temperature_page[10] == 0x00) && 21343 (temperature_page[11] == 0x01)) { 21344 if (temperature_page[15] == 0xFF) { 21345 dktemp->dkt_ref_temp = (short)DKT_INVALID_TEMP; 21346 } else { 21347 dktemp->dkt_ref_temp = (short)(temperature_page[15]); 21348 } 21349 } 21350 21351 /* Do the copyout regardless of the temperature command's status. */ 21352 if (ddi_copyout(dktemp, (void *)arg, sizeof (struct dk_temperature), 21353 flag) != 0) { 21354 rval = EFAULT; 21355 } 21356 21357 done2: 21358 if (path_flag == SD_PATH_DIRECT) { 21359 sd_pm_exit(un); 21360 } 21361 21362 kmem_free(temperature_page, TEMPERATURE_PAGE_SIZE); 21363 done: 21364 if (dktemp != NULL) { 21365 kmem_free(dktemp, sizeof (struct dk_temperature)); 21366 } 21367 21368 return (rval); 21369 } 21370 21371 21372 /* 21373 * Function: sd_log_page_supported 21374 * 21375 * Description: This routine uses sd_send_scsi_LOG_SENSE to find the list of 21376 * supported log pages. 21377 * 21378 * Arguments: un - driver soft state (unit) structure 21379 * log_page - the log page code to look for 21380 * 21381 * Return Code: -1 - on error (log sense is optional and may not be supported). 21382 * 0 - log page not found. 21383 * 1 - log page found. 21384 */ 21385 21386 static int 21387 sd_log_page_supported(struct sd_lun *un, int log_page) 21388 { 21389 uchar_t *log_page_data; 21390 int i; 21391 int match = 0; 21392 int log_size; 21393 21394 log_page_data = kmem_zalloc(0xFF, KM_SLEEP); 21395 21396 if (sd_send_scsi_LOG_SENSE(un, log_page_data, 0xFF, 0, 0x01, 0, 21397 SD_PATH_DIRECT) != 0) { 21398 SD_ERROR(SD_LOG_COMMON, un, 21399 "sd_log_page_supported: failed log page retrieval\n"); 21400 kmem_free(log_page_data, 0xFF); 21401 return (-1); 21402 } 21403 log_size = log_page_data[3]; 21404 21405 /* 21406 * The list of supported log pages starts at the fourth byte. Check 21407 * until we run out of log pages or a match is found. 21408 */ 21409 for (i = 4; (i < (log_size + 4)) && !match; i++) { 21410 if (log_page_data[i] == log_page) { 21411 match++; 21412 } 21413 } 21414 kmem_free(log_page_data, 0xFF); 21415 return (match); 21416 } 21417 21418 21419 /* 21420 * Function: sd_mhdioc_failfast 21421 * 21422 * Description: This routine is the driver entry point for handling ioctl 21423 * requests to enable/disable the multihost failfast option. 21424 * (MHIOCENFAILFAST) 21425 * 21426 * Arguments: dev - the device number 21427 * arg - user specified probing interval. 21428 * flag - this argument is a pass through to ddi_copyxxx() 21429 * directly from the mode argument of ioctl().
21430 * 21431 * Return Code: 0 21432 * EFAULT 21433 * ENXIO 21434 */ 21435 21436 static int 21437 sd_mhdioc_failfast(dev_t dev, caddr_t arg, int flag) 21438 { 21439 struct sd_lun *un = NULL; 21440 int mh_time; 21441 int rval = 0; 21442 21443 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21444 return (ENXIO); 21445 } 21446 21447 if (ddi_copyin((void *)arg, &mh_time, sizeof (int), flag)) 21448 return (EFAULT); 21449 21450 if (mh_time) { 21451 mutex_enter(SD_MUTEX(un)); 21452 un->un_resvd_status |= SD_FAILFAST; 21453 mutex_exit(SD_MUTEX(un)); 21454 /* 21455 * If mh_time is INT_MAX, then this ioctl is being used for 21456 * SCSI-3 PGR purposes, and we don't need to spawn watch thread. 21457 */ 21458 if (mh_time != INT_MAX) { 21459 rval = sd_check_mhd(dev, mh_time); 21460 } 21461 } else { 21462 (void) sd_check_mhd(dev, 0); 21463 mutex_enter(SD_MUTEX(un)); 21464 un->un_resvd_status &= ~SD_FAILFAST; 21465 mutex_exit(SD_MUTEX(un)); 21466 } 21467 return (rval); 21468 } 21469 21470 21471 /* 21472 * Function: sd_mhdioc_takeown 21473 * 21474 * Description: This routine is the driver entry point for handling ioctl 21475 * requests to forcefully acquire exclusive access rights to the 21476 * multihost disk (MHIOCTKOWN). 21477 * 21478 * Arguments: dev - the device number 21479 * arg - user provided structure specifying the delay 21480 * parameters in milliseconds 21481 * flag - this argument is a pass through to ddi_copyxxx() 21482 * directly from the mode argument of ioctl(). 21483 * 21484 * Return Code: 0 21485 * EFAULT 21486 * ENXIO 21487 */ 21488 21489 static int 21490 sd_mhdioc_takeown(dev_t dev, caddr_t arg, int flag) 21491 { 21492 struct sd_lun *un = NULL; 21493 struct mhioctkown *tkown = NULL; 21494 int rval = 0; 21495 21496 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21497 return (ENXIO); 21498 } 21499 21500 if (arg != NULL) { 21501 tkown = (struct mhioctkown *) 21502 kmem_zalloc(sizeof (struct mhioctkown), KM_SLEEP); 21503 rval = ddi_copyin(arg, tkown, sizeof (struct mhioctkown), flag); 21504 if (rval != 0) { 21505 rval = EFAULT; 21506 goto error; 21507 } 21508 } 21509 21510 rval = sd_take_ownership(dev, tkown); 21511 mutex_enter(SD_MUTEX(un)); 21512 if (rval == 0) { 21513 un->un_resvd_status |= SD_RESERVE; 21514 if (tkown != NULL && tkown->reinstate_resv_delay != 0) { 21515 sd_reinstate_resv_delay = 21516 tkown->reinstate_resv_delay * 1000; 21517 } else { 21518 sd_reinstate_resv_delay = SD_REINSTATE_RESV_DELAY; 21519 } 21520 /* 21521 * Give the scsi_watch routine interval set by 21522 * the MHIOCENFAILFAST ioctl precedence here. 21523 */ 21524 if ((un->un_resvd_status & SD_FAILFAST) == 0) { 21525 mutex_exit(SD_MUTEX(un)); 21526 (void) sd_check_mhd(dev, sd_reinstate_resv_delay/1000); 21527 SD_TRACE(SD_LOG_IOCTL_MHD, un, 21528 "sd_mhdioc_takeown : %d\n", 21529 sd_reinstate_resv_delay); 21530 } else { 21531 mutex_exit(SD_MUTEX(un)); 21532 } 21533 (void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_NOTIFY, 21534 sd_mhd_reset_notify_cb, (caddr_t)un); 21535 } else { 21536 un->un_resvd_status &= ~SD_RESERVE; 21537 mutex_exit(SD_MUTEX(un)); 21538 } 21539 21540 error: 21541 if (tkown != NULL) { 21542 kmem_free(tkown, sizeof (struct mhioctkown)); 21543 } 21544 return (rval); 21545 } 21546 21547 21548 /* 21549 * Function: sd_mhdioc_release 21550 * 21551 * Description: This routine is the driver entry point for handling ioctl 21552 * requests to release exclusive access rights to the multihost 21553 * disk (MHIOCRELEASE). 
21554 * 21555 * Arguments: dev - the device number 21556 * 21557 * Return Code: 0 21558 * ENXIO 21559 */ 21560 21561 static int 21562 sd_mhdioc_release(dev_t dev) 21563 { 21564 struct sd_lun *un = NULL; 21565 timeout_id_t resvd_timeid_save; 21566 int resvd_status_save; 21567 int rval = 0; 21568 21569 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21570 return (ENXIO); 21571 } 21572 21573 mutex_enter(SD_MUTEX(un)); 21574 resvd_status_save = un->un_resvd_status; 21575 un->un_resvd_status &= 21576 ~(SD_RESERVE | SD_LOST_RESERVE | SD_WANT_RESERVE); 21577 if (un->un_resvd_timeid) { 21578 resvd_timeid_save = un->un_resvd_timeid; 21579 un->un_resvd_timeid = NULL; 21580 mutex_exit(SD_MUTEX(un)); 21581 (void) untimeout(resvd_timeid_save); 21582 } else { 21583 mutex_exit(SD_MUTEX(un)); 21584 } 21585 21586 /* 21587 * destroy any pending timeout thread that may be attempting to 21588 * reinstate reservation on this device. 21589 */ 21590 sd_rmv_resv_reclaim_req(dev); 21591 21592 if ((rval = sd_reserve_release(dev, SD_RELEASE)) == 0) { 21593 mutex_enter(SD_MUTEX(un)); 21594 if ((un->un_mhd_token) && 21595 ((un->un_resvd_status & SD_FAILFAST) == 0)) { 21596 mutex_exit(SD_MUTEX(un)); 21597 (void) sd_check_mhd(dev, 0); 21598 } else { 21599 mutex_exit(SD_MUTEX(un)); 21600 } 21601 (void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_CANCEL, 21602 sd_mhd_reset_notify_cb, (caddr_t)un); 21603 } else { 21604 /* 21605 * sd_mhd_watch_cb will restart the resvd recover timeout thread 21606 */ 21607 mutex_enter(SD_MUTEX(un)); 21608 un->un_resvd_status = resvd_status_save; 21609 mutex_exit(SD_MUTEX(un)); 21610 } 21611 return (rval); 21612 } 21613 21614 21615 /* 21616 * Function: sd_mhdioc_register_devid 21617 * 21618 * Description: This routine is the driver entry point for handling ioctl 21619 * requests to register the device id (MHIOCREREGISTERDEVID). 21620 * 21621 * Note: The implementation for this ioctl has been updated to 21622 * be consistent with the original PSARC case (1999/357) 21623 * (4375899, 4241671, 4220005) 21624 * 21625 * Arguments: dev - the device number 21626 * 21627 * Return Code: 0 21628 * ENXIO 21629 */ 21630 21631 static int 21632 sd_mhdioc_register_devid(dev_t dev) 21633 { 21634 struct sd_lun *un = NULL; 21635 int rval = 0; 21636 21637 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21638 return (ENXIO); 21639 } 21640 21641 ASSERT(!mutex_owned(SD_MUTEX(un))); 21642 21643 mutex_enter(SD_MUTEX(un)); 21644 21645 /* If a devid already exists, de-register it */ 21646 if (un->un_devid != NULL) { 21647 ddi_devid_unregister(SD_DEVINFO(un)); 21648 /* 21649 * After unregister devid, needs to free devid memory 21650 */ 21651 ddi_devid_free(un->un_devid); 21652 un->un_devid = NULL; 21653 } 21654 21655 /* Check for reservation conflict */ 21656 mutex_exit(SD_MUTEX(un)); 21657 rval = sd_send_scsi_TEST_UNIT_READY(un, 0); 21658 mutex_enter(SD_MUTEX(un)); 21659 21660 switch (rval) { 21661 case 0: 21662 sd_register_devid(un, SD_DEVINFO(un), SD_TARGET_IS_UNRESERVED); 21663 break; 21664 case EACCES: 21665 break; 21666 default: 21667 rval = EIO; 21668 } 21669 21670 mutex_exit(SD_MUTEX(un)); 21671 return (rval); 21672 } 21673 21674 21675 /* 21676 * Function: sd_mhdioc_inkeys 21677 * 21678 * Description: This routine is the driver entry point for handling ioctl 21679 * requests to issue the SCSI-3 Persistent In Read Keys command 21680 * to the device (MHIOCGRP_INKEYS). 
21681 * 21682 * Arguments: dev - the device number 21683 * arg - user provided in_keys structure 21684 * flag - this argument is a pass through to ddi_copyxxx() 21685 * directly from the mode argument of ioctl(). 21686 * 21687 * Return Code: code returned by sd_persistent_reservation_in_read_keys() 21688 * ENXIO 21689 * EFAULT 21690 */ 21691 21692 static int 21693 sd_mhdioc_inkeys(dev_t dev, caddr_t arg, int flag) 21694 { 21695 struct sd_lun *un; 21696 mhioc_inkeys_t inkeys; 21697 int rval = 0; 21698 21699 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21700 return (ENXIO); 21701 } 21702 21703 #ifdef _MULTI_DATAMODEL 21704 switch (ddi_model_convert_from(flag & FMODELS)) { 21705 case DDI_MODEL_ILP32: { 21706 struct mhioc_inkeys32 inkeys32; 21707 21708 if (ddi_copyin(arg, &inkeys32, 21709 sizeof (struct mhioc_inkeys32), flag) != 0) { 21710 return (EFAULT); 21711 } 21712 inkeys.li = (mhioc_key_list_t *)(uintptr_t)inkeys32.li; 21713 if ((rval = sd_persistent_reservation_in_read_keys(un, 21714 &inkeys, flag)) != 0) { 21715 return (rval); 21716 } 21717 inkeys32.generation = inkeys.generation; 21718 if (ddi_copyout(&inkeys32, arg, sizeof (struct mhioc_inkeys32), 21719 flag) != 0) { 21720 return (EFAULT); 21721 } 21722 break; 21723 } 21724 case DDI_MODEL_NONE: 21725 if (ddi_copyin(arg, &inkeys, sizeof (mhioc_inkeys_t), 21726 flag) != 0) { 21727 return (EFAULT); 21728 } 21729 if ((rval = sd_persistent_reservation_in_read_keys(un, 21730 &inkeys, flag)) != 0) { 21731 return (rval); 21732 } 21733 if (ddi_copyout(&inkeys, arg, sizeof (mhioc_inkeys_t), 21734 flag) != 0) { 21735 return (EFAULT); 21736 } 21737 break; 21738 } 21739 21740 #else /* ! _MULTI_DATAMODEL */ 21741 21742 if (ddi_copyin(arg, &inkeys, sizeof (mhioc_inkeys_t), flag) != 0) { 21743 return (EFAULT); 21744 } 21745 rval = sd_persistent_reservation_in_read_keys(un, &inkeys, flag); 21746 if (rval != 0) { 21747 return (rval); 21748 } 21749 if (ddi_copyout(&inkeys, arg, sizeof (mhioc_inkeys_t), flag) != 0) { 21750 return (EFAULT); 21751 } 21752 21753 #endif /* _MULTI_DATAMODEL */ 21754 21755 return (rval); 21756 } 21757 21758 21759 /* 21760 * Function: sd_mhdioc_inresv 21761 * 21762 * Description: This routine is the driver entry point for handling ioctl 21763 * requests to issue the SCSI-3 Persistent In Read Reservations 21764 * command to the device (MHIOCGRP_INKEYS). 21765 * 21766 * Arguments: dev - the device number 21767 * arg - user provided in_resv structure 21768 * flag - this argument is a pass through to ddi_copyxxx() 21769 * directly from the mode argument of ioctl(). 
21770 * 21771 * Return Code: code returned by sd_persistent_reservation_in_read_resv() 21772 * ENXIO 21773 * EFAULT 21774 */ 21775 21776 static int 21777 sd_mhdioc_inresv(dev_t dev, caddr_t arg, int flag) 21778 { 21779 struct sd_lun *un; 21780 mhioc_inresvs_t inresvs; 21781 int rval = 0; 21782 21783 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21784 return (ENXIO); 21785 } 21786 21787 #ifdef _MULTI_DATAMODEL 21788 21789 switch (ddi_model_convert_from(flag & FMODELS)) { 21790 case DDI_MODEL_ILP32: { 21791 struct mhioc_inresvs32 inresvs32; 21792 21793 if (ddi_copyin(arg, &inresvs32, 21794 sizeof (struct mhioc_inresvs32), flag) != 0) { 21795 return (EFAULT); 21796 } 21797 inresvs.li = (mhioc_resv_desc_list_t *)(uintptr_t)inresvs32.li; 21798 if ((rval = sd_persistent_reservation_in_read_resv(un, 21799 &inresvs, flag)) != 0) { 21800 return (rval); 21801 } 21802 inresvs32.generation = inresvs.generation; 21803 if (ddi_copyout(&inresvs32, arg, 21804 sizeof (struct mhioc_inresvs32), flag) != 0) { 21805 return (EFAULT); 21806 } 21807 break; 21808 } 21809 case DDI_MODEL_NONE: 21810 if (ddi_copyin(arg, &inresvs, 21811 sizeof (mhioc_inresvs_t), flag) != 0) { 21812 return (EFAULT); 21813 } 21814 if ((rval = sd_persistent_reservation_in_read_resv(un, 21815 &inresvs, flag)) != 0) { 21816 return (rval); 21817 } 21818 if (ddi_copyout(&inresvs, arg, 21819 sizeof (mhioc_inresvs_t), flag) != 0) { 21820 return (EFAULT); 21821 } 21822 break; 21823 } 21824 21825 #else /* ! _MULTI_DATAMODEL */ 21826 21827 if (ddi_copyin(arg, &inresvs, sizeof (mhioc_inresvs_t), flag) != 0) { 21828 return (EFAULT); 21829 } 21830 rval = sd_persistent_reservation_in_read_resv(un, &inresvs, flag); 21831 if (rval != 0) { 21832 return (rval); 21833 } 21834 if (ddi_copyout(&inresvs, arg, sizeof (mhioc_inresvs_t), flag)) { 21835 return (EFAULT); 21836 } 21837 21838 #endif /* ! _MULTI_DATAMODEL */ 21839 21840 return (rval); 21841 } 21842 21843 21844 /* 21845 * The following routines support the clustering functionality described below 21846 * and implement lost reservation reclaim functionality. 21847 * 21848 * Clustering 21849 * ---------- 21850 * The clustering code uses two different, independent forms of SCSI 21851 * reservation. Traditional SCSI-2 Reserve/Release and the newer SCSI-3 21852 * Persistent Group Reservations. For any particular disk, it will use either 21853 * SCSI-2 or SCSI-3 PGR but never both at the same time for the same disk. 21854 * 21855 * SCSI-2 21856 * The cluster software takes ownership of a multi-hosted disk by issuing the 21857 * MHIOCTKOWN ioctl to the disk driver. It releases ownership by issuing the 21858 * MHIOCRELEASE ioctl. Closely related is the MHIOCENFAILFAST ioctl -- a 21859 * cluster, just after taking ownership of the disk with the MHIOCTKOWN ioctl 21860 * then issues the MHIOCENFAILFAST ioctl. This ioctl "enables failfast" in the 21861 * driver. The meaning of failfast is that if the driver (on this host) ever 21862 * encounters the scsi error return code RESERVATION_CONFLICT from the device, 21863 * it should immediately panic the host. The motivation for this ioctl is that 21864 * if this host does encounter reservation conflict, the underlying cause is 21865 * that some other host of the cluster has decided that this host is no longer 21866 * in the cluster and has seized control of the disks for itself. Since this 21867 * host is no longer in the cluster, it ought to panic itself. 
The 21868 * MHIOCENFAILFAST ioctl does two things: 21869 * (a) it sets a flag that will cause any returned RESERVATION_CONFLICT 21870 * error to panic the host 21871 * (b) it sets up a periodic timer to test whether this host still has 21872 * "access" (in that no other host has reserved the device): if the 21873 * periodic timer gets RESERVATION_CONFLICT, the host is panicked. The 21874 * purpose of that periodic timer is to handle scenarios where the host is 21875 * otherwise temporarily quiescent, temporarily doing no real i/o. 21876 * The MHIOCTKOWN ioctl will "break" a reservation that is held by another host, 21877 * by issuing a SCSI Bus Device Reset. It will then issue a SCSI Reserve for 21878 * the device itself. 21879 * 21880 * SCSI-3 PGR 21881 * A direct semantic implementation of the SCSI-3 Persistent Reservation 21882 * facility is supported through the shared multihost disk ioctls 21883 * (MHIOCGRP_INKEYS, MHIOCGRP_INRESV, MHIOCGRP_REGISTER, MHIOCGRP_RESERVE, 21884 * MHIOCGRP_PREEMPTANDABORT) 21885 * 21886 * Reservation Reclaim: 21887 * -------------------- 21888 * To support the lost reservation reclaim operations this driver creates a 21889 * single thread to handle reinstating reservations on all devices that have 21890 * lost reservations. sd_resv_reclaim_requests are logged for all devices that 21891 * have LOST RESERVATIONS when the scsi watch facility calls back 21892 * sd_mhd_watch_cb, and the reservation reclaim thread loops through the 21893 * requests to regain the lost reservations. 21894 */ 21895 21896 /* 21897 * Function: sd_check_mhd() 21898 * 21899 * Description: This function sets up and submits a scsi watch request or 21900 * terminates an existing watch request. This routine is used in 21901 * support of reservation reclaim. 21902 * 21903 * Arguments: dev - the device 'dev_t' is used for context to discriminate 21904 * among multiple watches that share the callback function 21905 * interval - the number of milliseconds specifying the watch 21906 * interval for issuing TEST UNIT READY commands. If 21907 * set to 0 the watch should be terminated. If the 21908 * interval is set to 0 and if the device is required 21909 * to hold reservation while disabling failfast, the 21910 * watch is restarted with an interval of 21911 * reinstate_resv_delay. 21912 * 21913 * Return Code: 0 - Successful submit/terminate of scsi watch request 21914 * ENXIO - Indicates an invalid device was specified 21915 * EAGAIN - Unable to submit the scsi watch request 21916 */ 21917 21918 static int 21919 sd_check_mhd(dev_t dev, int interval) 21920 { 21921 struct sd_lun *un; 21922 opaque_t token; 21923 21924 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21925 return (ENXIO); 21926 } 21927 21928 /* is this a watch termination request? */ 21929 if (interval == 0) { 21930 mutex_enter(SD_MUTEX(un)); 21931 /* if there is an existing watch task then terminate it */ 21932 if (un->un_mhd_token) { 21933 token = un->un_mhd_token; 21934 un->un_mhd_token = NULL; 21935 mutex_exit(SD_MUTEX(un)); 21936 (void) scsi_watch_request_terminate(token, 21937 SCSI_WATCH_TERMINATE_WAIT); 21938 mutex_enter(SD_MUTEX(un)); 21939 } else { 21940 mutex_exit(SD_MUTEX(un)); 21941 /* 21942 * Note: If we return here we don't check for the 21943 * failfast case. This is the original legacy 21944 * implementation but perhaps we should be checking 21945 * the failfast case.
21946 */ 21947 return (0); 21948 } 21949 /* 21950 * If the device is required to hold reservation while 21951 * disabling failfast, we need to restart the scsi_watch 21952 * routine with an interval of reinstate_resv_delay. 21953 */ 21954 if (un->un_resvd_status & SD_RESERVE) { 21955 interval = sd_reinstate_resv_delay/1000; 21956 } else { 21957 /* no failfast so bail */ 21958 mutex_exit(SD_MUTEX(un)); 21959 return (0); 21960 } 21961 mutex_exit(SD_MUTEX(un)); 21962 } 21963 21964 /* 21965 * adjust minimum time interval to 1 second, 21966 * and convert from msecs to usecs 21967 */ 21968 if (interval > 0 && interval < 1000) { 21969 interval = 1000; 21970 } 21971 interval *= 1000; 21972 21973 /* 21974 * submit the request to the scsi_watch service 21975 */ 21976 token = scsi_watch_request_submit(SD_SCSI_DEVP(un), interval, 21977 SENSE_LENGTH, sd_mhd_watch_cb, (caddr_t)dev); 21978 if (token == NULL) { 21979 return (EAGAIN); 21980 } 21981 21982 /* 21983 * save token for termination later on 21984 */ 21985 mutex_enter(SD_MUTEX(un)); 21986 un->un_mhd_token = token; 21987 mutex_exit(SD_MUTEX(un)); 21988 return (0); 21989 } 21990 21991 21992 /* 21993 * Function: sd_mhd_watch_cb() 21994 * 21995 * Description: This function is the call back function used by the scsi watch 21996 * facility. The scsi watch facility sends the "Test Unit Ready" 21997 * and processes the status. If applicable (i.e. a "Unit Attention" 21998 * status and automatic "Request Sense" not used) the scsi watch 21999 * facility will send a "Request Sense" and retrieve the sense data 22000 * to be passed to this callback function. In either case -- the 22001 * automatic "Request Sense" or the facility submitting one -- this 22002 * callback is passed the status and sense data. 22003 * 22004 * Arguments: arg - the device 'dev_t' is used for context to discriminate 22005 * among multiple watches that share this callback function 22006 * resultp - scsi watch facility result packet containing scsi 22007 * packet, status byte and sense data 22008 * 22009 * Return Code: 0 - continue the watch task 22010 * non-zero - terminate the watch task 22011 */ 22012 22013 static int 22014 sd_mhd_watch_cb(caddr_t arg, struct scsi_watch_result *resultp) 22015 { 22016 struct sd_lun *un; 22017 struct scsi_status *statusp; 22018 uint8_t *sensep; 22019 struct scsi_pkt *pkt; 22020 uchar_t actual_sense_length; 22021 dev_t dev = (dev_t)arg; 22022 22023 ASSERT(resultp != NULL); 22024 statusp = resultp->statusp; 22025 sensep = (uint8_t *)resultp->sensep; 22026 pkt = resultp->pkt; 22027 actual_sense_length = resultp->actual_sense_length; 22028 22029 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 22030 return (ENXIO); 22031 } 22032 22033 SD_TRACE(SD_LOG_IOCTL_MHD, un, 22034 "sd_mhd_watch_cb: reason '%s', status '%s'\n", 22035 scsi_rname(pkt->pkt_reason), sd_sname(*((unsigned char *)statusp))); 22036 22037 /* Begin processing of the status and/or sense data */ 22038 if (pkt->pkt_reason != CMD_CMPLT) { 22039 /* Handle the incomplete packet */ 22040 sd_mhd_watch_incomplete(un, pkt); 22041 return (0); 22042 } else if (*((unsigned char *)statusp) != STATUS_GOOD) { 22043 if (*((unsigned char *)statusp) 22044 == STATUS_RESERVATION_CONFLICT) { 22045 /* 22046 * Handle a reservation conflict by panicking if 22047 * configured for failfast or by logging the conflict 22048 * and updating the reservation status 22049 */ 22050 mutex_enter(SD_MUTEX(un)); 22051 if ((un->un_resvd_status & SD_FAILFAST) && 22052 (sd_failfast_enable)) { 22053
sd_panic_for_res_conflict(un); 22054 /*NOTREACHED*/ 22055 } 22056 SD_INFO(SD_LOG_IOCTL_MHD, un, 22057 "sd_mhd_watch_cb: Reservation Conflict\n"); 22058 un->un_resvd_status |= SD_RESERVATION_CONFLICT; 22059 mutex_exit(SD_MUTEX(un)); 22060 } 22061 } 22062 22063 if (sensep != NULL) { 22064 if (actual_sense_length >= (SENSE_LENGTH - 2)) { 22065 mutex_enter(SD_MUTEX(un)); 22066 if ((scsi_sense_asc(sensep) == 22067 SD_SCSI_RESET_SENSE_CODE) && 22068 (un->un_resvd_status & SD_RESERVE)) { 22069 /* 22070 * The additional sense code indicates a power 22071 * on or bus device reset has occurred; update 22072 * the reservation status. 22073 */ 22074 un->un_resvd_status |= 22075 (SD_LOST_RESERVE | SD_WANT_RESERVE); 22076 SD_INFO(SD_LOG_IOCTL_MHD, un, 22077 "sd_mhd_watch_cb: Lost Reservation\n"); 22078 } 22079 } else { 22080 return (0); 22081 } 22082 } else { 22083 mutex_enter(SD_MUTEX(un)); 22084 } 22085 22086 if ((un->un_resvd_status & SD_RESERVE) && 22087 (un->un_resvd_status & SD_LOST_RESERVE)) { 22088 if (un->un_resvd_status & SD_WANT_RESERVE) { 22089 /* 22090 * A reset occurred in between the last probe and this 22091 * one so if a timeout is pending cancel it. 22092 */ 22093 if (un->un_resvd_timeid) { 22094 timeout_id_t temp_id = un->un_resvd_timeid; 22095 un->un_resvd_timeid = NULL; 22096 mutex_exit(SD_MUTEX(un)); 22097 (void) untimeout(temp_id); 22098 mutex_enter(SD_MUTEX(un)); 22099 } 22100 un->un_resvd_status &= ~SD_WANT_RESERVE; 22101 } 22102 if (un->un_resvd_timeid == 0) { 22103 /* Schedule a timeout to handle the lost reservation */ 22104 un->un_resvd_timeid = timeout(sd_mhd_resvd_recover, 22105 (void *)dev, 22106 drv_usectohz(sd_reinstate_resv_delay)); 22107 } 22108 } 22109 mutex_exit(SD_MUTEX(un)); 22110 return (0); 22111 } 22112 22113 22114 /* 22115 * Function: sd_mhd_watch_incomplete() 22116 * 22117 * Description: This function is used to find out why a scsi pkt sent by the 22118 * scsi watch facility was not completed. Under some scenarios this 22119 * routine will return. Otherwise it will send a bus reset to see 22120 * if the drive is still online. 22121 * 22122 * Arguments: un - driver soft state (unit) structure 22123 * pkt - incomplete scsi pkt 22124 */ 22125 22126 static void 22127 sd_mhd_watch_incomplete(struct sd_lun *un, struct scsi_pkt *pkt) 22128 { 22129 int be_chatty; 22130 int perr; 22131 22132 ASSERT(pkt != NULL); 22133 ASSERT(un != NULL); 22134 be_chatty = (!(pkt->pkt_flags & FLAG_SILENT)); 22135 perr = (pkt->pkt_statistics & STAT_PERR); 22136 22137 mutex_enter(SD_MUTEX(un)); 22138 if (un->un_state == SD_STATE_DUMPING) { 22139 mutex_exit(SD_MUTEX(un)); 22140 return; 22141 } 22142 22143 switch (pkt->pkt_reason) { 22144 case CMD_UNX_BUS_FREE: 22145 /* 22146 * If we had a parity error that caused the target to drop BSY*, 22147 * don't be chatty about it. 22148 */ 22149 if (perr && be_chatty) { 22150 be_chatty = 0; 22151 } 22152 break; 22153 case CMD_TAG_REJECT: 22154 /* 22155 * The SCSI-2 spec states that a tag reject will be sent by the 22156 * target if tagged queuing is not supported. A tag reject may 22157 * also be sent during certain initialization periods or to 22158 * control internal resources. For the latter case the target 22159 * may also return Queue Full. 22160 * 22161 * If this driver receives a tag reject from a target that is 22162 * going through an init period or controlling internal 22163 * resources tagged queuing will be disabled. 
This is
 * less than optimal behavior, but the driver is unable to determine the
 * target state and assumes tagged queueing is not supported.
 */
		pkt->pkt_flags = 0;
		un->un_tagflags = 0;

		if (un->un_f_opt_queueing == TRUE) {
			un->un_throttle = min(un->un_throttle, 3);
		} else {
			un->un_throttle = 1;
		}
		mutex_exit(SD_MUTEX(un));
		(void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1);
		mutex_enter(SD_MUTEX(un));
		break;
	case CMD_INCOMPLETE:
		/*
		 * The transport stopped with an abnormal state; fall through
		 * and reset the target and/or bus unless selection did not
		 * complete (indicated by STATE_GOT_BUS), in which case we
		 * don't want to go through a target/bus reset.
		 */
		if (pkt->pkt_state == STATE_GOT_BUS) {
			break;
		}
		/*FALLTHROUGH*/

	case CMD_TIMEOUT:
	default:
		/*
		 * The lun may still be running the command, so a lun reset
		 * should be attempted. If the lun reset fails or cannot be
		 * issued, then try a target reset. Lastly try a bus reset.
		 */
		if ((pkt->pkt_statistics &
		    (STAT_BUS_RESET|STAT_DEV_RESET|STAT_ABORTED)) == 0) {
			int reset_retval = 0;
			mutex_exit(SD_MUTEX(un));
			if (un->un_f_allow_bus_device_reset == TRUE) {
				if (un->un_f_lun_reset_enabled == TRUE) {
					reset_retval =
					    scsi_reset(SD_ADDRESS(un),
					    RESET_LUN);
				}
				if (reset_retval == 0) {
					reset_retval =
					    scsi_reset(SD_ADDRESS(un),
					    RESET_TARGET);
				}
			}
			if (reset_retval == 0) {
				(void) scsi_reset(SD_ADDRESS(un), RESET_ALL);
			}
			mutex_enter(SD_MUTEX(un));
		}
		break;
	}

	/* A device/bus reset has occurred; update the reservation status. */
	if ((pkt->pkt_reason == CMD_RESET) || (pkt->pkt_statistics &
	    (STAT_BUS_RESET | STAT_DEV_RESET))) {
		if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) {
			un->un_resvd_status |=
			    (SD_LOST_RESERVE | SD_WANT_RESERVE);
			SD_INFO(SD_LOG_IOCTL_MHD, un,
			    "sd_mhd_watch_incomplete: Lost Reservation\n");
		}
	}

	/*
	 * The disk has been turned off; update the device state.
	 *
	 * Note: Should we be offlining the disk here?
	 */
	if (pkt->pkt_state == STATE_GOT_BUS) {
		SD_INFO(SD_LOG_IOCTL_MHD, un, "sd_mhd_watch_incomplete: "
		    "Disk not responding to selection\n");
		if (un->un_state != SD_STATE_OFFLINE) {
			New_state(un, SD_STATE_OFFLINE);
		}
	} else if (be_chatty) {
		/*
		 * suppress messages if they are all the same pkt_reason;
		 * with TQ, many (up to 256) are returned with the same
		 * pkt_reason
		 */
		if (pkt->pkt_reason != un->un_last_pkt_reason) {
			SD_ERROR(SD_LOG_IOCTL_MHD, un,
			    "sd_mhd_watch_incomplete: "
			    "SCSI transport failed: reason '%s'\n",
			    scsi_rname(pkt->pkt_reason));
		}
	}
	un->un_last_pkt_reason = pkt->pkt_reason;
	mutex_exit(SD_MUTEX(un));
}


/*
 * Function: sd_sname()
 *
 * Description: This is a simple little routine to return a string containing
 *		a printable description of a command status byte for use in
 *		logging.
 *
 * Arguments: status - the command status byte
 *
 * Return Code: char * - string containing status description.
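 *
 * Illustrative use, as in sd_mhd_watch_cb() above:
 *
 *	SD_TRACE(SD_LOG_IOCTL_MHD, un,
 *	    "sd_mhd_watch_cb: reason '%s', status '%s'\n",
 *	    scsi_rname(pkt->pkt_reason),
 *	    sd_sname(*((unsigned char *)statusp)));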
22272 */ 22273 22274 static char * 22275 sd_sname(uchar_t status) 22276 { 22277 switch (status & STATUS_MASK) { 22278 case STATUS_GOOD: 22279 return ("good status"); 22280 case STATUS_CHECK: 22281 return ("check condition"); 22282 case STATUS_MET: 22283 return ("condition met"); 22284 case STATUS_BUSY: 22285 return ("busy"); 22286 case STATUS_INTERMEDIATE: 22287 return ("intermediate"); 22288 case STATUS_INTERMEDIATE_MET: 22289 return ("intermediate - condition met"); 22290 case STATUS_RESERVATION_CONFLICT: 22291 return ("reservation_conflict"); 22292 case STATUS_TERMINATED: 22293 return ("command terminated"); 22294 case STATUS_QFULL: 22295 return ("queue full"); 22296 default: 22297 return ("<unknown status>"); 22298 } 22299 } 22300 22301 22302 /* 22303 * Function: sd_mhd_resvd_recover() 22304 * 22305 * Description: This function adds a reservation entry to the 22306 * sd_resv_reclaim_request list and signals the reservation 22307 * reclaim thread that there is work pending. If the reservation 22308 * reclaim thread has not been previously created this function 22309 * will kick it off. 22310 * 22311 * Arguments: arg - the device 'dev_t' is used for context to discriminate 22312 * among multiple watches that share this callback function 22313 * 22314 * Context: This routine is called by timeout() and is run in interrupt 22315 * context. It must not sleep or call other functions which may 22316 * sleep. 22317 */ 22318 22319 static void 22320 sd_mhd_resvd_recover(void *arg) 22321 { 22322 dev_t dev = (dev_t)arg; 22323 struct sd_lun *un; 22324 struct sd_thr_request *sd_treq = NULL; 22325 struct sd_thr_request *sd_cur = NULL; 22326 struct sd_thr_request *sd_prev = NULL; 22327 int already_there = 0; 22328 22329 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 22330 return; 22331 } 22332 22333 mutex_enter(SD_MUTEX(un)); 22334 un->un_resvd_timeid = NULL; 22335 if (un->un_resvd_status & SD_WANT_RESERVE) { 22336 /* 22337 * There was a reset so don't issue the reserve, allow the 22338 * sd_mhd_watch_cb callback function to notice this and 22339 * reschedule the timeout for reservation. 22340 */ 22341 mutex_exit(SD_MUTEX(un)); 22342 return; 22343 } 22344 mutex_exit(SD_MUTEX(un)); 22345 22346 /* 22347 * Add this device to the sd_resv_reclaim_request list and the 22348 * sd_resv_reclaim_thread should take care of the rest. 22349 * 22350 * Note: We can't sleep in this context so if the memory allocation 22351 * fails allow the sd_mhd_watch_cb callback function to notice this and 22352 * reschedule the timeout for reservation. 
(4378460)
	 */
	sd_treq = (struct sd_thr_request *)
	    kmem_zalloc(sizeof (struct sd_thr_request), KM_NOSLEEP);
	if (sd_treq == NULL) {
		return;
	}

	sd_treq->sd_thr_req_next = NULL;
	sd_treq->dev = dev;
	mutex_enter(&sd_tr.srq_resv_reclaim_mutex);
	if (sd_tr.srq_thr_req_head == NULL) {
		sd_tr.srq_thr_req_head = sd_treq;
	} else {
		sd_cur = sd_prev = sd_tr.srq_thr_req_head;
		for (; sd_cur != NULL; sd_cur = sd_cur->sd_thr_req_next) {
			if (sd_cur->dev == dev) {
				/*
				 * already in the queue, so don't log
				 * another request for this device
				 */
				already_there = 1;
				break;
			}
			sd_prev = sd_cur;
		}
		if (!already_there) {
			SD_INFO(SD_LOG_IOCTL_MHD, un, "sd_mhd_resvd_recover: "
			    "logging request for %lx\n", dev);
			sd_prev->sd_thr_req_next = sd_treq;
		} else {
			kmem_free(sd_treq, sizeof (struct sd_thr_request));
		}
	}

	/*
	 * Create a kernel thread to do the reservation reclaim, then free
	 * up this thread. We cannot block this thread while we go away to
	 * do the reservation reclaim.
	 */
	if (sd_tr.srq_resv_reclaim_thread == NULL)
		sd_tr.srq_resv_reclaim_thread = thread_create(NULL, 0,
		    sd_resv_reclaim_thread, NULL,
		    0, &p0, TS_RUN, v.v_maxsyspri - 2);

	/* Tell the reservation reclaim thread that it has work to do */
	cv_signal(&sd_tr.srq_resv_reclaim_cv);
	mutex_exit(&sd_tr.srq_resv_reclaim_mutex);
}

/*
 * Function: sd_resv_reclaim_thread()
 *
 * Description: This function implements the reservation reclaim operations.
 *
 * Arguments: none; the requests to be processed are taken from the
 *		sd_tr.srq_thr_req_head queue
 */

static void
sd_resv_reclaim_thread()
{
	struct sd_lun		*un;
	struct sd_thr_request	*sd_mhreq;

	/* Wait for work */
	mutex_enter(&sd_tr.srq_resv_reclaim_mutex);
	if (sd_tr.srq_thr_req_head == NULL) {
		cv_wait(&sd_tr.srq_resv_reclaim_cv,
		    &sd_tr.srq_resv_reclaim_mutex);
	}

	/* Loop while we have work */
	while ((sd_tr.srq_thr_cur_req = sd_tr.srq_thr_req_head) != NULL) {
		un = ddi_get_soft_state(sd_state,
		    SDUNIT(sd_tr.srq_thr_cur_req->dev));
		if (un == NULL) {
			/*
			 * softstate structure is NULL, so just
			 * dequeue the request and continue
			 */
			sd_tr.srq_thr_req_head =
			    sd_tr.srq_thr_cur_req->sd_thr_req_next;
			kmem_free(sd_tr.srq_thr_cur_req,
			    sizeof (struct sd_thr_request));
			continue;
		}

		/* dequeue the request */
		sd_mhreq = sd_tr.srq_thr_cur_req;
		sd_tr.srq_thr_req_head =
		    sd_tr.srq_thr_cur_req->sd_thr_req_next;
		mutex_exit(&sd_tr.srq_resv_reclaim_mutex);

		/*
		 * Reclaim reservation only if SD_RESERVE is still set. There
		 * may have been a call to MHIOCRELEASE before we got here.
		 */
		mutex_enter(SD_MUTEX(un));
		if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) {
			/*
			 * Note: The SD_LOST_RESERVE flag is cleared before
			 * reclaiming the reservation.
If this is done after the 22455 * call to sd_reserve_release a reservation loss in the 22456 * window between pkt completion of reserve cmd and 22457 * mutex_enter below may not be recognized 22458 */ 22459 un->un_resvd_status &= ~SD_LOST_RESERVE; 22460 mutex_exit(SD_MUTEX(un)); 22461 22462 if (sd_reserve_release(sd_mhreq->dev, 22463 SD_RESERVE) == 0) { 22464 mutex_enter(SD_MUTEX(un)); 22465 un->un_resvd_status |= SD_RESERVE; 22466 mutex_exit(SD_MUTEX(un)); 22467 SD_INFO(SD_LOG_IOCTL_MHD, un, 22468 "sd_resv_reclaim_thread: " 22469 "Reservation Recovered\n"); 22470 } else { 22471 mutex_enter(SD_MUTEX(un)); 22472 un->un_resvd_status |= SD_LOST_RESERVE; 22473 mutex_exit(SD_MUTEX(un)); 22474 SD_INFO(SD_LOG_IOCTL_MHD, un, 22475 "sd_resv_reclaim_thread: Failed " 22476 "Reservation Recovery\n"); 22477 } 22478 } else { 22479 mutex_exit(SD_MUTEX(un)); 22480 } 22481 mutex_enter(&sd_tr.srq_resv_reclaim_mutex); 22482 ASSERT(sd_mhreq == sd_tr.srq_thr_cur_req); 22483 kmem_free(sd_mhreq, sizeof (struct sd_thr_request)); 22484 sd_mhreq = sd_tr.srq_thr_cur_req = NULL; 22485 /* 22486 * wakeup the destroy thread if anyone is waiting on 22487 * us to complete. 22488 */ 22489 cv_signal(&sd_tr.srq_inprocess_cv); 22490 SD_TRACE(SD_LOG_IOCTL_MHD, un, 22491 "sd_resv_reclaim_thread: cv_signalling current request \n"); 22492 } 22493 22494 /* 22495 * cleanup the sd_tr structure now that this thread will not exist 22496 */ 22497 ASSERT(sd_tr.srq_thr_req_head == NULL); 22498 ASSERT(sd_tr.srq_thr_cur_req == NULL); 22499 sd_tr.srq_resv_reclaim_thread = NULL; 22500 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 22501 thread_exit(); 22502 } 22503 22504 22505 /* 22506 * Function: sd_rmv_resv_reclaim_req() 22507 * 22508 * Description: This function removes any pending reservation reclaim requests 22509 * for the specified device. 22510 * 22511 * Arguments: dev - the device 'dev_t' 22512 */ 22513 22514 static void 22515 sd_rmv_resv_reclaim_req(dev_t dev) 22516 { 22517 struct sd_thr_request *sd_mhreq; 22518 struct sd_thr_request *sd_prev; 22519 22520 /* Remove a reservation reclaim request from the list */ 22521 mutex_enter(&sd_tr.srq_resv_reclaim_mutex); 22522 if (sd_tr.srq_thr_cur_req && sd_tr.srq_thr_cur_req->dev == dev) { 22523 /* 22524 * We are attempting to reinstate reservation for 22525 * this device. We wait for sd_reserve_release() 22526 * to return before we return. 22527 */ 22528 cv_wait(&sd_tr.srq_inprocess_cv, 22529 &sd_tr.srq_resv_reclaim_mutex); 22530 } else { 22531 sd_prev = sd_mhreq = sd_tr.srq_thr_req_head; 22532 if (sd_mhreq && sd_mhreq->dev == dev) { 22533 sd_tr.srq_thr_req_head = sd_mhreq->sd_thr_req_next; 22534 kmem_free(sd_mhreq, sizeof (struct sd_thr_request)); 22535 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 22536 return; 22537 } 22538 for (; sd_mhreq != NULL; sd_mhreq = sd_mhreq->sd_thr_req_next) { 22539 if (sd_mhreq && sd_mhreq->dev == dev) { 22540 break; 22541 } 22542 sd_prev = sd_mhreq; 22543 } 22544 if (sd_mhreq != NULL) { 22545 sd_prev->sd_thr_req_next = sd_mhreq->sd_thr_req_next; 22546 kmem_free(sd_mhreq, sizeof (struct sd_thr_request)); 22547 } 22548 } 22549 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 22550 } 22551 22552 22553 /* 22554 * Function: sd_mhd_reset_notify_cb() 22555 * 22556 * Description: This is a call back function for scsi_reset_notify. This 22557 * function updates the softstate reserved status and logs the 22558 * reset. The driver scsi watch facility callback function 22559 * (sd_mhd_watch_cb) and reservation reclaim thread functionality 22560 * will reclaim the reservation. 
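 *		An illustrative registration of this callback (a sketch
 *		following scsi_reset_notify(9F)):
 *
 *		    (void) scsi_reset_notify(SD_ADDRESS(un),
 *			SCSI_RESET_NOTIFY, sd_mhd_reset_notify_cb,
 *			(caddr_t)un);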
 *
 * Arguments: arg - driver soft state (unit) structure
 */

static void
sd_mhd_reset_notify_cb(caddr_t arg)
{
	struct sd_lun *un = (struct sd_lun *)arg;

	mutex_enter(SD_MUTEX(un));
	if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) {
		un->un_resvd_status |= (SD_LOST_RESERVE | SD_WANT_RESERVE);
		SD_INFO(SD_LOG_IOCTL_MHD, un,
		    "sd_mhd_reset_notify_cb: Lost Reservation\n");
	}
	mutex_exit(SD_MUTEX(un));
}


/*
 * Function: sd_take_ownership()
 *
 * Description: This routine implements an algorithm to achieve a stable
 *		reservation on disks which don't implement priority reserve,
 *		and makes sure that other hosts lose their re-reservation
 *		attempts. The algorithm consists of a loop that keeps issuing
 *		the RESERVE for some period of time (min_ownership_delay,
 *		default 6 seconds). During that loop, it looks to see if there
 *		has been a bus device reset or bus reset (both of which cause
 *		an existing reservation to be lost). If the reservation is
 *		lost, RESERVE is issued again until a period of
 *		min_ownership_delay with no resets has gone by, or until
 *		max_ownership_delay has expired. This loop ensures that the
 *		host really did manage to reserve the device, in spite of
 *		resets. The looping for min_ownership_delay (default six
 *		seconds) is important to early generation clustering products,
 *		Solstice HA 1.x and Sun Cluster 2.x. Those products use an
 *		MHIOCENFAILFAST periodic timer of two seconds. By having
 *		MHIOCTKOWN issue Reserves in a loop for six seconds, and having
 *		MHIOCENFAILFAST poll every two seconds, the idea is that by the
 *		time the MHIOCTKOWN ioctl returns, the other host (if any) will
 *		have already noticed, via the MHIOCENFAILFAST polling, that it
 *		no longer "owns" the disk and will have panicked itself. Thus,
 *		the host issuing the MHIOCTKOWN is assured (with timing
 *		dependencies) that by the time it actually starts to use the
 *		disk for real work, the old owner is no longer accessing it.
 *
 *		min_ownership_delay is the minimum amount of time for which the
 *		disk must be reserved continuously devoid of resets before the
 *		MHIOCTKOWN ioctl will return success.
 *
 *		max_ownership_delay indicates the amount of time by which the
 *		take ownership should succeed or time out with an error.
 *
 * Arguments: dev - the device 'dev_t'
 *	      *p - struct containing timing info.
 *
 * Return Code: 0 for success or error code
 */

static int
sd_take_ownership(dev_t dev, struct mhioctkown *p)
{
	struct sd_lun	*un;
	int		rval;
	int		err;
	int		reservation_count = 0;
	int		min_ownership_delay =  6000000; /* in usec */
	int		max_ownership_delay = 30000000; /* in usec */
	clock_t		start_time;	/* starting time of this algorithm */
	clock_t		end_time;	/* time limit for giving up */
	clock_t		ownership_time;	/* time limit for stable ownership */
	clock_t		current_time;
	clock_t		previous_current_time;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	/*
	 * Attempt a device reservation. A priority reservation is requested.
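	 * SD_PRIORITY_RESERVE allows sd_reserve_release() to break an
	 * existing reservation held by another host with a LUN, target,
	 * or bus reset before reissuing the RESERVE (see
	 * sd_reserve_release() below).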
22641 */ 22642 if ((rval = sd_reserve_release(dev, SD_PRIORITY_RESERVE)) 22643 != SD_SUCCESS) { 22644 SD_ERROR(SD_LOG_IOCTL_MHD, un, 22645 "sd_take_ownership: return(1)=%d\n", rval); 22646 return (rval); 22647 } 22648 22649 /* Update the softstate reserved status to indicate the reservation */ 22650 mutex_enter(SD_MUTEX(un)); 22651 un->un_resvd_status |= SD_RESERVE; 22652 un->un_resvd_status &= 22653 ~(SD_LOST_RESERVE | SD_WANT_RESERVE | SD_RESERVATION_CONFLICT); 22654 mutex_exit(SD_MUTEX(un)); 22655 22656 if (p != NULL) { 22657 if (p->min_ownership_delay != 0) { 22658 min_ownership_delay = p->min_ownership_delay * 1000; 22659 } 22660 if (p->max_ownership_delay != 0) { 22661 max_ownership_delay = p->max_ownership_delay * 1000; 22662 } 22663 } 22664 SD_INFO(SD_LOG_IOCTL_MHD, un, 22665 "sd_take_ownership: min, max delays: %d, %d\n", 22666 min_ownership_delay, max_ownership_delay); 22667 22668 start_time = ddi_get_lbolt(); 22669 current_time = start_time; 22670 ownership_time = current_time + drv_usectohz(min_ownership_delay); 22671 end_time = start_time + drv_usectohz(max_ownership_delay); 22672 22673 while (current_time - end_time < 0) { 22674 delay(drv_usectohz(500000)); 22675 22676 if ((err = sd_reserve_release(dev, SD_RESERVE)) != 0) { 22677 if ((sd_reserve_release(dev, SD_RESERVE)) != 0) { 22678 mutex_enter(SD_MUTEX(un)); 22679 rval = (un->un_resvd_status & 22680 SD_RESERVATION_CONFLICT) ? EACCES : EIO; 22681 mutex_exit(SD_MUTEX(un)); 22682 break; 22683 } 22684 } 22685 previous_current_time = current_time; 22686 current_time = ddi_get_lbolt(); 22687 mutex_enter(SD_MUTEX(un)); 22688 if (err || (un->un_resvd_status & SD_LOST_RESERVE)) { 22689 ownership_time = ddi_get_lbolt() + 22690 drv_usectohz(min_ownership_delay); 22691 reservation_count = 0; 22692 } else { 22693 reservation_count++; 22694 } 22695 un->un_resvd_status |= SD_RESERVE; 22696 un->un_resvd_status &= ~(SD_LOST_RESERVE | SD_WANT_RESERVE); 22697 mutex_exit(SD_MUTEX(un)); 22698 22699 SD_INFO(SD_LOG_IOCTL_MHD, un, 22700 "sd_take_ownership: ticks for loop iteration=%ld, " 22701 "reservation=%s\n", (current_time - previous_current_time), 22702 reservation_count ? 
"ok" : "reclaimed"); 22703 22704 if (current_time - ownership_time >= 0 && 22705 reservation_count >= 4) { 22706 rval = 0; /* Achieved a stable ownership */ 22707 break; 22708 } 22709 if (current_time - end_time >= 0) { 22710 rval = EACCES; /* No ownership in max possible time */ 22711 break; 22712 } 22713 } 22714 SD_TRACE(SD_LOG_IOCTL_MHD, un, 22715 "sd_take_ownership: return(2)=%d\n", rval); 22716 return (rval); 22717 } 22718 22719 22720 /* 22721 * Function: sd_reserve_release() 22722 * 22723 * Description: This function builds and sends scsi RESERVE, RELEASE, and 22724 * PRIORITY RESERVE commands based on a user specified command type 22725 * 22726 * Arguments: dev - the device 'dev_t' 22727 * cmd - user specified command type; one of SD_PRIORITY_RESERVE, 22728 * SD_RESERVE, SD_RELEASE 22729 * 22730 * Return Code: 0 or Error Code 22731 */ 22732 22733 static int 22734 sd_reserve_release(dev_t dev, int cmd) 22735 { 22736 struct uscsi_cmd *com = NULL; 22737 struct sd_lun *un = NULL; 22738 char cdb[CDB_GROUP0]; 22739 int rval; 22740 22741 ASSERT((cmd == SD_RELEASE) || (cmd == SD_RESERVE) || 22742 (cmd == SD_PRIORITY_RESERVE)); 22743 22744 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 22745 return (ENXIO); 22746 } 22747 22748 /* instantiate and initialize the command and cdb */ 22749 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 22750 bzero(cdb, CDB_GROUP0); 22751 com->uscsi_flags = USCSI_SILENT; 22752 com->uscsi_timeout = un->un_reserve_release_time; 22753 com->uscsi_cdblen = CDB_GROUP0; 22754 com->uscsi_cdb = cdb; 22755 if (cmd == SD_RELEASE) { 22756 cdb[0] = SCMD_RELEASE; 22757 } else { 22758 cdb[0] = SCMD_RESERVE; 22759 } 22760 22761 /* Send the command. */ 22762 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 22763 SD_PATH_STANDARD); 22764 22765 /* 22766 * "break" a reservation that is held by another host, by issuing a 22767 * reset if priority reserve is desired, and we could not get the 22768 * device. 22769 */ 22770 if ((cmd == SD_PRIORITY_RESERVE) && 22771 (rval != 0) && (com->uscsi_status == STATUS_RESERVATION_CONFLICT)) { 22772 /* 22773 * First try to reset the LUN. If we cannot, then try a target 22774 * reset, followed by a bus reset if the target reset fails. 22775 */ 22776 int reset_retval = 0; 22777 if (un->un_f_lun_reset_enabled == TRUE) { 22778 reset_retval = scsi_reset(SD_ADDRESS(un), RESET_LUN); 22779 } 22780 if (reset_retval == 0) { 22781 /* The LUN reset either failed or was not issued */ 22782 reset_retval = scsi_reset(SD_ADDRESS(un), RESET_TARGET); 22783 } 22784 if ((reset_retval == 0) && 22785 (scsi_reset(SD_ADDRESS(un), RESET_ALL) == 0)) { 22786 rval = EIO; 22787 kmem_free(com, sizeof (*com)); 22788 return (rval); 22789 } 22790 22791 bzero(com, sizeof (struct uscsi_cmd)); 22792 com->uscsi_flags = USCSI_SILENT; 22793 com->uscsi_cdb = cdb; 22794 com->uscsi_cdblen = CDB_GROUP0; 22795 com->uscsi_timeout = 5; 22796 22797 /* 22798 * Reissue the last reserve command, this time without request 22799 * sense. Assume that it is just a regular reserve command. 22800 */ 22801 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 22802 SD_PATH_STANDARD); 22803 } 22804 22805 /* Return an error if still getting a reservation conflict. 
	 */
	if ((rval != 0) && (com->uscsi_status == STATUS_RESERVATION_CONFLICT)) {
		rval = EACCES;
	}

	kmem_free(com, sizeof (*com));
	return (rval);
}


#define	SD_NDUMP_RETRIES	12
/*
 *	System Crash Dump routine
 */

static int
sddump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk)
{
	int		instance;
	int		partition;
	int		i;
	int		err;
	struct sd_lun	*un;
	struct scsi_pkt *wr_pktp;
	struct buf	*wr_bp;
	struct buf	wr_buf;
	daddr_t		tgt_byte_offset; /* rmw - byte offset for target */
	daddr_t		tgt_blkno;	/* rmw - blkno for target */
	size_t		tgt_byte_count; /* rmw - # of bytes to xfer */
	size_t		tgt_nblk; /* rmw - # of tgt blks to xfer */
	size_t		io_start_offset;
	int		doing_rmw = FALSE;
	int		rval;
	ssize_t		dma_resid;
	daddr_t		oblkno;
	diskaddr_t	nblks = 0;
	diskaddr_t	start_block;

	instance = SDUNIT(dev);
	if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) ||
	    !SD_IS_VALID_LABEL(un) || ISCD(un)) {
		return (ENXIO);
	}

	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*un))

	SD_TRACE(SD_LOG_DUMP, un, "sddump: entry\n");

	partition = SDPART(dev);
	SD_INFO(SD_LOG_DUMP, un, "sddump: partition = %d\n", partition);

	/* Validate the blocks to dump against the partition size. */

	(void) cmlb_partinfo(un->un_cmlbhandle, partition,
	    &nblks, &start_block, NULL, NULL, (void *)SD_PATH_DIRECT);

	if ((blkno + nblk) > nblks) {
		SD_TRACE(SD_LOG_DUMP, un,
		    "sddump: dump range larger than partition: "
		    "blkno = 0x%x, nblk = 0x%x, dkl_nblk = 0x%x\n",
		    blkno, nblk, nblks);
		return (EINVAL);
	}

	mutex_enter(&un->un_pm_mutex);
	if (SD_DEVICE_IS_IN_LOW_POWER(un)) {
		struct scsi_pkt *start_pktp;

		mutex_exit(&un->un_pm_mutex);

		/*
		 * use pm framework to power on HBA 1st
		 */
		(void) pm_raise_power(SD_DEVINFO(un), 0, SD_SPINDLE_ON);

		/*
		 * Dump no longer uses sdpower to power on a device; it's
		 * in-line here so it can be done in polled mode.
		 */

		SD_INFO(SD_LOG_DUMP, un, "sddump: starting device\n");

		start_pktp = scsi_init_pkt(SD_ADDRESS(un), NULL, NULL,
		    CDB_GROUP0, un->un_status_len, 0, 0, NULL_FUNC, NULL);

		if (start_pktp == NULL) {
			/* We were not given a SCSI packet, fail. */
			return (EIO);
		}
		bzero(start_pktp->pkt_cdbp, CDB_GROUP0);
		start_pktp->pkt_cdbp[0] = SCMD_START_STOP;
		start_pktp->pkt_cdbp[4] = SD_TARGET_START;
		start_pktp->pkt_flags = FLAG_NOINTR;

		mutex_enter(SD_MUTEX(un));
		SD_FILL_SCSI1_LUN(un, start_pktp);
		mutex_exit(SD_MUTEX(un));
		/*
		 * Scsi_poll returns 0 (success) if the command completes and
		 * the status block is STATUS_GOOD.
		 */
		if (sd_scsi_poll(un, start_pktp) != 0) {
			scsi_destroy_pkt(start_pktp);
			return (EIO);
		}
		scsi_destroy_pkt(start_pktp);
		(void) sd_ddi_pm_resume(un);
	} else {
		mutex_exit(&un->un_pm_mutex);
	}

	mutex_enter(SD_MUTEX(un));
	un->un_throttle = 0;

	/*
	 * The first time through, reset the specific target device.
	 * However, when cpr calls sddump we know that sd is in a
	 * good state, so no bus reset is required.
	 * Clear sense data via Request Sense cmd.
22924 * In sddump we don't care about allow_bus_device_reset anymore 22925 */ 22926 22927 if ((un->un_state != SD_STATE_SUSPENDED) && 22928 (un->un_state != SD_STATE_DUMPING)) { 22929 22930 New_state(un, SD_STATE_DUMPING); 22931 22932 if (un->un_f_is_fibre == FALSE) { 22933 mutex_exit(SD_MUTEX(un)); 22934 /* 22935 * Attempt a bus reset for parallel scsi. 22936 * 22937 * Note: A bus reset is required because on some host 22938 * systems (i.e. E420R) a bus device reset is 22939 * insufficient to reset the state of the target. 22940 * 22941 * Note: Don't issue the reset for fibre-channel, 22942 * because this tends to hang the bus (loop) for 22943 * too long while everyone is logging out and in 22944 * and the deadman timer for dumping will fire 22945 * before the dump is complete. 22946 */ 22947 if (scsi_reset(SD_ADDRESS(un), RESET_ALL) == 0) { 22948 mutex_enter(SD_MUTEX(un)); 22949 Restore_state(un); 22950 mutex_exit(SD_MUTEX(un)); 22951 return (EIO); 22952 } 22953 22954 /* Delay to give the device some recovery time. */ 22955 drv_usecwait(10000); 22956 22957 if (sd_send_polled_RQS(un) == SD_FAILURE) { 22958 SD_INFO(SD_LOG_DUMP, un, 22959 "sddump: sd_send_polled_RQS failed\n"); 22960 } 22961 mutex_enter(SD_MUTEX(un)); 22962 } 22963 } 22964 22965 /* 22966 * Convert the partition-relative block number to a 22967 * disk physical block number. 22968 */ 22969 blkno += start_block; 22970 22971 SD_INFO(SD_LOG_DUMP, un, "sddump: disk blkno = 0x%x\n", blkno); 22972 22973 22974 /* 22975 * Check if the device has a non-512 block size. 22976 */ 22977 wr_bp = NULL; 22978 if (NOT_DEVBSIZE(un)) { 22979 tgt_byte_offset = blkno * un->un_sys_blocksize; 22980 tgt_byte_count = nblk * un->un_sys_blocksize; 22981 if ((tgt_byte_offset % un->un_tgt_blocksize) || 22982 (tgt_byte_count % un->un_tgt_blocksize)) { 22983 doing_rmw = TRUE; 22984 /* 22985 * Calculate the block number and number of block 22986 * in terms of the media block size. 22987 */ 22988 tgt_blkno = tgt_byte_offset / un->un_tgt_blocksize; 22989 tgt_nblk = 22990 ((tgt_byte_offset + tgt_byte_count + 22991 (un->un_tgt_blocksize - 1)) / 22992 un->un_tgt_blocksize) - tgt_blkno; 22993 22994 /* 22995 * Invoke the routine which is going to do read part 22996 * of read-modify-write. 22997 * Note that this routine returns a pointer to 22998 * a valid bp in wr_bp. 22999 */ 23000 err = sddump_do_read_of_rmw(un, tgt_blkno, tgt_nblk, 23001 &wr_bp); 23002 if (err) { 23003 mutex_exit(SD_MUTEX(un)); 23004 return (err); 23005 } 23006 /* 23007 * Offset is being calculated as - 23008 * (original block # * system block size) - 23009 * (new block # * target block size) 23010 */ 23011 io_start_offset = 23012 ((uint64_t)(blkno * un->un_sys_blocksize)) - 23013 ((uint64_t)(tgt_blkno * un->un_tgt_blocksize)); 23014 23015 ASSERT((io_start_offset >= 0) && 23016 (io_start_offset < un->un_tgt_blocksize)); 23017 /* 23018 * Do the modify portion of read modify write. 
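			 *
			 * Worked example (illustrative): with a 512-byte
			 * system block size and a 2048-byte target block
			 * size, blkno = 5 and nblk = 1 give
			 * tgt_byte_offset = 2560, tgt_blkno = 1,
			 * tgt_nblk = 1 and
			 * io_start_offset = (5 * 512) - (1 * 2048) = 512,
			 * so the 512 bytes at addr overwrite the middle of
			 * the 2048-byte block just read into wr_bp.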
23019 */ 23020 bcopy(addr, &wr_bp->b_un.b_addr[io_start_offset], 23021 (size_t)nblk * un->un_sys_blocksize); 23022 } else { 23023 doing_rmw = FALSE; 23024 tgt_blkno = tgt_byte_offset / un->un_tgt_blocksize; 23025 tgt_nblk = tgt_byte_count / un->un_tgt_blocksize; 23026 } 23027 23028 /* Convert blkno and nblk to target blocks */ 23029 blkno = tgt_blkno; 23030 nblk = tgt_nblk; 23031 } else { 23032 wr_bp = &wr_buf; 23033 bzero(wr_bp, sizeof (struct buf)); 23034 wr_bp->b_flags = B_BUSY; 23035 wr_bp->b_un.b_addr = addr; 23036 wr_bp->b_bcount = nblk << DEV_BSHIFT; 23037 wr_bp->b_resid = 0; 23038 } 23039 23040 mutex_exit(SD_MUTEX(un)); 23041 23042 /* 23043 * Obtain a SCSI packet for the write command. 23044 * It should be safe to call the allocator here without 23045 * worrying about being locked for DVMA mapping because 23046 * the address we're passed is already a DVMA mapping 23047 * 23048 * We are also not going to worry about semaphore ownership 23049 * in the dump buffer. Dumping is single threaded at present. 23050 */ 23051 23052 wr_pktp = NULL; 23053 23054 dma_resid = wr_bp->b_bcount; 23055 oblkno = blkno; 23056 23057 while (dma_resid != 0) { 23058 23059 for (i = 0; i < SD_NDUMP_RETRIES; i++) { 23060 wr_bp->b_flags &= ~B_ERROR; 23061 23062 if (un->un_partial_dma_supported == 1) { 23063 blkno = oblkno + 23064 ((wr_bp->b_bcount - dma_resid) / 23065 un->un_tgt_blocksize); 23066 nblk = dma_resid / un->un_tgt_blocksize; 23067 23068 if (wr_pktp) { 23069 /* 23070 * Partial DMA transfers after initial transfer 23071 */ 23072 rval = sd_setup_next_rw_pkt(un, wr_pktp, wr_bp, 23073 blkno, nblk); 23074 } else { 23075 /* Initial transfer */ 23076 rval = sd_setup_rw_pkt(un, &wr_pktp, wr_bp, 23077 un->un_pkt_flags, NULL_FUNC, NULL, 23078 blkno, nblk); 23079 } 23080 } else { 23081 rval = sd_setup_rw_pkt(un, &wr_pktp, wr_bp, 23082 0, NULL_FUNC, NULL, blkno, nblk); 23083 } 23084 23085 if (rval == 0) { 23086 /* We were given a SCSI packet, continue. 
*/ 23087 break; 23088 } 23089 23090 if (i == 0) { 23091 if (wr_bp->b_flags & B_ERROR) { 23092 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 23093 "no resources for dumping; " 23094 "error code: 0x%x, retrying", 23095 geterror(wr_bp)); 23096 } else { 23097 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 23098 "no resources for dumping; retrying"); 23099 } 23100 } else if (i != (SD_NDUMP_RETRIES - 1)) { 23101 if (wr_bp->b_flags & B_ERROR) { 23102 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 23103 "no resources for dumping; error code: " 23104 "0x%x, retrying\n", geterror(wr_bp)); 23105 } 23106 } else { 23107 if (wr_bp->b_flags & B_ERROR) { 23108 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 23109 "no resources for dumping; " 23110 "error code: 0x%x, retries failed, " 23111 "giving up.\n", geterror(wr_bp)); 23112 } else { 23113 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 23114 "no resources for dumping; " 23115 "retries failed, giving up.\n"); 23116 } 23117 mutex_enter(SD_MUTEX(un)); 23118 Restore_state(un); 23119 if (NOT_DEVBSIZE(un) && (doing_rmw == TRUE)) { 23120 mutex_exit(SD_MUTEX(un)); 23121 scsi_free_consistent_buf(wr_bp); 23122 } else { 23123 mutex_exit(SD_MUTEX(un)); 23124 } 23125 return (EIO); 23126 } 23127 drv_usecwait(10000); 23128 } 23129 23130 if (un->un_partial_dma_supported == 1) { 23131 /* 23132 * save the resid from PARTIAL_DMA 23133 */ 23134 dma_resid = wr_pktp->pkt_resid; 23135 if (dma_resid != 0) 23136 nblk -= SD_BYTES2TGTBLOCKS(un, dma_resid); 23137 wr_pktp->pkt_resid = 0; 23138 } else { 23139 dma_resid = 0; 23140 } 23141 23142 /* SunBug 1222170 */ 23143 wr_pktp->pkt_flags = FLAG_NOINTR; 23144 23145 err = EIO; 23146 for (i = 0; i < SD_NDUMP_RETRIES; i++) { 23147 23148 /* 23149 * Scsi_poll returns 0 (success) if the command completes and 23150 * the status block is STATUS_GOOD. We should only check 23151 * errors if this condition is not true. Even then we should 23152 * send our own request sense packet only if we have a check 23153 * condition and auto request sense has not been performed by 23154 * the hba. 23155 */ 23156 SD_TRACE(SD_LOG_DUMP, un, "sddump: sending write\n"); 23157 23158 if ((sd_scsi_poll(un, wr_pktp) == 0) && 23159 (wr_pktp->pkt_resid == 0)) { 23160 err = SD_SUCCESS; 23161 break; 23162 } 23163 23164 /* 23165 * Check CMD_DEV_GONE 1st, give up if device is gone. 
23166 */ 23167 if (wr_pktp->pkt_reason == CMD_DEV_GONE) { 23168 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 23169 "Error while dumping state...Device is gone\n"); 23170 break; 23171 } 23172 23173 if (SD_GET_PKT_STATUS(wr_pktp) == STATUS_CHECK) { 23174 SD_INFO(SD_LOG_DUMP, un, 23175 "sddump: write failed with CHECK, try # %d\n", i); 23176 if (((wr_pktp->pkt_state & STATE_ARQ_DONE) == 0)) { 23177 (void) sd_send_polled_RQS(un); 23178 } 23179 23180 continue; 23181 } 23182 23183 if (SD_GET_PKT_STATUS(wr_pktp) == STATUS_BUSY) { 23184 int reset_retval = 0; 23185 23186 SD_INFO(SD_LOG_DUMP, un, 23187 "sddump: write failed with BUSY, try # %d\n", i); 23188 23189 if (un->un_f_lun_reset_enabled == TRUE) { 23190 reset_retval = scsi_reset(SD_ADDRESS(un), 23191 RESET_LUN); 23192 } 23193 if (reset_retval == 0) { 23194 (void) scsi_reset(SD_ADDRESS(un), RESET_TARGET); 23195 } 23196 (void) sd_send_polled_RQS(un); 23197 23198 } else { 23199 SD_INFO(SD_LOG_DUMP, un, 23200 "sddump: write failed with 0x%x, try # %d\n", 23201 SD_GET_PKT_STATUS(wr_pktp), i); 23202 mutex_enter(SD_MUTEX(un)); 23203 sd_reset_target(un, wr_pktp); 23204 mutex_exit(SD_MUTEX(un)); 23205 } 23206 23207 /* 23208 * If we are not getting anywhere with lun/target resets, 23209 * let's reset the bus. 23210 */ 23211 if (i == SD_NDUMP_RETRIES/2) { 23212 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL); 23213 (void) sd_send_polled_RQS(un); 23214 } 23215 } 23216 } 23217 23218 scsi_destroy_pkt(wr_pktp); 23219 mutex_enter(SD_MUTEX(un)); 23220 if ((NOT_DEVBSIZE(un)) && (doing_rmw == TRUE)) { 23221 mutex_exit(SD_MUTEX(un)); 23222 scsi_free_consistent_buf(wr_bp); 23223 } else { 23224 mutex_exit(SD_MUTEX(un)); 23225 } 23226 SD_TRACE(SD_LOG_DUMP, un, "sddump: exit: err = %d\n", err); 23227 return (err); 23228 } 23229 23230 /* 23231 * Function: sd_scsi_poll() 23232 * 23233 * Description: This is a wrapper for the scsi_poll call. 23234 * 23235 * Arguments: sd_lun - The unit structure 23236 * scsi_pkt - The scsi packet being sent to the device. 23237 * 23238 * Return Code: 0 - Command completed successfully with good status 23239 * -1 - Command failed. This could indicate a check condition 23240 * or other status value requiring recovery action. 23241 * 23242 * NOTE: This code is only called off sddump(). 23243 */ 23244 23245 static int 23246 sd_scsi_poll(struct sd_lun *un, struct scsi_pkt *pktp) 23247 { 23248 int status; 23249 23250 ASSERT(un != NULL); 23251 ASSERT(!mutex_owned(SD_MUTEX(un))); 23252 ASSERT(pktp != NULL); 23253 23254 status = SD_SUCCESS; 23255 23256 if (scsi_ifgetcap(&pktp->pkt_address, "tagged-qing", 1) == 1) { 23257 pktp->pkt_flags |= un->un_tagflags; 23258 pktp->pkt_flags &= ~FLAG_NODISCON; 23259 } 23260 23261 status = sd_ddi_scsi_poll(pktp); 23262 /* 23263 * Scsi_poll returns 0 (success) if the command completes and the 23264 * status block is STATUS_GOOD. We should only check errors if this 23265 * condition is not true. Even then we should send our own request 23266 * sense packet only if we have a check condition and auto 23267 * request sense has not been performed by the hba. 23268 * Don't get RQS data if pkt_reason is CMD_DEV_GONE. 23269 */ 23270 if ((status != SD_SUCCESS) && 23271 (SD_GET_PKT_STATUS(pktp) == STATUS_CHECK) && 23272 (pktp->pkt_state & STATE_ARQ_DONE) == 0 && 23273 (pktp->pkt_reason != CMD_DEV_GONE)) 23274 (void) sd_send_polled_RQS(un); 23275 23276 return (status); 23277 } 23278 23279 /* 23280 * Function: sd_send_polled_RQS() 23281 * 23282 * Description: This sends the request sense command to a device. 
23283 * 23284 * Arguments: sd_lun - The unit structure 23285 * 23286 * Return Code: 0 - Command completed successfully with good status 23287 * -1 - Command failed. 23288 * 23289 */ 23290 23291 static int 23292 sd_send_polled_RQS(struct sd_lun *un) 23293 { 23294 int ret_val; 23295 struct scsi_pkt *rqs_pktp; 23296 struct buf *rqs_bp; 23297 23298 ASSERT(un != NULL); 23299 ASSERT(!mutex_owned(SD_MUTEX(un))); 23300 23301 ret_val = SD_SUCCESS; 23302 23303 rqs_pktp = un->un_rqs_pktp; 23304 rqs_bp = un->un_rqs_bp; 23305 23306 mutex_enter(SD_MUTEX(un)); 23307 23308 if (un->un_sense_isbusy) { 23309 ret_val = SD_FAILURE; 23310 mutex_exit(SD_MUTEX(un)); 23311 return (ret_val); 23312 } 23313 23314 /* 23315 * If the request sense buffer (and packet) is not in use, 23316 * let's set the un_sense_isbusy and send our packet 23317 */ 23318 un->un_sense_isbusy = 1; 23319 rqs_pktp->pkt_resid = 0; 23320 rqs_pktp->pkt_reason = 0; 23321 rqs_pktp->pkt_flags |= FLAG_NOINTR; 23322 bzero(rqs_bp->b_un.b_addr, SENSE_LENGTH); 23323 23324 mutex_exit(SD_MUTEX(un)); 23325 23326 SD_INFO(SD_LOG_COMMON, un, "sd_send_polled_RQS: req sense buf at" 23327 " 0x%p\n", rqs_bp->b_un.b_addr); 23328 23329 /* 23330 * Can't send this to sd_scsi_poll, we wrap ourselves around the 23331 * axle - it has a call into us! 23332 */ 23333 if ((ret_val = sd_ddi_scsi_poll(rqs_pktp)) != 0) { 23334 SD_INFO(SD_LOG_COMMON, un, 23335 "sd_send_polled_RQS: RQS failed\n"); 23336 } 23337 23338 SD_DUMP_MEMORY(un, SD_LOG_COMMON, "sd_send_polled_RQS:", 23339 (uchar_t *)rqs_bp->b_un.b_addr, SENSE_LENGTH, SD_LOG_HEX); 23340 23341 mutex_enter(SD_MUTEX(un)); 23342 un->un_sense_isbusy = 0; 23343 mutex_exit(SD_MUTEX(un)); 23344 23345 return (ret_val); 23346 } 23347 23348 /* 23349 * Defines needed for localized version of the scsi_poll routine. 23350 */ 23351 #define CSEC 10000 /* usecs */ 23352 #define SEC_TO_CSEC (1000000/CSEC) 23353 23354 /* 23355 * Function: sd_ddi_scsi_poll() 23356 * 23357 * Description: Localized version of the scsi_poll routine. The purpose is to 23358 * send a scsi_pkt to a device as a polled command. This version 23359 * is to ensure more robust handling of transport errors. 23360 * Specifically this routine cures not ready, coming ready 23361 * transition for power up and reset of sonoma's. This can take 23362 * up to 45 seconds for power-on and 20 seconds for reset of a 23363 * sonoma lun. 23364 * 23365 * Arguments: scsi_pkt - The scsi_pkt being sent to a device 23366 * 23367 * Return Code: 0 - Command completed successfully with good status 23368 * -1 - Command failed. 23369 * 23370 * NOTE: This code is almost identical to scsi_poll, however before 6668774 can 23371 * be fixed (removing this code), we need to determine how to handle the 23372 * KEY_UNIT_ATTENTION condition below in conditions not as limited as sddump(). 23373 * 23374 * NOTE: This code is only called off sddump(). 23375 */ 23376 static int 23377 sd_ddi_scsi_poll(struct scsi_pkt *pkt) 23378 { 23379 int rval = -1; 23380 int savef; 23381 long savet; 23382 void (*savec)(); 23383 int timeout; 23384 int busy_count; 23385 int poll_delay; 23386 int rc; 23387 uint8_t *sensep; 23388 struct scsi_arq_status *arqstat; 23389 extern int do_polled_io; 23390 23391 ASSERT(pkt->pkt_scbp); 23392 23393 /* 23394 * save old flags.. 
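	 * (pkt_flags, pkt_comp and pkt_time are restored from these saved
	 * values before this routine returns)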
23395 */ 23396 savef = pkt->pkt_flags; 23397 savec = pkt->pkt_comp; 23398 savet = pkt->pkt_time; 23399 23400 pkt->pkt_flags |= FLAG_NOINTR; 23401 23402 /* 23403 * XXX there is nothing in the SCSA spec that states that we should not 23404 * do a callback for polled cmds; however, removing this will break sd 23405 * and probably other target drivers 23406 */ 23407 pkt->pkt_comp = NULL; 23408 23409 /* 23410 * we don't like a polled command without timeout. 23411 * 60 seconds seems long enough. 23412 */ 23413 if (pkt->pkt_time == 0) 23414 pkt->pkt_time = SCSI_POLL_TIMEOUT; 23415 23416 /* 23417 * Send polled cmd. 23418 * 23419 * We do some error recovery for various errors. Tran_busy, 23420 * queue full, and non-dispatched commands are retried every 10 msec. 23421 * as they are typically transient failures. Busy status and Not 23422 * Ready are retried every second as this status takes a while to 23423 * change. 23424 */ 23425 timeout = pkt->pkt_time * SEC_TO_CSEC; 23426 23427 for (busy_count = 0; busy_count < timeout; busy_count++) { 23428 /* 23429 * Initialize pkt status variables. 23430 */ 23431 *pkt->pkt_scbp = pkt->pkt_reason = pkt->pkt_state = 0; 23432 23433 if ((rc = scsi_transport(pkt)) != TRAN_ACCEPT) { 23434 if (rc != TRAN_BUSY) { 23435 /* Transport failed - give up. */ 23436 break; 23437 } else { 23438 /* Transport busy - try again. */ 23439 poll_delay = 1 * CSEC; /* 10 msec. */ 23440 } 23441 } else { 23442 /* 23443 * Transport accepted - check pkt status. 23444 */ 23445 rc = (*pkt->pkt_scbp) & STATUS_MASK; 23446 if ((pkt->pkt_reason == CMD_CMPLT) && 23447 (rc == STATUS_CHECK) && 23448 (pkt->pkt_state & STATE_ARQ_DONE)) { 23449 arqstat = 23450 (struct scsi_arq_status *)(pkt->pkt_scbp); 23451 sensep = (uint8_t *)&arqstat->sts_sensedata; 23452 } else { 23453 sensep = NULL; 23454 } 23455 23456 if ((pkt->pkt_reason == CMD_CMPLT) && 23457 (rc == STATUS_GOOD)) { 23458 /* No error - we're done */ 23459 rval = 0; 23460 break; 23461 23462 } else if (pkt->pkt_reason == CMD_DEV_GONE) { 23463 /* Lost connection - give up */ 23464 break; 23465 23466 } else if ((pkt->pkt_reason == CMD_INCOMPLETE) && 23467 (pkt->pkt_state == 0)) { 23468 /* Pkt not dispatched - try again. */ 23469 poll_delay = 1 * CSEC; /* 10 msec. */ 23470 23471 } else if ((pkt->pkt_reason == CMD_CMPLT) && 23472 (rc == STATUS_QFULL)) { 23473 /* Queue full - try again. */ 23474 poll_delay = 1 * CSEC; /* 10 msec. */ 23475 23476 } else if ((pkt->pkt_reason == CMD_CMPLT) && 23477 (rc == STATUS_BUSY)) { 23478 /* Busy - try again. */ 23479 poll_delay = 100 * CSEC; /* 1 sec. */ 23480 busy_count += (SEC_TO_CSEC - 1); 23481 23482 } else if ((sensep != NULL) && 23483 (scsi_sense_key(sensep) == KEY_UNIT_ATTENTION)) { 23484 /* 23485 * Unit Attention - try again. 23486 * Pretend it took 1 sec. 23487 * NOTE: 'continue' avoids poll_delay 23488 */ 23489 busy_count += (SEC_TO_CSEC - 1); 23490 continue; 23491 23492 } else if ((sensep != NULL) && 23493 (scsi_sense_key(sensep) == KEY_NOT_READY) && 23494 (scsi_sense_asc(sensep) == 0x04) && 23495 (scsi_sense_ascq(sensep) == 0x01)) { 23496 /* 23497 * Not ready -> ready - try again. 23498 * 04h/01h: LUN IS IN PROCESS OF BECOMING READY 23499 * ...same as STATUS_BUSY 23500 */ 23501 poll_delay = 100 * CSEC; /* 1 sec. */ 23502 busy_count += (SEC_TO_CSEC - 1); 23503 23504 } else { 23505 /* BAD status - give up. 
*/ 23506 break; 23507 } 23508 } 23509 23510 if (((curthread->t_flag & T_INTR_THREAD) == 0) && 23511 !do_polled_io) { 23512 delay(drv_usectohz(poll_delay)); 23513 } else { 23514 /* we busy wait during cpr_dump or interrupt threads */ 23515 drv_usecwait(poll_delay); 23516 } 23517 } 23518 23519 pkt->pkt_flags = savef; 23520 pkt->pkt_comp = savec; 23521 pkt->pkt_time = savet; 23522 23523 /* return on error */ 23524 if (rval) 23525 return (rval); 23526 23527 /* 23528 * This is not a performance critical code path. 23529 * 23530 * As an accommodation for scsi_poll callers, to avoid ddi_dma_sync() 23531 * issues associated with looking at DMA memory prior to 23532 * scsi_pkt_destroy(), we scsi_sync_pkt() prior to return. 23533 */ 23534 scsi_sync_pkt(pkt); 23535 return (0); 23536 } 23537 23538 23539 23540 /* 23541 * Function: sd_persistent_reservation_in_read_keys 23542 * 23543 * Description: This routine is the driver entry point for handling CD-ROM 23544 * multi-host persistent reservation requests (MHIOCGRP_INKEYS) 23545 * by sending the SCSI-3 PRIN commands to the device. 23546 * Processes the read keys command response by copying the 23547 * reservation key information into the user provided buffer. 23548 * Support for the 32/64 bit _MULTI_DATAMODEL is implemented. 23549 * 23550 * Arguments: un - Pointer to soft state struct for the target. 23551 * usrp - user provided pointer to multihost Persistent In Read 23552 * Keys structure (mhioc_inkeys_t) 23553 * flag - this argument is a pass through to ddi_copyxxx() 23554 * directly from the mode argument of ioctl(). 23555 * 23556 * Return Code: 0 - Success 23557 * EACCES 23558 * ENOTSUP 23559 * errno return code from sd_send_scsi_cmd() 23560 * 23561 * Context: Can sleep. Does not return until command is completed. 23562 */ 23563 23564 static int 23565 sd_persistent_reservation_in_read_keys(struct sd_lun *un, 23566 mhioc_inkeys_t *usrp, int flag) 23567 { 23568 #ifdef _MULTI_DATAMODEL 23569 struct mhioc_key_list32 li32; 23570 #endif 23571 sd_prin_readkeys_t *in; 23572 mhioc_inkeys_t *ptr; 23573 mhioc_key_list_t li; 23574 uchar_t *data_bufp; 23575 int data_len; 23576 int rval; 23577 size_t copysz; 23578 23579 if ((ptr = (mhioc_inkeys_t *)usrp) == NULL) { 23580 return (EINVAL); 23581 } 23582 bzero(&li, sizeof (mhioc_key_list_t)); 23583 23584 /* 23585 * Get the listsize from user 23586 */ 23587 #ifdef _MULTI_DATAMODEL 23588 23589 switch (ddi_model_convert_from(flag & FMODELS)) { 23590 case DDI_MODEL_ILP32: 23591 copysz = sizeof (struct mhioc_key_list32); 23592 if (ddi_copyin(ptr->li, &li32, copysz, flag)) { 23593 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23594 "sd_persistent_reservation_in_read_keys: " 23595 "failed ddi_copyin: mhioc_key_list32_t\n"); 23596 rval = EFAULT; 23597 goto done; 23598 } 23599 li.listsize = li32.listsize; 23600 li.list = (mhioc_resv_key_t *)(uintptr_t)li32.list; 23601 break; 23602 23603 case DDI_MODEL_NONE: 23604 copysz = sizeof (mhioc_key_list_t); 23605 if (ddi_copyin(ptr->li, &li, copysz, flag)) { 23606 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23607 "sd_persistent_reservation_in_read_keys: " 23608 "failed ddi_copyin: mhioc_key_list_t\n"); 23609 rval = EFAULT; 23610 goto done; 23611 } 23612 break; 23613 } 23614 23615 #else /* ! 
_MULTI_DATAMODEL */ 23616 copysz = sizeof (mhioc_key_list_t); 23617 if (ddi_copyin(ptr->li, &li, copysz, flag)) { 23618 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23619 "sd_persistent_reservation_in_read_keys: " 23620 "failed ddi_copyin: mhioc_key_list_t\n"); 23621 rval = EFAULT; 23622 goto done; 23623 } 23624 #endif 23625 23626 data_len = li.listsize * MHIOC_RESV_KEY_SIZE; 23627 data_len += (sizeof (sd_prin_readkeys_t) - sizeof (caddr_t)); 23628 data_bufp = kmem_zalloc(data_len, KM_SLEEP); 23629 23630 if ((rval = sd_send_scsi_PERSISTENT_RESERVE_IN(un, SD_READ_KEYS, 23631 data_len, data_bufp)) != 0) { 23632 goto done; 23633 } 23634 in = (sd_prin_readkeys_t *)data_bufp; 23635 ptr->generation = BE_32(in->generation); 23636 li.listlen = BE_32(in->len) / MHIOC_RESV_KEY_SIZE; 23637 23638 /* 23639 * Return the min(listsize, listlen) keys 23640 */ 23641 #ifdef _MULTI_DATAMODEL 23642 23643 switch (ddi_model_convert_from(flag & FMODELS)) { 23644 case DDI_MODEL_ILP32: 23645 li32.listlen = li.listlen; 23646 if (ddi_copyout(&li32, ptr->li, copysz, flag)) { 23647 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23648 "sd_persistent_reservation_in_read_keys: " 23649 "failed ddi_copyout: mhioc_key_list32_t\n"); 23650 rval = EFAULT; 23651 goto done; 23652 } 23653 break; 23654 23655 case DDI_MODEL_NONE: 23656 if (ddi_copyout(&li, ptr->li, copysz, flag)) { 23657 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23658 "sd_persistent_reservation_in_read_keys: " 23659 "failed ddi_copyout: mhioc_key_list_t\n"); 23660 rval = EFAULT; 23661 goto done; 23662 } 23663 break; 23664 } 23665 23666 #else /* ! _MULTI_DATAMODEL */ 23667 23668 if (ddi_copyout(&li, ptr->li, copysz, flag)) { 23669 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23670 "sd_persistent_reservation_in_read_keys: " 23671 "failed ddi_copyout: mhioc_key_list_t\n"); 23672 rval = EFAULT; 23673 goto done; 23674 } 23675 23676 #endif /* _MULTI_DATAMODEL */ 23677 23678 copysz = min(li.listlen * MHIOC_RESV_KEY_SIZE, 23679 li.listsize * MHIOC_RESV_KEY_SIZE); 23680 if (ddi_copyout(&in->keylist, li.list, copysz, flag)) { 23681 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23682 "sd_persistent_reservation_in_read_keys: " 23683 "failed ddi_copyout: keylist\n"); 23684 rval = EFAULT; 23685 } 23686 done: 23687 kmem_free(data_bufp, data_len); 23688 return (rval); 23689 } 23690 23691 23692 /* 23693 * Function: sd_persistent_reservation_in_read_resv 23694 * 23695 * Description: This routine is the driver entry point for handling CD-ROM 23696 * multi-host persistent reservation requests (MHIOCGRP_INRESV) 23697 * by sending the SCSI-3 PRIN commands to the device. 23698 * Process the read persistent reservations command response by 23699 * copying the reservation information into the user provided 23700 * buffer. Support for the 32/64 _MULTI_DATAMODEL is implemented. 23701 * 23702 * Arguments: un - Pointer to soft state struct for the target. 23703 * usrp - user provided pointer to multihost Persistent In Read 23704 * Keys structure (mhioc_inkeys_t) 23705 * flag - this argument is a pass through to ddi_copyxxx() 23706 * directly from the mode argument of ioctl(). 23707 * 23708 * Return Code: 0 - Success 23709 * EACCES 23710 * ENOTSUP 23711 * errno return code from sd_send_scsi_cmd() 23712 * 23713 * Context: Can sleep. Does not return until command is completed. 
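 *
 *		(For this routine usrp actually points to an mhioc_inresvs_t;
 *		see the function signature below.) The caller supplies
 *		listsize (the capacity of its descriptor buffer); this
 *		routine returns listlen (the number of reservations the
 *		device reported) and copies out min(listsize, listlen)
 *		reservation descriptors.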
23714 */ 23715 23716 static int 23717 sd_persistent_reservation_in_read_resv(struct sd_lun *un, 23718 mhioc_inresvs_t *usrp, int flag) 23719 { 23720 #ifdef _MULTI_DATAMODEL 23721 struct mhioc_resv_desc_list32 resvlist32; 23722 #endif 23723 sd_prin_readresv_t *in; 23724 mhioc_inresvs_t *ptr; 23725 sd_readresv_desc_t *readresv_ptr; 23726 mhioc_resv_desc_list_t resvlist; 23727 mhioc_resv_desc_t resvdesc; 23728 uchar_t *data_bufp; 23729 int data_len; 23730 int rval; 23731 int i; 23732 size_t copysz; 23733 mhioc_resv_desc_t *bufp; 23734 23735 if ((ptr = usrp) == NULL) { 23736 return (EINVAL); 23737 } 23738 23739 /* 23740 * Get the listsize from user 23741 */ 23742 #ifdef _MULTI_DATAMODEL 23743 switch (ddi_model_convert_from(flag & FMODELS)) { 23744 case DDI_MODEL_ILP32: 23745 copysz = sizeof (struct mhioc_resv_desc_list32); 23746 if (ddi_copyin(ptr->li, &resvlist32, copysz, flag)) { 23747 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23748 "sd_persistent_reservation_in_read_resv: " 23749 "failed ddi_copyin: mhioc_resv_desc_list_t\n"); 23750 rval = EFAULT; 23751 goto done; 23752 } 23753 resvlist.listsize = resvlist32.listsize; 23754 resvlist.list = (mhioc_resv_desc_t *)(uintptr_t)resvlist32.list; 23755 break; 23756 23757 case DDI_MODEL_NONE: 23758 copysz = sizeof (mhioc_resv_desc_list_t); 23759 if (ddi_copyin(ptr->li, &resvlist, copysz, flag)) { 23760 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23761 "sd_persistent_reservation_in_read_resv: " 23762 "failed ddi_copyin: mhioc_resv_desc_list_t\n"); 23763 rval = EFAULT; 23764 goto done; 23765 } 23766 break; 23767 } 23768 #else /* ! _MULTI_DATAMODEL */ 23769 copysz = sizeof (mhioc_resv_desc_list_t); 23770 if (ddi_copyin(ptr->li, &resvlist, copysz, flag)) { 23771 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23772 "sd_persistent_reservation_in_read_resv: " 23773 "failed ddi_copyin: mhioc_resv_desc_list_t\n"); 23774 rval = EFAULT; 23775 goto done; 23776 } 23777 #endif /* ! _MULTI_DATAMODEL */ 23778 23779 data_len = resvlist.listsize * SCSI3_RESV_DESC_LEN; 23780 data_len += (sizeof (sd_prin_readresv_t) - sizeof (caddr_t)); 23781 data_bufp = kmem_zalloc(data_len, KM_SLEEP); 23782 23783 if ((rval = sd_send_scsi_PERSISTENT_RESERVE_IN(un, SD_READ_RESV, 23784 data_len, data_bufp)) != 0) { 23785 goto done; 23786 } 23787 in = (sd_prin_readresv_t *)data_bufp; 23788 ptr->generation = BE_32(in->generation); 23789 resvlist.listlen = BE_32(in->len) / SCSI3_RESV_DESC_LEN; 23790 23791 /* 23792 * Return the min(listsize, listlen( keys 23793 */ 23794 #ifdef _MULTI_DATAMODEL 23795 23796 switch (ddi_model_convert_from(flag & FMODELS)) { 23797 case DDI_MODEL_ILP32: 23798 resvlist32.listlen = resvlist.listlen; 23799 if (ddi_copyout(&resvlist32, ptr->li, copysz, flag)) { 23800 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23801 "sd_persistent_reservation_in_read_resv: " 23802 "failed ddi_copyout: mhioc_resv_desc_list_t\n"); 23803 rval = EFAULT; 23804 goto done; 23805 } 23806 break; 23807 23808 case DDI_MODEL_NONE: 23809 if (ddi_copyout(&resvlist, ptr->li, copysz, flag)) { 23810 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23811 "sd_persistent_reservation_in_read_resv: " 23812 "failed ddi_copyout: mhioc_resv_desc_list_t\n"); 23813 rval = EFAULT; 23814 goto done; 23815 } 23816 break; 23817 } 23818 23819 #else /* ! _MULTI_DATAMODEL */ 23820 23821 if (ddi_copyout(&resvlist, ptr->li, copysz, flag)) { 23822 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23823 "sd_persistent_reservation_in_read_resv: " 23824 "failed ddi_copyout: mhioc_resv_desc_list_t\n"); 23825 rval = EFAULT; 23826 goto done; 23827 } 23828 23829 #endif /* ! 
_MULTI_DATAMODEL */ 23830 23831 readresv_ptr = (sd_readresv_desc_t *)&in->readresv_desc; 23832 bufp = resvlist.list; 23833 copysz = sizeof (mhioc_resv_desc_t); 23834 for (i = 0; i < min(resvlist.listlen, resvlist.listsize); 23835 i++, readresv_ptr++, bufp++) { 23836 23837 bcopy(&readresv_ptr->resvkey, &resvdesc.key, 23838 MHIOC_RESV_KEY_SIZE); 23839 resvdesc.type = readresv_ptr->type; 23840 resvdesc.scope = readresv_ptr->scope; 23841 resvdesc.scope_specific_addr = 23842 BE_32(readresv_ptr->scope_specific_addr); 23843 23844 if (ddi_copyout(&resvdesc, bufp, copysz, flag)) { 23845 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23846 "sd_persistent_reservation_in_read_resv: " 23847 "failed ddi_copyout: resvlist\n"); 23848 rval = EFAULT; 23849 goto done; 23850 } 23851 } 23852 done: 23853 kmem_free(data_bufp, data_len); 23854 return (rval); 23855 } 23856 23857 23858 /* 23859 * Function: sr_change_blkmode() 23860 * 23861 * Description: This routine is the driver entry point for handling CD-ROM 23862 * block mode ioctl requests. Support for returning and changing 23863 * the current block size in use by the device is implemented. The 23864 * LBA size is changed via a MODE SELECT Block Descriptor. 23865 * 23866 * This routine issues a mode sense with an allocation length of 23867 * 12 bytes for the mode page header and a single block descriptor. 23868 * 23869 * Arguments: dev - the device 'dev_t' 23870 * cmd - the request type; one of CDROMGBLKMODE (get) or 23871 * CDROMSBLKMODE (set) 23872 * data - current block size or requested block size 23873 * flag - this argument is a pass through to ddi_copyxxx() directly 23874 * from the mode argument of ioctl(). 23875 * 23876 * Return Code: the code returned by sd_send_scsi_cmd() 23877 * EINVAL if invalid arguments are provided 23878 * EFAULT if ddi_copyxxx() fails 23879 * ENXIO if fail ddi_get_soft_state 23880 * EIO if invalid mode sense block descriptor length 23881 * 23882 */ 23883 23884 static int 23885 sr_change_blkmode(dev_t dev, int cmd, intptr_t data, int flag) 23886 { 23887 struct sd_lun *un = NULL; 23888 struct mode_header *sense_mhp, *select_mhp; 23889 struct block_descriptor *sense_desc, *select_desc; 23890 int current_bsize; 23891 int rval = EINVAL; 23892 uchar_t *sense = NULL; 23893 uchar_t *select = NULL; 23894 23895 ASSERT((cmd == CDROMGBLKMODE) || (cmd == CDROMSBLKMODE)); 23896 23897 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 23898 return (ENXIO); 23899 } 23900 23901 /* 23902 * The block length is changed via the Mode Select block descriptor, the 23903 * "Read/Write Error Recovery" mode page (0x1) contents are not actually 23904 * required as part of this routine. Therefore the mode sense allocation 23905 * length is specified to be the length of a mode page header and a 23906 * block descriptor. 
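	 * (BUFLEN_CHG_BLK_MODE covers the 12 bytes noted in the header
	 * above: a 4-byte mode sense header plus one 8-byte block
	 * descriptor.)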
	 */
	sense = kmem_zalloc(BUFLEN_CHG_BLK_MODE, KM_SLEEP);

	if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, sense,
	    BUFLEN_CHG_BLK_MODE, MODEPAGE_ERR_RECOV, SD_PATH_STANDARD)) != 0) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sr_change_blkmode: Mode Sense Failed\n");
		kmem_free(sense, BUFLEN_CHG_BLK_MODE);
		return (rval);
	}

	/* Check the block descriptor len to handle only 1 block descriptor */
	sense_mhp = (struct mode_header *)sense;
	if ((sense_mhp->bdesc_length == 0) ||
	    (sense_mhp->bdesc_length > MODE_BLK_DESC_LENGTH)) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sr_change_blkmode: Mode Sense returned invalid block"
		    " descriptor length\n");
		kmem_free(sense, BUFLEN_CHG_BLK_MODE);
		return (EIO);
	}
	sense_desc = (struct block_descriptor *)(sense + MODE_HEADER_LENGTH);
	current_bsize = ((sense_desc->blksize_hi << 16) |
	    (sense_desc->blksize_mid << 8) | sense_desc->blksize_lo);

	/* Process command */
	switch (cmd) {
	case CDROMGBLKMODE:
		/* Return the block size obtained during the mode sense */
		if (ddi_copyout(&current_bsize, (void *)data,
		    sizeof (int), flag) != 0)
			rval = EFAULT;
		break;
	case CDROMSBLKMODE:
		/* Validate the requested block size */
		switch (data) {
		case CDROM_BLK_512:
		case CDROM_BLK_1024:
		case CDROM_BLK_2048:
		case CDROM_BLK_2056:
		case CDROM_BLK_2336:
		case CDROM_BLK_2340:
		case CDROM_BLK_2352:
		case CDROM_BLK_2368:
		case CDROM_BLK_2448:
		case CDROM_BLK_2646:
		case CDROM_BLK_2647:
			break;
		default:
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "sr_change_blkmode: "
			    "Block Size '%ld' Not Supported\n", data);
			kmem_free(sense, BUFLEN_CHG_BLK_MODE);
			return (EINVAL);
		}

		/*
		 * The current block size matches the requested block size so
		 * there is no need to send the mode select to change the size
		 */
		if (current_bsize == data) {
			break;
		}

		/* Build the select data for the requested block size */
		select = kmem_zalloc(BUFLEN_CHG_BLK_MODE, KM_SLEEP);
		select_mhp = (struct mode_header *)select;
		select_desc =
		    (struct block_descriptor *)(select + MODE_HEADER_LENGTH);
		/*
		 * The LBA size is changed via the block descriptor, so the
		 * descriptor is built according to the user data
		 */
		select_mhp->bdesc_length = MODE_BLK_DESC_LENGTH;
		select_desc->blksize_hi = (char)(((data) & 0x00ff0000) >> 16);
		select_desc->blksize_mid = (char)(((data) & 0x0000ff00) >> 8);
		select_desc->blksize_lo = (char)((data) & 0x000000ff);

		/* Send the mode select for the requested block size */
		if ((rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP0,
		    select, BUFLEN_CHG_BLK_MODE, SD_DONTSAVE_PAGE,
		    SD_PATH_STANDARD)) != 0) {
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "sr_change_blkmode: Mode Select Failed\n");
			/*
			 * The mode select failed for the requested block size,
			 * so reset the data for the original block size and
			 * send it to the target. The error is indicated by the
			 * return value for the failed mode select.
23996 */ 23997 select_desc->blksize_hi = sense_desc->blksize_hi; 23998 select_desc->blksize_mid = sense_desc->blksize_mid; 23999 select_desc->blksize_lo = sense_desc->blksize_lo; 24000 (void) sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, 24001 select, BUFLEN_CHG_BLK_MODE, SD_DONTSAVE_PAGE, 24002 SD_PATH_STANDARD); 24003 } else { 24004 ASSERT(!mutex_owned(SD_MUTEX(un))); 24005 mutex_enter(SD_MUTEX(un)); 24006 sd_update_block_info(un, (uint32_t)data, 0); 24007 mutex_exit(SD_MUTEX(un)); 24008 } 24009 break; 24010 default: 24011 /* should not reach here, but check anyway */ 24012 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24013 "sr_change_blkmode: Command '%x' Not Supported\n", cmd); 24014 rval = EINVAL; 24015 break; 24016 } 24017 24018 if (select) { 24019 kmem_free(select, BUFLEN_CHG_BLK_MODE); 24020 } 24021 if (sense) { 24022 kmem_free(sense, BUFLEN_CHG_BLK_MODE); 24023 } 24024 return (rval); 24025 } 24026 24027 24028 /* 24029 * Note: The following sr_change_speed() and sr_atapi_change_speed() routines 24030 * implement driver support for getting and setting the CD speed. The command 24031 * set used will be based on the device type. If the device has not been 24032 * identified as MMC the Toshiba vendor specific mode page will be used. If 24033 * the device is MMC but does not support the Real Time Streaming feature 24034 * the SET CD SPEED command will be used to set speed and mode page 0x2A will 24035 * be used to read the speed. 24036 */ 24037 24038 /* 24039 * Function: sr_change_speed() 24040 * 24041 * Description: This routine is the driver entry point for handling CD-ROM 24042 * drive speed ioctl requests for devices supporting the Toshiba 24043 * vendor specific drive speed mode page. Support for returning 24044 * and changing the current drive speed in use by the device is 24045 * implemented. 24046 * 24047 * Arguments: dev - the device 'dev_t' 24048 * cmd - the request type; one of CDROMGDRVSPEED (get) or 24049 * CDROMSDRVSPEED (set) 24050 * data - current drive speed or requested drive speed 24051 * flag - this argument is a pass through to ddi_copyxxx() directly 24052 * from the mode argument of ioctl(). 24053 * 24054 * Return Code: the code returned by sd_send_scsi_cmd() 24055 * EINVAL if invalid arguments are provided 24056 * EFAULT if ddi_copyxxx() fails 24057 * ENXIO if fail ddi_get_soft_state 24058 * EIO if invalid mode sense block descriptor length 24059 */ 24060 24061 static int 24062 sr_change_speed(dev_t dev, int cmd, intptr_t data, int flag) 24063 { 24064 struct sd_lun *un = NULL; 24065 struct mode_header *sense_mhp, *select_mhp; 24066 struct mode_speed *sense_page, *select_page; 24067 int current_speed; 24068 int rval = EINVAL; 24069 int bd_len; 24070 uchar_t *sense = NULL; 24071 uchar_t *select = NULL; 24072 24073 ASSERT((cmd == CDROMGDRVSPEED) || (cmd == CDROMSDRVSPEED)); 24074 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24075 return (ENXIO); 24076 } 24077 24078 /* 24079 * Note: The drive speed is being modified here according to a Toshiba 24080 * vendor specific mode page (0x31). 
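	 *
	 * For reference (summarized from the code below): the speed field of
	 * the Toshiba page carries the CDROM_* speed codes directly, with one
	 * quirk; this driver maps the vendor value 0x2 to and from
	 * CDROM_TWELVE_SPEED.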
	 */
	sense = kmem_zalloc(BUFLEN_MODE_CDROM_SPEED, KM_SLEEP);

	if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, sense,
	    BUFLEN_MODE_CDROM_SPEED, CDROM_MODE_SPEED,
	    SD_PATH_STANDARD)) != 0) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sr_change_speed: Mode Sense Failed\n");
		kmem_free(sense, BUFLEN_MODE_CDROM_SPEED);
		return (rval);
	}
	sense_mhp = (struct mode_header *)sense;

	/* Check the block descriptor len to handle only 1 block descriptor */
	bd_len = sense_mhp->bdesc_length;
	if (bd_len > MODE_BLK_DESC_LENGTH) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sr_change_speed: Mode Sense returned invalid block "
		    "descriptor length\n");
		kmem_free(sense, BUFLEN_MODE_CDROM_SPEED);
		return (EIO);
	}

	sense_page = (struct mode_speed *)
	    (sense + MODE_HEADER_LENGTH + sense_mhp->bdesc_length);
	current_speed = sense_page->speed;

	/* Process command */
	switch (cmd) {
	case CDROMGDRVSPEED:
		/* Return the drive speed obtained during the mode sense */
		if (current_speed == 0x2) {
			current_speed = CDROM_TWELVE_SPEED;
		}
		if (ddi_copyout(&current_speed, (void *)data,
		    sizeof (int), flag) != 0) {
			rval = EFAULT;
		}
		break;
	case CDROMSDRVSPEED:
		/* Validate the requested drive speed */
		switch ((uchar_t)data) {
		case CDROM_TWELVE_SPEED:
			data = 0x2;
			/*FALLTHROUGH*/
		case CDROM_NORMAL_SPEED:
		case CDROM_DOUBLE_SPEED:
		case CDROM_QUAD_SPEED:
		case CDROM_MAXIMUM_SPEED:
			break;
		default:
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "sr_change_speed: "
			    "Drive Speed '%d' Not Supported\n", (uchar_t)data);
			kmem_free(sense, BUFLEN_MODE_CDROM_SPEED);
			return (EINVAL);
		}

		/*
		 * The current drive speed matches the requested drive speed so
		 * there is no need to send the mode select to change the speed
		 */
		if (current_speed == data) {
			break;
		}

		/* Build the select data for the requested drive speed */
		select = kmem_zalloc(BUFLEN_MODE_CDROM_SPEED, KM_SLEEP);
		select_mhp = (struct mode_header *)select;
		select_mhp->bdesc_length = 0;
		select_page =
		    (struct mode_speed *)(select + MODE_HEADER_LENGTH);
		select_page->mode_page.code = CDROM_MODE_SPEED;
		select_page->mode_page.length = 2;
		select_page->speed = (uchar_t)data;

		/* Send the mode select for the requested drive speed */
		if ((rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, select,
		    MODEPAGE_CDROM_SPEED_LEN + MODE_HEADER_LENGTH,
		    SD_DONTSAVE_PAGE, SD_PATH_STANDARD)) != 0) {
			/*
			 * The mode select failed for the requested drive
			 * speed, so reset the data for the original drive
			 * speed and send it to the target. The error is
			 * indicated by the return value for the failed
			 * mode select.
			 */
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "sr_change_speed: Mode Select Failed\n");
			select_page->speed = sense_page->speed;
			(void) sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, select,
			    MODEPAGE_CDROM_SPEED_LEN + MODE_HEADER_LENGTH,
			    SD_DONTSAVE_PAGE, SD_PATH_STANDARD);
		}
		break;
	default:
		/* should not reach here, but check anyway */
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sr_change_speed: Command '%x' Not Supported\n", cmd);
		rval = EINVAL;
		break;
	}

	if (select) {
		kmem_free(select, BUFLEN_MODE_CDROM_SPEED);
	}
	if (sense) {
		kmem_free(sense, BUFLEN_MODE_CDROM_SPEED);
	}

	return (rval);
}


/*
 * Function: sr_atapi_change_speed()
 *
 * Description: This routine is the driver entry point for handling CD-ROM
 *		drive speed ioctl requests for MMC devices that do not support
 *		the Real Time Streaming feature (0x107).
 *
 *		Note: This routine will use the SET SPEED command which may not
 *		be supported by all devices.
 *
 * Arguments: dev - the device 'dev_t'
 *	cmd - the request type; one of CDROMGDRVSPEED (get) or
 *	    CDROMSDRVSPEED (set)
 *	data - current drive speed or requested drive speed
 *	flag - this argument is a pass through to ddi_copyxxx() directly
 *	    from the mode argument of ioctl().
 *
 * Return Code: the code returned by sd_send_scsi_cmd()
 *		EINVAL if invalid arguments are provided
 *		EFAULT if ddi_copyxxx() fails
 *		ENXIO if fail ddi_get_soft_state
 *		EIO if invalid mode sense block descriptor length
 */

static int
sr_atapi_change_speed(dev_t dev, int cmd, intptr_t data, int flag)
{
	struct sd_lun		*un;
	struct uscsi_cmd	*com = NULL;
	struct mode_header_grp2	*sense_mhp;
	uchar_t			*sense_page;
	uchar_t			*sense = NULL;
	char			cdb[CDB_GROUP5];
	int			bd_len;
	int			current_speed = 0;
	int			max_speed = 0;
	int			rval;

	ASSERT((cmd == CDROMGDRVSPEED) || (cmd == CDROMSDRVSPEED));

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	sense = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP);

	if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, sense,
	    BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP,
	    SD_PATH_STANDARD)) != 0) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sr_atapi_change_speed: Mode Sense Failed\n");
		kmem_free(sense, BUFLEN_MODE_CDROM_CAP);
		return (rval);
	}

	/* Check the block descriptor len to handle only 1 block descriptor */
	sense_mhp = (struct mode_header_grp2 *)sense;
	bd_len = (sense_mhp->bdesc_length_hi << 8) | sense_mhp->bdesc_length_lo;
	if (bd_len > MODE_BLK_DESC_LENGTH) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sr_atapi_change_speed: Mode Sense returned invalid "
		    "block descriptor length\n");
		kmem_free(sense, BUFLEN_MODE_CDROM_CAP);
		return (EIO);
	}

	/* Calculate the current and maximum drive speeds */
	sense_page = (uchar_t *)(sense + MODE_HEADER_LENGTH_GRP2 + bd_len);
	current_speed = (sense_page[14] << 8) | sense_page[15];
	max_speed = (sense_page[8] << 8) | sense_page[9];

	/* Process the command */
	switch (cmd) {
	case CDROMGDRVSPEED:
		current_speed /= SD_SPEED_1X;
		if (ddi_copyout(&current_speed, (void *)data,
		    sizeof (int), flag) != 0)
			rval = EFAULT;
		break;
	case CDROMSDRVSPEED:
		/* Convert the speed code to KB/sec */
		switch ((uchar_t)data) {
		case CDROM_NORMAL_SPEED:
			current_speed = SD_SPEED_1X;
			break;
		case CDROM_DOUBLE_SPEED:
			current_speed = 2 * SD_SPEED_1X;
			break;
		case CDROM_QUAD_SPEED:
			current_speed = 4 * SD_SPEED_1X;
			break;
		case CDROM_TWELVE_SPEED:
			current_speed = 12 * SD_SPEED_1X;
			break;
		case CDROM_MAXIMUM_SPEED:
			current_speed = 0xffff;
			break;
		default:
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "sr_atapi_change_speed: invalid drive speed %d\n",
			    (uchar_t)data);
			kmem_free(sense, BUFLEN_MODE_CDROM_CAP);
			return (EINVAL);
		}

		/* Check the request against the drive's max speed. */
		if (current_speed != 0xffff) {
			if (current_speed > max_speed) {
				kmem_free(sense, BUFLEN_MODE_CDROM_CAP);
				return (EINVAL);
			}
		}

		/*
		 * Build and send the SET SPEED command
		 *
		 * Note: The SET SPEED (0xBB) command used in this routine is
		 * obsolete per the SCSI MMC spec but still supported in the
		 * MT FUJI vendor spec. Most equipment is adhering to MT FUJI,
		 * therefore the command is still implemented in this routine.
		 */
		bzero(cdb, sizeof (cdb));
		cdb[0] = (char)SCMD_SET_CDROM_SPEED;
		cdb[2] = (uchar_t)(current_speed >> 8);
		cdb[3] = (uchar_t)current_speed;
		com = kmem_zalloc(sizeof (*com), KM_SLEEP);
		com->uscsi_cdb = (caddr_t)cdb;
		com->uscsi_cdblen = CDB_GROUP5;
		com->uscsi_bufaddr = NULL;
		com->uscsi_buflen = 0;
		com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT;
		rval = sd_send_scsi_cmd(dev, com, FKIOCTL, 0,
		    SD_PATH_STANDARD);
		break;
	default:
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sr_atapi_change_speed: Command '%x' Not Supported\n", cmd);
		rval = EINVAL;
	}

	if (sense) {
		kmem_free(sense, BUFLEN_MODE_CDROM_CAP);
	}
	if (com) {
		kmem_free(com, sizeof (*com));
	}
	return (rval);
}


/*
 * Function: sr_pause_resume()
 *
 * Description: This routine is the driver entry point for handling CD-ROM
 *		pause/resume ioctl requests. This only affects the audio play
 *		operation.
 *
 * Arguments: dev - the device 'dev_t'
 *	cmd - the request type; one of CDROMPAUSE or CDROMRESUME, used
 *	    for setting the resume bit of the cdb.
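 *
 *		Usage sketch (illustrative only; a hypothetical user-level
 *		caller, not part of this driver):
 *
 *			(void) ioctl(fd, CDROMPAUSE);	pause audio play
 *			(void) ioctl(fd, CDROMRESUME);	resume audio play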
 *
 * Return Code: the code returned by sd_send_scsi_cmd()
 *		EINVAL if invalid mode specified
 *
 */

static int
sr_pause_resume(dev_t dev, int cmd)
{
	struct sd_lun		*un;
	struct uscsi_cmd	*com;
	char			cdb[CDB_GROUP1];
	int			rval;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
	bzero(cdb, CDB_GROUP1);
	cdb[0] = SCMD_PAUSE_RESUME;
	switch (cmd) {
	case CDROMRESUME:
		cdb[8] = 1;
		break;
	case CDROMPAUSE:
		cdb[8] = 0;
		break;
	default:
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_pause_resume:"
		    " Command '%x' Not Supported\n", cmd);
		rval = EINVAL;
		goto done;
	}

	com->uscsi_cdb = cdb;
	com->uscsi_cdblen = CDB_GROUP1;
	com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT;

	rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
	    SD_PATH_STANDARD);

done:
	kmem_free(com, sizeof (*com));
	return (rval);
}


/*
 * Function: sr_play_msf()
 *
 * Description: This routine is the driver entry point for handling CD-ROM
 *		ioctl requests to output the audio signals at the specified
 *		starting address and continue the audio play until the
 *		specified ending address (CDROMPLAYMSF). The address is in
 *		Minute Second Frame (MSF) format.
 *
 * Arguments: dev - the device 'dev_t'
 *	data - pointer to user provided audio msf structure,
 *	    specifying start/end addresses.
 *	flag - this argument is a pass through to ddi_copyxxx()
 *	    directly from the mode argument of ioctl().
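 *
 *		For example (illustrative): playing from 02:30:00 through
 *		05:00:00 sets cdb[3..5] = { 2, 30, 0 } and
 *		cdb[6..8] = { 5, 0, 0 }; when un_f_cfg_playmsf_bcd is set the
 *		same values are BCD-encoded instead, e.g. 30 becomes 0x30.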
24417 * 24418 * Return Code: the code returned by sd_send_scsi_cmd() 24419 * EFAULT if ddi_copyxxx() fails 24420 * ENXIO if fail ddi_get_soft_state 24421 * EINVAL if data pointer is NULL 24422 */ 24423 24424 static int 24425 sr_play_msf(dev_t dev, caddr_t data, int flag) 24426 { 24427 struct sd_lun *un; 24428 struct uscsi_cmd *com; 24429 struct cdrom_msf msf_struct; 24430 struct cdrom_msf *msf = &msf_struct; 24431 char cdb[CDB_GROUP1]; 24432 int rval; 24433 24434 if (data == NULL) { 24435 return (EINVAL); 24436 } 24437 24438 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24439 return (ENXIO); 24440 } 24441 24442 if (ddi_copyin(data, msf, sizeof (struct cdrom_msf), flag)) { 24443 return (EFAULT); 24444 } 24445 24446 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 24447 bzero(cdb, CDB_GROUP1); 24448 cdb[0] = SCMD_PLAYAUDIO_MSF; 24449 if (un->un_f_cfg_playmsf_bcd == TRUE) { 24450 cdb[3] = BYTE_TO_BCD(msf->cdmsf_min0); 24451 cdb[4] = BYTE_TO_BCD(msf->cdmsf_sec0); 24452 cdb[5] = BYTE_TO_BCD(msf->cdmsf_frame0); 24453 cdb[6] = BYTE_TO_BCD(msf->cdmsf_min1); 24454 cdb[7] = BYTE_TO_BCD(msf->cdmsf_sec1); 24455 cdb[8] = BYTE_TO_BCD(msf->cdmsf_frame1); 24456 } else { 24457 cdb[3] = msf->cdmsf_min0; 24458 cdb[4] = msf->cdmsf_sec0; 24459 cdb[5] = msf->cdmsf_frame0; 24460 cdb[6] = msf->cdmsf_min1; 24461 cdb[7] = msf->cdmsf_sec1; 24462 cdb[8] = msf->cdmsf_frame1; 24463 } 24464 com->uscsi_cdb = cdb; 24465 com->uscsi_cdblen = CDB_GROUP1; 24466 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT; 24467 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 24468 SD_PATH_STANDARD); 24469 kmem_free(com, sizeof (*com)); 24470 return (rval); 24471 } 24472 24473 24474 /* 24475 * Function: sr_play_trkind() 24476 * 24477 * Description: This routine is the driver entry point for handling CD-ROM 24478 * ioctl requests to output the audio signals at the specified 24479 * starting address and continue the audio play until the specified 24480 * ending address (CDROMPLAYTRKIND). The address is in Track Index 24481 * format. 24482 * 24483 * Arguments: dev - the device 'dev_t' 24484 * data - pointer to user provided audio track/index structure, 24485 * specifying start/end addresses. 24486 * flag - this argument is a pass through to ddi_copyxxx() 24487 * directly from the mode argument of ioctl(). 
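 *
 *		For example (illustrative): to play from track 3 index 1
 *		through track 5 index 1, the caller sets cdti_trk0 = 3,
 *		cdti_ind0 = 1, cdti_trk1 = 5, and cdti_ind1 = 1; these land
 *		in cdb[4], cdb[5], cdb[7], and cdb[8] respectively below.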
24488 * 24489 * Return Code: the code returned by sd_send_scsi_cmd() 24490 * EFAULT if ddi_copyxxx() fails 24491 * ENXIO if fail ddi_get_soft_state 24492 * EINVAL if data pointer is NULL 24493 */ 24494 24495 static int 24496 sr_play_trkind(dev_t dev, caddr_t data, int flag) 24497 { 24498 struct cdrom_ti ti_struct; 24499 struct cdrom_ti *ti = &ti_struct; 24500 struct uscsi_cmd *com = NULL; 24501 char cdb[CDB_GROUP1]; 24502 int rval; 24503 24504 if (data == NULL) { 24505 return (EINVAL); 24506 } 24507 24508 if (ddi_copyin(data, ti, sizeof (struct cdrom_ti), flag)) { 24509 return (EFAULT); 24510 } 24511 24512 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 24513 bzero(cdb, CDB_GROUP1); 24514 cdb[0] = SCMD_PLAYAUDIO_TI; 24515 cdb[4] = ti->cdti_trk0; 24516 cdb[5] = ti->cdti_ind0; 24517 cdb[7] = ti->cdti_trk1; 24518 cdb[8] = ti->cdti_ind1; 24519 com->uscsi_cdb = cdb; 24520 com->uscsi_cdblen = CDB_GROUP1; 24521 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT; 24522 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 24523 SD_PATH_STANDARD); 24524 kmem_free(com, sizeof (*com)); 24525 return (rval); 24526 } 24527 24528 24529 /* 24530 * Function: sr_read_all_subcodes() 24531 * 24532 * Description: This routine is the driver entry point for handling CD-ROM 24533 * ioctl requests to return raw subcode data while the target is 24534 * playing audio (CDROMSUBCODE). 24535 * 24536 * Arguments: dev - the device 'dev_t' 24537 * data - pointer to user provided cdrom subcode structure, 24538 * specifying the transfer length and address. 24539 * flag - this argument is a pass through to ddi_copyxxx() 24540 * directly from the mode argument of ioctl(). 24541 * 24542 * Return Code: the code returned by sd_send_scsi_cmd() 24543 * EFAULT if ddi_copyxxx() fails 24544 * ENXIO if fail ddi_get_soft_state 24545 * EINVAL if data pointer is NULL 24546 */ 24547 24548 static int 24549 sr_read_all_subcodes(dev_t dev, caddr_t data, int flag) 24550 { 24551 struct sd_lun *un = NULL; 24552 struct uscsi_cmd *com = NULL; 24553 struct cdrom_subcode *subcode = NULL; 24554 int rval; 24555 size_t buflen; 24556 char cdb[CDB_GROUP5]; 24557 24558 #ifdef _MULTI_DATAMODEL 24559 /* To support ILP32 applications in an LP64 world */ 24560 struct cdrom_subcode32 cdrom_subcode32; 24561 struct cdrom_subcode32 *cdsc32 = &cdrom_subcode32; 24562 #endif 24563 if (data == NULL) { 24564 return (EINVAL); 24565 } 24566 24567 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24568 return (ENXIO); 24569 } 24570 24571 subcode = kmem_zalloc(sizeof (struct cdrom_subcode), KM_SLEEP); 24572 24573 #ifdef _MULTI_DATAMODEL 24574 switch (ddi_model_convert_from(flag & FMODELS)) { 24575 case DDI_MODEL_ILP32: 24576 if (ddi_copyin(data, cdsc32, sizeof (*cdsc32), flag)) { 24577 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24578 "sr_read_all_subcodes: ddi_copyin Failed\n"); 24579 kmem_free(subcode, sizeof (struct cdrom_subcode)); 24580 return (EFAULT); 24581 } 24582 /* Convert the ILP32 uscsi data from the application to LP64 */ 24583 cdrom_subcode32tocdrom_subcode(cdsc32, subcode); 24584 break; 24585 case DDI_MODEL_NONE: 24586 if (ddi_copyin(data, subcode, 24587 sizeof (struct cdrom_subcode), flag)) { 24588 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24589 "sr_read_all_subcodes: ddi_copyin Failed\n"); 24590 kmem_free(subcode, sizeof (struct cdrom_subcode)); 24591 return (EFAULT); 24592 } 24593 break; 24594 } 24595 #else /* ! 
_MULTI_DATAMODEL */
	if (ddi_copyin(data, subcode, sizeof (struct cdrom_subcode), flag)) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sr_read_all_subcodes: ddi_copyin Failed\n");
		kmem_free(subcode, sizeof (struct cdrom_subcode));
		return (EFAULT);
	}
#endif /* _MULTI_DATAMODEL */

	/*
	 * Since MMC-2 expects max 3 bytes for length, check if the
	 * length input is greater than 3 bytes
	 */
	if ((subcode->cdsc_length & 0xFF000000) != 0) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sr_read_all_subcodes: "
		    "cdrom transfer length too large: %d (limit %d)\n",
		    subcode->cdsc_length, 0xFFFFFF);
		kmem_free(subcode, sizeof (struct cdrom_subcode));
		return (EINVAL);
	}

	buflen = CDROM_BLK_SUBCODE * subcode->cdsc_length;
	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
	bzero(cdb, CDB_GROUP5);

	if (un->un_f_mmc_cap == TRUE) {
		cdb[0] = (char)SCMD_READ_CD;
		cdb[2] = (char)0xff;
		cdb[3] = (char)0xff;
		cdb[4] = (char)0xff;
		cdb[5] = (char)0xff;
		cdb[6] = (((subcode->cdsc_length) & 0x00ff0000) >> 16);
		cdb[7] = (((subcode->cdsc_length) & 0x0000ff00) >> 8);
		cdb[8] = ((subcode->cdsc_length) & 0x000000ff);
		cdb[10] = 1;
	} else {
		/*
		 * Note: A vendor specific command (0xDF) is being used here to
		 * request a read of all subcodes.
		 */
		cdb[0] = (char)SCMD_READ_ALL_SUBCODES;
		cdb[6] = (((subcode->cdsc_length) & 0xff000000) >> 24);
		cdb[7] = (((subcode->cdsc_length) & 0x00ff0000) >> 16);
		cdb[8] = (((subcode->cdsc_length) & 0x0000ff00) >> 8);
		cdb[9] = ((subcode->cdsc_length) & 0x000000ff);
	}
	com->uscsi_cdb = cdb;
	com->uscsi_cdblen = CDB_GROUP5;
	com->uscsi_bufaddr = (caddr_t)subcode->cdsc_addr;
	com->uscsi_buflen = buflen;
	com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;
	rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE,
	    SD_PATH_STANDARD);
	kmem_free(subcode, sizeof (struct cdrom_subcode));
	kmem_free(com, sizeof (*com));
	return (rval);
}


/*
 * Function: sr_read_subchannel()
 *
 * Description: This routine is the driver entry point for handling CD-ROM
 *		ioctl requests to return the Q sub-channel data of the CD
 *		current position block (CDROMSUBCHNL). The data includes the
 *		track number, index number, absolute CD-ROM address (LBA or MSF
 *		format per the user), track relative CD-ROM address (LBA or MSF
 *		format per the user), control data and audio status.
 *
 * Arguments: dev - the device 'dev_t'
 *	data - pointer to user provided cdrom sub-channel structure
 *	flag - this argument is a pass through to ddi_copyxxx()
 *	    directly from the mode argument of ioctl().
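 *
 *		Usage sketch (illustrative only; a hypothetical user-level
 *		caller, not part of this driver):
 *
 *			struct cdrom_subchnl sc;
 *			sc.cdsc_format = CDROM_MSF;	or CDROM_LBA
 *			if (ioctl(fd, CDROMSUBCHNL, &sc) == 0)
 *				the track/index/address fields are valid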
24669 * 24670 * Return Code: the code returned by sd_send_scsi_cmd() 24671 * EFAULT if ddi_copyxxx() fails 24672 * ENXIO if fail ddi_get_soft_state 24673 * EINVAL if data pointer is NULL 24674 */ 24675 24676 static int 24677 sr_read_subchannel(dev_t dev, caddr_t data, int flag) 24678 { 24679 struct sd_lun *un; 24680 struct uscsi_cmd *com; 24681 struct cdrom_subchnl subchanel; 24682 struct cdrom_subchnl *subchnl = &subchanel; 24683 char cdb[CDB_GROUP1]; 24684 caddr_t buffer; 24685 int rval; 24686 24687 if (data == NULL) { 24688 return (EINVAL); 24689 } 24690 24691 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 24692 (un->un_state == SD_STATE_OFFLINE)) { 24693 return (ENXIO); 24694 } 24695 24696 if (ddi_copyin(data, subchnl, sizeof (struct cdrom_subchnl), flag)) { 24697 return (EFAULT); 24698 } 24699 24700 buffer = kmem_zalloc((size_t)16, KM_SLEEP); 24701 bzero(cdb, CDB_GROUP1); 24702 cdb[0] = SCMD_READ_SUBCHANNEL; 24703 /* Set the MSF bit based on the user requested address format */ 24704 cdb[1] = (subchnl->cdsc_format & CDROM_LBA) ? 0 : 0x02; 24705 /* 24706 * Set the Q bit in byte 2 to indicate that Q sub-channel data be 24707 * returned 24708 */ 24709 cdb[2] = 0x40; 24710 /* 24711 * Set byte 3 to specify the return data format. A value of 0x01 24712 * indicates that the CD-ROM current position should be returned. 24713 */ 24714 cdb[3] = 0x01; 24715 cdb[8] = 0x10; 24716 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 24717 com->uscsi_cdb = cdb; 24718 com->uscsi_cdblen = CDB_GROUP1; 24719 com->uscsi_bufaddr = buffer; 24720 com->uscsi_buflen = 16; 24721 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 24722 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 24723 SD_PATH_STANDARD); 24724 if (rval != 0) { 24725 kmem_free(buffer, 16); 24726 kmem_free(com, sizeof (*com)); 24727 return (rval); 24728 } 24729 24730 /* Process the returned Q sub-channel data */ 24731 subchnl->cdsc_audiostatus = buffer[1]; 24732 subchnl->cdsc_adr = (buffer[5] & 0xF0); 24733 subchnl->cdsc_ctrl = (buffer[5] & 0x0F); 24734 subchnl->cdsc_trk = buffer[6]; 24735 subchnl->cdsc_ind = buffer[7]; 24736 if (subchnl->cdsc_format & CDROM_LBA) { 24737 subchnl->cdsc_absaddr.lba = 24738 ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) + 24739 ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]); 24740 subchnl->cdsc_reladdr.lba = 24741 ((uchar_t)buffer[12] << 24) + ((uchar_t)buffer[13] << 16) + 24742 ((uchar_t)buffer[14] << 8) + ((uchar_t)buffer[15]); 24743 } else if (un->un_f_cfg_readsub_bcd == TRUE) { 24744 subchnl->cdsc_absaddr.msf.minute = BCD_TO_BYTE(buffer[9]); 24745 subchnl->cdsc_absaddr.msf.second = BCD_TO_BYTE(buffer[10]); 24746 subchnl->cdsc_absaddr.msf.frame = BCD_TO_BYTE(buffer[11]); 24747 subchnl->cdsc_reladdr.msf.minute = BCD_TO_BYTE(buffer[13]); 24748 subchnl->cdsc_reladdr.msf.second = BCD_TO_BYTE(buffer[14]); 24749 subchnl->cdsc_reladdr.msf.frame = BCD_TO_BYTE(buffer[15]); 24750 } else { 24751 subchnl->cdsc_absaddr.msf.minute = buffer[9]; 24752 subchnl->cdsc_absaddr.msf.second = buffer[10]; 24753 subchnl->cdsc_absaddr.msf.frame = buffer[11]; 24754 subchnl->cdsc_reladdr.msf.minute = buffer[13]; 24755 subchnl->cdsc_reladdr.msf.second = buffer[14]; 24756 subchnl->cdsc_reladdr.msf.frame = buffer[15]; 24757 } 24758 kmem_free(buffer, 16); 24759 kmem_free(com, sizeof (*com)); 24760 if (ddi_copyout(subchnl, data, sizeof (struct cdrom_subchnl), flag) 24761 != 0) { 24762 return (EFAULT); 24763 } 24764 return (rval); 24765 } 24766 24767 24768 /* 24769 * Function: sr_read_tocentry() 24770 * 
 * Description: This routine is the driver entry point for handling CD-ROM
 *		ioctl requests to read from the Table of Contents (TOC)
 *		(CDROMREADTOCENTRY). This routine provides the ADR and CTRL
 *		fields, the starting address (LBA or MSF format per the user)
 *		and the data mode if the user specified track is a data track.
 *
 *		Note: The READ HEADER (0x44) command used in this routine is
 *		obsolete per the SCSI MMC spec but still supported in the
 *		MT FUJI vendor spec. Most equipment is adhering to MT FUJI,
 *		therefore the command is still implemented in this routine.
 *
 * Arguments: dev - the device 'dev_t'
 *	data - pointer to user provided toc entry structure,
 *	    specifying the track # and the address format
 *	    (LBA or MSF).
 *	flag - this argument is a pass through to ddi_copyxxx()
 *	    directly from the mode argument of ioctl().
 *
 * Return Code: the code returned by sd_send_scsi_cmd()
 *		EFAULT if ddi_copyxxx() fails
 *		ENXIO if fail ddi_get_soft_state
 *		EINVAL if data pointer is NULL
 */

static int
sr_read_tocentry(dev_t dev, caddr_t data, int flag)
{
	struct sd_lun		*un = NULL;
	struct uscsi_cmd	*com;
	struct cdrom_tocentry	toc_entry;
	struct cdrom_tocentry	*entry = &toc_entry;
	caddr_t			buffer;
	int			rval;
	char			cdb[CDB_GROUP1];

	if (data == NULL) {
		return (EINVAL);
	}

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
	    (un->un_state == SD_STATE_OFFLINE)) {
		return (ENXIO);
	}

	if (ddi_copyin(data, entry, sizeof (struct cdrom_tocentry), flag)) {
		return (EFAULT);
	}

	/* Validate the requested track and address format */
	if (!(entry->cdte_format & (CDROM_LBA | CDROM_MSF))) {
		return (EINVAL);
	}

	if (entry->cdte_track == 0) {
		return (EINVAL);
	}

	buffer = kmem_zalloc((size_t)12, KM_SLEEP);
	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
	bzero(cdb, CDB_GROUP1);

	cdb[0] = SCMD_READ_TOC;
	/* Set the MSF bit based on the user requested address format */
	cdb[1] = ((entry->cdte_format & CDROM_LBA) ? 0 : 2);
	if (un->un_f_cfg_read_toc_trk_bcd == TRUE) {
		cdb[6] = BYTE_TO_BCD(entry->cdte_track);
	} else {
		cdb[6] = entry->cdte_track;
	}

	/*
	 * Bytes 7 & 8 are the 12 byte allocation length for a single entry.
	 * (4 byte TOC response header + 8 byte track descriptor)
	 */
	cdb[8] = 12;
	com->uscsi_cdb = cdb;
	com->uscsi_cdblen = CDB_GROUP1;
	com->uscsi_bufaddr = buffer;
	com->uscsi_buflen = 0x0C;
	com->uscsi_flags = (USCSI_DIAGNOSE | USCSI_SILENT | USCSI_READ);
	rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
	    SD_PATH_STANDARD);
	if (rval != 0) {
		kmem_free(buffer, 12);
		kmem_free(com, sizeof (*com));
		return (rval);
	}

	/* Process the toc entry */
	entry->cdte_adr = (buffer[5] & 0xF0) >> 4;
	entry->cdte_ctrl = (buffer[5] & 0x0F);
	if (entry->cdte_format & CDROM_LBA) {
		entry->cdte_addr.lba =
		    ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) +
		    ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]);
	} else if (un->un_f_cfg_read_toc_addr_bcd == TRUE) {
		entry->cdte_addr.msf.minute = BCD_TO_BYTE(buffer[9]);
		entry->cdte_addr.msf.second = BCD_TO_BYTE(buffer[10]);
		entry->cdte_addr.msf.frame = BCD_TO_BYTE(buffer[11]);
		/*
		 * Send a READ TOC command using the LBA address format to get
		 * the LBA for the track requested so it can be used in the
		 * READ HEADER request
		 *
		 * Note: The MSF bit of the READ HEADER command specifies the
		 * output format. The block address specified in that command
		 * must be in LBA format.
		 */
		cdb[1] = 0;
		rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
		    SD_PATH_STANDARD);
		if (rval != 0) {
			kmem_free(buffer, 12);
			kmem_free(com, sizeof (*com));
			return (rval);
		}
	} else {
		entry->cdte_addr.msf.minute = buffer[9];
		entry->cdte_addr.msf.second = buffer[10];
		entry->cdte_addr.msf.frame = buffer[11];
		/*
		 * Send a READ TOC command using the LBA address format to get
		 * the LBA for the track requested so it can be used in the
		 * READ HEADER request
		 *
		 * Note: The MSF bit of the READ HEADER command specifies the
		 * output format. The block address specified in that command
		 * must be in LBA format.
		 */
		cdb[1] = 0;
		rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
		    SD_PATH_STANDARD);
		if (rval != 0) {
			kmem_free(buffer, 12);
			kmem_free(com, sizeof (*com));
			return (rval);
		}
	}

	/*
	 * Build and send the READ HEADER command to determine the data mode of
	 * the user specified track.
	 */
	if ((entry->cdte_ctrl & CDROM_DATA_TRACK) &&
	    (entry->cdte_track != CDROM_LEADOUT)) {
		bzero(cdb, CDB_GROUP1);
		cdb[0] = SCMD_READ_HEADER;
		cdb[2] = buffer[8];
		cdb[3] = buffer[9];
		cdb[4] = buffer[10];
		cdb[5] = buffer[11];
		cdb[8] = 0x08;
		com->uscsi_buflen = 0x08;
		rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
		    SD_PATH_STANDARD);
		if (rval == 0) {
			entry->cdte_datamode = buffer[0];
		} else {
			/*
			 * The READ HEADER command failed; since the command
			 * is obsolete in one spec, it's better to return -1
			 * for an invalid track so that we can still
			 * receive the rest of the TOC data.
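			 * (A cdte_datamode of (uchar_t)-1 therefore means
			 * "unknown", not a hard error; the caller still gets
			 * the ADR/CTRL and address data gathered above.)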
			 */
			entry->cdte_datamode = (uchar_t)-1;
		}
	} else {
		entry->cdte_datamode = (uchar_t)-1;
	}

	kmem_free(buffer, 12);
	kmem_free(com, sizeof (*com));
	if (ddi_copyout(entry, data, sizeof (struct cdrom_tocentry), flag) != 0)
		return (EFAULT);

	return (rval);
}


/*
 * Function: sr_read_tochdr()
 *
 * Description: This routine is the driver entry point for handling CD-ROM
 *		ioctl requests to read the Table of Contents (TOC) header
 *		(CDROMREADTOCHDR). The TOC header consists of the disk starting
 *		and ending track numbers.
 *
 * Arguments: dev - the device 'dev_t'
 *	data - pointer to user provided toc header structure,
 *	    specifying the starting and ending track numbers.
 *	flag - this argument is a pass through to ddi_copyxxx()
 *	    directly from the mode argument of ioctl().
 *
 * Return Code: the code returned by sd_send_scsi_cmd()
 *		EFAULT if ddi_copyxxx() fails
 *		ENXIO if fail ddi_get_soft_state
 *		EINVAL if data pointer is NULL
 */

static int
sr_read_tochdr(dev_t dev, caddr_t data, int flag)
{
	struct sd_lun		*un;
	struct uscsi_cmd	*com;
	struct cdrom_tochdr	toc_header;
	struct cdrom_tochdr	*hdr = &toc_header;
	char			cdb[CDB_GROUP1];
	int			rval;
	caddr_t			buffer;

	if (data == NULL) {
		return (EINVAL);
	}

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
	    (un->un_state == SD_STATE_OFFLINE)) {
		return (ENXIO);
	}

	buffer = kmem_zalloc(4, KM_SLEEP);
	bzero(cdb, CDB_GROUP1);
	cdb[0] = SCMD_READ_TOC;
	/*
	 * Specifying a track number of 0x00 in the READ TOC command indicates
	 * that the TOC header should be returned
	 */
	cdb[6] = 0x00;
	/*
	 * Bytes 7 & 8 are the 4 byte allocation length for TOC header.
	 * (2 byte data len + 1 byte starting track # + 1 byte ending track #)
	 */
	cdb[8] = 0x04;
	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
	com->uscsi_cdb = cdb;
	com->uscsi_cdblen = CDB_GROUP1;
	com->uscsi_bufaddr = buffer;
	com->uscsi_buflen = 0x04;
	com->uscsi_timeout = 300;
	com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;

	rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
	    SD_PATH_STANDARD);
	if (un->un_f_cfg_read_toc_trk_bcd == TRUE) {
		hdr->cdth_trk0 = BCD_TO_BYTE(buffer[2]);
		hdr->cdth_trk1 = BCD_TO_BYTE(buffer[3]);
	} else {
		hdr->cdth_trk0 = buffer[2];
		hdr->cdth_trk1 = buffer[3];
	}
	kmem_free(buffer, 4);
	kmem_free(com, sizeof (*com));
	if (ddi_copyout(hdr, data, sizeof (struct cdrom_tochdr), flag) != 0) {
		return (EFAULT);
	}
	return (rval);
}


/*
 * Note: The following sr_read_mode1(), sr_read_cd_mode2(), sr_read_mode2(),
 * sr_read_cdda(), and sr_read_cdxa() routines implement driver support for
 * handling CDROMREAD ioctl requests for mode 1 user data, mode 2 user data,
 * digital audio and extended architecture digital audio. These modes are
 * defined in the IEC908 (Red Book), ISO10149 (Yellow Book), and the SCSI3
 * MMC specs.
25036 * 25037 * In addition to support for the various data formats these routines also 25038 * include support for devices that implement only the direct access READ 25039 * commands (0x08, 0x28), devices that implement the READ_CD commands 25040 * (0xBE, 0xD4), and devices that implement the vendor unique READ CDDA and 25041 * READ CDXA commands (0xD8, 0xDB) 25042 */ 25043 25044 /* 25045 * Function: sr_read_mode1() 25046 * 25047 * Description: This routine is the driver entry point for handling CD-ROM 25048 * ioctl read mode1 requests (CDROMREADMODE1). 25049 * 25050 * Arguments: dev - the device 'dev_t' 25051 * data - pointer to user provided cd read structure specifying 25052 * the lba buffer address and length. 25053 * flag - this argument is a pass through to ddi_copyxxx() 25054 * directly from the mode argument of ioctl(). 25055 * 25056 * Return Code: the code returned by sd_send_scsi_cmd() 25057 * EFAULT if ddi_copyxxx() fails 25058 * ENXIO if fail ddi_get_soft_state 25059 * EINVAL if data pointer is NULL 25060 */ 25061 25062 static int 25063 sr_read_mode1(dev_t dev, caddr_t data, int flag) 25064 { 25065 struct sd_lun *un; 25066 struct cdrom_read mode1_struct; 25067 struct cdrom_read *mode1 = &mode1_struct; 25068 int rval; 25069 #ifdef _MULTI_DATAMODEL 25070 /* To support ILP32 applications in an LP64 world */ 25071 struct cdrom_read32 cdrom_read32; 25072 struct cdrom_read32 *cdrd32 = &cdrom_read32; 25073 #endif /* _MULTI_DATAMODEL */ 25074 25075 if (data == NULL) { 25076 return (EINVAL); 25077 } 25078 25079 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 25080 (un->un_state == SD_STATE_OFFLINE)) { 25081 return (ENXIO); 25082 } 25083 25084 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 25085 "sd_read_mode1: entry: un:0x%p\n", un); 25086 25087 #ifdef _MULTI_DATAMODEL 25088 switch (ddi_model_convert_from(flag & FMODELS)) { 25089 case DDI_MODEL_ILP32: 25090 if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) { 25091 return (EFAULT); 25092 } 25093 /* Convert the ILP32 uscsi data from the application to LP64 */ 25094 cdrom_read32tocdrom_read(cdrd32, mode1); 25095 break; 25096 case DDI_MODEL_NONE: 25097 if (ddi_copyin(data, mode1, sizeof (struct cdrom_read), flag)) { 25098 return (EFAULT); 25099 } 25100 } 25101 #else /* ! _MULTI_DATAMODEL */ 25102 if (ddi_copyin(data, mode1, sizeof (struct cdrom_read), flag)) { 25103 return (EFAULT); 25104 } 25105 #endif /* _MULTI_DATAMODEL */ 25106 25107 rval = sd_send_scsi_READ(un, mode1->cdread_bufaddr, 25108 mode1->cdread_buflen, mode1->cdread_lba, SD_PATH_STANDARD); 25109 25110 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 25111 "sd_read_mode1: exit: un:0x%p\n", un); 25112 25113 return (rval); 25114 } 25115 25116 25117 /* 25118 * Function: sr_read_cd_mode2() 25119 * 25120 * Description: This routine is the driver entry point for handling CD-ROM 25121 * ioctl read mode2 requests (CDROMREADMODE2) for devices that 25122 * support the READ CD (0xBE) command or the 1st generation 25123 * READ CD (0xD4) command. 25124 * 25125 * Arguments: dev - the device 'dev_t' 25126 * data - pointer to user provided cd read structure specifying 25127 * the lba buffer address and length. 25128 * flag - this argument is a pass through to ddi_copyxxx() 25129 * directly from the mode argument of ioctl(). 
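 *
 *		For example (illustrative): since this path reads 2336-byte
 *		Mode 2 sectors, a cdread_buflen of 23360 bytes is sent as a
 *		transfer length of 23360 / 2336 = 10 blocks in the CDB.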
 *
 * Return Code: the code returned by sd_send_scsi_cmd()
 *		EFAULT if ddi_copyxxx() fails
 *		ENXIO if fail ddi_get_soft_state
 *		EINVAL if data pointer is NULL
 */

static int
sr_read_cd_mode2(dev_t dev, caddr_t data, int flag)
{
	struct sd_lun		*un;
	struct uscsi_cmd	*com;
	struct cdrom_read	mode2_struct;
	struct cdrom_read	*mode2 = &mode2_struct;
	uchar_t			cdb[CDB_GROUP5];
	int			nblocks;
	int			rval;
#ifdef _MULTI_DATAMODEL
	/* To support ILP32 applications in an LP64 world */
	struct cdrom_read32	cdrom_read32;
	struct cdrom_read32	*cdrd32 = &cdrom_read32;
#endif /* _MULTI_DATAMODEL */

	if (data == NULL) {
		return (EINVAL);
	}

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
	    (un->un_state == SD_STATE_OFFLINE)) {
		return (ENXIO);
	}

#ifdef _MULTI_DATAMODEL
	switch (ddi_model_convert_from(flag & FMODELS)) {
	case DDI_MODEL_ILP32:
		if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) {
			return (EFAULT);
		}
		/* Convert the ILP32 uscsi data from the application to LP64 */
		cdrom_read32tocdrom_read(cdrd32, mode2);
		break;
	case DDI_MODEL_NONE:
		if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) {
			return (EFAULT);
		}
		break;
	}

#else /* ! _MULTI_DATAMODEL */
	if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) {
		return (EFAULT);
	}
#endif /* _MULTI_DATAMODEL */

	bzero(cdb, sizeof (cdb));
	if (un->un_f_cfg_read_cd_xd4 == TRUE) {
		/* Read command supported by 1st generation ATAPI drives */
		cdb[0] = SCMD_READ_CDD4;
	} else {
		/* Universal CD Access Command */
		cdb[0] = SCMD_READ_CD;
	}

	/*
	 * Set expected sector type to: 2336 byte, Mode 2 Yellow Book
	 */
	cdb[1] = CDROM_SECTOR_TYPE_MODE2;

	/* set the start address */
	cdb[2] = (uchar_t)((mode2->cdread_lba >> 24) & 0xFF);
	cdb[3] = (uchar_t)((mode2->cdread_lba >> 16) & 0xFF);
	cdb[4] = (uchar_t)((mode2->cdread_lba >> 8) & 0xFF);
	cdb[5] = (uchar_t)(mode2->cdread_lba & 0xFF);

	/* set the transfer length */
	nblocks = mode2->cdread_buflen / 2336;
	cdb[6] = (uchar_t)(nblocks >> 16);
	cdb[7] = (uchar_t)(nblocks >> 8);
	cdb[8] = (uchar_t)nblocks;

	/* set the filter bits */
	cdb[9] = CDROM_READ_CD_USERDATA;

	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
	com->uscsi_cdb = (caddr_t)cdb;
	com->uscsi_cdblen = sizeof (cdb);
	com->uscsi_bufaddr = mode2->cdread_bufaddr;
	com->uscsi_buflen = mode2->cdread_buflen;
	com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;

	rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE,
	    SD_PATH_STANDARD);
	kmem_free(com, sizeof (*com));
	return (rval);
}


/*
 * Function: sr_read_mode2()
 *
 * Description: This routine is the driver entry point for handling CD-ROM
 *		ioctl read mode2 requests (CDROMREADMODE2) for devices that
 *		do not support the READ CD (0xBE) command.
 *
 * Arguments: dev - the device 'dev_t'
 *	data - pointer to user provided cd read structure specifying
 *	    the lba buffer address and length.
 *	flag - this argument is a pass through to ddi_copyxxx()
 *	    directly from the mode argument of ioctl().
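 *
 *		Usage sketch (illustrative only; a hypothetical user-level
 *		caller. Per the "adjust lba" comment in the code below,
 *		cdread_lba is supplied in 512-byte units and is shifted right
 *		by two before the CDB is built):
 *
 *			struct cdrom_read cr;
 *			cr.cdread_lba = lba512;
 *			cr.cdread_bufaddr = buf;
 *			cr.cdread_buflen = 10 * 2336;
 *			(void) ioctl(fd, CDROMREADMODE2, &cr);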
25239 * 25240 * Return Code: the code returned by sd_send_scsi_cmd() 25241 * EFAULT if ddi_copyxxx() fails 25242 * ENXIO if fail ddi_get_soft_state 25243 * EINVAL if data pointer is NULL 25244 * EIO if fail to reset block size 25245 * EAGAIN if commands are in progress in the driver 25246 */ 25247 25248 static int 25249 sr_read_mode2(dev_t dev, caddr_t data, int flag) 25250 { 25251 struct sd_lun *un; 25252 struct cdrom_read mode2_struct; 25253 struct cdrom_read *mode2 = &mode2_struct; 25254 int rval; 25255 uint32_t restore_blksize; 25256 struct uscsi_cmd *com; 25257 uchar_t cdb[CDB_GROUP0]; 25258 int nblocks; 25259 25260 #ifdef _MULTI_DATAMODEL 25261 /* To support ILP32 applications in an LP64 world */ 25262 struct cdrom_read32 cdrom_read32; 25263 struct cdrom_read32 *cdrd32 = &cdrom_read32; 25264 #endif /* _MULTI_DATAMODEL */ 25265 25266 if (data == NULL) { 25267 return (EINVAL); 25268 } 25269 25270 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 25271 (un->un_state == SD_STATE_OFFLINE)) { 25272 return (ENXIO); 25273 } 25274 25275 /* 25276 * Because this routine will update the device and driver block size 25277 * being used we want to make sure there are no commands in progress. 25278 * If commands are in progress the user will have to try again. 25279 * 25280 * We check for 1 instead of 0 because we increment un_ncmds_in_driver 25281 * in sdioctl to protect commands from sdioctl through to the top of 25282 * sd_uscsi_strategy. See sdioctl for details. 25283 */ 25284 mutex_enter(SD_MUTEX(un)); 25285 if (un->un_ncmds_in_driver != 1) { 25286 mutex_exit(SD_MUTEX(un)); 25287 return (EAGAIN); 25288 } 25289 mutex_exit(SD_MUTEX(un)); 25290 25291 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 25292 "sd_read_mode2: entry: un:0x%p\n", un); 25293 25294 #ifdef _MULTI_DATAMODEL 25295 switch (ddi_model_convert_from(flag & FMODELS)) { 25296 case DDI_MODEL_ILP32: 25297 if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) { 25298 return (EFAULT); 25299 } 25300 /* Convert the ILP32 uscsi data from the application to LP64 */ 25301 cdrom_read32tocdrom_read(cdrd32, mode2); 25302 break; 25303 case DDI_MODEL_NONE: 25304 if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) { 25305 return (EFAULT); 25306 } 25307 break; 25308 } 25309 #else /* ! 
_MULTI_DATAMODEL */
	if (ddi_copyin(data, mode2, sizeof (*mode2), flag)) {
		return (EFAULT);
	}
#endif /* _MULTI_DATAMODEL */

	/* Store the current target block size for restoration later */
	restore_blksize = un->un_tgt_blocksize;

	/* Change the device and soft state target block size to 2336 */
	if (sr_sector_mode(dev, SD_MODE2_BLKSIZE) != 0) {
		rval = EIO;
		goto done;
	}

	bzero(cdb, sizeof (cdb));

	/* set READ operation */
	cdb[0] = SCMD_READ;

	/* adjust lba for 2kbyte blocks from 512 byte blocks */
	mode2->cdread_lba >>= 2;

	/* set the start address */
	cdb[1] = (uchar_t)((mode2->cdread_lba >> 16) & 0x1F);
	cdb[2] = (uchar_t)((mode2->cdread_lba >> 8) & 0xFF);
	cdb[3] = (uchar_t)(mode2->cdread_lba & 0xFF);

	/* set the transfer length */
	nblocks = mode2->cdread_buflen / 2336;
	cdb[4] = (uchar_t)nblocks & 0xFF;

	/* build command */
	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
	com->uscsi_cdb = (caddr_t)cdb;
	com->uscsi_cdblen = sizeof (cdb);
	com->uscsi_bufaddr = mode2->cdread_bufaddr;
	com->uscsi_buflen = mode2->cdread_buflen;
	com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;

	/*
	 * Issue SCSI command with user space address for read buffer.
	 *
	 * This sends the command through main channel in the driver.
	 *
	 * Since this is accessed via an IOCTL call, we go through the
	 * standard path, so that if the device was powered down, then
	 * it would be 'awakened' to handle the command.
	 */
	rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE,
	    SD_PATH_STANDARD);

	kmem_free(com, sizeof (*com));

	/* Restore the device and soft state target block size */
	if (sr_sector_mode(dev, restore_blksize) != 0) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "can't do switch back to mode 1\n");
		/*
		 * If sd_send_scsi_READ succeeded we still need to report
		 * an error because we failed to reset the block size
		 */
		if (rval == 0) {
			rval = EIO;
		}
	}

done:
	SD_TRACE(SD_LOG_ATTACH_DETACH, un,
	    "sd_read_mode2: exit: un:0x%p\n", un);

	return (rval);
}


/*
 * Function: sr_sector_mode()
 *
 * Description: This utility function is used by sr_read_mode2 to set the
 *		target block size based on the user specified size. This is
 *		a legacy implementation based upon a vendor specific mode page.
 *
 * Arguments: dev - the device 'dev_t'
 *	blksize - the block size to be set: SD_MODE2_BLKSIZE (2336) or
 *	    the original size being restored.
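 *
 *		For example (illustrative): a blksize of 2336 (0x0920) is
 *		encoded below as select[10] = 0x09 and select[11] = 0x20,
 *		and the mode 2 density bit is set in select[14].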
 *
 * Return Code: the code returned by sd_send_scsi_cmd()
 *		ENXIO if fail ddi_get_soft_state
 */

static int
sr_sector_mode(dev_t dev, uint32_t blksize)
{
	struct sd_lun	*un;
	uchar_t		*sense;
	uchar_t		*select;
	int		rval;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
	    (un->un_state == SD_STATE_OFFLINE)) {
		return (ENXIO);
	}

	sense = kmem_zalloc(20, KM_SLEEP);

	/* Note: This is a vendor specific mode page (0x81) */
	if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, sense, 20, 0x81,
	    SD_PATH_STANDARD)) != 0) {
		SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un,
		    "sr_sector_mode: Mode Sense failed\n");
		kmem_free(sense, 20);
		return (rval);
	}
	select = kmem_zalloc(20, KM_SLEEP);
	select[3] = 0x08;
	select[10] = ((blksize >> 8) & 0xff);
	select[11] = (blksize & 0xff);
	select[12] = 0x01;
	select[13] = 0x06;
	select[14] = sense[14];
	select[15] = sense[15];
	if (blksize == SD_MODE2_BLKSIZE) {
		select[14] |= 0x01;
	}

	if ((rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, select, 20,
	    SD_DONTSAVE_PAGE, SD_PATH_STANDARD)) != 0) {
		SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un,
		    "sr_sector_mode: Mode Select failed\n");
	} else {
		/*
		 * Only update the softstate block size if we successfully
		 * changed the device block mode.
		 */
		mutex_enter(SD_MUTEX(un));
		sd_update_block_info(un, blksize, 0);
		mutex_exit(SD_MUTEX(un));
	}
	kmem_free(sense, 20);
	kmem_free(select, 20);
	return (rval);
}


/*
 * Function: sr_read_cdda()
 *
 * Description: This routine is the driver entry point for handling CD-ROM
 *		ioctl requests to return CD-DA or subcode data (CDROMCDDA). If
 *		the target supports CDDA these requests are handled via a
 *		vendor specific command (0xD8). If the target does not support
 *		CDDA these requests are handled via the READ CD command (0xBE).
 *
 * Arguments: dev - the device 'dev_t'
 *	data - pointer to user provided CD-DA structure specifying
 *	    the track starting address, transfer length, and
 *	    subcode options.
 *	flag - this argument is a pass through to ddi_copyxxx()
 *	    directly from the mode argument of ioctl().
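 *
 *		For example (illustrative): the transfer size scales with the
 *		subcode selection; reading 10 frames with CDROM_DA_SUBQ uses
 *		10 * 2368 bytes, while CDROM_DA_SUBCODE_ONLY uses 10 * 96.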
25471 * 25472 * Return Code: the code returned by sd_send_scsi_cmd() 25473 * EFAULT if ddi_copyxxx() fails 25474 * ENXIO if fail ddi_get_soft_state 25475 * EINVAL if invalid arguments are provided 25476 * ENOTTY 25477 */ 25478 25479 static int 25480 sr_read_cdda(dev_t dev, caddr_t data, int flag) 25481 { 25482 struct sd_lun *un; 25483 struct uscsi_cmd *com; 25484 struct cdrom_cdda *cdda; 25485 int rval; 25486 size_t buflen; 25487 char cdb[CDB_GROUP5]; 25488 25489 #ifdef _MULTI_DATAMODEL 25490 /* To support ILP32 applications in an LP64 world */ 25491 struct cdrom_cdda32 cdrom_cdda32; 25492 struct cdrom_cdda32 *cdda32 = &cdrom_cdda32; 25493 #endif /* _MULTI_DATAMODEL */ 25494 25495 if (data == NULL) { 25496 return (EINVAL); 25497 } 25498 25499 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 25500 return (ENXIO); 25501 } 25502 25503 cdda = kmem_zalloc(sizeof (struct cdrom_cdda), KM_SLEEP); 25504 25505 #ifdef _MULTI_DATAMODEL 25506 switch (ddi_model_convert_from(flag & FMODELS)) { 25507 case DDI_MODEL_ILP32: 25508 if (ddi_copyin(data, cdda32, sizeof (*cdda32), flag)) { 25509 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25510 "sr_read_cdda: ddi_copyin Failed\n"); 25511 kmem_free(cdda, sizeof (struct cdrom_cdda)); 25512 return (EFAULT); 25513 } 25514 /* Convert the ILP32 uscsi data from the application to LP64 */ 25515 cdrom_cdda32tocdrom_cdda(cdda32, cdda); 25516 break; 25517 case DDI_MODEL_NONE: 25518 if (ddi_copyin(data, cdda, sizeof (struct cdrom_cdda), flag)) { 25519 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25520 "sr_read_cdda: ddi_copyin Failed\n"); 25521 kmem_free(cdda, sizeof (struct cdrom_cdda)); 25522 return (EFAULT); 25523 } 25524 break; 25525 } 25526 #else /* ! _MULTI_DATAMODEL */ 25527 if (ddi_copyin(data, cdda, sizeof (struct cdrom_cdda), flag)) { 25528 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25529 "sr_read_cdda: ddi_copyin Failed\n"); 25530 kmem_free(cdda, sizeof (struct cdrom_cdda)); 25531 return (EFAULT); 25532 } 25533 #endif /* _MULTI_DATAMODEL */ 25534 25535 /* 25536 * Since MMC-2 expects max 3 bytes for length, check if the 25537 * length input is greater than 3 bytes 25538 */ 25539 if ((cdda->cdda_length & 0xFF000000) != 0) { 25540 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_read_cdda: " 25541 "cdrom transfer length too large: %d (limit %d)\n", 25542 cdda->cdda_length, 0xFFFFFF); 25543 kmem_free(cdda, sizeof (struct cdrom_cdda)); 25544 return (EINVAL); 25545 } 25546 25547 switch (cdda->cdda_subcode) { 25548 case CDROM_DA_NO_SUBCODE: 25549 buflen = CDROM_BLK_2352 * cdda->cdda_length; 25550 break; 25551 case CDROM_DA_SUBQ: 25552 buflen = CDROM_BLK_2368 * cdda->cdda_length; 25553 break; 25554 case CDROM_DA_ALL_SUBCODE: 25555 buflen = CDROM_BLK_2448 * cdda->cdda_length; 25556 break; 25557 case CDROM_DA_SUBCODE_ONLY: 25558 buflen = CDROM_BLK_SUBCODE * cdda->cdda_length; 25559 break; 25560 default: 25561 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25562 "sr_read_cdda: Subcode '0x%x' Not Supported\n", 25563 cdda->cdda_subcode); 25564 kmem_free(cdda, sizeof (struct cdrom_cdda)); 25565 return (EINVAL); 25566 } 25567 25568 /* Build and send the command */ 25569 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 25570 bzero(cdb, CDB_GROUP5); 25571 25572 if (un->un_f_cfg_cdda == TRUE) { 25573 cdb[0] = (char)SCMD_READ_CD; 25574 cdb[1] = 0x04; 25575 cdb[2] = (((cdda->cdda_addr) & 0xff000000) >> 24); 25576 cdb[3] = (((cdda->cdda_addr) & 0x00ff0000) >> 16); 25577 cdb[4] = (((cdda->cdda_addr) & 0x0000ff00) >> 8); 25578 cdb[5] = ((cdda->cdda_addr) & 0x000000ff); 25579 
cdb[6] = (((cdda->cdda_length) & 0x00ff0000) >> 16); 25580 cdb[7] = (((cdda->cdda_length) & 0x0000ff00) >> 8); 25581 cdb[8] = ((cdda->cdda_length) & 0x000000ff); 25582 cdb[9] = 0x10; 25583 switch (cdda->cdda_subcode) { 25584 case CDROM_DA_NO_SUBCODE : 25585 cdb[10] = 0x0; 25586 break; 25587 case CDROM_DA_SUBQ : 25588 cdb[10] = 0x2; 25589 break; 25590 case CDROM_DA_ALL_SUBCODE : 25591 cdb[10] = 0x1; 25592 break; 25593 case CDROM_DA_SUBCODE_ONLY : 25594 /* FALLTHROUGH */ 25595 default : 25596 kmem_free(cdda, sizeof (struct cdrom_cdda)); 25597 kmem_free(com, sizeof (*com)); 25598 return (ENOTTY); 25599 } 25600 } else { 25601 cdb[0] = (char)SCMD_READ_CDDA; 25602 cdb[2] = (((cdda->cdda_addr) & 0xff000000) >> 24); 25603 cdb[3] = (((cdda->cdda_addr) & 0x00ff0000) >> 16); 25604 cdb[4] = (((cdda->cdda_addr) & 0x0000ff00) >> 8); 25605 cdb[5] = ((cdda->cdda_addr) & 0x000000ff); 25606 cdb[6] = (((cdda->cdda_length) & 0xff000000) >> 24); 25607 cdb[7] = (((cdda->cdda_length) & 0x00ff0000) >> 16); 25608 cdb[8] = (((cdda->cdda_length) & 0x0000ff00) >> 8); 25609 cdb[9] = ((cdda->cdda_length) & 0x000000ff); 25610 cdb[10] = cdda->cdda_subcode; 25611 } 25612 25613 com->uscsi_cdb = cdb; 25614 com->uscsi_cdblen = CDB_GROUP5; 25615 com->uscsi_bufaddr = (caddr_t)cdda->cdda_data; 25616 com->uscsi_buflen = buflen; 25617 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 25618 25619 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE, 25620 SD_PATH_STANDARD); 25621 25622 kmem_free(cdda, sizeof (struct cdrom_cdda)); 25623 kmem_free(com, sizeof (*com)); 25624 return (rval); 25625 } 25626 25627 25628 /* 25629 * Function: sr_read_cdxa() 25630 * 25631 * Description: This routine is the driver entry point for handling CD-ROM 25632 * ioctl requests to return CD-XA (Extended Architecture) data. 25633 * (CDROMCDXA). 25634 * 25635 * Arguments: dev - the device 'dev_t' 25636 * data - pointer to user provided CD-XA structure specifying 25637 * the data starting address, transfer length, and format 25638 * flag - this argument is a pass through to ddi_copyxxx() 25639 * directly from the mode argument of ioctl(). 25640 * 25641 * Return Code: the code returned by sd_send_scsi_cmd() 25642 * EFAULT if ddi_copyxxx() fails 25643 * ENXIO if fail ddi_get_soft_state 25644 * EINVAL if data pointer is NULL 25645 */ 25646 25647 static int 25648 sr_read_cdxa(dev_t dev, caddr_t data, int flag) 25649 { 25650 struct sd_lun *un; 25651 struct uscsi_cmd *com; 25652 struct cdrom_cdxa *cdxa; 25653 int rval; 25654 size_t buflen; 25655 char cdb[CDB_GROUP5]; 25656 uchar_t read_flags; 25657 25658 #ifdef _MULTI_DATAMODEL 25659 /* To support ILP32 applications in an LP64 world */ 25660 struct cdrom_cdxa32 cdrom_cdxa32; 25661 struct cdrom_cdxa32 *cdxa32 = &cdrom_cdxa32; 25662 #endif /* _MULTI_DATAMODEL */ 25663 25664 if (data == NULL) { 25665 return (EINVAL); 25666 } 25667 25668 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 25669 return (ENXIO); 25670 } 25671 25672 cdxa = kmem_zalloc(sizeof (struct cdrom_cdxa), KM_SLEEP); 25673 25674 #ifdef _MULTI_DATAMODEL 25675 switch (ddi_model_convert_from(flag & FMODELS)) { 25676 case DDI_MODEL_ILP32: 25677 if (ddi_copyin(data, cdxa32, sizeof (*cdxa32), flag)) { 25678 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 25679 return (EFAULT); 25680 } 25681 /* 25682 * Convert the ILP32 uscsi data from the 25683 * application to LP64 for internal use. 
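		 * (Illustrative note: the 32-bit variant of the structure
		 * carries a 32-bit data pointer; the conversion macro widens
		 * it so the CDB setup below is model-independent.)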
		 */
		cdrom_cdxa32tocdrom_cdxa(cdxa32, cdxa);
		break;
	case DDI_MODEL_NONE:
		if (ddi_copyin(data, cdxa, sizeof (struct cdrom_cdxa), flag)) {
			kmem_free(cdxa, sizeof (struct cdrom_cdxa));
			return (EFAULT);
		}
		break;
	}
#else /* ! _MULTI_DATAMODEL */
	if (ddi_copyin(data, cdxa, sizeof (struct cdrom_cdxa), flag)) {
		kmem_free(cdxa, sizeof (struct cdrom_cdxa));
		return (EFAULT);
	}
#endif /* _MULTI_DATAMODEL */

	/*
	 * Since MMC-2 allows at most 3 bytes for the transfer length,
	 * reject any request whose length does not fit in 3 bytes.
	 */
	if ((cdxa->cdxa_length & 0xFF000000) != 0) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_read_cdxa: "
		    "cdrom transfer length too large: %d (limit %d)\n",
		    cdxa->cdxa_length, 0xFFFFFF);
		kmem_free(cdxa, sizeof (struct cdrom_cdxa));
		return (EINVAL);
	}

	switch (cdxa->cdxa_format) {
	case CDROM_XA_DATA:
		buflen = CDROM_BLK_2048 * cdxa->cdxa_length;
		read_flags = 0x10;
		break;
	case CDROM_XA_SECTOR_DATA:
		buflen = CDROM_BLK_2352 * cdxa->cdxa_length;
		read_flags = 0xf8;
		break;
	case CDROM_XA_DATA_W_ERROR:
		buflen = CDROM_BLK_2646 * cdxa->cdxa_length;
		read_flags = 0xfc;
		break;
	default:
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sr_read_cdxa: Format '0x%x' Not Supported\n",
		    cdxa->cdxa_format);
		kmem_free(cdxa, sizeof (struct cdrom_cdxa));
		return (EINVAL);
	}

	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
	bzero(cdb, CDB_GROUP5);
	if (un->un_f_mmc_cap == TRUE) {
		cdb[0] = (char)SCMD_READ_CD;
		cdb[2] = (((cdxa->cdxa_addr) & 0xff000000) >> 24);
		cdb[3] = (((cdxa->cdxa_addr) & 0x00ff0000) >> 16);
		cdb[4] = (((cdxa->cdxa_addr) & 0x0000ff00) >> 8);
		cdb[5] = ((cdxa->cdxa_addr) & 0x000000ff);
		cdb[6] = (((cdxa->cdxa_length) & 0x00ff0000) >> 16);
		cdb[7] = (((cdxa->cdxa_length) & 0x0000ff00) >> 8);
		cdb[8] = ((cdxa->cdxa_length) & 0x000000ff);
		cdb[9] = (char)read_flags;
	} else {
		/*
		 * Note: A vendor specific command (0xDB) is being used here
		 * to request a read of all subcodes.
		 */
		cdb[0] = (char)SCMD_READ_CDXA;
		cdb[2] = (((cdxa->cdxa_addr) & 0xff000000) >> 24);
		cdb[3] = (((cdxa->cdxa_addr) & 0x00ff0000) >> 16);
		cdb[4] = (((cdxa->cdxa_addr) & 0x0000ff00) >> 8);
		cdb[5] = ((cdxa->cdxa_addr) & 0x000000ff);
		cdb[6] = (((cdxa->cdxa_length) & 0xff000000) >> 24);
		cdb[7] = (((cdxa->cdxa_length) & 0x00ff0000) >> 16);
		cdb[8] = (((cdxa->cdxa_length) & 0x0000ff00) >> 8);
		cdb[9] = ((cdxa->cdxa_length) & 0x000000ff);
		cdb[10] = cdxa->cdxa_format;
	}
	com->uscsi_cdb = cdb;
	com->uscsi_cdblen = CDB_GROUP5;
	com->uscsi_bufaddr = (caddr_t)cdxa->cdxa_data;
	com->uscsi_buflen = buflen;
	com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;
	rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE,
	    SD_PATH_STANDARD);
	kmem_free(cdxa, sizeof (struct cdrom_cdxa));
	kmem_free(com, sizeof (*com));
	return (rval);
}


/*
 * Function: sr_eject()
 *
 * Description: This routine is the driver entry point for handling CD-ROM
 *		eject ioctl requests (FDEJECT, DKIOCEJECT, CDROMEJECT)
 *
 * Arguments: dev - the device 'dev_t'
 *
 * Return Code: the code returned by sd_send_scsi_cmd()
 */

static int
sr_eject(dev_t dev)
{
	struct sd_lun *un;
	int rval;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
	    (un->un_state == SD_STATE_OFFLINE)) {
		return (ENXIO);
	}

	/*
	 * To prevent race conditions with the eject command, keep track
	 * of an eject command as it progresses. If we are already handling
	 * an eject command in the driver for the given unit and another
	 * request to eject is received, immediately return EAGAIN so we
	 * don't lose the command if the current eject command fails.
	 */
	mutex_enter(SD_MUTEX(un));
	if (un->un_f_ejecting == TRUE) {
		mutex_exit(SD_MUTEX(un));
		return (EAGAIN);
	}
	un->un_f_ejecting = TRUE;
	mutex_exit(SD_MUTEX(un));

	if ((rval = sd_send_scsi_DOORLOCK(un, SD_REMOVAL_ALLOW,
	    SD_PATH_STANDARD)) != 0) {
		mutex_enter(SD_MUTEX(un));
		un->un_f_ejecting = FALSE;
		mutex_exit(SD_MUTEX(un));
		return (rval);
	}

	rval = sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_EJECT,
	    SD_PATH_STANDARD);

	if (rval == 0) {
		mutex_enter(SD_MUTEX(un));
		sr_ejected(un);
		un->un_mediastate = DKIO_EJECTED;
		un->un_f_ejecting = FALSE;
		cv_broadcast(&un->un_state_cv);
		mutex_exit(SD_MUTEX(un));
	} else {
		mutex_enter(SD_MUTEX(un));
		un->un_f_ejecting = FALSE;
		mutex_exit(SD_MUTEX(un));
	}
	return (rval);
}


/*
 * Function: sr_ejected()
 *
 * Description: This routine updates the soft state structure to invalidate
 *		the geometry information after the media has been ejected
 *		or a media eject has been detected.
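 *
 *		The caller must hold SD_MUTEX(un); the mutex is dropped
 *		briefly around the cmlb_invalidate() call below.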
 *
 * Arguments: un - driver soft state (unit) structure
 */

static void
sr_ejected(struct sd_lun *un)
{
	struct sd_errstats *stp;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));

	un->un_f_blockcount_is_valid = FALSE;
	un->un_f_tgt_blocksize_is_valid = FALSE;
	mutex_exit(SD_MUTEX(un));
	cmlb_invalidate(un->un_cmlbhandle, (void *)SD_PATH_DIRECT_PRIORITY);
	mutex_enter(SD_MUTEX(un));

	if (un->un_errstats != NULL) {
		stp = (struct sd_errstats *)un->un_errstats->ks_data;
		stp->sd_capacity.value.ui64 = 0;
	}

	/* remove "capacity-of-device" properties */
	(void) ddi_prop_remove(DDI_DEV_T_NONE, SD_DEVINFO(un),
	    "device-nblocks");
	(void) ddi_prop_remove(DDI_DEV_T_NONE, SD_DEVINFO(un),
	    "device-blksize");
}


/*
 * Function: sr_check_wp()
 *
 * Description: This routine checks the write protection of removable media
 *		and hotpluggable devices via the write protect bit of the
 *		Mode Page Header device specific field. Some devices choke
 *		on an unsupported mode page. To work around this issue,
 *		this routine uses the 0x3f mode page (request all pages)
 *		for all device types.
 *
 * Arguments: dev - the device 'dev_t'
 *
 * Return Code: int indicating if the device is write protected (1) or not (0)
 *
 * Context: Kernel thread.
 *
 */

static int
sr_check_wp(dev_t dev)
{
	struct sd_lun	*un;
	uchar_t		device_specific;
	uchar_t		*sense;
	int		hdrlen;
	int		rval = FALSE;

	/*
	 * Note: The return codes for this routine should be reworked to
	 * properly handle the case of a NULL softstate.
	 */
	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (FALSE);
	}

	if (un->un_f_cfg_is_atapi == TRUE) {
		/*
		 * The mode page contents are not required; set the allocation
		 * length for the mode page header only
		 */
		hdrlen = MODE_HEADER_LENGTH_GRP2;
		sense = kmem_zalloc(hdrlen, KM_SLEEP);
		if (sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, sense, hdrlen,
		    MODEPAGE_ALLPAGES, SD_PATH_STANDARD) != 0)
			goto err_exit;
		device_specific =
		    ((struct mode_header_grp2 *)sense)->device_specific;
	} else {
		hdrlen = MODE_HEADER_LENGTH;
		sense = kmem_zalloc(hdrlen, KM_SLEEP);
		if (sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, sense, hdrlen,
		    MODEPAGE_ALLPAGES, SD_PATH_STANDARD) != 0)
			goto err_exit;
		device_specific =
		    ((struct mode_header *)sense)->device_specific;
	}

	/*
	 * Report the device as write protected only if the mode sense
	 * succeeded and the write protect bit is set. If the mode sense
	 * failed above we return FALSE: not all disks understand this
	 * query, and such devices are assumed to be writable.
	 */
	if (device_specific & WRITE_PROTECT) {
		rval = TRUE;
	}

err_exit:
	kmem_free(sense, hdrlen);
	return (rval);
}

/*
 * Function: sr_volume_ctrl()
 *
 * Description: This routine is the driver entry point for handling CD-ROM
 *		audio output volume ioctl requests. (CDROMVOLCTRL)
 *
 * Arguments: dev - the device 'dev_t'
 *	      data - pointer to user audio volume control structure
 *	      flag - this argument is a pass through to ddi_copyxxx()
 *		     directly from the mode argument of ioctl().
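 *
 *		The new levels are applied by reading the current audio
 *		control mode page (MODEPAGE_AUDIO_CTRL) with MODE SENSE,
 *		overlaying the channel 0/1 volume levels requested by the
 *		user, and sending the page back with MODE SELECT.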
25959 * 25960 * Return Code: the code returned by sd_send_scsi_cmd() 25961 * EFAULT if ddi_copyxxx() fails 25962 * ENXIO if fail ddi_get_soft_state 25963 * EINVAL if data pointer is NULL 25964 * 25965 */ 25966 25967 static int 25968 sr_volume_ctrl(dev_t dev, caddr_t data, int flag) 25969 { 25970 struct sd_lun *un; 25971 struct cdrom_volctrl volume; 25972 struct cdrom_volctrl *vol = &volume; 25973 uchar_t *sense_page; 25974 uchar_t *select_page; 25975 uchar_t *sense; 25976 uchar_t *select; 25977 int sense_buflen; 25978 int select_buflen; 25979 int rval; 25980 25981 if (data == NULL) { 25982 return (EINVAL); 25983 } 25984 25985 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 25986 (un->un_state == SD_STATE_OFFLINE)) { 25987 return (ENXIO); 25988 } 25989 25990 if (ddi_copyin(data, vol, sizeof (struct cdrom_volctrl), flag)) { 25991 return (EFAULT); 25992 } 25993 25994 if ((un->un_f_cfg_is_atapi == TRUE) || (un->un_f_mmc_cap == TRUE)) { 25995 struct mode_header_grp2 *sense_mhp; 25996 struct mode_header_grp2 *select_mhp; 25997 int bd_len; 25998 25999 sense_buflen = MODE_PARAM_LENGTH_GRP2 + MODEPAGE_AUDIO_CTRL_LEN; 26000 select_buflen = MODE_HEADER_LENGTH_GRP2 + 26001 MODEPAGE_AUDIO_CTRL_LEN; 26002 sense = kmem_zalloc(sense_buflen, KM_SLEEP); 26003 select = kmem_zalloc(select_buflen, KM_SLEEP); 26004 if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, sense, 26005 sense_buflen, MODEPAGE_AUDIO_CTRL, 26006 SD_PATH_STANDARD)) != 0) { 26007 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 26008 "sr_volume_ctrl: Mode Sense Failed\n"); 26009 kmem_free(sense, sense_buflen); 26010 kmem_free(select, select_buflen); 26011 return (rval); 26012 } 26013 sense_mhp = (struct mode_header_grp2 *)sense; 26014 select_mhp = (struct mode_header_grp2 *)select; 26015 bd_len = (sense_mhp->bdesc_length_hi << 8) | 26016 sense_mhp->bdesc_length_lo; 26017 if (bd_len > MODE_BLK_DESC_LENGTH) { 26018 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26019 "sr_volume_ctrl: Mode Sense returned invalid " 26020 "block descriptor length\n"); 26021 kmem_free(sense, sense_buflen); 26022 kmem_free(select, select_buflen); 26023 return (EIO); 26024 } 26025 sense_page = (uchar_t *) 26026 (sense + MODE_HEADER_LENGTH_GRP2 + bd_len); 26027 select_page = (uchar_t *)(select + MODE_HEADER_LENGTH_GRP2); 26028 select_mhp->length_msb = 0; 26029 select_mhp->length_lsb = 0; 26030 select_mhp->bdesc_length_hi = 0; 26031 select_mhp->bdesc_length_lo = 0; 26032 } else { 26033 struct mode_header *sense_mhp, *select_mhp; 26034 26035 sense_buflen = MODE_PARAM_LENGTH + MODEPAGE_AUDIO_CTRL_LEN; 26036 select_buflen = MODE_HEADER_LENGTH + MODEPAGE_AUDIO_CTRL_LEN; 26037 sense = kmem_zalloc(sense_buflen, KM_SLEEP); 26038 select = kmem_zalloc(select_buflen, KM_SLEEP); 26039 if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, sense, 26040 sense_buflen, MODEPAGE_AUDIO_CTRL, 26041 SD_PATH_STANDARD)) != 0) { 26042 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26043 "sr_volume_ctrl: Mode Sense Failed\n"); 26044 kmem_free(sense, sense_buflen); 26045 kmem_free(select, select_buflen); 26046 return (rval); 26047 } 26048 sense_mhp = (struct mode_header *)sense; 26049 select_mhp = (struct mode_header *)select; 26050 if (sense_mhp->bdesc_length > MODE_BLK_DESC_LENGTH) { 26051 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26052 "sr_volume_ctrl: Mode Sense returned invalid " 26053 "block descriptor length\n"); 26054 kmem_free(sense, sense_buflen); 26055 kmem_free(select, select_buflen); 26056 return (EIO); 26057 } 26058 sense_page = (uchar_t *) 26059 (sense + MODE_HEADER_LENGTH + 
sense_mhp->bdesc_length); 26060 select_page = (uchar_t *)(select + MODE_HEADER_LENGTH); 26061 select_mhp->length = 0; 26062 select_mhp->bdesc_length = 0; 26063 } 26064 /* 26065 * Note: An audio control data structure could be created and overlayed 26066 * on the following in place of the array indexing method implemented. 26067 */ 26068 26069 /* Build the select data for the user volume data */ 26070 select_page[0] = MODEPAGE_AUDIO_CTRL; 26071 select_page[1] = 0xE; 26072 /* Set the immediate bit */ 26073 select_page[2] = 0x04; 26074 /* Zero out reserved fields */ 26075 select_page[3] = 0x00; 26076 select_page[4] = 0x00; 26077 /* Return sense data for fields not to be modified */ 26078 select_page[5] = sense_page[5]; 26079 select_page[6] = sense_page[6]; 26080 select_page[7] = sense_page[7]; 26081 /* Set the user specified volume levels for channel 0 and 1 */ 26082 select_page[8] = 0x01; 26083 select_page[9] = vol->channel0; 26084 select_page[10] = 0x02; 26085 select_page[11] = vol->channel1; 26086 /* Channel 2 and 3 are currently unsupported so return the sense data */ 26087 select_page[12] = sense_page[12]; 26088 select_page[13] = sense_page[13]; 26089 select_page[14] = sense_page[14]; 26090 select_page[15] = sense_page[15]; 26091 26092 if ((un->un_f_cfg_is_atapi == TRUE) || (un->un_f_mmc_cap == TRUE)) { 26093 rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP1, select, 26094 select_buflen, SD_DONTSAVE_PAGE, SD_PATH_STANDARD); 26095 } else { 26096 rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, select, 26097 select_buflen, SD_DONTSAVE_PAGE, SD_PATH_STANDARD); 26098 } 26099 26100 kmem_free(sense, sense_buflen); 26101 kmem_free(select, select_buflen); 26102 return (rval); 26103 } 26104 26105 26106 /* 26107 * Function: sr_read_sony_session_offset() 26108 * 26109 * Description: This routine is the driver entry point for handling CD-ROM 26110 * ioctl requests for session offset information. (CDROMREADOFFSET) 26111 * The address of the first track in the last session of a 26112 * multi-session CD-ROM is returned 26113 * 26114 * Note: This routine uses a vendor specific key value in the 26115 * command control field without implementing any vendor check here 26116 * or in the ioctl routine. 26117 * 26118 * Arguments: dev - the device 'dev_t' 26119 * data - pointer to an int to hold the requested address 26120 * flag - this argument is a pass through to ddi_copyxxx() 26121 * directly from the mode argument of ioctl(). 26122 * 26123 * Return Code: the code returned by sd_send_scsi_cmd() 26124 * EFAULT if ddi_copyxxx() fails 26125 * ENXIO if fail ddi_get_soft_state 26126 * EINVAL if data pointer is NULL 26127 */ 26128 26129 static int 26130 sr_read_sony_session_offset(dev_t dev, caddr_t data, int flag) 26131 { 26132 struct sd_lun *un; 26133 struct uscsi_cmd *com; 26134 caddr_t buffer; 26135 char cdb[CDB_GROUP1]; 26136 int session_offset = 0; 26137 int rval; 26138 26139 if (data == NULL) { 26140 return (EINVAL); 26141 } 26142 26143 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 26144 (un->un_state == SD_STATE_OFFLINE)) { 26145 return (ENXIO); 26146 } 26147 26148 buffer = kmem_zalloc((size_t)SONY_SESSION_OFFSET_LEN, KM_SLEEP); 26149 bzero(cdb, CDB_GROUP1); 26150 cdb[0] = SCMD_READ_TOC; 26151 /* 26152 * Bytes 7 & 8 are the 12 byte allocation length for a single entry. 26153 * (4 byte TOC response header + 8 byte response data) 26154 */ 26155 cdb[8] = SONY_SESSION_OFFSET_LEN; 26156 /* Byte 9 is the control byte. 
A vendor specific value is used */
	cdb[9] = SONY_SESSION_OFFSET_KEY;
	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
	com->uscsi_cdb = cdb;
	com->uscsi_cdblen = CDB_GROUP1;
	com->uscsi_bufaddr = buffer;
	com->uscsi_buflen = SONY_SESSION_OFFSET_LEN;
	com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;

	rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
	    SD_PATH_STANDARD);
	if (rval != 0) {
		kmem_free(buffer, SONY_SESSION_OFFSET_LEN);
		kmem_free(com, sizeof (*com));
		return (rval);
	}
	if (buffer[1] == SONY_SESSION_OFFSET_VALID) {
		session_offset =
		    ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) +
		    ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]);
		/*
		 * The drive returns the offset in units of the current
		 * lbasize blocks. Convert it to 2K blocks before returning
		 * it to the user.
		 */
		if (un->un_tgt_blocksize == CDROM_BLK_512) {
			session_offset >>= 2;
		} else if (un->un_tgt_blocksize == CDROM_BLK_1024) {
			session_offset >>= 1;
		}
	}

	if (ddi_copyout(&session_offset, data, sizeof (int), flag) != 0) {
		rval = EFAULT;
	}

	kmem_free(buffer, SONY_SESSION_OFFSET_LEN);
	kmem_free(com, sizeof (*com));
	return (rval);
}


/*
 * Function: sd_wm_cache_constructor()
 *
 * Description: Cache Constructor for the wmap cache for the read/modify/write
 *		devices.
 *
 * Arguments: wm - A pointer to the sd_w_map to be initialized.
 *	      un - sd_lun structure for the device.
 *	      flag - the km flags passed to constructor
 *
 * Return Code: 0 on success.
 *		-1 on failure.
 */

/*ARGSUSED*/
static int
sd_wm_cache_constructor(void *wm, void *un, int flags)
{
	bzero(wm, sizeof (struct sd_w_map));
	cv_init(&((struct sd_w_map *)wm)->wm_avail, NULL, CV_DRIVER, NULL);
	return (0);
}


/*
 * Function: sd_wm_cache_destructor()
 *
 * Description: Cache destructor for the wmap cache for the read/modify/write
 *		devices.
 *
 * Arguments: wm - A pointer to the sd_w_map to be destroyed.
 *	      un - sd_lun structure for the device.
 */
/*ARGSUSED*/
static void
sd_wm_cache_destructor(void *wm, void *un)
{
	cv_destroy(&((struct sd_w_map *)wm)->wm_avail);
}


/*
 * Function: sd_range_lock()
 *
 * Description: Lock the specified range of blocks to ensure that a
 *		read-modify-write is atomic and that no other I/O writes
 *		to the same location. The range is specified in terms of
 *		start and end blocks. Block numbers are the actual media
 *		block numbers, not system block numbers.
 *
 * Arguments: un - sd_lun structure for the device.
 *	      startb - The starting block number
 *	      endb - The end block number
 *	      typ - type of i/o - simple/read_modify_write
 *
 * Return Code: wm - pointer to the wmap structure.
 *
 * Context: This routine can sleep.
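 *
 *		The body of this routine is a small state machine:
 *		SD_WM_CHK_LIST looks for a busy wmap overlapping the range;
 *		SD_WM_LOCK_RANGE allocates a wmap and links it onto the
 *		list; SD_WM_WAIT_MAP sleeps on the conflicting wmap's
 *		wm_avail condition variable and then rechecks the list.
 *		The loop exits via SD_WM_DONE holding a busy wmap that
 *		covers [startb, endb].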
 */

static struct sd_w_map *
sd_range_lock(struct sd_lun *un, daddr_t startb, daddr_t endb, ushort_t typ)
{
	struct sd_w_map *wmp = NULL;
	struct sd_w_map *sl_wmp = NULL;
	struct sd_w_map *tmp_wmp;
	wm_state state = SD_WM_CHK_LIST;


	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	mutex_enter(SD_MUTEX(un));

	while (state != SD_WM_DONE) {

		switch (state) {
		case SD_WM_CHK_LIST:
			/*
			 * This is the starting state. Check the wmap list
			 * to see if the range is currently available.
			 */
			if (!(typ & SD_WTYPE_RMW) && !(un->un_rmw_count)) {
				/*
				 * If this is a simple write and no rmw
				 * i/o is pending then try to lock the
				 * range as the range should be available.
				 */
				state = SD_WM_LOCK_RANGE;
			} else {
				tmp_wmp = sd_get_range(un, startb, endb);
				if (tmp_wmp != NULL) {
					if ((wmp != NULL) && ONLIST(un, wmp)) {
						/*
						 * Should not keep onlist
						 * wmps while waiting; this
						 * macro also sets wmp to
						 * NULL.
						 */
						FREE_ONLIST_WMAP(un, wmp);
					}
					/*
					 * sl_wmp is the wmap on which the
					 * wait is done. Since tmp_wmp points
					 * to the in-use wmap, set sl_wmp to
					 * tmp_wmp and change the state to
					 * sleep.
					 */
					sl_wmp = tmp_wmp;
					state = SD_WM_WAIT_MAP;
				} else {
					state = SD_WM_LOCK_RANGE;
				}

			}
			break;

		case SD_WM_LOCK_RANGE:
			ASSERT(un->un_wm_cache);
			/*
			 * The range needs to be locked, so try to get a
			 * wmap. Attempt it first with KM_NOSLEEP: we want
			 * to avoid sleeping if possible, since sleeping
			 * requires releasing the sd mutex.
			 */
			if (wmp == NULL)
				wmp = kmem_cache_alloc(un->un_wm_cache,
				    KM_NOSLEEP);
			if (wmp == NULL) {
				mutex_exit(SD_MUTEX(un));
				_NOTE(DATA_READABLE_WITHOUT_LOCK
				    (sd_lun::un_wm_cache))
				wmp = kmem_cache_alloc(un->un_wm_cache,
				    KM_SLEEP);
				mutex_enter(SD_MUTEX(un));
				/*
				 * we released the mutex so recheck and go to
				 * check list state.
				 */
				state = SD_WM_CHK_LIST;
			} else {
				/*
				 * We can exit the state machine since we
				 * have the wmap. Do the housekeeping first:
				 * place the wmap on the wmap list if it is
				 * not on it already, then set the state to
				 * done.
				 */
				wmp->wm_start = startb;
				wmp->wm_end = endb;
				wmp->wm_flags = typ | SD_WM_BUSY;
				if (typ & SD_WTYPE_RMW) {
					un->un_rmw_count++;
				}
				/*
				 * If not already on the list then link
				 */
				if (!ONLIST(un, wmp)) {
					wmp->wm_next = un->un_wm;
					wmp->wm_prev = NULL;
					if (wmp->wm_next)
						wmp->wm_next->wm_prev = wmp;
					un->un_wm = wmp;
				}
				state = SD_WM_DONE;
			}
			break;

		case SD_WM_WAIT_MAP:
			ASSERT(sl_wmp->wm_flags & SD_WM_BUSY);
			/*
			 * Wait is done on sl_wmp, which is set in the
			 * check_list state.
			 */
			sl_wmp->wm_wanted_count++;
			cv_wait(&sl_wmp->wm_avail, SD_MUTEX(un));
			sl_wmp->wm_wanted_count--;
			/*
			 * We can reuse the memory from the completed sl_wmp
			 * lock range for our new lock, but only if no one is
			 * waiting for it.
			 */
			ASSERT(!(sl_wmp->wm_flags & SD_WM_BUSY));
			if (sl_wmp->wm_wanted_count == 0) {
				if (wmp != NULL)
					CHK_N_FREEWMP(un, wmp);
				wmp = sl_wmp;
			}
			sl_wmp = NULL;
			/*
			 * After waking up, need to recheck for availability of
			 * range.
			 */
			state = SD_WM_CHK_LIST;
			break;

		default:
			panic("sd_range_lock: "
			    "Unknown state %d in sd_range_lock", state);
			/*NOTREACHED*/
		} /* switch(state) */

	} /* while(state != SD_WM_DONE) */

	mutex_exit(SD_MUTEX(un));

	ASSERT(wmp != NULL);

	return (wmp);
}


/*
 * Function: sd_get_range()
 *
 * Description: Find out whether any I/O overlaps the given range.
 *		Returns the write map of the first such I/O, or NULL if
 *		there is none.
 *
 * Arguments: un - sd_lun structure for the device.
 *	      startb - The starting block number
 *	      endb - The end block number
 *
 * Return Code: wm - pointer to the wmap structure.
 */

static struct sd_w_map *
sd_get_range(struct sd_lun *un, daddr_t startb, daddr_t endb)
{
	struct sd_w_map *wmp;

	ASSERT(un != NULL);

	for (wmp = un->un_wm; wmp != NULL; wmp = wmp->wm_next) {
		if (!(wmp->wm_flags & SD_WM_BUSY)) {
			continue;
		}
		if ((startb >= wmp->wm_start) && (startb <= wmp->wm_end)) {
			break;
		}
		if ((endb >= wmp->wm_start) && (endb <= wmp->wm_end)) {
			break;
		}
	}

	return (wmp);
}


/*
 * Function: sd_free_inlist_wmap()
 *
 * Description: Unlink and free a write map struct.
 *
 * Arguments: un - sd_lun structure for the device.
 *	      wmp - sd_w_map which needs to be unlinked.
 */

static void
sd_free_inlist_wmap(struct sd_lun *un, struct sd_w_map *wmp)
{
	ASSERT(un != NULL);

	if (un->un_wm == wmp) {
		un->un_wm = wmp->wm_next;
	} else {
		wmp->wm_prev->wm_next = wmp->wm_next;
	}

	if (wmp->wm_next) {
		wmp->wm_next->wm_prev = wmp->wm_prev;
	}

	wmp->wm_next = wmp->wm_prev = NULL;

	kmem_cache_free(un->un_wm_cache, wmp);
}


/*
 * Function: sd_range_unlock()
 *
 * Description: Unlock the range locked by wm.
 *		Free the write map if nobody else is waiting on it.
 *
 * Arguments: un - sd_lun structure for the device.
 *	      wm - sd_w_map which needs to be unlinked.
 */

static void
sd_range_unlock(struct sd_lun *un, struct sd_w_map *wm)
{
	ASSERT(un != NULL);
	ASSERT(wm != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	mutex_enter(SD_MUTEX(un));

	if (wm->wm_flags & SD_WTYPE_RMW) {
		un->un_rmw_count--;
	}

	if (wm->wm_wanted_count) {
		wm->wm_flags = 0;
		/*
		 * Broadcast that the wmap is available now.
		 */
		cv_broadcast(&wm->wm_avail);
	} else {
		/*
		 * If no one is waiting on the map, it should be freed.
		 */
		sd_free_inlist_wmap(un, wm);
	}

	mutex_exit(SD_MUTEX(un));
}


/*
 * Function: sd_read_modify_write_task
 *
 * Description: Called from a taskq thread to initiate the write phase of
 *		a read-modify-write request. This is used for targets where
 *		un->un_sys_blocksize != un->un_tgt_blocksize.
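 *
 *		For example, with a 512-byte un_sys_blocksize and a 2K
 *		un_tgt_blocksize, a 512-byte write is serviced by reading
 *		the enclosing 2K target block, merging the new 512 bytes
 *		into it, and writing the full 2K block back; this routine
 *		issues that final write.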
 *
 * Arguments: arg - a pointer to the buf(9S) struct for the write command.
 *
 * Context: Called under taskq thread context.
 */

static void
sd_read_modify_write_task(void *arg)
{
	struct sd_mapblocksize_info	*bsp;
	struct buf	*bp;
	struct sd_xbuf	*xp;
	struct sd_lun	*un;

	bp = arg;	/* The bp is given in arg */
	ASSERT(bp != NULL);

	/* Get the pointer to the layer-private data struct */
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);
	bsp = xp->xb_private;
	ASSERT(bsp != NULL);

	un = SD_GET_UN(bp);
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	SD_TRACE(SD_LOG_IO_RMMEDIA, un,
	    "sd_read_modify_write_task: entry: buf:0x%p\n", bp);

	/*
	 * This is the write phase of a read-modify-write request, called
	 * under the context of a taskq thread in response to the completion
	 * of the read portion of the rmw request completing under interrupt
	 * context. The write request must be sent from here down the iostart
	 * chain as if it were being sent from sd_mapblocksize_iostart(), so
	 * we use the layer index saved in the layer-private data area.
	 */
	SD_NEXT_IOSTART(bsp->mbs_layer_index, un, bp);

	SD_TRACE(SD_LOG_IO_RMMEDIA, un,
	    "sd_read_modify_write_task: exit: buf:0x%p\n", bp);
}


/*
 * Function: sddump_do_read_of_rmw()
 *
 * Description: This routine is called from sddump. If sddump is called
 *		with an I/O that is not aligned on a device blocksize
 *		boundary, the write has to be converted to a
 *		read-modify-write. Do the read part here in order to keep
 *		sddump simple. Note that the sd_mutex is held across the
 *		call to this routine.
 *
 * Arguments: un	- sd_lun
 *	      blkno	- block number in terms of media block size.
 *	      nblk	- number of blocks.
 *	      bpp	- pointer to pointer to the buf structure. On return
 *			  from this function, *bpp points to the valid buffer
 *			  to which the write has to be done.
 *
 * Return Code: 0 for success or errno-type return code
 */

static int
sddump_do_read_of_rmw(struct sd_lun *un, uint64_t blkno, uint64_t nblk,
    struct buf **bpp)
{
	int err;
	int i;
	int rval;
	struct buf *bp;
	struct scsi_pkt *pkt = NULL;
	uint32_t target_blocksize;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));

	target_blocksize = un->un_tgt_blocksize;

	mutex_exit(SD_MUTEX(un));

	bp = scsi_alloc_consistent_buf(SD_ADDRESS(un), (struct buf *)NULL,
	    (size_t)(nblk * target_blocksize), B_READ, NULL_FUNC, NULL);
	if (bp == NULL) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_CONT,
		    "no resources for dumping; giving up");
		err = ENOMEM;
		goto done;
	}

	rval = sd_setup_rw_pkt(un, &pkt, bp, 0, NULL_FUNC, NULL,
	    blkno, nblk);
	if (rval != 0) {
		scsi_free_consistent_buf(bp);
		scsi_log(SD_DEVINFO(un), sd_label, CE_CONT,
		    "no resources for dumping; giving up");
		err = ENOMEM;
		goto done;
	}

	pkt->pkt_flags |= FLAG_NOINTR;

	err = EIO;
	for (i = 0; i < SD_NDUMP_RETRIES; i++) {

		/*
		 * scsi_poll returns 0 (success) if the command completes and
		 * the status block is STATUS_GOOD.
We should only check 26628 * errors if this condition is not true. Even then we should 26629 * send our own request sense packet only if we have a check 26630 * condition and auto request sense has not been performed by 26631 * the hba. 26632 */ 26633 SD_TRACE(SD_LOG_DUMP, un, "sddump: sending read\n"); 26634 26635 if ((sd_scsi_poll(un, pkt) == 0) && (pkt->pkt_resid == 0)) { 26636 err = 0; 26637 break; 26638 } 26639 26640 /* 26641 * Check CMD_DEV_GONE 1st, give up if device is gone, 26642 * no need to read RQS data. 26643 */ 26644 if (pkt->pkt_reason == CMD_DEV_GONE) { 26645 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26646 "Error while dumping state with rmw..." 26647 "Device is gone\n"); 26648 break; 26649 } 26650 26651 if (SD_GET_PKT_STATUS(pkt) == STATUS_CHECK) { 26652 SD_INFO(SD_LOG_DUMP, un, 26653 "sddump: read failed with CHECK, try # %d\n", i); 26654 if (((pkt->pkt_state & STATE_ARQ_DONE) == 0)) { 26655 (void) sd_send_polled_RQS(un); 26656 } 26657 26658 continue; 26659 } 26660 26661 if (SD_GET_PKT_STATUS(pkt) == STATUS_BUSY) { 26662 int reset_retval = 0; 26663 26664 SD_INFO(SD_LOG_DUMP, un, 26665 "sddump: read failed with BUSY, try # %d\n", i); 26666 26667 if (un->un_f_lun_reset_enabled == TRUE) { 26668 reset_retval = scsi_reset(SD_ADDRESS(un), 26669 RESET_LUN); 26670 } 26671 if (reset_retval == 0) { 26672 (void) scsi_reset(SD_ADDRESS(un), RESET_TARGET); 26673 } 26674 (void) sd_send_polled_RQS(un); 26675 26676 } else { 26677 SD_INFO(SD_LOG_DUMP, un, 26678 "sddump: read failed with 0x%x, try # %d\n", 26679 SD_GET_PKT_STATUS(pkt), i); 26680 mutex_enter(SD_MUTEX(un)); 26681 sd_reset_target(un, pkt); 26682 mutex_exit(SD_MUTEX(un)); 26683 } 26684 26685 /* 26686 * If we are not getting anywhere with lun/target resets, 26687 * let's reset the bus. 26688 */ 26689 if (i > SD_NDUMP_RETRIES/2) { 26690 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL); 26691 (void) sd_send_polled_RQS(un); 26692 } 26693 26694 } 26695 scsi_destroy_pkt(pkt); 26696 26697 if (err != 0) { 26698 scsi_free_consistent_buf(bp); 26699 *bpp = NULL; 26700 } else { 26701 *bpp = bp; 26702 } 26703 26704 done: 26705 mutex_enter(SD_MUTEX(un)); 26706 return (err); 26707 } 26708 26709 26710 /* 26711 * Function: sd_failfast_flushq 26712 * 26713 * Description: Take all bp's on the wait queue that have B_FAILFAST set 26714 * in b_flags and move them onto the failfast queue, then kick 26715 * off a thread to return all bp's on the failfast queue to 26716 * their owners with an error set. 26717 * 26718 * Arguments: un - pointer to the soft state struct for the instance. 26719 * 26720 * Context: may execute in interrupt context. 26721 */ 26722 26723 static void 26724 sd_failfast_flushq(struct sd_lun *un) 26725 { 26726 struct buf *bp; 26727 struct buf *next_waitq_bp; 26728 struct buf *prev_waitq_bp = NULL; 26729 26730 ASSERT(un != NULL); 26731 ASSERT(mutex_owned(SD_MUTEX(un))); 26732 ASSERT(un->un_failfast_state == SD_FAILFAST_ACTIVE); 26733 ASSERT(un->un_failfast_bp == NULL); 26734 26735 SD_TRACE(SD_LOG_IO_FAILFAST, un, 26736 "sd_failfast_flushq: entry: un:0x%p\n", un); 26737 26738 /* 26739 * Check if we should flush all bufs when entering failfast state, or 26740 * just those with B_FAILFAST set. 26741 */ 26742 if (sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_BUFS) { 26743 /* 26744 * Move *all* bp's on the wait queue to the failfast flush 26745 * queue, including those that do NOT have B_FAILFAST set. 
26746 */ 26747 if (un->un_failfast_headp == NULL) { 26748 ASSERT(un->un_failfast_tailp == NULL); 26749 un->un_failfast_headp = un->un_waitq_headp; 26750 } else { 26751 ASSERT(un->un_failfast_tailp != NULL); 26752 un->un_failfast_tailp->av_forw = un->un_waitq_headp; 26753 } 26754 26755 un->un_failfast_tailp = un->un_waitq_tailp; 26756 26757 /* update kstat for each bp moved out of the waitq */ 26758 for (bp = un->un_waitq_headp; bp != NULL; bp = bp->av_forw) { 26759 SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp); 26760 } 26761 26762 /* empty the waitq */ 26763 un->un_waitq_headp = un->un_waitq_tailp = NULL; 26764 26765 } else { 26766 /* 26767 * Go thru the wait queue, pick off all entries with 26768 * B_FAILFAST set, and move these onto the failfast queue. 26769 */ 26770 for (bp = un->un_waitq_headp; bp != NULL; bp = next_waitq_bp) { 26771 /* 26772 * Save the pointer to the next bp on the wait queue, 26773 * so we get to it on the next iteration of this loop. 26774 */ 26775 next_waitq_bp = bp->av_forw; 26776 26777 /* 26778 * If this bp from the wait queue does NOT have 26779 * B_FAILFAST set, just move on to the next element 26780 * in the wait queue. Note, this is the only place 26781 * where it is correct to set prev_waitq_bp. 26782 */ 26783 if ((bp->b_flags & B_FAILFAST) == 0) { 26784 prev_waitq_bp = bp; 26785 continue; 26786 } 26787 26788 /* 26789 * Remove the bp from the wait queue. 26790 */ 26791 if (bp == un->un_waitq_headp) { 26792 /* The bp is the first element of the waitq. */ 26793 un->un_waitq_headp = next_waitq_bp; 26794 if (un->un_waitq_headp == NULL) { 26795 /* The wait queue is now empty */ 26796 un->un_waitq_tailp = NULL; 26797 } 26798 } else { 26799 /* 26800 * The bp is either somewhere in the middle 26801 * or at the end of the wait queue. 26802 */ 26803 ASSERT(un->un_waitq_headp != NULL); 26804 ASSERT(prev_waitq_bp != NULL); 26805 ASSERT((prev_waitq_bp->b_flags & B_FAILFAST) 26806 == 0); 26807 if (bp == un->un_waitq_tailp) { 26808 /* bp is the last entry on the waitq. */ 26809 ASSERT(next_waitq_bp == NULL); 26810 un->un_waitq_tailp = prev_waitq_bp; 26811 } 26812 prev_waitq_bp->av_forw = next_waitq_bp; 26813 } 26814 bp->av_forw = NULL; 26815 26816 /* 26817 * update kstat since the bp is moved out of 26818 * the waitq 26819 */ 26820 SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp); 26821 26822 /* 26823 * Now put the bp onto the failfast queue. 26824 */ 26825 if (un->un_failfast_headp == NULL) { 26826 /* failfast queue is currently empty */ 26827 ASSERT(un->un_failfast_tailp == NULL); 26828 un->un_failfast_headp = 26829 un->un_failfast_tailp = bp; 26830 } else { 26831 /* Add the bp to the end of the failfast q */ 26832 ASSERT(un->un_failfast_tailp != NULL); 26833 ASSERT(un->un_failfast_tailp->b_flags & 26834 B_FAILFAST); 26835 un->un_failfast_tailp->av_forw = bp; 26836 un->un_failfast_tailp = bp; 26837 } 26838 } 26839 } 26840 26841 /* 26842 * Now return all bp's on the failfast queue to their owners. 26843 */ 26844 while ((bp = un->un_failfast_headp) != NULL) { 26845 26846 un->un_failfast_headp = bp->av_forw; 26847 if (un->un_failfast_headp == NULL) { 26848 un->un_failfast_tailp = NULL; 26849 } 26850 26851 /* 26852 * We want to return the bp with a failure error code, but 26853 * we do not want a call to sd_start_cmds() to occur here, 26854 * so use sd_return_failed_command_no_restart() instead of 26855 * sd_return_failed_command(). 26856 */ 26857 sd_return_failed_command_no_restart(un, bp, EIO); 26858 } 26859 26860 /* Flush the xbuf queues if required. 
 */
	if (sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_QUEUES) {
		ddi_xbuf_flushq(un->un_xbuf_attr, sd_failfast_flushq_callback);
	}

	SD_TRACE(SD_LOG_IO_FAILFAST, un,
	    "sd_failfast_flushq: exit: un:0x%p\n", un);
}


/*
 * Function: sd_failfast_flushq_callback
 *
 * Description: Return TRUE if the given bp meets the criteria for failfast
 *		flushing. Used with ddi_xbuf_flushq(9F).
 *
 * Arguments: bp - ptr to buf struct to be examined.
 *
 * Context: Any
 */

static int
sd_failfast_flushq_callback(struct buf *bp)
{
	/*
	 * Return TRUE if (1) we want to flush ALL bufs when the failfast
	 * state is entered; OR (2) the given bp has B_FAILFAST set.
	 */
	return (((sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_BUFS) ||
	    (bp->b_flags & B_FAILFAST)) ? TRUE : FALSE);
}



/*
 * Function: sd_setup_next_xfer
 *
 * Description: Prepare next I/O operation using DMA_PARTIAL
 *
 */

static int
sd_setup_next_xfer(struct sd_lun *un, struct buf *bp,
    struct scsi_pkt *pkt, struct sd_xbuf *xp)
{
	ssize_t	num_blks_not_xfered;
	daddr_t	strt_blk_num;
	ssize_t	bytes_not_xfered;
	int	rval;

	ASSERT(pkt->pkt_resid == 0);

	/*
	 * Calculate next block number and amount to be transferred.
	 *
	 * How much data has NOT been transferred to the HBA yet.
	 */
	bytes_not_xfered = xp->xb_dma_resid;

	/*
	 * figure how many blocks have NOT been transferred to the HBA yet.
	 */
	num_blks_not_xfered = SD_BYTES2TGTBLOCKS(un, bytes_not_xfered);

	/*
	 * set starting block number to the end of what WAS transferred.
	 */
	strt_blk_num = xp->xb_blkno +
	    SD_BYTES2TGTBLOCKS(un, bp->b_bcount - bytes_not_xfered);

	/*
	 * Move pkt to the next portion of the xfer. sd_setup_next_rw_pkt
	 * will call scsi_init_pkt with NULL_FUNC so we do not have to release
	 * the disk mutex here.
	 */
	rval = sd_setup_next_rw_pkt(un, pkt, bp,
	    strt_blk_num, num_blks_not_xfered);

	if (rval == 0) {

		/*
		 * Success.
		 *
		 * Adjust things if there are still more blocks to be
		 * transferred.
		 */
		xp->xb_dma_resid = pkt->pkt_resid;
		pkt->pkt_resid = 0;

		return (1);
	}

	/*
	 * There's really only one possible return value from
	 * sd_setup_next_rw_pkt which occurs when scsi_init_pkt
	 * returns NULL.
	 */
	ASSERT(rval == SD_PKT_ALLOC_FAILURE);

	bp->b_resid = bp->b_bcount;
	bp->b_flags |= B_ERROR;

	scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
	    "Error setting up next portion of DMA transfer\n");

	return (0);
}

/*
 * Function: sd_panic_for_res_conflict
 *
 * Description: Call panic with a string formatted with "Reservation Conflict"
 *		and a human readable identifier indicating the SD instance
 *		that experienced the reservation conflict.
 *
 * Arguments: un - pointer to the soft state struct for the instance.
 *
 * Context: may execute in interrupt context.
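 *
 *		The panic string buffer is sized for the fixed format text
 *		(SD_RESV_CONFLICT_FMT_LEN) plus a full device path
 *		(MAXPATHLEN).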
 */

#define	SD_RESV_CONFLICT_FMT_LEN 40
void
sd_panic_for_res_conflict(struct sd_lun *un)
{
	char panic_str[SD_RESV_CONFLICT_FMT_LEN+MAXPATHLEN];
	char path_str[MAXPATHLEN];

	(void) snprintf(panic_str, sizeof (panic_str),
	    "Reservation Conflict\nDisk: %s",
	    ddi_pathname(SD_DEVINFO(un), path_str));

	panic(panic_str);
}

/*
 * Note: The following sd_faultinjection_ioctl( ) routines implement
 * driver support for handling fault injection for error analysis by
 * causing faults in multiple layers of the driver.
 *
 */

#ifdef SD_FAULT_INJECTION
static uint_t   sd_fault_injection_on = 0;

/*
 * Function: sd_faultinjection_ioctl()
 *
 * Description: This routine is the driver entry point for handling
 *		faultinjection ioctls to inject errors into the
 *		layer model
 *
 * Arguments: cmd - the ioctl cmd received
 *	      arg - the ioctl argument from the user; also used to return
 *		    data to the user
 */

static void
sd_faultinjection_ioctl(int cmd, intptr_t arg, struct sd_lun *un)
{
	uint_t i;
	uint_t rval;

	SD_TRACE(SD_LOG_IOERR, un, "sd_faultinjection_ioctl: entry\n");

	mutex_enter(SD_MUTEX(un));

	switch (cmd) {
	case SDIOCRUN:
		/* Allow pushed faults to be injected */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Run\n");

		sd_fault_injection_on = 1;

		SD_INFO(SD_LOG_IOERR, un,
		    "sd_faultinjection_ioctl: run finished\n");
		break;

	case SDIOCSTART:
		/* Start Injection Session */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Start\n");

		sd_fault_injection_on = 0;
		un->sd_injection_mask = 0xFFFFFFFF;
		for (i = 0; i < SD_FI_MAX_ERROR; i++) {
			un->sd_fi_fifo_pkt[i] = NULL;
			un->sd_fi_fifo_xb[i] = NULL;
			un->sd_fi_fifo_un[i] = NULL;
			un->sd_fi_fifo_arq[i] = NULL;
		}
		un->sd_fi_fifo_start = 0;
		un->sd_fi_fifo_end = 0;

		mutex_enter(&(un->un_fi_mutex));
		un->sd_fi_log[0] = '\0';
		un->sd_fi_buf_len = 0;
		mutex_exit(&(un->un_fi_mutex));

		SD_INFO(SD_LOG_IOERR, un,
		    "sd_faultinjection_ioctl: start finished\n");
		break;

	case SDIOCSTOP:
		/* Stop Injection Session */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Stop\n");
		sd_fault_injection_on = 0;
		un->sd_injection_mask = 0x0;

		/* Empty stray or unused structs from fifo */
		for (i = 0; i < SD_FI_MAX_ERROR; i++) {
			if (un->sd_fi_fifo_pkt[i] != NULL) {
				kmem_free(un->sd_fi_fifo_pkt[i],
				    sizeof (struct sd_fi_pkt));
			}
			if (un->sd_fi_fifo_xb[i] != NULL) {
				kmem_free(un->sd_fi_fifo_xb[i],
				    sizeof (struct sd_fi_xb));
			}
			if (un->sd_fi_fifo_un[i] != NULL) {
				kmem_free(un->sd_fi_fifo_un[i],
				    sizeof (struct sd_fi_un));
			}
			if (un->sd_fi_fifo_arq[i] != NULL) {
				kmem_free(un->sd_fi_fifo_arq[i],
				    sizeof (struct sd_fi_arq));
			}
			un->sd_fi_fifo_pkt[i] = NULL;
			un->sd_fi_fifo_un[i] = NULL;
			un->sd_fi_fifo_xb[i] = NULL;
			un->sd_fi_fifo_arq[i] = NULL;
		}
		un->sd_fi_fifo_start = 0;
		un->sd_fi_fifo_end = 0;

		SD_INFO(SD_LOG_IOERR, un,
		    "sd_faultinjection_ioctl: stop finished\n");
		break;

	case SDIOCINSERTPKT:
		/* Store a packet struct to be pushed onto fifo */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Insert Pkt\n");

		i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR;

		sd_fault_injection_on = 0;

		/* No more than SD_FI_MAX_ERROR allowed in Queue */
		if (un->sd_fi_fifo_pkt[i] != NULL) {
			kmem_free(un->sd_fi_fifo_pkt[i],
			    sizeof (struct sd_fi_pkt));
		}
		if (arg != NULL) {
			un->sd_fi_fifo_pkt[i] =
			    kmem_alloc(sizeof (struct sd_fi_pkt), KM_NOSLEEP);
			if (un->sd_fi_fifo_pkt[i] == NULL) {
				/* Alloc failed; don't store anything */
				break;
			}
			rval = ddi_copyin((void *)arg, un->sd_fi_fifo_pkt[i],
			    sizeof (struct sd_fi_pkt), 0);
			if (rval == -1) {
				kmem_free(un->sd_fi_fifo_pkt[i],
				    sizeof (struct sd_fi_pkt));
				un->sd_fi_fifo_pkt[i] = NULL;
			}
		} else {
			SD_INFO(SD_LOG_IOERR, un,
			    "sd_faultinjection_ioctl: pkt null\n");
		}
		break;

	case SDIOCINSERTXB:
		/* Store a xb struct to be pushed onto fifo */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Insert XB\n");

		i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR;

		sd_fault_injection_on = 0;

		if (un->sd_fi_fifo_xb[i] != NULL) {
			kmem_free(un->sd_fi_fifo_xb[i],
			    sizeof (struct sd_fi_xb));
			un->sd_fi_fifo_xb[i] = NULL;
		}
		if (arg != NULL) {
			un->sd_fi_fifo_xb[i] =
			    kmem_alloc(sizeof (struct sd_fi_xb), KM_NOSLEEP);
			if (un->sd_fi_fifo_xb[i] == NULL) {
				/* Alloc failed; don't store anything */
				break;
			}
			rval = ddi_copyin((void *)arg, un->sd_fi_fifo_xb[i],
			    sizeof (struct sd_fi_xb), 0);

			if (rval == -1) {
				kmem_free(un->sd_fi_fifo_xb[i],
				    sizeof (struct sd_fi_xb));
				un->sd_fi_fifo_xb[i] = NULL;
			}
		} else {
			SD_INFO(SD_LOG_IOERR, un,
			    "sd_faultinjection_ioctl: xb null\n");
		}
		break;

	case SDIOCINSERTUN:
		/* Store a un struct to be pushed onto fifo */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Insert UN\n");

		i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR;

		sd_fault_injection_on = 0;

		if (un->sd_fi_fifo_un[i] != NULL) {
			kmem_free(un->sd_fi_fifo_un[i],
			    sizeof (struct sd_fi_un));
			un->sd_fi_fifo_un[i] = NULL;
		}
		if (arg != NULL) {
			un->sd_fi_fifo_un[i] =
			    kmem_alloc(sizeof (struct sd_fi_un), KM_NOSLEEP);
			if (un->sd_fi_fifo_un[i] == NULL) {
				/* Alloc failed; don't store anything */
				break;
			}
			rval = ddi_copyin((void *)arg, un->sd_fi_fifo_un[i],
			    sizeof (struct sd_fi_un), 0);
			if (rval == -1) {
				kmem_free(un->sd_fi_fifo_un[i],
				    sizeof (struct sd_fi_un));
				un->sd_fi_fifo_un[i] = NULL;
			}

		} else {
			SD_INFO(SD_LOG_IOERR, un,
			    "sd_faultinjection_ioctl: un null\n");
		}

		break;

	case SDIOCINSERTARQ:
		/* Store an arq struct to be pushed onto fifo */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Insert ARQ\n");
		i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR;

		sd_fault_injection_on = 0;

		if (un->sd_fi_fifo_arq[i] != NULL) {
			kmem_free(un->sd_fi_fifo_arq[i],
			    sizeof (struct sd_fi_arq));
			un->sd_fi_fifo_arq[i] = NULL;
		}
		if (arg != NULL) {
			un->sd_fi_fifo_arq[i] =
			    kmem_alloc(sizeof (struct sd_fi_arq), KM_NOSLEEP);
			if (un->sd_fi_fifo_arq[i] == NULL) {
				/* Alloc failed; don't store anything */
				break;
			}
			rval = ddi_copyin((void *)arg, un->sd_fi_fifo_arq[i],
			    sizeof (struct sd_fi_arq), 0);
			if (rval == -1) {
				kmem_free(un->sd_fi_fifo_arq[i],
				    sizeof (struct sd_fi_arq));
				un->sd_fi_fifo_arq[i] = NULL;
			}

		} else {
			SD_INFO(SD_LOG_IOERR, un,
			    "sd_faultinjection_ioctl: arq null\n");
		}

		break;

	case SDIOCPUSH:
		/* Push stored xb, pkt, un, and arq onto fifo */
		sd_fault_injection_on = 0;

		if (arg != NULL) {
			rval = ddi_copyin((void *)arg, &i, sizeof (uint_t), 0);
			if (rval != -1 &&
			    un->sd_fi_fifo_end + i < SD_FI_MAX_ERROR) {
				un->sd_fi_fifo_end += i;
			}
		} else {
			SD_INFO(SD_LOG_IOERR, un,
			    "sd_faultinjection_ioctl: push arg null\n");
			/* With no count supplied, push a single slot */
			if (un->sd_fi_fifo_end + 1 < SD_FI_MAX_ERROR) {
				un->sd_fi_fifo_end++;
			}
		}
		SD_INFO(SD_LOG_IOERR, un,
		    "sd_faultinjection_ioctl: push to end=%d\n",
		    un->sd_fi_fifo_end);
		break;

	case SDIOCRETRIEVE:
		/* Return buffer of log from Injection session */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Retrieve");

		sd_fault_injection_on = 0;

		mutex_enter(&(un->un_fi_mutex));
		rval = ddi_copyout(un->sd_fi_log, (void *)arg,
		    un->sd_fi_buf_len+1, 0);
		mutex_exit(&(un->un_fi_mutex));

		if (rval == -1) {
			/*
			 * arg is possibly invalid; set it to NULL
			 * for return.
			 */
			arg = NULL;
		}
		break;
	}

	mutex_exit(SD_MUTEX(un));
	SD_TRACE(SD_LOG_IOERR, un, "sd_faultinjection_ioctl:"
	    " exit\n");
}


/*
 * Function: sd_injection_log()
 *
 * Description: This routine adds buf to the already existing injection log
 *		for retrieval via faultinjection_ioctl for use in fault
 *		detection and recovery
 *
 * Arguments: buf - the string to add to the log
 */

static void
sd_injection_log(char *buf, struct sd_lun *un)
{
	uint_t len;

	ASSERT(un != NULL);
	ASSERT(buf != NULL);

	mutex_enter(&(un->un_fi_mutex));

	len = min(strlen(buf), 255);
	/* Add logged value to Injection log to be returned later */
	if (len + un->sd_fi_buf_len < SD_FI_MAX_BUF) {
		uint_t	offset = strlen((char *)un->sd_fi_log);
		char *destp = (char *)un->sd_fi_log + offset;
		int i;
		for (i = 0; i < len; i++) {
			*destp++ = *buf++;
		}
		un->sd_fi_buf_len += len;
		un->sd_fi_log[un->sd_fi_buf_len] = '\0';
	}

	mutex_exit(&(un->un_fi_mutex));
}


/*
 * Function: sd_faultinjection()
 *
 * Description: This routine takes the pkt and changes its
 *		content based on the error injection scenario.
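 *
 *		Faults staged via the SDIOC* ioctls above are consumed here,
 *		one fifo slot per invocation. A test harness would typically
 *		drive this along the lines of the following user-level
 *		sketch (illustrative only; the device path and the staged
 *		struct contents are placeholders):
 *
 *		    int fd = open("/dev/rdsk/...", O_RDWR);
 *		    (void) ioctl(fd, SDIOCSTART, NULL);
 *		    (void) ioctl(fd, SDIOCINSERTPKT, &fi_pkt);
 *		    (void) ioctl(fd, SDIOCPUSH, &count);
 *		    (void) ioctl(fd, SDIOCRUN, NULL);
 *		    ... run I/O against the device ...
 *		    (void) ioctl(fd, SDIOCRETRIEVE, logbuf);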
27331 * 27332 * Arguments: pktp - packet to be changed 27333 */ 27334 27335 static void 27336 sd_faultinjection(struct scsi_pkt *pktp) 27337 { 27338 uint_t i; 27339 struct sd_fi_pkt *fi_pkt; 27340 struct sd_fi_xb *fi_xb; 27341 struct sd_fi_un *fi_un; 27342 struct sd_fi_arq *fi_arq; 27343 struct buf *bp; 27344 struct sd_xbuf *xb; 27345 struct sd_lun *un; 27346 27347 ASSERT(pktp != NULL); 27348 27349 /* pull bp xb and un from pktp */ 27350 bp = (struct buf *)pktp->pkt_private; 27351 xb = SD_GET_XBUF(bp); 27352 un = SD_GET_UN(bp); 27353 27354 ASSERT(un != NULL); 27355 27356 mutex_enter(SD_MUTEX(un)); 27357 27358 SD_TRACE(SD_LOG_SDTEST, un, 27359 "sd_faultinjection: entry Injection from sdintr\n"); 27360 27361 /* if injection is off return */ 27362 if (sd_fault_injection_on == 0 || 27363 un->sd_fi_fifo_start == un->sd_fi_fifo_end) { 27364 mutex_exit(SD_MUTEX(un)); 27365 return; 27366 } 27367 27368 27369 /* take next set off fifo */ 27370 i = un->sd_fi_fifo_start % SD_FI_MAX_ERROR; 27371 27372 fi_pkt = un->sd_fi_fifo_pkt[i]; 27373 fi_xb = un->sd_fi_fifo_xb[i]; 27374 fi_un = un->sd_fi_fifo_un[i]; 27375 fi_arq = un->sd_fi_fifo_arq[i]; 27376 27377 27378 /* set variables accordingly */ 27379 /* set pkt if it was on fifo */ 27380 if (fi_pkt != NULL) { 27381 SD_CONDSET(pktp, pkt, pkt_flags, "pkt_flags"); 27382 SD_CONDSET(*pktp, pkt, pkt_scbp, "pkt_scbp"); 27383 SD_CONDSET(*pktp, pkt, pkt_cdbp, "pkt_cdbp"); 27384 SD_CONDSET(pktp, pkt, pkt_state, "pkt_state"); 27385 SD_CONDSET(pktp, pkt, pkt_statistics, "pkt_statistics"); 27386 SD_CONDSET(pktp, pkt, pkt_reason, "pkt_reason"); 27387 27388 } 27389 27390 /* set xb if it was on fifo */ 27391 if (fi_xb != NULL) { 27392 SD_CONDSET(xb, xb, xb_blkno, "xb_blkno"); 27393 SD_CONDSET(xb, xb, xb_dma_resid, "xb_dma_resid"); 27394 SD_CONDSET(xb, xb, xb_retry_count, "xb_retry_count"); 27395 SD_CONDSET(xb, xb, xb_victim_retry_count, 27396 "xb_victim_retry_count"); 27397 SD_CONDSET(xb, xb, xb_sense_status, "xb_sense_status"); 27398 SD_CONDSET(xb, xb, xb_sense_state, "xb_sense_state"); 27399 SD_CONDSET(xb, xb, xb_sense_resid, "xb_sense_resid"); 27400 27401 /* copy in block data from sense */ 27402 if (fi_xb->xb_sense_data[0] != -1) { 27403 bcopy(fi_xb->xb_sense_data, xb->xb_sense_data, 27404 SENSE_LENGTH); 27405 } 27406 27407 /* copy in extended sense codes */ 27408 SD_CONDSET(((struct scsi_extended_sense *)xb), xb, es_code, 27409 "es_code"); 27410 SD_CONDSET(((struct scsi_extended_sense *)xb), xb, es_key, 27411 "es_key"); 27412 SD_CONDSET(((struct scsi_extended_sense *)xb), xb, es_add_code, 27413 "es_add_code"); 27414 SD_CONDSET(((struct scsi_extended_sense *)xb), xb, 27415 es_qual_code, "es_qual_code"); 27416 } 27417 27418 /* set un if it was on fifo */ 27419 if (fi_un != NULL) { 27420 SD_CONDSET(un->un_sd->sd_inq, un, inq_rmb, "inq_rmb"); 27421 SD_CONDSET(un, un, un_ctype, "un_ctype"); 27422 SD_CONDSET(un, un, un_reset_retry_count, 27423 "un_reset_retry_count"); 27424 SD_CONDSET(un, un, un_reservation_type, "un_reservation_type"); 27425 SD_CONDSET(un, un, un_resvd_status, "un_resvd_status"); 27426 SD_CONDSET(un, un, un_f_arq_enabled, "un_f_arq_enabled"); 27427 SD_CONDSET(un, un, un_f_allow_bus_device_reset, 27428 "un_f_allow_bus_device_reset"); 27429 SD_CONDSET(un, un, un_f_opt_queueing, "un_f_opt_queueing"); 27430 27431 } 27432 27433 /* copy in auto request sense if it was on fifo */ 27434 if (fi_arq != NULL) { 27435 bcopy(fi_arq, pktp->pkt_scbp, sizeof (struct sd_fi_arq)); 27436 } 27437 27438 /* free structs */ 27439 if (un->sd_fi_fifo_pkt[i] != NULL) { 27440 
kmem_free(un->sd_fi_fifo_pkt[i], sizeof (struct sd_fi_pkt)); 27441 } 27442 if (un->sd_fi_fifo_xb[i] != NULL) { 27443 kmem_free(un->sd_fi_fifo_xb[i], sizeof (struct sd_fi_xb)); 27444 } 27445 if (un->sd_fi_fifo_un[i] != NULL) { 27446 kmem_free(un->sd_fi_fifo_un[i], sizeof (struct sd_fi_un)); 27447 } 27448 if (un->sd_fi_fifo_arq[i] != NULL) { 27449 kmem_free(un->sd_fi_fifo_arq[i], sizeof (struct sd_fi_arq)); 27450 } 27451 27452 /* 27453 * kmem_free does not gurantee to set to NULL 27454 * since we uses these to determine if we set 27455 * values or not lets confirm they are always 27456 * NULL after free 27457 */ 27458 un->sd_fi_fifo_pkt[i] = NULL; 27459 un->sd_fi_fifo_un[i] = NULL; 27460 un->sd_fi_fifo_xb[i] = NULL; 27461 un->sd_fi_fifo_arq[i] = NULL; 27462 27463 un->sd_fi_fifo_start++; 27464 27465 mutex_exit(SD_MUTEX(un)); 27466 27467 SD_TRACE(SD_LOG_SDTEST, un, "sd_faultinjection: exit\n"); 27468 } 27469 27470 #endif /* SD_FAULT_INJECTION */ 27471 27472 /* 27473 * This routine is invoked in sd_unit_attach(). Before calling it, the 27474 * properties in conf file should be processed already, and "hotpluggable" 27475 * property was processed also. 27476 * 27477 * The sd driver distinguishes 3 different type of devices: removable media, 27478 * non-removable media, and hotpluggable. Below the differences are defined: 27479 * 27480 * 1. Device ID 27481 * 27482 * The device ID of a device is used to identify this device. Refer to 27483 * ddi_devid_register(9F). 27484 * 27485 * For a non-removable media disk device which can provide 0x80 or 0x83 27486 * VPD page (refer to INQUIRY command of SCSI SPC specification), a unique 27487 * device ID is created to identify this device. For other non-removable 27488 * media devices, a default device ID is created only if this device has 27489 * at least 2 alter cylinders. Otherwise, this device has no devid. 27490 * 27491 * ------------------------------------------------------- 27492 * removable media hotpluggable | Can Have Device ID 27493 * ------------------------------------------------------- 27494 * false false | Yes 27495 * false true | Yes 27496 * true x | No 27497 * ------------------------------------------------------ 27498 * 27499 * 27500 * 2. SCSI group 4 commands 27501 * 27502 * In SCSI specs, only some commands in group 4 command set can use 27503 * 8-byte addresses that can be used to access >2TB storage spaces. 27504 * Other commands have no such capability. Without supporting group4, 27505 * it is impossible to make full use of storage spaces of a disk with 27506 * capacity larger than 2TB. 27507 * 27508 * ----------------------------------------------- 27509 * removable media hotpluggable LP64 | Group 27510 * ----------------------------------------------- 27511 * false false false | 1 27512 * false false true | 4 27513 * false true false | 1 27514 * false true true | 4 27515 * true x x | 5 27516 * ----------------------------------------------- 27517 * 27518 * 27519 * 3. Check for VTOC Label 27520 * 27521 * If a direct-access disk has no EFI label, sd will check if it has a 27522 * valid VTOC label. Now, sd also does that check for removable media 27523 * and hotpluggable devices. 
27524 * 27525 * -------------------------------------------------------------- 27526 * Direct-Access removable media hotpluggable | Check Label 27527 * ------------------------------------------------------------- 27528 * false false false | No 27529 * false false true | No 27530 * false true false | Yes 27531 * false true true | Yes 27532 * true x x | Yes 27533 * -------------------------------------------------------------- 27534 * 27535 * 27536 * 4. Building default VTOC label 27537 * 27538 * As section 3 says, sd checks if some kinds of devices have VTOC label. 27539 * If those devices have no valid VTOC label, sd(7d) will attempt to 27540 * create default VTOC for them. Currently sd creates default VTOC label 27541 * for all devices on x86 platform (VTOC_16), but only for removable 27542 * media devices on SPARC (VTOC_8). 27543 * 27544 * ----------------------------------------------------------- 27545 * removable media hotpluggable platform | Default Label 27546 * ----------------------------------------------------------- 27547 * false false sparc | No 27548 * false true x86 | Yes 27549 * false true sparc | Yes 27550 * true x x | Yes 27551 * ---------------------------------------------------------- 27552 * 27553 * 27554 * 5. Supported blocksizes of target devices 27555 * 27556 * Sd supports non-512-byte blocksize for removable media devices only. 27557 * For other devices, only 512-byte blocksize is supported. This may be 27558 * changed in near future because some RAID devices require non-512-byte 27559 * blocksize 27560 * 27561 * ----------------------------------------------------------- 27562 * removable media hotpluggable | non-512-byte blocksize 27563 * ----------------------------------------------------------- 27564 * false false | No 27565 * false true | No 27566 * true x | Yes 27567 * ----------------------------------------------------------- 27568 * 27569 * 27570 * 6. Automatic mount & unmount 27571 * 27572 * Sd(7d) driver provides DKIOCREMOVABLE ioctl. This ioctl is used to query 27573 * if a device is removable media device. It return 1 for removable media 27574 * devices, and 0 for others. 27575 * 27576 * The automatic mounting subsystem should distinguish between the types 27577 * of devices and apply automounting policies to each. 27578 * 27579 * 27580 * 7. fdisk partition management 27581 * 27582 * Fdisk is traditional partition method on x86 platform. Sd(7d) driver 27583 * just supports fdisk partitions on x86 platform. On sparc platform, sd 27584 * doesn't support fdisk partitions at all. Note: pcfs(7fs) can recognize 27585 * fdisk partitions on both x86 and SPARC platform. 27586 * 27587 * ----------------------------------------------------------- 27588 * platform removable media USB/1394 | fdisk supported 27589 * ----------------------------------------------------------- 27590 * x86 X X | true 27591 * ------------------------------------------------------------ 27592 * sparc X X | false 27593 * ------------------------------------------------------------ 27594 * 27595 * 27596 * 8. MBOOT/MBR 27597 * 27598 * Although sd(7d) doesn't support fdisk on SPARC platform, it does support 27599 * read/write mboot for removable media devices on sparc platform. 
 *
 * 7. fdisk partition management
 *
 *     Fdisk is the traditional partitioning method on the x86 platform.
 *     The sd(7d) driver supports fdisk partitions only on x86; on SPARC,
 *     sd doesn't support fdisk partitions at all. Note: pcfs(7fs) can
 *     recognize fdisk partitions on both x86 and SPARC platforms.
 *
 *     -----------------------------------------------------------
 *     platform   removable media   USB/1394  |  fdisk supported
 *     -----------------------------------------------------------
 *     x86              X               X     |      true
 *     sparc            X               X     |      false
 *     -----------------------------------------------------------
 *
 *
 * 8. MBOOT/MBR
 *
 *     Although sd(7d) doesn't support fdisk on SPARC, it does support
 *     reading and writing the mboot for removable media devices on that
 *     platform.
 *
 *     -----------------------------------------------------------
 *     platform   removable media   USB/1394  |  mboot supported
 *     -----------------------------------------------------------
 *     x86              X               X     |      true
 *     sparc          false           false   |      false
 *     sparc          false           true    |      true
 *     sparc          true            false   |      true
 *     sparc          true            true    |      true
 *     -----------------------------------------------------------
 *
 *
 * 9. error handling during device open
 *
 *     If opening a disk device fails, an errno is returned. For some
 *     kinds of errors, the errno returned depends on whether the device
 *     is a removable media device. This brings USB/1394 hard disks in
 *     line with expected hard disk behavior, and it is not expected to
 *     break any application.
 *
 *     ------------------------------------------------------
 *     removable media   hotpluggable  |  errno
 *     ------------------------------------------------------
 *         false             false     |  EIO
 *         false             true      |  EIO
 *         true                x       |  ENXIO
 *     ------------------------------------------------------
 *
 *
 * 10. ioctls: DKIOCEJECT, CDROMEJECT
 *
 *     These ioctls are applicable only to removable media devices.
 *
 *     -----------------------------------------------------------
 *     removable media   hotpluggable  | DKIOCEJECT, CDROMEJECT
 *     -----------------------------------------------------------
 *         false             false     |         No
 *         false             true      |         No
 *         true                x       |         Yes
 *     -----------------------------------------------------------
 *
 *
 * 11. Kstats for partitions
 *
 *     sd creates partition kstats for non-removable media devices; USB
 *     and FireWire hard disks now have partition kstats as well.
 *
 *     ------------------------------------------------------
 *     removable media   hotpluggable  |  kstat
 *     ------------------------------------------------------
 *         false             false     |   Yes
 *         false             true      |   Yes
 *         true                x       |   No
 *     ------------------------------------------------------
 *
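 *     For example, partition kstats may be disabled with the following
 *     illustrative sd.conf fragment; the property name is the one read
 *     by sd_set_unit_attributes() below via ddi_prop_get_int():
 *
 *         enable-partition-kstats=0;
 *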
 *
 * 12. Removable media & hotpluggable properties
 *
 *     The sd driver creates a "removable-media" property for removable
 *     media devices. A parent nexus driver creates a "hotpluggable"
 *     property if it supports hotplugging.
 *
 *     ---------------------------------------------------------------------
 *     removable media   hotpluggable  |  "removable-media"  "hotpluggable"
 *     ---------------------------------------------------------------------
 *         false             false     |         No                No
 *         false             true      |         No                Yes
 *         true              false     |         Yes               No
 *         true              true      |         Yes               Yes
 *     ---------------------------------------------------------------------
 *
 *
 * 13. Power Management
 *
 *     sd only power manages removable media devices or devices that
 *     support LOG_SENSE or have a "pm-capable" property (PSARC/2002/250).
 *
 *     A parent nexus that supports hotplugging can also set "pm-capable"
 *     if the disk can be power managed (see the illustrative sketch
 *     following this comment block).
 *
 *     ------------------------------------------------------------
 *     removable media  hotpluggable  pm-capable  |  power manage
 *     ------------------------------------------------------------
 *         false           false        false     |      No
 *         false           false        true      |      Yes
 *         false           true         false     |      No
 *         false           true         true      |      Yes
 *         true              x            x       |      Yes
 *     ------------------------------------------------------------
 *
 *     USB and FireWire hard disks can now be power managed independently
 *     of the framebuffer.
 *
 *
 * 14. Support for USB disks with capacity larger than 1TB
 *
 *     Currently, sd doesn't permit a fixed disk device with a capacity
 *     larger than 1TB to be used in a 32-bit operating system
 *     environment. However, sd doesn't apply this restriction to
 *     removable media devices; instead, it assumes that removable media
 *     devices cannot have a capacity larger than 1TB. Therefore, using
 *     such devices on a 32-bit system is only partially supported, which
 *     can cause unexpected results.
 *
 *     ---------------------------------------------------------------------
 *     removable media   USB/1394  |  Capacity > 1TB  |  Used in 32-bit env
 *     ---------------------------------------------------------------------
 *         false           false   |       true       |         No
 *         false           true    |       true       |         No
 *         true            false   |       true       |         Yes
 *         true            true    |       true       |         Yes
 *     ---------------------------------------------------------------------
 *
 *
 * 15. Check write-protection at open time
 *
 *     When a removable media device is opened for writing without the
 *     NDELAY flag, sd checks whether the device is writable. An attempt
 *     to open a write-protected device for writing without the NDELAY
 *     flag aborts.
 *
 *     ------------------------------------------------------------
 *     removable media   USB/1394  |  WP Check
 *     ------------------------------------------------------------
 *         false           false   |     No
 *         false           true    |     No
 *         true            false   |     Yes
 *         true            true    |     Yes
 *     ------------------------------------------------------------
 *
 *
 * 16. syslog when a corrupted VTOC is encountered
 *
 *     Currently, if an invalid VTOC is encountered, sd issues a syslog
 *     message only for fixed SCSI disks.
 *
 *     ------------------------------------------------------------
 *     removable media   USB/1394  |  print syslog
 *     ------------------------------------------------------------
 *         false           false   |     Yes
 *         false           true    |     No
 *         true            false   |     No
 *         true            true    |     No
 *     ------------------------------------------------------------
 */
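/*
 * Illustrative sketch (not code from this driver): a parent nexus that
 * knows its child disk can be power managed might export the
 * "pm-capable" property referenced in section 13 above roughly as
 * follows (child_dip is a hypothetical devinfo node):
 *
 *         (void) ddi_prop_update_int(DDI_DEV_T_NONE, child_dip,
 *             "pm-capable", 1);
 *
 * sd_set_unit_attributes() below reads this property back with
 * ddi_prop_get_int() and uses it to decide whether the start/stop cycle
 * counter log sense page must be consulted.
 */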
static void
sd_set_unit_attributes(struct sd_lun *un, dev_info_t *devi)
{
        int     pm_capable_prop;

        ASSERT(un->un_sd);
        ASSERT(un->un_sd->sd_inq);

        /*
         * Enable SYNC CACHE support for all devices.
         */
        un->un_f_sync_cache_supported = TRUE;

        if (un->un_sd->sd_inq->inq_rmb) {
                /*
                 * The media of this device is removable, and for this kind
                 * of device it is possible to change the medium after it
                 * has been opened. Thus we should support this operation.
                 */
                un->un_f_has_removable_media = TRUE;

                /*
                 * Support non-512-byte blocksizes for removable media
                 * devices.
                 */
                un->un_f_non_devbsize_supported = TRUE;

                /*
                 * Assume that all removable media devices support
                 * DOOR_LOCK.
                 */
                un->un_f_doorlock_supported = TRUE;

                /*
                 * A removable media device may be opened with the NDELAY
                 * flag when there is no media in the drive; in that case we
                 * don't care whether the device is writable. Without the
                 * NDELAY flag, however, we need to check whether the media
                 * is write-protected.
                 */
                un->un_f_chk_wp_open = TRUE;

                /*
                 * We need to start a SCSI watch thread to monitor the media
                 * state: when media is inserted or ejected, notify
                 * syseventd.
                 */
                un->un_f_monitor_media_state = TRUE;

                /*
                 * Some devices don't support the START_STOP_UNIT command,
                 * so check whether a device supports it before sending it.
                 */
                un->un_f_check_start_stop = TRUE;

                /*
                 * Support the eject media ioctls:
                 * FDEJECT, DKIOCEJECT, CDROMEJECT
                 */
                un->un_f_eject_media_supported = TRUE;

                /*
                 * Because many removable media devices don't support
                 * LOG_SENSE, we cannot use that command to check whether
                 * such a device supports power management. We assume that
                 * they support power management via the START_STOP_UNIT
                 * command and can be spun up and down without limitation.
                 */
                un->un_f_pm_supported = TRUE;

                /*
                 * Create a zero-length (boolean) "removable-media"
                 * property for removable media devices. Note that the
                 * return value is not checked: if the property cannot be
                 * created, we do not want the attach to fail altogether.
                 * This is consistent with other property creation in
                 * attach.
                 */
                (void) ddi_prop_create(DDI_DEV_T_NONE, devi,
                    DDI_PROP_CANSLEEP, "removable-media", NULL, 0);

        } else {
                /*
                 * Create a device ID for the device.
                 */
                un->un_f_devid_supported = TRUE;

                /*
                 * Spin up non-removable media devices once attached.
                 */
                un->un_f_attach_spinup = TRUE;

                /*
                 * According to the SCSI specification, sense data has two
                 * formats: fixed format and descriptor format. At present,
                 * we don't support descriptor format sense data for
                 * removable media.
                 */
                if (SD_INQUIRY(un)->inq_dtype == DTYPE_DIRECT) {
                        un->un_f_descr_format_supported = TRUE;
                }
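                /*
                 * Illustrative note (added commentary, not driver logic):
                 * fixed format sense data is identified by response codes
                 * 0x70/0x71, while descriptor format uses 0x72/0x73; see
                 * the SPC specification for the layouts.
                 */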
                /*
                 * Kstats are created only for non-removable media devices.
                 *
                 * Set "enable-partition-kstats" to 0 in sd.conf in order to
                 * disable kstats. The default is 1, so they are enabled by
                 * default.
                 */
                un->un_f_pkstats_enabled = (ddi_prop_get_int(DDI_DEV_T_ANY,
                    SD_DEVINFO(un), DDI_PROP_DONTPASS,
                    "enable-partition-kstats", 1));

                /*
                 * Check if the HBA has set the "pm-capable" property. If
                 * "pm-capable" exists and is non-zero, we can power manage
                 * the device without checking the start/stop cycle count
                 * log sense page.
                 *
                 * If "pm-capable" exists and is SD_PM_CAPABLE_FALSE (0),
                 * we should not power manage the device.
                 *
                 * If "pm-capable" doesn't exist, pm_capable_prop is set to
                 * SD_PM_CAPABLE_UNDEFINED (-1). In this case, sd checks
                 * the start/stop cycle count log sense page and power
                 * manages the device if the cycle count limit has not been
                 * exceeded.
                 */
                pm_capable_prop = ddi_prop_get_int(DDI_DEV_T_ANY, devi,
                    DDI_PROP_DONTPASS, "pm-capable", SD_PM_CAPABLE_UNDEFINED);
                if (pm_capable_prop == SD_PM_CAPABLE_UNDEFINED) {
                        un->un_f_log_sense_supported = TRUE;
                } else {
                        /*
                         * The "pm-capable" property exists.
                         *
                         * Convert "TRUE" values of pm_capable_prop to
                         * SD_PM_CAPABLE_TRUE (1) to make later checks
                         * easier. "TRUE" values are any values except
                         * SD_PM_CAPABLE_FALSE (0) and
                         * SD_PM_CAPABLE_UNDEFINED (-1).
                         */
                        if (pm_capable_prop == SD_PM_CAPABLE_FALSE) {
                                un->un_f_log_sense_supported = FALSE;
                        } else {
                                un->un_f_pm_supported = TRUE;
                        }

                        SD_INFO(SD_LOG_ATTACH_DETACH, un,
                            "sd_unit_attach: un:0x%p pm-capable "
                            "property set to %d.\n", un, un->un_f_pm_supported);
                }
        }

        if (un->un_f_is_hotpluggable) {
                /*
                 * Watch hotpluggable devices as well; this is the only way
                 * for userland applications to detect hot removal while the
                 * device is busy or mounted.
                 */
                un->un_f_monitor_media_state = TRUE;

                un->un_f_check_start_stop = TRUE;
        }
}

/*
 * sd_tg_rdwr:
 *     Provides read/write access for cmlb via sd_tgops. start_block is
 *     in units of the system block size; reqlength is in bytes.
 */
static int
sd_tg_rdwr(dev_info_t *devi, uchar_t cmd, void *bufaddr,
    diskaddr_t start_block, size_t reqlength, void *tg_cookie)
{
        struct sd_lun   *un;
        int             path_flag = (int)(uintptr_t)tg_cookie;
        char            *dkl = NULL;
        diskaddr_t      real_addr = start_block;
        diskaddr_t      first_byte, end_block;
        size_t          buffer_size = reqlength;
        int             rval;
        diskaddr_t      cap;
        uint32_t        lbasize;

        un = ddi_get_soft_state(sd_state, ddi_get_instance(devi));
        if (un == NULL)
                return (ENXIO);

        if (cmd != TG_READ && cmd != TG_WRITE)
                return (EINVAL);

        mutex_enter(SD_MUTEX(un));
        if (un->un_f_tgt_blocksize_is_valid == FALSE) {
                mutex_exit(SD_MUTEX(un));
                rval = sd_send_scsi_READ_CAPACITY(un, (uint64_t *)&cap,
                    &lbasize, path_flag);
                if (rval != 0)
                        return (rval);
                mutex_enter(SD_MUTEX(un));
                sd_update_block_info(un, lbasize, cap);
                if (un->un_f_tgt_blocksize_is_valid == FALSE) {
                        mutex_exit(SD_MUTEX(un));
                        return (EIO);
                }
        }

        if (NOT_DEVBSIZE(un)) {
                /*
                 * sys_blocksize != tgt_blocksize: re-adjust the block
                 * number and remember the byte offset into the first
                 * target block.
                 */
                first_byte = SD_SYSBLOCKS2BYTES(un, start_block);
                real_addr = first_byte / un->un_tgt_blocksize;

                end_block = (first_byte + reqlength +
                    un->un_tgt_blocksize - 1) / un->un_tgt_blocksize;

                /* round up buffer size to multiple of target block size */
                buffer_size = (end_block - real_addr) * un->un_tgt_blocksize;

                SD_TRACE(SD_LOG_IO_PARTITION, un,
                    "sd_tg_rdwr: label_addr: 0x%x allocation size: 0x%x\n",
                    real_addr, buffer_size);

                if (((first_byte % un->un_tgt_blocksize) != 0) ||
                    ((reqlength % un->un_tgt_blocksize) != 0)) {
                        /* the request is not aligned */
                        dkl = kmem_zalloc(buffer_size, KM_SLEEP);
                }
        }
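        /*
         * Worked example (illustrative numbers only): with a 512-byte
         * system block size and a 2048-byte target block size,
         * start_block = 5 and reqlength = 512 give first_byte = 2560,
         * real_addr = 1, end_block = 2 and buffer_size = 2048. Since
         * 2560 % 2048 != 0, the request is unaligned and is bounced
         * through the temporary buffer dkl.
         */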
        /*
         * The MMC standard allows READ CAPACITY to be inaccurate by a
         * bounded amount (in the interest of response latency). As a
         * result, failed READs are commonplace (due to the reading of
         * metadata and not data). Depending on the per-vendor/drive
         * sense data, the failed READ can cause many (unnecessary)
         * retries.
         */
        if (ISCD(un) && (cmd == TG_READ) &&
            (un->un_f_blockcount_is_valid == TRUE) &&
            ((start_block == (un->un_blockcount - 1)) ||
            (start_block == (un->un_blockcount - 2)))) {
                path_flag = SD_PATH_DIRECT_PRIORITY;
        }

        mutex_exit(SD_MUTEX(un));
        if (cmd == TG_READ) {
                rval = sd_send_scsi_READ(un, (dkl != NULL) ? dkl : bufaddr,
                    buffer_size, real_addr, path_flag);
                if (dkl != NULL)
                        bcopy(dkl + SD_TGTBYTEOFFSET(un, start_block,
                            real_addr), bufaddr, reqlength);
        } else {
                if (dkl) {
                        /*
                         * Unaligned write: read the covering target blocks
                         * first, then merge in the caller's data
                         * (read-modify-write).
                         */
                        rval = sd_send_scsi_READ(un, dkl, buffer_size,
                            real_addr, path_flag);
                        if (rval) {
                                kmem_free(dkl, buffer_size);
                                return (rval);
                        }
                        bcopy(bufaddr, dkl + SD_TGTBYTEOFFSET(un, start_block,
                            real_addr), reqlength);
                }
                rval = sd_send_scsi_WRITE(un, (dkl != NULL) ? dkl : bufaddr,
                    buffer_size, real_addr, path_flag);
        }

        if (dkl != NULL)
                kmem_free(dkl, buffer_size);

        return (rval);
}


static int
sd_tg_getinfo(dev_info_t *devi, int cmd, void *arg, void *tg_cookie)
{
        struct sd_lun   *un;
        diskaddr_t      cap;
        uint32_t        lbasize;
        int             path_flag = (int)(uintptr_t)tg_cookie;
        int             ret = 0;

        un = ddi_get_soft_state(sd_state, ddi_get_instance(devi));
        if (un == NULL)
                return (ENXIO);

        switch (cmd) {
        case TG_GETPHYGEOM:
        case TG_GETVIRTGEOM:
        case TG_GETCAPACITY:
        case TG_GETBLOCKSIZE:
                mutex_enter(SD_MUTEX(un));

                if ((un->un_f_blockcount_is_valid == TRUE) &&
                    (un->un_f_tgt_blocksize_is_valid == TRUE)) {
                        cap = un->un_blockcount;
                        lbasize = un->un_tgt_blocksize;
                        mutex_exit(SD_MUTEX(un));
                } else {
                        mutex_exit(SD_MUTEX(un));
                        ret = sd_send_scsi_READ_CAPACITY(un, (uint64_t *)&cap,
                            &lbasize, path_flag);
                        if (ret != 0)
                                return (ret);
                        mutex_enter(SD_MUTEX(un));
                        sd_update_block_info(un, lbasize, cap);
                        if ((un->un_f_blockcount_is_valid == FALSE) ||
                            (un->un_f_tgt_blocksize_is_valid == FALSE)) {
                                mutex_exit(SD_MUTEX(un));
                                return (EIO);
                        }
                        mutex_exit(SD_MUTEX(un));
                }

                if (cmd == TG_GETCAPACITY) {
                        *(diskaddr_t *)arg = cap;
                        return (0);
                }

                if (cmd == TG_GETBLOCKSIZE) {
                        *(uint32_t *)arg = lbasize;
                        return (0);
                }

                if (cmd == TG_GETPHYGEOM)
                        ret = sd_get_physical_geometry(un, (cmlb_geom_t *)arg,
                            cap, lbasize, path_flag);
                else
                        /* TG_GETVIRTGEOM */
                        ret = sd_get_virtual_geometry(un,
                            (cmlb_geom_t *)arg, cap, lbasize);

                return (ret);

        case TG_GETATTR:
                mutex_enter(SD_MUTEX(un));
                ((tg_attribute_t *)arg)->media_is_writable =
                    un->un_f_mmc_writable_media;
                mutex_exit(SD_MUTEX(un));
                return (0);
        default:
                return (ENOTTY);
        }
}
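/*
 * Illustrative sketch (not part of this driver): a cmlb-style consumer
 * could query capacity and block size through the tg_ops interface
 * roughly as follows (devi and cookie are hypothetical):
 *
 *         diskaddr_t cap;
 *         uint32_t lbasize;
 *
 *         if (sd_tg_getinfo(devi, TG_GETCAPACITY, &cap, cookie) == 0 &&
 *             sd_tg_getinfo(devi, TG_GETBLOCKSIZE, &lbasize, cookie) == 0)
 *                 the total capacity in bytes is cap * lbasize
 */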